author Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> 2023-10-10 11:40:56 +0000
committer Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com> 2023-10-10 11:40:56 +0000
commit e02cda008591317b1625707ff8e115a4841aa889 (patch)
tree aee302e3cf8b59ec2d32ec481be3d1afddfc8968 /target
parent cc668e6b7e0ffd8c9d130513d12053cf5eda1d3b (diff)
Introduce Virtio-loopback epsilon release:

The epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and the rust-vmm vhost-user backend
without requiring any changes.

Signed-off-by: Timos Ampelikiotis <t.ampelikiotis@virtualopensystems.com>
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
Diffstat (limited to 'target')
-rw-r--r--target/Kconfig19
-rw-r--r--target/alpha/Kconfig2
-rw-r--r--target/alpha/STATUS28
-rw-r--r--target/alpha/cpu-param.h20
-rw-r--r--target/alpha/cpu-qom.h48
-rw-r--r--target/alpha/cpu.c286
-rw-r--r--target/alpha/cpu.h512
-rw-r--r--target/alpha/fpu_helper.c561
-rw-r--r--target/alpha/gdbstub.c93
-rw-r--r--target/alpha/helper.c548
-rw-r--r--target/alpha/helper.h100
-rw-r--r--target/alpha/int_helper.c265
-rw-r--r--target/alpha/machine.c87
-rw-r--r--target/alpha/mem_helper.c79
-rw-r--r--target/alpha/meson.build18
-rw-r--r--target/alpha/sys_helper.c92
-rw-r--r--target/alpha/translate.c3043
-rw-r--r--target/alpha/vax_helper.c355
-rw-r--r--target/arm/Kconfig6
-rw-r--r--target/arm/a32-uncond.decode74
-rw-r--r--target/arm/a32.decode553
-rw-r--r--target/arm/arch_dump.c467
-rw-r--r--target/arm/arm-powerctl.c368
-rw-r--r--target/arm/arm-powerctl.h93
-rw-r--r--target/arm/arm_ldst.h47
-rw-r--r--target/arm/cpu-param.h37
-rw-r--r--target/arm/cpu-qom.h98
-rw-r--r--target/arm/cpu.c2162
-rw-r--r--target/arm/cpu.h4395
-rw-r--r--target/arm/cpu64.c1005
-rw-r--r--target/arm/cpu_tcg.c1082
-rw-r--r--target/arm/crypto_helper.c812
-rw-r--r--target/arm/debug_helper.c329
-rw-r--r--target/arm/gdbstub.c500
-rw-r--r--target/arm/gdbstub64.c211
-rw-r--r--target/arm/helper-a64.c1099
-rw-r--r--target/arm/helper-a64.h120
-rw-r--r--target/arm/helper-mve.h890
-rw-r--r--target/arm/helper-sve.h2800
-rw-r--r--target/arm/helper.c13572
-rw-r--r--target/arm/helper.h1023
-rw-r--r--target/arm/hvf/hvf.c1285
-rw-r--r--target/arm/hvf/meson.build3
-rw-r--r--target/arm/hvf/trace-events11
-rw-r--r--target/arm/hvf_arm.h18
-rw-r--r--target/arm/idau.h58
-rw-r--r--target/arm/internals.h1288
-rw-r--r--target/arm/iwmmxt_helper.c670
-rw-r--r--target/arm/kvm-consts.h180
-rw-r--r--target/arm/kvm-stub.c24
-rw-r--r--target/arm/kvm.c1059
-rw-r--r--target/arm/kvm64.c1606
-rw-r--r--target/arm/kvm_arm.h531
-rw-r--r--target/arm/m-nocp.decode72
-rw-r--r--target/arm/m_helper.c2901
-rw-r--r--target/arm/machine.c882
-rw-r--r--target/arm/meson.build66
-rw-r--r--target/arm/monitor.c230
-rw-r--r--target/arm/mte_helper.c940
-rw-r--r--target/arm/mve.decode832
-rw-r--r--target/arm/mve_helper.c3450
-rw-r--r--target/arm/neon-dp.decode646
-rw-r--r--target/arm/neon-ls.decode52
-rw-r--r--target/arm/neon-shared.decode99
-rw-r--r--target/arm/neon_helper.c1740
-rw-r--r--target/arm/op_addsub.h103
-rw-r--r--target/arm/op_helper.c974
-rw-r--r--target/arm/pauth_helper.c515
-rw-r--r--target/arm/psci.c216
-rw-r--r--target/arm/sve.decode1645
-rw-r--r--target/arm/sve_helper.c7654
-rw-r--r--target/arm/syndrome.h285
-rw-r--r--target/arm/t16.decode281
-rw-r--r--target/arm/t32.decode753
-rw-r--r--target/arm/tlb_helper.c222
-rw-r--r--target/arm/trace-events13
-rw-r--r--target/arm/trace.h1
-rw-r--r--target/arm/translate-a32.h154
-rw-r--r--target/arm/translate-a64.c14963
-rw-r--r--target/arm/translate-a64.h127
-rw-r--r--target/arm/translate-m-nocp.c791
-rw-r--r--target/arm/translate-mve.c2311
-rw-r--r--target/arm/translate-neon.c4064
-rw-r--r--target/arm/translate-sve.c8744
-rw-r--r--target/arm/translate-vfp.c3627
-rw-r--r--target/arm/translate.c9923
-rw-r--r--target/arm/translate.h586
-rw-r--r--target/arm/vec_helper.c2666
-rw-r--r--target/arm/vec_internal.h220
-rw-r--r--target/arm/vfp-uncond.decode82
-rw-r--r--target/arm/vfp.decode247
-rw-r--r--target/arm/vfp_helper.c1348
-rw-r--r--target/avr/Kconfig2
-rw-r--r--target/avr/cpu-param.h36
-rw-r--r--target/avr/cpu-qom.h49
-rw-r--r--target/avr/cpu.c378
-rw-r--r--target/avr/cpu.h255
-rw-r--r--target/avr/disas.c245
-rw-r--r--target/avr/gdbstub.c97
-rw-r--r--target/avr/helper.c347
-rw-r--r--target/avr/helper.h29
-rw-r--r--target/avr/insn.decode187
-rw-r--r--target/avr/machine.c119
-rw-r--r--target/avr/meson.build20
-rw-r--r--target/avr/translate.c3043
-rw-r--r--target/cris/Kconfig2
-rw-r--r--target/cris/cpu-param.h17
-rw-r--r--target/cris/cpu-qom.h51
-rw-r--r--target/cris/cpu.c338
-rw-r--r--target/cris/cpu.h286
-rw-r--r--target/cris/crisv10-decode.h112
-rw-r--r--target/cris/crisv32-decode.h133
-rw-r--r--target/cris/gdbstub.c130
-rw-r--r--target/cris/helper.c290
-rw-r--r--target/cris/helper.h23
-rw-r--r--target/cris/machine.c93
-rw-r--r--target/cris/meson.build17
-rw-r--r--target/cris/mmu.c355
-rw-r--r--target/cris/mmu.h22
-rw-r--r--target/cris/op_helper.c581
-rw-r--r--target/cris/opcode-cris.h355
-rw-r--r--target/cris/translate.c3398
-rw-r--r--target/cris/translate_v10.c.inc1333
-rw-r--r--target/hexagon/README314
-rw-r--r--target/hexagon/arch.c411
-rw-r--r--target/hexagon/arch.h41
-rw-r--r--target/hexagon/attribs.h35
-rw-r--r--target/hexagon/attribs_def.h.inc120
-rw-r--r--target/hexagon/cpu-param.h29
-rw-r--r--target/hexagon/cpu.c368
-rw-r--r--target/hexagon/cpu.h191
-rw-r--r--target/hexagon/cpu_bits.h58
-rw-r--r--target/hexagon/decode.c980
-rw-r--r--target/hexagon/decode.h32
-rwxr-xr-xtarget/hexagon/dectree.py351
-rw-r--r--target/hexagon/fma_emu.c702
-rw-r--r--target/hexagon/fma_emu.h36
-rw-r--r--target/hexagon/gdbstub.c47
-rw-r--r--target/hexagon/gen_dectree_import.c201
-rwxr-xr-xtarget/hexagon/gen_helper_funcs.py315
-rwxr-xr-xtarget/hexagon/gen_helper_protos.py165
-rwxr-xr-xtarget/hexagon/gen_op_attribs.py39
-rwxr-xr-xtarget/hexagon/gen_op_regs.py110
-rwxr-xr-xtarget/hexagon/gen_opcodes_def.py36
-rwxr-xr-xtarget/hexagon/gen_printinsn.py173
-rw-r--r--target/hexagon/gen_semantics.c121
-rwxr-xr-xtarget/hexagon/gen_shortcode.py60
-rw-r--r--target/hexagon/gen_tcg.h743
-rwxr-xr-xtarget/hexagon/gen_tcg_func_table.py58
-rwxr-xr-xtarget/hexagon/gen_tcg_funcs.py707
-rw-r--r--target/hexagon/gen_tcg_hvx.h903
-rw-r--r--target/hexagon/genptr.c638
-rw-r--r--target/hexagon/genptr.h25
-rw-r--r--target/hexagon/helper.h109
-rw-r--r--target/hexagon/hex_arch_types.h43
-rwxr-xr-xtarget/hexagon/hex_common.py249
-rw-r--r--target/hexagon/hex_regs.h84
-rw-r--r--target/hexagon/iclass.c69
-rw-r--r--target/hexagon/iclass.h50
-rw-r--r--target/hexagon/imported/allext.idef25
-rw-r--r--target/hexagon/imported/allext_macros.def25
-rw-r--r--target/hexagon/imported/allextenc.def20
-rw-r--r--target/hexagon/imported/allidefs.def31
-rw-r--r--target/hexagon/imported/alu.idef1302
-rw-r--r--target/hexagon/imported/branch.idef326
-rw-r--r--target/hexagon/imported/compare.idef619
-rw-r--r--target/hexagon/imported/encode.def125
-rw-r--r--target/hexagon/imported/encode_pp.def2143
-rw-r--r--target/hexagon/imported/encode_subinsn.def149
-rw-r--r--target/hexagon/imported/float.idef344
-rw-r--r--target/hexagon/imported/iclass.def51
-rw-r--r--target/hexagon/imported/ldst.idef354
-rwxr-xr-xtarget/hexagon/imported/macros.def1666
-rw-r--r--target/hexagon/imported/mmvec/encode_ext.def794
-rw-r--r--target/hexagon/imported/mmvec/ext.idef2606
-rwxr-xr-xtarget/hexagon/imported/mmvec/macros.def842
-rw-r--r--target/hexagon/imported/mpy.idef1208
-rw-r--r--target/hexagon/imported/shift.idef1113
-rw-r--r--target/hexagon/imported/subinsns.idef149
-rw-r--r--target/hexagon/imported/system.idef68
-rw-r--r--target/hexagon/insn.h78
-rw-r--r--target/hexagon/internal.h41
-rw-r--r--target/hexagon/macros.h716
-rw-r--r--target/hexagon/meson.build182
-rw-r--r--target/hexagon/mmvec/decode_ext_mmvec.c236
-rw-r--r--target/hexagon/mmvec/decode_ext_mmvec.h24
-rw-r--r--target/hexagon/mmvec/macros.h354
-rw-r--r--target/hexagon/mmvec/mmvec.h82
-rw-r--r--target/hexagon/mmvec/system_ext_mmvec.c47
-rw-r--r--target/hexagon/mmvec/system_ext_mmvec.h25
-rw-r--r--target/hexagon/op_helper.c1475
-rw-r--r--target/hexagon/opcodes.c143
-rw-r--r--target/hexagon/opcodes.h58
-rw-r--r--target/hexagon/printinsn.c146
-rw-r--r--target/hexagon/printinsn.h27
-rw-r--r--target/hexagon/reg_fields.c26
-rw-r--r--target/hexagon/reg_fields.h36
-rw-r--r--target/hexagon/reg_fields_def.h.inc41
-rw-r--r--target/hexagon/translate.c973
-rw-r--r--target/hexagon/translate.h151
-rw-r--r--target/hppa/Kconfig2
-rw-r--r--target/hppa/cpu-param.h34
-rw-r--r--target/hppa/cpu-qom.h48
-rw-r--r--target/hppa/cpu.c196
-rw-r--r--target/hppa/cpu.h341
-rw-r--r--target/hppa/gdbstub.c282
-rw-r--r--target/hppa/helper.c129
-rw-r--r--target/hppa/helper.h95
-rw-r--r--target/hppa/insns.decode533
-rw-r--r--target/hppa/int_helper.c260
-rw-r--r--target/hppa/machine.c180
-rw-r--r--target/hppa/mem_helper.c380
-rw-r--r--target/hppa/meson.build21
-rw-r--r--target/hppa/op_helper.c705
-rw-r--r--target/hppa/trace-events18
-rw-r--r--target/hppa/trace.h1
-rw-r--r--target/hppa/translate.c4333
-rw-r--r--target/i386/Kconfig5
-rw-r--r--target/i386/arch_dump.c461
-rw-r--r--target/i386/arch_memory_mapping.c314
-rw-r--r--target/i386/cpu-dump.c570
-rw-r--r--target/i386/cpu-internal.h70
-rw-r--r--target/i386/cpu-param.h28
-rw-r--r--target/i386/cpu-qom.h75
-rw-r--r--target/i386/cpu-sysemu.c352
-rw-r--r--target/i386/cpu.c7055
-rw-r--r--target/i386/cpu.h2298
-rw-r--r--target/i386/gdbstub.c397
-rw-r--r--target/i386/hax/hax-accel-ops.c102
-rw-r--r--target/i386/hax/hax-accel-ops.h31
-rw-r--r--target/i386/hax/hax-all.c1140
-rw-r--r--target/i386/hax/hax-i386.h96
-rw-r--r--target/i386/hax/hax-interface.h369
-rw-r--r--target/i386/hax/hax-mem.c323
-rw-r--r--target/i386/hax/hax-posix.c305
-rw-r--r--target/i386/hax/hax-posix.h61
-rw-r--r--target/i386/hax/hax-windows.c485
-rw-r--r--target/i386/hax/hax-windows.h88
-rw-r--r--target/i386/hax/meson.build7
-rw-r--r--target/i386/helper.c679
-rw-r--r--target/i386/helper.h233
-rw-r--r--target/i386/host-cpu.c207
-rw-r--r--target/i386/host-cpu.h19
-rw-r--r--target/i386/hvf/README.md7
-rw-r--r--target/i386/hvf/hvf-cpu.c97
-rw-r--r--target/i386/hvf/hvf-i386.h34
-rw-r--r--target/i386/hvf/hvf.c673
-rw-r--r--target/i386/hvf/meson.build13
-rw-r--r--target/i386/hvf/panic.h45
-rw-r--r--target/i386/hvf/vmcs.h374
-rw-r--r--target/i386/hvf/vmx.h238
-rw-r--r--target/i386/hvf/x86.c186
-rw-r--r--target/i386/hvf/x86.h323
-rw-r--r--target/i386/hvf/x86_cpuid.c164
-rw-r--r--target/i386/hvf/x86_decode.c2197
-rw-r--r--target/i386/hvf/x86_decode.h325
-rw-r--r--target/i386/hvf/x86_descr.c125
-rw-r--r--target/i386/hvf/x86_descr.h58
-rw-r--r--target/i386/hvf/x86_emu.c1489
-rw-r--r--target/i386/hvf/x86_emu.h50
-rw-r--r--target/i386/hvf/x86_flags.c314
-rw-r--r--target/i386/hvf/x86_flags.h81
-rw-r--r--target/i386/hvf/x86_mmu.c268
-rw-r--r--target/i386/hvf/x86_mmu.h44
-rw-r--r--target/i386/hvf/x86_task.c185
-rw-r--r--target/i386/hvf/x86_task.h20
-rw-r--r--target/i386/hvf/x86hvf.c463
-rw-r--r--target/i386/hvf/x86hvf.h36
-rw-r--r--target/i386/kvm/hyperv-proto.h172
-rw-r--r--target/i386/kvm/hyperv-stub.c48
-rw-r--r--target/i386/kvm/hyperv.c101
-rw-r--r--target/i386/kvm/hyperv.h29
-rw-r--r--target/i386/kvm/kvm-cpu.c201
-rw-r--r--target/i386/kvm/kvm-cpu.h41
-rw-r--r--target/i386/kvm/kvm-stub.c46
-rw-r--r--target/i386/kvm/kvm.c5052
-rw-r--r--target/i386/kvm/kvm_i386.h56
-rw-r--r--target/i386/kvm/meson.build14
-rw-r--r--target/i386/kvm/sev-stub.c22
-rw-r--r--target/i386/kvm/trace-events7
-rw-r--r--target/i386/kvm/trace.h1
-rw-r--r--target/i386/machine.c1598
-rw-r--r--target/i386/meson.build36
-rw-r--r--target/i386/monitor.c669
-rw-r--r--target/i386/nvmm/meson.build8
-rw-r--r--target/i386/nvmm/nvmm-accel-ops.c111
-rw-r--r--target/i386/nvmm/nvmm-accel-ops.h24
-rw-r--r--target/i386/nvmm/nvmm-all.c1236
-rw-r--r--target/i386/ops_sse.h2326
-rw-r--r--target/i386/ops_sse_header.h356
-rw-r--r--target/i386/sev-sysemu-stub.c70
-rw-r--r--target/i386/sev.c1341
-rw-r--r--target/i386/sev.h62
-rw-r--r--target/i386/shift_helper_template.h108
-rw-r--r--target/i386/svm.h240
-rw-r--r--target/i386/tcg/bpt_helper.c39
-rw-r--r--target/i386/tcg/cc_helper.c389
-rw-r--r--target/i386/tcg/cc_helper_template.h242
-rw-r--r--target/i386/tcg/excp_helper.c141
-rw-r--r--target/i386/tcg/fpu_helper.c3041
-rw-r--r--target/i386/tcg/helper-tcg.h110
-rw-r--r--target/i386/tcg/int_helper.c494
-rw-r--r--target/i386/tcg/mem_helper.c183
-rw-r--r--target/i386/tcg/meson.build15
-rw-r--r--target/i386/tcg/misc_helper.c138
-rw-r--r--target/i386/tcg/mpx_helper.c139
-rw-r--r--target/i386/tcg/seg_helper.c2343
-rw-r--r--target/i386/tcg/seg_helper.h66
-rw-r--r--target/i386/tcg/sysemu/bpt_helper.c298
-rw-r--r--target/i386/tcg/sysemu/excp_helper.c474
-rw-r--r--target/i386/tcg/sysemu/fpu_helper.c57
-rw-r--r--target/i386/tcg/sysemu/meson.build10
-rw-r--r--target/i386/tcg/sysemu/misc_helper.c516
-rw-r--r--target/i386/tcg/sysemu/seg_helper.c216
-rw-r--r--target/i386/tcg/sysemu/smm_helper.c319
-rw-r--r--target/i386/tcg/sysemu/svm_helper.c863
-rw-r--r--target/i386/tcg/sysemu/tcg-cpu.c83
-rw-r--r--target/i386/tcg/tcg-cpu.c160
-rw-r--r--target/i386/tcg/tcg-cpu.h81
-rw-r--r--target/i386/tcg/tcg-stub.c25
-rw-r--r--target/i386/tcg/translate.c8717
-rw-r--r--target/i386/tcg/user/excp_helper.c50
-rw-r--r--target/i386/tcg/user/meson.build4
-rw-r--r--target/i386/tcg/user/seg_helper.c109
-rw-r--r--target/i386/trace-events13
-rw-r--r--target/i386/trace.h1
-rw-r--r--target/i386/whpx/meson.build5
-rw-r--r--target/i386/whpx/whpx-accel-ops.c111
-rw-r--r--target/i386/whpx/whpx-accel-ops.h32
-rw-r--r--target/i386/whpx/whpx-all.c1939
-rw-r--r--target/i386/whpx/whpx-apic.c281
-rw-r--r--target/i386/whpx/whpx-internal.h86
-rw-r--r--target/i386/xsave_helper.c251
-rw-r--r--target/m68k/Kconfig2
-rw-r--r--target/m68k/cpu-param.h22
-rw-r--r--target/m68k/cpu-qom.h48
-rw-r--r--target/m68k/cpu.c605
-rw-r--r--target/m68k/cpu.h611
-rw-r--r--target/m68k/fpu_helper.c666
-rw-r--r--target/m68k/gdbstub.c78
-rw-r--r--target/m68k/helper.c1536
-rw-r--r--target/m68k/helper.h130
-rw-r--r--target/m68k/m68k-semi.c469
-rw-r--r--target/m68k/meson.build17
-rw-r--r--target/m68k/monitor.c62
-rw-r--r--target/m68k/op_helper.c1110
-rw-r--r--target/m68k/qregs.def10
-rw-r--r--target/m68k/softfloat.c2835
-rw-r--r--target/m68k/softfloat.h48
-rw-r--r--target/m68k/softfloat_fpsp_tables.h642
-rw-r--r--target/m68k/translate.c6373
-rw-r--r--target/meson.build20
-rw-r--r--target/microblaze/Kconfig2
-rw-r--r--target/microblaze/cpu-param.h33
-rw-r--r--target/microblaze/cpu-qom.h48
-rw-r--r--target/microblaze/cpu.c421
-rw-r--r--target/microblaze/cpu.h443
-rw-r--r--target/microblaze/gdbstub.c146
-rw-r--r--target/microblaze/helper.c298
-rw-r--r--target/microblaze/helper.h31
-rw-r--r--target/microblaze/insns.decode256
-rw-r--r--target/microblaze/machine.c106
-rw-r--r--target/microblaze/meson.build20
-rw-r--r--target/microblaze/mmu.c327
-rw-r--r--target/microblaze/mmu.h92
-rw-r--r--target/microblaze/op_helper.c430
-rw-r--r--target/microblaze/translate.c1953
-rw-r--r--target/mips/Kconfig6
-rw-r--r--target/mips/TODO51
-rw-r--r--target/mips/cpu-defs.c.inc977
-rw-r--r--target/mips/cpu-param.h34
-rw-r--r--target/mips/cpu-qom.h56
-rw-r--r--target/mips/cpu.c679
-rw-r--r--target/mips/cpu.h1350
-rw-r--r--target/mips/fpu.c25
-rw-r--r--target/mips/fpu_helper.h65
-rw-r--r--target/mips/gdbstub.c150
-rw-r--r--target/mips/helper.h599
-rw-r--r--target/mips/internal.h400
-rw-r--r--target/mips/kvm.c1297
-rw-r--r--target/mips/kvm_mips.h37
-rw-r--r--target/mips/meson.build23
-rw-r--r--target/mips/mips-defs.h94
-rw-r--r--target/mips/msa.c60
-rw-r--r--target/mips/sysemu/addr.c61
-rw-r--r--target/mips/sysemu/cp0.c123
-rw-r--r--target/mips/sysemu/cp0_timer.c145
-rw-r--r--target/mips/sysemu/machine.c333
-rw-r--r--target/mips/sysemu/meson.build7
-rw-r--r--target/mips/sysemu/physaddr.c257
-rw-r--r--target/mips/tcg/dsp_helper.c3771
-rw-r--r--target/mips/tcg/exception.c149
-rw-r--r--target/mips/tcg/fpu_helper.c2254
-rw-r--r--target/mips/tcg/ldst_helper.c306
-rw-r--r--target/mips/tcg/lmmi_helper.c747
-rw-r--r--target/mips/tcg/meson.build33
-rw-r--r--target/mips/tcg/micromips_translate.c.inc3231
-rw-r--r--target/mips/tcg/mips16e_translate.c.inc1123
-rw-r--r--target/mips/tcg/msa.decode258
-rw-r--r--target/mips/tcg/msa_helper.c8388
-rw-r--r--target/mips/tcg/msa_helper.h.inc443
-rw-r--r--target/mips/tcg/msa_translate.c799
-rw-r--r--target/mips/tcg/mxu_translate.c1605
-rw-r--r--target/mips/tcg/nanomips_translate.c.inc4928
-rw-r--r--target/mips/tcg/op_helper.c303
-rw-r--r--target/mips/tcg/rel6.decode49
-rw-r--r--target/mips/tcg/rel6_translate.c37
-rw-r--r--target/mips/tcg/sysemu/cp0_helper.c1706
-rw-r--r--target/mips/tcg/sysemu/meson.build6
-rw-r--r--target/mips/tcg/sysemu/mips-semi.c367
-rw-r--r--target/mips/tcg/sysemu/special_helper.c173
-rw-r--r--target/mips/tcg/sysemu/tlb_helper.c1423
-rw-r--r--target/mips/tcg/sysemu_helper.h.inc185
-rw-r--r--target/mips/tcg/tcg-internal.h66
-rw-r--r--target/mips/tcg/trace-events5
-rw-r--r--target/mips/tcg/trace.h1
-rw-r--r--target/mips/tcg/translate.c16220
-rw-r--r--target/mips/tcg/translate.h232
-rw-r--r--target/mips/tcg/translate_addr_const.c61
-rw-r--r--target/mips/tcg/tx79.decode73
-rw-r--r--target/mips/tcg/tx79_translate.c685
-rw-r--r--target/mips/tcg/txx9_translate.c20
-rw-r--r--target/mips/tcg/vr54xx.decode27
-rw-r--r--target/mips/tcg/vr54xx_helper.c142
-rw-r--r--target/mips/tcg/vr54xx_helper.h.inc24
-rw-r--r--target/mips/tcg/vr54xx_translate.c72
-rw-r--r--target/nios2/Kconfig2
-rw-r--r--target/nios2/cpu-param.h21
-rw-r--r--target/nios2/cpu.c269
-rw-r--r--target/nios2/cpu.h249
-rw-r--r--target/nios2/helper.c315
-rw-r--r--target/nios2/helper.h27
-rw-r--r--target/nios2/meson.build15
-rw-r--r--target/nios2/mmu.c281
-rw-r--r--target/nios2/mmu.h51
-rw-r--r--target/nios2/monitor.c35
-rw-r--r--target/nios2/nios2-semi.c454
-rw-r--r--target/nios2/op_helper.c61
-rw-r--r--target/nios2/translate.c901
-rw-r--r--target/openrisc/Kconfig2
-rw-r--r--target/openrisc/cpu-param.h17
-rw-r--r--target/openrisc/cpu.c285
-rw-r--r--target/openrisc/cpu.h416
-rw-r--r--target/openrisc/disas.c249
-rw-r--r--target/openrisc/exception.c31
-rw-r--r--target/openrisc/exception.h27
-rw-r--r--target/openrisc/exception_helper.c60
-rw-r--r--target/openrisc/fpu_helper.c171
-rw-r--r--target/openrisc/gdbstub.c88
-rw-r--r--target/openrisc/helper.h66
-rw-r--r--target/openrisc/insns.decode234
-rw-r--r--target/openrisc/interrupt.c120
-rw-r--r--target/openrisc/interrupt_helper.c31
-rw-r--r--target/openrisc/machine.c144
-rw-r--r--target/openrisc/meson.build25
-rw-r--r--target/openrisc/mmu.c171
-rw-r--r--target/openrisc/sys_helper.c316
-rw-r--r--target/openrisc/translate.c1735
-rw-r--r--target/ppc/Kconfig5
-rw-r--r--target/ppc/arch_dump.c309
-rw-r--r--target/ppc/compat.c327
-rw-r--r--target/ppc/cpu-models.c968
-rw-r--r--target/ppc/cpu-models.h505
-rw-r--r--target/ppc/cpu-param.h37
-rw-r--r--target/ppc/cpu-qom.h225
-rw-r--r--target/ppc/cpu.c126
-rw-r--r--target/ppc/cpu.h2689
-rw-r--r--target/ppc/cpu_init.c9291
-rw-r--r--target/ppc/dfp_helper.c1393
-rw-r--r--target/ppc/excp_helper.c1484
-rw-r--r--target/ppc/fpu_helper.c3289
-rw-r--r--target/ppc/gdbstub.c628
-rw-r--r--target/ppc/helper.h776
-rw-r--r--target/ppc/helper_regs.c301
-rw-r--r--target/ppc/helper_regs.h34
-rw-r--r--target/ppc/insn32.decode429
-rw-r--r--target/ppc/insn64.decode196
-rw-r--r--target/ppc/int_helper.c3160
-rw-r--r--target/ppc/internal.h294
-rw-r--r--target/ppc/kvm-stub.c19
-rw-r--r--target/ppc/kvm.c2961
-rw-r--r--target/ppc/kvm_ppc.h478
-rw-r--r--target/ppc/machine.c859
-rw-r--r--target/ppc/mem_helper.c622
-rw-r--r--target/ppc/meson.build57
-rw-r--r--target/ppc/mfrom_table.c.inc78
-rw-r--r--target/ppc/mfrom_table_gen.c34
-rw-r--r--target/ppc/misc_helper.c320
-rw-r--r--target/ppc/mmu-book3s-v3.c42
-rw-r--r--target/ppc/mmu-book3s-v3.h117
-rw-r--r--target/ppc/mmu-books.h30
-rw-r--r--target/ppc/mmu-hash32.c571
-rw-r--r--target/ppc/mmu-hash32.h120
-rw-r--r--target/ppc/mmu-hash64.c1178
-rw-r--r--target/ppc/mmu-hash64.h175
-rw-r--r--target/ppc/mmu-radix64.c590
-rw-r--r--target/ppc/mmu-radix64.h73
-rw-r--r--target/ppc/mmu_common.c1620
-rw-r--r--target/ppc/mmu_helper.c1382
-rw-r--r--target/ppc/monitor.c165
-rw-r--r--target/ppc/power8-pmu-regs.c.inc262
-rw-r--r--target/ppc/spr_tcg.h144
-rw-r--r--target/ppc/tcg-stub.c45
-rw-r--r--target/ppc/timebase_helper.c208
-rw-r--r--target/ppc/trace-events38
-rw-r--r--target/ppc/trace.h1
-rw-r--r--target/ppc/translate.c8618
-rw-r--r--target/ppc/translate/dfp-impl.c.inc229
-rw-r--r--target/ppc/translate/fixedpoint-impl.c.inc494
-rw-r--r--target/ppc/translate/fp-impl.c.inc1404
-rw-r--r--target/ppc/translate/fp-ops.c.inc90
-rw-r--r--target/ppc/translate/spe-impl.c.inc1252
-rw-r--r--target/ppc/translate/spe-ops.c.inc105
-rw-r--r--target/ppc/translate/vmx-impl.c.inc1894
-rw-r--r--target/ppc/translate/vmx-ops.c.inc303
-rw-r--r--target/ppc/translate/vsx-impl.c.inc2193
-rw-r--r--target/ppc/translate/vsx-ops.c.inc398
-rw-r--r--target/ppc/user_only_helper.c56
-rw-r--r--target/riscv/Kconfig5
-rw-r--r--target/riscv/arch_dump.c202
-rw-r--r--target/riscv/bitmanip_helper.c51
-rw-r--r--target/riscv/cpu-param.h32
-rw-r--r--target/riscv/cpu.c819
-rw-r--r--target/riscv/cpu.h495
-rw-r--r--target/riscv/cpu_bits.h618
-rw-r--r--target/riscv/cpu_helper.c1138
-rw-r--r--target/riscv/cpu_user.h19
-rw-r--r--target/riscv/csr.c2098
-rw-r--r--target/riscv/fpu_helper.c376
-rw-r--r--target/riscv/gdbstub.c212
-rw-r--r--target/riscv/helper.h1149
-rw-r--r--target/riscv/insn16.decode162
-rw-r--r--target/riscv/insn32.decode728
-rw-r--r--target/riscv/insn_trans/trans_privileged.c.inc128
-rw-r--r--target/riscv/insn_trans/trans_rva.c.inc226
-rw-r--r--target/riscv/insn_trans/trans_rvb.c.inc506
-rw-r--r--target/riscv/insn_trans/trans_rvd.c.inc449
-rw-r--r--target/riscv/insn_trans/trans_rvf.c.inc473
-rw-r--r--target/riscv/insn_trans/trans_rvh.c.inc186
-rw-r--r--target/riscv/insn_trans/trans_rvi.c.inc577
-rw-r--r--target/riscv/insn_trans/trans_rvm.c.inc271
-rw-r--r--target/riscv/insn_trans/trans_rvv.c.inc2867
-rw-r--r--target/riscv/instmap.h369
-rw-r--r--target/riscv/internals.h61
-rw-r--r--target/riscv/machine.c223
-rw-r--r--target/riscv/meson.build32
-rw-r--r--target/riscv/monitor.c236
-rw-r--r--target/riscv/op_helper.c249
-rw-r--r--target/riscv/pmp.c664
-rw-r--r--target/riscv/pmp.h85
-rw-r--r--target/riscv/trace-events11
-rw-r--r--target/riscv/trace.h1
-rw-r--r--target/riscv/translate.c773
-rw-r--r--target/riscv/vector_helper.c4872
-rw-r--r--target/rx/Kconfig2
-rw-r--r--target/rx/cpu-param.h30
-rw-r--r--target/rx/cpu-qom.h50
-rw-r--r--target/rx/cpu.c246
-rw-r--r--target/rx/cpu.h177
-rw-r--r--target/rx/disas.c1446
-rw-r--r--target/rx/gdbstub.c112
-rw-r--r--target/rx/helper.c152
-rw-r--r--target/rx/helper.h30
-rw-r--r--target/rx/insns.decode621
-rw-r--r--target/rx/meson.build16
-rw-r--r--target/rx/op_helper.c462
-rw-r--r--target/rx/translate.c2408
-rw-r--r--target/s390x/Kconfig2
-rw-r--r--target/s390x/arch_dump.c282
-rw-r--r--target/s390x/cpu-dump.c134
-rw-r--r--target/s390x/cpu-param.h17
-rw-r--r--target/s390x/cpu-qom.h68
-rw-r--r--target/s390x/cpu-sysemu.c308
-rw-r--r--target/s390x/cpu.c339
-rw-r--r--target/s390x/cpu.h848
-rw-r--r--target/s390x/cpu_features.c255
-rw-r--r--target/s390x/cpu_features.h96
-rw-r--r--target/s390x/cpu_features_def.h25
-rw-r--r--target/s390x/cpu_features_def.h.inc380
-rw-r--r--target/s390x/cpu_models.c1021
-rw-r--r--target/s390x/cpu_models.h128
-rw-r--r--target/s390x/cpu_models_sysemu.c426
-rw-r--r--target/s390x/cpu_models_user.c20
-rw-r--r--target/s390x/diag.c185
-rw-r--r--target/s390x/gdbstub.c329
-rw-r--r--target/s390x/gen-features.c1015
-rw-r--r--target/s390x/helper.c283
-rw-r--r--target/s390x/helper.h381
-rw-r--r--target/s390x/interrupt.c244
-rw-r--r--target/s390x/ioinst.c818
-rw-r--r--target/s390x/kvm/kvm.c2564
-rw-r--r--target/s390x/kvm/kvm_s390x.h49
-rw-r--r--target/s390x/kvm/meson.build17
-rw-r--r--target/s390x/kvm/trace-events7
-rw-r--r--target/s390x/kvm/trace.h1
-rw-r--r--target/s390x/machine.c292
-rw-r--r--target/s390x/meson.build44
-rw-r--r--target/s390x/mmu_helper.c610
-rw-r--r--target/s390x/s390x-internal.h408
-rw-r--r--target/s390x/sigp.c495
-rw-r--r--target/s390x/tcg/cc_helper.c538
-rw-r--r--target/s390x/tcg/crypto_helper.c61
-rw-r--r--target/s390x/tcg/excp_helper.c645
-rw-r--r--target/s390x/tcg/fpu_helper.c976
-rw-r--r--target/s390x/tcg/insn-data.def1398
-rw-r--r--target/s390x/tcg/insn-format.def81
-rw-r--r--target/s390x/tcg/int_helper.c148
-rw-r--r--target/s390x/tcg/mem_helper.c3017
-rw-r--r--target/s390x/tcg/meson.build14
-rw-r--r--target/s390x/tcg/misc_helper.c798
-rw-r--r--target/s390x/tcg/s390-tod.h29
-rw-r--r--target/s390x/tcg/tcg_s390x.h24
-rw-r--r--target/s390x/tcg/translate.c6652
-rw-r--r--target/s390x/tcg/translate_vx.c.inc3109
-rw-r--r--target/s390x/tcg/vec.h141
-rw-r--r--target/s390x/tcg/vec_fpu_helper.c1072
-rw-r--r--target/s390x/tcg/vec_helper.c214
-rw-r--r--target/s390x/tcg/vec_int_helper.c587
-rw-r--r--target/s390x/tcg/vec_string_helper.c473
-rw-r--r--target/s390x/trace-events19
-rw-r--r--target/s390x/trace.h1
-rw-r--r--target/sh4/Kconfig2
-rw-r--r--target/sh4/README.sh4150
-rw-r--r--target/sh4/cpu-param.h21
-rw-r--r--target/sh4/cpu-qom.h59
-rw-r--r--target/sh4/cpu.c302
-rw-r--r--target/sh4/cpu.h378
-rw-r--r--target/sh4/gdbstub.c144
-rw-r--r--target/sh4/helper.c863
-rw-r--r--target/sh4/helper.h43
-rw-r--r--target/sh4/meson.build14
-rw-r--r--target/sh4/monitor.c58
-rw-r--r--target/sh4/op_helper.c491
-rw-r--r--target/sh4/translate.c2367
-rw-r--r--target/sparc/Kconfig5
-rw-r--r--target/sparc/asi.h312
-rw-r--r--target/sparc/cc_helper.c471
-rw-r--r--target/sparc/cpu-param.h28
-rw-r--r--target/sparc/cpu-qom.h54
-rw-r--r--target/sparc/cpu.c954
-rw-r--r--target/sparc/cpu.h839
-rw-r--r--target/sparc/fop_helper.c401
-rw-r--r--target/sparc/gdbstub.c208
-rw-r--r--target/sparc/helper.c253
-rw-r--r--target/sparc/helper.h168
-rw-r--r--target/sparc/int32_helper.c171
-rw-r--r--target/sparc/int64_helper.c304
-rw-r--r--target/sparc/ldst_helper.c1955
-rw-r--r--target/sparc/machine.c196
-rw-r--r--target/sparc/meson.build23
-rw-r--r--target/sparc/mmu_helper.c944
-rw-r--r--target/sparc/monitor.c165
-rw-r--r--target/sparc/trace-events32
-rw-r--r--target/sparc/trace.h1
-rw-r--r--target/sparc/translate.c6031
-rw-r--r--target/sparc/vis_helper.c490
-rw-r--r--target/sparc/win_helper.c455
-rw-r--r--target/tricore/Kconfig2
-rw-r--r--target/tricore/cpu-param.h17
-rw-r--r--target/tricore/cpu-qom.h40
-rw-r--r--target/tricore/cpu.c205
-rw-r--r--target/tricore/cpu.h397
-rw-r--r--target/tricore/csfr.def125
-rw-r--r--target/tricore/fpu_helper.c471
-rw-r--r--target/tricore/gdbstub.c139
-rw-r--r--target/tricore/helper.c154
-rw-r--r--target/tricore/helper.h155
-rw-r--r--target/tricore/meson.build15
-rw-r--r--target/tricore/op_helper.c2795
-rw-r--r--target/tricore/translate.c8949
-rw-r--r--target/tricore/tricore-defs.h23
-rw-r--r--target/tricore/tricore-opcodes.h1474
-rw-r--r--target/xtensa/Kconfig2
-rw-r--r--target/xtensa/core-dc232b.c52
-rw-r--r--target/xtensa/core-dc232b/core-isa.h422
-rw-r--r--target/xtensa/core-dc232b/gdb-config.c.inc262
-rw-r--r--target/xtensa/core-dc232b/xtensa-modules.c.inc14078
-rw-r--r--target/xtensa/core-dc233c.c51
-rw-r--r--target/xtensa/core-dc233c/core-isa.h473
-rw-r--r--target/xtensa/core-dc233c/gdb-config.c.inc146
-rw-r--r--target/xtensa/core-dc233c/xtensa-modules.c.inc15205
-rw-r--r--target/xtensa/core-de212.c51
-rw-r--r--target/xtensa/core-de212/core-isa.h620
-rw-r--r--target/xtensa/core-de212/gdb-config.c.inc198
-rw-r--r--target/xtensa/core-de212/xtensa-modules.c.inc14543
-rw-r--r--target/xtensa/core-de233_fpu.c58
-rw-r--r--target/xtensa/core-de233_fpu/core-isa.h727
-rw-r--r--target/xtensa/core-de233_fpu/core-matmap.h717
-rw-r--r--target/xtensa/core-de233_fpu/gdb-config.c.inc277
-rw-r--r--target/xtensa/core-de233_fpu/xtensa-modules.c.inc20758
-rw-r--r--target/xtensa/core-dsp3400.c58
-rw-r--r--target/xtensa/core-dsp3400/core-isa.h452
-rw-r--r--target/xtensa/core-dsp3400/core-matmap.h312
-rw-r--r--target/xtensa/core-dsp3400/gdb-config.c.inc400
-rw-r--r--target/xtensa/core-dsp3400/xtensa-modules.c.inc171906
-rw-r--r--target/xtensa/core-fsf.c52
-rw-r--r--target/xtensa/core-fsf/core-isa.h360
-rw-r--r--target/xtensa/core-fsf/xtensa-modules.c.inc9826
-rw-r--r--target/xtensa/core-sample_controller.c51
-rw-r--r--target/xtensa/core-sample_controller/core-isa.h642
-rw-r--r--target/xtensa/core-sample_controller/gdb-config.c.inc141
-rw-r--r--target/xtensa/core-sample_controller/xtensa-modules.c.inc11366
-rw-r--r--target/xtensa/core-test_kc705_be.c51
-rw-r--r--target/xtensa/core-test_kc705_be/core-isa.h573
-rw-r--r--target/xtensa/core-test_kc705_be/gdb-config.c.inc259
-rw-r--r--target/xtensa/core-test_kc705_be/xtensa-modules.c.inc45117
-rw-r--r--target/xtensa/core-test_mmuhifi_c3.c52
-rw-r--r--target/xtensa/core-test_mmuhifi_c3/core-isa.h472
-rw-r--r--target/xtensa/core-test_mmuhifi_c3/gdb-config.c.inc220
-rw-r--r--target/xtensa/core-test_mmuhifi_c3/xtensa-modules.c.inc36387
-rw-r--r--target/xtensa/cores.list9
-rw-r--r--target/xtensa/cpu-param.h21
-rw-r--r--target/xtensa/cpu-qom.h62
-rw-r--r--target/xtensa/cpu.c247
-rw-r--r--target/xtensa/cpu.h798
-rw-r--r--target/xtensa/dbg_helper.c129
-rw-r--r--target/xtensa/exc_helper.c269
-rw-r--r--target/xtensa/fpu_helper.c448
-rw-r--r--target/xtensa/gdbstub.c182
-rw-r--r--target/xtensa/helper.c319
-rw-r--r--target/xtensa/helper.h105
-rwxr-xr-xtarget/xtensa/import_core.sh71
-rw-r--r--target/xtensa/meson.build27
-rw-r--r--target/xtensa/mmu_helper.c1192
-rw-r--r--target/xtensa/monitor.c39
-rw-r--r--target/xtensa/op_helper.c216
-rw-r--r--target/xtensa/overlay_tool.h879
-rw-r--r--target/xtensa/translate.c7873
-rw-r--r--target/xtensa/win_helper.c212
-rw-r--r--target/xtensa/xtensa-isa-internal.h231
-rw-r--r--target/xtensa/xtensa-isa.c1743
-rw-r--r--target/xtensa/xtensa-isa.h1
-rw-r--r--target/xtensa/xtensa-semi.c437
733 files changed, 821541 insertions, 0 deletions
diff --git a/target/Kconfig b/target/Kconfig
new file mode 100644
index 000000000..ae7f24fc6
--- /dev/null
+++ b/target/Kconfig
@@ -0,0 +1,19 @@
+source alpha/Kconfig
+source arm/Kconfig
+source avr/Kconfig
+source cris/Kconfig
+source hppa/Kconfig
+source i386/Kconfig
+source m68k/Kconfig
+source microblaze/Kconfig
+source mips/Kconfig
+source nios2/Kconfig
+source openrisc/Kconfig
+source ppc/Kconfig
+source riscv/Kconfig
+source rx/Kconfig
+source s390x/Kconfig
+source sh4/Kconfig
+source sparc/Kconfig
+source tricore/Kconfig
+source xtensa/Kconfig
diff --git a/target/alpha/Kconfig b/target/alpha/Kconfig
new file mode 100644
index 000000000..267222c05
--- /dev/null
+++ b/target/alpha/Kconfig
@@ -0,0 +1,2 @@
+config ALPHA
+ bool
diff --git a/target/alpha/STATUS b/target/alpha/STATUS
new file mode 100644
index 000000000..6c9744569
--- /dev/null
+++ b/target/alpha/STATUS
@@ -0,0 +1,28 @@
+(to be completed)
+
+Alpha emulation structure:
+cpu.h : CPU definitions globally exported
+exec.h : CPU definitions used only for translated code execution
+helper.c : helpers that can be called either by the translated code
+ or the QEMU core, including the exception handler.
+op_helper.c : helpers that can be called only from TCG
+helper.h : TCG helpers prototypes
+translate.c : Alpha instructions to micro-operations translator
+
+Code translator status:
+The Alpha CPU instruction emulation should be quite complete, with the
+limitation that the VAX floating-point loads and stores are not tested.
+The 4 MMU modes are implemented.
+
+Linux user mode emulation status:
+A few programs start to run. Most crash at some point, dereferencing a
+NULL pointer; it seems that the UNIQUE register is not initialized properly.
+Old executables that do not rely on TLS support may run, but this remains
+to be proven...
+
+Full system emulation status:
+* Alpha PALCode emulation is in a very early stage and is not sufficient
+ to run any real OS. The alpha-softmmu target is not enabled for now.
+* no hardware platform description is implemented
+* there might be problems in the Alpha PALCode dedicated instructions
+ that would prevent using a native PALCode image.
diff --git a/target/alpha/cpu-param.h b/target/alpha/cpu-param.h
new file mode 100644
index 000000000..1153992e4
--- /dev/null
+++ b/target/alpha/cpu-param.h
@@ -0,0 +1,20 @@
+/*
+ * Alpha cpu parameters for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef ALPHA_CPU_PARAM_H
+#define ALPHA_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 64
+#define TARGET_PAGE_BITS 13
+
+/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 44
+#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
+
+#define NB_MMU_MODES 3
+
+#endif
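
Note: for orientation (editor's note, not part of the patch), these constants
give an 8 KiB guest page (1 << 13 = 8192 bytes) and a 43-bit guest virtual
address space (30 + 13). A minimal standalone C sketch deriving the same
values:

    #include <stdio.h>

    #define TARGET_PAGE_BITS 13
    #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)

    int main(void)
    {
        /* 1 << 13 = 8192 bytes, i.e. an 8 KiB page. */
        printf("page size: %d bytes\n", 1 << TARGET_PAGE_BITS);
        /* 30 + 13 = 43 bits of guest virtual address space. */
        printf("virtual address bits: %d\n", TARGET_VIRT_ADDR_SPACE_BITS);
        return 0;
    }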
diff --git a/target/alpha/cpu-qom.h b/target/alpha/cpu-qom.h
new file mode 100644
index 000000000..7bb9173c5
--- /dev/null
+++ b/target/alpha/cpu-qom.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_ALPHA_CPU_QOM_H
+#define QEMU_ALPHA_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_ALPHA_CPU "alpha-cpu"
+
+OBJECT_DECLARE_TYPE(AlphaCPU, AlphaCPUClass,
+ ALPHA_CPU)
+
+/**
+ * AlphaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An Alpha CPU model.
+ */
+struct AlphaCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#endif
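
Note: OBJECT_DECLARE_TYPE(AlphaCPU, AlphaCPUClass, ALPHA_CPU) is standard QOM
boilerplate. Approximately (editor's sketch, not the literal macro output;
see qom/object.h for the real definition, which also emits g_autoptr cleanup
glue), it declares the two struct typedefs plus the checked-cast helpers used
in cpu.c below:

    /* Approximate expansion of OBJECT_DECLARE_TYPE (illustration only). */
    typedef struct AlphaCPU AlphaCPU;
    typedef struct AlphaCPUClass AlphaCPUClass;

    /* Checked dynamic casts, e.g. ALPHA_CPU(cs) or ALPHA_CPU_GET_CLASS(dev): */
    static inline AlphaCPU *ALPHA_CPU(const void *obj)
    {
        return OBJECT_CHECK(AlphaCPU, obj, TYPE_ALPHA_CPU);
    }

    static inline AlphaCPUClass *ALPHA_CPU_CLASS(const void *klass)
    {
        return OBJECT_CLASS_CHECK(AlphaCPUClass, klass, TYPE_ALPHA_CPU);
    }

    static inline AlphaCPUClass *ALPHA_CPU_GET_CLASS(const void *obj)
    {
        return OBJECT_GET_CLASS(AlphaCPUClass, obj, TYPE_ALPHA_CPU);
    }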
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
new file mode 100644
index 000000000..a8990d401
--- /dev/null
+++ b/target/alpha/cpu.c
@@ -0,0 +1,286 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+
+
+static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool alpha_cpu_has_work(CPUState *cs)
+{
+ /* Here we are checking to see if the CPU should wake up from HALT.
+ We will have gotten into this state only for WTINT from PALmode. */
+ /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
+ asleep even if (some) interrupts have been asserted. For now,
+ assume that if a CPU really wants to stay asleep, it will mask
+ interrupts at the chipset level, which will prevent these bits
+ from being set in the first place. */
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_TIMER
+ | CPU_INTERRUPT_SMP
+ | CPU_INTERRUPT_MCHK);
+}
+
+static void alpha_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_alpha_ev6;
+ info->print_insn = print_insn_alpha;
+}
+
+static void alpha_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ AlphaCPUClass *acc = ALPHA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ acc->parent_realize(dev, errp);
+}
+
+static void alpha_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+
+ qemu_printf(" %s\n", object_class_get_name(oc));
+}
+
+void alpha_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list_sorted(TYPE_ALPHA_CPU, false);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, alpha_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+/* Models */
+typedef struct AlphaCPUAlias {
+ const char *alias;
+ const char *typename;
+} AlphaCPUAlias;
+
+static const AlphaCPUAlias alpha_cpu_aliases[] = {
+ { "21064", ALPHA_CPU_TYPE_NAME("ev4") },
+ { "21164", ALPHA_CPU_TYPE_NAME("ev5") },
+ { "21164a", ALPHA_CPU_TYPE_NAME("ev56") },
+ { "21164pc", ALPHA_CPU_TYPE_NAME("pca56") },
+ { "21264", ALPHA_CPU_TYPE_NAME("ev6") },
+ { "21264a", ALPHA_CPU_TYPE_NAME("ev67") },
+};
+
+static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ int i;
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL && object_class_dynamic_cast(oc, TYPE_ALPHA_CPU) != NULL &&
+ !object_class_is_abstract(oc)) {
+ return oc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(alpha_cpu_aliases); i++) {
+ if (strcmp(cpu_model, alpha_cpu_aliases[i].alias) == 0) {
+ oc = object_class_by_name(alpha_cpu_aliases[i].typename);
+ assert(oc != NULL && !object_class_is_abstract(oc));
+ return oc;
+ }
+ }
+
+ typename = g_strdup_printf(ALPHA_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+
+ /* TODO: remove match everything nonsense */
+ /* Default to ev67; no reason not to emulate insns by default. */
+ if (!oc) {
+ oc = object_class_by_name(ALPHA_CPU_TYPE_NAME("ev67"));
+ }
+
+ return oc;
+}
+
+static void ev4_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_2106x;
+}
+
+static void ev5_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21164;
+}
+
+static void ev56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_BWX;
+}
+
+static void pca56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_MVI;
+}
+
+static void ev6_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21264;
+ env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP;
+}
+
+static void ev67_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_CIX | AMASK_PREFETCH;
+}
+
+static void alpha_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ env->lock_addr = -1;
+#if defined(CONFIG_USER_ONLY)
+ env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN;
+ cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD
+ | FPCR_UNFD | FPCR_INED | FPCR_DNOD
+ | FPCR_DYN_NORMAL) << 32);
+#else
+ env->flags = ENV_FLAG_PAL_MODE | ENV_FLAG_FEN;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps alpha_sysemu_ops = {
+ .get_phys_page_debug = alpha_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps alpha_tcg_ops = {
+ .initialize = alpha_translate_init,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = alpha_cpu_record_sigsegv,
+ .record_sigbus = alpha_cpu_record_sigbus,
+#else
+ .tlb_fill = alpha_cpu_tlb_fill,
+ .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
+ .do_interrupt = alpha_cpu_do_interrupt,
+ .do_transaction_failed = alpha_cpu_do_transaction_failed,
+ .do_unaligned_access = alpha_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void alpha_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ AlphaCPUClass *acc = ALPHA_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, alpha_cpu_realizefn,
+ &acc->parent_realize);
+
+ cc->class_by_name = alpha_cpu_class_by_name;
+ cc->has_work = alpha_cpu_has_work;
+ cc->dump_state = alpha_cpu_dump_state;
+ cc->set_pc = alpha_cpu_set_pc;
+ cc->gdb_read_register = alpha_cpu_gdb_read_register;
+ cc->gdb_write_register = alpha_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ dc->vmsd = &vmstate_alpha_cpu;
+ cc->sysemu_ops = &alpha_sysemu_ops;
+#endif
+ cc->disas_set_info = alpha_cpu_disas_set_info;
+
+ cc->tcg_ops = &alpha_tcg_ops;
+ cc->gdb_num_core_regs = 67;
+}
+
+#define DEFINE_ALPHA_CPU_TYPE(base_type, cpu_model, initfn) \
+ { \
+ .parent = base_type, \
+ .instance_init = initfn, \
+ .name = ALPHA_CPU_TYPE_NAME(cpu_model), \
+ }
+
+static const TypeInfo alpha_cpu_type_infos[] = {
+ {
+ .name = TYPE_ALPHA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(AlphaCPU),
+ .instance_init = alpha_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(AlphaCPUClass),
+ .class_init = alpha_cpu_class_init,
+ },
+ DEFINE_ALPHA_CPU_TYPE(TYPE_ALPHA_CPU, "ev4", ev4_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(TYPE_ALPHA_CPU, "ev5", ev5_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(ALPHA_CPU_TYPE_NAME("ev5"), "ev56", ev56_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(ALPHA_CPU_TYPE_NAME("ev56"), "pca56",
+ pca56_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(TYPE_ALPHA_CPU, "ev6", ev6_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(ALPHA_CPU_TYPE_NAME("ev6"), "ev67", ev67_cpu_initfn),
+ DEFINE_ALPHA_CPU_TYPE(ALPHA_CPU_TYPE_NAME("ev67"), "ev68", NULL),
+};
+
+DEFINE_TYPES(alpha_cpu_type_infos)
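
Note: a quick illustration of how model lookup behaves (editor's sketch, not
part of the patch). alpha_cpu_class_by_name() tries an exact type name first,
then the marketing-number aliases above (e.g. "21264" resolves to the ev6
type), then the "%s-alpha-cpu" pattern, and finally falls back to ev67. A
self-contained C sketch of the alias-plus-fallback steps, using a
hypothetical resolve() helper with ALPHA_CPU_TYPE_NAME expanded (the
pattern-matching step is elided):

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *alias;
        const char *typename;
    } AlphaCPUAlias;

    /* Same table as alpha_cpu_aliases above, with ALPHA_CPU_TYPE_NAME
     * expanded to its "-alpha-cpu" suffix form. */
    static const AlphaCPUAlias aliases[] = {
        { "21064",   "ev4-alpha-cpu" },
        { "21164",   "ev5-alpha-cpu" },
        { "21164a",  "ev56-alpha-cpu" },
        { "21164pc", "pca56-alpha-cpu" },
        { "21264",   "ev6-alpha-cpu" },
        { "21264a",  "ev67-alpha-cpu" },
    };

    static const char *resolve(const char *model)
    {
        for (size_t i = 0; i < sizeof(aliases) / sizeof(aliases[0]); i++) {
            if (strcmp(model, aliases[i].alias) == 0) {
                return aliases[i].typename;
            }
        }
        return "ev67-alpha-cpu";   /* default: "no reason not to emulate" */
    }

    int main(void)
    {
        printf("%s\n", resolve("21264"));   /* ev6-alpha-cpu */
        printf("%s\n", resolve("21364"));   /* falls back to ev67-alpha-cpu */
        return 0;
    }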
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
new file mode 100644
index 000000000..afd975c87
--- /dev/null
+++ b/target/alpha/cpu.h
@@ -0,0 +1,512 @@
+/*
+ * Alpha emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ALPHA_CPU_H
+#define ALPHA_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+/* Alpha processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
+#define ICACHE_LINE_SIZE 32
+#define DCACHE_LINE_SIZE 32
+
+/* Alpha major type */
+enum {
+ ALPHA_EV3 = 1,
+ ALPHA_EV4 = 2,
+ ALPHA_SIM = 3,
+ ALPHA_LCA = 4,
+ ALPHA_EV5 = 5, /* 21164 */
+ ALPHA_EV45 = 6, /* 21064A */
+ ALPHA_EV56 = 7, /* 21164A */
+};
+
+/* EV4 minor type */
+enum {
+ ALPHA_EV4_2 = 0,
+ ALPHA_EV4_3 = 1,
+};
+
+/* LCA minor type */
+enum {
+ ALPHA_LCA_1 = 1, /* 21066 */
+ ALPHA_LCA_2 = 2, /* 20166 */
+ ALPHA_LCA_3 = 3, /* 21068 */
+ ALPHA_LCA_4 = 4, /* 21068 */
+ ALPHA_LCA_5 = 5, /* 21066A */
+ ALPHA_LCA_6 = 6, /* 21068A */
+};
+
+/* EV5 minor type */
+enum {
+ ALPHA_EV5_1 = 1, /* Rev BA, CA */
+ ALPHA_EV5_2 = 2, /* Rev DA, EA */
+ ALPHA_EV5_3 = 3, /* Pass 3 */
+ ALPHA_EV5_4 = 4, /* Pass 3.2 */
+ ALPHA_EV5_5 = 5, /* Pass 4 */
+};
+
+/* EV45 minor type */
+enum {
+ ALPHA_EV45_1 = 1, /* Pass 1 */
+ ALPHA_EV45_2 = 2, /* Pass 1.1 */
+ ALPHA_EV45_3 = 3, /* Pass 2 */
+};
+
+/* EV56 minor type */
+enum {
+ ALPHA_EV56_1 = 1, /* Pass 1 */
+ ALPHA_EV56_2 = 2, /* Pass 2 */
+};
+
+enum {
+ IMPLVER_2106x = 0, /* EV4, EV45 & LCA45 */
+ IMPLVER_21164 = 1, /* EV5, EV56 & PCA45 */
+ IMPLVER_21264 = 2, /* EV6, EV67 & EV68x */
+ IMPLVER_21364 = 3, /* EV7 & EV79 */
+};
+
+enum {
+ AMASK_BWX = 0x00000001,
+ AMASK_FIX = 0x00000002,
+ AMASK_CIX = 0x00000004,
+ AMASK_MVI = 0x00000100,
+ AMASK_TRAP = 0x00000200,
+ AMASK_PREFETCH = 0x00001000,
+};
+
+enum {
+ VAX_ROUND_NORMAL = 0,
+ VAX_ROUND_CHOPPED,
+};
+
+enum {
+ IEEE_ROUND_NORMAL = 0,
+ IEEE_ROUND_DYNAMIC,
+ IEEE_ROUND_PLUS,
+ IEEE_ROUND_MINUS,
+ IEEE_ROUND_CHOPPED,
+};
+
+/* IEEE floating-point operations encoding */
+/* Trap mode */
+enum {
+ FP_TRAP_I = 0x0,
+ FP_TRAP_U = 0x1,
+ FP_TRAP_S = 0x4,
+ FP_TRAP_SU = 0x5,
+ FP_TRAP_SUI = 0x7,
+};
+
+/* Rounding mode */
+enum {
+ FP_ROUND_CHOPPED = 0x0,
+ FP_ROUND_MINUS = 0x1,
+ FP_ROUND_NORMAL = 0x2,
+ FP_ROUND_DYNAMIC = 0x3,
+};
+
+/* FPCR bits -- right-shifted 32 so we can use a uint32_t. */
+#define FPCR_SUM (1U << (63 - 32))
+#define FPCR_INED (1U << (62 - 32))
+#define FPCR_UNFD (1U << (61 - 32))
+#define FPCR_UNDZ (1U << (60 - 32))
+#define FPCR_DYN_SHIFT (58 - 32)
+#define FPCR_DYN_CHOPPED (0U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MINUS (1U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_NORMAL (2U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_PLUS (3U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MASK (3U << FPCR_DYN_SHIFT)
+#define FPCR_IOV (1U << (57 - 32))
+#define FPCR_INE (1U << (56 - 32))
+#define FPCR_UNF (1U << (55 - 32))
+#define FPCR_OVF (1U << (54 - 32))
+#define FPCR_DZE (1U << (53 - 32))
+#define FPCR_INV (1U << (52 - 32))
+#define FPCR_OVFD (1U << (51 - 32))
+#define FPCR_DZED (1U << (50 - 32))
+#define FPCR_INVD (1U << (49 - 32))
+#define FPCR_DNZ (1U << (48 - 32))
+#define FPCR_DNOD (1U << (47 - 32))
+#define FPCR_STATUS_MASK (FPCR_IOV | FPCR_INE | FPCR_UNF \
+ | FPCR_OVF | FPCR_DZE | FPCR_INV)
+
+/* The silly software trap enables implemented by the kernel emulation.
+ These are more or less architecturally required, since the real hardware
+ has read-as-zero bits in the FPCR when the features aren't implemented.
+ For the purposes of QEMU, we pretend the FPCR can hold everything. */
+#define SWCR_TRAP_ENABLE_INV (1U << 1)
+#define SWCR_TRAP_ENABLE_DZE (1U << 2)
+#define SWCR_TRAP_ENABLE_OVF (1U << 3)
+#define SWCR_TRAP_ENABLE_UNF (1U << 4)
+#define SWCR_TRAP_ENABLE_INE (1U << 5)
+#define SWCR_TRAP_ENABLE_DNO (1U << 6)
+#define SWCR_TRAP_ENABLE_MASK ((1U << 7) - (1U << 1))
+
+#define SWCR_MAP_DMZ (1U << 12)
+#define SWCR_MAP_UMZ (1U << 13)
+#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
+
+#define SWCR_STATUS_INV (1U << 17)
+#define SWCR_STATUS_DZE (1U << 18)
+#define SWCR_STATUS_OVF (1U << 19)
+#define SWCR_STATUS_UNF (1U << 20)
+#define SWCR_STATUS_INE (1U << 21)
+#define SWCR_STATUS_DNO (1U << 22)
+#define SWCR_STATUS_MASK ((1U << 23) - (1U << 17))
+
+#define SWCR_STATUS_TO_EXCSUM_SHIFT 16
+
+#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
+
+/* MMU modes definitions */
+
+/* Alpha has 5 MMU modes: PALcode, Kernel, Executive, Supervisor, and User.
+ The Unix PALcode only exposes the kernel and user modes; presumably
+ executive and supervisor are used by VMS.
+
+ PALcode itself uses physical mode for code and kernel mode for data;
+ there are PALmode instructions that can access data via physical mode
+ or via an os-installed "alternate mode", which is one of the 4 above.
+
+ That said, we're only emulating Unix PALcode, and not attempting VMS,
+ so we don't need to implement Executive and Supervisor. QEMU's own
+ PALcode cheats and uses the KSEG mapping for its code+data rather than
+ physical addresses. */
+
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_PHYS_IDX 2
+
+typedef struct CPUAlphaState CPUAlphaState;
+
+struct CPUAlphaState {
+ uint64_t ir[31];
+ float64 fir[31];
+ uint64_t pc;
+ uint64_t unique;
+ uint64_t lock_addr;
+ uint64_t lock_value;
+
+ /* The FPCR, and disassembled portions thereof. */
+ uint32_t fpcr;
+#ifdef CONFIG_USER_ONLY
+ uint32_t swcr;
+#endif
+ uint32_t fpcr_exc_enable;
+ float_status fp_status;
+ uint8_t fpcr_dyn_round;
+ uint8_t fpcr_flush_to_zero;
+
+ /* Mask of PALmode, Processor State et al. Most of this gets copied
+ into the TranslationBlock flags and controls code generation. */
+ uint32_t flags;
+
+ /* The high 32-bits of the processor cycle counter. */
+ uint32_t pcc_ofs;
+
+ /* These pass data from the exception logic in the translator and
+ helpers to the OS entry point. This is used for both system
+ emulation and user-mode. */
+ uint64_t trap_arg0;
+ uint64_t trap_arg1;
+ uint64_t trap_arg2;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* The internal data required by our emulation of the Unix PALcode. */
+ uint64_t exc_addr;
+ uint64_t palbr;
+ uint64_t ptbr;
+ uint64_t vptptr;
+ uint64_t sysval;
+ uint64_t usp;
+ uint64_t shadow[8];
+ uint64_t scratch[24];
+#endif
+
+ /* This alarm doesn't exist in real hardware; we wish it did. */
+ uint64_t alarm_expire;
+
+ int error_code;
+
+ uint32_t features;
+ uint32_t amask;
+ int implver;
+};
+
+/**
+ * AlphaCPU:
+ * @env: #CPUAlphaState
+ *
+ * An Alpha CPU.
+ */
+struct AlphaCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUAlphaState env;
+
+ /* This alarm doesn't exist in real hardware; we wish it did. */
+ QEMUTimer *alarm_timer;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_alpha_cpu;
+
+void alpha_cpu_do_interrupt(CPUState *cpu);
+bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+#define cpu_list alpha_cpu_list
+
+typedef CPUAlphaState CPUArchState;
+typedef AlphaCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+enum {
+ FEATURE_ASN = 0x00000001,
+ FEATURE_SPS = 0x00000002,
+ FEATURE_VIRBND = 0x00000004,
+ FEATURE_TBCHK = 0x00000008,
+};
+
+enum {
+ EXCP_RESET,
+ EXCP_MCHK,
+ EXCP_SMP_INTERRUPT,
+ EXCP_CLK_INTERRUPT,
+ EXCP_DEV_INTERRUPT,
+ EXCP_MMFAULT,
+ EXCP_UNALIGN,
+ EXCP_OPCDEC,
+ EXCP_ARITH,
+ EXCP_FEN,
+ EXCP_CALL_PAL,
+};
+
+/* Alpha-specific interrupt pending bits. */
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_SMP CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
+
+/* OSF/1 Page table bits. */
+enum {
+ PTE_VALID = 0x0001,
+ PTE_FOR = 0x0002, /* used for page protection (fault on read) */
+ PTE_FOW = 0x0004, /* used for page protection (fault on write) */
+ PTE_FOE = 0x0008, /* used for page protection (fault on exec) */
+ PTE_ASM = 0x0010,
+ PTE_KRE = 0x0100,
+ PTE_URE = 0x0200,
+ PTE_KWE = 0x1000,
+ PTE_UWE = 0x2000
+};
+
+/* Hardware interrupt (entInt) constants. */
+enum {
+ INT_K_IP,
+ INT_K_CLK,
+ INT_K_MCHK,
+ INT_K_DEV,
+ INT_K_PERF,
+};
+
+/* Memory management (entMM) constants. */
+enum {
+ MM_K_TNV,
+ MM_K_ACV,
+ MM_K_FOR,
+ MM_K_FOE,
+ MM_K_FOW
+};
+
+/* Arithmetic exception (entArith) constants. */
+enum {
+ EXC_M_SWC = 1, /* Software completion */
+ EXC_M_INV = 2, /* Invalid operation */
+ EXC_M_DZE = 4, /* Division by zero */
+ EXC_M_FOV = 8, /* Overflow */
+ EXC_M_UNF = 16, /* Underflow */
+ EXC_M_INE = 32, /* Inexact result */
+ EXC_M_IOV = 64 /* Integer Overflow */
+};
+
+/* Processor status constants. */
+/* Low 3 bits are interrupt mask level. */
+#define PS_INT_MASK 7u
+
+/* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes;
+ The Unix PALcode only uses bit 4. */
+#define PS_USER_MODE 8u
+
+/* CPUAlphaState->flags constants. These are laid out so that we
+ can set or reset the pieces individually by assigning to the byte,
+ or manipulated as a whole. */
+
+#define ENV_FLAG_PAL_SHIFT 0
+#define ENV_FLAG_PS_SHIFT 8
+#define ENV_FLAG_RX_SHIFT 16
+#define ENV_FLAG_FEN_SHIFT 24
+
+#define ENV_FLAG_PAL_MODE (1u << ENV_FLAG_PAL_SHIFT)
+#define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT)
+#define ENV_FLAG_RX_FLAG (1u << ENV_FLAG_RX_SHIFT)
+#define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT)
+
+#define ENV_FLAG_TB_MASK \
+ (ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN)
+
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
+{
+ int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
+ if (env->flags & ENV_FLAG_PAL_MODE) {
+ ret = MMU_KERNEL_IDX;
+ }
+ return ret;
+}
+
+enum {
+ IR_V0 = 0,
+ IR_T0 = 1,
+ IR_T1 = 2,
+ IR_T2 = 3,
+ IR_T3 = 4,
+ IR_T4 = 5,
+ IR_T5 = 6,
+ IR_T6 = 7,
+ IR_T7 = 8,
+ IR_S0 = 9,
+ IR_S1 = 10,
+ IR_S2 = 11,
+ IR_S3 = 12,
+ IR_S4 = 13,
+ IR_S5 = 14,
+ IR_S6 = 15,
+ IR_FP = IR_S6,
+ IR_A0 = 16,
+ IR_A1 = 17,
+ IR_A2 = 18,
+ IR_A3 = 19,
+ IR_A4 = 20,
+ IR_A5 = 21,
+ IR_T8 = 22,
+ IR_T9 = 23,
+ IR_T10 = 24,
+ IR_T11 = 25,
+ IR_RA = 26,
+ IR_T12 = 27,
+ IR_PV = IR_T12,
+ IR_AT = 28,
+ IR_GP = 29,
+ IR_SP = 30,
+ IR_ZERO = 31,
+};
+
+void alpha_translate_init(void);
+
+#define ALPHA_CPU_TYPE_SUFFIX "-" TYPE_ALPHA_CPU
+#define ALPHA_CPU_TYPE_NAME(model) model ALPHA_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
+
+void alpha_cpu_list(void);
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
+void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
+
+uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env);
+void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val);
+uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg);
+void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val);
+
+#ifdef CONFIG_USER_ONLY
+void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr);
+void alpha_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr);
+#else
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+#endif
+
+static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *pflags = env->flags & ENV_FLAG_TB_MASK;
+}
+
+#ifdef CONFIG_USER_ONLY
+/* Copied from linux ieee_swcr_to_fpcr. */
+static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr)
+{
+ uint64_t fpcr = 0;
+
+ fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
+ fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
+ fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
+ | SWCR_TRAP_ENABLE_DZE
+ | SWCR_TRAP_ENABLE_OVF)) << 48;
+ fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
+ | SWCR_TRAP_ENABLE_INE)) << 57;
+ fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
+ fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
+
+ return fpcr;
+}
+
+/* Copied from linux ieee_fpcr_to_swcr. */
+static inline uint64_t alpha_ieee_fpcr_to_swcr(uint64_t fpcr)
+{
+ uint64_t swcr = 0;
+
+ swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
+ swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
+ swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
+ | SWCR_TRAP_ENABLE_DZE
+ | SWCR_TRAP_ENABLE_OVF);
+ swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE);
+ swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
+ swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
+
+ return swcr;
+}
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* ALPHA_CPU_H */
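
Note: the mode-selection rule in cpu_mmu_index() above is small enough to
tabulate: PALmode always gets the kernel index, otherwise the PS user bit
picks between user and kernel; MMU_PHYS_IDX is never returned here and is
selected explicitly elsewhere. A standalone restatement with spot checks
(editor's sketch, constants copied from the header):

    #include <assert.h>
    #include <stdint.h>

    #define MMU_KERNEL_IDX 0
    #define MMU_USER_IDX   1

    #define ENV_FLAG_PAL_SHIFT 0
    #define ENV_FLAG_PS_SHIFT  8
    #define PS_USER_MODE       8u
    #define ENV_FLAG_PAL_MODE  (1u << ENV_FLAG_PAL_SHIFT)
    #define ENV_FLAG_PS_USER   (PS_USER_MODE << ENV_FLAG_PS_SHIFT)

    /* Same logic as cpu_mmu_index(), taking the flags word directly. */
    static int mmu_index(uint32_t flags)
    {
        int ret = (flags & ENV_FLAG_PS_USER) ? MMU_USER_IDX : MMU_KERNEL_IDX;
        if (flags & ENV_FLAG_PAL_MODE) {
            ret = MMU_KERNEL_IDX;   /* PALmode data accesses use kernel mode */
        }
        return ret;
    }

    int main(void)
    {
        assert(mmu_index(0) == MMU_KERNEL_IDX);
        assert(mmu_index(ENV_FLAG_PS_USER) == MMU_USER_IDX);
        /* PALmode wins even when the saved PS says user. */
        assert(mmu_index(ENV_FLAG_PS_USER | ENV_FLAG_PAL_MODE)
               == MMU_KERNEL_IDX);
        return 0;
    }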
diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c
new file mode 100644
index 000000000..3ff8bb456
--- /dev/null
+++ b/target/alpha/fpu_helper.c
@@ -0,0 +1,561 @@
+/*
+ * Helpers for floating point instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+void helper_setroundmode(CPUAlphaState *env, uint32_t val)
+{
+ set_float_rounding_mode(val, &FP_STATUS);
+}
+
+void helper_setflushzero(CPUAlphaState *env, uint32_t val)
+{
+ set_flush_to_zero(val, &FP_STATUS);
+}
+
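+/* Relocate a single flag bit: e.g. with SRC = 0x10 and DST = 0x02 this
+   reduces to (X / 8) & 2, moving bit 4 of X down to bit 1; when
+   SRC < DST, the multiply form shifts the bit up instead. */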
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+static uint32_t soft_to_fpcr_exc(CPUAlphaState *env)
+{
+ uint8_t exc = get_float_exception_flags(&FP_STATUS);
+ uint32_t ret = 0;
+
+ if (unlikely(exc)) {
+ set_float_exception_flags(0, &FP_STATUS);
+ ret |= CONVERT_BIT(exc, float_flag_invalid, FPCR_INV);
+ ret |= CONVERT_BIT(exc, float_flag_divbyzero, FPCR_DZE);
+ ret |= CONVERT_BIT(exc, float_flag_overflow, FPCR_OVF);
+ ret |= CONVERT_BIT(exc, float_flag_underflow, FPCR_UNF);
+ ret |= CONVERT_BIT(exc, float_flag_inexact, FPCR_INE);
+ }
+
+ return ret;
+}
+
+static void fp_exc_raise1(CPUAlphaState *env, uintptr_t retaddr,
+ uint32_t exc, uint32_t regno, uint32_t hw_exc)
+{
+ hw_exc |= CONVERT_BIT(exc, FPCR_INV, EXC_M_INV);
+ hw_exc |= CONVERT_BIT(exc, FPCR_DZE, EXC_M_DZE);
+ hw_exc |= CONVERT_BIT(exc, FPCR_OVF, EXC_M_FOV);
+ hw_exc |= CONVERT_BIT(exc, FPCR_UNF, EXC_M_UNF);
+ hw_exc |= CONVERT_BIT(exc, FPCR_INE, EXC_M_INE);
+ hw_exc |= CONVERT_BIT(exc, FPCR_IOV, EXC_M_IOV);
+
+ arith_excp(env, retaddr, hw_exc, 1ull << regno);
+}
+
+/* Raise exceptions for ieee fp insns without software completion.
+   In that case every exception traps; the trap-enable mask does
+   not apply. */
+void helper_fp_exc_raise(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+ uint32_t exc = env->error_code;
+ if (exc) {
+ env->fpcr |= exc;
+ exc &= ~ignore;
+ if (exc) {
+ fp_exc_raise1(env, GETPC(), exc, regno, 0);
+ }
+ }
+}
+
+/* Raise exceptions for ieee fp insns with software completion. */
+void helper_fp_exc_raise_s(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+ uint32_t exc = env->error_code & ~ignore;
+ if (exc) {
+ env->fpcr |= exc;
+ exc &= env->fpcr_exc_enable;
+ /*
+ * In system mode, the software handler gets invoked
+ * for any non-ignored exception.
+ * In user mode, the kernel's software handler only
+ * delivers a signal if the exception is enabled.
+ */
+#ifdef CONFIG_USER_ONLY
+ if (!exc) {
+ return;
+ }
+#endif
+ fp_exc_raise1(env, GETPC(), exc, regno, EXC_M_SWC);
+ }
+}
+
+/* Input handling without software completion.  Trap for all
+   non-finite numbers. */
+void helper_ieee_input(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ /* Denormals without /S raise an exception. */
+ if (frac != 0) {
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+ } else if (exp == 0x7ff) {
+ /* Infinity or NaN. */
+ env->fpcr |= FPCR_INV;
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+}
+
+/* Similar, but does not trap for infinities. Used for comparisons. */
+void helper_ieee_input_cmp(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ /* Denormals without /S raise an exception. */
+ if (frac != 0) {
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+ } else if (exp == 0x7ff && frac) {
+ /* NaN. */
+ env->fpcr |= FPCR_INV;
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+}
+
+/* Input handling with software completion.  Trap for denorms, unless DNZ
+   is set.  If we try to support DNOD (which none of the produced hardware
+   did, AFAICS), we'll need to suppress the trap when FPCR.DNOD is set;
+   then the code downstream of that will need to cope with denorms sans
+   flush_inputs_to_zero.  Most of it should work sanely, but there's
+   nothing to compare with. */
+void helper_ieee_input_s(CPUAlphaState *env, uint64_t val)
+{
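+    /* 2 * VAL shifts out the sign bit; subtracting 1 then yields a value
+       below 2**53 - 1 only for a nonzero input whose biased exponent is
+       zero, so the test below fires exactly for denormal inputs (zero
+       wraps around and compares high). */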
+ if (unlikely(2 * val - 1 < 0x1fffffffffffffull)
+ && !env->fp_status.flush_inputs_to_zero) {
+ arith_excp(env, GETPC(), EXC_M_INV | EXC_M_SWC, 0);
+ }
+}
+
+/* S floating (single) */
+
+/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
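+/* For example, 1.0f (0x3f800000) has exp_msb = 0 and exp_low = 0x7f,
+   so the exponent is rebiased to 0x7f | 0x380 = 0x3ff and the result
+   is 0x3ff0000000000000, i.e. 1.0 in register (T) format. */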
+static inline uint64_t float32_to_s_int(uint32_t fi)
+{
+ uint32_t frac = fi & 0x7fffff;
+ uint32_t sign = fi >> 31;
+ uint32_t exp_msb = (fi >> 30) & 1;
+ uint32_t exp_low = (fi >> 23) & 0x7f;
+ uint32_t exp;
+
+ exp = (exp_msb << 10) | exp_low;
+ if (exp_msb) {
+ if (exp_low == 0x7f) {
+ exp = 0x7ff;
+ }
+ } else {
+ if (exp_low != 0x00) {
+ exp |= 0x380;
+ }
+ }
+
+ return (((uint64_t)sign << 63)
+ | ((uint64_t)exp << 52)
+ | ((uint64_t)frac << 29));
+}
+
+static inline uint64_t float32_to_s(float32 fa)
+{
+ CPU_FloatU a;
+ a.f = fa;
+ return float32_to_s_int(a.l);
+}
+
+static inline uint32_t s_to_float32_int(uint64_t a)
+{
+ return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+}
+
+static inline float32 s_to_float32(uint64_t a)
+{
+ CPU_FloatU r;
+ r.l = s_to_float32_int(a);
+ return r.f;
+}
+
+uint32_t helper_s_to_memory(uint64_t a)
+{
+ return s_to_float32_int(a);
+}
+
+uint64_t helper_memory_to_s(uint32_t a)
+{
+ return float32_to_s_int(a);
+}
+
+uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_subs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_muls(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_divs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_sqrts(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa, fr;
+
+ fa = s_to_float32(a);
+ fr = float32_sqrt(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+
+/* T floating (double) */
+static inline float64 t_to_float64(uint64_t a)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.ll = a;
+ return r.d;
+}
+
+static inline uint64_t float64_to_t(float64 fa)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.d = fa;
+ return r.ll;
+}
+
+uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_subt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_mult(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_divt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_sqrtt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = t_to_float64(a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+/* Comparisons */
+uint64_t helper_cmptun(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmptle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmptlt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+/* Floating point format conversion */
+uint64_t helper_cvtts(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = t_to_float64(a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_cvtst(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa;
+ float64 fr;
+
+ fa = s_to_float32(a);
+ fr = float32_to_float64(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtqs(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+/* Implement float64 to uint64_t conversion without saturation -- we must
+ supply the truncated result. This behaviour is used by the compiler
+ to get unsigned conversion for free with the same instruction. */
+
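+/* For example, A = 0x4008000000000000 (3.0) has exp = 0x400, so
+   shift = 1024 - 1023 - 52 = -51; with the implicit bit restored the
+   fraction is 0x18000000000000, and frac >> 51 = 3 with no bits
+   shifted out, so the conversion is exact. */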
+static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode)
+{
+ uint64_t frac, ret = 0;
+ uint32_t exp, sign, exc = 0;
+ int shift;
+
+ sign = (a >> 63);
+ exp = (uint32_t)(a >> 52) & 0x7ff;
+ frac = a & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) {
+ goto do_underflow;
+ }
+ } else if (exp == 0x7ff) {
+ exc = FPCR_INV;
+ } else {
+ /* Restore implicit bit. */
+ frac |= 0x10000000000000ull;
+
+ shift = exp - 1023 - 52;
+ if (shift >= 0) {
+ /* In this case the number is so large that we must shift
+ the fraction left. There is no rounding to do. */
+ if (shift < 64) {
+ ret = frac << shift;
+ }
+ /* Check for overflow. Note the special case of -0x1p63. */
+ if (shift >= 11 && a != 0xC3E0000000000000ull) {
+ exc = FPCR_IOV | FPCR_INE;
+ }
+ } else {
+ uint64_t round;
+
+ /* In this case the number is smaller than the fraction as
+ represented by the 52 bit number. Here we must think
+ about rounding the result. Handle this by shifting the
+ fractional part of the number into the high bits of ROUND.
+ This will let us efficiently handle round-to-nearest. */
+ shift = -shift;
+ if (shift < 63) {
+ ret = frac >> shift;
+ round = frac << (64 - shift);
+ } else {
+ /* The exponent is so small we shift out everything.
+ Leave a sticky bit for proper rounding below. */
+ do_underflow:
+ round = 1;
+ }
+
+ if (round) {
+ exc = FPCR_INE;
+ switch (roundmode) {
+ case float_round_nearest_even:
+ if (round == (1ull << 63)) {
+ /* Fraction is exactly 0.5; round to even. */
+ ret += (ret & 1);
+ } else if (round > (1ull << 63)) {
+ ret += 1;
+ }
+ break;
+ case float_round_to_zero:
+ break;
+ case float_round_up:
+ ret += 1 - sign;
+ break;
+ case float_round_down:
+ ret += sign;
+ break;
+ }
+ }
+ }
+ if (sign) {
+ ret = -ret;
+ }
+ }
+ env->error_code = exc;
+
+ return ret;
+}
+
+uint64_t helper_cvttq(CPUAlphaState *env, uint64_t a)
+{
+ return do_cvttq(env, a, FP_STATUS.float_rounding_mode);
+}
+
+uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a)
+{
+ return do_cvttq(env, a, float_round_to_zero);
+}
+
+uint64_t helper_cvtqt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr = int64_to_float64(a, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtql(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exc = 0;
+ if (val != (int32_t)val) {
+ exc = FPCR_IOV | FPCR_INE;
+ }
+ env->error_code = exc;
+
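+    /* Repack the longword into the canonical register layout:
+       bits 31:30 move to 63:62 and bits 29:0 to 58:29. */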
+ return ((val & 0xc0000000) << 32) | ((val & 0x3fffffff) << 29);
+}
diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c
new file mode 100644
index 000000000..7db14f443
--- /dev/null
+++ b/target/alpha/gdbstub.c
@@ -0,0 +1,93 @@
+/*
+ * Alpha gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ uint64_t val;
+ CPU_DoubleU d;
+
+ switch (n) {
+ case 0 ... 30:
+ val = cpu_alpha_load_gr(env, n);
+ break;
+ case 32 ... 62:
+ d.d = env->fir[n - 32];
+ val = d.ll;
+ break;
+ case 63:
+ val = cpu_alpha_load_fpcr(env);
+ break;
+ case 64:
+ val = env->pc;
+ break;
+ case 66:
+ val = env->unique;
+ break;
+ case 31:
+ case 65:
+ /* 31 really is the zero register; 65 is unassigned in the
+ gdb protocol, but is still required to occupy 8 bytes. */
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+ return gdb_get_regl(mem_buf, val);
+}
+
+int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ target_ulong tmp = ldtul_p(mem_buf);
+ CPU_DoubleU d;
+
+ switch (n) {
+ case 0 ... 30:
+ cpu_alpha_store_gr(env, n, tmp);
+ break;
+ case 32 ... 62:
+ d.ll = tmp;
+ env->fir[n - 32] = d.d;
+ break;
+ case 63:
+ cpu_alpha_store_fpcr(env, tmp);
+ break;
+ case 64:
+ env->pc = tmp;
+ break;
+ case 66:
+ env->unique = tmp;
+ break;
+ case 31:
+ case 65:
+ /* 31 really is the zero register; 65 is unassigned in the
+ gdb protocol, but is still required to occupy 8 bytes. */
+ break;
+ default:
+ return 0;
+ }
+ return 8;
+}
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
new file mode 100644
index 000000000..b7e7f73b1
--- /dev/null
+++ b/target/alpha/helper.c
@@ -0,0 +1,548 @@
+/*
+ * Alpha emulation cpu helpers for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat-types.h"
+#include "exec/helper-proto.h"
+#include "qemu/qemu-print.h"
+
+
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
+{
+ return (uint64_t)env->fpcr << 32;
+}
+
+void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
+{
+ static const uint8_t rm_map[] = {
+ [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
+ [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
+ [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
+ [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
+ };
+
+ uint32_t fpcr = val >> 32;
+ uint32_t t = 0;
+
+ /* Record the raw value before adjusting for linux-user. */
+ env->fpcr = fpcr;
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Override some of these bits with the contents of ENV->SWCR.
+ * In system mode, some of these would trap to the kernel, at
+ * which point the kernel's handler would emulate and apply
+ * the software exception mask.
+ */
+ uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
+ fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);
+
+ /*
+ * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
+ * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
+ * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
+ */
+ t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
+#endif
+
+ t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
+ t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
+ t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
+ t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
+ t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);
+
+ env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;
+
+ env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
+ env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
+
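+    /* Flush denormal results to zero only when underflow traps are
+       disabled (UNFD) and mapping underflows to zero (UNDZ) is also
+       requested. */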
+ t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
+#ifdef CONFIG_USER_ONLY
+ t |= (env->swcr & SWCR_MAP_UMZ) != 0;
+#endif
+ env->fpcr_flush_to_zero = t;
+}
+
+uint64_t helper_load_fpcr(CPUAlphaState *env)
+{
+ return cpu_alpha_load_fpcr(env);
+}
+
+void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
+{
+ cpu_alpha_store_fpcr(env, val);
+}
+
+static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
+{
+#ifndef CONFIG_USER_ONLY
+ if (env->flags & ENV_FLAG_PAL_MODE) {
+ if (reg >= 8 && reg <= 14) {
+ return &env->shadow[reg - 8];
+ } else if (reg == 25) {
+ return &env->shadow[7];
+ }
+ }
+#endif
+ return &env->ir[reg];
+}
+
+uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
+{
+ return *cpu_alpha_addr_gr(env, reg);
+}
+
+void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
+{
+ *cpu_alpha_addr_gr(env, reg) = val;
+}
+
+#if defined(CONFIG_USER_ONLY)
+void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ target_ulong mmcsr, cause;
+
+ /* Assuming !maperr, infer the missing protection. */
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ mmcsr = MM_K_FOR;
+ cause = 0;
+ break;
+ case MMU_DATA_STORE:
+ mmcsr = MM_K_FOW;
+ cause = 1;
+ break;
+ case MMU_INST_FETCH:
+ mmcsr = MM_K_FOE;
+ cause = -1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (maperr) {
+ if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
+ /* Userspace address, therefore page not mapped. */
+ mmcsr = MM_K_TNV;
+ } else {
+ /* Kernel or invalid address. */
+ mmcsr = MM_K_ACV;
+ }
+ }
+
+ /* Record the arguments that PALcode would give to the kernel. */
+ cpu->env.trap_arg0 = address;
+ cpu->env.trap_arg1 = mmcsr;
+ cpu->env.trap_arg2 = cause;
+}
+#else
+/* Returns the OSF/1 entMM failure indication, or -1 on success. */
+static int get_physical_address(CPUAlphaState *env, target_ulong addr,
+ int prot_need, int mmu_idx,
+ target_ulong *pphys, int *pprot)
+{
+ CPUState *cs = env_cpu(env);
+ target_long saddr = addr;
+ target_ulong phys = 0;
+ target_ulong L1pte, L2pte, L3pte;
+ target_ulong pt, index;
+ int prot = 0;
+ int ret = MM_K_ACV;
+
+ /* Handle physical accesses. */
+ if (mmu_idx == MMU_PHYS_IDX) {
+ phys = addr;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = -1;
+ goto exit;
+ }
+
+ /* Ensure that the virtual address is properly sign-extended from
+ the last implemented virtual address bit. */
+ if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
+ goto exit;
+ }
+
+ /* Translate the superpage. */
+ /* ??? When we do more than emulate Unix PALcode, we'll need to
+ determine which KSEG is actually active. */
+ if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
+ /* User-space cannot access KSEG addresses. */
+ if (mmu_idx != MMU_KERNEL_IDX) {
+ goto exit;
+ }
+
+ /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
+ We would not do this if the 48-bit KSEG is enabled. */
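+        /* E.g. a KSEG offset of 1ull << 40 comes out at physical bit 43. */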
+ phys = saddr & ((1ull << 40) - 1);
+ phys |= (saddr & (1ull << 40)) << 3;
+
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = -1;
+ goto exit;
+ }
+
+ /* Interpret the page table exactly like PALcode does. */
+
+ pt = env->ptbr;
+
+ /* TODO: rather than using ldq_phys() to read the page table we should
+ * use address_space_ldq() so that we can handle the case when
+ * the page table read gives a bus fault, rather than ignoring it.
+ * For the existing code the zero data that ldq_phys will return for
+ * an access to invalid memory will result in our treating the page
+ * table as invalid, which may even be the right behaviour.
+ */
+
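+    /* A valid PTE carries the page frame number in its high 32 bits,
+       hence the >> 32 << TARGET_PAGE_BITS extraction below. */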
+ /* L1 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
+ L1pte = ldq_phys(cs->as, pt + index*8);
+
+ if (unlikely((L1pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L1pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L1pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L2 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
+ L2pte = ldq_phys(cs->as, pt + index*8);
+
+ if (unlikely((L2pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L2pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L2pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L3 page table read. */
+ index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
+ L3pte = ldq_phys(cs->as, pt + index*8);
+
+ phys = L3pte >> 32 << TARGET_PAGE_BITS;
+ if (unlikely((L3pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+
+#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
+# error page bits out of date
+#endif
+
+ /* Check access violations. */
+ if (L3pte & (PTE_KRE << mmu_idx)) {
+ prot |= PAGE_READ | PAGE_EXEC;
+ }
+ if (L3pte & (PTE_KWE << mmu_idx)) {
+ prot |= PAGE_WRITE;
+ }
+ if (unlikely((prot & prot_need) == 0 && prot_need)) {
+ goto exit;
+ }
+
+ /* Check fault-on-operation violations. */
+ prot &= ~(L3pte >> 1);
+ ret = -1;
+ if (unlikely((prot & prot_need) == 0)) {
+ ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
+ prot_need & PAGE_WRITE ? MM_K_FOW :
+ prot_need & PAGE_READ ? MM_K_FOR : -1);
+ }
+
+ exit:
+ *pphys = phys;
+ *pprot = prot;
+ return ret;
+}
+
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
+ return (fail >= 0 ? -1 : phys);
+}
+
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(env, addr, 1 << access_type,
+ mmu_idx, &phys, &prot);
+ if (unlikely(fail >= 0)) {
+ if (probe) {
+ return false;
+ }
+ cs->exception_index = EXCP_MMFAULT;
+ env->trap_arg0 = addr;
+ env->trap_arg1 = fail;
+ env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
+ access_type == MMU_DATA_STORE ? 1ull :
+ /* access_type == MMU_INST_FETCH */ -1ull);
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+}
+
+void alpha_cpu_do_interrupt(CPUState *cs)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int i = cs->exception_index;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name = "<unknown>";
+
+ switch (i) {
+ case EXCP_RESET:
+ name = "reset";
+ break;
+ case EXCP_MCHK:
+ name = "mchk";
+ break;
+ case EXCP_SMP_INTERRUPT:
+ name = "smp_interrupt";
+ break;
+ case EXCP_CLK_INTERRUPT:
+ name = "clk_interrupt";
+ break;
+ case EXCP_DEV_INTERRUPT:
+ name = "dev_interrupt";
+ break;
+ case EXCP_MMFAULT:
+ name = "mmfault";
+ break;
+ case EXCP_UNALIGN:
+ name = "unalign";
+ break;
+ case EXCP_OPCDEC:
+ name = "opcdec";
+ break;
+ case EXCP_ARITH:
+ name = "arith";
+ break;
+ case EXCP_FEN:
+ name = "fen";
+ break;
+ case EXCP_CALL_PAL:
+ name = "call_pal";
+ break;
+ }
+ qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
+ PRIx64 " sp=%016" PRIx64 "\n",
+ ++count, name, env->error_code, cs->cpu_index,
+ env->pc, env->ir[IR_SP]);
+ }
+
+ cs->exception_index = -1;
+
+ switch (i) {
+ case EXCP_RESET:
+ i = 0x0000;
+ break;
+ case EXCP_MCHK:
+ i = 0x0080;
+ break;
+ case EXCP_SMP_INTERRUPT:
+ i = 0x0100;
+ break;
+ case EXCP_CLK_INTERRUPT:
+ i = 0x0180;
+ break;
+ case EXCP_DEV_INTERRUPT:
+ i = 0x0200;
+ break;
+ case EXCP_MMFAULT:
+ i = 0x0280;
+ break;
+ case EXCP_UNALIGN:
+ i = 0x0300;
+ break;
+ case EXCP_OPCDEC:
+ i = 0x0380;
+ break;
+ case EXCP_ARITH:
+ i = 0x0400;
+ break;
+ case EXCP_FEN:
+ i = 0x0480;
+ break;
+ case EXCP_CALL_PAL:
+ i = env->error_code;
+ /* There are 64 entry points for both privileged and unprivileged,
+ with bit 0x80 indicating unprivileged. Each entry point gets
+ 64 bytes to do its job. */
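+        /* E.g. the unprivileged entry point 0x83 lands at
+           0x2000 + 3 * 64 = 0x20c0. */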
+ if (i & 0x80) {
+ i = 0x2000 + (i - 0x80) * 64;
+ } else {
+ i = 0x1000 + i * 64;
+ }
+ break;
+ default:
+ cpu_abort(cs, "Unhandled CPU exception");
+ }
+
+ /* Remember where the exception happened. Emulate real hardware in
+ that the low bit of the PC indicates PALmode. */
+ env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);
+
+ /* Continue execution at the PALcode entry point. */
+ env->pc = env->palbr + i;
+
+ /* Switch to PALmode. */
+ env->flags |= ENV_FLAG_PAL_MODE;
+}
+
+bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int idx = -1;
+
+ /* We never take interrupts while in PALmode. */
+ if (env->flags & ENV_FLAG_PAL_MODE) {
+ return false;
+ }
+
+ /* Fall through the switch, collecting the highest priority
+ interrupt that isn't masked by the processor status IPL. */
+ /* ??? This hard-codes the OSF/1 interrupt levels. */
+ switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
+ case 0 ... 3:
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ idx = EXCP_DEV_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 4:
+ if (interrupt_request & CPU_INTERRUPT_TIMER) {
+ idx = EXCP_CLK_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 5:
+ if (interrupt_request & CPU_INTERRUPT_SMP) {
+ idx = EXCP_SMP_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 6:
+ if (interrupt_request & CPU_INTERRUPT_MCHK) {
+ idx = EXCP_MCHK;
+ }
+ }
+ if (idx >= 0) {
+ cs->exception_index = idx;
+ env->error_code = 0;
+ alpha_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ static const char linux_reg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp"
+ };
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "PC " TARGET_FMT_lx " PS %02x\n",
+ env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
+ for (i = 0; i < 31; i++) {
+ qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
+ linux_reg_names[i], cpu_alpha_load_gr(env, i),
+ (i % 3) == 2 ? '\n' : ' ');
+ }
+
+ qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
+ env->lock_addr, env->lock_value);
+
+ if (flags & CPU_DUMP_FPU) {
+ for (i = 0; i < 31; i++) {
+ qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
+ (i % 3) == 2 ? '\n' : ' ');
+ }
+ qemu_fprintf(f, "fpcr %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
+ }
+ qemu_fprintf(f, "\n");
+}
+
+/* This should only be called from translate, via gen_excp.
+ We expect that ENV->PC has already been updated. */
+void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+ env->error_code = error;
+ cpu_loop_exit(cs);
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
+ int excp, int error)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+ env->error_code = error;
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr, true);
+ /* Floating-point exceptions (our only users) point to the next PC. */
+ env->pc += 4;
+ }
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
+ int exc, uint64_t mask)
+{
+ env->trap_arg0 = exc;
+ env->trap_arg1 = mask;
+ dynamic_excp(env, retaddr, EXCP_ARITH, 0);
+}
diff --git a/target/alpha/helper.h b/target/alpha/helper.h
new file mode 100644
index 000000000..d60f20870
--- /dev/null
+++ b/target/alpha/helper.h
@@ -0,0 +1,100 @@
+DEF_HELPER_3(excp, noreturn, env, int, int)
+DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
+
+DEF_HELPER_FLAGS_3(check_overflow, TCG_CALL_NO_WG, void, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_1(cmpbe0, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(cmpbge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(minub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(perr, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(pklb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pkwb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbl, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbw, TCG_CALL_NO_RWG_SE, i64, i64)
+
+DEF_HELPER_FLAGS_1(load_fpcr, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_2(store_fpcr, TCG_CALL_NO_RWG, void, env, i64)
+
+DEF_HELPER_FLAGS_1(f_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_f, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(addf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtf, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(g_to_memory, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(memory_to_g, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_3(addg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtg, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(adds, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(muls, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrts, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(addt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mult, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtt, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(cmptun, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpteq, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptle, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptlt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgeq, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgle, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpglt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(cvtts, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtst, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqs, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqt, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqf, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgf, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgq, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqg, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(cvttq, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvttq_c, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(cvtql, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(setroundmode, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_2(setflushzero, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_3(fp_exc_raise, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64)
+
+#if !defined (CONFIG_USER_ONLY)
+DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_1(halt, void, i64)
+
+DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
+DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)
+DEF_HELPER_FLAGS_2(set_alarm, TCG_CALL_NO_RWG, void, env, i64)
+#endif
diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c
new file mode 100644
index 000000000..5672696f6
--- /dev/null
+++ b/target/alpha/int_helper.c
@@ -0,0 +1,265 @@
+/*
+ * Helpers for integer and multimedia instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+
+uint64_t helper_zapnot(uint64_t val, uint64_t mskb)
+{
+ uint64_t mask;
+
+ mask = -(mskb & 0x01) & 0x00000000000000ffull;
+ mask |= -(mskb & 0x02) & 0x000000000000ff00ull;
+ mask |= -(mskb & 0x04) & 0x0000000000ff0000ull;
+ mask |= -(mskb & 0x08) & 0x00000000ff000000ull;
+ mask |= -(mskb & 0x10) & 0x000000ff00000000ull;
+ mask |= -(mskb & 0x20) & 0x0000ff0000000000ull;
+ mask |= -(mskb & 0x40) & 0x00ff000000000000ull;
+ mask |= -(mskb & 0x80) & 0xff00000000000000ull;
+
+ return val & mask;
+}
+
+uint64_t helper_zap(uint64_t val, uint64_t mask)
+{
+ return helper_zapnot(val, ~mask);
+}
+
+uint64_t helper_cmpbe0(uint64_t a)
+{
+ uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
+ uint64_t c = ~(((a & m) + m) | a | m);
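+    /* Per byte: adding 0x7f to the low seven bits carries into bit 7
+       unless they are all zero, and ORing in A itself catches bit 7, so
+       after the complement bit 7 is set exactly for the zero bytes. */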
+ /* a.......b.......c.......d.......e.......f.......g.......h....... */
+ c |= c << 7;
+ /* ab......bc......cd......de......ef......fg......gh......h....... */
+ c |= c << 14;
+ /* abcd....bcde....cdef....defg....efgh....fgh.....gh......h....... */
+ c |= c << 28;
+ /* abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h....... */
+ return c >> 56;
+}
+
+uint64_t helper_cmpbge(uint64_t a, uint64_t b)
+{
+ uint64_t mask = 0x00ff00ff00ff00ffULL;
+ uint64_t test = 0x0100010001000100ULL;
+ uint64_t al, ah, bl, bh, cl, ch;
+
+ /* Separate the bytes to avoid false positives. */
+ al = a & mask;
+ bl = b & mask;
+ ah = (a >> 8) & mask;
+ bh = (b >> 8) & mask;
+
+ /* "Compare". If a byte in B is greater than a byte in A,
+ it will clear the test bit. */
+ cl = ((al | test) - bl) & test;
+ ch = ((ah | test) - bh) & test;
+
+ /* Fold all of the test bits into a contiguous set. */
+ /* ch=.......a...............c...............e...............g........ */
+ /* cl=.......b...............d...............f...............h........ */
+ cl += ch << 1;
+ /* cl=......ab..............cd..............ef..............gh........ */
+ cl |= cl << 14;
+ /* cl=......abcd............cdef............efgh............gh........ */
+ cl |= cl << 28;
+ /* cl=......abcdefgh........cdefgh..........efgh............gh........ */
+ return cl >> 50;
+}
+
+uint64_t helper_minub8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_minsb8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int8_t opa, opb;
+ uint8_t opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_minuw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint16_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_minsw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int16_t opa, opb;
+ uint16_t opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_maxub8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_maxsb8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int8_t opa, opb;
+ uint8_t opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_maxuw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint16_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_maxsw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int16_t opa, opb;
+ uint16_t opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_perr(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ if (opa >= opb) {
+ opr = opa - opb;
+ } else {
+ opr = opb - opa;
+ }
+ res += opr;
+ }
+ return res;
+}
+
+uint64_t helper_pklb(uint64_t op1)
+{
+ return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
+}
+
+uint64_t helper_pkwb(uint64_t op1)
+{
+ return ((op1 & 0xff)
+ | ((op1 >> 8) & 0xff00)
+ | ((op1 >> 16) & 0xff0000)
+ | ((op1 >> 24) & 0xff000000));
+}
+
+uint64_t helper_unpkbl(uint64_t op1)
+{
+ return (op1 & 0xff) | ((op1 & 0xff00) << 24);
+}
+
+uint64_t helper_unpkbw(uint64_t op1)
+{
+ return ((op1 & 0xff)
+ | ((op1 & 0xff00) << 8)
+ | ((op1 & 0xff0000) << 16)
+ | ((op1 & 0xff000000) << 24));
+}
+
+void helper_check_overflow(CPUAlphaState *env, uint64_t op1, uint64_t op2)
+{
+ if (unlikely(op1 != op2)) {
+ arith_excp(env, GETPC(), EXC_M_IOV, 0);
+ }
+}
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
new file mode 100644
index 000000000..2b7c8148f
--- /dev/null
+++ b/target/alpha/machine.c
@@ -0,0 +1,87 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+static int get_fpcr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUAlphaState *env = opaque;
+ cpu_alpha_store_fpcr(env, qemu_get_be64(f));
+ return 0;
+}
+
+static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUAlphaState *env = opaque;
+ qemu_put_be64(f, cpu_alpha_load_fpcr(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_fpcr = {
+ .name = "fpcr",
+ .get = get_fpcr,
+ .put = put_fpcr,
+};
+
+static VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31),
+ VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31),
+ /* Save the architecture value of the fpcr, not the internally
+ expanded version. Since this architecture value does not
+       exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "fpcr",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &vmstate_fpcr,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+ VMSTATE_UINTTL(pc, CPUAlphaState),
+ VMSTATE_UINTTL(unique, CPUAlphaState),
+ VMSTATE_UINTTL(lock_addr, CPUAlphaState),
+ VMSTATE_UINTTL(lock_value, CPUAlphaState),
+
+ VMSTATE_UINT32(flags, CPUAlphaState),
+ VMSTATE_UINT32(pcc_ofs, CPUAlphaState),
+
+ VMSTATE_UINTTL(trap_arg0, CPUAlphaState),
+ VMSTATE_UINTTL(trap_arg1, CPUAlphaState),
+ VMSTATE_UINTTL(trap_arg2, CPUAlphaState),
+
+ VMSTATE_UINTTL(exc_addr, CPUAlphaState),
+ VMSTATE_UINTTL(palbr, CPUAlphaState),
+ VMSTATE_UINTTL(ptbr, CPUAlphaState),
+ VMSTATE_UINTTL(vptptr, CPUAlphaState),
+ VMSTATE_UINTTL(sysval, CPUAlphaState),
+ VMSTATE_UINTTL(usp, CPUAlphaState),
+
+ VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8),
+ VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .fields = vmstate_env_fields,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_alpha_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_cpu_fields,
+};
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
new file mode 100644
index 000000000..47283a061
--- /dev/null
+++ b/target/alpha/mem_helper.c
@@ -0,0 +1,79 @@
+/*
+ * Helpers for loads and stores
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+static void do_unaligned_access(CPUAlphaState *env, vaddr addr,
+                                uintptr_t retaddr)
+{
+ uint64_t pc;
+ uint32_t insn;
+
+ cpu_restore_state(env_cpu(env), retaddr, true);
+
+ pc = env->pc;
+ insn = cpu_ldl_code(env, pc);
+
+ env->trap_arg0 = addr;
+ env->trap_arg1 = insn >> 26; /* opcode */
+ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
+}
+
+#ifdef CONFIG_USER_ONLY
+void alpha_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ do_unaligned_access(env, addr, retaddr);
+}
+#else
+void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ do_unaligned_access(env, addr, retaddr);
+ cs->exception_index = EXCP_UNALIGN;
+ env->error_code = 0;
+ cpu_loop_exit(cs);
+}
+
+void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ env->trap_arg0 = addr;
+ env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0;
+ cs->exception_index = EXCP_MCHK;
+ env->error_code = 0;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/alpha/meson.build b/target/alpha/meson.build
new file mode 100644
index 000000000..1aec55abb
--- /dev/null
+++ b/target/alpha/meson.build
@@ -0,0 +1,18 @@
+alpha_ss = ss.source_set()
+alpha_ss.add(files(
+ 'cpu.c',
+ 'fpu_helper.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'int_helper.c',
+ 'mem_helper.c',
+ 'sys_helper.c',
+ 'translate.c',
+ 'vax_helper.c',
+))
+
+alpha_softmmu_ss = ss.source_set()
+alpha_softmmu_ss.add(files('machine.c'))
+
+target_arch += {'alpha': alpha_ss}
+target_softmmu_arch += {'alpha': alpha_softmmu_ss}
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
new file mode 100644
index 000000000..25f6cb889
--- /dev/null
+++ b/target/alpha/sys_helper.c
@@ -0,0 +1,92 @@
+/*
+ * Helpers for system instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "sysemu/runstate.h"
+#include "sysemu/sysemu.h"
+#include "qemu/timer.h"
+
+
+uint64_t helper_load_pcc(CPUAlphaState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ /* In system mode we have access to a decent high-resolution clock.
+ In order to make OS-level time accounting work with the RPCC,
+ present it with a well-timed clock fixed at 250MHz. */
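+    /* qemu_clock_get_ns counts nanoseconds; >> 2 gives 4ns ticks. */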
+ return (((uint64_t)env->pcc_ofs << 32)
+ | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
+#else
+    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.  Just pass through
+       the host cpu clock ticks.  Also, don't bother taking PCC_OFS into
+       account. */
+ return (uint32_t)cpu_get_host_ticks();
+#endif
+}
+
+/* PALcode support special instructions */
+#ifndef CONFIG_USER_ONLY
+void helper_tbia(CPUAlphaState *env)
+{
+ tlb_flush(env_cpu(env));
+}
+
+void helper_tbis(CPUAlphaState *env, uint64_t p)
+{
+ tlb_flush_page(env_cpu(env), p);
+}
+
+void helper_tb_flush(CPUAlphaState *env)
+{
+ tb_flush(env_cpu(env));
+}
+
+void helper_halt(uint64_t restart)
+{
+ if (restart) {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ } else {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+}
+
+uint64_t helper_get_vmtime(void)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+uint64_t helper_get_walltime(void)
+{
+ return qemu_clock_get_ns(rtc_clock);
+}
+
+void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
+{
+ AlphaCPU *cpu = env_archcpu(env);
+
+ if (expire) {
+ env->alarm_expire = expire;
+ timer_mod(cpu->alarm_timer, expire);
+ } else {
+ timer_del(cpu->alarm_timer);
+ }
+}
+
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
new file mode 100644
index 000000000..a4c3f43e7
--- /dev/null
+++ b/target/alpha/translate.c
@@ -0,0 +1,3043 @@
+/*
+ * Alpha emulation cpu translation for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
+#include "disas/disas.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+
+#undef ALPHA_DEBUG_DISAS
+#define CONFIG_SOFTFLOAT_INLINE
+
+#ifdef ALPHA_DEBUG_DISAS
+# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+ DisasContextBase base;
+
+#ifndef CONFIG_USER_ONLY
+ uint64_t palbr;
+#endif
+ uint32_t tbflags;
+ int mem_idx;
+
+ /* implver and amask values for this CPU. */
+ int implver;
+ int amask;
+
+ /* Current rounding mode for this TB. */
+ int tb_rm;
+ /* Current flush-to-zero setting for this TB. */
+ int tb_ftz;
+
+ /* The set of registers active in the current context. */
+ TCGv *ir;
+
+ /* Temporaries for $31 and $f31 as source and destination. */
+ TCGv zero;
+ TCGv sink;
+};
+
+/* Target-specific return values from translate_one, indicating the
+ state of the TB. Note that DISAS_NEXT indicates that we are not
+ exiting the TB. */
+#define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
+#define DISAS_PC_UPDATED DISAS_TARGET_1
+#define DISAS_PC_STALE DISAS_TARGET_2
+
+/* global register indexes */
+static TCGv cpu_std_ir[31];
+static TCGv cpu_fir[31];
+static TCGv cpu_pc;
+static TCGv cpu_lock_addr;
+static TCGv cpu_lock_value;
+
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_pal_ir[31];
+#endif
+
+#include "exec/gen-icount.h"
+
+void alpha_translate_init(void)
+{
+#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
+
+ typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ static const GlobalVar vars[] = {
+ DEF_VAR(pc),
+ DEF_VAR(lock_addr),
+ DEF_VAR(lock_value),
+ };
+
+#undef DEF_VAR
+
+ /* Use the symbolic register names that match the disassembler. */
+ static const char greg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp"
+ };
+ static const char freg_names[31][4] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30"
+ };
+#ifndef CONFIG_USER_ONLY
+ static const char shadow_names[8][8] = {
+ "pal_t7", "pal_s0", "pal_s1", "pal_s2",
+ "pal_s3", "pal_s4", "pal_s5", "pal_t11"
+ };
+#endif
+
+ int i;
+
+ for (i = 0; i < 31; i++) {
+ cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState, ir[i]),
+ greg_names[i]);
+ }
+
+ for (i = 0; i < 31; i++) {
+ cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState, fir[i]),
+ freg_names[i]);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
+ for (i = 0; i < 8; i++) {
+ int r = (i == 7 ? 25 : i + 8);
+ cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState,
+ shadow[i]),
+ shadow_names[i]);
+ }
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+ const GlobalVar *v = &vars[i];
+ *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
+ }
+}
+
+static TCGv load_zero(DisasContext *ctx)
+{
+ if (!ctx->zero) {
+ ctx->zero = tcg_constant_i64(0);
+ }
+ return ctx->zero;
+}
+
+static TCGv dest_sink(DisasContext *ctx)
+{
+ if (!ctx->sink) {
+ ctx->sink = tcg_temp_new();
+ }
+ return ctx->sink;
+}
+
+static void free_context_temps(DisasContext *ctx)
+{
+ if (ctx->sink) {
+ tcg_gen_discard_i64(ctx->sink);
+ tcg_temp_free(ctx->sink);
+ ctx->sink = NULL;
+ }
+}
+
+static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
+ uint8_t lit, bool islit)
+{
+ if (islit) {
+ return tcg_constant_i64(lit);
+ } else if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
+}
+
+static TCGv load_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
+}
+
+static int get_flag_ofs(unsigned shift)
+{
+ int ofs = offsetof(CPUAlphaState, flags);
+#ifdef HOST_WORDS_BIGENDIAN
+ ofs += 3 - (shift / 8);
+#else
+ ofs += shift / 8;
+#endif
+ return ofs;
+}
+
+static void ld_flag_byte(TCGv val, unsigned shift)
+{
+ tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
+}
+
+static void st_flag_byte(TCGv val, unsigned shift)
+{
+ tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
+}
+
+static void gen_excp_1(int exception, int error_code)
+{
+ TCGv_i32 tmp1, tmp2;
+
+ tmp1 = tcg_constant_i32(exception);
+ tmp2 = tcg_constant_i32(error_code);
+ gen_helper_excp(cpu_env, tmp1, tmp2);
+}
+
+static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
+{
+ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+ gen_excp_1(exception, error_code);
+ return DISAS_NORETURN;
+}
+
+static inline DisasJumpType gen_invalid(DisasContext *ctx)
+{
+ return gen_excp(ctx, EXCP_OPCDEC, 0);
+}
+
+static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ gen_helper_memory_to_f(dest, tmp32);
+ tcg_temp_free_i32(tmp32);
+}
+
+static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ gen_helper_memory_to_g(dest, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ gen_helper_memory_to_s(dest, tmp32);
+ tcg_temp_free_i32(tmp32);
+}
+
+static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ void (*func)(DisasContext *, TCGv, TCGv))
+{
+ /* Loads to $f31 are prefetches, which we can treat as nops. */
+ if (likely(ra != 31)) {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ func(ctx, cpu_fir[ra], addr);
+ tcg_temp_free(addr);
+ }
+}
+
+static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ MemOp op, bool clear, bool locked)
+{
+ TCGv addr, dest;
+
+ /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
+ prefetches, which we can treat as nops. No worries about
+ missed exceptions here. */
+ if (unlikely(ra == 31)) {
+ return;
+ }
+
+ addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
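+ /* The unaligned (_U) forms ignore the low three address bits,
+ so mask them off before the access. */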
+ if (clear) {
+ tcg_gen_andi_i64(addr, addr, ~0x7);
+ }
+
+ dest = ctx->ir[ra];
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);
+
+ if (locked) {
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, dest);
+ }
+ tcg_temp_free(addr);
+}
+
+static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ gen_helper_f_to_memory(tmp32, src);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free_i32(tmp32);
+}
+
+static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
+{
+ TCGv tmp = tcg_temp_new();
+ gen_helper_g_to_memory(tmp, src);
+ tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_temp_free(tmp);
+}
+
+static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ gen_helper_s_to_memory(tmp32, src);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free_i32(tmp32);
+}
+
+static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
+{
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ void (*func)(DisasContext *, TCGv, TCGv))
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ func(ctx, load_fpr(ctx, ra), addr);
+ tcg_temp_free(addr);
+}
+
+static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ MemOp op, bool clear)
+{
+ TCGv addr, src;
+
+ addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ if (clear) {
+ tcg_gen_andi_i64(addr, addr, ~0x7);
+ }
+
+ src = load_gpr(ctx, ra);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
+
+ tcg_temp_free(addr);
+}
+
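+/* Emulate a load-locked/store-conditional pair with an atomic
+ compare-and-swap: the store succeeds only if the locked address
+ still holds the value observed by the load. An intervening write
+ of the same value (ABA) is not detected, which is sufficient for
+ well-formed guest lock sequences. */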
+static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
+ int32_t disp16, int mem_idx,
+ MemOp op)
+{
+ TCGLabel *lab_fail, *lab_done;
+ TCGv addr, val;
+
+ addr = tcg_temp_new_i64();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ free_context_temps(ctx);
+
+ lab_fail = gen_new_label();
+ lab_done = gen_new_label();
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
+ tcg_temp_free_i64(addr);
+
+ val = tcg_temp_new_i64();
+ tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
+ load_gpr(ctx, ra), mem_idx, op);
+ free_context_temps(ctx);
+
+ if (ra != 31) {
+ tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
+ }
+ tcg_temp_free_i64(val);
+ tcg_gen_br(lab_done);
+
+ gen_set_label(lab_fail);
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], 0);
+ }
+
+ gen_set_label(lab_done);
+ tcg_gen_movi_i64(cpu_lock_addr, -1);
+ return DISAS_NEXT;
+}
+
+static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
+{
+ return translator_use_goto_tb(&ctx->base, dest);
+}
+
+static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
+{
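+ /* Alpha branch displacements count 32-bit instruction words from
+ the updated PC, hence disp << 2. */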
+ uint64_t dest = ctx->base.pc_next + (disp << 2);
+
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
+ }
+
+ /* Notice branch-to-next; used to initialize RA with the PC. */
+ if (disp == 0) {
+ return DISAS_NEXT;
+ } else if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, dest);
+ tcg_gen_exit_tb(ctx->base.tb, 0);
+ return DISAS_NORETURN;
+ } else {
+ tcg_gen_movi_i64(cpu_pc, dest);
+ return DISAS_PC_UPDATED;
+ }
+}
+
+static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
+ TCGv cmp, int32_t disp)
+{
+ uint64_t dest = ctx->base.pc_next + (disp << 2);
+ TCGLabel *lab_true = gen_new_label();
+
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
+
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+ tcg_gen_exit_tb(ctx->base.tb, 0);
+
+ gen_set_label(lab_true);
+ tcg_gen_goto_tb(1);
+ tcg_gen_movi_i64(cpu_pc, dest);
+ tcg_gen_exit_tb(ctx->base.tb, 1);
+
+ return DISAS_NORETURN;
+ } else {
+ TCGv_i64 z = load_zero(ctx);
+ TCGv_i64 d = tcg_constant_i64(dest);
+ TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);
+
+ tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
+ return DISAS_PC_UPDATED;
+ }
+}
+
+static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
+ int32_t disp, int mask)
+{
+ if (mask) {
+ TCGv tmp = tcg_temp_new();
+ DisasJumpType ret;
+
+ tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
+ ret = gen_bcond_internal(ctx, cond, tmp, disp);
+ tcg_temp_free(tmp);
+ return ret;
+ }
+ return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
+}
+
+/* Fold -0.0 for comparison with COND. */
+
+static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+{
+ uint64_t mzero = 1ull << 63;
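+ /* mzero is the bit pattern of -0.0; mzero - 1 (0x7fff...ffff)
+ masks off the sign bit. */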
+
+ switch (cond) {
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ /* For <= or >, the -0.0 value directly compares the way we want. */
+ tcg_gen_mov_i64(dest, src);
+ break;
+
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ /* For == or !=, we can simply mask off the sign bit and compare. */
+ tcg_gen_andi_i64(dest, src, mzero - 1);
+ break;
+
+ case TCG_COND_GE:
+ case TCG_COND_LT:
+ /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
+ tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
+ tcg_gen_neg_i64(dest, dest);
+ tcg_gen_and_i64(dest, dest, src);
+ break;
+
+ default:
+ abort();
+ }
+}
+
+static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
+ int32_t disp)
+{
+ TCGv cmp_tmp = tcg_temp_new();
+ DisasJumpType ret;
+
+ gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
+ ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
+ tcg_temp_free(cmp_tmp);
+ return ret;
+}
+
+static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
+{
+ TCGv_i64 va, vb, z;
+
+ z = load_zero(ctx);
+ vb = load_fpr(ctx, rb);
+ va = tcg_temp_new();
+ gen_fold_mzero(cond, va, load_fpr(ctx, ra));
+
+ tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
+
+ tcg_temp_free(va);
+}
+
+#define QUAL_RM_N 0x080 /* Round mode nearest even */
+#define QUAL_RM_C 0x000 /* Round mode chopped */
+#define QUAL_RM_M 0x040 /* Round mode minus infinity */
+#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
+#define QUAL_RM_MASK 0x0c0
+
+#define QUAL_U 0x100 /* Underflow enable (fp output) */
+#define QUAL_V 0x100 /* Overflow enable (int output) */
+#define QUAL_S 0x400 /* Software completion enable */
+#define QUAL_I 0x200 /* Inexact detection enable */
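+
+/* E.g. the /SUI qualifier combination is QUAL_S | QUAL_U | QUAL_I
+ = 0x700, leaving bits <7:6> of fn11 for the rounding mode. */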
+
+static void gen_qual_roundmode(DisasContext *ctx, int fn11)
+{
+ TCGv_i32 tmp;
+
+ fn11 &= QUAL_RM_MASK;
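+ /* ctx->tb_rm caches the rounding mode last set in this TB, so we
+ can skip redundant mode switches. */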
+ if (fn11 == ctx->tb_rm) {
+ return;
+ }
+ ctx->tb_rm = fn11;
+
+ tmp = tcg_temp_new_i32();
+ switch (fn11) {
+ case QUAL_RM_N:
+ tcg_gen_movi_i32(tmp, float_round_nearest_even);
+ break;
+ case QUAL_RM_C:
+ tcg_gen_movi_i32(tmp, float_round_to_zero);
+ break;
+ case QUAL_RM_M:
+ tcg_gen_movi_i32(tmp, float_round_down);
+ break;
+ case QUAL_RM_D:
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_dyn_round));
+ break;
+ }
+
+#if defined(CONFIG_SOFTFLOAT_INLINE)
+ /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
+ With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
+ sets the one field. */
+ tcg_gen_st8_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fp_status.float_rounding_mode));
+#else
+ gen_helper_setroundmode(tmp);
+#endif
+
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_qual_flushzero(DisasContext *ctx, int fn11)
+{
+ TCGv_i32 tmp;
+
+ fn11 &= QUAL_U;
+ if (fn11 == ctx->tb_ftz) {
+ return;
+ }
+ ctx->tb_ftz = fn11;
+
+ tmp = tcg_temp_new_i32();
+ if (fn11) {
+ /* Underflow is enabled, use the FPCR setting. */
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_flush_to_zero));
+ } else {
+ /* Underflow is disabled, force flush-to-zero. */
+ tcg_gen_movi_i32(tmp, 1);
+ }
+
+#if defined(CONFIG_SOFTFLOAT_INLINE)
+ tcg_gen_st8_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fp_status.flush_to_zero));
+#else
+ gen_helper_setflushzero(tmp);
+#endif
+
+ tcg_temp_free_i32(tmp);
+}
+
+static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
+{
+ TCGv val;
+
+ if (unlikely(reg == 31)) {
+ val = load_zero(ctx);
+ } else {
+ val = cpu_fir[reg];
+ if ((fn11 & QUAL_S) == 0) {
+ if (is_cmp) {
+ gen_helper_ieee_input_cmp(cpu_env, val);
+ } else {
+ gen_helper_ieee_input(cpu_env, val);
+ }
+ } else {
+#ifndef CONFIG_USER_ONLY
+ /* In system mode, raise exceptions for denormals like real
+ hardware. In user mode, proceed as if the OS completion
+ handler is handling the denormal as per spec. */
+ gen_helper_ieee_input_s(cpu_env, val);
+#endif
+ }
+ }
+ return val;
+}
+
+static void gen_fp_exc_raise(int rc, int fn11)
+{
+ /* ??? We ought to be able to do something with imprecise exceptions.
+ E.g. notice we're still in the trap shadow of something within the
+ TB and do not generate the code to signal the exception; end the TB
+ when an exception is forced to arrive, either by consumption of a
+ register value or TRAPB or EXCB. */
+ TCGv_i32 reg, ign;
+ uint32_t ignore = 0;
+
+ if (!(fn11 & QUAL_U)) {
+ /* Note that QUAL_U == QUAL_V, so ignore either. */
+ ignore |= FPCR_UNF | FPCR_IOV;
+ }
+ if (!(fn11 & QUAL_I)) {
+ ignore |= FPCR_INE;
+ }
+ ign = tcg_constant_i32(ignore);
+
+ /* ??? Pass in the regno of the destination so that the helper can
+ set EXC_MASK, which contains a bitmask of destination registers
+ that have caused arithmetic traps. A simple userspace emulation
+ does not require this. We do need it for a guest kernel's entArith,
+ or if we were to do something clever with imprecise exceptions. */
+ reg = tcg_constant_i32(rc + 32);
+ if (fn11 & QUAL_S) {
+ gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
+ } else {
+ gen_helper_fp_exc_raise(cpu_env, ign, reg);
+ }
+}
+
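+/* The 32-bit value sits in FP-register (memory format L) layout:
+ bits <63:62> hold result bits <31:30> and bits <58:29> hold
+ result bits <29:0>. */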
+static void gen_cvtlq(TCGv vc, TCGv vb)
+{
+ TCGv tmp = tcg_temp_new();
+
+ /* The arithmetic right shift here, plus the sign-extended mask below
+ yields a sign-extended result without an explicit ext32s_i64. */
+ tcg_gen_shri_i64(tmp, vb, 29);
+ tcg_gen_sari_i64(vc, vb, 32);
+ tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_ieee_arith2(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
+ int rb, int rc, int fn11)
+{
+ TCGv vb;
+
+ gen_qual_roundmode(ctx, fn11);
+ gen_qual_flushzero(ctx, fn11);
+
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ helper(dest_fpr(ctx, rc), cpu_env, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_ARITH2(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int rb, int rc, int fn11) \
+{ \
+ gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
+}
+IEEE_ARITH2(sqrts)
+IEEE_ARITH2(sqrtt)
+IEEE_ARITH2(cvtst)
+IEEE_ARITH2(cvtts)
+
+static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
+{
+ TCGv vb, vc;
+
+ /* No need to set flushzero, since we have an integer output. */
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
+
+ /* Almost all integer conversions use cropped rounding;
+ special case that. */
+ if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
+ gen_helper_cvttq_c(vc, cpu_env, vb);
+ } else {
+ gen_qual_roundmode(ctx, fn11);
+ gen_helper_cvttq(vc, cpu_env, vb);
+ }
+ gen_fp_exc_raise(rc, fn11);
+}
+
+static void gen_ieee_intcvt(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
+ int rb, int rc, int fn11)
+{
+ TCGv vb, vc;
+
+ gen_qual_roundmode(ctx, fn11);
+ vb = load_fpr(ctx, rb);
+ vc = dest_fpr(ctx, rc);
+
+ /* The only exception that can be raised by integer conversion
+ is inexact. Thus we only need to worry about exceptions when
+ inexact handling is requested. */
+ if (fn11 & QUAL_I) {
+ helper(vc, cpu_env, vb);
+ gen_fp_exc_raise(rc, fn11);
+ } else {
+ helper(vc, cpu_env, vb);
+ }
+}
+
+#define IEEE_INTCVT(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int rb, int rc, int fn11) \
+{ \
+ gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
+}
+IEEE_INTCVT(cvtqs)
+IEEE_INTCVT(cvtqt)
+
+static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
+{
+ TCGv vmask = tcg_constant_i64(mask);
+ TCGv tmp = tcg_temp_new_i64();
+
+ if (inv_a) {
+ tcg_gen_andc_i64(tmp, vmask, va);
+ } else {
+ tcg_gen_and_i64(tmp, va, vmask);
+ }
+
+ tcg_gen_andc_i64(vc, vb, vmask);
+ tcg_gen_or_i64(vc, vc, tmp);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_ieee_arith3(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
+ int ra, int rb, int rc, int fn11)
+{
+ TCGv va, vb, vc;
+
+ gen_qual_roundmode(ctx, fn11);
+ gen_qual_flushzero(ctx, fn11);
+
+ va = gen_ieee_input(ctx, ra, fn11, 0);
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_ARITH3(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int ra, int rb, int rc, int fn11) \
+{ \
+ gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
+}
+IEEE_ARITH3(adds)
+IEEE_ARITH3(subs)
+IEEE_ARITH3(muls)
+IEEE_ARITH3(divs)
+IEEE_ARITH3(addt)
+IEEE_ARITH3(subt)
+IEEE_ARITH3(mult)
+IEEE_ARITH3(divt)
+
+static void gen_ieee_compare(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
+ int ra, int rb, int rc, int fn11)
+{
+ TCGv va, vb, vc;
+
+ va = gen_ieee_input(ctx, ra, fn11, 1);
+ vb = gen_ieee_input(ctx, rb, fn11, 1);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_CMP3(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int ra, int rb, int rc, int fn11) \
+{ \
+ gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
+}
+IEEE_CMP3(cmptun)
+IEEE_CMP3(cmpteq)
+IEEE_CMP3(cmptlt)
+IEEE_CMP3(cmptle)
+
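+/* E.g. zapnot_mask(0x0f) == 0x00000000ffffffffull, selecting the low
+ four bytes. */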
+static inline uint64_t zapnot_mask(uint8_t lit)
+{
+ uint64_t mask = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ if ((lit >> i) & 1) {
+ mask |= 0xffull << (i * 8);
+ }
+ }
+ return mask;
+}
+
+/* Implement zapnot with an immediate operand, which expands to some
+ form of immediate AND. This is a basic building block in the
+ definition of many of the other byte manipulation instructions. */
+static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
+{
+ switch (lit) {
+ case 0x00:
+ tcg_gen_movi_i64(dest, 0);
+ break;
+ case 0x01:
+ tcg_gen_ext8u_i64(dest, src);
+ break;
+ case 0x03:
+ tcg_gen_ext16u_i64(dest, src);
+ break;
+ case 0x0f:
+ tcg_gen_ext32u_i64(dest, src);
+ break;
+ case 0xff:
+ tcg_gen_mov_i64(dest, src);
+ break;
+ default:
+ tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
+ break;
+ }
+}
+
+/* EXTWH, EXTLH, EXTQH */
+static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
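+ /* (64 - lit * 8) & 0x3f == (-8 * lit) mod 64, the left-shift count
+ that brings the selected high bytes into position; e.g. lit = 5
+ gives pos = 24. */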
+ int pos = (64 - lit * 8) & 0x3f;
+ int len = cto32(byte_mask) * 8;
+ if (pos < len) {
+ tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
+ } else {
+ tcg_gen_movi_i64(vc, 0);
+ }
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
+ tcg_gen_neg_i64(tmp, tmp);
+ tcg_gen_andi_i64(tmp, tmp, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ gen_zapnoti(vc, vc, byte_mask);
+}
+
+/* EXTBL, EXTWL, EXTLL, EXTQL */
+static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ int pos = (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos + len >= 64) {
+ len = 64 - pos;
+ }
+ tcg_gen_extract_i64(vc, va, pos, len);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(tmp, tmp, 3);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ gen_zapnoti(vc, vc, byte_mask);
+ }
+}
+
+/* INSWH, INSLH, INSQH */
+static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ int pos = 64 - (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos < len) {
+ tcg_gen_extract_i64(vc, va, pos, len - pos);
+ } else {
+ tcg_gen_movi_i64(vc, 0);
+ }
+ } else {
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ /* The instruction description has us left-shift the byte mask
+ and extract bits <15:8> and apply that zap at the end. This
+ is equivalent to simply performing the zap first and shifting
+ afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
+ /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
+ portably by splitting the shift into two parts: shift_count-1 and 1.
+ Arrange for the -1 by using ones-complement instead of
+ twos-complement in the negation: ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+
+ tcg_gen_shr_i64(vc, tmp, shift);
+ tcg_gen_shri_i64(vc, vc, 1);
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+ }
+}
+
+/* INSBL, INSWL, INSLL, INSQL */
+static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ int pos = (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos + len > 64) {
+ len = 64 - pos;
+ }
+ tcg_gen_deposit_z_i64(vc, va, pos, len);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ TCGv shift = tcg_temp_new();
+
+ /* The instruction description has us left-shift the byte mask
+ and extract bits <15:8> and apply that zap at the end. This
+ is equivalent to simply performing the zap first and shifting
+ afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shl_i64(vc, tmp, shift);
+ tcg_temp_free(shift);
+ tcg_temp_free(tmp);
+ }
+}
+
+/* MSKWH, MSKLH, MSKQH */
+static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
+ } else {
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ /* The instruction description is as above, where the byte_mask
+ is shifted left, and then we extract bits <15:8>. This can be
+ emulated with a right-shift on the expanded byte mask. This
+ requires extra care because for an input <2:0> == 0 we need a
+ shift of 64 bits in order to generate a zero. This is done by
+ splitting the shift into two parts, the variable shift - 1
+ followed by a constant 1 shift. The code we expand below is
+ equivalent to ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shr_i64(mask, mask, shift);
+ tcg_gen_shri_i64(mask, mask, 1);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+ }
+}
+
+/* MSKBL, MSKWL, MSKLL, MSKQL */
+static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
+ } else {
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shl_i64(mask, mask, shift);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+ }
+}
+
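+/* RC and RS: read the RX flag byte into RA, then store 0 (RC) or
+ 1 (RS) back to it. */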
+static void gen_rx(DisasContext *ctx, int ra, int set)
+{
+ if (ra != 31) {
+ ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
+ }
+
+ st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
+}
+
+static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
+{
+ /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
+ to internal cpu registers. */
+
+ /* Unprivileged PAL call */
+ if (palcode >= 0x80 && palcode < 0xC0) {
+ switch (palcode) {
+ case 0x86:
+ /* IMB */
+ /* No-op inside QEMU. */
+ break;
+ case 0x9E:
+ /* RDUNIQUE */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, unique));
+ break;
+ case 0x9F:
+ /* WRUNIQUE */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, unique));
+ break;
+ default:
+ palcode &= 0xbf;
+ goto do_call_pal;
+ }
+ return DISAS_NEXT;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* Privileged PAL code */
+ if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
+ switch (palcode) {
+ case 0x01:
+ /* CFLUSH */
+ /* No-op inside QEMU. */
+ break;
+ case 0x02:
+ /* DRAINA */
+ /* No-op inside QEMU. */
+ break;
+ case 0x2D:
+ /* WRVPTPTR */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, vptptr));
+ break;
+ case 0x31:
+ /* WRVAL */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
+ break;
+ case 0x32:
+ /* RDVAL */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
+ break;
+
+ case 0x35:
+ /* SWPIPL */
+ /* Note that we already know we're in kernel mode, so we know
+ that PS only contains the 3 IPL bits. */
+ ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
+
+ /* But make sure to store only the 3 IPL bits from the user. */
+ {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
+ st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
+ tcg_temp_free(tmp);
+ }
+
+ /* Allow interrupts to be recognized right away. */
+ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+ return DISAS_PC_UPDATED_NOCHAIN;
+
+ case 0x36:
+ /* RDPS */
+ ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
+ break;
+
+ case 0x38:
+ /* WRUSP */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, usp));
+ break;
+ case 0x3A:
+ /* RDUSP */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, usp));
+ break;
+ case 0x3C:
+ /* WHAMI */
+ tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
+ -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
+ break;
+
+ case 0x3E:
+ /* WTINT */
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ -offsetof(AlphaCPU, env) +
+ offsetof(CPUState, halted));
+ tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
+ return gen_excp(ctx, EXCP_HALTED, 0);
+
+ default:
+ palcode &= 0x3f;
+ goto do_call_pal;
+ }
+ return DISAS_NEXT;
+ }
+#endif
+ return gen_invalid(ctx);
+
+ do_call_pal:
+#ifdef CONFIG_USER_ONLY
+ return gen_excp(ctx, EXCP_CALL_PAL, palcode);
+#else
+ {
+ TCGv tmp = tcg_temp_new();
+ uint64_t exc_addr = ctx->base.pc_next;
+ uint64_t entry = ctx->palbr;
+
+ if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
+ exc_addr |= 1;
+ } else {
+ tcg_gen_movi_i64(tmp, 1);
+ st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
+ }
+
+ tcg_gen_movi_i64(tmp, exc_addr);
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ tcg_temp_free(tmp);
+
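+ /* PALcode entry points: privileged entries vector to palbr + 0x1000
+ + 64 * palcode, unprivileged (CALL_PAL 0x80 and up) to palbr
+ + 0x2000 + 64 * (palcode - 0x80). */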
+ entry += (palcode & 0x80
+ ? 0x2000 + (palcode - 0x80) * 64
+ : 0x1000 + palcode * 64);
+
+ tcg_gen_movi_i64(cpu_pc, entry);
+ return DISAS_PC_UPDATED;
+ }
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+
+#define PR_LONG 0x200000
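+/* Or'ed into a CPUAlphaState offset to mark a 32-bit field; genuine
+ offsets are far smaller, so the flag bit cannot collide. */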
+
+static int cpu_pr_data(int pr)
+{
+ switch (pr) {
+ case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
+ case 3: return offsetof(CPUAlphaState, trap_arg0);
+ case 4: return offsetof(CPUAlphaState, trap_arg1);
+ case 5: return offsetof(CPUAlphaState, trap_arg2);
+ case 6: return offsetof(CPUAlphaState, exc_addr);
+ case 7: return offsetof(CPUAlphaState, palbr);
+ case 8: return offsetof(CPUAlphaState, ptbr);
+ case 9: return offsetof(CPUAlphaState, vptptr);
+ case 10: return offsetof(CPUAlphaState, unique);
+ case 11: return offsetof(CPUAlphaState, sysval);
+ case 12: return offsetof(CPUAlphaState, usp);
+
+ case 40 ... 63:
+ return offsetof(CPUAlphaState, scratch[pr - 40]);
+
+ case 251:
+ return offsetof(CPUAlphaState, alarm_expire);
+ }
+ return 0;
+}
+
+static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
+{
+ void (*helper)(TCGv);
+ int data;
+
+ switch (regno) {
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(va, cpu_std_ir[regno]);
+ break;
+
+ case 250: /* WALLTIME */
+ helper = gen_helper_get_walltime;
+ goto do_helper;
+ case 249: /* VMTIME */
+ helper = gen_helper_get_vmtime;
+ do_helper:
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ helper(va);
+ return DISAS_PC_STALE;
+ } else {
+ helper(va);
+ }
+ break;
+
+ case 0: /* PS */
+ ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
+ break;
+ case 1: /* FEN */
+ ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
+ break;
+
+ default:
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ data = cpu_pr_data(regno);
+ if (data == 0) {
+ tcg_gen_movi_i64(va, 0);
+ } else if (data & PR_LONG) {
+ tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_ld_i64(va, cpu_env, data);
+ }
+ break;
+ }
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
+{
+ int data;
+ DisasJumpType ret = DISAS_NEXT;
+
+ switch (regno) {
+ case 255:
+ /* TBIA */
+ gen_helper_tbia(cpu_env);
+ break;
+
+ case 254:
+ /* TBIS */
+ gen_helper_tbis(cpu_env, vb);
+ break;
+
+ case 253:
+ /* WAIT */
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
+ return gen_excp(ctx, EXCP_HALTED, 0);
+
+ case 252:
+ /* HALT */
+ gen_helper_halt(vb);
+ return DISAS_PC_STALE;
+
+ case 251:
+ /* ALARM */
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ ret = DISAS_PC_STALE;
+ }
+ gen_helper_set_alarm(cpu_env, vb);
+ break;
+
+ case 7:
+ /* PALBR */
+ tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
+ /* Changing the PAL base register implies un-chaining all of the TBs
+ that ended with a CALL_PAL. Since the base register usually only
+ changes during boot, flushing everything works well. */
+ gen_helper_tb_flush(cpu_env);
+ return DISAS_PC_STALE;
+
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(cpu_std_ir[regno], vb);
+ break;
+
+ case 0: /* PS */
+ st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
+ break;
+ case 1: /* FEN */
+ st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
+ break;
+
+ default:
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ data = cpu_pr_data(regno);
+ if (data != 0) {
+ if (data & PR_LONG) {
+ tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_st_i64(vb, cpu_env, data);
+ }
+ }
+ break;
+ }
+
+ return ret;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#define REQUIRE_NO_LIT \
+ do { \
+ if (real_islit) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_AMASK(FLAG) \
+ do { \
+ if ((ctx->amask & AMASK_##FLAG) == 0) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_TB_FLAG(FLAG) \
+ do { \
+ if ((ctx->tbflags & (FLAG)) == 0) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_REG_31(WHICH) \
+ do { \
+ if (WHICH != 31) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_FEN \
+ do { \
+ if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
+ goto raise_fen; \
+ } \
+ } while (0)
+
+static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
+{
+ int32_t disp21, disp16, disp12 __attribute__((unused));
+ uint16_t fn11;
+ uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
+ bool islit, real_islit;
+ TCGv va, vb, vc, tmp, tmp2;
+ TCGv_i32 t32;
+ DisasJumpType ret;
+
+ /* Decode all instruction fields */
+ opc = extract32(insn, 26, 6);
+ ra = extract32(insn, 21, 5);
+ rb = extract32(insn, 16, 5);
+ rc = extract32(insn, 0, 5);
+ real_islit = islit = extract32(insn, 12, 1);
+ lit = extract32(insn, 13, 8);
+
+ disp21 = sextract32(insn, 0, 21);
+ disp16 = sextract32(insn, 0, 16);
+ disp12 = sextract32(insn, 0, 12);
+
+ fn11 = extract32(insn, 5, 11);
+ fpfn = extract32(insn, 5, 6);
+ fn7 = extract32(insn, 5, 7);
+
+ if (rb == 31 && !islit) {
+ islit = true;
+ lit = 0;
+ }
+
+ ret = DISAS_NEXT;
+ switch (opc) {
+ case 0x00:
+ /* CALL_PAL */
+ ret = gen_call_pal(ctx, insn & 0x03ffffff);
+ break;
+ case 0x01:
+ /* OPC01 */
+ goto invalid_opc;
+ case 0x02:
+ /* OPC02 */
+ goto invalid_opc;
+ case 0x03:
+ /* OPC03 */
+ goto invalid_opc;
+ case 0x04:
+ /* OPC04 */
+ goto invalid_opc;
+ case 0x05:
+ /* OPC05 */
+ goto invalid_opc;
+ case 0x06:
+ /* OPC06 */
+ goto invalid_opc;
+ case 0x07:
+ /* OPC07 */
+ goto invalid_opc;
+
+ case 0x09:
+ /* LDAH */
+ disp16 = (uint32_t)disp16 << 16;
+ /* fall through */
+ case 0x08:
+ /* LDA */
+ va = dest_gpr(ctx, ra);
+ /* It's worth special-casing immediate loads. */
+ if (rb == 31) {
+ tcg_gen_movi_i64(va, disp16);
+ } else {
+ tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
+ }
+ break;
+
+ case 0x0A:
+ /* LDBU */
+ REQUIRE_AMASK(BWX);
+ gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
+ break;
+ case 0x0B:
+ /* LDQ_U */
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
+ break;
+ case 0x0C:
+ /* LDWU */
+ REQUIRE_AMASK(BWX);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
+ break;
+ case 0x0D:
+ /* STW */
+ REQUIRE_AMASK(BWX);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
+ break;
+ case 0x0E:
+ /* STB */
+ REQUIRE_AMASK(BWX);
+ gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
+ break;
+ case 0x0F:
+ /* STQ_U */
+ gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
+ break;
+
+ case 0x10:
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (ra == 31) {
+ if (fn7 == 0x00) {
+ /* Special case ADDL as SEXTL. */
+ tcg_gen_ext32s_i64(vc, vb);
+ break;
+ }
+ if (fn7 == 0x29) {
+ /* Special case SUBQ as NEGQ. */
+ tcg_gen_neg_i64(vc, vb);
+ break;
+ }
+ }
+
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* ADDL */
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x02:
+ /* S4ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x09:
+ /* SUBL */
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x0B:
+ /* S4SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x0F:
+ /* CMPBGE */
+ if (ra == 31) {
+ /* Special case 0 >= X as X == 0. */
+ gen_helper_cmpbe0(vc, vb);
+ } else {
+ gen_helper_cmpbge(vc, va, vb);
+ }
+ break;
+ case 0x12:
+ /* S8ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1B:
+ /* S8SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1D:
+ /* CMPULT */
+ tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
+ break;
+ case 0x20:
+ /* ADDQ */
+ tcg_gen_add_i64(vc, va, vb);
+ break;
+ case 0x22:
+ /* S4ADDQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x29:
+ /* SUBQ */
+ tcg_gen_sub_i64(vc, va, vb);
+ break;
+ case 0x2B:
+ /* S4SUBQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x2D:
+ /* CMPEQ */
+ tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
+ break;
+ case 0x32:
+ /* S8ADDQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x3B:
+ /* S8SUBQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x3D:
+ /* CMPULE */
+ tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
+ break;
+ case 0x40:
+ /* ADDL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_add_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x49:
+ /* SUBL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_sub_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x4D:
+ /* CMPLT */
+ tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
+ break;
+ case 0x60:
+ /* ADDQ/V */
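+ /* Signed overflow iff the operands have equal signs and the
+ result's sign differs: bit 63 of ~(va ^ vb) & (va ^ vc). */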
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_eqv_i64(tmp, va, vb);
+ tcg_gen_mov_i64(tmp2, va);
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_xor_i64(tmp2, tmp2, vc);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ tcg_gen_shri_i64(tmp, tmp, 63);
+ tcg_gen_movi_i64(tmp2, 0);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ case 0x69:
+ /* SUBQ/V */
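+ /* Overflow iff the operand signs differ and the result's sign
+ differs from va's: bit 63 of (va ^ vb) & (va ^ vc). */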
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_xor_i64(tmp, va, vb);
+ tcg_gen_mov_i64(tmp2, va);
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_xor_i64(tmp2, tmp2, vc);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ tcg_gen_shri_i64(tmp, tmp, 63);
+ tcg_gen_movi_i64(tmp2, 0);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ case 0x6D:
+ /* CMPLE */
+ tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x11:
+ if (fn7 == 0x20) {
+ if (rc == 31) {
+ /* Special case BIS as NOP. */
+ break;
+ }
+ if (ra == 31) {
+ /* Special case BIS as MOV. */
+ vc = dest_gpr(ctx, rc);
+ if (islit) {
+ tcg_gen_movi_i64(vc, lit);
+ } else {
+ tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
+ }
+ break;
+ }
+ }
+
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (fn7 == 0x28 && ra == 31) {
+ /* Special case ORNOT as NOT. */
+ tcg_gen_not_i64(vc, vb);
+ break;
+ }
+
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* AND */
+ tcg_gen_and_i64(vc, va, vb);
+ break;
+ case 0x08:
+ /* BIC */
+ tcg_gen_andc_i64(vc, va, vb);
+ break;
+ case 0x14:
+ /* CMOVLBS */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
+ break;
+ case 0x16:
+ /* CMOVLBC */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
+ break;
+ case 0x20:
+ /* BIS */
+ tcg_gen_or_i64(vc, va, vb);
+ break;
+ case 0x24:
+ /* CMOVEQ */
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x26:
+ /* CMOVNE */
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x28:
+ /* ORNOT */
+ tcg_gen_orc_i64(vc, va, vb);
+ break;
+ case 0x40:
+ /* XOR */
+ tcg_gen_xor_i64(vc, va, vb);
+ break;
+ case 0x44:
+ /* CMOVLT */
+ tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x46:
+ /* CMOVGE */
+ tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x48:
+ /* EQV */
+ tcg_gen_eqv_i64(vc, va, vb);
+ break;
+ case 0x61:
+ /* AMASK */
+ REQUIRE_REG_31(ra);
+ tcg_gen_andi_i64(vc, vb, ~ctx->amask);
+ break;
+ case 0x64:
+ /* CMOVLE */
+ tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x66:
+ /* CMOVGT */
+ tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x6C:
+ /* IMPLVER */
+ REQUIRE_REG_31(ra);
+ tcg_gen_movi_i64(vc, ctx->implver);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x12:
+ vc = dest_gpr(ctx, rc);
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x02:
+ /* MSKBL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x06:
+ /* EXTBL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x0B:
+ /* INSBL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x12:
+ /* MSKWL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x16:
+ /* EXTWL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x1B:
+ /* INSWL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x22:
+ /* MSKLL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x26:
+ /* EXTLL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x2B:
+ /* INSLL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x30:
+ /* ZAP */
+ if (islit) {
+ gen_zapnoti(vc, va, ~lit);
+ } else {
+ gen_helper_zap(vc, va, load_gpr(ctx, rb));
+ }
+ break;
+ case 0x31:
+ /* ZAPNOT */
+ if (islit) {
+ gen_zapnoti(vc, va, lit);
+ } else {
+ gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
+ }
+ break;
+ case 0x32:
+ /* MSKQL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x34:
+ /* SRL */
+ if (islit) {
+ tcg_gen_shri_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x36:
+ /* EXTQL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x39:
+ /* SLL */
+ if (islit) {
+ tcg_gen_shli_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x3B:
+ /* INSQL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x3C:
+ /* SRA */
+ if (islit) {
+ tcg_gen_sari_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_sar_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x52:
+ /* MSKWH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x57:
+ /* INSWH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x5A:
+ /* EXTWH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x62:
+ /* MSKLH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x67:
+ /* INSLH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x6A:
+ /* EXTLH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x72:
+ /* MSKQH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x77:
+ /* INSQH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x7A:
+ /* EXTQH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x13:
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* MULL */
+ tcg_gen_mul_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x20:
+ /* MULQ */
+ tcg_gen_mul_i64(vc, va, vb);
+ break;
+ case 0x30:
+ /* UMULH */
+ tmp = tcg_temp_new();
+ tcg_gen_mulu2_i64(tmp, vc, va, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x40:
+ /* MULL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_mul_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x60:
+ /* MULQ/V */
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_muls2_i64(vc, tmp, va, vb);
+ tcg_gen_sari_i64(tmp2, vc, 63);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x14:
+ REQUIRE_AMASK(FIX);
+ vc = dest_fpr(ctx, rc);
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x04:
+ /* ITOFS */
+ REQUIRE_REG_31(rb);
+ REQUIRE_FEN;
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_extrl_i64_i32(t32, va);
+ gen_helper_memory_to_s(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ case 0x0A:
+ /* SQRTF */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtf(vc, cpu_env, vb);
+ break;
+ case 0x0B:
+ /* SQRTS */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_sqrts(ctx, rb, rc, fn11);
+ break;
+ case 0x14:
+ /* ITOFF */
+ REQUIRE_REG_31(rb);
+ REQUIRE_FEN;
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_extrl_i64_i32(t32, va);
+ gen_helper_memory_to_f(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ case 0x24:
+ /* ITOFT */
+ REQUIRE_REG_31(rb);
+ REQUIRE_FEN;
+ va = load_gpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
+ break;
+ case 0x2A:
+ /* SQRTG */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtg(vc, cpu_env, vb);
+ break;
+ case 0x2B:
+ /* SQRTT */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_sqrtt(ctx, rb, rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x15:
+ /* VAX floating point */
+ /* XXX: rounding mode and trap are ignored (!) */
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x00:
+ /* ADDF */
+ REQUIRE_FEN;
+ gen_helper_addf(vc, cpu_env, va, vb);
+ break;
+ case 0x01:
+ /* SUBF */
+ REQUIRE_FEN;
+ gen_helper_subf(vc, cpu_env, va, vb);
+ break;
+ case 0x02:
+ /* MULF */
+ REQUIRE_FEN;
+ gen_helper_mulf(vc, cpu_env, va, vb);
+ break;
+ case 0x03:
+ /* DIVF */
+ REQUIRE_FEN;
+ gen_helper_divf(vc, cpu_env, va, vb);
+ break;
+ case 0x1E:
+ /* CVTDG -- TODO */
+ REQUIRE_REG_31(ra);
+ goto invalid_opc;
+ case 0x20:
+ /* ADDG */
+ REQUIRE_FEN;
+ gen_helper_addg(vc, cpu_env, va, vb);
+ break;
+ case 0x21:
+ /* SUBG */
+ REQUIRE_FEN;
+ gen_helper_subg(vc, cpu_env, va, vb);
+ break;
+ case 0x22:
+ /* MULG */
+ REQUIRE_FEN;
+ gen_helper_mulg(vc, cpu_env, va, vb);
+ break;
+ case 0x23:
+ /* DIVG */
+ REQUIRE_FEN;
+ gen_helper_divg(vc, cpu_env, va, vb);
+ break;
+ case 0x25:
+ /* CMPGEQ */
+ REQUIRE_FEN;
+ gen_helper_cmpgeq(vc, cpu_env, va, vb);
+ break;
+ case 0x26:
+ /* CMPGLT */
+ REQUIRE_FEN;
+ gen_helper_cmpglt(vc, cpu_env, va, vb);
+ break;
+ case 0x27:
+ /* CMPGLE */
+ REQUIRE_FEN;
+ gen_helper_cmpgle(vc, cpu_env, va, vb);
+ break;
+ case 0x2C:
+ /* CVTGF */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_helper_cvtgf(vc, cpu_env, vb);
+ break;
+ case 0x2D:
+ /* CVTGD -- TODO */
+ REQUIRE_REG_31(ra);
+ goto invalid_opc;
+ case 0x2F:
+ /* CVTGQ */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_helper_cvtgq(vc, cpu_env, vb);
+ break;
+ case 0x3C:
+ /* CVTQF */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_helper_cvtqf(vc, cpu_env, vb);
+ break;
+ case 0x3E:
+ /* CVTQG */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_helper_cvtqg(vc, cpu_env, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x16:
+ /* IEEE floating-point */
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x00:
+ /* ADDS */
+ REQUIRE_FEN;
+ gen_adds(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x01:
+ /* SUBS */
+ REQUIRE_FEN;
+ gen_subs(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x02:
+ /* MULS */
+ REQUIRE_FEN;
+ gen_muls(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x03:
+ /* DIVS */
+ REQUIRE_FEN;
+ gen_divs(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x20:
+ /* ADDT */
+ REQUIRE_FEN;
+ gen_addt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x21:
+ /* SUBT */
+ REQUIRE_FEN;
+ gen_subt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x22:
+ /* MULT */
+ REQUIRE_FEN;
+ gen_mult(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x23:
+ /* DIVT */
+ REQUIRE_FEN;
+ gen_divt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x24:
+ /* CMPTUN */
+ REQUIRE_FEN;
+ gen_cmptun(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x25:
+ /* CMPTEQ */
+ REQUIRE_FEN;
+ gen_cmpteq(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x26:
+ /* CMPTLT */
+ REQUIRE_FEN;
+ gen_cmptlt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x27:
+ /* CMPTLE */
+ REQUIRE_FEN;
+ gen_cmptle(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x2C:
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ if (fn11 == 0x2AC || fn11 == 0x6AC) {
+ /* CVTST */
+ gen_cvtst(ctx, rb, rc, fn11);
+ } else {
+ /* CVTTS */
+ gen_cvtts(ctx, rb, rc, fn11);
+ }
+ break;
+ case 0x2F:
+ /* CVTTQ */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_cvttq(ctx, rb, rc, fn11);
+ break;
+ case 0x3C:
+ /* CVTQS */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_cvtqs(ctx, rb, rc, fn11);
+ break;
+ case 0x3E:
+ /* CVTQT */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ gen_cvtqt(ctx, rb, rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x17:
+ switch (fn11) {
+ case 0x010:
+ /* CVTLQ */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_cvtlq(vc, vb);
+ break;
+ case 0x020:
+ /* CPYS */
+ REQUIRE_FEN;
+ if (rc == 31) {
+ /* Special case CPYS as FNOP. */
+ } else {
+ vc = dest_fpr(ctx, rc);
+ va = load_fpr(ctx, ra);
+ if (ra == rb) {
+ /* Special case CPYS as FMOV. */
+ tcg_gen_mov_i64(vc, va);
+ } else {
+ vb = load_fpr(ctx, rb);
+ gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
+ }
+ }
+ break;
+ case 0x021:
+ /* CPYSN */
+ REQUIRE_FEN;
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
+ break;
+ case 0x022:
+ /* CPYSE */
+ REQUIRE_FEN;
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
+ break;
+ case 0x024:
+ /* MT_FPCR */
+ REQUIRE_FEN;
+ va = load_fpr(ctx, ra);
+ gen_helper_store_fpcr(cpu_env, va);
+ if (ctx->tb_rm == QUAL_RM_D) {
+ /* Re-do the copy of the rounding mode to fp_status
+ the next time we use dynamic rounding. */
+ ctx->tb_rm = -1;
+ }
+ break;
+ case 0x025:
+ /* MF_FPCR */
+ REQUIRE_FEN;
+ va = dest_fpr(ctx, ra);
+ gen_helper_load_fpcr(va, cpu_env);
+ break;
+ case 0x02A:
+ /* FCMOVEQ */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
+ break;
+ case 0x02B:
+ /* FCMOVNE */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
+ break;
+ case 0x02C:
+ /* FCMOVLT */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
+ break;
+ case 0x02D:
+ /* FCMOVGE */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
+ break;
+ case 0x02E:
+ /* FCMOVLE */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
+ break;
+ case 0x02F:
+ /* FCMOVGT */
+ REQUIRE_FEN;
+ gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
+ break;
+ case 0x030: /* CVTQL */
+ case 0x130: /* CVTQL/V */
+ case 0x530: /* CVTQL/SV */
+ REQUIRE_REG_31(ra);
+ REQUIRE_FEN;
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_helper_cvtql(vc, cpu_env, vb);
+ gen_fp_exc_raise(rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x18:
+ switch ((uint16_t)disp16) {
+ case 0x0000:
+ /* TRAPB */
+ /* No-op. */
+ break;
+ case 0x0400:
+ /* EXCB */
+ /* No-op. */
+ break;
+ case 0x4000:
+ /* MB */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ break;
+ case 0x4400:
+ /* WMB */
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+ break;
+ case 0x8000:
+ /* FETCH */
+ /* No-op */
+ break;
+ case 0xA000:
+ /* FETCH_M */
+ /* No-op */
+ break;
+ case 0xC000:
+ /* RPCC */
+ va = dest_gpr(ctx, ra);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ gen_helper_load_pcc(va, cpu_env);
+ ret = DISAS_PC_STALE;
+ } else {
+ gen_helper_load_pcc(va, cpu_env);
+ }
+ break;
+ case 0xE000:
+ /* RC */
+ gen_rx(ctx, ra, 0);
+ break;
+ case 0xE800:
+ /* ECB */
+ break;
+ case 0xF000:
+ /* RS */
+ gen_rx(ctx, ra, 1);
+ break;
+ case 0xF800:
+ /* WH64 */
+ /* No-op */
+ break;
+ case 0xFC00:
+ /* WH64EN */
+ /* No-op */
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x19:
+ /* HW_MFPR (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
+ va = dest_gpr(ctx, ra);
+ ret = gen_mfpr(ctx, va, insn & 0xffff);
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1A:
+ /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
+ prediction stack action, which of course we don't implement. */
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
+ }
+ ret = DISAS_PC_UPDATED;
+ break;
+
+ case 0x1B:
+ /* HW_LD (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
+ {
+ TCGv addr = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ va = dest_gpr(ctx, ra);
+
+ tcg_gen_addi_i64(addr, vb, disp12);
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access (hw_ldl/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ break;
+ case 0x1:
+ /* Quadword physical access (hw_ldq/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ break;
+ case 0x2:
+ /* Longword physical access with lock (hw_ldl_l/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, va);
+ break;
+ case 0x3:
+ /* Quadword physical access with lock (hw_ldq_l/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, va);
+ break;
+ case 0x4:
+ /* Longword virtual PTE fetch (hw_ldl/v) */
+ goto invalid_opc;
+ case 0x5:
+ /* Quadword virtual PTE fetch (hw_ldq/v) */
+ goto invalid_opc;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Longword virtual access (hw_ldl) */
+ goto invalid_opc;
+ case 0x9:
+ /* Quadword virtual access (hw_ldq) */
+ goto invalid_opc;
+ case 0xA:
+ /* Longword virtual access with protection check (hw_ldl/w) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ break;
+ case 0xB:
+ /* Quadword virtual access with protection check (hw_ldq/w) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
+ break;
+ case 0xC:
+ /* Longword virtual access with alt access mode (hw_ldl/a)*/
+ goto invalid_opc;
+ case 0xD:
+ /* Quadword virtual access with alt access mode (hw_ldq/a) */
+ goto invalid_opc;
+ case 0xE:
+ /* Longword virtual access with alternate access mode and
+ protection checks (hw_ldl/wa) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ break;
+ case 0xF:
+ /* Quadword virtual access with alternate access mode and
+ protection checks (hw_ldq/wa) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
+ break;
+ }
+ tcg_temp_free(addr);
+ break;
+ }
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1C:
+ vc = dest_gpr(ctx, rc);
+ if (fn7 == 0x70) {
+ /* FTOIT */
+ REQUIRE_AMASK(FIX);
+ REQUIRE_REG_31(rb);
+ va = load_fpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
+ break;
+ } else if (fn7 == 0x78) {
+ /* FTOIS */
+ REQUIRE_AMASK(FIX);
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_fpr(ctx, ra);
+ gen_helper_s_to_memory(t32, va);
+ tcg_gen_ext_i32_i64(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ }
+
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+ switch (fn7) {
+ case 0x00:
+ /* SEXTB */
+ REQUIRE_AMASK(BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext8s_i64(vc, vb);
+ break;
+ case 0x01:
+ /* SEXTW */
+ REQUIRE_AMASK(BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext16s_i64(vc, vb);
+ break;
+ case 0x30:
+ /* CTPOP */
+ REQUIRE_AMASK(CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ tcg_gen_ctpop_i64(vc, vb);
+ break;
+ case 0x31:
+ /* PERR */
+ REQUIRE_AMASK(MVI);
+ REQUIRE_NO_LIT;
+ va = load_gpr(ctx, ra);
+ gen_helper_perr(vc, va, vb);
+ break;
+ case 0x32:
+ /* CTLZ */
+ REQUIRE_AMASK(CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ tcg_gen_clzi_i64(vc, vb, 64);
+ break;
+ case 0x33:
+ /* CTTZ */
+ REQUIRE_AMASK(CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ tcg_gen_ctzi_i64(vc, vb, 64);
+ break;
+ case 0x34:
+ /* UNPKBW */
+ REQUIRE_AMASK(MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_unpkbw(vc, vb);
+ break;
+ case 0x35:
+ /* UNPKBL */
+ REQUIRE_AMASK(MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_unpkbl(vc, vb);
+ break;
+ case 0x36:
+ /* PKWB */
+ REQUIRE_AMASK(MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_pkwb(vc, vb);
+ break;
+ case 0x37:
+ /* PKLB */
+ REQUIRE_AMASK(MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_pklb(vc, vb);
+ break;
+ case 0x38:
+ /* MINSB8 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsb8(vc, va, vb);
+ break;
+ case 0x39:
+ /* MINSW4 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsw4(vc, va, vb);
+ break;
+ case 0x3A:
+ /* MINUB8 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minub8(vc, va, vb);
+ break;
+ case 0x3B:
+ /* MINUW4 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minuw4(vc, va, vb);
+ break;
+ case 0x3C:
+ /* MAXUB8 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxub8(vc, va, vb);
+ break;
+ case 0x3D:
+ /* MAXUW4 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxuw4(vc, va, vb);
+ break;
+ case 0x3E:
+ /* MAXSB8 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsb8(vc, va, vb);
+ break;
+ case 0x3F:
+ /* MAXSW4 */
+ REQUIRE_AMASK(MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsw4(vc, va, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x1D:
+ /* HW_MTPR (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
+ vb = load_gpr(ctx, rb);
+ ret = gen_mtpr(ctx, vb, insn & 0xffff);
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1E:
+ /* HW_RET (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
+ if (rb == 31) {
+ /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+ address from EXC_ADDR. This turns out to be useful for our
+ emulation PALcode, so continue to accept it. */
+ vb = dest_sink(ctx);
+ tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ } else {
+ vb = load_gpr(ctx, rb);
+ }
+ tcg_gen_movi_i64(cpu_lock_addr, -1);
+ st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, vb, 1);
+ st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
+ tcg_temp_free(tmp);
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
+ /* Allow interrupts to be recognized right away. */
+ ret = DISAS_PC_UPDATED_NOCHAIN;
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1F:
+ /* HW_ST (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
+ {
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access */
+ va = load_gpr(ctx, ra);
+ vb = load_gpr(ctx, rb);
+ tmp = tcg_temp_new();
+ tcg_gen_addi_i64(tmp, vb, disp12);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1:
+ /* Quadword physical access */
+ va = load_gpr(ctx, ra);
+ vb = load_gpr(ctx, rb);
+ tmp = tcg_temp_new();
+ tcg_gen_addi_i64(tmp, vb, disp12);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
+ tcg_temp_free(tmp);
+ break;
+ case 0x2:
+ /* Longword physical access with lock */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ MMU_PHYS_IDX, MO_LESL);
+ break;
+ case 0x3:
+ /* Quadword physical access with lock */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ MMU_PHYS_IDX, MO_LEQ);
+ break;
+ case 0x4:
+ /* Longword virtual access */
+ goto invalid_opc;
+ case 0x5:
+ /* Quadword virtual access */
+ goto invalid_opc;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x9:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xA:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xB:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xC:
+ /* Longword virtual access with alternate access mode */
+ goto invalid_opc;
+ case 0xD:
+ /* Quadword virtual access with alternate access mode */
+ goto invalid_opc;
+ case 0xE:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xF:
+ /* Invalid */
+ goto invalid_opc;
+ }
+ break;
+ }
+#else
+ goto invalid_opc;
+#endif
+ case 0x20:
+ /* LDF */
+ REQUIRE_FEN;
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
+ break;
+ case 0x21:
+ /* LDG */
+ REQUIRE_FEN;
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
+ break;
+ case 0x22:
+ /* LDS */
+ REQUIRE_FEN;
+ gen_load_fp(ctx, ra, rb, disp16, gen_lds);
+ break;
+ case 0x23:
+ /* LDT */
+ REQUIRE_FEN;
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
+ break;
+ case 0x24:
+ /* STF */
+ REQUIRE_FEN;
+ gen_store_fp(ctx, ra, rb, disp16, gen_stf);
+ break;
+ case 0x25:
+ /* STG */
+ REQUIRE_FEN;
+ gen_store_fp(ctx, ra, rb, disp16, gen_stg);
+ break;
+ case 0x26:
+ /* STS */
+ REQUIRE_FEN;
+ gen_store_fp(ctx, ra, rb, disp16, gen_sts);
+ break;
+ case 0x27:
+ /* STT */
+ REQUIRE_FEN;
+ gen_store_fp(ctx, ra, rb, disp16, gen_stt);
+ break;
+ case 0x28:
+ /* LDL */
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
+ break;
+ case 0x29:
+ /* LDQ */
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
+ break;
+ case 0x2A:
+ /* LDL_L */
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ break;
+ case 0x2B:
+ /* LDQ_L */
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
+ break;
+ case 0x2C:
+ /* STL */
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
+ break;
+ case 0x2D:
+ /* STQ */
+ gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
+ break;
+ case 0x2E:
+ /* STL_C */
+ ret = gen_store_conditional(ctx, ra, rb, disp16,
+ ctx->mem_idx, MO_LESL);
+ break;
+ case 0x2F:
+ /* STQ_C */
+ ret = gen_store_conditional(ctx, ra, rb, disp16,
+ ctx->mem_idx, MO_LEQ);
+ break;
+ case 0x30:
+ /* BR */
+ ret = gen_bdirect(ctx, ra, disp21);
+ break;
+ case 0x31: /* FBEQ */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
+ break;
+ case 0x32: /* FBLT */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
+ break;
+ case 0x33: /* FBLE */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
+ break;
+ case 0x34:
+ /* BSR */
+ ret = gen_bdirect(ctx, ra, disp21);
+ break;
+ case 0x35: /* FBNE */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
+ break;
+ case 0x36: /* FBGE */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
+ break;
+ case 0x37: /* FBGT */
+ REQUIRE_FEN;
+ ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
+ break;
+ case 0x38:
+ /* BLBC */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
+ break;
+ case 0x39:
+ /* BEQ */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
+ break;
+ case 0x3A:
+ /* BLT */
+ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
+ break;
+ case 0x3B:
+ /* BLE */
+ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
+ break;
+ case 0x3C:
+ /* BLBS */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
+ break;
+ case 0x3D:
+ /* BNE */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
+ break;
+ case 0x3E:
+ /* BGE */
+ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
+ break;
+ case 0x3F:
+ /* BGT */
+ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
+ break;
+ invalid_opc:
+ ret = gen_invalid(ctx);
+ break;
+ raise_fen:
+ ret = gen_excp(ctx, EXCP_FEN, 0);
+ break;
+ }
+
+ return ret;
+}
+
+static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUAlphaState *env = cpu->env_ptr;
+ int64_t bound;
+
+ ctx->tbflags = ctx->base.tb->flags;
+ ctx->mem_idx = cpu_mmu_index(env, false);
+ ctx->implver = env->implver;
+ ctx->amask = env->amask;
+
+#ifdef CONFIG_USER_ONLY
+ ctx->ir = cpu_std_ir;
+#else
+ ctx->palbr = env->palbr;
+ ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
+#endif
+
+    /* ??? Every TB begins with unset rounding mode, to be initialized on
+       the first fp insn of the TB. Alternatively we could define a proper
+       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
+       to reset the FP_STATUS to that default at the end of any TB that
+       changes the default. We could even (gasp) dynamically figure out
+       what default would be most efficient given the running program. */
+ ctx->tb_rm = -1;
+ /* Similarly for flush-to-zero. */
+ ctx->tb_ftz = -1;
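+    /* In both cases -1 is a sentinel for "not yet set in this TB": the
+       first FP insn that needs the rounding mode or FTZ flag will emit
+       the code to install it. */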
+
+ ctx->zero = NULL;
+ ctx->sink = NULL;
+
+ /* Bound the number of insns to execute to those left on the page. */
+ bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
+ ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+}
+
+static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ tcg_gen_insn_start(dcbase->pc_next);
+}
+
+static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUAlphaState *env = cpu->env_ptr;
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
+
+ ctx->base.pc_next += 4;
+ ctx->base.is_jmp = translate_one(ctx, insn);
+
+ free_context_temps(ctx);
+ translator_loop_temp_check(&ctx->base);
+}
+
+static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_NORETURN:
+ break;
+ case DISAS_TOO_MANY:
+ if (use_goto_tb(ctx, ctx->base.pc_next)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+ tcg_gen_exit_tb(ctx->base.tb, 0);
+ }
+ /* FALLTHRU */
+ case DISAS_PC_STALE:
+ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
+ /* FALLTHRU */
+ case DISAS_PC_UPDATED:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_PC_UPDATED_NOCHAIN:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps alpha_tr_ops = {
+ .init_disas_context = alpha_tr_init_disas_context,
+ .tb_start = alpha_tr_tb_start,
+ .insn_start = alpha_tr_insn_start,
+ .translate_insn = alpha_tr_translate_insn,
+ .tb_stop = alpha_tr_tb_stop,
+ .disas_log = alpha_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+ translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/alpha/vax_helper.c b/target/alpha/vax_helper.c
new file mode 100644
index 000000000..f94fb519d
--- /dev/null
+++ b/target/alpha/vax_helper.c
@@ -0,0 +1,355 @@
+/*
+ * Helpers for vax floating point instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+/* F floating (VAX) */
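+/*
+ * As the shifts below show, the 64-bit in-register form of an F-float
+ * reuses the G-float layout (sign at bit 63, exponent field from bit 52
+ * up, fraction from bit 51 down to bit 29), with the 8-bit IEEE single
+ * exponent stored re-biased by +2.
+ */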
+static uint64_t float32_to_f(float32 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_FloatU a;
+
+ a.f = fa;
+ sig = ((uint64_t)a.l & 0x80000000) << 32;
+ exp = (a.l >> 23) & 0xff;
+ mant = ((uint64_t)a.l & 0x007fffff) << 29;
+
+ if (exp == 255) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 253) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
+{
+ uint32_t exp, mant_sig;
+ CPU_FloatU r;
+
+ exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
+ mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
+
+ if (unlikely(!exp && mant_sig)) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.l = 0;
+ } else {
+ r.l = ((exp - 2) << 23) | mant_sig;
+ }
+
+ return r.f;
+}
+
+uint32_t helper_f_to_memory(uint64_t a)
+{
+ uint32_t r;
+ r = (a & 0x00001fffe0000000ull) >> 13;
+ r |= (a & 0x07ffe00000000000ull) >> 45;
+ r |= (a & 0xc000000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_f(uint32_t a)
+{
+ uint64_t r;
+ r = ((uint64_t)(a & 0x0000c000)) << 48;
+ r |= ((uint64_t)(a & 0x003fffff)) << 45;
+ r |= ((uint64_t)(a & 0xffff0000)) << 13;
+ if (!(a & 0x00004000)) {
+ r |= 0x7ll << 59;
+ }
+ return r;
+}
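+
+/*
+ * Per the masks above, f_to_memory and memory_to_f swizzle between the
+ * 64-bit register form and the 32-bit VAX memory image in 16-bit pieces
+ * (VAX's PDP-11-style word order); memory_to_f also re-derives, from
+ * bit 14 of the memory word, the high exponent bits (0x7 << 59) that
+ * the 32-bit form drops.
+ */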
+
+/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
+ either implement VAX arithmetic properly or just signal invalid opcode. */
+
+uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
+{
+ float32 ft, fr;
+
+ ft = f_to_float32(env, GETPC(), t);
+ fr = float32_sqrt(ft, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+
+/* G floating (VAX) */
+static uint64_t float64_to_g(float64 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_DoubleU a;
+
+ a.d = fa;
+ sig = a.ll & 0x8000000000000000ull;
+ exp = (a.ll >> 52) & 0x7ff;
+ mant = a.ll & 0x000fffffffffffffull;
+
+ if (exp == 2047) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 2045) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
+{
+ uint64_t exp, mant_sig;
+ CPU_DoubleU r;
+
+ exp = (a >> 52) & 0x7ff;
+ mant_sig = a & 0x800fffffffffffffull;
+
+ if (!exp && mant_sig) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.ll = 0;
+ } else {
+ r.ll = ((exp - 2) << 52) | mant_sig;
+ }
+
+ return r.d;
+}
+
+uint64_t helper_g_to_memory(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_g(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
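+
+/*
+ * Note that the G-float memory order is a 16-bit word reversal of the
+ * register order; since that permutation is its own inverse, the two
+ * helpers above are identical.
+ */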
+
+uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
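+/*
+ * Alpha's CMPGxx insns write the G-float value 2.0 (0x4000000000000000
+ * in the register format) for "true" and exact zero for "false", which
+ * is what the three compare helpers below return.
+ */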
+uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa = g_to_float64(env, GETPC(), a);
+ return float64_to_int64_round_to_zero(fa, &FP_STATUS);
+}
+
+uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr;
+ fr = int64_to_float64(a, &FP_STATUS);
+ return float64_to_g(fr);
+}
diff --git a/target/arm/Kconfig b/target/arm/Kconfig
new file mode 100644
index 000000000..3f3394a22
--- /dev/null
+++ b/target/arm/Kconfig
@@ -0,0 +1,6 @@
+config ARM
+ bool
+
+config AARCH64
+ bool
+ select ARM
diff --git a/target/arm/a32-uncond.decode b/target/arm/a32-uncond.decode
new file mode 100644
index 000000000..2339de2e9
--- /dev/null
+++ b/target/arm/a32-uncond.decode
@@ -0,0 +1,74 @@
+# A32 unconditional instructions
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# All insns that have 0xf in insn[31:28] are decoded here.
+# All of those that have a COND field in insn[31:28] are in a32.decode
+#
+
+&empty !extern
+&i !extern imm
+&setend E
+
+# Branch with Link and Exchange
+
+%imm24h 0:s24 24:1 !function=times_2
+
+BLX_i 1111 101 . ........................ &i imm=%imm24h
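+# A sketch of the assembly above: %imm24h concatenates the signed 24-bit
+# field at bit 0 with the H bit at bit 24 and doubles the result, so
+# imm = ((sign_extend(imm24) << 1) | H) * 2, i.e. the halfword-aligned
+# offset imm24:H:'0'.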
+
+# System Instructions
+
+&rfe rn w pu
+&srs mode w pu
+&cps mode imod M A I F
+
+RFE 1111 100 pu:2 0 w:1 1 rn:4 0000 1010 0000 0000 &rfe
+SRS 1111 100 pu:2 1 w:1 0 1101 0000 0101 000 mode:5 &srs
+CPS 1111 0001 0000 imod:2 M:1 0 0000 000 A:1 I:1 F:1 0 mode:5 \
+ &cps
+
+# Clear-Exclusive, Barriers
+
+# QEMU does not require the option field for the barriers.
+CLREX 1111 0101 0111 1111 1111 0000 0001 1111
+DSB 1111 0101 0111 1111 1111 0000 0100 ----
+DMB 1111 0101 0111 1111 1111 0000 0101 ----
+ISB 1111 0101 0111 1111 1111 0000 0110 ----
+SB 1111 0101 0111 1111 1111 0000 0111 0000
+
+# Set Endianness
+SETEND 1111 0001 0000 0001 0000 00 E:1 0 0000 0000 &setend
+
+# Preload instructions
+
+PLD 1111 0101 -101 ---- 1111 ---- ---- ---- # (imm, lit) 5te
+PLDW 1111 0101 -001 ---- 1111 ---- ---- ---- # (imm, lit) 7mp
+PLI 1111 0100 -101 ---- 1111 ---- ---- ---- # (imm, lit) 7
+
+PLD 1111 0111 -101 ---- 1111 ----- -- 0 ---- # (register) 5te
+PLDW 1111 0111 -001 ---- 1111 ----- -- 0 ---- # (register) 7mp
+PLI 1111 0110 -101 ---- 1111 ----- -- 0 ---- # (register) 7
+
+# Unallocated memory hints
+#
+# Since these are v7MP nops, and PLDW is v7MP and implemented as nop,
+# (ab)use the PLDW helper.
+
+PLDW 1111 0100 -001 ---- ---- ---- ---- ----
+PLDW 1111 0110 -001 ---- ---- ---- ---0 ----
diff --git a/target/arm/a32.decode b/target/arm/a32.decode
new file mode 100644
index 000000000..fcd8cd4f7
--- /dev/null
+++ b/target/arm/a32.decode
@@ -0,0 +1,553 @@
+# A32 conditional instructions
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# All of the insns that have a COND field in insn[31:28] are here.
+# All insns that have 0xf in insn[31:28] are in a32-uncond.decode.
+#
+
+&empty
+&s_rrr_shi s rd rn rm shim shty
+&s_rrr_shr s rn rd rm rs shty
+&s_rri_rot s rn rd imm rot
+&s_rrrr s rd rn rm ra
+&rrrr rd rn rm ra
+&rrr_rot rd rn rm rot
+&rrr rd rn rm
+&rr rd rm
+&ri rd imm
+&r rm
+&i imm
+&msr_reg rn r mask
+&mrs_reg rd r
+&msr_bank rn r sysm
+&mrs_bank rd r sysm
+&ldst_rr p w u rn rt rm shimm shtype
+&ldst_ri p w u rn rt imm
+&ldst_block rn i b u w list
+&strex rn rd rt rt2 imm
+&ldrex rn rt rt2 imm
+&bfx rd rn lsb widthm1
+&bfi rd rn lsb msb
+&sat rd rn satimm imm sh
+&pkh rd rn rm imm tb
+&mcr cp opc1 crn crm opc2 rt
+&mcrr cp opc1 crm rt rt2
+
+# Data-processing (register)
+
+@s_rrr_shi ---- ... .... s:1 rn:4 rd:4 shim:5 shty:2 . rm:4 \
+ &s_rrr_shi
+@s_rxr_shi ---- ... .... s:1 .... rd:4 shim:5 shty:2 . rm:4 \
+ &s_rrr_shi rn=0
+@S_xrr_shi ---- ... .... . rn:4 .... shim:5 shty:2 . rm:4 \
+ &s_rrr_shi s=1 rd=0
+
+AND_rrri .... 000 0000 . .... .... ..... .. 0 .... @s_rrr_shi
+EOR_rrri .... 000 0001 . .... .... ..... .. 0 .... @s_rrr_shi
+SUB_rrri .... 000 0010 . .... .... ..... .. 0 .... @s_rrr_shi
+RSB_rrri .... 000 0011 . .... .... ..... .. 0 .... @s_rrr_shi
+ADD_rrri .... 000 0100 . .... .... ..... .. 0 .... @s_rrr_shi
+ADC_rrri .... 000 0101 . .... .... ..... .. 0 .... @s_rrr_shi
+SBC_rrri .... 000 0110 . .... .... ..... .. 0 .... @s_rrr_shi
+RSC_rrri .... 000 0111 . .... .... ..... .. 0 .... @s_rrr_shi
+TST_xrri .... 000 1000 1 .... 0000 ..... .. 0 .... @S_xrr_shi
+TEQ_xrri .... 000 1001 1 .... 0000 ..... .. 0 .... @S_xrr_shi
+CMP_xrri .... 000 1010 1 .... 0000 ..... .. 0 .... @S_xrr_shi
+CMN_xrri .... 000 1011 1 .... 0000 ..... .. 0 .... @S_xrr_shi
+ORR_rrri .... 000 1100 . .... .... ..... .. 0 .... @s_rrr_shi
+MOV_rxri .... 000 1101 . 0000 .... ..... .. 0 .... @s_rxr_shi
+BIC_rrri .... 000 1110 . .... .... ..... .. 0 .... @s_rrr_shi
+MVN_rxri .... 000 1111 . 0000 .... ..... .. 0 .... @s_rxr_shi
+
+%imm16 16:4 0:12
+@mov16 ---- .... .... .... rd:4 ............ &ri imm=%imm16
+
+MOVW .... 0011 0000 .... .... ............ @mov16
+MOVT .... 0011 0100 .... .... ............ @mov16
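+
+# %imm16 above stitches the split immediate back together:
+# imm = insn[19:16]:insn[11:0].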
+
+# Data-processing (register-shifted register)
+
+@s_rrr_shr ---- ... .... s:1 rn:4 rd:4 rs:4 . shty:2 . rm:4 \
+ &s_rrr_shr
+@s_rxr_shr ---- ... .... s:1 .... rd:4 rs:4 . shty:2 . rm:4 \
+ &s_rrr_shr rn=0
+@S_xrr_shr ---- ... .... . rn:4 .... rs:4 . shty:2 . rm:4 \
+ &s_rrr_shr rd=0 s=1
+
+AND_rrrr .... 000 0000 . .... .... .... 0 .. 1 .... @s_rrr_shr
+EOR_rrrr .... 000 0001 . .... .... .... 0 .. 1 .... @s_rrr_shr
+SUB_rrrr .... 000 0010 . .... .... .... 0 .. 1 .... @s_rrr_shr
+RSB_rrrr .... 000 0011 . .... .... .... 0 .. 1 .... @s_rrr_shr
+ADD_rrrr .... 000 0100 . .... .... .... 0 .. 1 .... @s_rrr_shr
+ADC_rrrr .... 000 0101 . .... .... .... 0 .. 1 .... @s_rrr_shr
+SBC_rrrr .... 000 0110 . .... .... .... 0 .. 1 .... @s_rrr_shr
+RSC_rrrr .... 000 0111 . .... .... .... 0 .. 1 .... @s_rrr_shr
+TST_xrrr .... 000 1000 1 .... 0000 .... 0 .. 1 .... @S_xrr_shr
+TEQ_xrrr .... 000 1001 1 .... 0000 .... 0 .. 1 .... @S_xrr_shr
+CMP_xrrr .... 000 1010 1 .... 0000 .... 0 .. 1 .... @S_xrr_shr
+CMN_xrrr .... 000 1011 1 .... 0000 .... 0 .. 1 .... @S_xrr_shr
+ORR_rrrr .... 000 1100 . .... .... .... 0 .. 1 .... @s_rrr_shr
+MOV_rxrr .... 000 1101 . 0000 .... .... 0 .. 1 .... @s_rxr_shr
+BIC_rrrr .... 000 1110 . .... .... .... 0 .. 1 .... @s_rrr_shr
+MVN_rxrr .... 000 1111 . 0000 .... .... 0 .. 1 .... @s_rxr_shr
+
+# Data-processing (immediate)
+
+%a32extrot 8:4 !function=times_2
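+# (The expanded immediate is imm8 rotated right by 2 * insn[11:8]; the
+# rotation itself is applied during translation.)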
+
+@s_rri_rot ---- ... .... s:1 rn:4 rd:4 .... imm:8 \
+ &s_rri_rot rot=%a32extrot
+@s_rxi_rot ---- ... .... s:1 .... rd:4 .... imm:8 \
+ &s_rri_rot rot=%a32extrot rn=0
+@S_xri_rot ---- ... .... . rn:4 .... .... imm:8 \
+ &s_rri_rot rot=%a32extrot rd=0 s=1
+
+AND_rri .... 001 0000 . .... .... ............ @s_rri_rot
+EOR_rri .... 001 0001 . .... .... ............ @s_rri_rot
+SUB_rri .... 001 0010 . .... .... ............ @s_rri_rot
+RSB_rri .... 001 0011 . .... .... ............ @s_rri_rot
+ADD_rri .... 001 0100 . .... .... ............ @s_rri_rot
+ADC_rri .... 001 0101 . .... .... ............ @s_rri_rot
+SBC_rri .... 001 0110 . .... .... ............ @s_rri_rot
+RSC_rri .... 001 0111 . .... .... ............ @s_rri_rot
+TST_xri .... 001 1000 1 .... 0000 ............ @S_xri_rot
+TEQ_xri .... 001 1001 1 .... 0000 ............ @S_xri_rot
+CMP_xri .... 001 1010 1 .... 0000 ............ @S_xri_rot
+CMN_xri .... 001 1011 1 .... 0000 ............ @S_xri_rot
+ORR_rri .... 001 1100 . .... .... ............ @s_rri_rot
+MOV_rxi .... 001 1101 . 0000 .... ............ @s_rxi_rot
+BIC_rri .... 001 1110 . .... .... ............ @s_rri_rot
+MVN_rxi .... 001 1111 . 0000 .... ............ @s_rxi_rot
+
+# Multiply and multiply accumulate
+
+@s_rdamn ---- .... ... s:1 rd:4 ra:4 rm:4 .... rn:4 &s_rrrr
+@s_rd0mn ---- .... ... s:1 rd:4 .... rm:4 .... rn:4 &s_rrrr ra=0
+@rdamn ---- .... ... . rd:4 ra:4 rm:4 .... rn:4 &rrrr
+@rd0mn ---- .... ... . rd:4 .... rm:4 .... rn:4 &rrrr ra=0
+
+MUL .... 0000 000 . .... 0000 .... 1001 .... @s_rd0mn
+MLA .... 0000 001 . .... .... .... 1001 .... @s_rdamn
+UMAAL .... 0000 010 0 .... .... .... 1001 .... @rdamn
+MLS .... 0000 011 0 .... .... .... 1001 .... @rdamn
+UMULL .... 0000 100 . .... .... .... 1001 .... @s_rdamn
+UMLAL .... 0000 101 . .... .... .... 1001 .... @s_rdamn
+SMULL .... 0000 110 . .... .... .... 1001 .... @s_rdamn
+SMLAL .... 0000 111 . .... .... .... 1001 .... @s_rdamn
+
+# Saturating addition and subtraction
+
+@rndm ---- .... .... rn:4 rd:4 .... .... rm:4 &rrr
+
+QADD .... 0001 0000 .... .... 0000 0101 .... @rndm
+QSUB .... 0001 0010 .... .... 0000 0101 .... @rndm
+QDADD .... 0001 0100 .... .... 0000 0101 .... @rndm
+QDSUB .... 0001 0110 .... .... 0000 0101 .... @rndm
+
+# Halfword multiply and multiply accumulate
+
+SMLABB .... 0001 0000 .... .... .... 1000 .... @rdamn
+SMLABT .... 0001 0000 .... .... .... 1100 .... @rdamn
+SMLATB .... 0001 0000 .... .... .... 1010 .... @rdamn
+SMLATT .... 0001 0000 .... .... .... 1110 .... @rdamn
+SMLAWB .... 0001 0010 .... .... .... 1000 .... @rdamn
+SMULWB .... 0001 0010 .... 0000 .... 1010 .... @rd0mn
+SMLAWT .... 0001 0010 .... .... .... 1100 .... @rdamn
+SMULWT .... 0001 0010 .... 0000 .... 1110 .... @rd0mn
+SMLALBB .... 0001 0100 .... .... .... 1000 .... @rdamn
+SMLALBT .... 0001 0100 .... .... .... 1100 .... @rdamn
+SMLALTB .... 0001 0100 .... .... .... 1010 .... @rdamn
+SMLALTT .... 0001 0100 .... .... .... 1110 .... @rdamn
+SMULBB .... 0001 0110 .... 0000 .... 1000 .... @rd0mn
+SMULBT .... 0001 0110 .... 0000 .... 1100 .... @rd0mn
+SMULTB .... 0001 0110 .... 0000 .... 1010 .... @rd0mn
+SMULTT .... 0001 0110 .... 0000 .... 1110 .... @rd0mn
+
+# MSR (immediate) and hints
+
+&msr_i r mask rot imm
+@msr_i ---- .... .... mask:4 .... rot:4 imm:8 &msr_i
+
+{
+ {
+ YIELD ---- 0011 0010 0000 1111 ---- 0000 0001
+ WFE ---- 0011 0010 0000 1111 ---- 0000 0010
+ WFI ---- 0011 0010 0000 1111 ---- 0000 0011
+
+ # TODO: Implement SEV, SEVL; may help SMP performance.
+ # SEV ---- 0011 0010 0000 1111 ---- 0000 0100
+ # SEVL ---- 0011 0010 0000 1111 ---- 0000 0101
+
+ # The canonical nop ends in 00000000, but the whole of the
+ # rest of the space executes as nop if otherwise unsupported.
+ NOP ---- 0011 0010 0000 1111 ---- ---- ----
+ }
+ # Note mask = 0 is covered by NOP
+ MSR_imm .... 0011 0010 .... 1111 .... .... .... @msr_i r=0
+}
+MSR_imm .... 0011 0110 .... 1111 .... .... .... @msr_i r=1
+
+# Cyclic Redundancy Check
+
+CRC32B .... 0001 0000 .... .... 0000 0100 .... @rndm
+CRC32H .... 0001 0010 .... .... 0000 0100 .... @rndm
+CRC32W .... 0001 0100 .... .... 0000 0100 .... @rndm
+CRC32CB .... 0001 0000 .... .... 0010 0100 .... @rndm
+CRC32CH .... 0001 0010 .... .... 0010 0100 .... @rndm
+CRC32CW .... 0001 0100 .... .... 0010 0100 .... @rndm
+
+# Miscellaneous instructions
+
+%sysm 8:1 16:4
+%imm16_8_0 8:12 0:4
+
+@rm ---- .... .... .... .... .... .... rm:4 &r
+@rdm ---- .... .... .... rd:4 .... .... rm:4 &rr
+@i16 ---- .... .... .... .... .... .... .... &i imm=%imm16_8_0
+
+MRS_bank ---- 0001 0 r:1 00 .... rd:4 001. 0000 0000 &mrs_bank %sysm
+MSR_bank ---- 0001 0 r:1 10 .... 1111 001. 0000 rn:4 &msr_bank %sysm
+
+MRS_reg ---- 0001 0 r:1 00 1111 rd:4 0000 0000 0000 &mrs_reg
+MSR_reg ---- 0001 0 r:1 10 mask:4 1111 0000 0000 rn:4 &msr_reg
+
+BX .... 0001 0010 1111 1111 1111 0001 .... @rm
+BXJ .... 0001 0010 1111 1111 1111 0010 .... @rm
+BLX_r .... 0001 0010 1111 1111 1111 0011 .... @rm
+
+CLZ .... 0001 0110 1111 .... 1111 0001 .... @rdm
+
+ERET ---- 0001 0110 0000 0000 0000 0110 1110
+
+HLT .... 0001 0000 .... .... .... 0111 .... @i16
+BKPT .... 0001 0010 .... .... .... 0111 .... @i16
+HVC .... 0001 0100 .... .... .... 0111 .... @i16
+SMC ---- 0001 0110 0000 0000 0000 0111 imm:4 &i
+
+# Load/Store Dual, Half, Signed Byte (register)
+
+@ldst_rr_p1w ---- ...1 u:1 . w:1 . rn:4 rt:4 .... .... rm:4 \
+ &ldst_rr p=1 shimm=0 shtype=0
+@ldst_rr_pw0 ---- ...0 u:1 . 0 . rn:4 rt:4 .... .... rm:4 \
+ &ldst_rr p=0 w=0 shimm=0 shtype=0
+
+STRH_rr .... 000. .0.0 .... .... 0000 1011 .... @ldst_rr_pw0
+STRH_rr .... 000. .0.0 .... .... 0000 1011 .... @ldst_rr_p1w
+
+LDRD_rr .... 000. .0.0 .... .... 0000 1101 .... @ldst_rr_pw0
+LDRD_rr .... 000. .0.0 .... .... 0000 1101 .... @ldst_rr_p1w
+
+STRD_rr .... 000. .0.0 .... .... 0000 1111 .... @ldst_rr_pw0
+STRD_rr .... 000. .0.0 .... .... 0000 1111 .... @ldst_rr_p1w
+
+LDRH_rr .... 000. .0.1 .... .... 0000 1011 .... @ldst_rr_pw0
+LDRH_rr .... 000. .0.1 .... .... 0000 1011 .... @ldst_rr_p1w
+
+LDRSB_rr .... 000. .0.1 .... .... 0000 1101 .... @ldst_rr_pw0
+LDRSB_rr .... 000. .0.1 .... .... 0000 1101 .... @ldst_rr_p1w
+
+LDRSH_rr .... 000. .0.1 .... .... 0000 1111 .... @ldst_rr_pw0
+LDRSH_rr .... 000. .0.1 .... .... 0000 1111 .... @ldst_rr_p1w
+
+# Note the unpriv load/stores use the previously invalid P=0, W=1 encoding,
+# and act as normal post-indexed (P=0, W=0).
+@ldst_rr_p0w1 ---- ...0 u:1 . 1 . rn:4 rt:4 .... .... rm:4 \
+ &ldst_rr p=0 w=0 shimm=0 shtype=0
+
+STRHT_rr .... 000. .0.0 .... .... 0000 1011 .... @ldst_rr_p0w1
+LDRHT_rr .... 000. .0.1 .... .... 0000 1011 .... @ldst_rr_p0w1
+LDRSBT_rr .... 000. .0.1 .... .... 0000 1101 .... @ldst_rr_p0w1
+LDRSHT_rr .... 000. .0.1 .... .... 0000 1111 .... @ldst_rr_p0w1
+
+# Load/Store word and unsigned byte (register)
+
+@ldst_rs_p1w ---- ...1 u:1 . w:1 . rn:4 rt:4 shimm:5 shtype:2 . rm:4 \
+ &ldst_rr p=1
+@ldst_rs_pw0 ---- ...0 u:1 . 0 . rn:4 rt:4 shimm:5 shtype:2 . rm:4 \
+ &ldst_rr p=0 w=0
+
+STR_rr .... 011. .0.0 .... .... .... ...0 .... @ldst_rs_pw0
+STR_rr .... 011. .0.0 .... .... .... ...0 .... @ldst_rs_p1w
+STRB_rr .... 011. .1.0 .... .... .... ...0 .... @ldst_rs_pw0
+STRB_rr .... 011. .1.0 .... .... .... ...0 .... @ldst_rs_p1w
+
+LDR_rr .... 011. .0.1 .... .... .... ...0 .... @ldst_rs_pw0
+LDR_rr .... 011. .0.1 .... .... .... ...0 .... @ldst_rs_p1w
+LDRB_rr .... 011. .1.1 .... .... .... ...0 .... @ldst_rs_pw0
+LDRB_rr .... 011. .1.1 .... .... .... ...0 .... @ldst_rs_p1w
+
+@ldst_rs_p0w1 ---- ...0 u:1 . 1 . rn:4 rt:4 shimm:5 shtype:2 . rm:4 \
+ &ldst_rr p=0 w=0
+
+STRT_rr .... 011. .0.0 .... .... .... ...0 .... @ldst_rs_p0w1
+STRBT_rr .... 011. .1.0 .... .... .... ...0 .... @ldst_rs_p0w1
+LDRT_rr .... 011. .0.1 .... .... .... ...0 .... @ldst_rs_p0w1
+LDRBT_rr .... 011. .1.1 .... .... .... ...0 .... @ldst_rs_p0w1
+
+# Load/Store Dual, Half, Signed Byte (immediate)
+
+%imm8s_8_0 8:4 0:4
+@ldst_ri8_p1w ---- ...1 u:1 . w:1 . rn:4 rt:4 .... .... .... \
+ &ldst_ri imm=%imm8s_8_0 p=1
+@ldst_ri8_pw0 ---- ...0 u:1 . 0 . rn:4 rt:4 .... .... .... \
+ &ldst_ri imm=%imm8s_8_0 p=0 w=0
+
+STRH_ri .... 000. .1.0 .... .... .... 1011 .... @ldst_ri8_pw0
+STRH_ri .... 000. .1.0 .... .... .... 1011 .... @ldst_ri8_p1w
+
+LDRD_ri_a32 .... 000. .1.0 .... .... .... 1101 .... @ldst_ri8_pw0
+LDRD_ri_a32 .... 000. .1.0 .... .... .... 1101 .... @ldst_ri8_p1w
+
+STRD_ri_a32 .... 000. .1.0 .... .... .... 1111 .... @ldst_ri8_pw0
+STRD_ri_a32 .... 000. .1.0 .... .... .... 1111 .... @ldst_ri8_p1w
+
+LDRH_ri .... 000. .1.1 .... .... .... 1011 .... @ldst_ri8_pw0
+LDRH_ri .... 000. .1.1 .... .... .... 1011 .... @ldst_ri8_p1w
+
+LDRSB_ri .... 000. .1.1 .... .... .... 1101 .... @ldst_ri8_pw0
+LDRSB_ri .... 000. .1.1 .... .... .... 1101 .... @ldst_ri8_p1w
+
+LDRSH_ri .... 000. .1.1 .... .... .... 1111 .... @ldst_ri8_pw0
+LDRSH_ri .... 000. .1.1 .... .... .... 1111 .... @ldst_ri8_p1w
+
+# Note the unpriv load/stores use the previously invalid P=0, W=1 encoding,
+# and act as normal post-indexed (P=0, W=0).
+@ldst_ri8_p0w1 ---- ...0 u:1 . 1 . rn:4 rt:4 .... .... .... \
+ &ldst_ri imm=%imm8s_8_0 p=0 w=0
+
+STRHT_ri .... 000. .1.0 .... .... .... 1011 .... @ldst_ri8_p0w1
+LDRHT_ri .... 000. .1.1 .... .... .... 1011 .... @ldst_ri8_p0w1
+LDRSBT_ri .... 000. .1.1 .... .... .... 1101 .... @ldst_ri8_p0w1
+LDRSHT_ri .... 000. .1.1 .... .... .... 1111 .... @ldst_ri8_p0w1
+
+# Load/Store word and unsigned byte (immediate)
+
+@ldst_ri12_p1w ---- ...1 u:1 . w:1 . rn:4 rt:4 imm:12 &ldst_ri p=1
+@ldst_ri12_pw0 ---- ...0 u:1 . 0 . rn:4 rt:4 imm:12 &ldst_ri p=0 w=0
+
+STR_ri .... 010. .0.0 .... .... ............ @ldst_ri12_p1w
+STR_ri .... 010. .0.0 .... .... ............ @ldst_ri12_pw0
+STRB_ri .... 010. .1.0 .... .... ............ @ldst_ri12_p1w
+STRB_ri .... 010. .1.0 .... .... ............ @ldst_ri12_pw0
+
+LDR_ri .... 010. .0.1 .... .... ............ @ldst_ri12_p1w
+LDR_ri .... 010. .0.1 .... .... ............ @ldst_ri12_pw0
+LDRB_ri .... 010. .1.1 .... .... ............ @ldst_ri12_p1w
+LDRB_ri .... 010. .1.1 .... .... ............ @ldst_ri12_pw0
+
+@ldst_ri12_p0w1 ---- ...0 u:1 . 1 . rn:4 rt:4 imm:12 &ldst_ri p=0 w=0
+
+STRT_ri .... 010. .0.0 .... .... ............ @ldst_ri12_p0w1
+STRBT_ri .... 010. .1.0 .... .... ............ @ldst_ri12_p0w1
+LDRT_ri .... 010. .0.1 .... .... ............ @ldst_ri12_p0w1
+LDRBT_ri .... 010. .1.1 .... .... ............ @ldst_ri12_p0w1
+
+# Synchronization primitives
+
+@swp ---- .... .... rn:4 rt:4 .... .... rt2:4
+
+SWP .... 0001 0000 .... .... 0000 1001 .... @swp
+SWPB .... 0001 0100 .... .... 0000 1001 .... @swp
+
+# Load/Store Exclusive and Load-Acquire/Store-Release
+#
+# Note rt2 for STREXD/LDREXD is set by the helper after checking rt is even.
+
+@strex ---- .... .... rn:4 rd:4 .... .... rt:4 \
+ &strex imm=0 rt2=15
+@ldrex ---- .... .... rn:4 rt:4 .... .... .... \
+ &ldrex imm=0 rt2=15
+@stl ---- .... .... rn:4 .... .... .... rt:4 \
+ &ldrex imm=0 rt2=15
+
+STREX .... 0001 1000 .... .... 1111 1001 .... @strex
+STREXD_a32 .... 0001 1010 .... .... 1111 1001 .... @strex
+STREXB .... 0001 1100 .... .... 1111 1001 .... @strex
+STREXH .... 0001 1110 .... .... 1111 1001 .... @strex
+
+STLEX .... 0001 1000 .... .... 1110 1001 .... @strex
+STLEXD_a32 .... 0001 1010 .... .... 1110 1001 .... @strex
+STLEXB .... 0001 1100 .... .... 1110 1001 .... @strex
+STLEXH .... 0001 1110 .... .... 1110 1001 .... @strex
+
+STL .... 0001 1000 .... 1111 1100 1001 .... @stl
+STLB .... 0001 1100 .... 1111 1100 1001 .... @stl
+STLH .... 0001 1110 .... 1111 1100 1001 .... @stl
+
+LDREX .... 0001 1001 .... .... 1111 1001 1111 @ldrex
+LDREXD_a32 .... 0001 1011 .... .... 1111 1001 1111 @ldrex
+LDREXB .... 0001 1101 .... .... 1111 1001 1111 @ldrex
+LDREXH .... 0001 1111 .... .... 1111 1001 1111 @ldrex
+
+LDAEX .... 0001 1001 .... .... 1110 1001 1111 @ldrex
+LDAEXD_a32 .... 0001 1011 .... .... 1110 1001 1111 @ldrex
+LDAEXB .... 0001 1101 .... .... 1110 1001 1111 @ldrex
+LDAEXH .... 0001 1111 .... .... 1110 1001 1111 @ldrex
+
+LDA .... 0001 1001 .... .... 1100 1001 1111 @ldrex
+LDAB .... 0001 1101 .... .... 1100 1001 1111 @ldrex
+LDAH .... 0001 1111 .... .... 1100 1001 1111 @ldrex
+
+# Media instructions
+
+# usad8 is usada8 w/ ra=15
+USADA8 ---- 0111 1000 rd:4 ra:4 rm:4 0001 rn:4
+
+# ubfx and sbfx
+@bfx ---- .... ... widthm1:5 rd:4 lsb:5 ... rn:4 &bfx
+
+SBFX .... 0111 101 ..... .... ..... 101 .... @bfx
+UBFX .... 0111 111 ..... .... ..... 101 .... @bfx
+
+# bfc is bfi w/ rn=15
+BFCI ---- 0111 110 msb:5 rd:4 lsb:5 001 rn:4 &bfi
+
+# While we could get UDEF by not including this, add the pattern for
+# documentation and to conflict with any other typos in this file.
+UDF 1110 0111 1111 ---- ---- ---- 1111 ----
+
+# Parallel addition and subtraction
+
+SADD16 .... 0110 0001 .... .... 1111 0001 .... @rndm
+SASX .... 0110 0001 .... .... 1111 0011 .... @rndm
+SSAX .... 0110 0001 .... .... 1111 0101 .... @rndm
+SSUB16 .... 0110 0001 .... .... 1111 0111 .... @rndm
+SADD8 .... 0110 0001 .... .... 1111 1001 .... @rndm
+SSUB8 .... 0110 0001 .... .... 1111 1111 .... @rndm
+
+QADD16 .... 0110 0010 .... .... 1111 0001 .... @rndm
+QASX .... 0110 0010 .... .... 1111 0011 .... @rndm
+QSAX .... 0110 0010 .... .... 1111 0101 .... @rndm
+QSUB16 .... 0110 0010 .... .... 1111 0111 .... @rndm
+QADD8 .... 0110 0010 .... .... 1111 1001 .... @rndm
+QSUB8 .... 0110 0010 .... .... 1111 1111 .... @rndm
+
+SHADD16 .... 0110 0011 .... .... 1111 0001 .... @rndm
+SHASX .... 0110 0011 .... .... 1111 0011 .... @rndm
+SHSAX .... 0110 0011 .... .... 1111 0101 .... @rndm
+SHSUB16 .... 0110 0011 .... .... 1111 0111 .... @rndm
+SHADD8 .... 0110 0011 .... .... 1111 1001 .... @rndm
+SHSUB8 .... 0110 0011 .... .... 1111 1111 .... @rndm
+
+UADD16 .... 0110 0101 .... .... 1111 0001 .... @rndm
+UASX .... 0110 0101 .... .... 1111 0011 .... @rndm
+USAX .... 0110 0101 .... .... 1111 0101 .... @rndm
+USUB16 .... 0110 0101 .... .... 1111 0111 .... @rndm
+UADD8 .... 0110 0101 .... .... 1111 1001 .... @rndm
+USUB8 .... 0110 0101 .... .... 1111 1111 .... @rndm
+
+UQADD16 .... 0110 0110 .... .... 1111 0001 .... @rndm
+UQASX .... 0110 0110 .... .... 1111 0011 .... @rndm
+UQSAX .... 0110 0110 .... .... 1111 0101 .... @rndm
+UQSUB16 .... 0110 0110 .... .... 1111 0111 .... @rndm
+UQADD8 .... 0110 0110 .... .... 1111 1001 .... @rndm
+UQSUB8 .... 0110 0110 .... .... 1111 1111 .... @rndm
+
+UHADD16 .... 0110 0111 .... .... 1111 0001 .... @rndm
+UHASX .... 0110 0111 .... .... 1111 0011 .... @rndm
+UHSAX .... 0110 0111 .... .... 1111 0101 .... @rndm
+UHSUB16 .... 0110 0111 .... .... 1111 0111 .... @rndm
+UHADD8 .... 0110 0111 .... .... 1111 1001 .... @rndm
+UHSUB8 .... 0110 0111 .... .... 1111 1111 .... @rndm
+
+# Packing, unpacking, saturation, and reversal
+
+PKH ---- 0110 1000 rn:4 rd:4 imm:5 tb:1 01 rm:4 &pkh
+
+@sat ---- .... ... satimm:5 rd:4 imm:5 sh:1 .. rn:4 &sat
+@sat16 ---- .... .... satimm:4 rd:4 .... .... rn:4 \
+ &sat imm=0 sh=0
+
+SSAT .... 0110 101. .... .... .... ..01 .... @sat
+USAT .... 0110 111. .... .... .... ..01 .... @sat
+
+SSAT16 .... 0110 1010 .... .... 1111 0011 .... @sat16
+USAT16 .... 0110 1110 .... .... 1111 0011 .... @sat16
+
+@rrr_rot ---- .... .... rn:4 rd:4 rot:2 ...... rm:4 &rrr_rot
+
+SXTAB16 .... 0110 1000 .... .... ..00 0111 .... @rrr_rot
+SXTAB .... 0110 1010 .... .... ..00 0111 .... @rrr_rot
+SXTAH .... 0110 1011 .... .... ..00 0111 .... @rrr_rot
+UXTAB16 .... 0110 1100 .... .... ..00 0111 .... @rrr_rot
+UXTAB .... 0110 1110 .... .... ..00 0111 .... @rrr_rot
+UXTAH .... 0110 1111 .... .... ..00 0111 .... @rrr_rot
+
+SEL .... 0110 1000 .... .... 1111 1011 .... @rndm
+REV .... 0110 1011 1111 .... 1111 0011 .... @rdm
+REV16 .... 0110 1011 1111 .... 1111 1011 .... @rdm
+REVSH .... 0110 1111 1111 .... 1111 1011 .... @rdm
+RBIT .... 0110 1111 1111 .... 1111 0011 .... @rdm
+
+# Signed multiply, signed and unsigned divide
+
+@rdmn ---- .... .... rd:4 .... rm:4 .... rn:4 &rrr
+
+SMLAD .... 0111 0000 .... .... .... 0001 .... @rdamn
+SMLADX .... 0111 0000 .... .... .... 0011 .... @rdamn
+SMLSD .... 0111 0000 .... .... .... 0101 .... @rdamn
+SMLSDX .... 0111 0000 .... .... .... 0111 .... @rdamn
+
+SDIV .... 0111 0001 .... 1111 .... 0001 .... @rdmn
+UDIV .... 0111 0011 .... 1111 .... 0001 .... @rdmn
+
+SMLALD .... 0111 0100 .... .... .... 0001 .... @rdamn
+SMLALDX .... 0111 0100 .... .... .... 0011 .... @rdamn
+SMLSLD .... 0111 0100 .... .... .... 0101 .... @rdamn
+SMLSLDX .... 0111 0100 .... .... .... 0111 .... @rdamn
+
+SMMLA .... 0111 0101 .... .... .... 0001 .... @rdamn
+SMMLAR .... 0111 0101 .... .... .... 0011 .... @rdamn
+SMMLS .... 0111 0101 .... .... .... 1101 .... @rdamn
+SMMLSR .... 0111 0101 .... .... .... 1111 .... @rdamn
+
+# Block data transfer
+
+STM ---- 100 b:1 i:1 u:1 w:1 0 rn:4 list:16 &ldst_block
+LDM_a32 ---- 100 b:1 i:1 u:1 w:1 1 rn:4 list:16 &ldst_block
+
+# Branch, branch with link
+
+%imm26 0:s24 !function=times_4
+@branch ---- .... ........................ &i imm=%imm26
+
+B .... 1010 ........................ @branch
+BL .... 1011 ........................ @branch
+
+# Coprocessor instructions
+
+# We decode MCR, MRC, MRRC and MCRR only, because for QEMU the
+# other coprocessor instructions always UNDEF.
+# The trans_ functions for these will ignore cp values 8..13 for v7 or
+# earlier, and 0..13 for v8 and later, because those areas of the
+# encoding space may be used for other things, such as VFP or Neon.
+
+@mcr ---- .... opc1:3 . crn:4 rt:4 cp:4 opc2:3 . crm:4 &mcr
+@mcrr ---- .... .... rt2:4 rt:4 cp:4 opc1:4 crm:4 &mcrr
+
+MCRR .... 1100 0100 .... .... .... .... .... @mcrr
+MRRC .... 1100 0101 .... .... .... .... .... @mcrr
+
+MCR .... 1110 ... 0 .... .... .... ... 1 .... @mcr
+MRC .... 1110 ... 1 .... .... .... ... 1 .... @mcr
+
+# Supervisor call
+
+SVC ---- 1111 imm:24 &i
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
new file mode 100644
index 000000000..018484531
--- /dev/null
+++ b/target/arm/arch_dump.c
@@ -0,0 +1,467 @@
+/* Support for writing ELF notes for ARM architectures
+ *
+ * Copyright (C) 2015 Red Hat Inc.
+ *
+ * Author: Andrew Jones <drjones@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+
+/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
+struct aarch64_user_regs {
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t pstate;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_regs) != 272);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct aarch64_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct aarch64_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_elf_prstatus) != 392);
+
+/* struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h
+ *
+ * While the vregs member of user_fpsimd_state is of type __uint128_t,
+ * QEMU uses an array of uint64_t, where the high half of the 128-bit
+ * value is always at index 2n+1. Thus we also break the 128-
+ * bit values into two halves in this reproduction of user_fpsimd_state.
+ */
+struct aarch64_user_vfp_state {
+ uint64_t vregs[64];
+ uint32_t fpsr;
+ uint32_t fpcr;
+ char pad[8];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_vfp_state) != 528);
+
+/* struct user_sve_header from arch/arm64/include/uapi/asm/ptrace.h */
+struct aarch64_user_sve_header {
+ uint32_t size;
+ uint32_t max_size;
+ uint16_t vl;
+ uint16_t max_vl;
+ uint16_t flags;
+ uint16_t reserved;
+} QEMU_PACKED;
+
+struct aarch64_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ union {
+ struct aarch64_elf_prstatus prstatus;
+ struct aarch64_user_vfp_state vfp;
+ struct aarch64_user_sve_header sve;
+ };
+} QEMU_PACKED;
+
+#define AARCH64_NOTE_HEADER_SIZE offsetof(struct aarch64_note, prstatus)
+#define AARCH64_PRSTATUS_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_elf_prstatus))
+#define AARCH64_PRFPREG_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_user_vfp_state))
+#define AARCH64_SVE_NOTE_SIZE(env) \
+ (AARCH64_NOTE_HEADER_SIZE + sve_size(env))
+
+static void aarch64_note_init(struct aarch64_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
+ CPUARMState *env, int cpuid,
+ DumpState *s)
+{
+ struct aarch64_note note;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.vfp));
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ note.vfp.vregs[2 * i + 0] = cpu_to_dump64(s, q[0]);
+ note.vfp.vregs[2 * i + 1] = cpu_to_dump64(s, q[1]);
+ }
+
+ if (s->dump_info.d_endian == ELFDATA2MSB) {
+        /* For AArch64 we must always swap the vfp.vregs's 2n and 2n+1
+ * entries when generating BE notes, because even big endian
+ * hosts use 2n+1 for the high half.
+ */
+ for (i = 0; i < 32; ++i) {
+            uint64_t tmp = note.vfp.vregs[2 * i];
+ note.vfp.vregs[2 * i] = note.vfp.vregs[2 * i + 1];
+ note.vfp.vregs[2 * i + 1] = tmp;
+ }
+ }
+
+ note.vfp.fpsr = cpu_to_dump32(s, vfp_get_fpsr(env));
+ note.vfp.fpcr = cpu_to_dump32(s, vfp_get_fpcr(env));
+
+ ret = f(&note, AARCH64_PRFPREG_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef TARGET_AARCH64
+static off_t sve_zreg_offset(uint32_t vq, int n)
+{
+ off_t off = sizeof(struct aarch64_user_sve_header);
+ return ROUND_UP(off, 16) + vq * 16 * n;
+}
+
+static off_t sve_preg_offset(uint32_t vq, int n)
+{
+ return sve_zreg_offset(vq, 32) + vq * 16 / 8 * n;
+}
+
+static off_t sve_fpsr_offset(uint32_t vq)
+{
+ off_t off = sve_preg_offset(vq, 17);
+ return ROUND_UP(off, 16);
+}
+
+static off_t sve_fpcr_offset(uint32_t vq)
+{
+ return sve_fpsr_offset(vq) + sizeof(uint32_t);
+}
+
+static uint32_t sve_current_vq(CPUARMState *env)
+{
+ return sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+}
+
+static size_t sve_size_vq(uint32_t vq)
+{
+ off_t off = sve_fpcr_offset(vq) + sizeof(uint32_t);
+ return ROUND_UP(off, 16);
+}
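+
+/*
+ * Taken together, the offsets above reproduce the kernel's NT_ARM_SVE
+ * payload layout: the header, then 32 Z registers of vq*16 bytes each,
+ * then 17 P registers (16 predicates plus FFR) of vq*2 bytes each, then
+ * FPSR and FPCR, with 16-byte alignment between the groups.
+ */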
+
+static size_t sve_size(CPUARMState *env)
+{
+ return sve_size_vq(sve_current_vq(env));
+}
+
+static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
+ CPUARMState *env, int cpuid,
+ DumpState *s)
+{
+ struct aarch64_note *note;
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t vq = sve_current_vq(env);
+ uint64_t tmp[ARM_MAX_VQ * 2], *r;
+ uint32_t fpr;
+ uint8_t *buf;
+ int ret, i;
+
+ note = g_malloc0(AARCH64_SVE_NOTE_SIZE(env));
+ buf = (uint8_t *)&note->sve;
+
+ aarch64_note_init(note, s, "LINUX", 6, NT_ARM_SVE, sve_size_vq(vq));
+
+ note->sve.size = cpu_to_dump32(s, sve_size_vq(vq));
+ note->sve.max_size = cpu_to_dump32(s, sve_size_vq(cpu->sve_max_vq));
+ note->sve.vl = cpu_to_dump16(s, vq * 16);
+ note->sve.max_vl = cpu_to_dump16(s, cpu->sve_max_vq * 16);
+ note->sve.flags = cpu_to_dump16(s, 1);
+
+ for (i = 0; i < 32; ++i) {
+ r = sve_bswap64(tmp, &env->vfp.zregs[i].d[0], vq * 2);
+ memcpy(&buf[sve_zreg_offset(vq, i)], r, vq * 16);
+ }
+
+ for (i = 0; i < 17; ++i) {
+        r = sve_bswap64(tmp, &env->vfp.pregs[i].p[0],
+                        DIV_ROUND_UP(vq * 2, 8));
+ memcpy(&buf[sve_preg_offset(vq, i)], r, vq * 16 / 8);
+ }
+
+ fpr = cpu_to_dump32(s, vfp_get_fpsr(env));
+ memcpy(&buf[sve_fpsr_offset(vq)], &fpr, sizeof(uint32_t));
+
+ fpr = cpu_to_dump32(s, vfp_get_fpcr(env));
+ memcpy(&buf[sve_fpcr_offset(vq)], &fpr, sizeof(uint32_t));
+
+ ret = f(note, AARCH64_SVE_NOTE_SIZE(env), s);
+ g_free(note);
+
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct aarch64_note note;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ DumpState *s = opaque;
+ uint64_t pstate, sp;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ pstate = cpsr_read(env);
+ sp = 0;
+ } else {
+ pstate = pstate_read(env);
+ sp = env->xregs[31];
+ }
+
+ for (i = 0; i < 31; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
+ }
+ note.prstatus.pr_reg.sp = cpu_to_dump64(s, sp);
+ note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
+ note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);
+
+ ret = f(&note, AARCH64_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = aarch64_write_elf64_prfpreg(f, env, cpuid, s);
+ if (ret) {
+ return ret;
+ }
+
+#ifdef TARGET_AARCH64
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = aarch64_write_elf64_sve(f, env, cpuid, s);
+ }
+#endif
+
+ return ret;
+}
+
+/* struct pt_regs from arch/arm/include/asm/ptrace.h */
+struct arm_user_regs {
+ uint32_t regs[17];
+ char pad[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct arm_elf_prstatus {
+ char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct arm_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);
+
+/* struct user_vfp from arch/arm/include/asm/user.h */
+struct arm_user_vfp_state {
+ uint64_t vregs[32];
+ uint32_t fpscr;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_vfp_state) != 260);
+
+struct arm_note {
+ Elf32_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("LINUX"), 4) */
+ union {
+ struct arm_elf_prstatus prstatus;
+ struct arm_user_vfp_state vfp;
+ };
+} QEMU_PACKED;
+
+#define ARM_NOTE_HEADER_SIZE offsetof(struct arm_note, prstatus)
+#define ARM_PRSTATUS_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_elf_prstatus))
+#define ARM_VFP_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_user_vfp_state))
+
+static void arm_note_init(struct arm_note *note, DumpState *s,
+ const char *name, Elf32_Word namesz,
+ Elf32_Word type, Elf32_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env,
+ int cpuid, DumpState *s)
+{
+ struct arm_note note;
+ int ret, i;
+
+ arm_note_init(&note, s, "LINUX", 6, NT_ARM_VFP, sizeof(note.vfp));
+
+ for (i = 0; i < 32; ++i) {
+ note.vfp.vregs[i] = cpu_to_dump64(s, *aa32_vfp_dreg(env, i));
+ }
+
+ note.vfp.fpscr = cpu_to_dump32(s, vfp_get_fpscr(env));
+
+ ret = f(&note, ARM_VFP_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct arm_note note;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ DumpState *s = opaque;
+ int ret, i;
+ bool fpvalid = cpu_isar_feature(aa32_vfp_simd, cpu);
+
+ arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, fpvalid);
+
+ for (i = 0; i < 16; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
+ }
+ note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));
+
+ ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ } else if (fpvalid) {
+ return arm_write_elf32_vfp(f, env, cpuid, s);
+ }
+
+ return 0;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ ARMCPU *cpu;
+ CPUARMState *env;
+ GuestPhysBlock *block;
+ hwaddr lowest_addr = ULLONG_MAX;
+
+ if (first_cpu == NULL) {
+ return -1;
+ }
+
+ cpu = ARM_CPU(first_cpu);
+ env = &cpu->env;
+
+    /* Take a best guess at the phys_base. If we get it wrong then the
+     * crash utility will need '--machdep phys_offset=<phys-offset>' added
+     * to its command line, which is no worse than assuming zero and being
+     * wrong. This is the same algorithm the crash utility uses when
+     * guessing as it loads non-dumpfile formatted files.
+     */
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_start < lowest_addr) {
+ lowest_addr = block->target_start;
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ info->d_machine = EM_AARCH64;
+ info->d_class = ELFCLASS64;
+ info->page_size = (1 << 16); /* aarch64 max pagesize */
+ if (lowest_addr != ULLONG_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ } else {
+ info->d_machine = EM_ARM;
+ info->d_class = ELFCLASS32;
+ info->page_size = (1 << 12);
+ if (lowest_addr < UINT_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ }
+
+ /* We assume the relevant endianness is that of EL1; this is right
+ * for kernels, but might give the wrong answer if you're trying to
+ * dump a hypervisor that happens to be running an opposite-endian
+ * kernel.
+ */
+ info->d_endian = (env->cp15.sctlr_el[1] & SCTLR_EE) != 0
+ ? ELFDATA2MSB : ELFDATA2LSB;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ ARMCPU *cpu = ARM_CPU(first_cpu);
+ size_t note_size;
+
+ if (class == ELFCLASS64) {
+ note_size = AARCH64_PRSTATUS_NOTE_SIZE;
+ note_size += AARCH64_PRFPREG_NOTE_SIZE;
+#ifdef TARGET_AARCH64
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
+ }
+#endif
+ } else {
+ note_size = ARM_PRSTATUS_NOTE_SIZE;
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ note_size += ARM_VFP_NOTE_SIZE;
+ }
+ }
+
+ return note_size * nr_cpus;
+}
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
new file mode 100644
index 000000000..b75f813b4
--- /dev/null
+++ b/target/arm/arm-powerctl.c
@@ -0,0 +1,368 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-qom.h"
+#include "internals.h"
+#include "arm-powerctl.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+
+#ifndef DEBUG_ARM_POWERCTL
+#define DEBUG_ARM_POWERCTL 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_ARM_POWERCTL) { \
+ fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \
+ } \
+ } while (0)
+
+CPUState *arm_get_cpu_by_id(uint64_t id)
+{
+ CPUState *cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", id);
+
+ CPU_FOREACH(cpu) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+
+ if (armcpu->mp_affinity == id) {
+ return cpu;
+ }
+ }
+
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
+ __func__, id);
+
+ return NULL;
+}
+
+struct CpuOnInfo {
+ uint64_t entry;
+ uint64_t context_id;
+ uint32_t target_el;
+ bool target_aa64;
+};
+
+
+static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
+ run_on_cpu_data data)
+{
+ ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
+ struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu_state->halted = 0;
+
+ if (info->target_aa64) {
+ if ((info->target_el < 3) && arm_feature(&target_cpu->env,
+ ARM_FEATURE_EL3)) {
+ /*
+             * As the target mode is AArch64, we need to set the lower
+             * exception levels (below EL3) to AArch64
+ */
+ target_cpu->env.cp15.scr_el3 |= SCR_RW;
+ }
+
+ if ((info->target_el < 2) && arm_feature(&target_cpu->env,
+ ARM_FEATURE_EL2)) {
+ /*
+             * As the target mode is AArch64, we need to set the lower
+             * exception levels (below EL2) to AArch64
+ */
+ target_cpu->env.cp15.hcr_el2 |= HCR_RW;
+ }
+
+ target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
+ } else {
+ /* We are requested to boot in AArch32 mode */
+ static const uint32_t mode_for_el[] = { 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC };
+
+ cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
+ CPSRWriteRaw);
+ }
+
+ if (info->target_el == 3) {
+ /* Processor is in secure mode */
+ target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
+ } else {
+ /* Processor is not in secure mode */
+ target_cpu->env.cp15.scr_el3 |= SCR_NS;
+
+ /* Set NSACR.{CP11,CP10} so NS can access the FPU */
+ target_cpu->env.cp15.nsacr |= 3 << 10;
+
+ /*
+ * If QEMU is providing the equivalent of EL3 firmware, then we need
+ * to make sure a CPU targeting EL2 comes out of reset with a
+ * functional HVC insn.
+ */
+ if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
+ && info->target_el == 2) {
+ target_cpu->env.cp15.scr_el3 |= SCR_HCE;
+ }
+ }
+
+ /* We check if the started CPU is now at the correct level */
+ assert(info->target_el == arm_current_el(&target_cpu->env));
+
+ if (info->target_aa64) {
+ target_cpu->env.xregs[0] = info->context_id;
+ } else {
+ target_cpu->env.regs[0] = info->context_id;
+ }
+
+ /* CP15 update requires rebuilding hflags */
+ arm_rebuild_hflags(&target_cpu->env);
+
+ /* Start the new CPU at the requested address */
+ cpu_set_pc(target_cpu_state, info->entry);
+
+ g_free(info);
+
+ /* Finally set the power status */
+ assert(qemu_mutex_iothread_locked());
+ target_cpu->power_state = PSCI_ON;
+}
+
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+ struct CpuOnInfo *info;
+
+ assert(qemu_mutex_iothread_locked());
+
+ DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
+ "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
+ context_id);
+
+    /* The requested EL needs to be in the 1 to 3 range */
+ assert((target_el > 0) && (target_el < 4));
+
+ if (target_aa64 && (entry & 3)) {
+ /*
+         * If we are booting in AArch64 mode then "entry" needs to be
+         * 4-byte aligned.
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Retrieve the cpu we are powering up */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ /* The cpu was not found */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->power_state == PSCI_ON) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ALREADY_ON;
+ }
+
+ /*
+     * The newly brought-up CPU is requested to enter the exception level
+ * "target_el" and be in the requested mode (AArch64 or AArch32).
+ */
+
+ if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
+ ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
+ /*
+         * The CPU does not support the requested exception level
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * For now we don't support booting an AArch64 CPU in AArch32 mode
+ * TODO: We should add this support later
+ */
+ qemu_log_mask(LOG_UNIMP,
+ "[ARM]%s: Starting AArch64 CPU %" PRId64
+ " in AArch32 mode is not supported yet\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /*
+ * If another CPU has powered the target on we are in the state
+ * ON_PENDING and additional attempts to power on the CPU should
+ * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
+ * spec)
+ */
+ if (target_cpu->power_state == PSCI_ON_PENDING) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already powering on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ON_PENDING;
+ }
+
+    /*
+     * To avoid racing with a CPU we are just kicking off, we do the
+     * final bit of preparation for the work in the target CPU's
+     * context.
+     */
+ info = g_new(struct CpuOnInfo, 1);
+ info->entry = entry;
+ info->context_id = context_id;
+ info->target_el = target_el;
+ info->target_aa64 = target_aa64;
+
+ async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work,
+ RUN_ON_CPU_HOST_PTR(info));
+
+ /* We are good to go */
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
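
The alignment test above is worth calling out: an AArch64 start address must be 4-byte aligned, while an odd AArch32 address is legal because bit 0 requests a Thumb entry. A minimal standalone sketch of just that predicate, with the hypothetical helper entry_is_valid() standing in for the inline check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the alignment test in arm_set_cpu_on(). */
static bool entry_is_valid(uint64_t entry, bool target_aa64)
{
    /* A64 instructions are 4-byte aligned, so reject entry & 3 != 0. */
    return !(target_aa64 && (entry & 3));
}

int main(void)
{
    printf("%d\n", entry_is_valid(0x40000000, true));  /* 1: aligned A64 */
    printf("%d\n", entry_is_valid(0x40000002, true));  /* 0: misaligned A64 */
    printf("%d\n", entry_is_valid(0x40000001, false)); /* 1: AArch32 Thumb ok */
    return 0;
}
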
+
+static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state,
+ run_on_cpu_data data)
+{
+ ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu_state->halted = 0;
+
+ /* Finally set the power status */
+ assert(qemu_mutex_iothread_locked());
+ target_cpu->power_state = PSCI_ON;
+}
+
+int arm_set_cpu_on_and_reset(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ assert(qemu_mutex_iothread_locked());
+
+ /* Retrieve the cpu we are powering up */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ /* The cpu was not found */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->power_state == PSCI_ON) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ALREADY_ON;
+ }
+
+ /*
+ * If another CPU has powered the target on we are in the state
+ * ON_PENDING and additional attempts to power on the CPU should
+ * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI
+ * spec)
+ */
+ if (target_cpu->power_state == PSCI_ON_PENDING) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already powering on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ON_PENDING;
+ }
+
+ async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work,
+ RUN_ON_CPU_NULL);
+
+ /* We are good to go */
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+static void arm_set_cpu_off_async_work(CPUState *target_cpu_state,
+ run_on_cpu_data data)
+{
+ ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
+
+ assert(qemu_mutex_iothread_locked());
+ target_cpu->power_state = PSCI_OFF;
+ target_cpu_state->halted = 1;
+ target_cpu_state->exception_index = EXCP_HLT;
+}
+
+int arm_set_cpu_off(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ assert(qemu_mutex_iothread_locked());
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+    /* Retrieve the cpu we are powering down */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->power_state == PSCI_OFF) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+    /* Queue work to run under the target vCPU's context */
+ async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work,
+ RUN_ON_CPU_NULL);
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+static void arm_reset_cpu_async_work(CPUState *target_cpu_state,
+ run_on_cpu_data data)
+{
+ /* Reset the cpu */
+ cpu_reset(target_cpu_state);
+}
+
+int arm_reset_cpu(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ assert(qemu_mutex_iothread_locked());
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+    /* Retrieve the cpu we are resetting */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+
+ if (target_cpu->power_state == PSCI_OFF) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+    /* Queue work to run under the target vCPU's context */
+ async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work,
+ RUN_ON_CPU_NULL);
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
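
All four entry points share the same shape: validate the request under the iothread lock, then defer the actual state change via async_run_on_cpu() so it runs in the target vCPU's own context. A toy model of that enqueue-then-drain pattern; the queue, run_on_cpu() and power_on() names here are invented for illustration and are not QEMU APIs:

#include <stdio.h>

/* Hypothetical miniature of the "queue work, run in target context"
 * pattern: the requester only enqueues; the target drains its own queue. */
typedef void (*work_fn)(void *opaque);

struct work { work_fn fn; void *opaque; };

static struct work queue[8];
static int queued;

static void run_on_cpu(work_fn fn, void *opaque)
{
    queue[queued].fn = fn;
    queue[queued].opaque = opaque;
    queued++;
}

static void power_on(void *opaque)
{
    printf("cpu %u: powering on\n", *(unsigned *)opaque);
}

int main(void)
{
    unsigned cpuid = 1;

    run_on_cpu(power_on, &cpuid);          /* requester side: enqueue only */
    for (int i = 0; i < queued; i++) {     /* target side: drain in own context */
        queue[i].fn(queue[i].opaque);
    }
    return 0;
}
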
diff --git a/target/arm/arm-powerctl.h b/target/arm/arm-powerctl.h
new file mode 100644
index 000000000..37c8a04f0
--- /dev/null
+++ b/target/arm/arm-powerctl.h
@@ -0,0 +1,93 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_ARM_POWERCTL_H
+#define QEMU_ARM_POWERCTL_H
+
+#include "kvm-consts.h"
+
+#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS
+#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
+#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
+#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
+#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING
+
+/*
+ * arm_get_cpu_by_id:
+ * @cpuid: the id of the CPU whose state we want to retrieve
+ *
+ * Retrieve a CPUState object from its CPU ID provided in @cpuid.
+ *
+ * Returns: a pointer to the CPUState structure of the requested CPU.
+ */
+CPUState *arm_get_cpu_by_id(uint64_t cpuid);
+
+/*
+ * arm_set_cpu_on:
+ * @cpuid: the id of the CPU we want to start/wake up.
+ * @entry: the address the CPU shall start from.
+ * @context_id: the value to put in r0/x0.
+ * @target_el: The desired exception level.
+ * @target_aa64: true if the requested mode is AArch64, false otherwise.
+ *
+ * Start the cpu designated by @cpuid at exception level @target_el. The mode
+ * is AArch64 if @target_aa64 is true and AArch32 otherwise. The CPU starts
+ * at @entry with @context_id in r0/x0.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
+ * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up
+ */
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64);
+
+/*
+ * arm_set_cpu_off:
+ * @cpuid: the id of the CPU we want to stop/shut down.
+ *
+ * Stop the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is already off
+ */
+int arm_set_cpu_off(uint64_t cpuid);
+
+/*
+ * arm_reset_cpu:
+ * @cpuid: the id of the CPU we want to reset.
+ *
+ * Reset the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is off
+ */
+int arm_reset_cpu(uint64_t cpuid);
+
+/*
+ * arm_set_cpu_on_and_reset:
+ * @cpuid: the id of the CPU we want to start
+ *
+ * Start the cpu designated by @cpuid and put it through its normal
+ * CPU reset process. The CPU will start in the way it is architected
+ * to start after a power-on reset.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if there is no CPU with that ID.
+ * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU is already on.
+ * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is already partway through
+ * powering on.
+ */
+int arm_set_cpu_on_and_reset(uint64_t cpuid);
+
+#endif
diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h
new file mode 100644
index 000000000..cee0548a1
--- /dev/null
+++ b/target/arm/arm_ldst.h
@@ -0,0 +1,47 @@
+/*
+ * ARM load/store instructions for code (armeb-user support)
+ *
+ * Copyright (c) 2012 CodeSourcery, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_LDST_H
+#define ARM_LDST_H
+
+#include "exec/translator.h"
+#include "qemu/bswap.h"
+
+/* Load an instruction and return it in the standard little-endian order */
+static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
+{
+ return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
+}
+
+/* Ditto, for a halfword (Thumb) instruction */
+static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
+{
+#ifndef CONFIG_USER_ONLY
+ /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
+ within each word. Undo that now. */
+ if (sctlr_b) {
+ addr ^= 2;
+ }
+#endif
+ return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
+}
+
+#endif
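
The addr ^= 2 fixup works because BE32 mode stores the two Thumb halfwords of each 32-bit word swapped; toggling bit 1 of the fetch address maps each halfword back to its architectural location. A small endian-independent demonstration, with lduw() standing in for translator_lduw_swap():

#include <stdint.h>
#include <stdio.h>

/* Little-endian halfword read, standing in for translator_lduw_swap(). */
static uint16_t lduw(const uint8_t *p)
{
    return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
    /* One word holding two Thumb halfwords, stored BE32-swapped: the
     * halfword belonging to address 0 sits at offset 2 and vice versa. */
    uint8_t mem[4] = { 0x22, 0x22,   /* halfword for address 2 */
                       0x11, 0x11 }; /* halfword for address 0 */

    for (uint32_t addr = 0; addr < 4; addr += 2) {
        printf("addr %u -> 0x%04x\n", addr, lduw(mem + (addr ^ 2)));
    }
    return 0;   /* prints 0x1111 for address 0 and 0x2222 for address 2 */
}
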
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
new file mode 100644
index 000000000..7f38d33b8
--- /dev/null
+++ b/target/arm/cpu-param.h
@@ -0,0 +1,37 @@
+/*
+ * ARM cpu parameters for qemu.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef ARM_CPU_PARAM_H
+#define ARM_CPU_PARAM_H 1
+
+#ifdef TARGET_AARCH64
+# define TARGET_LONG_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 48
+# define TARGET_VIRT_ADDR_SPACE_BITS 48
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 40
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+#ifdef CONFIG_USER_ONLY
+#define TARGET_PAGE_BITS 12
+# ifdef TARGET_AARCH64
+# define TARGET_TAGGED_ADDRESSES
+# endif
+#else
+/*
+ * ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
+ * have to support 1K tiny pages.
+ */
+# define TARGET_PAGE_BITS_VARY
+# define TARGET_PAGE_BITS_MIN 10
+#endif
+
+#define NB_MMU_MODES 15
+
+#endif
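
Since a page is 1 << TARGET_PAGE_BITS bytes, the variable setting above lets the softmmu honour anything from the 1 KiB ARMv5/v6 tiny page up to the usual 4 KiB page. A one-line arithmetic check:

#include <stdio.h>

int main(void)
{
    /* Page size is 1 << TARGET_PAGE_BITS. */
    printf("10 bits -> %d bytes\n", 1 << 10);  /* 1024: ARMv5/v6 tiny page */
    printf("12 bits -> %d bytes\n", 1 << 12);  /* 4096: fixed AArch64 page */
    return 0;
}
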
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
new file mode 100644
index 000000000..a22bd506d
--- /dev/null
+++ b/target/arm/cpu-qom.h
@@ -0,0 +1,98 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+#ifndef QEMU_ARM_CPU_QOM_H
+#define QEMU_ARM_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+struct arm_boot_info;
+
+#define TYPE_ARM_CPU "arm-cpu"
+
+OBJECT_DECLARE_TYPE(ARMCPU, ARMCPUClass,
+ ARM_CPU)
+
+#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
+
+typedef struct ARMCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} ARMCPUInfo;
+
+void arm_cpu_register(const ARMCPUInfo *info);
+void aarch64_cpu_register(const ARMCPUInfo *info);
+
+/**
+ * ARMCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An ARM CPU model.
+ */
+struct ARMCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ const ARMCPUInfo *info;
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#define TYPE_AARCH64_CPU "aarch64-cpu"
+typedef struct AArch64CPUClass AArch64CPUClass;
+DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
+ TYPE_AARCH64_CPU)
+
+struct AArch64CPUClass {
+ /*< private >*/
+ ARMCPUClass parent_class;
+ /*< public >*/
+};
+
+void register_cp_regs_for_features(ARMCPU *cpu);
+void init_cpreg_list(ARMCPU *cpu);
+
+/* Callback functions for the generic timer's timers. */
+void arm_gt_ptimer_cb(void *opaque);
+void arm_gt_vtimer_cb(void *opaque);
+void arm_gt_htimer_cb(void *opaque);
+void arm_gt_stimer_cb(void *opaque);
+void arm_gt_hvtimer_cb(void *opaque);
+
+#define ARM_AFF0_SHIFT 0
+#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
+#define ARM_AFF1_SHIFT 8
+#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
+#define ARM_AFF2_SHIFT 16
+#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
+#define ARM_AFF3_SHIFT 32
+#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
+#define ARM_DEFAULT_CPUS_PER_CLUSTER 8
+
+#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
+#define ARM64_AFFINITY_MASK \
+ (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
+#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)
+
+#endif
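
The affinity masks carve an MPIDR-style value into four 8-bit fields, with Aff3 at bits [39:32] rather than adjacent to Aff2. A standalone sketch (macros copied from this header) that packs a value and unpacks the fields:

#include <inttypes.h>
#include <stdio.h>

#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK  (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
#define ARM_AFF1_MASK  (0xFFULL << ARM_AFF1_SHIFT)
#define ARM_AFF2_SHIFT 16
#define ARM_AFF2_MASK  (0xFFULL << ARM_AFF2_SHIFT)
#define ARM_AFF3_SHIFT 32
#define ARM_AFF3_MASK  (0xFFULL << ARM_AFF3_SHIFT)

int main(void)
{
    /* Pack Aff3=2, Aff1=1, Aff0=5; note the gap at bits [31:24]. */
    uint64_t mpidr = (2ULL << ARM_AFF3_SHIFT) | (1ULL << ARM_AFF1_SHIFT) | 5;

    printf("Aff3=%" PRIu64 " Aff2=%" PRIu64 " Aff1=%" PRIu64 " Aff0=%" PRIu64 "\n",
           (mpidr & ARM_AFF3_MASK) >> ARM_AFF3_SHIFT,
           (mpidr & ARM_AFF2_MASK) >> ARM_AFF2_SHIFT,
           (mpidr & ARM_AFF1_MASK) >> ARM_AFF1_SHIFT,
           (mpidr & ARM_AFF0_MASK) >> ARM_AFF0_SHIFT);
    return 0;
}
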
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
new file mode 100644
index 000000000..a211804fd
--- /dev/null
+++ b/target/arm/cpu.c
@@ -0,0 +1,2162 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "qemu-common.h"
+#include "target/arm/idau.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "cpu.h"
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "hw/qdev-properties.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#include "hw/boards.h"
+#endif
+#include "sysemu/tcg.h"
+#include "sysemu/hw_accel.h"
+#include "kvm_arm.h"
+#include "hvf_arm.h"
+#include "disas/capstone.h"
+#include "fpu/softfloat.h"
+
+static void arm_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (is_a64(env)) {
+ env->pc = value;
+ env->thumb = 0;
+ } else {
+ env->regs[15] = value & ~1;
+ env->thumb = value & 1;
+ }
+}
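
In the AArch32 branch, bit 0 of the new PC is an interworking flag rather than part of the address: it is stripped from R15 and latched into the Thumb state. A minimal model of that split:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t value = 0x8001;       /* odd address: Thumb entry point */
    uint32_t pc    = value & ~1u;  /* R15 holds the aligned address */
    bool thumb     = value & 1;    /* bit 0 becomes the Thumb state */

    printf("pc=0x%x thumb=%d\n", pc, thumb);  /* pc=0x8000 thumb=1 */
    return 0;
}
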
+
+#ifdef CONFIG_TCG
+void arm_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * It's OK to look at env for the current mode here, because it's
+ * never possible for an AArch64 TB to chain to an AArch32 TB.
+ */
+ if (is_a64(env)) {
+ env->pc = tb->pc;
+ } else {
+ env->regs[15] = tb->pc;
+ }
+}
+#endif /* CONFIG_TCG */
+
+static bool arm_cpu_has_work(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return (cpu->power_state != PSCI_OFF)
+ && cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
+ | CPU_INTERRUPT_EXITTB);
+}
+
+void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
+ void *opaque)
+{
+ ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
+
+ entry->hook = hook;
+ entry->opaque = opaque;
+
+ QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
+}
+
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
+ void *opaque)
+{
+ ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
+
+ entry->hook = hook;
+ entry->opaque = opaque;
+
+ QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
+}
+
+static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
+{
+ /* Reset a single ARMCPRegInfo register */
+ ARMCPRegInfo *ri = value;
+ ARMCPU *cpu = opaque;
+
+ if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
+ return;
+ }
+
+ if (ri->resetfn) {
+ ri->resetfn(&cpu->env, ri);
+ return;
+ }
+
+ /* A zero offset is never possible as it would be regs[0]
+ * so we use it to indicate that reset is being handled elsewhere.
+ * This is basically only used for fields in non-core coprocessors
+ * (like the pxa2xx ones).
+ */
+ if (!ri->fieldoffset) {
+ return;
+ }
+
+ if (cpreg_field_is_64bit(ri)) {
+ CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
+ } else {
+ CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
+ }
+}
+
+static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
+{
+ /* Purely an assertion check: we've already done reset once,
+ * so now check that running the reset for the cpreg doesn't
+ * change its value. This traps bugs where two different cpregs
+ * both try to reset the same state field but to different values.
+ */
+ ARMCPRegInfo *ri = value;
+ ARMCPU *cpu = opaque;
+ uint64_t oldvalue, newvalue;
+
+ if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
+ return;
+ }
+
+ oldvalue = read_raw_cp_reg(&cpu->env, ri);
+ cp_reg_reset(key, value, opaque);
+ newvalue = read_raw_cp_reg(&cpu->env, ri);
+ assert(oldvalue == newvalue);
+}
+
+static void arm_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ ARMCPU *cpu = ARM_CPU(s);
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
+ CPUARMState *env = &cpu->env;
+
+ acc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUARMState, end_reset_fields));
+
+ g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
+ g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
+
+ env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
+ env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
+ env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
+ env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;
+
+ cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+        /* 64-bit CPUs always start in 64-bit mode */
+ env->aarch64 = 1;
+#if defined(CONFIG_USER_ONLY)
+ env->pstate = PSTATE_MODE_EL0t;
+ /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
+ env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
+ /* Enable all PAC keys. */
+ env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
+ SCTLR_EnDA | SCTLR_EnDB);
+ /* and to the FP/Neon instructions */
+ env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
+ /* and to the SVE instructions */
+ env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
+ /* with reasonable vector length */
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ env->vfp.zcr_el[1] =
+ aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
+ }
+ /*
+ * Enable TBI0 but not TBI1.
+ * Note that this must match useronly_clean_ptr.
+ */
+ env->cp15.tcr_el[1].raw_tcr = (1ULL << 37);
+
+ /* Enable MTE */
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /* Enable tag access, but leave TCF0 as No Effect (0). */
+ env->cp15.sctlr_el[1] |= SCTLR_ATA0;
+ /*
+ * Exclude all tags, so that tag 0 is always used.
+ * This corresponds to Linux current->thread.gcr_incl = 0.
+ *
+ * Set RRND, so that helper_irg() will generate a seed later.
+ * Here in cpu_reset(), the crypto subsystem has not yet been
+ * initialized.
+ */
+ env->cp15.gcr_el1 = 0x1ffff;
+ }
+#else
+ /* Reset into the highest available EL */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ env->pstate = PSTATE_MODE_EL3h;
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+ env->pstate = PSTATE_MODE_EL2h;
+ } else {
+ env->pstate = PSTATE_MODE_EL1h;
+ }
+ env->pc = cpu->rvbar;
+#endif
+ } else {
+#if defined(CONFIG_USER_ONLY)
+ /* Userspace expects access to cp10 and cp11 for FP/Neon */
+ env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
+#endif
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ env->uncached_cpsr = ARM_CPU_MODE_USR;
+ /* For user mode we must enable access to coprocessors */
+ env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ env->cp15.c15_cpar = 3;
+ } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ env->cp15.c15_cpar = 1;
+ }
+#else
+
+ /*
+ * If the highest available EL is EL2, AArch32 will start in Hyp
+ * mode; otherwise it starts in SVC. Note that if we start in
+ * AArch64 then these values in the uncached_cpsr will be ignored.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ !arm_feature(env, ARM_FEATURE_EL3)) {
+ env->uncached_cpsr = ARM_CPU_MODE_HYP;
+ } else {
+ env->uncached_cpsr = ARM_CPU_MODE_SVC;
+ }
+ env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+
+ /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
+ * executing as AArch32 then check if highvecs are enabled and
+ * adjust the PC accordingly.
+ */
+ if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+ env->regs[15] = 0xFFFF0000;
+ }
+
+ env->vfp.xregs[ARM_VFP_FPEXC] = 0;
+#endif
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+#ifndef CONFIG_USER_ONLY
+ uint32_t initial_msp; /* Loaded from 0x0 */
+ uint32_t initial_pc; /* Loaded from 0x4 */
+ uint8_t *rom;
+ uint32_t vecbase;
+#endif
+
+ if (cpu_isar_feature(aa32_lob, cpu)) {
+ /*
+ * LTPSIZE is constant 4 if MVE not implemented, and resets
+ * to an UNKNOWN value if MVE is implemented. We choose to
+ * always reset to 4.
+ */
+ env->v7m.ltpsize = 4;
+ /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
+ env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
+ env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ env->v7m.secure = true;
+ } else {
+ /* This bit resets to 0 if security is supported, but 1 if
+ * it is not. The bit is not present in v7M, but we set it
+ * here so we can avoid having to make checks on it conditional
+ * on ARM_FEATURE_V8 (we don't let the guest see the bit).
+ */
+ env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
+ /*
+ * Set NSACR to indicate "NS access permitted to everything";
+ * this avoids having to have all the tests of it being
+ * conditional on ARM_FEATURE_M_SECURITY. Note also that from
+ * v8.1M the guest-visible value of NSACR in a CPU without the
+ * Security Extension is 0xcff.
+ */
+ env->v7m.nsacr = 0xcff;
+ }
+
+ /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
+ * that it resets to 1, so QEMU always does that rather than making
+ * it dependent on CPU model. In v8M it is RES1.
+ */
+ env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
+ env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* in v8M the NONBASETHRDENA bit [0] is RES1 */
+ env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
+ env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
+ }
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ }
+
+ if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
+ env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
+ R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* Unlike A/R profile, M profile defines the reset LR value */
+ env->regs[14] = 0xffffffff;
+
+ env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
+ env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;
+
+ /* Load the initial SP and PC from offset 0 and 4 in the vector table */
+ vecbase = env->v7m.vecbase[env->v7m.secure];
+ rom = rom_ptr_for_as(s->as, vecbase, 8);
+ if (rom) {
+ /* Address zero is covered by ROM which hasn't yet been
+ * copied into physical memory.
+ */
+ initial_msp = ldl_p(rom);
+ initial_pc = ldl_p(rom + 4);
+ } else {
+ /* Address zero not covered by a ROM blob, or the ROM blob
+ * is in non-modifiable memory and this is a second reset after
+ * it got copied into memory. In the latter case, rom_ptr
+ * will return a NULL pointer and we should use ldl_phys instead.
+ */
+ initial_msp = ldl_phys(s->as, vecbase);
+ initial_pc = ldl_phys(s->as, vecbase + 4);
+ }
+
+ env->regs[13] = initial_msp & 0xFFFFFFFC;
+ env->regs[15] = initial_pc & ~1;
+ env->thumb = initial_pc & 1;
+#else
+ /*
+ * For user mode we run non-secure and with access to the FPU.
+     * The FPU context is active (i.e. it does not need further setup)
+ * and is owned by non-secure.
+ */
+ env->v7m.secure = false;
+ env->v7m.nsacr = 0xcff;
+ env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
+ env->v7m.fpccr[M_REG_S] &=
+ ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+#endif
+ }
+
+ /* M profile requires that reset clears the exclusive monitor;
+ * A profile does not, but clearing it makes more sense than having it
+ * set with an exclusive access on address zero.
+ */
+ arm_clear_exclusive(env);
+
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
+ if (cpu->pmsav7_dregion > 0) {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ memset(env->pmsav8.rbar[M_REG_NS], 0,
+ sizeof(*env->pmsav8.rbar[M_REG_NS])
+ * cpu->pmsav7_dregion);
+ memset(env->pmsav8.rlar[M_REG_NS], 0,
+ sizeof(*env->pmsav8.rlar[M_REG_NS])
+ * cpu->pmsav7_dregion);
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ memset(env->pmsav8.rbar[M_REG_S], 0,
+ sizeof(*env->pmsav8.rbar[M_REG_S])
+ * cpu->pmsav7_dregion);
+ memset(env->pmsav8.rlar[M_REG_S], 0,
+ sizeof(*env->pmsav8.rlar[M_REG_S])
+ * cpu->pmsav7_dregion);
+ }
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ memset(env->pmsav7.drbar, 0,
+ sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
+ memset(env->pmsav7.drsr, 0,
+ sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
+ memset(env->pmsav7.dracr, 0,
+ sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
+ }
+ }
+ env->pmsav7.rnr[M_REG_NS] = 0;
+ env->pmsav7.rnr[M_REG_S] = 0;
+ env->pmsav8.mair0[M_REG_NS] = 0;
+ env->pmsav8.mair0[M_REG_S] = 0;
+ env->pmsav8.mair1[M_REG_NS] = 0;
+ env->pmsav8.mair1[M_REG_S] = 0;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (cpu->sau_sregion > 0) {
+ memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
+ memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
+ }
+ env->sau.rnr = 0;
+ /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
+ * the Cortex-M33 does.
+ */
+ env->sau.ctrl = 0;
+ }
+
+ set_flush_to_zero(1, &env->vfp.standard_fp_status);
+ set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
+ set_default_nan_mode(1, &env->vfp.standard_fp_status);
+ set_default_nan_mode(1, &env->vfp.standard_fp_status_f16);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.standard_fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.fp_status_f16);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.standard_fp_status_f16);
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_arm_reset_vcpu(cpu);
+ }
+#endif
+
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+ arm_rebuild_hflags(env);
+}
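
For M-profile cores the reset path implements the architected boot sequence: word 0 of the vector table seeds MSP (forced 4-byte aligned) and word 1 seeds the PC, whose bit 0 becomes the Thumb flag. A self-contained sketch of that fetch, with ldl() standing in for ldl_p() and an invented vector table:

#include <stdint.h>
#include <stdio.h>

/* Little-endian 32-bit load, standing in for ldl_p(). */
static uint32_t ldl(const uint8_t *p)
{
    return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    /* First 8 bytes of a hypothetical M-profile vector table. */
    const uint8_t rom[8] = {
        0x00, 0x80, 0x00, 0x20,  /* word 0: initial MSP = 0x20008000 */
        0xc1, 0x01, 0x00, 0x00,  /* word 1: reset vector = 0x1c1 */
    };
    uint32_t initial_msp = ldl(rom);
    uint32_t initial_pc = ldl(rom + 4);

    printf("SP=0x%08x PC=0x%08x thumb=%u\n",
           initial_msp & 0xFFFFFFFC,  /* SP forced 4-byte aligned */
           initial_pc & ~1u,          /* bit 0 stripped from the PC... */
           initial_pc & 1);           /* ...and latched as the Thumb state */
    return 0;
}
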
+
+#ifndef CONFIG_USER_ONLY
+
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
+ unsigned int target_el,
+ unsigned int cur_el, bool secure,
+ uint64_t hcr_el2)
+{
+ CPUARMState *env = cs->env_ptr;
+ bool pstate_unmasked;
+ bool unmasked = false;
+
+ /*
+ * Don't take exceptions if they target a lower EL.
+ * This check should catch any exceptions that would not be taken
+ * but left pending.
+ */
+ if (cur_el > target_el) {
+ return false;
+ }
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ pstate_unmasked = !(env->daif & PSTATE_F);
+ break;
+
+ case EXCP_IRQ:
+ pstate_unmasked = !(env->daif & PSTATE_I);
+ break;
+
+ case EXCP_VFIQ:
+ if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
+            /* VFIQs are only taken when virtual FIQs are enabled by EL2. */
+ return false;
+ }
+ return !(env->daif & PSTATE_F);
+ case EXCP_VIRQ:
+ if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
+            /* VIRQs are only taken when virtual IRQs are enabled by EL2. */
+ return false;
+ }
+ return !(env->daif & PSTATE_I);
+ default:
+ g_assert_not_reached();
+ }
+
+ /*
+ * Use the target EL, current execution state and SCR/HCR settings to
+ * determine whether the corresponding CPSR bit is used to mask the
+ * interrupt.
+ */
+ if ((target_el > cur_el) && (target_el != 1)) {
+ /* Exceptions targeting a higher EL may not be maskable */
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /*
+ * 64-bit masking rules are simple: exceptions to EL3
+ * can't be masked, and exceptions to EL2 can only be
+ * masked from Secure state. The HCR and SCR settings
+ * don't affect the masking logic, only the interrupt routing.
+ */
+ if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
+ unmasked = true;
+ }
+ } else {
+ /*
+ * The old 32-bit-only environment has a more complicated
+ * masking setup. HCR and SCR bits not only affect interrupt
+ * routing but also change the behaviour of masking.
+ */
+ bool hcr, scr;
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ /*
+ * If FIQs are routed to EL3 or EL2 then there are cases where
+ * we override the CPSR.F in determining if the exception is
+ * masked or not. If neither of these are set then we fall back
+ * to the CPSR.F setting otherwise we further assess the state
+ * below.
+ */
+ hcr = hcr_el2 & HCR_FMO;
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+ /*
+ * When EL3 is 32-bit, the SCR.FW bit controls whether the
+ * CPSR.F bit masks FIQ interrupts when taken in non-secure
+ * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+ * when non-secure but only when FIQs are only routed to EL3.
+ */
+ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+ break;
+ case EXCP_IRQ:
+ /*
+ * When EL3 execution state is 32-bit, if HCR.IMO is set then
+ * we may override the CPSR.I masking when in non-secure state.
+ * The SCR.IRQ setting has already been taken into consideration
+ * when setting the target EL, so it does not have a further
+ * affect here.
+ */
+ hcr = hcr_el2 & HCR_IMO;
+ scr = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((scr || hcr) && !secure) {
+ unmasked = true;
+ }
+ }
+ }
+
+ /*
+     * The PSTATE bits only mask the interrupt if we have not overridden the
+ * ability above.
+ */
+ return unmasked || pstate_unmasked;
+}
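
The AArch64 branch reduces to a compact rule: an exception routed to a strictly higher EL (other than EL1) ignores the PSTATE mask when it targets EL3, or when it targets EL2 from Non-secure state or with SCR_EL3.EEL2 set. A hedged distillation of just that branch, not QEMU code:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative reduction of the 64-bit masking rule above: returns true
 * when PSTATE.{I,F} cannot mask an exception routed to target_el. */
static bool aa64_unmaskable(int target_el, int cur_el, bool secure, bool eel2)
{
    if (target_el <= cur_el || target_el == 1) {
        return false;  /* the PSTATE bits stay in charge */
    }
    return target_el == 3 || !secure || eel2;
}

int main(void)
{
    printf("%d\n", aa64_unmaskable(3, 1, true, false));  /* 1: EL3 never masked */
    printf("%d\n", aa64_unmaskable(2, 1, false, false)); /* 1: NS -> EL2 */
    printf("%d\n", aa64_unmaskable(2, 1, true, false));  /* 0: Secure -> EL2 */
    return 0;
}
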
+
+static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUARMState *env = cs->env_ptr;
+ uint32_t cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ uint32_t target_el;
+ uint32_t excp_idx;
+
+ /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
+
+ if (interrupt_request & CPU_INTERRUPT_FIQ) {
+ excp_idx = EXCP_FIQ;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ excp_idx = EXCP_IRQ;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_VIRQ) {
+ excp_idx = EXCP_VIRQ;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_VFIQ) {
+ excp_idx = EXCP_VFIQ;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ goto found;
+ }
+ }
+ return false;
+
+ found:
+ cs->exception_index = excp_idx;
+ env->exception.target_el = target_el;
+ cc->tcg_ops->do_interrupt(cs);
+ return true;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void arm_cpu_update_virq(ARMCPU *cpu)
+{
+ /*
+ * Update the interrupt level for VIRQ, which is the logical OR of
+ * the HCR_EL2.VI bit and the input line level from the GIC.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
+ (env->irq_line_state & CPU_INTERRUPT_VIRQ);
+
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
+ if (new_state) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ }
+ }
+}
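
The update is level-based: the virtual line is the logical OR of HCR_EL2.VI and the GIC input, and cpu_interrupt()/cpu_reset_interrupt() fire only on an actual change. A toy change detector in the same style, with all names invented:

#include <stdbool.h>
#include <stdio.h>

static bool line;  /* latched VIRQ level */

static void update(bool hcr_vi, bool gic_level)
{
    bool new_state = hcr_vi || gic_level;  /* OR of both sources */
    if (new_state != line) {               /* act only on a change */
        line = new_state;
        printf("VIRQ -> %d\n", line);
    }
}

int main(void)
{
    update(false, true);   /* GIC raises the line: VIRQ -> 1 */
    update(true, true);    /* no change: nothing printed */
    update(false, false);  /* both sources drop: VIRQ -> 0 */
    return 0;
}
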
+
+void arm_cpu_update_vfiq(ARMCPU *cpu)
+{
+ /*
+ * Update the interrupt level for VFIQ, which is the logical OR of
+ * the HCR_EL2.VF bit and the input line level from the GIC.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
+ (env->irq_line_state & CPU_INTERRUPT_VFIQ);
+
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
+ if (new_state) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
+ }
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+static void arm_cpu_set_irq(void *opaque, int irq, int level)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ static const int mask[] = {
+ [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
+ [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
+ [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ };
+
+ if (level) {
+ env->irq_line_state |= mask[irq];
+ } else {
+ env->irq_line_state &= ~mask[irq];
+ }
+
+ switch (irq) {
+ case ARM_CPU_VIRQ:
+ assert(arm_feature(env, ARM_FEATURE_EL2));
+ arm_cpu_update_virq(cpu);
+ break;
+ case ARM_CPU_VFIQ:
+ assert(arm_feature(env, ARM_FEATURE_EL2));
+ arm_cpu_update_vfiq(cpu);
+ break;
+ case ARM_CPU_IRQ:
+ case ARM_CPU_FIQ:
+ if (level) {
+ cpu_interrupt(cs, mask[irq]);
+ } else {
+ cpu_reset_interrupt(cs, mask[irq]);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
+{
+#ifdef CONFIG_KVM
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t linestate_bit;
+ int irq_id;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ irq_id = KVM_ARM_IRQ_CPU_IRQ;
+ linestate_bit = CPU_INTERRUPT_HARD;
+ break;
+ case ARM_CPU_FIQ:
+ irq_id = KVM_ARM_IRQ_CPU_FIQ;
+ linestate_bit = CPU_INTERRUPT_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (level) {
+ env->irq_line_state |= linestate_bit;
+ } else {
+ env->irq_line_state &= ~linestate_bit;
+ }
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+#endif
+}
+
+static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ cpu_synchronize_state(cs);
+ return arm_cpu_data_is_big_endian(env);
+}
+
+#endif
+
+static int
+print_insn_thumb1(bfd_vma pc, disassemble_info *info)
+{
+ return print_insn_arm(pc | 1, info);
+}
+
+static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ ARMCPU *ac = ARM_CPU(cpu);
+ CPUARMState *env = &ac->env;
+ bool sctlr_b;
+
+ if (is_a64(env)) {
+ /* We might not be compiled with the A64 disassembler
+ * because it needs a C++ compiler. Leave print_insn
+ * unset in this case to use the caller default behaviour.
+ */
+#if defined(CONFIG_ARM_A64_DIS)
+ info->print_insn = print_insn_arm_a64;
+#endif
+ info->cap_arch = CS_ARCH_ARM64;
+ info->cap_insn_unit = 4;
+ info->cap_insn_split = 4;
+ } else {
+ int cap_mode;
+ if (env->thumb) {
+ info->print_insn = print_insn_thumb1;
+ info->cap_insn_unit = 2;
+ info->cap_insn_split = 4;
+ cap_mode = CS_MODE_THUMB;
+ } else {
+ info->print_insn = print_insn_arm;
+ info->cap_insn_unit = 4;
+ info->cap_insn_split = 4;
+ cap_mode = CS_MODE_ARM;
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ cap_mode |= CS_MODE_V8;
+ }
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ cap_mode |= CS_MODE_MCLASS;
+ }
+ info->cap_arch = CS_ARCH_ARM;
+ info->cap_mode = cap_mode;
+ }
+
+ sctlr_b = arm_sctlr_b(env);
+ if (bswap_code(sctlr_b)) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ info->endian = BFD_ENDIAN_LITTLE;
+#else
+ info->endian = BFD_ENDIAN_BIG;
+#endif
+ }
+ info->flags &= ~INSN_ARM_BE32;
+#ifndef CONFIG_USER_ONLY
+ if (sctlr_b) {
+ info->flags |= INSN_ARM_BE32;
+ }
+#endif
+}
+
+#ifdef TARGET_AARCH64
+
+static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t psr = pstate_read(env);
+ int i;
+ int el = arm_current_el(env);
+ const char *ns_status;
+
+ qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
+ for (i = 0; i < 32; i++) {
+ if (i == 31) {
+ qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
+ } else {
+ qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
+ (i + 2) % 3 ? " " : "\n");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+ qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+ psr,
+ psr & PSTATE_N ? 'N' : '-',
+ psr & PSTATE_Z ? 'Z' : '-',
+ psr & PSTATE_C ? 'C' : '-',
+ psr & PSTATE_V ? 'V' : '-',
+ ns_status,
+ el,
+ psr & PSTATE_SP ? 'h' : 't');
+
+ if (cpu_isar_feature(aa64_bti, cpu)) {
+ qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+ }
+ if (!(flags & CPU_DUMP_FPU)) {
+ qemu_fprintf(f, "\n");
+ return;
+ }
+ if (fp_exception_el(env, el) != 0) {
+ qemu_fprintf(f, " FPU disabled\n");
+ return;
+ }
+ qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+
+ if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+ int j, zcr_len = sve_zcr_len_for_el(env, el);
+
+ for (i = 0; i <= FFR_PRED_NUM; i++) {
+ bool eol;
+ if (i == FFR_PRED_NUM) {
+ qemu_fprintf(f, "FFR=");
+ /* It's last, so end the line. */
+ eol = true;
+ } else {
+ qemu_fprintf(f, "P%02d=", i);
+ switch (zcr_len) {
+ case 0:
+ eol = i % 8 == 7;
+ break;
+ case 1:
+ eol = i % 6 == 5;
+ break;
+ case 2:
+ case 3:
+ eol = i % 3 == 2;
+ break;
+ default:
+ /* More than one quadword per predicate. */
+ eol = true;
+ break;
+ }
+ }
+ for (j = zcr_len / 4; j >= 0; j--) {
+ int digits;
+ if (j * 4 + 4 <= zcr_len + 1) {
+ digits = 16;
+ } else {
+ digits = (zcr_len % 4 + 1) * 4;
+ }
+ qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
+ env->vfp.pregs[i].p[j],
+ j ? ":" : eol ? "\n" : " ");
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (zcr_len == 0) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, env->vfp.zregs[i].d[1],
+ env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
+ } else if (zcr_len == 1) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
+ ":%016" PRIx64 ":%016" PRIx64 "\n",
+ i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
+ env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
+ } else {
+ for (j = zcr_len; j >= 0; j--) {
+ bool odd = (zcr_len - j) % 2 != 0;
+ if (j == zcr_len) {
+ qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
+ } else if (!odd) {
+ if (j > 0) {
+ qemu_fprintf(f, " [%x-%x]=", j, j - 1);
+ } else {
+ qemu_fprintf(f, " [%x]=", j);
+ }
+ }
+ qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
+ env->vfp.zregs[i].d[j * 2 + 1],
+ env->vfp.zregs[i].d[j * 2],
+ odd || j == 0 ? "\n" : ":");
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, q[1], q[0], (i & 1 ? "\n" : " "));
+ }
+ }
+}
+
+#else
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ g_assert_not_reached();
+}
+
+#endif
+
+static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int i;
+
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, flags);
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ uint32_t xpsr = xpsr_read(env);
+ const char *mode;
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ ns_status = env->v7m.secure ? "S " : "NS ";
+ }
+
+ if (xpsr & XPSR_EXCP) {
+ mode = "handler";
+ } else {
+ if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
+ mode = "unpriv-thread";
+ } else {
+ mode = "priv-thread";
+ }
+ }
+
+ qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
+ xpsr,
+ xpsr & XPSR_N ? 'N' : '-',
+ xpsr & XPSR_Z ? 'Z' : '-',
+ xpsr & XPSR_C ? 'C' : '-',
+ xpsr & XPSR_V ? 'V' : '-',
+ xpsr & XPSR_T ? 'T' : 'A',
+ ns_status,
+ mode);
+ } else {
+ uint32_t psr = cpsr_read(env);
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ }
+
+ qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
+ psr,
+ psr & CPSR_N ? 'N' : '-',
+ psr & CPSR_Z ? 'Z' : '-',
+ psr & CPSR_C ? 'C' : '-',
+ psr & CPSR_V ? 'V' : '-',
+ psr & CPSR_T ? 'T' : 'A',
+ ns_status,
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
+ }
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 0;
+ if (cpu_isar_feature(aa32_simd_r32, cpu)) {
+ numvfpregs = 32;
+ } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ numvfpregs = 16;
+ }
+ for (i = 0; i < numvfpregs; i++) {
+ uint64_t v = *aa32_vfp_dreg(env, i);
+ qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+ i * 2, (uint32_t)v,
+ i * 2 + 1, (uint32_t)(v >> 32),
+ i, v);
+ }
+ qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
+ }
+ }
+}
+
+uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
+{
+ uint32_t Aff1 = idx / clustersz;
+ uint32_t Aff0 = idx % clustersz;
+ return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
+}
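
With the default cluster size of 8, CPU index 10 lands in cluster 1, core 2, giving an affinity value of 0x102. The same arithmetic as a quick standalone check:

#include <inttypes.h>
#include <stdio.h>

static uint64_t mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t aff1 = idx / clustersz;
    uint32_t aff0 = idx % clustersz;
    return (aff1 << 8) | aff0;   /* ARM_AFF1_SHIFT is 8 */
}

int main(void)
{
    printf("0x%" PRIx64 "\n", mp_affinity(10, 8));  /* 0x102 */
    return 0;
}
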
+
+static void cpreg_hashtable_data_destroy(gpointer data)
+{
+ /*
+ * Destroy function for cpu->cp_regs hashtable data entries.
+ * We must free the name string because it was g_strdup()ed in
+ * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
+ * from r->name because we know we definitely allocated it.
+ */
+ ARMCPRegInfo *r = data;
+
+ g_free((void *)r->name);
+ g_free(r);
+}
+
+static void arm_cpu_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+ cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
+ g_free, cpreg_hashtable_data_destroy);
+
+ QLIST_INIT(&cpu->pre_el_change_hooks);
+ QLIST_INIT(&cpu->el_change_hooks);
+
+#ifdef CONFIG_USER_ONLY
+# ifdef TARGET_AARCH64
+ /*
+     * The Linux kernel defaults to 512-bit vectors when SVE is supported.
+ * See documentation for /proc/sys/abi/sve_default_vector_length, and
+ * our corresponding sve-default-vector-length cpu property.
+ */
+ cpu->sve_default_vq = 4;
+# endif
+#else
+ /* Our inbound IRQ and FIQ lines */
+ if (kvm_enabled()) {
+ /* VIRQ and VFIQ are unused with KVM but we add them to maintain
+ * the same interface as non-KVM CPUs.
+ */
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+ } else {
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+ }
+
+ qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
+ ARRAY_SIZE(cpu->gt_timer_outputs));
+
+ qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
+ "gicv3-maintenance-interrupt", 1);
+ qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
+ "pmu-interrupt", 1);
+#endif
+
+ /* DTB consumers generally don't in fact care what the 'compatible'
+ * string is, so always provide some string and trust that a hypothetical
+ * picky DTB consumer will also provide a helpful error message.
+ */
+ cpu->dtb_compatible = "qemu,unknown";
+ cpu->psci_version = 1; /* By default assume PSCI v0.1 */
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
+
+ if (tcg_enabled() || hvf_enabled()) {
+ cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */
+ }
+}
+
+static Property arm_cpu_gt_cntfrq_property =
+ DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
+ NANOSECONDS_PER_SECOND / GTIMER_SCALE);
+
+static Property arm_cpu_reset_cbar_property =
+ DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
+
+static Property arm_cpu_reset_hivecs_property =
+ DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
+
+static Property arm_cpu_rvbar_property =
+ DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+
+#ifndef CONFIG_USER_ONLY
+static Property arm_cpu_has_el2_property =
+ DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
+
+static Property arm_cpu_has_el3_property =
+ DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
+#endif
+
+static Property arm_cpu_cfgend_property =
+ DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);
+
+static Property arm_cpu_has_vfp_property =
+ DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);
+
+static Property arm_cpu_has_neon_property =
+ DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);
+
+static Property arm_cpu_has_dsp_property =
+ DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
+
+static Property arm_cpu_has_mpu_property =
+ DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
+
+/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
+ * because the CPU initfn will have already set cpu->pmsav7_dregion to
+ * the right value for that particular CPU type, and we don't want
+ * to override that with an incorrect constant value.
+ */
+static Property arm_cpu_pmsav7_dregion_property =
+ DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
+ pmsav7_dregion,
+ qdev_prop_uint32, uint32_t);
+
+static bool arm_get_pmu(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return cpu->has_pmu;
+}
+
+static void arm_set_pmu(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ if (value) {
+ if (kvm_enabled() && !kvm_arm_pmu_supported()) {
+ error_setg(errp, "'pmu' feature not supported by KVM on this host");
+ return;
+ }
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ } else {
+ unset_feature(&cpu->env, ARM_FEATURE_PMU);
+ }
+ cpu->has_pmu = value;
+}
+
+unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
+{
+ /*
+ * The exact approach to calculating guest ticks is:
+ *
+ * muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
+ * NANOSECONDS_PER_SECOND);
+ *
+     * We don't do that. Rather, we intentionally use integer division
+     * truncation below and in the caller for the conversion of host monotonic
+     * time to guest ticks to provide the exact inverse for the semantics of
+     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
+     * it loses precision when representing frequencies where
+     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq_hz) > 0` holds. Failing to
+ * provide an exact inverse leads to scheduling timers with negative
+ * periods, which in turn leads to sticky behaviour in the guest.
+ *
+ * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
+ * cannot become zero.
+ */
+ return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
+ NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
+}
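
Concretely: QEMU's default CNTFRQ of 62.5 MHz (NANOSECONDS_PER_SECOND / GTIMER_SCALE, with GTIMER_SCALE = 16) yields exactly 16 ns per tick, 1 GHz yields 1 ns, and anything faster is capped so the scale factor never reaches zero. The same arithmetic lifted into a standalone check:

#include <stdio.h>

#define NANOSECONDS_PER_SECOND 1000000000ull

static unsigned period_ns(unsigned long long cntfrq_hz)
{
    /* Integer truncation, floored at 1 ns, exactly as above. */
    return NANOSECONDS_PER_SECOND > cntfrq_hz ?
           NANOSECONDS_PER_SECOND / cntfrq_hz : 1;
}

int main(void)
{
    printf("%u\n", period_ns(62500000ull));    /* 16 ns */
    printf("%u\n", period_ns(1000000000ull));  /* 1 ns */
    printf("%u\n", period_ns(2000000000ull));  /* capped at 1 ns */
    return 0;
}
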
+
+void arm_cpu_post_init(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /* M profile implies PMSA. We have to do this here rather than
+ * in realize with the other feature-implication checks because
+ * we look at the PMSA bit to see if we should add some properties.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
+ arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
+ }
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
+ /* Add the has_el3 state CPU property only if EL3 is allowed. This will
+ * prevent "has_el3" from existing on CPUs which cannot support EL3.
+ */
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);
+
+ object_property_add_link(obj, "secure-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&cpu->secure_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
+ }
+#endif
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
+ cpu->has_pmu = true;
+ object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
+ }
+
+ /*
+ * Allow user to turn off VFP and Neon support, but only for TCG --
+ * KVM does not currently allow us to lie to the guest about its
+ * ID/feature registers, so the guest always sees what the host has.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
+ ? cpu_isar_feature(aa64_fp_simd, cpu)
+ : cpu_isar_feature(aa32_vfp, cpu)) {
+ cpu->has_vfp = true;
+ if (!kvm_enabled()) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
+ }
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
+ cpu->has_neon = true;
+ if (!kvm_enabled()) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
+ }
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
+ arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ qdev_property_add_static(DEVICE(obj),
+ &arm_cpu_pmsav7_dregion_property);
+ }
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
+ object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+ /*
+ * M profile: initial value of the Secure VTOR. We can't just use
+ * a simple DEFINE_PROP_UINT32 for this because we want to permit
+ * the property to be set after realize.
+ */
+ object_property_add_uint32_ptr(obj, "init-svtor",
+ &cpu->init_svtor,
+ OBJ_PROP_FLAG_READWRITE);
+ }
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ /*
+ * Initial value of the NS VTOR (for cores without the Security
+ * extension, this is the only VTOR)
+ */
+ object_property_add_uint32_ptr(obj, "init-nsvtor",
+ &cpu->init_nsvtor,
+ OBJ_PROP_FLAG_READWRITE);
+ }
+
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
+ qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
+ }
+
+ if (kvm_enabled()) {
+ kvm_arm_add_vcpu_properties(obj);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
+ cpu_isar_feature(aa64_mte, cpu)) {
+ object_property_add_link(obj, "tag-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&cpu->tag_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
+ object_property_add_link(obj, "secure-tag-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&cpu->secure_tag_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_STRONG);
+ }
+ }
+#endif
+}
+
+static void arm_cpu_finalizefn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ ARMELChangeHook *hook, *next;
+
+ g_hash_table_destroy(cpu->cp_regs);
+
+ QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
+ QLIST_REMOVE(hook, node);
+ g_free(hook);
+ }
+ QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
+ QLIST_REMOVE(hook, node);
+ g_free(hook);
+ }
+#ifndef CONFIG_USER_ONLY
+ if (cpu->pmu_timer) {
+ timer_free(cpu->pmu_timer);
+ }
+#endif
+}
+
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
+{
+ Error *local_err = NULL;
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ arm_cpu_sve_finalize(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /*
+ * KVM does not support modifications to this feature.
+ * We have not registered the cpu properties when KVM
+ * is in use, so the user will not be able to set them.
+ */
+ if (!kvm_enabled()) {
+ arm_cpu_pauth_finalize(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
+ }
+
+ if (kvm_enabled()) {
+ kvm_arm_steal_time_finalize(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
+}
+
+static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ ARMCPU *cpu = ARM_CPU(dev);
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
+ CPUARMState *env = &cpu->env;
+ int pagebits;
+ Error *local_err = NULL;
+ bool no_aa32 = false;
+
+ /* If we needed to query the host kernel for the CPU features
+     * then it's possible that the query failed in the initfn, but
+ * this is the first point where we can report it.
+ */
+ if (cpu->host_cpu_probe_failed) {
+ if (!kvm_enabled() && !hvf_enabled()) {
+ error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
+ } else {
+ error_setg(errp, "Failed to retrieve host CPU features");
+ }
+ return;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* The NVIC and M-profile CPU are two halves of a single piece of
+ * hardware; trying to use one without the other is a command line
+ * error and will result in segfaults if not caught here.
+ */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (!env->nvic) {
+ error_setg(errp, "This board cannot be used with Cortex-M CPUs");
+ return;
+ }
+ } else {
+ if (env->nvic) {
+ error_setg(errp, "This board can only be used with Cortex-M CPUs");
+ return;
+ }
+ }
+
+ if (kvm_enabled()) {
+ /*
+ * Catch all the cases which might cause us to create more than one
+ * address space for the CPU (otherwise we will assert() later in
+ * cpu_address_space_init()).
+ */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ error_setg(errp,
+ "Cannot enable KVM when using an M-profile guest CPU");
+ return;
+ }
+ if (cpu->has_el3) {
+ error_setg(errp,
+ "Cannot enable KVM when guest CPU has EL3 enabled");
+ return;
+ }
+ if (cpu->tag_memory) {
+ error_setg(errp,
+                       "Cannot enable KVM when the guest CPU has MTE enabled");
+ return;
+ }
+ }
+
+ {
+ uint64_t scale;
+
+ if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
+ if (!cpu->gt_cntfrq_hz) {
+                error_setg(errp, "Invalid CNTFRQ: %"PRIu64"Hz",
+ cpu->gt_cntfrq_hz);
+ return;
+ }
+ scale = gt_cntfrq_period_ns(cpu);
+ } else {
+ scale = GTIMER_SCALE;
+ }
+
+ cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_ptimer_cb, cpu);
+ cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_vtimer_cb, cpu);
+ cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_htimer_cb, cpu);
+ cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_stimer_cb, cpu);
+ cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
+ arm_gt_hvtimer_cb, cpu);
+ }
+#endif
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ arm_cpu_finalize_features(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+ cpu->has_vfp != cpu->has_neon) {
+ /*
+ * This is an architectural requirement for AArch64; AArch32 is
+ * more flexible and permits VFP-no-Neon and Neon-no-VFP.
+ */
+ error_setg(errp,
+ "AArch64 CPUs must have both VFP and Neon or neither");
+ return;
+ }
+
+ if (!cpu->has_vfp) {
+ uint64_t t;
+ uint32_t u;
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
+ cpu->isar.id_aa64isar1 = t;
+
+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
+ cpu->isar.id_aa64pfr0 = t;
+
+ u = cpu->isar.id_isar6;
+ u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
+ cpu->isar.id_isar6 = u;
+
+ u = cpu->isar.mvfr0;
+ u = FIELD_DP32(u, MVFR0, FPSP, 0);
+ u = FIELD_DP32(u, MVFR0, FPDP, 0);
+ u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
+ u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
+ u = FIELD_DP32(u, MVFR0, FPROUND, 0);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
+ u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
+ }
+ cpu->isar.mvfr0 = u;
+
+ u = cpu->isar.mvfr1;
+ u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
+ u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
+ u = FIELD_DP32(u, MVFR1, FPHP, 0);
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ u = FIELD_DP32(u, MVFR1, FP16, 0);
+ }
+ cpu->isar.mvfr1 = u;
+
+ u = cpu->isar.mvfr2;
+ u = FIELD_DP32(u, MVFR2, FPMISC, 0);
+ cpu->isar.mvfr2 = u;
+ }
+
+ if (!cpu->has_neon) {
+ uint64_t t;
+ uint32_t u;
+
+ unset_feature(env, ARM_FEATURE_NEON);
+
+ t = cpu->isar.id_aa64isar0;
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
+ cpu->isar.id_aa64isar0 = t;
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
+ cpu->isar.id_aa64isar1 = t;
+
+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
+ cpu->isar.id_aa64pfr0 = t;
+
+ u = cpu->isar.id_isar5;
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
+ cpu->isar.id_isar5 = u;
+
+ u = cpu->isar.id_isar6;
+ u = FIELD_DP32(u, ID_ISAR6, DP, 0);
+ u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
+ u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
+ cpu->isar.id_isar6 = u;
+
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ u = cpu->isar.mvfr1;
+ u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
+ u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
+ u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
+ cpu->isar.mvfr1 = u;
+
+ u = cpu->isar.mvfr2;
+ u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
+ cpu->isar.mvfr2 = u;
+ }
+ }
+
+ if (!cpu->has_neon && !cpu->has_vfp) {
+ uint64_t t;
+ uint32_t u;
+
+ t = cpu->isar.id_aa64isar0;
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
+ cpu->isar.id_aa64isar0 = t;
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
+ cpu->isar.id_aa64isar1 = t;
+
+ u = cpu->isar.mvfr0;
+ u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
+ cpu->isar.mvfr0 = u;
+
+ /* Despite the name, this field covers both VFP and Neon */
+ u = cpu->isar.mvfr1;
+ u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
+ cpu->isar.mvfr1 = u;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
+ uint32_t u;
+
+ unset_feature(env, ARM_FEATURE_THUMB_DSP);
+
+ u = cpu->isar.id_isar1;
+ u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
+ cpu->isar.id_isar1 = u;
+
+ u = cpu->isar.id_isar2;
+ u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
+ u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
+ cpu->isar.id_isar2 = u;
+
+ u = cpu->isar.id_isar3;
+ u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
+ u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
+ cpu->isar.id_isar3 = u;
+ }
+
+ /* Some features automatically imply others: */
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V7);
+ } else {
+ set_feature(env, ARM_FEATURE_V7VE);
+ }
+ }
+
+ /*
+ * There exist AArch64 cpus without AArch32 support. When KVM
+ * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
+ * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
+ * As a general principle, we also do not make ID register
+ * consistency checks anywhere unless using TCG, because only
+ * for TCG would a consistency-check failure be a QEMU bug.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
+ /* v7 Virtualization Extensions. In real hardware this implies
+ * EL2 and also the presence of the Security Extensions.
+ * For QEMU, for backwards-compatibility we implement some
+ * CPUs or CPU configs which have no actual EL2 or EL3 but do
+ * include the various other features that V7VE implies.
+ * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
+ * Security Extensions is ARM_FEATURE_EL3.
+ */
+ assert(!tcg_enabled() || no_aa32 ||
+ cpu_isar_feature(aa32_arm_div, cpu));
+ set_feature(env, ARM_FEATURE_LPAE);
+ set_feature(env, ARM_FEATURE_V7);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ set_feature(env, ARM_FEATURE_VAPA);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_MPIDR);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V6K);
+ } else {
+ set_feature(env, ARM_FEATURE_V6);
+ }
+
+ /* Always define VBAR for V7 CPUs even if it doesn't exist in
+ * non-EL3 configs. This is needed by some legacy boards.
+ */
+ set_feature(env, ARM_FEATURE_VBAR);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_MVFR);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ set_feature(env, ARM_FEATURE_V5);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ assert(!tcg_enabled() || no_aa32 ||
+ cpu_isar_feature(aa32_jazelle, cpu));
+ set_feature(env, ARM_FEATURE_AUXCR);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V5)) {
+ set_feature(env, ARM_FEATURE_V4T);
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ set_feature(env, ARM_FEATURE_V7MP);
+ }
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ set_feature(env, ARM_FEATURE_CBAR);
+ }
+ if (arm_feature(env, ARM_FEATURE_THUMB2) &&
+ !arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_THUMB_DSP);
+ }
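+ /*
+ * Worked example (illustrative, not part of the original source): a
+ * v8 A-profile CPU enters this chain with only ARM_FEATURE_V8 set and
+ * leaves with V7VE, V7, LPAE, V7MP, VAPA, THUMB2, MPIDR, V6K, V6, V5,
+ * V4T, AUXCR, MVFR, VBAR and THUMB_DSP all implied.
+ */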
+
+ /*
+ * We rely on no XScale CPU having VFP so we can use the same bits in the
+ * TB flags field for VECSTRIDE and XSCALE_CPAR.
+ */
+ assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
+ !cpu_isar_feature(aa32_vfp_simd, cpu) ||
+ !arm_feature(env, ARM_FEATURE_XSCALE));
+
+ if (arm_feature(env, ARM_FEATURE_V7) &&
+ !arm_feature(env, ARM_FEATURE_M) &&
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
+ /* v7VMSA drops support for the old ARMv5 tiny pages, so we
+ * can use 4K pages.
+ */
+ pagebits = 12;
+ } else {
+ /* For CPUs which might have tiny 1K pages, or which have an
+ * MPU and might have small region sizes, stick with 1K pages.
+ */
+ pagebits = 10;
+ }
+ if (!set_preferred_target_page_bits(pagebits)) {
+ /* This can only ever happen for hotplugging a CPU, or if
+ * the board code incorrectly creates a CPU which it has
+ * promised via minimum_page_size that it will not.
+ */
+ error_setg(errp, "This CPU requires a smaller page size than the "
+ "system is using");
+ return;
+ }
+
+ /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
+ * We don't support setting cluster ID ([16..23]) (known as Aff2
+ * in later ARM ARM versions), or any of the higher affinity level fields,
+ * so these bits always read as zero (RAZ).
+ */
+ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
+ cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
+ ARM_DEFAULT_CPUS_PER_CLUSTER);
+ }
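+ /*
+ * Worked example (hedged; assumes arm_cpu_mp_affinity() packs
+ * idx / clustersz into Aff1 and idx % clustersz into Aff0 with a
+ * default cluster size of 8): cpu_index 10 yields mp_affinity 0x102,
+ * i.e. Aff1 = 1, Aff0 = 2.
+ */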
+
+ if (cpu->reset_hivecs) {
+ cpu->reset_sctlr |= (1 << 13);
+ }
+
+ if (cpu->cfgend) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ cpu->reset_sctlr |= SCTLR_EE;
+ } else {
+ cpu->reset_sctlr |= SCTLR_B;
+ }
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
+ /* If the has_el3 CPU property is disabled then we need to disable the
+ * feature.
+ */
+ unset_feature(env, ARM_FEATURE_EL3);
+
+ /* Disable the security extension feature bits in the processor feature
+ * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
+ */
+ cpu->isar.id_pfr1 &= ~0xf0;
+ cpu->isar.id_aa64pfr0 &= ~0xf000;
+ }
+
+ if (!cpu->has_el2) {
+ unset_feature(env, ARM_FEATURE_EL2);
+ }
+
+ if (!cpu->has_pmu) {
+ unset_feature(env, ARM_FEATURE_PMU);
+ }
+ if (arm_feature(env, ARM_FEATURE_PMU)) {
+ pmu_init(cpu);
+
+ if (!kvm_enabled()) {
+ arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
+ arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
+ cpu);
+#endif
+ } else {
+ cpu->isar.id_aa64dfr0 =
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
+ cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+ cpu->pmceid0 = 0;
+ cpu->pmceid1 = 0;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* Disable the hypervisor feature bits in the processor feature
+ * registers if we don't have EL2. These are id_pfr1[15:12] and
+ * id_aa64pfr0_el1[11:8].
+ */
+ cpu->isar.id_aa64pfr0 &= ~0xf00;
+ cpu->isar.id_pfr1 &= ~0xf000;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
+ /*
+ * Disable the MTE feature bits if we do not have tag-memory
+ * provided by the machine.
+ */
+ cpu->isar.id_aa64pfr1 =
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ }
+#endif
+
+ /* MPU can be configured out of a PMSA CPU either by setting has-mpu
+ * to false or by setting pmsav7-dregion to 0.
+ */
+ if (!cpu->has_mpu) {
+ cpu->pmsav7_dregion = 0;
+ }
+ if (cpu->pmsav7_dregion == 0) {
+ cpu->has_mpu = false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_PMSA) &&
+ arm_feature(env, ARM_FEATURE_V7)) {
+ uint32_t nr = cpu->pmsav7_dregion;
+
+ if (nr > 0xff) {
+ error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
+ return;
+ }
+
+ if (nr) {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* PMSAv8 */
+ env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
+ env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
+ env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
+ }
+ } else {
+ env->pmsav7.drbar = g_new0(uint32_t, nr);
+ env->pmsav7.drsr = g_new0(uint32_t, nr);
+ env->pmsav7.dracr = g_new0(uint32_t, nr);
+ }
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ uint32_t nr = cpu->sau_sregion;
+
+ if (nr > 0xff) {
+ error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
+ return;
+ }
+
+ if (nr) {
+ env->sau.rbar = g_new0(uint32_t, nr);
+ env->sau.rlar = g_new0(uint32_t, nr);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ set_feature(env, ARM_FEATURE_VBAR);
+ }
+
+ register_cp_regs_for_features(cpu);
+ arm_cpu_register_gdb_regs_for_features(cpu);
+
+ init_cpreg_list(cpu);
+
+#ifndef CONFIG_USER_ONLY
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int smp_cpus = ms->smp.cpus;
+ bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);
+
+ /*
+ * We must set cs->num_ases to the final value before
+ * the first call to cpu_address_space_init.
+ */
+ if (cpu->tag_memory != NULL) {
+ cs->num_ases = 3 + has_secure;
+ } else {
+ cs->num_ases = 1 + has_secure;
+ }
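+ /*
+ * E.g. (illustrative): an EL3-capable CPU with MTE tag memory ends up
+ * with four address spaces: non-secure, secure, non-secure tag and
+ * secure tag, matching the cpu_address_space_init() calls below.
+ */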
+
+ if (has_secure) {
+ if (!cpu->secure_memory) {
+ cpu->secure_memory = cs->memory;
+ }
+ cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
+ cpu->secure_memory);
+ }
+
+ if (cpu->tag_memory != NULL) {
+ cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
+ cpu->tag_memory);
+ if (has_secure) {
+ cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
+ cpu->secure_tag_memory);
+ }
+ }
+
+ cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
+
+ /* No core_count specified, default to smp_cpus. */
+ if (cpu->core_count == -1) {
+ cpu->core_count = smp_cpus;
+ }
+#endif
+
+ if (tcg_enabled()) {
+ int dcz_blocklen = 4 << cpu->dcz_blocksize;
+
+ /*
+ * We only support DCZ blocklen that fits on one page.
+ *
+ * Architecturally this is always true. However TARGET_PAGE_SIZE
+ * is variable and, for compatibility with -machine virt-2.7,
+ * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
+ * But even then, while the largest architectural DCZ blocklen
+ * is 2KiB, no cpu actually uses such a large blocklen.
+ */
+ assert(dcz_blocklen <= TARGET_PAGE_SIZE);
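+ /*
+ * Example (illustrative): the common dcz_blocksize value of 4 means
+ * 2^4 = 16 words = 64 bytes, so dcz_blocklen = 4 << 4 = 64, which
+ * fits in any page and satisfies the MTE minimum of 2 * TAG_GRANULE.
+ */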
+
+ /*
+ * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
+ * both nibbles of each byte storing tag data may be written at once.
+ * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
+ */
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ assert(dcz_blocklen >= 2 * TAG_GRANULE);
+ }
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ acc->parent_realize(dev, errp);
+}
+
+static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ char **cpuname;
+ const char *cpunamestr;
+
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ cpunamestr = cpuname[0];
+#ifdef CONFIG_USER_ONLY
+ /* For backwards compatibility usermode emulation allows "-cpu any",
+ * which has the same semantics as "-cpu max".
+ */
+ if (!strcmp(cpunamestr, "any")) {
+ cpunamestr = "max";
+ }
+#endif
+ typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
+ oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static Property arm_cpu_properties[] = {
+ DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
+ DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
+ DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
+ mp_affinity, ARM64_AFFINITY_INVALID),
+ DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static gchar *arm_gdb_arch_name(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ return g_strdup("iwmmxt");
+ }
+ return g_strdup("arm");
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps arm_sysemu_ops = {
+ .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
+ .asidx_from_attrs = arm_asidx_from_attrs,
+ .write_elf32_note = arm_cpu_write_elf32_note,
+ .write_elf64_note = arm_cpu_write_elf64_note,
+ .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
+ .legacy_vmsd = &vmstate_arm_cpu,
+};
+#endif
+
+#ifdef CONFIG_TCG
+static const struct TCGCPUOps arm_tcg_ops = {
+ .initialize = arm_translate_init,
+ .synchronize_from_tb = arm_cpu_synchronize_from_tb,
+ .debug_excp_handler = arm_debug_excp_handler,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = arm_cpu_record_sigsegv,
+ .record_sigbus = arm_cpu_record_sigbus,
+#else
+ .tlb_fill = arm_cpu_tlb_fill,
+ .cpu_exec_interrupt = arm_cpu_exec_interrupt,
+ .do_interrupt = arm_cpu_do_interrupt,
+ .do_transaction_failed = arm_cpu_do_transaction_failed,
+ .do_unaligned_access = arm_cpu_do_unaligned_access,
+ .adjust_watchpoint_address = arm_adjust_watchpoint_address,
+ .debug_check_watchpoint = arm_debug_check_watchpoint,
+ .debug_check_breakpoint = arm_debug_check_breakpoint,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void arm_cpu_class_init(ObjectClass *oc, void *data)
+{
+ ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(acc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_parent_realize(dc, arm_cpu_realizefn,
+ &acc->parent_realize);
+
+ device_class_set_props(dc, arm_cpu_properties);
+ device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset);
+
+ cc->class_by_name = arm_cpu_class_by_name;
+ cc->has_work = arm_cpu_has_work;
+ cc->dump_state = arm_cpu_dump_state;
+ cc->set_pc = arm_cpu_set_pc;
+ cc->gdb_read_register = arm_cpu_gdb_read_register;
+ cc->gdb_write_register = arm_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &arm_sysemu_ops;
+#endif
+ cc->gdb_num_core_regs = 26;
+ cc->gdb_core_xml_file = "arm-core.xml";
+ cc->gdb_arch_name = arm_gdb_arch_name;
+ cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
+ cc->gdb_stop_before_watchpoint = true;
+ cc->disas_set_info = arm_disas_set_info;
+
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &arm_tcg_ops;
+#endif /* CONFIG_TCG */
+}
+
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
+static void arm_host_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+#ifdef CONFIG_KVM
+ kvm_arm_set_cpu_features_from_host(cpu);
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ aarch64_add_sve_properties(obj);
+ }
+#else
+ hvf_arm_set_cpu_features_from_host(cpu);
+#endif
+ arm_cpu_post_init(obj);
+}
+
+static const TypeInfo host_arm_cpu_type_info = {
+ .name = TYPE_ARM_HOST_CPU,
+ .parent = TYPE_AARCH64_CPU,
+ .instance_init = arm_host_initfn,
+};
+
+#endif
+
+static void arm_cpu_instance_init(Object *obj)
+{
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
+
+ acc->info->initfn(obj);
+ arm_cpu_post_init(obj);
+}
+
+static void cpu_register_class_init(ObjectClass *oc, void *data)
+{
+ ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+
+ acc->info = data;
+}
+
+void arm_cpu_register(const ARMCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_ARM_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_align = __alignof__(ARMCPU),
+ .instance_init = arm_cpu_instance_init,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = info->class_init ?: cpu_register_class_init,
+ .class_data = (void *)info,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo arm_cpu_type_info = {
+ .name = TYPE_ARM_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_align = __alignof__(ARMCPU),
+ .instance_init = arm_cpu_initfn,
+ .instance_finalize = arm_cpu_finalizefn,
+ .abstract = true,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = arm_cpu_class_init,
+};
+
+static void arm_cpu_register_types(void)
+{
+ type_register_static(&arm_cpu_type_info);
+
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
+ type_register_static(&host_arm_cpu_type_info);
+#endif
+}
+
+type_init(arm_cpu_register_types)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
new file mode 100644
index 000000000..e33f37b70
--- /dev/null
+++ b/target/arm/cpu.h
@@ -0,0 +1,4395 @@
+/*
+ * ARM virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_CPU_H
+#define ARM_CPU_H
+
+#include "kvm-consts.h"
+#include "hw/registerfields.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "qapi/qapi-types-common.h"
+
+/* ARM processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
+#ifdef TARGET_AARCH64
+#define KVM_HAVE_MCE_INJECTION 1
+#endif
+
+#define EXCP_UDEF 1 /* undefined instruction */
+#define EXCP_SWI 2 /* software interrupt */
+#define EXCP_PREFETCH_ABORT 3
+#define EXCP_DATA_ABORT 4
+#define EXCP_IRQ 5
+#define EXCP_FIQ 6
+#define EXCP_BKPT 7
+#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
+#define EXCP_HVC 11 /* HyperVisor Call */
+#define EXCP_HYP_TRAP 12
+#define EXCP_SMC 13 /* Secure Monitor Call */
+#define EXCP_VIRQ 14
+#define EXCP_VFIQ 15
+#define EXCP_SEMIHOST 16 /* semihosting call */
+#define EXCP_NOCP 17 /* v7M NOCP UsageFault */
+#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
+#define EXCP_LAZYFP 20 /* v7M fault during lazy FP stacking */
+#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
+#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
+#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
+/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
+
+#define ARMV7M_EXCP_RESET 1
+#define ARMV7M_EXCP_NMI 2
+#define ARMV7M_EXCP_HARD 3
+#define ARMV7M_EXCP_MEM 4
+#define ARMV7M_EXCP_BUS 5
+#define ARMV7M_EXCP_USAGE 6
+#define ARMV7M_EXCP_SECURE 7
+#define ARMV7M_EXCP_SVC 11
+#define ARMV7M_EXCP_DEBUG 12
+#define ARMV7M_EXCP_PENDSV 14
+#define ARMV7M_EXCP_SYSTICK 15
+
+/* For M profile, some registers are banked secure vs non-secure;
+ * these are represented as a 2-element array where the first element
+ * is the non-secure copy and the second is the secure copy.
+ * When the CPU does not implement the security extension then
+ * only the first element is used.
+ * This means that the copy for the current security state can be
+ * accessed via env->registerfield[env->v7m.secure] (whether the security
+ * extension is implemented or not).
+ */
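+/* For example (illustrative), env->v7m.ccr[env->v7m.secure] selects the
+ * CCR bank for the CPU's current security state.
+ */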
+enum {
+ M_REG_NS = 0,
+ M_REG_S = 1,
+ M_REG_NUM_BANKS = 2,
+};
+
+/* ARM-specific interrupt pending bits. */
+#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+
+/* The usual mapping for an AArch64 system register to its AArch32
+ * counterpart is for the 32 bit world to have access to the lower
+ * half only (with writes leaving the upper half untouched). It's
+ * therefore useful to be able to pass TCG the offset of the least
+ * significant half of a uint64_t struct member.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#define offsetofhigh32(S, M) offsetof(S, M)
+#else
+#define offsetoflow32(S, M) offsetof(S, M)
+#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#endif
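+
+/*
+ * Usage sketch (illustrative; the cpreg field name is an assumption, not
+ * part of this header): a cpreg describing the AArch32 view of a 64-bit
+ * field can use
+ * .fieldoffset = offsetoflow32(CPUARMState, cp15.sctlr_el[1])
+ * so the same definition is correct on both host endiannesses.
+ */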
+
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
+#define ARM_CPU_IRQ 0
+#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
+
+/* ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+/* The 2nd extra word holding syndrome info for data aborts does not use
+ * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
+ * help the sleb128 encoder do a better job.
+ * When restoring the CPU state, we shift it back up.
+ */
+#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
+#define ARM_INSN_START_WORD2_SHIFT 14
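+
+/*
+ * Sketch of the intended use (an assumption about the translator, not
+ * part of this header):
+ * word2 = (syndrome & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT;
+ * and restoring CPU state recomputes
+ * syndrome = word2 << ARM_INSN_START_WORD2_SHIFT;
+ */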
+
+/* We currently assume float and double are IEEE single and double
+ precision respectively.
+ Doing runtime conversions is tricky because VFP registers may contain
+ integer values (eg. as the result of a FTOSI instruction).
+ s<2n> maps to the least significant half of d<n>
+ s<2n+1> maps to the most significant half of d<n>
+ */
+
+/**
+ * DynamicGDBXMLInfo:
+ * @desc: Contains the XML descriptions.
+ * @num: Number of registers in this XML seen by GDB.
+ * @data: A union with data specific to the set of registers
+ * @cpregs_keys: Array that contains the corresponding Key of
+ * a given cpreg with the same order of the cpreg
+ * in the XML description.
+ */
+typedef struct DynamicGDBXMLInfo {
+ char *desc;
+ int num;
+ union {
+ struct {
+ uint32_t *keys;
+ } cpregs;
+ } data;
+} DynamicGDBXMLInfo;
+
+/* CPU state for each instance of a generic timer (in cp15 c14) */
+typedef struct ARMGenericTimer {
+ uint64_t cval; /* Timer CompareValue register */
+ uint64_t ctl; /* Timer Control register */
+} ARMGenericTimer;
+
+#define GTIMER_PHYS 0
+#define GTIMER_VIRT 1
+#define GTIMER_HYP 2
+#define GTIMER_SEC 3
+#define GTIMER_HYPVIRT 4
+#define NUM_GTIMERS 5
+
+typedef struct {
+ uint64_t raw_tcr;
+ uint32_t mask;
+ uint32_t base_mask;
+} TCR;
+
+#define VTCR_NSW (1u << 29)
+#define VTCR_NSA (1u << 30)
+#define VSTCR_SW VTCR_NSW
+#define VSTCR_SA VTCR_NSA
+
+/* Define a maximum sized vector register.
+ * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
+ * For 64-bit, this is a 2048-bit SVE register.
+ *
+ * Note that the mapping between S, D, and Q views of the register bank
+ * differs between AArch64 and AArch32.
+ * In AArch32:
+ * Qn = regs[n].d[1]:regs[n].d[0]
+ * Dn = regs[n / 2].d[n & 1]
+ * Sn = regs[n / 4].d[n % 4 / 2],
+ * bits 31..0 for even n, and bits 63..32 for odd n
+ * (and regs[16] to regs[31] are inaccessible)
+ * In AArch64:
+ * Zn = regs[n].d[*]
+ * Qn = regs[n].d[1]:regs[n].d[0]
+ * Dn = regs[n].d[0]
+ * Sn = regs[n].d[0] bits 31..0
+ * Hn = regs[n].d[0] bits 15..0
+ *
+ * This corresponds to the architecturally defined mapping between
+ * the two execution states, and means we do not need to explicitly
+ * map these registers when changing states.
+ *
+ * Align the data for use with TCG host vector operations.
+ */
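+
+/*
+ * Worked example (illustrative): in AArch32, D5 is regs[2].d[1], and its
+ * single-precision halves are S10 (bits 31..0) and S11 (bits 63..32).
+ */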
+
+#ifdef TARGET_AARCH64
+# define ARM_MAX_VQ 16
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
+#else
+# define ARM_MAX_VQ 1
+static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
+static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
+#endif
+
+typedef struct ARMVectorReg {
+ uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
+} ARMVectorReg;
+
+#ifdef TARGET_AARCH64
+/* In AArch32 mode, predicate registers do not exist at all. */
+typedef struct ARMPredicateReg {
+ uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
+} ARMPredicateReg;
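+
+/*
+ * Sizing note (illustrative): an SVE predicate holds one bit per vector
+ * byte, so with ARM_MAX_VQ = 16 (a 2048-bit, 256-byte vector) each
+ * ARMPredicateReg is DIV_ROUND_UP(32, 8) = 4 uint64_t, i.e. 256 bits.
+ */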
+
+/* In AArch32 mode, PAC keys do not exist at all. */
+typedef struct ARMPACKey {
+ uint64_t lo, hi;
+} ARMPACKey;
+#endif
+
+/* See the commentary above the TBFLAG field definitions. */
+typedef struct CPUARMTBFlags {
+ uint32_t flags;
+ target_ulong flags2;
+} CPUARMTBFlags;
+
+typedef struct CPUARMState {
+ /* Regs for current mode. */
+ uint32_t regs[16];
+
+ /* 32/64 switch only happens when taking and returning from
+ * exceptions so the overlap semantics are taken care of then
+ * instead of having a complicated union.
+ */
+ /* Regs for A64 mode. */
+ uint64_t xregs[32];
+ uint64_t pc;
+ /* PSTATE isn't an architectural register for ARMv8. However, it is
+ * convenient for us to assemble the underlying state into a 32 bit format
+ * identical to the architectural format used for the SPSR. (This is also
+ * what the Linux kernel's 'pstate' field in signal handlers and KVM's
+ * 'pstate' register are.) Of the PSTATE bits:
+ * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
+ * semantics as for AArch32, as described in the comments on each field)
+ * nRW (also known as M[4]) is kept, inverted, in env->aarch64
+ * DAIF (exception masks) are kept in env->daif
+ * BTYPE is kept in env->btype
+ * all other bits are stored in their correct places in env->pstate
+ */
+ uint32_t pstate;
+ uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
+
+ /* Cached TBFLAGS state. See below for which bits are included. */
+ CPUARMTBFlags hflags;
+
+ /* Frequently accessed CPSR bits are stored separately for efficiency.
+ This contains all the other bits. Use cpsr_{read,write} to access
+ the whole CPSR. */
+ uint32_t uncached_cpsr;
+ uint32_t spsr;
+
+ /* Banked registers. */
+ uint64_t banked_spsr[8];
+ uint32_t banked_r13[8];
+ uint32_t banked_r14[8];
+
+ /* These hold r8-r12. */
+ uint32_t usr_regs[5];
+ uint32_t fiq_regs[5];
+
+ /* cpsr flag cache for faster execution */
+ uint32_t CF; /* 0 or 1 */
+ uint32_t VF; /* V is the bit 31. All other bits are undefined */
+ uint32_t NF; /* N is bit 31. All other bits are undefined. */
+ uint32_t ZF; /* Z set if zero. */
+ uint32_t QF; /* 0 or 1 */
+ uint32_t GE; /* cpsr[19:16] */
+ uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
+ uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
+ uint32_t btype; /* BTI branch type. spsr[11:10]. */
+ uint64_t daif; /* exception masks, in the bits they are in PSTATE */
+
+ uint64_t elr_el[4]; /* AArch64 exception link regs */
+ uint64_t sp_el[4]; /* AArch64 banked stack pointers */
+
+ /* System control coprocessor (cp15) */
+ struct {
+ uint32_t c0_cpuid;
+ union { /* Cache size selection */
+ struct {
+ uint64_t _unused_csselr0;
+ uint64_t csselr_ns;
+ uint64_t _unused_csselr1;
+ uint64_t csselr_s;
+ };
+ uint64_t csselr_el[4];
+ };
+ union { /* System control register. */
+ struct {
+ uint64_t _unused_sctlr;
+ uint64_t sctlr_ns;
+ uint64_t hsctlr;
+ uint64_t sctlr_s;
+ };
+ uint64_t sctlr_el[4];
+ };
+ uint64_t cpacr_el1; /* Architectural feature access control register */
+ uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
+ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
+ uint64_t sder; /* Secure debug enable register. */
+ uint32_t nsacr; /* Non-secure access control register. */
+ union { /* MMU translation table base 0. */
+ struct {
+ uint64_t _unused_ttbr0_0;
+ uint64_t ttbr0_ns;
+ uint64_t _unused_ttbr0_1;
+ uint64_t ttbr0_s;
+ };
+ uint64_t ttbr0_el[4];
+ };
+ union { /* MMU translation table base 1. */
+ struct {
+ uint64_t _unused_ttbr1_0;
+ uint64_t ttbr1_ns;
+ uint64_t _unused_ttbr1_1;
+ uint64_t ttbr1_s;
+ };
+ uint64_t ttbr1_el[4];
+ };
+ uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
+ uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
+ /* MMU translation table base control. */
+ TCR tcr_el[4];
+ TCR vtcr_el2; /* Virtualization Translation Control. */
+ TCR vstcr_el2; /* Secure Virtualization Translation Control. */
+ uint32_t c2_data; /* MPU data cacheable bits. */
+ uint32_t c2_insn; /* MPU instruction cacheable bits. */
+ union { /* MMU domain access control register
+ * MPU write buffer control.
+ */
+ struct {
+ uint64_t dacr_ns;
+ uint64_t dacr_s;
+ };
+ struct {
+ uint64_t dacr32_el2;
+ };
+ };
+ uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
+ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
+ uint64_t hcr_el2; /* Hypervisor configuration register */
+ uint64_t scr_el3; /* Secure configuration register. */
+ union { /* Fault status registers. */
+ struct {
+ uint64_t ifsr_ns;
+ uint64_t ifsr_s;
+ };
+ struct {
+ uint64_t ifsr32_el2;
+ };
+ };
+ union {
+ struct {
+ uint64_t _unused_dfsr;
+ uint64_t dfsr_ns;
+ uint64_t hsr;
+ uint64_t dfsr_s;
+ };
+ uint64_t esr_el[4];
+ };
+ uint32_t c6_region[8]; /* MPU base/size registers. */
+ union { /* Fault address registers. */
+ struct {
+ uint64_t _unused_far0;
+#ifdef HOST_WORDS_BIGENDIAN
+ uint32_t ifar_ns;
+ uint32_t dfar_ns;
+ uint32_t ifar_s;
+ uint32_t dfar_s;
+#else
+ uint32_t dfar_ns;
+ uint32_t ifar_ns;
+ uint32_t dfar_s;
+ uint32_t ifar_s;
+#endif
+ uint64_t _unused_far3;
+ };
+ uint64_t far_el[4];
+ };
+ uint64_t hpfar_el2;
+ uint64_t hstr_el2;
+ union { /* Translation result. */
+ struct {
+ uint64_t _unused_par_0;
+ uint64_t par_ns;
+ uint64_t _unused_par_1;
+ uint64_t par_s;
+ };
+ uint64_t par_el[4];
+ };
+
+ uint32_t c9_insn; /* Cache lockdown registers. */
+ uint32_t c9_data;
+ uint64_t c9_pmcr; /* performance monitor control register */
+ uint64_t c9_pmcnten; /* perf monitor counter enables */
+ uint64_t c9_pmovsr; /* perf monitor overflow status */
+ uint64_t c9_pmuserenr; /* perf monitor user enable */
+ uint64_t c9_pmselr; /* perf monitor counter selection register */
+ uint64_t c9_pminten; /* perf monitor interrupt enables */
+ union { /* Memory attribute redirection */
+ struct {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t _unused_mair_0;
+ uint32_t mair1_ns;
+ uint32_t mair0_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair1_s;
+ uint32_t mair0_s;
+#else
+ uint64_t _unused_mair_0;
+ uint32_t mair0_ns;
+ uint32_t mair1_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair0_s;
+ uint32_t mair1_s;
+#endif
+ };
+ uint64_t mair_el[4];
+ };
+ union { /* vector base address register */
+ struct {
+ uint64_t _unused_vbar;
+ uint64_t vbar_ns;
+ uint64_t hvbar;
+ uint64_t vbar_s;
+ };
+ uint64_t vbar_el[4];
+ };
+ uint32_t mvbar; /* (monitor) vector base address register */
+ struct { /* FCSE PID. */
+ uint32_t fcseidr_ns;
+ uint32_t fcseidr_s;
+ };
+ union { /* Context ID. */
+ struct {
+ uint64_t _unused_contextidr_0;
+ uint64_t contextidr_ns;
+ uint64_t _unused_contextidr_1;
+ uint64_t contextidr_s;
+ };
+ uint64_t contextidr_el[4];
+ };
+ union { /* User RW Thread register. */
+ struct {
+ uint64_t tpidrurw_ns;
+ uint64_t tpidrprw_ns;
+ uint64_t htpidr;
+ uint64_t _tpidr_el3;
+ };
+ uint64_t tpidr_el[4];
+ };
+ /* The secure banks of these registers don't map anywhere */
+ uint64_t tpidrurw_s;
+ uint64_t tpidrprw_s;
+ uint64_t tpidruro_s;
+
+ union { /* User RO Thread register. */
+ uint64_t tpidruro_ns;
+ uint64_t tpidrro_el[1];
+ };
+ uint64_t c14_cntfrq; /* Counter Frequency register */
+ uint64_t c14_cntkctl; /* Timer Control register */
+ uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
+ uint64_t cntvoff_el2; /* Counter Virtual Offset register */
+ ARMGenericTimer c14_timer[NUM_GTIMERS];
+ uint32_t c15_cpar; /* XScale Coprocessor Access Register */
+ uint32_t c15_ticonfig; /* TI925T configuration byte. */
+ uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
+ uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
+ uint32_t c15_threadid; /* TI debugger thread-ID. */
+ uint32_t c15_config_base_address; /* SCU base address. */
+ uint32_t c15_diagnostic; /* diagnostic register */
+ uint32_t c15_power_diagnostic;
+ uint32_t c15_power_control; /* power control */
+ uint64_t dbgbvr[16]; /* breakpoint value registers */
+ uint64_t dbgbcr[16]; /* breakpoint control registers */
+ uint64_t dbgwvr[16]; /* watchpoint value registers */
+ uint64_t dbgwcr[16]; /* watchpoint control registers */
+ uint64_t mdscr_el1;
+ uint64_t oslsr_el1; /* OS Lock Status */
+ uint64_t mdcr_el2;
+ uint64_t mdcr_el3;
+ /* Stores the architectural value of the counter *the last time it was
+ * updated* by pmccntr_op_start. Accesses should always be surrounded
+ * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
+ * architecturally-correct value is being read/set.
+ */
+ uint64_t c15_ccnt;
+ /* Stores the delta between the architectural value and the underlying
+ * cycle count during normal operation. It is used to update c15_ccnt
+ * to be the correct architectural value before accesses. During
+ * accesses, c15_ccnt_delta contains the underlying count being used
+ * for the access, after which it reverts to the delta value in
+ * pmccntr_op_finish.
+ */
+ uint64_t c15_ccnt_delta;
+ uint64_t c14_pmevcntr[31];
+ uint64_t c14_pmevcntr_delta[31];
+ uint64_t c14_pmevtyper[31];
+ uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
+ uint64_t vpidr_el2; /* Virtualization Processor ID Register */
+ uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
+ uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0. */
+ uint64_t gcr_el1;
+ uint64_t rgsr_el1;
+ } cp15;
+
+ struct {
+ /* M profile has up to 4 stack pointers:
+ * a Main Stack Pointer and a Process Stack Pointer for each
+ * of the Secure and Non-Secure states. (If the CPU doesn't support
+ * the security extension then it has only two SPs.)
+ * In QEMU we always store the currently active SP in regs[13],
+ * and the non-active SP for the current security state in
+ * v7m.other_sp. The stack pointers for the inactive security state
+ * are stored in other_ss_msp and other_ss_psp.
+ * switch_v7m_security_state() is responsible for rearranging them
+ * when we change security state.
+ */
+ uint32_t other_sp;
+ uint32_t other_ss_msp;
+ uint32_t other_ss_psp;
+ uint32_t vecbase[M_REG_NUM_BANKS];
+ uint32_t basepri[M_REG_NUM_BANKS];
+ uint32_t control[M_REG_NUM_BANKS];
+ uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
+ uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
+ uint32_t hfsr; /* HardFault Status */
+ uint32_t dfsr; /* Debug Fault Status Register */
+ uint32_t sfsr; /* Secure Fault Status Register */
+ uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
+ uint32_t bfar; /* BusFault Address */
+ uint32_t sfar; /* Secure Fault Address Register */
+ unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
+ int exception;
+ uint32_t primask[M_REG_NUM_BANKS];
+ uint32_t faultmask[M_REG_NUM_BANKS];
+ uint32_t aircr; /* only holds r/w state if security extn implemented */
+ uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
+ uint32_t csselr[M_REG_NUM_BANKS];
+ uint32_t scr[M_REG_NUM_BANKS];
+ uint32_t msplim[M_REG_NUM_BANKS];
+ uint32_t psplim[M_REG_NUM_BANKS];
+ uint32_t fpcar[M_REG_NUM_BANKS];
+ uint32_t fpccr[M_REG_NUM_BANKS];
+ uint32_t fpdscr[M_REG_NUM_BANKS];
+ uint32_t cpacr[M_REG_NUM_BANKS];
+ uint32_t nsacr;
+ uint32_t ltpsize;
+ uint32_t vpr;
+ } v7m;
+
+ /* Information associated with an exception about to be taken:
+ * code which raises an exception must set cs->exception_index and
+ * the relevant parts of this structure; the cpu_do_interrupt function
+ * will then set the guest-visible registers as part of the exception
+ * entry process.
+ */
+ struct {
+ uint32_t syndrome; /* AArch64 format syndrome register */
+ uint32_t fsr; /* AArch32 format fault status register info */
+ uint64_t vaddress; /* virtual addr associated with exception, if any */
+ uint32_t target_el; /* EL the exception should be targeted for */
+ /* If we implement EL2 we will also need to store information
+ * about the intermediate physical address for stage 2 faults.
+ */
+ } exception;
+
+ /* Information associated with an SError */
+ struct {
+ uint8_t pending;
+ uint8_t has_esr;
+ uint64_t esr;
+ } serror;
+
+ uint8_t ext_dabt_raised; /* Tracking/verifying injection of ext DABT */
+
+ /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
+ uint32_t irq_line_state;
+
+ /* Thumb-2 EE state. */
+ uint32_t teecr;
+ uint32_t teehbr;
+
+ /* VFP coprocessor state. */
+ struct {
+ ARMVectorReg zregs[32];
+
+#ifdef TARGET_AARCH64
+ /* Store FFR as pregs[16] to make it easier to treat as any other. */
+#define FFR_PRED_NUM 16
+ ARMPredicateReg pregs[17];
+ /* Scratch space for aa64 sve predicate temporary. */
+ ARMPredicateReg preg_tmp;
+#endif
+
+ /* We store these FPSCR fields separately for convenience. */
+ uint32_t qc[4] QEMU_ALIGNED(16);
+ int vec_len;
+ int vec_stride;
+
+ uint32_t xregs[16];
+
+ /* Scratch space for aa32 neon expansion. */
+ uint32_t scratch[8];
+
+ /* There are a number of distinct float control structures:
+ *
+ * fp_status: is the "normal" fp status.
+ * fp_status_fp16: used for half-precision calculations
+ * standard_fp_status : the ARM "Standard FPSCR Value"
+ * standard_fp_status_fp16 : used for half-precision
+ * calculations with the ARM "Standard FPSCR Value"
+ *
+ * Half-precision operations are governed by a separate
+ * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
+ * status structure to control this.
+ *
+ * The "Standard FPSCR", ie default-NaN, flush-to-zero,
+ * round-to-nearest and is used by any operations (generally
+ * Neon) which the architecture defines as controlled by the
+ * standard FPSCR value rather than the FPSCR.
+ *
+ * The "standard FPSCR but for fp16 ops" is needed because
+ * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
+ * using a fixed value for it.
+ *
+ * To avoid having to transfer exception bits around, we simply
+ * say that the FPSCR cumulative exception flags are the logical
+ * OR of the flags in the four fp statuses. This relies on the
+ * only thing which needs to read the exception flags being
+ * an explicit FPSCR read.
+ */
+ float_status fp_status;
+ float_status fp_status_f16;
+ float_status standard_fp_status;
+ float_status standard_fp_status_f16;
+
+ /* ZCR_EL[1-3] */
+ uint64_t zcr_el[4];
+ } vfp;
+ uint64_t exclusive_addr;
+ uint64_t exclusive_val;
+ uint64_t exclusive_high;
+
+ /* iwMMXt coprocessor state. */
+ struct {
+ uint64_t regs[16];
+ uint64_t val;
+
+ uint32_t cregs[16];
+ } iwmmxt;
+
+#ifdef TARGET_AARCH64
+ struct {
+ ARMPACKey apia;
+ ARMPACKey apib;
+ ARMPACKey apda;
+ ARMPACKey apdb;
+ ARMPACKey apga;
+ } keys;
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+ /* For usermode syscall translation. */
+ int eabi;
+#endif
+
+ struct CPUBreakpoint *cpu_breakpoint[16];
+ struct CPUWatchpoint *cpu_watchpoint[16];
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields after this point are preserved across CPU reset. */
+
+ /* Internal CPU feature flags. */
+ uint64_t features;
+
+ /* PMSAv7 MPU */
+ struct {
+ uint32_t *drbar;
+ uint32_t *drsr;
+ uint32_t *dracr;
+ uint32_t rnr[M_REG_NUM_BANKS];
+ } pmsav7;
+
+ /* PMSAv8 MPU */
+ struct {
+ /* The PMSAv8 implementation also shares some PMSAv7 config
+ * and state:
+ * pmsav7.rnr (region number register)
+ * pmsav7_dregion (number of configured regions)
+ */
+ uint32_t *rbar[M_REG_NUM_BANKS];
+ uint32_t *rlar[M_REG_NUM_BANKS];
+ uint32_t mair0[M_REG_NUM_BANKS];
+ uint32_t mair1[M_REG_NUM_BANKS];
+ } pmsav8;
+
+ /* v8M SAU */
+ struct {
+ uint32_t *rbar;
+ uint32_t *rlar;
+ uint32_t rnr;
+ uint32_t ctrl;
+ } sau;
+
+ void *nvic;
+ const struct arm_boot_info *boot_info;
+ /* Store GICv3CPUState to access from this struct */
+ void *gicv3state;
+
+#ifdef TARGET_TAGGED_ADDRESSES
+ /* Linux syscall tagged address support */
+ bool tagged_addr_enable;
+#endif
+} CPUARMState;
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
+/**
+ * ARMELChangeHookFn:
+ * type of a function which can be registered via arm_register_el_change_hook()
+ * to get callbacks when the CPU changes its exception level or mode.
+ */
+typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
+typedef struct ARMELChangeHook ARMELChangeHook;
+struct ARMELChangeHook {
+ ARMELChangeHookFn *hook;
+ void *opaque;
+ QLIST_ENTRY(ARMELChangeHook) node;
+};
+
+/* These values map onto the return values for
+ * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
+typedef enum ARMPSCIState {
+ PSCI_ON = 0,
+ PSCI_OFF = 1,
+ PSCI_ON_PENDING = 2
+} ARMPSCIState;
+
+typedef struct ARMISARegisters ARMISARegisters;
+
+/**
+ * ARMCPU:
+ * @env: #CPUARMState
+ *
+ * An ARM CPU core.
+ */
+struct ARMCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUARMState env;
+
+ /* Coprocessor information */
+ GHashTable *cp_regs;
+ /* For marshalling (mostly coprocessor) register state between the
+ * kernel and QEMU (for KVM) and between two QEMUs (for migration),
+ * we use these arrays.
+ */
+ /* List of register indexes managed via these arrays; (full KVM style
+ * 64 bit indexes, not CPRegInfo 32 bit indexes)
+ */
+ uint64_t *cpreg_indexes;
+ /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
+ uint64_t *cpreg_values;
+ /* Length of the indexes, values, reset_values arrays */
+ int32_t cpreg_array_len;
+ /* These are used only for migration: incoming data arrives in
+ * these fields and is sanity checked in post_load before copying
+ * to the working data structures above.
+ */
+ uint64_t *cpreg_vmstate_indexes;
+ uint64_t *cpreg_vmstate_values;
+ int32_t cpreg_vmstate_array_len;
+
+ DynamicGDBXMLInfo dyn_sysreg_xml;
+ DynamicGDBXMLInfo dyn_svereg_xml;
+
+ /* Timers used by the generic (architected) timer */
+ QEMUTimer *gt_timer[NUM_GTIMERS];
+ /*
+ * Timer used by the PMU. Its state is restored after migration by
+ * pmu_op_finish() - it does not need other handling during migration
+ */
+ QEMUTimer *pmu_timer;
+ /* GPIO outputs for generic timer */
+ qemu_irq gt_timer_outputs[NUM_GTIMERS];
+ /* GPIO output for GICv3 maintenance interrupt signal */
+ qemu_irq gicv3_maintenance_interrupt;
+ /* GPIO output for the PMU interrupt */
+ qemu_irq pmu_interrupt;
+
+ /* MemoryRegion to use for secure physical accesses */
+ MemoryRegion *secure_memory;
+
+ /* MemoryRegion to use for allocation tag accesses */
+ MemoryRegion *tag_memory;
+ MemoryRegion *secure_tag_memory;
+
+ /* For v8M, pointer to the IDAU interface provided by board/SoC */
+ Object *idau;
+
+ /* 'compatible' string for this CPU for Linux device trees */
+ const char *dtb_compatible;
+
+ /* PSCI version for this CPU
+ * Bits[31:16] = Major Version
+ * Bits[15:0] = Minor Version
+ */
+ uint32_t psci_version;
+
+ /* Current power state, access guarded by BQL */
+ ARMPSCIState power_state;
+
+ /* CPU has virtualization extension */
+ bool has_el2;
+ /* CPU has security extension */
+ bool has_el3;
+ /* CPU has PMU (Performance Monitor Unit) */
+ bool has_pmu;
+ /* CPU has VFP */
+ bool has_vfp;
+ /* CPU has Neon */
+ bool has_neon;
+ /* CPU has M-profile DSP extension */
+ bool has_dsp;
+
+ /* CPU has memory protection unit */
+ bool has_mpu;
+ /* PMSAv7 MPU number of supported regions */
+ uint32_t pmsav7_dregion;
+ /* v8M SAU number of supported regions */
+ uint32_t sau_sregion;
+
+ /* PSCI conduit used to invoke PSCI methods
+ * 0 - disabled, 1 - smc, 2 - hvc
+ */
+ uint32_t psci_conduit;
+
+ /* For v8M, initial value of the Secure VTOR */
+ uint32_t init_svtor;
+ /* For v8M, initial value of the Non-secure VTOR */
+ uint32_t init_nsvtor;
+
+ /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
+ * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
+ */
+ uint32_t kvm_target;
+
+ /* KVM init features for this CPU */
+ uint32_t kvm_init_features[7];
+
+ /* KVM CPU state */
+
+ /* KVM virtual time adjustment */
+ bool kvm_adjvtime;
+ bool kvm_vtime_dirty;
+ uint64_t kvm_vtime;
+
+ /* KVM steal time */
+ OnOffAuto kvm_steal_time;
+
+ /* Uniprocessor system with MP extensions */
+ bool mp_is_up;
+
+ /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
+ * and the probe failed (so we need to report the error in realize)
+ */
+ bool host_cpu_probe_failed;
+
+ /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
+ * register.
+ */
+ int32_t core_count;
+
+ /* The instance init functions for implementation-specific subclasses
+ * set these fields to specify the implementation-dependent values of
+ * various constant registers and reset values of non-constant
+ * registers.
+ * Some of these might become QOM properties eventually.
+ * Field names match the official register names as defined in the
+ * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
+ * is used for reset values of non-constant registers; no reset_
+ * prefix means a constant register.
+ * Some of these registers are split out into a substructure that
+ * is shared with the translators to control the ISA.
+ *
+ * Note that if you add an ID register to the ARMISARegisters struct
+ * you need to also update the 32-bit and 64-bit versions of the
+ * kvm_arm_get_host_cpu_features() function to correctly populate the
+ * field by reading the value from the KVM vCPU.
+ */
+ struct ARMISARegisters {
+ uint32_t id_isar0;
+ uint32_t id_isar1;
+ uint32_t id_isar2;
+ uint32_t id_isar3;
+ uint32_t id_isar4;
+ uint32_t id_isar5;
+ uint32_t id_isar6;
+ uint32_t id_mmfr0;
+ uint32_t id_mmfr1;
+ uint32_t id_mmfr2;
+ uint32_t id_mmfr3;
+ uint32_t id_mmfr4;
+ uint32_t id_pfr0;
+ uint32_t id_pfr1;
+ uint32_t id_pfr2;
+ uint32_t mvfr0;
+ uint32_t mvfr1;
+ uint32_t mvfr2;
+ uint32_t id_dfr0;
+ uint32_t dbgdidr;
+ uint64_t id_aa64isar0;
+ uint64_t id_aa64isar1;
+ uint64_t id_aa64pfr0;
+ uint64_t id_aa64pfr1;
+ uint64_t id_aa64mmfr0;
+ uint64_t id_aa64mmfr1;
+ uint64_t id_aa64mmfr2;
+ uint64_t id_aa64dfr0;
+ uint64_t id_aa64dfr1;
+ uint64_t id_aa64zfr0;
+ } isar;
+ uint64_t midr;
+ uint32_t revidr;
+ uint32_t reset_fpsid;
+ uint64_t ctr;
+ uint32_t reset_sctlr;
+ uint64_t pmceid0;
+ uint64_t pmceid1;
+ uint32_t id_afr0;
+ uint64_t id_aa64afr0;
+ uint64_t id_aa64afr1;
+ uint64_t clidr;
+ uint64_t mp_affinity; /* MP ID without feature bits */
+ /* The elements of this array are the CCSIDR values for each cache,
+ * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
+ */
+ uint64_t ccsidr[16];
+ uint64_t reset_cbar;
+ uint32_t reset_auxcr;
+ bool reset_hivecs;
+
+ /*
+ * Intermediate values used during property parsing.
+ * Once finalized, the values should be read from ID_AA64ISAR1.
+ */
+ bool prop_pauth;
+ bool prop_pauth_impdef;
+
+ /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
+ uint32_t dcz_blocksize;
+ uint64_t rvbar;
+
+ /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
+ int gic_num_lrs; /* number of list registers */
+ int gic_vpribits; /* number of virtual priority bits */
+ int gic_vprebits; /* number of virtual preemption bits */
+
+ /* Whether the cfgend input is high (i.e. this CPU should reset into
+ * big-endian mode). This setting isn't used directly: instead it modifies
+ * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
+ * architecture version.
+ */
+ bool cfgend;
+
+ QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
+ QLIST_HEAD(, ARMELChangeHook) el_change_hooks;
+
+ int32_t node_id; /* NUMA node this CPU belongs to */
+
+ /* Used to synchronize KVM and QEMU in-kernel device levels */
+ uint8_t device_irq_level;
+
+ /* Used to set the maximum vector length the cpu will support. */
+ uint32_t sve_max_vq;
+
+#ifdef CONFIG_USER_ONLY
+ /* Used to set the default vector length at process start. */
+ uint32_t sve_default_vq;
+#endif
+
+ /*
+ * In sve_vq_map each set bit is a supported vector length of
+ * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector
+ * length in quadwords.
+ *
+ * While processing properties during initialization, corresponding
+ * sve_vq_init bits are set for bits in sve_vq_map that have been
+ * set by properties.
+ *
+ * Bits set in sve_vq_supported represent valid vector lengths for
+ * the CPU type.
+ */
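+ /*
+ * Example (illustrative): a CPU supporting only the 128-bit and
+ * 256-bit vector lengths would have bits 0 and 1 set in
+ * sve_vq_supported.
+ */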
+ DECLARE_BITMAP(sve_vq_map, ARM_MAX_VQ);
+ DECLARE_BITMAP(sve_vq_init, ARM_MAX_VQ);
+ DECLARE_BITMAP(sve_vq_supported, ARM_MAX_VQ);
+
+ /* Generic timer counter frequency, in Hz */
+ uint64_t gt_cntfrq_hz;
+};
+
+unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
+
+void arm_cpu_post_init(Object *obj);
+
+uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_arm_cpu;
+
+void arm_cpu_do_interrupt(CPUState *cpu);
+void arm_v7m_cpu_do_interrupt(CPUState *cpu);
+#endif /* !CONFIG_USER_ONLY */
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+
+int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+/*
+ * Helpers to dynamically generate XML descriptions of the sysregs
+ * and SVE registers. Returns the number of registers in each set.
+ */
+int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg);
+int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
+
+/* Returns the dynamically generated XML for the gdb stub:
+ * a pointer to the XML contents for the specified XML file, or NULL
+ * if the XML name doesn't match the predefined one.
+ */
+const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);
+
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+
+#ifdef TARGET_AARCH64
+int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64);
+void aarch64_add_sve_properties(Object *obj);
+
+/*
+ * SVE registers are encoded in KVM's memory in an endianness-invariant format.
+ * The byte at offset i from the start of the in-memory representation contains
+ * the bits [(7 + 8 * i) : (8 * i)] of the register value. Since the lowest
+ * offsets are stored at the lowest memory addresses, this nearly matches
+ * QEMU's representation, which is to use an array of host-endian
+ * uint64_t's, where the lower offsets are at the lower indices. To complete
+ * the translation we just need to byte swap the uint64_t's on big-endian hosts.
+ */
+static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ int i;
+
+ for (i = 0; i < nr; ++i) {
+ dst[i] = bswap64(src[i]);
+ }
+
+ return dst;
+#else
+ return src;
+#endif
+}
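+
+/*
+ * Usage sketch (illustrative; the caller shape is an assumption): the
+ * scratch buffer is only written on big-endian hosts, e.g.
+ * uint64_t scratch[ARM_MAX_VQ * 2];
+ * uint64_t *r = sve_bswap64(scratch, &env->vfp.zregs[0].d[0],
+ * ARM_MAX_VQ * 2);
+ */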
+
+#else
+static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
+static inline void aarch64_sve_change_el(CPUARMState *env, int o,
+ int n, bool a)
+{ }
+static inline void aarch64_add_sve_properties(Object *obj) { }
+#endif
+
+void aarch64_sync_32_to_64(CPUARMState *env);
+void aarch64_sync_64_to_32(CPUARMState *env);
+
+int fp_exception_el(CPUARMState *env, int cur_el);
+int sve_exception_el(CPUARMState *env, int cur_el);
+uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
+
+static inline bool is_a64(CPUARMState *env)
+{
+ return env->aarch64;
+}
+
+/**
+ * pmu_op_start/finish
+ * @env: CPUARMState
+ *
+ * Convert all PMU counters between their delta form (the typical mode when
+ * they are enabled) and the guest-visible values. These two calls must
+ * surround any action which might affect the counters.
+ */
+void pmu_op_start(CPUARMState *env);
+void pmu_op_finish(CPUARMState *env);
+
+/*
+ * Called when a PMU counter is due to overflow
+ */
+void arm_pmu_timer_cb(void *opaque);
+
+/**
+ * Functions to register as EL change hooks for PMU mode filtering
+ */
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
+void pmu_post_el_change(ARMCPU *cpu, void *ignored);
+
+/*
+ * pmu_init
+ * @cpu: ARMCPU
+ *
+ * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state
+ * for the current configuration
+ */
+void pmu_init(ARMCPU *cpu);
+
+/* SCTLR bit meanings. Several bits have been reused in newer
+ * versions of the architecture; in that case we define constants
+ * for both old and new bit meanings. Code which tests against those
+ * bits should probably check or otherwise arrange that the CPU
+ * is the architectural version it expects.
+ */
+#define SCTLR_M (1U << 0)
+#define SCTLR_A (1U << 1)
+#define SCTLR_C (1U << 2)
+#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
+#define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */
+#define SCTLR_SA (1U << 3) /* AArch64 only */
+#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
+#define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */
+#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
+#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
+#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
+#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
+#define SCTLR_nAA (1U << 6) /* when v8.4-LSE is implemented */
+#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
+#define SCTLR_ITD (1U << 7) /* v8 onward */
+#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
+#define SCTLR_SED (1U << 8) /* v8 onward */
+#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
+#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
+#define SCTLR_F (1U << 10) /* up to v6 */
+#define SCTLR_SW (1U << 10) /* v7 */
+#define SCTLR_EnRCTX (1U << 10) /* in v8.0-PredInv */
+#define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */
+#define SCTLR_EOS (1U << 11) /* v8.5-ExS */
+#define SCTLR_I (1U << 12)
+#define SCTLR_V (1U << 13) /* AArch32 only */
+#define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */
+#define SCTLR_RR (1U << 14) /* up to v7 */
+#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
+#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
+#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
+#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
+#define SCTLR_nTWI (1U << 16) /* v8 onward */
+#define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */
+#define SCTLR_BR (1U << 17) /* PMSA only */
+#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
+#define SCTLR_nTWE (1U << 18) /* v8 onward */
+#define SCTLR_WXN (1U << 19)
+#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
+#define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */
+#define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */
+#define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */
+#define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */
+#define SCTLR_EIS (1U << 22) /* v8.5-ExS */
+#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
+#define SCTLR_SPAN (1U << 23) /* v8.1-PAN */
+#define SCTLR_VE (1U << 24) /* up to v7 */
+#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
+#define SCTLR_EE (1U << 25)
+#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
+#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
+#define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */
+#define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */
+#define SCTLR_TRE (1U << 28) /* AArch32 only */
+#define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */
+#define SCTLR_AFE (1U << 29) /* AArch32 only */
+#define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */
+#define SCTLR_TE (1U << 30) /* AArch32 only */
+#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
+#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
+#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
+#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
+#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
+#define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */
+#define SCTLR_TCF0 (3ULL << 38) /* v8.5-MemTag */
+#define SCTLR_TCF (3ULL << 40) /* v8.5-MemTag */
+#define SCTLR_ATA0 (1ULL << 42) /* v8.5-MemTag */
+#define SCTLR_ATA (1ULL << 43) /* v8.5-MemTag */
+#define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */
+
+#define CPTR_TCPAC (1U << 31)
+#define CPTR_TTA (1U << 20)
+#define CPTR_TFP (1U << 10)
+#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
+#define CPTR_EZ (1U << 8) /* CPTR_EL3 */
+
+#define MDCR_EPMAD (1U << 21)
+#define MDCR_EDAD (1U << 20)
+#define MDCR_SPME (1U << 17) /* MDCR_EL3 */
+#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */
+#define MDCR_SDD (1U << 16)
+#define MDCR_SPD (3U << 14)
+#define MDCR_TDRA (1U << 11)
+#define MDCR_TDOSA (1U << 10)
+#define MDCR_TDA (1U << 9)
+#define MDCR_TDE (1U << 8)
+#define MDCR_HPME (1U << 7)
+#define MDCR_TPM (1U << 6)
+#define MDCR_TPMCR (1U << 5)
+#define MDCR_HPMN (0x1fU)
+
+/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
+#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
+
+#define CPSR_M (0x1fU)
+#define CPSR_T (1U << 5)
+#define CPSR_F (1U << 6)
+#define CPSR_I (1U << 7)
+#define CPSR_A (1U << 8)
+#define CPSR_E (1U << 9)
+#define CPSR_IT_2_7 (0xfc00U)
+#define CPSR_GE (0xfU << 16)
+#define CPSR_IL (1U << 20)
+#define CPSR_DIT (1U << 21)
+#define CPSR_PAN (1U << 22)
+#define CPSR_SSBS (1U << 23)
+#define CPSR_J (1U << 24)
+#define CPSR_IT_0_1 (3U << 25)
+#define CPSR_Q (1U << 27)
+#define CPSR_V (1U << 28)
+#define CPSR_C (1U << 29)
+#define CPSR_Z (1U << 30)
+#define CPSR_N (1U << 31)
+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
+#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)
+
+#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
+#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
+ | CPSR_NZCV)
+/* Bits writable in user mode. */
+#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE | CPSR_E)
+/* Execution state bits. MRS read as zero, MSR writes ignored. */
+#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
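+
+/*
+ * Worked example (illustrative): the AArch32 value 0x600001d3 decodes as
+ * CPSR_Z | CPSR_C set, CPSR_A | CPSR_I | CPSR_F set, and CPSR_M == 0x13,
+ * i.e. flags Z and C, all asynchronous exceptions masked, Supervisor mode.
+ */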
+
+/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
+#define XPSR_EXCP 0x1ffU
+#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
+#define XPSR_IT_2_7 CPSR_IT_2_7
+#define XPSR_GE CPSR_GE
+#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
+#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
+#define XPSR_IT_0_1 CPSR_IT_0_1
+#define XPSR_Q CPSR_Q
+#define XPSR_V CPSR_V
+#define XPSR_C CPSR_C
+#define XPSR_Z CPSR_Z
+#define XPSR_N CPSR_N
+#define XPSR_NZCV CPSR_NZCV
+#define XPSR_IT CPSR_IT
+
+#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
+#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
+#define TTBCR_PD0 (1U << 4)
+#define TTBCR_PD1 (1U << 5)
+#define TTBCR_EPD0 (1U << 7)
+#define TTBCR_IRGN0 (3U << 8)
+#define TTBCR_ORGN0 (3U << 10)
+#define TTBCR_SH0 (3U << 12)
+#define TTBCR_T1SZ (3U << 16)
+#define TTBCR_A1 (1U << 22)
+#define TTBCR_EPD1 (1U << 23)
+#define TTBCR_IRGN1 (3U << 24)
+#define TTBCR_ORGN1 (3U << 26)
+#define TTBCR_SH1 (1U << 28)
+#define TTBCR_EAE (1U << 31)
+
+/* Bit definitions for ARMv8 SPSR (PSTATE) format.
+ * Only these are valid when in AArch64 mode; in
+ * AArch32 mode SPSRs are basically CPSR-format.
+ */
+#define PSTATE_SP (1U)
+#define PSTATE_M (0xFU)
+#define PSTATE_nRW (1U << 4)
+#define PSTATE_F (1U << 6)
+#define PSTATE_I (1U << 7)
+#define PSTATE_A (1U << 8)
+#define PSTATE_D (1U << 9)
+#define PSTATE_BTYPE (3U << 10)
+#define PSTATE_SSBS (1U << 12)
+#define PSTATE_IL (1U << 20)
+#define PSTATE_SS (1U << 21)
+#define PSTATE_PAN (1U << 22)
+#define PSTATE_UAO (1U << 23)
+#define PSTATE_DIT (1U << 24)
+#define PSTATE_TCO (1U << 25)
+#define PSTATE_V (1U << 28)
+#define PSTATE_C (1U << 29)
+#define PSTATE_Z (1U << 30)
+#define PSTATE_N (1U << 31)
+#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
+#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
+#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
+/* Mode values for AArch64 */
+#define PSTATE_MODE_EL3h 13
+#define PSTATE_MODE_EL3t 12
+#define PSTATE_MODE_EL2h 9
+#define PSTATE_MODE_EL2t 8
+#define PSTATE_MODE_EL1h 5
+#define PSTATE_MODE_EL1t 4
+#define PSTATE_MODE_EL0t 0
+
+/* Write a new value to v7m.exception, thus transitioning into or out
+ * of Handler mode; this may result in a change of active stack pointer.
+ */
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc);
+
+/* Map EL and handler into a PSTATE_MODE. */
+static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
+{
+ return (el << 2) | handler;
+}
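+
+/*
+ * E.g. aarch64_pstate_mode(1, true) == PSTATE_MODE_EL1h (5) and
+ * aarch64_pstate_mode(0, false) == PSTATE_MODE_EL0t (0), matching the
+ * PSTATE.M encodings listed above.
+ */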
+
+/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
+ * interprocessing, so we don't attempt to sync with the cpsr state used by
+ * the 32 bit decoder.
+ */
+static inline uint32_t pstate_read(CPUARMState *env)
+{
+ int ZF;
+
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
+ | env->pstate | env->daif | (env->btype << 10);
+}
+
+static inline void pstate_write(CPUARMState *env, uint32_t val)
+{
+ env->ZF = (~val) & PSTATE_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ env->daif = val & PSTATE_DAIF;
+ env->btype = (val >> 10) & 3;
+ env->pstate = val & ~CACHED_PSTATE_BITS;
+}
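+
+/*
+ * Illustrative round trip: the cached bits are reconstructed on read
+ * from the dedicated fields rather than from env->pstate, e.g.
+ *
+ *   pstate_write(env, PSTATE_N | PSTATE_C | PSTATE_MODE_EL1h);
+ *   assert(pstate_read(env) & PSTATE_N);  // comes back from env->NF
+ */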
+
+/* Return the current CPSR value. */
+uint32_t cpsr_read(CPUARMState *env);
+
+typedef enum CPSRWriteType {
+ CPSRWriteByInstr = 0, /* from guest MSR or CPS */
+ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
+ CPSRWriteRaw = 2, /* trust values, no reg bank switch, no hflags rebuild */
+ CPSRWriteByGDBStub = 3, /* from the GDB stub */
+} CPSRWriteType;
+
+/*
+ * Set the CPSR. Note that some bits of mask must be all-set or all-clear.
+ * This will do an arm_rebuild_hflags() if any of the bits in @mask
+ * correspond to TB flags bits cached in the hflags, unless @write_type
+ * is CPSRWriteRaw.
+ */
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type);
+
+/* Return the current xPSR value. */
+static inline uint32_t xpsr_read(CPUARMState *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | (env->GE << 16)
+ | env->v7m.exception;
+}
+
+/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
+static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ if (mask & XPSR_NZCV) {
+ env->ZF = (~val) & XPSR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+ if (mask & XPSR_Q) {
+ env->QF = ((val & XPSR_Q) != 0);
+ }
+ if (mask & XPSR_GE) {
+ env->GE = (val & XPSR_GE) >> 16;
+ }
+#ifndef CONFIG_USER_ONLY
+ if (mask & XPSR_T) {
+ env->thumb = ((val & XPSR_T) != 0);
+ }
+ if (mask & XPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & XPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & XPSR_EXCP) {
+ /* Note that this only happens on exception exit */
+ write_v7m_exception(env, val & XPSR_EXCP);
+ }
+#endif
+}
+
+#define HCR_VM (1ULL << 0)
+#define HCR_SWIO (1ULL << 1)
+#define HCR_PTW (1ULL << 2)
+#define HCR_FMO (1ULL << 3)
+#define HCR_IMO (1ULL << 4)
+#define HCR_AMO (1ULL << 5)
+#define HCR_VF (1ULL << 6)
+#define HCR_VI (1ULL << 7)
+#define HCR_VSE (1ULL << 8)
+#define HCR_FB (1ULL << 9)
+#define HCR_BSU_MASK (3ULL << 10)
+#define HCR_DC (1ULL << 12)
+#define HCR_TWI (1ULL << 13)
+#define HCR_TWE (1ULL << 14)
+#define HCR_TID0 (1ULL << 15)
+#define HCR_TID1 (1ULL << 16)
+#define HCR_TID2 (1ULL << 17)
+#define HCR_TID3 (1ULL << 18)
+#define HCR_TSC (1ULL << 19)
+#define HCR_TIDCP (1ULL << 20)
+#define HCR_TACR (1ULL << 21)
+#define HCR_TSW (1ULL << 22)
+#define HCR_TPCP (1ULL << 23)
+#define HCR_TPU (1ULL << 24)
+#define HCR_TTLB (1ULL << 25)
+#define HCR_TVM (1ULL << 26)
+#define HCR_TGE (1ULL << 27)
+#define HCR_TDZ (1ULL << 28)
+#define HCR_HCD (1ULL << 29)
+#define HCR_TRVM (1ULL << 30)
+#define HCR_RW (1ULL << 31)
+#define HCR_CD (1ULL << 32)
+#define HCR_ID (1ULL << 33)
+#define HCR_E2H (1ULL << 34)
+#define HCR_TLOR (1ULL << 35)
+#define HCR_TERR (1ULL << 36)
+#define HCR_TEA (1ULL << 37)
+#define HCR_MIOCNCE (1ULL << 38)
+/* RES0 bit 39 */
+#define HCR_APK (1ULL << 40)
+#define HCR_API (1ULL << 41)
+#define HCR_NV (1ULL << 42)
+#define HCR_NV1 (1ULL << 43)
+#define HCR_AT (1ULL << 44)
+#define HCR_NV2 (1ULL << 45)
+#define HCR_FWB (1ULL << 46)
+#define HCR_FIEN (1ULL << 47)
+/* RES0 bit 48 */
+#define HCR_TID4 (1ULL << 49)
+#define HCR_TICAB (1ULL << 50)
+#define HCR_AMVOFFEN (1ULL << 51)
+#define HCR_TOCU (1ULL << 52)
+#define HCR_ENSCXT (1ULL << 53)
+#define HCR_TTLBIS (1ULL << 54)
+#define HCR_TTLBOS (1ULL << 55)
+#define HCR_ATA (1ULL << 56)
+#define HCR_DCT (1ULL << 57)
+#define HCR_TID5 (1ULL << 58)
+#define HCR_TWEDEN (1ULL << 59)
+#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
+
+#define HPFAR_NS (1ULL << 63)
+
+#define SCR_NS (1U << 0)
+#define SCR_IRQ (1U << 1)
+#define SCR_FIQ (1U << 2)
+#define SCR_EA (1U << 3)
+#define SCR_FW (1U << 4)
+#define SCR_AW (1U << 5)
+#define SCR_NET (1U << 6)
+#define SCR_SMD (1U << 7)
+#define SCR_HCE (1U << 8)
+#define SCR_SIF (1U << 9)
+#define SCR_RW (1U << 10)
+#define SCR_ST (1U << 11)
+#define SCR_TWI (1U << 12)
+#define SCR_TWE (1U << 13)
+#define SCR_TLOR (1U << 14)
+#define SCR_TERR (1U << 15)
+#define SCR_APK (1U << 16)
+#define SCR_API (1U << 17)
+#define SCR_EEL2 (1U << 18)
+#define SCR_EASE (1U << 19)
+#define SCR_NMEA (1U << 20)
+#define SCR_FIEN (1U << 21)
+#define SCR_ENSCXT (1U << 25)
+#define SCR_ATA (1U << 26)
+
+#define HSTR_TTEE (1 << 16)
+#define HSTR_TJDBX (1 << 17)
+
+/* Return the current FPSCR value. */
+uint32_t vfp_get_fpscr(CPUARMState *env);
+void vfp_set_fpscr(CPUARMState *env, uint32_t val);
+
+/* FPCR, Floating Point Control Register
+ * FPSR, Floating Point Status Register
+ *
+ * For A64 the FPSCR is split into two logically distinct registers,
+ * FPCR and FPSR. However since they still use non-overlapping bits
+ * we store the underlying state in fpscr and just mask on read/write.
+ */
+#define FPSR_MASK 0xf800009f
+#define FPCR_MASK 0x07ff9f00
+
+#define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */
+#define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */
+#define FPCR_OFE (1 << 10) /* Overflow exception trap enable */
+#define FPCR_UFE (1 << 11) /* Underflow exception trap enable */
+#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
+#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
+#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
+#define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
+#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
+#define FPCR_DN (1 << 25) /* Default NaN enable bit */
+#define FPCR_AHP (1 << 26) /* Alternative half-precision */
+#define FPCR_QC (1 << 27) /* Cumulative saturation bit */
+#define FPCR_V (1 << 28) /* FP overflow flag */
+#define FPCR_C (1 << 29) /* FP carry flag */
+#define FPCR_Z (1 << 30) /* FP zero flag */
+#define FPCR_N (1 << 31) /* FP negative flag */
+
+#define FPCR_LTPSIZE_SHIFT 16 /* LTPSIZE, M-profile only */
+#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
+#define FPCR_LTPSIZE_LENGTH 3
+
+#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
+#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)
+
+static inline uint32_t vfp_get_fpsr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPSR_MASK;
+}
+
+static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
+
+static inline uint32_t vfp_get_fpcr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPCR_MASK;
+}
+
+static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
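+
+/*
+ * A consequence of the split (illustrative): FPCR_MASK and FPSR_MASK do
+ * not overlap, so a write through one view cannot disturb the other,
+ * e.g.
+ *
+ *   vfp_set_fpcr(env, 1 << 22);        // set the RMODE field to 0b01
+ *   // vfp_get_fpsr(env) is unchanged by the write above
+ */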
+
+enum arm_cpu_mode {
+ ARM_CPU_MODE_USR = 0x10,
+ ARM_CPU_MODE_FIQ = 0x11,
+ ARM_CPU_MODE_IRQ = 0x12,
+ ARM_CPU_MODE_SVC = 0x13,
+ ARM_CPU_MODE_MON = 0x16,
+ ARM_CPU_MODE_ABT = 0x17,
+ ARM_CPU_MODE_HYP = 0x1a,
+ ARM_CPU_MODE_UND = 0x1b,
+ ARM_CPU_MODE_SYS = 0x1f
+};
+
+/* VFP system registers. */
+#define ARM_VFP_FPSID 0
+#define ARM_VFP_FPSCR 1
+#define ARM_VFP_MVFR2 5
+#define ARM_VFP_MVFR1 6
+#define ARM_VFP_MVFR0 7
+#define ARM_VFP_FPEXC 8
+#define ARM_VFP_FPINST 9
+#define ARM_VFP_FPINST2 10
+/* These ones are M-profile only */
+#define ARM_VFP_FPSCR_NZCVQC 2
+#define ARM_VFP_VPR 12
+#define ARM_VFP_P0 13
+#define ARM_VFP_FPCXT_NS 14
+#define ARM_VFP_FPCXT_S 15
+
+/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
+#define QEMU_VFP_FPSCR_NZCV 0xffff
+
+/* iwMMXt coprocessor control registers. */
+#define ARM_IWMMXT_wCID 0
+#define ARM_IWMMXT_wCon 1
+#define ARM_IWMMXT_wCSSF 2
+#define ARM_IWMMXT_wCASF 3
+#define ARM_IWMMXT_wCGR0 8
+#define ARM_IWMMXT_wCGR1 9
+#define ARM_IWMMXT_wCGR2 10
+#define ARM_IWMMXT_wCGR3 11
+
+/* V7M CCR bits */
+FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
+FIELD(V7M_CCR, USERSETMPEND, 1, 1)
+FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
+FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
+FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
+FIELD(V7M_CCR, STKALIGN, 9, 1)
+FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
+FIELD(V7M_CCR, DC, 16, 1)
+FIELD(V7M_CCR, IC, 17, 1)
+FIELD(V7M_CCR, BP, 18, 1)
+FIELD(V7M_CCR, LOB, 19, 1)
+FIELD(V7M_CCR, TRD, 20, 1)
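+
+/*
+ * The FIELD() macro (hw/registerfields.h) expands each line above into
+ * R_<REG>_<FIELD>_SHIFT/_LENGTH/_MASK constants, e.g.
+ * R_V7M_CCR_STKALIGN_MASK == (1 << 9); masks such as
+ * R_V7M_FPCCR_BANKED_MASK below are composed from them.
+ */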
+
+/* V7M SCR bits */
+FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
+FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
+FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
+FIELD(V7M_SCR, SEVONPEND, 4, 1)
+
+/* V7M AIRCR bits */
+FIELD(V7M_AIRCR, VECTRESET, 0, 1)
+FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
+FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
+FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
+FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
+FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
+FIELD(V7M_AIRCR, PRIS, 14, 1)
+FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
+FIELD(V7M_AIRCR, VECTKEY, 16, 16)
+
+/* V7M CFSR bits for MMFSR */
+FIELD(V7M_CFSR, IACCVIOL, 0, 1)
+FIELD(V7M_CFSR, DACCVIOL, 1, 1)
+FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
+FIELD(V7M_CFSR, MSTKERR, 4, 1)
+FIELD(V7M_CFSR, MLSPERR, 5, 1)
+FIELD(V7M_CFSR, MMARVALID, 7, 1)
+
+/* V7M CFSR bits for BFSR */
+FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
+FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
+FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
+FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
+FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
+FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
+FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)
+
+/* V7M CFSR bits for UFSR */
+FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
+FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
+FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
+FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
+FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
+FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
+FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
+
+/* V7M CFSR bit masks covering all of the subregister bits */
+FIELD(V7M_CFSR, MMFSR, 0, 8)
+FIELD(V7M_CFSR, BFSR, 8, 8)
+FIELD(V7M_CFSR, UFSR, 16, 16)
+
+/* V7M HFSR bits */
+FIELD(V7M_HFSR, VECTTBL, 1, 1)
+FIELD(V7M_HFSR, FORCED, 30, 1)
+FIELD(V7M_HFSR, DEBUGEVT, 31, 1)
+
+/* V7M DFSR bits */
+FIELD(V7M_DFSR, HALTED, 0, 1)
+FIELD(V7M_DFSR, BKPT, 1, 1)
+FIELD(V7M_DFSR, DWTTRAP, 2, 1)
+FIELD(V7M_DFSR, VCATCH, 3, 1)
+FIELD(V7M_DFSR, EXTERNAL, 4, 1)
+
+/* V7M SFSR bits */
+FIELD(V7M_SFSR, INVEP, 0, 1)
+FIELD(V7M_SFSR, INVIS, 1, 1)
+FIELD(V7M_SFSR, INVER, 2, 1)
+FIELD(V7M_SFSR, AUVIOL, 3, 1)
+FIELD(V7M_SFSR, INVTRAN, 4, 1)
+FIELD(V7M_SFSR, LSPERR, 5, 1)
+FIELD(V7M_SFSR, SFARVALID, 6, 1)
+FIELD(V7M_SFSR, LSERR, 7, 1)
+
+/* v7M MPU_CTRL bits */
+FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
+FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
+FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
+
+/* v7M CLIDR bits */
+FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
+FIELD(V7M_CLIDR, LOUIS, 21, 3)
+FIELD(V7M_CLIDR, LOC, 24, 3)
+FIELD(V7M_CLIDR, LOUU, 27, 3)
+FIELD(V7M_CLIDR, ICB, 30, 2)
+
+FIELD(V7M_CSSELR, IND, 0, 1)
+FIELD(V7M_CSSELR, LEVEL, 1, 3)
+/* We use the combination of InD and Level to index into cpu->ccsidr[];
+ * define a mask for this and check that it doesn't permit running off
+ * the end of the array.
+ */
+FIELD(V7M_CSSELR, INDEX, 0, 4)
+
+/* v7M FPCCR bits */
+FIELD(V7M_FPCCR, LSPACT, 0, 1)
+FIELD(V7M_FPCCR, USER, 1, 1)
+FIELD(V7M_FPCCR, S, 2, 1)
+FIELD(V7M_FPCCR, THREAD, 3, 1)
+FIELD(V7M_FPCCR, HFRDY, 4, 1)
+FIELD(V7M_FPCCR, MMRDY, 5, 1)
+FIELD(V7M_FPCCR, BFRDY, 6, 1)
+FIELD(V7M_FPCCR, SFRDY, 7, 1)
+FIELD(V7M_FPCCR, MONRDY, 8, 1)
+FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
+FIELD(V7M_FPCCR, UFRDY, 10, 1)
+FIELD(V7M_FPCCR, RES0, 11, 15)
+FIELD(V7M_FPCCR, TS, 26, 1)
+FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
+FIELD(V7M_FPCCR, CLRONRET, 28, 1)
+FIELD(V7M_FPCCR, LSPENS, 29, 1)
+FIELD(V7M_FPCCR, LSPEN, 30, 1)
+FIELD(V7M_FPCCR, ASPEN, 31, 1)
+/* These bits are banked. Others are non-banked and live in the M_REG_S bank */
+#define R_V7M_FPCCR_BANKED_MASK \
+ (R_V7M_FPCCR_LSPACT_MASK | \
+ R_V7M_FPCCR_USER_MASK | \
+ R_V7M_FPCCR_THREAD_MASK | \
+ R_V7M_FPCCR_MMRDY_MASK | \
+ R_V7M_FPCCR_SPLIMVIOL_MASK | \
+ R_V7M_FPCCR_UFRDY_MASK | \
+ R_V7M_FPCCR_ASPEN_MASK)
+
+/* v7M VPR bits */
+FIELD(V7M_VPR, P0, 0, 16)
+FIELD(V7M_VPR, MASK01, 16, 4)
+FIELD(V7M_VPR, MASK23, 20, 4)
+
+/*
+ * System register ID fields.
+ */
+FIELD(CLIDR_EL1, CTYPE1, 0, 3)
+FIELD(CLIDR_EL1, CTYPE2, 3, 3)
+FIELD(CLIDR_EL1, CTYPE3, 6, 3)
+FIELD(CLIDR_EL1, CTYPE4, 9, 3)
+FIELD(CLIDR_EL1, CTYPE5, 12, 3)
+FIELD(CLIDR_EL1, CTYPE6, 15, 3)
+FIELD(CLIDR_EL1, CTYPE7, 18, 3)
+FIELD(CLIDR_EL1, LOUIS, 21, 3)
+FIELD(CLIDR_EL1, LOC, 24, 3)
+FIELD(CLIDR_EL1, LOUU, 27, 3)
+FIELD(CLIDR_EL1, ICB, 30, 3)
+
+/* When FEAT_CCIDX is implemented */
+FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3)
+FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21)
+FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24)
+
+/* When FEAT_CCIDX is not implemented */
+FIELD(CCSIDR_EL1, LINESIZE, 0, 3)
+FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10)
+FIELD(CCSIDR_EL1, NUMSETS, 13, 15)
+
+FIELD(CTR_EL0, IMINLINE, 0, 4)
+FIELD(CTR_EL0, L1IP, 14, 2)
+FIELD(CTR_EL0, DMINLINE, 16, 4)
+FIELD(CTR_EL0, ERG, 20, 4)
+FIELD(CTR_EL0, CWG, 24, 4)
+FIELD(CTR_EL0, IDC, 28, 1)
+FIELD(CTR_EL0, DIC, 29, 1)
+FIELD(CTR_EL0, TMINLINE, 32, 6)
+
+FIELD(MIDR_EL1, REVISION, 0, 4)
+FIELD(MIDR_EL1, PARTNUM, 4, 12)
+FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
+FIELD(MIDR_EL1, VARIANT, 20, 4)
+FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
+
+FIELD(ID_ISAR0, SWAP, 0, 4)
+FIELD(ID_ISAR0, BITCOUNT, 4, 4)
+FIELD(ID_ISAR0, BITFIELD, 8, 4)
+FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
+FIELD(ID_ISAR0, COPROC, 16, 4)
+FIELD(ID_ISAR0, DEBUG, 20, 4)
+FIELD(ID_ISAR0, DIVIDE, 24, 4)
+
+FIELD(ID_ISAR1, ENDIAN, 0, 4)
+FIELD(ID_ISAR1, EXCEPT, 4, 4)
+FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
+FIELD(ID_ISAR1, EXTEND, 12, 4)
+FIELD(ID_ISAR1, IFTHEN, 16, 4)
+FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
+FIELD(ID_ISAR1, INTERWORK, 24, 4)
+FIELD(ID_ISAR1, JAZELLE, 28, 4)
+
+FIELD(ID_ISAR2, LOADSTORE, 0, 4)
+FIELD(ID_ISAR2, MEMHINT, 4, 4)
+FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
+FIELD(ID_ISAR2, MULT, 12, 4)
+FIELD(ID_ISAR2, MULTS, 16, 4)
+FIELD(ID_ISAR2, MULTU, 20, 4)
+FIELD(ID_ISAR2, PSR_AR, 24, 4)
+FIELD(ID_ISAR2, REVERSAL, 28, 4)
+
+FIELD(ID_ISAR3, SATURATE, 0, 4)
+FIELD(ID_ISAR3, SIMD, 4, 4)
+FIELD(ID_ISAR3, SVC, 8, 4)
+FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
+FIELD(ID_ISAR3, TABBRANCH, 16, 4)
+FIELD(ID_ISAR3, T32COPY, 20, 4)
+FIELD(ID_ISAR3, TRUENOP, 24, 4)
+FIELD(ID_ISAR3, T32EE, 28, 4)
+
+FIELD(ID_ISAR4, UNPRIV, 0, 4)
+FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
+FIELD(ID_ISAR4, WRITEBACK, 8, 4)
+FIELD(ID_ISAR4, SMC, 12, 4)
+FIELD(ID_ISAR4, BARRIER, 16, 4)
+FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
+FIELD(ID_ISAR4, PSR_M, 24, 4)
+FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
+
+FIELD(ID_ISAR5, SEVL, 0, 4)
+FIELD(ID_ISAR5, AES, 4, 4)
+FIELD(ID_ISAR5, SHA1, 8, 4)
+FIELD(ID_ISAR5, SHA2, 12, 4)
+FIELD(ID_ISAR5, CRC32, 16, 4)
+FIELD(ID_ISAR5, RDM, 24, 4)
+FIELD(ID_ISAR5, VCMA, 28, 4)
+
+FIELD(ID_ISAR6, JSCVT, 0, 4)
+FIELD(ID_ISAR6, DP, 4, 4)
+FIELD(ID_ISAR6, FHM, 8, 4)
+FIELD(ID_ISAR6, SB, 12, 4)
+FIELD(ID_ISAR6, SPECRES, 16, 4)
+FIELD(ID_ISAR6, BF16, 20, 4)
+FIELD(ID_ISAR6, I8MM, 24, 4)
+
+FIELD(ID_MMFR0, VMSA, 0, 4)
+FIELD(ID_MMFR0, PMSA, 4, 4)
+FIELD(ID_MMFR0, OUTERSHR, 8, 4)
+FIELD(ID_MMFR0, SHARELVL, 12, 4)
+FIELD(ID_MMFR0, TCM, 16, 4)
+FIELD(ID_MMFR0, AUXREG, 20, 4)
+FIELD(ID_MMFR0, FCSE, 24, 4)
+FIELD(ID_MMFR0, INNERSHR, 28, 4)
+
+FIELD(ID_MMFR1, L1HVDVA, 0, 4)
+FIELD(ID_MMFR1, L1UNIVA, 4, 4)
+FIELD(ID_MMFR1, L1HVDSW, 8, 4)
+FIELD(ID_MMFR1, L1UNISW, 12, 4)
+FIELD(ID_MMFR1, L1HVD, 16, 4)
+FIELD(ID_MMFR1, L1UNI, 20, 4)
+FIELD(ID_MMFR1, L1TSTCLN, 24, 4)
+FIELD(ID_MMFR1, BPRED, 28, 4)
+
+FIELD(ID_MMFR2, L1HVDFG, 0, 4)
+FIELD(ID_MMFR2, L1HVDBG, 4, 4)
+FIELD(ID_MMFR2, L1HVDRNG, 8, 4)
+FIELD(ID_MMFR2, HVDTLB, 12, 4)
+FIELD(ID_MMFR2, UNITLB, 16, 4)
+FIELD(ID_MMFR2, MEMBARR, 20, 4)
+FIELD(ID_MMFR2, WFISTALL, 24, 4)
+FIELD(ID_MMFR2, HWACCFLG, 28, 4)
+
+FIELD(ID_MMFR3, CMAINTVA, 0, 4)
+FIELD(ID_MMFR3, CMAINTSW, 4, 4)
+FIELD(ID_MMFR3, BPMAINT, 8, 4)
+FIELD(ID_MMFR3, MAINTBCST, 12, 4)
+FIELD(ID_MMFR3, PAN, 16, 4)
+FIELD(ID_MMFR3, COHWALK, 20, 4)
+FIELD(ID_MMFR3, CMEMSZ, 24, 4)
+FIELD(ID_MMFR3, SUPERSEC, 28, 4)
+
+FIELD(ID_MMFR4, SPECSEI, 0, 4)
+FIELD(ID_MMFR4, AC2, 4, 4)
+FIELD(ID_MMFR4, XNX, 8, 4)
+FIELD(ID_MMFR4, CNP, 12, 4)
+FIELD(ID_MMFR4, HPDS, 16, 4)
+FIELD(ID_MMFR4, LSM, 20, 4)
+FIELD(ID_MMFR4, CCIDX, 24, 4)
+FIELD(ID_MMFR4, EVT, 28, 4)
+
+FIELD(ID_MMFR5, ETS, 0, 4)
+
+FIELD(ID_PFR0, STATE0, 0, 4)
+FIELD(ID_PFR0, STATE1, 4, 4)
+FIELD(ID_PFR0, STATE2, 8, 4)
+FIELD(ID_PFR0, STATE3, 12, 4)
+FIELD(ID_PFR0, CSV2, 16, 4)
+FIELD(ID_PFR0, AMU, 20, 4)
+FIELD(ID_PFR0, DIT, 24, 4)
+FIELD(ID_PFR0, RAS, 28, 4)
+
+FIELD(ID_PFR1, PROGMOD, 0, 4)
+FIELD(ID_PFR1, SECURITY, 4, 4)
+FIELD(ID_PFR1, MPROGMOD, 8, 4)
+FIELD(ID_PFR1, VIRTUALIZATION, 12, 4)
+FIELD(ID_PFR1, GENTIMER, 16, 4)
+FIELD(ID_PFR1, SEC_FRAC, 20, 4)
+FIELD(ID_PFR1, VIRT_FRAC, 24, 4)
+FIELD(ID_PFR1, GIC, 28, 4)
+
+FIELD(ID_PFR2, CSV3, 0, 4)
+FIELD(ID_PFR2, SSBS, 4, 4)
+FIELD(ID_PFR2, RAS_FRAC, 8, 4)
+
+FIELD(ID_AA64ISAR0, AES, 4, 4)
+FIELD(ID_AA64ISAR0, SHA1, 8, 4)
+FIELD(ID_AA64ISAR0, SHA2, 12, 4)
+FIELD(ID_AA64ISAR0, CRC32, 16, 4)
+FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
+FIELD(ID_AA64ISAR0, RDM, 28, 4)
+FIELD(ID_AA64ISAR0, SHA3, 32, 4)
+FIELD(ID_AA64ISAR0, SM3, 36, 4)
+FIELD(ID_AA64ISAR0, SM4, 40, 4)
+FIELD(ID_AA64ISAR0, DP, 44, 4)
+FIELD(ID_AA64ISAR0, FHM, 48, 4)
+FIELD(ID_AA64ISAR0, TS, 52, 4)
+FIELD(ID_AA64ISAR0, TLB, 56, 4)
+FIELD(ID_AA64ISAR0, RNDR, 60, 4)
+
+FIELD(ID_AA64ISAR1, DPB, 0, 4)
+FIELD(ID_AA64ISAR1, APA, 4, 4)
+FIELD(ID_AA64ISAR1, API, 8, 4)
+FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
+FIELD(ID_AA64ISAR1, FCMA, 16, 4)
+FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
+FIELD(ID_AA64ISAR1, GPA, 24, 4)
+FIELD(ID_AA64ISAR1, GPI, 28, 4)
+FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
+FIELD(ID_AA64ISAR1, SB, 36, 4)
+FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
+FIELD(ID_AA64ISAR1, BF16, 44, 4)
+FIELD(ID_AA64ISAR1, DGH, 48, 4)
+FIELD(ID_AA64ISAR1, I8MM, 52, 4)
+
+FIELD(ID_AA64PFR0, EL0, 0, 4)
+FIELD(ID_AA64PFR0, EL1, 4, 4)
+FIELD(ID_AA64PFR0, EL2, 8, 4)
+FIELD(ID_AA64PFR0, EL3, 12, 4)
+FIELD(ID_AA64PFR0, FP, 16, 4)
+FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
+FIELD(ID_AA64PFR0, GIC, 24, 4)
+FIELD(ID_AA64PFR0, RAS, 28, 4)
+FIELD(ID_AA64PFR0, SVE, 32, 4)
+FIELD(ID_AA64PFR0, SEL2, 36, 4)
+FIELD(ID_AA64PFR0, MPAM, 40, 4)
+FIELD(ID_AA64PFR0, AMU, 44, 4)
+FIELD(ID_AA64PFR0, DIT, 48, 4)
+FIELD(ID_AA64PFR0, CSV2, 56, 4)
+FIELD(ID_AA64PFR0, CSV3, 60, 4)
+
+FIELD(ID_AA64PFR1, BT, 0, 4)
+FIELD(ID_AA64PFR1, SSBS, 4, 4)
+FIELD(ID_AA64PFR1, MTE, 8, 4)
+FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
+FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
+
+FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
+FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
+FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
+FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
+FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
+FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
+FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
+FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
+FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
+FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
+FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
+FIELD(ID_AA64MMFR0, EXS, 44, 4)
+FIELD(ID_AA64MMFR0, FGT, 56, 4)
+FIELD(ID_AA64MMFR0, ECV, 60, 4)
+
+FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
+FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
+FIELD(ID_AA64MMFR1, VH, 8, 4)
+FIELD(ID_AA64MMFR1, HPDS, 12, 4)
+FIELD(ID_AA64MMFR1, LO, 16, 4)
+FIELD(ID_AA64MMFR1, PAN, 20, 4)
+FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
+FIELD(ID_AA64MMFR1, XNX, 28, 4)
+FIELD(ID_AA64MMFR1, TWED, 32, 4)
+FIELD(ID_AA64MMFR1, ETS, 36, 4)
+
+FIELD(ID_AA64MMFR2, CNP, 0, 4)
+FIELD(ID_AA64MMFR2, UAO, 4, 4)
+FIELD(ID_AA64MMFR2, LSM, 8, 4)
+FIELD(ID_AA64MMFR2, IESB, 12, 4)
+FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
+FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
+FIELD(ID_AA64MMFR2, NV, 24, 4)
+FIELD(ID_AA64MMFR2, ST, 28, 4)
+FIELD(ID_AA64MMFR2, AT, 32, 4)
+FIELD(ID_AA64MMFR2, IDS, 36, 4)
+FIELD(ID_AA64MMFR2, FWB, 40, 4)
+FIELD(ID_AA64MMFR2, TTL, 48, 4)
+FIELD(ID_AA64MMFR2, BBM, 52, 4)
+FIELD(ID_AA64MMFR2, EVT, 56, 4)
+FIELD(ID_AA64MMFR2, E0PD, 60, 4)
+
+FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
+FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
+FIELD(ID_AA64DFR0, PMUVER, 8, 4)
+FIELD(ID_AA64DFR0, BRPS, 12, 4)
+FIELD(ID_AA64DFR0, WRPS, 20, 4)
+FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
+FIELD(ID_AA64DFR0, PMSVER, 32, 4)
+FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
+FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
+FIELD(ID_AA64DFR0, MTPMU, 48, 4)
+
+FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
+FIELD(ID_AA64ZFR0, AES, 4, 4)
+FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
+FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
+FIELD(ID_AA64ZFR0, SHA3, 32, 4)
+FIELD(ID_AA64ZFR0, SM4, 40, 4)
+FIELD(ID_AA64ZFR0, I8MM, 44, 4)
+FIELD(ID_AA64ZFR0, F32MM, 52, 4)
+FIELD(ID_AA64ZFR0, F64MM, 56, 4)
+
+FIELD(ID_DFR0, COPDBG, 0, 4)
+FIELD(ID_DFR0, COPSDBG, 4, 4)
+FIELD(ID_DFR0, MMAPDBG, 8, 4)
+FIELD(ID_DFR0, COPTRC, 12, 4)
+FIELD(ID_DFR0, MMAPTRC, 16, 4)
+FIELD(ID_DFR0, MPROFDBG, 20, 4)
+FIELD(ID_DFR0, PERFMON, 24, 4)
+FIELD(ID_DFR0, TRACEFILT, 28, 4)
+
+FIELD(ID_DFR1, MTPMU, 0, 4)
+
+FIELD(DBGDIDR, SE_IMP, 12, 1)
+FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
+FIELD(DBGDIDR, VERSION, 16, 4)
+FIELD(DBGDIDR, CTX_CMPS, 20, 4)
+FIELD(DBGDIDR, BRPS, 24, 4)
+FIELD(DBGDIDR, WRPS, 28, 4)
+
+FIELD(MVFR0, SIMDREG, 0, 4)
+FIELD(MVFR0, FPSP, 4, 4)
+FIELD(MVFR0, FPDP, 8, 4)
+FIELD(MVFR0, FPTRAP, 12, 4)
+FIELD(MVFR0, FPDIVIDE, 16, 4)
+FIELD(MVFR0, FPSQRT, 20, 4)
+FIELD(MVFR0, FPSHVEC, 24, 4)
+FIELD(MVFR0, FPROUND, 28, 4)
+
+FIELD(MVFR1, FPFTZ, 0, 4)
+FIELD(MVFR1, FPDNAN, 4, 4)
+FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */
+FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */
+FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */
+FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */
+FIELD(MVFR1, MVE, 8, 4) /* M-profile only */
+FIELD(MVFR1, FP16, 20, 4) /* M-profile only */
+FIELD(MVFR1, FPHP, 24, 4)
+FIELD(MVFR1, SIMDFMAC, 28, 4)
+
+FIELD(MVFR2, SIMDMISC, 0, 4)
+FIELD(MVFR2, FPMISC, 4, 4)
+
+QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
+
+/* If adding a feature bit which corresponds to a Linux ELF
+ * HWCAP bit, remember to update the feature-bit-to-hwcap
+ * mapping in linux-user/elfload.c:get_elf_hwcap().
+ */
+enum arm_features {
+ ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
+ ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
+ ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
+ ARM_FEATURE_V6,
+ ARM_FEATURE_V6K,
+ ARM_FEATURE_V7,
+ ARM_FEATURE_THUMB2,
+ ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */
+ ARM_FEATURE_NEON,
+ ARM_FEATURE_M, /* Microcontroller profile. */
+ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
+ ARM_FEATURE_THUMB2EE,
+ ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
+ ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
+ ARM_FEATURE_V4T,
+ ARM_FEATURE_V5,
+ ARM_FEATURE_STRONGARM,
+ ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
+ ARM_FEATURE_GENERIC_TIMER,
+ ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
+ ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
+ ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
+ ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
+ ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
+ ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
+ ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
+ ARM_FEATURE_V8,
+ ARM_FEATURE_AARCH64, /* supports 64 bit mode */
+ ARM_FEATURE_CBAR, /* has cp15 CBAR */
+ ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
+ ARM_FEATURE_EL2, /* has EL2 Virtualization support */
+ ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
+ ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
+ ARM_FEATURE_PMU, /* has PMU support */
+ ARM_FEATURE_VBAR, /* has cp15 VBAR */
+ ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
+ ARM_FEATURE_M_MAIN, /* M profile Main Extension */
+ ARM_FEATURE_V8_1M, /* M profile extras only in v8.1M and later */
+};
+
+static inline int arm_feature(CPUARMState *env, int feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
+
+#if !defined(CONFIG_USER_ONLY)
+/* Return true if exception levels below EL3 are in secure state,
+ * or would be following an exception return to that level.
+ * Unlike arm_is_secure() (which is always a question about the
+ * _current_ state of the CPU) this doesn't care about the current
+ * EL or mode.
+ */
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return !(env->cp15.scr_el3 & SCR_NS);
+ } else {
+ /* If EL3 is not supported then the secure state is implementation
+ * defined, in which case QEMU defaults to non-secure.
+ */
+ return false;
+ }
+}
+
+/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
+static inline bool arm_is_el3_or_mon(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
+ /* CPU currently in AArch64 state and EL3 */
+ return true;
+ } else if (!is_a64(env) &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ /* CPU currently in AArch32 state and monitor mode */
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Return true if the processor is in secure state */
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ if (arm_is_el3_or_mon(env)) {
+ return true;
+ }
+ return arm_is_secure_below_el3(env);
+}
+
+/*
+ * Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
+ * This corresponds to the pseudocode EL2Enabled()
+ */
+static inline bool arm_is_el2_enabled(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (arm_is_secure_below_el3(env)) {
+ return (env->cp15.scr_el3 & SCR_EEL2) != 0;
+ }
+ return true;
+ }
+ return false;
+}
+
+#else
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ return false;
+}
+
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ return false;
+}
+
+static inline bool arm_is_el2_enabled(CPUARMState *env)
+{
+ return false;
+}
+#endif
+
+/**
+ * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
+ * E.g. when in secure state, fields in HCR_EL2 are suppressed,
+ * "for all purposes other than a direct read or write access of HCR_EL2."
+ * Not included here is HCR_RW.
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env);
+
+/* Return true if the specified exception level is running in AArch64 state. */
+static inline bool arm_el_is_aa64(CPUARMState *env, int el)
+{
+ /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
+ */
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ /* The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
+ */
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
+ aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_is_el2_enabled(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
+}
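+
+/*
+ * Example of the cascade (illustrative): if EL3 is AArch64 but
+ * SCR_EL3.RW is 0 (and Secure EL2 is not in use), then
+ * arm_el_is_aa64(env, 2) and arm_el_is_aa64(env, 1) both return false:
+ * lower levels can never be wider than the level above them.
+ */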
+
+/* Function for determining whether guest cp register reads and writes should
+ * access the secure or non-secure bank of a cp register. When EL3 is
+ * operating in AArch32 state, the NS-bit determines whether the secure
+ * instance of a cp register should be used. When EL3 is AArch64 (or if
+ * it doesn't exist at all) then there is no register banking, and all
+ * accesses are to the non-secure version.
+ */
+static inline bool access_secure_reg(CPUARMState *env)
+{
+ bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) &&
+ !(env->cp15.scr_el3 & SCR_NS));
+
+ return ret;
+}
+
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure) \
+ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
+ do { \
+ if (_secure) { \
+ (_env)->cp15._regname##_s = (_val); \
+ } else { \
+ (_env)->cp15._regname##_ns = (_val); \
+ } \
+ } while (0)
+
+/* Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system. These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
+ A32_BANKED_REG_GET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
+ A32_BANKED_REG_SET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
+ (_val))
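+
+/*
+ * Usage sketch (hypothetical caller): read and update the banked CSSELR
+ * for the current security state:
+ *
+ *   uint64_t v = A32_BANKED_CURRENT_REG_GET(env, csselr);
+ *   A32_BANKED_CURRENT_REG_SET(env, csselr, v | 1);
+ *
+ * which expands to accesses of env->cp15.csselr_s or csselr_ns.
+ */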
+
+void arm_cpu_list(void);
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure);
+
+/* Interface between CPU and Interrupt controller. */
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_can_take_pending_exception(void *opaque);
+#else
+static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
+{
+ return true;
+}
+#endif
+/**
+ * armv7m_nvic_set_pending: mark the specified exception as pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Marks the specified exception as pending. Note that we will assert()
+ * if @secure is true and @irq does not specify one of the fixed set
+ * of architecturally banked exceptions.
+ */
+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_derived: mark this derived exception as pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for derived
+ * exceptions (exceptions generated in the course of trying to take
+ * a different exception).
+ */
+void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_set_pending_lazyfp: mark this lazy FP exception as pending
+ * @opaque: the NVIC
+ * @irq: the exception number to mark pending
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Similar to armv7m_nvic_set_pending(), but specifically for exceptions
+ * generated in the course of lazy stacking of FP registers.
+ */
+void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_get_pending_irq_info: return highest priority pending
+ * exception, and whether it targets Secure state
+ * @opaque: the NVIC
+ * @pirq: set to pending exception number
+ * @ptargets_secure: set to whether pending exception targets Secure
+ *
+ * This function writes the number of the highest priority pending
+ * exception (the one which would be made active by
+ * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
+ * to true if the current highest priority pending exception should
+ * be taken to Secure state, false for NS.
+ */
+void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
+ bool *ptargets_secure);
+/**
+ * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
+ * @opaque: the NVIC
+ *
+ * Move the current highest priority pending exception from the pending
+ * state to the active state, and update v7m.exception to indicate that
+ * it is the exception currently being handled.
+ */
+void armv7m_nvic_acknowledge_irq(void *opaque);
+/**
+ * armv7m_nvic_complete_irq: complete specified interrupt or exception
+ * @opaque: the NVIC
+ * @irq: the exception number to complete
+ * @secure: true if this exception was secure
+ *
+ * Returns: -1 if the irq was not active
+ * 1 if completing this irq brought us back to base (no active irqs)
+ * 0 if there is still an irq active after this one was completed
+ * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
+ */
+int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_get_ready_status:
+ * @opaque: the NVIC
+ * @irq: the exception number to check
+ * @secure: false for non-banked exceptions or for the nonsecure
+ * version of a banked exception, true for the secure version of a banked
+ * exception.
+ *
+ * Return whether an exception is "ready", i.e. whether the exception is
+ * enabled and is configured at a priority which would allow it to
+ * interrupt the current execution priority. This controls whether the
+ * RDY bit for it in the FPCCR is set.
+ */
+bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure);
+/**
+ * armv7m_nvic_raw_execution_priority: return the raw execution priority
+ * @opaque: the NVIC
+ *
+ * Returns: the raw execution priority as defined by the v8M architecture.
+ * This is the execution priority minus the effects of AIRCR.PRIS,
+ * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
+ * (v8M ARM ARM I_PKLD.)
+ */
+int armv7m_nvic_raw_execution_priority(void *opaque);
+/**
+ * armv7m_nvic_neg_prio_requested: return true if the requested execution
+ * priority is negative for the specified security state.
+ * @opaque: the NVIC
+ * @secure: the security state to test
+ * This corresponds to the pseudocode IsReqExecPriNeg().
+ */
+#ifndef CONFIG_USER_ONLY
+bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure);
+#else
+static inline bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
+{
+ return false;
+}
+#endif
+
+/* Interface for defining coprocessor registers.
+ * Registers are defined in tables of ARMCPRegInfo structs
+ * which are passed to define_arm_cp_regs().
+ */
+
+/* When looking up a coprocessor register we look for it
+ * via an integer which encodes all of:
+ * coprocessor number
+ * Crn, Crm, opc1, opc2 fields
+ * 32 or 64 bit register (ie is it accessed via MRC/MCR
+ * or via MRRC/MCRR?)
+ * non-secure/secure bank (AArch32 only)
+ * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
+ * (In this case crn and opc2 should be zero.)
+ * For AArch64, there is no 32/64 bit size distinction;
+ * instead all registers have a 2 bit op0, 3 bit op1 and op2,
+ * and 4 bit CRn and CRm. The encoding patterns are chosen
+ * to be easy to convert to and from the KVM encodings, and also
+ * so that the hashtable can contain both AArch32 and AArch64
+ * registers (to allow for interprocessing where we might run
+ * 32 bit code on a 64 bit core).
+ */
+/* This bit is private to our hashtable cpreg; in KVM register
+ * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
+ * in the upper bits of the 64 bit ID.
+ */
+#define CP_REG_AA64_SHIFT 28
+#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
+
+/* To enable banking of coprocessor registers depending on ns-bit we
+ * add a bit to distinguish between secure and non-secure cpregs in the
+ * hashtable.
+ */
+#define CP_REG_NS_SHIFT 29
+#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
+
+#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \
+ ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \
+ ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
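+
+/*
+ * For example, the AArch32 SCTLR is encoded as "p15, 0, c1, c0, 0", so
+ * its non-secure hashtable key is ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0):
+ * ns == 1, cp == 15, is64 == 0, crn == 1, crm == 0, opc1 == 0, opc2 == 0.
+ */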
+
+#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
+ (CP_REG_AA64_MASK | \
+ ((cp) << CP_REG_ARM_COPROC_SHIFT) | \
+ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
+
+/* Convert a full 64 bit KVM register ID to the truncated 32 bit
+ * version used as a key for the coprocessor register hashtable
+ */
+static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
+{
+ uint32_t cpregid = kvmid;
+ if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
+ cpregid |= CP_REG_AA64_MASK;
+ } else {
+ if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ cpregid |= (1 << 15);
+ }
+
+ /* KVM is always non-secure so add the NS flag on AArch32 register
+ * entries.
+ */
+ cpregid |= 1 << CP_REG_NS_SHIFT;
+ }
+ return cpregid;
+}
+
+/* Convert a truncated 32 bit hashtable key into the full
+ * 64 bit KVM register ID.
+ */
+static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
+{
+ uint64_t kvmid;
+
+ if (cpregid & CP_REG_AA64_MASK) {
+ kvmid = cpregid & ~CP_REG_AA64_MASK;
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
+ } else {
+ kvmid = cpregid & ~(1 << 15);
+ if (cpregid & (1 << 15)) {
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ } else {
+ kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ }
+ }
+ return kvmid;
+}
+
+/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
+ * special-behaviour cp reg and bits [11..8] indicate what behaviour
+ * it has. Otherwise it is a simple cp reg, where CONST indicates that
+ * TCG can assume the value to be constant (ie load at translate time)
+ * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
+ * indicates that the TB should not be ended after a write to this register
+ * (the default is that the TB ends after cp writes). OVERRIDE permits
+ * a register definition to override a previous definition for the
+ * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
+ * old must have the OVERRIDE bit set.
+ * ALIAS indicates that this register is an alias view of some underlying
+ * state which is also visible via another register, and that the other
+ * register is handling migration and reset; registers marked ALIAS will not be
+ * migrated but may have their state set by syncing of register state from KVM.
+ * NO_RAW indicates that this register has no underlying state and does not
+ * support raw access for state saving/loading; it will not be used for either
+ * migration or KVM state synchronization. (Typically this is for "registers"
+ * which are actually used as instructions for cache maintenance and so on.)
+ * IO indicates that this register does I/O and therefore its accesses
+ * need to be marked with gen_io_start() and also end the TB. In particular,
+ * registers which implement clocks or timers require this.
+ * RAISES_EXC is for when the read or write hook might raise an exception;
+ * the generated code will synchronize the CPU state before calling the hook
+ * so that it is safe for the hook to call raise_exception().
+ * NEWEL is for writes to registers that might change the exception
+ * level - typically on older ARM chips. For those cases we need to
+ * re-read the new el when recomputing the translation flags.
+ */
+#define ARM_CP_SPECIAL 0x0001
+#define ARM_CP_CONST 0x0002
+#define ARM_CP_64BIT 0x0004
+#define ARM_CP_SUPPRESS_TB_END 0x0008
+#define ARM_CP_OVERRIDE 0x0010
+#define ARM_CP_ALIAS 0x0020
+#define ARM_CP_IO 0x0040
+#define ARM_CP_NO_RAW 0x0080
+#define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100)
+#define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200)
+#define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300)
+#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400)
+#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500)
+#define ARM_CP_DC_GVA (ARM_CP_SPECIAL | 0x0600)
+#define ARM_CP_DC_GZVA (ARM_CP_SPECIAL | 0x0700)
+#define ARM_LAST_SPECIAL ARM_CP_DC_GZVA
+#define ARM_CP_FPU 0x1000
+#define ARM_CP_SVE 0x2000
+#define ARM_CP_NO_GDB 0x4000
+#define ARM_CP_RAISES_EXC 0x8000
+#define ARM_CP_NEWEL 0x10000
+/* Used only as a terminator for ARMCPRegInfo lists */
+#define ARM_CP_SENTINEL 0xfffff
+/* Mask of only the flag bits in a type field */
+#define ARM_CP_FLAG_MASK 0x1f0ff
+
+/* Valid values for ARMCPRegInfo state field, indicating which of
+ * the AArch32 and AArch64 execution states this register is visible in.
+ * If the reginfo doesn't explicitly specify then it is AArch32 only.
+ * If the reginfo is declared to be visible in both states then a second
+ * reginfo is synthesised for the AArch32 view of the AArch64 register,
+ * such that the AArch32 view is the lower 32 bits of the AArch64 one.
+ * Note that we rely on the values of these enums as we iterate through
+ * the various states in some places.
+ */
+enum {
+ ARM_CP_STATE_AA32 = 0,
+ ARM_CP_STATE_AA64 = 1,
+ ARM_CP_STATE_BOTH = 2,
+};
+
+/* ARM CP register secure state flags. These flags identify security state
+ * attributes for a given CP register entry.
+ * The existence of both or neither secure and non-secure flags indicates that
+ * the register has both a secure and non-secure hash entry. A single one of
+ * these flags causes the register to only be hashed for the specified
+ * security state.
+ * Although definitions may have any combination of the S/NS bits, each
+ * registered entry will only have one to identify whether the entry is secure
+ * or non-secure.
+ */
+enum {
+ ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
+ ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
+};
+
+/* Return true if cptype is a valid type field. This is used to try to
+ * catch errors where the sentinel has been accidentally left off the end
+ * of a list of registers.
+ */
+static inline bool cptype_valid(int cptype)
+{
+ return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
+ || ((cptype & ARM_CP_SPECIAL) &&
+ ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
+}
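+
+/*
+ * E.g. ARM_CP_NOP (0x0101) is accepted by the second clause: it has
+ * ARM_CP_SPECIAL set and its non-flag bits (0x0100) do not exceed
+ * ARM_LAST_SPECIAL, whereas a garbage value read past a missing
+ * sentinel will almost always fail both clauses.
+ */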
+
+/* Access rights:
+ * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
+ * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
+ * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
+ * (ie any of the privileged modes in Secure state, or Monitor mode).
+ * If a register is accessible in one privilege level it's always accessible
+ * in higher privilege levels too. Since "Secure PL1" also follows this rule
+ * (ie anything visible in PL2 is visible in S-PL1, some things are only
+ * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
+ * terminology a little and call this PL3.
+ * In AArch64 things are somewhat simpler as the PLx bits line up exactly
+ * with the ELx exception levels.
+ *
+ * If access permissions for a register are more complex than can be
+ * described with these bits, then use a laxer set of restrictions, and
+ * do the more restrictive/complex check inside a helper function.
+ */
+#define PL3_R 0x80
+#define PL3_W 0x40
+#define PL2_R (0x20 | PL3_R)
+#define PL2_W (0x10 | PL3_W)
+#define PL1_R (0x08 | PL2_R)
+#define PL1_W (0x04 | PL2_W)
+#define PL0_R (0x02 | PL1_R)
+#define PL0_W (0x01 | PL1_W)
+
+/*
+ * For user-mode some registers are accessible to EL0 via a kernel
+ * trap-and-emulate ABI. In this case we define the read permissions
+ * as actually being PL0_R. However some bits of any given register
+ * may still be masked.
+ */
+#ifdef CONFIG_USER_ONLY
+#define PL0U_R PL0_R
+#else
+#define PL0U_R PL1_R
+#endif
+
+#define PL3_RW (PL3_R | PL3_W)
+#define PL2_RW (PL2_R | PL2_W)
+#define PL1_RW (PL1_R | PL1_W)
+#define PL0_RW (PL0_R | PL0_W)
+
+/* Return the highest implemented Exception Level */
+static inline int arm_highest_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return 3;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ return 2;
+ }
+ return 1;
+}
+
+/* Return true if a v7M CPU is in Handler mode */
+static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
+{
+ return env->v7m.exception != 0;
+}
+
+/* Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[env->v7m.secure] & 1);
+ }
+
+ if (is_a64(env)) {
+ return extract32(env->pstate, 2, 2);
+ }
+
+ switch (env->uncached_cpsr & 0x1f) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_MON:
+ return 3;
+ default:
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ /* If EL3 is 32-bit then all secure privileged modes run in
+ * EL3
+ */
+ return 3;
+ }
+
+ return 1;
+ }
+}
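+
+/*
+ * For M-profile the expression above evaluates to 1 (privileged) when
+ * in Handler mode or when CONTROL.nPRIV is clear; Thread mode with
+ * CONTROL.nPRIV set is the only case that yields 0.
+ */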
+
+typedef struct ARMCPRegInfo ARMCPRegInfo;
+
+typedef enum CPAccessResult {
+ /* Access is permitted */
+ CP_ACCESS_OK = 0,
+ /* Access fails due to a configurable trap or enable which would
+ * result in a categorized exception syndrome giving information about
+ * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
+ * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
+ * PL1 if in EL0, otherwise to the current EL).
+ */
+ CP_ACCESS_TRAP = 1,
+ /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
+ * Note that this is not a catch-all case -- the set of cases which may
+ * result in this failure is specifically defined by the architecture.
+ */
+ CP_ACCESS_TRAP_UNCATEGORIZED = 2,
+ /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_EL2 = 3,
+ CP_ACCESS_TRAP_EL3 = 4,
+ /* As CP_ACCESS_TRAP_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
+ /* Access fails and results in an exception syndrome for an FP access,
+ * trapped directly to EL2 or EL3
+ */
+ CP_ACCESS_TRAP_FP_EL2 = 7,
+ CP_ACCESS_TRAP_FP_EL3 = 8,
+} CPAccessResult;
+
+/* Access functions for coprocessor registers. These cannot fail and
+ * may not raise exceptions.
+ */
+typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
+ uint64_t value);
+/* Access permission check functions for coprocessor registers. */
+typedef CPAccessResult CPAccessFn(CPUARMState *env,
+ const ARMCPRegInfo *opaque,
+ bool isread);
+/* Hook function for register reset */
+typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+
+#define CP_ANY 0xff
+
+/* Definition of an ARM coprocessor register */
+struct ARMCPRegInfo {
+ /* Name of register (useful mainly for debugging, need not be unique) */
+ const char *name;
+ /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
+ * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
+ * 'wildcard' field -- any value of that field in the MRC/MCR insn
+ * will be decoded to this register. The register read and write
+ * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
+ * used by the program, so it is possible to register a wildcard and
+ * then behave differently on read/write if necessary.
+ * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
+ * must both be zero.
+ * For AArch64-visible registers, opc0 is also used.
+ * Since there are no "coprocessors" in AArch64, cp is purely used as a
+ * way to distinguish (for KVM's benefit) guest-visible system registers
+ * from demuxed ones provided to preserve the "no side effects on
+ * KVM register read/write from QEMU" semantics. cp==0x13 is guest
+ * visible (to match KVM's encoding); cp==0 will be converted to
+ * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
+ */
+ uint8_t cp;
+ uint8_t crn;
+ uint8_t crm;
+ uint8_t opc0;
+ uint8_t opc1;
+ uint8_t opc2;
+ /* Execution state in which this register is visible: ARM_CP_STATE_* */
+ int state;
+ /* Register type: ARM_CP_* bits/values */
+ int type;
+ /* Access rights: PL*_[RW] */
+ int access;
+ /* Security state: ARM_CP_SECSTATE_* bits/values */
+ int secure;
+ /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
+ * this register was defined: can be used to hand data through to the
+ * register read/write functions, since they are passed the ARMCPRegInfo*.
+ */
+ void *opaque;
+ /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
+ * fieldoffset is non-zero, the reset value of the register.
+ */
+ uint64_t resetvalue;
+ /* Offset of the field in CPUARMState for this register.
+ *
+ * This is not needed if either:
+ * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
+ * 2. both readfn and writefn are specified
+ */
+ ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
+
+ /* Offsets of the secure and non-secure fields in CPUARMState for the
+ * register if it is banked. These fields are only used during the static
+ * registration of a register. During hashing the bank associated
+ * with a given security state is copied to fieldoffset which is used from
+ * there on out.
+ *
+ * It is expected that register definitions use either fieldoffset or
+ * bank_fieldoffsets in the definition but not both. It is also expected
+ * that both bank offsets are set when defining a banked register. This
+ * use indicates that a register is banked.
+ */
+ ptrdiff_t bank_fieldoffsets[2];
+
+ /* Function for making any access checks for this register in addition to
+ * those specified by the 'access' permissions bits. If NULL, no extra
+ * checks required. The access check is performed at runtime, not at
+ * translate time.
+ */
+ CPAccessFn *accessfn;
+ /* Function for handling reads of this register. If NULL, then reads
+ * will be done by loading from the offset into CPUARMState specified
+ * by fieldoffset.
+ */
+ CPReadFn *readfn;
+ /* Function for handling writes of this register. If NULL, then writes
+ * will be done by writing to the offset into CPUARMState specified
+ * by fieldoffset.
+ */
+ CPWriteFn *writefn;
+ /* Function for doing a "raw" read; used when we need to copy
+ * coprocessor state to the kernel for KVM or out for
+ * migration. This only needs to be provided if there is also a
+ * readfn and it has side effects (for instance clear-on-read bits).
+ */
+ CPReadFn *raw_readfn;
+ /* Function for doing a "raw" write; used when we need to copy KVM
+ * kernel coprocessor state into userspace, or for inbound
+ * migration. This only needs to be provided if there is also a
+ * writefn and it masks out "unwritable" bits or has write-one-to-clear
+ * or similar behaviour.
+ */
+ CPWriteFn *raw_writefn;
+ /* Function for resetting the register. If NULL, then reset will be done
+ * by writing resetvalue to the field specified in fieldoffset. If
+ * fieldoffset is 0 then no reset will be done.
+ */
+ CPResetFn *resetfn;
+
+ /*
+ * "Original" writefn and readfn.
+ * For ARMv8.1-VHE register aliases, we overwrite the read/write
+ * accessor functions of various EL1/EL0 to perform the runtime
+ * check for which sysreg should actually be modified, and then
+ * forwards the operation. Before overwriting the accessors,
+ * the original function is copied here, so that accesses that
+ * really do go to the EL1/EL0 version proceed normally.
+ * (The corresponding EL2 register is linked via opaque.)
+ */
+ CPReadFn *orig_readfn;
+ CPWriteFn *orig_writefn;
+};
+
+/* Macros which are lvalues for the field in CPUARMState for the
+ * ARMCPRegInfo *ri.
+ */
+#define CPREG_FIELD32(env, ri) \
+ (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
+#define CPREG_FIELD64(env, ri) \
+ (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
+
+#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque);
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque);
+static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+ define_arm_cp_regs_with_opaque(cpu, regs, 0);
+}
+static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+ define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
+}
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
+
+/*
+ * Definition of an ARM co-processor register as viewed from
+ * userspace. This is used for presenting sanitised versions of
+ * registers to userspace when emulating the Linux AArch64 CPU
+ * ID/feature ABI (advertised as HWCAP_CPUID).
+ */
+typedef struct ARMCPRegUserSpaceInfo {
+ /* Name of register */
+ const char *name;
+
+ /* Is the name actually a glob pattern */
+ bool is_glob;
+
+ /* Only some bits are exported to user space */
+ uint64_t exported_bits;
+
+ /* Fixed bits are applied after the mask */
+ uint64_t fixed_bits;
+} ARMCPRegUserSpaceInfo;
+
+#define REGUSERINFO_SENTINEL { .name = NULL }
+
+void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods);
+
+/* CPWriteFn that can be used to implement writes-ignored behaviour */
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value);
+/* CPReadFn that can be used for read-as-zero behaviour */
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
+
+/* CPResetFn that does nothing, for use if no reset is required even
+ * if fieldoffset is non-zero.
+ */
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
+
+/* Return true if this reginfo struct's field in the cpu state struct
+ * is 64 bits wide.
+ */
+static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
+{
+ return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
+}
+
+static inline bool cp_access_ok(int current_el,
+ const ARMCPRegInfo *ri, int isread)
+{
+ return (ri->access >> ((current_el * 2) + isread)) & 1;
+}
+
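+/*
+ * For example, a register declared with .access = PL1_RW is readable
+ * and writable from EL1 and above but inaccessible from EL0, so:
+ *
+ *   cp_access_ok(0, ri, 1)  => false  (EL0 read trapped)
+ *   cp_access_ok(1, ri, 1)  => true   (EL1 read allowed)
+ *   cp_access_ok(2, ri, 0)  => true   (EL2 write allowed)
+ */
+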
+/* Raw read of a coprocessor register (as needed for migration, etc) */
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+
+/**
+ * write_list_to_cpustate:
+ * @cpu: ARMCPU
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the cpreg_values list into the CPUARMState structure.
+ * This updates TCG's working data structures from KVM data or
+ * from incoming migration state.
+ *
+ * Returns: true if all register values were updated correctly,
+ * false if some register was unknown or could not be written.
+ * Note that we do not stop early on failure -- we will attempt
+ * writing all registers in the list.
+ */
+bool write_list_to_cpustate(ARMCPU *cpu);
+
+/**
+ * write_cpustate_to_list:
+ * @cpu: ARMCPU
+ * @kvm_sync: true if this is for syncing back to KVM
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the CPUARMState structure into the cpreg_values list.
+ * This is used to copy info from TCG's working data structures into
+ * KVM or for outbound migration.
+ *
+ * @kvm_sync is true if we are doing this in order to sync the
+ * register state back to KVM. In this case we will only update
+ * values in the list if the previous list->cpustate sync actually
+ * successfully wrote the CPU state. Otherwise we will keep the value
+ * that is in the list.
+ *
+ * Returns: true if all register values were read correctly,
+ * false if some register was unknown or could not be read.
+ * Note that we do not stop early on failure -- we will attempt
+ * reading all registers in the list.
+ */
+bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
+
+#define ARM_CPUID_TI915T 0x54029152
+#define ARM_CPUID_TI925T 0x54029252
+
+#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
+#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_ARM_CPU
+
+#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
+
+#define cpu_list arm_cpu_list
+
+/* ARM has the following "translation regimes" (as the ARM ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ * + NonSecure EL1 & 0 stage 1
+ * + NonSecure EL1 & 0 stage 2
+ * + NonSecure EL2
+ * + NonSecure EL2 & 0 (ARMv8.1-VHE)
+ * + Secure EL1 & 0
+ * + Secure EL3
+ * If EL3 is 32-bit:
+ * + NonSecure PL1 & 0 stage 1
+ * + NonSecure PL1 & 0 stage 2
+ * + NonSecure PL2
+ * + Secure PL0
+ * + Secure PL1
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ * because they may differ in access permissions even if the VA->PA map is
+ * the same
+ * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ * translation, which means that we have one mmu_idx that deals with two
+ * concatenated translation regimes [this sort of combined s1+2 TLB is
+ * architecturally permitted]
+ * 3. we don't need to allocate an mmu_idx to translations that we won't be
+ * handling via the TLB. The only way to do a stage 1 translation without
+ * the immediate stage 2 translation is via the ATS or AT system insns,
+ * which can be slow-pathed and always do a page table walk.
+ * The only use of stage 2 translations is either as part of an s1+2
+ * lookup or when loading the descriptors during a stage 1 page table walk,
+ * and in both those cases we don't use the TLB.
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * 5. we want to be able to use the TLB for accesses done as part of a
+ * stage1 page table walk, rather than having to walk the stage2 page
+ * table over and over.
+ * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * Never (PAN) bit within PSTATE.
+ *
+ * This gives us the following list of cases:
+ *
+ * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * NS EL1 EL1&0 stage 1+2 +PAN
+ * NS EL0 EL2&0
+ * NS EL2 EL2&0
+ * NS EL2 EL2&0 +PAN
+ * NS EL2 (aka NS PL2)
+ * S EL0 EL1&0 (aka S PL0)
+ * S EL1 EL1&0 (not used if EL3 is 32 bit)
+ * S EL1 EL1&0 +PAN
+ * S EL3 (aka S PL1)
+ *
+ * for a total of 11 different mmu_idx.
+ *
+ * R profile CPUs have an MPU, but can use the same set of MMU indexes
+ * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
+ * NS EL2 if we ever model a Cortex-R52).
+ *
+ * M profile CPUs are rather different as they do not have a true MMU.
+ * They have the following different MMU indexes:
+ * User
+ * Privileged
+ * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
+ * Privileged, execution priority negative (ditto)
+ * If the CPU supports the v8M Security Extension then there are also:
+ * Secure User
+ * Secure Privileged
+ * Secure User, execution priority negative
+ * Secure Privileged, execution priority negative
+ *
+ * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
+ * are not quite the same -- different CPU types (most notably M profile
+ * vs A/R profile) would like to use MMU indexes with different semantics,
+ * but since we don't ever need to use all of those in a single CPU we
+ * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
+ * modes + total number of M profile MMU modes". The lower bits of
+ * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
+ * the same for any particular CPU.
+ * Variables of type ARMMMUIdx are always full values, and the core
+ * index values are in variables of type 'int'.
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ * For M profile we arrange them to have a bit for priv, a bit for negpri
+ * and a bit for secure.
+ */
+#define ARM_MMU_IDX_A 0x10 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x40 /* M profile */
+
+/* Meanings of the bits for A profile mmu idx values */
+#define ARM_MMU_IDX_A_NS 0x8
+
+/* Meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV 0x1
+#define ARM_MMU_IDX_M_NEGPRI 0x2
+#define ARM_MMU_IDX_M_S 0x4 /* Secure */
+
+#define ARM_MMU_IDX_TYPE_MASK \
+ (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0xf
+
+typedef enum ARMMMUIdx {
+ /*
+ * A-profile.
+ */
+ ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
+
+ /*
+ * These are not allocated TLBs and are used only for AT system
+ * instructions or for the first stage of an S12 page table walk.
+ */
+ ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
+ /*
+ * Not allocated a TLB: used only for second stage of an S12 page
+ * table walk, or for descriptor loads during first stage of an S1
+ * page table walk. Note that if we ever want to have a TLB for this
+ * then various TLB flush insns which currently are no-ops or flush
+ * only stage 1 MMU indexes will need to change to flush stage 2.
+ */
+ ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
+
+ /*
+ * M-profile.
+ */
+ ARMMMUIdx_MUser = ARM_MMU_IDX_M,
+ ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
+ ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
+ ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
+ ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
+} ARMMMUIdx;
+
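+/*
+ * Worked example of the encoding: ARMMMUIdx_E10_1 is
+ * ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS = (2 | 0x10) | 0x8 = 0x1a,
+ * so its core TLB index (0x1a & ARM_MMU_IDX_COREIDX_MASK) is 10 and
+ * ARMMMUIdxBit_E10_1 below is 1 << 10.
+ */
+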
+/*
+ * Bit macros for the core-mmu-index values for each index,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
+ */
+#define TO_CORE_BIT(NAME) \
+ ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
+
+typedef enum ARMMMUIdxBit {
+ TO_CORE_BIT(E10_0),
+ TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E10_1),
+ TO_CORE_BIT(E10_1_PAN),
+ TO_CORE_BIT(E2),
+ TO_CORE_BIT(E20_2),
+ TO_CORE_BIT(E20_2_PAN),
+ TO_CORE_BIT(SE10_0),
+ TO_CORE_BIT(SE20_0),
+ TO_CORE_BIT(SE10_1),
+ TO_CORE_BIT(SE20_2),
+ TO_CORE_BIT(SE10_1_PAN),
+ TO_CORE_BIT(SE20_2_PAN),
+ TO_CORE_BIT(SE2),
+ TO_CORE_BIT(SE3),
+
+ TO_CORE_BIT(MUser),
+ TO_CORE_BIT(MPriv),
+ TO_CORE_BIT(MUserNegPri),
+ TO_CORE_BIT(MPrivNegPri),
+ TO_CORE_BIT(MSUser),
+ TO_CORE_BIT(MSPriv),
+ TO_CORE_BIT(MSUserNegPri),
+ TO_CORE_BIT(MSPrivNegPri),
+} ARMMMUIdxBit;
+
+#undef TO_CORE_BIT
+
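+/*
+ * A usage sketch (the exact idxmap depends on the TLB operation being
+ * emulated): invalidating the whole NS EL1&0 regime passes the OR of
+ * the relevant bits to the core TLB code:
+ *
+ *   tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E10_1 |
+ *                           ARMMMUIdxBit_E10_1_PAN |
+ *                           ARMMMUIdxBit_E10_0);
+ */
+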
+#define MMU_USER_IDX 0
+
+/* Indexes used when registering address spaces with cpu_address_space_init */
+typedef enum ARMASIdx {
+ ARMASIdx_NS = 0,
+ ARMASIdx_S = 1,
+ ARMASIdx_TagNS = 2,
+ ARMASIdx_TagS = 3,
+} ARMASIdx;
+
+/* Return the Exception Level targeted by debug exceptions. */
+static inline int arm_debug_target_el(CPUARMState *env)
+{
+ bool secure = arm_is_secure(env);
+ bool route_to_el2 = false;
+
+ if (arm_is_el2_enabled(env)) {
+ route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
+ env->cp15.mdcr_el2 & MDCR_TDE;
+ }
+
+ if (route_to_el2) {
+ return 2;
+ } else if (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) && secure) {
+ return 3;
+ } else {
+ return 1;
+ }
+}
+
+static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
+{
+ /* If all the CLIDR.Ctypem bits are 0 there are no caches, and
+ * CSSELR is RAZ/WI.
+ */
+ return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+}
+
+/* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */
+static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
+{
+ int cur_el = arm_current_el(env);
+ int debug_el;
+
+ if (cur_el == 3) {
+ return false;
+ }
+
+ /* MDCR_EL3.SDD disables debug events from Secure state */
+ if (arm_is_secure_below_el3(env)
+ && extract32(env->cp15.mdcr_el3, 16, 1)) {
+ return false;
+ }
+
+ /*
+ * Same EL to same EL debug exceptions need MDSCR_KDE enabled
+ * while not masking the (D)ebug bit in DAIF.
+ */
+ debug_el = arm_debug_target_el(env);
+
+ if (cur_el == debug_el) {
+ return extract32(env->cp15.mdscr_el1, 13, 1)
+ && !(env->daif & PSTATE_D);
+ }
+
+ /* Otherwise the debug target needs to be a higher EL */
+ return debug_el > cur_el;
+}
+
+static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el == 0 && arm_el_is_aa64(env, 1)) {
+ return aa64_generate_debug_exceptions(env);
+ }
+
+ if (arm_is_secure(env)) {
+ int spd;
+
+ if (el == 0 && (env->cp15.sder & 1)) {
+ /* SDER.SUIDEN means debug exceptions from Secure EL0
+ * are always enabled. Otherwise they are controlled by
+ * SDCR.SPD like those from other Secure ELs.
+ */
+ return true;
+ }
+
+ spd = extract32(env->cp15.mdcr_el3, 14, 2);
+ switch (spd) {
+ case 1:
+ /* SPD == 0b01 is reserved, but behaves as 0b00. */
+ case 0:
+ /* For 0b00 we return true if external secure invasive debug
+ * is enabled. On real hardware this is controlled by external
+ * signals to the core. QEMU always permits debug, and behaves
+ * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
+ */
+ return true;
+ case 2:
+ return false;
+ case 3:
+ return true;
+ }
+ }
+
+ return el != 2;
+}
+
+/* Return true if debugging exceptions are currently enabled.
+ * This corresponds to what in ARM ARM pseudocode would be
+ * if UsingAArch32() then
+ * return AArch32.GenerateDebugExceptions()
+ * else
+ * return AArch64.GenerateDebugExceptions()
+ * We choose to push the if() down into this function for clarity,
+ * since the pseudocode has it at all callsites except for the one in
+ * CheckSoftwareStep(), where it is elided because both branches would
+ * always return the same value.
+ */
+static inline bool arm_generate_debug_exceptions(CPUARMState *env)
+{
+ if (env->aarch64) {
+ return aa64_generate_debug_exceptions(env);
+ } else {
+ return aa32_generate_debug_exceptions(env);
+ }
+}
+
+/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
+ * implicitly means this always returns false in pre-v8 CPUs.)
+ */
+static inline bool arm_singlestep_active(CPUARMState *env)
+{
+ return extract32(env->cp15.mdscr_el1, 0, 1)
+ && arm_el_is_aa64(env, arm_debug_target_el(env))
+ && arm_generate_debug_exceptions(env);
+}
+
+static inline bool arm_sctlr_b(CPUARMState *env)
+{
+ return
+ /* We need not implement SCTLR.ITD in user-mode emulation, so
+ * let linux-user ignore the fact that it conflicts with SCTLR_B.
+ * This lets people run BE32 binaries with "-cpu any".
+ */
+#ifndef CONFIG_USER_ONLY
+ !arm_feature(env, ARM_FEATURE_V7) &&
+#endif
+ (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
+}
+
+uint64_t arm_sctlr(CPUARMState *env, int el);
+
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
+ bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ if (sctlr_b) {
+ return true;
+ }
+#endif
+ /* In 32bit endianness is determined by looking at CPSR's E bit */
+ return env->uncached_cpsr & CPSR_E;
+}
+
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
+{
+ return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ if (!is_a64(env)) {
+ return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
+ } else {
+ int cur_el = arm_current_el(env);
+ uint64_t sctlr = arm_sctlr(env, cur_el);
+ return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
+ }
+}
+
+typedef CPUARMState CPUArchState;
+typedef ARMCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/*
+ * We have more than 32-bits worth of state per TB, so we split the data
+ * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
+ * We collect these two parts in CPUARMTBFlags where they are named
+ * flags and flags2 respectively.
+ *
+ * The flags that are shared between all execution modes, TBFLAG_ANY,
+ * are stored in flags. The flags that are specific to a given mode
+ * are stored in flags2. Since cs_base is sized on the configured
+ * address size, flags2 always has 64-bits for A64, and a minimum of
+ * 32-bits for A32 and M32.
+ *
+ * The bits for 32-bit A-profile and M-profile partially overlap:
+ *
+ *  31          23         11 10             0
+ * +-------------+----------+----------------+
+ * |             |          |   TBFLAG_A32   |
+ * | TBFLAG_AM32 |          +-----+----------+
+ * |             |                |TBFLAG_M32|
+ * +-------------+----------------+----------+
+ *  31         23                6 5         0
+ *
+ * Unless otherwise noted, these bits are cached in env->hflags.
+ */
+FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
+FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
+FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1) /* Not cached. */
+FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
+FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
+/* Target EL if we take a floating-point-disabled exception */
+FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
+/* For A-profile only, target EL for debug exceptions. */
+FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2)
+/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
+FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1)
+FIELD(TBFLAG_ANY, PSTATE__IL, 13, 1)
+
+/*
+ * Bit usage when in AArch32 state, both A- and M-profile.
+ */
+FIELD(TBFLAG_AM32, CONDEXEC, 24, 8) /* Not cached. */
+FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */
+
+/*
+ * Bit usage when in AArch32 state, for A-profile only.
+ */
+FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */
+FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */
+/*
+ * We store the bottom two bits of the CPAR as TB flags and handle
+ * checks on the other bits at runtime. This shares the same bits as
+ * VECSTRIDE, which is OK as no XScale CPU has VFP.
+ * Not cached, because VECLEN+VECSTRIDE are not cached.
+ */
+FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
+FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
+FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
+/*
+ * Indicates whether cp register reads and writes by guest code should access
+ * the secure or nonsecure bank of banked registers; note that this is not
+ * the same thing as the current security state of the processor!
+ */
+FIELD(TBFLAG_A32, NS, 10, 1)
+
+/*
+ * Bit usage when in AArch32 state, for M-profile only.
+ */
+/* Handler (ie not Thread) mode */
+FIELD(TBFLAG_M32, HANDLER, 0, 1)
+/* Whether we should generate stack-limit checks */
+FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
+/* Set if FPCCR.LSPACT is set */
+FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */
+/* Set if we must create a new FP context */
+FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */
+/* Set if FPCCR.S does not match current security state */
+FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */
+/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
+FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */
+
+/*
+ * Bit usage when in AArch64 state
+ */
+FIELD(TBFLAG_A64, TBII, 0, 2)
+FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
+FIELD(TBFLAG_A64, ZCR_LEN, 4, 4)
+FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
+FIELD(TBFLAG_A64, BT, 9, 1)
+FIELD(TBFLAG_A64, BTYPE, 10, 2) /* Not cached. */
+FIELD(TBFLAG_A64, TBID, 12, 2)
+FIELD(TBFLAG_A64, UNPRIV, 14, 1)
+FIELD(TBFLAG_A64, ATA, 15, 1)
+FIELD(TBFLAG_A64, TCMA, 16, 2)
+FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
+FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
+
+/*
+ * Helpers for using the above.
+ */
+#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
+ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
+#define DP_TBFLAG_A64(DST, WHICH, VAL) \
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL))
+#define DP_TBFLAG_A32(DST, WHICH, VAL) \
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
+#define DP_TBFLAG_M32(DST, WHICH, VAL) \
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
+#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
+ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))
+
+#define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
+#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH)
+#define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
+#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
+#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
+
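+/*
+ * A minimal usage sketch (the value 3 is arbitrary): set a field with
+ * the DP_TBFLAG_* macro and read it back with the matching EX_TBFLAG_*:
+ *
+ *   CPUARMTBFlags f = { 0 };
+ *   DP_TBFLAG_ANY(f, MMUIDX, 3);
+ *   assert(EX_TBFLAG_ANY(f, MMUIDX) == 3);
+ */
+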
+/**
+ * cpu_mmu_index:
+ * @env: The cpu environment
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ */
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
+{
+ return EX_TBFLAG_ANY(env->hflags, MMUIDX);
+}
+
+static inline bool bswap_code(bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
+ * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
+ * would also end up as a mixed-endian mode with BE code, LE data.
+ */
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ sctlr_b;
+#else
+ /* All code access in ARM is little endian, and there are no loaders
+ * doing swaps that need to be reversed
+ */
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
+void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags);
+
+enum {
+ QEMU_PSCI_CONDUIT_DISABLED = 0,
+ QEMU_PSCI_CONDUIT_SMC = 1,
+ QEMU_PSCI_CONDUIT_HVC = 2,
+};
+
+#ifndef CONFIG_USER_ONLY
+/* Return the address space index to use for a memory access */
+static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
+}
+
+/* Return the AddressSpace to use for a memory access
+ * (which depends on whether the access is S or NS, and whether
+ * the board gave us a separate AddressSpace for S accesses).
+ */
+static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
+}
+#endif
+
+/**
+ * arm_register_pre_el_change_hook:
+ * Register a hook function which will be called immediately before this
+ * CPU changes exception level or mode. The hook function will be
+ * passed a pointer to the ARMCPU and the opaque data pointer passed
+ * to this function when the hook was registered.
+ *
+ * Note that if a pre-change hook is called, any registered post-change hooks
+ * are guaranteed to subsequently be called.
+ */
+void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
+ void *opaque);
+/**
+ * arm_register_el_change_hook:
+ * Register a hook function which will be called immediately after this
+ * CPU changes exception level or mode. The hook function will be
+ * passed a pointer to the ARMCPU and the opaque data pointer passed
+ * to this function when the hook was registered.
+ *
+ * Note that any hooks registered here are guaranteed to be called
+ * if pre-change hooks have been called.
+ */
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
+ void *opaque);
+
+/**
+ * arm_rebuild_hflags:
+ * Rebuild the cached TBFLAGS for arbitrary changed processor state.
+ */
+void arm_rebuild_hflags(CPUARMState *env);
+
+/**
+ * aa32_vfp_dreg:
+ * Return a pointer to the Dn register within env in 32-bit mode.
+ */
+static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
+{
+ return &env->vfp.zregs[regno >> 1].d[regno & 1];
+}
+
+/**
+ * aa32_vfp_qreg:
+ * Return a pointer to the Qn register within env in 32-bit mode.
+ */
+static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
+{
+ return &env->vfp.zregs[regno].d[0];
+}
+
+/**
+ * aa64_vfp_qreg:
+ * Return a pointer to the Qn register within env in 64-bit mode.
+ */
+static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
+{
+ return &env->vfp.zregs[regno].d[0];
+}
+
+/* Shared between translate-sve.c and sve_helper.c. */
+extern const uint64_t pred_esz_masks[4];
+
+/* Helper for the macros below, validating the argument type. */
+static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
+{
+ return x;
+}
+
+/*
+ * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
+ * Using these should be a bit more self-documenting than using the
+ * generic target bits directly.
+ */
+#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
+#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
+
+/*
+ * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
+ */
+#define PAGE_BTI PAGE_TARGET_1
+#define PAGE_MTE PAGE_TARGET_2
+
+#ifdef TARGET_TAGGED_ADDRESSES
+/**
+ * cpu_untagged_addr:
+ * @cs: CPU context
+ * @x: tagged address
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but we need this in
+ * include/exec/cpu_ldst.h, and not some place linux-user specific.
+ */
+static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ if (cpu->env.tagged_addr_enable) {
+ /*
+ * TBI is enabled for userspace but not kernelspace addresses.
+ * Only clear the tag if bit 55 is clear.
+ */
+ x &= sextract64(x, 0, 56);
+ }
+ return x;
+}
+#endif
+
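+/*
+ * Worked example for the sextract64() trick in cpu_untagged_addr():
+ * for a tagged user pointer such as 0x5a007ffff7ff1000 (tag 0x5a,
+ * bit 55 clear), sextract64(x, 0, 56) propagates the zero bit 55 into
+ * bits 63:56, so the AND strips the tag and yields 0x00007ffff7ff1000.
+ * For a kernel-space value such as 0xffff800000001000 (bit 55 set),
+ * the sign extension yields all-ones in bits 63:56 and x is unchanged.
+ */
+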
+/*
+ * Naming convention for isar_feature functions:
+ * Functions which test 32-bit ID registers should have _aa32_ in
+ * their name. Functions which test 64-bit ID registers should have
+ * _aa64_ in their name. These must only be used in code where we
+ * know for certain that the CPU has AArch32 or AArch64 respectively
+ * or where the correct answer for a CPU which doesn't implement that
+ * CPU state is "false" (eg when generating A32 or A64 code, if adding
+ * system registers that are specific to that CPU state, for "should
+ * we let this system register bit be set" tests where the 32-bit
+ * flavour of the register doesn't have the bit, and so on).
+ * Functions which simply ask "does this feature exist at all" have
+ * _any_ in their name, and always return the logical OR of the _aa64_
+ * and the _aa32_ function.
+ */
+
+/*
+ * 32-bit feature tests via id registers.
+ */
+static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
+}
+
+static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
+}
+
+static inline bool isar_feature_aa32_lob(const ARMISARegisters *id)
+{
+ /* (M-profile) low-overhead loops and branch future */
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3;
+}
+
+static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
+}
+
+static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
+}
+
+static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
+}
+
+static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
+}
+
+static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
+}
+
+static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0;
+}
+
+static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
+}
+
+static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0;
+}
+
+static inline bool isar_feature_aa32_sb(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0;
+}
+
+static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
+}
+
+static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
+}
+
+static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
+}
+
+static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
+}
+
+static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
+}
+
+static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
+{
+ /*
+ * Return true if M-profile state handling insns
+ * (VSCCLRM, CLRM, FPCTX access insns) are implemented
+ */
+ return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
+}
+
+static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
+{
+ /* Sadly this is encoded differently for A-profile and M-profile */
+ if (isar_feature_aa32_mprofile(id)) {
+ return FIELD_EX32(id->mvfr1, MVFR1, FP16) > 0;
+ } else {
+ return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3;
+ }
+}
+
+static inline bool isar_feature_aa32_mve(const ARMISARegisters *id)
+{
+ /*
+ * Return true if MVE is supported (either integer or floating point).
+ * We must check for M-profile as the MVFR1 field means something
+ * else for A-profile.
+ */
+ return isar_feature_aa32_mprofile(id) &&
+ FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0;
+}
+
+static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id)
+{
+ /*
+ * Return true if floating-point MVE is supported.
+ * We must check for M-profile as the MVFR1 field means something
+ * else for A-profile.
+ */
+ return isar_feature_aa32_mprofile(id) &&
+ FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2;
+}
+
+static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id)
+{
+ /*
+ * Return true if either VFP or SIMD is implemented.
+ * In this case, a minimum of VFP w/ D0-D15.
+ */
+ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0;
+}
+
+static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id)
+{
+ /* Return true if D16-D31 are implemented */
+ return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2;
+}
+
+static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0;
+}
+
+static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id)
+{
+ /* Return true if CPU supports single precision floating point, VFPv2 */
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0;
+}
+
+static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id)
+{
+ /* Return true if CPU supports single precision floating point, VFPv3 */
+ return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2;
+}
+
+static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id)
+{
+ /* Return true if CPU supports double precision floating point, VFPv2 */
+ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0;
+}
+
+static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id)
+{
+ /* Return true if CPU supports double precision floating point, VFPv3 */
+ return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2;
+}
+
+static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id)
+{
+ return isar_feature_aa32_fpsp_v2(id) || isar_feature_aa32_fpdp_v2(id);
+}
+
+/*
+ * We always set the FP and SIMD FP16 fields to indicate identical
+ * levels of support (assuming SIMD is implemented at all), so
+ * we only need one set of accessors.
+ */
+static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0;
+}
+
+static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1;
+}
+
+/*
+ * Note that this ID register field covers both VFP and Neon FMAC,
+ * so should usually be tested in combination with some other
+ * check that confirms the presence of whichever of VFP or Neon is
+ * relevant, to avoid accidentally enabling a Neon feature on
+ * a VFP-no-Neon core or vice-versa.
+ */
+static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0;
+}
+
+static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1;
+}
+
+static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2;
+}
+
+static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3;
+}
+
+static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4;
+}
+
+static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4;
+}
+
+static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0;
+}
+
+static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
+}
+
+static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
+{
+ /* 0xf means "non-standard IMPDEF PMU" */
+ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
+ FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+}
+
+static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id)
+{
+ /* 0xf means "non-standard IMPDEF PMU" */
+ return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
+ FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+}
+
+static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
+}
+
+static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
+}
+
+static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
+}
+
+static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
+}
+
+static inline bool isar_feature_aa32_dit(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0;
+}
+
+static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0;
+}
+
+/*
+ * 64-bit feature tests via id registers.
+ */
+static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+}
+
+static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+}
+
+static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+}
+
+static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+}
+
+static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+}
+
+static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+}
+
+static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0;
+}
+
+static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
+}
+
+static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
+}
+
+static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0;
+}
+
+static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
+}
+
+static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+}
+
+static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
+{
+ /*
+ * Return true if any form of pauth is enabled, as this
+ * predicate controls migration of the 128-bit keys.
+ */
+ return (id->id_aa64isar1 &
+ (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
+ FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) |
+ FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) |
+ FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
+}
+
+static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
+{
+ /*
+ * Return true if pauth is enabled with the architected QARMA algorithm.
+ * QEMU will always set APA+GPA to the same value.
+ */
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
+}
+
+static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+}
+
+static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+}
+
+static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
+}
+
+static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0;
+}
+
+static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
+}
+
+static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
+}
+
+static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
+}
+
+static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
+}
+
+static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
+{
+ /* We always set the AdvSIMD and FP fields identically. */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf;
+}
+
+static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
+{
+ /* We always set the AdvSIMD and FP fields identically wrt FP16. */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
+static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
+}
+
+static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
+}
+
+static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
+}
+
+static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
+}
+
+static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+}
+
+static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+}
+
+static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
+}
+
+static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
+}
+
+static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
+}
+
+static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
+}
+
+static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
+}
+
+static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0;
+}
+
+static inline bool isar_feature_aa64_mte(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2;
+}
+
+static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
+ FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+}
+
+static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
+ FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+}
+
+static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
+}
+
+static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2;
+}
+
+static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
+}
+
+static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
+}
+
+static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
+}
+
+static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
+}
+
+static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
+}
+
+static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
+}
+
+/*
+ * Feature tests for "does this exist in either 32-bit or 64-bit?"
+ */
+static inline bool isar_feature_any_fp16(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id);
+}
+
+static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
+}
+
+static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
+}
+
+static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id);
+}
+
+static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
+}
+
+static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_tts2uxn(id) || isar_feature_aa32_tts2uxn(id);
+}
+
+/*
+ * Forward to the above feature tests given an ARMCPU pointer.
+ */
+#define cpu_isar_feature(name, cpu) \
+ ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
+
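+/*
+ * Typical use, e.g. testing for SVE given an ARMCPU pointer:
+ *
+ *   if (cpu_isar_feature(aa64_sve, cpu)) {
+ *       ...
+ *   }
+ *
+ * which expands to isar_feature_aa64_sve(&cpu->isar).
+ */
+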
+#endif
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
new file mode 100644
index 000000000..15245a60a
--- /dev/null
+++ b/target/arm/cpu64.c
@@ -0,0 +1,1005 @@
+/*
+ * QEMU AArch64 CPU
+ *
+ * Copyright (c) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */
+#include "qemu/module.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "qapi/visitor.h"
+#include "hw/qdev-properties.h"
+
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ /* Number of cores is in [25:24]; otherwise we RAZ */
+ return (cpu->core_count - 1) << 24;
+}
+#endif
+
+static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+ { .name = "L2CTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+#endif
+ { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ECTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR",
+ .cp = 15, .opc1 = 0, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUECTLR",
+ .cp = 15, .opc1 = 1, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUMERRSR",
+ .cp = 15, .opc1 = 2, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2MERRSR",
+ .cp = 15, .opc1 = 3, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void aarch64_a57_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a57";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
+ cpu->midr = 0x411fd070;
+ cpu->revidr = 0x00000000;
+ cpu->reset_fpsid = 0x41034070;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->isar.id_pfr0 = 0x00000131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10101105;
+ cpu->isar.id_mmfr1 = 0x40000000;
+ cpu->isar.id_mmfr2 = 0x01260000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_isar6 = 0;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_aa64dfr0 = 0x10305106;
+ cpu->isar.id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64mmfr0 = 0x00001124;
+ cpu->isar.dbgdidr = 0x3516d000;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
+ define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
+}
+
+static void aarch64_a53_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a53";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
+ cpu->midr = 0x410fd034;
+ cpu->revidr = 0x00000000;
+ cpu->reset_fpsid = 0x41034070;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
+ cpu->ctr = 0x84448004; /* L1Ip = VIPT */
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->isar.id_pfr0 = 0x00000131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10101105;
+ cpu->isar.id_mmfr1 = 0x40000000;
+ cpu->isar.id_mmfr2 = 0x01260000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_isar6 = 0;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_aa64dfr0 = 0x10305106;
+ cpu->isar.id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
+ cpu->isar.dbgdidr = 0x3516d000;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
+ cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
+ define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
+}
+
+static void aarch64_a72_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a72";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->midr = 0x410fd083;
+ cpu->revidr = 0x00000000;
+ cpu->reset_fpsid = 0x41034080;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->isar.id_pfr0 = 0x00000131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10201105;
+ cpu->isar.id_mmfr1 = 0x40000000;
+ cpu->isar.id_mmfr2 = 0x01260000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_aa64dfr0 = 0x10305106;
+ cpu->isar.id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64mmfr0 = 0x00001124;
+ cpu->isar.dbgdidr = 0x3516d000;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+ cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
+ define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
+}
+
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
+{
+ /*
+ * If any vector lengths are explicitly enabled with sve<N> properties,
+ * then all other lengths are implicitly disabled. If sve-max-vq is
+ * specified then it is the same as explicitly enabling all lengths
+ * up to and including the specified maximum, which means all larger
+ * lengths will be implicitly disabled. If no sve<N> properties
+ * are enabled and sve-max-vq is not specified, then all lengths not
+ * explicitly disabled will be enabled. Additionally, all power-of-two
+ * vector lengths less than the maximum enabled length will be
+ * automatically enabled and all vector lengths larger than the largest
+ * disabled power-of-two vector length will be automatically disabled.
+ * Errors are generated if the user provided input that interferes with
+ * any of the above. Finally, if SVE is not disabled, then at least one
+ * vector length must be enabled.
+ */
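+ /*
+ * Illustrative examples, assuming a CPU model whose supported set
+ * includes all of these lengths (as TCG's "max" does):
+ * sve-max-vq=4 alone enables 128, 256, 384 and 512 bit vectors;
+ * adding sve384=off leaves 128, 256 and 512 enabled; and
+ * sve512=on alone enables 512 plus the powers-of-two below it
+ * (128 and 256), but not 384.
+ */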
+ DECLARE_BITMAP(tmp, ARM_MAX_VQ);
+ uint32_t vq, max_vq = 0;
+
+ /*
+ * CPU models specify a set of supported vector lengths which are
+ * enabled by default. Attempting to enable any vector length not set
+ * in the supported bitmap results in an error. When KVM is enabled we
+ * fetch the supported bitmap from the host.
+ */
+ if (kvm_enabled() && kvm_arm_sve_supported()) {
+ kvm_arm_sve_get_vls(CPU(cpu), cpu->sve_vq_supported);
+ } else if (kvm_enabled()) {
+ assert(!cpu_isar_feature(aa64_sve, cpu));
+ }
+
+ /*
+ * Process explicit sve<N> properties.
+ * From the properties, sve_vq_map<N> implies sve_vq_init<N>.
+ * Check first for any sve<N> enabled.
+ */
+ if (!bitmap_empty(cpu->sve_vq_map, ARM_MAX_VQ)) {
+ max_vq = find_last_bit(cpu->sve_vq_map, ARM_MAX_VQ) + 1;
+
+ if (cpu->sve_max_vq && max_vq > cpu->sve_max_vq) {
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
+ error_append_hint(errp, "sve%d is larger than the maximum vector "
+ "length, sve-max-vq=%d (%d bits)\n",
+ max_vq * 128, cpu->sve_max_vq,
+ cpu->sve_max_vq * 128);
+ return;
+ }
+
+ if (kvm_enabled()) {
+ /*
+ * For KVM we have to automatically enable all supported uninitialized
+ * lengths, even when the smaller lengths are not all powers-of-two.
+ */
+ bitmap_andnot(tmp, cpu->sve_vq_supported, cpu->sve_vq_init, max_vq);
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
+ } else {
+ /* Propagate enabled bits down through required powers-of-two. */
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
+ if (!test_bit(vq - 1, cpu->sve_vq_init)) {
+ set_bit(vq - 1, cpu->sve_vq_map);
+ }
+ }
+ }
+ } else if (cpu->sve_max_vq == 0) {
+ /*
+ * No explicit bits enabled, and no implicit bits from sve-max-vq.
+ */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ /* SVE is disabled and so are all vector lengths. Good. */
+ return;
+ }
+
+ if (kvm_enabled()) {
+ /* Disabling a supported length disables all larger lengths. */
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
+ if (test_bit(vq - 1, cpu->sve_vq_init) &&
+ test_bit(vq - 1, cpu->sve_vq_supported)) {
+ break;
+ }
+ }
+ } else {
+ /* Disabling a power-of-two disables all larger lengths. */
+ for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
+ if (test_bit(vq - 1, cpu->sve_vq_init)) {
+ break;
+ }
+ }
+ }
+
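+ /*
+ * Editor's note: vq now indexes the first explicitly disabled (and, for
+ * KVM, supported) length, or exceeds ARM_MAX_VQ if none was found; only
+ * lengths below it remain candidates.
+ */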
+ max_vq = vq <= ARM_MAX_VQ ? vq - 1 : ARM_MAX_VQ;
+ bitmap_andnot(cpu->sve_vq_map, cpu->sve_vq_supported,
+ cpu->sve_vq_init, max_vq);
+ if (max_vq == 0 || bitmap_empty(cpu->sve_vq_map, max_vq)) {
+ error_setg(errp, "cannot disable sve%d", vq * 128);
+ error_append_hint(errp, "Disabling sve%d results in all "
+ "vector lengths being disabled.\n",
+ vq * 128);
+ error_append_hint(errp, "With SVE enabled, at least one "
+ "vector length must be enabled.\n");
+ return;
+ }
+
+ max_vq = find_last_bit(cpu->sve_vq_map, max_vq) + 1;
+ }
+
+ /*
+ * Process the sve-max-vq property.
+ * Note that we know from the above that no bit above
+ * sve-max-vq is currently set.
+ */
+ if (cpu->sve_max_vq != 0) {
+ max_vq = cpu->sve_max_vq;
+
+ if (!test_bit(max_vq - 1, cpu->sve_vq_map) &&
+ test_bit(max_vq - 1, cpu->sve_vq_init)) {
+ error_setg(errp, "cannot disable sve%d", max_vq * 128);
+ error_append_hint(errp, "The maximum vector length must be "
+ "enabled, sve-max-vq=%d (%d bits)\n",
+ max_vq, max_vq * 128);
+ return;
+ }
+
+ /* Set all bits not explicitly set within sve-max-vq. */
+ bitmap_complement(tmp, cpu->sve_vq_init, max_vq);
+ bitmap_or(cpu->sve_vq_map, cpu->sve_vq_map, tmp, max_vq);
+ }
+
+ /*
+ * We should know what max-vq is now. Also, as we're done
+ * manipulating sve-vq-map, we ensure any bits above max-vq
+ * are clear, just in case anybody looks.
+ */
+ assert(max_vq != 0);
+ bitmap_clear(cpu->sve_vq_map, max_vq, ARM_MAX_VQ - max_vq);
+
+ /* Ensure the set of lengths matches what is supported. */
+ bitmap_xor(tmp, cpu->sve_vq_map, cpu->sve_vq_supported, max_vq);
+ if (!bitmap_empty(tmp, max_vq)) {
+ vq = find_last_bit(tmp, max_vq) + 1;
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
+ if (cpu->sve_max_vq) {
+ error_setg(errp, "cannot set sve-max-vq=%d", cpu->sve_max_vq);
+ error_append_hint(errp, "This CPU does not support "
+ "the vector length %d-bits.\n", vq * 128);
+ error_append_hint(errp, "It may not be possible to use "
+ "sve-max-vq with this CPU. Try "
+ "using only sve<N> properties.\n");
+ } else {
+ error_setg(errp, "cannot enable sve%d", vq * 128);
+ error_append_hint(errp, "This CPU does not support "
+ "the vector length %d-bits.\n", vq * 128);
+ }
+ return;
+ } else {
+ if (kvm_enabled()) {
+ error_setg(errp, "cannot disable sve%d", vq * 128);
+ error_append_hint(errp, "The KVM host requires all "
+ "supported vector lengths smaller "
+ "than %d bits to also be enabled.\n",
+ max_vq * 128);
+ return;
+ } else {
+ /* Ensure all required powers-of-two are enabled. */
+ for (vq = pow2floor(max_vq); vq >= 1; vq >>= 1) {
+ if (!test_bit(vq - 1, cpu->sve_vq_map)) {
+ error_setg(errp, "cannot disable sve%d", vq * 128);
+ error_append_hint(errp, "sve%d is required as it "
+ "is a power-of-two length smaller "
+ "than the maximum, sve%d\n",
+ vq * 128, max_vq * 128);
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * Now that we validated all our vector lengths, the only question
+ * left to answer is if we even want SVE at all.
+ */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ error_setg(errp, "cannot enable sve%d", max_vq * 128);
+ error_append_hint(errp, "SVE must be enabled to enable vector "
+ "lengths.\n");
+ error_append_hint(errp, "Add sve=on to the CPU property list.\n");
+ return;
+ }
+
+ /* From now on sve_max_vq is the actual maximum supported length. */
+ cpu->sve_max_vq = max_vq;
+}
+
+static void cpu_max_get_sve_max_vq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ uint32_t value;
+
+ /* All vector lengths are disabled when SVE is off. */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ value = 0;
+ } else {
+ value = cpu->sve_max_vq;
+ }
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void cpu_max_set_sve_max_vq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ uint32_t max_vq;
+
+ if (!visit_type_uint32(v, name, &max_vq, errp)) {
+ return;
+ }
+
+ if (kvm_enabled() && !kvm_arm_sve_supported()) {
+ error_setg(errp, "cannot set sve-max-vq");
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
+ return;
+ }
+
+ if (max_vq == 0 || max_vq > ARM_MAX_VQ) {
+ error_setg(errp, "unsupported SVE vector length");
+ error_append_hint(errp, "Valid sve-max-vq in range [1-%d]\n",
+ ARM_MAX_VQ);
+ return;
+ }
+
+ cpu->sve_max_vq = max_vq;
+}
+
+/*
+ * Note that cpu_arm_get/set_sve_vq cannot use the simpler
+ * object_property_add_bool interface because they make use
+ * of the contents of "name" to determine which bit to operate on.
+ */
+static void cpu_arm_get_sve_vq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ uint32_t vq = atoi(&name[3]) / 128;
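+ /*
+ * Editor's note: "name" is e.g. "sve256"; &name[3] parses the bit width
+ * and dividing by 128 yields the quadword count (vq == 2 here).
+ */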
+ bool value;
+
+ /* All vector lengths are disabled when SVE is off. */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ value = false;
+ } else {
+ value = test_bit(vq - 1, cpu->sve_vq_map);
+ }
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void cpu_arm_set_sve_vq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ uint32_t vq = atoi(&name[3]) / 128;
+ bool value;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
+ error_setg(errp, "cannot enable %s", name);
+ error_append_hint(errp, "SVE not supported by KVM on this host\n");
+ return;
+ }
+
+ if (value) {
+ set_bit(vq - 1, cpu->sve_vq_map);
+ } else {
+ clear_bit(vq - 1, cpu->sve_vq_map);
+ }
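+ /*
+ * Editor's note: sve_vq_init records that this length was set explicitly,
+ * whether on or off, so finalization can tell user choices from defaults.
+ */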
+ set_bit(vq - 1, cpu->sve_vq_init);
+}
+
+static bool cpu_arm_get_sve(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ return cpu_isar_feature(aa64_sve, cpu);
+}
+
+static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ uint64_t t;
+
+ if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
+ error_setg(errp, "'sve' feature not supported by KVM on this host");
+ return;
+ }
+
+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
+ cpu->isar.id_aa64pfr0 = t;
+}
+
+#ifdef CONFIG_USER_ONLY
+/* Mirror Linux's /proc/sys/abi/sve_default_vector_length. */
+static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ int32_t default_len, default_vq, remainder;
+
+ if (!visit_type_int32(v, name, &default_len, errp)) {
+ return;
+ }
+
+ /* Undocumented, but the kernel allows -1 to indicate "maximum". */
+ if (default_len == -1) {
+ cpu->sve_default_vq = ARM_MAX_VQ;
+ return;
+ }
+
+ default_vq = default_len / 16;
+ remainder = default_len % 16;
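+ /*
+ * Editor's note: the property value is in bytes; one SVE quadword is
+ * 16 bytes, hence the division by 16.
+ */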
+
+ /*
+ * Note that the 512 max comes from include/uapi/asm/sve_context.h
+ * and is the maximum architectural width of ZCR_ELx.LEN.
+ */
+ if (remainder || default_vq < 1 || default_vq > 512) {
+ error_setg(errp, "cannot set sve-default-vector-length");
+ if (remainder) {
+ error_append_hint(errp, "Vector length not a multiple of 16\n");
+ } else if (default_vq < 1) {
+ error_append_hint(errp, "Vector length smaller than 16\n");
+ } else {
+ error_append_hint(errp, "Vector length larger than %d\n",
+ 512 * 16);
+ }
+ return;
+ }
+
+ cpu->sve_default_vq = default_vq;
+}
+
+static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ int32_t value = cpu->sve_default_vq * 16;
+
+ visit_type_int32(v, name, &value, errp);
+}
+#endif
+
+void aarch64_add_sve_properties(Object *obj)
+{
+ uint32_t vq;
+
+ object_property_add_bool(obj, "sve", cpu_arm_get_sve, cpu_arm_set_sve);
+
+ for (vq = 1; vq <= ARM_MAX_VQ; ++vq) {
+ char name[8];
+ sprintf(name, "sve%d", vq * 128);
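+ /*
+ * Editor's note: assuming ARM_MAX_VQ == 16 this generates "sve128"
+ * through "sve2048"; the longest name fills name[8] exactly, including
+ * the terminating NUL.
+ */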
+ object_property_add(obj, name, "bool", cpu_arm_get_sve_vq,
+ cpu_arm_set_sve_vq, NULL, NULL);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ /* Mirror Linux's /proc/sys/abi/sve_default_vector_length. */
+ object_property_add(obj, "sve-default-vector-length", "int32",
+ cpu_arm_get_sve_default_vec_len,
+ cpu_arm_set_sve_default_vec_len, NULL, NULL);
+#endif
+}
+
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
+{
+ int arch_val = 0, impdef_val = 0;
+ uint64_t t;
+
+ /* TODO: Handle HaveEnhancedPAC, HaveEnhancedPAC2, HaveFPAC. */
+ if (cpu->prop_pauth) {
+ if (cpu->prop_pauth_impdef) {
+ impdef_val = 1;
+ } else {
+ arch_val = 1;
+ }
+ } else if (cpu->prop_pauth_impdef) {
+ error_setg(errp, "cannot enable pauth-impdef without pauth");
+ error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
+ }
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
+ t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
+ t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
+ t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
+ cpu->isar.id_aa64isar1 = t;
+}
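+
+ /*
+ * Editor's sketch of the mapping implemented above (not authoritative):
+ * pauth=on alone sets APA/GPA = 1 (the architected algorithm);
+ * pauth=on,pauth-impdef=on sets API/GPI = 1 (QEMU's impdef algorithm);
+ * pauth=off leaves all four fields zero.
+ */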
+
+static Property arm_cpu_pauth_property =
+ DEFINE_PROP_BOOL("pauth", ARMCPU, prop_pauth, true);
+static Property arm_cpu_pauth_impdef_property =
+ DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);
+
+/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
+ * otherwise, a CPU with as many features enabled as our emulation supports.
+ * The version of '-cpu max' for qemu-system-arm is defined in cpu_tcg.c;
+ * this only needs to handle 64 bits.
+ */
+static void aarch64_max_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ if (kvm_enabled()) {
+ kvm_arm_set_cpu_features_from_host(cpu);
+ } else {
+ uint64_t t;
+ uint32_t u;
+ aarch64_a57_initfn(obj);
+
+ /*
+ * Reset MIDR so the guest doesn't mistake our 'max' CPU type for a real
+ * one and try to apply errata workarounds or use impdef features we
+ * don't provide.
+ * An IMPLEMENTER field of 0 means "reserved for software use";
+ * ARCHITECTURE must be 0xf indicating "v7 or later, check ID registers
+ * to see which features are present";
+ * the VARIANT, PARTNUM and REVISION fields are all implementation
+ * defined and we choose to define PARTNUM just in case guest
+ * code needs to distinguish this QEMU CPU from other software
+ * implementations, though this shouldn't be needed.
+ */
+ t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0);
+ t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf);
+ t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 'Q');
+ t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
+ t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
+ cpu->midr = t;
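+ /*
+ * Editor's check: IMPLEMENTER/VARIANT/REVISION are 0, ARCHITECTURE is
+ * 0xf (bits [19:16]) and PARTNUM is 'Q' == 0x51 (bits [15:4]), so the
+ * resulting MIDR is 0x000f0510.
+ */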
+
+ t = cpu->isar.id_aa64isar0;
+ t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
+ t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
+ t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */
+ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
+ t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);
+ cpu->isar.id_aa64isar0 = t;
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
+ t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */
+ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1);
+ cpu->isar.id_aa64isar1 = t;
+
+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1);
+ cpu->isar.id_aa64pfr0 = t;
+
+ t = cpu->isar.id_aa64pfr1;
+ t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
+ t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
+ /*
+ * Begin with full support for MTE. This will be downgraded to MTE=0
+ * during realize if the board provides no tag memory, much like
+ * we do for EL2 with the virtualization=on property.
+ */
+ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
+ cpu->isar.id_aa64pfr1 = t;
+
+ t = cpu->isar.id_aa64mmfr0;
+ t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */
+ cpu->isar.id_aa64mmfr0 = t;
+
+ t = cpu->isar.id_aa64mmfr1;
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
+ t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
+ t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
+ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */
+ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */
+ cpu->isar.id_aa64mmfr1 = t;
+
+ t = cpu->isar.id_aa64mmfr2;
+ t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
+ t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
+ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
+ cpu->isar.id_aa64mmfr2 = t;
+
+ t = cpu->isar.id_aa64zfr0;
+ t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */
+ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1);
+ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1);
+ cpu->isar.id_aa64zfr0 = t;
+
+ /* Replicate the same data to the 32-bit id registers. */
+ u = cpu->isar.id_isar5;
+ u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
+ u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
+ u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
+ u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
+ cpu->isar.id_isar5 = u;
+
+ u = cpu->isar.id_isar6;
+ u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1);
+ u = FIELD_DP32(u, ID_ISAR6, DP, 1);
+ u = FIELD_DP32(u, ID_ISAR6, FHM, 1);
+ u = FIELD_DP32(u, ID_ISAR6, SB, 1);
+ u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
+ u = FIELD_DP32(u, ID_ISAR6, BF16, 1);
+ u = FIELD_DP32(u, ID_ISAR6, I8MM, 1);
+ cpu->isar.id_isar6 = u;
+
+ u = cpu->isar.id_pfr0;
+ u = FIELD_DP32(u, ID_PFR0, DIT, 1);
+ cpu->isar.id_pfr0 = u;
+
+ u = cpu->isar.id_pfr2;
+ u = FIELD_DP32(u, ID_PFR2, SSBS, 1);
+ cpu->isar.id_pfr2 = u;
+
+ u = cpu->isar.id_mmfr3;
+ u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
+ cpu->isar.id_mmfr3 = u;
+
+ u = cpu->isar.id_mmfr4;
+ u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */
+ u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
+ u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */
+ u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */
+ cpu->isar.id_mmfr4 = u;
+
+ t = cpu->isar.id_aa64dfr0;
+ t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */
+ cpu->isar.id_aa64dfr0 = t;
+
+ u = cpu->isar.id_dfr0;
+ u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */
+ cpu->isar.id_dfr0 = u;
+
+ u = cpu->isar.mvfr1;
+ u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */
+ u = FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
+ cpu->isar.mvfr1 = u;
+
+#ifdef CONFIG_USER_ONLY
+ /* For usermode -cpu max we can use a larger and more efficient DCZ
+ * blocksize since we don't have to follow what the hardware does.
+ */
+ cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
+ cpu->dcz_blocksize = 7; /* 512 bytes */
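+ /*
+ * Editor's note: DCZID_EL0.BS is the log2 of the block size in 4-byte
+ * words, so 2^7 words * 4 = 512 bytes here (and 2^4 * 4 = 64 bytes for
+ * the hardware-like CPU models above).
+ */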
+#endif
+
+ /* Default to PAUTH on, with the architected algorithm. */
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_property);
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_pauth_impdef_property);
+
+ bitmap_fill(cpu->sve_vq_supported, ARM_MAX_VQ);
+ }
+
+ aarch64_add_sve_properties(obj);
+ object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
+ cpu_max_set_sve_max_vq, NULL, NULL);
+}
+
+static void aarch64_a64fx_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,a64fx";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->midr = 0x461f0010;
+ cpu->revidr = 0x00000000;
+ cpu->ctr = 0x86668006;
+ cpu->reset_sctlr = 0x30000180;
+ cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */
+ cpu->isar.id_aa64pfr1 = 0x0000000000000000;
+ cpu->isar.id_aa64dfr0 = 0x0000000010305408;
+ cpu->isar.id_aa64dfr1 = 0x0000000000000000;
+ cpu->id_aa64afr0 = 0x0000000000000000;
+ cpu->id_aa64afr1 = 0x0000000000000000;
+ cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
+ cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
+ cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
+ cpu->isar.id_aa64isar0 = 0x0000000010211120;
+ cpu->isar.id_aa64isar1 = 0x0000000000010001;
+ cpu->isar.id_aa64zfr0 = 0x0000000000000000;
+ cpu->clidr = 0x0000000080000023;
+ cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */
+ cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */
+ cpu->ccsidr[2] = 0x70ffe07c; /* 8MB L2 cache */
+ cpu->dcz_blocksize = 6; /* 256 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
+
+ /* A64FX supports only the 128-, 256- and 512-bit vector lengths */
+ aarch64_add_sve_properties(obj);
+ bitmap_zero(cpu->sve_vq_supported, ARM_MAX_VQ);
+ set_bit(0, cpu->sve_vq_supported); /* 128bit */
+ set_bit(1, cpu->sve_vq_supported); /* 256bit */
+ set_bit(3, cpu->sve_vq_supported); /* 512bit */
+
+ /* TODO: Add A64FX specific HPC extension registers */
+}
+
+static const ARMCPUInfo aarch64_cpus[] = {
+ { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
+ { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+ { .name = "cortex-a72", .initfn = aarch64_a72_initfn },
+ { .name = "a64fx", .initfn = aarch64_a64fx_initfn },
+ { .name = "max", .initfn = aarch64_max_initfn },
+};
+
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /* At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
+static void aarch64_cpu_finalizefn(Object *obj)
+{
+}
+
+static gchar *aarch64_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("aarch64");
+}
+
+static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+
+ cc->gdb_read_register = aarch64_cpu_gdb_read_register;
+ cc->gdb_write_register = aarch64_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 34;
+ cc->gdb_core_xml_file = "aarch64-core.xml";
+ cc->gdb_arch_name = aarch64_gdb_arch_name;
+
+ object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64);
+ object_class_property_set_description(oc, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ");
+}
+
+static void aarch64_cpu_instance_init(Object *obj)
+{
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
+
+ acc->info->initfn(obj);
+ arm_cpu_post_init(obj);
+}
+
+static void cpu_register_class_init(ObjectClass *oc, void *data)
+{
+ ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+
+ acc->info = data;
+}
+
+void aarch64_cpu_register(const ARMCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_AARCH64_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = aarch64_cpu_instance_init,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = info->class_init ?: cpu_register_class_init,
+ .class_data = (void *)info,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
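+ /*
+ * Editor's note: e.g. info->name "cortex-a57" yields the QOM type name
+ * "cortex-a57-arm-cpu".
+ */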
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo aarch64_cpu_type_info = {
+ .name = TYPE_AARCH64_CPU,
+ .parent = TYPE_ARM_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_finalize = aarch64_cpu_finalizefn,
+ .abstract = true,
+ .class_size = sizeof(AArch64CPUClass),
+ .class_init = aarch64_cpu_class_init,
+};
+
+static void aarch64_cpu_register_types(void)
+{
+ size_t i;
+
+ type_register_static(&aarch64_cpu_type_info);
+
+ for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
+ aarch64_cpu_register(&aarch64_cpus[i]);
+ }
+}
+
+type_init(aarch64_cpu_register_types)
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
new file mode 100644
index 000000000..13d0e9b19
--- /dev/null
+++ b/target/arm/cpu_tcg.c
@@ -0,0 +1,1082 @@
+/*
+ * QEMU ARM TCG CPUs.
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */
+#include "internals.h"
+#include "target/arm/idau.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/boards.h"
+#endif
+
+/* CPU models. These are not needed for the AArch64 linux-user build. */
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
+static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ bool ret = false;
+
+ /*
+ * ARMv7-M interrupt masking works differently than -A or -R.
+ * There is no FIQ/IRQ distinction. Instead of I and F bits
+ * masking FIQ and IRQ interrupts, an exception is taken only
+ * if it is higher priority than the current execution priority
+ * (which depends on state like BASEPRI, FAULTMASK and the
+ * currently active exception).
+ */
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
+ cs->exception_index = EXCP_IRQ;
+ cc->tcg_ops->do_interrupt(cs);
+ ret = true;
+ }
+ return ret;
+}
+#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
+
+static void arm926_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm926";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+ cpu->midr = 0x41069265;
+ cpu->reset_fpsid = 0x41011090;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00090078;
+
+ /*
+ * ARMv5 does not have the ID_ISAR registers, but we can still
+ * set the field to indicate Jazelle support within QEMU.
+ */
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ /*
+ * Similarly, we need to set MVFR0 fields to enable vfp and short vector
+ * support even though ARMv5 doesn't have this register.
+ */
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1);
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1);
+}
+
+static void arm946_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm946";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x41059461;
+ cpu->ctr = 0x0f004006;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void arm1026_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1026";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_AUXCR);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+ cpu->midr = 0x4106a262;
+ cpu->reset_fpsid = 0x410110a0;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00090078;
+ cpu->reset_auxcr = 1;
+
+ /*
+ * ARMv5 does not have the ID_ISAR registers, but we can still
+ * set the field to indicate Jazelle support within QEMU.
+ */
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ /*
+ * Similarly, we need to set MVFR0 fields to enable vfp and short vector
+ * support even though ARMv5 doesn't have this register.
+ */
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1);
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1);
+
+ {
+ /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
+ ARMCPRegInfo ifar = {
+ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
+ .resetvalue = 0
+ };
+ define_one_arm_cp_reg(cpu, &ifar);
+ }
+}
+
+static void arm1136_r2_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ /*
+ * What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
+ * older core than plain "arm1136". In particular this does not
+ * have the v6K features.
+ * These ID register values are correct for 1136 but may be wrong
+ * for 1136_r2 (in particular r0p2 does not actually implement most
+ * of the ID registers).
+ */
+
+ cpu->dtb_compatible = "arm,arm1136";
+ set_feature(&cpu->env, ARM_FEATURE_V6);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ cpu->midr = 0x4107b362;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->isar.id_pfr0 = 0x111;
+ cpu->isar.id_pfr1 = 0x1;
+ cpu->isar.id_dfr0 = 0x2;
+ cpu->id_afr0 = 0x3;
+ cpu->isar.id_mmfr0 = 0x01130003;
+ cpu->isar.id_mmfr1 = 0x10030302;
+ cpu->isar.id_mmfr2 = 0x01222110;
+ cpu->isar.id_isar0 = 0x00140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231111;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm1136_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1136";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_V6);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ cpu->midr = 0x4117b363;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->isar.id_pfr0 = 0x111;
+ cpu->isar.id_pfr1 = 0x1;
+ cpu->isar.id_dfr0 = 0x2;
+ cpu->id_afr0 = 0x3;
+ cpu->isar.id_mmfr0 = 0x01130003;
+ cpu->isar.id_mmfr1 = 0x10030302;
+ cpu->isar.id_mmfr2 = 0x01222110;
+ cpu->isar.id_isar0 = 0x00140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231111;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm1176_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1176";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_VAPA);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->midr = 0x410fb767;
+ cpu->reset_fpsid = 0x410120b5;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->isar.id_pfr0 = 0x111;
+ cpu->isar.id_pfr1 = 0x11;
+ cpu->isar.id_dfr0 = 0x33;
+ cpu->id_afr0 = 0;
+ cpu->isar.id_mmfr0 = 0x01130003;
+ cpu->isar.id_mmfr1 = 0x10030302;
+ cpu->isar.id_mmfr2 = 0x01222100;
+ cpu->isar.id_isar0 = 0x0140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231121;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x01141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm11mpcore_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm11mpcore";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_VAPA);
+ set_feature(&cpu->env, ARM_FEATURE_MPIDR);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x410fb022;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
+ cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
+ cpu->isar.id_pfr0 = 0x111;
+ cpu->isar.id_pfr1 = 0x1;
+ cpu->isar.id_dfr0 = 0;
+ cpu->id_afr0 = 0x2;
+ cpu->isar.id_mmfr0 = 0x01100103;
+ cpu->isar.id_mmfr1 = 0x10020302;
+ cpu->isar.id_mmfr2 = 0x01222000;
+ cpu->isar.id_isar0 = 0x00100011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11221011;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
+ cpu->reset_auxcr = 1;
+}
+
+static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
+ { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a8_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a8";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->midr = 0x410fc080;
+ cpu->reset_fpsid = 0x410330c0;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x00011111;
+ cpu->ctr = 0x82048004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x1031;
+ cpu->isar.id_pfr1 = 0x11;
+ cpu->isar.id_dfr0 = 0x400;
+ cpu->id_afr0 = 0;
+ cpu->isar.id_mmfr0 = 0x31100003;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01202000;
+ cpu->isar.id_mmfr3 = 0x11;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x12112111;
+ cpu->isar.id_isar2 = 0x21232031;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
+ cpu->isar.dbgdidr = 0x15141000;
+ cpu->clidr = (1 << 27) | (2 << 24) | 3;
+ cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
+ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
+ cpu->reset_auxcr = 2;
+ define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
+}
+
+static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
+ /*
+ * power_control should be set to maximum latency. Again,
+ * default to 0 and set by private hook
+ */
+ { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
+ { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
+ { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
+ { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ /* TLB lockdown control */
+ { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a9_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a9";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ /*
+ * Note that A9 supports the MP extensions even for
+ * A9UP and single-core A9MP (which are both different
+ * and valid configurations; we don't model A9UP).
+ */
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR);
+ cpu->midr = 0x410fc090;
+ cpu->reset_fpsid = 0x41033090;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x01111111;
+ cpu->ctr = 0x80038003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x1031;
+ cpu->isar.id_pfr1 = 0x11;
+ cpu->isar.id_dfr0 = 0x000;
+ cpu->id_afr0 = 0;
+ cpu->isar.id_mmfr0 = 0x00100103;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01230000;
+ cpu->isar.id_mmfr3 = 0x00002111;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
+ cpu->isar.dbgdidr = 0x35141000;
+ cpu->clidr = (1 << 27) | (1 << 24) | 3;
+ cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
+ define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
+}
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+
+ /*
+ * Linux wants the number of processors from here.
+ * Might as well set the interrupt-controller bit too.
+ */
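+ /* Editor's example: with ms->smp.cpus == 4 this returns 0x03800000. */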
+ return ((ms->smp.cpus - 1) << 24) | (1 << 23);
+}
+#endif
+
+static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
+ .writefn = arm_cp_write_ignore, },
+#endif
+ { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a7_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a7";
+ set_feature(&cpu->env, ARM_FEATURE_V7VE);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
+ cpu->midr = 0x410fc075;
+ cpu->reset_fpsid = 0x41023075;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
+ cpu->ctr = 0x84448003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x00001131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x02010555;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10101105;
+ cpu->isar.id_mmfr1 = 0x40000000;
+ cpu->isar.id_mmfr2 = 0x01240000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ /*
+ * a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
+ * table 4-41 gives 0x02101110, which includes the arm div insns.
+ */
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
+ cpu->isar.dbgdidr = 0x3515f005;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
+}
+
+static void cortex_a15_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a15";
+ set_feature(&cpu->env, ARM_FEATURE_V7VE);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
+ cpu->midr = 0x412fc0f1;
+ cpu->reset_fpsid = 0x410430f0;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->isar.id_pfr0 = 0x00001131;
+ cpu->isar.id_pfr1 = 0x00011011;
+ cpu->isar.id_dfr0 = 0x02010555;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x10201105;
+ cpu->isar.id_mmfr1 = 0x20000000;
+ cpu->isar.id_mmfr2 = 0x01240000;
+ cpu->isar.id_mmfr3 = 0x02102211;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
+ cpu->isar.dbgdidr = 0x3515f021;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
+}
+
+static void cortex_m0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V6);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+
+ cpu->midr = 0x410cc200;
+
+ /*
+ * These ID register values are not guest visible, because
+ * we do not implement the Main Extension. They must be set
+ * to values corresponding to the Cortex-M0's implemented
+ * features, because QEMU generally controls its emulation
+ * by looking at ID register fields. We use the same values as
+ * for the M3.
+ */
+ cpu->isar.id_pfr0 = 0x00000030;
+ cpu->isar.id_pfr1 = 0x00000200;
+ cpu->isar.id_dfr0 = 0x00100000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00000030;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x00000000;
+ cpu->isar.id_mmfr3 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01141110;
+ cpu->isar.id_isar1 = 0x02111000;
+ cpu->isar.id_isar2 = 0x21112231;
+ cpu->isar.id_isar3 = 0x01111110;
+ cpu->isar.id_isar4 = 0x01310102;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+}
+
+static void cortex_m3_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+ cpu->midr = 0x410fc231;
+ cpu->pmsav7_dregion = 8;
+ cpu->isar.id_pfr0 = 0x00000030;
+ cpu->isar.id_pfr1 = 0x00000200;
+ cpu->isar.id_dfr0 = 0x00100000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00000030;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x00000000;
+ cpu->isar.id_mmfr3 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01141110;
+ cpu->isar.id_isar1 = 0x02111000;
+ cpu->isar.id_isar2 = 0x21112231;
+ cpu->isar.id_isar3 = 0x01111110;
+ cpu->isar.id_isar4 = 0x01310102;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+}
+
+static void cortex_m4_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ cpu->midr = 0x410fc240; /* r0p0 */
+ cpu->pmsav7_dregion = 8;
+ cpu->isar.mvfr0 = 0x10110021;
+ cpu->isar.mvfr1 = 0x11000011;
+ cpu->isar.mvfr2 = 0x00000000;
+ cpu->isar.id_pfr0 = 0x00000030;
+ cpu->isar.id_pfr1 = 0x00000200;
+ cpu->isar.id_dfr0 = 0x00100000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00000030;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x00000000;
+ cpu->isar.id_mmfr3 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01141110;
+ cpu->isar.id_isar1 = 0x02111000;
+ cpu->isar.id_isar2 = 0x21112231;
+ cpu->isar.id_isar3 = 0x01111110;
+ cpu->isar.id_isar4 = 0x01310102;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+}
+
+static void cortex_m7_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ cpu->midr = 0x411fc272; /* r1p2 */
+ cpu->pmsav7_dregion = 8;
+ cpu->isar.mvfr0 = 0x10110221;
+ cpu->isar.mvfr1 = 0x12000011;
+ cpu->isar.mvfr2 = 0x00000040;
+ cpu->isar.id_pfr0 = 0x00000030;
+ cpu->isar.id_pfr1 = 0x00000200;
+ cpu->isar.id_dfr0 = 0x00100000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00100030;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x01000000;
+ cpu->isar.id_mmfr3 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01101110;
+ cpu->isar.id_isar1 = 0x02112000;
+ cpu->isar.id_isar2 = 0x20232231;
+ cpu->isar.id_isar3 = 0x01111131;
+ cpu->isar.id_isar4 = 0x01310132;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+}
+
+static void cortex_m33_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+ set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ cpu->midr = 0x410fd213; /* r0p3 */
+ cpu->pmsav7_dregion = 16;
+ cpu->sau_sregion = 8;
+ cpu->isar.mvfr0 = 0x10110021;
+ cpu->isar.mvfr1 = 0x11000011;
+ cpu->isar.mvfr2 = 0x00000040;
+ cpu->isar.id_pfr0 = 0x00000030;
+ cpu->isar.id_pfr1 = 0x00000210;
+ cpu->isar.id_dfr0 = 0x00200000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00101f40;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x01000000;
+ cpu->isar.id_mmfr3 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01101110;
+ cpu->isar.id_isar1 = 0x02212000;
+ cpu->isar.id_isar2 = 0x20232232;
+ cpu->isar.id_isar3 = 0x01111131;
+ cpu->isar.id_isar4 = 0x01310132;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+ cpu->clidr = 0x00000000;
+ cpu->ctr = 0x8000c000;
+}
+
+static void cortex_m55_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_V8_1M);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+ set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ cpu->midr = 0x410fd221; /* r0p1 */
+ cpu->revidr = 0;
+ cpu->pmsav7_dregion = 16;
+ cpu->sau_sregion = 8;
+ /* These are the MVFR* values for the FPU + full MVE configuration */
+ cpu->isar.mvfr0 = 0x10110221;
+ cpu->isar.mvfr1 = 0x12100211;
+ cpu->isar.mvfr2 = 0x00000040;
+ cpu->isar.id_pfr0 = 0x20000030;
+ cpu->isar.id_pfr1 = 0x00000230;
+ cpu->isar.id_dfr0 = 0x10200000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->isar.id_mmfr0 = 0x00111040;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x01000000;
+ cpu->isar.id_mmfr3 = 0x00000011;
+ cpu->isar.id_isar0 = 0x01103110;
+ cpu->isar.id_isar1 = 0x02212000;
+ cpu->isar.id_isar2 = 0x20232232;
+ cpu->isar.id_isar3 = 0x01111131;
+ cpu->isar.id_isar4 = 0x01310132;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
+ cpu->clidr = 0x00000000; /* caches not implemented */
+ cpu->ctr = 0x8303c003;
+}
+
+static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
+ /* Dummy the TCM region regs for the moment */
+ { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST },
+ { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST },
+ { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
+ .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static void cortex_r5_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_PMSA);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->midr = 0x411fc153; /* r1p3 */
+ cpu->isar.id_pfr0 = 0x0131;
+ cpu->isar.id_pfr1 = 0x001;
+ cpu->isar.id_dfr0 = 0x010400;
+ cpu->id_afr0 = 0x0;
+ cpu->isar.id_mmfr0 = 0x0210030;
+ cpu->isar.id_mmfr1 = 0x00000000;
+ cpu->isar.id_mmfr2 = 0x01200000;
+ cpu->isar.id_mmfr3 = 0x0211;
+ cpu->isar.id_isar0 = 0x02101111;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232141;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x0010142;
+ cpu->isar.id_isar5 = 0x0;
+ cpu->isar.id_isar6 = 0x0;
+ cpu->mp_is_up = true;
+ cpu->pmsav7_dregion = 16;
+ define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
+}
+
+static void cortex_r5f_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cortex_r5_initfn(obj);
+ cpu->isar.mvfr0 = 0x10110221;
+ cpu->isar.mvfr1 = 0x00000011;
+}
+
+static void ti925t_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V4T);
+ set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
+ cpu->midr = ARM_CPUID_TI925T;
+ cpu->ctr = 0x5109149;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void sa1100_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "intel,sa1100";
+ set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x4401A11B;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void sa1110_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x6901B119;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void pxa250_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052100;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa255_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d00;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa260_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052903;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa261_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d05;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa262_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d06;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270a0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054110;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270a1_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054111;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270b0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054112;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270b1_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054113;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270c0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054114;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270c5_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054117;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+#ifdef CONFIG_TCG
+static const struct TCGCPUOps arm_v7m_tcg_ops = {
+ .initialize = arm_translate_init,
+ .synchronize_from_tb = arm_cpu_synchronize_from_tb,
+ .debug_excp_handler = arm_debug_excp_handler,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = arm_cpu_record_sigsegv,
+ .record_sigbus = arm_cpu_record_sigbus,
+#else
+ .tlb_fill = arm_cpu_tlb_fill,
+ .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
+ .do_interrupt = arm_v7m_cpu_do_interrupt,
+ .do_transaction_failed = arm_cpu_do_transaction_failed,
+ .do_unaligned_access = arm_cpu_do_unaligned_access,
+ .adjust_watchpoint_address = arm_adjust_watchpoint_address,
+ .debug_check_watchpoint = arm_debug_check_watchpoint,
+ .debug_check_breakpoint = arm_debug_check_breakpoint,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void arm_v7m_class_init(ObjectClass *oc, void *data)
+{
+ ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ acc->info = data;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &arm_v7m_tcg_ops;
+#endif /* CONFIG_TCG */
+
+ cc->gdb_core_xml_file = "arm-m-profile.xml";
+}
+
+#ifndef TARGET_AARCH64
+/*
+ * -cpu max: a CPU with as many features enabled as our emulation supports.
+ * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
+ * this only needs to handle 32 bits, and need not care about KVM.
+ */
+static void arm_max_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cortex_a15_initfn(obj);
+
+ /* old-style VFP short-vector support */
+ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1);
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * We don't set these in system emulation mode for the moment,
+ * since we don't correctly set (all of) the ID registers to
+ * advertise them.
+ */
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ {
+ uint32_t t;
+
+ t = cpu->isar.id_isar5;
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
+ cpu->isar.id_isar5 = t;
+
+ t = cpu->isar.id_isar6;
+ t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
+ t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
+ t = FIELD_DP32(t, ID_ISAR6, SB, 1);
+ t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
+ t = FIELD_DP32(t, ID_ISAR6, BF16, 1);
+ t = FIELD_DP32(t, ID_ISAR6, I8MM, 1);
+ cpu->isar.id_isar6 = t;
+
+ t = cpu->isar.mvfr1;
+ t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */
+ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */
+ cpu->isar.mvfr1 = t;
+
+ t = cpu->isar.mvfr2;
+ t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
+ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
+ cpu->isar.mvfr2 = t;
+
+ t = cpu->isar.id_mmfr3;
+ t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
+ cpu->isar.id_mmfr3 = t;
+
+ t = cpu->isar.id_mmfr4;
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
+ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
+ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */
+ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */
+ cpu->isar.id_mmfr4 = t;
+
+ t = cpu->isar.id_pfr0;
+ t = FIELD_DP32(t, ID_PFR0, DIT, 1);
+ cpu->isar.id_pfr0 = t;
+
+ t = cpu->isar.id_pfr2;
+ t = FIELD_DP32(t, ID_PFR2, SSBS, 1);
+ cpu->isar.id_pfr2 = t;
+ }
+#endif /* CONFIG_USER_ONLY */
+}
+#endif /* !TARGET_AARCH64 */
+
+static const ARMCPUInfo arm_tcg_cpus[] = {
+ { .name = "arm926", .initfn = arm926_initfn },
+ { .name = "arm946", .initfn = arm946_initfn },
+ { .name = "arm1026", .initfn = arm1026_initfn },
+ /*
+ * What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
+ * older core than plain "arm1136". In particular this does not
+ * have the v6K features.
+ */
+ { .name = "arm1136-r2", .initfn = arm1136_r2_initfn },
+ { .name = "arm1136", .initfn = arm1136_initfn },
+ { .name = "arm1176", .initfn = arm1176_initfn },
+ { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
+ { .name = "cortex-a7", .initfn = cortex_a7_initfn },
+ { .name = "cortex-a8", .initfn = cortex_a8_initfn },
+ { .name = "cortex-a9", .initfn = cortex_a9_initfn },
+ { .name = "cortex-a15", .initfn = cortex_a15_initfn },
+ { .name = "cortex-m0", .initfn = cortex_m0_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m3", .initfn = cortex_m3_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m4", .initfn = cortex_m4_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m7", .initfn = cortex_m7_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m33", .initfn = cortex_m33_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m55", .initfn = cortex_m55_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-r5", .initfn = cortex_r5_initfn },
+ { .name = "cortex-r5f", .initfn = cortex_r5f_initfn },
+ { .name = "ti925t", .initfn = ti925t_initfn },
+ { .name = "sa1100", .initfn = sa1100_initfn },
+ { .name = "sa1110", .initfn = sa1110_initfn },
+ { .name = "pxa250", .initfn = pxa250_initfn },
+ { .name = "pxa255", .initfn = pxa255_initfn },
+ { .name = "pxa260", .initfn = pxa260_initfn },
+ { .name = "pxa261", .initfn = pxa261_initfn },
+ { .name = "pxa262", .initfn = pxa262_initfn },
+ /* "pxa270" is an alias for "pxa270-a0" */
+ { .name = "pxa270", .initfn = pxa270a0_initfn },
+ { .name = "pxa270-a0", .initfn = pxa270a0_initfn },
+ { .name = "pxa270-a1", .initfn = pxa270a1_initfn },
+ { .name = "pxa270-b0", .initfn = pxa270b0_initfn },
+ { .name = "pxa270-b1", .initfn = pxa270b1_initfn },
+ { .name = "pxa270-c0", .initfn = pxa270c0_initfn },
+ { .name = "pxa270-c5", .initfn = pxa270c5_initfn },
+#ifndef TARGET_AARCH64
+ { .name = "max", .initfn = arm_max_initfn },
+#endif
+#ifdef CONFIG_USER_ONLY
+ { .name = "any", .initfn = arm_max_initfn },
+#endif
+};
+
+static const TypeInfo idau_interface_type_info = {
+ .name = TYPE_IDAU_INTERFACE,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(IDAUInterfaceClass),
+};
+
+static void arm_tcg_cpu_register_types(void)
+{
+ size_t i;
+
+ type_register_static(&idau_interface_type_info);
+ for (i = 0; i < ARRAY_SIZE(arm_tcg_cpus); ++i) {
+ arm_cpu_register(&arm_tcg_cpus[i]);
+ }
+}
+
+type_init(arm_tcg_cpu_register_types)
+
+#endif /* !CONFIG_USER_ONLY || !TARGET_AARCH64 */
diff --git a/target/arm/crypto_helper.c b/target/arm/crypto_helper.c
new file mode 100644
index 000000000..28a84c2db
--- /dev/null
+++ b/target/arm/crypto_helper.c
@@ -0,0 +1,812 @@
+/*
+ * crypto_helper.c - emulate v8 Crypto Extensions instructions
+ *
+ * Copyright (C) 2013 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "crypto/aes.h"
+#include "vec_internal.h"
+
+union CRYPTO_STATE {
+ uint8_t bytes[16];
+ uint32_t words[4];
+ uint64_t l[2];
+};
+
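+/*
+ * Index the state as the guest sees it: byte/word i of the 128-bit
+ * little-endian value. A big-endian host stores each 64-bit half
+ * MSB-first, so (15 - i) ^ 8 (and (3 - i) ^ 2 for words) reverses the
+ * element order within each half without swapping the halves themselves.
+ */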
+#ifdef HOST_WORDS_BIGENDIAN
+#define CR_ST_BYTE(state, i) ((state).bytes[(15 - (i)) ^ 8])
+#define CR_ST_WORD(state, i) ((state).words[(3 - (i)) ^ 2])
+#else
+#define CR_ST_BYTE(state, i) ((state).bytes[i])
+#define CR_ST_WORD(state, i) ((state).words[i])
+#endif
+
+/*
+ * The caller has not been converted to full gvec, and so only
+ * modifies the low 16 bytes of the vector register.
+ */
+static void clear_tail_16(void *vd, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int max_sz = simd_maxsz(desc);
+
+ assert(opr_sz == 16);
+ clear_tail(vd, opr_sz, max_sz);
+}
+
+static void do_crypto_aese(uint64_t *rd, uint64_t *rn,
+ uint64_t *rm, bool decrypt)
+{
+ static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox };
+ static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts };
+ union CRYPTO_STATE rk = { .l = { rm[0], rm[1] } };
+ union CRYPTO_STATE st = { .l = { rn[0], rn[1] } };
+ int i;
+
+ /* xor state vector with round key */
+ rk.l[0] ^= st.l[0];
+ rk.l[1] ^= st.l[1];
+
+ /* combine ShiftRows operation and sbox substitution */
+ for (i = 0; i < 16; i++) {
+ CR_ST_BYTE(st, i) = sbox[decrypt][CR_ST_BYTE(rk, shift[decrypt][i])];
+ }
+
+ rd[0] = st.l[0];
+ rd[1] = st.l[1];
+}
+
+void HELPER(crypto_aese)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ bool decrypt = simd_data(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ do_crypto_aese(vd + i, vn + i, vm + i, decrypt);
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
+static void do_crypto_aesmc(uint64_t *rd, uint64_t *rm, bool decrypt)
+{
+ static uint32_t const mc[][256] = { {
+ /* MixColumns lookup table */
+ 0x00000000, 0x03010102, 0x06020204, 0x05030306,
+ 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e,
+ 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16,
+ 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e,
+ 0x30101020, 0x33111122, 0x36121224, 0x35131326,
+ 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e,
+ 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36,
+ 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e,
+ 0x60202040, 0x63212142, 0x66222244, 0x65232346,
+ 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e,
+ 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56,
+ 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e,
+ 0x50303060, 0x53313162, 0x56323264, 0x55333366,
+ 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e,
+ 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76,
+ 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e,
+ 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386,
+ 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e,
+ 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96,
+ 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e,
+ 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6,
+ 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae,
+ 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6,
+ 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe,
+ 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6,
+ 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce,
+ 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6,
+ 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde,
+ 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6,
+ 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee,
+ 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6,
+ 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe,
+ 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d,
+ 0x97848413, 0x94858511, 0x91868617, 0x92878715,
+ 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d,
+ 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05,
+ 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d,
+ 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735,
+ 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d,
+ 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25,
+ 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d,
+ 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755,
+ 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d,
+ 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45,
+ 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d,
+ 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775,
+ 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d,
+ 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65,
+ 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d,
+ 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795,
+ 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d,
+ 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85,
+ 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd,
+ 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5,
+ 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad,
+ 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5,
+ 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd,
+ 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5,
+ 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd,
+ 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5,
+ 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd,
+ 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5,
+ 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed,
+ 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5,
+ }, {
+ /* Inverse MixColumns lookup table */
+ 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12,
+ 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a,
+ 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362,
+ 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a,
+ 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2,
+ 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca,
+ 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382,
+ 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba,
+ 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9,
+ 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1,
+ 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9,
+ 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81,
+ 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029,
+ 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411,
+ 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859,
+ 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61,
+ 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf,
+ 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987,
+ 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf,
+ 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7,
+ 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f,
+ 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967,
+ 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f,
+ 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117,
+ 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664,
+ 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c,
+ 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14,
+ 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c,
+ 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684,
+ 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc,
+ 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4,
+ 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc,
+ 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753,
+ 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b,
+ 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23,
+ 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b,
+ 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3,
+ 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b,
+ 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3,
+ 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb,
+ 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88,
+ 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0,
+ 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8,
+ 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0,
+ 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68,
+ 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850,
+ 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418,
+ 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020,
+ 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe,
+ 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6,
+ 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e,
+ 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6,
+ 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e,
+ 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526,
+ 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e,
+ 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56,
+ 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25,
+ 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d,
+ 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255,
+ 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d,
+ 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5,
+ 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd,
+ 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5,
+ 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d,
+ } };
+
+ union CRYPTO_STATE st = { .l = { rm[0], rm[1] } };
+ int i;
+
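+    /*
+     * Each table entry packs the four GF(2^8) products of its index byte
+     * with the (Inv)MixColumns column coefficients ({02,01,01,03} or
+     * {0e,09,0d,0b}) into one little-endian word; the rotated lookups
+     * below then reassemble a full output column.
+     */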
+ for (i = 0; i < 16; i += 4) {
+ CR_ST_WORD(st, i >> 2) =
+ mc[decrypt][CR_ST_BYTE(st, i)] ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 1)], 8) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 2)], 16) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 3)], 24);
+ }
+
+ rd[0] = st.l[0];
+ rd[1] = st.l[1];
+}
+
+void HELPER(crypto_aesmc)(void *vd, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ bool decrypt = simd_data(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ do_crypto_aesmc(vd + i, vm + i, decrypt);
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * SHA-1 logical functions (Ch, Parity and Maj from FIPS 180-4)
+ */
+
+static uint32_t cho(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & (y ^ z)) ^ z;
+}
+
+static uint32_t par(uint32_t x, uint32_t y, uint32_t z)
+{
+ return x ^ y ^ z;
+}
+
+static uint32_t maj(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & y) | ((x | y) & z);
+}
+
+void HELPER(crypto_sha1su0)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t d0, d1;
+
+ d0 = d[1] ^ d[0] ^ m[0];
+ d1 = n[0] ^ d[1] ^ m[1];
+ d[0] = d0;
+ d[1] = d1;
+
+ clear_tail_16(vd, desc);
+}
+
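+/*
+ * Perform four SHA-1 rounds: d holds the {A,B,C,D} working state, the low
+ * word of n supplies E, and m supplies four schedule words (to which guest
+ * code will typically have added the round constant already). fn selects
+ * the round's logical function; note ror32(x, 2) below is rol32(x, 30).
+ */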
+static inline void crypto_sha1_3reg(uint64_t *rd, uint64_t *rn,
+ uint64_t *rm, uint32_t desc,
+ uint32_t (*fn)(union CRYPTO_STATE *d))
+{
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t = fn(&d);
+
+ t += rol32(CR_ST_WORD(d, 0), 5) + CR_ST_WORD(n, 0)
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3);
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = ror32(CR_ST_WORD(d, 1), 2);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
+ }
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(rd, desc);
+}
+
+static uint32_t do_sha1c(union CRYPTO_STATE *d)
+{
+ return cho(CR_ST_WORD(*d, 1), CR_ST_WORD(*d, 2), CR_ST_WORD(*d, 3));
+}
+
+void HELPER(crypto_sha1c)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ crypto_sha1_3reg(vd, vn, vm, desc, do_sha1c);
+}
+
+static uint32_t do_sha1p(union CRYPTO_STATE *d)
+{
+ return par(CR_ST_WORD(*d, 1), CR_ST_WORD(*d, 2), CR_ST_WORD(*d, 3));
+}
+
+void HELPER(crypto_sha1p)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ crypto_sha1_3reg(vd, vn, vm, desc, do_sha1p);
+}
+
+static uint32_t do_sha1m(union CRYPTO_STATE *d)
+{
+ return maj(CR_ST_WORD(*d, 1), CR_ST_WORD(*d, 2), CR_ST_WORD(*d, 3));
+}
+
+void HELPER(crypto_sha1m)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ crypto_sha1_3reg(vd, vn, vm, desc, do_sha1m);
+}
+
+void HELPER(crypto_sha1h)(void *vd, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+
+ CR_ST_WORD(m, 0) = ror32(CR_ST_WORD(m, 0), 2);
+ CR_ST_WORD(m, 1) = CR_ST_WORD(m, 2) = CR_ST_WORD(m, 3) = 0;
+
+ rd[0] = m.l[0];
+ rd[1] = m.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha1su1)(void *vd, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+
+ CR_ST_WORD(d, 0) = rol32(CR_ST_WORD(d, 0) ^ CR_ST_WORD(m, 1), 1);
+ CR_ST_WORD(d, 1) = rol32(CR_ST_WORD(d, 1) ^ CR_ST_WORD(m, 2), 1);
+ CR_ST_WORD(d, 2) = rol32(CR_ST_WORD(d, 2) ^ CR_ST_WORD(m, 3), 1);
+ CR_ST_WORD(d, 3) = rol32(CR_ST_WORD(d, 3) ^ CR_ST_WORD(d, 0), 1);
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+/*
+ * The SHA-256 logical functions, according to
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ */
+
+static uint32_t S0(uint32_t x)
+{
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
+}
+
+static uint32_t S1(uint32_t x)
+{
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
+}
+
+static uint32_t s0(uint32_t x)
+{
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
+}
+
+static uint32_t s1(uint32_t x)
+{
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
+}
+
+void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t = cho(CR_ST_WORD(n, 0), CR_ST_WORD(n, 1), CR_ST_WORD(n, 2))
+ + CR_ST_WORD(n, 3) + S1(CR_ST_WORD(n, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 3) = CR_ST_WORD(n, 2);
+ CR_ST_WORD(n, 2) = CR_ST_WORD(n, 1);
+ CR_ST_WORD(n, 1) = CR_ST_WORD(n, 0);
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3) + t;
+
+ t += maj(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + S0(CR_ST_WORD(d, 0));
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
+ }
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t = cho(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + CR_ST_WORD(d, 3) + S1(CR_ST_WORD(d, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = CR_ST_WORD(n, 3 - i) + t;
+ }
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha256su0)(void *vd, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+
+ CR_ST_WORD(d, 0) += s0(CR_ST_WORD(d, 1));
+ CR_ST_WORD(d, 1) += s0(CR_ST_WORD(d, 2));
+ CR_ST_WORD(d, 2) += s0(CR_ST_WORD(d, 3));
+ CR_ST_WORD(d, 3) += s0(CR_ST_WORD(m, 0));
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+
+ CR_ST_WORD(d, 0) += s1(CR_ST_WORD(m, 2)) + CR_ST_WORD(n, 1);
+ CR_ST_WORD(d, 1) += s1(CR_ST_WORD(m, 3)) + CR_ST_WORD(n, 2);
+ CR_ST_WORD(d, 2) += s1(CR_ST_WORD(d, 0)) + CR_ST_WORD(n, 3);
+ CR_ST_WORD(d, 3) += s1(CR_ST_WORD(d, 1)) + CR_ST_WORD(m, 0);
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+/*
+ * The SHA-512 logical functions (same as above but using 64-bit operands)
+ */
+
+static uint64_t cho512(uint64_t x, uint64_t y, uint64_t z)
+{
+ return (x & (y ^ z)) ^ z;
+}
+
+static uint64_t maj512(uint64_t x, uint64_t y, uint64_t z)
+{
+ return (x & y) | ((x | y) & z);
+}
+
+static uint64_t S0_512(uint64_t x)
+{
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
+}
+
+static uint64_t S1_512(uint64_t x)
+{
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
+}
+
+static uint64_t s0_512(uint64_t x)
+{
+ return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
+}
+
+static uint64_t s1_512(uint64_t x)
+{
+ return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
+}
+
+void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ uint64_t d0 = rd[0];
+ uint64_t d1 = rd[1];
+
+ d1 += S1_512(rm[1]) + cho512(rm[1], rn[0], rn[1]);
+ d0 += S1_512(d1 + rm[0]) + cho512(d1 + rm[0], rm[1], rn[0]);
+
+ rd[0] = d0;
+ rd[1] = d1;
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ uint64_t d0 = rd[0];
+ uint64_t d1 = rd[1];
+
+ d1 += S0_512(rm[0]) + maj512(rn[0], rm[1], rm[0]);
+ d0 += S0_512(d1) + maj512(d1, rm[0], rm[1]);
+
+ rd[0] = d0;
+ rd[1] = d1;
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha512su0)(void *vd, void *vn, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t d0 = rd[0];
+ uint64_t d1 = rd[1];
+
+ d0 += s0_512(rd[1]);
+ d1 += s0_512(rn[0]);
+
+ rd[0] = d0;
+ rd[1] = d1;
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+
+ rd[0] += s1_512(rn[0]) + rm[0];
+ rd[1] += s1_512(rn[1]) + rm[1];
+
+ clear_tail_16(vd, desc);
+}
+
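+/*
+ * SM3PARTW1/SM3PARTW2 together compute the SM3 message expansion
+ * W[j] = P1(W[j-16] ^ W[j-9] ^ (W[j-3] <<< 15)) ^ (W[j-13] <<< 7) ^ W[j-6]
+ * where P1(X) = X ^ (X <<< 15) ^ (X <<< 23); note that ror32(x, 17) and
+ * ror32(x, 9) below are rol32(x, 15) and rol32(x, 23) respectively.
+ */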
+void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ uint32_t t;
+
+ t = CR_ST_WORD(d, 0) ^ CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 1), 17);
+ CR_ST_WORD(d, 0) = t ^ ror32(t, 17) ^ ror32(t, 9);
+
+ t = CR_ST_WORD(d, 1) ^ CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 2), 17);
+ CR_ST_WORD(d, 1) = t ^ ror32(t, 17) ^ ror32(t, 9);
+
+ t = CR_ST_WORD(d, 2) ^ CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 3), 17);
+ CR_ST_WORD(d, 2) = t ^ ror32(t, 17) ^ ror32(t, 9);
+
+ t = CR_ST_WORD(d, 3) ^ CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 0), 17);
+ CR_ST_WORD(d, 3) = t ^ ror32(t, 17) ^ ror32(t, 9);
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ uint64_t *rd = vd;
+ uint64_t *rn = vn;
+ uint64_t *rm = vm;
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ uint32_t t = CR_ST_WORD(n, 0) ^ ror32(CR_ST_WORD(m, 0), 25);
+
+ CR_ST_WORD(d, 0) ^= t;
+ CR_ST_WORD(d, 1) ^= CR_ST_WORD(n, 1) ^ ror32(CR_ST_WORD(m, 1), 25);
+ CR_ST_WORD(d, 2) ^= CR_ST_WORD(n, 2) ^ ror32(CR_ST_WORD(m, 2), 25);
+ CR_ST_WORD(d, 3) ^= CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(m, 3), 25) ^
+ ror32(t, 17) ^ ror32(t, 2) ^ ror32(t, 26);
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
+}
+
+static inline void QEMU_ALWAYS_INLINE
+crypto_sm3tt(uint64_t *rd, uint64_t *rn, uint64_t *rm,
+ uint32_t desc, uint32_t opcode)
+{
+ union CRYPTO_STATE d = { .l = { rd[0], rd[1] } };
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ uint32_t imm2 = simd_data(desc);
+ uint32_t t;
+
+ assert(imm2 < 4);
+
+ if (opcode == 0 || opcode == 2) {
+ /* SM3TT1A, SM3TT2A */
+ t = par(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
+ } else if (opcode == 1) {
+ /* SM3TT1B */
+ t = maj(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
+ } else if (opcode == 3) {
+ /* SM3TT2B */
+ t = cho(CR_ST_WORD(d, 3), CR_ST_WORD(d, 2), CR_ST_WORD(d, 1));
+ } else {
+ qemu_build_not_reached();
+ }
+
+ t += CR_ST_WORD(d, 0) + CR_ST_WORD(m, imm2);
+
+ CR_ST_WORD(d, 0) = CR_ST_WORD(d, 1);
+
+ if (opcode < 2) {
+ /* SM3TT1A, SM3TT1B */
+ t += CR_ST_WORD(n, 3) ^ ror32(CR_ST_WORD(d, 3), 20);
+
+ CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 23);
+ } else {
+ /* SM3TT2A, SM3TT2B */
+ t += CR_ST_WORD(n, 3);
+ t ^= rol32(t, 9) ^ rol32(t, 17);
+
+ CR_ST_WORD(d, 1) = ror32(CR_ST_WORD(d, 2), 13);
+ }
+
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 3);
+ CR_ST_WORD(d, 3) = t;
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+
+ clear_tail_16(rd, desc);
+}
+
+#define DO_SM3TT(NAME, OPCODE) \
+ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+ { crypto_sm3tt(vd, vn, vm, desc, OPCODE); }
+
+DO_SM3TT(crypto_sm3tt1a, 0)
+DO_SM3TT(crypto_sm3tt1b, 1)
+DO_SM3TT(crypto_sm3tt2a, 2)
+DO_SM3TT(crypto_sm3tt2b, 3)
+
+#undef DO_SM3TT
+
+static uint8_t const sm4_sbox[] = {
+ 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+ 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+ 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+ 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+ 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+ 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+ 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+ 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+ 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+ 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+ 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+ 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+ 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+ 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+ 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+ 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+ 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+ 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+ 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+ 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+ 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+ 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+ 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+ 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+ 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+ 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+ 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+ 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+ 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+ 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+ 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+ 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
+};
+
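+/*
+ * One SM4 round per iteration: t is run through the byte-wise S-box
+ * substitution (tau) and then the linear transform
+ * L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24).
+ * The key-schedule variant below uses L'(B) = B ^ (B <<< 13) ^ (B <<< 23).
+ */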
+static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm)
+{
+ union CRYPTO_STATE d = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE n = { .l = { rm[0], rm[1] } };
+ uint32_t t, i;
+
+ for (i = 0; i < 4; i++) {
+ t = CR_ST_WORD(d, (i + 1) % 4) ^
+ CR_ST_WORD(d, (i + 2) % 4) ^
+ CR_ST_WORD(d, (i + 3) % 4) ^
+ CR_ST_WORD(n, i);
+
+ t = sm4_sbox[t & 0xff] |
+ sm4_sbox[(t >> 8) & 0xff] << 8 |
+ sm4_sbox[(t >> 16) & 0xff] << 16 |
+ sm4_sbox[(t >> 24) & 0xff] << 24;
+
+ CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
+ rol32(t, 24);
+ }
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+}
+
+void HELPER(crypto_sm4e)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ do_crypto_sm4e(vd + i, vn + i, vm + i);
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
+static void do_crypto_sm4ekey(uint64_t *rd, uint64_t *rn, uint64_t *rm)
+{
+ union CRYPTO_STATE d;
+ union CRYPTO_STATE n = { .l = { rn[0], rn[1] } };
+ union CRYPTO_STATE m = { .l = { rm[0], rm[1] } };
+ uint32_t t, i;
+
+ d = n;
+ for (i = 0; i < 4; i++) {
+ t = CR_ST_WORD(d, (i + 1) % 4) ^
+ CR_ST_WORD(d, (i + 2) % 4) ^
+ CR_ST_WORD(d, (i + 3) % 4) ^
+ CR_ST_WORD(m, i);
+
+ t = sm4_sbox[t & 0xff] |
+ sm4_sbox[(t >> 8) & 0xff] << 8 |
+ sm4_sbox[(t >> 16) & 0xff] << 16 |
+ sm4_sbox[(t >> 24) & 0xff] << 24;
+
+ CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
+ }
+
+ rd[0] = d.l[0];
+ rd[1] = d.l[1];
+}
+
+void HELPER(crypto_sm4ekey)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ do_crypto_sm4ekey(vd + i, vn + i, vm + i);
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
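+/* RAX1 (SHA3): d[i] = n[i] ^ (m[i] <<< 1), element-wise on 64-bit lanes. */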
+void HELPER(crypto_rax1)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = n[i] ^ rol64(m[i], 1);
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
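
The CR_ST_BYTE()/CR_ST_WORD() remapping at the top of this file is the
subtle part. A minimal standalone sketch makes the little-endian case
concrete (demo_state is a hypothetical stand-in for CRYPTO_STATE; the
asserts assume a little-endian host, and on a big-endian host they would
fail, which is exactly what the (15 - i) ^ 8 remapping compensates for):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in mirroring the CRYPTO_STATE union layout. */
    union demo_state {
        uint8_t bytes[16];
        uint32_t words[4];
        uint64_t l[2];
    };

    int main(void)
    {
        union demo_state st = {
            .l = { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
        };

        /* On a little-endian host, byte i of the 128-bit value is
         * bytes[i], so CR_ST_BYTE() can index the array directly. */
        for (int i = 0; i < 16; i++) {
            assert(st.bytes[i] == i);
        }
        printf("word 0 = 0x%08x\n", st.words[0]); /* prints 0x03020100 */
        return 0;
    }
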
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
new file mode 100644
index 000000000..2983e36dd
--- /dev/null
+++ b/target/arm/debug_helper.c
@@ -0,0 +1,329 @@
+/*
+ * ARM debug helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bcr = env->cp15.dbgbcr[lbn];
+ int brps = arm_num_brps(cpu);
+ int ctx_cmps = arm_num_ctx_cmps(cpu);
+ int bt;
+ uint32_t contextidr;
+ uint64_t hcr_el2;
+
+ /*
+ * Links to unimplemented or non-context aware breakpoints are
+ * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+ * as if linked to an UNKNOWN context-aware breakpoint (in which
+ * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+ * We choose the former.
+ */
+ if (lbn >= brps || lbn < (brps - ctx_cmps)) {
+ return false;
+ }
+
+ bcr = env->cp15.dbgbcr[lbn];
+
+ if (extract64(bcr, 0, 1) == 0) {
+ /* Linked breakpoint disabled : generate no events */
+ return false;
+ }
+
+ bt = extract64(bcr, 20, 4);
+ hcr_el2 = arm_hcr_el2_eff(env);
+
+ switch (bt) {
+ case 3: /* linked context ID match */
+ switch (arm_current_el(env)) {
+ default:
+ /* Context matches never fire in AArch64 EL3 */
+ return false;
+ case 2:
+ if (!(hcr_el2 & HCR_E2H)) {
+ /* Context matches never fire in EL2 without E2H enabled. */
+ return false;
+ }
+ contextidr = env->cp15.contextidr_el[2];
+ break;
+ case 1:
+ contextidr = env->cp15.contextidr_el[1];
+ break;
+ case 0:
+ if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ contextidr = env->cp15.contextidr_el[2];
+ } else {
+ contextidr = env->cp15.contextidr_el[1];
+ }
+ break;
+ }
+ break;
+
+ case 7: /* linked contextidr_el1 match */
+ contextidr = env->cp15.contextidr_el[1];
+ break;
+ case 13: /* linked contextidr_el2 match */
+ contextidr = env->cp15.contextidr_el[2];
+ break;
+
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ case 15: /* linked full context ID match */
+ default:
+ /*
+ * Links to Unlinked context breakpoints must generate no
+ * events; we choose to do the same for reserved values too.
+ */
+ return false;
+ }
+
+ /*
+ * We match the whole register even if this is AArch32 using the
+ * short descriptor format (in which case it holds both PROCID and ASID),
+ * since we don't implement the optional v7 context ID masking.
+ */
+ return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t cr;
+ int pac, hmc, ssc, wt, lbn;
+ /*
+ * Note that for watchpoints the check is against the CPU security
+ * state, not the S/NS attribute on the offending data access.
+ */
+ bool is_secure = arm_is_secure(env);
+ int access_el = arm_current_el(env);
+
+ if (is_wp) {
+ CPUWatchpoint *wp = env->cpu_watchpoint[n];
+
+ if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
+ return false;
+ }
+ cr = env->cp15.dbgwcr[n];
+ if (wp->hitattrs.user) {
+ /*
+ * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+ * match watchpoints as if they were accesses done at EL0, even if
+ * the CPU is at EL1 or higher.
+ */
+ access_el = 0;
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+ if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+ return false;
+ }
+ cr = env->cp15.dbgbcr[n];
+ }
+ /*
+ * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ * enabled and that the address and access type match; for breakpoints
+ * we know the address matched; check the remaining fields, including
+ * linked breakpoints. We rely on WCR and BCR having the same layout
+ * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+ * Note that some combinations of {PAC, HMC, SSC} are reserved and
+ * must act either like some valid combination or as if the watchpoint
+ * were disabled. We choose the former, and use this together with
+ * the fact that EL3 must always be Secure and EL2 must always be
+ * Non-Secure to simplify the code slightly compared to the full
+ * table in the ARM ARM.
+ */
+ pac = extract64(cr, 1, 2);
+ hmc = extract64(cr, 13, 1);
+ ssc = extract64(cr, 14, 2);
+
+ switch (ssc) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ if (is_secure) {
+ return false;
+ }
+ break;
+ case 2:
+ if (!is_secure) {
+ return false;
+ }
+ break;
+ }
+
+ switch (access_el) {
+ case 3:
+ case 2:
+ if (!hmc) {
+ return false;
+ }
+ break;
+ case 1:
+ if (extract32(pac, 0, 1) == 0) {
+ return false;
+ }
+ break;
+ case 0:
+ if (extract32(pac, 1, 1) == 0) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ wt = extract64(cr, 20, 1);
+ lbn = extract64(cr, 16, 4);
+
+ if (wt && !linked_bp_matches(cpu, lbn)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /*
+ * If watchpoints are disabled globally or we can't take debug
+ * exceptions here then watchpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ if (bp_wp_matches(cpu, n, true)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool arm_debug_check_breakpoint(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /*
+ * If breakpoints are disabled globally or we can't take debug
+ * exceptions here then breakpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+ if (bp_wp_matches(cpu, n, false)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+ /*
+ * Called by core code when a CPU watchpoint fires; need to check if this
+ * is also an architectural watchpoint match.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return check_watchpoints(cpu);
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+ /*
+ * Called by core code when a watchpoint or breakpoint fires;
+ * need to check which one and raise the appropriate exception.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit) {
+ if (wp_hit->flags & BP_CPU) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+ cs->watchpoint_hit = NULL;
+
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_watchpoint(same_el, 0, wnr),
+ arm_debug_target_el(env));
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+ bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+ /*
+ * (1) GDB breakpoints should be handled first.
+ * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+ * since singlestep is also done by generating a debug internal
+ * exception.
+ */
+ if (cpu_breakpoint_test(cs, pc, BP_GDB)
+ || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+ return;
+ }
+
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ /*
+ * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ * values to the guest that it shouldn't be able to see at its
+ * exception/security level.
+ */
+ env->exception.vaddress = 0;
+ raise_exception(env, EXCP_PREFETCH_ABORT,
+ syn_breakpoint(same_el),
+ arm_debug_target_el(env));
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * In BE32 system mode, target memory is stored byteswapped (on a
+ * little-endian host system), and by the time we reach here (via an
+ * opcode helper) the addresses of subword accesses have been adjusted
+ * to account for that, which means that watchpoints will not match.
+ * Undo the adjustment here.
+ */
+ if (arm_sctlr_b(env)) {
+ if (len == 1) {
+ addr ^= 3;
+ } else if (len == 2) {
+ addr ^= 2;
+ }
+ }
+
+ return addr;
+}
+
+#endif
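
bp_wp_matches() above decodes every control field with extract64(). A
small self-contained sketch of the bit layout it relies on follows;
extract64_demo is a local stand-in for QEMU's extract64(), and the
register value is invented purely for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's extract64(): bits [start, start+length). */
    static uint64_t extract64_demo(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    int main(void)
    {
        /* Invented DBGBCR/DBGWCR-style value:
         * E=1, PAC=0b10, HMC=1, SSC=0b01, LBN=0b0011, WT=1. */
        uint64_t cr = (1ULL << 0) | (2ULL << 1) | (1ULL << 13) |
                      (1ULL << 14) | (3ULL << 16) | (1ULL << 20);

        assert(extract64_demo(cr, 0, 1) == 1);  /* enable bit */
        assert(extract64_demo(cr, 1, 2) == 2);  /* PAC */
        assert(extract64_demo(cr, 13, 1) == 1); /* HMC */
        assert(extract64_demo(cr, 14, 2) == 1); /* SSC */
        assert(extract64_demo(cr, 16, 4) == 3); /* LBN */
        assert(extract64_demo(cr, 20, 1) == 1); /* WT (linked) */
        return 0;
    }
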
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
new file mode 100644
index 000000000..134da0d0a
--- /dev/null
+++ b/target/arm/gdbstub.c
@@ -0,0 +1,500 @@
+/*
+ * ARM gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+
+typedef struct RegisterSysregXmlParam {
+ CPUState *cs;
+ GString *s;
+ int n;
+} RegisterSysregXmlParam;
+
+/* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
+ whatever the target description contains. Due to a historical mishap
+ the FPA registers appear in between core integer regs and the CPSR.
+ We hack round this by giving the FPA regs zero size when talking to a
+ newer gdb. */
+
+int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (n < 16) {
+ /* Core integer register. */
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+ if (n < 24) {
+ /* FPA registers. */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return gdb_get_zeroes(mem_buf, 12);
+ }
+ switch (n) {
+ case 24:
+ /* FPA status register. */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return gdb_get_reg32(mem_buf, 0);
+ case 25:
+ /* CPSR, or XPSR for M-profile */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return gdb_get_reg32(mem_buf, xpsr_read(env));
+ } else {
+ return gdb_get_reg32(mem_buf, cpsr_read(env));
+ }
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t tmp;
+
+ tmp = ldl_p(mem_buf);
+
+    /* Mask out the low bit of PC to work around gdb bugs. This will probably
+       cause problems if we ever implement the Jazelle DBX extensions. */
+ if (n == 15) {
+ tmp &= ~1;
+ }
+
+ if (n < 16) {
+ /* Core integer register. */
+ if (n == 13 && arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile SP low bits are always 0 */
+ tmp &= ~3;
+ }
+ env->regs[n] = tmp;
+ return 4;
+ }
+ if (n < 24) { /* 16-23 */
+ /* FPA registers (ignored). */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 12;
+ }
+ switch (n) {
+ case 24:
+ /* FPA status register (ignored). */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 4;
+ case 25:
+ /* CPSR, or XPSR for M-profile */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * Don't allow writing to XPSR.Exception as it can cause
+ * a transition into or out of handler mode (it's not
+ * writeable via the MSR insn so this is a reasonable
+ * restriction). Other fields are safe to update.
+ */
+ xpsr_write(env, tmp, ~XPSR_EXCP);
+ } else {
+ cpsr_write(env, tmp, 0xffffffff, CPSRWriteByGDBStub);
+ }
+ return 4;
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
+
+ /* VFP data registers are always little-endian. */
+ if (reg < nregs) {
+ return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ /* Aliases for Q regs. */
+ nregs += 16;
+ if (reg < nregs) {
+ uint64_t *q = aa32_vfp_qreg(env, reg - 32);
+ return gdb_get_reg128(buf, q[0], q[1]);
+ }
+ }
+ switch (reg - nregs) {
+ case 0:
+ return gdb_get_reg32(buf, vfp_get_fpscr(env));
+ }
+ return 0;
+}
+
+static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
+
+ if (reg < nregs) {
+ *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
+ return 8;
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ nregs += 16;
+ if (reg < nregs) {
+ uint64_t *q = aa32_vfp_qreg(env, reg - 32);
+ q[0] = ldq_le_p(buf);
+ q[1] = ldq_le_p(buf + 8);
+ return 16;
+ }
+ }
+ switch (reg - nregs) {
+ case 0:
+ vfp_set_fpscr(env, ldl_p(buf));
+ return 4;
+ }
+ return 0;
+}
+
+static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
+ case 1:
+ return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
+ }
+ return 0;
+}
+
+static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf);
+ return 4;
+ case 1:
+ env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30);
+ return 4;
+ }
+ return 0;
+}
+
+static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ return gdb_get_reg32(buf, env->v7m.vpr);
+ default:
+ return 0;
+ }
+}
+
+static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ env->v7m.vpr = ldl_p(buf);
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * arm_get/set_gdb_*: get/set a gdb register
+ * @env: the CPU state
+ * @buf: a buffer to copy to/from
+ * @reg: register number (offset from start of group)
+ *
+ * We return the number of bytes copied
+ */
+
+static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ const ARMCPRegInfo *ri;
+ uint32_t key;
+
+ key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
+ ri = get_arm_cp_reginfo(cpu->cp_regs, key);
+ if (ri) {
+ if (cpreg_field_is_64bit(ri)) {
+ return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
+ } else {
+ return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
+ }
+ }
+ return 0;
+}
+
+static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ return 0;
+}
+
+static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml,
+ ARMCPRegInfo *ri, uint32_t ri_key,
+ int bitsize, int regnum)
+{
+ g_string_append_printf(s, "<reg name=\"%s\"", ri->name);
+ g_string_append_printf(s, " bitsize=\"%d\"", bitsize);
+ g_string_append_printf(s, " regnum=\"%d\"", regnum);
+ g_string_append_printf(s, " group=\"cp_regs\"/>");
+ dyn_xml->data.cpregs.keys[dyn_xml->num] = ri_key;
+ dyn_xml->num++;
+}
+
+static void arm_register_sysreg_for_xml(gpointer key, gpointer value,
+ gpointer p)
+{
+ uint32_t ri_key = *(uint32_t *)key;
+ ARMCPRegInfo *ri = value;
+ RegisterSysregXmlParam *param = (RegisterSysregXmlParam *)p;
+ GString *s = param->s;
+ ARMCPU *cpu = ARM_CPU(param->cs);
+ CPUARMState *env = &cpu->env;
+ DynamicGDBXMLInfo *dyn_xml = &cpu->dyn_sysreg_xml;
+
+ if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_NO_GDB))) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ if (ri->state == ARM_CP_STATE_AA64) {
+                arm_gen_one_xml_sysreg_tag(s, dyn_xml, ri, ri_key, 64,
+ param->n++);
+ }
+ } else {
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ (ri->secure & ARM_CP_SECSTATE_S)) {
+ return;
+ }
+                if (ri->type & ARM_CP_64BIT) {
+                    arm_gen_one_xml_sysreg_tag(s, dyn_xml, ri, ri_key, 64,
+                                               param->n++);
+                } else {
+                    arm_gen_one_xml_sysreg_tag(s, dyn_xml, ri, ri_key, 32,
+                                               param->n++);
+                }
+ }
+ }
+ }
+}
+
+int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ GString *s = g_string_new(NULL);
+ RegisterSysregXmlParam param = {cs, s, base_reg};
+
+ cpu->dyn_sysreg_xml.num = 0;
+ cpu->dyn_sysreg_xml.data.cpregs.keys = g_new(uint32_t, g_hash_table_size(cpu->cp_regs));
+ g_string_printf(s, "<?xml version=\"1.0\"?>");
+ g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+ g_string_append_printf(s, "<feature name=\"org.qemu.gdb.arm.sys.regs\">");
+ g_hash_table_foreach(cpu->cp_regs, arm_register_sysreg_for_xml, &param);
+ g_string_append_printf(s, "</feature>");
+ cpu->dyn_sysreg_xml.desc = g_string_free(s, false);
+ return cpu->dyn_sysreg_xml.num;
+}
+
+struct TypeSize {
+ const char *gdb_type;
+ int size;
+ const char sz, suffix;
+};
+
+static const struct TypeSize vec_lanes[] = {
+ /* quads */
+ { "uint128", 128, 'q', 'u' },
+ { "int128", 128, 'q', 's' },
+ /* 64 bit */
+ { "ieee_double", 64, 'd', 'f' },
+ { "uint64", 64, 'd', 'u' },
+ { "int64", 64, 'd', 's' },
+ /* 32 bit */
+ { "ieee_single", 32, 's', 'f' },
+ { "uint32", 32, 's', 'u' },
+ { "int32", 32, 's', 's' },
+ /* 16 bit */
+ { "ieee_half", 16, 'h', 'f' },
+ { "uint16", 16, 'h', 'u' },
+ { "int16", 16, 'h', 's' },
+ /* bytes */
+ { "uint8", 8, 'b', 'u' },
+ { "int8", 8, 'b', 's' },
+};
+
+
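+/*
+ * For a CPU with sve_max_vq == 1 (128-bit vectors), the feature built
+ * below contains fragments such as (regnum values depend on base_reg):
+ *
+ *   <vector id="svevqu" type="uint128" count="1"/>
+ *   <union id="svevnq">...</union>
+ *   <reg name="z0" bitsize="128" regnum="..." type="svev"/>
+ *   <reg name="p0" bitsize="16" regnum="..." type="svep"/>
+ *   <reg name="vg" bitsize="64" regnum="..." type="int"/>
+ */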
+int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ GString *s = g_string_new(NULL);
+ DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml;
+ g_autoptr(GString) ts = g_string_new("");
+ int i, j, bits, reg_width = (cpu->sve_max_vq * 128);
+ info->num = 0;
+ g_string_printf(s, "<?xml version=\"1.0\"?>");
+ g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+ g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">");
+
+ /* First define types and totals in a whole VL */
+ for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
+ int count = reg_width / vec_lanes[i].size;
+ g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix);
+ g_string_append_printf(s,
+ "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
+ ts->str, vec_lanes[i].gdb_type, count);
+ }
+ /*
+ * Now define a union for each size group containing unsigned and
+ * signed and potentially float versions of each size from 128 to
+ * 8 bits.
+ */
+ for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+ const char suf[] = { 'q', 'd', 's', 'h', 'b' };
+ g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]);
+ for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) {
+ if (vec_lanes[j].size == bits) {
+ g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>",
+ vec_lanes[j].suffix,
+ vec_lanes[j].sz, vec_lanes[j].suffix);
+ }
+ }
+ g_string_append(s, "</union>");
+ }
+ /* And now the final union of unions */
+ g_string_append(s, "<union id=\"svev\">");
+ for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) {
+ const char suf[] = { 'q', 'd', 's', 'h', 'b' };
+ g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>",
+ suf[i], suf[i]);
+ }
+ g_string_append(s, "</union>");
+
+ /* Finally the sve prefix type */
+ g_string_append_printf(s,
+ "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>",
+ reg_width / 8);
+
+ /* Then define each register in parts for each vq */
+ for (i = 0; i < 32; i++) {
+ g_string_append_printf(s,
+ "<reg name=\"z%d\" bitsize=\"%d\""
+ " regnum=\"%d\" type=\"svev\"/>",
+ i, reg_width, base_reg++);
+ info->num++;
+ }
+ /* fpscr & status registers */
+ g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\""
+ " regnum=\"%d\" group=\"float\""
+ " type=\"int\"/>", base_reg++);
+ g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\""
+ " regnum=\"%d\" group=\"float\""
+ " type=\"int\"/>", base_reg++);
+ info->num += 2;
+
+ for (i = 0; i < 16; i++) {
+ g_string_append_printf(s,
+ "<reg name=\"p%d\" bitsize=\"%d\""
+ " regnum=\"%d\" type=\"svep\"/>",
+ i, cpu->sve_max_vq * 16, base_reg++);
+ info->num++;
+ }
+ g_string_append_printf(s,
+ "<reg name=\"ffr\" bitsize=\"%d\""
+ " regnum=\"%d\" group=\"vector\""
+ " type=\"svep\"/>",
+ cpu->sve_max_vq * 16, base_reg++);
+ g_string_append_printf(s,
+ "<reg name=\"vg\" bitsize=\"64\""
+ " regnum=\"%d\" type=\"int\"/>",
+ base_reg++);
+ info->num += 2;
+ g_string_append_printf(s, "</feature>");
+ cpu->dyn_svereg_xml.desc = g_string_free(s, false);
+
+ return cpu->dyn_svereg_xml.num;
+}
+
+
+const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ if (strcmp(xmlname, "system-registers.xml") == 0) {
+ return cpu->dyn_sysreg_xml.desc;
+ } else if (strcmp(xmlname, "sve-registers.xml") == 0) {
+ return cpu->dyn_svereg_xml.desc;
+ }
+ return NULL;
+}
+
+void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /*
+ * The lower part of each SVE register aliases to the FPU
+ * registers so we don't need to include both.
+ */
+#ifdef TARGET_AARCH64
+ if (isar_feature_aa64_sve(&cpu->isar)) {
+ gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
+ arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
+ "sve-registers.xml", 0);
+ } else {
+ gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
+ aarch64_fpu_gdb_set_reg,
+ 34, "aarch64-fpu.xml", 0);
+ }
+#endif
+ } else {
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 49, "arm-neon.xml", 0);
+ } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 33, "arm-vfp3.xml", 0);
+ } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 17, "arm-vfp.xml", 0);
+ }
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * A and R profile have FP sysregs FPEXC and FPSID that we
+ * expose to gdb.
+ */
+ gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg,
+ 2, "arm-vfp-sysregs.xml", 0);
+ }
+ }
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg,
+ 1, "arm-m-profile-mve.xml", 0);
+ }
+ gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
+ arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
+ "system-registers.xml", 0);
+
+}
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
new file mode 100644
index 000000000..596878666
--- /dev/null
+++ b/target/arm/gdbstub64.c
@@ -0,0 +1,211 @@
+/*
+ * ARM gdb server stub: AArch64 specific functions.
+ *
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+
+int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (n < 31) {
+ /* Core integer register. */
+ return gdb_get_reg64(mem_buf, env->xregs[n]);
+ }
+ switch (n) {
+ case 31:
+ return gdb_get_reg64(mem_buf, env->xregs[31]);
+ case 32:
+ return gdb_get_reg64(mem_buf, env->pc);
+ case 33:
+ return gdb_get_reg32(mem_buf, pstate_read(env));
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t tmp;
+
+ tmp = ldq_p(mem_buf);
+
+ if (n < 31) {
+ /* Core integer register. */
+ env->xregs[n] = tmp;
+ return 8;
+ }
+ switch (n) {
+ case 31:
+ env->xregs[31] = tmp;
+ return 8;
+ case 32:
+ env->pc = tmp;
+ return 8;
+ case 33:
+ /* CPSR */
+ pstate_write(env, tmp);
+ return 4;
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ {
+ /* 128 bit FP register - quads are in LE order */
+ uint64_t *q = aa64_vfp_qreg(env, reg);
+ return gdb_get_reg128(buf, q[1], q[0]);
+ }
+ case 32:
+ /* FPSR */
+ return gdb_get_reg32(buf, vfp_get_fpsr(env));
+ case 33:
+ /* FPCR */
+ return gdb_get_reg32(buf, vfp_get_fpcr(env));
+ default:
+ return 0;
+ }
+}
+
+int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ /* 128 bit FP register */
+ {
+ uint64_t *q = aa64_vfp_qreg(env, reg);
+ q[0] = ldq_le_p(buf);
+ q[1] = ldq_le_p(buf + 8);
+ return 16;
+ }
+ case 32:
+ /* FPSR */
+ vfp_set_fpsr(env, ldl_p(buf));
+ return 4;
+ case 33:
+ /* FPCR */
+ vfp_set_fpcr(env, ldl_p(buf));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ switch (reg) {
+ /* The first 32 registers are the zregs */
+ case 0 ... 31:
+ {
+ int vq, len = 0;
+ for (vq = 0; vq < cpu->sve_max_vq; vq++) {
+ len += gdb_get_reg128(buf,
+ env->vfp.zregs[reg].d[vq * 2 + 1],
+ env->vfp.zregs[reg].d[vq * 2]);
+ }
+ return len;
+ }
+ case 32:
+ return gdb_get_reg32(buf, vfp_get_fpsr(env));
+ case 33:
+ return gdb_get_reg32(buf, vfp_get_fpcr(env));
+ /* then 16 predicates and the ffr */
+ case 34 ... 50:
+ {
+ int preg = reg - 34;
+ int vq, len = 0;
+ for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
+ len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
+ }
+ return len;
+ }
+ case 51:
+ {
+        /*
+         * We report in Vector Granules (VG), i.e. 64-bit chunks of a Z reg,
+         * while the ZCR works in Vector Quads (VQ), i.e. 128-bit chunks.
+         */
+ int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+ return gdb_get_reg64(buf, vq * 2);
+ }
+ default:
+        /* gdbstub asked for something outside our range */
+ qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
+ break;
+ }
+
+ return 0;
+}
+
+int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ switch (reg) {
+ /* The first 32 registers are the zregs */
+ case 0 ... 31:
+ {
+ int vq, len = 0;
+ uint64_t *p = (uint64_t *) buf;
+ for (vq = 0; vq < cpu->sve_max_vq; vq++) {
+ env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
+ env->vfp.zregs[reg].d[vq * 2] = *p++;
+ len += 16;
+ }
+ return len;
+ }
+ case 32:
+ vfp_set_fpsr(env, *(uint32_t *)buf);
+ return 4;
+ case 33:
+ vfp_set_fpcr(env, *(uint32_t *)buf);
+ return 4;
+ case 34 ... 50:
+ {
+ int preg = reg - 34;
+ int vq, len = 0;
+ uint64_t *p = (uint64_t *) buf;
+ for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
+ env->vfp.pregs[preg].p[vq / 4] = *p++;
+ len += 8;
+ }
+ return len;
+ }
+ case 51:
+ /* cannot set vg via gdbstub */
+ return 0;
+ default:
+        /* gdbstub asked for something outside our range */
+ break;
+ }
+
+ return 0;
+}
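
The vg pseudo-register (case 51 above) is the only value here that is
not a plain copy of CPU state. A short standalone sketch of the
VQ-to-VG conversion, in plain C with no QEMU dependencies:

    #include <assert.h>

    /*
     * SVE vector sizes are described either in 128-bit Vector Quads
     * (VQ, as in ZCR_ELx.LEN + 1) or in 64-bit Vector Granules
     * (VG, which is what gdb's org.gnu.gdb.aarch64.sve feature wants).
     */
    int main(void)
    {
        for (int vq = 1; vq <= 16; vq++) {
            int vector_bits = vq * 128;
            int vg = vq * 2;                /* what arm_gdb_get_svereg reports */
            assert(vg * 64 == vector_bits); /* one granule is 64 bits */
        }
        return 0;
    }
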
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
new file mode 100644
index 000000000..5ae2ecb0f
--- /dev/null
+++ b/target/arm/helper-a64.c
@@ -0,0 +1,1099 @@
+/*
+ * AArch64 specific helpers
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "qemu/bitops.h"
+#include "internals.h"
+#include "qemu/crc32c.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "qemu/atomic128.h"
+#include "fpu/softfloat.h"
+#include <zlib.h> /* For crc32 */
+
+/* C2.4.7 Multiply and divide */
+/* Special cases for 0 and LLONG_MIN are mandated by the architecture:
+ * division by zero and LLONG_MIN / -1 are undefined behaviour in C. */
+uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ return num / den;
+}
+
+int64_t HELPER(sdiv64)(int64_t num, int64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ if (num == LLONG_MIN && den == -1) {
+ return LLONG_MIN;
+ }
+ return num / den;
+}
+
+uint64_t HELPER(rbit64)(uint64_t x)
+{
+ return revbit64(x);
+}
+
+void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
+{
+ update_spsel(env, imm);
+}
+
+static void daif_check(CPUARMState *env, uint32_t op,
+ uint32_t imm, uintptr_t ra)
+{
+ /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
+ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
+ raise_exception_ra(env, EXCP_UDEF,
+ syn_aa64_sysregtrap(0, extract32(op, 0, 3),
+ extract32(op, 3, 3), 4,
+ imm, 0x1f, 0),
+ exception_target_el(env), ra);
+ }
+}
+
+void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
+{
+ daif_check(env, 0x1e, imm, GETPC());
+ env->daif |= (imm << 6) & PSTATE_DAIF;
+}
+
+void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
+{
+ daif_check(env, 0x1f, imm, GETPC());
+ env->daif &= ~((imm << 6) & PSTATE_DAIF);
+}
+
+/* Convert a softfloat float_relation_ (as returned by
+ * the float*_compare functions) to the correct ARM
+ * NZCV flag state.
+ */
+static inline uint32_t float_rel_to_flags(int res)
+{
+ uint64_t flags;
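+    /* ARM FCMP NZCV encoding: EQ -> Z,C (0110); LT -> N (1000);
+     * GT -> C (0010); unordered -> C,V (0011). */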
+ switch (res) {
+ case float_relation_equal:
+ flags = PSTATE_Z | PSTATE_C;
+ break;
+ case float_relation_less:
+ flags = PSTATE_N;
+ break;
+ case float_relation_greater:
+ flags = PSTATE_C;
+ break;
+ case float_relation_unordered:
+ default:
+ flags = PSTATE_C | PSTATE_V;
+ break;
+ }
+ return flags;
+}
+
+uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
+{
+ return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
+{
+ return float_rel_to_flags(float16_compare(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare(x, y, fp_status));
+}
+
+float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ if ((float32_is_zero(a) && float32_is_infinity(b)) ||
+ (float32_is_infinity(a) && float32_is_zero(b))) {
+ /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
+ return make_float32((1U << 30) |
+ ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
+ }
+ return float32_mul(a, b, fpst);
+}
+
+float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ if ((float64_is_zero(a) && float64_is_infinity(b)) ||
+ (float64_is_infinity(a) && float64_is_zero(b))) {
+ /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
+ return make_float64((1ULL << 62) |
+ ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
+ }
+ return float64_mul(a, b, fpst);
+}
+
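FMULX differs from an ordinary multiply only for zero times infinity, which returns ±2.0 instead of the default NaN; the bit patterns above construct exactly that constant. A host-double sketch of the same rule (hypothetical ref_fmulx; the helpers themselves operate on raw softfloat bit patterns):

    #include <assert.h>
    #include <math.h>
    #include <stdbool.h>

    /* FMULX shape: 0 * inf -> 2.0 with sign(a) XOR sign(b), else a * b. */
    static double ref_fmulx(double a, double b)
    {
        if ((a == 0.0 && isinf(b)) || (isinf(a) && b == 0.0)) {
            bool neg = signbit(a) != signbit(b);
            return neg ? -2.0 : 2.0;
        }
        return a * b;
    }

    int main(void)
    {
        assert(ref_fmulx(0.0, INFINITY) == 2.0);
        assert(ref_fmulx(-0.0, INFINITY) == -2.0);
        assert(ref_fmulx(3.0, 4.0) == 12.0);
        return 0;
    }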
+/* 64-bit/double versions of the Neon float compare functions */
+uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_eq_quiet(a, b, fpst);
+}
+
+uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_le(b, a, fpst);
+}
+
+uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_lt(b, a, fpst);
+}
+
+/* Reciprocal step and sqrt step. Note that unlike the A32/T32
+ * versions, these do a fully fused multiply-add or
+ * multiply-add-and-halve.
+ */
+
+uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float16_squash_input_denormal(a, fpst);
+ b = float16_squash_input_denormal(b, fpst);
+
+ a = float16_chs(a);
+ if ((float16_is_infinity(a) && float16_is_zero(b)) ||
+ (float16_is_infinity(b) && float16_is_zero(a))) {
+ return float16_two;
+ }
+ return float16_muladd(a, b, float16_two, 0, fpst);
+}
+
+float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ a = float32_chs(a);
+ if ((float32_is_infinity(a) && float32_is_zero(b)) ||
+ (float32_is_infinity(b) && float32_is_zero(a))) {
+ return float32_two;
+ }
+ return float32_muladd(a, b, float32_two, 0, fpst);
+}
+
+float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ a = float64_chs(a);
+ if ((float64_is_infinity(a) && float64_is_zero(b)) ||
+ (float64_is_infinity(b) && float64_is_zero(a))) {
+ return float64_two;
+ }
+ return float64_muladd(a, b, float64_two, 0, fpst);
+}
+
+uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float16_squash_input_denormal(a, fpst);
+ b = float16_squash_input_denormal(b, fpst);
+
+ a = float16_chs(a);
+ if ((float16_is_infinity(a) && float16_is_zero(b)) ||
+ (float16_is_infinity(b) && float16_is_zero(a))) {
+ return float16_one_point_five;
+ }
+ return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
+}
+
+float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ a = float32_chs(a);
+ if ((float32_is_infinity(a) && float32_is_zero(b)) ||
+ (float32_is_infinity(b) && float32_is_zero(a))) {
+ return float32_one_point_five;
+ }
+ return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
+}
+
+float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ a = float64_chs(a);
+ if ((float64_is_infinity(a) && float64_is_zero(b)) ||
+ (float64_is_infinity(b) && float64_is_zero(a))) {
+ return float64_one_point_five;
+ }
+ return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
+}
+
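In formula terms the helpers above compute 2 - a*b (FRECPS) and (3 - a*b) / 2 (FRSQRTS) as single fused operations; the float*_chs() negation folds the subtraction into the muladd, and float_muladd_halve_result performs the exact halving. A host-double sketch of the same shapes and their use in one Newton-Raphson step (illustrative only; fma() stands in for the softfloat fused muladd):

    #include <math.h>

    /* Fused step shapes of FRECPS and FRSQRTS. */
    static double ref_recps(double a, double b)  { return fma(-a, b, 2.0); }
    static double ref_rsqrts(double a, double b) { return fma(-a, b, 3.0) / 2.0; }

    int main(void)
    {
        double x = 3.0, r = 0.3;          /* crude estimate of 1/3 */
        r = r * ref_recps(x, r);          /* one refinement step: r = 0.33 */
        double s = ref_rsqrts(1.0, 1.0);  /* fixed point: 1/sqrt(1) = 1.0 */
        return (fabs(r - 1.0 / 3.0) < 1e-2 && s == 1.0) ? 0 : 1;
    }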
+/* Pairwise long add: add pairs of adjacent elements into
+ * double-width elements in the result (e.g. _s8 is an 8x8->16 op)
+ */
+uint64_t HELPER(neon_addlp_s8)(uint64_t a)
+{
+ uint64_t nsignmask = 0x0080008000800080ULL;
+ uint64_t wsignmask = 0x8000800080008000ULL;
+ uint64_t elementmask = 0x00ff00ff00ff00ffULL;
+ uint64_t tmp1, tmp2;
+ uint64_t res, signres;
+
+ /* Extract odd elements, sign extend each to a 16 bit field */
+ tmp1 = a & elementmask;
+ tmp1 ^= nsignmask;
+ tmp1 |= wsignmask;
+ tmp1 = (tmp1 - nsignmask) ^ wsignmask;
+ /* Ditto for the even elements */
+ tmp2 = (a >> 8) & elementmask;
+ tmp2 ^= nsignmask;
+ tmp2 |= wsignmask;
+ tmp2 = (tmp2 - nsignmask) ^ wsignmask;
+
+ /* calculate the result by summing bits 0..14, 16..30, etc,
+ * and then adjusting the sign bits 15, 31, etc manually.
+ * This ensures the addition can't overflow the 16 bit field.
+ */
+ signres = (tmp1 ^ tmp2) & wsignmask;
+ res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
+ res ^= signres;
+
+ return res;
+}
+
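A naive per-lane reference for the SWAR trick above, handy for convincing yourself that the mask arithmetic really sign-extends each byte (hypothetical ref_addlp_s8, not part of this patch):

    #include <assert.h>
    #include <stdint.h>

    /* Four independent s8 + s8 -> s16 pairwise sums, done lane by lane. */
    static uint64_t ref_addlp_s8(uint64_t a)
    {
        uint64_t res = 0;
        for (int i = 0; i < 4; i++) {
            int8_t lo = (int8_t)(a >> (16 * i));
            int8_t hi = (int8_t)(a >> (16 * i + 8));
            uint16_t sum = (uint16_t)(int16_t)(lo + hi);
            res |= (uint64_t)sum << (16 * i);
        }
        return res;
    }

    int main(void)
    {
        /* every byte is -1, so every 16-bit lane must hold -2 */
        assert(ref_addlp_s8(0xffffffffffffffffULL) == 0xfffefffefffefffeULL);
        return 0;
    }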
+uint64_t HELPER(neon_addlp_u8)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x00ff00ff00ff00ffULL;
+ tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
+ return tmp;
+}
+
+uint64_t HELPER(neon_addlp_s16)(uint64_t a)
+{
+ int32_t reslo, reshi;
+
+ reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
+ reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
+
+ return (uint32_t)reslo | (((uint64_t)reshi) << 32);
+}
+
+uint64_t HELPER(neon_addlp_u16)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x0000ffff0000ffffULL;
+ tmp += (a >> 16) & 0x0000ffff0000ffffULL;
+ return tmp;
+}
+
+/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
+uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint16_t val16, sbit;
+ int16_t exp;
+
+ if (float16_is_any_nan(a)) {
+ float16 nan = a;
+ if (float16_is_signaling_nan(a, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float16_silence_nan(a, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float16_default_nan(fpst);
+ }
+ return nan;
+ }
+
+ a = float16_squash_input_denormal(a, fpst);
+
+ val16 = float16_val(a);
+ sbit = 0x8000 & val16;
+ exp = extract32(val16, 10, 5);
+
+ if (exp == 0) {
+ return make_float16(deposit32(sbit, 10, 5, 0x1e));
+ } else {
+ return make_float16(deposit32(sbit, 10, 5, ~exp));
+ }
+}
+
+float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint32_t val32, sbit;
+ int32_t exp;
+
+ if (float32_is_any_nan(a)) {
+ float32 nan = a;
+ if (float32_is_signaling_nan(a, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float32_silence_nan(a, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan(fpst);
+ }
+ return nan;
+ }
+
+ a = float32_squash_input_denormal(a, fpst);
+
+ val32 = float32_val(a);
+ sbit = 0x80000000ULL & val32;
+ exp = extract32(val32, 23, 8);
+
+ if (exp == 0) {
+ return make_float32(sbit | (0xfe << 23));
+ } else {
+ return make_float32(sbit | (~exp & 0xff) << 23);
+ }
+}
+
+float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint64_t val64, sbit;
+ int64_t exp;
+
+ if (float64_is_any_nan(a)) {
+ float64 nan = a;
+ if (float64_is_signaling_nan(a, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float64_silence_nan(a, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan(fpst);
+ }
+ return nan;
+ }
+
+ a = float64_squash_input_denormal(a, fpst);
+
+ val64 = float64_val(a);
+ sbit = 0x8000000000000000ULL & val64;
+ exp = extract64(float64_val(a), 52, 11);
+
+ if (exp == 0) {
+ return make_float64(sbit | (0x7feULL << 52));
+ } else {
+ return make_float64(sbit | (~exp & 0x7ffULL) << 52);
+ }
+}
+
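FPRecpX simply complements the exponent field and zeroes the fraction, producing a power-of-two scale on the order of 1/x. A host-double sketch of the normal-number path above (NaNs and the exp == 0 case take the other branches; hypothetical ref_frecpx):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Keep the sign, complement the 11-bit exponent, zero the fraction. */
    static double ref_frecpx(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof(bits));
        uint64_t sign = bits & 0x8000000000000000ULL;
        uint64_t exp = (bits >> 52) & 0x7ff;
        bits = sign | ((~exp & 0x7ff) << 52);
        memcpy(&x, &bits, sizeof(x));
        return x;
    }

    int main(void)
    {
        /* 2^10 maps to 2^-9: a power-of-two estimate on the order of 1/x */
        assert(ref_frecpx(1024.0) == 0.001953125);
        return 0;
    }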
+float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
+{
+ /* Von Neumann rounding (round-to-odd) is implemented by using
+ * round-to-zero and then setting the LSB of the result if Inexact
+ * was raised.
+ */
+ float32 r;
+ float_status *fpst = &env->vfp.fp_status;
+ float_status tstat = *fpst;
+ int exflags;
+
+ set_float_rounding_mode(float_round_to_zero, &tstat);
+ set_float_exception_flags(0, &tstat);
+ r = float64_to_float32(a, &tstat);
+ exflags = get_float_exception_flags(&tstat);
+ if (exflags & float_flag_inexact) {
+ r = make_float32(float32_val(r) | 1);
+ }
+ exflags |= get_float_exception_flags(fpst);
+ set_float_exception_flags(exflags, fpst);
+ return r;
+}
+
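Round-to-odd makes a later second rounding safe because the forced LSB preserves the "something was discarded" information. The integer analogue shows the idea in isolation (standalone sketch):

    #include <assert.h>
    #include <stdint.h>

    /* Round-to-odd of a 64-bit value to its top 32 bits: truncate, then
     * set the result LSB if any discarded bit was nonzero (sticky bit). */
    static uint32_t round_to_odd_32(uint64_t x)
    {
        uint32_t r = (uint32_t)(x >> 32);
        if (x & 0xffffffffu) {
            r |= 1;                          /* the inexact case */
        }
        return r;
    }

    int main(void)
    {
        assert(round_to_odd_32(0x1234567800000000ULL) == 0x12345678);
        assert(round_to_odd_32(0x1234567800000001ULL) == 0x12345679);
        return 0;
    }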
+/* 64-bit versions of the CRC helpers. Note that although the operation
+ * (and the prototypes of crc32c() and crc32()) means that only the bottom
+ * 32 bits of the accumulator and result are used, we pass and return
+ * uint64_t for convenience of the generated code. Unlike the 32-bit
+ * instruction set versions, val may genuinely have 64 bits of data in it.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+ uint8_t buf[8];
+
+ stq_le_p(buf, val);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+ uint8_t buf[8];
+
+ stq_le_p(buf, val);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
+
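A usage sketch of the zlib convention handled above: the double complement converts zlib's inverted-CRC interface to the raw accumulator the CRC32X instruction exposes (hypothetical ref_crc32_64; needs linking with -lz):

    #include <stdint.h>
    #include <zlib.h>   /* crc32() */

    static uint64_t ref_crc32_64(uint64_t acc, uint64_t val, uint32_t bytes)
    {
        uint8_t buf[8];
        for (int i = 0; i < 8; i++) {        /* little-endian store of val */
            buf[i] = (uint8_t)(val >> (8 * i));
        }
        return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
    }

    int main(void)
    {
        /* a zero byte folded into a zero accumulator stays zero */
        return ref_crc32_64(0, 0, 1) == 0 ? 0 : 1;
    }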
+uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+ Int128 newv = int128_make128(new_lo, new_hi);
+ Int128 oldv;
+ uintptr_t ra = GETPC();
+ uint64_t o0, o1;
+ bool success;
+ int mem_idx = cpu_mmu_index(env, false);
+ MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+
+ o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
+ o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
+ cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
+ }
+
+ return !success;
+}
+
+uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ Int128 oldv, cmpv, newv;
+ uintptr_t ra = GETPC();
+ bool success;
+ int mem_idx;
+ MemOpIdx oi;
+
+ assert(HAVE_CMPXCHG128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
+
+ cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+
+ success = int128_eq(oldv, cmpv);
+ return !success;
+}
+
+uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ /*
+ * High and low need to be switched here because this is not actually a
+ * 128bit store but two doublewords stored consecutively
+ */
+ Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
+ Int128 newv = int128_make128(new_hi, new_lo);
+ Int128 oldv;
+ uintptr_t ra = GETPC();
+ uint64_t o0, o1;
+ bool success;
+ int mem_idx = cpu_mmu_index(env, false);
+ MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+
+ o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
+ o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
+ cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
+ }
+
+ return !success;
+}
+
+uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ Int128 oldv, cmpv, newv;
+ uintptr_t ra = GETPC();
+ bool success;
+ int mem_idx;
+ MemOpIdx oi;
+
+ assert(HAVE_CMPXCHG128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);
+
+ /*
+ * High and low need to be switched here because this is not actually a
+ * 128bit store but two doublewords stored consecutively
+ */
+ cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
+ newv = int128_make128(new_hi, new_lo);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+
+ success = int128_eq(oldv, cmpv);
+ return !success;
+}
+
+/* Writes back the old data into Rs. */
+void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ Int128 oldv, cmpv, newv;
+ uintptr_t ra = GETPC();
+ int mem_idx;
+ MemOpIdx oi;
+
+ assert(HAVE_CMPXCHG128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
+
+ cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+
+ env->xregs[rs] = int128_getlo(oldv);
+ env->xregs[rs + 1] = int128_gethi(oldv);
+}
+
+void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
+ uint64_t new_hi, uint64_t new_lo)
+{
+ Int128 oldv, cmpv, newv;
+ uintptr_t ra = GETPC();
+ int mem_idx;
+ MemOpIdx oi;
+
+ assert(HAVE_CMPXCHG128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);
+
+ cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+
+ env->xregs[rs + 1] = int128_getlo(oldv);
+ env->xregs[rs] = int128_gethi(oldv);
+}
+
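On hosts where HAVE_CMPXCHG128 is true, cpu_atomic_cmpxchgo_*_mmu ultimately maps to a native 16-byte compare-and-swap. Roughly this shape, using the GCC/Clang builtins (illustrative only, not the QEMU API; may need -mcx16 and/or -latomic):

    #include <stdbool.h>

    /* 16-byte CAS: succeeds iff *ptr == *expected, storing desired. */
    static bool cas128(__int128 *ptr, __int128 *expected, __int128 desired)
    {
        return __atomic_compare_exchange_n(ptr, expected, desired, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        __int128 v = 1, exp = 1;
        return (cas128(&v, &exp, 2) && v == 2) ? 0 : 1;
    }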
+/*
+ * AdvSIMD half-precision
+ */
+
+#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))
+
+#define ADVSIMD_HALFOP(name) \
+uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float16_ ## name(a, b, fpst); \
+}
+
+ADVSIMD_HALFOP(add)
+ADVSIMD_HALFOP(sub)
+ADVSIMD_HALFOP(mul)
+ADVSIMD_HALFOP(div)
+ADVSIMD_HALFOP(min)
+ADVSIMD_HALFOP(max)
+ADVSIMD_HALFOP(minnum)
+ADVSIMD_HALFOP(maxnum)
+
+#define ADVSIMD_TWOHALFOP(name) \
+uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
+{ \
+ float16 a1, a2, b1, b2; \
+ uint32_t r1, r2; \
+ float_status *fpst = fpstp; \
+ a1 = extract32(two_a, 0, 16); \
+ a2 = extract32(two_a, 16, 16); \
+ b1 = extract32(two_b, 0, 16); \
+ b2 = extract32(two_b, 16, 16); \
+ r1 = float16_ ## name(a1, b1, fpst); \
+ r2 = float16_ ## name(a2, b2, fpst); \
+ return deposit32(r1, 16, 16, r2); \
+}
+
+ADVSIMD_TWOHALFOP(add)
+ADVSIMD_TWOHALFOP(sub)
+ADVSIMD_TWOHALFOP(mul)
+ADVSIMD_TWOHALFOP(div)
+ADVSIMD_TWOHALFOP(min)
+ADVSIMD_TWOHALFOP(max)
+ADVSIMD_TWOHALFOP(minnum)
+ADVSIMD_TWOHALFOP(maxnum)
+
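The 2h variants process two half-precision lanes packed into one 32-bit value; extract32()/deposit32() split and rejoin them. The packing itself is just shifts (standalone sketch, no softfloat needed):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t pack2h(uint16_t lo, uint16_t hi)
    {
        return (uint32_t)lo | ((uint32_t)hi << 16);
    }

    static uint16_t lane2h(uint32_t two, int n)   /* n = 0 or 1 */
    {
        return (uint16_t)(two >> (16 * n));
    }

    int main(void)
    {
        uint32_t v = pack2h(0x3c00, 0xbc00);      /* fp16 +1.0 and -1.0 */
        assert(lane2h(v, 0) == 0x3c00 && lane2h(v, 1) == 0xbc00);
        return 0;
    }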
+/* Data processing - scalar floating-point and advanced SIMD */
+static float16 float16_mulx(float16 a, float16 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float16_squash_input_denormal(a, fpst);
+ b = float16_squash_input_denormal(b, fpst);
+
+ if ((float16_is_zero(a) && float16_is_infinity(b)) ||
+ (float16_is_infinity(a) && float16_is_zero(b))) {
+ /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
+ return make_float16((1U << 14) |
+ ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
+ }
+ return float16_mul(a, b, fpst);
+}
+
+ADVSIMD_HALFOP(mulx)
+ADVSIMD_TWOHALFOP(mulx)
+
+/* fused multiply-accumulate */
+uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
+ void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float16_muladd(a, b, c, 0, fpst);
+}
+
+uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
+ uint32_t two_c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float16 a1, a2, b1, b2, c1, c2;
+ uint32_t r1, r2;
+ a1 = extract32(two_a, 0, 16);
+ a2 = extract32(two_a, 16, 16);
+ b1 = extract32(two_b, 0, 16);
+ b2 = extract32(two_b, 16, 16);
+ c1 = extract32(two_c, 0, 16);
+ c2 = extract32(two_c, 16, 16);
+ r1 = float16_muladd(a1, b1, c1, 0, fpst);
+ r2 = float16_muladd(a2, b2, c2, 0, fpst);
+ return deposit32(r1, 16, 16, r2);
+}
+
+/*
+ * Floating point comparisons produce an integer result. Softfloat
+ * routines return float_relation types, which we convert to the
+ * all-ones/zero result Neon requires.
+ */
+
+#define ADVSIMD_CMPRES(test) ((test) ? 0xffff : 0)
+
+uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ int compare = float16_compare_quiet(a, b, fpst);
+ return ADVSIMD_CMPRES(compare == float_relation_equal);
+}
+
+uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ int compare = float16_compare(a, b, fpst);
+ return ADVSIMD_CMPRES(compare == float_relation_greater ||
+ compare == float_relation_equal);
+}
+
+uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ int compare = float16_compare(a, b, fpst);
+ return ADVSIMD_CMPRES(compare == float_relation_greater);
+}
+
+uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float16 f0 = float16_abs(a);
+ float16 f1 = float16_abs(b);
+ int compare = float16_compare(f0, f1, fpst);
+ return ADVSIMD_CMPRES(compare == float_relation_greater ||
+ compare == float_relation_equal);
+}
+
+uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float16 f0 = float16_abs(a);
+ float16 f1 = float16_abs(b);
+ int compare = float16_compare(f0, f1, fpst);
+ return ADVSIMD_CMPRES(compare == float_relation_greater);
+}
+
+/* round to integral */
+uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
+{
+ return float16_round_to_int(x, fp_status);
+}
+
+uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float16 ret;
+
+ ret = float16_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+/*
+ * Half-precision floating point conversion functions
+ *
+ * There are a multitude of conversion functions with various
+ * different rounding modes. This is dealt with by the calling code
+ * setting the mode appropriately before calling the helper.
+ */
+
+uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ /* Invalid if we are passed a NaN */
+ if (float16_is_any_nan(a)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_int16(a, fpst);
+}
+
+uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ /* Invalid if we are passed a NaN */
+ if (float16_is_any_nan(a)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_uint16(a, fpst);
+}
+
+static int el_from_spsr(uint32_t spsr)
+{
+ /* Return the exception level that this SPSR is requesting a return to,
+ * or -1 if it is invalid (an illegal return)
+ */
+ if (spsr & PSTATE_nRW) {
+ switch (spsr & CPSR_M) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ return 1;
+ case ARM_CPU_MODE_MON:
+ /* Returning to Mon from AArch64 is never possible,
+ * so this is an illegal return.
+ */
+ default:
+ return -1;
+ }
+ } else {
+ if (extract32(spsr, 1, 1)) {
+ /* Return with reserved M[1] bit set */
+ return -1;
+ }
+ if (extract32(spsr, 0, 4) == 1) {
+ /* return to EL0 with M[0] bit set */
+ return -1;
+ }
+ return extract32(spsr, 2, 2);
+ }
+}
+
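In the AArch64 encoding decoded above (PSTATE_nRW clear), M[3:2] selects the target EL, M[0] selects SPSel and M[1] is reserved-zero, which is exactly what the three extract32() calls test. A compact check of that layout (illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t spsr = 0x5;                 /* M = 0b0101: EL1 with SP_EL1 */
        assert(((spsr >> 2) & 3) == 1);      /* EL     = M[3:2] */
        assert((spsr & 1) == 1);             /* SPSel  = M[0]   */
        assert(((spsr >> 1) & 1) == 0);      /* M[1] must be 0  */
        return 0;
    }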
+static void cpsr_write_from_spsr_elx(CPUARMState *env,
+ uint32_t val)
+{
+ uint32_t mask;
+
+ /* Save SPSR_ELx.SS into PSTATE. */
+ env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
+ val &= ~PSTATE_SS;
+
+ /* Move DIT to the correct location for CPSR */
+ if (val & PSTATE_DIT) {
+ val &= ~PSTATE_DIT;
+ val |= CPSR_DIT;
+ }
+
+ mask = aarch32_cpsr_valid_mask(env->features,
+ &env_archcpu(env)->isar);
+ cpsr_write(env, val, mask, CPSRWriteRaw);
+}
+
+void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
+{
+ int cur_el = arm_current_el(env);
+ unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
+ uint32_t spsr = env->banked_spsr[spsr_idx];
+ int new_el;
+ bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
+
+ aarch64_save_sp(env, cur_el);
+
+ arm_clear_exclusive(env);
+
+ /* We must squash the PSTATE.SS bit to zero unless both of the
+ * following hold:
+ * 1. debug exceptions are currently disabled
+ * 2. singlestep will be active in the EL we return to
+ * We check 1 here and 2 after we've done the pstate/cpsr write() to
+ * transition to the EL we're going to.
+ */
+ if (arm_generate_debug_exceptions(env)) {
+ spsr &= ~PSTATE_SS;
+ }
+
+ new_el = el_from_spsr(spsr);
+ if (new_el == -1) {
+ goto illegal_return;
+ }
+ if (new_el > cur_el || (new_el == 2 && !arm_is_el2_enabled(env))) {
+ /* Disallow return to an EL which is unimplemented or higher
+ * than the current one.
+ */
+ goto illegal_return;
+ }
+
+ if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+ /* Return to an EL which is configured for a different register width */
+ goto illegal_return;
+ }
+
+ if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
+ goto illegal_return;
+ }
+
+ qemu_mutex_lock_iothread();
+ arm_call_pre_el_change_hook(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+
+ if (!return_to_aa64) {
+ env->aarch64 = 0;
+ /* We do a raw CPSR write because aarch64_sync_64_to_32()
+ * will sort the register banks out for us, and we've already
+ * caught all the bad-mode cases in el_from_spsr().
+ */
+ cpsr_write_from_spsr_elx(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+ aarch64_sync_64_to_32(env);
+
+ if (spsr & CPSR_T) {
+ env->regs[15] = new_pc & ~0x1;
+ } else {
+ env->regs[15] = new_pc & ~0x3;
+ }
+ helper_rebuild_hflags_a32(env, new_el);
+ qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
+ "AArch32 EL%d PC 0x%" PRIx32 "\n",
+ cur_el, new_el, env->regs[15]);
+ } else {
+ int tbii;
+
+ env->aarch64 = 1;
+ spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
+ pstate_write(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+ aarch64_restore_sp(env, new_el);
+ helper_rebuild_hflags_a64(env, new_el);
+
+ /*
+ * Apply TBI to the exception return address. We had to delay this
+ * until after we selected the new EL, so that we could select the
+ * correct TBI+TBID bits. This is made easier by waiting until after
+ * the hflags rebuild, since we can pull the composite TBII field
+ * from there.
+ */
+ tbii = EX_TBFLAG_A64(env->hflags, TBII);
+ if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
+ /* TBI is enabled. */
+ int core_mmu_idx = cpu_mmu_index(env, false);
+ if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) {
+ new_pc = sextract64(new_pc, 0, 56);
+ } else {
+ new_pc = extract64(new_pc, 0, 56);
+ }
+ }
+ env->pc = new_pc;
+
+ qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
+ "AArch64 EL%d PC 0x%" PRIx64 "\n",
+ cur_el, new_el, env->pc);
+ }
+
+ /*
+ * Note that cur_el can never be 0. If new_el is 0, then
+ * el0_a64 is return_to_aa64, else el0_a64 is ignored.
+ */
+ aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
+
+ qemu_mutex_lock_iothread();
+ arm_call_el_change_hook(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+
+ return;
+
+illegal_return:
+ /* Illegal return events of various kinds have architecturally
+ * mandated behaviour:
+ * restore NZCV and DAIF from SPSR_ELx
+ * set PSTATE.IL
+ * restore PC from ELR_ELx
+ * no change to exception level, execution state or stack pointer
+ */
+ env->pstate |= PSTATE_IL;
+ env->pc = new_pc;
+ spsr &= PSTATE_NZCV | PSTATE_DAIF;
+ spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
+ pstate_write(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+ helper_rebuild_hflags_a64(env, cur_el);
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
+ "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
+}
+
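The TBI handling above keeps the low 56 bits of the return address and either sign-extends from bit 55 (two-range regimes, so kernel-half addresses stay in the upper VA range) or zero-extends (single-range regimes). The two cases in isolation (standalone sketch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t strip_tag(uint64_t va, int two_ranges)
    {
        if (two_ranges) {
            /* sign-extend from bit 55 */
            return (uint64_t)(((int64_t)va << 8) >> 8);
        }
        return va & 0x00ffffffffffffffULL;   /* zero-extend low 56 bits */
    }

    int main(void)
    {
        assert(strip_tag(0xa5ffff0000001000ULL, 1) == 0xffffff0000001000ULL);
        assert(strip_tag(0xa500000000001000ULL, 0) == 0x0000000000001000ULL);
        return 0;
    }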
+/*
+ * Square Root and Reciprocal square root
+ */
+
+uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
+{
+ float_status *s = fpstp;
+
+ return float16_sqrt(a, s);
+}
+
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+ /*
+ * Implement DC ZVA, which zeroes a fixed-length block of memory.
+ * Note that we do not implement the (architecturally mandated)
+ * alignment fault for attempts to use this on Device memory
+ * (which matches the usual QEMU behaviour of not implementing either
+ * alignment faults or any memory attribute handling).
+ */
+ int blocklen = 4 << env_archcpu(env)->dcz_blocksize;
+ uint64_t vaddr = vaddr_in & ~(blocklen - 1);
+ int mmu_idx = cpu_mmu_index(env, false);
+ void *mem;
+
+ /*
+ * Trapless lookup. In addition to actual invalid page, may
+ * return NULL for I/O, watchpoints, clean pages, etc.
+ */
+ mem = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
+
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!mem)) {
+ uintptr_t ra = GETPC();
+
+ /*
+ * Trap if accessing an invalid page. DC_ZVA requires that we supply
+ * the original pointer for an invalid page. But watchpoints require
+ * that we probe the actual space. So do both.
+ */
+ (void) probe_write(env, vaddr_in, 1, mmu_idx, ra);
+ mem = probe_write(env, vaddr, blocklen, mmu_idx, ra);
+
+ if (unlikely(!mem)) {
+ /*
+ * The only remaining reason for mem == NULL is I/O.
+ * Just do a series of byte writes as the architecture demands.
+ */
+ for (int i = 0; i < blocklen; i++) {
+ cpu_stb_mmuidx_ra(env, vaddr + i, 0, mmu_idx, ra);
+ }
+ return;
+ }
+ }
+#endif
+
+ memset(mem, 0, blocklen);
+}
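DCZID_EL0.BS encodes the block size as log2 of the number of 4-byte words, hence blocklen = 4 << dcz_blocksize; the typical value 4 gives 64-byte blocks, and ~(blocklen - 1) aligns the faulting address down to a block boundary. A quick check of that arithmetic (illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int dcz_blocksize = 4;                 /* typical DCZID_EL0.BS */
        int blocklen = 4 << dcz_blocksize;     /* 2^4 words x 4 bytes = 64 */
        uint64_t vaddr = 0x1234567fULL & ~(uint64_t)(blocklen - 1);
        assert(blocklen == 64);
        assert(vaddr == 0x12345640);
        return 0;
    }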
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
new file mode 100644
index 000000000..7b706571b
--- /dev/null
+++ b/target/arm/helper-a64.h
@@ -0,0 +1,120 @@
+/*
+ * AArch64 specific helper definitions
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
+DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_2(msr_i_spsel, void, env, i32)
+DEF_HELPER_2(msr_i_daifset, void, env, i32)
+DEF_HELPER_2(msr_i_daifclear, void, env, i32)
+DEF_HELPER_3(vfp_cmph_a64, i64, f16, f16, ptr)
+DEF_HELPER_3(vfp_cmpeh_a64, i64, f16, f16, ptr)
+DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
+DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
+DEF_HELPER_FLAGS_4(simd_tblx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(recpsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_3(rsqrtsf_f16, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(frecpx_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
+DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
+DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
+ i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
+ i64, env, i64, i64, i64)
+DEF_HELPER_5(casp_le_parallel, void, env, i32, i64, i64, i64)
+DEF_HELPER_5(casp_be_parallel, void, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(advsimd_maxh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_FLAGS_3(advsimd_minh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_FLAGS_3(advsimd_maxnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_FLAGS_3(advsimd_minnumh, TCG_CALL_NO_RWG, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_addh, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_subh, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_mulh, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_divh, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_ceq_f16, i32, f16, f16, ptr)
+DEF_HELPER_3(advsimd_cge_f16, i32, f16, f16, ptr)
+DEF_HELPER_3(advsimd_cgt_f16, i32, f16, f16, ptr)
+DEF_HELPER_3(advsimd_acge_f16, i32, f16, f16, ptr)
+DEF_HELPER_3(advsimd_acgt_f16, i32, f16, f16, ptr)
+DEF_HELPER_3(advsimd_mulxh, f16, f16, f16, ptr)
+DEF_HELPER_4(advsimd_muladdh, f16, f16, f16, f16, ptr)
+DEF_HELPER_3(advsimd_add2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_sub2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_mul2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_div2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_max2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_min2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_maxnum2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_minnum2h, i32, i32, i32, ptr)
+DEF_HELPER_3(advsimd_mulx2h, i32, i32, i32, ptr)
+DEF_HELPER_4(advsimd_muladd2h, i32, i32, i32, i32, ptr)
+DEF_HELPER_2(advsimd_rinth_exact, f16, f16, ptr)
+DEF_HELPER_2(advsimd_rinth, f16, f16, ptr)
+DEF_HELPER_2(advsimd_f16tosinth, i32, f16, ptr)
+DEF_HELPER_2(advsimd_f16touinth, i32, f16, ptr)
+DEF_HELPER_2(sqrt_f16, f16, f16, ptr)
+
+DEF_HELPER_2(exception_return, void, env, i64)
+DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64)
+
+DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacib, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacda, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(pacga, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autia, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autib, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autda, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(autdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(mte_check, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
+DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(stg, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(stg_parallel, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(stg_stub, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_3(st2g, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(st2g_parallel, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(st2g_stub, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(ldgm, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(stgm, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(stzgm_tags, TCG_CALL_NO_WG, void, env, i64, i64)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
new file mode 100644
index 000000000..76bd25006
--- /dev/null
+++ b/target/arm/helper-mve.h
@@ -0,0 +1,890 @@
+/*
+ * M-profile MVE specific helper definitions
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+DEF_HELPER_FLAGS_3(mve_vldrb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vldrb_sh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrb_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_sw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vldrh_uw, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_h, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrb_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vldrb_sg_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrb_sg_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vldrb_sg_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrb_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrb_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrw_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrd_sg_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vstrb_sg_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrb_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrb_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrh_sg_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrh_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrw_sg_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrd_sg_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrh_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrw_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrd_sg_os_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vstrh_sg_os_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrh_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrw_sg_os_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrd_sg_os_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vldrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vldrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrw_sg_wb_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vstrd_sg_wb_ud, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld20b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld20h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld20w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld21b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld21h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld21w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld40b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld40h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld40w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld41b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld41h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld41w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld42b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld42h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld42w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vld43b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld43h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vld43w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst20b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst20h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst20w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst21b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst21h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst21w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst40b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst40h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst40w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst41b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst41h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst41w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst42b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst42h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst42w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vst43b, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst43h, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_vst43w, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+DEF_HELPER_FLAGS_4(mve_viduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+DEF_HELPER_FLAGS_4(mve_vidupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_5(mve_viwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mve_viwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mve_viwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+
+DEF_HELPER_FLAGS_5(mve_vdwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mve_vdwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mve_vdwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vclzb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vclzw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vrev16b, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrev32b, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrev32h, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrev64b, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrev64h, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrev64w, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vmvn, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vabsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfabss, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfnegs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vqabsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqabsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqabsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vqnegb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqnegh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqnegw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vmaxab, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vmaxah, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vmaxaw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vminab, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vminah, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vminaw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcvt_rm_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_rm_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_rm_ss, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_rm_us, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcvtb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcvtt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcvtb_hs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcvtt_hs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vmovnbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vmovnbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vmovntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vmovnth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vqmovunbb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovunbh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovuntb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovunth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vqmovnbsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovnbsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovntsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovntsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vqmovnbub, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovnbuh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovntub, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vqmovntuh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vand, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vpsel, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_1(mve_vpnot, TCG_CALL_NO_WG, void, env)
+
+DEF_HELPER_FLAGS_2(mve_vctp, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vsubb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrmulhsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrmulhuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmaxsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vminsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vabdsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabdub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vabduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmullbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullbuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmulltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmulltuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmullpbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullpth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullpbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmullptw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmulhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqsubub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqsubuw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrshlsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshlsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrshlub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrshluw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmladhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqrdmlsdhxw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vqdmullbh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmullbw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmullth, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vqdmulltw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrhaddsb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhaddsw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vadc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vadci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsbc, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vsbci, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcadd90b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd90w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhcadd90b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhcadd90w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vhcadd270b, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vhcadd270w, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfadds, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfsubs, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfmuls, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfabdh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfabds, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmaxnmh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxnms, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vminnmh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminnms, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vmaxnmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vmaxnmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vminnmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vminnmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfcadd90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfcadd90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfcadd270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfcadd270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfmah, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vfmsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vfmss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcmul0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul180h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcmla0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla180h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsub_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsub_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmul_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmul_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhadds_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhaddu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubs_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vhsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqadds_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqadds_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqadds_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqaddu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubs_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqsubu_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmulh_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vbrsrb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vbrsrh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vbrsrw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullb_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmullt_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmlab, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlah, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlaw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmlasb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlash, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlasw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlahb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlahh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlahw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrdmlashb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlashh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrdmlashw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmlaldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmlaldavuh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlaldavuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmlsldavsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsh, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vmlsldavxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vmladavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vmladavsxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmladavsxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vmlsdavxw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmaxvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxavb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vminvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminavb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmaxnmvh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxnmvs, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vminnmvh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminnmvs, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmaxnmavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vmaxnmavs, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vminnmavh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vminnmavs, TCG_CALL_NO_WG, i32, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vabavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
+DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
+
+DEF_HELPER_FLAGS_4(mve_vshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshlui_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshlui_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshli_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshli_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshli_sw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshli_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshli_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshli_uw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshllbsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshllbuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrshrnbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrshrnth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnb_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_ub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrnt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vqrshrunbb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_3(mve_sshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_ushll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshrl48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshll48, TCG_CALL_NO_RWG, i64, env, i64, i32)
+
+DEF_HELPER_FLAGS_3(mve_uqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_uqrshl, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(mve_sqrshr, TCG_CALL_NO_RWG, i32, env, i32, i32)
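+
+/*
+ * Editorial aside (not part of the original patch): unlike the vector
+ * helpers above and below, which pass Q-register contents by pointer,
+ * these scalar shift helpers take and return plain integer values: they
+ * back the MVE shift instructions that operate on general-purpose
+ * registers, with the i64 forms modelling a 64-bit value held in a GPR
+ * pair and the *48 forms being the 48-bit saturating variants.
+ */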
+
+DEF_HELPER_FLAGS_3(mve_vcmpeqb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpeqh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpeqw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpneb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpneh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpnew, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpcsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpcsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpcsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmphib, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmphih, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmphiw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpgeb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpgeh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpgew, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpltb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmplth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpltw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpgtb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpgth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpgtw, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpleb, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vcmplew, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpeq_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpne_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpcs_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmphi_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpge_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmplt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmpgt_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarb, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vcmple_scalarw, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpeqh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmpeqs, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpneh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmpnes, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpgeh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmpges, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmplth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmplts, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpgth, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmpgts, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpleh, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vfcmples, TCG_CALL_NO_WG, void, env, ptr, ptr)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpeq_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmpeq_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpne_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmpne_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpge_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmpge_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmplt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmplt_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmpgt_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmpgt_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vfcmple_scalarh, TCG_CALL_NO_WG, void, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vfcmple_scalars, TCG_CALL_NO_WG, void, env, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfadd_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfsub_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfsub_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfmul_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfmul_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfma_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfma_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vfmas_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vfmas_scalars, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vcvt_sh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_uh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_hs, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_hu, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_sf, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_uf, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_fs, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vcvt_fu, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vrint_rm_h, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vrint_rm_s, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(mve_vrintx_h, TCG_CALL_NO_WG, void, env, ptr, ptr)
+DEF_HELPER_FLAGS_3(mve_vrintx_s, TCG_CALL_NO_WG, void, env, ptr, ptr)
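+
+/*
+ * Editorial aside (not part of the original patch): the MVE helper
+ * names above follow a simple scheme: the b/h/w suffixes select the
+ * 8/16/32-bit element size (the float helpers use h/s for half and
+ * single precision), s/u distinguish signed from unsigned variants,
+ * and the _scalar forms take their scalar operand as a trailing i32
+ * instead of a third vector pointer.
+ */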
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
new file mode 100644
index 000000000..dc629f851
--- /dev/null
+++ b/target/arm/helper-sve.h
@@ -0,0 +1,2800 @@
+/*
+ * AArch64 SVE specific helper definitions
+ *
+ * Copyright (c) 2018 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
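+/*
+ * Editorial aside (not part of the original patch): each
+ * DEF_HELPER_FLAGS_n entry below declares a helper taking n arguments.
+ * Assuming the usual expansion via include/exec/helper-proto.h, a line
+ * such as
+ *
+ *     DEF_HELPER_FLAGS_4(sve_ext, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+ *
+ * produces roughly the C prototype
+ *
+ *     void helper_sve_ext(void *, void *, void *, uint32_t);
+ *
+ * The flag argument is a hint to the TCG optimizer: TCG_CALL_NO_RWG
+ * promises the helper neither reads nor writes TCG globals, while
+ * TCG_CALL_NO_WG promises only that it does not write them.
+ */
+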
+DEF_HELPER_FLAGS_2(sve_predtest1, TCG_CALL_NO_WG, i32, i64, i64)
+DEF_HELPER_FLAGS_3(sve_predtest, TCG_CALL_NO_WG, i32, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_pfirst, TCG_CALL_NO_WG, i32, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_pnext, TCG_CALL_NO_WG, i32, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_and_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_and_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_and_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_and_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_eor_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_eor_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_eor_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_eor_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_orr_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_orr_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_orr_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_orr_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_bic_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bic_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bic_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bic_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_add_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_add_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_add_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_add_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_sub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_smax_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smax_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smax_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smax_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_umax_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umax_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umax_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umax_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_smin_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smin_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smin_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smin_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_umin_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umin_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umin_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umin_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_sabd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sabd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sabd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sabd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_uabd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_uabd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_uabd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_uabd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_mul_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_mul_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_mul_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_mul_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_smulh_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smulh_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smulh_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_smulh_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_umulh_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umulh_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umulh_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_umulh_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sadalp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uadalp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqrshl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_srhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_urhadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_shsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uhsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sdiv_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_udiv_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_udiv_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_asr_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_asr_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_asr_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_asr_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_lsr_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsr_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsr_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsr_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_lsl_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsl_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsl_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsl_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_addp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uqsub_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_suqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_usqadd_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_asr_zpzw_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_asr_zpzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_asr_zpzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_lsr_zpzw_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsr_zpzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsr_zpzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_lsl_zpzw_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsl_zpzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_lsl_zpzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_orv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_orv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_orv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_orv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_eorv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_eorv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_eorv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_eorv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_andv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_andv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_andv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_andv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_saddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_saddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_saddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_uaddv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uaddv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uaddv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uaddv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_smaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_smaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_smaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_smaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_umaxv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_umaxv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_umaxv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_umaxv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_sminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_uminv_b, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uminv_h, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uminv_s, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uminv_d, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_movz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_movz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_movz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_movz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_asr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_lsr_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsr_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsr_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsr_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_lsl_zpzi_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsl_zpzi_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsl_zpzi_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsl_zpzi_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_asrd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asrd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asrd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asrd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cls_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cls_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_clz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_clz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_clz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_clz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cnt_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnt_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnt_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnt_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cnot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnot_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cnot_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_not_zpz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_not_zpz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_not_zpz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_not_zpz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_sxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_sxtb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_sxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_uxtb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uxtb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uxtb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_sxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_sxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_uxth_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uxth_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_sxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uxtw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_abs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_abs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_abs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_abs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_neg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_neg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_neg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_neg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_mla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_mls_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_index_b, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_4(sve_index_h, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_4(sve_index_s, TCG_CALL_NO_RWG, void, ptr, i32, i32, i32)
+DEF_HELPER_FLAGS_4(sve_index_d, TCG_CALL_NO_RWG, void, ptr, i64, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_asr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_asr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_lsr_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsr_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsr_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_lsl_zzw_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsl_zzw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_lsl_zzw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_adr_p32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_adr_p64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_adr_s32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_adr_u32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_fexpa_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_fexpa_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_fexpa_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_ftssel_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ftssel_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_ftssel_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_sqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32)
+DEF_HELPER_FLAGS_4(sve_sqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32)
+DEF_HELPER_FLAGS_4(sve_sqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32)
+DEF_HELPER_FLAGS_4(sve_sqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32)
+
+DEF_HELPER_FLAGS_4(sve_uqaddi_b, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32)
+DEF_HELPER_FLAGS_4(sve_uqaddi_h, TCG_CALL_NO_RWG, void, ptr, ptr, s32, i32)
+DEF_HELPER_FLAGS_4(sve_uqaddi_s, TCG_CALL_NO_RWG, void, ptr, ptr, s64, i32)
+DEF_HELPER_FLAGS_4(sve_uqaddi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_uqsubi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_5(sve_cpy_m_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_5(sve_cpy_m_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_5(sve_cpy_m_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_5(sve_cpy_m_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_cpy_z_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_cpy_z_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_cpy_z_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_cpy_z_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_ext, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_insr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_insr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_3(sve_rev_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_tbl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_uunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_uunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
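+/* Permutes on predicate registers: ZIP/UZP/TRN, REV and PUNPK. */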
+DEF_HELPER_FLAGS_4(sve_zip_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_rev_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve_punpk_p, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_zip_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_zip_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uzp_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_trn_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_trn_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_compact_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_compact_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_2(sve_last_active_element, TCG_CALL_NO_RWG, s32, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_rbit_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqabs_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqabs_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqneg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqneg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_splice, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
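+/*
+ * Integer compares: each helper writes the predicate result and
+ * returns the NZCV flags as i32.  _ppzz compares two vectors, _ppzw
+ * compares against wide (64-bit) second-operand elements, and the
+ * _ppzi group below compares against an immediate.
+ */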
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzz_d, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_cmpeq_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpne_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpge_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpgt_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphi_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmphs_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmple_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplt_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmplo_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_cmpls_ppzw_s, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_b, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_h, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_s, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_cmpeq_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpne_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpgt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpge_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplt_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmple_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphs_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmphi_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmplo_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_cmpls_ppzi_d, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
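+/* Predicate logical operations (SEL is the predicated select). */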
+DEF_HELPER_FLAGS_5(sve_and_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bic_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_eor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_sel_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_orr_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_orn_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_nor_pppp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_nand_pppp, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
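+/*
+ * BRK* predicate "break" operations; _z/_m select zeroing or merging
+ * predication, and the flag-setting forms return NZCV as i32.
+ */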
+DEF_HELPER_FLAGS_5(sve_brkpa, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpb, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpas, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_brkpbs, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brka_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkb_z, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brka_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkb_m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brkas_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkbs_z, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkas_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkbs_m, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
+
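+/*
+ * CNTP returns the number of active predicate elements as i64.  The
+ * WHILE helpers materialise a predicate from an element count computed
+ * by the translator and return the NZCV flag result; sve_whileg serves
+ * the SVE2 decrementing WHILE group.
+ */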
+DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve_whilel, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_whileg, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+
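+/*
+ * Unpredicated arithmetic with an immediate passed in the i64 slot:
+ * reversed subtract (subri) and signed/unsigned max/min.
+ */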
+DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_subri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_smini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_smini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(sve_umini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(sve_umini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
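+/*
+ * FRECPS / FRSQRTS step operations, declared in the unpredicated
+ * gvec style: the fourth ptr is the FP status pointer.
+ */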
+DEF_HELPER_FLAGS_5(gvec_recps_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
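+/*
+ * FP horizontal reductions, returning the scalar result in i64.
+ * sve_fadda is the strictly-ordered FADDA and additionally takes the
+ * initial accumulator value as its first argument.
+ */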
+DEF_HELPER_FLAGS_4(sve_faddv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_faddv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_faddv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fmaxnmv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fmaxnmv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fmaxnmv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fminnmv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fminnmv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fminnmv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fmaxv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fmaxv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fmaxv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve_fminv_h, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fminv_s, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve_fminv_d, TCG_CALL_NO_RWG,
+ i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG,
+ i64, i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG,
+ i64, i64, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fadda_d, TCG_CALL_NO_RWG,
+ i64, i64, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmge0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmge0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmge0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmgt0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmgt0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmgt0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmlt0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmlt0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmlt0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmle0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmle0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmle0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmeq0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmeq0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmeq0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcmne0_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
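+/*
+ * Predicated two-operand FP arithmetic: destination, two sources,
+ * governing predicate, FP status pointer, descriptor.
+ */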
+DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmul_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fdiv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fdiv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxnum_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fabd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fabd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fabd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fscalbn_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fscalbn_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fscalbn_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmulx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmulx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmulx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
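+/* As above, but the second operand is a scalar passed in the i64 slot. */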
+DEF_HELPER_FLAGS_6(sve_fadds_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fadds_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fadds_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fsubs_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsubs_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsubs_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmuls_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmuls_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmuls_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fsubrs_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsubrs_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fsubrs_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmaxnms_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxnms_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxnms_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fminnms_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fminnms_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fminnms_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmaxs_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxs_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmaxs_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fmins_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmins_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fmins_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i64, ptr, i32)
+
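+/*
+ * FCVT precision conversions among half/single/double, plus bfloat16
+ * via sve_bfcvt.  The fcvtzs/fcvtzu groups below convert to signed/
+ * unsigned integer with round-toward-zero.
+ */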
+DEF_HELPER_FLAGS_5(sve_fcvt_sh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvt_dh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvt_hs, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvt_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvt_hd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvt_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bfcvt, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcvtzs_hh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_hs, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_ss, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_hd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzs_dd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fcvtzu_hh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_hs, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_ss, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_hd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fcvtzu_dd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_frint_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frint_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frint_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_frintx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frintx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frintx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_frecpx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frecpx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_frecpx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_fsqrt_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fsqrt_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_fsqrt_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_scvt_hh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_sh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_dh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_ss, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_scvt_dd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_ucvt_hh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_sh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_dh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_ss, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ucvt_dd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcmge_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmge_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmge_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcmgt_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmgt_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmgt_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcmeq_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmeq_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmeq_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcmne_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmne_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmne_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcmuo_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmuo_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcmuo_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_facge_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_facge_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_facge_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_facgt_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_facgt_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_facgt_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_fcadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
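+/*
+ * Predicated FP multiply-add (FMLA/FMLS/FNMLA/FNMLS) and complex
+ * FCMLA: three vector sources, governing predicate, FP status,
+ * descriptor.
+ */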
+DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_7(sve_fcmla_zpzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
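+/*
+ * SVE2 widening integer arithmetic: the "long" forms take narrow
+ * elements from both sources, the "wide" forms only from the second.
+ */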
+DEF_HELPER_FLAGS_4(sve2_saddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uabdl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uabdl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_saddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_saddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_ssubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_ssubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uaddw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uaddw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
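+/*
+ * Contiguous loads, sve_ld<N><mem><reg>[_<endian>]_r: N registers
+ * loaded, memory element size, register element size with u/s
+ * extension, little/big-endian.  These access guest memory and may
+ * fault, hence TCG_CALL_NO_WG rather than TCG_CALL_NO_RWG.
+ */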
+DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
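+/* _mte variants perform Memory Tagging Extension checks on each access. */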
+DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
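+/*
+ * LDFF1 (first-fault) loads: only the first active element may fault;
+ * faults on later elements instead clear the corresponding FFR bits.
+ */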
+DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
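+/*
+ * LDNF1 (non-fault) loads: no element access may fault; elements that
+ * cannot be loaded are recorded in the FFR.
+ */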
+DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r_mte, TCG_CALL_NO_WG,
+ void, env, ptr, tl, i32)
+
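+/*
+ * Contiguous stores; the st1b<h,s,d>, st1h<s,d> and st1sd forms
+ * truncate, storing only the low part of each element.
+ */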
+DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1hs_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hs_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
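+/*
+ * Gather loads with vector offsets: _zsu/_zss use zero-/sign-extended
+ * 32-bit offsets, _zd 64-bit offsets.  Arguments are (env, destination
+ * vector, governing predicate, offset vector, scalar base, descriptor).
+ */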
+DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbsu_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbss_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbsu_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldss_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbss_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhss_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldbdu_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_lddd_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldbds_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldhds_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldsds_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
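+/*
+ * First-fault (LDFF1) forms of the gather loads above: "ff" marks the
+ * first-fault behaviour, where only the first active element may take
+ * a fault and later faulting elements update the first-fault register
+ * (FFR) instead.
+ */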
+DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbss_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbsu_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffss_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbss_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_ldffbdu_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffbds_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
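+/*
+ * Scatter stores: sve_st<msz><esz>, with the same _le/_be, offset
+ * (_zsu/_zss/_zd) and _mte suffixes as the gather loads above.
+ */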
+DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbs_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbs_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sths_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stss_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zsu_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zss_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_6(sve_stbd_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_sthd_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stsd_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_zzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_pmull_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sshll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sshll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_ushll_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_ushll_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_eoril_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_eoril_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bext_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bext_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bdep_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bdep_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_bgrp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_bgrp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_cadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_cadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqcadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sabal_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sabal_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_uabal_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_uabal_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_adcl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_adcl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqxtnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqxtunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_shrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_shrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_shrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_rshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_rshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrunt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_sqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sve2_uqrshrnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_addhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_addhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_addhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_raddhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_raddhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_raddhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_subhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_subhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_subhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_rsubhnt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_match_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_match_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_b, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_nmatch_ppzz_h, TCG_CALL_NO_RWG,
+ i32, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_histcnt_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_histcnt_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_histseg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_xar_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_xar_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_faddp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminnmp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmaxp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fminp_zpzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_eor3, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bcax, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl1n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_bsl2n, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_nbsl, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(fmmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(fmmla_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqdmlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmull_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_smlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_smlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlal_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_umlsl_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_smull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_smull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_umull_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cdot_zzzz_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cdot_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cdot_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve_bfcvtnt, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_fcvtlt_hs, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_fcvtlt_sd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(flogb_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(flogb_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(flogb_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqshlu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/helper.c b/target/arm/helper.c
new file mode 100644
index 000000000..9b317899a
--- /dev/null
+++ b/target/arm/helper.c
@@ -0,0 +1,13572 @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "target/arm/idau.h"
+#include "trace.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include <zlib.h> /* For crc32 */
+#include "hw/irq.h"
+#include "semihosting/semihost.h"
+#include "sysemu/cpus.h"
+#include "sysemu/cpu-timers.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+#include "qemu/range.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#include "exec/cpu_ldst.h"
+#include "semihosting/common-semi.h"
+#endif
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
+
+#ifndef CONFIG_USER_ONLY
+
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
+#endif
+
+static void switch_mode(CPUARMState *env, int mode);
+static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
+
+static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ assert(ri->fieldoffset);
+ if (cpreg_field_is_64bit(ri)) {
+ return CPREG_FIELD64(env, ri);
+ } else {
+ return CPREG_FIELD32(env, ri);
+ }
+}
+
+static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ assert(ri->fieldoffset);
+ if (cpreg_field_is_64bit(ri)) {
+ CPREG_FIELD64(env, ri) = value;
+ } else {
+ CPREG_FIELD32(env, ri) = value;
+ }
+}
+
+static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return (char *)env + ri->fieldoffset;
+}
+
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Raw read of a coprocessor register (as needed for migration, etc). */
+ if (ri->type & ARM_CP_CONST) {
+ return ri->resetvalue;
+ } else if (ri->raw_readfn) {
+ return ri->raw_readfn(env, ri);
+ } else if (ri->readfn) {
+ return ri->readfn(env, ri);
+ } else {
+ return raw_read(env, ri);
+ }
+}
+
+static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t v)
+{
+ /* Raw write of a coprocessor register (as needed for migration, etc).
+ * Note that constant registers are treated as write-ignored; the
+ * caller should check for success by whether a readback gives the
+ * value written.
+ */
+ if (ri->type & ARM_CP_CONST) {
+ return;
+ } else if (ri->raw_writefn) {
+ ri->raw_writefn(env, ri, v);
+ } else if (ri->writefn) {
+ ri->writefn(env, ri, v);
+ } else {
+ raw_write(env, ri, v);
+ }
+}
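+
+/*
+ * For example, a caller that must know whether a raw write took effect
+ * (constant registers are write-ignored) checks by reading back:
+ *
+ *     write_raw_cp_reg(env, ri, v);
+ *     if (read_raw_cp_reg(env, ri) != v) {
+ *         ... the write was ignored ...
+ *     }
+ *
+ * This is what write_list_to_cpustate() below does for each incoming
+ * migration value.
+ */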
+
+static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
+{
+ /* Return true if the regdef would cause an assertion if you called
+ * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
+ * program bug for it not to have the NO_RAW flag).
+ * NB that returning false here doesn't necessarily mean that calling
+ * read/write_raw_cp_reg() is safe, because we can't distinguish "has
+ * read/write access functions which are safe for raw use" from "has
+ * read/write access functions which have side effects but has forgotten
+ * to provide raw access functions".
+ * The tests here line up with the conditions in read/write_raw_cp_reg()
+ * and assertions in raw_read()/raw_write().
+ */
+ if ((ri->type & ARM_CP_CONST) ||
+ ri->fieldoffset ||
+ ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
+ return false;
+ }
+ return true;
+}
+
+bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
+{
+ /* Write the coprocessor state from cpu->env to the (index,value) list. */
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
+ const ARMCPRegInfo *ri;
+ uint64_t newval;
+
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ if (!ri) {
+ ok = false;
+ continue;
+ }
+ if (ri->type & ARM_CP_NO_RAW) {
+ continue;
+ }
+
+ newval = read_raw_cp_reg(&cpu->env, ri);
+ if (kvm_sync) {
+ /*
+ * Only sync if the previous list->cpustate sync succeeded.
+ * Rather than tracking the success/failure state for every
+ * item in the list, we just recheck "does the raw write we must
+ * have made in write_list_to_cpustate() read back OK" here.
+ */
+ uint64_t oldval = cpu->cpreg_values[i];
+
+ if (oldval == newval) {
+ continue;
+ }
+
+ write_raw_cp_reg(&cpu->env, ri, oldval);
+ if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
+ continue;
+ }
+
+ write_raw_cp_reg(&cpu->env, ri, newval);
+ }
+ cpu->cpreg_values[i] = newval;
+ }
+ return ok;
+}
+
+bool write_list_to_cpustate(ARMCPU *cpu)
+{
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
+ uint64_t v = cpu->cpreg_values[i];
+ const ARMCPRegInfo *ri;
+
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ if (!ri) {
+ ok = false;
+ continue;
+ }
+ if (ri->type & ARM_CP_NO_RAW) {
+ continue;
+ }
+ /* Write value and confirm it reads back as written
+ * (to catch read-only registers and partially read-only
+ * registers where the incoming migration value doesn't match)
+ */
+ write_raw_cp_reg(&cpu->env, ri, v);
+ if (read_raw_cp_reg(&cpu->env, ri) != v) {
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+static void add_cpreg_to_list(gpointer key, gpointer opaque)
+{
+ ARMCPU *cpu = opaque;
+ uint64_t regidx;
+ const ARMCPRegInfo *ri;
+
+ regidx = *(uint32_t *)key;
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+ cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
+ /* The value array need not be initialized at this point */
+ cpu->cpreg_array_len++;
+ }
+}
+
+static void count_cpreg(gpointer key, gpointer opaque)
+{
+ ARMCPU *cpu = opaque;
+ uint64_t regidx;
+ const ARMCPRegInfo *ri;
+
+ regidx = *(uint32_t *)key;
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+ cpu->cpreg_array_len++;
+ }
+}
+
+static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
+{
+ uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
+ uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
+
+ if (aidx > bidx) {
+ return 1;
+ }
+ if (aidx < bidx) {
+ return -1;
+ }
+ return 0;
+}
+
+void init_cpreg_list(ARMCPU *cpu)
+{
+    /* Initialise the cpreg_indexes[] and cpreg_values[] arrays based
+     * on the cp_regs hash.  Note that we require the list to be
+     * sorted by key ID.
+ */
+ GList *keys;
+ int arraylen;
+
+ keys = g_hash_table_get_keys(cpu->cp_regs);
+ keys = g_list_sort(keys, cpreg_key_compare);
+
+ cpu->cpreg_array_len = 0;
+
+ g_list_foreach(keys, count_cpreg, cpu);
+
+ arraylen = cpu->cpreg_array_len;
+ cpu->cpreg_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ cpu->cpreg_array_len = 0;
+
+ g_list_foreach(keys, add_cpreg_to_list, cpu);
+
+ assert(cpu->cpreg_array_len == arraylen);
+
+ g_list_free(keys);
+}
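+
+/*
+ * init_cpreg_list() makes two passes over the sorted key list: first
+ * count_cpreg() sizes the arrays, then add_cpreg_to_list() fills in
+ * the indexes; the final assert checks that the two passes agreed.
+ */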
+
+/*
+ * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
+ */
+static CPAccessResult access_el3_aa32ns(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!is_a64(env) && arm_current_el(env) == 3 &&
+ arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Some secure-only AArch32 registers trap to EL3 if used from
+ * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
+ * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
+ * We assume that the .access field is set to PL1_RW.
+ */
+static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_TRAP_EL3;
+ }
+ /* This will be EL1 NS and EL2 NS, which just UNDEF */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
+{
+ return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
+}
+
+/* Check for traps to "powerdown debug" registers, which are controlled
+ * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
+ */
+static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+ bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
+ (arm_hcr_el2_eff(env) & HCR_TGE);
+
+ if (el < 2 && mdcr_el2_tdosa) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to "debug ROM" registers, which are controlled
+ * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+ bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
+ (arm_hcr_el2_eff(env) & HCR_TGE);
+
+ if (el < 2 && mdcr_el2_tdra) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to general debug registers, which are controlled
+ * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+ bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
+ (arm_hcr_el2_eff(env) & HCR_TGE);
+
+ if (el < 2 && mdcr_el2_tda) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
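+
+/*
+ * Note the shared shape of the access_td*() and access_tpm() checks
+ * above: a trap bit in MDCR_EL2 (together with MDCR_TDE or
+ * HCR_EL2.TGE for the debug registers) routes accesses from below
+ * EL2 to EL2, and the matching MDCR_EL3 bit routes accesses from
+ * below EL3 to EL3, tested in that order.
+ */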
+
+/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
+static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1) {
+ uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
+ if (arm_hcr_el2_eff(env) & trap) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps from EL1 due to HCR_EL2.TSW. */
+static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps from EL1 due to HCR_EL2.TACR. */
+static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps from EL1 due to HCR_EL2.TTLB. */
+static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ raw_write(env, ri, value);
+ tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
+}
+
+static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (raw_read(env, ri) != value) {
+ /* Unlike real hardware, the QEMU TLB uses virtual addresses,
+ * not FCSE-modified virtual addresses, so changing the ProcID
+ * requires a TLB flush.
+ */
+ tlb_flush(CPU(cpu));
+ raw_write(env, ri, value);
+ }
+}
+
+static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
+ && !extended_addresses_enabled(env)) {
+ /* For VMSA (when not using the LPAE long descriptor page table
+ * format) this register includes the ASID, so do a TLB flush.
+ * For PMSA it is purely a process ID and no action is needed.
+ */
+ tlb_flush(CPU(cpu));
+ }
+ raw_write(env, ri, value);
+}
+
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+/*
+ * Non-IS variants of TLB operations are upgraded to
+ * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
+ * force broadcast of these operations.
+ */
+static bool tlb_force_broadcast(CPUARMState *env)
+{
+ return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
+}
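+
+/* Illustrative example: with HCR_EL2.FB set, a TLBIALL issued at EL1
+ * takes the tlb_force_broadcast() branch below and is handled like
+ * TLBIALLIS, i.e. tlb_flush_all_cpus_synced() rather than tlb_flush().
+ */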
+
+static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate all (TLBIALL) */
+ CPUState *cs = env_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_all_cpus_synced(cs);
+ } else {
+ tlb_flush(cs);
+ }
+}
+
+static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
+ CPUState *cs = env_cpu(env);
+
+ value &= TARGET_PAGE_MASK;
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_all_cpus_synced(cs, value);
+ } else {
+ tlb_flush_page(cs, value);
+ }
+}
+
+static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by ASID (TLBIASID) */
+ CPUState *cs = env_cpu(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_all_cpus_synced(cs);
+ } else {
+ tlb_flush(cs);
+ }
+}
+
+static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
+ CPUState *cs = env_cpu(env);
+
+ value &= TARGET_PAGE_MASK;
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_all_cpus_synced(cs, value);
+ } else {
+ tlb_flush_page(cs, value);
+ }
+}
+
+static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx(cs,
+ ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0);
+}
+
+static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs,
+ ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0);
+}
+
+static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
+}
+
+static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
+}
+
+static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
+}
+
+static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_E2);
+}
+
+static const ARMCPRegInfo cp_reginfo[] = {
+ /* Define the secure and non-secure FCSE identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. There is also no
+ * v8 EL1 version of the register so the non-secure instance stands alone.
+ */
+ { .name = "FCSEIDR",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ { .name = "FCSEIDR_S",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ /* Define the secure and non-secure context identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. In the
+ * non-secure case, the 32-bit register will have reset and migration
+ * disabled during registration as it is handled by the 64-bit instance.
+ */
+ { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v8_cp_reginfo[] = {
+ /* NB: Some of these registers exist in v8 but with more precise
+ * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
+ */
+ /* MMU Domain access control / MPU write buffer control */
+ { .name = "DACR",
+ .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
+ /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
+ * For v6 and v5, these mappings are overly broad.
+ */
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ /* Cache maintenance ops; some of this space may be overridden later. */
+ { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
+ .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v6_cp_reginfo[] = {
+ /* Not all pre-v6 cores implemented this WFI, so this is slightly
+ * over-broad.
+ */
+ { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_WFI },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v7_cp_reginfo[] = {
+ /* Standard v6 WFI (also used in some pre-v6 cores); not in v7, where
+ * it is UNPREDICTABLE (we choose to NOP, as most implementations do).
+ */
+ { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_WFI },
+ /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
+ * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
+ * OMAPCP will override this space.
+ */
+ { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
+ .resetvalue = 0 },
+ { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
+ .resetvalue = 0 },
+ /* v6 doesn't have the cache ID registers but Linux reads them anyway */
+ { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
+ * implementing it as RAZ means the "debug architecture version" bits
+ * will read as a reserved value, which should cause Linux to not try
+ * to use the debug hardware.
+ */
+ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MMU TLB control. Note that the wildcarding means we cover not just
+ * the unified TLB ops but also the dside/iside/inner-shareable variants.
+ */
+ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
+ .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
+ .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint32_t mask = 0;
+
+ /* In ARMv8 most bits of CPACR_EL1 are RES0. */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
+ * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
+ * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
+ */
+ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
+ /* VFP coprocessor: cp10 & cp11 [23:20] */
+ mask |= (1 << 31) | (1 << 30) | (0xf << 20);
+
+ if (!arm_feature(env, ARM_FEATURE_NEON)) {
+ /* ASEDIS [31] bit is RAO/WI */
+ value |= (1 << 31);
+ }
+
+ /* VFPv3 and upwards with NEON implement 32 double precision
+ * registers (D0-D31).
+ */
+ if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
+ /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
+ value |= (1 << 30);
+ }
+ }
+ value &= mask;
+ }
+
+ /*
+ * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
+ * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
+ value &= ~(0xf << 20);
+ value |= env->cp15.cpacr_el1 & (0xf << 20);
+ }
+
+ env->cp15.cpacr_el1 = value;
+}
+
+static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /*
+ * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
+ * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
+ */
+ uint64_t value = env->cp15.cpacr_el1;
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
+ value &= ~(0xf << 20);
+ }
+ return value;
+}
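+
+/* Worked example (hypothetical values, AArch32 EL3 present): if Secure
+ * software has set cpacr_el1[23:20] = 0xf and NSACR.CP10 == 0, a
+ * Non-secure write leaves those bits unchanged in cpacr_write() and a
+ * Non-secure read returns them as zero here, so NS software can neither
+ * observe nor modify them.
+ */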
+
+static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Call cpacr_write() so that we reset with the correct RAO bits set
+ * for our CPU features.
+ */
+ cpacr_write(env, ri, 0);
+}
+
+static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* Check if CPACR accesses are to be trapped to EL2 */
+ if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
+ (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
+ return CP_ACCESS_TRAP_EL2;
+ /* Check if CPACR accesses are to be trapped to EL3 */
+ } else if (arm_current_el(env) < 3 &&
+ (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Check if CPTR accesses are set to trap to EL3 */
+ if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo v6_cp_reginfo[] = {
+ /* prefetch by MVA in v6, NOP in v7 */
+ { .name = "MVA_prefetch",
+ .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* We need to break the TB after ISB to execute self-modifying code
+ * correctly and also to take any pending interrupts immediately.
+ * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
+ */
+ { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
+ { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_NOP },
+ { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
+ .access = PL0_W, .type = ARM_CP_NOP },
+ { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
+ offsetof(CPUARMState, cp15.ifar_ns) },
+ .resetvalue = 0, },
+ /* Watchpoint Fault Address Register: should actually only be present
+ * for 1136, 1176, 11MPCore.
+ */
+ { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
+ { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
+ .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
+ .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
+ REGINFO_SENTINEL
+};
+
+typedef struct pm_event {
+ uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
+ /* If the event is supported on this CPU (used to generate PMCEID[01]) */
+ bool (*supported)(CPUARMState *);
+ /*
+ * Retrieve the current count of the underlying event. The programmed
+ * counters hold a difference from the return value of this function.
+ */
+ uint64_t (*get_count)(CPUARMState *);
+ /*
+ * Return how many nanoseconds it will take (at a minimum) for count events
+ * to occur. A negative value indicates the counter will never overflow, or
+ * that the counter has otherwise arranged for the overflow bit to be set
+ * and the PMU interrupt to be raised on overflow.
+ */
+ int64_t (*ns_per_count)(uint64_t);
+} pm_event;
+
+static bool event_always_supported(CPUARMState *env)
+{
+ return true;
+}
+
+static uint64_t swinc_get_count(CPUARMState *env)
+{
+ /*
+ * SW_INCR events are written directly to the pmevcntrs by writes to
+ * PMSWINC, so there is no underlying count maintained by the PMU itself.
+ */
+ return 0;
+}
+
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+ return -1;
+}
+
+/*
+ * Return the underlying cycle count for the PMU cycle counters. In
+ * user-mode emulation we fall back to the host tick counter instead.
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+#else
+ return cpu_get_host_ticks();
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+ return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
+static bool instructions_supported(CPUARMState *env)
+{
+ return icount_enabled() == 1; /* Precise instruction counting */
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+ return (uint64_t)icount_get_raw();
+}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+ return icount_to_ns((int64_t)icount);
+}
+#endif
+
+static bool pmu_8_1_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.1 PMU */
+ return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
+}
+
+static bool pmu_8_4_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.4 PMU */
+ return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+ /* For events which on QEMU never fire, so their count is always zero */
+ return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+ /* An event which never fires can never overflow */
+ return -1;
+}
+
+static const pm_event pm_events[] = {
+ { .number = 0x000, /* SW_INCR */
+ .supported = event_always_supported,
+ .get_count = swinc_get_count,
+ .ns_per_count = swinc_ns_per,
+ },
+#ifndef CONFIG_USER_ONLY
+ { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+ .supported = instructions_supported,
+ .get_count = instructions_get_count,
+ .ns_per_count = instructions_ns_per,
+ },
+ { .number = 0x011, /* CPU_CYCLES, Cycle */
+ .supported = event_always_supported,
+ .get_count = cycles_get_count,
+ .ns_per_count = cycles_ns_per,
+ },
+#endif
+ { .number = 0x023, /* STALL_FRONTEND */
+ .supported = pmu_8_1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x024, /* STALL_BACKEND */
+ .supported = pmu_8_1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x03c, /* STALL */
+ .supported = pmu_8_4_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+};
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x3c
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
+ * of ARM event numbers to indices in our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */
+void pmu_init(ARMCPU *cpu)
+{
+ unsigned int i;
+
+ /*
+ * Empty supported_event_map and cpu->pmceid[01] before adding supported
+ * events to them
+ */
+ for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
+ supported_event_map[i] = UNSUPPORTED_EVENT;
+ }
+ cpu->pmceid0 = 0;
+ cpu->pmceid1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
+ const pm_event *cnt = &pm_events[i];
+ assert(cnt->number <= MAX_EVENT_ID);
+ /* We do not currently support events in the 0x40xx range */
+ assert(cnt->number <= 0x3f);
+
+ if (cnt->supported(&cpu->env)) {
+ supported_event_map[cnt->number] = i;
+ uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
+ if (cnt->number & 0x20) {
+ cpu->pmceid1 |= event_mask;
+ } else {
+ cpu->pmceid0 |= event_mask;
+ }
+ }
+ }
+}
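+
+/* Worked example of the PMCEID encoding above: CPU_CYCLES (0x011) has
+ * bit 5 clear, so it sets bit (0x11 & 0x1f) == 17 of PMCEID0, while
+ * STALL_FRONTEND (0x023) has bit 5 set and lands in PMCEID1 bit 3.
+ */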
+
+/*
+ * Check at runtime whether a PMU event is supported for the current machine
+ */
+static bool event_supported(uint16_t number)
+{
+ if (number > MAX_EVENT_ID) {
+ return false;
+ }
+ return supported_event_map[number] != UNSUPPORTED_EVENT;
+}
+
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* User accessibility of the performance monitor registers is
+ * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
+ * configurable trapping to EL2 or EL3 for other accesses.
+ */
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_swinc(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* SW: software increment write trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
+ && !isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_selr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* CR: cycle counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+ uint64_t filter;
+ bool e, p, u, nsk, nsu, nsh, m;
+ bool enabled, prohibited, filtered;
+ bool secure = arm_is_secure(env);
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+ uint8_t hpmn = mdcr_el2 & MDCR_HPMN;
+
+ if (!arm_feature(env, ARM_FEATURE_PMU)) {
+ return false;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) ||
+ (counter < hpmn || counter == 31)) {
+ e = env->cp15.c9_pmcr & PMCRE;
+ } else {
+ e = mdcr_el2 & MDCR_HPME;
+ }
+ enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
+
+ if (!secure) {
+ if (el == 2 && (counter < hpmn || counter == 31)) {
+ prohibited = mdcr_el2 & MDCR_HPMD;
+ } else {
+ prohibited = false;
+ }
+ } else {
+ prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
+ !(env->cp15.mdcr_el3 & MDCR_SPME);
+ }
+
+ if (prohibited && counter == 31) {
+ prohibited = env->cp15.c9_pmcr & PMCRDP;
+ }
+
+ if (counter == 31) {
+ filter = env->cp15.pmccfiltr_el0;
+ } else {
+ filter = env->cp15.c14_pmevtyper[counter];
+ }
+
+ p = filter & PMXEVTYPER_P;
+ u = filter & PMXEVTYPER_U;
+ nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
+ nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
+ nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
+ m = arm_el_is_aa64(env, 1) &&
+ arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
+
+ if (el == 0) {
+ filtered = secure ? u : u != nsu;
+ } else if (el == 1) {
+ filtered = secure ? p : p != nsk;
+ } else if (el == 2) {
+ filtered = !nsh;
+ } else { /* EL3 */
+ filtered = m != p;
+ }
+
+ if (counter != 31) {
+ /*
+ * If not checking PMCCNTR, ensure the counter is setup to an event we
+ * support
+ */
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+ if (!event_supported(event)) {
+ return false;
+ }
+ }
+
+ return enabled && !prohibited && !filtered;
+}
+
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+/*
+ * Ensure c15_ccnt is the guest-visible count so that operations such as
+ * enabling/disabling the counter or filtering, modifying the count itself,
+ * etc. can be done logically. This is essentially a no-op if the counter is
+ * not enabled at the time of the call.
+ */
+static void pmccntr_op_start(CPUARMState *env)
+{
+ uint64_t cycles = cycles_get_count(env);
+
+ if (pmu_counter_enabled(env, 31)) {
+ uint64_t eff_cycles = cycles;
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ eff_cycles /= 64;
+ }
+
+ uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
+
+ uint64_t overflow_mask = (env->cp15.c9_pmcr & PMCRLC) ?
+ 1ull << 63 : 1ull << 31;
+ if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << 31);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c15_ccnt = new_pmccntr;
+ }
+ env->cp15.c15_ccnt_delta = cycles;
+}
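+
+/* Delta-scheme example (made-up numbers): with the counter enabled and
+ * PMCR.D clear, if the underlying cycle count is 5000 and
+ * c15_ccnt_delta is 3000, the guest-visible PMCCNTR computed above is
+ * 2000; pmccntr_op_finish() later re-derives the delta from whatever
+ * value the intervening operation left in c15_ccnt.
+ */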
+
+/*
+ * If PMCCNTR is enabled, recalculate the delta between the clock and the
+ * guest-visible count. A call to pmccntr_op_finish should follow every call to
+ * pmccntr_op_start.
+ */
+static void pmccntr_op_finish(CPUARMState *env)
+{
+ if (pmu_counter_enabled(env, 31)) {
+#ifndef CONFIG_USER_ONLY
+ /* Calculate when the counter will next overflow */
+ uint64_t remaining_cycles = -env->cp15.c15_ccnt;
+ if (!(env->cp15.c9_pmcr & PMCRLC)) {
+ remaining_cycles = (uint32_t)remaining_cycles;
+ }
+ int64_t overflow_in = cycles_ns_per(remaining_cycles);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ overflow_in;
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+#endif
+
+ uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ prev_cycles /= 64;
+ }
+ env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
+ }
+}
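+
+/* Overflow-timer example (hypothetical state): with PMCRLC clear the
+ * counter wraps at 32 bits, so for c15_ccnt == 0xffffff00 the code above
+ * computes remaining_cycles == 0x100, converts it to nanoseconds with
+ * cycles_ns_per() and arms cpu->pmu_timer that far in the future.
+ */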
+
+static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
+{
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint64_t count = 0;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ count = pm_events[event_idx].get_count(env);
+ }
+
+ if (pmu_counter_enabled(env, counter)) {
+ uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+
+ if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << counter);
+ pmu_update_irq(env);
+ }
+ env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+}
+
+static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
+{
+ if (pmu_counter_enabled(env, counter)) {
+#ifndef CONFIG_USER_ONLY
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint16_t event_idx = supported_event_map[event];
+ uint64_t delta = UINT32_MAX -
+ (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
+ int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ overflow_in;
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+#endif
+
+ env->cp15.c14_pmevcntr_delta[counter] -=
+ env->cp15.c14_pmevcntr[counter];
+ }
+}
+
+void pmu_op_start(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_start(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_start(env, i);
+ }
+}
+
+void pmu_op_finish(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_finish(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_finish(env, i);
+ }
+}
+
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_start(&cpu->env);
+}
+
+void pmu_post_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_finish(&cpu->env);
+}
+
+void arm_pmu_timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ /*
+ * Update all the counter values based on the current underlying counts,
+ * triggering interrupts to be raised, if necessary. pmu_op_finish() also
+ * has the effect of setting the cpu->pmu_timer to the next earliest time a
+ * counter may expire.
+ */
+ pmu_op_start(&cpu->env);
+ pmu_op_finish(&cpu->env);
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+
+ if (value & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (value & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
+ env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
+
+ pmu_op_finish(env);
+}
+
+static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ /* Increment a counter's count iff: */
+ if ((value & (1 << i)) && /* counter's bit is set */
+ /* counter is enabled and not filtered */
+ pmu_counter_enabled(env, i) &&
+ /* counter is SW_INCR */
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+ pmevcntr_op_start(env, i);
+
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+
+ pmevcntr_op_finish(env, i);
+ }
+ }
+}
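+
+/* Usage sketch (hypothetical value): a guest write of 0x5 to PMSWINC
+ * requests an increment of counters 0 and 2; each is bumped only if it
+ * is enabled, unfiltered and programmed with event 0x000 (SW_INCR).
+ */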
+
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t ret;
+ pmccntr_op_start(env);
+ ret = env->cp15.c15_ccnt;
+ pmccntr_op_finish(env);
+ return ret;
+}
+
+static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
+ * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
+ * PMSELR.SEL value is then checked when PMXEVTYPER and PMXEVCNTR
+ * are accessed.
+ */
+ env->cp15.c9_pmselr = value & 0x1f;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.c15_ccnt = value;
+ pmccntr_op_finish(env);
+}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t cur_val = pmccntr_read(env, NULL);
+
+ pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
+ pmccntr_op_finish(env);
+}
+
+static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ /* M is not accessible from AArch32 */
+ env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
+ (value & PMCCFILTR);
+ pmccntr_op_finish(env);
+}
+
+static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* M is not visible in AArch32 */
+ return env->cp15.pmccfiltr_el0 & PMCCFILTR;
+}
+
+static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten |= value;
+}
+
+static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten &= ~value;
+}
+
+static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr &= ~value;
+ pmu_update_irq(env);
+}
+
+static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr |= value;
+ pmu_update_irq(env);
+}
+
+static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, const uint8_t counter)
+{
+ if (counter == 31) {
+ pmccfiltr_write(env, ri, value);
+ } else if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+
+ /*
+ * If this counter's event type is changing, store the current
+ * underlying count for the new type in c14_pmevcntr_delta[counter] so
+ * pmevcntr_op_finish has the correct baseline when it converts back to
+ * a delta.
+ */
+ uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
+ PMXEVTYPER_EVTCOUNT;
+ uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
+ if (old_event != new_event) {
+ uint64_t count = 0;
+ if (event_supported(new_event)) {
+ uint16_t event_idx = supported_event_map[new_event];
+ count = pm_events[event_idx].get_count(env);
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+ }
+
+ env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
+ pmevcntr_op_finish(env, counter);
+ }
+ /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
+ * the PMSELR value is equal to or greater than the number of
+ * implemented counters, but not equal to 0x1f. We opt to behave as
+ * a RAZ/WI.
+ */
+}
+
+static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ const uint8_t counter)
+{
+ if (counter == 31) {
+ return env->cp15.pmccfiltr_el0;
+ } else if (counter < pmu_num_counters(env)) {
+ return env->cp15.c14_pmevtyper[counter];
+ } else {
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
+ * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
+ */
+ return 0;
+ }
+}
+
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ env->cp15.c14_pmevtyper[counter] = value;
+
+ /*
+ * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+ * pmu_op_finish calls when loading saved state for a migration. Because
+ * we're potentially updating the type of event here, the value written to
+ * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+ * different counter type. Therefore, we need to set this value to the
+ * current count for the counter type we're writing so that pmu_op_finish
+ * has the correct count for its calculation.
+ */
+ uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ env->cp15.c14_pmevcntr_delta[counter] =
+ pm_events[event_idx].get_count(env);
+ }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, uint8_t counter)
+{
+ if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_op_finish(env, counter);
+ }
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE.
+ */
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint8_t counter)
+{
+ if (counter < pmu_num_counters(env)) {
+ uint64_t ret;
+ pmevcntr_op_start(env, counter);
+ ret = env->cp15.c14_pmevcntr[counter];
+ pmevcntr_op_finish(env, counter);
+ return ret;
+ } else {
+ /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE. */
+ return 0;
+ }
+}
+
+static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevcntr_read(env, ri, counter);
+}
+
+static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ return env->cp15.c14_pmevcntr[counter];
+}
+
+static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ env->cp15.c9_pmuserenr = value & 0xf;
+ } else {
+ env->cp15.c9_pmuserenr = value & 1;
+ }
+}
+
+static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Only the bits for implemented counters, plus the C bit, can be set */
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten |= value;
+ pmu_update_irq(env);
+}
+
+static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten &= ~value;
+ pmu_update_irq(env);
+}
+
+static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that even though the AArch64 view of this register has bits
+ * [10:0] all RES0 we can only mask the bottom 5, to comply with the
+ * architectural requirements for bits which are RES0 only in some
+ * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
+ * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
+ */
+ raw_write(env, ri, value & ~0x1FULL);
+}
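+
+/* Masking example (arbitrary value): a write of 0x1234567f is stored as
+ * 0x12345660; only the bottom five bits are forced to zero, and keeping
+ * bits [10:5] as written is acceptable for bits that are RES0 in the
+ * AArch64 view.
+ */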
+
+static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ /* Begin with base v8.0 state. */
+ uint32_t valid_mask = 0x3fff;
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (ri->state == ARM_CP_STATE_AA64) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+ !cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
+ }
+ valid_mask &= ~SCR_NET;
+
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= SCR_TLOR;
+ }
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ valid_mask |= SCR_API | SCR_APK;
+ }
+ if (cpu_isar_feature(aa64_sel2, cpu)) {
+ valid_mask |= SCR_EEL2;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= SCR_ATA;
+ }
+ } else {
+ valid_mask &= ~(SCR_RW | SCR_ST);
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ valid_mask &= ~SCR_HCE;
+
+ /* On ARMv7, SMD (or SCD as it is called in v7) is only
+ * supported if EL2 exists. The bit is UNK/SBZP when EL2 is
+ * unavailable, and QEMU's ARMv7 emulation forces it to zero
+ * in that case. On ARMv8, this bit is always available.
+ */
+ if (arm_feature(env, ARM_FEATURE_V7) &&
+ !arm_feature(env, ARM_FEATURE_V8)) {
+ valid_mask &= ~SCR_SMD;
+ }
+ }
+
+ /* Clear all-context RES0 bits. */
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
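+
+/* Mask example (AArch64 view, EL2 present, no optional features):
+ * valid_mask starts at 0x3fff, loses SCR_NET (bit 6) and becomes 0x3fbf,
+ * so a write of all-ones stores 0x3fbf; the RES1 bits FW and AW (0x30)
+ * are already within that mask.
+ */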
+
+static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /*
+ * scr_write will set the RES1 bits on an AArch64-only CPU.
+ * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
+ */
+ scr_write(env, ri, 0);
+}
+
+static CPAccessResult access_aa64_tid2(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ /* Acquire the CSSELR index from the register bank corresponding
+ * to this access's security state.
+ */
+ uint32_t index = A32_BANKED_REG_GET(env, csselr,
+ ri->secure & ARM_CP_SECSTATE_S);
+
+ return cpu->ccsidr[index];
+}
+
+static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ raw_write(env, ri, value & 0xf);
+}
+
+static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ CPUState *cs = env_cpu(env);
+ bool el1 = arm_current_el(env) == 1;
+ uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
+ uint64_t ret = 0;
+
+ if (hcr_el2 & HCR_IMO) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ ret |= CPSR_I;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ ret |= CPSR_I;
+ }
+ }
+
+ if (hcr_el2 & HCR_FMO) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+ ret |= CPSR_F;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ ret |= CPSR_F;
+ }
+ }
+
+ /* External aborts are not possible in QEMU so A bit is always clear */
+ return ret;
+}
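+
+/* Routing example: at EL1 with HCR_EL2.IMO set, a pending virtual IRQ
+ * (CPU_INTERRUPT_VIRQ) is what sets the I bit above; with IMO clear it
+ * is the physical IRQ line (CPU_INTERRUPT_HARD) that is reported.
+ */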
+
+static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ return access_aa64_tid1(env, ri, isread);
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo v7_cp_reginfo[] = {
+ /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
+ { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* Performance monitors are implementation defined in v7,
+ * but with an ARM recommended set of registers, which we
+ * follow.
+ *
+ * Performance registers fall into three categories:
+ * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
+ * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
+ * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
+ * For the cases controlled by PMUSERENR we must set .access to PL0_RW
+ * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
+ */
+ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenset_write,
+ .accessfn = pmreg_access,
+ .raw_writefn = raw_write },
+ { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+ .writefn = pmcntenset_write, .raw_writefn = raw_write },
+ { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .accessfn = pmreg_access,
+ .writefn = pmcntenclr_write,
+ .type = ARM_CP_ALIAS },
+ { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenclr_write },
+ { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
+ .access = PL0_RW, .type = ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .accessfn = pmreg_access,
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
+ .accessfn = pmreg_access_selr, .writefn = pmselr_write,
+ .raw_writefn = raw_write},
+ { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
+ .access = PL0_RW, .accessfn = pmreg_access_selr,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
+ .writefn = pmselr_write, .raw_writefn = raw_write, },
+ { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .readfn = pmccntr_read, .writefn = pmccntr_write32,
+ .accessfn = pmreg_access_ccntr },
+ { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access_ccntr,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
+ .readfn = pmccntr_read, .writefn = pmccntr_write,
+ .raw_readfn = raw_read, .raw_writefn = raw_write, },
+ { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .resetvalue = 0, },
+ { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write, .raw_writefn = raw_write,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+ .resetvalue = 0, },
+ { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
+ .resetvalue = 0,
+ .writefn = pmintenset_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenset_write, .raw_writefn = raw_write,
+ .resetvalue = 0x0 },
+ { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, },
+ { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write },
+ { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
+ .access = PL1_R,
+ .accessfn = access_aa64_tid2,
+ .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
+ { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
+ .access = PL1_RW,
+ .accessfn = access_aa64_tid2,
+ .writefn = csselr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
+ offsetof(CPUARMState, cp15.csselr_ns) } },
+ /* Auxiliary ID register: this actually has an IMPDEF value but for now
+ * we just RAZ for all cores.
+ */
+ { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid1,
+ .resetvalue = 0 },
+ /* Auxiliary fault status registers: these also are IMPDEF, and we
+ * choose to RAZ/WI for all cores.
+ */
+ { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MAIR can just read-as-written because we don't implement caches
+ * and so don't need to care about memory attributes.
+ */
+ { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
+ .resetvalue = 0 },
+ { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
+ .resetvalue = 0 },
+ /* For non-long-descriptor page tables these are PRRR and NMRR;
+ * regardless, they still act as read-as-written for QEMU.
+ */
+ /* MAIR0/1 are defined separately from their 64-bit counterpart which
+ * allows them to assign the correct fieldoffset based on the endianness
+ * handled in the field definitions.
+ */
+ { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
+ offsetof(CPUARMState, cp15.mair0_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
+ offsetof(CPUARMState, cp15.mair1_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v7mp_cp_reginfo[] = {
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiall_is_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_is_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbiasid_is_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_is_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
+ /* PMOVSSET is not implemented in v7 before v7ve */
+ { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+ REGINFO_SENTINEL
+};
+
+static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= 1;
+ env->teecr = value;
+}
+
+static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
+ * at all, so we don't need to check whether we're v8A.
+ */
+ if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
+ (env->cp15.hstr_el2 & HSTR_TTEE)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 0 && (env->teecr & 1)) {
+ return CP_ACCESS_TRAP;
+ }
+ return teecr_access(env, ri, isread);
+}
+
+static const ARMCPRegInfo t2ee_cp_reginfo[] = {
+ { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
+ .resetvalue = 0,
+ .writefn = teecr_write, .accessfn = teecr_access },
+ { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
+ .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
+ .accessfn = teehbr_access, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v6k_cp_reginfo[] = {
+ { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
+ .access = PL0_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
+ { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
+ .access = PL0_R|PL1_W,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
+ .resetvalue = 0},
+ { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R|PL1_W,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
+ offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
+ { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+#ifndef CONFIG_USER_ONLY
+
+static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
+ * Writable only at the highest implemented exception level.
+ */
+ int el = arm_current_el(env);
+ uint64_t hcr;
+ uint32_t cntkctl;
+
+ switch (el) {
+ case 0:
+ hcr = arm_hcr_el2_eff(env);
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ cntkctl = env->cp15.cnthctl_el2;
+ } else {
+ cntkctl = env->cp15.c14_cntkctl;
+ }
+ if (!extract32(cntkctl, 0, 2)) {
+ return CP_ACCESS_TRAP;
+ }
+ break;
+ case 1:
+ if (!isread && ri->state == ARM_CP_STATE_AA32 &&
+ arm_is_secure_below_el3(env)) {
+ /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ break;
+ case 2:
+ case 3:
+ break;
+ }
+
+ if (!isread && el < arm_highest_el(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+
+ return CP_ACCESS_OK;
+}
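+
+/*
+ * Bits [1:0] of CNTKCTL (and, under HCR_EL2.<E2H,TGE> == '11', of
+ * CNTHCTL_EL2) are EL0PCTEN and EL0VCTEN, so the extract32(cntkctl, 0, 2)
+ * test above makes CNTFRQ EL0-visible whenever either counter is
+ * EL0-accessible.
+ */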
+
+static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
+ bool isread)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool has_el2 = arm_is_el2_enabled(env);
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ switch (cur_el) {
+ case 0:
+ /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
+ ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
+ }
+
+ /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
+ if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
+ return CP_ACCESS_TRAP;
+ }
+
+ /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
+ if (hcr & HCR_E2H) {
+ if (timeridx == GTIMER_PHYS &&
+ !extract32(env->cp15.cnthctl_el2, 10, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ } else {
+ /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
+ if (has_el2 && timeridx == GTIMER_PHYS &&
+ !extract32(env->cp15.cnthctl_el2, 1, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ break;
+
+ case 1:
+ /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
+ if (has_el2 && timeridx == GTIMER_PHYS &&
+ (hcr & HCR_E2H
+ ? !extract32(env->cp15.cnthctl_el2, 10, 1)
+ : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ break;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
+ bool isread)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool has_el2 = arm_is_el2_enabled(env);
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ switch (cur_el) {
+ case 0:
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
+ return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
+ ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
+ }
+
+ /*
+ * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
+ * EL0 if EL0[PV]TEN is zero.
+ */
+ if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
+ return CP_ACCESS_TRAP;
+ }
+ /* fall through */
+
+ case 1:
+ if (has_el2 && timeridx == GTIMER_PHYS) {
+ if (hcr & HCR_E2H) {
+ /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
+ if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ } else {
+ /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
+ if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ }
+ break;
+ }
+ return CP_ACCESS_OK;
+}
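+
+/*
+ * The "9 - timeridx" indexing above relies on GTIMER_PHYS == 0 and
+ * GTIMER_VIRT == 1 (as defined in cpu.h): CNTKCTL.EL0PTEN is bit 9 and
+ * CNTKCTL.EL0VTEN is bit 8, so the physical timer selects bit 9 and
+ * the virtual timer bit 8.
+ */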
+
+static CPAccessResult gt_pct_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_counter_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vct_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_counter_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_timer_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_timer_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_stimer_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The AArch64 register view of the secure physical timer is
+ * always accessible from EL3, and configurably accessible from
+ * Secure EL1.
+ */
+ switch (arm_current_el(env)) {
+ case 1:
+ if (!arm_is_secure(env)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (!(env->cp15.scr_el3 & SCR_ST)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+ case 0:
+ case 2:
+ return CP_ACCESS_TRAP;
+ case 3:
+ return CP_ACCESS_OK;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint64_t gt_get_countervalue(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
+}
+
+static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
+{
+ ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
+
+ if (gt->ctl & 1) {
+ /* Timer enabled: calculate and set the current ISTATUS and IRQ line,
+ * and re-arm the timer for when ISTATUS next has to change.
+ */
+ uint64_t offset = timeridx == GTIMER_VIRT ?
+ cpu->env.cp15.cntvoff_el2 : 0;
+ uint64_t count = gt_get_countervalue(&cpu->env);
+ /* Note that this must be unsigned 64 bit arithmetic: */
+ int istatus = count - offset >= gt->cval;
+ uint64_t nexttick;
+ int irqstate;
+
+ gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
+
+ irqstate = (istatus && !(gt->ctl & 2));
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
+
+ if (istatus) {
+ /* Next transition is when count rolls back over to zero */
+ nexttick = UINT64_MAX;
+ } else {
+ /* Next transition is when we hit cval */
+ nexttick = gt->cval + offset;
+ }
+ /* Note that the desired next expiry time might be beyond the
+ * signed-64-bit range of a QEMUTimer -- in this case we just
+ * set the timer for as far in the future as possible. When the
+ * timer expires we will reset the timer for any remaining period.
+ */
+ if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
+ timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
+ } else {
+ timer_mod(cpu->gt_timer[timeridx], nexttick);
+ }
+ trace_arm_gt_recalc(timeridx, irqstate, nexttick);
+ } else {
+ /* Timer disabled: ISTATUS and timer output always clear */
+ gt->ctl &= ~4;
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
+ timer_del(cpu->gt_timer[timeridx]);
+ trace_arm_gt_recalc_disabled(timeridx);
+ }
+}
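+
+/*
+ * Worked example, assuming the default 62.5MHz frequency (one tick per
+ * GTIMER_SCALE == 16ns, per cpu.h): with offset 0, count 1000 and an
+ * enabled, unmasked timer whose cval is 63501000, ISTATUS is 0, the IRQ
+ * line is lowered and the QEMUTimer is re-armed at nexttick == 63501000,
+ * i.e. 63500000 ticks (about 1.016s) in the future. Once count >= cval,
+ * ISTATUS becomes 1, the IRQ is raised and nexttick becomes UINT64_MAX:
+ * the next change would be the (practically unreachable) counter wrap.
+ */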
+
+static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ timer_del(cpu->gt_timer[timeridx]);
+}
+
+static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_get_countervalue(env);
+}
+
+static uint64_t gt_virt_cnt_offset(CPUARMState *env)
+{
+ uint64_t hcr;
+
+ switch (arm_current_el(env)) {
+ case 2:
+ hcr = arm_hcr_el2_eff(env);
+ if (hcr & HCR_E2H) {
+ return 0;
+ }
+ break;
+ case 0:
+ hcr = arm_hcr_el2_eff(env);
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ return 0;
+ }
+ break;
+ }
+
+ return env->cp15.cntvoff_el2;
+}
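+
+/*
+ * In other words, CNTVOFF_EL2 applies to virtual counter reads from
+ * EL0 and EL1, but not at EL2 when E2H is set, and not at EL0 when
+ * E2H+TGE make EL0 part of the host (there CNTVCT reads the physical
+ * count).
+ */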
+
+static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
+}
+
+static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ trace_arm_gt_cval_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].cval = value;
+ gt_recalc_timer(env_archcpu(env), timeridx);
+}
+
+static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
+{
+ uint64_t offset = 0;
+
+ switch (timeridx) {
+ case GTIMER_VIRT:
+ case GTIMER_HYPVIRT:
+ offset = gt_virt_cnt_offset(env);
+ break;
+ }
+
+ return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
+ (gt_get_countervalue(env) - offset));
+}
+
+static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ uint64_t offset = 0;
+
+ switch (timeridx) {
+ case GTIMER_VIRT:
+ case GTIMER_HYPVIRT:
+ offset = gt_virt_cnt_offset(env);
+ break;
+ }
+
+ trace_arm_gt_tval_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
+ sextract64(value, 0, 32);
+ gt_recalc_timer(env_archcpu(env), timeridx);
+}
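+
+/*
+ * TVAL is therefore a signed 32-bit downcounter derived from CVAL:
+ * TVAL == (int32_t)(CVAL - (count - offset)). Writing TVAL re-derives
+ * CVAL as (count - offset) + sextract64(TVAL, 0, 32); for example,
+ * writing 1000 makes the timer fire 1000 ticks from now, while writing
+ * a negative value produces an already-expired timer (ISTATUS set
+ * immediately by the recalc).
+ */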
+
+static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
+
+ trace_arm_gt_ctl_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
+ if ((oldval ^ value) & 1) {
+ /* Enable toggled */
+ gt_recalc_timer(cpu, timeridx);
+ } else if ((oldval ^ value) & 2) {
+ /* IMASK toggled: don't need to recalculate,
+ * just set the interrupt line based on ISTATUS
+ */
+ int irqstate = (oldval & 4) && !(value & 2);
+
+ trace_arm_gt_imask_toggle(timeridx, irqstate);
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
+ }
+}
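+
+/*
+ * CNT*_CTL is bit 0 ENABLE, bit 1 IMASK and bit 2 ISTATUS (read-only),
+ * which is why the write above deposits only the low two bits, and why
+ * an IMASK-only toggle just recomputes the IRQ line from the cached
+ * ISTATUS rather than doing a full recalc.
+ */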
+
+static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_PHYS, value);
+}
+
+static int gt_phys_redir_timeridx(CPUARMState *env)
+{
+ switch (arm_mmu_idx(env)) {
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ return GTIMER_HYP;
+ default:
+ return GTIMER_PHYS;
+ }
+}
+
+static int gt_virt_redir_timeridx(CPUARMState *env)
+{
+ switch (arm_mmu_idx(env)) {
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ return GTIMER_HYPVIRT;
+ default:
+ return GTIMER_VIRT;
+ }
+}
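+
+/*
+ * The two helpers above implement the FEAT_VHE redirection rule: while
+ * an EL2&0 translation regime is in use (the E20_*/SE20_* mmu indexes),
+ * accesses to the CNTP_* and CNTV_* registers are redirected to the EL2
+ * physical and virtual timers (GTIMER_HYP and GTIMER_HYPVIRT).
+ */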
+
+static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ return env->cp15.c14_timer[timeridx].cval;
+}
+
+static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ gt_cval_write(env, ri, timeridx, value);
+}
+
+static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ return gt_tval_read(env, ri, timeridx);
+}
+
+static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ gt_tval_write(env, ri, timeridx, value);
+}
+
+static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ return env->cp15.c14_timer[timeridx].ctl;
+}
+
+static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_phys_redir_timeridx(env);
+ gt_ctl_write(env, ri, timeridx, value);
+}
+
+static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ trace_arm_gt_cntvoff_write(value);
+ raw_write(env, ri, value);
+ gt_recalc_timer(cpu, GTIMER_VIRT);
+}
+
+static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ return env->cp15.c14_timer[timeridx].cval;
+}
+
+static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ gt_cval_write(env, ri, timeridx, value);
+}
+
+static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ return gt_tval_read(env, ri, timeridx);
+}
+
+static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ gt_tval_write(env, ri, timeridx, value);
+}
+
+static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
+ const ARMCPRegInfo *ri)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ return env->cp15.c14_timer[timeridx].ctl;
+}
+
+static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int timeridx = gt_virt_redir_timeridx(env);
+ gt_ctl_write(env, ri, timeridx, value);
+}
+
+static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_HYP, value);
+}
+
+static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_SEC, value);
+}
+
+static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_SEC, value);
+}
+
+static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_SEC, value);
+}
+
+static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_HYPVIRT);
+}
+
+static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
+}
+
+static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_HYPVIRT);
+}
+
+static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
+}
+
+static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
+}
+
+void arm_gt_ptimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_PHYS);
+}
+
+void arm_gt_vtimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_VIRT);
+}
+
+void arm_gt_htimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_HYP);
+}
+
+void arm_gt_stimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_SEC);
+}
+
+void arm_gt_hvtimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_HYPVIRT);
+}
+
+static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
+}
+
+static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
+ /* Note that CNTFRQ is purely reads-as-written for the benefit
+ * of software; writing it doesn't actually change the timer frequency.
+ * Our reset value matches the fixed frequency we implement the timer at.
+ */
+ { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
+ },
+ { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
+ .resetfn = arm_gt_cntfrq_reset,
+ },
+ /* overall control: mostly access permissions */
+ { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
+ .resetvalue = 0,
+ },
+ /* per-timer control */
+ { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_NS,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_PHYS].ctl),
+ .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
+ .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL_S",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_S,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_SEC].ctl),
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
+ .resetvalue = 0,
+ .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
+ .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
+ .accessfn = gt_vtimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_VIRT].ctl),
+ .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
+ .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_vtimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
+ .resetvalue = 0,
+ .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
+ .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
+ },
+ /* TimerValue views: a 32 bit downcounting view of the underlying state */
+ { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_NS,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
+ },
+ { .name = "CNTP_TVAL_S",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_S,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
+ },
+ { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
+ .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
+ },
+ { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_vtimer_access,
+ .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
+ },
+ { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
+ .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
+ .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
+ },
+ /* The counter itself */
+ { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_pct_access,
+ .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_pct_access, .readfn = gt_cnt_read,
+ },
+ { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_vct_access,
+ .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
+ },
+ /* Comparison value, indicating when the timer goes off */
+ { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_NS,
+ .access = PL0_RW,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
+ .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_S,
+ .access = PL0_RW,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .accessfn = gt_ptimer_access,
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL0_RW,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .resetvalue = 0, .accessfn = gt_ptimer_access,
+ .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
+ .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
+ .access = PL0_RW,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .accessfn = gt_vtimer_access,
+ .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
+ .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
+ .access = PL0_RW,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .resetvalue = 0, .accessfn = gt_vtimer_access,
+ .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
+ .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
+ },
+ /* Secure timer -- this is actually restricted to EL3 only,
+ * and configurably to Secure EL1, via the accessfn.
+ */
+ { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .readfn = gt_sec_tval_read,
+ .writefn = gt_sec_tval_write,
+ .resetfn = gt_sec_timer_reset,
+ },
+ { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
+ .resetvalue = 0,
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+#else
+
+/* In user-mode most of the generic timer registers are inaccessible;
+ * however, modern kernels (4.12+) allow access to cntvct_el0.
+ */
+
+static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ /* Currently we have no support for QEMUTimer in linux-user, so we
+ * can't call gt_get_countervalue(env); instead we directly
+ * call the lower-level functions.
+ */
+ return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
+}
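+
+/*
+ * As a rough sketch of the value a guest sees, assuming the default
+ * gt_cntfrq_hz of 62.5MHz (16ns period): cpu_get_clock() returns
+ * nanoseconds, so CNTVCT_EL0 advances by about 62500000 per second of
+ * host time.
+ */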
+
+static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
+ { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
+ .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
+ .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
+ },
+ { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .readfn = gt_virt_cnt_read,
+ },
+ REGINFO_SENTINEL
+};
+
+#endif
+
+static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ raw_write(env, ri, value);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ raw_write(env, ri, value & 0xfffff6ff);
+ } else {
+ raw_write(env, ri, value & 0xfffff1ff);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/* get_phys_addr() isn't present for user-mode-only targets */
+
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (ri->opc2 & 4) {
+ /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
+ */
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+#ifdef CONFIG_TCG
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx)
+{
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ bool ret;
+ uint64_t par64;
+ bool format64 = false;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
+ &prot, &page_size, &fi, &cacheattrs);
+
+ if (ret) {
+ /*
+ * Some kinds of translation fault must cause exceptions rather
+ * than being reported in the PAR.
+ */
+ int current_el = arm_current_el(env);
+ int target_el;
+ uint32_t syn, fsr, fsc;
+ bool take_exc = false;
+
+ if (fi.s1ptw && current_el == 1
+ && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+ /*
+ * Synchronous stage 2 fault on an access made as part of the
+ * translation table walk for AT S1E0* or AT S1E1* insn
+ * executed from NS EL1. If this is a synchronous external abort
+ * and SCR_EL3.EA == 1, then we take a synchronous external abort
+ * to EL3. Otherwise the fault is taken as an exception to EL2,
+ * and HPFAR_EL2 holds the faulting IPA.
+ */
+ if (fi.type == ARMFault_SyncExternalOnWalk &&
+ (env->cp15.scr_el3 & SCR_EA)) {
+ target_el = 3;
+ } else {
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ if (arm_is_secure_below_el3(env) && fi.s1ns) {
+ env->cp15.hpfar_el2 |= HPFAR_NS;
+ }
+ target_el = 2;
+ }
+ take_exc = true;
+ } else if (fi.type == ARMFault_SyncExternalOnWalk) {
+ /*
+ * Synchronous external aborts during a translation table walk
+ * are taken as Data Abort exceptions.
+ */
+ if (fi.stage2) {
+ if (current_el == 3) {
+ target_el = 3;
+ } else {
+ target_el = 2;
+ }
+ } else {
+ target_el = exception_target_el(env);
+ }
+ take_exc = true;
+ }
+
+ if (take_exc) {
+ /* Construct FSR and FSC using same logic as arm_deliver_fault() */
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, mmu_idx)) {
+ fsr = arm_fi_to_lfsc(&fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(&fi);
+ fsc = 0x3f;
+ }
+ /*
+ * Report exception with ESR indicating a fault due to a
+ * translation table walk for a cache maintenance instruction.
+ */
+ syn = syn_data_abort_no_iss(current_el == target_el, 0,
+ fi.ea, 1, fi.s1ptw, 1, fsc);
+ env->exception.vaddress = value;
+ env->exception.fsr = fsr;
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+ }
+ }
+
+ if (is_a64(env)) {
+ format64 = true;
+ } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /*
+ * ATS1Cxx:
+ * * TTBCR.EAE determines whether the result is returned using the
+ * 32-bit or the 64-bit PAR format
+ * * Instructions executed in Hyp mode always use the 64-bit format
+ *
+ * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
+ * * The Non-secure TTBCR.EAE bit is set to 1
+ * * The implementation includes EL2, and the value of HCR.VM is 1
+ *
+ * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+ *
+ * ATS1Hx always uses the 64-bit format.
+ */
+ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (mmu_idx == ARMMMUIdx_E10_0 ||
+ mmu_idx == ARMMMUIdx_E10_1 ||
+ mmu_idx == ARMMMUIdx_E10_1_PAN) {
+ format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
+ } else {
+ format64 |= arm_current_el(env) == 2;
+ }
+ }
+ }
+
+ if (format64) {
+ /* Create a 64-bit PAR */
+ par64 = (1 << 11); /* LPAE bit always set */
+ if (!ret) {
+ par64 |= phys_addr & ~0xfffULL;
+ if (!attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
+ par64 |= cacheattrs.shareability << 7; /* SH */
+ } else {
+ uint32_t fsr = arm_fi_to_lfsc(&fi);
+
+ par64 |= 1; /* F */
+ par64 |= (fsr & 0x3f) << 1; /* FS */
+ if (fi.stage2) {
+ par64 |= (1 << 9); /* S */
+ }
+ if (fi.s1ptw) {
+ par64 |= (1 << 8); /* PTW */
+ }
+ }
+ } else {
+ /* fsr is a DFSR/IFSR value for the short descriptor
+ * translation table format (with WnR always clear).
+ * Convert it to a 32-bit PAR.
+ */
+ if (!ret) {
+ /* We do not set any attribute bits in the PAR */
+ if (page_size == (1 << 24)
+ && arm_feature(env, ARM_FEATURE_V7)) {
+ par64 = (phys_addr & 0xff000000) | (1 << 1);
+ } else {
+ par64 = phys_addr & 0xfffff000;
+ }
+ if (!attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ } else {
+ uint32_t fsr = arm_fi_to_sfsc(&fi);
+
+ par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+ ((fsr & 0xf) << 1) | 1;
+ }
+ }
+ return par64;
+}
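+
+/*
+ * Worked example of the 64-bit PAR encoding built above: a successful
+ * walk to physical address 0x40001000, non-secure, outer shareable
+ * (SH == 2) with Normal write-back attributes (0xff) yields
+ * par64 == 0x40001000 | (1 << 11) | (1 << 9) | (2 << 7) | (0xffULL << 56)
+ * == 0xff00000040001b00. A failed walk instead sets bit 0 (F) and
+ * packs the long-format fault status code into bits [6:1].
+ */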
+#endif /* CONFIG_TCG */
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+#ifdef CONFIG_TCG
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ bool secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_SE3;
+ break;
+ case 2:
+ g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ /* fall through */
+ case 1:
+ if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
+ mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
+ : ARMMMUIdx_Stage1_E1_PAN);
+ } else {
+ mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_SE10_0;
+ break;
+ case 2:
+ g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ mmu_idx = ARMMMUIdx_Stage1_E0;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_E10_1;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_E10_0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
+}
+
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+#ifdef CONFIG_TCG
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 &&
+ !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+#ifdef CONFIG_TCG
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ ARMMMUIdx mmu_idx;
+ int secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+ if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
+ mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
+ : ARMMMUIdx_Stage1_E1_PAN);
+ } else {
+ mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_SE3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
+}
+#endif
+
+static const ARMCPRegInfo vapa_cp_reginfo[] = {
+ { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
+ offsetoflow32(CPUARMState, cp15.par_ns) },
+ .writefn = par_write },
+#ifndef CONFIG_USER_ONLY
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
+ { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_W, .accessfn = ats_access,
+ .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+#endif
+ REGINFO_SENTINEL
+};
+
+/* Return basic MPU access permission bits. */
+static uint32_t simple_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val >> i) & mask;
+ mask <<= 2;
+ }
+ return ret;
+}
+
+/* Pad basic MPU access permission bits to extended format. */
+static uint32_t extended_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val & mask) << i;
+ mask <<= 2;
+ }
+ return ret;
+}
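+
+/*
+ * Example round trip: the extended value 0x31 (region 0 AP == 1,
+ * region 1 AP == 3, one nibble per region) reads back through
+ * simple_mpu_ap_bits() as 0xd (two bits per region), and
+ * extended_mpu_ap_bits(0xd) reconstructs 0x31.
+ */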
+
+static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
+}
+
+static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
+}
+
+static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
+}
+
+static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
+}
+
+static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
+
+ if (!u32p) {
+ return 0;
+ }
+
+ u32p += env->pmsav7.rnr[M_REG_NS];
+ return *u32p;
+}
+
+static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
+
+ if (!u32p) {
+ return;
+ }
+
+ u32p += env->pmsav7.rnr[M_REG_NS];
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
+ *u32p = value;
+}
+
+static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t nrgs = cpu->pmsav7_dregion;
+
+ if (value >= nrgs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PMSAv7 RGNR write >= # supported regions, %" PRIu32
+ " > %" PRIu32 "\n", (uint32_t)value, nrgs);
+ return;
+ }
+
+ raw_write(env, ri, value);
+}
+
+static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
+ /* Reset for all these registers is handled in arm_cpu_reset(),
+ * because the PMSAv7 is also used by M-profile CPUs, which do
+ * not register cpregs but still need the state to be reset.
+ */
+ { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
+ .writefn = pmsav7_rgnr_write,
+ .resetfn = arm_cp_reset_ignore },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
+ { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
+ { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
+ { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .resetvalue = 0, },
+ { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .resetvalue = 0, },
+ { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
+ { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
+ /* Protection region base and size registers */
+ { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
+ { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
+ { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
+ { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
+ { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
+ { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
+ { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
+ { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
+ REGINFO_SENTINEL
+};
+
+static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ TCR *tcr = raw_ptr(env, ri);
+ int maskshift = extract32(value, 0, 3);
+
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
+ /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+ * using the Long-descriptor translation table format */
+ value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
+ } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* In an implementation that includes the Security Extensions
+ * TTBCR has additional fields PD0 [4] and PD1 [5] for
+ * Short-descriptor translation table format.
+ */
+ value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
+ } else {
+ value &= TTBCR_N;
+ }
+ }
+
+ /* Update the masks corresponding to the TCR bank being written.
+ * Note that we always calculate mask and base_mask, but
+ * they are only used for short-descriptor tables (i.e. if EAE is 0);
+ * for long-descriptor tables the TCR fields are used differently
+ * and the mask and base_mask values are meaningless.
+ */
+ tcr->raw_tcr = value;
+ tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+ tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
+}
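+
+/*
+ * For the short-descriptor case the masks fall out as, for example:
+ * N == 0: mask == 0x00000000, base_mask == 0xffffc000 (TTBR0 covers the
+ * whole 4GB address space and must be 16KB aligned);
+ * N == 2: mask == 0xc0000000, base_mask == 0xfffff000 (TTBR0 covers only
+ * VAs below 0x40000000 and needs just 4KB alignment).
+ */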
+
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TCR *tcr = raw_ptr(env, ri);
+
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /* With LPAE, a TTBCR write could change the ASID in use
+ * via the TTBCR.A1 bit, so do a TLB flush.
+ */
+ tlb_flush(CPU(cpu));
+ }
+ /* Preserve the high half of TCR_EL1, set via TTBCR2. */
+ value = deposit64(tcr->raw_tcr, 0, 32, value);
+ vmsa_ttbcr_raw_write(env, ri, value);
+}
+
+static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ TCR *tcr = raw_ptr(env, ri);
+
+ /* Reset both the TCR and the masks corresponding to the bank of
+ * the TCR being reset.
+ */
+ tcr->raw_tcr = 0;
+ tcr->mask = 0;
+ tcr->base_mask = 0xffffc000u;
+}
+
+static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TCR *tcr = raw_ptr(env, ri);
+
+ /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
+ tlb_flush(CPU(cpu));
+ tcr->raw_tcr = value;
+}
+
+static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
+ if (cpreg_field_is_64bit(ri) &&
+ extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
+ ARMCPU *cpu = env_archcpu(env);
+ tlb_flush(CPU(cpu));
+ }
+ raw_write(env, ri, value);
+}
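+
+/*
+ * The extract64(..., 48, 16) comparison covers the ASID field in both
+ * the AArch64 TTBRn_EL1 (bits [63:48]) and the AArch32 LPAE TTBR
+ * (bits [55:48]); a 32-bit banked write can never change the ASID, so
+ * it skips the flush.
+ */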
+
+static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * If we are running in an EL2&0 regime, then an ASID is active.
+ * Flush if that might be changing. Note we're not checking
+ * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
+ * holds the active ASID, only checking the field that might.
+ */
+ if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
+ (arm_hcr_el2_eff(env) & HCR_E2H)) {
+ uint16_t mask = ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0;
+
+ if (arm_is_secure_below_el3(env)) {
+ mask >>= ARM_MMU_IDX_A_NS;
+ }
+
+ tlb_flush_by_mmuidx(env_cpu(env), mask);
+ }
+ raw_write(env, ri, value);
+}
+
+static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * A change of VMID in the stage 2 page table base (VTTBR) invalidates
+ * the combined stage 1&2 TLBs (EL10_1 and EL10_0).
+ */
+ if (raw_read(env, ri) != value) {
+ uint16_t mask = ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
+
+ if (arm_is_secure_below_el3(env)) {
+ mask >>= ARM_MMU_IDX_A_NS;
+ }
+
+ tlb_flush_by_mmuidx(cs, mask);
+ raw_write(env, ri, value);
+ }
+}
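+
+/*
+ * The "mask >>= ARM_MMU_IDX_A_NS" in the two writefns above assumes,
+ * as in this tree's internals.h, that each Secure A-profile mmu index
+ * equals its non-secure counterpart minus the ARM_MMU_IDX_A_NS flag,
+ * so shifting the bit mask right by that flag value turns a set of NS
+ * index bits into the corresponding Secure ones.
+ */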
+
+static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
+ { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
+ offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
+ { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
+ offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
+ { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
+ offsetof(CPUARMState, cp15.dfar_ns) } },
+ { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
+ .resetvalue = 0, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo vmsa_cp_reginfo[] = {
+ { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
+ { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) } },
+ { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) } },
+ { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .writefn = vmsa_tcr_el12_write,
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
+ { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
+ .raw_writefn = vmsa_ttbcr_raw_write,
+ /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]),
+ offsetof(CPUARMState, cp15.tcr_el[1])} },
+ REGINFO_SENTINEL
+};
+
+/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
+ * the QEMU TLBs or adjusting the cached masks.
+ */
+static const ARMCPRegInfo ttbcr2_reginfo = {
+ .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_ALIAS,
+ .bank_fieldoffsets = {
+ offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr),
+ offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr),
+ },
+};
+
+static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_ticonfig = value & 0xe7;
+ /* The OS_TYPE bit in this register changes the reported CPUID! */
+ env->cp15.c0_cpuid = (value & (1 << 5)) ?
+ ARM_CPUID_TI915T : ARM_CPUID_TI925T;
+}
+
+static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_threadid = value & 0xffff;
+}
+
+static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Wait-for-interrupt (deprecated) */
+ cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
+}
+
+static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* On OMAP there are registers indicating the max/min index of dcache lines
+ * containing a dirty line; cache flush operations have to reset these.
+ */
+ env->cp15.c15_i_max = 0x000;
+ env->cp15.c15_i_min = 0xff0;
+}
+
+static const ARMCPRegInfo omap_cp_reginfo[] = {
+ { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
+ .resetvalue = 0, },
+ { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
+ .writefn = omap_ticonfig_write },
+ { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
+ { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0xff0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
+ { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
+ .writefn = omap_threadid_write },
+ { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
+ .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
+ .type = ARM_CP_NO_RAW,
+ .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
+ /* TODO: Peripheral port remap register:
+ * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
+ * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
+ * when MMU is off.
+ */
+ { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
+ .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
+ .writefn = omap_cachemaint_write },
+ { .name = "C9", .cp = 15, .crn = 9,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_cpar = value & 0x3fff;
+}
+
+static const ARMCPRegInfo xscale_cp_reginfo[] = {
+ { .name = "XSCALE_CPAR",
+ .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
+ .writefn = xscale_cpar_write, },
+ { .name = "XSCALE_AUXCR",
+ .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
+ .resetvalue = 0, },
+ /* XScale specific cache-lockdown: since we have no cache we NOP these
+ * and hope the guest does not really rely on cache behaviour.
+ */
+ { .name = "XSCALE_LOCK_ICACHE_LINE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "XSCALE_UNLOCK_ICACHE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "XSCALE_DCACHE_LOCK",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "XSCALE_UNLOCK_DCACHE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
+ /* RAZ/WI the whole crn=15 space when we don't have a more specific
+ * implementation of this implementation-defined space.
+ * Ideally this should eventually disappear in favour of actually
+ * implementing the correct behaviour for all cores.
+ */
+ { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW,
+ .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
+ /* Cache status: RAZ because we have no cache so it's always clean */
+ { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
+ /* We never have a block transfer operation in progress */
+ { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ /* The cache ops themselves: these all NOP for QEMU */
+ { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
+ /* The cache test-and-clean instructions always return (1 << 30)
+ * to indicate that there are no dirty cache lines.
+ */
+ { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = (1 << 30) },
+ { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = (1 << 30) },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo strongarm_cp_reginfo[] = {
+ /* Ignore ReadBuffer accesses */
+ { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW, .resetvalue = 0,
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
+ REGINFO_SENTINEL
+};
+
+static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ unsigned int cur_el = arm_current_el(env);
+
+ if (arm_is_el2_enabled(env) && cur_el == 1) {
+ return env->cp15.vpidr_el2;
+ }
+ return raw_read(env, ri);
+}
+
+static uint64_t mpidr_read_val(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t mpidr = cpu->mp_affinity;
+
+ if (arm_feature(env, ARM_FEATURE_V7MP)) {
+ mpidr |= (1U << 31);
+ /* Cores which are uniprocessor (non-coherent)
+ * but still implement the MP extensions set
+ * bit 30. (For instance, Cortex-R5).
+ */
+ if (cpu->mp_is_up) {
+            mpidr |= (1U << 30);
+ }
+ }
+ return mpidr;
+}
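+
+/*
+ * Worked example: a V7MP core with mp_affinity 0x1 reads MPIDR as
+ * 0x80000001 (bit 31 set); a uniprocessor core that still implements
+ * the MP extensions (mp_is_up, e.g. Cortex-R5) also sets bit 30,
+ * giving 0xc0000001.
+ */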
+
+static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ unsigned int cur_el = arm_current_el(env);
+
+ if (arm_is_el2_enabled(env) && cur_el == 1) {
+ return env->cp15.vmpidr_el2;
+ }
+ return mpidr_read_val(env);
+}
+
+static const ARMCPRegInfo lpae_cp_reginfo[] = {
+ /* NOP AMAIR0/1 */
+ { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
+ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
+ .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
+ offsetof(CPUARMState, cp15.par_ns)} },
+ { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) },
+ .writefn = vmsa_ttbr_write, },
+ { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) },
+ .writefn = vmsa_ttbr_write, },
+ REGINFO_SENTINEL
+};
+
+static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return vfp_get_fpcr(env);
+}
+
+static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpcr(env, value);
+}
+
+static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return vfp_get_fpsr(env);
+}
+
+static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpsr(env, value);
+}
+
+static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
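+
+/*
+ * Example: an MRS/MSR of the DAIF register from EL0 is trapped unless
+ * SCTLR_EL1.UMA (User Mask Access) is set; from EL1 or above this
+ * accessor always succeeds.
+ */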
+
+static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->daif = value & PSTATE_DAIF;
+}
+
+static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_PAN;
+}
+
+static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
+}
+
+static const ARMCPRegInfo pan_reginfo = {
+ .name = "PAN", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_RW,
+ .readfn = aa64_pan_read, .writefn = aa64_pan_write
+};
+
+static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_UAO;
+}
+
+static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
+}
+
+static const ARMCPRegInfo uao_reginfo = {
+ .name = "UAO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL1_RW,
+ .readfn = aa64_uao_read, .writefn = aa64_uao_write
+};
+
+static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_DIT;
+}
+
+static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
+}
+
+static const ARMCPRegInfo dit_reginfo = {
+ .name = "DIT", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL0_RW,
+ .readfn = aa64_dit_read, .writefn = aa64_dit_write
+};
+
+static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_SSBS;
+}
+
+static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
+}
+
+static const ARMCPRegInfo ssbs_reginfo = {
+ .name = "SSBS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
+ .type = ARM_CP_NO_RAW, .access = PL0_RW,
+ .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
+};
+
+static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Cache invalidate/clean to Point of Coherency or Persistence... */
+ switch (arm_current_el(env)) {
+ case 0:
+ /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
+ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
+ return CP_ACCESS_TRAP;
+ }
+ /* fall through */
+ case 1:
+ /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
+ if (arm_hcr_el2_eff(env) & HCR_TPCP) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ break;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Cache invalidate/clean to Point of Unification... */
+ switch (arm_current_el(env)) {
+ case 0:
+ /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
+ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
+ return CP_ACCESS_TRAP;
+ }
+ /* fall through */
+ case 1:
+ /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
+ if (arm_hcr_el2_eff(env) & HCR_TPU) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ break;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
+ * Page D4-1736 (DDI0487A.b)
+ */
+
+static int vae1_tlbmask(CPUARMState *env)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ uint16_t mask;
+
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ mask = ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E20_0;
+ } else {
+ mask = ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ mask >>= ARM_MMU_IDX_A_NS;
+ }
+
+ return mask;
+}
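+
+/*
+ * Example: when HCR_EL2.{E2H,TGE} are both set, EL0 runs against the
+ * EL2&0 translation regime, so the mask covers the E20_* TLB indexes;
+ * in every other configuration the traditional EL1&0 (E10_*) indexes
+ * are flushed instead.
+ */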
+
+/* Return 56 if TBI is enabled, 64 otherwise. */
+static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint64_t addr)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ int select = extract64(addr, 55, 1);
+
+ return (tbi >> select) & 1 ? 56 : 64;
+}
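+
+/*
+ * Worked example: tbi is a two-bit {TBI1,TBI0} field and VA bit 55
+ * selects the half of the address space. For a low address (bit 55
+ * clear) with TBI0 set, (tbi >> 0) & 1 is 1 and only the low 56 bits
+ * of the VA participate in TLB matching; otherwise all 64 bits do.
+ */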
+
+static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ ARMMMUIdx mmu_idx;
+
+ /* Only the regime of the mmu_idx below is significant. */
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ mmu_idx = ARMMMUIdx_E20_0;
+ } else {
+ mmu_idx = ARMMMUIdx_E10_0;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ mmu_idx &= ~ARM_MMU_IDX_A_NS;
+ }
+
+ return tlbbits_for_regime(env, mmu_idx, addr);
+}
+
+static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+ } else {
+ tlb_flush_by_mmuidx(cs, mask);
+ }
+}
+
+static int alle1_tlbmask(CPUARMState *env)
+{
+ /*
+ * Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ if (arm_is_secure_below_el3(env)) {
+ return ARMMMUIdxBit_SE10_1 |
+ ARMMMUIdxBit_SE10_1_PAN |
+ ARMMMUIdxBit_SE10_0;
+ } else {
+ return ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0;
+ }
+}
+
+static int e2_tlbmask(CPUARMState *env)
+{
+ if (arm_is_secure_below_el3(env)) {
+ return ARMMMUIdxBit_SE20_0 |
+ ARMMMUIdxBit_SE20_2 |
+ ARMMMUIdxBit_SE20_2_PAN |
+ ARMMMUIdxBit_SE2;
+ } else {
+ return ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2;
+ }
+}
+
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = alle1_tlbmask(env);
+
+ tlb_flush_by_mmuidx(cs, mask);
+}
+
+static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = e2_tlbmask(env);
+
+ tlb_flush_by_mmuidx(cs, mask);
+}
+
+static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
+}
+
+static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = alle1_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = e2_tlbmask(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
+}
+
+static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
+}
+
+static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL2
+ * Currently handles both VAE2 and VALE2, since we don't support
+ * flush-last-level-only.
+ */
+ CPUState *cs = env_cpu(env);
+ int mask = e2_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
+}
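+
+/*
+ * Note on the pageaddr computation above: the TLBI VA argument holds
+ * VA[55:12] in value[43:0], so "value << 12" re-aligns the address and
+ * sextract64(..., 0, 56) sign-extends from bit 55 to give a canonical
+ * 56-bit virtual address.
+ */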
+
+static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL3
+ * Currently handles both VAE3 and VALE3, since we don't support
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
+}
+
+static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae1_tlbbits(env, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+}
+
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ CPUState *cs = env_cpu(env);
+ int mask = vae1_tlbmask(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = vae1_tlbbits(env, pageaddr);
+
+ if (tlb_force_broadcast(env)) {
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+ } else {
+ tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
+ }
+}
+
+static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ bool secure = arm_is_secure_below_el3(env);
+ int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
+ int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
+ pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+}
+
+static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = env_cpu(env);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_SE3, bits);
+}
+
+#ifdef TARGET_AARCH64
+static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
+ uint64_t value)
+{
+ unsigned int page_shift;
+ unsigned int page_size_granule;
+ uint64_t num;
+ uint64_t scale;
+ uint64_t exponent;
+ uint64_t length;
+
+    num = extract64(value, 39, 5);
+    scale = extract64(value, 44, 2);
+    page_size_granule = extract64(value, 46, 2);
+
+    if (page_size_granule == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
+                      page_size_granule);
+        return 0;
+    }
+
+    /* TG encodes the granule size: 0b01 = 4KB, 0b10 = 16KB, 0b11 = 64KB. */
+    page_shift = (page_size_granule - 1) * 2 + 12;
+
+    exponent = (5 * scale) + 1;
+    length = (num + 1) << (exponent + page_shift);
+
+ return length;
+}
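+
+/*
+ * Worked example: NUM = 0, SCALE = 0, TG = 0b01 (4KB) yields
+ * page_shift = 12 and exponent = 1, so length = 1 << 13 = 8KB, i.e.
+ * (NUM + 1) * 2^(5*SCALE+1) = 2 granule-sized pages, as architected.
+ */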
+
+static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
+ bool two_ranges)
+{
+ /* TODO: ARMv8.7 FEAT_LPA2 */
+ uint64_t pageaddr;
+
+ if (two_ranges) {
+ pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
+ } else {
+ pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
+ }
+
+ return pageaddr;
+}
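+
+/*
+ * BaseADDR occupies value[36:0] and is architecturally scaled by the
+ * translation granule; the fixed TARGET_PAGE_BITS shift above is only
+ * exact for 4KB granules (see the FEAT_LPA2 TODO). Two-range regimes
+ * (e.g. EL1&0) sign-extend from bit 36 so that a set top bit selects
+ * the upper VA range; single-range regimes zero-extend.
+ */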
+
+static void do_rvae_write(CPUARMState *env, uint64_t value,
+ int idxmap, bool synced)
+{
+ ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
+ bool two_ranges = regime_has_2_ranges(one_idx);
+ uint64_t baseaddr, length;
+ int bits;
+
+ baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
+ length = tlbi_aa64_range_get_length(env, value);
+ bits = tlbbits_for_regime(env, one_idx, baseaddr);
+
+ if (synced) {
+ tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
+ baseaddr,
+ length,
+ idxmap,
+ bits);
+ } else {
+ tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
+ length, idxmap, bits);
+ }
+}
+
+static void tlbi_aa64_rvae1_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL1&0.
+ * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae1is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable EL1&0.
+ * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
+ * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
+ * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
+ * shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae1_tlbmask(env), true);
+}
+
+static int vae2_tlbmask(CPUARMState *env)
+{
+ return (arm_is_secure_below_el3(env)
+ ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+}
+
+static void tlbi_aa64_rvae2_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL2.
+ * Currently handles all of RVAE2 and RVALE2,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env),
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae2is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, Inner/Outer Shareable, EL2.
+ * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer shareable specific flushes.
+ */
+
+ do_rvae_write(env, value, vae2_tlbmask(env), true);
+}
+
+static void tlbi_aa64_rvae3_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3.
+ * Currently handles all of RVAE3 and RVALE3,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_SE3,
+ tlb_force_broadcast(env));
+}
+
+static void tlbi_aa64_rvae3is_write(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Invalidate by VA range, EL3, Inner/Outer Shareable.
+ * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
+ * since we don't support flush-for-specific-ASID-only,
+ * flush-last-level-only or inner/outer specific flushes.
+ */
+
+ do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+}
+#endif
+
+static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int cur_el = arm_current_el(env);
+
+ if (cur_el < 2) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ if (cur_el == 0) {
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ } else {
+ if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (hcr & HCR_TDZ) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ } else if (hcr & HCR_TDZ) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int dzp_bit = 1 << 4;
+
+ /* DZP indicates whether DC ZVA access is allowed */
+ if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
+ dzp_bit = 0;
+ }
+ return cpu->dcz_blocksize | dzp_bit;
+}
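+
+/*
+ * Example: dcz_blocksize is log2 of the block size in words, so the
+ * usual value of 4 advertises a 2^4 * 4 = 64-byte DC ZVA block; when
+ * the access check fails, bit 4 (DZP) also reads as 1 to mark DC ZVA
+ * as prohibited.
+ */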
+
+static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!(env->pstate & PSTATE_SP)) {
+ /* Access to SP_EL0 is undefined if it's being used as
+ * the stack pointer.
+ */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_SP;
+}
+
+static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
+{
+ update_spsel(env, val);
+}
+
+static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
+ /* M bit is RAZ/WI for PMSA with no MPU implemented */
+ value &= ~SCTLR_M;
+ }
+
+ /* ??? Lots of these bits are not implemented. */
+
+ if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
+ if (ri->opc1 == 6) { /* SCTLR_EL3 */
+ value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
+ } else {
+ value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
+ SCTLR_ATA0 | SCTLR_ATA);
+ }
+ }
+
+ if (raw_read(env, ri) == value) {
+ /* Skip the TLB flush if nothing actually changed; Linux likes
+ * to do a lot of pointless SCTLR writes.
+ */
+ return;
+ }
+
+ raw_write(env, ri, value);
+
+ /* This may enable/disable the MMU, so do a TLB flush. */
+ tlb_flush(CPU(cpu));
+
+ if (ri->type & ARM_CP_SUPPRESS_TB_END) {
+ /*
+ * Normally we would always end the TB on an SCTLR write; see the
+         * comment in ARMCPRegInfo sctlr initialization below for why XScale
+ * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
+ * of hflags from the translator, so do it here.
+ */
+ arm_rebuild_hflags(env);
+ }
+}
+
+static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
+ return CP_ACCESS_TRAP_FP_EL2;
+ }
+ if (env->cp15.cptr_el[3] & CPTR_TFP) {
+ return CP_ACCESS_TRAP_FP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
+}
+
+static const ARMCPRegInfo v8_cp_reginfo[] = {
+ /* Minimal set of EL0-visible registers. This will need to be expanded
+ * significantly for system emulation of AArch64 CPUs.
+ */
+ { .name = "NZCV", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
+ .access = PL0_RW, .type = ARM_CP_NZCV },
+ { .name = "DAIF", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
+ .type = ARM_CP_NO_RAW,
+ .access = PL0_RW, .accessfn = aa64_daif_access,
+ .fieldoffset = offsetof(CPUARMState, daif),
+ .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
+ { .name = "FPCR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
+ .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
+ .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
+ { .name = "FPSR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
+ .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
+ .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
+ { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
+ .access = PL0_R, .type = ARM_CP_NO_RAW,
+ .readfn = aa64_dczid_read },
+ { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_DC_ZVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
+ .access = PL1_R, .type = ARM_CP_CURRENTEL },
+ /* Cache ops: all NOPs since we don't emulate caches */
+ { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_pou_access },
+ { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_pou_access },
+ { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_pou_access },
+ { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
+ .type = ARM_CP_NOP },
+ { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
+ { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
+ { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_pou_access },
+ { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
+ /* TLBI operations */
+ { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1_write },
+ { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+#ifndef CONFIG_USER_ONLY
+ /* 64 bit address translation operations */
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
+ .writefn = par_write },
+#endif
+ /* TLB invalidate last level of translation table walk */
+ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_is_write },
+ { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_is_write },
+ { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimva_write },
+ { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
+ .writefn = tlbimvaa_write },
+ { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVALHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBIIPAS2",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL2_W },
+ { .name = "TLBIIPAS2IS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL2_W },
+ { .name = "TLBIIPAS2L",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL2_W },
+ { .name = "TLBIIPAS2LIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL2_W },
+ /* 32 bit cache operations */
+ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
+ { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
+ { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
+ { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
+ { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
+ { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
+ { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
+ { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ /* MMU Domain access control / MPU write buffer control */
+ { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
+ { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
+ { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
+ /* We rely on the access checks not allowing the guest to write to the
+ * state field when SPSel indicates that it's being used as the stack
+ * pointer.
+ */
+ { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = sp_el0_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
+ { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
+ { .name = "SPSel", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW,
+ .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
+ { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
+ .access = PL2_RW, .accessfn = fpexc32_access },
+ { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
+ { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
+ { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
+ { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
+ { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
+ { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
+ { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
+ .resetvalue = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
+ { .name = "SDCR", .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = sdcr_write,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
+ REGINFO_SENTINEL
+};
+
+/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
+static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL2_RW,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
+ .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+/* Ditto, but for registers which exist in ARMv8 but not v7 */
+static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
+ { .name = "HCR2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
+ .access = PL2_RW,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
+ } else {
+ valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ valid_mask &= ~HCR_HCD;
+ } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
+ * However, if we're using the SMC PSCI conduit then QEMU is
+ * effectively acting like EL3 firmware and so the guest at
+ * EL2 should retain the ability to prevent EL1 from being
+ * able to make SMC calls into the ersatz firmware, so in
+ * that case HCR.TSC should be read/write.
+ */
+ valid_mask &= ~HCR_TSC;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ if (cpu_isar_feature(aa64_vh, cpu)) {
+ valid_mask |= HCR_E2H;
+ }
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= HCR_TLOR;
+ }
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ valid_mask |= HCR_API | HCR_APK;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
+ }
+ }
+
+ /* Clear RES0 bits. */
+ value &= valid_mask;
+
+ /*
+ * These bits change the MMU setup:
+ * HCR_VM enables stage 2 translation
+ * HCR_PTW forbids certain page-table setups
+ * HCR_DC disables stage1 and enables stage2 translation
+ * HCR_DCT enables tagging on (disabled) stage1 translation
+ */
+ if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
+ tlb_flush(CPU(cpu));
+ }
+ env->cp15.hcr_el2 = value;
+
+ /*
+ * Updates to VI and VF require us to update the status of
+ * virtual interrupts, which are the logical OR of these bits
+ * and the state of the input lines from the GIC. (This requires
+ * that we have the iothread lock, which is done by marking the
+ * reginfo structs as ARM_CP_IO.)
+ * Note that if a write to HCR pends a VIRQ or VFIQ it is never
+ * possible for it to be taken immediately, because VIRQ and
+ * VFIQ are masked unless running at EL0 or EL1, and HCR
+ * can only be written at EL2.
+ */
+ g_assert(qemu_mutex_iothread_locked());
+ arm_cpu_update_virq(cpu);
+ arm_cpu_update_vfiq(cpu);
+}
+
+static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ do_hcr_write(env, value, 0);
+}
+
+static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
+ value = deposit64(env->cp15.hcr_el2, 32, 32, value);
+ do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
+}
+
+static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Handle HCR write, i.e. write to low half of HCR_EL2 */
+ value = deposit64(env->cp15.hcr_el2, 0, 32, value);
+ do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
+}
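+
+/*
+ * In both halved writes above the preserved half is passed to
+ * do_hcr_write() via valid_mask, so it is not re-checked against the
+ * RES0 mask: e.g. an AArch32 write to HCR2 deposits bits [63:32] and
+ * marks the existing low 32 bits as already valid.
+ */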
+
+/*
+ * Return the effective value of HCR_EL2.
+ * Bits that are not included here:
+ * RW (read from SCR_EL3.RW as needed)
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env)
+{
+ uint64_t ret = env->cp15.hcr_el2;
+
+ if (!arm_is_el2_enabled(env)) {
+ /*
+ * "This register has no effect if EL2 is not enabled in the
+ * current Security state". This is ARMv8.4-SecEL2 speak for
+ * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
+ *
+ * Prior to that, the language was "In an implementation that
+ * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
+ * as if this field is 0 for all purposes other than a direct
+ * read or write access of HCR_EL2". With lots of enumeration
+     * on a per-field basis. In current QEMU, this condition is
+     * arm_is_secure_below_el3.
+ *
+ * Since the v8.4 language applies to the entire register, and
+ * appears to be backward compatible, use that.
+ */
+ return 0;
+ }
+
+ /*
+ * For a cpu that supports both aarch64 and aarch32, we can set bits
+ * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
+ * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
+ */
+ if (!arm_el_is_aa64(env, 2)) {
+ uint64_t aa32_valid;
+
+ /*
+ * These bits are up-to-date as of ARMv8.6.
+ * For HCR, it's easiest to list just the 2 bits that are invalid.
+ * For HCR2, list those that are valid.
+ */
+ aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
+ aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
+ HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
+ ret &= aa32_valid;
+ }
+
+ if (ret & HCR_TGE) {
+ /* These bits are up-to-date as of ARMv8.6. */
+ if (ret & HCR_E2H) {
+ ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
+ HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
+ HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
+ HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
+ HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
+ HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
+ } else {
+ ret |= HCR_FMO | HCR_IMO | HCR_AMO;
+ }
+ ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
+ HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
+ HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
+ HCR_TLOR);
+ }
+
+ return ret;
+}
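+
+/*
+ * Example of the TGE handling above: with HCR_EL2.TGE == 1 and
+ * E2H == 0, the effective value has FMO/IMO/AMO forced to 1 (physical
+ * interrupts target EL2) while traps such as TSC and TVM read as 0,
+ * whatever was actually written to the register.
+ */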
+
+static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * For A-profile AArch32 EL3, if NSACR.CP10
+ * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
+ value &= ~(0x3 << 10);
+ value |= env->cp15.cptr_el[2] & (0x3 << 10);
+ }
+ env->cp15.cptr_el[2] = value;
+}
+
+static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /*
+ * For A-profile AArch32 EL3, if NSACR.CP10
+ * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
+ */
+ uint64_t value = env->cp15.cptr_el[2];
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
+ !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
+ value |= 0x3 << 10;
+ }
+ return value;
+}
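+
+/*
+ * Example: in non-secure state with AArch32 EL3 and NSACR.CP10 == 0,
+ * writes to HCPTR.{TCP11,TCP10} are ignored and both bits read as 1,
+ * so cp10/cp11 (VFP) accesses from EL0/EL1 stay trapped to EL2 no
+ * matter what the hypervisor writes.
+ */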
+
+static const ARMCPRegInfo el2_cp_reginfo[] = {
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
+ .writefn = hcr_write },
+ { .name = "HCR", .state = ARM_CP_STATE_AA32,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
+ .writefn = hcr_writelow },
+ { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
+ { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
+ { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
+ { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
+ .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
+ .access = PL2_RW,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
+ { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
+ .resetvalue = 0 },
+ { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
+ { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
+ .readfn = cptr_el2_read, .writefn = cptr_el2_write },
+ { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
+ { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
+ /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
+ { .name = "VTCR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .type = ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
+ .writefn = vttbr_write },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .writefn = vttbr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
+ { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
+ { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
+ { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
+ { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
+ { .name = "TLBIALLNSNH",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_write },
+ { .name = "TLBIALLNSNHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_is_write },
+ { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_write },
+ { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_is_write },
+ { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_alle2_write },
+ { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
+#ifndef CONFIG_USER_ONLY
+ /* Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
+ /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+ /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
+ * reset values as IMPDEF. We choose to reset to 3 to comply with
+ * both ARMv7 and ARMv8.
+ */
+ .access = PL2_RW, .resetvalue = 3,
+ .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .resetfn = gt_hyp_timer_reset,
+ .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
+ .resetvalue = 0,
+ .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
+#endif
+ /* The only field of MDCR_EL2 that has a defined architectural reset value
+ * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
+ */
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
+ { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
+ { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
+ { .name = "HCR2", .state = ARM_CP_STATE_AA32,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
+ .access = PL2_RW,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
+ .writefn = hcr_writehigh },
+ REGINFO_SENTINEL
+};
+
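+/* The Secure EL2 stage 2 translation registers are accessible only from
+ * EL3 or from Secure state below EL3; all other accesses are UNDEFINED.
+ */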
+static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_OK;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
+ { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
+ .access = PL2_RW, .accessfn = sel2_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
+ { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
+ .access = PL2_RW, .accessfn = sel2_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
+ * At Secure EL1 it traps to EL3 or EL2.
+ */
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_TRAP_EL3;
+ }
+ /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
+ if (isread) {
+ return CP_ACCESS_OK;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+static const ARMCPRegInfo el3_cp_reginfo[] = {
+ { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
+ .resetfn = scr_reset, .writefn = scr_write },
+ { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
+ .writefn = scr_write },
+ { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.sder) },
+ { .name = "SDER",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
+ { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = vbar_write, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
+ { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
+ { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL3_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * we must provide a .raw_writefn and .resetfn because we handle
+ * reset and migration for the AArch32 TTBCR(S), which might be
+ * using mask and base_mask.
+ */
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
+ { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
+ { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
+ { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
+ { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
+ { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
+ .resetvalue = 0 },
+ { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
+ { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
+ { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3_write },
+ { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ REGINFO_SENTINEL
+};
+
+#ifndef CONFIG_USER_ONLY
+/* Test if system register redirection is to occur in the current state. */
+static bool redirect_for_e2h(CPUARMState *env)
+{
+ return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
+}
+
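+/* For a redirected register, ri->opaque points at the saved EL2 reginfo;
+ * see define_arm_vh_e2h_redirects_aliases() below.
+ */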
+static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ CPReadFn *readfn;
+
+ if (redirect_for_e2h(env)) {
+ /* Switch to the saved EL2 version of the register. */
+ ri = ri->opaque;
+ readfn = ri->readfn;
+ } else {
+ readfn = ri->orig_readfn;
+ }
+ if (readfn == NULL) {
+ readfn = raw_read;
+ }
+ return readfn(env, ri);
+}
+
+static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPWriteFn *writefn;
+
+ if (redirect_for_e2h(env)) {
+ /* Switch to the saved EL2 version of the register. */
+ ri = ri->opaque;
+ writefn = ri->writefn;
+ } else {
+ writefn = ri->orig_writefn;
+ }
+ if (writefn == NULL) {
+ writefn = raw_write;
+ }
+ writefn(env, ri, value);
+}
+
+static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
+{
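+    /* src is the EL1 register, dst the EL2 register that accesses are
+     * redirected to while HCR_EL2.E2H is set, and new the *_EL12 alias
+     * that reaches the EL1 register from EL2.
+     */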
+ struct E2HAlias {
+ uint32_t src_key, dst_key, new_key;
+ const char *src_name, *dst_name, *new_name;
+ bool (*feature)(const ARMISARegisters *id);
+ };
+
+#define K(op0, op1, crn, crm, op2) \
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
+
+ static const struct E2HAlias aliases[] = {
+ { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
+ "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
+ { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
+ "CPACR", "CPTR_EL2", "CPACR_EL12" },
+ { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
+ "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
+ { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
+ "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
+ { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
+ "TCR_EL1", "TCR_EL2", "TCR_EL12" },
+ { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
+ "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
+ { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
+ "ELR_EL1", "ELR_EL2", "ELR_EL12" },
+ { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
+ "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
+ { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
+ "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
+ { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
+ "ESR_EL1", "ESR_EL2", "ESR_EL12" },
+ { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
+ "FAR_EL1", "FAR_EL2", "FAR_EL12" },
+ { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
+ "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
+ { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
+ "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
+ { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
+ "VBAR", "VBAR_EL2", "VBAR_EL12" },
+ { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
+ "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
+ { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
+ "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
+
+ /*
+ * Note that redirection of ZCR is mentioned in the description
+ * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
+ * not in the summary table.
+ */
+ { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
+ "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
+
+ { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
+ "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
+
+ /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
+ /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
+ };
+#undef K
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(aliases); i++) {
+ const struct E2HAlias *a = &aliases[i];
+ ARMCPRegInfo *src_reg, *dst_reg;
+
+ if (a->feature && !a->feature(&cpu->isar)) {
+ continue;
+ }
+
+ src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
+ dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
+ g_assert(src_reg != NULL);
+ g_assert(dst_reg != NULL);
+
+ /* Cross-compare names to detect typos in the keys. */
+ g_assert(strcmp(src_reg->name, a->src_name) == 0);
+ g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
+
+ /* None of the core system registers use opaque; we will. */
+ g_assert(src_reg->opaque == NULL);
+
+ /* Create alias before redirection so we dup the right data. */
+ if (a->new_key) {
+ ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
+ uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
+ bool ok;
+
+ new_reg->name = a->new_name;
+ new_reg->type |= ARM_CP_ALIAS;
+ /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
+ new_reg->access &= PL2_RW | PL3_RW;
+
+ ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
+ g_assert(ok);
+ }
+
+ src_reg->opaque = dst_reg;
+ src_reg->orig_readfn = src_reg->readfn ?: raw_read;
+ src_reg->orig_writefn = src_reg->writefn ?: raw_write;
+ if (!src_reg->raw_readfn) {
+ src_reg->raw_readfn = raw_read;
+ }
+ if (!src_reg->raw_writefn) {
+ src_reg->raw_writefn = raw_write;
+ }
+ src_reg->readfn = el2_e2h_read;
+ src_reg->writefn = el2_e2h_write;
+ }
+}
+#endif
+
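+/* CTR_EL0: EL0 reads trap unless SCTLR.UCT is set (SCTLR_EL2 in the
+ * E2H+TGE host regime, SCTLR_EL1 otherwise). EL1 reads, and EL0 reads
+ * outside the host regime, also trap to EL2 when HCR_EL2.TID2 is set.
+ */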
+static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int cur_el = arm_current_el(env);
+
+ if (cur_el < 2) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ if (cur_el == 0) {
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ } else {
+ if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (hcr & HCR_TID2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ } else if (hcr & HCR_TID2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Writes to OSLAR_EL1 may update the OS lock status, which can be
+ * read via a bit in OSLSR_EL1.
+ */
+ int oslock;
+
+ if (ri->state == ARM_CP_STATE_AA32) {
+ oslock = (value == 0xC5ACCE55);
+ } else {
+ oslock = value & 1;
+ }
+
+ env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
+}
+
+static const ARMCPRegInfo debug_cp_reginfo[] = {
+ /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
+ * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
+ * unlike DBGDRAR it is never accessible from EL0.
+ * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
+ * accessor.
+ */
+ { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
+ { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
+ .resetvalue = 0 },
+ /*
+ * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external
+ * Debug Communication Channel is not implemented.
+ */
+ { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ /*
+ * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as
+ * it is unlikely a guest will care.
+ * We don't implement the configurable EL0 access.
+ */
+ { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32,
+ .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .access = PL1_R, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
+ { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .accessfn = access_tdosa,
+ .writefn = oslar_write },
+ { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .resetvalue = 10,
+ .accessfn = access_tdosa,
+ .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
+ /* Dummy OSDLR_EL1: 32-bit Linux will read this */
+ { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
+ .access = PL1_RW, .accessfn = access_tdosa,
+ .type = ARM_CP_NOP },
+ /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
+ * implement vector catch debug events yet.
+ */
+ { .name = "DBGVCR",
+ .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
+ /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
+ * to save and restore a 32-bit guest's DBGVCR)
+ */
+ { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
+ /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
+ * Channel but Linux may try to access this register. The 32-bit
+ * alias is DBGDCCINT.
+ */
+ { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
+ /* 64 bit access versions of the (dummy) debug registers */
+ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+/* Return the exception level to which exceptions should be taken
+ * via SVEAccessTrap. If an exception should be routed through
+ * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
+ * take care of raising that exception.
+ * C.f. the ARM pseudocode function CheckSVEEnabled.
+ */
+int sve_exception_el(CPUARMState *env, int el)
+{
+#ifndef CONFIG_USER_ONLY
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+
+ if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ bool disabled = false;
+
+ /* The CPACR.ZEN controls traps to EL1:
+ * 0, 2 : trap EL0 and EL1 accesses
+ * 1 : trap only EL0 accesses
+ * 3 : trap no accesses
+ */
+ if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
+ disabled = true;
+ } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
+ disabled = el == 0;
+ }
+ if (disabled) {
+ /* route_to_el2 */
+ return hcr_el2 & HCR_TGE ? 2 : 1;
+ }
+
+ /* Check CPACR.FPEN. */
+ if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
+ disabled = true;
+ } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
+ disabled = el == 0;
+ }
+ if (disabled) {
+ return 0;
+ }
+ }
+
+ /* CPTR_EL2. Since TZ and TFP are positive,
+ * they will be zero when EL2 is not present.
+ */
+ if (el <= 2 && arm_is_el2_enabled(env)) {
+ if (env->cp15.cptr_el[2] & CPTR_TZ) {
+ return 2;
+ }
+ if (env->cp15.cptr_el[2] & CPTR_TFP) {
+ return 0;
+ }
+ }
+
+ /* CPTR_EL3. Since EZ is negative we must check for EL3. */
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
+ return 3;
+ }
+#endif
+ return 0;
+}
+
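+/* Round a requested vector length (expressed as vq - 1) down to the
+ * nearest length that is supported by this cpu, per sve_vq_map.
+ */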
+uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
+{
+ uint32_t end_len;
+
+ start_len = MIN(start_len, ARM_MAX_VQ - 1);
+ end_len = start_len;
+
+ if (!test_bit(start_len, cpu->sve_vq_map)) {
+ end_len = find_last_bit(cpu->sve_vq_map, start_len);
+ assert(end_len < start_len);
+ }
+ return end_len;
+}
+
+/*
+ * Given that SVE is enabled, return the vector length for EL.
+ */
+uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t zcr_len = cpu->sve_max_vq - 1;
+
+ if (el <= 1) {
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
+ }
+ if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
+ }
+
+ return aarch64_sve_zcr_get_valid_len(cpu, zcr_len);
+}
+
+static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int cur_el = arm_current_el(env);
+ int old_len = sve_zcr_len_for_el(env, cur_el);
+ int new_len;
+
+ /* Bits other than [3:0] are RAZ/WI. */
+ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
+ raw_write(env, ri, value & 0xf);
+
+ /*
+ * Because we arrived here, we know both FP and SVE are enabled;
+ * otherwise we would have trapped access to the ZCR_ELn register.
+ */
+ new_len = sve_zcr_len_for_el(env, cur_el);
+ if (new_len < old_len) {
+ aarch64_sve_narrow_vq(env, new_len + 1);
+ }
+}
+
+static const ARMCPRegInfo zcr_el1_reginfo = {
+ .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
+ .writefn = zcr_write, .raw_writefn = raw_write
+};
+
+static const ARMCPRegInfo zcr_el2_reginfo = {
+ .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
+ .writefn = zcr_write, .raw_writefn = raw_write
+};
+
+static const ARMCPRegInfo zcr_no_el2_reginfo = {
+ .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_SVE,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
+};
+
+static const ARMCPRegInfo zcr_el3_reginfo = {
+ .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_SVE,
+ .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
+ .writefn = zcr_write, .raw_writefn = raw_write
+};
+
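+/* Rebuild the QEMU watchpoint matching DBGWVR<n>/DBGWCR<n>, e.g. after
+ * a write to either register or after migration.
+ */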
+void hw_watchpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ vaddr len = 0;
+ vaddr wvr = env->cp15.dbgwvr[n];
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int mask;
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+
+ if (env->cpu_watchpoint[n]) {
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
+ env->cpu_watchpoint[n] = NULL;
+ }
+
+ if (!extract64(wcr, 0, 1)) {
+ /* E bit clear : watchpoint disabled */
+ return;
+ }
+
+ switch (extract64(wcr, 3, 2)) {
+ case 0:
+ /* LSC 00 is reserved and must behave as if the wp is disabled */
+ return;
+ case 1:
+ flags |= BP_MEM_READ;
+ break;
+ case 2:
+ flags |= BP_MEM_WRITE;
+ break;
+ case 3:
+ flags |= BP_MEM_ACCESS;
+ break;
+ }
+
+ /* Attempts to use both MASK and BAS fields simultaneously are
+ * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
+ * thus generating a watchpoint for every byte in the masked region.
+ */
+ mask = extract64(wcr, 24, 4);
+ if (mask == 1 || mask == 2) {
+ /* Reserved values of MASK; we must act as if the mask value was
+ * some non-reserved value, or as if the watchpoint were disabled.
+ * We choose the latter.
+ */
+ return;
+ } else if (mask) {
+ /* Watchpoint covers an aligned area up to 2GB in size */
+ len = 1ULL << mask;
+ /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
+ * whether the watchpoint fires when the unmasked bits match; we opt
+ * to generate the exceptions.
+ */
+ wvr &= ~(len - 1);
+ } else {
+ /* Watchpoint covers bytes defined by the byte address select bits */
+ int bas = extract64(wcr, 5, 8);
+ int basstart;
+
+ if (extract64(wvr, 2, 1)) {
+ /* Deprecated case of an only 4-aligned address. BAS[7:4] are
+ * ignored, and BAS[3:0] define which bytes to watch.
+ */
+ bas &= 0xf;
+ }
+
+ if (bas == 0) {
+ /* This must act as if the watchpoint is disabled */
+ return;
+ }
+
+ /* The BAS bits are supposed to be programmed to indicate a contiguous
+ * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
+ * we fire for each byte in the word/doubleword addressed by the WVR.
+ * We choose to ignore any non-zero bits after the first range of 1s.
+ */
+ basstart = ctz32(bas);
+ len = cto32(bas >> basstart);
+ wvr += basstart;
+ }
+
+ cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
+ &env->cpu_watchpoint[n]);
+}
+
+void hw_watchpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU watchpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ hw_watchpoint_update(cpu, i);
+ }
+}
+
+static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int i = ri->crm;
+
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
+ * register reads and behaves as if values written are sign extended.
+ * Bits [1:0] are RES0.
+ */
+ value = sextract64(value, 0, 49) & ~3ULL;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
+static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
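+/* Rebuild the QEMU breakpoint matching DBGBVR<n>/DBGBCR<n>. */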
+void hw_breakpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bvr = env->cp15.dbgbvr[n];
+ uint64_t bcr = env->cp15.dbgbcr[n];
+ vaddr addr;
+ int bt;
+ int flags = BP_CPU;
+
+ if (env->cpu_breakpoint[n]) {
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
+ env->cpu_breakpoint[n] = NULL;
+ }
+
+ if (!extract64(bcr, 0, 1)) {
+        /* E bit clear : breakpoint disabled */
+ return;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ switch (bt) {
+ case 4: /* unlinked address mismatch (reserved if AArch64) */
+ case 5: /* linked address mismatch (reserved if AArch64) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: address mismatch breakpoint types not implemented\n");
+ return;
+ case 0: /* unlinked address match */
+ case 1: /* linked address match */
+ {
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is,
+ * we behave as if the register was sign extended. Bits [1:0] are
+ * RES0. The BAS field is used to allow setting breakpoints on 16
+ * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+ * a bp will fire if the addresses covered by the bp and the addresses
+ * covered by the insn overlap but the insn doesn't start at the
+ * start of the bp address range. We choose to require the insn and
+ * the bp to have the same address. The constraints on writing to
+ * BAS enforced in dbgbcr_write mean we have only four cases:
+ * 0b0000 => no breakpoint
+ * 0b0011 => breakpoint on addr
+ * 0b1100 => breakpoint on addr + 2
+ * 0b1111 => breakpoint on addr
+ * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
+ */
+ int bas = extract64(bcr, 5, 4);
+ addr = sextract64(bvr, 0, 49) & ~3ULL;
+ if (bas == 0) {
+ return;
+ }
+ if (bas == 0xc) {
+ addr += 2;
+ }
+ break;
+ }
+ case 2: /* unlinked context ID match */
+ case 8: /* unlinked VMID match (reserved if no EL2) */
+ case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
+ qemu_log_mask(LOG_UNIMP,
+ "arm: unlinked context breakpoint types not implemented\n");
+ return;
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ case 3: /* linked context ID match */
+ default:
+ /* We must generate no events for Linked context matches (unless
+ * they are linked to by some other bp/wp, which is handled in
+ * updates for the linking bp/wp). We choose to also generate no events
+ * for reserved values.
+ */
+ return;
+ }
+
+ cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+}
+
+void hw_breakpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU breakpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+ hw_breakpoint_update(cpu, i);
+ }
+}
+
+static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
+static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int i = ri->crm;
+
+ /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
+ * copy of BAS[0].
+ */
+ value = deposit64(value, 6, 1, extract64(value, 5, 1));
+ value = deposit64(value, 8, 1, extract64(value, 7, 1));
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
+static void define_debug_regs(ARMCPU *cpu)
+{
+ /* Define v7 and v8 architectural debug registers.
+ * These are just dummy implementations for now.
+ */
+ int i;
+ int wrps, brps, ctx_cmps;
+
+ /*
+ * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
+ * use AArch32. Given that bit 15 is RES1, if the value is 0 then
+ * the register must not exist for this cpu.
+ */
+ if (cpu->isar.dbgdidr != 0) {
+ ARMCPRegInfo dbgdidr = {
+ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
+ .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
+ };
+ define_one_arm_cp_reg(cpu, &dbgdidr);
+ }
+
+ /* Note that all these register fields hold "number of Xs minus 1". */
+ brps = arm_num_brps(cpu);
+ wrps = arm_num_wrps(cpu);
+ ctx_cmps = arm_num_ctx_cmps(cpu);
+
+ assert(ctx_cmps <= brps);
+
+ define_arm_cp_regs(cpu, debug_cp_reginfo);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
+ }
+
+ for (i = 0; i < brps; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
+ .writefn = dbgbvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
+ .writefn = dbgbcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+
+ for (i = 0; i < wrps; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
+ .writefn = dbgwvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
+ .writefn = dbgwcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+}
+
+static void define_pmu_regs(ARMCPU *cpu)
+{
+ /*
+ * v7 performance monitor control register: same implementor
+ * field as main ID register, and we implement four counters in
+ * addition to the cycle count register.
+ */
+ unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
+ ARMCPRegInfo pmcr = {
+ .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
+ .accessfn = pmreg_access, .writefn = pmcr_write,
+ .raw_writefn = raw_write,
+ };
+ ARMCPRegInfo pmcr64 = {
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
+ PMCRLC,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+ define_one_arm_cp_reg(cpu, &pmcr);
+ define_one_arm_cp_reg(cpu, &pmcr64);
+ for (i = 0; i < pmcrn; i++) {
+ char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
+ char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
+ char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
+ char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
+ ARMCPRegInfo pmev_regs[] = {
+ { .name = pmevcntr_name, .cp = 15, .crn = 14,
+ .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .accessfn = pmreg_access },
+ { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .raw_readfn = pmevcntr_rawread,
+ .raw_writefn = pmevcntr_rawwrite },
+ { .name = pmevtyper_name, .cp = 15, .crn = 14,
+ .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .accessfn = pmreg_access },
+ { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .raw_writefn = pmevtyper_rawwrite },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, pmev_regs);
+ g_free(pmevcntr_name);
+ g_free(pmevcntr_el0_name);
+ g_free(pmevtyper_name);
+ g_free(pmevtyper_el0_name);
+ }
+ if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
+ ARMCPRegInfo v81_pmu_regs[] = {
+ { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = extract64(cpu->pmceid0, 32, 32) },
+ { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = extract64(cpu->pmceid1, 32, 32) },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, v81_pmu_regs);
+ }
+ if (cpu_isar_feature(any_pmu_8_4, cpu)) {
+ static const ARMCPRegInfo v84_pmmir = {
+ .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
+ .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = 0
+ };
+ define_one_arm_cp_reg(cpu, &v84_pmmir);
+ }
+}
+
+/* We don't know until after realize whether there's a GICv3
+ * attached, and that is what registers the gicv3 sysregs.
+ * So we have to fill in the GIC fields of ID_PFR1/ID_PFR1_EL1 and
+ * ID_AA64PFR0_EL1 at runtime.
+ */
+static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t pfr1 = cpu->isar.id_pfr1;
+
+ if (env->gicv3state) {
+ pfr1 |= 1 << 28;
+ }
+ return pfr1;
+}
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t pfr0 = cpu->isar.id_aa64pfr0;
+
+ if (env->gicv3state) {
+ pfr0 |= 1 << 24;
+ }
+ return pfr0;
+}
+#endif
+
+/* Shared logic between LORID and the rest of the LOR* registers.
+ * Secure state exclusion has already been dealt with.
+ */
+static CPAccessResult access_lor_ns(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_lor_other(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access denied in secure mode. */
+ return CP_ACCESS_TRAP;
+ }
+ return access_lor_ns(env, ri, isread);
+}
+
+/*
+ * A trivial implementation of ARMv8.1-LOR leaves all of these
+ * registers fixed at 0, which indicates that there are zero
+ * supported Limited Ordering regions.
+ */
+static const ARMCPRegInfo lor_reginfo[] = {
+ { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .accessfn = access_lor_ns,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+#ifdef TARGET_AARCH64
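+/* Accesses to the pointer-authentication key registers from below EL2
+ * trap to EL2 unless HCR_EL2.APK is set, and from below EL3 trap to
+ * EL3 unless SCR_EL3.APK is set.
+ */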
+static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 &&
+ arm_feature(env, ARM_FEATURE_EL2) &&
+ !(arm_hcr_el2_eff(env) & HCR_APK)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !(env->cp15.scr_el3 & SCR_APK)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo pauth_reginfo[] = {
+ { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
+ { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
+ { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
+ { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
+ { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
+ { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
+ { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
+ { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
+ { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
+ { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_pauth,
+ .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
+ REGINFO_SENTINEL
+};
+
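+/* TLB range invalidation operations (FEAT_TLBIRANGE). The outer-shareable
+ * forms reuse the inner-shareable writefns, as for the non-range TLBIs.
+ */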
+static const ARMCPRegInfo tlbirange_reginfo[] = {
+ { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1is_write },
+ { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae1_write },
+ { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2is_write },
+ { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae2_write },
+ { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3is_write },
+ { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_rvae3_write },
+ REGINFO_SENTINEL
+};
+
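+/* TLBI outer-shareable operations (FEAT_TLBIOS), likewise implemented
+ * as their inner-shareable equivalents.
+ */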
+static const ARMCPRegInfo tlbios_reginfo[] = {
+ { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP },
+ { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ REGINFO_SENTINEL
+};
+
+static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ Error *err = NULL;
+ uint64_t ret;
+
+ /* Success sets NZCV = 0000. */
+    env->NF = env->CF = env->VF = 0;
+    env->ZF = 1;
+
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+ /*
+ * ??? Failed, for unknown reasons in the crypto subsystem.
+ * The best we can do is log the reason and return the
+ * timed-out indication to the guest. There is no reason
+ * we know to expect this failure to be transitory, so the
+ * guest may well hang retrying the operation.
+ */
+ qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
+ ri->name, error_get_pretty(err));
+ error_free(err);
+
+        env->ZF = 0; /* NZCV = 0100 */
+ return 0;
+ }
+ return ret;
+}
+
+/* We do not support re-seeding, so the two registers operate the same. */
+static const ARMCPRegInfo rndr_reginfo[] = {
+ { .name = "RNDR", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
+ .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
+ .access = PL0_R, .readfn = rndr_readfn },
+ { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
+ .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
+ .access = PL0_R, .readfn = rndr_readfn },
+ REGINFO_SENTINEL
+};
+
+#ifndef CONFIG_USER_ONLY
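+/* DC CVAP/CVADP: clean data cache by VA to the point of persistence.
+ * Implemented by writing back the host memory backing the cache line
+ * that contains the given virtual address.
+ */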
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ /* CTR_EL0 System register -> DminLine, bits [19:16] */
+ uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
+ uint64_t vaddr_in = (uint64_t) value;
+ uint64_t vaddr = vaddr_in & ~(dline_size - 1);
+ void *haddr;
+ int mem_idx = cpu_mmu_index(env, false);
+
+ /* This won't be crossing page boundaries */
+ haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
+ if (haddr) {
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ /* RCU lock is already being held */
+ mr = memory_region_from_host(haddr, &offset);
+
+ if (mr) {
+ memory_region_writeback(mr, offset, dline_size);
+ }
+ }
+}
+
+static const ARMCPRegInfo dcpop_reg[] = {
+ { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+ .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dcpodp_reg[] = {
+ { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+ .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
+ REGINFO_SENTINEL
+};
+#endif /*CONFIG_USER_ONLY*/
+
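+/* HCR_EL2.TID5 traps GMID_EL1 reads from EL0 and EL1 to EL2. */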
+static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
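+/* Accesses to the MTE system registers from below EL2 trap to EL2 when
+ * HCR_EL2.ATA is clear (except in the E2H+TGE host regime), and from
+ * below EL3 trap to EL3 when SCR_EL3.ATA is clear.
+ */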
+static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ if (el < 3 &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !(env->cp15.scr_el3 & SCR_ATA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
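+/* TCO is a PSTATE bit rather than cp15 state, so it needs explicit
+ * accessors and is registered as ARM_CP_NO_RAW below.
+ */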
+static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_TCO;
+}
+
+static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
+{
+ env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
+}
+
+static const ARMCPRegInfo mte_reginfo[] = {
+ { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
+ { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
+ { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
+ { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
+ { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
+ { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
+ .access = PL1_RW, .accessfn = access_mte,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
+ { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
+ .access = PL1_R, .accessfn = access_aa64_tid5,
+ .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
+ { .name = "TCO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
+ .type = ARM_CP_NO_RAW,
+ .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
+ { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL1_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL1_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
+ { .name = "TCO", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
+ .type = ARM_CP_CONST, .access = PL0_RW, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
+ { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W,
+ .accessfn = aa64_cacheop_poc_access },
+ { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
+ .access = PL0_W, .type = ARM_CP_DC_GVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_DC_GZVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ REGINFO_SENTINEL
+};
+
+#endif
+
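+/*
+ * FEAT_SPECRES prediction restriction (CFP/DVP/CPP RCTX): EL0 use is
+ * gated by SCTLR.EnRCTX, and HCR_EL2.NV traps the EL1 forms to EL2.
+ */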
+static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el == 0) {
+ uint64_t sctlr = arm_sctlr(env, el);
+ if (!(sctlr & SCTLR_EnRCTX)) {
+ return CP_ACCESS_TRAP;
+ }
+ } else if (el == 1) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ if (hcr & HCR_NV) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo predinv_reginfo[] = {
+ { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ /*
+ * Note the AArch32 opcodes have a different OPC1.
+ */
+ { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
+ REGINFO_SENTINEL
+};
+
+static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Read the high 32 bits of the current CCSIDR */
+ return extract64(ccsidr_read(env, ri), 32, 32);
+}
+
+static const ARMCPRegInfo ccsidr2_reginfo[] = {
+ { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
+ .access = PL1_R,
+ .accessfn = access_aa64_tid2,
+ .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ return access_aa64_tid3(env, ri, isread);
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_joscr_jmcr(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ /*
+ * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
+ * in v7A, not in v8A.
+ */
+ if (!arm_feature(env, ARM_FEATURE_V8) &&
+ arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
+ (env->cp15.hstr_el2 & HSTR_TJDBX)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo jazelle_regs[] = {
+ { .name = "JIDR",
+ .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
+ .access = PL1_R, .accessfn = access_jazelle,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "JOSCR",
+ .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
+ .accessfn = access_joscr_jmcr,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "JMCR",
+ .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
+ .accessfn = access_joscr_jmcr,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo vhe_reginfo[] = {
+ { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
+ { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
+#ifndef CONFIG_USER_ONLY
+ { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
+ .fieldoffset =
+ offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .resetfn = gt_hv_timer_reset,
+ .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
+ { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
+ .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
+ { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = e2h_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
+ { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = e2h_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
+ { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = e2h_access,
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
+ { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = e2h_access,
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
+ { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .access = PL2_RW, .accessfn = e2h_access,
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .access = PL2_RW, .accessfn = e2h_access,
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
+#endif
+ REGINFO_SENTINEL
+};
+
+#ifndef CONFIG_USER_ONLY
+static const ARMCPRegInfo ats1e1_reginfo[] = {
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo ats1cp_reginfo[] = {
+ { .name = "ATS1CPRP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ { .name = "ATS1CPWP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ REGINFO_SENTINEL
+};
+#endif
+
+/*
+ * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
+ * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
+ * is non-zero, which is never the case for ARMv7, optional in
+ * ARMv8, and mandatory for ARMv8.2 and up.
+ * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
+ * implementation is RAZ/WI we can ignore this detail, as we
+ * do for ACTLR.
+ */
+static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
+ { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_tacr,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+void register_cp_regs_for_features(ARMCPU *cpu)
+{
+ /* Register all the coprocessor registers based on feature bits */
+ CPUARMState *env = &cpu->env;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile has no coprocessor registers */
+ return;
+ }
+
+ define_arm_cp_regs(cpu, cp_reginfo);
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* Must go early as it is full of wildcards that may be
+ * overridden by later definitions.
+ */
+ define_arm_cp_regs(cpu, not_v8_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ /* The ID registers all have impdef reset values */
+ ARMCPRegInfo v6_idregs[] = {
+ { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_pfr0 },
+ /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
+ * the value of the GIC field until after we define these regs.
+ */
+ { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_NO_RAW,
+ .accessfn = access_aa32_tid3,
+ .readfn = id_pfr1_read,
+ .writefn = arm_cp_write_ignore },
+ { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_dfr0 },
+ { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->id_afr0 },
+ { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_mmfr0 },
+ { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_mmfr1 },
+ { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_mmfr2 },
+ { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_mmfr3 },
+ { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar0 },
+ { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar1 },
+ { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar2 },
+ { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar3 },
+ { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar4 },
+ { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar5 },
+ { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_mmfr4 },
+ { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa32_tid3,
+ .resetvalue = cpu->isar.id_isar6 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, v6_idregs);
+ define_arm_cp_regs(cpu, v6_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, not_v6_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
+ define_arm_cp_regs(cpu, v6k_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7MP) &&
+ !arm_feature(env, ARM_FEATURE_PMSA)) {
+ define_arm_cp_regs(cpu, v7mp_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
+ define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ ARMCPRegInfo clidr = {
+ .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid2,
+ .resetvalue = cpu->clidr
+ };
+ define_one_arm_cp_reg(cpu, &clidr);
+ define_arm_cp_regs(cpu, v7_cp_reginfo);
+ define_debug_regs(cpu);
+ define_pmu_regs(cpu);
+ } else {
+ define_arm_cp_regs(cpu, not_v7_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* AArch64 ID registers, which all have impdef reset values.
+ * Note that within the ID register ranges the unused slots
+         * must all be RAZ, not UNDEF; future architecture versions may
+ * define new registers here.
+ */
+ ARMCPRegInfo v8_idregs[] = {
+ /*
+ * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
+ * emulation because we don't know the right value for the
+ * GIC field until after we define these regs.
+ */
+ { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
+ .access = PL1_R,
+#ifdef CONFIG_USER_ONLY
+ .type = ARM_CP_CONST,
+ .resetvalue = cpu->isar.id_aa64pfr0
+#else
+ .type = ARM_CP_NO_RAW,
+ .accessfn = access_aa64_tid3,
+ .readfn = id_aa64pfr0_read,
+ .writefn = arm_cp_write_ignore
+#endif
+ },
+ { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64pfr1},
+ { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64zfr0 },
+ { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64dfr0 },
+ { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64dfr1 },
+ { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->id_aa64afr0 },
+ { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->id_aa64afr1 },
+ { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64isar0 },
+ { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64isar1 },
+ { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64mmfr0 },
+ { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64mmfr1 },
+ { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_aa64mmfr2 },
+ { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.mvfr0 },
+ { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.mvfr1 },
+ { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.mvfr2 },
+ { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = cpu->isar.id_pfr2 },
+ { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .accessfn = access_aa64_tid3,
+ .resetvalue = 0 },
+ { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = extract64(cpu->pmceid0, 0, 32) },
+ { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = extract64(cpu->pmceid1, 0, 32) },
+ { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid1 },
+ REGINFO_SENTINEL
+ };
+#ifdef CONFIG_USER_ONLY
+ ARMCPRegUserSpaceInfo v8_user_idregs[] = {
+ { .name = "ID_AA64PFR0_EL1",
+ .exported_bits = 0x000f000f00ff0000,
+ .fixed_bits = 0x0000000000000011 },
+ { .name = "ID_AA64PFR1_EL1",
+ .exported_bits = 0x00000000000000f0 },
+ { .name = "ID_AA64PFR*_EL1_RESERVED",
+ .is_glob = true },
+ { .name = "ID_AA64ZFR0_EL1" },
+ { .name = "ID_AA64MMFR0_EL1",
+ .fixed_bits = 0x00000000ff000000 },
+ { .name = "ID_AA64MMFR1_EL1" },
+ { .name = "ID_AA64MMFR*_EL1_RESERVED",
+ .is_glob = true },
+ { .name = "ID_AA64DFR0_EL1",
+ .fixed_bits = 0x0000000000000006 },
+ { .name = "ID_AA64DFR1_EL1" },
+ { .name = "ID_AA64DFR*_EL1_RESERVED",
+ .is_glob = true },
+ { .name = "ID_AA64AFR*",
+ .is_glob = true },
+ { .name = "ID_AA64ISAR0_EL1",
+ .exported_bits = 0x00fffffff0fffff0 },
+ { .name = "ID_AA64ISAR1_EL1",
+ .exported_bits = 0x000000f0ffffffff },
+ { .name = "ID_AA64ISAR*_EL1_RESERVED",
+ .is_glob = true },
+ REGUSERINFO_SENTINEL
+ };
+ modify_arm_cp_regs(v8_idregs, v8_user_idregs);
+#endif
+ /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
+ define_arm_cp_regs(cpu, v8_idregs);
+ define_arm_cp_regs(cpu, v8_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t vmpidr_def = mpidr_read_val(env);
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW,
+ .resetvalue = vmpidr_def,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
+ define_arm_cp_regs(cpu, el2_cp_reginfo);
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
+ }
+ if (cpu_isar_feature(aa64_sel2, cpu)) {
+ define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
+ }
+ /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
+ } else {
+ /* If EL2 is missing but higher ELs are enabled, we need to
+ * register the no_el2 reginfos.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
+ * of MIDR_EL1 and MPIDR_EL1.
+ */
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_NO_RAW,
+ .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
+ define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
+ }
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_arm_cp_regs(cpu, el3_cp_reginfo);
+ ARMCPRegInfo el3_regs[] = {
+ { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
+ { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL3_RW,
+ .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
+ .resetvalue = cpu->reset_sctlr },
+ REGINFO_SENTINEL
+ };
+
+ define_arm_cp_regs(cpu, el3_regs);
+ }
+ /* The behaviour of NSACR is sufficiently various that we don't
+ * try to describe it in a single reginfo:
+ * if EL3 is 64 bit, then trap to EL3 from S EL1,
+ * reads as constant 0xc00 from NS EL1 and NS EL2
+ * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
+ * if v7 without EL3, register doesn't exist
+ * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .accessfn = nsacr_access,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ } else {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_RW | PL1_R,
+ .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ } else {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_R,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ /* PMSAv6 not implemented */
+ assert(arm_feature(env, ARM_FEATURE_V7));
+ define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
+ define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
+ }
+ } else {
+ define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
+ define_arm_cp_regs(cpu, vmsa_cp_reginfo);
+        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
+ if (cpu_isar_feature(aa32_hpd, cpu)) {
+ define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ define_arm_cp_regs(cpu, t2ee_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
+ define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_VAPA)) {
+ define_arm_cp_regs(cpu, vapa_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
+ define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
+ define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
+ define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
+ define_arm_cp_regs(cpu, omap_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
+ define_arm_cp_regs(cpu, strongarm_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ define_arm_cp_regs(cpu, xscale_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
+ define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, lpae_cp_reginfo);
+ }
+ if (cpu_isar_feature(aa32_jazelle, cpu)) {
+ define_arm_cp_regs(cpu, jazelle_regs);
+ }
+ /* Slightly awkwardly, the OMAP and StrongARM cores need all of
+ * cp15 crn=0 to be writes-ignored, whereas for other cores they should
+     * be read-only (i.e. a write causes an UNDEF exception).
+ */
+ {
+ ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
+ /* Pre-v8 MIDR space.
+ * Note that the MIDR isn't a simple constant register because
+ * of the TI925 behaviour where writes to another register can
+ * cause the MIDR value to change.
+ *
+ * Unimplemented registers in the c15 0 0 0 space default to
+ * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
+ * and friends override accordingly.
+ */
+ { .name = "MIDR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .resetvalue = cpu->midr,
+ .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
+ .readfn = midr_read,
+ .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
+ .type = ARM_CP_OVERRIDE },
+ /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
+ { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
+ .readfn = midr_read },
+ /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
+ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_R, .resetvalue = cpu->midr },
+ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
+ .access = PL1_R, .resetvalue = cpu->midr },
+ { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
+ .access = PL1_R,
+ .accessfn = access_aa64_tid1,
+ .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
+ REGINFO_SENTINEL
+ };
+ ARMCPRegInfo id_cp_reginfo[] = {
+ /* These are common to v8 and pre-v8 */
+ { .name = "CTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_R, .accessfn = ctr_el0_access,
+ .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
+ .access = PL0_R, .accessfn = ctr_el0_access,
+ .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
+ { .name = "TCMTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_R,
+ .accessfn = access_aa32_tid1,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ /* TLBTR is specific to VMSA */
+ ARMCPRegInfo id_tlbtr_reginfo = {
+ .name = "TLBTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL1_R,
+ .accessfn = access_aa32_tid1,
+ .type = ARM_CP_CONST, .resetvalue = 0,
+ };
+ /* MPUIR is specific to PMSA V6+ */
+ ARMCPRegInfo id_mpuir_reginfo = {
+ .name = "MPUIR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmsav7_dregion << 8
+ };
+ ARMCPRegInfo crn0_wi_reginfo = {
+ .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_NOP | ARM_CP_OVERRIDE
+ };
+#ifdef CONFIG_USER_ONLY
+ ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
+ { .name = "MIDR_EL1",
+ .exported_bits = 0x00000000ffffffff },
+ { .name = "REVIDR_EL1" },
+ REGUSERINFO_SENTINEL
+ };
+ modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
+#endif
+ if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
+ arm_feature(env, ARM_FEATURE_STRONGARM)) {
+ ARMCPRegInfo *r;
+ /* Register the blanket "writes ignored" value first to cover the
+ * whole space. Then update the specific ID registers to allow write
+ * access, so that they ignore writes rather than causing them to
+ * UNDEF.
+ */
+ define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
+ for (r = id_pre_v8_midr_cp_reginfo;
+ r->type != ARM_CP_SENTINEL; r++) {
+ r->access = PL1_RW;
+ }
+ for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
+ r->access = PL1_RW;
+ }
+ id_mpuir_reginfo.access = PL1_RW;
+ id_tlbtr_reginfo.access = PL1_RW;
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
+ }
+ define_arm_cp_regs(cpu, id_cp_reginfo);
+ if (!arm_feature(env, ARM_FEATURE_PMSA)) {
+ define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPIDR)) {
+ ARMCPRegInfo mpidr_cp_reginfo[] = {
+ { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
+ .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
+ REGINFO_SENTINEL
+ };
+#ifdef CONFIG_USER_ONLY
+ ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
+ { .name = "MPIDR_EL1",
+ .fixed_bits = 0x0000000080000000 },
+ REGUSERINFO_SENTINEL
+ };
+ modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
+#endif
+ define_arm_cp_regs(cpu, mpidr_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AUXCR)) {
+ ARMCPRegInfo auxcr_reginfo[] = {
+ { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tacr,
+ .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
+ { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, auxcr_reginfo);
+ if (cpu_isar_feature(aa32_ac2, cpu)) {
+ define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_CBAR)) {
+ /*
+ * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
+ * There are two flavours:
+ * (1) older 32-bit only cores have a simple 32-bit CBAR
+ * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
+ * 32-bit register visible to AArch32 at a different encoding
+ * to the "flavour 1" register and with the bits rearranged to
+ * be able to squash a 64-bit address into the 32-bit view.
+ * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
+ * in future if we support AArch32-only configs of some of the
+ * AArch64 cores we might need to add a specific feature flag
+ * to indicate cores with "flavour 2" CBAR.
+ */
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 32 bit view is [31:18] 0...0 [43:32]. */
+ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
+ | extract64(cpu->reset_cbar, 32, 12);
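+            /*
+             * Worked example with an illustrative (not architected)
+             * reset_cbar of 0x00000abc80040000: bits [43:32] are 0xabc
+             * and bits [31:18] are 0x2001, giving
+             * cbar32 == (0x2001 << 18) | 0xabc == 0x80040abc.
+             */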
+ ARMCPRegInfo cbar_reginfo[] = {
+ { .name = "CBAR",
+ .type = ARM_CP_CONST,
+ .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cbar32 },
+ { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_CONST,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cpu->reset_cbar },
+ REGINFO_SENTINEL
+ };
+            /* We don't currently implement a read/write 64-bit CBAR */
+ assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
+ define_arm_cp_regs(cpu, cbar_reginfo);
+ } else {
+ ARMCPRegInfo cbar = {
+ .name = "CBAR",
+ .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
+ .fieldoffset = offsetof(CPUARMState,
+ cp15.c15_config_base_address)
+ };
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ cbar.access = PL1_R;
+ cbar.fieldoffset = 0;
+ cbar.type = ARM_CP_CONST;
+ }
+ define_one_arm_cp_reg(cpu, &cbar);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_VBAR)) {
+ ARMCPRegInfo vbar_cp_reginfo[] = {
+ { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .writefn = vbar_write,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
+ offsetof(CPUARMState, cp15.vbar_ns) },
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vbar_cp_reginfo);
+ }
+
+ /* Generic registers whose values depend on the implementation */
+ {
+ ARMCPRegInfo sctlr = {
+ .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tvm_trvm,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
+ offsetof(CPUARMState, cp15.sctlr_ns) },
+ .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
+ .raw_writefn = raw_write,
+ };
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ /* Normally we would always end the TB on an SCTLR write, but Linux
+ * arch/arm/mach-pxa/sleep.S expects two instructions following
+ * an MMU enable to execute from cache. Imitate this behaviour.
+ */
+ sctlr.type |= ARM_CP_SUPPRESS_TB_END;
+ }
+ define_one_arm_cp_reg(cpu, &sctlr);
+ }
+
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ define_arm_cp_regs(cpu, lor_reginfo);
+ }
+ if (cpu_isar_feature(aa64_pan, cpu)) {
+ define_one_arm_cp_reg(cpu, &pan_reginfo);
+ }
+#ifndef CONFIG_USER_ONLY
+ if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1e1_reginfo);
+ }
+ if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1cp_reginfo);
+ }
+#endif
+ if (cpu_isar_feature(aa64_uao, cpu)) {
+ define_one_arm_cp_reg(cpu, &uao_reginfo);
+ }
+
+ if (cpu_isar_feature(aa64_dit, cpu)) {
+ define_one_arm_cp_reg(cpu, &dit_reginfo);
+ }
+ if (cpu_isar_feature(aa64_ssbs, cpu)) {
+ define_one_arm_cp_reg(cpu, &ssbs_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
+ define_arm_cp_regs(cpu, vhe_reginfo);
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
+ } else {
+ define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
+ }
+ }
+
+#ifdef TARGET_AARCH64
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ define_arm_cp_regs(cpu, pauth_reginfo);
+ }
+ if (cpu_isar_feature(aa64_rndr, cpu)) {
+ define_arm_cp_regs(cpu, rndr_reginfo);
+ }
+ if (cpu_isar_feature(aa64_tlbirange, cpu)) {
+ define_arm_cp_regs(cpu, tlbirange_reginfo);
+ }
+ if (cpu_isar_feature(aa64_tlbios, cpu)) {
+ define_arm_cp_regs(cpu, tlbios_reginfo);
+ }
+#ifndef CONFIG_USER_ONLY
+ /* Data Cache clean instructions up to PoP */
+ if (cpu_isar_feature(aa64_dcpop, cpu)) {
+ define_one_arm_cp_reg(cpu, dcpop_reg);
+
+ if (cpu_isar_feature(aa64_dcpodp, cpu)) {
+ define_one_arm_cp_reg(cpu, dcpodp_reg);
+ }
+ }
+#endif /* CONFIG_USER_ONLY */
+
+ /*
+ * If full MTE is enabled, add all of the system registers.
+ * If only "instructions available at EL0" are enabled,
+ * then define only a RAZ/WI version of PSTATE.TCO.
+ */
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ define_arm_cp_regs(cpu, mte_reginfo);
+ define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
+ } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
+ define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
+ define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
+ }
+#endif
+
+ if (cpu_isar_feature(any_predinv, cpu)) {
+ define_arm_cp_regs(cpu, predinv_reginfo);
+ }
+
+ if (cpu_isar_feature(any_ccidx, cpu)) {
+ define_arm_cp_regs(cpu, ccsidr2_reginfo);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Register redirections and aliases must be done last,
+ * after the registers from the other extensions have been defined.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
+ define_arm_vh_e2h_redirects_aliases(cpu);
+ }
+#endif
+}
+
+/* Sort alphabetically by type name, except for "any". */
+static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
+ return -1;
+ } else {
+ return strcmp(name_a, name_b);
+ }
+}
+
+static void arm_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
+ qemu_printf(" %s\n", name);
+ g_free(name);
+}
+
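+/*
+ * Illustrative output (abbreviated; the exact set of names depends on
+ * the build configuration):
+ *     Available CPUs:
+ *       arm1026
+ *       cortex-a15
+ *       cortex-a57
+ *       max
+ */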
+void arm_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ARM_CPU, false);
+ list = g_slist_sort(list, arm_cpu_list_compare);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, arm_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+static void arm_cpu_add_definition(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfo *info;
+ const char *typename;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_ARM_CPU));
+ info->q_typename = g_strdup(typename);
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
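+/*
+ * Illustrative QMP exchange (other CpuDefinitionInfo members elided;
+ * the model list depends on the build):
+ *   -> { "execute": "query-cpu-definitions" }
+ *   <- { "return": [ { "name": "cortex-a57",
+ *                      "typename": "cortex-a57-arm-cpu" }, ... ] }
+ */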
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ARM_CPU, false);
+ g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
+ g_slist_free(list);
+
+ return cpu_list;
+}
+
+static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
+ void *opaque, int state, int secstate,
+ int crm, int opc1, int opc2,
+ const char *name)
+{
+ /* Private utility function for define_one_arm_cp_reg_with_opaque():
+ * add a single reginfo struct to the hash table.
+ */
+ uint32_t *key = g_new(uint32_t, 1);
+ ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
+ int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
+ int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
+
+ r2->name = g_strdup(name);
+ /* Reset the secure state to the specific incoming state. This is
+ * necessary as the register may have been defined with both states.
+ */
+ r2->secure = secstate;
+
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+        /* Register is banked (using both entries in the array).
+         * Overwrite fieldoffset: the array is only used when defining
+         * banked registers, and only fieldoffset is consulted afterwards.
+ */
+ r2->fieldoffset = r->bank_fieldoffsets[ns];
+ }
+
+ if (state == ARM_CP_STATE_AA32) {
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ /* If the register is banked then we don't need to migrate or
+ * reset the 32-bit instance in certain cases:
+ *
+ * 1) If the register has both 32-bit and 64-bit instances then we
+ * can count on the 64-bit instance taking care of the
+ * non-secure bank.
+ * 2) If ARMv8 is enabled then we can count on a 64-bit version
+ * taking care of the secure bank. This requires that separate
+ * 32 and 64-bit definitions are provided.
+ */
+ if ((r->state == ARM_CP_STATE_BOTH && ns) ||
+ (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
+ r2->type |= ARM_CP_ALIAS;
+ }
+ } else if ((secstate != r->secure) && !ns) {
+ /* The register is not banked so we only want to allow migration of
+ * the non-secure instance.
+ */
+ r2->type |= ARM_CP_ALIAS;
+ }
+
+ if (r->state == ARM_CP_STATE_BOTH) {
+ /* We assume it is a cp15 register if the .cp field is left unset.
+ */
+ if (r2->cp == 0) {
+ r2->cp = 15;
+ }
+
+#ifdef HOST_WORDS_BIGENDIAN
+ if (r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
+#endif
+ }
+ }
+ if (state == ARM_CP_STATE_AA64) {
+ /* To allow abbreviation of ARMCPRegInfo
+ * definitions, we treat cp == 0 as equivalent to
+ * the value for "standard guest-visible sysreg".
+ * STATE_BOTH definitions are also always "standard
+ * sysreg" in their AArch64 view (the .cp value may
+ * be non-zero for the benefit of the AArch32 view).
+ */
+ if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
+ r2->cp = CP_REG_ARM64_SYSREG_CP;
+ }
+ *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
+ r2->opc0, opc1, opc2);
+ } else {
+ *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
+ }
+ if (opaque) {
+ r2->opaque = opaque;
+ }
+ /* reginfo passed to helpers is correct for the actual access,
+ * and is never ARM_CP_STATE_BOTH:
+ */
+ r2->state = state;
+ /* Make sure reginfo passed to helpers for wildcarded regs
+ * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
+ */
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+ /* By convention, for wildcarded registers only the first
+ * entry is used for migration; the others are marked as
+ * ALIAS so we don't try to transfer the register
+     * multiple times. Special registers (i.e. NOP/WFI) are
+ * never migratable and not even raw-accessible.
+ */
+ if ((r->type & ARM_CP_SPECIAL)) {
+ r2->type |= ARM_CP_NO_RAW;
+ }
+ if (((r->crm == CP_ANY) && crm != 0) ||
+ ((r->opc1 == CP_ANY) && opc1 != 0) ||
+ ((r->opc2 == CP_ANY) && opc2 != 0)) {
+ r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
+ }
+
+ /* Check that raw accesses are either forbidden or handled. Note that
+ * we can't assert this earlier because the setup of fieldoffset for
+ * banked registers has to be done first.
+ */
+ if (!(r2->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r2));
+ }
+
+ /* Overriding of an existing definition must be explicitly
+ * requested.
+ */
+ if (!(r->type & ARM_CP_OVERRIDE)) {
+ ARMCPRegInfo *oldreg;
+ oldreg = g_hash_table_lookup(cpu->cp_regs, key);
+ if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
+ fprintf(stderr, "Register redefined: cp=%d %d bit "
+ "crn=%d crm=%d opc1=%d opc2=%d, "
+ "was %s, now %s\n", r2->cp, 32 + 32 * is64,
+ r2->crn, r2->crm, r2->opc1, r2->opc2,
+ oldreg->name, r2->name);
+ g_assert_not_reached();
+ }
+ }
+ g_hash_table_insert(cpu->cp_regs, key, r2);
+}
+
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *r, void *opaque)
+{
+ /* Define implementations of coprocessor registers.
+ * We store these in a hashtable because typically
+     * there are fewer than 150 registers in a space which
+ * is 16*16*16*8*8 = 262144 in size.
+ * Wildcarding is supported for the crm, opc1 and opc2 fields.
+ * If a register is defined twice then the second definition is
+ * used, so this can be used to define some generic registers and
+ * then override them with implementation specific variations.
+ * At least one of the original and the second definition should
+ * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
+ * against accidental use.
+ *
+ * The state field defines whether the register is to be
+ * visible in the AArch32 or AArch64 execution state. If the
+ * state is set to ARM_CP_STATE_BOTH then we synthesise a
+ * reginfo structure for the AArch32 view, which sees the lower
+ * 32 bits of the 64 bit register.
+ *
+ * Only registers visible in AArch64 may set r->opc0; opc0 cannot
+ * be wildcarded. AArch64 registers are always considered to be 64
+ * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
+ * the register, if any.
+ */
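+    /*
+     * Illustrative expansion (the register name is made up): an entry
+     * such as
+     *   { .name = "FOO", .cp = 15, .crn = 0, .crm = CP_ANY,
+     *     .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
+     *     .type = ARM_CP_NOP | ARM_CP_OVERRIDE }
+     * is expanded by the loops below into one hashtable entry per
+     * concrete (crm, opc1, opc2) triple, i.e. 16 * 8 * 8 = 1024
+     * entries, each with the wildcards replaced by actual values.
+     */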
+ int crm, opc1, opc2, state;
+ int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
+ int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
+ int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
+ int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
+ int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
+ int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
+ /* 64 bit registers have only CRm and Opc1 fields */
+ assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /* op0 only exists in the AArch64 encodings */
+ assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
+ /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
+ assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ /*
+ * This API is only for Arm's system coprocessors (14 and 15) or
+ * (M-profile or v7A-and-earlier only) for implementation defined
+ * coprocessors in the range 0..7. Our decode assumes this, since
+ * 8..13 can be used for other insns including VFP and Neon. See
+ * valid_cp() in translate.c. Assert here that we haven't tried
+ * to use an invalid coprocessor number.
+ */
+ switch (r->state) {
+ case ARM_CP_STATE_BOTH:
+ /* 0 has a special meaning, but otherwise the same rules as AA32. */
+ if (r->cp == 0) {
+ break;
+ }
+ /* fall through */
+ case ARM_CP_STATE_AA32:
+ if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
+ !arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ assert(r->cp >= 14 && r->cp <= 15);
+ } else {
+ assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
+ }
+ break;
+ case ARM_CP_STATE_AA64:
+ assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
+ * encodes a minimum access level for the register. We roll this
+ * runtime check into our general permission check code, so check
+ * here that the reginfo's specified permissions are strict enough
+ * to encompass the generic architectural permission check.
+ */
+ if (r->state != ARM_CP_STATE_AA32) {
+ int mask = 0;
+ switch (r->opc1) {
+ case 0:
+ /* min_EL EL1, but some accessible to EL0 via kernel ABI */
+ mask = PL0U_R | PL1_RW;
+ break;
+ case 1: case 2:
+ /* min_EL EL1 */
+ mask = PL1_RW;
+ break;
+ case 3:
+ /* min_EL EL0 */
+ mask = PL0_RW;
+ break;
+ case 4:
+ case 5:
+ /* min_EL EL2 */
+ mask = PL2_RW;
+ break;
+ case 6:
+ /* min_EL EL3 */
+ mask = PL3_RW;
+ break;
+ case 7:
+ /* min_EL EL1, secure mode only (we don't check the latter) */
+ mask = PL1_RW;
+ break;
+ default:
+ /* broken reginfo with out-of-range opc1 */
+ assert(false);
+ break;
+ }
+ /* assert our permissions are not too lax (stricter is fine) */
+ assert((r->access & ~mask) == 0);
+ }
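+    /*
+     * For example, declaring .access = PL1_RW on a register with
+     * opc1 == 6 (min_EL EL3) would trip the assert above, because
+     * PL1_RW permits EL1 accesses that PL3_RW does not.
+     */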
+
+ /* Check that the register definition has enough info to handle
+ * reads and writes if they are permitted.
+ */
+ if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
+ if (r->access & PL3_R) {
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->readfn);
+ }
+ if (r->access & PL3_W) {
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->writefn);
+ }
+ }
+ /* Bad type field probably means missing sentinel at end of reg list */
+ assert(cptype_valid(r->type));
+ for (crm = crmmin; crm <= crmmax; crm++) {
+ for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
+ for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
+ for (state = ARM_CP_STATE_AA32;
+ state <= ARM_CP_STATE_AA64; state++) {
+ if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
+ continue;
+ }
+ if (state == ARM_CP_STATE_AA32) {
+ /* Under AArch32 CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ char *name;
+
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_S:
+ case ARM_CP_SECSTATE_NS:
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ r->secure, crm, opc1, opc2,
+ r->name);
+ break;
+ default:
+ name = g_strdup_printf("%s_S", r->name);
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_S,
+ crm, opc1, opc2, name);
+ g_free(name);
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2, r->name);
+ break;
+ }
+ } else {
+ /* AArch64 registers get mapped to non-secure instance
+ * of AArch32 */
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2, r->name);
+ }
+ }
+ }
+ }
+ }
+}
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque)
+{
+ /* Define a whole list of registers */
+ const ARMCPRegInfo *r;
+ for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
+ define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
+ }
+}
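+
+/*
+ * Usage sketch, with a hypothetical register shown for illustration
+ * only:
+ *
+ *   static const ARMCPRegInfo demo_reginfo[] = {
+ *       { .name = "DEMOREG", .cp = 15, .crn = 9, .crm = CP_ANY,
+ *         .opc1 = 0, .opc2 = 0, .access = PL1_RW,
+ *         .type = ARM_CP_CONST, .resetvalue = 0 },
+ *       REGINFO_SENTINEL
+ *   };
+ *   define_arm_cp_regs(cpu, demo_reginfo);
+ *
+ * The terminating REGINFO_SENTINEL is what the ARM_CP_SENTINEL loop
+ * condition above tests for.
+ */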
+
+/*
+ * Modify ARMCPRegInfo for access from userspace.
+ *
+ * This is a data-driven modification directed by a list of
+ * ARMCPRegUserSpaceInfo. Matched registers become ARM_CP_CONST, as
+ * user space cannot alter any values, and dynamic values pertaining to
+ * execution state are hidden from the user space view anyway.
+ */
+void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
+{
+ const ARMCPRegUserSpaceInfo *m;
+ ARMCPRegInfo *r;
+
+ for (m = mods; m->name; m++) {
+ GPatternSpec *pat = NULL;
+ if (m->is_glob) {
+ pat = g_pattern_spec_new(m->name);
+ }
+ for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
+ if (pat && g_pattern_match_string(pat, r->name)) {
+ r->type = ARM_CP_CONST;
+ r->access = PL0U_R;
+ r->resetvalue = 0;
+ /* continue */
+ } else if (strcmp(r->name, m->name) == 0) {
+ r->type = ARM_CP_CONST;
+ r->access = PL0U_R;
+ r->resetvalue &= m->exported_bits;
+ r->resetvalue |= m->fixed_bits;
+ break;
+ }
+ }
+ if (pat) {
+ g_pattern_spec_free(pat);
+ }
+ }
+}
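+
+/*
+ * A minimal sketch of a driving table (hypothetical entries): exact
+ * matches export selected bits, while glob patterns zero whole groups
+ * of registers:
+ *
+ *   static const ARMCPRegUserSpaceInfo demo_user_mods[] = {
+ *       { .name = "ID_AA64PFR0_EL1", .exported_bits = 0xf },
+ *       { .name = "ID_AA64*", .is_glob = true },
+ *       REGUSERINFO_SENTINEL
+ *   };
+ *   modify_arm_cp_regs(some_reginfo_list, demo_user_mods);
+ */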
+
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
+{
+ return g_hash_table_lookup(cpregs, &encoded_cp);
+}
+
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Helper coprocessor write function for write-ignore registers */
+}
+
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    /* Helper coprocessor read function for read-as-zero registers */
+ return 0;
+}
+
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+ /* Helper coprocessor reset function for do-nothing-on-reset registers */
+}
+
+static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
+{
+ /* Return true if it is not valid for us to switch to
+ * this CPU mode (ie all the UNPREDICTABLE cases in
+ * the ARM ARM CPSRWriteByInstr pseudocode).
+ */
+
+ /* Changes to or from Hyp via MSR and CPS are illegal. */
+ if (write_type == CPSRWriteByInstr &&
+ ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
+ mode == ARM_CPU_MODE_HYP)) {
+ return 1;
+ }
+
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_SYS:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_FIQ:
+ /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
+ * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
+ */
+ /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
+ * and CPS are treated as illegal mode changes.
+ */
+ if (write_type == CPSRWriteByInstr &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
+ (arm_hcr_el2_eff(env) & HCR_TGE)) {
+ return 1;
+ }
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
+ case ARM_CPU_MODE_MON:
+ return arm_current_el(env) < 3;
+ default:
+ return 1;
+ }
+}
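+
+/*
+ * For example, an MSR in SVC mode that tries to set CPSR.M to Hyp is
+ * rejected here; cpsr_write() below then leaves CPSR.M unchanged and,
+ * on v8, sets PSTATE.IL instead.
+ */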
+
+uint32_t cpsr_read(CPUARMState *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | (env->GE << 16) | (env->daif & CPSR_AIF);
+}
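+
+/*
+ * For reference, the AArch32 CPSR layout assembled above and picked
+ * apart in cpsr_write() below:
+ *   31..28 N,Z,C,V  27 Q  26:25 IT[1:0]  24 J  19:16 GE[3:0]
+ *   15:10 IT[7:2]   9 E   8 A  7 I  6 F  5 T  4:0 M[4:0]
+ */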
+
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type)
+{
+ uint32_t changed_daif;
+ bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
+ (mask & (CPSR_M | CPSR_E | CPSR_IL));
+
+ if (mask & CPSR_NZCV) {
+ env->ZF = (~val) & CPSR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+    if (mask & CPSR_Q) {
+        env->QF = ((val & CPSR_Q) != 0);
+    }
+    if (mask & CPSR_T) {
+        env->thumb = ((val & CPSR_T) != 0);
+    }
+ if (mask & CPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & CPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & CPSR_GE) {
+ env->GE = (val >> 16) & 0xf;
+ }
+
+ /* In a V7 implementation that includes the security extensions but does
+ * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
+ * whether non-secure software is allowed to change the CPSR_F and CPSR_A
+ * bits respectively.
+ *
+ * In a V8 implementation, it is permitted for privileged software to
+ * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
+ */
+ if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2) &&
+ !arm_is_secure(env)) {
+
+ changed_daif = (env->daif ^ val) & mask;
+
+ if (changed_daif & CPSR_A) {
+ /* Check to see if we are allowed to change the masking of async
+ * abort exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_AW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_A flag from "
+ "non-secure world with SCR.AW bit clear\n");
+ mask &= ~CPSR_A;
+ }
+ }
+
+ if (changed_daif & CPSR_F) {
+ /* Check to see if we are allowed to change the masking of FIQ
+ * exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_FW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_F flag from "
+ "non-secure world with SCR.FW bit clear\n");
+ mask &= ~CPSR_F;
+ }
+
+ /* Check whether non-maskable FIQ (NMFI) support is enabled.
+ * If this bit is set software is not allowed to mask
+ * FIQs, but is allowed to set CPSR_F to 0.
+ */
+ if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
+ (val & CPSR_F)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to enable CPSR_F flag "
+ "(non-maskable FIQ [NMFI] support enabled)\n");
+ mask &= ~CPSR_F;
+ }
+ }
+ }
+
+ env->daif &= ~(CPSR_AIF & mask);
+ env->daif |= val & CPSR_AIF & mask;
+
+ if (write_type != CPSRWriteRaw &&
+ ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
+ /* Note that we can only get here in USR mode if this is a
+ * gdb stub write; for this case we follow the architectural
+ * behaviour for guest writes in USR mode of ignoring an attempt
+ * to switch mode. (Those are caught by translate.c for writes
+ * triggered by guest instructions.)
+ */
+ mask &= ~CPSR_M;
+ } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
+ /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
+ * v7, and has defined behaviour in v8:
+ * + leave CPSR.M untouched
+ * + allow changes to the other CPSR fields
+ * + set PSTATE.IL
+ * For user changes via the GDB stub, we don't set PSTATE.IL,
+ * as this would be unnecessarily harsh for a user error.
+ */
+ mask &= ~CPSR_M;
+ if (write_type != CPSRWriteByGDBStub &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ mask |= CPSR_IL;
+ val |= CPSR_IL;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Illegal AArch32 mode switch attempt from %s to %s\n",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val));
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
+ write_type == CPSRWriteExceptionReturn ?
+ "Exception return from AArch32" :
+ "AArch32 mode switch from",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val), env->regs[15]);
+ switch_mode(env, val & CPSR_M);
+ }
+ }
+ mask &= ~CACHED_CPSR_BITS;
+ env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
+ if (rebuild_hflags) {
+ arm_rebuild_hflags(env);
+ }
+}
+
+/* Sign/zero extend */
+uint32_t HELPER(sxtb16)(uint32_t x)
+{
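+    /* e.g. sxtb16(0x00800080) == 0xff80ff80: bytes 0 and 2 sign-extended */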
+ uint32_t res;
+ res = (uint16_t)(int8_t)x;
+ res |= (uint32_t)(int8_t)(x >> 16) << 16;
+ return res;
+}
+
+static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
+{
+ /*
+ * Take a division-by-zero exception if necessary; otherwise return
+ * to get the usual non-trapping division behaviour (result of 0)
+ */
+ if (arm_feature(env, ARM_FEATURE_M)
+ && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
+ raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
+ }
+}
+
+uint32_t HELPER(uxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(uint8_t)x;
+ res |= (uint32_t)(uint8_t)(x >> 16) << 16;
+ return res;
+}
+
+int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
+{
+ if (den == 0) {
+ handle_possible_div0_trap(env, GETPC());
+ return 0;
+ }
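+    /*
+     * INT_MIN / -1 overflows (undefined behaviour in C), but the Arm
+     * SDIV instruction defines the result to be INT_MIN, so
+     * special-case it.
+     */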
+ if (num == INT_MIN && den == -1) {
+ return INT_MIN;
+ }
+ return num / den;
+}
+
+uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
+{
+ if (den == 0) {
+ handle_possible_div0_trap(env, GETPC());
+ return 0;
+ }
+ return num / den;
+}
+
+uint32_t HELPER(rbit)(uint32_t x)
+{
+ return revbit32(x);
+}
+
+#ifdef CONFIG_USER_ONLY
+
+static void switch_mode(CPUARMState *env, int mode)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (mode != ARM_CPU_MODE_USR) {
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
+ }
+}
+
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure)
+{
+ return 1;
+}
+
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ g_assert_not_reached();
+}
+
+#else
+
+static void switch_mode(CPUARMState *env, int mode)
+{
+ int old_mode;
+ int i;
+
+ old_mode = env->uncached_cpsr & CPSR_M;
+    if (mode == old_mode) {
+        return;
+    }
+
+ if (old_mode == ARM_CPU_MODE_FIQ) {
+        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
+        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
+    } else if (mode == ARM_CPU_MODE_FIQ) {
+        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
+        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
+ }
+
+ i = bank_number(old_mode);
+ env->banked_r13[i] = env->regs[13];
+ env->banked_spsr[i] = env->spsr;
+
+ i = bank_number(mode);
+ env->regs[13] = env->banked_r13[i];
+ env->spsr = env->banked_spsr[i];
+
+ env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
+ env->regs[14] = env->banked_r14[r14_bank_number(mode)];
+}
+
+/* Physical Interrupt Target EL Lookup Table
+ *
+ * [ From ARM ARM section G1.13.4 (Table G1-15) ]
+ *
+ * The below multi-dimensional table is used for looking up the target
+ * exception level given numerous condition criteria. Specifically, the
+ * target EL is based on SCR and HCR routing controls as well as the
+ * currently executing EL and secure state.
+ *
+ * Dimensions:
+ * target_el_table[2][2][2][2][2][4]
+ * | | | | | +--- Current EL
+ * | | | | +------ Non-secure(0)/Secure(1)
+ * | | | +--------- HCR mask override
+ * | | +------------ SCR exec state control
+ * | +--------------- SCR mask override
+ * +------------------ 32-bit(0)/64-bit(1) EL3
+ *
+ * The table values are as such:
+ * 0-3 = EL0-EL3
+ * -1 = Cannot occur
+ *
+ * The ARM ARM target EL table includes entries indicating that an "exception
+ * is not taken". The two cases where this is applicable are:
+ * 1) An exception is taken from EL3 but the SCR does not have the exception
+ * routed to EL3.
+ * 2) An exception is taken from EL2 but the HCR does not have the exception
+ * routed to EL2.
+ * In these two cases, the below table contains a target of EL1. This value is
+ * returned as it is expected that the consumer of the table data will check
+ * for "target EL >= current EL" to ensure the exception is not taken.
+ *
+ * SCR HCR
+ * 64 EA AMO From
+ * BIT IRQ IMO Non-secure Secure
+ * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
+ */
+static const int8_t target_el_table[2][2][2][2][2][4] = {
+ {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
+ {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
+ {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
+ {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
+ {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
+ {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
+ {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
+ {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
+ {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
+ {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
+};
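+
+/*
+ * Worked example of the lookup below: an IRQ taken from non-secure EL0
+ * with an AArch64 EL3 (is64 = 1), SCR_EL3.{IRQ,RW} = 0 and
+ * HCR_EL2.{IMO,TGE} = 0 indexes target_el_table[1][0][0][0][0][0],
+ * which is 1: the IRQ is taken to EL1.
+ */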
+
+/*
+ * Determine the target EL for physical exceptions
+ */
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure)
+{
+ CPUARMState *env = cs->env_ptr;
+ bool rw;
+ bool scr;
+ bool hcr;
+ int target_el;
+ /* Is the highest EL AArch64? */
+ bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+ uint64_t hcr_el2;
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ } else {
+ /* Either EL2 is the highest EL (and so the EL2 register width
+ * is given by is64); or there is no EL2 or EL3, in which case
+ * the value of 'rw' does not affect the table lookup anyway.
+ */
+ rw = is64;
+ }
+
+ hcr_el2 = arm_hcr_el2_eff(env);
+ switch (excp_idx) {
+ case EXCP_IRQ:
+ scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
+ hcr = hcr_el2 & HCR_IMO;
+ break;
+ case EXCP_FIQ:
+ scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
+ hcr = hcr_el2 & HCR_FMO;
+ break;
+ default:
+ scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
+ hcr = hcr_el2 & HCR_AMO;
+ break;
+    }
+
+ /*
+ * For these purposes, TGE and AMO/IMO/FMO both force the
+ * interrupt to EL2. Fold TGE into the bit extracted above.
+ */
+ hcr |= (hcr_el2 & HCR_TGE) != 0;
+
+ /* Perform a table-lookup for the target EL given the current state */
+ target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
+
+ assert(target_el > 0);
+
+ return target_el;
+}
+
+void arm_log_exception(int idx)
+{
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *exc = NULL;
+ static const char * const excnames[] = {
+ [EXCP_UDEF] = "Undefined Instruction",
+ [EXCP_SWI] = "SVC",
+ [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+ [EXCP_DATA_ABORT] = "Data Abort",
+ [EXCP_IRQ] = "IRQ",
+ [EXCP_FIQ] = "FIQ",
+ [EXCP_BKPT] = "Breakpoint",
+ [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+ [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+ [EXCP_HVC] = "Hypervisor Call",
+ [EXCP_HYP_TRAP] = "Hypervisor Trap",
+ [EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
+ [EXCP_SEMIHOST] = "Semihosting call",
+ [EXCP_NOCP] = "v7M NOCP UsageFault",
+ [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+ [EXCP_STKOF] = "v8M STKOF UsageFault",
+ [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
+ [EXCP_LSERR] = "v8M LSERR UsageFault",
+ [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+ [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
+ };
+
+ if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
+ exc = excnames[idx];
+ }
+ if (!exc) {
+ exc = "unknown";
+ }
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
+ }
+}
+
+/*
+ * Function used to synchronize QEMU's AArch64 register set with AArch32
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_32_to_64(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy R[0:7] to X[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+
+ /*
+ * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ * Otherwise, they come from the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->usr_regs[i - 8];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+ }
+
+ /*
+ * Registers x13-x23 are the various mode SP and FP registers. Registers
+ * r13 and r14 are only copied if we are in that mode, otherwise we copy
+ * from the mode banked register.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->xregs[13] = env->regs[13];
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
+ /* HYP is an exception in that it is copied from r14 */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[15] = env->regs[13];
+ } else {
+ env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->xregs[16] = env->regs[14];
+ env->xregs[17] = env->regs[13];
+ } else {
+ env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->xregs[18] = env->regs[14];
+ env->xregs[19] = env->regs[13];
+ } else {
+ env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->xregs[20] = env->regs[14];
+ env->xregs[21] = env->regs[13];
+ } else {
+ env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->xregs[22] = env->regs[14];
+ env->xregs[23] = env->regs[13];
+ } else {
+ env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
+ }
+
+ /*
+ * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy from r8-r14. Otherwise, we copy from the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->xregs[i] = env->fiq_regs[i - 24];
+ }
+ env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
+ env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
+ }
+
+ env->pc = env->regs[15];
+}
+
+/*
+ * Function used to synchronize QEMU's AArch32 register set with AArch64
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy X[0:7] to R[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+
+ /*
+ * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+ * Otherwise, we copy x8-x12 into the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->usr_regs[i - 8] = env->xregs[i];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+ }
+
+ /*
+ * Registers r13 & r14 depend on the current mode.
+ * If we are in a given mode, we copy the corresponding x registers to r13
+ * and r14. Otherwise, we copy the x register to the banked r13 and r14
+ * for the mode.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->regs[13] = env->xregs[13];
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
+
+ /*
+ * HYP is an exception in that it does not have its own banked r14 but
+ * shares the USR r14
+ */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[13] = env->xregs[15];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->regs[14] = env->xregs[16];
+ env->regs[13] = env->xregs[17];
+ } else {
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->regs[14] = env->xregs[18];
+ env->regs[13] = env->xregs[19];
+ } else {
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->regs[14] = env->xregs[20];
+ env->regs[13] = env->xregs[21];
+ } else {
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->regs[14] = env->xregs[22];
+ env->regs[13] = env->xregs[23];
+ } else {
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy to r8-r14. Otherwise, we copy to the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->fiq_regs[i - 24] = env->xregs[i];
+ }
+ env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
+ env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+ }
+
+ env->regs[15] = env->pc;
+}
+
+static void take_aarch32_exception(CPUARMState *env, int new_mode,
+ uint32_t mask, uint32_t offset,
+ uint32_t newpc)
+{
+ int new_el;
+
+ /* Change the CPU state so as to actually take the exception. */
+ switch_mode(env, new_mode);
+
+ /*
+ * For exceptions taken to AArch32 we must clear the SS bit in both
+ * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
+ */
+ env->pstate &= ~PSTATE_SS;
+ env->spsr = cpsr_read(env);
+ /* Clear IT bits. */
+ env->condexec_bits = 0;
+ /* Switch to the new mode, and to the correct instruction set. */
+ env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+
+ /* This must be after mode switching. */
+ new_el = arm_current_el(env);
+
+ /* Set new mode endianness */
+ env->uncached_cpsr &= ~CPSR_E;
+ if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
+ env->uncached_cpsr |= CPSR_E;
+ }
+ /* J and IL must always be cleared for exception entry */
+ env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
+ env->daif |= mask;
+
+ if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
+ if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
+ env->uncached_cpsr |= CPSR_SSBS;
+ } else {
+ env->uncached_cpsr &= ~CPSR_SSBS;
+ }
+ }
+
+ if (new_mode == ARM_CPU_MODE_HYP) {
+ env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
+ env->elr_el[2] = env->regs[15];
+ } else {
+        /* CPSR.PAN is normally preserved unless... */
+ if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
+ switch (new_el) {
+ case 3:
+ if (!arm_is_secure_below_el3(env)) {
+ /* ... the target is EL3, from non-secure state. */
+ env->uncached_cpsr &= ~CPSR_PAN;
+ break;
+ }
+ /* ... the target is EL3, from secure state ... */
+ /* fall through */
+ case 1:
+ /* ... the target is EL1 and SCTLR.SPAN is 0. */
+ if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
+ env->uncached_cpsr |= CPSR_PAN;
+ }
+ break;
+ }
+ }
+        /*
+         * This is a harmless lie: there was no c1_sys (SCTLR) on
+         * V4T/V5, so the V4T feature check simply guards whether the
+         * CPU has Thumb mode at all.
+         */
+ if (arm_feature(env, ARM_FEATURE_V4T)) {
+ env->thumb =
+ (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
+ }
+ env->regs[14] = env->regs[15] + offset;
+ }
+ env->regs[15] = newpc;
+ arm_rebuild_hflags(env);
+}
+
+static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
+{
+ /*
+ * Handle exception entry to Hyp mode; this is sufficiently
+ * different to entry to other AArch32 modes that we handle it
+ * separately here.
+ *
+ * The vector table entry used is always the 0x14 Hyp mode entry point,
+ * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
+ * The offset applied to the preferred return address is always zero
+ * (see DDI0487C.a section G1.12.3).
+ * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
+ */
+ uint32_t addr, mask;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ switch (cs->exception_index) {
+ case EXCP_UDEF:
+ addr = 0x04;
+ break;
+ case EXCP_SWI:
+ addr = 0x14;
+ break;
+ case EXCP_BKPT:
+ /* Fall through to prefetch abort. */
+ case EXCP_PREFETCH_ABORT:
+ env->cp15.ifar_s = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
+ (uint32_t)env->exception.vaddress);
+ addr = 0x0c;
+ break;
+ case EXCP_DATA_ABORT:
+ env->cp15.dfar_s = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
+ (uint32_t)env->exception.vaddress);
+ addr = 0x10;
+ break;
+ case EXCP_IRQ:
+ addr = 0x18;
+ break;
+ case EXCP_FIQ:
+ addr = 0x1c;
+ break;
+ case EXCP_HVC:
+ addr = 0x08;
+ break;
+ case EXCP_HYP_TRAP:
+ addr = 0x14;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+
+ if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /*
+ * QEMU syndrome values are v8-style. v7 has the IL bit
+ * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
+ * If this is a v7 CPU, squash the IL bit in those cases.
+ */
+ if (cs->exception_index == EXCP_PREFETCH_ABORT ||
+ (cs->exception_index == EXCP_DATA_ABORT &&
+ !(env->exception.syndrome & ARM_EL_ISV)) ||
+ syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
+ env->exception.syndrome &= ~ARM_EL_IL;
+ }
+ }
+ env->cp15.esr_el[2] = env->exception.syndrome;
+ }
+
+ if (arm_current_el(env) != 2 && addr < 0x14) {
+ addr = 0x14;
+ }
+
+ mask = 0;
+ if (!(env->cp15.scr_el3 & SCR_EA)) {
+ mask |= CPSR_A;
+ }
+ if (!(env->cp15.scr_el3 & SCR_IRQ)) {
+ mask |= CPSR_I;
+ }
+ if (!(env->cp15.scr_el3 & SCR_FIQ)) {
+ mask |= CPSR_F;
+ }
+
+ addr += env->cp15.hvbar;
+
+ take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
+}
+
+static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t addr;
+ uint32_t mask;
+ int new_mode;
+ uint32_t offset;
+ uint32_t moe;
+
+ /* If this is a debug exception we must update the DBGDSCR.MOE bits */
+ switch (syn_get_ec(env->exception.syndrome)) {
+ case EC_BREAKPOINT:
+ case EC_BREAKPOINT_SAME_EL:
+ moe = 1;
+ break;
+ case EC_WATCHPOINT:
+ case EC_WATCHPOINT_SAME_EL:
+ moe = 10;
+ break;
+ case EC_AA32_BKPT:
+ moe = 3;
+ break;
+ case EC_VECTORCATCH:
+ moe = 5;
+ break;
+ default:
+ moe = 0;
+ break;
+ }
+
+ if (moe) {
+ env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
+ }
+
+ if (env->exception.target_el == 2) {
+ arm_cpu_do_interrupt_aarch32_hyp(cs);
+ return;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_UDEF:
+ new_mode = ARM_CPU_MODE_UND;
+ addr = 0x04;
+ mask = CPSR_I;
+        if (env->thumb) {
+            offset = 2;
+        } else {
+            offset = 4;
+        }
+ break;
+ case EXCP_SWI:
+ new_mode = ARM_CPU_MODE_SVC;
+ addr = 0x08;
+ mask = CPSR_I;
+ /* The PC already points to the next instruction. */
+ offset = 0;
+ break;
+ case EXCP_BKPT:
+ /* Fall through to prefetch abort. */
+ case EXCP_PREFETCH_ABORT:
+ A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
+ qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
+ env->exception.fsr, (uint32_t)env->exception.vaddress);
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x0c;
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_DATA_ABORT:
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
+ qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
+ env->exception.fsr,
+ (uint32_t)env->exception.vaddress);
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x10;
+ mask = CPSR_A | CPSR_I;
+ offset = 8;
+ break;
+ case EXCP_IRQ:
+ new_mode = ARM_CPU_MODE_IRQ;
+ addr = 0x18;
+ /* Disable IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ if (env->cp15.scr_el3 & SCR_IRQ) {
+ /* IRQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ mask |= CPSR_F;
+ }
+ break;
+ case EXCP_FIQ:
+ new_mode = ARM_CPU_MODE_FIQ;
+ addr = 0x1c;
+ /* Disable FIQ, IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ if (env->cp15.scr_el3 & SCR_FIQ) {
+ /* FIQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ }
+ offset = 4;
+ break;
+ case EXCP_VIRQ:
+ new_mode = ARM_CPU_MODE_IRQ;
+ addr = 0x18;
+ /* Disable IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_VFIQ:
+ new_mode = ARM_CPU_MODE_FIQ;
+ addr = 0x1c;
+ /* Disable FIQ, IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 4;
+ break;
+ case EXCP_SMC:
+ new_mode = ARM_CPU_MODE_MON;
+ addr = 0x08;
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 0;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ return; /* Never happens. Keep compiler happy. */
+ }
+
+ if (new_mode == ARM_CPU_MODE_MON) {
+ addr += env->cp15.mvbar;
+ } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+ /* High vectors. When enabled, base address cannot be remapped. */
+ addr += 0xffff0000;
+ } else {
+        /*
+         * ARMv7 and later provide a vector base address register (VBAR)
+         * to remap the exception vector table. This register is only
+         * honoured in non-monitor mode, and is banked.
+         * Note: only bits 31:5 are valid.
+         */
+ addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
+ }
+
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ env->cp15.scr_el3 &= ~SCR_NS;
+ }
+
+ take_aarch32_exception(env, new_mode, mask, offset, addr);
+}
+
+static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
+{
+ /*
+ * Return the register number of the AArch64 view of the AArch32
+ * register @aarch32_reg. The CPUARMState CPSR is assumed to still
+ * be that of the AArch32 mode the exception came from.
+ */
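+    /*
+     * For example, AArch32 r13 while in SVC mode corresponds to x19,
+     * matching the mapping used by aarch64_sync_32_to_64() above.
+     */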
+ int mode = env->uncached_cpsr & CPSR_M;
+
+ switch (aarch32_reg) {
+ case 0 ... 7:
+ return aarch32_reg;
+ case 8 ... 12:
+ return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
+ case 13:
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_SYS:
+ return 13;
+ case ARM_CPU_MODE_HYP:
+ return 15;
+ case ARM_CPU_MODE_IRQ:
+ return 17;
+ case ARM_CPU_MODE_SVC:
+ return 19;
+ case ARM_CPU_MODE_ABT:
+ return 21;
+ case ARM_CPU_MODE_UND:
+ return 23;
+ case ARM_CPU_MODE_FIQ:
+ return 29;
+ default:
+ g_assert_not_reached();
+ }
+ case 14:
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_SYS:
+ case ARM_CPU_MODE_HYP:
+ return 14;
+ case ARM_CPU_MODE_IRQ:
+ return 16;
+ case ARM_CPU_MODE_SVC:
+ return 18;
+ case ARM_CPU_MODE_ABT:
+ return 20;
+ case ARM_CPU_MODE_UND:
+ return 22;
+ case ARM_CPU_MODE_FIQ:
+ return 30;
+ default:
+ g_assert_not_reached();
+ }
+ case 15:
+ return 31;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
+{
+ uint32_t ret = cpsr_read(env);
+
+ /* Move DIT to the correct location for SPSR_ELx */
+ if (ret & CPSR_DIT) {
+ ret &= ~CPSR_DIT;
+ ret |= PSTATE_DIT;
+ }
+ /* Merge PSTATE.SS into SPSR_ELx */
+ ret |= env->pstate & PSTATE_SS;
+
+ return ret;
+}
+
+/* Handle exception entry to a target EL which is using AArch64 */
+static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+ target_ulong addr = env->cp15.vbar_el[new_el];
+ unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+ unsigned int old_mode;
+ unsigned int cur_el = arm_current_el(env);
+ int rt;
+
+    /*
+     * Note that new_el can never be 0. If cur_el is 0, then
+     * el0_a64 is the current value of is_a64(); otherwise el0_a64
+     * is ignored.
+     */
+ aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
+
+ if (cur_el < new_el) {
+ /* Entry vector offset depends on whether the implemented EL
+ * immediately lower than the target level is using AArch32 or AArch64
+ */
+ bool is_aa64;
+ uint64_t hcr;
+
+ switch (new_el) {
+ case 3:
+ is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+ break;
+ case 2:
+ hcr = arm_hcr_el2_eff(env);
+ if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ is_aa64 = (hcr & HCR_RW) != 0;
+ break;
+ }
+ /* fall through */
+ case 1:
+ is_aa64 = is_a64(env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_aa64) {
+ addr += 0x400;
+ } else {
+ addr += 0x600;
+ }
+ } else if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
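+    /*
+     * addr now selects the 0x200-byte vector group: VBAR_ELx + 0x0
+     * (current EL, SP_EL0), +0x200 (current EL, SP_ELx), +0x400
+     * (lower EL, AArch64) or +0x600 (lower EL, AArch32). The offsets
+     * added below (+0x80 for IRQ, +0x100 for FIQ) pick the entry
+     * within that group.
+     */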
+
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ env->cp15.far_el[new_el] = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
+ env->cp15.far_el[new_el]);
+ /* fall through */
+ case EXCP_BKPT:
+ case EXCP_UDEF:
+ case EXCP_SWI:
+ case EXCP_HVC:
+ case EXCP_HYP_TRAP:
+ case EXCP_SMC:
+ switch (syn_get_ec(env->exception.syndrome)) {
+ case EC_ADVSIMDFPACCESSTRAP:
+ /*
+ * QEMU internal FP/SIMD syndromes from AArch32 include the
+ * TA and coproc fields which are only exposed if the exception
+ * is taken to AArch32 Hyp mode. Mask them out to get a valid
+ * AArch64 format syndrome.
+ */
+ env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
+ break;
+ case EC_CP14RTTRAP:
+ case EC_CP15RTTRAP:
+ case EC_CP14DTTRAP:
+ /*
+ * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
+ * the raw register field from the insn; when taking this to
+ * AArch64 we must convert it to the AArch64 view of the register
+ * number. Notice that we read a 4-bit AArch32 register number and
+ * write back a 5-bit AArch64 one.
+ */
+ rt = extract32(env->exception.syndrome, 5, 4);
+ rt = aarch64_regnum(env, rt);
+ env->exception.syndrome = deposit32(env->exception.syndrome,
+ 5, 5, rt);
+ break;
+ case EC_CP15RRTTRAP:
+ case EC_CP14RRTTRAP:
+ /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
+ rt = extract32(env->exception.syndrome, 5, 4);
+ rt = aarch64_regnum(env, rt);
+ env->exception.syndrome = deposit32(env->exception.syndrome,
+ 5, 5, rt);
+ rt = extract32(env->exception.syndrome, 10, 4);
+ rt = aarch64_regnum(env, rt);
+ env->exception.syndrome = deposit32(env->exception.syndrome,
+ 10, 5, rt);
+ break;
+ }
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
+ break;
+ case EXCP_IRQ:
+ case EXCP_VIRQ:
+ addr += 0x80;
+ break;
+ case EXCP_FIQ:
+ case EXCP_VFIQ:
+ addr += 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+
+ if (is_a64(env)) {
+ old_mode = pstate_read(env);
+ aarch64_save_sp(env, arm_current_el(env));
+ env->elr_el[new_el] = env->pc;
+ } else {
+ old_mode = cpsr_read_for_spsr_elx(env);
+ env->elr_el[new_el] = env->regs[15];
+
+ aarch64_sync_32_to_64(env);
+
+ env->condexec_bits = 0;
+ }
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
+
+ qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
+ env->elr_el[new_el]);
+
+ if (cpu_isar_feature(aa64_pan, cpu)) {
+ /* The value of PSTATE.PAN is normally preserved, except when ... */
+ new_mode |= old_mode & PSTATE_PAN;
+ switch (new_el) {
+ case 2:
+ /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
+ if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
+ != (HCR_E2H | HCR_TGE)) {
+ break;
+ }
+ /* fall through */
+ case 1:
+ /* ... the target is EL1 ... */
+ /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
+ if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
+ new_mode |= PSTATE_PAN;
+ }
+ break;
+ }
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ new_mode |= PSTATE_TCO;
+ }
+
+ if (cpu_isar_feature(aa64_ssbs, cpu)) {
+ if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
+ new_mode |= PSTATE_SSBS;
+ } else {
+ new_mode &= ~PSTATE_SSBS;
+ }
+ }
+
+ pstate_write(env, PSTATE_DAIF | new_mode);
+ env->aarch64 = 1;
+ aarch64_restore_sp(env, new_el);
+ helper_rebuild_hflags_a64(env, new_el);
+
+ env->pc = addr;
+
+ qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+ new_el, env->pc, pstate_read(env));
+}
+
+/*
+ * Do semihosting call and set the appropriate return value. All the
+ * permission and validity checks have been done at translate time.
+ *
+ * We only see semihosting exceptions in TCG, as they are not
+ * trapped to the hypervisor in KVM.
+ */
+#ifdef CONFIG_TCG
+static void handle_semihosting(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (is_a64(env)) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_common_semihosting(cs);
+ env->pc += 4;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
+ env->regs[0] = do_common_semihosting(cs);
+ env->regs[15] += env->thumb ? 2 : 4;
+ }
+}
+#endif
+
+/* Handle a CPU exception for A and R profile CPUs.
+ * Do any appropriate logging, handle PSCI calls, and then hand off
+ * to the AArch64-entry or AArch32-entry function depending on the
+ * target exception level's register width.
+ *
+ * Note: this is used both by TCG (as the do_interrupt tcg op) and by
+ * KVM, to re-inject guest debug exceptions and to inject a
+ * Synchronous External Abort.
+ */
+void arm_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+
+ assert(!arm_feature(env, ARM_FEATURE_M));
+
+ arm_log_exception(cs->exception_index);
+ qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
+ new_el);
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && !excp_is_internal(cs->exception_index)) {
+ qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
+ syn_get_ec(env->exception.syndrome),
+ env->exception.syndrome);
+ }
+
+ if (arm_is_psci_call(cpu, cs->exception_index)) {
+ arm_handle_psci_call(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+ return;
+ }
+
+ /*
+ * Semihosting semantics depend on the register width of the code
+ * that caused the exception, not the target exception level, so
+ * must be handled here.
+ */
+#ifdef CONFIG_TCG
+ if (cs->exception_index == EXCP_SEMIHOST) {
+ handle_semihosting(cs);
+ return;
+ }
+#endif
+
+    /*
+     * Hooks may change global state, so the BQL should be held; it must
+     * also be held for any modification of cs->interrupt_request.
+     */
+ g_assert(qemu_mutex_iothread_locked());
+
+ arm_call_pre_el_change_hook(cpu);
+
+ assert(!excp_is_internal(cs->exception_index));
+ if (arm_el_is_aa64(env, new_el)) {
+ arm_cpu_do_interrupt_aarch64(cs);
+ } else {
+ arm_cpu_do_interrupt_aarch32(cs);
+ }
+
+ arm_call_el_change_hook(cpu);
+
+ if (!kvm_enabled()) {
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+uint64_t arm_sctlr(CPUARMState *env, int el)
+{
+ /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
+ if (el == 0) {
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
+ el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
+ ? 2 : 1;
+ }
+ return env->cp15.sctlr_el[el];
+}
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* Return true if the specified stage of address translation is disabled */
+static inline bool regime_translation_disabled(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ uint64_t hcr_el2;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
+ (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
+ case R_V7M_MPU_CTRL_ENABLE_MASK:
+ /* Enabled, but not for HardFault and NMI */
+ return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
+ case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
+ /* Enabled for all cases */
+ return false;
+ case 0:
+ default:
+ /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
+ * we warned about that in armv7m_nvic.c when the guest set it.
+ */
+ return true;
+ }
+ }
+
+ hcr_el2 = arm_hcr_el2_eff(env);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* HCR.DC means HCR.VM behaves as 1 */
+ return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
+ }
+
+ if (hcr_el2 & HCR_TGE) {
+ /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
+ if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
+ return true;
+ }
+ }
+
+ if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+ /* HCR.DC means SCTLR_EL1.M behaves as 0 */
+ return true;
+ }
+
+ return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
+}
+
+static inline bool regime_translation_big_endian(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
+/* Return the TTBR associated with this translation regime */
+static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ttbrn)
+{
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ return env->cp15.vttbr_el2;
+ }
+ if (mmu_idx == ARMMMUIdx_Stage2_S) {
+ return env->cp15.vsttbr_el2;
+ }
+ if (ttbrn == 0) {
+ return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+ } else {
+ return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+ }
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+/* Convert a possible stage1+2 MMU index into the appropriate
+ * stage 1 MMU index
+ */
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_SE10_0:
+ return ARMMMUIdx_Stage1_SE0;
+ case ARMMMUIdx_SE10_1:
+ return ARMMMUIdx_Stage1_SE1;
+ case ARMMMUIdx_SE10_1_PAN:
+ return ARMMMUIdx_Stage1_SE1_PAN;
+ case ARMMMUIdx_E10_0:
+ return ARMMMUIdx_Stage1_E0;
+ case ARMMMUIdx_E10_1:
+ return ARMMMUIdx_Stage1_E1;
+ case ARMMMUIdx_E10_1_PAN:
+ return ARMMMUIdx_Stage1_E1_PAN;
+ default:
+ return mmu_idx;
+ }
+}
+
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
+/* Returns true if the stage 1 translation regime is using LPAE format page
+ * tables. Used when raising alignment exceptions, whose FSR changes depending
+ * on whether the long or short descriptor format is in use. */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
+
+ return regime_using_lpae_format(env, mmu_idx);
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_SE0:
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
+ return true;
+ default:
+ return false;
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @ap: The 3-bit access permissions (AP[2:0])
+ * @domain_prot: The 2-bit domain access permissions
+ */
+static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ap, int domain_prot)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (domain_prot == 3) {
+ return PAGE_READ | PAGE_WRITE;
+ }
+
+ switch (ap) {
+ case 0:
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ return 0;
+ }
+ switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
+ case SCTLR_S:
+ return is_user ? 0 : PAGE_READ;
+ case SCTLR_R:
+ return PAGE_READ;
+ default:
+ return 0;
+ }
+ case 1:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 2:
+ if (is_user) {
+ return PAGE_READ;
+ } else {
+ return PAGE_READ | PAGE_WRITE;
+ }
+ case 3:
+ return PAGE_READ | PAGE_WRITE;
+ case 4: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : PAGE_READ;
+ case 6:
+ return PAGE_READ;
+ case 7:
+ if (!arm_feature(env, ARM_FEATURE_V6K)) {
+ return 0;
+ }
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags.
+ *
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @is_user: TRUE if accessing from PL0
+ */
+static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
+{
+ switch (ap) {
+ case 0:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 1:
+ return PAGE_READ | PAGE_WRITE;
+ case 2:
+ return is_user ? 0 : PAGE_READ;
+ case 3:
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int
+simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
+{
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+}
+
+/* Translate S2 section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @s2ap: The 2-bit stage2 access permissions (S2AP)
+ * @xn: XN (execute-never) bits
+ * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
+ */
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
+{
+ int prot = 0;
+
+ if (s2ap & 1) {
+ prot |= PAGE_READ;
+ }
+ if (s2ap & 2) {
+ prot |= PAGE_WRITE;
+ }
+
+ if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
+ switch (xn) {
+ case 0:
+ prot |= PAGE_EXEC;
+ break;
+ case 1:
+ if (s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ case 2:
+ break;
+ case 3:
+ if (!s1_is_el0) {
+ prot |= PAGE_EXEC;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ if (!extract32(xn, 1, 1)) {
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
+ }
+ }
+ return prot;
+}
+
+/* Translate section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_aa64: TRUE if AArch64
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @ns: NS (non-secure) bit
+ * @xn: XN (execute-never) bit
+ * @pxn: PXN (privileged execute-never) bit
+ */
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int ap, int ns, int xn, int pxn)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+ int prot_rw, user_rw;
+ bool have_wxn;
+ int wxn = 0;
+
+ assert(mmu_idx != ARMMMUIdx_Stage2);
+ assert(mmu_idx != ARMMMUIdx_Stage2_S);
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ if (is_user) {
+ prot_rw = user_rw;
+ } else {
+ if (user_rw && regime_is_pan(env, mmu_idx)) {
+ /* PAN forbids data accesses but doesn't affect insn fetch */
+ prot_rw = 0;
+ } else {
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ }
+ }
+
+ if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
+ return prot_rw;
+ }
+
+ /* TODO have_wxn should be replaced with
+ * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
+ * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
+ * compatible processors have EL2, which is required for [U]WXN.
+ */
+ have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
+
+ if (have_wxn) {
+ wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
+ }
+
+ if (is_aa64) {
+ if (regime_has_2_ranges(mmu_idx) && !is_user) {
+ xn = pxn || (user_rw & PAGE_WRITE);
+ }
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ case 3:
+ if (is_user) {
+ xn = xn || !(user_rw & PAGE_READ);
+ } else {
+ int uwxn = 0;
+ if (have_wxn) {
+ uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
+ }
+ xn = xn || !(prot_rw & PAGE_READ) || pxn ||
+ (uwxn && (user_rw & PAGE_WRITE));
+ }
+ break;
+ case 2:
+ break;
+ }
+ } else {
+ xn = wxn = 0;
+ }
+
+ if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
+ return prot_rw;
+ }
+ return prot_rw | PAGE_EXEC;
+}
+
+static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t *table, uint32_t address)
+{
+ /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
+ if (address & tcr->mask) {
+ if (tcr->raw_tcr & TTBCR_PD1) {
+ /* Translation table walk disabled for TTBR1 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
+ } else {
+ if (tcr->raw_tcr & TTBCR_PD0) {
+ /* Translation table walk disabled for TTBR0 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
+ }
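+    /* Index the L1 table with VA[31:20]: (VA >> 18) & 0x3ffc == VA[31:20] * 4 */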
+ *table |= (address >> 18) & 0x3ffc;
+ return true;
+}
+
+/* Translate a S1 pagetable walk through S2 if needed. */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+ hwaddr addr, bool *is_secure,
+ ARMMMUFaultInfo *fi)
+{
+ if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
+ !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+ target_ulong s2size;
+ hwaddr s2pa;
+ int s2prot;
+ int ret;
+ ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
+ : ARMMMUIdx_Stage2;
+ ARMCacheAttrs cacheattrs = {};
+ MemTxAttrs txattrs = {};
+
+ ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
+ &s2pa, &txattrs, &s2prot, &s2size, fi,
+ &cacheattrs);
+ if (ret) {
+ assert(fi->type != ARMFault_None);
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ fi->s1ns = !*is_secure;
+ return ~0;
+ }
+ if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
+ (cacheattrs.attrs & 0xf0) == 0) {
+ /*
+ * PTW set and S1 walk touched S2 Device memory:
+ * generate Permission fault.
+ */
+ fi->type = ARMFault_Permission;
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ fi->s1ns = !*is_secure;
+ return ~0;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ /* Check if page table walk is to secure or non-secure PA space. */
+ if (*is_secure) {
+ *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+ } else {
+ *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+ }
+ } else {
+ assert(!*is_secure);
+ }
+
+ addr = s2pa;
+ }
+ return addr;
+}
+
+/* All loads done in the course of a page table walk go through here. */
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult result = MEMTX_OK;
+ AddressSpace *as;
+ uint32_t data;
+
+ addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ data = address_space_ldl_be(as, addr, attrs, &result);
+ } else {
+ data = address_space_ldl_le(as, addr, attrs, &result);
+ }
+ if (result == MEMTX_OK) {
+ return data;
+ }
+ fi->type = ARMFault_SyncExternalOnWalk;
+ fi->ea = arm_extabort_type(result);
+ return 0;
+}
+
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult result = MEMTX_OK;
+ AddressSpace *as;
+ uint64_t data;
+
+ addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ data = address_space_ldq_be(as, addr, attrs, &result);
+ } else {
+ data = address_space_ldq_le(as, addr, attrs, &result);
+ }
+ if (result == MEMTX_OK) {
+ return data;
+ }
+ fi->type = ARMFault_SyncExternalOnWalk;
+ fi->ea = arm_extabort_type(result);
+ return 0;
+}
+
+static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = env_cpu(env);
+ int level = 1;
+ uint32_t table;
+ uint32_t desc;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ type = (desc & 3);
+ domain = (desc >> 5) & 0x0f;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (type == 0) {
+ /* Section translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ if (type != 2) {
+ level = 2;
+ }
+ if (domain_prot == 0 || domain_prot == 2) {
+ fi->type = ARMFault_Domain;
+ goto do_fault;
+ }
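+    /*
+     * L1 descriptor bits [1:0]: 0 = fault, 1 = coarse page table,
+     * 2 = 1MB section, 3 = fine page table (ARMv5 short-descriptor
+     * format).
+     */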
+ if (type == 2) {
+ /* 1Mb section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ ap = (desc >> 10) & 3;
+ *page_size = 1024 * 1024;
+ } else {
+ /* Lookup l2 entry. */
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x10000;
+ break;
+ case 2: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
+ *page_size = 0x1000;
+ break;
+ case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
+ if (type == 1) {
+ /* ARMv6/XScale extended small page format */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)
+ || arm_feature(env, ARM_FEATURE_V6)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ *page_size = 0x1000;
+ } else {
+ /* UNPREDICTABLE in ARMv5; we choose to take a
+ * page translation fault.
+ */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ *page_size = 0x400;
+ }
+ ap = (desc >> 4) & 3;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ }
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ *prot |= *prot ? PAGE_EXEC : 0;
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ fi->domain = domain;
+ fi->level = level;
+ return true;
+}
+
+static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = env_cpu(env);
+ ARMCPU *cpu = env_archcpu(env);
+ int level = 1;
+ uint32_t table;
+ uint32_t desc;
+ uint32_t xn;
+ uint32_t pxn = 0;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+ bool ns;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ type = (desc & 3);
+ if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
+ /* Section translation fault, or attempt to use the encoding
+ * which is Reserved on implementations without PXN.
+ */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ }
+ if ((type == 1) || !(desc & (1 << 18))) {
+ /* Page or Section. */
+ domain = (desc >> 5) & 0x0f;
+ }
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ if (type == 1) {
+ level = 2;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (domain_prot == 0 || domain_prot == 2) {
+ /* Section or Page domain fault */
+ fi->type = ARMFault_Domain;
+ goto do_fault;
+ }
+ if (type != 1) {
+ if (desc & (1 << 18)) {
+ /* Supersection. */
+ phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+ phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
+ phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
+ *page_size = 0x1000000;
+ } else {
+ /* Section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ *page_size = 0x100000;
+ }
+ ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
+ xn = desc & (1 << 4);
+ pxn = desc & 1;
+ ns = extract32(desc, 19, 1);
+ } else {
+ if (cpu_isar_feature(aa32_pxn, cpu)) {
+ pxn = (desc >> 2) & 1;
+ }
+ ns = extract32(desc, 3, 1);
+ /* Lookup l2 entry. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ fi->type = ARMFault_Translation;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ xn = desc & (1 << 15);
+ *page_size = 0x10000;
+ break;
+ case 2: case 3: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ xn = desc & 1;
+ *page_size = 0x1000;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ }
+ if (domain_prot == 3) {
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ } else {
+ if (pxn && !regime_is_user(env, mmu_idx)) {
+ xn = 1;
+ }
+ if (xn && access_type == MMU_INST_FETCH) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V6K) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
+ /* The simplified model uses AP[0] as an access control bit. */
+ if ((ap & 1) == 0) {
+ /* Access flag fault. */
+ fi->type = ARMFault_AccessFlag;
+ goto do_fault;
+ }
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ } else {
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ }
+ if (*prot && !xn) {
+ *prot |= PAGE_EXEC;
+ }
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+ }
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ attrs->secure = false;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ fi->domain = domain;
+ fi->level = level;
+ return true;
+}
+
+/*
+ * check_s2_mmu_setup
+ * @cpu: ARMCPU
+ * @is_aa64: True if the translation regime is in AArch64 state
+ * @level: Suggested starting level
+ * @inputsize: Bitsize of IPAs
+ * @stride: Page-table stride (See the ARM ARM)
+ *
+ * Returns true if the suggested S2 translation parameters are OK and
+ * false otherwise.
+ */
+static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
+ int inputsize, int stride)
+{
+ const int grainsize = stride + 3;
+ int startsizecheck;
+
+ /* Negative levels are never allowed. */
+ if (level < 0) {
+ return false;
+ }
+
+ startsizecheck = inputsize - ((3 - level) * stride + grainsize);
+ if (startsizecheck < 1 || startsizecheck > stride + 4) {
+ return false;
+ }
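+    /*
+     * For example, a level 1 walk with a 4K granule (stride 9,
+     * grainsize 12) and inputsize 40 gives startsizecheck =
+     * 40 - (2 * 9 + 12) = 10, which is within [1, stride + 4] and so
+     * passes this check.
+     */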
+
+ if (is_aa64) {
+ CPUARMState *env = &cpu->env;
+ unsigned int pamax = arm_pamax(cpu);
+
+ switch (stride) {
+ case 13: /* 64KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 42)) {
+ return false;
+ }
+ break;
+ case 11: /* 16KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 40)) {
+ return false;
+ }
+ break;
+ case 9: /* 4KB Pages. */
+ if (level == 0 && pamax <= 42) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Inputsize checks. */
+ if (inputsize > pamax &&
+ (arm_el_is_aa64(env, 1) || inputsize > 40)) {
+ /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
+ return false;
+ }
+ } else {
+ /* AArch32 only supports 4KB pages. Assert on that. */
+ assert(stride == 9);
+
+ if (level == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ * .../S2ConvertAttrsHints()
+ */
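+/*
+ * For illustration: s2attrs = 0xf (Normal, Write-Back inner and outer)
+ * with HCR.CD clear converts to (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xff,
+ * i.e. Write-Back with R/W-allocate hints in MAIR format; with HCR.CD set
+ * it becomes Non-cacheable, 0x44.
+ */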
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
+ uint8_t loattr = extract32(s2attrs, 0, 2);
+ uint8_t hihint = 0, lohint = 0;
+
+ if (hiattr != 0) { /* normal memory */
+ if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
+ hiattr = loattr = 1; /* non-cacheable */
+ } else {
+ if (hiattr != 1) { /* Write-through or write-back */
+ hihint = 3; /* RW allocate */
+ }
+ if (loattr != 1) { /* Write-through or write-back */
+ lohint = 3; /* RW allocate */
+ }
+ }
+ }
+
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 37, 2);
+ } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ return 0; /* VTCR_EL2 */
+ } else {
+ /* Replicate the single TBI bit so we always have 2 bits. */
+ return extract32(tcr, 20, 1) * 3;
+ }
+}
+
+static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 51, 2);
+ } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ return 0; /* VTCR_EL2 */
+ } else {
+ /* Replicate the single TBID bit so we always have 2 bits. */
+ return extract32(tcr, 29, 1) * 3;
+ }
+}
+
+static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
+{
+ if (regime_has_2_ranges(mmu_idx)) {
+ return extract64(tcr, 57, 2);
+ } else {
+ /* Replicate the single TCMA bit so we always have 2 bits. */
+ return extract32(tcr, 30, 1) * 3;
+ }
+}
+
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+ ARMMMUIdx mmu_idx, bool data)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ bool epd, hpd, using16k, using64k;
+ int select, tsz, tbi, max_tsz;
+
+ if (!regime_has_2_ranges(mmu_idx)) {
+ select = 0;
+ tsz = extract32(tcr, 0, 6);
+ using64k = extract32(tcr, 14, 1);
+ using16k = extract32(tcr, 15, 1);
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* VTCR_EL2 */
+ hpd = false;
+ } else {
+ hpd = extract32(tcr, 24, 1);
+ }
+ epd = false;
+ } else {
+ /*
+ * Bit 55 is always between the two regions, and is canonical for
+ * determining if address tagging is enabled.
+ */
+ select = extract64(va, 55, 1);
+ if (!select) {
+ tsz = extract32(tcr, 0, 6);
+ epd = extract32(tcr, 7, 1);
+ using64k = extract32(tcr, 14, 1);
+ using16k = extract32(tcr, 15, 1);
+ hpd = extract64(tcr, 41, 1);
+ } else {
+ int tg = extract32(tcr, 30, 2);
+ using16k = tg == 1;
+ using64k = tg == 3;
+ tsz = extract32(tcr, 16, 6);
+ epd = extract32(tcr, 23, 1);
+ hpd = extract64(tcr, 42, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
+ max_tsz = 48 - using64k;
+ } else {
+ max_tsz = 39;
+ }
+
+ tsz = MIN(tsz, max_tsz);
+ tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
+
+ /* Present TBI as a composite with TBID. */
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (!data) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> select) & 1;
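+    /*
+     * Example: in a two-range regime with TCR.TBI0 = 1 and TCR.TBI1 = 0,
+     * the composite tbi field is 0b01; a VA with bit 55 clear selects
+     * TTBR0, so tbi = 1 and the top byte of that address is ignored.
+     */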
+
+ return (ARMVAParameters) {
+ .tsz = tsz,
+ .select = select,
+ .tbi = tbi,
+ .epd = epd,
+ .hpd = hpd,
+ .using16k = using16k,
+ .using64k = using64k,
+ };
+}
+
+#ifndef CONFIG_USER_ONLY
+static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
+ ARMMMUIdx mmu_idx)
+{
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint32_t el = regime_el(env, mmu_idx);
+ int select, tsz;
+ bool epd, hpd;
+
+ assert(mmu_idx != ARMMMUIdx_Stage2_S);
+
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ /* VTCR */
+ bool sext = extract32(tcr, 4, 1);
+ bool sign = extract32(tcr, 3, 1);
+
+ /*
+ * If the sign-extend bit is not the same as t0sz[3], the result
+ * is unpredictable. Flag this as a guest error.
+ */
+ if (sign != sext) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
+ }
+ tsz = sextract32(tcr, 0, 4) + 8;
+ select = 0;
+ hpd = false;
+ epd = false;
+ } else if (el == 2) {
+ /* HTCR */
+ tsz = extract32(tcr, 0, 3);
+ select = 0;
+ hpd = extract64(tcr, 24, 1);
+ epd = false;
+ } else {
+ int t0sz = extract32(tcr, 0, 3);
+ int t1sz = extract32(tcr, 16, 3);
+
+ if (t1sz == 0) {
+ select = va > (0xffffffffu >> t0sz);
+ } else {
+ /* Note that we will detect errors later. */
+ select = va >= ~(0xffffffffu >> t1sz);
+ }
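+        /*
+         * Example: with t1sz = 2 the TTBR1 region starts at
+         * ~(0xffffffff >> 2) = 0xc0000000, so a VA of 0xd0000000
+         * selects TTBR1 (select = 1) while 0x80000000 does not
+         * (select = 0).
+         */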
+ if (!select) {
+ tsz = t0sz;
+ epd = extract32(tcr, 7, 1);
+ hpd = extract64(tcr, 41, 1);
+ } else {
+ tsz = t1sz;
+ epd = extract32(tcr, 23, 1);
+ hpd = extract64(tcr, 42, 1);
+ }
+ /* For aarch32, hpd0 is not enabled without t2e as well. */
+ hpd &= extract32(tcr, 6, 1);
+ }
+
+ return (ARMVAParameters) {
+ .tsz = tsz,
+ .select = select,
+ .epd = epd,
+ .hpd = hpd,
+ };
+}
+
+/**
+ * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fault info (@fi)
+ * describes why the translation aborted, in the format of a long-format
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * the WnR bit is never set (the caller must do this).
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
+ *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
+ *             EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ bool s1_is_el0,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+ /* Read an LPAE long-descriptor translation table. */
+ ARMFaultType fault_type = ARMFault_Translation;
+ uint32_t level;
+ ARMVAParameters param;
+ uint64_t ttbr;
+ hwaddr descaddr, indexmask, indexmask_grainsize;
+ uint32_t tableattrs;
+ target_ulong page_size;
+ uint32_t attrs;
+ int32_t stride;
+ int addrsize, inputsize;
+ TCR *tcr = regime_tcr(env, mmu_idx);
+ int ap, ns, xn, pxn;
+ uint32_t el = regime_el(env, mmu_idx);
+ uint64_t descaddrmask;
+ bool aarch64 = arm_el_is_aa64(env, el);
+ bool guarded = false;
+
+ /* TODO: This code does not support shareability levels. */
+ if (aarch64) {
+ param = aa64_va_parameters(env, address, mmu_idx,
+ access_type != MMU_INST_FETCH);
+ level = 0;
+ addrsize = 64 - 8 * param.tbi;
+ inputsize = 64 - param.tsz;
+ } else {
+ param = aa32_va_parameters(env, address, mmu_idx);
+ level = 1;
+ addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
+ inputsize = addrsize - param.tsz;
+ }
+
+ /*
+ * We determined the region when collecting the parameters, but we
+ * have not yet validated that the address is valid for the region.
+ * Extract the top bits and verify that they all match select.
+ *
+ * For aa32, if inputsize == addrsize, then we have selected the
+ * region by exclusion in aa32_va_parameters and there is no more
+ * validation to do here.
+ */
+ if (inputsize < addrsize) {
+ target_ulong top_bits = sextract64(address, inputsize,
+ addrsize - inputsize);
+ if (-top_bits != param.select) {
+ /* The gap between the two regions is a Translation fault */
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+ }
+
+ if (param.using64k) {
+ stride = 13;
+ } else if (param.using16k) {
+ stride = 11;
+ } else {
+ stride = 9;
+ }
+
+ /* Note that QEMU ignores shareability and cacheability attributes,
+ * so we don't need to do anything with the SH, ORGN, IRGN fields
+ * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
+ * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
+ * implement any ASID-like capability so we can ignore it (instead
+ * we will always flush the TLB any time the ASID is changed).
+ */
+ ttbr = regime_ttbr(env, mmu_idx, param.select);
+
+ /* Here we should have set up all the parameters for the translation:
+ * inputsize, ttbr, epd, stride, tbi
+ */
+
+ if (param.epd) {
+ /* Translation table walk disabled => Translation fault on TLB miss
+ * Note: This is always 0 on 64-bit EL2 and EL3.
+ */
+ goto do_fault;
+ }
+
+ if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+ /* The starting level depends on the virtual address size (which can
+ * be up to 48 bits) and the translation granule size. It indicates
+ * the number of strides (stride bits at a time) needed to
+ * consume the bits of the input address. In the pseudocode this is:
+ * level = 4 - RoundUp((inputsize - grainsize) / stride)
+ * where their 'inputsize' is our 'inputsize', 'grainsize' is
+ * our 'stride + 3' and 'stride' is our 'stride'.
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+ * = 4 - (inputsize - stride - 3 + stride - 1) / stride
+ * = 4 - (inputsize - 4) / stride;
+ */
+ level = 4 - (inputsize - 4) / stride;
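+        /*
+         * For example, a 4K granule (stride 9) with a 48-bit inputsize
+         * gives level = 4 - (48 - 4) / 9 = 0, while a 39-bit inputsize
+         * gives level = 4 - (39 - 4) / 9 = 1.
+         */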
+ } else {
+ /* For stage 2 translations the starting level is specified by the
+ * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
+ */
+ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
+ uint32_t startlevel;
+ bool ok;
+
+ if (!aarch64 || stride == 9) {
+ /* AArch32 or 4KB pages */
+ startlevel = 2 - sl0;
+
+ if (cpu_isar_feature(aa64_st, cpu)) {
+ startlevel &= 3;
+ }
+ } else {
+ /* 16KB or 64KB pages */
+ startlevel = 3 - sl0;
+ }
+
+ /* Check that the starting level is valid. */
+ ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
+ inputsize, stride);
+ if (!ok) {
+ fault_type = ARMFault_Translation;
+ goto do_fault;
+ }
+ level = startlevel;
+ }
+
+ indexmask_grainsize = (1ULL << (stride + 3)) - 1;
+ indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
+
+ /* Now we can extract the actual base address from the TTBR */
+ descaddr = extract64(ttbr, 0, 48);
+ /*
+ * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
+ * and also to mask out CnP (bit 0) which could validly be non-zero.
+ */
+ descaddr &= ~indexmask;
+
+    /* The address field in the descriptor goes up to bit 39 for ARMv7
+     * but up to bit 47 for ARMv8. However, we only use descaddrmask
+     * bits up to bit 39 for AArch32, because the higher bits are not
+     * needed to construct the next descriptor address in that case
+     * (and should all be zero anyway).
+     */
+ descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
+ ~indexmask_grainsize;
+
+ /* Secure accesses start with the page table in secure memory and
+ * can be downgraded to non-secure at any step. Non-secure accesses
+ * remain non-secure. We implement this by just ORing in the NSTable/NS
+ * bits at each step.
+ */
+ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
+ for (;;) {
+ uint64_t descriptor;
+ bool nstable;
+
+ descaddr |= (address >> (stride * (4 - level))) & indexmask;
+ descaddr &= ~7ULL;
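+        /*
+         * Descriptors are 8 bytes, so the shift leaves the index
+         * pre-scaled by 8: e.g. at level 0 with a 4K granule and a
+         * 48-bit inputsize, the index comes from address bits [47:39],
+         * landing at bits [11:3] of descaddr; the low 3 bits are then
+         * cleared to align to the descriptor.
+         */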
+ nstable = extract32(tableattrs, 4, 1);
+ descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+
+ if (!(descriptor & 1) ||
+ (!(descriptor & 2) && (level == 3))) {
+ /* Invalid, or the Reserved level 3 encoding */
+ goto do_fault;
+ }
+ descaddr = descriptor & descaddrmask;
+
+ if ((descriptor & 2) && (level < 3)) {
+ /* Table entry. The top five bits are attributes which may
+ * propagate down through lower levels of the table (and
+ * which are all arranged so that 0 means "no effect", so
+ * we can gather them up by ORing in the bits at each level).
+ */
+ tableattrs |= extract64(descriptor, 59, 5);
+ level++;
+ indexmask = indexmask_grainsize;
+ continue;
+ }
+ /* Block entry at level 1 or 2, or page entry at level 3.
+ * These are basically the same thing, although the number
+ * of bits we pull in from the vaddr varies.
+ */
+ page_size = (1ULL << ((stride * (4 - level)) + 3));
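+        /*
+         * E.g. with a 4K granule (stride 9) this yields a 4KB page at
+         * level 3 (1 << 12), a 2MB block at level 2 (1 << 21) and a
+         * 1GB block at level 1 (1 << 30).
+         */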
+ descaddr |= (address & (page_size - 1));
+ /* Extract attributes from the descriptor */
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 52, 12) << 10);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ /* Stage 2 table descriptors do not include any attribute fields */
+ break;
+ }
+ /* Merge in attributes from table descriptors */
+ attrs |= nstable << 3; /* NS */
+ guarded = extract64(descriptor, 50, 1); /* GP */
+ if (param.hpd) {
+ /* HPD disables all the table attributes except NSTable. */
+ break;
+ }
+ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
+ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
+ attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
+ break;
+ }
+ /* Here descaddr is the final physical address, and attributes
+ * are all in attrs.
+ */
+ fault_type = ARMFault_AccessFlag;
+ if ((attrs & (1 << 8)) == 0) {
+ /* Access flag */
+ goto do_fault;
+ }
+
+ ap = extract32(attrs, 4, 2);
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ ns = mmu_idx == ARMMMUIdx_Stage2;
+ xn = extract32(attrs, 11, 2);
+ *prot = get_S2prot(env, ap, xn, s1_is_el0);
+ } else {
+ ns = extract32(attrs, 3, 1);
+ xn = extract32(attrs, 12, 1);
+ pxn = extract32(attrs, 11, 1);
+ *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+ }
+
+ fault_type = ARMFault_Permission;
+ if (!(*prot & (1 << access_type))) {
+ goto do_fault;
+ }
+
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ txattrs->secure = false;
+ }
+ /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
+ if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
+ arm_tlb_bti_gp(txattrs) = true;
+ }
+
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ }
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+
+ *phys_ptr = descaddr;
+ *page_size_ptr = page_size;
+ return false;
+
+do_fault:
+ fi->type = fault_type;
+ fi->level = level;
+ /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
+ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
+ mmu_idx == ARMMMUIdx_Stage2_S);
+ fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
+ return true;
+}
+
+static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
+ ARMMMUIdx mmu_idx,
+ int32_t address, int *prot)
+{
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ *prot = PAGE_READ | PAGE_WRITE;
+ switch (address) {
+ case 0xF0000000 ... 0xFFFFFFFF:
+ if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
+ /* hivecs execing is ok */
+ *prot |= PAGE_EXEC;
+ }
+ break;
+ case 0x00000000 ... 0x7FFFFFFF:
+ *prot |= PAGE_EXEC;
+ break;
+ }
+ } else {
+ /* Default system address map for M profile cores.
+ * The architecture specifies which regions are execute-never;
+ * at the MPU level no other checks are defined.
+ */
+ switch (address) {
+ case 0x00000000 ... 0x1fffffff: /* ROM */
+ case 0x20000000 ... 0x3fffffff: /* SRAM */
+ case 0x60000000 ... 0x7fffffff: /* RAM */
+ case 0x80000000 ... 0x9fffffff: /* RAM */
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+ case 0x40000000 ... 0x5fffffff: /* Peripheral */
+ case 0xa0000000 ... 0xbfffffff: /* Device */
+ case 0xc0000000 ... 0xdfffffff: /* Device */
+ case 0xe0000000 ... 0xffffffff: /* System */
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static bool pmsav7_use_background_region(ARMCPU *cpu,
+ ARMMMUIdx mmu_idx, bool is_user)
+{
+ /* Return true if we should use the default memory map as a
+ * "background" region if there are no hits against any MPU regions.
+ */
+ CPUARMState *env = &cpu->env;
+
+ if (is_user) {
+ return false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
+ & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ } else {
+ return regime_sctlr(env, mmu_idx) & SCTLR_BR;
+ }
+}
+
+static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
+{
+ /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
+ return arm_feature(env, ARM_FEATURE_M) &&
+ extract32(address, 20, 12) == 0xe00;
+}
+
+static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
+{
+ /* True if address is in the M profile system region
+ * 0xe0000000 - 0xffffffff
+ */
+ return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
+}
+
+static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int n;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ *phys_ptr = address;
+ *page_size = TARGET_PAGE_SIZE;
+ *prot = 0;
+
+ if (regime_translation_disabled(env, mmu_idx) ||
+ m_is_ppb_region(env, address)) {
+ /* MPU disabled or M profile PPB access: use default memory map.
+ * The other case which uses the default memory map in the
+ * v7M ARM ARM pseudocode is exception vector reads from the vector
+ * table. In QEMU those accesses are done in arm_v7m_load_vector(),
+ * which always does a direct read using address_space_ldl(), rather
+ * than going via this function, so we don't need to check that here.
+ */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else { /* MPU enabled */
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ uint32_t base = env->pmsav7.drbar[n];
+ uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
+ uint32_t rmask;
+ bool srdis = false;
+
+ if (!(env->pmsav7.drsr[n] & 0x1)) {
+ continue;
+ }
+
+ if (!rsize) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRSR[%d]: Rsize field cannot be 0\n", n);
+ continue;
+ }
+ rsize++;
+ rmask = (1ull << rsize) - 1;
+
+ if (base & rmask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRBAR[%d]: 0x%" PRIx32 " misaligned "
+ "to DRSR region size, mask = 0x%" PRIx32 "\n",
+ n, base, rmask);
+ continue;
+ }
+
+ if (address < base || address > base + rmask) {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result in
+ * incorrect TLB hits for subsequent accesses to addresses that
+ * are in this MPU region.
+ */
+ if (ranges_overlap(base, rmask,
+ address & TARGET_PAGE_MASK,
+ TARGET_PAGE_SIZE)) {
+ *page_size = 1;
+ }
+ continue;
+ }
+
+ /* Region matched */
+
+ if (rsize >= 8) { /* no subregions for regions < 256 bytes */
+ int i, snd;
+ uint32_t srdis_mask;
+
+ rsize -= 3; /* sub region size (power of 2) */
+ snd = ((address - base) >> rsize) & 0x7;
+ srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
+
+ srdis_mask = srdis ? 0x3 : 0x0;
+ for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
+ /* This will check in groups of 2, 4 and then 8, whether
+ * the subregion bits are consistent. rsize is incremented
+ * back up to give the region size, considering consistent
+ * adjacent subregions as one region. Stop testing if rsize
+ * is already big enough for an entire QEMU page.
+ */
+ int snd_rounded = snd & ~(i - 1);
+ uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
+ snd_rounded + 8, i);
+ if (srdis_mask ^ srdis_multi) {
+ break;
+ }
+ srdis_mask = (srdis_mask << i) | srdis_mask;
+ rsize++;
+ }
+ }
+ if (srdis) {
+ continue;
+ }
+ if (rsize < TARGET_PAGE_BITS) {
+ *page_size = 1 << rsize;
+ }
+ break;
+ }
+
+ if (n == -1) { /* no hits */
+ if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ /* background fault */
+ fi->type = ARMFault_Background;
+ return true;
+ }
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+        } else { /* an MPU hit! */
+ uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
+ uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
+ if (is_user) { /* User mode AP bit decoding */
+ switch (ap) {
+ case 0:
+ case 1:
+ case 5:
+ break; /* no access */
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 2:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ case 7:
+ /* for v7M, same as 6; for R profile a reserved value */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ }
+ /* fall through */
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
+ }
+ } else { /* Priv. mode AP bits decoding */
+ switch (ap) {
+ case 0:
+ break; /* no access */
+ case 1:
+ case 2:
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 5:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ case 7:
+ /* for v7M, same as 6; for R profile a reserved value */
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ }
+ /* fall through */
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DRACR[%d]: Bad value for AP bits: 0x%"
+ PRIx32 "\n", n, ap);
+ }
+ }
+
+ /* execute never */
+ if (xn) {
+ *prot &= ~PAGE_EXEC;
+ }
+ }
+ }
+
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return !(*prot & (1 << access_type));
+}
+
+static bool v8m_is_sau_exempt(CPUARMState *env,
+ uint32_t address, MMUAccessType access_type)
+{
+ /* The architecture specifies that certain address ranges are
+ * exempt from v8M SAU/IDAU checks.
+ */
+ return
+ (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+ (address >= 0xe0000000 && address <= 0xe0002fff) ||
+ (address >= 0xe000e000 && address <= 0xe000efff) ||
+ (address >= 0xe002e000 && address <= 0xe002efff) ||
+ (address >= 0xe0040000 && address <= 0xe0041fff) ||
+ (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs)
+{
+ /* Look up the security attributes for this address. Compare the
+ * pseudocode SecurityCheck() function.
+ * We assume the caller has zero-initialized *sattrs.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ int r;
+ bool idau_exempt = false, idau_ns = true, idau_nsc = true;
+ int idau_region = IREGION_NOTVALID;
+ uint32_t addr_page_base = address & TARGET_PAGE_MASK;
+ uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+
+ if (cpu->idau) {
+ IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
+ IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
+
+ iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
+ &idau_nsc);
+ }
+
+ if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+ /* 0xf0000000..0xffffffff is always S for insn fetches */
+ return;
+ }
+
+ if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
+ sattrs->ns = !regime_is_secure(env, mmu_idx);
+ return;
+ }
+
+ if (idau_region != IREGION_NOTVALID) {
+ sattrs->irvalid = true;
+ sattrs->iregion = idau_region;
+ }
+
+ switch (env->sau.ctrl & 3) {
+ case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+ break;
+ case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+ sattrs->ns = true;
+ break;
+ default: /* SAU.ENABLE == 1 */
+ for (r = 0; r < cpu->sau_sregion; r++) {
+ if (env->sau.rlar[r] & 1) {
+ uint32_t base = env->sau.rbar[r] & ~0x1f;
+ uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+ if (base <= address && limit >= address) {
+ if (base > addr_page_base || limit < addr_page_limit) {
+ sattrs->subpage = true;
+ }
+ if (sattrs->srvalid) {
+ /* If we hit in more than one region then we must report
+ * as Secure, not NS-Callable, with no valid region
+ * number info.
+ */
+ sattrs->ns = false;
+ sattrs->nsc = false;
+ sattrs->sregion = 0;
+ sattrs->srvalid = false;
+ break;
+ } else {
+ if (env->sau.rlar[r] & 2) {
+ sattrs->nsc = true;
+ } else {
+ sattrs->ns = true;
+ }
+ sattrs->srvalid = true;
+ sattrs->sregion = r;
+ }
+ } else {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result
+ * in incorrect TLB hits for subsequent accesses to
+ * addresses that are in this MPU region.
+ */
+ if (limit >= base &&
+ ranges_overlap(base, limit - base + 1,
+ addr_page_base,
+ TARGET_PAGE_SIZE)) {
+ sattrs->subpage = true;
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ /*
+ * The IDAU will override the SAU lookup results if it specifies
+ * higher security than the SAU does.
+ */
+ if (!idau_ns) {
+ if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
+ sattrs->ns = false;
+ sattrs->nsc = idau_nsc;
+ }
+ }
+}
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion)
+{
+ /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full phys-to-virt translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ * We set is_subpage to true if the region hit doesn't cover the
+ * entire TARGET_PAGE the address is within.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ bool is_user = regime_is_user(env, mmu_idx);
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ int n;
+ int matchregion = -1;
+ bool hit = false;
+ uint32_t addr_page_base = address & TARGET_PAGE_MASK;
+ uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+
+ *is_subpage = false;
+ *phys_ptr = address;
+ *prot = 0;
+ if (mregion) {
+ *mregion = -1;
+ }
+
+ /* Unlike the ARM ARM pseudocode, we don't need to check whether this
+ * was an exception vector read from the vector table (which is always
+ * done using the default system address map), because those accesses
+ * are done in arm_v7m_load_vector(), which always does a direct
+ * read using address_space_ldl(), rather than going via this function.
+ */
+ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ hit = true;
+ } else if (m_is_ppb_region(env, address)) {
+ hit = true;
+ } else {
+ if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ hit = true;
+ }
+
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ /* Note that the base address is bits [31:5] from the register
+ * with bits [4:0] all zeroes, but the limit address is bits
+ * [31:5] from the register with bits [4:0] all ones.
+ */
+ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+ uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+
+ if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+ /* Region disabled */
+ continue;
+ }
+
+ if (address < base || address > limit) {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result in
+ * incorrect TLB hits for subsequent accesses to addresses that
+ * are in this MPU region.
+ */
+ if (limit >= base &&
+ ranges_overlap(base, limit - base + 1,
+ addr_page_base,
+ TARGET_PAGE_SIZE)) {
+ *is_subpage = true;
+ }
+ continue;
+ }
+
+ if (base > addr_page_base || limit < addr_page_limit) {
+ *is_subpage = true;
+ }
+
+ if (matchregion != -1) {
+ /* Multiple regions match -- always a failure (unlike
+ * PMSAv7 where highest-numbered-region wins)
+ */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+
+ matchregion = n;
+ hit = true;
+ }
+ }
+
+ if (!hit) {
+ /* background fault */
+ fi->type = ARMFault_Background;
+ return true;
+ }
+
+ if (matchregion == -1) {
+ /* hit using the background region */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else {
+ uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+ uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+ bool pxn = false;
+
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
+ }
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ if (*prot && !xn && !(pxn && !is_user)) {
+ *prot |= PAGE_EXEC;
+ }
+ /* We don't need to look the attribute up in the MAIR0/MAIR1
+ * registers because that only tells us about cacheability.
+ */
+ if (mregion) {
+ *mregion = matchregion;
+ }
+ }
+
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return !(*prot & (1 << access_type));
+}
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, target_ulong *page_size,
+ ARMMMUFaultInfo *fi)
+{
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ V8M_SAttributes sattrs = {};
+ bool ret;
+ bool mpu_is_subpage;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ if (sattrs.nsc) {
+ fi->type = ARMFault_QEMU_NSCExec;
+ } else {
+ fi->type = ARMFault_QEMU_SFault;
+ }
+ *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ fi->type = ARMFault_QEMU_SFault;
+ *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ }
+ }
+
+ ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+ txattrs, prot, &mpu_is_subpage, fi, NULL);
+ *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
+ return ret;
+}
+
+static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ ARMMMUFaultInfo *fi)
+{
+ int n;
+ uint32_t mask;
+ uint32_t base;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (regime_translation_disabled(env, mmu_idx)) {
+ /* MPU disabled. */
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return false;
+ }
+
+ *phys_ptr = address;
+ for (n = 7; n >= 0; n--) {
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0) {
+ continue;
+ }
+ mask = 1 << ((base >> 1) & 0x1f);
+        /* Keep this shift separate from the above to avoid an
+         * (undefined) << 32.
+         */
+ mask = (mask << 1) - 1;
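+        /*
+         * The size field encodes log2(region size) - 1: e.g. a field
+         * value of 11 gives mask = (1 << 12) - 1 = 0xfff, a 4KB region.
+         */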
+ if (((base ^ address) & ~mask) == 0) {
+ break;
+ }
+ }
+ if (n < 0) {
+ fi->type = ARMFault_Background;
+ return true;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ mask = env->cp15.pmsav5_insn_ap;
+ } else {
+ mask = env->cp15.pmsav5_data_ap;
+ }
+ mask = (mask >> (n * 4)) & 0xf;
+ switch (mask) {
+ case 0:
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ case 1:
+ if (is_user) {
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 2:
+ *prot = PAGE_READ;
+ if (!is_user) {
+ *prot |= PAGE_WRITE;
+ }
+ break;
+ case 3:
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 5:
+ if (is_user) {
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot = PAGE_READ;
+ break;
+ case 6:
+ *prot = PAGE_READ;
+ break;
+ default:
+ /* Bad permission. */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+ *prot |= PAGE_EXEC;
+ return false;
+}
+
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
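+/*
+ * For example, combining a stage 1 attribute of 0xf (Write-Back,
+ * R/W-allocate) with a stage 2 attribute of 0xa (Write-Through) yields
+ * (2 << 2) | 3 = 0xb: the stage 2 Write-Through wins, but the stage 1
+ * allocation hints are kept.
+ */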
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+ if (s1 == 4 || s2 == 4) {
+ /* non-cacheable has precedence */
+ return 4;
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+ /* stage 1 write-through takes precedence */
+ return s1;
+ } else if (extract32(s2, 2, 2) == 2) {
+ /* stage 2 write-through takes precedence, but the allocation hint
+ * is still taken from stage 1
+ */
+ return (2 << 2) | extract32(s1, 0, 2);
+ } else { /* write-back */
+ return s1;
+ }
+}
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo, s2lo, s1hi, s2hi;
+ ARMCacheAttrs ret;
+ bool tagged = false;
+
+ if (s1.attrs == 0xf0) {
+ tagged = true;
+ s1.attrs = 0xff;
+ }
+
+ s1lo = extract32(s1.attrs, 0, 4);
+ s2lo = extract32(s2.attrs, 0, 4);
+ s1hi = extract32(s1.attrs, 4, 4);
+ s2hi = extract32(s2.attrs, 4, 4);
+
+ /* Combine shareability attributes (table D4-43) */
+ if (s1.shareability == 2 || s2.shareability == 2) {
+ /* if either are outer-shareable, the result is outer-shareable */
+ ret.shareability = 2;
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
+ /* if either are inner-shareable, the result is inner-shareable */
+ ret.shareability = 3;
+ } else {
+ /* both non-shareable */
+ ret.shareability = 0;
+ }
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret.attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret.attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret.attrs = 8; /* nGRE */
+ } else {
+ ret.attrs = 0xc; /* GRE */
+ }
+
+ /* Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ */
+ ret.shareability = 2;
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+
+ if (ret.attrs == 0x44) {
+ /* Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ */
+ ret.shareability = 2;
+ }
+ }
+
+ /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
+ if (tagged && ret.attrs == 0xff) {
+ ret.attrs = 0xf0;
+ }
+
+ return ret;
+}
+
+/* get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fault info (@fi)
+ * describes why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @mmu_idx: MMU index indicating required translation regime
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
+ */
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+{
+ ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+
+ if (mmu_idx != s1_mmu_idx) {
+ /* Call ourselves recursively to do the stage 1 and then stage 2
+ * translations if mmu_idx is a two-stage regime.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ hwaddr ipa;
+ int s2_prot;
+            bool ret;
+ ARMCacheAttrs cacheattrs2 = {};
+ ARMMMUIdx s2_mmu_idx;
+ bool is_el0;
+
+ ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
+ attrs, prot, page_size, fi, cacheattrs);
+
+ /* If S1 fails or S2 is disabled, return early. */
+ if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+ *phys_ptr = ipa;
+ return ret;
+ }
+
+ s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+ is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
+
+ /* S1 is done. Now do S2 translation. */
+ ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
+ phys_ptr, attrs, &s2_prot,
+ page_size, fi, &cacheattrs2);
+ fi->s2addr = ipa;
+ /* Combine the S1 and S2 perms. */
+ *prot &= s2_prot;
+
+ /* If S2 fails, return early. */
+ if (ret) {
+ return ret;
+ }
+
+ /* Combine the S1 and S2 cache attributes. */
+ if (arm_hcr_el2_eff(env) & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ * Do not overwrite Tagged within attrs.
+ */
+ if (cacheattrs->attrs != 0xf0) {
+ cacheattrs->attrs = 0xff;
+ }
+ cacheattrs->shareability = 0;
+ }
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+
+ /* Check if IPA translates to secure or non-secure PA space. */
+ if (arm_is_secure_below_el3(env)) {
+ if (attrs->secure) {
+ attrs->secure =
+ !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
+ } else {
+ attrs->secure =
+ !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
+ || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
+ }
+ }
+            return false;
+ } else {
+ /*
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ mmu_idx = stage_1_mmu_idx(mmu_idx);
+ }
+ }
+
+ /* The page table entries may downgrade secure to non-secure, but
+     * cannot upgrade a non-secure translation regime's attributes
+ * to secure.
+ */
+ attrs->secure = regime_is_secure(env, mmu_idx);
+ attrs->user = regime_is_user(env, mmu_idx);
+
+ /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ * In v7 and earlier it affects all stage 1 translations.
+ */
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
+ && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (regime_el(env, mmu_idx) == 3) {
+ address += env->cp15.fcseidr_s;
+ } else {
+ address += env->cp15.fcseidr_ns;
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
+ bool ret;
+ *page_size = TARGET_PAGE_SIZE;
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* PMSAv8 */
+ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size, fi);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ /* PMSAv7 */
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, page_size, fi);
+ } else {
+ /* Pre-v7 MPU */
+ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fi);
+ }
+ qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
+ " mmu_idx %u -> %s (prot %c%c%c)\n",
+ access_type == MMU_DATA_LOAD ? "reading" :
+ (access_type == MMU_DATA_STORE ? "writing" : "execute"),
+ (uint32_t)address, mmu_idx,
+ ret ? "Miss" : "Hit",
+ *prot & PAGE_READ ? 'r' : '-',
+ *prot & PAGE_WRITE ? 'w' : '-',
+ *prot & PAGE_EXEC ? 'x' : '-');
+
+ return ret;
+ }
+
+ /* Definitely a real MMU, not an MPU */
+
+ if (regime_translation_disabled(env, mmu_idx)) {
+ uint64_t hcr;
+ uint8_t memattr;
+
+ /*
+ * MMU disabled. S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.TranslateAddressS1Off.
+ */
+ if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+ int r_el = regime_el(env, mmu_idx);
+ if (arm_el_is_aa64(env, r_el)) {
+ int pamax = arm_pamax(env_archcpu(env));
+ uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
+ int addrtop, tbi;
+
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (access_type == MMU_INST_FETCH) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> extract64(address, 55, 1)) & 1;
+ addrtop = (tbi ? 55 : 63);
+
+ if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+ fi->type = ARMFault_AddressSize;
+ fi->level = 0;
+ fi->stage2 = false;
+                    return true;
+ }
+
+ /*
+ * When TBI is disabled, we've just validated that all of the
+ * bits above PAMax are zero, so logically we only need to
+ * clear the top byte for TBI. But it's clearer to follow
+ * the pseudocode set of addrdesc.paddress.
+ */
+ address = extract64(address, 0, 52);
+ }
+ }
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ *page_size = TARGET_PAGE_SIZE;
+
+ /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+ hcr = arm_hcr_el2_eff(env);
+ cacheattrs->shareability = 0;
+ if (hcr & HCR_DC) {
+ if (hcr & HCR_DCT) {
+ memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+ } else {
+ memattr = 0xff; /* Normal, WB, RWA */
+ }
+ } else if (access_type == MMU_INST_FETCH) {
+ if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+ memattr = 0xee; /* Normal, WT, RA, NT */
+ } else {
+ memattr = 0x44; /* Normal, NC, No */
+ }
+            cacheattrs->shareability = 2; /* outer shareable */
+ } else {
+ memattr = 0x00; /* Device, nGnRnE */
+ }
+ cacheattrs->attrs = memattr;
+        return false;
+ }
+
+ if (regime_using_lpae_format(env, mmu_idx)) {
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
+ phys_ptr, attrs, prot, page_size,
+ fi, cacheattrs);
+ } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+ return get_phys_addr_v6(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size, fi);
+ } else {
+ return get_phys_addr_v5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, page_size, fi);
+ }
+}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ bool ret;
+ ARMMMUFaultInfo fi = {};
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMCacheAttrs cacheattrs = {};
+
+ *attrs = (MemTxAttrs) {};
+
+ ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
+ attrs, &prot, &page_size, &fi, &cacheattrs);
+
+ if (ret) {
+ return -1;
+ }
+ return phys_addr;
+}
+
+#endif
+
+/*
+ * Note that signed overflow is undefined in C.  The following routines are
+ * careful to use unsigned types where modulo arithmetic is required.
+ * Failure to do so _will_ break on newer gcc.
+ */
+
+/* Signed saturating arithmetic. */
+
+/* Perform 16-bit signed saturating addition. */
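+/*
+ * Overflow has occurred iff the result's sign differs from the (common)
+ * sign of both operands: e.g. 0x7000 + 0x7000 = 0xe000 flips the sign
+ * bit while the operands agree, so the result saturates to 0x7fff.
+ */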
+static inline uint16_t add16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
+        if (a & 0x8000) {
+            res = 0x8000;
+        } else {
+            res = 0x7fff;
+        }
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating addition. */
+static inline uint8_t add8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
+        if (a & 0x80) {
+            res = 0x80;
+        } else {
+            res = 0x7f;
+        }
+ }
+ return res;
+}
+
+/* Perform 16-bit signed saturating subtraction. */
+static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
+        if (a & 0x8000) {
+            res = 0x8000;
+        } else {
+            res = 0x7fff;
+        }
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating subtraction. */
+static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
+        if (a & 0x80) {
+            res = 0x80;
+        } else {
+            res = 0x7f;
+        }
+ }
+ return res;
+}
+
+#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
+#define PFX q
+
+#include "op_addsub.h"
+
+/* Unsigned saturating arithmetic. */
+static inline uint16_t add16_usat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+ res = a + b;
+    if (res < a) {
+        res = 0xffff;
+    }
+ return res;
+}
+
+static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return 0;
+    }
+}
+
+static inline uint8_t add8_usat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+ res = a + b;
+    if (res < a) {
+        res = 0xff;
+    }
+ return res;
+}
+
+static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return 0;
+    }
+}
+
+#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
+#define PFX uq
+
+#include "op_addsub.h"
+
+/* Signed modulo arithmetic. */
+#define SARITH16(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
+ RESULT(sum, n, 16); \
+ if (sum >= 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SARITH8(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
+ RESULT(sum, n, 8); \
+ if (sum >= 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+
+#define ADD16(a, b, n) SARITH16(a, b, n, +)
+#define SUB16(a, b, n) SARITH16(a, b, n, -)
+#define ADD8(a, b, n) SARITH8(a, b, n, +)
+#define SUB8(a, b, n) SARITH8(a, b, n, -)
+#define PFX s
+#define ARITH_GE
+
+#include "op_addsub.h"
+
+/* Unsigned modulo arithmetic. */
+#define ADD16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 1) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define ADD8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 1) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define SUB16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SUB8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define PFX u
+#define ARITH_GE
+
+#include "op_addsub.h"
+
+/* Halved signed arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define PFX sh
+
+#include "op_addsub.h"
+
+/* Halved unsigned arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define PFX uh
+
+#include "op_addsub.h"
+
+static inline uint8_t do_usad(uint8_t a, uint8_t b)
+{
+    if (a > b) {
+        return a - b;
+    } else {
+        return b - a;
+    }
+}
+
+/* Unsigned sum of absolute byte differences. */
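+/*
+ * Example: usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| + |2-3| +
+ * |1-4| = 8 over the four byte lanes.
+ */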
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
+{
+ uint32_t sum;
+ sum = do_usad(a, b);
+ sum += do_usad(a >> 8, b >> 8);
+ sum += do_usad(a >> 16, b >> 16);
+ sum += do_usad(a >> 24, b >> 24);
+ return sum;
+}
+
+/* For ARMv6 SEL instruction. */
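+/*
+ * Each GE flag bit selects one byte lane: e.g. flags = 0b0101 gives
+ * mask = 0x00ff00ff, taking bytes 0 and 2 from a and bytes 1 and 3
+ * from b.
+ */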
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+
+ mask = 0;
+    if (flags & 1) {
+        mask |= 0xff;
+    }
+    if (flags & 2) {
+        mask |= 0xff00;
+    }
+    if (flags & 4) {
+        mask |= 0xff0000;
+    }
+    if (flags & 8) {
+        mask |= 0xff000000;
+    }
+ return (a & mask) | (b & ~mask);
+}
+
+/* CRC helpers.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
+
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+int fp_exception_el(CPUARMState *env, int cur_el)
+{
+#ifndef CONFIG_USER_ONLY
+ /* CPACR and the CPTR registers don't exist before v6, so FP is
+ * always accessible.
+ */
+ if (!arm_feature(env, ARM_FEATURE_V6)) {
+ return 0;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* CPACR can cause a NOCP UsageFault taken to current security state */
+ if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
+ return 1;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
+ if (!extract32(env->v7m.nsacr, 10, 1)) {
+ /* FP insns cause a NOCP UsageFault taken to Secure */
+ return 3;
+ }
+ }
+
+ return 0;
+ }
+
+ /* The CPACR controls traps to EL1, or PL1 if we're 32-bit:
+ * 0, 2 : trap EL0 and EL1/PL1 accesses
+ * 1 : trap only EL0 accesses
+ * 3 : trap no accesses
+ * This register is ignored if E2H+TGE are both set.
+ */
+ if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+
+ switch (fpen) {
+ case 0:
+ case 2:
+ if (cur_el == 0 || cur_el == 1) {
+ /* Trap to PL1, which might be EL1 or EL3 */
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ return 3;
+ }
+ return 1;
+ }
+ if (cur_el == 3 && !is_a64(env)) {
+ /* Secure PL1 running at EL3 */
+ return 3;
+ }
+ break;
+ case 1:
+ if (cur_el == 0) {
+ return 1;
+ }
+ break;
+ case 3:
+ break;
+ }
+ }
+
+ /*
+ * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
+ * to control non-secure access to the FPU. It doesn't have any
+ * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
+ */
+ if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
+ cur_el <= 2 && !arm_is_secure_below_el3(env))) {
+ if (!extract32(env->cp15.nsacr, 10, 1)) {
+ /* FP insns act as UNDEF */
+ return cur_el == 2 ? 2 : 1;
+ }
+ }
+
+ /* For the CPTR registers we don't need to guard with an ARM_FEATURE
+ * check because zero bits in the registers mean "don't trap".
+ */
+
+ /* CPTR_EL2 : present in v7VE or v8 */
+ if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
+ && arm_is_el2_enabled(env)) {
+ /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
+ return 2;
+ }
+
+ /* CPTR_EL3 : present in v8 */
+ if (extract32(env->cp15.cptr_el[3], 10, 1)) {
+ /* Trap all FP ops to EL3 */
+ return 3;
+ }
+#endif
+ return 0;
+}
+
+/* Return the exception level we're running at if this is our mmu_idx */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx & ARM_MMU_IDX_M) {
+ return mmu_idx & ARM_MMU_IDX_M_PRIV;
+ }
+
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE20_0:
+ return 0;
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ return 1;
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE2:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ return 2;
+ case ARMMMUIdx_SE3:
+ return 3;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#ifndef CONFIG_TCG
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
+{
+ g_assert_not_reached();
+}
+#endif
+
+ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
+{
+ ARMMMUIdx idx;
+ uint64_t hcr;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
+ }
+
+ /* See ARM pseudo-function ELIsInHost. */
+ switch (el) {
+ case 0:
+ hcr = arm_hcr_el2_eff(env);
+ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
+ idx = ARMMMUIdx_E20_0;
+ } else {
+ idx = ARMMMUIdx_E10_0;
+ }
+ break;
+ case 1:
+ if (env->pstate & PSTATE_PAN) {
+ idx = ARMMMUIdx_E10_1_PAN;
+ } else {
+ idx = ARMMMUIdx_E10_1;
+ }
+ break;
+ case 2:
+ /* Note that TGE does not apply at EL2. */
+ if (arm_hcr_el2_eff(env) & HCR_E2H) {
+ if (env->pstate & PSTATE_PAN) {
+ idx = ARMMMUIdx_E20_2_PAN;
+ } else {
+ idx = ARMMMUIdx_E20_2;
+ }
+ } else {
+ idx = ARMMMUIdx_E2;
+ }
+ break;
+ case 3:
+ return ARMMMUIdx_SE3;
+ default:
+ g_assert_not_reached();
+ }
+
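+ /*
+ * This assumes the usual index encoding in which each Secure mmu_idx
+ * is its Non-secure counterpart with ARM_MMU_IDX_A_NS clear, so
+ * clearing the bit maps e.g. ARMMMUIdx_E10_0 to ARMMMUIdx_SE10_0.
+ */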
+ if (arm_is_secure_below_el3(env)) {
+ idx &= ~ARM_MMU_IDX_A_NS;
+ }
+
+ return idx;
+}
+
+ARMMMUIdx arm_mmu_idx(CPUARMState *env)
+{
+ return arm_mmu_idx_el(env, arm_current_el(env));
+}
+
+#ifndef CONFIG_USER_ONLY
+ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
+{
+ return stage_1_mmu_idx(arm_mmu_idx(env));
+}
+#endif
+
+static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
+{
+ DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
+ DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
+
+ if (arm_singlestep_active(env)) {
+ DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
+ }
+ return flags;
+}
+
+static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx,
+ CPUARMTBFlags flags)
+{
+ bool sctlr_b = arm_sctlr_b(env);
+
+ if (sctlr_b) {
+ DP_TBFLAG_A32(flags, SCTLR__B, 1);
+ }
+ if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
+ }
+ DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = {};
+ uint32_t ccr = env->v7m.ccr[env->v7m.secure];
+
+ /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
+ if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_v7m_is_handler_mode(env)) {
+ DP_TBFLAG_M32(flags, HANDLER, 1);
+ }
+
+ /*
+ * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
+ * is suppressing them because the requested execution priority
+ * is less than 0.
+ */
+ if (arm_feature(env, ARM_FEATURE_V8) &&
+ !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
+ (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+ DP_TBFLAG_M32(flags, STACKCHECK, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
+{
+ CPUARMTBFlags flags = {};
+
+ DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
+ return flags;
+}
+
+static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
+ int el = arm_current_el(env);
+
+ if (arm_sctlr(env, el) & SCTLR_A) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_el_is_aa64(env, 1)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+
+ if (el < 2 && env->cp15.hstr_el2 &&
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
+ }
+
+ if (env->uncached_cpsr & CPSR_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
+ ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
+ uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
+ uint64_t sctlr;
+ int tbii, tbid;
+
+ DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
+
+ /* Get control bits for tagged addresses. */
+ tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
+ tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
+
+ DP_TBFLAG_A64(flags, TBII, tbii);
+ DP_TBFLAG_A64(flags, TBID, tbid);
+
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ int sve_el = sve_exception_el(env, el);
+ uint32_t zcr_len;
+
+ /*
+ * If SVE is disabled, but FP is enabled,
+ * then the effective len is 0.
+ */
+ if (sve_el != 0 && fp_el == 0) {
+ zcr_len = 0;
+ } else {
+ zcr_len = sve_zcr_len_for_el(env, el);
+ }
+ DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
+ DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
+ }
+
+ sctlr = regime_sctlr(env, stage1);
+
+ if (sctlr & SCTLR_A) {
+ DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
+ }
+
+ if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
+ DP_TBFLAG_ANY(flags, BE_DATA, 1);
+ }
+
+ if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
+ /*
+ * In order to save space in flags, we record only whether
+ * pauth is "inactive", meaning all insns are implemented as
+ * a nop, or "active" when some action must be performed.
+ * The decision of which action to take is left to a helper.
+ */
+ if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
+ DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
+ if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
+ DP_TBFLAG_A64(flags, BT, 1);
+ }
+ }
+
+ /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
+ if (!(env->pstate & PSTATE_UAO)) {
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ /* TODO: ARMv8.3-NV */
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
+ break;
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ /*
+ * Note that E20_2 is gated by HCR_EL2.E2H == 1, but E20_0 is
+ * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
+ */
+ if (env->cp15.hcr_el2 & HCR_TGE) {
+ DP_TBFLAG_A64(flags, UNPRIV, 1);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (env->pstate & PSTATE_IL) {
+ DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
+ }
+
+ if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
+ /*
+ * Set MTE_ACTIVE if any access may be Checked, and leave clear
+ * if all accesses must be Unchecked:
+ * 1) If no TBI, then there are no tags in the address to check,
+ * 2) If Tag Check Override, then all accesses are Unchecked,
+ * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
+ * 4) If no Allocation Tag Access, then all accesses are Unchecked.
+ */
+ if (allocation_tag_access_enabled(env, el, sctlr)) {
+ DP_TBFLAG_A64(flags, ATA, 1);
+ if (tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
+ DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
+ }
+ }
+ /* And again for unprivileged accesses, if required. */
+ if (EX_TBFLAG_A64(flags, UNPRIV)
+ && tbid
+ && !(env->pstate & PSTATE_TCO)
+ && (sctlr & SCTLR_TCF0)
+ && allocation_tag_access_enabled(env, 0, sctlr)) {
+ DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
+ }
+ /* Cache TCMA as well as TBI. */
+ DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
+ }
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
+static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ if (is_a64(env)) {
+ return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
+ } else if (arm_feature(env, ARM_FEATURE_M)) {
+ return rebuild_hflags_m32(env, fp_el, mmu_idx);
+ } else {
+ return rebuild_hflags_a32(env, fp_el, mmu_idx);
+ }
+}
+
+void arm_rebuild_hflags(CPUARMState *env)
+{
+ env->hflags = rebuild_hflags_internal(env);
+}
+
+/*
+ * If we have triggered an EL state change we can't rely on the
+ * translator having passed it to us; we need to recompute.
+ */
+void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
+}
+
+/*
+ * If we have triggered an EL state change we can't rely on the
+ * translator having passed it to us; we need to recompute.
+ */
+void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+ env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
+void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
+{
+ int fp_el = fp_exception_el(env, el);
+ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+
+ env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
+}
+
+static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
+{
+#ifdef CONFIG_DEBUG_TCG
+ CPUARMTBFlags c = env->hflags;
+ CPUARMTBFlags r = rebuild_hflags_internal(env);
+
+ if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
+ fprintf(stderr, "TCG hflags mismatch "
+ "(current:(0x%08x,0x" TARGET_FMT_lx ")"
+ " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ c.flags, c.flags2, r.flags, r.flags2);
+ abort();
+ }
+#endif
+}
+
+static bool mve_no_pred(CPUARMState *env)
+{
+ /*
+ * Return true if there is definitely no predication of MVE
+ * instructions by VPR or LTPSIZE. (Returning false even if there
+ * isn't any predication is OK; generated code will just be
+ * a little worse.)
+ * If the CPU does not implement MVE then this TB flag is always 0.
+ *
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+ * logic in gen_update_fp_context() needs to be updated to match.
+ *
+ * We do not include the effect of the ECI bits here -- they are
+ * tracked in other TB flags. This simplifies the logic for
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
+ * and thus need to end the TB?".
+ */
+ if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+ return false;
+ }
+ if (env->v7m.vpr) {
+ return false;
+ }
+ if (env->v7m.ltpsize < 4) {
+ return false;
+ }
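+ /*
+ * (An LTPSIZE value of 4 is the architected "no loop tail
+ * predication" encoding, which is why any smaller value forces
+ * the predicated path here.)
+ */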
+ return true;
+}
+
+void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ CPUARMTBFlags flags;
+
+ assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;
+
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
+ *pc = env->pc;
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
+ }
+ } else {
+ *pc = env->regs[15];
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
+ }
+
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ DP_TBFLAG_M32(flags, LSPACT, 1);
+ }
+
+ if (mve_no_pred(env)) {
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+ }
+ } else {
+ /*
+ * Note that XSCALE_CPAR shares bits with VECSTRIDE.
+ * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
+ */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
+ } else {
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+ }
+
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
+ }
+
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
+ */
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
+ }
+
+ *pflags = flags.flags;
+ *cs_base = flags.flags2;
+}
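+
+/*
+ * The two halves of CPUARMTBFlags are handed back through the generic
+ * TB lookup interface: flags.flags becomes the TB flags word and
+ * flags.flags2 travels in cs_base, which this target does not
+ * otherwise use.
+ */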
+
+#ifdef TARGET_AARCH64
+/*
+ * The manual says that when SVE is enabled and VQ is widened the
+ * implementation is allowed to zero the previously inaccessible
+ * portion of the registers. The corollary to that is that when
+ * SVE is enabled and VQ is narrowed we are also allowed to zero
+ * the now inaccessible portion of the registers.
+ *
+ * The intent of this is that no predicate bit beyond VQ is ever set,
+ * which means that some operations on predicate registers themselves
+ * may operate on full uint64_t or even unrolled across the maximum
+ * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
+ * may well be cheaper than conditionals to restrict the operation
+ * to the relevant portion of a uint16_t[16].
+ */
+void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
+{
+ int i, j;
+ uint64_t pmask;
+
+ assert(vq >= 1 && vq <= ARM_MAX_VQ);
+ assert(vq <= env_archcpu(env)->sve_max_vq);
+
+ /* Zap the high bits of the zregs. */
+ for (i = 0; i < 32; i++) {
+ memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
+ }
+
+ /* Zap the high bits of the pregs and ffr. */
+ pmask = 0;
+ if (vq & 3) {
+ pmask = ~(-1ULL << (16 * (vq & 3)));
+ }
+ for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
+ for (i = 0; i < 17; ++i) {
+ env->vfp.pregs[i].p[j] &= pmask;
+ }
+ pmask = 0;
+ }
+}
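+
+/*
+ * Worked example (illustrative, assuming ARM_MAX_VQ == 16): for
+ * vq == 5 each predicate register holds 5 * 16 == 80 valid bits,
+ * so p[0] is kept in full, p[1] is masked with ~(-1ULL << 16) ==
+ * 0xffff, and p[2] and p[3] are cleared entirely.
+ */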
+
+/*
+ * Notice a change in SVE vector size when changing EL.
+ */
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int old_len, new_len;
+ bool old_a64, new_a64;
+
+ /* Nothing to do if no SVE. */
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
+ return;
+ }
+
+ /* Nothing to do if FP is disabled in either EL. */
+ if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
+ return;
+ }
+
+ /*
+ * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
+ * at ELx, or not available because the EL is in AArch32 state, then
+ * for all purposes other than a direct read, the ZCR_ELx.LEN field
+ * has an effective value of 0".
+ *
+ * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
+ * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
+ * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
+ * we already have the correct register contents when encountering the
+ * vq0->vq0 transition between EL0->EL1.
+ */
+ old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+ old_len = (old_a64 && !sve_exception_el(env, old_el)
+ ? sve_zcr_len_for_el(env, old_el) : 0);
+ new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+ new_len = (new_a64 && !sve_exception_el(env, new_el)
+ ? sve_zcr_len_for_el(env, new_el) : 0);
+
+ /* When changing vector length, clear inaccessible state. */
+ if (new_len < old_len) {
+ aarch64_sve_narrow_vq(env, new_len + 1);
+ }
+}
+#endif
diff --git a/target/arm/helper.h b/target/arm/helper.h
new file mode 100644
index 000000000..448a86edf
--- /dev/null
+++ b/target/arm/helper.h
@@ -0,0 +1,1023 @@
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
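+
+/*
+ * Note the split above: the s/u parallel add/sub helpers return GE
+ * flags via the trailing ptr, so they are DEF_HELPER_3; the q/sh/uq/uh
+ * families have no flags to report and are DEF_HELPER_2.
+ */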
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+ i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, void, env, i32)
+DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_2(wfi, void, env, i32)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
+
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
+DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
+DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
+DEF_HELPER_2(get_cp_reg, i32, env, ptr)
+DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
+DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
+
+DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_addh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_subh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_mulh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_divh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minnumh, f16, f16, f16, ptr)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
+DEF_HELPER_1(vfp_negh, f16, f16)
+DEF_HELPER_1(vfp_negs, f32, f32)
+DEF_HELPER_1(vfp_negd, f64, f64)
+DEF_HELPER_1(vfp_absh, f16, f16)
+DEF_HELPER_1(vfp_abss, f32, f32)
+DEF_HELPER_1(vfp_absd, f64, f64)
+DEF_HELPER_2(vfp_sqrth, f16, f16, env)
+DEF_HELPER_2(vfp_sqrts, f32, f32, env)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
+DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, ptr)
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, ptr)
+
+DEF_HELPER_2(vfp_uitoh, f16, i32, ptr)
+DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
+DEF_HELPER_2(vfp_uitod, f64, i32, ptr)
+DEF_HELPER_2(vfp_sitoh, f16, i32, ptr)
+DEF_HELPER_2(vfp_sitos, f32, i32, ptr)
+DEF_HELPER_2(vfp_sitod, f64, i32, ptr)
+
+DEF_HELPER_2(vfp_touih, i32, f16, ptr)
+DEF_HELPER_2(vfp_touis, i32, f32, ptr)
+DEF_HELPER_2(vfp_touid, i32, f64, ptr)
+DEF_HELPER_2(vfp_touizh, i32, f16, ptr)
+DEF_HELPER_2(vfp_touizs, i32, f32, ptr)
+DEF_HELPER_2(vfp_touizd, i32, f64, ptr)
+DEF_HELPER_2(vfp_tosih, s32, f16, ptr)
+DEF_HELPER_2(vfp_tosis, s32, f32, ptr)
+DEF_HELPER_2(vfp_tosid, s32, f64, ptr)
+DEF_HELPER_2(vfp_tosizh, s32, f16, ptr)
+DEF_HELPER_2(vfp_tosizs, s32, f32, ptr)
+DEF_HELPER_2(vfp_tosizd, s32, f64, ptr)
+
+DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toshh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toulh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_toslh, i32, f16, i32, ptr)
+DEF_HELPER_3(vfp_touqh, i64, f16, i32, ptr)
+DEF_HELPER_3(vfp_tosqh, i64, f16, i32, ptr)
+DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
+DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr)
+DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr)
+DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr)
+DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_shtoh, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_sltoh, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_ultoh, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, ptr)
+DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, ptr)
+
+DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, ptr)
+DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, ptr)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, ptr)
+
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, ptr, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, ptr, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, ptr, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, ptr, i32)
+
+DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
+DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
+DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, ptr)
+
+DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
+
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, ptr)
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, ptr)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
+
+DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
+DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr)
+
+DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
+
+/* neon_helper.c */
+DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64)
+
+DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hsub_u32, i32, i32, i32)
+
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_1(neon_cnt_u8, i32, i32)
+DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
+DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
+DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
+
+DEF_HELPER_1(neon_narrow_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u32, i64, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr)
+DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr)
+
+/* iwmmxt_helper.c */
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_paddh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_pmaxh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_pminh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_padds, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_pmaxs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_pmins, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_fs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_fu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_hs, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr)
+
+DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+#ifdef TARGET_AARCH64
+#include "helper-a64.h"
+#include "helper-sve.h"
+#endif
+
+#include "helper-mve.h"
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
new file mode 100644
index 000000000..0dc96560d
--- /dev/null
+++ b/target/arm/hvf/hvf.c
@@ -0,0 +1,1285 @@
+/*
+ * QEMU Hypervisor.framework support for Apple Silicon
+ *
+ * Copyright 2020 Alexander Graf <agraf@csgraf.de>
+ * Copyright 2020 Google LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/runstate.h"
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
+#include "sysemu/hw_accel.h"
+#include "hvf_arm.h"
+
+#include <mach/mach_time.h>
+
+#include "exec/address-spaces.h"
+#include "hw/irq.h"
+#include "qemu/main-loop.h"
+#include "sysemu/cpus.h"
+#include "arm-powerctl.h"
+#include "target/arm/cpu.h"
+#include "target/arm/internals.h"
+#include "trace/trace-target_arm_hvf.h"
+#include "migration/vmstate.h"
+
+#define HVF_SYSREG(crn, crm, op0, op1, op2) \
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
+#define PL1_WRITE_MASK 0x4
+
+#define SYSREG(op0, op1, crn, crm, op2) \
+ ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
+#define SYSREG_MASK SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
+#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4)
+#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
+#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
+#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
+#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
+#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0)
+#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
+#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
+#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
+#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3)
+#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4)
+#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5)
+#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6)
+#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7)
+#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
+#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7)
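+
+/*
+ * A worked example of the encoding above: SYSREG(3, 3, 14, 0, 1), i.e.
+ * CNTPCT_EL0, expands to (3 << 20) | (1 << 17) | (3 << 14) | (14 << 10) |
+ * (0 << 1) = 0x32f800. The shift/mask pairs in the trace calls below undo
+ * this packing to recover the individual op0/op1/CRn/CRm/op2 fields.
+ */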
+
+#define WFX_IS_WFE (1 << 0)
+
+#define TMR_CTL_ENABLE (1 << 0)
+#define TMR_CTL_IMASK (1 << 1)
+#define TMR_CTL_ISTATUS (1 << 2)
+
+static void hvf_wfi(CPUState *cpu);
+
+typedef struct HVFVTimer {
+ /* Vtimer value during migration and paused state */
+ uint64_t vtimer_val;
+} HVFVTimer;
+
+static HVFVTimer vtimer;
+
+typedef struct ARMHostCPUFeatures {
+ ARMISARegisters isar;
+ uint64_t features;
+ uint64_t midr;
+ uint32_t reset_sctlr;
+ const char *dtb_compatible;
+} ARMHostCPUFeatures;
+
+static ARMHostCPUFeatures arm_host_cpu_features;
+
+struct hvf_reg_match {
+ int reg;
+ uint64_t offset;
+};
+
+static const struct hvf_reg_match hvf_reg_match[] = {
+ { HV_REG_X0, offsetof(CPUARMState, xregs[0]) },
+ { HV_REG_X1, offsetof(CPUARMState, xregs[1]) },
+ { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
+ { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
+ { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
+ { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
+ { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
+ { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
+ { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
+ { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
+ { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
+ { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
+ { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
+ { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
+ { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
+ { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
+ { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
+ { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
+ { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
+ { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
+ { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
+ { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
+ { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
+ { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
+ { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
+ { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
+ { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
+ { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
+ { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
+ { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
+ { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
+ { HV_REG_PC, offsetof(CPUARMState, pc) },
+};
+
+static const struct hvf_reg_match hvf_fpreg_match[] = {
+ { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
+ { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
+ { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
+ { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
+ { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
+ { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) },
+ { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) },
+ { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) },
+ { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) },
+ { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) },
+ { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
+ { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
+ { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
+ { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
+ { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
+ { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
+ { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
+ { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
+ { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
+ { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
+ { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
+ { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
+ { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
+ { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
+ { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
+ { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
+ { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
+ { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
+ { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
+ { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
+ { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
+ { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
+};
+
+struct hvf_sreg_match {
+ int reg;
+ uint32_t key;
+ uint32_t cp_idx;
+};
+
+static struct hvf_sreg_match hvf_sreg_match[] = {
+ { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },
+
+ { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
+ { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
+ { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
+ { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },
+
+#ifdef SYNC_NO_RAW_REGS
+ /*
+ * The registers below are manually synced on init because they are
+ * marked as NO_RAW. We still list them to make number space sync easier.
+ */
+ { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
+ { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
+ { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
+ { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
+#endif
+ { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
+#ifdef SYNC_NO_MMFR0
+ /* We keep the hardware MMFR0 around. HW limits are there anyway */
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
+#endif
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },
+
+ { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
+ { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
+ { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
+ { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
+ { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
+ { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },
+
+ { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
+ { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
+ { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
+ { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
+ { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
+ { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
+ { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
+ { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
+ { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
+ { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },
+
+ { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
+ { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
+ { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
+ { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
+ { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
+ { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
+ { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
+ { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
+ { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
+ { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
+ { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
+ { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
+ { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
+ { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
+ { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
+ { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
+ { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
+ { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
+ { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
+ { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
+};
+
+int hvf_get_registers(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ hv_return_t ret;
+ uint64_t val;
+ hv_simd_fp_uchar16_t fpval;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
+ assert_hvf_ok(ret);
+ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
+ ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ &fpval);
+ assert_hvf_ok(ret);
+ memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
+ }
+
+ val = 0;
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
+ assert_hvf_ok(ret);
+ vfp_set_fpcr(env, val);
+
+ val = 0;
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
+ assert_hvf_ok(ret);
+ vfp_set_fpsr(env, val);
+
+ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
+ assert_hvf_ok(ret);
+ pstate_write(env, val);
+
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
+ if (hvf_sreg_match[i].cp_idx == -1) {
+ continue;
+ }
+
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
+ assert_hvf_ok(ret);
+
+ arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
+ }
+ assert(write_list_to_cpustate(arm_cpu));
+
+ aarch64_restore_sp(env, arm_current_el(env));
+
+ return 0;
+}
+
+int hvf_put_registers(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ hv_return_t ret;
+ uint64_t val;
+ hv_simd_fp_uchar16_t fpval;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
+ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
+ assert_hvf_ok(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
+ memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
+ ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ fpval);
+ assert_hvf_ok(ret);
+ }
+
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
+ assert_hvf_ok(ret);
+
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
+ assert_hvf_ok(ret);
+
+ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
+ assert_hvf_ok(ret);
+
+ aarch64_save_sp(env, arm_current_el(env));
+
+ assert(write_cpustate_to_list(arm_cpu, false));
+ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
+ if (hvf_sreg_match[i].cp_idx == -1) {
+ continue;
+ }
+
+ val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
+ assert_hvf_ok(ret);
+ }
+
+ ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
+ assert_hvf_ok(ret);
+
+ return 0;
+}
+
+static void flush_cpu_state(CPUState *cpu)
+{
+ if (cpu->vcpu_dirty) {
+ hvf_put_registers(cpu);
+ cpu->vcpu_dirty = false;
+ }
+}
+
+static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
+{
+ hv_return_t r;
+
+ flush_cpu_state(cpu);
+
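+ /* rt == 31 is the zero register; writes to XZR are simply discarded. */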
+ if (rt < 31) {
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
+ assert_hvf_ok(r);
+ }
+}
+
+static uint64_t hvf_get_reg(CPUState *cpu, int rt)
+{
+ uint64_t val = 0;
+ hv_return_t r;
+
+ flush_cpu_state(cpu);
+
+ if (rt < 31) {
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
+ assert_hvf_ok(r);
+ }
+
+ return val;
+}
+
+static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
+{
+ ARMISARegisters host_isar = {};
+ const struct isar_regs {
+ int reg;
+ uint64_t *val;
+ } regs[] = {
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+ };
+ hv_vcpu_t fd;
+ hv_return_t r = HV_SUCCESS;
+ hv_vcpu_exit_t *exit;
+ int i;
+
+ ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->features = (1ULL << ARM_FEATURE_V8) |
+ (1ULL << ARM_FEATURE_NEON) |
+ (1ULL << ARM_FEATURE_AARCH64) |
+ (1ULL << ARM_FEATURE_PMU) |
+ (1ULL << ARM_FEATURE_GENERIC_TIMER);
+
+ /* We set up a small vcpu to extract host registers */
+
+ if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
+ }
+ r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
+ r |= hv_vcpu_destroy(fd);
+
+ ahcf->isar = host_isar;
+
+ /*
+ * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
+ * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
+ */
+ ahcf->reset_sctlr = 0x30100180;
+ /*
+ * SCTLR.SPAN=1 disables the automatic setting of PSTATE.PAN on exception
+ * entry. Set it on boot to improve compatibility; guest software can
+ * re-enable the PAN-on-entry behaviour by clearing SPAN to 0.
+ */
+ ahcf->reset_sctlr |= 0x00800000;
+
+ /* Make sure we don't advertise AArch32 support for EL0/EL1 */
+ if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
+ return false;
+ }
+
+ return r == HV_SUCCESS;
+}
+
+void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ if (!arm_host_cpu_features.dtb_compatible) {
+ if (!hvf_enabled() ||
+ !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
+ /*
+ * We can't report this error yet, so flag that we need to
+ * in arm_cpu_realizefn().
+ */
+ cpu->host_cpu_probe_failed = true;
+ return;
+ }
+ }
+
+ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
+ cpu->isar = arm_host_cpu_features.isar;
+ cpu->env.features = arm_host_cpu_features.features;
+ cpu->midr = arm_host_cpu_features.midr;
+ cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
+}
+
+void hvf_arch_vcpu_destroy(CPUState *cpu)
+{
+}
+
+int hvf_arch_init_vcpu(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
+ uint32_t sregs_cnt = 0;
+ uint64_t pfr;
+ hv_return_t ret;
+ int i;
+
+ env->aarch64 = 1;
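+ /* Under HVF the guest counter runs at the host rate; mirror host CNTFRQ. */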
+ asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));
+
+ /* Allocate enough space for our sysreg sync */
+ arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
+ sregs_match_len);
+ arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
+ sregs_match_len);
+ arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
+ arm_cpu->cpreg_vmstate_indexes,
+ sregs_match_len);
+ arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
+ arm_cpu->cpreg_vmstate_values,
+ sregs_match_len);
+
+ memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));
+
+ /* Populate cp list for all known sysregs */
+ for (i = 0; i < sregs_match_len; i++) {
+ const ARMCPRegInfo *ri;
+ uint32_t key = hvf_sreg_match[i].key;
+
+ ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
+ if (ri) {
+ assert(!(ri->type & ARM_CP_NO_RAW));
+ hvf_sreg_match[i].cp_idx = sregs_cnt;
+ arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
+ } else {
+ hvf_sreg_match[i].cp_idx = -1;
+ }
+ }
+ arm_cpu->cpreg_array_len = sregs_cnt;
+ arm_cpu->cpreg_vmstate_array_len = sregs_cnt;
+
+ assert(write_cpustate_to_list(arm_cpu, false));
+
+ /* Set CP_NO_RAW system registers on init */
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
+ arm_cpu->midr);
+ assert_hvf_ok(ret);
+
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
+ arm_cpu->mp_affinity);
+ assert_hvf_ok(ret);
+
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
+ assert_hvf_ok(ret);
+ pfr |= env->gicv3state ? (1 << 24) : 0;
+ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
+ assert_hvf_ok(ret);
+
+ /* We're limited to underlying hardware caps, override internal versions */
+ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
+ &arm_cpu->isar.id_aa64mmfr0);
+ assert_hvf_ok(ret);
+
+ return 0;
+}
+
+void hvf_kick_vcpu_thread(CPUState *cpu)
+{
+ cpus_kick_thread(cpu);
+ hv_vcpus_exit(&cpu->hvf->fd, 1);
+}
+
+static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
+ uint32_t syndrome)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+
+ cpu->exception_index = excp;
+ env->exception.target_el = 1;
+ env->exception.syndrome = syndrome;
+
+ arm_cpu_do_interrupt(cpu);
+}
+
+static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
+{
+ int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
+}
+
+/*
+ * Handle a PSCI call.
+ *
+ * Returns true on success,
+ * false when the PSCI call is unknown.
+ */
+static bool hvf_handle_psci_call(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ uint64_t param[4] = {
+ env->xregs[0],
+ env->xregs[1],
+ env->xregs[2],
+ env->xregs[3]
+ };
+ uint64_t context_id, mpidr;
+ bool target_aarch64 = true;
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+ target_ulong entry;
+ int target_el = 1;
+ int32_t ret = 0;
+
+ trace_hvf_psci_call(param[0], param[1], param[2], param[3],
+ arm_cpu->mp_affinity);
+
+ switch (param[0]) {
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+ break;
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
+ break;
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ mpidr = param[1];
+
+ switch (param[2]) {
+ case 0:
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
+ if (!target_cpu_state) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+
+ ret = target_cpu->power_state;
+ break;
+ default:
+ /* Everything above affinity level 0 is always on. */
+ ret = 0;
+ }
+ break;
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ /*
+ * QEMU reset and shutdown are async requests, but PSCI
+ * mandates that we never return from the reset/shutdown
+ * call, so power the CPU off now so it doesn't execute
+ * anything further.
+ */
+ hvf_psci_cpu_off(arm_cpu);
+ break;
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ hvf_psci_cpu_off(arm_cpu);
+ break;
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ mpidr = param[1];
+ entry = param[2];
+ context_id = param[3];
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
+ target_el, target_aarch64);
+ break;
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ hvf_psci_cpu_off(arm_cpu);
+ break;
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ /* Affinity levels are not supported in QEMU */
+ if (param[1] & 0xfffe0000) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ /* Powerdown is not supported, we always go into WFI */
+ env->xregs[0] = 0;
+ hvf_wfi(cpu);
+ break;
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+ break;
+ default:
+ return false;
+ }
+
+ env->xregs[0] = ret;
+ return true;
+}
+
+static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ uint64_t val = 0;
+
+ switch (reg) {
+ case SYSREG_CNTPCT_EL0:
+ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
+ gt_cntfrq_period_ns(arm_cpu);
+ break;
+ case SYSREG_PMCR_EL0:
+ val = env->cp15.c9_pmcr;
+ break;
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ val = env->cp15.c15_ccnt;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ val = env->cp15.c9_pmovsr;
+ break;
+ case SYSREG_PMSELR_EL0:
+ val = env->cp15.c9_pmselr;
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ val = env->cp15.c9_pminten;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ val = env->cp15.pmccfiltr_el0;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ val = env->cp15.c9_pmcnten;
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ val = env->cp15.c9_pmuserenr;
+ break;
+ case SYSREG_PMCEID0_EL0:
+ case SYSREG_PMCEID1_EL0:
+ /* We can't really count anything yet, declare all events invalid */
+ val = 0;
+ break;
+ case SYSREG_OSLSR_EL1:
+ val = env->cp15.oslsr_el1;
+ break;
+ case SYSREG_OSDLR_EL1:
+ /* Dummy register */
+ break;
+ default:
+ cpu_synchronize_state(cpu);
+ trace_hvf_unhandled_sysreg_read(env->pc, reg,
+ (reg >> 20) & 0x3,
+ (reg >> 14) & 0x7,
+ (reg >> 10) & 0xf,
+ (reg >> 1) & 0xf,
+ (reg >> 17) & 0x7);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ return 1;
+ }
+
+ trace_hvf_sysreg_read(reg,
+ (reg >> 20) & 0x3,
+ (reg >> 14) & 0x7,
+ (reg >> 10) & 0xf,
+ (reg >> 1) & 0xf,
+ (reg >> 17) & 0x7,
+ val);
+ hvf_set_reg(cpu, rt, val);
+
+ return 0;
+}
+
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmu_event_supported(uint16_t number)
+{
+ return false;
+}
+
+/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+ uint64_t filter;
+ bool enabled, filtered = true;
+ int el = arm_current_el(env);
+
+ enabled = (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pmcnten & (1 << counter));
+
+ if (counter == 31) {
+ filter = env->cp15.pmccfiltr_el0;
+ } else {
+ filter = env->cp15.c14_pmevtyper[counter];
+ }
+
+ if (el == 0) {
+ filtered = filter & PMXEVTYPER_U;
+ } else if (el == 1) {
+ filtered = filter & PMXEVTYPER_P;
+ }
+
+ if (counter != 31) {
+ /*
+ * If not checking PMCCNTR, ensure the counter is set up to count an
+ * event we support
+ */
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+ if (!pmu_event_supported(event)) {
+ return false;
+ }
+ }
+
+ return enabled && !filtered;
+}
+
+static void pmswinc_write(CPUARMState *env, uint64_t value)
+{
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ /* Increment a counter's count iff: */
+ if ((value & (1 << i)) && /* counter's bit is set */
+ /* counter is enabled and not filtered */
+ pmu_counter_enabled(env, i) &&
+ /* counter is SW_INCR */
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+ }
+ }
+}
+
+static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+
+ trace_hvf_sysreg_write(reg,
+ (reg >> 20) & 0x3,
+ (reg >> 14) & 0x7,
+ (reg >> 10) & 0xf,
+ (reg >> 1) & 0xf,
+ (reg >> 17) & 0x7,
+ val);
+
+ switch (reg) {
+ case SYSREG_PMCCNTR_EL0:
+ pmu_op_start(env);
+ env->cp15.c15_ccnt = val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMCR_EL0:
+ pmu_op_start(env);
+
+ if (val & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (val & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
+ env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK);
+
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMUSERENR_EL0:
+ env->cp15.c9_pmuserenr = val & 0xf;
+ break;
+ case SYSREG_PMCNTENSET_EL0:
+ env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMCNTENCLR_EL0:
+ env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
+ break;
+ case SYSREG_PMINTENCLR_EL1:
+ pmu_op_start(env);
+ env->cp15.c9_pminten |= val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMOVSCLR_EL0:
+ pmu_op_start(env);
+ env->cp15.c9_pmovsr &= ~val;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSWINC_EL0:
+ pmu_op_start(env);
+ pmswinc_write(env, val);
+ pmu_op_finish(env);
+ break;
+ case SYSREG_PMSELR_EL0:
+ env->cp15.c9_pmselr = val & 0x1f;
+ break;
+ case SYSREG_PMCCFILTR_EL0:
+ pmu_op_start(env);
+ env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
+ pmu_op_finish(env);
+ break;
+ case SYSREG_OSLAR_EL1:
+ env->cp15.oslsr_el1 = val & 1;
+ break;
+ case SYSREG_OSDLR_EL1:
+ /* Dummy register */
+ break;
+ default:
+ cpu_synchronize_state(cpu);
+ trace_hvf_unhandled_sysreg_write(env->pc, reg,
+ (reg >> 20) & 0x3,
+ (reg >> 14) & 0x7,
+ (reg >> 10) & 0xf,
+ (reg >> 1) & 0xf,
+ (reg >> 17) & 0x7);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ return 1;
+ }
+
+ return 0;
+}
+
+static int hvf_inject_interrupts(CPUState *cpu)
+{
+ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
+ trace_hvf_inject_fiq();
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
+ true);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ trace_hvf_inject_irq();
+ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
+ true);
+ }
+
+ return 0;
+}
+
+static uint64_t hvf_vtimer_val_raw(void)
+{
+ /*
+ * mach_absolute_time() returns the host counter value with none of the
+ * VM offset that we define applied, so subtract our own offset from it.
+ */
+ return mach_absolute_time() - hvf_state->vtimer_offset;
+}
+
+static uint64_t hvf_vtimer_val(void)
+{
+ if (!runstate_is_running()) {
+ /* VM is paused, the vtimer value is in vtimer.vtimer_val */
+ return vtimer.vtimer_val;
+ }
+
+ return hvf_vtimer_val_raw();
+}
+
+static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
+{
+ /*
+ * Use pselect to sleep so that other threads can IPI us while we're
+ * sleeping.
+ */
+ qatomic_mb_set(&cpu->thread_kicked, false);
+ qemu_mutex_unlock_iothread();
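+ /* A NULL ts makes pselect() block indefinitely until a signal arrives. */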
+ pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
+ qemu_mutex_lock_iothread();
+}
+
+static void hvf_wfi(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ struct timespec ts;
+ hv_return_t r;
+ uint64_t ctl;
+ uint64_t cval;
+ int64_t ticks_to_sleep;
+ uint64_t seconds;
+ uint64_t nanos;
+ uint32_t cntfrq;
+
+ if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
+ /* Interrupt pending, no need to wait */
+ return;
+ }
+
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ assert_hvf_ok(r);
+
+ if (!(ctl & TMR_CTL_ENABLE) || (ctl & TMR_CTL_IMASK)) {
+ /* Timer disabled or masked, just wait for an IPI. */
+ hvf_wait_for_ipi(cpu, NULL);
+ return;
+ }
+
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+ assert_hvf_ok(r);
+
+ ticks_to_sleep = cval - hvf_vtimer_val();
+ if (ticks_to_sleep < 0) {
+ return;
+ }
+
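+ /*
+ * Note that gt_cntfrq_period_ns() returns the tick period in ns, so
+ * "cntfrq" below holds a period and ticks * cntfrq yields nanoseconds.
+ */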
+ cntfrq = gt_cntfrq_period_ns(arm_cpu);
+ seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
+ ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
+ nanos = ticks_to_sleep * cntfrq;
+
+ /*
+ * Don't sleep for less than the time a context switch would take,
+ * so that we can satisfy fast timer requests on the same CPU.
+ * Measurements on M1 show the sweet spot to be ~2ms.
+ */
+ if (!seconds && nanos < (2 * SCALE_MS)) {
+ return;
+ }
+
+ ts = (struct timespec) { seconds, nanos };
+ hvf_wait_for_ipi(cpu, &ts);
+}
+
+static void hvf_sync_vtimer(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ hv_return_t r;
+ uint64_t ctl;
+ bool irq_state;
+
+ if (!cpu->hvf->vtimer_masked) {
+ /* We will get notified on vtimer changes by hvf, nothing to do */
+ return;
+ }
+
+ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ assert_hvf_ok(r);
+
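+ /* The timer line is asserted iff the timer is enabled, unmasked and firing. */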
+ irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
+ (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);
+
+ if (!irq_state) {
+ /* Timer no longer asserting, we can unmask it */
+ hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
+ cpu->hvf->vtimer_masked = false;
+ }
+}
+
+int hvf_vcpu_exec(CPUState *cpu)
+{
+ ARMCPU *arm_cpu = ARM_CPU(cpu);
+ CPUARMState *env = &arm_cpu->env;
+ hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
+ hv_return_t r;
+ bool advance_pc = false;
+
+ if (hvf_inject_interrupts(cpu)) {
+ return EXCP_INTERRUPT;
+ }
+
+ if (cpu->halted) {
+ return EXCP_HLT;
+ }
+
+ flush_cpu_state(cpu);
+
+ qemu_mutex_unlock_iothread();
+ assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
+
+ /* handle VMEXIT */
+ uint64_t exit_reason = hvf_exit->reason;
+ uint64_t syndrome = hvf_exit->exception.syndrome;
+ uint32_t ec = syn_get_ec(syndrome);
+
+ qemu_mutex_lock_iothread();
+ switch (exit_reason) {
+ case HV_EXIT_REASON_EXCEPTION:
+ /* This is the main one, handle below. */
+ break;
+ case HV_EXIT_REASON_VTIMER_ACTIVATED:
+ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
+ cpu->hvf->vtimer_masked = true;
+ return 0;
+ case HV_EXIT_REASON_CANCELED:
+ /* we got kicked, no exit to process */
+ return 0;
+ default:
+ g_assert_not_reached();
+ }
+
+ hvf_sync_vtimer(cpu);
+
+ switch (ec) {
+ case EC_DATAABORT: {
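+ /* Decode the ISS fields of the data abort syndrome (ESR_ELx layout). */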
+ bool isv = syndrome & ARM_EL_ISV;
+ bool iswrite = (syndrome >> 6) & 1;
+ bool s1ptw = (syndrome >> 7) & 1;
+ uint32_t sas = (syndrome >> 22) & 3;
+ uint32_t len = 1 << sas;
+ uint32_t srt = (syndrome >> 16) & 0x1f;
+ uint32_t cm = (syndrome >> 8) & 0x1;
+ uint64_t val = 0;
+
+ trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
+ hvf_exit->exception.physical_address, isv,
+ iswrite, s1ptw, len, srt);
+
+ if (cm) {
+ /* We don't cache MMIO regions */
+ advance_pc = true;
+ break;
+ }
+
+ assert(isv);
+
+ if (iswrite) {
+ val = hvf_get_reg(cpu, srt);
+ address_space_write(&address_space_memory,
+ hvf_exit->exception.physical_address,
+ MEMTXATTRS_UNSPECIFIED, &val, len);
+ } else {
+ address_space_read(&address_space_memory,
+ hvf_exit->exception.physical_address,
+ MEMTXATTRS_UNSPECIFIED, &val, len);
+ hvf_set_reg(cpu, srt, val);
+ }
+
+ advance_pc = true;
+ break;
+ }
+ case EC_SYSTEMREGISTERTRAP: {
+ bool isread = (syndrome >> 0) & 1;
+ uint32_t rt = (syndrome >> 5) & 0x1f;
+ uint32_t reg = syndrome & SYSREG_MASK;
+ uint64_t val;
+ int ret = 0;
+
+ if (isread) {
+ ret = hvf_sysreg_read(cpu, reg, rt);
+ } else {
+ val = hvf_get_reg(cpu, rt);
+ ret = hvf_sysreg_write(cpu, reg, val);
+ }
+
+ advance_pc = !ret;
+ break;
+ }
+ case EC_WFX_TRAP:
+ advance_pc = true;
+ if (!(syndrome & WFX_IS_WFE)) {
+ hvf_wfi(cpu);
+ }
+ break;
+ case EC_AA64_HVC:
+ cpu_synchronize_state(cpu);
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
+ if (!hvf_handle_psci_call(cpu)) {
+ trace_hvf_unknown_hvc(env->xregs[0]);
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
+ env->xregs[0] = -1;
+ }
+ } else {
+ trace_hvf_unknown_hvc(env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ }
+ break;
+ case EC_AA64_SMC:
+ cpu_synchronize_state(cpu);
+ if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
+ advance_pc = true;
+
+ if (!hvf_handle_psci_call(cpu)) {
+ trace_hvf_unknown_smc(env->xregs[0]);
+ /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
+ env->xregs[0] = -1;
+ }
+ } else {
+ trace_hvf_unknown_smc(env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ }
+ break;
+ default:
+ cpu_synchronize_state(cpu);
+ trace_hvf_exit(syndrome, ec, env->pc);
+ error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
+ }
+
+ if (advance_pc) {
+ uint64_t pc;
+
+ flush_cpu_state(cpu);
+
+ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
+ assert_hvf_ok(r);
+ pc += 4;
+ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
+ assert_hvf_ok(r);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_hvf_vtimer = {
+ .name = "hvf-vtimer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(vtimer_val, HVFVTimer),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void hvf_vm_state_change(void *opaque, bool running, RunState state)
+{
+ HVFVTimer *s = opaque;
+
+ if (running) {
+ /* Update vtimer offset on all CPUs */
+ hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
+ cpu_synchronize_all_states();
+ } else {
+ /* Remember vtimer value on every pause */
+ s->vtimer_val = hvf_vtimer_val_raw();
+ }
+}
+
+int hvf_arch_init(void)
+{
+ hvf_state->vtimer_offset = mach_absolute_time();
+ vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
+ qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
+ return 0;
+}
diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build
new file mode 100644
index 000000000..855e6cce5
--- /dev/null
+++ b/target/arm/hvf/meson.build
@@ -0,0 +1,3 @@
+arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
+ 'hvf.c',
+))
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
new file mode 100644
index 000000000..820e8e029
--- /dev/null
+++ b/target/arm/hvf/trace-events
@@ -0,0 +1,11 @@
+hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
+hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)"
+hvf_inject_fiq(void) "injecting FIQ"
+hvf_inject_irq(void) "injecting IRQ"
+hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
+hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
+hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
+hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
+hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
+hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
+hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x"
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
new file mode 100644
index 000000000..ea238cff8
--- /dev/null
+++ b/target/arm/hvf_arm.h
@@ -0,0 +1,18 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support -- ARM specifics
+ *
+ * Copyright (c) 2021 Alexander Graf
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_HVF_ARM_H
+#define QEMU_HVF_ARM_H
+
+#include "cpu.h"
+
+void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu);
+
+#endif
diff --git a/target/arm/idau.h b/target/arm/idau.h
new file mode 100644
index 000000000..0ef525197
--- /dev/null
+++ b/target/arm/idau.h
@@ -0,0 +1,58 @@
+/*
+ * QEMU ARM CPU -- interface for the Arm v8M IDAU
+ *
+ * Copyright (c) 2018 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * In the v8M architecture, the IDAU is a small piece of hardware
+ * typically implemented in the SoC which provides board or SoC
+ * specific security attribution information for each address that
+ * the CPU performs MPU/SAU checks on. For QEMU, we model this with a
+ * QOM interface which is implemented by the board or SoC object and
+ * connected to the CPU using a link property.
+ */
+
+#ifndef TARGET_ARM_IDAU_H
+#define TARGET_ARM_IDAU_H
+
+#include "qom/object.h"
+
+#define TYPE_IDAU_INTERFACE "idau-interface"
+#define IDAU_INTERFACE(obj) \
+ INTERFACE_CHECK(IDAUInterface, (obj), TYPE_IDAU_INTERFACE)
+typedef struct IDAUInterfaceClass IDAUInterfaceClass;
+DECLARE_CLASS_CHECKERS(IDAUInterfaceClass, IDAU_INTERFACE,
+ TYPE_IDAU_INTERFACE)
+
+typedef struct IDAUInterface IDAUInterface;
+
+#define IREGION_NOTVALID -1
+
+struct IDAUInterfaceClass {
+ InterfaceClass parent;
+
+ /* Check the specified address and return the IDAU security information
+ * for it by filling in iregion, exempt, ns and nsc:
+ * iregion: IDAU region number, or IREGION_NOTVALID if not valid
+ * exempt: true if address is exempt from security attribution
+ * ns: true if the address is NonSecure
+ * nsc: true if the address is NonSecure-callable
+ */
+ void (*check)(IDAUInterface *ii, uint32_t address, int *iregion,
+ bool *exempt, bool *ns, bool *nsc);
+};
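+
+/*
+ * A minimal sketch of a board-side implementation (hypothetical names, not
+ * part of this patch), which would mark the bottom 512MB as NonSecure and
+ * everything else as Secure:
+ *
+ * static void my_board_idau_check(IDAUInterface *ii, uint32_t address,
+ * int *iregion, bool *exempt,
+ * bool *ns, bool *nsc)
+ * {
+ * *iregion = IREGION_NOTVALID;
+ * *exempt = false;
+ * *ns = (address < 0x20000000);
+ * *nsc = false;
+ * }
+ */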
+
+#endif
diff --git a/target/arm/internals.h b/target/arm/internals.h
new file mode 100644
index 000000000..89f7610eb
--- /dev/null
+++ b/target/arm/internals.h
@@ -0,0 +1,1288 @@
+/*
+ * QEMU ARM CPU -- internal functions and types
+ *
+ * Copyright (c) 2014 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This header defines functions, types, etc which need to be shared
+ * between different source files within target/arm/ but which are
+ * private to it and not required by the rest of QEMU.
+ */
+
+#ifndef TARGET_ARM_INTERNALS_H
+#define TARGET_ARM_INTERNALS_H
+
+#include "hw/registerfields.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "syndrome.h"
+
+/* register banks for CPU modes */
+#define BANK_USRSYS 0
+#define BANK_SVC 1
+#define BANK_ABT 2
+#define BANK_UND 3
+#define BANK_IRQ 4
+#define BANK_FIQ 5
+#define BANK_HYP 6
+#define BANK_MON 7
+
+static inline bool excp_is_internal(int excp)
+{
+ /* Return true if this exception number represents a QEMU-internal
+ * exception that will not be passed to the guest.
+ */
+ return excp == EXCP_INTERRUPT
+ || excp == EXCP_HLT
+ || excp == EXCP_DEBUG
+ || excp == EXCP_HALTED
+ || excp == EXCP_EXCEPTION_EXIT
+ || excp == EXCP_KERNEL_TRAP
+ || excp == EXCP_SEMIHOST;
+}
+
+/* Scale factor for generic timers, i.e. the number of ns per tick.
+ * A 16ns tick period gives a 1e9 / 16 = 62.5MHz timer.
+ */
+#define GTIMER_SCALE 16
+
+/* Bit definitions for the v7M CONTROL register */
+FIELD(V7M_CONTROL, NPRIV, 0, 1)
+FIELD(V7M_CONTROL, SPSEL, 1, 1)
+FIELD(V7M_CONTROL, FPCA, 2, 1)
+FIELD(V7M_CONTROL, SFPA, 3, 1)
+
+/* Bit definitions for v7M exception return payload */
+FIELD(V7M_EXCRET, ES, 0, 1)
+FIELD(V7M_EXCRET, RES0, 1, 1)
+FIELD(V7M_EXCRET, SPSEL, 2, 1)
+FIELD(V7M_EXCRET, MODE, 3, 1)
+FIELD(V7M_EXCRET, FTYPE, 4, 1)
+FIELD(V7M_EXCRET, DCRS, 5, 1)
+FIELD(V7M_EXCRET, S, 6, 1)
+FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
+
+/* Minimum value which is a magic number for exception return */
+#define EXC_RETURN_MIN_MAGIC 0xff000000
+/* Minimum number which is a magic number for function or exception return
+ * when using v8M security extension
+ */
+#define FNC_RETURN_MIN_MAGIC 0xfefffffe
+
+/* We use a few fake FSR values for internal purposes in M profile.
+ * M profile cores don't have A/R format FSRs, but currently our
+ * get_phys_addr() code assumes A/R profile and reports failures via
+ * an A/R format FSR value. We then translate that into the proper
+ * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
+ * Mostly the FSR values we use for this are those defined for v7PMSA,
+ * since we share some of that codepath. A few kinds of fault are
+ * only for M profile and have no A/R equivalent, though, so we have
+ * to pick a value from the reserved range (which we never otherwise
+ * generate) to use for these.
+ * These values will never be visible to the guest.
+ */
+#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
+#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
+
+/**
+ * raise_exception: Raise the specified exception.
+ * Raise a guest exception with the specified value, syndrome register
+ * and target exception level. This should be called from helper functions,
+ * and never returns because we will longjump back up to the CPU main loop.
+ */
+void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el);
+
+/*
+ * Similarly, but also use unwinding to restore cpu state.
+ */
+void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el,
+ uintptr_t ra);
+
+/*
+ * For AArch64, map a given EL to an index in the banked_spsr array.
+ * Note that this mapping and the AArch32 mapping defined in bank_number()
+ * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
+ * mandated mapping between each other.
+ */
+static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
+{
+ static const unsigned int map[4] = {
+ [1] = BANK_SVC, /* EL1. */
+ [2] = BANK_HYP, /* EL2. */
+ [3] = BANK_MON, /* EL3. */
+ };
+ assert(el >= 1 && el <= 3);
+ return map[el];
+}
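+
+/*
+ * For example, AArch64 SPSR_EL1 maps to banked_spsr[BANK_SVC], the
+ * same slot that AArch32 SVC mode uses via bank_number() below;
+ * this is the architecturally mandated SPSR_EL1/SPSR_svc pairing.
+ */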
+
+/* Map CPU modes onto saved register banks. */
+static inline int bank_number(int mode)
+{
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_SYS:
+ return BANK_USRSYS;
+ case ARM_CPU_MODE_SVC:
+ return BANK_SVC;
+ case ARM_CPU_MODE_ABT:
+ return BANK_ABT;
+ case ARM_CPU_MODE_UND:
+ return BANK_UND;
+ case ARM_CPU_MODE_IRQ:
+ return BANK_IRQ;
+ case ARM_CPU_MODE_FIQ:
+ return BANK_FIQ;
+ case ARM_CPU_MODE_HYP:
+ return BANK_HYP;
+ case ARM_CPU_MODE_MON:
+ return BANK_MON;
+ }
+ g_assert_not_reached();
+}
+
+/**
+ * r14_bank_number: Map CPU mode onto register bank for r14
+ *
+ * Given an AArch32 CPU mode, return the index into the saved register
+ * banks to use for the R14 (LR) in that mode. This is the same as
+ * bank_number(), except for the special case of Hyp mode, where
+ * R14 is shared with USR and SYS, unlike its R13 and SPSR.
+ * This should be used as the index into env->banked_r14[], and
+ * bank_number() used for the index into env->banked_r13[] and
+ * env->banked_spsr[].
+ */
+static inline int r14_bank_number(int mode)
+{
+ return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
+}
+
+void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
+void arm_translate_init(void);
+
+#ifdef CONFIG_TCG
+void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
+#endif /* CONFIG_TCG */
+
+/**
+ * aarch64_sve_zcr_get_valid_len:
+ * @cpu: cpu context
+ * @start_len: maximum len to consider
+ *
+ * Return the maximum supported sve vector length <= @start_len.
+ * Note that both @start_len and the return value are in units
+ * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
+ */
+uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);
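+
+/*
+ * Worked example: a call with @start_len 3 asks for the largest
+ * supported vector length of at most (3 + 1) * 128 = 512 bits; on a
+ * CPU whose longest supported vector is 256 bits it returns 1,
+ * since (1 + 1) * 128 = 256.
+ */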
+
+enum arm_fprounding {
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+ FPROUNDING_ZERO,
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_ODD
+};
+
+int arm_rmode_to_sf(int rmode);
+
+static inline void aarch64_save_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->sp_el[el] = env->xregs[31];
+ } else {
+ env->sp_el[0] = env->xregs[31];
+ }
+}
+
+static inline void aarch64_restore_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->xregs[31] = env->sp_el[el];
+ } else {
+ env->xregs[31] = env->sp_el[0];
+ }
+}
+
+static inline void update_spsel(CPUARMState *env, uint32_t imm)
+{
+ unsigned int cur_el = arm_current_el(env);
+ /* Update PSTATE SPSel bit; this requires us to update the
+ * working stack pointer in xregs[31].
+ */
+ if (!((imm ^ env->pstate) & PSTATE_SP)) {
+ return;
+ }
+ aarch64_save_sp(env, cur_el);
+ env->pstate = deposit32(env->pstate, 0, 1, imm);
+
+    /* We rely on illegal updates to SPSel from EL0 to get trapped
+ * at translation time.
+ */
+ assert(cur_el >= 1 && cur_el <= 3);
+ aarch64_restore_sp(env, cur_el);
+}
+
+/*
+ * arm_pamax
+ * @cpu: ARMCPU
+ *
+ * Returns the implementation defined bit-width of physical addresses.
+ * The ARMv8 reference manuals refer to this as PAMax().
+ */
+static inline unsigned int arm_pamax(ARMCPU *cpu)
+{
+ static const unsigned int pamax_map[] = {
+ [0] = 32,
+ [1] = 36,
+ [2] = 40,
+ [3] = 42,
+ [4] = 44,
+ [5] = 48,
+ };
+ unsigned int parange =
+ FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+
+ /* id_aa64mmfr0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error. */
+ assert(parange < ARRAY_SIZE(pamax_map));
+ return pamax_map[parange];
+}
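+
+/*
+ * For example, an ID_AA64MMFR0.PARANGE value of 5 selects pamax_map[5]
+ * above, i.e. a 48-bit physical address space (PAMax of 48).
+ */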
+
+/* Return true if extended addresses are enabled.
+ * This is always the case if our translation regime is 64 bit,
+ * but depends on TTBCR.EAE for 32 bit.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ return arm_el_is_aa64(env, 1) ||
+ (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+}
+
+/* Update a QEMU watchpoint based on the information the guest has set in the
+ * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
+ */
+void hw_watchpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU watchpoints for every guest watchpoint. This does a
+ * complete delete-and-reinstate of the QEMU watchpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_watchpoint_update_all(ARMCPU *cpu);
+/* Update a QEMU breakpoint based on the information the guest has set in the
+ * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
+ */
+void hw_breakpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU breakpoints for every guest breakpoint. This does a
+ * complete delete-and-reinstate of the QEMU breakpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_breakpoint_update_all(ARMCPU *cpu);
+
+/* Callback function for checking if a breakpoint should trigger. */
+bool arm_debug_check_breakpoint(CPUState *cs);
+
+/* Callback function for checking if a watchpoint should trigger. */
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
+
+/* Adjust addresses (in BE32 mode) before testing against watchpoint
+ * addresses.
+ */
+vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
+
+/* Callback function for when a watchpoint or breakpoint triggers. */
+void arm_debug_excp_handler(CPUState *cs);
+
+#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
+static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ return false;
+}
+static inline void arm_handle_psci_call(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+#else
+/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+/* Actually handle a PSCI call */
+void arm_handle_psci_call(ARMCPU *cpu);
+#endif
+
+/**
+ * arm_clear_exclusive: clear the exclusive monitor
+ * @env: CPU env
+ * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
+ */
+static inline void arm_clear_exclusive(CPUARMState *env)
+{
+ env->exclusive_addr = -1;
+}
+
+/**
+ * ARMFaultType: type of an ARM MMU fault
+ * This corresponds to the v8A pseudocode's Fault enumeration,
+ * with extensions for QEMU internal conditions.
+ */
+typedef enum ARMFaultType {
+ ARMFault_None,
+ ARMFault_AccessFlag,
+ ARMFault_Alignment,
+ ARMFault_Background,
+ ARMFault_Domain,
+ ARMFault_Permission,
+ ARMFault_Translation,
+ ARMFault_AddressSize,
+ ARMFault_SyncExternal,
+ ARMFault_SyncExternalOnWalk,
+ ARMFault_SyncParity,
+ ARMFault_SyncParityOnWalk,
+ ARMFault_AsyncParity,
+ ARMFault_AsyncExternal,
+ ARMFault_Debug,
+ ARMFault_TLBConflict,
+ ARMFault_Lockdown,
+ ARMFault_Exclusive,
+ ARMFault_ICacheMaint,
+ ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
+ ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+} ARMFaultType;
+
+/**
+ * ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @type: Type of fault
+ * @level: Table walk level (for translation, access flag and permission faults)
+ * @domain: Domain of the fault address (for non-LPAE CPUs only)
+ * @s2addr: Address that caused a fault at stage 2
+ * @stage2: True if we faulted at stage 2
+ * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
+ * @s1ns: True if we faulted on a non-secure IPA while in secure state
+ * @ea: True if we should set the EA (external abort type) bit in syndrome
+ */
+typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
+struct ARMMMUFaultInfo {
+ ARMFaultType type;
+ target_ulong s2addr;
+ int level;
+ int domain;
+ bool stage2;
+ bool s1ptw;
+ bool s1ns;
+ bool ea;
+};
+
+/**
+ * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
+ * Compare pseudocode EncodeSDFSC(), though unlike that function
+ * we set up a whole FSR-format code including domain field and
+ * putting the high bit of the FSC into bit 10.
+ */
+static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AccessFlag:
+ fsc = fi->level == 1 ? 0x3 : 0x6;
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x1;
+ break;
+ case ARMFault_Permission:
+ fsc = fi->level == 1 ? 0xd : 0xf;
+ break;
+ case ARMFault_Domain:
+ fsc = fi->level == 1 ? 0x9 : 0xb;
+ break;
+ case ARMFault_Translation:
+ fsc = fi->level == 1 ? 0x5 : 0x7;
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x8 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = fi->level == 1 ? 0xc : 0xe;
+ fsc |= (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x409;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = fi->level == 1 ? 0x40c : 0x40e;
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x408;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x406 | (fi->ea << 12);
+ break;
+ case ARMFault_Debug:
+ fsc = 0x2;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x400;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x404;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x405;
+ break;
+ case ARMFault_ICacheMaint:
+ fsc = 0x4;
+ break;
+ case ARMFault_Background:
+ fsc = 0x0;
+ break;
+ case ARMFault_QEMU_NSCExec:
+ fsc = M_FAKE_FSR_NSC_EXEC;
+ break;
+ case ARMFault_QEMU_SFault:
+ fsc = M_FAKE_FSR_SFAULT;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * short-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= (fi->domain << 4);
+ return fsc;
+}
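+
+/*
+ * Worked example: a level 2 translation fault in domain 5 encodes as
+ * fsc = 0x7 | (5 << 4) = 0x57, while a synchronous external abort on
+ * a level 1 walk with fi->ea set gives 0xc | (1 << 12) = 0x100c.
+ */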
+
+/**
+ * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
+ * Compare pseudocode EncodeLDFSC(), though unlike that function
+ * we fill in also the LPAE bit 9 of a DFSR format.
+ */
+static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AddressSize:
+ fsc = fi->level & 3;
+ break;
+ case ARMFault_AccessFlag:
+ fsc = (fi->level & 3) | (0x2 << 2);
+ break;
+ case ARMFault_Permission:
+ fsc = (fi->level & 3) | (0x3 << 2);
+ break;
+ case ARMFault_Translation:
+ fsc = (fi->level & 3) | (0x1 << 2);
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x10 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x18;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = (fi->level & 3) | (0x7 << 2);
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x19;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x11 | (fi->ea << 12);
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x21;
+ break;
+ case ARMFault_Debug:
+ fsc = 0x22;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x30;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x34;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x35;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * long-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= 1 << 9;
+ return fsc;
+}
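+
+/*
+ * Worked example: a level 3 permission fault encodes as
+ * (3 & 3) | (0x3 << 2) = 0xf, and with the LPAE bit 9 always ORed in
+ * the returned value is 0x20f.
+ */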
+
+static inline bool arm_extabort_type(MemTxResult result)
+{
+ /* The EA bit in syndromes and fault status registers is an
+ * IMPDEF classification of external aborts. ARM implementations
+ * usually use this to indicate AXI bus Decode error (0) or
+ * Slave error (1); in QEMU we follow that.
+ */
+ return result != MEMTX_DECODE_ERROR;
+}
+
+#ifdef CONFIG_USER_ONLY
+void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+#else
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#endif
+
+static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
+{
+ return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
+}
+
+static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return mmu_idx | ARM_MMU_IDX_M;
+ } else {
+ return mmu_idx | ARM_MMU_IDX_A;
+ }
+}
+
+static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
+{
+ /* AArch64 is always a-profile. */
+ return mmu_idx | ARM_MMU_IDX_A;
+}
+
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
+
+/*
+ * Return the MMU index for a v7M CPU with all relevant information
+ * manually specified.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+ bool secstate, bool priv, bool negpri);
+
+/*
+ * Return the MMU index for a v7M CPU in the specified security and
+ * privilege state.
+ */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+ bool secstate, bool priv);
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
+
+/* Return true if the stage 1 translation regime is using LPAE format page
+ * tables */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;
+
+/* arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+
+/* Call any registered EL change hooks */
+static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
+{
+ ARMELChangeHook *hook, *next;
+ QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
+ hook->hook(cpu, hook->opaque);
+ }
+}
+static inline void arm_call_el_change_hook(ARMCPU *cpu)
+{
+ ARMELChangeHook *hook, *next;
+ QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
+ hook->hook(cpu, hook->opaque);
+ }
+}
+
+/* Return true if this address translation regime has two ranges. */
+static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_SE0:
+ case ARMMMUIdx_Stage1_SE1:
+ case ARMMMUIdx_Stage1_SE1_PAN:
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Return true if this address translation regime is secure */
+static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MUser:
+ return false;
+ case ARMMMUIdx_SE3:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ case ARMMMUIdx_Stage1_SE0:
+ case ARMMMUIdx_Stage1_SE1:
+ case ARMMMUIdx_Stage1_SE1_PAN:
+ case ARMMMUIdx_SE2:
+ case ARMMMUIdx_Stage2_S:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
+ case ARMMMUIdx_MSPriv:
+ case ARMMMUIdx_MSUser:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_SE1_PAN:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_SE20_2_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Return the exception level which controls this address translation regime */
+static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_SE20_0:
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_Stage2:
+ case ARMMMUIdx_Stage2_S:
+ case ARMMMUIdx_SE2:
+ case ARMMMUIdx_E2:
+ return 2;
+ case ARMMMUIdx_SE3:
+ return 3;
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_Stage1_SE0:
+ return arm_el_is_aa64(env, 3) ? 1 : 3;
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_SE1:
+ case ARMMMUIdx_Stage1_SE1_PAN:
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MPriv:
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
+ case ARMMMUIdx_MSPriv:
+ case ARMMMUIdx_MSUser:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Return the TCR controlling this translation regime */
+static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ return &env->cp15.vtcr_el2;
+ }
+ if (mmu_idx == ARMMMUIdx_Stage2_S) {
+ /*
+ * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
+ * those are not currently used by QEMU, so just return VSTCR_EL2.
+ */
+ return &env->cp15.vstcr_el2;
+ }
+ return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return the FSR value for a debug exception (watchpoint, hardware
+ * breakpoint or BKPT insn) targeting the specified exception level.
+ */
+static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
+{
+ ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
+ int target_el = arm_debug_target_el(env);
+ bool using_lpae = false;
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
+ using_lpae = true;
+ } else {
+ if (arm_feature(env, ARM_FEATURE_LPAE) &&
+ (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
+ using_lpae = true;
+ }
+ }
+
+ if (using_lpae) {
+ return arm_fi_to_lfsc(&fi);
+ } else {
+ return arm_fi_to_sfsc(&fi);
+ }
+}
+
+/**
+ * arm_num_brps: Return number of implemented breakpoints.
+ * Note that the ID register BRPS field is "number of bps - 1",
+ * and we return the actual number of breakpoints.
+ */
+static inline int arm_num_brps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
+ }
+}
+
+/**
+ * arm_num_wrps: Return number of implemented watchpoints.
+ * Note that the ID register WRPS field is "number of wps - 1",
+ * and we return the actual number of watchpoints.
+ */
+static inline int arm_num_wrps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
+ }
+}
+
+/**
+ * arm_num_ctx_cmps: Return number of implemented context comparators.
+ * Note that the ID register CTX_CMPS field is "number of cmps - 1",
+ * and we return the actual number of comparators.
+ */
+static inline int arm_num_ctx_cmps(ARMCPU *cpu)
+{
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
+ } else {
+ return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
+ }
+}
+
+/**
+ * v7m_using_psp: Return true if using process stack pointer
+ * Return true if the CPU is currently using the process stack
+ * pointer, or false if it is using the main stack pointer.
+ */
+static inline bool v7m_using_psp(CPUARMState *env)
+{
+ /* Handler mode always uses the main stack; for thread mode
+ * the CONTROL.SPSEL bit determines the answer.
+ * Note that in v7M it is not possible to be in Handler mode with
+ * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+ */
+ return !arm_v7m_is_handler_mode(env) &&
+ env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
+/**
+ * v7m_sp_limit: Return SP limit for current CPU state
+ * Return the SP limit value for the current CPU security state
+ * and stack pointer.
+ */
+static inline uint32_t v7m_sp_limit(CPUARMState *env)
+{
+ if (v7m_using_psp(env)) {
+ return env->v7m.psplim[env->v7m.secure];
+ } else {
+ return env->v7m.msplim[env->v7m.secure];
+ }
+}
+
+/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+ bool is_secure, bool is_priv)
+{
+ switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+ case 0:
+    case 2: /* UNPREDICTABLE: we treat it like 0 */
+ return false;
+ case 1:
+ return is_priv;
+ case 3:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * aarch32_mode_name(): Return name of the AArch32 CPU mode
+ * @psr: Program Status Register indicating CPU mode
+ *
+ * Returns, for debug logging purposes, a printable representation
+ * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
+ * the low bits of the specified PSR.
+ */
+static inline const char *aarch32_mode_name(uint32_t psr)
+{
+ static const char cpu_mode_names[16][4] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
+ };
+
+ return cpu_mode_names[psr & 0xf];
+}
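+
+/*
+ * For example, a PSR with mode bits 0b10011 (ARM_CPU_MODE_SVC, 0x13)
+ * indexes entry 3 of the table above and is logged as "svc".
+ */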
+
+/**
+ * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
+ * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
+ * Must be called with the iothread lock held.
+ */
+void arm_cpu_update_virq(ARMCPU *cpu);
+
+/**
+ * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
+ *
+ * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
+ * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
+ * Must be called with the iothread lock held.
+ */
+void arm_cpu_update_vfiq(ARMCPU *cpu);
+
+/**
+ * arm_mmu_idx_el:
+ * @env: The cpu environment
+ * @el: The EL to use.
+ *
+ * Return the full ARMMMUIdx for the translation regime for EL.
+ */
+ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);
+
+/**
+ * arm_mmu_idx:
+ * @env: The cpu environment
+ *
+ * Return the full ARMMMUIdx for the current translation regime.
+ */
+ARMMMUIdx arm_mmu_idx(CPUARMState *env);
+
+/**
+ * arm_stage1_mmu_idx:
+ * @env: The cpu environment
+ *
+ * Return the ARMMMUIdx for the stage1 traversal for the current regime.
+ */
+#ifdef CONFIG_USER_ONLY
+static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
+{
+ return ARMMMUIdx_Stage1_E0;
+}
+#else
+ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
+#endif
+
+/**
+ * arm_mmu_idx_is_stage1_of_2:
+ * @mmu_idx: The ARMMMUIdx to test
+ *
+ * Return true if @mmu_idx is a NOTLB mmu_idx that is the
+ * first stage of a two stage regime.
+ */
+static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_SE0:
+ case ARMMMUIdx_Stage1_SE1:
+ case ARMMMUIdx_Stage1_SE1_PAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
+ const ARMISARegisters *id)
+{
+ uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;
+
+ if ((features >> ARM_FEATURE_V4T) & 1) {
+ valid |= CPSR_T;
+ }
+ if ((features >> ARM_FEATURE_V5) & 1) {
+        valid |= CPSR_Q; /* V5TE in reality */
+ }
+ if ((features >> ARM_FEATURE_V6) & 1) {
+ valid |= CPSR_E | CPSR_GE;
+ }
+ if ((features >> ARM_FEATURE_THUMB2) & 1) {
+ valid |= CPSR_IT;
+ }
+ if (isar_feature_aa32_jazelle(id)) {
+ valid |= CPSR_J;
+ }
+ if (isar_feature_aa32_pan(id)) {
+ valid |= CPSR_PAN;
+ }
+ if (isar_feature_aa32_dit(id)) {
+ valid |= CPSR_DIT;
+ }
+ if (isar_feature_aa32_ssbs(id)) {
+ valid |= CPSR_SSBS;
+ }
+
+ return valid;
+}
+
+static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
+{
+ uint32_t valid;
+
+ valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
+ if (isar_feature_aa64_bti(id)) {
+ valid |= PSTATE_BTYPE;
+ }
+ if (isar_feature_aa64_pan(id)) {
+ valid |= PSTATE_PAN;
+ }
+ if (isar_feature_aa64_uao(id)) {
+ valid |= PSTATE_UAO;
+ }
+ if (isar_feature_aa64_dit(id)) {
+ valid |= PSTATE_DIT;
+ }
+ if (isar_feature_aa64_ssbs(id)) {
+ valid |= PSTATE_SSBS;
+ }
+ if (isar_feature_aa64_mte(id)) {
+ valid |= PSTATE_TCO;
+ }
+
+ return valid;
+}
+
+/*
+ * Parameters of a given virtual address, as extracted from the
+ * translation control register (TCR) for a given regime.
+ */
+typedef struct ARMVAParameters {
+ unsigned tsz : 8;
+ unsigned select : 1;
+ bool tbi : 1;
+ bool epd : 1;
+ bool hpd : 1;
+ bool using16k : 1;
+ bool using64k : 1;
+} ARMVAParameters;
+
+ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
+ ARMMMUIdx mmu_idx, bool data);
+
+static inline int exception_target_el(CPUARMState *env)
+{
+ int target_el = MAX(1, arm_current_el(env));
+
+ /*
+ * No such thing as secure EL1 if EL3 is aarch32,
+ * so update the target EL to EL3 in this case.
+ */
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
+ target_el = 3;
+ }
+
+ return target_el;
+}
+
+/* Determine if allocation tags are available. */
+static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
+ uint64_t sctlr)
+{
+ if (el < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_ATA)) {
+ return false;
+ }
+ if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
+ return false;
+ }
+ }
+ sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
+ return sctlr != 0;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
+
+void arm_log_exception(int idx);
+
+#endif /* !CONFIG_USER_ONLY */
+
+/*
+ * The log2 of the words in the tag block, for GMID_EL1.BS.
+ * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
+ */
+#define GMID_EL1_BS 6
+
+/* We associate one allocation tag per 16 bytes, the minimum. */
+#define LOG2_TAG_GRANULE 4
+#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
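+
+/*
+ * Concretely: with one 4-bit tag per 16-byte granule, the maximum
+ * 256-byte tag block covers 256 / 16 = 16 tags, i.e. the 64 bits of
+ * tags noted above, and 256 bytes is 64 four-byte words (log2 = 6,
+ * matching GMID_EL1_BS).
+ */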
+
+/*
+ * SVE predicates are 1/8 the size of SVE vectors, and cannot use
+ * the same simd_desc() encoding due to restrictions on size.
+ * Use these instead.
+ */
+FIELD(PREDDESC, OPRSZ, 0, 6)
+FIELD(PREDDESC, ESZ, 6, 2)
+FIELD(PREDDESC, DATA, 8, 24)
+
+/*
+ * The SVE simd_data field, for memory ops, contains either
+ * rd (5 bits) or a shift count (2 bits).
+ */
+#define SVE_MTEDESC_SHIFT 5
+
+/* Bits within a descriptor passed to the helper_mte_check* functions. */
+FIELD(MTEDESC, MIDX, 0, 4)
+FIELD(MTEDESC, TBI, 4, 2)
+FIELD(MTEDESC, TCMA, 6, 2)
+FIELD(MTEDESC, WRITE, 8, 1)
+FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */
+
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
+
+static inline int allocation_tag_from_addr(uint64_t ptr)
+{
+ return extract64(ptr, 56, 4);
+}
+
+static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
+{
+ return deposit64(ptr, 56, 4, rtag);
+}
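+
+/*
+ * Illustration with a hypothetical pointer value: for
+ * ptr = 0x0c00123456789abc, allocation_tag_from_addr() extracts bits
+ * [59:56] and returns 0xc, and address_with_allocation_tag(ptr, 0x3)
+ * rewrites those bits to yield 0x0300123456789abc.
+ */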
+
+/* Return true if tbi bits mean that the access is checked. */
+static inline bool tbi_check(uint32_t desc, int bit55)
+{
+ return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
+}
+
+/* Return true if tcma bits mean that the access is unchecked. */
+static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
+{
+ /*
+ * We had extracted bit55 and ptr_tag for other reasons, so fold
+ * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
+ */
+ bool match = ((ptr_tag + bit55) & 0xf) == 0;
+ bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
+ return tcma && match;
+}
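+
+/*
+ * To see the fold at work: ptr_tag 0x0 with bit55 0 gives
+ * (0x0 + 0) & 0xf == 0, and ptr_tag 0xf with bit55 1 gives
+ * (0xf + 1) & 0xf == 0, exactly the all-zeroes and all-ones
+ * ptr<59:55> cases; any other combination leaves "match" false.
+ */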
+
+/*
+ * For TBI, ideally, we would do nothing. Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register. But for user-only
+ * mode, we do not have a TLB with which to implement this, so we must
+ * remove the top byte.
+ */
+static inline uint64_t useronly_clean_ptr(uint64_t ptr)
+{
+#ifdef CONFIG_USER_ONLY
+ /* TBI0 is known to be enabled, while TBI1 is disabled. */
+ ptr &= sextract64(ptr, 0, 56);
+#endif
+ return ptr;
+}
+
+static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
+{
+#ifdef CONFIG_USER_ONLY
+ int64_t clean_ptr = sextract64(ptr, 0, 56);
+ if (tbi_check(desc, clean_ptr < 0)) {
+ ptr = clean_ptr;
+ }
+#endif
+ return ptr;
+}
+
+/* Values for M-profile PSR.ECI for MVE insns */
+enum MVEECIState {
+ ECI_NONE = 0, /* No completed beats */
+ ECI_A0 = 1, /* Completed: A0 */
+ ECI_A0A1 = 2, /* Completed: A0, A1 */
+ /* 3 is reserved */
+ ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
+ ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
+ /* All other values reserved */
+};
+
+/* Definitions for the PMU registers */
+#define PMCRN_MASK 0xf800
+#define PMCRN_SHIFT 11
+#define PMCRLC 0x40
+#define PMCRDP 0x20
+#define PMCRX 0x10
+#define PMCRD 0x8
+#define PMCRC 0x4
+#define PMCRP 0x2
+#define PMCRE 0x1
+/*
+ * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
+ * which can be written as 1 to trigger behaviour but which stay RAZ).
+ */
+#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+
+#define PMXEVTYPER_P 0x80000000
+#define PMXEVTYPER_U 0x40000000
+#define PMXEVTYPER_NSK 0x20000000
+#define PMXEVTYPER_NSU 0x10000000
+#define PMXEVTYPER_NSH 0x08000000
+#define PMXEVTYPER_M 0x04000000
+#define PMXEVTYPER_MT 0x02000000
+#define PMXEVTYPER_EVTCOUNT 0x0000ffff
+#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
+ PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
+ PMXEVTYPER_M | PMXEVTYPER_MT | \
+ PMXEVTYPER_EVTCOUNT)
+
+#define PMCCFILTR 0xf8000000
+#define PMCCFILTR_M PMXEVTYPER_M
+#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
+
+static inline uint32_t pmu_num_counters(CPUARMState *env)
+{
+ return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+}
+
+/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
+static inline uint64_t pmu_counter_mask(CPUARMState *env)
+{
+ return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+}
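+
+/*
+ * Worked example: with PMCR.N == 4 this is
+ * (1 << 31) | ((1 << 4) - 1) == 0x8000000f, i.e. the cycle counter
+ * bit plus one bit per implemented event counter.
+ */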
+
+#ifdef TARGET_AARCH64
+int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
+int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
+int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
+int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
+#endif
+
+#endif
diff --git a/target/arm/iwmmxt_helper.c b/target/arm/iwmmxt_helper.c
new file mode 100644
index 000000000..610b1b210
--- /dev/null
+++ b/target/arm/iwmmxt_helper.c
@@ -0,0 +1,670 @@
+/*
+ * iwMMXt micro operations for XScale.
+ *
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ * Copyright (c) 2008 CodeSourcery
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* iwMMXt macros extracted from GNU gdb. */
+
+/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
+#define SIMD8_SET(v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n)))
+#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
+#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
+#define SIMD64_SET(v, n) ((v != 0) << (32 + (n)))
+/* Flags to pass as "n" above. */
+#define SIMD_NBIT -1
+#define SIMD_ZBIT -2
+#define SIMD_CBIT -3
+#define SIMD_VBIT -4
+/* Various status bit macros. */
+#define NBIT8(x) ((x) & 0x80)
+#define NBIT16(x) ((x) & 0x8000)
+#define NBIT32(x) ((x) & 0x80000000)
+#define NBIT64(x) ((x) & 0x8000000000000000ULL)
+#define ZBIT8(x) (((x) & 0xff) == 0)
+#define ZBIT16(x) (((x) & 0xffff) == 0)
+#define ZBIT32(x) (((x) & 0xffffffff) == 0)
+#define ZBIT64(x) ((x) == 0)
+/* Sign extension macros. */
+#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
+#define EXTEND8(a) ((uint32_t) (int8_t) (a))
+#define EXTEND16(a) ((uint32_t) (int16_t) (a))
+#define EXTEND16S(a) ((int32_t) (int16_t) (a))
+#define EXTEND32(a) ((uint64_t) (int32_t) (a))
+
+uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) +
+ EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((uint64_t) (
+ EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) +
+ EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
+ ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((
+ ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
+ ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b)
+{
+#define abs(x) (((x) >= 0) ? (x) : -(x))
+#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff))
+ return
+ SADB(0) + SADB(8) + SADB(16) + SADB(24) +
+ SADB(32) + SADB(40) + SADB(48) + SADB(56);
+#undef SADB
+}
+
+uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b)
+{
+#define SADW(SHR) \
+ abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff))
+ return SADW(0) + SADW(16) + SADW(32) + SADW(48);
+#undef SADW
+}
+
+uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b)
+{
+#define MACS(SHR) ( \
+ EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff))
+ return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48));
+#undef MACS
+}
+
+uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
+{
+#define MACU(SHR) ( \
+ (uint32_t) ((a >> SHR) & 0xffff) * \
+ (uint32_t) ((b >> SHR) & 0xffff))
+ return MACU(0) + MACU(16) + MACU(32) + MACU(48);
+#undef MACU
+}
+
+#define NZBIT8(x, i) \
+ SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \
+ SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i)
+#define NZBIT16(x, i) \
+ SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \
+ SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i)
+#define NZBIT32(x, i) \
+ SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \
+ SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i)
+#define NZBIT64(x) \
+ SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
+ SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
+#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
+ (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
+ (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
+ (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffff) << 0) | \
+ (((b >> SH0) & 0xffff) << 16) | \
+ (((a >> SH2) & 0xffff) << 32) | \
+ (((b >> SH2) & 0xffff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+        NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+        NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffffffff) << 0) | \
+ (((b >> SH0) & 0xffffffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xff) << 0) | \
+ (((x >> SH1) & 0xff) << 16) | \
+ (((x >> SH2) & 0xff) << 32) | \
+ (((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xffff) << 0) | \
+ (((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = (((x >> SH0) & 0xffffffff) << 0); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
+ ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
+ ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
+ ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
+ ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = EXTEND32((x >> SH0) & 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+}
+IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
+IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
+
+#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
+ CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
+ CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
+ CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
+ CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tl, O, 0xffffffff) | \
+ CMP(32, Tl, O, 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+}
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR)
+IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==)
+IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR))
+IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <)
+IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <)
+IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +)
+#undef CMP
+/* TODO: Signed- and Unsigned-Saturation */
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +)
+IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -)
+IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +)
+#undef CMP
+#undef IWMMXT_OP_CMP
+
+#define AVGB(SHR) ((( \
+ ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGB(r) \
+uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
+ AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \
+ SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \
+ SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \
+ SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \
+ SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \
+ SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \
+ SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \
+ SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \
+ return a; \
+}
+IWMMXT_OP_AVGB(0)
+IWMMXT_OP_AVGB(1)
+#undef IWMMXT_OP_AVGB
+#undef AVGB
+
+#define AVGW(SHR) ((( \
+ ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGW(r) \
+uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \
+ SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \
+ SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \
+ SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \
+ return a; \
+}
+IWMMXT_OP_AVGW(0)
+IWMMXT_OP_AVGW(1)
+#undef IWMMXT_OP_AVGW
+#undef AVGW
+
+uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n)
+{
+    n <<= 3;
+    /* Guard the shift: "b << 64" would be undefined when n == 0. */
+    if (n) {
+        a = (a >> n) | (b << (64 - n));
+    }
+    return a;
+}
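+
+/*
+ * For example, with n == 3 the result takes bytes 3..7 of a as its
+ * low five bytes and bytes 0..2 of b as its high three bytes.
+ */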
+
+uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
+{
+ x &= ~((uint64_t) b << n);
+ x |= (uint64_t) (a & b) << n;
+ return x;
+}
+
+uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
+{
+ return SIMD64_SET((x == 0), SIMD_ZBIT) |
+ SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
+}
+
+uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
+{
+ arg &= 0xff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
+ ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
+ ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
+}
+
+uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
+{
+ arg &= 0xffff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
+}
+
+uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
+{
+ return arg | ((uint64_t) arg << 32);
+}
+
+uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
+ ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
+ ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
+ ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
+}
+
+uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
+ ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
+}
+
+uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
+{
+ return (x & 0xffffffff) + (x >> 32);
+}
+
+uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
+{
+ return
+ ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
+ ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
+ ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
+ ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
+}
+
+uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
+{
+ return
+ ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
+        ((x >> 45) & 0x04) | ((x >> 60) & 0x08);
+}
+
+uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
+{
+ return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
+}
+
+/* FIXME: Split wCASF setting into a separate op to avoid env use. */
+uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) >> n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x >>= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) << n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x << n) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << n);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x <<= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
+ ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
+ ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) |
+ ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
+ (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (int64_t) x >> n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((((x & (0xffffll << 0)) >> n) |
+ ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
+ ((((x & (0xffffll << 16)) >> n) |
+ ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) |
+ ((((x & (0xffffll << 32)) >> n) |
+ ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) |
+ ((((x & (0xffffll << 48)) >> n) |
+ ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32)) |
+ ((x << (32 - n)) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << (32 - n));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ror64(x, n);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
+ (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
+ (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) |
+ (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+/* TODO: Unsigned-Saturation */
+uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+/* TODO: Signed-Saturation */
+uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b));
+}
+
+uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b)
+{
+ c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) *
+ EXTEND16S((b >> 0) & 0xffff));
+ c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) *
+ EXTEND16S((b >> 16) & 0xffff));
+ return c;
+}
+
+uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + (EXTEND32(EXTEND16S(a & 0xffff) *
+ EXTEND16S(b & 0xffff)));
+}
diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h
new file mode 100644
index 000000000..580f1c1fe
--- /dev/null
+++ b/target/arm/kvm-consts.h
@@ -0,0 +1,180 @@
+/*
+ * KVM ARM ABI constant definitions
+ *
+ * Copyright (c) 2013 Linaro Limited
+ *
+ * Provide versions of KVM constant defines that can be used even
+ * when CONFIG_KVM is not set and we don't have access to the
+ * KVM headers. If CONFIG_KVM is set, we do a compile-time check
+ * that we haven't got out of sync somehow.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ARM_KVM_CONSTS_H
+#define ARM_KVM_CONSTS_H
+
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#include <linux/psci.h>
+
+#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y)
+
+#else
+
+#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(0)
+
+#endif
+
+#define CP_REG_SIZE_SHIFT 52
+#define CP_REG_SIZE_MASK 0x00f0000000000000ULL
+#define CP_REG_SIZE_U32 0x0020000000000000ULL
+#define CP_REG_SIZE_U64 0x0030000000000000ULL
+#define CP_REG_ARM 0x4000000000000000ULL
+#define CP_REG_ARCH_MASK 0xff00000000000000ULL
+
+MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT);
+MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK);
+MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32);
+MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64);
+MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM);
+MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK);
+
+#define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e
+#define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n))
+#define QEMU_PSCI_0_1_FN_CPU_SUSPEND QEMU_PSCI_0_1_FN(0)
+#define QEMU_PSCI_0_1_FN_CPU_OFF QEMU_PSCI_0_1_FN(1)
+#define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2)
+#define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3)
+
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND);
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF);
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON);
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE);
+
+#define QEMU_PSCI_0_2_FN_BASE 0x84000000
+#define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n))
+
+#define QEMU_PSCI_0_2_64BIT 0x40000000
+#define QEMU_PSCI_0_2_FN64_BASE \
+ (QEMU_PSCI_0_2_FN_BASE + QEMU_PSCI_0_2_64BIT)
+#define QEMU_PSCI_0_2_FN64(n) (QEMU_PSCI_0_2_FN64_BASE + (n))
+
+#define QEMU_PSCI_0_2_FN_PSCI_VERSION QEMU_PSCI_0_2_FN(0)
+#define QEMU_PSCI_0_2_FN_CPU_SUSPEND QEMU_PSCI_0_2_FN(1)
+#define QEMU_PSCI_0_2_FN_CPU_OFF QEMU_PSCI_0_2_FN(2)
+#define QEMU_PSCI_0_2_FN_CPU_ON QEMU_PSCI_0_2_FN(3)
+#define QEMU_PSCI_0_2_FN_AFFINITY_INFO QEMU_PSCI_0_2_FN(4)
+#define QEMU_PSCI_0_2_FN_MIGRATE QEMU_PSCI_0_2_FN(5)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE QEMU_PSCI_0_2_FN(6)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU QEMU_PSCI_0_2_FN(7)
+#define QEMU_PSCI_0_2_FN_SYSTEM_OFF QEMU_PSCI_0_2_FN(8)
+#define QEMU_PSCI_0_2_FN_SYSTEM_RESET QEMU_PSCI_0_2_FN(9)
+
+#define QEMU_PSCI_0_2_FN64_CPU_SUSPEND QEMU_PSCI_0_2_FN64(1)
+#define QEMU_PSCI_0_2_FN64_CPU_OFF QEMU_PSCI_0_2_FN64(2)
+#define QEMU_PSCI_0_2_FN64_CPU_ON QEMU_PSCI_0_2_FN64(3)
+#define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4)
+#define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5)
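+
+/* For example, QEMU_PSCI_0_2_FN64_CPU_ON evaluates to
+ * 0x84000000 + 0x40000000 + 3 = 0xc4000003: PSCI 0.2 fixes the
+ * function IDs, with bit 30 selecting the SMC64 calling convention.
+ */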
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON);
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE);
+
+/* PSCI v0.2 return values used by TCG emulation of PSCI */
+
+/* No Trusted OS migration to worry about when offlining CPUs */
+#define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2
+
+/* We implement version 0.2 only */
+#define QEMU_PSCI_0_2_RET_VERSION_0_2 2
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP);
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2,
+ (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2)));
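+
+/* The PSCI version is encoded as (major << 16) | minor, so version
+ * 0.2 is simply the value 2.
+ */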
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define QEMU_PSCI_RET_SUCCESS 0
+#define QEMU_PSCI_RET_NOT_SUPPORTED -1
+#define QEMU_PSCI_RET_INVALID_PARAMS -2
+#define QEMU_PSCI_RET_DENIED -3
+#define QEMU_PSCI_RET_ALREADY_ON -4
+#define QEMU_PSCI_RET_ON_PENDING -5
+#define QEMU_PSCI_RET_INTERNAL_FAILURE -6
+#define QEMU_PSCI_RET_NOT_PRESENT -7
+#define QEMU_PSCI_RET_DISABLED -8
+
+MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS);
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED);
+MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS);
+MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED);
+MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON);
+MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING);
+MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE);
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT);
+MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED);
+
+/* Note that KVM uses overlapping values for AArch32 and AArch64
+ * target CPU numbers. AArch32 targets:
+ */
+#define QEMU_KVM_ARM_TARGET_CORTEX_A15 0
+#define QEMU_KVM_ARM_TARGET_CORTEX_A7 1
+
+/* AArch64 targets: */
+#define QEMU_KVM_ARM_TARGET_AEM_V8 0
+#define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1
+#define QEMU_KVM_ARM_TARGET_CORTEX_A57 2
+#define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3
+#define QEMU_KVM_ARM_TARGET_CORTEX_A53 4
+
+/* There's no kernel define for this: sentinel value which
+ * matches no KVM target value for either 64-bit or 32-bit
+ */
+#define QEMU_KVM_ARM_TARGET_NONE UINT_MAX
+
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8);
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8);
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57);
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA);
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53);
+
+#define CP_REG_ARM64 0x6000000000000000ULL
+#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define CP_REG_ARM_COPROC_SHIFT 16
+#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT)
+#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
+#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define CP_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
+#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
+#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
+#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
+#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+/* No kernel define but it's useful to QEMU */
+#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)
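+
+/* A worked example of the encoding, computed from the masks above and
+ * matching the ARM64_SYS_REG() values used in kvm64.c: MPIDR_EL1 is
+ * op0=3, op1=0, CRn=0, CRm=0, op2=5, so its register ID is
+ * CP_REG_ARM64 | CP_REG_SIZE_U64 | CP_REG_ARM64_SYSREG |
+ * (3 << 14) | 5 = 0x603000000013c005.
+ */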
+
+MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64);
+MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK);
+MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK);
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT);
+
+#undef MISMATCH_CHECK
+
+#endif
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
new file mode 100644
index 000000000..56a7099e6
--- /dev/null
+++ b/target/arm/kvm-stub.c
@@ -0,0 +1,24 @@
+/*
+ * QEMU KVM ARM specific function stubs
+ *
+ * Copyright Linaro Limited 2013
+ *
+ * Author: Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "kvm_arm.h"
+
+bool write_kvmstate_to_list(ARMCPU *cpu)
+{
+ abort();
+}
+
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
+{
+ abort();
+}
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
new file mode 100644
index 000000000..bbf1ce7ba
--- /dev/null
+++ b/target/arm/kvm.c
@@ -0,0 +1,1059 @@
+/*
+ * ARM implementation of KVM hooks
+ *
+ * Copyright Christoffer Dall 2009-2010
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qom/object.h"
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
+#include "kvm_arm.h"
+#include "cpu.h"
+#include "trace.h"
+#include "internals.h"
+#include "hw/pci/pci.h"
+#include "exec/memattrs.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static bool cap_has_mp_state;
+static bool cap_has_inject_serror_esr;
+static bool cap_has_inject_ext_dabt;
+
+static ARMHostCPUFeatures arm_host_cpu_features;
+
+int kvm_arm_vcpu_init(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ struct kvm_vcpu_init init;
+
+ init.target = cpu->kvm_target;
+ memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));
+
+ return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
+}
+
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
+{
+ return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
+}
+
+void kvm_arm_init_serror_injection(CPUState *cs)
+{
+ cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+ KVM_CAP_ARM_INJECT_SERROR_ESR);
+}
+
+bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
+ int *fdarray,
+ struct kvm_vcpu_init *init)
+{
+ int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
+ int max_vm_pa_size;
+
+ kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
+ if (kvmfd < 0) {
+ goto err;
+ }
+ max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
+ if (max_vm_pa_size < 0) {
+ max_vm_pa_size = 0;
+ }
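+ /* On arm64 the type argument to KVM_CREATE_VM carries the requested
+ * IPA size in its low bits; 0 keeps the kernel's default.
+ */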
+ vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
+ if (vmfd < 0) {
+ goto err;
+ }
+ cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
+ if (cpufd < 0) {
+ goto err;
+ }
+
+ if (!init) {
+ /* Caller doesn't want the VCPU to be initialized, so skip it */
+ goto finish;
+ }
+
+ if (init->target == -1) {
+ struct kvm_vcpu_init preferred;
+
+ ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
+ if (!ret) {
+ init->target = preferred.target;
+ }
+ }
+ if (ret >= 0) {
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
+ goto err;
+ }
+ } else if (cpus_to_try) {
+ /* Old kernel which doesn't know about the
+ * PREFERRED_TARGET ioctl: we know it will only support
+ * creating one kind of guest CPU which is its preferred
+ * CPU type.
+ */
+ struct kvm_vcpu_init try;
+
+ while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
+ try.target = *cpus_to_try++;
+ memcpy(try.features, init->features, sizeof(init->features));
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
+ if (ret >= 0) {
+ break;
+ }
+ }
+ if (ret < 0) {
+ goto err;
+ }
+ init->target = try.target;
+ } else {
+ /* Treat a NULL cpus_to_try argument the same as an empty
+ * list, which means we will fail the call since this must
+ * be an old kernel which doesn't support PREFERRED_TARGET.
+ */
+ goto err;
+ }
+
+finish:
+ fdarray[0] = kvmfd;
+ fdarray[1] = vmfd;
+ fdarray[2] = cpufd;
+
+ return true;
+
+err:
+ if (cpufd >= 0) {
+ close(cpufd);
+ }
+ if (vmfd >= 0) {
+ close(vmfd);
+ }
+ if (kvmfd >= 0) {
+ close(kvmfd);
+ }
+
+ return false;
+}
+
+void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
+{
+ int i;
+
+ for (i = 2; i >= 0; i--) {
+ close(fdarray[i]);
+ }
+}
+
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (!arm_host_cpu_features.dtb_compatible) {
+ if (!kvm_enabled() ||
+ !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
+ /* We can't report this error yet, so flag that we need to
+ * report it in arm_cpu_realizefn().
+ */
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
+ cpu->host_cpu_probe_failed = true;
+ return;
+ }
+ }
+
+ cpu->kvm_target = arm_host_cpu_features.target;
+ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
+ cpu->isar = arm_host_cpu_features.isar;
+ env->features = arm_host_cpu_features.features;
+}
+
+static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
+{
+ return !ARM_CPU(obj)->kvm_adjvtime;
+}
+
+static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
+{
+ ARM_CPU(obj)->kvm_adjvtime = !value;
+}
+
+static bool kvm_steal_time_get(Object *obj, Error **errp)
+{
+ return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
+}
+
+static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
+{
+ ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+}
+
+/* KVM VCPU properties should be prefixed with "kvm-". */
+void kvm_arm_add_vcpu_properties(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
+ cpu->kvm_adjvtime = true;
+ object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
+ kvm_no_adjvtime_set);
+ object_property_set_description(obj, "kvm-no-adjvtime",
+ "Set on to disable the adjustment of "
+ "the virtual counter. VM stopped time "
+ "will be counted.");
+ }
+
+ cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
+ object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
+ kvm_steal_time_set);
+ object_property_set_description(obj, "kvm-steal-time",
+ "Set off to disable KVM steal time.");
+}
+
+bool kvm_arm_pmu_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
+}
+
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
+{
+ KVMState *s = KVM_STATE(ms->accelerator);
+ int ret;
+
+ ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
+ *fixed_ipa = ret <= 0;
+
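+ /* Kernels without KVM_CAP_ARM_VM_IPA_SIZE use a fixed 40-bit IPA space */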
+ return ret > 0 ? ret : 40;
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ int ret = 0;
+ /* For ARM, interrupt delivery is always asynchronous,
+ * whether we are using an in-kernel VGIC or not.
+ */
+ kvm_async_interrupts_allowed = true;
+
+ /*
+ * PSCI wakes up secondary cores, so we always need to
+ * have vCPUs waiting in kernel space
+ */
+ kvm_halt_in_kernel_allowed = true;
+
+ cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
+
+ if (ms->smp.cpus > 256 &&
+ !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
+ error_report("Using more than 256 vcpus requires a host kernel "
+ "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
+ ret = -EINVAL;
+ }
+
+ if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
+ if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
+ error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap");
+ } else {
+ /* Set status for supporting the external dabt injection */
+ cap_has_inject_ext_dabt = kvm_check_extension(s,
+ KVM_CAP_ARM_INJECT_EXT_DABT);
+ }
+ }
+
+ return ret;
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return cpu->cpu_index;
+}
+
+/* We track all the KVM devices which need their memory addresses
+ * passed to the kernel in a list of these structures.
+ * When board init is complete we run through the list and
+ * tell the kernel the base addresses of the memory regions.
+ * We use a MemoryListener to track mapping and unmapping of
+ * the regions during board creation, so the board models don't
+ * need to do anything special for the KVM case.
+ *
+ * Sometimes the address must be OR'ed with some other fields
+ * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
+ * @kda_addr_ormask stores the value of those fields.
+ */
+typedef struct KVMDevice {
+ struct kvm_arm_device_addr kda;
+ struct kvm_device_attr kdattr;
+ uint64_t kda_addr_ormask;
+ MemoryRegion *mr;
+ QSLIST_ENTRY(KVMDevice) entries;
+ int dev_fd;
+} KVMDevice;
+
+static QSLIST_HEAD(, KVMDevice) kvm_devices_head;
+
+static void kvm_arm_devlistener_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMDevice *kd;
+
+ QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
+ if (section->mr == kd->mr) {
+ kd->kda.addr = section->offset_within_address_space;
+ }
+ }
+}
+
+static void kvm_arm_devlistener_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMDevice *kd;
+
+ QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
+ if (section->mr == kd->mr) {
+ kd->kda.addr = -1;
+ }
+ }
+}
+
+static MemoryListener devlistener = {
+ .name = "kvm-arm",
+ .region_add = kvm_arm_devlistener_add,
+ .region_del = kvm_arm_devlistener_del,
+};
+
+static void kvm_arm_set_device_addr(KVMDevice *kd)
+{
+ struct kvm_device_attr *attr = &kd->kdattr;
+ int ret;
+
+ /* If the device control API is available and we have a device fd on the
+ * KVMDevice struct, let's use the newer API
+ */
+ if (kd->dev_fd >= 0) {
+ uint64_t addr = kd->kda.addr;
+
+ addr |= kd->kda_addr_ormask;
+ attr->addr = (uintptr_t)&addr;
+ ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
+ } else {
+ ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
+ }
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to set device address: %s\n",
+ strerror(-ret));
+ abort();
+ }
+}
+
+static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
+{
+ KVMDevice *kd, *tkd;
+
+ QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
+ if (kd->kda.addr != -1) {
+ kvm_arm_set_device_addr(kd);
+ }
+ memory_region_unref(kd->mr);
+ QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
+ g_free(kd);
+ }
+ memory_listener_unregister(&devlistener);
+}
+
+static Notifier notify = {
+ .notify = kvm_arm_machine_init_done,
+};
+
+void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
+ uint64_t attr, int dev_fd, uint64_t addr_ormask)
+{
+ KVMDevice *kd;
+
+ if (!kvm_irqchip_in_kernel()) {
+ return;
+ }
+
+ if (QSLIST_EMPTY(&kvm_devices_head)) {
+ memory_listener_register(&devlistener, &address_space_memory);
+ qemu_add_machine_init_done_notifier(&notify);
+ }
+ kd = g_new0(KVMDevice, 1);
+ kd->mr = mr;
+ kd->kda.id = devid;
+ kd->kda.addr = -1;
+ kd->kdattr.flags = 0;
+ kd->kdattr.group = group;
+ kd->kdattr.attr = attr;
+ kd->dev_fd = dev_fd;
+ kd->kda_addr_ormask = addr_ormask;
+ QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
+ memory_region_ref(kd->mr);
+}
+
+static int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * cpreg_values are sorted in ascending order by KVM register ID
+ * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
+ * the storage for a KVM register by ID with a binary search.
+ */
+static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
+{
+ uint64_t *res;
+
+ res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
+ sizeof(uint64_t), compare_u64);
+ assert(res);
+
+ return &cpu->cpreg_values[res - cpu->cpreg_indexes];
+}
+
+/* Initialize the ARMCPU cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu)
+{
+ struct kvm_reg_list rl;
+ struct kvm_reg_list *rlp;
+ int i, ret, arraylen;
+ CPUState *cs = CPU(cpu);
+
+ rl.n = 0;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
+ if (ret != -E2BIG) {
+ return ret;
+ }
+ rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
+ rlp->n = rl.n;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
+ if (ret) {
+ goto out;
+ }
+ /* Sort the list we get back from the kernel, since cpreg_tuples
+ * must be in strictly ascending order.
+ */
+ qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
+ continue;
+ }
+ switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ case KVM_REG_SIZE_U64:
+ break;
+ default:
+ fprintf(stderr, "Can't handle size of register in kernel list\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arraylen++;
+ }
+
+ cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
+ cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
+ cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
+ arraylen);
+ cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
+ arraylen);
+ cpu->cpreg_array_len = arraylen;
+ cpu->cpreg_vmstate_array_len = arraylen;
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ uint64_t regidx = rlp->reg[i];
+ if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
+ continue;
+ }
+ cpu->cpreg_indexes[arraylen] = regidx;
+ arraylen++;
+ }
+ assert(cpu->cpreg_array_len == arraylen);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ /* Shouldn't happen unless kernel is inconsistent about
+ * what registers exist.
+ */
+ fprintf(stderr, "Initial read of kernel register state failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ g_free(rlp);
+ return ret;
+}
+
+bool write_kvmstate_to_list(ARMCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ struct kvm_one_reg r;
+ uint64_t regidx = cpu->cpreg_indexes[i];
+ uint32_t v32;
+ int ret;
+
+ r.id = regidx;
+
+ switch (regidx & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ r.addr = (uintptr_t)&v32;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (!ret) {
+ cpu->cpreg_values[i] = v32;
+ }
+ break;
+ case KVM_REG_SIZE_U64:
+ r.addr = (uintptr_t)(cpu->cpreg_values + i);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ break;
+ default:
+ abort();
+ }
+ if (ret) {
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
+{
+ CPUState *cs = CPU(cpu);
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ struct kvm_one_reg r;
+ uint64_t regidx = cpu->cpreg_indexes[i];
+ uint32_t v32;
+ int ret;
+
+ if (kvm_arm_cpreg_level(regidx) > level) {
+ continue;
+ }
+
+ r.id = regidx;
+ switch (regidx & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ v32 = cpu->cpreg_values[i];
+ r.addr = (uintptr_t)&v32;
+ break;
+ case KVM_REG_SIZE_U64:
+ r.addr = (uintptr_t)(cpu->cpreg_values + i);
+ break;
+ default:
+ abort();
+ }
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ /* We might fail for "unknown register" and also for
+ * "you tried to set a register which is constant with
+ * a different value from what it actually contains".
+ */
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+void kvm_arm_cpu_pre_save(ARMCPU *cpu)
+{
+ /* KVM virtual time adjustment */
+ if (cpu->kvm_vtime_dirty) {
+ *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
+ }
+}
+
+void kvm_arm_cpu_post_load(ARMCPU *cpu)
+{
+ /* KVM virtual time adjustment */
+ if (cpu->kvm_adjvtime) {
+ cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
+ cpu->kvm_vtime_dirty = true;
+ }
+}
+
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ int ret;
+
+ /* Re-init VCPU so that all registers are set to
+ * their respective reset values.
+ */
+ ret = kvm_arm_vcpu_init(CPU(cpu));
+ if (ret < 0) {
+ fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
+ abort();
+ }
+ if (!write_kvmstate_to_list(cpu)) {
+ fprintf(stderr, "write_kvmstate_to_list failed\n");
+ abort();
+ }
+ /*
+ * Sync the reset values also into the CPUState. This is necessary
+ * because the next thing we do will be a kvm_arch_put_registers()
+ * which will update the list values from the CPUState before copying
+ * the list values back to KVM. It's OK to ignore failure returns here
+ * for the same reason we do so in kvm_arch_get_registers().
+ */
+ write_list_to_cpustate(cpu);
+}
+
+/*
+ * Update KVM's MP_STATE based on what QEMU thinks it is
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state = {
+ .mp_state = (cpu->power_state == PSCI_OFF) ?
+ KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
+ };
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Sync the KVM MP_STATE into QEMU
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state;
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ abort();
+ }
+ cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
+ PSCI_OFF : PSCI_ON;
+ }
+
+ return 0;
+}
+
+void kvm_arm_get_virtual_time(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_ARM_TIMER_CNT,
+ .addr = (uintptr_t)&cpu->kvm_vtime,
+ };
+ int ret;
+
+ if (cpu->kvm_vtime_dirty) {
+ return;
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
+ abort();
+ }
+
+ cpu->kvm_vtime_dirty = true;
+}
+
+void kvm_arm_put_virtual_time(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_ARM_TIMER_CNT,
+ .addr = (uintptr_t)&cpu->kvm_vtime,
+ };
+ int ret;
+
+ if (!cpu->kvm_vtime_dirty) {
+ return;
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
+ abort();
+ }
+
+ cpu->kvm_vtime_dirty = false;
+}
+
+int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ events.exception.serror_pending = env->serror.pending;
+
+ /* Inject SError into the guest with the specified syndrome if the
+ * host kernel supports it, otherwise inject SError without syndrome.
+ */
+ if (cap_has_inject_serror_esr) {
+ events.exception.serror_has_esr = env->serror.has_esr;
+ events.exception.serror_esr = env->serror.esr;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+ if (ret) {
+ error_report("failed to put vcpu events");
+ }
+
+ return ret;
+}
+
+int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+ if (ret) {
+ error_report("failed to get vcpu events");
+ return ret;
+ }
+
+ env->serror.pending = events.exception.serror_pending;
+ env->serror.has_esr = events.exception.serror_has_esr;
+ env->serror.esr = events.exception.serror_esr;
+
+ return 0;
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (unlikely(env->ext_dabt_raised)) {
+ /*
+ * Verify that the external DABT has been properly injected;
+ * otherwise we risk indefinitely re-running the faulting instruction.
+ * This covers a very narrow case for kernels 5.5..5.5.4 in which
+ * the injected abort was misconfigured to be
+ * an IMPLEMENTATION DEFINED exception (for 32-bit EL1).
+ */
+ if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
+ unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) {
+
+ error_report("Data abort exception with no valid ISS generated by "
+ "guest memory access. KVM unable to emulate faulting "
+ "instruction. Failed to inject an external data abort "
+ "into the guest.");
+ abort();
+ }
+ /* Clear the status */
+ env->ext_dabt_raised = 0;
+ }
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ ARMCPU *cpu;
+ uint32_t switched_level;
+
+ if (kvm_irqchip_in_kernel()) {
+ /*
+ * We only need to sync timer states with user-space interrupt
+ * controllers, so if the irqchip is in the kernel we can return
+ * early and save cycles.
+ */
+ return MEMTXATTRS_UNSPECIFIED;
+ }
+
+ cpu = ARM_CPU(cs);
+
+ /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
+ if (run->s.regs.device_irq_level != cpu->device_irq_level) {
+ switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;
+
+ qemu_mutex_lock_iothread();
+
+ if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
+ qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
+ !!(run->s.regs.device_irq_level &
+ KVM_ARM_DEV_EL1_VTIMER));
+ switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
+ }
+
+ if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
+ qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
+ !!(run->s.regs.device_irq_level &
+ KVM_ARM_DEV_EL1_PTIMER));
+ switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
+ }
+
+ if (switched_level & KVM_ARM_DEV_PMU) {
+ qemu_set_irq(cpu->pmu_interrupt,
+ !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
+ switched_level &= ~KVM_ARM_DEV_PMU;
+ }
+
+ if (switched_level) {
+ qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
+ __func__, switched_level);
+ }
+
+ /* We also mark unknown levels as processed to not waste cycles */
+ cpu->device_irq_level = run->s.regs.device_irq_level;
+ qemu_mutex_unlock_iothread();
+ }
+
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
+{
+ CPUState *cs = opaque;
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ if (running) {
+ if (cpu->kvm_adjvtime) {
+ kvm_arm_put_virtual_time(cs);
+ }
+ } else {
+ if (cpu->kvm_adjvtime) {
+ kvm_arm_get_virtual_time(cs);
+ }
+ }
+}
+
+/**
+ * kvm_arm_handle_dabt_nisv:
+ * @cs: CPUState
+ * @esr_iss: ISS encoding (limited) for the exception from Data Abort
+ * ISV bit set to '0b0' -> no valid instruction syndrome
+ * @fault_ipa: faulting address for the synchronous data abort
+ *
+ * Returns: 0 if the exception has been handled, < 0 otherwise
+ */
+static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
+ uint64_t fault_ipa)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ /*
+ * Request KVM to inject the external data abort into the guest
+ */
+ if (cap_has_inject_ext_dabt) {
+ struct kvm_vcpu_events events = { };
+ /*
+ * The external data abort event will be handled immediately by KVM
+ * using the address fault that triggered the exit on the given VCPU.
+ * Requesting injection of the external data abort does not rely
+ * on any other VCPU state. Therefore, in this particular case, the VCPU
+ * synchronization can be exceptionally skipped.
+ */
+ events.exception.ext_dabt_pending = 1;
+ /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
+ if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) {
+ env->ext_dabt_raised = 1;
+ return 0;
+ }
+ } else {
+ error_report("Data abort exception triggered by guest memory access "
+ "at physical address: 0x" TARGET_FMT_lx,
+ (target_ulong)fault_ipa);
+ error_printf("KVM unable to emulate faulting instruction.\n");
+ }
+ return -1;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ int ret = 0;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DEBUG:
+ if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
+ ret = EXCP_DEBUG;
+ } /* otherwise return to guest */
+ break;
+ case KVM_EXIT_ARM_NISV:
+ /* External DABT with no valid iss to decode */
+ ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
+ run->arm_nisv.fault_ipa);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: unhandled exit reason %d\n",
+ __func__, run->exit_reason);
+ break;
+ }
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ return true;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+ if (kvm_arm_hw_debug_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
+ kvm_arm_copy_hw_debug_data(&dbg->arch);
+ }
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ if (kvm_kernel_irqchip_split()) {
+ error_report("-machine kernel_irqchip=split is not supported on ARM.");
+ exit(1);
+ }
+
+ /* If we can create the VGIC using the newer device control API, we
+ * let the device do this when it initializes itself, otherwise we
+ * fall back to the old API */
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
+}
+
+int kvm_arm_vgic_probe(void)
+{
+ int val = 0;
+
+ if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
+ val |= KVM_ARM_VGIC_V3;
+ }
+ if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
+ val |= KVM_ARM_VGIC_V2;
+ }
+ return val;
+}
+
+int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
+{
+ int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
+ int cpu_idx1 = cpu % 256;
+ int cpu_idx2 = cpu / 256;
+
+ kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
+ (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);
+
+ return kvm_set_irq(kvm_state, kvm_irq, !!level);
+}
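+
+/* A worked example, assuming the usual uapi constants
+ * (KVM_ARM_IRQ_TYPE_PPI == 2, KVM_ARM_IRQ_TYPE_SHIFT == 24,
+ * KVM_ARM_IRQ_VCPU_SHIFT == 16, KVM_ARM_IRQ_VCPU2_SHIFT == 28):
+ * raising PPI 27 on vcpu 300 gives cpu_idx1 = 44, cpu_idx2 = 1 and
+ * kvm_irq = (2 << 24) | (1 << 28) | (44 << 16) | 27 = 0x122c001b.
+ */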
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ AddressSpace *as = pci_device_iommu_address_space(dev);
+ hwaddr xlat, len, doorbell_gpa;
+ MemoryRegionSection mrs;
+ MemoryRegion *mr;
+
+ if (as == &address_space_memory) {
+ return 0;
+ }
+
+ /* MSI doorbell address is translated by an IOMMU */
+
+ RCU_READ_LOCK_GUARD();
+
+ mr = address_space_translate(as, address, &xlat, &len, true,
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (!mr) {
+ return 1;
+ }
+
+ mrs = memory_region_find(mr, xlat, 1);
+
+ if (!mrs.mr) {
+ return 1;
+ }
+
+ doorbell_gpa = mrs.offset_within_address_space;
+ memory_region_unref(mrs.mr);
+
+ route->u.msi.address_lo = doorbell_gpa;
+ route->u.msi.address_hi = doorbell_gpa >> 32;
+
+ trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);
+
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
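+ /* GIC SPI INTIDs start at 32 and GSI numbering is zero-based at the
+ * first SPI, hence the subtraction.
+ */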
+ return (data - 32) & 0xffff;
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+ return true;
+}
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
new file mode 100644
index 000000000..e790d6c9a
--- /dev/null
+++ b/target/arm/kvm64.c
@@ -0,0 +1,1606 @@
+/*
+ * ARM implementation of KVM hooks, 64 bit specific code
+ *
+ * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
+ * Copyright Alex Bennée 2014, Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <sys/ptrace.h>
+
+#include <linux/elf.h>
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "exec/gdbstub.h"
+#include "sysemu/runstate.h"
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
+#include "kvm_arm.h"
+#include "internals.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ghes.h"
+#include "hw/arm/virt.h"
+
+static bool have_guest_debug;
+
+/*
+ * Although the ARM implementation of hardware assisted debugging
+ * allows for different breakpoints per-core, the current GDB
+ * interface treats them as a global pool of registers (which seems to
+ * be the case for x86, ppc and s390). As a result we store one copy
+ * of registers which is used for all active cores.
+ *
+ * Write access is serialised by the GDB protocol, which drives the
+ * updates. Read access (i.e. when the values are copied to the
+ * vCPU) is also gated by GDB's run control.
+ *
+ * This is not unreasonable: when debugging a kernel you rarely know
+ * which core will eventually execute the code of interest.
+ */
+
+typedef struct {
+ uint64_t bcr;
+ uint64_t bvr;
+} HWBreakpoint;
+
+/* The watchpoint registers can cover more area than the requested
+ * watchpoint so we need to store the additional information
+ * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
+ * when the watchpoint is hit.
+ */
+typedef struct {
+ uint64_t wcr;
+ uint64_t wvr;
+ CPUWatchpoint details;
+} HWWatchpoint;
+
+/* Maximum and current break/watch point counts */
+int max_hw_bps, max_hw_wps;
+GArray *hw_breakpoints, *hw_watchpoints;
+
+#define cur_hw_wps (hw_watchpoints->len)
+#define cur_hw_bps (hw_breakpoints->len)
+#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
+#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
+
+/**
+ * kvm_arm_init_debug() - check for guest debug capabilities
+ * @cs: CPUState
+ *
+ * kvm_check_extension returns the number of debug registers we have
+ * or 0 if we have none.
+ *
+ */
+static void kvm_arm_init_debug(CPUState *cs)
+{
+ have_guest_debug = kvm_check_extension(cs->kvm_state,
+ KVM_CAP_SET_GUEST_DEBUG);
+
+ max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
+ hw_watchpoints = g_array_sized_new(true, true,
+ sizeof(HWWatchpoint), max_hw_wps);
+
+ max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
+ hw_breakpoints = g_array_sized_new(true, true,
+ sizeof(HWBreakpoint), max_hw_bps);
+}
+
+/**
+ * insert_hw_breakpoint()
+ * @addr: address of breakpoint
+ *
+ * See ARM ARM D2.9.1 for details but here we are only going to create
+ * simple un-linked breakpoints (i.e. we don't chain breakpoints
+ * together to match address and context or vmid). The hardware is
+ * capable of fancier matching but that will require exposing that
+ * fanciness to GDB's interface
+ *
+ * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
+ *
+ * 31 24 23 20 19 16 15 14 13 12 9 8 5 4 3 2 1 0
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ * | RES0 | BT | LBN | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ *
+ * BT: Breakpoint type (0 = unlinked address match)
+ * LBN: Linked BP number (0 = unused)
+ * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
+ * BAS: Byte Address Select (RES1 for AArch64)
+ * E: Enable bit
+ *
+ * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
+ *
+ * 63 53 52 49 48 2 1 0
+ * +------+-----------+----------+-----+
+ * | RESS | VA[52:49] | VA[48:2] | 0 0 |
+ * +------+-----------+----------+-----+
+ *
+ * Depending on the addressing mode bits the top bits of the register
+ * are a sign extension of the highest applicable VA bit. Some
+ * versions of GDB don't do it correctly so we ensure they are correct
+ * here so future PC comparisons will work properly.
+ */
+
+static int insert_hw_breakpoint(target_ulong addr)
+{
+ HWBreakpoint brk = {
+ .bcr = 0x1, /* BCR E=1, enable */
+ .bvr = sextract64(addr, 0, 53)
+ };
+
+ if (cur_hw_bps >= max_hw_bps) {
+ return -ENOBUFS;
+ }
+
+ brk.bcr = deposit32(brk.bcr, 1, 2, 0x3); /* PMC = 11 */
+ brk.bcr = deposit32(brk.bcr, 5, 4, 0xf); /* BAS = RES1 */
+
+ g_array_append_val(hw_breakpoints, brk);
+
+ return 0;
+}
+
+/**
+ * delete_hw_breakpoint()
+ * @pc: address of breakpoint
+ *
+ * Delete a breakpoint and shuffle any above down
+ */
+
+static int delete_hw_breakpoint(target_ulong pc)
+{
+ int i;
+ for (i = 0; i < hw_breakpoints->len; i++) {
+ HWBreakpoint *brk = get_hw_bp(i);
+ if (brk->bvr == pc) {
+ g_array_remove_index(hw_breakpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * insert_hw_watchpoint()
+ * @addr: address of watch point
+ * @len: size of area
+ * @type: type of watch point
+ *
+ * See ARM ARM D2.10. As with the breakpoints we can do some advanced
+ * stuff if we want to. The watch points can be linked with the break
+ * points above to make them context aware. However for simplicity
+ * currently we only deal with simple read/write watch points.
+ *
+ * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
+ *
+ * 31 29 28 24 23 21 20 19 16 15 14 13 12 5 4 3 2 1 0
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ * | RES0 | MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ *
+ * MASK: number of address bits masked (0=none, 01/10=reserved, 11=3 bits (8 bytes))
+ * WT: 0 - unlinked, 1 - linked (not currently used)
+ * LBN: Linked BP number (not currently used)
+ * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
+ * BAS: Byte Address Select
+ * LSC: Load/Store control (01: load, 10: store, 11: both)
+ * E: Enable
+ *
+ * The bottom 2 bits of the value register are masked. Therefore to
+ * break on any sizes smaller than an unaligned word you need to set
+ * MASK=0, BAS=bit per byte in question. For larger regions (^2) you
+ * need to ensure you mask the address as required and set BAS=0xff
+ */
+
+static int insert_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ HWWatchpoint wp = {
+ .wcr = 1, /* E=1, enable */
+ .wvr = addr & (~0x7ULL),
+ .details = { .vaddr = addr, .len = len }
+ };
+
+ if (cur_hw_wps >= max_hw_wps) {
+ return -ENOBUFS;
+ }
+
+ /*
+ * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
+ * valid whether EL3 is implemented or not
+ */
+ wp.wcr = deposit32(wp.wcr, 1, 2, 3);
+
+ switch (type) {
+ case GDB_WATCHPOINT_READ:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 1);
+ wp.details.flags = BP_MEM_READ;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 2);
+ wp.details.flags = BP_MEM_WRITE;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 3);
+ wp.details.flags = BP_MEM_ACCESS;
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+ if (len <= 8) {
+ /* we align the address and set the bits in BAS */
+ int off = addr & 0x7;
+ int bas = (1 << len) - 1;
+
+ wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
+ } else {
+ /* For ranges above 8 bytes the length must be a power of 2 */
+ if (is_power_of_2(len)) {
+ int bits = ctz64(len);
+
+ wp.wvr &= ~((1 << bits) - 1);
+ wp.wcr = deposit32(wp.wcr, 24, 4, bits);
+ wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
+ } else {
+ return -ENOBUFS;
+ }
+ }
+
+ g_array_append_val(hw_watchpoints, wp);
+ return 0;
+}
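+
+/* A worked BAS example: a 2-byte watchpoint at address 0x1006 stores
+ * wvr = 0x1000, off = 6 and bas = 0x3, and the deposit32() above
+ * places those two bits at BAS[7:6] (wcr bits 12:11), i.e. it watches
+ * bytes 6-7 of the aligned doubleword.
+ */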
+
+
+static bool check_watchpoint_in_range(int i, target_ulong addr)
+{
+ HWWatchpoint *wp = get_hw_wp(i);
+ uint64_t addr_top, addr_bottom = wp->wvr;
+ int bas = extract32(wp->wcr, 5, 8);
+ int mask = extract32(wp->wcr, 24, 4);
+
+ if (mask) {
+ addr_top = addr_bottom + (1 << mask);
+ } else {
+ /* BAS must be contiguous but can be offset against the base
+ * address in DBGWVR */
+ addr_bottom = addr_bottom + ctz32(bas);
+ addr_top = addr_bottom + clo32(bas);
+ }
+
+ if (addr >= addr_bottom && addr <= addr_top) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * delete_hw_watchpoint()
+ * @addr: address of watchpoint
+ *
+ * Delete a watchpoint and shuffle any above down
+ */
+
+static int delete_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int i;
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ g_array_remove_index(hw_watchpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return insert_hw_breakpoint(addr);
+ break;
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return insert_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return delete_hw_breakpoint(addr);
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return delete_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ if (cur_hw_wps > 0) {
+ g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
+ }
+ if (cur_hw_bps > 0) {
+ g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
+ }
+}
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ int i;
+ memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
+
+ for (i = 0; i < max_hw_wps; i++) {
+ HWWatchpoint *wp = get_hw_wp(i);
+ ptr->dbg_wcr[i] = wp->wcr;
+ ptr->dbg_wvr[i] = wp->wvr;
+ }
+ for (i = 0; i < max_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ ptr->dbg_bcr[i] = bp->bcr;
+ ptr->dbg_bvr[i] = bp->bvr;
+ }
+}
+
+bool kvm_arm_hw_debug_active(CPUState *cs)
+{
+ return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
+}
+
+static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ if (bp->bvr == pc) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ return &get_hw_wp(i)->details;
+ }
+ }
+ return NULL;
+}
+
+static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
+ const char *name)
+{
+ int err;
+
+ err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
+ if (err != 0) {
+ error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
+ return false;
+ }
+
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
+ if (err != 0) {
+ error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
+ return false;
+ }
+
+ return true;
+}
+
+void kvm_arm_pmu_init(CPUState *cs)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_INIT,
+ };
+
+ if (!ARM_CPU(cs)->has_pmu) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
+ error_report("failed to init PMU");
+ abort();
+ }
+}
+
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .addr = (intptr_t)&irq,
+ .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
+ };
+
+ if (!ARM_CPU(cs)->has_pmu) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
+ error_report("failed to set irq for PMU");
+ abort();
+ }
+}
+
+void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PVTIME_CTRL,
+ .attr = KVM_ARM_VCPU_PVTIME_IPA,
+ .addr = (uint64_t)&ipa,
+ };
+
+ if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
+ error_report("failed to init PVTIME IPA");
+ abort();
+ }
+}
+
+static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
+{
+ uint64_t ret;
+ struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
+ int err;
+
+ assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
+ err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
+ if (err < 0) {
+ return -1;
+ }
+ *pret = ret;
+ return 0;
+}
+
+static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
+{
+ struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
+
+ assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
+ return ioctl(fd, KVM_GET_ONE_REG, &idreg);
+}
+
+bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUFeatures fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ */
+ int fdarray[3];
+ bool sve_supported;
+ uint64_t features = 0;
+ uint64_t t;
+ int err;
+
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl; however,
+ * we know such kernels only support creating one kind of guest CPU,
+ * namely their preferred CPU type. Fortunately these old kernels
+ * support only a very limited number of CPUs.
+ */
+ static const uint32_t cpus_to_try[] = {
+ KVM_ARM_TARGET_AEM_V8,
+ KVM_ARM_TARGET_FOUNDATION_V8,
+ KVM_ARM_TARGET_CORTEX_A57,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ /*
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
+ * to use the preferred target
+ */
+ struct kvm_vcpu_init init = { .target = -1, };
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcf->target = init.target;
+ ahcf->dtb_compatible = "arm,arm-v8";
+
+ err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 0));
+ if (unlikely(err < 0)) {
+ /*
+ * Before v4.15, the kernel only exposed a limited number of system
+ * registers, not including any of the interesting AArch64 ID regs.
+ * For the most part we could leave these fields as zero with minimal
+ * effect, since this does not affect the values seen by the guest.
+ *
+ * However, it could cause problems down the line for QEMU,
+ * so provide a minimal v8.0 default.
+ *
+ * ??? Could read MIDR and use knowledge from cpu64.c.
+ * ??? Could map a page of memory into our temp guest and
+ * run the tiniest of hand-crafted kernels to extract
+ * the values seen by the guest.
+ * ??? Either of these sounds like too much effort just
+ * to work around running a modern host kernel.
+ */
+ ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+ err = 0;
+ } else {
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
+ ARM64_SYS_REG(3, 0, 0, 4, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
+ ARM64_SYS_REG(3, 0, 0, 5, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
+ ARM64_SYS_REG(3, 0, 0, 5, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
+ ARM64_SYS_REG(3, 0, 0, 6, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
+ ARM64_SYS_REG(3, 0, 0, 6, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
+ ARM64_SYS_REG(3, 0, 0, 7, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
+ ARM64_SYS_REG(3, 0, 0, 7, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
+ ARM64_SYS_REG(3, 0, 0, 7, 2));
+
+ /*
+ * Note that if AArch32 support is not present in the host,
+ * the AArch32 sysregs are present to be read, but will
+ * return UNKNOWN values. This is neither better nor worse
+ * than skipping the reads and leaving 0, as we must avoid
+ * considering the values in every case.
+ */
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
+ ARM64_SYS_REG(3, 0, 0, 1, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
+ ARM64_SYS_REG(3, 0, 0, 3, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 2));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
+ ARM64_SYS_REG(3, 0, 0, 1, 5));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
+ ARM64_SYS_REG(3, 0, 0, 1, 6));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
+ ARM64_SYS_REG(3, 0, 0, 1, 7));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
+ ARM64_SYS_REG(3, 0, 0, 2, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
+ ARM64_SYS_REG(3, 0, 0, 2, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
+ ARM64_SYS_REG(3, 0, 0, 2, 2));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
+ ARM64_SYS_REG(3, 0, 0, 2, 3));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
+ ARM64_SYS_REG(3, 0, 0, 2, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
+ ARM64_SYS_REG(3, 0, 0, 2, 5));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
+ ARM64_SYS_REG(3, 0, 0, 2, 6));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
+ ARM64_SYS_REG(3, 0, 0, 2, 7));
+
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+ ARM64_SYS_REG(3, 0, 0, 3, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+ ARM64_SYS_REG(3, 0, 0, 3, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+ ARM64_SYS_REG(3, 0, 0, 3, 2));
+
+ /*
+ * DBGDIDR is a bit complicated because the kernel doesn't
+ * provide an accessor for it in 64-bit mode, which is what this
+ * scratch VM is in, and there's no architected "64-bit sysreg
+ * which reads the same as the 32-bit register" the way there is
+ * for other ID registers. Instead we synthesize a value from the
+ * AArch64 ID_AA64DFR0, the same way the kernel code in
+ * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
+ * We only do this if the CPU supports AArch32 at EL1.
+ */
+ if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
+ int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
+ int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+ int ctx_cmps =
+ FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+ int version = 6; /* ARMv8 debug architecture */
+ bool has_el3 =
+ !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+ uint32_t dbgdidr = 0;
+
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
+ dbgdidr |= (1 << 15); /* RES1 bit */
+ ahcf->isar.dbgdidr = dbgdidr;
+ }
+ }
+
+ sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
+
+ /* Add feature bits that can't appear until after VCPU init. */
+ if (sve_supported) {
+ t = ahcf->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+ ahcf->isar.id_aa64pfr0 = t;
+
+ /*
+ * Before v5.1, KVM did not support SVE and did not expose
+ * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does
+ * not expose the register to "user" requests like this
+ * unless the host supports SVE.
+ */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 4));
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (err < 0) {
+ return false;
+ }
+
+ /*
+ * We can assume any KVM supporting CPU is at least a v8
+ * with VFPv4+Neon; this in turn implies most of the other
+ * feature bits.
+ */
+ features |= 1ULL << ARM_FEATURE_V8;
+ features |= 1ULL << ARM_FEATURE_NEON;
+ features |= 1ULL << ARM_FEATURE_AARCH64;
+ features |= 1ULL << ARM_FEATURE_PMU;
+ features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
+
+ ahcf->features = features;
+
+ return true;
+}
+
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ bool has_steal_time = kvm_arm_steal_time_supported();
+
+ if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
+ if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
+ } else {
+ cpu->kvm_steal_time = ON_OFF_AUTO_ON;
+ }
+ } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
+ if (!has_steal_time) {
+ error_setg(errp, "'kvm-steal-time' cannot be enabled "
+ "on this host");
+ return;
+ } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * DEN0057A chapter 2 says "This specification only covers
+ * systems in which the Execution state of the hypervisor
+ * as well as EL1 of virtual machines is AArch64.". And,
+ * to ensure that, the smc/hvc calls are only specified as
+ * smc64/hvc64.
+ */
+ error_setg(errp, "'kvm-steal-time' cannot be enabled "
+ "for AArch32 guests");
+ return;
+ }
+ }
+}
+
+bool kvm_arm_aarch32_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
+}
+
+bool kvm_arm_sve_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
+}
+
+bool kvm_arm_steal_time_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
+}
+
+QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
+
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
+{
+ /* Only call this function if kvm_arm_sve_supported() returns true. */
+ static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
+ static bool probed;
+ uint32_t vq = 0;
+ int i, j;
+
+ bitmap_zero(map, ARM_MAX_VQ);
+
+ /*
+ * KVM ensures all host CPUs support the same set of vector lengths.
+ * So we only need to create the scratch VCPU once and then cache
+ * the results.
+ */
+ if (!probed) {
+ struct kvm_vcpu_init init = {
+ .target = -1,
+ .features[0] = (1 << KVM_ARM_VCPU_SVE),
+ };
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_ARM64_SVE_VLS,
+ .addr = (uint64_t)&vls[0],
+ };
+ int fdarray[3], ret;
+
+ probed = true;
+
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ error_report("failed to create scratch VCPU with SVE enabled");
+ abort();
+ }
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+ if (ret) {
+ error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
+ strerror(errno));
+ abort();
+ }
+
+ for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
+ if (vls[i]) {
+ vq = 64 - clz64(vls[i]) + i * 64;
+ break;
+ }
+ }
+ if (vq > ARM_MAX_VQ) {
+ warn_report("KVM supports vector lengths larger than "
+ "QEMU can enable");
+ }
+ }
+
+ for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
+ if (!vls[i]) {
+ continue;
+ }
+ for (j = 1; j <= 64; ++j) {
+ vq = j + i * 64;
+ if (vq > ARM_MAX_VQ) {
+ return;
+ }
+ if (vls[i] & (1UL << (j - 1))) {
+ set_bit(vq - 1, map);
+ }
+ }
+ }
+}
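+
+/* For example, if the kernel reports vls[0] = 0x5 (bits 0 and 2 set),
+ * the host supports VQ 1 and VQ 3, i.e. vector lengths of 128 and
+ * 384 bits, and kvm_arm_sve_get_vls() sets bits 0 and 2 of @map.
+ */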
+
+static int kvm_arm_sve_set_vls(CPUState *cs)
+{
+ uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_ARM64_SVE_VLS,
+ .addr = (uint64_t)&vls[0],
+ };
+ ARMCPU *cpu = ARM_CPU(cs);
+ uint32_t vq;
+ int i, j;
+
+ assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
+
+ for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
+ if (test_bit(vq - 1, cpu->sve_vq_map)) {
+ i = (vq - 1) / 64;
+ j = (vq - 1) % 64;
+ vls[i] |= 1UL << j;
+ }
+ }
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
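+/* op0, op1, CRn, CRm, op2: the system register encoding of MPIDR_EL1 */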
+#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret;
+ uint64_t mpidr;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
+ !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ error_report("KVM is not supported for this guest CPU type");
+ return -EINVAL;
+ }
+
+ qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);
+
+ /* Determine init features for this CPU */
+ memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
+ if (cs->start_powered_off) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
+ cpu->psci_version = 2;
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
+ }
+ if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
+ }
+ if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
+ cpu->has_pmu = false;
+ }
+ if (cpu->has_pmu) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
+ } else {
+ env->features &= ~(1ULL << ARM_FEATURE_PMU);
+ }
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ assert(kvm_arm_sve_supported());
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
+ }
+
+ /* Do KVM_ARM_VCPU_INIT ioctl */
+ ret = kvm_arm_vcpu_init(cs);
+ if (ret) {
+ return ret;
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arm_sve_set_vls(cs);
+ if (ret) {
+ return ret;
+ }
+ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /*
+ * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
+ * Currently KVM has its own idea about MPIDR assignment, so we
+ * override our defaults with what we get from KVM.
+ */
+ ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
+ if (ret) {
+ return ret;
+ }
+ cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
+
+ kvm_arm_init_debug(cs);
+
+ /* Check whether user space can specify guest syndrome value */
+ kvm_arm_init_serror_injection(cs);
+
+ return kvm_arm_init_cpreg_list(cpu);
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+ /* Return true if the regidx is a register we should synchronize
+ * via the cpreg_tuples array (i.e. is not a core or SVE reg that
+ * we sync by hand in kvm_arch_get/put_registers())
+ */
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ case KVM_REG_ARM64_SVE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+typedef struct CPRegStateLevel {
+ uint64_t regidx;
+ int level;
+} CPRegStateLevel;
+
+/* All system registers not listed in the following table are assumed to be
+ * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
+ * often, you must add it to this table with a state of either
+ * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
+static const CPRegStateLevel non_runtime_cpregs[] = {
+ { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
+};
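+
+/*
+ * Editor's note: to move a register out of the runtime set, one more
+ * initializer would be added to the table above, e.g. (with a purely
+ * hypothetical register name):
+ *
+ *     { KVM_REG_ARM_SOME_SLOW_REG, KVM_PUT_RESET_STATE },
+ */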
+
+int kvm_arm_cpreg_level(uint64_t regidx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
+ const CPRegStateLevel *l = &non_runtime_cpregs[i];
+ if (l->regidx == regidx) {
+ return l->level;
+ }
+ }
+
+ return KVM_PUT_RUNTIME_STATE;
+}
+
+/* Callers must hold the iothread mutex lock */
+static void kvm_inject_arm_sea(CPUState *c)
+{
+ ARMCPU *cpu = ARM_CPU(c);
+ CPUARMState *env = &cpu->env;
+ uint32_t esr;
+ bool same_el;
+
+ c->exception_index = EXCP_DATA_ABORT;
+ env->exception.target_el = 1;
+
+ /*
+ * Set the DFSC to synchronous external abort and set FnV to not valid,
+ * this will tell guest the FAR_ELx is UNKNOWN for this abort.
+ */
+ same_el = arm_current_el(env) == env->exception.target_el;
+ esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);
+
+ env->exception.syndrome = esr;
+
+ arm_cpu_do_interrupt(c);
+}
+
+#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
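+
+/*
+ * Editor's note: KVM_REG_ARM_CORE_REG(x) is defined in the kernel's
+ * asm/kvm.h as offsetof(struct kvm_regs, x) / sizeof(__u32), so e.g.
+ * AARCH64_CORE_REG(regs.pc) names the 64-bit core register whose
+ * payload lives at the offset of kvm_regs.regs.pc, tagged with
+ * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE.
+ */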
+
+static int kvm_arch_put_fpsimd(CPUState *cs)
+{
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ struct kvm_one_reg reg;
+ int i, ret;
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t fp_val[2] = { q[1], q[0] };
+ reg.addr = (uintptr_t)fp_val;
+#else
+ reg.addr = (uintptr_t)q;
+#endif
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
+ * code the slice index to zero for now as it's unlikely we'll need more than
+ * one slice for quite some time.
+ */
+static int kvm_arch_put_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t tmp[ARM_MAX_VQ * 2];
+ uint64_t *r;
+ struct kvm_one_reg reg;
+ int n, ret;
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
+ r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
+ r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
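+
+/*
+ * Editor's note on the sizes involved: with ARM_MAX_VQ == 16, a ZREG
+ * slice is at most 16 * 2 == 32 uint64_t values (2048 bits), which is
+ * why tmp[] above is sized ARM_MAX_VQ * 2, while a PREG or FFR slice
+ * is one eighth of that, DIV_ROUND_UP(sve_max_vq * 2, 8) words.
+ */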
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ struct kvm_one_reg reg;
+ uint64_t val;
+ uint32_t fpr;
+ int i, ret;
+ unsigned int el;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
+ * AArch64 registers before pushing them out to 64-bit KVM.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ }
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_save_sp(env, 1);
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Saved Program State Registers
+ *
+ * Before we restore from the banked_spsr[] array we need to
+ * ensure that any modifications to env->spsr are correctly
+ * reflected in the banks.
+ */
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->banked_spsr[i] = env->spsr;
+ }
+
+ /* KVM 0-4 map to QEMU banks 1-5 */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arch_put_sve(cs);
+ } else {
+ ret = kvm_arch_put_fpsimd(cs);
+ }
+ if (ret) {
+ return ret;
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpsr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpcr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ write_cpustate_to_list(cpu, true);
+
+ if (!write_list_to_kvmstate(cpu, level)) {
+ return -EINVAL;
+ }
+
+ /*
+ * Setting VCPU events should be triggered after syncing the registers
+ * to avoid overwriting potential changes made by KVM upon calling
+ * KVM_SET_VCPU_EVENTS ioctl
+ */
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
+ return ret;
+}
+
+static int kvm_arch_get_fpsimd(CPUState *cs)
+{
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ struct kvm_one_reg reg;
+ int i, ret;
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ reg.addr = (uintptr_t)q;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+#ifdef HOST_WORDS_BIGENDIAN
+ {
+ uint64_t t = q[0];
+ q[0] = q[1];
+ q[1] = t;
+ }
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
+ * code the slice index to zero for now as it's unlikely we'll need more than
+ * one slice for quite some time.
+ */
+static int kvm_arch_get_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ uint64_t *r;
+ int n, ret;
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
+ r = &env->vfp.zregs[n].d[0];
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, cpu->sve_max_vq * 2);
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
+ r = &env->vfp.pregs[n].p[0];
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ }
+
+ r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
+ reg.addr = (uintptr_t)r;
+ reg.id = KVM_REG_ARM64_SVE_FFR(0);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ struct kvm_one_reg reg;
+ uint64_t val;
+ unsigned int el;
+ uint32_t fpr;
+ int i, ret;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_restore_sp(env, 1);
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
+ * incoming AArch64 regs received from 64-bit KVM.
+ * We must perform this after all of the registers have been acquired from
+ * the kernel.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_64_to_32(env);
+ }
+
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Fetch the SPSR registers
+ *
+ * KVM SPSRs 0-4 map to QEMU banks 1-5
+ */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->spsr = env->banked_spsr[i];
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arch_get_sve(cs);
+ } else {
+ ret = kvm_arch_get_fpsimd(cs);
+ }
+ if (ret) {
+ return ret;
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpsr(env, fpr);
+
+ reg.addr = (uintptr_t)(&fpr);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpcr(env, fpr);
+
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return -EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
+ kvm_arm_sync_mpstate_to_qemu(cpu);
+
+ /* TODO: other registers */
+ return ret;
+}
+
+void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+{
+ ram_addr_t ram_addr;
+ hwaddr paddr;
+
+ assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
+
+ if (acpi_ghes_present() && addr) {
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr != RAM_ADDR_INVALID &&
+ kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+ kvm_hwpoison_page_add(ram_addr);
+ /*
+ * If this is a BUS_MCEERR_AR, we know we have been called
+ * synchronously from the vCPU thread, so we can easily
+ * synchronize the state and inject an error.
+ *
+ * TODO: we currently don't tell the guest at all about
+ * BUS_MCEERR_AO. In that case we might either be being
+ * called synchronously from the vCPU thread, or a bit
+ * later from the main thread, so doing the injection of
+ * the error would be more complicated.
+ */
+ if (code == BUS_MCEERR_AR) {
+ kvm_cpu_synchronize_state(c);
+ if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+ kvm_inject_arm_sea(c);
+ } else {
+ error_report("failed to record the error");
+ abort();
+ }
+ }
+ return;
+ }
+ if (code == BUS_MCEERR_AO) {
+ error_report("Hardware memory error at addr %p for memory used by "
+ "QEMU itself instead of guest system!", addr);
+ }
+ }
+
+ if (code == BUS_MCEERR_AR) {
+ error_report("Hardware memory error!");
+ exit(1);
+ }
+}
+
+/* C6.6.29 BRK instruction */
+static const uint32_t brk_insn = 0xd4200000;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static uint32_t brk;
+
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+ brk != brk_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
+ *
+ * To minimise translating between kernel and user-space the kernel
+ * ABI just provides user-space with the full exception syndrome
+ * register value to be decoded in QEMU.
+ */
+
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
+{
+ int hsr_ec = syn_get_ec(debug_exit->hsr);
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* Ensure PC is synchronised */
+ kvm_cpu_synchronize_state(cs);
+
+ switch (hsr_ec) {
+ case EC_SOFTWARESTEP:
+ if (cs->singlestep_enabled) {
+ return true;
+ } else {
+ /*
+ * The kernel should have suppressed the guest's ability to
+ * single step at this point so something has gone wrong.
+ */
+ error_report("%s: guest single-step while debugging unsupported"
+ " (%"PRIx64", %"PRIx32")",
+ __func__, env->pc, debug_exit->hsr);
+ return false;
+ }
+ break;
+ case EC_AA64_BKPT:
+ if (kvm_find_sw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_BREAKPOINT:
+ if (find_hw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_WATCHPOINT:
+ {
+ CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
+ if (wp) {
+ cs->watchpoint_hit = wp;
+ return true;
+ }
+ break;
+ }
+ default:
+ error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
+ __func__, debug_exit->hsr, env->pc);
+ }
+
+ /* If we are not handling the debug exception it must belong to
+ * the guest. Let's re-use the existing TCG interrupt code to set
+ * everything up properly.
+ */
+ cs->exception_index = EXCP_BKPT;
+ env->exception.syndrome = debug_exit->hsr;
+ env->exception.vaddress = debug_exit->far;
+ env->exception.target_el = 1;
+ qemu_mutex_lock_iothread();
+ arm_cpu_do_interrupt(cs);
+ qemu_mutex_unlock_iothread();
+
+ return false;
+}
+
+#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
+#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
+
+/*
+ * ESR_EL1
+ * ISS encoding
+ * AARCH64: DFSC, bits [5:0]
+ * AARCH32:
+ * TTBCR.EAE == 0
+ * FS[4] - DFSR[10]
+ * FS[3:0] - DFSR[3:0]
+ * TTBCR.EAE == 1
+ * FS, bits [5:0]
+ */
+#define ESR_DFSC(aarch64, lpae, v) \
+ (((aarch64) || (lpae)) ? ((v) & 0x3F) \
+ : (((v) >> 6) | ((v) & 0x1F)))
+
+#define ESR_DFSC_EXTABT(aarch64, lpae) \
+ ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
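+
+/*
+ * Editor's note: a worked example of the macros above. On AArch32 with
+ * the short-descriptor format (EAE == 0), a synchronous external abort
+ * has FS == 0b01000, stored as DFSR[10] == 0 and DFSR[3:0] == 0x8, so
+ * ESR_DFSC(0, 0, 0x8) == 0x8 == ESR_DFSC_EXTABT(0, 0); with LPAE or
+ * AArch64 the same abort reports DFSC == 0x10 directly.
+ */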
+
+bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
+{
+ uint64_t dfsr_val;
+
+ if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
+ int lpae = 0;
+
+ if (!aarch64_mode) {
+ uint64_t ttbcr;
+
+ if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
+ lpae = arm_feature(env, ARM_FEATURE_LPAE)
+ && (ttbcr & TTBCR_EAE);
+ }
+ }
+ /*
+ * The verification here is based on the DFSC bits
+ * of the ESR_EL1 reg only
+ */
+ return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
+ ESR_DFSC_EXTABT(aarch64_mode, lpae));
+ }
+ return false;
+}
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
new file mode 100644
index 000000000..b7f78b521
--- /dev/null
+++ b/target/arm/kvm_arm.h
@@ -0,0 +1,531 @@
+/*
+ * QEMU KVM support -- ARM specific functions.
+ *
+ * Copyright (c) 2012 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_ARM_H
+#define QEMU_KVM_ARM_H
+
+#include "sysemu/kvm.h"
+#include "exec/memory.h"
+#include "qemu/error-report.h"
+
+#define KVM_ARM_VGIC_V2 (1 << 0)
+#define KVM_ARM_VGIC_V3 (1 << 1)
+
+/**
+ * kvm_arm_vcpu_init:
+ * @cs: CPUState
+ *
+ * Initialize (or reinitialize) the VCPU by invoking the
+ * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
+ * bitmask specified in the CPUState.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_arm_vcpu_init(CPUState *cs);
+
+/**
+ * kvm_arm_vcpu_finalize:
+ * @cs: CPUState
+ * @feature: feature to finalize
+ *
+ * Finalizes the configuration of the specified VCPU feature by
+ * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
+ * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
+ * KVM's API documentation.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_arm_vcpu_finalize(CPUState *cs, int feature);
+
+/**
+ * kvm_arm_register_device:
+ * @mr: memory region for this device
+ * @devid: the KVM device ID
+ * @group: device control API group for setting addresses
+ * @attr: device control API address type
+ * @dev_fd: device control device file descriptor (or -1 if not supported)
+ * @addr_ormask: value to be OR'ed with resolved address
+ *
+ * Remember the memory region @mr, and when it is mapped by the
+ * machine model, tell the kernel that base address using the
+ * KVM_ARM_SET_DEVICE_ADDRESS ioctl or the newer device control API. @devid
+ * should be the ID of the device as defined by KVM_ARM_SET_DEVICE_ADDRESS or
+ * the arm-vgic device in the device control API.
+ * The machine model may map
+ * and unmap the device multiple times; the kernel will only be told the final
+ * address at the point where machine init is complete.
+ */
+void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
+ uint64_t attr, int dev_fd, uint64_t addr_ormask);
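+
+/*
+ * Editor's note: a sketch of a typical caller, the in-kernel vGICv2
+ * model registering its distributor region (field and variable names
+ * here are illustrative; see hw/intc/arm_gic_kvm.c for the real call
+ * sites):
+ *
+ *     kvm_arm_register_device(&s->iomem,
+ *                             (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
+ *                             | KVM_VGIC_V2_ADDR_TYPE_DIST,
+ *                             KVM_DEV_ARM_VGIC_GRP_ADDR,
+ *                             KVM_VGIC_V2_ADDR_TYPE_DIST, s->dev_fd, 0);
+ */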
+
+/**
+ * kvm_arm_init_cpreg_list:
+ * @cpu: ARMCPU
+ *
+ * Initialize the ARMCPU cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ *
+ * Returns: 0 if success, else < 0 error code
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reg_syncs_via_cpreg_list:
+ * @regidx: KVM register index
+ *
+ * Return true if this KVM register should be synchronized via the
+ * cpreg list of arbitrary system registers, false if it is synchronized
+ * by hand using code in kvm_arch_get/put_registers().
+ */
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
+
+/**
+ * kvm_arm_cpreg_level:
+ * @regidx: KVM register index
+ *
+ * Return the level of this coprocessor/system register. Return value is
+ * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
+ */
+int kvm_arm_cpreg_level(uint64_t regidx);
+
+/**
+ * write_list_to_kvmstate:
+ * @cpu: ARMCPU
+ * @level: the state level to sync
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the cpreg_values list into the kernel (via ioctl).
+ * This updates KVM's working data structures from TCG data or
+ * from incoming migration state.
+ *
+ * Returns: true if all register values were updated correctly,
+ * false if some register was unknown to the kernel or could not
+ * be written (eg constant register with the wrong value).
+ * Note that we do not stop early on failure -- we will attempt
+ * writing all registers in the list.
+ */
+bool write_list_to_kvmstate(ARMCPU *cpu, int level);
+
+/**
+ * write_kvmstate_to_list:
+ * @cpu: ARMCPU
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the kernel into the cpreg_values list. This is used to
+ * copy info from KVM's working data structures into TCG or
+ * for outbound migration.
+ *
+ * Returns: true if all register values were read correctly,
+ * false if some register was unknown or could not be read.
+ * Note that we do not stop early on failure -- we will attempt
+ * reading all registers in the list.
+ */
+bool write_kvmstate_to_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_cpu_pre_save:
+ * @cpu: ARMCPU
+ *
+ * Called after write_kvmstate_to_list() from cpu_pre_save() to update
+ * the cpreg list with KVM CPU state.
+ */
+void kvm_arm_cpu_pre_save(ARMCPU *cpu);
+
+/**
+ * kvm_arm_cpu_post_load:
+ * @cpu: ARMCPU
+ *
+ * Called from cpu_post_load() to update KVM CPU state from the cpreg list.
+ */
+void kvm_arm_cpu_post_load(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reset_vcpu:
+ * @cpu: ARMCPU
+ *
+ * Called at reset time to reset kernel registers to their initial values.
+ */
+void kvm_arm_reset_vcpu(ARMCPU *cpu);
+
+/**
+ * kvm_arm_init_serror_injection:
+ * @cs: CPUState
+ *
+ * Check whether KVM can set guest SError syndrome.
+ */
+void kvm_arm_init_serror_injection(CPUState *cs);
+
+/**
+ * kvm_get_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Get VCPU related state from kvm.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_get_vcpu_events(ARMCPU *cpu);
+
+/**
+ * kvm_put_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Put VCPU related state to kvm.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_put_vcpu_events(ARMCPU *cpu);
+
+#ifdef CONFIG_KVM
+/**
+ * kvm_arm_create_scratch_host_vcpu:
+ * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
+ * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
+ * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
+ * an empty array.
+ * @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
+ * @init: filled in with the necessary values for creating a host
+ * vcpu. If NULL is provided, will not init the vCPU (though the cpufd
+ * will still be set up).
+ *
+ * Create a scratch vcpu in its own VM of the type preferred by the host
+ * kernel (as would be used for '-cpu host'), for purposes of probing it
+ * for capabilities.
+ *
+ * Returns: true on success (and fdarray and init are filled in),
+ * false on failure (and fdarray and init are not valid).
+ */
+bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
+ int *fdarray,
+ struct kvm_vcpu_init *init);
+
+/**
+ * kvm_arm_destroy_scratch_host_vcpu:
+ * @fdarray: array of fds as set up by kvm_arm_create_scratch_host_vcpu
+ *
+ * Tear down the scratch vcpu created by kvm_arm_create_scratch_host_vcpu.
+ */
+void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
+
+/**
+ * ARMHostCPUFeatures: information about the host CPU (identified
+ * by asking the host kernel)
+ */
+typedef struct ARMHostCPUFeatures {
+ ARMISARegisters isar;
+ uint64_t features;
+ uint32_t target;
+ const char *dtb_compatible;
+} ARMHostCPUFeatures;
+
+/**
+ * kvm_arm_get_host_cpu_features:
+ * @ahcf: ARMHostCPUClass to fill in
+ *
+ * Probe the capabilities of the host kernel's preferred CPU and fill
+ * in the ARMHostCPUClass struct accordingly.
+ *
+ * Returns true on success and false otherwise.
+ */
+bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
+
+/**
+ * kvm_arm_sve_get_vls:
+ * @cs: CPUState
+ * @map: bitmap to fill in
+ *
+ * Get all the SVE vector lengths supported by the KVM host, setting
+ * the bits corresponding to their length in quadwords minus one
+ * (vq - 1) in @map up to ARM_MAX_VQ.
+ */
+void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map);
+
+/**
+ * kvm_arm_set_cpu_features_from_host:
+ * @cpu: ARMCPU to set the features for
+ *
+ * Set up the ARMCPU struct fields up to match the information probed
+ * from the host CPU.
+ */
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
+
+/**
+ * kvm_arm_add_vcpu_properties:
+ * @obj: The CPU object to add the properties to
+ *
+ * Add all KVM specific CPU properties to the CPU object. These
+ * are the CPU properties with "kvm-" prefixed names.
+ */
+void kvm_arm_add_vcpu_properties(Object *obj);
+
+/**
+ * kvm_arm_steal_time_finalize:
+ * @cpu: ARMCPU for which to finalize kvm-steal-time
+ * @errp: Pointer to Error* for error propagation
+ *
+ * Validate the kvm-steal-time property selection and set its default
+ * based on KVM support and guest configuration.
+ */
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp);
+
+/**
+ * kvm_arm_steal_time_supported:
+ *
+ * Returns: true if KVM can enable steal time reporting
+ * and false otherwise.
+ */
+bool kvm_arm_steal_time_supported(void);
+
+/**
+ * kvm_arm_aarch32_supported:
+ *
+ * Returns: true if KVM can enable AArch32 mode
+ * and false otherwise.
+ */
+bool kvm_arm_aarch32_supported(void);
+
+/**
+ * kvm_arm_pmu_supported:
+ *
+ * Returns: true if KVM can enable the PMU
+ * and false otherwise.
+ */
+bool kvm_arm_pmu_supported(void);
+
+/**
+ * kvm_arm_sve_supported:
+ *
+ * Returns true if KVM can enable SVE and false otherwise.
+ */
+bool kvm_arm_sve_supported(void);
+
+/**
+ * kvm_arm_get_max_vm_ipa_size:
+ * @ms: Machine state handle
+ * @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
+ * for legacy KVM.
+ *
+ * Returns the number of bits in the IPA address space supported by KVM
+ */
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa);
+
+/**
+ * kvm_arm_sync_mpstate_to_kvm:
+ * @cpu: ARMCPU
+ *
+ * If supported set the KVM MP_STATE based on QEMU's model.
+ *
+ * Returns 0 on success and -1 on failure.
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu);
+
+/**
+ * kvm_arm_sync_mpstate_to_qemu:
+ * @cpu: ARMCPU
+ *
+ * If supported get the MP_STATE from KVM and store in QEMU's model.
+ *
+ * Returns 0 on success and aborts on failure.
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
+
+/**
+ * kvm_arm_get_virtual_time:
+ * @cs: CPUState
+ *
+ * Gets the VCPU's virtual counter and stores it in the KVM CPU state.
+ */
+void kvm_arm_get_virtual_time(CPUState *cs);
+
+/**
+ * kvm_arm_put_virtual_time:
+ * @cs: CPUState
+ *
+ * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.
+ */
+void kvm_arm_put_virtual_time(CPUState *cs);
+
+void kvm_arm_vm_state_change(void *opaque, bool running, RunState state);
+
+int kvm_arm_vgic_probe(void);
+
+void kvm_arm_pmu_set_irq(CPUState *cs, int irq);
+void kvm_arm_pmu_init(CPUState *cs);
+
+/**
+ * kvm_arm_pvtime_init:
+ * @cs: CPUState
+ * @ipa: Per-vcpu guest physical base address of the pvtime structures
+ *
+ * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa.
+ */
+void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
+
+int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
+
+#else
+
+/*
+ * It's safe to call these functions without KVM support.
+ * They should either do nothing or return "not supported".
+ */
+static inline bool kvm_arm_aarch32_supported(void)
+{
+ return false;
+}
+
+static inline bool kvm_arm_pmu_supported(void)
+{
+ return false;
+}
+
+static inline bool kvm_arm_sve_supported(void)
+{
+ return false;
+}
+
+static inline bool kvm_arm_steal_time_supported(void)
+{
+ return false;
+}
+
+/*
+ * These functions should never actually be called without KVM support.
+ */
+static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_add_vcpu_properties(Object *obj)
+{
+ g_assert_not_reached();
+}
+
+static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
+{
+ g_assert_not_reached();
+}
+
+static inline int kvm_arm_vgic_probe(void)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_pmu_init(CPUState *cs)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
+{
+ g_assert_not_reached();
+}
+
+#endif
+
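+/**
+ * gic_class_name:
+ *
+ * Return name of GIC class to use depending on whether KVM acceleration
+ * is in use.
+ *
+ * Returns: class name to use
+ */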
+static inline const char *gic_class_name(void)
+{
+ return kvm_irqchip_in_kernel() ? "kvm-arm-gic" : "arm_gic";
+}
+
+/**
+ * gicv3_class_name
+ *
+ * Return name of GICv3 class to use depending on whether KVM acceleration is
+ * in use. May throw an error if the chosen implementation is not available.
+ *
+ * Returns: class name to use
+ */
+static inline const char *gicv3_class_name(void)
+{
+ if (kvm_irqchip_in_kernel()) {
+ return "kvm-arm-gicv3";
+ } else {
+ if (kvm_enabled()) {
+ error_report("Userspace GICv3 is not supported with KVM");
+ exit(1);
+ }
+ return "arm-gicv3";
+ }
+}
+
+/**
+ * kvm_arm_handle_debug:
+ * @cs: CPUState
+ * @debug_exit: debug part of the KVM exit structure
+ *
+ * Returns: TRUE if the debug exception was handled.
+ */
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit);
+
+/**
+ * kvm_arm_hw_debug_active:
+ * @cs: CPU State
+ *
+ * Return: TRUE if any hardware breakpoints in use.
+ */
+bool kvm_arm_hw_debug_active(CPUState *cs);
+
+/**
+ * kvm_arm_copy_hw_debug_data:
+ * @ptr: kvm_guest_debug_arch structure
+ *
+ * Copy the architecture specific debug registers into the
+ * kvm_guest_debug ioctl structure.
+ */
+struct kvm_guest_debug_arch;
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
+
+/**
+ * kvm_arm_verify_ext_dabt_pending:
+ * @cs: CPUState
+ *
+ * Verify the fault status code wrt the Ext DABT injection
+ *
+ * Returns: true if the fault status code is as expected, false otherwise
+ */
+bool kvm_arm_verify_ext_dabt_pending(CPUState *cs);
+
+/**
+ * its_class_name:
+ *
+ * Return the ITS class name to use depending on whether KVM acceleration
+ * and KVM CAP_SIGNAL_MSI are supported
+ *
+ * Returns: class name to use or NULL
+ */
+static inline const char *its_class_name(void)
+{
+ if (kvm_irqchip_in_kernel()) {
+ /* KVM implementation requires this capability */
+ return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
+ } else {
+ /* Software emulation based model */
+ return "arm-gicv3-its";
+ }
+}
+
+#endif
diff --git a/target/arm/m-nocp.decode b/target/arm/m-nocp.decode
new file mode 100644
index 000000000..b65c801c9
--- /dev/null
+++ b/target/arm/m-nocp.decode
@@ -0,0 +1,72 @@
+# M-profile UsageFault.NOCP exception handling
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# For M-profile, the architecture specifies that NOCP UsageFaults
+# should take precedence over UNDEF faults over the whole wide
+# range of coprocessor-space encodings, with the exception of
+# VLLDM and VLSTM. (Compare v8.1M IsCPInstruction() pseudocode and
+# v8M Arm ARM rule R_QLGM.) This isn't mandatory for v8.0M but we choose
+# to behave the same as v8.1M.
+# This decode is handled before any others (and in particular before
+# decoding FP instructions which are in the coprocessor space).
+# If the coprocessor is not present or disabled then we will generate
+# the NOCP exception; otherwise we let the insn through to the main decode.
+
+%vd_dp 22:1 12:4
+%vd_sp 12:4 22:1
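+
+# Editor's note: fields concatenate their arguments most-significant
+# first, so %vd_dp builds (D << 4) | Vd, the usual double-precision
+# register number D:Vd, while %vd_sp builds (Vd << 1) | D for single
+# precision. Purely as an example, an insn with D == 1 and Vd == 0b0010
+# decodes as d18 via %vd_dp.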
+
+&nocp cp
+
+# M-profile VLDR/VSTR to sysreg
+%vldr_sysreg 22:1 13:3
+%imm7_0x4 0:7 !function=times_4
+
+&vldr_sysreg rn reg imm a w p
+@vldr_sysreg .... ... . a:1 . . . rn:4 ... . ... .. ....... \
+ reg=%vldr_sysreg imm=%imm7_0x4 &vldr_sysreg
+
+{
+ # Special cases which do not take an early NOCP: VLLDM and VLSTM
+ VLLDM_VLSTM 1110 1100 001 l:1 rn:4 0000 1010 op:1 000 0000
+ # VSCCLRM (new in v8.1M) is similar:
+ VSCCLRM 1110 1100 1.01 1111 .... 1011 imm:7 0 vd=%vd_dp size=3
+ VSCCLRM 1110 1100 1.01 1111 .... 1010 imm:8 vd=%vd_sp size=2
+
+ # FP system register accesses: these are a special case because accesses
+ # to FPCXT_NS succeed even if the FPU is disabled. We therefore need
+ # to handle them before the big NOCP blocks. Note that within these
+ # insns NOCP still has higher priority than UNDEFs; this is implemented
+ # by their returning 'false' for UNDEF so as to fall through into the
+ # NOCP check (in contrast to VLLDM etc, which call unallocated_encoding()
+ # for the UNDEFs there that must take precedence over NOCP.)
+
+ VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
+
+ # P=0 W=0 is SEE "Related encodings", so split into two patterns
+ VLDR_sysreg ---- 110 1 . . w:1 1 .... ... 0 111 11 ....... @vldr_sysreg p=1
+ VLDR_sysreg ---- 110 0 . . 1 1 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+ VSTR_sysreg ---- 110 1 . . w:1 0 .... ... 0 111 11 ....... @vldr_sysreg p=1
+ VSTR_sysreg ---- 110 0 . . 1 0 .... ... 0 111 11 ....... @vldr_sysreg p=0 w=1
+
+ NOCP 111- 1110 ---- ---- ---- cp:4 ---- ---- &nocp
+ NOCP 111- 110- ---- ---- ---- cp:4 ---- ---- &nocp
+ # From v8.1M onwards this range will also NOCP:
+ NOCP_8_1 111- 1111 ---- ---- ---- ---- ---- ---- &nocp cp=10
+}
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
new file mode 100644
index 000000000..2c9922dc2
--- /dev/null
+++ b/target/arm/m_helper.c
@@ -0,0 +1,2901 @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "target/arm/idau.h"
+#include "trace.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include <zlib.h> /* For crc32 */
+#include "semihosting/semihost.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "qemu/range.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+#ifdef CONFIG_TCG
+#include "arm_ldst.h"
+#include "exec/cpu_ldst.h"
+#include "semihosting/common-semi.h"
+#endif
+
+static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
+ uint32_t reg, uint32_t val)
+{
+ /* Only APSR is actually writable */
+ if (!(reg & 4)) {
+ uint32_t apsrmask = 0;
+
+ if (mask & 8) {
+ apsrmask |= XPSR_NZCV | XPSR_Q;
+ }
+ if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
+ apsrmask |= XPSR_GE;
+ }
+ xpsr_write(env, val, apsrmask);
+ }
+}
+
+static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
+{
+ uint32_t mask = 0;
+
+ if ((reg & 1) && el) {
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
+ }
+ if (!(reg & 4)) {
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
+ if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
+ mask |= XPSR_GE;
+ }
+ }
+ /* EPSR reads as zero */
+ return xpsr_read(env) & mask;
+}
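+
+/*
+ * Editor's note: the reg values above follow the MRS/MSR SYSm encoding
+ * (0..3 are APSR/IAPSR/EAPSR/xPSR, 5..7 are IPSR/EPSR/IEPSR): bit 0
+ * selects the IPSR portion, bit 1 the EPSR portion (which reads as
+ * zero here), and bit 2 set means the APSR portion is excluded.
+ */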
+
+static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
+{
+ uint32_t value = env->v7m.control[secure];
+
+ if (!secure) {
+ /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
+ value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
+ }
+ return value;
+}
+
+#ifdef CONFIG_USER_ONLY
+
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
+{
+ uint32_t mask = extract32(maskreg, 8, 4);
+ uint32_t reg = extract32(maskreg, 0, 8);
+
+ switch (reg) {
+ case 0 ... 7: /* xPSR sub-fields */
+ v7m_msr_xpsr(env, mask, reg, val);
+ break;
+ case 20: /* CONTROL */
+ /* There are no sub-fields that are actually writable from EL0. */
+ break;
+ default:
+ /* Unprivileged writes to other registers are ignored */
+ break;
+ }
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+ switch (reg) {
+ case 0 ... 7: /* xPSR sub-fields */
+ return v7m_mrs_xpsr(env, reg, 0);
+ case 20: /* CONTROL */
+ return v7m_mrs_control(env, 0);
+ default:
+ /* Unprivileged reads others as zero. */
+ return 0;
+ }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /*
+ * The TT instructions can be used by unprivileged code, but in
+ * user-only emulation we don't have the MPU.
+ * Luckily since we know we are NonSecure unprivileged (and that in
+ * turn means that the A flag wasn't specified), all the bits in the
+ * register must be zero:
+ * IREGION: 0 because IRVALID is 0
+ * IRVALID: 0 because NS
+ * S: 0 because NS
+ * NSRW: 0 because NS
+ * NSR: 0 because NS
+ * RW: 0 because unpriv and A flag not set
+ * R: 0 because unpriv and A flag not set
+ * SRVALID: 0 because NS
+ * MRVALID: 0 because unpriv and A flag not set
+ * SREGION: 0 because SRVALID is 0
+ * MREGION: 0 because MRVALID is 0
+ */
+ return 0;
+}
+
+#else
+
+/*
+ * What kind of stack write are we doing? This affects how exceptions
+ * generated during the stacking are treated.
+ */
+typedef enum StackingMode {
+ STACK_NORMAL,
+ STACK_IGNFAULTS,
+ STACK_LAZYFP,
+} StackingMode;
+
+static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
+ ARMMMUIdx mmu_idx, StackingMode mode)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+ int exc;
+ bool exc_secure;
+
+ if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* MPU/SAU lookup failed */
+ if (fi.type == ARMFault_QEMU_SFault) {
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault with SFSR.LSPERR "
+ "during lazy stacking\n");
+ env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault with SFSR.AUVIOL "
+ "during stacking\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ }
+ env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
+ env->v7m.sfar = addr;
+ exc = ARMV7M_EXCP_SECURE;
+ exc_secure = false;
+ } else {
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault with CFSR.MLSPERR\n");
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault with CFSR.MSTKERR\n");
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
+ }
+ exc = ARMV7M_EXCP_MEM;
+ exc_secure = secure;
+ }
+ goto pend_fault;
+ }
+ address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ /* BusFault trying to write the data */
+ if (mode == STACK_LAZYFP) {
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
+ }
+ exc = ARMV7M_EXCP_BUS;
+ exc_secure = false;
+ goto pend_fault;
+ }
+ return true;
+
+pend_fault:
+ /*
+ * By pending the exception at this point we are making
+ * the IMPDEF choice "overridden exceptions pended" (see the
+ * MergeExcInfo() pseudocode). The other choice would be to not
+ * pend them now and then make a choice about which to throw away
+ * later if we have two derived exceptions.
+ * The only case when we must not pend the exception but instead
+ * throw it away is if we are doing the push of the callee registers
+ * and we've already generated a derived exception (this is indicated
+ * by the caller passing STACK_IGNFAULTS). Even in this case we will
+ * still update the fault status registers.
+ */
+ switch (mode) {
+ case STACK_NORMAL:
+ armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
+ break;
+ case STACK_LAZYFP:
+ armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
+ break;
+ case STACK_IGNFAULTS:
+ break;
+ }
+ return false;
+}
+
+static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
+ ARMMMUIdx mmu_idx)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ bool secure = mmu_idx & ARM_MMU_IDX_M_S;
+ int exc;
+ bool exc_secure;
+ uint32_t value;
+
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* MPU/SAU lookup failed */
+ if (fi.type == ARMFault_QEMU_SFault) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault with SFSR.AUVIOL during unstack\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+ env->v7m.sfar = addr;
+ exc = ARMV7M_EXCP_SECURE;
+ exc_secure = false;
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault with CFSR.MUNSTKERR\n");
+ env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
+ exc = ARMV7M_EXCP_MEM;
+ exc_secure = secure;
+ }
+ goto pend_fault;
+ }
+
+ value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ /* BusFault trying to read the data */
+ qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
+ exc = ARMV7M_EXCP_BUS;
+ exc_secure = false;
+ goto pend_fault;
+ }
+
+ *dest = value;
+ return true;
+
+pend_fault:
+ /*
+ * By pending the exception at this point we are making
+ * the IMPDEF choice "overridden exceptions pended" (see the
+ * MergeExcInfo() pseudocode). The other choice would be to not
+ * pend them now and then make a choice about which to throw away
+ * later if we have two derived exceptions.
+ */
+ armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
+ return false;
+}
+
+void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
+{
+ /*
+ * Preserve FP state (because LSPACT was set and we are about
+ * to execute an FP instruction). This corresponds to the
+ * PreserveFPState() pseudocode.
+ * We may throw an exception if the stacking fails.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
+ bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
+ bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
+ uint32_t fpcar = env->v7m.fpcar[is_secure];
+ bool stacked_ok = true;
+ bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+ bool take_exception;
+
+ /* Take the iothread lock as we are going to touch the NVIC */
+ qemu_mutex_lock_iothread();
+
+ /* Check the background context had access to the FPU */
+ if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
+ env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
+ armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ }
+
+ if (!splimviol && stacked_ok) {
+ /* We only stack if the stack limit wasn't violated */
+ int i;
+ ARMMMUIdx mmu_idx;
+
+ mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = fpcar + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR/VPR */
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
+ v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
+ }
+
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, fpcar + 0x40,
+ vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, fpcar + 0x44,
+ env->v7m.vpr, mmu_idx, STACK_LAZYFP);
+ }
+ }
+
+ /*
+ * We definitely pended an exception, but it's possible that it
+ * might not be able to be taken now. If its priority permits us
+ * to take it now, then we must not update the LSPACT or FP regs,
+ * but instead jump out to take the exception immediately.
+ * If it's just pending and won't be taken until the current
+ * handler exits, then we do update LSPACT and the FP regs.
+ */
+ take_exception = !stacked_ok &&
+ armv7m_nvic_can_take_pending_exception(env->nvic);
+
+ qemu_mutex_unlock_iothread();
+
+ if (take_exception) {
+ raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
+ }
+
+ env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+
+ if (ts) {
+ /* Clear s0 to s31 and the FPSCR and VPR */
+ int i;
+
+ for (i = 0; i < 32; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = 0;
+ }
+ }
+ /*
+ * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
+ * unchanged.
+ */
+}
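+
+/*
+ * Editor's note: the stacking loop above lays the frame out at FPCAR
+ * as the architecture requires: s0..s15 at offsets 0x00..0x3c, FPSCR
+ * at 0x40, VPR (when MVE is present) at 0x44, and, when TS is set,
+ * s16..s31 at 0x48..0x84 (the "faddr += 8" skips the FPSCR/VPR slot).
+ */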
+
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+ bool new_spsel,
+ bool secstate)
+{
+ bool old_is_psp = v7m_using_psp(env);
+
+ env->v7m.control[secstate] =
+ deposit32(env->v7m.control[secstate],
+ R_V7M_CONTROL_SPSEL_SHIFT,
+ R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+ if (secstate == env->v7m.secure) {
+ bool new_is_psp = v7m_using_psp(env);
+ uint32_t tmp;
+
+ if (old_is_psp != new_is_psp) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ }
+ }
+}
+
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
+{
+ write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+ /*
+ * Write a new value to v7m.exception, thus transitioning into or out
+ * of Handler mode; this may result in a change of active stack pointer.
+ */
+ bool new_is_psp, old_is_psp = v7m_using_psp(env);
+ uint32_t tmp;
+
+ env->v7m.exception = new_exc;
+
+ new_is_psp = v7m_using_psp(env);
+
+ if (old_is_psp != new_is_psp) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+ uint32_t new_ss_msp, new_ss_psp;
+
+ if (env->v7m.secure == new_secstate) {
+ return;
+ }
+
+ /*
+ * All the banked state is accessed by looking at env->v7m.secure
+ * except for the stack pointer; rearrange the SP appropriately.
+ */
+ new_ss_msp = env->v7m.other_ss_msp;
+ new_ss_psp = env->v7m.other_ss_psp;
+
+ if (v7m_using_psp(env)) {
+ env->v7m.other_ss_psp = env->regs[13];
+ env->v7m.other_ss_msp = env->v7m.other_sp;
+ } else {
+ env->v7m.other_ss_msp = env->regs[13];
+ env->v7m.other_ss_psp = env->v7m.other_sp;
+ }
+
+ env->v7m.secure = new_secstate;
+
+ if (v7m_using_psp(env)) {
+ env->regs[13] = new_ss_psp;
+ env->v7m.other_sp = new_ss_msp;
+ } else {
+ env->regs[13] = new_ss_msp;
+ env->v7m.other_sp = new_ss_psp;
+ }
+}
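+
+/*
+ * Editor's note: the four M-profile stack pointers live in regs[13]
+ * plus three storage slots: regs[13] holds the SP currently in use,
+ * v7m.other_sp the other SPSEL choice for the current security state,
+ * and v7m.other_ss_msp/other_ss_psp the pair belonging to the opposite
+ * security state. The swap above only re-shuffles which of
+ * MSP_S/PSP_S/MSP_NS/PSP_NS is stored where.
+ */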
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /*
+ * Handle v7M BXNS:
+ * - if the return value is a magic value, do exception return (like BX)
+ * - otherwise bit 0 of the return value is the target security state
+ */
+ uint32_t min_magic;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* Covers FNC_RETURN and EXC_RETURN magic */
+ min_magic = FNC_RETURN_MIN_MAGIC;
+ } else {
+ /* EXC_RETURN magic only */
+ min_magic = EXC_RETURN_MIN_MAGIC;
+ }
+
+ if (dest >= min_magic) {
+ /*
+ * This is an exception return magic value; put it where
+ * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+ * Note that if we ever add gen_ss_advance() singlestep support to
+ * M profile this should count as an "instruction execution complete"
+ * event (compare gen_bx_excret_final_code()).
+ */
+ env->regs[15] = dest & ~1;
+ env->thumb = dest & 1;
+ HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+ /* notreached */
+ }
+
+ /* translate.c should have made BXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ if (!(dest & 1)) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ }
+ switch_v7m_security_state(env, dest & 1);
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+ arm_rebuild_hflags(env);
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /*
+ * Handle v7M BLXNS:
+ * - bit 0 of the destination address is the target security state
+ */
+
+ /* At this point regs[15] is the address just after the BLXNS */
+ uint32_t nextinst = env->regs[15] | 1;
+ uint32_t sp = env->regs[13] - 8;
+ uint32_t saved_psr;
+
+ /* translate.c will have made BLXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ if (dest & 1) {
+ /*
+ * Target is Secure, so this is just a normal BLX,
+ * except that the low bit doesn't indicate Thumb/not.
+ */
+ env->regs[14] = nextinst;
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+ return;
+ }
+
+ /* Target is non-secure: first push a stack frame */
+ if (!QEMU_IS_ALIGNED(sp, 8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "BLXNS with misaligned SP is UNPREDICTABLE\n");
+ }
+
+ if (sp < v7m_sp_limit(env)) {
+ raise_exception(env, EXCP_STKOF, 0, 1);
+ }
+
+ saved_psr = env->v7m.exception;
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+ saved_psr |= XPSR_SFPA;
+ }
+
+ /* Note that these stores can throw exceptions on MPU faults */
+ cpu_stl_data_ra(env, sp, nextinst, GETPC());
+ cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
+
+ env->regs[13] = sp;
+ env->regs[14] = 0xfeffffff;
+ if (arm_v7m_is_handler_mode(env)) {
+ /*
+ * Write a dummy value to IPSR, to avoid leaking the current secure
+ * exception number to non-secure code. This is guaranteed not
+ * to cause write_v7m_exception() to actually change stacks.
+ */
+ write_v7m_exception(env, 1);
+ }
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ switch_v7m_security_state(env, 0);
+ env->thumb = 1;
+ env->regs[15] = dest;
+ arm_rebuild_hflags(env);
+}
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+ bool spsel)
+{
+ /*
+ * Return a pointer to the location where we currently store the
+ * stack pointer for the requested security state and thread mode.
+ * This pointer will become invalid if the CPU state is updated
+ * such that the stack pointers are switched around (eg changing
+ * the SPSEL control bit).
+ * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+ * Unlike that pseudocode, we require the caller to pass us in the
+ * SPSEL control bit value; this is because we also use this
+ * function in handling of pushing of the callee-saves registers
+ * part of the v8M stack frame (pseudocode PushCalleeStack()),
+ * and in the tailchain codepath the SPSEL bit comes from the exception
+ * return magic LR value from the previous exception. The pseudocode
+ * opencodes the stack-selection in PushCalleeStack(), but we prefer
+ * to make this utility function generic enough to do the job.
+ */
+ bool want_psp = threadmode && spsel;
+
+ if (secure == env->v7m.secure) {
+ if (want_psp == v7m_using_psp(env)) {
+ return &env->regs[13];
+ } else {
+ return &env->v7m.other_sp;
+ }
+ } else {
+ if (want_psp) {
+ return &env->v7m.other_ss_psp;
+ } else {
+ return &env->v7m.other_ss_msp;
+ }
+ }
+}
+
+static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
+ uint32_t *pvec)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxResult result;
+ uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
+ uint32_t vector_entry;
+ MemTxAttrs attrs = {};
+ ARMMMUIdx mmu_idx;
+ bool exc_secure;
+
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
+
+ /*
+ * We don't do a get_phys_addr() here because the rules for vector
+ * loads are special: they always use the default memory map, and
+ * the default memory map permits reads from all addresses.
+ * Since there's no easy way to pass through to pmsav8_mpu_lookup()
+ * that we want this special case which would always say "yes",
+ * we just do the SAU lookup here followed by a direct physical load.
+ */
+ attrs.secure = targets_secure;
+ attrs.user = false;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ V8M_SAttributes sattrs = {};
+
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+ if (sattrs.ns) {
+ attrs.secure = false;
+ } else if (!targets_secure) {
+ /*
+ * NS access to S memory: the underlying exception which we escalate
+ * to HardFault is SecureFault, which always targets Secure.
+ */
+ exc_secure = true;
+ goto load_fail;
+ }
+ }
+
+ vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
+ attrs, &result);
+ if (result != MEMTX_OK) {
+ /*
+ * Underlying exception is BusFault: its target security state
+ * depends on BFHFNMINS.
+ */
+ exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
+ goto load_fail;
+ }
+ *pvec = vector_entry;
+ return true;
+
+load_fail:
+ /*
+ * All vector table fetch fails are reported as HardFault, with
+ * HFSR.VECTTBL and .FORCED set. (FORCED is set because
+ * technically the underlying exception is a SecureFault or BusFault
+ * that is escalated to HardFault.) This is a terminal exception,
+ * so we will either take the HardFault immediately or else enter
+ * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
+ * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
+ * secure); otherwise it targets the same security state as the
+ * underlying exception.
+ * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
+ */
+ if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+ exc_secure = true;
+ }
+ env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
+ if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
+ env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
+ }
+ armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
+ return false;
+}
+
+static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
+{
+ /*
+ * Return the integrity signature value for the callee-saves
+ * stack frame section. @lr is the exception return payload/LR value
+ * whose FType bit forms bit 0 of the signature if FP is present.
+ */
+ uint32_t sig = 0xfefa125a;
+
+ if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
+ || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
+ sig |= 1;
+ }
+ return sig;
+}
+
+static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+ bool ignore_faults)
+{
+ /*
+ * For v8M, push the callee-saves register part of the stack frame.
+ * Compare the v8M pseudocode PushCalleeStack().
+ * In the tailchaining case this may not be the current stack.
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+ ARMMMUIdx mmu_idx;
+ bool stacked_ok;
+ uint32_t limit;
+ bool want_psp;
+ uint32_t sig;
+ StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
+
+ if (dotailchain) {
+ bool mode = lr & R_V7M_EXCRET_MODE_MASK;
+ bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
+ !mode;
+
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
+ frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
+ lr & R_V7M_EXCRET_SPSEL_MASK);
+ want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
+ if (want_psp) {
+ limit = env->v7m.psplim[M_REG_S];
+ } else {
+ limit = env->v7m.msplim[M_REG_S];
+ }
+ } else {
+ mmu_idx = arm_mmu_idx(env);
+ frame_sp_p = &env->regs[13];
+ limit = v7m_sp_limit(env);
+ }
+
+ frameptr = *frame_sp_p - 0x28;
+ if (frameptr < limit) {
+ /*
+ * Stack limit failure: set SP to the limit value, and generate
+ * STKOF UsageFault. Stack pushes below the limit must not be
+ * performed. It is IMPDEF whether pushes above the limit are
+ * performed; we choose not to.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...STKOF during callee-saves register stacking\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ *frame_sp_p = limit;
+ return true;
+ }
+
+ /*
+ * Write as much of the stack frame as we can. A write failure may
+ * cause us to pend a derived exception.
+ */
+ sig = v7m_integrity_sig(env, lr);
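+ /*
+ * Callee-saves frame layout: the integrity signature at offset 0,
+ * a reserved word at offset 4 (left unwritten here), then r4-r11
+ * at offsets 0x8..0x24 -- 0x28 bytes in all, matching the frameptr
+ * adjustment above.
+ */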
+ stacked_ok =
+ v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
+ v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
+
+ /* Update SP regardless of whether any of the stack accesses failed. */
+ *frame_sp_p = frameptr;
+
+ return !stacked_ok;
+}
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
+ bool ignore_stackfaults)
+{
+ /*
+ * Do the "take the exception" parts of exception entry,
+ * but not the pushing of state to the stack. This is
+ * similar to the pseudocode ExceptionTaken() function.
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t addr;
+ bool targets_secure;
+ int exc;
+ bool push_failed = false;
+
+ armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+ qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+ targets_secure ? "secure" : "nonsecure", exc);
+
+ if (dotailchain) {
+ /* Sanitize LR FType and PREFIX bits */
+ if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
+ }
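+ /* EXC_RETURN bits [31:24] are the prefix and must read as 0xff */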
+ lr = deposit32(lr, 24, 8, 0xff);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (lr & R_V7M_EXCRET_S_MASK)) {
+ /*
+ * The background code (the owner of the registers in the
+ * exception frame) is Secure. This means it may either already
+ * have or now needs to push callee-saves registers.
+ */
+ if (targets_secure) {
+ if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+ /*
+ * We took an exception from Secure to NonSecure
+ * (which means the callee-saved registers got stacked)
+ * and are now tailchaining to a Secure exception.
+ * Clear DCRS so eventual return from this Secure
+ * exception unstacks the callee-saved registers.
+ */
+ lr &= ~R_V7M_EXCRET_DCRS_MASK;
+ }
+ } else {
+ /*
+ * We're going to a non-secure exception; push the
+ * callee-saves registers to the stack now, if they're
+ * not already saved.
+ */
+ if (lr & R_V7M_EXCRET_DCRS_MASK &&
+ !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
+ push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
+ ignore_stackfaults);
+ }
+ lr |= R_V7M_EXCRET_DCRS_MASK;
+ }
+ }
+
+ lr &= ~R_V7M_EXCRET_ES_MASK;
+ if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ lr |= R_V7M_EXCRET_ES_MASK;
+ }
+ lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+ if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+
+ /*
+ * Clear registers if necessary to prevent non-secure exception
+ * code being able to see register values from secure code.
+ * Where register values become architecturally UNKNOWN we leave
+ * them with their previous values. v8.1M is tighter than v8.0M
+ * here and always zeroes the caller-saved registers regardless
+ * of the security state the exception is targeting.
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
+ /*
+ * Always clear the caller-saved registers (they have been
+ * pushed to the stack earlier in v7m_push_stack()).
+ * Clear callee-saved registers if the background code is
+ * Secure (in which case these regs were saved in
+ * v7m_push_callee_stack()).
+ */
+ int i;
+ /*
+ * r4..r11 are callee-saves, zero only if background
+ * state was Secure (EXCRET.S == 1) and exception
+ * targets Non-secure state
+ */
+ bool zero_callee_saves = !targets_secure &&
+ (lr & R_V7M_EXCRET_S_MASK);
+
+ for (i = 0; i < 13; i++) {
+ if (i < 4 || i > 11 || zero_callee_saves) {
+ env->regs[i] = 0;
+ }
+ }
+ /* Clear EAPSR */
+ xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+ }
+ }
+ }
+
+ if (push_failed && !ignore_stackfaults) {
+ /*
+ * Derived exception on callee-saves register stacking:
+ * we might now want to take a different exception which
+ * targets a different security state, so try again from the top.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...derived exception on callee-saves register stacking");
+ v7m_exception_taken(cpu, lr, true, true);
+ return;
+ }
+
+ if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
+ /* Vector load failed: derived exception */
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
+ v7m_exception_taken(cpu, lr, true, true);
+ return;
+ }
+
+ /*
+ * Now we've done everything that might cause a derived exception
+ * we can go ahead and activate whichever exception we're going to
+ * take (which might now be the derived exception).
+ */
+ armv7m_nvic_acknowledge_irq(env->nvic);
+
+ /* Switch to target security state -- must do this before writing SPSEL */
+ switch_v7m_security_state(env, targets_secure);
+ write_v7m_control_spsel(env, 0);
+ arm_clear_exclusive(env);
+ /* Clear SFPA and FPCA (has no effect if no FPU) */
+ env->v7m.control[M_REG_S] &=
+ ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
+ /* Clear IT bits */
+ env->condexec_bits = 0;
+ env->regs[14] = lr;
+ env->regs[15] = addr & 0xfffffffe;
+ env->thumb = addr & 1;
+ arm_rebuild_hflags(env);
+}
+
+static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
+ bool apply_splim)
+{
+ /*
+ * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
+ * that we will need later in order to do lazy FP reg stacking.
+ */
+ bool is_secure = env->v7m.secure;
+ void *nvic = env->nvic;
+ /*
+ * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
+ * are banked and we want to update the bit in the bank for the
+ * current security state; and in one case we want to specifically
+ * update the NS banked version of a bit even if we are secure.
+ */
+ uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
+ uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
+ uint32_t *fpccr = &env->v7m.fpccr[is_secure];
+ bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
+
+ env->v7m.fpcar[is_secure] = frameptr & ~0x7;
+
+ if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
+ bool splimviol;
+ uint32_t splim = v7m_sp_limit(env);
+ bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
+ (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
+
+ splimviol = !ign && frameptr < splim;
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
+ }
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
+
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
+
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
+ !arm_v7m_is_handler_mode(env));
+
+ hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
+
+ bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
+
+ mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
+ *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
+
+ ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
+ *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
+
+ monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
+
+ sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
+ *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
+ }
+}
+
+void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
+{
+ /* fptr is the value of Rn, the frame pointer we store the FP regs to */
+ ARMCPU *cpu = env_archcpu(env);
+ bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
+ uintptr_t ra = GETPC();
+
+ assert(env->v7m.secure);
+
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ return;
+ }
+
+ /* Check access to the coprocessor is permitted */
+ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+ raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+ }
+
+ if (lspact) {
+ /* LSPACT should not be active when there is active FP state */
+ raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
+ }
+
+ if (fptr & 7) {
+ raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+ }
+
+ /*
+ * Note that we do not use v7m_stack_write() here, because the
+ * accesses should not set the FSR bits for stacking errors if they
+ * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
+ * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
+ * and longjmp out.
+ */
+ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+ bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+ int i;
+
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = fptr + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR and VPR */
+ }
+ cpu_stl_data_ra(env, faddr, slo, ra);
+ cpu_stl_data_ra(env, faddr + 4, shi, ra);
+ }
+ cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
+ }
+
+ /*
+ * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
+ * leave them unchanged, matching our choice in v7m_preserve_fp_state.
+ */
+ if (ts) {
+ for (i = 0; i < 32; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = 0;
+ }
+ }
+ } else {
+ v7m_update_fpccr(env, fptr, false);
+ }
+
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+}
+
+void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uintptr_t ra = GETPC();
+
+ /* fptr is the value of Rn, the frame pointer we load the FP regs from */
+ assert(env->v7m.secure);
+
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ return;
+ }
+
+ /* Check access to the coprocessor is permitted */
+ if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
+ raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
+ }
+
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+ /*
+ * LSPACT set means lazy state preservation was pending but never
+ * performed, so the FP registers still hold valid data: just
+ * clear LSPACT without reloading anything from memory.
+ */
+ env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
+ } else {
+ bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
+ int i;
+ uint32_t fpscr;
+
+ if (fptr & 7) {
+ raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
+ }
+
+ for (i = 0; i < (ts ? 32 : 16); i += 2) {
+ uint32_t slo, shi;
+ uint64_t dn;
+ uint32_t faddr = fptr + 4 * i;
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR and VPR */
+ }
+
+ slo = cpu_ldl_data_ra(env, faddr, ra);
+ shi = cpu_ldl_data_ra(env, faddr + 4, ra);
+
+ dn = (uint64_t) shi << 32 | slo;
+ *aa32_vfp_dreg(env, i / 2) = dn;
+ }
+ fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
+ vfp_set_fpscr(env, fpscr);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
+ }
+ }
+
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+}
+
+static bool v7m_push_stack(ARMCPU *cpu)
+{
+ /*
+ * Do the "set up stack frame" part of exception entry,
+ * similar to pseudocode PushStack().
+ * Return true if we generate a derived exception (and so
+ * should ignore further stack faults trying to process
+ * that derived exception).
+ */
+ bool stacked_ok = true, limitviol = false;
+ CPUARMState *env = &cpu->env;
+ uint32_t xpsr = xpsr_read(env);
+ uint32_t frameptr = env->regs[13];
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ uint32_t framesize;
+ bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
+
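+ /*
+ * Frame sizes: 0x20 for the basic integer frame; 0x68 when s0-s15
+ * plus the FPSCR and VPR/reserved slots are also stacked; 0xa8 when
+ * s16-s31 must be stacked too (Secure state with FPCCR_S.TS set).
+ */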
+ if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
+ (env->v7m.secure || nsacr_cp10)) {
+ if (env->v7m.secure &&
+ env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
+ framesize = 0xa8;
+ } else {
+ framesize = 0x68;
+ }
+ } else {
+ framesize = 0x20;
+ }
+
+ /* Align stack pointer if the guest wants that */
+ if ((frameptr & 4) &&
+ (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
+ frameptr -= 4;
+ xpsr |= XPSR_SPREALIGN;
+ }
+
+ xpsr &= ~XPSR_SFPA;
+ if (env->v7m.secure &&
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
+ xpsr |= XPSR_SFPA;
+ }
+
+ frameptr -= framesize;
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ uint32_t limit = v7m_sp_limit(env);
+
+ if (frameptr < limit) {
+ /*
+ * Stack limit failure: set SP to the limit value, and generate
+ * STKOF UsageFault. Stack pushes below the limit must not be
+ * performed. It is IMPDEF whether pushes above the limit are
+ * performed; we choose not to.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...STKOF during stacking\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->regs[13] = limit;
+ /*
+ * We won't try to perform any further memory accesses but
+ * we must continue through the following code to check for
+ * permission faults during FPU state preservation, and we
+ * must update FPCCR if lazy stacking is enabled.
+ */
+ limitviol = true;
+ stacked_ok = false;
+ }
+ }
+
+ /*
+ * Write as much of the stack frame as we can. If we fail a stack
+ * write this will result in a derived exception being pended
+ * (which may be taken in preference to the one we started with
+ * if it has higher priority).
+ */
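+ /*
+ * Basic frame layout: r0-r3, r12, lr, pc and xPSR at offsets
+ * 0x0..0x1c; any FP state follows from offset 0x20, with the FPSCR
+ * at 0x60 and (if MVE is implemented) VPR at 0x64.
+ */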
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 4, env->regs[1],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 8, env->regs[2],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 12, env->regs[3],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 16, env->regs[12],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 20, env->regs[14],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 24, env->regs[15],
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
+
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
+ /* FPU is active, try to save its registers */
+ bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
+
+ if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault because LSPACT and FPCA both set\n");
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ } else if (!env->v7m.secure && !nsacr_cp10) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...Secure UsageFault with CFSR.NOCP because "
+ "NSACR.CP10 prevents stacking FP regs\n");
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ } else {
+ if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
+ /* Lazy stacking disabled, save registers now */
+ int i;
+ bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
+ arm_current_el(env) != 0);
+
+ if (stacked_ok && !cpacr_pass) {
+ /*
+ * Take UsageFault if CPACR forbids access. The pseudocode
+ * here does a full CheckCPEnabled() but we know the NSACR
+ * check can never fail as we have already handled that.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...UsageFault with CFSR.NOCP because "
+ "CPACR.CP10 prevents stacking FP regs\n");
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
+ stacked_ok = false;
+ }
+
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+ uint64_t dn = *aa32_vfp_dreg(env, i / 2);
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
+ uint32_t slo = extract64(dn, 0, 32);
+ uint32_t shi = extract64(dn, 32, 32);
+
+ if (i >= 16) {
+ faddr += 8; /* skip the slot for the FPSCR and VPR */
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, faddr, slo,
+ mmu_idx, STACK_NORMAL) &&
+ v7m_stack_write(cpu, faddr + 4, shi,
+ mmu_idx, STACK_NORMAL);
+ }
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, frameptr + 0x60,
+ vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ stacked_ok = stacked_ok &&
+ v7m_stack_write(cpu, frameptr + 0x64,
+ env->v7m.vpr, mmu_idx, STACK_NORMAL);
+ }
+ if (cpacr_pass) {
+ for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = 0;
+ }
+ }
+ } else {
+ /* Lazy stacking enabled, save necessary info to stack later */
+ v7m_update_fpccr(env, frameptr + 0x20, true);
+ }
+ }
+ }
+
+ /*
+ * If we broke a stack limit then SP was already updated earlier;
+ * otherwise we update SP regardless of whether any of the stack
+ * accesses failed or we took some other kind of fault.
+ */
+ if (!limitviol) {
+ env->regs[13] = frameptr;
+ }
+
+ return !stacked_ok;
+}
+
+static void do_v7m_exception_exit(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ uint32_t excret;
+ uint32_t xpsr, xpsr_mask;
+ bool ufault = false;
+ bool sfault = false;
+ bool return_to_sp_process;
+ bool return_to_handler;
+ bool rettobase = false;
+ bool exc_secure = false;
+ bool return_to_secure;
+ bool ftype;
+ bool restore_s16_s31 = false;
+
+ /*
+ * If we're not in Handler mode then jumps to magic exception-exit
+ * addresses don't have magic behaviour. However for the v8M
+ * security extensions the magic secure-function-return has to
+ * work in thread mode too, so to avoid doing an extra check in
+ * the generated code we allow exception-exit magic to also cause the
+ * internal exception and bring us here in thread mode. Correct code
+ * will never try to do this (the following insn fetch will always
+ * fault) so the overhead of having taken an unnecessary exception
+ * doesn't matter.
+ */
+ if (!arm_v7m_is_handler_mode(env)) {
+ return;
+ }
+
+ /*
+ * In the spec pseudocode ExceptionReturn() is called directly
+ * from BXWritePC() and gets the full target PC value including
+ * bit zero. In QEMU's implementation we treat it as a normal
+ * jump-to-register (which is then caught later on), and so split
+ * the target value up between env->regs[15] and env->thumb in
+ * gen_bx(). Reconstitute it.
+ */
+ excret = env->regs[15];
+ if (env->thumb) {
+ excret |= 1;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
+ " previous exception %d\n",
+ excret, env->v7m.exception);
+
+ if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
+ excret);
+ }
+
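+ /* EXCRET.FType set means a standard frame with no FP state saved */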
+ ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
+
+ if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
+ "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
+ "if FPU not present\n",
+ excret);
+ ftype = true;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /*
+ * EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ * we pick which FAULTMASK to clear.
+ */
+ if (!env->v7m.secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
+ sfault = true;
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
+ excret &= ~R_V7M_EXCRET_ES_MASK;
+ }
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
+ }
+
+ if (env->v7m.exception != ARMV7M_EXCP_NMI) {
+ /*
+ * Auto-clear FAULTMASK on return from other than NMI.
+ * If the security extension is implemented then this only
+ * happens if the raw execution priority is >= 0; the
+ * value of the ES bit in the exception return value indicates
+ * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
+ env->v7m.faultmask[exc_secure] = 0;
+ }
+ } else {
+ env->v7m.faultmask[M_REG_NS] = 0;
+ }
+ }
+
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
+ exc_secure)) {
+ case -1:
+ /* attempt to exit an exception that isn't active */
+ ufault = true;
+ break;
+ case 0:
+ /* still an irq active now */
+ break;
+ case 1:
+ /*
+ * We returned to base exception level, no nesting.
+ * (In the pseudocode this is written using "NestedActivation != 1"
+ * where we have 'rettobase == false'.)
+ */
+ rettobase = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
+ return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (excret & R_V7M_EXCRET_S_MASK);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /*
+ * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ * we choose to take the UsageFault.
+ */
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
+ (excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
+ ufault = true;
+ }
+ }
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
+ ufault = true;
+ }
+ } else {
+ /* For v7M we only recognize certain combinations of the low bits */
+ switch (excret & 0xf) {
+ case 1: /* Return to Handler */
+ break;
+ case 13: /* Return to Thread using Process stack */
+ case 9: /* Return to Thread using Main stack */
+ /*
+ * We only need to check NONBASETHRDENA for v7M, because in
+ * v8M this bit does not exist (it is RES1).
+ */
+ if (!rettobase &&
+ !(env->v7m.ccr[env->v7m.secure] &
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ ufault = true;
+ }
+ break;
+ default:
+ ufault = true;
+ }
+ }
+
+ /*
+ * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+ * Handler mode (and will be until we write the new XPSR.Interrupt
+ * field) this does not switch around the current stack pointer.
+ * We must do this before we do any kind of tailchaining, including
+ * for the derived exceptions on integrity check failures, or we will
+ * give the guest an incorrect EXCRET.SPSEL value on exception entry.
+ */
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
+ /*
+ * Clear scratch FP values left in caller saved registers; this
+ * must happen before any kind of tail chaining.
+ */
+ if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+ if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: error during lazy state deactivation\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ } else {
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ /* v8.1M adds this NOCP check */
+ bool nsacr_pass = exc_secure ||
+ extract32(env->v7m.nsacr, 10, 1);
+ bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
+ if (!nsacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: NSACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ } else if (!cpacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ exc_secure);
+ env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: CPACR prevents clearing FPU registers\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+ }
+ /* Clear s0..s15, FPSCR and VPR */
+ int i;
+
+ for (i = 0; i < 16; i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = 0;
+ }
+ }
+ }
+
+ if (sfault) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed EXC_RETURN.ES validity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ if (ufault) {
+ /*
+ * Bad exception return: instead of popping the exception
+ * stack, directly take a usage fault on the current stack.
+ */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ /*
+ * Tailchaining: if there is currently a pending exception that
+ * is high enough priority to preempt execution at the level we're
+ * about to return to, then just directly take that exception now,
+ * avoiding an unstack-and-then-stack. Note that now that we have
+ * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
+ * our current execution priority is already the execution priority we are
+ * returning to -- none of the state we would unstack or set based on
+ * the EXCRET value affects it.
+ */
+ if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
+ qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ switch_v7m_security_state(env, return_to_secure);
+
+ {
+ /*
+ * The stack pointer we should be reading the exception frame from
+ * depends on bits in the magic exception return type value (and
+ * for v8M isn't necessarily the stack pointer we will eventually
+ * end up resuming execution with). Get a pointer to the location
+ * in the CPU state struct where the SP we need is currently being
+ * stored; we will use and modify it in place.
+ * We use this limited C variable scope so we don't accidentally
+ * use 'frame_sp_p' after we do something that makes it invalid.
+ */
+ bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
+ uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
+ return_to_secure,
+ !return_to_handler,
+ spsel);
+ uint32_t frameptr = *frame_sp_p;
+ bool pop_ok = true;
+ ARMMMUIdx mmu_idx;
+ bool return_to_priv = return_to_handler ||
+ !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
+
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
+ return_to_priv);
+
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile exception return with non-8-aligned SP "
+ "for destination state is UNPREDICTABLE\n");
+ }
+
+ /* Do we need to pop callee-saved registers? */
+ if (return_to_secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
+ uint32_t actual_sig;
+
+ pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
+
+ if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
+ /* Take a SecureFault on the current stack */
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed exception return integrity "
+ "signature check\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
+
+ frameptr += 0x28;
+ }
+
+ /* Pop registers */
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
+ v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
+ v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
+
+ if (!pop_ok) {
+ /*
+ * v7m_stack_read() pended a fault, so take it (as a tail
+ * chained exception on the same stack frame)
+ */
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ /*
+ * Returning from an exception with a PC with bit 0 set is defined
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+ * the lsbit, and there are several RTOSes out there which incorrectly
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+ * complain about the badly behaved guest.
+ */
+ if (env->regs[15] & 1) {
+ env->regs[15] &= ~1U;
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE on v7M\n");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /*
+ * For v8M we have to check whether the xPSR exception field
+ * matches the EXCRET value for return to handler/thread
+ * before we commit to changing the SP and xPSR.
+ */
+ bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
+ if (return_to_handler != will_be_handler) {
+ /*
+ * Take an INVPC UsageFault on the current stack.
+ * By this point we will have switched to the security state
+ * for the background state, so this UsageFault will target
+ * that state.
+ */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: failed exception return integrity "
+ "check\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+ }
+
+ if (!ftype) {
+ /* FP present and we need to handle it */
+ if (!return_to_secure &&
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking SecureFault on existing stackframe: "
+ "Secure LSPACT set but exception return is "
+ "not to secure state\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ restore_s16_s31 = return_to_secure &&
+ (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
+
+ if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ /* State in FPU is still valid, just clear LSPACT */
+ env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
+ } else {
+ int i;
+ uint32_t fpscr;
+ bool cpacr_pass, nsacr_pass;
+
+ cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
+ return_to_priv);
+ nsacr_pass = return_to_secure ||
+ extract32(env->v7m.nsacr, 10, 1);
+
+ if (!cpacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ return_to_secure);
+ env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking UsageFault on existing "
+ "stackframe: CPACR.CP10 prevents unstacking "
+ "FP regs\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ } else if (!nsacr_pass) {
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking Secure UsageFault on existing "
+ "stackframe: NSACR.CP10 prevents unstacking "
+ "FP regs\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+ uint32_t slo, shi;
+ uint64_t dn;
+ uint32_t faddr = frameptr + 0x20 + 4 * i;
+
+ if (i >= 16) {
+ faddr += 8; /* Skip the slot for the FPSCR and VPR */
+ }
+
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
+ v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
+
+ if (!pop_ok) {
+ break;
+ }
+
+ dn = (uint64_t)shi << 32 | slo;
+ *aa32_vfp_dreg(env, i / 2) = dn;
+ }
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
+ if (pop_ok) {
+ vfp_set_fpscr(env, fpscr);
+ }
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ pop_ok = pop_ok &&
+ v7m_stack_read(cpu, &env->v7m.vpr,
+ frameptr + 0x64, mmu_idx);
+ }
+ if (!pop_ok) {
+ /*
+ * These regs are 0 if the security extension is present;
+ * otherwise they are merely UNKNOWN. We always zero them.
+ */
+ for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
+ *aa32_vfp_dreg(env, i / 2) = 0;
+ }
+ vfp_set_fpscr(env, 0);
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.vpr = 0;
+ }
+ }
+ }
+ }
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+ V7M_CONTROL, FPCA, !ftype);
+
+ /* Commit to consuming the stack frame */
+ frameptr += 0x20;
+ if (!ftype) {
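+ /* s0-s15 plus the FPSCR and VPR/reserved slots */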
+ frameptr += 0x48;
+ if (restore_s16_s31) {
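+ /* ...and s16-s31 as well */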
+ frameptr += 0x40;
+ }
+ }
+ /*
+ * Undo stack alignment (the SPREALIGN bit indicates that the original
+ * pre-exception SP was not 8-aligned and we added a padding word to
+ * align it, so we undo this by ORing in the bit that increases it
+ * from the current 8-aligned value to the 8-unaligned value. (Adding 4
+ * would work too but a logical OR is how the pseudocode specifies it.)
+ */
+ if (xpsr & XPSR_SPREALIGN) {
+ frameptr |= 4;
+ }
+ *frame_sp_p = frameptr;
+ }
+
+ xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
+ if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
+ xpsr_mask &= ~XPSR_GE;
+ }
+ /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
+ xpsr_write(env, xpsr, xpsr_mask);
+
+ if (env->v7m.secure) {
+ bool sfpa = xpsr & XPSR_SFPA;
+
+ env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
+ V7M_CONTROL, SFPA, sfpa);
+ }
+
+ /*
+ * The restored xPSR exception field will be zero if we're
+ * resuming in Thread mode. If that doesn't match what the
+ * exception return excret specified then this is a UsageFault.
+ * v7M requires we make this check here; v8M did it earlier.
+ */
+ if (return_to_handler != arm_v7m_is_handler_mode(env)) {
+ /*
+ * Take an INVPC UsageFault by pushing the stack again;
+ * we know we're v7M so this is never a Secure UsageFault.
+ */
+ bool ignore_stackfaults;
+
+ assert(!arm_feature(env, ARM_FEATURE_V8));
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ ignore_stackfaults = v7m_push_stack(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
+ "failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
+ return;
+ }
+
+ /* Otherwise, we have a successful exception exit. */
+ arm_clear_exclusive(env);
+ arm_rebuild_hflags(env);
+ qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
+}
+
+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+ /*
+ * v8M security extensions magic function return.
+ * We may either:
+ * (1) throw an exception (longjump)
+ * (2) return true if we successfully handled the function return
+ * (3) return false if we failed a consistency check and have
+ * pended a UsageFault that needs to be taken now
+ *
+ * At this point the magic return value is split between env->regs[15]
+ * and env->thumb. We don't bother to reconstitute it because we don't
+ * need it (all values are handled the same way).
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t newpc, newpsr, newpsr_exc;
+
+ qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+ {
+ bool threadmode, spsel;
+ MemOpIdx oi;
+ ARMMMUIdx mmu_idx;
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ /* Pull the return address and IPSR from the Secure stack */
+ threadmode = !arm_v7m_is_handler_mode(env);
+ spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+ frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+ frameptr = *frame_sp_p;
+
+ /*
+ * These loads may throw an exception (for MPU faults). We want to
+ * do them as secure, so work out what MMU index that is.
+ */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+ oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
+ newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
+ newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
+
+ /* Consistency checks on new IPSR */
+ newpsr_exc = newpsr & XPSR_EXCP;
+ if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+ (env->v7m.exception == 1 && newpsr_exc != 0))) {
+ /* Pend the fault and tell our caller to take it */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking INVPC UsageFault: "
+ "IPSR consistency check failed\n");
+ return false;
+ }
+
+ *frame_sp_p = frameptr + 8;
+ }
+
+ /* This invalidates frame_sp_p */
+ switch_v7m_security_state(env, true);
+ env->v7m.exception = newpsr_exc;
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ if (newpsr & XPSR_SFPA) {
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+ }
+ xpsr_write(env, 0, XPSR_IT);
+ env->thumb = newpc & 1;
+ env->regs[15] = newpc & ~1;
+ arm_rebuild_hflags(env);
+
+ qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+ return true;
+}
+
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint16_t *insn)
+{
+ /*
+ * Load a 16-bit portion of a v7M instruction, returning true on success,
+ * or false on failure (in which case we will have pended the appropriate
+ * exception).
+ * We need to do the instruction fetch's MPU and SAU checks
+ * like this because there is no MMU index that would allow
+ * doing the load with a single function call. Instead we must
+ * first check that the security attributes permit the load
+ * and that they don't mismatch on the two halves of the instruction,
+ * and then we do the load as a secure load (ie using the security
+ * attributes of the address, not the CPU, as architecturally required).
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ V8M_SAttributes sattrs = {};
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+ if (!sattrs.nsc || sattrs.ns) {
+ /*
+ * This must be the second half of the insn, and it straddles a
+ * region boundary with the second half not being S&NSC.
+ */
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+ }
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* the MPU lookup failed */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+ return false;
+ }
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+ return false;
+ }
+ return true;
+}
+
+static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint32_t *spdata)
+{
+ /*
+ * Read a word of data from the stack for the SG instruction,
+ * writing the value into *spdata. If the load succeeds, return
+ * true; otherwise pend an appropriate exception and return false.
+ * (We can't use data load helpers here that throw an exception
+ * because of the context we're called in, which is halfway through
+ * arm_v7m_cpu_do_interrupt().)
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
+ uint32_t value;
+
+ if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
+ /* MPU/SAU lookup failed */
+ if (fi.type == ARMFault_QEMU_SFault) {
+ qemu_log_mask(CPU_LOG_INT,
+ "...SecureFault during stack word read\n");
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
+ env->v7m.sfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "...MemManageFault during stack word read\n");
+ env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
+ R_V7M_CFSR_MMARVALID_MASK;
+ env->v7m.mmfar[M_REG_S] = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
+ }
+ return false;
+ }
+ value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ /* BusFault trying to read the data */
+ qemu_log_mask(CPU_LOG_INT,
+ "...BusFault during stack word read\n");
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = addr;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ return false;
+ }
+
+ *spdata = value;
+ return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+ /*
+ * Check whether this attempt to execute code in a Secure & NS-Callable
+ * memory region is for an SG instruction; if so, then emulate the
+ * effect of the SG instruction and return true. Otherwise pend
+ * the correct kind of exception and return false.
+ */
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx;
+ uint16_t insn;
+
+ /*
+ * We should never get here unless get_phys_addr_pmsav8() caused
+ * an exception for NS executing in S&NSC memory.
+ */
+ assert(!env->v7m.secure);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+ return false;
+ }
+
+ if (!env->thumb) {
+ goto gen_invep;
+ }
+
+ if (insn != 0xe97f) {
+ /*
+ * Not an SG instruction first half (we choose the IMPDEF
+ * early-SG-check option).
+ */
+ goto gen_invep;
+ }
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+ return false;
+ }
+
+ if (insn != 0xe97f) {
+ /*
+ * Not an SG instruction second half (yes, both halves of the SG
+ * insn have the same hex value)
+ */
+ goto gen_invep;
+ }
+
+ /*
+ * OK, we have confirmed that we really have an SG instruction.
+ * We know we're NS in S memory so don't need to repeat those checks.
+ */
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+ ", executing it\n", env->regs[15]);
+
+ if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
+ !arm_v7m_is_handler_mode(env)) {
+ /*
+ * v8.1M exception stack frame integrity check. Note that we
+ * must perform the memory access even if CCR_S.TRD is zero
+ * and we aren't going to check what the data loaded is.
+ */
+ uint32_t spdata, sp;
+
+ /*
+ * We know we are currently NS, so the S stack pointers must be
+ * in other_ss_{psp,msp}, not in regs[13]/other_sp.
+ */
+ sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
+ if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
+ /* Stack access failed and an exception has been pended */
+ return false;
+ }
+
+ if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
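+ /*
+ * Fault if the stacked word (ignoring bit 0, the FType bit)
+ * matches the integrity signature base value used by
+ * v7m_integrity_sig(), or if CONTROL_S.nPRIV is 0.
+ */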
+ if (((spdata & ~1) == 0xfefa125a) ||
+ !(env->v7m.control[M_REG_S] & 1)) {
+ goto gen_invep;
+ }
+ }
+ }
+
+ env->regs[14] &= ~1;
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ switch_v7m_security_state(env, true);
+ xpsr_write(env, 0, XPSR_IT);
+ env->regs[15] += 4;
+ arm_rebuild_hflags(env);
+ return true;
+
+gen_invep:
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+}
+
+void arm_v7m_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t lr;
+ bool ignore_stackfaults;
+
+ arm_log_exception(cs->exception_index);
+
+ /*
+ * For exceptions we just mark as pending on the NVIC, and let that
+ * handle it.
+ */
+ switch (cs->exception_index) {
+ case EXCP_UDEF:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
+ break;
+ case EXCP_NOCP:
+ {
+ /*
+ * NOCP might be directed to something other than the current
+ * security state if this fault is because of NSACR; we indicate
+ * the target security state using exception.target_el.
+ */
+ int target_secstate;
+
+ if (env->exception.target_el == 3) {
+ target_secstate = M_REG_S;
+ } else {
+ target_secstate = env->v7m.secure;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
+ env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
+ break;
+ }
+ case EXCP_INVSTATE:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
+ break;
+ case EXCP_STKOF:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
+ break;
+ case EXCP_LSERR:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
+ break;
+ case EXCP_UNALIGNED:
+ /* Unaligned faults reported by M-profile aware code */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
+ break;
+ case EXCP_DIVBYZERO:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
+ break;
+ case EXCP_SWI:
+ /* The PC already points to the next instruction. */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
+ break;
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ /*
+ * Note that for M profile we don't have a guest-facing FSR, but
+ * the env->exception.fsr will be populated by the code that
+ * raises the fault, in the A profile short-descriptor format.
+ */
+ switch (env->exception.fsr & 0xf) {
+ case M_FAKE_FSR_NSC_EXEC:
+ /*
+ * Exception generated when we try to execute code at an address
+ * which is marked as Secure & Non-Secure Callable and the CPU
+ * is in the Non-Secure state. The only instruction which can
+ * be executed like this is SG (and that only if both halves of
+ * the SG instruction have the same security attributes.)
+ * Everything else must generate an INVEP SecureFault, so we
+ * emulate the SG instruction here.
+ */
+ if (v7m_handle_execute_nsc(cpu)) {
+ return;
+ }
+ break;
+ case M_FAKE_FSR_SFAULT:
+ /*
+ * Various flavours of SecureFault for attempts to execute or
+ * access data in the wrong security state.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ if (env->v7m.secure) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVTRAN\n");
+ } else {
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ }
+ break;
+ case EXCP_DATA_ABORT:
+ /* This must be an NS access to S memory */
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.AUVIOL\n");
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ break;
+ case 0x8: /* External Abort */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.bfar = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
+ env->v7m.bfar);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ break;
+ case 0x1: /* Alignment fault reported by generic code */
+ qemu_log_mask(CPU_LOG_INT,
+ "...really UsageFault with UFSR.UNALIGNED\n");
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ break;
+ default:
+ /*
+ * All other FSR values are either MPU faults or "can't happen
+ * for M profile" cases.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
+ break;
+ case EXCP_DATA_ABORT:
+ env->v7m.cfsr[env->v7m.secure] |=
+ (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT,
+ "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
+ env->v7m.mmfar[env->v7m.secure]);
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
+ env->v7m.secure);
+ break;
+ }
+ break;
+ case EXCP_SEMIHOST:
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
+#ifdef CONFIG_TCG
+ env->regs[0] = do_common_semihosting(cs);
+#else
+ g_assert_not_reached();
+#endif
+ env->regs[15] += env->thumb ? 2 : 4;
+ return;
+ case EXCP_BKPT:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
+ break;
+ case EXCP_IRQ:
+ break;
+ case EXCP_EXCEPTION_EXIT:
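+ /*
+ * Magic PC values below EXC_RETURN_MIN_MAGIC but at or above
+ * FNC_RETURN_MIN_MAGIC are v8M secure function returns; values
+ * from EXC_RETURN_MIN_MAGIC upwards are ordinary exception returns.
+ */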
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
+ /* Must be v8M security extension function return */
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+ if (do_v7m_function_return(cpu)) {
+ return;
+ }
+ } else {
+ do_v7m_exception_exit(cpu);
+ return;
+ }
+ break;
+ case EXCP_LAZYFP:
+ /*
+ * We already pended the specific exception in the NVIC in the
+ * v7m_preserve_fp_state() helper function.
+ */
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ return; /* Never happens. Keep compiler happy. */
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_DCRS_MASK;
+ /*
+ * The S bit indicates whether we should return to Secure
+ * or NonSecure (ie our current state).
+ * The ES bit indicates whether we're taking this exception
+ * to Secure or NonSecure (ie our target state). We set it
+ * later, in v7m_exception_taken().
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
+ * This corresponds to the ARM ARM pseudocode for v8M setting
+ * some LR bits in PushStack() and some in ExceptionTaken();
+ * the distinction matters for the tailchain cases where we
+ * can take an exception without pushing the stack.
+ */
+ if (env->v7m.secure) {
+ lr |= R_V7M_EXCRET_S_MASK;
+ }
+ } else {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_S_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_ES_MASK;
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+ }
+ if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
+ lr |= R_V7M_EXCRET_FTYPE_MASK;
+ }
+ if (!arm_v7m_is_handler_mode(env)) {
+ lr |= R_V7M_EXCRET_MODE_MASK;
+ }
+
+ ignore_stackfaults = v7m_push_stack(cpu);
+ v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+ unsigned el = arm_current_el(env);
+
+ /* First handle registers which unprivileged can read */
+ switch (reg) {
+ case 0 ... 7: /* xPSR sub-fields */
+ return v7m_mrs_xpsr(env, reg, el);
+ case 20: /* CONTROL */
+ return v7m_mrs_control(env, env->v7m.secure);
+ case 0x94: /* CONTROL_NS */
+ /*
+ * We have to handle this here because unprivileged Secure code
+ * can read the NS CONTROL register.
+ */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.control[M_REG_NS] |
+ (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
+ }
+
+ if (el == 0) {
+ return 0; /* unprivileged reads others as zero */
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
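+ /*
+ * The _NS register aliases have SYSm bit 7 set (0x80 plus the
+ * SYSm value of the banked register); they read as zero when the
+ * CPU is in the Non-secure state.
+ */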
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_msp;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_psp;
+ case 0x8a: /* MSPLIM_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.msplim[M_REG_NS];
+ case 0x8b: /* PSPLIM_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.psplim[M_REG_NS];
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.primask[M_REG_NS];
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.basepri[M_REG_NS];
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.faultmask[M_REG_NS];
+ case 0x98: /* SP_NS */
+ {
+ /*
+ * This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
+ return env->v7m.other_ss_psp;
+ } else {
+ return env->v7m.other_ss_msp;
+ }
+ }
+ default:
+ break;
+ }
+ }
+
+ switch (reg) {
+ case 8: /* MSP */
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
+ case 9: /* PSP */
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
+ case 10: /* MSPLIM */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ goto bad_reg;
+ }
+ return env->v7m.msplim[env->v7m.secure];
+ case 11: /* PSPLIM */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ goto bad_reg;
+ }
+ return env->v7m.psplim[env->v7m.secure];
+ case 16: /* PRIMASK */
+ return env->v7m.primask[env->v7m.secure];
+ case 17: /* BASEPRI */
+ case 18: /* BASEPRI_MAX */
+ return env->v7m.basepri[env->v7m.secure];
+ case 19: /* FAULTMASK */
+ return env->v7m.faultmask[env->v7m.secure];
+ default:
+ bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
+ " register %d\n", reg);
+ return 0;
+ }
+}
+
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
+{
+ /*
+ * We're passed bits [11..0] of the instruction; extract
+ * SYSm and the mask bits.
+ * Invalid combinations of SYSm and mask are UNPREDICTABLE;
+ * we choose to treat them as if the mask bits were valid.
+ * NB that the pseudocode 'mask' variable is bits [11..10],
+ * whereas ours is [11..8].
+ */
+ uint32_t mask = extract32(maskreg, 8, 4);
+ uint32_t reg = extract32(maskreg, 0, 8);
+ int cur_el = arm_current_el(env);
+
+ if (cur_el == 0 && reg > 7 && reg != 20) {
+ /*
+ * only xPSR sub-fields and CONTROL.SFPA may be written by
+ * unprivileged code
+ */
+ return;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_msp = val & ~3;
+ return;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_psp = val & ~3;
+ return;
+ case 0x8a: /* MSPLIM_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.msplim[M_REG_NS] = val & ~7;
+ return;
+ case 0x8b: /* PSPLIM_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.psplim[M_REG_NS] = val & ~7;
+ return;
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.primask[M_REG_NS] = val & 1;
+ return;
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ return;
+ }
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
+ return;
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ return;
+ }
+ env->v7m.faultmask[M_REG_NS] = val & 1;
+ return;
+ case 0x94: /* CONTROL_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ write_v7m_control_spsel_for_secstate(env,
+ val & R_V7M_CONTROL_SPSEL_MASK,
+ M_REG_NS);
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
+ /*
+ * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
+ * RES0 if the FPU is not present, and is stored in the S bank
+ */
+ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
+ extract32(env->v7m.nsacr, 10, 1)) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
+ }
+ return;
+ case 0x98: /* SP_NS */
+ {
+ /*
+ * This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+ bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
+ uint32_t limit;
+
+ if (!env->v7m.secure) {
+ return;
+ }
+
+        limit = is_psp ? env->v7m.psplim[M_REG_NS]
+                       : env->v7m.msplim[M_REG_NS];
+
+ val &= ~0x3;
+
+ if (val < limit) {
+ raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
+ }
+
+ if (is_psp) {
+ env->v7m.other_ss_psp = val;
+ } else {
+ env->v7m.other_ss_msp = val;
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
+ switch (reg) {
+ case 0 ... 7: /* xPSR sub-fields */
+ v7m_msr_xpsr(env, mask, reg, val);
+ break;
+ case 8: /* MSP */
+ if (v7m_using_psp(env)) {
+ env->v7m.other_sp = val & ~3;
+ } else {
+ env->regs[13] = val & ~3;
+ }
+ break;
+ case 9: /* PSP */
+ if (v7m_using_psp(env)) {
+ env->regs[13] = val & ~3;
+ } else {
+ env->v7m.other_sp = val & ~3;
+ }
+ break;
+ case 10: /* MSPLIM */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ goto bad_reg;
+ }
+ env->v7m.msplim[env->v7m.secure] = val & ~7;
+ break;
+ case 11: /* PSPLIM */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ goto bad_reg;
+ }
+ env->v7m.psplim[env->v7m.secure] = val & ~7;
+ break;
+ case 16: /* PRIMASK */
+ env->v7m.primask[env->v7m.secure] = val & 1;
+ break;
+ case 17: /* BASEPRI */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
+ break;
+ case 18: /* BASEPRI_MAX */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
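+        /*
+         * BASEPRI_MAX can only raise the priority: the write takes
+         * effect only if val is nonzero and either BASEPRI is
+         * currently 0 (disabled) or val specifies a higher priority
+         * (numerically lower value) than the current BASEPRI.
+         */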
+ val &= 0xff;
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
+ env->v7m.basepri[env->v7m.secure] = val;
+ }
+ break;
+ case 19: /* FAULTMASK */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
+ break;
+ case 20: /* CONTROL */
+ /*
+ * Writing to the SPSEL bit only has an effect if we are in
+ * thread mode; other bits can be updated by any privileged code.
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
+         * env->v7m.control, so we only need to update the others.
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
+ * mode; for v8M the write is permitted but will have no effect.
+         * All these bits are write-ignored from non-privileged code,
+ * except for SFPA.
+ */
+ if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
+ !arm_v7m_is_handler_mode(env))) {
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
+ }
+ if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
+ if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
+ /*
+ * SFPA is RAZ/WI from NS or if no FPU.
+ * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
+ * Both are stored in the S bank.
+ */
+ if (env->v7m.secure) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
+ }
+ if (cur_el > 0 &&
+ (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
+ extract32(env->v7m.nsacr, 10, 1))) {
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
+ env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
+ }
+ }
+ break;
+ default:
+ bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
+ " register %d\n", reg);
+ return;
+ }
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
+ bool forceunpriv = op & 1;
+ bool alt = op & 2;
+ V8M_SAttributes sattrs = {};
+ uint32_t tt_resp;
+ bool r, rw, nsr, nsrw, mrvalid;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ MemTxAttrs attrs = {};
+ hwaddr phys_addr;
+ ARMMMUIdx mmu_idx;
+ uint32_t mregion;
+ bool targetpriv;
+ bool targetsec = env->v7m.secure;
+ bool is_subpage;
+
+ /*
+     * Work out which security state and privilege level we're
+     * interested in...
+ */
+ if (alt) {
+ targetsec = !targetsec;
+ }
+
+ if (forceunpriv) {
+ targetpriv = false;
+ } else {
+ targetpriv = arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
+ }
+
+ /* ...and then figure out which MMU index this is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
+
+ /*
+ * We know that the MPU and SAU don't care about the access type
+ * for our purposes beyond that we don't want to claim to be
+ * an insn fetch, so we arbitrarily call this a read.
+ */
+
+ /*
+ * MPU region info only available for privileged or if
+ * inspecting the other MPU state.
+ */
+ if (arm_current_el(env) != 0 || alt) {
+ /* We can ignore the return value as prot is always set */
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
+ &phys_addr, &attrs, &prot, &is_subpage,
+ &fi, &mregion);
+ if (mregion == -1) {
+ mrvalid = false;
+ mregion = 0;
+ } else {
+ mrvalid = true;
+ }
+ r = prot & PAGE_READ;
+ rw = prot & PAGE_WRITE;
+ } else {
+ r = false;
+ rw = false;
+ mrvalid = false;
+ mregion = 0;
+ }
+
+ if (env->v7m.secure) {
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+ nsr = sattrs.ns && r;
+ nsrw = sattrs.ns && rw;
+ } else {
+ sattrs.ns = true;
+ nsr = false;
+ nsrw = false;
+ }
+
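+    /*
+     * Assemble the TT response word: IREGION[31:24], IRVALID[23],
+     * S[22], NSRW[21], NSR[20], RW[19], R[18], SRVALID[17],
+     * MRVALID[16], SREGION[15:8], MREGION[7:0].
+     */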
+ tt_resp = (sattrs.iregion << 24) |
+ (sattrs.irvalid << 23) |
+ ((!sattrs.ns) << 22) |
+ (nsrw << 21) |
+ (nsr << 20) |
+ (rw << 19) |
+ (r << 18) |
+ (sattrs.srvalid << 17) |
+ (mrvalid << 16) |
+ (sattrs.sregion << 8) |
+ mregion;
+
+ return tt_resp;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
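+/*
+ * Compose an M-profile MMU index from its component parts; for
+ * example, a privileged Secure lookup with no negative-priority
+ * exception active yields ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV |
+ * ARM_MMU_IDX_M_S.
+ */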
+ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
+ bool secstate, bool priv, bool negpri)
+{
+ ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
+
+ if (priv) {
+ mmu_idx |= ARM_MMU_IDX_M_PRIV;
+ }
+
+ if (negpri) {
+ mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
+ }
+
+ if (secstate) {
+ mmu_idx |= ARM_MMU_IDX_M_S;
+ }
+
+ return mmu_idx;
+}
+
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+ bool secstate, bool priv)
+{
+ bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
+
+ return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
+}
+
+/* Return the MMU index for a v7M CPU in the specified security state */
+ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
+{
+ bool priv = arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[secstate] & 1);
+
+ return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
+}
diff --git a/target/arm/machine.c b/target/arm/machine.c
new file mode 100644
index 000000000..c74d8c3f4
--- /dev/null
+++ b/target/arm/machine.c
@@ -0,0 +1,882 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "internals.h"
+#include "migration/cpu.h"
+
+static bool vfp_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
+ ? cpu_isar_feature(aa64_fp_simd, cpu)
+ : cpu_isar_feature(aa32_vfp_simd, cpu));
+}
+
+static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ vfp_set_fpscr(env, val);
+ return 0;
+}
+
+static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ qemu_put_be32(f, vfp_get_fpscr(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_fpscr = {
+ .name = "fpscr",
+ .get = get_fpscr,
+ .put = put_fpscr,
+};
+
+static const VMStateDescription vmstate_vfp = {
+ .name = "cpu/vfp",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .needed = vfp_needed,
+ .fields = (VMStateField[]) {
+ /* For compatibility, store Qn out of Zn here. */
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
+ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),
+
+ /* The xregs array is a little awkward because element 1 (FPSCR)
+ * requires a specific accessor, so we have to split it up in
+ * the vmstate:
+ */
+ VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
+ VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
+ {
+ .name = "fpscr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_fpscr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool iwmmxt_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_IWMMXT);
+}
+
+static const VMStateDescription vmstate_iwmmxt = {
+ .name = "cpu/iwmmxt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = iwmmxt_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
+ VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#ifdef TARGET_AARCH64
+/* The expression ARM_MAX_VQ - 2 is 0 for a pure AArch32 build,
+ * and ARMPredicateReg is then an empty struct, which triggers errors
+ * in the expansion of the VMSTATE macros.
+ */
+
+static bool sve_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu_isar_feature(aa64_sve, cpu);
+}
+
+/* The first two words of each Zreg are stored in VFP state. */
+static const VMStateDescription vmstate_zreg_hi_reg = {
+ .name = "cpu/sve/zreg_hi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_preg_reg = {
+ .name = "cpu/sve/preg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_sve = {
+ .name = "cpu/sve",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = sve_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
+ vmstate_zreg_hi_reg, ARMVectorReg),
+ VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
+ vmstate_preg_reg, ARMPredicateReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif /* AARCH64 */
+
+static bool serror_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return env->serror.pending != 0;
+}
+
+static const VMStateDescription vmstate_serror = {
+ .name = "cpu/serror",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = serror_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(env.serror.pending, ARMCPU),
+ VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
+ VMSTATE_UINT64(env.serror.esr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool irq_line_state_needed(void *opaque)
+{
+ return true;
+}
+
+static const VMStateDescription vmstate_irq_line_state = {
+ .name = "cpu/irq-line-state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = irq_line_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.irq_line_state, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool m_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_M);
+}
+
+static const VMStateDescription vmstate_m_faultmask_primask = {
+ .name = "cpu/m/faultmask-primask",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* CSSELR is in a subsection because we didn't implement it previously.
+ * Migration from an old implementation will leave it at zero, which
+ * is OK since all the CPUs the old implementation supported make the
+ * register RAZ/WI.
+ * Since there was no version of QEMU which implemented the CSSELR for
+ * just non-secure, we transfer both banks here rather than putting
+ * the secure banked version in the m-security subsection.
+ */
+static bool csselr_vmstate_validate(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
+ && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
+}
+
+static bool m_csselr_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ return !arm_v7m_csselr_razwi(cpu);
+}
+
+static const VMStateDescription vmstate_m_csselr = {
+ .name = "cpu/m/csselr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_csselr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_m_scr = {
+ .name = "cpu/m/scr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_m_other_sp = {
+ .name = "cpu/m/other-sp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool m_v8m_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
+}
+
+static const VMStateDescription vmstate_m_v8m = {
+ .name = "cpu/m/v8m",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_v8m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_m_fp = {
+ .name = "cpu/m/fp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vfp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
+ VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool mve_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu_isar_feature(aa32_mve, cpu);
+}
+
+static const VMStateDescription vmstate_m_mve = {
+ .name = "cpu/m/mve",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = mve_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.vpr, ARMCPU),
+ VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_m = {
+ .name = "cpu/m",
+ .version_id = 4,
+ .minimum_version_id = 4,
+ .needed = m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
+ VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
+ VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
+ VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
+ VMSTATE_INT32(env.v7m.exception, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_m_faultmask_primask,
+ &vmstate_m_csselr,
+ &vmstate_m_scr,
+ &vmstate_m_other_sp,
+ &vmstate_m_v8m,
+ &vmstate_m_fp,
+ &vmstate_m_mve,
+ NULL
+ }
+};
+
+static bool thumb2ee_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_THUMB2EE);
+}
+
+static const VMStateDescription vmstate_thumb2ee = {
+ .name = "cpu/thumb2ee",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = thumb2ee_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.teecr, ARMCPU),
+ VMSTATE_UINT32(env.teehbr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmsav7_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_PMSA) &&
+ arm_feature(env, ARM_FEATURE_V7) &&
+ !arm_feature(env, ARM_FEATURE_V8);
+}
+
+static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
+}
+
+static const VMStateDescription vmstate_pmsav7 = {
+ .name = "cpu/pmsav7",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmsav7_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmsav7_rnr_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ /* For R profile cores pmsav7.rnr is migrated via the cpreg
+ * "RGNR" definition in helper.h. For M profile we have to
+ * migrate it separately.
+ */
+ return arm_feature(env, ARM_FEATURE_M);
+}
+
+static const VMStateDescription vmstate_pmsav7_rnr = {
+ .name = "cpu/pmsav7-rnr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmsav7_rnr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmsav8_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_PMSA) &&
+ arm_feature(env, ARM_FEATURE_V8);
+}
+
+static const VMStateDescription vmstate_pmsav8 = {
+ .name = "cpu/pmsav8",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmsav8_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
+ 0, vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
+ 0, vmstate_info_uint32, uint32_t),
+ VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
+ VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool s_rnr_vmstate_validate(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
+}
+
+static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu->env.sau.rnr < cpu->sau_sregion;
+}
+
+static bool m_security_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_M_SECURITY);
+}
+
+static const VMStateDescription vmstate_m_security = {
+ .name = "cpu/m-security",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_security_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.secure, ARMCPU),
+ VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
+ VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
+ VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
+ VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
+ 0, vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
+ 0, vmstate_info_uint32, uint32_t),
+ VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
+ VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
+ VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
+ VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
+ VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
+ VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_UINT32(env.sau.rnr, ARMCPU),
+ VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
+ VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
+ VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
+ /* AIRCR is not secure-only, but our implementation is R/O if the
+ * security extension is unimplemented, so we migrate it here.
+ */
+ VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (val & XPSR_EXCP) {
+ /* This is a CPSR format value from an older QEMU. (We can tell
+ * because values transferred in XPSR format always have zero
+ * for the EXCP field, and CPSR format will always have bit 4
+ * set in CPSR_M.) Rearrange it into XPSR format. The significant
+ * differences are that the T bit is not in the same place, the
+ * primask/faultmask info may be in the CPSR I and F bits, and
+ * we do not want the mode bits.
+ * We know that this cleanup happened before v8M, so there
+ * is no complication with banked primask/faultmask.
+ */
+ uint32_t newval = val;
+
+ assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+ newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
+ if (val & CPSR_T) {
+ newval |= XPSR_T;
+ }
+ /* If the I or F bits are set then this is a migration from
+ * an old QEMU which still stored the M profile FAULTMASK
+ * and PRIMASK in env->daif. For a new QEMU, the data is
+ * transferred using the vmstate_m_faultmask_primask subsection.
+ */
+ if (val & CPSR_F) {
+ env->v7m.faultmask[M_REG_NS] = 1;
+ }
+ if (val & CPSR_I) {
+ env->v7m.primask[M_REG_NS] = 1;
+ }
+ val = newval;
+ }
+        /* Ignore the low bits; they are handled by vmstate_m. */
+ xpsr_write(env, val, ~XPSR_EXCP);
+ return 0;
+ }
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ return 0;
+ }
+
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
+ return 0;
+}
+
+static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
+ val = xpsr_read(env) & ~XPSR_EXCP;
+ } else if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
+
+ qemu_put_be32(f, val);
+ return 0;
+}
+
+static const VMStateInfo vmstate_cpsr = {
+ .name = "cpsr",
+ .get = get_cpsr,
+ .put = put_cpsr,
+};
+
+static int get_power(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ ARMCPU *cpu = opaque;
+ bool powered_off = qemu_get_byte(f);
+ cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
+ return 0;
+}
+
+static int put_power(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ARMCPU *cpu = opaque;
+
+ /* Migration should never happen while we transition power states */
+
+ if (cpu->power_state == PSCI_ON ||
+ cpu->power_state == PSCI_OFF) {
+        bool powered_off = (cpu->power_state == PSCI_OFF);
+ qemu_put_byte(f, powered_off);
+ return 0;
+ } else {
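+        /* A nonzero return from a .put handler fails the migration. */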
+ return 1;
+ }
+}
+
+static const VMStateInfo vmstate_powered_off = {
+ .name = "powered_off",
+ .get = get_power,
+ .put = put_power,
+};
+
+static int cpu_pre_save(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ if (!kvm_enabled()) {
+ pmu_op_start(&cpu->env);
+ }
+
+ if (kvm_enabled()) {
+ if (!write_kvmstate_to_list(cpu)) {
+ /* This should never fail */
+ abort();
+ }
+
+ /*
+ * kvm_arm_cpu_pre_save() must be called after
+ * write_kvmstate_to_list()
+ */
+ kvm_arm_cpu_pre_save(cpu);
+ } else {
+ if (!write_cpustate_to_list(cpu, false)) {
+ /* This should never fail. */
+ abort();
+ }
+ }
+
+ cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
+ cpu->cpreg_array_len * sizeof(uint64_t));
+ memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
+ cpu->cpreg_array_len * sizeof(uint64_t));
+
+ return 0;
+}
+
+static int cpu_post_save(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ if (!kvm_enabled()) {
+ pmu_op_finish(&cpu->env);
+ }
+
+ return 0;
+}
+
+static int cpu_pre_load(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ /*
+ * Pre-initialize irq_line_state to a value that's never valid as
+ * real data, so cpu_post_load() can tell whether we've seen the
+ * irq-line-state subsection in the incoming migration state.
+ */
+ env->irq_line_state = UINT32_MAX;
+
+ if (!kvm_enabled()) {
+ pmu_op_start(&cpu->env);
+ }
+
+ return 0;
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ int i, v;
+
+ /*
+ * Handle migration compatibility from old QEMU which didn't
+ * send the irq-line-state subsection. A QEMU without it did not
+ * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
+ * so for TCG the line state matches the bits set in cs->interrupt_request.
+ * For KVM the line state is not stored in cs->interrupt_request
+ * and so this will leave irq_line_state as 0, but this is OK because
+ * we only need to care about it for TCG.
+ */
+ if (env->irq_line_state == UINT32_MAX) {
+ CPUState *cs = CPU(cpu);
+
+ env->irq_line_state = cs->interrupt_request &
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
+ CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
+ }
+
+ /* Update the values list from the incoming migration data.
+ * Anything in the incoming data which we don't know about is
+ * a migration failure; anything we know about but the incoming
+ * data doesn't specify retains its current (reset) value.
+ * The indexes list remains untouched -- we only inspect the
+ * incoming migration index list so we can match the values array
+ * entries with the right slots in our own values array.
+ */
+
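+    /*
+     * Both index lists are sorted in ascending order, so a single
+     * linear pass can match them up.
+     */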
+ for (i = 0, v = 0; i < cpu->cpreg_array_len
+ && v < cpu->cpreg_vmstate_array_len; i++) {
+ if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
+ /* register in our list but not incoming : skip it */
+ continue;
+ }
+ if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
+ /* register in their list but not ours: fail migration */
+ return -1;
+ }
+ /* matching register, copy the value over */
+ cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
+ v++;
+ }
+
+ if (kvm_enabled()) {
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return -1;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+ kvm_arm_cpu_post_load(cpu);
+ } else {
+ if (!write_list_to_cpustate(cpu)) {
+ return -1;
+ }
+ }
+
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+
+ /*
+ * TCG gen_update_fp_context() relies on the invariant that
+ * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
+ * forbid bogus incoming data with some other value.
+ */
+ if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) {
+ if (extract32(env->v7m.fpdscr[M_REG_NS],
+ FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 ||
+ extract32(env->v7m.fpdscr[M_REG_S],
+ FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) {
+ return -1;
+ }
+ }
+ if (!kvm_enabled()) {
+ pmu_op_finish(&cpu->env);
+ }
+ arm_rebuild_hflags(&cpu->env);
+
+ return 0;
+}
+
+const VMStateDescription vmstate_arm_cpu = {
+ .name = "cpu",
+ .version_id = 22,
+ .minimum_version_id = 22,
+ .pre_save = cpu_pre_save,
+ .post_save = cpu_post_save,
+ .pre_load = cpu_pre_load,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
+ VMSTATE_UINT64(env.pc, ARMCPU),
+ {
+ .name = "cpsr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_cpsr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_UINT32(env.spsr, ARMCPU),
+ VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
+ VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
+ VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
+ VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
+ /* The length-check must come before the arrays to avoid
+ * incoming data possibly overflowing the array.
+ */
+ VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
+ VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
+ cpreg_vmstate_array_len,
+ 0, vmstate_info_uint64, uint64_t),
+ VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
+ cpreg_vmstate_array_len,
+ 0, vmstate_info_uint64, uint64_t),
+ VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_val, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_high, ARMCPU),
+ VMSTATE_UNUSED(sizeof(uint64_t)),
+ VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
+ VMSTATE_UINT32(env.exception.fsr, ARMCPU),
+ VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
+ {
+ .name = "power_state",
+ .version_id = 0,
+ .size = sizeof(bool),
+ .info = &vmstate_powered_off,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vfp,
+ &vmstate_iwmmxt,
+ &vmstate_m,
+ &vmstate_thumb2ee,
+ /* pmsav7_rnr must come before pmsav7 so that we have the
+ * region number before we test it in the VMSTATE_VALIDATE
+ * in vmstate_pmsav7.
+ */
+ &vmstate_pmsav7_rnr,
+ &vmstate_pmsav7,
+ &vmstate_pmsav8,
+ &vmstate_m_security,
+#ifdef TARGET_AARCH64
+ &vmstate_sve,
+#endif
+ &vmstate_serror,
+ &vmstate_irq_line_state,
+ NULL
+ }
+};
diff --git a/target/arm/meson.build b/target/arm/meson.build
new file mode 100644
index 000000000..50f152214
--- /dev/null
+++ b/target/arm/meson.build
@@ -0,0 +1,66 @@
+gen = [
+ decodetree.process('sve.decode', extra_args: '--decode=disas_sve'),
+ decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'),
+ decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'),
+ decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'),
+ decodetree.process('vfp.decode', extra_args: '--decode=disas_vfp'),
+ decodetree.process('vfp-uncond.decode', extra_args: '--decode=disas_vfp_uncond'),
+ decodetree.process('m-nocp.decode', extra_args: '--decode=disas_m_nocp'),
+ decodetree.process('mve.decode', extra_args: '--decode=disas_mve'),
+ decodetree.process('a32.decode', extra_args: '--static-decode=disas_a32'),
+ decodetree.process('a32-uncond.decode', extra_args: '--static-decode=disas_a32_uncond'),
+ decodetree.process('t32.decode', extra_args: '--static-decode=disas_t32'),
+ decodetree.process('t16.decode', extra_args: ['-w', '16', '--static-decode=disas_t16']),
+]
+
+arm_ss = ss.source_set()
+arm_ss.add(gen)
+arm_ss.add(files(
+ 'cpu.c',
+ 'crypto_helper.c',
+ 'debug_helper.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'iwmmxt_helper.c',
+ 'm_helper.c',
+ 'mve_helper.c',
+ 'neon_helper.c',
+ 'op_helper.c',
+ 'tlb_helper.c',
+ 'translate.c',
+ 'translate-m-nocp.c',
+ 'translate-mve.c',
+ 'translate-neon.c',
+ 'translate-vfp.c',
+ 'vec_helper.c',
+ 'vfp_helper.c',
+ 'cpu_tcg.c',
+))
+arm_ss.add(zlib)
+
+arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c'))
+
+arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
+ 'cpu64.c',
+ 'gdbstub64.c',
+ 'helper-a64.c',
+ 'mte_helper.c',
+ 'pauth_helper.c',
+ 'sve_helper.c',
+ 'translate-a64.c',
+ 'translate-sve.c',
+))
+
+arm_softmmu_ss = ss.source_set()
+arm_softmmu_ss.add(files(
+ 'arch_dump.c',
+ 'arm-powerctl.c',
+ 'machine.c',
+ 'monitor.c',
+ 'psci.c',
+))
+
+subdir('hvf')
+
+target_arch += {'arm': arm_ss}
+target_softmmu_arch += {'arm': arm_softmmu_ss}
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
new file mode 100644
index 000000000..80c64fa35
--- /dev/null
+++ b/target/arm/monitor.c
@@ -0,0 +1,230 @@
+/*
+ * QEMU monitor.c for ARM.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/boards.h"
+#include "kvm_arm.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qmp/qdict.h"
+#include "qom/qom-qobject.h"
+
+static GICCapability *gic_cap_new(int version)
+{
+ GICCapability *cap = g_new0(GICCapability, 1);
+ cap->version = version;
+ /* by default, support none */
+ cap->emulated = false;
+ cap->kernel = false;
+ return cap;
+}
+
+static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
+{
+#ifdef CONFIG_KVM
+ int fdarray[3];
+
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ return;
+ }
+
+ /* Test KVM GICv2 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V2)) {
+ v2->kernel = true;
+ }
+
+ /* Test KVM GICv3 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V3)) {
+ v3->kernel = true;
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+#endif
+}
+
+GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
+{
+ GICCapabilityList *head = NULL;
+ GICCapability *v2 = gic_cap_new(2), *v3 = gic_cap_new(3);
+
+ v2->emulated = true;
+ v3->emulated = true;
+
+ gic_cap_kvm_probe(v2, v3);
+
+ QAPI_LIST_PREPEND(head, v2);
+ QAPI_LIST_PREPEND(head, v3);
+
+ return head;
+}
+
+QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
+
+/*
+ * These are cpu model features we want to advertise. The order here
+ * matters as this is the order in which qmp_query_cpu_model_expansion
+ * will attempt to set them. If there are dependencies between features,
+ * then the order that considers those dependencies must be used.
+ */
+static const char *cpu_model_advertised_features[] = {
+ "aarch64", "pmu", "sve",
+ "sve128", "sve256", "sve384", "sve512",
+ "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280",
+ "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
+ "kvm-no-adjvtime", "kvm-steal-time",
+ "pauth", "pauth-impdef",
+ NULL
+};
+
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ CpuModelExpansionInfo *expansion_info;
+ const QDict *qdict_in = NULL;
+ QDict *qdict_out;
+ ObjectClass *oc;
+ Object *obj;
+ const char *name;
+ int i;
+
+ if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
+ error_setg(errp, "The requested expansion type is not supported");
+ return NULL;
+ }
+
+ if (!kvm_enabled() && !strcmp(model->name, "host")) {
+ error_setg(errp, "The CPU type '%s' requires KVM", model->name);
+ return NULL;
+ }
+
+ oc = cpu_class_by_name(TYPE_ARM_CPU, model->name);
+ if (!oc) {
+ error_setg(errp, "The CPU type '%s' is not a recognized ARM CPU type",
+ model->name);
+ return NULL;
+ }
+
+ if (kvm_enabled()) {
+ bool supported = false;
+
+ if (!strcmp(model->name, "host") || !strcmp(model->name, "max")) {
+ /* These are kvmarm's recommended cpu types */
+ supported = true;
+ } else if (current_machine->cpu_type) {
+ const char *cpu_type = current_machine->cpu_type;
+ int len = strlen(cpu_type) - strlen(ARM_CPU_TYPE_SUFFIX);
+
+ if (strlen(model->name) == len &&
+ !strncmp(model->name, cpu_type, len)) {
+ /* KVM is enabled and we're using this type, so it works. */
+ supported = true;
+ }
+ }
+ if (!supported) {
+ error_setg(errp, "We cannot guarantee the CPU type '%s' works "
+ "with KVM on this host", model->name);
+ return NULL;
+ }
+ }
+
+ if (model->props) {
+ qdict_in = qobject_to(QDict, model->props);
+ if (!qdict_in) {
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
+ return NULL;
+ }
+ }
+
+ obj = object_new(object_class_get_name(oc));
+
+ if (qdict_in) {
+ Visitor *visitor;
+ Error *err = NULL;
+
+ visitor = qobject_input_visitor_new(model->props);
+ if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) {
+ visit_free(visitor);
+ object_unref(obj);
+ return NULL;
+ }
+
+ i = 0;
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
+ if (qdict_get(qdict_in, name)) {
+ if (!object_property_set(obj, name, visitor, &err)) {
+ break;
+ }
+ }
+ }
+
+ if (!err) {
+ visit_check_struct(visitor, &err);
+ }
+ if (!err) {
+ arm_cpu_finalize_features(ARM_CPU(obj), &err);
+ }
+ visit_end_struct(visitor, NULL);
+ visit_free(visitor);
+ if (err) {
+ object_unref(obj);
+ error_propagate(errp, err);
+ return NULL;
+ }
+ } else {
+ arm_cpu_finalize_features(ARM_CPU(obj), &error_abort);
+ }
+
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
+ expansion_info->model->name = g_strdup(model->name);
+
+ qdict_out = qdict_new();
+
+ i = 0;
+ while ((name = cpu_model_advertised_features[i++]) != NULL) {
+ ObjectProperty *prop = object_property_find(obj, name);
+ if (prop) {
+ QObject *value;
+
+ assert(prop->get);
+ value = object_property_get_qobject(obj, name, &error_abort);
+
+ qdict_put_obj(qdict_out, name, value);
+ }
+ }
+
+ if (!qdict_size(qdict_out)) {
+ qobject_unref(qdict_out);
+ } else {
+ expansion_info->model->props = QOBJECT(qdict_out);
+ expansion_info->model->has_props = true;
+ }
+
+ object_unref(obj);
+
+ return expansion_info;
+}
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
new file mode 100644
index 000000000..e09b7e46a
--- /dev/null
+++ b/target/arm/mte_helper.c
@@ -0,0 +1,940 @@
+/*
+ * ARM v8.5-MemTag Operations
+ *
+ * Copyright (c) 2020 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/ram_addr.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+
+
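+/*
+ * Worked example: with exclude = 0x0036 (tags 1, 2, 4 and 5 excluded),
+ * tag = 1 and offset = 2, the loop below steps to the non-excluded
+ * tags 3 and then 6, so the chosen tag is 6.
+ */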
+static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
+{
+ if (exclude == 0xffff) {
+ return 0;
+ }
+ if (offset == 0) {
+ while (exclude & (1 << tag)) {
+ tag = (tag + 1) & 15;
+ }
+ } else {
+ do {
+ do {
+ tag = (tag + 1) & 15;
+ } while (exclude & (1 << tag));
+ } while (--offset > 0);
+ }
+ return tag;
+}
+
+/**
+ * allocation_tag_mem:
+ * @env: the cpu environment
+ * @ptr_mmu_idx: the addressing regime to use for the virtual address
+ * @ptr: the virtual address for which to look up tag memory
+ * @ptr_access: the access to use for the virtual address
+ * @ptr_size: the number of bytes in the normal memory access
+ * @tag_access: the access to use for the tag memory
+ * @tag_size: the number of bytes in the tag memory access
+ * @ra: the return address for exception handling
+ *
+ * Our tag memory is formatted as a sequence of little-endian nibbles.
+ * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
+ * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
+ * for the higher addr.
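+ * For example, with LOG2_TAG_GRANULE == 4, the tags for addresses
+ * 0x1000 and 0x1010 share the tag byte at index (0x1000 >> 5), in
+ * bits [3:0] and [7:4] respectively.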
+ *
+ * Here, resolve the physical address from the virtual address, and return
+ * a pointer to the corresponding tag byte. Exit with exception if the
+ * virtual address is not accessible for @ptr_access.
+ *
+ * The @ptr_size and @tag_size values may not have an obvious relation
+ * due to the alignment of @ptr, and the number of tag checks required.
+ *
+ * If there is no tag storage corresponding to @ptr, return NULL.
+ */
+static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
+ uint64_t ptr, MMUAccessType ptr_access,
+ int ptr_size, MMUAccessType tag_access,
+ int tag_size, uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ uint64_t clean_ptr = useronly_clean_ptr(ptr);
+ int flags = page_get_flags(clean_ptr);
+ uint8_t *tags;
+ uintptr_t index;
+
+ if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
+ cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
+ !(flags & PAGE_VALID), ra);
+ }
+
+ /* Require both MAP_ANON and PROT_MTE for the page. */
+ if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
+ return NULL;
+ }
+
+ tags = page_get_target_data(clean_ptr);
+ if (tags == NULL) {
+ size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
+ tags = page_alloc_target_data(clean_ptr, alloc_size);
+ assert(tags != NULL);
+ }
+
+ index = extract32(ptr, LOG2_TAG_GRANULE + 1,
+ TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
+ return tags + index;
+#else
+ uintptr_t index;
+ CPUIOTLBEntry *iotlbentry;
+ int in_page, flags;
+ ram_addr_t ptr_ra;
+ hwaddr ptr_paddr, tag_paddr, xlat;
+ MemoryRegion *mr;
+ ARMASIdx tag_asi;
+ AddressSpace *tag_as;
+ void *host;
+
+ /*
+ * Probe the first byte of the virtual address. This raises an
+ * exception for inaccessible pages, and resolves the virtual address
+ * into the softmmu tlb.
+ *
+ * When RA == 0, this is for mte_probe. The page is expected to be
+ * valid. Indicate to probe_access_flags no-fault, then assert that
+ * we received a valid page.
+ */
+ flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
+ ra == 0, &host, ra);
+ assert(!(flags & TLB_INVALID_MASK));
+
+ /*
+ * Find the iotlbentry for ptr. This *must* be present in the TLB
+ * because we just found the mapping.
+ * TODO: Perhaps there should be a cputlb helper that returns a
+ * matching tlb entry + iotlb entry.
+ */
+ index = tlb_index(env, ptr_mmu_idx, ptr);
+# ifdef CONFIG_DEBUG_TCG
+ {
+ CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
+ target_ulong comparator = (ptr_access == MMU_DATA_LOAD
+ ? entry->addr_read
+ : tlb_addr_write(entry));
+ g_assert(tlb_hit(comparator, ptr));
+ }
+# endif
+ iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];
+
+ /* If the virtual page MemAttr != Tagged, access unchecked. */
+ if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
+ return NULL;
+ }
+
+ /*
+ * If not backed by host ram, there is no tag storage: access unchecked.
+     * This is probably a guest OS bug though, so log it.
+ */
+ if (unlikely(flags & TLB_MMIO)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
+ "but is not backed by host ram\n", ptr);
+ return NULL;
+ }
+
+ /*
+ * The Normal memory access can extend to the next page. E.g. a single
+ * 8-byte access to the last byte of a page will check only the last
+ * tag on the first page.
+ * Any page access exception has priority over tag check exception.
+ */
+ in_page = -(ptr | TARGET_PAGE_MASK);
+ if (unlikely(ptr_size > in_page)) {
+ void *ignore;
+ flags |= probe_access_flags(env, ptr + in_page, ptr_access,
+ ptr_mmu_idx, ra == 0, &ignore, ra);
+ assert(!(flags & TLB_INVALID_MASK));
+ }
+
+ /* Any debug exception has priority over a tag check exception. */
+ if (unlikely(flags & TLB_WATCHPOINT)) {
+ int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
+ assert(ra != 0);
+ cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
+ iotlbentry->attrs, wp, ra);
+ }
+
+ /*
+ * Find the physical address within the normal mem space.
+ * The memory region lookup must succeed because TLB_MMIO was
+ * not set in the cputlb lookup above.
+ */
+ mr = memory_region_from_host(host, &ptr_ra);
+ tcg_debug_assert(mr != NULL);
+ tcg_debug_assert(memory_region_is_ram(mr));
+ ptr_paddr = ptr_ra;
+ do {
+ ptr_paddr += mr->addr;
+ mr = mr->container;
+ } while (mr);
+
+ /* Convert to the physical address in tag space. */
+ tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
+
+ /* Look up the address in tag space. */
+ tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+ tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
+ mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
+ tag_access == MMU_DATA_STORE,
+ iotlbentry->attrs);
+
+ /*
+ * Note that @mr will never be NULL. If there is nothing in the address
+ * space at @tag_paddr, the translation will return the unallocated memory
+ * region. For our purposes, the result must be ram.
+ */
+ if (unlikely(!memory_region_is_ram(mr))) {
+ /* ??? Failure is a board configuration error. */
+ qemu_log_mask(LOG_UNIMP,
+ "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
+ "Normal Memory @ 0x%" HWADDR_PRIx "\n",
+ tag_paddr, ptr_paddr);
+ return NULL;
+ }
+
+ /*
+ * Ensure the tag memory is dirty on write, for migration.
+ * Tag memory can never contain code or display memory (vga).
+ */
+ if (tag_access == MMU_DATA_STORE) {
+ ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
+ cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
+ }
+
+ return memory_region_get_ram_ptr(mr) + xlat;
+#endif
+}
+
+uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
+{
+ uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
+ int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
+ int start = extract32(env->cp15.rgsr_el1, 0, 4);
+ int seed = extract32(env->cp15.rgsr_el1, 8, 16);
+ int offset, i, rtag;
+
+ /*
+ * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
+ * deterministic algorithm. Except that with RRND==1 the kernel is
+ * not required to have set RGSR_EL1.SEED != 0, which is required for
+ * the deterministic algorithm to function. So we force a non-zero
+ * SEED for that case.
+ */
+ if (unlikely(seed == 0) && rrnd) {
+ do {
+ Error *err = NULL;
+ uint16_t two;
+
+ if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
+ /*
+ * Failed, for unknown reasons in the crypto subsystem.
+ * Best we can do is log the reason and use a constant seed.
+ */
+ qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
+ error_get_pretty(err));
+ error_free(err);
+ two = 1;
+ }
+ seed = two;
+ } while (seed == 0);
+ }
+
+ /* RandomTag */
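+    /*
+     * The seed advances through a 16-bit LFSR (taps at bits 5, 3, 2
+     * and 0), yielding four pseudo-random bits for the tag offset.
+     */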
+ for (i = offset = 0; i < 4; ++i) {
+ /* NextRandomTagBit */
+ int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
+ extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
+ seed = (top << 15) | (seed >> 1);
+ offset |= top << i;
+ }
+ rtag = choose_nonexcluded_tag(start, offset, exclude);
+ env->cp15.rgsr_el1 = rtag | (seed << 8);
+
+ return address_with_allocation_tag(rn, rtag);
+}
+
+uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
+ int32_t offset, uint32_t tag_offset)
+{
+ int start_tag = allocation_tag_from_addr(ptr);
+ uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
+ int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);
+
+ return address_with_allocation_tag(ptr + offset, rtag);
+}
+
+static int load_tag1(uint64_t ptr, uint8_t *mem)
+{
+ int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+ return extract32(*mem, ofs, 4);
+}
+
+uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uint8_t *mem;
+ int rtag = 0;
+
+ /* Trap if accessing an invalid page. */
+ mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
+ MMU_DATA_LOAD, 1, GETPC());
+
+ /* Load if page supports tags. */
+ if (mem) {
+ rtag = load_tag1(ptr, mem);
+ }
+
+ return address_with_allocation_tag(xt, rtag);
+}
+
+static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
+{
+ if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
+ arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
+ cpu_mmu_index(env, false), ra);
+ g_assert_not_reached();
+ }
+}
+
+/* For use in a non-parallel context, store to the given nibble. */
+static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
+{
+ int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+ *mem = deposit32(*mem, ofs, 4, tag);
+}
+
+/* For use in a parallel context, atomically store to the given nibble. */
+static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
+{
+ int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
+ uint8_t old = qatomic_read(mem);
+
+ while (1) {
+ uint8_t new = deposit32(old, ofs, 4, tag);
+ uint8_t cmp = qatomic_cmpxchg(mem, old, new);
+ if (likely(cmp == old)) {
+ return;
+ }
+ old = cmp;
+ }
+}
+
+typedef void stg_store1(uint64_t, uint8_t *, int);
+
+static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
+ uintptr_t ra, stg_store1 store1)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uint8_t *mem;
+
+ check_tag_aligned(env, ptr, ra);
+
+ /* Trap if accessing an invalid page. */
+ mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
+ MMU_DATA_STORE, 1, ra);
+
+ /* Store if page supports tags. */
+ if (mem) {
+ store1(ptr, mem, allocation_tag_from_addr(xt));
+ }
+}
+
+void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+ do_stg(env, ptr, xt, GETPC(), store_tag1);
+}
+
+void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+ do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
+}
+
+void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+
+ check_tag_aligned(env, ptr, ra);
+ probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
+}
+
+static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
+ uintptr_t ra, stg_store1 store1)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ int tag = allocation_tag_from_addr(xt);
+ uint8_t *mem1, *mem2;
+
+ check_tag_aligned(env, ptr, ra);
+
+ /*
+     * Trap if accessing invalid page(s).
+ * This takes priority over !allocation_tag_access_enabled.
+ */
+ if (ptr & TAG_GRANULE) {
+ /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
+ TAG_GRANULE, MMU_DATA_STORE, 1, ra);
+ mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
+ MMU_DATA_STORE, TAG_GRANULE,
+ MMU_DATA_STORE, 1, ra);
+
+ /* Store if page(s) support tags. */
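+        /*
+         * store1's address argument is only used to select the
+         * nibble: TAG_GRANULE picks the high nibble of mem1 and 0
+         * picks the low nibble of mem2.
+         */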
+ if (mem1) {
+ store1(TAG_GRANULE, mem1, tag);
+ }
+ if (mem2) {
+ store1(0, mem2, tag);
+ }
+ } else {
+ /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
+ 2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
+ if (mem1) {
+ tag |= tag << 4;
+ qatomic_set(mem1, tag);
+ }
+ }
+}
+
+void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+ do_st2g(env, ptr, xt, GETPC(), store_tag1);
+}
+
+void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
+{
+ do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
+}
+
+void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ int in_page = -(ptr | TARGET_PAGE_MASK);
+
+ check_tag_aligned(env, ptr, ra);
+
+ if (likely(in_page >= 2 * TAG_GRANULE)) {
+ probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
+ } else {
+ probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
+ probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
+ }
+}
+
+#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)
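+/*
+ * With GMID_EL1_BS == 6 (the only value supported here, per the
+ * QEMU_BUILD_BUG_ON below), this is 4 << 6 = 256 bytes: 16 tag
+ * granules whose 16 four-bit tags pack into 8 tag bytes -- the
+ * 64 bits moved by LDGM/STGM.
+ */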
+
+uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ void *tag_mem;
+
+ ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
+
+ /* Trap if accessing an invalid page. */
+ tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
+ LDGM_STGM_SIZE, MMU_DATA_LOAD,
+ LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
+
+ /* The tag is squashed to zero if the page does not support tags. */
+ if (!tag_mem) {
+ return 0;
+ }
+
+ QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
+ /*
+ * We are loading 64-bits worth of tags. The ordering of elements
+ * within the word corresponds to a 64-bit little-endian operation.
+ */
+ return ldq_le_p(tag_mem);
+}
+
+void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ void *tag_mem;
+
+ ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
+
+ /* Trap if accessing an invalid page. */
+ tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
+ LDGM_STGM_SIZE, MMU_DATA_LOAD,
+ LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
+
+ /*
+ * Tag store only happens if the page supports tags,
+ * and if the OS has enabled access to the tags.
+ */
+ if (!tag_mem) {
+ return;
+ }
+
+ QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
+ /*
+ * We are storing 64-bits worth of tags. The ordering of elements
+ * within the word corresponds to a 64-bit little-endian operation.
+ */
+ stq_le_p(tag_mem, val);
+}
+
+void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
+{
+ uintptr_t ra = GETPC();
+ int mmu_idx = cpu_mmu_index(env, false);
+ int log2_dcz_bytes, log2_tag_bytes;
+ intptr_t dcz_bytes, tag_bytes;
+ uint8_t *mem;
+
+ /*
+ * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
+ * i.e. 32 bytes, which is an unreasonably small dcz anyway,
+ * to make sure that we can access one complete tag byte here.
+ */
+ log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
+ log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
+ dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
+ tag_bytes = (intptr_t)1 << log2_tag_bytes;
+ ptr &= -dcz_bytes;
+
+ mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
+ MMU_DATA_STORE, tag_bytes, ra);
+ if (mem) {
+ int tag_pair = (val & 0xf) * 0x11;
+ memset(mem, tag_pair, tag_bytes);
+ }
+}
+
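+/*
+ * For instance, a typical dcz_blocksize of 4 (64-byte blocks) gives
+ * log2_dcz_bytes = 6 and log2_tag_bytes = 1: each block covers four
+ * tag granules, i.e. two tag bytes, and the memset in stzgm_tags
+ * above fills those two bytes with the replicated tag pair.
+ */
+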
+static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
+ uint64_t dirty_ptr, uintptr_t ra)
+{
+ int is_write, syn;
+
+ env->exception.vaddress = dirty_ptr;
+
+ is_write = FIELD_EX32(desc, MTEDESC, WRITE);
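+ /* 0x11 is the DFSC encoding for a synchronous tag check fault. */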
+ syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
+ 0x11);
+ raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
+ g_assert_not_reached();
+}
+
+static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
+ uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
+{
+ int select;
+
+ if (regime_has_2_ranges(arm_mmu_idx)) {
+ select = extract64(dirty_ptr, 55, 1);
+ } else {
+ select = 0;
+ }
+ env->cp15.tfsr_el[el] |= 1 << select;
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
+ * which then sends a SIGSEGV when the thread is next scheduled.
+ * This cpu will return to the main loop at the end of the TB,
+ * which is rather sooner than "normal". But the alternative
+ * is waiting until the next syscall.
+ */
+ qemu_cpu_kick(env_cpu(env));
+#endif
+}
+
+/* Record a tag check failure. */
+static void mte_check_fail(CPUARMState *env, uint32_t desc,
+ uint64_t dirty_ptr, uintptr_t ra)
+{
+ int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
+ int el, reg_el, tcf;
+ uint64_t sctlr;
+
+ reg_el = regime_el(env, arm_mmu_idx);
+ sctlr = env->cp15.sctlr_el[reg_el];
+
+ switch (arm_mmu_idx) {
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E20_0:
+ el = 0;
+ tcf = extract64(sctlr, 38, 2);
+ break;
+ default:
+ el = reg_el;
+ tcf = extract64(sctlr, 40, 2);
+ }
+
+ switch (tcf) {
+ case 1:
+ /* Tag check fail causes a synchronous exception. */
+ mte_sync_check_fail(env, desc, dirty_ptr, ra);
+ break;
+
+ case 0:
+ /*
+ * Tag check fail does not affect the PE.
+ * We eliminate this case by not setting MTE_ACTIVE
+ * in tb_flags, so that we never make this runtime call.
+ */
+ g_assert_not_reached();
+
+ case 2:
+ /* Tag check fail causes asynchronous flag set. */
+ mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
+ break;
+
+ case 3:
+ /*
+ * Tag check fail causes asynchronous flag set for stores, or
+ * a synchronous exception for loads.
+ */
+ if (FIELD_EX32(desc, MTEDESC, WRITE)) {
+ mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
+ } else {
+ mte_sync_check_fail(env, desc, dirty_ptr, ra);
+ }
+ break;
+ }
+}
+
+/**
+ * checkN:
+ * @tag: tag memory to test
+ * @odd: true to begin testing at the odd tag nibble
+ * @cmp: the tag to compare against
+ * @count: number of tags to test
+ *
+ * Return the number of successful tests.
+ * Thus a return value < @count indicates a failure.
+ *
+ * A note about sizes: count is expected to be small.
+ *
+ * The most common use will be LDP/STP of two integer registers,
+ * which means 16 bytes of memory touching at most 2 tags, but
+ * often the access is aligned and thus just 1 tag.
+ *
+ * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
+ * touching at most 5 tags. SVE LDR/STR (vector) with the default
+ * vector length is also 64 bytes; the maximum architectural length
+ * is 256 bytes touching at most 9 tags.
+ *
+ * The loop below uses 7 logical operations and 1 memory operation
+ * per tag pair. An implementation that loads an aligned word and
+ * uses masking to ignore adjacent tags requires 18 logical operations
+ * and thus does not begin to pay off until 6 tags, which, according
+ * to the survey above, is unlikely to be common.
+ */
+static int checkN(uint8_t *mem, int odd, int cmp, int count)
+{
+ int n = 0, diff;
+
+ /* Replicate the test tag and compare. */
+ cmp *= 0x11;
+ diff = *mem++ ^ cmp;
+
+ if (odd) {
+ goto start_odd;
+ }
+
+ while (1) {
+ /* Test even tag. */
+ if (unlikely((diff) & 0x0f)) {
+ break;
+ }
+ if (++n == count) {
+ break;
+ }
+
+ start_odd:
+ /* Test odd tag. */
+ if (unlikely((diff) & 0xf0)) {
+ break;
+ }
+ if (++n == count) {
+ break;
+ }
+
+ diff = *mem++ ^ cmp;
+ }
+ return n;
+}
+
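+/*
+ * For example, a 48-byte access at ptr = 0x1010 touches granules
+ * 0x1010, 0x1020 and 0x1030, so checkN is entered with odd != 0 and
+ * count == 3: it tests the odd nibble of the first tag byte, then
+ * both nibbles of the second, and returns 3 if all of them match.
+ */
+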
+/**
+ * mte_probe_int() - helper for mte_probe and mte_check
+ * @env: CPU environment
+ * @desc: MTEDESC descriptor
+ * @ptr: virtual address of the base of the access
+ * @ra: host return address, or 0 for a non-faulting probe
+ * @fault: return virtual address of the first check failure
+ *
+ * Internal routine for both mte_probe and mte_check.
+ * Return zero on failure, filling in *fault.
+ * Return negative on trivial success for tbi disabled.
+ * Return positive on success with tbi enabled.
+ */
+static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+ uintptr_t ra, uint64_t *fault)
+{
+ int mmu_idx, ptr_tag, bit55;
+ uint64_t ptr_last, prev_page, next_page;
+ uint64_t tag_first, tag_last;
+ uint64_t tag_byte_first, tag_byte_last;
+ uint32_t sizem1, tag_count, tag_size, n, c;
+ uint8_t *mem1, *mem2;
+ MMUAccessType type;
+
+ bit55 = extract64(ptr, 55, 1);
+ *fault = ptr;
+
+ /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+ if (unlikely(!tbi_check(desc, bit55))) {
+ return -1;
+ }
+
+ ptr_tag = allocation_tag_from_addr(ptr);
+
+ if (tcma_check(desc, bit55, ptr_tag)) {
+ return 1;
+ }
+
+ mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);
+
+ /* Find the addr of the end of the access */
+ ptr_last = ptr + sizem1;
+
+ /* Round the bounds to the tag granule, and compute the number of tags. */
+ tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
+ tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
+ tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
+
+ /* Round the bounds to twice the tag granule, and compute the bytes. */
+ tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
+ tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);
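+ /*
+ * E.g. for ptr = 0x1007 and sizem1 = 0x18 (a 25-byte access):
+ * ptr_last = 0x101f, tag_first = 0x1000, tag_last = 0x1010 and
+ * tag_count = 2, while tag_byte_first == tag_byte_last == 0x1000,
+ * so a single tag byte covers both granules.
+ */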
+
+ /* Locate the page boundaries. */
+ prev_page = ptr & TARGET_PAGE_MASK;
+ next_page = prev_page + TARGET_PAGE_SIZE;
+
+ if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
+ /* Memory access stays on one page. */
+ tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
+ MMU_DATA_LOAD, tag_size, ra);
+ if (!mem1) {
+ return 1;
+ }
+ /* Perform all of the comparisons. */
+ n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
+ } else {
+ /* Memory access crosses to next page. */
+ tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
+ mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
+ MMU_DATA_LOAD, tag_size, ra);
+
+ tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
+ mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
+ ptr_last - next_page + 1,
+ MMU_DATA_LOAD, tag_size, ra);
+
+ /*
+ * Perform all of the comparisons.
+ * Note the possible but unlikely case of the operation spanning
+ * two pages that do not both have tagging enabled.
+ */
+ n = c = (next_page - tag_first) / TAG_GRANULE;
+ if (mem1) {
+ n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
+ }
+ if (n == c) {
+ if (!mem2) {
+ return 1;
+ }
+ n += checkN(mem2, 0, ptr_tag, tag_count - c);
+ }
+ }
+
+ if (likely(n == tag_count)) {
+ return 1;
+ }
+
+ /*
+ * If we failed, we know which granule. For the first granule, the
+ * failure address is @ptr, the first byte accessed. Otherwise the
+ * failure address is the first byte of the nth granule.
+ */
+ if (n > 0) {
+ *fault = tag_first + n * TAG_GRANULE;
+ }
+ return 0;
+}
+
+uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
+{
+ uint64_t fault;
+ int ret = mte_probe_int(env, desc, ptr, ra, &fault);
+
+ if (unlikely(ret == 0)) {
+ mte_check_fail(env, desc, fault, ra);
+ } else if (ret < 0) {
+ return ptr;
+ }
+ return useronly_clean_ptr(ptr);
+}
+
+uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+ return mte_check(env, desc, ptr, GETPC());
+}
+
+/*
+ * No-fault version of mte_check, to be used by SVE for MemSingleNF.
+ * Returns false if the access is Checked and the check failed. This
+ * is only intended to probe the tag -- the validity of the page must
+ * be checked beforehand.
+ */
+bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+ uint64_t fault;
+ int ret = mte_probe_int(env, desc, ptr, 0, &fault);
+
+ return ret != 0;
+}
+
+/*
+ * Perform an MTE checked access for DC_ZVA.
+ */
+uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+ uintptr_t ra = GETPC();
+ int log2_dcz_bytes, log2_tag_bytes;
+ int mmu_idx, bit55;
+ intptr_t dcz_bytes, tag_bytes, i;
+ void *mem;
+ uint64_t ptr_tag, mem_tag, align_ptr;
+
+ bit55 = extract64(ptr, 55, 1);
+
+ /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+ if (unlikely(!tbi_check(desc, bit55))) {
+ return ptr;
+ }
+
+ ptr_tag = allocation_tag_from_addr(ptr);
+
+ if (tcma_check(desc, bit55, ptr_tag)) {
+ goto done;
+ }
+
+ /*
+ * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
+ * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
+ * sure that we can access one complete tag byte here.
+ */
+ log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
+ log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
+ dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
+ tag_bytes = (intptr_t)1 << log2_tag_bytes;
+ align_ptr = ptr & -dcz_bytes;
+
+ /*
+ * Trap if accessing an invalid page. DC_ZVA requires that we supply
+ * the original pointer for an invalid page. But watchpoints require
+ * that we probe the actual space. So do both.
+ */
+ mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ (void) probe_write(env, ptr, 1, mmu_idx, ra);
+ mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
+ dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
+ if (!mem) {
+ goto done;
+ }
+
+ /*
+ * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
+ * it is quite easy to perform all of the comparisons at once without
+ * any extra masking.
+ *
+ * The most common zva block size is 64; some of the ThunderX CPUs use
+ * a block size of 128. For user-only, aarch64_max_initfn will set the
+ * block size to 512. Fill out the other cases for future-proofing.
+ *
+ * In order to be able to find the first miscompare later, we want the
+ * tag bytes to be in little-endian order.
+ */
+ switch (log2_tag_bytes) {
+ case 0: /* zva_blocksize 32 */
+ mem_tag = *(uint8_t *)mem;
+ ptr_tag *= 0x11u;
+ break;
+ case 1: /* zva_blocksize 64 */
+ mem_tag = cpu_to_le16(*(uint16_t *)mem);
+ ptr_tag *= 0x1111u;
+ break;
+ case 2: /* zva_blocksize 128 */
+ mem_tag = cpu_to_le32(*(uint32_t *)mem);
+ ptr_tag *= 0x11111111u;
+ break;
+ case 3: /* zva_blocksize 256 */
+ mem_tag = cpu_to_le64(*(uint64_t *)mem);
+ ptr_tag *= 0x1111111111111111ull;
+ break;
+
+ default: /* zva_blocksize 512, 1024, 2048 */
+ ptr_tag *= 0x1111111111111111ull;
+ i = 0;
+ do {
+ mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
+ if (unlikely(mem_tag != ptr_tag)) {
+ goto fail;
+ }
+ i += 8;
+ align_ptr += 16 * TAG_GRANULE;
+ } while (i < tag_bytes);
+ goto done;
+ }
+
+ if (likely(mem_tag == ptr_tag)) {
+ goto done;
+ }
+
+ fail:
+ /* Locate the first nibble that differs. */
+ i = ctz64(mem_tag ^ ptr_tag) >> 4;
+ mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);
+
+ done:
+ return useronly_clean_ptr(ptr);
+}
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
new file mode 100644
index 000000000..14a4f3980
--- /dev/null
+++ b/target/arm/mve.decode
@@ -0,0 +1,832 @@
+# M-profile MVE instruction descriptions
+#
+# Copyright (c) 2021 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+%qd 22:1 13:3
+%qm 5:1 1:3
+%qn 7:1 17:3
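+
+# For example, the split field %qd concatenates bit 22 (as the most
+# significant bit) with bits 15:13, so an insn with bit 22 clear and
+# bits 15:13 = 0b101 decodes as Qd = q5.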
+
+# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
+%size_28 28:1 !function=plus_1
+
+# 2 operand fp insns have size in bit 20: 1 for 16 bit, 0 for 32 bit,
+# like Neon FP insns.
+%2op_fp_size 20:1 !function=neon_3same_fp_size
+# VCADD is an exception, where bit 20 is 0 for 16 bit and 1 for 32 bit
+%2op_fp_size_rev 20:1 !function=plus_1
+# FP scalars have size in bit 28, 1 for 16 bit, 0 for 32 bit
+%2op_fp_scalar_size 28:1 !function=neon_3same_fp_size
+
+# 1imm format immediate
+%imm_28_16_0 28:1 16:3 0:4
+
+&vldr_vstr rn qd imm p a w size l u
+&1op qd qm size
+&2op qd qm qn size
+&2scalar qd qn rm size
+&1imm qd imm cmode op
+&2shift qd qm shift size
+&vidup qd rn size imm
+&viwdup qd rn rm size imm
+&vcmp qm qn size mask
+&vcmp_scalar qn rm size mask
+&shl_scalar qda rm size
+&vmaxv qm rda size
+&vabav qn qm rda size
+&vldst_sg qd qm rn size msize os
+&vldst_sg_imm qd qm a w imm
+&vldst_il qd rn size pat w
+
+# scatter-gather memory size is in bits 6:4
+%sg_msize 6:1 4:1
+
+@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
+# Note that both Rn and Qd are 3 bits only (no D bit)
+@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr
+
+@vldst_sg .... .... .... rn:4 .... ... size:2 ... ... os:1 &vldst_sg \
+ qd=%qd qm=%qm msize=%sg_msize
+
+# Qm is in the fields usually labeled Qn
+@vldst_sg_imm .... .... a:1 . w:1 . .... .... .... . imm:7 &vldst_sg_imm \
+ qd=%qd qm=%qn
+
+# Deinterleaving load/interleaving store
+@vldst_il .... .... .. w:1 . rn:4 .... ... size:2 pat:2 ..... &vldst_il \
+ qd=%qd
+
+@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
+@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
+@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
+@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
+@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
+ size=%size_28
+@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0
+
+# The _rev suffix indicates that Vn and Vm are reversed. This is
+# the case for shifts. In the Arm ARM these insns are documented
+# with the Vm and Vn fields in their usual places, but in the
+ * assembly the operands are listed "backwards", i.e. in the order
+# Qd, Qm, Qn where other insns use Qd, Qn, Qm. For QEMU we choose
+# to consider Vm and Vn as being in different fields in the insn.
+# This gives us consistency with A64 and Neon.
+@2op_rev .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qn qn=%qm
+
+@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
+
+@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2
+
+@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
+@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
+# VSHLL encoding T2 where shift == esize
+@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
+ qd=%qd qm=%qm size=0 shift=8
+@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
+ qd=%qd qm=%qm size=1 shift=16
+
+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%rshift_i5 16:5 !function=rsub_32
+%rshift_i4 16:4 !function=rsub_16
+%rshift_i3 16:3 !function=rsub_8
+
+@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=0 shift=%rshift_i3
+@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=1 shift=%rshift_i4
+@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
+ size=2 shift=%rshift_i5
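+
+# E.g. a 32-bit right shift by 3 is encoded as 32 - 3 = 29 in the
+# 5-bit immediate field; rsub_32 recovers the shift count 3 at
+# decode time.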
+
+@shl_scalar .... .... .... size:2 .. .... .... .... rm:4 &shl_scalar qda=%qd
+
+# Vector comparison; 4-bit Qm but 3-bit Qn
+%mask_22_13 22:1 13:3
+@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13
+@vcmp_scalar .... .... .. size:2 qn:3 . .... .... .... rm:4 &vcmp_scalar \
+ mask=%mask_22_13
+
+@vcmp_fp .... .... .... qn:3 . .... .... .... .... &vcmp \
+ qm=%qm size=%2op_fp_scalar_size mask=%mask_22_13
+
+# Bit 28 is a 2op_fp_scalar_size bit, but we do not decode it in this
+# format, to avoid complicated overlapping instruction groups
+@vcmp_fp_scalar .... .... .... qn:3 . .... .... .... rm:4 &vcmp_scalar \
+ mask=%mask_22_13
+
+@vmaxv .... .... .... size:2 .. rda:4 .... .... .... &vmaxv qm=%qm
+
+@2op_fp .... .... .... .... .... .... .... .... &2op \
+ qd=%qd qn=%qn qm=%qm size=%2op_fp_size
+
+@2op_fp_size_rev .... .... .... .... .... .... .... .... &2op \
+ qd=%qd qn=%qn qm=%qm size=%2op_fp_size_rev
+
+# 2-operand, but Qd and Qn share a field. Size is in bit 28, but we
+# don't decode it in this format
+@vmaxnma .... .... .... .... .... .... .... .... &2op \
+ qd=%qd qn=%qd qm=%qm
+
+# Here also we don't decode the bit 28 size in the format to avoid
+# awkward nested overlap groups
+@vmaxnmv .... .... .... .... rda:4 .... .... .... &vmaxv qm=%qm
+
+@2op_fp_scalar .... .... .... .... .... .... .... rm:4 &2scalar \
+ qd=%qd qn=%qn size=%2op_fp_scalar_size
+
+# Vector loads and stores
+
+# Widening loads and narrowing stores:
+# for these P=0 W=0 is 'related encoding'; sz=11 is 'related encoding'
+# This means we need to expand out to multiple patterns for P, W, SZ.
+# For stores the U bit must be 0 but we catch that in the trans_ function.
+# The naming scheme here is "VLDSTB_H == in-memory byte load/store to/from
+# signed halfword element in register", etc.
+VLDSTB_H 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=0 w=1 size=1
+VLDSTB_H 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 01 ....... @vldst_wn \
+ p=1 size=1
+VLDSTB_W 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTB_W 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+VLDSTH_W 111 . 110 0 a:1 0 1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=0 w=1 size=2
+VLDSTH_W 111 . 110 1 a:1 0 w:1 . 1 ... ... 0 111 10 ....... @vldst_wn \
+ p=1 size=2
+
+# Non-widening loads/stores (P=0 W=0 is 'related encoding')
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
+ size=0 p=0 w=1
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111101 ....... @vldr_vstr \
+ size=1 p=0 w=1
+VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111110 ....... @vldr_vstr \
+ size=2 p=0 w=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111100 ....... @vldr_vstr \
+ size=0 p=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
+ size=1 p=1
+VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
+ size=2 p=1
+
+# gather loads/scatter stores
+VLDR_S_sg 111 0 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
+VLDR_U_sg 111 1 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
+VSTR_sg 111 0 1100 1 . 00 .... ... 0 111 . .... .... @vldst_sg
+
+VLDRW_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1110 .... .... @vldst_sg_imm
+VLDRD_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1111 .... .... @vldst_sg_imm
+VSTRW_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1110 .... .... @vldst_sg_imm
+VSTRD_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1111 .... .... @vldst_sg_imm
+
+# deinterleaving loads/interleaving stores
+VLD2 1111 1100 1 .. 1 .... ... 1 111 .. .. 00000 @vldst_il
+VLD4 1111 1100 1 .. 1 .... ... 1 111 .. .. 00001 @vldst_il
+VST2 1111 1100 1 .. 0 .... ... 1 111 .. .. 00000 @vldst_il
+VST4 1111 1100 1 .. 0 .... ... 1 111 .. .. 00001 @vldst_il
+
+# Moves between 2 32-bit vector lanes and 2 general purpose registers
+VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
+VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
+
+# Vector 2-op
+VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
+
+VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
+VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op
+
+# The VSHLL T2 encoding is not a @2op pattern, but is here because it
+# overlaps what would be size=0b11 VMULH/VRMULH
+{
+ VCVTB_SH 111 0 1110 0 . 11 1111 ... 0 1110 0 0 . 0 ... 1 @1op_nosz
+
+ VMAXNMA 111 0 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=2
+
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VQMOVUNB 111 0 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
+ VQMOVN_BS 111 0 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op
+
+ VMAXA 111 0 1110 0 . 11 .. 11 ... 0 1110 1 0 . 0 ... 1 @1op
+
+ VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VCVTB_HS 111 1 1110 0 . 11 1111 ... 0 1110 0 0 . 0 ... 1 @1op_nosz
+
+ VMAXNMA 111 1 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=1
+
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VMOVNB 111 1 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
+ VQMOVN_BU 111 1 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op
+
+ VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VCVTT_SH 111 0 1110 0 . 11 1111 ... 1 1110 0 0 . 0 ... 1 @1op_nosz
+
+ VMINNMA 111 0 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=2
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VQMOVUNT 111 0 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
+ VQMOVN_TS 111 0 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op
+
+ VMINA 111 0 1110 0 . 11 .. 11 ... 1 1110 1 0 . 0 ... 1 @1op
+
+ VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VCVTT_HS 111 1 1110 0 . 11 1111 ... 1 1110 0 0 . 0 ... 1 @1op_nosz
+
+ VMINNMA 111 1 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=1
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
+ VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h
+
+ VMOVNT 111 1 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
+ VQMOVN_TU 111 1 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op
+
+ VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
+VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
+VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
+VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
+
+VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
+
+VHADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
+VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
+
+{
+ VMULLP_B 111 . 1110 0 . 11 ... 1 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
+ VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+ VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
+}
+{
+ VMULLP_T 111 . 1110 0 . 11 ... 1 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
+ VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+ VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
+}
+
+VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
+
+VQADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
+VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
+
+VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
+
+VRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+VRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
+
+VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
+
+VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
+
+{
+ VCMUL0 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
+ VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+ VQDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+}
+
+{
+ VCMUL180 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
+ VQDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+ VQDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+}
+
+{
+ VCMUL90 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 1 @2op_sz28
+ VQRDMLADH 111 0 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+ VQRDMLSDH 111 1 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+ VCMUL270 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 1 @2op_sz28
+ VQRDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+ VQRDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+}
+
+VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
+VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
+
+VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+
+{
+ VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
+ VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+ VHCADD90 1110 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
+ VHCADD270 1110 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
+}
+
+{
+ VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
+ VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
+ VCADD90 1111 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
+ VCADD270 1111 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
+}
+
+# Vector miscellaneous
+
+VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
+VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op
+
+VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
+VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
+VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op
+
+VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz
+
+VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
+VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
+VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
+VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op
+
+VQABS 1111 1111 1 . 11 .. 00 ... 0 0111 01 . 0 ... 0 @1op
+VQNEG 1111 1111 1 . 11 .. 00 ... 0 0111 11 . 0 ... 0 @1op
+
+&vdup qd rt size
+# Qd is in the fields usually named Qn
+@vdup .... .... . . .. ... . rt:4 .... . . . . .... qd=%qn &vdup
+
+# B and E bits encode size, which we decode here to the usual size values
+VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
+VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
+VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2
+
+# Incrementing and decrementing dup
+
+# VIDUP, VDDUP format immediate: 1 << (immh:imml)
+%imm_vidup 7:1 0:1 !function=vidup_imm
+
+# VIDUP, VDDUP registers: Rm bits [3:1] from insn, bit 0 is 1;
+# Rn bits [3:1] from insn, bit 0 is 0
+%vidup_rm 1:3 !function=times_2_plus_1
+%vidup_rn 17:3 !function=times_2
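+
+# E.g. an Rm field of 0b011 decodes (via times_2_plus_1) as r7 and an
+# Rn field of 0b011 (via times_2) as r6: only odd Rm and even Rn
+# general-purpose registers are encodable here.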
+
+@vidup .... .... . . size:2 .... .... .... .... .... \
+ qd=%qd imm=%imm_vidup rn=%vidup_rn &vidup
+@viwdup .... .... . . size:2 .... .... .... .... .... \
+ qd=%qd imm=%imm_vidup rm=%vidup_rm rn=%vidup_rn &viwdup
+{
+ VIDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 111 . @vidup
+ VIWDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 ... . @viwdup
+}
+{
+ VCMPGT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=2
+ VCMPLE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=2
+ VDDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 111 . @vidup
+ VDWDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 ... . @viwdup
+}
+
+# multiply-add long dual accumulate
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi 20:3 !function=times_2_plus_1
+%rdalo 13:3 !function=times_2
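+# E.g. an rdalo field of 0b010 decodes (via times_2) as r4 and an
+# rdahi field of 0b010 (via times_2_plus_1) as r5, so the 64-bit
+# accumulator always occupies an even/odd register pair.
+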
+# size bit is 0 for 16 bit, 1 for 32 bit
+%size_16 16:1 !function=plus_1
+
+&vmlaldav rdahi rdalo size qn qm x a
+&vmladav rda size qn qm x a
+
+@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
+ qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
+@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
+ qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
+@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+ qn=%qn rda=%rdalo size=%size_16 &vmladav
+@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
+ qn=%qn rda=%rdalo size=0 &vmladav
+
+{
+ VMLADAV_S 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+ VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+{
+ VMLADAV_U 1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
+ VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
+}
+
+{
+ VMLSDAV 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav
+ VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
+}
+
+{
+ VMLSDAV 1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
+ VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
+}
+
+VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
+VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
+
+{
+ [
+ VMAXNMAV 1110 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2
+ VMINNMAV 1110 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=2
+ VMAXNMV 1110 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2
+ VMINNMV 1110 1110 1110 11 10 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=2
+ ]
+ [
+ VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
+ VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
+ VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
+ VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
+ ]
+ VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
+ VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
+}
+
+{
+ [
+ VMAXNMAV 1111 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1
+ VMINNMAV 1111 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1
+ VMAXNMV 1111 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1
+ VMINNMV 1111 1110 1110 11 10 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1
+ ]
+ [
+ VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
+ VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
+ ]
+ VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
+ VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
+}
+
+# Scalar operations
+
+{
+ VCMPEQ_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=2
+ VCMPNE_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=2
+ VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
+}
+
+{
+ VCMPLT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=2
+ VCMPGE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=2
+ VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
+}
+
+{
+ VSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
+ VRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
+ VQSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
+ VQRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
+ VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+}
+
+{
+ VSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
+ VRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
+ VQSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
+ VQRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
+ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
+}
+
+{
+ VADD_fp_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 100 .... @2op_fp_scalar
+ VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+ VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
+}
+
+{
+ VSUB_fp_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 100 .... @2op_fp_scalar
+ VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+ VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
+}
+
+{
+ VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
+ VQDMULLB_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
+{
+ VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
+ VQDMULLT_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 110 .... @2scalar_nosz \
+ size=%size_28
+}
+
+{
+ VMUL_fp_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 110 .... @2op_fp_scalar
+ VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+ VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+}
+
+{
+ VFMA_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 100 .... @2op_fp_scalar
+ # The U bit (28) is don't-care because it does not affect the result
+ VMLA 111 - 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
+}
+
+{
+ VFMAS_scalar 111 . 1110 0 . 11 ... 1 ... 1 1110 . 100 .... @2op_fp_scalar
+ # The U bit (28) is don't-care because it does not affect the result
+ VMLAS 111 - 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
+}
+
+VQRDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 100 .... @2scalar
+VQRDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 100 .... @2scalar
+VQDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 110 .... @2scalar
+VQDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 110 .... @2scalar
+
+# Vector add across vector
+{
+ VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
+ VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
+ rdahi=%rdahi rdalo=%rdalo
+}
+
+@vabav .... .... .. size:2 .... rda:4 .... .... .... &vabav qn=%qn qm=%qm
+
+VABAV_S 111 0 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
+VABAV_U 111 1 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
+
+# Logical immediate operations (1 reg and modified-immediate)
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
+
+# Shifts by immediate
+
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
+VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w
+
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
+VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w
+
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
+VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
+VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w
+
+# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
+# Note that VMOVL is encoded as "VSHLL with a zero shift count"; we
+# implement it that way rather than special-casing it in the decode.
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
+VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
+
+# Narrowing shifts (which only support b and h sizes)
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
+VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h
+
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
+VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
+
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
+VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
+
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
+VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
+
+# Comparisons. We expand out the conditions which are split across
+# encodings T1, T2, T3 and the fc bits. These include VPT, which is
+# effectively "VCMP then VPST". A plain "VCMP" has a mask field of zero.
+{
+ VCMPEQ_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp_fp
+ VCMPEQ 111 1 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp
+}
+
+{
+ VCMPNE_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp_fp
+ VCMPNE 111 1 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp
+}
+
+{
+ VCMPGE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp_fp
+ VCMPGE 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp
+}
+
+{
+ VCMPLT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp_fp
+ VCMPLT 111 1 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp
+}
+
+{
+ VCMPGT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp_fp
+ VCMPGT 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
+}
+
+{
+ VCMPLE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp_fp
+ VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp
+}
+
+{
+ VPSEL 1111 1110 0 . 11 ... 1 ... 0 1111 . 0 . 0 ... 1 @2op_nosz
+ VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp
+ VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp
+}
+
+{
+ VPNOT 1111 1110 0 0 11 000 1 000 0 1111 0100 1101
+ VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
+ VCMPEQ_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=1
+ VCMPEQ_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0100 .... @vcmp_scalar
+}
+
+{
+ VCMPNE_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=1
+ VCMPNE_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1100 .... @vcmp_scalar
+}
+
+{
+ VCMPGT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=1
+ VCMPGT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0110 .... @vcmp_scalar
+}
+
+{
+ VCMPLE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=1
+ VCMPLE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1110 .... @vcmp_scalar
+}
+
+{
+ VCMPGE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=1
+ VCMPGE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0100 .... @vcmp_scalar
+}
+{
+ VCMPLT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=1
+ VCMPLT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1100 .... @vcmp_scalar
+}
+
+VCMPCS_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 1 0 .... @vcmp_scalar
+VCMPHI_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1 1 1 0 .... @vcmp_scalar
+
+# 2-operand FP
+VADD_fp 1110 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp
+VSUB_fp 1110 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp
+VMUL_fp 1111 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 1 ... 0 @2op_fp
+VABD_fp 1111 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp
+
+VMAXNM 1111 1111 0 . 0 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp
+VMINNM 1111 1111 0 . 1 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp
+
+VCADD90_fp 1111 1100 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCADD270_fp 1111 1101 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+
+VFMA 1110 1111 0 . 0 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp
+VFMS 1110 1111 0 . 1 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp
+
+VCMLA0 1111 110 00 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA90 1111 110 01 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA180 1111 110 10 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA270 1111 110 11 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+
+# floating-point <-> fixed-point conversions. Naming convention:
+# VCVT_<from><to>, S = signed int, U = unsigned int, H = halfprec, F = singleprec
+@vcvt .... .... .. 1 ..... .... .. 1 . .... .... &2shift \
+ qd=%qd qm=%qm shift=%rshift_i5 size=2
+@vcvt_f16 .... .... .. 11 .... .... .. 0 . .... .... &2shift \
+ qd=%qd qm=%qm shift=%rshift_i4 size=1
+
+VCVT_SH_fixed 1110 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt_f16
+VCVT_UH_fixed 1111 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt_f16
+
+VCVT_HS_fixed 1110 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt_f16
+VCVT_HU_fixed 1111 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt_f16
+
+VCVT_SF_fixed 1110 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt
+VCVT_UF_fixed 1111 1111 1 . ...... ... 0 11 . 0 01 . 1 ... 0 @vcvt
+
+VCVT_FS_fixed 1110 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt
+VCVT_FU_fixed 1111 1111 1 . ...... ... 0 11 . 1 01 . 1 ... 0 @vcvt
+
+# VCVT between floating point and integer (halfprec and single);
+# VCVT_<from><to>, S = signed int, U = unsigned int, F = float
+VCVT_SF 1111 1111 1 . 11 .. 11 ... 0 011 00 1 . 0 ... 0 @1op
+VCVT_UF 1111 1111 1 . 11 .. 11 ... 0 011 01 1 . 0 ... 0 @1op
+VCVT_FS 1111 1111 1 . 11 .. 11 ... 0 011 10 1 . 0 ... 0 @1op
+VCVT_FU 1111 1111 1 . 11 .. 11 ... 0 011 11 1 . 0 ... 0 @1op
+
+# VCVT from floating point to integer with specified rounding mode
+VCVTAS 1111 1111 1 . 11 .. 11 ... 000 00 0 1 . 0 ... 0 @1op
+VCVTAU 1111 1111 1 . 11 .. 11 ... 000 00 1 1 . 0 ... 0 @1op
+VCVTNS 1111 1111 1 . 11 .. 11 ... 000 01 0 1 . 0 ... 0 @1op
+VCVTNU 1111 1111 1 . 11 .. 11 ... 000 01 1 1 . 0 ... 0 @1op
+VCVTPS 1111 1111 1 . 11 .. 11 ... 000 10 0 1 . 0 ... 0 @1op
+VCVTPU 1111 1111 1 . 11 .. 11 ... 000 10 1 1 . 0 ... 0 @1op
+VCVTMS 1111 1111 1 . 11 .. 11 ... 000 11 0 1 . 0 ... 0 @1op
+VCVTMU 1111 1111 1 . 11 .. 11 ... 000 11 1 1 . 0 ... 0 @1op
+
+VRINTN 1111 1111 1 . 11 .. 10 ... 001 000 1 . 0 ... 0 @1op
+VRINTX 1111 1111 1 . 11 .. 10 ... 001 001 1 . 0 ... 0 @1op
+VRINTA 1111 1111 1 . 11 .. 10 ... 001 010 1 . 0 ... 0 @1op
+VRINTZ 1111 1111 1 . 11 .. 10 ... 001 011 1 . 0 ... 0 @1op
+VRINTM 1111 1111 1 . 11 .. 10 ... 001 101 1 . 0 ... 0 @1op
+VRINTP 1111 1111 1 . 11 .. 10 ... 001 111 1 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
new file mode 100644
index 000000000..846962bf4
--- /dev/null
+++ b/target/arm/mve_helper.c
@@ -0,0 +1,3450 @@
+/*
+ * M-profile MVE Operations
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "vec_internal.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg.h"
+#include "fpu/softfloat.h"
+
+static uint16_t mve_eci_mask(CPUARMState *env)
+{
+ /*
+ * Return the mask of which elements in the MVE vector correspond
+ * to beats being executed. The mask has 1 bits for lanes that are
+ * to be executed now, and 0 bits where ECI says the beat has already
+ * been executed.
+ */
+ int eci;
+
+ if ((env->condexec_bits & 0xf) != 0) {
+ return 0xffff;
+ }
+
+ eci = env->condexec_bits >> 4;
+ switch (eci) {
+ case ECI_NONE:
+ return 0xffff;
+ case ECI_A0:
+ return 0xfff0;
+ case ECI_A0A1:
+ return 0xff00;
+ case ECI_A0A1A2:
+ case ECI_A0A1A2B0:
+ return 0xf000;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static uint16_t mve_element_mask(CPUARMState *env)
+{
+ /*
+ * Return the mask of which elements in the MVE vector should be
+ * updated. This is a combination of multiple things:
+ * (1) by default, we update every lane in the vector
+ * (2) VPT predication stores its state in the VPR register;
+ * (3) low-overhead-branch tail predication will mask out part of
+ * the vector on the final iteration of the loop
+ * (4) if EPSR.ECI is set then we must execute only some beats
+ * of the insn
+ * We combine all these into a 16-bit result with the same semantics
+ * as VPR.P0: 0 to mask the lane, 1 if it is active.
+ * 8-bit vector ops will look at all bits of the result;
+ * 16-bit ops will look at bits 0, 2, 4, ...;
+ * 32-bit ops will look at bits 0, 4, 8 and 12.
+ * Compare pseudocode GetCurInstrBeat(), though that only returns
+ * the 4-bit slice of the mask corresponding to a single beat.
+ */
+ uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
+
+ if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
+ mask |= 0xff;
+ }
+ if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
+ mask |= 0xff00;
+ }
+
+ if (env->v7m.ltpsize < 4 &&
+ env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
+ /*
+ * Tail predication active, and this is the last loop iteration.
+ * The element size is (1 << ltpsize), and we only want to process
+ * loopcount elements, so we want to retain the least significant
+ * (loopcount * esize) predicate bits and zero out bits above that.
+ */
+ int masklen = env->regs[14] << env->v7m.ltpsize;
+ assert(masklen <= 16);
+ uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
+ mask &= ltpmask;
+ }
+
+ /*
+ * ECI bits indicate which beats are already executed;
+ * we handle this by effectively predicating them out.
+ */
+ mask &= mve_eci_mask(env);
+ return mask;
+}
+
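+/*
+ * For example, with 32-bit elements (ltpsize = 2) and LR = 2 on the
+ * final loop iteration, masklen = 2 << 2 = 8 and ltpmask = 0x00ff,
+ * so at most the two low 32-bit lanes (mask bits 0 and 4) remain
+ * active in mve_element_mask above.
+ */
+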
+static void mve_advance_vpt(CPUARMState *env)
+{
+ /* Advance the VPT and ECI state if necessary */
+ uint32_t vpr = env->v7m.vpr;
+ unsigned mask01, mask23;
+ uint16_t inv_mask;
+ uint16_t eci_mask = mve_eci_mask(env);
+
+ if ((env->condexec_bits & 0xf) == 0) {
+ env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
+ (ECI_A0 << 4) : (ECI_NONE << 4);
+ }
+
+ if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
+ /* VPT not enabled, nothing to do */
+ return;
+ }
+
+ /* Invert P0 bits if needed, but only for beats we actually executed */
+ mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
+ mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
+ /* Start by assuming we invert all bits corresponding to executed beats */
+ inv_mask = eci_mask;
+ if (mask01 <= 8) {
+ /* MASK01 says don't invert low half of P0 */
+ inv_mask &= ~0xff;
+ }
+ if (mask23 <= 8) {
+ /* MASK23 says don't invert high half of P0 */
+ inv_mask &= ~0xff00;
+ }
+ vpr ^= inv_mask;
+ /* Only update MASK01 if beat 1 executed */
+ if (eci_mask & 0xf0) {
+ vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
+ }
+ /* Beat 3 always executes, so update MASK23 */
+ vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
+ env->v7m.vpr = vpr;
+}
+
+/* For loads, predicated lanes are zeroed instead of keeping their old values */
+#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned b, e; \
+ /* \
+ * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
+ * beats so we don't care if we update part of the dest and \
+ * then take an exception. \
+ */ \
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+ if (eci_mask & (1 << b)) { \
+ d[H##ESIZE(e)] = (mask & (1 << b)) ? \
+ cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
+ } \
+ addr += MSIZE; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned b, e; \
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+ if (mask & (1 << b)) { \
+ cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+ } \
+ addr += MSIZE; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
+DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
+DO_VLDR(vldrw, 4, ldl, 4, uint32_t)
+
+DO_VSTR(vstrb, 1, stb, 1, uint8_t)
+DO_VSTR(vstrh, 2, stw, 2, uint16_t)
+DO_VSTR(vstrw, 4, stl, 4, uint32_t)
+
+DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
+DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
+DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
+DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
+DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
+DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)
+
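+/*
+ * E.g. vldrb_sh above loads 8 consecutive bytes (MSIZE = 1) and
+ * sign-extends each into one of the 8 halfword lanes (ESIZE = 2);
+ * predicated-off lanes are written as zero, as noted above.
+ */
+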
+DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
+DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
+DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
+
+#undef DO_VLDR
+#undef DO_VSTR
+
+/*
+ * Gather loads/scatter stores. Here each element of Qm specifies
+ * an offset to use from the base register Rm. In the _os_ versions
+ * that offset is scaled by the element size.
+ * For loads, predicated lanes are zeroed instead of retaining
+ * their previous values.
+ */
+#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ TYPE *d = vd; \
+ OFFTYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H##ESIZE(e)]); \
+ d[H##ESIZE(e)] = (mask & 1) ? \
+ cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
+ if (WB) { \
+ m[H##ESIZE(e)] = addr; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* TYPE is known to be unsigned here, so it matches the offset type */
+#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ TYPE *d = vd; \
+ TYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H##ESIZE(e)]); \
+ if (mask & 1) { \
+ cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+ } \
+ if (WB) { \
+ m[H##ESIZE(e)] = addr; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/*
+ * 64-bit accesses are slightly different: they are done as two 32-bit
+ * accesses, controlled by the predicate mask for the relevant beat,
+ * and with a single 32-bit offset in the first of the two Qm elements.
+ * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little).
+ * Address writeback happens on the odd beats and updates the address
+ * stored in the even-beat element.
+ */
+#define DO_VLDR64_SG(OP, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ uint32_t *d = vd; \
+ uint32_t *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H4(e & ~1)]); \
+ addr += 4 * (e & 1); \
+ d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
+ if (WB && (e & 1)) { \
+ m[H4(e & ~1)] = addr - 4; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSTR64_SG(OP, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ uint32_t *d = vd; \
+ uint32_t *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H4(e & ~1)]); \
+ addr += 4 * (e & 1); \
+ if (mask & 1) { \
+ cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \
+ } \
+ if (WB && (e & 1)) { \
+ m[H4(e & ~1)] = addr - 4; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ }
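+
+/*
+ * Worked example (illustrative): for the vldrd_sg_wb_ud helper
+ * instantiated below, beats 0 and 1 both read the offset from the
+ * even element m[0]: beat 0 loads from base + m[0], beat 1 from
+ * base + m[0] + 4, and the odd-beat writeback stores addr - 4
+ * (i.e. base + m[0]) back into the even element.
+ */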
+
+#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
+#define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1))
+#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
+#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))
+
+DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)
+
+DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)
+
+DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
+DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)
+
+DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)
+
+DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
+DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
+DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
+DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)
+
+DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
+DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
+DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
+DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
+
+/*
+ * Deinterleaving loads/interleaving stores.
+ *
+ * For these helpers we are passed the index of the first Qreg
+ * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3)
+ * and the value of the base address register Rn.
+ * The helpers are specialized for pattern and element size, so
+ * for instance vld42h is VLD4 with pattern 2, element size MO_16.
+ *
+ * These insns are beatwise but not predicated, so we must honour ECI,
+ * but need not look at mve_element_mask().
+ *
+ * The pseudocode implements these insns with multiple memory accesses
+ * of the element size, but rules R_VVVG and R_FXDM permit us to make
+ * one 32-bit memory access per beat.
+ */
+#define DO_VLD4B(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat, e; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ for (e = 0; e < 4; e++, data >>= 8) { \
+ uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
+ qd[H1(off[beat])] = data; \
+ } \
+ } \
+ }
+
+#define DO_VLD4H(OP, O1, O2) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O1, O2, O2 }; \
+ uint32_t addr, data; \
+ int y; /* y counts 0 2 0 2 */ \
+ uint16_t *qd; \
+ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 8 + (beat & 1) * 4; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
+ qd[H2(off[beat])] = data; \
+ data >>= 16; \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
+ qd[H2(off[beat])] = data; \
+ } \
+ }
+
+#define DO_VLD4W(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint32_t *qd; \
+ int y; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ y = (beat + (O1 & 2)) & 3; \
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
+ qd[H4(off[beat] >> 2)] = data; \
+ } \
+ }
+
+DO_VLD4B(vld40b, 0, 1, 10, 11)
+DO_VLD4B(vld41b, 2, 3, 12, 13)
+DO_VLD4B(vld42b, 4, 5, 14, 15)
+DO_VLD4B(vld43b, 6, 7, 8, 9)
+
+DO_VLD4H(vld40h, 0, 5)
+DO_VLD4H(vld41h, 1, 6)
+DO_VLD4H(vld42h, 2, 7)
+DO_VLD4H(vld43h, 3, 4)
+
+DO_VLD4W(vld40w, 0, 1, 10, 11)
+DO_VLD4W(vld41w, 2, 3, 12, 13)
+DO_VLD4W(vld42w, 4, 5, 14, 15)
+DO_VLD4W(vld43w, 6, 7, 8, 9)
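+
+/*
+ * Worked example (illustrative): for vld40b, off is { 0, 1, 10, 11 },
+ * so beat 0 loads the word at base + 0 and scatters its four bytes to
+ * lane 0 of Qn..Qn+3, beat 1 loads base + 4 into lane 1 of the same
+ * registers, and so on -- exactly the VLD4 de-interleave pattern.
+ */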
+
+#define DO_VLD2B(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat, e; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint8_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 2; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ for (e = 0; e < 4; e++, data >>= 8) { \
+ qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
+ qd[H1(off[beat] + (e >> 1))] = data; \
+ } \
+ } \
+ }
+
+#define DO_VLD2H(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ int e; \
+ uint16_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ for (e = 0; e < 2; e++, data >>= 16) { \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
+ qd[H2(off[beat])] = data; \
+ } \
+ } \
+ }
+
+#define DO_VLD2W(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint32_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat]; \
+ data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
+ qd[H4(off[beat] >> 3)] = data; \
+ } \
+ }
+
+DO_VLD2B(vld20b, 0, 2, 12, 14)
+DO_VLD2B(vld21b, 4, 6, 8, 10)
+
+DO_VLD2H(vld20h, 0, 1, 6, 7)
+DO_VLD2H(vld21h, 2, 3, 4, 5)
+
+DO_VLD2W(vld20w, 0, 4, 24, 28)
+DO_VLD2W(vld21w, 8, 12, 16, 20)
+
+#define DO_VST4B(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat, e; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ data = 0; \
+ for (e = 3; e >= 0; e--) { \
+ uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
+ data = (data << 8) | qd[H1(off[beat])]; \
+ } \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+#define DO_VST4H(OP, O1, O2) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O1, O2, O2 }; \
+ uint32_t addr, data; \
+ int y; /* y counts 0 2 0 2 */ \
+ uint16_t *qd; \
+ for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 8 + (beat & 1) * 4; \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
+ data = qd[H2(off[beat])]; \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
+ data |= qd[H2(off[beat])] << 16; \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+#define DO_VST4W(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint32_t *qd; \
+ int y; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ y = (beat + (O1 & 2)) & 3; \
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
+ data = qd[H4(off[beat] >> 2)]; \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+DO_VST4B(vst40b, 0, 1, 10, 11)
+DO_VST4B(vst41b, 2, 3, 12, 13)
+DO_VST4B(vst42b, 4, 5, 14, 15)
+DO_VST4B(vst43b, 6, 7, 8, 9)
+
+DO_VST4H(vst40h, 0, 5)
+DO_VST4H(vst41h, 1, 6)
+DO_VST4H(vst42h, 2, 7)
+DO_VST4H(vst43h, 3, 4)
+
+DO_VST4W(vst40w, 0, 1, 10, 11)
+DO_VST4W(vst41w, 2, 3, 12, 13)
+DO_VST4W(vst42w, 4, 5, 14, 15)
+DO_VST4W(vst43w, 6, 7, 8, 9)
+
+#define DO_VST2B(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat, e; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint8_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 2; \
+ data = 0; \
+ for (e = 3; e >= 0; e--) { \
+ qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
+ data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \
+ } \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+#define DO_VST2H(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ int e; \
+ uint16_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat] * 4; \
+ data = 0; \
+ for (e = 1; e >= 0; e--) { \
+ qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
+ data = (data << 16) | qd[H2(off[beat])]; \
+ } \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+#define DO_VST2W(OP, O1, O2, O3, O4) \
+ void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \
+ uint32_t base) \
+ { \
+ int beat; \
+ uint16_t mask = mve_eci_mask(env); \
+ static const uint8_t off[4] = { O1, O2, O3, O4 }; \
+ uint32_t addr, data; \
+ uint32_t *qd; \
+ for (beat = 0; beat < 4; beat++, mask >>= 4) { \
+ if ((mask & 1) == 0) { \
+ /* ECI says skip this beat */ \
+ continue; \
+ } \
+ addr = base + off[beat]; \
+ qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
+ data = qd[H4(off[beat] >> 3)]; \
+ cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ } \
+ }
+
+DO_VST2B(vst20b, 0, 2, 12, 14)
+DO_VST2B(vst21b, 4, 6, 8, 10)
+
+DO_VST2H(vst20h, 0, 1, 6, 7)
+DO_VST2H(vst21h, 2, 3, 4, 5)
+
+DO_VST2W(vst20w, 0, 4, 24, 28)
+DO_VST2W(vst21w, 8, 12, 16, 20)
+
+/*
+ * The mergemask(D, R, M) macro performs the operation "*D = R" but
+ * storing only the bytes which correspond to 1 bits in M,
+ * leaving other bytes in *D unchanged. We use _Generic
+ * to select the correct implementation based on the type of D.
+ */
+
+static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
+{
+ if (mask & 1) {
+ *d = r;
+ }
+}
+
+static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
+{
+ mergemask_ub((uint8_t *)d, r, mask);
+}
+
+static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
+{
+ uint16_t bmask = expand_pred_b_data[mask & 3];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
+{
+ mergemask_uh((uint16_t *)d, r, mask);
+}
+
+static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
+{
+ uint32_t bmask = expand_pred_b_data[mask & 0xf];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
+{
+ mergemask_uw((uint32_t *)d, r, mask);
+}
+
+static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
+{
+ uint64_t bmask = expand_pred_b_data[mask & 0xff];
+ *d = (*d & ~bmask) | (r & bmask);
+}
+
+static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
+{
+ mergemask_uq((uint64_t *)d, r, mask);
+}
+
+#define mergemask(D, R, M) \
+ _Generic(D, \
+ uint8_t *: mergemask_ub, \
+ int8_t *: mergemask_sb, \
+ uint16_t *: mergemask_uh, \
+ int16_t *: mergemask_sh, \
+ uint32_t *: mergemask_uw, \
+ int32_t *: mergemask_sw, \
+ uint64_t *: mergemask_uq, \
+ int64_t *: mergemask_sq)(D, R, M)
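+
+/*
+ * Minimal usage sketch (illustrative values): with mask == 0x000e,
+ * mergemask_uw() uses expand_pred_b_data[0xe] == 0xffffff00 as the
+ * byte mask, so the merged word keeps byte 0 of *d and takes bytes
+ * 1..3 from r.
+ */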
+
+void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
+{
+ /*
+ * The generated code already replicated an 8 or 16 bit constant
+ * into the 32-bit value, so we only need to write the 32-bit
+ * value to all elements of the Qreg, allowing for predication.
+ */
+ uint32_t *d = vd;
+ uint16_t mask = mve_element_mask(env);
+ unsigned e;
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ mergemask(&d[H4(e)], val, mask);
+ }
+ mve_advance_vpt(env);
+}
+
+#define DO_1OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_CLS_B(N) (clrsb32(N) - 24)
+#define DO_CLS_H(N) (clrsb32(N) - 16)
+
+DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
+DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
+DO_1OP(vclsw, 4, int32_t, clrsb32)
+
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+
+DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
+DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
+DO_1OP(vclzw, 4, uint32_t, clz32)
+
+DO_1OP(vrev16b, 2, uint16_t, bswap16)
+DO_1OP(vrev32b, 4, uint32_t, bswap32)
+DO_1OP(vrev32h, 4, uint32_t, hswap32)
+DO_1OP(vrev64b, 8, uint64_t, bswap64)
+DO_1OP(vrev64h, 8, uint64_t, hswap64)
+DO_1OP(vrev64w, 8, uint64_t, wswap64)
+
+#define DO_NOT(N) (~(N))
+
+DO_1OP(vmvn, 8, uint64_t, DO_NOT)
+
+#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
+#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
+#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))
+
+DO_1OP(vabsb, 1, int8_t, DO_ABS)
+DO_1OP(vabsh, 2, int16_t, DO_ABS)
+DO_1OP(vabsw, 4, int32_t, DO_ABS)
+
+/* We can do these 64 bits at a time */
+DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
+DO_1OP(vfabss, 8, uint64_t, DO_FABSS)
+
+#define DO_NEG(N) (-(N))
+#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
+#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))
+
+DO_1OP(vnegb, 1, int8_t, DO_NEG)
+DO_1OP(vnegh, 2, int16_t, DO_NEG)
+DO_1OP(vnegw, 4, int32_t, DO_NEG)
+
+/* We can do these 64 bits at a time */
+DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
+DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
+
+/*
+ * 1 operand immediates: Vda is destination and possibly also one source.
+ * All these insns work at 64-bit widths.
+ */
+#define DO_1OP_IMM(OP, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \
+ { \
+ uint64_t *da = vda; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+ mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_MOVI(N, I) (I)
+#define DO_ANDI(N, I) ((N) & (I))
+#define DO_ORRI(N, I) ((N) | (I))
+
+DO_1OP_IMM(vmovi, DO_MOVI)
+DO_1OP_IMM(vandi, DO_ANDI)
+DO_1OP_IMM(vorri, DO_ORRI)
+
+#define DO_2OP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_U(OP, FN) \
+ DO_2OP(OP##b, 1, uint8_t, FN) \
+ DO_2OP(OP##h, 2, uint16_t, FN) \
+ DO_2OP(OP##w, 4, uint32_t, FN)
+
+/* provide signed 2-op helpers for all sizes */
+#define DO_2OP_S(OP, FN) \
+ DO_2OP(OP##b, 1, int8_t, FN) \
+ DO_2OP(OP##h, 2, int16_t, FN) \
+ DO_2OP(OP##w, 4, int32_t, FN)
+
+/*
+ * "Long" operations where two half-sized inputs (taken from either the
+ * top or the bottom of the input vector) produce a double-width result.
+ * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
+ */
+#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
+ m[H##ESIZE(le * 2 + TOP)]); \
+ mergemask(&d[H##LESIZE(le)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op helpers for all sizes */
+#define DO_2OP_SAT_U(OP, FN) \
+ DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
+ DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
+ DO_2OP_SAT(OP##w, 4, uint32_t, FN)
+
+/* provide signed 2-op helpers for all sizes */
+#define DO_2OP_SAT_S(OP, FN) \
+ DO_2OP_SAT(OP##b, 1, int8_t, FN) \
+ DO_2OP_SAT(OP##h, 2, int16_t, FN) \
+ DO_2OP_SAT(OP##w, 4, int32_t, FN)
+
+#define DO_AND(N, M) ((N) & (M))
+#define DO_BIC(N, M) ((N) & ~(M))
+#define DO_ORR(N, M) ((N) | (M))
+#define DO_ORN(N, M) ((N) | ~(M))
+#define DO_EOR(N, M) ((N) ^ (M))
+
+DO_2OP(vand, 8, uint64_t, DO_AND)
+DO_2OP(vbic, 8, uint64_t, DO_BIC)
+DO_2OP(vorr, 8, uint64_t, DO_ORR)
+DO_2OP(vorn, 8, uint64_t, DO_ORN)
+DO_2OP(veor, 8, uint64_t, DO_EOR)
+
+#define DO_ADD(N, M) ((N) + (M))
+#define DO_SUB(N, M) ((N) - (M))
+#define DO_MUL(N, M) ((N) * (M))
+
+DO_2OP_U(vadd, DO_ADD)
+DO_2OP_U(vsub, DO_SUB)
+DO_2OP_U(vmul, DO_MUL)
+
+DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)
+
+DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
+DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
+DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
+DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
+DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
+DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
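+
+/*
+ * Illustrative note: for vmullbsh (TOP == 0) the loop widens input
+ * halfword elements 0, 2, 4, 6 into the four int32_t outputs;
+ * vmulltsh (TOP == 1) uses elements 1, 3, 5, 7 instead.
+ */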
+
+/*
+ * Polynomial multiply. We can always do this generating 64 bits
+ * of the result at a time, so we don't need to use DO_2OP_L.
+ */
+#define VMULLPH_MASK 0x00ff00ff00ff00ffULL
+#define VMULLPW_MASK 0x0000ffff0000ffffULL
+#define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK)
+#define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8)
+#define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK)
+#define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16)
+
+DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH)
+DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH)
+DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW)
+DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW)
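+
+/*
+ * Worked example (illustrative): polynomial multiply is carry-less,
+ * i.e. multiplication of polynomials over GF(2). For byte lanes 0x03
+ * and 0x03, (x + 1) * (x + 1) = x^2 + 1, so DO_VMULLPBH produces
+ * 0x0005 in that lane rather than the integer product 9.
+ */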
+
+/*
+ * Because the computation type is at least twice as large as required,
+ * these work for both signed and unsigned source types.
+ */
+static inline uint8_t do_mulh_b(int32_t n, int32_t m)
+{
+ return (n * m) >> 8;
+}
+
+static inline uint16_t do_mulh_h(int32_t n, int32_t m)
+{
+ return (n * m) >> 16;
+}
+
+static inline uint32_t do_mulh_w(int64_t n, int64_t m)
+{
+ return (n * m) >> 32;
+}
+
+static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
+{
+ return (n * m + (1U << 7)) >> 8;
+}
+
+static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
+{
+ return (n * m + (1U << 15)) >> 16;
+}
+
+static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
+{
+ return (n * m + (1U << 31)) >> 32;
+}
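+
+/*
+ * Worked example (illustrative): do_mulh_b(-128, -128) computes
+ * 16384 >> 8 == 64, and with unsigned inputs do_mulh_b(255, 255)
+ * computes 65025 >> 8 == 254; both are exact because int32_t holds
+ * the full product whichever way the caller's TYPE converts.
+ */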
+
+DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
+DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
+DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
+DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
+DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
+DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)
+
+DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
+DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
+DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
+DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
+DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
+DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
+
+#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
+
+DO_2OP_S(vmaxs, DO_MAX)
+DO_2OP_U(vmaxu, DO_MAX)
+DO_2OP_S(vmins, DO_MIN)
+DO_2OP_U(vminu, DO_MIN)
+
+#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))
+
+DO_2OP_S(vabds, DO_ABD)
+DO_2OP_U(vabdu, DO_ABD)
+
+static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
+{
+ return ((uint64_t)n + m) >> 1;
+}
+
+static inline int32_t do_vhadd_s(int32_t n, int32_t m)
+{
+ return ((int64_t)n + m) >> 1;
+}
+
+static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
+{
+ return ((uint64_t)n - m) >> 1;
+}
+
+static inline int32_t do_vhsub_s(int32_t n, int32_t m)
+{
+ return ((int64_t)n - m) >> 1;
+}
+
+DO_2OP_S(vhadds, do_vhadd_s)
+DO_2OP_U(vhaddu, do_vhadd_u)
+DO_2OP_S(vhsubs, do_vhsub_s)
+DO_2OP_U(vhsubu, do_vhsub_u)
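+
+/*
+ * Worked example (illustrative): the 64-bit intermediate preserves
+ * the carry-out, e.g. do_vhadd_u(0xffffffff, 1) evaluates
+ * 0x100000000 >> 1 == 0x80000000, which a 32-bit addition would
+ * have lost.
+ */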
+
+#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
+#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
+#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
+
+DO_2OP_S(vshls, DO_VSHLS)
+DO_2OP_U(vshlu, DO_VSHLU)
+DO_2OP_S(vrshls, DO_VRSHLS)
+DO_2OP_U(vrshlu, DO_VRSHLU)
+
+#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
+#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)
+
+DO_2OP_S(vrhadds, DO_RHADD_S)
+DO_2OP_U(vrhaddu, DO_RHADD_U)
+
+static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
+ uint32_t inv, uint32_t carry_in, bool update_flags)
+{
+ uint16_t mask = mve_element_mask(env);
+ unsigned e;
+
+ /* If any additions trigger, we will update flags. */
+ if (mask & 0x1111) {
+ update_flags = true;
+ }
+
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ uint64_t r = carry_in;
+ r += n[H4(e)];
+ r += m[H4(e)] ^ inv;
+ if (mask & 1) {
+ carry_in = r >> 32;
+ }
+ mergemask(&d[H4(e)], r, mask);
+ }
+
+ if (update_flags) {
+ /* Store C, clear NZV. */
+ env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
+ env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
+ }
+ mve_advance_vpt(env);
+}
+
+void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+ bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
+ do_vadc(env, vd, vn, vm, 0, carry_in, false);
+}
+
+void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+ bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
+ do_vadc(env, vd, vn, vm, -1, carry_in, false);
+}
+
+
+void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+ do_vadc(env, vd, vn, vm, 0, 0, true);
+}
+
+void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+ do_vadc(env, vd, vn, vm, -1, 1, true);
+}
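+
+/*
+ * Note (illustrative): the subtracting forms reuse do_vadc() with
+ * inv == -1, so each beat computes n + ~m + carry, the usual
+ * two's-complement identity for n - m - !carry.
+ */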
+
+#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE r[16 / ESIZE]; \
+ /* Calculate all results first to avoid overwriting inputs */ \
+ for (e = 0; e < 16 / ESIZE; e++) { \
+ if (!(e & 1)) { \
+ r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
+ } else { \
+ r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
+ } \
+ } \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], r[e], mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCADD_ALL(OP, FN0, FN1) \
+ DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
+ DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
+ DO_VCADD(OP##w, 4, int32_t, FN0, FN1)
+
+DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
+DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
+DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
+DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
+
+static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
+{
+ if (val > max) {
+ *s = true;
+ return max;
+ } else if (val < min) {
+ *s = true;
+ return min;
+ }
+ return val;
+}
+
+#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
+#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
+#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)
+
+#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
+#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
+#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)
+
+#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
+#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
+#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)
+
+#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
+#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
+#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)
+
+/*
+ * For QDMULH and QRDMULH we simplify "double and shift by esize" into
+ * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
+ */
+#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
+ INT8_MIN, INT8_MAX, s)
+#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
+ INT16_MIN, INT16_MAX, s)
+#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
+ INT32_MIN, INT32_MAX, s)
+
+#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
+ INT8_MIN, INT8_MAX, s)
+#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
+ INT16_MIN, INT16_MAX, s)
+#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
+ INT32_MIN, INT32_MAX, s)
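+
+/*
+ * Worked example (illustrative): DO_QDMULH_H(-32768, -32768) computes
+ * (2^30) >> 15 == 32768, which exceeds INT16_MAX, so the result
+ * saturates to 32767 with the sat flag set -- the classic Q15
+ * corner case.
+ */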
+
+DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)
+
+DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
+
+DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
+DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
+DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
+DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
+DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
+DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)
+
+DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
+DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
+DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
+DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
+DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
+DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)
+
+/*
+ * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
+ * and friends wanting a uint32_t* sat and our needing a bool*.
+ */
+#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
+ ({ \
+ uint32_t su32 = 0; \
+ typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
+ if (su32) { \
+ *satp = true; \
+ } \
+ r; \
+ })
+
+#define DO_SQSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
+#define DO_UQSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
+#define DO_SQRSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
+#define DO_UQRSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)
+#define DO_SUQSHL_OP(N, M, satp) \
+ WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp)
+
+DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
+DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
+DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
+DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
+
+/*
+ * Multiply add dual returning high half
+ * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
+ * whether to add the rounding constant, and the pointer to the
+ * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
+ * saturate to twice the input size and return the high half; or
+ * (A * B - C * D) etc for VQDMLSDH.
+ */
+#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ if ((e & 1) == XCHG) { \
+ TYPE r = FN(n[H##ESIZE(e)], \
+ m[H##ESIZE(e - XCHG)], \
+ n[H##ESIZE(e + (1 - 2 * XCHG))], \
+ m[H##ESIZE(e + (1 - XCHG))], \
+ ROUND, &sat); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
+ int round, bool *sat)
+{
+ int64_t m1 = (int64_t)a * b;
+ int64_t m2 = (int64_t)c * d;
+ int64_t r;
+ /*
+ * Architecturally we should do the entire add, double, round
+ * and then check for saturation. We do three saturating adds,
+ * but we need to be careful about the order. If the first
+ * m1 + m2 saturates then it's impossible for the *2+rc to
+ * bring it back into the non-saturated range. However, if
+ * m1 + m2 is negative then it's possible that doing the doubling
+ * would take the intermediate result below INT64_MIN and the
+ * addition of the rounding constant then brings it back in range.
+ * So we add half the rounding constant before doubling rather
+ * than adding the rounding constant after the doubling.
+ */
+ if (sadd64_overflow(m1, m2, &r) ||
+ sadd64_overflow(r, (round << 30), &r) ||
+ sadd64_overflow(r, r, &r)) {
+ *sat = true;
+ return r < 0 ? INT32_MAX : INT32_MIN;
+ }
+ return r >> 32;
+}
+
+static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
+ int round, bool *sat)
+{
+ int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
+ int round, bool *sat)
+{
+ int64_t m1 = (int64_t)a * b;
+ int64_t m2 = (int64_t)c * d;
+ int64_t r;
+ /* The same ordering issue as in do_vqdmladh_w applies here too */
+ if (ssub64_overflow(m1, m2, &r) ||
+ sadd64_overflow(r, (round << 30), &r) ||
+ sadd64_overflow(r, r, &r)) {
+ *sat = true;
+ return r < 0 ? INT32_MAX : INT32_MIN;
+ }
+ return r >> 32;
+}
+
+DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
+DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)
+
+DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
+DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
+DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
+DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)
+
+DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
+DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)
+
+DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
+DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
+DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
+DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
+
+#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
+ mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* "accumulating" version where FN takes d as well as n and m */
+#define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat), \
+ mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op scalar helpers for all sizes */
+#define DO_2OP_SCALAR_U(OP, FN) \
+ DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
+ DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
+ DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
+#define DO_2OP_SCALAR_S(OP, FN) \
+ DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
+ DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
+ DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
+
+#define DO_2OP_ACC_SCALAR_U(OP, FN) \
+ DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN) \
+ DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN) \
+ DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)
+
+DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
+DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
+DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
+DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
+DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
+DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
+DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)
+
+DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
+DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
+DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
+DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
+DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
+DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)
+
+DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
+DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
+DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
+DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
+DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
+DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)
+
+DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
+DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
+DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
+
+static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
+{
+ int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
+ return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
+}
+
+static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
+ int round, bool *sat)
+{
+ int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
+}
+
+static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
+ int round, bool *sat)
+{
+ /*
+ * Architecturally we should do the entire add, double, round
+ * and then check for saturation. We do three saturating adds,
+ * but we need to be careful about the order. If the first
+ * m1 + m2 saturates then it's impossible for the *2+rc to
+ * bring it back into the non-saturated range. However, if
+ * m1 + m2 is negative then it's possible that doing the doubling
+ * would take the intermediate result below INT64_MIN and the
+ * addition of the rounding constant then brings it back in range.
+ * So we add half the rounding constant and half the "c << esize"
+ * before doubling rather than adding the rounding constant after
+ * the doubling.
+ */
+ int64_t m1 = (int64_t)a * b;
+ int64_t m2 = (int64_t)c << 31;
+ int64_t r;
+ if (sadd64_overflow(m1, m2, &r) ||
+ sadd64_overflow(r, (round << 30), &r) ||
+ sadd64_overflow(r, r, &r)) {
+ *sat = true;
+ return r < 0 ? INT32_MAX : INT32_MIN;
+ }
+ return r >> 32;
+}
+
+/*
+ * The *MLAH insns are vector * scalar + vector;
+ * the *MLASH insns are vector * vector + scalar
+ */
+#define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S)
+#define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S)
+#define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S)
+#define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S)
+#define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S)
+#define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S)
+
+#define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S)
+#define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S)
+#define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S)
+#define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S)
+#define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S)
+#define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S)
+
+DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B)
+DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H)
+DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W)
+
+DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B)
+DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H)
+DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H)
+DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W)
+
+/* Vector by scalar plus vector */
+#define DO_VMLA(D, N, M) ((N) * (M) + (D))
+
+DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA)
+
+/* Vector by vector plus scalar */
+#define DO_VMLAS(D, N, M) ((N) * (D) + (M))
+
+DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS)
+
+/*
+ * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
+ * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
+ * SATMASK specifies which bits of the predicate mask matter for determining
+ * whether to propagate a saturation indication into FPSCR.QC -- for
+ * the 16x16->32 case we must check only the bit corresponding to the T or B
+ * half that we used, but for the 32x32->64 case we propagate if the mask
+ * bit is set for either half.
+ */
+#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ uint32_t rm) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *n = vn; \
+ TYPE m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ bool qc = false; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
+ mergemask(&d[H##LESIZE(le)], r, mask); \
+ qc |= sat && (mask & SATMASK); \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
+{
+ int64_t r = ((int64_t)n * m) * 2;
+ return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
+}
+
+static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
+{
+ /* The multiply can't overflow, but the doubling might */
+ int64_t r = (int64_t)n * m;
+ if (r > INT64_MAX / 2) {
+ *sat = true;
+ return INT64_MAX;
+ } else if (r < INT64_MIN / 2) {
+ *sat = true;
+ return INT64_MIN;
+ } else {
+ return r * 2;
+ }
+}
+
+#define SATMASK16B 1
+#define SATMASK16T (1 << 2)
+#define SATMASK32 ((1 << 4) | 1)
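+
+/*
+ * Illustrative reading of the SATMASK values: SATMASK16B and
+ * SATMASK16T select the predicate bit of the B or T input halfword
+ * within each 32-bit output element, while SATMASK32 == 0x11 checks
+ * the bits for both 32-bit halves of a 64-bit result, as described
+ * above DO_2OP_SAT_SCALAR_L.
+ */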
+
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
+ do_qdmullh, SATMASK16B)
+DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
+ do_qdmullw, SATMASK32)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
+ do_qdmullh, SATMASK16T)
+DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
+ do_qdmullw, SATMASK32)
+
+/*
+ * Long saturating ops
+ */
+#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
+ void *vm) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ bool qc = false; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
+ LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
+ mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
+ qc |= sat && (mask & SATMASK); \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
+DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
+DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
+DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
+
+static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
+{
+ m &= 0xff;
+ if (m == 0) {
+ return 0;
+ }
+ n = revbit8(n);
+ if (m < 8) {
+ n >>= 8 - m;
+ }
+ return n;
+}
+
+static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
+{
+ m &= 0xff;
+ if (m == 0) {
+ return 0;
+ }
+ n = revbit16(n);
+ if (m < 16) {
+ n >>= 16 - m;
+ }
+ return n;
+}
+
+static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
+{
+ m &= 0xff;
+ if (m == 0) {
+ return 0;
+ }
+ n = revbit32(n);
+ if (m < 32) {
+ n >>= 32 - m;
+ }
+ return n;
+}
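+
+/*
+ * Worked example (illustrative): do_vbrsrb(0x01, 4) reverses the byte
+ * to 0x80 and shifts right by 8 - 4 == 4, returning 0x08; i.e. VBRSR
+ * with m == 4 bit-reverses just the low four bits.
+ */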
+
+DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
+DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
+DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
+
+/*
+ * Multiply add long dual accumulate ops.
+ */
+#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint64_t a) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *n = vn, *m = vm; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ if (e & 1) { \
+ a ODDACC \
+ (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } else { \
+ a EVENACC \
+ (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return a; \
+ }
+
+DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
+DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
+DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
+DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)
+
+DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
+DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)
+
+DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
+DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
+DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
+DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
+
+/*
+ * Multiply add dual accumulate ops
+ */
+#define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint32_t a) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *n = vn, *m = vm; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ if (e & 1) { \
+ a ODDACC \
+ n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } else { \
+ a EVENACC \
+ n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
+ } \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return a; \
+ }
+
+#define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC)
+
+#define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \
+ DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC)
+
+DO_DAV_S(vmladavs, false, +=, +=)
+DO_DAV_U(vmladavu, false, +=, +=)
+DO_DAV_S(vmlsdav, false, +=, -=)
+DO_DAV_S(vmladavsx, true, +=, +=)
+DO_DAV_S(vmlsdavx, true, +=, -=)
+
+/*
+ * Rounding multiply add long dual accumulate high. In the pseudocode
+ * this is implemented with a 72-bit internal accumulator value of which
+ * the top 64 bits are returned. We optimize this to avoid having to
+ * use 128-bit arithmetic -- we can do this because the 72-bit accumulator
+ * is squashed back into 64-bits after each beat.
+ */
+#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \
+ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint64_t a) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *n = vn, *m = vm; \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
+ if (mask & 1) { \
+ LTYPE mul; \
+ if (e & 1) { \
+ mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \
+ if (SUB) { \
+ mul = -mul; \
+ } \
+ } else { \
+ mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \
+ } \
+ mul = (mul >> 8) + ((mul >> 7) & 1); \
+ a += mul; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return a; \
+ }
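+
+/*
+ * Note (illustrative): the "squash" is the rounding shift
+ * mul = (mul >> 8) + ((mul >> 7) & 1), e.g. mul == 0x80 becomes
+ * 0 + 1 == 1; keeping only the top 64 bits of the notional 72-bit
+ * accumulator after each beat is what keeps the helper within
+ * 64-bit arithmetic.
+ */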
+
+DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false)
+DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false)
+
+DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false)
+
+DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true)
+DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true)
+
+/* Vector add across vector */
+#define DO_VADDV(OP, ESIZE, TYPE) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+ uint32_t ra) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ ra += m[H##ESIZE(e)]; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ } \
+
+DO_VADDV(vaddvsb, 1, int8_t)
+DO_VADDV(vaddvsh, 2, int16_t)
+DO_VADDV(vaddvsw, 4, int32_t)
+DO_VADDV(vaddvub, 1, uint8_t)
+DO_VADDV(vaddvuh, 2, uint16_t)
+DO_VADDV(vaddvuw, 4, uint32_t)
+
+/*
+ * Vector max/min across vector. Unlike VADDV, we must
+ * read ra as the element size, not its full width.
+ * We work with int64_t internally for simplicity.
+ */
+#define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+ uint32_t ra_in) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm; \
+ int64_t ra = (RATYPE)ra_in; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ ra = FN(ra, m[H##ESIZE(e)]); \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ } \
+
+#define DO_VMAXMINV_U(INSN, FN) \
+ DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \
+ DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \
+ DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN)
+#define DO_VMAXMINV_S(INSN, FN) \
+ DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \
+ DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \
+ DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN)
+
+/*
+ * Helpers for max and min of absolute values across vector:
+ * note that we only take the absolute value of 'm', not 'n'
+ */
+static int64_t do_maxa(int64_t n, int64_t m)
+{
+ if (m < 0) {
+ m = -m;
+ }
+ return MAX(n, m);
+}
+
+static int64_t do_mina(int64_t n, int64_t m)
+{
+ if (m < 0) {
+ m = -m;
+ }
+ return MIN(n, m);
+}
+
+DO_VMAXMINV_S(vmaxvs, DO_MAX)
+DO_VMAXMINV_U(vmaxvu, DO_MAX)
+DO_VMAXMINV_S(vminvs, DO_MIN)
+DO_VMAXMINV_U(vminvu, DO_MIN)
+/*
+ * VMAXAV, VMINAV treat the general purpose input as unsigned
+ * and the vector elements as signed.
+ */
+DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa)
+DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa)
+DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa)
+DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
+DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
+DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
+
+#define DO_VABAV(OP, ESIZE, TYPE) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint32_t ra) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm, *n = vn; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ int64_t n0 = n[H##ESIZE(e)]; \
+ int64_t m0 = m[H##ESIZE(e)]; \
+ uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \
+ ra += r; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ }
+
+DO_VABAV(vabavsb, 1, int8_t)
+DO_VABAV(vabavsh, 2, int16_t)
+DO_VABAV(vabavsw, 4, int32_t)
+DO_VABAV(vabavub, 1, uint8_t)
+DO_VABAV(vabavuh, 2, uint16_t)
+DO_VABAV(vabavuw, 4, uint32_t)
+
+#define DO_VADDLV(OP, TYPE, LTYPE) \
+ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+ uint64_t ra) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm; \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) { \
+ if (mask & 1) { \
+ ra += (LTYPE)m[H4(e)]; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ } \
+
+DO_VADDLV(vaddlv_s, int32_t, int64_t)
+DO_VADDLV(vaddlv_u, uint32_t, uint64_t)
+
+/* Shifts by immediate */
+#define DO_2SHIFT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(m[H##ESIZE(e)], shift), mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ mergemask(&d[H##ESIZE(e)], \
+ FN(m[H##ESIZE(e)], shift, &sat), mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* provide unsigned 2-op shift helpers for all sizes */
+#define DO_2SHIFT_U(OP, FN) \
+ DO_2SHIFT(OP##b, 1, uint8_t, FN) \
+ DO_2SHIFT(OP##h, 2, uint16_t, FN) \
+ DO_2SHIFT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_S(OP, FN) \
+ DO_2SHIFT(OP##b, 1, int8_t, FN) \
+ DO_2SHIFT(OP##h, 2, int16_t, FN) \
+ DO_2SHIFT(OP##w, 4, int32_t, FN)
+
+#define DO_2SHIFT_SAT_U(OP, FN) \
+ DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \
+ DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \
+ DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN)
+#define DO_2SHIFT_SAT_S(OP, FN) \
+ DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \
+ DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \
+ DO_2SHIFT_SAT(OP##w, 4, int32_t, FN)
+
+DO_2SHIFT_U(vshli_u, DO_VSHLU)
+DO_2SHIFT_S(vshli_s, DO_VSHLS)
+DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP)
+DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP)
+DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
+DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
+DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
+DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP)
+DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP)
+
+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ uint64_t *d = vd, *m = vm; \
+ uint16_t mask; \
+ uint64_t shiftmask; \
+ unsigned e; \
+ if (shift == ESIZE * 8) { \
+ /* \
+ * Only VSRI can shift by <dt>; it should mean "don't \
+ * update the destination". The generic logic can't handle \
+ * this because it would try to shift by an out-of-range \
+ * amount, so special case it here. \
+ */ \
+ goto done; \
+ } \
+ assert(shift < ESIZE * 8); \
+ mask = mve_element_mask(env); \
+ /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \
+ shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \
+ for (e = 0; e < 16 / 8; e++, mask >>= 8) { \
+ uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \
+ (d[H8(e)] & ~shiftmask); \
+ mergemask(&d[H8(e)], r, mask); \
+ } \
+done: \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
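+
+/*
+ * Worked example: for byte-sized VSRI with shift == 3,
+ * SHR_MASK(8, 3) == MAKE_64BIT_MASK(0, 5) == 0x1f, replicated across
+ * the 64-bit lane as 0x1f1f1f1f1f1f1f1f, so each byte becomes
+ *   ((m >> 3) & 0x1f) | (d & 0xe0)
+ * i.e. the top three destination bits are preserved. VSLI with the
+ * same shift uses SHL_MASK(8, 3) == 0xf8 and preserves the bottom
+ * three bits instead.
+ */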
+
+/*
+ * Long shifts taking half-sized inputs from top or bottom of the input
+ * vector and producing a double-width result. ESIZE, TYPE are for
+ * the input, and LESIZE, LTYPE for the output.
+ * Unlike the normal shift helpers, we do not handle negative shift counts,
+ * because the long shift is strictly left-only.
+ */
+#define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ LTYPE *d = vd; \
+ TYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ assert(shift <= 16); \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \
+ mergemask(&d[H##LESIZE(le)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSHLL_ALL(OP, TOP) \
+ DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \
+ DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \
+ DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \
+ DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \
+
+DO_VSHLL_ALL(vshllb, false)
+DO_VSHLL_ALL(vshllt, true)
+
+/*
+ * Narrowing right shifts, taking a double sized input, shifting it
+ * and putting the result in either the top or bottom half of the output.
+ * ESIZE, TYPE are the output, and LESIZE, LTYPE the input.
+ */
+#define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ LTYPE *m = vm; \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ mask >>= ESIZE * TOP; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ TYPE r = FN(m[H##LESIZE(le)], shift); \
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSHRN_ALL(OP, FN) \
+ DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \
+ DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else if (sh == 64) {
+ return x >> 63;
+ } else {
+ return 0;
+ }
+}
+
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else {
+ /* Rounding the sign bit always produces 0. */
+ return 0;
+ }
+}
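+
+/*
+ * Worked examples: these round to nearest with ties towards plus
+ * infinity, e.g.
+ *   do_urshr(13, 2)  == (13 >> 2) + ((13 >> 1) & 1)   == 3 + 0 == 3
+ *   do_urshr(14, 2)  == (14 >> 2) + ((14 >> 1) & 1)   == 3 + 1 == 4
+ *   do_srshr(-13, 2) == (-13 >> 2) + ((-13 >> 1) & 1) == -4 + 1 == -3
+ */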
+
+DO_VSHRN_ALL(vshrn, DO_SHR)
+DO_VSHRN_ALL(vrshrn, do_urshr)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max,
+ bool *satp)
+{
+ if (val > max) {
+ *satp = true;
+ return max;
+ } else if (val < min) {
+ *satp = true;
+ return min;
+ } else {
+ return val;
+ }
+}
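+
+/*
+ * E.g. do_sat_bhs(300, INT8_MIN, INT8_MAX, &sat) returns 127 and sets
+ * sat to true; an in-range value is returned unchanged and *satp is
+ * left alone (the flag is only ever set, never cleared, so callers
+ * can accumulate it across lanes).
+ */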
+
+/* Saturating narrowing right shifts */
+#define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \
+ void *vm, uint32_t shift) \
+ { \
+ LTYPE *m = vm; \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ bool qc = false; \
+ unsigned le; \
+ mask >>= ESIZE * TOP; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VSHRN_SAT_UB(BOP, TOP, FN) \
+ DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VSHRN_SAT_UH(BOP, TOP, FN) \
+ DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+ DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VSHRN_SAT_SB(BOP, TOP, FN) \
+ DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+ DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VSHRN_SAT_SH(BOP, TOP, FN) \
+ DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+ DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_SHRN_SB(N, M, SATP) \
+ do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP)
+#define DO_SHRN_UB(N, M, SATP) \
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+#define DO_SHRUN_B(N, M, SATP) \
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP)
+
+#define DO_SHRN_SH(N, M, SATP) \
+ do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP)
+#define DO_SHRN_UH(N, M, SATP) \
+ do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+#define DO_SHRUN_H(N, M, SATP) \
+ do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP)
+
+#define DO_RSHRN_SB(N, M, SATP) \
+ do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP)
+#define DO_RSHRN_UB(N, M, SATP) \
+ do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP)
+#define DO_RSHRUN_B(N, M, SATP) \
+ do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP)
+
+#define DO_RSHRN_SH(N, M, SATP) \
+ do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP)
+#define DO_RSHRN_UH(N, M, SATP) \
+ do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP)
+#define DO_RSHRUN_H(N, M, SATP) \
+ do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP)
+
+DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB)
+DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH)
+DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB)
+DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH)
+DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B)
+DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H)
+
+DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB)
+DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH)
+DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB)
+DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
+DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
+DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
+
+#define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ LTYPE *m = vm; \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned le; \
+ mask >>= ESIZE * TOP; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], \
+ m[H##LESIZE(le)], mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t)
+DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t)
+DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t)
+DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t)
+
+#define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ LTYPE *m = vm; \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ bool qc = false; \
+ unsigned le; \
+ mask >>= ESIZE * TOP; \
+ for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
+ bool sat = false; \
+ TYPE r = FN(m[H##LESIZE(le)], &sat); \
+ mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VMOVN_SAT_UB(BOP, TOP, FN) \
+ DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \
+ DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN)
+
+#define DO_VMOVN_SAT_UH(BOP, TOP, FN) \
+ DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \
+ DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN)
+
+#define DO_VMOVN_SAT_SB(BOP, TOP, FN) \
+ DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \
+ DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN)
+
+#define DO_VMOVN_SAT_SH(BOP, TOP, FN) \
+ DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \
+ DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN)
+
+#define DO_VQMOVN_SB(N, SATP) \
+ do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP)
+#define DO_VQMOVN_UB(N, SATP) \
+ do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP)
+#define DO_VQMOVUN_B(N, SATP) \
+ do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP)
+
+#define DO_VQMOVN_SH(N, SATP) \
+ do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP)
+#define DO_VQMOVN_UH(N, SATP) \
+ do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP)
+#define DO_VQMOVUN_H(N, SATP) \
+ do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP)
+
+DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB)
+DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH)
+DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB)
+DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH)
+DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B)
+DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H)
+
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
+ uint32_t shift)
+{
+ uint32_t *d = vd;
+ uint16_t mask = mve_element_mask(env);
+ unsigned e;
+ uint32_t r;
+
+ /*
+ * For each 32-bit element, we shift it left, bringing in the
+ * low 'shift' bits of rdm at the bottom. Bits shifted out at
+ * the top become the new rdm, if the predicate mask permits.
+ * The final rdm value is returned to update the register.
+ * shift == 0 here means "shift by 32 bits".
+ */
+ if (shift == 0) {
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ r = rdm;
+ if (mask & 1) {
+ rdm = d[H4(e)];
+ }
+ mergemask(&d[H4(e)], r, mask);
+ }
+ } else {
+ uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
+
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ r = (d[H4(e)] << shift) | (rdm & shiftmask);
+ if (mask & 1) {
+ rdm = d[H4(e)] >> (32 - shift);
+ }
+ mergemask(&d[H4(e)], r, mask);
+ }
+ }
+ mve_advance_vpt(env);
+ return rdm;
+}
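+
+/*
+ * Worked example: with shift == 8 and rdm == 0xab, each active 32-bit
+ * lane e is replaced by
+ *   r = (d[H4(e)] << 8) | (rdm & 0xff);
+ * and the eight bits shifted out of the top, d[H4(e)] >> 24, become
+ * the rdm fed into the next lane, so the whole quadword acts as one
+ * long shift register threaded through Rdm.
+ */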
+
+uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_sqrshl_d(n, -(int8_t)shift, false, NULL);
+}
+
+uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_uqrshl_d(n, (int8_t)shift, false, NULL);
+}
+
+uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_sqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
+
+uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_uqrshl_d(n, (int8_t)shift, false, &env->QF);
+}
+
+uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_uqrshl_d(n, (int8_t)shift, true, &env->QF);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ int64_t val, extval;
+
+ if (shift <= -48) {
+ /* Rounding the sign bit always produces 0. */
+ if (round) {
+ return 0;
+ }
+ return src >> 63;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ val = (src >> 1) + (src & 1);
+ } else {
+ val = src >> -shift;
+ }
+ extval = sextract64(val, 0, 48);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ } else if (shift < 48) {
+ int64_t extval = sextract64(src << shift, 0, 48);
+ if (!sat || src == (extval >> shift)) {
+ return extval;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return src >= 0 ? MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17);
+}
+
+/* Operate on 64-bit values, but saturate at 48 bits */
+static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ uint64_t val, extval;
+
+ if (shift <= -(48 + round)) {
+ return 0;
+ } else if (shift < 0) {
+ if (round) {
+ val = src >> (-shift - 1);
+ val = (val >> 1) + (val & 1);
+ } else {
+ val = src >> -shift;
+ }
+ extval = extract64(val, 0, 48);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ } else if (shift < 48) {
+ uint64_t extval = extract64(src << shift, 0, 48);
+ if (!sat || src == (extval >> shift)) {
+ return extval;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return MAKE_64BIT_MASK(0, 48);
+}
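+
+/*
+ * For example, do_uqrshl48_d(1ULL << 47, 1, false, &sat) overflows
+ * the 48-bit range (extract64(src << 1, 0, 48) no longer shifts back
+ * to src), so it returns the unsigned maximum MAKE_64BIT_MASK(0, 48)
+ * and sets *sat. The signed variant saturates negative overflow to
+ * MAKE_64BIT_MASK(47, 17), i.e. the most negative 48-bit value
+ * already sign-extended to 64 bits.
+ */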
+
+uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF);
+}
+
+uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift)
+{
+ return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF);
+}
+
+uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+ return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF);
+}
+
+uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+ return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF);
+}
+
+uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
+{
+ return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
+}
+
+#define DO_VIDUP(OP, ESIZE, TYPE, FN) \
+ uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
+ uint32_t offset, uint32_t imm) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], offset, mask); \
+ offset = FN(offset, imm); \
+ } \
+ mve_advance_vpt(env); \
+ return offset; \
+ }
+
+#define DO_VIWDUP(OP, ESIZE, TYPE, FN) \
+ uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \
+ uint32_t offset, uint32_t wrap, \
+ uint32_t imm) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], offset, mask); \
+ offset = FN(offset, wrap, imm); \
+ } \
+ mve_advance_vpt(env); \
+ return offset; \
+ }
+
+#define DO_VIDUP_ALL(OP, FN) \
+ DO_VIDUP(OP##b, 1, int8_t, FN) \
+ DO_VIDUP(OP##h, 2, int16_t, FN) \
+ DO_VIDUP(OP##w, 4, int32_t, FN)
+
+#define DO_VIWDUP_ALL(OP, FN) \
+ DO_VIWDUP(OP##b, 1, int8_t, FN) \
+ DO_VIWDUP(OP##h, 2, int16_t, FN) \
+ DO_VIWDUP(OP##w, 4, int32_t, FN)
+
+static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
+{
+ offset += imm;
+ if (offset == wrap) {
+ offset = 0;
+ }
+ return offset;
+}
+
+static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
+{
+ if (offset == 0) {
+ offset = wrap;
+ }
+ offset -= imm;
+ return offset;
+}
+
+DO_VIDUP_ALL(vidup, DO_ADD)
+DO_VIWDUP_ALL(viwdup, do_add_wrap)
+DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
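+
+/*
+ * For example, a VIWDUP with wrap == 8 and imm == 2 produces the
+ * cycle 0, 2, 4, 6, 0, ...: do_add_wrap(6, 8, 2) first computes
+ * 6 + 2 == 8, which equals wrap and so resets to 0. do_sub_wrap runs
+ * the same cycle backwards, mapping an offset of 0 to wrap before
+ * subtracting: do_sub_wrap(0, 8, 2) == 6.
+ */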
+
+/*
+ * Vector comparison.
+ * P0 bits for non-executed beats (where eci_mask is 0) are unchanged.
+ * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
+ * P0 bits otherwise are updated with the results of the comparisons.
+ * We must also keep unchanged the MASK fields at the top of v7m.vpr.
+ */
+#define DO_VCMP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
+ { \
+ TYPE *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ uint16_t beatpred = 0; \
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++) { \
+ bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \
+ /* Comparison sets 0/1 bits for each byte in the element */ \
+ beatpred |= r * emask; \
+ emask <<= ESIZE; \
+ } \
+ beatpred &= mask; \
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
+ (beatpred & eci_mask); \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *n = vn; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ uint16_t beatpred = 0; \
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++) { \
+ bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \
+ /* Comparison sets 0/1 bits for each byte in the element */ \
+ beatpred |= r * emask; \
+ emask <<= ESIZE; \
+ } \
+ beatpred &= mask; \
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
+ (beatpred & eci_mask); \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCMP_S(OP, FN) \
+ DO_VCMP(OP##b, 1, int8_t, FN) \
+ DO_VCMP(OP##h, 2, int16_t, FN) \
+ DO_VCMP(OP##w, 4, int32_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN)
+
+#define DO_VCMP_U(OP, FN) \
+ DO_VCMP(OP##b, 1, uint8_t, FN) \
+ DO_VCMP(OP##h, 2, uint16_t, FN) \
+ DO_VCMP(OP##w, 4, uint32_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \
+ DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN)
+
+#define DO_EQ(N, M) ((N) == (M))
+#define DO_NE(N, M) ((N) != (M))
+#define DO_GE(N, M) ((N) >= (M))
+#define DO_LT(N, M) ((N) < (M))
+#define DO_GT(N, M) ((N) > (M))
+#define DO_LE(N, M) ((N) <= (M))
+
+DO_VCMP_U(vcmpeq, DO_EQ)
+DO_VCMP_U(vcmpne, DO_NE)
+DO_VCMP_U(vcmpcs, DO_GE)
+DO_VCMP_U(vcmphi, DO_GT)
+DO_VCMP_S(vcmpge, DO_GE)
+DO_VCMP_S(vcmplt, DO_LT)
+DO_VCMP_S(vcmpgt, DO_GT)
+DO_VCMP_S(vcmple, DO_LE)
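+
+/*
+ * As an example of the predicate layout: for vcmpeqh (ESIZE == 2), a
+ * match in halfword lane 0 contributes 0x0003 to beatpred and a match
+ * in lane 1 contributes 0x000c, i.e. one bit per byte of the element.
+ * beatpred is then ANDed with the element mask and merged into VPR.P0
+ * only where eci_mask allows.
+ */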
+
+void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm)
+{
+ /*
+ * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n]
+ * but note that whether bytes are written to Qd is still subject
+ * to (all forms of) predication in the usual way.
+ */
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint16_t mask = mve_element_mask(env);
+ uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);
+ unsigned e;
+ for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) {
+ uint64_t r = m[H8(e)];
+ mergemask(&r, n[H8(e)], p0);
+ mergemask(&d[H8(e)], r, mask);
+ }
+ mve_advance_vpt(env);
+}
+
+void HELPER(mve_vpnot)(CPUARMState *env)
+{
+ /*
+ * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged.
+ * P0 bits for predicated lanes in executed beats (where mask is 0) are 0.
+ * P0 bits otherwise are inverted.
+ * (This is the same logic as VCMP.)
+ * This insn is itself subject to predication and to beat-wise execution,
+ * and after it executes VPT state advances in the usual way.
+ */
+ uint16_t mask = mve_element_mask(env);
+ uint16_t eci_mask = mve_eci_mask(env);
+ uint16_t beatpred = ~env->v7m.vpr & mask;
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask);
+ mve_advance_vpt(env);
+}
+
+/*
+ * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed,
+ * otherwise set according to value of Rn. The calculation of
+ * newmask here works in the same way as the calculation of the
+ * ltpmask in mve_element_mask(), but we have pre-calculated
+ * the masklen in the generated code.
+ */
+void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen)
+{
+ uint16_t mask = mve_element_mask(env);
+ uint16_t eci_mask = mve_eci_mask(env);
+ uint16_t newmask;
+
+ assert(masklen <= 16);
+ newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
+ newmask &= mask;
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask);
+ mve_advance_vpt(env);
+}
+
+#define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ bool qc = false; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ bool sat = false; \
+ mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \
+ qc |= sat & mask & 1; \
+ } \
+ if (qc) { \
+ env->vfp.qc[0] = qc; \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VQABS_B(N, SATP) \
+ do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP)
+#define DO_VQABS_H(N, SATP) \
+ do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP)
+#define DO_VQABS_W(N, SATP) \
+ do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP)
+
+#define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP)
+#define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP)
+#define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP)
+
+DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B)
+DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H)
+DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W)
+
+DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B)
+DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H)
+DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W)
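+
+/*
+ * E.g. vqabsb on an int8_t lane of -128 (assuming DO_ABS, defined
+ * earlier in this file, is the plain absolute value): the widened
+ * DO_ABS((int64_t)-128) is 128, which exceeds INT8_MAX, so the lane
+ * saturates to 127 and QC is set; a non-saturating VABS would wrap
+ * back to -128. VQNEG saturates the same input the same way.
+ */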
+
+/*
+ * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its
+ * absolute value; we then do an unsigned comparison.
+ */
+#define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
+ { \
+ UTYPE *d = vd; \
+ STYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ UTYPE r = DO_ABS(m[H##ESIZE(e)]); \
+ r = FN(d[H##ESIZE(e)], r); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX)
+DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX)
+DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX)
+DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN)
+DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN)
+DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN)
+
+/*
+ * 2-operand floating point. Note that if an element is partially
+ * predicated we must do the FP operation to update the non-predicated
+ * bytes, but we must be careful to avoid updating the FP exception
+ * state unless byte 0 of the element was unpredicated.
+ */
+#define DO_2OP_FP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ TYPE r; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2OP_FP_ALL(OP, FN) \
+ DO_2OP_FP(OP##h, 2, float16, float16_##FN) \
+ DO_2OP_FP(OP##s, 4, float32, float32_##FN)
+
+DO_2OP_FP_ALL(vfadd, add)
+DO_2OP_FP_ALL(vfsub, sub)
+DO_2OP_FP_ALL(vfmul, mul)
+
+static inline float16 float16_abd(float16 a, float16 b, float_status *s)
+{
+ return float16_abs(float16_sub(a, b, s));
+}
+
+static inline float32 float32_abd(float32 a, float32 b, float_status *s)
+{
+ return float32_abs(float32_sub(a, b, s));
+}
+
+DO_2OP_FP_ALL(vfabd, abd)
+DO_2OP_FP_ALL(vmaxnm, maxnum)
+DO_2OP_FP_ALL(vminnm, minnum)
+
+static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s)
+{
+ return float16_maxnum(float16_abs(a), float16_abs(b), s);
+}
+
+static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s)
+{
+ return float32_maxnum(float32_abs(a), float32_abs(b), s);
+}
+
+static inline float16 float16_minnuma(float16 a, float16 b, float_status *s)
+{
+ return float16_minnum(float16_abs(a), float16_abs(b), s);
+}
+
+static inline float32 float32_minnuma(float32 a, float32 b, float_status *s)
+{
+ return float32_minnum(float32_abs(a), float32_abs(b), s);
+}
+
+DO_2OP_FP_ALL(vmaxnma, maxnuma)
+DO_2OP_FP_ALL(vminnma, minnuma)
+
+#define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ TYPE r[16 / ESIZE]; \
+ uint16_t tm, mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ /* Calculate all results first to avoid overwriting inputs */ \
+ for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) { \
+ if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ r[e] = 0; \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(tm & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ if (!(e & 1)) { \
+ r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst); \
+ } else { \
+ r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst); \
+ } \
+ } \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ mergemask(&d[H##ESIZE(e)], r[e], mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add)
+DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add)
+DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub)
+DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub)
+
+#define DO_VFMA(OP, ESIZE, TYPE, CHS) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ TYPE r; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = n[H##ESIZE(e)]; \
+ if (CHS) { \
+ r = TYPE##_chs(r); \
+ } \
+ r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \
+ 0, fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VFMA(vfmah, 2, float16, false)
+DO_VFMA(vfmas, 4, float32, false)
+DO_VFMA(vfmsh, 2, float16, true)
+DO_VFMA(vfmss, 4, float32, true)
+
+#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, void *vm) \
+ { \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ TYPE r0, r1, e1, e2, e3, e4; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst0, *fpst1; \
+ float_status scratch_fpst; \
+ /* We loop through pairs of elements at a time */ \
+ for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \
+ continue; \
+ } \
+ fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ fpst1 = fpst0; \
+ if (!(mask & 1)) { \
+ scratch_fpst = *fpst0; \
+ fpst0 = &scratch_fpst; \
+ } \
+ if (!(mask & (1 << ESIZE))) { \
+ scratch_fpst = *fpst1; \
+ fpst1 = &scratch_fpst; \
+ } \
+ switch (ROT) { \
+ case 0: \
+ e1 = m[H##ESIZE(e)]; \
+ e2 = n[H##ESIZE(e)]; \
+ e3 = m[H##ESIZE(e + 1)]; \
+ e4 = n[H##ESIZE(e)]; \
+ break; \
+ case 1: \
+ e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \
+ e2 = n[H##ESIZE(e + 1)]; \
+ e3 = m[H##ESIZE(e)]; \
+ e4 = n[H##ESIZE(e + 1)]; \
+ break; \
+ case 2: \
+ e1 = TYPE##_chs(m[H##ESIZE(e)]); \
+ e2 = n[H##ESIZE(e)]; \
+ e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \
+ e4 = n[H##ESIZE(e)]; \
+ break; \
+ case 3: \
+ e1 = m[H##ESIZE(e + 1)]; \
+ e2 = n[H##ESIZE(e + 1)]; \
+ e3 = TYPE##_chs(m[H##ESIZE(e)]); \
+ e4 = n[H##ESIZE(e + 1)]; \
+ break; \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \
+ r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \
+ mergemask(&d[H##ESIZE(e)], r0, mask); \
+ mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
+#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)
+
+#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
+#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)
+
+DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
+DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
+DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
+DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
+DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
+DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
+DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
+DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)
+
+DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
+DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
+DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
+DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
+DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
+DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
+DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
+DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)
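+
+/*
+ * Reading the ROT cases above as complex arithmetic, with lane pairs
+ * (e, e+1) holding (real, imag): each case accumulates partial
+ * products of n and m with m rotated by ROT * 90 degrees. For
+ * ROT == 1 (the #90 variants), the destination pair (d[e], d[e+1])
+ * accumulates (-n_imag * m_imag, n_imag * m_real), which is what the
+ * e1..e4 assignments in case 1 compute.
+ */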
+
+#define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE r, m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(n[H##ESIZE(e)], m, fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_2OP_FP_SCALAR_ALL(OP, FN) \
+ DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \
+ DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN)
+
+DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add)
+DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub)
+DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul)
+
+#define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vn, uint32_t rm) \
+ { \
+ TYPE *d = vd, *n = vn; \
+ TYPE r, m = rm; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+/* VFMAS is vector * vector + scalar, so swap op2 and op3 */
+#define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S)
+#define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S)
+
+/* VFMA is vector * scalar + vector */
+DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd)
+DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd)
+DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH)
+DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS)
+
+/* Floating point max/min across vector. */
+#define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+ uint32_t ra_in) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm; \
+ TYPE ra = (TYPE)ra_in; \
+ float_status *fpst = (ESIZE == 2) ? \
+ &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ TYPE v = m[H##ESIZE(e)]; \
+ if (TYPE##_is_signaling_nan(ra, fpst)) { \
+ ra = TYPE##_silence_nan(ra, fpst); \
+ float_raise(float_flag_invalid, fpst); \
+ } \
+ if (TYPE##_is_signaling_nan(v, fpst)) { \
+ v = TYPE##_silence_nan(v, fpst); \
+ float_raise(float_flag_invalid, fpst); \
+ } \
+ if (ABS) { \
+ v = TYPE##_abs(v); \
+ } \
+ ra = FN(ra, v, fpst); \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ } \
+
+#define NOP(X) (X)
+
+DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum)
+DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum)
+DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum)
+DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum)
+DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum)
+DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum)
+DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum)
+DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum)
+
+/* FP compares; note that all comparisons signal InvalidOp for QNaNs */
+#define DO_VCMP_FP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \
+ { \
+ TYPE *n = vn, *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ uint16_t beatpred = 0; \
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ bool r; \
+ for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \
+ if ((mask & emask) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & (1 << (e * ESIZE)))) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \
+ /* Comparison sets 0/1 bits for each byte in the element */ \
+ beatpred |= r * emask; \
+ } \
+ beatpred &= mask; \
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
+ (beatpred & eci_mask); \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ uint32_t rm) \
+ { \
+ TYPE *n = vn; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ uint16_t beatpred = 0; \
+ uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ bool r; \
+ for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \
+ if ((mask & emask) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & (1 << (e * ESIZE)))) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst); \
+ /* Comparison sets 0/1 bits for each byte in the element */ \
+ beatpred |= r * emask; \
+ } \
+ beatpred &= mask; \
+ env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \
+ (beatpred & eci_mask); \
+ mve_advance_vpt(env); \
+ }
+
+#define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \
+ DO_VCMP_FP(VOP, ESIZE, TYPE, FN) \
+ DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN)
+
+/*
+ * Some care is needed here to get the correct result for the unordered case.
+ * Architecturally EQ, GE and GT are defined to be false for unordered, but
+ * the NE, LT and LE comparisons are defined as simple logical inverses of
+ * EQ, GE and GT and so they must return true for unordered. The softfloat
+ * comparison functions float*_{eq,le,lt} all return false for unordered.
+ */
+#define DO_GE16(X, Y, S) float16_le(Y, X, S)
+#define DO_GE32(X, Y, S) float32_le(Y, X, S)
+#define DO_GT16(X, Y, S) float16_lt(Y, X, S)
+#define DO_GT32(X, Y, S) float32_lt(Y, X, S)
+
+DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq)
+DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq)
+
+DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq)
+DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq)
+
+DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16)
+DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32)
+
+DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16)
+DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32)
+
+DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16)
+DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32)
+
+DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16)
+DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32)
+
+#define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \
+ uint32_t shift) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ TYPE r; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(m[H##ESIZE(e)], shift, fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh)
+DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh)
+DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero)
+DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero)
+DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos)
+DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos)
+DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero)
+DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero)
+
+/* VCVT with specified rmode */
+#define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, \
+ void *vd, void *vm, uint32_t rmode) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ TYPE r; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ float_status *base_fpst = (ESIZE == 2) ? \
+ &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \
+ set_float_rounding_mode(rmode, base_fpst); \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = base_fpst; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(m[H##ESIZE(e)], 0, fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ set_float_rounding_mode(prev_rmode, base_fpst); \
+ mve_advance_vpt(env); \
+ }
+
+DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh)
+DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh)
+DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls)
+DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls)
+
+#define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S)
+#define DO_VRINT_RM_S(M, F, S) helper_rints(M, S)
+
+DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H)
+DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S)
+
+/*
+ * VCVT between halfprec and singleprec. As usual for halfprec
+ * conversions, FZ16 is ignored and AHP is observed.
+ */
+static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top)
+{
+ uint16_t *d = vd;
+ uint32_t *m = vm;
+ uint16_t r;
+ uint16_t mask = mve_element_mask(env);
+ bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
+ unsigned e;
+ float_status *fpst;
+ float_status scratch_fpst;
+ float_status *base_fpst = &env->vfp.standard_fp_status;
+ bool old_fz = get_flush_to_zero(base_fpst);
+ set_flush_to_zero(false, base_fpst);
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
+ continue;
+ }
+ fpst = base_fpst;
+ if (!(mask & 1)) {
+ /* We need the result but without updating flags */
+ scratch_fpst = *fpst;
+ fpst = &scratch_fpst;
+ }
+ r = float32_to_float16(m[H4(e)], ieee, fpst);
+ mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2));
+ }
+ set_flush_to_zero(old_fz, base_fpst);
+ mve_advance_vpt(env);
+}
+
+static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top)
+{
+ uint32_t *d = vd;
+ uint16_t *m = vm;
+ uint32_t r;
+ uint16_t mask = mve_element_mask(env);
+ bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP);
+ unsigned e;
+ float_status *fpst;
+ float_status scratch_fpst;
+ float_status *base_fpst = &env->vfp.standard_fp_status;
+ bool old_fiz = get_flush_inputs_to_zero(base_fpst);
+ set_flush_inputs_to_zero(false, base_fpst);
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) {
+ continue;
+ }
+ fpst = base_fpst;
+ if (!(mask & (1 << (top * 2)))) {
+ /* We need the result but without updating flags */
+ scratch_fpst = *fpst;
+ fpst = &scratch_fpst;
+ }
+ r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst);
+ mergemask(&d[H4(e)], r, mask);
+ }
+ set_flush_inputs_to_zero(old_fiz, base_fpst);
+ mve_advance_vpt(env);
+}
+
+void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm)
+{
+ do_vcvt_sh(env, vd, vm, 0);
+}
+void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm)
+{
+ do_vcvt_sh(env, vd, vm, 1);
+}
+void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm)
+{
+ do_vcvt_hs(env, vd, vm, 0);
+}
+void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm)
+{
+ do_vcvt_hs(env, vd, vm, 1);
+}
+
+#define DO_1OP_FP(OP, ESIZE, TYPE, FN) \
+ void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \
+ { \
+ TYPE *d = vd, *m = vm; \
+ TYPE r; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ float_status *fpst; \
+ float_status scratch_fpst; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \
+ continue; \
+ } \
+ fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \
+ &env->vfp.standard_fp_status; \
+ if (!(mask & 1)) { \
+ /* We need the result but without updating flags */ \
+ scratch_fpst = *fpst; \
+ fpst = &scratch_fpst; \
+ } \
+ r = FN(m[H##ESIZE(e)], fpst); \
+ mergemask(&d[H##ESIZE(e)], r, mask); \
+ } \
+ mve_advance_vpt(env); \
+ }
+
+DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int)
+DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int)
diff --git a/target/arm/neon-dp.decode b/target/arm/neon-dp.decode
new file mode 100644
index 000000000..fd3a01bfa
--- /dev/null
+++ b/target/arm/neon-dp.decode
@@ -0,0 +1,646 @@
+# AArch32 Neon data-processing instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# VFP/Neon register fields; same as vfp.decode
+%vm_dp 5:1 0:4
+%vn_dp 7:1 16:4
+%vd_dp 22:1 12:4
+
+# Encodings for Neon data processing instructions where the T32 encoding
+# is a simple transformation of the A32 encoding.
+# More specifically, this file covers instructions where the A32 encoding is
+# 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+# and the T32 encoding is
+# 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+# This file works on the A32 encoding only; calling code for T32 has to
+# transform the insn into the A32 version first.
+
+######################################################################
+# 3-reg-same grouping:
+# 1111 001 U 0 D sz:2 Vn:4 Vd:4 opc:4 N Q M op Vm:4
+######################################################################
+
+&3same vm vn vd q size
+
+@3same .... ... . . . size:2 .... .... .... . q:1 . . .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+@3same_q0 .... ... . . . size:2 .... .... .... . 0 . . .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp q=0
+
+# For FP insns the high bit of 'size' is used as part of opcode decode,
+# and the 'size' bit is 0 for 32-bit float and 1 for 16-bit float.
+# The %3same_fp_size field below converts this encoding to the same
+# MO_8/16/32/64 values that the integer Neon insns use.
+%3same_fp_size 20:1 !function=neon_3same_fp_size
+
+@3same_fp .... ... . . . . . .... .... .... . q:1 . . .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=%3same_fp_size
+@3same_fp_q0 .... ... . . . . . .... .... .... . 0 . . .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp q=0 size=%3same_fp_size
+
+VHADD_S_3s 1111 001 0 0 . .. .... .... 0000 . . . 0 .... @3same
+VHADD_U_3s 1111 001 1 0 . .. .... .... 0000 . . . 0 .... @3same
+VQADD_S_3s 1111 001 0 0 . .. .... .... 0000 . . . 1 .... @3same
+VQADD_U_3s 1111 001 1 0 . .. .... .... 0000 . . . 1 .... @3same
+
+VRHADD_S_3s 1111 001 0 0 . .. .... .... 0001 . . . 0 .... @3same
+VRHADD_U_3s 1111 001 1 0 . .. .... .... 0001 . . . 0 .... @3same
+
+@3same_logic .... ... . . . .. .... .... .... . q:1 .. .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0
+
+VAND_3s 1111 001 0 0 . 00 .... .... 0001 ... 1 .... @3same_logic
+VBIC_3s 1111 001 0 0 . 01 .... .... 0001 ... 1 .... @3same_logic
+VORR_3s 1111 001 0 0 . 10 .... .... 0001 ... 1 .... @3same_logic
+VORN_3s 1111 001 0 0 . 11 .... .... 0001 ... 1 .... @3same_logic
+VEOR_3s 1111 001 1 0 . 00 .... .... 0001 ... 1 .... @3same_logic
+VBSL_3s 1111 001 1 0 . 01 .... .... 0001 ... 1 .... @3same_logic
+VBIT_3s 1111 001 1 0 . 10 .... .... 0001 ... 1 .... @3same_logic
+VBIF_3s 1111 001 1 0 . 11 .... .... 0001 ... 1 .... @3same_logic
+
+VHSUB_S_3s 1111 001 0 0 . .. .... .... 0010 . . . 0 .... @3same
+VHSUB_U_3s 1111 001 1 0 . .. .... .... 0010 . . . 0 .... @3same
+
+VQSUB_S_3s 1111 001 0 0 . .. .... .... 0010 . . . 1 .... @3same
+VQSUB_U_3s 1111 001 1 0 . .. .... .... 0010 . . . 1 .... @3same
+
+VCGT_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 0 .... @3same
+VCGT_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 0 .... @3same
+VCGE_S_3s 1111 001 0 0 . .. .... .... 0011 . . . 1 .... @3same
+VCGE_U_3s 1111 001 1 0 . .. .... .... 0011 . . . 1 .... @3same
+
+# The _rev suffix indicates that Vn and Vm are reversed. This is
+# the case for shifts. In the Arm ARM these insns are documented
+# with the Vm and Vn fields in their usual places, but in the
+# assembly the operands are listed "backwards", ie in the order
+# Dd, Dm, Dn where other insns use Dd, Dn, Dm. For QEMU we choose
+# to consider Vm and Vn as being in different fields in the insn,
+# which allows us to avoid special-casing shifts in the trans_
+# function code. We would otherwise need to manually swap the operands
+# over to call Neon helper functions that are shared with AArch64,
+# which does not have this odd reversed-operand situation.
+@3same_rev .... ... . . . size:2 .... .... .... . q:1 . . .... \
+ &3same vn=%vm_dp vm=%vn_dp vd=%vd_dp
+
+VSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 0 .... @3same_rev
+VSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 0 .... @3same_rev
+
+# Insns operating on 64-bit elements (size!=0b11 handled elsewhere)
+# The _rev suffix indicates that Vn and Vm are reversed (as explained
+# by the comment for the @3same_rev format).
+@3same_64_rev .... ... . . . 11 .... .... .... . q:1 . . .... \
+ &3same vm=%vn_dp vn=%vm_dp vd=%vd_dp size=3
+
+{
+ VQSHL_S64_3s 1111 001 0 0 . .. .... .... 0100 . . . 1 .... @3same_64_rev
+ VQSHL_S_3s 1111 001 0 0 . .. .... .... 0100 . . . 1 .... @3same_rev
+}
+{
+ VQSHL_U64_3s 1111 001 1 0 . .. .... .... 0100 . . . 1 .... @3same_64_rev
+ VQSHL_U_3s 1111 001 1 0 . .. .... .... 0100 . . . 1 .... @3same_rev
+}
+{
+ VRSHL_S64_3s 1111 001 0 0 . .. .... .... 0101 . . . 0 .... @3same_64_rev
+ VRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 0 .... @3same_rev
+}
+{
+ VRSHL_U64_3s 1111 001 1 0 . .. .... .... 0101 . . . 0 .... @3same_64_rev
+ VRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 0 .... @3same_rev
+}
+{
+ VQRSHL_S64_3s 1111 001 0 0 . .. .... .... 0101 . . . 1 .... @3same_64_rev
+ VQRSHL_S_3s 1111 001 0 0 . .. .... .... 0101 . . . 1 .... @3same_rev
+}
+{
+ VQRSHL_U64_3s 1111 001 1 0 . .. .... .... 0101 . . . 1 .... @3same_64_rev
+ VQRSHL_U_3s 1111 001 1 0 . .. .... .... 0101 . . . 1 .... @3same_rev
+}
+
+VMAX_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 0 .... @3same
+VMAX_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 0 .... @3same
+VMIN_S_3s 1111 001 0 0 . .. .... .... 0110 . . . 1 .... @3same
+VMIN_U_3s 1111 001 1 0 . .. .... .... 0110 . . . 1 .... @3same
+
+VABD_S_3s 1111 001 0 0 . .. .... .... 0111 . . . 0 .... @3same
+VABD_U_3s 1111 001 1 0 . .. .... .... 0111 . . . 0 .... @3same
+
+VABA_S_3s 1111 001 0 0 . .. .... .... 0111 . . . 1 .... @3same
+VABA_U_3s 1111 001 1 0 . .. .... .... 0111 . . . 1 .... @3same
+
+VADD_3s 1111 001 0 0 . .. .... .... 1000 . . . 0 .... @3same
+VSUB_3s 1111 001 1 0 . .. .... .... 1000 . . . 0 .... @3same
+
+VTST_3s 1111 001 0 0 . .. .... .... 1000 . . . 1 .... @3same
+VCEQ_3s 1111 001 1 0 . .. .... .... 1000 . . . 1 .... @3same
+
+VMLA_3s 1111 001 0 0 . .. .... .... 1001 . . . 0 .... @3same
+VMLS_3s 1111 001 1 0 . .. .... .... 1001 . . . 0 .... @3same
+
+VMUL_3s 1111 001 0 0 . .. .... .... 1001 . . . 1 .... @3same
+VMUL_p_3s 1111 001 1 0 . .. .... .... 1001 . . . 1 .... @3same
+
+VPMAX_S_3s 1111 001 0 0 . .. .... .... 1010 . . . 0 .... @3same_q0
+VPMAX_U_3s 1111 001 1 0 . .. .... .... 1010 . . . 0 .... @3same_q0
+
+VPMIN_S_3s 1111 001 0 0 . .. .... .... 1010 . . . 1 .... @3same_q0
+VPMIN_U_3s 1111 001 1 0 . .. .... .... 1010 . . . 1 .... @3same_q0
+
+VQDMULH_3s 1111 001 0 0 . .. .... .... 1011 . . . 0 .... @3same
+VQRDMULH_3s 1111 001 1 0 . .. .... .... 1011 . . . 0 .... @3same
+
+VPADD_3s 1111 001 0 0 . .. .... .... 1011 . . . 1 .... @3same_q0
+
+VQRDMLAH_3s 1111 001 1 0 . .. .... .... 1011 ... 1 .... @3same
+
+@3same_crypto .... .... .... .... .... .... .... .... \
+ &3same vm=%vm_dp vn=%vn_dp vd=%vd_dp size=0 q=1
+
+SHA1C_3s 1111 001 0 0 . 00 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA1P_3s 1111 001 0 0 . 01 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA1M_3s 1111 001 0 0 . 10 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA1SU0_3s 1111 001 0 0 . 11 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA256H_3s 1111 001 1 0 . 00 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA256H2_3s 1111 001 1 0 . 01 .... .... 1100 . 1 . 0 .... @3same_crypto
+SHA256SU1_3s 1111 001 1 0 . 10 .... .... 1100 . 1 . 0 .... @3same_crypto
+
+VFMA_fp_3s 1111 001 0 0 . 0 . .... .... 1100 ... 1 .... @3same_fp
+VFMS_fp_3s 1111 001 0 0 . 1 . .... .... 1100 ... 1 .... @3same_fp
+
+VQRDMLSH_3s 1111 001 1 0 . .. .... .... 1100 ... 1 .... @3same
+
+VADD_fp_3s 1111 001 0 0 . 0 . .... .... 1101 ... 0 .... @3same_fp
+VSUB_fp_3s 1111 001 0 0 . 1 . .... .... 1101 ... 0 .... @3same_fp
+VPADD_fp_3s 1111 001 1 0 . 0 . .... .... 1101 ... 0 .... @3same_fp_q0
+VABD_fp_3s 1111 001 1 0 . 1 . .... .... 1101 ... 0 .... @3same_fp
+VMLA_fp_3s 1111 001 0 0 . 0 . .... .... 1101 ... 1 .... @3same_fp
+VMLS_fp_3s 1111 001 0 0 . 1 . .... .... 1101 ... 1 .... @3same_fp
+VMUL_fp_3s 1111 001 1 0 . 0 . .... .... 1101 ... 1 .... @3same_fp
+VCEQ_fp_3s 1111 001 0 0 . 0 . .... .... 1110 ... 0 .... @3same_fp
+VCGE_fp_3s 1111 001 1 0 . 0 . .... .... 1110 ... 0 .... @3same_fp
+VACGE_fp_3s 1111 001 1 0 . 0 . .... .... 1110 ... 1 .... @3same_fp
+VCGT_fp_3s 1111 001 1 0 . 1 . .... .... 1110 ... 0 .... @3same_fp
+VACGT_fp_3s 1111 001 1 0 . 1 . .... .... 1110 ... 1 .... @3same_fp
+VMAX_fp_3s 1111 001 0 0 . 0 . .... .... 1111 ... 0 .... @3same_fp
+VMIN_fp_3s 1111 001 0 0 . 1 . .... .... 1111 ... 0 .... @3same_fp
+VPMAX_fp_3s 1111 001 1 0 . 0 . .... .... 1111 ... 0 .... @3same_fp_q0
+VPMIN_fp_3s 1111 001 1 0 . 1 . .... .... 1111 ... 0 .... @3same_fp_q0
+VRECPS_fp_3s 1111 001 0 0 . 0 . .... .... 1111 ... 1 .... @3same_fp
+VRSQRTS_fp_3s 1111 001 0 0 . 1 . .... .... 1111 ... 1 .... @3same_fp
+VMAXNM_fp_3s 1111 001 1 0 . 0 . .... .... 1111 ... 1 .... @3same_fp
+VMINNM_fp_3s 1111 001 1 0 . 1 . .... .... 1111 ... 1 .... @3same_fp
+
+######################################################################
+# 2-reg-and-shift grouping:
+# 1111 001 U 1 D immH:3 immL:3 Vd:4 opc:4 L Q M 1 Vm:4
+######################################################################
+&2reg_shift vm vd q shift size
+
+# Right shifts are encoded as N - shift, where N is the element size in bits.
+%neon_rshift_i6 16:6 !function=rsub_64
+%neon_rshift_i5 16:5 !function=rsub_32
+%neon_rshift_i4 16:4 !function=rsub_16
+%neon_rshift_i3 16:3 !function=rsub_8
+
+@2reg_shr_d .... ... . . . ...... .... .... 1 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=3 shift=%neon_rshift_i6
+@2reg_shr_s .... ... . . . 1 ..... .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=2 shift=%neon_rshift_i5
+@2reg_shr_h .... ... . . . 01 .... .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1 shift=%neon_rshift_i4
+@2reg_shr_b .... ... . . . 001 ... .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=0 shift=%neon_rshift_i3
+
+@2reg_shl_d .... ... . . . shift:6 .... .... 1 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=3
+@2reg_shl_s .... ... . . . 1 shift:5 .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=2
+@2reg_shl_h .... ... . . . 01 shift:4 .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1
+@2reg_shl_b .... ... . . . 001 shift:3 .... .... 0 q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=0
+
+# Narrowing right shifts: here the Q bit is part of the opcode decode
+@2reg_shrn_d .... ... . . . 1 ..... .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=3 q=0 \
+ shift=%neon_rshift_i5
+@2reg_shrn_s .... ... . . . 01 .... .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=2 q=0 \
+ shift=%neon_rshift_i4
+@2reg_shrn_h .... ... . . . 001 ... .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1 q=0 \
+ shift=%neon_rshift_i3
+
+# Long left shifts: again Q is part of opcode decode
+@2reg_shll_s .... ... . . . 1 shift:5 .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=2 q=0
+@2reg_shll_h .... ... . . . 01 shift:4 .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1 q=0
+@2reg_shll_b .... ... . . . 001 shift:3 .... .... 0 . . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=0 q=0
+
+@2reg_vcvt .... ... . . . 1 ..... .... .... . q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=2 shift=%neon_rshift_i5
+@2reg_vcvt_f16 .... ... . . . 11 .... .... .... . q:1 . . .... \
+ &2reg_shift vm=%vm_dp vd=%vd_dp size=1 shift=%neon_rshift_i4
+
+VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_d
+VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_s
+VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_h
+VSHR_S_2sh 1111 001 0 1 . ...... .... 0000 . . . 1 .... @2reg_shr_b
+
+VSHR_U_2sh 1111 001 1 1 . ...... .... 0000 . . . 1 .... @2reg_shr_d
+VSHR_U_2sh 1111 001 1 1 . ...... .... 0000 . . . 1 .... @2reg_shr_s
+VSHR_U_2sh 1111 001 1 1 . ...... .... 0000 . . . 1 .... @2reg_shr_h
+VSHR_U_2sh 1111 001 1 1 . ...... .... 0000 . . . 1 .... @2reg_shr_b
+
+VSRA_S_2sh 1111 001 0 1 . ...... .... 0001 . . . 1 .... @2reg_shr_d
+VSRA_S_2sh 1111 001 0 1 . ...... .... 0001 . . . 1 .... @2reg_shr_s
+VSRA_S_2sh 1111 001 0 1 . ...... .... 0001 . . . 1 .... @2reg_shr_h
+VSRA_S_2sh 1111 001 0 1 . ...... .... 0001 . . . 1 .... @2reg_shr_b
+
+VSRA_U_2sh 1111 001 1 1 . ...... .... 0001 . . . 1 .... @2reg_shr_d
+VSRA_U_2sh 1111 001 1 1 . ...... .... 0001 . . . 1 .... @2reg_shr_s
+VSRA_U_2sh 1111 001 1 1 . ...... .... 0001 . . . 1 .... @2reg_shr_h
+VSRA_U_2sh 1111 001 1 1 . ...... .... 0001 . . . 1 .... @2reg_shr_b
+
+VRSHR_S_2sh 1111 001 0 1 . ...... .... 0010 . . . 1 .... @2reg_shr_d
+VRSHR_S_2sh 1111 001 0 1 . ...... .... 0010 . . . 1 .... @2reg_shr_s
+VRSHR_S_2sh 1111 001 0 1 . ...... .... 0010 . . . 1 .... @2reg_shr_h
+VRSHR_S_2sh 1111 001 0 1 . ...... .... 0010 . . . 1 .... @2reg_shr_b
+
+VRSHR_U_2sh 1111 001 1 1 . ...... .... 0010 . . . 1 .... @2reg_shr_d
+VRSHR_U_2sh 1111 001 1 1 . ...... .... 0010 . . . 1 .... @2reg_shr_s
+VRSHR_U_2sh 1111 001 1 1 . ...... .... 0010 . . . 1 .... @2reg_shr_h
+VRSHR_U_2sh 1111 001 1 1 . ...... .... 0010 . . . 1 .... @2reg_shr_b
+
+VRSRA_S_2sh 1111 001 0 1 . ...... .... 0011 . . . 1 .... @2reg_shr_d
+VRSRA_S_2sh 1111 001 0 1 . ...... .... 0011 . . . 1 .... @2reg_shr_s
+VRSRA_S_2sh 1111 001 0 1 . ...... .... 0011 . . . 1 .... @2reg_shr_h
+VRSRA_S_2sh 1111 001 0 1 . ...... .... 0011 . . . 1 .... @2reg_shr_b
+
+VRSRA_U_2sh 1111 001 1 1 . ...... .... 0011 . . . 1 .... @2reg_shr_d
+VRSRA_U_2sh 1111 001 1 1 . ...... .... 0011 . . . 1 .... @2reg_shr_s
+VRSRA_U_2sh 1111 001 1 1 . ...... .... 0011 . . . 1 .... @2reg_shr_h
+VRSRA_U_2sh 1111 001 1 1 . ...... .... 0011 . . . 1 .... @2reg_shr_b
+
+VSRI_2sh 1111 001 1 1 . ...... .... 0100 . . . 1 .... @2reg_shr_d
+VSRI_2sh 1111 001 1 1 . ...... .... 0100 . . . 1 .... @2reg_shr_s
+VSRI_2sh 1111 001 1 1 . ...... .... 0100 . . . 1 .... @2reg_shr_h
+VSRI_2sh 1111 001 1 1 . ...... .... 0100 . . . 1 .... @2reg_shr_b
+
+VSHL_2sh 1111 001 0 1 . ...... .... 0101 . . . 1 .... @2reg_shl_d
+VSHL_2sh 1111 001 0 1 . ...... .... 0101 . . . 1 .... @2reg_shl_s
+VSHL_2sh 1111 001 0 1 . ...... .... 0101 . . . 1 .... @2reg_shl_h
+VSHL_2sh 1111 001 0 1 . ...... .... 0101 . . . 1 .... @2reg_shl_b
+
+VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_d
+VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_s
+VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_h
+VSLI_2sh 1111 001 1 1 . ...... .... 0101 . . . 1 .... @2reg_shl_b
+
+VQSHLU_64_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_d
+VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_s
+VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_h
+VQSHLU_2sh 1111 001 1 1 . ...... .... 0110 . . . 1 .... @2reg_shl_b
+
+VQSHL_S_64_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
+VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
+VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
+VQSHL_S_2sh 1111 001 0 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b
+
+VQSHL_U_64_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_d
+VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_s
+VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_h
+VQSHL_U_2sh 1111 001 1 1 . ...... .... 0111 . . . 1 .... @2reg_shl_b
+
+VSHRN_64_2sh 1111 001 0 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_d
+VSHRN_32_2sh 1111 001 0 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_s
+VSHRN_16_2sh 1111 001 0 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_h
+
+VRSHRN_64_2sh 1111 001 0 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_d
+VRSHRN_32_2sh 1111 001 0 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_s
+VRSHRN_16_2sh 1111 001 0 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_h
+
+VQSHRUN_64_2sh 1111 001 1 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_d
+VQSHRUN_32_2sh 1111 001 1 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_s
+VQSHRUN_16_2sh 1111 001 1 1 . ...... .... 1000 . 0 . 1 .... @2reg_shrn_h
+
+VQRSHRUN_64_2sh 1111 001 1 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_d
+VQRSHRUN_32_2sh 1111 001 1 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_s
+VQRSHRUN_16_2sh 1111 001 1 1 . ...... .... 1000 . 1 . 1 .... @2reg_shrn_h
+
+# VQSHRN with signed input
+VQSHRN_S64_2sh 1111 001 0 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_d
+VQSHRN_S32_2sh 1111 001 0 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_s
+VQSHRN_S16_2sh 1111 001 0 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_h
+
+# VQRSHRN with signed input
+VQRSHRN_S64_2sh 1111 001 0 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_d
+VQRSHRN_S32_2sh 1111 001 0 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_s
+VQRSHRN_S16_2sh 1111 001 0 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_h
+
+# VQSHRN with unsigned input
+VQSHRN_U64_2sh 1111 001 1 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_d
+VQSHRN_U32_2sh 1111 001 1 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_s
+VQSHRN_U16_2sh 1111 001 1 1 . ...... .... 1001 . 0 . 1 .... @2reg_shrn_h
+
+# VQRSHRN with unsigned input
+VQRSHRN_U64_2sh 1111 001 1 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_d
+VQRSHRN_U32_2sh 1111 001 1 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_s
+VQRSHRN_U16_2sh 1111 001 1 1 . ...... .... 1001 . 1 . 1 .... @2reg_shrn_h
+
+VSHLL_S_2sh 1111 001 0 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_s
+VSHLL_S_2sh 1111 001 0 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_h
+VSHLL_S_2sh 1111 001 0 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_b
+
+VSHLL_U_2sh 1111 001 1 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_s
+VSHLL_U_2sh 1111 001 1 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_h
+VSHLL_U_2sh 1111 001 1 1 . ...... .... 1010 . 0 . 1 .... @2reg_shll_b
+
+# VCVT fixed<->float conversions
+VCVT_SH_2sh 1111 001 0 1 . ...... .... 1100 0 . . 1 .... @2reg_vcvt_f16
+VCVT_UH_2sh 1111 001 1 1 . ...... .... 1100 0 . . 1 .... @2reg_vcvt_f16
+VCVT_HS_2sh 1111 001 0 1 . ...... .... 1101 0 . . 1 .... @2reg_vcvt_f16
+VCVT_HU_2sh 1111 001 1 1 . ...... .... 1101 0 . . 1 .... @2reg_vcvt_f16
+
+VCVT_SF_2sh 1111 001 0 1 . ...... .... 1110 0 . . 1 .... @2reg_vcvt
+VCVT_UF_2sh 1111 001 1 1 . ...... .... 1110 0 . . 1 .... @2reg_vcvt
+VCVT_FS_2sh 1111 001 0 1 . ...... .... 1111 0 . . 1 .... @2reg_vcvt
+VCVT_FU_2sh 1111 001 1 1 . ...... .... 1111 0 . . 1 .... @2reg_vcvt
+
+######################################################################
+# 1-reg-and-modified-immediate grouping:
+# 1111 001 i 1 D 000 imm:3 Vd:4 cmode:4 0 Q op 1 Vm:4
+######################################################################
+
+&1reg_imm vd q imm cmode op
+
+%asimd_imm_value 24:1 16:3 0:4
+
+@1reg_imm .... ... . . . ... ... .... .... . q:1 . . .... \
+ &1reg_imm imm=%asimd_imm_value vd=%vd_dp
+
+# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
+# not in a way we can conveniently represent in decodetree without
+# a lot of repetition:
+# VORR: op=0, (cmode & 1) && cmode < 12
+# VBIC: op=1, (cmode & 1) && cmode < 12
+# VMOV: everything else
+# So we have a single decode line and check the cmode/op in the
+# trans function.
+Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
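+# A hedged sketch of that trans-function dispatch (illustrative names,
+# not the actual QEMU code):
+#   if ((a->cmode & 1) && a->cmode < 12) {
+#       fn = a->op ? gen_VBIC_imm : gen_VORR_imm;
+#   } else {
+#       fn = gen_VMOV_imm;   /* covers the VMOV/VMVN forms */
+#   }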
+
+######################################################################
+# Within the "two registers, or three registers of different lengths"
+# grouping ([23,4]=0b10), bits [21:20] are either part of the opcode
+# decode: 0b11 for VEXT, two-reg-misc, VTBL, and duplicate-scalar;
+# or they are a size field for the three-reg-different-lengths and
+# two-reg-and-scalar insn groups (where size cannot be 0b11). This
+# is slightly awkward for decodetree: we handle it with this
+# non-exclusive group which contains within it two exclusive groups:
+# one for the size=0b11 patterns, and one for the size-not-0b11
+# patterns. This allows us to check that none of the insns within
+# each subgroup accidentally overlap each other. Note that all the
+# trans functions for the size-not-0b11 patterns must check and
+# return false for size==3.
+######################################################################
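+# Sketch of the check each size-not-0b11 trans function performs on the
+# C side (hedged; field names are illustrative):
+#   if (a->size == 3) {
+#       return false;   /* pattern belongs to the size=0b11 subgroup */
+#   }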
+{
+ [
+ ##################################################################
+ # Miscellaneous size=0b11 insns
+ ##################################################################
+ VEXT 1111 001 0 1 . 11 .... .... imm:4 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+ VTBL 1111 001 1 1 . 11 .... .... 10 len:2 . op:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+ VDUP_scalar 1111 001 1 1 . 11 index:3 1 .... 11 000 q:1 . 0 .... \
+ vm=%vm_dp vd=%vd_dp size=0
+ VDUP_scalar 1111 001 1 1 . 11 index:2 10 .... 11 000 q:1 . 0 .... \
+ vm=%vm_dp vd=%vd_dp size=1
+ VDUP_scalar 1111 001 1 1 . 11 index:1 100 .... 11 000 q:1 . 0 .... \
+ vm=%vm_dp vd=%vd_dp size=2
+
+ ##################################################################
+ # 2-reg-misc grouping:
+ # 1111 001 11 D 11 size:2 opc1:2 Vd:4 0 opc2:4 q:1 M 0 Vm:4
+ ##################################################################
+
+ &2misc vd vm q size
+
+ @2misc .... ... .. . .. size:2 .. .... . .... q:1 . . .... \
+ &2misc vm=%vm_dp vd=%vd_dp
+ @2misc_q0 .... ... .. . .. size:2 .. .... . .... . . . .... \
+ &2misc vm=%vm_dp vd=%vd_dp q=0
+ @2misc_q1 .... ... .. . .. size:2 .. .... . .... . . . .... \
+ &2misc vm=%vm_dp vd=%vd_dp q=1
+
+ VREV64 1111 001 11 . 11 .. 00 .... 0 0000 . . 0 .... @2misc
+ VREV32 1111 001 11 . 11 .. 00 .... 0 0001 . . 0 .... @2misc
+ VREV16 1111 001 11 . 11 .. 00 .... 0 0010 . . 0 .... @2misc
+
+ VPADDL_S 1111 001 11 . 11 .. 00 .... 0 0100 . . 0 .... @2misc
+ VPADDL_U 1111 001 11 . 11 .. 00 .... 0 0101 . . 0 .... @2misc
+
+ AESE 1111 001 11 . 11 .. 00 .... 0 0110 0 . 0 .... @2misc_q1
+ AESD 1111 001 11 . 11 .. 00 .... 0 0110 1 . 0 .... @2misc_q1
+ AESMC 1111 001 11 . 11 .. 00 .... 0 0111 0 . 0 .... @2misc_q1
+ AESIMC 1111 001 11 . 11 .. 00 .... 0 0111 1 . 0 .... @2misc_q1
+
+ VCLS 1111 001 11 . 11 .. 00 .... 0 1000 . . 0 .... @2misc
+ VCLZ 1111 001 11 . 11 .. 00 .... 0 1001 . . 0 .... @2misc
+ VCNT 1111 001 11 . 11 .. 00 .... 0 1010 . . 0 .... @2misc
+
+ VMVN 1111 001 11 . 11 .. 00 .... 0 1011 . . 0 .... @2misc
+
+ VPADAL_S 1111 001 11 . 11 .. 00 .... 0 1100 . . 0 .... @2misc
+ VPADAL_U 1111 001 11 . 11 .. 00 .... 0 1101 . . 0 .... @2misc
+
+ VQABS 1111 001 11 . 11 .. 00 .... 0 1110 . . 0 .... @2misc
+ VQNEG 1111 001 11 . 11 .. 00 .... 0 1111 . . 0 .... @2misc
+
+ VCGT0 1111 001 11 . 11 .. 01 .... 0 0000 . . 0 .... @2misc
+ VCGE0 1111 001 11 . 11 .. 01 .... 0 0001 . . 0 .... @2misc
+ VCEQ0 1111 001 11 . 11 .. 01 .... 0 0010 . . 0 .... @2misc
+ VCLE0 1111 001 11 . 11 .. 01 .... 0 0011 . . 0 .... @2misc
+ VCLT0 1111 001 11 . 11 .. 01 .... 0 0100 . . 0 .... @2misc
+
+ SHA1H 1111 001 11 . 11 .. 01 .... 0 0101 1 . 0 .... @2misc_q1
+
+ VABS 1111 001 11 . 11 .. 01 .... 0 0110 . . 0 .... @2misc
+ VNEG 1111 001 11 . 11 .. 01 .... 0 0111 . . 0 .... @2misc
+
+ VCGT0_F 1111 001 11 . 11 .. 01 .... 0 1000 . . 0 .... @2misc
+ VCGE0_F 1111 001 11 . 11 .. 01 .... 0 1001 . . 0 .... @2misc
+ VCEQ0_F 1111 001 11 . 11 .. 01 .... 0 1010 . . 0 .... @2misc
+ VCLE0_F 1111 001 11 . 11 .. 01 .... 0 1011 . . 0 .... @2misc
+ VCLT0_F 1111 001 11 . 11 .. 01 .... 0 1100 . . 0 .... @2misc
+
+ VABS_F 1111 001 11 . 11 .. 01 .... 0 1110 . . 0 .... @2misc
+ VNEG_F 1111 001 11 . 11 .. 01 .... 0 1111 . . 0 .... @2misc
+
+ VSWP 1111 001 11 . 11 .. 10 .... 0 0000 . . 0 .... @2misc
+ VTRN 1111 001 11 . 11 .. 10 .... 0 0001 . . 0 .... @2misc
+ VUZP 1111 001 11 . 11 .. 10 .... 0 0010 . . 0 .... @2misc
+ VZIP 1111 001 11 . 11 .. 10 .... 0 0011 . . 0 .... @2misc
+
+ VMOVN 1111 001 11 . 11 .. 10 .... 0 0100 0 . 0 .... @2misc_q0
+ # VQMOVUN: unsigned result (source is always signed)
+ VQMOVUN 1111 001 11 . 11 .. 10 .... 0 0100 1 . 0 .... @2misc_q0
+ # VQMOVN: signed result, source may be signed (_S) or unsigned (_U)
+ VQMOVN_S 1111 001 11 . 11 .. 10 .... 0 0101 0 . 0 .... @2misc_q0
+ VQMOVN_U 1111 001 11 . 11 .. 10 .... 0 0101 1 . 0 .... @2misc_q0
+
+ VSHLL 1111 001 11 . 11 .. 10 .... 0 0110 0 . 0 .... @2misc_q0
+
+ SHA1SU1 1111 001 11 . 11 .. 10 .... 0 0111 0 . 0 .... @2misc_q1
+ SHA256SU0 1111 001 11 . 11 .. 10 .... 0 0111 1 . 0 .... @2misc_q1
+
+ VRINTN 1111 001 11 . 11 .. 10 .... 0 1000 . . 0 .... @2misc
+ VRINTX 1111 001 11 . 11 .. 10 .... 0 1001 . . 0 .... @2misc
+ VRINTA 1111 001 11 . 11 .. 10 .... 0 1010 . . 0 .... @2misc
+ VRINTZ 1111 001 11 . 11 .. 10 .... 0 1011 . . 0 .... @2misc
+
+ VCVT_F16_F32 1111 001 11 . 11 .. 10 .... 0 1100 0 . 0 .... @2misc_q0
+ VCVT_B16_F32 1111 001 11 . 11 .. 10 .... 0 1100 1 . 0 .... @2misc_q0
+
+ VRINTM 1111 001 11 . 11 .. 10 .... 0 1101 . . 0 .... @2misc
+
+ VCVT_F32_F16 1111 001 11 . 11 .. 10 .... 0 1110 0 . 0 .... @2misc_q0
+
+ VRINTP 1111 001 11 . 11 .. 10 .... 0 1111 . . 0 .... @2misc
+
+ VCVTAS 1111 001 11 . 11 .. 11 .... 0 0000 . . 0 .... @2misc
+ VCVTAU 1111 001 11 . 11 .. 11 .... 0 0001 . . 0 .... @2misc
+ VCVTNS 1111 001 11 . 11 .. 11 .... 0 0010 . . 0 .... @2misc
+ VCVTNU 1111 001 11 . 11 .. 11 .... 0 0011 . . 0 .... @2misc
+ VCVTPS 1111 001 11 . 11 .. 11 .... 0 0100 . . 0 .... @2misc
+ VCVTPU 1111 001 11 . 11 .. 11 .... 0 0101 . . 0 .... @2misc
+ VCVTMS 1111 001 11 . 11 .. 11 .... 0 0110 . . 0 .... @2misc
+ VCVTMU 1111 001 11 . 11 .. 11 .... 0 0111 . . 0 .... @2misc
+
+ VRECPE 1111 001 11 . 11 .. 11 .... 0 1000 . . 0 .... @2misc
+ VRSQRTE 1111 001 11 . 11 .. 11 .... 0 1001 . . 0 .... @2misc
+ VRECPE_F 1111 001 11 . 11 .. 11 .... 0 1010 . . 0 .... @2misc
+ VRSQRTE_F 1111 001 11 . 11 .. 11 .... 0 1011 . . 0 .... @2misc
+ VCVT_FS 1111 001 11 . 11 .. 11 .... 0 1100 . . 0 .... @2misc
+ VCVT_FU 1111 001 11 . 11 .. 11 .... 0 1101 . . 0 .... @2misc
+ VCVT_SF 1111 001 11 . 11 .. 11 .... 0 1110 . . 0 .... @2misc
+ VCVT_UF 1111 001 11 . 11 .. 11 .... 0 1111 . . 0 .... @2misc
+ ]
+
+ # Subgroup for size != 0b11
+ [
+ ##################################################################
+ # 3-reg-different-length grouping:
+ # 1111 001 U 1 D sz!=11 Vn:4 Vd:4 opc:4 N 0 M 0 Vm:4
+ ##################################################################
+
+ &3diff vm vn vd size
+
+ @3diff .... ... . . . size:2 .... .... .... . . . . .... \
+ &3diff vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+ VADDL_S_3d 1111 001 0 1 . .. .... .... 0000 . 0 . 0 .... @3diff
+ VADDL_U_3d 1111 001 1 1 . .. .... .... 0000 . 0 . 0 .... @3diff
+
+ VADDW_S_3d 1111 001 0 1 . .. .... .... 0001 . 0 . 0 .... @3diff
+ VADDW_U_3d 1111 001 1 1 . .. .... .... 0001 . 0 . 0 .... @3diff
+
+ VSUBL_S_3d 1111 001 0 1 . .. .... .... 0010 . 0 . 0 .... @3diff
+ VSUBL_U_3d 1111 001 1 1 . .. .... .... 0010 . 0 . 0 .... @3diff
+
+ VSUBW_S_3d 1111 001 0 1 . .. .... .... 0011 . 0 . 0 .... @3diff
+ VSUBW_U_3d 1111 001 1 1 . .. .... .... 0011 . 0 . 0 .... @3diff
+
+ VADDHN_3d 1111 001 0 1 . .. .... .... 0100 . 0 . 0 .... @3diff
+ VRADDHN_3d 1111 001 1 1 . .. .... .... 0100 . 0 . 0 .... @3diff
+
+ VABAL_S_3d 1111 001 0 1 . .. .... .... 0101 . 0 . 0 .... @3diff
+ VABAL_U_3d 1111 001 1 1 . .. .... .... 0101 . 0 . 0 .... @3diff
+
+ VSUBHN_3d 1111 001 0 1 . .. .... .... 0110 . 0 . 0 .... @3diff
+ VRSUBHN_3d 1111 001 1 1 . .. .... .... 0110 . 0 . 0 .... @3diff
+
+ VABDL_S_3d 1111 001 0 1 . .. .... .... 0111 . 0 . 0 .... @3diff
+ VABDL_U_3d 1111 001 1 1 . .. .... .... 0111 . 0 . 0 .... @3diff
+
+ VMLAL_S_3d 1111 001 0 1 . .. .... .... 1000 . 0 . 0 .... @3diff
+ VMLAL_U_3d 1111 001 1 1 . .. .... .... 1000 . 0 . 0 .... @3diff
+
+ VQDMLAL_3d 1111 001 0 1 . .. .... .... 1001 . 0 . 0 .... @3diff
+
+ VMLSL_S_3d 1111 001 0 1 . .. .... .... 1010 . 0 . 0 .... @3diff
+ VMLSL_U_3d 1111 001 1 1 . .. .... .... 1010 . 0 . 0 .... @3diff
+
+ VQDMLSL_3d 1111 001 0 1 . .. .... .... 1011 . 0 . 0 .... @3diff
+
+ VMULL_S_3d 1111 001 0 1 . .. .... .... 1100 . 0 . 0 .... @3diff
+ VMULL_U_3d 1111 001 1 1 . .. .... .... 1100 . 0 . 0 .... @3diff
+
+ VQDMULL_3d 1111 001 0 1 . .. .... .... 1101 . 0 . 0 .... @3diff
+
+ VMULL_P_3d 1111 001 0 1 . .. .... .... 1110 . 0 . 0 .... @3diff
+
+ ##################################################################
+ # 2-regs-plus-scalar grouping:
+ # 1111 001 Q 1 D sz!=11 Vn:4 Vd:4 opc:4 N 1 M 0 Vm:4
+ ##################################################################
+ &2scalar vm vn vd size q
+
+ @2scalar .... ... q:1 . . size:2 .... .... .... . . . . .... \
+ &2scalar vm=%vm_dp vn=%vn_dp vd=%vd_dp
+ # For the 'long' ops the Q bit is part of insn decode
+ @2scalar_q0 .... ... . . . size:2 .... .... .... . . . . .... \
+ &2scalar vm=%vm_dp vn=%vn_dp vd=%vd_dp q=0
+
+ VMLA_2sc 1111 001 . 1 . .. .... .... 0000 . 1 . 0 .... @2scalar
+ VMLA_F_2sc 1111 001 . 1 . .. .... .... 0001 . 1 . 0 .... @2scalar
+
+ VMLAL_S_2sc 1111 001 0 1 . .. .... .... 0010 . 1 . 0 .... @2scalar_q0
+ VMLAL_U_2sc 1111 001 1 1 . .. .... .... 0010 . 1 . 0 .... @2scalar_q0
+
+ VQDMLAL_2sc 1111 001 0 1 . .. .... .... 0011 . 1 . 0 .... @2scalar_q0
+
+ VMLS_2sc 1111 001 . 1 . .. .... .... 0100 . 1 . 0 .... @2scalar
+ VMLS_F_2sc 1111 001 . 1 . .. .... .... 0101 . 1 . 0 .... @2scalar
+
+ VMLSL_S_2sc 1111 001 0 1 . .. .... .... 0110 . 1 . 0 .... @2scalar_q0
+ VMLSL_U_2sc 1111 001 1 1 . .. .... .... 0110 . 1 . 0 .... @2scalar_q0
+
+ VQDMLSL_2sc 1111 001 0 1 . .. .... .... 0111 . 1 . 0 .... @2scalar_q0
+
+ VMUL_2sc 1111 001 . 1 . .. .... .... 1000 . 1 . 0 .... @2scalar
+ VMUL_F_2sc 1111 001 . 1 . .. .... .... 1001 . 1 . 0 .... @2scalar
+
+ VMULL_S_2sc 1111 001 0 1 . .. .... .... 1010 . 1 . 0 .... @2scalar_q0
+ VMULL_U_2sc 1111 001 1 1 . .. .... .... 1010 . 1 . 0 .... @2scalar_q0
+
+ VQDMULL_2sc 1111 001 0 1 . .. .... .... 1011 . 1 . 0 .... @2scalar_q0
+
+ VQDMULH_2sc 1111 001 . 1 . .. .... .... 1100 . 1 . 0 .... @2scalar
+ VQRDMULH_2sc 1111 001 . 1 . .. .... .... 1101 . 1 . 0 .... @2scalar
+
+ VQRDMLAH_2sc 1111 001 . 1 . .. .... .... 1110 . 1 . 0 .... @2scalar
+ VQRDMLSH_2sc 1111 001 . 1 . .. .... .... 1111 . 1 . 0 .... @2scalar
+ ]
+}
diff --git a/target/arm/neon-ls.decode b/target/arm/neon-ls.decode
new file mode 100644
index 000000000..c5f364cbc
--- /dev/null
+++ b/target/arm/neon-ls.decode
@@ -0,0 +1,52 @@
+# AArch32 Neon load/store instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# Encodings for Neon load/store instructions where the T32 encoding
+# is a simple transformation of the A32 encoding.
+# More specifically, this file covers instructions where the A32 encoding is
+# 0b1111_0100_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
+# and the T32 encoding is
+# 0b1111_1001_xxx0_xxxx_xxxx_xxxx_xxxx_xxxx
+# This file works on the A32 encoding only; calling code for T32 has to
+# transform the insn into the A32 version first.
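+# For example, bits [27:24] are 0b0100 in the A32 form and 0b1001 in the
+# T32 form; all other bits are identical, so the transformation only has
+# to rewrite those four bits.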
+
+%vd_dp 22:1 12:4
+
+# Neon load/store multiple structures
+
+VLDST_multiple 1111 0100 0 . l:1 0 rn:4 .... itype:4 size:2 align:2 rm:4 \
+ vd=%vd_dp
+
+# Neon load single element to all lanes
+
+VLD_all_lanes 1111 0100 1 . 1 0 rn:4 .... 11 n:2 size:2 t:1 a:1 rm:4 \
+ vd=%vd_dp
+
+# Neon load/store single structure to one lane
+%imm1_5_p1 5:1 !function=plus_1
+%imm1_6_p1 6:1 !function=plus_1
+
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 00 n:2 reg_idx:3 align:1 rm:4 \
+ vd=%vd_dp size=0 stride=1
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 01 n:2 reg_idx:2 . align:1 rm:4 \
+ vd=%vd_dp size=1 stride=%imm1_5_p1
+VLDST_single 1111 0100 1 . l:1 0 rn:4 .... 10 n:2 reg_idx:1 . align:2 rm:4 \
+ vd=%vd_dp size=2 stride=%imm1_6_p1
diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode
new file mode 100644
index 000000000..8e6bd0b61
--- /dev/null
+++ b/target/arm/neon-shared.decode
@@ -0,0 +1,99 @@
+# AArch32 Neon instruction descriptions
+#
+# Copyright (c) 2020 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+# Encodings for Neon instructions whose encoding is the same for
+# both A32 and T32.
+
+# More specifically, this covers:
+# 2reg scalar ext: 0b1111_1110_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
+# 3same ext: 0b1111_110x_xxxx_xxxx_xxxx_1x0x_xxxx_xxxx
+
+# VFP/Neon register fields; same as vfp.decode
+%vm_dp 5:1 0:4
+%vm_sp 0:4 5:1
+%vn_dp 7:1 16:4
+%vn_sp 16:4 7:1
+%vd_dp 22:1 12:4
+%vd_sp 12:4 22:1
+
+# For VCMLA/VCADD insns, convert the single-bit size field
+# which is 0 for fp16 and 1 for fp32 into a MO_* constant.
+# (Note that this is the reverse of the sense of the 1-bit size
+# field in the 3same_fp Neon insns.)
+%vcadd_size 20:1 !function=plus_1
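+# i.e. a raw size bit of 0 (fp16) maps to 1 (MO_16) and a raw size bit
+# of 1 (fp32) maps to 2 (MO_32).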
+
+VCMLA 1111 110 rot:2 . 1 . .... .... 1000 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp size=%vcadd_size
+
+VCADD 1111 110 rot:1 1 . 0 . .... .... 1000 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp size=%vcadd_size
+
+VSDOT 1111 110 00 . 10 .... .... 1101 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUDOT 1111 110 00 . 10 .... .... 1101 . q:1 . 1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUSDOT 1111 110 01 . 10 .... .... 1101 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VDOT_b16 1111 110 00 . 00 .... .... 1101 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+# VFM[AS]L
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 0 . 1 .... \
+ vm=%vm_sp vn=%vn_sp vd=%vd_dp q=0
+VFML 1111 110 0 s:1 . 10 .... .... 1000 . 1 . 1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp q=1
+
+VSMMLA 1111 1100 0.10 .... .... 1100 .1.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+VMMLA_b16 1111 1100 0.00 .... .... 1100 .1.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VFMA_b16 1111 110 0 0.11 .... .... 1000 . q:1 . 1 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp size=1
+VCMLA_scalar 1111 1110 1 . rot:2 .... .... 1000 . q:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp size=2 index=0
+
+VSDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VUDOT_scalar 1111 1110 0 . 10 .... .... 1101 . q:1 index:1 1 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VUSDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VSUDOT_scalar 1111 1110 1 . 00 .... .... 1101 . q:1 index:1 1 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+VDOT_b16_scal 1111 1110 0 . 00 .... .... 1101 . q:1 index:1 0 vm:4 \
+ vn=%vn_dp vd=%vd_dp
+
+%vfml_scalar_q0_rm 0:3 5:1
+%vfml_scalar_q1_index 5:1 3:1
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 0 . 1 index:1 ... \
+ rm=%vfml_scalar_q0_rm vn=%vn_sp vd=%vd_dp q=0
+VFML_scalar 1111 1110 0 . 0 s:1 .... .... 1000 . 1 . 1 . rm:3 \
+ index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp q=1
+VFMA_b16_scal 1111 1110 0.11 .... .... 1000 . q:1 . 1 . vm:3 \
+ index=%vfml_scalar_q1_index vn=%vn_dp vd=%vd_dp
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
new file mode 100644
index 000000000..338b9189d
--- /dev/null
+++ b/target/arm/neon_helper.c
@@ -0,0 +1,1740 @@
+/*
+ * ARM NEON vector operations.
+ *
+ * Copyright (c) 2007, 2008 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GNU GPL v2.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "vec_internal.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+#define SET_QC() env->vfp.qc[0] = 1
+
+#define NEON_TYPE1(name, type) \
+typedef struct \
+{ \
+ type v1; \
+} neon_##name;
+#ifdef HOST_WORDS_BIGENDIAN
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v2; \
+ type v1; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v4; \
+ type v3; \
+ type v2; \
+ type v1; \
+} neon_##name;
+#else
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+ type v3; \
+ type v4; \
+} neon_##name;
+#endif
+
+NEON_TYPE4(s8, int8_t)
+NEON_TYPE4(u8, uint8_t)
+NEON_TYPE2(s16, int16_t)
+NEON_TYPE2(u16, uint16_t)
+NEON_TYPE1(s32, int32_t)
+NEON_TYPE1(u32, uint32_t)
+#undef NEON_TYPE4
+#undef NEON_TYPE2
+#undef NEON_TYPE1
+
+/* Copy from a uint32_t to a vector structure type. */
+#define NEON_UNPACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.i = (val); \
+ dest = conv_u.v; \
+ } while(0)
+
+/* Copy from a vector structure type to a uint32_t. */
+#define NEON_PACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.v = (val); \
+ dest = conv_u.i; \
+ } while(0)
+
+#define NEON_DO1 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
+#define NEON_DO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
+#define NEON_DO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
+ NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
+ NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
+
+#define NEON_VOP_BODY(vtype, n) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+#define NEON_VOP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
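+
+/*
+ * Expansion sketch: NEON_VOP(hadd_u8, neon_u8, 4) defines
+ *   uint32_t helper_neon_hadd_u8(uint32_t arg1, uint32_t arg2)
+ * which unpacks both arguments into four u8 lanes, applies NEON_FN to
+ * each lane pair via NEON_DO4, and repacks the result.
+ */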
+
+/* Pairwise operations. */
+/* For 32-bit elements each segment only contains a single element, so
+ the elementwise and pairwise operations are the same. */
+#define NEON_PDO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
+#define NEON_PDO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
+ NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
+ NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);
+
+#define NEON_POP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_PDO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+/* Unary operators. */
+#define NEON_VOP1(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
+{ \
+ vtype vsrc1; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, arg, vdest); \
+ return arg; \
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qadd_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qadd_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ SET_QC();
+ res = ~0;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (res < src1) {
+ SET_QC();
+ res = ~(uint64_t)0;
+ }
+ return res;
+}
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 > 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qadd_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qadd_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+ SET_QC();
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
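+
+/*
+ * The saturated value above is derived from the sign of 'a': the
+ * arithmetic shift yields 0 for non-negative a and all-ones for negative
+ * a, so the expression evaluates to INT32_MAX (0x7fffffff) on positive
+ * overflow and INT32_MIN (0x80000000) on negative overflow.
+ */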
+
+uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
+ SET_QC();
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+/* Unsigned saturating accumulate of signed value
+ *
+ * Op1/Rn is treated as signed
+ * Op2/Rd is treated as unsigned
+ *
+ * Explicit casting is used to ensure the correct sign extension of
+ * inputs. The result is treated as an unsigned value and saturated as such.
+ *
+ * We use a macro for the 8/16 bit cases which expects signed integers va,
+ * vb, and vr for the interim calculation and an unsigned 32 bit result
+ * value r.
+ */
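+
+/*
+ * Worked example for the 8-bit case: a lane of a = 0x7f (signed +127)
+ * accumulated into a lane of b = 0x90 (unsigned 144) gives
+ * vr = 127 + 144 = 271 > UINT8_MAX, so the result lane saturates to
+ * 0xff and QC is set.
+ */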
+
+#define USATACC(bits, shift) \
+ do { \
+ va = sextract32(a, shift, bits); \
+ vb = extract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > UINT##bits##_MAX) { \
+ SET_QC(); \
+ vr = UINT##bits##_MAX; \
+ } else if (vr < 0) { \
+ SET_QC(); \
+ vr = 0; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
+
+uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ USATACC(8, 0);
+ USATACC(8, 8);
+ USATACC(8, 16);
+ USATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint64_t r = 0;
+
+ USATACC(16, 0);
+ USATACC(16, 16);
+ return r;
+}
+
+#undef USATACC
+
+uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t va = (int32_t)a;
+ int64_t vb = (uint32_t)b;
+ int64_t vr = va + vb;
+ if (vr > UINT32_MAX) {
+ SET_QC();
+ vr = UINT32_MAX;
+ } else if (vr < 0) {
+ SET_QC();
+ vr = 0;
+ }
+ return vr;
+}
+
+uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect
+ * +ve/-ve saturation
+ */
+ if (~a & b & ~res & SIGNBIT64) {
+ SET_QC();
+ res = UINT64_MAX;
+ } else if (a & ~b & res & SIGNBIT64) {
+ SET_QC();
+ res = 0;
+ }
+ return res;
+}
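+
+/*
+ * Reading the sign-bit tests above: "~a & b & ~res" means a non-negative
+ * signed addend plus an accumulator of at least 2^63 wrapped past 2^64,
+ * so we saturate high; "a & ~b & res" means a negative addend took a
+ * small accumulator below zero, so we saturate to 0.
+ */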
+
+/* Signed saturating accumulate of unsigned value
+ *
+ * Op1/Rn is treated as unsigned
+ * Op2/Rd is treated as signed
+ *
+ * The result is treated as a signed value and saturated as such
+ *
+ * We use a macro for the 8/16 bit cases which expects signed integers va,
+ * vb, and vr for the interim calculation and an unsigned 32 bit result
+ * value r.
+ */
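+
+/*
+ * Worked example for the 8-bit case: a lane of a = 0xff (unsigned 255)
+ * accumulated into a lane of b = 0x01 (signed +1) gives vr = 256 >
+ * INT8_MAX, so the result lane saturates to 0x7f and QC is set.
+ */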
+
+#define SSATACC(bits, shift) \
+ do { \
+ va = extract32(a, shift, bits); \
+ vb = sextract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > INT##bits##_MAX) { \
+ SET_QC(); \
+ vr = INT##bits##_MAX; \
+ } else if (vr < INT##bits##_MIN) { \
+ SET_QC(); \
+ vr = INT##bits##_MIN; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
+
+uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(8, 0);
+ SSATACC(8, 8);
+ SSATACC(8, 16);
+ SSATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(16, 0);
+ SSATACC(16, 16);
+
+ return r;
+}
+
+#undef SSATACC
+
+uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t res;
+ int64_t op1 = (uint32_t)a;
+ int64_t op2 = (int32_t)b;
+ res = op1 + op2;
+ if (res > INT32_MAX) {
+ SET_QC();
+ res = INT32_MAX;
+ } else if (res < INT32_MIN) {
+ SET_QC();
+ res = INT32_MIN;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect an overflow */
+ if (((a & res)
+ | (~b & res)
+ | (a & ~b)) & SIGNBIT64) {
+ SET_QC();
+ res = INT64_MAX;
+ }
+ return res;
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = 0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qsub_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qsub_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ SET_QC();
+ res = 0;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ if (src1 < src2) {
+ SET_QC();
+ res = 0;
+ } else {
+ res = src1 - src2;
+ }
+ return res;
+}
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 < 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qsub_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qsub_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+ SET_QC();
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 - src2;
+ if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
+ SET_QC();
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
+NEON_VOP(hadd_s8, neon_s8, 4)
+NEON_VOP(hadd_u8, neon_u8, 4)
+NEON_VOP(hadd_s16, neon_s16, 2)
+NEON_VOP(hadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
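+
+/*
+ * Computing (src1 >> 1) + (src2 >> 1) and then adding back the carry
+ * when both low bits are set avoids the overflow that evaluating
+ * (src1 + src2) >> 1 directly in 32 bits could produce.
+ */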
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
+NEON_VOP(rhadd_s8, neon_s8, 4)
+NEON_VOP(rhadd_u8, neon_u8, 4)
+NEON_VOP(rhadd_s16, neon_s16, 2)
+NEON_VOP(rhadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
+NEON_VOP(hsub_s8, neon_s8, 4)
+NEON_VOP(hsub_u8, neon_u8, 4)
+NEON_VOP(hsub_s16, neon_s16, 2)
+NEON_VOP(hsub_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
+NEON_POP(pmin_s8, neon_s8, 4)
+NEON_POP(pmin_u8, neon_u8, 4)
+NEON_POP(pmin_s16, neon_s16, 2)
+NEON_POP(pmin_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
+NEON_POP(pmax_s8, neon_s8, 4)
+NEON_POP(pmax_u8, neon_u8, 4)
+NEON_POP(pmax_s16, neon_s16, 2)
+NEON_POP(pmax_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
+NEON_VOP(shl_u16, neon_u16, 2)
+#undef NEON_FN
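+
+/*
+ * The (int8_t) cast of the shift operand in these helpers reflects Neon
+ * shift-by-register semantics: the count is the least significant byte
+ * of the register, interpreted as signed, so a negative count shifts in
+ * the opposite direction.
+ */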
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
+NEON_VOP(shl_s16, neon_s16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
+NEON_VOP(rshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
+NEON_VOP(rshl_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
+{
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
+}
+
+uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, true, NULL);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
+NEON_VOP(rshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
+NEON_VOP(rshl_u16, neon_u16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
+{
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
+}
+
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, true, NULL);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
+NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
+NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
+}
+
+uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
+NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
+NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
+}
+
+uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
+NEON_VOP_ENV(qshlu_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
+NEON_VOP_ENV(qshlu_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_suqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
+}
+
+uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
+NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
+NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_uqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
+}
+
+uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_uqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
+}
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
+NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
+NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
+{
+ return do_sqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
+}
+
+uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
+{
+ return do_sqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
+}
+
+uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80808080u;
+ a &= ~0x80808080u;
+ b &= ~0x80808080u;
+ return (a + b) ^ mask;
+}
+
+uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80008000u;
+ a &= ~0x80008000u;
+ b &= ~0x80008000u;
+ return (a + b) ^ mask;
+}
+
+#define NEON_FN(dest, src1, src2) dest = src1 + src2
+NEON_POP(padd_u8, neon_u8, 4)
+NEON_POP(padd_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 - src2
+NEON_VOP(sub_u8, neon_u8, 4)
+NEON_VOP(sub_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 * src2
+NEON_VOP(mul_u8, neon_u8, 4)
+NEON_VOP(mul_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
+NEON_VOP(tst_u8, neon_u8, 4)
+NEON_VOP(tst_u16, neon_u16, 2)
+NEON_VOP(tst_u32, neon_u32, 1)
+#undef NEON_FN
+
+/* Count Leading Sign/Zero Bits. */
+static inline int do_clz8(uint8_t x)
+{
+ int n;
+ for (n = 8; x; n--)
+ x >>= 1;
+ return n;
+}
+
+static inline int do_clz16(uint16_t x)
+{
+ int n;
+ for (n = 16; x; n--)
+ x >>= 1;
+ return n;
+}
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
+NEON_VOP1(clz_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
+NEON_VOP1(clz_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_cls_s32)(uint32_t x)
+{
+ int count;
+ if ((int32_t)x < 0)
+ x = ~x;
+ for (count = 32; x; count--)
+ x = x >> 1;
+ return count - 1;
+}
+
+/* Bit count. */
+uint32_t HELPER(neon_cnt_u8)(uint32_t x)
+{
+ x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
+ return x;
+}
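+
+/*
+ * This is the classic SWAR population count, summing adjacent bit pairs,
+ * nibbles and bytes in parallel; e.g. neon_cnt_u8(0xf00f0301) yields
+ * 0x04040201 (four set bits in each of the top two bytes, two and one
+ * in the bottom two).
+ */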
+
+/* Reverse bits in each 8 bit word */
+uint32_t HELPER(neon_rbit_u8)(uint32_t x)
+{
+ x = ((x & 0xf0f0f0f0) >> 4)
+ | ((x & 0x0f0f0f0f) << 4);
+ x = ((x & 0x88888888) >> 3)
+ | ((x & 0x44444444) >> 1)
+ | ((x & 0x22222222) << 1)
+ | ((x & 0x11111111) << 3);
+ return x;
+}
+
+#define NEON_QDMULH16(dest, src1, src2, round) do { \
+ uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
+ SET_QC(); \
+ tmp = (tmp >> 31) ^ ~SIGNBIT; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int32_t old = tmp; \
+ tmp += 1 << 15; \
+ if ((int32_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT - 1; \
+ } \
+ } \
+ dest = tmp >> 16; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_QDMULH16
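+
+/*
+ * Worked example: qdmulh_s16 with both lanes 0x8000 (-32768) computes
+ * tmp = 0x40000000, whose doubled value would overflow (tmp ^ (tmp << 1)
+ * has the sign bit set), so tmp saturates to 0x7fffffff and the lane
+ * result is 0x7fff with QC set.
+ */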
+
+#define NEON_QDMULH32(dest, src1, src2, round) do { \
+ uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
+ SET_QC(); \
+ tmp = (tmp >> 63) ^ ~SIGNBIT64; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int64_t old = tmp; \
+ tmp += (int64_t)1 << 31; \
+ if ((int64_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT64 - 1; \
+ } \
+ } \
+ dest = tmp >> 32; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#undef NEON_QDMULH32
+
+uint32_t HELPER(neon_narrow_u8)(uint64_t x)
+{
+ return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
+ | ((x >> 24) & 0xff000000u);
+}
+
+uint32_t HELPER(neon_narrow_u16)(uint64_t x)
+{
+ return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
+}
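+
+/*
+ * Worked example: neon_narrow_u16(0x1122334455667788) keeps the low
+ * half of each 32-bit lane, giving 0x33447788.
+ */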
+
+uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
+{
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
+{
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
+{
+ x &= 0xff80ff80ff80ff80ull;
+ x += 0x0080008000800080ull;
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
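+
+/*
+ * Masking off the low 7 bits of each lane first is what makes the single
+ * 64-bit addition safe: whether the rounding constant carries into the
+ * high byte depends only on bit 7 of the lane, and any carry that does
+ * spill into the next lane lands in its zeroed low bits, which the
+ * extraction never reads.
+ */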
+
+uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
+{
+ x &= 0xffff8000ffff8000ull;
+ x += 0x0000800000008000ull;
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
+{
+ uint16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s & 0x8000) { \
+ SET_QC(); \
+ } else { \
+ if (s > 0xff) { \
+ d = 0xff; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2); \
+ }
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
+{
+ uint16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s > 0xff) { \
+ d = 0xff; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
+{
+ int16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s != (int8_t)s) { \
+ d = (s >> 15) ^ 0x7f; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
+{
+ uint32_t high;
+ uint32_t low;
+ low = x;
+ if (low & 0x80000000) {
+ low = 0;
+ SET_QC();
+ } else if (low > 0xffff) {
+ low = 0xffff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high & 0x80000000) {
+ high = 0;
+ SET_QC();
+ } else if (high > 0xffff) {
+ high = 0xffff;
+ SET_QC();
+ }
+ return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
+{
+ uint32_t high;
+ uint32_t low;
+ low = x;
+ if (low > 0xffff) {
+ low = 0xffff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high > 0xffff) {
+ high = 0xffff;
+ SET_QC();
+ }
+ return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
+{
+ int32_t low;
+ int32_t high;
+ low = x;
+ if (low != (int16_t)low) {
+ low = (low >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high != (int16_t)high) {
+ high = (high >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ return (uint16_t)low | (high << 16);
+}
+
+uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
+{
+ if (x & 0x8000000000000000ull) {
+ SET_QC();
+ return 0;
+ }
+ if (x > 0xffffffffu) {
+ SET_QC();
+ return 0xffffffffu;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
+{
+ if (x > 0xffffffffu) {
+ SET_QC();
+ return 0xffffffffu;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
+{
+ if ((int64_t)x != (int32_t)x) {
+ SET_QC();
+ return ((int64_t)x >> 63) ^ 0x7fffffff;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_widen_u8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint8_t)x;
+ tmp = (uint8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_s8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint16_t)(int8_t)x;
+ tmp = (uint16_t)(int8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint16_t)(int8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint16_t)(int8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_u16)(uint32_t x)
+{
+ uint64_t high = (uint16_t)(x >> 16);
+ return ((uint16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_widen_s16)(uint32_t x)
+{
+ uint64_t high = (int16_t)(x >> 16);
+ return ((uint32_t)(int16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000800080008000ull;
+ a &= ~0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000000080000000ull;
+ a &= ~0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t tmp;
+ uint64_t tmp2;
+
+ tmp = a & 0x0000ffff0000ffffull;
+ tmp += (a >> 16) & 0x0000ffff0000ffffull;
+ tmp2 = b & 0xffff0000ffff0000ull;
+ tmp2 += (b << 16) & 0xffff0000ffff0000ull;
+ return ( tmp & 0xffff)
+ | ((tmp >> 16) & 0xffff0000ull)
+ | ((tmp2 << 16) & 0xffff00000000ull)
+ | ( tmp2 & 0xffff000000000000ull);
+}
+
+uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
+{
+ uint32_t low = a + (a >> 32);
+ uint32_t high = b + (b >> 32);
+ return low + ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000800080008000ull;
+ a |= 0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000000080000000ull;
+ a |= 0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint32_t x, y;
+ uint32_t low, high;
+
+ x = a;
+ y = b;
+ low = x + y;
+ if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ low = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ x = a >> 32;
+ y = b >> 32;
+ high = x + y;
+ if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ high = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ return low | ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t result;
+
+ result = a + b;
+ if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
+ SET_QC();
+ result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
+ }
+ return result;
+}
+
+/* We have to do the arithmetic in a larger type than
+ * the input type, because for example with a signed 32 bit
+ * op the absolute difference can overflow a signed 32 bit value.
+ */
+#define DO_ABD(dest, x, y, intype, arithtype) do { \
+ arithtype tmp_x = (intype)(x); \
+ arithtype tmp_y = (intype)(y); \
+ dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
+ } while(0)
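+
+/*
+ * For example, with int32_t inputs INT32_MAX and INT32_MIN the absolute
+ * difference is 2^32 - 1, which only fits in the wider arithtype.
+ */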
+
+uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint8_t, uint32_t);
+ DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int8_t, int32_t);
+ DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint16_t, uint32_t);
+ DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int16_t, int32_t);
+ DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, uint32_t, uint64_t);
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, int32_t, int64_t);
+ return result;
+}
+#undef DO_ABD
+
+/* Widening multiply. Named type is the source type. */
+#define DO_MULL(dest, x, y, type1, type2) do { \
+ type1 tmp_x = x; \
+ type1 tmp_y = y; \
+ dest = (type2)((type2)tmp_x * (type2)tmp_y); \
+ } while(0)
+
+uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_negl_u16)(uint64_t x)
+{
+ uint16_t tmp;
+ uint64_t result;
+ result = (uint16_t)-x;
+ tmp = -(x >> 16);
+ result |= (uint64_t)tmp << 16;
+ tmp = -(x >> 32);
+ result |= (uint64_t)tmp << 32;
+ tmp = -(x >> 48);
+ result |= (uint64_t)tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_negl_u32)(uint64_t x)
+{
+ uint32_t low = -x;
+ uint32_t high = -(x >> 32);
+ return low | ((uint64_t)high << 32);
+}
+
+/* Saturating sign manipulation. */
+/* ??? Make these use NEON_VOP1 */
+#define DO_QABS8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QABS8(vec.v1);
+ DO_QABS8(vec.v2);
+ DO_QABS8(vec.v3);
+ DO_QABS8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QABS8
+
+#define DO_QNEG8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QNEG8(vec.v1);
+ DO_QNEG8(vec.v2);
+ DO_QNEG8(vec.v3);
+ DO_QNEG8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QNEG8
+
+#define DO_QABS16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QABS16(vec.v1);
+ DO_QABS16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QABS16
+
+#define DO_QNEG16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QNEG16(vec.v1);
+ DO_QNEG16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QNEG16
+
+uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else if ((int32_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else if ((int64_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
+/* NEON Float helpers. */
+
+/* Floating point comparisons produce an integer result.
+ * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
+ * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
+ */
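+
+/*
+ * Negating the 0/1 softfloat result yields the all-zeroes/all-ones lane
+ * mask, e.g. -1 == 0xffffffff for a true comparison.
+ */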
+uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
+}
+
+uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_le(make_float32(b), make_float32(a), fpst);
+}
+
+uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_lt(make_float32(b), make_float32(a), fpst);
+}
+
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f0 = float32_abs(make_float32(a));
+ float32 f1 = float32_abs(make_float32(b));
+ return -float32_le(f1, f0, fpst);
+}
+
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f0 = float32_abs(make_float32(a));
+ float32 f1 = float32_abs(make_float32(b));
+ return -float32_lt(f1, f0, fpst);
+}
+
+uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f0 = float64_abs(make_float64(a));
+ float64 f1 = float64_abs(make_float64(b));
+ return -float64_le(f1, f0, fpst);
+}
+
+uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f0 = float64_abs(make_float64(a));
+ float64 f1 = float64_abs(make_float64(b));
+ return -float64_lt(f1, f0, fpst);
+}
+
+#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
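+
+/*
+ * ELEM(v, n, size) extracts element n of width size bits from v, e.g.
+ * ELEM(0x0123456789abcdefULL, 1, 8) == 0xcd.  The helpers below use it
+ * to implement VUZP/VZIP: unzip gathers the even-indexed elements of
+ * the d and m operands into d and the odd-indexed ones into m, while
+ * zip interleaves the elements of d and m.
+ */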
+
+void HELPER(neon_qunzip8)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
+ | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
+ | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
+ | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
+ uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
+ | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
+ | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+ | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
+ uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
+ | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
+ | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
+ | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
+ uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
+ | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
+ | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
+ | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_qunzip16)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
+ | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
+ uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
+ | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
+ uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
+ | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
+ uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
+ | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_qunzip32)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
+ uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+ uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
+ uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_unzip8)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd = rd[0], zm = rm[0];
+
+ uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
+ | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
+ | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+ | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
+ uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
+ | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
+ | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
+ | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+
+ rm[0] = m0;
+ rd[0] = d0;
+}
+
+void HELPER(neon_unzip16)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd = rd[0], zm = rm[0];
+
+ uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
+ | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
+ uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
+ | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+
+ rm[0] = m0;
+ rd[0] = d0;
+}
+
+void HELPER(neon_qzip8)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
+ | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
+ | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
+ | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
+ uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
+ | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
+ | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
+ | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
+ uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
+ | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
+ | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+ | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
+ uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
+ | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
+ | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
+ | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_qzip16)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
+ | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
+ uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
+ | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
+ uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
+ | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
+ uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
+ | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_qzip32)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd0 = rd[0], zd1 = rd[1];
+ uint64_t zm0 = rm[0], zm1 = rm[1];
+
+ uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
+ uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
+ uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+ uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+
+ rm[0] = m0;
+ rm[1] = m1;
+ rd[0] = d0;
+ rd[1] = d1;
+}
+
+void HELPER(neon_zip8)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd = rd[0], zm = rm[0];
+
+ uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
+ | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
+ | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+ | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
+ uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
+ | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
+ | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
+ | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+
+ rm[0] = m0;
+ rd[0] = d0;
+}
+
+void HELPER(neon_zip16)(void *vd, void *vm)
+{
+ uint64_t *rd = vd, *rm = vm;
+ uint64_t zd = rd[0], zm = rm[0];
+
+ uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
+ | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
+ uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
+ | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+
+ rm[0] = m0;
+ rd[0] = d0;
+}
diff --git a/target/arm/op_addsub.h b/target/arm/op_addsub.h
new file mode 100644
index 000000000..ca4a1893c
--- /dev/null
+++ b/target/arm/op_addsub.h
@@ -0,0 +1,103 @@
+/*
+ * ARMv6 integer SIMD operations.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifdef ARITH_GE
+#define GE_ARG , void *gep
+#define DECLARE_GE uint32_t ge = 0
+#define SET_GE *(uint32_t *)gep = ge
+#else
+#define GE_ARG
+#define DECLARE_GE do { } while (0)
+#define SET_GE do { } while (0)
+#endif
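+
+/*
+ * This header is a template: it is included several times with PFX,
+ * ADD16/SUB16/ADD8/SUB8 (and optionally ARITH_GE) defined by the
+ * includer, generating the parallel add/subtract helper families
+ * (e.g. the s/q/sh/u/uq/uh prefixes); the #undefs at the end of the
+ * file reset the macros for the next inclusion.
+ */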
+
+#define RESULT(val, n, width) \
+ res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width)
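+
+/*
+ * E.g. RESULT(val, 1, 16) expands to
+ *     res |= ((uint32_t)(uint16_t)(val)) << 16;
+ * i.e. truncate val to the lane width and deposit it as lane n.
+ */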
+
+uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b, 0);
+ ADD16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD8(a, b, 0);
+ ADD8(a >> 8, b >> 8, 1);
+ ADD8(a >> 16, b >> 16, 2);
+ ADD8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b, 0);
+ SUB16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB8(a, b, 0);
+ SUB8(a >> 8, b >> 8, 1);
+ SUB8(a >> 16, b >> 16, 2);
+ SUB8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b >> 16, 0);
+ SUB16(a >> 16, b, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b >> 16, 0);
+ ADD16(a >> 16, b, 1);
+ SET_GE;
+ return res;
+}
+
+#undef GE_ARG
+#undef DECLARE_GE
+#undef SET_GE
+#undef RESULT
+
+#undef ARITH_GE
+#undef PFX
+#undef ADD16
+#undef SUB16
+#undef ADD8
+#undef SUB8
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
new file mode 100644
index 000000000..70b42b55f
--- /dev/null
+++ b/target/arm/op_helper.c
@@ -0,0 +1,974 @@
+/*
+ * ARM helper routines
+ *
+ * Copyright (c) 2005-2007 CodeSourcery, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+void raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
+ /*
+ * Redirect NS EL1 exceptions to NS EL2. These are reported with
+ * their original syndrome register value, with the exception of
+ * SIMD/FP access traps, which are reported as uncategorized
+ * (see DDI0478C.a D1.10.4)
+ */
+ target_el = 2;
+ if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
+ syndrome = syn_uncategorized();
+ }
+ }
+
+ assert(!excp_is_internal(excp));
+ cs->exception_index = excp;
+ env->exception.syndrome = syndrome;
+ env->exception.target_el = target_el;
+ cpu_loop_exit(cs);
+}
+
+void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
+ uint32_t target_el, uintptr_t ra)
+{
+ CPUState *cs = env_cpu(env);
+
+ /*
+ * restore_state_to_opc() will set env->exception.syndrome, so
+ * we must restore CPU state here before setting the syndrome
+ * the caller passed us, and cannot use cpu_loop_exit_restore().
+ */
+ cpu_restore_state(cs, ra, true);
+ raise_exception(env, excp, syndrome, target_el);
+}
+
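+/*
+ * VTBL/VTBX table lookup: desc encodes the table size minus one in D
+ * registers (desc & 3) and the first table register (desc >> 2).  Each
+ * result byte is the indexed byte of the table, or the corresponding
+ * byte of 'def' when the index is out of range (per the architecture,
+ * VTBL supplies zeroes there and VTBX the old destination bytes).
+ */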
+uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
+ uint64_t ireg, uint64_t def)
+{
+ uint64_t tmp, val = 0;
+ uint32_t maxindex = ((desc & 3) + 1) * 8;
+ uint32_t base_reg = desc >> 2;
+ uint32_t shift, index, reg;
+
+ for (shift = 0; shift < 64; shift += 8) {
+ index = (ireg >> shift) & 0xff;
+ if (index < maxindex) {
+ reg = base_reg + (index >> 3);
+ tmp = *aa32_vfp_dreg(env, reg);
+ tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
+ } else {
+ tmp = def & (0xffull << shift);
+ }
+ val |= tmp;
+ }
+ return val;
+}
+
+void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
+{
+ /*
+ * Perform the v8M stack limit check for SP updates from translated code,
+ * raising an exception if the limit is breached.
+ */
+ if (newvalue < v7m_sp_limit(env)) {
+ /*
+ * Stack limit exceptions are a rare case, so rather than syncing
+ * PC/condbits before the call, we use raise_exception_ra() so
+ * that cpu_restore_state() will sort them out.
+ */
+ raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
+ }
+}
+
+uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+        env->QF = 1;
+    }
+ return res;
+}
+
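+/*
+ * On signed overflow the saturated result depends only on the sign of
+ * the first operand: ~(((int32_t)a >> 31) ^ SIGNBIT) evaluates to
+ * 0x7fffffff when a >= 0 and to 0x80000000 when a < 0.
+ */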
+uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ env->QF = 1;
+ res = ~0;
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ env->QF = 1;
+ res = 0;
+ }
+ return res;
+}
+
+/* Signed saturation. */
+static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
+{
+ int32_t top;
+ uint32_t mask;
+
+ top = val >> shift;
+ mask = (1u << shift) - 1;
+ if (top > 0) {
+ env->QF = 1;
+ return mask;
+ } else if (top < -1) {
+ env->QF = 1;
+ return ~mask;
+ }
+ return val;
+}
+
+/* Unsigned saturation. */
+static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
+{
+ uint32_t max;
+
+ max = (1u << shift) - 1;
+ if (val < 0) {
+ env->QF = 1;
+ return 0;
+ } else if (val > max) {
+ env->QF = 1;
+ return max;
+ }
+ return val;
+}
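+
+/*
+ * Worked example: do_ssat(env, 200, 7) computes top = 200 >> 7 = 1 > 0,
+ * so it saturates to mask = 0x7f and sets QF.  Likewise
+ * do_usat(env, 300, 8) returns 255 and do_usat(env, -1, 8) returns 0.
+ */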
+
+/* Signed saturate. */
+uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ return do_ssat(env, x, shift);
+}
+
+/* Dual halfword signed saturate. */
+uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_ssat(env, (int16_t)x, shift);
+ res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
+/* Unsigned saturate. */
+uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ return do_usat(env, x, shift);
+}
+
+/* Dual halfword unsigned saturate. */
+uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_usat(env, (int16_t)x, shift);
+ res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
+void HELPER(setend)(CPUARMState *env)
+{
+ env->uncached_cpsr ^= CPSR_E;
+ arm_rebuild_hflags(env);
+}
+
+void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
+{
+ /*
+ * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
+ * check if HSTR.TJDBX means we need to trap to EL2.
+ */
+ if (env->cp15.hstr_el2 & HSTR_TJDBX) {
+ /*
+ * We know the condition code check passed, so take the IMPDEF
+ * choice to always report CV=1 COND 0xe
+ */
+ uint32_t syn = syn_bxjtrap(1, 0xe, rm);
+ raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
+ * The function returns the target EL (1-3) if the instruction is to be trapped;
+ * otherwise it returns 0 indicating it is not trapped.
+ */
+static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
+{
+ int cur_el = arm_current_el(env);
+ uint64_t mask;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile cores can never trap WFI/WFE. */
+ return 0;
+ }
+
+ /* If we are currently in EL0 then we need to check if SCTLR is set up for
+ * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
+ */
+ if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
+ int target_el;
+
+ mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
+ if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
+ /* Secure EL0 and Secure PL1 is at EL3 */
+ target_el = 3;
+ } else {
+ target_el = 1;
+ }
+
+ if (!(env->cp15.sctlr_el[target_el] & mask)) {
+ return target_el;
+ }
+ }
+
+    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
+     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
+     * bits will be zero, indicating no trap.
+ */
+ if (cur_el < 2) {
+ mask = is_wfe ? HCR_TWE : HCR_TWI;
+ if (arm_hcr_el2_eff(env) & mask) {
+ return 2;
+ }
+ }
+
+ /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
+ if (cur_el < 3) {
+        mask = is_wfe ? SCR_TWE : SCR_TWI;
+ if (env->cp15.scr_el3 & mask) {
+ return 3;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * WFI in the user-mode emulator is technically permitted but not
+ * something any real-world code would do. AArch64 Linux kernels
+     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
+     * AArch32 kernels don't trap it, so it will delay a bit.
+     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
+ * would trigger an abort.
+ */
+ return;
+#else
+ CPUState *cs = env_cpu(env);
+ int target_el = check_wfx_trap(env, false);
+
+ if (cpu_has_work(cs)) {
+ /* Don't bother to go into our "low power state" if
+ * we would just wake up immediately.
+ */
+ return;
+ }
+
+ if (target_el) {
+ if (env->aarch64) {
+ env->pc -= insn_len;
+ } else {
+ env->regs[15] -= insn_len;
+ }
+
+ raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
+ target_el);
+ }
+
+ cs->exception_index = EXCP_HLT;
+ cs->halted = 1;
+ cpu_loop_exit(cs);
+#endif
+}
+
+void HELPER(wfe)(CPUARMState *env)
+{
+ /* This is a hint instruction that is semantically different
+ * from YIELD even though we currently implement it identically.
+ * Don't actually halt the CPU, just yield back to top
+ * level loop. This is not going into a "low power state"
+ * (ie halting until some event occurs), so we never take
+ * a configurable trap to a different exception level.
+ */
+ HELPER(yield)(env);
+}
+
+void HELPER(yield)(CPUARMState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* This is a non-trappable hint instruction that generally indicates
+ * that the guest is currently busy-looping. Yield control back to the
+ * top level loop so that a more deserving VCPU has a chance to run.
+ */
+ cs->exception_index = EXCP_YIELD;
+ cpu_loop_exit(cs);
+}
+
+/* Raise an internal-to-QEMU exception. This is limited to only
+ * those EXCP values which are special cases for QEMU to interrupt
+ * execution and not to be used for exceptions which are passed to
+ * the guest (those must all have syndrome information and thus should
+ * use exception_with_syndrome).
+ */
+void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
+{
+ CPUState *cs = env_cpu(env);
+
+ assert(excp_is_internal(excp));
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+/* Raise an exception with the specified syndrome register value */
+void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
+{
+ raise_exception(env, excp, syndrome, target_el);
+}
+
+/* Raise an EXCP_BKPT with the specified syndrome register value,
+ * targeting the correct exception level for debug exceptions.
+ */
+void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
+{
+ int debug_el = arm_debug_target_el(env);
+ int cur_el = arm_current_el(env);
+
+ /* FSR will only be used if the debug target EL is AArch32. */
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ * values to the guest that it shouldn't be able to see at its
+ * exception/security level.
+ */
+ env->exception.vaddress = 0;
+ /*
+ * Other kinds of architectural debug exception are ignored if
+ * they target an exception level below the current one (in QEMU
+ * this is checked by arm_generate_debug_exceptions()). Breakpoint
+ * instructions are special because they always generate an exception
+ * to somewhere: if they can't go to the configured debug exception
+ * level they are taken to the current exception level.
+ */
+ if (debug_el < cur_el) {
+ debug_el = cur_el;
+ }
+ raise_exception(env, EXCP_BKPT, syndrome, debug_el);
+}
+
+uint32_t HELPER(cpsr_read)(CPUARMState *env)
+{
+ return cpsr_read(env) & ~CPSR_EXEC;
+}
+
+void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ cpsr_write(env, val, mask, CPSRWriteByInstr);
+ /* TODO: Not all cpsr bits are relevant to hflags. */
+ arm_rebuild_hflags(env);
+}
+
+/* Write the CPSR for a 32-bit exception return */
+void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
+{
+ uint32_t mask;
+
+ qemu_mutex_lock_iothread();
+ arm_call_pre_el_change_hook(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+
+ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+ cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
+
+ /* Generated code has already stored the new PC value, but
+ * without masking out its low bits, because which bits need
+ * masking depends on whether we're returning to Thumb or ARM
+ * state. Do the masking now.
+ */
+ env->regs[15] &= (env->thumb ? ~1 : ~3);
+ arm_rebuild_hflags(env);
+
+ qemu_mutex_lock_iothread();
+ arm_call_el_change_hook(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+}
+
+/* Access to user mode registers from privileged modes. */
+uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
+{
+ uint32_t val;
+
+ if (regno == 13) {
+ val = env->banked_r13[BANK_USRSYS];
+ } else if (regno == 14) {
+ val = env->banked_r14[BANK_USRSYS];
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ val = env->usr_regs[regno - 8];
+ } else {
+ val = env->regs[regno];
+ }
+ return val;
+}
+
+void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
+{
+ if (regno == 13) {
+ env->banked_r13[BANK_USRSYS] = val;
+ } else if (regno == 14) {
+ env->banked_r14[BANK_USRSYS] = val;
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ env->usr_regs[regno - 8] = val;
+ } else {
+ env->regs[regno] = val;
+ }
+}
+
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
+{
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ env->regs[13] = val;
+ } else {
+ env->banked_r13[bank_number(mode)] = val;
+ }
+}
+
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
+{
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
+ /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
+ * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
+ */
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ return env->regs[13];
+ } else {
+ return env->banked_r13[bank_number(mode)];
+ }
+}
+
+static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
+ uint32_t regno)
+{
+ /* Raise an exception if the requested access is one of the UNPREDICTABLE
+ * cases; otherwise return. This broadly corresponds to the pseudocode
+ * BankedRegisterAccessValid() and SPSRAccessValid(),
+ * except that we have already handled some cases at translate time.
+ */
+ int curmode = env->uncached_cpsr & CPSR_M;
+
+ if (regno == 17) {
+ /* ELR_Hyp: a special case because access from tgtmode is OK */
+ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ return;
+ }
+
+ if (curmode == tgtmode) {
+ goto undef;
+ }
+
+ if (tgtmode == ARM_CPU_MODE_USR) {
+ switch (regno) {
+ case 8 ... 12:
+ if (curmode != ARM_CPU_MODE_FIQ) {
+ goto undef;
+ }
+ break;
+ case 13:
+ if (curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ case 14:
+ if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (tgtmode == ARM_CPU_MODE_HYP) {
+ /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
+ if (curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ }
+
+ return;
+
+undef:
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+}
+
+void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
+ uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ env->banked_spsr[bank_number(tgtmode)] = value;
+ break;
+ case 17: /* ELR_Hyp */
+ env->elr_el[2] = value;
+ break;
+ case 13:
+ env->banked_r13[bank_number(tgtmode)] = value;
+ break;
+ case 14:
+ env->banked_r14[r14_bank_number(tgtmode)] = value;
+ break;
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ env->usr_regs[regno - 8] = value;
+ break;
+ case ARM_CPU_MODE_FIQ:
+ env->fiq_regs[regno - 8] = value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ return env->banked_spsr[bank_number(tgtmode)];
+ case 17: /* ELR_Hyp */
+ return env->elr_el[2];
+ case 13:
+ return env->banked_r13[bank_number(tgtmode)];
+ case 14:
+ return env->banked_r14[r14_bank_number(tgtmode)];
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ return env->usr_regs[regno - 8];
+ case ARM_CPU_MODE_FIQ:
+ return env->fiq_regs[regno - 8];
+ default:
+ g_assert_not_reached();
+ }
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
+ uint32_t isread)
+{
+ const ARMCPRegInfo *ri = rip;
+ int target_el;
+
+ if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
+ && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
+ raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
+ }
+
+ /*
+ * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
+     * to sysregs that are not accessible at EL0 to have UNDEFed already.
+ */
+ if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
+ (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+ uint32_t mask = 1 << ri->crn;
+
+ if (ri->type & ARM_CP_64BIT) {
+ mask = 1 << ri->crm;
+ }
+
+ /* T4 and T14 are RES0 */
+ mask &= ~((1 << 4) | (1 << 14));
+
+ if (env->cp15.hstr_el2 & mask) {
+ target_el = 2;
+            goto except;
+ }
+ }
+
+ if (!ri->accessfn) {
+ return;
+ }
+
+ switch (ri->accessfn(env, ri, isread)) {
+ case CP_ACCESS_OK:
+ return;
+ case CP_ACCESS_TRAP:
+ target_el = exception_target_el(env);
+ break;
+ case CP_ACCESS_TRAP_EL2:
+ /* Requesting a trap to EL2 when we're in EL3 is
+ * a bug in the access function.
+ */
+ assert(arm_current_el(env) != 3);
+ target_el = 2;
+ break;
+ case CP_ACCESS_TRAP_EL3:
+ target_el = 3;
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED:
+ target_el = exception_target_el(env);
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
+ target_el = 2;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
+ target_el = 3;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_FP_EL2:
+ target_el = 2;
+ /* Since we are an implementation that takes exceptions on a trapped
+ * conditional insn only if the insn has passed its condition code
+ * check, we take the IMPDEF choice to always report CV=1 COND=0xe
+ * (which is also the required value for AArch64 traps).
+ */
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
+ case CP_ACCESS_TRAP_FP_EL3:
+ target_el = 3;
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+except:
+ raise_exception(env, EXCP_UDEF, syndrome, target_el);
+}
+
+void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ if (ri->type & ARM_CP_IO) {
+ qemu_mutex_lock_iothread();
+ ri->writefn(env, ri, value);
+ qemu_mutex_unlock_iothread();
+ } else {
+ ri->writefn(env, ri, value);
+ }
+}
+
+uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
+{
+ const ARMCPRegInfo *ri = rip;
+ uint32_t res;
+
+ if (ri->type & ARM_CP_IO) {
+ qemu_mutex_lock_iothread();
+ res = ri->readfn(env, ri);
+ qemu_mutex_unlock_iothread();
+ } else {
+ res = ri->readfn(env, ri);
+ }
+
+ return res;
+}
+
+void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ if (ri->type & ARM_CP_IO) {
+ qemu_mutex_lock_iothread();
+ ri->writefn(env, ri, value);
+ qemu_mutex_unlock_iothread();
+ } else {
+ ri->writefn(env, ri, value);
+ }
+}
+
+uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
+{
+ const ARMCPRegInfo *ri = rip;
+ uint64_t res;
+
+ if (ri->type & ARM_CP_IO) {
+ qemu_mutex_lock_iothread();
+ res = ri->readfn(env, ri);
+ qemu_mutex_unlock_iothread();
+ } else {
+ res = ri->readfn(env, ri);
+ }
+
+ return res;
+}
+
+void HELPER(pre_hvc)(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int cur_el = arm_current_el(env);
+ /* FIXME: Use actual secure state. */
+ bool secure = false;
+ bool undef;
+
+ if (arm_is_psci_call(cpu, EXCP_HVC)) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * that overrides the architecturally mandated HVC behaviour.
+ */
+ return;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* If EL2 doesn't exist, HVC always UNDEFs */
+ undef = true;
+ } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* EL3.HCE has priority over EL2.HCD. */
+ undef = !(env->cp15.scr_el3 & SCR_HCE);
+ } else {
+ undef = env->cp15.hcr_el2 & HCR_HCD;
+ }
+
+ /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+ * For ARMv8/AArch64, HVC is allowed in EL3.
+ * Note that we've already trapped HVC from EL0 at translation
+ * time.
+ */
+ if (secure && (!is_a64(env) || cur_el == 1)) {
+ undef = true;
+ }
+
+ if (undef) {
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+}
+
+void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ bool smd_flag = env->cp15.scr_el3 & SCR_SMD;
+
+ /*
+ * SMC behaviour is summarized in the following table.
+ * This helper handles the "Trap to EL2" and "Undef insn" cases.
+ * The "Trap to EL3" and "PSCI call" cases are handled in the exception
+ * helper.
+ *
+ * -> ARM_FEATURE_EL3 and !SMD
+ * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1
+ *
+ * Conduit SMC, valid call Trap to EL2 PSCI Call
+ * Conduit SMC, inval call Trap to EL2 Trap to EL3
+ * Conduit not SMC Trap to EL2 Trap to EL3
+ *
+ *
+ * -> ARM_FEATURE_EL3 and SMD
+ * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1
+ *
+ * Conduit SMC, valid call Trap to EL2 PSCI Call
+ * Conduit SMC, inval call Trap to EL2 Undef insn
+ * Conduit not SMC Trap to EL2 Undef insn
+ *
+ *
+ * -> !ARM_FEATURE_EL3
+ * HCR_TSC && NS EL1 !HCR_TSC || !NS EL1
+ *
+ * Conduit SMC, valid call Trap to EL2 PSCI Call
+ * Conduit SMC, inval call Trap to EL2 Undef insn
+ * Conduit not SMC Undef insn Undef insn
+ */
+
+ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+ * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
+ * extensions, SMD only applies to NS state.
+ * On ARMv7 without the Virtualization extensions, the SMD bit
+ * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
+ * so we need not special case this here.
+ */
+ bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
+ : smd_flag && !secure;
+
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ /* If we have no EL3 then SMC always UNDEFs and can't be
+ * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
+ * firmware within QEMU, and we want an EL2 guest to be able
+ * to forbid its EL1 from making PSCI calls into QEMU's
+ * "firmware" via HCR.TSC, so for these purposes treat
+ * PSCI-via-SMC as implying an EL3.
+ * This handles the very last line of the previous table.
+ */
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+
+ if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
+ /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
+ * We also want an EL2 guest to be able to forbid its EL1 from
+ * making PSCI calls into QEMU's "firmware" via HCR.TSC.
+ * This handles all the "Trap to EL2" cases of the previous table.
+ */
+ raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+ }
+
+ /* Catch the two remaining "Undef insn" cases of the previous table:
+     * - PSCI conduit is SMC but we don't have a valid PSCI call,
+ * - We don't have EL3 or SMD is set.
+ */
+ if (!arm_is_psci_call(cpu, EXCP_SMC) &&
+ (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+}
+
+/*
+ * ??? Flag setting arithmetic is awkward because we need to do
+ * comparisons.  The only way to do that in TCG is a conditional branch,
+ * which clobbers all our temporaries.  For now implement these as
+ * helper functions.
+ */
+
+/* Similarly for variable shift instructions. */
+
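+/*
+ * Each *_cc helper returns the shifted value and leaves the carry-out
+ * in env->CF, matching the ARM barrel shifter: e.g.
+ * shl_cc(env, 0x80000001, 1) sets CF = 1 (the bit shifted out of
+ * bit 31) and returns 0x00000002.
+ */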
+uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+        if (shift == 32) {
+            env->CF = x & 1;
+        } else {
+            env->CF = 0;
+        }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (32 - shift)) & 1;
+ return x << shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+        if (shift == 32) {
+            env->CF = (x >> 31) & 1;
+        } else {
+            env->CF = 0;
+        }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ env->CF = (x >> 31) & 1;
+ return (int32_t)x >> 31;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return (int32_t)x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift1, shift;
+ shift1 = i & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+        if (shift1 != 0) {
+            env->CF = (x >> 31) & 1;
+        }
+ return x;
+ } else {
+ env->CF = (x >> (shift - 1)) & 1;
+ return ((uint32_t)x >> shift) | (x << (32 - shift));
+ }
+}
+
+void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
+ uint32_t access_type, uint32_t mmu_idx,
+ uint32_t size)
+{
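+    /*
+     * in_page is the number of bytes from ptr to the end of its page:
+     * -(ptr | TARGET_PAGE_MASK) == TARGET_PAGE_SIZE - (ptr & ~TARGET_PAGE_MASK).
+     */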
+    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
+ uintptr_t ra = GETPC();
+
+ if (likely(size <= in_page)) {
+ probe_access(env, ptr, size, access_type, mmu_idx, ra);
+ } else {
+ probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
+ probe_access(env, ptr + in_page, size - in_page,
+ access_type, mmu_idx, ra);
+ }
+}
diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
new file mode 100644
index 000000000..cd6df1815
--- /dev/null
+++ b/target/arm/pauth_helper.c
@@ -0,0 +1,515 @@
+/*
+ * ARM v8.3-PAuth Operations
+ *
+ * Copyright (c) 2019 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "qemu/xxhash.h"
+
+
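+/*
+ * The pac_* and tweak_* primitives below implement the Armv8.3
+ * ComputePAC pseudocode, a QARMA-style tweakable block cipher: a cell
+ * shuffle, a 4-bit S-box (pac_sub) and a MixColumns-like diffusion
+ * step (pac_mult), plus their inverses for the backward rounds.
+ */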
+static uint64_t pac_cell_shuffle(uint64_t i)
+{
+ uint64_t o = 0;
+
+ o |= extract64(i, 52, 4);
+ o |= extract64(i, 24, 4) << 4;
+ o |= extract64(i, 44, 4) << 8;
+ o |= extract64(i, 0, 4) << 12;
+
+ o |= extract64(i, 28, 4) << 16;
+ o |= extract64(i, 48, 4) << 20;
+ o |= extract64(i, 4, 4) << 24;
+ o |= extract64(i, 40, 4) << 28;
+
+ o |= extract64(i, 32, 4) << 32;
+ o |= extract64(i, 12, 4) << 36;
+ o |= extract64(i, 56, 4) << 40;
+ o |= extract64(i, 20, 4) << 44;
+
+ o |= extract64(i, 8, 4) << 48;
+ o |= extract64(i, 36, 4) << 52;
+ o |= extract64(i, 16, 4) << 56;
+ o |= extract64(i, 60, 4) << 60;
+
+ return o;
+}
+
+static uint64_t pac_cell_inv_shuffle(uint64_t i)
+{
+ uint64_t o = 0;
+
+ o |= extract64(i, 12, 4);
+ o |= extract64(i, 24, 4) << 4;
+ o |= extract64(i, 48, 4) << 8;
+ o |= extract64(i, 36, 4) << 12;
+
+ o |= extract64(i, 56, 4) << 16;
+ o |= extract64(i, 44, 4) << 20;
+ o |= extract64(i, 4, 4) << 24;
+ o |= extract64(i, 16, 4) << 28;
+
+ o |= i & MAKE_64BIT_MASK(32, 4);
+ o |= extract64(i, 52, 4) << 36;
+ o |= extract64(i, 28, 4) << 40;
+ o |= extract64(i, 8, 4) << 44;
+
+ o |= extract64(i, 20, 4) << 48;
+ o |= extract64(i, 0, 4) << 52;
+ o |= extract64(i, 40, 4) << 56;
+ o |= i & MAKE_64BIT_MASK(60, 4);
+
+ return o;
+}
+
+static uint64_t pac_sub(uint64_t i)
+{
+ static const uint8_t sub[16] = {
+ 0xb, 0x6, 0x8, 0xf, 0xc, 0x0, 0x9, 0xe,
+ 0x3, 0x7, 0x4, 0x5, 0xd, 0x2, 0x1, 0xa,
+ };
+ uint64_t o = 0;
+ int b;
+
+ for (b = 0; b < 64; b += 4) {
+ o |= (uint64_t)sub[(i >> b) & 0xf] << b;
+ }
+ return o;
+}
+
+static uint64_t pac_inv_sub(uint64_t i)
+{
+ static const uint8_t inv_sub[16] = {
+ 0x5, 0xe, 0xd, 0x8, 0xa, 0xb, 0x1, 0x9,
+ 0x2, 0x6, 0xf, 0x0, 0x4, 0xc, 0x7, 0x3,
+ };
+ uint64_t o = 0;
+ int b;
+
+ for (b = 0; b < 64; b += 4) {
+ o |= (uint64_t)inv_sub[(i >> b) & 0xf] << b;
+ }
+ return o;
+}
+
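+/* E.g. rot_cell(0b0001, 1) == 0b0010 and rot_cell(0b1000, 1) == 0b0001. */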
+static int rot_cell(int cell, int n)
+{
+ /* 4-bit rotate left by n. */
+ cell |= cell << 4;
+ return extract32(cell, 4 - n, 4);
+}
+
+static uint64_t pac_mult(uint64_t i)
+{
+ uint64_t o = 0;
+ int b;
+
+ for (b = 0; b < 4 * 4; b += 4) {
+ int i0, i4, i8, ic, t0, t1, t2, t3;
+
+ i0 = extract64(i, b, 4);
+ i4 = extract64(i, b + 4 * 4, 4);
+ i8 = extract64(i, b + 8 * 4, 4);
+ ic = extract64(i, b + 12 * 4, 4);
+
+ t0 = rot_cell(i8, 1) ^ rot_cell(i4, 2) ^ rot_cell(i0, 1);
+ t1 = rot_cell(ic, 1) ^ rot_cell(i4, 1) ^ rot_cell(i0, 2);
+ t2 = rot_cell(ic, 2) ^ rot_cell(i8, 1) ^ rot_cell(i0, 1);
+ t3 = rot_cell(ic, 1) ^ rot_cell(i8, 2) ^ rot_cell(i4, 1);
+
+ o |= (uint64_t)t3 << b;
+ o |= (uint64_t)t2 << (b + 4 * 4);
+ o |= (uint64_t)t1 << (b + 8 * 4);
+ o |= (uint64_t)t0 << (b + 12 * 4);
+ }
+ return o;
+}
+
+static uint64_t tweak_cell_rot(uint64_t cell)
+{
+ return (cell >> 1) | (((cell ^ (cell >> 1)) & 1) << 3);
+}
+
+static uint64_t tweak_shuffle(uint64_t i)
+{
+ uint64_t o = 0;
+
+ o |= extract64(i, 16, 4) << 0;
+ o |= extract64(i, 20, 4) << 4;
+ o |= tweak_cell_rot(extract64(i, 24, 4)) << 8;
+ o |= extract64(i, 28, 4) << 12;
+
+ o |= tweak_cell_rot(extract64(i, 44, 4)) << 16;
+ o |= extract64(i, 8, 4) << 20;
+ o |= extract64(i, 12, 4) << 24;
+ o |= tweak_cell_rot(extract64(i, 32, 4)) << 28;
+
+ o |= extract64(i, 48, 4) << 32;
+ o |= extract64(i, 52, 4) << 36;
+ o |= extract64(i, 56, 4) << 40;
+ o |= tweak_cell_rot(extract64(i, 60, 4)) << 44;
+
+ o |= tweak_cell_rot(extract64(i, 0, 4)) << 48;
+ o |= extract64(i, 4, 4) << 52;
+ o |= tweak_cell_rot(extract64(i, 40, 4)) << 56;
+ o |= tweak_cell_rot(extract64(i, 36, 4)) << 60;
+
+ return o;
+}
+
+static uint64_t tweak_cell_inv_rot(uint64_t cell)
+{
+ return ((cell << 1) & 0xf) | ((cell & 1) ^ (cell >> 3));
+}
+
+static uint64_t tweak_inv_shuffle(uint64_t i)
+{
+ uint64_t o = 0;
+
+ o |= tweak_cell_inv_rot(extract64(i, 48, 4));
+ o |= extract64(i, 52, 4) << 4;
+ o |= extract64(i, 20, 4) << 8;
+ o |= extract64(i, 24, 4) << 12;
+
+ o |= extract64(i, 0, 4) << 16;
+ o |= extract64(i, 4, 4) << 20;
+ o |= tweak_cell_inv_rot(extract64(i, 8, 4)) << 24;
+ o |= extract64(i, 12, 4) << 28;
+
+ o |= tweak_cell_inv_rot(extract64(i, 28, 4)) << 32;
+ o |= tweak_cell_inv_rot(extract64(i, 60, 4)) << 36;
+ o |= tweak_cell_inv_rot(extract64(i, 56, 4)) << 40;
+ o |= tweak_cell_inv_rot(extract64(i, 16, 4)) << 44;
+
+ o |= extract64(i, 32, 4) << 48;
+ o |= extract64(i, 36, 4) << 52;
+ o |= extract64(i, 40, 4) << 56;
+ o |= tweak_cell_inv_rot(extract64(i, 44, 4)) << 60;
+
+ return o;
+}
+
+static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
+ ARMPACKey key)
+{
+ static const uint64_t RC[5] = {
+ 0x0000000000000000ull,
+ 0x13198A2E03707344ull,
+ 0xA4093822299F31D0ull,
+ 0x082EFA98EC4E6C89ull,
+ 0x452821E638D01377ull,
+ };
+ const uint64_t alpha = 0xC0AC29B7C97C50DDull;
+ /*
+ * Note that in the ARM pseudocode, key0 contains bits <127:64>
+ * and key1 contains bits <63:0> of the 128-bit key.
+ */
+ uint64_t key0 = key.hi, key1 = key.lo;
+ uint64_t workingval, runningmod, roundkey, modk0;
+ int i;
+
+ modk0 = (key0 << 63) | ((key0 >> 1) ^ (key0 >> 63));
+ runningmod = modifier;
+ workingval = data ^ key0;
+
+ for (i = 0; i <= 4; ++i) {
+ roundkey = key1 ^ runningmod;
+ workingval ^= roundkey;
+ workingval ^= RC[i];
+ if (i > 0) {
+ workingval = pac_cell_shuffle(workingval);
+ workingval = pac_mult(workingval);
+ }
+ workingval = pac_sub(workingval);
+ runningmod = tweak_shuffle(runningmod);
+ }
+ roundkey = modk0 ^ runningmod;
+ workingval ^= roundkey;
+ workingval = pac_cell_shuffle(workingval);
+ workingval = pac_mult(workingval);
+ workingval = pac_sub(workingval);
+ workingval = pac_cell_shuffle(workingval);
+ workingval = pac_mult(workingval);
+ workingval ^= key1;
+ workingval = pac_cell_inv_shuffle(workingval);
+ workingval = pac_inv_sub(workingval);
+ workingval = pac_mult(workingval);
+ workingval = pac_cell_inv_shuffle(workingval);
+ workingval ^= key0;
+ workingval ^= runningmod;
+ for (i = 0; i <= 4; ++i) {
+ workingval = pac_inv_sub(workingval);
+ if (i < 4) {
+ workingval = pac_mult(workingval);
+ workingval = pac_cell_inv_shuffle(workingval);
+ }
+ runningmod = tweak_inv_shuffle(runningmod);
+ roundkey = key1 ^ runningmod;
+ workingval ^= RC[4 - i];
+ workingval ^= roundkey;
+ workingval ^= alpha;
+ }
+ workingval ^= modk0;
+
+ return workingval;
+}
+
+static uint64_t pauth_computepac_impdef(uint64_t data, uint64_t modifier,
+ ARMPACKey key)
+{
+ return qemu_xxhash64_4(data, modifier, key.lo, key.hi);
+}
+
+static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
+ uint64_t modifier, ARMPACKey key)
+{
+ if (cpu_isar_feature(aa64_pauth_arch, env_archcpu(env))) {
+ return pauth_computepac_architected(data, modifier, key);
+ } else {
+ return pauth_computepac_impdef(data, modifier, key);
+ }
+}
+
+static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
+ ARMPACKey *key, bool data)
+{
+ ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
+ ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
+ uint64_t pac, ext_ptr, ext, test;
+ int bot_bit, top_bit;
+
+ /* If tagged pointers are in use, use ptr<55>, otherwise ptr<63>. */
+ if (param.tbi) {
+ ext = sextract64(ptr, 55, 1);
+ } else {
+ ext = sextract64(ptr, 63, 1);
+ }
+
+ /* Build a pointer with known good extension bits. */
+ top_bit = 64 - 8 * param.tbi;
+ bot_bit = 64 - param.tsz;
+ ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext);
+
+ pac = pauth_computepac(env, ext_ptr, modifier, *key);
+
+ /*
+ * Check if the ptr has good extension bits and corrupt the
+ * pointer authentication code if not.
+ */
+ test = sextract64(ptr, bot_bit, top_bit - bot_bit);
+ if (test != 0 && test != -1) {
+ /*
+ * Note that our top_bit is one greater than the pseudocode's
+ * version, hence "- 2" here.
+ */
+ pac ^= MAKE_64BIT_MASK(top_bit - 2, 1);
+ }
+
+ /*
+ * Preserve the determination between upper and lower at bit 55,
+ * and insert pointer authentication code.
+ */
+ if (param.tbi) {
+ ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1);
+ pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1);
+ } else {
+ ptr &= MAKE_64BIT_MASK(0, bot_bit);
+ pac &= ~(MAKE_64BIT_MASK(55, 1) | MAKE_64BIT_MASK(0, bot_bit));
+ }
+ ext &= MAKE_64BIT_MASK(55, 1);
+ return pac | ext | ptr;
+}
+
+static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param)
+{
+ /* Note that bit 55 is used whether or not the regime has 2 ranges. */
+ uint64_t extfield = sextract64(ptr, 55, 1);
+ int bot_pac_bit = 64 - param.tsz;
+ int top_pac_bit = 64 - 8 * param.tbi;
+
+ return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield);
+}
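+
+/*
+ * E.g. for a 48-bit VA (param.tsz == 16) with TBI enabled,
+ * bot_pac_bit == 48 and top_pac_bit == 56: the PAC sits in bits [54:48]
+ * (bit 55 keeps the upper/lower address-half selector), and stripping
+ * it sign-extends bit 55 back across bits [55:48].
+ */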
+
+static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
+ ARMPACKey *key, bool data, int keynumber)
+{
+ ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
+ ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
+ int bot_bit, top_bit;
+ uint64_t pac, orig_ptr, test;
+
+ orig_ptr = pauth_original_ptr(ptr, param);
+ pac = pauth_computepac(env, orig_ptr, modifier, *key);
+ bot_bit = 64 - param.tsz;
+ top_bit = 64 - 8 * param.tbi;
+
+ test = (pac ^ ptr) & ~MAKE_64BIT_MASK(55, 1);
+ if (unlikely(extract64(test, bot_bit, top_bit - bot_bit))) {
+ int error_code = (keynumber << 1) | (keynumber ^ 1);
+ if (param.tbi) {
+ return deposit64(orig_ptr, 53, 2, error_code);
+ } else {
+ return deposit64(orig_ptr, 61, 2, error_code);
+ }
+ }
+ return orig_ptr;
+}
+
+static uint64_t pauth_strip(CPUARMState *env, uint64_t ptr, bool data)
+{
+ ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env);
+ ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data);
+
+ return pauth_original_ptr(ptr, param);
+}
+
+static void QEMU_NORETURN pauth_trap(CPUARMState *env, int target_el,
+ uintptr_t ra)
+{
+ raise_exception_ra(env, EXCP_UDEF, syn_pactrap(), target_el, ra);
+}
+
+static void pauth_check_trap(CPUARMState *env, int el, uintptr_t ra)
+{
+ if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t hcr = arm_hcr_el2_eff(env);
+ bool trap = !(hcr & HCR_API);
+ if (el == 0) {
+ /* Trap only applies to EL1&0 regime. */
+ trap &= (hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE);
+ }
+ /* FIXME: ARMv8.3-NV: HCR_NV trap takes precedence for ERETA[AB]. */
+ if (trap) {
+ pauth_trap(env, 2, ra);
+ }
+ }
+ if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
+ if (!(env->cp15.scr_el3 & SCR_API)) {
+ pauth_trap(env, 3, ra);
+ }
+ }
+}
+
+static bool pauth_key_enabled(CPUARMState *env, int el, uint32_t bit)
+{
+ return (arm_sctlr(env, el) & bit) != 0;
+}
+
+uint64_t HELPER(pacia)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_addpac(env, x, y, &env->keys.apia, false);
+}
+
+uint64_t HELPER(pacib)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_addpac(env, x, y, &env->keys.apib, false);
+}
+
+uint64_t HELPER(pacda)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_addpac(env, x, y, &env->keys.apda, true);
+}
+
+uint64_t HELPER(pacdb)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_addpac(env, x, y, &env->keys.apdb, true);
+}
+
+uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ uint64_t pac;
+
+ pauth_check_trap(env, arm_current_el(env), GETPC());
+ pac = pauth_computepac(env, x, y, env->keys.apga);
+
+ return pac & 0xffffffff00000000ull;
+}
+
+uint64_t HELPER(autia)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnIA)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_auth(env, x, y, &env->keys.apia, false, 0);
+}
+
+uint64_t HELPER(autib)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnIB)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_auth(env, x, y, &env->keys.apib, false, 1);
+}
+
+uint64_t HELPER(autda)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnDA)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_auth(env, x, y, &env->keys.apda, true, 0);
+}
+
+uint64_t HELPER(autdb)(CPUARMState *env, uint64_t x, uint64_t y)
+{
+ int el = arm_current_el(env);
+ if (!pauth_key_enabled(env, el, SCTLR_EnDB)) {
+ return x;
+ }
+ pauth_check_trap(env, el, GETPC());
+ return pauth_auth(env, x, y, &env->keys.apdb, true, 1);
+}
+
+uint64_t HELPER(xpaci)(CPUARMState *env, uint64_t a)
+{
+ return pauth_strip(env, a, false);
+}
+
+uint64_t HELPER(xpacd)(CPUARMState *env, uint64_t a)
+{
+ return pauth_strip(env, a, true);
+}
diff --git a/target/arm/psci.c b/target/arm/psci.c
new file mode 100644
index 000000000..6709e2801
--- /dev/null
+++ b/target/arm/psci.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014 - Linaro
+ * Author: Rob Herring <rob.herring@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "kvm-consts.h"
+#include "qemu/main-loop.h"
+#include "sysemu/runstate.h"
+#include "internals.h"
+#include "arm-powerctl.h"
+
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ /* Return true if the r0/x0 value indicates a PSCI call and
+ * the exception type matches the configured PSCI conduit. This is
+ * called before the SMC/HVC instruction is executed, to decide whether
+ * we should treat it as a PSCI call or with the architecturally
+ * defined behaviour for an SMC or HVC (which might be UNDEF or trap
+ * to EL2 or to EL3).
+ */
+ CPUARMState *env = &cpu->env;
+ uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0];
+
+ switch (excp_type) {
+ case EXCP_HVC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) {
+ return false;
+ }
+ break;
+ case EXCP_SMC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ switch (param) {
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void arm_handle_psci_call(ARMCPU *cpu)
+{
+ /*
+ * This function partially implements the logic for dispatching Power State
+ * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b),
+ * to the extent required for bringing up and taking down secondary cores,
+ * and for handling reset and poweroff requests.
+ * Additional information about the calling convention used is available in
+ * the document 'SMC Calling Convention' (ARM DEN 0028)
+ */
+ CPUARMState *env = &cpu->env;
+ uint64_t param[4];
+ uint64_t context_id, mpidr;
+ target_ulong entry;
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * All PSCI functions take explicit 32-bit or native int sized
+ * arguments so we can simply zero-extend all arguments regardless
+ * of which exact function we are about to call.
+ */
+ param[i] = is_a64(env) ? env->xregs[i] : env->regs[i];
+ }
+
+ if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ goto err;
+ }
+
+ switch (param[0]) {
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+ break;
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
+ break;
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ mpidr = param[1];
+
+ switch (param[2]) {
+ case 0:
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
+ if (!target_cpu_state) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+
+ g_assert(qemu_mutex_iothread_locked());
+ ret = target_cpu->power_state;
+ break;
+ default:
+ /* Everything above affinity level 0 is always on. */
+ ret = 0;
+ }
+ break;
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ /* QEMU reset and shutdown are async requests, but PSCI
+ * mandates that we never return from the reset/shutdown
+ * call, so power the CPU off now so it doesn't execute
+ * anything further.
+ */
+ goto cpu_off;
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ {
+ /* The PSCI spec mandates that newly brought up CPUs start
+ * in the highest exception level which exists and is enabled
+ * on the calling CPU. Since the QEMU PSCI implementation is
+ * acting as a "fake EL3" or "fake EL2" firmware, this for us
+ * means that we want to start at the highest NS exception level
+ * that we are providing to the guest.
+ * The execution mode should be that which is currently in use
+ * by the same exception level on the calling CPU.
+ * The CPU should be started with the context_id value
+ * in x0 (if AArch64) or r0 (if AArch32).
+ */
+ int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
+ bool target_aarch64 = arm_el_is_aa64(env, target_el);
+
+ mpidr = param[1];
+ entry = param[2];
+ context_id = param[3];
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
+ target_el, target_aarch64);
+ break;
+ }
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ /* Affinity levels are not supported in QEMU */
+ if (param[1] & 0xfffe0000) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ /* Powerdown is not supported, we always go into WFI */
+ if (is_a64(env)) {
+ env->xregs[0] = 0;
+ } else {
+ env->regs[0] = 0;
+ }
+ helper_wfi(env, 4);
+ break;
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+err:
+ if (is_a64(env)) {
+ env->xregs[0] = ret;
+ } else {
+ env->regs[0] = ret;
+ }
+ return;
+
+cpu_off:
+ ret = arm_set_cpu_off(cpu->mp_affinity);
+    /* Not reached; sanity-check the result in case the power-off failed. */
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
+}
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
new file mode 100644
index 000000000..c60b9f0fe
--- /dev/null
+++ b/target/arm/sve.decode
@@ -0,0 +1,1645 @@
+# AArch64 SVE instruction descriptions
+#
+# Copyright (c) 2017 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+###########################################################################
+# Named fields. These are primarily for disjoint fields.
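+#
+# In decodetree syntax, %name pos:len [pos:len ...] concatenates the
+# listed subfields with the first as the most significant; an "s"
+# before a length (e.g. 16:s6) marks that subfield as sign-extended,
+# and !function applies a transform to the extracted value.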
+
+%imm4_16_p1 16:4 !function=plus_1
+%imm6_22_5 22:1 5:5
+%imm7_22_16 22:2 16:5
+%imm8_16_10 16:5 10:3
+%imm9_16_10 16:s6 10:3
+%size_23 23:2
+%dtype_23_13 23:2 13:2
+%index3_22_19 22:1 19:2
+%index3_19_11 19:2 11:1
+%index2_20_11 20:1 11:1
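+
+# (For example, %imm6_22_5 above concatenates bit 22 (high) with bits 9:5
+# (low); in C terms the extracted value is roughly
+#     imm6 = (extract32(insn, 22, 1) << 5) | extract32(insn, 5, 5);
+# and an "s" prefix on a length, as in 16:s6, requests sign extension.)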
+
+# A combination of tsz:imm3 -- extract esize.
+%tszimm_esz 22:2 5:5 !function=tszimm_esz
+# A combination of tsz:imm3 -- extract (2 * esize) - (tsz:imm3)
+%tszimm_shr 22:2 5:5 !function=tszimm_shr
+# A combination of tsz:imm3 -- extract (tsz:imm3) - esize
+%tszimm_shl 22:2 5:5 !function=tszimm_shl
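+
+# (These use !function= extractors defined in translate-sve.c; the esize
+# one is roughly
+#     static int tszimm_esz(DisasContext *s, int x)
+#     {
+#         x >>= 3;              /* discard imm3, keeping the 4-bit tsz */
+#         return 31 - clz32(x); /* index of tsz's leading set bit */
+#     }
+# so tsz=1xxx yields esz=3, 01xx esz=2, 001x esz=1, 0001 esz=0; the
+# shr/shl variants then apply the formulas in the comments above.)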
+
+# Similarly for the tszh/tszl pair at 22/16 for zzi
+%tszimm16_esz 22:2 16:5 !function=tszimm_esz
+%tszimm16_shr 22:2 16:5 !function=tszimm_shr
+%tszimm16_shl 22:2 16:5 !function=tszimm_shl
+
+# Signed 8-bit immediate, optionally shifted left by 8.
+%sh8_i8s 5:9 !function=expand_imm_sh8s
+# Unsigned 8-bit immediate, optionally shifted left by 8.
+%sh8_i8u 5:9 !function=expand_imm_sh8u
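+
+# (expand_imm_sh8s, for instance, is roughly
+#     return (int8_t)x << (x & 0x100 ? 8 : 0);
+# bit 8 of the 9-bit field being the "shift by 8" flag.)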
+
+# Unsigned load of msz into esz=msz, represented as a dtype.
+%msz_dtype 23:2 !function=msz_dtype
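+
+# (msz_dtype maps msz 0..3 onto dtype { 0, 5, 10, 15 }, the four dtype
+# encodings whose memory size and element size are equal.)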
+
+# Either a copy of rd (at bit 0), or a different source
+# as propagated via the MOVPRFX instruction.
+%reg_movprfx 0:5
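+
+# (This is the destructive-operand idiom: after "movprfx z0, z1", a
+# destructive "add z0.b, p0/m, z0.b, z2.b" behaves as a constructive add
+# with z1 as the source; the decoder itself simply records rn=rd.)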
+
+###########################################################################
+# Named attribute sets. These give nicer names to the argument sets that
+# are shared between groups of instruction patterns; see the note on the
+# generated structs after this list.
+
+&rr_esz rd rn esz
+&rri rd rn imm
+&rr_dbm rd rn dbm
+&rrri rd rn rm imm
+&rri_esz rd rn imm esz
+&rrri_esz rd rn rm imm esz
+&rrr_esz rd rn rm esz
+&rrx_esz rd rn rm index esz
+&rpr_esz rd pg rn esz
+&rpr_s rd pg rn s
+&rprr_s rd pg rn rm s
+&rprr_esz rd pg rn rm esz
+&rrrr_esz rd ra rn rm esz
+&rrxr_esz rd rn rm ra index esz
+&rprrr_esz rd pg rn rm ra esz
+&rpri_esz rd pg rn imm esz
+&ptrue rd esz pat s
+&incdec_cnt rd pat esz imm d u
+&incdec2_cnt rd rn pat esz imm d u
+&incdec_pred rd pg esz d u
+&incdec2_pred rd rn pg esz d u
+&rprr_load rd pg rn rm dtype nreg
+&rpri_load rd pg rn imm dtype nreg
+&rprr_store rd pg rn rm msz esz nreg
+&rpri_store rd pg rn imm msz esz nreg
+&rprr_gather_load rd pg rn rm esz msz u ff xs scale
+&rpri_gather_load rd pg rn imm esz msz u ff
+&rprr_scatter_store rd pg rn rm esz msz xs scale
+&rpri_scatter_store rd pg rn imm esz msz
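+
+# (Each &name above becomes an argument structure in the generated
+# decoder, roughly
+#     typedef struct { int rd, rn, imm, esz; } arg_rri_esz;
+# and a pointer to it is what the matching trans_* function receives.)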
+
+###########################################################################
+# Named instruction formats. These are generally used to
+# reduce the amount of duplication between instruction patterns.
+
+# Two operand with unused vector element size
+@pd_pn_e0 ........ ........ ....... rn:4 . rd:4 &rr_esz esz=0
+
+# Two operand
+@pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz
+@rd_rn ........ esz:2 ...... ...... rn:5 rd:5 &rr_esz
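+
+# (Reading these: each "." is a bit the individual pattern must supply,
+# while name:len fields are extracted automatically -- so @rd_rn takes esz
+# from bits 23:22 and rn/rd from bits 9:5 and 4:0 into an &rr_esz.)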
+
+# Two operand with governing predicate, flag setting
+@pd_pg_pn_s ........ . s:1 ...... .. pg:4 . rn:4 . rd:4 &rpr_s
+@pd_pg_pn_s0 ........ . . ...... .. pg:4 . rn:4 . rd:4 &rpr_s s=0
+
+# Three operand with unused vector element size
+@rd_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 &rrr_esz esz=0
+
+# Three predicate operand, with governing predicate, flag setting
+@pd_pg_pn_pm_s ........ . s:1 .. rm:4 .. pg:4 . rn:4 . rd:4 &rprr_s
+
+# Three operand, vector element size
+@rd_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 &rrr_esz
+@pd_pn_pm ........ esz:2 .. rm:4 ....... rn:4 . rd:4 &rrr_esz
+@rdn_rm ........ esz:2 ...... ...... rm:5 rd:5 \
+ &rrr_esz rn=%reg_movprfx
+@rdn_rm_e0 ........ .. ...... ...... rm:5 rd:5 \
+ &rrr_esz rn=%reg_movprfx esz=0
+@rdn_sh_i8u ........ esz:2 ...... ...... ..... rd:5 \
+ &rri_esz rn=%reg_movprfx imm=%sh8_i8u
+@rdn_i8u ........ esz:2 ...... ... imm:8 rd:5 \
+ &rri_esz rn=%reg_movprfx
+@rdn_i8s ........ esz:2 ...... ... imm:s8 rd:5 \
+ &rri_esz rn=%reg_movprfx
+
+# Four operand, vector element size
+@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \
+ &rrrr_esz ra=%reg_movprfx
+
+# Four operand with unused vector element size
+@rda_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 \
+ &rrrr_esz esz=0 ra=%reg_movprfx
+@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \
+ &rrrr_esz esz=0 rn=%reg_movprfx
+
+# Three operand with "memory" size, aka immediate left shift
+@rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
+
+# Two register operand, with governing predicate, vector element size
+@rdn_pg_rm ........ esz:2 ... ... ... pg:3 rm:5 rd:5 \
+ &rprr_esz rn=%reg_movprfx
+@rdm_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 \
+ &rprr_esz rm=%reg_movprfx
+@rd_pg4_rn_rm ........ esz:2 . rm:5 .. pg:4 rn:5 rd:5 &rprr_esz
+@pd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 . rd:4 &rprr_esz
+
+# Three register operand, with governing predicate, vector element size
+@rda_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 \
+ &rprrr_esz ra=%reg_movprfx
+@rdn_pg_ra_rm ........ esz:2 . rm:5 ... pg:3 ra:5 rd:5 \
+ &rprrr_esz rn=%reg_movprfx
+@rdn_pg_rm_ra ........ esz:2 . ra:5 ... pg:3 rm:5 rd:5 \
+ &rprrr_esz rn=%reg_movprfx
+@rd_pg_rn_rm ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 &rprr_esz
+
+# One register operand, with governing predicate, vector element size
+@rd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
+@rd_pg4_pn ........ esz:2 ... ... .. pg:4 . rn:4 rd:5 &rpr_esz
+@pd_pg_rn ........ esz:2 ... ... ... pg:3 rn:5 . rd:4 &rpr_esz
+
+# One register operand, with governing predicate, no vector element size
+@rd_pg_rn_e0 ........ .. ... ... ... pg:3 rn:5 rd:5 &rpr_esz esz=0
+
+# Two register operands with a 6-bit signed immediate.
+@rd_rn_i6 ........ ... rn:5 ..... imm:s6 rd:5 &rri
+
+# Two register operand, one immediate operand, with predicate,
+# element size encoded as TSZHL.
+@rdn_pg_tszimm_shl ........ .. ... ... ... pg:3 ..... rd:5 \
+ &rpri_esz rn=%reg_movprfx esz=%tszimm_esz imm=%tszimm_shl
+@rdn_pg_tszimm_shr ........ .. ... ... ... pg:3 ..... rd:5 \
+ &rpri_esz rn=%reg_movprfx esz=%tszimm_esz imm=%tszimm_shr
+
+# Similarly without predicate.
+@rd_rn_tszimm_shl ........ .. ... ... ...... rn:5 rd:5 \
+ &rri_esz esz=%tszimm16_esz imm=%tszimm16_shl
+@rd_rn_tszimm_shr ........ .. ... ... ...... rn:5 rd:5 \
+ &rri_esz esz=%tszimm16_esz imm=%tszimm16_shr
+
+# Two register operand, one immediate operand, with 4-bit predicate.
+# User must fill in imm.
+@rdn_pg4 ........ esz:2 .. pg:4 ... ........ rd:5 \
+ &rpri_esz rn=%reg_movprfx
+
+# Two register operand, one one-bit immediate selecting between two
+# floating-point constants.
+@rdn_i1 ........ esz:2 ......... pg:3 .... imm:1 rd:5 \
+ &rpri_esz rn=%reg_movprfx
+
+# Two register operand, one encoded bitmask.
+@rdn_dbm ........ .. .... dbm:13 rd:5 \
+ &rr_dbm rn=%reg_movprfx
+
+# Predicate output, vector and immediate input,
+# controlling predicate, element size.
+@pd_pg_rn_i7 ........ esz:2 . imm:7 . pg:3 rn:5 . rd:4 &rpri_esz
+@pd_pg_rn_i5 ........ esz:2 . imm:s5 ... pg:3 rn:5 . rd:4 &rpri_esz
+
+# Basic Load/Store with 9-bit immediate offset
+@pd_rn_i9 ........ ........ ...... rn:5 . rd:4 \
+ &rri imm=%imm9_16_10
+@rd_rn_i9 ........ ........ ...... rn:5 rd:5 \
+ &rri imm=%imm9_16_10
+
+# One register, pattern, and uint4+1.
+# User must fill in U and D.
+@incdec_cnt ........ esz:2 .. .... ...... pat:5 rd:5 \
+ &incdec_cnt imm=%imm4_16_p1
+@incdec2_cnt ........ esz:2 .. .... ...... pat:5 rd:5 \
+ &incdec2_cnt imm=%imm4_16_p1 rn=%reg_movprfx
+
+# One register, predicate.
+# User must fill in U and D.
+@incdec_pred ........ esz:2 .... .. ..... .. pg:4 rd:5 &incdec_pred
+@incdec2_pred ........ esz:2 .... .. ..... .. pg:4 rd:5 \
+ &incdec2_pred rn=%reg_movprfx
+
+# Loads; user must fill in NREG.
+@rprr_load_dt ....... dtype:4 rm:5 ... pg:3 rn:5 rd:5 &rprr_load
+@rpri_load_dt ....... dtype:4 . imm:s4 ... pg:3 rn:5 rd:5 &rpri_load
+
+@rprr_load_msz ....... .... rm:5 ... pg:3 rn:5 rd:5 \
+ &rprr_load dtype=%msz_dtype
+@rpri_load_msz ....... .... . imm:s4 ... pg:3 rn:5 rd:5 \
+ &rpri_load dtype=%msz_dtype
+
+# Gather Loads.
+@rprr_g_load_u ....... .. . . rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=2
+@rprr_g_load_xs_u ....... .. xs:1 . rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load
+@rprr_g_load_xs_u_sc ....... .. xs:1 scale:1 rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load
+@rprr_g_load_xs_sc ....... .. xs:1 scale:1 rm:5 . . ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load
+@rprr_g_load_u_sc ....... .. . scale:1 rm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=2
+@rprr_g_load_sc ....... .. . scale:1 rm:5 . . ff:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=2
+@rpri_g_load ....... msz:2 .. imm:5 . u:1 ff:1 pg:3 rn:5 rd:5 \
+ &rpri_gather_load
+
+# Stores; user must fill in ESZ, MSZ, NREG as needed.
+@rprr_store ....... .. .. rm:5 ... pg:3 rn:5 rd:5 &rprr_store
+@rpri_store_msz ....... msz:2 .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store
+@rprr_store_esz_n0 ....... .. esz:2 rm:5 ... pg:3 rn:5 rd:5 \
+ &rprr_store nreg=0
+@rprr_scatter_store ....... msz:2 .. rm:5 ... pg:3 rn:5 rd:5 \
+ &rprr_scatter_store
+@rpri_scatter_store ....... msz:2 .. imm:5 ... pg:3 rn:5 rd:5 \
+ &rpri_scatter_store
+
+# Two registers and a scalar by N-bit index
+@rrx_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrx_esz index=%index3_22_19
+@rrx_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 &rrx_esz
+@rrx_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 &rrx_esz
+
+# Two registers and a scalar by N-bit index, alternate
+@rrx_3a ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrx_esz index=%index3_19_11
+@rrx_2a ........ .. . . rm:4 ...... rn:5 rd:5 \
+ &rrx_esz index=%index2_20_11
+
+# Three registers and a scalar by N-bit index
+@rrxr_3 ........ .. . .. rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index3_22_19
+@rrxr_2 ........ .. . index:2 rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx
+@rrxr_1 ........ .. . index:1 rm:4 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx
+
+# Three registers and a scalar by N-bit index, alternate
+@rrxr_3a ........ .. ... rm:3 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index3_19_11
+@rrxr_2a ........ .. .. rm:4 ...... rn:5 rd:5 \
+ &rrxr_esz ra=%reg_movprfx index=%index2_20_11
+
+###########################################################################
+# Instruction patterns. Grouped according to the SVE encoding index
+# (encodingindex.xhtml in the Arm A64 ISA XML).
+
+### SVE Integer Arithmetic - Binary Predicated Group
+
+# SVE bitwise logical vector operations (predicated)
+ORR_zpzz 00000100 .. 011 000 000 ... ..... ..... @rdn_pg_rm
+EOR_zpzz 00000100 .. 011 001 000 ... ..... ..... @rdn_pg_rm
+AND_zpzz 00000100 .. 011 010 000 ... ..... ..... @rdn_pg_rm
+BIC_zpzz 00000100 .. 011 011 000 ... ..... ..... @rdn_pg_rm
+
+# SVE integer add/subtract vectors (predicated)
+ADD_zpzz 00000100 .. 000 000 000 ... ..... ..... @rdn_pg_rm
+SUB_zpzz 00000100 .. 000 001 000 ... ..... ..... @rdn_pg_rm
+SUB_zpzz 00000100 .. 000 011 000 ... ..... ..... @rdm_pg_rn # SUBR
+
+# SVE integer min/max/difference (predicated)
+SMAX_zpzz 00000100 .. 001 000 000 ... ..... ..... @rdn_pg_rm
+UMAX_zpzz 00000100 .. 001 001 000 ... ..... ..... @rdn_pg_rm
+SMIN_zpzz 00000100 .. 001 010 000 ... ..... ..... @rdn_pg_rm
+UMIN_zpzz 00000100 .. 001 011 000 ... ..... ..... @rdn_pg_rm
+SABD_zpzz 00000100 .. 001 100 000 ... ..... ..... @rdn_pg_rm
+UABD_zpzz 00000100 .. 001 101 000 ... ..... ..... @rdn_pg_rm
+
+# SVE integer multiply/divide (predicated)
+MUL_zpzz 00000100 .. 010 000 000 ... ..... ..... @rdn_pg_rm
+SMULH_zpzz 00000100 .. 010 010 000 ... ..... ..... @rdn_pg_rm
+UMULH_zpzz 00000100 .. 010 011 000 ... ..... ..... @rdn_pg_rm
+# Note that divide requires size >= 2; below 2 is unallocated.
+SDIV_zpzz 00000100 .. 010 100 000 ... ..... ..... @rdn_pg_rm
+UDIV_zpzz 00000100 .. 010 101 000 ... ..... ..... @rdn_pg_rm
+SDIV_zpzz 00000100 .. 010 110 000 ... ..... ..... @rdm_pg_rn # SDIVR
+UDIV_zpzz 00000100 .. 010 111 000 ... ..... ..... @rdm_pg_rn # UDIVR
+
+### SVE Integer Reduction Group
+
+# SVE bitwise logical reduction (predicated)
+ORV 00000100 .. 011 000 001 ... ..... ..... @rd_pg_rn
+EORV 00000100 .. 011 001 001 ... ..... ..... @rd_pg_rn
+ANDV 00000100 .. 011 010 001 ... ..... ..... @rd_pg_rn
+
+# SVE constructive prefix (predicated)
+MOVPRFX_z 00000100 .. 010 000 001 ... ..... ..... @rd_pg_rn
+MOVPRFX_m 00000100 .. 010 001 001 ... ..... ..... @rd_pg_rn
+
+# SVE integer add reduction (predicated)
+# Note that saddv requires size != 3.
+UADDV 00000100 .. 000 001 001 ... ..... ..... @rd_pg_rn
+SADDV 00000100 .. 000 000 001 ... ..... ..... @rd_pg_rn
+
+# SVE integer min/max reduction (predicated)
+SMAXV 00000100 .. 001 000 001 ... ..... ..... @rd_pg_rn
+UMAXV 00000100 .. 001 001 001 ... ..... ..... @rd_pg_rn
+SMINV 00000100 .. 001 010 001 ... ..... ..... @rd_pg_rn
+UMINV 00000100 .. 001 011 001 ... ..... ..... @rd_pg_rn
+
+### SVE Shift by Immediate - Predicated Group
+
+# SVE bitwise shift by immediate (predicated)
+ASR_zpzi 00000100 .. 000 000 100 ... .. ... ..... @rdn_pg_tszimm_shr
+LSR_zpzi 00000100 .. 000 001 100 ... .. ... ..... @rdn_pg_tszimm_shr
+LSL_zpzi 00000100 .. 000 011 100 ... .. ... ..... @rdn_pg_tszimm_shl
+ASRD 00000100 .. 000 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHL_zpzi 00000100 .. 000 110 100 ... .. ... ..... @rdn_pg_tszimm_shl
+UQSHL_zpzi 00000100 .. 000 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
+SRSHR 00000100 .. 001 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+URSHR 00000100 .. 001 101 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHLU 00000100 .. 001 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
+
+# SVE bitwise shift by vector (predicated)
+ASR_zpzz 00000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
+LSR_zpzz 00000100 .. 010 001 100 ... ..... ..... @rdn_pg_rm
+LSL_zpzz 00000100 .. 010 011 100 ... ..... ..... @rdn_pg_rm
+ASR_zpzz 00000100 .. 010 100 100 ... ..... ..... @rdm_pg_rn # ASRR
+LSR_zpzz 00000100 .. 010 101 100 ... ..... ..... @rdm_pg_rn # LSRR
+LSL_zpzz 00000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # LSLR
+
+# SVE bitwise shift by wide elements (predicated)
+# Note these require size != 3.
+ASR_zpzw 00000100 .. 011 000 100 ... ..... ..... @rdn_pg_rm
+LSR_zpzw 00000100 .. 011 001 100 ... ..... ..... @rdn_pg_rm
+LSL_zpzw 00000100 .. 011 011 100 ... ..... ..... @rdn_pg_rm
+
+### SVE Integer Arithmetic - Unary Predicated Group
+
+# SVE unary bit operations (predicated)
+# Note esz != 0 for FABS and FNEG.
+CLS 00000100 .. 011 000 101 ... ..... ..... @rd_pg_rn
+CLZ 00000100 .. 011 001 101 ... ..... ..... @rd_pg_rn
+CNT_zpz 00000100 .. 011 010 101 ... ..... ..... @rd_pg_rn
+CNOT 00000100 .. 011 011 101 ... ..... ..... @rd_pg_rn
+NOT_zpz 00000100 .. 011 110 101 ... ..... ..... @rd_pg_rn
+FABS 00000100 .. 011 100 101 ... ..... ..... @rd_pg_rn
+FNEG 00000100 .. 011 101 101 ... ..... ..... @rd_pg_rn
+
+# SVE integer unary operations (predicated)
+# Note esz > original size for extensions.
+ABS 00000100 .. 010 110 101 ... ..... ..... @rd_pg_rn
+NEG 00000100 .. 010 111 101 ... ..... ..... @rd_pg_rn
+SXTB 00000100 .. 010 000 101 ... ..... ..... @rd_pg_rn
+UXTB 00000100 .. 010 001 101 ... ..... ..... @rd_pg_rn
+SXTH 00000100 .. 010 010 101 ... ..... ..... @rd_pg_rn
+UXTH 00000100 .. 010 011 101 ... ..... ..... @rd_pg_rn
+SXTW 00000100 .. 010 100 101 ... ..... ..... @rd_pg_rn
+UXTW 00000100 .. 010 101 101 ... ..... ..... @rd_pg_rn
+
+### SVE Floating Point Compare - Vectors Group
+
+# SVE floating-point compare vectors
+FCMGE_ppzz 01100101 .. 0 ..... 010 ... ..... 0 .... @pd_pg_rn_rm
+FCMGT_ppzz 01100101 .. 0 ..... 010 ... ..... 1 .... @pd_pg_rn_rm
+FCMEQ_ppzz 01100101 .. 0 ..... 011 ... ..... 0 .... @pd_pg_rn_rm
+FCMNE_ppzz 01100101 .. 0 ..... 011 ... ..... 1 .... @pd_pg_rn_rm
+FCMUO_ppzz 01100101 .. 0 ..... 110 ... ..... 0 .... @pd_pg_rn_rm
+FACGE_ppzz 01100101 .. 0 ..... 110 ... ..... 1 .... @pd_pg_rn_rm
+FACGT_ppzz 01100101 .. 0 ..... 111 ... ..... 1 .... @pd_pg_rn_rm
+
+### SVE Integer Multiply-Add Group
+
+# SVE integer multiply-add writing addend (predicated)
+MLA 00000100 .. 0 ..... 010 ... ..... ..... @rda_pg_rn_rm
+MLS 00000100 .. 0 ..... 011 ... ..... ..... @rda_pg_rn_rm
+
+# SVE integer multiply-add writing multiplicand (predicated)
+MLA 00000100 .. 0 ..... 110 ... ..... ..... @rdn_pg_ra_rm # MAD
+MLS 00000100 .. 0 ..... 111 ... ..... ..... @rdn_pg_ra_rm # MSB
+
+### SVE Integer Arithmetic - Unpredicated Group
+
+# SVE integer add/subtract vectors (unpredicated)
+ADD_zzz 00000100 .. 1 ..... 000 000 ..... ..... @rd_rn_rm
+SUB_zzz 00000100 .. 1 ..... 000 001 ..... ..... @rd_rn_rm
+SQADD_zzz 00000100 .. 1 ..... 000 100 ..... ..... @rd_rn_rm
+UQADD_zzz 00000100 .. 1 ..... 000 101 ..... ..... @rd_rn_rm
+SQSUB_zzz 00000100 .. 1 ..... 000 110 ..... ..... @rd_rn_rm
+UQSUB_zzz 00000100 .. 1 ..... 000 111 ..... ..... @rd_rn_rm
+
+### SVE Logical - Unpredicated Group
+
+# SVE bitwise logical operations (unpredicated)
+AND_zzz 00000100 00 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+ORR_zzz 00000100 01 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+EOR_zzz 00000100 10 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+BIC_zzz 00000100 11 1 ..... 001 100 ..... ..... @rd_rn_rm_e0
+
+XAR 00000100 .. 1 ..... 001 101 rm:5 rd:5 &rrri_esz \
+ rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr
+
+# SVE2 bitwise ternary operations
+EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
+BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+
+### SVE Index Generation Group
+
+# SVE index generation (immediate start, immediate increment)
+INDEX_ii 00000100 esz:2 1 imm2:s5 010000 imm1:s5 rd:5
+
+# SVE index generation (immediate start, register increment)
+INDEX_ir 00000100 esz:2 1 rm:5 010010 imm:s5 rd:5
+
+# SVE index generation (register start, immediate increment)
+INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
+
+# SVE index generation (register start, register increment)
+INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
+
+### SVE Stack Allocation Group
+
+# SVE stack frame adjustment
+ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
+ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
+
+# SVE stack frame size
+RDVL 00000100 101 11111 01010 imm:s6 rd:5
+
+### SVE Bitwise Shift - Unpredicated Group
+
+# SVE bitwise shift by immediate (unpredicated)
+ASR_zzi 00000100 .. 1 ..... 1001 00 ..... ..... @rd_rn_tszimm_shr
+LSR_zzi 00000100 .. 1 ..... 1001 01 ..... ..... @rd_rn_tszimm_shr
+LSL_zzi 00000100 .. 1 ..... 1001 11 ..... ..... @rd_rn_tszimm_shl
+
+# SVE bitwise shift by wide elements (unpredicated)
+# Note esz != 3
+ASR_zzw 00000100 .. 1 ..... 1000 00 ..... ..... @rd_rn_rm
+LSR_zzw 00000100 .. 1 ..... 1000 01 ..... ..... @rd_rn_rm
+LSL_zzw 00000100 .. 1 ..... 1000 11 ..... ..... @rd_rn_rm
+
+### SVE Compute Vector Address Group
+
+# SVE vector address generation
+ADR_s32 00000100 00 1 ..... 1010 .. ..... ..... @rd_rn_msz_rm
+ADR_u32 00000100 01 1 ..... 1010 .. ..... ..... @rd_rn_msz_rm
+ADR_p32 00000100 10 1 ..... 1010 .. ..... ..... @rd_rn_msz_rm
+ADR_p64 00000100 11 1 ..... 1010 .. ..... ..... @rd_rn_msz_rm
+
+### SVE Integer Misc - Unpredicated Group
+
+# SVE constructive prefix (unpredicated)
+MOVPRFX 00000100 00 1 00000 101111 rn:5 rd:5
+
+# SVE floating-point exponential accelerator
+# Note esz != 0
+FEXPA 00000100 .. 1 00000 101110 ..... ..... @rd_rn
+
+# SVE floating-point trig select coefficient
+# Note esz != 0
+FTSSEL 00000100 .. 1 ..... 101100 ..... ..... @rd_rn_rm
+
+### SVE Element Count Group
+
+# SVE element count
+CNT_r 00000100 .. 10 .... 1110 0 0 ..... ..... @incdec_cnt d=0 u=1
+
+# SVE inc/dec register by element count
+INCDEC_r 00000100 .. 11 .... 1110 0 d:1 ..... ..... @incdec_cnt u=1
+
+# SVE saturating inc/dec register by element count
+SINCDEC_r_32 00000100 .. 10 .... 1111 d:1 u:1 ..... ..... @incdec_cnt
+SINCDEC_r_64 00000100 .. 11 .... 1111 d:1 u:1 ..... ..... @incdec_cnt
+
+# SVE inc/dec vector by element count
+# Note this requires esz != 0.
+INCDEC_v 00000100 .. 1 1 .... 1100 0 d:1 ..... ..... @incdec2_cnt u=1
+
+# SVE saturating inc/dec vector by element count
+# Note these require esz != 0.
+SINCDEC_v 00000100 .. 1 0 .... 1100 d:1 u:1 ..... ..... @incdec2_cnt
+
+### SVE Bitwise Immediate Group
+
+# SVE bitwise logical with immediate (unpredicated)
+ORR_zzi 00000101 00 0000 ............. ..... @rdn_dbm
+EOR_zzi 00000101 01 0000 ............. ..... @rdn_dbm
+AND_zzi 00000101 10 0000 ............. ..... @rdn_dbm
+
+# SVE broadcast bitmask immediate
+DUPM 00000101 11 0000 dbm:13 rd:5
+
+### SVE Integer Wide Immediate - Predicated Group
+
+# SVE copy floating-point immediate (predicated)
+FCPY 00000101 .. 01 .... 110 imm:8 ..... @rdn_pg4
+
+# SVE copy integer immediate (predicated)
+CPY_m_i 00000101 .. 01 .... 01 . ........ ..... @rdn_pg4 imm=%sh8_i8s
+CPY_z_i 00000101 .. 01 .... 00 . ........ ..... @rdn_pg4 imm=%sh8_i8s
+
+### SVE Permute - Extract Group
+
+# SVE extract vector (destructive)
+EXT 00000101 001 ..... 000 ... rm:5 rd:5 \
+ &rrri rn=%reg_movprfx imm=%imm8_16_10
+
+# SVE2 extract vector (constructive)
+EXT_sve2 00000101 011 ..... 000 ... rn:5 rd:5 \
+ &rri imm=%imm8_16_10
+
+### SVE Permute - Unpredicated Group
+
+# SVE broadcast general register
+DUP_s 00000101 .. 1 00000 001110 ..... ..... @rd_rn
+
+# SVE broadcast indexed element
+DUP_x 00000101 .. 1 ..... 001000 rn:5 rd:5 \
+ &rri imm=%imm7_22_16
+
+# SVE insert SIMD&FP scalar register
+INSR_f 00000101 .. 1 10100 001110 ..... ..... @rdn_rm
+
+# SVE insert general register
+INSR_r 00000101 .. 1 00100 001110 ..... ..... @rdn_rm
+
+# SVE reverse vector elements
+REV_v 00000101 .. 1 11000 001110 ..... ..... @rd_rn
+
+# SVE vector table lookup
+TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm
+
+# SVE unpack vector elements
+UNPK 00000101 esz:2 1100 u:1 h:1 001110 rn:5 rd:5
+
+# SVE2 Table Lookup (three sources)
+
+TBL_sve2 00000101 .. 1 ..... 001010 ..... ..... @rd_rn_rm
+TBX 00000101 .. 1 ..... 001011 ..... ..... @rd_rn_rm
+
+### SVE Permute - Predicates Group
+
+# SVE permute predicate elements
+ZIP1_p 00000101 .. 10 .... 010 000 0 .... 0 .... @pd_pn_pm
+ZIP2_p 00000101 .. 10 .... 010 001 0 .... 0 .... @pd_pn_pm
+UZP1_p 00000101 .. 10 .... 010 010 0 .... 0 .... @pd_pn_pm
+UZP2_p 00000101 .. 10 .... 010 011 0 .... 0 .... @pd_pn_pm
+TRN1_p 00000101 .. 10 .... 010 100 0 .... 0 .... @pd_pn_pm
+TRN2_p 00000101 .. 10 .... 010 101 0 .... 0 .... @pd_pn_pm
+
+# SVE reverse predicate elements
+REV_p 00000101 .. 11 0100 010 000 0 .... 0 .... @pd_pn
+
+# SVE unpack predicate elements
+PUNPKLO 00000101 00 11 0000 010 000 0 .... 0 .... @pd_pn_e0
+PUNPKHI 00000101 00 11 0001 010 000 0 .... 0 .... @pd_pn_e0
+
+### SVE Permute - Interleaving Group
+
+# SVE permute vector elements
+ZIP1_z 00000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
+ZIP2_z 00000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
+UZP1_z 00000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
+UZP2_z 00000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
+TRN1_z 00000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
+TRN2_z 00000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+
+# SVE2 permute vector segments
+ZIP1_q 00000101 10 1 ..... 000 000 ..... ..... @rd_rn_rm_e0
+ZIP2_q 00000101 10 1 ..... 000 001 ..... ..... @rd_rn_rm_e0
+UZP1_q 00000101 10 1 ..... 000 010 ..... ..... @rd_rn_rm_e0
+UZP2_q 00000101 10 1 ..... 000 011 ..... ..... @rd_rn_rm_e0
+TRN1_q 00000101 10 1 ..... 000 110 ..... ..... @rd_rn_rm_e0
+TRN2_q 00000101 10 1 ..... 000 111 ..... ..... @rd_rn_rm_e0
+
+### SVE Permute - Predicated Group
+
+# SVE compress active elements
+# Note esz >= 2
+COMPACT 00000101 .. 100001 100 ... ..... ..... @rd_pg_rn
+
+# SVE conditionally broadcast element to vector
+CLASTA_z 00000101 .. 10100 0 100 ... ..... ..... @rdn_pg_rm
+CLASTB_z 00000101 .. 10100 1 100 ... ..... ..... @rdn_pg_rm
+
+# SVE conditionally copy element to SIMD&FP scalar
+CLASTA_v 00000101 .. 10101 0 100 ... ..... ..... @rd_pg_rn
+CLASTB_v 00000101 .. 10101 1 100 ... ..... ..... @rd_pg_rn
+
+# SVE conditionally copy element to general register
+CLASTA_r 00000101 .. 11000 0 101 ... ..... ..... @rd_pg_rn
+CLASTB_r 00000101 .. 11000 1 101 ... ..... ..... @rd_pg_rn
+
+# SVE copy element to SIMD&FP scalar register
+LASTA_v 00000101 .. 10001 0 100 ... ..... ..... @rd_pg_rn
+LASTB_v 00000101 .. 10001 1 100 ... ..... ..... @rd_pg_rn
+
+# SVE copy element to general register
+LASTA_r 00000101 .. 10000 0 101 ... ..... ..... @rd_pg_rn
+LASTB_r 00000101 .. 10000 1 101 ... ..... ..... @rd_pg_rn
+
+# SVE copy element from SIMD&FP scalar register
+CPY_m_v 00000101 .. 100000 100 ... ..... ..... @rd_pg_rn
+
+# SVE copy element from general register to vector (predicated)
+CPY_m_r 00000101 .. 101000 101 ... ..... ..... @rd_pg_rn
+
+# SVE reverse within elements
+# Note esz >= operation size
+REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
+REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
+REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
+RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
+
+# SVE vector splice (predicated, destructive)
+SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
+
+# SVE2 vector splice (predicated, constructive)
+SPLICE_sve2 00000101 .. 101 101 100 ... ..... ..... @rd_pg_rn
+
+### SVE Select Vectors Group
+
+# SVE select vector elements (predicated)
+SEL_zpzz 00000101 .. 1 ..... 11 .... ..... ..... @rd_pg4_rn_rm
+
+### SVE Integer Compare - Vectors Group
+
+# SVE integer compare vectors
+CMPHS_ppzz 00100100 .. 0 ..... 000 ... ..... 0 .... @pd_pg_rn_rm
+CMPHI_ppzz 00100100 .. 0 ..... 000 ... ..... 1 .... @pd_pg_rn_rm
+CMPGE_ppzz 00100100 .. 0 ..... 100 ... ..... 0 .... @pd_pg_rn_rm
+CMPGT_ppzz 00100100 .. 0 ..... 100 ... ..... 1 .... @pd_pg_rn_rm
+CMPEQ_ppzz 00100100 .. 0 ..... 101 ... ..... 0 .... @pd_pg_rn_rm
+CMPNE_ppzz 00100100 .. 0 ..... 101 ... ..... 1 .... @pd_pg_rn_rm
+
+# SVE integer compare with wide elements
+# Note these require esz != 3.
+CMPEQ_ppzw 00100100 .. 0 ..... 001 ... ..... 0 .... @pd_pg_rn_rm
+CMPNE_ppzw 00100100 .. 0 ..... 001 ... ..... 1 .... @pd_pg_rn_rm
+CMPGE_ppzw 00100100 .. 0 ..... 010 ... ..... 0 .... @pd_pg_rn_rm
+CMPGT_ppzw 00100100 .. 0 ..... 010 ... ..... 1 .... @pd_pg_rn_rm
+CMPLT_ppzw 00100100 .. 0 ..... 011 ... ..... 0 .... @pd_pg_rn_rm
+CMPLE_ppzw 00100100 .. 0 ..... 011 ... ..... 1 .... @pd_pg_rn_rm
+CMPHS_ppzw 00100100 .. 0 ..... 110 ... ..... 0 .... @pd_pg_rn_rm
+CMPHI_ppzw 00100100 .. 0 ..... 110 ... ..... 1 .... @pd_pg_rn_rm
+CMPLO_ppzw 00100100 .. 0 ..... 111 ... ..... 0 .... @pd_pg_rn_rm
+CMPLS_ppzw 00100100 .. 0 ..... 111 ... ..... 1 .... @pd_pg_rn_rm
+
+### SVE Integer Compare - Unsigned Immediate Group
+
+# SVE integer compare with unsigned immediate
+CMPHS_ppzi 00100100 .. 1 ....... 0 ... ..... 0 .... @pd_pg_rn_i7
+CMPHI_ppzi 00100100 .. 1 ....... 0 ... ..... 1 .... @pd_pg_rn_i7
+CMPLO_ppzi 00100100 .. 1 ....... 1 ... ..... 0 .... @pd_pg_rn_i7
+CMPLS_ppzi 00100100 .. 1 ....... 1 ... ..... 1 .... @pd_pg_rn_i7
+
+### SVE Integer Compare - Signed Immediate Group
+
+# SVE integer compare with signed immediate
+CMPGE_ppzi 00100101 .. 0 ..... 000 ... ..... 0 .... @pd_pg_rn_i5
+CMPGT_ppzi 00100101 .. 0 ..... 000 ... ..... 1 .... @pd_pg_rn_i5
+CMPLT_ppzi 00100101 .. 0 ..... 001 ... ..... 0 .... @pd_pg_rn_i5
+CMPLE_ppzi 00100101 .. 0 ..... 001 ... ..... 1 .... @pd_pg_rn_i5
+CMPEQ_ppzi 00100101 .. 0 ..... 100 ... ..... 0 .... @pd_pg_rn_i5
+CMPNE_ppzi 00100101 .. 0 ..... 100 ... ..... 1 .... @pd_pg_rn_i5
+
+### SVE Predicate Logical Operations Group
+
+# SVE predicate logical operations
+AND_pppp 00100101 0. 00 .... 01 .... 0 .... 0 .... @pd_pg_pn_pm_s
+BIC_pppp 00100101 0. 00 .... 01 .... 0 .... 1 .... @pd_pg_pn_pm_s
+EOR_pppp 00100101 0. 00 .... 01 .... 1 .... 0 .... @pd_pg_pn_pm_s
+SEL_pppp 00100101 0. 00 .... 01 .... 1 .... 1 .... @pd_pg_pn_pm_s
+ORR_pppp 00100101 1. 00 .... 01 .... 0 .... 0 .... @pd_pg_pn_pm_s
+ORN_pppp 00100101 1. 00 .... 01 .... 0 .... 1 .... @pd_pg_pn_pm_s
+NOR_pppp 00100101 1. 00 .... 01 .... 1 .... 0 .... @pd_pg_pn_pm_s
+NAND_pppp 00100101 1. 00 .... 01 .... 1 .... 1 .... @pd_pg_pn_pm_s
+
+### SVE Predicate Misc Group
+
+# SVE predicate test
+PTEST 00100101 01 010000 11 pg:4 0 rn:4 0 0000
+
+# SVE predicate initialize
+PTRUE 00100101 esz:2 01100 s:1 111000 pat:5 0 rd:4
+
+# SVE initialize FFR
+SETFFR 00100101 0010 1100 1001 0000 0000 0000
+
+# SVE zero predicate register
+PFALSE 00100101 0001 1000 1110 0100 0000 rd:4
+
+# SVE predicate read from FFR (predicated)
+RDFFR_p 00100101 0 s:1 0110001111000 pg:4 0 rd:4
+
+# SVE predicate read from FFR (unpredicated)
+RDFFR 00100101 0001 1001 1111 0000 0000 rd:4
+
+# SVE FFR write from predicate (WRFFR)
+WRFFR 00100101 0010 1000 1001 000 rn:4 00000
+
+# SVE predicate first active
+PFIRST 00100101 01 011 000 11000 00 .... 0 .... @pd_pn_e0
+
+# SVE predicate next active
+PNEXT 00100101 .. 011 001 11000 10 .... 0 .... @pd_pn
+
+### SVE Partition Break Group
+
+# SVE propagate break from previous partition
+BRKPA 00100101 0. 00 .... 11 .... 0 .... 0 .... @pd_pg_pn_pm_s
+BRKPB 00100101 0. 00 .... 11 .... 0 .... 1 .... @pd_pg_pn_pm_s
+
+# SVE partition break condition
+BRKA_z 00100101 0. 01000001 .... 0 .... 0 .... @pd_pg_pn_s
+BRKB_z 00100101 1. 01000001 .... 0 .... 0 .... @pd_pg_pn_s
+BRKA_m 00100101 00 01000001 .... 0 .... 1 .... @pd_pg_pn_s0
+BRKB_m 00100101 10 01000001 .... 0 .... 1 .... @pd_pg_pn_s0
+
+# SVE propagate break to next partition
+BRKN 00100101 0. 01100001 .... 0 .... 0 .... @pd_pg_pn_s
+
+### SVE Predicate Count Group
+
+# SVE predicate count
+CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn
+
+# SVE inc/dec register by predicate count
+INCDECP_r 00100101 .. 10110 d:1 10001 00 .... ..... @incdec_pred u=1
+
+# SVE inc/dec vector by predicate count
+INCDECP_z 00100101 .. 10110 d:1 10000 00 .... ..... @incdec2_pred u=1
+
+# SVE saturating inc/dec register by predicate count
+SINCDECP_r_32 00100101 .. 1010 d:1 u:1 10001 00 .... ..... @incdec_pred
+SINCDECP_r_64 00100101 .. 1010 d:1 u:1 10001 10 .... ..... @incdec_pred
+
+# SVE saturating inc/dec vector by predicate count
+SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred
+
+### SVE Integer Compare - Scalars Group
+
+# SVE conditionally terminate scalars
+CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000
+
+# SVE integer compare scalar count and limit
+WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4
+
+# SVE2 pointer conflict compare
+WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4
+
+### SVE Integer Wide Immediate - Unpredicated Group
+
+# SVE broadcast floating-point immediate (unpredicated)
+FDUP 00100101 esz:2 111 00 1110 imm:8 rd:5
+
+# SVE broadcast integer immediate (unpredicated)
+DUP_i 00100101 esz:2 111 00 011 . ........ rd:5 imm=%sh8_i8s
+
+# SVE integer add/subtract immediate (unpredicated)
+ADD_zzi 00100101 .. 100 000 11 . ........ ..... @rdn_sh_i8u
+SUB_zzi 00100101 .. 100 001 11 . ........ ..... @rdn_sh_i8u
+SUBR_zzi 00100101 .. 100 011 11 . ........ ..... @rdn_sh_i8u
+SQADD_zzi 00100101 .. 100 100 11 . ........ ..... @rdn_sh_i8u
+UQADD_zzi 00100101 .. 100 101 11 . ........ ..... @rdn_sh_i8u
+SQSUB_zzi 00100101 .. 100 110 11 . ........ ..... @rdn_sh_i8u
+UQSUB_zzi 00100101 .. 100 111 11 . ........ ..... @rdn_sh_i8u
+
+# SVE integer min/max immediate (unpredicated)
+SMAX_zzi 00100101 .. 101 000 110 ........ ..... @rdn_i8s
+UMAX_zzi 00100101 .. 101 001 110 ........ ..... @rdn_i8u
+SMIN_zzi 00100101 .. 101 010 110 ........ ..... @rdn_i8s
+UMIN_zzi 00100101 .. 101 011 110 ........ ..... @rdn_i8u
+
+# SVE integer multiply immediate (unpredicated)
+MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
+
+# SVE integer dot product (unpredicated)
+DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex dot product (vectors)
+CDOT_zzzz 01000100 esz:2 0 rm:5 0001 rot:2 rn:5 rd:5 ra=%reg_movprfx
+
+#### SVE Multiply - Indexed
+
+# SVE integer dot product (indexed)
+SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
+SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
+UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
+UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
+
+# SVE2 integer multiply-add (indexed)
+MLA_zzxz_h 01000100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=1
+MLA_zzxz_s 01000100 10 1 ..... 000010 ..... ..... @rrxr_2 esz=2
+MLA_zzxz_d 01000100 11 1 ..... 000010 ..... ..... @rrxr_1 esz=3
+MLS_zzxz_h 01000100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=1
+MLS_zzxz_s 01000100 10 1 ..... 000011 ..... ..... @rrxr_2 esz=2
+MLS_zzxz_d 01000100 11 1 ..... 000011 ..... ..... @rrxr_1 esz=3
+
+# SVE2 saturating multiply-add high (indexed)
+SQRDMLAH_zzxz_h 01000100 0. 1 ..... 000100 ..... ..... @rrxr_3 esz=1
+SQRDMLAH_zzxz_s 01000100 10 1 ..... 000100 ..... ..... @rrxr_2 esz=2
+SQRDMLAH_zzxz_d 01000100 11 1 ..... 000100 ..... ..... @rrxr_1 esz=3
+SQRDMLSH_zzxz_h 01000100 0. 1 ..... 000101 ..... ..... @rrxr_3 esz=1
+SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2
+SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3
+
+# SVE mixed sign dot product (indexed)
+USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2
+SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2
+
+# SVE2 saturating multiply-add (indexed)
+SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2
+SQDMLALB_zzxw_d 01000100 11 1 ..... 0010.0 ..... ..... @rrxr_2a esz=3
+SQDMLALT_zzxw_s 01000100 10 1 ..... 0010.1 ..... ..... @rrxr_3a esz=2
+SQDMLALT_zzxw_d 01000100 11 1 ..... 0010.1 ..... ..... @rrxr_2a esz=3
+SQDMLSLB_zzxw_s 01000100 10 1 ..... 0011.0 ..... ..... @rrxr_3a esz=2
+SQDMLSLB_zzxw_d 01000100 11 1 ..... 0011.0 ..... ..... @rrxr_2a esz=3
+SQDMLSLT_zzxw_s 01000100 10 1 ..... 0011.1 ..... ..... @rrxr_3a esz=2
+SQDMLSLT_zzxw_d 01000100 11 1 ..... 0011.1 ..... ..... @rrxr_2a esz=3
+
+# SVE2 complex integer dot product (indexed)
+CDOT_zzxw_s 01000100 10 1 index:2 rm:3 0100 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+CDOT_zzxw_d 01000100 11 1 index:1 rm:4 0100 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex integer multiply-add (indexed)
+CMLA_zzxz_h 01000100 10 1 index:2 rm:3 0110 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+CMLA_zzxz_s 01000100 11 1 index:1 rm:4 0110 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 complex saturating integer multiply-add (indexed)
+SQRDCMLAH_zzxz_h 01000100 10 1 index:2 rm:3 0111 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+SQRDCMLAH_zzxz_s 01000100 11 1 index:1 rm:4 0111 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE2 multiply-add long (indexed)
+SMLALB_zzxw_s 01000100 10 1 ..... 1000.0 ..... ..... @rrxr_3a esz=2
+SMLALB_zzxw_d 01000100 11 1 ..... 1000.0 ..... ..... @rrxr_2a esz=3
+SMLALT_zzxw_s 01000100 10 1 ..... 1000.1 ..... ..... @rrxr_3a esz=2
+SMLALT_zzxw_d 01000100 11 1 ..... 1000.1 ..... ..... @rrxr_2a esz=3
+UMLALB_zzxw_s 01000100 10 1 ..... 1001.0 ..... ..... @rrxr_3a esz=2
+UMLALB_zzxw_d 01000100 11 1 ..... 1001.0 ..... ..... @rrxr_2a esz=3
+UMLALT_zzxw_s 01000100 10 1 ..... 1001.1 ..... ..... @rrxr_3a esz=2
+UMLALT_zzxw_d 01000100 11 1 ..... 1001.1 ..... ..... @rrxr_2a esz=3
+SMLSLB_zzxw_s 01000100 10 1 ..... 1010.0 ..... ..... @rrxr_3a esz=2
+SMLSLB_zzxw_d 01000100 11 1 ..... 1010.0 ..... ..... @rrxr_2a esz=3
+SMLSLT_zzxw_s 01000100 10 1 ..... 1010.1 ..... ..... @rrxr_3a esz=2
+SMLSLT_zzxw_d 01000100 11 1 ..... 1010.1 ..... ..... @rrxr_2a esz=3
+UMLSLB_zzxw_s 01000100 10 1 ..... 1011.0 ..... ..... @rrxr_3a esz=2
+UMLSLB_zzxw_d 01000100 11 1 ..... 1011.0 ..... ..... @rrxr_2a esz=3
+UMLSLT_zzxw_s 01000100 10 1 ..... 1011.1 ..... ..... @rrxr_3a esz=2
+UMLSLT_zzxw_d 01000100 11 1 ..... 1011.1 ..... ..... @rrxr_2a esz=3
+
+# SVE2 integer multiply long (indexed)
+SMULLB_zzx_s 01000100 10 1 ..... 1100.0 ..... ..... @rrx_3a esz=2
+SMULLB_zzx_d 01000100 11 1 ..... 1100.0 ..... ..... @rrx_2a esz=3
+SMULLT_zzx_s 01000100 10 1 ..... 1100.1 ..... ..... @rrx_3a esz=2
+SMULLT_zzx_d 01000100 11 1 ..... 1100.1 ..... ..... @rrx_2a esz=3
+UMULLB_zzx_s 01000100 10 1 ..... 1101.0 ..... ..... @rrx_3a esz=2
+UMULLB_zzx_d 01000100 11 1 ..... 1101.0 ..... ..... @rrx_2a esz=3
+UMULLT_zzx_s 01000100 10 1 ..... 1101.1 ..... ..... @rrx_3a esz=2
+UMULLT_zzx_d 01000100 11 1 ..... 1101.1 ..... ..... @rrx_2a esz=3
+
+# SVE2 saturating multiply (indexed)
+SQDMULLB_zzx_s 01000100 10 1 ..... 1110.0 ..... ..... @rrx_3a esz=2
+SQDMULLB_zzx_d 01000100 11 1 ..... 1110.0 ..... ..... @rrx_2a esz=3
+SQDMULLT_zzx_s 01000100 10 1 ..... 1110.1 ..... ..... @rrx_3a esz=2
+SQDMULLT_zzx_d 01000100 11 1 ..... 1110.1 ..... ..... @rrx_2a esz=3
+
+# SVE2 saturating multiply high (indexed)
+SQDMULH_zzx_h 01000100 0. 1 ..... 111100 ..... ..... @rrx_3 esz=1
+SQDMULH_zzx_s 01000100 10 1 ..... 111100 ..... ..... @rrx_2 esz=2
+SQDMULH_zzx_d 01000100 11 1 ..... 111100 ..... ..... @rrx_1 esz=3
+SQRDMULH_zzx_h 01000100 0. 1 ..... 111101 ..... ..... @rrx_3 esz=1
+SQRDMULH_zzx_s 01000100 10 1 ..... 111101 ..... ..... @rrx_2 esz=2
+SQRDMULH_zzx_d 01000100 11 1 ..... 111101 ..... ..... @rrx_1 esz=3
+
+# SVE2 integer multiply (indexed)
+MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_3 esz=1
+MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_2 esz=2
+MUL_zzx_d 01000100 11 1 ..... 111110 ..... ..... @rrx_1 esz=3
+
+# SVE floating-point complex add (predicated)
+FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
+ rn=%reg_movprfx
+
+# SVE floating-point complex multiply-add (predicated)
+FCMLA_zpzzz 01100100 esz:2 0 rm:5 0 rot:2 pg:3 rn:5 rd:5 \
+ ra=%reg_movprfx
+
+# SVE floating-point complex multiply-add (indexed)
+FCMLA_zzxz 01100100 10 1 index:2 rm:3 0001 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx esz=1
+FCMLA_zzxz 01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \
+ ra=%reg_movprfx esz=2
+
+### SVE FP Multiply-Add Indexed Group
+
+# SVE floating-point multiply-add (indexed)
+FMLA_zzxz 01100100 0. 1 ..... 000000 ..... ..... @rrxr_3 esz=1
+FMLA_zzxz 01100100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
+FMLA_zzxz 01100100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
+FMLS_zzxz 01100100 0. 1 ..... 000001 ..... ..... @rrxr_3 esz=1
+FMLS_zzxz 01100100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
+FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
+
+### SVE FP Multiply Indexed Group
+
+# SVE floating-point multiply (indexed)
+FMUL_zzx 01100100 0. 1 ..... 001000 ..... ..... @rrx_3 esz=1
+FMUL_zzx 01100100 10 1 ..... 001000 ..... ..... @rrx_2 esz=2
+FMUL_zzx 01100100 11 1 ..... 001000 ..... ..... @rrx_1 esz=3
+
+### SVE FP Fast Reduction Group
+
+FADDV 01100101 .. 000 000 001 ... ..... ..... @rd_pg_rn
+FMAXNMV 01100101 .. 000 100 001 ... ..... ..... @rd_pg_rn
+FMINNMV 01100101 .. 000 101 001 ... ..... ..... @rd_pg_rn
+FMAXV 01100101 .. 000 110 001 ... ..... ..... @rd_pg_rn
+FMINV 01100101 .. 000 111 001 ... ..... ..... @rd_pg_rn
+
+## SVE Floating Point Unary Operations - Unpredicated Group
+
+FRECPE 01100101 .. 001 110 001100 ..... ..... @rd_rn
+FRSQRTE 01100101 .. 001 111 001100 ..... ..... @rd_rn
+
+### SVE FP Compare with Zero Group
+
+FCMGE_ppz0 01100101 .. 0100 00 001 ... ..... 0 .... @pd_pg_rn
+FCMGT_ppz0 01100101 .. 0100 00 001 ... ..... 1 .... @pd_pg_rn
+FCMLT_ppz0 01100101 .. 0100 01 001 ... ..... 0 .... @pd_pg_rn
+FCMLE_ppz0 01100101 .. 0100 01 001 ... ..... 1 .... @pd_pg_rn
+FCMEQ_ppz0 01100101 .. 0100 10 001 ... ..... 0 .... @pd_pg_rn
+FCMNE_ppz0 01100101 .. 0100 11 001 ... ..... 0 .... @pd_pg_rn
+
+### SVE FP Accumulating Reduction Group
+
+# SVE floating-point serial reduction (predicated)
+FADDA 01100101 .. 011 000 001 ... ..... ..... @rdn_pg_rm
+
+### SVE Floating Point Arithmetic - Unpredicated Group
+
+# SVE floating-point arithmetic (unpredicated)
+FADD_zzz 01100101 .. 0 ..... 000 000 ..... ..... @rd_rn_rm
+FSUB_zzz 01100101 .. 0 ..... 000 001 ..... ..... @rd_rn_rm
+FMUL_zzz 01100101 .. 0 ..... 000 010 ..... ..... @rd_rn_rm
+FTSMUL 01100101 .. 0 ..... 000 011 ..... ..... @rd_rn_rm
+FRECPS 01100101 .. 0 ..... 000 110 ..... ..... @rd_rn_rm
+FRSQRTS 01100101 .. 0 ..... 000 111 ..... ..... @rd_rn_rm
+
+### SVE FP Arithmetic Predicated Group
+
+# SVE floating-point arithmetic (predicated)
+FADD_zpzz 01100101 .. 00 0000 100 ... ..... ..... @rdn_pg_rm
+FSUB_zpzz 01100101 .. 00 0001 100 ... ..... ..... @rdn_pg_rm
+FMUL_zpzz 01100101 .. 00 0010 100 ... ..... ..... @rdn_pg_rm
+FSUB_zpzz 01100101 .. 00 0011 100 ... ..... ..... @rdm_pg_rn # FSUBR
+FMAXNM_zpzz 01100101 .. 00 0100 100 ... ..... ..... @rdn_pg_rm
+FMINNM_zpzz 01100101 .. 00 0101 100 ... ..... ..... @rdn_pg_rm
+FMAX_zpzz 01100101 .. 00 0110 100 ... ..... ..... @rdn_pg_rm
+FMIN_zpzz 01100101 .. 00 0111 100 ... ..... ..... @rdn_pg_rm
+FABD 01100101 .. 00 1000 100 ... ..... ..... @rdn_pg_rm
+FSCALE 01100101 .. 00 1001 100 ... ..... ..... @rdn_pg_rm
+FMULX 01100101 .. 00 1010 100 ... ..... ..... @rdn_pg_rm
+FDIV 01100101 .. 00 1100 100 ... ..... ..... @rdm_pg_rn # FDIVR
+FDIV 01100101 .. 00 1101 100 ... ..... ..... @rdn_pg_rm
+
+# SVE floating-point arithmetic with immediate (predicated)
+FADD_zpzi 01100101 .. 011 000 100 ... 0000 . ..... @rdn_i1
+FSUB_zpzi 01100101 .. 011 001 100 ... 0000 . ..... @rdn_i1
+FMUL_zpzi 01100101 .. 011 010 100 ... 0000 . ..... @rdn_i1
+FSUBR_zpzi 01100101 .. 011 011 100 ... 0000 . ..... @rdn_i1
+FMAXNM_zpzi 01100101 .. 011 100 100 ... 0000 . ..... @rdn_i1
+FMINNM_zpzi 01100101 .. 011 101 100 ... 0000 . ..... @rdn_i1
+FMAX_zpzi 01100101 .. 011 110 100 ... 0000 . ..... @rdn_i1
+FMIN_zpzi 01100101 .. 011 111 100 ... 0000 . ..... @rdn_i1
+
+# SVE floating-point trig multiply-add coefficient
+FTMAD 01100101 esz:2 010 imm:3 100000 rm:5 rd:5 rn=%reg_movprfx
+
+### SVE FP Multiply-Add Group
+
+# SVE floating-point multiply-accumulate writing addend
+FMLA_zpzzz 01100101 .. 1 ..... 000 ... ..... ..... @rda_pg_rn_rm
+FMLS_zpzzz 01100101 .. 1 ..... 001 ... ..... ..... @rda_pg_rn_rm
+FNMLA_zpzzz 01100101 .. 1 ..... 010 ... ..... ..... @rda_pg_rn_rm
+FNMLS_zpzzz 01100101 .. 1 ..... 011 ... ..... ..... @rda_pg_rn_rm
+
+# SVE floating-point multiply-accumulate writing multiplicand
+# Alter the operand extraction order and reuse the helpers from above.
+# FMAD, FMSB, FNMAD, FNMS
+FMLA_zpzzz 01100101 .. 1 ..... 100 ... ..... ..... @rdn_pg_rm_ra
+FMLS_zpzzz 01100101 .. 1 ..... 101 ... ..... ..... @rdn_pg_rm_ra
+FNMLA_zpzzz 01100101 .. 1 ..... 110 ... ..... ..... @rdn_pg_rm_ra
+FNMLS_zpzzz 01100101 .. 1 ..... 111 ... ..... ..... @rdn_pg_rm_ra
+
+### SVE FP Unary Operations Predicated Group
+
+# SVE floating-point convert precision
+FCVT_sh 01100101 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
+FCVT_hs 01100101 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
+BFCVT 01100101 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVT_dh 01100101 11 0010 00 101 ... ..... ..... @rd_pg_rn_e0
+FCVT_hd 01100101 11 0010 01 101 ... ..... ..... @rd_pg_rn_e0
+FCVT_ds 01100101 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVT_sd 01100101 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
+
+# SVE floating-point convert to integer
+FCVTZS_hh 01100101 01 011 01 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_hh 01100101 01 011 01 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_hs 01100101 01 011 10 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_hs 01100101 01 011 10 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_hd 01100101 01 011 11 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_hd 01100101 01 011 11 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_ss 01100101 10 011 10 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_ss 01100101 10 011 10 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_ds 01100101 11 011 00 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_ds 01100101 11 011 00 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_sd 01100101 11 011 10 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_sd 01100101 11 011 10 1 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZS_dd 01100101 11 011 11 0 101 ... ..... ..... @rd_pg_rn_e0
+FCVTZU_dd 01100101 11 011 11 1 101 ... ..... ..... @rd_pg_rn_e0
+
+# SVE floating-point round to integral value
+FRINTN 01100101 .. 000 000 101 ... ..... ..... @rd_pg_rn
+FRINTP 01100101 .. 000 001 101 ... ..... ..... @rd_pg_rn
+FRINTM 01100101 .. 000 010 101 ... ..... ..... @rd_pg_rn
+FRINTZ 01100101 .. 000 011 101 ... ..... ..... @rd_pg_rn
+FRINTA 01100101 .. 000 100 101 ... ..... ..... @rd_pg_rn
+FRINTX 01100101 .. 000 110 101 ... ..... ..... @rd_pg_rn
+FRINTI 01100101 .. 000 111 101 ... ..... ..... @rd_pg_rn
+
+# SVE floating-point unary operations
+FRECPX 01100101 .. 001 100 101 ... ..... ..... @rd_pg_rn
+FSQRT 01100101 .. 001 101 101 ... ..... ..... @rd_pg_rn
+
+# SVE integer convert to floating-point
+SCVTF_hh 01100101 01 010 01 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_sh 01100101 01 010 10 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_dh 01100101 01 010 11 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_ss 01100101 10 010 10 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_sd 01100101 11 010 00 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_ds 01100101 11 010 10 0 101 ... ..... ..... @rd_pg_rn_e0
+SCVTF_dd 01100101 11 010 11 0 101 ... ..... ..... @rd_pg_rn_e0
+
+UCVTF_hh 01100101 01 010 01 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_sh 01100101 01 010 10 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_dh 01100101 01 010 11 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_ss 01100101 10 010 10 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_sd 01100101 11 010 00 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_ds 01100101 11 010 10 1 101 ... ..... ..... @rd_pg_rn_e0
+UCVTF_dd 01100101 11 010 11 1 101 ... ..... ..... @rd_pg_rn_e0
+
+### SVE Memory - 32-bit Gather and Unsized Contiguous Group
+
+# SVE load predicate register
+LDR_pri 10000101 10 ...... 000 ... ..... 0 .... @pd_rn_i9
+
+# SVE load vector register
+LDR_zri 10000101 10 ...... 010 ... ..... ..... @rd_rn_i9
+
+# SVE load and broadcast element
+LD1R_zpri 1000010 .. 1 imm:6 1.. pg:3 rn:5 rd:5 \
+ &rpri_load dtype=%dtype_23_13 nreg=0
+
+# SVE 32-bit gather load (scalar plus 32-bit unscaled offsets)
+# SVE 32-bit gather load (scalar plus 32-bit scaled offsets)
+LD1_zprz 1000010 00 .0 ..... 0.. ... ..... ..... \
+ @rprr_g_load_xs_u esz=2 msz=0 scale=0
+LD1_zprz 1000010 01 .. ..... 0.. ... ..... ..... \
+ @rprr_g_load_xs_u_sc esz=2 msz=1
+LD1_zprz 1000010 10 .. ..... 01. ... ..... ..... \
+ @rprr_g_load_xs_sc esz=2 msz=2 u=1
+
+# SVE 32-bit gather load (vector plus immediate)
+LD1_zpiz 1000010 .. 01 ..... 1.. ... ..... ..... \
+ @rpri_g_load esz=2
+
+### SVE Memory Contiguous Load Group
+
+# SVE contiguous load (scalar plus scalar)
+LD_zprr 1010010 .... ..... 010 ... ..... ..... @rprr_load_dt nreg=0
+
+# SVE contiguous first-fault load (scalar plus scalar)
+LDFF1_zprr 1010010 .... ..... 011 ... ..... ..... @rprr_load_dt nreg=0
+
+# SVE contiguous load (scalar plus immediate)
+LD_zpri 1010010 .... 0.... 101 ... ..... ..... @rpri_load_dt nreg=0
+
+# SVE contiguous non-fault load (scalar plus immediate)
+LDNF1_zpri 1010010 .... 1.... 101 ... ..... ..... @rpri_load_dt nreg=0
+
+# SVE contiguous non-temporal load (scalar plus scalar)
+# LDNT1B, LDNT1H, LDNT1W, LDNT1D
+# SVE load multiple structures (scalar plus scalar)
+# LD2B, LD2H, LD2W, LD2D; etc.
+LD_zprr 1010010 .. nreg:2 ..... 110 ... ..... ..... @rprr_load_msz
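+
+# (nreg == 0 selects the single-register non-temporal form; nreg == 1..3
+# select LD2/LD3/LD4, i.e. nreg + 1 consecutive registers.)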
+
+# SVE contiguous non-temporal load (scalar plus immediate)
+# LDNT1B, LDNT1H, LDNT1W, LDNT1D
+# SVE load multiple structures (scalar plus immediate)
+# LD2B, LD2H, LD2W, LD2D; etc.
+LD_zpri 1010010 .. nreg:2 0.... 111 ... ..... ..... @rpri_load_msz
+
+# SVE load and broadcast quadword (scalar plus scalar)
+LD1RQ_zprr 1010010 .. 00 ..... 000 ... ..... ..... \
+ @rprr_load_msz nreg=0
+LD1RO_zprr 1010010 .. 01 ..... 000 ... ..... ..... \
+ @rprr_load_msz nreg=0
+
+# SVE load and broadcast quadword (scalar plus immediate)
+# LD1RQB, LD1RQH, LD1RQS, LD1RQD
+LD1RQ_zpri 1010010 .. 00 0.... 001 ... ..... ..... \
+ @rpri_load_msz nreg=0
+LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \
+ @rpri_load_msz nreg=0
+
+# SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets)
+PRF 1000010 00 -1 ----- 0-- --- ----- 0 ----
+
+# SVE 32-bit gather prefetch (vector plus immediate)
+PRF 1000010 -- 00 ----- 111 --- ----- 0 ----
+
+# SVE contiguous prefetch (scalar plus immediate)
+PRF 1000010 11 1- ----- 0-- --- ----- 0 ----
+
+# SVE contiguous prefetch (scalar plus scalar)
+PRF_rr 1000010 -- 00 rm:5 110 --- ----- 0 ----
+
+### SVE Memory 64-bit Gather Group
+
+# SVE 64-bit gather load (scalar plus 32-bit unpacked unscaled offsets)
+# SVE 64-bit gather load (scalar plus 32-bit unpacked scaled offsets)
+LD1_zprz 1100010 00 .0 ..... 0.. ... ..... ..... \
+ @rprr_g_load_xs_u esz=3 msz=0 scale=0
+LD1_zprz 1100010 01 .. ..... 0.. ... ..... ..... \
+ @rprr_g_load_xs_u_sc esz=3 msz=1
+LD1_zprz 1100010 10 .. ..... 0.. ... ..... ..... \
+ @rprr_g_load_xs_u_sc esz=3 msz=2
+LD1_zprz 1100010 11 .. ..... 01. ... ..... ..... \
+ @rprr_g_load_xs_sc esz=3 msz=3 u=1
+
+# SVE 64-bit gather load (scalar plus 64-bit unscaled offsets)
+# SVE 64-bit gather load (scalar plus 64-bit scaled offsets)
+LD1_zprz 1100010 00 10 ..... 1.. ... ..... ..... \
+ @rprr_g_load_u esz=3 msz=0 scale=0
+LD1_zprz 1100010 01 1. ..... 1.. ... ..... ..... \
+ @rprr_g_load_u_sc esz=3 msz=1
+LD1_zprz 1100010 10 1. ..... 1.. ... ..... ..... \
+ @rprr_g_load_u_sc esz=3 msz=2
+LD1_zprz 1100010 11 1. ..... 11. ... ..... ..... \
+ @rprr_g_load_sc esz=3 msz=3 u=1
+
+# SVE 64-bit gather load (vector plus immediate)
+LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
+ @rpri_g_load esz=3
+
+# SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets)
+PRF 1100010 00 11 ----- 1-- --- ----- 0 ----
+
+# SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets)
+PRF 1100010 00 -1 ----- 0-- --- ----- 0 ----
+
+# SVE 64-bit gather prefetch (vector plus immediate)
+PRF 1100010 -- 00 ----- 111 --- ----- 0 ----
+
+### SVE Memory Store Group
+
+# SVE store predicate register
+STR_pri 1110010 11 0. ..... 000 ... ..... 0 .... @pd_rn_i9
+
+# SVE store vector register
+STR_zri 1110010 11 0. ..... 010 ... ..... ..... @rd_rn_i9
+
+# SVE contiguous store (scalar plus immediate)
+# ST1B, ST1H, ST1W, ST1D; require msz <= esz
+ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \
+ @rpri_store_msz nreg=0
+
+# SVE contiguous store (scalar plus scalar)
+# ST1B, ST1H, ST1W, ST1D; require msz <= esz
+# Enumerate msz lest we conflict with STR_zri.
+ST_zprr 1110010 00 .. ..... 010 ... ..... ..... \
+ @rprr_store_esz_n0 msz=0
+ST_zprr 1110010 01 .. ..... 010 ... ..... ..... \
+ @rprr_store_esz_n0 msz=1
+ST_zprr 1110010 10 .. ..... 010 ... ..... ..... \
+ @rprr_store_esz_n0 msz=2
+ST_zprr 1110010 11 11 ..... 010 ... ..... ..... \
+ @rprr_store msz=3 esz=3 nreg=0
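+
+# (Only esz=3 is valid with msz=3; leaving esz:2 free there would let the
+# esz values with bit 22 clear collide with the STR_zri encoding.)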
+
+# SVE contiguous non-temporal store (scalar plus immediate) (nreg == 0)
+# SVE store multiple structures (scalar plus immediate) (nreg != 0)
+ST_zpri 1110010 .. nreg:2 1.... 111 ... ..... ..... \
+ @rpri_store_msz esz=%size_23
+
+# SVE contiguous non-temporal store (scalar plus scalar) (nreg == 0)
+# SVE store multiple structures (scalar plus scalar) (nreg != 0)
+ST_zprr 1110010 msz:2 nreg:2 ..... 011 ... ..... ..... \
+ @rprr_store esz=%size_23
+
+# SVE 32-bit scatter store (scalar plus 32-bit scaled offsets)
+# Require msz > 0 && msz <= esz.
+ST1_zprz 1110010 .. 11 ..... 100 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=2 scale=1
+ST1_zprz 1110010 .. 11 ..... 110 ... ..... ..... \
+ @rprr_scatter_store xs=1 esz=2 scale=1
+
+# SVE 32-bit scatter store (scalar plus 32-bit unscaled offsets)
+# Require msz <= esz.
+ST1_zprz 1110010 .. 10 ..... 100 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=2 scale=0
+ST1_zprz 1110010 .. 10 ..... 110 ... ..... ..... \
+ @rprr_scatter_store xs=1 esz=2 scale=0
+
+# SVE 64-bit scatter store (scalar plus 64-bit scaled offset)
+# Require msz > 0
+ST1_zprz 1110010 .. 01 ..... 101 ... ..... ..... \
+ @rprr_scatter_store xs=2 esz=3 scale=1
+
+# SVE 64-bit scatter store (scalar plus 64-bit unscaled offset)
+ST1_zprz 1110010 .. 00 ..... 101 ... ..... ..... \
+ @rprr_scatter_store xs=2 esz=3 scale=0
+
+# SVE 64-bit scatter store (vector plus immediate)
+ST1_zpiz 1110010 .. 10 ..... 101 ... ..... ..... \
+ @rpri_scatter_store esz=3
+
+# SVE 32-bit scatter store (vector plus immediate)
+ST1_zpiz 1110010 .. 11 ..... 101 ... ..... ..... \
+ @rpri_scatter_store esz=2
+
+# SVE 64-bit scatter store (scalar plus unpacked 32-bit scaled offset)
+# Require msz > 0
+ST1_zprz 1110010 .. 01 ..... 100 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=3 scale=1
+ST1_zprz 1110010 .. 01 ..... 110 ... ..... ..... \
+ @rprr_scatter_store xs=1 esz=3 scale=1
+
+# SVE 64-bit scatter store (scalar plus unpacked 32-bit unscaled offset)
+ST1_zprz 1110010 .. 00 ..... 100 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=3 scale=0
+ST1_zprz 1110010 .. 00 ..... 110 ... ..... ..... \
+ @rprr_scatter_store xs=1 esz=3 scale=0
+
+#### SVE2 Support
+
+### SVE2 Integer Multiply - Unpredicated
+
+# SVE2 integer multiply vectors (unpredicated)
+MUL_zzz 00000100 .. 1 ..... 0110 00 ..... ..... @rd_rn_rm
+SMULH_zzz 00000100 .. 1 ..... 0110 10 ..... ..... @rd_rn_rm
+UMULH_zzz 00000100 .. 1 ..... 0110 11 ..... ..... @rd_rn_rm
+PMUL_zzz 00000100 00 1 ..... 0110 01 ..... ..... @rd_rn_rm_e0
+
+# SVE2 signed saturating doubling multiply high (unpredicated)
+SQDMULH_zzz 00000100 .. 1 ..... 0111 00 ..... ..... @rd_rn_rm
+SQRDMULH_zzz 00000100 .. 1 ..... 0111 01 ..... ..... @rd_rn_rm
+
+### SVE2 Integer - Predicated
+
+SADALP_zpzz 01000100 .. 000 100 101 ... ..... ..... @rdm_pg_rn
+UADALP_zpzz 01000100 .. 000 101 101 ... ..... ..... @rdm_pg_rn
+
+### SVE2 integer unary operations (predicated)
+
+URECPE 01000100 .. 000 000 101 ... ..... ..... @rd_pg_rn
+URSQRTE 01000100 .. 000 001 101 ... ..... ..... @rd_pg_rn
+SQABS 01000100 .. 001 000 101 ... ..... ..... @rd_pg_rn
+SQNEG 01000100 .. 001 001 101 ... ..... ..... @rd_pg_rn
+
+### SVE2 saturating/rounding bitwise shift left (predicated)
+
+SRSHL 01000100 .. 000 010 100 ... ..... ..... @rdn_pg_rm
+URSHL 01000100 .. 000 011 100 ... ..... ..... @rdn_pg_rm
+SRSHL 01000100 .. 000 110 100 ... ..... ..... @rdm_pg_rn # SRSHLR
+URSHL 01000100 .. 000 111 100 ... ..... ..... @rdm_pg_rn # URSHLR
+
+SQSHL 01000100 .. 001 000 100 ... ..... ..... @rdn_pg_rm
+UQSHL 01000100 .. 001 001 100 ... ..... ..... @rdn_pg_rm
+SQSHL 01000100 .. 001 100 100 ... ..... ..... @rdm_pg_rn # SQSHLR
+UQSHL 01000100 .. 001 101 100 ... ..... ..... @rdm_pg_rn # UQSHLR
+
+SQRSHL 01000100 .. 001 010 100 ... ..... ..... @rdn_pg_rm
+UQRSHL 01000100 .. 001 011 100 ... ..... ..... @rdn_pg_rm
+SQRSHL 01000100 .. 001 110 100 ... ..... ..... @rdm_pg_rn # SQRSHLR
+UQRSHL 01000100 .. 001 111 100 ... ..... ..... @rdm_pg_rn # UQRSHLR
+
+### SVE2 integer halving add/subtract (predicated)
+
+SHADD 01000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
+UHADD 01000100 .. 010 001 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 010 100 ... ..... ..... @rdn_pg_rm
+UHSUB 01000100 .. 010 011 100 ... ..... ..... @rdn_pg_rm
+SRHADD 01000100 .. 010 100 100 ... ..... ..... @rdn_pg_rm
+URHADD 01000100 .. 010 101 100 ... ..... ..... @rdn_pg_rm
+SHSUB 01000100 .. 010 110 100 ... ..... ..... @rdm_pg_rn # SHSUBR
+UHSUB 01000100 .. 010 111 100 ... ..... ..... @rdm_pg_rn # UHSUBR
+
+### SVE2 integer pairwise arithmetic
+
+ADDP 01000100 .. 010 001 101 ... ..... ..... @rdn_pg_rm
+SMAXP 01000100 .. 010 100 101 ... ..... ..... @rdn_pg_rm
+UMAXP 01000100 .. 010 101 101 ... ..... ..... @rdn_pg_rm
+SMINP 01000100 .. 010 110 101 ... ..... ..... @rdn_pg_rm
+UMINP 01000100 .. 010 111 101 ... ..... ..... @rdn_pg_rm
+
+### SVE2 saturating add/subtract (predicated)
+
+SQADD_zpzz 01000100 .. 011 000 100 ... ..... ..... @rdn_pg_rm
+UQADD_zpzz 01000100 .. 011 001 100 ... ..... ..... @rdn_pg_rm
+SQSUB_zpzz 01000100 .. 011 010 100 ... ..... ..... @rdn_pg_rm
+UQSUB_zpzz 01000100 .. 011 011 100 ... ..... ..... @rdn_pg_rm
+SUQADD 01000100 .. 011 100 100 ... ..... ..... @rdn_pg_rm
+USQADD 01000100 .. 011 101 100 ... ..... ..... @rdn_pg_rm
+SQSUB_zpzz 01000100 .. 011 110 100 ... ..... ..... @rdm_pg_rn # SQSUBR
+UQSUB_zpzz 01000100 .. 011 111 100 ... ..... ..... @rdm_pg_rn # UQSUBR
+
+#### SVE2 Widening Integer Arithmetic
+
+## SVE2 integer add/subtract long
+
+SADDLB 01000101 .. 0 ..... 00 0000 ..... ..... @rd_rn_rm
+SADDLT 01000101 .. 0 ..... 00 0001 ..... ..... @rd_rn_rm
+UADDLB 01000101 .. 0 ..... 00 0010 ..... ..... @rd_rn_rm
+UADDLT 01000101 .. 0 ..... 00 0011 ..... ..... @rd_rn_rm
+
+SSUBLB 01000101 .. 0 ..... 00 0100 ..... ..... @rd_rn_rm
+SSUBLT 01000101 .. 0 ..... 00 0101 ..... ..... @rd_rn_rm
+USUBLB 01000101 .. 0 ..... 00 0110 ..... ..... @rd_rn_rm
+USUBLT 01000101 .. 0 ..... 00 0111 ..... ..... @rd_rn_rm
+
+SABDLB 01000101 .. 0 ..... 00 1100 ..... ..... @rd_rn_rm
+SABDLT 01000101 .. 0 ..... 00 1101 ..... ..... @rd_rn_rm
+UABDLB 01000101 .. 0 ..... 00 1110 ..... ..... @rd_rn_rm
+UABDLT 01000101 .. 0 ..... 00 1111 ..... ..... @rd_rn_rm
+
+## SVE2 integer add/subtract interleaved long
+
+SADDLBT 01000101 .. 0 ..... 1000 00 ..... ..... @rd_rn_rm
+SSUBLBT 01000101 .. 0 ..... 1000 10 ..... ..... @rd_rn_rm
+SSUBLTB 01000101 .. 0 ..... 1000 11 ..... ..... @rd_rn_rm
+
+## SVE2 integer add/subtract wide
+
+SADDWB 01000101 .. 0 ..... 010 000 ..... ..... @rd_rn_rm
+SADDWT 01000101 .. 0 ..... 010 001 ..... ..... @rd_rn_rm
+UADDWB 01000101 .. 0 ..... 010 010 ..... ..... @rd_rn_rm
+UADDWT 01000101 .. 0 ..... 010 011 ..... ..... @rd_rn_rm
+
+SSUBWB 01000101 .. 0 ..... 010 100 ..... ..... @rd_rn_rm
+SSUBWT 01000101 .. 0 ..... 010 101 ..... ..... @rd_rn_rm
+USUBWB 01000101 .. 0 ..... 010 110 ..... ..... @rd_rn_rm
+USUBWT 01000101 .. 0 ..... 010 111 ..... ..... @rd_rn_rm
+
+## SVE2 integer multiply long
+
+SQDMULLB_zzz 01000101 .. 0 ..... 011 000 ..... ..... @rd_rn_rm
+SQDMULLT_zzz 01000101 .. 0 ..... 011 001 ..... ..... @rd_rn_rm
+PMULLB 01000101 .. 0 ..... 011 010 ..... ..... @rd_rn_rm
+PMULLT 01000101 .. 0 ..... 011 011 ..... ..... @rd_rn_rm
+SMULLB_zzz 01000101 .. 0 ..... 011 100 ..... ..... @rd_rn_rm
+SMULLT_zzz 01000101 .. 0 ..... 011 101 ..... ..... @rd_rn_rm
+UMULLB_zzz 01000101 .. 0 ..... 011 110 ..... ..... @rd_rn_rm
+UMULLT_zzz 01000101 .. 0 ..... 011 111 ..... ..... @rd_rn_rm
+
+## SVE2 bitwise shift left long
+
+# Note bit23 == 0 is handled by esz > 0 in do_sve2_shll_tb.
+SSHLLB 01000101 .. 0 ..... 1010 00 ..... ..... @rd_rn_tszimm_shl
+SSHLLT 01000101 .. 0 ..... 1010 01 ..... ..... @rd_rn_tszimm_shl
+USHLLB 01000101 .. 0 ..... 1010 10 ..... ..... @rd_rn_tszimm_shl
+USHLLT 01000101 .. 0 ..... 1010 11 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 bitwise exclusive-or interleaved
+
+EORBT 01000101 .. 0 ..... 10010 0 ..... ..... @rd_rn_rm
+EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
+
+## SVE integer matrix multiply accumulate
+
+SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+
+## SVE2 bitwise permute
+
+BEXT 01000101 .. 0 ..... 1011 00 ..... ..... @rd_rn_rm
+BDEP 01000101 .. 0 ..... 1011 01 ..... ..... @rd_rn_rm
+BGRP 01000101 .. 0 ..... 1011 10 ..... ..... @rd_rn_rm
+
+#### SVE2 Accumulate
+
+## SVE2 complex integer add
+
+CADD_rot90 01000101 .. 00000 0 11011 0 ..... ..... @rdn_rm
+CADD_rot270 01000101 .. 00000 0 11011 1 ..... ..... @rdn_rm
+SQCADD_rot90 01000101 .. 00000 1 11011 0 ..... ..... @rdn_rm
+SQCADD_rot270 01000101 .. 00000 1 11011 1 ..... ..... @rdn_rm
+
+## SVE2 integer absolute difference and accumulate long
+
+SABALB 01000101 .. 0 ..... 1100 00 ..... ..... @rda_rn_rm
+SABALT 01000101 .. 0 ..... 1100 01 ..... ..... @rda_rn_rm
+UABALB 01000101 .. 0 ..... 1100 10 ..... ..... @rda_rn_rm
+UABALT 01000101 .. 0 ..... 1100 11 ..... ..... @rda_rn_rm
+
+## SVE2 integer add/subtract long with carry
+
+# ADC and SBC decoded via size in helper dispatch.
+ADCLB 01000101 .. 0 ..... 11010 0 ..... ..... @rda_rn_rm
+ADCLT 01000101 .. 0 ..... 11010 1 ..... ..... @rda_rn_rm
+
+## SVE2 bitwise shift right and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SSRA 01000101 .. 0 ..... 1110 00 ..... ..... @rd_rn_tszimm_shr
+USRA 01000101 .. 0 ..... 1110 01 ..... ..... @rd_rn_tszimm_shr
+SRSRA 01000101 .. 0 ..... 1110 10 ..... ..... @rd_rn_tszimm_shr
+URSRA 01000101 .. 0 ..... 1110 11 ..... ..... @rd_rn_tszimm_shr
+
+## SVE2 bitwise shift and insert
+
+SRI 01000101 .. 0 ..... 11110 0 ..... ..... @rd_rn_tszimm_shr
+SLI 01000101 .. 0 ..... 11110 1 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 integer absolute difference and accumulate
+
+# TODO: Use @rda and %reg_movprfx here.
+SABA 01000101 .. 0 ..... 11111 0 ..... ..... @rd_rn_rm
+UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm
+
+#### SVE2 Narrowing
+
+## SVE2 saturating extract narrow
+
+# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0.
+SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl
+SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... @rd_rn_tszimm_shl
+UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
+UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
+SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
+SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
+
+## SVE2 bitwise shift right narrow
+
+# Bit 23 == 0 is handled by esz > 0 in the translator.
+SQSHRUNB 01000101 .. 1 ..... 00 0000 ..... ..... @rd_rn_tszimm_shr
+SQSHRUNT 01000101 .. 1 ..... 00 0001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNB 01000101 .. 1 ..... 00 0010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRUNT 01000101 .. 1 ..... 00 0011 ..... ..... @rd_rn_tszimm_shr
+SHRNB 01000101 .. 1 ..... 00 0100 ..... ..... @rd_rn_tszimm_shr
+SHRNT 01000101 .. 1 ..... 00 0101 ..... ..... @rd_rn_tszimm_shr
+RSHRNB 01000101 .. 1 ..... 00 0110 ..... ..... @rd_rn_tszimm_shr
+RSHRNT 01000101 .. 1 ..... 00 0111 ..... ..... @rd_rn_tszimm_shr
+SQSHRNB 01000101 .. 1 ..... 00 1000 ..... ..... @rd_rn_tszimm_shr
+SQSHRNT 01000101 .. 1 ..... 00 1001 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNB 01000101 .. 1 ..... 00 1010 ..... ..... @rd_rn_tszimm_shr
+SQRSHRNT 01000101 .. 1 ..... 00 1011 ..... ..... @rd_rn_tszimm_shr
+UQSHRNB 01000101 .. 1 ..... 00 1100 ..... ..... @rd_rn_tszimm_shr
+UQSHRNT 01000101 .. 1 ..... 00 1101 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNB 01000101 .. 1 ..... 00 1110 ..... ..... @rd_rn_tszimm_shr
+UQRSHRNT 01000101 .. 1 ..... 00 1111 ..... ..... @rd_rn_tszimm_shr
+
+## SVE2 integer add/subtract narrow high part
+
+ADDHNB 01000101 .. 1 ..... 011 000 ..... ..... @rd_rn_rm
+ADDHNT 01000101 .. 1 ..... 011 001 ..... ..... @rd_rn_rm
+RADDHNB 01000101 .. 1 ..... 011 010 ..... ..... @rd_rn_rm
+RADDHNT 01000101 .. 1 ..... 011 011 ..... ..... @rd_rn_rm
+SUBHNB 01000101 .. 1 ..... 011 100 ..... ..... @rd_rn_rm
+SUBHNT 01000101 .. 1 ..... 011 101 ..... ..... @rd_rn_rm
+RSUBHNB 01000101 .. 1 ..... 011 110 ..... ..... @rd_rn_rm
+RSUBHNT 01000101 .. 1 ..... 011 111 ..... ..... @rd_rn_rm
+
+### SVE2 Character Match
+
+MATCH 01000101 .. 1 ..... 100 ... ..... 0 .... @pd_pg_rn_rm
+NMATCH 01000101 .. 1 ..... 100 ... ..... 1 .... @pd_pg_rn_rm
+
+### SVE2 Histogram Computation
+
+HISTCNT 01000101 .. 1 ..... 110 ... ..... ..... @rd_pg_rn_rm
+HISTSEG 01000101 .. 1 ..... 101 000 ..... ..... @rd_rn_rm
+
+## SVE2 floating-point pairwise operations
+
+FADDP 01100100 .. 010 00 0 100 ... ..... ..... @rdn_pg_rm
+FMAXNMP 01100100 .. 010 10 0 100 ... ..... ..... @rdn_pg_rm
+FMINNMP 01100100 .. 010 10 1 100 ... ..... ..... @rdn_pg_rm
+FMAXP 01100100 .. 010 11 0 100 ... ..... ..... @rdn_pg_rm
+FMINP 01100100 .. 010 11 1 100 ... ..... ..... @rdn_pg_rm
+
+#### SVE Integer Multiply-Add (unpredicated)
+
+## SVE2 saturating multiply-add long
+
+SQDMLALB_zzzw 01000100 .. 0 ..... 0110 00 ..... ..... @rda_rn_rm
+SQDMLALT_zzzw 01000100 .. 0 ..... 0110 01 ..... ..... @rda_rn_rm
+SQDMLSLB_zzzw 01000100 .. 0 ..... 0110 10 ..... ..... @rda_rn_rm
+SQDMLSLT_zzzw 01000100 .. 0 ..... 0110 11 ..... ..... @rda_rn_rm
+
+## SVE2 saturating multiply-add interleaved long
+
+SQDMLALBT 01000100 .. 0 ..... 00001 0 ..... ..... @rda_rn_rm
+SQDMLSLBT 01000100 .. 0 ..... 00001 1 ..... ..... @rda_rn_rm
+
+## SVE2 saturating multiply-add high
+
+SQRDMLAH_zzzz 01000100 .. 0 ..... 01110 0 ..... ..... @rda_rn_rm
+SQRDMLSH_zzzz 01000100 .. 0 ..... 01110 1 ..... ..... @rda_rn_rm
+
+## SVE2 integer multiply-add long
+
+SMLALB_zzzw 01000100 .. 0 ..... 010 000 ..... ..... @rda_rn_rm
+SMLALT_zzzw 01000100 .. 0 ..... 010 001 ..... ..... @rda_rn_rm
+UMLALB_zzzw 01000100 .. 0 ..... 010 010 ..... ..... @rda_rn_rm
+UMLALT_zzzw 01000100 .. 0 ..... 010 011 ..... ..... @rda_rn_rm
+SMLSLB_zzzw 01000100 .. 0 ..... 010 100 ..... ..... @rda_rn_rm
+SMLSLT_zzzw 01000100 .. 0 ..... 010 101 ..... ..... @rda_rn_rm
+UMLSLB_zzzw 01000100 .. 0 ..... 010 110 ..... ..... @rda_rn_rm
+UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm
+
+## SVE2 complex integer multiply-add
+
+CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx
+SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
+
+## SVE mixed sign dot product
+
+USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
+
+### SVE2 floating-point matrix multiply accumulate
+{
+ BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
+ FMMLA 01100100 .. 1 ..... 111 001 ..... ..... @rda_rn_rm
+}
+
+### SVE2 Memory Gather Load Group
+
+# SVE2 64-bit gather non-temporal load
+# (scalar plus unpacked 32-bit unscaled offsets)
+LDNT1_zprz 1100010 msz:2 00 rm:5 1 u:1 0 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=0 esz=3 scale=0 ff=0
+
+# SVE2 32-bit gather non-temporal load (scalar plus 32-bit unscaled offsets)
+LDNT1_zprz 1000010 msz:2 00 rm:5 10 u:1 pg:3 rn:5 rd:5 \
+ &rprr_gather_load xs=0 esz=2 scale=0 ff=0
+
+### SVE2 Memory Store Group
+
+# SVE2 64-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz 1110010 .. 00 ..... 001 ... ..... ..... \
+ @rprr_scatter_store xs=2 esz=3 scale=0
+
+# SVE2 32-bit scatter non-temporal store (vector plus scalar)
+STNT1_zprz 1110010 .. 10 ..... 001 ... ..... ..... \
+ @rprr_scatter_store xs=0 esz=2 scale=0
+
+### SVE2 Crypto Extensions
+
+# SVE2 crypto unary operations
+# AESMC and AESIMC
+AESMC 01000101 00 10000011100 decrypt:1 00000 rd:5
+
+# SVE2 crypto destructive binary operations
+AESE 01000101 00 10001 0 11100 0 ..... ..... @rdn_rm_e0
+AESD 01000101 00 10001 0 11100 1 ..... ..... @rdn_rm_e0
+SM4E 01000101 00 10001 1 11100 0 ..... ..... @rdn_rm_e0
+
+# SVE2 crypto constructive binary operations
+SM4EKEY 01000101 00 1 ..... 11110 0 ..... ..... @rd_rn_rm_e0
+RAX1 01000101 00 1 ..... 11110 1 ..... ..... @rd_rn_rm_e0
+
+### SVE2 floating-point convert precision odd elements
+FCVTXNT_ds 01100100 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTX_ds 01100101 00 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTNT_sh 01100100 10 0010 00 101 ... ..... ..... @rd_pg_rn_e0
+BFCVTNT 01100100 10 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTLT_hs 01100100 10 0010 01 101 ... ..... ..... @rd_pg_rn_e0
+FCVTNT_ds 01100100 11 0010 10 101 ... ..... ..... @rd_pg_rn_e0
+FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
+
+### SVE2 floating-point convert to integer
+FLOGB 01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5 &rpr_esz
+
+### SVE2 floating-point multiply-add long (vectors)
+FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
+FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0
+FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0
+
+BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
+
+### SVE2 floating-point bfloat16 dot-product
+BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+
+### SVE2 floating-point multiply-add long (indexed)
+FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
+FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
+FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
+FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
+BFMLALB_zzxw 01100100 11 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
+BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
+
+### SVE2 floating-point bfloat16 dot-product (indexed)
+BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
new file mode 100644
index 000000000..07be55b7e
--- /dev/null
+++ b/target/arm/sve_helper.c
@@ -0,0 +1,7654 @@
+/*
+ * ARM SVE Operations
+ *
+ * Copyright (c) 2018 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "fpu/softfloat.h"
+#include "tcg/tcg.h"
+#include "vec_internal.h"
+
+
+/* Return a value for NZCV as per the ARM PredTest pseudofunction.
+ *
+ * The return value has bit 31 set if N is set, bit 1 set if Z is clear,
+ * and bit 0 set if C is set. Compare the definitions of these variables
+ * within CPUARMState.
+ */
+
+/* For no G bits set, NZCV = C. */
+#define PREDTEST_INIT 1
+
+/* This is an iterative function, called for each Pd and Pg word
+ * moving forward.
+ */
+static uint32_t iter_predtest_fwd(uint64_t d, uint64_t g, uint32_t flags)
+{
+ if (likely(g)) {
+ /* Compute N from first D & G.
+ Use bit 2 to signal first G bit seen. */
+ if (!(flags & 4)) {
+ flags |= ((d & (g & -g)) != 0) << 31;
+ flags |= 4;
+ }
+
+ /* Accumulate Z from each D & G. */
+ flags |= ((d & g) != 0) << 1;
+
+ /* Compute C from last !(D & G). Replace previous. */
+ flags = deposit32(flags, 0, 1, (d & pow2floor(g)) == 0);
+ }
+ return flags;
+}
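+
+/*
+ * For example, d = 0b0101 with g = 0b0111: the first active element is
+ * set in d, so N is set; d & g is nonzero, so Z is clear; the last
+ * active element (pow2floor(g) == 4) is set in d, so C is clear.  The
+ * returned value is 0x80000006, bit 2 being the internal marker for
+ * "first active element seen".
+ */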
+
+/* This is an iterative function, called for each Pd and Pg word
+ * moving backward.
+ */
+static uint32_t iter_predtest_bwd(uint64_t d, uint64_t g, uint32_t flags)
+{
+ if (likely(g)) {
+        /* Compute C from first (i.e. last) !(D & G).
+           Use bit 2 to signal first G bit seen. */
+ if (!(flags & 4)) {
+ flags += 4 - 1; /* add bit 2, subtract C from PREDTEST_INIT */
+ flags |= (d & pow2floor(g)) == 0;
+ }
+
+ /* Accumulate Z from each D & G. */
+ flags |= ((d & g) != 0) << 1;
+
+        /* Compute N from last (i.e. first) D & G.  Replace previous. */
+ flags = deposit32(flags, 31, 1, (d & (g & -g)) != 0);
+ }
+ return flags;
+}
+
+/* The same for a single word predicate. */
+uint32_t HELPER(sve_predtest1)(uint64_t d, uint64_t g)
+{
+ return iter_predtest_fwd(d, g, PREDTEST_INIT);
+}
+
+/* The same for a multi-word predicate. */
+uint32_t HELPER(sve_predtest)(void *vd, void *vg, uint32_t words)
+{
+ uint32_t flags = PREDTEST_INIT;
+ uint64_t *d = vd, *g = vg;
+ uintptr_t i = 0;
+
+ do {
+ flags = iter_predtest_fwd(d[i], g[i], flags);
+ } while (++i < words);
+
+ return flags;
+}
+
+/*
+ * Expand active predicate bits to bytes, for byte elements.
+ * (The data table itself is in vec_helper.c as MVE also needs it.)
+ */
+static inline uint64_t expand_pred_b(uint8_t byte)
+{
+ return expand_pred_b_data[byte];
+}
+
+/* Similarly for half-word elements.
+ * for (i = 0; i < 256; ++i) {
+ * unsigned long m = 0;
+ * if (i & 0xaa) {
+ * continue;
+ * }
+ * for (j = 0; j < 8; j += 2) {
+ * if ((i >> j) & 1) {
+ * m |= 0xfffful << (j << 3);
+ * }
+ * }
+ * printf("[0x%x] = 0x%016lx,\n", i, m);
+ * }
+ */
+static inline uint64_t expand_pred_h(uint8_t byte)
+{
+ static const uint64_t word[] = {
+ [0x01] = 0x000000000000ffff, [0x04] = 0x00000000ffff0000,
+ [0x05] = 0x00000000ffffffff, [0x10] = 0x0000ffff00000000,
+ [0x11] = 0x0000ffff0000ffff, [0x14] = 0x0000ffffffff0000,
+ [0x15] = 0x0000ffffffffffff, [0x40] = 0xffff000000000000,
+ [0x41] = 0xffff00000000ffff, [0x44] = 0xffff0000ffff0000,
+ [0x45] = 0xffff0000ffffffff, [0x50] = 0xffffffff00000000,
+ [0x51] = 0xffffffff0000ffff, [0x54] = 0xffffffffffff0000,
+ [0x55] = 0xffffffffffffffff,
+ };
+ return word[byte & 0x55];
+}
+
+/* Similarly for single word elements. */
+static inline uint64_t expand_pred_s(uint8_t byte)
+{
+ static const uint64_t word[] = {
+ [0x01] = 0x00000000ffffffffull,
+ [0x10] = 0xffffffff00000000ull,
+ [0x11] = 0xffffffffffffffffull,
+ };
+ return word[byte & 0x11];
+}
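+
+/*
+ * E.g. a predicate byte of 0x11 marks both 32-bit elements active, so
+ * expand_pred_s(0x11) returns all-ones; inactive elements expand to 0.
+ */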
+
+#define LOGICAL_PPPP(NAME, FUNC) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ uintptr_t opr_sz = simd_oprsz(desc); \
+ uint64_t *d = vd, *n = vn, *m = vm, *g = vg; \
+ uintptr_t i; \
+ for (i = 0; i < opr_sz / 8; ++i) { \
+ d[i] = FUNC(n[i], m[i], g[i]); \
+ } \
+}
+
+#define DO_AND(N, M, G) (((N) & (M)) & (G))
+#define DO_BIC(N, M, G) (((N) & ~(M)) & (G))
+#define DO_EOR(N, M, G) (((N) ^ (M)) & (G))
+#define DO_ORR(N, M, G) (((N) | (M)) & (G))
+#define DO_ORN(N, M, G) (((N) | ~(M)) & (G))
+#define DO_NOR(N, M, G) (~((N) | (M)) & (G))
+#define DO_NAND(N, M, G) (~((N) & (M)) & (G))
+#define DO_SEL(N, M, G) (((N) & (G)) | ((M) & ~(G)))
+
+LOGICAL_PPPP(sve_and_pppp, DO_AND)
+LOGICAL_PPPP(sve_bic_pppp, DO_BIC)
+LOGICAL_PPPP(sve_eor_pppp, DO_EOR)
+LOGICAL_PPPP(sve_sel_pppp, DO_SEL)
+LOGICAL_PPPP(sve_orr_pppp, DO_ORR)
+LOGICAL_PPPP(sve_orn_pppp, DO_ORN)
+LOGICAL_PPPP(sve_nor_pppp, DO_NOR)
+LOGICAL_PPPP(sve_nand_pppp, DO_NAND)
+
+#undef DO_AND
+#undef DO_BIC
+#undef DO_EOR
+#undef DO_ORR
+#undef DO_ORN
+#undef DO_NOR
+#undef DO_NAND
+#undef DO_SEL
+#undef LOGICAL_PPPP
+
+/* Fully general three-operand expander, controlled by a predicate.
+ * This is complicated by the host-endian storage of the register file.
+ */
+/* ??? I don't expect the compiler could ever vectorize this itself.
+ * With some tables we can convert bit masks to byte masks, and with
+ * extra care wrt byte/word ordering we could use gcc generic vectors
+ * and do 16 bytes at a time.
+ */
+#define DO_ZPZZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ TYPE mm = *(TYPE *)(vm + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, mm); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
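+
+/*
+ * The governing predicate has one bit per vector byte, with only the
+ * bit at the start of each element significant.  Advancing i by
+ * sizeof(TYPE) while shifting pg right by the same amount keeps the
+ * current element's predicate bit in bit 0 of pg.
+ */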
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZZ_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPE nn = n[i], mm = m[i]; \
+ d[i] = OP(nn, mm); \
+ } \
+ } \
+}
+
+#define DO_AND(N, M) (N & M)
+#define DO_EOR(N, M) (N ^ M)
+#define DO_ORR(N, M) (N | M)
+#define DO_BIC(N, M) (N & ~M)
+#define DO_ADD(N, M) (N + M)
+#define DO_SUB(N, M) (N - M)
+#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
+#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))
+#define DO_MUL(N, M) (N * M)
+
+
+/*
+ * We must avoid the C undefined behaviour cases: division by
+ * zero and signed division of INT_MIN by -1. Both of these
+ * have architecturally defined required results for Arm.
+ * We special case all signed divisions by -1 to avoid having
+ * to deduce the minimum integer for the type involved.
+ */
+#define DO_SDIV(N, M) (unlikely(M == 0) ? 0 : unlikely(M == -1) ? -N : N / M)
+#define DO_UDIV(N, M) (unlikely(M == 0) ? 0 : N / M)
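+/*
+ * E.g. DO_SDIV(INT64_MIN, -1) evaluates -N, which wraps back to
+ * INT64_MIN as the architecture requires (QEMU is built with -fwrapv,
+ * so the wrapping negation is well defined).
+ */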
+
+DO_ZPZZ(sve_and_zpzz_b, uint8_t, H1, DO_AND)
+DO_ZPZZ(sve_and_zpzz_h, uint16_t, H1_2, DO_AND)
+DO_ZPZZ(sve_and_zpzz_s, uint32_t, H1_4, DO_AND)
+DO_ZPZZ_D(sve_and_zpzz_d, uint64_t, DO_AND)
+
+DO_ZPZZ(sve_orr_zpzz_b, uint8_t, H1, DO_ORR)
+DO_ZPZZ(sve_orr_zpzz_h, uint16_t, H1_2, DO_ORR)
+DO_ZPZZ(sve_orr_zpzz_s, uint32_t, H1_4, DO_ORR)
+DO_ZPZZ_D(sve_orr_zpzz_d, uint64_t, DO_ORR)
+
+DO_ZPZZ(sve_eor_zpzz_b, uint8_t, H1, DO_EOR)
+DO_ZPZZ(sve_eor_zpzz_h, uint16_t, H1_2, DO_EOR)
+DO_ZPZZ(sve_eor_zpzz_s, uint32_t, H1_4, DO_EOR)
+DO_ZPZZ_D(sve_eor_zpzz_d, uint64_t, DO_EOR)
+
+DO_ZPZZ(sve_bic_zpzz_b, uint8_t, H1, DO_BIC)
+DO_ZPZZ(sve_bic_zpzz_h, uint16_t, H1_2, DO_BIC)
+DO_ZPZZ(sve_bic_zpzz_s, uint32_t, H1_4, DO_BIC)
+DO_ZPZZ_D(sve_bic_zpzz_d, uint64_t, DO_BIC)
+
+DO_ZPZZ(sve_add_zpzz_b, uint8_t, H1, DO_ADD)
+DO_ZPZZ(sve_add_zpzz_h, uint16_t, H1_2, DO_ADD)
+DO_ZPZZ(sve_add_zpzz_s, uint32_t, H1_4, DO_ADD)
+DO_ZPZZ_D(sve_add_zpzz_d, uint64_t, DO_ADD)
+
+DO_ZPZZ(sve_sub_zpzz_b, uint8_t, H1, DO_SUB)
+DO_ZPZZ(sve_sub_zpzz_h, uint16_t, H1_2, DO_SUB)
+DO_ZPZZ(sve_sub_zpzz_s, uint32_t, H1_4, DO_SUB)
+DO_ZPZZ_D(sve_sub_zpzz_d, uint64_t, DO_SUB)
+
+DO_ZPZZ(sve_smax_zpzz_b, int8_t, H1, DO_MAX)
+DO_ZPZZ(sve_smax_zpzz_h, int16_t, H1_2, DO_MAX)
+DO_ZPZZ(sve_smax_zpzz_s, int32_t, H1_4, DO_MAX)
+DO_ZPZZ_D(sve_smax_zpzz_d, int64_t, DO_MAX)
+
+DO_ZPZZ(sve_umax_zpzz_b, uint8_t, H1, DO_MAX)
+DO_ZPZZ(sve_umax_zpzz_h, uint16_t, H1_2, DO_MAX)
+DO_ZPZZ(sve_umax_zpzz_s, uint32_t, H1_4, DO_MAX)
+DO_ZPZZ_D(sve_umax_zpzz_d, uint64_t, DO_MAX)
+
+DO_ZPZZ(sve_smin_zpzz_b, int8_t, H1, DO_MIN)
+DO_ZPZZ(sve_smin_zpzz_h, int16_t, H1_2, DO_MIN)
+DO_ZPZZ(sve_smin_zpzz_s, int32_t, H1_4, DO_MIN)
+DO_ZPZZ_D(sve_smin_zpzz_d, int64_t, DO_MIN)
+
+DO_ZPZZ(sve_umin_zpzz_b, uint8_t, H1, DO_MIN)
+DO_ZPZZ(sve_umin_zpzz_h, uint16_t, H1_2, DO_MIN)
+DO_ZPZZ(sve_umin_zpzz_s, uint32_t, H1_4, DO_MIN)
+DO_ZPZZ_D(sve_umin_zpzz_d, uint64_t, DO_MIN)
+
+DO_ZPZZ(sve_sabd_zpzz_b, int8_t, H1, DO_ABD)
+DO_ZPZZ(sve_sabd_zpzz_h, int16_t, H1_2, DO_ABD)
+DO_ZPZZ(sve_sabd_zpzz_s, int32_t, H1_4, DO_ABD)
+DO_ZPZZ_D(sve_sabd_zpzz_d, int64_t, DO_ABD)
+
+DO_ZPZZ(sve_uabd_zpzz_b, uint8_t, H1, DO_ABD)
+DO_ZPZZ(sve_uabd_zpzz_h, uint16_t, H1_2, DO_ABD)
+DO_ZPZZ(sve_uabd_zpzz_s, uint32_t, H1_4, DO_ABD)
+DO_ZPZZ_D(sve_uabd_zpzz_d, uint64_t, DO_ABD)
+
+/* Because the computation type is at least twice as large as required,
+ these work for both signed and unsigned source types. */
+static inline uint8_t do_mulh_b(int32_t n, int32_t m)
+{
+ return (n * m) >> 8;
+}
+
+static inline uint16_t do_mulh_h(int32_t n, int32_t m)
+{
+ return (n * m) >> 16;
+}
+
+static inline uint32_t do_mulh_s(int64_t n, int64_t m)
+{
+ return (n * m) >> 32;
+}
+
+static inline uint64_t do_smulh_d(uint64_t n, uint64_t m)
+{
+ uint64_t lo, hi;
+ muls64(&lo, &hi, n, m);
+ return hi;
+}
+
+static inline uint64_t do_umulh_d(uint64_t n, uint64_t m)
+{
+ uint64_t lo, hi;
+ mulu64(&lo, &hi, n, m);
+ return hi;
+}
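+
+/*
+ * E.g. 200 * 200 = 40000 = 0x9c40, so do_mulh_b returns 0x9c for
+ * UMULH.B; the same routine serves SMULH.B because the int32_t
+ * arguments already carry the sign extension of int8_t inputs.
+ */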
+
+DO_ZPZZ(sve_mul_zpzz_b, uint8_t, H1, DO_MUL)
+DO_ZPZZ(sve_mul_zpzz_h, uint16_t, H1_2, DO_MUL)
+DO_ZPZZ(sve_mul_zpzz_s, uint32_t, H1_4, DO_MUL)
+DO_ZPZZ_D(sve_mul_zpzz_d, uint64_t, DO_MUL)
+
+DO_ZPZZ(sve_smulh_zpzz_b, int8_t, H1, do_mulh_b)
+DO_ZPZZ(sve_smulh_zpzz_h, int16_t, H1_2, do_mulh_h)
+DO_ZPZZ(sve_smulh_zpzz_s, int32_t, H1_4, do_mulh_s)
+DO_ZPZZ_D(sve_smulh_zpzz_d, uint64_t, do_smulh_d)
+
+DO_ZPZZ(sve_umulh_zpzz_b, uint8_t, H1, do_mulh_b)
+DO_ZPZZ(sve_umulh_zpzz_h, uint16_t, H1_2, do_mulh_h)
+DO_ZPZZ(sve_umulh_zpzz_s, uint32_t, H1_4, do_mulh_s)
+DO_ZPZZ_D(sve_umulh_zpzz_d, uint64_t, do_umulh_d)
+
+DO_ZPZZ(sve_sdiv_zpzz_s, int32_t, H1_4, DO_SDIV)
+DO_ZPZZ_D(sve_sdiv_zpzz_d, int64_t, DO_SDIV)
+
+DO_ZPZZ(sve_udiv_zpzz_s, uint32_t, H1_4, DO_UDIV)
+DO_ZPZZ_D(sve_udiv_zpzz_d, uint64_t, DO_UDIV)
+
+/* Note that all bits of the shift are significant
+ and not modulo the element size. */
+#define DO_ASR(N, M) (N >> MIN(M, sizeof(N) * 8 - 1))
+#define DO_LSR(N, M) (M < sizeof(N) * 8 ? N >> M : 0)
+#define DO_LSL(N, M) (M < sizeof(N) * 8 ? N << M : 0)
+
+DO_ZPZZ(sve_asr_zpzz_b, int8_t, H1, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_b, uint8_t, H1, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_b, uint8_t, H1, DO_LSL)
+
+DO_ZPZZ(sve_asr_zpzz_h, int16_t, H1_2, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_h, uint16_t, H1_2, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_h, uint16_t, H1_2, DO_LSL)
+
+DO_ZPZZ(sve_asr_zpzz_s, int32_t, H1_4, DO_ASR)
+DO_ZPZZ(sve_lsr_zpzz_s, uint32_t, H1_4, DO_LSR)
+DO_ZPZZ(sve_lsl_zpzz_s, uint32_t, H1_4, DO_LSL)
+
+DO_ZPZZ_D(sve_asr_zpzz_d, int64_t, DO_ASR)
+DO_ZPZZ_D(sve_lsr_zpzz_d, uint64_t, DO_LSR)
+DO_ZPZZ_D(sve_lsl_zpzz_d, uint64_t, DO_LSL)
+
+static inline uint16_t do_sadalp_h(int16_t n, int16_t m)
+{
+ int8_t n1 = n, n2 = n >> 8;
+ return m + n1 + n2;
+}
+
+static inline uint32_t do_sadalp_s(int32_t n, int32_t m)
+{
+ int16_t n1 = n, n2 = n >> 16;
+ return m + n1 + n2;
+}
+
+static inline uint64_t do_sadalp_d(int64_t n, int64_t m)
+{
+ int32_t n1 = n, n2 = n >> 32;
+ return m + n1 + n2;
+}
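+
+/*
+ * E.g. do_sadalp_h(0x01ff, 0) accumulates (int8_t)0x01 + (int8_t)0xff
+ * = 1 + (-1) = 0: each wide element adds the signed sum of the two
+ * narrow halves of the corresponding source element.
+ */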
+
+DO_ZPZZ(sve2_sadalp_zpzz_h, int16_t, H1_2, do_sadalp_h)
+DO_ZPZZ(sve2_sadalp_zpzz_s, int32_t, H1_4, do_sadalp_s)
+DO_ZPZZ_D(sve2_sadalp_zpzz_d, int64_t, do_sadalp_d)
+
+static inline uint16_t do_uadalp_h(uint16_t n, uint16_t m)
+{
+ uint8_t n1 = n, n2 = n >> 8;
+ return m + n1 + n2;
+}
+
+static inline uint32_t do_uadalp_s(uint32_t n, uint32_t m)
+{
+ uint16_t n1 = n, n2 = n >> 16;
+ return m + n1 + n2;
+}
+
+static inline uint64_t do_uadalp_d(uint64_t n, uint64_t m)
+{
+ uint32_t n1 = n, n2 = n >> 32;
+ return m + n1 + n2;
+}
+
+DO_ZPZZ(sve2_uadalp_zpzz_h, uint16_t, H1_2, do_uadalp_h)
+DO_ZPZZ(sve2_uadalp_zpzz_s, uint32_t, H1_4, do_uadalp_s)
+DO_ZPZZ_D(sve2_uadalp_zpzz_d, uint64_t, do_uadalp_d)
+
+#define do_srshl_b(n, m) do_sqrshl_bhs(n, m, 8, true, NULL)
+#define do_srshl_h(n, m) do_sqrshl_bhs(n, m, 16, true, NULL)
+#define do_srshl_s(n, m) do_sqrshl_bhs(n, m, 32, true, NULL)
+#define do_srshl_d(n, m) do_sqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_srshl_zpzz_b, int8_t, H1, do_srshl_b)
+DO_ZPZZ(sve2_srshl_zpzz_h, int16_t, H1_2, do_srshl_h)
+DO_ZPZZ(sve2_srshl_zpzz_s, int32_t, H1_4, do_srshl_s)
+DO_ZPZZ_D(sve2_srshl_zpzz_d, int64_t, do_srshl_d)
+
+#define do_urshl_b(n, m) do_uqrshl_bhs(n, (int8_t)m, 8, true, NULL)
+#define do_urshl_h(n, m) do_uqrshl_bhs(n, (int16_t)m, 16, true, NULL)
+#define do_urshl_s(n, m) do_uqrshl_bhs(n, m, 32, true, NULL)
+#define do_urshl_d(n, m) do_uqrshl_d(n, m, true, NULL)
+
+DO_ZPZZ(sve2_urshl_zpzz_b, uint8_t, H1, do_urshl_b)
+DO_ZPZZ(sve2_urshl_zpzz_h, uint16_t, H1_2, do_urshl_h)
+DO_ZPZZ(sve2_urshl_zpzz_s, uint32_t, H1_4, do_urshl_s)
+DO_ZPZZ_D(sve2_urshl_zpzz_d, uint64_t, do_urshl_d)
+
+/*
+ * Unlike the NEON and AdvSIMD versions, there is no QC bit to set.
+ * We pass in a pointer to a dummy saturation field to trigger
+ * the saturating arithmetic but discard the information about
+ * whether it has occurred.
+ */
+#define do_sqshl_b(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, false, &discard); })
+#define do_sqshl_h(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, false, &discard); })
+#define do_sqshl_s(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, false, &discard); })
+#define do_sqshl_d(n, m) \
+ ({ uint32_t discard; do_sqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_sqshl_zpzz_b, int8_t, H1, do_sqshl_b)
+DO_ZPZZ(sve2_sqshl_zpzz_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZZ(sve2_sqshl_zpzz_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZZ_D(sve2_sqshl_zpzz_d, int64_t, do_sqshl_d)
+
+#define do_uqshl_b(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_uqshl_h(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_uqshl_s(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, false, &discard); })
+#define do_uqshl_d(n, m) \
+ ({ uint32_t discard; do_uqrshl_d(n, m, false, &discard); })
+
+DO_ZPZZ(sve2_uqshl_zpzz_b, uint8_t, H1, do_uqshl_b)
+DO_ZPZZ(sve2_uqshl_zpzz_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZZ(sve2_uqshl_zpzz_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZZ_D(sve2_uqshl_zpzz_d, uint64_t, do_uqshl_d)
+
+#define do_sqrshl_b(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 8, true, &discard); })
+#define do_sqrshl_h(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 16, true, &discard); })
+#define do_sqrshl_s(n, m) \
+ ({ uint32_t discard; do_sqrshl_bhs(n, m, 32, true, &discard); })
+#define do_sqrshl_d(n, m) \
+ ({ uint32_t discard; do_sqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_sqrshl_zpzz_b, int8_t, H1, do_sqrshl_b)
+DO_ZPZZ(sve2_sqrshl_zpzz_h, int16_t, H1_2, do_sqrshl_h)
+DO_ZPZZ(sve2_sqrshl_zpzz_s, int32_t, H1_4, do_sqrshl_s)
+DO_ZPZZ_D(sve2_sqrshl_zpzz_d, int64_t, do_sqrshl_d)
+
+#undef do_sqrshl_d
+
+#define do_uqrshl_b(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int8_t)m, 8, true, &discard); })
+#define do_uqrshl_h(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, (int16_t)m, 16, true, &discard); })
+#define do_uqrshl_s(n, m) \
+ ({ uint32_t discard; do_uqrshl_bhs(n, m, 32, true, &discard); })
+#define do_uqrshl_d(n, m) \
+ ({ uint32_t discard; do_uqrshl_d(n, m, true, &discard); })
+
+DO_ZPZZ(sve2_uqrshl_zpzz_b, uint8_t, H1, do_uqrshl_b)
+DO_ZPZZ(sve2_uqrshl_zpzz_h, uint16_t, H1_2, do_uqrshl_h)
+DO_ZPZZ(sve2_uqrshl_zpzz_s, uint32_t, H1_4, do_uqrshl_s)
+DO_ZPZZ_D(sve2_uqrshl_zpzz_d, uint64_t, do_uqrshl_d)
+
+#undef do_uqrshl_d
+
+#define DO_HADD_BHS(n, m) (((int64_t)n + m) >> 1)
+#define DO_HADD_D(n, m) ((n >> 1) + (m >> 1) + (n & m & 1))
+
+DO_ZPZZ(sve2_shadd_zpzz_b, int8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_h, int16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_shadd_zpzz_s, int32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_shadd_zpzz_d, int64_t, DO_HADD_D)
+
+DO_ZPZZ(sve2_uhadd_zpzz_b, uint8_t, H1, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_h, uint16_t, H1_2, DO_HADD_BHS)
+DO_ZPZZ(sve2_uhadd_zpzz_s, uint32_t, H1_4, DO_HADD_BHS)
+DO_ZPZZ_D(sve2_uhadd_zpzz_d, uint64_t, DO_HADD_D)
+
+#define DO_RHADD_BHS(n, m) (((int64_t)n + m + 1) >> 1)
+#define DO_RHADD_D(n, m) ((n >> 1) + (m >> 1) + ((n | m) & 1))
+
+DO_ZPZZ(sve2_srhadd_zpzz_b, int8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_h, int16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_srhadd_zpzz_s, int32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_srhadd_zpzz_d, int64_t, DO_RHADD_D)
+
+DO_ZPZZ(sve2_urhadd_zpzz_b, uint8_t, H1, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_h, uint16_t, H1_2, DO_RHADD_BHS)
+DO_ZPZZ(sve2_urhadd_zpzz_s, uint32_t, H1_4, DO_RHADD_BHS)
+DO_ZPZZ_D(sve2_urhadd_zpzz_d, uint64_t, DO_RHADD_D)
+
+#define DO_HSUB_BHS(n, m) (((int64_t)n - m) >> 1)
+#define DO_HSUB_D(n, m) ((n >> 1) - (m >> 1) - (~n & m & 1))
+
+DO_ZPZZ(sve2_shsub_zpzz_b, int8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_h, int16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_shsub_zpzz_s, int32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_shsub_zpzz_d, int64_t, DO_HSUB_D)
+
+DO_ZPZZ(sve2_uhsub_zpzz_b, uint8_t, H1, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
+DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
+DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
+
+static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max)
+{
+ return val >= max ? max : val <= min ? min : val;
+}
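+
+/*
+ * E.g. do_sat_bhs(300, INT8_MIN, INT8_MAX) == 127 and
+ * do_sat_bhs(-300, INT8_MIN, INT8_MAX) == -128.
+ */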
+
+#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqadd_d(int64_t n, int64_t m)
+{
+ int64_t r = n + m;
+ if (((r ^ n) & ~(n ^ m)) < 0) {
+ /* Signed overflow. */
+ return r < 0 ? INT64_MAX : INT64_MIN;
+ }
+ return r;
+}
+
+DO_ZPZZ(sve2_sqadd_zpzz_b, int8_t, H1, DO_SQADD_B)
+DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H)
+DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S)
+DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d)
+
+#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX)
+#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX)
+#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m)
+{
+ uint64_t r = n + m;
+ return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_uqadd_zpzz_b, uint8_t, H1, DO_UQADD_B)
+DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H)
+DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S)
+DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d)
+
+#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX)
+#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX)
+#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_sqsub_d(int64_t n, int64_t m)
+{
+ int64_t r = n - m;
+ if (((r ^ n) & (n ^ m)) < 0) {
+ /* Signed overflow. */
+ return r < 0 ? INT64_MAX : INT64_MIN;
+ }
+ return r;
+}
+
+DO_ZPZZ(sve2_sqsub_zpzz_b, int8_t, H1, DO_SQSUB_B)
+DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H)
+DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S)
+DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d)
+
+#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX)
+#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX)
+#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX)
+
+static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m)
+{
+ return n > m ? n - m : 0;
+}
+
+DO_ZPZZ(sve2_uqsub_zpzz_b, uint8_t, H1, DO_UQSUB_B)
+DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H)
+DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S)
+DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d)
+
+#define DO_SUQADD_B(n, m) \
+ do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX)
+#define DO_SUQADD_H(n, m) \
+ do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX)
+#define DO_SUQADD_S(n, m) \
+ do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX)
+
+static inline int64_t do_suqadd_d(int64_t n, uint64_t m)
+{
+ uint64_t r = n + m;
+
+ if (n < 0) {
+ /* Note that m - abs(n) cannot underflow. */
+ if (r > INT64_MAX) {
+ /* Result is either very large positive or negative. */
+ if (m > -n) {
+ /* m > abs(n), so r is a very large positive. */
+ return INT64_MAX;
+ }
+ /* Result is negative. */
+ }
+ } else {
+ /* Both inputs are positive: check for overflow. */
+ if (r < m || r > INT64_MAX) {
+ return INT64_MAX;
+ }
+ }
+ return r;
+}
+
+DO_ZPZZ(sve2_suqadd_zpzz_b, uint8_t, H1, DO_SUQADD_B)
+DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H)
+DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S)
+DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d)
+
+#define DO_USQADD_B(n, m) \
+ do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX)
+#define DO_USQADD_H(n, m) \
+ do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX)
+#define DO_USQADD_S(n, m) \
+ do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX)
+
+static inline uint64_t do_usqadd_d(uint64_t n, int64_t m)
+{
+ uint64_t r = n + m;
+
+ if (m < 0) {
+ return n < -m ? 0 : r;
+ }
+ return r < n ? UINT64_MAX : r;
+}
+
+DO_ZPZZ(sve2_usqadd_zpzz_b, uint8_t, H1, DO_USQADD_B)
+DO_ZPZZ(sve2_usqadd_zpzz_h, uint16_t, H1_2, DO_USQADD_H)
+DO_ZPZZ(sve2_usqadd_zpzz_s, uint32_t, H1_4, DO_USQADD_S)
+DO_ZPZZ_D(sve2_usqadd_zpzz_d, uint64_t, do_usqadd_d)
+
+#undef DO_ZPZZ
+#undef DO_ZPZZ_D
+
+/*
+ * Three operand expander, operating on element pairs.
+ * If the slot I is even, the elements come from VN {I, I+1}.
+ * If the slot I is odd, the elements come from VM {I-1, I}.
+ * Load all of the input elements in each pair before overwriting output.
+ */
+#define DO_ZPZZ_PAIR(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE n0 = *(TYPE *)(vn + H(i)); \
+ TYPE m0 = *(TYPE *)(vm + H(i)); \
+ TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(n0, n1); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(m0, m1); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZZ_PAIR_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 2) { \
+ TYPE n0 = n[i], n1 = n[i + 1]; \
+ TYPE m0 = m[i], m1 = m[i + 1]; \
+ if (pg[H1(i)] & 1) { \
+ d[i] = OP(n0, n1); \
+ } \
+ if (pg[H1(i + 1)] & 1) { \
+ d[i + 1] = OP(m0, m1); \
+ } \
+ } \
+}
+
+DO_ZPZZ_PAIR(sve2_addp_zpzz_b, uint8_t, H1, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_h, uint16_t, H1_2, DO_ADD)
+DO_ZPZZ_PAIR(sve2_addp_zpzz_s, uint32_t, H1_4, DO_ADD)
+DO_ZPZZ_PAIR_D(sve2_addp_zpzz_d, uint64_t, DO_ADD)
+
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_b, uint8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_h, uint16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_umaxp_zpzz_s, uint32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_umaxp_zpzz_d, uint64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_b, uint8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_h, uint16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_uminp_zpzz_s, uint32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_uminp_zpzz_d, uint64_t, DO_MIN)
+
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_b, int8_t, H1, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_h, int16_t, H1_2, DO_MAX)
+DO_ZPZZ_PAIR(sve2_smaxp_zpzz_s, int32_t, H1_4, DO_MAX)
+DO_ZPZZ_PAIR_D(sve2_smaxp_zpzz_d, int64_t, DO_MAX)
+
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_b, int8_t, H1, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_h, int16_t, H1_2, DO_MIN)
+DO_ZPZZ_PAIR(sve2_sminp_zpzz_s, int32_t, H1_4, DO_MIN)
+DO_ZPZZ_PAIR_D(sve2_sminp_zpzz_d, int64_t, DO_MIN)
+
+#undef DO_ZPZZ_PAIR
+#undef DO_ZPZZ_PAIR_D
+
+#define DO_ZPZZ_PAIR_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE n0 = *(TYPE *)(vn + H(i)); \
+ TYPE m0 = *(TYPE *)(vm + H(i)); \
+ TYPE n1 = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE m1 = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(n0, n1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ if (pg & 1) { \
+ *(TYPE *)(vd + H(i)) = OP(m0, m1, status); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_h, float16, H1_2, float16_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_s, float32, H1_4, float32_add)
+DO_ZPZZ_PAIR_FP(sve2_faddp_zpzz_d, float64, H1_8, float64_add)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_h, float16, H1_2, float16_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_s, float32, H1_4, float32_maxnum)
+DO_ZPZZ_PAIR_FP(sve2_fmaxnmp_zpzz_d, float64, H1_8, float64_maxnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_h, float16, H1_2, float16_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_s, float32, H1_4, float32_minnum)
+DO_ZPZZ_PAIR_FP(sve2_fminnmp_zpzz_d, float64, H1_8, float64_minnum)
+
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_h, float16, H1_2, float16_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_s, float32, H1_4, float32_max)
+DO_ZPZZ_PAIR_FP(sve2_fmaxp_zpzz_d, float64, H1_8, float64_max)
+
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_h, float16, H1_2, float16_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_s, float32, H1_4, float32_min)
+DO_ZPZZ_PAIR_FP(sve2_fminp_zpzz_d, float64, H1_8, float64_min)
+
+#undef DO_ZPZZ_PAIR_FP
+
+/* Three-operand expander, controlled by a predicate, in which the
+ * third operand is "wide". That is, for D = N op M, the same 64-bit
+ * value of M is used with all of the narrower values of N.
+ */
+#define DO_ZPZW(NAME, TYPE, TYPEW, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint8_t pg = *(uint8_t *)(vg + H1(i >> 3)); \
+ TYPEW mm = *(TYPEW *)(vm + i); \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, mm); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 7); \
+ } \
+}
+
+DO_ZPZW(sve_asr_zpzw_b, int8_t, uint64_t, H1, DO_ASR)
+DO_ZPZW(sve_lsr_zpzw_b, uint8_t, uint64_t, H1, DO_LSR)
+DO_ZPZW(sve_lsl_zpzw_b, uint8_t, uint64_t, H1, DO_LSL)
+
+DO_ZPZW(sve_asr_zpzw_h, int16_t, uint64_t, H1_2, DO_ASR)
+DO_ZPZW(sve_lsr_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSR)
+DO_ZPZW(sve_lsl_zpzw_h, uint16_t, uint64_t, H1_2, DO_LSL)
+
+DO_ZPZW(sve_asr_zpzw_s, int32_t, uint64_t, H1_4, DO_ASR)
+DO_ZPZW(sve_lsr_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSR)
+DO_ZPZW(sve_lsl_zpzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
+
+#undef DO_ZPZW
+
+/* Fully general two-operand expander, controlled by a predicate.
+ */
+#define DO_ZPZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZ_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *n = vn; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPE nn = n[i]; \
+ d[i] = OP(nn); \
+ } \
+ } \
+}
+
+#define DO_CLS_B(N) (clrsb32(N) - 24)
+#define DO_CLS_H(N) (clrsb32(N) - 16)
+
+DO_ZPZ(sve_cls_b, int8_t, H1, DO_CLS_B)
+DO_ZPZ(sve_cls_h, int16_t, H1_2, DO_CLS_H)
+DO_ZPZ(sve_cls_s, int32_t, H1_4, clrsb32)
+DO_ZPZ_D(sve_cls_d, int64_t, clrsb64)
+
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+
+DO_ZPZ(sve_clz_b, uint8_t, H1, DO_CLZ_B)
+DO_ZPZ(sve_clz_h, uint16_t, H1_2, DO_CLZ_H)
+DO_ZPZ(sve_clz_s, uint32_t, H1_4, clz32)
+DO_ZPZ_D(sve_clz_d, uint64_t, clz64)
+
+DO_ZPZ(sve_cnt_zpz_b, uint8_t, H1, ctpop8)
+DO_ZPZ(sve_cnt_zpz_h, uint16_t, H1_2, ctpop16)
+DO_ZPZ(sve_cnt_zpz_s, uint32_t, H1_4, ctpop32)
+DO_ZPZ_D(sve_cnt_zpz_d, uint64_t, ctpop64)
+
+#define DO_CNOT(N) (N == 0)
+
+DO_ZPZ(sve_cnot_b, uint8_t, H1, DO_CNOT)
+DO_ZPZ(sve_cnot_h, uint16_t, H1_2, DO_CNOT)
+DO_ZPZ(sve_cnot_s, uint32_t, H1_4, DO_CNOT)
+DO_ZPZ_D(sve_cnot_d, uint64_t, DO_CNOT)
+
+#define DO_FABS(N) (N & ((__typeof(N))-1 >> 1))
+
+DO_ZPZ(sve_fabs_h, uint16_t, H1_2, DO_FABS)
+DO_ZPZ(sve_fabs_s, uint32_t, H1_4, DO_FABS)
+DO_ZPZ_D(sve_fabs_d, uint64_t, DO_FABS)
+
+#define DO_FNEG(N) (N ^ ~((__typeof(N))-1 >> 1))
+
+DO_ZPZ(sve_fneg_h, uint16_t, H1_2, DO_FNEG)
+DO_ZPZ(sve_fneg_s, uint32_t, H1_4, DO_FNEG)
+DO_ZPZ_D(sve_fneg_d, uint64_t, DO_FNEG)
+
+#define DO_NOT(N) (~N)
+
+DO_ZPZ(sve_not_zpz_b, uint8_t, H1, DO_NOT)
+DO_ZPZ(sve_not_zpz_h, uint16_t, H1_2, DO_NOT)
+DO_ZPZ(sve_not_zpz_s, uint32_t, H1_4, DO_NOT)
+DO_ZPZ_D(sve_not_zpz_d, uint64_t, DO_NOT)
+
+#define DO_SXTB(N) ((int8_t)N)
+#define DO_SXTH(N) ((int16_t)N)
+#define DO_SXTS(N) ((int32_t)N)
+#define DO_UXTB(N) ((uint8_t)N)
+#define DO_UXTH(N) ((uint16_t)N)
+#define DO_UXTS(N) ((uint32_t)N)
+
+DO_ZPZ(sve_sxtb_h, uint16_t, H1_2, DO_SXTB)
+DO_ZPZ(sve_sxtb_s, uint32_t, H1_4, DO_SXTB)
+DO_ZPZ(sve_sxth_s, uint32_t, H1_4, DO_SXTH)
+DO_ZPZ_D(sve_sxtb_d, uint64_t, DO_SXTB)
+DO_ZPZ_D(sve_sxth_d, uint64_t, DO_SXTH)
+DO_ZPZ_D(sve_sxtw_d, uint64_t, DO_SXTS)
+
+DO_ZPZ(sve_uxtb_h, uint16_t, H1_2, DO_UXTB)
+DO_ZPZ(sve_uxtb_s, uint32_t, H1_4, DO_UXTB)
+DO_ZPZ(sve_uxth_s, uint32_t, H1_4, DO_UXTH)
+DO_ZPZ_D(sve_uxtb_d, uint64_t, DO_UXTB)
+DO_ZPZ_D(sve_uxth_d, uint64_t, DO_UXTH)
+DO_ZPZ_D(sve_uxtw_d, uint64_t, DO_UXTS)
+
+#define DO_ABS(N) (N < 0 ? -N : N)
+
+DO_ZPZ(sve_abs_b, int8_t, H1, DO_ABS)
+DO_ZPZ(sve_abs_h, int16_t, H1_2, DO_ABS)
+DO_ZPZ(sve_abs_s, int32_t, H1_4, DO_ABS)
+DO_ZPZ_D(sve_abs_d, int64_t, DO_ABS)
+
+#define DO_NEG(N) (-N)
+
+DO_ZPZ(sve_neg_b, uint8_t, H1, DO_NEG)
+DO_ZPZ(sve_neg_h, uint16_t, H1_2, DO_NEG)
+DO_ZPZ(sve_neg_s, uint32_t, H1_4, DO_NEG)
+DO_ZPZ_D(sve_neg_d, uint64_t, DO_NEG)
+
+DO_ZPZ(sve_revb_h, uint16_t, H1_2, bswap16)
+DO_ZPZ(sve_revb_s, uint32_t, H1_4, bswap32)
+DO_ZPZ_D(sve_revb_d, uint64_t, bswap64)
+
+DO_ZPZ(sve_revh_s, uint32_t, H1_4, hswap32)
+DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
+
+DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)
+
+DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
+DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
+DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
+DO_ZPZ_D(sve_rbit_d, uint64_t, revbit64)
+
+#define DO_SQABS(X) \
+ ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+ x_ >= 0 ? x_ : x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqabs_b, int8_t, H1, DO_SQABS)
+DO_ZPZ(sve2_sqabs_h, int16_t, H1_2, DO_SQABS)
+DO_ZPZ(sve2_sqabs_s, int32_t, H1_4, DO_SQABS)
+DO_ZPZ_D(sve2_sqabs_d, int64_t, DO_SQABS)
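+
+/*
+ * E.g. for int8_t, DO_SQABS maps -128 to 127 rather than to the
+ * unrepresentable +128; all other values take the plain absolute value.
+ */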
+
+#define DO_SQNEG(X) \
+ ({ __typeof(X) x_ = (X), min_ = 1ull << (sizeof(X) * 8 - 1); \
+ x_ == min_ ? -min_ - 1 : -x_; })
+
+DO_ZPZ(sve2_sqneg_b, uint8_t, H1, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_h, uint16_t, H1_2, DO_SQNEG)
+DO_ZPZ(sve2_sqneg_s, uint32_t, H1_4, DO_SQNEG)
+DO_ZPZ_D(sve2_sqneg_d, uint64_t, DO_SQNEG)
+
+DO_ZPZ(sve2_urecpe_s, uint32_t, H1_4, helper_recpe_u32)
+DO_ZPZ(sve2_ursqrte_s, uint32_t, H1_4, helper_rsqrte_u32)
+
+/* Three-operand expander, unpredicated, in which the third operand is "wide".
+ */
+#define DO_ZZW(NAME, TYPE, TYPEW, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ TYPEW mm = *(TYPEW *)(vm + i); \
+ do { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, mm); \
+ i += sizeof(TYPE); \
+ } while (i & 7); \
+ } \
+}
+
+DO_ZZW(sve_asr_zzw_b, int8_t, uint64_t, H1, DO_ASR)
+DO_ZZW(sve_lsr_zzw_b, uint8_t, uint64_t, H1, DO_LSR)
+DO_ZZW(sve_lsl_zzw_b, uint8_t, uint64_t, H1, DO_LSL)
+
+DO_ZZW(sve_asr_zzw_h, int16_t, uint64_t, H1_2, DO_ASR)
+DO_ZZW(sve_lsr_zzw_h, uint16_t, uint64_t, H1_2, DO_LSR)
+DO_ZZW(sve_lsl_zzw_h, uint16_t, uint64_t, H1_2, DO_LSL)
+
+DO_ZZW(sve_asr_zzw_s, int32_t, uint64_t, H1_4, DO_ASR)
+DO_ZZW(sve_lsr_zzw_s, uint32_t, uint64_t, H1_4, DO_LSR)
+DO_ZZW(sve_lsl_zzw_s, uint32_t, uint64_t, H1_4, DO_LSL)
+
+#undef DO_ZZW
+
+#undef DO_CLS_B
+#undef DO_CLS_H
+#undef DO_CLZ_B
+#undef DO_CLZ_H
+#undef DO_CNOT
+#undef DO_FABS
+#undef DO_FNEG
+#undef DO_ABS
+#undef DO_NEG
+#undef DO_ZPZ
+#undef DO_ZPZ_D
+
+/*
+ * Three-operand expander, unpredicated, in which the two inputs are
+ * selected from the top or bottom half of the wide column.
+ */
+#define DO_ZZZ_TB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+ } \
+}
+
+DO_ZZZ_TB(sve2_saddl_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_saddl_d, int64_t, int32_t, H1_8, H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_ssubl_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_ssubl_d, int64_t, int32_t, H1_8, H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_sabdl_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_sabdl_d, int64_t, int32_t, H1_8, H1_4, DO_ABD)
+
+DO_ZZZ_TB(sve2_uaddl_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_TB(sve2_uaddl_d, uint64_t, uint32_t, H1_8, H1_4, DO_ADD)
+
+DO_ZZZ_TB(sve2_usubl_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_TB(sve2_usubl_d, uint64_t, uint32_t, H1_8, H1_4, DO_SUB)
+
+DO_ZZZ_TB(sve2_uabdl_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZ_TB(sve2_uabdl_d, uint64_t, uint32_t, H1_8, H1_4, DO_ABD)
+
+DO_ZZZ_TB(sve2_smull_zzz_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_smull_zzz_d, int64_t, int32_t, H1_8, H1_4, DO_MUL)
+
+DO_ZZZ_TB(sve2_umull_zzz_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZ_TB(sve2_umull_zzz_d, uint64_t, uint32_t, H1_8, H1_4, DO_MUL)
+
+/* Note that the multiply cannot overflow, but the doubling can. */
+static inline int16_t do_sqdmull_h(int16_t n, int16_t m)
+{
+ int16_t val = n * m;
+ return DO_SQADD_H(val, val);
+}
+
+static inline int32_t do_sqdmull_s(int32_t n, int32_t m)
+{
+ int32_t val = n * m;
+ return DO_SQADD_S(val, val);
+}
+
+static inline int64_t do_sqdmull_d(int64_t n, int64_t m)
+{
+ int64_t val = n * m;
+ return do_sqadd_d(val, val);
+}
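+
+/*
+ * E.g. in the _h form, n = m = -128 gives val = 16384 and the doubling
+ * saturates: DO_SQADD_H(16384, 16384) == 32767.
+ */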
+
+DO_ZZZ_TB(sve2_sqdmull_zzz_h, int16_t, int8_t, H1_2, H1, do_sqdmull_h)
+DO_ZZZ_TB(sve2_sqdmull_zzz_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
+DO_ZZZ_TB(sve2_sqdmull_zzz_d, int64_t, int32_t, H1_8, H1_4, do_sqdmull_d)
+
+#undef DO_ZZZ_TB
+
+#define DO_ZZZ_WTB(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm); \
+ } \
+}
+
+DO_ZZZ_WTB(sve2_saddw_h, int16_t, int8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_s, int32_t, int16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_saddw_d, int64_t, int32_t, H1_8, H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_ssubw_h, int16_t, int8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_s, int32_t, int16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_ssubw_d, int64_t, int32_t, H1_8, H1_4, DO_SUB)
+
+DO_ZZZ_WTB(sve2_uaddw_h, uint16_t, uint8_t, H1_2, H1, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_s, uint32_t, uint16_t, H1_4, H1_2, DO_ADD)
+DO_ZZZ_WTB(sve2_uaddw_d, uint64_t, uint32_t, H1_8, H1_4, DO_ADD)
+
+DO_ZZZ_WTB(sve2_usubw_h, uint16_t, uint8_t, H1_2, H1, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_s, uint32_t, uint16_t, H1_4, H1_2, DO_SUB)
+DO_ZZZ_WTB(sve2_usubw_d, uint64_t, uint32_t, H1_8, H1_4, DO_SUB)
+
+#undef DO_ZZZ_WTB
+
+#define DO_ZZZ_NTB(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPE); \
+ intptr_t sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPE); \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + H(i + sel1)); \
+ TYPE mm = *(TYPE *)(vm + H(i + sel2)); \
+ *(TYPE *)(vd + H(i + sel1)) = OP(nn, mm); \
+ } \
+}
+
+DO_ZZZ_NTB(sve2_eoril_b, uint8_t, H1, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_h, uint16_t, H1_2, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_s, uint32_t, H1_4, DO_EOR)
+DO_ZZZ_NTB(sve2_eoril_d, uint64_t, H1_8, DO_EOR)
+
+#undef DO_ZZZ_NTB
+
+#define DO_ZZZW_ACC(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel1 = simd_data(desc) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel1)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i)); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, mm) + aa; \
+ } \
+}
+
+DO_ZZZW_ACC(sve2_sabal_h, int16_t, int8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_s, int32_t, int16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_sabal_d, int64_t, int32_t, H1_8, H1_4, DO_ABD)
+
+DO_ZZZW_ACC(sve2_uabal_h, uint16_t, uint8_t, H1_2, H1, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_s, uint32_t, uint16_t, H1_4, H1_2, DO_ABD)
+DO_ZZZW_ACC(sve2_uabal_d, uint64_t, uint32_t, H1_8, H1_4, DO_ABD)
+
+DO_ZZZW_ACC(sve2_smlal_zzzw_h, int16_t, int8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_smlal_zzzw_d, int64_t, int32_t, H1_8, H1_4, DO_MUL)
+
+DO_ZZZW_ACC(sve2_umlal_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZZW_ACC(sve2_umlal_zzzw_d, uint64_t, uint32_t, H1_8, H1_4, DO_MUL)
+
+#define DO_NMUL(N, M) -(N * M)
+
+DO_ZZZW_ACC(sve2_smlsl_zzzw_h, int16_t, int8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_smlsl_zzzw_d, int64_t, int32_t, H1_8, H1_4, DO_NMUL)
+
+DO_ZZZW_ACC(sve2_umlsl_zzzw_h, uint16_t, uint8_t, H1_2, H1, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_s, uint32_t, uint16_t, H1_4, H1_2, DO_NMUL)
+DO_ZZZW_ACC(sve2_umlsl_zzzw_d, uint64_t, uint32_t, H1_8, H1_4, DO_NMUL)
+
+#undef DO_ZZZW_ACC
+
+#define DO_XTNB(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ nn = OP(nn) & MAKE_64BIT_MASK(0, sizeof(TYPE) * 4); \
+ *(TYPE *)(vd + i) = nn; \
+ } \
+}
+
+#define DO_XTNT(NAME, TYPE, TYPEN, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc), odd = H(sizeof(TYPEN)); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ *(TYPEN *)(vd + i + odd) = OP(nn); \
+ } \
+}
+
+#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX)
+#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX)
+#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX)
+
+DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
+DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
+DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
+
+DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
+DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
+DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
+
+#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX)
+#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX)
+#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX)
+
+DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
+DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
+DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
+
+DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
+DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
+DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
+
+DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
+DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
+DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
+
+#undef DO_XTNB
+#undef DO_XTNT
+
+void HELPER(sve2_adcl_s)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int sel = H4(extract32(desc, SIMD_DATA_SHIFT, 1));
+ uint32_t inv = -extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t *a = va, *n = vn;
+ uint64_t *d = vd, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint32_t e1 = a[2 * i + H4(0)];
+ uint32_t e2 = n[2 * i + sel] ^ inv;
+ uint64_t c = extract64(m[i], 32, 1);
+ /* Compute and store the entire 33-bit result at once. */
+ d[i] = c + e1 + e2;
+ }
+}
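+
+/*
+ * With c, e1 and e2 all widened to 64 bits the sum is at most 2^33 - 1,
+ * so it cannot overflow: the low 32 bits of d[i] hold the result and
+ * bit 32 holds the carry-out, which a following ADCLB/ADCLT reads back
+ * via extract64(m[i], 32, 1).
+ */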
+
+void HELPER(sve2_adcl_d)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int sel = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t inv = -(uint64_t)extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint64_t *d = vd, *a = va, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; i += 2) {
+ Int128 e1 = int128_make64(a[i]);
+ Int128 e2 = int128_make64(n[i + sel] ^ inv);
+ Int128 c = int128_make64(m[i + 1] & 1);
+ Int128 r = int128_add(int128_add(e1, e2), c);
+ d[i + 0] = int128_getlo(r);
+ d[i + 1] = int128_gethi(r);
+ }
+}
+
+#define DO_SQDMLAL(NAME, TYPEW, TYPEN, HW, HN, DMUL_OP, SUM_OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sel1 = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ int sel2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(TYPEN); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel1)); \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + sel2)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i)); \
+ *(TYPEW *)(vd + HW(i)) = SUM_OP(aa, DMUL_OP(nn, mm)); \
+ } \
+}
+
+DO_SQDMLAL(sve2_sqdmlal_zzzw_h, int16_t, int8_t, H1_2, H1,
+ do_sqdmull_h, DO_SQADD_H)
+DO_SQDMLAL(sve2_sqdmlal_zzzw_s, int32_t, int16_t, H1_4, H1_2,
+ do_sqdmull_s, DO_SQADD_S)
+DO_SQDMLAL(sve2_sqdmlal_zzzw_d, int64_t, int32_t, H1_8, H1_4,
+ do_sqdmull_d, do_sqadd_d)
+
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_h, int16_t, int8_t, H1_2, H1,
+ do_sqdmull_h, DO_SQSUB_H)
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_s, int32_t, int16_t, H1_4, H1_2,
+ do_sqdmull_s, DO_SQSUB_S)
+DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, H1_8, H1_4,
+ do_sqdmull_d, do_sqsub_d)
+
+#undef DO_SQDMLAL
+
+#define DO_CMLA_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
+ int rot = simd_data(desc); \
+ int sel_a = rot & 1, sel_b = sel_a ^ 1; \
+ bool sub_r = rot == 1 || rot == 2; \
+ bool sub_i = rot >= 2; \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ for (i = 0; i < opr_sz; i += 2) { \
+ TYPE elt1_a = n[H(i + sel_a)]; \
+ TYPE elt2_a = m[H(i + sel_a)]; \
+ TYPE elt2_b = m[H(i + sel_b)]; \
+ d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r); \
+ d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i); \
+ } \
+}
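+
+/*
+ * rot encodes the rotation, 0..3 for 0/90/180/270 degrees: sel_a picks
+ * the real (even) or imaginary (odd) source element, while sub_r and
+ * sub_i control whether the product is subtracted from the real and
+ * imaginary accumulator respectively.
+ */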
+
+#define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1))
+
+DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t, H8, DO_CMLA)
+
+#define DO_SQRDMLAH_B(N, M, A, S) \
+ do_sqrdmlah_b(N, M, A, S, true)
+#define DO_SQRDMLAH_H(N, M, A, S) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A, S) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A, S) \
+ do_sqrdmlah_d(N, M, A, S, true)
+
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t, H8, DO_SQRDMLAH_D)
+
+#define DO_CMLA_IDX_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2); \
+ int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2) * 2; \
+ int sel_a = rot & 1, sel_b = sel_a ^ 1; \
+ bool sub_r = rot == 1 || rot == 2; \
+ bool sub_i = rot >= 2; \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += 16 / sizeof(TYPE)) { \
+ TYPE elt2_a = m[H(i + idx + sel_a)]; \
+ TYPE elt2_b = m[H(i + idx + sel_b)]; \
+ for (j = 0; j < 16 / sizeof(TYPE); j += 2) { \
+ TYPE elt1_a = n[H(i + j + sel_a)]; \
+            d[H(i + j)] = OP(elt1_a, elt2_a, a[H(i + j)], sub_r);      \
+            d[H(i + j + 1)] = OP(elt1_a, elt2_b, a[H(i + j + 1)], sub_i); \
+ } \
+ } \
+}
+
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_h, int16_t, H2, DO_CMLA)
+DO_CMLA_IDX_FUNC(sve2_cmla_idx_s, int32_t, H4, DO_CMLA)
+
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_IDX_FUNC(sve2_sqrdcmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
+
+#undef DO_CMLA
+#undef DO_CMLA_FUNC
+#undef DO_CMLA_IDX_FUNC
+#undef DO_SQRDMLAH_B
+#undef DO_SQRDMLAH_H
+#undef DO_SQRDMLAH_S
+#undef DO_SQRDMLAH_D
+
+/* Note N and M are 4 elements bundled into one unit. */
+static int32_t do_cdot_s(uint32_t n, uint32_t m, int32_t a,
+ int sel_a, int sel_b, int sub_i)
+{
+ for (int i = 0; i <= 1; i++) {
+ int32_t elt1_r = (int8_t)(n >> (16 * i));
+ int32_t elt1_i = (int8_t)(n >> (16 * i + 8));
+ int32_t elt2_a = (int8_t)(m >> (16 * i + 8 * sel_a));
+ int32_t elt2_b = (int8_t)(m >> (16 * i + 8 * sel_b));
+
+ a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
+ }
+ return a;
+}
+
+static int64_t do_cdot_d(uint64_t n, uint64_t m, int64_t a,
+ int sel_a, int sel_b, int sub_i)
+{
+ for (int i = 0; i <= 1; i++) {
+ int64_t elt1_r = (int16_t)(n >> (32 * i + 0));
+ int64_t elt1_i = (int16_t)(n >> (32 * i + 16));
+ int64_t elt2_a = (int16_t)(m >> (32 * i + 16 * sel_a));
+ int64_t elt2_b = (int16_t)(m >> (32 * i + 16 * sel_b));
+
+ a += elt1_r * elt2_a + elt1_i * elt2_b * sub_i;
+ }
+ return a;
+}
+
+void HELPER(sve2_cdot_zzzz_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = simd_data(desc);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint32_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int e = 0; e < opr_sz / 4; e++) {
+ d[e] = do_cdot_s(n[e], m[e], a[e], sel_a, sel_b, sub_i);
+ }
+}
+
+void HELPER(sve2_cdot_zzzz_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = simd_data(desc);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int e = 0; e < opr_sz / 8; e++) {
+ d[e] = do_cdot_d(n[e], m[e], a[e], sel_a, sel_b, sub_i);
+ }
+}
+
+void HELPER(sve2_cdot_idx_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int idx = H4(extract32(desc, SIMD_DATA_SHIFT + 2, 2));
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint32_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (int seg = 0; seg < opr_sz / 4; seg += 4) {
+ uint32_t seg_m = m[seg + idx];
+ for (int e = 0; e < 4; e++) {
+ d[seg + e] = do_cdot_s(n[seg + e], seg_m, a[seg + e],
+ sel_a, sel_b, sub_i);
+ }
+ }
+}
+
+void HELPER(sve2_cdot_idx_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ int seg, opr_sz = simd_oprsz(desc);
+ int rot = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int idx = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
+ int sel_a = rot & 1;
+ int sel_b = sel_a ^ 1;
+ int sub_i = (rot == 0 || rot == 3 ? -1 : 1);
+ uint64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (seg = 0; seg < opr_sz / 8; seg += 2) {
+ uint64_t seg_m = m[seg + idx];
+ for (int e = 0; e < 2; e++) {
+ d[seg + e] = do_cdot_d(n[seg + e], seg_m, a[seg + e],
+ sel_a, sel_b, sub_i);
+ }
+ }
+}
+
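+/*
+ * Expander for SQRDMLAH/SQRDMLSH (indexed): the single element of M
+ * selected by IDX is applied against all elements of N within each
+ * 128-bit segment.
+ */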
+#define DO_ZZXZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
+ intptr_t i, j, idx = simd_data(desc); \
+ TYPE *d = vd, *a = va, *n = vn, *m = (TYPE *)vm + H(idx); \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[i]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = OP(n[i + j], mm, a[i + j]); \
+ } \
+ } \
+}
+
+#define DO_SQRDMLAH_H(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, false, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, false, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A) do_sqrdmlah_d(N, M, A, false, true)
+
+DO_ZZXZ(sve2_sqrdmlah_idx_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_ZZXZ(sve2_sqrdmlah_idx_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_ZZXZ(sve2_sqrdmlah_idx_d, int64_t, H8, DO_SQRDMLAH_D)
+
+#define DO_SQRDMLSH_H(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_h(N, M, A, true, true, &discard); })
+#define DO_SQRDMLSH_S(N, M, A) \
+ ({ uint32_t discard; do_sqrdmlah_s(N, M, A, true, true, &discard); })
+#define DO_SQRDMLSH_D(N, M, A) do_sqrdmlah_d(N, M, A, true, true)
+
+DO_ZZXZ(sve2_sqrdmlsh_idx_h, int16_t, H2, DO_SQRDMLSH_H)
+DO_ZZXZ(sve2_sqrdmlsh_idx_s, int32_t, H4, DO_SQRDMLSH_S)
+DO_ZZXZ(sve2_sqrdmlsh_idx_d, int64_t, H8, DO_SQRDMLSH_D)
+
+#undef DO_ZZXZ
+
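+/*
+ * Expander for widening multiply-accumulate with an indexed element:
+ * SEL selects the bottom or top narrow half of N, IDX the element of
+ * M within each 128-bit segment.
+ */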
+#define DO_ZZXW(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
+ for (i = 0; i < oprsz; i += 16) { \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
+ for (j = 0; j < 16; j += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
+ TYPEW aa = *(TYPEW *)(va + HW(i + j)); \
+ *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm, aa); \
+ } \
+ } \
+}
+
+#define DO_MLA(N, M, A) (A + N * M)
+
+DO_ZZXW(sve2_smlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLA)
+DO_ZZXW(sve2_smlal_idx_d, int64_t, int32_t, H1_8, H1_4, DO_MLA)
+DO_ZZXW(sve2_umlal_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLA)
+DO_ZZXW(sve2_umlal_idx_d, uint64_t, uint32_t, H1_8, H1_4, DO_MLA)
+
+#define DO_MLS(N, M, A) (A - N * M)
+
+DO_ZZXW(sve2_smlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MLS)
+DO_ZZXW(sve2_smlsl_idx_d, int64_t, int32_t, H1_8, H1_4, DO_MLS)
+DO_ZZXW(sve2_umlsl_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MLS)
+DO_ZZXW(sve2_umlsl_idx_d, uint64_t, uint32_t, H1_8, H1_4, DO_MLS)
+
+#define DO_SQDMLAL_S(N, M, A) DO_SQADD_S(A, do_sqdmull_s(N, M))
+#define DO_SQDMLAL_D(N, M, A) do_sqadd_d(A, do_sqdmull_d(N, M))
+
+DO_ZZXW(sve2_sqdmlal_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLAL_S)
+DO_ZZXW(sve2_sqdmlal_idx_d, int64_t, int32_t, H1_8, H1_4, DO_SQDMLAL_D)
+
+#define DO_SQDMLSL_S(N, M, A) DO_SQSUB_S(A, do_sqdmull_s(N, M))
+#define DO_SQDMLSL_D(N, M, A) do_sqsub_d(A, do_sqdmull_d(N, M))
+
+DO_ZZXW(sve2_sqdmlsl_idx_s, int32_t, int16_t, H1_4, H1_2, DO_SQDMLSL_S)
+DO_ZZXW(sve2_sqdmlsl_idx_d, int64_t, int32_t, H1_8, H1_4, DO_SQDMLSL_D)
+
+#undef DO_MLA
+#undef DO_MLS
+#undef DO_ZZXW
+
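+/* As above, but a pure widening multiply with no accumulator. */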
+#define DO_ZZX(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1) * sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 1, 3) * sizeof(TYPEN); \
+ for (i = 0; i < oprsz; i += 16) { \
+ TYPEW mm = *(TYPEN *)(vm + HN(i + idx)); \
+ for (j = 0; j < 16; j += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + j + sel)); \
+ *(TYPEW *)(vd + HW(i + j)) = OP(nn, mm); \
+ } \
+ } \
+}
+
+DO_ZZX(sve2_sqdmull_idx_s, int32_t, int16_t, H1_4, H1_2, do_sqdmull_s)
+DO_ZZX(sve2_sqdmull_idx_d, int64_t, int32_t, H1_8, H1_4, do_sqdmull_d)
+
+DO_ZZX(sve2_smull_idx_s, int32_t, int16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_smull_idx_d, int64_t, int32_t, H1_8, H1_4, DO_MUL)
+
+DO_ZZX(sve2_umull_idx_s, uint32_t, uint16_t, H1_4, H1_2, DO_MUL)
+DO_ZZX(sve2_umull_idx_d, uint64_t, uint32_t, H1_8, H1_4, DO_MUL)
+
+#undef DO_ZZX
+
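+/*
+ * Expander for the BEXT/BDEP/BGRP bit permutes.  As an illustration of
+ * bitextract: bitextract(0b101100, 0b111000, 6) gathers the three data
+ * bits selected by the mask down to the low end, yielding 0b101.
+ */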
+#define DO_BITPERM(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ TYPE mm = *(TYPE *)(vm + i); \
+ *(TYPE *)(vd + i) = OP(nn, mm, sizeof(TYPE) * 8); \
+ } \
+}
+
+static uint64_t bitextract(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t res = 0;
+ int db, rb = 0;
+
+ for (db = 0; db < n; ++db) {
+ if ((mask >> db) & 1) {
+ res |= ((data >> db) & 1) << rb;
+ ++rb;
+ }
+ }
+ return res;
+}
+
+DO_BITPERM(sve2_bext_b, uint8_t, bitextract)
+DO_BITPERM(sve2_bext_h, uint16_t, bitextract)
+DO_BITPERM(sve2_bext_s, uint32_t, bitextract)
+DO_BITPERM(sve2_bext_d, uint64_t, bitextract)
+
+static uint64_t bitdeposit(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t res = 0;
+ int rb, db = 0;
+
+ for (rb = 0; rb < n; ++rb) {
+ if ((mask >> rb) & 1) {
+ res |= ((data >> db) & 1) << rb;
+ ++db;
+ }
+ }
+ return res;
+}
+
+DO_BITPERM(sve2_bdep_b, uint8_t, bitdeposit)
+DO_BITPERM(sve2_bdep_h, uint16_t, bitdeposit)
+DO_BITPERM(sve2_bdep_s, uint32_t, bitdeposit)
+DO_BITPERM(sve2_bdep_d, uint64_t, bitdeposit)
+
+static uint64_t bitgroup(uint64_t data, uint64_t mask, int n)
+{
+ uint64_t resm = 0, resu = 0;
+ int db, rbm = 0, rbu = 0;
+
+ for (db = 0; db < n; ++db) {
+ uint64_t val = (data >> db) & 1;
+ if ((mask >> db) & 1) {
+ resm |= val << rbm++;
+ } else {
+ resu |= val << rbu++;
+ }
+ }
+
+ return resm | (resu << rbm);
+}
+
+DO_BITPERM(sve2_bgrp_b, uint8_t, bitgroup)
+DO_BITPERM(sve2_bgrp_h, uint16_t, bitgroup)
+DO_BITPERM(sve2_bgrp_s, uint32_t, bitgroup)
+DO_BITPERM(sve2_bgrp_d, uint64_t, bitgroup)
+
+#undef DO_BITPERM
+
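+/*
+ * Expander for CADD and SQCADD: complex integer add, with the second
+ * operand rotated by 90 or 270 degrees in the complex plane before the
+ * (optionally saturating) addition.
+ */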
+#define DO_CADD(NAME, TYPE, H, ADD_OP, SUB_OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int sub_r = simd_data(desc); \
+ if (sub_r) { \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE acc_r = *(TYPE *)(vn + H(i)); \
+ TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE el2_r = *(TYPE *)(vm + H(i)); \
+ TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ acc_r = ADD_OP(acc_r, el2_i); \
+ acc_i = SUB_OP(acc_i, el2_r); \
+ *(TYPE *)(vd + H(i)) = acc_r; \
+ *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+ } \
+ } else { \
+ for (i = 0; i < opr_sz; i += 2 * sizeof(TYPE)) { \
+ TYPE acc_r = *(TYPE *)(vn + H(i)); \
+ TYPE acc_i = *(TYPE *)(vn + H(i + sizeof(TYPE))); \
+ TYPE el2_r = *(TYPE *)(vm + H(i)); \
+ TYPE el2_i = *(TYPE *)(vm + H(i + sizeof(TYPE))); \
+ acc_r = SUB_OP(acc_r, el2_i); \
+ acc_i = ADD_OP(acc_i, el2_r); \
+ *(TYPE *)(vd + H(i)) = acc_r; \
+ *(TYPE *)(vd + H(i + sizeof(TYPE))) = acc_i; \
+ } \
+ } \
+}
+
+DO_CADD(sve2_cadd_b, int8_t, H1, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_h, int16_t, H1_2, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_s, int32_t, H1_4, DO_ADD, DO_SUB)
+DO_CADD(sve2_cadd_d, int64_t, H1_8, DO_ADD, DO_SUB)
+
+DO_CADD(sve2_sqcadd_b, int8_t, H1, DO_SQADD_B, DO_SQSUB_B)
+DO_CADD(sve2_sqcadd_h, int16_t, H1_2, DO_SQADD_H, DO_SQSUB_H)
+DO_CADD(sve2_sqcadd_s, int32_t, H1_4, DO_SQADD_S, DO_SQSUB_S)
+DO_CADD(sve2_sqcadd_d, int64_t, H1_8, do_sqadd_d, do_sqsub_d)
+
+#undef DO_CADD
+
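+/*
+ * Expander for SSHLLB/T and USHLLB/T: widen the even or odd narrow
+ * elements (low bit of simd_data) and shift left by the immediate
+ * held in the remaining bits.
+ */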
+#define DO_ZZI_SHLL(NAME, TYPEW, TYPEN, HW, HN) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ intptr_t sel = (simd_data(desc) & 1) * sizeof(TYPEN); \
+ int shift = simd_data(desc) >> 1; \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEN *)(vn + HN(i + sel)); \
+ *(TYPEW *)(vd + HW(i)) = nn << shift; \
+ } \
+}
+
+DO_ZZI_SHLL(sve2_sshll_h, int16_t, int8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_sshll_s, int32_t, int16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_sshll_d, int64_t, int32_t, H1_8, H1_4)
+
+DO_ZZI_SHLL(sve2_ushll_h, uint16_t, uint8_t, H1_2, H1)
+DO_ZZI_SHLL(sve2_ushll_s, uint32_t, uint16_t, H1_4, H1_2)
+DO_ZZI_SHLL(sve2_ushll_d, uint64_t, uint32_t, H1_8, H1_4)
+
+#undef DO_ZZI_SHLL
+
+/* Two-operand reduction expander, controlled by a predicate.
+ * The difference between TYPERED and TYPERET has to do with
+ * sign-extension. E.g. for SMAX, TYPERED must be signed,
+ * but TYPERET must be unsigned so that e.g. a 32-bit value
+ * is not sign-extended to the ABI uint64_t return type.
+ */
+/* ??? If we were to vectorize this by hand the reduction ordering
+ * would change. For integer operands, this is perfectly fine.
+ */
+#define DO_VPZ(NAME, TYPEELT, TYPERED, TYPERET, H, INIT, OP) \
+uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPERED ret = INIT; \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ TYPEELT nn = *(TYPEELT *)(vn + H(i)); \
+ ret = OP(ret, nn); \
+ } \
+ i += sizeof(TYPEELT), pg >>= sizeof(TYPEELT); \
+ } while (i & 15); \
+ } \
+ return (TYPERET)ret; \
+}
+
+#define DO_VPZ_D(NAME, TYPEE, TYPER, INIT, OP) \
+uint64_t HELPER(NAME)(void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPEE *n = vn; \
+ uint8_t *pg = vg; \
+ TYPER ret = INIT; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPEE nn = n[i]; \
+ ret = OP(ret, nn); \
+ } \
+ } \
+ return ret; \
+}
+
+DO_VPZ(sve_orv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_ORR)
+DO_VPZ(sve_orv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_ORR)
+DO_VPZ(sve_orv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_ORR)
+DO_VPZ_D(sve_orv_d, uint64_t, uint64_t, 0, DO_ORR)
+
+DO_VPZ(sve_eorv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_EOR)
+DO_VPZ(sve_eorv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_EOR)
+DO_VPZ(sve_eorv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_EOR)
+DO_VPZ_D(sve_eorv_d, uint64_t, uint64_t, 0, DO_EOR)
+
+DO_VPZ(sve_andv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_AND)
+DO_VPZ(sve_andv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_AND)
+DO_VPZ(sve_andv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_AND)
+DO_VPZ_D(sve_andv_d, uint64_t, uint64_t, -1, DO_AND)
+
+DO_VPZ(sve_saddv_b, int8_t, uint64_t, uint64_t, H1, 0, DO_ADD)
+DO_VPZ(sve_saddv_h, int16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD)
+DO_VPZ(sve_saddv_s, int32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD)
+
+DO_VPZ(sve_uaddv_b, uint8_t, uint64_t, uint64_t, H1, 0, DO_ADD)
+DO_VPZ(sve_uaddv_h, uint16_t, uint64_t, uint64_t, H1_2, 0, DO_ADD)
+DO_VPZ(sve_uaddv_s, uint32_t, uint64_t, uint64_t, H1_4, 0, DO_ADD)
+DO_VPZ_D(sve_uaddv_d, uint64_t, uint64_t, 0, DO_ADD)
+
+DO_VPZ(sve_smaxv_b, int8_t, int8_t, uint8_t, H1, INT8_MIN, DO_MAX)
+DO_VPZ(sve_smaxv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MIN, DO_MAX)
+DO_VPZ(sve_smaxv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MIN, DO_MAX)
+DO_VPZ_D(sve_smaxv_d, int64_t, int64_t, INT64_MIN, DO_MAX)
+
+DO_VPZ(sve_umaxv_b, uint8_t, uint8_t, uint8_t, H1, 0, DO_MAX)
+DO_VPZ(sve_umaxv_h, uint16_t, uint16_t, uint16_t, H1_2, 0, DO_MAX)
+DO_VPZ(sve_umaxv_s, uint32_t, uint32_t, uint32_t, H1_4, 0, DO_MAX)
+DO_VPZ_D(sve_umaxv_d, uint64_t, uint64_t, 0, DO_MAX)
+
+DO_VPZ(sve_sminv_b, int8_t, int8_t, uint8_t, H1, INT8_MAX, DO_MIN)
+DO_VPZ(sve_sminv_h, int16_t, int16_t, uint16_t, H1_2, INT16_MAX, DO_MIN)
+DO_VPZ(sve_sminv_s, int32_t, int32_t, uint32_t, H1_4, INT32_MAX, DO_MIN)
+DO_VPZ_D(sve_sminv_d, int64_t, int64_t, INT64_MAX, DO_MIN)
+
+DO_VPZ(sve_uminv_b, uint8_t, uint8_t, uint8_t, H1, -1, DO_MIN)
+DO_VPZ(sve_uminv_h, uint16_t, uint16_t, uint16_t, H1_2, -1, DO_MIN)
+DO_VPZ(sve_uminv_s, uint32_t, uint32_t, uint32_t, H1_4, -1, DO_MIN)
+DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN)
+
+#undef DO_VPZ
+#undef DO_VPZ_D
+
+/* Two vector operand, one scalar operand, unpredicated. */
+#define DO_ZZI(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE); \
+ TYPE s = s64, *d = vd, *n = vn; \
+ for (i = 0; i < opr_sz; ++i) { \
+ d[i] = OP(n[i], s); \
+ } \
+}
+
+#define DO_SUBR(X, Y) (Y - X)
+
+DO_ZZI(sve_subri_b, uint8_t, DO_SUBR)
+DO_ZZI(sve_subri_h, uint16_t, DO_SUBR)
+DO_ZZI(sve_subri_s, uint32_t, DO_SUBR)
+DO_ZZI(sve_subri_d, uint64_t, DO_SUBR)
+
+DO_ZZI(sve_smaxi_b, int8_t, DO_MAX)
+DO_ZZI(sve_smaxi_h, int16_t, DO_MAX)
+DO_ZZI(sve_smaxi_s, int32_t, DO_MAX)
+DO_ZZI(sve_smaxi_d, int64_t, DO_MAX)
+
+DO_ZZI(sve_smini_b, int8_t, DO_MIN)
+DO_ZZI(sve_smini_h, int16_t, DO_MIN)
+DO_ZZI(sve_smini_s, int32_t, DO_MIN)
+DO_ZZI(sve_smini_d, int64_t, DO_MIN)
+
+DO_ZZI(sve_umaxi_b, uint8_t, DO_MAX)
+DO_ZZI(sve_umaxi_h, uint16_t, DO_MAX)
+DO_ZZI(sve_umaxi_s, uint32_t, DO_MAX)
+DO_ZZI(sve_umaxi_d, uint64_t, DO_MAX)
+
+DO_ZZI(sve_umini_b, uint8_t, DO_MIN)
+DO_ZZI(sve_umini_h, uint16_t, DO_MIN)
+DO_ZZI(sve_umini_s, uint32_t, DO_MIN)
+DO_ZZI(sve_umini_d, uint64_t, DO_MIN)
+
+#undef DO_ZZI
+
+#undef DO_AND
+#undef DO_ORR
+#undef DO_EOR
+#undef DO_BIC
+#undef DO_ADD
+#undef DO_SUB
+#undef DO_MAX
+#undef DO_MIN
+#undef DO_ABD
+#undef DO_MUL
+#undef DO_DIV
+#undef DO_ASR
+#undef DO_LSR
+#undef DO_LSL
+#undef DO_SUBR
+
+/*
+ * Similar to the ARM LastActiveElement pseudocode function, except the
+ * result is multiplied by the element size.  This includes the not found
+ * indication; e.g. not found for esz=3 is -8.
+ */
+static intptr_t last_active_element(uint64_t *g, intptr_t words, intptr_t esz)
+{
+ uint64_t mask = pred_esz_masks[esz];
+ intptr_t i = words;
+
+ do {
+ uint64_t this_g = g[--i] & mask;
+ if (this_g) {
+ return i * 64 + (63 - clz64(this_g));
+ }
+ } while (i > 0);
+ return (intptr_t)-1 << esz;
+}
+
+uint32_t HELPER(sve_pfirst)(void *vd, void *vg, uint32_t pred_desc)
+{
+ intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
+ uint32_t flags = PREDTEST_INIT;
+ uint64_t *d = vd, *g = vg;
+ intptr_t i = 0;
+
+ do {
+ uint64_t this_d = d[i];
+ uint64_t this_g = g[i];
+
+ if (this_g) {
+ if (!(flags & 4)) {
+ /* Set in D the first bit of G. */
+ this_d |= this_g & -this_g;
+ d[i] = this_d;
+ }
+ flags = iter_predtest_fwd(this_d, this_g, flags);
+ }
+ } while (++i < words);
+
+ return flags;
+}
+
+uint32_t HELPER(sve_pnext)(void *vd, void *vg, uint32_t pred_desc)
+{
+ intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t flags = PREDTEST_INIT;
+ uint64_t *d = vd, *g = vg, esz_mask;
+ intptr_t i, next;
+
+ next = last_active_element(vd, words, esz) + (1 << esz);
+ esz_mask = pred_esz_masks[esz];
+
+    /*
+     * Similar to the pseudocode for pnext, but scaled by ESZ
+     * so that we find the correct bit.
+     */
+ if (next < words * 64) {
+ uint64_t mask = -1;
+
+ if (next & 63) {
+ mask = ~((1ull << (next & 63)) - 1);
+ next &= -64;
+ }
+ do {
+ uint64_t this_g = g[next / 64] & esz_mask & mask;
+ if (this_g != 0) {
+ next = (next & -64) + ctz64(this_g);
+ break;
+ }
+ next += 64;
+ mask = -1;
+ } while (next < words * 64);
+ }
+
+ i = 0;
+ do {
+ uint64_t this_d = 0;
+ if (i == next / 64) {
+ this_d = 1ull << (next & 63);
+ }
+ d[i] = this_d;
+ flags = iter_predtest_fwd(this_d, g[i] & esz_mask, flags);
+ } while (++i < words);
+
+ return flags;
+}
+
+/*
+ * Copy Zn into Zd, and store zero into inactive elements.
+ * If inv, store zeros into the active elements.
+ */
+void HELPER(sve_movz_b)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] & (expand_pred_b(pg[H1(i)]) ^ inv);
+ }
+}
+
+void HELPER(sve_movz_h)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] & (expand_pred_h(pg[H1(i)]) ^ inv);
+ }
+}
+
+void HELPER(sve_movz_s)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t inv = -(uint64_t)(simd_data(desc) & 1);
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] & (expand_pred_s(pg[H1(i)]) ^ inv);
+ }
+}
+
+void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+ uint8_t inv = simd_data(desc);
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] & -(uint64_t)((pg[H1(i)] ^ inv) & 1);
+ }
+}
+
+/* Three-operand expander, immediate operand, controlled by a predicate. */
+#define DO_ZPZI(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPE imm = simd_data(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, imm); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZI_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *n = vn; \
+ TYPE imm = simd_data(desc); \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPE nn = n[i]; \
+ d[i] = OP(nn, imm); \
+ } \
+ } \
+}
+
+#define DO_SHR(N, M) (N >> M)
+#define DO_SHL(N, M) (N << M)
+
+/*
+ * Arithmetic shift right for division.  This rounds negative numbers
+ * toward zero as per signed division.  Therefore before shifting,
+ * when N is negative, add 2**M-1.
+ */
+#define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)
+
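+/*
+ * Rounding shift right: equivalent to (x + (1 << (sh - 1))) >> sh,
+ * but computed without overflowing the intermediate sum.
+ */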
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else if (sh == 64) {
+ return x >> 63;
+ } else {
+ return 0;
+ }
+}
+
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else {
+ /* Rounding the sign bit always produces 0. */
+ return 0;
+ }
+}
+
+DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
+DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
+DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
+DO_ZPZI_D(sve_asr_zpzi_d, int64_t, DO_SHR)
+
+DO_ZPZI(sve_lsr_zpzi_b, uint8_t, H1, DO_SHR)
+DO_ZPZI(sve_lsr_zpzi_h, uint16_t, H1_2, DO_SHR)
+DO_ZPZI(sve_lsr_zpzi_s, uint32_t, H1_4, DO_SHR)
+DO_ZPZI_D(sve_lsr_zpzi_d, uint64_t, DO_SHR)
+
+DO_ZPZI(sve_lsl_zpzi_b, uint8_t, H1, DO_SHL)
+DO_ZPZI(sve_lsl_zpzi_h, uint16_t, H1_2, DO_SHL)
+DO_ZPZI(sve_lsl_zpzi_s, uint32_t, H1_4, DO_SHL)
+DO_ZPZI_D(sve_lsl_zpzi_d, uint64_t, DO_SHL)
+
+DO_ZPZI(sve_asrd_b, int8_t, H1, DO_ASRD)
+DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
+DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
+DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
+
+/* SVE2 bitwise shift by immediate */
+DO_ZPZI(sve2_sqshl_zpzi_b, int8_t, H1, do_sqshl_b)
+DO_ZPZI(sve2_sqshl_zpzi_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZI(sve2_sqshl_zpzi_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZI_D(sve2_sqshl_zpzi_d, int64_t, do_sqshl_d)
+
+DO_ZPZI(sve2_uqshl_zpzi_b, uint8_t, H1, do_uqshl_b)
+DO_ZPZI(sve2_uqshl_zpzi_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZI(sve2_uqshl_zpzi_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZI_D(sve2_uqshl_zpzi_d, uint64_t, do_uqshl_d)
+
+DO_ZPZI(sve2_srshr_b, int8_t, H1, do_srshr)
+DO_ZPZI(sve2_srshr_h, int16_t, H1_2, do_srshr)
+DO_ZPZI(sve2_srshr_s, int32_t, H1_4, do_srshr)
+DO_ZPZI_D(sve2_srshr_d, int64_t, do_srshr)
+
+DO_ZPZI(sve2_urshr_b, uint8_t, H1, do_urshr)
+DO_ZPZI(sve2_urshr_h, uint16_t, H1_2, do_urshr)
+DO_ZPZI(sve2_urshr_s, uint32_t, H1_4, do_urshr)
+DO_ZPZI_D(sve2_urshr_d, uint64_t, do_urshr)
+
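+/*
+ * Adaptors for the SQSHLU expansion below.  Note that the macro
+ * do_suqrshl_d deliberately shadows the function of the same name;
+ * since a macro name is not re-expanded within its own body, the
+ * inner call still resolves to the function.
+ */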
+#define do_suqrshl_b(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_suqrshl_h(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_suqrshl_s(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, m, 32, false, &discard); })
+#define do_suqrshl_d(n, m) \
+ ({ uint32_t discard; do_suqrshl_d(n, m, false, &discard); })
+
+DO_ZPZI(sve2_sqshlu_b, int8_t, H1, do_suqrshl_b)
+DO_ZPZI(sve2_sqshlu_h, int16_t, H1_2, do_suqrshl_h)
+DO_ZPZI(sve2_sqshlu_s, int32_t, H1_4, do_suqrshl_s)
+DO_ZPZI_D(sve2_sqshlu_d, int64_t, do_suqrshl_d)
+
+#undef DO_ASRD
+#undef DO_ZPZI
+#undef DO_ZPZI_D
+
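+/*
+ * Expanders for shift-right-and-narrow: the *B forms deposit the
+ * narrow result in the even (bottom) elements and zero the odd,
+ * while the *T forms write only the odd (top) elements, leaving
+ * the even elements untouched.
+ */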
+#define DO_SHRNB(NAME, TYPEW, TYPEN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + i); \
+ *(TYPEW *)(vd + i) = (TYPEN)OP(nn, shift); \
+ } \
+}
+
+#define DO_SHRNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, shift); \
+ } \
+}
+
+DO_SHRNB(sve2_shrnb_h, uint16_t, uint8_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_s, uint32_t, uint16_t, DO_SHR)
+DO_SHRNB(sve2_shrnb_d, uint64_t, uint32_t, DO_SHR)
+
+DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR)
+DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR)
+DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, H1_8, H1_4, DO_SHR)
+
+DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, do_urshr)
+DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, do_urshr)
+
+DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
+DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
+DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, H1_8, H1_4, do_urshr)
+
+#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX)
+#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX)
+#define DO_SQSHRUN_D(x, sh) \
+ do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H)
+DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S)
+DO_SHRNB(sve2_sqshrunb_d, int64_t, uint32_t, DO_SQSHRUN_D)
+
+DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H)
+DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S)
+DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRUN_D)
+
+#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX)
+#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX)
+#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX)
+
+DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H)
+DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S)
+DO_SHRNB(sve2_sqrshrunb_d, int64_t, uint32_t, DO_SQRSHRUN_D)
+
+DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
+DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
+DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQRSHRUN_D)
+
+#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
+#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
+#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
+DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
+DO_SHRNB(sve2_sqshrnb_d, int64_t, uint32_t, DO_SQSHRN_D)
+
+DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
+DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
+DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRN_D)
+
+#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
+#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
+#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
+
+DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
+DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
+DO_SHRNB(sve2_sqrshrnb_d, int64_t, uint32_t, DO_SQRSHRN_D)
+
+DO_SHRNT(sve2_sqrshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRN_H)
+DO_SHRNT(sve2_sqrshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRN_S)
+DO_SHRNT(sve2_sqrshrnt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQRSHRN_D)
+
+#define DO_UQSHRN_H(x, sh) MIN(x >> sh, UINT8_MAX)
+#define DO_UQSHRN_S(x, sh) MIN(x >> sh, UINT16_MAX)
+#define DO_UQSHRN_D(x, sh) MIN(x >> sh, UINT32_MAX)
+
+DO_SHRNB(sve2_uqshrnb_h, uint16_t, uint8_t, DO_UQSHRN_H)
+DO_SHRNB(sve2_uqshrnb_s, uint32_t, uint16_t, DO_UQSHRN_S)
+DO_SHRNB(sve2_uqshrnb_d, uint64_t, uint32_t, DO_UQSHRN_D)
+
+DO_SHRNT(sve2_uqshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQSHRN_H)
+DO_SHRNT(sve2_uqshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQSHRN_S)
+DO_SHRNT(sve2_uqshrnt_d, uint64_t, uint32_t, H1_8, H1_4, DO_UQSHRN_D)
+
+#define DO_UQRSHRN_H(x, sh) MIN(do_urshr(x, sh), UINT8_MAX)
+#define DO_UQRSHRN_S(x, sh) MIN(do_urshr(x, sh), UINT16_MAX)
+#define DO_UQRSHRN_D(x, sh) MIN(do_urshr(x, sh), UINT32_MAX)
+
+DO_SHRNB(sve2_uqrshrnb_h, uint16_t, uint8_t, DO_UQRSHRN_H)
+DO_SHRNB(sve2_uqrshrnb_s, uint32_t, uint16_t, DO_UQRSHRN_S)
+DO_SHRNB(sve2_uqrshrnb_d, uint64_t, uint32_t, DO_UQRSHRN_D)
+
+DO_SHRNT(sve2_uqrshrnt_h, uint16_t, uint8_t, H1_2, H1, DO_UQRSHRN_H)
+DO_SHRNT(sve2_uqrshrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_UQRSHRN_S)
+DO_SHRNT(sve2_uqrshrnt_d, uint64_t, uint32_t, H1_8, H1_4, DO_UQRSHRN_D)
+
+#undef DO_SHRNB
+#undef DO_SHRNT
+
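+/*
+ * Expanders for ADDHN/SUBHN and their rounding forms: add or subtract
+ * wide elements, then narrow by keeping the high half, placed in the
+ * bottom (*B) or top (*T) elements as above.
+ */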
+#define DO_BINOPNB(NAME, TYPEW, TYPEN, SHIFT, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + i); \
+ TYPEW mm = *(TYPEW *)(vm + i); \
+ *(TYPEW *)(vd + i) = (TYPEN)OP(nn, mm, SHIFT); \
+ } \
+}
+
+#define DO_BINOPNT(NAME, TYPEW, TYPEN, SHIFT, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPEW)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ TYPEW mm = *(TYPEW *)(vm + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, mm, SHIFT); \
+ } \
+}
+
+#define DO_ADDHN(N, M, SH) ((N + M) >> SH)
+#define DO_RADDHN(N, M, SH) ((N + M + ((__typeof(N))1 << (SH - 1))) >> SH)
+#define DO_SUBHN(N, M, SH) ((N - M) >> SH)
+#define DO_RSUBHN(N, M, SH) ((N - M + ((__typeof(N))1 << (SH - 1))) >> SH)
+
+DO_BINOPNB(sve2_addhnb_h, uint16_t, uint8_t, 8, DO_ADDHN)
+DO_BINOPNB(sve2_addhnb_s, uint32_t, uint16_t, 16, DO_ADDHN)
+DO_BINOPNB(sve2_addhnb_d, uint64_t, uint32_t, 32, DO_ADDHN)
+
+DO_BINOPNT(sve2_addhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_ADDHN)
+DO_BINOPNT(sve2_addhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_ADDHN)
+DO_BINOPNT(sve2_addhnt_d, uint64_t, uint32_t, 32, H1_8, H1_4, DO_ADDHN)
+
+DO_BINOPNB(sve2_raddhnb_h, uint16_t, uint8_t, 8, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_s, uint32_t, uint16_t, 16, DO_RADDHN)
+DO_BINOPNB(sve2_raddhnb_d, uint64_t, uint32_t, 32, DO_RADDHN)
+
+DO_BINOPNT(sve2_raddhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RADDHN)
+DO_BINOPNT(sve2_raddhnt_d, uint64_t, uint32_t, 32, H1_8, H1_4, DO_RADDHN)
+
+DO_BINOPNB(sve2_subhnb_h, uint16_t, uint8_t, 8, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_s, uint32_t, uint16_t, 16, DO_SUBHN)
+DO_BINOPNB(sve2_subhnb_d, uint64_t, uint32_t, 32, DO_SUBHN)
+
+DO_BINOPNT(sve2_subhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_SUBHN)
+DO_BINOPNT(sve2_subhnt_d, uint64_t, uint32_t, 32, H1_8, H1_4, DO_SUBHN)
+
+DO_BINOPNB(sve2_rsubhnb_h, uint16_t, uint8_t, 8, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_s, uint32_t, uint16_t, 16, DO_RSUBHN)
+DO_BINOPNB(sve2_rsubhnb_d, uint64_t, uint32_t, 32, DO_RSUBHN)
+
+DO_BINOPNT(sve2_rsubhnt_h, uint16_t, uint8_t, 8, H1_2, H1, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_s, uint32_t, uint16_t, 16, H1_4, H1_2, DO_RSUBHN)
+DO_BINOPNT(sve2_rsubhnt_d, uint64_t, uint32_t, 32, H1_8, H1_4, DO_RSUBHN)
+
+#undef DO_RSUBHN
+#undef DO_SUBHN
+#undef DO_RADDHN
+#undef DO_ADDHN
+
+#undef DO_BINOPNB
+
+/* Fully general four-operand expander, controlled by a predicate. */
+#define DO_ZPZZZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+ void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ TYPE mm = *(TYPE *)(vm + H(i)); \
+ TYPE aa = *(TYPE *)(va + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(aa, nn, mm); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
+#define DO_ZPZZZ_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+ void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *a = va, *n = vn, *m = vm; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPE aa = a[i], nn = n[i], mm = m[i]; \
+ d[i] = OP(aa, nn, mm); \
+ } \
+ } \
+}
+
+#define DO_MLA(A, N, M) (A + N * M)
+#define DO_MLS(A, N, M) (A - N * M)
+
+DO_ZPZZZ(sve_mla_b, uint8_t, H1, DO_MLA)
+DO_ZPZZZ(sve_mls_b, uint8_t, H1, DO_MLS)
+
+DO_ZPZZZ(sve_mla_h, uint16_t, H1_2, DO_MLA)
+DO_ZPZZZ(sve_mls_h, uint16_t, H1_2, DO_MLS)
+
+DO_ZPZZZ(sve_mla_s, uint32_t, H1_4, DO_MLA)
+DO_ZPZZZ(sve_mls_s, uint32_t, H1_4, DO_MLS)
+
+DO_ZPZZZ_D(sve_mla_d, uint64_t, DO_MLA)
+DO_ZPZZZ_D(sve_mls_d, uint64_t, DO_MLS)
+
+#undef DO_MLA
+#undef DO_MLS
+#undef DO_ZPZZZ
+#undef DO_ZPZZZ_D
+
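+/* INDEX: d[i] = start + i * incr, for immediate or scalar operands. */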
+void HELPER(sve_index_b)(void *vd, uint32_t start,
+ uint32_t incr, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint8_t *d = vd;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[H1(i)] = start + i * incr;
+ }
+}
+
+void HELPER(sve_index_h)(void *vd, uint32_t start,
+ uint32_t incr, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 2;
+ uint16_t *d = vd;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[H2(i)] = start + i * incr;
+ }
+}
+
+void HELPER(sve_index_s)(void *vd, uint32_t start,
+ uint32_t incr, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ uint32_t *d = vd;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[H4(i)] = start + i * incr;
+ }
+}
+
+void HELPER(sve_index_d)(void *vd, uint64_t start,
+ uint64_t incr, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = start + i * incr;
+ }
+}
+
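+/*
+ * ADR: vector address generation, d = n + (m << sh); the _s32 and
+ * _u32 forms first sign- or zero-extend a 32-bit index element.
+ */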
+void HELPER(sve_adr_p32)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ uint32_t sh = simd_data(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] + (m[i] << sh);
+ }
+}
+
+void HELPER(sve_adr_p64)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t sh = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] + (m[i] << sh);
+ }
+}
+
+void HELPER(sve_adr_s32)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t sh = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] + ((uint64_t)(int32_t)m[i] << sh);
+ }
+}
+
+void HELPER(sve_adr_u32)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t sh = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = n[i] + ((uint64_t)(uint32_t)m[i] << sh);
+ }
+}
+
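+/*
+ * FEXPA: the low bits of each element index a table of fractional
+ * powers of two; the remaining bits are placed directly into the
+ * exponent field of the result.
+ */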
+void HELPER(sve_fexpa_h)(void *vd, void *vn, uint32_t desc)
+{
+ /* These constants are cut-and-paste directly from the ARM pseudocode. */
+ static const uint16_t coeff[] = {
+ 0x0000, 0x0016, 0x002d, 0x0045, 0x005d, 0x0075, 0x008e, 0x00a8,
+ 0x00c2, 0x00dc, 0x00f8, 0x0114, 0x0130, 0x014d, 0x016b, 0x0189,
+ 0x01a8, 0x01c8, 0x01e8, 0x0209, 0x022b, 0x024e, 0x0271, 0x0295,
+ 0x02ba, 0x02e0, 0x0306, 0x032e, 0x0356, 0x037f, 0x03a9, 0x03d4,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / 2;
+ uint16_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz; i++) {
+ uint16_t nn = n[i];
+ intptr_t idx = extract32(nn, 0, 5);
+ uint16_t exp = extract32(nn, 5, 5);
+ d[i] = coeff[idx] | (exp << 10);
+ }
+}
+
+void HELPER(sve_fexpa_s)(void *vd, void *vn, uint32_t desc)
+{
+ /* These constants are cut-and-paste directly from the ARM pseudocode. */
+ static const uint32_t coeff[] = {
+ 0x000000, 0x0164d2, 0x02cd87, 0x043a29,
+ 0x05aac3, 0x071f62, 0x08980f, 0x0a14d5,
+ 0x0b95c2, 0x0d1adf, 0x0ea43a, 0x1031dc,
+ 0x11c3d3, 0x135a2b, 0x14f4f0, 0x16942d,
+ 0x1837f0, 0x19e046, 0x1b8d3a, 0x1d3eda,
+ 0x1ef532, 0x20b051, 0x227043, 0x243516,
+ 0x25fed7, 0x27cd94, 0x29a15b, 0x2b7a3a,
+ 0x2d583f, 0x2f3b79, 0x3123f6, 0x3311c4,
+ 0x3504f3, 0x36fd92, 0x38fbaf, 0x3aff5b,
+ 0x3d08a4, 0x3f179a, 0x412c4d, 0x4346cd,
+ 0x45672a, 0x478d75, 0x49b9be, 0x4bec15,
+ 0x4e248c, 0x506334, 0x52a81e, 0x54f35b,
+ 0x5744fd, 0x599d16, 0x5bfbb8, 0x5e60f5,
+ 0x60ccdf, 0x633f89, 0x65b907, 0x68396a,
+ 0x6ac0c7, 0x6d4f30, 0x6fe4ba, 0x728177,
+ 0x75257d, 0x77d0df, 0x7a83b3, 0x7d3e0c,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ uint32_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz; i++) {
+ uint32_t nn = n[i];
+ intptr_t idx = extract32(nn, 0, 6);
+ uint32_t exp = extract32(nn, 6, 8);
+ d[i] = coeff[idx] | (exp << 23);
+ }
+}
+
+void HELPER(sve_fexpa_d)(void *vd, void *vn, uint32_t desc)
+{
+ /* These constants are cut-and-paste directly from the ARM pseudocode. */
+ static const uint64_t coeff[] = {
+ 0x0000000000000ull, 0x02C9A3E778061ull, 0x059B0D3158574ull,
+ 0x0874518759BC8ull, 0x0B5586CF9890Full, 0x0E3EC32D3D1A2ull,
+ 0x11301D0125B51ull, 0x1429AAEA92DE0ull, 0x172B83C7D517Bull,
+ 0x1A35BEB6FCB75ull, 0x1D4873168B9AAull, 0x2063B88628CD6ull,
+ 0x2387A6E756238ull, 0x26B4565E27CDDull, 0x29E9DF51FDEE1ull,
+ 0x2D285A6E4030Bull, 0x306FE0A31B715ull, 0x33C08B26416FFull,
+ 0x371A7373AA9CBull, 0x3A7DB34E59FF7ull, 0x3DEA64C123422ull,
+ 0x4160A21F72E2Aull, 0x44E086061892Dull, 0x486A2B5C13CD0ull,
+ 0x4BFDAD5362A27ull, 0x4F9B2769D2CA7ull, 0x5342B569D4F82ull,
+ 0x56F4736B527DAull, 0x5AB07DD485429ull, 0x5E76F15AD2148ull,
+ 0x6247EB03A5585ull, 0x6623882552225ull, 0x6A09E667F3BCDull,
+ 0x6DFB23C651A2Full, 0x71F75E8EC5F74ull, 0x75FEB564267C9ull,
+ 0x7A11473EB0187ull, 0x7E2F336CF4E62ull, 0x82589994CCE13ull,
+ 0x868D99B4492EDull, 0x8ACE5422AA0DBull, 0x8F1AE99157736ull,
+ 0x93737B0CDC5E5ull, 0x97D829FDE4E50ull, 0x9C49182A3F090ull,
+ 0xA0C667B5DE565ull, 0xA5503B23E255Dull, 0xA9E6B5579FDBFull,
+ 0xAE89F995AD3ADull, 0xB33A2B84F15FBull, 0xB7F76F2FB5E47ull,
+ 0xBCC1E904BC1D2ull, 0xC199BDD85529Cull, 0xC67F12E57D14Bull,
+ 0xCB720DCEF9069ull, 0xD072D4A07897Cull, 0xD5818DCFBA487ull,
+ 0xDA9E603DB3285ull, 0xDFC97337B9B5Full, 0xE502EE78B3FF6ull,
+ 0xEA4AFA2A490DAull, 0xEFA1BEE615A27ull, 0xF50765B6E4540ull,
+ 0xFA7C1819E90D8ull,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+
+ for (i = 0; i < opr_sz; i++) {
+ uint64_t nn = n[i];
+ intptr_t idx = extract32(nn, 0, 6);
+ uint64_t exp = extract32(nn, 6, 11);
+ d[i] = coeff[idx] | (exp << 52);
+ }
+}
+
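+/*
+ * FTSSEL: bit 0 of each element of M selects between N and 1.0;
+ * bit 1 is XORed into the sign bit of the result.
+ */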
+void HELPER(sve_ftssel_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 2;
+ uint16_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ uint16_t nn = n[i];
+ uint16_t mm = m[i];
+ if (mm & 1) {
+ nn = float16_one;
+ }
+ d[i] = nn ^ (mm & 2) << 14;
+ }
+}
+
+void HELPER(sve_ftssel_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ uint32_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ uint32_t nn = n[i];
+ uint32_t mm = m[i];
+ if (mm & 1) {
+ nn = float32_one;
+ }
+ d[i] = nn ^ (mm & 2) << 30;
+ }
+}
+
+void HELPER(sve_ftssel_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i];
+ uint64_t mm = m[i];
+ if (mm & 1) {
+ nn = float64_one;
+ }
+ d[i] = nn ^ (mm & 2) << 62;
+ }
+}
+
+/*
+ * Signed saturating addition with scalar operand.
+ */
+
+void HELPER(sve_sqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(int8_t)) {
+ *(int8_t *)(d + i) = DO_SQADD_B(b, *(int8_t *)(a + i));
+ }
+}
+
+void HELPER(sve_sqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(int16_t)) {
+ *(int16_t *)(d + i) = DO_SQADD_H(b, *(int16_t *)(a + i));
+ }
+}
+
+void HELPER(sve_sqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(int32_t)) {
+ *(int32_t *)(d + i) = DO_SQADD_S(b, *(int32_t *)(a + i));
+ }
+}
+
+void HELPER(sve_sqaddi_d)(void *d, void *a, int64_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(int64_t)) {
+ *(int64_t *)(d + i) = do_sqadd_d(b, *(int64_t *)(a + i));
+ }
+}
+
+/*
+ * Unsigned saturating addition with scalar operand.
+ */
+
+void HELPER(sve_uqaddi_b)(void *d, void *a, int32_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+ *(uint8_t *)(d + i) = DO_UQADD_B(b, *(uint8_t *)(a + i));
+ }
+}
+
+void HELPER(sve_uqaddi_h)(void *d, void *a, int32_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+ *(uint16_t *)(d + i) = DO_UQADD_H(b, *(uint16_t *)(a + i));
+ }
+}
+
+void HELPER(sve_uqaddi_s)(void *d, void *a, int64_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+ *(uint32_t *)(d + i) = DO_UQADD_S(b, *(uint32_t *)(a + i));
+ }
+}
+
+void HELPER(sve_uqaddi_d)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+ *(uint64_t *)(d + i) = do_uqadd_d(b, *(uint64_t *)(a + i));
+ }
+}
+
+void HELPER(sve_uqsubi_d)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+ *(uint64_t *)(d + i) = do_uqsub_d(*(uint64_t *)(a + i), b);
+ }
+}
+
+/* Two-operand predicated copy immediate with merge.  All valid immediates
+ * can fit within 17 signed bits in the simd_data field.
+ */
+void HELPER(sve_cpy_m_b)(void *vd, void *vn, void *vg,
+ uint64_t mm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ mm = dup_const(MO_8, mm);
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i];
+ uint64_t pp = expand_pred_b(pg[H1(i)]);
+ d[i] = (mm & pp) | (nn & ~pp);
+ }
+}
+
+void HELPER(sve_cpy_m_h)(void *vd, void *vn, void *vg,
+ uint64_t mm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ mm = dup_const(MO_16, mm);
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i];
+ uint64_t pp = expand_pred_h(pg[H1(i)]);
+ d[i] = (mm & pp) | (nn & ~pp);
+ }
+}
+
+void HELPER(sve_cpy_m_s)(void *vd, void *vn, void *vg,
+ uint64_t mm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ mm = dup_const(MO_32, mm);
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i];
+ uint64_t pp = expand_pred_s(pg[H1(i)]);
+ d[i] = (mm & pp) | (nn & ~pp);
+ }
+}
+
+void HELPER(sve_cpy_m_d)(void *vd, void *vn, void *vg,
+ uint64_t mm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i];
+ d[i] = (pg[H1(i)] & 1 ? mm : nn);
+ }
+}
+
+void HELPER(sve_cpy_z_b)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd;
+ uint8_t *pg = vg;
+
+ val = dup_const(MO_8, val);
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = val & expand_pred_b(pg[H1(i)]);
+ }
+}
+
+void HELPER(sve_cpy_z_h)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd;
+ uint8_t *pg = vg;
+
+ val = dup_const(MO_16, val);
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = val & expand_pred_h(pg[H1(i)]);
+ }
+}
+
+void HELPER(sve_cpy_z_s)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd;
+ uint8_t *pg = vg;
+
+ val = dup_const(MO_32, val);
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = val & expand_pred_s(pg[H1(i)]);
+ }
+}
+
+void HELPER(sve_cpy_z_d)(void *vd, void *vg, uint64_t val, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ d[i] = (pg[H1(i)] & 1 ? val : 0);
+ }
+}
+
+/* Big-endian hosts need to frob the byte indices. If the copy
+ * happens to be 8-byte aligned, then no frobbing necessary.
+ */
+static void swap_memmove(void *vd, void *vs, size_t n)
+{
+ uintptr_t d = (uintptr_t)vd;
+ uintptr_t s = (uintptr_t)vs;
+ uintptr_t o = (d | s | n) & 7;
+ size_t i;
+
+#ifndef HOST_WORDS_BIGENDIAN
+ o = 0;
+#endif
+ switch (o) {
+ case 0:
+ memmove(vd, vs, n);
+ break;
+
+ case 4:
+ if (d < s || d >= s + n) {
+ for (i = 0; i < n; i += 4) {
+ *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
+ }
+ } else {
+ for (i = n; i > 0; ) {
+ i -= 4;
+ *(uint32_t *)H1_4(d + i) = *(uint32_t *)H1_4(s + i);
+ }
+ }
+ break;
+
+ case 2:
+ case 6:
+ if (d < s || d >= s + n) {
+ for (i = 0; i < n; i += 2) {
+ *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
+ }
+ } else {
+ for (i = n; i > 0; ) {
+ i -= 2;
+ *(uint16_t *)H1_2(d + i) = *(uint16_t *)H1_2(s + i);
+ }
+ }
+ break;
+
+ default:
+ if (d < s || d >= s + n) {
+ for (i = 0; i < n; i++) {
+ *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
+ }
+ } else {
+ for (i = n; i > 0; ) {
+ i -= 1;
+ *(uint8_t *)H1(d + i) = *(uint8_t *)H1(s + i);
+ }
+ }
+ break;
+ }
+}
+
+/* Similarly for memset of 0. */
+static void swap_memzero(void *vd, size_t n)
+{
+ uintptr_t d = (uintptr_t)vd;
+ uintptr_t o = (d | n) & 7;
+ size_t i;
+
+ /* Usually, the first bit of a predicate is set, so N is 0. */
+ if (likely(n == 0)) {
+ return;
+ }
+
+#ifndef HOST_WORDS_BIGENDIAN
+ o = 0;
+#endif
+ switch (o) {
+ case 0:
+ memset(vd, 0, n);
+ break;
+
+ case 4:
+ for (i = 0; i < n; i += 4) {
+ *(uint32_t *)H1_4(d + i) = 0;
+ }
+ break;
+
+ case 2:
+ case 6:
+ for (i = 0; i < n; i += 2) {
+ *(uint16_t *)H1_2(d + i) = 0;
+ }
+ break;
+
+ default:
+ for (i = 0; i < n; i++) {
+ *(uint8_t *)H1(d + i) = 0;
+ }
+ break;
+ }
+}
+
+void HELPER(sve_ext)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t opr_sz = simd_oprsz(desc);
+ size_t n_ofs = simd_data(desc);
+ size_t n_siz = opr_sz - n_ofs;
+
+ if (vd != vm) {
+ swap_memmove(vd, vn + n_ofs, n_siz);
+ swap_memmove(vd + n_siz, vm, n_ofs);
+ } else if (vd != vn) {
+ swap_memmove(vd + n_siz, vd, n_ofs);
+ swap_memmove(vd, vn + n_ofs, n_siz);
+ } else {
+ /* vd == vn == vm. Need temp space. */
+ ARMVectorReg tmp;
+ swap_memmove(&tmp, vm, n_ofs);
+ swap_memmove(vd, vd + n_ofs, n_siz);
+ memcpy(vd + n_siz, &tmp, n_ofs);
+ }
+}
+
+#define DO_INSR(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, uint64_t val, uint32_t desc) \
+{ \
+ intptr_t opr_sz = simd_oprsz(desc); \
+ swap_memmove(vd + sizeof(TYPE), vn, opr_sz - sizeof(TYPE)); \
+ *(TYPE *)(vd + H(0)) = val; \
+}
+
+DO_INSR(sve_insr_b, uint8_t, H1)
+DO_INSR(sve_insr_h, uint16_t, H1_2)
+DO_INSR(sve_insr_s, uint32_t, H1_4)
+DO_INSR(sve_insr_d, uint64_t, H1_8)
+
+#undef DO_INSR
+
+void HELPER(sve_rev_b)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+ uint64_t f = *(uint64_t *)(vn + i);
+ uint64_t b = *(uint64_t *)(vn + j);
+ *(uint64_t *)(vd + i) = bswap64(b);
+ *(uint64_t *)(vd + j) = bswap64(f);
+ }
+}
+
+void HELPER(sve_rev_h)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+ uint64_t f = *(uint64_t *)(vn + i);
+ uint64_t b = *(uint64_t *)(vn + j);
+ *(uint64_t *)(vd + i) = hswap64(b);
+ *(uint64_t *)(vd + j) = hswap64(f);
+ }
+}
+
+void HELPER(sve_rev_s)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+ uint64_t f = *(uint64_t *)(vn + i);
+ uint64_t b = *(uint64_t *)(vn + j);
+ *(uint64_t *)(vd + i) = rol64(b, 32);
+ *(uint64_t *)(vd + j) = rol64(f, 32);
+ }
+}
+
+void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ for (i = 0, j = opr_sz - 8; i < opr_sz / 2; i += 8, j -= 8) {
+ uint64_t f = *(uint64_t *)(vn + i);
+ uint64_t b = *(uint64_t *)(vn + j);
+ *(uint64_t *)(vd + i) = b;
+ *(uint64_t *)(vd + j) = f;
+ }
+}
+
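+/*
+ * TBL/TBX table lookups.  A tb_impl_fn performs the per-element
+ * lookup; do_tbl1/do_tbl2 handle destructive overlap of the table
+ * registers with the destination via a scratch copy.
+ */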
+typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
+
+static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,
+ bool is_tbx, tb_impl_fn *fn)
+{
+ ARMVectorReg scratch;
+ uintptr_t oprsz = simd_oprsz(desc);
+
+ if (unlikely(vd == vn)) {
+ vn = memcpy(&scratch, vn, oprsz);
+ }
+
+ fn(vd, vn, NULL, vm, oprsz, is_tbx);
+}
+
+static inline void do_tbl2(void *vd, void *vn0, void *vn1, void *vm,
+ uint32_t desc, bool is_tbx, tb_impl_fn *fn)
+{
+ ARMVectorReg scratch;
+ uintptr_t oprsz = simd_oprsz(desc);
+
+ if (unlikely(vd == vn0)) {
+ vn0 = memcpy(&scratch, vn0, oprsz);
+ if (vd == vn1) {
+ vn1 = vn0;
+ }
+ } else if (unlikely(vd == vn1)) {
+ vn1 = memcpy(&scratch, vn1, oprsz);
+ }
+
+ fn(vd, vn0, vn1, vm, oprsz, is_tbx);
+}
+
+#define DO_TB(SUFF, TYPE, H) \
+static inline void do_tb_##SUFF(void *vd, void *vt0, void *vt1, \
+ void *vm, uintptr_t oprsz, bool is_tbx) \
+{ \
+ TYPE *d = vd, *tbl0 = vt0, *tbl1 = vt1, *indexes = vm; \
+ uintptr_t i, nelem = oprsz / sizeof(TYPE); \
+ for (i = 0; i < nelem; ++i) { \
+ TYPE index = indexes[H1(i)], val = 0; \
+ if (index < nelem) { \
+ val = tbl0[H(index)]; \
+ } else { \
+ index -= nelem; \
+ if (tbl1 && index < nelem) { \
+ val = tbl1[H(index)]; \
+ } else if (is_tbx) { \
+ continue; \
+ } \
+ } \
+ d[H(i)] = val; \
+ } \
+} \
+void HELPER(sve_tbl_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ do_tbl1(vd, vn, vm, desc, false, do_tb_##SUFF); \
+} \
+void HELPER(sve2_tbl_##SUFF)(void *vd, void *vn0, void *vn1, \
+ void *vm, uint32_t desc) \
+{ \
+ do_tbl2(vd, vn0, vn1, vm, desc, false, do_tb_##SUFF); \
+} \
+void HELPER(sve2_tbx_##SUFF)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ do_tbl1(vd, vn, vm, desc, true, do_tb_##SUFF); \
+}
+
+DO_TB(b, uint8_t, H1)
+DO_TB(h, uint16_t, H2)
+DO_TB(s, uint32_t, H4)
+DO_TB(d, uint64_t, H8)
+
+#undef DO_TB
+
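+/*
+ * Expander for [SU]UNPK{LO,HI}: widen, with sign or zero extension,
+ * narrow elements taken from one half of N; the caller is expected
+ * to point N at the desired half.
+ */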
+#define DO_UNPK(NAME, TYPED, TYPES, HD, HS) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd; \
+ TYPES *n = vn; \
+ ARMVectorReg tmp; \
+ if (unlikely(vn - vd < opr_sz)) { \
+ n = memcpy(&tmp, n, opr_sz / 2); \
+ } \
+ for (i = 0; i < opr_sz / sizeof(TYPED); i++) { \
+ d[HD(i)] = n[HS(i)]; \
+ } \
+}
+
+DO_UNPK(sve_sunpk_h, int16_t, int8_t, H2, H1)
+DO_UNPK(sve_sunpk_s, int32_t, int16_t, H4, H2)
+DO_UNPK(sve_sunpk_d, int64_t, int32_t, H8, H4)
+
+DO_UNPK(sve_uunpk_h, uint16_t, uint8_t, H2, H1)
+DO_UNPK(sve_uunpk_s, uint32_t, uint16_t, H4, H2)
+DO_UNPK(sve_uunpk_d, uint64_t, uint32_t, H8, H4)
+
+#undef DO_UNPK
+
+/* Mask of bits included in the even numbered predicates of width esz.
+ * We also use this for expand_bits/compress_bits, and so extend the
+ * same pattern out to 16-bit units.
+ */
+static const uint64_t even_bit_esz_masks[5] = {
+ 0x5555555555555555ull,
+ 0x3333333333333333ull,
+ 0x0f0f0f0f0f0f0f0full,
+ 0x00ff00ff00ff00ffull,
+ 0x0000ffff0000ffffull,
+};
+
+/* Zero-extend units of 2**N bits to units of 2**(N+1) bits.
+ * For N==0, this corresponds to the operation that in qemu/bitops.h
+ * we call half_shuffle64; this algorithm is from Hacker's Delight,
+ * section 7-2 Shuffling Bits.
+ */
+static uint64_t expand_bits(uint64_t x, int n)
+{
+ int i;
+
+ x &= 0xffffffffu;
+ for (i = 4; i >= n; i--) {
+ int sh = 1 << i;
+ x = ((x << sh) | x) & even_bit_esz_masks[i];
+ }
+ return x;
+}
+
+/* Compress units of 2**(N+1) bits to units of 2**N bits.
+ * For N==0, this corresponds to the operation that in qemu/bitops.h
+ * we call half_unshuffle64; this algorithm is from Hacker's Delight,
+ * section 7-2 Shuffling Bits, where it is called an inverse half shuffle.
+ */
+static uint64_t compress_bits(uint64_t x, int n)
+{
+ int i;
+
+ for (i = n; i <= 4; i++) {
+ int sh = 1 << i;
+ x &= even_bit_esz_masks[i];
+ x = (x >> sh) | x;
+ }
+ return x & 0xffffffffu;
+}
+
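+/*
+ * ZIP for predicates: interleaving the two sources is a bit-expand of
+ * each, followed by an OR with one source shifted up by the element
+ * size.
+ */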
+void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
+ int esize = 1 << esz;
+ uint64_t *d = vd;
+ intptr_t i;
+
+ if (oprsz <= 8) {
+ uint64_t nn = *(uint64_t *)vn;
+ uint64_t mm = *(uint64_t *)vm;
+ int half = 4 * oprsz;
+
+ nn = extract64(nn, high * half, half);
+ mm = extract64(mm, high * half, half);
+ nn = expand_bits(nn, esz);
+ mm = expand_bits(mm, esz);
+ d[0] = nn | (mm << esize);
+ } else {
+ ARMPredicateReg tmp;
+
+        /*
+         * We produce output faster than we consume input.
+         * Therefore we must be mindful of possible overlap.
+         */
+ if (vd == vn) {
+ vn = memcpy(&tmp, vn, oprsz);
+ if (vd == vm) {
+ vm = vn;
+ }
+ } else if (vd == vm) {
+ vm = memcpy(&tmp, vm, oprsz);
+ }
+ if (high) {
+ high = oprsz >> 1;
+ }
+
+ if ((oprsz & 7) == 0) {
+ uint32_t *n = vn, *m = vm;
+ high >>= 2;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ uint64_t nn = n[H4(high + i)];
+ uint64_t mm = m[H4(high + i)];
+
+ nn = expand_bits(nn, esz);
+ mm = expand_bits(mm, esz);
+ d[i] = nn | (mm << esize);
+ }
+ } else {
+ uint8_t *n = vn, *m = vm;
+ uint16_t *d16 = vd;
+
+ for (i = 0; i < oprsz / 2; i++) {
+ uint16_t nn = n[H1(high + i)];
+ uint16_t mm = m[H1(high + i)];
+
+ nn = expand_bits(nn, esz);
+ mm = expand_bits(mm, esz);
+ d16[H2(i)] = nn | (mm << esize);
+ }
+ }
+ }
+}
+
+void HELPER(sve_uzp_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ int odd = FIELD_EX32(pred_desc, PREDDESC, DATA) << esz;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t l, h;
+ intptr_t i;
+
+ if (oprsz <= 8) {
+ l = compress_bits(n[0] >> odd, esz);
+ h = compress_bits(m[0] >> odd, esz);
+ d[0] = l | (h << (4 * oprsz));
+ } else {
+ ARMPredicateReg tmp_m;
+ intptr_t oprsz_16 = oprsz / 16;
+
+ if ((vm - vd) < (uintptr_t)oprsz) {
+ m = memcpy(&tmp_m, vm, oprsz);
+ }
+
+ for (i = 0; i < oprsz_16; i++) {
+ l = n[2 * i + 0];
+ h = n[2 * i + 1];
+ l = compress_bits(l >> odd, esz);
+ h = compress_bits(h >> odd, esz);
+ d[i] = l | (h << 32);
+ }
+
+ /*
+ * For VL which is not a multiple of 512, the results from M do not
+ * align nicely with the uint64_t for D. Put the aligned results
+ * from M into TMP_M and then copy it into place afterward.
+ */
+ if (oprsz & 15) {
+ int final_shift = (oprsz & 15) * 2;
+
+ l = n[2 * i + 0];
+ h = n[2 * i + 1];
+ l = compress_bits(l >> odd, esz);
+ h = compress_bits(h >> odd, esz);
+ d[i] = l | (h << final_shift);
+
+ for (i = 0; i < oprsz_16; i++) {
+ l = m[2 * i + 0];
+ h = m[2 * i + 1];
+ l = compress_bits(l >> odd, esz);
+ h = compress_bits(h >> odd, esz);
+ tmp_m.p[i] = l | (h << 32);
+ }
+ l = m[2 * i + 0];
+ h = m[2 * i + 1];
+ l = compress_bits(l >> odd, esz);
+ h = compress_bits(h >> odd, esz);
+ tmp_m.p[i] = l | (h << final_shift);
+
+ swap_memmove(vd + oprsz / 2, &tmp_m, oprsz / 2);
+ } else {
+ for (i = 0; i < oprsz_16; i++) {
+ l = m[2 * i + 0];
+ h = m[2 * i + 1];
+ l = compress_bits(l >> odd, esz);
+ h = compress_bits(h >> odd, esz);
+ d[oprsz_16 + i] = l | (h << 32);
+ }
+ }
+ }
+}
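+
+/* Concrete case for the short path above: esz == 0, odd == 0,
+ * oprsz == 2, N = 0x5555, M = 0x0000.  Then l = compress_bits(0x5555, 0)
+ * == 0xff and D = 0x00ff: UZP gathers the even-numbered elements,
+ * inverting the ZIP example above.
+ */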
+
+void HELPER(sve_trn_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ int odd = FIELD_EX32(pred_desc, PREDDESC, DATA);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t mask;
+ int shr, shl;
+ intptr_t i;
+
+ shl = 1 << esz;
+ shr = 0;
+ mask = even_bit_esz_masks[esz];
+ if (odd) {
+ mask <<= shl;
+ shr = shl;
+ shl = 0;
+ }
+
+ for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) {
+ uint64_t nn = (n[i] & mask) >> shr;
+ uint64_t mm = (m[i] & mask) << shl;
+ d[i] = nn + mm;
+ }
+}
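+
+/* E.g. with esz == 0 and odd == 0 (TRN1):
+ *   d[i] = (n[i] & 0x5555...) | ((m[i] & 0x5555...) << 1)
+ * so n[i] = 0x00ff, m[i] = 0x0f0f gives d[i] = 0x0055 | 0x0a0a == 0x0a5f.
+ */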
+
+/* Reverse units of 2**N bits. */
+static uint64_t reverse_bits_64(uint64_t x, int n)
+{
+ int i, sh;
+
+ x = bswap64(x);
+ for (i = 2, sh = 4; i >= n; i--, sh >>= 1) {
+ uint64_t mask = even_bit_esz_masks[i];
+ x = ((x & mask) << sh) | ((x >> sh) & mask);
+ }
+ return x;
+}
+
+static uint8_t reverse_bits_8(uint8_t x, int n)
+{
+ static const uint8_t mask[3] = { 0x55, 0x33, 0x0f };
+ int i, sh;
+
+ for (i = 2, sh = 4; i >= n; i--, sh >>= 1) {
+ x = ((x & mask[i]) << sh) | ((x >> sh) & mask[i]);
+ }
+ return x;
+}
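+
+/* E.g. reverse_bits_64(x, 3) degenerates to bswap64(x) (the loop body
+ * never runs), while reverse_bits_64(0x1, 0) == 0x8000000000000000ull:
+ * the byte swap moves the set bit into the top byte, and the nibble,
+ * bit-pair and single-bit passes then reverse it within that byte.
+ */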
+
+void HELPER(sve_rev_p)(void *vd, void *vn, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ intptr_t i, oprsz_2 = oprsz / 2;
+
+ if (oprsz <= 8) {
+ uint64_t l = *(uint64_t *)vn;
+ l = reverse_bits_64(l << (64 - 8 * oprsz), esz);
+ *(uint64_t *)vd = l;
+ } else if ((oprsz & 15) == 0) {
+ for (i = 0; i < oprsz_2; i += 8) {
+ intptr_t ih = oprsz - 8 - i;
+ uint64_t l = reverse_bits_64(*(uint64_t *)(vn + i), esz);
+ uint64_t h = reverse_bits_64(*(uint64_t *)(vn + ih), esz);
+ *(uint64_t *)(vd + i) = h;
+ *(uint64_t *)(vd + ih) = l;
+ }
+ } else {
+ for (i = 0; i < oprsz_2; i += 1) {
+ intptr_t il = H1(i);
+ intptr_t ih = H1(oprsz - 1 - i);
+ uint8_t l = reverse_bits_8(*(uint8_t *)(vn + il), esz);
+ uint8_t h = reverse_bits_8(*(uint8_t *)(vn + ih), esz);
+ *(uint8_t *)(vd + il) = h;
+ *(uint8_t *)(vd + ih) = l;
+ }
+ }
+}
+
+void HELPER(sve_punpk_p)(void *vd, void *vn, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
+ uint64_t *d = vd;
+ intptr_t i;
+
+ if (oprsz <= 8) {
+ uint64_t nn = *(uint64_t *)vn;
+ int half = 4 * oprsz;
+
+ nn = extract64(nn, high * half, half);
+ nn = expand_bits(nn, 0);
+ d[0] = nn;
+ } else {
+ ARMPredicateReg tmp_n;
+
+ /* We produce output faster than we consume input.
+ Therefore we must be mindful of possible overlap. */
+ if ((vn - vd) < (uintptr_t)oprsz) {
+ vn = memcpy(&tmp_n, vn, oprsz);
+ }
+ if (high) {
+ high = oprsz >> 1;
+ }
+
+ if ((oprsz & 7) == 0) {
+ uint32_t *n = vn;
+ high >>= 2;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ uint64_t nn = n[H4(high + i)];
+ d[i] = expand_bits(nn, 0);
+ }
+ } else {
+ uint16_t *d16 = vd;
+ uint8_t *n = vn;
+
+ for (i = 0; i < oprsz / 2; i++) {
+ uint16_t nn = n[H1(high + i)];
+ d16[H2(i)] = expand_bits(nn, 0);
+ }
+ }
+ }
+}
+
+#define DO_ZIP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc); \
+ intptr_t i, oprsz_2 = oprsz / 2; \
+ ARMVectorReg tmp_n, tmp_m; \
+ /* We produce output faster than we consume input. \
+ Therefore we must be mindful of possible overlap. */ \
+ if (unlikely((vn - vd) < (uintptr_t)oprsz)) { \
+ vn = memcpy(&tmp_n, vn, oprsz_2); \
+ } \
+ if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
+ vm = memcpy(&tmp_m, vm, oprsz_2); \
+ } \
+ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \
+ *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(2 * i + sizeof(TYPE))) = *(TYPE *)(vm + H(i)); \
+ } \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
+}
+
+DO_ZIP(sve_zip_b, uint8_t, H1)
+DO_ZIP(sve_zip_h, uint16_t, H1_2)
+DO_ZIP(sve_zip_s, uint32_t, H1_4)
+DO_ZIP(sve_zip_d, uint64_t, H1_8)
+DO_ZIP(sve2_zip_q, Int128, )
+
+#define DO_UZP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc); \
+ intptr_t odd_ofs = simd_data(desc); \
+ intptr_t i, p; \
+ ARMVectorReg tmp_m; \
+ if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \
+ vm = memcpy(&tmp_m, vm, oprsz); \
+ } \
+ i = 0, p = odd_ofs; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vn + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ p -= oprsz; \
+ do { \
+ *(TYPE *)(vd + H(i)) = *(TYPE *)(vm + H(p)); \
+ i += sizeof(TYPE), p += 2 * sizeof(TYPE); \
+ } while (p < oprsz); \
+ tcg_debug_assert(i == oprsz); \
+}
+
+DO_UZP(sve_uzp_b, uint8_t, H1)
+DO_UZP(sve_uzp_h, uint16_t, H1_2)
+DO_UZP(sve_uzp_s, uint32_t, H1_4)
+DO_UZP(sve_uzp_d, uint64_t, H1_8)
+DO_UZP(sve2_uzp_q, Int128, )
+
+#define DO_TRN(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc); \
+ intptr_t odd_ofs = simd_data(desc); \
+ intptr_t i; \
+ for (i = 0; i < oprsz; i += 2 * sizeof(TYPE)) { \
+ TYPE ae = *(TYPE *)(vn + H(i + odd_ofs)); \
+ TYPE be = *(TYPE *)(vm + H(i + odd_ofs)); \
+ *(TYPE *)(vd + H(i + 0)) = ae; \
+ *(TYPE *)(vd + H(i + sizeof(TYPE))) = be; \
+ } \
+ if (sizeof(TYPE) == 16 && unlikely(oprsz & 16)) { \
+ memset(vd + oprsz - 16, 0, 16); \
+ } \
+}
+
+DO_TRN(sve_trn_b, uint8_t, H1)
+DO_TRN(sve_trn_h, uint16_t, H1_2)
+DO_TRN(sve_trn_s, uint32_t, H1_4)
+DO_TRN(sve_trn_d, uint64_t, H1_8)
+DO_TRN(sve2_trn_q, Int128, )
+
+#undef DO_ZIP
+#undef DO_UZP
+#undef DO_TRN
+
+void HELPER(sve_compact_s)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc) / 4;
+ uint32_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = j = 0; i < opr_sz; i++) {
+ if (pg[H1(i / 2)] & (i & 1 ? 0x10 : 0x01)) {
+ d[H4(j)] = n[H4(i)];
+ j++;
+ }
+ }
+ for (; j < opr_sz; j++) {
+ d[H4(j)] = 0;
+ }
+}
+
+void HELPER(sve_compact_d)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn;
+ uint8_t *pg = vg;
+
+ for (i = j = 0; i < opr_sz; i++) {
+ if (pg[H1(i)] & 1) {
+ d[j] = n[i];
+ j++;
+ }
+ }
+ for (; j < opr_sz; j++) {
+ d[j] = 0;
+ }
+}
+
+/* Similar to the ARM LastActiveElement pseudocode function, except the
+ * result is multiplied by the element size. This includes the not found
+ * indication; e.g. not found for esz=3 is -8.
+ */
+int32_t HELPER(sve_last_active_element)(void *vg, uint32_t pred_desc)
+{
+ intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+
+ return last_active_element(vg, words, esz);
+}
+
+void HELPER(sve_splice)(void *vd, void *vn, void *vm, void *vg, uint32_t desc)
+{
+ intptr_t opr_sz = simd_oprsz(desc) / 8;
+ int esz = simd_data(desc);
+ uint64_t pg, first_g, last_g, len, mask = pred_esz_masks[esz];
+ intptr_t i, first_i, last_i;
+ ARMVectorReg tmp;
+
+ first_i = last_i = 0;
+ first_g = last_g = 0;
+
+ /* Find the extent of the active elements within VG. */
+ for (i = QEMU_ALIGN_UP(opr_sz, 8) - 8; i >= 0; i -= 8) {
+ pg = *(uint64_t *)(vg + i) & mask;
+ if (pg) {
+ if (last_g == 0) {
+ last_g = pg;
+ last_i = i;
+ }
+ first_g = pg;
+ first_i = i;
+ }
+ }
+
+ len = 0;
+ if (first_g != 0) {
+ first_i = first_i * 8 + ctz64(first_g);
+ last_i = last_i * 8 + 63 - clz64(last_g);
+ len = last_i - first_i + (1 << esz);
+ if (vd == vm) {
+ vm = memcpy(&tmp, vm, opr_sz * 8);
+ }
+ swap_memmove(vd, vn + first_i, len);
+ }
+ swap_memmove(vd + len, vm, opr_sz * 8 - len);
+}
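+
+/* E.g. with byte elements and VG active only for elements 2..5:
+ * first_i == 2, last_i == 5, len == 4, so D gets N[2..5] followed by
+ * M[0], M[1], ... for the rest of the vector.
+ */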
+
+void HELPER(sve_sel_zpzz_b)(void *vd, void *vn, void *vm,
+ void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i], mm = m[i];
+ uint64_t pp = expand_pred_b(pg[H1(i)]);
+ d[i] = (nn & pp) | (mm & ~pp);
+ }
+}
+
+void HELPER(sve_sel_zpzz_h)(void *vd, void *vn, void *vm,
+ void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i], mm = m[i];
+ uint64_t pp = expand_pred_h(pg[H1(i)]);
+ d[i] = (nn & pp) | (mm & ~pp);
+ }
+}
+
+void HELPER(sve_sel_zpzz_s)(void *vd, void *vn, void *vm,
+ void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i], mm = m[i];
+ uint64_t pp = expand_pred_s(pg[H1(i)]);
+ d[i] = (nn & pp) | (mm & ~pp);
+ }
+}
+
+void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm,
+ void *vg, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i += 1) {
+ uint64_t nn = n[i], mm = m[i];
+ d[i] = (pg[H1(i)] & 1 ? nn : mm);
+ }
+}
+
+/* Two operand comparison controlled by a predicate.
+ * ??? It is very tempting to expand this inline
+ * with x86 instructions, e.g.
+ *
+ * vcmpeqw zm, zn, %ymm0
+ * vpmovmskb %ymm0, %eax
+ * and $0x5555, %eax
+ * and pg, %eax
+ *
+ * or even aarch64, e.g.
+ *
+ * // mask = 4000 1000 0400 0100 0040 0010 0004 0001
+ * cmeq v0.8h, zn, zm
+ * and v0.8h, v0.8h, mask
+ * addv h0, v0.8h
+ * and v0.8b, pg
+ *
+ * However, coming up with an abstraction that allows vector inputs and
+ * a scalar output, and also handles the byte-ordering of sub-uint64_t
+ * scalar outputs, is tricky.
+ */
+#define DO_CMP_PPZZ(NAME, TYPE, OP, H, MASK) \
+uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t opr_sz = simd_oprsz(desc); \
+ uint32_t flags = PREDTEST_INIT; \
+ intptr_t i = opr_sz; \
+ do { \
+ uint64_t out = 0, pg; \
+ do { \
+ i -= sizeof(TYPE), out <<= sizeof(TYPE); \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ TYPE mm = *(TYPE *)(vm + H(i)); \
+ out |= nn OP mm; \
+ } while (i & 63); \
+ pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
+ out &= pg; \
+ *(uint64_t *)(vd + (i >> 3)) = out; \
+ flags = iter_predtest_bwd(out, pg, flags); \
+ } while (i > 0); \
+ return flags; \
+}
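+
+/* Each element deposits its boolean result at predicate bit (i & 63) of
+ * OUT: the compare result is OR'd into bit 0 and then shifted left by
+ * sizeof(TYPE) once per remaining element, so e.g. for .H elements only
+ * every second bit can be set, which MASK (0x5555...) also enforces on
+ * the guarding predicate.
+ */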
+
+#define DO_CMP_PPZZ_B(NAME, TYPE, OP) \
+ DO_CMP_PPZZ(NAME, TYPE, OP, H1, 0xffffffffffffffffull)
+#define DO_CMP_PPZZ_H(NAME, TYPE, OP) \
+ DO_CMP_PPZZ(NAME, TYPE, OP, H1_2, 0x5555555555555555ull)
+#define DO_CMP_PPZZ_S(NAME, TYPE, OP) \
+ DO_CMP_PPZZ(NAME, TYPE, OP, H1_4, 0x1111111111111111ull)
+#define DO_CMP_PPZZ_D(NAME, TYPE, OP) \
+ DO_CMP_PPZZ(NAME, TYPE, OP, H1_8, 0x0101010101010101ull)
+
+DO_CMP_PPZZ_B(sve_cmpeq_ppzz_b, uint8_t, ==)
+DO_CMP_PPZZ_H(sve_cmpeq_ppzz_h, uint16_t, ==)
+DO_CMP_PPZZ_S(sve_cmpeq_ppzz_s, uint32_t, ==)
+DO_CMP_PPZZ_D(sve_cmpeq_ppzz_d, uint64_t, ==)
+
+DO_CMP_PPZZ_B(sve_cmpne_ppzz_b, uint8_t, !=)
+DO_CMP_PPZZ_H(sve_cmpne_ppzz_h, uint16_t, !=)
+DO_CMP_PPZZ_S(sve_cmpne_ppzz_s, uint32_t, !=)
+DO_CMP_PPZZ_D(sve_cmpne_ppzz_d, uint64_t, !=)
+
+DO_CMP_PPZZ_B(sve_cmpgt_ppzz_b, int8_t, >)
+DO_CMP_PPZZ_H(sve_cmpgt_ppzz_h, int16_t, >)
+DO_CMP_PPZZ_S(sve_cmpgt_ppzz_s, int32_t, >)
+DO_CMP_PPZZ_D(sve_cmpgt_ppzz_d, int64_t, >)
+
+DO_CMP_PPZZ_B(sve_cmpge_ppzz_b, int8_t, >=)
+DO_CMP_PPZZ_H(sve_cmpge_ppzz_h, int16_t, >=)
+DO_CMP_PPZZ_S(sve_cmpge_ppzz_s, int32_t, >=)
+DO_CMP_PPZZ_D(sve_cmpge_ppzz_d, int64_t, >=)
+
+DO_CMP_PPZZ_B(sve_cmphi_ppzz_b, uint8_t, >)
+DO_CMP_PPZZ_H(sve_cmphi_ppzz_h, uint16_t, >)
+DO_CMP_PPZZ_S(sve_cmphi_ppzz_s, uint32_t, >)
+DO_CMP_PPZZ_D(sve_cmphi_ppzz_d, uint64_t, >)
+
+DO_CMP_PPZZ_B(sve_cmphs_ppzz_b, uint8_t, >=)
+DO_CMP_PPZZ_H(sve_cmphs_ppzz_h, uint16_t, >=)
+DO_CMP_PPZZ_S(sve_cmphs_ppzz_s, uint32_t, >=)
+DO_CMP_PPZZ_D(sve_cmphs_ppzz_d, uint64_t, >=)
+
+#undef DO_CMP_PPZZ_B
+#undef DO_CMP_PPZZ_H
+#undef DO_CMP_PPZZ_S
+#undef DO_CMP_PPZZ_D
+#undef DO_CMP_PPZZ
+
+/* Similar, but the second source is "wide". */
+#define DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H, MASK) \
+uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ intptr_t opr_sz = simd_oprsz(desc); \
+ uint32_t flags = PREDTEST_INIT; \
+ intptr_t i = opr_sz; \
+ do { \
+ uint64_t out = 0, pg; \
+ do { \
+ TYPEW mm = *(TYPEW *)(vm + i - 8); \
+ do { \
+ i -= sizeof(TYPE), out <<= sizeof(TYPE); \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ out |= nn OP mm; \
+ } while (i & 7); \
+ } while (i & 63); \
+ pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
+ out &= pg; \
+ *(uint64_t *)(vd + (i >> 3)) = out; \
+ flags = iter_predtest_bwd(out, pg, flags); \
+ } while (i > 0); \
+ return flags; \
+}
+
+#define DO_CMP_PPZW_B(NAME, TYPE, TYPEW, OP) \
+ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1, 0xffffffffffffffffull)
+#define DO_CMP_PPZW_H(NAME, TYPE, TYPEW, OP) \
+ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_2, 0x5555555555555555ull)
+#define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \
+ DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull)
+
+DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==)
+DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==)
+DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==)
+
+DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=)
+DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=)
+DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=)
+
+DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >)
+DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >)
+DO_CMP_PPZW_S(sve_cmpgt_ppzw_s, int32_t, int64_t, >)
+
+DO_CMP_PPZW_B(sve_cmpge_ppzw_b, int8_t, int64_t, >=)
+DO_CMP_PPZW_H(sve_cmpge_ppzw_h, int16_t, int64_t, >=)
+DO_CMP_PPZW_S(sve_cmpge_ppzw_s, int32_t, int64_t, >=)
+
+DO_CMP_PPZW_B(sve_cmphi_ppzw_b, uint8_t, uint64_t, >)
+DO_CMP_PPZW_H(sve_cmphi_ppzw_h, uint16_t, uint64_t, >)
+DO_CMP_PPZW_S(sve_cmphi_ppzw_s, uint32_t, uint64_t, >)
+
+DO_CMP_PPZW_B(sve_cmphs_ppzw_b, uint8_t, uint64_t, >=)
+DO_CMP_PPZW_H(sve_cmphs_ppzw_h, uint16_t, uint64_t, >=)
+DO_CMP_PPZW_S(sve_cmphs_ppzw_s, uint32_t, uint64_t, >=)
+
+DO_CMP_PPZW_B(sve_cmplt_ppzw_b, int8_t, int64_t, <)
+DO_CMP_PPZW_H(sve_cmplt_ppzw_h, int16_t, int64_t, <)
+DO_CMP_PPZW_S(sve_cmplt_ppzw_s, int32_t, int64_t, <)
+
+DO_CMP_PPZW_B(sve_cmple_ppzw_b, int8_t, int64_t, <=)
+DO_CMP_PPZW_H(sve_cmple_ppzw_h, int16_t, int64_t, <=)
+DO_CMP_PPZW_S(sve_cmple_ppzw_s, int32_t, int64_t, <=)
+
+DO_CMP_PPZW_B(sve_cmplo_ppzw_b, uint8_t, uint64_t, <)
+DO_CMP_PPZW_H(sve_cmplo_ppzw_h, uint16_t, uint64_t, <)
+DO_CMP_PPZW_S(sve_cmplo_ppzw_s, uint32_t, uint64_t, <)
+
+DO_CMP_PPZW_B(sve_cmpls_ppzw_b, uint8_t, uint64_t, <=)
+DO_CMP_PPZW_H(sve_cmpls_ppzw_h, uint16_t, uint64_t, <=)
+DO_CMP_PPZW_S(sve_cmpls_ppzw_s, uint32_t, uint64_t, <=)
+
+#undef DO_CMP_PPZW_B
+#undef DO_CMP_PPZW_H
+#undef DO_CMP_PPZW_S
+#undef DO_CMP_PPZW
+
+/* Similar, but the second source is immediate. */
+#define DO_CMP_PPZI(NAME, TYPE, OP, H, MASK) \
+uint32_t HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ intptr_t opr_sz = simd_oprsz(desc); \
+ uint32_t flags = PREDTEST_INIT; \
+ TYPE mm = simd_data(desc); \
+ intptr_t i = opr_sz; \
+ do { \
+ uint64_t out = 0, pg; \
+ do { \
+ i -= sizeof(TYPE), out <<= sizeof(TYPE); \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ out |= nn OP mm; \
+ } while (i & 63); \
+ pg = *(uint64_t *)(vg + (i >> 3)) & MASK; \
+ out &= pg; \
+ *(uint64_t *)(vd + (i >> 3)) = out; \
+ flags = iter_predtest_bwd(out, pg, flags); \
+ } while (i > 0); \
+ return flags; \
+}
+
+#define DO_CMP_PPZI_B(NAME, TYPE, OP) \
+ DO_CMP_PPZI(NAME, TYPE, OP, H1, 0xffffffffffffffffull)
+#define DO_CMP_PPZI_H(NAME, TYPE, OP) \
+ DO_CMP_PPZI(NAME, TYPE, OP, H1_2, 0x5555555555555555ull)
+#define DO_CMP_PPZI_S(NAME, TYPE, OP) \
+ DO_CMP_PPZI(NAME, TYPE, OP, H1_4, 0x1111111111111111ull)
+#define DO_CMP_PPZI_D(NAME, TYPE, OP) \
+ DO_CMP_PPZI(NAME, TYPE, OP, H1_8, 0x0101010101010101ull)
+
+DO_CMP_PPZI_B(sve_cmpeq_ppzi_b, uint8_t, ==)
+DO_CMP_PPZI_H(sve_cmpeq_ppzi_h, uint16_t, ==)
+DO_CMP_PPZI_S(sve_cmpeq_ppzi_s, uint32_t, ==)
+DO_CMP_PPZI_D(sve_cmpeq_ppzi_d, uint64_t, ==)
+
+DO_CMP_PPZI_B(sve_cmpne_ppzi_b, uint8_t, !=)
+DO_CMP_PPZI_H(sve_cmpne_ppzi_h, uint16_t, !=)
+DO_CMP_PPZI_S(sve_cmpne_ppzi_s, uint32_t, !=)
+DO_CMP_PPZI_D(sve_cmpne_ppzi_d, uint64_t, !=)
+
+DO_CMP_PPZI_B(sve_cmpgt_ppzi_b, int8_t, >)
+DO_CMP_PPZI_H(sve_cmpgt_ppzi_h, int16_t, >)
+DO_CMP_PPZI_S(sve_cmpgt_ppzi_s, int32_t, >)
+DO_CMP_PPZI_D(sve_cmpgt_ppzi_d, int64_t, >)
+
+DO_CMP_PPZI_B(sve_cmpge_ppzi_b, int8_t, >=)
+DO_CMP_PPZI_H(sve_cmpge_ppzi_h, int16_t, >=)
+DO_CMP_PPZI_S(sve_cmpge_ppzi_s, int32_t, >=)
+DO_CMP_PPZI_D(sve_cmpge_ppzi_d, int64_t, >=)
+
+DO_CMP_PPZI_B(sve_cmphi_ppzi_b, uint8_t, >)
+DO_CMP_PPZI_H(sve_cmphi_ppzi_h, uint16_t, >)
+DO_CMP_PPZI_S(sve_cmphi_ppzi_s, uint32_t, >)
+DO_CMP_PPZI_D(sve_cmphi_ppzi_d, uint64_t, >)
+
+DO_CMP_PPZI_B(sve_cmphs_ppzi_b, uint8_t, >=)
+DO_CMP_PPZI_H(sve_cmphs_ppzi_h, uint16_t, >=)
+DO_CMP_PPZI_S(sve_cmphs_ppzi_s, uint32_t, >=)
+DO_CMP_PPZI_D(sve_cmphs_ppzi_d, uint64_t, >=)
+
+DO_CMP_PPZI_B(sve_cmplt_ppzi_b, int8_t, <)
+DO_CMP_PPZI_H(sve_cmplt_ppzi_h, int16_t, <)
+DO_CMP_PPZI_S(sve_cmplt_ppzi_s, int32_t, <)
+DO_CMP_PPZI_D(sve_cmplt_ppzi_d, int64_t, <)
+
+DO_CMP_PPZI_B(sve_cmple_ppzi_b, int8_t, <=)
+DO_CMP_PPZI_H(sve_cmple_ppzi_h, int16_t, <=)
+DO_CMP_PPZI_S(sve_cmple_ppzi_s, int32_t, <=)
+DO_CMP_PPZI_D(sve_cmple_ppzi_d, int64_t, <=)
+
+DO_CMP_PPZI_B(sve_cmplo_ppzi_b, uint8_t, <)
+DO_CMP_PPZI_H(sve_cmplo_ppzi_h, uint16_t, <)
+DO_CMP_PPZI_S(sve_cmplo_ppzi_s, uint32_t, <)
+DO_CMP_PPZI_D(sve_cmplo_ppzi_d, uint64_t, <)
+
+DO_CMP_PPZI_B(sve_cmpls_ppzi_b, uint8_t, <=)
+DO_CMP_PPZI_H(sve_cmpls_ppzi_h, uint16_t, <=)
+DO_CMP_PPZI_S(sve_cmpls_ppzi_s, uint32_t, <=)
+DO_CMP_PPZI_D(sve_cmpls_ppzi_d, uint64_t, <=)
+
+#undef DO_CMP_PPZI_B
+#undef DO_CMP_PPZI_H
+#undef DO_CMP_PPZI_S
+#undef DO_CMP_PPZI_D
+#undef DO_CMP_PPZI
+
+/* Similar to the ARM LastActive pseudocode function. */
+static bool last_active_pred(void *vd, void *vg, intptr_t oprsz)
+{
+ intptr_t i;
+
+ for (i = QEMU_ALIGN_UP(oprsz, 8) - 8; i >= 0; i -= 8) {
+ uint64_t pg = *(uint64_t *)(vg + i);
+ if (pg) {
+ return (pow2floor(pg) & *(uint64_t *)(vd + i)) != 0;
+ }
+ }
+ return false;
+}
+
+/* Compute a mask into RETB that is true for all G, up to and including
+ * (if after) or excluding (if !after) the first G & N.
+ * Return true if BRK found.
+ */
+static bool compute_brk(uint64_t *retb, uint64_t n, uint64_t g,
+ bool brk, bool after)
+{
+ uint64_t b;
+
+ if (brk) {
+ b = 0;
+ } else if ((g & n) == 0) {
+ /* For all G, no N are set; break not found. */
+ b = g;
+ } else {
+ /* Break somewhere in N. Locate it. */
+ b = g & n; /* guard true, pred true */
+ b = b & -b; /* first such */
+ if (after) {
+ b = b | (b - 1); /* break after same */
+ } else {
+ b = b - 1; /* break before same */
+ }
+ brk = true;
+ }
+
+ *retb = b;
+ return brk;
+}
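+
+/* E.g. g = 0xff, n = 0x10, BRK false: b = g & n = 0x10, and b & -b
+ * isolates the first active bit; AFTER then yields b = 0x1f (bits 0-4,
+ * including the match) and !AFTER yields b = 0x0f (bits 0-3, excluding
+ * it).
+ */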
+
+/* Compute a zeroing BRK. */
+static void compute_brk_z(uint64_t *d, uint64_t *n, uint64_t *g,
+ intptr_t oprsz, bool after)
+{
+ bool brk = false;
+ intptr_t i;
+
+ for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+ uint64_t this_b, this_g = g[i];
+
+ brk = compute_brk(&this_b, n[i], this_g, brk, after);
+ d[i] = this_b & this_g;
+ }
+}
+
+/* Likewise, but also compute flags. */
+static uint32_t compute_brks_z(uint64_t *d, uint64_t *n, uint64_t *g,
+ intptr_t oprsz, bool after)
+{
+ uint32_t flags = PREDTEST_INIT;
+ bool brk = false;
+ intptr_t i;
+
+ for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+ uint64_t this_b, this_d, this_g = g[i];
+
+ brk = compute_brk(&this_b, n[i], this_g, brk, after);
+ d[i] = this_d = this_b & this_g;
+ flags = iter_predtest_fwd(this_d, this_g, flags);
+ }
+ return flags;
+}
+
+/* Compute a merging BRK. */
+static void compute_brk_m(uint64_t *d, uint64_t *n, uint64_t *g,
+ intptr_t oprsz, bool after)
+{
+ bool brk = false;
+ intptr_t i;
+
+ for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+ uint64_t this_b, this_g = g[i];
+
+ brk = compute_brk(&this_b, n[i], this_g, brk, after);
+ d[i] = (this_b & this_g) | (d[i] & ~this_g);
+ }
+}
+
+/* Likewise, but also compute flags. */
+static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g,
+ intptr_t oprsz, bool after)
+{
+ uint32_t flags = PREDTEST_INIT;
+ bool brk = false;
+ intptr_t i;
+
+ for (i = 0; i < DIV_ROUND_UP(oprsz, 8); ++i) {
+ uint64_t this_b, this_d = d[i], this_g = g[i];
+
+ brk = compute_brk(&this_b, n[i], this_g, brk, after);
+ d[i] = this_d = (this_b & this_g) | (this_d & ~this_g);
+ flags = iter_predtest_fwd(this_d, this_g, flags);
+ }
+ return flags;
+}
+
+static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz)
+{
+ /* It is quicker to zero the whole predicate than loop on OPRSZ.
+ * The compiler should turn this into 4 64-bit integer stores.
+ */
+ memset(d, 0, sizeof(ARMPredicateReg));
+ return PREDTEST_INIT;
+}
+
+void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (last_active_pred(vn, vg, oprsz)) {
+ compute_brk_z(vd, vm, vg, oprsz, true);
+ } else {
+ do_zero(vd, oprsz);
+ }
+}
+
+uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (last_active_pred(vn, vg, oprsz)) {
+ return compute_brks_z(vd, vm, vg, oprsz, true);
+ } else {
+ return do_zero(vd, oprsz);
+ }
+}
+
+void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (last_active_pred(vn, vg, oprsz)) {
+ compute_brk_z(vd, vm, vg, oprsz, false);
+ } else {
+ do_zero(vd, oprsz);
+ }
+}
+
+uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (last_active_pred(vn, vg, oprsz)) {
+ return compute_brks_z(vd, vm, vg, oprsz, false);
+ } else {
+ return do_zero(vd, oprsz);
+ }
+}
+
+void HELPER(sve_brka_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ compute_brk_z(vd, vn, vg, oprsz, true);
+}
+
+uint32_t HELPER(sve_brkas_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ return compute_brks_z(vd, vn, vg, oprsz, true);
+}
+
+void HELPER(sve_brkb_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ compute_brk_z(vd, vn, vg, oprsz, false);
+}
+
+uint32_t HELPER(sve_brkbs_z)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ return compute_brks_z(vd, vn, vg, oprsz, false);
+}
+
+void HELPER(sve_brka_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ compute_brk_m(vd, vn, vg, oprsz, true);
+}
+
+uint32_t HELPER(sve_brkas_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ return compute_brks_m(vd, vn, vg, oprsz, true);
+}
+
+void HELPER(sve_brkb_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ compute_brk_m(vd, vn, vg, oprsz, false);
+}
+
+uint32_t HELPER(sve_brkbs_m)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ return compute_brks_m(vd, vn, vg, oprsz, false);
+}
+
+void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (!last_active_pred(vn, vg, oprsz)) {
+ do_zero(vd, oprsz);
+ }
+}
+
+/* As if PredTest(Ones(PL), D, esz). */
+static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz,
+ uint64_t esz_mask)
+{
+ uint32_t flags = PREDTEST_INIT;
+ intptr_t i;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ flags = iter_predtest_fwd(d->p[i], esz_mask, flags);
+ }
+ if (oprsz & 7) {
+ uint64_t mask = ~(-1ULL << (8 * (oprsz & 7)));
+ flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags);
+ }
+ return flags;
+}
+
+uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ if (last_active_pred(vn, vg, oprsz)) {
+ return predtest_ones(vd, oprsz, -1);
+ } else {
+ return do_zero(vd, oprsz);
+ }
+}
+
+uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc)
+{
+ intptr_t words = DIV_ROUND_UP(FIELD_EX32(pred_desc, PREDDESC, OPRSZ), 8);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint64_t *n = vn, *g = vg, sum = 0, mask = pred_esz_masks[esz];
+ intptr_t i;
+
+ for (i = 0; i < words; ++i) {
+ uint64_t t = n[i] & g[i] & mask;
+ sum += ctpop64(t);
+ }
+ return sum;
+}
+
+uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
+ uint32_t flags;
+ intptr_t i;
+
+ /* Begin with a zero predicate register. */
+ flags = do_zero(d, oprsz);
+ if (count == 0) {
+ return flags;
+ }
+
+ /* Set all of the requested bits. */
+ for (i = 0; i < count / 64; ++i) {
+ d->p[i] = esz_mask;
+ }
+ if (count & 63) {
+ d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask;
+ }
+
+ return predtest_ones(d, oprsz, esz_mask);
+}
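+
+/* COUNT is in predicate bits (the element count scaled by the element
+ * size).  E.g. count == 6 with esz == 1 sets
+ * p[0] = MAKE_64BIT_MASK(0, 6) & 0x5555... == 0x15, i.e. the first
+ * three halfword elements are active.
+ */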
+
+uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
+ intptr_t i, invcount, oprbits;
+ uint64_t bits;
+
+ if (count == 0) {
+ return do_zero(d, oprsz);
+ }
+
+ oprbits = oprsz * 8;
+ tcg_debug_assert(count <= oprbits);
+
+ bits = esz_mask;
+ if (oprbits & 63) {
+ bits &= MAKE_64BIT_MASK(0, oprbits & 63);
+ }
+
+ invcount = oprbits - count;
+ for (i = (oprsz - 1) / 8; i > invcount / 64; --i) {
+ d->p[i] = bits;
+ bits = esz_mask;
+ }
+
+ d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64);
+
+ while (--i >= 0) {
+ d->p[i] = 0;
+ }
+
+ return predtest_ones(d, oprsz, esz_mask);
+}
+
+/* Recursive pairwise reduction via FUNC;
+ * cf. the Arm ARM pseudocode function ReducePredicated.
+ *
+ * While it would be possible to write this without the DATA temporary,
+ * it is much simpler to process the predicate register this way.
+ * The recursion is bounded to depth 7 (128 fp16 elements), so there's
+ * little to gain with a more complex non-recursive form.
+ */
+#define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \
+static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \
+{ \
+ if (n == 1) { \
+ return *data; \
+ } else { \
+ uintptr_t half = n / 2; \
+ TYPE lo = NAME##_reduce(data, status, half); \
+ TYPE hi = NAME##_reduce(data + half, status, half); \
+ return TYPE##_##FUNC(lo, hi, status); \
+ } \
+} \
+uint64_t HELPER(NAME)(void *vn, void *vg, void *vs, uint32_t desc) \
+{ \
+ uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \
+ TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \
+ for (i = 0; i < oprsz; ) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \
+ do { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)((void *)data + i) = (pg & 1 ? nn : IDENT); \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (i & 15); \
+ } \
+ for (; i < maxsz; i += sizeof(TYPE)) { \
+ *(TYPE *)((void *)data + i) = IDENT; \
+ } \
+ return NAME##_reduce(data, vs, maxsz / sizeof(TYPE)); \
+}
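+
+/* The reduction combines elements as a balanced binary tree, e.g. for
+ * eight elements:
+ *   FUNC(FUNC(FUNC(d0,d1), FUNC(d2,d3)), FUNC(FUNC(d4,d5), FUNC(d6,d7)))
+ * Inactive and trailing elements are seeded with IDENT so they cannot
+ * affect the result.
+ */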
+
+DO_REDUCE(sve_faddv_h, float16, H1_2, add, float16_zero)
+DO_REDUCE(sve_faddv_s, float32, H1_4, add, float32_zero)
+DO_REDUCE(sve_faddv_d, float64, H1_8, add, float64_zero)
+
+/* Identity is floatN_default_nan, without the function call. */
+DO_REDUCE(sve_fminnmv_h, float16, H1_2, minnum, 0x7E00)
+DO_REDUCE(sve_fminnmv_s, float32, H1_4, minnum, 0x7FC00000)
+DO_REDUCE(sve_fminnmv_d, float64, H1_8, minnum, 0x7FF8000000000000ULL)
+
+DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, maxnum, 0x7E00)
+DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, maxnum, 0x7FC00000)
+DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, maxnum, 0x7FF8000000000000ULL)
+
+DO_REDUCE(sve_fminv_h, float16, H1_2, min, float16_infinity)
+DO_REDUCE(sve_fminv_s, float32, H1_4, min, float32_infinity)
+DO_REDUCE(sve_fminv_d, float64, H1_8, min, float64_infinity)
+
+DO_REDUCE(sve_fmaxv_h, float16, H1_2, max, float16_chs(float16_infinity))
+DO_REDUCE(sve_fmaxv_s, float32, H1_4, max, float32_chs(float32_infinity))
+DO_REDUCE(sve_fmaxv_d, float64, H1_8, max, float64_chs(float64_infinity))
+
+#undef DO_REDUCE
+
+uint64_t HELPER(sve_fadda_h)(uint64_t nn, void *vm, void *vg,
+ void *status, uint32_t desc)
+{
+ intptr_t i = 0, opr_sz = simd_oprsz(desc);
+ float16 result = nn;
+
+ do {
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
+ do {
+ if (pg & 1) {
+ float16 mm = *(float16 *)(vm + H1_2(i));
+ result = float16_add(result, mm, status);
+ }
+ i += sizeof(float16), pg >>= sizeof(float16);
+ } while (i & 15);
+ } while (i < opr_sz);
+
+ return result;
+}
+
+uint64_t HELPER(sve_fadda_s)(uint64_t nn, void *vm, void *vg,
+ void *status, uint32_t desc)
+{
+ intptr_t i = 0, opr_sz = simd_oprsz(desc);
+ float32 result = nn;
+
+ do {
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
+ do {
+ if (pg & 1) {
+ float32 mm = *(float32 *)(vm + H1_2(i));
+ result = float32_add(result, mm, status);
+ }
+ i += sizeof(float32), pg >>= sizeof(float32);
+ } while (i & 15);
+ } while (i < opr_sz);
+
+ return result;
+}
+
+uint64_t HELPER(sve_fadda_d)(uint64_t nn, void *vm, void *vg,
+ void *status, uint32_t desc)
+{
+ intptr_t i = 0, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *m = vm;
+ uint8_t *pg = vg;
+
+ for (i = 0; i < opr_sz; i++) {
+ if (pg[H1(i)] & 1) {
+ nn = float64_add(nn, m[i], status);
+ }
+ }
+
+ return nn;
+}
+
+/* Fully general three-operand expander, controlled by a predicate,
+ * with the extra float_status parameter.
+ */
+#define DO_ZPZZ_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPE); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ TYPE mm = *(TYPE *)(vm + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, mm, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add)
+DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add)
+DO_ZPZZ_FP(sve_fadd_d, uint64_t, H1_8, float64_add)
+
+DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub)
+DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub)
+DO_ZPZZ_FP(sve_fsub_d, uint64_t, H1_8, float64_sub)
+
+DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul)
+DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul)
+DO_ZPZZ_FP(sve_fmul_d, uint64_t, H1_8, float64_mul)
+
+DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div)
+DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div)
+DO_ZPZZ_FP(sve_fdiv_d, uint64_t, H1_8, float64_div)
+
+DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min)
+DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min)
+DO_ZPZZ_FP(sve_fmin_d, uint64_t, H1_8, float64_min)
+
+DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max)
+DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max)
+DO_ZPZZ_FP(sve_fmax_d, uint64_t, H1_8, float64_max)
+
+DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum)
+DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum)
+DO_ZPZZ_FP(sve_fminnum_d, uint64_t, H1_8, float64_minnum)
+
+DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum)
+DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum)
+DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, H1_8, float64_maxnum)
+
+static inline float16 abd_h(float16 a, float16 b, float_status *s)
+{
+ return float16_abs(float16_sub(a, b, s));
+}
+
+static inline float32 abd_s(float32 a, float32 b, float_status *s)
+{
+ return float32_abs(float32_sub(a, b, s));
+}
+
+static inline float64 abd_d(float64 a, float64 b, float_status *s)
+{
+ return float64_abs(float64_sub(a, b, s));
+}
+
+DO_ZPZZ_FP(sve_fabd_h, uint16_t, H1_2, abd_h)
+DO_ZPZZ_FP(sve_fabd_s, uint32_t, H1_4, abd_s)
+DO_ZPZZ_FP(sve_fabd_d, uint64_t, H1_8, abd_d)
+
+static inline float64 scalbn_d(float64 a, int64_t b, float_status *s)
+{
+ int b_int = MIN(MAX(b, INT_MIN), INT_MAX);
+ return float64_scalbn(a, b_int, s);
+}
+
+DO_ZPZZ_FP(sve_fscalbn_h, int16_t, H1_2, float16_scalbn)
+DO_ZPZZ_FP(sve_fscalbn_s, int32_t, H1_4, float32_scalbn)
+DO_ZPZZ_FP(sve_fscalbn_d, int64_t, H1_8, scalbn_d)
+
+DO_ZPZZ_FP(sve_fmulx_h, uint16_t, H1_2, helper_advsimd_mulxh)
+DO_ZPZZ_FP(sve_fmulx_s, uint32_t, H1_4, helper_vfp_mulxs)
+DO_ZPZZ_FP(sve_fmulx_d, uint64_t, H1_8, helper_vfp_mulxd)
+
+#undef DO_ZPZZ_FP
+
+/* Three-operand expander, with one scalar operand, controlled by
+ * a predicate, with the extra float_status parameter.
+ */
+#define DO_ZPZS_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint64_t scalar, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ TYPE mm = scalar; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPE); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, mm, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+DO_ZPZS_FP(sve_fadds_h, float16, H1_2, float16_add)
+DO_ZPZS_FP(sve_fadds_s, float32, H1_4, float32_add)
+DO_ZPZS_FP(sve_fadds_d, float64, H1_8, float64_add)
+
+DO_ZPZS_FP(sve_fsubs_h, float16, H1_2, float16_sub)
+DO_ZPZS_FP(sve_fsubs_s, float32, H1_4, float32_sub)
+DO_ZPZS_FP(sve_fsubs_d, float64, H1_8, float64_sub)
+
+DO_ZPZS_FP(sve_fmuls_h, float16, H1_2, float16_mul)
+DO_ZPZS_FP(sve_fmuls_s, float32, H1_4, float32_mul)
+DO_ZPZS_FP(sve_fmuls_d, float64, H1_8, float64_mul)
+
+static inline float16 subr_h(float16 a, float16 b, float_status *s)
+{
+ return float16_sub(b, a, s);
+}
+
+static inline float32 subr_s(float32 a, float32 b, float_status *s)
+{
+ return float32_sub(b, a, s);
+}
+
+static inline float64 subr_d(float64 a, float64 b, float_status *s)
+{
+ return float64_sub(b, a, s);
+}
+
+DO_ZPZS_FP(sve_fsubrs_h, float16, H1_2, subr_h)
+DO_ZPZS_FP(sve_fsubrs_s, float32, H1_4, subr_s)
+DO_ZPZS_FP(sve_fsubrs_d, float64, H1_8, subr_d)
+
+DO_ZPZS_FP(sve_fmaxnms_h, float16, H1_2, float16_maxnum)
+DO_ZPZS_FP(sve_fmaxnms_s, float32, H1_4, float32_maxnum)
+DO_ZPZS_FP(sve_fmaxnms_d, float64, H1_8, float64_maxnum)
+
+DO_ZPZS_FP(sve_fminnms_h, float16, H1_2, float16_minnum)
+DO_ZPZS_FP(sve_fminnms_s, float32, H1_4, float32_minnum)
+DO_ZPZS_FP(sve_fminnms_d, float64, H1_8, float64_minnum)
+
+DO_ZPZS_FP(sve_fmaxs_h, float16, H1_2, float16_max)
+DO_ZPZS_FP(sve_fmaxs_s, float32, H1_4, float32_max)
+DO_ZPZS_FP(sve_fmaxs_d, float64, H1_8, float64_max)
+
+DO_ZPZS_FP(sve_fmins_h, float16, H1_2, float16_min)
+DO_ZPZS_FP(sve_fmins_s, float32, H1_4, float32_min)
+DO_ZPZS_FP(sve_fmins_d, float64, H1_8, float64_min)
+
+/* Fully general two-operand expander, controlled by a predicate,
+ * with the extra float_status parameter.
+ */
+#define DO_ZPZ_FP(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPE); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ *(TYPE *)(vd + H(i)) = OP(nn, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+/* SVE fp16 conversions always use IEEE mode. Like AdvSIMD, they ignore
+ * FZ16. When converting from fp16, this affects flushing input denormals;
+ * when converting to fp16, this affects flushing output denormals.
+ */
+static inline float32 sve_f16_to_f32(float16 f, float_status *fpst)
+{
+ bool save = get_flush_inputs_to_zero(fpst);
+ float32 ret;
+
+ set_flush_inputs_to_zero(false, fpst);
+ ret = float16_to_float32(f, true, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return ret;
+}
+
+static inline float64 sve_f16_to_f64(float16 f, float_status *fpst)
+{
+ bool save = get_flush_inputs_to_zero(fpst);
+ float64 ret;
+
+ set_flush_inputs_to_zero(false, fpst);
+ ret = float16_to_float64(f, true, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return ret;
+}
+
+static inline float16 sve_f32_to_f16(float32 f, float_status *fpst)
+{
+ bool save = get_flush_to_zero(fpst);
+ float16 ret;
+
+ set_flush_to_zero(false, fpst);
+ ret = float32_to_float16(f, true, fpst);
+ set_flush_to_zero(save, fpst);
+ return ret;
+}
+
+static inline float16 sve_f64_to_f16(float64 f, float_status *fpst)
+{
+ bool save = get_flush_to_zero(fpst);
+ float16 ret;
+
+ set_flush_to_zero(false, fpst);
+ ret = float64_to_float16(f, true, fpst);
+ set_flush_to_zero(save, fpst);
+ return ret;
+}
+
+static inline int16_t vfp_float16_to_int16_rtz(float16 f, float_status *s)
+{
+ if (float16_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float16_to_int16_round_to_zero(f, s);
+}
+
+static inline int64_t vfp_float16_to_int64_rtz(float16 f, float_status *s)
+{
+ if (float16_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float16_to_int64_round_to_zero(f, s);
+}
+
+static inline int64_t vfp_float32_to_int64_rtz(float32 f, float_status *s)
+{
+ if (float32_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float32_to_int64_round_to_zero(f, s);
+}
+
+static inline int64_t vfp_float64_to_int64_rtz(float64 f, float_status *s)
+{
+ if (float64_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float64_to_int64_round_to_zero(f, s);
+}
+
+static inline uint16_t vfp_float16_to_uint16_rtz(float16 f, float_status *s)
+{
+ if (float16_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float16_to_uint16_round_to_zero(f, s);
+}
+
+static inline uint64_t vfp_float16_to_uint64_rtz(float16 f, float_status *s)
+{
+ if (float16_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float16_to_uint64_round_to_zero(f, s);
+}
+
+static inline uint64_t vfp_float32_to_uint64_rtz(float32 f, float_status *s)
+{
+ if (float32_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float32_to_uint64_round_to_zero(f, s);
+}
+
+static inline uint64_t vfp_float64_to_uint64_rtz(float64 f, float_status *s)
+{
+ if (float64_is_any_nan(f)) {
+ float_raise(float_flag_invalid, s);
+ return 0;
+ }
+ return float64_to_uint64_round_to_zero(f, s);
+}
+
+DO_ZPZ_FP(sve_fcvt_sh, uint32_t, H1_4, sve_f32_to_f16)
+DO_ZPZ_FP(sve_fcvt_hs, uint32_t, H1_4, sve_f16_to_f32)
+DO_ZPZ_FP(sve_bfcvt, uint32_t, H1_4, float32_to_bfloat16)
+DO_ZPZ_FP(sve_fcvt_dh, uint64_t, H1_8, sve_f64_to_f16)
+DO_ZPZ_FP(sve_fcvt_hd, uint64_t, H1_8, sve_f16_to_f64)
+DO_ZPZ_FP(sve_fcvt_ds, uint64_t, H1_8, float64_to_float32)
+DO_ZPZ_FP(sve_fcvt_sd, uint64_t, H1_8, float32_to_float64)
+
+DO_ZPZ_FP(sve_fcvtzs_hh, uint16_t, H1_2, vfp_float16_to_int16_rtz)
+DO_ZPZ_FP(sve_fcvtzs_hs, uint32_t, H1_4, helper_vfp_tosizh)
+DO_ZPZ_FP(sve_fcvtzs_ss, uint32_t, H1_4, helper_vfp_tosizs)
+DO_ZPZ_FP(sve_fcvtzs_hd, uint64_t, H1_8, vfp_float16_to_int64_rtz)
+DO_ZPZ_FP(sve_fcvtzs_sd, uint64_t, H1_8, vfp_float32_to_int64_rtz)
+DO_ZPZ_FP(sve_fcvtzs_ds, uint64_t, H1_8, helper_vfp_tosizd)
+DO_ZPZ_FP(sve_fcvtzs_dd, uint64_t, H1_8, vfp_float64_to_int64_rtz)
+
+DO_ZPZ_FP(sve_fcvtzu_hh, uint16_t, H1_2, vfp_float16_to_uint16_rtz)
+DO_ZPZ_FP(sve_fcvtzu_hs, uint32_t, H1_4, helper_vfp_touizh)
+DO_ZPZ_FP(sve_fcvtzu_ss, uint32_t, H1_4, helper_vfp_touizs)
+DO_ZPZ_FP(sve_fcvtzu_hd, uint64_t, H1_8, vfp_float16_to_uint64_rtz)
+DO_ZPZ_FP(sve_fcvtzu_sd, uint64_t, H1_8, vfp_float32_to_uint64_rtz)
+DO_ZPZ_FP(sve_fcvtzu_ds, uint64_t, H1_8, helper_vfp_touizd)
+DO_ZPZ_FP(sve_fcvtzu_dd, uint64_t, H1_8, vfp_float64_to_uint64_rtz)
+
+DO_ZPZ_FP(sve_frint_h, uint16_t, H1_2, helper_advsimd_rinth)
+DO_ZPZ_FP(sve_frint_s, uint32_t, H1_4, helper_rints)
+DO_ZPZ_FP(sve_frint_d, uint64_t, H1_8, helper_rintd)
+
+DO_ZPZ_FP(sve_frintx_h, uint16_t, H1_2, float16_round_to_int)
+DO_ZPZ_FP(sve_frintx_s, uint32_t, H1_4, float32_round_to_int)
+DO_ZPZ_FP(sve_frintx_d, uint64_t, H1_8, float64_round_to_int)
+
+DO_ZPZ_FP(sve_frecpx_h, uint16_t, H1_2, helper_frecpx_f16)
+DO_ZPZ_FP(sve_frecpx_s, uint32_t, H1_4, helper_frecpx_f32)
+DO_ZPZ_FP(sve_frecpx_d, uint64_t, H1_8, helper_frecpx_f64)
+
+DO_ZPZ_FP(sve_fsqrt_h, uint16_t, H1_2, float16_sqrt)
+DO_ZPZ_FP(sve_fsqrt_s, uint32_t, H1_4, float32_sqrt)
+DO_ZPZ_FP(sve_fsqrt_d, uint64_t, H1_8, float64_sqrt)
+
+DO_ZPZ_FP(sve_scvt_hh, uint16_t, H1_2, int16_to_float16)
+DO_ZPZ_FP(sve_scvt_sh, uint32_t, H1_4, int32_to_float16)
+DO_ZPZ_FP(sve_scvt_ss, uint32_t, H1_4, int32_to_float32)
+DO_ZPZ_FP(sve_scvt_sd, uint64_t, H1_8, int32_to_float64)
+DO_ZPZ_FP(sve_scvt_dh, uint64_t, H1_8, int64_to_float16)
+DO_ZPZ_FP(sve_scvt_ds, uint64_t, H1_8, int64_to_float32)
+DO_ZPZ_FP(sve_scvt_dd, uint64_t, H1_8, int64_to_float64)
+
+DO_ZPZ_FP(sve_ucvt_hh, uint16_t, H1_2, uint16_to_float16)
+DO_ZPZ_FP(sve_ucvt_sh, uint32_t, H1_4, uint32_to_float16)
+DO_ZPZ_FP(sve_ucvt_ss, uint32_t, H1_4, uint32_to_float32)
+DO_ZPZ_FP(sve_ucvt_sd, uint64_t, H1_8, uint32_to_float64)
+DO_ZPZ_FP(sve_ucvt_dh, uint64_t, H1_8, uint64_to_float16)
+DO_ZPZ_FP(sve_ucvt_ds, uint64_t, H1_8, uint64_to_float32)
+DO_ZPZ_FP(sve_ucvt_dd, uint64_t, H1_8, uint64_to_float64)
+
+static int16_t do_float16_logb_as_int(float16 a, float_status *s)
+{
+ /* Extract frac to the top of the uint32_t. */
+ uint32_t frac = (uint32_t)a << (16 + 6);
+ int16_t exp = extract32(a, 10, 5);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -15 - clz32(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0x1f)) {
+ if (frac == 0) {
+ return INT16_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 15;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT16_MIN;
+}
+
+static int32_t do_float32_logb_as_int(float32 a, float_status *s)
+{
+ /* Extract frac to the top of the uint32_t. */
+ uint32_t frac = a << 9;
+ int32_t exp = extract32(a, 23, 8);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -127 - clz32(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0xff)) {
+ if (frac == 0) {
+ return INT32_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 127;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT32_MIN;
+}
+
+static int64_t do_float64_logb_as_int(float64 a, float_status *s)
+{
+ /* Extract frac to the top of the uint64_t. */
+ uint64_t frac = a << 12;
+ int64_t exp = extract64(a, 52, 11);
+
+ if (unlikely(exp == 0)) {
+ if (frac != 0) {
+ if (!get_flush_inputs_to_zero(s)) {
+ /* denormal: bias - fractional_zeros */
+ return -1023 - clz64(frac);
+ }
+ /* flush to zero */
+ float_raise(float_flag_input_denormal, s);
+ }
+ } else if (unlikely(exp == 0x7ff)) {
+ if (frac == 0) {
+ return INT64_MAX; /* infinity */
+ }
+ } else {
+ /* normal: exp - bias */
+ return exp - 1023;
+ }
+ /* nan or zero */
+ float_raise(float_flag_invalid, s);
+ return INT64_MIN;
+}
+
+DO_ZPZ_FP(flogb_h, float16, H1_2, do_float16_logb_as_int)
+DO_ZPZ_FP(flogb_s, float32, H1_4, do_float32_logb_as_int)
+DO_ZPZ_FP(flogb_d, float64, H1_8, do_float64_logb_as_int)
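+
+/* FLOGB returns floor(log2(|x|)) as a same-width signed integer, e.g.
+ * for float32: 1.0f -> 0, 2.0f -> 1, 0.5f -> -1, and the smallest
+ * denormal 0x00000001 -> -149; infinities give INT32_MAX, while zero
+ * and NaN give INT32_MIN and raise float_flag_invalid.
+ */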
+
+#undef DO_ZPZ_FP
+
+static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg,
+ float_status *status, uint32_t desc,
+ uint16_t neg1, uint16_t neg3)
+{
+ intptr_t i = simd_oprsz(desc);
+ uint64_t *g = vg;
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ i -= 2;
+ if (likely((pg >> (i & 63)) & 1)) {
+ float16 e1, e2, e3, r;
+
+ e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1;
+ e2 = *(uint16_t *)(vm + H1_2(i));
+ e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3;
+ r = float16_muladd(e1, e2, e3, 0, status);
+ *(uint16_t *)(vd + H1_2(i)) = r;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0);
+}
+
+void HELPER(sve_fmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0);
+}
+
+void HELPER(sve_fnmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000);
+}
+
+void HELPER(sve_fnmls_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_h(vd, vn, vm, va, vg, status, desc, 0, 0x8000);
+}
+
+static void do_fmla_zpzzz_s(void *vd, void *vn, void *vm, void *va, void *vg,
+ float_status *status, uint32_t desc,
+ uint32_t neg1, uint32_t neg3)
+{
+ intptr_t i = simd_oprsz(desc);
+ uint64_t *g = vg;
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ i -= 4;
+ if (likely((pg >> (i & 63)) & 1)) {
+ float32 e1, e2, e3, r;
+
+ e1 = *(uint32_t *)(vn + H1_4(i)) ^ neg1;
+ e2 = *(uint32_t *)(vm + H1_4(i));
+ e3 = *(uint32_t *)(va + H1_4(i)) ^ neg3;
+ r = float32_muladd(e1, e2, e3, 0, status);
+ *(uint32_t *)(vd + H1_4(i)) = r;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0);
+}
+
+void HELPER(sve_fmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0);
+}
+
+void HELPER(sve_fnmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0x80000000, 0x80000000);
+}
+
+void HELPER(sve_fnmls_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_s(vd, vn, vm, va, vg, status, desc, 0, 0x80000000);
+}
+
+static void do_fmla_zpzzz_d(void *vd, void *vn, void *vm, void *va, void *vg,
+ float_status *status, uint32_t desc,
+ uint64_t neg1, uint64_t neg3)
+{
+ intptr_t i = simd_oprsz(desc);
+ uint64_t *g = vg;
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ i -= 8;
+ if (likely((pg >> (i & 63)) & 1)) {
+ float64 e1, e2, e3, r;
+
+ e1 = *(uint64_t *)(vn + i) ^ neg1;
+ e2 = *(uint64_t *)(vm + i);
+ e3 = *(uint64_t *)(va + i) ^ neg3;
+ r = float64_muladd(e1, e2, e3, 0, status);
+ *(uint64_t *)(vd + i) = r;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, 0);
+}
+
+void HELPER(sve_fmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, 0);
+}
+
+void HELPER(sve_fnmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, INT64_MIN, INT64_MIN);
+}
+
+void HELPER(sve_fnmls_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ do_fmla_zpzzz_d(vd, vn, vm, va, vg, status, desc, 0, INT64_MIN);
+}
+
+/* Two-operand floating-point comparison controlled by a predicate.
+ * Unlike the integer version, we are not allowed to optimistically
+ * compare operands, since the comparison may have side effects with
+ * respect to the FPSR.
+ */
+#define DO_FPCMP_PPZZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
+ uint64_t *d = vd, *g = vg; \
+ do { \
+ uint64_t out = 0, pg = g[j]; \
+ do { \
+ i -= sizeof(TYPE), out <<= sizeof(TYPE); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ TYPE mm = *(TYPE *)(vm + H(i)); \
+ out |= OP(TYPE, nn, mm, status); \
+ } \
+ } while (i & 63); \
+ d[j--] = out; \
+ } while (i > 0); \
+}
+
+#define DO_FPCMP_PPZZ_H(NAME, OP) \
+ DO_FPCMP_PPZZ(NAME##_h, float16, H1_2, OP)
+#define DO_FPCMP_PPZZ_S(NAME, OP) \
+ DO_FPCMP_PPZZ(NAME##_s, float32, H1_4, OP)
+#define DO_FPCMP_PPZZ_D(NAME, OP) \
+ DO_FPCMP_PPZZ(NAME##_d, float64, H1_8, OP)
+
+#define DO_FPCMP_PPZZ_ALL(NAME, OP) \
+ DO_FPCMP_PPZZ_H(NAME, OP) \
+ DO_FPCMP_PPZZ_S(NAME, OP) \
+ DO_FPCMP_PPZZ_D(NAME, OP)
+
+#define DO_FCMGE(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) <= 0
+#define DO_FCMGT(TYPE, X, Y, ST) TYPE##_compare(Y, X, ST) < 0
+#define DO_FCMLE(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) <= 0
+#define DO_FCMLT(TYPE, X, Y, ST) TYPE##_compare(X, Y, ST) < 0
+#define DO_FCMEQ(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) == 0
+#define DO_FCMNE(TYPE, X, Y, ST) TYPE##_compare_quiet(X, Y, ST) != 0
+#define DO_FCMUO(TYPE, X, Y, ST) \
+ TYPE##_compare_quiet(X, Y, ST) == float_relation_unordered
+#define DO_FACGE(TYPE, X, Y, ST) \
+ TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) <= 0
+#define DO_FACGT(TYPE, X, Y, ST) \
+ TYPE##_compare(TYPE##_abs(Y), TYPE##_abs(X), ST) < 0
+
+DO_FPCMP_PPZZ_ALL(sve_fcmge, DO_FCMGE)
+DO_FPCMP_PPZZ_ALL(sve_fcmgt, DO_FCMGT)
+DO_FPCMP_PPZZ_ALL(sve_fcmeq, DO_FCMEQ)
+DO_FPCMP_PPZZ_ALL(sve_fcmne, DO_FCMNE)
+DO_FPCMP_PPZZ_ALL(sve_fcmuo, DO_FCMUO)
+DO_FPCMP_PPZZ_ALL(sve_facge, DO_FACGE)
+DO_FPCMP_PPZZ_ALL(sve_facgt, DO_FACGT)
+
+#undef DO_FPCMP_PPZZ_ALL
+#undef DO_FPCMP_PPZZ_D
+#undef DO_FPCMP_PPZZ_S
+#undef DO_FPCMP_PPZZ_H
+#undef DO_FPCMP_PPZZ
+
+/* One-operand floating-point comparison against zero, controlled
+ * by a predicate.
+ */
+#define DO_FPCMP_PPZ0(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, \
+ void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc), j = (i - 1) >> 6; \
+ uint64_t *d = vd, *g = vg; \
+ do { \
+ uint64_t out = 0, pg = g[j]; \
+ do { \
+ i -= sizeof(TYPE), out <<= sizeof(TYPE); \
+ if ((pg >> (i & 63)) & 1) { \
+ TYPE nn = *(TYPE *)(vn + H(i)); \
+ out |= OP(TYPE, nn, 0, status); \
+ } \
+ } while (i & 63); \
+ d[j--] = out; \
+ } while (i > 0); \
+}
+
+#define DO_FPCMP_PPZ0_H(NAME, OP) \
+ DO_FPCMP_PPZ0(NAME##_h, float16, H1_2, OP)
+#define DO_FPCMP_PPZ0_S(NAME, OP) \
+ DO_FPCMP_PPZ0(NAME##_s, float32, H1_4, OP)
+#define DO_FPCMP_PPZ0_D(NAME, OP) \
+ DO_FPCMP_PPZ0(NAME##_d, float64, H1_8, OP)
+
+#define DO_FPCMP_PPZ0_ALL(NAME, OP) \
+ DO_FPCMP_PPZ0_H(NAME, OP) \
+ DO_FPCMP_PPZ0_S(NAME, OP) \
+ DO_FPCMP_PPZ0_D(NAME, OP)
+
+DO_FPCMP_PPZ0_ALL(sve_fcmge0, DO_FCMGE)
+DO_FPCMP_PPZ0_ALL(sve_fcmgt0, DO_FCMGT)
+DO_FPCMP_PPZ0_ALL(sve_fcmle0, DO_FCMLE)
+DO_FPCMP_PPZ0_ALL(sve_fcmlt0, DO_FCMLT)
+DO_FPCMP_PPZ0_ALL(sve_fcmeq0, DO_FCMEQ)
+DO_FPCMP_PPZ0_ALL(sve_fcmne0, DO_FCMNE)
+
+/* FP Trig Multiply-Add.  The coefficient tables below hold the terms of
+ * the sin (first half of each table) and cos (second half) polynomial
+ * series; a negative M element selects the cos half via XX += 8.
+ */
+
+void HELPER(sve_ftmad_h)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+{
+ static const float16 coeff[16] = {
+ 0x3c00, 0xb155, 0x2030, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3c00, 0xb800, 0x293a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float16);
+ intptr_t x = simd_data(desc);
+ float16 *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i++) {
+ float16 mm = m[i];
+ intptr_t xx = x;
+ if (float16_is_neg(mm)) {
+ mm = float16_abs(mm);
+ xx += 8;
+ }
+ d[i] = float16_muladd(n[i], mm, coeff[xx], 0, vs);
+ }
+}
+
+void HELPER(sve_ftmad_s)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+{
+ static const float32 coeff[16] = {
+ 0x3f800000, 0xbe2aaaab, 0x3c088886, 0xb95008b9,
+ 0x36369d6d, 0x00000000, 0x00000000, 0x00000000,
+ 0x3f800000, 0xbf000000, 0x3d2aaaa6, 0xbab60705,
+ 0x37cd37cc, 0x00000000, 0x00000000, 0x00000000,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float32);
+ intptr_t x = simd_data(desc);
+ float32 *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i++) {
+ float32 mm = m[i];
+ intptr_t xx = x;
+ if (float32_is_neg(mm)) {
+ mm = float32_abs(mm);
+ xx += 8;
+ }
+ d[i] = float32_muladd(n[i], mm, coeff[xx], 0, vs);
+ }
+}
+
+void HELPER(sve_ftmad_d)(void *vd, void *vn, void *vm, void *vs, uint32_t desc)
+{
+ static const float64 coeff[16] = {
+ 0x3ff0000000000000ull, 0xbfc5555555555543ull,
+ 0x3f8111111110f30cull, 0xbf2a01a019b92fc6ull,
+ 0x3ec71de351f3d22bull, 0xbe5ae5e2b60f7b91ull,
+ 0x3de5d8408868552full, 0x0000000000000000ull,
+ 0x3ff0000000000000ull, 0xbfe0000000000000ull,
+ 0x3fa5555555555536ull, 0xbf56c16c16c13a0bull,
+ 0x3efa01a019b1e8d8ull, 0xbe927e4f7282f468ull,
+ 0x3e21ee96d2641b13ull, 0xbda8f76380fbb401ull,
+ };
+ intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(float64);
+ intptr_t x = simd_data(desc);
+ float64 *d = vd, *n = vn, *m = vm;
+ for (i = 0; i < opr_sz; i++) {
+ float64 mm = m[i];
+ intptr_t xx = x;
+ if (float64_is_neg(mm)) {
+ mm = float64_abs(mm);
+ xx += 8;
+ }
+ d[i] = float64_muladd(n[i], mm, coeff[xx], 0, vs);
+ }
+}
+
+/*
+ * FP Complex Add
+ */
+
+void HELPER(sve_fcadd_h)(void *vd, void *vn, void *vm, void *vg,
+ void *vs, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ uint64_t *g = vg;
+ float16 neg_imag = float16_set_sign(0, simd_data(desc));
+ float16 neg_real = float16_chs(neg_imag);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float16 e0, e1, e2, e3;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float16);
+ i -= 2 * sizeof(float16);
+
+ e0 = *(float16 *)(vn + H1_2(i));
+ e1 = *(float16 *)(vm + H1_2(j)) ^ neg_real;
+ e2 = *(float16 *)(vn + H1_2(j));
+ e3 = *(float16 *)(vm + H1_2(i)) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ *(float16 *)(vd + H1_2(i)) = float16_add(e0, e1, vs);
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ *(float16 *)(vd + H1_2(j)) = float16_add(e2, e3, vs);
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fcadd_s)(void *vd, void *vn, void *vm, void *vg,
+ void *vs, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ uint64_t *g = vg;
+ float32 neg_imag = float32_set_sign(0, simd_data(desc));
+ float32 neg_real = float32_chs(neg_imag);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float32 e0, e1, e2, e3;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float32);
+ i -= 2 * sizeof(float32);
+
+ e0 = *(float32 *)(vn + H1_2(i));
+ e1 = *(float32 *)(vm + H1_2(j)) ^ neg_real;
+ e2 = *(float32 *)(vn + H1_2(j));
+ e3 = *(float32 *)(vm + H1_2(i)) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ *(float32 *)(vd + H1_2(i)) = float32_add(e0, e1, vs);
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ *(float32 *)(vd + H1_2(j)) = float32_add(e2, e3, vs);
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fcadd_d)(void *vd, void *vn, void *vm, void *vg,
+ void *vs, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ uint64_t *g = vg;
+ float64 neg_imag = float64_set_sign(0, simd_data(desc));
+ float64 neg_real = float64_chs(neg_imag);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float64 e0, e1, e2, e3;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float64);
+ i -= 2 * sizeof(float64);
+
+ e0 = *(float64 *)(vn + H1_2(i));
+ e1 = *(float64 *)(vm + H1_2(j)) ^ neg_real;
+ e2 = *(float64 *)(vn + H1_2(j));
+ e3 = *(float64 *)(vm + H1_2(i)) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ *(float64 *)(vd + H1_2(i)) = float64_add(e0, e1, vs);
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ *(float64 *)(vd + H1_2(j)) = float64_add(e2, e3, vs);
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+/*
+ * FP Complex Multiply
+ */
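+
+/* ROT selects the multiplicands and signs for the four FCMLA rotations;
+ * reading off the flip/neg_real/neg_imag logic below:
+ *   rot 0 (  0):  d_r += n_r * m_r;  d_i += n_r * m_i
+ *   rot 1 ( 90):  d_r -= n_i * m_i;  d_i += n_i * m_r
+ *   rot 2 (180):  d_r -= n_r * m_r;  d_i -= n_r * m_i
+ *   rot 3 (270):  d_r += n_i * m_i;  d_i -= n_i * m_r
+ */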
+
+void HELPER(sve_fcmla_zpzzz_h)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ unsigned rot = simd_data(desc);
+ bool flip = rot & 1;
+ float16 neg_imag, neg_real;
+ uint64_t *g = vg;
+
+ neg_imag = float16_set_sign(0, (rot & 2) != 0);
+ neg_real = float16_set_sign(0, rot == 1 || rot == 2);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float16 e1, e2, e3, e4, nr, ni, mr, mi, d;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float16);
+ i -= 2 * sizeof(float16);
+
+ nr = *(float16 *)(vn + H1_2(i));
+ ni = *(float16 *)(vn + H1_2(j));
+ mr = *(float16 *)(vm + H1_2(i));
+ mi = *(float16 *)(vm + H1_2(j));
+
+ e2 = (flip ? ni : nr);
+ e1 = (flip ? mi : mr) ^ neg_real;
+ e4 = e2;
+ e3 = (flip ? mr : mi) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ d = *(float16 *)(va + H1_2(i));
+ d = float16_muladd(e2, e1, d, 0, status);
+ *(float16 *)(vd + H1_2(i)) = d;
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ d = *(float16 *)(va + H1_2(j));
+ d = float16_muladd(e4, e3, d, 0, status);
+ *(float16 *)(vd + H1_2(j)) = d;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fcmla_zpzzz_s)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ unsigned rot = simd_data(desc);
+ bool flip = rot & 1;
+ float32 neg_imag, neg_real;
+ uint64_t *g = vg;
+
+ neg_imag = float32_set_sign(0, (rot & 2) != 0);
+ neg_real = float32_set_sign(0, rot == 1 || rot == 2);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float32 e1, e2, e3, e4, nr, ni, mr, mi, d;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float32);
+ i -= 2 * sizeof(float32);
+
+ nr = *(float32 *)(vn + H1_2(i));
+ ni = *(float32 *)(vn + H1_2(j));
+ mr = *(float32 *)(vm + H1_2(i));
+ mi = *(float32 *)(vm + H1_2(j));
+
+ e2 = (flip ? ni : nr);
+ e1 = (flip ? mi : mr) ^ neg_real;
+ e4 = e2;
+ e3 = (flip ? mr : mi) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ d = *(float32 *)(va + H1_2(i));
+ d = float32_muladd(e2, e1, d, 0, status);
+ *(float32 *)(vd + H1_2(i)) = d;
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ d = *(float32 *)(va + H1_2(j));
+ d = float32_muladd(e4, e3, d, 0, status);
+ *(float32 *)(vd + H1_2(j)) = d;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
+
+void HELPER(sve_fcmla_zpzzz_d)(void *vd, void *vn, void *vm, void *va,
+ void *vg, void *status, uint32_t desc)
+{
+ intptr_t j, i = simd_oprsz(desc);
+ unsigned rot = simd_data(desc);
+ bool flip = rot & 1;
+ float64 neg_imag, neg_real;
+ uint64_t *g = vg;
+
+ neg_imag = float64_set_sign(0, (rot & 2) != 0);
+ neg_real = float64_set_sign(0, rot == 1 || rot == 2);
+
+ do {
+ uint64_t pg = g[(i - 1) >> 6];
+ do {
+ float64 e1, e2, e3, e4, nr, ni, mr, mi, d;
+
+ /* I holds the real index; J holds the imag index. */
+ j = i - sizeof(float64);
+ i -= 2 * sizeof(float64);
+
+ nr = *(float64 *)(vn + H1_2(i));
+ ni = *(float64 *)(vn + H1_2(j));
+ mr = *(float64 *)(vm + H1_2(i));
+ mi = *(float64 *)(vm + H1_2(j));
+
+ e2 = (flip ? ni : nr);
+ e1 = (flip ? mi : mr) ^ neg_real;
+ e4 = e2;
+ e3 = (flip ? mr : mi) ^ neg_imag;
+
+ if (likely((pg >> (i & 63)) & 1)) {
+ d = *(float64 *)(va + H1_2(i));
+ d = float64_muladd(e2, e1, d, 0, status);
+ *(float64 *)(vd + H1_2(i)) = d;
+ }
+ if (likely((pg >> (j & 63)) & 1)) {
+ d = *(float64 *)(va + H1_2(j));
+ d = float64_muladd(e4, e3, d, 0, status);
+ *(float64 *)(vd + H1_2(j)) = d;
+ }
+ } while (i & 63);
+ } while (i != 0);
+}
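+
+/*
+ * The two-bit rot field decodes as follows (derived from the flip /
+ * neg_real / neg_imag computation above), matching the four FCMLA
+ * rotations 0, 90, 180 and 270 degrees:
+ *
+ *   rot  flip  neg_real  neg_imag   real-lane madd    imag-lane madd
+ *    0     0      +         +        d += nr * mr      d += nr * mi
+ *    1     1      -         +        d -= ni * mi      d += ni * mr
+ *    2     0      -         -        d -= nr * mr      d -= nr * mi
+ *    3     1      +         -        d += ni * mi      d -= ni * mr
+ */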
+
+/*
+ * Load contiguous data, protected by a governing predicate.
+ */
+
+/*
+ * Load one element into @vd + @reg_off from @host.
+ * The controlling predicate is known to be true.
+ */
+typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host);
+
+/*
+ * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
+ * The controlling predicate is known to be true.
+ */
+typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong vaddr, uintptr_t retaddr);
+
+/*
+ * Generate the above primitives.
+ */
+
+#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
+static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
+{ \
+ TYPEM val = HOST(host); \
+ *(TYPEE *)(vd + H(reg_off)) = val; \
+}
+
+#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \
+static void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
+{ HOST(host, (TYPEM)*(TYPEE *)(vd + H(reg_off))); }
+
+#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \
+static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ *(TYPEE *)(vd + H(reg_off)) = \
+ (TYPEM)TLB(env, useronly_clean_ptr(addr), ra); \
+}
+
+#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \
+static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
+ target_ulong addr, uintptr_t ra) \
+{ \
+ TLB(env, useronly_clean_ptr(addr), \
+ (TYPEM)*(TYPEE *)(vd + H(reg_off)), ra); \
+}
+
+#define DO_LD_PRIM_1(NAME, H, TE, TM) \
+ DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
+ DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra)
+
+DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
+DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
+DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
+DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
+DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
+DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t)
+DO_LD_PRIM_1(ld1bds, H1_8, uint64_t, int8_t)
+
+#define DO_ST_PRIM_1(NAME, H, TE, TM) \
+ DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \
+ DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra)
+
+DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t)
+DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t)
+DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t)
+DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t)
+
+#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \
+ DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \
+ DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \
+ DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \
+ DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra)
+
+#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \
+ DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \
+ DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \
+ DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \
+ DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra)
+
+DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw)
+DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw)
+DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw)
+DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw)
+DO_LD_PRIM_2(hds, H1_8, uint64_t, int16_t, lduw)
+
+DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw)
+DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw)
+DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw)
+
+DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl)
+DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl)
+DO_LD_PRIM_2(sds, H1_8, uint64_t, int32_t, ldl)
+
+DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl)
+DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl)
+
+DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq)
+DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
+
+#undef DO_LD_TLB
+#undef DO_ST_TLB
+#undef DO_LD_HOST
+#undef DO_LD_PRIM_1
+#undef DO_ST_PRIM_1
+#undef DO_LD_PRIM_2
+#undef DO_ST_PRIM_2
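+
+/*
+ * For reference, DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t) above expands
+ * to this pair of primitives:
+ *
+ *   static void sve_ld1bb_host(void *vd, intptr_t reg_off, void *host)
+ *   {
+ *       uint8_t val = ldub_p(host);
+ *       *(uint8_t *)(vd + H1(reg_off)) = val;
+ *   }
+ *
+ *   static void sve_ld1bb_tlb(CPUARMState *env, void *vd, intptr_t reg_off,
+ *                             target_ulong addr, uintptr_t ra)
+ *   {
+ *       *(uint8_t *)(vd + H1(reg_off)) =
+ *           (uint8_t)cpu_ldub_data_ra(env, useronly_clean_ptr(addr), ra);
+ *   }
+ *
+ * The extending forms (e.g. ld1bsu) differ only in using a wider TYPEE
+ * than TYPEM, so the cast performs the zero- or sign-extension.
+ */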
+
+/*
+ * Skip through a sequence of inactive elements in the guarding predicate @vg,
+ * beginning at @reg_off, bounded by @reg_max.  Return the offset of the
+ * first active element >= @reg_off, or @reg_max if there are no active
+ * elements at all.
+ */
+static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
+ intptr_t reg_max, int esz)
+{
+ uint64_t pg_mask = pred_esz_masks[esz];
+ uint64_t pg = (vg[reg_off >> 6] & pg_mask) >> (reg_off & 63);
+
+ /* In normal usage, the first element is active. */
+ if (likely(pg & 1)) {
+ return reg_off;
+ }
+
+ if (pg == 0) {
+ reg_off &= -64;
+ do {
+ reg_off += 64;
+ if (unlikely(reg_off >= reg_max)) {
+ /* The entire predicate was false. */
+ return reg_max;
+ }
+ pg = vg[reg_off >> 6] & pg_mask;
+ } while (pg == 0);
+ }
+ reg_off += ctz64(pg);
+
+ /* We should never see an out of range predicate bit set. */
+ tcg_debug_assert(reg_off < reg_max);
+ return reg_off;
+}
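+
+/*
+ * Example: with esz == 2 (4-byte elements), pred_esz_masks[2] retains
+ * every fourth predicate bit.  For vg[0] == 0x100 and reg_off == 0,
+ * pg == 0x100: bit 0 is clear and pg is non-zero, so the result is
+ * ctz64(0x100) == 8, the byte offset of the first active element.
+ */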
+
+/*
+ * Resolve the guest virtual address to info->host and info->flags.
+ * If @nofault, return false if the page is invalid, otherwise
+ * exit via page fault exception.
+ */
+
+typedef struct {
+ void *host;
+ int flags;
+ MemTxAttrs attrs;
+} SVEHostPage;
+
+static bool sve_probe_page(SVEHostPage *info, bool nofault,
+ CPUARMState *env, target_ulong addr,
+ int mem_off, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int flags;
+
+ addr += mem_off;
+
+ /*
+ * User-only currently always issues with TBI. See the comment
+ * above useronly_clean_ptr. Usually we clean this top byte away
+ * during translation, but we can't do that for e.g. vector + imm
+ * addressing modes.
+ *
+ * We currently always enable TBI for user-only, and do not provide
+ * a way to turn it off. So clean the pointer unconditionally here,
+ * rather than look it up here, or pass it down from above.
+ */
+ addr = useronly_clean_ptr(addr);
+
+ flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
+ &info->host, retaddr);
+ info->flags = flags;
+
+ if (flags & TLB_INVALID_MASK) {
+ g_assert(nofault);
+ return false;
+ }
+
+ /* Ensure that info->host[] is relative to addr, not addr + mem_off. */
+ info->host -= mem_off;
+
+#ifdef CONFIG_USER_ONLY
+ memset(&info->attrs, 0, sizeof(info->attrs));
+#else
+ /*
+ * Find the iotlbentry for addr and return the transaction attributes.
+ * This *must* be present in the TLB because we just found the mapping.
+ */
+ {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+# ifdef CONFIG_DEBUG_TCG
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ target_ulong comparator = (access_type == MMU_DATA_LOAD
+ ? entry->addr_read
+ : tlb_addr_write(entry));
+ g_assert(tlb_hit(comparator, addr));
+# endif
+
+ CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+ info->attrs = iotlbentry->attrs;
+ }
+#endif
+
+ return true;
+}
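+
+/*
+ * Typical usage, as in the helpers below -- probe without faulting, then
+ * take the fast host path only when nothing special is flagged.  A minimal
+ * sketch (illustrative; @addr and @retaddr are assumed context):
+ *
+ *   SVEHostPage pg;
+ *   if (sve_probe_page(&pg, true, env, addr, 0, MMU_DATA_LOAD,
+ *                      cpu_mmu_index(env, false), retaddr)
+ *       && !(pg.flags & (TLB_MMIO | TLB_WATCHPOINT))) {
+ *       ... direct access through pg.host ...
+ *   }
+ */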
+
+
+/*
+ * Analyse contiguous data, protected by a governing predicate.
+ */
+
+typedef enum {
+ FAULT_NO,
+ FAULT_FIRST,
+ FAULT_ALL,
+} SVEContFault;
+
+typedef struct {
+ /*
+ * First and last element wholly contained within the two pages.
+ * mem_off_first[0] and reg_off_first[0] are always set >= 0.
+ * reg_off_last[0] may be < 0 if the first element crosses pages.
+ * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
+ * are set >= 0 only if there are complete elements on a second page.
+ *
+ * The reg_off_* offsets are relative to the internal vector register.
+ * The mem_off_first offset is relative to the memory address; the
+ * two offsets are different when a load operation extends, a store
+ * operation truncates, or for multi-register operations.
+ */
+ int16_t mem_off_first[2];
+ int16_t reg_off_first[2];
+ int16_t reg_off_last[2];
+
+ /*
+ * One element that is misaligned and spans both pages,
+ * or -1 if there is no such active element.
+ */
+ int16_t mem_off_split;
+ int16_t reg_off_split;
+
+ /*
+ * The byte offset at which the entire operation crosses a page boundary.
+ * Set >= 0 if and only if the entire operation spans two pages.
+ */
+ int16_t page_split;
+
+ /* TLB data for the two pages. */
+ SVEHostPage page[2];
+} SVEContLdSt;
+
+/*
+ * Find first active element on each page, and a loose bound for the
+ * final element on each page. Identify any single element that spans
+ * the page boundary. Return true if there are any active elements.
+ */
+static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
+ uint64_t *vg, intptr_t reg_max,
+ int esz, int msize)
+{
+ const int esize = 1 << esz;
+ const uint64_t pg_mask = pred_esz_masks[esz];
+ intptr_t reg_off_first = -1, reg_off_last = -1, reg_off_split;
+ intptr_t mem_off_last, mem_off_split;
+ intptr_t page_split, elt_split;
+ intptr_t i;
+
+ /* Set all of the element indices to -1, and the TLB data to 0. */
+ memset(info, -1, offsetof(SVEContLdSt, page));
+ memset(info->page, 0, sizeof(info->page));
+
+ /* Gross scan over the entire predicate to find bounds. */
+ i = 0;
+ do {
+ uint64_t pg = vg[i] & pg_mask;
+ if (pg) {
+ reg_off_last = i * 64 + 63 - clz64(pg);
+ if (reg_off_first < 0) {
+ reg_off_first = i * 64 + ctz64(pg);
+ }
+ }
+ } while (++i * 64 < reg_max);
+
+ if (unlikely(reg_off_first < 0)) {
+ /* No active elements, no pages touched. */
+ return false;
+ }
+ tcg_debug_assert(reg_off_last >= 0 && reg_off_last < reg_max);
+
+ info->reg_off_first[0] = reg_off_first;
+ info->mem_off_first[0] = (reg_off_first >> esz) * msize;
+ mem_off_last = (reg_off_last >> esz) * msize;
+
+ page_split = -(addr | TARGET_PAGE_MASK);
+ if (likely(mem_off_last + msize <= page_split)) {
+ /* The entire operation fits within a single page. */
+ info->reg_off_last[0] = reg_off_last;
+ return true;
+ }
+
+ info->page_split = page_split;
+ elt_split = page_split / msize;
+ reg_off_split = elt_split << esz;
+ mem_off_split = elt_split * msize;
+
+ /*
+ * This is the last full element on the first page, but it is not
+ * necessarily active. If there is no full element, i.e. the first
+ * active element is the one that's split, this value remains -1.
+ * It is useful as an iteration bound.
+ */
+ if (elt_split != 0) {
+ info->reg_off_last[0] = reg_off_split - esize;
+ }
+
+ /* Determine if an unaligned element spans the pages. */
+ if (page_split % msize != 0) {
+ /* It is helpful to know if the split element is active. */
+ if ((vg[reg_off_split >> 6] >> (reg_off_split & 63)) & 1) {
+ info->reg_off_split = reg_off_split;
+ info->mem_off_split = mem_off_split;
+
+ if (reg_off_split == reg_off_last) {
+ /* The page crossing element is last. */
+ return true;
+ }
+ }
+ reg_off_split += esize;
+ mem_off_split += msize;
+ }
+
+ /*
+ * We do want the first active element on the second page, because
+ * this may affect the address reported in an exception.
+ */
+ reg_off_split = find_next_active(vg, reg_off_split, reg_max, esz);
+ tcg_debug_assert(reg_off_split <= reg_off_last);
+ info->reg_off_first[1] = reg_off_split;
+ info->mem_off_first[1] = (reg_off_split >> esz) * msize;
+ info->reg_off_last[1] = reg_off_last;
+ return true;
+}
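+
+/*
+ * Worked example: a 128-bit vector (reg_max == 16) of 4-byte elements
+ * (esz == 2, msize == 4), all active, with addr six bytes short of a page
+ * boundary.  Then page_split == 6; the element at reg_off == 4 straddles
+ * the boundary (reg_off_split == mem_off_split == 4); the first page holds
+ * one whole element (reg_off_last[0] == 0); and the second page holds the
+ * remainder (reg_off_first[1] == 8, reg_off_last[1] == 12).
+ */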
+
+/*
+ * Resolve the guest virtual addresses to info->page[].
+ * Control the generation of page faults with @fault. Return false if
+ * there is no work to do, which can only happen with @fault == FAULT_NO.
+ */
+static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+ CPUARMState *env, target_ulong addr,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+ int mem_off = info->mem_off_first[0];
+ bool nofault = fault == FAULT_NO;
+ bool have_work = true;
+
+ if (!sve_probe_page(&info->page[0], nofault, env, addr, mem_off,
+ access_type, mmu_idx, retaddr)) {
+ /* No work to be done. */
+ return false;
+ }
+
+ if (likely(info->page_split < 0)) {
+ /* The entire operation was on the one page. */
+ return true;
+ }
+
+ /*
+ * If the second page is invalid, then we want the fault address to be
+ * the first byte on that page which is accessed.
+ */
+ if (info->mem_off_split >= 0) {
+ /*
+ * There is an element split across the pages. The fault address
+ * should be the first byte of the second page.
+ */
+ mem_off = info->page_split;
+ /*
+ * If the split element is also the first active element
+ * of the vector, then: For first-fault we should continue
+ * to generate faults for the second page. For no-fault,
+ * we have work only if the second page is valid.
+ */
+ if (info->mem_off_first[0] < info->mem_off_split) {
+ nofault = FAULT_FIRST;
+ have_work = false;
+ }
+ } else {
+ /*
+ * There is no element split across the pages. The fault address
+ * should be the first active element on the second page.
+ */
+ mem_off = info->mem_off_first[1];
+ /*
+ * There must have been one active element on the first page,
+ * so we're out of first-fault territory.
+ */
+ nofault = fault != FAULT_ALL;
+ }
+
+ have_work |= sve_probe_page(&info->page[1], nofault, env, addr, mem_off,
+ access_type, mmu_idx, retaddr);
+ return have_work;
+}
+
+static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+ uint64_t *vg, target_ulong addr,
+ int esize, int msize, int wp_access,
+ uintptr_t retaddr)
+{
+#ifndef CONFIG_USER_ONLY
+ intptr_t mem_off, reg_off, reg_last;
+ int flags0 = info->page[0].flags;
+ int flags1 = info->page[1].flags;
+
+ if (likely(!((flags0 | flags1) & TLB_WATCHPOINT))) {
+ return;
+ }
+
+ /* Indicate that watchpoints are handled. */
+ info->page[0].flags = flags0 & ~TLB_WATCHPOINT;
+ info->page[1].flags = flags1 & ~TLB_WATCHPOINT;
+
+ if (flags0 & TLB_WATCHPOINT) {
+ mem_off = info->mem_off_first[0];
+ reg_off = info->reg_off_first[0];
+ reg_last = info->reg_off_last[0];
+
+ while (reg_off <= reg_last) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ cpu_check_watchpoint(env_cpu(env), addr + mem_off,
+ msize, info->page[0].attrs,
+ wp_access, retaddr);
+ }
+ reg_off += esize;
+ mem_off += msize;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ }
+ }
+
+ mem_off = info->mem_off_split;
+ if (mem_off >= 0) {
+ cpu_check_watchpoint(env_cpu(env), addr + mem_off, msize,
+ info->page[0].attrs, wp_access, retaddr);
+ }
+
+ mem_off = info->mem_off_first[1];
+ if ((flags1 & TLB_WATCHPOINT) && mem_off >= 0) {
+ reg_off = info->reg_off_first[1];
+ reg_last = info->reg_off_last[1];
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ cpu_check_watchpoint(env_cpu(env), addr + mem_off,
+ msize, info->page[1].attrs,
+ wp_access, retaddr);
+ }
+ reg_off += esize;
+ mem_off += msize;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+#endif
+}
+
+static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+ uint64_t *vg, target_ulong addr, int esize,
+ int msize, uint32_t mtedesc, uintptr_t ra)
+{
+ intptr_t mem_off, reg_off, reg_last;
+
+ /* Process the page only if MemAttr == Tagged. */
+ if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
+ mem_off = info->mem_off_first[0];
+ reg_off = info->reg_off_first[0];
+ reg_last = info->reg_off_split;
+ if (reg_last < 0) {
+ reg_last = info->reg_off_last[0];
+ }
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ mte_check(env, mtedesc, addr, ra);
+ }
+ reg_off += esize;
+ mem_off += msize;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ } while (reg_off <= reg_last);
+ }
+
+ mem_off = info->mem_off_first[1];
+ if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
+ reg_off = info->reg_off_first[1];
+ reg_last = info->reg_off_last[1];
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ mte_check(env, mtedesc, addr, ra);
+ }
+ reg_off += esize;
+ mem_off += msize;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+}
+
+/*
+ * Common helper for all contiguous 1,2,3,4-register predicated loads.
+ */
+static inline QEMU_ALWAYS_INLINE
+void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
+ uint32_t desc, const uintptr_t retaddr,
+ const int esz, const int msz, const int N, uint32_t mtedesc,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const unsigned rd = simd_data(desc);
+ const intptr_t reg_max = simd_oprsz(desc);
+ intptr_t reg_off, reg_last, mem_off;
+ SVEContLdSt info;
+ void *host;
+ int flags, i;
+
+ /* Find the active elements. */
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
+ /* The entire predicate was false; no load occurs. */
+ for (i = 0; i < N; ++i) {
+ memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
+ }
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);
+
+ /* Handle watchpoints for all active elements. */
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
+ BP_MEM_READ, retaddr);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+ mtedesc, retaddr);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. Perform the load
+ * into scratch memory to preserve register state until the end.
+ */
+ ARMVectorReg scratch[4] = { };
+
+ mem_off = info.mem_off_first[0];
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[1];
+ if (reg_last < 0) {
+ reg_last = info.reg_off_split;
+ if (reg_last < 0) {
+ reg_last = info.reg_off_last[0];
+ }
+ }
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &scratch[i], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+
+ for (i = 0; i < N; ++i) {
+ memcpy(&env->vfp.zregs[(rd + i) & 31], &scratch[i], reg_max);
+ }
+ return;
+#endif
+ }
+
+ /* The entire operation is in RAM, on valid pages. */
+
+ for (i = 0; i < N; ++i) {
+ memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
+ }
+
+ mem_off = info.mem_off_first[0];
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[0];
+ host = info.page[0].host;
+
+ while (reg_off <= reg_last) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ }
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ mem_off = info.mem_off_split;
+ if (unlikely(mem_off >= 0)) {
+ reg_off = info.reg_off_split;
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
+ }
+
+ mem_off = info.mem_off_first[1];
+ if (unlikely(mem_off >= 0)) {
+ reg_off = info.reg_off_first[1];
+ reg_last = info.reg_off_last[1];
+ host = info.page[1].host;
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
+ uint32_t desc, const uintptr_t ra,
+ const int esz, const int msz, const int N,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ int bit55 = extract64(addr, 55, 1);
+
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /* Perform gross MTE suppression early. */
+ if (!tbi_check(desc, bit55) ||
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ mtedesc = 0;
+ }
+
+ sve_ldN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
+}
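+
+/*
+ * Layout of the combined descriptor used above: the low SIMD_DATA_SHIFT +
+ * SVE_MTEDESC_SHIFT bits are the ordinary SVE simd descriptor, and the
+ * remaining high bits carry the MTE descriptor, so a single uint32_t
+ * parameter serves both the load proper and the tag check.
+ */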
+
+#define DO_LD1_1(NAME, ESZ) \
+void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \
+ sve_##NAME##_host, sve_##NAME##_tlb); \
+} \
+void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \
+ sve_##NAME##_host, sve_##NAME##_tlb); \
+}
+
+#define DO_LD1_2(NAME, ESZ, MSZ) \
+void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
+} \
+void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
+} \
+void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
+} \
+void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
+}
+
+DO_LD1_1(ld1bb, MO_8)
+DO_LD1_1(ld1bhu, MO_16)
+DO_LD1_1(ld1bhs, MO_16)
+DO_LD1_1(ld1bsu, MO_32)
+DO_LD1_1(ld1bss, MO_32)
+DO_LD1_1(ld1bdu, MO_64)
+DO_LD1_1(ld1bds, MO_64)
+
+DO_LD1_2(ld1hh, MO_16, MO_16)
+DO_LD1_2(ld1hsu, MO_32, MO_16)
+DO_LD1_2(ld1hss, MO_32, MO_16)
+DO_LD1_2(ld1hdu, MO_64, MO_16)
+DO_LD1_2(ld1hds, MO_64, MO_16)
+
+DO_LD1_2(ld1ss, MO_32, MO_32)
+DO_LD1_2(ld1sdu, MO_64, MO_32)
+DO_LD1_2(ld1sds, MO_64, MO_32)
+
+DO_LD1_2(ld1dd, MO_64, MO_64)
+
+#undef DO_LD1_1
+#undef DO_LD1_2
+
+#define DO_LDN_1(N) \
+void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \
+ sve_ld1bb_host, sve_ld1bb_tlb); \
+} \
+void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \
+ sve_ld1bb_host, sve_ld1bb_tlb); \
+}
+
+#define DO_LDN_2(N, SUFF, ESZ) \
+void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
+ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
+} \
+void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \
+ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
+} \
+void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
+ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
+} \
+void HELPER(sve_ld##N##SUFF##_be_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
+ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
+}
+
+DO_LDN_1(2)
+DO_LDN_1(3)
+DO_LDN_1(4)
+
+DO_LDN_2(2, hh, MO_16)
+DO_LDN_2(3, hh, MO_16)
+DO_LDN_2(4, hh, MO_16)
+
+DO_LDN_2(2, ss, MO_32)
+DO_LDN_2(3, ss, MO_32)
+DO_LDN_2(4, ss, MO_32)
+
+DO_LDN_2(2, dd, MO_64)
+DO_LDN_2(3, dd, MO_64)
+DO_LDN_2(4, dd, MO_64)
+
+#undef DO_LDN_1
+#undef DO_LDN_2
+
+/*
+ * Load contiguous data, first-fault and no-fault.
+ *
+ * For user-only, one could argue that we should hold the mmap_lock during
+ * the operation so that there is no race between page_check_range and the
+ * load operation. However, unmapping pages out from under a running thread
+ * is extraordinarily unlikely. This theoretical race condition also affects
+ * linux-user/ in its get_user/put_user macros.
+ *
+ * TODO: Construct some helpers, written in assembly, that interact with
+ * host_signal_handler to produce memory ops which can properly report errors
+ * without racing.
+ */
+
+/* Fault on byte I. All bits in FFR from I are cleared. The vector
+ * result from I is CONSTRAINED UNPREDICTABLE; we choose the MERGE
+ * option, which leaves subsequent data unchanged.
+ */
+static void record_fault(CPUARMState *env, uintptr_t i, uintptr_t oprsz)
+{
+ uint64_t *ffr = env->vfp.pregs[FFR_PRED_NUM].p;
+
+ if (i & 63) {
+ ffr[i / 64] &= MAKE_64BIT_MASK(0, i & 63);
+ i = ROUND_UP(i, 64);
+ }
+ for (; i < oprsz; i += 64) {
+ ffr[i / 64] = 0;
+ }
+}
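+
+/*
+ * Example: record_fault(env, 24, 64) keeps FFR bits 0-23 via
+ * MAKE_64BIT_MASK(0, 24) and clears bits 24-63, so exactly the elements
+ * before the faulting one remain marked as loaded.
+ */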
+
+/*
+ * Common helper for all contiguous no-fault and first-fault loads.
+ */
+static inline QEMU_ALWAYS_INLINE
+void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
+ uint32_t desc, const uintptr_t retaddr, uint32_t mtedesc,
+ const int esz, const int msz, const SVEContFault fault,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const unsigned rd = simd_data(desc);
+ void *vd = &env->vfp.zregs[rd];
+ const intptr_t reg_max = simd_oprsz(desc);
+ intptr_t reg_off, mem_off, reg_last;
+ SVEContLdSt info;
+ int flags;
+ void *host;
+
+ /* Find the active elements. */
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, 1 << msz)) {
+ /* The entire predicate was false; no load occurs. */
+ memset(vd, 0, reg_max);
+ return;
+ }
+ reg_off = info.reg_off_first[0];
+
+ /* Probe the page(s). */
+ if (!sve_cont_ldst_pages(&info, fault, env, addr, MMU_DATA_LOAD, retaddr)) {
+ /* Fault on first element. */
+ tcg_debug_assert(fault == FAULT_NO);
+ memset(vd, 0, reg_max);
+ goto do_fault;
+ }
+
+ mem_off = info.mem_off_first[0];
+ flags = info.page[0].flags;
+
+ /*
+ * Disable MTE checking if the Tagged bit is not set. Since TBI must
+ * be set within MTEDESC for MTE, !mtedesc => !mte_active.
+ */
+ if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
+ mtedesc = 0;
+ }
+
+ if (fault == FAULT_FIRST) {
+ /* Trapping mte check for the first-fault element. */
+ if (mtedesc) {
+ mte_check(env, mtedesc, addr + mem_off, retaddr);
+ }
+
+ /*
+ * Special handling of the first active element,
+ * if it crosses a page boundary or is MMIO.
+ */
+ bool is_split = mem_off == info.mem_off_split;
+ if (unlikely(flags != 0) || unlikely(is_split)) {
+ /*
+ * Use the slow path for cross-page handling.
+ * Might trap for MMIO or watchpoints.
+ */
+ tlb_fn(env, vd, reg_off, addr + mem_off, retaddr);
+
+ /* After any fault, zero the other elements. */
+ swap_memzero(vd, reg_off);
+ reg_off += 1 << esz;
+ mem_off += 1 << msz;
+ swap_memzero(vd + reg_off, reg_max - reg_off);
+
+ if (is_split) {
+ goto second_page;
+ }
+ } else {
+ memset(vd, 0, reg_max);
+ }
+ } else {
+ memset(vd, 0, reg_max);
+ if (unlikely(mem_off == info.mem_off_split)) {
+ /* The first active element crosses a page boundary. */
+ flags |= info.page[1].flags;
+ if (unlikely(flags & TLB_MMIO)) {
+ /* Some page is MMIO, see below. */
+ goto do_fault;
+ }
+ if (unlikely(flags & TLB_WATCHPOINT) &&
+ (cpu_watchpoint_address_matches
+ (env_cpu(env), addr + mem_off, 1 << msz)
+ & BP_MEM_READ)) {
+ /* Watchpoint hit, see below. */
+ goto do_fault;
+ }
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
+ goto do_fault;
+ }
+ /*
+ * Use the slow path for cross-page handling.
+ * This is RAM, without a watchpoint, and will not trap.
+ */
+ tlb_fn(env, vd, reg_off, addr + mem_off, retaddr);
+ goto second_page;
+ }
+ }
+
+ /*
+ * From this point on, all memory operations are MemSingleNF.
+ *
+ * Per the MemSingleNF pseudocode, a no-fault load from Device memory
+ * must not actually hit the bus -- it returns (UNKNOWN, FAULT) instead.
+ *
+ * Unfortunately we do not have access to the memory attributes from the
+ * PTE to tell Device memory from Normal memory. So we make a mostly
+ * correct check, and indicate (UNKNOWN, FAULT) for any MMIO.
+ * This gives the right answer for the common cases of "Normal memory,
+ * backed by host RAM" and "Device memory, backed by MMIO".
+ * The architecture allows us to suppress an NF load and return
+ * (UNKNOWN, FAULT) for any reason, so our behaviour for the corner
+ * case of "Normal memory, backed by MMIO" is permitted. The case we
+ * get wrong is "Device memory, backed by host RAM", for which we
+ * should return (UNKNOWN, FAULT) but do not.
+ *
+ * Similarly, CPU_BP breakpoints would raise exceptions, and so
+ * return (UNKNOWN, FAULT). For simplicity, we consider gdb and
+ * architectural breakpoints the same.
+ */
+ if (unlikely(flags & TLB_MMIO)) {
+ goto do_fault;
+ }
+
+ reg_last = info.reg_off_last[0];
+ host = info.page[0].host;
+
+ do {
+ uint64_t pg = *(uint64_t *)(vg + (reg_off >> 3));
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ if (unlikely(flags & TLB_WATCHPOINT) &&
+ (cpu_watchpoint_address_matches
+ (env_cpu(env), addr + mem_off, 1 << msz)
+ & BP_MEM_READ)) {
+ goto do_fault;
+ }
+ if (mtedesc && !mte_probe(env, mtedesc, addr + mem_off)) {
+ goto do_fault;
+ }
+ host_fn(vd, reg_off, host + mem_off);
+ }
+ reg_off += 1 << esz;
+ mem_off += 1 << msz;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ } while (reg_off <= reg_last);
+
+ /*
+ * MemSingleNF is allowed to fail for any reason. We have special
+ * code above to handle the first element crossing a page boundary.
+ * As an implementation choice, decline to handle a cross-page element
+ * in any other position.
+ */
+ reg_off = info.reg_off_split;
+ if (reg_off >= 0) {
+ goto do_fault;
+ }
+
+ second_page:
+ reg_off = info.reg_off_first[1];
+ if (likely(reg_off < 0)) {
+ /* No active elements on the second page. All done. */
+ return;
+ }
+
+ /*
+ * MemSingleNF is allowed to fail for any reason. As an implementation
+ * choice, decline to handle elements on the second page. This should
+ * be low frequency as the guest walks through memory -- the next
+ * iteration of the guest's loop should be aligned on the page boundary,
+ * and then all following iterations will stay aligned.
+ */
+
+ do_fault:
+ record_fault(env, reg_off, reg_max);
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr,
+ uint32_t desc, const uintptr_t retaddr,
+ const int esz, const int msz, const SVEContFault fault,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ int bit55 = extract64(addr, 55, 1);
+
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /* Perform gross MTE suppression early. */
+ if (!tbi_check(desc, bit55) ||
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ mtedesc = 0;
+ }
+
+ sve_ldnfff1_r(env, vg, addr, desc, retaddr, mtedesc,
+ esz, msz, fault, host_fn, tlb_fn);
+}
+
+#define DO_LDFF1_LDNF1_1(PART, ESZ) \
+void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_FIRST, \
+ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_NO, \
+ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
+} \
+void HELPER(sve_ldff1##PART##_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_FIRST, \
+ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_NO, \
+ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \
+}
+
+#define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \
+void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \
+ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \
+ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
+} \
+void HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \
+ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \
+ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
+} \
+void HELPER(sve_ldff1##PART##_le_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \
+ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_le_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \
+ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \
+} \
+void HELPER(sve_ldff1##PART##_be_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \
+ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
+} \
+void HELPER(sve_ldnf1##PART##_be_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \
+ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \
+}
+
+DO_LDFF1_LDNF1_1(bb, MO_8)
+DO_LDFF1_LDNF1_1(bhu, MO_16)
+DO_LDFF1_LDNF1_1(bhs, MO_16)
+DO_LDFF1_LDNF1_1(bsu, MO_32)
+DO_LDFF1_LDNF1_1(bss, MO_32)
+DO_LDFF1_LDNF1_1(bdu, MO_64)
+DO_LDFF1_LDNF1_1(bds, MO_64)
+
+DO_LDFF1_LDNF1_2(hh, MO_16, MO_16)
+DO_LDFF1_LDNF1_2(hsu, MO_32, MO_16)
+DO_LDFF1_LDNF1_2(hss, MO_32, MO_16)
+DO_LDFF1_LDNF1_2(hdu, MO_64, MO_16)
+DO_LDFF1_LDNF1_2(hds, MO_64, MO_16)
+
+DO_LDFF1_LDNF1_2(ss, MO_32, MO_32)
+DO_LDFF1_LDNF1_2(sdu, MO_64, MO_32)
+DO_LDFF1_LDNF1_2(sds, MO_64, MO_32)
+
+DO_LDFF1_LDNF1_2(dd, MO_64, MO_64)
+
+#undef DO_LDFF1_LDNF1_1
+#undef DO_LDFF1_LDNF1_2
+
+/*
+ * Common helper for all contiguous 1,2,3,4-register predicated stores.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr,
+ uint32_t desc, const uintptr_t retaddr,
+ const int esz, const int msz, const int N, uint32_t mtedesc,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const unsigned rd = simd_data(desc);
+ const intptr_t reg_max = simd_oprsz(desc);
+ intptr_t reg_off, reg_last, mem_off;
+ SVEContLdSt info;
+ void *host;
+ int i, flags;
+
+ /* Find the active elements. */
+ if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
+ /* The entire predicate was false; no store occurs. */
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, retaddr);
+
+ /* Handle watchpoints for all active elements. */
+ sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
+ BP_MEM_WRITE, retaddr);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, N << msz,
+ mtedesc, retaddr);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. We cannot avoid
+ * this fault and will leave with the store incomplete.
+ */
+ mem_off = info.mem_off_first[0];
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[1];
+ if (reg_last < 0) {
+ reg_last = info.reg_off_split;
+ if (reg_last < 0) {
+ reg_last = info.reg_off_last[0];
+ }
+ }
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ return;
+#endif
+ }
+
+ mem_off = info.mem_off_first[0];
+ reg_off = info.reg_off_first[0];
+ reg_last = info.reg_off_last[0];
+ host = info.page[0].host;
+
+ while (reg_off <= reg_last) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off <= reg_last && (reg_off & 63));
+ }
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ mem_off = info.mem_off_split;
+ if (unlikely(mem_off >= 0)) {
+ reg_off = info.reg_off_split;
+ for (i = 0; i < N; ++i) {
+ tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
+ addr + mem_off + (i << msz), retaddr);
+ }
+ }
+
+ mem_off = info.mem_off_first[1];
+ if (unlikely(mem_off >= 0)) {
+ reg_off = info.reg_off_first[1];
+ reg_last = info.reg_off_last[1];
+ host = info.page[1].host;
+
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if ((pg >> (reg_off & 63)) & 1) {
+ for (i = 0; i < N; ++i) {
+ host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
+ host + mem_off + (i << msz));
+ }
+ }
+ reg_off += 1 << esz;
+ mem_off += N << msz;
+ } while (reg_off & 63);
+ } while (reg_off <= reg_last);
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr,
+ uint32_t desc, const uintptr_t ra,
+ const int esz, const int msz, const int N,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ int bit55 = extract64(addr, 55, 1);
+
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /* Perform gross MTE suppression early. */
+ if (!tbi_check(desc, bit55) ||
+ tcma_check(desc, bit55, allocation_tag_from_addr(addr))) {
+ mtedesc = 0;
+ }
+
+ sve_stN_r(env, vg, addr, desc, ra, esz, msz, N, mtedesc, host_fn, tlb_fn);
+}
+
+#define DO_STN_1(N, NAME, ESZ) \
+void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \
+ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
+} \
+void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, \
+ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \
+}
+
+#define DO_STN_2(N, NAME, ESZ, MSZ) \
+void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
+ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
+} \
+void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \
+ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
+} \
+void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \
+ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \
+} \
+void HELPER(sve_st##N##NAME##_be_r_mte)(CPUARMState *env, void *vg, \
+ target_ulong addr, uint32_t desc) \
+{ \
+ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \
+ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \
+}
+
+DO_STN_1(1, bb, MO_8)
+DO_STN_1(1, bh, MO_16)
+DO_STN_1(1, bs, MO_32)
+DO_STN_1(1, bd, MO_64)
+DO_STN_1(2, bb, MO_8)
+DO_STN_1(3, bb, MO_8)
+DO_STN_1(4, bb, MO_8)
+
+DO_STN_2(1, hh, MO_16, MO_16)
+DO_STN_2(1, hs, MO_32, MO_16)
+DO_STN_2(1, hd, MO_64, MO_16)
+DO_STN_2(2, hh, MO_16, MO_16)
+DO_STN_2(3, hh, MO_16, MO_16)
+DO_STN_2(4, hh, MO_16, MO_16)
+
+DO_STN_2(1, ss, MO_32, MO_32)
+DO_STN_2(1, sd, MO_64, MO_32)
+DO_STN_2(2, ss, MO_32, MO_32)
+DO_STN_2(3, ss, MO_32, MO_32)
+DO_STN_2(4, ss, MO_32, MO_32)
+
+DO_STN_2(1, dd, MO_64, MO_64)
+DO_STN_2(2, dd, MO_64, MO_64)
+DO_STN_2(3, dd, MO_64, MO_64)
+DO_STN_2(4, dd, MO_64, MO_64)
+
+#undef DO_STN_1
+#undef DO_STN_2
+
+/*
+ * Loads with a vector index.
+ */
+
+/*
+ * Load the element at @reg + @reg_ofs, sign or zero-extend as needed.
+ */
+typedef target_ulong zreg_off_fn(void *reg, intptr_t reg_ofs);
+
+static target_ulong off_zsu_s(void *reg, intptr_t reg_ofs)
+{
+ return *(uint32_t *)(reg + H1_4(reg_ofs));
+}
+
+static target_ulong off_zss_s(void *reg, intptr_t reg_ofs)
+{
+ return *(int32_t *)(reg + H1_4(reg_ofs));
+}
+
+static target_ulong off_zsu_d(void *reg, intptr_t reg_ofs)
+{
+ return (uint32_t)*(uint64_t *)(reg + reg_ofs);
+}
+
+static target_ulong off_zss_d(void *reg, intptr_t reg_ofs)
+{
+ return (int32_t)*(uint64_t *)(reg + reg_ofs);
+}
+
+static target_ulong off_zd_d(void *reg, intptr_t reg_ofs)
+{
+ return *(uint64_t *)(reg + reg_ofs);
+}
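+
+/*
+ * Example: with 64-bit offset elements and a scaled form (scale == msz,
+ * here assumed 3 for LD1D), an offset element of 5 yields
+ * addr == base + (5 << 3) == base + 40.  The off_zsu_d and off_zss_d
+ * variants first narrow the element to its low 32 bits, zero- or
+ * sign-extended respectively.
+ */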
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ uint32_t mtedesc, int esize, int msize,
+ zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ const intptr_t reg_max = simd_oprsz(desc);
+ const int scale = simd_data(desc);
+ ARMVectorReg scratch;
+ intptr_t reg_off;
+ SVEHostPage info, info2;
+
+ memset(&scratch, 0, reg_max);
+ reg_off = 0;
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if (likely(pg & 1)) {
+ target_ulong addr = base + (off_fn(vm, reg_off) << scale);
+ target_ulong in_page = -(addr | TARGET_PAGE_MASK);
+
+ sve_probe_page(&info, false, env, addr, 0, MMU_DATA_LOAD,
+ mmu_idx, retaddr);
+
+ if (likely(in_page >= msize)) {
+ if (unlikely(info.flags & TLB_WATCHPOINT)) {
+ cpu_check_watchpoint(env_cpu(env), addr, msize,
+ info.attrs, BP_MEM_READ, retaddr);
+ }
+ if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ mte_check(env, mtedesc, addr, retaddr);
+ }
+ host_fn(&scratch, reg_off, info.host);
+ } else {
+ /* Element crosses the page boundary. */
+ sve_probe_page(&info2, false, env, addr + in_page, 0,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ if (unlikely((info.flags | info2.flags) & TLB_WATCHPOINT)) {
+ cpu_check_watchpoint(env_cpu(env), addr,
+ msize, info.attrs,
+ BP_MEM_READ, retaddr);
+ }
+ if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ mte_check(env, mtedesc, addr, retaddr);
+ }
+ tlb_fn(env, &scratch, reg_off, addr, retaddr);
+ }
+ }
+ reg_off += esize;
+ pg >>= esize;
+ } while (reg_off & 63);
+ } while (reg_off < reg_max);
+
+ /* Wait until all exceptions have been raised to write back. */
+ memcpy(vd, &scratch, reg_max);
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ int esize, int msize, zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /*
+ * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+ * offset base entirely over the address space hole to change the
+ * pointer tag, or change the bit55 selector. So we could here
+ * examine TBI + TCMA like we do for sve_ldN_r_mte().
+ */
+ sve_ld1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+ esize, msize, off_fn, host_fn, tlb_fn);
+}
+
+#define DO_LD1_ZPZ_S(MEM, OFS, MSZ) \
+void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
+ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
+ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+}
+
+#define DO_LD1_ZPZ_D(MEM, OFS, MSZ) \
+void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
+ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
+ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+}
+
+DO_LD1_ZPZ_S(bsu, zsu, MO_8)
+DO_LD1_ZPZ_S(bsu, zss, MO_8)
+DO_LD1_ZPZ_D(bdu, zsu, MO_8)
+DO_LD1_ZPZ_D(bdu, zss, MO_8)
+DO_LD1_ZPZ_D(bdu, zd, MO_8)
+
+DO_LD1_ZPZ_S(bss, zsu, MO_8)
+DO_LD1_ZPZ_S(bss, zss, MO_8)
+DO_LD1_ZPZ_D(bds, zsu, MO_8)
+DO_LD1_ZPZ_D(bds, zss, MO_8)
+DO_LD1_ZPZ_D(bds, zd, MO_8)
+
+DO_LD1_ZPZ_S(hsu_le, zsu, MO_16)
+DO_LD1_ZPZ_S(hsu_le, zss, MO_16)
+DO_LD1_ZPZ_D(hdu_le, zsu, MO_16)
+DO_LD1_ZPZ_D(hdu_le, zss, MO_16)
+DO_LD1_ZPZ_D(hdu_le, zd, MO_16)
+
+DO_LD1_ZPZ_S(hsu_be, zsu, MO_16)
+DO_LD1_ZPZ_S(hsu_be, zss, MO_16)
+DO_LD1_ZPZ_D(hdu_be, zsu, MO_16)
+DO_LD1_ZPZ_D(hdu_be, zss, MO_16)
+DO_LD1_ZPZ_D(hdu_be, zd, MO_16)
+
+DO_LD1_ZPZ_S(hss_le, zsu, MO_16)
+DO_LD1_ZPZ_S(hss_le, zss, MO_16)
+DO_LD1_ZPZ_D(hds_le, zsu, MO_16)
+DO_LD1_ZPZ_D(hds_le, zss, MO_16)
+DO_LD1_ZPZ_D(hds_le, zd, MO_16)
+
+DO_LD1_ZPZ_S(hss_be, zsu, MO_16)
+DO_LD1_ZPZ_S(hss_be, zss, MO_16)
+DO_LD1_ZPZ_D(hds_be, zsu, MO_16)
+DO_LD1_ZPZ_D(hds_be, zss, MO_16)
+DO_LD1_ZPZ_D(hds_be, zd, MO_16)
+
+DO_LD1_ZPZ_S(ss_le, zsu, MO_32)
+DO_LD1_ZPZ_S(ss_le, zss, MO_32)
+DO_LD1_ZPZ_D(sdu_le, zsu, MO_32)
+DO_LD1_ZPZ_D(sdu_le, zss, MO_32)
+DO_LD1_ZPZ_D(sdu_le, zd, MO_32)
+
+DO_LD1_ZPZ_S(ss_be, zsu, MO_32)
+DO_LD1_ZPZ_S(ss_be, zss, MO_32)
+DO_LD1_ZPZ_D(sdu_be, zsu, MO_32)
+DO_LD1_ZPZ_D(sdu_be, zss, MO_32)
+DO_LD1_ZPZ_D(sdu_be, zd, MO_32)
+
+DO_LD1_ZPZ_D(sds_le, zsu, MO_32)
+DO_LD1_ZPZ_D(sds_le, zss, MO_32)
+DO_LD1_ZPZ_D(sds_le, zd, MO_32)
+
+DO_LD1_ZPZ_D(sds_be, zsu, MO_32)
+DO_LD1_ZPZ_D(sds_be, zss, MO_32)
+DO_LD1_ZPZ_D(sds_be, zd, MO_32)
+
+DO_LD1_ZPZ_D(dd_le, zsu, MO_64)
+DO_LD1_ZPZ_D(dd_le, zss, MO_64)
+DO_LD1_ZPZ_D(dd_le, zd, MO_64)
+
+DO_LD1_ZPZ_D(dd_be, zsu, MO_64)
+DO_LD1_ZPZ_D(dd_be, zss, MO_64)
+DO_LD1_ZPZ_D(dd_be, zd, MO_64)
+
+#undef DO_LD1_ZPZ_S
+#undef DO_LD1_ZPZ_D
+
+/* First fault loads with a vector index. */
+
+/*
+ * Common helpers for all gather first-faulting loads.
+ */
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ uint32_t mtedesc, const int esz, const int msz,
+ zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ const intptr_t reg_max = simd_oprsz(desc);
+ const int scale = simd_data(desc);
+ const int esize = 1 << esz;
+ const int msize = 1 << msz;
+ intptr_t reg_off;
+ SVEHostPage info;
+ target_ulong addr, in_page;
+
+ /* Skip to the first true predicate. */
+ reg_off = find_next_active(vg, 0, reg_max, esz);
+ if (unlikely(reg_off >= reg_max)) {
+ /* The entire predicate was false; no load occurs. */
+ memset(vd, 0, reg_max);
+ return;
+ }
+
+ /*
+ * Probe the first element, allowing faults.
+ */
+ addr = base + (off_fn(vm, reg_off) << scale);
+ if (mtedesc) {
+ mte_check(env, mtedesc, addr, retaddr);
+ }
+ tlb_fn(env, vd, reg_off, addr, retaddr);
+
+ /* After any fault, zero the other elements. */
+ swap_memzero(vd, reg_off);
+ reg_off += esize;
+ swap_memzero(vd + reg_off, reg_max - reg_off);
+
+ /*
+ * Probe the remaining elements, not allowing faults.
+ */
+ while (reg_off < reg_max) {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ if (likely((pg >> (reg_off & 63)) & 1)) {
+ addr = base + (off_fn(vm, reg_off) << scale);
+ in_page = -(addr | TARGET_PAGE_MASK);
+
+ if (unlikely(in_page < msize)) {
+ /* Stop if the element crosses a page boundary. */
+ goto fault;
+ }
+
+ sve_probe_page(&info, true, env, addr, 0, MMU_DATA_LOAD,
+ mmu_idx, retaddr);
+ if (unlikely(info.flags & (TLB_INVALID_MASK | TLB_MMIO))) {
+ goto fault;
+ }
+ if (unlikely(info.flags & TLB_WATCHPOINT) &&
+ (cpu_watchpoint_address_matches
+ (env_cpu(env), addr, msize) & BP_MEM_READ)) {
+ goto fault;
+ }
+ if (mtedesc &&
+ arm_tlb_mte_tagged(&info.attrs) &&
+ !mte_probe(env, mtedesc, addr)) {
+ goto fault;
+ }
+
+ host_fn(vd, reg_off, info.host);
+ }
+ reg_off += esize;
+ } while (reg_off & 63);
+ }
+ return;
+
+ fault:
+ record_fault(env, reg_off, reg_max);
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ const int esz, const int msz,
+ zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /*
+ * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+ * offset base entirely over the address space hole to change the
+ * pointer tag, or change the bit55 selector. So we could here
+ * examine TBI + TCMA like we do for sve_ldN_r_mte().
+ */
+ sve_ldff1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+ esz, msz, off_fn, host_fn, tlb_fn);
+}
+
+#define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \
+void HELPER(sve_ldff##MEM##_##OFS) \
+ (CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_32, MSZ, \
+ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ldff##MEM##_##OFS##_mte) \
+ (CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \
+ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+}
+
+#define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \
+void HELPER(sve_ldff##MEM##_##OFS) \
+ (CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_64, MSZ, \
+ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+} \
+void HELPER(sve_ldff##MEM##_##OFS##_mte) \
+ (CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \
+ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \
+}
+
+DO_LDFF1_ZPZ_S(bsu, zsu, MO_8)
+DO_LDFF1_ZPZ_S(bsu, zss, MO_8)
+DO_LDFF1_ZPZ_D(bdu, zsu, MO_8)
+DO_LDFF1_ZPZ_D(bdu, zss, MO_8)
+DO_LDFF1_ZPZ_D(bdu, zd, MO_8)
+
+DO_LDFF1_ZPZ_S(bss, zsu, MO_8)
+DO_LDFF1_ZPZ_S(bss, zss, MO_8)
+DO_LDFF1_ZPZ_D(bds, zsu, MO_8)
+DO_LDFF1_ZPZ_D(bds, zss, MO_8)
+DO_LDFF1_ZPZ_D(bds, zd, MO_8)
+
+DO_LDFF1_ZPZ_S(hsu_le, zsu, MO_16)
+DO_LDFF1_ZPZ_S(hsu_le, zss, MO_16)
+DO_LDFF1_ZPZ_D(hdu_le, zsu, MO_16)
+DO_LDFF1_ZPZ_D(hdu_le, zss, MO_16)
+DO_LDFF1_ZPZ_D(hdu_le, zd, MO_16)
+
+DO_LDFF1_ZPZ_S(hsu_be, zsu, MO_16)
+DO_LDFF1_ZPZ_S(hsu_be, zss, MO_16)
+DO_LDFF1_ZPZ_D(hdu_be, zsu, MO_16)
+DO_LDFF1_ZPZ_D(hdu_be, zss, MO_16)
+DO_LDFF1_ZPZ_D(hdu_be, zd, MO_16)
+
+DO_LDFF1_ZPZ_S(hss_le, zsu, MO_16)
+DO_LDFF1_ZPZ_S(hss_le, zss, MO_16)
+DO_LDFF1_ZPZ_D(hds_le, zsu, MO_16)
+DO_LDFF1_ZPZ_D(hds_le, zss, MO_16)
+DO_LDFF1_ZPZ_D(hds_le, zd, MO_16)
+
+DO_LDFF1_ZPZ_S(hss_be, zsu, MO_16)
+DO_LDFF1_ZPZ_S(hss_be, zss, MO_16)
+DO_LDFF1_ZPZ_D(hds_be, zsu, MO_16)
+DO_LDFF1_ZPZ_D(hds_be, zss, MO_16)
+DO_LDFF1_ZPZ_D(hds_be, zd, MO_16)
+
+DO_LDFF1_ZPZ_S(ss_le, zsu, MO_32)
+DO_LDFF1_ZPZ_S(ss_le, zss, MO_32)
+DO_LDFF1_ZPZ_D(sdu_le, zsu, MO_32)
+DO_LDFF1_ZPZ_D(sdu_le, zss, MO_32)
+DO_LDFF1_ZPZ_D(sdu_le, zd, MO_32)
+
+DO_LDFF1_ZPZ_S(ss_be, zsu, MO_32)
+DO_LDFF1_ZPZ_S(ss_be, zss, MO_32)
+DO_LDFF1_ZPZ_D(sdu_be, zsu, MO_32)
+DO_LDFF1_ZPZ_D(sdu_be, zss, MO_32)
+DO_LDFF1_ZPZ_D(sdu_be, zd, MO_32)
+
+DO_LDFF1_ZPZ_D(sds_le, zsu, MO_32)
+DO_LDFF1_ZPZ_D(sds_le, zss, MO_32)
+DO_LDFF1_ZPZ_D(sds_le, zd, MO_32)
+
+DO_LDFF1_ZPZ_D(sds_be, zsu, MO_32)
+DO_LDFF1_ZPZ_D(sds_be, zss, MO_32)
+DO_LDFF1_ZPZ_D(sds_be, zd, MO_32)
+
+DO_LDFF1_ZPZ_D(dd_le, zsu, MO_64)
+DO_LDFF1_ZPZ_D(dd_le, zss, MO_64)
+DO_LDFF1_ZPZ_D(dd_le, zd, MO_64)
+
+DO_LDFF1_ZPZ_D(dd_be, zsu, MO_64)
+DO_LDFF1_ZPZ_D(dd_be, zss, MO_64)
+DO_LDFF1_ZPZ_D(dd_be, zd, MO_64)
+
+/* Stores with a vector index. */
+
+static inline QEMU_ALWAYS_INLINE
+void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ uint32_t mtedesc, int esize, int msize,
+ zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ const intptr_t reg_max = simd_oprsz(desc);
+ const int scale = simd_data(desc);
+ void *host[ARM_MAX_VQ * 4];
+ intptr_t reg_off, i;
+ SVEHostPage info, info2;
+
+ /*
+ * Probe all of the elements for host addresses and flags.
+ */
+ i = reg_off = 0;
+ do {
+ uint64_t pg = vg[reg_off >> 6];
+ do {
+ target_ulong addr = base + (off_fn(vm, reg_off) << scale);
+ target_ulong in_page = -(addr | TARGET_PAGE_MASK);
+
+ host[i] = NULL;
+ if (likely((pg >> (reg_off & 63)) & 1)) {
+ if (likely(in_page >= msize)) {
+ sve_probe_page(&info, false, env, addr, 0, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ host[i] = info.host;
+ } else {
+ /*
+ * Element crosses the page boundary.
+ * Probe both pages, but do not record the host address,
+ * so that we use the slow path.
+ */
+ sve_probe_page(&info, false, env, addr, 0,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ sve_probe_page(&info2, false, env, addr + in_page, 0,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ info.flags |= info2.flags;
+ }
+
+ if (unlikely(info.flags & TLB_WATCHPOINT)) {
+ cpu_check_watchpoint(env_cpu(env), addr, msize,
+ info.attrs, BP_MEM_WRITE, retaddr);
+ }
+
+ if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+ mte_check(env, mtedesc, addr, retaddr);
+ }
+ }
+ i += 1;
+ reg_off += esize;
+ } while (reg_off & 63);
+ } while (reg_off < reg_max);
+
+ /*
+ * Now that we have recognized all exceptions except SyncExternal
+ * (from TLB_MMIO), which we cannot avoid, perform all of the stores.
+ *
+ * Note for the common case of an element in RAM, not crossing a page
+ * boundary, we have stored the host address in host[]. This doubles
+ * as a first-level check against the predicate, since only enabled
+ * elements have non-null host addresses.
+ */
+ i = reg_off = 0;
+ do {
+ void *h = host[i];
+ if (likely(h != NULL)) {
+ host_fn(vd, reg_off, h);
+ } else if ((vg[reg_off >> 6] >> (reg_off & 63)) & 1) {
+ target_ulong addr = base + (off_fn(vm, reg_off) << scale);
+ tlb_fn(env, vd, reg_off, addr, retaddr);
+ }
+ i += 1;
+ reg_off += esize;
+ } while (reg_off < reg_max);
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
+ target_ulong base, uint32_t desc, uintptr_t retaddr,
+ int esize, int msize, zreg_off_fn *off_fn,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ /* Remove mtedesc from the normal sve descriptor. */
+ desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+
+ /*
+ * ??? TODO: For the 32-bit offset extractions, base + ofs cannot
+ * offset base entirely over the address space hole to change the
+ * pointer tag, or change the bit55 selector. So we could here
+ * examine TBI + TCMA like we do for sve_ldN_r_mte().
+ */
+ sve_st1_z(env, vd, vg, vm, base, desc, retaddr, mtedesc,
+ esize, msize, off_fn, host_fn, tlb_fn);
+}
+
+#define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \
+void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \
+ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+} \
+void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \
+ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+}
+
+#define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \
+void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \
+ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+} \
+void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \
+ void *vm, target_ulong base, uint32_t desc) \
+{ \
+ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \
+ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \
+}
+
+DO_ST1_ZPZ_S(bs, zsu, MO_8)
+DO_ST1_ZPZ_S(hs_le, zsu, MO_16)
+DO_ST1_ZPZ_S(hs_be, zsu, MO_16)
+DO_ST1_ZPZ_S(ss_le, zsu, MO_32)
+DO_ST1_ZPZ_S(ss_be, zsu, MO_32)
+
+DO_ST1_ZPZ_S(bs, zss, MO_8)
+DO_ST1_ZPZ_S(hs_le, zss, MO_16)
+DO_ST1_ZPZ_S(hs_be, zss, MO_16)
+DO_ST1_ZPZ_S(ss_le, zss, MO_32)
+DO_ST1_ZPZ_S(ss_be, zss, MO_32)
+
+DO_ST1_ZPZ_D(bd, zsu, MO_8)
+DO_ST1_ZPZ_D(hd_le, zsu, MO_16)
+DO_ST1_ZPZ_D(hd_be, zsu, MO_16)
+DO_ST1_ZPZ_D(sd_le, zsu, MO_32)
+DO_ST1_ZPZ_D(sd_be, zsu, MO_32)
+DO_ST1_ZPZ_D(dd_le, zsu, MO_64)
+DO_ST1_ZPZ_D(dd_be, zsu, MO_64)
+
+DO_ST1_ZPZ_D(bd, zss, MO_8)
+DO_ST1_ZPZ_D(hd_le, zss, MO_16)
+DO_ST1_ZPZ_D(hd_be, zss, MO_16)
+DO_ST1_ZPZ_D(sd_le, zss, MO_32)
+DO_ST1_ZPZ_D(sd_be, zss, MO_32)
+DO_ST1_ZPZ_D(dd_le, zss, MO_64)
+DO_ST1_ZPZ_D(dd_be, zss, MO_64)
+
+DO_ST1_ZPZ_D(bd, zd, MO_8)
+DO_ST1_ZPZ_D(hd_le, zd, MO_16)
+DO_ST1_ZPZ_D(hd_be, zd, MO_16)
+DO_ST1_ZPZ_D(sd_le, zd, MO_32)
+DO_ST1_ZPZ_D(sd_be, zd, MO_32)
+DO_ST1_ZPZ_D(dd_le, zd, MO_64)
+DO_ST1_ZPZ_D(dd_be, zd, MO_64)
+
+#undef DO_ST1_ZPZ_S
+#undef DO_ST1_ZPZ_D
+
+void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = n[i] ^ m[i] ^ k[i];
+ }
+}
+
+void HELPER(sve2_bcax)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = n[i] ^ (m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_bsl1n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = (~n[i] & k[i]) | (m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_bsl2n)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = (n[i] & k[i]) | (~m[i] & ~k[i]);
+ }
+}
+
+void HELPER(sve2_nbsl)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ uint64_t *d = vd, *n = vn, *m = vm, *k = vk;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ~((n[i] & k[i]) | (m[i] & ~k[i]));
+ }
+}
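+
+/*
+ * BSL1N, BSL2N and NBSL above are variants of the basic bitwise select
+ * d = (n & k) | (m & ~k), which takes each bit from N where the matching
+ * bit of K is set and from M otherwise: BSL1N inverts N first, BSL2N
+ * inverts M, and NBSL inverts the result.  Illustrative example for the
+ * basic select, with n = 0xf0, m = 0x0f, k = 0xcc:
+ *   (n & k) | (m & ~k) = 0xc0 | 0x03 = 0xc3.
+ */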
+
+/*
+ * Returns true if m0 or m1 contains the low uint8_t/uint16_t in n.
+ * See hasless(v,1) from
+ * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ */
+static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
+{
+ int bits = 8 << esz;
+ uint64_t ones = dup_const(esz, 1);
+ uint64_t signs = ones << (bits - 1);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(esz, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+ cmp0 = (cmp0 - ones) & ~cmp0;
+ cmp1 = (cmp1 - ones) & ~cmp1;
+ return (cmp0 | cmp1) & signs;
+}
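+
+/*
+ * Worked example of the trick above, for esz == MO_8 and an n byte of
+ * 0x42: cmp0 = dup(0x42) ^ m0 has a zero byte exactly where m0 holds
+ * 0x42.  For a zero byte x == 0x00, (x - 0x01) & ~x = 0xff & 0xff sets
+ * bit 7; for any non-zero byte, e.g. x == 0x05, 0x04 & 0xfa = 0x00
+ * leaves it clear.  The mask with signs (0x8080...80) is therefore
+ * non-zero iff some byte (or halfword) matched.
+ */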
+
+static inline uint32_t do_match(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc, int esz, bool nmatch)
+{
+ uint16_t esz_mask = pred_esz_masks[esz];
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint32_t flags = PREDTEST_INIT;
+ intptr_t i, j, k;
+
+ for (i = 0; i < opr_sz; i += 16) {
+ uint64_t m0 = *(uint64_t *)(vm + i);
+ uint64_t m1 = *(uint64_t *)(vm + i + 8);
+ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)) & esz_mask;
+ uint16_t out = 0;
+
+ for (j = 0; j < 16; j += 8) {
+ uint64_t n = *(uint64_t *)(vn + i + j);
+
+ for (k = 0; k < 8; k += 1 << esz) {
+ if (pg & (1 << (j + k))) {
+ bool o = do_match2(n >> (k * 8), m0, m1, esz);
+ out |= (o ^ nmatch) << (j + k);
+ }
+ }
+ }
+ *(uint16_t *)(vd + H1_2(i >> 3)) = out;
+ flags = iter_predtest_fwd(out, pg, flags);
+ }
+ return flags;
+}
+
+#define DO_PPZZ_MATCH(NAME, ESZ, INV) \
+uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
+{ \
+ return do_match(vd, vn, vm, vg, desc, ESZ, INV); \
+}
+
+DO_PPZZ_MATCH(sve2_match_ppzz_b, MO_8, false)
+DO_PPZZ_MATCH(sve2_match_ppzz_h, MO_16, false)
+
+DO_PPZZ_MATCH(sve2_nmatch_ppzz_b, MO_8, true)
+DO_PPZZ_MATCH(sve2_nmatch_ppzz_h, MO_16, true)
+
+#undef DO_PPZZ_MATCH
+
+void HELPER(sve2_histcnt_s)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc)
+{
+ ARMVectorReg scratch;
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ if (d == n) {
+ n = memcpy(&scratch, n, opr_sz);
+ if (d == m) {
+ m = n;
+ }
+ } else if (d == m) {
+ m = memcpy(&scratch, m, opr_sz);
+ }
+
+ for (i = 0; i < opr_sz; i += 4) {
+ uint64_t count = 0;
+ uint8_t pred;
+
+ pred = pg[H1(i >> 3)] >> (i & 7);
+ if (pred & 1) {
+ uint32_t nn = n[H4(i >> 2)];
+
+ for (j = 0; j <= i; j += 4) {
+ pred = pg[H1(j >> 3)] >> (j & 7);
+ if ((pred & 1) && nn == m[H4(j >> 2)]) {
+ ++count;
+ }
+ }
+ }
+ d[H4(i >> 2)] = count;
+ }
+}
+
+void HELPER(sve2_histcnt_d)(void *vd, void *vn, void *vm, void *vg,
+ uint32_t desc)
+{
+ ARMVectorReg scratch;
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint8_t *pg = vg;
+
+ if (d == n) {
+ n = memcpy(&scratch, n, opr_sz);
+ if (d == m) {
+ m = n;
+ }
+ } else if (d == m) {
+ m = memcpy(&scratch, m, opr_sz);
+ }
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint64_t count = 0;
+ if (pg[H1(i)] & 1) {
+ uint64_t nn = n[i];
+ for (j = 0; j <= i; ++j) {
+ if ((pg[H1(j)] & 1) && nn == m[j]) {
+ ++count;
+ }
+ }
+ }
+ d[i] = count;
+ }
+}
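+
+/*
+ * In both HISTCNT helpers above, active element i of the result counts
+ * the active elements j <= i for which m[j] == n[i]; inactive elements
+ * are zeroed.  E.g. with all elements active, n = m = {3, 7, 3, 3}
+ * gives d = {1, 1, 2, 3}.
+ */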
+
+/*
+ * Returns the number of bytes in m0 and m1 that match n.
+ * Unlike do_match2 we don't just need true/false, we need an exact count.
+ * This requires two extra logical operations.
+ */
+static inline uint64_t do_histseg_cnt(uint8_t n, uint64_t m0, uint64_t m1)
+{
+ const uint64_t mask = dup_const(MO_8, 0x7f);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(MO_8, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+
+ /*
+ * 1: clear msb of each byte to avoid carry to next byte (& mask)
+ * 2: carry in to msb if byte != 0 (+ mask)
+ * 3: set msb if cmp has msb set (| cmp)
+ * 4: set ~msb to ignore them (| mask)
+ * We now have 0xff for byte != 0 or 0x7f for byte == 0.
+ * 5: invert, resulting in 0x80 if and only if byte == 0.
+ */
+ cmp0 = ~(((cmp0 & mask) + mask) | cmp0 | mask);
+ cmp1 = ~(((cmp1 & mask) + mask) | cmp1 | mask);
+
+ /*
+ * Combine the two compares in a way that the bits do
+ * not overlap, and so preserves the count of set bits.
+ * If the host has an efficient instruction for ctpop,
+ * then ctpop(x) + ctpop(y) has the same number of
+ * operations as ctpop(x | (y >> 1)). If the host does
+ * not have an efficient ctpop, then we only want to
+ * use it once.
+ */
+ return ctpop64(cmp0 | (cmp1 >> 1));
+}
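+
+/*
+ * Tracing one byte through the steps above: for a matching byte,
+ * cmp == 0x00, so ~(((0x00 & 0x7f) + 0x7f) | 0x00 | 0x7f) = ~0x7f =
+ * 0x80; for a non-matching byte such as cmp == 0x05, ~(0x84 | 0x05
+ * | 0x7f) = ~0xff = 0x00.  Shifting cmp1 right by one moves its 0x80
+ * markers to 0x40, so the two sets of markers cannot collide and a
+ * single ctpop64 counts both.
+ */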
+
+void HELPER(sve2_histseg)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j;
+ intptr_t opr_sz = simd_oprsz(desc);
+
+ for (i = 0; i < opr_sz; i += 16) {
+ uint64_t n0 = *(uint64_t *)(vn + i);
+ uint64_t m0 = *(uint64_t *)(vm + i);
+ uint64_t n1 = *(uint64_t *)(vn + i + 8);
+ uint64_t m1 = *(uint64_t *)(vm + i + 8);
+ uint64_t out0 = 0;
+ uint64_t out1 = 0;
+
+ for (j = 0; j < 64; j += 8) {
+ uint64_t cnt0 = do_histseg_cnt(n0 >> j, m0, m1);
+ uint64_t cnt1 = do_histseg_cnt(n1 >> j, m0, m1);
+ out0 |= cnt0 << j;
+ out1 |= cnt1 << j;
+ }
+
+ *(uint64_t *)(vd + i) = out0;
+ *(uint64_t *)(vd + i + 8) = out1;
+ }
+}
+
+void HELPER(sve2_xar_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ int shl = 8 - shr;
+ uint64_t mask = dup_const(MO_8, 0xff >> shr);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ uint64_t t = n[i] ^ m[i];
+ d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+ }
+}
+
+void HELPER(sve2_xar_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ int shl = 16 - shr;
+ uint64_t mask = dup_const(MO_16, 0xffff >> shr);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ uint64_t t = n[i] ^ m[i];
+ d[i] = ((t >> shr) & mask) | ((t << shl) & ~mask);
+ }
+}
+
+void HELPER(sve2_xar_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 4;
+ int shr = simd_data(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ror32(n[i] ^ m[i], shr);
+ }
+}
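+
+/*
+ * XAR is an XOR followed by a rotate right within each element.  The
+ * byte and halfword forms above emulate the per-lane rotate on 64-bit
+ * chunks with a pair of shifts and a mask: e.g. for 8-bit lanes with
+ * shr == 3, a lane value n ^ m == 0xb4 becomes
+ * (0xb4 >> 3) | (0xb4 << 5) within the lane, i.e. 0x16 | 0x80 = 0x96.
+ */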
+
+void HELPER(fmmla_s)(void *vd, void *vn, void *vm, void *va,
+ void *status, uint32_t desc)
+{
+ intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float32) * 4);
+
+ for (s = 0; s < opr_sz; ++s) {
+ float32 *n = vn + s * sizeof(float32) * 4;
+ float32 *m = vm + s * sizeof(float32) * 4;
+ float32 *a = va + s * sizeof(float32) * 4;
+ float32 *d = vd + s * sizeof(float32) * 4;
+ float32 n00 = n[H4(0)], n01 = n[H4(1)];
+ float32 n10 = n[H4(2)], n11 = n[H4(3)];
+ float32 m00 = m[H4(0)], m01 = m[H4(1)];
+ float32 m10 = m[H4(2)], m11 = m[H4(3)];
+ float32 p0, p1;
+
+ /* i = 0, j = 0 */
+ p0 = float32_mul(n00, m00, status);
+ p1 = float32_mul(n01, m01, status);
+ d[H4(0)] = float32_add(a[H4(0)], float32_add(p0, p1, status), status);
+
+ /* i = 0, j = 1 */
+ p0 = float32_mul(n00, m10, status);
+ p1 = float32_mul(n01, m11, status);
+ d[H4(1)] = float32_add(a[H4(1)], float32_add(p0, p1, status), status);
+
+ /* i = 1, j = 0 */
+ p0 = float32_mul(n10, m00, status);
+ p1 = float32_mul(n11, m01, status);
+ d[H4(2)] = float32_add(a[H4(2)], float32_add(p0, p1, status), status);
+
+ /* i = 1, j = 1 */
+ p0 = float32_mul(n10, m10, status);
+ p1 = float32_mul(n11, m11, status);
+ d[H4(3)] = float32_add(a[H4(3)], float32_add(p0, p1, status), status);
+ }
+}
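+
+/*
+ * Viewed as 2x2 row-major matrices per 128-bit (float32) or 256-bit
+ * (float64) segment, the FMMLA helpers compute D = A + N * M^T:
+ * e.g. d[0] = a[0] + n00*m00 + n01*m01 is row 0 of N dotted with
+ * row 0 of M.
+ */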
+
+void HELPER(fmmla_d)(void *vd, void *vn, void *vm, void *va,
+ void *status, uint32_t desc)
+{
+ intptr_t s, opr_sz = simd_oprsz(desc) / (sizeof(float64) * 4);
+
+ for (s = 0; s < opr_sz; ++s) {
+ float64 *n = vn + s * sizeof(float64) * 4;
+ float64 *m = vm + s * sizeof(float64) * 4;
+ float64 *a = va + s * sizeof(float64) * 4;
+ float64 *d = vd + s * sizeof(float64) * 4;
+ float64 n00 = n[0], n01 = n[1], n10 = n[2], n11 = n[3];
+ float64 m00 = m[0], m01 = m[1], m10 = m[2], m11 = m[3];
+ float64 p0, p1;
+
+ /* i = 0, j = 0 */
+ p0 = float64_mul(n00, m00, status);
+ p1 = float64_mul(n01, m01, status);
+ d[0] = float64_add(a[0], float64_add(p0, p1, status), status);
+
+ /* i = 0, j = 1 */
+ p0 = float64_mul(n00, m10, status);
+ p1 = float64_mul(n01, m11, status);
+ d[1] = float64_add(a[1], float64_add(p0, p1, status), status);
+
+ /* i = 1, j = 0 */
+ p0 = float64_mul(n10, m00, status);
+ p1 = float64_mul(n11, m01, status);
+ d[2] = float64_add(a[2], float64_add(p0, p1, status), status);
+
+ /* i = 1, j = 1 */
+ p0 = float64_mul(n10, m10, status);
+ p1 = float64_mul(n11, m11, status);
+ d[3] = float64_add(a[3], float64_add(p0, p1, status), status);
+ }
+}
+
+#define DO_FCVTNT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPEW); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPEW nn = *(TYPEW *)(vn + HW(i)); \
+ *(TYPEN *)(vd + HN(i + sizeof(TYPEN))) = OP(nn, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+DO_FCVTNT(sve_bfcvtnt, uint32_t, uint16_t, H1_4, H1_2, float32_to_bfloat16)
+DO_FCVTNT(sve2_fcvtnt_sh, uint32_t, uint16_t, H1_4, H1_2, sve_f32_to_f16)
+DO_FCVTNT(sve2_fcvtnt_ds, uint64_t, uint32_t, H1_8, H1_4, float64_to_float32)
+
+#define DO_FCVTLT(NAME, TYPEW, TYPEN, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, void *status, uint32_t desc) \
+{ \
+ intptr_t i = simd_oprsz(desc); \
+ uint64_t *g = vg; \
+ do { \
+ uint64_t pg = g[(i - 1) >> 6]; \
+ do { \
+ i -= sizeof(TYPEW); \
+ if (likely((pg >> (i & 63)) & 1)) { \
+ TYPEN nn = *(TYPEN *)(vn + HN(i + sizeof(TYPEN))); \
+ *(TYPEW *)(vd + HW(i)) = OP(nn, status); \
+ } \
+ } while (i & 63); \
+ } while (i != 0); \
+}
+
+DO_FCVTLT(sve2_fcvtlt_hs, uint32_t, uint16_t, H1_4, H1_2, sve_f16_to_f32)
+DO_FCVTLT(sve2_fcvtlt_sd, uint64_t, uint32_t, H1_8, H1_4, float32_to_float64)
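+
+/*
+ * Note the converse data movement in the two macros above: FCVTNT
+ * narrows each wide element and stores the result in the top half of
+ * the corresponding destination slot, leaving the bottom halves of vd
+ * untouched, while FCVTLT reads the top half of each wide slot and
+ * widens it into the full destination element.
+ */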
+
+#undef DO_FCVTLT
+#undef DO_FCVTNT
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
new file mode 100644
index 000000000..f30f4130a
--- /dev/null
+++ b/target/arm/syndrome.h
@@ -0,0 +1,285 @@
+/*
+ * QEMU ARM CPU -- syndrome functions and types
+ *
+ * Copyright (c) 2014 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This header defines functions, types, etc which need to be shared
+ * between different source files within target/arm/ but which are
+ * private to it and not required by the rest of QEMU.
+ */
+
+#ifndef TARGET_ARM_SYNDROME_H
+#define TARGET_ARM_SYNDROME_H
+
+/* Valid Syndrome Register EC field values */
+enum arm_exception_class {
+ EC_UNCATEGORIZED = 0x00,
+ EC_WFX_TRAP = 0x01,
+ EC_CP15RTTRAP = 0x03,
+ EC_CP15RRTTRAP = 0x04,
+ EC_CP14RTTRAP = 0x05,
+ EC_CP14DTTRAP = 0x06,
+ EC_ADVSIMDFPACCESSTRAP = 0x07,
+ EC_FPIDTRAP = 0x08,
+ EC_PACTRAP = 0x09,
+ EC_BXJTRAP = 0x0a,
+ EC_CP14RRTTRAP = 0x0c,
+ EC_BTITRAP = 0x0d,
+ EC_ILLEGALSTATE = 0x0e,
+ EC_AA32_SVC = 0x11,
+ EC_AA32_HVC = 0x12,
+ EC_AA32_SMC = 0x13,
+ EC_AA64_SVC = 0x15,
+ EC_AA64_HVC = 0x16,
+ EC_AA64_SMC = 0x17,
+ EC_SYSTEMREGISTERTRAP = 0x18,
+ EC_SVEACCESSTRAP = 0x19,
+ EC_INSNABORT = 0x20,
+ EC_INSNABORT_SAME_EL = 0x21,
+ EC_PCALIGNMENT = 0x22,
+ EC_DATAABORT = 0x24,
+ EC_DATAABORT_SAME_EL = 0x25,
+ EC_SPALIGNMENT = 0x26,
+ EC_AA32_FPTRAP = 0x28,
+ EC_AA64_FPTRAP = 0x2c,
+ EC_SERROR = 0x2f,
+ EC_BREAKPOINT = 0x30,
+ EC_BREAKPOINT_SAME_EL = 0x31,
+ EC_SOFTWARESTEP = 0x32,
+ EC_SOFTWARESTEP_SAME_EL = 0x33,
+ EC_WATCHPOINT = 0x34,
+ EC_WATCHPOINT_SAME_EL = 0x35,
+ EC_AA32_BKPT = 0x38,
+ EC_VECTORCATCH = 0x3a,
+ EC_AA64_BKPT = 0x3c,
+};
+
+#define ARM_EL_EC_SHIFT 26
+#define ARM_EL_IL_SHIFT 25
+#define ARM_EL_ISV_SHIFT 24
+#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
+#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
+
+static inline uint32_t syn_get_ec(uint32_t syn)
+{
+ return syn >> ARM_EL_EC_SHIFT;
+}
+
+/*
+ * Utility functions for constructing various kinds of syndrome value.
+ * Note that in general we follow the AArch64 syndrome values; in a
+ * few cases the value in HSR for exceptions taken to AArch32 Hyp
+ * mode differs slightly, and we fix this up when populating HSR in
+ * arm_cpu_do_interrupt_aarch32_hyp().
+ * The exception is FP/SIMD access traps -- these report extra information
+ * when taking an exception to AArch32. For those we include the extra coproc
+ * and TA fields, and mask them out when taking the exception to AArch64.
+ */
+static inline uint32_t syn_uncategorized(void)
+{
+ return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+static inline uint32_t syn_aa64_svc(uint32_t imm16)
+{
+ return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
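+
+/*
+ * For example, an AArch64 "SVC #0" produces syndrome 0x56000000:
+ * EC 0x15 in bits [31:26], the IL bit, and imm16 == 0.
+ */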
+
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+ return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+ return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
+{
+ return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_16bit ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
+{
+ return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_smc(void)
+{
+ return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
+{
+ return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
+{
+ return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_16bit ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
+ int crn, int crm, int rt,
+ int isread)
+{
+ return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
+ | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
+ | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_16bit)
+{
+ return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_16bit)
+{
+ return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_16bit)
+{
+ return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_16bit)
+{
+ return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
+{
+    /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0, coproc == 0xa */
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | 0xa;
+}
+
+static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
+{
+    /* AArch32 SIMD trap: TA == 1, coproc == 0 */
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (1 << 5);
+}
+
+static inline uint32_t syn_sve_access_trap(void)
+{
+ return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
+}
+
+static inline uint32_t syn_pactrap(void)
+{
+ return EC_PACTRAP << ARM_EL_EC_SHIFT;
+}
+
+static inline uint32_t syn_btitrap(int btype)
+{
+ return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
+}
+
+static inline uint32_t syn_bxjtrap(int cv, int cond, int rm)
+{
+ return (EC_BXJTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL |
+ (cv << 24) | (cond << 20) | rm;
+}
+
+static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
+{
+ return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
+}
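+
+/*
+ * Note that in syn_insn_abort above and the abort/debug syndromes
+ * below, OR-ing same_el into the bottom of the EC field converts
+ * e.g. EC_INSNABORT (0x20) into EC_INSNABORT_SAME_EL (0x21), since
+ * each _SAME_EL class is defined as the base class plus one.
+ */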
+
+static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL
+ | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
+ | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_data_abort_with_iss(int same_el,
+ int sas, int sse, int srt,
+ int sf, int ar,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc,
+ bool is_16bit)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
+ | (sf << 15) | (ar << 14)
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_swstep(int same_el, int isv, int ex)
+{
+ return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
+}
+
+static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
+{
+ return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
+}
+
+static inline uint32_t syn_breakpoint(int same_el)
+{
+ return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | 0x22;
+}
+
+static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
+{
+ return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
+ (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
+ (cv << 24) | (cond << 20) | ti;
+}
+
+static inline uint32_t syn_illegalstate(void)
+{
+ return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+#endif /* TARGET_ARM_SYNDROME_H */
diff --git a/target/arm/t16.decode b/target/arm/t16.decode
new file mode 100644
index 000000000..646c74929
--- /dev/null
+++ b/target/arm/t16.decode
@@ -0,0 +1,281 @@
+# Thumb1 instructions
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+&empty !extern
+&s_rrr_shi !extern s rd rn rm shim shty
+&s_rrr_shr !extern s rn rd rm rs shty
+&s_rri_rot !extern s rn rd imm rot
+&s_rrrr !extern s rd rn rm ra
+&rrr_rot !extern rd rn rm rot
+&rr !extern rd rm
+&ri !extern rd imm
+&r !extern rm
+&i !extern imm
+&ldst_rr !extern p w u rn rt rm shimm shtype
+&ldst_ri !extern p w u rn rt imm
+&ldst_block !extern rn i b u w list
+&setend !extern E
+&cps !extern mode imod M A I F
+&ci !extern cond imm
+
+# Set S if the instruction is outside of an IT block.
+%s !function=t16_setflags
+
+# Data-processing (two low registers)
+
+%reg_0 0:3
+
+@lll_noshr ...... .... rm:3 rd:3 \
+ &s_rrr_shi %s rn=%reg_0 shim=0 shty=0
+@xll_noshr ...... .... rm:3 rn:3 \
+ &s_rrr_shi s=1 rd=0 shim=0 shty=0
+@lxl_shr ...... .... rs:3 rd:3 \
+ &s_rrr_shr %s rm=%reg_0 rn=0
+
+AND_rrri 010000 0000 ... ... @lll_noshr
+EOR_rrri 010000 0001 ... ... @lll_noshr
+MOV_rxrr 010000 0010 ... ... @lxl_shr shty=0 # LSL
+MOV_rxrr 010000 0011 ... ... @lxl_shr shty=1 # LSR
+MOV_rxrr 010000 0100 ... ... @lxl_shr shty=2 # ASR
+ADC_rrri 010000 0101 ... ... @lll_noshr
+SBC_rrri 010000 0110 ... ... @lll_noshr
+MOV_rxrr 010000 0111 ... ... @lxl_shr shty=3 # ROR
+TST_xrri 010000 1000 ... ... @xll_noshr
+RSB_rri 010000 1001 rn:3 rd:3 &s_rri_rot %s imm=0 rot=0
+CMP_xrri 010000 1010 ... ... @xll_noshr
+CMN_xrri 010000 1011 ... ... @xll_noshr
+ORR_rrri 010000 1100 ... ... @lll_noshr
+MUL 010000 1101 rn:3 rd:3 &s_rrrr %s rm=%reg_0 ra=0
+BIC_rrri 010000 1110 ... ... @lll_noshr
+MVN_rxri 010000 1111 ... ... @lll_noshr
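+
+# As an illustrative decoding example: ANDS r0, r1 is 0x4008, i.e.
+# 010000 0000 001 000 -- opcode 0000 selects AND_rrri, rm=1 comes from
+# bits [5:3], rd=0 from bits [2:0], and rn is tied to rd via %reg_0.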
+
+# Load/store (register offset)
+
+@ldst_rr ....... rm:3 rn:3 rt:3 \
+ &ldst_rr p=1 w=0 u=1 shimm=0 shtype=0
+
+STR_rr 0101 000 ... ... ... @ldst_rr
+STRH_rr 0101 001 ... ... ... @ldst_rr
+STRB_rr 0101 010 ... ... ... @ldst_rr
+LDRSB_rr 0101 011 ... ... ... @ldst_rr
+LDR_rr 0101 100 ... ... ... @ldst_rr
+LDRH_rr 0101 101 ... ... ... @ldst_rr
+LDRB_rr 0101 110 ... ... ... @ldst_rr
+LDRSH_rr 0101 111 ... ... ... @ldst_rr
+
+# Load/store word/byte (immediate offset)
+
+%imm5_6x4 6:5 !function=times_4
+
+@ldst_ri_1 ..... imm:5 rn:3 rt:3 \
+ &ldst_ri p=1 w=0 u=1
+@ldst_ri_4 ..... ..... rn:3 rt:3 \
+ &ldst_ri p=1 w=0 u=1 imm=%imm5_6x4
+
+STR_ri 01100 ..... ... ... @ldst_ri_4
+LDR_ri 01101 ..... ... ... @ldst_ri_4
+STRB_ri 01110 ..... ... ... @ldst_ri_1
+LDRB_ri 01111 ..... ... ... @ldst_ri_1
+
+# Load/store halfword (immediate offset)
+
+%imm5_6x2 6:5 !function=times_2
+@ldst_ri_2 ..... ..... rn:3 rt:3 \
+ &ldst_ri p=1 w=0 u=1 imm=%imm5_6x2
+
+STRH_ri 10000 ..... ... ... @ldst_ri_2
+LDRH_ri 10001 ..... ... ... @ldst_ri_2
+
+# Load/store (SP-relative)
+
+%imm8_0x4 0:8 !function=times_4
+@ldst_spec_i ..... rt:3 ........ \
+ &ldst_ri p=1 w=0 u=1 imm=%imm8_0x4
+
+STR_ri 10010 ... ........ @ldst_spec_i rn=13
+LDR_ri 10011 ... ........ @ldst_spec_i rn=13
+
+# Load (PC-relative)
+
+LDR_ri 01001 ... ........ @ldst_spec_i rn=15
+
+# Add PC/SP (immediate)
+
+ADR 10100 rd:3 ........ imm=%imm8_0x4
+ADD_rri 10101 rd:3 ........ \
+ &s_rri_rot rn=13 s=0 rot=0 imm=%imm8_0x4 # SP
+
+# Load/store multiple
+
+@ldstm ..... rn:3 list:8 &ldst_block i=1 b=0 u=0 w=1
+
+STM 11000 ... ........ @ldstm
+LDM_t16 11001 ... ........ @ldstm
+
+# Shift (immediate)
+
+@shift_i ..... shim:5 rm:3 rd:3 &s_rrr_shi %s rn=%reg_0
+
+MOV_rxri 000 00 ..... ... ... @shift_i shty=0 # LSL
+MOV_rxri 000 01 ..... ... ... @shift_i shty=1 # LSR
+MOV_rxri 000 10 ..... ... ... @shift_i shty=2 # ASR
+
+# Add/subtract (three low registers)
+
+@addsub_3 ....... rm:3 rn:3 rd:3 \
+ &s_rrr_shi %s shim=0 shty=0
+
+ADD_rrri 0001100 ... ... ... @addsub_3
+SUB_rrri 0001101 ... ... ... @addsub_3
+
+# Add/subtract (two low registers and immediate)
+
+@addsub_2i ....... imm:3 rn:3 rd:3 \
+ &s_rri_rot %s rot=0
+
+ADD_rri 0001 110 ... ... ... @addsub_2i
+SUB_rri 0001 111 ... ... ... @addsub_2i
+
+# Add, subtract, compare, move (one low register and immediate)
+
+%reg_8 8:3
+@arith_1i ..... rd:3 imm:8 \
+ &s_rri_rot rot=0 rn=%reg_8
+
+MOV_rxi 00100 ... ........ @arith_1i %s
+CMP_xri 00101 ... ........ @arith_1i s=1
+ADD_rri 00110 ... ........ @arith_1i %s
+SUB_rri 00111 ... ........ @arith_1i %s
+
+# Add, compare, move (two high registers)
+
+%reg_0_7 7:1 0:3
+@addsub_2h .... .... . rm:4 ... \
+ &s_rrr_shi rd=%reg_0_7 rn=%reg_0_7 shim=0 shty=0
+
+ADD_rrri 0100 0100 . .... ... @addsub_2h s=0
+CMP_xrri 0100 0101 . .... ... @addsub_2h s=1
+MOV_rxri 0100 0110 . .... ... @addsub_2h s=0
+
+# Adjust SP (immediate)
+
+%imm7_0x4 0:7 !function=times_4
+@addsub_sp_i .... .... . ....... \
+ &s_rri_rot s=0 rd=13 rn=13 rot=0 imm=%imm7_0x4
+
+ADD_rri 1011 0000 0 ....... @addsub_sp_i
+SUB_rri 1011 0000 1 ....... @addsub_sp_i
+
+# Branch and exchange
+
+@branchr .... .... . rm:4 ... &r
+
+BX 0100 0111 0 .... 000 @branchr
+BLX_r 0100 0111 1 .... 000 @branchr
+BXNS 0100 0111 0 .... 100 @branchr
+BLXNS 0100 0111 1 .... 100 @branchr
+
+# Extend
+
+@extend .... .... .. rm:3 rd:3 &rrr_rot rn=15 rot=0
+
+SXTAH 1011 0010 00 ... ... @extend
+SXTAB 1011 0010 01 ... ... @extend
+UXTAH 1011 0010 10 ... ... @extend
+UXTAB 1011 0010 11 ... ... @extend
+
+# Change processor state
+
+%imod 4:1 !function=plus_2
+
+SETEND 1011 0110 010 1 E:1 000 &setend
+{
+ CPS 1011 0110 011 . 0 A:1 I:1 F:1 &cps mode=0 M=0 %imod
+ CPS_v7m 1011 0110 011 im:1 00 I:1 F:1
+}
+
+# Reverse bytes
+
+@rdm .... .... .. rm:3 rd:3 &rr
+
+REV 1011 1010 00 ... ... @rdm
+REV16 1011 1010 01 ... ... @rdm
+REVSH 1011 1010 11 ... ... @rdm
+
+# Hints
+
+{
+ {
+ YIELD 1011 1111 0001 0000
+ WFE 1011 1111 0010 0000
+ WFI 1011 1111 0011 0000
+
+ # TODO: Implement SEV, SEVL; may help SMP performance.
+ # SEV 1011 1111 0100 0000
+ # SEVL 1011 1111 0101 0000
+
+ # The canonical nop has the second nibble as 0000, but the whole of the
+    # rest of the space is a "reserved hint, behaves as nop".
+ NOP 1011 1111 ---- 0000
+ }
+ IT 1011 1111 cond_mask:8
+}
+
+# Miscellaneous 16-bit instructions
+
+%imm6_9_3 9:1 3:5 !function=times_2
+
+HLT 1011 1010 10 imm:6 &i
+BKPT 1011 1110 imm:8 &i
+CBZ 1011 nz:1 0.1 ..... rn:3 imm=%imm6_9_3
+
+# Push and Pop
+
+%push_list 0:9 !function=t16_push_list
+%pop_list 0:9 !function=t16_pop_list
+
+STM 1011 010 ......... \
+ &ldst_block i=0 b=1 u=0 w=1 rn=13 list=%push_list
+LDM_t16 1011 110 ......... \
+ &ldst_block i=1 b=0 u=0 w=1 rn=13 list=%pop_list
+
+# Conditional branches, Supervisor call
+
+%imm8_0x2 0:s8 !function=times_2
+
+{
+ UDF 1101 1110 ---- ----
+ SVC 1101 1111 imm:8 &i
+ B_cond_thumb 1101 cond:4 ........ &ci imm=%imm8_0x2
+}
+
+# Unconditional Branch
+
+%imm11_0x2 0:s11 !function=times_2
+
+B 11100 ........... &i imm=%imm11_0x2
+
+# thumb_insn_is_16bit() ensures we won't be decoding these as
+# T16 instructions for a Thumb2 CPU, so these patterns must be
+# a Thumb1 split BL/BLX.
+BLX_suffix 11101 imm:11 &i
+BL_BLX_prefix 11110 imm:s11 &i
+BL_suffix 11111 imm:11 &i
diff --git a/target/arm/t32.decode b/target/arm/t32.decode
new file mode 100644
index 000000000..78fadef9d
--- /dev/null
+++ b/target/arm/t32.decode
@@ -0,0 +1,753 @@
+# Thumb2 instructions
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+
+&empty !extern
+&s_rrr_shi !extern s rd rn rm shim shty
+&s_rrr_shr !extern s rn rd rm rs shty
+&s_rri_rot !extern s rn rd imm rot
+&s_rrrr !extern s rd rn rm ra
+&rrrr !extern rd rn rm ra
+&rrr_rot !extern rd rn rm rot
+&rrr !extern rd rn rm
+&rr !extern rd rm
+&ri !extern rd imm
+&r !extern rm
+&i !extern imm
+&msr_reg !extern rn r mask
+&mrs_reg !extern rd r
+&msr_bank !extern rn r sysm
+&mrs_bank !extern rd r sysm
+&ldst_rr !extern p w u rn rt rm shimm shtype
+&ldst_ri !extern p w u rn rt imm
+&ldst_block !extern rn i b u w list
+&strex !extern rn rd rt rt2 imm
+&ldrex !extern rn rt rt2 imm
+&bfx !extern rd rn lsb widthm1
+&bfi !extern rd rn lsb msb
+&sat !extern rd rn satimm imm sh
+&pkh !extern rd rn rm imm tb
+&cps !extern mode imod M A I F
+&mcr !extern cp opc1 crn crm opc2 rt
+&mcrr !extern cp opc1 crm rt rt2
+
+&mve_shl_ri rdalo rdahi shim
+&mve_shl_rr rdalo rdahi rm
+&mve_sh_ri rda shim
+&mve_sh_rr rda rm
+
+# rdahi: bits [3:1] from insn, bit 0 is 1
+# rdalo: bits [3:1] from insn, bit 0 is 0
+%rdahi_9 9:3 !function=times_2_plus_1
+%rdalo_17 17:3 !function=times_2
+
+# Data-processing (register)
+
+%imm5_12_6 12:3 6:2
+
+@s_rrr_shi ....... .... s:1 rn:4 .... rd:4 .. shty:2 rm:4 \
+ &s_rrr_shi shim=%imm5_12_6
+@s_rxr_shi ....... .... s:1 .... .... rd:4 .. shty:2 rm:4 \
+ &s_rrr_shi shim=%imm5_12_6 rn=0
+@S_xrr_shi ....... .... . rn:4 .... .... .. shty:2 rm:4 \
+ &s_rrr_shi shim=%imm5_12_6 s=1 rd=0
+
+@mve_shl_ri ....... .... . ... . . ... ... . .. .. .... \
+ &mve_shl_ri shim=%imm5_12_6 rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_shl_rr ....... .... . ... . rm:4 ... . .. .. .... \
+ &mve_shl_rr rdalo=%rdalo_17 rdahi=%rdahi_9
+@mve_sh_ri ....... .... . rda:4 . ... ... . .. .. .... \
+ &mve_sh_ri shim=%imm5_12_6
+@mve_sh_rr ....... .... . rda:4 rm:4 .... .... .... &mve_sh_rr
+
+{
+ TST_xrri 1110101 0000 1 .... 0 ... 1111 .... .... @S_xrr_shi
+ AND_rrri 1110101 0000 . .... 0 ... .... .... .... @s_rrr_shi
+}
+BIC_rrri 1110101 0001 . .... 0 ... .... .... .... @s_rrr_shi
+{
+ # The v8.1M MVE shift insns overlap in encoding with MOVS/ORRS
+ # and are distinguished by having Rm==13 or 15. Those are UNPREDICTABLE
+ # cases for MOVS/ORRS. We decode the MVE cases first, ensuring that
+ # they explicitly call unallocated_encoding() for cases that must UNDEF
+ # (eg "using a new shift insn on a v8.1M CPU without MVE"), and letting
+ # the rest fall through (where ORR_rrri and MOV_rxri will end up
+ # handling them as r13 and r15 accesses with the same semantics as A32).
+ [
+ {
+ UQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 00 1111 @mve_sh_ri
+ LSLL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 00 1111 @mve_shl_ri
+ UQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 00 1111 @mve_shl_ri
+ }
+
+ {
+ URSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 01 1111 @mve_sh_ri
+ LSRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 01 1111 @mve_shl_ri
+ URSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 01 1111 @mve_shl_ri
+ }
+
+ {
+ SRSHR_ri 1110101 0010 1 .... 0 ... 1111 .. 10 1111 @mve_sh_ri
+ ASRL_ri 1110101 0010 1 ... 0 0 ... ... 1 .. 10 1111 @mve_shl_ri
+ SRSHRL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 10 1111 @mve_shl_ri
+ }
+
+ {
+ SQSHL_ri 1110101 0010 1 .... 0 ... 1111 .. 11 1111 @mve_sh_ri
+ SQSHLL_ri 1110101 0010 1 ... 1 0 ... ... 1 .. 11 1111 @mve_shl_ri
+ }
+
+ {
+ UQRSHL_rr 1110101 0010 1 .... .... 1111 0000 1101 @mve_sh_rr
+ LSLL_rr 1110101 0010 1 ... 0 .... ... 1 0000 1101 @mve_shl_rr
+ UQRSHLL64_rr 1110101 0010 1 ... 1 .... ... 1 0000 1101 @mve_shl_rr
+ }
+
+ {
+ SQRSHR_rr 1110101 0010 1 .... .... 1111 0010 1101 @mve_sh_rr
+ ASRL_rr 1110101 0010 1 ... 0 .... ... 1 0010 1101 @mve_shl_rr
+ SQRSHRL64_rr 1110101 0010 1 ... 1 .... ... 1 0010 1101 @mve_shl_rr
+ }
+
+ UQRSHLL48_rr 1110101 0010 1 ... 1 .... ... 1 1000 1101 @mve_shl_rr
+ SQRSHRL48_rr 1110101 0010 1 ... 1 .... ... 1 1010 1101 @mve_shl_rr
+ ]
+
+ MOV_rxri 1110101 0010 . 1111 0 ... .... .... .... @s_rxr_shi
+ ORR_rrri 1110101 0010 . .... 0 ... .... .... .... @s_rrr_shi
+
+ # v8.1M CSEL and friends
+ CSEL 1110101 0010 1 rn:4 10 op:2 rd:4 fcond:4 rm:4
+}
+{
+ MVN_rxri 1110101 0011 . 1111 0 ... .... .... .... @s_rxr_shi
+ ORN_rrri 1110101 0011 . .... 0 ... .... .... .... @s_rrr_shi
+}
+{
+ TEQ_xrri 1110101 0100 1 .... 0 ... 1111 .... .... @S_xrr_shi
+ EOR_rrri 1110101 0100 . .... 0 ... .... .... .... @s_rrr_shi
+}
+PKH 1110101 0110 0 rn:4 0 ... rd:4 .. tb:1 0 rm:4 \
+ &pkh imm=%imm5_12_6
+{
+ CMN_xrri 1110101 1000 1 .... 0 ... 1111 .... .... @S_xrr_shi
+ ADD_rrri 1110101 1000 . .... 0 ... .... .... .... @s_rrr_shi
+}
+ADC_rrri 1110101 1010 . .... 0 ... .... .... .... @s_rrr_shi
+SBC_rrri 1110101 1011 . .... 0 ... .... .... .... @s_rrr_shi
+{
+ CMP_xrri 1110101 1101 1 .... 0 ... 1111 .... .... @S_xrr_shi
+ SUB_rrri 1110101 1101 . .... 0 ... .... .... .... @s_rrr_shi
+}
+RSB_rrri 1110101 1110 . .... 0 ... .... .... .... @s_rrr_shi
+
+# Data-processing (register-shifted register)
+
+MOV_rxrr 1111 1010 0 shty:2 s:1 rm:4 1111 rd:4 0000 rs:4 \
+ &s_rrr_shr rn=0
+
+# Data-processing (immediate)
+
+%t32extrot 26:1 12:3 0:8 !function=t32_expandimm_rot
+%t32extimm 26:1 12:3 0:8 !function=t32_expandimm_imm
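+
+# The two functions above implement the usual T32 modified-immediate
+# expansion, split into a rotate and a base immediate so the result
+# fits &s_rri_rot.  For imm12 = i:imm3:imm8, imm12<11:10> == 00 selects
+# a replication pattern of imm8 (e.g. imm12 = 0x3ab expands to
+# 0xabababab); otherwise 1:imm8<6:0> is rotated right by imm12<11:7>.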
+
+@s_rri_rot ....... .... s:1 rn:4 . ... rd:4 ........ \
+ &s_rri_rot imm=%t32extimm rot=%t32extrot
+@s_rxi_rot ....... .... s:1 .... . ... rd:4 ........ \
+ &s_rri_rot imm=%t32extimm rot=%t32extrot rn=0
+@S_xri_rot ....... .... . rn:4 . ... .... ........ \
+ &s_rri_rot imm=%t32extimm rot=%t32extrot s=1 rd=0
+
+{
+ TST_xri 1111 0.0 0000 1 .... 0 ... 1111 ........ @S_xri_rot
+ AND_rri 1111 0.0 0000 . .... 0 ... .... ........ @s_rri_rot
+}
+BIC_rri 1111 0.0 0001 . .... 0 ... .... ........ @s_rri_rot
+{
+ MOV_rxi 1111 0.0 0010 . 1111 0 ... .... ........ @s_rxi_rot
+ ORR_rri 1111 0.0 0010 . .... 0 ... .... ........ @s_rri_rot
+}
+{
+ MVN_rxi 1111 0.0 0011 . 1111 0 ... .... ........ @s_rxi_rot
+ ORN_rri 1111 0.0 0011 . .... 0 ... .... ........ @s_rri_rot
+}
+{
+ TEQ_xri 1111 0.0 0100 1 .... 0 ... 1111 ........ @S_xri_rot
+ EOR_rri 1111 0.0 0100 . .... 0 ... .... ........ @s_rri_rot
+}
+{
+ CMN_xri 1111 0.0 1000 1 .... 0 ... 1111 ........ @S_xri_rot
+ ADD_rri 1111 0.0 1000 . .... 0 ... .... ........ @s_rri_rot
+}
+ADC_rri 1111 0.0 1010 . .... 0 ... .... ........ @s_rri_rot
+SBC_rri 1111 0.0 1011 . .... 0 ... .... ........ @s_rri_rot
+{
+ CMP_xri 1111 0.0 1101 1 .... 0 ... 1111 ........ @S_xri_rot
+ SUB_rri 1111 0.0 1101 . .... 0 ... .... ........ @s_rri_rot
+}
+RSB_rri 1111 0.0 1110 . .... 0 ... .... ........ @s_rri_rot
+
+# Data processing (plain binary immediate)
+
+%imm12_26_12_0 26:1 12:3 0:8
+%neg12_26_12_0 26:1 12:3 0:8 !function=negate
+@s0_rri_12 .... ... .... . rn:4 . ... rd:4 ........ \
+ &s_rri_rot imm=%imm12_26_12_0 rot=0 s=0
+
+{
+ ADR 1111 0.1 0000 0 1111 0 ... rd:4 ........ \
+ &ri imm=%imm12_26_12_0
+ ADD_rri 1111 0.1 0000 0 .... 0 ... .... ........ @s0_rri_12
+}
+{
+ ADR 1111 0.1 0101 0 1111 0 ... rd:4 ........ \
+ &ri imm=%neg12_26_12_0
+ SUB_rri 1111 0.1 0101 0 .... 0 ... .... ........ @s0_rri_12
+}
+
+# Move Wide
+
+%imm16_26_16_12_0 16:4 26:1 12:3 0:8
+@mov16 .... .... .... .... .... rd:4 .... .... \
+ &ri imm=%imm16_26_16_12_0
+
+MOVW 1111 0.10 0100 .... 0 ... .... ........ @mov16
+MOVT 1111 0.10 1100 .... 0 ... .... ........ @mov16
+
+# Saturate, bitfield
+
+@sat .... .... .. sh:1 . rn:4 . ... rd:4 .. . satimm:5 \
+ &sat imm=%imm5_12_6
+@sat16 .... .... .. . . rn:4 . ... rd:4 .. . satimm:5 \
+ &sat sh=0 imm=0
+
+{
+ SSAT16 1111 0011 001 0 .... 0 000 .... 00 0 ..... @sat16
+ SSAT 1111 0011 00. 0 .... 0 ... .... .. 0 ..... @sat
+}
+{
+ USAT16 1111 0011 101 0 .... 0 000 .... 00 0 ..... @sat16
+ USAT 1111 0011 10. 0 .... 0 ... .... .. 0 ..... @sat
+}
+
+@bfx .... .... ... . rn:4 . ... rd:4 .. . widthm1:5 \
+ &bfx lsb=%imm5_12_6
+@bfi .... .... ... . rn:4 . ... rd:4 .. . msb:5 \
+ &bfi lsb=%imm5_12_6
+
+SBFX 1111 0011 010 0 .... 0 ... .... ..0..... @bfx
+UBFX 1111 0011 110 0 .... 0 ... .... ..0..... @bfx
+
+# bfc is bfi w/ rn=15
+BFCI 1111 0011 011 0 .... 0 ... .... ..0..... @bfi
+
+# Multiply and multiply accumulate
+
+@s0_rnadm .... .... .... rn:4 ra:4 rd:4 .... rm:4 &s_rrrr s=0
+@s0_rn0dm .... .... .... rn:4 .... rd:4 .... rm:4 &s_rrrr ra=0 s=0
+@rnadm .... .... .... rn:4 ra:4 rd:4 .... rm:4 &rrrr
+@rn0dm .... .... .... rn:4 .... rd:4 .... rm:4 &rrrr ra=0
+@rndm .... .... .... rn:4 .... rd:4 .... rm:4 &rrr
+@rdm .... .... .... .... .... rd:4 .... rm:4 &rr
+
+{
+ MUL 1111 1011 0000 .... 1111 .... 0000 .... @s0_rn0dm
+ MLA 1111 1011 0000 .... .... .... 0000 .... @s0_rnadm
+}
+MLS 1111 1011 0000 .... .... .... 0001 .... @rnadm
+SMULL 1111 1011 1000 .... .... .... 0000 .... @s0_rnadm
+UMULL 1111 1011 1010 .... .... .... 0000 .... @s0_rnadm
+SMLAL 1111 1011 1100 .... .... .... 0000 .... @s0_rnadm
+UMLAL 1111 1011 1110 .... .... .... 0000 .... @s0_rnadm
+UMAAL 1111 1011 1110 .... .... .... 0110 .... @rnadm
+{
+ SMULWB 1111 1011 0011 .... 1111 .... 0000 .... @rn0dm
+ SMLAWB 1111 1011 0011 .... .... .... 0000 .... @rnadm
+}
+{
+ SMULWT 1111 1011 0011 .... 1111 .... 0001 .... @rn0dm
+ SMLAWT 1111 1011 0011 .... .... .... 0001 .... @rnadm
+}
+{
+ SMULBB 1111 1011 0001 .... 1111 .... 0000 .... @rn0dm
+ SMLABB 1111 1011 0001 .... .... .... 0000 .... @rnadm
+}
+{
+ SMULBT 1111 1011 0001 .... 1111 .... 0001 .... @rn0dm
+ SMLABT 1111 1011 0001 .... .... .... 0001 .... @rnadm
+}
+{
+ SMULTB 1111 1011 0001 .... 1111 .... 0010 .... @rn0dm
+ SMLATB 1111 1011 0001 .... .... .... 0010 .... @rnadm
+}
+{
+ SMULTT 1111 1011 0001 .... 1111 .... 0011 .... @rn0dm
+ SMLATT 1111 1011 0001 .... .... .... 0011 .... @rnadm
+}
+SMLALBB 1111 1011 1100 .... .... .... 1000 .... @rnadm
+SMLALBT 1111 1011 1100 .... .... .... 1001 .... @rnadm
+SMLALTB 1111 1011 1100 .... .... .... 1010 .... @rnadm
+SMLALTT 1111 1011 1100 .... .... .... 1011 .... @rnadm
+
+# usad8 is usada8 w/ ra=15
+USADA8 1111 1011 0111 .... .... .... 0000 .... @rnadm
+
+SMLAD 1111 1011 0010 .... .... .... 0000 .... @rnadm
+SMLADX 1111 1011 0010 .... .... .... 0001 .... @rnadm
+SMLSD 1111 1011 0100 .... .... .... 0000 .... @rnadm
+SMLSDX 1111 1011 0100 .... .... .... 0001 .... @rnadm
+
+SMLALD 1111 1011 1100 .... .... .... 1100 .... @rnadm
+SMLALDX 1111 1011 1100 .... .... .... 1101 .... @rnadm
+SMLSLD 1111 1011 1101 .... .... .... 1100 .... @rnadm
+SMLSLDX 1111 1011 1101 .... .... .... 1101 .... @rnadm
+
+SMMLA 1111 1011 0101 .... .... .... 0000 .... @rnadm
+SMMLAR 1111 1011 0101 .... .... .... 0001 .... @rnadm
+SMMLS 1111 1011 0110 .... .... .... 0000 .... @rnadm
+SMMLSR 1111 1011 0110 .... .... .... 0001 .... @rnadm
+
+SDIV 1111 1011 1001 .... 1111 .... 1111 .... @rndm
+UDIV 1111 1011 1011 .... 1111 .... 1111 .... @rndm
+
+# Data-processing (two source registers)
+
+QADD 1111 1010 1000 .... 1111 .... 1000 .... @rndm
+QSUB 1111 1010 1000 .... 1111 .... 1010 .... @rndm
+QDADD 1111 1010 1000 .... 1111 .... 1001 .... @rndm
+QDSUB 1111 1010 1000 .... 1111 .... 1011 .... @rndm
+
+CRC32B 1111 1010 1100 .... 1111 .... 1000 .... @rndm
+CRC32H 1111 1010 1100 .... 1111 .... 1001 .... @rndm
+CRC32W 1111 1010 1100 .... 1111 .... 1010 .... @rndm
+CRC32CB 1111 1010 1101 .... 1111 .... 1000 .... @rndm
+CRC32CH 1111 1010 1101 .... 1111 .... 1001 .... @rndm
+CRC32CW 1111 1010 1101 .... 1111 .... 1010 .... @rndm
+
+SEL 1111 1010 1010 .... 1111 .... 1000 .... @rndm
+
+# Note rn != rm is CONSTRAINED UNPREDICTABLE; we choose to ignore rn.
+REV 1111 1010 1001 ---- 1111 .... 1000 .... @rdm
+REV16 1111 1010 1001 ---- 1111 .... 1001 .... @rdm
+RBIT 1111 1010 1001 ---- 1111 .... 1010 .... @rdm
+REVSH 1111 1010 1001 ---- 1111 .... 1011 .... @rdm
+CLZ 1111 1010 1011 ---- 1111 .... 1000 .... @rdm
+
+# Branches and miscellaneous control
+
+%msr_sysm 4:1 8:4
+%mrs_sysm 4:1 16:4
+%imm16_16_0 16:4 0:12
+%imm21 26:s1 11:1 13:1 16:6 0:11 !function=times_2
+&ci cond imm
+
+{
+ # Group insn[25:23] = 111, which is cond=111x for the branch below,
+ # or unconditional, which would be illegal for the branch.
+ [
+ # Hints, and CPS
+ {
+ YIELD 1111 0011 1010 1111 1000 0000 0000 0001
+ WFE 1111 0011 1010 1111 1000 0000 0000 0010
+ WFI 1111 0011 1010 1111 1000 0000 0000 0011
+
+ # TODO: Implement SEV, SEVL; may help SMP performance.
+ # SEV 1111 0011 1010 1111 1000 0000 0000 0100
+ # SEVL 1111 0011 1010 1111 1000 0000 0000 0101
+
+ # For M-profile minimal-RAS ESB can be a NOP, which is the
+ # default behaviour since it is in the hint space.
+ # ESB 1111 0011 1010 1111 1000 0000 0001 0000
+
+ # The canonical nop ends in 0000 0000, but the whole rest
+ # of the space is "reserved hint, behaves as nop".
+ NOP 1111 0011 1010 1111 1000 0000 ---- ----
+
+ # If imod == '00' && M == '0' then SEE "Hint instructions", above.
+ CPS 1111 0011 1010 1111 1000 0 imod:2 M:1 A:1 I:1 F:1 mode:5 \
+ &cps
+ }
+
+ # Miscellaneous control
+ CLREX 1111 0011 1011 1111 1000 1111 0010 1111
+ DSB 1111 0011 1011 1111 1000 1111 0100 ----
+ DMB 1111 0011 1011 1111 1000 1111 0101 ----
+ ISB 1111 0011 1011 1111 1000 1111 0110 ----
+ SB 1111 0011 1011 1111 1000 1111 0111 0000
+
+ # Note that the v7m insn overlaps both the normal and banked insn.
+ {
+ MRS_bank 1111 0011 111 r:1 .... 1000 rd:4 001. 0000 \
+ &mrs_bank sysm=%mrs_sysm
+ MRS_reg 1111 0011 111 r:1 1111 1000 rd:4 0000 0000 &mrs_reg
+ MRS_v7m 1111 0011 111 0 1111 1000 rd:4 sysm:8
+ }
+ {
+ MSR_bank 1111 0011 100 r:1 rn:4 1000 .... 001. 0000 \
+ &msr_bank sysm=%msr_sysm
+ MSR_reg 1111 0011 100 r:1 rn:4 1000 mask:4 0000 0000 &msr_reg
+ MSR_v7m 1111 0011 100 0 rn:4 1000 mask:2 00 sysm:8
+ }
+ BXJ 1111 0011 1100 rm:4 1000 1111 0000 0000 &r
+ {
+ # At v6T2, this is the T5 encoding of SUBS PC, LR, #IMM, and works as for
+ # every other encoding of SUBS. With v7VE, IMM=0 is redefined as ERET.
+ # The distinction between the two only matters for Hyp mode.
+ ERET 1111 0011 1101 1110 1000 1111 0000 0000
+ SUB_rri 1111 0011 1101 1110 1000 1111 imm:8 \
+ &s_rri_rot rot=0 s=1 rd=15 rn=14
+ }
+ SMC 1111 0111 1111 imm:4 1000 0000 0000 0000 &i
+ HVC 1111 0111 1110 .... 1000 .... .... .... \
+ &i imm=%imm16_16_0
+ UDF 1111 0111 1111 ---- 1010 ---- ---- ----
+ ]
+ B_cond_thumb 1111 0. cond:4 ...... 10.0 ............ &ci imm=%imm21
+}
+
+# Load/store (register, immediate, literal)
+
+@ldst_rr .... .... .... rn:4 rt:4 ...... shimm:2 rm:4 \
+ &ldst_rr p=1 w=0 u=1 shtype=0
+@ldst_ri_idx .... .... .... rn:4 rt:4 . p:1 u:1 . imm:8 \
+ &ldst_ri w=1
+@ldst_ri_neg .... .... .... rn:4 rt:4 .... imm:8 \
+ &ldst_ri p=1 w=0 u=0
+@ldst_ri_unp .... .... .... rn:4 rt:4 .... imm:8 \
+ &ldst_ri p=1 w=0 u=1
+@ldst_ri_pos .... .... .... rn:4 rt:4 imm:12 \
+ &ldst_ri p=1 w=0 u=1
+@ldst_ri_lit .... .... u:1 ... .... rt:4 imm:12 \
+ &ldst_ri p=1 w=0 rn=15
+
+STRB_rr 1111 1000 0000 .... .... 000000 .. .... @ldst_rr
+STRB_ri 1111 1000 0000 .... .... 1..1 ........ @ldst_ri_idx
+STRB_ri 1111 1000 0000 .... .... 1100 ........ @ldst_ri_neg
+STRBT_ri 1111 1000 0000 .... .... 1110 ........ @ldst_ri_unp
+STRB_ri 1111 1000 1000 .... .... ............ @ldst_ri_pos
+
+STRH_rr 1111 1000 0010 .... .... 000000 .. .... @ldst_rr
+STRH_ri 1111 1000 0010 .... .... 1..1 ........ @ldst_ri_idx
+STRH_ri 1111 1000 0010 .... .... 1100 ........ @ldst_ri_neg
+STRHT_ri 1111 1000 0010 .... .... 1110 ........ @ldst_ri_unp
+STRH_ri 1111 1000 1010 .... .... ............ @ldst_ri_pos
+
+STR_rr 1111 1000 0100 .... .... 000000 .. .... @ldst_rr
+STR_ri 1111 1000 0100 .... .... 1..1 ........ @ldst_ri_idx
+STR_ri 1111 1000 0100 .... .... 1100 ........ @ldst_ri_neg
+STRT_ri 1111 1000 0100 .... .... 1110 ........ @ldst_ri_unp
+STR_ri 1111 1000 1100 .... .... ............ @ldst_ri_pos
+
+# Note that Load, unsigned (literal) overlaps all other load encodings.
+{
+ {
+ NOP 1111 1000 -001 1111 1111 ------------ # PLD
+ LDRB_ri 1111 1000 .001 1111 .... ............ @ldst_ri_lit
+ }
+ {
+ NOP 1111 1000 1001 ---- 1111 ------------ # PLD
+ LDRB_ri 1111 1000 1001 .... .... ............ @ldst_ri_pos
+ }
+ LDRB_ri 1111 1000 0001 .... .... 1..1 ........ @ldst_ri_idx
+ {
+ NOP 1111 1000 0001 ---- 1111 1100 -------- # PLD
+ LDRB_ri 1111 1000 0001 .... .... 1100 ........ @ldst_ri_neg
+ }
+ LDRBT_ri 1111 1000 0001 .... .... 1110 ........ @ldst_ri_unp
+ {
+ NOP 1111 1000 0001 ---- 1111 000000 -- ---- # PLD
+ LDRB_rr 1111 1000 0001 .... .... 000000 .. .... @ldst_rr
+ }
+}
+{
+ {
+ NOP 1111 1000 -011 1111 1111 ------------ # PLD
+ LDRH_ri 1111 1000 .011 1111 .... ............ @ldst_ri_lit
+ }
+ {
+ NOP 1111 1000 1011 ---- 1111 ------------ # PLDW
+ LDRH_ri 1111 1000 1011 .... .... ............ @ldst_ri_pos
+ }
+ LDRH_ri 1111 1000 0011 .... .... 1..1 ........ @ldst_ri_idx
+ {
+ NOP 1111 1000 0011 ---- 1111 1100 -------- # PLDW
+ LDRH_ri 1111 1000 0011 .... .... 1100 ........ @ldst_ri_neg
+ }
+ LDRHT_ri 1111 1000 0011 .... .... 1110 ........ @ldst_ri_unp
+ {
+ NOP 1111 1000 0011 ---- 1111 000000 -- ---- # PLDW
+ LDRH_rr 1111 1000 0011 .... .... 000000 .. .... @ldst_rr
+ }
+}
+{
+ LDR_ri 1111 1000 .101 1111 .... ............ @ldst_ri_lit
+ LDR_ri 1111 1000 1101 .... .... ............ @ldst_ri_pos
+ LDR_ri 1111 1000 0101 .... .... 1..1 ........ @ldst_ri_idx
+ LDR_ri 1111 1000 0101 .... .... 1100 ........ @ldst_ri_neg
+ LDRT_ri 1111 1000 0101 .... .... 1110 ........ @ldst_ri_unp
+ LDR_rr 1111 1000 0101 .... .... 000000 .. .... @ldst_rr
+}
+# NOPs here are PLI.
+{
+ {
+ NOP 1111 1001 -001 1111 1111 ------------
+ LDRSB_ri 1111 1001 .001 1111 .... ............ @ldst_ri_lit
+ }
+ {
+ NOP 1111 1001 1001 ---- 1111 ------------
+ LDRSB_ri 1111 1001 1001 .... .... ............ @ldst_ri_pos
+ }
+ LDRSB_ri 1111 1001 0001 .... .... 1..1 ........ @ldst_ri_idx
+ {
+ NOP 1111 1001 0001 ---- 1111 1100 --------
+ LDRSB_ri 1111 1001 0001 .... .... 1100 ........ @ldst_ri_neg
+ }
+ LDRSBT_ri 1111 1001 0001 .... .... 1110 ........ @ldst_ri_unp
+ {
+ NOP 1111 1001 0001 ---- 1111 000000 -- ----
+ LDRSB_rr 1111 1001 0001 .... .... 000000 .. .... @ldst_rr
+ }
+}
+# NOPs here are unallocated memory hints, treated as NOP.
+{
+ {
+ NOP 1111 1001 -011 1111 1111 ------------
+ LDRSH_ri 1111 1001 .011 1111 .... ............ @ldst_ri_lit
+ }
+ {
+ NOP 1111 1001 1011 ---- 1111 ------------
+ LDRSH_ri 1111 1001 1011 .... .... ............ @ldst_ri_pos
+ }
+ LDRSH_ri 1111 1001 0011 .... .... 1..1 ........ @ldst_ri_idx
+ {
+ NOP 1111 1001 0011 ---- 1111 1100 --------
+ LDRSH_ri 1111 1001 0011 .... .... 1100 ........ @ldst_ri_neg
+ }
+ LDRSHT_ri 1111 1001 0011 .... .... 1110 ........ @ldst_ri_unp
+ {
+ NOP 1111 1001 0011 ---- 1111 000000 -- ----
+ LDRSH_rr 1111 1001 0011 .... .... 000000 .. .... @ldst_rr
+ }
+}
+
+%imm8x4 0:8 !function=times_4
+&ldst_ri2 p w u rn rt rt2 imm
+@ldstd_ri8 .... .... u:1 ... rn:4 rt:4 rt2:4 ........ \
+ &ldst_ri2 imm=%imm8x4
+
+STRD_ri_t32 1110 1000 .110 .... .... .... ........ @ldstd_ri8 w=1 p=0
+LDRD_ri_t32 1110 1000 .111 .... .... .... ........ @ldstd_ri8 w=1 p=0
+
+STRD_ri_t32 1110 1001 .100 .... .... .... ........ @ldstd_ri8 w=0 p=1
+LDRD_ri_t32 1110 1001 .101 .... .... .... ........ @ldstd_ri8 w=0 p=1
+
+STRD_ri_t32 1110 1001 .110 .... .... .... ........ @ldstd_ri8 w=1 p=1
+{
+ SG 1110 1001 0111 1111 1110 1001 01111111
+ LDRD_ri_t32 1110 1001 .111 .... .... .... ........ @ldstd_ri8 w=1 p=1
+}
+
+# Load/Store Exclusive, Load-Acquire/Store-Release, and Table Branch
+
+@strex_i .... .... .... rn:4 rt:4 rd:4 .... .... \
+ &strex rt2=15 imm=%imm8x4
+@strex_0 .... .... .... rn:4 rt:4 .... .... rd:4 \
+ &strex rt2=15 imm=0
+@strex_d .... .... .... rn:4 rt:4 rt2:4 .... rd:4 \
+ &strex imm=0
+
+@ldrex_i .... .... .... rn:4 rt:4 .... .... .... \
+ &ldrex rt2=15 imm=%imm8x4
+@ldrex_0 .... .... .... rn:4 rt:4 .... .... .... \
+ &ldrex rt2=15 imm=0
+@ldrex_d .... .... .... rn:4 rt:4 rt2:4 .... .... \
+ &ldrex imm=0
+
+{
+ TT 1110 1000 0100 rn:4 1111 rd:4 A:1 T:1 000000
+ STREX 1110 1000 0100 .... .... .... .... .... @strex_i
+}
+STREXB 1110 1000 1100 .... .... 1111 0100 .... @strex_0
+STREXH 1110 1000 1100 .... .... 1111 0101 .... @strex_0
+STREXD_t32 1110 1000 1100 .... .... .... 0111 .... @strex_d
+
+STLEX 1110 1000 1100 .... .... 1111 1110 .... @strex_0
+STLEXB 1110 1000 1100 .... .... 1111 1100 .... @strex_0
+STLEXH 1110 1000 1100 .... .... 1111 1101 .... @strex_0
+STLEXD_t32 1110 1000 1100 .... .... .... 1111 .... @strex_d
+
+STL 1110 1000 1100 .... .... 1111 1010 1111 @ldrex_0
+STLB 1110 1000 1100 .... .... 1111 1000 1111 @ldrex_0
+STLH 1110 1000 1100 .... .... 1111 1001 1111 @ldrex_0
+
+LDREX 1110 1000 0101 .... .... 1111 .... .... @ldrex_i
+LDREXB 1110 1000 1101 .... .... 1111 0100 1111 @ldrex_0
+LDREXH 1110 1000 1101 .... .... 1111 0101 1111 @ldrex_0
+LDREXD_t32 1110 1000 1101 .... .... .... 0111 1111 @ldrex_d
+
+LDAEX 1110 1000 1101 .... .... 1111 1110 1111 @ldrex_0
+LDAEXB 1110 1000 1101 .... .... 1111 1100 1111 @ldrex_0
+LDAEXH 1110 1000 1101 .... .... 1111 1101 1111 @ldrex_0
+LDAEXD_t32 1110 1000 1101 .... .... .... 1111 1111 @ldrex_d
+
+LDA 1110 1000 1101 .... .... 1111 1010 1111 @ldrex_0
+LDAB 1110 1000 1101 .... .... 1111 1000 1111 @ldrex_0
+LDAH 1110 1000 1101 .... .... 1111 1001 1111 @ldrex_0
+
+&tbranch rn rm
+@tbranch .... .... .... rn:4 .... .... .... rm:4 &tbranch
+
+TBB 1110 1000 1101 .... 1111 0000 0000 .... @tbranch
+TBH 1110 1000 1101 .... 1111 0000 0001 .... @tbranch
+
+# Parallel addition and subtraction
+
+SADD8 1111 1010 1000 .... 1111 .... 0000 .... @rndm
+QADD8 1111 1010 1000 .... 1111 .... 0001 .... @rndm
+SHADD8 1111 1010 1000 .... 1111 .... 0010 .... @rndm
+UADD8 1111 1010 1000 .... 1111 .... 0100 .... @rndm
+UQADD8 1111 1010 1000 .... 1111 .... 0101 .... @rndm
+UHADD8 1111 1010 1000 .... 1111 .... 0110 .... @rndm
+
+SADD16 1111 1010 1001 .... 1111 .... 0000 .... @rndm
+QADD16 1111 1010 1001 .... 1111 .... 0001 .... @rndm
+SHADD16 1111 1010 1001 .... 1111 .... 0010 .... @rndm
+UADD16 1111 1010 1001 .... 1111 .... 0100 .... @rndm
+UQADD16 1111 1010 1001 .... 1111 .... 0101 .... @rndm
+UHADD16 1111 1010 1001 .... 1111 .... 0110 .... @rndm
+
+SASX 1111 1010 1010 .... 1111 .... 0000 .... @rndm
+QASX 1111 1010 1010 .... 1111 .... 0001 .... @rndm
+SHASX 1111 1010 1010 .... 1111 .... 0010 .... @rndm
+UASX 1111 1010 1010 .... 1111 .... 0100 .... @rndm
+UQASX 1111 1010 1010 .... 1111 .... 0101 .... @rndm
+UHASX 1111 1010 1010 .... 1111 .... 0110 .... @rndm
+
+SSUB8 1111 1010 1100 .... 1111 .... 0000 .... @rndm
+QSUB8 1111 1010 1100 .... 1111 .... 0001 .... @rndm
+SHSUB8 1111 1010 1100 .... 1111 .... 0010 .... @rndm
+USUB8 1111 1010 1100 .... 1111 .... 0100 .... @rndm
+UQSUB8 1111 1010 1100 .... 1111 .... 0101 .... @rndm
+UHSUB8 1111 1010 1100 .... 1111 .... 0110 .... @rndm
+
+SSUB16 1111 1010 1101 .... 1111 .... 0000 .... @rndm
+QSUB16 1111 1010 1101 .... 1111 .... 0001 .... @rndm
+SHSUB16 1111 1010 1101 .... 1111 .... 0010 .... @rndm
+USUB16 1111 1010 1101 .... 1111 .... 0100 .... @rndm
+UQSUB16 1111 1010 1101 .... 1111 .... 0101 .... @rndm
+UHSUB16 1111 1010 1101 .... 1111 .... 0110 .... @rndm
+
+SSAX 1111 1010 1110 .... 1111 .... 0000 .... @rndm
+QSAX 1111 1010 1110 .... 1111 .... 0001 .... @rndm
+SHSAX 1111 1010 1110 .... 1111 .... 0010 .... @rndm
+USAX 1111 1010 1110 .... 1111 .... 0100 .... @rndm
+UQSAX 1111 1010 1110 .... 1111 .... 0101 .... @rndm
+UHSAX 1111 1010 1110 .... 1111 .... 0110 .... @rndm
+
+# Register extends
+
+@rrr_rot .... .... .... rn:4 .... rd:4 .. rot:2 rm:4 &rrr_rot
+
+SXTAH 1111 1010 0000 .... 1111 .... 10.. .... @rrr_rot
+UXTAH 1111 1010 0001 .... 1111 .... 10.. .... @rrr_rot
+SXTAB16 1111 1010 0010 .... 1111 .... 10.. .... @rrr_rot
+UXTAB16 1111 1010 0011 .... 1111 .... 10.. .... @rrr_rot
+SXTAB 1111 1010 0100 .... 1111 .... 10.. .... @rrr_rot
+UXTAB 1111 1010 0101 .... 1111 .... 10.. .... @rrr_rot
+
+# Load/store multiple
+
+@ldstm .... .... .. w:1 . rn:4 list:16 &ldst_block u=0
+
+STM_t32 1110 1000 10.0 .... ................ @ldstm i=1 b=0
+STM_t32 1110 1001 00.0 .... ................ @ldstm i=0 b=1
+{
+ # Rn=15 UNDEFs for LDM; M-profile CLRM uses that encoding
+ CLRM 1110 1000 1001 1111 list:16
+ LDM_t32 1110 1000 10.1 .... ................ @ldstm i=1 b=0
+}
+LDM_t32 1110 1001 00.1 .... ................ @ldstm i=0 b=1
+
+&rfe !extern rn w pu
+@rfe .... .... .. w:1 . rn:4 ................ &rfe
+
+RFE 1110 1000 00.1 .... 1100000000000000 @rfe pu=2
+RFE 1110 1001 10.1 .... 1100000000000000 @rfe pu=1
+
+&srs !extern mode w pu
+@srs .... .... .. w:1 . .... ........... mode:5 &srs
+
+SRS 1110 1000 00.0 1101 1100 0000 000. .... @srs pu=2
+SRS 1110 1001 10.0 1101 1100 0000 000. .... @srs pu=1
+
+# Coprocessor instructions
+
+# We decode MCR, MRC, MRRC and MCRR only, because for QEMU the
+# other coprocessor instructions always UNDEF.
+# The trans_ functions for these will ignore cp values 8..13 for v7 or
+# earlier, and 0..13 for v8 and later, because those areas of the
+# encoding space may be used for other things, such as VFP or Neon.
+
+@mcr .... .... opc1:3 . crn:4 rt:4 cp:4 opc2:3 . crm:4
+@mcrr .... .... .... rt2:4 rt:4 cp:4 opc1:4 crm:4
+
+MCRR 1110 1100 0100 .... .... .... .... .... @mcrr
+MRRC 1110 1100 0101 .... .... .... .... .... @mcrr
+
+MCR 1110 1110 ... 0 .... .... .... ... 1 .... @mcr
+MRC 1110 1110 ... 1 .... .... .... ... 1 .... @mcr
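+
+# Informative example: the MIDR read "MRC p15, 0, r0, c0, c0, 0" matches
+# the MRC pattern above with cp=15, opc1=0, crn=0, crm=0, opc2=0, rt=0.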
+
+# Branches
+
+%imm24 26:s1 13:1 11:1 16:10 0:11 !function=t32_branch24
+@branch24 ................................ &i imm=%imm24
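+
+# Informative note: imm24 gathers S:J1:J2:imm10:imm11; t32_branch24
+# converts J1/J2 into I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) and
+# appends the final zero, per the architected T32 B/BL immediate.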
+
+B 1111 0. .......... 10.1 ............ @branch24
+BL 1111 0. .......... 11.1 ............ @branch24
+{
+ # BLX_i is non-M-profile only
+ BLX_i 1111 0. .......... 11.0 ............ @branch24
+ # M-profile only: loop and branch insns
+ [
+ # All these BF insns have boff != 0b0000; we NOP them all
+ BF 1111 0 boff:4 ------- 1100 - ---------- 1 # BFL
+ BF 1111 0 boff:4 0 ------ 1110 - ---------- 1 # BFCSEL
+ BF 1111 0 boff:4 10 ----- 1110 - ---------- 1 # BF
+ BF 1111 0 boff:4 11 ----- 1110 0 0000000000 1 # BFX, BFLX
+ ]
+ [
+ # LE and WLS immediate
+ %lob_imm 1:10 11:1 !function=times_2
+
+ DLS 1111 0 0000 100 rn:4 1110 0000 0000 0001 size=4
+ WLS 1111 0 0000 100 rn:4 1100 . .......... 1 imm=%lob_imm size=4
+ {
+ LE 1111 0 0000 0 f:1 tp:1 1111 1100 . .......... 1 imm=%lob_imm
+ # This is WLSTP
+ WLS 1111 0 0000 0 size:2 rn:4 1100 . .......... 1 imm=%lob_imm
+ }
+ {
+ LCTP 1111 0 0000 000 1111 1110 0000 0000 0001
+ # This is DLSTP
+ DLS 1111 0 0000 0 size:2 rn:4 1110 0000 0000 0001
+ }
+ VCTP 1111 0 0000 0 size:2 rn:4 1110 1000 0000 0001
+ ]
+}
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
new file mode 100644
index 000000000..12a934e92
--- /dev/null
+++ b/target/arm/tlb_helper.c
@@ -0,0 +1,222 @@
+/*
+ * ARM TLB (Translation lookaside buffer) helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+
+static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+ unsigned int target_el,
+ bool same_el, bool ea,
+ bool s1ptw, bool is_write,
+ int fsc)
+{
+ uint32_t syn;
+
+ /*
+ * ISV is only set for data aborts routed to EL2 and
+ * never for stage-1 page table walks faulting on stage 2.
+ *
+ * Furthermore, ISV is only set for certain kinds of load/stores.
+ * If the template syndrome does not have ISV set, we should leave
+ * it cleared.
+ *
+ * See ARMv8 specs, D7-1974:
+ * ISS encoding for an exception from a Data Abort, the
+ * ISV field.
+ */
+ if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
+ syn = syn_data_abort_no_iss(same_el, 0,
+ ea, 0, s1ptw, is_write, fsc);
+ } else {
+ /*
+ * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+ * syndrome created at translation time.
+ * Now we create the runtime syndrome with the remaining fields.
+ */
+ syn = syn_data_abort_with_iss(same_el,
+ 0, 0, 0, 0, 0,
+ ea, 0, s1ptw, is_write, fsc,
+ true);
+ /* Merge the runtime syndrome with the template syndrome. */
+ syn |= template_syn;
+ }
+ return syn;
+}
+
+static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
+{
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+ uint32_t syn, exc, fsr, fsc;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
+
+ target_el = exception_target_el(env);
+ if (fi->stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
+ if (arm_is_secure_below_el3(env) && fi->s1ns) {
+ env->cp15.hpfar_el2 |= HPFAR_NS;
+ }
+ }
+ same_el = (arm_current_el(env) == target_el);
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+ /*
+ * LPAE format fault status register : bottom 6 bits are
+ * status code in the same form as needed for syndrome
+ */
+ fsr = arm_fi_to_lfsc(fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(fi);
+ /*
+ * Short format FSR : this fault will never actually be reported
+ * to an EL that uses a syndrome register. Use a (currently)
+ * reserved FSR code in case the constructed syndrome does leak
+ * into the guest somehow.
+ */
+ fsc = 0x3f;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
+ exc = EXCP_PREFETCH_ABORT;
+ } else {
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi->ea, fi->s1ptw,
+ access_type == MMU_DATA_STORE,
+ fsc);
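+        /* Short-descriptor DFSR bit 11 is WnR: set it for writes on v6+. */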
+ if (access_type == MMU_DATA_STORE
+ && arm_feature(env, ARM_FEATURE_V6)) {
+ fsr |= (1 << 11);
+ }
+ exc = EXCP_DATA_ABORT;
+ }
+
+ env->exception.vaddress = addr;
+ env->exception.fsr = fsr;
+ raise_exception(env, exc, syn, target_el);
+}
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.type = ARMFault_Alignment;
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+/*
+ * arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.ea = arm_extabort_type(response);
+ fi.type = ARMFault_SyncExternal;
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+}
+
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMCacheAttrs cacheattrs = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
+ * register format, and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size,
+ &fi, &cacheattrs);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ /* Notice and record tagged memory. */
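+        /* (Informative: MAIR attribute 0xf0 is the Tagged Normal encoding.) */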
+ if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
+ arm_tlb_mte_tagged(&attrs) = true;
+ }
+
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+}
+#else
+void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra)
+{
+ ARMMMUFaultInfo fi = {
+ .type = maperr ? ARMFault_Translation : ARMFault_Permission,
+ .level = 3,
+ };
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ /*
+ * We report both ESR and FAR to signal handlers.
+ * For now, it's easiest to deliver the fault normally.
+ */
+ cpu_restore_state(cs, ra, true);
+ arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
+}
+
+void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra)
+{
+ arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
+}
+#endif /* !defined(CONFIG_USER_ONLY) */
diff --git a/target/arm/trace-events b/target/arm/trace-events
new file mode 100644
index 000000000..2a0ba7bff
--- /dev/null
+++ b/target/arm/trace-events
@@ -0,0 +1,13 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# helper.c
+arm_gt_recalc(int timer, int irqstate, uint64_t nexttick) "gt recalc: timer %d irqstate %d next tick 0x%" PRIx64
+arm_gt_recalc_disabled(int timer) "gt recalc: timer %d irqstate 0 timer disabled"
+arm_gt_cval_write(int timer, uint64_t value) "gt_cval_write: timer %d value 0x%" PRIx64
+arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value 0x%" PRIx64
+arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value 0x%" PRIx64
+arm_gt_imask_toggle(int timer, int irqstate) "gt_ctl_write: timer %d IMASK toggle, new irqstate %d"
+arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value 0x%" PRIx64
+
+# kvm.c
+kvm_arm_fixup_msi_route(uint64_t iova, uint64_t gpa) "MSI iova = 0x%"PRIx64" is translated into 0x%"PRIx64
diff --git a/target/arm/trace.h b/target/arm/trace.h
new file mode 100644
index 000000000..60372d8e2
--- /dev/null
+++ b/target/arm/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_arm.h"
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
new file mode 100644
index 000000000..17af8dc95
--- /dev/null
+++ b/target/arm/translate-a32.h
@@ -0,0 +1,154 @@
+/*
+ * AArch32 translation, common definitions.
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_TRANSLATE_A32_H
+#define TARGET_ARM_TRANSLATE_A32_H
+
+/* Prototypes for autogenerated disassembler functions */
+bool disas_m_nocp(DisasContext *dc, uint32_t insn);
+bool disas_mve(DisasContext *dc, uint32_t insn);
+bool disas_vfp(DisasContext *s, uint32_t insn);
+bool disas_vfp_uncond(DisasContext *s, uint32_t insn);
+bool disas_neon_dp(DisasContext *s, uint32_t insn);
+bool disas_neon_ls(DisasContext *s, uint32_t insn);
+bool disas_neon_shared(DisasContext *s, uint32_t insn);
+
+void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
+void arm_gen_condlabel(DisasContext *s);
+bool vfp_access_check(DisasContext *s);
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update);
+void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
+void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
+void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
+void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop);
+TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs);
+void gen_set_cpsr(TCGv_i32 var, uint32_t mask);
+void gen_set_condexec(DisasContext *s);
+void gen_set_pc_im(DisasContext *s, target_ulong val);
+void gen_lookup_tb(DisasContext *s);
+long vfp_reg_offset(bool dp, unsigned reg);
+long neon_full_reg_offset(unsigned reg);
+long neon_element_offset(int reg, int element, MemOp memop);
+void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
+void clear_eci_state(DisasContext *s);
+bool mve_eci_check(DisasContext *s);
+void mve_update_eci(DisasContext *s);
+void mve_update_and_store_eci(DisasContext *s);
+bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
+
+static inline TCGv_i32 load_cpu_offset(int offset)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
+
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
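+
+/*
+ * Informative example: load_cpu_field(condexec_bits) returns a fresh
+ * temporary holding env->condexec_bits.
+ */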
+
+static inline void store_cpu_offset(TCGv_i32 var, int offset)
+{
+ tcg_gen_st_i32(var, cpu_env, offset);
+ tcg_temp_free_i32(var);
+}
+
+#define store_cpu_field(var, name) \
+ store_cpu_offset(var, offsetof(CPUARMState, name))
+
+#define store_cpu_field_constant(val, name) \
+ tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, offsetof(CPUARMState, name))
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv_i32 load_reg(DisasContext *s, int reg)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+void store_reg(DisasContext *s, int reg, TCGv_i32 var);
+
+void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc);
+
+#define DO_GEN_LD(SUFF, OPC) \
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
+ }
+
+#define DO_GEN_ST(SUFF, OPC) \
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
+ }
+
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q);
+}
+
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_st_i64(s, val, a32, index, MO_Q);
+}
+
+DO_GEN_LD(8u, MO_UB)
+DO_GEN_LD(16u, MO_UW)
+DO_GEN_LD(32u, MO_UL)
+DO_GEN_ST(8, MO_UB)
+DO_GEN_ST(16, MO_UW)
+DO_GEN_ST(32, MO_UL)
+
+#undef DO_GEN_LD
+#undef DO_GEN_ST
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) (s->user)
+#endif
+
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
+
+/* Swap low and high halfwords. */
+static inline void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
+{
+ tcg_gen_rotri_i32(dest, var, 16);
+}
+
+#endif /* TARGET_ARM_TRANSLATE_A32_H */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
new file mode 100644
index 000000000..cec672f22
--- /dev/null
+++ b/target/arm/translate-a64.c
@@ -0,0 +1,14963 @@
+/*
+ * AArch64 translation
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "qemu/log.h"
+#include "arm_ldst.h"
+#include "translate.h"
+#include "internals.h"
+#include "qemu/host-utils.h"
+
+#include "semihosting/semihost.h"
+#include "exec/gen-icount.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+
+#include "translate-a64.h"
+#include "qemu/atomic128.h"
+
+static TCGv_i64 cpu_X[32];
+static TCGv_i64 cpu_pc;
+
+/* Load/store exclusive handling */
+static TCGv_i64 cpu_exclusive_high;
+
+static const char *regnames[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
+};
+
+enum a64_shift_type {
+ A64_SHIFT_TYPE_LSL = 0,
+ A64_SHIFT_TYPE_LSR = 1,
+ A64_SHIFT_TYPE_ASR = 2,
+ A64_SHIFT_TYPE_ROR = 3
+};
+
+/* Table based decoder typedefs - used when the relevant bits for decode
+ * are too awkwardly scattered across the instruction (eg SIMD).
+ */
+typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
+
+typedef struct AArch64DecodeTable {
+ uint32_t pattern;
+ uint32_t mask;
+ AArch64DecodeFn *disas_fn;
+} AArch64DecodeTable;
+
+/* initialize TCG globals. */
+void a64_translate_init(void)
+{
+ int i;
+
+ cpu_pc = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, pc),
+ "pc");
+ for (i = 0; i < 32; i++) {
+ cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, xregs[i]),
+ regnames[i]);
+ }
+
+ cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
+}
+
+/*
+ * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
+ */
+static int get_a64_user_mem_index(DisasContext *s)
+{
+ /*
+ * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
+ * which is the usual mmu_idx for this cpu state.
+ */
+ ARMMMUIdx useridx = s->mmu_idx;
+
+ if (s->unpriv) {
+ /*
+ * We have pre-computed the condition for AccType_UNPRIV.
+ * Therefore we should never get here with a mmu_idx for
+ * which we do not know the corresponding user mmu_idx.
+ */
+ switch (useridx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ useridx = ARMMMUIdx_E10_0;
+ break;
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ useridx = ARMMMUIdx_E20_0;
+ break;
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ useridx = ARMMMUIdx_SE10_0;
+ break;
+ case ARMMMUIdx_SE20_2:
+ case ARMMMUIdx_SE20_2_PAN:
+ useridx = ARMMMUIdx_SE20_0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return arm_to_core_mmu_idx(useridx);
+}
+
+static void reset_btype(DisasContext *s)
+{
+ if (s->btype != 0) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ tcg_gen_st_i32(zero, cpu_env, offsetof(CPUARMState, btype));
+ tcg_temp_free_i32(zero);
+ s->btype = 0;
+ }
+}
+
+static void set_btype(DisasContext *s, int val)
+{
+ TCGv_i32 tcg_val;
+
+ /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
+ tcg_debug_assert(val >= 1 && val <= 3);
+
+ tcg_val = tcg_const_i32(val);
+ tcg_gen_st_i32(tcg_val, cpu_env, offsetof(CPUARMState, btype));
+ tcg_temp_free_i32(tcg_val);
+ s->btype = -1;
+}
+
+void gen_a64_set_pc_im(uint64_t val)
+{
+ tcg_gen_movi_i64(cpu_pc, val);
+}
+
+/*
+ * Handle Top Byte Ignore (TBI) bits.
+ *
+ * If address tagging is enabled via the TCR TBI bits:
+ * + for EL2 and EL3 there is only one TBI bit, and if it is set
+ * then the address is zero-extended, clearing bits [63:56]
+ * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
+ *    and TBI1 controls addresses with bit 55 == 1.
+ * If the appropriate TBI bit is set for the address then
+ * the address is sign-extended from bit 55 into bits [63:56]
+ *
+ * Here we have concatenated TBI{1,0} into tbi.
+ */
+static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
+ TCGv_i64 src, int tbi)
+{
+ if (tbi == 0) {
+ /* Load unmodified address */
+ tcg_gen_mov_i64(dst, src);
+ } else if (!regime_has_2_ranges(s->mmu_idx)) {
+ /* Force tag byte to all zero */
+ tcg_gen_extract_i64(dst, src, 0, 56);
+ } else {
+ /* Sign-extend from bit 55. */
+ tcg_gen_sextract_i64(dst, src, 0, 56);
+
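+        /*
+         * After the sign-extension, bits [63:56] of dst are copies of
+         * bit 55. AND-ing with src then applies the extension (clearing
+         * the tag) only when bit 55 == 0; OR-ing applies it (setting
+         * the tag byte to all-ones) only when bit 55 == 1. In the other
+         * case each op leaves src's original tag byte unchanged.
+         */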
+ switch (tbi) {
+ case 1:
+ /* tbi0 but !tbi1: only use the extension if positive */
+ tcg_gen_and_i64(dst, dst, src);
+ break;
+ case 2:
+ /* !tbi0 but tbi1: only use the extension if negative */
+ tcg_gen_or_i64(dst, dst, src);
+ break;
+ case 3:
+ /* tbi0 and tbi1: always use the extension */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
+{
+ /*
+ * If address tagging is enabled for instructions via the TCR TBI bits,
+ * then loading an address into the PC will clear out any tag.
+ */
+ gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
+}
+
+/*
+ * Handle MTE and/or TBI.
+ *
+ * For TBI, ideally, we would do nothing. Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register. But for user-only
+ * mode we do not have a TLB with which to implement this, so we must
+ * remove the top byte now.
+ *
+ * Always return a fresh temporary that we can increment independently
+ * of the write-back address.
+ */
+
+TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
+{
+ TCGv_i64 clean = new_tmp_a64(s);
+#ifdef CONFIG_USER_ONLY
+ gen_top_byte_ignore(s, clean, addr, s->tbid);
+#else
+ tcg_gen_mov_i64(clean, addr);
+#endif
+ return clean;
+}
+
+/* Insert a zero tag into src, with the result at dst. */
+static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
+{
+ tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
+}
+
+static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
+ MMUAccessType acc, int log2_size)
+{
+ TCGv_i32 t_acc = tcg_const_i32(acc);
+ TCGv_i32 t_idx = tcg_const_i32(get_mem_index(s));
+ TCGv_i32 t_size = tcg_const_i32(1 << log2_size);
+
+ gen_helper_probe_access(cpu_env, ptr, t_acc, t_idx, t_size);
+ tcg_temp_free_i32(t_acc);
+ tcg_temp_free_i32(t_idx);
+ tcg_temp_free_i32(t_size);
+}
+
+/*
+ * For MTE, check a single logical or atomic access. This probes a single
+ * address, the exact one specified. The size and alignment of the access
+ * are not relevant to MTE, per se, but watchpoints do require the size,
+ * and we want to recognize those before making any other changes to state.
+ */
+static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
+ bool is_write, bool tag_checked,
+ int log2_size, bool is_unpriv,
+ int core_idx)
+{
+ if (tag_checked && s->mte_active[is_unpriv]) {
+ TCGv_i32 tcg_desc;
+ TCGv_i64 ret;
+ int desc = 0;
+
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);
+ tcg_desc = tcg_const_i32(desc);
+
+ ret = new_tmp_a64(s);
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
+ tcg_temp_free_i32(tcg_desc);
+
+ return ret;
+ }
+ return clean_data_tbi(s, addr);
+}
+
+TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
+ bool tag_checked, int log2_size)
+{
+ return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
+ false, get_mem_index(s));
+}
+
+/*
+ * For MTE, check multiple logical sequential accesses.
+ */
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+ bool tag_checked, int size)
+{
+ if (tag_checked && s->mte_active[0]) {
+ TCGv_i32 tcg_desc;
+ TCGv_i64 ret;
+ int desc = 0;
+
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
+ tcg_desc = tcg_const_i32(desc);
+
+ ret = new_tmp_a64(s);
+ gen_helper_mte_check(ret, cpu_env, tcg_desc, addr);
+ tcg_temp_free_i32(tcg_desc);
+
+ return ret;
+ }
+ return clean_data_tbi(s, addr);
+}
+
+typedef struct DisasCompare64 {
+ TCGCond cond;
+ TCGv_i64 value;
+} DisasCompare64;
+
+static void a64_test_cc(DisasCompare64 *c64, int cc)
+{
+ DisasCompare c32;
+
+ arm_test_cc(&c32, cc);
+
+ /* Sign-extend the 32-bit value so that the GE/LT comparisons work
+ * properly. The NE/EQ comparisons are also fine with this choice. */
+ c64->cond = c32.cond;
+ c64->value = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(c64->value, c32.value);
+
+ arm_free_cc(&c32);
+}
+
+static void a64_free_cc(DisasCompare64 *c64)
+{
+ tcg_temp_free_i64(c64->value);
+}
+
+static void gen_exception_internal(int excp)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception_internal_insn(DisasContext *s, uint64_t pc, int excp)
+{
+ gen_a64_set_pc_im(pc);
+ gen_exception_internal(excp);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
+{
+ TCGv_i32 tcg_syn;
+
+ gen_a64_set_pc_im(s->pc_curr);
+ tcg_syn = tcg_const_i32(syndrome);
+ gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
+ tcg_temp_free_i32(tcg_syn);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+    /* We just completed a step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_swstep_exception(s, 1, s->is_ldex);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
+{
+ if (s->ss_active) {
+ return false;
+ }
+ return translator_use_goto_tb(&s->base, dest);
+}
+
+static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
+{
+ if (use_goto_tb(s, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_a64_set_pc_im(dest);
+ tcg_gen_exit_tb(s->base.tb, n);
+ s->base.is_jmp = DISAS_NORETURN;
+ } else {
+ gen_a64_set_pc_im(dest);
+ if (s->ss_active) {
+ gen_step_complete_exception(s);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ s->base.is_jmp = DISAS_NORETURN;
+ }
+ }
+}
+
+static void init_tmp_a64_array(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
+#endif
+ s->tmp_a64_count = 0;
+}
+
+static void free_tmp_a64(DisasContext *s)
+{
+ int i;
+ for (i = 0; i < s->tmp_a64_count; i++) {
+ tcg_temp_free_i64(s->tmp_a64[i]);
+ }
+ init_tmp_a64_array(s);
+}
+
+TCGv_i64 new_tmp_a64(DisasContext *s)
+{
+ assert(s->tmp_a64_count < TMP_A64_MAX);
+ return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
+}
+
+TCGv_i64 new_tmp_a64_local(DisasContext *s)
+{
+ assert(s->tmp_a64_count < TMP_A64_MAX);
+ return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64();
+}
+
+TCGv_i64 new_tmp_a64_zero(DisasContext *s)
+{
+ TCGv_i64 t = new_tmp_a64(s);
+ tcg_gen_movi_i64(t, 0);
+ return t;
+}
+
+/*
+ * Register access functions
+ *
+ * These functions are used for directly accessing a register where
+ * changes to the final register value are likely to be made. If you
+ * need to use a register for temporary calculation (e.g. index type
+ * operations) use the read_* form.
+ *
+ * B1.2.1 Register mappings
+ *
+ * In the instruction encoding, register 31 can refer to ZR (zero register) or
+ * the SP (stack pointer) depending on context. In QEMU's case we map SP
+ * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
+ * This is the point of the _sp forms.
+ */
+TCGv_i64 cpu_reg(DisasContext *s, int reg)
+{
+ if (reg == 31) {
+ return new_tmp_a64_zero(s);
+ } else {
+ return cpu_X[reg];
+ }
+}
+
+/* register access for when 31 == SP */
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
+{
+ return cpu_X[reg];
+}
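+
+/*
+ * Informative example: A64 load/store base registers (Rn) treat
+ * register 31 as SP and so use cpu_reg_sp(), while data registers
+ * treat 31 as XZR and use cpu_reg().
+ */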
+
+/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
+ * representing the register contents. This TCGv is an auto-freed
+ * temporary so it need not be explicitly freed, and may be modified.
+ */
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (reg != 31) {
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ } else {
+ tcg_gen_movi_i64(v, 0);
+ }
+ return v;
+}
+
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ return v;
+}
+
+/* Return the offset into CPUARMState of a slice (from
+ * the least significant end) of FP register Qn (ie
+ * Dn, Sn, Hn or Bn).
+ * (Note that this is not the same mapping as for A32; see cpu.h)
+ */
+static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
+{
+ return vec_reg_offset(s, regno, 0, size);
+}
+
+/* Offset of the high half of the 128 bit vector Qn */
+static inline int fp_reg_hi_offset(DisasContext *s, int regno)
+{
+ return vec_reg_offset(s, regno, 1, MO_64);
+}
+
+/* Convenience accessors for reading and writing single and double
+ * FP registers. Writing clears the upper parts of the associated
+ * 128 bit vector register, as required by the architecture.
+ * Note that unlike the GP register accessors, the values returned
+ * by the read functions must be manually freed.
+ */
+static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
+{
+ TCGv_i64 v = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
+ return v;
+}
+
+static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
+ return v;
+}
+
+static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
+ return v;
+}
+
+/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
+ * If SVE is not enabled, then there are only 128 bits in the vector.
+ */
+static void clear_vec_high(DisasContext *s, bool is_q, int rd)
+{
+ unsigned ofs = fp_reg_offset(s, rd, MO_64);
+ unsigned vsz = vec_full_reg_size(s);
+
+ /* Nop move, with side effect of clearing the tail. */
+ tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
+}
+
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
+{
+ unsigned ofs = fp_reg_offset(s, reg, MO_64);
+
+ tcg_gen_st_i64(v, cpu_env, ofs);
+ clear_vec_high(s, false, reg);
+}
+
+static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp, v);
+ write_fp_dreg(s, reg, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
+ GVecGen2Fn *gvec_fn, int vece)
+{
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 2-operand + immediate AdvSIMD vector operation using
+ * an expander function.
+ */
+static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
+ int64_t imm, GVecGen2iFn *gvec_fn, int vece)
+{
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ imm, is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
+ GVecGen3Fn *gvec_fn, int vece)
+{
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
+ int rx, GVecGen4Fn *gvec_fn, int vece)
+{
+ gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
+ is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 2-operand operation using an out-of-line helper. */
+static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
+ int rn, int data, gen_helper_gvec_2 *fn)
+{
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
+
+/* Expand a 3-operand operation using an out-of-line helper. */
+static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
+ int rn, int rm, int data, gen_helper_gvec_3 *fn)
+{
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
+
+/* Expand a 3-operand + fpstatus pointer + simd data value operation using
+ * an out-of-line helper.
+ */
+static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, bool is_fp16, int data,
+ gen_helper_gvec_3_ptr *fn)
+{
+ TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+ tcg_temp_free_ptr(fpst);
+}
+
+/* Expand a 3-operand + qc + operation using an out-of-line helper. */
+static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, gen_helper_gvec_3_ptr *fn)
+{
+ TCGv_ptr qc_ptr = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), qc_ptr,
+ is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
+ tcg_temp_free_ptr(qc_ptr);
+}
+
+/* Expand a 4-operand operation using an out-of-line helper. */
+static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, int data, gen_helper_gvec_4 *fn)
+{
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+}
+
+/*
+ * Expand a 4-operand + fpstatus pointer + simd data value operation using
+ * an out-of-line helper.
+ */
+static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
+ int rm, int ra, bool is_fp16, int data,
+ gen_helper_gvec_4_ptr *fn)
+{
+ TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
+ tcg_temp_free_ptr(fpst);
+}
+
+/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
+ * than the 32 bit equivalent.
+ */
+static inline void gen_set_NZ64(TCGv_i64 result)
+{
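+    /*
+     * Split the result into its 32-bit halves: cpu_NF gets the high
+     * half, whose bit 31 is the 64-bit sign bit, and OR-ing both
+     * halves into cpu_ZF leaves it zero iff the whole result is zero.
+     */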
+ tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
+}
+
+/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
+static inline void gen_logic_CC(int sf, TCGv_i64 result)
+{
+ if (sf) {
+ gen_set_NZ64(result);
+ } else {
+ tcg_gen_extrl_i64_i32(cpu_ZF, result);
+ tcg_gen_mov_i32(cpu_NF, cpu_ZF);
+ }
+ tcg_gen_movi_i32(cpu_CF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+}
+
+/* dest = T0 + T1; compute C, N, V and Z flags */
+static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, flag, tmp;
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
+
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(flag);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ }
+}
+
+/* dest = T0 - T1; compute C, N, V and Z flags */
+static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ /* 64 bit arithmetic */
+ TCGv_i64 result, flag, tmp;
+
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tcg_gen_sub_i64(result, t0, t1);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_and_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(flag);
+ tcg_temp_free_i64(result);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp;
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+ }
+}
+
+/* dest = T0 + T1 + CF; do not compute flags. */
+static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i64 flag = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(flag, cpu_CF);
+ tcg_gen_add_i64(dest, t0, t1);
+ tcg_gen_add_i64(dest, dest, flag);
+ tcg_temp_free_i64(flag);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+}
+
+/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
+static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, cf_64, vf_64, tmp;
+ result = tcg_temp_new_i64();
+ cf_64 = tcg_temp_new_i64();
+ vf_64 = tcg_temp_new_i64();
+ tmp = tcg_const_i64(0);
+
+ tcg_gen_extu_i32_i64(cf_64, cpu_CF);
+ tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
+ tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
+ tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(vf_64, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(vf_64, vf_64, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
+
+ tcg_gen_mov_i64(dest, result);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(vf_64);
+ tcg_temp_free_i64(cf_64);
+ tcg_temp_free_i64(result);
+ } else {
+ TCGv_i32 t0_32, t1_32, tmp;
+ t0_32 = tcg_temp_new_i32();
+ t1_32 = tcg_temp_new_i32();
+ tmp = tcg_const_i32(0);
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
+
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t1_32);
+ tcg_temp_free_i32(t0_32);
+ }
+}
+
+/*
+ * Load/Store generators
+ */
+
+/*
+ * Store from GPR register to memory.
+ */
+static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, MemOp memop, int memidx,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ memop = finalize_memop(s, memop);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ (memop & MO_SIZE),
+ false,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
+}
+
+static void do_gpr_st(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, MemOp memop,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
+}
+
+/*
+ * Load from memory to GPR register
+ */
+static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend, int memidx,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ memop = finalize_memop(s, memop);
+ tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
+
+ if (extend && (memop & MO_SIGN)) {
+ g_assert((memop & MO_SIZE) <= MO_32);
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ (memop & MO_SIZE),
+ (memop & MO_SIGN) != 0,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
+}
+
+static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
+ MemOp memop, bool extend,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
+}
+
+/*
+ * Store from FP register to memory
+ */
+static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This writes the bottom N bits of a 128 bit wide vector to memory */
+ TCGv_i64 tmplo = tcg_temp_new_i64();
+ MemOp mop;
+
+ tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+
+ if (size < 4) {
+ mop = finalize_memop(s, size);
+ tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
+ } else {
+ bool be = s->be_data == MO_BE;
+ TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+ TCGv_i64 tmphi = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
+
+ mop = s->be_data | MO_Q;
+ tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
+ get_mem_index(s), mop);
+
+ tcg_temp_free_i64(tcg_hiaddr);
+ tcg_temp_free_i64(tmphi);
+ }
+
+ tcg_temp_free_i64(tmplo);
+}
+
+/*
+ * Load from memory to FP register
+ */
+static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This always zero-extends and writes to a full 128 bit wide vector */
+ TCGv_i64 tmplo = tcg_temp_new_i64();
+ TCGv_i64 tmphi = NULL;
+ MemOp mop;
+
+ if (size < 4) {
+ mop = finalize_memop(s, size);
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
+ } else {
+ bool be = s->be_data == MO_BE;
+ TCGv_i64 tcg_hiaddr;
+
+ tmphi = tcg_temp_new_i64();
+ tcg_hiaddr = tcg_temp_new_i64();
+
+ mop = s->be_data | MO_Q;
+ tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
+ mop | (s->align_mem ? MO_ALIGN_16 : 0));
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
+ get_mem_index(s), mop);
+ tcg_temp_free_i64(tcg_hiaddr);
+ }
+
+ tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
+ tcg_temp_free_i64(tmplo);
+
+ if (tmphi) {
+ tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
+ tcg_temp_free_i64(tmphi);
+ }
+ clear_vec_high(s, tmphi != NULL, destidx);
+}
+
+/*
+ * Vector load/store helpers.
+ *
+ * The principal difference between this and a FP load is that we don't
+ * zero extend as we are filling a partial chunk of the vector register.
+ * These functions don't support 128 bit loads/stores, which would be
+ * normal load/store operations.
+ *
+ * The _i32 versions are useful when operating on 32 bit quantities
+ * (eg for floating point single or using Neon helper functions).
+ */
+
+/* Get value of an element within a vector register */
+static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
+ int element, MemOp memop)
+{
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
+ switch ((unsigned)memop) {
+ case MO_8:
+ tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_8|MO_SIGN:
+ tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16|MO_SIGN:
+ tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32|MO_SIGN:
+ tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_64:
+ case MO_64|MO_SIGN:
+ tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
+ int element, MemOp memop)
+{
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_8|MO_SIGN:
+ tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16|MO_SIGN:
+ tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32:
+ case MO_32|MO_SIGN:
+ tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Set value of an element within a vector register */
+static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
+ int element, MemOp memop)
+{
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
+ int destidx, int element, MemOp memop)
+{
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Store from vector register to memory */
+static void do_vec_st(DisasContext *s, int srcidx, int element,
+ TCGv_i64 tcg_addr, MemOp mop)
+{
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Load from memory to vector register */
+static void do_vec_ld(DisasContext *s, int destidx, int element,
+ TCGv_i64 tcg_addr, MemOp mop)
+{
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
+ write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Check that FP/Neon access is enabled. If it is, return
+ * true. If not, emit code to generate an appropriate exception,
+ * and return false; the caller should not emit any code for
+ * the instruction. Note that this check must happen after all
+ * unallocated-encoding checks (otherwise the syndrome information
+ * for the resulting exception will be incorrect).
+ */
+static bool fp_access_check(DisasContext *s)
+{
+ if (s->fp_excp_el) {
+ assert(!s->fp_access_checked);
+ s->fp_access_checked = true;
+
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ return false;
+ }
+ s->fp_access_checked = true;
+ return true;
+}
+
+/* Check that SVE access is enabled. If it is, return true.
+ * If not, emit code to generate an appropriate exception and return false.
+ */
+bool sve_access_check(DisasContext *s)
+{
+ if (s->sve_excp_el) {
+ assert(!s->sve_access_checked);
+ s->sve_access_checked = true;
+
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_sve_access_trap(), s->sve_excp_el);
+ return false;
+ }
+ s->sve_access_checked = true;
+ return fp_access_check(s);
+}
+
+/*
+ * This utility function is for doing register extension with an
+ * optional shift. You will likely want to pass a temporary for the
+ * destination register. See DecodeRegExtend() in the ARM ARM.
+ */
+static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
+ int option, unsigned int shift)
+{
+ int extsize = extract32(option, 0, 2);
+ bool is_signed = extract32(option, 2, 1);
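+
+    /*
+     * Informative: option values 0..3 select UXTB/UXTH/UXTW/UXTX (LSL)
+     * and 4..7 select SXTB/SXTH/SXTW/SXTX, as in DecodeRegExtend().
+     */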
+
+ if (is_signed) {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8s_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16s_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32s_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ } else {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8u_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ }
+
+ if (shift) {
+ tcg_gen_shli_i64(tcg_out, tcg_out, shift);
+ }
+}
+
+static inline void gen_check_sp_alignment(DisasContext *s)
+{
+ /* The AArch64 architecture mandates that (if enabled via PSTATE
+ * or SCTLR bits) there is a check that SP is 16-aligned on every
+ * SP-relative load or store (with an exception generated if it is not).
+ * In line with general QEMU practice regarding misaligned accesses,
+ * we omit these checks for the sake of guest program performance.
+ * This function is provided as a hook so we can more easily add these
+ * checks in future (possibly as a "favour catching guest program bugs
+ * over speed" user selectable option).
+ */
+}
+
+/*
+ * This provides a simple table-based lookup decoder. It is
+ * intended to be used when the relevant bits for decode are too
+ * awkwardly placed and switch/if based logic would be confusing and
+ * deeply nested. Since it's a linear search through the table, tables
+ * should be kept small.
+ *
+ * It returns the first handler where insn & mask == pattern, or
+ * NULL if there is no match.
+ * The table is terminated by an empty mask (i.e. 0)
+ */
+static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
+ uint32_t insn)
+{
+ const AArch64DecodeTable *tptr = table;
+
+ while (tptr->mask) {
+ if ((insn & tptr->mask) == tptr->pattern) {
+ return tptr->disas_fn;
+ }
+ tptr++;
+ }
+ return NULL;
+}
+
+/*
+ * The instruction disassembly implemented here matches
+ * the instruction encoding classifications in chapter C4
+ * of the ARM Architecture Reference Manual (DDI0487B_a);
+ * classification names and decode diagrams here should generally
+ * match up with those in the manual.
+ */
+
+/* Unconditional branch (immediate)
+ * 31 30 26 25 0
+ * +----+-----------+-------------------------------------+
+ * | op | 0 0 1 0 1 | imm26 |
+ * +----+-----------+-------------------------------------+
+ */
+static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
+{
+ uint64_t addr = s->pc_curr + sextract32(insn, 0, 26) * 4;
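+    /* imm26 is in units of 4 bytes, giving a branch range of +/-128MB. */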
+
+ if (insn & (1U << 31)) {
+ /* BL Branch with link */
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+ }
+
+ /* B Branch / BL Branch with link */
+ reset_btype(s);
+ gen_goto_tb(s, 0, addr);
+}
+
+/* Compare and branch (immediate)
+ * 31 30 25 24 23 5 4 0
+ * +----+-------------+----+---------------------+--------+
+ * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
+ * +----+-------------+----+---------------------+--------+
+ */
+static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, rt;
+ uint64_t addr;
+ TCGLabel *label_match;
+ TCGv_i64 tcg_cmp;
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
+ rt = extract32(insn, 0, 5);
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+
+ tcg_cmp = read_cpu_reg(s, rt, sf);
+ label_match = gen_new_label();
+
+ reset_btype(s);
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+
+ gen_goto_tb(s, 0, s->base.pc_next);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* Test and branch (immediate)
+ * 31 30 25 24 23 19 18 5 4 0
+ * +----+-------------+----+-------+-------------+------+
+ * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
+ * +----+-------------+----+-------+-------------+------+
+ */
+static void disas_test_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int bit_pos, op, rt;
+ uint64_t addr;
+ TCGLabel *label_match;
+ TCGv_i64 tcg_cmp;
+
+ bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
+ op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
+ addr = s->pc_curr + sextract32(insn, 5, 14) * 4;
+ rt = extract32(insn, 0, 5);
+
+ tcg_cmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
+ label_match = gen_new_label();
+
+ reset_btype(s);
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+ tcg_temp_free_i64(tcg_cmp);
+ gen_goto_tb(s, 0, s->base.pc_next);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* Conditional branch (immediate)
+ * 31 25 24 23 5 4 3 0
+ * +---------------+----+---------------------+----+------+
+ * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
+ * +---------------+----+---------------------+----+------+
+ */
+static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int cond;
+ uint64_t addr;
+
+ if ((insn & (1 << 4)) || (insn & (1 << 24))) {
+ unallocated_encoding(s);
+ return;
+ }
+ addr = s->pc_curr + sextract32(insn, 5, 19) * 4;
+ cond = extract32(insn, 0, 4);
+
+ reset_btype(s);
+ if (cond < 0x0e) {
+ /* genuinely conditional branches */
+ TCGLabel *label_match = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ gen_goto_tb(s, 0, s->base.pc_next);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+ } else {
+ /* 0xe and 0xf are both "always" conditions */
+ gen_goto_tb(s, 0, addr);
+ }
+}
+
+/* HINT instruction group, including various allocated HINTs */
+static void handle_hint(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ unsigned int selector = crm << 3 | op2;
+
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (selector) {
+ case 0b00000: /* NOP */
+ break;
+ case 0b00011: /* WFI */
+ s->base.is_jmp = DISAS_WFI;
+ break;
+ case 0b00001: /* YIELD */
+ /* When running in MTTCG we don't generate jumps to the yield and
+ * WFE helpers as it won't affect the scheduling of other vCPUs.
+ * If we wanted to more completely model WFE/SEV so we don't busy
+ * spin unnecessarily we would need to do something more involved.
+ */
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ s->base.is_jmp = DISAS_YIELD;
+ }
+ break;
+ case 0b00010: /* WFE */
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ s->base.is_jmp = DISAS_WFE;
+ }
+ break;
+ case 0b00100: /* SEV */
+ case 0b00101: /* SEVL */
+ /* we treat all as NOP at least for now */
+ break;
+ case 0b00111: /* XPACLRI */
+ if (s->pauth_active) {
+ gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
+ }
+ break;
+ case 0b01000: /* PACIA1716 */
+ if (s->pauth_active) {
+ gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ }
+ break;
+ case 0b01010: /* PACIB1716 */
+ if (s->pauth_active) {
+ gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ }
+ break;
+ case 0b01100: /* AUTIA1716 */
+ if (s->pauth_active) {
+ gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ }
+ break;
+ case 0b01110: /* AUTIB1716 */
+ if (s->pauth_active) {
+ gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
+ }
+ break;
+ case 0b11000: /* PACIAZ */
+ if (s->pauth_active) {
+ gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
+ new_tmp_a64_zero(s));
+ }
+ break;
+ case 0b11001: /* PACIASP */
+ if (s->pauth_active) {
+ gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ }
+ break;
+ case 0b11010: /* PACIBZ */
+ if (s->pauth_active) {
+ gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
+ new_tmp_a64_zero(s));
+ }
+ break;
+ case 0b11011: /* PACIBSP */
+ if (s->pauth_active) {
+ gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ }
+ break;
+ case 0b11100: /* AUTIAZ */
+ if (s->pauth_active) {
+ gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
+ new_tmp_a64_zero(s));
+ }
+ break;
+ case 0b11101: /* AUTIASP */
+ if (s->pauth_active) {
+ gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ }
+ break;
+ case 0b11110: /* AUTIBZ */
+ if (s->pauth_active) {
+ gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
+ new_tmp_a64_zero(s));
+ }
+ break;
+ case 0b11111: /* AUTIBSP */
+ if (s->pauth_active) {
+ gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
+ }
+ break;
+ default:
+ /* default specified as NOP equivalent */
+ break;
+ }
+}
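+
+/* For illustration: the selector above is CRm:op2, which equals the
+ * HINT immediate. E.g. PACIASP is HINT #25, i.e. crm == 3 and
+ * op2 == 1, giving selector 0b11001; NOP is HINT #0. Unallocated
+ * selectors fall through to the default case and behave as NOP.
+ */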
+
+static void gen_clrex(DisasContext *s, uint32_t insn)
+{
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+/* CLREX, DSB, DMB, ISB */
+static void handle_sync(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ TCGBar bar;
+
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (op2) {
+ case 2: /* CLREX */
+ gen_clrex(s, insn);
+ return;
+ case 4: /* DSB */
+ case 5: /* DMB */
+ switch (crm & 3) {
+ case 1: /* MBReqTypes_Reads */
+ bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
+ break;
+ case 2: /* MBReqTypes_Writes */
+ bar = TCG_BAR_SC | TCG_MO_ST_ST;
+ break;
+ default: /* MBReqTypes_All */
+ bar = TCG_BAR_SC | TCG_MO_ALL;
+ break;
+ }
+ tcg_gen_mb(bar);
+ return;
+ case 6: /* ISB */
+ /* We need to break the TB after this insn to execute
+ * self-modifying code correctly and also to take
+ * any pending interrupts immediately.
+ */
+ reset_btype(s);
+ gen_goto_tb(s, 0, s->base.pc_next);
+ return;
+
+ case 7: /* SB */
+ if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
+ goto do_unallocated;
+ }
+ /*
+ * TODO: There is no speculation barrier opcode for TCG;
+ * MB and end the TB instead.
+ */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ gen_goto_tb(s, 0, s->base.pc_next);
+ return;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
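+/* A note on QEMU's AArch64 flag representation, relied on by the two
+ * helpers below: cpu_NF and cpu_VF hold N and V in bit 31, cpu_CF
+ * holds C as 0 or 1, and cpu_ZF holds Z inverted (Z is set iff
+ * cpu_ZF == 0). Only those bits are architecturally significant,
+ * which is why e.g. (C | Z) - 1 is a valid computation of the new
+ * NF below: its bit 31 is set exactly when C == 0 and Z == 0.
+ */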
+static void gen_xaflag(void)
+{
+ TCGv_i32 z = tcg_temp_new_i32();
+
+ tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
+
+ /*
+ * (!C & !Z) << 31
+ * (!(C | Z)) << 31
+ * ~((C | Z) << 31)
+ * ~-(C | Z)
+ * (C | Z) - 1
+ */
+ tcg_gen_or_i32(cpu_NF, cpu_CF, z);
+ tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
+
+ /* !(Z & C) */
+ tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
+ tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
+
+ /* (!C & Z) << 31 -> -(Z & ~C) */
+ tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
+ tcg_gen_neg_i32(cpu_VF, cpu_VF);
+
+ /* C | Z */
+ tcg_gen_or_i32(cpu_CF, cpu_CF, z);
+
+ tcg_temp_free_i32(z);
+}
+
+static void gen_axflag(void)
+{
+ tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */
+
+ /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
+
+ tcg_gen_movi_i32(cpu_NF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+}
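+
+/* gen_xaflag and gen_axflag implement the FEAT_FlagM2 XAFLAG/AXFLAG
+ * instructions, which convert floating-point comparison flags between
+ * the Arm NZCV format and the alternative "external" format.
+ */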
+
+/* MSR (immediate) - move immediate to processor state field */
+static void handle_msr_i(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ TCGv_i32 t1;
+ int op = op1 << 3 | op2;
+
+ /* End the TB by default, chaining is ok. */
+ s->base.is_jmp = DISAS_TOO_MANY;
+
+ switch (op) {
+ case 0x00: /* CFINV */
+ if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
+ goto do_unallocated;
+ }
+ tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+
+ case 0x01: /* XAFlag */
+ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
+ goto do_unallocated;
+ }
+ gen_xaflag();
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+
+ case 0x02: /* AXFlag */
+ if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
+ goto do_unallocated;
+ }
+ gen_axflag();
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+
+ case 0x03: /* UAO */
+ if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_UAO);
+ } else {
+ clear_pstate_bits(PSTATE_UAO);
+ }
+ t1 = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
+ case 0x04: /* PAN */
+ if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_PAN);
+ } else {
+ clear_pstate_bits(PSTATE_PAN);
+ }
+ t1 = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
+ case 0x05: /* SPSel */
+ if (s->current_el == 0) {
+ goto do_unallocated;
+ }
+ t1 = tcg_const_i32(crm & PSTATE_SP);
+ gen_helper_msr_i_spsel(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
+ case 0x19: /* SSBS */
+ if (!dc_isar_feature(aa64_ssbs, s)) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_SSBS);
+ } else {
+ clear_pstate_bits(PSTATE_SSBS);
+ }
+ /* Don't need to rebuild hflags since SSBS is a nop */
+ break;
+
+ case 0x1a: /* DIT */
+ if (!dc_isar_feature(aa64_dit, s)) {
+ goto do_unallocated;
+ }
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_DIT);
+ } else {
+ clear_pstate_bits(PSTATE_DIT);
+ }
+ /* There's no need to rebuild hflags because DIT is a nop */
+ break;
+
+ case 0x1e: /* DAIFSet */
+ t1 = tcg_const_i32(crm);
+ gen_helper_msr_i_daifset(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ break;
+
+ case 0x1f: /* DAIFClear */
+ t1 = tcg_const_i32(crm);
+ gen_helper_msr_i_daifclear(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ break;
+
+ case 0x1c: /* TCO */
+ if (dc_isar_feature(aa64_mte, s)) {
+ /* Full MTE is enabled -- set the TCO bit as directed. */
+ if (crm & 1) {
+ set_pstate_bits(PSTATE_TCO);
+ } else {
+ clear_pstate_bits(PSTATE_TCO);
+ }
+ t1 = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, t1);
+ tcg_temp_free_i32(t1);
+ /* Many factors, including TCO, go into MTE_ACTIVE. */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
+ /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
+ s->base.is_jmp = DISAS_NEXT;
+ } else {
+ goto do_unallocated;
+ }
+ break;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+static void gen_get_nzcv(TCGv_i64 tcg_rt)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* build bit 31, N */
+ tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
+ /* build bit 30, Z */
+ tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
+ /* build bit 29, C */
+ tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
+ /* build bit 28, V */
+ tcg_gen_shri_i32(tmp, cpu_VF, 31);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
+ /* generate result */
+ tcg_gen_extu_i32_i64(tcg_rt, nzcv);
+
+ tcg_temp_free_i32(nzcv);
+ tcg_temp_free_i32(tmp);
+}
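+
+/* E.g. with N=1, Z=0, C=1, V=0 the helper above yields
+ * nzcv == 0xa0000000, matching the NZCV<31:28> layout of the
+ * architectural register.
+ */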
+
+static void gen_set_nzcv(TCGv_i64 tcg_rt)
+{
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* take NZCV from R[t] */
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
+
+ /* bit 31, N */
+ tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
+ /* bit 30, Z */
+ tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
+ /* bit 29, C */
+ tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
+ tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
+ /* bit 28, V */
+ tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
+ tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
+ tcg_temp_free_i32(nzcv);
+}
+
+/* MRS - move from system register
+ * MSR (register) - move to system register
+ * SYS
+ * SYSL
+ * These are all essentially the same insn in 'read' and 'write'
+ * versions, with varying op0 fields.
+ */
+static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
+ unsigned int op0, unsigned int op1, unsigned int op2,
+ unsigned int crn, unsigned int crm, unsigned int rt)
+{
+ const ARMCPRegInfo *ri;
+ TCGv_i64 tcg_rt;
+
+ ri = get_arm_cp_reginfo(s->cp_regs,
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
+ crn, crm, op0, op1, op2));
+
+ if (!ri) {
+ /* Unknown register; this might be a guest error or an
+ * unimplemented QEMU feature.
+ */
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
+ "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
+ isread ? "read" : "write", op0, op1, crn, crm, op2);
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Check access permissions */
+ if (!cp_access_ok(s->current_el, ri, isread)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (ri->accessfn) {
+ /* Emit code to perform further access permissions checks at
+ * runtime; this may result in an exception.
+ */
+ TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn, tcg_isread;
+ uint32_t syndrome;
+
+ gen_a64_set_pc_im(s->pc_curr);
+ tmpptr = tcg_const_ptr(ri);
+ syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
+ tcg_syn = tcg_const_i32(syndrome);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
+ } else if (ri->type & ARM_CP_RAISES_EXC) {
+ /*
+ * The readfn or writefn might raise an exception;
+ * synchronize the CPU state in case it does.
+ */
+ gen_a64_set_pc_im(s->pc_curr);
+ }
+
+ /* Handle special cases first */
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return;
+ case ARM_CP_NZCV:
+ tcg_rt = cpu_reg(s, rt);
+ if (isread) {
+ gen_get_nzcv(tcg_rt);
+ } else {
+ gen_set_nzcv(tcg_rt);
+ }
+ return;
+ case ARM_CP_CURRENTEL:
+ /* Reads as the current EL value from pstate, which is
+ * guaranteed to be constant by the tb flags.
+ */
+ tcg_rt = cpu_reg(s, rt);
+ tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
+ return;
+ case ARM_CP_DC_ZVA:
+ /* Writes clear the aligned block of memory which rt points into. */
+ if (s->mte_active[0]) {
+ TCGv_i32 t_desc;
+ int desc = 0;
+
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ t_desc = tcg_const_i32(desc);
+
+ tcg_rt = new_tmp_a64(s);
+ gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt));
+ tcg_temp_free_i32(t_desc);
+ } else {
+ tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
+ }
+ gen_helper_dc_zva(cpu_env, tcg_rt);
+ return;
+ case ARM_CP_DC_GVA:
+ {
+ TCGv_i64 clean_addr, tag;
+
+ /*
+ * DC_GVA, like DC_ZVA, requires that we supply the original
+ * pointer for an invalid page. Probe that address first.
+ */
+ tcg_rt = cpu_reg(s, rt);
+ clean_addr = clean_data_tbi(s, tcg_rt);
+ gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
+
+ if (s->ata) {
+ /* Extract the tag from the register to match STZGM. */
+ tag = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tag, tcg_rt, 56);
+ gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
+ tcg_temp_free_i64(tag);
+ }
+ }
+ return;
+ case ARM_CP_DC_GZVA:
+ {
+ TCGv_i64 clean_addr, tag;
+
+ /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
+ tcg_rt = cpu_reg(s, rt);
+ clean_addr = clean_data_tbi(s, tcg_rt);
+ gen_helper_dc_zva(cpu_env, clean_addr);
+
+ if (s->ata) {
+ /* Extract the tag from the register to match STZGM. */
+ tag = tcg_temp_new_i64();
+ tcg_gen_shri_i64(tag, tcg_rt, 56);
+ gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
+ tcg_temp_free_i64(tag);
+ }
+ }
+ return;
+ default:
+ break;
+ }
+ if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
+ return;
+ } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
+ return;
+ }
+
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ gen_io_start();
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ if (isread) {
+ if (ri->type & ARM_CP_CONST) {
+ tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ } else {
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return;
+ } else if (ri->writefn) {
+ TCGv_ptr tmpptr;
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ }
+
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ /* I/O operations must end the TB here (whether read or write) */
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ }
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /*
+ * A write to any coprocessor register that ends a TB
+ * must rebuild the hflags for the next TB.
+ */
+ TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_a64(cpu_env, tcg_el);
+ tcg_temp_free_i32(tcg_el);
+ /*
+ * We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ }
+}
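+
+/* Worked example (for illustration): "mrs x0, cntvct_el0" encodes as
+ * 0xd53be040, i.e. L=1, op0=3, op1=3, CRn=14, CRm=0, op2=2, Rt=0;
+ * that tuple is what handle_sys above looks up via ENCODE_AA64_CP_REG.
+ */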
+
+/* System
+ *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ */
+static void disas_system(DisasContext *s, uint32_t insn)
+{
+ unsigned int l, op0, op1, crn, crm, op2, rt;
+ l = extract32(insn, 21, 1);
+ op0 = extract32(insn, 19, 2);
+ op1 = extract32(insn, 16, 3);
+ crn = extract32(insn, 12, 4);
+ crm = extract32(insn, 8, 4);
+ op2 = extract32(insn, 5, 3);
+ rt = extract32(insn, 0, 5);
+
+ if (op0 == 0) {
+ if (l || rt != 31) {
+ unallocated_encoding(s);
+ return;
+ }
+ switch (crn) {
+ case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
+ handle_hint(s, insn, op1, op2, crm);
+ break;
+ case 3: /* CLREX, DSB, DMB, ISB */
+ handle_sync(s, insn, op1, op2, crm);
+ break;
+ case 4: /* MSR (immediate) */
+ handle_msr_i(s, insn, op1, op2, crm);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ return;
+ }
+ handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
+}
+
+/* Exception generation
+ *
+ *  31             24 23 21 20                     5 4   2 1  0
+ * +-----------------+-----+------------------------+-----+----+
+ * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
+ * +-----------------+-----+------------------------+-----+----+
+ */
+static void disas_exc(DisasContext *s, uint32_t insn)
+{
+ int opc = extract32(insn, 21, 3);
+ int op2_ll = extract32(insn, 0, 5);
+ int imm16 = extract32(insn, 5, 16);
+ TCGv_i32 tmp;
+
+ switch (opc) {
+ case 0:
+ /* For SVC, HVC and SMC we advance the single-step state
+ * machine before taking the exception. This is architecturally
+ * mandated, to ensure that single-stepping a system call
+ * instruction works properly.
+ */
+ switch (op2_ll) {
+ case 1: /* SVC */
+ gen_ss_advance(s);
+ gen_exception_insn(s, s->base.pc_next, EXCP_SWI,
+ syn_aa64_svc(imm16), default_exception_el(s));
+ break;
+ case 2: /* HVC */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration.
+ */
+ gen_a64_set_pc_im(s->pc_curr);
+ gen_helper_pre_hvc(cpu_env);
+ gen_ss_advance(s);
+ gen_exception_insn(s, s->base.pc_next, EXCP_HVC,
+ syn_aa64_hvc(imm16), 2);
+ break;
+ case 3: /* SMC */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ gen_a64_set_pc_im(s->pc_curr);
+ tmp = tcg_const_i32(syn_aa64_smc(imm16));
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_ss_advance(s);
+ gen_exception_insn(s, s->base.pc_next, EXCP_SMC,
+ syn_aa64_smc(imm16), 3);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ break;
+ case 1:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* BRK */
+ gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
+ break;
+ case 2:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+ * Since QEMU doesn't implement external debug, we treat this as
+ * required when halting debug is disabled: it will UNDEF.
+ * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
+ */
+ if (semihosting_enabled() && imm16 == 0xf000) {
+#ifndef CONFIG_USER_ONLY
+ /* In system mode, don't allow userspace access to semihosting,
+ * to provide some semblance of security (and for consistency
+ * with our 32-bit semihosting).
+ */
+ if (s->current_el == 0) {
+ unsupported_encoding(s, insn);
+ break;
+ }
+#endif
+ gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+ } else {
+ unsupported_encoding(s, insn);
+ }
+ break;
+ case 5:
+ if (op2_ll < 1 || op2_ll > 3) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* DCPS1, DCPS2, DCPS3 */
+ unsupported_encoding(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
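+
+/* For illustration: "svc #0" encodes as 0xd4000001 (opc == 0,
+ * imm16 == 0, op2 == 0, LL == 0b01), so disas_exc above advances the
+ * single-step state machine and then raises EXCP_SWI with syndrome
+ * syn_aa64_svc(0).
+ */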
+
+/* Unconditional branch (register)
+ *  31           25 24   21 20   16 15   10 9    5 4     0
+ * +---------------+-------+-------+-------+------+-------+
+ * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
+ * +---------------+-------+-------+-------+------+-------+
+ */
+static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
+{
+ unsigned int opc, op2, op3, rn, op4;
+ unsigned btype_mod = 2; /* 0: BR, 1: BLR, 2: other */
+ TCGv_i64 dst;
+ TCGv_i64 modifier;
+
+ opc = extract32(insn, 21, 4);
+ op2 = extract32(insn, 16, 5);
+ op3 = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ op4 = extract32(insn, 0, 5);
+
+ if (op2 != 0x1f) {
+ goto do_unallocated;
+ }
+
+ switch (opc) {
+ case 0: /* BR */
+ case 1: /* BLR */
+ case 2: /* RET */
+ btype_mod = opc;
+ switch (op3) {
+ case 0:
+ /* BR, BLR, RET */
+ if (op4 != 0) {
+ goto do_unallocated;
+ }
+ dst = cpu_reg(s, rn);
+ break;
+
+ case 2:
+ case 3:
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ if (opc == 2) {
+ /* RETAA, RETAB */
+ if (rn != 0x1f || op4 != 0x1f) {
+ goto do_unallocated;
+ }
+ rn = 30;
+ modifier = cpu_X[31];
+ } else {
+ /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
+ if (op4 != 0x1f) {
+ goto do_unallocated;
+ }
+ modifier = new_tmp_a64_zero(s);
+ }
+ if (s->pauth_active) {
+ dst = new_tmp_a64(s);
+ if (op3 == 2) {
+ gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
+ } else {
+ gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
+ }
+ } else {
+ dst = cpu_reg(s, rn);
+ }
+ break;
+
+ default:
+ goto do_unallocated;
+ }
+ gen_a64_set_pc(s, dst);
+ /* BLR also needs to load return address */
+ if (opc == 1) {
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+ }
+ break;
+
+ case 8: /* BRAA */
+ case 9: /* BLRAA */
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ if ((op3 & ~1) != 2) {
+ goto do_unallocated;
+ }
+ btype_mod = opc & 1;
+ if (s->pauth_active) {
+ dst = new_tmp_a64(s);
+ modifier = cpu_reg_sp(s, op4);
+ if (op3 == 2) {
+ gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
+ } else {
+ gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
+ }
+ } else {
+ dst = cpu_reg(s, rn);
+ }
+ gen_a64_set_pc(s, dst);
+ /* BLRAA also needs to load return address */
+ if (opc == 9) {
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
+ }
+ break;
+
+ case 4: /* ERET */
+ if (s->current_el == 0) {
+ goto do_unallocated;
+ }
+ switch (op3) {
+ case 0: /* ERET */
+ if (op4 != 0) {
+ goto do_unallocated;
+ }
+ dst = tcg_temp_new_i64();
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPUARMState, elr_el[s->current_el]));
+ break;
+
+ case 2: /* ERETAA */
+ case 3: /* ERETAB */
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ if (rn != 0x1f || op4 != 0x1f) {
+ goto do_unallocated;
+ }
+ dst = tcg_temp_new_i64();
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPUARMState, elr_el[s->current_el]));
+ if (s->pauth_active) {
+ modifier = cpu_X[31];
+ if (op3 == 2) {
+ gen_helper_autia(dst, cpu_env, dst, modifier);
+ } else {
+ gen_helper_autib(dst, cpu_env, dst, modifier);
+ }
+ }
+ break;
+
+ default:
+ goto do_unallocated;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ gen_helper_exception_return(cpu_env, dst);
+ tcg_temp_free_i64(dst);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+ return;
+
+ case 5: /* DRPS */
+ if (op3 != 0 || op4 != 0 || rn != 0x1f) {
+ goto do_unallocated;
+ } else {
+ unsupported_encoding(s, insn);
+ }
+ return;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (btype_mod) {
+ case 0: /* BR */
+ if (dc_isar_feature(aa64_bti, s)) {
+ /* BR to {x16,x17} or !guard -> 1, else 3. */
+ set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
+ }
+ break;
+
+ case 1: /* BLR */
+ if (dc_isar_feature(aa64_bti, s)) {
+ /* BLR sets BTYPE to 2, regardless of source guarded page. */
+ set_btype(s, 2);
+ }
+ break;
+
+ default: /* RET or none of the above. */
+ /* BTYPE will be set to 0 by normal end-of-insn processing. */
+ break;
+ }
+
+ s->base.is_jmp = DISAS_JUMP;
+}
+
+/* Branches, exception generating and system instructions */
+static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 25, 7)) {
+ case 0x0a: case 0x0b:
+ case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
+ disas_uncond_b_imm(s, insn);
+ break;
+ case 0x1a: case 0x5a: /* Compare & branch (immediate) */
+ disas_comp_b_imm(s, insn);
+ break;
+ case 0x1b: case 0x5b: /* Test & branch (immediate) */
+ disas_test_b_imm(s, insn);
+ break;
+ case 0x2a: /* Conditional branch (immediate) */
+ disas_cond_b_imm(s, insn);
+ break;
+ case 0x6a: /* Exception generation / System */
+ if (insn & (1 << 24)) {
+ if (extract32(insn, 22, 2) == 0) {
+ disas_system(s, insn);
+ } else {
+ unallocated_encoding(s);
+ }
+ } else {
+ disas_exc(s, insn);
+ }
+ break;
+ case 0x6b: /* Unconditional branch (register) */
+ disas_uncond_b_reg(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/*
+ * Load/Store exclusive instructions are implemented by remembering
+ * the value/address loaded, and seeing if these are the same
+ * when the store is performed. This is not actually the architecturally
+ * mandated semantics, but it works for typical guest code sequences
+ * and avoids having to monitor regular stores.
+ *
+ * The store exclusive uses the atomic cmpxchg primitives to avoid
+ * races in multi-threaded linux-user and when MTTCG softmmu is
+ * enabled.
+ */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv_i64 addr, int size, bool is_pair)
+{
+ int idx = get_mem_index(s);
+ MemOp memop = s->be_data;
+
+ g_assert(size <= 3);
+ if (is_pair) {
+ g_assert(size >= 2);
+ if (size == 2) {
+ /* The pair must be single-copy atomic for the doubleword. */
+ memop |= MO_64 | MO_ALIGN;
+ tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+ if (s->be_data == MO_LE) {
+ tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
+ tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
+ } else {
+ tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
+ tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
+ }
+ } else {
+ /* The pair must be single-copy atomic for *each* doubleword, not
+ * for the entire quadword; however, it must be quadword aligned. */
+ memop |= MO_64;
+ tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
+ memop | MO_ALIGN_16);
+
+ TCGv_i64 addr2 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(addr2, addr, 8);
+ tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
+ tcg_temp_free_i64(addr2);
+
+ tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
+ tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
+ }
+ } else {
+ memop |= size | MO_ALIGN;
+ tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+ tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
+ }
+ tcg_gen_mov_i64(cpu_exclusive_addr, addr);
+}
+
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i64 addr, int size, int is_pair)
+{
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
+ * && (!is_pair || env->exclusive_high == [addr + datasize])) {
+ * [addr] = {Rt};
+ * if (is_pair) {
+ * [addr + datasize] = {Rt2};
+ * }
+ * {Rd} = 0;
+ * } else {
+ * {Rd} = 1;
+ * }
+ * env->exclusive_addr = -1;
+ */
+ TCGLabel *fail_label = gen_new_label();
+ TCGLabel *done_label = gen_new_label();
+ TCGv_i64 tmp;
+
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+
+ tmp = tcg_temp_new_i64();
+ if (is_pair) {
+ if (size == 2) {
+ if (s->be_data == MO_LE) {
+ tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
+ } else {
+ tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
+ }
+ tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
+ cpu_exclusive_val, tmp,
+ get_mem_index(s),
+ MO_64 | MO_ALIGN | s->be_data);
+ tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (!HAVE_CMPXCHG128) {
+ gen_helper_exit_atomic(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
+ } else if (s->be_data == MO_LE) {
+ gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
+ cpu_exclusive_addr,
+ cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ }
+ } else if (s->be_data == MO_LE) {
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
+ cpu_reg(s, rt), cpu_reg(s, rt2));
+ }
+ } else {
+ tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
+ cpu_reg(s, rt), get_mem_index(s),
+ size | MO_ALIGN | s->be_data);
+ tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
+ }
+ tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_br(done_label);
+
+ gen_set_label(fail_label);
+ tcg_gen_movi_i64(cpu_reg(s, rd), 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
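+
+/* For illustration, the guest pattern modelled here is the classic
+ * LL/SC retry loop, e.g. an atomic increment:
+ *
+ *   retry: ldxr  x1, [x0]
+ *          add   x1, x1, #1
+ *          stxr  w2, x1, [x0]      // w2 == 0 on success
+ *          cbnz  w2, retry
+ *
+ * The STXR succeeds only if cpu_exclusive_addr still matches and the
+ * cmpxchg above observes cpu_exclusive_val unchanged at that address.
+ */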
+
+static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
+ int rn, int size)
+{
+ TCGv_i64 tcg_rs = cpu_reg(s, rs);
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ int memidx = get_mem_index(s);
+ TCGv_i64 clean_addr;
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
+ tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
+ size | MO_ALIGN | s->be_data);
+}
+
+static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
+ int rn, int size)
+{
+ TCGv_i64 s1 = cpu_reg(s, rs);
+ TCGv_i64 s2 = cpu_reg(s, rs + 1);
+ TCGv_i64 t1 = cpu_reg(s, rt);
+ TCGv_i64 t2 = cpu_reg(s, rt + 1);
+ TCGv_i64 clean_addr;
+ int memidx = get_mem_index(s);
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ /* This is a single atomic access, despite the "pair". */
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
+
+ if (size == 2) {
+ TCGv_i64 cmp = tcg_temp_new_i64();
+ TCGv_i64 val = tcg_temp_new_i64();
+
+ if (s->be_data == MO_LE) {
+ tcg_gen_concat32_i64(val, t1, t2);
+ tcg_gen_concat32_i64(cmp, s1, s2);
+ } else {
+ tcg_gen_concat32_i64(val, t2, t1);
+ tcg_gen_concat32_i64(cmp, s2, s1);
+ }
+
+ tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
+ MO_64 | MO_ALIGN | s->be_data);
+ tcg_temp_free_i64(val);
+
+ if (s->be_data == MO_LE) {
+ tcg_gen_extr32_i64(s1, s2, cmp);
+ } else {
+ tcg_gen_extr32_i64(s2, s1, cmp);
+ }
+ tcg_temp_free_i64(cmp);
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (HAVE_CMPXCHG128) {
+ TCGv_i32 tcg_rs = tcg_const_i32(rs);
+ if (s->be_data == MO_LE) {
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs,
+ clean_addr, t1, t2);
+ } else {
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs,
+ clean_addr, t1, t2);
+ }
+ tcg_temp_free_i32(tcg_rs);
+ } else {
+ gen_helper_exit_atomic(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
+ }
+ } else {
+ TCGv_i64 d1 = tcg_temp_new_i64();
+ TCGv_i64 d2 = tcg_temp_new_i64();
+ TCGv_i64 a2 = tcg_temp_new_i64();
+ TCGv_i64 c1 = tcg_temp_new_i64();
+ TCGv_i64 c2 = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+
+ /* Load the two words, in memory order. */
+ tcg_gen_qemu_ld_i64(d1, clean_addr, memidx,
+ MO_64 | MO_ALIGN_16 | s->be_data);
+ tcg_gen_addi_i64(a2, clean_addr, 8);
+ tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);
+
+ /* Compare the two words, also in memory order. */
+ tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
+ tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
+ tcg_gen_and_i64(c2, c2, c1);
+
+ /* If compare equal, write back new data, else write back old data. */
+ tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
+ tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
+ tcg_gen_qemu_st_i64(c1, clean_addr, memidx, MO_64 | s->be_data);
+ tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
+ tcg_temp_free_i64(a2);
+ tcg_temp_free_i64(c1);
+ tcg_temp_free_i64(c2);
+ tcg_temp_free_i64(zero);
+
+ /* Write back the data from memory to Rs. */
+ tcg_gen_mov_i64(s1, d1);
+ tcg_gen_mov_i64(s2, d2);
+ tcg_temp_free_i64(d1);
+ tcg_temp_free_i64(d2);
+ }
+}
+
+/* Compute the ISS "SF" (Sixty-Four) bit, i.e. whether the transfer
+ * register is 64 bits wide. This logic is derived from the ARMv8
+ * specs for LDR (shared decode for all encodings).
+ */
+static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
+{
+ int opc0 = extract32(opc, 0, 1);
+ int regsize;
+
+ if (is_signed) {
+ regsize = opc0 ? 32 : 64;
+ } else {
+ regsize = size == 3 ? 64 : 32;
+ }
+ return regsize == 64;
+}
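+
+/* E.g. LDRSW (size == 2, signed, opc == 0b10) has opc0 == 0, so
+ * regsize is 64 and ISS.SF is 1; LDRH (size == 1, unsigned,
+ * opc == 0b01) has regsize 32 and ISS.SF is 0.
+ */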
+
+/* Load/store exclusive
+ *
+ *  31 30 29        24  23  22   21  20  16  15  14   10 9    5 4    0
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ *
+ * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
+ * L: 0 -> store, 1 -> load
+ * o2: 0 -> exclusive, 1 -> not
+ * o1: 0 -> single register, 1 -> register pair
+ * o0: 1 -> load-acquire/store-release, 0 -> not
+ */
+static void disas_ldst_excl(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ int rs = extract32(insn, 16, 5);
+ int is_lasr = extract32(insn, 15, 1);
+ int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
+ int size = extract32(insn, 30, 2);
+ TCGv_i64 clean_addr;
+
+ switch (o2_L_o1_o0) {
+ case 0x0: /* STXR */
+ case 0x1: /* STLXR */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ true, rn != 31, size);
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
+ return;
+
+ case 0x4: /* LDXR */
+ case 0x5: /* LDAXR */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ false, rn != 31, size);
+ s->is_ldex = true;
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ return;
+
+ case 0x8: /* STLLR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* StoreLORelease is the same as Store-Release for QEMU. */
+ /* fall through */
+ case 0x9: /* STLR */
+ /* Generate ISS for non-exclusive accesses including LASR. */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ true, rn != 31, size);
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
+ disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+ return;
+
+ case 0xc: /* LDLAR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+ /* fall through */
+ case 0xd: /* LDAR */
+ /* Generate ISS for non-exclusive accesses including LASR. */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ false, rn != 31, size);
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
+ rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ return;
+
+ case 0x2: case 0x3: /* CASP / STXP */
+ if (size & 2) { /* STXP / STLXP */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ true, rn != 31, size);
+ gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
+ return;
+ }
+ if (rt2 == 31
+ && ((rt | rs) & 1) == 0
+ && dc_isar_feature(aa64_atomics, s)) {
+ /* CASP / CASPL */
+ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
+ return;
+ }
+ break;
+
+ case 0x6: case 0x7: /* CASPA / LDXP */
+ if (size & 2) { /* LDXP / LDAXP */
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
+ false, rn != 31, size);
+ s->is_ldex = true;
+ gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ return;
+ }
+ if (rt2 == 31
+ && ((rt | rs) & 1) == 0
+ && dc_isar_feature(aa64_atomics, s)) {
+ /* CASPA / CASPAL */
+ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
+ return;
+ }
+ break;
+
+ case 0xa: /* CAS */
+ case 0xb: /* CASL */
+ case 0xe: /* CASA */
+ case 0xf: /* CASAL */
+ if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
+ gen_compare_and_swap(s, rs, rt, rn, size);
+ return;
+ }
+ break;
+ }
+ unallocated_encoding(s);
+}
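+
+/* Decode example (for illustration): LDAXR has o2 = 0, L = 1, o1 = 0
+ * and o0 = 1, so o2_L_o1_o0 == 0b0101 == 0x5, selecting the LDAXR
+ * case above.
+ */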
+
+/*
+ * Load register (literal)
+ *
+ *  31 30 29   27 26  25 24 23                5 4     0
+ * +-----+-------+---+-----+-------------------+-------+
+ * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
+ * +-----+-------+---+-----+-------------------+-------+
+ *
+ * V: 1 -> vector (simd/fp)
+ * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
+ * 10-> 32 bit signed, 11 -> prefetch
+ * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
+ */
+static void disas_ld_lit(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int64_t imm = sextract32(insn, 5, 19) << 2;
+ bool is_vector = extract32(insn, 26, 1);
+ int opc = extract32(insn, 30, 2);
+ bool is_signed = false;
+ int size = 2;
+ TCGv_i64 tcg_rt, clean_addr;
+
+ if (is_vector) {
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = 2 + opc;
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (opc == 3) {
+ /* PRFM (literal) : prefetch */
+ return;
+ }
+ size = 2 + extract32(opc, 0, 1);
+ is_signed = extract32(opc, 1, 1);
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ clean_addr = tcg_const_i64(s->pc_curr + imm);
+ if (is_vector) {
+ do_fp_ld(s, rt, clean_addr, size);
+ } else {
+ /* Only unsigned 32-bit loads target 32-bit registers. */
+ bool iss_sf = opc != 0;
+
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ false, true, rt, iss_sf, false);
+ }
+ tcg_temp_free_i64(clean_addr);
+}
+
+/*
+ * LDNP (Load Pair - non-temporal hint)
+ * LDP (Load Pair - non vector)
+ * LDPSW (Load Pair Signed Word - non vector)
+ * STNP (Store Pair - non-temporal hint)
+ * STP (Store Pair - non vector)
+ * LDNP (Load Pair of SIMD&FP - non-temporal hint)
+ * LDP (Load Pair of SIMD&FP)
+ * STNP (Store Pair of SIMD&FP - non-temporal hint)
+ * STP (Store Pair of SIMD&FP)
+ *
+ *  31 30 29   27 26  25 24  23 22 21    15 14   10 9    5 4    0
+ * +-----+-------+---+---+-------+---+-------+-------+------+------+
+ * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  |  Rt  |
+ * +-----+-------+---+---+-------+---+-------+-------+------+------+
+ *
+ * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
+ * LDPSW/STGP 01
+ * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
+ * V: 0 -> GPR, 1 -> Vector
+ * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
+ * 10 -> signed offset, 11 -> pre-index
+ * L: 0 -> Store 1 -> Load
+ *
+ * Rt, Rt2 = GPR or SIMD registers to be stored
+ * Rn = general purpose register containing address
+ * imm7 = signed offset (multiple of 4 or 8 depending on size)
+ */
+static void disas_ldst_pair(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ uint64_t offset = sextract64(insn, 15, 7);
+ int index = extract32(insn, 23, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ bool is_load = extract32(insn, 22, 1);
+ int opc = extract32(insn, 30, 2);
+
+ bool is_signed = false;
+ bool postindex = false;
+ bool wback = false;
+ bool set_tag = false;
+
+ TCGv_i64 clean_addr, dirty_addr;
+
+ int size;
+
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size = 2 + opc;
+ } else if (opc == 1 && !is_load) {
+ /* STGP */
+ if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = 3;
+ set_tag = true;
+ } else {
+ size = 2 + extract32(opc, 1, 1);
+ is_signed = extract32(opc, 0, 1);
+ if (!is_load && is_signed) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ switch (index) {
+ case 1: /* post-index */
+ postindex = true;
+ wback = true;
+ break;
+ case 0:
+ /* Signed offset with "non-temporal" hint. Since we don't emulate
+ * caches we don't care about hints to the cache system about
+ * data access patterns, so we handle this identically to a plain
+ * signed offset.
+ */
+ if (is_signed) {
+ /* There is no non-temporal-hint version of LDPSW */
+ unallocated_encoding(s);
+ return;
+ }
+ postindex = false;
+ break;
+ case 2: /* signed offset, rn not updated */
+ postindex = false;
+ break;
+ case 3: /* pre-index */
+ postindex = false;
+ wback = true;
+ break;
+ }
+
+ if (is_vector && !fp_access_check(s)) {
+ return;
+ }
+
+ offset <<= (set_tag ? LOG2_TAG_GRANULE : size);
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+ if (!postindex) {
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ }
+
+ if (set_tag) {
+ if (!s->ata) {
+ /*
+ * TODO: We could rely on the stores below, at least for
+ * system mode, if we arrange to add MO_ALIGN_16.
+ */
+ gen_helper_stg_stub(cpu_env, dirty_addr);
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
+ } else {
+ gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
+ }
+ }
+
+ clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
+ (wback || rn != 31) && !set_tag, 2 << size);
+
+ if (is_vector) {
+ if (is_load) {
+ do_fp_ld(s, rt, clean_addr, size);
+ } else {
+ do_fp_st(s, rt, clean_addr, size);
+ }
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
+ if (is_load) {
+ do_fp_ld(s, rt2, clean_addr, size);
+ } else {
+ do_fp_st(s, rt2, clean_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
+
+ if (is_load) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* Do not modify tcg_rt before recognizing any exception
+ * from the second load.
+ */
+ do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
+ do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
+ false, false, 0, false, false);
+
+ tcg_gen_mov_i64(tcg_rt, tmp);
+ tcg_temp_free_i64(tmp);
+ } else {
+ do_gpr_st(s, tcg_rt, clean_addr, size,
+ false, 0, false, false);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
+ do_gpr_st(s, tcg_rt2, clean_addr, size,
+ false, 0, false, false);
+ }
+ }
+
+ if (wback) {
+ if (postindex) {
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ }
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
+ }
+}
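+
+/* E.g. "ldp x0, x1, [sp, #16]" has opc == 0b10 (so size == 3) and
+ * imm7 == 2; the "offset <<= size" step above scales this to
+ * 2 << 3 == 16 bytes.
+ */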
+
+/*
+ * Load/store (immediate post-indexed)
+ * Load/store (immediate pre-indexed)
+ * Load/store (unscaled immediate)
+ *
+ *  31 30 29   27 26 25 24 23 22 21  20    12 11 10 9    5 4    0
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ *
+ * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback),
+ *       10 -> unprivileged
+ * V = 0 -> non-vector
+ * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ */
+static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ int imm9 = sextract32(insn, 12, 9);
+ int idx = extract32(insn, 10, 2);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+ bool is_unpriv = (idx == 2);
+ bool iss_valid = !is_vector;
+ bool post_index;
+ bool writeback;
+ int memidx;
+
+ TCGv_i64 clean_addr, dirty_addr;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4 || is_unpriv) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = ((opc & 1) == 0);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ if (idx != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ switch (idx) {
+ case 0:
+ case 2:
+ post_index = false;
+ writeback = false;
+ break;
+ case 1:
+ post_index = true;
+ writeback = true;
+ break;
+ case 3:
+ post_index = false;
+ writeback = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+ if (!post_index) {
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
+ }
+
+ memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+ clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
+ writeback || rn != 31,
+ size, is_unpriv, memidx);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, clean_addr, size);
+ } else {
+ do_fp_ld(s, rt, clean_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+
+ if (is_store) {
+ do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
+ iss_valid, rt, iss_sf, false);
+ } else {
+ do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, memidx,
+ iss_valid, rt, iss_sf, false);
+ }
+ }
+
+ if (writeback) {
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ if (post_index) {
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
+ }
+ tcg_gen_mov_i64(tcg_rn, dirty_addr);
+ }
+}
+
+/*
+ * Load/store (register offset)
+ *
+ *  31 30 29   27 26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * V: 1 -> vector/simd
+ * opt: extend encoding (see DecodeRegExtend)
+ * S: if S=1 then scale (essentially index by sizeof(size))
+ * Rt: register to transfer into/out of
+ * Rn: address register or SP for base
+ * Rm: offset register or ZR for offset
+ */
+static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ int shift = extract32(insn, 12, 1);
+ int rm = extract32(insn, 16, 5);
+ int opt = extract32(insn, 13, 3);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+
+ TCGv_i64 tcg_rm, clean_addr, dirty_addr;
+
+ if (extract32(opt, 1, 1) == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+
+ tcg_rm = read_cpu_reg(s, rm, 1);
+ ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
+
+ tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
+ clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, clean_addr, size);
+ } else {
+ do_fp_ld(s, rt, clean_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, clean_addr, size,
+ true, rt, iss_sf, false);
+ } else {
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
+ }
+ }
+}
+
+/*
+ * Load/store (unsigned immediate)
+ *
+ *  31 30 29   27 26 25 24 23 22 21        10 9     5 4    0
+ * +----+-------+---+-----+-----+------------+-------+------+
+ * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
+ * +----+-------+---+-----+-----+------------+-------+------+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * Rn: base address register (inc SP)
+ * Rt: target register
+ */
+static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ unsigned int imm12 = extract32(insn, 10, 12);
+ unsigned int offset;
+
+ TCGv_i64 clean_addr, dirty_addr;
+
+ bool is_store;
+ bool is_signed = false;
+ bool is_extended = false;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+ offset = imm12 << size;
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, clean_addr, size);
+ } else {
+ do_fp_ld(s, rt, clean_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, clean_addr, size,
+ true, rt, iss_sf, false);
+ } else {
+ do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
+ is_extended, true, rt, iss_sf, false);
+ }
+ }
+}
+
+/* Atomic memory operations
+ *
+ *  31 30  29   27 26  25 24 23 22 21  20  16 15  14 12 11 10 9  5 4   0
+ * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
+ * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
+ * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
+ *
+ * Rt: the result register
+ * Rn: base address or SP
+ * Rs: the source register for the operation
+ * V: vector flag (always 0 as of v8.3)
+ * A: acquire flag
+ * R: release flag
+ */
+static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
+ int size, int rt, bool is_vector)
+{
+ int rs = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int o3_opc = extract32(insn, 12, 4);
+ bool r = extract32(insn, 22, 1);
+ bool a = extract32(insn, 23, 1);
+ TCGv_i64 tcg_rs, tcg_rt, clean_addr;
+ AtomicThreeOpFn *fn = NULL;
+ MemOp mop = s->be_data | size | MO_ALIGN;
+
+ if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
+ unallocated_encoding(s);
+ return;
+ }
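+ /* Note: the o3_opc case labels below are octal constants, mirroring
+ * the o3:opc bit split; e.g. 010 == 8 (SWP) and 014 == 12 (LDAPR*).
+ */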
+ switch (o3_opc) {
+ case 000: /* LDADD */
+ fn = tcg_gen_atomic_fetch_add_i64;
+ break;
+ case 001: /* LDCLR */
+ fn = tcg_gen_atomic_fetch_and_i64;
+ break;
+ case 002: /* LDEOR */
+ fn = tcg_gen_atomic_fetch_xor_i64;
+ break;
+ case 003: /* LDSET */
+ fn = tcg_gen_atomic_fetch_or_i64;
+ break;
+ case 004: /* LDSMAX */
+ fn = tcg_gen_atomic_fetch_smax_i64;
+ mop |= MO_SIGN;
+ break;
+ case 005: /* LDSMIN */
+ fn = tcg_gen_atomic_fetch_smin_i64;
+ mop |= MO_SIGN;
+ break;
+ case 006: /* LDUMAX */
+ fn = tcg_gen_atomic_fetch_umax_i64;
+ break;
+ case 007: /* LDUMIN */
+ fn = tcg_gen_atomic_fetch_umin_i64;
+ break;
+ case 010: /* SWP */
+ fn = tcg_gen_atomic_xchg_i64;
+ break;
+ case 014: /* LDAPR, LDAPRH, LDAPRB */
+ if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
+ rs != 31 || a != 1 || r != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
+
+ if (o3_opc == 014) {
+ /*
+ * LDAPR* are a special case because they are a simple load, not a
+ * fetch-and-do-something op.
+ * The architectural consistency requirements here are weaker than
+ * full load-acquire (we only need "load-acquire processor consistent"),
+ * but we choose to implement them as full LDAQ.
+ */
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
+ true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ return;
+ }
+
+ tcg_rs = read_cpu_reg(s, rs, true);
+ tcg_rt = cpu_reg(s, rt);
+
+ if (o3_opc == 1) { /* LDCLR */
+ tcg_gen_not_i64(tcg_rs, tcg_rs);
+ }
+
+ /* The tcg atomic primitives are all full barriers. Therefore we
+ * can ignore the Acquire and Release bits of this instruction.
+ */
+ fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
+
+ if ((mop & MO_SIGN) && size != MO_64) {
+ tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
+ }
+}
+
+/*
+ * PAC memory operations
+ *
+ *  31 30  29   27 26  25 24 23 22 21  20     12 11 10 9  5 4   0
+ * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
+ * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
+ * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
+ *
+ * Rt: the result register
+ * Rn: base address or SP
+ * V: vector flag (always 0 as of v8.3)
+ * M: clear for key DA, set for key DB
+ * W: pre-indexing flag
+ * S: sign for imm9.
+ */
+static void disas_ldst_pac(DisasContext *s, uint32_t insn,
+ int size, int rt, bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ bool is_wback = extract32(insn, 11, 1);
+ bool use_key_a = !extract32(insn, 23, 1);
+ int offset;
+ TCGv_i64 clean_addr, dirty_addr, tcg_rt;
+
+ if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+
+ if (s->pauth_active) {
+ if (use_key_a) {
+ gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
+ new_tmp_a64_zero(s));
+ } else {
+ gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
+ new_tmp_a64_zero(s));
+ }
+ }
+
+ /* Form the 10-bit signed, scaled offset. */
+ offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
+ offset = sextract32(offset << size, 0, 10 + size);
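+ /* With size == 3 this is a 13-bit signed byte offset, i.e. the
+ * range -4096..+4088 in steps of 8.
+ */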
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+
+ /* Note that "clean" and "dirty" here refer to TBI not PAC. */
+ clean_addr = gen_mte_check1(s, dirty_addr, false,
+ is_wback || rn != 31, size);
+
+ tcg_rt = cpu_reg(s, rt);
+ do_gpr_ld(s, tcg_rt, clean_addr, size,
+ /* extend */ false, /* iss_valid */ !is_wback,
+ /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
+
+ if (is_wback) {
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
+ }
+}
+
+/*
+ * LDAPR/STLR (unscaled immediate)
+ *
+ *  31 30  29        24 23 22 21  20     12  11 10 9  5 4   0
+ * +------+-------------+-----+---+--------+-----+----+-----+
+ * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
+ * +------+-------------+-----+---+--------+-----+----+-----+
+ *
+ * Rt: source or destination register
+ * Rn: base register
+ * imm9: unscaled immediate offset
+ * opc: 00: STLUR*, 01/10/11: various LDAPUR*
+ * size: size of load/store
+ */
+static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int offset = sextract32(insn, 12, 9);
+ int opc = extract32(insn, 22, 2);
+ int size = extract32(insn, 30, 2);
+ TCGv_i64 clean_addr, dirty_addr;
+ bool is_store = false;
+ bool extend = false;
+ bool iss_sf;
+ MemOp mop;
+
+ if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* TODO: ARMv8.4-LSE SCTLR.nAA */
+ mop = size | MO_ALIGN;
+
+ switch (opc) {
+ case 0: /* STLURB */
+ is_store = true;
+ break;
+ case 1: /* LDAPUR* */
+ break;
+ case 2: /* LDAPURS* 64-bit variant */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ mop |= MO_SIGN;
+ break;
+ case 3: /* LDAPURS* 32-bit variant */
+ if (size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ mop |= MO_SIGN;
+ extend = true; /* zero-extend 32->64 after signed load */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ dirty_addr = read_cpu_reg_sp(s, rn, 1);
+ tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
+ clean_addr = clean_data_tbi(s, dirty_addr);
+
+ if (is_store) {
+ /* Store-Release semantics */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
+ } else {
+ /*
+ * Load-AcquirePC semantics; we implement as the slightly more
+ * restrictive Load-Acquire.
+ */
+ do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
+ extend, true, rt, iss_sf, true);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+}
+
+/* Load/store register (all forms) */
+static void disas_ldst_reg(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int opc = extract32(insn, 22, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ int size = extract32(insn, 30, 2);
+
+ switch (extract32(insn, 24, 2)) {
+ case 0:
+ if (extract32(insn, 21, 1) == 0) {
+ /* Load/store register (unscaled immediate)
+ * Load/store immediate pre/post-indexed
+ * Load/store register unprivileged
+ */
+ disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
+ return;
+ }
+ switch (extract32(insn, 10, 2)) {
+ case 0:
+ disas_ldst_atomic(s, insn, size, rt, is_vector);
+ return;
+ case 2:
+ disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
+ return;
+ default:
+ disas_ldst_pac(s, insn, size, rt, is_vector);
+ return;
+ }
+ break;
+ case 1:
+ disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
+ return;
+ }
+ unallocated_encoding(s);
+}
+
+/* AdvSIMD load/store multiple structures
+ *
+ *  31 30 29           23 22 21         16 15    12 11 10 9    5 4    0
+ * +---+---+---------------+---+-------------+--------+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
+ * +---+---+---------------+---+-------------+--------+------+------+------+
+ *
+ * AdvSIMD load/store multiple structures (post-indexed)
+ *
+ *  31 30 29           23 22 21  20     16 15    12 11 10 9    5 4    0
+ * +---+---+---------------+---+---+---------+--------+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
+ * +---+---+---------------+---+---+---------+--------+------+------+------+
+ *
+ * Rt: first (or only) SIMD&FP register to be transferred
+ * Rn: base address or SP
+ * Rm (post-index only): post-index register (when !31) or size dependent #imm
+ */
+static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 10, 2);
+ int opcode = extract32(insn, 12, 4);
+ bool is_store = !extract32(insn, 22, 1);
+ bool is_postidx = extract32(insn, 23, 1);
+ bool is_q = extract32(insn, 30, 1);
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+ MemOp endian, align, mop;
+
+ int total; /* total bytes */
+ int elements; /* elements per vector */
+ int rpt; /* num iterations */
+ int selem; /* structure elements */
+ int r;
+
+ if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!is_postidx && rm != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* From the shared decode logic */
+ switch (opcode) {
+ case 0x0:
+ rpt = 1;
+ selem = 4;
+ break;
+ case 0x2:
+ rpt = 4;
+ selem = 1;
+ break;
+ case 0x4:
+ rpt = 1;
+ selem = 3;
+ break;
+ case 0x6:
+ rpt = 3;
+ selem = 1;
+ break;
+ case 0x7:
+ rpt = 1;
+ selem = 1;
+ break;
+ case 0x8:
+ rpt = 1;
+ selem = 2;
+ break;
+ case 0xa:
+ rpt = 2;
+ selem = 1;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 3 && !is_q && selem != 1) {
+ /* reserved */
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ /* For our purposes, bytes are always little-endian. */
+ endian = s->be_data;
+ if (size == 0) {
+ endian = MO_LE;
+ }
+
+ total = rpt * selem * (is_q ? 16 : 8);
+ tcg_rn = cpu_reg_sp(s, rn);
+
+ /*
+ * Issue the MTE check vs the logical repeat count, before we
+ * promote consecutive little-endian elements below.
+ */
+ clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
+ total);
+
+ /*
+ * Consecutive little-endian elements from a single register
+ * can be promoted to a larger little-endian operation.
+ */
+ align = MO_ALIGN;
+ if (selem == 1 && endian == MO_LE) {
+ align = pow2_align(size);
+ size = 3;
+ }
+ if (!s->align_mem) {
+ align = 0;
+ }
+ mop = endian | size | align;
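+
+ /*
+ * For example (illustrative): LD1 {v0.16b}, [x0] has selem == 1 and
+ * size == 0; since byte elements are endian-neutral, the loop below
+ * issues two 8-byte loads rather than sixteen 1-byte loads,
+ * regardless of guest endianness.
+ */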
+
+ elements = (is_q ? 16 : 8) >> size;
+ tcg_ebytes = tcg_const_i64(1 << size);
+ for (r = 0; r < rpt; r++) {
+ int e;
+ for (e = 0; e < elements; e++) {
+ int xs;
+ for (xs = 0; xs < selem; xs++) {
+ int tt = (rt + r + xs) % 32;
+ if (is_store) {
+ do_vec_st(s, tt, e, clean_addr, mop);
+ } else {
+ do_vec_ld(s, tt, e, clean_addr, mop);
+ }
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
+ }
+ }
+ }
+ tcg_temp_free_i64(tcg_ebytes);
+
+ if (!is_store) {
+ /* For non-quad operations, setting a slice of the low
+ * 64 bits of the register clears the high 64 bits (in
+ * the ARM ARM pseudocode this is implicit in the fact
+ * that 'rval' is a 64 bit wide variable).
+ * For quad operations, we might still need to zero the
+ * high bits of the SVE register.
+ */
+ for (r = 0; r < rpt * selem; r++) {
+ int tt = (rt + r) % 32;
+ clear_vec_high(s, is_q, tt);
+ }
+ }
+
+ if (is_postidx) {
+ if (rm == 31) {
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
+ } else {
+ tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+ }
+ }
+}
+
+/* AdvSIMD load/store single structure
+ *
+ * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ *
+ * AdvSIMD load/store single structure (post-indexed)
+ *
+ * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ *
+ * Rt: first (or only) SIMD&FP register to be transferred
+ * Rn: base address or SP
+ * Rm (post-index only): post-index register (when Rm != 31) or size-dependent #imm
+ * index: encoded in Q:S:size; which bits are used depends on size
+ *
+ * lane_size = encoded in R, opc
+ * transfer width = encoded in opc, S, size
+ */
+static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 10, 2);
+ int S = extract32(insn, 12, 1);
+ int opc = extract32(insn, 13, 3);
+ int R = extract32(insn, 21, 1);
+ int is_load = extract32(insn, 22, 1);
+ int is_postidx = extract32(insn, 23, 1);
+ int is_q = extract32(insn, 30, 1);
+
+ int scale = extract32(opc, 1, 2);
+ int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
+ bool replicate = false;
+ int index = is_q << 3 | S << 2 | size;
+ int xs, total;
+ TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
+ MemOp mop;
+
+ if (extract32(insn, 31, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!is_postidx && rm != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (scale) {
+ case 3:
+ if (!is_load || S) {
+ unallocated_encoding(s);
+ return;
+ }
+ scale = size;
+ replicate = true;
+ break;
+ case 0:
+ break;
+ case 1:
+ if (extract32(size, 0, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ index >>= 1;
+ break;
+ case 2:
+ if (extract32(size, 1, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!extract32(size, 0, 1)) {
+ index >>= 2;
+ } else {
+ if (S) {
+ unallocated_encoding(s);
+ return;
+ }
+ index >>= 3;
+ scale = 3;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
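+
+ /*
+ * Worked example (illustrative): for a 32-bit lane, scale == 2 and
+ * size<0> must be 0, so after the shift the lane index is Q:S;
+ * LD1 {v0.s}[3], [x0] therefore encodes Q = 1, S = 1.
+ */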
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ total = selem << scale;
+ tcg_rn = cpu_reg_sp(s, rn);
+
+ clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
+ total);
+ mop = finalize_memop(s, scale);
+
+ tcg_ebytes = tcg_const_i64(1 << scale);
+ for (xs = 0; xs < selem; xs++) {
+ if (replicate) {
+ /* Load and replicate to all elements */
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
+ tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
+ (is_q + 1) * 8, vec_full_reg_size(s),
+ tcg_tmp);
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ /* Load/store one element per register */
+ if (is_load) {
+ do_vec_ld(s, rt, index, clean_addr, mop);
+ } else {
+ do_vec_st(s, rt, index, clean_addr, mop);
+ }
+ }
+ tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
+ rt = (rt + 1) % 32;
+ }
+ tcg_temp_free_i64(tcg_ebytes);
+
+ if (is_postidx) {
+ if (rm == 31) {
+ tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
+ } else {
+ tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+ }
+ }
+}
+
+/*
+ * Load/Store memory tags
+ *
+ * 31 30 29 24 22 21 12 10 5 0
+ * +-----+-------------+-----+---+------+-----+------+------+
+ * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt |
+ * +-----+-------------+-----+---+------+-----+------+------+
+ */
+static void disas_ldst_tag(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
+ int op2 = extract32(insn, 10, 2);
+ int op1 = extract32(insn, 22, 2);
+ bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
+ int index = 0;
+ TCGv_i64 addr, clean_addr, tcg_rt;
+
+ /* We checked insn bits [29:24,21] in the caller. */
+ if (extract32(insn, 30, 2) != 3) {
+ goto do_unallocated;
+ }
+
+ /*
+ * @index is a tri-state variable which has 3 states:
+ * < 0 : post-index, writeback
+ * = 0 : signed offset
+ * > 0 : pre-index, writeback
+ */
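+ /* Concretely, via index = op2 - 2 below: op2 == 1 gives post-index,
+ * op2 == 2 signed offset, op2 == 3 pre-index. */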
+ switch (op1) {
+ case 0:
+ if (op2 != 0) {
+ /* STG */
+ index = op2 - 2;
+ } else {
+ /* STZGM */
+ if (s->current_el == 0 || offset != 0) {
+ goto do_unallocated;
+ }
+ is_mult = is_zero = true;
+ }
+ break;
+ case 1:
+ if (op2 != 0) {
+ /* STZG */
+ is_zero = true;
+ index = op2 - 2;
+ } else {
+ /* LDG */
+ is_load = true;
+ }
+ break;
+ case 2:
+ if (op2 != 0) {
+ /* ST2G */
+ is_pair = true;
+ index = op2 - 2;
+ } else {
+ /* STGM */
+ if (s->current_el == 0 || offset != 0) {
+ goto do_unallocated;
+ }
+ is_mult = true;
+ }
+ break;
+ case 3:
+ if (op2 != 0) {
+ /* STZ2G */
+ is_pair = is_zero = true;
+ index = op2 - 2;
+ } else {
+ /* LDGM */
+ if (s->current_el == 0 || offset != 0) {
+ goto do_unallocated;
+ }
+ is_mult = is_load = true;
+ }
+ break;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_mult
+ ? !dc_isar_feature(aa64_mte, s)
+ : !dc_isar_feature(aa64_mte_insn_reg, s)) {
+ goto do_unallocated;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ addr = read_cpu_reg_sp(s, rn, true);
+ if (index >= 0) {
+ /* pre-index or signed offset */
+ tcg_gen_addi_i64(addr, addr, offset);
+ }
+
+ if (is_mult) {
+ tcg_rt = cpu_reg(s, rt);
+
+ if (is_zero) {
+ int size = 4 << s->dcz_blocksize;
+
+ if (s->ata) {
+ gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
+ }
+ /*
+ * The non-tags portion of STZGM is mostly like DC_ZVA,
+ * except the alignment happens before the access.
+ */
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_helper_dc_zva(cpu_env, clean_addr);
+ } else if (s->ata) {
+ if (is_load) {
+ gen_helper_ldgm(tcg_rt, cpu_env, addr);
+ } else {
+ gen_helper_stgm(cpu_env, addr, tcg_rt);
+ }
+ } else {
+ MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
+ int size = 4 << GMID_EL1_BS;
+
+ clean_addr = clean_data_tbi(s, addr);
+ tcg_gen_andi_i64(clean_addr, clean_addr, -size);
+ gen_probe_access(s, clean_addr, acc, size);
+
+ if (is_load) {
+ /* The result tags are zeros. */
+ tcg_gen_movi_i64(tcg_rt, 0);
+ }
+ }
+ return;
+ }
+
+ if (is_load) {
+ tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
+ tcg_rt = cpu_reg(s, rt);
+ if (s->ata) {
+ gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
+ } else {
+ clean_addr = clean_data_tbi(s, addr);
+ gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
+ gen_address_with_allocation_tag0(tcg_rt, addr);
+ }
+ } else {
+ tcg_rt = cpu_reg_sp(s, rt);
+ if (!s->ata) {
+ /*
+ * For STG and ST2G, we need to check alignment and probe memory.
+ * TODO: For STZG and STZ2G, we could rely on the stores below,
+ * at least for system mode; user-only won't enforce alignment.
+ */
+ if (is_pair) {
+ gen_helper_st2g_stub(cpu_env, addr);
+ } else {
+ gen_helper_stg_stub(cpu_env, addr);
+ }
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ if (is_pair) {
+ gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
+ } else {
+ gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
+ }
+ } else {
+ if (is_pair) {
+ gen_helper_st2g(cpu_env, addr, tcg_rt);
+ } else {
+ gen_helper_stg(cpu_env, addr, tcg_rt);
+ }
+ }
+ }
+
+ if (is_zero) {
+ TCGv_i64 clean_addr = clean_data_tbi(s, addr);
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ int mem_index = get_mem_index(s);
+ int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
+
+ tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
+ MO_Q | MO_ALIGN_16);
+ for (i = 8; i < n; i += 8) {
+ tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+ tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
+ }
+ tcg_temp_free_i64(tcg_zero);
+ }
+
+ if (index != 0) {
+ /* pre-index or post-index */
+ if (index < 0) {
+ /* post-index */
+ tcg_gen_addi_i64(addr, addr, offset);
+ }
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
+ }
+}
+
+/* Loads and stores */
+static void disas_ldst(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 24, 6)) {
+ case 0x08: /* Load/store exclusive */
+ disas_ldst_excl(s, insn);
+ break;
+ case 0x18: case 0x1c: /* Load register (literal) */
+ disas_ld_lit(s, insn);
+ break;
+ case 0x28: case 0x29:
+ case 0x2c: case 0x2d: /* Load/store pair (all forms) */
+ disas_ldst_pair(s, insn);
+ break;
+ case 0x38: case 0x39:
+ case 0x3c: case 0x3d: /* Load/store register (all forms) */
+ disas_ldst_reg(s, insn);
+ break;
+ case 0x0c: /* AdvSIMD load/store multiple structures */
+ disas_ldst_multiple_struct(s, insn);
+ break;
+ case 0x0d: /* AdvSIMD load/store single structure */
+ disas_ldst_single_struct(s, insn);
+ break;
+ case 0x19:
+ if (extract32(insn, 21, 1) != 0) {
+ disas_ldst_tag(s, insn);
+ } else if (extract32(insn, 10, 2) == 0) {
+ disas_ldst_ldapr_stlr(s, insn);
+ } else {
+ unallocated_encoding(s);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* PC-rel. addressing
+ * 31 30 29 28 24 23 5 4 0
+ * +----+-------+-----------+-------------------+------+
+ * | op | immlo | 1 0 0 0 0 | immhi | Rd |
+ * +----+-------+-----------+-------------------+------+
+ */
+static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
+{
+ unsigned int page, rd;
+ uint64_t base;
+ uint64_t offset;
+
+ page = extract32(insn, 31, 1);
+ /* SignExtend(immhi:immlo) -> offset */
+ offset = sextract64(insn, 5, 19);
+ offset = offset << 2 | extract32(insn, 29, 2);
+ rd = extract32(insn, 0, 5);
+ base = s->pc_curr;
+
+ if (page) {
+ /* ADRP (page based) */
+ base &= ~0xfff;
+ offset <<= 12;
+ }
+
+ tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
+}
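+
+/*
+ * Worked example (illustrative): an ADRP at pc 0x10008 whose immediate
+ * decodes to 1 computes (0x10008 & ~0xfff) + (1 << 12) = 0x11000, the
+ * start of the following 4KB page; plain ADR would give 0x10008 + 1.
+ */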
+
+/*
+ * Add/subtract (immediate)
+ *
+ * 31 30 29 28 23 22 21 10 9 5 4 0
+ * +--+--+--+-------------+--+-------------+-----+-----+
+ * |sf|op| S| 1 0 0 0 1 0 |sh| imm12 | Rn | Rd |
+ * +--+--+--+-------------+--+-------------+-----+-----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * sh: 1 -> LSL imm by 12
+ */
+static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ uint64_t imm = extract32(insn, 10, 12);
+ bool shift = extract32(insn, 22, 1);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool is_64bit = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
+ TCGv_i64 tcg_result;
+
+ if (shift) {
+ imm <<= 12;
+ }
+
+ tcg_result = tcg_temp_new_i64();
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
+ } else {
+ tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
+ }
+ } else {
+ TCGv_i64 tcg_imm = tcg_const_i64(imm);
+ if (sub_op) {
+ gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ } else {
+ gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ }
+ tcg_temp_free_i64(tcg_imm);
+ }
+
+ if (is_64bit) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/*
+ * Add/subtract (immediate, with tags)
+ *
+ * 31 30 29 28 23 22 21 16 14 10 9 5 4 0
+ * +--+--+--+-------------+--+---------+--+-------+-----+-----+
+ * |sf|op| S| 1 0 0 0 1 1 |o2| uimm6 |o3| uimm4 | Rn | Rd |
+ * +--+--+--+-------------+--+---------+--+-------+-----+-----+
+ *
+ * op: 0 -> add, 1 -> sub
+ */
+static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int uimm4 = extract32(insn, 10, 4);
+ int uimm6 = extract32(insn, 16, 6);
+ bool sub_op = extract32(insn, 30, 1);
+ TCGv_i64 tcg_rn, tcg_rd;
+ int imm;
+
+ /* Test all of sf=1, S=0, o2=0, o3=0. */
+ if ((insn & 0xa040c000u) != 0x80000000u ||
+ !dc_isar_feature(aa64_mte_insn_reg, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ imm = uimm6 << LOG2_TAG_GRANULE;
+ if (sub_op) {
+ imm = -imm;
+ }
+
+ tcg_rn = cpu_reg_sp(s, rn);
+ tcg_rd = cpu_reg_sp(s, rd);
+
+ if (s->ata) {
+ TCGv_i32 offset = tcg_const_i32(imm);
+ TCGv_i32 tag_offset = tcg_const_i32(uimm4);
+
+ gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, offset, tag_offset);
+ tcg_temp_free_i32(tag_offset);
+ tcg_temp_free_i32(offset);
+ } else {
+ tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
+ gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
+ }
+}
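+
+/*
+ * Example (illustrative): ADDG x0, x1, #32, #3 encodes uimm6 = 2
+ * (address offset 2 << LOG2_TAG_GRANULE = 32 bytes) and uimm4 = 3
+ * (logical tag offset, applied by gen_helper_addsubg, which skips
+ * excluded tags when tag generation is enabled).
+ */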
+
+/* The input should be a value in the bottom e bits (with higher
+ * bits zero); returns that value replicated into every element
+ * of size e in a 64 bit integer.
+ */
+static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
+{
+ assert(e != 0);
+ while (e < 64) {
+ mask |= mask << e;
+ e *= 2;
+ }
+ return mask;
+}
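+
+/* For example, bitfield_replicate(0x3, 4) returns 0x3333333333333333. */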
+
+/* Return a value with the bottom len bits set (where 0 < len <= 64) */
+static inline uint64_t bitmask64(unsigned int length)
+{
+ assert(length > 0 && length <= 64);
+ return ~0ULL >> (64 - length);
+}
+
+/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
+ * only require the wmask. Returns false if the imms/immr/immn are a reserved
+ * value (ie should cause a guest UNDEF exception), and true if they are
+ * valid, in which case the decoded bit pattern is written to result.
+ */
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr)
+{
+ uint64_t mask;
+ unsigned e, levels, s, r;
+ int len;
+
+ assert(immn < 2 && imms < 64 && immr < 64);
+
+ /* The bit patterns we create here are 64 bit patterns which
+ * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
+ * 64 bits each. Each element contains the same value: a run
+ * of between 1 and e-1 non-zero bits, rotated within the
+ * element by between 0 and e-1 bits.
+ *
+ * The element size and run length are encoded into immn (1 bit)
+ * and imms (6 bits) as follows:
+ * 64 bit elements: immn = 1, imms = <length of run - 1>
+ * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
+ * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
+ * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
+ * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
+ * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
+ * Notice that immn = 0, imms = 11111x is the only combination
+ * not covered by one of the above options; this is reserved.
+ * Further, <length of run - 1> all-ones is a reserved pattern.
+ *
+ * In all cases the rotation is by immr % e (and immr is 6 bits).
+ */
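+
+ /*
+ * Two worked examples (illustrative): immn = 0, imms = 111100,
+ * immr = 0 selects 2-bit elements with a single set bit and no
+ * rotation, giving 0x5555555555555555; immn = 1, imms = 000111,
+ * immr = 0 selects one 64-bit element with a run of 8 set bits,
+ * giving 0x00000000000000ff.
+ */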
+
+ /* First determine the element size */
+ len = 31 - clz32((immn << 6) | (~imms & 0x3f));
+ if (len < 1) {
+ /* This is the immn == 0, imms == 11111x reserved case */
+ return false;
+ }
+ e = 1 << len;
+
+ levels = e - 1;
+ s = imms & levels;
+ r = immr & levels;
+
+ if (s == levels) {
+ /* <length of run - 1> mustn't be all-ones. */
+ return false;
+ }
+
+ /* Create the value of one element: s+1 set bits rotated
+ * by r within the element (which is e bits wide)...
+ */
+ mask = bitmask64(s + 1);
+ if (r) {
+ mask = (mask >> r) | (mask << (e - r));
+ mask &= bitmask64(e);
+ }
+ /* ...then replicate the element over the whole 64 bit value */
+ mask = bitfield_replicate(mask, e);
+ *result = mask;
+ return true;
+}
+
+/* Logical (immediate)
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_logic_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opc, is_n, immr, imms, rn, rd;
+ TCGv_i64 tcg_rd, tcg_rn;
+ uint64_t wmask;
+ bool is_and = false;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ is_n = extract32(insn, 22, 1);
+ immr = extract32(insn, 16, 6);
+ imms = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && is_n) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (opc == 0x3) { /* ANDS */
+ tcg_rd = cpu_reg(s, rd);
+ } else {
+ tcg_rd = cpu_reg_sp(s, rd);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
+ /* some immediate field values are reserved */
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!sf) {
+ wmask &= 0xffffffff;
+ }
+
+ switch (opc) {
+ case 0x3: /* ANDS */
+ case 0x0: /* AND */
+ tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
+ is_and = true;
+ break;
+ case 0x1: /* ORR */
+ tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ case 0x2: /* EOR */
+ tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ default:
+ assert(FALSE); /* must handle all above */
+ break;
+ }
+
+ if (!sf && !is_and) {
+ /* zero extend final result; we know we can skip this for AND
+ * since the immediate had the high 32 bits clear.
+ */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+
+ if (opc == 3) { /* ANDS */
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/*
+ * Move wide (immediate)
+ *
+ * 31 30 29 28 23 22 21 20 5 4 0
+ * +--+-----+-------------+-----+----------------+------+
+ * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
+ * +--+-----+-------------+-----+----------------+------+
+ *
+ * sf: 0 -> 32 bit, 1 -> 64 bit
+ * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
+ * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf is 1)
+ */
+static void disas_movw_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ uint64_t imm = extract32(insn, 5, 16);
+ int sf = extract32(insn, 31, 1);
+ int opc = extract32(insn, 29, 2);
+ int pos = extract32(insn, 21, 2) << 4;
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_imm;
+
+ if (!sf && (pos >= 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opc) {
+ case 0: /* MOVN */
+ case 2: /* MOVZ */
+ imm <<= pos;
+ if (opc == 0) {
+ imm = ~imm;
+ }
+ if (!sf) {
+ imm &= 0xffffffffu;
+ }
+ tcg_gen_movi_i64(tcg_rd, imm);
+ break;
+ case 3: /* MOVK */
+ tcg_imm = tcg_const_i64(imm);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
+ tcg_temp_free_i64(tcg_imm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
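+
+/*
+ * Example sequence (illustrative): MOVZ w0, #0x1234, LSL #16 sets
+ * w0 = 0x12340000; a following MOVK w0, #0x5678 deposits bits [15:0]
+ * without disturbing the rest, leaving w0 = 0x12345678.
+ */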
+
+/* Bitfield
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_bitfield(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
+ TCGv_i64 tcg_rd, tcg_tmp;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ n = extract32(insn, 22, 1);
+ ri = extract32(insn, 16, 6);
+ si = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ /* Suppress the zero-extend for !sf. Since RI and SI are constrained
+ to be smaller than bitsize, we'll never reference data outside the
+ low 32-bits anyway. */
+ tcg_tmp = read_cpu_reg(s, rn, 1);
+
+ /* Recognize simple(r) extractions. */
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ len = (si - ri) + 1;
+ if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
+ tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
+ goto done;
+ } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
+ tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
+ return;
+ }
+ /* opc == 1, BFXIL: fall through to deposit */
+ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
+ pos = 0;
+ } else {
+ /* Handle the ri > si case with a deposit
+ * Wd<32+s-r,32-r> = Wn<s:0>
+ */
+ len = si + 1;
+ pos = (bitsize - ri) & (bitsize - 1);
+ }
+
+ if (opc == 0 && len < ri) {
+ /* SBFM: sign extend the destination field from len to fill
+ the balance of the word. Let the deposit below insert all
+ of those sign bits. */
+ tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
+ len = ri;
+ }
+
+ if (opc == 1) { /* BFM, BFXIL */
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+ } else {
+ /* SBFM or UBFM: We start with zero, and we haven't modified
+ any bits outside bitsize, therefore the zero-extension
+ below is unneeded. */
+ tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
+ return;
+ }
+
+ done:
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
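+
+/*
+ * Example (illustrative): UBFX w0, w1, #8, #4 is UBFM with ri = 8 and
+ * si = 11; since si >= ri it takes the extract path above with
+ * len = 4. The LSL alias instead has si < ri and lands in the
+ * deposit path.
+ */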
+
+/* Extract
+ * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
+ * +----+------+-------------+---+----+------+--------+------+------+
+ * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
+ * +----+------+-------------+---+----+------+--------+------+------+
+ */
+static void disas_extract(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
+
+ sf = extract32(insn, 31, 1);
+ n = extract32(insn, 22, 1);
+ rm = extract32(insn, 16, 5);
+ imm = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ op21 = extract32(insn, 29, 2);
+ op0 = extract32(insn, 21, 1);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || op21 || op0 || imm >= bitsize) {
+ unallocated_encoding(s);
+ } else {
+ TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (unlikely(imm == 0)) {
+ /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
+ * so an extract from bit 0 is a special case.
+ */
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
+ }
+ } else {
+ tcg_rm = cpu_reg(s, rm);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ /* Specialization to ROR happens in EXTRACT2. */
+ tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(t0, tcg_rm);
+ if (rm == rn) {
+ tcg_gen_rotri_i32(t0, t0, imm);
+ } else {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, tcg_rn);
+ tcg_gen_extract2_i32(t0, t0, t1, imm);
+ tcg_temp_free_i32(t1);
+ }
+ tcg_gen_extu_i32_i64(tcg_rd, t0);
+ tcg_temp_free_i32(t0);
+ }
+ }
+ }
+}
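+
+/*
+ * Example (illustrative): EXTR x0, x1, x2, #8 extracts 64 bits
+ * starting at bit 8 of the 128-bit concatenation x1:x2; when
+ * Rn == Rm this degenerates to ROR, which the extract2 expansion
+ * above exploits.
+ */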
+
+/* Data processing - immediate */
+static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 23, 6)) {
+ case 0x20: case 0x21: /* PC-rel. addressing */
+ disas_pc_rel_adr(s, insn);
+ break;
+ case 0x22: /* Add/subtract (immediate) */
+ disas_add_sub_imm(s, insn);
+ break;
+ case 0x23: /* Add/subtract (immediate, with tags) */
+ disas_add_sub_imm_with_tags(s, insn);
+ break;
+ case 0x24: /* Logical (immediate) */
+ disas_logic_imm(s, insn);
+ break;
+ case 0x25: /* Move wide (immediate) */
+ disas_movw_imm(s, insn);
+ break;
+ case 0x26: /* Bitfield */
+ disas_bitfield(s, insn);
+ break;
+ case 0x27: /* Extract */
+ disas_extract(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Shift a TCGv src by TCGv shift_amount, put result in dst.
+ * Note that it is the caller's responsibility to ensure that the
+ * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
+ * mandated semantics for out of range shifts.
+ */
+static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, TCGv_i64 shift_amount)
+{
+ switch (shift_type) {
+ case A64_SHIFT_TYPE_LSL:
+ tcg_gen_shl_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_LSR:
+ tcg_gen_shr_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ASR:
+ if (!sf) {
+ tcg_gen_ext32s_i64(dst, src);
+ }
+ tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ROR:
+ if (sf) {
+ tcg_gen_rotr_i64(dst, src, shift_amount);
+ } else {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t0, src);
+ tcg_gen_extrl_i64_i32(t1, shift_amount);
+ tcg_gen_rotr_i32(t0, t0, t1);
+ tcg_gen_extu_i32_i64(dst, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+ break;
+ default:
+ assert(FALSE); /* all shift types should be handled */
+ break;
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(dst, dst);
+ }
+}
+
+/* Shift a TCGv src by immediate, put result in dst.
+ * The shift amount must be in range (this should always be true as the
+ * relevant instructions will UNDEF on bad shift immediates).
+ */
+static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, unsigned int shift_i)
+{
+ assert(shift_i < (sf ? 64 : 32));
+
+ if (shift_i == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else {
+ TCGv_i64 shift_const;
+
+ shift_const = tcg_const_i64(shift_i);
+ shift_reg(dst, src, sf, shift_type, shift_const);
+ tcg_temp_free_i64(shift_const);
+ }
+}
+
+/* Logical (shifted register)
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ */
+static void disas_logic_reg(DisasContext *s, uint32_t insn)
+{
+ TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
+ unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ shift_type = extract32(insn, 22, 2);
+ invert = extract32(insn, 21, 1);
+ rm = extract32(insn, 16, 5);
+ shift_amount = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && (shift_amount & (1 << 5))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
+ /* Unshifted ORR and ORN with WZR/XZR are the standard encodings for
+ * register-register MOV and MVN, so they are worth special casing.
+ */
+ tcg_rm = cpu_reg(s, rm);
+ if (invert) {
+ tcg_gen_not_i64(tcg_rd, tcg_rm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_rm);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
+ }
+ }
+ return;
+ }
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ if (shift_amount) {
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
+ }
+
+ tcg_rn = cpu_reg(s, rn);
+
+ switch (opc | (invert << 2)) {
+ case 0: /* AND */
+ case 3: /* ANDS */
+ tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 1: /* ORR */
+ tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 2: /* EOR */
+ tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 4: /* BIC */
+ case 7: /* BICS */
+ tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 5: /* ORN */
+ tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 6: /* EON */
+ tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ default:
+ assert(FALSE);
+ break;
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+
+ if (opc == 3) {
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/*
+ * Add/subtract (extended register)
+ *
+ * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * opt: 00
+ * option: extension type (see DecodeRegExtend)
+ * imm3: optional shift to Rm
+ *
+ * Rd = Rn + LSL(extend(Rm), amount)
+ */
+static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm3 = extract32(insn, 10, 3);
+ int option = extract32(insn, 13, 3);
+ int rm = extract32(insn, 16, 5);
+ int opt = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rm, tcg_rn; /* temps */
+ TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_result;
+
+ if (imm3 > 4 || opt != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* non-flag setting ops may use SP */
+ if (!setflags) {
+ tcg_rd = cpu_reg_sp(s, rd);
+ } else {
+ tcg_rd = cpu_reg(s, rd);
+ }
+ tcg_rn = read_cpu_reg_sp(s, rn, sf);
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/*
+ * Add/subtract (shifted register)
+ *
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
+ * imm6: Shift amount to apply to Rm before the add/sub
+ */
+static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm6 = extract32(insn, 10, 6);
+ int rm = extract32(insn, 16, 5);
+ int shift_type = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn, tcg_rm;
+ TCGv_i64 tcg_result;
+
+ if ((shift_type == 3) || (!sf && (imm6 > 31))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/* Data-processing (3 source)
+ *
+ * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
+ * +--+------+-----------+------+------+----+------+------+------+
+ * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
+ * +--+------+-----------+------+------+----+------+------+------+
+ */
+static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ int op_id = (extract32(insn, 29, 3) << 4) |
+ (extract32(insn, 21, 3) << 1) |
+ extract32(insn, 15, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool is_sub = extract32(op_id, 0, 1);
+ bool is_high = extract32(op_id, 2, 1);
+ bool is_signed = false;
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_tmp;
+
+ /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
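+ /* For example, SMADDL (sf = 1, op54 = 00, op31 = 001, o0 = 0) is op_id 0x42. */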
+ switch (op_id) {
+ case 0x42: /* SMADDL */
+ case 0x43: /* SMSUBL */
+ case 0x44: /* SMULH */
+ is_signed = true;
+ break;
+ case 0x0: /* MADD (32bit) */
+ case 0x1: /* MSUB (32bit) */
+ case 0x40: /* MADD (64bit) */
+ case 0x41: /* MSUB (64bit) */
+ case 0x4a: /* UMADDL */
+ case 0x4b: /* UMSUBL */
+ case 0x4c: /* UMULH */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_high) {
+ TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+ TCGv_i64 tcg_rm = cpu_reg(s, rm);
+
+ if (is_signed) {
+ tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ }
+
+ tcg_temp_free_i64(low_bits);
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i64();
+
+ if (op_id < 0x42) {
+ tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
+ }
+ }
+
+ if (ra == 31 && !is_sub) {
+ /* Special-case MADD with rA == XZR; it is the standard MUL alias */
+ tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
+ } else {
+ tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
+ if (is_sub) {
+ tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ } else {
+ tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ }
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Add/subtract (with carry)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+------------------------+------+-------------+------+-----+
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd |
+ * +--+--+--+------------------------+------+-------------+------+-----+
+ */
+
+static void disas_adc_sbc(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, setflags, rm, rn, rd;
+ TCGv_i64 tcg_y, tcg_rn, tcg_rd;
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ setflags = extract32(insn, 29, 1);
+ rm = extract32(insn, 16, 5);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (op) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
+ } else {
+ tcg_y = cpu_reg(s, rm);
+ }
+
+ if (setflags) {
+ gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
+ } else {
+ gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
+ }
+}
+
+/*
+ * Rotate right into flags
+ * 31 30 29 21 15 10 5 4 0
+ * +--+--+--+-----------------+--------+-----------+------+--+------+
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask |
+ * +--+--+--+-----------------+--------+-----------+------+--+------+
+ */
+static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
+{
+ int mask = extract32(insn, 0, 4);
+ int o2 = extract32(insn, 4, 1);
+ int rn = extract32(insn, 5, 5);
+ int imm6 = extract32(insn, 15, 6);
+ int sf_op_s = extract32(insn, 29, 3);
+ TCGv_i64 tcg_rn;
+ TCGv_i32 nzcv;
+
+ if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rn = read_cpu_reg(s, rn, 1);
+ tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
+
+ nzcv = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
+
+ if (mask & 8) { /* N */
+ tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
+ }
+ if (mask & 4) { /* Z */
+ tcg_gen_not_i32(cpu_ZF, nzcv);
+ tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
+ }
+ if (mask & 2) { /* C */
+ tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
+ }
+ if (mask & 1) { /* V */
+ tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
+ }
+
+ tcg_temp_free_i32(nzcv);
+}
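+
+/*
+ * Example (illustrative): RMIF x0, #60, #2 rotates x0 right by 60 and
+ * copies bit 1 of the rotated value into C, leaving N, Z and V alone,
+ * since only mask bit 1 is set.
+ */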
+
+/*
+ * Evaluate into flags
+ * 31 30 29 21 15 14 10 5 4 0
+ * +--+--+--+-----------------+---------+----+---------+------+--+------+
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 | Rn |o3| mask |
+ * +--+--+--+-----------------+---------+----+---------+------+--+------+
+ */
+static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
+{
+ int o3_mask = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int o2 = extract32(insn, 15, 6);
+ int sz = extract32(insn, 14, 1);
+ int sf_op_s = extract32(insn, 29, 3);
+ TCGv_i32 tmp;
+ int shift;
+
+ if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
+ !dc_isar_feature(aa64_condm_4, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ shift = sz ? 16 : 24; /* SETF16 or SETF8 */
+
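+ /*
+ * The shifts below exploit QEMU's flag representation: bit 31 of
+ * cpu_NF becomes Wn<7> (or Wn<15>), giving N; that same shifted
+ * value is zero exactly when the tested byte/halfword is zero, so
+ * it doubles as cpu_ZF; and xoring the two shifted copies leaves
+ * V = Wn<8> ^ Wn<7> (or Wn<16> ^ Wn<15>). C is left unchanged.
+ */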
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
+ tcg_gen_shli_i32(cpu_NF, tmp, shift);
+ tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
+ tcg_temp_free_i32(tmp);
+}
+
+/* Conditional compare (immediate / register)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * Required: S == 1, o2 == 0, o3 == 0; y = imm5 (imm form) or Rm (reg form)
+ */
+static void disas_cc(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, y, cond, rn, nzcv, is_imm;
+ TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
+ TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
+ DisasCompare c;
+
+ if (!extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (insn & (1 << 10 | 1 << 4)) {
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ is_imm = extract32(insn, 11, 1);
+ y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ nzcv = extract32(insn, 0, 4);
+
+ /* Set T0 = !COND. */
+ tcg_t0 = tcg_temp_new_i32();
+ arm_test_cc(&c, cond);
+ tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
+ arm_free_cc(&c);
+
+ /* Load the arguments for the new comparison. */
+ if (is_imm) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_movi_i64(tcg_y, y);
+ } else {
+ tcg_y = cpu_reg(s, y);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ /* Set the flags for the new comparison. */
+ tcg_tmp = tcg_temp_new_i64();
+ if (op) {
+ gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ } else {
+ gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ }
+ tcg_temp_free_i64(tcg_tmp);
+
+ /* If COND was false, force the flags to #nzcv. Compute two masks
+ * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
+ * For tcg hosts that support ANDC, we can make do with just T1.
+ * In either case, allow the tcg optimizer to delete any unused mask.
+ */
+ tcg_t1 = tcg_temp_new_i32();
+ tcg_t2 = tcg_temp_new_i32();
+ tcg_gen_neg_i32(tcg_t1, tcg_t0);
+ tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
+
+ if (nzcv & 8) { /* N */
+ tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
+ }
+ }
+ if (nzcv & 4) { /* Z */
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
+ }
+ } else {
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
+ }
+ if (nzcv & 2) { /* C */
+ tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
+ }
+ }
+ if (nzcv & 1) { /* V */
+ tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+ }
+ }
+ tcg_temp_free_i32(tcg_t0);
+ tcg_temp_free_i32(tcg_t1);
+ tcg_temp_free_i32(tcg_t2);
+}
+
+/* Conditional select
+ * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ */
+static void disas_cond_select(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
+ TCGv_i64 tcg_rd, zero;
+ DisasCompare64 c;
+
+ if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
+ /* S == 1 or op2<1> == 1 */
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ else_inv = extract32(insn, 30, 1);
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ else_inc = extract32(insn, 10, 1);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ tcg_rd = cpu_reg(s, rd);
+
+ a64_test_cc(&c, cond);
+ zero = tcg_const_i64(0);
+
+ if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
+ /* CSET & CSETM. */
+ tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
+ if (else_inv) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ TCGv_i64 t_true = cpu_reg(s, rn);
+ TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
+ if (else_inv && else_inc) {
+ tcg_gen_neg_i64(t_false, t_false);
+ } else if (else_inv) {
+ tcg_gen_not_i64(t_false, t_false);
+ } else if (else_inc) {
+ tcg_gen_addi_i64(t_false, t_false, 1);
+ }
+ tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
+ }
+
+ tcg_temp_free_i64(zero);
+ a64_free_cc(&c);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
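+
+/*
+ * Examples (illustrative): CSINC x0, x1, x2, ne computes
+ * x0 = ne ? x1 : x2 + 1; CSET w0, eq is the rn == rm == 31 special
+ * case above, i.e. CSINC from WZR with the condition inverted.
+ */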
+
+static void handle_clz(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_cls(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_rbit(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_rbit64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_rbit(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+/* REV with sf==1, opcode==3 ("REV64") */
+static void handle_rev64(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ if (!sf) {
+ unallocated_encoding(s);
+ return;
+ }
+ tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
+}
+
+/* REV with sf==0, opcode==2
+ * REV32 (sf==1, opcode==2)
+ */
+static void handle_rev32(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
+ tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
+ } else {
+ tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
+ }
+}
+
+/* REV16 (opcode==1) */
+static void handle_rev16(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+ TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
+
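+ /*
+ * Worked example (illustrative): with sf == 0 and Wn = 0x11223344,
+ * the two masked halves are 0x22004400 and 0x00110033, which OR to
+ * 0x22114433 -- each 16-bit lane byte-swapped.
+ */
+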
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
+ tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
+ tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
+ tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
+
+ tcg_temp_free_i64(mask);
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Data-processing (1 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+---------+--------+------+------+
+ * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
+ * +----+---+---+-----------------+---------+--------+------+------+
+ */
+static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opcode, opcode2, rn, rd;
+ TCGv_i64 tcg_rd;
+
+ if (extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ sf = extract32(insn, 31, 1);
+ opcode = extract32(insn, 10, 6);
+ opcode2 = extract32(insn, 16, 5);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
+
+ switch (MAP(sf, opcode2, opcode)) {
+ case MAP(0, 0x00, 0x00): /* RBIT */
+ case MAP(1, 0x00, 0x00):
+ handle_rbit(s, sf, rn, rd);
+ break;
+ case MAP(0, 0x00, 0x01): /* REV16 */
+ case MAP(1, 0x00, 0x01):
+ handle_rev16(s, sf, rn, rd);
+ break;
+ case MAP(0, 0x00, 0x02): /* REV/REV32 */
+ case MAP(1, 0x00, 0x02):
+ handle_rev32(s, sf, rn, rd);
+ break;
+ case MAP(1, 0x00, 0x03): /* REV64 */
+ handle_rev64(s, sf, rn, rd);
+ break;
+ case MAP(0, 0x00, 0x04): /* CLZ */
+ case MAP(1, 0x00, 0x04):
+ handle_clz(s, sf, rn, rd);
+ break;
+ case MAP(0, 0x00, 0x05): /* CLS */
+ case MAP(1, 0x00, 0x05):
+ handle_cls(s, sf, rn, rd);
+ break;
+ case MAP(1, 0x01, 0x00): /* PACIA */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x01): /* PACIB */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x02): /* PACDA */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x03): /* PACDB */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x04): /* AUTIA */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x05): /* AUTIB */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x06): /* AUTDA */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x07): /* AUTDB */
+ if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
+ } else if (!dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ break;
+ case MAP(1, 0x01, 0x08): /* PACIZA */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x09): /* PACIZB */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0a): /* PACDZA */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0b): /* PACDZB */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0c): /* AUTIZA */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0d): /* AUTIZB */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0e): /* AUTDZA */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x0f): /* AUTDZB */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s));
+ }
+ break;
+ case MAP(1, 0x01, 0x10): /* XPACI */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
+ }
+ break;
+ case MAP(1, 0x01, 0x11): /* XPACD */
+ if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
+ goto do_unallocated;
+ } else if (s->pauth_active) {
+ tcg_rd = cpu_reg(s, rd);
+ gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
+ }
+ break;
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ break;
+ }
+
+#undef MAP
+}
+
+static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_n, tcg_m, tcg_rd;
+ tcg_rd = cpu_reg(s, rd);
+
+ if (!sf && is_signed) {
+ tcg_n = new_tmp_a64(s);
+ tcg_m = new_tmp_a64(s);
+ tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
+ } else {
+ tcg_n = read_cpu_reg(s, rn, sf);
+ tcg_m = read_cpu_reg(s, rm, sf);
+ }
+
+ if (is_signed) {
+ gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
+ } else {
+ gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* LSLV, LSRV, ASRV, RORV */
+static void handle_shift_reg(DisasContext *s,
+ enum a64_shift_type shift_type, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_shift = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
+ shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
+ tcg_temp_free_i64(tcg_shift);
+}
+
+/* CRC32[BHWX], CRC32C[BHWX] */
+static void handle_crc32(DisasContext *s,
+ unsigned int sf, unsigned int sz, bool crc32c,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_acc, tcg_val;
+ TCGv_i32 tcg_bytes;
+
+ if (!dc_isar_feature(aa64_crc32, s)
+ || (sf == 1 && sz != 3)
+ || (sf == 0 && sz == 3)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (sz == 3) {
+ tcg_val = cpu_reg(s, rm);
+ } else {
+ uint64_t mask;
+ switch (sz) {
+ case 0:
+ mask = 0xFF;
+ break;
+ case 1:
+ mask = 0xFFFF;
+ break;
+ case 2:
+ mask = 0xFFFFFFFF;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_val = new_tmp_a64(s);
+ tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
+ }
+
+ tcg_acc = cpu_reg(s, rn);
+ tcg_bytes = tcg_const_i32(1 << sz);
+
+ if (crc32c) {
+ gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
+ } else {
+ gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
+ }
+
+ tcg_temp_free_i32(tcg_bytes);
+}
+
+/* Data-processing (2 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+------+--------+------+------+
+ * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
+ * +----+---+---+-----------------+------+--------+------+------+
+ */
+static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, rm, opcode, rn, rd, setflag;
+ sf = extract32(insn, 31, 1);
+ setflag = extract32(insn, 29, 1);
+ rm = extract32(insn, 16, 5);
+ opcode = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (setflag && opcode != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SUBP(S) */
+ if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
+ goto do_unallocated;
+ } else {
+ TCGv_i64 tcg_n, tcg_m, tcg_d;
+
+ tcg_n = read_cpu_reg_sp(s, rn, true);
+ tcg_m = read_cpu_reg_sp(s, rm, true);
+ tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
+ tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
+ tcg_d = cpu_reg(s, rd);
+
+ if (setflag) {
+ gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
+ } else {
+ tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
+ }
+ }
+ break;
+ case 2: /* UDIV */
+ handle_div(s, false, sf, rm, rn, rd);
+ break;
+ case 3: /* SDIV */
+ handle_div(s, true, sf, rm, rn, rd);
+ break;
+ case 4: /* IRG */
+ if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
+ goto do_unallocated;
+ }
+ if (s->ata) {
+ gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
+ cpu_reg_sp(s, rn), cpu_reg(s, rm));
+ } else {
+ gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
+ cpu_reg_sp(s, rn));
+ }
+ break;
+ case 5: /* GMI */
+ if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
+ goto do_unallocated;
+ } else {
+ TCGv_i64 t1 = tcg_const_i64(1);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_extract_i64(t2, cpu_reg_sp(s, rn), 56, 4);
+ tcg_gen_shl_i64(t1, t1, t2);
+ tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t1);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case 8: /* LSLV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
+ break;
+ case 9: /* LSRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
+ break;
+ case 10: /* ASRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
+ break;
+ case 11: /* RORV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
+ break;
+ case 12: /* PACGA */
+ if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
+ goto do_unallocated;
+ }
+ gen_helper_pacga(cpu_reg(s, rd), cpu_env,
+ cpu_reg(s, rn), cpu_reg_sp(s, rm));
+ break;
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 23: /* CRC32 */
+ {
+ int sz = extract32(opcode, 0, 2);
+ bool crc32c = extract32(opcode, 2, 1);
+ handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
+ break;
+ }
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/*
+ * Data processing - register
+ * 31 30 29 28 25 21 20 16 10 0
+ * +--+---+--+---+-------+-----+-------+-------+---------+
+ * | |op0| |op1| 1 0 1 | op2 | | op3 | |
+ * +--+---+--+---+-------+-----+-------+-------+---------+
+ */
+static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
+{
+ int op0 = extract32(insn, 30, 1);
+ int op1 = extract32(insn, 28, 1);
+ int op2 = extract32(insn, 21, 4);
+ int op3 = extract32(insn, 10, 6);
+
+ if (!op1) {
+ if (op2 & 8) {
+ if (op2 & 1) {
+ /* Add/sub (extended register) */
+ disas_add_sub_ext_reg(s, insn);
+ } else {
+ /* Add/sub (shifted register) */
+ disas_add_sub_reg(s, insn);
+ }
+ } else {
+ /* Logical (shifted register) */
+ disas_logic_reg(s, insn);
+ }
+ return;
+ }
+
+ switch (op2) {
+ case 0x0:
+ switch (op3) {
+ case 0x00: /* Add/subtract (with carry) */
+ disas_adc_sbc(s, insn);
+ break;
+
+ case 0x01: /* Rotate right into flags */
+ case 0x21:
+ disas_rotate_right_into_flags(s, insn);
+ break;
+
+ case 0x02: /* Evaluate into flags */
+ case 0x12:
+ case 0x22:
+ case 0x32:
+ disas_evaluate_into_flags(s, insn);
+ break;
+
+ default:
+ goto do_unallocated;
+ }
+ break;
+
+ case 0x2: /* Conditional compare */
+ disas_cc(s, insn); /* both imm and reg forms */
+ break;
+
+ case 0x4: /* Conditional select */
+ disas_cond_select(s, insn);
+ break;
+
+ case 0x6: /* Data-processing */
+ if (op0) { /* (1 source) */
+ disas_data_proc_1src(s, insn);
+ } else { /* (2 source) */
+ disas_data_proc_2src(s, insn);
+ }
+ break;
+ case 0x8 ... 0xf: /* (3 source) */
+ disas_data_proc_3src(s, insn);
+ break;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+static void handle_fp_compare(DisasContext *s, int size,
+ unsigned int rn, unsigned int rm,
+ bool cmp_with_zero, bool signal_all_nans)
+{
+ TCGv_i64 tcg_flags = tcg_temp_new_i64();
+ TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
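+    /* The compare helpers return their result in the NZCV format
+     * consumed by gen_set_nzcv() below.
+     */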
+ if (size == MO_64) {
+ TCGv_i64 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_dreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_const_i64(0);
+ } else {
+ tcg_vm = read_fp_dreg(s, rm);
+ }
+ if (signal_all_nans) {
+ gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ tcg_temp_free_i64(tcg_vn);
+ tcg_temp_free_i64(tcg_vm);
+ } else {
+ TCGv_i32 tcg_vn = tcg_temp_new_i32();
+ TCGv_i32 tcg_vm = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_vn, rn, 0, size);
+ if (cmp_with_zero) {
+ tcg_gen_movi_i32(tcg_vm, 0);
+ } else {
+ read_vec_element_i32(s, tcg_vm, rm, 0, size);
+ }
+
+ switch (size) {
+ case MO_32:
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ break;
+ case MO_16:
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_i32(tcg_vn);
+ tcg_temp_free_i32(tcg_vm);
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ gen_set_nzcv(tcg_flags);
+
+ tcg_temp_free_i64(tcg_flags);
+}
+
+/* Floating point compare
+ * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ */
+static void disas_fp_compare(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, op, rn, opc, op2r;
+ int size;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2);
+ rm = extract32(insn, 16, 5);
+ op = extract32(insn, 14, 2);
+ rn = extract32(insn, 5, 5);
+ opc = extract32(insn, 3, 2);
+ op2r = extract32(insn, 0, 3);
+
+ if (mos || op || op2r) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ size = MO_32;
+ break;
+ case 1:
+ size = MO_64;
+ break;
+ case 3:
+ size = MO_16;
+ if (dc_isar_feature(aa64_fp16, s)) {
+ break;
+ }
+ /* fallthru */
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
+}
+
+/* Floating point conditional compare
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ */
+static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, op, nzcv;
+ TCGv_i64 tcg_flags;
+ TCGLabel *label_continue = NULL;
+ int size;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2);
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ op = extract32(insn, 4, 1);
+ nzcv = extract32(insn, 0, 4);
+
+ if (mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ size = MO_32;
+ break;
+ case 1:
+ size = MO_64;
+ break;
+ case 3:
+ size = MO_16;
+ if (dc_isar_feature(aa64_fp16, s)) {
+ break;
+ }
+ /* fallthru */
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (cond < 0x0e) { /* not always */
+ TCGLabel *label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ tcg_flags = tcg_const_i64(nzcv << 28);
+ gen_set_nzcv(tcg_flags);
+ tcg_temp_free_i64(tcg_flags);
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
+
+ handle_fp_compare(s, size, rn, rm, false, op);
+
+ if (cond < 0x0e) {
+ gen_set_label(label_continue);
+ }
+}
+
+/* Floating point conditional select
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ */
+static void disas_fp_csel(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, rd;
+ TCGv_i64 t_true, t_false, t_zero;
+ DisasCompare64 c;
+ MemOp sz;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2);
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ sz = MO_32;
+ break;
+ case 1:
+ sz = MO_64;
+ break;
+ case 3:
+ sz = MO_16;
+ if (dc_isar_feature(aa64_fp16, s)) {
+ break;
+ }
+ /* fallthru */
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* Zero extend sreg & hreg inputs to 64 bits now. */
+ t_true = tcg_temp_new_i64();
+ t_false = tcg_temp_new_i64();
+ read_vec_element(s, t_true, rn, 0, sz);
+ read_vec_element(s, t_false, rm, 0, sz);
+
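+    /* Select t_true if the condition holds, otherwise t_false */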
+ a64_test_cc(&c, cond);
+ t_zero = tcg_const_i64(0);
+ tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
+ tcg_temp_free_i64(t_zero);
+ tcg_temp_free_i64(t_false);
+ a64_free_cc(&c);
+
+    /* Note that sregs & hregs write back zeros to the high bits,
+     * and we've already done the zero-extension.
+     */
+ write_fp_dreg(s, rd, t_true);
+ tcg_temp_free_i64(t_true);
+}
+
+/* Floating-point data-processing (1 source) - half precision */
+static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
+{
+ TCGv_ptr fpst = NULL;
+ TCGv_i32 tcg_op = read_fp_hreg(s, rn);
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i32(tcg_res, tcg_op);
+ break;
+ case 0x1: /* FABS */
+ tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
+ break;
+ case 0x2: /* FNEG */
+ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
+ break;
+ case 0x3: /* FSQRT */
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ {
+ TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+
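+        /* set_rmode returns the previous rounding mode in tcg_rmode,
+         * so the second call below restores the original mode.
+         */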
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case 0xe: /* FRINTX */
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
+ break;
+ case 0xf: /* FRINTI */
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ abort();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ if (fpst) {
+ tcg_temp_free_ptr(fpst);
+ }
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating-point data-processing (1 source) - single precision */
+static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
+{
+ void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
+ TCGv_i32 tcg_op, tcg_res;
+ TCGv_ptr fpst;
+ int rmode = -1;
+
+ tcg_op = read_fp_sreg(s, rn);
+ tcg_res = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i32(tcg_res, tcg_op);
+ goto done;
+ case 0x1: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ goto done;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ goto done;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ goto done;
+ case 0x6: /* BFCVT */
+ gen_fpst = gen_helper_bfcvt;
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ rmode = arm_rmode_to_sf(opcode & 7);
+ gen_fpst = gen_helper_rints;
+ break;
+ case 0xe: /* FRINTX */
+ gen_fpst = gen_helper_rints_exact;
+ break;
+ case 0xf: /* FRINTI */
+ gen_fpst = gen_helper_rints;
+ break;
+ case 0x10: /* FRINT32Z */
+ rmode = float_round_to_zero;
+ gen_fpst = gen_helper_frint32_s;
+ break;
+ case 0x11: /* FRINT32X */
+ gen_fpst = gen_helper_frint32_s;
+ break;
+ case 0x12: /* FRINT64Z */
+ rmode = float_round_to_zero;
+ gen_fpst = gen_helper_frint64_s;
+ break;
+ case 0x13: /* FRINT64X */
+ gen_fpst = gen_helper_frint64_s;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ if (rmode >= 0) {
+ TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_fpst(tcg_res, tcg_op, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ } else {
+ gen_fpst(tcg_res, tcg_op, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+
+ done:
+ write_fp_sreg(s, rd, tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating-point data-processing (1 source) - double precision */
+static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
+{
+ void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
+ TCGv_i64 tcg_op, tcg_res;
+ TCGv_ptr fpst;
+ int rmode = -1;
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
+ return;
+ }
+
+ tcg_op = read_fp_dreg(s, rn);
+ tcg_res = tcg_temp_new_i64();
+
+ switch (opcode) {
+ case 0x1: /* FABS */
+ gen_helper_vfp_absd(tcg_res, tcg_op);
+ goto done;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negd(tcg_res, tcg_op);
+ goto done;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
+ goto done;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ rmode = arm_rmode_to_sf(opcode & 7);
+ gen_fpst = gen_helper_rintd;
+ break;
+ case 0xe: /* FRINTX */
+ gen_fpst = gen_helper_rintd_exact;
+ break;
+ case 0xf: /* FRINTI */
+ gen_fpst = gen_helper_rintd;
+ break;
+ case 0x10: /* FRINT32Z */
+ rmode = float_round_to_zero;
+ gen_fpst = gen_helper_frint32_d;
+ break;
+ case 0x11: /* FRINT32X */
+ gen_fpst = gen_helper_frint32_d;
+ break;
+ case 0x12: /* FRINT64Z */
+ rmode = float_round_to_zero;
+ gen_fpst = gen_helper_frint64_d;
+ break;
+ case 0x13: /* FRINT64X */
+ gen_fpst = gen_helper_frint64_d;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ if (rmode >= 0) {
+ TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_fpst(tcg_res, tcg_op, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ } else {
+ gen_fpst(tcg_res, tcg_op, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+
+ done:
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+}
+
+static void handle_fp_fcvt(DisasContext *s, int opcode,
+ int rd, int rn, int dtype, int ntype)
+{
+ switch (ntype) {
+ case 0x0:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ if (dtype == 1) {
+ /* Single to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ } else {
+ /* Single to half */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ TCGv_i32 ahp = get_ahp_flag();
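+            /* FPCR.AHP selects IEEE vs Alternative half-precision format */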
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(ahp);
+ tcg_temp_free_ptr(fpst);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ break;
+ }
+ case 0x1:
+ {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ if (dtype == 0) {
+ /* Double to single */
+ gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
+ } else {
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+ TCGv_i32 ahp = get_ahp_flag();
+ /* Double to half */
+ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(ahp);
+ }
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ break;
+ }
+ case 0x3:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
+ TCGv_i32 tcg_ahp = get_ahp_flag();
+ tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
+ if (dtype == 0) {
+ /* Half to single */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ } else {
+ /* Half to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ tcg_temp_free_ptr(tcg_fpst);
+ tcg_temp_free_i32(tcg_ahp);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+/* Floating point data-processing (1 source)
+ * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ */
+static void disas_fp_1src(DisasContext *s, uint32_t insn)
+{
+ int mos = extract32(insn, 29, 3);
+ int type = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 15, 6);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (mos) {
+ goto do_unallocated;
+ }
+
+ switch (opcode) {
+ case 0x4: case 0x5: case 0x7:
+ {
+ /* FCVT between half, single and double precision */
+ int dtype = extract32(opcode, 0, 2);
+ if (type == 2 || dtype == type) {
+ goto do_unallocated;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
+ break;
+ }
+
+ case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
+ if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
+ goto do_unallocated;
+ }
+ /* fall through */
+ case 0x0 ... 0x3:
+ case 0x8 ... 0xc:
+ case 0xe ... 0xf:
+ /* 32-to-32 and 64-to-64 ops */
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_1src_single(s, opcode, rd, rn);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_1src_double(s, opcode, rd, rn);
+ break;
+ case 3:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ goto do_unallocated;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_1src_half(s, opcode, rd, rn);
+ break;
+ default:
+ goto do_unallocated;
+ }
+ break;
+
+ case 0x6:
+ switch (type) {
+ case 1: /* BFCVT */
+ if (!dc_isar_feature(aa64_bf16, s)) {
+ goto do_unallocated;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_1src_single(s, opcode, rd, rn);
+ break;
+ default:
+ goto do_unallocated;
+ }
+ break;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Floating-point data-processing (2 source) - single precision */
+static void handle_fp_2src_single(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_op1;
+ TCGv_i32 tcg_op2;
+ TCGv_i32 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i32();
+ fpst = fpstatus_ptr(FPST_FPCR);
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negs(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating-point data-processing (2 source) - double precision */
+static void handle_fp_2src_double(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i64();
+ fpst = fpstatus_ptr(FPST_FPCR);
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negd(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* Floating-point data-processing (2 source) - half precision */
+static void handle_fp_2src_half(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_op1;
+ TCGv_i32 tcg_op2;
+ TCGv_i32 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i32();
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ tcg_op1 = read_fp_hreg(s, rn);
+ tcg_op2 = read_fp_hreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
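+        /* Negate the product by flipping the float16 sign bit */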
+ tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating point data-processing (2 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_fp_2src(DisasContext *s, uint32_t insn)
+{
+ int mos = extract32(insn, 29, 3);
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int opcode = extract32(insn, 12, 4);
+
+ if (opcode > 8 || mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_2src_single(s, opcode, rd, rn, rm);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_2src_double(s, opcode, rd, rn, rm);
+ break;
+ case 3:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_2src_half(s, opcode, rd, rn, rm);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
+
+/* Floating-point data-processing (3 source) - single precision */
+static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+ tcg_op3 = read_fp_sreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+     * correct: an input NaN should come out with its sign bit
+     * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ gen_helper_vfp_negs(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_op3);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating-point data-processing (3 source) - double precision */
+static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+ tcg_op3 = read_fp_dreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+     * correct: an input NaN should come out with its sign bit
+     * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ gen_helper_vfp_negd(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_op3);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* Floating-point data-processing (3 source) - half precision */
+static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
+
+ tcg_op1 = read_fp_hreg(s, rn);
+ tcg_op2 = read_fp_hreg(s, rm);
+ tcg_op3 = read_fp_hreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+     * correct: an input NaN should come out with its sign bit
+     * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
+ }
+
+ if (o0 != o1) {
+ tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
+ }
+
+ gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_op3);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* Floating point data-processing (3 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ */
+static void disas_fp_3src(DisasContext *s, uint32_t insn)
+{
+ int mos = extract32(insn, 29, 3);
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ bool o0 = extract32(insn, 15, 1);
+ bool o1 = extract32(insn, 21, 1);
+
+ if (mos) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
+ break;
+ case 3:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
+
+/* Floating point immediate
+ * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ */
+static void disas_fp_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int imm5 = extract32(insn, 5, 5);
+ int imm8 = extract32(insn, 13, 8);
+ int type = extract32(insn, 22, 2);
+ int mos = extract32(insn, 29, 3);
+ uint64_t imm;
+ TCGv_i64 tcg_res;
+ MemOp sz;
+
+ if (mos || imm5) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ sz = MO_32;
+ break;
+ case 1:
+ sz = MO_64;
+ break;
+ case 3:
+ sz = MO_16;
+ if (dc_isar_feature(aa64_fp16, s)) {
+ break;
+ }
+ /* fallthru */
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
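+    /* Expand the 8-bit immediate into a full-width FP constant,
+     * as per the VFPExpandImm() pseudocode.
+     */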
+ imm = vfp_expand_imm(sz, imm8);
+
+ tcg_res = tcg_const_i64(imm);
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* Handle floating point <=> fixed point conversions. Note that we can
+ * also deal with fp <=> integer conversions as a special case (scale == 64).
+ * OPTME: consider handling that special case specially or at least skipping
+ * the call to scalbn in the helpers for zero shifts.
+ */
+static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
+ bool itof, int rmode, int scale, int sf, int type)
+{
+ bool is_signed = !(opcode & 1);
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_shift, tcg_single;
+ TCGv_i64 tcg_double;
+
+ tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
+
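+    /* The helpers take the number of fractional bits, i.e. 64 - scale;
+     * scale == 64 thus gives a shift of 0, a plain integer conversion.
+     */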
+ tcg_shift = tcg_const_i32(64 - scale);
+
+ if (itof) {
+ TCGv_i64 tcg_int = cpu_reg(s, rn);
+ if (!sf) {
+ TCGv_i64 tcg_extend = new_tmp_a64(s);
+
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_extend, tcg_int);
+ } else {
+ tcg_gen_ext32u_i64(tcg_extend, tcg_int);
+ }
+
+ tcg_int = tcg_extend;
+ }
+
+ switch (type) {
+ case 1: /* float64 */
+ tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_dreg(s, rd, tcg_double);
+ tcg_temp_free_i64(tcg_double);
+ break;
+
+ case 0: /* float32 */
+ tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_sreg(s, rd, tcg_single);
+ tcg_temp_free_i32(tcg_single);
+ break;
+
+ case 3: /* float16 */
+ tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtoh(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtoh(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_sreg(s, rd, tcg_single);
+ tcg_temp_free_i32(tcg_single);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ TCGv_i64 tcg_int = cpu_reg(s, rd);
+ TCGv_i32 tcg_rmode;
+
+ if (extract32(opcode, 2, 1)) {
+ /* There are too many rounding modes to all fit into rmode,
+ * so FCVTA[US] is a special case.
+ */
+ rmode = FPROUNDING_TIEAWAY;
+ }
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+
+ switch (type) {
+ case 1: /* float64 */
+ tcg_double = read_fp_dreg(s, rn);
+ if (is_signed) {
+ if (!sf) {
+ gen_helper_vfp_tosld(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ if (!sf) {
+ gen_helper_vfp_tould(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ }
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_int, tcg_int);
+ }
+ tcg_temp_free_i64(tcg_double);
+ break;
+
+ case 0: /* float32 */
+ tcg_single = read_fp_sreg(s, rn);
+ if (sf) {
+ if (is_signed) {
+ gen_helper_vfp_tosqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ TCGv_i32 tcg_dest = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
+ tcg_temp_free_i32(tcg_dest);
+ }
+ tcg_temp_free_i32(tcg_single);
+ break;
+
+ case 3: /* float16 */
+ tcg_single = read_fp_sreg(s, rn);
+ if (sf) {
+ if (is_signed) {
+ gen_helper_vfp_tosqh(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqh(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ TCGv_i32 tcg_dest = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_toslh(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_toulh(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
+ tcg_temp_free_i32(tcg_dest);
+ }
+ tcg_temp_free_i32(tcg_single);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+}
+
+/* Floating point <-> fixed point conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ */
+static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int scale = extract32(insn, 10, 6);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool itof;
+
+ if (sbit || (!sf && scale < 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0: /* float32 */
+ case 1: /* float64 */
+ break;
+ case 3: /* float16 */
+ if (dc_isar_feature(aa64_fp16, s)) {
+ break;
+ }
+ /* fallthru */
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch ((rmode << 3) | opcode) {
+ case 0x2: /* SCVTF */
+ case 0x3: /* UCVTF */
+ itof = true;
+ break;
+ case 0x18: /* FCVTZS */
+ case 0x19: /* FCVTZU */
+ itof = false;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
+}
+
+static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
+{
+ /* FMOV: gpr to or from float, double, or top half of quad fp reg,
+ * without conversion.
+ */
+
+ if (itof) {
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+ TCGv_i64 tmp;
+
+ switch (type) {
+ case 0:
+ /* 32 bit */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(tmp, tcg_rn);
+ write_fp_dreg(s, rd, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 1:
+ /* 64 bit */
+ write_fp_dreg(s, rd, tcg_rn);
+ break;
+ case 2:
+ /* 64 bit to top half. */
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
+ clear_vec_high(s, true, rd);
+ break;
+ case 3:
+ /* 16 bit */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(tmp, tcg_rn);
+ write_fp_dreg(s, rd, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+
+ switch (type) {
+ case 0:
+ /* 32 bit */
+ tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
+ break;
+ case 1:
+ /* 64 bit */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
+ break;
+ case 2:
+ /* 64 bits from top half */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
+ break;
+ case 3:
+ /* 16 bit */
+ tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
+{
+ TCGv_i64 t = read_fp_dreg(s, rn);
+ TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
+
+ gen_helper_fjcvtzs(t, t, fpstatus);
+
+ tcg_temp_free_ptr(fpstatus);
+
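+    /* The helper returns the 32-bit result in the low half of t and
+     * the value for ZF in the high half; N, C and V are all cleared.
+     */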
+ tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
+ tcg_gen_extrh_i64_i32(cpu_ZF, t);
+ tcg_gen_movi_i32(cpu_CF, 0);
+ tcg_gen_movi_i32(cpu_NF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+
+ tcg_temp_free_i64(t);
+}
+
+/* Floating point <-> integer conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ */
+static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool itof = false;
+
+ if (sbit) {
+ goto do_unallocated;
+ }
+
+ switch (opcode) {
+ case 2: /* SCVTF */
+ case 3: /* UCVTF */
+ itof = true;
+ /* fallthru */
+ case 4: /* FCVTAS */
+ case 5: /* FCVTAU */
+ if (rmode != 0) {
+ goto do_unallocated;
+ }
+ /* fallthru */
+ case 0: /* FCVT[NPMZ]S */
+ case 1: /* FCVT[NPMZ]U */
+ switch (type) {
+ case 0: /* float32 */
+ case 1: /* float64 */
+ break;
+ case 3: /* float16 */
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ goto do_unallocated;
+ }
+ break;
+ default:
+ goto do_unallocated;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
+ break;
+
+ default:
+ switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
+ case 0b01100110: /* FMOV half <-> 32-bit int */
+ case 0b01100111:
+ case 0b11100110: /* FMOV half <-> 64-bit int */
+ case 0b11100111:
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ goto do_unallocated;
+ }
+ /* fallthru */
+ case 0b00000110: /* FMOV 32-bit */
+ case 0b00000111:
+ case 0b10100110: /* FMOV 64-bit */
+ case 0b10100111:
+ case 0b11001110: /* FMOV top half of 128-bit */
+ case 0b11001111:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ itof = opcode & 1;
+ handle_fmov(s, rd, rn, type, itof);
+ break;
+
+ case 0b00111110: /* FJCVTZS */
+ if (!dc_isar_feature(aa64_jscvt, s)) {
+ goto do_unallocated;
+ } else if (fp_access_check(s)) {
+ handle_fjcvtzs(s, rd, rn);
+ }
+ break;
+
+ default:
+ do_unallocated:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+}
+
+/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
+ * 31 30 29 28 25 24 0
+ * +---+---+---+---------+-----------------------------+
+ * | | 0 | | 1 1 1 1 | |
+ * +---+---+---+---------+-----------------------------+
+ */
+static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
+{
+ if (extract32(insn, 24, 1)) {
+ /* Floating point data-processing (3 source) */
+ disas_fp_3src(s, insn);
+ } else if (extract32(insn, 21, 1) == 0) {
+ /* Floating point to fixed point conversions */
+ disas_fp_fixed_conv(s, insn);
+ } else {
+ switch (extract32(insn, 10, 2)) {
+ case 1:
+ /* Floating point conditional compare */
+ disas_fp_ccomp(s, insn);
+ break;
+ case 2:
+ /* Floating point data-processing (2 source) */
+ disas_fp_2src(s, insn);
+ break;
+ case 3:
+ /* Floating point conditional select */
+ disas_fp_csel(s, insn);
+ break;
+ case 0:
+ switch (ctz32(extract32(insn, 12, 4))) {
+ case 0: /* [15:12] == xxx1 */
+ /* Floating point immediate */
+ disas_fp_imm(s, insn);
+ break;
+ case 1: /* [15:12] == xx10 */
+ /* Floating point compare */
+ disas_fp_compare(s, insn);
+ break;
+ case 2: /* [15:12] == x100 */
+ /* Floating point data-processing (1 source) */
+ disas_fp_1src(s, insn);
+ break;
+ case 3: /* [15:12] == 1000 */
+ unallocated_encoding(s);
+ break;
+ default: /* [15:12] == 0000 */
+ /* Floating point <-> integer conversions */
+ disas_fp_int_conv(s, insn);
+ break;
+ }
+ break;
+ }
+ }
+}
+
+static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
+ int pos)
+{
+ /* Extract 64 bits from the middle of two concatenated 64 bit
+ * vector register slices left:right. The extracted bits start
+ * at 'pos' bits into the right (least significant) side.
+ * We return the result in tcg_right, and guarantee not to
+ * trash tcg_left.
+ */
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ assert(pos > 0 && pos < 64);
+
+ tcg_gen_shri_i64(tcg_right, tcg_right, pos);
+ tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
+ tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* EXT
+ * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
+ * +---+---+-------------+-----+---+------+---+------+---+------+------+
+ * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
+ * +---+---+-------------+-----+---+------+---+------+---+------+------+
+ */
+static void disas_simd_ext(DisasContext *s, uint32_t insn)
+{
+ int is_q = extract32(insn, 30, 1);
+ int op2 = extract32(insn, 22, 2);
+ int imm4 = extract32(insn, 11, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int pos = imm4 << 3;
+ TCGv_i64 tcg_resl, tcg_resh;
+
+ if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_resh = tcg_temp_new_i64();
+ tcg_resl = tcg_temp_new_i64();
+
+ /* Vd gets bits starting at pos bits into Vm:Vn. This is
+ * either extracting 128 bits from a 128:128 concatenation, or
+ * extracting 64 bits from a 64:64 concatenation.
+ */
+ if (!is_q) {
+ read_vec_element(s, tcg_resl, rn, 0, MO_64);
+ if (pos != 0) {
+ read_vec_element(s, tcg_resh, rm, 0, MO_64);
+ do_ext64(s, tcg_resh, tcg_resl, pos);
+ }
+ } else {
+ TCGv_i64 tcg_hh;
+ typedef struct {
+ int reg;
+ int elt;
+ } EltPosns;
+ EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
+ EltPosns *elt = eltposns;
+
+ if (pos >= 64) {
+ elt++;
+ pos -= 64;
+ }
+
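+        /* elt now indexes the 64-bit slice of Vm:Vn containing bit 'pos' */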
+ read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
+ elt++;
+ read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
+ elt++;
+ if (pos != 0) {
+ do_ext64(s, tcg_resh, tcg_resl, pos);
+ tcg_hh = tcg_temp_new_i64();
+ read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
+ do_ext64(s, tcg_hh, tcg_resh, pos);
+ tcg_temp_free_i64(tcg_hh);
+ }
+ }
+
+ write_vec_element(s, tcg_resl, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_resl);
+ if (is_q) {
+ write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ }
+ tcg_temp_free_i64(tcg_resh);
+ clear_vec_high(s, is_q, rd);
+}
+
+/* TBL/TBX
+ * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
+ * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
+ * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
+ */
+static void disas_simd_tb(DisasContext *s, uint32_t insn)
+{
+ int op2 = extract32(insn, 22, 2);
+ int is_q = extract32(insn, 30, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int is_tbx = extract32(insn, 12, 1);
+ int len = (extract32(insn, 13, 2) + 1) * 16;
+
+ if (op2 != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
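+    /* The table length, the TBX flag and the table base register Rn
+     * are packed into the helper's simd_data immediate.
+     */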
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rm), cpu_env,
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ (len << 6) | (is_tbx << 5) | rn,
+ gen_helper_simd_tblx);
+}
+
+/* ZIP/UZP/TRN
+ * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
+ * +---+---+-------------+------+---+------+---+------------------+------+
+ * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
+ * +---+---+-------------+------+---+------+---+------------------+------+
+ */
+static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ /* opc field bits [1:0] indicate ZIP/UZP/TRN;
+ * bit 2 indicates 1 vs 2 variant of the insn.
+ */
+ int opcode = extract32(insn, 12, 2);
+ bool part = extract32(insn, 14, 1);
+ bool is_q = extract32(insn, 30, 1);
+ int esize = 8 << size;
+ int i, ofs;
+ int datasize = is_q ? 128 : 64;
+ int elements = datasize / esize;
+ TCGv_i64 tcg_res, tcg_resl, tcg_resh;
+
+ if (opcode == 0 || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_resl = tcg_const_i64(0);
+ tcg_resh = is_q ? tcg_const_i64(0) : NULL;
+ tcg_res = tcg_temp_new_i64();
+
+ for (i = 0; i < elements; i++) {
+ switch (opcode) {
+ case 1: /* UZP1/2 */
+ {
+ int midpoint = elements / 2;
+ if (i < midpoint) {
+ read_vec_element(s, tcg_res, rn, 2 * i + part, size);
+ } else {
+ read_vec_element(s, tcg_res, rm,
+ 2 * (i - midpoint) + part, size);
+ }
+ break;
+ }
+ case 2: /* TRN1/2 */
+ if (i & 1) {
+ read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
+ } else {
+ read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
+ }
+ break;
+ case 3: /* ZIP1/2 */
+ {
+ int base = part * elements / 2;
+ if (i & 1) {
+ read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
+ } else {
+ read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
+ }
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
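+        /* Merge the element into the low or high 64-bit result */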
+ ofs = i * esize;
+ if (ofs < 64) {
+ tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
+ tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
+ } else {
+ tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
+ tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
+ }
+ }
+
+ tcg_temp_free_i64(tcg_res);
+
+ write_vec_element(s, tcg_resl, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_resl);
+
+ if (is_q) {
+ write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_resh);
+ }
+ clear_vec_high(s, is_q, rd);
+}
+
+/*
+ * do_reduction_op helper
+ *
+ * This mirrors the Reduce() pseudocode in the ARM ARM. It is
+ * important for correct NaN propagation that we do these
+ * operations in exactly the order specified by the pseudocode.
+ *
+ * This is a recursive function; TCG temps should be freed by the
+ * calling function once it is done with the values.
+ */
+static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
+ int esize, int size, int vmap, TCGv_ptr fpst)
+{
+ if (esize == size) {
+ int element;
+ MemOp msize = esize == 16 ? MO_16 : MO_32;
+ TCGv_i32 tcg_elem;
+
+ /* We should have one register left here */
+ assert(ctpop8(vmap) == 1);
+ element = ctz32(vmap);
+ assert(element < 8);
+
+ tcg_elem = tcg_temp_new_i32();
+ read_vec_element_i32(s, tcg_elem, rn, element, msize);
+ return tcg_elem;
+ } else {
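+        /* Split the bitmap of participating elements in half and
+         * reduce each half recursively before combining the results.
+         */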
+ int bits = size / 2;
+ int shift = ctpop8(vmap) / 2;
+ int vmap_lo = (vmap >> shift) & vmap;
+ int vmap_hi = (vmap & ~vmap_lo);
+ TCGv_i32 tcg_hi, tcg_lo, tcg_res;
+
+ tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
+ tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
+ tcg_res = tcg_temp_new_i32();
+
+ switch (fpopcode) {
+ case 0x0c: /* fmaxnmv half-precision */
+ gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x0f: /* fmaxv half-precision */
+ gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x1c: /* fminnmv half-precision */
+ gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x1f: /* fminv half-precision */
+ gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x2c: /* fmaxnmv */
+ gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x2f: /* fmaxv */
+ gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x3c: /* fminnmv */
+ gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ case 0x3f: /* fminv */
+ gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_i32(tcg_hi);
+ tcg_temp_free_i32(tcg_lo);
+ return tcg_res;
+ }
+}
+
+/* AdvSIMD across lanes
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ bool is_q = extract32(insn, 30, 1);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_fp = false;
+ bool is_min = false;
+ int esize;
+ int elements;
+ int i;
+ TCGv_i64 tcg_res, tcg_elt;
+
+ switch (opcode) {
+ case 0x1b: /* ADDV */
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x3: /* SADDLV, UADDLV */
+ case 0xa: /* SMAXV, UMAXV */
+ case 0x1a: /* SMINV, UMINV */
+ if (size == 3 || (size == 2 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0xc: /* FMAXNMV, FMINNMV */
+ case 0xf: /* FMAXV, FMINV */
+ /* Bit 1 of size field encodes min vs max and the actual size
+ * depends on the encoding of the U bit. If not set (and FP16
+ * enabled) then we do half-precision float instead of single
+ * precision.
+ */
+ is_min = extract32(size, 1, 1);
+ is_fp = true;
+ if (!is_u && dc_isar_feature(aa64_fp16, s)) {
+ size = 1;
+ } else if (!is_u || !is_q || extract32(size, 0, 1)) {
+ unallocated_encoding(s);
+ return;
+ } else {
+ size = 2;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ esize = 8 << size;
+ elements = (is_q ? 128 : 64) / esize;
+
+ tcg_res = tcg_temp_new_i64();
+ tcg_elt = tcg_temp_new_i64();
+
+ /* These instructions operate across all lanes of a vector
+ * to produce a single result. We can guarantee that a 64
+ * bit intermediate is sufficient:
+ * + for [US]ADDLV the maximum element size is 32 bits, and
+ * the result type is 64 bits
+ * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
+ * same as the element size, which is 32 bits at most
+ * For the integer operations we can choose to work at 64
+ * or 32 bits and truncate at the end; for simplicity
+ * we use 64 bits always. The floating point
+ * ops do require 32 bit intermediates, though.
+ */
+ if (!is_fp) {
+ read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
+
+ for (i = 1; i < elements; i++) {
+ read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
+
+ switch (opcode) {
+ case 0x03: /* SADDLV / UADDLV */
+ case 0x1b: /* ADDV */
+ tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
+ break;
+ case 0x0a: /* SMAXV / UMAXV */
+ if (is_u) {
+ tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
+ } else {
+ tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
+ }
+ break;
+ case 0x1a: /* SMINV / UMINV */
+ if (is_u) {
+ tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
+ } else {
+ tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ }
+ } else {
+ /* Floating point vector reduction ops which work across 32
+ * bit (single) or 16 bit (half-precision) intermediates.
+ * Note that correct NaN propagation requires that we do these
+ * operations in exactly the order specified by the pseudocode.
+ */
+ TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ int fpopcode = opcode | is_min << 4 | is_u << 5;
+ int vmap = (1 << elements) - 1;
+ TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
+ (is_q ? 128 : 64), vmap, fpst);
+ tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
+ tcg_temp_free_i32(tcg_res32);
+ tcg_temp_free_ptr(fpst);
+ }
+
+ tcg_temp_free_i64(tcg_elt);
+
+ /* Now truncate the result to the width required for the final output */
+ if (opcode == 0x03) {
+ /* SADDLV, UADDLV: result is 2*esize */
+ size++;
+ }
+
+ switch (size) {
+ case 0:
+ tcg_gen_ext8u_i64(tcg_res, tcg_res);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(tcg_res, tcg_res);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
+ break;
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* DUP (Element, Vector)
+ *
+ * 31 30 29 21 20 16 15 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ int index;
+
+ if (size > 3 || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
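+    /* The element index is imm5<4:size+1> */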
+ index = imm5 >> (size + 1);
+ tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
+ vec_reg_offset(s, rn, index, size),
+ is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* DUP (element, scalar)
+ * 31 21 20 16 15 10 9 5 4 0
+ * +-----------------------+--------+-------------+------+------+
+ * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
+ * +-----------------------+--------+-------------+------+------+
+ */
+static void handle_simd_dupes(DisasContext *s, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ int index;
+ TCGv_i64 tmp;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ index = imm5 >> (size + 1);
+
+ /* This instruction just extracts the specified element and
+ * zero-extends it into the bottom of the destination register.
+ */
+ tmp = tcg_temp_new_i64();
+ read_vec_element(s, tmp, rn, index, size);
+ write_fp_dreg(s, rd, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* DUP (General)
+ *
+ * 31 30 29 21 20 16 15 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ uint32_t dofs, oprsz, maxsz;
+
+ if (size > 3 || ((size == 3) && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ dofs = vec_full_reg_offset(s, rd);
+ oprsz = is_q ? 16 : 8;
+ maxsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
+}
+
+/* INS (Element)
+ *
+ * 31 21 20 16 15 14 11 10 9 5 4 0
+ * +-----------------------+--------+------------+---+------+------+
+ * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +-----------------------+--------+------------+---+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ * index: encoded in imm5<4:size+1>
+ */
+static void handle_simd_inse(DisasContext *s, int rd, int rn,
+ int imm4, int imm5)
+{
+ int size = ctz32(imm5);
+ int src_index, dst_index;
+ TCGv_i64 tmp;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+    dst_index = extract32(imm5, 1 + size, 5);
+ src_index = extract32(imm4, size, 4);
+
+ tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tmp, rn, src_index, size);
+ write_vec_element(s, tmp, rd, dst_index, size);
+
+ tcg_temp_free_i64(tmp);
+
+ /* INS is considered a 128-bit write for SVE. */
+ clear_vec_high(s, true, rd);
+}
+
+
+/* INS (General)
+ *
+ * 31 21 20 16 15 10 9 5 4 0
+ * +-----------------------+--------+-------------+------+------+
+ * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
+ * +-----------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ * index: encoded in imm5<4:size+1>
+ */
+static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
+{
+ int size = ctz32(imm5);
+ int idx;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ idx = extract32(imm5, 1 + size, 4 - size);
+ write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
+
+ /* INS is considered a 128-bit write for SVE. */
+ clear_vec_high(s, true, rd);
+}
+
+/*
+ * UMOV (General)
+ * SMOV (General)
+ *
+ * 31 30 29 21 20 16 15 12 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * U: unsigned when set
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
+ int rn, int rd, int imm5)
+{
+ int size = ctz32(imm5);
+ int element;
+ TCGv_i64 tcg_rd;
+
+ /* Check for UnallocatedEncodings */
+ if (is_signed) {
+ if (size > 2 || (size == 2 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ if (size > 3
+ || (size < 3 && is_q)
+ || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+    element = extract32(imm5, 1 + size, 4);
+
+ tcg_rd = cpu_reg(s, rd);
+ read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
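+    /* A 32-bit SMOV writes Wd, so clear the high half of the X register */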
+ if (is_signed && !is_q) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* AdvSIMD copy
+ * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
+ * +---+---+----+-----------------+------+---+------+---+------+------+
+ * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +---+---+----+-----------------+------+---+------+---+------+------+
+ */
+static void disas_simd_copy(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm4 = extract32(insn, 11, 4);
+ int op = extract32(insn, 29, 1);
+ int is_q = extract32(insn, 30, 1);
+ int imm5 = extract32(insn, 16, 5);
+
+ if (op) {
+ if (is_q) {
+ /* INS (element) */
+ handle_simd_inse(s, rd, rn, imm4, imm5);
+ } else {
+ unallocated_encoding(s);
+ }
+ } else {
+ switch (imm4) {
+ case 0:
+ /* DUP (element - vector) */
+ handle_simd_dupe(s, is_q, rd, rn, imm5);
+ break;
+ case 1:
+ /* DUP (general) */
+ handle_simd_dupg(s, is_q, rd, rn, imm5);
+ break;
+ case 3:
+ if (is_q) {
+ /* INS (general) */
+ handle_simd_insg(s, rd, rn, imm5);
+ } else {
+ unallocated_encoding(s);
+ }
+ break;
+ case 5:
+ case 7:
+ /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
+ handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ }
+}
+
+/* AdvSIMD modified immediate
+ * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
+ * +---+---+----+---------------------+-----+-------+----+---+-------+------+
+ * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
+ * +---+---+----+---------------------+-----+-------+----+---+-------+------+
+ *
+ * There are a number of operations that can be carried out here:
+ * MOVI - move (shifted) imm into register
+ * MVNI - move inverted (shifted) imm into register
+ * ORR - bitwise OR of (shifted) imm with register
+ * BIC - bitwise clear of (shifted) imm with register
+ * With ARMv8.2 we also have:
+ * FMOV half-precision
+ */
+static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int cmode = extract32(insn, 12, 4);
+ int o2 = extract32(insn, 11, 1);
+ uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
+ bool is_neg = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ uint64_t imm = 0;
+
+ if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
+ /* Check for FMOV (vector, immediate) - half-precision */
+ if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (cmode == 15 && o2 && !is_neg) {
+ /* FMOV (vector, immediate) - half-precision */
+ imm = vfp_expand_imm(MO_16, abcdefgh);
+ /* now duplicate across the lanes */
+ imm = dup_const(MO_16, imm);
+ } else {
+ imm = asimd_imm_const(abcdefgh, cmode, is_neg);
+ }
+
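+    /*
+     * cmode patterns 0b0xx1 (32-bit) and 0b10x1 (16-bit) are the ORR/BIC
+     * forms, which modify the existing Rd; every other cmode writes a pure
+     * immediate and can use a simple vector dup (illustrative summary of
+     * the test below).
+     */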
+ if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
+ /* MOVI or MVNI, with MVNI negation handled above. */
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
+ vec_full_reg_size(s), imm);
+ } else {
+ /* ORR or BIC, with BIC negation to AND handled above. */
+ if (is_neg) {
+ gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
+ } else {
+ gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
+ }
+ }
+}
+
+/* AdvSIMD scalar copy
+ * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
+ * +-----+----+-----------------+------+---+------+---+------+------+
+ * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +-----+----+-----------------+------+---+------+---+------+------+
+ */
+static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm4 = extract32(insn, 11, 4);
+ int imm5 = extract32(insn, 16, 5);
+ int op = extract32(insn, 29, 1);
+
+ if (op != 0 || imm4 != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* DUP (element, scalar) */
+ handle_simd_dupes(s, rd, rn, imm5);
+}
+
+/* AdvSIMD scalar pairwise
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
+{
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ TCGv_ptr fpst;
+
+ /* For some ops (the FP ones), size[1] is part of the encoding.
+ * For ADDP strictly it is not but size[1] is always 1 for valid
+ * encodings.
+ */
+ opcode |= (extract32(size, 1, 1) << 5);
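+    /* e.g. ADDP has opcode 0x1b in the instruction; with size == 3 the
+     * OR above turns it into the 0x3b case below.
+     */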
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ if (u || size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = NULL;
+ break;
+ case 0xc: /* FMAXNMP */
+ case 0xd: /* FADDP */
+ case 0xf: /* FMAXP */
+ case 0x2c: /* FMINNMP */
+ case 0x2f: /* FMINP */
+        /* FP op; size[0] selects 32-bit or 64-bit */
+ if (!u) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ } else {
+ size = MO_16;
+ }
+ } else {
+ size = extract32(size, 0, 1) ? MO_64 : MO_32;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == MO_64) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_64);
+ read_vec_element(s, tcg_op2, rn, 1, MO_64);
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
+ break;
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, 0, size);
+ read_vec_element_i32(s, tcg_op2, rn, 1, size);
+
+ if (size == MO_16) {
+ switch (opcode) {
+ case 0xc: /* FMAXNMP */
+ gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (opcode) {
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ if (fpst) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/*
+ * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
+ *
+ * This code handles the common shifting logic and is used by both
+ * the vector and scalar code.
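+ *
+ * Worked example: a rounding right shift by 3 of 21 first adds
+ * round_const = 1 << 2 = 4, so (21 + 4) >> 3 = 3 rather than 21 >> 3 = 2.
+ * For size == 3 that addition can carry out of 64 bits, which is why the
+ * extended-precision path below keeps a high half in tcg_src_hi.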
+ */
+static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
+ TCGv_i64 tcg_rnd, bool accumulate,
+ bool is_u, int size, int shift)
+{
+ bool extended_result = false;
+ bool round = tcg_rnd != NULL;
+ int ext_lshift = 0;
+ TCGv_i64 tcg_src_hi;
+
+ if (round && size == 3) {
+ extended_result = true;
+ ext_lshift = 64 - shift;
+ tcg_src_hi = tcg_temp_new_i64();
+ } else if (shift == 64) {
+ if (!accumulate && is_u) {
+ /* result is zero */
+ tcg_gen_movi_i64(tcg_res, 0);
+ return;
+ }
+ }
+
+ /* Deal with the rounding step */
+ if (round) {
+ if (extended_result) {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ if (!is_u) {
+ /* take care of sign extending tcg_res */
+ tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
+ tcg_gen_add2_i64(tcg_src, tcg_src_hi,
+ tcg_src, tcg_src_hi,
+ tcg_rnd, tcg_zero);
+ } else {
+ tcg_gen_add2_i64(tcg_src, tcg_src_hi,
+ tcg_src, tcg_zero,
+ tcg_rnd, tcg_zero);
+ }
+ tcg_temp_free_i64(tcg_zero);
+ } else {
+ tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
+ }
+ }
+
+ /* Now do the shift right */
+ if (round && extended_result) {
+ /* extended case, >64 bit precision required */
+ if (ext_lshift == 0) {
+ /* special case, only high bits matter */
+ tcg_gen_mov_i64(tcg_src, tcg_src_hi);
+ } else {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
+ tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
+ }
+ } else {
+ if (is_u) {
+ if (shift == 64) {
+ /* essentially shifting in 64 zeros */
+ tcg_gen_movi_i64(tcg_src, 0);
+ } else {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ }
+ } else {
+ if (shift == 64) {
+ /* effectively extending the sign-bit */
+ tcg_gen_sari_i64(tcg_src, tcg_src, 63);
+ } else {
+ tcg_gen_sari_i64(tcg_src, tcg_src, shift);
+ }
+ }
+ }
+
+ if (accumulate) {
+ tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
+ } else {
+ tcg_gen_mov_i64(tcg_res, tcg_src);
+ }
+
+ if (extended_result) {
+ tcg_temp_free_i64(tcg_src_hi);
+ }
+}
+
+/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
+static void handle_scalar_simd_shri(DisasContext *s,
+ bool is_u, int immh, int immb,
+ int opcode, int rn, int rd)
+{
+ const int size = 3;
+ int immhb = immh << 3 | immb;
+ int shift = 2 * (8 << size) - immhb;
+ bool accumulate = false;
+ bool round = false;
+ bool insert = false;
+ TCGv_i64 tcg_rn;
+ TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_round;
+
+ if (!extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x02: /* SSRA / USRA (accumulate) */
+ accumulate = true;
+ break;
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ round = true;
+ break;
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ accumulate = round = true;
+ break;
+ case 0x08: /* SRI */
+ insert = true;
+ break;
+ }
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ tcg_round = NULL;
+ }
+
+ tcg_rn = read_fp_dreg(s, rn);
+ tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+
+ if (insert) {
+ /* shift count same as element size is valid but does nothing;
+ * special case to avoid potential shift by 64.
+ */
+ int esize = 8 << size;
+ if (shift != esize) {
+ tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
+ }
+ } else {
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ accumulate, is_u, size, shift);
+ }
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+}
+
+/* SHL/SLI - Scalar shift left */
+static void handle_scalar_simd_shli(DisasContext *s, bool insert,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
+ TCGv_i64 tcg_rn;
+ TCGv_i64 tcg_rd;
+
+ if (!extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rn = read_fp_dreg(s, rn);
+ tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+
+ if (insert) {
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
+ } else {
+ tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
+ }
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+}
+
+/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
+ * (signed/unsigned) narrowing */
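+/*
+ * Worked example (illustrative): immh = 0b0010 selects 16-bit destination
+ * elements (esize = 16, 32-bit source elements); with immb = 0b101,
+ * immh:immb = 21 and shift = 2 * esize - 21 = 11.
+ */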
+static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
+ bool is_u_shift, bool is_u_narrow,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int esize = 8 << size;
+ int shift = (2 * esize) - immhb;
+ int elements = is_scalar ? 1 : (64 / esize);
+ bool round = extract32(opcode, 0, 1);
+ MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_round;
+ TCGv_i32 tcg_rd_narrowed;
+ TCGv_i64 tcg_final;
+
+ static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_unarrow_sat8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_unarrow_sat16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_unarrow_sat32 },
+ { NULL, NULL },
+ };
+ static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
+ gen_helper_neon_narrow_sat_u8,
+ gen_helper_neon_narrow_sat_u16,
+ gen_helper_neon_narrow_sat_u32,
+ NULL
+ };
+ NeonGenNarrowEnvFn *narrowfn;
+
+ int i;
+
+ assert(size < 4);
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_u_shift) {
+ narrowfn = unsigned_narrow_fns[size];
+ } else {
+ narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_rd_narrowed = tcg_temp_new_i32();
+ tcg_final = tcg_const_i64(0);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ tcg_round = NULL;
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, ldop);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, is_u_shift, size+1, shift);
+ narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i32(tcg_rd_narrowed);
+ tcg_temp_free_i64(tcg_final);
+
+ clear_vec_high(s, is_q, rd);
+}
+
+/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
+static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
+ bool src_unsigned, bool dst_unsigned,
+ int immh, int immb, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int shift = immhb - (8 << size);
+ int pass;
+
+ assert(immh != 0);
+ assert(!(scalar && is_q));
+
+ if (!scalar) {
+ if (!is_q && extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Since we use the variable-shift helpers we must
+ * replicate the shift count into each element of
+ * the tcg_shift value.
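+         * For example, for size == 0 a shift of 3 becomes 0x03030303
+         * after the two OR steps below.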
+ */
+ switch (size) {
+ case 0:
+ shift |= shift << 8;
+ /* fall through */
+ case 1:
+ shift |= shift << 16;
+ break;
+ case 2:
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_shift = tcg_const_i64(shift);
+ static NeonGenTwo64OpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
+ { NULL, gen_helper_neon_qshl_u64 },
+ };
+ NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
+ int maxpass = is_q ? 2 : 1;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_op);
+ }
+ tcg_temp_free_i64(tcg_shift);
+ clear_vec_high(s, is_q, rd);
+ } else {
+ TCGv_i32 tcg_shift = tcg_const_i32(shift);
+ static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
+ {
+ { gen_helper_neon_qshl_s8,
+ gen_helper_neon_qshl_s16,
+ gen_helper_neon_qshl_s32 },
+ { gen_helper_neon_qshlu_s8,
+ gen_helper_neon_qshlu_s16,
+ gen_helper_neon_qshlu_s32 }
+ }, {
+ { NULL, NULL, NULL },
+ { gen_helper_neon_qshl_u8,
+ gen_helper_neon_qshl_u16,
+ gen_helper_neon_qshl_u32 }
+ }
+ };
+ NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
+ MemOp memop = scalar ? size : MO_32;
+ int maxpass = scalar ? 1 : is_q ? 4 : 2;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, memop);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ if (scalar) {
+ switch (size) {
+ case 0:
+ tcg_gen_ext8u_i32(tcg_op, tcg_op);
+ break;
+ case 1:
+ tcg_gen_ext16u_i32(tcg_op, tcg_op);
+ break;
+ case 2:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ }
+ tcg_temp_free_i32(tcg_shift);
+
+ if (!scalar) {
+ clear_vec_high(s, is_q, rd);
+ }
+ }
+}
+
+/* Common vector code for handling integer to FP conversion */
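+/*
+ * fracbits is the number of fixed-point fraction bits: 0 gives a plain
+ * [US]CVTF, while e.g. fracbits == 8 treats the source as a fixed-point
+ * value and scales the converted result down by 2^8 (illustrative note).
+ */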
+static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
+ int elements, int is_signed,
+ int fracbits, int size)
+{
+ TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ TCGv_i32 tcg_shift = NULL;
+
+ MemOp mop = size | (is_signed ? MO_SIGN : 0);
+ int pass;
+
+ if (fracbits || size == MO_64) {
+ tcg_shift = tcg_const_i32(fracbits);
+ }
+
+ if (size == MO_64) {
+ TCGv_i64 tcg_int64 = tcg_temp_new_i64();
+ TCGv_i64 tcg_double = tcg_temp_new_i64();
+
+ for (pass = 0; pass < elements; pass++) {
+ read_vec_element(s, tcg_int64, rn, pass, mop);
+
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int64,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int64,
+ tcg_shift, tcg_fpst);
+ }
+ if (elements == 1) {
+ write_fp_dreg(s, rd, tcg_double);
+ } else {
+ write_vec_element(s, tcg_double, rd, pass, MO_64);
+ }
+ }
+
+ tcg_temp_free_i64(tcg_int64);
+ tcg_temp_free_i64(tcg_double);
+
+ } else {
+ TCGv_i32 tcg_int32 = tcg_temp_new_i32();
+ TCGv_i32 tcg_float = tcg_temp_new_i32();
+
+ for (pass = 0; pass < elements; pass++) {
+ read_vec_element_i32(s, tcg_int32, rn, pass, mop);
+
+ switch (size) {
+ case MO_32:
+ if (fracbits) {
+ if (is_signed) {
+ gen_helper_vfp_sltos(tcg_float, tcg_int32,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_ultos(tcg_float, tcg_int32,
+ tcg_shift, tcg_fpst);
+ }
+ } else {
+ if (is_signed) {
+ gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
+ } else {
+ gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
+ }
+ }
+ break;
+ case MO_16:
+ if (fracbits) {
+ if (is_signed) {
+ gen_helper_vfp_sltoh(tcg_float, tcg_int32,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_ultoh(tcg_float, tcg_int32,
+ tcg_shift, tcg_fpst);
+ }
+ } else {
+ if (is_signed) {
+ gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
+ } else {
+ gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (elements == 1) {
+ write_fp_sreg(s, rd, tcg_float);
+ } else {
+ write_vec_element_i32(s, tcg_float, rd, pass, size);
+ }
+ }
+
+ tcg_temp_free_i32(tcg_int32);
+ tcg_temp_free_i32(tcg_float);
+ }
+
+ tcg_temp_free_ptr(tcg_fpst);
+ if (tcg_shift) {
+ tcg_temp_free_i32(tcg_shift);
+ }
+
+    clear_vec_high(s, (elements << size) == 16, rd);
+}
+
+/* UCVTF/SCVTF - Integer to FP conversion */
+static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int size, elements, fracbits;
+ int immhb = immh << 3 | immb;
+
+ if (immh & 8) {
+ size = MO_64;
+ if (!is_scalar && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else if (immh & 4) {
+ size = MO_32;
+ } else if (immh & 2) {
+ size = MO_16;
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ /* immh == 0 would be a failure of the decode logic */
+ g_assert(immh == 1);
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_scalar) {
+ elements = 1;
+ } else {
+ elements = (8 << is_q) >> size;
+ }
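+    /*
+     * fracbits = (16 << size) - immh:immb; e.g. for size == MO_64
+     * (immh = 1xxx) immh:immb runs from 64 to 127, so fracbits covers
+     * 64 down to 1 (illustrative).
+     */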
+ fracbits = (16 << size) - immhb;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
+}
+
+/* FCVTZS, FCVTZU - FP to fixed-point conversion */
+static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int pass, size, fracbits;
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_rmode, tcg_shift;
+
+ if (immh & 0x8) {
+ size = MO_64;
+ if (!is_scalar && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else if (immh & 0x4) {
+ size = MO_32;
+ } else if (immh & 0x2) {
+ size = MO_16;
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ /* Should have split out AdvSIMD modified immediate earlier. */
+ assert(immh == 1);
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ assert(!(is_scalar && is_q));
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
+ tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ fracbits = (16 << size) - immhb;
+ tcg_shift = tcg_const_i32(fracbits);
+
+ if (size == MO_64) {
+ int maxpass = is_scalar ? 1 : 2;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (is_u) {
+ gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ }
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ }
+ clear_vec_high(s, is_q, rd);
+ } else {
+ void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+ int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
+
+ switch (size) {
+ case MO_16:
+ if (is_u) {
+ fn = gen_helper_vfp_touhh;
+ } else {
+ fn = gen_helper_vfp_toshh;
+ }
+ break;
+ case MO_32:
+ if (is_u) {
+ fn = gen_helper_vfp_touls;
+ } else {
+ fn = gen_helper_vfp_tosls;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, size);
+ fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, size);
+ }
+ tcg_temp_free_i32(tcg_op);
+ }
+ if (!is_scalar) {
+ clear_vec_high(s, is_q, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+}
+
+/* AdvSIMD scalar shift by immediate
+ * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
+ * +-----+---+-------------+------+------+--------+---+------+------+
+ * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
+ * +-----+---+-------------+------+------+--------+---+------+------+
+ *
+ * This is the scalar version, so it works on fixed-size registers.
+ */
+static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int immb = extract32(insn, 16, 3);
+ int immh = extract32(insn, 19, 4);
+ bool is_u = extract32(insn, 29, 1);
+
+ if (immh == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x00: /* SSHR / USHR */
+ case 0x02: /* SSRA / USRA */
+ case 0x04: /* SRSHR / URSHR */
+ case 0x06: /* SRSRA / URSRA */
+ handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x0a: /* SHL / SLI */
+ handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x1c: /* SCVTF, UCVTF */
+ handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0x10: /* SQSHRUN, SQSHRUN2 */
+ case 0x11: /* SQRSHRUN, SQRSHRUN2 */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_vec_simd_sqshrn(s, true, false, false, true,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
+ case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
+ handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
+ break;
+ case 0x1f: /* FCVTZS, FCVTZU */
+ handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* AdvSIMD scalar three different
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
+ * +-----+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
+{
+ bool is_u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ if (size == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 2) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
+ read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
+
+ tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ read_vec_element(s, tcg_op1, rd, 0, MO_64);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
+ tcg_res, tcg_op1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
+ TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ {
+ TCGv_i64 tcg_op3 = tcg_temp_new_i64();
+ read_vec_element(s, tcg_op3, rd, 0, MO_32);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
+ tcg_res, tcg_op3);
+ tcg_temp_free_i64(tcg_op3);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ }
+}
+
+static void handle_3same_64(DisasContext *s, int opcode, bool u,
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
+{
+ /* Handle 64x64->64 opcodes which are shared between the scalar
+ * and vector 3-same groups. We cover every opcode where size == 3
+ * is valid in either the three-reg-same (integer, not pairwise)
+ * or scalar-three-reg-same groups.
+ */
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x1: /* SQADD */
+ if (u) {
+ gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x5: /* SQSUB */
+ if (u) {
+ gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x6: /* CMGT, CMHI */
+ /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
+ * We implement this using setcond (test) and then negating.
+ */
+ cond = u ? TCG_COND_GTU : TCG_COND_GT;
+ do_cmop:
+ tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x7: /* CMGE, CMHS */
+ cond = u ? TCG_COND_GEU : TCG_COND_GE;
+ goto do_cmop;
+ case 0x11: /* CMTST, CMEQ */
+ if (u) {
+ cond = TCG_COND_EQ;
+ goto do_cmop;
+ }
+ gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 0x8: /* SSHL, USHL */
+ if (u) {
+ gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x9: /* SQSHL, UQSHL */
+ if (u) {
+ gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0xa: /* SRSHL, URSHL */
+ if (u) {
+ gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0xb: /* SQRSHL, UQRSHL */
+ if (u) {
+ gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x10: /* ADD, SUB */
+ if (u) {
+ tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Handle the 3-same-operands float operations; shared by the scalar
+ * and vector encodings. The caller must filter out any encodings
+ * not allocated for the encoding it is dealing with.
+ */
+static void handle_3same_float(DisasContext *s, int size, int elements,
+ int fpopcode, int rd, int rn, int rm)
+{
+ int pass;
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+
+ for (pass = 0; pass < elements; pass++) {
+ if (size) {
+ /* Double */
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
+ case 0x18: /* FMAXNM */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1a: /* FADD */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1e: /* FMAX */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x38: /* FMINNM */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3a: /* FSUB */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3e: /* FMIN */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5b: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5f: /* FDIV */
+ gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7a: /* FABD */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_absd(tcg_res, tcg_res);
+ break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ } else {
+ /* Single */
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
+
+ switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
+ case 0x1a: /* FADD */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1e: /* FMAX */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x18: /* FMAXNM */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x38: /* FMINNM */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3a: /* FSUB */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3e: /* FMIN */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5b: /* FMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5f: /* FDIV */
+ gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7a: /* FABD */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_abss(tcg_res, tcg_res);
+ break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (elements == 1) {
+ /* scalar single so clear high part */
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
+ write_vec_element(s, tcg_tmp, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
+}
+
+/* AdvSIMD scalar three same
+ * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+------+--------+---+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
+ * +-----+---+-----------+------+---+------+--------+---+------+------+
+ */
+static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ TCGv_i64 tcg_rd;
+
+ if (opcode >= 0x18) {
+ /* Floating point: U, size[1] and opcode indicate operation */
+ int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
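+        /* e.g. FABD: U = 1, size = 1x, opcode = 0x1a gives fpopcode 0x7a. */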
+ switch (fpopcode) {
+ case 0x1b: /* FMULX */
+ case 0x1f: /* FRECPS */
+ case 0x3f: /* FRSQRTS */
+ case 0x5d: /* FACGE */
+ case 0x7d: /* FACGT */
+ case 0x1c: /* FCMEQ */
+ case 0x5c: /* FCMGE */
+ case 0x7c: /* FCMGT */
+ case 0x7a: /* FABD */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x1: /* SQADD, UQADD */
+ case 0x5: /* SQSUB, UQSUB */
+ case 0x9: /* SQSHL, UQSHL */
+ case 0xb: /* SQRSHL, UQRSHL */
+ break;
+ case 0x8: /* SSHL, USHL */
+ case 0xa: /* SRSHL, URSHL */
+ case 0x6: /* CMGT, CMHI */
+ case 0x7: /* CMGE, CMHS */
+ case 0x11: /* CMTST, CMEQ */
+ case 0x10: /* ADD, SUB (vector) */
+ if (size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x16: /* SQDMULH, SQRDMULH (vector) */
+ if (size != 1 && size != 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rd = tcg_temp_new_i64();
+
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
+
+ handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rm);
+ } else {
+ /* Do a single operation on the lowest element in the vector.
+ * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
+ * no side effects for all these operations.
+ * OPTME: special-purpose helpers would avoid doing some
+ * unnecessary work in the helper for the 8 and 16 bit cases.
+ */
+ NeonGenTwoOpEnvFn *genenvfn;
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rm = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+ read_vec_element_i32(s, tcg_rm, rm, 0, size);
+
+ switch (opcode) {
+ case 0x1: /* SQADD, UQADD */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
+ { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
+ { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x5: /* SQSUB, UQSUB */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
+ { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
+ { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static NeonGenTwoOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
+ { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
+ };
+ assert(size == 1 || size == 2);
+ genenvfn = fns[size - 1][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
+ tcg_temp_free_i32(tcg_rd32);
+ tcg_temp_free_i32(tcg_rn);
+ tcg_temp_free_i32(tcg_rm);
+ }
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rd);
+}
+
+/* AdvSIMD scalar three same FP16
+ * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
+ * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
+ * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
+ * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
+ * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
+ * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
+ */
+static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
+ uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 3);
+ int rm = extract32(insn, 16, 5);
+ bool u = extract32(insn, 29, 1);
+ bool a = extract32(insn, 23, 1);
+ int fpopcode = opcode | (a << 3) | (u << 4);
+ TCGv_ptr fpst;
+ TCGv_i32 tcg_op1;
+ TCGv_i32 tcg_op2;
+ TCGv_i32 tcg_res;
+
+ switch (fpopcode) {
+ case 0x03: /* FMULX */
+ case 0x04: /* FCMEQ (reg) */
+ case 0x07: /* FRECPS */
+ case 0x0f: /* FRSQRTS */
+ case 0x14: /* FCMGE (reg) */
+ case 0x15: /* FACGE */
+ case 0x1a: /* FABD */
+ case 0x1c: /* FCMGT (reg) */
+ case 0x1d: /* FACGT */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+    if (!dc_isar_feature(aa64_fp16, s)) {
+        unallocated_encoding(s);
+        return;
+    }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+
+ tcg_op1 = read_fp_hreg(s, rn);
+ tcg_op2 = read_fp_hreg(s, rm);
+ tcg_res = tcg_temp_new_i32();
+
+ switch (fpopcode) {
+ case 0x03: /* FMULX */
+ gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x04: /* FCMEQ (reg) */
+ gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x07: /* FRECPS */
+ gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x0f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x14: /* FCMGE (reg) */
+ gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x15: /* FACGE */
+ gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1a: /* FABD */
+ gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
+ tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
+ break;
+ case 0x1c: /* FCMGT (reg) */
+ gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1d: /* FACGT */
+ gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_ptr(fpst);
+}
+
+/* AdvSIMD scalar three same extra
+ * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+------+---+--------+---+----+----+
+ * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
+ * +-----+---+-----------+------+---+------+---+--------+---+----+----+
+ */
+static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
+ uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 4);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ TCGv_i32 ele1, ele2, ele3;
+ TCGv_i64 res;
+ bool feature;
+
+ switch (u * 16 + opcode) {
+ case 0x10: /* SQRDMLAH (vector) */
+ case 0x11: /* SQRDMLSH (vector) */
+ if (size != 1 && size != 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_rdm, s);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* Do a single operation on the lowest element in the vector.
+ * We use the standard Neon helpers and rely on 0 OP 0 == 0
+ * with no side effects for all these operations.
+ * OPTME: special-purpose helpers would avoid doing some
+ * unnecessary work in the helper for the 16 bit cases.
+ */
+ ele1 = tcg_temp_new_i32();
+ ele2 = tcg_temp_new_i32();
+ ele3 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, ele1, rn, 0, size);
+ read_vec_element_i32(s, ele2, rm, 0, size);
+ read_vec_element_i32(s, ele3, rd, 0, size);
+
+ switch (opcode) {
+ case 0x0: /* SQRDMLAH */
+ if (size == 1) {
+ gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
+ } else {
+ gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
+ }
+ break;
+ case 0x1: /* SQRDMLSH */
+ if (size == 1) {
+ gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
+ } else {
+ gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(ele1);
+ tcg_temp_free_i32(ele2);
+
+ res = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(res, ele3);
+ tcg_temp_free_i32(ele3);
+
+ write_fp_dreg(s, rd, res);
+ tcg_temp_free_i64(res);
+}
+
+static void handle_2misc_64(DisasContext *s, int opcode, bool u,
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
+ TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
+{
+ /* Handle 64->64 opcodes which are shared between the scalar and
+ * vector 2-reg-misc groups. We cover every integer opcode where size == 3
+ * is valid in either group and also the double-precision fp ops.
+ * The caller need only provide tcg_rmode and tcg_fpstatus if the op
+ * requires them.
+ */
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
+ } else {
+ tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
+ }
+ break;
+ case 0x5: /* NOT */
+ /* This opcode is shared with CNT and RBIT but we have earlier
+ * enforced that size == 3 if and only if this is the NOT insn.
+ */
+ tcg_gen_not_i64(tcg_rd, tcg_rn);
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
+ } else {
+ gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
+ }
+ break;
+ case 0xa: /* CMLT */
+ /* 64 bit integer comparison against zero, result is
+         * test ? (2^64 - 1) : 0. We implement this using setcond (test)
+         * and then negating, as in handle_3same_64() above.
+ */
+ cond = TCG_COND_LT;
+ do_cmop:
+ tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x8: /* CMGT, CMGE */
+ cond = u ? TCG_COND_GE : TCG_COND_GT;
+ goto do_cmop;
+ case 0x9: /* CMEQ, CMLE */
+ cond = u ? TCG_COND_LE : TCG_COND_EQ;
+ goto do_cmop;
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rn);
+ } else {
+ tcg_gen_abs_i64(tcg_rd, tcg_rn);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_absd(tcg_rd, tcg_rn);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negd(tcg_rd, tcg_rn);
+ break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ case 0x1e: /* FRINT32Z */
+ case 0x5e: /* FRINT32X */
+ gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ case 0x1f: /* FRINT64Z */
+ case 0x5f: /* FRINT64X */
+ gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == MO_64);
+ TCGv_ptr fpst;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ NeonGenTwoDoubleOpFn *genfn;
+ bool swap = false;
+ int pass;
+
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+            /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f64;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f64;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_zero);
+ tcg_temp_free_i64(tcg_op);
+
+ clear_vec_high(s, !is_scalar, rd);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ NeonGenTwoSingleOpFn *genfn;
+ bool swap = false;
+ int pass, maxpasses;
+
+ if (size == MO_16) {
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+ /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_advsimd_cgt_f16;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_advsimd_ceq_f16;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_advsimd_cge_f16;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+ /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f32;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f32;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ int vector_size = 8 << is_q;
+ maxpasses = vector_size >> size;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, size);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, size);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_scalar) {
+ clear_vec_high(s, is_q, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+}
+
+static void handle_2misc_reciprocal(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ switch (opcode) {
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ clear_vec_high(s, !is_scalar, rd);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ switch (opcode) {
+ case 0x3c: /* URECPE */
+ gen_helper_recpe_u32(tcg_res, tcg_op);
+ break;
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_scalar) {
+ clear_vec_high(s, is_q, rd);
+ }
+ }
+ tcg_temp_free_ptr(fpst);
+}
+
+static void handle_2misc_narrow(DisasContext *s, bool scalar,
+ int opcode, bool u, bool is_q,
+ int size, int rn, int rd)
+{
+ /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
+ * in the source becomes a size element in the destination).
+ */
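+    /* The "2" forms (e.g. FCVTN2) have is_q set and write the upper half
+     * of Rd, which is what destelt selects below (illustrative note).
+     */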
+ int pass;
+ TCGv_i32 tcg_res[2];
+ int destelt = is_q ? 2 : 0;
+ int passes = scalar ? 1 : 2;
+
+ if (scalar) {
+ tcg_res[1] = tcg_const_i32(0);
+ }
+
+ for (pass = 0; pass < passes; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenNarrowFn *genfn = NULL;
+ NeonGenNarrowEnvFn *genenvfn = NULL;
+
+ if (scalar) {
+ read_vec_element(s, tcg_op, rn, pass, size + 1);
+ } else {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ }
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x12: /* XTN, SQXTUN */
+ {
+ static NeonGenNarrowFn * const xtnfns[3] = {
+ gen_helper_neon_narrow_u8,
+ gen_helper_neon_narrow_u16,
+ tcg_gen_extrl_i64_i32,
+ };
+ static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
+ gen_helper_neon_unarrow_sat8,
+ gen_helper_neon_unarrow_sat16,
+ gen_helper_neon_unarrow_sat32,
+ };
+ if (u) {
+ genenvfn = sqxtunfns[size];
+ } else {
+ genfn = xtnfns[size];
+ }
+ break;
+ }
+ case 0x14: /* SQXTN, UQXTN */
+ {
+ static NeonGenNarrowEnvFn * const fns[3][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_narrow_sat_u8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_narrow_sat_u16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_narrow_sat_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
+ if (size == 2) {
+ gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
+ } else {
+ TCGv_i32 tcg_lo = tcg_temp_new_i32();
+ TCGv_i32 tcg_hi = tcg_temp_new_i32();
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+ TCGv_i32 ahp = get_ahp_flag();
+
+ tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
+ tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
+ tcg_temp_free_i32(tcg_lo);
+ tcg_temp_free_i32(tcg_hi);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(ahp);
+ }
+ break;
+ case 0x36: /* BFCVTN, BFCVTN2 */
+ {
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
+ tcg_temp_free_ptr(fpst);
+ }
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ /* 64 bit to 32 bit float conversion
+ * with von Neumann rounding (round to odd)
+ */
+ assert(size == 2);
+ gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op);
+ } else if (genenvfn) {
+ genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ }
+
+ tcg_temp_free_i64(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ clear_vec_high(s, is_q, rd);
+}
+
+/* Remaining saturating accumulating ops */
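+/*
+ * SUQADD is a signed saturating accumulate of an unsigned value and USQADD
+ * an unsigned saturating accumulate of a signed value, hence the
+ * mixed-signedness helper names below (illustrative note on the ARM
+ * semantics, not from the source).
+ */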
+static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
+ bool is_q, int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+
+ if (is_double) {
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_rn, rn, pass, MO_64);
+ read_vec_element(s, tcg_rd, rd, pass, MO_64);
+
+ if (is_u) { /* USQADD */
+ gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ } else { /* SUQADD */
+ gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ }
+ write_vec_element(s, tcg_rd, rd, pass, MO_64);
+ }
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ clear_vec_high(s, !is_scalar, rd);
+ } else {
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_rn, rn, pass, size);
+ read_vec_element_i32(s, tcg_rd, rd, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+
+ if (is_u) { /* USQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else { /* SUQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (is_scalar) {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ write_vec_element(s, tcg_zero, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_zero);
+ }
+ write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ clear_vec_high(s, is_q, rd);
+ }
+}
+
+/* AdvSIMD scalar two reg misc
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 12, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ bool is_fcvt = false;
+ int rmode;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
+
+ switch (opcode) {
+    case 0x3: /* USQADD / SUQADD */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, true, u, false, size, rn, rd);
+ return;
+ case 0x7: /* SQABS / SQNEG */
+ break;
+ case 0xa: /* CMLT */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x12: /* SQXTUN */
+ if (!u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x14: /* SQXTN, UQXTN */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
+ return;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1d:
+ case 0x1f:
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = extract32(size, 0, 1) ? 3 : 2;
+ switch (opcode) {
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
+ return;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ bool is_signed = (opcode == 0x1d);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
+ return;
+ }
+ case 0x3d: /* FRECPE */
+ case 0x3f: /* FRECPX */
+ case 0x7d: /* FRSQRTE */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
+ return;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ is_fcvt = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
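+            /* This yields FPROUNDING_TIEEVEN for FCVTNS/FCVTNU,
+             * FPROUNDING_NEGINF for FCVTMS/FCVTMU, FPROUNDING_POSINF
+             * for FCVTPS/FCVTPU and FPROUNDING_ZERO for FCVTZS/FCVTZU.
+             */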
+ break;
+ case 0x1c: /* FCVTAS */
+ case 0x5c: /* FCVTAU */
+ /* TIEAWAY doesn't fit in the usual rounding mode encoding */
+ is_fcvt = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_fcvt) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ } else {
+ tcg_rmode = NULL;
+ tcg_fpstatus = NULL;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+
+ handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ } else {
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+
+ switch (opcode) {
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_rd, cpu_env, tcg_rn);
+ break;
+ }
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ }
+
+ if (is_fcvt) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
+}
+
+/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
+static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = 2 * (8 << size) - immhb;
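+    /* For right shifts the immediate encodes 2 * esize - shift, so
+     * e.g. with 64-bit elements (immh = 1xxx) immhb runs from 64 to
+     * 127, giving shift counts from 64 down to 1.
+     */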
+ GVecGen2iFn *gvec_fn;
+
+ if (extract32(immh, 3, 1) && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ tcg_debug_assert(size <= 3);
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x02: /* SSRA / USRA (accumulate) */
+ gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
+ break;
+
+ case 0x08: /* SRI */
+ gvec_fn = gen_gvec_sri;
+ break;
+
+ case 0x00: /* SSHR / USHR */
+ if (is_u) {
+ if (shift == 8 << size) {
+ /* Shift count the same size as element size produces zero. */
+ tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s), 0);
+ return;
+ }
+ gvec_fn = tcg_gen_gvec_shri;
+ } else {
+ /* Shift count the same size as element size produces all sign. */
+ if (shift == 8 << size) {
+ shift -= 1;
+ }
+ gvec_fn = tcg_gen_gvec_sari;
+ }
+ break;
+
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
+ break;
+
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
+}
+
+/* SHL/SLI - Vector shift left */
+static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
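+    /* For left shifts the immediate encodes esize + shift, so e.g.
+     * with 8-bit elements (immh = 0001) immhb runs from 8 to 15,
+     * giving shift counts 0 to 7.
+     */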
+
+ /* Range of size is limited by decode: immh is a non-zero 4 bit field */
+ assert(size >= 0 && size <= 3);
+
+ if (extract32(immh, 3, 1) && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (insert) {
+ gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
+ } else {
+ gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
+ }
+}
+
+/* USHLL/SHLL - Vector shift left with widening */
+static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
+ int dsize = 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ TCGv_i64 tcg_rn = new_tmp_a64(s);
+ TCGv_i64 tcg_rd = new_tmp_a64(s);
+ int i;
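+    /* e.g. SSHLL V0.8H, V1.8B, #3: size 0, esize 8, elements 8; each
+     * byte of the source half is sign-extended (zero-extended for
+     * USHLL) and then shifted left by 3.
+     */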
+
+ if (size >= 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* For the LL variants the store is larger than the load,
+ * so if rd == rn we would overwrite parts of our input.
+ * So load everything right now and use shifts in the main loop.
+ */
+ read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
+
+ for (i = 0; i < elements; i++) {
+ tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
+ ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
+ write_vec_element(s, tcg_rd, rd, i, size + 1);
+ }
+}
+
+/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
+static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int dsize = 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ int shift = (2 * esize) - immhb;
+ bool round = extract32(opcode, 0, 1);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_final;
+ TCGv_i64 tcg_round;
+ int i;
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_final = tcg_temp_new_i64();
+ read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ tcg_round = NULL;
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, size+1);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, true, size+1, shift);
+
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_final);
+
+ clear_vec_high(s, is_q, rd);
+}
+
+/* AdvSIMD shift by immediate
+ * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
+ * +---+---+---+-------------+------+------+--------+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
+ * +---+---+---+-------------+------+------+--------+---+------+------+
+ */
+static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int immb = extract32(insn, 16, 3);
+ int immh = extract32(insn, 19, 4);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+
+ /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
+ assert(immh != 0);
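+    /* The position of the most significant set bit of immh selects
+     * the element size: 0001 -> 8 bit, 001x -> 16 bit, 01xx -> 32 bit,
+     * 1xxx -> 64 bit.
+     */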
+
+ switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x00: /* SSHR / USHR */
+ case 0x02: /* SSRA / USRA (accumulate) */
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x0a: /* SHL / SLI */
+ handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x10: /* SHRN */
+ case 0x11: /* RSHRN / SQRSHRUN */
+ if (is_u) {
+ handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
+ opcode, rn, rd);
+ } else {
+ handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
+ }
+ break;
+ case 0x12: /* SQSHRN / UQSHRN */
+ case 0x13: /* SQRSHRN / UQRSHRN */
+ handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0x14: /* SSHLL / USHLL */
+ handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x1c: /* SCVTF / UCVTF */
+ handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
+ break;
+    case 0x1f: /* FCVTZS / FCVTZU */
+ handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* Generate code to do a "long" addition or subtraction, i.e. one done in
+ * a TCGv_i64 on vector lanes twice the width specified by size.
+ */
+static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
+ TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
+{
+ static NeonGenTwo64OpFn * const fns[3][2] = {
+ { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
+ { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
+ { tcg_gen_add_i64, tcg_gen_sub_i64 },
+ };
+ NeonGenTwo64OpFn *genfn;
+ assert(size < 3);
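+    /* For sizes 0 and 1 the widened values are packed lanes within an
+     * i64 (four 16-bit or two 32-bit lanes), so the per-lane addl/subl
+     * helpers are used; for size 2 the i64 is a single 64-bit lane and
+     * a plain add/sub suffices.
+     */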
+
+ genfn = fns[size][is_sub];
+ genfn(tcg_res, tcg_op1, tcg_op2);
+}
+
+static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ /* 3-reg-different widening insns: 64 x 64 -> 128 */
+ TCGv_i64 tcg_res[2];
+ int pass, accop;
+
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = tcg_temp_new_i64();
+
+ /* Does this op do an adding accumulate, a subtracting accumulate,
+ * or no accumulate at all?
+ */
+ switch (opcode) {
+ case 5:
+ case 8:
+ case 9:
+ accop = 1;
+ break;
+ case 10:
+ case 11:
+ accop = -1;
+ break;
+ default:
+ accop = 0;
+ break;
+ }
+
+ if (accop != 0) {
+ read_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ read_vec_element(s, tcg_res[1], rd, 1, MO_64);
+ }
+
+ /* size == 2 means two 32x32->64 operations; this is worth special
+ * casing because we can generally handle it inline.
+ */
+ if (size == 2) {
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_passres;
+ MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
+
+ int elt = pass + is_q * 2;
+
+ read_vec_element(s, tcg_op1, rn, elt, memop);
+ read_vec_element(s, tcg_op2, rm, elt, memop);
+
+ if (accop == 0) {
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ {
+ TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
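+                /* Absolute difference: compute both op1 - op2 and
+                 * op2 - op1, then select whichever is non-negative.
+                 */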
+
+ tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
+ tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
+ tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
+ tcg_passres,
+ tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
+ tcg_temp_free_i64(tcg_tmp1);
+ tcg_temp_free_i64(tcg_tmp2);
+ break;
+ }
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
+ tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass], tcg_passres);
+ } else if (accop > 0) {
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ } else if (accop < 0) {
+ tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ }
+
+ if (accop != 0) {
+ tcg_temp_free_i64(tcg_passres);
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ /* size 0 or 1, generally helper functions */
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_passres;
+ int elt = pass + is_q * 2;
+
+ read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
+
+ if (accop == 0) {
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ {
+ TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[2][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ widenfn(tcg_op2_64, tcg_op2);
+ widenfn(tcg_passres, tcg_op1);
+ gen_neon_addl(size, (opcode == 2), tcg_passres,
+ tcg_passres, tcg_op2_64);
+ tcg_temp_free_i64(tcg_op2_64);
+ break;
+ }
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ if (size == 0) {
+ if (is_u) {
+ gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
+ }
+ } else {
+ if (is_u) {
+ gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
+ }
+ }
+ break;
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
+ if (size == 0) {
+ if (is_u) {
+ gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
+ }
+ } else {
+ if (is_u) {
+ gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
+ }
+ }
+ break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ assert(size == 1);
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+
+ if (accop != 0) {
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ } else {
+ gen_neon_addl(size, (accop < 0), tcg_res[pass],
+ tcg_res[pass], tcg_passres);
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ }
+ }
+
+ write_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ write_vec_element(s, tcg_res[1], rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_res[0]);
+ tcg_temp_free_i64(tcg_res[1]);
+}
+
+static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_res[2];
+ int part = is_q ? 2 : 0;
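+    /* For the ...W2 forms (is_q set) the narrow operand is taken from
+     * the high half of Rm.
+     */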
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[3][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
+ widenfn(tcg_op2_wide, tcg_op2);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_res[pass] = tcg_temp_new_i64();
+ gen_neon_addl(size, (opcode == 3),
+ tcg_res[pass], tcg_op1, tcg_op2_wide);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2_wide);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
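+/* Rounding narrow-high for 32-bit lanes: add half of the bits about to
+ * be discarded (1 << 31), then take the top 32 bits of the sum.
+ */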
+static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
+{
+ tcg_gen_addi_i64(in, in, 1U << 31);
+ tcg_gen_extrh_i64_i32(res, in);
+}
+
+static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_res[2];
+ int part = is_q ? 2 : 0;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_wideres = tcg_temp_new_i64();
+ static NeonGenNarrowFn * const narrowfns[3][2] = {
+ { gen_helper_neon_narrow_high_u8,
+ gen_helper_neon_narrow_round_high_u8 },
+ { gen_helper_neon_narrow_high_u16,
+ gen_helper_neon_narrow_round_high_u16 },
+ { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
+ };
+ NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+
+ tcg_res[pass] = tcg_temp_new_i32();
+ gennarrow(tcg_res[pass], tcg_wideres);
+ tcg_temp_free_i64(tcg_wideres);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ clear_vec_high(s, is_q, rd);
+}
+
+/* AdvSIMD three different
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
+{
+ /* Instructions in this group fall into three basic classes
+ * (in each case with the operation working on each element in
+ * the input vectors):
+ * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
+ * 128 bit input)
+ * (2) wide 64 x 128 -> 128
+ * (3) narrowing 128 x 128 -> 64
+ * Here we do initial decode, catch unallocated cases and
+ * dispatch to separate functions for each class.
+ */
+ int is_q = extract32(insn, 30, 1);
+ int is_u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
+ case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
+ /* 64 x 128 -> 128 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
+ case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
+ /* 128 x 128 -> 64 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ case 14: /* PMULL, PMULL2 */
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ switch (size) {
+ case 0: /* PMULL.P8 */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ /* The Q field specifies lo/hi half input for this insn. */
+ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
+ gen_helper_neon_pmull_h);
+ break;
+
+ case 3: /* PMULL.P64 */
+ if (!dc_isar_feature(aa64_pmull, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ /* The Q field specifies lo/hi half input for this insn. */
+ gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
+ gen_helper_gvec_pmull_q);
+ break;
+
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ return;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ if (is_u || size == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
+ /* 64 x 64 -> 128 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ default:
+ /* opcode 15 not allocated */
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Logic op (opcode == 3) subgroup of C3.6.16. */
+static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (size + 4 * is_u) {
+ case 0: /* AND */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
+ return;
+ case 1: /* BIC */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
+ return;
+ case 2: /* ORR */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
+ return;
+ case 3: /* ORN */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
+ return;
+ case 4: /* EOR */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
+ return;
+
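+    /* BSL, BIT and BIF are all bitwise selects, differing only in
+     * which operand supplies the mask and which supply the data; all
+     * three map onto tcg_gen_gvec_bitsel with permuted operands.
+     */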
+    case 5: /* BSL, bitwise select */
+ gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
+ return;
+ case 6: /* BIT, bitwise insert if true */
+ gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
+ return;
+ case 7: /* BIF, bitwise insert if false */
+ gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
+ return;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Pairwise op subgroup of C3.6.16.
+ *
+ * This is called directly, or from disas_simd_3same_float for the float
+ * pairwise operations, where the opcode and size are calculated differently.
+ */
+static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
+ int size, int rn, int rm, int rd)
+{
+ TCGv_ptr fpst;
+ int pass;
+
+ /* Floating point operations need fpst */
+ if (opcode >= 0x58) {
+ fpst = fpstatus_ptr(FPST_FPCR);
+ } else {
+ fpst = NULL;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* These operations work on the concatenated rm:rn, with each pair of
+ * adjacent elements being operated on to produce an element in the result.
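+     * e.g. ADDP Vd.4S, Vn.4S, Vm.4S computes
+     * { Vn[0]+Vn[1], Vn[2]+Vn[3], Vm[0]+Vm[1], Vm[2]+Vm[3] }.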
+ */
+ if (size == 3) {
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ int passreg = (pass == 0) ? rn : rm;
+
+ read_vec_element(s, tcg_op1, passreg, 0, MO_64);
+ read_vec_element(s, tcg_op2, passreg, 1, MO_64);
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ } else {
+ int maxpass = is_q ? 4 : 2;
+ TCGv_i32 tcg_res[4];
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ NeonGenTwoOpFn *genfn = NULL;
+ int passreg = pass < (maxpass / 2) ? rn : rm;
+ int passelt = (is_q && (pass & 1)) ? 2 : 0;
+
+ read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
+ read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ {
+ static NeonGenTwoOpFn * const fns[3] = {
+ gen_helper_neon_padd_u8,
+ gen_helper_neon_padd_u16,
+ tcg_gen_add_i32,
+ };
+ genfn = fns[size];
+ break;
+ }
+ case 0x14: /* SMAXP, UMAXP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
+ { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
+ { tcg_gen_smax_i32, tcg_gen_umax_i32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x15: /* SMINP, UMINP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
+ { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
+ { tcg_gen_smin_i32, tcg_gen_umin_i32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ /* The FP operations are all on single floats (32 bit) */
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+            /* The FP ops were emitted directly above; the integer ops
+             * selected a genfn, which is called here.
+             */
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op1, tcg_op2);
+ }
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+
+ for (pass = 0; pass < maxpass; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ clear_vec_high(s, is_q, rd);
+ }
+
+ if (fpst) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/* Floating point op subgroup of C3.6.16. */
+static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
+{
+ /* For floating point ops, the U, size[1] and opcode bits
+ * together indicate the operation. size[0] indicates single
+ * or double.
+ */
+ int fpopcode = extract32(insn, 11, 5)
+ | (extract32(insn, 23, 1) << 5)
+ | (extract32(insn, 29, 1) << 6);
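+    /* e.g. FADD (single) has U = 0, size<1> = 0 and opcode = 0x1a,
+     * giving fpopcode 0x1a; FSUB differs only in size<1>, giving 0x3a.
+     */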
+ int is_q = extract32(insn, 30, 1);
+ int size = extract32(insn, 22, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ int datasize = is_q ? 128 : 64;
+ int esize = 32 << size;
+ int elements = datasize / esize;
+
+ if (size == 1 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (fpopcode) {
+ case 0x58: /* FMAXNMP */
+ case 0x5a: /* FADDP */
+ case 0x5e: /* FMAXP */
+ case 0x78: /* FMINNMP */
+ case 0x7e: /* FMINP */
+ if (size && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
+ rn, rm, rd);
+ return;
+ case 0x1b: /* FMULX */
+ case 0x1f: /* FRECPS */
+ case 0x3f: /* FRSQRTS */
+ case 0x5d: /* FACGE */
+ case 0x7d: /* FACGT */
+ case 0x19: /* FMLA */
+ case 0x39: /* FMLS */
+ case 0x18: /* FMAXNM */
+ case 0x1a: /* FADD */
+ case 0x1c: /* FCMEQ */
+ case 0x1e: /* FMAX */
+ case 0x38: /* FMINNM */
+ case 0x3a: /* FSUB */
+ case 0x3e: /* FMIN */
+ case 0x5b: /* FMUL */
+ case 0x5c: /* FCMGE */
+ case 0x5f: /* FDIV */
+ case 0x7a: /* FABD */
+ case 0x7c: /* FCMGT */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
+ return;
+
+ case 0x1d: /* FMLAL */
+ case 0x3d: /* FMLSL */
+ case 0x59: /* FMLAL2 */
+ case 0x79: /* FMLSL2 */
+ if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (fp_access_check(s)) {
+ int is_s = extract32(insn, 23, 1);
+ int is_2 = extract32(insn, 29, 1);
+ int data = (is_2 << 1) | is_s;
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), cpu_env,
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ data, gen_helper_gvec_fmlal_a64);
+ }
+ return;
+
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* Integer op subgroup of C3.6.16. */
+static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
+{
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 11, 5);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int pass;
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x13: /* MUL, PMUL */
+ if (u && size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x0: /* SHADD, UHADD */
+ case 0x2: /* SRHADD, URHADD */
+ case 0x4: /* SHSUB, UHSUB */
+ case 0xc: /* SMAX, UMAX */
+ case 0xd: /* SMIN, UMIN */
+ case 0xe: /* SABD, UABD */
+ case 0xf: /* SABA, UABA */
+ case 0x12: /* MLA, MLS */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x16: /* SQDMULH, SQRDMULH */
+ if (size == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x01: /* SQADD, UQADD */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
+ }
+ return;
+ case 0x05: /* SQSUB, UQSUB */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
+ }
+ return;
+ case 0x08: /* SSHL, USHL */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
+ }
+ return;
+ case 0x0c: /* SMAX, UMAX */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
+ }
+ return;
+ case 0x0d: /* SMIN, UMIN */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
+ }
+ return;
+ case 0xe: /* SABD, UABD */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
+ }
+ return;
+ case 0xf: /* SABA, UABA */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
+ }
+ return;
+ case 0x10: /* ADD, SUB */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
+ }
+ return;
+ case 0x13: /* MUL, PMUL */
+ if (!u) { /* MUL */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
+ } else { /* PMUL */
+ gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
+ }
+ return;
+ case 0x12: /* MLA, MLS */
+ if (u) {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
+ } else {
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
+ }
+ return;
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static gen_helper_gvec_3_ptr * const fns[2][2] = {
+ { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
+ { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
+ };
+ gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
+ }
+ return;
+ case 0x11:
+ if (!u) { /* CMTST */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
+ return;
+ }
+ /* else CMEQ */
+ cond = TCG_COND_EQ;
+ goto do_gvec_cmp;
+ case 0x06: /* CMGT, CMHI */
+ cond = u ? TCG_COND_GTU : TCG_COND_GT;
+ goto do_gvec_cmp;
+ case 0x07: /* CMGE, CMHS */
+ cond = u ? TCG_COND_GEU : TCG_COND_GE;
+ do_gvec_cmp:
+ tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ is_q ? 16 : 8, vec_full_reg_size(s));
+ return;
+ }
+
+ if (size == 3) {
+ assert(is_q);
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ NeonGenTwoOpFn *genfn = NULL;
+ NeonGenTwoOpEnvFn *genenvfn = NULL;
+
+ read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
+
+ switch (opcode) {
+ case 0x0: /* SHADD, UHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
+ { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
+ { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x2: /* SRHADD, URHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
+ { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
+ { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x4: /* SHSUB, UHSUB */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
+ { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
+ { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xa: /* SRSHL, URSHL */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
+ { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
+ { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genenvfn) {
+ genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
+ } else {
+ genfn(tcg_res, tcg_op1, tcg_op2);
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+ }
+ clear_vec_high(s, is_q, rd);
+}
+
+/* AdvSIMD three same
+ * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+---+------+------+
+ */
+static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
+{
+ int opcode = extract32(insn, 11, 5);
+
+ switch (opcode) {
+ case 0x3: /* logic ops */
+ disas_simd_3same_logic(s, insn);
+ break;
+ case 0x17: /* ADDP */
+ case 0x14: /* SMAXP, UMAXP */
+ case 0x15: /* SMINP, UMINP */
+ {
+ /* Pairwise operations */
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ if (opcode == 0x17) {
+ if (u || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+ handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
+ break;
+ }
+ case 0x18 ... 0x31:
+ /* floating point ops, sz[1] and U are part of opcode */
+ disas_simd_3same_float(s, insn);
+ break;
+ default:
+ disas_simd_3same_int(s, insn);
+ break;
+ }
+}
+
+/*
+ * Advanced SIMD three same (ARMv8.2 FP16 variants)
+ *
+ * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
+ * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
+ * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
+ *
+ * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
+ * (register), FACGE, FABD, FCMGT (register) and FACGT.
+ *
+ */
+static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
+{
+ int opcode = extract32(insn, 11, 3);
+ int u = extract32(insn, 29, 1);
+ int a = extract32(insn, 23, 1);
+ int is_q = extract32(insn, 30, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ /*
+ * For these floating point ops, the U, a and opcode bits
+ * together indicate the operation.
+ */
+ int fpopcode = opcode | (a << 3) | (u << 4);
+ int datasize = is_q ? 128 : 64;
+ int elements = datasize / 16;
+ bool pairwise;
+ TCGv_ptr fpst;
+ int pass;
+
+ switch (fpopcode) {
+ case 0x0: /* FMAXNM */
+ case 0x1: /* FMLA */
+ case 0x2: /* FADD */
+ case 0x3: /* FMULX */
+ case 0x4: /* FCMEQ */
+ case 0x6: /* FMAX */
+ case 0x7: /* FRECPS */
+ case 0x8: /* FMINNM */
+ case 0x9: /* FMLS */
+ case 0xa: /* FSUB */
+ case 0xe: /* FMIN */
+ case 0xf: /* FRSQRTS */
+ case 0x13: /* FMUL */
+ case 0x14: /* FCMGE */
+ case 0x15: /* FACGE */
+ case 0x17: /* FDIV */
+ case 0x1a: /* FABD */
+ case 0x1c: /* FCMGT */
+ case 0x1d: /* FACGT */
+ pairwise = false;
+ break;
+ case 0x10: /* FMAXNMP */
+ case 0x12: /* FADDP */
+ case 0x16: /* FMAXP */
+ case 0x18: /* FMINNMP */
+ case 0x1e: /* FMINP */
+ pairwise = true;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+
+ if (pairwise) {
+ int maxpass = is_q ? 8 : 4;
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res[8];
+
+ for (pass = 0; pass < maxpass; pass++) {
+ int passreg = pass < (maxpass / 2) ? rn : rm;
+ int passelt = (pass << 1) & (maxpass - 1);
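+            /* The & (maxpass - 1) wraps the element index back to the
+             * start of the register when the passes move from the Rn
+             * half to the Rm half.
+             */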
+
+ read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
+ read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (fpopcode) {
+ case 0x10: /* FMAXNMP */
+ gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
+ fpst);
+ break;
+ case 0x12: /* FADDP */
+ gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x16: /* FMAXP */
+ gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x18: /* FMINNMP */
+ gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
+ fpst);
+ break;
+ case 0x1e: /* FMINP */
+ gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ for (pass = 0; pass < maxpass; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+
+ } else {
+ for (pass = 0; pass < elements; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
+ read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
+
+ switch (fpopcode) {
+ case 0x0: /* FMAXNM */
+ gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
+ gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
+ fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FMULX */
+ gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FCMEQ */
+ gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAX */
+ gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FRECPS */
+ gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FMINNM */
+ gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x9: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
+ gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
+ fpst);
+ break;
+ case 0xa: /* FSUB */
+ gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xe: /* FMIN */
+ gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FRSQRTS */
+ gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x13: /* FMUL */
+ gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x14: /* FCMGE */
+ gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x15: /* FACGE */
+ gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x17: /* FDIV */
+ gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1a: /* FABD */
+ gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
+ tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
+ break;
+ case 0x1c: /* FCMGT */
+ gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1d: /* FACGT */
+ gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ clear_vec_high(s, is_q, rd);
+}
+
+/* AdvSIMD three same extra
+ * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
+ */
+static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 4);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ bool feature;
+ int rot;
+
+ switch (u * 16 + opcode) {
+ case 0x10: /* SQRDMLAH (vector) */
+ case 0x11: /* SQRDMLSH (vector) */
+ if (size != 1 && size != 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_rdm, s);
+ break;
+ case 0x02: /* SDOT (vector) */
+ case 0x12: /* UDOT (vector) */
+ if (size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_dp, s);
+ break;
+ case 0x03: /* USDOT */
+ if (size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_i8mm, s);
+ break;
+ case 0x04: /* SMMLA */
+ case 0x14: /* UMMLA */
+ case 0x05: /* USMMLA */
+ if (!is_q || size != MO_32) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_i8mm, s);
+ break;
+ case 0x18: /* FCMLA, #0 */
+ case 0x19: /* FCMLA, #90 */
+ case 0x1a: /* FCMLA, #180 */
+ case 0x1b: /* FCMLA, #270 */
+ case 0x1c: /* FCADD, #90 */
+ case 0x1e: /* FCADD, #270 */
+ if (size == 0
+ || (size == 1 && !dc_isar_feature(aa64_fp16, s))
+ || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_fcma, s);
+ break;
+ case 0x1d: /* BFMMLA */
+ if (size != MO_16 || !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ feature = dc_isar_feature(aa64_bf16, s);
+ break;
+ case 0x1f:
+ switch (size) {
+ case 1: /* BFDOT */
+ case 3: /* BFMLAL{B,T} */
+ feature = dc_isar_feature(aa64_bf16, s);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x0: /* SQRDMLAH (vector) */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
+ return;
+
+ case 0x1: /* SQRDMLSH (vector) */
+ gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
+ return;
+
+ case 0x2: /* SDOT / UDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
+ u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
+ return;
+
+ case 0x3: /* USDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
+ return;
+
+ case 0x04: /* SMMLA, UMMLA */
+ gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
+ u ? gen_helper_gvec_ummla_b
+ : gen_helper_gvec_smmla_b);
+ return;
+ case 0x05: /* USMMLA */
+ gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
+ return;
+
+ case 0x8: /* FCMLA, #0 */
+ case 0x9: /* FCMLA, #90 */
+ case 0xa: /* FCMLA, #180 */
+ case 0xb: /* FCMLA, #270 */
+ rot = extract32(opcode, 0, 2);
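+        /* rot encodes the rotation as a multiple of 90 degrees:
+         * 0, 90, 180 or 270.
+         */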
+ switch (size) {
+ case 1:
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
+ gen_helper_gvec_fcmlah);
+ break;
+ case 2:
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
+ gen_helper_gvec_fcmlas);
+ break;
+ case 3:
+ gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
+ gen_helper_gvec_fcmlad);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return;
+
+ case 0xc: /* FCADD, #90 */
+ case 0xe: /* FCADD, #270 */
+ rot = extract32(opcode, 1, 1);
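+        /* rot = 0 selects FCADD #90, rot = 1 selects FCADD #270. */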
+ switch (size) {
+ case 1:
+ gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
+ gen_helper_gvec_fcaddh);
+ break;
+ case 2:
+ gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
+ gen_helper_gvec_fcadds);
+ break;
+ case 3:
+ gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
+ gen_helper_gvec_fcaddd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return;
+
+ case 0xd: /* BFMMLA */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
+ return;
+ case 0xf:
+ switch (size) {
+ case 1: /* BFDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
+ break;
+ case 3: /* BFMLAL{B,T} */
+ gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
+ gen_helper_gvec_bfmlal);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
+ int size, int rn, int rd)
+{
+    /* Handle 2-reg-misc ops which are widening (so each size element
+     * in the source becomes a 2*size element in the destination).
+     * The only instruction like this is FCVTL.
+     */
+ int pass;
+
+ if (size == 3) {
+ /* 32 -> 64 bit fp conversion */
+ TCGv_i64 tcg_res[2];
+ int srcelt = is_q ? 2 : 0;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
+ gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
+ tcg_temp_free_i32(tcg_op);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ } else {
+ /* 16 -> 32 bit fp conversion */
+ int srcelt = is_q ? 4 : 0;
+ TCGv_i32 tcg_res[4];
+ TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
+ TCGv_i32 ahp = get_ahp_flag();
+
+ for (pass = 0; pass < 4; pass++) {
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
+ fpst, ahp);
+ }
+ for (pass = 0; pass < 4; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(ahp);
+ }
+}
+
+static void handle_rev(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ int op = (opcode << 1) | u;
+ int opsz = op + size;
+ int grp_size = 3 - opsz;
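+    /* grp_size is log2 of the number of elements in each reversed
+     * group, e.g. REV64 with byte elements (op 0, size 0) reverses
+     * groups of 8 bytes.
+     */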
+ int dsize = is_q ? 128 : 64;
+ int i;
+
+ if (opsz >= 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 0) {
+ /* Special case bytes, use bswap op on each group of elements */
+ int groups = dsize / (8 << grp_size);
+
+ for (i = 0; i < groups; i++) {
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_tmp, rn, i, grp_size);
+ switch (grp_size) {
+ case MO_16:
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_tmp, rd, i, grp_size);
+ tcg_temp_free_i64(tcg_tmp);
+ }
+ clear_vec_high(s, is_q, rd);
+ } else {
+ int revmask = (1 << grp_size) - 1;
+ int esize = 8 << size;
+ int elements = dsize / esize;
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_const_i64(0);
+ TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
+
+ for (i = 0; i < elements; i++) {
+ int e_rev = (i & 0xf) ^ revmask;
+ int off = e_rev * esize;
+ read_vec_element(s, tcg_rn, rn, i, size);
+ if (off >= 64) {
+ tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
+ tcg_rn, off - 64, esize);
+ } else {
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
+ }
+ }
+ write_vec_element(s, tcg_rd, rd, 0, MO_64);
+ write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_rd_hi);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ }
+}
+
+static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ /* Implement the pairwise operations from 2-misc:
+ * SADDLP, UADDLP, SADALP, UADALP.
+ * These all add pairs of elements in the input to produce a
+ * double-width result element in the output (possibly accumulating).
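+     * e.g. SADDLP Vd.4S, Vn.8H adds adjacent signed halfword pairs
+     * into 32-bit result lanes.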
+ */
+ bool accum = (opcode == 0x6);
+ int maxpass = is_q ? 2 : 1;
+ int pass;
+ TCGv_i64 tcg_res[2];
+
+ if (size == 2) {
+ /* 32 + 32 -> 64 op */
+ MemOp memop = size + (u ? 0 : MO_SIGN);
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass * 2, memop);
+ read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ if (accum) {
+ read_vec_element(s, tcg_op1, rd, pass, MO_64);
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenOne64OpFn *genfn;
+ static NeonGenOne64OpFn * const fns[2][2] = {
+ { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
+ { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
+ };
+
+ genfn = fns[size][u];
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_res[pass], tcg_op);
+
+ if (accum) {
+ read_vec_element(s, tcg_op, rd, pass, MO_64);
+ if (size == 0) {
+ gen_helper_neon_addl_u16(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ } else {
+ gen_helper_neon_addl_u32(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ }
+ }
+ tcg_temp_free_i64(tcg_op);
+ }
+ }
+ if (!is_q) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
+{
+ /* Implement SHLL and SHLL2 */
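+    /* The shift amount is implicit and equal to the element width,
+     * e.g. SHLL Vd.8H, Vn.8B, #8 widens each byte and shifts by 8.
+     */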
+ int pass;
+ int part = is_q ? 2 : 0;
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ static NeonGenWidenFn * const widenfns[3] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ };
+ NeonGenWidenFn *widenfn = widenfns[size];
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
+ tcg_res[pass] = tcg_temp_new_i64();
+ widenfn(tcg_res[pass], tcg_op);
+ tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
+
+ tcg_temp_free_i32(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+/* AdvSIMD two reg misc
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ bool u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool need_fpstatus = false;
+ bool need_rmode = false;
+ int rmode = -1;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
+
+ switch (opcode) {
+ case 0x0: /* REV64, REV32 */
+ case 0x1: /* REV16 */
+ handle_rev(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x5: /* CNT, NOT, RBIT */
+ if (u && size == 0) {
+ /* NOT */
+ break;
+ } else if (u && size == 1) {
+ /* RBIT */
+ break;
+ } else if (!u && size == 0) {
+ /* CNT */
+ break;
+ }
+ unallocated_encoding(s);
+ return;
+ case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
+ case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x4: /* CLS, CLZ */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x2: /* SADDLP, UADDLP */
+ case 0x6: /* SADALP, UADALP */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x13: /* SHLL, SHLL2 */
+ if (u == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_shll(s, is_q, size, rn, rd);
+ return;
+ case 0xa: /* CMLT */
+ if (u == 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3: /* SUQADD, USQADD */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
+ return;
+ case 0x7: /* SQABS, SQNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1f:
+ {
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ int is_double = extract32(size, 0, 1);
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = is_double ? 3 : 2;
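+        /* e.g. FNEG (U=1, size=1x, opcode=01111) becomes combined opcode 0x6f. */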
+ switch (opcode) {
+ case 0x2f: /* FABS */
+ case 0x6f: /* FNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+            bool is_signed = (opcode == 0x1d);
+ int elements = is_double ? 2 : is_q ? 4 : 2;
+ if (is_double && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
+ return;
+ }
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
+ return;
+ case 0x7f: /* FSQRT */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ need_fpstatus = true;
+ need_rmode = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
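+            /* rmode = size[1] | (opcode[0] << 1): FCVTN* -> TIEEVEN(0),
+             * FCVTP* -> POSINF(1), FCVTM* -> NEGINF(2), FCVTZ* -> ZERO(3).
+             */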
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x5c: /* FCVTAU */
+ case 0x1c: /* FCVTAS */
+ need_fpstatus = true;
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3c: /* URECPE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x3d: /* FRECPE */
+ case 0x7d: /* FRSQRTE */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
+ return;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* handle_2misc_narrow does a 2*size -> size operation, but these
+ * instructions encode the source size rather than dest size.
+ */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
+ return;
+ case 0x36: /* BFCVTN, BFCVTN2 */
+ if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
+ return;
+ case 0x17: /* FCVTL, FCVTL2 */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_widening(s, opcode, is_q, size, rn, rd);
+ return;
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ need_rmode = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ /* fall through */
+ case 0x59: /* FRINTX */
+ case 0x79: /* FRINTI */
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x58: /* FRINTA */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x7c: /* URSQRTE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1e: /* FRINT32Z */
+ case 0x1f: /* FRINT64Z */
+ need_rmode = true;
+ rmode = FPROUNDING_ZERO;
+ /* fall through */
+ case 0x5e: /* FRINT32X */
+ case 0x5f: /* FRINT64X */
+ need_fpstatus = true;
+ if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (need_fpstatus || need_rmode) {
+ tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
+ } else {
+ tcg_fpstatus = NULL;
+ }
+ if (need_rmode) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ } else {
+ tcg_rmode = NULL;
+ }
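+    /* set_rmode writes the previous rounding mode back into tcg_rmode, so
+     * the matching call at the end of the function can restore it.
+     */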
+
+ switch (opcode) {
+ case 0x5:
+ if (u && size == 0) { /* NOT */
+ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
+ return;
+ }
+ break;
+ case 0x8: /* CMGT, CMGE */
+ if (u) {
+ gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
+ } else {
+ gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
+ }
+ return;
+ case 0x9: /* CMEQ, CMLE */
+ if (u) {
+ gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
+ } else {
+ gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
+ }
+ return;
+ case 0xa: /* CMLT */
+ gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
+ return;
+ case 0xb:
+ if (u) { /* ABS, NEG */
+ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
+ } else {
+ gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
+ }
+ return;
+ }
+
+ if (size == 3) {
+ /* All 64-bit element operations can be shared with scalar 2misc */
+ int pass;
+
+ /* Coverity claims (size == 3 && !is_q) has been eliminated
+ * from all paths leading to here.
+ */
+ tcg_debug_assert(is_q);
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
+ tcg_rmode, tcg_fpstatus);
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ }
+ } else {
+ int pass;
+
+ for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ if (size == 2) {
+ /* Special cases for 32 bit elements */
+ switch (opcode) {
+ case 0x4: /* CLS */
+ if (u) {
+ tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
+ } else {
+ tcg_gen_clrsb_i32(tcg_res, tcg_op);
+ }
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
+ } else {
+ gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x7c: /* URSQRTE */
+ gen_helper_rsqrte_u32(tcg_res, tcg_op);
+ break;
+ case 0x1e: /* FRINT32Z */
+ case 0x5e: /* FRINT32X */
+ gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x1f: /* FRINT64Z */
+ case 0x5f: /* FRINT64X */
+ gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ /* Use helpers for 8 and 16 bit elements */
+ switch (opcode) {
+ case 0x5: /* CNT, RBIT */
+ /* For these two insns size is part of the opcode specifier
+ * (handled earlier); they always operate on byte elements.
+ */
+ if (u) {
+ gen_helper_neon_rbit_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cnt_u8(tcg_res, tcg_op);
+ }
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_res, cpu_env, tcg_op);
+ break;
+ }
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ if (size == 0) {
+ gen_helper_neon_clz_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_clz_u16(tcg_res, tcg_op);
+ }
+ } else {
+ if (size == 0) {
+ gen_helper_neon_cls_s8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cls_s16(tcg_res, tcg_op);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ }
+ }
+ clear_vec_high(s, is_q, rd);
+
+ if (need_rmode) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+ }
+ if (need_fpstatus) {
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
+}
+
+/* AdvSIMD [scalar] two register miscellaneous (FP16)
+ *
+ * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
+ * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
+ * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
+ * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
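+ * (mask has 1s at the fixed bits of the diagram; val gives their values)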
+ *
+ * This actually covers two groups where scalar access is governed by
+ * bit 28. Several of the instructions (float to integral) exist only
+ * in the vector form and are unallocated in the scalar decode. Also
+ * in the scalar decode Q is always 1.
+ */
+static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
+{
+ int fpop, opcode, a, u;
+ int rn, rd;
+ bool is_q;
+ bool is_scalar;
+ bool only_in_vector = false;
+
+ int pass;
+ TCGv_i32 tcg_rmode = NULL;
+ TCGv_ptr tcg_fpstatus = NULL;
+ bool need_rmode = false;
+ bool need_fpst = true;
+ int rmode;
+
+ if (!dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ rd = extract32(insn, 0, 5);
+ rn = extract32(insn, 5, 5);
+
+ a = extract32(insn, 23, 1);
+ u = extract32(insn, 29, 1);
+ is_scalar = extract32(insn, 28, 1);
+ is_q = extract32(insn, 30, 1);
+
+ opcode = extract32(insn, 12, 5);
+ fpop = deposit32(opcode, 5, 1, a);
+ fpop = deposit32(fpop, 6, 1, u);
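+    /* a:u:opcode forms the same 7-bit opcode space as the combined opcode
+     * in disas_simd_two_reg_misc above, so the case values below match it.
+     */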
+
+ switch (fpop) {
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ int elements;
+
+ if (is_scalar) {
+ elements = 1;
+ } else {
+ elements = (is_q ? 8 : 4);
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
+ return;
+ }
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
+ return;
+ case 0x3d: /* FRECPE */
+ case 0x3f: /* FRECPX */
+ break;
+ case 0x18: /* FRINTN */
+ need_rmode = true;
+ only_in_vector = true;
+ rmode = FPROUNDING_TIEEVEN;
+ break;
+ case 0x19: /* FRINTM */
+ need_rmode = true;
+ only_in_vector = true;
+ rmode = FPROUNDING_NEGINF;
+ break;
+ case 0x38: /* FRINTP */
+ need_rmode = true;
+ only_in_vector = true;
+ rmode = FPROUNDING_POSINF;
+ break;
+ case 0x39: /* FRINTZ */
+ need_rmode = true;
+ only_in_vector = true;
+ rmode = FPROUNDING_ZERO;
+ break;
+ case 0x58: /* FRINTA */
+ need_rmode = true;
+ only_in_vector = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x59: /* FRINTX */
+ case 0x79: /* FRINTI */
+ only_in_vector = true;
+ /* current rounding mode */
+ break;
+ case 0x1a: /* FCVTNS */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEEVEN;
+ break;
+ case 0x1b: /* FCVTMS */
+ need_rmode = true;
+ rmode = FPROUNDING_NEGINF;
+ break;
+ case 0x1c: /* FCVTAS */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x3a: /* FCVTPS */
+ need_rmode = true;
+ rmode = FPROUNDING_POSINF;
+ break;
+ case 0x3b: /* FCVTZS */
+ need_rmode = true;
+ rmode = FPROUNDING_ZERO;
+ break;
+ case 0x5a: /* FCVTNU */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEEVEN;
+ break;
+ case 0x5b: /* FCVTMU */
+ need_rmode = true;
+ rmode = FPROUNDING_NEGINF;
+ break;
+ case 0x5c: /* FCVTAU */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x7a: /* FCVTPU */
+ need_rmode = true;
+ rmode = FPROUNDING_POSINF;
+ break;
+ case 0x7b: /* FCVTZU */
+ need_rmode = true;
+ rmode = FPROUNDING_ZERO;
+ break;
+ case 0x2f: /* FABS */
+ case 0x6f: /* FNEG */
+ need_fpst = false;
+ break;
+ case 0x7d: /* FRSQRTE */
+ case 0x7f: /* FSQRT (vector) */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Check additional constraints for the scalar encoding */
+ if (is_scalar) {
+ if (!is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* FRINTxx is only in the vector form */
+ if (only_in_vector) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (need_rmode || need_fpst) {
+ tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
+ }
+
+ if (need_rmode) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ }
+
+ if (is_scalar) {
+ TCGv_i32 tcg_op = read_fp_hreg(s, rn);
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ switch (fpop) {
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x6f: /* FNEG */
+ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* limit any sign extension going on */
+ tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ } else {
+ for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
+
+ switch (fpop) {
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x2f: /* FABS */
+ tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
+ break;
+ case 0x6f: /* FNEG */
+ tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x7f: /* FSQRT */
+ gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ }
+
+ clear_vec_high(s, is_q, rd);
+ }
+
+ if (tcg_rmode) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+ }
+
+ if (tcg_fpstatus) {
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
+}
+
+/* AdvSIMD scalar x indexed element
+ * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * AdvSIMD vector x indexed element
+ * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
+ */
+static void disas_simd_indexed(DisasContext *s, uint32_t insn)
+{
+ /* This encoding has two kinds of instruction:
+ * normal, where we perform elt x idxelt => elt for each
+ * element in the vector
+ * long, where we perform elt x idxelt and generate a result of
+ * double the width of the input element
+     * The long ops have a 'part' specifier (i.e. they come in INSN, INSN2 pairs).
+ */
+ bool is_scalar = extract32(insn, 28, 1);
+ bool is_q = extract32(insn, 30, 1);
+ bool u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int l = extract32(insn, 21, 1);
+ int m = extract32(insn, 20, 1);
+ /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
+ int rm = extract32(insn, 16, 4);
+ int opcode = extract32(insn, 12, 4);
+ int h = extract32(insn, 11, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool is_long = false;
+ int is_fp = 0;
+ bool is_fp16 = false;
+ int index;
+ TCGv_ptr fpst;
+
+ switch (16 * u + opcode) {
+ case 0x08: /* MUL */
+ case 0x10: /* MLA */
+ case 0x14: /* MLS */
+ if (is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x02: /* SMLAL, SMLAL2 */
+ case 0x12: /* UMLAL, UMLAL2 */
+ case 0x06: /* SMLSL, SMLSL2 */
+ case 0x16: /* UMLSL, UMLSL2 */
+ case 0x0a: /* SMULL, SMULL2 */
+ case 0x1a: /* UMULL, UMULL2 */
+ if (is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_long = true;
+ break;
+ case 0x03: /* SQDMLAL, SQDMLAL2 */
+ case 0x07: /* SQDMLSL, SQDMLSL2 */
+ case 0x0b: /* SQDMULL, SQDMULL2 */
+ is_long = true;
+ break;
+ case 0x0c: /* SQDMULH */
+ case 0x0d: /* SQRDMULH */
+ break;
+ case 0x01: /* FMLA */
+ case 0x05: /* FMLS */
+ case 0x09: /* FMUL */
+ case 0x19: /* FMULX */
+ is_fp = 1;
+ break;
+ case 0x1d: /* SQRDMLAH */
+ case 0x1f: /* SQRDMLSH */
+ if (!dc_isar_feature(aa64_rdm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x0e: /* SDOT */
+ case 0x1e: /* UDOT */
+ if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x0f:
+ switch (size) {
+ case 0: /* SUDOT */
+ case 2: /* USDOT */
+ if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = MO_32;
+ break;
+ case 1: /* BFDOT */
+ if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = MO_32;
+ break;
+ case 3: /* BFMLAL{B,T} */
+ if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+            /* Setting is_fp would enable the wrong size checks below; leave it clear. */
+ size = MO_16;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x11: /* FCMLA #0 */
+ case 0x13: /* FCMLA #90 */
+ case 0x15: /* FCMLA #180 */
+ case 0x17: /* FCMLA #270 */
+ if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_fp = 2;
+ break;
+ case 0x00: /* FMLAL */
+ case 0x04: /* FMLSL */
+ case 0x18: /* FMLAL2 */
+ case 0x1c: /* FMLSL2 */
+ if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = MO_16;
+        /* These are FP ops, but the helper takes cpu_env rather than
+         * fp_status, so leave is_fp clear.
+         */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
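+    /* is_fp: 0 for integer ops, 1 for normal fp, 2 for complex fp (FCMLA). */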
+ switch (is_fp) {
+ case 1: /* normal fp */
+ /* convert insn encoded size to MemOp size */
+ switch (size) {
+ case 0: /* half-precision */
+ size = MO_16;
+ is_fp16 = true;
+ break;
+ case MO_32: /* single precision */
+ case MO_64: /* double precision */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+
+ case 2: /* complex fp */
+ /* Each indexable element is a complex pair. */
+ size += 1;
+ switch (size) {
+ case MO_32:
+ if (h && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_fp16 = true;
+ break;
+ case MO_64:
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+
+ default: /* integer */
+ switch (size) {
+ case MO_8:
+ case MO_64:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+ if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Given MemOp size, adjust register and indexing. */
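+    /* MO_16: index = h:l:m (0..7); MO_32: index = h:l, m extends Rm to
+     * 5 bits; MO_64: index = h.
+     */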
+ switch (size) {
+ case MO_16:
+ index = h << 2 | l << 1 | m;
+ break;
+ case MO_32:
+ index = h << 1 | l;
+ rm |= m << 4;
+ break;
+ case MO_64:
+ if (l || !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ index = h;
+ rm |= m << 4;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_fp) {
+ fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ } else {
+ fpst = NULL;
+ }
+
+ switch (16 * u + opcode) {
+ case 0x0e: /* SDOT */
+ case 0x1e: /* UDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
+ u ? gen_helper_gvec_udot_idx_b
+ : gen_helper_gvec_sdot_idx_b);
+ return;
+ case 0x0f:
+ switch (extract32(insn, 22, 2)) {
+ case 0: /* SUDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
+ gen_helper_gvec_sudot_idx_b);
+ return;
+ case 1: /* BFDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
+ gen_helper_gvec_bfdot_idx);
+ return;
+ case 2: /* USDOT */
+ gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
+ gen_helper_gvec_usdot_idx_b);
+ return;
+ case 3: /* BFMLAL{B,T} */
+ gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
+ gen_helper_gvec_bfmlal_idx);
+ return;
+ }
+ g_assert_not_reached();
+ case 0x11: /* FCMLA #0 */
+ case 0x13: /* FCMLA #90 */
+ case 0x15: /* FCMLA #180 */
+ case 0x17: /* FCMLA #270 */
+ {
+ int rot = extract32(insn, 13, 2);
+ int data = (index << 2) | rot;
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, rd), fpst,
+ is_q ? 16 : 8, vec_full_reg_size(s), data,
+ size == MO_64
+ ? gen_helper_gvec_fcmlas_idx
+ : gen_helper_gvec_fcmlah_idx);
+ tcg_temp_free_ptr(fpst);
+ }
+ return;
+
+ case 0x00: /* FMLAL */
+ case 0x04: /* FMLSL */
+ case 0x18: /* FMLAL2 */
+ case 0x1c: /* FMLSL2 */
+ {
+ int is_s = extract32(opcode, 2, 1);
+ int is_2 = u;
+ int data = (index << 2) | (is_2 << 1) | is_s;
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), cpu_env,
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ data, gen_helper_gvec_fmlal_idx_a64);
+ }
+ return;
+
+ case 0x08: /* MUL */
+ if (!is_long && !is_scalar) {
+ static gen_helper_gvec_3 * const fns[3] = {
+ gen_helper_gvec_mul_idx_h,
+ gen_helper_gvec_mul_idx_s,
+ gen_helper_gvec_mul_idx_d,
+ };
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ index, fns[size - 1]);
+ return;
+ }
+ break;
+
+ case 0x10: /* MLA */
+ if (!is_long && !is_scalar) {
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_gvec_mla_idx_h,
+ gen_helper_gvec_mla_idx_s,
+ gen_helper_gvec_mla_idx_d,
+ };
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ index, fns[size - 1]);
+ return;
+ }
+ break;
+
+ case 0x14: /* MLS */
+ if (!is_long && !is_scalar) {
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_gvec_mls_idx_h,
+ gen_helper_gvec_mls_idx_s,
+ gen_helper_gvec_mls_idx_d,
+ };
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s),
+ index, fns[size - 1]);
+ return;
+ }
+ break;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+ int pass;
+
+ assert(is_fp && is_q && !is_long);
+
+ read_vec_element(s, tcg_idx, rm, index, MO_64);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ switch (16 * u + opcode) {
+ case 0x05: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op, tcg_op);
+ /* fall through */
+ case 0x01: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
+ break;
+ case 0x09: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
+ break;
+ case 0x19: /* FMULX */
+ gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+ }
+
+ tcg_temp_free_i64(tcg_idx);
+ clear_vec_high(s, !is_scalar, rd);
+ } else if (!is_long) {
+ /* 32 bit floating point, or 16 or 32 bit integer.
+ * For the 16 bit scalar case we use the usual Neon helpers and
+ * rely on the fact that 0 op 0 == 0 with no side effects.
+ */
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (size == 1 && !is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to duplicate
+ * the index into both halves of the 32 bit tcg_idx and then use
+ * the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
+
+ switch (16 * u + opcode) {
+ case 0x08: /* MUL */
+ case 0x10: /* MLA */
+ case 0x14: /* MLS */
+ {
+ static NeonGenTwoOpFn * const fns[2][2] = {
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ NeonGenTwoOpFn *genfn;
+ bool is_sub = opcode == 0x4;
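+                /* opcode here is the raw 4-bit field: 0x0 MLA, 0x4 MLS,
+                 * 0x8 MUL (which skips the accumulate step below).
+                 */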
+
+ if (size == 1) {
+ gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
+ } else {
+ tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
+ }
+ if (opcode == 0x8) {
+ break;
+ }
+ read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ genfn = fns[size - 1][is_sub];
+ genfn(tcg_res, tcg_op, tcg_res);
+ break;
+ }
+ case 0x05: /* FMLS */
+ case 0x01: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass,
+ is_scalar ? size : MO_32);
+ switch (size) {
+ case 1:
+ if (opcode == 0x5) {
+ /* As usual for ARM, separate negation for fused
+ * multiply-add */
+ tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
+ }
+ if (is_scalar) {
+ gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
+ tcg_res, fpst);
+ } else {
+ gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
+ tcg_res, fpst);
+ }
+ break;
+ case 2:
+ if (opcode == 0x5) {
+ /* As usual for ARM, separate negation for
+ * fused multiply-add */
+ tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
+ }
+ gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
+ tcg_res, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 0x09: /* FMUL */
+ switch (size) {
+ case 1:
+ if (is_scalar) {
+ gen_helper_advsimd_mulh(tcg_res, tcg_op,
+ tcg_idx, fpst);
+ } else {
+ gen_helper_advsimd_mul2h(tcg_res, tcg_op,
+ tcg_idx, fpst);
+ }
+ break;
+ case 2:
+ gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 0x19: /* FMULX */
+ switch (size) {
+ case 1:
+ if (is_scalar) {
+ gen_helper_advsimd_mulxh(tcg_res, tcg_op,
+ tcg_idx, fpst);
+ } else {
+ gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
+ tcg_idx, fpst);
+ }
+ break;
+ case 2:
+ gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 0x0c: /* SQDMULH */
+ if (size == 1) {
+ gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ case 0x0d: /* SQRDMULH */
+ if (size == 1) {
+ gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ case 0x1d: /* SQRDMLAH */
+ read_vec_element_i32(s, tcg_res, rd, pass,
+ is_scalar ? size : MO_32);
+ if (size == 1) {
+ gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx, tcg_res);
+ } else {
+ gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx, tcg_res);
+ }
+ break;
+ case 0x1f: /* SQRDMLSH */
+ read_vec_element_i32(s, tcg_res, rd, pass,
+ is_scalar ? size : MO_32);
+ if (size == 1) {
+ gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx, tcg_res);
+ } else {
+ gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx, tcg_res);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ tcg_temp_free_i32(tcg_idx);
+ clear_vec_high(s, is_q, rd);
+ } else {
+ /* long ops: 16x16->32 or 32x32->64 */
+ TCGv_i64 tcg_res[2];
+ int pass;
+ bool satop = extract32(opcode, 0, 1);
+ MemOp memop = MO_32;
+
+ if (satop || !u) {
+ memop |= MO_SIGN;
+ }
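+        /* Sign-extend the sources for the signed multiplies (u == 0) and
+         * for the saturating doubling ops, which are always signed.
+         */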
+
+ if (size == 2) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_idx, rm, index, memop);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_passres;
+ int passelt;
+
+ if (is_scalar) {
+ passelt = 0;
+ } else {
+ passelt = pass + (is_q * 2);
+ }
+
+ read_vec_element(s, tcg_op, rn, passelt, memop);
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
+ tcg_temp_free_i64(tcg_op);
+
+ if (satop) {
+ /* saturating, doubling */
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i64(tcg_idx);
+
+ clear_vec_high(s, !is_scalar, rd);
+ } else {
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+
+ assert(size == 1);
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (!is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to
+ * duplicate the index into both halves of the 32 bit tcg_idx
+ * and then use the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i64 tcg_passres;
+
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_op, rn, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_op, rn,
+ pass + (is_q * 2), MO_32);
+ }
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ if (memop & MO_SIGN) {
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
+ }
+ if (satop) {
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_op);
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_idx);
+
+ if (is_scalar) {
+ tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
+ }
+ }
+
+ if (is_scalar) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ }
+
+ if (fpst) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/* Crypto AES
+ * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----------------+------+-----------+--------+-----+------+------+
+ * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----------------+------+-----------+--------+-----+------+------+
+ */
+static void disas_crypto_aes(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int decrypt;
+ gen_helper_gvec_2 *genfn2 = NULL;
+ gen_helper_gvec_3 *genfn3 = NULL;
+
+ if (!dc_isar_feature(aa64_aes, s) || size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x4: /* AESE */
+ decrypt = 0;
+ genfn3 = gen_helper_crypto_aese;
+ break;
+ case 0x6: /* AESMC */
+ decrypt = 0;
+ genfn2 = gen_helper_crypto_aesmc;
+ break;
+ case 0x5: /* AESD */
+ decrypt = 1;
+ genfn3 = gen_helper_crypto_aese;
+ break;
+ case 0x7: /* AESIMC */
+ decrypt = 1;
+ genfn2 = gen_helper_crypto_aesmc;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ if (genfn2) {
+ gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
+ } else {
+ gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
+ }
+}
+
+/* Crypto three-reg SHA
+ * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
+ * +-----------------+------+---+------+---+--------+-----+------+------+
+ * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
+ * +-----------------+------+---+------+---+--------+-----+------+------+
+ */
+static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 3);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ gen_helper_gvec_3 *genfn;
+ bool feature;
+
+ if (size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SHA1C */
+ genfn = gen_helper_crypto_sha1c;
+ feature = dc_isar_feature(aa64_sha1, s);
+ break;
+ case 1: /* SHA1P */
+ genfn = gen_helper_crypto_sha1p;
+ feature = dc_isar_feature(aa64_sha1, s);
+ break;
+ case 2: /* SHA1M */
+ genfn = gen_helper_crypto_sha1m;
+ feature = dc_isar_feature(aa64_sha1, s);
+ break;
+ case 3: /* SHA1SU0 */
+ genfn = gen_helper_crypto_sha1su0;
+ feature = dc_isar_feature(aa64_sha1, s);
+ break;
+ case 4: /* SHA256H */
+ genfn = gen_helper_crypto_sha256h;
+ feature = dc_isar_feature(aa64_sha256, s);
+ break;
+ case 5: /* SHA256H2 */
+ genfn = gen_helper_crypto_sha256h2;
+ feature = dc_isar_feature(aa64_sha256, s);
+ break;
+ case 6: /* SHA256SU1 */
+ genfn = gen_helper_crypto_sha256su1;
+ feature = dc_isar_feature(aa64_sha256, s);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
+}
+
+/* Crypto two-reg SHA
+ * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----------------+------+-----------+--------+-----+------+------+
+ * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----------------+------+-----------+--------+-----+------+------+
+ */
+static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ gen_helper_gvec_2 *genfn;
+ bool feature;
+
+ if (size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SHA1H */
+ feature = dc_isar_feature(aa64_sha1, s);
+ genfn = gen_helper_crypto_sha1h;
+ break;
+ case 1: /* SHA1SU1 */
+ feature = dc_isar_feature(aa64_sha1, s);
+ genfn = gen_helper_crypto_sha1su1;
+ break;
+ case 2: /* SHA256SU0 */
+ feature = dc_isar_feature(aa64_sha256, s);
+ genfn = gen_helper_crypto_sha256su0;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
+}
+
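+/* RAX1 (from the SHA3 group) computes d = n ^ rol64(m, 1); the i64 and
+ * vec expansions below implement this per 64-bit lane.
+ */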
+static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
+{
+ tcg_gen_rotli_i64(d, m, 1);
+ tcg_gen_xor_i64(d, d, n);
+}
+
+static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
+{
+ tcg_gen_rotli_vec(vece, d, m, 1);
+ tcg_gen_xor_vec(vece, d, d, n);
+}
+
+void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
+ static const GVecGen3 op = {
+ .fni8 = gen_rax1_i64,
+ .fniv = gen_rax1_vec,
+ .opt_opc = vecop_list,
+ .fno = gen_helper_crypto_rax1,
+ .vece = MO_64,
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
+}
+
+/* Crypto three-reg SHA512
+ * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
+ * +-----------------------+------+---+---+-----+--------+------+------+
+ * | 1 1 0 0 1 1 1 0 0 1 1 | Rm | 1 | O | 0 0 | opcode | Rn | Rd |
+ * +-----------------------+------+---+---+-----+--------+------+------+
+ */
+static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
+{
+ int opcode = extract32(insn, 10, 2);
+ int o = extract32(insn, 14, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool feature;
+ gen_helper_gvec_3 *oolfn = NULL;
+ GVecGen3Fn *gvecfn = NULL;
+
+ if (o == 0) {
+ switch (opcode) {
+ case 0: /* SHA512H */
+ feature = dc_isar_feature(aa64_sha512, s);
+ oolfn = gen_helper_crypto_sha512h;
+ break;
+ case 1: /* SHA512H2 */
+ feature = dc_isar_feature(aa64_sha512, s);
+ oolfn = gen_helper_crypto_sha512h2;
+ break;
+ case 2: /* SHA512SU1 */
+ feature = dc_isar_feature(aa64_sha512, s);
+ oolfn = gen_helper_crypto_sha512su1;
+ break;
+ case 3: /* RAX1 */
+ feature = dc_isar_feature(aa64_sha3, s);
+ gvecfn = gen_gvec_rax1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (opcode) {
+ case 0: /* SM3PARTW1 */
+ feature = dc_isar_feature(aa64_sm3, s);
+ oolfn = gen_helper_crypto_sm3partw1;
+ break;
+ case 1: /* SM3PARTW2 */
+ feature = dc_isar_feature(aa64_sm3, s);
+ oolfn = gen_helper_crypto_sm3partw2;
+ break;
+ case 2: /* SM4EKEY */
+ feature = dc_isar_feature(aa64_sm4, s);
+ oolfn = gen_helper_crypto_sm4ekey;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (oolfn) {
+ gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
+ } else {
+ gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
+ }
+}
+
+/* Crypto two-reg SHA512
+ * 31 12 11 10 9 5 4 0
+ * +-----------------------------------------+--------+------+------+
+ * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode | Rn | Rd |
+ * +-----------------------------------------+--------+------+------+
+ */
+static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
+{
+ int opcode = extract32(insn, 10, 2);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool feature;
+
+ switch (opcode) {
+ case 0: /* SHA512SU0 */
+ feature = dc_isar_feature(aa64_sha512, s);
+ break;
+ case 1: /* SM4E */
+ feature = dc_isar_feature(aa64_sm4, s);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SHA512SU0 */
+ gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
+ break;
+ case 1: /* SM4E */
+ gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Crypto four-register
+ * 31 23 22 21 20 16 15 14 10 9 5 4 0
+ * +-------------------+-----+------+---+------+------+------+
+ * | 1 1 0 0 1 1 1 0 0 | Op0 | Rm | 0 | Ra | Rn | Rd |
+ * +-------------------+-----+------+---+------+------+------+
+ */
+static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
+{
+ int op0 = extract32(insn, 21, 2);
+ int rm = extract32(insn, 16, 5);
+ int ra = extract32(insn, 10, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool feature;
+
+ switch (op0) {
+ case 0: /* EOR3 */
+ case 1: /* BCAX */
+ feature = dc_isar_feature(aa64_sha3, s);
+ break;
+ case 2: /* SM3SS1 */
+ feature = dc_isar_feature(aa64_sm3, s);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!feature) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (op0 < 2) {
+ TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
+ int pass;
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_op3 = tcg_temp_new_i64();
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = tcg_temp_new_i64();
+
+ for (pass = 0; pass < 2; pass++) {
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+ read_vec_element(s, tcg_op3, ra, pass, MO_64);
+
+ if (op0 == 0) {
+ /* EOR3 */
+ tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
+ } else {
+ /* BCAX */
+ tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
+ }
+ tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ }
+ write_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ write_vec_element(s, tcg_res[1], rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_op3);
+ tcg_temp_free_i64(tcg_res[0]);
+ tcg_temp_free_i64(tcg_res[1]);
+ } else {
+ TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
+
+ tcg_op1 = tcg_temp_new_i32();
+ tcg_op2 = tcg_temp_new_i32();
+ tcg_op3 = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ tcg_zero = tcg_const_i32(0);
+
+ read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
+ read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
+
+ tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
+ tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
+ tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
+ tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
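+        /* SM3SS1: ROL32(ROL32(Vn, 12) + Vm + Va, 7) on element 3;
+         * rotri by 20 and 25 implement the two left-rotates.
+         */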
+
+ write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
+ write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
+ write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
+ write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_op3);
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ }
+}
+
+/* Crypto XAR
+ * 31 21 20 16 15 10 9 5 4 0
+ * +-----------------------+------+--------+------+------+
+ * | 1 1 0 0 1 1 1 0 1 0 0 | Rm | imm6 | Rn | Rd |
+ * +-----------------------+------+--------+------+------+
+ */
+static void disas_crypto_xar(DisasContext *s, uint32_t insn)
+{
+ int rm = extract32(insn, 16, 5);
+ int imm6 = extract32(insn, 10, 6);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (!dc_isar_feature(aa64_sha3, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), imm6, 16,
+ vec_full_reg_size(s));
+}
+
+/* Crypto three-reg imm2
+ * 31 21 20 16 15 14 13 12 11 10 9 5 4 0
+ * +-----------------------+------+-----+------+--------+------+------+
+ * | 1 1 0 0 1 1 1 0 0 1 0 | Rm | 1 0 | imm2 | opcode | Rn | Rd |
+ * +-----------------------+------+-----+------+--------+------+------+
+ */
+static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
+ gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
+ };
+ int opcode = extract32(insn, 10, 2);
+ int imm2 = extract32(insn, 12, 2);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (!dc_isar_feature(aa64_sm3, s)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
+}
+
+/* C3.6 Data processing - SIMD, inc Crypto
+ *
+ * As the decode gets a little complex we are using a table based
+ * approach for this part of the decode.
+ */
+static const AArch64DecodeTable data_proc_simd[] = {
+ /* pattern , mask , fn */
+ { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
+ { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
+ { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
+ { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
+ { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
+ { 0x0e000400, 0x9fe08400, disas_simd_copy },
+ { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
+ /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
+ { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
+ { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
+ { 0x0e000000, 0xbf208c00, disas_simd_tb },
+ { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
+ { 0x2e000000, 0xbf208400, disas_simd_ext },
+ { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
+ { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
+ { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
+ { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
+ { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
+ { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
+ { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
+ { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
+ { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
+ { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
+ { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
+ { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
+ { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
+ { 0xce000000, 0xff808000, disas_crypto_four_reg },
+ { 0xce800000, 0xffe00000, disas_crypto_xar },
+ { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
+ { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
+ { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
+ { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
+ { 0x00000000, 0x00000000, NULL }
+};
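+/* lookup_disas_fn returns the fn of the first entry for which
+ * (insn & mask) == pattern; the zero-mask entry terminates the search.
+ */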
+
+static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
+{
+ /* Note that this is called with all non-FP cases from
+ * table C3-6 so it must UNDEF for entries not specifically
+ * allocated to instructions in that table.
+ */
+ AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
+ if (fn) {
+ fn(s, insn);
+ } else {
+ unallocated_encoding(s);
+ }
+}
+
+/* C3.6 Data processing - SIMD and floating point */
+static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
+{
+ if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
+ disas_data_proc_fp(s, insn);
+ } else {
+ /* SIMD, including crypto */
+ disas_data_proc_simd(s, insn);
+ }
+}
+
+/**
+ * is_guarded_page:
+ * @env: The cpu environment
+ * @s: The DisasContext
+ *
+ * Return true if the page is guarded.
+ */
+static bool is_guarded_page(CPUARMState *env, DisasContext *s)
+{
+ uint64_t addr = s->base.pc_first;
+#ifdef CONFIG_USER_ONLY
+ return page_get_flags(addr) & PAGE_BTI;
+#else
+ int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
+ unsigned int index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+
+ /*
+ * We test this immediately after reading an insn, which means
+ * that any normal page must be in the TLB. The only exception
+ * would be for executing from flash or device memory, which
+ * does not retain the TLB entry.
+ *
+ * FIXME: Assume false for those, for now. We could use
+ * arm_cpu_get_phys_page_attrs_debug to re-read the page
+ * table entry even for that case.
+ */
+ return (tlb_hit(entry->addr_code, addr) &&
+ arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
+#endif
+}
+
+/**
+ * btype_destination_ok:
+ * @insn: The instruction at the branch destination
+ * @bt: SCTLR_ELx.BT
+ * @btype: PSTATE.BTYPE, and is non-zero
+ *
+ * On a guarded page, there are a limited number of insns
+ * that may be present at the branch target:
+ * - branch target identifiers,
+ * - paciasp, pacibsp,
+ * - BRK insn
+ * - HLT insn
+ * Anything else causes a Branch Target Exception.
+ *
+ * Return true if the branch is compatible, false to raise BTITRAP.
+ */
+static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
+{
+ if ((insn & 0xfffff01fu) == 0xd503201fu) {
+ /* HINT space */
+ switch (extract32(insn, 5, 7)) {
+ case 0b011001: /* PACIASP */
+ case 0b011011: /* PACIBSP */
+ /*
+ * If SCTLR_ELx.BT, then PACI*SP are not compatible
+ * with btype == 3. Otherwise all btype are ok.
+ */
+ return !bt || btype != 3;
+ case 0b100000: /* BTI */
+ /* Not compatible with any btype. */
+ return false;
+ case 0b100010: /* BTI c */
+ /* Not compatible with btype == 3 */
+ return btype != 3;
+ case 0b100100: /* BTI j */
+ /* Not compatible with btype == 2 */
+ return btype != 2;
+ case 0b100110: /* BTI jc */
+ /* Compatible with any btype. */
+ return true;
+ }
+ } else {
+ switch (insn & 0xffe0001fu) {
+ case 0xd4200000u: /* BRK */
+ case 0xd4400000u: /* HLT */
+ /* Give priority to the breakpoint exception. */
+ return true;
+ }
+ }
+ return false;
+}
+
+static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
+ ARMCPU *arm_cpu = env_archcpu(env);
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
+ int bound, core_mmu_idx;
+
+ dc->isar = &arm_cpu->isar;
+ dc->condjmp = 0;
+
+ dc->aarch64 = 1;
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
+ dc->thumb = 0;
+ dc->sctlr_b = 0;
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
+ dc->condexec_mask = 0;
+ dc->condexec_cond = 0;
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
+ dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
+ dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
+ dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
+ dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (dc->current_el == 0);
+#endif
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+ dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
+ dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
+ dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16;
+ dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
+ dc->bt = EX_TBFLAG_A64(tb_flags, BT);
+ dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
+ dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
+ dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
+ dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
+ dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
+ dc->vec_len = 0;
+ dc->vec_stride = 0;
+ dc->cp_regs = arm_cpu->cp_regs;
+ dc->features = env->features;
+ dc->dcz_blocksize = arm_cpu->dcz_blocksize;
+
+#ifdef CONFIG_USER_ONLY
+ /* In sve_probe_page, we assume TBI is enabled. */
+ tcg_debug_assert(dc->tbid & 1);
+#endif
+
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
+ dc->is_ldex = false;
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
+
+ /* Bound the number of insns to execute to those left on the page. */
+ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
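+    /* e.g. with 4 KiB pages and pc_first == 0x1ffc,
+     * -(0x1ffc | 0xff...f000) == 4 bytes, i.e. one insn left on the page.
+     */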
+
+ /* If architectural single step active, limit to 1. */
+ if (dc->ss_active) {
+ bound = 1;
+ }
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
+
+ init_tmp_a64_array(dc);
+}
+
+static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(dc->base.pc_next, 0, 0);
+ dc->insn_start = tcg_last_op();
+}
+
+static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *s = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
+ uint32_t insn;
+
+ if (s->ss_active && !s->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(s->base.num_insns == 1);
+ gen_swstep_exception(s, 0, 0);
+ s->base.is_jmp = DISAS_NORETURN;
+ return;
+ }
+
+ s->pc_curr = s->base.pc_next;
+ insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
+ s->insn = insn;
+ s->base.pc_next += 4;
+
+ s->fp_access_checked = false;
+ s->sve_access_checked = false;
+
+ if (s->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(s));
+ return;
+ }
+
+ if (dc_isar_feature(aa64_bti, s)) {
+ if (s->base.num_insns == 1) {
+ /*
+ * At the first insn of the TB, compute s->guarded_page.
+ * We delayed computing this until successfully reading
+ * the first insn of the TB, above. This (mostly) ensures
+ * that the softmmu tlb entry has been populated, and the
+ * page table GP bit is available.
+ *
+ * Note that we need to compute this even if btype == 0,
+ * because this value is used for BR instructions later
+ * where ENV is not available.
+ */
+ s->guarded_page = is_guarded_page(env, s);
+
+ /* First insn can have btype set to non-zero. */
+ tcg_debug_assert(s->btype >= 0);
+
+ /*
+ * Note that the Branch Target Exception has fairly high
+             * priority -- below debugging exceptions but above almost
+             * everything else. This allows us to handle this now
+ * instead of waiting until the insn is otherwise decoded.
+ */
+ if (s->btype != 0
+ && s->guarded_page
+ && !btype_destination_ok(insn, s->bt, s->btype)) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_btitrap(s->btype),
+ default_exception_el(s));
+ return;
+ }
+ } else {
+ /* Not the first insn: btype must be 0. */
+ tcg_debug_assert(s->btype == 0);
+ }
+ }
+
+ switch (extract32(insn, 25, 4)) {
+ case 0x0: case 0x1: case 0x3: /* UNALLOCATED */
+ unallocated_encoding(s);
+ break;
+ case 0x2:
+ if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
+ unallocated_encoding(s);
+ }
+ break;
+ case 0x8: case 0x9: /* Data processing - immediate */
+ disas_data_proc_imm(s, insn);
+ break;
+ case 0xa: case 0xb: /* Branch, exception generation and system insns */
+ disas_b_exc_sys(s, insn);
+ break;
+ case 0x4:
+ case 0x6:
+ case 0xc:
+ case 0xe: /* Loads and stores */
+ disas_ldst(s, insn);
+ break;
+ case 0x5:
+ case 0xd: /* Data processing - register */
+ disas_data_proc_reg(s, insn);
+ break;
+ case 0x7:
+ case 0xf: /* Data processing - SIMD and floating point */
+ disas_data_proc_simd_fp(s, insn);
+ break;
+ default:
+        assert(FALSE); /* all 16 cases should be handled above */
+ break;
+ }
+
+ /* if we allocated any temporaries, free them here */
+ free_tmp_a64(s);
+
+ /*
+ * After execution of most insns, btype is reset to 0.
+ * Note that we set btype == -1 when the insn sets btype.
+ */
+ if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
+ reset_btype(s);
+ }
+
+ translator_loop_temp_check(&s->base);
+}
+
+static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (unlikely(dc->ss_active)) {
+ /* Note that this means single stepping WFI doesn't halt the CPU.
+ * For conditional branch insns this is harmless unreachable code as
+ * gen_goto_tb() has already handled emitting the debug exception
+ * (and thus a tb-jump is not possible when singlestepping).
+ */
+ switch (dc->base.is_jmp) {
+ default:
+ gen_a64_set_pc_im(dc->base.pc_next);
+ /* fall through */
+ case DISAS_EXIT:
+ case DISAS_JUMP:
+ gen_step_complete_exception(dc);
+ break;
+ case DISAS_NORETURN:
+ break;
+ }
+ } else {
+ switch (dc->base.is_jmp) {
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ gen_goto_tb(dc, 1, dc->base.pc_next);
+ break;
+ default:
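+            /*
+             * Any disas state not listed explicitly is treated like
+             * DISAS_UPDATE_EXIT: sync the PC, then exit the TB.
+             */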
+ case DISAS_UPDATE_EXIT:
+ gen_a64_set_pc_im(dc->base.pc_next);
+ /* fall through */
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_UPDATE_NOCHAIN:
+ gen_a64_set_pc_im(dc->base.pc_next);
+ /* fall through */
+ case DISAS_JUMP:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_NORETURN:
+ case DISAS_SWI:
+ break;
+ case DISAS_WFE:
+ gen_a64_set_pc_im(dc->base.pc_next);
+ gen_helper_wfe(cpu_env);
+ break;
+ case DISAS_YIELD:
+ gen_a64_set_pc_im(dc->base.pc_next);
+ gen_helper_yield(cpu_env);
+ break;
+ case DISAS_WFI:
+ {
+ /* This is a special case because we don't want to just halt the CPU
+ * if trying to debug across a WFI.
+ */
+ TCGv_i32 tmp = tcg_const_i32(4);
+
+ gen_a64_set_pc_im(dc->base.pc_next);
+ gen_helper_wfi(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ /* The helper doesn't necessarily throw an exception, but we
+ * must go back to the main loop to check for interrupts anyway.
+ */
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ }
+ }
+ }
+}
+
+static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
+ CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
+}
+
+const TranslatorOps aarch64_translator_ops = {
+ .init_disas_context = aarch64_tr_init_disas_context,
+ .tb_start = aarch64_tr_tb_start,
+ .insn_start = aarch64_tr_insn_start,
+ .translate_insn = aarch64_tr_translate_insn,
+ .tb_stop = aarch64_tr_tb_stop,
+ .disas_log = aarch64_tr_disas_log,
+};
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
new file mode 100644
index 000000000..58f50abca
--- /dev/null
+++ b/target/arm/translate-a64.h
@@ -0,0 +1,127 @@
+/*
+ * AArch64 translation, common definitions.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_TRANSLATE_A64_H
+#define TARGET_ARM_TRANSLATE_A64_H
+
+#define unsupported_encoding(s, insn) \
+ do { \
+ qemu_log_mask(LOG_UNIMP, \
+ "%s:%d: unsupported instruction encoding 0x%08x " \
+ "at pc=%016" PRIx64 "\n", \
+ __FILE__, __LINE__, insn, s->pc_curr); \
+ unallocated_encoding(s); \
+ } while (0)
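+
+/*
+ * Illustrative use (a sketch, not taken from this patch): a decoder
+ * that recognises an encoding but does not implement it yet would
+ * log and fall back to UNDEF, e.g.
+ *
+ *     unsupported_encoding(s, insn);
+ *     return;
+ */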
+
+TCGv_i64 new_tmp_a64(DisasContext *s);
+TCGv_i64 new_tmp_a64_local(DisasContext *s);
+TCGv_i64 new_tmp_a64_zero(DisasContext *s);
+TCGv_i64 cpu_reg(DisasContext *s, int reg);
+TCGv_i64 cpu_reg_sp(DisasContext *s, int reg);
+TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf);
+TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf);
+void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v);
+bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr);
+bool sve_access_check(DisasContext *s);
+TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr);
+TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
+ bool tag_checked, int log2_size);
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+ bool tag_checked, int size);
+
+/* By the time we try to access an FP register we should already have
+ * done the necessary access check, so assert that
+ * (a) we did the check and
+ * (b) we didn't then just plough ahead anyway if it failed.
+ * Print the instruction pattern in the abort message so we can figure
+ * out what we need to fix if a user encounters this problem in the wild.
+ */
+static inline void assert_fp_access_checked(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+ fprintf(stderr, "target-arm: FP access check missing for "
+ "instruction 0x%08x\n", s->insn);
+ abort();
+ }
+#endif
+}
+
+/* Return the offset into CPUARMState of an element of specified
+ * size, 'element' places in from the least significant end of
+ * the FP/vector register Qn.
+ */
+static inline int vec_reg_offset(DisasContext *s, int regno,
+ int element, MemOp size)
+{
+ int element_size = 1 << size;
+ int offs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+ /* This is complicated slightly because vfp.zregs[n].d[0] is
+ * still the lowest and vfp.zregs[n].d[15] the highest of the
+ * 256 byte vector, even on big endian systems.
+ *
+ * Calculate the offset assuming fully little-endian,
+ * then XOR to account for the order of the 8-byte units.
+ *
+ * For 16 byte elements, the two 8 byte halves will not form a
+ * host int128 if the host is bigendian, since they're in the
+ * wrong order. However the only 16 byte operation we have is
+ * a move, so we can ignore this for the moment. More complicated
+ * operations will have to special case loading and storing from
+ * the zregs array.
+ */
+ if (element_size < 8) {
+ offs ^= 8 - element_size;
+ }
+#endif
+ offs += offsetof(CPUARMState, vfp.zregs[regno]);
+ assert_fp_access_checked(s);
+ return offs;
+}
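+
+/*
+ * Worked example for the big-endian XOR above (a sketch, assuming a
+ * big-endian host): for the 16-bit element 0 of Qn, offs starts at 0
+ * and becomes 0 ^ (8 - 2) = 6, i.e. the top two bytes of the first
+ * 8-byte unit -- which is where the architecturally lowest halfword
+ * ends up once the host has byte-swapped that unit.
+ */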
+
+/* Return the offset into CPUARMState of the "whole" vector register Qn. */
+static inline int vec_full_reg_offset(DisasContext *s, int regno)
+{
+ assert_fp_access_checked(s);
+ return offsetof(CPUARMState, vfp.zregs[regno]);
+}
+
+/* Return a newly allocated pointer to the vector register. */
+static inline TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
+ return ret;
+}
+
+/* Return the byte size of the "whole" vector register, VL / 8. */
+static inline int vec_full_reg_size(DisasContext *s)
+{
+ return s->sve_len;
+}
+
+bool disas_sve(DisasContext *, uint32_t);
+
+void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, int64_t shift,
+ uint32_t opr_sz, uint32_t max_sz);
+
+#endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c
new file mode 100644
index 000000000..d9e144e8e
--- /dev/null
+++ b/target/arm/translate-m-nocp.c
@@ -0,0 +1,791 @@
+/*
+ * ARM translation: M-profile NOCP special-case instructions
+ *
+ * Copyright (c) 2020 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "translate.h"
+#include "translate-a32.h"
+
+#include "decode-m-nocp.c.inc"
+
+/*
+ * The decode for VLLDM and VLSTM is nonstandard because:
+ * * if there is no FPU then these insns must NOP in
+ * Secure state and UNDEF in Nonsecure state
+ * * if there is an FPU then these insns do not have
+ * the usual behaviour that vfp_access_check() provides of
+ * being controlled by CPACR/NSACR enable bits or the
+ * lazy-stacking logic.
+ */
+static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
+{
+ TCGv_i32 fptr;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M) ||
+ !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+
+ if (a->op) {
+ /*
+ * T2 encoding ({D0-D31} reglist): v8.1M and up. We choose not
+ * to take the IMPDEF option to make memory accesses to the stack
+ * slots that correspond to the D16-D31 registers (discarding
+ * read data and writing UNKNOWN values), so for us the T2
+ * encoding behaves identically to the T1 encoding.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ } else {
+ /*
+ * T1 encoding ({D0-D15} reglist); undef if we have 32 Dregs.
+ * This is currently architecturally impossible, but we add the
+ * check to stay in line with the pseudocode. Note that we must
+ * emit code for the UNDEF so it takes precedence over the NOCP.
+ */
+ if (dc_isar_feature(aa32_simd_r32, s)) {
+ unallocated_encoding(s);
+ return true;
+ }
+ }
+
+ /*
+ * If not secure, UNDEF. We must emit code for this
+ * rather than returning false so that this takes
+ * precedence over the m-nocp.decode NOCP fallback.
+ */
+ if (!s->v8m_secure) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ s->eci_handled = true;
+
+ /* If no fpu, NOP. */
+ if (!dc_isar_feature(aa32_vfp, s)) {
+ clear_eci_state(s);
+ return true;
+ }
+
+ fptr = load_reg(s, a->rn);
+ if (a->l) {
+ gen_helper_v7m_vlldm(cpu_env, fptr);
+ } else {
+ gen_helper_v7m_vlstm(cpu_env, fptr);
+ }
+ tcg_temp_free_i32(fptr);
+
+ clear_eci_state(s);
+
+ /*
+ * End the TB, because we have updated FP control bits,
+ * and possibly VPR or LTPSIZE.
+ */
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ return true;
+}
+
+static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
+{
+ int btmreg, topreg;
+ TCGv_i64 zero;
+ TCGv_i32 aspen, sfpa;
+
+ if (!dc_isar_feature(aa32_m_sec_state, s)) {
+ /* Before v8.1M, fall through in decode to NOCP check */
+ return false;
+ }
+
+ /* Explicitly UNDEF because this takes precedence over NOCP */
+ if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ s->eci_handled = true;
+
+ if (!dc_isar_feature(aa32_vfp_simd, s)) {
+ /* NOP if we have neither FP nor MVE */
+ clear_eci_state(s);
+ return true;
+ }
+
+ /*
+ * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
+ * active floating point context so we must NOP (without doing
+ * any lazy state preservation or the NOCP check).
+ */
+ aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
+ sfpa = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+ tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+ tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
+ tcg_gen_or_i32(sfpa, sfpa, aspen);
+ arm_gen_condlabel(s);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);
+
+ if (s->fp_excp_el != 0) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ return true;
+ }
+
+ topreg = a->vd + a->imm - 1;
+ btmreg = a->vd;
+
+    /* Convert to Sreg numbers if the insn specified the registers as Dregs */
+ if (a->size == 3) {
+ topreg = topreg * 2 + 1;
+ btmreg *= 2;
+ }
+
+ if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
+ /* UNPREDICTABLE: we choose to undef */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ /* Silently ignore requests to clear D16-D31 if they don't exist */
+ if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
+ topreg = 31;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Zero the Sregs from btmreg to topreg inclusive. */
+ zero = tcg_const_i64(0);
+ if (btmreg & 1) {
+ write_neon_element64(zero, btmreg >> 1, 1, MO_32);
+ btmreg++;
+ }
+ for (; btmreg + 1 <= topreg; btmreg += 2) {
+ write_neon_element64(zero, btmreg >> 1, 0, MO_64);
+ }
+ if (btmreg == topreg) {
+ write_neon_element64(zero, btmreg >> 1, 0, MO_32);
+ btmreg++;
+ }
+ assert(btmreg == topreg + 1);
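+    /*
+     * Illustrative trace (not from the source): for btmreg == 1 and
+     * topreg == 4 the code above writes S1 (the high half of D0),
+     * then D1 as a single 64-bit store covering S2/S3, then S4 (the
+     * low half of D2), leaving btmreg == 5 == topreg + 1.
+     */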
+ if (dc_isar_feature(aa32_mve, s)) {
+ TCGv_i32 z32 = tcg_const_i32(0);
+ store_cpu_field(z32, v7m.vpr);
+ }
+
+ clear_eci_state(s);
+ return true;
+}
+
+/*
+ * M-profile provides two different sets of instructions that can
+ * access floating point system registers: VMSR/VMRS (which move
+ * to/from a general purpose register) and VLDR/VSTR sysreg (which
+ * move directly to/from memory). In some cases there are also side
+ * effects which must happen after any write to memory (which could
+ * cause an exception). So we implement the common logic for the
+ * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
+ * which take pointers to callback functions which will perform the
+ * actual "read/write general purpose register" and "read/write
+ * memory" operations.
+ */
+
+/*
+ * Emit code to store the sysreg to its final destination; frees the
+ * TCG temp 'value' it is passed. do_access is true to do the store,
+ * and false to skip it and only perform side-effects like base
+ * register writeback.
+ */
+typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value,
+ bool do_access);
+/*
+ * Emit code to load the value to be copied to the sysreg; returns
+ * a new TCG temporary. do_access is true to do the load,
+ * and false to skip it and only perform side-effects like base
+ * register writeback.
+ */
+typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque,
+ bool do_access);
+
+/* Common decode/access checks for fp sysreg read/write */
+typedef enum FPSysRegCheckResult {
+ FPSysRegCheckFailed, /* caller should return false */
+ FPSysRegCheckDone, /* caller should return true */
+ FPSysRegCheckContinue, /* caller should continue generating code */
+} FPSysRegCheckResult;
+
+static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
+{
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return FPSysRegCheckFailed;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ case QEMU_VFP_FPSCR_NZCV:
+ break;
+ case ARM_VFP_FPSCR_NZCVQC:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return FPSysRegCheckFailed;
+ }
+ break;
+ case ARM_VFP_FPCXT_S:
+ case ARM_VFP_FPCXT_NS:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return FPSysRegCheckFailed;
+ }
+ if (!s->v8m_secure) {
+ return FPSysRegCheckFailed;
+ }
+ break;
+ case ARM_VFP_VPR:
+ case ARM_VFP_P0:
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return FPSysRegCheckFailed;
+ }
+ break;
+ default:
+ return FPSysRegCheckFailed;
+ }
+
+ /*
+ * FPCXT_NS is a special case: it has specific handling for
+ * "current FP state is inactive", and must do the PreserveFPState()
+ * but not the usual full set of actions done by ExecuteFPCheck().
+ * So we don't call vfp_access_check() and the callers must handle this.
+ */
+ if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
+ return FPSysRegCheckDone;
+ }
+ return FPSysRegCheckContinue;
+}
+
+static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
+ TCGLabel *label)
+{
+ /*
+ * FPCXT_NS is a special case: it has specific handling for
+ * "current FP state is inactive", and must do the PreserveFPState()
+ * but not the usual full set of actions done by ExecuteFPCheck().
+ * We don't have a TB flag that matches the fpInactive check, so we
+ * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
+ *
+ * Emit code that checks fpInactive and does a conditional
+ * branch to label based on it:
+ * if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
+ * if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
+ */
+ assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);
+
+ /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
+ TCGv_i32 aspen, fpca;
+ aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
+ fpca = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+ tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
+ tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
+ tcg_gen_or_i32(fpca, fpca, aspen);
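+    /*
+     * At this point the combined value in fpca is zero exactly when
+     * FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0, i.e. exactly when
+     * fpInactive is true, so the branch below tests it against zero.
+     */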
+ tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
+ tcg_temp_free_i32(aspen);
+ tcg_temp_free_i32(fpca);
+}
+
+static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
+ fp_sysreg_loadfn *loadfn,
+ void *opaque)
+{
+ /* Do a write to an M-profile floating point system register */
+ TCGv_i32 tmp;
+ TCGLabel *lab_end = NULL;
+
+ switch (fp_sysreg_checks(s, regno)) {
+ case FPSysRegCheckFailed:
+ return false;
+ case FPSysRegCheckDone:
+ return true;
+ case FPSysRegCheckContinue:
+ break;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ tmp = loadfn(s, opaque, true);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPSCR_NZCVQC:
+ {
+ TCGv_i32 fpscr;
+ tmp = loadfn(s, opaque, true);
+ if (dc_isar_feature(aa32_mve, s)) {
+ /* QC is only present for MVE; otherwise RES0 */
+ TCGv_i32 qc = tcg_temp_new_i32();
+ tcg_gen_andi_i32(qc, tmp, FPCR_QC);
+ /*
+ * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
+ * here writing the same value into all elements is simplest.
+ */
+ tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
+ 16, 16, qc);
+ }
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
+ fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
+ tcg_gen_or_i32(fpscr, fpscr, tmp);
+ store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_temp_free_i32(tmp);
+ break;
+ }
+ case ARM_VFP_FPCXT_NS:
+ {
+ TCGLabel *lab_active = gen_new_label();
+
+ lab_end = gen_new_label();
+ gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
+ /*
+ * fpInactive case: write is a NOP, so only do side effects
+ * like register writeback before we branch to end
+ */
+ loadfn(s, opaque, false);
+ tcg_gen_br(lab_end);
+
+ gen_set_label(lab_active);
+ /*
+ * !fpInactive: if FPU disabled, take NOCP exception;
+ * otherwise PreserveFPState(), and then FPCXT_NS writes
+ * behave the same as FPCXT_S writes.
+ */
+ if (!vfp_access_check_m(s, true)) {
+ /*
+ * This was only a conditional exception, so override
+ * gen_exception_insn()'s default to DISAS_NORETURN
+ */
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+ }
+ }
+ /* fall through */
+ case ARM_VFP_FPCXT_S:
+ {
+ TCGv_i32 sfpa, control;
+ /*
+ * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
+ * bits [27:0] from value and zeroes bits [31:28].
+ */
+ tmp = loadfn(s, opaque, true);
+ sfpa = tcg_temp_new_i32();
+ tcg_gen_shri_i32(sfpa, tmp, 31);
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_deposit_i32(control, control, sfpa,
+ R_V7M_CONTROL_SFPA_SHIFT, 1);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(sfpa);
+ break;
+ }
+ case ARM_VFP_VPR:
+ /* Behaves as NOP if not privileged */
+ if (IS_USER(s)) {
+ loadfn(s, opaque, false);
+ break;
+ }
+ tmp = loadfn(s, opaque, true);
+ store_cpu_field(tmp, v7m.vpr);
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ break;
+ case ARM_VFP_P0:
+ {
+ TCGv_i32 vpr;
+ tmp = loadfn(s, opaque, true);
+ vpr = load_cpu_field(v7m.vpr);
+ tcg_gen_deposit_i32(vpr, vpr, tmp,
+ R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
+ store_cpu_field(vpr, v7m.vpr);
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ tcg_temp_free_i32(tmp);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+ if (lab_end) {
+ gen_set_label(lab_end);
+ }
+ return true;
+}
+
+static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
+ fp_sysreg_storefn *storefn,
+ void *opaque)
+{
+ /* Do a read from an M-profile floating point system register */
+ TCGv_i32 tmp;
+ TCGLabel *lab_end = NULL;
+ bool lookup_tb = false;
+
+ switch (fp_sysreg_checks(s, regno)) {
+ case FPSysRegCheckFailed:
+ return false;
+ case FPSysRegCheckDone:
+ return true;
+ case FPSysRegCheckContinue:
+ break;
+ }
+
+ if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
+ /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
+ regno = QEMU_VFP_FPSCR_NZCV;
+ }
+
+ switch (regno) {
+ case ARM_VFP_FPSCR:
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ storefn(s, opaque, tmp, true);
+ break;
+ case ARM_VFP_FPSCR_NZCVQC:
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
+ storefn(s, opaque, tmp, true);
+ break;
+ case QEMU_VFP_FPSCR_NZCV:
+ /*
+ * Read just NZCV; this is a special case to avoid the
+ * helper call for the "VMRS to CPSR.NZCV" insn.
+ */
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
+ storefn(s, opaque, tmp, true);
+ break;
+ case ARM_VFP_FPCXT_S:
+ {
+ TCGv_i32 control, sfpa, fpscr;
+ /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
+ tmp = tcg_temp_new_i32();
+ sfpa = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
+ tcg_gen_or_i32(tmp, tmp, sfpa);
+ tcg_temp_free_i32(sfpa);
+ /*
+ * Store result before updating FPSCR etc, in case
+ * it is a memory write which causes an exception.
+ */
+ storefn(s, opaque, tmp, true);
+ /*
+ * Now we must reset FPSCR from FPDSCR_NS, and clear
+ * CONTROL.SFPA; so we'll end the TB here.
+ */
+ tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ tcg_temp_free_i32(fpscr);
+ lookup_tb = true;
+ break;
+ }
+ case ARM_VFP_FPCXT_NS:
+ {
+ TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
+ TCGLabel *lab_active = gen_new_label();
+
+ lookup_tb = true;
+
+ gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
+ /* fpInactive case: reads as FPDSCR_NS */
+ TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+ storefn(s, opaque, tmp, true);
+ lab_end = gen_new_label();
+ tcg_gen_br(lab_end);
+
+ gen_set_label(lab_active);
+ /*
+ * !fpInactive: if FPU disabled, take NOCP exception;
+ * otherwise PreserveFPState(), and then FPCXT_NS
+ * reads the same as FPCXT_S.
+ */
+ if (!vfp_access_check_m(s, true)) {
+ /*
+ * This was only a conditional exception, so override
+ * gen_exception_insn()'s default to DISAS_NORETURN
+ */
+ s->base.is_jmp = DISAS_NEXT;
+ break;
+ }
+ tmp = tcg_temp_new_i32();
+ sfpa = tcg_temp_new_i32();
+ fpscr = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(fpscr, cpu_env);
+ tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
+ tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
+ tcg_gen_or_i32(tmp, tmp, sfpa);
+ tcg_temp_free_i32(control);
+ /* Store result before updating FPSCR, in case it faults */
+ storefn(s, opaque, tmp, true);
+ /* If SFPA is zero then set FPSCR from FPDSCR_NS */
+ fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(sfpa);
+ tcg_temp_free_i32(fpdscr);
+ tcg_temp_free_i32(fpscr);
+ break;
+ }
+ case ARM_VFP_VPR:
+ /* Behaves as NOP if not privileged */
+ if (IS_USER(s)) {
+ storefn(s, opaque, NULL, false);
+ break;
+ }
+ tmp = load_cpu_field(v7m.vpr);
+ storefn(s, opaque, tmp, true);
+ break;
+ case ARM_VFP_P0:
+ tmp = load_cpu_field(v7m.vpr);
+ tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
+ storefn(s, opaque, tmp, true);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (lab_end) {
+ gen_set_label(lab_end);
+ }
+ if (lookup_tb) {
+ gen_lookup_tb(s);
+ }
+ return true;
+}
+
+static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value,
+ bool do_access)
+{
+ arg_VMSR_VMRS *a = opaque;
+
+ if (!do_access) {
+ return;
+ }
+
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR */
+ gen_set_nzcv(value);
+ tcg_temp_free_i32(value);
+ } else {
+ store_reg(s, a->rt, value);
+ }
+}
+
+static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque, bool do_access)
+{
+ arg_VMSR_VMRS *a = opaque;
+
+ if (!do_access) {
+ return NULL;
+ }
+ return load_reg(s, a->rt);
+}
+
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
+{
+ /*
+ * Accesses to R15 are UNPREDICTABLE; we choose to undef.
+ * FPSCR -> r15 is a special case which writes to the PSR flags;
+ * set a->reg to a special value to tell gen_M_fp_sysreg_read()
+ * we only care about the top 4 bits of FPSCR there.
+ */
+ if (a->rt == 15) {
+ if (a->l && a->reg == ARM_VFP_FPSCR) {
+ a->reg = QEMU_VFP_FPSCR_NZCV;
+ } else {
+ return false;
+ }
+ }
+
+ if (a->l) {
+ /* VMRS, move FP system register to gp register */
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
+ } else {
+ /* VMSR, move gp register to FP system register */
+ return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
+ }
+}
+
+static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value,
+ bool do_access)
+{
+ arg_vldr_sysreg *a = opaque;
+ uint32_t offset = a->imm;
+ TCGv_i32 addr;
+
+ if (!a->a) {
+ offset = -offset;
+ }
+
+ if (!do_access && !a->w) {
+ return;
+ }
+
+ addr = load_reg(s, a->rn);
+ if (a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ if (do_access) {
+ gen_aa32_st_i32(s, value, addr, get_mem_index(s),
+ MO_UL | MO_ALIGN | s->be_data);
+ tcg_temp_free_i32(value);
+ }
+
+ if (a->w) {
+ /* writeback */
+ if (!a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+}
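+
+/*
+ * A note on the addressing logic above: a->a selects add vs subtract
+ * for the immediate offset, a->p selects pre-indexed (offset applied
+ * before the access) vs post-indexed, and a->w requests base-register
+ * writeback; memory_to_fp_sysreg() below follows the same pattern.
+ */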
+
+static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque,
+ bool do_access)
+{
+ arg_vldr_sysreg *a = opaque;
+ uint32_t offset = a->imm;
+ TCGv_i32 addr;
+ TCGv_i32 value = NULL;
+
+ if (!a->a) {
+ offset = -offset;
+ }
+
+ if (!do_access && !a->w) {
+ return NULL;
+ }
+
+ addr = load_reg(s, a->rn);
+ if (a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ if (do_access) {
+ value = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
+ MO_UL | MO_ALIGN | s->be_data);
+ }
+
+ if (a->w) {
+ /* writeback */
+ if (!a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ return value;
+}
+
+static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ if (a->rn == 15) {
+ return false;
+ }
+ return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
+}
+
+static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ if (a->rn == 15) {
+ return false;
+ }
+ return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
+}
+
+static bool trans_NOCP(DisasContext *s, arg_nocp *a)
+{
+ /*
+ * Handle M-profile early check for disabled coprocessor:
+ * all we need to do here is emit the NOCP exception if
+ * the coprocessor is disabled. Otherwise we return false
+ * and the real VFP/etc decode will handle the insn.
+ */
+ assert(arm_dc_feature(s, ARM_FEATURE_M));
+
+ if (a->cp == 11) {
+ a->cp = 10;
+ }
+ if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
+ (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
+        /* in v8.1M cps 8, 9, 14 and 15 are also governed by the cp10 enable */
+ a->cp = 10;
+ }
+
+ if (a->cp != 10) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), default_exception_el(s));
+ return true;
+ }
+
+ if (s->fp_excp_el != 0) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ return true;
+ }
+
+ return false;
+}
+
+static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
+{
+ /* This range needs a coprocessor check for v8.1M and later only */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+ return trans_NOCP(s, a);
+}
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
new file mode 100644
index 000000000..4267d43cc
--- /dev/null
+++ b/target/arm/translate-mve.c
@@ -0,0 +1,2311 @@
+/*
+ * ARM translation: M-profile MVE instructions
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/exec-all.h"
+#include "exec/gen-icount.h"
+#include "translate.h"
+#include "translate-a32.h"
+
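+/*
+ * Decode helper for the generated decoder included below: the
+ * VIDUP/VIWDUP-family immediate encodes its step size as a power of
+ * two (1, 2, 4 or 8), so expand the field value here.
+ */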
+static inline int vidup_imm(DisasContext *s, int x)
+{
+ return 1 << x;
+}
+
+/* Include the generated decoder */
+#include "decode-mve.c.inc"
+
+typedef void MVEGenLdStFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenLdStSGFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenLdStIlFn(TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenLongDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
+typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
+typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenDualAccOpFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenVCVTRmodeFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
+
+/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
+static inline long mve_qreg_offset(unsigned reg)
+{
+ return offsetof(CPUARMState, vfp.zregs[reg].d[0]);
+}
+
+static TCGv_ptr mve_qreg_ptr(unsigned reg)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, mve_qreg_offset(reg));
+ return ret;
+}
+
+static bool mve_no_predication(DisasContext *s)
+{
+ /*
+ * Return true if we are executing the entire MVE instruction
+ * with no predication or partial-execution, and so we can safely
+ * use an inline TCG vector implementation.
+ */
+ return s->eci == 0 && s->mve_no_pred;
+}
+
+static bool mve_check_qreg_bank(DisasContext *s, int qmask)
+{
+ /*
+ * Check whether Qregs are in range. For v8.1M only Q0..Q7
+ * are supported, see VFPSmallRegisterBank().
+ */
+ return qmask < 8;
+}
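+
+/*
+ * Note the trick above: qmask is the OR of all the register numbers in
+ * the group, and any register number of 8 or more sets bit 3, so the
+ * single "< 8" comparison checks every register at once.
+ */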
+
+bool mve_eci_check(DisasContext *s)
+{
+ /*
+ * This is a beatwise insn: check that ECI is valid (not a
+ * reserved value) and note that we are handling it.
+ * Return true if OK, false if we generated an exception.
+ */
+ s->eci_handled = true;
+ switch (s->eci) {
+ case ECI_NONE:
+ case ECI_A0:
+ case ECI_A0A1:
+ case ECI_A0A1A2:
+ case ECI_A0A1A2B0:
+ return true;
+ default:
+ /* Reserved value: INVSTATE UsageFault */
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
+ default_exception_el(s));
+ return false;
+ }
+}
+
+void mve_update_eci(DisasContext *s)
+{
+ /*
+ * The helper function will always update the CPUState field,
+ * so we only need to update the DisasContext field.
+ */
+ if (s->eci) {
+ s->eci = (s->eci == ECI_A0A1A2B0) ? ECI_A0 : ECI_NONE;
+ }
+}
+
+void mve_update_and_store_eci(DisasContext *s)
+{
+ /*
+ * For insns which don't call a helper function that will call
+ * mve_advance_vpt(), this version updates s->eci and also stores
+ * it out to the CPUState field.
+ */
+ if (s->eci) {
+ mve_update_eci(s);
+ store_cpu_field(tcg_constant_i32(s->eci << 4), condexec_bits);
+ }
+}
+
+static bool mve_skip_first_beat(DisasContext *s)
+{
+ /* Return true if PSR.ECI says we must skip the first beat of this insn */
+ switch (s->eci) {
+ case ECI_NONE:
+ return false;
+ case ECI_A0:
+ case ECI_A0A1:
+ case ECI_A0A1A2:
+ case ECI_A0A1A2B0:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn,
+ unsigned msize)
+{
+ TCGv_i32 addr;
+ uint32_t offset;
+ TCGv_ptr qreg;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd) ||
+ !fn) {
+ return false;
+ }
+
+ /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
+ if (a->rn == 15 || (a->rn == 13 && a->w)) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ offset = a->imm << msize;
+ if (!a->a) {
+ offset = -offset;
+ }
+ addr = load_reg(s, a->rn);
+ if (a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+
+ qreg = mve_qreg_ptr(a->qd);
+ fn(cpu_env, qreg, addr);
+ tcg_temp_free_ptr(qreg);
+
+ /*
+ * Writeback always happens after the last beat of the insn,
+ * regardless of predication
+ */
+ if (a->w) {
+ if (!a->p) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VLDR_VSTR(DisasContext *s, arg_VLDR_VSTR *a)
+{
+ static MVEGenLdStFn * const ldstfns[4][2] = {
+ { gen_helper_mve_vstrb, gen_helper_mve_vldrb },
+ { gen_helper_mve_vstrh, gen_helper_mve_vldrh },
+ { gen_helper_mve_vstrw, gen_helper_mve_vldrw },
+ { NULL, NULL }
+ };
+ return do_ldst(s, a, ldstfns[a->size][a->l], a->size);
+}
+
+#define DO_VLDST_WIDE_NARROW(OP, SLD, ULD, ST, MSIZE) \
+ static bool trans_##OP(DisasContext *s, arg_VLDR_VSTR *a) \
+ { \
+ static MVEGenLdStFn * const ldstfns[2][2] = { \
+ { gen_helper_mve_##ST, gen_helper_mve_##SLD }, \
+ { NULL, gen_helper_mve_##ULD }, \
+ }; \
+ return do_ldst(s, a, ldstfns[a->u][a->l], MSIZE); \
+ }
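+
+/*
+ * In the ldstfns tables built by this macro, indexing is [a->u][a->l]:
+ * row 0 holds the store and the sign-extending load, while row 1 holds
+ * only the zero-extending load (there is no separate unsigned store,
+ * so that slot stays NULL).
+ */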
+
+DO_VLDST_WIDE_NARROW(VLDSTB_H, vldrb_sh, vldrb_uh, vstrb_h, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTB_W, vldrb_sw, vldrb_uw, vstrb_w, MO_8)
+DO_VLDST_WIDE_NARROW(VLDSTH_W, vldrh_sw, vldrh_uw, vstrh_w, MO_16)
+
+static bool do_ldst_sg(DisasContext *s, arg_vldst_sg *a, MVEGenLdStSGFn fn)
+{
+ TCGv_i32 addr;
+ TCGv_ptr qd, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn || a->rn == 15) {
+ /* Rn case is UNPREDICTABLE */
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ addr = load_reg(s, a->rn);
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, addr);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ tcg_temp_free_i32(addr);
+ mve_update_eci(s);
+ return true;
+}
+
+/*
+ * The naming scheme here is "vldrb_sg_sh == in-memory byte loads
+ * sign-extended to halfword elements in register". _os_ indicates that
+ * the offsets in Qm should be scaled by the element size.
+ */
+/* This macro is just to make the arrays more compact in these functions */
+#define F(N) gen_helper_mve_##N
+
+/* VLDRB/VSTRB (ie msize 1) with OS=1 is UNPREDICTABLE; we UNDEF */
+static bool trans_VLDR_S_sg(DisasContext *s, arg_vldst_sg *a)
+{
+ static MVEGenLdStSGFn * const fns[2][4][4] = { {
+ { NULL, F(vldrb_sg_sh), F(vldrb_sg_sw), NULL },
+ { NULL, NULL, F(vldrh_sg_sw), NULL },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL }
+ }, {
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, F(vldrh_sg_os_sw), NULL },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL }
+ }
+ };
+ if (a->qd == a->qm) {
+ return false; /* UNPREDICTABLE */
+ }
+ return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]);
+}
+
+static bool trans_VLDR_U_sg(DisasContext *s, arg_vldst_sg *a)
+{
+ static MVEGenLdStSGFn * const fns[2][4][4] = { {
+ { F(vldrb_sg_ub), F(vldrb_sg_uh), F(vldrb_sg_uw), NULL },
+ { NULL, F(vldrh_sg_uh), F(vldrh_sg_uw), NULL },
+ { NULL, NULL, F(vldrw_sg_uw), NULL },
+ { NULL, NULL, NULL, F(vldrd_sg_ud) }
+ }, {
+ { NULL, NULL, NULL, NULL },
+ { NULL, F(vldrh_sg_os_uh), F(vldrh_sg_os_uw), NULL },
+ { NULL, NULL, F(vldrw_sg_os_uw), NULL },
+ { NULL, NULL, NULL, F(vldrd_sg_os_ud) }
+ }
+ };
+ if (a->qd == a->qm) {
+ return false; /* UNPREDICTABLE */
+ }
+ return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]);
+}
+
+static bool trans_VSTR_sg(DisasContext *s, arg_vldst_sg *a)
+{
+ static MVEGenLdStSGFn * const fns[2][4][4] = { {
+ { F(vstrb_sg_ub), F(vstrb_sg_uh), F(vstrb_sg_uw), NULL },
+ { NULL, F(vstrh_sg_uh), F(vstrh_sg_uw), NULL },
+ { NULL, NULL, F(vstrw_sg_uw), NULL },
+ { NULL, NULL, NULL, F(vstrd_sg_ud) }
+ }, {
+ { NULL, NULL, NULL, NULL },
+ { NULL, F(vstrh_sg_os_uh), F(vstrh_sg_os_uw), NULL },
+ { NULL, NULL, F(vstrw_sg_os_uw), NULL },
+ { NULL, NULL, NULL, F(vstrd_sg_os_ud) }
+ }
+ };
+ return do_ldst_sg(s, a, fns[a->os][a->msize][a->size]);
+}
+
+#undef F
+
+static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a,
+ MVEGenLdStSGFn *fn, unsigned msize)
+{
+ uint32_t offset;
+ TCGv_ptr qd, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ offset = a->imm << msize;
+ if (!a->a) {
+ offset = -offset;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, tcg_constant_i32(offset));
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VLDRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
+{
+ static MVEGenLdStSGFn * const fns[] = {
+ gen_helper_mve_vldrw_sg_uw,
+ gen_helper_mve_vldrw_sg_wb_uw,
+ };
+ if (a->qd == a->qm) {
+ return false; /* UNPREDICTABLE */
+ }
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_32);
+}
+
+static bool trans_VLDRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
+{
+ static MVEGenLdStSGFn * const fns[] = {
+ gen_helper_mve_vldrd_sg_ud,
+ gen_helper_mve_vldrd_sg_wb_ud,
+ };
+ if (a->qd == a->qm) {
+ return false; /* UNPREDICTABLE */
+ }
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_64);
+}
+
+static bool trans_VSTRW_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
+{
+ static MVEGenLdStSGFn * const fns[] = {
+ gen_helper_mve_vstrw_sg_uw,
+ gen_helper_mve_vstrw_sg_wb_uw,
+ };
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_32);
+}
+
+static bool trans_VSTRD_sg_imm(DisasContext *s, arg_vldst_sg_imm *a)
+{
+ static MVEGenLdStSGFn * const fns[] = {
+ gen_helper_mve_vstrd_sg_ud,
+ gen_helper_mve_vstrd_sg_wb_ud,
+ };
+ return do_ldst_sg_imm(s, a, fns[a->w], MO_64);
+}
+
+static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn,
+ int addrinc)
+{
+ TCGv_i32 rn;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd) ||
+ !fn || (a->rn == 13 && a->w) || a->rn == 15) {
+ /* Variously UNPREDICTABLE or UNDEF or related-encoding */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ rn = load_reg(s, a->rn);
+ /*
+ * We pass the index of Qd, not a pointer, because the helper must
+ * access multiple Q registers starting at Qd and working up.
+ */
+ fn(cpu_env, tcg_constant_i32(a->qd), rn);
+
+ if (a->w) {
+ tcg_gen_addi_i32(rn, rn, addrinc);
+ store_reg(s, a->rn, rn);
+ } else {
+ tcg_temp_free_i32(rn);
+ }
+ mve_update_and_store_eci(s);
+ return true;
+}
+
+/* This macro is just to make the arrays more compact in these functions */
+#define F(N) gen_helper_mve_##N
+
+static bool trans_VLD2(DisasContext *s, arg_vldst_il *a)
+{
+ static MVEGenLdStIlFn * const fns[4][4] = {
+ { F(vld20b), F(vld20h), F(vld20w), NULL, },
+ { F(vld21b), F(vld21h), F(vld21w), NULL, },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL },
+ };
+ if (a->qd > 6) {
+ return false;
+ }
+ return do_vldst_il(s, a, fns[a->pat][a->size], 32);
+}
+
+static bool trans_VLD4(DisasContext *s, arg_vldst_il *a)
+{
+ static MVEGenLdStIlFn * const fns[4][4] = {
+ { F(vld40b), F(vld40h), F(vld40w), NULL, },
+ { F(vld41b), F(vld41h), F(vld41w), NULL, },
+ { F(vld42b), F(vld42h), F(vld42w), NULL, },
+ { F(vld43b), F(vld43h), F(vld43w), NULL, },
+ };
+ if (a->qd > 4) {
+ return false;
+ }
+ return do_vldst_il(s, a, fns[a->pat][a->size], 64);
+}
+
+static bool trans_VST2(DisasContext *s, arg_vldst_il *a)
+{
+ static MVEGenLdStIlFn * const fns[4][4] = {
+ { F(vst20b), F(vst20h), F(vst20w), NULL, },
+ { F(vst21b), F(vst21h), F(vst21w), NULL, },
+ { NULL, NULL, NULL, NULL },
+ { NULL, NULL, NULL, NULL },
+ };
+ if (a->qd > 6) {
+ return false;
+ }
+ return do_vldst_il(s, a, fns[a->pat][a->size], 32);
+}
+
+static bool trans_VST4(DisasContext *s, arg_vldst_il *a)
+{
+ static MVEGenLdStIlFn * const fns[4][4] = {
+ { F(vst40b), F(vst40h), F(vst40w), NULL, },
+ { F(vst41b), F(vst41h), F(vst41w), NULL, },
+ { F(vst42b), F(vst42h), F(vst42w), NULL, },
+ { F(vst43b), F(vst43h), F(vst43w), NULL, },
+ };
+ if (a->qd > 4) {
+ return false;
+ }
+ return do_vldst_il(s, a, fns[a->pat][a->size], 64);
+}
+
+#undef F
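+
+/*
+ * The a->qd range checks in the four functions above keep the whole
+ * group of consecutive Q registers within Q0..Q7: VLD2/VST2 touch Qd
+ * and Qd+1 (so qd <= 6), while VLD4/VST4 touch Qd..Qd+3 (so qd <= 4).
+ */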
+
+static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
+{
+ TCGv_ptr qd;
+ TCGv_i32 rt;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (a->rt == 13 || a->rt == 15) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ rt = load_reg(s, a->rt);
+ if (mve_no_predication(s)) {
+ tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ tcg_gen_dup_i32(a->size, rt, rt);
+ gen_helper_mve_vdup(cpu_env, qd, rt);
+ tcg_temp_free_ptr(qd);
+ }
+ tcg_temp_free_i32(rt);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn,
+ GVecGen2Fn vecfn)
+{
+ TCGv_ptr qd, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ }
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn)
+{
+ return do_1op_vec(s, a, fn, NULL);
+}
+
+#define DO_1OP_VEC(INSN, FN, VECFN) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_1op_vec(s, a, fns[a->size], VECFN); \
+ }
+
+#define DO_1OP(INSN, FN) DO_1OP_VEC(INSN, FN, NULL)
+
+DO_1OP(VCLZ, vclz)
+DO_1OP(VCLS, vcls)
+DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs)
+DO_1OP_VEC(VNEG, vneg, tcg_gen_gvec_neg)
+DO_1OP(VQABS, vqabs)
+DO_1OP(VQNEG, vqneg)
+DO_1OP(VMAXA, vmaxa)
+DO_1OP(VMINA, vmina)
+
+/*
+ * For simple float/int conversions we use the fixed-point
+ * conversion helpers with a zero shift count
+ */
+#define DO_VCVT(INSN, HFN, SFN) \
+ static void gen_##INSN##h(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \
+ { \
+ gen_helper_mve_##HFN(env, qd, qm, tcg_constant_i32(0)); \
+ } \
+ static void gen_##INSN##s(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \
+ { \
+ gen_helper_mve_##SFN(env, qd, qm, tcg_constant_i32(0)); \
+ } \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ NULL, \
+ gen_##INSN##h, \
+ gen_##INSN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_1op(s, a, fns[a->size]); \
+ }
+
+DO_VCVT(VCVT_SF, vcvt_sh, vcvt_sf)
+DO_VCVT(VCVT_UF, vcvt_uh, vcvt_uf)
+DO_VCVT(VCVT_FS, vcvt_hs, vcvt_fs)
+DO_VCVT(VCVT_FU, vcvt_hu, vcvt_fu)
+
+static bool do_vcvt_rmode(DisasContext *s, arg_1op *a,
+ enum arm_fprounding rmode, bool u)
+{
+ /*
+ * Handle VCVT fp to int with specified rounding mode.
+ * This is a 1op fn but we must pass the rounding mode as
+ * an immediate to the helper.
+ */
+ TCGv_ptr qd, qm;
+ static MVEGenVCVTRmodeFn * const fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_mve_vcvt_rm_sh, gen_helper_mve_vcvt_rm_uh },
+ { gen_helper_mve_vcvt_rm_ss, gen_helper_mve_vcvt_rm_us },
+ { NULL, NULL },
+ };
+ MVEGenVCVTRmodeFn *fn = fns[a->size][u];
+
+ if (!dc_isar_feature(aa32_mve_fp, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode)));
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_VCVT_RMODE(INSN, RMODE, U) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ return do_vcvt_rmode(s, a, RMODE, U); \
+ } \
+
+DO_VCVT_RMODE(VCVTAS, FPROUNDING_TIEAWAY, false)
+DO_VCVT_RMODE(VCVTAU, FPROUNDING_TIEAWAY, true)
+DO_VCVT_RMODE(VCVTNS, FPROUNDING_TIEEVEN, false)
+DO_VCVT_RMODE(VCVTNU, FPROUNDING_TIEEVEN, true)
+DO_VCVT_RMODE(VCVTPS, FPROUNDING_POSINF, false)
+DO_VCVT_RMODE(VCVTPU, FPROUNDING_POSINF, true)
+DO_VCVT_RMODE(VCVTMS, FPROUNDING_NEGINF, false)
+DO_VCVT_RMODE(VCVTMU, FPROUNDING_NEGINF, true)
+
+#define DO_VCVT_SH(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_1op(s, a, gen_helper_mve_##FN); \
+ } \
+
+DO_VCVT_SH(VCVTB_SH, vcvtb_sh)
+DO_VCVT_SH(VCVTT_SH, vcvtt_sh)
+DO_VCVT_SH(VCVTB_HS, vcvtb_hs)
+DO_VCVT_SH(VCVTT_HS, vcvtt_hs)
+
+#define DO_VRINT(INSN, RMODE) \
+ static void gen_##INSN##h(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \
+ { \
+ gen_helper_mve_vrint_rm_h(env, qd, qm, \
+ tcg_constant_i32(arm_rmode_to_sf(RMODE))); \
+ } \
+ static void gen_##INSN##s(TCGv_ptr env, TCGv_ptr qd, TCGv_ptr qm) \
+ { \
+ gen_helper_mve_vrint_rm_s(env, qd, qm, \
+ tcg_constant_i32(arm_rmode_to_sf(RMODE))); \
+ } \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ NULL, \
+ gen_##INSN##h, \
+ gen_##INSN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_1op(s, a, fns[a->size]); \
+ }
+
+DO_VRINT(VRINTN, FPROUNDING_TIEEVEN)
+DO_VRINT(VRINTA, FPROUNDING_TIEAWAY)
+DO_VRINT(VRINTZ, FPROUNDING_ZERO)
+DO_VRINT(VRINTM, FPROUNDING_NEGINF)
+DO_VRINT(VRINTP, FPROUNDING_POSINF)
+
+static bool trans_VRINTX(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vrintx_h,
+ gen_helper_mve_vrintx_s,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
+
+/* Narrowing moves: only size 0 and 1 are valid */
+#define DO_VMOVN(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_1op *a) \
+ { \
+ static MVEGenOneOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ NULL, \
+ NULL, \
+ }; \
+ return do_1op(s, a, fns[a->size]); \
+ }
+
+DO_VMOVN(VMOVNB, vmovnb)
+DO_VMOVN(VMOVNT, vmovnt)
+DO_VMOVN(VQMOVUNB, vqmovunb)
+DO_VMOVN(VQMOVUNT, vqmovunt)
+DO_VMOVN(VQMOVN_BS, vqmovnbs)
+DO_VMOVN(VQMOVN_TS, vqmovnts)
+DO_VMOVN(VQMOVN_BU, vqmovnbu)
+DO_VMOVN(VQMOVN_TU, vqmovntu)
+
+static bool trans_VREV16(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ gen_helper_mve_vrev16b,
+ NULL,
+ NULL,
+ NULL,
+ };
+ return do_1op(s, a, fns[a->size]);
+}
+
+static bool trans_VREV32(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ gen_helper_mve_vrev32b,
+ gen_helper_mve_vrev32h,
+ NULL,
+ NULL,
+ };
+ return do_1op(s, a, fns[a->size]);
+}
+
+static bool trans_VREV64(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ gen_helper_mve_vrev64b,
+ gen_helper_mve_vrev64h,
+ gen_helper_mve_vrev64w,
+ NULL,
+ };
+ return do_1op(s, a, fns[a->size]);
+}
+
+static bool trans_VMVN(DisasContext *s, arg_1op *a)
+{
+ return do_1op_vec(s, a, gen_helper_mve_vmvn, tcg_gen_gvec_not);
+}
+
+static bool trans_VABS_fp(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vfabsh,
+ gen_helper_mve_vfabss,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
+
+static bool trans_VNEG_fp(DisasContext *s, arg_1op *a)
+{
+ static MVEGenOneOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vfnegh,
+ gen_helper_mve_vfnegs,
+ NULL,
+ };
+ if (!dc_isar_feature(aa32_mve_fp, s)) {
+ return false;
+ }
+ return do_1op(s, a, fns[a->size]);
+}
+
+static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn,
+ GVecGen3Fn *vecfn)
+{
+ TCGv_ptr qd, qn, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qn | a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qn),
+ mve_qreg_offset(a->qm), 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qn, qm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+ }
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn *fn)
+{
+ return do_2op_vec(s, a, fn, NULL);
+}
+
+#define DO_LOGIC(INSN, HELPER, VECFN) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ return do_2op_vec(s, a, HELPER, VECFN); \
+ }
+
+DO_LOGIC(VAND, gen_helper_mve_vand, tcg_gen_gvec_and)
+DO_LOGIC(VBIC, gen_helper_mve_vbic, tcg_gen_gvec_andc)
+DO_LOGIC(VORR, gen_helper_mve_vorr, tcg_gen_gvec_or)
+DO_LOGIC(VORN, gen_helper_mve_vorn, tcg_gen_gvec_orc)
+DO_LOGIC(VEOR, gen_helper_mve_veor, tcg_gen_gvec_xor)
+
+static bool trans_VPSEL(DisasContext *s, arg_2op *a)
+{
+ /* This insn updates predication bits */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ return do_2op(s, a, gen_helper_mve_vpsel);
+}
+
+#define DO_2OP_VEC(INSN, FN, VECFN) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ static MVEGenTwoOpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2op_vec(s, a, fns[a->size], VECFN); \
+ }
+
+#define DO_2OP(INSN, FN) DO_2OP_VEC(INSN, FN, NULL)
+
+DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add)
+DO_2OP_VEC(VSUB, vsub, tcg_gen_gvec_sub)
+DO_2OP_VEC(VMUL, vmul, tcg_gen_gvec_mul)
+DO_2OP(VMULH_S, vmulhs)
+DO_2OP(VMULH_U, vmulhu)
+DO_2OP(VRMULH_S, vrmulhs)
+DO_2OP(VRMULH_U, vrmulhu)
+DO_2OP_VEC(VMAX_S, vmaxs, tcg_gen_gvec_smax)
+DO_2OP_VEC(VMAX_U, vmaxu, tcg_gen_gvec_umax)
+DO_2OP_VEC(VMIN_S, vmins, tcg_gen_gvec_smin)
+DO_2OP_VEC(VMIN_U, vminu, tcg_gen_gvec_umin)
+DO_2OP(VABD_S, vabds)
+DO_2OP(VABD_U, vabdu)
+DO_2OP(VHADD_S, vhadds)
+DO_2OP(VHADD_U, vhaddu)
+DO_2OP(VHSUB_S, vhsubs)
+DO_2OP(VHSUB_U, vhsubu)
+DO_2OP(VMULL_BS, vmullbs)
+DO_2OP(VMULL_BU, vmullbu)
+DO_2OP(VMULL_TS, vmullts)
+DO_2OP(VMULL_TU, vmulltu)
+DO_2OP(VQDMULH, vqdmulh)
+DO_2OP(VQRDMULH, vqrdmulh)
+DO_2OP(VQADD_S, vqadds)
+DO_2OP(VQADD_U, vqaddu)
+DO_2OP(VQSUB_S, vqsubs)
+DO_2OP(VQSUB_U, vqsubu)
+DO_2OP(VSHL_S, vshls)
+DO_2OP(VSHL_U, vshlu)
+DO_2OP(VRSHL_S, vrshls)
+DO_2OP(VRSHL_U, vrshlu)
+DO_2OP(VQSHL_S, vqshls)
+DO_2OP(VQSHL_U, vqshlu)
+DO_2OP(VQRSHL_S, vqrshls)
+DO_2OP(VQRSHL_U, vqrshlu)
+DO_2OP(VQDMLADH, vqdmladh)
+DO_2OP(VQDMLADHX, vqdmladhx)
+DO_2OP(VQRDMLADH, vqrdmladh)
+DO_2OP(VQRDMLADHX, vqrdmladhx)
+DO_2OP(VQDMLSDH, vqdmlsdh)
+DO_2OP(VQDMLSDHX, vqdmlsdhx)
+DO_2OP(VQRDMLSDH, vqrdmlsdh)
+DO_2OP(VQRDMLSDHX, vqrdmlsdhx)
+DO_2OP(VRHADD_S, vrhadds)
+DO_2OP(VRHADD_U, vrhaddu)
+/*
+ * VCADD Qd == Qm at size MO_32 is UNPREDICTABLE; we choose not to diagnose
+ * so we can reuse the DO_2OP macro. (Our implementation calculates the
+ * "expected" results in this case.) Similarly for VHCADD.
+ */
+DO_2OP(VCADD90, vcadd90)
+DO_2OP(VCADD270, vcadd270)
+DO_2OP(VHCADD90, vhcadd90)
+DO_2OP(VHCADD270, vhcadd270)
+
+static bool trans_VQDMULLB(DisasContext *s, arg_2op *a)
+{
+ static MVEGenTwoOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullbh,
+ gen_helper_mve_vqdmullbw,
+ NULL,
+ };
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op(s, a, fns[a->size]);
+}
+
+static bool trans_VQDMULLT(DisasContext *s, arg_2op *a)
+{
+ static MVEGenTwoOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullth,
+ gen_helper_mve_vqdmulltw,
+ NULL,
+ };
+ if (a->size == MO_32 && (a->qd == a->qm || a->qd == a->qn)) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op(s, a, fns[a->size]);
+}
+
+static bool trans_VMULLP_B(DisasContext *s, arg_2op *a)
+{
+ /*
+ * Note that a->size indicates the output size, ie VMULL.P8
+ * is the 8x8->16 operation and a->size is MO_16; VMULL.P16
+ * is the 16x16->32 operation and a->size is MO_32.
+ */
+ static MVEGenTwoOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vmullpbh,
+ gen_helper_mve_vmullpbw,
+ NULL,
+ };
+ return do_2op(s, a, fns[a->size]);
+}
+
+static bool trans_VMULLP_T(DisasContext *s, arg_2op *a)
+{
+ /* a->size is as for trans_VMULLP_B */
+ static MVEGenTwoOpFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vmullpth,
+ gen_helper_mve_vmullptw,
+ NULL,
+ };
+ return do_2op(s, a, fns[a->size]);
+}
+
+/*
+ * VADC and VSBC: these perform an add-with-carry or subtract-with-carry
+ * of the 32-bit elements in each lane of the input vectors, where the
+ * carry-out of each add is the carry-in of the next. The initial carry
+ * input is either fixed (0 for VADCI, 1 for VSBCI) or is from FPSCR.C
+ * (for VADC and VSBC); the carry out at the end is written back to FPSCR.C.
+ * These insns are subject to beat-wise execution. Partial execution
+ * of an I=1 (initial carry input fixed) insn which does not
+ * execute the first beat must start with the current FPSCR.NZCV
+ * value, not the fixed constant input.
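+ * For example, if a VADCI is interrupted after beat 0, that beat has
+ * already consumed the fixed carry-in and written its carry-out to
+ * FPSCR.C, so on resumption we must take the carry from FPSCR.C
+ * exactly as plain VADC does.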
+ */
+static bool trans_VADC(DisasContext *s, arg_2op *a)
+{
+ return do_2op(s, a, gen_helper_mve_vadc);
+}
+
+static bool trans_VADCI(DisasContext *s, arg_2op *a)
+{
+ if (mve_skip_first_beat(s)) {
+ return trans_VADC(s, a);
+ }
+ return do_2op(s, a, gen_helper_mve_vadci);
+}
+
+static bool trans_VSBC(DisasContext *s, arg_2op *a)
+{
+ return do_2op(s, a, gen_helper_mve_vsbc);
+}
+
+static bool trans_VSBCI(DisasContext *s, arg_2op *a)
+{
+ if (mve_skip_first_beat(s)) {
+ return trans_VSBC(s, a);
+ }
+ return do_2op(s, a, gen_helper_mve_vsbci);
+}
+
+#define DO_2OP_FP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2op *a) \
+ { \
+ static MVEGenTwoOpFn * const fns[] = { \
+ NULL, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_2op(s, a, fns[a->size]); \
+ }
+
+DO_2OP_FP(VADD_fp, vfadd)
+DO_2OP_FP(VSUB_fp, vfsub)
+DO_2OP_FP(VMUL_fp, vfmul)
+DO_2OP_FP(VABD_fp, vfabd)
+DO_2OP_FP(VMAXNM, vmaxnm)
+DO_2OP_FP(VMINNM, vminnm)
+DO_2OP_FP(VCADD90_fp, vfcadd90)
+DO_2OP_FP(VCADD270_fp, vfcadd270)
+DO_2OP_FP(VFMA, vfma)
+DO_2OP_FP(VFMS, vfms)
+DO_2OP_FP(VCMUL0, vcmul0)
+DO_2OP_FP(VCMUL90, vcmul90)
+DO_2OP_FP(VCMUL180, vcmul180)
+DO_2OP_FP(VCMUL270, vcmul270)
+DO_2OP_FP(VCMLA0, vcmla0)
+DO_2OP_FP(VCMLA90, vcmla90)
+DO_2OP_FP(VCMLA180, vcmla180)
+DO_2OP_FP(VCMLA270, vcmla270)
+DO_2OP_FP(VMAXNMA, vmaxnma)
+DO_2OP_FP(VMINNMA, vminnma)
+
+static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
+ MVEGenTwoOpScalarFn fn)
+{
+ TCGv_ptr qd, qn;
+ TCGv_i32 rm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qn) ||
+ !fn) {
+ return false;
+ }
+ if (a->rm == 13 || a->rm == 15) {
+ /* UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ qn = mve_qreg_ptr(a->qn);
+ rm = load_reg(s, a->rm);
+ fn(cpu_env, qd, qn, rm);
+ tcg_temp_free_i32(rm);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qn);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_2OP_SCALAR(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \
+ { \
+ static MVEGenTwoOpScalarFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2op_scalar(s, a, fns[a->size]); \
+ }
+
+DO_2OP_SCALAR(VADD_scalar, vadd_scalar)
+DO_2OP_SCALAR(VSUB_scalar, vsub_scalar)
+DO_2OP_SCALAR(VMUL_scalar, vmul_scalar)
+DO_2OP_SCALAR(VHADD_S_scalar, vhadds_scalar)
+DO_2OP_SCALAR(VHADD_U_scalar, vhaddu_scalar)
+DO_2OP_SCALAR(VHSUB_S_scalar, vhsubs_scalar)
+DO_2OP_SCALAR(VHSUB_U_scalar, vhsubu_scalar)
+DO_2OP_SCALAR(VQADD_S_scalar, vqadds_scalar)
+DO_2OP_SCALAR(VQADD_U_scalar, vqaddu_scalar)
+DO_2OP_SCALAR(VQSUB_S_scalar, vqsubs_scalar)
+DO_2OP_SCALAR(VQSUB_U_scalar, vqsubu_scalar)
+DO_2OP_SCALAR(VQDMULH_scalar, vqdmulh_scalar)
+DO_2OP_SCALAR(VQRDMULH_scalar, vqrdmulh_scalar)
+DO_2OP_SCALAR(VBRSR, vbrsr)
+DO_2OP_SCALAR(VMLA, vmla)
+DO_2OP_SCALAR(VMLAS, vmlas)
+DO_2OP_SCALAR(VQDMLAH, vqdmlah)
+DO_2OP_SCALAR(VQRDMLAH, vqrdmlah)
+DO_2OP_SCALAR(VQDMLASH, vqdmlash)
+DO_2OP_SCALAR(VQRDMLASH, vqrdmlash)
+
+static bool trans_VQDMULLB_scalar(DisasContext *s, arg_2scalar *a)
+{
+ static MVEGenTwoOpScalarFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullb_scalarh,
+ gen_helper_mve_vqdmullb_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
+static bool trans_VQDMULLT_scalar(DisasContext *s, arg_2scalar *a)
+{
+ static MVEGenTwoOpScalarFn * const fns[] = {
+ NULL,
+ gen_helper_mve_vqdmullt_scalarh,
+ gen_helper_mve_vqdmullt_scalarw,
+ NULL,
+ };
+ if (a->qd == a->qn && a->size == MO_32) {
+ /* UNPREDICTABLE; we choose to undef */
+ return false;
+ }
+ return do_2op_scalar(s, a, fns[a->size]);
+}
+
+#define DO_2OP_FP_SCALAR(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2scalar *a) \
+ { \
+ static MVEGenTwoOpScalarFn * const fns[] = { \
+ NULL, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_2op_scalar(s, a, fns[a->size]); \
+ }
+
+DO_2OP_FP_SCALAR(VADD_fp_scalar, vfadd_scalar)
+DO_2OP_FP_SCALAR(VSUB_fp_scalar, vfsub_scalar)
+DO_2OP_FP_SCALAR(VMUL_fp_scalar, vfmul_scalar)
+DO_2OP_FP_SCALAR(VFMA_scalar, vfma_scalar)
+DO_2OP_FP_SCALAR(VFMAS_scalar, vfmas_scalar)
+
+static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a,
+ MVEGenLongDualAccOpFn *fn)
+{
+ TCGv_ptr qn, qm;
+ TCGv_i64 rda;
+ TCGv_i32 rdalo, rdahi;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qn | a->qm) ||
+ !fn) {
+ return false;
+ }
+ /*
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
+ */
+ if (a->rdahi == 13 || a->rdahi == 15) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+
+ /*
+ * This insn is subject to beat-wise execution. Partial execution
+ * of an A=0 (no-accumulate) insn which does not execute the first
+ * beat must start with the current rda value, not 0.
+ */
+ if (a->a || mve_skip_first_beat(s)) {
+ rda = tcg_temp_new_i64();
+ rdalo = load_reg(s, a->rdalo);
+ rdahi = load_reg(s, a->rdahi);
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_temp_free_i32(rdalo);
+ tcg_temp_free_i32(rdahi);
+ } else {
+ rda = tcg_const_i64(0);
+ }
+
+ fn(rda, cpu_env, qn, qm, rda);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+
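+ /* Split the 64-bit result back into RdaLo (low half) and RdaHi (high half) */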
+ rdalo = tcg_temp_new_i32();
+ rdahi = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(rdalo, rda);
+ tcg_gen_extrh_i64_i32(rdahi, rda);
+ store_reg(s, a->rdalo, rdalo);
+ store_reg(s, a->rdahi, rdahi);
+ tcg_temp_free_i64(rda);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VMLALDAV_S(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_mve_vmlaldavsh, gen_helper_mve_vmlaldavxsh },
+ { gen_helper_mve_vmlaldavsw, gen_helper_mve_vmlaldavxsw },
+ { NULL, NULL },
+ };
+ return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
+
+static bool trans_VMLALDAV_U(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_mve_vmlaldavuh, NULL },
+ { gen_helper_mve_vmlaldavuw, NULL },
+ { NULL, NULL },
+ };
+ return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
+
+static bool trans_VMLSLDAV(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_mve_vmlsldavsh, gen_helper_mve_vmlsldavxsh },
+ { gen_helper_mve_vmlsldavsw, gen_helper_mve_vmlsldavxsw },
+ { NULL, NULL },
+ };
+ return do_long_dual_acc(s, a, fns[a->size][a->x]);
+}
+
+static bool trans_VRMLALDAVH_S(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[] = {
+ gen_helper_mve_vrmlaldavhsw, gen_helper_mve_vrmlaldavhxsw,
+ };
+ return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLALDAVH_U(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[] = {
+ gen_helper_mve_vrmlaldavhuw, NULL,
+ };
+ return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool trans_VRMLSLDAVH(DisasContext *s, arg_vmlaldav *a)
+{
+ static MVEGenLongDualAccOpFn * const fns[] = {
+ gen_helper_mve_vrmlsldavhsw, gen_helper_mve_vrmlsldavhxsw,
+ };
+ return do_long_dual_acc(s, a, fns[a->x]);
+}
+
+static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn)
+{
+ TCGv_ptr qn, qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qn) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+
+ /*
+ * This insn is subject to beat-wise execution. Partial execution
+ * of an A=0 (no-accumulate) insn which does not execute the first
+ * beat must start with the current rda value, not 0.
+ */
+ if (a->a || mve_skip_first_beat(s)) {
+ rda = load_reg(s, a->rda);
+ } else {
+ rda = tcg_const_i32(0);
+ }
+
+ fn(rda, cpu_env, qn, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_DUAL_ACC(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vmladav *a) \
+ { \
+ static MVEGenDualAccOpFn * const fns[4][2] = { \
+ { gen_helper_mve_##FN##b, gen_helper_mve_##FN##xb }, \
+ { gen_helper_mve_##FN##h, gen_helper_mve_##FN##xh }, \
+ { gen_helper_mve_##FN##w, gen_helper_mve_##FN##xw }, \
+ { NULL, NULL }, \
+ }; \
+ return do_dual_acc(s, a, fns[a->size][a->x]); \
+ }
+
+DO_DUAL_ACC(VMLADAV_S, vmladavs)
+DO_DUAL_ACC(VMLSDAV, vmlsdav)
+
+static bool trans_VMLADAV_U(DisasContext *s, arg_vmladav *a)
+{
+ static MVEGenDualAccOpFn * const fns[4][2] = {
+ { gen_helper_mve_vmladavub, NULL },
+ { gen_helper_mve_vmladavuh, NULL },
+ { gen_helper_mve_vmladavuw, NULL },
+ { NULL, NULL },
+ };
+ return do_dual_acc(s, a, fns[a->size][a->x]);
+}
+
+static void gen_vpst(DisasContext *s, uint32_t mask)
+{
+ /*
+ * Set the VPR mask fields. We take advantage of MASK01 and MASK23
+ * being adjacent fields in the register.
+ *
+ * Updating the masks is not predicated, but it is subject to beat-wise
+ * execution, and the mask is updated on the odd-numbered beats.
+ * So if PSR.ECI says we should skip beat 1, we mustn't update the
+ * 01 mask field.
+ */
+ TCGv_i32 vpr = load_cpu_field(v7m.vpr);
+ switch (s->eci) {
+ case ECI_NONE:
+ case ECI_A0:
+ /* Update both 01 and 23 fields */
+ tcg_gen_deposit_i32(vpr, vpr,
+ tcg_constant_i32(mask | (mask << 4)),
+ R_V7M_VPR_MASK01_SHIFT,
+ R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH);
+ break;
+ case ECI_A0A1:
+ case ECI_A0A1A2:
+ case ECI_A0A1A2B0:
+ /* Update only the 23 mask field */
+ tcg_gen_deposit_i32(vpr, vpr,
+ tcg_constant_i32(mask),
+ R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ store_cpu_field(vpr, v7m.vpr);
+}
+
+static bool trans_VPST(DisasContext *s, arg_VPST *a)
+{
+ /* mask == 0 is a "related encoding" */
+ if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+ gen_vpst(s, a->mask);
+ mve_update_and_store_eci(s);
+ return true;
+}
+
+static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a)
+{
+ /*
+ * Invert the predicate in VPR.P0. We have to call out to
+ * a helper because this insn itself is beatwise and can
+ * be predicated.
+ */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ gen_helper_mve_vpnot(cpu_env);
+ /* This insn updates predication bits */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
+{
+ /* VADDV: vector add across vector */
+ static MVEGenVADDVFn * const fns[4][2] = {
+ { gen_helper_mve_vaddvsb, gen_helper_mve_vaddvub },
+ { gen_helper_mve_vaddvsh, gen_helper_mve_vaddvuh },
+ { gen_helper_mve_vaddvsw, gen_helper_mve_vaddvuw },
+ { NULL, NULL }
+ };
+ TCGv_ptr qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ a->size == 3) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * This insn is subject to beat-wise execution. Partial execution
+ * of an A=0 (no-accumulate) insn which does not execute the first
+ * beat must start with the current value of Rda, not zero.
+ */
+ if (a->a || mve_skip_first_beat(s)) {
+ /* Accumulate input from Rda */
+ rda = load_reg(s, a->rda);
+ } else {
+ /* Accumulate starting at zero */
+ rda = tcg_const_i32(0);
+ }
+
+ qm = mve_qreg_ptr(a->qm);
+ fns[a->size][a->u](rda, cpu_env, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qm);
+
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a)
+{
+ /*
+ * Vector Add Long Across Vector: accumulate the 32-bit
+ * elements of the vector into a 64-bit result stored in
+ * a pair of general-purpose registers.
+ * No need to check Qm's bank: it is only 3 bits in decode.
+ */
+ TCGv_ptr qm;
+ TCGv_i64 rda;
+ TCGv_i32 rdalo, rdahi;
+
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+ /*
+ * rdahi == 13 is UNPREDICTABLE; rdahi == 15 is a related
+ * encoding; rdalo always has bit 0 clear so cannot be 13 or 15.
+ */
+ if (a->rdahi == 13 || a->rdahi == 15) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * This insn is subject to beat-wise execution. Partial execution
+ * of an A=0 (no-accumulate) insn which does not execute the first
+ * beat must start with the current value of RdaHi:RdaLo, not zero.
+ */
+ if (a->a || mve_skip_first_beat(s)) {
+ /* Accumulate input from RdaHi:RdaLo */
+ rda = tcg_temp_new_i64();
+ rdalo = load_reg(s, a->rdalo);
+ rdahi = load_reg(s, a->rdahi);
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+ tcg_temp_free_i32(rdalo);
+ tcg_temp_free_i32(rdahi);
+ } else {
+ /* Accumulate starting at zero */
+ rda = tcg_const_i64(0);
+ }
+
+ qm = mve_qreg_ptr(a->qm);
+ if (a->u) {
+ gen_helper_mve_vaddlv_u(rda, cpu_env, qm, rda);
+ } else {
+ gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda);
+ }
+ tcg_temp_free_ptr(qm);
+
+ rdalo = tcg_temp_new_i32();
+ rdahi = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(rdalo, rda);
+ tcg_gen_extrh_i64_i32(rdahi, rda);
+ store_reg(s, a->rdalo, rdalo);
+ store_reg(s, a->rdahi, rdahi);
+ tcg_temp_free_i64(rda);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn,
+ GVecGen2iFn *vecfn)
+{
+ TCGv_ptr qd;
+ uint64_t imm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(MO_64, mve_qreg_offset(a->qd), mve_qreg_offset(a->qd),
+ imm, 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ fn(cpu_env, qd, tcg_constant_i64(imm));
+ tcg_temp_free_ptr(qd);
+ }
+ mve_update_eci(s);
+ return true;
+}
+
+static void gen_gvec_vmovi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
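+ /* aofs (the vector source offset) is unused: VMOV (immediate) has no vector source */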
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, c);
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a)
+{
+ /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+ MVEGenOneOpImmFn *fn;
+ GVecGen2iFn *vecfn;
+
+ if ((a->cmode & 1) && a->cmode < 12) {
+ if (a->op) {
+ /*
+ * For op=1, the immediate will be inverted by asimd_imm_const(),
+ * so the VBIC becomes a logical AND operation.
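+ * (For example, a VBIC.I32 with immediate 0x0000ff00 is performed
+ * as an AND with 0xffff00ff.)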
+ */
+ fn = gen_helper_mve_vandi;
+ vecfn = tcg_gen_gvec_andi;
+ } else {
+ fn = gen_helper_mve_vorri;
+ vecfn = tcg_gen_gvec_ori;
+ }
+ } else {
+ /* There is one unallocated cmode/op combination in this space */
+ if (a->cmode == 15 && a->op == 1) {
+ return false;
+ }
+ /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */
+ fn = gen_helper_mve_vmovi;
+ vecfn = gen_gvec_vmovi;
+ }
+ return do_1imm(s, a, fn, vecfn);
+}
+
+static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+ bool negateshift, GVecGen2iFn vecfn)
+{
+ TCGv_ptr qd, qm;
+ int shift = a->shift;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qd | a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * When we handle a right shift insn using a left-shift helper
+ * which permits a negative shift count to indicate a right-shift,
+ * we must negate the shift count.
+ */
+ if (negateshift) {
+ shift = -shift;
+ }
+
+ if (vecfn && mve_no_predication(s)) {
+ vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm),
+ shift, 16, 16);
+ } else {
+ qd = mve_qreg_ptr(a->qd);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qd, qm, tcg_constant_i32(shift));
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_ptr(qm);
+ }
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn,
+ bool negateshift)
+{
+ return do_2shift_vec(s, a, fn, negateshift, NULL);
+}
+
+#define DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, VECFN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2shift_vec(s, a, fns[a->size], NEGATESHIFT, VECFN); \
+ }
+
+#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \
+ DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, NULL)
+
+static void do_gvec_shri_s(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ /*
+ * We get here with a negated shift count, and we must handle
+ * shifts by the element size, which tcg_gen_gvec_sari() does not do.
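+ * An arithmetic right shift by the element width would copy the sign
+ * bit into every bit position, which is exactly what a shift by
+ * (width - 1) produces, so clamping the count is safe.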
+ */
+ shift = -shift;
+ if (shift == (8 << vece)) {
+ shift--;
+ }
+ tcg_gen_gvec_sari(vece, dofs, aofs, shift, oprsz, maxsz);
+}
+
+static void do_gvec_shri_u(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ /*
+ * We get here with a negated shift count, and we must handle
+ * shifts by the element size, which tcg_gen_gvec_shri() does not do.
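+ * (A logical right shift by the element width always yields zero,
+ * so we write zeroes directly in that case.)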
+ */
+ shift = -shift;
+ if (shift == (8 << vece)) {
+ tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, 0);
+ } else {
+ tcg_gen_gvec_shri(vece, dofs, aofs, shift, oprsz, maxsz);
+ }
+}
+
+DO_2SHIFT_VEC(VSHLI, vshli_u, false, tcg_gen_gvec_shli)
+DO_2SHIFT(VQSHLI_S, vqshli_s, false)
+DO_2SHIFT(VQSHLI_U, vqshli_u, false)
+DO_2SHIFT(VQSHLUI, vqshlui_s, false)
+/* These right shifts use a left-shift helper with negated shift count */
+DO_2SHIFT_VEC(VSHRI_S, vshli_s, true, do_gvec_shri_s)
+DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u)
+DO_2SHIFT(VRSHRI_S, vrshli_s, true)
+DO_2SHIFT(VRSHRI_U, vrshli_u, true)
+
+DO_2SHIFT_VEC(VSRI, vsri, false, gen_gvec_sri)
+DO_2SHIFT_VEC(VSLI, vsli, false, gen_gvec_sli)
+
+#define DO_2SHIFT_FP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_2shift(s, a, gen_helper_mve_##FN, false); \
+ }
+
+DO_2SHIFT_FP(VCVT_SH_fixed, vcvt_sh)
+DO_2SHIFT_FP(VCVT_UH_fixed, vcvt_uh)
+DO_2SHIFT_FP(VCVT_HS_fixed, vcvt_hs)
+DO_2SHIFT_FP(VCVT_HU_fixed, vcvt_hu)
+DO_2SHIFT_FP(VCVT_SF_fixed, vcvt_sf)
+DO_2SHIFT_FP(VCVT_UF_fixed, vcvt_uf)
+DO_2SHIFT_FP(VCVT_FS_fixed, vcvt_fs)
+DO_2SHIFT_FP(VCVT_FU_fixed, vcvt_fu)
+
+static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a,
+ MVEGenTwoOpShiftFn *fn)
+{
+ TCGv_ptr qda;
+ TCGv_i32 rm;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qda) ||
+ a->rm == 13 || a->rm == 15 || !fn) {
+ /* Rm cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qda = mve_qreg_ptr(a->qda);
+ rm = load_reg(s, a->rm);
+ fn(cpu_env, qda, qda, rm);
+ tcg_temp_free_ptr(qda);
+ tcg_temp_free_i32(rm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_2SHIFT_SCALAR(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_shl_scalar *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_2shift_scalar(s, a, fns[a->size]); \
+ }
+
+DO_2SHIFT_SCALAR(VSHL_S_scalar, vshli_s)
+DO_2SHIFT_SCALAR(VSHL_U_scalar, vshli_u)
+DO_2SHIFT_SCALAR(VRSHL_S_scalar, vrshli_s)
+DO_2SHIFT_SCALAR(VRSHL_U_scalar, vrshli_u)
+DO_2SHIFT_SCALAR(VQSHL_S_scalar, vqshli_s)
+DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u)
+DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s)
+DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u)
+
+#define DO_VSHLL(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ }; \
+ return do_2shift_vec(s, a, fns[a->size], false, do_gvec_##FN); \
+ }
+
+/*
+ * For the VSHLL vector helpers, the vece is the size of the input
+ * (ie MO_8 or MO_16); the helpers want to work in the output size.
+ * The shift count can be 0..<input size>, inclusive. (0 is VMOVL.)
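+ * For example, in the signed bottom-half helper with MO_8 inputs the
+ * left shift by 8 moves each input byte into the top half of its
+ * 16-bit lane, and the arithmetic right shift by (8 - shift) then
+ * both sign-extends the value and leaves it shifted left by 'shift'.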
+ */
+static void do_gvec_vshllbs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ tcg_gen_gvec_shli(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllbu(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff : 0xffff, oprsz, maxsz);
+ tcg_gen_gvec_shli(ovece, dofs, dofs, shift, oprsz, maxsz);
+}
+
+static void do_gvec_vshllts(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ if (shift == 0) {
+ tcg_gen_gvec_sari(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ } else {
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+ tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+ }
+}
+
+static void do_gvec_vshlltu(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ unsigned ovece = vece + 1;
+ unsigned ibits = vece == MO_8 ? 8 : 16;
+ if (shift == 0) {
+ tcg_gen_gvec_shri(ovece, dofs, aofs, ibits, oprsz, maxsz);
+ } else {
+ tcg_gen_gvec_andi(ovece, dofs, aofs,
+ ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz);
+ tcg_gen_gvec_shri(ovece, dofs, dofs, ibits - shift, oprsz, maxsz);
+ }
+}
+
+DO_VSHLL(VSHLL_BS, vshllbs)
+DO_VSHLL(VSHLL_BU, vshllbu)
+DO_VSHLL(VSHLL_TS, vshllts)
+DO_VSHLL(VSHLL_TU, vshlltu)
+
+#define DO_2SHIFT_N(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2shift *a) \
+ { \
+ static MVEGenTwoOpShiftFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ }; \
+ return do_2shift(s, a, fns[a->size], false); \
+ }
+
+DO_2SHIFT_N(VSHRNB, vshrnb)
+DO_2SHIFT_N(VSHRNT, vshrnt)
+DO_2SHIFT_N(VRSHRNB, vrshrnb)
+DO_2SHIFT_N(VRSHRNT, vrshrnt)
+DO_2SHIFT_N(VQSHRNB_S, vqshrnb_s)
+DO_2SHIFT_N(VQSHRNT_S, vqshrnt_s)
+DO_2SHIFT_N(VQSHRNB_U, vqshrnb_u)
+DO_2SHIFT_N(VQSHRNT_U, vqshrnt_u)
+DO_2SHIFT_N(VQSHRUNB, vqshrunb)
+DO_2SHIFT_N(VQSHRUNT, vqshrunt)
+DO_2SHIFT_N(VQRSHRNB_S, vqrshrnb_s)
+DO_2SHIFT_N(VQRSHRNT_S, vqrshrnt_s)
+DO_2SHIFT_N(VQRSHRNB_U, vqrshrnb_u)
+DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
+DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
+DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
+
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
+{
+ /*
+ * Whole Vector Left Shift with Carry. The carry is taken
+ * from a general purpose register and written back there.
+ * An imm of 0 means "shift by 32".
+ */
+ TCGv_ptr qd;
+ TCGv_i32 rdm;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (a->rdm == 13 || a->rdm == 15) {
+ /* CONSTRAINED UNPREDICTABLE: we UNDEF */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ rdm = load_reg(s, a->rdm);
+ gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+ store_reg(s, a->rdm, rdm);
+ tcg_temp_free_ptr(qd);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn)
+{
+ TCGv_ptr qd;
+ TCGv_i32 rn;
+
+ /*
+ * Vector increment/decrement and duplicate (VIDUP, VDDUP).
+ * This fills the vector with elements of successively increasing
+ * or decreasing values, starting from Rn.
+ */
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (a->size == MO_64) {
+ /* size 0b11 is another encoding */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ rn = load_reg(s, a->rn);
+ fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm));
+ store_reg(s, a->rn, rn);
+ tcg_temp_free_ptr(qd);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn)
+{
+ TCGv_ptr qd;
+ TCGv_i32 rn, rm;
+
+ /*
+ * Vector increment/decrement with wrap and duplicate (VIWDUP, VDWDUP).
+ * This fills the vector with elements of successively increasing
+ * or decreasing values, starting from Rn. Rm specifies a point where
+ * the count wraps back around to 0. The updated offset is written back
+ * to Rn.
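+ * For example, Rn == 0, Rm == 4 and imm == 1 produce the element
+ * sequence 0, 1, 2, 3, 0, 1, ... as for a circular buffer index.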
+ */
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (!fn || a->rm == 13 || a->rm == 15) {
+ /*
+ * size 0b11 is another encoding; Rm == 13 is UNPREDICTABLE;
+ * Rm == 15 is the related VIDUP, VDDUP encoding.
+ */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ rn = load_reg(s, a->rn);
+ rm = load_reg(s, a->rm);
+ fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm));
+ store_reg(s, a->rn, rn);
+ tcg_temp_free_ptr(qd);
+ tcg_temp_free_i32(rm);
+ mve_update_eci(s);
+ return true;
+}
+
+static bool trans_VIDUP(DisasContext *s, arg_vidup *a)
+{
+ static MVEGenVIDUPFn * const fns[] = {
+ gen_helper_mve_vidupb,
+ gen_helper_mve_viduph,
+ gen_helper_mve_vidupw,
+ NULL,
+ };
+ return do_vidup(s, a, fns[a->size]);
+}
+
+static bool trans_VDDUP(DisasContext *s, arg_vidup *a)
+{
+ static MVEGenVIDUPFn * const fns[] = {
+ gen_helper_mve_vidupb,
+ gen_helper_mve_viduph,
+ gen_helper_mve_vidupw,
+ NULL,
+ };
+ /* VDDUP is just like VIDUP but with a negative immediate */
+ a->imm = -a->imm;
+ return do_vidup(s, a, fns[a->size]);
+}
+
+static bool trans_VIWDUP(DisasContext *s, arg_viwdup *a)
+{
+ static MVEGenVIWDUPFn * const fns[] = {
+ gen_helper_mve_viwdupb,
+ gen_helper_mve_viwduph,
+ gen_helper_mve_viwdupw,
+ NULL,
+ };
+ return do_viwdup(s, a, fns[a->size]);
+}
+
+static bool trans_VDWDUP(DisasContext *s, arg_viwdup *a)
+{
+ static MVEGenVIWDUPFn * const fns[] = {
+ gen_helper_mve_vdwdupb,
+ gen_helper_mve_vdwduph,
+ gen_helper_mve_vdwdupw,
+ NULL,
+ };
+ return do_viwdup(s, a, fns[a->size]);
+}
+
+static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn)
+{
+ TCGv_ptr qn, qm;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) ||
+ !fn) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qn = mve_qreg_ptr(a->qn);
+ qm = mve_qreg_ptr(a->qm);
+ fn(cpu_env, qn, qm);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_ptr(qm);
+ if (a->mask) {
+ /* VPT */
+ gen_vpst(s, a->mask);
+ }
+ /* This insn updates predication bits */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ mve_update_eci(s);
+ return true;
+}
+
+static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a,
+ MVEGenScalarCmpFn *fn)
+{
+ TCGv_ptr qn;
+ TCGv_i32 rm;
+
+ if (!dc_isar_feature(aa32_mve, s) || !fn || a->rm == 13) {
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qn = mve_qreg_ptr(a->qn);
+ if (a->rm == 15) {
+ /* Encoding Rm=0b1111 means "constant zero" */
+ rm = tcg_constant_i32(0);
+ } else {
+ rm = load_reg(s, a->rm);
+ }
+ fn(cpu_env, qn, rm);
+ tcg_temp_free_ptr(qn);
+ tcg_temp_free_i32(rm);
+ if (a->mask) {
+ /* VPT */
+ gen_vpst(s, a->mask);
+ }
+ /* This insn updates predication bits */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_VCMP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \
+ { \
+ static MVEGenCmpFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_vcmp(s, a, fns[a->size]); \
+ } \
+ static bool trans_##INSN##_scalar(DisasContext *s, \
+ arg_vcmp_scalar *a) \
+ { \
+ static MVEGenScalarCmpFn * const fns[] = { \
+ gen_helper_mve_##FN##_scalarb, \
+ gen_helper_mve_##FN##_scalarh, \
+ gen_helper_mve_##FN##_scalarw, \
+ NULL, \
+ }; \
+ return do_vcmp_scalar(s, a, fns[a->size]); \
+ }
+
+DO_VCMP(VCMPEQ, vcmpeq)
+DO_VCMP(VCMPNE, vcmpne)
+DO_VCMP(VCMPCS, vcmpcs)
+DO_VCMP(VCMPHI, vcmphi)
+DO_VCMP(VCMPGE, vcmpge)
+DO_VCMP(VCMPLT, vcmplt)
+DO_VCMP(VCMPGT, vcmpgt)
+DO_VCMP(VCMPLE, vcmple)
+
+#define DO_VCMP_FP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vcmp *a) \
+ { \
+ static MVEGenCmpFn * const fns[] = { \
+ NULL, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_vcmp(s, a, fns[a->size]); \
+ } \
+ static bool trans_##INSN##_scalar(DisasContext *s, \
+ arg_vcmp_scalar *a) \
+ { \
+ static MVEGenScalarCmpFn * const fns[] = { \
+ NULL, \
+ gen_helper_mve_##FN##_scalarh, \
+ gen_helper_mve_##FN##_scalars, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_vcmp_scalar(s, a, fns[a->size]); \
+ }
+
+DO_VCMP_FP(VCMPEQ_fp, vfcmpeq)
+DO_VCMP_FP(VCMPNE_fp, vfcmpne)
+DO_VCMP_FP(VCMPGE_fp, vfcmpge)
+DO_VCMP_FP(VCMPLT_fp, vfcmplt)
+DO_VCMP_FP(VCMPGT_fp, vfcmpgt)
+DO_VCMP_FP(VCMPLE_fp, vfcmple)
+
+static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn)
+{
+ /*
+ * MIN/MAX operations across a vector: compute the min or
+ * max of the initial value in a general purpose register
+ * and all the elements in the vector, and store it back
+ * into the general purpose register.
+ */
+ TCGv_ptr qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qm) ||
+ !fn || a->rda == 13 || a->rda == 15) {
+ /* Rda cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qm = mve_qreg_ptr(a->qm);
+ rda = load_reg(s, a->rda);
+ fn(rda, cpu_env, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qm);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_VMAXV(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vmaxv *a) \
+ { \
+ static MVEGenVADDVFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_vmaxv(s, a, fns[a->size]); \
+ }
+
+DO_VMAXV(VMAXV_S, vmaxvs)
+DO_VMAXV(VMAXV_U, vmaxvu)
+DO_VMAXV(VMAXAV, vmaxav)
+DO_VMAXV(VMINV_S, vminvs)
+DO_VMAXV(VMINV_U, vminvu)
+DO_VMAXV(VMINAV, vminav)
+
+#define DO_VMAXV_FP(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vmaxv *a) \
+ { \
+ static MVEGenVADDVFn * const fns[] = { \
+ NULL, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##s, \
+ NULL, \
+ }; \
+ if (!dc_isar_feature(aa32_mve_fp, s)) { \
+ return false; \
+ } \
+ return do_vmaxv(s, a, fns[a->size]); \
+ }
+
+DO_VMAXV_FP(VMAXNMV, vmaxnmv)
+DO_VMAXV_FP(VMINNMV, vminnmv)
+DO_VMAXV_FP(VMAXNMAV, vmaxnmav)
+DO_VMAXV_FP(VMINNMAV, vminnmav)
+
+static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn)
+{
+ /* Absolute difference accumulated across vector */
+ TCGv_ptr qn, qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qm | a->qn) ||
+ !fn || a->rda == 13 || a->rda == 15) {
+ /* Rda cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qm = mve_qreg_ptr(a->qm);
+ qn = mve_qreg_ptr(a->qn);
+ rda = load_reg(s, a->rda);
+ fn(rda, cpu_env, qn, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qm);
+ tcg_temp_free_ptr(qn);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_VABAV(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vabav *a) \
+ { \
+ static MVEGenVABAVFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_vabav(s, a, fns[a->size]); \
+ }
+
+DO_VABAV(VABAV_S, vabavs)
+DO_VABAV(VABAV_U, vabavu)
+
+static bool trans_VMOV_to_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
+{
+ /*
+ * VMOV two 32-bit vector lanes to two general-purpose registers.
+ * This insn is not predicated but it is subject to beat-wise
+ * execution if it is not in an IT block. For us this means
+ * only that if PSR.ECI says we should not be executing the beat
+ * corresponding to the lane of the vector register being accessed
+ * then we should skip performing the move, and that we need to do
+ * the usual check for bad ECI state and advance of ECI state.
+ * (If PSR.ECI is non-zero then we cannot be in an IT block.)
+ */
+ TCGv_i32 tmp;
+ int vd;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) ||
+ a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15 ||
+ a->rt == a->rt2) {
+ /* Rt/Rt2 cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Convert Qreg index to Dreg for read_neon_element32() etc */
+ vd = a->qd * 2;
+
+ if (!mve_skip_vmov(s, vd, a->idx, MO_32)) {
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, vd, a->idx, MO_32);
+ store_reg(s, a->rt, tmp);
+ }
+ if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) {
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, vd + 1, a->idx, MO_32);
+ store_reg(s, a->rt2, tmp);
+ }
+
+ mve_update_and_store_eci(s);
+ return true;
+}
+
+static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a)
+{
+ /*
+ * VMOV two general-purpose registers to two 32-bit vector lanes.
+ * This insn is not predicated but it is subject to beat-wise
+ * execution if it is not in an IT block. For us this means
+ * only that if PSR.ECI says we should not be executing the beat
+ * corresponding to the lane of the vector register being accessed
+ * then we should skip performing the move, and that we need to do
+ * the usual check for bad ECI state and advance of ECI state.
+ * (If PSR.ECI is non-zero then we cannot be in an IT block.)
+ */
+ TCGv_i32 tmp;
+ int vd;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd) ||
+ a->rt == 13 || a->rt == 15 || a->rt2 == 13 || a->rt2 == 15) {
+ /* Rt/Rt2 cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Convert Qreg idx to Dreg for read_neon_element32() etc */
+ vd = a->qd * 2;
+
+ if (!mve_skip_vmov(s, vd, a->idx, MO_32)) {
+ tmp = load_reg(s, a->rt);
+ write_neon_element32(tmp, vd, a->idx, MO_32);
+ tcg_temp_free_i32(tmp);
+ }
+ if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) {
+ tmp = load_reg(s, a->rt2);
+ write_neon_element32(tmp, vd + 1, a->idx, MO_32);
+ tcg_temp_free_i32(tmp);
+ }
+
+ mve_update_and_store_eci(s);
+ return true;
+}
diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c
new file mode 100644
index 000000000..dd43de558
--- /dev/null
+++ b/target/arm/translate-neon.c
@@ -0,0 +1,4064 @@
+/*
+ * ARM translation: AArch32 Neon instructions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Copyright (c) 2020 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/exec-all.h"
+#include "exec/gen-icount.h"
+#include "translate.h"
+#include "translate-a32.h"
+
+/* Include the generated Neon decoder */
+#include "decode-neon-dp.c.inc"
+#include "decode-neon-ls.c.inc"
+#include "decode-neon-shared.c.inc"
+
+static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
+{
+ TCGv_ptr ret = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
+ return ret;
+}
+
+static void neon_load_element(TCGv_i32 var, int reg, int ele, MemOp mop)
+{
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
+
+ switch (mop) {
+ case MO_UB:
+ tcg_gen_ld8u_i32(var, cpu_env, offset);
+ break;
+ case MO_UW:
+ tcg_gen_ld16u_i32(var, cpu_env, offset);
+ break;
+ case MO_UL:
+ tcg_gen_ld_i32(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void neon_load_element64(TCGv_i64 var, int reg, int ele, MemOp mop)
+{
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
+
+ switch (mop) {
+ case MO_UB:
+ tcg_gen_ld8u_i64(var, cpu_env, offset);
+ break;
+ case MO_UW:
+ tcg_gen_ld16u_i64(var, cpu_env, offset);
+ break;
+ case MO_UL:
+ tcg_gen_ld32u_i64(var, cpu_env, offset);
+ break;
+ case MO_Q:
+ tcg_gen_ld_i64(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void neon_store_element(int reg, int ele, MemOp size, TCGv_i32 var)
+{
+ long offset = neon_element_offset(reg, ele, size);
+
+ switch (size) {
+ case MO_8:
+ tcg_gen_st8_i32(var, cpu_env, offset);
+ break;
+ case MO_16:
+ tcg_gen_st16_i32(var, cpu_env, offset);
+ break;
+ case MO_32:
+ tcg_gen_st_i32(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void neon_store_element64(int reg, int ele, MemOp size, TCGv_i64 var)
+{
+ long offset = neon_element_offset(reg, ele, size);
+
+ switch (size) {
+ case MO_8:
+ tcg_gen_st8_i64(var, cpu_env, offset);
+ break;
+ case MO_16:
+ tcg_gen_st16_i64(var, cpu_env, offset);
+ break;
+ case MO_32:
+ tcg_gen_st32_i64(var, cpu_env, offset);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool do_neon_ddda(DisasContext *s, int q, int vd, int vn, int vm,
+ int data, gen_helper_gvec_4 *fn_gvec)
+{
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
+ return false;
+ }
+
+ /*
+ * UNDEF accesses to odd registers for each bit of Q.
+ * Q will be 0b111 when all operands are Q-regs; other values
+ * occur when we have mixed Q- and D-reg inputs.
+ */
+ if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ int opr_sz = q ? 16 : 8;
+ tcg_gen_gvec_4_ool(vfp_reg_offset(1, vd),
+ vfp_reg_offset(1, vn),
+ vfp_reg_offset(1, vm),
+ vfp_reg_offset(1, vd),
+ opr_sz, opr_sz, data, fn_gvec);
+ return true;
+}
+
+static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm,
+ int data, ARMFPStatusFlavour fp_flavour,
+ gen_helper_gvec_4_ptr *fn_gvec_ptr)
+{
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (((vd | vn | vm) & 0x10) && !dc_isar_feature(aa32_simd_r32, s)) {
+ return false;
+ }
+
+ /*
+ * UNDEF accesses to odd registers for each bit of Q.
+ * Q will be 0b111 when all operands are Q-regs; other values
+ * occur when we have mixed Q- and D-reg inputs.
+ */
+ if (((vd & 1) * 4 | (vn & 1) * 2 | (vm & 1)) & q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ int opr_sz = q ? 16 : 8;
+ TCGv_ptr fpst = fpstatus_ptr(fp_flavour);
+
+ tcg_gen_gvec_4_ptr(vfp_reg_offset(1, vd),
+ vfp_reg_offset(1, vn),
+ vfp_reg_offset(1, vm),
+ vfp_reg_offset(1, vd),
+ fpst, opr_sz, opr_sz, data, fn_gvec_ptr);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
+{
+ if (!dc_isar_feature(aa32_vcma, s)) {
+ return false;
+ }
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
+ FPST_STD_F16, gen_helper_gvec_fcmlah);
+ }
+ return do_neon_ddda_fpst(s, a->q * 7, a->vd, a->vn, a->vm, a->rot,
+ FPST_STD, gen_helper_gvec_fcmlas);
+}
+
+static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
+{
+ int opr_sz;
+ TCGv_ptr fpst;
+ gen_helper_gvec_3_ptr *fn_gvec_ptr;
+
+ if (!dc_isar_feature(aa32_vcma, s)
+ || (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s))) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vn | a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ opr_sz = (1 + a->q) * 8;
+ fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
+ fn_gvec_ptr = (a->size == MO_16) ?
+ gen_helper_gvec_fcaddh : gen_helper_gvec_fcadds;
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ vfp_reg_offset(1, a->vn),
+ vfp_reg_offset(1, a->vm),
+ fpst, opr_sz, opr_sz, a->rot,
+ fn_gvec_ptr);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_sdot_b);
+}
+
+static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_udot_b);
+}
+
+static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_usdot_b);
+}
+
+static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
+{
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_bfdot);
+}
+
+static bool trans_VFML(DisasContext *s, arg_VFML *a)
+{
+ int opr_sz;
+
+ if (!dc_isar_feature(aa32_fhm, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ opr_sz = (1 + a->q) * 8;
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ vfp_reg_offset(a->q, a->vn),
+ vfp_reg_offset(a->q, a->vm),
+ cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
+ gen_helper_gvec_fmlal_a32);
+ return true;
+}
+
+static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
+{
+ int data = (a->index << 2) | a->rot;
+
+ if (!dc_isar_feature(aa32_vcma, s)) {
+ return false;
+ }
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
+ FPST_STD_F16, gen_helper_gvec_fcmlah_idx);
+ }
+ return do_neon_ddda_fpst(s, a->q * 6, a->vd, a->vn, a->vm, data,
+ FPST_STD, gen_helper_gvec_fcmlas_idx);
+}
+
+static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_sdot_idx_b);
+}
+
+static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_dp, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_udot_idx_b);
+}
+
+static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_usdot_idx_b);
+}
+
+static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_sudot_idx_b);
+}
+
+static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
+{
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
+ gen_helper_gvec_bfdot_idx);
+}
+
+static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
+{
+ int opr_sz;
+
+ if (!dc_isar_feature(aa32_fhm, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ opr_sz = (1 + a->q) * 8;
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ vfp_reg_offset(a->q, a->vn),
+ vfp_reg_offset(a->q, a->rm),
+ cpu_env, opr_sz, opr_sz,
+ (a->index << 2) | a->s, /* is_2 == 0 */
+ gen_helper_gvec_fmlal_idx_a32);
+ return true;
+}
+
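+/*
+ * Table of {nregs, interleave, spacing} for the Neon "load/store
+ * multiple structures" insns, indexed by the itype field of the
+ * instruction encoding.
+ */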
+static struct {
+ int nregs;
+ int interleave;
+ int spacing;
+} const neon_ls_element_type[11] = {
+ {1, 4, 1},
+ {1, 4, 2},
+ {4, 1, 1},
+ {2, 2, 2},
+ {1, 3, 1},
+ {1, 3, 2},
+ {3, 1, 1},
+ {1, 1, 1},
+ {1, 2, 1},
+ {1, 2, 2},
+ {2, 1, 1}
+};
+
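+/*
+ * Post-indexed addressing updates for Neon load/store: Rm == 15
+ * means no writeback; Rm == 13 means post-increment Rn by the
+ * transfer size ('stride'); any other Rm means post-increment Rn
+ * by the value of Rm.
+ */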
+static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
+ int stride)
+{
+ if (rm != 15) {
+ TCGv_i32 base;
+
+ base = load_reg(s, rn);
+ if (rm == 13) {
+ tcg_gen_addi_i32(base, base, stride);
+ } else {
+ TCGv_i32 index;
+ index = load_reg(s, rm);
+ tcg_gen_add_i32(base, base, index);
+ tcg_temp_free_i32(index);
+ }
+ store_reg(s, rn, base);
+ }
+}
+
+static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
+{
+ /* Neon load/store multiple structures */
+ int nregs, interleave, spacing, reg, n;
+ MemOp mop, align, endian;
+ int mmu_idx = get_mem_index(s);
+ int size = a->size;
+ TCGv_i64 tmp64;
+ TCGv_i32 addr, tmp;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+ if (a->itype > 10) {
+ return false;
+ }
+ /* Catch UNDEF cases for bad values of align field */
+ switch (a->itype & 0xc) {
+ case 4:
+ if (a->align >= 2) {
+ return false;
+ }
+ break;
+ case 8:
+ if (a->align == 3) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+ nregs = neon_ls_element_type[a->itype].nregs;
+ interleave = neon_ls_element_type[a->itype].interleave;
+ spacing = neon_ls_element_type[a->itype].spacing;
+ if (size == 3 && (interleave | spacing) != 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* For our purposes, bytes are always little-endian. */
+ endian = s->be_data;
+ if (size == 0) {
+ endian = MO_LE;
+ }
+
+ /* Enforce alignment requested by the instruction */
+ if (a->align) {
+ align = pow2_align(a->align + 2); /* alignment of 4 << a->align bytes */
+ } else {
+ align = s->align_mem ? MO_ALIGN : 0;
+ }
+
+ /*
+ * Consecutive little-endian elements from a single register
+ * can be promoted to a larger little-endian operation.
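+ * For example, a VLD1.8 of a whole D register is then performed as
+ * a single 64-bit load rather than eight separate byte loads.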
+ */
+ if (interleave == 1 && endian == MO_LE) {
+ /* Retain any natural alignment. */
+ if (align == MO_ALIGN) {
+ align = pow2_align(size);
+ }
+ size = 3;
+ }
+
+ tmp64 = tcg_temp_new_i64();
+ addr = tcg_temp_new_i32();
+ tmp = tcg_const_i32(1 << size);
+ load_reg_var(s, addr, a->rn);
+
+ mop = endian | size | align;
+ for (reg = 0; reg < nregs; reg++) {
+ for (n = 0; n < 8 >> size; n++) {
+ int xs;
+ for (xs = 0; xs < interleave; xs++) {
+ int tt = a->vd + reg + spacing * xs;
+
+ if (a->l) {
+ gen_aa32_ld_internal_i64(s, tmp64, addr, mmu_idx, mop);
+ neon_store_element64(tt, n, size, tmp64);
+ } else {
+ neon_load_element64(tmp64, tt, n, size);
+ gen_aa32_st_internal_i64(s, tmp64, addr, mmu_idx, mop);
+ }
+ tcg_gen_add_i32(addr, addr, tmp);
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
+ }
+ }
+ }
+ tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i64(tmp64);
+
+ gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
+ return true;
+}
+
+static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
+{
+ /* Neon load single structure to all lanes */
+ int reg, stride, vec_size;
+ int vd = a->vd;
+ int size = a->size;
+ int nregs = a->n + 1;
+ TCGv_i32 addr, tmp;
+ MemOp mop, align;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ align = 0;
+ if (size == 3) {
+ if (nregs != 4 || a->a == 0) {
+ return false;
+ }
+ /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
+ size = MO_32;
+ align = MO_ALIGN_16;
+ } else if (a->a) {
+ switch (nregs) {
+ case 1:
+ if (size == 0) {
+ return false;
+ }
+ align = MO_ALIGN;
+ break;
+ case 2:
+ align = pow2_align(size + 1);
+ break;
+ case 3:
+ return false;
+ case 4:
+ align = pow2_align(size + 2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * VLD1 to all lanes: T bit indicates how many Dregs to write.
+ * VLD2/3/4 to all lanes: T bit indicates register stride.
+ */
+ stride = a->t ? 2 : 1;
+ vec_size = nregs == 1 ? stride * 8 : 8;
+ mop = size | align;
+ tmp = tcg_temp_new_i32();
+ addr = tcg_temp_new_i32();
+ load_reg_var(s, addr, a->rn);
+ for (reg = 0; reg < nregs; reg++) {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
+ if ((vd & 1) && vec_size == 16) {
+ /*
+ * We cannot write 16 bytes at once because the
+ * destination is unaligned.
+ */
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
+ 8, 8, tmp);
+ tcg_gen_gvec_mov(0, neon_full_reg_offset(vd + 1),
+ neon_full_reg_offset(vd), 8, 8);
+ } else {
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(vd),
+ vec_size, vec_size, tmp);
+ }
+ tcg_gen_addi_i32(addr, addr, 1 << size);
+ vd += stride;
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
+
+ return true;
+}
+
+static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
+{
+ /* Neon load/store single structure to one lane */
+ int reg;
+ int nregs = a->n + 1;
+ int vd = a->vd;
+ TCGv_i32 addr, tmp;
+ MemOp mop;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ /* Catch the UNDEF cases. This is unavoidably a bit messy. */
+ switch (nregs) {
+ case 1:
+ if (((a->align & (1 << a->size)) != 0) ||
+ (a->size == 2 && (a->align == 1 || a->align == 2))) {
+ return false;
+ }
+ break;
+ case 3:
+ if ((a->align & 1) != 0) {
+ return false;
+ }
+ /* fall through */
+ case 2:
+ if (a->size == 2 && (a->align & 2) != 0) {
+ return false;
+ }
+ break;
+ case 4:
+ if (a->size == 2 && a->align == 3) {
+ return false;
+ }
+ break;
+ default:
+ abort();
+ }
+ if ((vd + a->stride * (nregs - 1)) > 31) {
+ /*
+ * Attempts to write off the end of the register file are
+ * UNPREDICTABLE; we choose to UNDEF because otherwise we would
+ * access off the end of the array that holds the register data.
+ */
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Pick up SCTLR settings */
+ mop = finalize_memop(s, a->size);
+
+ if (a->align) {
+ MemOp align_op;
+
+ switch (nregs) {
+ case 1:
+ /* For VLD1, use natural alignment. */
+ align_op = MO_ALIGN;
+ break;
+ case 2:
+ /* For VLD2, use double alignment. */
+ align_op = pow2_align(a->size + 1);
+ break;
+ case 4:
+ if (a->size == MO_32) {
+ /*
+ * For VLD4.32, align = 1 is double alignment, align = 2 is
+ * quad alignment; align = 3 is rejected above.
+ */
+ align_op = pow2_align(a->size + a->align);
+ } else {
+ /* For VLD4.8 and VLD4.16, we want quad alignment. */
+ align_op = pow2_align(a->size + 2);
+ }
+ break;
+ default:
+ /* For VLD3, nonzero alignment is rejected above, so this is unreachable. */
+ g_assert_not_reached();
+ }
+
+ mop = (mop & ~MO_AMASK) | align_op;
+ }
+
+ tmp = tcg_temp_new_i32();
+ addr = tcg_temp_new_i32();
+ load_reg_var(s, addr, a->rn);
+
+ for (reg = 0; reg < nregs; reg++) {
+ if (a->l) {
+ gen_aa32_ld_internal_i32(s, tmp, addr, get_mem_index(s), mop);
+ neon_store_element(vd, a->reg_idx, a->size, tmp);
+ } else { /* Store */
+ neon_load_element(tmp, vd, a->reg_idx, a->size);
+ gen_aa32_st_internal_i32(s, tmp, addr, get_mem_index(s), mop);
+ }
+ vd += a->stride;
+ tcg_gen_addi_i32(addr, addr, 1 << a->size);
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
+ }
+ tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(tmp);
+
+ gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
+
+ return true;
+}
+
+static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
+{
+ int vec_size = a->q ? 16 : 8;
+ int rd_ofs = neon_full_reg_offset(a->vd);
+ int rn_ofs = neon_full_reg_offset(a->vn);
+ int rm_ofs = neon_full_reg_offset(a->vm);
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vn | a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
+ return true;
+}
+
+#define DO_3SAME(INSN, FUNC) \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ return do_3same(s, a, FUNC); \
+ }
+
+DO_3SAME(VADD, tcg_gen_gvec_add)
+DO_3SAME(VSUB, tcg_gen_gvec_sub)
+DO_3SAME(VAND, tcg_gen_gvec_and)
+DO_3SAME(VBIC, tcg_gen_gvec_andc)
+DO_3SAME(VORR, tcg_gen_gvec_or)
+DO_3SAME(VORN, tcg_gen_gvec_orc)
+DO_3SAME(VEOR, tcg_gen_gvec_xor)
+DO_3SAME(VSHL_S, gen_gvec_sshl)
+DO_3SAME(VSHL_U, gen_gvec_ushl)
+DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
+DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
+DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
+DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
+
+/* These insns are all gvec_bitsel but with the inputs in various orders. */
+#define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
+ } \
+ DO_3SAME(INSN, gen_##INSN##_3s)
+
+DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
+DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
+DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
+
+#define DO_3SAME_NO_SZ_3(INSN, FUNC) \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size == 3) { \
+ return false; \
+ } \
+ return do_3same(s, a, FUNC); \
+ }
+
+DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
+DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
+DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
+DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
+DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
+DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
+DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
+DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
+DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
+DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
+DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
+DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
+
+#define DO_3SAME_CMP(INSN, COND) \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
+ } \
+ DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
+
+DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
+DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
+DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
+DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
+DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
+
+#define WRAP_OOL_FN(WRAPNAME, FUNC) \
+ static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
+ uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
+ }
+
+WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)
+
+static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_3same(s, a, gen_VMUL_p_3s);
+}
+
+#define DO_VQRDMLAH(INSN, FUNC) \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (!dc_isar_feature(aa32_rdm, s)) { \
+ return false; \
+ } \
+ if (a->size != 1 && a->size != 2) { \
+ return false; \
+ } \
+ return do_3same(s, a, FUNC); \
+ }
+
+DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
+DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)
+
+#define DO_SHA1(NAME, FUNC) \
+ WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
+ static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (!dc_isar_feature(aa32_sha1, s)) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##NAME##_3s); \
+ }
+
+DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
+DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
+DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
+DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)
+
+#define DO_SHA2(NAME, FUNC) \
+ WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
+ static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (!dc_isar_feature(aa32_sha2, s)) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##NAME##_3s); \
+ }
+
+DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
+DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
+DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
+
+#define DO_3SAME_64(INSN, FUNC) \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static const GVecGen3 op = { .fni8 = FUNC }; \
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op); \
+ } \
+ DO_3SAME(INSN, gen_##INSN##_3s)
+
+#define DO_3SAME_64_ENV(INSN, FUNC) \
+ static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
+ { \
+ FUNC(d, cpu_env, n, m); \
+ } \
+ DO_3SAME_64(INSN, gen_##INSN##_elt)
+
+DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
+DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
+DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
+DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
+DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
+DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
+
+#define DO_3SAME_32(INSN, FUNC) \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static const GVecGen3 ops[4] = { \
+ { .fni4 = gen_helper_neon_##FUNC##8 }, \
+ { .fni4 = gen_helper_neon_##FUNC##16 }, \
+ { .fni4 = gen_helper_neon_##FUNC##32 }, \
+ { 0 }, \
+ }; \
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
+ } \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size > 2) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##INSN##_3s); \
+ }
+
+/*
+ * Some helper functions need to be passed the cpu_env. In order
+ * to use those with the gvec APIs like tcg_gen_gvec_3() we need
+ * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
+ * and which call a NeonGenTwoOpEnvFn().
+ */
+#define WRAP_ENV_FN(WRAPNAME, FUNC) \
+ static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
+ { \
+ FUNC(d, cpu_env, n, m); \
+ }
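+/*
+ * For example, DO_3SAME_32_ENV(VQSHL_S, qshl_s) below generates
+ * gen_VQSHL_S_tramp8(), which simply calls
+ * gen_helper_neon_qshl_s8(d, cpu_env, n, m).
+ */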
+
+#define DO_3SAME_32_ENV(INSN, FUNC) \
+ WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8); \
+ WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16); \
+ WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32); \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static const GVecGen3 ops[4] = { \
+ { .fni4 = gen_##INSN##_tramp8 }, \
+ { .fni4 = gen_##INSN##_tramp16 }, \
+ { .fni4 = gen_##INSN##_tramp32 }, \
+ { 0 }, \
+ }; \
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
+ } \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size > 2) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##INSN##_3s); \
+ }
+
+DO_3SAME_32(VHADD_S, hadd_s)
+DO_3SAME_32(VHADD_U, hadd_u)
+DO_3SAME_32(VHSUB_S, hsub_s)
+DO_3SAME_32(VHSUB_U, hsub_u)
+DO_3SAME_32(VRHADD_S, rhadd_s)
+DO_3SAME_32(VRHADD_U, rhadd_u)
+DO_3SAME_32(VRSHL_S, rshl_s)
+DO_3SAME_32(VRSHL_U, rshl_u)
+
+DO_3SAME_32_ENV(VQSHL_S, qshl_s)
+DO_3SAME_32_ENV(VQSHL_U, qshl_u)
+DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
+DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
+
+static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
+{
+ /* Operations handled pairwise 32 bits at a time */
+ TCGv_i32 tmp, tmp2, tmp3;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->size == 3) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ assert(a->q == 0); /* enforced by decode patterns */
+
+ /*
+ * Note that we have to be careful not to clobber the source operands
+ * in the "vm == vd" case by storing the result of the first pass too
+ * early. Since Q is 0 there are always just two passes, so instead
+ * of a complicated loop over each pass we just unroll.
+ */
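+ /*
+ * Concretely: Dd[0] = fn(Dn[0], Dn[1]) and Dd[1] = fn(Dm[0], Dm[1]);
+ * both results are computed into temps before either store is done.
+ */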
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tmp3 = tcg_temp_new_i32();
+
+ read_neon_element32(tmp, a->vn, 0, MO_32);
+ read_neon_element32(tmp2, a->vn, 1, MO_32);
+ fn(tmp, tmp, tmp2);
+
+ read_neon_element32(tmp3, a->vm, 0, MO_32);
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
+ fn(tmp3, tmp3, tmp2);
+
+ write_neon_element32(tmp, a->vd, 0, MO_32);
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ return true;
+}
+
+#define DO_3SAME_PAIR(INSN, func) \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ static NeonGenTwoOpFn * const fns[] = { \
+ gen_helper_neon_##func##8, \
+ gen_helper_neon_##func##16, \
+ gen_helper_neon_##func##32, \
+ }; \
+ if (a->size > 2) { \
+ return false; \
+ } \
+ return do_3same_pair(s, a, fns[a->size]); \
+ }
+
+/* 32-bit pairwise ops end up the same as the elementwise versions. */
+#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
+#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
+#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
+#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
+#define gen_helper_neon_padd_u32 tcg_gen_add_i32
+
+DO_3SAME_PAIR(VPMAX_S, pmax_s)
+DO_3SAME_PAIR(VPMIN_S, pmin_s)
+DO_3SAME_PAIR(VPMAX_U, pmax_u)
+DO_3SAME_PAIR(VPMIN_U, pmin_u)
+DO_3SAME_PAIR(VPADD, padd_u)
+
+#define DO_3SAME_VQDMULH(INSN, FUNC) \
+ WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16); \
+ WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32); \
+ static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static const GVecGen3 ops[2] = { \
+ { .fni4 = gen_##INSN##_tramp16 }, \
+ { .fni4 = gen_##INSN##_tramp32 }, \
+ }; \
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
+ } \
+ static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size != 1 && a->size != 2) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##INSN##_3s); \
+ }
+
+DO_3SAME_VQDMULH(VQDMULH, qdmulh)
+DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
+
+#define WRAP_FP_GVEC(WRAPNAME, FPST, FUNC) \
+ static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rn_ofs, uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ TCGv_ptr fpst = fpstatus_ptr(FPST); \
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \
+ oprsz, maxsz, 0, FUNC); \
+ tcg_temp_free_ptr(fpst); \
+ }
+
+#define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC) \
+ WRAP_FP_GVEC(gen_##INSN##_fp32_3s, FPST_STD, SFUNC) \
+ WRAP_FP_GVEC(gen_##INSN##_fp16_3s, FPST_STD_F16, HFUNC) \
+ static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size == MO_16) { \
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ return do_3same(s, a, gen_##INSN##_fp16_3s); \
+ } \
+ return do_3same(s, a, gen_##INSN##_fp32_3s); \
+ }
+
+DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s, gen_helper_gvec_fadd_h)
+DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s, gen_helper_gvec_fsub_h)
+DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s, gen_helper_gvec_fabd_h)
+DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s, gen_helper_gvec_fmul_h)
+DO_3S_FP_GVEC(VCEQ, gen_helper_gvec_fceq_s, gen_helper_gvec_fceq_h)
+DO_3S_FP_GVEC(VCGE, gen_helper_gvec_fcge_s, gen_helper_gvec_fcge_h)
+DO_3S_FP_GVEC(VCGT, gen_helper_gvec_fcgt_s, gen_helper_gvec_fcgt_h)
+DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
+DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
+DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
+DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)
+DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
+DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
+DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
+DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)
+DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h)
+DO_3S_FP_GVEC(VRSQRTS, gen_helper_gvec_rsqrts_nf_s, gen_helper_gvec_rsqrts_nf_h)
+
+WRAP_FP_GVEC(gen_VMAXNM_fp32_3s, FPST_STD, gen_helper_gvec_fmaxnum_s)
+WRAP_FP_GVEC(gen_VMAXNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fmaxnum_h)
+WRAP_FP_GVEC(gen_VMINNM_fp32_3s, FPST_STD, gen_helper_gvec_fminnum_s)
+WRAP_FP_GVEC(gen_VMINNM_fp16_3s, FPST_STD_F16, gen_helper_gvec_fminnum_h)
+
+static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_3same(s, a, gen_VMAXNM_fp16_3s);
+ }
+ return do_3same(s, a, gen_VMAXNM_fp32_3s);
+}
+
+static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ return do_3same(s, a, gen_VMINNM_fp16_3s);
+ }
+ return do_3same(s, a, gen_VMINNM_fp32_3s);
+}
+
+static bool do_3same_fp_pair(DisasContext *s, arg_3same *a,
+ gen_helper_gvec_3_ptr *fn)
+{
+ /* FP pairwise operations */
+ TCGv_ptr fpstatus;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ assert(a->q == 0); /* enforced by decode patterns */
+
+ fpstatus = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
+ tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
+ vfp_reg_offset(1, a->vn),
+ vfp_reg_offset(1, a->vm),
+ fpstatus, 8, 8, 0, fn);
+ tcg_temp_free_ptr(fpstatus);
+
+ return true;
+}
+
+/*
+ * For all the functions using this macro, size == 1 means fp16; that
+ * requires the fp16 arithmetic extension, which is checked for below.
+ */
+#define DO_3S_FP_PAIR(INSN,FUNC) \
+ static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
+ { \
+ if (a->size == MO_16) { \
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ return do_3same_fp_pair(s, a, FUNC##h); \
+ } \
+ return do_3same_fp_pair(s, a, FUNC##s); \
+ }
+
+DO_3S_FP_PAIR(VPADD, gen_helper_neon_padd)
+DO_3S_FP_PAIR(VPMAX, gen_helper_neon_pmax)
+DO_3S_FP_PAIR(VPMIN, gen_helper_neon_pmin)
+
+static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
+{
+ /* Handle a 2-reg-shift insn which can be vectorized. */
+ int vec_size = a->q ? 16 : 8;
+ int rd_ofs = neon_full_reg_offset(a->vd);
+ int rm_ofs = neon_full_reg_offset(a->vm);
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
+ return true;
+}
+
+#define DO_2SH(INSN, FUNC) \
+ static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ return do_vector_2sh(s, a, FUNC); \
+ } \
+
+DO_2SH(VSHL, tcg_gen_gvec_shli)
+DO_2SH(VSLI, gen_gvec_sli)
+DO_2SH(VSRI, gen_gvec_sri)
+DO_2SH(VSRA_S, gen_gvec_ssra)
+DO_2SH(VSRA_U, gen_gvec_usra)
+DO_2SH(VRSHR_S, gen_gvec_srshr)
+DO_2SH(VRSHR_U, gen_gvec_urshr)
+DO_2SH(VRSRA_S, gen_gvec_srsra)
+DO_2SH(VRSRA_U, gen_gvec_ursra)
+
+static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
+{
+ /* Signed shift out of range results in all-sign-bits */
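+ /*
+ * e.g. for byte elements an encoded shift of 8 is clamped to 7; an
+ * arithmetic shift by 7 then yields all-sign-bits as required.
+ */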
+ a->shift = MIN(a->shift, (8 << a->size) - 1);
+ return do_vector_2sh(s, a, tcg_gen_gvec_sari);
+}
+
+static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
+}
+
+static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
+{
+ /* Shift out of range is architecturally valid and results in zero. */
+ if (a->shift >= (8 << a->size)) {
+ return do_vector_2sh(s, a, gen_zero_rd_2sh);
+ } else {
+ return do_vector_2sh(s, a, tcg_gen_gvec_shri);
+ }
+}
+
+static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
+ NeonGenTwo64OpEnvFn *fn)
+{
+ /*
+ * 2-reg-and-shift operations, size == 3 case, where the
+ * function needs to be passed cpu_env.
+ */
+ TCGv_i64 constimm;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * To avoid excessive duplication of ops we implement shift
+ * by immediate using the variable shift operations.
+ */
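+ /*
+ * Here a->size is always 3 (this is the size == 3 case), so dup_const()
+ * is an identity and constimm is simply the 64-bit shift count.
+ */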
+ constimm = tcg_const_i64(dup_const(a->size, a->shift));
+
+ for (pass = 0; pass < a->q + 1; pass++) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ read_neon_element64(tmp, a->vm, pass, MO_64);
+ fn(tmp, cpu_env, tmp, constimm);
+ write_neon_element64(tmp, a->vd, pass, MO_64);
+ tcg_temp_free_i64(tmp);
+ }
+ tcg_temp_free_i64(constimm);
+ return true;
+}
+
+static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
+ NeonGenTwoOpEnvFn *fn)
+{
+ /*
+ * 2-reg-and-shift operations, size < 3 case, where the
+ * helper needs to be passed cpu_env.
+ */
+ TCGv_i32 constimm, tmp;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * To avoid excessive duplication of ops we implement shift
+ * by immediate using the variable shift operations.
+ */
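+ /*
+ * dup_const() replicates the shift count into each element, e.g.
+ * dup_const(MO_8, 3) = 0x0303030303030303, truncated to 0x03030303
+ * for the 32-bit constant used here.
+ */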
+ constimm = tcg_const_i32(dup_const(a->size, a->shift));
+ tmp = tcg_temp_new_i32();
+
+ for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
+ read_neon_element32(tmp, a->vm, pass, MO_32);
+ fn(tmp, cpu_env, tmp, constimm);
+ write_neon_element32(tmp, a->vd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(constimm);
+ return true;
+}
+
+#define DO_2SHIFT_ENV(INSN, FUNC) \
+ static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
+ } \
+ static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ static NeonGenTwoOpEnvFn * const fns[] = { \
+ gen_helper_neon_##FUNC##8, \
+ gen_helper_neon_##FUNC##16, \
+ gen_helper_neon_##FUNC##32, \
+ }; \
+ assert(a->size < ARRAY_SIZE(fns)); \
+ return do_2shift_env_32(s, a, fns[a->size]); \
+ }
+
+DO_2SHIFT_ENV(VQSHLU, qshlu_s)
+DO_2SHIFT_ENV(VQSHL_U, qshl_u)
+DO_2SHIFT_ENV(VQSHL_S, qshl_s)
+
+static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
+ NeonGenTwo64OpFn *shiftfn,
+ NeonGenNarrowEnvFn *narrowfn)
+{
+ /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
+ TCGv_i64 constimm, rm1, rm2;
+ TCGv_i32 rd;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vm & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * This is always a right shift, and the shiftfn is always a
+ * left-shift helper, which thus needs the negated shift count.
+ */
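+ /* e.g. VSHRN with shift 8 becomes shiftfn(rm, rm, -8). */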
+ constimm = tcg_const_i64(-a->shift);
+ rm1 = tcg_temp_new_i64();
+ rm2 = tcg_temp_new_i64();
+ rd = tcg_temp_new_i32();
+
+ /* Load both inputs first to avoid potential overwrite if rm == rd */
+ read_neon_element64(rm1, a->vm, 0, MO_64);
+ read_neon_element64(rm2, a->vm, 1, MO_64);
+
+ shiftfn(rm1, rm1, constimm);
+ narrowfn(rd, cpu_env, rm1);
+ write_neon_element32(rd, a->vd, 0, MO_32);
+
+ shiftfn(rm2, rm2, constimm);
+ narrowfn(rd, cpu_env, rm2);
+ write_neon_element32(rd, a->vd, 1, MO_32);
+
+ tcg_temp_free_i32(rd);
+ tcg_temp_free_i64(rm1);
+ tcg_temp_free_i64(rm2);
+ tcg_temp_free_i64(constimm);
+
+ return true;
+}
+
+static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
+ NeonGenTwoOpFn *shiftfn,
+ NeonGenNarrowEnvFn *narrowfn)
+{
+ /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
+ TCGv_i32 constimm, rm1, rm2, rm3, rm4;
+ TCGv_i64 rtmp;
+ uint32_t imm;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vm & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * This is always a right shift, and the shiftfn is always a
+ * left-shift helper, which thus needs the negated shift count
+ * duplicated into each lane of the immediate value.
+ */
+ if (a->size == 1) {
+ imm = (uint16_t)(-a->shift);
+ imm |= imm << 16;
+ } else {
+ /* size == 2 */
+ imm = -a->shift;
+ }
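+ /*
+ * e.g. a->size == 1 with a->shift == 3 gives imm = 0xfffdfffd,
+ * i.e. -3 in each 16-bit lane.
+ */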
+ constimm = tcg_const_i32(imm);
+
+ /* Load all inputs first to avoid potential overwrite */
+ rm1 = tcg_temp_new_i32();
+ rm2 = tcg_temp_new_i32();
+ rm3 = tcg_temp_new_i32();
+ rm4 = tcg_temp_new_i32();
+ read_neon_element32(rm1, a->vm, 0, MO_32);
+ read_neon_element32(rm2, a->vm, 1, MO_32);
+ read_neon_element32(rm3, a->vm, 2, MO_32);
+ read_neon_element32(rm4, a->vm, 3, MO_32);
+ rtmp = tcg_temp_new_i64();
+
+ shiftfn(rm1, rm1, constimm);
+ shiftfn(rm2, rm2, constimm);
+
+ tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
+ tcg_temp_free_i32(rm2);
+
+ narrowfn(rm1, cpu_env, rtmp);
+ write_neon_element32(rm1, a->vd, 0, MO_32);
+ tcg_temp_free_i32(rm1);
+
+ shiftfn(rm3, rm3, constimm);
+ shiftfn(rm4, rm4, constimm);
+ tcg_temp_free_i32(constimm);
+
+ tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
+ tcg_temp_free_i32(rm4);
+
+ narrowfn(rm3, cpu_env, rtmp);
+ tcg_temp_free_i64(rtmp);
+ write_neon_element32(rm3, a->vd, 1, MO_32);
+ tcg_temp_free_i32(rm3);
+ return true;
+}
+
+#define DO_2SN_64(INSN, FUNC, NARROWFUNC) \
+ static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC); \
+ }
+#define DO_2SN_32(INSN, FUNC, NARROWFUNC) \
+ static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
+ }
+
+static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+{
+ tcg_gen_extrl_i64_i32(dest, src);
+}
+
+static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+{
+ gen_helper_neon_narrow_u16(dest, src);
+}
+
+static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
+{
+ gen_helper_neon_narrow_u8(dest, src);
+}
+
+DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
+DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
+DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)
+
+DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
+DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
+DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)
+
+DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
+DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
+DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)
+
+DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
+DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
+DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
+DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
+DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
+DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)
+
+DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
+DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
+DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)
+
+DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
+DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
+DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)
+
+DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
+DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
+DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)
+
+static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
+ NeonGenWidenFn *widenfn, bool u)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 rm0, rm1;
+ uint64_t widen_mask = 0;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * This is a widen-and-shift operation. The shift is always less
+ * than the width of the source type, so after widening the input
+ * vector we can simply shift the whole 64-bit widened register,
+ * and then clear the potential overflow bits resulting from left
+ * bits of the narrow input appearing as right bits of the left
+ * neighbour narrow input. Calculate a mask of bits to clear.
+ */
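+ /*
+ * Worked example: size == MO_8 and shift == 2 gives esize = 8,
+ * widen_mask = 0xff >> 6 = 0x03, duplicated to 0x0003000300030003:
+ * exactly the bits each 16-bit lane receives from its less
+ * significant neighbour after the 64-bit left shift.
+ */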
+ if ((a->shift != 0) && (a->size < 2 || u)) {
+ int esize = 8 << a->size;
+ widen_mask = MAKE_64BIT_MASK(0, esize);
+ widen_mask >>= esize - a->shift;
+ widen_mask = dup_const(a->size + 1, widen_mask);
+ }
+
+ rm0 = tcg_temp_new_i32();
+ rm1 = tcg_temp_new_i32();
+ read_neon_element32(rm0, a->vm, 0, MO_32);
+ read_neon_element32(rm1, a->vm, 1, MO_32);
+ tmp = tcg_temp_new_i64();
+
+ widenfn(tmp, rm0);
+ tcg_temp_free_i32(rm0);
+ if (a->shift != 0) {
+ tcg_gen_shli_i64(tmp, tmp, a->shift);
+ tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
+ }
+ write_neon_element64(tmp, a->vd, 0, MO_64);
+
+ widenfn(tmp, rm1);
+ tcg_temp_free_i32(rm1);
+ if (a->shift != 0) {
+ tcg_gen_shli_i64(tmp, tmp, a->shift);
+ tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
+ }
+ write_neon_element64(tmp, a->vd, 1, MO_64);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_s8,
+ gen_helper_neon_widen_s16,
+ tcg_gen_ext_i32_i64,
+ };
+ return do_vshll_2sh(s, a, widenfn[a->size], false);
+}
+
+static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ };
+ return do_vshll_2sh(s, a, widenfn[a->size], true);
+}
+
+static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
+ gen_helper_gvec_2_ptr *fn)
+{
+ /* FP operations in 2-reg-and-shift group */
+ int vec_size = a->q ? 16 : 8;
+ int rd_ofs = neon_full_reg_offset(a->vd);
+ int rm_ofs = neon_full_reg_offset(a->vm);
+ TCGv_ptr fpst;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(a->size == MO_16 ? FPST_STD_F16 : FPST_STD);
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, vec_size, vec_size, a->shift, fn);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+#define DO_FP_2SH(INSN, FUNC) \
+ static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
+ { \
+ return do_fp_2sh(s, a, FUNC); \
+ }
+
+DO_FP_2SH(VCVT_SF, gen_helper_gvec_vcvt_sf)
+DO_FP_2SH(VCVT_UF, gen_helper_gvec_vcvt_uf)
+DO_FP_2SH(VCVT_FS, gen_helper_gvec_vcvt_fs)
+DO_FP_2SH(VCVT_FU, gen_helper_gvec_vcvt_fu)
+
+DO_FP_2SH(VCVT_SH, gen_helper_gvec_vcvt_sh)
+DO_FP_2SH(VCVT_UH, gen_helper_gvec_vcvt_uh)
+DO_FP_2SH(VCVT_HS, gen_helper_gvec_vcvt_hs)
+DO_FP_2SH(VCVT_HU, gen_helper_gvec_vcvt_hu)
+
+static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
+ GVecGen2iFn *fn)
+{
+ uint64_t imm;
+ int reg_ofs, vec_size;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ reg_ofs = neon_full_reg_offset(a->vd);
+ vec_size = a->q ? 16 : 8;
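+ /*
+ * asimd_imm_const() implements the AdvSIMDExpandImm() pseudocode,
+ * expanding the 8-bit immediate into a 64-bit pattern per cmode/op.
+ */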
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
+ return true;
+}
+
+static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
+}
+
+static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
+{
+ /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
+ GVecGen2iFn *fn;
+
+ if ((a->cmode & 1) && a->cmode < 12) {
+ /* for op=1, the imm will be inverted, so BIC becomes AND. */
+ fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
+ } else {
+ /* There is one unallocated cmode/op combination in this space */
+ if (a->cmode == 15 && a->op == 1) {
+ return false;
+ }
+ fn = gen_VMOV_1r;
+ }
+ return do_1reg_imm(s, a, fn);
+}
+
+static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
+ NeonGenWidenFn *widenfn,
+ NeonGenTwo64OpFn *opfn,
+ int src1_mop, int src2_mop)
+{
+ /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
+ TCGv_i64 rn0_64, rn1_64, rm_64;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn) {
+ /* size == 3 case, which is an entirely different insn group */
+ return false;
+ }
+
+ if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rn0_64 = tcg_temp_new_i64();
+ rn1_64 = tcg_temp_new_i64();
+ rm_64 = tcg_temp_new_i64();
+
+ if (src1_mop >= 0) {
+ read_neon_element64(rn0_64, a->vn, 0, src1_mop);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vn, 0, MO_32);
+ widenfn(rn0_64, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ if (src2_mop >= 0) {
+ read_neon_element64(rm_64, a->vm, 0, src2_mop);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vm, 0, MO_32);
+ widenfn(rm_64, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+
+ opfn(rn0_64, rn0_64, rm_64);
+
+ /*
+ * Load second pass inputs before storing the first pass result, to
+ * avoid incorrect results if a narrow input overlaps with the result.
+ */
+ if (src1_mop >= 0) {
+ read_neon_element64(rn1_64, a->vn, 1, src1_mop);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vn, 1, MO_32);
+ widenfn(rn1_64, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ if (src2_mop >= 0) {
+ read_neon_element64(rm_64, a->vm, 1, src2_mop);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vm, 1, MO_32);
+ widenfn(rm_64, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
+
+ opfn(rn1_64, rn1_64, rm_64);
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
+
+ tcg_temp_free_i64(rn0_64);
+ tcg_temp_free_i64(rn1_64);
+ tcg_temp_free_i64(rm_64);
+
+ return true;
+}
+
+#define DO_PREWIDEN(INSN, S, OP, SRC1WIDE, SIGN) \
+ static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
+ { \
+ static NeonGenWidenFn * const widenfn[] = { \
+ gen_helper_neon_widen_##S##8, \
+ gen_helper_neon_widen_##S##16, \
+ NULL, NULL, \
+ }; \
+ static NeonGenTwo64OpFn * const addfn[] = { \
+ gen_helper_neon_##OP##l_u16, \
+ gen_helper_neon_##OP##l_u32, \
+ tcg_gen_##OP##_i64, \
+ NULL, \
+ }; \
+ int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
+ return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
+ SRC1WIDE ? MO_Q : narrow_mop, \
+ narrow_mop); \
+ }
+
+DO_PREWIDEN(VADDL_S, s, add, false, MO_SIGN)
+DO_PREWIDEN(VADDL_U, u, add, false, 0)
+DO_PREWIDEN(VSUBL_S, s, sub, false, MO_SIGN)
+DO_PREWIDEN(VSUBL_U, u, sub, false, 0)
+DO_PREWIDEN(VADDW_S, s, add, true, MO_SIGN)
+DO_PREWIDEN(VADDW_U, u, add, true, 0)
+DO_PREWIDEN(VSUBW_S, s, sub, true, MO_SIGN)
+DO_PREWIDEN(VSUBW_U, u, sub, true, 0)
+
+static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
+ NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
+{
+ /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
+ TCGv_i64 rn_64, rm_64;
+ TCGv_i32 rd0, rd1;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn || !narrowfn) {
+ /* size == 3 case, which is an entirely different insn group */
+ return false;
+ }
+
+ if ((a->vn | a->vm) & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rn_64 = tcg_temp_new_i64();
+ rm_64 = tcg_temp_new_i64();
+ rd0 = tcg_temp_new_i32();
+ rd1 = tcg_temp_new_i32();
+
+ read_neon_element64(rn_64, a->vn, 0, MO_64);
+ read_neon_element64(rm_64, a->vm, 0, MO_64);
+
+ opfn(rn_64, rn_64, rm_64);
+
+ narrowfn(rd0, rn_64);
+
+ read_neon_element64(rn_64, a->vn, 1, MO_64);
+ read_neon_element64(rm_64, a->vm, 1, MO_64);
+
+ opfn(rn_64, rn_64, rm_64);
+
+ narrowfn(rd1, rn_64);
+
+ write_neon_element32(rd0, a->vd, 0, MO_32);
+ write_neon_element32(rd1, a->vd, 1, MO_32);
+
+ tcg_temp_free_i32(rd0);
+ tcg_temp_free_i32(rd1);
+ tcg_temp_free_i64(rn_64);
+ tcg_temp_free_i64(rm_64);
+
+ return true;
+}
+
+#define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP) \
+ static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
+ { \
+ static NeonGenTwo64OpFn * const addfn[] = { \
+ gen_helper_neon_##OP##l_u16, \
+ gen_helper_neon_##OP##l_u32, \
+ tcg_gen_##OP##_i64, \
+ NULL, \
+ }; \
+ static NeonGenNarrowFn * const narrowfn[] = { \
+ gen_helper_neon_##NARROWTYPE##_high_u8, \
+ gen_helper_neon_##NARROWTYPE##_high_u16, \
+ EXTOP, \
+ NULL, \
+ }; \
+ return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]); \
+ }
+
+static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
+{
+ tcg_gen_addi_i64(rn, rn, 1u << 31);
+ tcg_gen_extrh_i64_i32(rd, rn);
+}
+
+DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
+DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
+DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
+DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
+
+static bool do_long_3d(DisasContext *s, arg_3diff *a,
+ NeonGenTwoOpWidenFn *opfn,
+ NeonGenTwo64OpFn *accfn)
+{
+ /*
+ * 3-regs different lengths, long operations.
+ * These perform an operation on two inputs that returns a double-width
+ * result, and then possibly perform an accumulation operation of
+ * that result into the double-width destination.
+ */
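+ /*
+ * Concretely: Qd[n] = opfn(Dn[n], Dm[n]) for n in {0, 1}, followed by
+ * an optional Qd[n] = accfn(old Qd[n], Qd[n]) accumulation step.
+ */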
+ TCGv_i64 rd0, rd1, tmp;
+ TCGv_i32 rn, rm;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn) {
+ /* size == 3 case, which is an entirely different insn group */
+ return false;
+ }
+
+ if (a->vd & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rd0 = tcg_temp_new_i64();
+ rd1 = tcg_temp_new_i64();
+
+ rn = tcg_temp_new_i32();
+ rm = tcg_temp_new_i32();
+ read_neon_element32(rn, a->vn, 0, MO_32);
+ read_neon_element32(rm, a->vm, 0, MO_32);
+ opfn(rd0, rn, rm);
+
+ read_neon_element32(rn, a->vn, 1, MO_32);
+ read_neon_element32(rm, a->vm, 1, MO_32);
+ opfn(rd1, rn, rm);
+ tcg_temp_free_i32(rn);
+ tcg_temp_free_i32(rm);
+
+ /* Don't store results until after all loads: they might overlap */
+ if (accfn) {
+ tmp = tcg_temp_new_i64();
+ read_neon_element64(tmp, a->vd, 0, MO_64);
+ accfn(rd0, tmp, rd0);
+ read_neon_element64(tmp, a->vd, 1, MO_64);
+ accfn(rd1, tmp, rd1);
+ tcg_temp_free_i64(tmp);
+ }
+
+ write_neon_element64(rd0, a->vd, 0, MO_64);
+ write_neon_element64(rd1, a->vd, 1, MO_64);
+ tcg_temp_free_i64(rd0);
+ tcg_temp_free_i64(rd1);
+
+ return true;
+}
+
+static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_abdl_s16,
+ gen_helper_neon_abdl_s32,
+ gen_helper_neon_abdl_s64,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_abdl_u16,
+ gen_helper_neon_abdl_u32,
+ gen_helper_neon_abdl_u64,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_abdl_s16,
+ gen_helper_neon_abdl_s32,
+ gen_helper_neon_abdl_s64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const addfn[] = {
+ gen_helper_neon_addl_u16,
+ gen_helper_neon_addl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
+}
+
+static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_abdl_u16,
+ gen_helper_neon_abdl_u32,
+ gen_helper_neon_abdl_u64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const addfn[] = {
+ gen_helper_neon_addl_u16,
+ gen_helper_neon_addl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
+}
+
+static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+
+ tcg_gen_muls2_i32(lo, hi, rn, rm);
+ tcg_gen_concat_i32_i64(rd, lo, hi);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+}
+
+static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+
+ tcg_gen_mulu2_i32(lo, hi, rn, rm);
+ tcg_gen_concat_i32_i64(rd, lo, hi);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+}
+
+static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_mull_s8,
+ gen_helper_neon_mull_s16,
+ gen_mull_s32,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ gen_helper_neon_mull_u8,
+ gen_helper_neon_mull_u16,
+ gen_mull_u32,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+#define DO_VMLAL(INSN,MULL,ACC) \
+ static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
+ { \
+ static NeonGenTwoOpWidenFn * const opfn[] = { \
+ gen_helper_neon_##MULL##8, \
+ gen_helper_neon_##MULL##16, \
+ gen_##MULL##32, \
+ NULL, \
+ }; \
+ static NeonGenTwo64OpFn * const accfn[] = { \
+ gen_helper_neon_##ACC##l_u16, \
+ gen_helper_neon_##ACC##l_u32, \
+ tcg_gen_##ACC##_i64, \
+ NULL, \
+ }; \
+ return do_long_3d(s, a, opfn[a->size], accfn[a->size]); \
+ }
+
+DO_VMLAL(VMLAL_S,mull_s,add)
+DO_VMLAL(VMLAL_U,mull_u,add)
+DO_VMLAL(VMLSL_S,mull_s,sub)
+DO_VMLAL(VMLSL_U,mull_u,sub)
+
+static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+ gen_helper_neon_mull_s16(rd, rn, rm);
+ gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
+}
+
+static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
+{
+ gen_mull_s32(rd, rn, rm);
+ gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
+}
+
+static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+}
+
+static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+}
+
+static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ NULL,
+ gen_VQDMLAL_acc_16,
+ gen_VQDMLAL_acc_32,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ gen_helper_neon_negl_u32(rm, rm);
+ gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
+}
+
+static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ tcg_gen_neg_i64(rm, rm);
+ gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
+}
+
+static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ NULL,
+ gen_VQDMLSL_acc_16,
+ gen_VQDMLSL_acc_32,
+ NULL,
+ };
+
+ return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
+{
+ gen_helper_gvec_3 *fn_gvec;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & 1) {
+ return false;
+ }
+
+ switch (a->size) {
+ case 0:
+ fn_gvec = gen_helper_neon_pmull_h;
+ break;
+ case 2:
+ if (!dc_isar_feature(aa32_pmull, s)) {
+ return false;
+ }
+ fn_gvec = gen_helper_gvec_pmull_q;
+ break;
+ default:
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tcg_gen_gvec_3_ool(neon_full_reg_offset(a->vd),
+ neon_full_reg_offset(a->vn),
+ neon_full_reg_offset(a->vm),
+ 16, 16, 0, fn_gvec);
+ return true;
+}
+
+static void gen_neon_dup_low16(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(var, var);
+ tcg_gen_shli_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_neon_dup_high16(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(var, var, 0xffff0000);
+ tcg_gen_shri_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
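+/*
+ * Return the scalar operand, duplicating a 16-bit scalar into both
+ * halves of the returned 32-bit value so that the 32-bit elementwise
+ * helpers can be reused unchanged.
+ */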
+static inline TCGv_i32 neon_get_scalar(int size, int reg)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ if (size == MO_16) {
+ read_neon_element32(tmp, reg & 7, reg >> 4, MO_32);
+ if (reg & 8) {
+ gen_neon_dup_high16(tmp);
+ } else {
+ gen_neon_dup_low16(tmp);
+ }
+ } else {
+ read_neon_element32(tmp, reg & 15, reg >> 4, MO_32);
+ }
+ return tmp;
+}
+
+static bool do_2scalar(DisasContext *s, arg_2scalar *a,
+ NeonGenTwoOpFn *opfn, NeonGenTwoOpFn *accfn)
+{
+ /*
+ * Two registers and a scalar: perform an operation between
+ * the input elements and the scalar, and then possibly
+ * perform an accumulation operation of that result into the
+ * destination.
+ */
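+ /*
+ * Per 32-bit pass: tmp = opfn(Dn[pass], scalar), then
+ * Dd[pass] = accfn ? accfn(old Dd[pass], tmp) : tmp.
+ */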
+ TCGv_i32 scalar, tmp;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn) {
+ /* Bad size (including size == 3, which is a different insn group) */
+ return false;
+ }
+
+ if (a->q && ((a->vd | a->vn) & 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ scalar = neon_get_scalar(a->size, a->vm);
+ tmp = tcg_temp_new_i32();
+
+ for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
+ read_neon_element32(tmp, a->vn, pass, MO_32);
+ opfn(tmp, tmp, scalar);
+ if (accfn) {
+ TCGv_i32 rd = tcg_temp_new_i32();
+ read_neon_element32(rd, a->vd, pass, MO_32);
+ accfn(tmp, rd, tmp);
+ tcg_temp_free_i32(rd);
+ }
+ write_neon_element32(tmp, a->vd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(scalar);
+ return true;
+}
+
+static bool trans_VMUL_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpFn * const opfn[] = {
+ NULL,
+ gen_helper_neon_mul_u16,
+ tcg_gen_mul_i32,
+ NULL,
+ };
+
+ return do_2scalar(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VMLA_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpFn * const opfn[] = {
+ NULL,
+ gen_helper_neon_mul_u16,
+ tcg_gen_mul_i32,
+ NULL,
+ };
+ static NeonGenTwoOpFn * const accfn[] = {
+ NULL,
+ gen_helper_neon_add_u16,
+ tcg_gen_add_i32,
+ NULL,
+ };
+
+ return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool trans_VMLS_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpFn * const opfn[] = {
+ NULL,
+ gen_helper_neon_mul_u16,
+ tcg_gen_mul_i32,
+ NULL,
+ };
+ static NeonGenTwoOpFn * const accfn[] = {
+ NULL,
+ gen_helper_neon_sub_u16,
+ tcg_gen_sub_i32,
+ NULL,
+ };
+
+ return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a,
+ gen_helper_gvec_3_ptr *fn)
+{
+ /* Two registers and a scalar, using gvec */
+ int vec_size = a->q ? 16 : 8;
+ int rd_ofs = neon_full_reg_offset(a->vd);
+ int rn_ofs = neon_full_reg_offset(a->vn);
+ int rm_ofs;
+ int idx;
+ TCGv_ptr fpstatus;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!fn) {
+ /* Bad size (including size == 3, which is a different insn group) */
+ return false;
+ }
+
+ if (a->q && ((a->vd | a->vn) & 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* a->vm is M:Vm, which encodes both register and index */
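+ /*
+ * e.g. for MO_32 the index is effectively bit 4 and the register is
+ * bits [3:0]; for MO_16 the index is bits [4:3] and the register is
+ * bits [2:0], keeping the scalar within D0-D7.
+ */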
+ idx = extract32(a->vm, a->size + 2, 2);
+ a->vm = extract32(a->vm, 0, a->size + 2);
+ rm_ofs = neon_full_reg_offset(a->vm);
+
+ fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD);
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus,
+ vec_size, vec_size, idx, fn);
+ tcg_temp_free_ptr(fpstatus);
+ return true;
+}
+
+#define DO_VMUL_F_2sc(NAME, FUNC) \
+ static bool trans_##NAME##_F_2sc(DisasContext *s, arg_2scalar *a) \
+ { \
+ static gen_helper_gvec_3_ptr * const opfn[] = { \
+ NULL, \
+ gen_helper_##FUNC##_h, \
+ gen_helper_##FUNC##_s, \
+ NULL, \
+ }; \
+ if (a->size == MO_16 && !dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ return do_2scalar_fp_vec(s, a, opfn[a->size]); \
+ }
+
+DO_VMUL_F_2sc(VMUL, gvec_fmul_idx)
+DO_VMUL_F_2sc(VMLA, gvec_fmla_nf_idx)
+DO_VMUL_F_2sc(VMLS, gvec_fmls_nf_idx)
+
+WRAP_ENV_FN(gen_VQDMULH_16, gen_helper_neon_qdmulh_s16)
+WRAP_ENV_FN(gen_VQDMULH_32, gen_helper_neon_qdmulh_s32)
+WRAP_ENV_FN(gen_VQRDMULH_16, gen_helper_neon_qrdmulh_s16)
+WRAP_ENV_FN(gen_VQRDMULH_32, gen_helper_neon_qrdmulh_s32)
+
+static bool trans_VQDMULH_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpFn * const opfn[] = {
+ NULL,
+ gen_VQDMULH_16,
+ gen_VQDMULH_32,
+ NULL,
+ };
+
+ return do_2scalar(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VQRDMULH_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpFn * const opfn[] = {
+ NULL,
+ gen_VQRDMULH_16,
+ gen_VQRDMULH_32,
+ NULL,
+ };
+
+ return do_2scalar(s, a, opfn[a->size], NULL);
+}
+
+static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a,
+ NeonGenThreeOpEnvFn *opfn)
+{
+ /*
+ * VQRDMLAH/VQRDMLSH: this is like do_2scalar, but the opfn
+ * performs a kind of fused op-then-accumulate using a helper
+ * function that takes all of rd, rn and the scalar at once.
+ */
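+ /* Per pass: Dd[pass] = opfn(env, Dn[pass], scalar, old Dd[pass]). */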
+ TCGv_i32 scalar, rn, rd;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_rdm, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn) {
+ /* Bad size (including size == 3, which is a different insn group) */
+ return false;
+ }
+
+ if (a->q && ((a->vd | a->vn) & 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ scalar = neon_get_scalar(a->size, a->vm);
+ rn = tcg_temp_new_i32();
+ rd = tcg_temp_new_i32();
+
+ for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
+ read_neon_element32(rn, a->vn, pass, MO_32);
+ read_neon_element32(rd, a->vd, pass, MO_32);
+ opfn(rd, cpu_env, rn, scalar, rd);
+ write_neon_element32(rd, a->vd, pass, MO_32);
+ }
+ tcg_temp_free_i32(rn);
+ tcg_temp_free_i32(rd);
+ tcg_temp_free_i32(scalar);
+
+ return true;
+}
+
+static bool trans_VQRDMLAH_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenThreeOpEnvFn *opfn[] = {
+ NULL,
+ gen_helper_neon_qrdmlah_s16,
+ gen_helper_neon_qrdmlah_s32,
+ NULL,
+ };
+ return do_vqrdmlah_2sc(s, a, opfn[a->size]);
+}
+
+static bool trans_VQRDMLSH_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenThreeOpEnvFn *opfn[] = {
+ NULL,
+ gen_helper_neon_qrdmlsh_s16,
+ gen_helper_neon_qrdmlsh_s32,
+ NULL,
+ };
+ return do_vqrdmlah_2sc(s, a, opfn[a->size]);
+}
+
+static bool do_2scalar_long(DisasContext *s, arg_2scalar *a,
+ NeonGenTwoOpWidenFn *opfn,
+ NeonGenTwo64OpFn *accfn)
+{
+ /*
+ * Two registers and a scalar, long operations: perform an
+ * operation on the input elements and the scalar which produces
+ * a double-width result, and then possibly perform an accumulation
+ * operation of that result into the destination.
+ */
+ TCGv_i32 scalar, rn;
+ TCGv_i64 rn0_64, rn1_64;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!opfn) {
+ /* Bad size (including size == 3, which is a different insn group) */
+ return false;
+ }
+
+ if (a->vd & 1) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ scalar = neon_get_scalar(a->size, a->vm);
+
+ /* Load all inputs before writing any outputs, in case of overlap */
+ rn = tcg_temp_new_i32();
+ read_neon_element32(rn, a->vn, 0, MO_32);
+ rn0_64 = tcg_temp_new_i64();
+ opfn(rn0_64, rn, scalar);
+
+ read_neon_element32(rn, a->vn, 1, MO_32);
+ rn1_64 = tcg_temp_new_i64();
+ opfn(rn1_64, rn, scalar);
+ tcg_temp_free_i32(rn);
+ tcg_temp_free_i32(scalar);
+
+ if (accfn) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ read_neon_element64(t64, a->vd, 0, MO_64);
+ accfn(rn0_64, t64, rn0_64);
+ read_neon_element64(t64, a->vd, 1, MO_64);
+ accfn(rn1_64, t64, rn1_64);
+ tcg_temp_free_i64(t64);
+ }
+
+ write_neon_element64(rn0_64, a->vd, 0, MO_64);
+ write_neon_element64(rn1_64, a->vd, 1, MO_64);
+ tcg_temp_free_i64(rn0_64);
+ tcg_temp_free_i64(rn1_64);
+ return true;
+}
+
+static bool trans_VMULL_S_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_helper_neon_mull_s16,
+ gen_mull_s32,
+ NULL,
+ };
+
+ return do_2scalar_long(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VMULL_U_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_helper_neon_mull_u16,
+ gen_mull_u32,
+ NULL,
+ };
+
+ return do_2scalar_long(s, a, opfn[a->size], NULL);
+}
+
+#define DO_VMLAL_2SC(INSN, MULL, ACC) \
+ static bool trans_##INSN##_2sc(DisasContext *s, arg_2scalar *a) \
+ { \
+ static NeonGenTwoOpWidenFn * const opfn[] = { \
+ NULL, \
+ gen_helper_neon_##MULL##16, \
+ gen_##MULL##32, \
+ NULL, \
+ }; \
+ static NeonGenTwo64OpFn * const accfn[] = { \
+ NULL, \
+ gen_helper_neon_##ACC##l_u32, \
+ tcg_gen_##ACC##_i64, \
+ NULL, \
+ }; \
+ return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]); \
+ }
+
+DO_VMLAL_2SC(VMLAL_S, mull_s, add)
+DO_VMLAL_2SC(VMLAL_U, mull_u, add)
+DO_VMLAL_2SC(VMLSL_S, mull_s, sub)
+DO_VMLAL_2SC(VMLSL_U, mull_u, sub)
+
+static bool trans_VQDMULL_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+
+ return do_2scalar_long(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VQDMLAL_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ NULL,
+ gen_VQDMLAL_acc_16,
+ gen_VQDMLAL_acc_32,
+ NULL,
+ };
+
+ return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool trans_VQDMLSL_2sc(DisasContext *s, arg_2scalar *a)
+{
+ static NeonGenTwoOpWidenFn * const opfn[] = {
+ NULL,
+ gen_VQDMULL_16,
+ gen_VQDMULL_32,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ NULL,
+ gen_VQDMLSL_acc_16,
+ gen_VQDMLSL_acc_32,
+ NULL,
+ };
+
+ return do_2scalar_long(s, a, opfn[a->size], accfn[a->size]);
+}
+
+static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vn | a->vm | a->vd) & a->q) {
+ return false;
+ }
+
+ if (a->imm > 7 && !a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (!a->q) {
+ /* Extract 64 bits from <Vm:Vn> */
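+ /*
+ * tcg_gen_extract2_i64(d, lo, hi, ofs) writes bits [ofs+63:ofs] of
+ * the 128-bit value hi:lo to d, so this computes the required
+ * (Vm:Vn) >> (imm * 8).
+ */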
+ TCGv_i64 left, right, dest;
+
+ left = tcg_temp_new_i64();
+ right = tcg_temp_new_i64();
+ dest = tcg_temp_new_i64();
+
+ read_neon_element64(right, a->vn, 0, MO_64);
+ read_neon_element64(left, a->vm, 0, MO_64);
+ tcg_gen_extract2_i64(dest, right, left, a->imm * 8);
+ write_neon_element64(dest, a->vd, 0, MO_64);
+
+ tcg_temp_free_i64(left);
+ tcg_temp_free_i64(right);
+ tcg_temp_free_i64(dest);
+ } else {
+ /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */
+ TCGv_i64 left, middle, right, destleft, destright;
+
+ left = tcg_temp_new_i64();
+ middle = tcg_temp_new_i64();
+ right = tcg_temp_new_i64();
+ destleft = tcg_temp_new_i64();
+ destright = tcg_temp_new_i64();
+
+ if (a->imm < 8) {
+ read_neon_element64(right, a->vn, 0, MO_64);
+ read_neon_element64(middle, a->vn, 1, MO_64);
+ tcg_gen_extract2_i64(destright, right, middle, a->imm * 8);
+ read_neon_element64(left, a->vm, 0, MO_64);
+ tcg_gen_extract2_i64(destleft, middle, left, a->imm * 8);
+ } else {
+ read_neon_element64(right, a->vn, 1, MO_64);
+ read_neon_element64(middle, a->vm, 0, MO_64);
+ tcg_gen_extract2_i64(destright, right, middle, (a->imm - 8) * 8);
+ read_neon_element64(left, a->vm, 1, MO_64);
+ tcg_gen_extract2_i64(destleft, middle, left, (a->imm - 8) * 8);
+ }
+
+ write_neon_element64(destright, a->vd, 0, MO_64);
+ write_neon_element64(destleft, a->vd, 1, MO_64);
+
+ tcg_temp_free_i64(destright);
+ tcg_temp_free_i64(destleft);
+ tcg_temp_free_i64(right);
+ tcg_temp_free_i64(middle);
+ tcg_temp_free_i64(left);
+ }
+ return true;
+}
+
+static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
+{
+ TCGv_i64 val, def;
+ TCGv_i32 desc;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vn + a->len + 1) > 32) {
+ /*
+ * This is UNPREDICTABLE; we choose to UNDEF to avoid the
+ * helper function running off the end of the register file.
+ */
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
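+ /*
+ * The descriptor packs the first table register number and the table
+ * length (len, i.e. number of table registers minus one) for the helper.
+ */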
+ desc = tcg_const_i32((a->vn << 2) | a->len);
+ def = tcg_temp_new_i64();
+ if (a->op) {
+ read_neon_element64(def, a->vd, 0, MO_64);
+ } else {
+ tcg_gen_movi_i64(def, 0);
+ }
+ val = tcg_temp_new_i64();
+ read_neon_element64(val, a->vm, 0, MO_64);
+
+ gen_helper_neon_tbl(val, cpu_env, desc, val, def);
+ write_neon_element64(val, a->vd, 0, MO_64);
+
+ tcg_temp_free_i64(def);
+ tcg_temp_free_i64(val);
+ tcg_temp_free_i32(desc);
+ return true;
+}
+
+static bool trans_VDUP_scalar(DisasContext *s, arg_VDUP_scalar *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tcg_gen_gvec_dup_mem(a->size, neon_full_reg_offset(a->vd),
+ neon_element_offset(a->vm, a->index, a->size),
+ a->q ? 16 : 8, a->q ? 16 : 8);
+ return true;
+}
+
+static bool trans_VREV64(DisasContext *s, arg_VREV64 *a)
+{
+ int pass, half;
+ TCGv_i32 tmp[2];
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (a->size == 3) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp[0] = tcg_temp_new_i32();
+ tmp[1] = tcg_temp_new_i32();
+
+ for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
+ for (half = 0; half < 2; half++) {
+ read_neon_element32(tmp[half], a->vm, pass * 2 + half, MO_32);
+ switch (a->size) {
+ case 0:
+ tcg_gen_bswap32_i32(tmp[half], tmp[half]);
+ break;
+ case 1:
+ gen_swap_half(tmp[half], tmp[half]);
+ break;
+ case 2:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ write_neon_element32(tmp[1], a->vd, pass * 2, MO_32);
+ write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32);
+ }
+
+ tcg_temp_free_i32(tmp[0]);
+ tcg_temp_free_i32(tmp[1]);
+ return true;
+}
+
+static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a,
+ NeonGenWidenFn *widenfn,
+ NeonGenTwo64OpFn *opfn,
+ NeonGenTwo64OpFn *accfn)
+{
+ /*
+ * Pairwise long operations: widen both halves of the pair,
+ * combine the pairs with the opfn, and then possibly accumulate
+ * into the destination with the accfn.
+ */
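+ /*
+ * Per 64-bit pass: Dd[pass] = opfn(widen(Dm[2*pass]),
+ * widen(Dm[2*pass + 1])), optionally accumulated with accfn.
+ */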
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (!widenfn) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ for (pass = 0; pass < a->q + 1; pass++) {
+ TCGv_i32 tmp;
+ TCGv_i64 rm0_64, rm1_64, rd_64;
+
+ rm0_64 = tcg_temp_new_i64();
+ rm1_64 = tcg_temp_new_i64();
+ rd_64 = tcg_temp_new_i64();
+
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vm, pass * 2, MO_32);
+ widenfn(rm0_64, tmp);
+ read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32);
+ widenfn(rm1_64, tmp);
+ tcg_temp_free_i32(tmp);
+
+ opfn(rd_64, rm0_64, rm1_64);
+ tcg_temp_free_i64(rm0_64);
+ tcg_temp_free_i64(rm1_64);
+
+ if (accfn) {
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ read_neon_element64(tmp64, a->vd, pass, MO_64);
+ accfn(rd_64, tmp64, rd_64);
+ tcg_temp_free_i64(tmp64);
+ }
+ write_neon_element64(rd_64, a->vd, pass, MO_64);
+ tcg_temp_free_i64(rd_64);
+ }
+ return true;
+}
+
+static bool trans_VPADDL_S(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_s8,
+ gen_helper_neon_widen_s16,
+ tcg_gen_ext_i32_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const opfn[] = {
+ gen_helper_neon_paddl_u16,
+ gen_helper_neon_paddl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
+}
+
+static bool trans_VPADDL_U(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const opfn[] = {
+ gen_helper_neon_paddl_u16,
+ gen_helper_neon_paddl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size], NULL);
+}
+
+static bool trans_VPADAL_S(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_s8,
+ gen_helper_neon_widen_s16,
+ tcg_gen_ext_i32_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const opfn[] = {
+ gen_helper_neon_paddl_u16,
+ gen_helper_neon_paddl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ gen_helper_neon_addl_u16,
+ gen_helper_neon_addl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
+ accfn[a->size]);
+}
+
+static bool trans_VPADAL_U(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenWidenFn * const widenfn[] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const opfn[] = {
+ gen_helper_neon_paddl_u16,
+ gen_helper_neon_paddl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+ static NeonGenTwo64OpFn * const accfn[] = {
+ gen_helper_neon_addl_u16,
+ gen_helper_neon_addl_u32,
+ tcg_gen_add_i64,
+ NULL,
+ };
+
+ return do_2misc_pairwise(s, a, widenfn[a->size], opfn[a->size],
+ accfn[a->size]);
+}
+
+typedef void ZipFn(TCGv_ptr, TCGv_ptr);
+
+static bool do_zip_uzp(DisasContext *s, arg_2misc *a,
+ ZipFn *fn)
+{
+ TCGv_ptr pd, pm;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (!fn) {
+ /* Bad size or size/q combination */
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ pd = vfp_reg_ptr(true, a->vd);
+ pm = vfp_reg_ptr(true, a->vm);
+ fn(pd, pm);
+ tcg_temp_free_ptr(pd);
+ tcg_temp_free_ptr(pm);
+ return true;
+}
+
+static bool trans_VUZP(DisasContext *s, arg_2misc *a)
+{
+ static ZipFn * const fn[2][4] = {
+ {
+ gen_helper_neon_unzip8,
+ gen_helper_neon_unzip16,
+ NULL,
+ NULL,
+ }, {
+ gen_helper_neon_qunzip8,
+ gen_helper_neon_qunzip16,
+ gen_helper_neon_qunzip32,
+ NULL,
+ }
+ };
+ return do_zip_uzp(s, a, fn[a->q][a->size]);
+}
+
+static bool trans_VZIP(DisasContext *s, arg_2misc *a)
+{
+ static ZipFn * const fn[2][4] = {
+ {
+ gen_helper_neon_zip8,
+ gen_helper_neon_zip16,
+ NULL,
+ NULL,
+ }, {
+ gen_helper_neon_qzip8,
+ gen_helper_neon_qzip16,
+ gen_helper_neon_qzip32,
+ NULL,
+ }
+ };
+ return do_zip_uzp(s, a, fn[a->q][a->size]);
+}
+
+static bool do_vmovn(DisasContext *s, arg_2misc *a,
+ NeonGenNarrowEnvFn *narrowfn)
+{
+ TCGv_i64 rm;
+ TCGv_i32 rd0, rd1;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vm & 1) {
+ return false;
+ }
+
+ if (!narrowfn) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rm = tcg_temp_new_i64();
+ rd0 = tcg_temp_new_i32();
+ rd1 = tcg_temp_new_i32();
+
+ read_neon_element64(rm, a->vm, 0, MO_64);
+ narrowfn(rd0, cpu_env, rm);
+ read_neon_element64(rm, a->vm, 1, MO_64);
+ narrowfn(rd1, cpu_env, rm);
+ write_neon_element32(rd0, a->vd, 0, MO_32);
+ write_neon_element32(rd1, a->vd, 1, MO_32);
+ tcg_temp_free_i32(rd0);
+ tcg_temp_free_i32(rd1);
+ tcg_temp_free_i64(rm);
+ return true;
+}
+
+#define DO_VMOVN(INSN, FUNC) \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ static NeonGenNarrowEnvFn * const narrowfn[] = { \
+ FUNC##8, \
+ FUNC##16, \
+ FUNC##32, \
+ NULL, \
+ }; \
+ return do_vmovn(s, a, narrowfn[a->size]); \
+ }
+
+DO_VMOVN(VMOVN, gen_neon_narrow_u)
+DO_VMOVN(VQMOVUN, gen_helper_neon_unarrow_sat)
+DO_VMOVN(VQMOVN_S, gen_helper_neon_narrow_sat_s)
+DO_VMOVN(VQMOVN_U, gen_helper_neon_narrow_sat_u)
+
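+/*
+ * VSHLL (two-reg-misc form): widen each element of the source
+ * doubleword, then shift left by the original element size, e.g.
+ * each u8 lane becomes a u16 lane shifted left by 8.
+ */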
+static bool trans_VSHLL(DisasContext *s, arg_2misc *a)
+{
+ TCGv_i32 rm0, rm1;
+ TCGv_i64 rd;
+ static NeonGenWidenFn * const widenfns[] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ NULL,
+ };
+ NeonGenWidenFn *widenfn = widenfns[a->size];
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & 1) {
+ return false;
+ }
+
+ if (!widenfn) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rd = tcg_temp_new_i64();
+ rm0 = tcg_temp_new_i32();
+ rm1 = tcg_temp_new_i32();
+
+ read_neon_element32(rm0, a->vm, 0, MO_32);
+ read_neon_element32(rm1, a->vm, 1, MO_32);
+
+ widenfn(rd, rm0);
+ tcg_gen_shli_i64(rd, rd, 8 << a->size);
+ write_neon_element64(rd, a->vd, 0, MO_64);
+ widenfn(rd, rm1);
+ tcg_gen_shli_i64(rd, rd, 8 << a->size);
+ write_neon_element64(rd, a->vd, 1, MO_64);
+
+ tcg_temp_free_i64(rd);
+ tcg_temp_free_i32(rm0);
+ tcg_temp_free_i32(rm1);
+ return true;
+}
+
+static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+ TCGv_i32 dst0, dst1;
+
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm & 1) || (a->size != 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_STD);
+ tmp = tcg_temp_new_i64();
+ dst0 = tcg_temp_new_i32();
+ dst1 = tcg_temp_new_i32();
+
+ read_neon_element64(tmp, a->vm, 0, MO_64);
+ gen_helper_bfcvt_pair(dst0, tmp, fpst);
+
+ read_neon_element64(tmp, a->vm, 1, MO_64);
+ gen_helper_bfcvt_pair(dst1, tmp, fpst);
+
+ write_neon_element32(dst0, a->vd, 0, MO_32);
+ write_neon_element32(dst1, a->vd, 1, MO_32);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(dst0);
+ tcg_temp_free_i32(dst1);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
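+/*
+ * VCVT.F16.F32: narrow four f32 elements of Vm to f16 and pack them
+ * in pairs into the two words of Dd. All four source elements are
+ * read before the first result is written, so an overlap between
+ * Vd and Vm is handled correctly.
+ */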
+static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp, tmp, tmp2, tmp3;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
+ !dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vm & 1) || (a->size != 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_STD);
+ ahp = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vm, 0, MO_32);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp2 = tcg_temp_new_i32();
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp2, tmp2, fpst, ahp);
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp2, tmp2, tmp);
+ read_neon_element32(tmp, a->vm, 2, MO_32);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp);
+ tmp3 = tcg_temp_new_i32();
+ read_neon_element32(tmp3, a->vm, 3, MO_32);
+ write_neon_element32(tmp2, a->vd, 0, MO_32);
+ tcg_temp_free_i32(tmp2);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp);
+ tcg_gen_shli_i32(tmp3, tmp3, 16);
+ tcg_gen_or_i32(tmp3, tmp3, tmp);
+ write_neon_element32(tmp3, a->vd, 1, MO_32);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(ahp);
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
+
+static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp, tmp, tmp2, tmp3;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON) ||
+ !dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vd & 1) || (a->size != 1)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_STD);
+ ahp = get_ahp_flag();
+ tmp3 = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vm, 0, MO_32);
+ read_neon_element32(tmp2, a->vm, 1, MO_32);
+ tcg_gen_ext16u_i32(tmp3, tmp);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ write_neon_element32(tmp3, a->vd, 0, MO_32);
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp);
+ write_neon_element32(tmp, a->vd, 1, MO_32);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_ext16u_i32(tmp3, tmp2);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp);
+ write_neon_element32(tmp3, a->vd, 2, MO_32);
+ tcg_temp_free_i32(tmp3);
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp);
+ write_neon_element32(tmp2, a->vd, 3, MO_32);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(ahp);
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
+
+static bool do_2misc_vec(DisasContext *s, arg_2misc *a, GVecGen2Fn *fn)
+{
+ int vec_size = a->q ? 16 : 8;
+ int rd_ofs = neon_full_reg_offset(a->vd);
+ int rm_ofs = neon_full_reg_offset(a->vm);
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->size == 3) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fn(a->size, rd_ofs, rm_ofs, vec_size, vec_size);
+
+ return true;
+}
+
+#define DO_2MISC_VEC(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ return do_2misc_vec(s, a, FN); \
+ }
+
+DO_2MISC_VEC(VNEG, tcg_gen_gvec_neg)
+DO_2MISC_VEC(VABS, tcg_gen_gvec_abs)
+DO_2MISC_VEC(VCEQ0, gen_gvec_ceq0)
+DO_2MISC_VEC(VCGT0, gen_gvec_cgt0)
+DO_2MISC_VEC(VCLE0, gen_gvec_cle0)
+DO_2MISC_VEC(VCGE0, gen_gvec_cge0)
+DO_2MISC_VEC(VCLT0, gen_gvec_clt0)
+
+static bool trans_VMVN(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_2misc_vec(s, a, tcg_gen_gvec_not);
+}
+
+#define WRAP_2M_3_OOL_FN(WRAPNAME, FUNC, DATA) \
+ static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rm_ofs, uint32_t oprsz, \
+ uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_3_ool(rd_ofs, rd_ofs, rm_ofs, oprsz, maxsz, \
+ DATA, FUNC); \
+ }
+
+#define WRAP_2M_2_OOL_FN(WRAPNAME, FUNC, DATA) \
+ static void WRAPNAME(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rm_ofs, uint32_t oprsz, \
+ uint32_t maxsz) \
+ { \
+ tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, oprsz, maxsz, DATA, FUNC); \
+ }
+
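+/*
+ * The crypto helpers take the operation variant in the simd_data
+ * field: AESE and AESD share gen_helper_crypto_aese with data 0 and 1
+ * respectively, as do AESMC and AESIMC with gen_helper_crypto_aesmc.
+ */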
+WRAP_2M_3_OOL_FN(gen_AESE, gen_helper_crypto_aese, 0)
+WRAP_2M_3_OOL_FN(gen_AESD, gen_helper_crypto_aese, 1)
+WRAP_2M_2_OOL_FN(gen_AESMC, gen_helper_crypto_aesmc, 0)
+WRAP_2M_2_OOL_FN(gen_AESIMC, gen_helper_crypto_aesmc, 1)
+WRAP_2M_2_OOL_FN(gen_SHA1H, gen_helper_crypto_sha1h, 0)
+WRAP_2M_2_OOL_FN(gen_SHA1SU1, gen_helper_crypto_sha1su1, 0)
+WRAP_2M_2_OOL_FN(gen_SHA256SU0, gen_helper_crypto_sha256su0, 0)
+
+#define DO_2M_CRYPTO(INSN, FEATURE, SIZE) \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ if (!dc_isar_feature(FEATURE, s) || a->size != SIZE) { \
+ return false; \
+ } \
+ return do_2misc_vec(s, a, gen_##INSN); \
+ }
+
+DO_2M_CRYPTO(AESE, aa32_aes, 0)
+DO_2M_CRYPTO(AESD, aa32_aes, 0)
+DO_2M_CRYPTO(AESMC, aa32_aes, 0)
+DO_2M_CRYPTO(AESIMC, aa32_aes, 0)
+DO_2M_CRYPTO(SHA1H, aa32_sha1, 2)
+DO_2M_CRYPTO(SHA1SU1, aa32_sha1, 2)
+DO_2M_CRYPTO(SHA256SU0, aa32_sha2, 2)
+
+static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn)
+{
+ TCGv_i32 tmp;
+ int pass;
+
+ /* Handle a 2-reg-misc operation by iterating 32 bits at a time */
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!fn) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
+ read_neon_element32(tmp, a->vm, pass, MO_32);
+ fn(tmp, tmp);
+ write_neon_element32(tmp, a->vd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tmp);
+
+ return true;
+}
+
+static bool trans_VREV32(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenOneOpFn * const fn[] = {
+ tcg_gen_bswap32_i32,
+ gen_swap_half,
+ NULL,
+ NULL,
+ };
+ return do_2misc(s, a, fn[a->size]);
+}
+
+static bool trans_VREV16(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_2misc(s, a, gen_rev16);
+}
+
+static bool trans_VCLS(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenOneOpFn * const fn[] = {
+ gen_helper_neon_cls_s8,
+ gen_helper_neon_cls_s16,
+ gen_helper_neon_cls_s32,
+ NULL,
+ };
+ return do_2misc(s, a, fn[a->size]);
+}
+
+static void do_VCLZ_32(TCGv_i32 rd, TCGv_i32 rm)
+{
+ tcg_gen_clzi_i32(rd, rm, 32);
+}
+
+static bool trans_VCLZ(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenOneOpFn * const fn[] = {
+ gen_helper_neon_clz_u8,
+ gen_helper_neon_clz_u16,
+ do_VCLZ_32,
+ NULL,
+ };
+ return do_2misc(s, a, fn[a->size]);
+}
+
+static bool trans_VCNT(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 0) {
+ return false;
+ }
+ return do_2misc(s, a, gen_helper_neon_cnt_u8);
+}
+
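+/*
+ * Float VABS and VNEG only need to clear or flip the sign bit, so
+ * they are expanded inline as a vector AND/XOR with a sign-bit mask
+ * rather than calling a helper.
+ */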
+static void gen_VABS_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_andi(vece, rd_ofs, rm_ofs,
+ vece == MO_16 ? 0x7fff : 0x7fffffff,
+ oprsz, maxsz);
+}
+
+static bool trans_VABS_F(DisasContext *s, arg_2misc *a)
+{
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ } else if (a->size != MO_32) {
+ return false;
+ }
+ return do_2misc_vec(s, a, gen_VABS_F);
+}
+
+static void gen_VNEG_F(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_xori(vece, rd_ofs, rm_ofs,
+ vece == MO_16 ? 0x8000 : 0x80000000,
+ oprsz, maxsz);
+}
+
+static bool trans_VNEG_F(DisasContext *s, arg_2misc *a)
+{
+ if (a->size == MO_16) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+ } else if (a->size != MO_32) {
+ return false;
+ }
+ return do_2misc_vec(s, a, gen_VNEG_F);
+}
+
+static bool trans_VRECPE(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 2) {
+ return false;
+ }
+ return do_2misc(s, a, gen_helper_recpe_u32);
+}
+
+static bool trans_VRSQRTE(DisasContext *s, arg_2misc *a)
+{
+ if (a->size != 2) {
+ return false;
+ }
+ return do_2misc(s, a, gen_helper_rsqrte_u32);
+}
+
+#define WRAP_1OP_ENV_FN(WRAPNAME, FUNC) \
+ static void WRAPNAME(TCGv_i32 d, TCGv_i32 m) \
+ { \
+ FUNC(d, cpu_env, m); \
+ }
+
+WRAP_1OP_ENV_FN(gen_VQABS_s8, gen_helper_neon_qabs_s8)
+WRAP_1OP_ENV_FN(gen_VQABS_s16, gen_helper_neon_qabs_s16)
+WRAP_1OP_ENV_FN(gen_VQABS_s32, gen_helper_neon_qabs_s32)
+WRAP_1OP_ENV_FN(gen_VQNEG_s8, gen_helper_neon_qneg_s8)
+WRAP_1OP_ENV_FN(gen_VQNEG_s16, gen_helper_neon_qneg_s16)
+WRAP_1OP_ENV_FN(gen_VQNEG_s32, gen_helper_neon_qneg_s32)
+
+static bool trans_VQABS(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenOneOpFn * const fn[] = {
+ gen_VQABS_s8,
+ gen_VQABS_s16,
+ gen_VQABS_s32,
+ NULL,
+ };
+ return do_2misc(s, a, fn[a->size]);
+}
+
+static bool trans_VQNEG(DisasContext *s, arg_2misc *a)
+{
+ static NeonGenOneOpFn * const fn[] = {
+ gen_VQNEG_s8,
+ gen_VQNEG_s16,
+ gen_VQNEG_s32,
+ NULL,
+ };
+ return do_2misc(s, a, fn[a->size]);
+}
+
+#define DO_2MISC_FP_VEC(INSN, HFUNC, SFUNC) \
+ static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static gen_helper_gvec_2_ptr * const fns[4] = { \
+ NULL, HFUNC, SFUNC, NULL, \
+ }; \
+ TCGv_ptr fpst; \
+ fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD); \
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0, \
+ fns[vece]); \
+ tcg_temp_free_ptr(fpst); \
+ } \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ if (a->size == MO_16) { \
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ } else if (a->size != MO_32) { \
+ return false; \
+ } \
+ return do_2misc_vec(s, a, gen_##INSN); \
+ }
+
+DO_2MISC_FP_VEC(VRECPE_F, gen_helper_gvec_frecpe_h, gen_helper_gvec_frecpe_s)
+DO_2MISC_FP_VEC(VRSQRTE_F, gen_helper_gvec_frsqrte_h, gen_helper_gvec_frsqrte_s)
+DO_2MISC_FP_VEC(VCGT0_F, gen_helper_gvec_fcgt0_h, gen_helper_gvec_fcgt0_s)
+DO_2MISC_FP_VEC(VCGE0_F, gen_helper_gvec_fcge0_h, gen_helper_gvec_fcge0_s)
+DO_2MISC_FP_VEC(VCEQ0_F, gen_helper_gvec_fceq0_h, gen_helper_gvec_fceq0_s)
+DO_2MISC_FP_VEC(VCLT0_F, gen_helper_gvec_fclt0_h, gen_helper_gvec_fclt0_s)
+DO_2MISC_FP_VEC(VCLE0_F, gen_helper_gvec_fcle0_h, gen_helper_gvec_fcle0_s)
+DO_2MISC_FP_VEC(VCVT_FS, gen_helper_gvec_sstoh, gen_helper_gvec_sitos)
+DO_2MISC_FP_VEC(VCVT_FU, gen_helper_gvec_ustoh, gen_helper_gvec_uitos)
+DO_2MISC_FP_VEC(VCVT_SF, gen_helper_gvec_tosszh, gen_helper_gvec_tosizs)
+DO_2MISC_FP_VEC(VCVT_UF, gen_helper_gvec_touszh, gen_helper_gvec_touizs)
+
+DO_2MISC_FP_VEC(VRINTX_impl, gen_helper_gvec_vrintx_h, gen_helper_gvec_vrintx_s)
+
+static bool trans_VRINTX(DisasContext *s, arg_2misc *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+ return trans_VRINTX_impl(s, a);
+}
+
+#define DO_VEC_RMODE(INSN, RMODE, OP) \
+ static void gen_##INSN(unsigned vece, uint32_t rd_ofs, \
+ uint32_t rm_ofs, \
+ uint32_t oprsz, uint32_t maxsz) \
+ { \
+ static gen_helper_gvec_2_ptr * const fns[4] = { \
+ NULL, \
+ gen_helper_gvec_##OP##h, \
+ gen_helper_gvec_##OP##s, \
+ NULL, \
+ }; \
+ TCGv_ptr fpst; \
+ fpst = fpstatus_ptr(vece == 1 ? FPST_STD_F16 : FPST_STD); \
+ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, \
+ arm_rmode_to_sf(RMODE), fns[vece]); \
+ tcg_temp_free_ptr(fpst); \
+ } \
+ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \
+ { \
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) { \
+ return false; \
+ } \
+ if (a->size == MO_16) { \
+ if (!dc_isar_feature(aa32_fp16_arith, s)) { \
+ return false; \
+ } \
+ } else if (a->size != MO_32) { \
+ return false; \
+ } \
+ return do_2misc_vec(s, a, gen_##INSN); \
+ }
+
+DO_VEC_RMODE(VCVTAU, FPROUNDING_TIEAWAY, vcvt_rm_u)
+DO_VEC_RMODE(VCVTAS, FPROUNDING_TIEAWAY, vcvt_rm_s)
+DO_VEC_RMODE(VCVTNU, FPROUNDING_TIEEVEN, vcvt_rm_u)
+DO_VEC_RMODE(VCVTNS, FPROUNDING_TIEEVEN, vcvt_rm_s)
+DO_VEC_RMODE(VCVTPU, FPROUNDING_POSINF, vcvt_rm_u)
+DO_VEC_RMODE(VCVTPS, FPROUNDING_POSINF, vcvt_rm_s)
+DO_VEC_RMODE(VCVTMU, FPROUNDING_NEGINF, vcvt_rm_u)
+DO_VEC_RMODE(VCVTMS, FPROUNDING_NEGINF, vcvt_rm_s)
+
+DO_VEC_RMODE(VRINTN, FPROUNDING_TIEEVEN, vrint_rm_)
+DO_VEC_RMODE(VRINTA, FPROUNDING_TIEAWAY, vrint_rm_)
+DO_VEC_RMODE(VRINTZ, FPROUNDING_ZERO, vrint_rm_)
+DO_VEC_RMODE(VRINTM, FPROUNDING_NEGINF, vrint_rm_)
+DO_VEC_RMODE(VRINTP, FPROUNDING_POSINF, vrint_rm_)
+
+static bool trans_VSWP(DisasContext *s, arg_2misc *a)
+{
+ TCGv_i64 rm, rd;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (a->size != 0) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ rm = tcg_temp_new_i64();
+ rd = tcg_temp_new_i64();
+ for (pass = 0; pass < (a->q ? 2 : 1); pass++) {
+ read_neon_element64(rm, a->vm, pass, MO_64);
+ read_neon_element64(rd, a->vd, pass, MO_64);
+ write_neon_element64(rm, a->vd, pass, MO_64);
+ write_neon_element64(rd, a->vm, pass, MO_64);
+ }
+ tcg_temp_free_i64(rm);
+ tcg_temp_free_i64(rd);
+
+ return true;
+}
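+
+/*
+ * 2x2 transpose of the byte lanes of a register pair, as used by
+ * VTRN.8: with t0 = {a3,a2,a1,a0} and t1 = {b3,b2,b1,b0} (most
+ * significant byte first), the result is t0 = {a2,b2,a0,b0} and
+ * t1 = {a3,b3,a1,b1}.
+ */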
+static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 rd, tmp;
+
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+
+ tcg_gen_shli_i32(rd, t0, 8);
+ tcg_gen_andi_i32(rd, rd, 0xff00ff00);
+ tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
+ tcg_gen_or_i32(rd, rd, tmp);
+
+ tcg_gen_shri_i32(t1, t1, 8);
+ tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
+ tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
+ tcg_gen_or_i32(t1, t1, tmp);
+ tcg_gen_mov_i32(t0, rd);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
+}
+
+static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 rd, tmp;
+
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+
+ tcg_gen_shli_i32(rd, t0, 16);
+ tcg_gen_andi_i32(tmp, t1, 0xffff);
+ tcg_gen_or_i32(rd, rd, tmp);
+ tcg_gen_shri_i32(t1, t1, 16);
+ tcg_gen_andi_i32(tmp, t0, 0xffff0000);
+ tcg_gen_or_i32(t1, t1, tmp);
+ tcg_gen_mov_i32(t0, rd);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
+}
+
+static bool trans_VTRN(DisasContext *s, arg_2misc *a)
+{
+ TCGv_i32 tmp, tmp2;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if ((a->vd | a->vm) & a->q) {
+ return false;
+ }
+
+ if (a->size == 3) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ if (a->size == MO_32) {
+ for (pass = 0; pass < (a->q ? 4 : 2); pass += 2) {
+ read_neon_element32(tmp, a->vm, pass, MO_32);
+ read_neon_element32(tmp2, a->vd, pass + 1, MO_32);
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
+ write_neon_element32(tmp, a->vd, pass + 1, MO_32);
+ }
+ } else {
+ for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
+ read_neon_element32(tmp, a->vm, pass, MO_32);
+ read_neon_element32(tmp2, a->vd, pass, MO_32);
+ if (a->size == MO_8) {
+ gen_neon_trn_u8(tmp, tmp2);
+ } else {
+ gen_neon_trn_u16(tmp, tmp2);
+ }
+ write_neon_element32(tmp2, a->vm, pass, MO_32);
+ write_neon_element32(tmp, a->vd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ return true;
+}
+
+static bool trans_VSMMLA(DisasContext *s, arg_VSMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_smmla_b);
+}
+
+static bool trans_VUMMLA(DisasContext *s, arg_VUMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_ummla_b);
+}
+
+static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a)
+{
+ if (!dc_isar_feature(aa32_i8mm, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_usmmla_b);
+}
+
+static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a)
+{
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+ return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0,
+ gen_helper_gvec_bfmmla);
+}
+
+static bool trans_VFMA_b16(DisasContext *s, arg_VFMA_b16 *a)
+{
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, 7, a->vd, a->vn, a->vm, a->q, FPST_STD,
+ gen_helper_gvec_bfmlal);
+}
+
+static bool trans_VFMA_b16_scal(DisasContext *s, arg_VFMA_b16_scal *a)
+{
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+ return do_neon_ddda_fpst(s, 6, a->vd, a->vn, a->vm,
+ (a->index << 1) | a->q, FPST_STD,
+ gen_helper_gvec_bfmlal_idx);
+}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
new file mode 100644
index 000000000..76b5fe9f3
--- /dev/null
+++ b/target/arm/translate-sve.c
@@ -0,0 +1,8744 @@
+/*
+ * AArch64 SVE translation
+ *
+ * Copyright (c) 2018 Linaro, Ltd
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "qemu/log.h"
+#include "arm_ldst.h"
+#include "translate.h"
+#include "internals.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+#include "translate-a64.h"
+#include "fpu/softfloat.h"
+
+
+typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
+ TCGv_i64, uint32_t, uint32_t);
+
+typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+
+typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i64, TCGv_i32);
+
+/*
+ * Helpers for extracting complex instruction fields.
+ */
+
+/* See e.g. ASR (immediate, predicated).
+ * Returns -1 for unallocated encoding; diagnose later.
+ */
+static int tszimm_esz(DisasContext *s, int x)
+{
+ x >>= 3; /* discard imm3 */
+ return 31 - clz32(x);
+}
+
+static int tszimm_shr(DisasContext *s, int x)
+{
+ return (16 << tszimm_esz(s, x)) - x;
+}
+
+/* See e.g. LSL (immediate, predicated). */
+static int tszimm_shl(DisasContext *s, int x)
+{
+ return x - (8 << tszimm_esz(s, x));
+}
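+
+/*
+ * Worked example: x = 0b0100101 (37) has the leading set bit of
+ * x >> 3 at position 2, so tszimm_esz() returns 2 (32-bit elements);
+ * the encoded right shift is (16 << 2) - 37 = 27 and the encoded
+ * left shift is 37 - (8 << 2) = 5.
+ */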
+
+/* The SH bit is in bit 8. Extract the low 8 bits and shift. */
+static inline int expand_imm_sh8s(DisasContext *s, int x)
+{
+ return (int8_t)x << (x & 0x100 ? 8 : 0);
+}
+
+static inline int expand_imm_sh8u(DisasContext *s, int x)
+{
+ return (uint8_t)x << (x & 0x100 ? 8 : 0);
+}
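+
+/*
+ * For example, x = 0x1ab has the SH bit set, so the signed form
+ * yields (int8_t)0xab << 8 = -0x5500 while the unsigned form gives
+ * 0xab << 8 = 0xab00.
+ */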
+
+/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
+ * with unsigned data. C.f. SVE Memory Contiguous Load Group.
+ */
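+/*
+ * These are the diagonal entries dtype = msz * 5 of the dtype table,
+ * i.e. the unsigned loads whose memory element size matches the
+ * vector element size.
+ */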
+static inline int msz_dtype(DisasContext *s, int msz)
+{
+ static const uint8_t dtype[4] = { 0, 5, 10, 15 };
+ return dtype[msz];
+}
+
+/*
+ * Include the generated decoder.
+ */
+
+#include "decode-sve.c.inc"
+
+/*
+ * Implement all of the translator functions referenced by the decoder.
+ */
+
+/* Return the offset into CPUARMState of the predicate vector register Pn.
+ * Note for this purpose, FFR is P16.
+ */
+static inline int pred_full_reg_offset(DisasContext *s, int regno)
+{
+ return offsetof(CPUARMState, vfp.pregs[regno]);
+}
+
+/* Return the byte size of the whole predicate register, VL / 64. */
+static inline int pred_full_reg_size(DisasContext *s)
+{
+ return s->sve_len >> 3;
+}
+
+/* Round up the size of a register to a size allowed by
+ * the tcg vector infrastructure. Any operation which uses this
+ * size may assume that the bits above pred_full_reg_size are zero,
+ * and must leave them the same way.
+ *
+ * Note that this is not needed for the vector registers as they
+ * are always properly sized for tcg vectors.
+ */
+static int size_for_gvec(int size)
+{
+ if (size <= 8) {
+ return 8;
+ } else {
+ return QEMU_ALIGN_UP(size, 16);
+ }
+}
+
+static int pred_gvec_reg_size(DisasContext *s)
+{
+ return size_for_gvec(pred_full_reg_size(s));
+}
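+
+/*
+ * For example, at the minimum vector length of 128 bits the predicate
+ * registers are only 2 bytes, which size_for_gvec() rounds up to the
+ * 8-byte minimum that the gvec expanders accept.
+ */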
+
+/* Invoke an out-of-line helper on 2 Zregs. */
+static void gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
+ int rd, int rn, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vsz, vsz, data, fn);
+}
+
+/* Invoke an out-of-line helper on 3 Zregs. */
+static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
+ int rd, int rn, int rm, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vsz, vsz, data, fn);
+}
+
+/* Invoke an out-of-line helper on 4 Zregs. */
+static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
+ int rd, int rn, int rm, int ra, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ vsz, vsz, data, fn);
+}
+
+/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
+static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
+ int rd, int rn, int pg, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ pred_full_reg_offset(s, pg),
+ vsz, vsz, data, fn);
+}
+
+/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
+static void gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
+ int rd, int rn, int rm, int pg, int data)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ pred_full_reg_offset(s, pg),
+ vsz, vsz, data, fn);
+}
+
+/* Invoke a vector expander on two Zregs. */
+static void gen_gvec_fn_zz(DisasContext *s, GVecGen2Fn *gvec_fn,
+ int esz, int rd, int rn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn), vsz, vsz);
+}
+
+/* Invoke a vector expander on three Zregs. */
+static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
+ int esz, int rd, int rn, int rm)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm), vsz, vsz);
+}
+
+/* Invoke a vector expander on four Zregs. */
+static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
+ int esz, int rd, int rn, int rm, int ra)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(esz, vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra), vsz, vsz);
+}
+
+/* Invoke a vector move on two Zregs. */
+static bool do_mov_z(DisasContext *s, int rd, int rn)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zz(s, tcg_gen_gvec_mov, MO_8, rd, rn);
+ }
+ return true;
+}
+
+/* Initialize a Zreg with replications of a 64-bit immediate. */
+static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
+}
+
+/* Invoke a vector expander on three Pregs. */
+static void gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
+ int rd, int rn, int rm)
+{
+ unsigned psz = pred_gvec_reg_size(s);
+ gvec_fn(MO_64, pred_full_reg_offset(s, rd),
+ pred_full_reg_offset(s, rn),
+ pred_full_reg_offset(s, rm), psz, psz);
+}
+
+/* Invoke a vector move on two Pregs. */
+static bool do_mov_p(DisasContext *s, int rd, int rn)
+{
+ if (sve_access_check(s)) {
+ unsigned psz = pred_gvec_reg_size(s);
+ tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
+ pred_full_reg_offset(s, rn), psz, psz);
+ }
+ return true;
+}
+
+/* Set the cpu flags as per a return from an SVE helper. */
+static void do_pred_flags(TCGv_i32 t)
+{
+ tcg_gen_mov_i32(cpu_NF, t);
+ tcg_gen_andi_i32(cpu_ZF, t, 2);
+ tcg_gen_andi_i32(cpu_CF, t, 1);
+ tcg_gen_movi_i32(cpu_VF, 0);
+}
+
+/* Subroutines computing the ARM PredTest pseudofunction. */
+static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ gen_helper_sve_predtest1(t, d, g);
+ do_pred_flags(t);
+ tcg_temp_free_i32(t);
+}
+
+static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
+{
+ TCGv_ptr dptr = tcg_temp_new_ptr();
+ TCGv_ptr gptr = tcg_temp_new_ptr();
+ TCGv_i32 t;
+
+ tcg_gen_addi_ptr(dptr, cpu_env, dofs);
+ tcg_gen_addi_ptr(gptr, cpu_env, gofs);
+ t = tcg_const_i32(words);
+
+ gen_helper_sve_predtest(t, dptr, gptr, t);
+ tcg_temp_free_ptr(dptr);
+ tcg_temp_free_ptr(gptr);
+
+ do_pred_flags(t);
+ tcg_temp_free_i32(t);
+}
+
+/* For each element size, the bits within a predicate word that are active. */
+const uint64_t pred_esz_masks[4] = {
+ 0xffffffffffffffffull, 0x5555555555555555ull,
+ 0x1111111111111111ull, 0x0101010101010101ull
+};
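+
+/*
+ * For example, with 32-bit elements (esz 2) only every fourth
+ * predicate bit is significant, hence the 0x1111... mask.
+ */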
+
+/*
+ *** SVE Logical - Unpredicated Group
+ */
+
+static bool do_zzz_fn(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *gvec_fn)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, gvec_fn, a->esz, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_and);
+}
+
+static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_or);
+}
+
+static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_xor);
+}
+
+static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_andc);
+}
+
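+/*
+ * XAR: XOR the operands, then rotate each lane right by sh. For the
+ * 8- and 16-bit lanes of a 64-bit value there is no per-lane rotate
+ * op, so the rotate is open-coded as (t >> sh) | (t << (esize - sh))
+ * with masks that keep bits from crossing lane boundaries.
+ */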
+static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ uint64_t mask = dup_const(MO_8, 0xff >> sh);
+
+ tcg_gen_xor_i64(t, n, m);
+ tcg_gen_shri_i64(d, t, sh);
+ tcg_gen_shli_i64(t, t, 8 - sh);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_andi_i64(t, t, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ uint64_t mask = dup_const(MO_16, 0xffff >> sh);
+
+ tcg_gen_xor_i64(t, n, m);
+ tcg_gen_shri_i64(d, t, sh);
+ tcg_gen_shli_i64(t, t, 16 - sh);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_andi_i64(t, t, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
+{
+ tcg_gen_xor_i32(d, n, m);
+ tcg_gen_rotri_i32(d, d, sh);
+}
+
+static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
+{
+ tcg_gen_xor_i64(d, n, m);
+ tcg_gen_rotri_i64(d, d, sh);
+}
+
+static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, int64_t sh)
+{
+ tcg_gen_xor_vec(vece, d, n, m);
+ tcg_gen_rotri_vec(vece, d, d, sh);
+}
+
+void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, int64_t shift,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
+ static const GVecGen3i ops[4] = {
+ { .fni8 = gen_xar8_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_b,
+ .opt_opc = vecop,
+ .vece = MO_8 },
+ { .fni8 = gen_xar16_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_h,
+ .opt_opc = vecop,
+ .vece = MO_16 },
+ { .fni4 = gen_xar_i32,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_sve2_xar_s,
+ .opt_opc = vecop,
+ .vece = MO_32 },
+ { .fni8 = gen_xar_i64,
+ .fniv = gen_xar_vec,
+ .fno = gen_helper_gvec_xar_d,
+ .opt_opc = vecop,
+ .vece = MO_64 }
+ };
+ int esize = 8 << vece;
+
+ /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
+ tcg_debug_assert(shift >= 0);
+ tcg_debug_assert(shift <= esize);
+ shift &= esize - 1;
+
+ if (shift == 0) {
+ /* xar with no rotate devolves to xor. */
+ tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
+ } else {
+ tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
+ shift, &ops[vece]);
+ }
+}
+
+static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
+{
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra);
+ }
+ return true;
+}
+
+static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_xor_i64(d, n, m);
+ tcg_gen_xor_i64(d, d, k);
+}
+
+static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_xor_vec(vece, d, n, m);
+ tcg_gen_xor_vec(vece, d, d, k);
+}
+
+static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_eor3_i64,
+ .fniv = gen_eor3_vec,
+ .fno = gen_helper_sve2_eor3,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_eor3);
+}
+
+static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_andc_i64(d, m, k);
+ tcg_gen_xor_i64(d, d, n);
+}
+
+static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_andc_vec(vece, d, m, k);
+ tcg_gen_xor_vec(vece, d, d, n);
+}
+
+static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bcax_i64,
+ .fniv = gen_bcax_vec,
+ .fno = gen_helper_sve2_bcax,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bcax);
+}
+
+static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ /* BSL differs from the generic bitsel in argument ordering. */
+ tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
+}
+
+static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl);
+}
+
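+/* BSL1N: Z[dn] = (~n & k) | (m & ~k), a bitwise select with n inverted. */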
+static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_andc_i64(n, k, n);
+ tcg_gen_andc_i64(m, m, k);
+ tcg_gen_or_i64(d, n, m);
+}
+
+static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ if (TCG_TARGET_HAS_bitsel_vec) {
+ tcg_gen_not_vec(vece, n, n);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ } else {
+ tcg_gen_andc_vec(vece, n, k, n);
+ tcg_gen_andc_vec(vece, m, m, k);
+ tcg_gen_or_vec(vece, d, n, m);
+ }
+}
+
+static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bsl1n_i64,
+ .fniv = gen_bsl1n_vec,
+ .fno = gen_helper_sve2_bsl1n,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl1n);
+}
+
+static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ /*
+ * Z[dn] = (n & k) | (~m & ~k)
+ * = (n & k) | ~(m | k)
+ */
+ tcg_gen_and_i64(n, n, k);
+ if (TCG_TARGET_HAS_orc_i64) {
+ tcg_gen_or_i64(m, m, k);
+ tcg_gen_orc_i64(d, n, m);
+ } else {
+ tcg_gen_nor_i64(m, m, k);
+ tcg_gen_or_i64(d, n, m);
+ }
+}
+
+static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ if (TCG_TARGET_HAS_bitsel_vec) {
+ tcg_gen_not_vec(vece, m, m);
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ } else {
+ tcg_gen_and_vec(vece, n, n, k);
+ tcg_gen_or_vec(vece, m, m, k);
+ tcg_gen_orc_vec(vece, d, n, m);
+ }
+}
+
+static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bsl2n_i64,
+ .fniv = gen_bsl2n_vec,
+ .fno = gen_helper_sve2_bsl2n,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_bsl2n);
+}
+
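+/* NBSL: Z[dn] = ~((n & k) | (m & ~k)), the inverted bitwise select. */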
+static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
+{
+ tcg_gen_and_i64(n, n, k);
+ tcg_gen_andc_i64(m, m, k);
+ tcg_gen_nor_i64(d, n, m);
+}
+
+static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
+ TCGv_vec m, TCGv_vec k)
+{
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
+ tcg_gen_not_vec(vece, d, d);
+}
+
+static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
+ uint32_t a, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_nbsl_i64,
+ .fniv = gen_nbsl_vec,
+ .fno = gen_helper_sve2_nbsl,
+ .vece = MO_64,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
+}
+
+static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sve2_zzzz_fn(s, a, gen_nbsl);
+}
+
+/*
+ *** SVE Integer Arithmetic - Unpredicated Group
+ */
+
+static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_add);
+}
+
+static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_sub);
+}
+
+static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_ssadd);
+}
+
+static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_sssub);
+}
+
+static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_usadd);
+}
+
+static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_fn(s, a, tcg_gen_gvec_ussub);
+}
+
+/*
+ *** SVE Integer Arithmetic - Binary Predicated Group
+ */
+
+static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0);
+ }
+ return true;
+}
+
+/* Select active elements from Zn and inactive elements from Zm,
+ * storing the result in Zd.
+ */
+static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
+{
+ static gen_helper_gvec_4 * const fns[4] = {
+ gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
+ gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
+ };
+ gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
+}
+
+#define DO_ZPZZ(NAME, name) \
+static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4 * const fns[4] = { \
+ gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h, \
+ gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d, \
+ }; \
+ return do_zpzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZPZZ(AND, and)
+DO_ZPZZ(EOR, eor)
+DO_ZPZZ(ORR, orr)
+DO_ZPZZ(BIC, bic)
+
+DO_ZPZZ(ADD, add)
+DO_ZPZZ(SUB, sub)
+
+DO_ZPZZ(SMAX, smax)
+DO_ZPZZ(UMAX, umax)
+DO_ZPZZ(SMIN, smin)
+DO_ZPZZ(UMIN, umin)
+DO_ZPZZ(SABD, sabd)
+DO_ZPZZ(UABD, uabd)
+
+DO_ZPZZ(MUL, mul)
+DO_ZPZZ(SMULH, smulh)
+DO_ZPZZ(UMULH, umulh)
+
+DO_ZPZZ(ASR, asr)
+DO_ZPZZ(LSR, lsr)
+DO_ZPZZ(LSL, lsl)
+
+static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[4] = {
+ NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
+ };
+ return do_zpzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[4] = {
+ NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
+ };
+ return do_zpzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ if (sve_access_check(s)) {
+ do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz);
+ }
+ return true;
+}
+
+#undef DO_ZPZZ
+
+/*
+ *** SVE Integer Arithmetic - Unary Predicated Group
+ */
+
+static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, 0);
+ }
+ return true;
+}
+
+#define DO_ZPZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
+ }; \
+ return do_zpz_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZPZ(CLS, cls)
+DO_ZPZ(CLZ, clz)
+DO_ZPZ(CNT_zpz, cnt_zpz)
+DO_ZPZ(CNOT, cnot)
+DO_ZPZ(NOT_zpz, not_zpz)
+DO_ZPZ(ABS, abs)
+DO_ZPZ(NEG, neg)
+
+static bool trans_FABS(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_fabs_h,
+ gen_helper_sve_fabs_s,
+ gen_helper_sve_fabs_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_fneg_h,
+ gen_helper_sve_fneg_s,
+ gen_helper_sve_fneg_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_sxtb_h,
+ gen_helper_sve_sxtb_s,
+ gen_helper_sve_sxtb_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UXTB(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_uxtb_h,
+ gen_helper_sve_uxtb_s,
+ gen_helper_sve_uxtb_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SXTH(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL, NULL,
+ gen_helper_sve_sxth_s,
+ gen_helper_sve_sxth_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL, NULL,
+ gen_helper_sve_uxth_s,
+ gen_helper_sve_uxth_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL);
+}
+
+static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL);
+}
+
+#undef DO_ZPZ
+
+/*
+ *** SVE Integer Reduction Group
+ */
+
+typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
+static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
+ gen_helper_gvec_reduc *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_zn, t_pg;
+ TCGv_i32 desc;
+ TCGv_i64 temp;
+
+ if (fn == NULL) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ temp = tcg_temp_new_i64();
+ t_zn = tcg_temp_new_ptr();
+ t_pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ fn(temp, t_zn, t_pg, desc);
+ tcg_temp_free_ptr(t_zn);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i32(desc);
+
+ write_fp_dreg(s, a->rd, temp);
+ tcg_temp_free_i64(temp);
+ return true;
+}
+
+#define DO_VPZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
+{ \
+ static gen_helper_gvec_reduc * const fns[4] = { \
+ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
+ }; \
+ return do_vpz_ool(s, a, fns[a->esz]); \
+}
+
+DO_VPZ(ORV, orv)
+DO_VPZ(ANDV, andv)
+DO_VPZ(EORV, eorv)
+
+DO_VPZ(UADDV, uaddv)
+DO_VPZ(SMAXV, smaxv)
+DO_VPZ(UMAXV, umaxv)
+DO_VPZ(SMINV, sminv)
+DO_VPZ(UMINV, uminv)
+
+static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_reduc * const fns[4] = {
+ gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
+ gen_helper_sve_saddv_s, NULL
+ };
+ return do_vpz_ool(s, a, fns[a->esz]);
+}
+
+#undef DO_VPZ
+
+/*
+ *** SVE Shift by Immediate - Predicated Group
+ */
+
+/*
+ * Copy Zn into Zd, storing zeros into inactive elements.
+ * If invert, store zeros into the active elements.
+ */
+static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
+ int esz, bool invert)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_movz_b, gen_helper_sve_movz_h,
+ gen_helper_sve_movz_s, gen_helper_sve_movz_d,
+ };
+
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
+ }
+ return true;
+}
+
+static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
+ }
+ return true;
+}
+
+static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
+ gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
+ };
+ if (a->esz < 0) {
+ /* Invalid tsz encoding -- see tszimm_esz. */
+ return false;
+ }
+ /* Shift by element size is architecturally valid. For
+ arithmetic right-shift, it's the same as by one less. */
+ a->imm = MIN(a->imm, (8 << a->esz) - 1);
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
+ gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
+ };
+ if (a->esz < 0) {
+ return false;
+ }
+ /* Shift by element size is architecturally valid.
+ For logical shifts, it is a zeroing operation. */
+ if (a->imm >= (8 << a->esz)) {
+ return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
+ } else {
+ return do_zpzi_ool(s, a, fns[a->esz]);
+ }
+}
+
+static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
+ gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
+ };
+ if (a->esz < 0) {
+ return false;
+ }
+ /* Shift by element size is architecturally valid.
+ For logical shifts, it is a zeroing operation. */
+ if (a->imm >= (8 << a->esz)) {
+ return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
+ } else {
+ return do_zpzi_ool(s, a, fns[a->esz]);
+ }
+}
+
+static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
+ gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
+ };
+ if (a->esz < 0) {
+ return false;
+ }
+ /* Shift by element size is architecturally valid. For arithmetic
+ right shift for division, it is a zeroing operation. */
+ if (a->imm >= (8 << a->esz)) {
+ return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
+ } else {
+ return do_zpzi_ool(s, a, fns[a->esz]);
+ }
+}
+
+static bool trans_SQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
+ gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
+ gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SRSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
+ gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_URSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
+ gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQSHLU(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
+ gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+/*
+ *** SVE Bitwise Shift - Predicated Group
+ */
+
+#define DO_ZPZW(NAME, name) \
+static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4 * const fns[3] = { \
+ gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \
+ gen_helper_sve_##name##_zpzw_s, \
+ }; \
+ if (a->esz < 0 || a->esz >= 3) { \
+ return false; \
+ } \
+ return do_zpzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZPZW(ASR, asr)
+DO_ZPZW(LSR, lsr)
+DO_ZPZW(LSL, lsl)
+
+#undef DO_ZPZW
+
+/*
+ *** SVE Bitwise Shift - Unpredicated Group
+ */
+
+static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
+ void (*gvec_fn)(unsigned, uint32_t, uint32_t,
+ int64_t, uint32_t, uint32_t))
+{
+ if (a->esz < 0) {
+ /* Invalid tsz encoding -- see tszimm_esz. */
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ /* Shift by element size is architecturally valid. For
+ arithmetic right-shift, it's the same as by one less.
+ Otherwise it is a zeroing operation. */
+ if (a->imm >= 8 << a->esz) {
+ if (asr) {
+ a->imm = (8 << a->esz) - 1;
+ } else {
+ do_dupi_z(s, a->rd, 0);
+ return true;
+ }
+ }
+ gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_shift_imm(s, a, true, tcg_gen_gvec_sari);
+}
+
+static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_shift_imm(s, a, false, tcg_gen_gvec_shri);
+}
+
+static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_shift_imm(s, a, false, tcg_gen_gvec_shli);
+}
+
+static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+#define DO_ZZW(NAME, name) \
+static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \
+ gen_helper_sve_##name##_zzw_s, NULL \
+ }; \
+ return do_zzw_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZZW(ASR, asr)
+DO_ZZW(LSR, lsr)
+DO_ZZW(LSL, lsl)
+
+#undef DO_ZZW
+
+/*
+ *** SVE Integer Multiply-Add Group
+ */
+
+static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
+ gen_helper_gvec_5 *fn)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->ra),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ vsz, vsz, 0, fn);
+ }
+ return true;
+}
+
+#define DO_ZPZZZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
+{ \
+ static gen_helper_gvec_5 * const fns[4] = { \
+ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
+ }; \
+ return do_zpzzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZPZZZ(MLA, mla)
+DO_ZPZZZ(MLS, mls)
+
+#undef DO_ZPZZZ
+
+/*
+ *** SVE Index Generation Group
+ */
+
+static void do_index(DisasContext *s, int esz, int rd,
+ TCGv_i64 start, TCGv_i64 incr)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ TCGv_ptr t_zd = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
+ if (esz == 3) {
+ gen_helper_sve_index_d(t_zd, start, incr, desc);
+ } else {
+ typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
+ static index_fn * const fns[3] = {
+ gen_helper_sve_index_b,
+ gen_helper_sve_index_h,
+ gen_helper_sve_index_s,
+ };
+ TCGv_i32 s32 = tcg_temp_new_i32();
+ TCGv_i32 i32 = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(s32, start);
+ tcg_gen_extrl_i64_i32(i32, incr);
+ fns[esz](t_zd, s32, i32, desc);
+
+ tcg_temp_free_i32(s32);
+ tcg_temp_free_i32(i32);
+ }
+ tcg_temp_free_ptr(t_zd);
+ tcg_temp_free_i32(desc);
+}
+
+static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 start = tcg_const_i64(a->imm1);
+ TCGv_i64 incr = tcg_const_i64(a->imm2);
+ do_index(s, a->esz, a->rd, start, incr);
+ tcg_temp_free_i64(start);
+ tcg_temp_free_i64(incr);
+ }
+ return true;
+}
+
+static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 start = tcg_const_i64(a->imm);
+ TCGv_i64 incr = cpu_reg(s, a->rm);
+ do_index(s, a->esz, a->rd, start, incr);
+ tcg_temp_free_i64(start);
+ }
+ return true;
+}
+
+static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 start = cpu_reg(s, a->rn);
+ TCGv_i64 incr = tcg_const_i64(a->imm);
+ do_index(s, a->esz, a->rd, start, incr);
+ tcg_temp_free_i64(incr);
+ }
+ return true;
+}
+
+static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 start = cpu_reg(s, a->rn);
+ TCGv_i64 incr = cpu_reg(s, a->rm);
+ do_index(s, a->esz, a->rd, start, incr);
+ }
+ return true;
+}
+
+/*
+ *** SVE Stack Allocation Group
+ */
+
+static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+ tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
+ }
+ return true;
+}
+
+static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+ TCGv_i64 rn = cpu_reg_sp(s, a->rn);
+ tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
+ }
+ return true;
+}
+
+static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+ tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
+ }
+ return true;
+}
+
+/*
+ *** SVE Compute Vector Address Group
+ */
+
+static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
+ }
+ return true;
+}
+
+static bool trans_ADR_p32(DisasContext *s, arg_rrri *a)
+{
+ return do_adr(s, a, gen_helper_sve_adr_p32);
+}
+
+static bool trans_ADR_p64(DisasContext *s, arg_rrri *a)
+{
+ return do_adr(s, a, gen_helper_sve_adr_p64);
+}
+
+static bool trans_ADR_s32(DisasContext *s, arg_rrri *a)
+{
+ return do_adr(s, a, gen_helper_sve_adr_s32);
+}
+
+static bool trans_ADR_u32(DisasContext *s, arg_rrri *a)
+{
+ return do_adr(s, a, gen_helper_sve_adr_u32);
+}
+
+/*
+ *** SVE Integer Misc - Unpredicated Group
+ */
+
+static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a)
+{
+ static gen_helper_gvec_2 * const fns[4] = {
+ NULL,
+ gen_helper_sve_fexpa_h,
+ gen_helper_sve_fexpa_s,
+ gen_helper_sve_fexpa_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
+ }
+ return true;
+}
+
+static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_ftssel_h,
+ gen_helper_sve_ftssel_s,
+ gen_helper_sve_ftssel_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+/*
+ *** SVE Predicate Logical Operations Group
+ */
+
+static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
+ const GVecGen4 *gvec_op)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned psz = pred_gvec_reg_size(s);
+ int dofs = pred_full_reg_offset(s, a->rd);
+ int nofs = pred_full_reg_offset(s, a->rn);
+ int mofs = pred_full_reg_offset(s, a->rm);
+ int gofs = pred_full_reg_offset(s, a->pg);
+
+ if (!a->s) {
+ tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
+ return true;
+ }
+
+ if (psz == 8) {
+ /* Do the operation and the flags generation in temps. */
+ TCGv_i64 pd = tcg_temp_new_i64();
+ TCGv_i64 pn = tcg_temp_new_i64();
+ TCGv_i64 pm = tcg_temp_new_i64();
+ TCGv_i64 pg = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(pn, cpu_env, nofs);
+ tcg_gen_ld_i64(pm, cpu_env, mofs);
+ tcg_gen_ld_i64(pg, cpu_env, gofs);
+
+ gvec_op->fni8(pd, pn, pm, pg);
+ tcg_gen_st_i64(pd, cpu_env, dofs);
+
+ do_predtest1(pd, pg);
+
+ tcg_temp_free_i64(pd);
+ tcg_temp_free_i64(pn);
+ tcg_temp_free_i64(pm);
+ tcg_temp_free_i64(pg);
+ } else {
+ /* The operation and flags generation is large. The computation
+ * of the flags depends on the original contents of the guarding
+ * predicate. If the destination overwrites the guarding predicate,
+ * then the easiest way to get this right is to save a copy.
+ */
+ int tofs = gofs;
+ if (a->rd == a->pg) {
+ tofs = offsetof(CPUARMState, vfp.preg_tmp);
+ tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
+ }
+
+ tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
+ do_predtest(s, dofs, tofs, psz / 8);
+ }
+ return true;
+}
+
+static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_and_i64(pd, pn, pm);
+ tcg_gen_and_i64(pd, pd, pg);
+}
+
+static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_and_vec(vece, pd, pn, pm);
+ tcg_gen_and_vec(vece, pd, pd, pg);
+}
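+
+/* Special cases below, as a sketch of the reasoning: with Pn == Pm the
+ * AND reduces to Pn & Pg, and if additionally Pg == Pn it is a plain
+ * predicate move; either way the flag-setting slow path is avoided
+ * when the S bit is clear. */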
+
+static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_and_pg_i64,
+ .fniv = gen_and_pg_vec,
+ .fno = gen_helper_sve_and_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+
+ if (!a->s) {
+ if (!sve_access_check(s)) {
+ return true;
+ }
+ if (a->rn == a->rm) {
+ if (a->pg == a->rn) {
+ do_mov_p(s, a->rd, a->rn);
+ } else {
+ gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
+ }
+ return true;
+ } else if (a->pg == a->rn || a->pg == a->rm) {
+ gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
+ return true;
+ }
+ }
+ return do_pppp_flags(s, a, &op);
+}
+
+static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_andc_i64(pd, pn, pm);
+ tcg_gen_and_i64(pd, pd, pg);
+}
+
+static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_andc_vec(vece, pd, pn, pm);
+ tcg_gen_and_vec(vece, pd, pd, pg);
+}
+
+static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_bic_pg_i64,
+ .fniv = gen_bic_pg_vec,
+ .fno = gen_helper_sve_bic_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+
+ if (!a->s && a->pg == a->rn) {
+ if (sve_access_check(s)) {
+ gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
+ }
+ return true;
+ }
+ return do_pppp_flags(s, a, &op);
+}
+
+static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_xor_i64(pd, pn, pm);
+ tcg_gen_and_i64(pd, pd, pg);
+}
+
+static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_xor_vec(vece, pd, pn, pm);
+ tcg_gen_and_vec(vece, pd, pd, pg);
+}
+
+static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_eor_pg_i64,
+ .fniv = gen_eor_pg_vec,
+ .fno = gen_helper_sve_eor_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ return do_pppp_flags(s, a, &op);
+}
+
+static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ if (a->s) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned psz = pred_gvec_reg_size(s);
+ tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
+ pred_full_reg_offset(s, a->pg),
+ pred_full_reg_offset(s, a->rn),
+ pred_full_reg_offset(s, a->rm), psz, psz);
+ }
+ return true;
+}
+
+static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_or_i64(pd, pn, pm);
+ tcg_gen_and_i64(pd, pd, pg);
+}
+
+static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_or_vec(vece, pd, pn, pm);
+ tcg_gen_and_vec(vece, pd, pd, pg);
+}
+
+static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_orr_pg_i64,
+ .fniv = gen_orr_pg_vec,
+ .fno = gen_helper_sve_orr_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+
+ if (!a->s && a->pg == a->rn && a->rn == a->rm) {
+ return do_mov_p(s, a->rd, a->rn);
+ }
+ return do_pppp_flags(s, a, &op);
+}
+
+static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_orc_i64(pd, pn, pm);
+ tcg_gen_and_i64(pd, pd, pg);
+}
+
+static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_orc_vec(vece, pd, pn, pm);
+ tcg_gen_and_vec(vece, pd, pd, pg);
+}
+
+static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_orn_pg_i64,
+ .fniv = gen_orn_pg_vec,
+ .fno = gen_helper_sve_orn_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ return do_pppp_flags(s, a, &op);
+}
+
+static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_or_i64(pd, pn, pm);
+ tcg_gen_andc_i64(pd, pg, pd);
+}
+
+static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_or_vec(vece, pd, pn, pm);
+ tcg_gen_andc_vec(vece, pd, pg, pd);
+}
+
+static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_nor_pg_i64,
+ .fniv = gen_nor_pg_vec,
+ .fno = gen_helper_sve_nor_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ return do_pppp_flags(s, a, &op);
+}
+
+static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
+{
+ tcg_gen_and_i64(pd, pn, pm);
+ tcg_gen_andc_i64(pd, pg, pd);
+}
+
+static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
+ TCGv_vec pm, TCGv_vec pg)
+{
+ tcg_gen_and_vec(vece, pd, pn, pm);
+ tcg_gen_andc_vec(vece, pd, pg, pd);
+}
+
+static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
+{
+ static const GVecGen4 op = {
+ .fni8 = gen_nand_pg_i64,
+ .fniv = gen_nand_pg_vec,
+ .fno = gen_helper_sve_nand_pppp,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ };
+ return do_pppp_flags(s, a, &op);
+}
+
+/*
+ *** SVE Predicate Misc Group
+ */
+
+static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
+{
+ if (sve_access_check(s)) {
+ int nofs = pred_full_reg_offset(s, a->rn);
+ int gofs = pred_full_reg_offset(s, a->pg);
+ int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);
+
+ if (words == 1) {
+ TCGv_i64 pn = tcg_temp_new_i64();
+ TCGv_i64 pg = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(pn, cpu_env, nofs);
+ tcg_gen_ld_i64(pg, cpu_env, gofs);
+ do_predtest1(pn, pg);
+
+ tcg_temp_free_i64(pn);
+ tcg_temp_free_i64(pg);
+ } else {
+ do_predtest(s, nofs, gofs, words);
+ }
+ }
+ return true;
+}
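+
+/* Reminder of the PredTest flag semantics assumed here: N = the value
+ * of the first active element, Z = no active element is true,
+ * C = the last active element is false, V = 0. */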
+
+/* See the ARM pseudocode DecodePredCount. */
+static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
+{
+ unsigned elements = fullsz >> esz;
+ unsigned bound;
+
+ switch (pattern) {
+ case 0x0: /* POW2 */
+ return pow2floor(elements);
+ case 0x1: /* VL1 */
+ case 0x2: /* VL2 */
+ case 0x3: /* VL3 */
+ case 0x4: /* VL4 */
+ case 0x5: /* VL5 */
+ case 0x6: /* VL6 */
+ case 0x7: /* VL7 */
+ case 0x8: /* VL8 */
+ bound = pattern;
+ break;
+ case 0x9: /* VL16 */
+ case 0xa: /* VL32 */
+ case 0xb: /* VL64 */
+ case 0xc: /* VL128 */
+ case 0xd: /* VL256 */
+ bound = 16 << (pattern - 9);
+ break;
+ case 0x1d: /* MUL4 */
+ return elements - elements % 4;
+ case 0x1e: /* MUL3 */
+ return elements - elements % 3;
+ case 0x1f: /* ALL */
+ return elements;
+ default: /* #uimm5 */
+ return 0;
+ }
+ return elements >= bound ? bound : 0;
+}
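+
+/* Worked example, for clarity: with fullsz = 32 bytes and esz = 2
+ * there are 8 elements; POW2 yields 8, VL7 yields 7 (7 <= 8), and
+ * VL16 yields 0 because the bound exceeds the element count. */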
+
+/* This handles all of the predicate initialization instructions,
+ * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32
+ * so that decode_pred_count returns 0. For SETFFR, we will have
+ * set RD == 16 == FFR.
+ */
+static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned ofs = pred_full_reg_offset(s, rd);
+ unsigned numelem, setsz, i;
+ uint64_t word, lastword;
+ TCGv_i64 t;
+
+ numelem = decode_pred_count(fullsz, pat, esz);
+
+ /* Determine what we must store into each bit, and how many. */
+ if (numelem == 0) {
+ lastword = word = 0;
+ setsz = fullsz;
+ } else {
+ setsz = numelem << esz;
+ lastword = word = pred_esz_masks[esz];
+ if (setsz % 64) {
+ lastword &= MAKE_64BIT_MASK(0, setsz % 64);
+ }
+ }
+
+ t = tcg_temp_new_i64();
+ if (fullsz <= 64) {
+ tcg_gen_movi_i64(t, lastword);
+ tcg_gen_st_i64(t, cpu_env, ofs);
+ goto done;
+ }
+
+ if (word == lastword) {
+ unsigned maxsz = size_for_gvec(fullsz / 8);
+ unsigned oprsz = size_for_gvec(setsz / 8);
+
+ if (oprsz * 8 == setsz) {
+ tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
+ goto done;
+ }
+ }
+
+ setsz /= 8;
+ fullsz /= 8;
+
+ tcg_gen_movi_i64(t, word);
+ for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
+ tcg_gen_st_i64(t, cpu_env, ofs + i);
+ }
+ if (lastword != word) {
+ tcg_gen_movi_i64(t, lastword);
+ tcg_gen_st_i64(t, cpu_env, ofs + i);
+ i += 8;
+ }
+ if (i < fullsz) {
+ tcg_gen_movi_i64(t, 0);
+ for (; i < fullsz; i += 8) {
+ tcg_gen_st_i64(t, cpu_env, ofs + i);
+ }
+ }
+
+ done:
+ tcg_temp_free_i64(t);
+
+ /* PTRUES */
+ if (setflag) {
+ tcg_gen_movi_i32(cpu_NF, -(word != 0));
+ tcg_gen_movi_i32(cpu_CF, word == 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ }
+ return true;
+}
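+
+/* Flag sketch for the PTRUES path above: a non-zero WORD means at
+ * least one element is set, giving N=1, Z=0, C=0; an all-false result
+ * gives N=0, Z=1, C=1, matching PredTest. */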
+
+static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a)
+{
+ return do_predset(s, a->esz, a->rd, a->pat, a->s);
+}
+
+static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a)
+{
+ /* Note pat == 31 is #all, to set all elements. */
+ return do_predset(s, 0, FFR_PRED_NUM, 31, false);
+}
+
+static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a)
+{
+ /* Note pat == 32 is #unimp, to set no elements. */
+ return do_predset(s, 0, a->rd, 32, false);
+}
+
+static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
+{
+ /* The path through do_pppp_flags is complicated enough to want to avoid
+ * duplication. Frob the arguments into the form of a predicated AND.
+ */
+ arg_rprr_s alt_a = {
+ .rd = a->rd, .pg = a->pg, .s = a->s,
+ .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
+ };
+ return trans_AND_pppp(s, &alt_a);
+}
+
+static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a)
+{
+ return do_mov_p(s, a->rd, FFR_PRED_NUM);
+}
+
+static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a)
+{
+ return do_mov_p(s, FFR_PRED_NUM, a->rn);
+}
+
+static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
+ void (*gen_fn)(TCGv_i32, TCGv_ptr,
+ TCGv_ptr, TCGv_i32))
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ TCGv_ptr t_pd = tcg_temp_new_ptr();
+ TCGv_ptr t_pg = tcg_temp_new_ptr();
+ TCGv_i32 t;
+ unsigned desc = 0;
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+
+ tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
+ t = tcg_const_i32(desc);
+
+ gen_fn(t, t_pd, t_pg, t);
+ tcg_temp_free_ptr(t_pd);
+ tcg_temp_free_ptr(t_pg);
+
+ do_pred_flags(t);
+ tcg_temp_free_i32(t);
+ return true;
+}
+
+static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a)
+{
+ return do_pfirst_pnext(s, a, gen_helper_sve_pfirst);
+}
+
+static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a)
+{
+ return do_pfirst_pnext(s, a, gen_helper_sve_pnext);
+}
+
+/*
+ *** SVE Element Count Group
+ */
+
+/* Perform an inline saturating addition of a 32-bit value within
+ * a 64-bit register. The second operand is known to be positive,
+ * which halves the comparisons we must perform to bound the result.
+ */
+static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
+{
+ int64_t ibound;
+ TCGv_i64 bound;
+ TCGCond cond;
+
+ /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
+ if (u) {
+ tcg_gen_ext32u_i64(reg, reg);
+ } else {
+ tcg_gen_ext32s_i64(reg, reg);
+ }
+ if (d) {
+ tcg_gen_sub_i64(reg, reg, val);
+ ibound = (u ? 0 : INT32_MIN);
+ cond = TCG_COND_LT;
+ } else {
+ tcg_gen_add_i64(reg, reg, val);
+ ibound = (u ? UINT32_MAX : INT32_MAX);
+ cond = TCG_COND_GT;
+ }
+ bound = tcg_const_i64(ibound);
+ tcg_gen_movcond_i64(cond, reg, reg, bound, bound, reg);
+ tcg_temp_free_i64(bound);
+}
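+
+/* Illustrative example of the clamping above: for an unsigned add
+ * with reg = 0xffff_fffe and val = 5, the 64-bit sum 0x1_0000_0003
+ * exceeds UINT32_MAX, so the movcond replaces it with 0xffff_ffff. */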
+
+/* Similarly with 64-bit values. */
+static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t2;
+
+ if (u) {
+ if (d) {
+ tcg_gen_sub_i64(t0, reg, val);
+ t2 = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
+ } else {
+ tcg_gen_add_i64(t0, reg, val);
+ t2 = tcg_constant_i64(-1);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
+ }
+ } else {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ if (d) {
+ /* Detect signed overflow for subtraction. */
+ tcg_gen_xor_i64(t0, reg, val);
+ tcg_gen_sub_i64(t1, reg, val);
+ tcg_gen_xor_i64(reg, reg, t1);
+ tcg_gen_and_i64(t0, t0, reg);
+
+ /* Bound the result. */
+ tcg_gen_movi_i64(reg, INT64_MIN);
+ t2 = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
+ } else {
+ /* Detect signed overflow for addition. */
+ tcg_gen_xor_i64(t0, reg, val);
+ tcg_gen_add_i64(reg, reg, val);
+ tcg_gen_xor_i64(t1, reg, val);
+ tcg_gen_andc_i64(t0, t1, t0);
+
+ /* Bound the result. */
+ tcg_gen_movi_i64(t1, INT64_MAX);
+ t2 = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
+ }
+ tcg_temp_free_i64(t1);
+ }
+ tcg_temp_free_i64(t0);
+}
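+
+/* Sketch of the signed-overflow tests used above (standard identities):
+ * for ADD, overflow iff ~(reg ^ val) & (result ^ val) has bit 63 set;
+ * for SUB, overflow iff (reg ^ val) & (reg ^ result) has bit 63 set.
+ * When the test fires, the movcond substitutes INT64_MAX or INT64_MIN. */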
+
+/* Similarly with a vector and a scalar operand. */
+static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
+ TCGv_i64 val, bool u, bool d)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr dptr, nptr;
+ TCGv_i32 t32, desc;
+ TCGv_i64 t64;
+
+ dptr = tcg_temp_new_ptr();
+ nptr = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
+ tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
+ desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+
+ switch (esz) {
+ case MO_8:
+ t32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t32, val);
+ if (d) {
+ tcg_gen_neg_i32(t32, t32);
+ }
+ if (u) {
+ gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
+ } else {
+ gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
+ }
+ tcg_temp_free_i32(t32);
+ break;
+
+ case MO_16:
+ t32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t32, val);
+ if (d) {
+ tcg_gen_neg_i32(t32, t32);
+ }
+ if (u) {
+ gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
+ } else {
+ gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
+ }
+ tcg_temp_free_i32(t32);
+ break;
+
+ case MO_32:
+ t64 = tcg_temp_new_i64();
+ if (d) {
+ tcg_gen_neg_i64(t64, val);
+ } else {
+ tcg_gen_mov_i64(t64, val);
+ }
+ if (u) {
+ gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
+ } else {
+ gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
+ }
+ tcg_temp_free_i64(t64);
+ break;
+
+ case MO_64:
+ if (u) {
+ if (d) {
+ gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
+ } else {
+ gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
+ }
+ } else if (d) {
+ t64 = tcg_temp_new_i64();
+ tcg_gen_neg_i64(t64, val);
+ gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
+ tcg_temp_free_i64(t64);
+ } else {
+ gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_ptr(dptr);
+ tcg_temp_free_ptr(nptr);
+ tcg_temp_free_i32(desc);
+}
+
+static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
+{
+ if (sve_access_check(s)) {
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
+ }
+ return true;
+}
+
+static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
+{
+ if (sve_access_check(s)) {
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ int inc = numelem * a->imm * (a->d ? -1 : 1);
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+
+ tcg_gen_addi_i64(reg, reg, inc);
+ }
+ return true;
+}
+
+static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ int inc = numelem * a->imm;
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+
+ /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
+ if (inc == 0) {
+ if (a->u) {
+ tcg_gen_ext32u_i64(reg, reg);
+ } else {
+ tcg_gen_ext32s_i64(reg, reg);
+ }
+ } else {
+ TCGv_i64 t = tcg_const_i64(inc);
+ do_sat_addsub_32(reg, t, a->u, a->d);
+ tcg_temp_free_i64(t);
+ }
+ return true;
+}
+
+static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ int inc = numelem * a->imm;
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+
+ if (inc != 0) {
+ TCGv_i64 t = tcg_const_i64(inc);
+ do_sat_addsub_64(reg, t, a->u, a->d);
+ tcg_temp_free_i64(t);
+ }
+ return true;
+}
+
+static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ int inc = numelem * a->imm;
+
+ if (inc != 0) {
+ if (sve_access_check(s)) {
+ TCGv_i64 t = tcg_const_i64(a->d ? -inc : inc);
+ tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ t, fullsz, fullsz);
+ tcg_temp_free_i64(t);
+ }
+ } else {
+ do_mov_z(s, a->rd, a->rn);
+ }
+ return true;
+}
+
+static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+
+ unsigned fullsz = vec_full_reg_size(s);
+ unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
+ int inc = numelem * a->imm;
+
+ if (inc != 0) {
+ if (sve_access_check(s)) {
+ TCGv_i64 t = tcg_const_i64(inc);
+ do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d);
+ tcg_temp_free_i64(t);
+ }
+ } else {
+ do_mov_z(s, a->rd, a->rn);
+ }
+ return true;
+}
+
+/*
+ *** SVE Bitwise Immediate Group
+ */
+
+static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
+{
+ uint64_t imm;
+ if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
+ extract32(a->dbm, 0, 6),
+ extract32(a->dbm, 6, 6))) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ gvec_fn(MO_64, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn), imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a)
+{
+ return do_zz_dbm(s, a, tcg_gen_gvec_andi);
+}
+
+static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a)
+{
+ return do_zz_dbm(s, a, tcg_gen_gvec_ori);
+}
+
+static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a)
+{
+ return do_zz_dbm(s, a, tcg_gen_gvec_xori);
+}
+
+static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
+{
+ uint64_t imm;
+ if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
+ extract32(a->dbm, 0, 6),
+ extract32(a->dbm, 6, 6))) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ do_dupi_z(s, a->rd, imm);
+ }
+ return true;
+}
+
+/*
+ *** SVE Integer Wide Immediate - Predicated Group
+ */
+
+/* Implement all merging copies. This is used for CPY (immediate),
+ * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
+ */
+static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
+ TCGv_i64 val)
+{
+ typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
+ static gen_cpy * const fns[4] = {
+ gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
+ gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
+ };
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ TCGv_ptr t_zd = tcg_temp_new_ptr();
+ TCGv_ptr t_zn = tcg_temp_new_ptr();
+ TCGv_ptr t_pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
+ tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+
+ fns[esz](t_zd, t_zn, t_pg, val, desc);
+
+ tcg_temp_free_ptr(t_zd);
+ tcg_temp_free_ptr(t_zn);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i32(desc);
+}
+
+static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ /* Decode the VFP immediate. */
+ uint64_t imm = vfp_expand_imm(a->esz, a->imm);
+ TCGv_i64 t_imm = tcg_const_i64(imm);
+ do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm);
+ tcg_temp_free_i64(t_imm);
+ }
+ return true;
+}
+
+static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
+{
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 t_imm = tcg_const_i64(a->imm);
+ do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, t_imm);
+ tcg_temp_free_i64(t_imm);
+ }
+ return true;
+}
+
+static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
+{
+ static gen_helper_gvec_2i * const fns[4] = {
+ gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
+ gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
+ };
+
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i64 t_imm = tcg_const_i64(a->imm);
+ tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
+ pred_full_reg_offset(s, a->pg),
+ t_imm, vsz, vsz, 0, fns[a->esz]);
+ tcg_temp_free_i64(t_imm);
+ }
+ return true;
+}
+
+/*
+ *** SVE Permute Extract Group
+ */
+
+static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned n_ofs = imm >= vsz ? 0 : imm;
+ unsigned n_siz = vsz - n_ofs;
+ unsigned d = vec_full_reg_offset(s, rd);
+ unsigned n = vec_full_reg_offset(s, rn);
+ unsigned m = vec_full_reg_offset(s, rm);
+
+ /* Use host vector move insns if we have appropriate sizes
+ * and no unfortunate overlap.
+ */
+ if (m != d
+ && n_ofs == size_for_gvec(n_ofs)
+ && n_siz == size_for_gvec(n_siz)
+ && (d != n || n_siz <= n_ofs)) {
+ tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
+ if (n_ofs != 0) {
+ tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
+ }
+ } else {
+ tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
+ }
+ return true;
+}
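+
+/* Illustrative EXT case: with a 16-byte vector and imm = 3 the result
+ * is bytes 3..15 of Zn followed by bytes 0..2 of Zm; when the sizes
+ * and overlap permit, the fast path above does this with two host
+ * vector moves. */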
+
+static bool trans_EXT(DisasContext *s, arg_EXT *a)
+{
+ return do_EXT(s, a->rd, a->rn, a->rm, a->imm);
+}
+
+static bool trans_EXT_sve2(DisasContext *s, arg_rri *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_EXT(s, a->rd, a->rn, (a->rn + 1) % 32, a->imm);
+}
+
+/*
+ *** SVE Permute - Unpredicated Group
+ */
+
+static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
+ vsz, vsz, cpu_reg_sp(s, a->rn));
+ }
+ return true;
+}
+
+static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
+{
+ if ((a->imm & 0x1f) == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned dofs = vec_full_reg_offset(s, a->rd);
+ unsigned esz, index;
+
+ esz = ctz32(a->imm);
+ index = a->imm >> (esz + 1);
+
+ if ((index << esz) < vsz) {
+ unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
+ tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
+ } else {
+ /*
+ * While dup_mem handles 128-bit elements, dup_imm does not.
+ * Thankfully element size doesn't matter for splatting zero.
+ */
+ tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
+ }
+ }
+ return true;
+}
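+
+/* Encoding sketch for DUP_x above: the lowest set bit of IMM selects
+ * the element size and the remaining bits the index; e.g.
+ * imm = 0b10100 gives esz = 2 (32-bit elements) and index = 2. */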
+
+static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
+{
+ typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
+ static gen_insr * const fns[4] = {
+ gen_helper_sve_insr_b, gen_helper_sve_insr_h,
+ gen_helper_sve_insr_s, gen_helper_sve_insr_d,
+ };
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ TCGv_ptr t_zd = tcg_temp_new_ptr();
+ TCGv_ptr t_zn = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
+
+ fns[a->esz](t_zd, t_zn, val, desc);
+
+ tcg_temp_free_ptr(t_zd);
+ tcg_temp_free_ptr(t_zn);
+ tcg_temp_free_i32(desc);
+}
+
+static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
+ do_insr_i64(s, a, t);
+ tcg_temp_free_i64(t);
+ }
+ return true;
+}
+
+static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
+{
+ if (sve_access_check(s)) {
+ do_insr_i64(s, a, cpu_reg(s, a->rm));
+ }
+ return true;
+}
+
+static bool trans_REV_v(DisasContext *s, arg_rr_esz *a)
+{
+ static gen_helper_gvec_2 * const fns[4] = {
+ gen_helper_sve_rev_b, gen_helper_sve_rev_h,
+ gen_helper_sve_rev_s, gen_helper_sve_rev_d
+ };
+
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
+ }
+ return true;
+}
+
+static bool trans_TBL(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
+ gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
+ };
+
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[4] = {
+ gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
+ gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn,
+ (a->rn + 1) % 32, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_TBX(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
+ gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
+{
+ static gen_helper_gvec_2 * const fns[4][2] = {
+ { NULL, NULL },
+ { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
+ { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
+ { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
+ };
+
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn)
+ + (a->h ? vsz / 2 : 0),
+ vsz, vsz, 0, fns[a->esz][a->u]);
+ }
+ return true;
+}
+
+/*
+ *** SVE Permute - Predicates Group
+ */
+
+static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
+ gen_helper_gvec_3 *fn)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned vsz = pred_full_reg_size(s);
+
+ TCGv_ptr t_d = tcg_temp_new_ptr();
+ TCGv_ptr t_n = tcg_temp_new_ptr();
+ TCGv_ptr t_m = tcg_temp_new_ptr();
+ TCGv_i32 t_desc;
+ uint32_t desc = 0;
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
+
+ tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
+ t_desc = tcg_const_i32(desc);
+
+ fn(t_d, t_n, t_m, t_desc);
+
+ tcg_temp_free_ptr(t_d);
+ tcg_temp_free_ptr(t_n);
+ tcg_temp_free_ptr(t_m);
+ tcg_temp_free_i32(t_desc);
+ return true;
+}
+
+static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
+ gen_helper_gvec_2 *fn)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned vsz = pred_full_reg_size(s);
+ TCGv_ptr t_d = tcg_temp_new_ptr();
+ TCGv_ptr t_n = tcg_temp_new_ptr();
+ TCGv_i32 t_desc;
+ uint32_t desc = 0;
+
+ tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
+ t_desc = tcg_const_i32(desc);
+
+ fn(t_d, t_n, t_desc);
+
+ tcg_temp_free_i32(t_desc);
+ tcg_temp_free_ptr(t_d);
+ tcg_temp_free_ptr(t_n);
+ return true;
+}
+
+static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p);
+}
+
+static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p);
+}
+
+static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p);
+}
+
+static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p);
+}
+
+static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p);
+}
+
+static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p);
+}
+
+static bool trans_REV_p(DisasContext *s, arg_rr_esz *a)
+{
+ return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p);
+}
+
+static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a)
+{
+ return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p);
+}
+
+static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a)
+{
+ return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p);
+}
+
+/*
+ *** SVE Permute - Interleaving Group
+ */
+
+static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_zip_b, gen_helper_sve_zip_h,
+ gen_helper_sve_zip_s, gen_helper_sve_zip_d,
+ };
+
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned high_ofs = high ? vsz / 2 : 0;
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn) + high_ofs,
+ vec_full_reg_offset(s, a->rm) + high_ofs,
+ vsz, vsz, 0, fns[a->esz]);
+ }
+ return true;
+}
+
+static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data,
+ gen_helper_gvec_3 *fn)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
+ }
+ return true;
+}
+
+static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip(s, a, false);
+}
+
+static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip(s, a, true);
+}
+
+static bool do_zip_q(DisasContext *s, arg_rrr_esz *a, bool high)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned high_ofs = high ? QEMU_ALIGN_DOWN(vsz, 32) / 2 : 0;
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn) + high_ofs,
+ vec_full_reg_offset(s, a->rm) + high_ofs,
+ vsz, vsz, 0, gen_helper_sve2_zip_q);
+ }
+ return true;
+}
+
+static bool trans_ZIP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, false);
+}
+
+static bool trans_ZIP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zip_q(s, a, true);
+}
+
+static gen_helper_gvec_3 * const uzp_fns[4] = {
+ gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
+ gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
+};
+
+static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]);
+}
+
+static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
+}
+
+static bool trans_UZP1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_uzp_q);
+}
+
+static bool trans_UZP2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_uzp_q);
+}
+
+static gen_helper_gvec_3 * const trn_fns[4] = {
+ gen_helper_sve_trn_b, gen_helper_sve_trn_h,
+ gen_helper_sve_trn_s, gen_helper_sve_trn_d,
+};
+
+static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]);
+}
+
+static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
+}
+
+static bool trans_TRN1_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 0, gen_helper_sve2_trn_q);
+}
+
+static bool trans_TRN2_q(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ return do_zzz_data_ool(s, a, 16, gen_helper_sve2_trn_q);
+}
+
+/*
+ *** SVE Permute Vector - Predicated Group
+ */
+
+static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+/* Call the helper that computes the ARM LastActiveElement pseudocode
+ * function, scaled by the element size. This includes the not found
+ * indication; e.g. not found for esz=3 is -8.
+ */
+static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
+{
+ /* Predicate sizes may be smaller than the 8-byte granule simd_desc
+ * encodes, so build the descriptor by hand. We cannot round up, as
+ * we do elsewhere, because we need the exact size.
+ */
+ TCGv_ptr t_p = tcg_temp_new_ptr();
+ TCGv_i32 t_desc;
+ unsigned desc = 0;
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
+
+ tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
+ t_desc = tcg_const_i32(desc);
+
+ gen_helper_sve_last_active_element(ret, t_p, t_desc);
+
+ tcg_temp_free_i32(t_desc);
+ tcg_temp_free_ptr(t_p);
+}
+
+/* Increment LAST to the offset of the next element in the vector,
+ * wrapping around to 0.
+ */
+static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
+{
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_addi_i32(last, last, 1 << esz);
+ if (is_power_of_2(vsz)) {
+ tcg_gen_andi_i32(last, last, vsz - 1);
+ } else {
+ TCGv_i32 max = tcg_const_i32(vsz);
+ TCGv_i32 zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
+ tcg_temp_free_i32(max);
+ tcg_temp_free_i32(zero);
+ }
+}
+
+/* If LAST < 0, set LAST to the offset of the last element in the vector. */
+static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
+{
+ unsigned vsz = vec_full_reg_size(s);
+
+ if (is_power_of_2(vsz)) {
+ tcg_gen_andi_i32(last, last, vsz - 1);
+ } else {
+ TCGv_i32 max = tcg_const_i32(vsz - (1 << esz));
+ TCGv_i32 zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
+ tcg_temp_free_i32(max);
+ tcg_temp_free_i32(zero);
+ }
+}
+
+/* Load an unsigned element of ESZ from BASE+OFS. */
+static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+
+ switch (esz) {
+ case 0:
+ tcg_gen_ld8u_i64(r, base, ofs);
+ break;
+ case 1:
+ tcg_gen_ld16u_i64(r, base, ofs);
+ break;
+ case 2:
+ tcg_gen_ld32u_i64(r, base, ofs);
+ break;
+ case 3:
+ tcg_gen_ld_i64(r, base, ofs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return r;
+}
+
+/* Load an unsigned element of ESZ from RM[LAST]. */
+static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
+ int rm, int esz)
+{
+ TCGv_ptr p = tcg_temp_new_ptr();
+ TCGv_i64 r;
+
+ /* Convert offset into vector into offset into ENV.
+ * The final adjustment for the vector register base
+ * is added via constant offset to the load.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+ /* Adjust for element ordering. See vec_reg_offset. */
+ if (esz < 3) {
+ tcg_gen_xori_i32(last, last, 8 - (1 << esz));
+ }
+#endif
+ tcg_gen_ext_i32_ptr(p, last);
+ tcg_gen_add_ptr(p, p, cpu_env);
+
+ r = load_esz(p, vec_full_reg_offset(s, rm), esz);
+ tcg_temp_free_ptr(p);
+
+ return r;
+}
+
+/* Compute CLAST for a Zreg. */
+static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
+{
+ TCGv_i32 last;
+ TCGLabel *over;
+ TCGv_i64 ele;
+ unsigned vsz, esz = a->esz;
+
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ last = tcg_temp_local_new_i32();
+ over = gen_new_label();
+
+ find_last_active(s, last, esz, a->pg);
+
+ /* There is of course no movcond for a 2048-bit vector,
+ * so we must branch over the actual store.
+ */
+ tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);
+
+ if (!before) {
+ incr_last_active(s, last, esz);
+ }
+
+ ele = load_last_active(s, last, a->rm, esz);
+ tcg_temp_free_i32(last);
+
+ vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
+ tcg_temp_free_i64(ele);
+
+ /* If this insn used MOVPRFX, we may need a second move. */
+ if (a->rd != a->rn) {
+ TCGLabel *done = gen_new_label();
+ tcg_gen_br(done);
+
+ gen_set_label(over);
+ do_mov_z(s, a->rd, a->rn);
+
+ gen_set_label(done);
+ } else {
+ gen_set_label(over);
+ }
+ return true;
+}
+
+static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a)
+{
+ return do_clast_vector(s, a, false);
+}
+
+static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a)
+{
+ return do_clast_vector(s, a, true);
+}
+
+/* Compute CLAST for a scalar. */
+static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
+ bool before, TCGv_i64 reg_val)
+{
+ TCGv_i32 last = tcg_temp_new_i32();
+ TCGv_i64 ele, cmp, zero;
+
+ find_last_active(s, last, esz, pg);
+
+ /* Extend the original value of last prior to incrementing. */
+ cmp = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(cmp, last);
+
+ if (!before) {
+ incr_last_active(s, last, esz);
+ }
+
+ /* The conceit here is that while last < 0 indicates not found, after
+ * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
+ * from which we can load garbage. We then discard the garbage with
+ * a conditional move.
+ */
+ ele = load_last_active(s, last, rm, esz);
+ tcg_temp_free_i32(last);
+
+ zero = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, zero, ele, reg_val);
+
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i64(cmp);
+ tcg_temp_free_i64(ele);
+}
+
+/* Compute CLAST for a Vreg. */
+static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
+{
+ if (sve_access_check(s)) {
+ int esz = a->esz;
+ int ofs = vec_reg_offset(s, a->rd, 0, esz);
+ TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
+
+ do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
+ write_fp_dreg(s, a->rd, reg);
+ tcg_temp_free_i64(reg);
+ }
+ return true;
+}
+
+static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_clast_fp(s, a, false);
+}
+
+static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_clast_fp(s, a, true);
+}
+
+/* Compute CLAST for a Xreg. */
+static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
+{
+ TCGv_i64 reg;
+
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ reg = cpu_reg(s, a->rd);
+ switch (a->esz) {
+ case 0:
+ tcg_gen_ext8u_i64(reg, reg);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(reg, reg);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(reg, reg);
+ break;
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
+ return true;
+}
+
+static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_clast_general(s, a, false);
+}
+
+static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_clast_general(s, a, true);
+}
+
+/* Compute LAST for a scalar. */
+static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
+ int pg, int rm, bool before)
+{
+ TCGv_i32 last = tcg_temp_new_i32();
+ TCGv_i64 ret;
+
+ find_last_active(s, last, esz, pg);
+ if (before) {
+ wrap_last_active(s, last, esz);
+ } else {
+ incr_last_active(s, last, esz);
+ }
+
+ ret = load_last_active(s, last, rm, esz);
+ tcg_temp_free_i32(last);
+ return ret;
+}
+
+/* Compute LAST for a Vreg. */
+static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
+ write_fp_dreg(s, a->rd, val);
+ tcg_temp_free_i64(val);
+ }
+ return true;
+}
+
+static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_last_fp(s, a, false);
+}
+
+static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_last_fp(s, a, true);
+}
+
+/* Compute LAST for a Xreg. */
+static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
+ tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
+ tcg_temp_free_i64(val);
+ }
+ return true;
+}
+
+static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_last_general(s, a, false);
+}
+
+static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_last_general(s, a, true);
+}
+
+static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
+{
+ if (sve_access_check(s)) {
+ do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
+ }
+ return true;
+}
+
+static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
+{
+ if (sve_access_check(s)) {
+ int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
+ TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
+ do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
+ tcg_temp_free_i64(t);
+ }
+ return true;
+}
+
+static bool trans_REVB(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ gen_helper_sve_revb_h,
+ gen_helper_sve_revb_s,
+ gen_helper_sve_revb_d,
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_REVH(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ NULL,
+ NULL,
+ gen_helper_sve_revh_s,
+ gen_helper_sve_revh_d,
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_REVW(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_revw_d : NULL);
+}
+
+static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve_rbit_b,
+ gen_helper_sve_rbit_h,
+ gen_helper_sve_rbit_s,
+ gen_helper_sve_rbit_d,
+ };
+ return do_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a)
+{
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
+ a->rd, a->rn, a->rm, a->pg, a->esz);
+ }
+ return true;
+}
+
+static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
+ a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz);
+ }
+ return true;
+}
+
+/*
+ *** SVE Integer Compare - Vectors Group
+ */
+
+static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_flags_4 *gen_fn)
+{
+ TCGv_ptr pd, zn, zm, pg;
+ unsigned vsz;
+ TCGv_i32 t;
+
+ if (gen_fn == NULL) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ vsz = vec_full_reg_size(s);
+ t = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ pd = tcg_temp_new_ptr();
+ zn = tcg_temp_new_ptr();
+ zm = tcg_temp_new_ptr();
+ pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+
+ gen_fn(t, pd, zn, zm, pg, t);
+
+ tcg_temp_free_ptr(pd);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(zm);
+ tcg_temp_free_ptr(pg);
+
+ do_pred_flags(t);
+
+ tcg_temp_free_i32(t);
+ return true;
+}
+
+#define DO_PPZZ(NAME, name) \
+static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_flags_4 * const fns[4] = { \
+ gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \
+ gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \
+ }; \
+ return do_ppzz_flags(s, a, fns[a->esz]); \
+}
+
+DO_PPZZ(CMPEQ, cmpeq)
+DO_PPZZ(CMPNE, cmpne)
+DO_PPZZ(CMPGT, cmpgt)
+DO_PPZZ(CMPGE, cmpge)
+DO_PPZZ(CMPHI, cmphi)
+DO_PPZZ(CMPHS, cmphs)
+
+#undef DO_PPZZ
+
+#define DO_PPZW(NAME, name) \
+static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_flags_4 * const fns[4] = { \
+ gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \
+ gen_helper_sve_##name##_ppzw_s, NULL \
+ }; \
+ return do_ppzz_flags(s, a, fns[a->esz]); \
+}
+
+DO_PPZW(CMPEQ, cmpeq)
+DO_PPZW(CMPNE, cmpne)
+DO_PPZW(CMPGT, cmpgt)
+DO_PPZW(CMPGE, cmpge)
+DO_PPZW(CMPHI, cmphi)
+DO_PPZW(CMPHS, cmphs)
+DO_PPZW(CMPLT, cmplt)
+DO_PPZW(CMPLE, cmple)
+DO_PPZW(CMPLO, cmplo)
+DO_PPZW(CMPLS, cmpls)
+
+#undef DO_PPZW
+
+/*
+ *** SVE Integer Compare - Immediate Groups
+ */
+
+static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
+ gen_helper_gvec_flags_3 *gen_fn)
+{
+ TCGv_ptr pd, zn, pg;
+ unsigned vsz;
+ TCGv_i32 t;
+
+ if (gen_fn == NULL) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ vsz = vec_full_reg_size(s);
+ t = tcg_const_i32(simd_desc(vsz, vsz, a->imm));
+ pd = tcg_temp_new_ptr();
+ zn = tcg_temp_new_ptr();
+ pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
+
+ gen_fn(t, pd, zn, pg, t);
+
+ tcg_temp_free_ptr(pd);
+ tcg_temp_free_ptr(zn);
+ tcg_temp_free_ptr(pg);
+
+ do_pred_flags(t);
+
+ tcg_temp_free_i32(t);
+ return true;
+}
+
+#define DO_PPZI(NAME, name) \
+static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a) \
+{ \
+ static gen_helper_gvec_flags_3 * const fns[4] = { \
+ gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \
+ gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \
+ }; \
+ return do_ppzi_flags(s, a, fns[a->esz]); \
+}
+
+DO_PPZI(CMPEQ, cmpeq)
+DO_PPZI(CMPNE, cmpne)
+DO_PPZI(CMPGT, cmpgt)
+DO_PPZI(CMPGE, cmpge)
+DO_PPZI(CMPHI, cmphi)
+DO_PPZI(CMPHS, cmphs)
+DO_PPZI(CMPLT, cmplt)
+DO_PPZI(CMPLE, cmple)
+DO_PPZI(CMPLO, cmplo)
+DO_PPZI(CMPLS, cmpls)
+
+#undef DO_PPZI
+
+/*
+ *** SVE Partition Break Group
+ */
+
+static bool do_brk3(DisasContext *s, arg_rprr_s *a,
+ gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned vsz = pred_full_reg_size(s);
+
+ /* Predicate sizes may be smaller than simd_desc's 8-byte granule; build the descriptor by hand. */
+ TCGv_ptr d = tcg_temp_new_ptr();
+ TCGv_ptr n = tcg_temp_new_ptr();
+ TCGv_ptr m = tcg_temp_new_ptr();
+ TCGv_ptr g = tcg_temp_new_ptr();
+ TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
+
+ tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+
+ if (a->s) {
+ fn_s(t, d, n, m, g, t);
+ do_pred_flags(t);
+ } else {
+ fn(d, n, m, g, t);
+ }
+ tcg_temp_free_ptr(d);
+ tcg_temp_free_ptr(n);
+ tcg_temp_free_ptr(m);
+ tcg_temp_free_ptr(g);
+ tcg_temp_free_i32(t);
+ return true;
+}
+
+static bool do_brk2(DisasContext *s, arg_rpr_s *a,
+ gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ unsigned vsz = pred_full_reg_size(s);
+
+ /* Predicate sizes may be smaller than simd_desc's 8-byte granule; build the descriptor by hand. */
+ TCGv_ptr d = tcg_temp_new_ptr();
+ TCGv_ptr n = tcg_temp_new_ptr();
+ TCGv_ptr g = tcg_temp_new_ptr();
+ TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
+
+ tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
+ tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
+
+ if (a->s) {
+ fn_s(t, d, n, g, t);
+ do_pred_flags(t);
+ } else {
+ fn(d, n, g, t);
+ }
+ tcg_temp_free_ptr(d);
+ tcg_temp_free_ptr(n);
+ tcg_temp_free_ptr(g);
+ tcg_temp_free_i32(t);
+ return true;
+}
+
+static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a)
+{
+ return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas);
+}
+
+static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a)
+{
+ return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs);
+}
+
+static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a)
+{
+ return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m);
+}
+
+static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a)
+{
+ return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m);
+}
+
+static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a)
+{
+ return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z);
+}
+
+static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a)
+{
+ return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z);
+}
+
+static bool trans_BRKN(DisasContext *s, arg_rpr_s *a)
+{
+ return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns);
+}
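+
+/* Rough semantics, as a reminder: BRKA keeps lanes true up to and
+ * including the first active true element of Pn, BRKB up to but not
+ * including it, and BRKN propagates the break state into the next
+ * predicate; the *_s helpers additionally set NZCV via PredTest. */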
+
+/*
+ *** SVE Predicate Count Group
+ */
+
+static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
+{
+ unsigned psz = pred_full_reg_size(s);
+
+ if (psz <= 8) {
+ uint64_t psz_mask;
+
+ tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
+ if (pn != pg) {
+ TCGv_i64 g = tcg_temp_new_i64();
+ tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_and_i64(val, val, g);
+ tcg_temp_free_i64(g);
+ }
+
+ /* Reduce the pred_esz_masks value simply to reduce the
+ * size of the code generated here.
+ */
+ psz_mask = MAKE_64BIT_MASK(0, psz * 8);
+ tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);
+
+ tcg_gen_ctpop_i64(val, val);
+ } else {
+ TCGv_ptr t_pn = tcg_temp_new_ptr();
+ TCGv_ptr t_pg = tcg_temp_new_ptr();
+ unsigned desc = 0;
+ TCGv_i32 t_desc;
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
+
+ tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ t_desc = tcg_const_i32(desc);
+
+ gen_helper_sve_cntp(val, t_pn, t_pg, t_desc);
+ tcg_temp_free_ptr(t_pn);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i32(t_desc);
+ }
+}
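+
+/* Small-predicate sketch: for psz <= 8 the count is one popcount.
+ * E.g. with esz = 2 only every fourth predicate bit is significant
+ * (mask 0x1111...1111), so AND with PG, AND with the mask, and ctpop
+ * yield the number of active 32-bit elements. */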
+
+static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
+{
+ if (sve_access_check(s)) {
+ do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
+ }
+ return true;
+}
+
+static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+ TCGv_i64 val = tcg_temp_new_i64();
+
+ do_cntp(s, val, a->esz, a->pg, a->pg);
+ if (a->d) {
+ tcg_gen_sub_i64(reg, reg, val);
+ } else {
+ tcg_gen_add_i64(reg, reg, val);
+ }
+ tcg_temp_free_i64(val);
+ }
+ return true;
+}
+
+static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i64 val = tcg_temp_new_i64();
+ GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;
+
+ do_cntp(s, val, a->esz, a->pg, a->pg);
+ gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn), val, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+ TCGv_i64 val = tcg_temp_new_i64();
+
+ do_cntp(s, val, a->esz, a->pg, a->pg);
+ do_sat_addsub_32(reg, val, a->u, a->d);
+ }
+ return true;
+}
+
+static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 reg = cpu_reg(s, a->rd);
+ TCGv_i64 val = tcg_temp_new_i64();
+
+ do_cntp(s, val, a->esz, a->pg, a->pg);
+ do_sat_addsub_64(reg, val, a->u, a->d);
+ }
+ return true;
+}
+
+static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 val = tcg_temp_new_i64();
+ do_cntp(s, val, a->esz, a->pg, a->pg);
+ do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
+ }
+ return true;
+}
+
+/*
+ *** SVE Integer Compare Scalars Group
+ */
+
+static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
+{
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
+ TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
+ TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
+ TCGv_i64 cmp = tcg_temp_new_i64();
+
+ tcg_gen_setcond_i64(cond, cmp, rn, rm);
+ tcg_gen_extrl_i64_i32(cpu_NF, cmp);
+ tcg_temp_free_i64(cmp);
+
+ /* VF = !NF & !CF. */
+ tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);
+
+ /* Both NF and VF actually look at bit 31. */
+ tcg_gen_neg_i32(cpu_NF, cpu_NF);
+ tcg_gen_neg_i32(cpu_VF, cpu_VF);
+ return true;
+}
+
+static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
+{
+ TCGv_i64 op0, op1, t0, t1, tmax;
+ TCGv_i32 t2, t3;
+ TCGv_ptr ptr;
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned desc = 0;
+ TCGCond cond;
+ uint64_t maxval;
+ /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
+ bool eq = a->eq == a->lt;
+
+ /* The greater-than conditions are all SVE2. */
+ if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ op0 = read_cpu_reg(s, a->rn, 1);
+ op1 = read_cpu_reg(s, a->rm, 1);
+
+ if (!a->sf) {
+ if (a->u) {
+ tcg_gen_ext32u_i64(op0, op0);
+ tcg_gen_ext32u_i64(op1, op1);
+ } else {
+ tcg_gen_ext32s_i64(op0, op0);
+ tcg_gen_ext32s_i64(op1, op1);
+ }
+ }
+
+ /* For the helper, compress the different conditions into a computation
+ * of the number of iterations for which the condition is true.
+ */
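+ /* Illustrative case: WHILELO with op0 = 3 and op1 = 7 gives t0 = 4
+ * iterations; the equality forms add one more, and the count is then
+ * clamped to the element total and zeroed if the initial comparison
+ * fails. */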
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ if (a->lt) {
+ tcg_gen_sub_i64(t0, op1, op0);
+ if (a->u) {
+ maxval = a->sf ? UINT64_MAX : UINT32_MAX;
+ cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
+ } else {
+ maxval = a->sf ? INT64_MAX : INT32_MAX;
+ cond = eq ? TCG_COND_LE : TCG_COND_LT;
+ }
+ } else {
+ tcg_gen_sub_i64(t0, op0, op1);
+ if (a->u) {
+ maxval = 0;
+ cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
+ } else {
+ maxval = a->sf ? INT64_MIN : INT32_MIN;
+ cond = eq ? TCG_COND_GE : TCG_COND_GT;
+ }
+ }
+
+ tmax = tcg_const_i64(vsz >> a->esz);
+ if (eq) {
+ /* Equality means one more iteration. */
+ tcg_gen_addi_i64(t0, t0, 1);
+
+ /*
+ * For the less-than while, if op1 is maxval (and the only time
+ * the addition above could overflow), then we produce an all-true
+ * predicate by setting the count to the vector length. This is
+ * because the pseudocode is described as an increment + compare
+ * loop, and the maximum integer would always compare true.
+ * Similarly, the greater-than while has the same issue with the
+ * minimum integer due to the decrement + compare loop.
+ */
+ tcg_gen_movi_i64(t1, maxval);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
+ }
+
+ /* Bound to the maximum. */
+ tcg_gen_umin_i64(t0, t0, tmax);
+ tcg_temp_free_i64(tmax);
+
+ /* Set the count to zero if the condition is false. */
+ tcg_gen_movi_i64(t1, 0);
+ tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
+ tcg_temp_free_i64(t1);
+
+ /* Since we're bounded, pass as a 32-bit type. */
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, t0);
+ tcg_temp_free_i64(t0);
+
+    /* Scale elements to bits; each element spans 1 << esz predicate bits. */
+ tcg_gen_shli_i32(t2, t2, a->esz);
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ t3 = tcg_const_i32(desc);
+
+ ptr = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+
+ if (a->lt) {
+ gen_helper_sve_whilel(t2, ptr, t2, t3);
+ } else {
+ gen_helper_sve_whileg(t2, ptr, t2, t3);
+ }
+ do_pred_flags(t2);
+
+ tcg_temp_free_ptr(ptr);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ return true;
+}
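+
+/*
+ * Worked example (illustrative): WHILELT with 32-bit operands op0 = 5,
+ * op1 = 8 computes t0 = 8 - 5 = 3 with the condition 5 < 8 true, so the
+ * helper sets the first three 32-bit elements of the predicate, assuming
+ * the vector holds at least three such elements.
+ */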
+
+static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
+{
+ TCGv_i64 op0, op1, diff, t1, tmax;
+ TCGv_i32 t2, t3;
+ TCGv_ptr ptr;
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned desc = 0;
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ op0 = read_cpu_reg(s, a->rn, 1);
+ op1 = read_cpu_reg(s, a->rm, 1);
+
+ tmax = tcg_const_i64(vsz);
+ diff = tcg_temp_new_i64();
+
+ if (a->rw) {
+ /* WHILERW */
+ /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
+ t1 = tcg_temp_new_i64();
+ tcg_gen_sub_i64(diff, op0, op1);
+ tcg_gen_sub_i64(t1, op1, op0);
+ tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
+ tcg_temp_free_i64(t1);
+ /* Round down to a multiple of ESIZE. */
+ tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* If op1 == op0, diff == 0, and the condition is always true. */
+ tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
+ } else {
+ /* WHILEWR */
+ tcg_gen_sub_i64(diff, op1, op0);
+ /* Round down to a multiple of ESIZE. */
+ tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* If op0 >= op1, diff <= 0, the condition is always true. */
+ tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
+ }
+
+ /* Bound to the maximum. */
+ tcg_gen_umin_i64(diff, diff, tmax);
+ tcg_temp_free_i64(tmax);
+
+ /* Since we're bounded, pass as a 32-bit type. */
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, diff);
+ tcg_temp_free_i64(diff);
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ t3 = tcg_const_i32(desc);
+
+ ptr = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
+
+ gen_helper_sve_whilel(t2, ptr, t2, t3);
+ do_pred_flags(t2);
+
+ tcg_temp_free_ptr(ptr);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ return true;
+}
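+
+/*
+ * Illustrative note: for WHILEWR, diff = op1 - op0 rounded down to a
+ * multiple of the element size, so e.g. op0 = 0x1000, op1 = 0x1008 with
+ * esz = MO_32 gives diff = 8: the first two 32-bit elements are hazard
+ * free.  When op0 >= op1 the movcond above forces an all-true predicate.
+ */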
+
+/*
+ *** SVE Integer Wide Immediate - Unpredicated Group
+ */
+
+static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ int dofs = vec_full_reg_offset(s, a->rd);
+ uint64_t imm;
+
+ /* Decode the VFP immediate. */
+ imm = vfp_expand_imm(a->esz, a->imm);
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
+ }
+ return true;
+}
+
+static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
+{
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ int dofs = vec_full_reg_offset(s, a->rd);
+
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
+ }
+ return true;
+}
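+
+/*
+ * In these wide-immediate encodings, insn bit 13 is the immediate shift
+ * (sh) field; sh == 1 with byte elements (esz == 0) is an unallocated
+ * encoding, hence the extract32(s->insn, 13, 1) tests above and below.
+ */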
+
+static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_addi(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ a->imm = -a->imm;
+ return trans_ADD_zzi(s, a);
+}
+
+static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
+ static const GVecGen2s op[4] = {
+ { .fni8 = tcg_gen_vec_sub8_i64,
+ .fniv = tcg_gen_sub_vec,
+ .fno = gen_helper_sve_subri_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8,
+ .scalar_first = true },
+ { .fni8 = tcg_gen_vec_sub16_i64,
+ .fniv = tcg_gen_sub_vec,
+ .fno = gen_helper_sve_subri_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16,
+ .scalar_first = true },
+ { .fni4 = tcg_gen_sub_i32,
+ .fniv = tcg_gen_sub_vec,
+ .fno = gen_helper_sve_subri_s,
+ .opt_opc = vecop_list,
+ .vece = MO_32,
+ .scalar_first = true },
+ { .fni8 = tcg_gen_sub_i64,
+ .fniv = tcg_gen_sub_vec,
+ .fno = gen_helper_sve_subri_d,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64,
+ .scalar_first = true }
+ };
+
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i64 c = tcg_const_i64(a->imm);
+ tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, c, &op[a->esz]);
+ tcg_temp_free_i64(c);
+ }
+ return true;
+}
+
+static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_muli(a->esz, vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
+{
+ if (a->esz == 0 && extract32(s->insn, 13, 1)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 val = tcg_const_i64(a->imm);
+ do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d);
+ tcg_temp_free_i64(val);
+ }
+ return true;
+}
+
+static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_zzi_sat(s, a, false, false);
+}
+
+static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_zzi_sat(s, a, true, false);
+}
+
+static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_zzi_sat(s, a, false, true);
+}
+
+static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a)
+{
+ return do_zzi_sat(s, a, true, true);
+}
+
+static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i64 c = tcg_const_i64(a->imm);
+
+ tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ c, vsz, vsz, 0, fn);
+ tcg_temp_free_i64(c);
+ }
+ return true;
+}
+
+#define DO_ZZI(NAME, name) \
+static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \
+{ \
+ static gen_helper_gvec_2i * const fns[4] = { \
+ gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \
+ gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \
+ }; \
+ return do_zzi_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZZI(SMAX, smax)
+DO_ZZI(UMAX, umax)
+DO_ZZI(SMIN, smin)
+DO_ZZI(UMIN, umin)
+
+#undef DO_ZZI
+
+static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a)
+{
+ static gen_helper_gvec_4 * const fns[2][2] = {
+ { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
+ { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
+ };
+
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0);
+ }
+ return true;
+}
+
+/*
+ * SVE Multiply - Indexed
+ */
+
+static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a,
+ gen_helper_gvec_4 *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
+ }
+ return true;
+}
+
+#define DO_RRXR(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { return do_zzxz_ool(s, a, FUNC); }
+
+DO_RRXR(trans_SDOT_zzxw_s, gen_helper_gvec_sdot_idx_b)
+DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h)
+DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b)
+DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
+
+static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b);
+}
+
+static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b);
+}
+
+#undef DO_RRXR
+
+static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data,
+ gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vsz, vsz, data, fn);
+ }
+ return true;
+}
+
+#define DO_SVE2_RRX(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, a->index, FUNC); }
+
+DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
+DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
+DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
+
+DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
+DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
+DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
+
+DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
+DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
+
+#undef DO_SVE2_RRX
+
+#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { \
+ return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, \
+ (a->index << 1) | TOP, FUNC); \
+ }
+
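+/*
+ * The low bit of the data field selects bottom (B, even-numbered) or top
+ * (T, odd-numbered) source elements; the index occupies the bits above.
+ */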
+DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
+DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
+DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
+DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)
+
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
+DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
+DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
+
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
+DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
+DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
+
+#undef DO_SVE2_RRX_TB
+
+static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra,
+ int data, gen_helper_gvec_4 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ vec_full_reg_offset(s, rm),
+ vec_full_reg_offset(s, ra),
+ vsz, vsz, data, fn);
+ }
+ return true;
+}
+
+#define DO_SVE2_RRXR(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, a->index, FUNC); }
+
+DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
+DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
+DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
+
+DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
+DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
+DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
+
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
+DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)
+
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
+DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)
+
+#undef DO_SVE2_RRXR
+
+#define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
+ static bool NAME(DisasContext *s, arg_rrxr_esz *a) \
+ { \
+ return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->rd, \
+ (a->index << 1) | TOP, FUNC); \
+ }
+
+DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
+DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
+DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
+DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)
+
+DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
+DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
+DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
+DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
+
+#undef DO_SVE2_RRXR_TB
+
+#define DO_SVE2_RRXR_ROT(NAME, FUNC) \
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
+ { \
+ return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, \
+ (a->index << 2) | a->rot, FUNC); \
+ }
+
+DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
+DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
+
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
+DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
+
+DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
+DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
+
+#undef DO_SVE2_RRXR_ROT
+
+/*
+ *** SVE Floating Point Multiply-Add Indexed Group
+ */
+
+static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
+{
+ static gen_helper_gvec_4_ptr * const fns[3] = {
+ gen_helper_gvec_fmla_idx_h,
+ gen_helper_gvec_fmla_idx_s,
+ gen_helper_gvec_fmla_idx_d,
+ };
+
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz, (a->index << 1) | sub,
+ fns[a->esz - 1]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
+{
+ return do_FMLA_zzxz(s, a, false);
+}
+
+static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
+{
+ return do_FMLA_zzxz(s, a, true);
+}
+
+/*
+ *** SVE Floating Point Multiply Indexed Group
+ */
+
+static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[3] = {
+ gen_helper_gvec_fmul_idx_h,
+ gen_helper_gvec_fmul_idx_s,
+ gen_helper_gvec_fmul_idx_d,
+ };
+
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ status, vsz, vsz, a->index, fns[a->esz - 1]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+/*
+ *** SVE Floating Point Fast Reduction Group
+ */
+
+typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32);
+
+static void do_reduce(DisasContext *s, arg_rpr_esz *a,
+ gen_helper_fp_reduce *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned p2vsz = pow2ceil(vsz);
+ TCGv_i32 t_desc = tcg_const_i32(simd_desc(vsz, vsz, p2vsz));
+ TCGv_ptr t_zn, t_pg, status;
+ TCGv_i64 temp;
+
+ temp = tcg_temp_new_i64();
+ t_zn = tcg_temp_new_ptr();
+ t_pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
+ fn(temp, t_zn, t_pg, status, t_desc);
+ tcg_temp_free_ptr(t_zn);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_ptr(status);
+ tcg_temp_free_i32(t_desc);
+
+ write_fp_dreg(s, a->rd, temp);
+ tcg_temp_free_i64(temp);
+}
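+
+/*
+ * Note: the descriptor's data field carries pow2ceil(vsz) so that the
+ * helper can pad a non-power-of-two vector with identity elements and
+ * perform a simple pairwise tree reduction.
+ */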
+
+#define DO_VPZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
+{ \
+ static gen_helper_fp_reduce * const fns[3] = { \
+ gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, \
+ gen_helper_sve_##name##_d, \
+ }; \
+ if (a->esz == 0) { \
+ return false; \
+ } \
+ if (sve_access_check(s)) { \
+ do_reduce(s, a, fns[a->esz - 1]); \
+ } \
+ return true; \
+}
+
+DO_VPZ(FADDV, faddv)
+DO_VPZ(FMINNMV, fminnmv)
+DO_VPZ(FMAXNMV, fmaxnmv)
+DO_VPZ(FMINV, fminv)
+DO_VPZ(FMAXV, fmaxv)
+
+/*
+ *** SVE Floating Point Unary Operations - Unpredicated Group
+ */
+
+static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+}
+
+static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a)
+{
+ static gen_helper_gvec_2_ptr * const fns[3] = {
+ gen_helper_gvec_frecpe_h,
+ gen_helper_gvec_frecpe_s,
+ gen_helper_gvec_frecpe_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ do_zz_fp(s, a, fns[a->esz - 1]);
+ }
+ return true;
+}
+
+static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a)
+{
+ static gen_helper_gvec_2_ptr * const fns[3] = {
+ gen_helper_gvec_frsqrte_h,
+ gen_helper_gvec_frsqrte_s,
+ gen_helper_gvec_frsqrte_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ do_zz_fp(s, a, fns[a->esz - 1]);
+ }
+ return true;
+}
+
+/*
+ *** SVE Floating Point Compare with Zero Group
+ */
+
+static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
+ gen_helper_gvec_3_ptr *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
+ tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+}
+
+#define DO_PPZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
+{ \
+ static gen_helper_gvec_3_ptr * const fns[3] = { \
+ gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, \
+ gen_helper_sve_##name##_d, \
+ }; \
+ if (a->esz == 0) { \
+ return false; \
+ } \
+ if (sve_access_check(s)) { \
+ do_ppz_fp(s, a, fns[a->esz - 1]); \
+ } \
+ return true; \
+}
+
+DO_PPZ(FCMGE_ppz0, fcmge0)
+DO_PPZ(FCMGT_ppz0, fcmgt0)
+DO_PPZ(FCMLE_ppz0, fcmle0)
+DO_PPZ(FCMLT_ppz0, fcmlt0)
+DO_PPZ(FCMEQ_ppz0, fcmeq0)
+DO_PPZ(FCMNE_ppz0, fcmne0)
+
+#undef DO_PPZ
+
+/*
+ *** SVE floating-point trig multiply-add coefficient
+ */
+
+static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[3] = {
+ gen_helper_sve_ftmad_h,
+ gen_helper_sve_ftmad_s,
+ gen_helper_sve_ftmad_d,
+ };
+
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ status, vsz, vsz, a->imm, fns[a->esz - 1]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+/*
+ *** SVE Floating Point Accumulating Reduction Group
+ */
+
+static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
+{
+ typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
+ static fadda_fn * const fns[3] = {
+ gen_helper_sve_fadda_h,
+ gen_helper_sve_fadda_s,
+ gen_helper_sve_fadda_d,
+ };
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_rm, t_pg, t_fpst;
+ TCGv_i64 t_val;
+ TCGv_i32 t_desc;
+
+ if (a->esz == 0) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
+ t_rm = tcg_temp_new_ptr();
+ t_pg = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
+ t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ t_desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+
+ fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
+
+ tcg_temp_free_i32(t_desc);
+ tcg_temp_free_ptr(t_fpst);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_ptr(t_rm);
+
+ write_fp_dreg(s, a->rd, t_val);
+ tcg_temp_free_i64(t_val);
+ return true;
+}
+
+/*
+ *** SVE Floating Point Arithmetic - Unpredicated Group
+ */
+
+static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a,
+ gen_helper_gvec_3_ptr *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+#define DO_FP3(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3_ptr * const fns[4] = { \
+ NULL, gen_helper_gvec_##name##_h, \
+ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
+ }; \
+ return do_zzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_FP3(FADD_zzz, fadd)
+DO_FP3(FSUB_zzz, fsub)
+DO_FP3(FMUL_zzz, fmul)
+DO_FP3(FTSMUL, ftsmul)
+DO_FP3(FRECPS, recps)
+DO_FP3(FRSQRTS, rsqrts)
+
+#undef DO_FP3
+
+/*
+ *** SVE Floating Point Arithmetic - Predicated Group
+ */
+
+static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+#define DO_FP3(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ NULL, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
+ }; \
+ return do_zpzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_FP3(FADD_zpzz, fadd)
+DO_FP3(FSUB_zpzz, fsub)
+DO_FP3(FMUL_zpzz, fmul)
+DO_FP3(FMIN_zpzz, fmin)
+DO_FP3(FMAX_zpzz, fmax)
+DO_FP3(FMINNM_zpzz, fminnum)
+DO_FP3(FMAXNM_zpzz, fmaxnum)
+DO_FP3(FABD, fabd)
+DO_FP3(FSCALE, fscalbn)
+DO_FP3(FDIV, fdiv)
+DO_FP3(FMULX, fmulx)
+
+#undef DO_FP3
+
+typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
+ TCGv_i64, TCGv_ptr, TCGv_i32);
+
+static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
+ TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_zd, t_zn, t_pg, status;
+ TCGv_i32 desc;
+
+ t_zd = tcg_temp_new_ptr();
+ t_zn = tcg_temp_new_ptr();
+ t_pg = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
+ tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+
+ status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
+ fn(t_zd, t_zn, t_pg, scalar, status, desc);
+
+ tcg_temp_free_i32(desc);
+ tcg_temp_free_ptr(status);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_ptr(t_zn);
+ tcg_temp_free_ptr(t_zd);
+}
+
+static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
+ gen_helper_sve_fp2scalar *fn)
+{
+ TCGv_i64 temp = tcg_const_i64(imm);
+ do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn);
+ tcg_temp_free_i64(temp);
+}
+
+#define DO_FP_IMM(NAME, name, const0, const1) \
+static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \
+{ \
+ static gen_helper_sve_fp2scalar * const fns[3] = { \
+ gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, \
+ gen_helper_sve_##name##_d \
+ }; \
+ static uint64_t const val[3][2] = { \
+ { float16_##const0, float16_##const1 }, \
+ { float32_##const0, float32_##const1 }, \
+ { float64_##const0, float64_##const1 }, \
+ }; \
+ if (a->esz == 0) { \
+ return false; \
+ } \
+ if (sve_access_check(s)) { \
+ do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \
+ } \
+ return true; \
+}
+
+DO_FP_IMM(FADD, fadds, half, one)
+DO_FP_IMM(FSUB, fsubs, half, one)
+DO_FP_IMM(FMUL, fmuls, half, two)
+DO_FP_IMM(FSUBR, fsubrs, half, one)
+DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
+DO_FP_IMM(FMINNM, fminnms, zero, one)
+DO_FP_IMM(FMAX, fmaxs, zero, one)
+DO_FP_IMM(FMIN, fmins, zero, one)
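+
+/*
+ * Illustrative decoding: a->imm is a single bit choosing between the two
+ * constants named above, e.g. FADD_zpzi adds 0.5 when imm == 0 and 1.0
+ * when imm == 1, in the element's own format per esz.
+ */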
+
+#undef DO_FP_IMM
+
+static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (fn == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+#define DO_FPCMP(NAME, name) \
+static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ NULL, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
+ }; \
+ return do_fp_cmp(s, a, fns[a->esz]); \
+}
+
+DO_FPCMP(FCMGE, fcmge)
+DO_FPCMP(FCMGT, fcmgt)
+DO_FPCMP(FCMEQ, fcmeq)
+DO_FPCMP(FCMNE, fcmne)
+DO_FPCMP(FCMUO, fcmuo)
+DO_FPCMP(FACGE, facge)
+DO_FPCMP(FACGT, facgt)
+
+#undef DO_FPCMP
+
+static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
+{
+ static gen_helper_gvec_4_ptr * const fns[3] = {
+ gen_helper_sve_fcadd_h,
+ gen_helper_sve_fcadd_s,
+ gen_helper_sve_fcadd_d
+ };
+
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, a->rot, fns[a->esz - 1]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
+ gen_helper_gvec_5_ptr *fn)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+#define DO_FMLA(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
+{ \
+ static gen_helper_gvec_5_ptr * const fns[4] = { \
+ NULL, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
+ }; \
+ return do_fmla(s, a, fns[a->esz]); \
+}
+
+DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
+DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
+DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
+DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
+
+#undef DO_FMLA
+
+static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
+{
+ static gen_helper_gvec_5_ptr * const fns[4] = {
+ NULL,
+ gen_helper_sve_fcmla_zpzzz_h,
+ gen_helper_sve_fcmla_zpzzz_s,
+ gen_helper_sve_fcmla_zpzzz_d,
+ };
+
+ if (a->esz == 0) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, a->rot, fns[a->esz]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
+{
+ static gen_helper_gvec_4_ptr * const fns[2] = {
+ gen_helper_gvec_fcmlah_idx,
+ gen_helper_gvec_fcmlas_idx,
+ };
+
+ tcg_debug_assert(a->esz == 1 || a->esz == 2);
+ tcg_debug_assert(a->rd == a->ra);
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz,
+ a->index * 4 + a->rot,
+ fns[a->esz - 1]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+/*
+ *** SVE Floating Point Unary Operations Predicated Group
+ */
+
+static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg,
+ bool is_fp16, gen_helper_gvec_3_ptr *fn)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
+ vec_full_reg_offset(s, rn),
+ pred_full_reg_offset(s, pg),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh);
+}
+
+static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
+}
+
+static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
+}
+
+static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
+}
+
+static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd);
+}
+
+static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds);
+}
+
+static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd);
+}
+
+static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh);
+}
+
+static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh);
+}
+
+static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs);
+}
+
+static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs);
+}
+
+static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd);
+}
+
+static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd);
+}
+
+static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss);
+}
+
+static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss);
+}
+
+static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd);
+}
+
+static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd);
+}
+
+static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds);
+}
+
+static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds);
+}
+
+static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd);
+}
+
+static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd);
+}
+
+static gen_helper_gvec_3_ptr * const frint_fns[3] = {
+ gen_helper_sve_frint_h,
+ gen_helper_sve_frint_s,
+ gen_helper_sve_frint_d
+};
+
+static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16,
+ frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[3] = {
+ gen_helper_sve_frintx_h,
+ gen_helper_sve_frintx_s,
+ gen_helper_sve_frintx_d
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
+}
+
+static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
+ int mode, gen_helper_gvec_3_ptr *fn)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_i32 tmode = tcg_const_i32(mode);
+ TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+
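+        /*
+         * gen_helper_set_rmode installs the new rounding mode and hands
+         * back the previous one in tmode, so the second call below
+         * restores the original mode afterwards.
+         */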
+ gen_helper_set_rmode(tmode, tmode, status);
+
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fn);
+
+ gen_helper_set_rmode(tmode, tmode, status);
+ tcg_temp_free_i32(tmode);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]);
+}
+
+static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[3] = {
+ gen_helper_sve_frecpx_h,
+ gen_helper_sve_frecpx_s,
+ gen_helper_sve_frecpx_d
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
+}
+
+static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[3] = {
+ gen_helper_sve_fsqrt_h,
+ gen_helper_sve_fsqrt_s,
+ gen_helper_sve_fsqrt_d
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
+}
+
+static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh);
+}
+
+static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh);
+}
+
+static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh);
+}
+
+static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss);
+}
+
+static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds);
+}
+
+static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd);
+}
+
+static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd);
+}
+
+static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh);
+}
+
+static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh);
+}
+
+static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh);
+}
+
+static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss);
+}
+
+static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds);
+}
+
+static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd);
+}
+
+static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd);
+}
+
+/*
+ *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
+ */
+
+/* Subroutine loading a vector register at VOFS of LEN bytes.
+ * The load should begin at the address Rn + IMM.
+ */
+
+static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+{
+ int len_align = QEMU_ALIGN_DOWN(len, 8);
+ int len_remain = len % 8;
+ int nparts = len / 8 + ctpop8(len_remain);
+ int midx = get_mem_index(s);
+ TCGv_i64 dirty_addr, clean_addr, t0, t1;
+
+ dirty_addr = tcg_temp_new_i64();
+ tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
+ tcg_temp_free_i64(dirty_addr);
+
+ /*
+ * Note that unpredicated load/store of vector/predicate registers
+ * are defined as a stream of bytes, which equates to little-endian
+ * operations on larger quantities.
+ * Attempt to keep code expansion to a minimum by limiting the
+ * amount of unrolling done.
+ */
+ if (nparts <= 4) {
+ int i;
+
+ t0 = tcg_temp_new_i64();
+ for (i = 0; i < len_align; i += 8) {
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_st_i64(t0, cpu_env, vofs + i);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+ }
+ tcg_temp_free_i64(t0);
+ } else {
+ TCGLabel *loop = gen_new_label();
+ TCGv_ptr tp, i = tcg_const_local_ptr(0);
+
+ /* Copy the clean address into a local temp, live across the loop. */
+ t0 = clean_addr;
+ clean_addr = new_tmp_a64_local(s);
+ tcg_gen_mov_i64(clean_addr, t0);
+
+ gen_set_label(loop);
+
+ t0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+
+ tp = tcg_temp_new_ptr();
+ tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_addi_ptr(i, i, 8);
+ tcg_gen_st_i64(t0, tp, vofs);
+ tcg_temp_free_ptr(tp);
+ tcg_temp_free_i64(t0);
+
+ tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
+ tcg_temp_free_ptr(i);
+ }
+
+ /*
+ * Predicate register loads can be any multiple of 2.
+ * Note that we still store the entire 64-bit unit into cpu_env.
+ */
+ if (len_remain) {
+ t0 = tcg_temp_new_i64();
+ switch (len_remain) {
+ case 2:
+ case 4:
+ case 8:
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
+ MO_LE | ctz32(len_remain));
+ break;
+
+ case 6:
+ t1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 4);
+ tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
+ tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
+ tcg_temp_free_i64(t1);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+ tcg_temp_free_i64(t0);
+ }
+}
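+
+/*
+ * Note on the unroll heuristic above: nparts counts one memory access
+ * per aligned 8-byte part plus ctpop8(len_remain) accesses for the tail,
+ * since a 6-byte remainder takes a 4-byte plus a 2-byte access while a
+ * remainder of 2, 4 or 8 bytes takes only one.
+ */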
+
+/* Similarly for stores. */
+static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
+{
+ int len_align = QEMU_ALIGN_DOWN(len, 8);
+ int len_remain = len % 8;
+ int nparts = len / 8 + ctpop8(len_remain);
+ int midx = get_mem_index(s);
+ TCGv_i64 dirty_addr, clean_addr, t0;
+
+ dirty_addr = tcg_temp_new_i64();
+ tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
+ clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
+ tcg_temp_free_i64(dirty_addr);
+
+ /* Note that unpredicated load/store of vector/predicate registers
+ * are defined as a stream of bytes, which equates to little-endian
+ * operations on larger quantities. There is no nice way to force
+ * a little-endian store for aarch64_be-linux-user out of line.
+ *
+ * Attempt to keep code expansion to a minimum by limiting the
+ * amount of unrolling done.
+ */
+ if (nparts <= 4) {
+ int i;
+
+ t0 = tcg_temp_new_i64();
+ for (i = 0; i < len_align; i += 8) {
+ tcg_gen_ld_i64(t0, cpu_env, vofs + i);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+ }
+ tcg_temp_free_i64(t0);
+ } else {
+ TCGLabel *loop = gen_new_label();
+ TCGv_ptr tp, i = tcg_const_local_ptr(0);
+
+ /* Copy the clean address into a local temp, live across the loop. */
+ t0 = clean_addr;
+ clean_addr = new_tmp_a64_local(s);
+ tcg_gen_mov_i64(clean_addr, t0);
+
+ gen_set_label(loop);
+
+ t0 = tcg_temp_new_i64();
+ tp = tcg_temp_new_ptr();
+ tcg_gen_add_ptr(tp, cpu_env, i);
+ tcg_gen_ld_i64(t0, tp, vofs);
+ tcg_gen_addi_ptr(i, i, 8);
+ tcg_temp_free_ptr(tp);
+
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+ tcg_temp_free_i64(t0);
+
+ tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
+ tcg_temp_free_ptr(i);
+ }
+
+ /* Predicate register stores can be any multiple of 2. */
+ if (len_remain) {
+ t0 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
+
+ switch (len_remain) {
+ case 2:
+ case 4:
+ case 8:
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx,
+ MO_LE | ctz32(len_remain));
+ break;
+
+ case 6:
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
+ tcg_gen_addi_i64(clean_addr, clean_addr, 4);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(t0);
+ }
+}
+
+static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
+{
+ if (sve_access_check(s)) {
+ int size = vec_full_reg_size(s);
+ int off = vec_full_reg_offset(s, a->rd);
+ do_ldr(s, off, size, a->rn, a->imm * size);
+ }
+ return true;
+}
+
+static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
+{
+ if (sve_access_check(s)) {
+ int size = pred_full_reg_size(s);
+ int off = pred_full_reg_offset(s, a->rd);
+ do_ldr(s, off, size, a->rn, a->imm * size);
+ }
+ return true;
+}
+
+static bool trans_STR_zri(DisasContext *s, arg_rri *a)
+{
+ if (sve_access_check(s)) {
+ int size = vec_full_reg_size(s);
+ int off = vec_full_reg_offset(s, a->rd);
+ do_str(s, off, size, a->rn, a->imm * size);
+ }
+ return true;
+}
+
+static bool trans_STR_pri(DisasContext *s, arg_rri *a)
+{
+ if (sve_access_check(s)) {
+ int size = pred_full_reg_size(s);
+ int off = pred_full_reg_offset(s, a->rd);
+ do_str(s, off, size, a->rn, a->imm * size);
+ }
+ return true;
+}
+
+/*
+ *** SVE Memory - Contiguous Load Group
+ */
+
+/* The memory mode of the dtype. */
+static const MemOp dtype_mop[16] = {
+ MO_UB, MO_UB, MO_UB, MO_UB,
+ MO_SL, MO_UW, MO_UW, MO_UW,
+ MO_SW, MO_SW, MO_UL, MO_UL,
+ MO_SB, MO_SB, MO_SB, MO_Q
+};
+
+#define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
+
+/* The vector element size of dtype. */
+static const uint8_t dtype_esz[16] = {
+ 0, 1, 2, 3,
+ 3, 1, 2, 3,
+ 3, 2, 2, 3,
+ 3, 2, 1, 3
+};
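+
+/*
+ * Example: dtype 3 is LD1B into 64-bit elements, so dtype_mop[3] = MO_UB
+ * (one unsigned byte per element in memory) while dtype_esz[3] = 3
+ * (doubleword elements in the register).
+ */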
+
+static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
+ int dtype, uint32_t mte_n, bool is_write,
+ gen_helper_gvec_mem *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_pg;
+ TCGv_i32 t_desc;
+ int desc = 0;
+
+ /*
+ * For e.g. LD4, there are not enough arguments to pass all 4
+ * registers as pointers, so encode the regno into the data field.
+ * For consistency, do this even for LD1.
+ */
+ if (s->mte_active[0]) {
+ int msz = dtype_msz(dtype);
+
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
+ desc <<= SVE_MTEDESC_SHIFT;
+ } else {
+ addr = clean_data_tbi(s, addr);
+ }
+
+ desc = simd_desc(vsz, vsz, zt | desc);
+ t_desc = tcg_const_i32(desc);
+ t_pg = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ fn(cpu_env, t_pg, addr, t_desc);
+
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i32(t_desc);
+}
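+
+/*
+ * Descriptor layout note: the low bits hold the usual simd_desc() fields
+ * with the register number zt folded into the data field; when MTE is
+ * active, the MTEDESC fields sit above SVE_MTEDESC_SHIFT so that a
+ * single 32-bit descriptor carries both.
+ */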
+
+/* Indexed by [mte][be][dtype][nreg] */
+static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
+ { /* mte inactive, little-endian */
+ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+ gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+ { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
+ gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
+ { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
+ gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
+ { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
+ gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
+
+ /* mte inactive, big-endian */
+ { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
+ gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
+ { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
+ gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
+ { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
+ gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
+ { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
+ gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
+
+ { /* mte active, little-endian */
+ { { gen_helper_sve_ld1bb_r_mte,
+ gen_helper_sve_ld2bb_r_mte,
+ gen_helper_sve_ld3bb_r_mte,
+ gen_helper_sve_ld4bb_r_mte },
+ { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_le_r_mte,
+ gen_helper_sve_ld2hh_le_r_mte,
+ gen_helper_sve_ld3hh_le_r_mte,
+ gen_helper_sve_ld4hh_le_r_mte },
+ { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_le_r_mte,
+ gen_helper_sve_ld2ss_le_r_mte,
+ gen_helper_sve_ld3ss_le_r_mte,
+ gen_helper_sve_ld4ss_le_r_mte },
+ { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_le_r_mte,
+ gen_helper_sve_ld2dd_le_r_mte,
+ gen_helper_sve_ld3dd_le_r_mte,
+ gen_helper_sve_ld4dd_le_r_mte } },
+
+ /* mte active, big-endian */
+ { { gen_helper_sve_ld1bb_r_mte,
+ gen_helper_sve_ld2bb_r_mte,
+ gen_helper_sve_ld3bb_r_mte,
+ gen_helper_sve_ld4bb_r_mte },
+ { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hh_be_r_mte,
+ gen_helper_sve_ld2hh_be_r_mte,
+ gen_helper_sve_ld3hh_be_r_mte,
+ gen_helper_sve_ld4hh_be_r_mte },
+ { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1ss_be_r_mte,
+ gen_helper_sve_ld2ss_be_r_mte,
+ gen_helper_sve_ld3ss_be_r_mte,
+ gen_helper_sve_ld4ss_be_r_mte },
+ { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
+
+ { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dd_be_r_mte,
+ gen_helper_sve_ld2dd_be_r_mte,
+ gen_helper_sve_ld3dd_be_r_mte,
+ gen_helper_sve_ld4dd_be_r_mte } } },
+};
+
+static void do_ld_zpa(DisasContext *s, int zt, int pg,
+ TCGv_i64 addr, int dtype, int nreg)
+{
+ gen_helper_gvec_mem *fn
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
+
+ /*
+ * While there are holes in the table, they are not
+ * accessible via the instruction encoding.
+ */
+ assert(fn != NULL);
+ do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
+}
+
+static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
+{
+ if (a->rm == 31) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
+ }
+ return true;
+}
+
+static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ if (sve_access_check(s)) {
+ int vsz = vec_full_reg_size(s);
+ int elements = vsz >> dtype_esz[a->dtype];
+ TCGv_i64 addr = new_tmp_a64(s);
+
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
+ (a->imm * elements * (a->nreg + 1))
+ << dtype_msz(a->dtype));
+ do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
+ }
+ return true;
+}
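+
+/*
+ * Illustrative example: with a 256-bit vector, LD1D has
+ * elements = 32 >> 3 = 4, so a->imm advances the base address in units
+ * of (4 * 8) * (a->nreg + 1) bytes, i.e. whole vector-sized blocks.
+ */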
+
+static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
+{
+ static gen_helper_gvec_mem * const fns[2][2][16] = {
+ { /* mte inactive, little-endian */
+ { gen_helper_sve_ldff1bb_r,
+ gen_helper_sve_ldff1bhu_r,
+ gen_helper_sve_ldff1bsu_r,
+ gen_helper_sve_ldff1bdu_r,
+
+ gen_helper_sve_ldff1sds_le_r,
+ gen_helper_sve_ldff1hh_le_r,
+ gen_helper_sve_ldff1hsu_le_r,
+ gen_helper_sve_ldff1hdu_le_r,
+
+ gen_helper_sve_ldff1hds_le_r,
+ gen_helper_sve_ldff1hss_le_r,
+ gen_helper_sve_ldff1ss_le_r,
+ gen_helper_sve_ldff1sdu_le_r,
+
+ gen_helper_sve_ldff1bds_r,
+ gen_helper_sve_ldff1bss_r,
+ gen_helper_sve_ldff1bhs_r,
+ gen_helper_sve_ldff1dd_le_r },
+
+ /* mte inactive, big-endian */
+ { gen_helper_sve_ldff1bb_r,
+ gen_helper_sve_ldff1bhu_r,
+ gen_helper_sve_ldff1bsu_r,
+ gen_helper_sve_ldff1bdu_r,
+
+ gen_helper_sve_ldff1sds_be_r,
+ gen_helper_sve_ldff1hh_be_r,
+ gen_helper_sve_ldff1hsu_be_r,
+ gen_helper_sve_ldff1hdu_be_r,
+
+ gen_helper_sve_ldff1hds_be_r,
+ gen_helper_sve_ldff1hss_be_r,
+ gen_helper_sve_ldff1ss_be_r,
+ gen_helper_sve_ldff1sdu_be_r,
+
+ gen_helper_sve_ldff1bds_r,
+ gen_helper_sve_ldff1bss_r,
+ gen_helper_sve_ldff1bhs_r,
+ gen_helper_sve_ldff1dd_be_r } },
+
+ { /* mte active, little-endian */
+ { gen_helper_sve_ldff1bb_r_mte,
+ gen_helper_sve_ldff1bhu_r_mte,
+ gen_helper_sve_ldff1bsu_r_mte,
+ gen_helper_sve_ldff1bdu_r_mte,
+
+ gen_helper_sve_ldff1sds_le_r_mte,
+ gen_helper_sve_ldff1hh_le_r_mte,
+ gen_helper_sve_ldff1hsu_le_r_mte,
+ gen_helper_sve_ldff1hdu_le_r_mte,
+
+ gen_helper_sve_ldff1hds_le_r_mte,
+ gen_helper_sve_ldff1hss_le_r_mte,
+ gen_helper_sve_ldff1ss_le_r_mte,
+ gen_helper_sve_ldff1sdu_le_r_mte,
+
+ gen_helper_sve_ldff1bds_r_mte,
+ gen_helper_sve_ldff1bss_r_mte,
+ gen_helper_sve_ldff1bhs_r_mte,
+ gen_helper_sve_ldff1dd_le_r_mte },
+
+ /* mte active, big-endian */
+ { gen_helper_sve_ldff1bb_r_mte,
+ gen_helper_sve_ldff1bhu_r_mte,
+ gen_helper_sve_ldff1bsu_r_mte,
+ gen_helper_sve_ldff1bdu_r_mte,
+
+ gen_helper_sve_ldff1sds_be_r_mte,
+ gen_helper_sve_ldff1hh_be_r_mte,
+ gen_helper_sve_ldff1hsu_be_r_mte,
+ gen_helper_sve_ldff1hdu_be_r_mte,
+
+ gen_helper_sve_ldff1hds_be_r_mte,
+ gen_helper_sve_ldff1hss_be_r_mte,
+ gen_helper_sve_ldff1ss_be_r_mte,
+ gen_helper_sve_ldff1sdu_be_r_mte,
+
+ gen_helper_sve_ldff1bds_r_mte,
+ gen_helper_sve_ldff1bss_r_mte,
+ gen_helper_sve_ldff1bhs_r_mte,
+ gen_helper_sve_ldff1dd_be_r_mte } },
+ };
+
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
+ fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
+ }
+ return true;
+}
+
+static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ static gen_helper_gvec_mem * const fns[2][2][16] = {
+ { /* mte inactive, little-endian */
+ { gen_helper_sve_ldnf1bb_r,
+ gen_helper_sve_ldnf1bhu_r,
+ gen_helper_sve_ldnf1bsu_r,
+ gen_helper_sve_ldnf1bdu_r,
+
+ gen_helper_sve_ldnf1sds_le_r,
+ gen_helper_sve_ldnf1hh_le_r,
+ gen_helper_sve_ldnf1hsu_le_r,
+ gen_helper_sve_ldnf1hdu_le_r,
+
+ gen_helper_sve_ldnf1hds_le_r,
+ gen_helper_sve_ldnf1hss_le_r,
+ gen_helper_sve_ldnf1ss_le_r,
+ gen_helper_sve_ldnf1sdu_le_r,
+
+ gen_helper_sve_ldnf1bds_r,
+ gen_helper_sve_ldnf1bss_r,
+ gen_helper_sve_ldnf1bhs_r,
+ gen_helper_sve_ldnf1dd_le_r },
+
+ /* mte inactive, big-endian */
+ { gen_helper_sve_ldnf1bb_r,
+ gen_helper_sve_ldnf1bhu_r,
+ gen_helper_sve_ldnf1bsu_r,
+ gen_helper_sve_ldnf1bdu_r,
+
+ gen_helper_sve_ldnf1sds_be_r,
+ gen_helper_sve_ldnf1hh_be_r,
+ gen_helper_sve_ldnf1hsu_be_r,
+ gen_helper_sve_ldnf1hdu_be_r,
+
+ gen_helper_sve_ldnf1hds_be_r,
+ gen_helper_sve_ldnf1hss_be_r,
+ gen_helper_sve_ldnf1ss_be_r,
+ gen_helper_sve_ldnf1sdu_be_r,
+
+ gen_helper_sve_ldnf1bds_r,
+ gen_helper_sve_ldnf1bss_r,
+ gen_helper_sve_ldnf1bhs_r,
+ gen_helper_sve_ldnf1dd_be_r } },
+
+        { /* mte active, little-endian */
+ { gen_helper_sve_ldnf1bb_r_mte,
+ gen_helper_sve_ldnf1bhu_r_mte,
+ gen_helper_sve_ldnf1bsu_r_mte,
+ gen_helper_sve_ldnf1bdu_r_mte,
+
+ gen_helper_sve_ldnf1sds_le_r_mte,
+ gen_helper_sve_ldnf1hh_le_r_mte,
+ gen_helper_sve_ldnf1hsu_le_r_mte,
+ gen_helper_sve_ldnf1hdu_le_r_mte,
+
+ gen_helper_sve_ldnf1hds_le_r_mte,
+ gen_helper_sve_ldnf1hss_le_r_mte,
+ gen_helper_sve_ldnf1ss_le_r_mte,
+ gen_helper_sve_ldnf1sdu_le_r_mte,
+
+ gen_helper_sve_ldnf1bds_r_mte,
+ gen_helper_sve_ldnf1bss_r_mte,
+ gen_helper_sve_ldnf1bhs_r_mte,
+ gen_helper_sve_ldnf1dd_le_r_mte },
+
+          /* mte active, big-endian */
+ { gen_helper_sve_ldnf1bb_r_mte,
+ gen_helper_sve_ldnf1bhu_r_mte,
+ gen_helper_sve_ldnf1bsu_r_mte,
+ gen_helper_sve_ldnf1bdu_r_mte,
+
+ gen_helper_sve_ldnf1sds_be_r_mte,
+ gen_helper_sve_ldnf1hh_be_r_mte,
+ gen_helper_sve_ldnf1hsu_be_r_mte,
+ gen_helper_sve_ldnf1hdu_be_r_mte,
+
+ gen_helper_sve_ldnf1hds_be_r_mte,
+ gen_helper_sve_ldnf1hss_be_r_mte,
+ gen_helper_sve_ldnf1ss_be_r_mte,
+ gen_helper_sve_ldnf1sdu_be_r_mte,
+
+ gen_helper_sve_ldnf1bds_r_mte,
+ gen_helper_sve_ldnf1bss_r_mte,
+ gen_helper_sve_ldnf1bhs_r_mte,
+ gen_helper_sve_ldnf1dd_be_r_mte } },
+ };
+
+ if (sve_access_check(s)) {
+ int vsz = vec_full_reg_size(s);
+ int elements = vsz >> dtype_esz[a->dtype];
+ int off = (a->imm * elements) << dtype_msz(a->dtype);
+ TCGv_i64 addr = new_tmp_a64(s);
+
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
+ do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
+ fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
+ }
+ return true;
+}
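+
+/*
+ * Worked example for the offset computation above (illustrative values):
+ * for LDNF1D on a 256-bit vector, vsz = 32 bytes and dtype_esz ==
+ * dtype_msz == 3, so elements = 32 >> 3 = 4.  With a->imm = 1 this gives
+ * off = (1 * 4) << 3 = 32 bytes: the immediate counts whole vectors of
+ * memory elements.
+ */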
+
+static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_pg;
+ int poff;
+
+ /* Load the first quadword using the normal predicated load helpers. */
+ poff = pred_full_reg_offset(s, pg);
+ if (vsz > 16) {
+ /*
+ * Zero-extend the first 16 bits of the predicate into a temporary.
+ * This avoids triggering an assert that checks that no bits are set
+ * in the predicate beyond VQ: we have lowered VQ to 1 for this load,
+ * but the full predicate register may have bits set beyond that.
+ */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+#ifdef HOST_WORDS_BIGENDIAN
+ poff += 6;
+#endif
+ tcg_gen_ld16u_i64(tmp, cpu_env, poff);
+
+ poff = offsetof(CPUARMState, vfp.preg_tmp);
+ tcg_gen_st_i64(tmp, cpu_env, poff);
+ tcg_temp_free_i64(tmp);
+ }
+
+ t_pg = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+
+ gen_helper_gvec_mem *fn
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
+ fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
+
+ tcg_temp_free_ptr(t_pg);
+
+ /* Replicate that first quadword. */
+ if (vsz > 16) {
+ int doff = vec_full_reg_offset(s, zt);
+ tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
+ }
+}
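+
+/*
+ * In do_ldrq above, tcg_gen_gvec_dup_mem(4, ...) replicates in units of
+ * 2**4 = 16 bytes: the quadword just loaded at doff is copied across the
+ * remaining vsz - 16 bytes of the destination register.
+ */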
+
+static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
+{
+ if (a->rm == 31) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ int msz = dtype_msz(a->dtype);
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_ldrq(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
+ do_ldrq(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned vsz_r32;
+ TCGv_ptr t_pg;
+ int poff, doff;
+
+ if (vsz < 32) {
+ /*
+ * Note that this UNDEFINED check comes after CheckSVEEnabled()
+ * in the ARM pseudocode, which is the sve_access_check() done
+ * in our caller. We should not now return false from the caller.
+ */
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Load the first octaword using the normal predicated load helpers. */
+
+ poff = pred_full_reg_offset(s, pg);
+ if (vsz > 32) {
+ /*
+ * Zero-extend the first 32 bits of the predicate into a temporary.
+ * This avoids triggering an assert that checks that no bits are set
+ * in the predicate beyond VQ: we have lowered VQ to 2 for this load,
+ * but the full predicate register may have bits set beyond that.
+ */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+#ifdef HOST_WORDS_BIGENDIAN
+ poff += 4;
+#endif
+ tcg_gen_ld32u_i64(tmp, cpu_env, poff);
+
+ poff = offsetof(CPUARMState, vfp.preg_tmp);
+ tcg_gen_st_i64(tmp, cpu_env, poff);
+ tcg_temp_free_i64(tmp);
+ }
+
+ t_pg = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_pg, cpu_env, poff);
+
+ gen_helper_gvec_mem *fn
+ = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
+ fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
+
+ tcg_temp_free_ptr(t_pg);
+
+ /*
+ * Replicate that first octaword.
+ * The replication happens in units of 32; if the full vector size
+ * is not a multiple of 32, the final bits are zeroed.
+ */
+ doff = vec_full_reg_offset(s, zt);
+ vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
+ if (vsz >= 64) {
+ tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
+ }
+ vsz -= vsz_r32;
+ if (vsz) {
+ tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
+ }
+}
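+
+/*
+ * Worked example for the replication tail above (illustrative values):
+ * with vsz = 48 (a 384-bit vector), vsz_r32 = QEMU_ALIGN_DOWN(48, 32)
+ * = 32, the dup is skipped (vsz < 64), and vsz - vsz_r32 = 16, so the
+ * final 16 bytes are zeroed while bytes 0..31 hold the octaword.
+ */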
+
+static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (a->rm == 31) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_ldro(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
+ do_ldro(s, a->rd, a->pg, addr, a->dtype);
+ }
+ return true;
+}
+
+/* Load and broadcast element. */
+static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned psz = pred_full_reg_size(s);
+ unsigned esz = dtype_esz[a->dtype];
+ unsigned msz = dtype_msz(a->dtype);
+ TCGLabel *over;
+ TCGv_i64 temp, clean_addr;
+
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ over = gen_new_label();
+
+ /* If the guarding predicate has no bits set, no load occurs. */
+ if (psz <= 8) {
+ /* Reduce the pred_esz_masks value simply to reduce the
+ * size of the code generated here.
+ */
+ uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
+ temp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
+ tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
+ tcg_temp_free_i64(temp);
+ } else {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ find_last_active(s, t32, esz, a->pg);
+ tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
+ tcg_temp_free_i32(t32);
+ }
+
+ /* Load the data. */
+ temp = tcg_temp_new_i64();
+ tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
+ clean_addr = gen_mte_check1(s, temp, false, true, msz);
+
+ tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
+ finalize_memop(s, dtype_mop[a->dtype]));
+
+ /* Broadcast to *all* elements. */
+ tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
+ vsz, vsz, temp);
+ tcg_temp_free_i64(temp);
+
+ /* Zero the inactive elements. */
+ gen_set_label(over);
+ return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
+}
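+
+/*
+ * Two strategies are used above to test for an all-false predicate:
+ * when the predicate register fits in 8 bytes it is loaded as a single
+ * i64 and ANDed against the element mask; otherwise find_last_active()
+ * yields a negative index when no element is active.
+ */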
+
+static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
+ int msz, int esz, int nreg)
+{
+ static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
+ { { { gen_helper_sve_st1bb_r,
+ gen_helper_sve_st1bh_r,
+ gen_helper_sve_st1bs_r,
+ gen_helper_sve_st1bd_r },
+ { NULL,
+ gen_helper_sve_st1hh_le_r,
+ gen_helper_sve_st1hs_le_r,
+ gen_helper_sve_st1hd_le_r },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_le_r,
+ gen_helper_sve_st1sd_le_r },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_le_r } },
+ { { gen_helper_sve_st1bb_r,
+ gen_helper_sve_st1bh_r,
+ gen_helper_sve_st1bs_r,
+ gen_helper_sve_st1bd_r },
+ { NULL,
+ gen_helper_sve_st1hh_be_r,
+ gen_helper_sve_st1hs_be_r,
+ gen_helper_sve_st1hd_be_r },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_be_r,
+ gen_helper_sve_st1sd_be_r },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_be_r } } },
+
+ { { { gen_helper_sve_st1bb_r_mte,
+ gen_helper_sve_st1bh_r_mte,
+ gen_helper_sve_st1bs_r_mte,
+ gen_helper_sve_st1bd_r_mte },
+ { NULL,
+ gen_helper_sve_st1hh_le_r_mte,
+ gen_helper_sve_st1hs_le_r_mte,
+ gen_helper_sve_st1hd_le_r_mte },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_le_r_mte,
+ gen_helper_sve_st1sd_le_r_mte },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_le_r_mte } },
+ { { gen_helper_sve_st1bb_r_mte,
+ gen_helper_sve_st1bh_r_mte,
+ gen_helper_sve_st1bs_r_mte,
+ gen_helper_sve_st1bd_r_mte },
+ { NULL,
+ gen_helper_sve_st1hh_be_r_mte,
+ gen_helper_sve_st1hs_be_r_mte,
+ gen_helper_sve_st1hd_be_r_mte },
+ { NULL, NULL,
+ gen_helper_sve_st1ss_be_r_mte,
+ gen_helper_sve_st1sd_be_r_mte },
+ { NULL, NULL, NULL,
+ gen_helper_sve_st1dd_be_r_mte } } },
+ };
+ static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
+ { { { gen_helper_sve_st2bb_r,
+ gen_helper_sve_st2hh_le_r,
+ gen_helper_sve_st2ss_le_r,
+ gen_helper_sve_st2dd_le_r },
+ { gen_helper_sve_st3bb_r,
+ gen_helper_sve_st3hh_le_r,
+ gen_helper_sve_st3ss_le_r,
+ gen_helper_sve_st3dd_le_r },
+ { gen_helper_sve_st4bb_r,
+ gen_helper_sve_st4hh_le_r,
+ gen_helper_sve_st4ss_le_r,
+ gen_helper_sve_st4dd_le_r } },
+ { { gen_helper_sve_st2bb_r,
+ gen_helper_sve_st2hh_be_r,
+ gen_helper_sve_st2ss_be_r,
+ gen_helper_sve_st2dd_be_r },
+ { gen_helper_sve_st3bb_r,
+ gen_helper_sve_st3hh_be_r,
+ gen_helper_sve_st3ss_be_r,
+ gen_helper_sve_st3dd_be_r },
+ { gen_helper_sve_st4bb_r,
+ gen_helper_sve_st4hh_be_r,
+ gen_helper_sve_st4ss_be_r,
+ gen_helper_sve_st4dd_be_r } } },
+ { { { gen_helper_sve_st2bb_r_mte,
+ gen_helper_sve_st2hh_le_r_mte,
+ gen_helper_sve_st2ss_le_r_mte,
+ gen_helper_sve_st2dd_le_r_mte },
+ { gen_helper_sve_st3bb_r_mte,
+ gen_helper_sve_st3hh_le_r_mte,
+ gen_helper_sve_st3ss_le_r_mte,
+ gen_helper_sve_st3dd_le_r_mte },
+ { gen_helper_sve_st4bb_r_mte,
+ gen_helper_sve_st4hh_le_r_mte,
+ gen_helper_sve_st4ss_le_r_mte,
+ gen_helper_sve_st4dd_le_r_mte } },
+ { { gen_helper_sve_st2bb_r_mte,
+ gen_helper_sve_st2hh_be_r_mte,
+ gen_helper_sve_st2ss_be_r_mte,
+ gen_helper_sve_st2dd_be_r_mte },
+ { gen_helper_sve_st3bb_r_mte,
+ gen_helper_sve_st3hh_be_r_mte,
+ gen_helper_sve_st3ss_be_r_mte,
+ gen_helper_sve_st3dd_be_r_mte },
+ { gen_helper_sve_st4bb_r_mte,
+ gen_helper_sve_st4hh_be_r_mte,
+ gen_helper_sve_st4ss_be_r_mte,
+ gen_helper_sve_st4dd_be_r_mte } } },
+ };
+ gen_helper_gvec_mem *fn;
+ int be = s->be_data == MO_BE;
+
+ if (nreg == 0) {
+ /* ST1 */
+ fn = fn_single[s->mte_active[0]][be][msz][esz];
+ nreg = 1;
+ } else {
+ /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
+ assert(msz == esz);
+ fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
+ }
+ assert(fn != NULL);
+ do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
+}
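+
+/*
+ * fn_single above is indexed [mte][be][msz][esz]; entries with esz < msz
+ * are NULL because a store can truncate but never widen.  fn_multiple is
+ * indexed [mte][be][nreg-1][msz] and relies on msz == esz, which the
+ * ST2/ST3/ST4 encodings guarantee.
+ */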
+
+static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
+{
+ if (a->rm == 31 || a->msz > a->esz) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_i64 addr = new_tmp_a64(s);
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
+ }
+ return true;
+}
+
+static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
+{
+ if (a->msz > a->esz) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ int vsz = vec_full_reg_size(s);
+ int elements = vsz >> a->esz;
+ TCGv_i64 addr = new_tmp_a64(s);
+
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
+ (a->imm * elements * (a->nreg + 1)) << a->msz);
+ do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
+ }
+ return true;
+}
+
+/*
+ *** SVE gather loads / scatter stores
+ */
+
+static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
+ int scale, TCGv_i64 scalar, int msz, bool is_write,
+ gen_helper_gvec_mem_scatter *fn)
+{
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr t_zm = tcg_temp_new_ptr();
+ TCGv_ptr t_pg = tcg_temp_new_ptr();
+ TCGv_ptr t_zt = tcg_temp_new_ptr();
+ TCGv_i32 t_desc;
+ int desc = 0;
+
+ if (s->mte_active[0]) {
+ desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+ desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+ desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+ desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+ desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
+ desc <<= SVE_MTEDESC_SHIFT;
+ }
+ desc = simd_desc(vsz, vsz, desc | scale);
+ t_desc = tcg_const_i32(desc);
+
+ tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
+ tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
+ tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
+ fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);
+
+ tcg_temp_free_ptr(t_zt);
+ tcg_temp_free_ptr(t_zm);
+ tcg_temp_free_ptr(t_pg);
+ tcg_temp_free_i32(t_desc);
+}
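+
+/*
+ * Descriptor layout used above: with MTE active, the MTEDESC fields
+ * (MIDX, TBI, TCMA, WRITE, SIZEM1) are packed into the low bits and then
+ * shifted up by SVE_MTEDESC_SHIFT, leaving the low bits free for the
+ * scale; simd_desc() then folds in the vector length, so a single i32
+ * carries everything the helper needs.
+ */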
+
+/* Indexed by [mte][be][ff][xs][u][msz]. */
+static gen_helper_gvec_mem_scatter * const
+gather_load_fn32[2][2][2][2][2][3] = {
+ { /* MTE Inactive */
+ { /* Little-endian */
+ { { { gen_helper_sve_ldbss_zsu,
+ gen_helper_sve_ldhss_le_zsu,
+ NULL, },
+ { gen_helper_sve_ldbsu_zsu,
+ gen_helper_sve_ldhsu_le_zsu,
+ gen_helper_sve_ldss_le_zsu, } },
+ { { gen_helper_sve_ldbss_zss,
+ gen_helper_sve_ldhss_le_zss,
+ NULL, },
+ { gen_helper_sve_ldbsu_zss,
+ gen_helper_sve_ldhsu_le_zss,
+ gen_helper_sve_ldss_le_zss, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbss_zsu,
+ gen_helper_sve_ldffhss_le_zsu,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zsu,
+ gen_helper_sve_ldffhsu_le_zsu,
+ gen_helper_sve_ldffss_le_zsu, } },
+ { { gen_helper_sve_ldffbss_zss,
+ gen_helper_sve_ldffhss_le_zss,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zss,
+ gen_helper_sve_ldffhsu_le_zss,
+ gen_helper_sve_ldffss_le_zss, } } } },
+
+ { /* Big-endian */
+ { { { gen_helper_sve_ldbss_zsu,
+ gen_helper_sve_ldhss_be_zsu,
+ NULL, },
+ { gen_helper_sve_ldbsu_zsu,
+ gen_helper_sve_ldhsu_be_zsu,
+ gen_helper_sve_ldss_be_zsu, } },
+ { { gen_helper_sve_ldbss_zss,
+ gen_helper_sve_ldhss_be_zss,
+ NULL, },
+ { gen_helper_sve_ldbsu_zss,
+ gen_helper_sve_ldhsu_be_zss,
+ gen_helper_sve_ldss_be_zss, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbss_zsu,
+ gen_helper_sve_ldffhss_be_zsu,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zsu,
+ gen_helper_sve_ldffhsu_be_zsu,
+ gen_helper_sve_ldffss_be_zsu, } },
+ { { gen_helper_sve_ldffbss_zss,
+ gen_helper_sve_ldffhss_be_zss,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zss,
+ gen_helper_sve_ldffhsu_be_zss,
+ gen_helper_sve_ldffss_be_zss, } } } } },
+ { /* MTE Active */
+ { /* Little-endian */
+ { { { gen_helper_sve_ldbss_zsu_mte,
+ gen_helper_sve_ldhss_le_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldbsu_zsu_mte,
+ gen_helper_sve_ldhsu_le_zsu_mte,
+ gen_helper_sve_ldss_le_zsu_mte, } },
+ { { gen_helper_sve_ldbss_zss_mte,
+ gen_helper_sve_ldhss_le_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldbsu_zss_mte,
+ gen_helper_sve_ldhsu_le_zss_mte,
+ gen_helper_sve_ldss_le_zss_mte, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbss_zsu_mte,
+ gen_helper_sve_ldffhss_le_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zsu_mte,
+ gen_helper_sve_ldffhsu_le_zsu_mte,
+ gen_helper_sve_ldffss_le_zsu_mte, } },
+ { { gen_helper_sve_ldffbss_zss_mte,
+ gen_helper_sve_ldffhss_le_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zss_mte,
+ gen_helper_sve_ldffhsu_le_zss_mte,
+ gen_helper_sve_ldffss_le_zss_mte, } } } },
+
+ { /* Big-endian */
+ { { { gen_helper_sve_ldbss_zsu_mte,
+ gen_helper_sve_ldhss_be_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldbsu_zsu_mte,
+ gen_helper_sve_ldhsu_be_zsu_mte,
+ gen_helper_sve_ldss_be_zsu_mte, } },
+ { { gen_helper_sve_ldbss_zss_mte,
+ gen_helper_sve_ldhss_be_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldbsu_zss_mte,
+ gen_helper_sve_ldhsu_be_zss_mte,
+ gen_helper_sve_ldss_be_zss_mte, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbss_zsu_mte,
+ gen_helper_sve_ldffhss_be_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zsu_mte,
+ gen_helper_sve_ldffhsu_be_zsu_mte,
+ gen_helper_sve_ldffss_be_zsu_mte, } },
+ { { gen_helper_sve_ldffbss_zss_mte,
+ gen_helper_sve_ldffhss_be_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldffbsu_zss_mte,
+ gen_helper_sve_ldffhsu_be_zss_mte,
+ gen_helper_sve_ldffss_be_zss_mte, } } } } },
+};
+
+/* Note that we overload xs=2 to indicate 64-bit offset. */
+static gen_helper_gvec_mem_scatter * const
+gather_load_fn64[2][2][2][3][2][4] = {
+ { /* MTE Inactive */
+ { /* Little-endian */
+ { { { gen_helper_sve_ldbds_zsu,
+ gen_helper_sve_ldhds_le_zsu,
+ gen_helper_sve_ldsds_le_zsu,
+ NULL, },
+ { gen_helper_sve_ldbdu_zsu,
+ gen_helper_sve_ldhdu_le_zsu,
+ gen_helper_sve_ldsdu_le_zsu,
+ gen_helper_sve_lddd_le_zsu, } },
+ { { gen_helper_sve_ldbds_zss,
+ gen_helper_sve_ldhds_le_zss,
+ gen_helper_sve_ldsds_le_zss,
+ NULL, },
+ { gen_helper_sve_ldbdu_zss,
+ gen_helper_sve_ldhdu_le_zss,
+ gen_helper_sve_ldsdu_le_zss,
+ gen_helper_sve_lddd_le_zss, } },
+ { { gen_helper_sve_ldbds_zd,
+ gen_helper_sve_ldhds_le_zd,
+ gen_helper_sve_ldsds_le_zd,
+ NULL, },
+ { gen_helper_sve_ldbdu_zd,
+ gen_helper_sve_ldhdu_le_zd,
+ gen_helper_sve_ldsdu_le_zd,
+ gen_helper_sve_lddd_le_zd, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbds_zsu,
+ gen_helper_sve_ldffhds_le_zsu,
+ gen_helper_sve_ldffsds_le_zsu,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zsu,
+ gen_helper_sve_ldffhdu_le_zsu,
+ gen_helper_sve_ldffsdu_le_zsu,
+ gen_helper_sve_ldffdd_le_zsu, } },
+ { { gen_helper_sve_ldffbds_zss,
+ gen_helper_sve_ldffhds_le_zss,
+ gen_helper_sve_ldffsds_le_zss,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zss,
+ gen_helper_sve_ldffhdu_le_zss,
+ gen_helper_sve_ldffsdu_le_zss,
+ gen_helper_sve_ldffdd_le_zss, } },
+ { { gen_helper_sve_ldffbds_zd,
+ gen_helper_sve_ldffhds_le_zd,
+ gen_helper_sve_ldffsds_le_zd,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zd,
+ gen_helper_sve_ldffhdu_le_zd,
+ gen_helper_sve_ldffsdu_le_zd,
+ gen_helper_sve_ldffdd_le_zd, } } } },
+ { /* Big-endian */
+ { { { gen_helper_sve_ldbds_zsu,
+ gen_helper_sve_ldhds_be_zsu,
+ gen_helper_sve_ldsds_be_zsu,
+ NULL, },
+ { gen_helper_sve_ldbdu_zsu,
+ gen_helper_sve_ldhdu_be_zsu,
+ gen_helper_sve_ldsdu_be_zsu,
+ gen_helper_sve_lddd_be_zsu, } },
+ { { gen_helper_sve_ldbds_zss,
+ gen_helper_sve_ldhds_be_zss,
+ gen_helper_sve_ldsds_be_zss,
+ NULL, },
+ { gen_helper_sve_ldbdu_zss,
+ gen_helper_sve_ldhdu_be_zss,
+ gen_helper_sve_ldsdu_be_zss,
+ gen_helper_sve_lddd_be_zss, } },
+ { { gen_helper_sve_ldbds_zd,
+ gen_helper_sve_ldhds_be_zd,
+ gen_helper_sve_ldsds_be_zd,
+ NULL, },
+ { gen_helper_sve_ldbdu_zd,
+ gen_helper_sve_ldhdu_be_zd,
+ gen_helper_sve_ldsdu_be_zd,
+ gen_helper_sve_lddd_be_zd, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbds_zsu,
+ gen_helper_sve_ldffhds_be_zsu,
+ gen_helper_sve_ldffsds_be_zsu,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zsu,
+ gen_helper_sve_ldffhdu_be_zsu,
+ gen_helper_sve_ldffsdu_be_zsu,
+ gen_helper_sve_ldffdd_be_zsu, } },
+ { { gen_helper_sve_ldffbds_zss,
+ gen_helper_sve_ldffhds_be_zss,
+ gen_helper_sve_ldffsds_be_zss,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zss,
+ gen_helper_sve_ldffhdu_be_zss,
+ gen_helper_sve_ldffsdu_be_zss,
+ gen_helper_sve_ldffdd_be_zss, } },
+ { { gen_helper_sve_ldffbds_zd,
+ gen_helper_sve_ldffhds_be_zd,
+ gen_helper_sve_ldffsds_be_zd,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zd,
+ gen_helper_sve_ldffhdu_be_zd,
+ gen_helper_sve_ldffsdu_be_zd,
+ gen_helper_sve_ldffdd_be_zd, } } } } },
+ { /* MTE Active */
+ { /* Little-endian */
+ { { { gen_helper_sve_ldbds_zsu_mte,
+ gen_helper_sve_ldhds_le_zsu_mte,
+ gen_helper_sve_ldsds_le_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zsu_mte,
+ gen_helper_sve_ldhdu_le_zsu_mte,
+ gen_helper_sve_ldsdu_le_zsu_mte,
+ gen_helper_sve_lddd_le_zsu_mte, } },
+ { { gen_helper_sve_ldbds_zss_mte,
+ gen_helper_sve_ldhds_le_zss_mte,
+ gen_helper_sve_ldsds_le_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zss_mte,
+ gen_helper_sve_ldhdu_le_zss_mte,
+ gen_helper_sve_ldsdu_le_zss_mte,
+ gen_helper_sve_lddd_le_zss_mte, } },
+ { { gen_helper_sve_ldbds_zd_mte,
+ gen_helper_sve_ldhds_le_zd_mte,
+ gen_helper_sve_ldsds_le_zd_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zd_mte,
+ gen_helper_sve_ldhdu_le_zd_mte,
+ gen_helper_sve_ldsdu_le_zd_mte,
+ gen_helper_sve_lddd_le_zd_mte, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbds_zsu_mte,
+ gen_helper_sve_ldffhds_le_zsu_mte,
+ gen_helper_sve_ldffsds_le_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zsu_mte,
+ gen_helper_sve_ldffhdu_le_zsu_mte,
+ gen_helper_sve_ldffsdu_le_zsu_mte,
+ gen_helper_sve_ldffdd_le_zsu_mte, } },
+ { { gen_helper_sve_ldffbds_zss_mte,
+ gen_helper_sve_ldffhds_le_zss_mte,
+ gen_helper_sve_ldffsds_le_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zss_mte,
+ gen_helper_sve_ldffhdu_le_zss_mte,
+ gen_helper_sve_ldffsdu_le_zss_mte,
+ gen_helper_sve_ldffdd_le_zss_mte, } },
+ { { gen_helper_sve_ldffbds_zd_mte,
+ gen_helper_sve_ldffhds_le_zd_mte,
+ gen_helper_sve_ldffsds_le_zd_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zd_mte,
+ gen_helper_sve_ldffhdu_le_zd_mte,
+ gen_helper_sve_ldffsdu_le_zd_mte,
+ gen_helper_sve_ldffdd_le_zd_mte, } } } },
+ { /* Big-endian */
+ { { { gen_helper_sve_ldbds_zsu_mte,
+ gen_helper_sve_ldhds_be_zsu_mte,
+ gen_helper_sve_ldsds_be_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zsu_mte,
+ gen_helper_sve_ldhdu_be_zsu_mte,
+ gen_helper_sve_ldsdu_be_zsu_mte,
+ gen_helper_sve_lddd_be_zsu_mte, } },
+ { { gen_helper_sve_ldbds_zss_mte,
+ gen_helper_sve_ldhds_be_zss_mte,
+ gen_helper_sve_ldsds_be_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zss_mte,
+ gen_helper_sve_ldhdu_be_zss_mte,
+ gen_helper_sve_ldsdu_be_zss_mte,
+ gen_helper_sve_lddd_be_zss_mte, } },
+ { { gen_helper_sve_ldbds_zd_mte,
+ gen_helper_sve_ldhds_be_zd_mte,
+ gen_helper_sve_ldsds_be_zd_mte,
+ NULL, },
+ { gen_helper_sve_ldbdu_zd_mte,
+ gen_helper_sve_ldhdu_be_zd_mte,
+ gen_helper_sve_ldsdu_be_zd_mte,
+ gen_helper_sve_lddd_be_zd_mte, } } },
+
+ /* First-fault */
+ { { { gen_helper_sve_ldffbds_zsu_mte,
+ gen_helper_sve_ldffhds_be_zsu_mte,
+ gen_helper_sve_ldffsds_be_zsu_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zsu_mte,
+ gen_helper_sve_ldffhdu_be_zsu_mte,
+ gen_helper_sve_ldffsdu_be_zsu_mte,
+ gen_helper_sve_ldffdd_be_zsu_mte, } },
+ { { gen_helper_sve_ldffbds_zss_mte,
+ gen_helper_sve_ldffhds_be_zss_mte,
+ gen_helper_sve_ldffsds_be_zss_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zss_mte,
+ gen_helper_sve_ldffhdu_be_zss_mte,
+ gen_helper_sve_ldffsdu_be_zss_mte,
+ gen_helper_sve_ldffdd_be_zss_mte, } },
+ { { gen_helper_sve_ldffbds_zd_mte,
+ gen_helper_sve_ldffhds_be_zd_mte,
+ gen_helper_sve_ldffsds_be_zd_mte,
+ NULL, },
+ { gen_helper_sve_ldffbdu_zd_mte,
+ gen_helper_sve_ldffhdu_be_zd_mte,
+ gen_helper_sve_ldffsdu_be_zd_mte,
+ gen_helper_sve_ldffdd_be_zd_mte, } } } } },
+};
+
+static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
+{
+ gen_helper_gvec_mem_scatter *fn = NULL;
+ bool be = s->be_data == MO_BE;
+ bool mte = s->mte_active[0];
+
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ switch (a->esz) {
+ case MO_32:
+ fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
+ break;
+ case MO_64:
+ fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
+ break;
+ }
+ assert(fn != NULL);
+
+ do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
+ cpu_reg_sp(s, a->rn), a->msz, false, fn);
+ return true;
+}
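+
+/*
+ * For the scaled forms, a->scale is 0 or 1, so the value passed above is
+ * either 0 or msz; it lands in the simd_data field of the descriptor and
+ * the helpers are expected to shift each vector offset left by that
+ * amount (e.g. LD1D with scaled offsets multiplies each index by 8).
+ */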
+
+static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
+{
+ gen_helper_gvec_mem_scatter *fn = NULL;
+ bool be = s->be_data == MO_BE;
+ bool mte = s->mte_active[0];
+ TCGv_i64 imm;
+
+ if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ switch (a->esz) {
+ case MO_32:
+ fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
+ break;
+ case MO_64:
+ fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
+ break;
+ }
+ assert(fn != NULL);
+
+ /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
+ * by loading the immediate into the scalar parameter.
+ */
+ imm = tcg_const_i64(a->imm << a->msz);
+ do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, false, fn);
+ tcg_temp_free_i64(imm);
+ return true;
+}
+
+static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return trans_LD1_zprz(s, a);
+}
+
+/* Indexed by [mte][be][xs][msz]. */
+static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
+ { /* MTE Inactive */
+ { /* Little-endian */
+ { gen_helper_sve_stbs_zsu,
+ gen_helper_sve_sths_le_zsu,
+ gen_helper_sve_stss_le_zsu, },
+ { gen_helper_sve_stbs_zss,
+ gen_helper_sve_sths_le_zss,
+ gen_helper_sve_stss_le_zss, } },
+ { /* Big-endian */
+ { gen_helper_sve_stbs_zsu,
+ gen_helper_sve_sths_be_zsu,
+ gen_helper_sve_stss_be_zsu, },
+ { gen_helper_sve_stbs_zss,
+ gen_helper_sve_sths_be_zss,
+ gen_helper_sve_stss_be_zss, } } },
+ { /* MTE Active */
+ { /* Little-endian */
+ { gen_helper_sve_stbs_zsu_mte,
+ gen_helper_sve_sths_le_zsu_mte,
+ gen_helper_sve_stss_le_zsu_mte, },
+ { gen_helper_sve_stbs_zss_mte,
+ gen_helper_sve_sths_le_zss_mte,
+ gen_helper_sve_stss_le_zss_mte, } },
+ { /* Big-endian */
+ { gen_helper_sve_stbs_zsu_mte,
+ gen_helper_sve_sths_be_zsu_mte,
+ gen_helper_sve_stss_be_zsu_mte, },
+ { gen_helper_sve_stbs_zss_mte,
+ gen_helper_sve_sths_be_zss_mte,
+ gen_helper_sve_stss_be_zss_mte, } } },
+};
+
+/* Note that we overload xs=2 to indicate 64-bit offset. */
+static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
+ { /* MTE Inactive */
+ { /* Little-endian */
+ { gen_helper_sve_stbd_zsu,
+ gen_helper_sve_sthd_le_zsu,
+ gen_helper_sve_stsd_le_zsu,
+ gen_helper_sve_stdd_le_zsu, },
+ { gen_helper_sve_stbd_zss,
+ gen_helper_sve_sthd_le_zss,
+ gen_helper_sve_stsd_le_zss,
+ gen_helper_sve_stdd_le_zss, },
+ { gen_helper_sve_stbd_zd,
+ gen_helper_sve_sthd_le_zd,
+ gen_helper_sve_stsd_le_zd,
+ gen_helper_sve_stdd_le_zd, } },
+ { /* Big-endian */
+ { gen_helper_sve_stbd_zsu,
+ gen_helper_sve_sthd_be_zsu,
+ gen_helper_sve_stsd_be_zsu,
+ gen_helper_sve_stdd_be_zsu, },
+ { gen_helper_sve_stbd_zss,
+ gen_helper_sve_sthd_be_zss,
+ gen_helper_sve_stsd_be_zss,
+ gen_helper_sve_stdd_be_zss, },
+ { gen_helper_sve_stbd_zd,
+ gen_helper_sve_sthd_be_zd,
+ gen_helper_sve_stsd_be_zd,
+ gen_helper_sve_stdd_be_zd, } } },
+ { /* MTE Active */
+ { /* Little-endian */
+ { gen_helper_sve_stbd_zsu_mte,
+ gen_helper_sve_sthd_le_zsu_mte,
+ gen_helper_sve_stsd_le_zsu_mte,
+ gen_helper_sve_stdd_le_zsu_mte, },
+ { gen_helper_sve_stbd_zss_mte,
+ gen_helper_sve_sthd_le_zss_mte,
+ gen_helper_sve_stsd_le_zss_mte,
+ gen_helper_sve_stdd_le_zss_mte, },
+ { gen_helper_sve_stbd_zd_mte,
+ gen_helper_sve_sthd_le_zd_mte,
+ gen_helper_sve_stsd_le_zd_mte,
+ gen_helper_sve_stdd_le_zd_mte, } },
+ { /* Big-endian */
+ { gen_helper_sve_stbd_zsu_mte,
+ gen_helper_sve_sthd_be_zsu_mte,
+ gen_helper_sve_stsd_be_zsu_mte,
+ gen_helper_sve_stdd_be_zsu_mte, },
+ { gen_helper_sve_stbd_zss_mte,
+ gen_helper_sve_sthd_be_zss_mte,
+ gen_helper_sve_stsd_be_zss_mte,
+ gen_helper_sve_stdd_be_zss_mte, },
+ { gen_helper_sve_stbd_zd_mte,
+ gen_helper_sve_sthd_be_zd_mte,
+ gen_helper_sve_stsd_be_zd_mte,
+ gen_helper_sve_stdd_be_zd_mte, } } },
+};
+
+static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
+{
+ gen_helper_gvec_mem_scatter *fn;
+ bool be = s->be_data == MO_BE;
+ bool mte = s->mte_active[0];
+
+ if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+ switch (a->esz) {
+ case MO_32:
+ fn = scatter_store_fn32[mte][be][a->xs][a->msz];
+ break;
+ case MO_64:
+ fn = scatter_store_fn64[mte][be][a->xs][a->msz];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
+ cpu_reg_sp(s, a->rn), a->msz, true, fn);
+ return true;
+}
+
+static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
+{
+ gen_helper_gvec_mem_scatter *fn = NULL;
+ bool be = s->be_data == MO_BE;
+ bool mte = s->mte_active[0];
+ TCGv_i64 imm;
+
+ if (a->esz < a->msz) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ switch (a->esz) {
+ case MO_32:
+ fn = scatter_store_fn32[mte][be][0][a->msz];
+ break;
+ case MO_64:
+ fn = scatter_store_fn64[mte][be][2][a->msz];
+ break;
+ }
+ assert(fn != NULL);
+
+ /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
+ * by loading the immediate into the scalar parameter.
+ */
+ imm = tcg_const_i64(a->imm << a->msz);
+ do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, true, fn);
+ tcg_temp_free_i64(imm);
+ return true;
+}
+
+static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return trans_ST1_zprz(s, a);
+}
+
+/*
+ * Prefetches
+ */
+
+static bool trans_PRF(DisasContext *s, arg_PRF *a)
+{
+ /* Prefetch is a nop within QEMU. */
+ (void)sve_access_check(s);
+ return true;
+}
+
+static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
+{
+ if (a->rm == 31) {
+ return false;
+ }
+ /* Prefetch is a nop within QEMU. */
+ (void)sve_access_check(s);
+ return true;
+}
+
+/*
+ * Move Prefix
+ *
+ * TODO: The implementation so far could handle predicated merging movprfx.
+ * The helper functions as written take an extra source register to
+ * use in the operation, but the result is only written when predication
+ * succeeds. For unpredicated movprfx, we need to rearrange the helpers
+ * to allow the final write back to the destination to be unconditional.
+ * For predicated zeroing movprfx, we need to rearrange the helpers to
+ * allow the final write back to zero inactives.
+ *
+ * In the meantime, just emit the moves.
+ */
+
+static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a)
+{
+ return do_mov_z(s, a->rd, a->rn);
+}
+
+static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a)
+{
+ if (sve_access_check(s)) {
+ do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz);
+ }
+ return true;
+}
+
+static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a)
+{
+ return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false);
+}
+
+/*
+ * SVE2 Integer Multiply - Unpredicated
+ */
+
+static bool trans_MUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, tcg_gen_gvec_mul, a->esz, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool do_sve2_zzz_ool(DisasContext *s, arg_rrr_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_SMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
+ gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
+ gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_PMUL_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_zzz_ool(s, a, gen_helper_gvec_pmul_b);
+}
+
+static bool trans_SQDMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
+ gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQRDMULH_zzz(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
+ gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
+ };
+ return do_sve2_zzz_ool(s, a, fns[a->esz]);
+}
+
+/*
+ * SVE2 Integer - Predicated
+ */
+
+static bool do_sve2_zpzz_ool(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzz_ool(s, a, fn);
+}
+
+static bool trans_SADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_sve2_sadalp_zpzz_h,
+ gen_helper_sve2_sadalp_zpzz_s,
+ gen_helper_sve2_sadalp_zpzz_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
+
+static bool trans_UADALP_zpzz(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[3] = {
+ gen_helper_sve2_uadalp_zpzz_h,
+ gen_helper_sve2_uadalp_zpzz_s,
+ gen_helper_sve2_uadalp_zpzz_d,
+ };
+ if (a->esz == 0) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 1]);
+}
+
+/*
+ * SVE2 integer unary operations (predicated)
+ */
+
+static bool do_sve2_zpz_ool(DisasContext *s, arg_rpr_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ool(s, a, fn);
+}
+
+static bool trans_URECPE(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz != 2) {
+ return false;
+ }
+ return do_sve2_zpz_ool(s, a, gen_helper_sve2_urecpe_s);
+}
+
+static bool trans_URSQRTE(DisasContext *s, arg_rpr_esz *a)
+{
+ if (a->esz != 2) {
+ return false;
+ }
+ return do_sve2_zpz_ool(s, a, gen_helper_sve2_ursqrte_s);
+}
+
+static bool trans_SQABS(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
+ gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
+ };
+ return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQNEG(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
+ gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
+ };
+ return do_sve2_zpz_ool(s, a, fns[a->esz]);
+}
+
+#define DO_SVE2_ZPZZ(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4 * const fns[4] = { \
+ gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \
+ gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \
+ }; \
+ return do_sve2_zpzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ(SQSHL, sqshl)
+DO_SVE2_ZPZZ(SQRSHL, sqrshl)
+DO_SVE2_ZPZZ(SRSHL, srshl)
+
+DO_SVE2_ZPZZ(UQSHL, uqshl)
+DO_SVE2_ZPZZ(UQRSHL, uqrshl)
+DO_SVE2_ZPZZ(URSHL, urshl)
+
+DO_SVE2_ZPZZ(SHADD, shadd)
+DO_SVE2_ZPZZ(SRHADD, srhadd)
+DO_SVE2_ZPZZ(SHSUB, shsub)
+
+DO_SVE2_ZPZZ(UHADD, uhadd)
+DO_SVE2_ZPZZ(URHADD, urhadd)
+DO_SVE2_ZPZZ(UHSUB, uhsub)
+
+DO_SVE2_ZPZZ(ADDP, addp)
+DO_SVE2_ZPZZ(SMAXP, smaxp)
+DO_SVE2_ZPZZ(UMAXP, umaxp)
+DO_SVE2_ZPZZ(SMINP, sminp)
+DO_SVE2_ZPZZ(UMINP, uminp)
+
+DO_SVE2_ZPZZ(SQADD_zpzz, sqadd)
+DO_SVE2_ZPZZ(UQADD_zpzz, uqadd)
+DO_SVE2_ZPZZ(SQSUB_zpzz, sqsub)
+DO_SVE2_ZPZZ(UQSUB_zpzz, uqsub)
+DO_SVE2_ZPZZ(SUQADD, suqadd)
+DO_SVE2_ZPZZ(USQADD, usqadd)
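+
+/*
+ * As a sketch, DO_SVE2_ZPZZ(SQSHL, sqshl) above expands to:
+ *
+ * static bool trans_SQSHL(DisasContext *s, arg_rprr_esz *a)
+ * {
+ *     static gen_helper_gvec_4 * const fns[4] = {
+ *         gen_helper_sve2_sqshl_zpzz_b, gen_helper_sve2_sqshl_zpzz_h,
+ *         gen_helper_sve2_sqshl_zpzz_s, gen_helper_sve2_sqshl_zpzz_d,
+ *     };
+ *     return do_sve2_zpzz_ool(s, a, fns[a->esz]);
+ * }
+ */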
+
+/*
+ * SVE2 Widening Integer Arithmetic
+ */
+
+static bool do_sve2_zzw_ool(DisasContext *s, arg_rrr_esz *a,
+ gen_helper_gvec_3 *fn, int data)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vsz, vsz, data, fn);
+ }
+ return true;
+}
+
+#define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \
+}
+
+DO_SVE2_ZZZ_TB(SADDLB, saddl, false, false)
+DO_SVE2_ZZZ_TB(SSUBLB, ssubl, false, false)
+DO_SVE2_ZZZ_TB(SABDLB, sabdl, false, false)
+
+DO_SVE2_ZZZ_TB(UADDLB, uaddl, false, false)
+DO_SVE2_ZZZ_TB(USUBLB, usubl, false, false)
+DO_SVE2_ZZZ_TB(UABDLB, uabdl, false, false)
+
+DO_SVE2_ZZZ_TB(SADDLT, saddl, true, true)
+DO_SVE2_ZZZ_TB(SSUBLT, ssubl, true, true)
+DO_SVE2_ZZZ_TB(SABDLT, sabdl, true, true)
+
+DO_SVE2_ZZZ_TB(UADDLT, uaddl, true, true)
+DO_SVE2_ZZZ_TB(USUBLT, usubl, true, true)
+DO_SVE2_ZZZ_TB(UABDLT, uabdl, true, true)
+
+DO_SVE2_ZZZ_TB(SADDLBT, saddl, false, true)
+DO_SVE2_ZZZ_TB(SSUBLBT, ssubl, false, true)
+DO_SVE2_ZZZ_TB(SSUBLTB, ssubl, true, false)
+
+DO_SVE2_ZZZ_TB(SQDMULLB_zzz, sqdmull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SQDMULLT_zzz, sqdmull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(SMULLB_zzz, smull_zzz, false, false)
+DO_SVE2_ZZZ_TB(SMULLT_zzz, smull_zzz, true, true)
+
+DO_SVE2_ZZZ_TB(UMULLB_zzz, umull_zzz, false, false)
+DO_SVE2_ZZZ_TB(UMULLT_zzz, umull_zzz, true, true)
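+
+/*
+ * In the expansions above, data is (SEL2 << 1) | SEL1, where SEL1 selects
+ * the top (true) or bottom (false) half of each rn element and SEL2 does
+ * the same for rm.  Hence SADDLBT adds the bottom halves of rn to the top
+ * halves of rm, matching the BT suffix.
+ */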
+
+static bool do_eor_tb(DisasContext *s, arg_rrr_esz *a, bool sel1)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
+ gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
+ };
+ return do_sve2_zzw_ool(s, a, fns[a->esz], (!sel1 << 1) | sel1);
+}
+
+static bool trans_EORBT(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_eor_tb(s, a, false);
+}
+
+static bool trans_EORTB(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_eor_tb(s, a, true);
+}
+
+static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
+ NULL, gen_helper_sve2_pmull_d,
+ };
+ if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_PMULLB(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, false);
+}
+
+static bool trans_PMULLT(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_trans_pmull(s, a, true);
+}
+
+#define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \
+}
+
+DO_SVE2_ZZZ_WTB(SADDWB, saddw, false)
+DO_SVE2_ZZZ_WTB(SADDWT, saddw, true)
+DO_SVE2_ZZZ_WTB(SSUBWB, ssubw, false)
+DO_SVE2_ZZZ_WTB(SSUBWT, ssubw, true)
+
+DO_SVE2_ZZZ_WTB(UADDWB, uaddw, false)
+DO_SVE2_ZZZ_WTB(UADDWT, uaddw, true)
+DO_SVE2_ZZZ_WTB(USUBWB, usubw, false)
+DO_SVE2_ZZZ_WTB(USUBWT, usubw, true)
+
+static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int top = imm & 1;
+ int shl = imm >> 1;
+ int halfbits = 4 << vece;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_sari_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_sari_vec(vece, d, d, halfbits - shl);
+ }
+}
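+
+/*
+ * gen_sshll_vec relies on a double-shift trick: for the bottom half,
+ * shifting left by halfbits and then arithmetic-shifting right by
+ * halfbits - shl both sign-extends the low half and applies the left
+ * shift by shl.  The top-half special case shl == halfbits reduces to a
+ * mask, since every sign-extension bit would be shifted back out.
+ */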
+
+static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = (imm >> 1);
+ int shift;
+ uint64_t mask;
+
+ mask = MAKE_64BIT_MASK(0, halfbits);
+ mask <<= shl;
+ mask = dup_const(vece, mask);
+
+ shift = shl - top * halfbits;
+ if (shift < 0) {
+ tcg_gen_shri_i64(d, n, -shift);
+ } else {
+ tcg_gen_shli_i64(d, n, shift);
+ }
+ tcg_gen_andi_i64(d, d, mask);
+}
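+
+/*
+ * Worked example for gen_ushll_i64 (illustrative values): with vece =
+ * MO_16, top = 0 and shl = 3, halfbits = 8, so mask =
+ * dup_const(MO_16, 0xff << 3) = 0x07f807f807f807f8 and shift = 3.  Each
+ * 16-bit lane ends up holding its low byte shifted left by 3, with bits
+ * leaking in from the neighbouring byte removed by the mask.
+ */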
+
+static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_16, d, n, imm);
+}
+
+static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_32, d, n, imm);
+}
+
+static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
+{
+ gen_ushll_i64(MO_64, d, n, imm);
+}
+
+static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
+{
+ int halfbits = 4 << vece;
+ int top = imm & 1;
+ int shl = imm >> 1;
+
+ if (top) {
+ if (shl == halfbits) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shri_vec(vece, d, n, halfbits);
+ tcg_gen_shli_vec(vece, d, d, shl);
+ }
+ } else {
+ if (shl == 0) {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+ } else {
+ tcg_gen_shli_vec(vece, d, n, halfbits);
+ tcg_gen_shri_vec(vece, d, d, halfbits - shl);
+ }
+ }
+}
+
+static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
+ bool sel, bool uns)
+{
+ static const TCGOpcode sshll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, 0
+ };
+ static const TCGOpcode ushll_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, 0
+ };
+ static const GVecGen2i ops[2][3] = {
+ { { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_h,
+ .vece = MO_16 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_s,
+ .vece = MO_32 },
+ { .fniv = gen_sshll_vec,
+ .opt_opc = sshll_list,
+ .fno = gen_helper_sve2_sshll_d,
+ .vece = MO_64 } },
+ { { .fni8 = gen_ushll16_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_h,
+ .vece = MO_16 },
+ { .fni8 = gen_ushll32_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_s,
+ .vece = MO_32 },
+ { .fni8 = gen_ushll64_i64,
+ .fniv = gen_ushll_vec,
+ .opt_opc = ushll_list,
+ .fno = gen_helper_sve2_ushll_d,
+ .vece = MO_64 } },
+ };
+
+ if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, (a->imm << 1) | sel,
+ &ops[uns][a->esz]);
+ }
+ return true;
+}
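+
+/*
+ * The immediate passed to the GVecGen2i expanders above packs both
+ * operands as (a->imm << 1) | sel; each gen_*shll_* function decodes it
+ * as top = imm & 1 and shl = imm >> 1.
+ */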
+
+static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, false);
+}
+
+static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, false);
+}
+
+static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, false, true);
+}
+
+static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_shll_tb(s, a, true, true);
+}
+
+static bool trans_BEXT(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
+ gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BDEP(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
+ gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_BGRP(DisasContext *s, arg_rrr_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
+ gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
+ };
+ if (!dc_isar_feature(aa64_sve2_bitperm, s)) {
+ return false;
+ }
+ return do_sve2_zzw_ool(s, a, fns[a->esz], 0);
+}
+
+static bool do_cadd(DisasContext *s, arg_rrr_esz *a, bool sq, bool rot)
+{
+ static gen_helper_gvec_3 * const fns[2][4] = {
+ { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
+ gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d },
+ { gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
+ gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d },
+ };
+ return do_sve2_zzw_ool(s, a, fns[sq][a->esz], rot);
+}
+
+static bool trans_CADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, false, false);
+}
+
+static bool trans_CADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, false, true);
+}
+
+static bool trans_SQCADD_rot90(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, true, false);
+}
+
+static bool trans_SQCADD_rot270(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_cadd(s, a, true, true);
+}
+
+static bool do_sve2_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
+ gen_helper_gvec_4 *fn, int data)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+ }
+ return true;
+}
+
+static bool do_abal(DisasContext *s, arg_rrrr_esz *a, bool uns, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[2][4] = {
+ { NULL, gen_helper_sve2_sabal_h,
+ gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d },
+ { NULL, gen_helper_sve2_uabal_h,
+ gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d },
+ };
+ return do_sve2_zzzz_ool(s, a, fns[uns][a->esz], sel);
+}
+
+static bool trans_SABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, false, false);
+}
+
+static bool trans_SABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, false, true);
+}
+
+static bool trans_UABALB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, true, false);
+}
+
+static bool trans_UABALT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_abal(s, a, true, true);
+}
+
+static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[2] = {
+ gen_helper_sve2_adcl_s,
+ gen_helper_sve2_adcl_d,
+ };
+ /*
+ * Note that in this case the ESZ field encodes both size and sign.
+ * Split out 'subtract' into bit 1 of the data field for the helper.
+ */
+ return do_sve2_zzzz_ool(s, a, fns[a->esz & 1], (a->esz & 2) | sel);
+}
+
+static bool trans_ADCLB(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_adcl(s, a, false);
+}
+
+static bool trans_ADCLT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_adcl(s, a, true);
+}
+
+static bool do_sve2_fn2i(DisasContext *s, arg_rri_esz *a, GVecGen2iFn *fn)
+{
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ unsigned rd_ofs = vec_full_reg_offset(s, a->rd);
+ unsigned rn_ofs = vec_full_reg_offset(s, a->rn);
+ fn(a->esz, rd_ofs, rn_ofs, a->imm, vsz, vsz);
+ }
+ return true;
+}
+
+static bool trans_SSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_ssra);
+}
+
+static bool trans_USRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_usra);
+}
+
+static bool trans_SRSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_srsra);
+}
+
+static bool trans_URSRA(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_ursra);
+}
+
+static bool trans_SRI(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_sri);
+}
+
+static bool trans_SLI(DisasContext *s, arg_rri_esz *a)
+{
+ return do_sve2_fn2i(s, a, gen_gvec_sli);
+}
+
+static bool do_sve2_fn_zzz(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool trans_SABA(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_fn_zzz(s, a, gen_gvec_saba);
+}
+
+static bool trans_UABA(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sve2_fn_zzz(s, a, gen_gvec_uaba);
+}
+
+static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
+ const GVecGen2 ops[3])
+{
+ if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
+ !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, &ops[a->esz]);
+ }
+ return true;
+}
+
+static const TCGOpcode sqxtn_list[] = {
+ INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t mask = (1ull << halfbits) - 1;
+ int64_t min = -1ull << (halfbits - 1);
+ int64_t max = -min - 1;
+
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, d, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, d, d, t);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_and_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
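+
+/*
+ * gen_sqxtnb_vec implements the saturating narrow without a dedicated
+ * opcode: smax/smin clamp each element to the signed half-width range
+ * [min, max], and the final AND discards the sign-extension bits so only
+ * the low half of each lane survives.
+ */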
+
+static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtnb_vec,
+ .opt_opc = sqxtn_list,
+ .fno = gen_helper_sve2_sqxtnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t mask = (1ull << halfbits) - 1;
+ int64_t min = -1ull << (halfbits - 1);
+ int64_t max = -min - 1;
+
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtnt_vec,
+ .opt_opc = sqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode uqxtn_list[] = {
+ INDEX_op_shli_vec, INDEX_op_umin_vec, 0
+};
+
+static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqxtnb_vec,
+ .opt_opc = uqxtn_list,
+ .fno = gen_helper_sve2_uqxtnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqxtnt_vec,
+ .opt_opc = uqxtn_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqxtnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static const TCGOpcode sqxtun_list[] = {
+ INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
+};
+
+static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, d, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtunb_vec,
+ .opt_opc = sqxtun_list,
+ .fno = gen_helper_sve2_sqxtunb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = (1ull << halfbits) - 1;
+
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2 ops[3] = {
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqxtunt_vec,
+ .opt_opc = sqxtun_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqxtunt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_narrow_extract(s, a, ops);
+}
+
+static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
+ const GVecGen2i ops[3])
+{
+ if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ assert(a->imm > 0 && a->imm <= (8 << a->esz));
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vsz, vsz, a->imm, &ops[a->esz]);
+ }
+ return true;
+}
+
+static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+ int halfbits = 4 << vece;
+ uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+ tcg_gen_shri_i64(d, n, shr);
+ tcg_gen_andi_i64(d, d, mask);
+}
+
+static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnb_i64(MO_64, d, n, shr);
+}
+
+static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
+ static const GVecGen2i ops[3] = {
+ { .fni8 = gen_shrnb16_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_h,
+ .vece = MO_16 },
+ { .fni8 = gen_shrnb32_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_s,
+ .vece = MO_32 },
+ { .fni8 = gen_shrnb64_i64,
+ .fniv = gen_shrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_shrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
+{
+ int halfbits = 4 << vece;
+ uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
+
+ tcg_gen_shli_i64(n, n, halfbits - shr);
+ tcg_gen_andi_i64(n, n, ~mask);
+ tcg_gen_andi_i64(d, d, mask);
+ tcg_gen_or_i64(d, d, n);
+}
+
+static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnt_i64(MO_16, d, n, shr);
+}
+
+static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ gen_shrnt_i64(MO_32, d, n, shr);
+}
+
+static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
+{
+ tcg_gen_shri_i64(n, n, shr);
+ tcg_gen_deposit_i64(d, d, n, 32, 32);
+}
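+
+/*
+ * For the MO_64 case above no shift-and-mask dance is needed: the shifted
+ * 32-bit result is deposited directly into bits [63:32] of the
+ * destination, preserving the even half already in d.
+ */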
+
+static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
+
+ tcg_gen_shli_vec(vece, n, n, halfbits - shr);
+ tcg_gen_dupi_vec(vece, t, mask);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
+ static const GVecGen2i ops[3] = {
+ { .fni8 = gen_shrnt16_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_h,
+ .vece = MO_16 },
+ { .fni8 = gen_shrnt32_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_s,
+ .vece = MO_32 },
+ { .fni8 = gen_shrnt64_i64,
+ .fniv = gen_shrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_shrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_rshrnb_h },
+ { .fno = gen_helper_sve2_rshrnb_s },
+ { .fno = gen_helper_sve2_rshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_rshrnt_h },
+ { .fno = gen_helper_sve2_rshrnt_s },
+ { .fno = gen_helper_sve2_rshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
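+/*
+ * The SQSHRUN* forms narrow a signed input to an unsigned result:
+ * the smax against 0 clamps the negative side and the umin against
+ * (2^halfbits - 1) saturates the positive side before the result is
+ * placed in the bottom half-lanes.
+ */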
+static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrunb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrunb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, 0);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_smax_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrunt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrunt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrunb_h },
+ { .fno = gen_helper_sve2_sqrshrunb_s },
+ { .fno = gen_helper_sve2_sqrshrunb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrunt_h },
+ { .fno = gen_helper_sve2_sqrshrunt_s },
+ { .fno = gen_helper_sve2_sqrshrunt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_and_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_sqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+ int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
+ int64_t min = -max - 1;
+
+ tcg_gen_sari_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, min);
+ tcg_gen_smax_vec(vece, n, n, t);
+ tcg_gen_dupi_vec(vece, t, max);
+ tcg_gen_smin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_sqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_sqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnb_h },
+ { .fno = gen_helper_sve2_sqrshrnb_s },
+ { .fno = gen_helper_sve2_sqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_sqrshrnt_h },
+ { .fno = gen_helper_sve2_sqrshrnt_s },
+ { .fno = gen_helper_sve2_sqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, d, n, t);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnb_vec,
+ .opt_opc = vec_list,
+ .fno = gen_helper_sve2_uqshrnb_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
+ TCGv_vec n, int64_t shr)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ int halfbits = 4 << vece;
+
+ tcg_gen_shri_vec(vece, n, n, shr);
+ tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
+ tcg_gen_umin_vec(vece, n, n, t);
+ tcg_gen_shli_vec(vece, n, n, halfbits);
+ tcg_gen_bitsel_vec(vece, d, t, d, n);
+ tcg_temp_free_vec(t);
+}
+
+static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const TCGOpcode vec_list[] = {
+ INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i ops[3] = {
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_h,
+ .vece = MO_16 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_s,
+ .vece = MO_32 },
+ { .fniv = gen_uqshrnt_vec,
+ .opt_opc = vec_list,
+ .load_dest = true,
+ .fno = gen_helper_sve2_uqshrnt_d,
+ .vece = MO_64 },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnb_h },
+ { .fno = gen_helper_sve2_uqrshrnb_s },
+ { .fno = gen_helper_sve2_uqrshrnb_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
+{
+ static const GVecGen2i ops[3] = {
+ { .fno = gen_helper_sve2_uqrshrnt_h },
+ { .fno = gen_helper_sve2_uqrshrnt_s },
+ { .fno = gen_helper_sve2_uqrshrnt_d },
+ };
+ return do_sve2_shr_narrow(s, a, ops);
+}
+
+#define DO_SVE2_ZZZ_NARROW(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
+{ \
+ static gen_helper_gvec_3 * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_h, \
+ gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
+ }; \
+ return do_sve2_zzz_ool(s, a, fns[a->esz]); \
+}
+
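+/*
+ * e.g. DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) expands to a trans_ADDHNB()
+ * that dispatches on a->esz to gen_helper_sve2_addhnb_{h,s,d}; the
+ * NULL entry makes the esz == 0 encoding fail to decode, since there
+ * is no element size narrower than a byte.
+ */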
+DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
+DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
+DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
+DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
+
+DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
+DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
+DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
+DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
+
+static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_flags_4 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_ppzz_flags(s, a, fn);
+}
+
+#define DO_SVE2_PPZZ_MATCH(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_flags_4 * const fns[4] = { \
+ gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \
+ NULL, NULL \
+ }; \
+ return do_sve2_ppzz_flags(s, a, fns[a->esz]); \
+}
+
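+/*
+ * MATCH and NMATCH are defined only for byte and halfword elements;
+ * the NULL entries reject esz == 2 and esz == 3 at decode time.
+ */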
+DO_SVE2_PPZZ_MATCH(MATCH, match)
+DO_SVE2_PPZZ_MATCH(NMATCH, nmatch)
+
+static bool trans_HISTCNT(DisasContext *s, arg_rprr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[2] = {
+ gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
+ };
+ if (a->esz < 2) {
+ return false;
+ }
+ return do_sve2_zpzz_ool(s, a, fns[a->esz - 2]);
+}
+
+static bool trans_HISTSEG(DisasContext *s, arg_rrr_esz *a)
+{
+ if (a->esz != 0) {
+ return false;
+ }
+ return do_sve2_zzz_ool(s, a, gen_helper_sve2_histseg);
+}
+
+static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzz_fp(s, a, fn);
+}
+
+#define DO_SVE2_ZPZZ_FP(NAME, name) \
+static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ NULL, gen_helper_sve2_##name##_zpzz_h, \
+ gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
+ }; \
+ return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
+}
+
+DO_SVE2_ZPZZ_FP(FADDP, faddp)
+DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
+DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
+DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
+DO_SVE2_ZPZZ_FP(FMINP, fminp)
+
+/*
+ * SVE Integer Multiply-Add (unpredicated)
+ */
+
+static bool trans_FMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ gen_helper_gvec_4_ptr *fn;
+
+ switch (a->esz) {
+ case MO_32:
+ if (!dc_isar_feature(aa64_sve_f32mm, s)) {
+ return false;
+ }
+ fn = gen_helper_fmmla_s;
+ break;
+ case MO_64:
+ if (!dc_isar_feature(aa64_sve_f64mm, s)) {
+ return false;
+ }
+ fn = gen_helper_fmmla_d;
+ break;
+ default:
+ return false;
+ }
+
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz, 0, fn);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool do_sqdmlal_zzzw(DisasContext *s, arg_rrrr_esz *a,
+ bool sel1, bool sel2)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_sqdmlal_zzzw_h,
+ gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1);
+}
+
+static bool do_sqdmlsl_zzzw(DisasContext *s, arg_rrrr_esz *a,
+ bool sel1, bool sel2)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
+ gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], (sel2 << 1) | sel1);
+}
+
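+/*
+ * sel1 and sel2 pick the bottom (false) or top (true) half-width
+ * elements of the two multiplicands, packed into the helper data as
+ * bits 0 and 1: SQDMLALB is (false, false), SQDMLALT (true, true),
+ * and the mixed SQDMLALBT form (false, true).
+ */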
+static bool trans_SQDMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, false, false);
+}
+
+static bool trans_SQDMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, true, true);
+}
+
+static bool trans_SQDMLALBT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlal_zzzw(s, a, false, true);
+}
+
+static bool trans_SQDMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, false, false);
+}
+
+static bool trans_SQDMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, true, true);
+}
+
+static bool trans_SQDMLSLBT(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_sqdmlsl_zzzw(s, a, false, true);
+}
+
+static bool trans_SQRDMLAH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
+ gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
+
+static bool trans_SQRDMLSH_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
+ gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], 0);
+}
+
+static bool do_smlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_smlal_zzzw_h,
+ gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlal_zzzw(s, a, false);
+}
+
+static bool trans_SMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlal_zzzw(s, a, true);
+}
+
+static bool do_umlal_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_umlal_zzzw_h,
+ gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlal_zzzw(s, a, false);
+}
+
+static bool trans_UMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlal_zzzw(s, a, true);
+}
+
+static bool do_smlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_smlsl_zzzw_h,
+ gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_SMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlsl_zzzw(s, a, false);
+}
+
+static bool trans_SMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_smlsl_zzzw(s, a, true);
+}
+
+static bool do_umlsl_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ NULL, gen_helper_sve2_umlsl_zzzw_h,
+ gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
+ };
+ return do_sve2_zzzz_ool(s, a, fns[a->esz], sel);
+}
+
+static bool trans_UMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlsl_zzzw(s, a, false);
+}
+
+static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_umlsl_zzzw(s, a, true);
+}
+
+static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
+ gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_CDOT_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s) || a->esz < MO_32) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_helper_gvec_4 *fn = (a->esz == MO_32
+ ? gen_helper_sve2_cdot_zzzz_s
+ : gen_helper_sve2_cdot_zzzz_d);
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a)
+{
+ static gen_helper_gvec_4 * const fns[] = {
+ gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
+ gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+ }
+ return true;
+}
+
+static bool trans_USDOT_zzzz(DisasContext *s, arg_USDOT_zzzz *a)
+{
+ if (a->esz != 2 || !dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ vsz, vsz, 0, gen_helper_gvec_usdot_b);
+ }
+ return true;
+}
+
+static bool trans_AESMC(DisasContext *s, arg_AESMC *a)
+{
+ if (!dc_isar_feature(aa64_sve2_aes, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zz(s, gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt);
+ }
+ return true;
+}
+
+static bool do_aese(DisasContext *s, arg_rrr_esz *a, bool decrypt)
+{
+ if (!dc_isar_feature(aa64_sve2_aes, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, gen_helper_crypto_aese,
+ a->rd, a->rn, a->rm, decrypt);
+ }
+ return true;
+}
+
+static bool trans_AESE(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_aese(s, a, false);
+}
+
+static bool trans_AESD(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_aese(s, a, true);
+}
+
+static bool do_sm4(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn)
+{
+ if (!dc_isar_feature(aa64_sve2_sm4, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
+ }
+ return true;
+}
+
+static bool trans_SM4E(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sm4(s, a, gen_helper_crypto_sm4e);
+}
+
+static bool trans_SM4EKEY(DisasContext *s, arg_rrr_esz *a)
+{
+ return do_sm4(s, a, gen_helper_crypto_sm4ekey);
+}
+
+static bool trans_RAX1(DisasContext *s, arg_rrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2_sha3, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_fn_zzz(s, gen_gvec_rax1, MO_64, a->rd, a->rn, a->rm);
+ }
+ return true;
+}
+
+static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
+}
+
+static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt);
+}
+
+static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds);
+}
+
+static bool trans_FCVTLT_hs(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_hs);
+}
+
+static bool trans_FCVTLT_sd(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_sd);
+}
+
+static bool trans_FCVTX_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve_fcvt_ds);
+}
+
+static bool trans_FCVTXNT_ds(DisasContext *s, arg_rpr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve2_fcvtnt_ds);
+}
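+
+/*
+ * FCVTX and FCVTXNT use round-to-odd because their single-precision
+ * result may be narrowed again to half precision; rounding to odd
+ * avoids a double-rounding error in that second conversion.
+ */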
+
+static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
+{
+ static gen_helper_gvec_3_ptr * const fns[] = {
+ NULL, gen_helper_flogb_h,
+ gen_helper_flogb_s, gen_helper_flogb_d
+ };
+
+ if (!dc_isar_feature(aa64_sve2, s) || fns[a->esz] == NULL) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_ptr status =
+ fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ pred_full_reg_offset(s, a->pg),
+ status, vsz, vsz, 0, fns[a->esz]);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ cpu_env, vsz, vsz, (sel << 1) | sub,
+ gen_helper_sve2_fmlal_zzzw_s);
+ }
+ return true;
+}
+
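+/*
+ * All four of FMLALB/FMLALT/FMLSLB/FMLSLT share one single-precision
+ * helper; the half-width element selector and the subtract flag
+ * travel in the data word as bit 1 (sel) and bit 0 (sub).
+ */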
+static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_FMLAL_zzzw(s, a, true, true);
+}
+
+static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ cpu_env, vsz, vsz,
+ (a->index << 2) | (sel << 1) | sub,
+ gen_helper_sve2_fmlal_zzxw_s);
+ }
+ return true;
+}
+
+static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, false, false);
+}
+
+static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, false, true);
+}
+
+static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, true, false);
+}
+
+static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_FMLAL_zzxw(s, a, true, true);
+}
+
+static bool do_i8mm_zzzz_ool(DisasContext *s, arg_rrrr_esz *a,
+ gen_helper_gvec_4 *fn, int data)
+{
+ if (!dc_isar_feature(aa64_sve_i8mm, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
+ }
+ return true;
+}
+
+static bool trans_SMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_smmla_b, 0);
+}
+
+static bool trans_USMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_usmmla_b, 0);
+}
+
+static bool trans_UMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_i8mm_zzzz_ool(s, a, gen_helper_gvec_ummla_b, 0);
+}
+
+static bool trans_BFDOT_zzzz(DisasContext *s, arg_rrrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot,
+ a->rd, a->rn, a->rm, a->ra, 0);
+ }
+ return true;
+}
+
+static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, gen_helper_gvec_bfdot_idx,
+ a->rd, a->rn, a->rm, a->ra, a->index);
+ }
+ return true;
+}
+
+static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ gen_gvec_ool_zzzz(s, gen_helper_gvec_bfmmla,
+ a->rd, a->rn, a->rm, a->ra, 0);
+ }
+ return true;
+}
+
+static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz, sel,
+ gen_helper_gvec_bfmlal);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_BFMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_BFMLAL_zzzw(s, a, false);
+}
+
+static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
+{
+ return do_BFMLAL_zzzw(s, a, true);
+}
+
+static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
+{
+ if (!dc_isar_feature(aa64_sve_bf16, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
+ unsigned vsz = vec_full_reg_size(s);
+
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vec_full_reg_offset(s, a->ra),
+ status, vsz, vsz, (a->index << 1) | sel,
+ gen_helper_gvec_bfmlal_idx);
+ tcg_temp_free_ptr(status);
+ }
+ return true;
+}
+
+static bool trans_BFMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_BFMLAL_zzxw(s, a, false);
+}
+
+static bool trans_BFMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
+{
+ return do_BFMLAL_zzxw(s, a, true);
+}
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
new file mode 100644
index 000000000..59bcaec5b
--- /dev/null
+++ b/target/arm/translate-vfp.c
@@ -0,0 +1,3627 @@
+/*
+ * ARM translation: AArch32 VFP instructions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Copyright (c) 2019 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/exec-all.h"
+#include "exec/gen-icount.h"
+#include "translate.h"
+#include "translate-a32.h"
+
+/* Include the generated VFP decoder */
+#include "decode-vfp.c.inc"
+#include "decode-vfp-uncond.c.inc"
+
+static inline void vfp_load_reg64(TCGv_i64 var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
+}
+
+static inline void vfp_store_reg64(TCGv_i64 var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
+}
+
+static inline void vfp_load_reg32(TCGv_i32 var, int reg)
+{
+ tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
+}
+
+static inline void vfp_store_reg32(TCGv_i32 var, int reg)
+{
+ tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
+}
+
+/*
+ * The imm8 encodes the sign bit, enough bits to represent an exponent in
+ * the range 01....1xx to 10....0xx, and the most significant 4 bits of
+ * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
+ */
+uint64_t vfp_expand_imm(int size, uint8_t imm8)
+{
+ uint64_t imm;
+
+ switch (size) {
+ case MO_64:
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
+ extract32(imm8, 0, 6);
+ imm <<= 48;
+ break;
+ case MO_32:
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
+ (extract32(imm8, 0, 6) << 3);
+ imm <<= 16;
+ break;
+ case MO_16:
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
+ (extract32(imm8, 0, 6) << 6);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return imm;
+}
+
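+/*
+ * For example, imm8 = 0x70 (sign 0, imm8<6> set, low bits 0x30)
+ * expands to 0x3c00, 0x3f800000 or 0x3ff0000000000000 for MO_16,
+ * MO_32 and MO_64 respectively -- 1.0 in each format.
+ */
+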
+/*
+ * Return the offset of a 16-bit half of the specified VFP single-precision
+ * register. If top is true, returns the top 16 bits; otherwise the bottom
+ * 16 bits.
+ */
+static inline long vfp_f16_offset(unsigned reg, bool top)
+{
+ long offs = vfp_reg_offset(false, reg);
+#ifdef HOST_WORDS_BIGENDIAN
+ if (!top) {
+ offs += 2;
+ }
+#else
+ if (top) {
+ offs += 2;
+ }
+#endif
+ return offs;
+}
+
+/*
+ * Generate code for M-profile lazy FP state preservation if needed;
+ * this corresponds to the pseudocode PreserveFPState() function.
+ */
+static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
+{
+ if (s->v7m_lspact) {
+ /*
+ * Lazy state saving affects external memory and also the NVIC,
+ * so we must mark it as an IO operation for icount (and cause
+ * this to be the last insn in the TB).
+ */
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ gen_io_start();
+ }
+ gen_helper_v7m_preserve_fp_state(cpu_env);
+ /*
+ * If the preserve_fp_state helper doesn't throw an exception
+ * then it will clear LSPACT; we don't need to repeat this for
+ * any further FP insns in this TB.
+ */
+ s->v7m_lspact = false;
+ /*
+ * The helper might have zeroed VPR, so we do not know the
+ * correct value for the MVE_NO_PRED TB flag any more.
+ * If we're about to create a new fp context then that
+ * will precisely determine the MVE_NO_PRED value (see
+ * gen_update_fp_context()). Otherwise, we must:
+ * - set s->mve_no_pred to false, so this instruction
+ * is generated to use helper functions
+ * - end the TB now, without chaining to the next TB
+ */
+ if (skip_context_update || !s->v7m_new_fp_ctxt_needed) {
+ s->mve_no_pred = false;
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ }
+ }
+}
+
+/*
+ * Generate code for M-profile FP context handling: update the
+ * ownership of the FP context, and create a new context if
+ * necessary. This corresponds to the parts of the pseudocode
+ * ExecuteFPCheck() after the initial PreserveFPState() call.
+ */
+static void gen_update_fp_context(DisasContext *s)
+{
+ /* Update ownership of FP context: set FPCCR.S to match current state */
+ if (s->v8m_fpccr_s_wrong) {
+ TCGv_i32 tmp;
+
+ tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
+ if (s->v8m_secure) {
+ tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
+ }
+ store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
+ /* Don't need to do this for any further FP insns in this TB */
+ s->v8m_fpccr_s_wrong = false;
+ }
+
+ if (s->v7m_new_fp_ctxt_needed) {
+ /*
+ * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
+ * the FPSCR, and VPR.
+ */
+ TCGv_i32 control, fpscr;
+ uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
+
+ fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
+ gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+ tcg_temp_free_i32(fpscr);
+ if (dc_isar_feature(aa32_mve, s)) {
+ TCGv_i32 z32 = tcg_const_i32(0);
+ store_cpu_field(z32, v7m.vpr);
+ }
+ /*
+ * We just updated the FPSCR and VPR. Some of this state is cached
+ * in the MVE_NO_PRED TB flag. We want to avoid having to end the
+ * TB here, which means we need the new value of the MVE_NO_PRED
+ * flag to be exactly known here and the same for all executions.
+ * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is
+ * always set to 0, so the new MVE_NO_PRED flag is 1 if and
+ * only if we have MVE.
+ *
+ * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE,
+ * but those do not exist for M-profile, so are not relevant here.)
+ */
+ s->mve_no_pred = dc_isar_feature(aa32_mve, s);
+
+ if (s->v8m_secure) {
+ bits |= R_V7M_CONTROL_SFPA_MASK;
+ }
+ control = load_cpu_field(v7m.control[M_REG_S]);
+ tcg_gen_ori_i32(control, control, bits);
+ store_cpu_field(control, v7m.control[M_REG_S]);
+ /* Don't need to do this for any further FP insns in this TB */
+ s->v7m_new_fp_ctxt_needed = false;
+ }
+}
+
+/*
+ * Check that VFP access is enabled, A-profile specific version.
+ *
+ * If VFP is enabled, return true. If not, emit code to generate an
+ * appropriate exception and return false.
+ * The ignore_vfp_enabled argument specifies that we should ignore
+ * whether VFP is enabled via FPEXC.EN: this should be true for FMXR/FMRX
+ * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
+ */
+static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled)
+{
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ return false;
+ }
+
+ if (!s->vfp_enabled && !ignore_vfp_enabled) {
+ assert(!arm_dc_feature(s, ARM_FEATURE_M));
+ unallocated_encoding(s);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Check that VFP access is enabled, M-profile specific version.
+ *
+ * If VFP is enabled, do the necessary M-profile lazy-FP handling and then
+ * return true. If not, emit code to generate an appropriate exception and
+ * return false.
+ * skip_context_update is true to skip the "update FP context" part of this.
+ */
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update)
+{
+ if (s->fp_excp_el) {
+ /*
+ * M-profile mostly catches the "FPU disabled" case early, in
+ * disas_m_nocp(), but a few insns (e.g. LCTP, WLSTP, DLSTP)
+ * which do coprocessor-checks are outside the large ranges of
+ * the encoding space handled by the patterns in m-nocp.decode,
+ * and for them we may need to raise NOCP here.
+ */
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ return false;
+ }
+
+ /* Handle M-profile lazy FP state mechanics */
+
+ /* Trigger lazy-state preservation if necessary */
+ gen_preserve_fp_state(s, skip_context_update);
+
+ if (!skip_context_update) {
+ /* Update ownership of FP context and create new FP context if needed */
+ gen_update_fp_context(s);
+ }
+
+ return true;
+}
+
+/*
+ * The most usual kind of VFP access check, for everything except
+ * FMXR/FMRX to the always-available special registers.
+ */
+bool vfp_access_check(DisasContext *s)
+{
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return vfp_access_check_m(s, false);
+ } else {
+ return vfp_access_check_a(s, false);
+ }
+}
+
+static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
+{
+ uint32_t rd, rn, rm;
+ int sz = a->sz;
+
+ if (!dc_isar_feature(aa32_vsel, s)) {
+ return false;
+ }
+
+ if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vm | a->vn | a->vd) & 0x10)) {
+ return false;
+ }
+
+ rd = a->vd;
+ rn = a->vn;
+ rm = a->vm;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (sz == 3) {
+ TCGv_i64 frn, frm, dest;
+ TCGv_i64 tmp, zero, zf, nf, vf;
+
+ zero = tcg_const_i64(0);
+
+ frn = tcg_temp_new_i64();
+ frm = tcg_temp_new_i64();
+ dest = tcg_temp_new_i64();
+
+ zf = tcg_temp_new_i64();
+ nf = tcg_temp_new_i64();
+ vf = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(zf, cpu_ZF);
+ tcg_gen_ext_i32_i64(nf, cpu_NF);
+ tcg_gen_ext_i32_i64(vf, cpu_VF);
+
+ vfp_load_reg64(frn, rn);
+ vfp_load_reg64(frm, rm);
+ switch (a->cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
+ frn, frm);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
+ frn, frm);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
+ frn, frm);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
+ tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
+ frn, frm);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
+ dest, frm);
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ vfp_store_reg64(dest, rd);
+ tcg_temp_free_i64(frn);
+ tcg_temp_free_i64(frm);
+ tcg_temp_free_i64(dest);
+
+ tcg_temp_free_i64(zf);
+ tcg_temp_free_i64(nf);
+ tcg_temp_free_i64(vf);
+
+ tcg_temp_free_i64(zero);
+ } else {
+ TCGv_i32 frn, frm, dest;
+ TCGv_i32 tmp, zero;
+
+ zero = tcg_const_i32(0);
+
+ frn = tcg_temp_new_i32();
+ frm = tcg_temp_new_i32();
+ dest = tcg_temp_new_i32();
+ vfp_load_reg32(frn, rn);
+ vfp_load_reg32(frm, rm);
+ switch (a->cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
+ frn, frm);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
+ frn, frm);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
+ frn, frm);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
+ tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
+ frn, frm);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
+ dest, frm);
+ tcg_temp_free_i32(tmp);
+ break;
+ }
+ /* For fp16 the top half is always zeroes */
+ if (sz == 1) {
+ tcg_gen_andi_i32(dest, dest, 0xffff);
+ }
+ vfp_store_reg32(dest, rd);
+ tcg_temp_free_i32(frn);
+ tcg_temp_free_i32(frm);
+ tcg_temp_free_i32(dest);
+
+ tcg_temp_free_i32(zero);
+ }
+
+ return true;
+}
+
+/*
+ * Table for converting the most common AArch32 encoding of
+ * rounding mode to arm_fprounding order (which matches the
+ * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
+ */
+static const uint8_t fp_decode_rm[] = {
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+};
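+/*
+ * e.g. a->rm == 0 decodes to FPROUNDING_TIEAWAY (the VRINTA/VCVTA
+ * encodings), 1 to TIEEVEN (VRINTN/VCVTN), 2 to POSINF (VRINTP/VCVTP)
+ * and 3 to NEGINF (VRINTM/VCVTM).
+ */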
+
+static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
+{
+ uint32_t rd, rm;
+ int sz = a->sz;
+ TCGv_ptr fpst;
+ TCGv_i32 tcg_rmode;
+ int rounding = fp_decode_rm[a->rm];
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vm | a->vd) & 0x10)) {
+ return false;
+ }
+
+ rd = a->vd;
+ rm = a->vm;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (sz == 1) {
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ } else {
+ fpst = fpstatus_ptr(FPST_FPCR);
+ }
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+
+ if (sz == 3) {
+ TCGv_i64 tcg_op;
+ TCGv_i64 tcg_res;
+ tcg_op = tcg_temp_new_i64();
+ tcg_res = tcg_temp_new_i64();
+ vfp_load_reg64(tcg_op, rm);
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+ vfp_store_reg64(tcg_res, rd);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op;
+ TCGv_i32 tcg_res;
+ tcg_op = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ vfp_load_reg32(tcg_op, rm);
+ if (sz == 1) {
+ gen_helper_rinth(tcg_res, tcg_op, fpst);
+ } else {
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+ }
+ vfp_store_reg32(tcg_res, rd);
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_temp_free_i32(tcg_rmode);
+
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
+{
+ uint32_t rd, rm;
+ int sz = a->sz;
+ TCGv_ptr fpst;
+ TCGv_i32 tcg_rmode, tcg_shift;
+ int rounding = fp_decode_rm[a->rm];
+ bool is_signed = a->op;
+
+ if (!dc_isar_feature(aa32_vcvt_dr, s)) {
+ return false;
+ }
+
+ if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ rd = a->vd;
+ rm = a->vm;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (sz == 1) {
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ } else {
+ fpst = fpstatus_ptr(FPST_FPCR);
+ }
+
+ tcg_shift = tcg_const_i32(0);
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+
+ if (sz == 3) {
+ TCGv_i64 tcg_double, tcg_res;
+ TCGv_i32 tcg_tmp;
+ tcg_double = tcg_temp_new_i64();
+ tcg_res = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i32();
+ vfp_load_reg64(tcg_double, rm);
+ if (is_signed) {
+ gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
+ }
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
+ vfp_store_reg32(tcg_tmp, rd);
+ tcg_temp_free_i32(tcg_tmp);
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single, tcg_res;
+ tcg_single = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ vfp_load_reg32(tcg_single, rm);
+ if (sz == 1) {
+ if (is_signed) {
+ gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
+ }
+ } else {
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
+ }
+ }
+ vfp_store_reg32(tcg_res, rd);
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_single);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ tcg_temp_free_i32(tcg_rmode);
+
+ tcg_temp_free_i32(tcg_shift);
+
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
+
+bool mve_skip_vmov(DisasContext *s, int vn, int index, int size)
+{
+ /*
+ * In a CPU with MVE, the VMOV (vector lane to general-purpose register)
+ * and VMOV (general-purpose register to vector lane) insns are not
+ * predicated, but they are subject to beatwise execution if they are
+ * not in an IT block.
+ *
+ * Since our implementation always executes all 4 beats in one tick,
+ * this means only that if PSR.ECI says we should not be executing
+ * the beat corresponding to the lane of the vector register being
+ * accessed then we should skip performing the move, and that we need
+ * to do the usual check for bad ECI state and advance of ECI state.
+ *
+ * Note that if PSR.ECI is non-zero then we cannot be in an IT block.
+ *
+ * Return true if this VMOV scalar <-> gpreg should be skipped because
+ * the MVE PSR.ECI state says we skip the beat where the store happens.
+ */
+
+ /* Calculate the byte offset into Qn which we're going to access */
+ int ofs = (index << size) + ((vn & 1) * 8);
+
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ switch (s->eci) {
+ case ECI_NONE:
+ return false;
+ case ECI_A0:
+ return ofs < 4;
+ case ECI_A0A1:
+ return ofs < 8;
+ case ECI_A0A1A2:
+ case ECI_A0A1A2B0:
+ return ofs < 12;
+ default:
+ g_assert_not_reached();
+ }
+}
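+
+/*
+ * For example, lane 1 of an even-numbered Dreg accessed at size
+ * MO_32 gives ofs = 4, i.e. beat A1: the move is skipped for
+ * ECI_A0A1 and later states (that beat has already executed) but
+ * performed for ECI_NONE and ECI_A0.
+ */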
+
+static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
+{
+ /* VMOV scalar to general purpose register */
+ TCGv_i32 tmp;
+
+ /*
+ * An access with SIZE == MO_32 is a VFP instruction; otherwise it
+ * is NEON. MVE has all sizes, whether the CPU has fp or not.
+ */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ if (a->size == MO_32
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
+ return false;
+ }
+
+ if (dc_isar_feature(aa32_mve, s)) {
+ if (!mve_eci_check(s)) {
+ return true;
+ }
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
+ tmp = tcg_temp_new_i32();
+ read_neon_element32(tmp, a->vn, a->index,
+ a->size | (a->u ? 0 : MO_SIGN));
+ store_reg(s, a->rt, tmp);
+ }
+
+ if (dc_isar_feature(aa32_mve, s)) {
+ mve_update_and_store_eci(s);
+ }
+ return true;
+}
+
+static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
+{
+ /* VMOV general purpose register to scalar */
+ TCGv_i32 tmp;
+
+ /*
+ * An access with SIZE == MO_32 is a VFP instruction; otherwise it
+ * is NEON. MVE has all sizes, whether the CPU has fp or not.
+ */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ if (a->size == MO_32
+ ? !dc_isar_feature(aa32_fpsp_v2, s)
+ : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
+ return false;
+ }
+
+ if (dc_isar_feature(aa32_mve, s)) {
+ if (!mve_eci_check(s)) {
+ return true;
+ }
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (!mve_skip_vmov(s, a->vn, a->index, a->size)) {
+ tmp = load_reg(s, a->rt);
+ write_neon_element32(tmp, a->vn, a->index, a->size);
+ tcg_temp_free_i32(tmp);
+ }
+
+ if (dc_isar_feature(aa32_mve, s)) {
+ mve_update_and_store_eci(s);
+ }
+ return true;
+}
+
+static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
+{
+ /* VDUP (general purpose register) */
+ TCGv_i32 tmp;
+ int size, vec_size;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
+ return false;
+ }
+
+ if (a->b && a->e) {
+ return false;
+ }
+
+ if (a->q && (a->vn & 1)) {
+ return false;
+ }
+
+ vec_size = a->q ? 16 : 8;
+ if (a->b) {
+ size = 0;
+ } else if (a->e) {
+ size = 1;
+ } else {
+ size = 2;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = load_reg(s, a->rt);
+ tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
+ vec_size, vec_size, tmp);
+ tcg_temp_free_i32(tmp);
+
+ return true;
+}
+
+static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
+{
+ TCGv_i32 tmp;
+ bool ignore_vfp_enabled = false;
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* M profile version was already handled in m-nocp.decode */
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ switch (a->reg) {
+ case ARM_VFP_FPSID:
+ /*
+ * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
+ * all ID registers to privileged access only.
+ */
+ if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_MVFR2:
+ if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_FPSCR:
+ break;
+ case ARM_VFP_FPEXC:
+ if (IS_USER(s)) {
+ return false;
+ }
+ ignore_vfp_enabled = true;
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ /* Not present in VFPv3 */
+ if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * Call vfp_access_check_a() directly, because we need to tell
+ * it to ignore FPEXC.EN for some register accesses.
+ */
+ if (!vfp_access_check_a(s, ignore_vfp_enabled)) {
+ return true;
+ }
+
+ if (a->l) {
+ /* VMRS, move VFP special register to gp register */
+ switch (a->reg) {
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ case ARM_VFP_FPSID:
+ if (s->current_el == 1) {
+ TCGv_i32 tcg_reg, tcg_rt;
+
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tcg_reg = tcg_const_i32(a->reg);
+ tcg_rt = tcg_const_i32(a->rt);
+ gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
+ tcg_temp_free_i32(tcg_reg);
+ tcg_temp_free_i32(tcg_rt);
+ }
+ /* fall through */
+ case ARM_VFP_FPEXC:
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ tmp = load_cpu_field(vfp.xregs[a->reg]);
+ break;
+ case ARM_VFP_FPSCR:
+ if (a->rt == 15) {
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
+ } else {
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR. */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, a->rt, tmp);
+ }
+ } else {
+ /* VMSR, move gp register to VFP special register */
+ switch (a->reg) {
+ case ARM_VFP_FPSID:
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ /* Writes are ignored. */
+ break;
+ case ARM_VFP_FPSCR:
+ tmp = load_reg(s, a->rt);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPEXC:
+ /*
+ * TODO: VFP subarchitecture support.
+ * For now, keep the EN bit only
+ */
+ tmp = load_reg(s, a->rt);
+ tcg_gen_andi_i32(tmp, tmp, 1 << 30);
+ store_cpu_field(tmp, vfp.xregs[a->reg]);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ tmp = load_reg(s, a->rt);
+ store_cpu_field(tmp, vfp.xregs[a->reg]);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ return true;
+}
+
+static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
+{
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (a->rt == 15) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (a->l) {
+ /* VFP to general purpose register */
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vn);
+ tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ store_reg(s, a->rt, tmp);
+ } else {
+ /* general purpose register to VFP */
+ tmp = load_reg(s, a->rt);
+ tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ vfp_store_reg32(tmp, a->vn);
+ tcg_temp_free_i32(tmp);
+ }
+
+ return true;
+}
+
+static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
+{
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (a->l) {
+ /* VFP to general purpose register */
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vn);
+ if (a->rt == 15) {
+ /* Set the 4 flag bits in the CPSR. */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, a->rt, tmp);
+ }
+ } else {
+ /* general purpose register to VFP */
+ tmp = load_reg(s, a->rt);
+ vfp_store_reg32(tmp, a->vn);
+ tcg_temp_free_i32(tmp);
+ }
+
+ return true;
+}
+
+static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
+{
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ /*
+ * VMOV between two general-purpose registers and two single precision
+ * floating point registers
+ */
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (a->op) {
+ /* fpreg to gpreg */
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ store_reg(s, a->rt, tmp);
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm + 1);
+ store_reg(s, a->rt2, tmp);
+ } else {
+ /* gpreg to fpreg */
+ tmp = load_reg(s, a->rt);
+ vfp_store_reg32(tmp, a->vm);
+ tcg_temp_free_i32(tmp);
+ tmp = load_reg(s, a->rt2);
+ vfp_store_reg32(tmp, a->vm + 1);
+ tcg_temp_free_i32(tmp);
+ }
+
+ return true;
+}
+
+static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
+{
+ TCGv_i32 tmp;
+
+ /*
+ * VMOV between two general-purpose registers and one double precision
+ * floating point register. Note that this does not require support
+ * for double precision arithmetic.
+ */
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (a->op) {
+ /* fpreg to gpreg */
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm * 2);
+ store_reg(s, a->rt, tmp);
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm * 2 + 1);
+ store_reg(s, a->rt2, tmp);
+ } else {
+ /* gpreg to fpreg */
+ tmp = load_reg(s, a->rt);
+ vfp_store_reg32(tmp, a->vm * 2);
+ tcg_temp_free_i32(tmp);
+ tmp = load_reg(s, a->rt2);
+ vfp_store_reg32(tmp, a->vm * 2 + 1);
+ tcg_temp_free_i32(tmp);
+ }
+
+ return true;
+}
+
+static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr, tmp;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
+ offset = a->imm << 1;
+ if (!a->u) {
+ offset = -offset;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
+ tmp = tcg_temp_new_i32();
+ if (a->l) {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
+ vfp_store_reg32(tmp, a->vd);
+ } else {
+ vfp_load_reg32(tmp, a->vd);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr, tmp;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ offset = a->imm << 2;
+ if (!a->u) {
+ offset = -offset;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
+ tmp = tcg_temp_new_i32();
+ if (a->l) {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ vfp_store_reg32(tmp, a->vd);
+ } else {
+ vfp_load_reg32(tmp, a->vd);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr;
+ TCGv_i64 tmp;
+
+ /* Note that this does not require support for double arithmetic. */
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ offset = a->imm << 2;
+ if (!a->u) {
+ offset = -offset;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, offset);
+ tmp = tcg_temp_new_i64();
+ if (a->l) {
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ vfp_store_reg64(tmp, a->vd);
+ } else {
+ vfp_load_reg64(tmp, a->vd);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr, tmp;
+ int i, n;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ n = a->imm;
+
+ if (n == 0 || (a->vd + n) > 32) {
+ /*
+ * UNPREDICTABLE cases for bad immediates: we choose to
+ * UNDEF to avoid generating huge numbers of TCG ops
+ */
+ return false;
+ }
+ if (a->rn == 15 && a->w) {
+ /* writeback to PC is UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+
+ s->eci_handled = true;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, 0);
+ if (a->p) {
+ /* pre-decrement */
+ tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ /*
+ * Here 'addr' is the lowest address we will store to,
+ * and is either the old SP (if post-increment) or
+ * the new SP (if pre-decrement). For post-increment
+ * where the old value is below the limit and the new
+ * value is above, it is UNKNOWN whether the limit check
+ * triggers; we choose to trigger.
+ */
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ offset = 4;
+ tmp = tcg_temp_new_i32();
+ for (i = 0; i < n; i++) {
+ if (a->l) {
+ /* load */
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ vfp_store_reg32(tmp, a->vd + i);
+ } else {
+ /* store */
+ vfp_load_reg32(tmp, a->vd + i);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ tcg_temp_free_i32(tmp);
+ if (a->w) {
+ /* writeback */
+ if (a->p) {
+ offset = -offset * n;
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+
+ clear_eci_state(s);
+ return true;
+}
+
+static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
+{
+ uint32_t offset;
+ TCGv_i32 addr;
+ TCGv_i64 tmp;
+ int i, n;
+
+ /* Note that this does not require support for double arithmetic. */
+ if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ n = a->imm >> 1;
+
+ if (n == 0 || (a->vd + n) > 32 || n > 16) {
+ /*
+ * UNPREDICTABLE cases for bad immediates: we choose to
+ * UNDEF to avoid generating huge numbers of TCG ops
+ */
+ return false;
+ }
+ if (a->rn == 15 && a->w) {
+ /* writeback to PC is UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
+ return false;
+ }
+
+ s->eci_handled = true;
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* For thumb, use of PC is UNPREDICTABLE. */
+ addr = add_reg_for_lit(s, a->rn, 0);
+ if (a->p) {
+ /* pre-decrement */
+ tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ /*
+ * Here 'addr' is the lowest address we will store to,
+ * and is either the old SP (if post-increment) or
+ * the new SP (if pre-decrement). For post-increment
+ * where the old value is below the limit and the new
+ * value is above, it is UNKNOWN whether the limit check
+ * triggers; we choose to trigger.
+ */
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ offset = 8;
+ tmp = tcg_temp_new_i64();
+ for (i = 0; i < n; i++) {
+ if (a->l) {
+ /* load */
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ vfp_store_reg64(tmp, a->vd + i);
+ } else {
+ /* store */
+ vfp_load_reg64(tmp, a->vd + i);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ tcg_temp_free_i64(tmp);
+ if (a->w) {
+ /* writeback */
+ if (a->p) {
+ offset = -offset * n;
+ } else if (a->imm & 1) {
+ offset = 4;
+ } else {
+ offset = 0;
+ }
+
+ if (offset != 0) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+
+ clear_eci_state(s);
+ return true;
+}
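+
+/*
+ * For illustration: the "a->imm & 1" writeback case above corresponds
+ * to the deprecated FLDMX/FSTMX forms, which encode an odd word count
+ * of 2n+1 for n doubleword registers; the extra word shows up as the
+ * additional offset of 4 applied to the post-increment writeback.
+ */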
+
+/*
+ * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
+ * The callback should emit code to write a value to vd. If
+ * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
+ * will contain the old value of the relevant VFP register;
+ * otherwise it must be written to only.
+ */
+typedef void VFPGen3OpSPFn(TCGv_i32 vd,
+ TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
+typedef void VFPGen3OpDPFn(TCGv_i64 vd,
+ TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
+
+/*
+ * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
+ * The callback should emit code to write a value to vd (which
+ * should be written to only).
+ */
+typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
+typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
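+
+/*
+ * For illustration, a minimal VFPGen3OpSPFn can simply wrap a helper
+ * (gen_example_add_sp is a hypothetical name; trans_VADD_sp() below
+ * passes gen_helper_vfp_adds directly):
+ *
+ * static void gen_example_add_sp(TCGv_i32 vd, TCGv_i32 vn,
+ * TCGv_i32 vm, TCGv_ptr fpst)
+ * {
+ * gen_helper_vfp_adds(vd, vn, vm, fpst);
+ * }
+ *
+ * Callbacks passed with reads_vd set, such as gen_VMLA_sp() below,
+ * additionally consume the old value of vd.
+ */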
+
+/*
+ * Return true if the specified S reg is in a scalar bank
+ * (ie if it is s0..s7)
+ */
+static inline bool vfp_sreg_is_scalar(int reg)
+{
+ return (reg & 0x18) == 0;
+}
+
+/*
+ * Return true if the specified D reg is in a scalar bank
+ * (ie if it is d0..d3 or d16..d19)
+ */
+static inline bool vfp_dreg_is_scalar(int reg)
+{
+ return (reg & 0xc) == 0;
+}
+
+/*
+ * Advance the S reg number forwards by delta within its bank
+ * (ie increment the low 3 bits but leave the rest the same)
+ */
+static inline int vfp_advance_sreg(int reg, int delta)
+{
+ return ((reg + delta) & 0x7) | (reg & ~0x7);
+}
+
+/*
+ * Advance the D reg number forwards by delta within its bank
+ * (ie increment the low 2 bits but leave the rest the same)
+ */
+static inline int vfp_advance_dreg(int reg, int delta)
+{
+ return ((reg + delta) & 0x3) | (reg & ~0x3);
+}
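+
+/*
+ * Worked example, for illustration: S registers sit in banks of
+ * eight, so vfp_advance_sreg(15, 1) computes
+ * ((15 + 1) & 0x7) | (15 & ~0x7) == 8, i.e. s15 wraps around to s8
+ * within its bank rather than advancing to s16; D registers behave
+ * the same way in banks of four.
+ */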
+
+/*
+ * Perform a 3-operand VFP data processing instruction. fn is the
+ * callback to do the actual operation; this function deals with the
+ * code to handle looping around for VFP vector processing.
+ */
+static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
+ int vd, int vn, int vm, bool reads_vd)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i32 f0, f1, fd;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_sreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = s->vec_stride + 1;
+
+ if (vfp_sreg_is_scalar(vm)) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i32();
+ f1 = tcg_temp_new_i32();
+ fd = tcg_temp_new_i32();
+ fpst = fpstatus_ptr(FPST_FPCR);
+
+ vfp_load_reg32(f0, vn);
+ vfp_load_reg32(f1, vm);
+
+ for (;;) {
+ if (reads_vd) {
+ vfp_load_reg32(fd, vd);
+ }
+ fn(fd, f0, f1, fpst);
+ vfp_store_reg32(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_sreg(vd, delta_d);
+ vn = vfp_advance_sreg(vn, delta_d);
+ vfp_load_reg32(f0, vn);
+ if (delta_m) {
+ vm = vfp_advance_sreg(vm, delta_m);
+ vfp_load_reg32(f1, vm);
+ }
+ }
+
+ tcg_temp_free_i32(f0);
+ tcg_temp_free_i32(f1);
+ tcg_temp_free_i32(fd);
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
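+
+/*
+ * For illustration: with vec_len == 3, vec_stride == 0 and a vector
+ * destination such as s8, the loop above executes the operation four
+ * times (the initial pass plus three more) on s8, s9, s10 and s11,
+ * since delta_d == s->vec_stride + 1 == 1 and the bank arithmetic
+ * keeps the registers within s8..s15.
+ */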
+
+static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
+ int vd, int vn, int vm, bool reads_vd)
+{
+ /*
+ * Do a half-precision operation. Functionally this is
+ * the same as do_vfp_3op_sp(), except:
+ * - it uses the FPST_FPCR_F16 float status
+ * - it doesn't need the VFP vector handling (fp16 is a
+ * v8 feature, and in v8 VFP vectors don't exist)
+ * - it does the aa32_fp16_arith feature test
+ */
+ TCGv_i32 f0, f1, fd;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ f0 = tcg_temp_new_i32();
+ f1 = tcg_temp_new_i32();
+ fd = tcg_temp_new_i32();
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+
+ vfp_load_reg32(f0, vn);
+ vfp_load_reg32(f1, vm);
+
+ if (reads_vd) {
+ vfp_load_reg32(fd, vd);
+ }
+ fn(fd, f0, f1, fpst);
+ vfp_store_reg32(fd, vd);
+
+ tcg_temp_free_i32(f0);
+ tcg_temp_free_i32(f1);
+ tcg_temp_free_i32(fd);
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
+
+static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
+ int vd, int vn, int vm, bool reads_vd)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i64 f0, f1, fd;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_dreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = (s->vec_stride >> 1) + 1;
+
+ if (vfp_dreg_is_scalar(vm)) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i64();
+ f1 = tcg_temp_new_i64();
+ fd = tcg_temp_new_i64();
+ fpst = fpstatus_ptr(FPST_FPCR);
+
+ vfp_load_reg64(f0, vn);
+ vfp_load_reg64(f1, vm);
+
+ for (;;) {
+ if (reads_vd) {
+ vfp_load_reg64(fd, vd);
+ }
+ fn(fd, f0, f1, fpst);
+ vfp_store_reg64(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_dreg(vd, delta_d);
+ vn = vfp_advance_dreg(vn, delta_d);
+ vfp_load_reg64(f0, vn);
+ if (delta_m) {
+ vm = vfp_advance_dreg(vm, delta_m);
+ vfp_load_reg64(f1, vm);
+ }
+ }
+
+ tcg_temp_free_i64(f0);
+ tcg_temp_free_i64(f1);
+ tcg_temp_free_i64(fd);
+ tcg_temp_free_ptr(fpst);
+
+ return true;
+}
+
+static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i32 f0, fd;
+
+ /* Note that the caller must check the aa32_fpsp_v2 feature. */
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_sreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = s->vec_stride + 1;
+
+ if (vfp_sreg_is_scalar(vm)) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i32();
+ fd = tcg_temp_new_i32();
+
+ vfp_load_reg32(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ vfp_store_reg32(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ vd = vfp_advance_sreg(vd, delta_d);
+ vfp_store_reg32(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_sreg(vd, delta_d);
+ vm = vfp_advance_sreg(vm, delta_m);
+ vfp_load_reg32(f0, vm);
+ }
+
+ tcg_temp_free_i32(f0);
+ tcg_temp_free_i32(fd);
+
+ return true;
+}
+
+static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
+{
+ /*
+ * Do a half-precision operation. Functionally this is
+ * the same as do_vfp_2op_sp(), except:
+ * - it doesn't need the VFP vector handling (fp16 is a
+ * v8 feature, and in v8 VFP vectors don't exist)
+ * - it does the aa32_fp16_arith feature test
+ */
+ TCGv_i32 f0;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ f0 = tcg_temp_new_i32();
+ vfp_load_reg32(f0, vm);
+ fn(f0, f0);
+ vfp_store_reg32(f0, vd);
+ tcg_temp_free_i32(f0);
+
+ return true;
+}
+
+static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
+{
+ uint32_t delta_m = 0;
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i64 f0, fd;
+
+ /* Note that the caller must check the aa32_fpdp_v2 feature. */
+
+ /* UNDEF accesses to D16-D31 if they don't exist */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_dreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = (s->vec_stride >> 1) + 1;
+
+ if (vfp_dreg_is_scalar(vm)) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ f0 = tcg_temp_new_i64();
+ fd = tcg_temp_new_i64();
+
+ vfp_load_reg64(f0, vm);
+
+ for (;;) {
+ fn(fd, f0);
+ vfp_store_reg64(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ if (delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ vd = vfp_advance_dreg(vd, delta_d);
+ vfp_store_reg64(fd, vd);
+ }
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_dreg(vd, delta_d);
+ vm = vfp_advance_dreg(vm, delta_m);
+ vfp_load_reg64(f0, vm);
+ }
+
+ tcg_temp_free_i64(f0);
+ tcg_temp_free_i64(fd);
+
+ return true;
+}
+
+static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* Note that order of inputs to the add matters for NaNs */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+ gen_helper_vfp_addh(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* Note that order of inputs to the add matters for NaNs */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_muls(tmp, vn, vm, fpst);
+ gen_helper_vfp_adds(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+ /* Note that order of inputs to the add matters for NaNs */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_helper_vfp_muld(tmp, vn, vm, fpst);
+ gen_helper_vfp_addd(vd, vd, tmp, fpst);
+ tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /*
+ * VMLS: vd = vd + -(vn * vm)
+ * Note that order of inputs to the add matters for NaNs.
+ */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+ gen_helper_vfp_negh(tmp, tmp);
+ gen_helper_vfp_addh(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /*
+ * VMLS: vd = vd + -(vn * vm)
+ * Note that order of inputs to the add matters for NaNs.
+ */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_muls(tmp, vn, vm, fpst);
+ gen_helper_vfp_negs(tmp, tmp);
+ gen_helper_vfp_adds(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+ /*
+ * VMLS: vd = vd + -(vn * vm)
+ * Note that order of inputs to the add matters for NaNs.
+ */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_helper_vfp_muld(tmp, vn, vm, fpst);
+ gen_helper_vfp_negd(tmp, tmp);
+ gen_helper_vfp_addd(vd, vd, tmp, fpst);
+ tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /*
+ * VNMLS: -fd + (fn * fm)
+ * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+ * plausible-looking simplifications because this will give wrong results
+ * for NaNs.
+ */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+ gen_helper_vfp_negh(vd, vd);
+ gen_helper_vfp_addh(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /*
+ * VNMLS: -fd + (fn * fm)
+ * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+ * plausible-looking simplifications because this will give wrong results
+ * for NaNs.
+ */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_muls(tmp, vn, vm, fpst);
+ gen_helper_vfp_negs(vd, vd);
+ gen_helper_vfp_adds(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+ /*
+ * VNMLS: -fd + (fn * fm)
+ * Note that it isn't valid to replace (-A + B) with (B - A) or similar
+ * plausible-looking simplifications because this will give wrong results
+ * for NaNs.
+ */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_helper_vfp_muld(tmp, vn, vm, fpst);
+ gen_helper_vfp_negd(vd, vd);
+ gen_helper_vfp_addd(vd, vd, tmp, fpst);
+ tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* VNMLA: -fd + -(fn * fm) */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_mulh(tmp, vn, vm, fpst);
+ gen_helper_vfp_negh(tmp, tmp);
+ gen_helper_vfp_negh(vd, vd);
+ gen_helper_vfp_addh(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* VNMLA: -fd + -(fn * fm) */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ gen_helper_vfp_muls(tmp, vn, vm, fpst);
+ gen_helper_vfp_negs(tmp, tmp);
+ gen_helper_vfp_negs(vd, vd);
+ gen_helper_vfp_adds(vd, vd, tmp, fpst);
+ tcg_temp_free_i32(tmp);
+}
+
+static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
+}
+
+static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+ /* VNMLA: -fd + -(fn * fm) */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_helper_vfp_muld(tmp, vn, vm, fpst);
+ gen_helper_vfp_negd(tmp, tmp);
+ gen_helper_vfp_negd(vd, vd);
+ gen_helper_vfp_addd(vd, vd, tmp, fpst);
+ tcg_temp_free_i64(tmp);
+}
+
+static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
+}
+
+static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
+}
+
+static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* VNMUL: -(fn * fm) */
+ gen_helper_vfp_mulh(vd, vn, vm, fpst);
+ gen_helper_vfp_negh(vd, vd);
+}
+
+static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
+}
+
+static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
+{
+ /* VNMUL: -(fn * fm) */
+ gen_helper_vfp_muls(vd, vn, vm, fpst);
+ gen_helper_vfp_negs(vd, vd);
+}
+
+static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
+}
+
+static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
+{
+ /* VNMUL: -(fn * fm) */
+ gen_helper_vfp_muld(vd, vn, vm, fpst);
+ gen_helper_vfp_negd(vd, vd);
+}
+
+static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
+{
+ return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
+{
+ return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
+{
+ return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
+{
+ if (!dc_isar_feature(aa32_vminmaxnm, s)) {
+ return false;
+ }
+ return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
+ a->vd, a->vn, a->vm, false);
+}
+
+static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
+{
+ /*
+ * VFNMA : fd = muladd(-fd, fn, fm)
+ * VFNMS : fd = muladd(-fd, -fn, fm)
+ * VFMA : fd = muladd( fd, fn, fm)
+ * VFMS : fd = muladd( fd, -fn, fm)
+ *
+ * These are fused multiply-add, and must be done as one floating
+ * point operation with no rounding between the multiplication and
+ * addition steps. NB that doing the negations here as separate
+ * steps is correct: an input NaN should come out with its sign
+ * bit flipped if it is a negated-input.
+ */
+ TCGv_ptr fpst;
+ TCGv_i32 vn, vm, vd;
+
+ /*
+ * Present in VFPv4 only, and only with the FP16 extension.
+ * Note that we can't rely on the SIMDFMAC check alone, because
+ * in a Neon-no-VFP core that ID register field will be non-zero.
+ */
+ if (!dc_isar_feature(aa32_fp16_arith, s) ||
+ !dc_isar_feature(aa32_simdfmac, s) ||
+ !dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vn = tcg_temp_new_i32();
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i32();
+
+ vfp_load_reg32(vn, a->vn);
+ vfp_load_reg32(vm, a->vm);
+ if (neg_n) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negh(vn, vn);
+ }
+ vfp_load_reg32(vd, a->vd);
+ if (neg_d) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negh(vd, vd);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
+ vfp_store_reg32(vd, a->vd);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(vn);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i32(vd);
+
+ return true;
+}
+
+static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
+{
+ /*
+ * VFNMA : fd = muladd(-fd, fn, fm)
+ * VFNMS : fd = muladd(-fd, -fn, fm)
+ * VFMA : fd = muladd( fd, fn, fm)
+ * VFMS : fd = muladd( fd, -fn, fm)
+ *
+ * These are fused multiply-add, and must be done as one floating
+ * point operation with no rounding between the multiplication and
+ * addition steps. NB that doing the negations here as separate
+ * steps is correct: an input NaN should come out with its sign
+ * bit flipped if it is a negated-input.
+ */
+ TCGv_ptr fpst;
+ TCGv_i32 vn, vm, vd;
+
+ /*
+ * Present in VFPv4 only.
+ * Note that we can't rely on the SIMDFMAC check alone, because
+ * in a Neon-no-VFP core that ID register field will be non-zero.
+ */
+ if (!dc_isar_feature(aa32_simdfmac, s) ||
+ !dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+ /*
+ * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
+ * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
+ */
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vn = tcg_temp_new_i32();
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i32();
+
+ vfp_load_reg32(vn, a->vn);
+ vfp_load_reg32(vm, a->vm);
+ if (neg_n) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negs(vn, vn);
+ }
+ vfp_load_reg32(vd, a->vd);
+ if (neg_d) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negs(vd, vd);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
+ vfp_store_reg32(vd, a->vd);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(vn);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i32(vd);
+
+ return true;
+}
+
+static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
+{
+ /*
+ * VFNMA : fd = muladd(-fd, fn, fm)
+ * VFNMS : fd = muladd(-fd, -fn, fm)
+ * VFMA : fd = muladd( fd, fn, fm)
+ * VFMS : fd = muladd( fd, -fn, fm)
+ *
+ * These are fused multiply-add, and must be done as one floating
+ * point operation with no rounding between the multiplication and
+ * addition steps. NB that doing the negations here as separate
+ * steps is correct: an input NaN should come out with its sign
+ * bit flipped if it is a negated-input.
+ */
+ TCGv_ptr fpst;
+ TCGv_i64 vn, vm, vd;
+
+ /*
+ * Present in VFPv4 only.
+ * Note that we can't rely on the SIMDFMAC check alone, because
+ * in a Neon-no-VFP core that ID register field will be non-zero.
+ */
+ if (!dc_isar_feature(aa32_simdfmac, s) ||
+ !dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+ /*
+ * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
+ * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
+ */
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vn = tcg_temp_new_i64();
+ vm = tcg_temp_new_i64();
+ vd = tcg_temp_new_i64();
+
+ vfp_load_reg64(vn, a->vn);
+ vfp_load_reg64(vm, a->vm);
+ if (neg_n) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negd(vn, vn);
+ }
+ vfp_load_reg64(vd, a->vd);
+ if (neg_d) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negd(vd, vd);
+ }
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
+ vfp_store_reg64(vd, a->vd);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(vn);
+ tcg_temp_free_i64(vm);
+ tcg_temp_free_i64(vd);
+
+ return true;
+}
+
+#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD) \
+ static bool trans_##INSN##_##PREC(DisasContext *s, \
+ arg_##INSN##_##PREC *a) \
+ { \
+ return do_vfm_##PREC(s, a, NEGN, NEGD); \
+ }
+
+#define MAKE_VFM_TRANS_FNS(PREC) \
+ MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
+ MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
+ MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
+ MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
+
+MAKE_VFM_TRANS_FNS(hp)
+MAKE_VFM_TRANS_FNS(sp)
+MAKE_VFM_TRANS_FNS(dp)
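+
+/*
+ * For illustration, MAKE_ONE_VFM_TRANS_FN(VFMA, hp, false, false)
+ * above expands to:
+ *
+ * static bool trans_VFMA_hp(DisasContext *s, arg_VFMA_hp *a)
+ * {
+ * return do_vfm_hp(s, a, false, false);
+ * }
+ */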
+
+static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
+{
+ TCGv_i32 fd;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
+ vfp_store_reg32(fd, a->vd);
+ tcg_temp_free_i32(fd);
+ return true;
+}
+
+static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
+{
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i32 fd;
+ uint32_t vd;
+
+ vd = a->vd;
+
+ if (!dc_isar_feature(aa32_fpsp_v3, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_sreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = s->vec_stride + 1;
+ }
+ }
+
+ fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
+
+ for (;;) {
+ vfp_store_reg32(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_sreg(vd, delta_d);
+ }
+
+ tcg_temp_free_i32(fd);
+ return true;
+}
+
+static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
+{
+ uint32_t delta_d = 0;
+ int veclen = s->vec_len;
+ TCGv_i64 fd;
+ uint32_t vd;
+
+ vd = a->vd;
+
+ if (!dc_isar_feature(aa32_fpdp_v3, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fpshvec, s) &&
+ (veclen != 0 || s->vec_stride != 0)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ if (veclen > 0) {
+ /* Figure out what type of vector operation this is. */
+ if (vfp_dreg_is_scalar(vd)) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ delta_d = (s->vec_stride >> 1) + 1;
+ }
+ }
+
+ fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
+
+ for (;;) {
+ vfp_store_reg64(fd, vd);
+
+ if (veclen == 0) {
+ break;
+ }
+
+ /* Set up the operands for the next iteration */
+ veclen--;
+ vd = vfp_advance_dreg(vd, delta_d);
+ }
+
+ tcg_temp_free_i64(fd);
+ return true;
+}
+
+#define DO_VFP_2OP(INSN, PREC, FN, CHECK) \
+ static bool trans_##INSN##_##PREC(DisasContext *s, \
+ arg_##INSN##_##PREC *a) \
+ { \
+ if (!dc_isar_feature(CHECK, s)) { \
+ return false; \
+ } \
+ return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
+ }
+
+#define DO_VFP_VMOV(INSN, PREC, FN) \
+ static bool trans_##INSN##_##PREC(DisasContext *s, \
+ arg_##INSN##_##PREC *a) \
+ { \
+ if (!dc_isar_feature(aa32_fp##PREC##_v2, s) && \
+ !dc_isar_feature(aa32_mve, s)) { \
+ return false; \
+ } \
+ return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
+ }
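+
+/*
+ * For illustration, DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss,
+ * aa32_fpsp_v2) below expands to:
+ *
+ * static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
+ * {
+ * if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ * return false;
+ * }
+ * return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
+ * }
+ */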
+
+DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
+DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)
+
+DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
+DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
+DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)
+
+DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
+DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
+DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
+
+static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
+{
+ gen_helper_vfp_sqrth(vd, vm, cpu_env);
+}
+
+static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
+{
+ gen_helper_vfp_sqrts(vd, vm, cpu_env);
+}
+
+static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
+{
+ gen_helper_vfp_sqrtd(vd, vm, cpu_env);
+}
+
+DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
+DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
+DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)
+
+static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
+{
+ TCGv_i32 vd, vm;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ /* Vm/M bits must be zero for the Z variant */
+ if (a->z && a->vm != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i32();
+ vm = tcg_temp_new_i32();
+
+ vfp_load_reg32(vd, a->vd);
+ if (a->z) {
+ tcg_gen_movi_i32(vm, 0);
+ } else {
+ vfp_load_reg32(vm, a->vm);
+ }
+
+ if (a->e) {
+ gen_helper_vfp_cmpeh(vd, vm, cpu_env);
+ } else {
+ gen_helper_vfp_cmph(vd, vm, cpu_env);
+ }
+
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(vm);
+
+ return true;
+}
+
+static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
+{
+ TCGv_i32 vd, vm;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ /* Vm/M bits must be zero for the Z variant */
+ if (a->z && a->vm != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i32();
+ vm = tcg_temp_new_i32();
+
+ vfp_load_reg32(vd, a->vd);
+ if (a->z) {
+ tcg_gen_movi_i32(vm, 0);
+ } else {
+ vfp_load_reg32(vm, a->vm);
+ }
+
+ if (a->e) {
+ gen_helper_vfp_cmpes(vd, vm, cpu_env);
+ } else {
+ gen_helper_vfp_cmps(vd, vm, cpu_env);
+ }
+
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(vm);
+
+ return true;
+}
+
+static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
+{
+ TCGv_i64 vd, vm;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* Vm/M bits must be zero for the Z variant */
+ if (a->z && a->vm != 0) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i64();
+ vm = tcg_temp_new_i64();
+
+ vfp_load_reg64(vd, a->vd);
+ if (a->z) {
+ tcg_gen_movi_i64(vm, 0);
+ } else {
+ vfp_load_reg64(vm, a->vm);
+ }
+
+ if (a->e) {
+ gen_helper_vfp_cmped(vd, vm, cpu_env);
+ } else {
+ gen_helper_vfp_cmpd(vd, vm, cpu_env);
+ }
+
+ tcg_temp_free_i64(vd);
+ tcg_temp_free_i64(vm);
+
+ return true;
+}
+
+static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ /* The T bit tells us if we want the low or high 16 bits of Vm */
+ tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+ TCGv_i64 vd;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ /* The T bit tells us if we want the low or high 16 bits of Vm */
+ tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
+ vd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
+ vfp_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i64(vd);
+ return true;
+}
+
+static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_bf16, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ tmp = tcg_temp_new_i32();
+
+ vfp_load_reg32(tmp, a->vm);
+ gen_helper_bfcvt(tmp, tmp, fpst);
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_spconv, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+
+ vfp_load_reg32(tmp, a->vm);
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 ahp_mode;
+ TCGv_i32 tmp;
+ TCGv_i64 vm;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ ahp_mode = get_ahp_flag();
+ tmp = tcg_temp_new_i32();
+ vm = tcg_temp_new_i64();
+
+ vfp_load_reg64(vm, a->vm);
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
+ tcg_temp_free_i64(vm);
+ tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
+ tcg_temp_free_i32(ahp_mode);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_rinth(tmp, tmp, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_rints(tmp, tmp, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ vfp_load_reg64(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_rintd(tmp, tmp, fpst);
+ vfp_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rinth(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rints(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+ TCGv_i32 tcg_rmode;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ vfp_load_reg64(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ gen_helper_rintd(tmp, tmp, fpst);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
+ vfp_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i32(tcg_rmode);
+ return true;
+}
+
+static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ gen_helper_rinth_exact(tmp, tmp, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i32();
+ vfp_load_reg32(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_rints_exact(tmp, tmp, fpst);
+ vfp_store_reg32(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
+
+static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tmp;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_vrint, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ tmp = tcg_temp_new_i64();
+ vfp_load_reg64(tmp, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ gen_helper_rintd_exact(tmp, tmp, fpst);
+ vfp_store_reg64(tmp, a->vd);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
+{
+ TCGv_i64 vd;
+ TCGv_i32 vm;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i64();
+ vfp_load_reg32(vm, a->vm);
+ gen_helper_vfp_fcvtds(vd, vm, cpu_env);
+ vfp_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i64(vd);
+ return true;
+}
+
+static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
+{
+ TCGv_i64 vm;
+ TCGv_i32 vd;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vd = tcg_temp_new_i32();
+ vm = tcg_temp_new_i64();
+ vfp_load_reg64(vm, a->vm);
+ gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
+ vfp_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i64(vm);
+ return true;
+}
+
+static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vfp_load_reg32(vm, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ if (a->s) {
+ /* i32 -> f16 */
+ gen_helper_vfp_sitoh(vm, vm, fpst);
+ } else {
+ /* u32 -> f16 */
+ gen_helper_vfp_uitoh(vm, vm, fpst);
+ }
+ vfp_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vfp_load_reg32(vm, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ if (a->s) {
+ /* i32 -> f32 */
+ gen_helper_vfp_sitos(vm, vm, fpst);
+ } else {
+ /* u32 -> f32 */
+ gen_helper_vfp_uitos(vm, vm, fpst);
+ }
+ vfp_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
+{
+ TCGv_i32 vm;
+ TCGv_i64 vd;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i32();
+ vd = tcg_temp_new_i64();
+ vfp_load_reg32(vm, a->vm);
+ fpst = fpstatus_ptr(FPST_FPCR);
+ if (a->s) {
+ /* i32 -> f64 */
+ gen_helper_vfp_sitod(vd, vm, fpst);
+ } else {
+ /* u32 -> f64 */
+ gen_helper_vfp_uitod(vd, vm, fpst);
+ }
+ vfp_store_reg64(vd, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_i64(vd);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
+{
+ TCGv_i32 vd;
+ TCGv_i64 vm;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ if (!dc_isar_feature(aa32_jscvt, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ vm = tcg_temp_new_i64();
+ vd = tcg_temp_new_i32();
+ vfp_load_reg64(vm, a->vm);
+ gen_helper_vjcvt(vd, vm, cpu_env);
+ vfp_store_reg32(vd, a->vd);
+ tcg_temp_free_i64(vm);
+ tcg_temp_free_i32(vd);
+ return true;
+}
+
+static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
+{
+ TCGv_i32 vd, shift;
+ TCGv_ptr fpst;
+ int frac_bits;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+ vd = tcg_temp_new_i32();
+ vfp_load_reg32(vd, a->vd);
+
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ shift = tcg_const_i32(frac_bits);
+
+ /* Switch on op:U:sx bits */
+ switch (a->opc) {
+ case 0:
+ gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 1:
+ gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 2:
+ gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 3:
+ gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 4:
+ gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 5:
+ gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 6:
+ gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 7:
+ gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ vfp_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
+{
+ TCGv_i32 vd, shift;
+ TCGv_ptr fpst;
+ int frac_bits;
+
+ if (!dc_isar_feature(aa32_fpsp_v3, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+ vd = tcg_temp_new_i32();
+ vfp_load_reg32(vd, a->vd);
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ shift = tcg_const_i32(frac_bits);
+
+ /* Switch on op:U:sx bits */
+ switch (a->opc) {
+ case 0:
+ gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 1:
+ gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 2:
+ gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 3:
+ gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 4:
+ gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 5:
+ gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 6:
+ gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 7:
+ gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ vfp_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
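+
+/*
+ * For illustration: a->opc packs the op:U:sx bits, so opc == 5
+ * (op = 1, U = 0, sx = 1) selects the signed float-to-32-bit-fixed
+ * conversion gen_helper_vfp_tosls_round_to_zero() above, operating
+ * with frac_bits == 32 - a->imm fractional bits.
+ */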
+
+static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
+{
+ TCGv_i64 vd;
+ TCGv_i32 shift;
+ TCGv_ptr fpst;
+ int frac_bits;
+
+ if (!dc_isar_feature(aa32_fpdp_v3, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
+
+ vd = tcg_temp_new_i64();
+ vfp_load_reg64(vd, a->vd);
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ shift = tcg_const_i32(frac_bits);
+
+ /* Switch on op:U:sx bits */
+ switch (a->opc) {
+ case 0:
+ gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 1:
+ gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 2:
+ gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 3:
+ gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
+ break;
+ case 4:
+ gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 5:
+ gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 6:
+ gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
+ break;
+ case 7:
+ gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ vfp_store_reg64(vd, a->vd);
+ tcg_temp_free_i64(vd);
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR_F16);
+ vm = tcg_temp_new_i32();
+ vfp_load_reg32(vm, a->vm);
+
+ if (a->s) {
+ if (a->rz) {
+ gen_helper_vfp_tosizh(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_tosih(vm, vm, fpst);
+ }
+ } else {
+ if (a->rz) {
+ gen_helper_vfp_touizh(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_touih(vm, vm, fpst);
+ }
+ }
+ vfp_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
+{
+ TCGv_i32 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ vm = tcg_temp_new_i32();
+ vfp_load_reg32(vm, a->vm);
+
+ if (a->s) {
+ if (a->rz) {
+ gen_helper_vfp_tosizs(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_tosis(vm, vm, fpst);
+ }
+ } else {
+ if (a->rz) {
+ gen_helper_vfp_touizs(vm, vm, fpst);
+ } else {
+ gen_helper_vfp_touis(vm, vm, fpst);
+ }
+ }
+ vfp_store_reg32(vm, a->vd);
+ tcg_temp_free_i32(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
+{
+ TCGv_i32 vd;
+ TCGv_i64 vm;
+ TCGv_ptr fpst;
+
+ if (!dc_isar_feature(aa32_fpdp_v2, s)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(FPST_FPCR);
+ vm = tcg_temp_new_i64();
+ vd = tcg_temp_new_i32();
+ vfp_load_reg64(vm, a->vm);
+
+ if (a->s) {
+ if (a->rz) {
+ gen_helper_vfp_tosizd(vd, vm, fpst);
+ } else {
+ gen_helper_vfp_tosid(vd, vm, fpst);
+ }
+ } else {
+ if (a->rz) {
+ gen_helper_vfp_touizd(vd, vm, fpst);
+ } else {
+ gen_helper_vfp_touid(vd, vm, fpst);
+ }
+ }
+ vfp_store_reg32(vd, a->vd);
+ tcg_temp_free_i32(vd);
+ tcg_temp_free_i64(vm);
+ tcg_temp_free_ptr(fpst);
+ return true;
+}
+
+static bool trans_VINS(DisasContext *s, arg_VINS *a)
+{
+ TCGv_i32 rd, rm;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Insert low half of Vm into high half of Vd */
+ rm = tcg_temp_new_i32();
+ rd = tcg_temp_new_i32();
+ vfp_load_reg32(rm, a->vm);
+ vfp_load_reg32(rd, a->vd);
+ tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
+ vfp_store_reg32(rd, a->vd);
+ tcg_temp_free_i32(rm);
+ tcg_temp_free_i32(rd);
+ return true;
+}
+
+static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
+{
+ TCGv_i32 rm;
+
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
+ return false;
+ }
+
+ if (s->vec_len != 0 || s->vec_stride != 0) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ /* Set Vd to high half of Vm */
+ rm = tcg_temp_new_i32();
+ vfp_load_reg32(rm, a->vm);
+ tcg_gen_shri_i32(rm, rm, 16);
+ vfp_store_reg32(rm, a->vd);
+ tcg_temp_free_i32(rm);
+ return true;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
new file mode 100644
index 000000000..98f592592
--- /dev/null
+++ b/target/arm/translate.c
@@ -0,0 +1,9923 @@
+/*
+ * ARM translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "internals.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "qemu/log.h"
+#include "qemu/bitops.h"
+#include "arm_ldst.h"
+#include "semihosting/semihost.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/log.h"
+
+
+#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
+/* currently all emulated v5 cores are also v5TE, so don't bother */
+#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
+#define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s)
+#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
+#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
+
+#include "translate.h"
+#include "translate-a32.h"
+
+/* These are TCG temporaries used only by the legacy iwMMXt decoder */
+static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
+/* These are TCG globals which alias CPUARMState fields */
+static TCGv_i32 cpu_R[16];
+TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
+TCGv_i64 cpu_exclusive_addr;
+TCGv_i64 cpu_exclusive_val;
+
+#include "exec/gen-icount.h"
+
+static const char * const regnames[] =
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
+
+
+/* initialize TCG globals. */
+void arm_translate_init(void)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUARMState, regs[i]),
+ regnames[i]);
+ }
+ cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
+ cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
+ cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
+ cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
+
+ cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
+ cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
+
+ a64_translate_init();
+}
+
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
+{
+ /* Expand the encoded constant as per AdvSIMDExpandImm pseudocode */
+ switch (cmode) {
+ case 0: case 1:
+ /* no-op */
+ break;
+ case 2: case 3:
+ imm <<= 8;
+ break;
+ case 4: case 5:
+ imm <<= 16;
+ break;
+ case 6: case 7:
+ imm <<= 24;
+ break;
+ case 8: case 9:
+ imm |= imm << 16;
+ break;
+ case 10: case 11:
+ imm = (imm << 8) | (imm << 24);
+ break;
+ case 12:
+ imm = (imm << 8) | 0xff;
+ break;
+ case 13:
+ imm = (imm << 16) | 0xffff;
+ break;
+ case 14:
+ if (op) {
+ /*
+ * This and cmode == 15 op == 1 are the only cases where
+ * the top and bottom 32 bits of the encoded constant differ.
+ */
+ uint64_t imm64 = 0;
+ int n;
+
+ for (n = 0; n < 8; n++) {
+ if (imm & (1 << n)) {
+ imm64 |= (0xffULL << (n * 8));
+ }
+ }
+ return imm64;
+ }
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
+ break;
+ case 15:
+ if (op) {
+ /* Reserved encoding for AArch32; valid for AArch64 */
+ uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+ if (imm & 0x80) {
+ imm64 |= 0x8000000000000000ULL;
+ }
+ if (imm & 0x40) {
+ imm64 |= 0x3fc0000000000000ULL;
+ } else {
+ imm64 |= 0x4000000000000000ULL;
+ }
+ return imm64;
+ }
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+ break;
+ }
+ if (op) {
+ imm = ~imm;
+ }
+ return dup_const(MO_32, imm);
+}
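+
+/*
+ * For illustration: with cmode == 12, op == 0 and imm == 0xab, the
+ * code above computes (0xab << 8) | 0xff == 0x0000abff, which
+ * dup_const() replicates to 0x0000abff0000abff. With cmode == 14 and
+ * op == 1, each of the eight low bits of imm instead selects a full
+ * 0xff or 0x00 byte of the 64-bit result.
+ */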
+
+/* Generate a label used for skipping this instruction */
+void arm_gen_condlabel(DisasContext *s)
+{
+ if (!s->condjmp) {
+ s->condlabel = gen_new_label();
+ s->condjmp = 1;
+ }
+}
+
+/* Flags for the disas_set_da_iss info argument:
+ * lower bits hold the Rt register number, higher bits are flags.
+ */
+typedef enum ISSInfo {
+ ISSNone = 0,
+ ISSRegMask = 0x1f,
+ ISSInvalid = (1 << 5),
+ ISSIsAcqRel = (1 << 6),
+ ISSIsWrite = (1 << 7),
+ ISSIs16Bit = (1 << 8),
+} ISSInfo;
+
+/* Save the syndrome information for a Data Abort */
+static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
+{
+ uint32_t syn;
+ int sas = memop & MO_SIZE;
+ bool sse = memop & MO_SIGN;
+ bool is_acqrel = issinfo & ISSIsAcqRel;
+ bool is_write = issinfo & ISSIsWrite;
+ bool is_16bit = issinfo & ISSIs16Bit;
+ int srt = issinfo & ISSRegMask;
+
+ if (issinfo & ISSInvalid) {
+ /* Some callsites want to conditionally provide ISS info,
+ * eg "only if this was not a writeback"
+ */
+ return;
+ }
+
+ if (srt == 15) {
+ /* For AArch32, insns where the src/dest is R15 never generate
+ * ISS information. Catching that here saves checking at all
+ * the call sites.
+ */
+ return;
+ }
+
+ syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
+ 0, 0, 0, is_write, 0, is_16bit);
+ disas_set_insn_syndrome(s, syn);
+}
+
+static inline int get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
+ case ARMMMUIdx_SE3:
+ case ARMMMUIdx_SE10_0:
+ case ARMMMUIdx_SE10_1:
+ case ARMMMUIdx_SE10_1_PAN:
+ return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
+ case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MPriv:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MSPriv:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
+ case ARMMMUIdx_MSUserNegPri:
+ case ARMMMUIdx_MSPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* The architectural value of PC. */
+static uint32_t read_pc(DisasContext *s)
+{
+ return s->pc_curr + (s->thumb ? 4 : 8);
+}
+
+/* Set a variable to the value of a CPU register. */
+void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
+{
+ if (reg == 15) {
+ tcg_gen_movi_i32(var, read_pc(s));
+ } else {
+ tcg_gen_mov_i32(var, cpu_R[reg]);
+ }
+}
+
+/*
+ * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
+ * This is used for load/store for which use of PC implies (literal),
+ * or ADD that implies ADR.
+ */
+TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ if (reg == 15) {
+ tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
+ } else {
+ tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+ }
+ return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+ marked as dead. */
+void store_reg(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15) {
+ /* In Thumb mode, we must ignore bit 0.
+ * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
+ * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
+ * We choose to ignore [1:0] in ARM mode for all architecture versions.
+ */
+ tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
+ s->base.is_jmp = DISAS_JUMP;
+ } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* For M-profile SP bits [1:0] are always zero */
+ tcg_gen_andi_i32(var, var, ~3);
+ }
+ tcg_gen_mov_i32(cpu_R[reg], var);
+ tcg_temp_free_i32(var);
+}
+
+/*
+ * Variant of store_reg which applies v8M stack-limit checks before updating
+ * SP. If the check fails this will result in an exception being taken.
+ * We disable the stack checks for CONFIG_USER_ONLY because we have
+ * no idea what the stack limits should be in that case.
+ * If stack checking is not being done this just acts like store_reg().
+ */
+static void store_sp_checked(DisasContext *s, TCGv_i32 var)
+{
+#ifndef CONFIG_USER_ONLY
+ if (s->v8m_stackcheck) {
+ gen_helper_v8m_stackcheck(cpu_env, var);
+ }
+#endif
+ store_reg(s, 13, var);
+}
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
+#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
+
+void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
+{
+ TCGv_i32 tmp_mask = tcg_const_i32(mask);
+ gen_helper_cpsr_write(cpu_env, var, tmp_mask);
+ tcg_temp_free_i32(tmp_mask);
+}
+
+static void gen_exception_internal(int excp)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_singlestep_exception(DisasContext *s)
+{
+    /* We just completed a step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_swstep_exception(s, 1, s->is_ldex);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+void clear_eci_state(DisasContext *s)
+{
+ /*
+ * Clear any ECI/ICI state: used when a load multiple/store
+ * multiple insn executes.
+ */
+ if (s->eci) {
+ store_cpu_field_constant(0, condexec_bits);
+ s->eci = 0;
+ }
+}
+
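+/*
+ * Dual signed 16x16->32 multiply: on return, a holds the product of
+ * the two low halves and b the product of the two high halves.
+ */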
+static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(tmp1, a);
+ tcg_gen_ext16s_i32(tmp2, b);
+ tcg_gen_mul_i32(tmp1, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_sari_i32(a, a, 16);
+ tcg_gen_sari_i32(b, b, 16);
+ tcg_gen_mul_i32(b, b, a);
+ tcg_gen_mov_i32(a, tmp1);
+ tcg_temp_free_i32(tmp1);
+}
+
+/* Byteswap each halfword. */
+void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
+ tcg_gen_shri_i32(tmp, var, 8);
+ tcg_gen_and_i32(tmp, tmp, mask);
+ tcg_gen_and_i32(var, var, mask);
+ tcg_gen_shli_i32(var, var, 8);
+ tcg_gen_or_i32(dest, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+/* Byteswap low halfword and sign extend. */
+static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
+{
+ tcg_gen_bswap16_i32(var, var, TCG_BSWAP_OS);
+}
+
+/* Dual 16-bit add. Result placed in dest; t0 and t1 are clobbered.
+   tmp = (t0 ^ t1) & 0x8000;
+   t0 &= ~0x8000;
+   t1 &= ~0x8000;
+   dest = (t0 + t1) ^ tmp;
+ */
+
+static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andi_i32(tmp, tmp, 0x8000);
+ tcg_gen_andi_i32(t0, t0, ~0x8000);
+ tcg_gen_andi_i32(t1, t1, ~0x8000);
+ tcg_gen_add_i32(t0, t0, t1);
+ tcg_gen_xor_i32(dest, t0, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+/* Set N and Z flags from var. */
+static inline void gen_logic_CC(TCGv_i32 var)
+{
+ tcg_gen_mov_i32(cpu_NF, var);
+ tcg_gen_mov_i32(cpu_ZF, var);
+}
+
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ tcg_gen_add_i32(dest, t0, t1);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
+}
+
+/* dest = T0 - T1 + CF - 1. */
+static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ tcg_gen_sub_i32(dest, t0, t1);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
+ tcg_gen_subi_i32(dest, dest, 1);
+}
+
+/* dest = T0 + T1. Compute C, N, V and Z flags */
+static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
+static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ if (TCG_TARGET_HAS_add2_i32) {
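+        /*
+         * Two add2 steps: NF:CF = t0 + CF, then NF:CF += t1, so that
+         * cpu_CF ends up holding the carry-out of the full
+         * t0 + t1 + CF addition.
+         */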
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
+ } else {
+ TCGv_i64 q0 = tcg_temp_new_i64();
+ TCGv_i64 q1 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(q0, t0);
+ tcg_gen_extu_i32_i64(q1, t1);
+ tcg_gen_add_i64(q0, q0, q1);
+ tcg_gen_extu_i32_i64(q1, cpu_CF);
+ tcg_gen_add_i64(q0, q0, q1);
+ tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
+ tcg_temp_free_i64(q0);
+ tcg_temp_free_i64(q1);
+ }
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 - T1. Compute C, N, V and Z flags */
+static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp;
+ tcg_gen_sub_i32(cpu_NF, t0, t1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
+static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_not_i32(tmp, t1);
+ gen_adc_CC(dest, t0, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
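+/*
+ * Variable shift by register, for LSL/LSR: Arm uses the bottom byte of
+ * the shift register, and any count over 31 must yield 0, so the
+ * movcond substitutes a zero operand before the 5-bit TCG shift.
+ * (gen_sar below instead clamps the count to 31, since an ASR by 32
+ * or more is the same as an ASR by 31.)
+ */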
+#define GEN_SHIFT(name) \
+static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
+{ \
+ TCGv_i32 tmp1, tmp2, tmp3; \
+ tmp1 = tcg_temp_new_i32(); \
+ tcg_gen_andi_i32(tmp1, t1, 0xff); \
+ tmp2 = tcg_const_i32(0); \
+ tmp3 = tcg_const_i32(0x1f); \
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
+ tcg_temp_free_i32(tmp3); \
+ tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
+ tcg_gen_##name##_i32(dest, tmp2, tmp1); \
+ tcg_temp_free_i32(tmp2); \
+ tcg_temp_free_i32(tmp1); \
+}
+GEN_SHIFT(shl)
+GEN_SHIFT(shr)
+#undef GEN_SHIFT
+
+static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp1, tmp2;
+ tmp1 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp1, t1, 0xff);
+ tmp2 = tcg_const_i32(0x1f);
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_sar_i32(dest, t0, tmp1);
+ tcg_temp_free_i32(tmp1);
+}
+
+static void shifter_out_im(TCGv_i32 var, int shift)
+{
+ tcg_gen_extract_i32(cpu_CF, var, shift, 1);
+}
+
+/* Shift by immediate. Includes special handling for shift == 0. */
+static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
+ int shift, int flags)
+{
+ switch (shiftop) {
+ case 0: /* LSL */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, 32 - shift);
+ tcg_gen_shli_i32(var, var, shift);
+ }
+ break;
+ case 1: /* LSR */
+ if (shift == 0) {
+ if (flags) {
+ tcg_gen_shri_i32(cpu_CF, var, 31);
+ }
+ tcg_gen_movi_i32(var, 0);
+ } else {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ tcg_gen_shri_i32(var, var, shift);
+ }
+ break;
+ case 2: /* ASR */
+ if (shift == 0)
+ shift = 32;
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ if (shift == 32)
+ shift = 31;
+ tcg_gen_sari_i32(var, var, shift);
+ break;
+ case 3: /* ROR/RRX */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+            tcg_gen_rotri_i32(var, var, shift);
+            break;
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_CF, 31);
+ if (flags)
+ shifter_out_im(var, 0);
+ tcg_gen_shri_i32(var, var, 1);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ }
+}
+
+static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
+ TCGv_i32 shift, int flags)
+{
+ if (flags) {
+ switch (shiftop) {
+ case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
+ }
+ } else {
+ switch (shiftop) {
+ case 0:
+ gen_shl(var, var, shift);
+ break;
+ case 1:
+ gen_shr(var, var, shift);
+ break;
+ case 2:
+ gen_sar(var, var, shift);
+ break;
+        case 3:
+            tcg_gen_andi_i32(shift, shift, 0x1f);
+            tcg_gen_rotr_i32(var, var, shift);
+            break;
+ }
+ }
+ tcg_temp_free_i32(shift);
+}
+
+/*
+ * Generate a conditional based on ARM condition code cc.
+ * This is common between ARM and AArch64 targets.
+ */
+void arm_test_cc(DisasCompare *cmp, int cc)
+{
+ TCGv_i32 value;
+ TCGCond cond;
+ bool global = true;
+
+ switch (cc) {
+ case 0: /* eq: Z */
+ case 1: /* ne: !Z */
+ cond = TCG_COND_EQ;
+ value = cpu_ZF;
+ break;
+
+ case 2: /* cs: C */
+ case 3: /* cc: !C */
+ cond = TCG_COND_NE;
+ value = cpu_CF;
+ break;
+
+ case 4: /* mi: N */
+ case 5: /* pl: !N */
+ cond = TCG_COND_LT;
+ value = cpu_NF;
+ break;
+
+ case 6: /* vs: V */
+ case 7: /* vc: !V */
+ cond = TCG_COND_LT;
+ value = cpu_VF;
+ break;
+
+ case 8: /* hi: C && !Z */
+ case 9: /* ls: !C || Z -> !(C && !Z) */
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* CF is 1 for C, so -CF is an all-bits-set mask for C;
+ ZF is non-zero for !Z; so AND the two subexpressions. */
+ tcg_gen_neg_i32(value, cpu_CF);
+ tcg_gen_and_i32(value, value, cpu_ZF);
+ break;
+
+ case 10: /* ge: N == V -> N ^ V == 0 */
+ case 11: /* lt: N != V -> N ^ V != 0 */
+ /* Since we're only interested in the sign bit, == 0 is >= 0. */
+ cond = TCG_COND_GE;
+ value = tcg_temp_new_i32();
+ global = false;
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+ break;
+
+ case 12: /* gt: !Z && N == V */
+ case 13: /* le: Z || N != V */
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
+ * the sign bit then AND with ZF to yield the result. */
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+ tcg_gen_sari_i32(value, value, 31);
+ tcg_gen_andc_i32(value, cpu_ZF, value);
+ break;
+
+ case 14: /* always */
+ case 15: /* always */
+ /* Use the ALWAYS condition, which will fold early.
+ * It doesn't matter what we use for the value. */
+ cond = TCG_COND_ALWAYS;
+ value = cpu_ZF;
+ goto no_invert;
+
+ default:
+ fprintf(stderr, "Bad condition code 0x%x\n", cc);
+ abort();
+ }
+
+ if (cc & 1) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ no_invert:
+ cmp->cond = cond;
+ cmp->value = value;
+ cmp->value_global = global;
+}
+
+void arm_free_cc(DisasCompare *cmp)
+{
+ if (!cmp->value_global) {
+ tcg_temp_free_i32(cmp->value);
+ }
+}
+
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
+{
+ tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
+}
+
+void arm_gen_test_cc(int cc, TCGLabel *label)
+{
+ DisasCompare cmp;
+ arm_test_cc(&cmp, cc);
+ arm_jump_cc(&cmp, label);
+ arm_free_cc(&cmp);
+}
+
+void gen_set_condexec(DisasContext *s)
+{
+ if (s->condexec_mask) {
+ uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+
+ store_cpu_field_constant(val, condexec_bits);
+ }
+}
+
+void gen_set_pc_im(DisasContext *s, target_ulong val)
+{
+ tcg_gen_movi_i32(cpu_R[15], val);
+}
+
+/* Set PC and Thumb state from var. var is marked as dead. */
+static inline void gen_bx(DisasContext *s, TCGv_i32 var)
+{
+ s->base.is_jmp = DISAS_JUMP;
+ tcg_gen_andi_i32(cpu_R[15], var, ~1);
+ tcg_gen_andi_i32(var, var, 1);
+ store_cpu_field(var, thumb);
+}
+
+/*
+ * Set PC and Thumb state from var. var is marked as dead.
+ * For M-profile CPUs, include logic to detect exception-return
+ * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
+ * and BX reg, and no others, and happens only for code in Handler mode.
+ * The Security Extension also requires us to check for the FNC_RETURN
+ * which signals a function return from non-secure state; this can happen
+ * in both Handler and Thread mode.
+ * To avoid having to do multiple comparisons in inline generated code,
+ * we make the check we do here loose, so it will match for EXC_RETURN
+ * in Thread mode. For system emulation do_v7m_exception_exit() checks
+ * for these spurious cases and returns without doing anything (giving
+ * the same behaviour as for a branch to a non-magic address).
+ *
+ * In linux-user mode it is unclear what the right behaviour for an
+ * attempted FNC_RETURN should be, because in real hardware this will go
+ * directly to Secure code (ie not the Linux kernel) which will then treat
+ * the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
+ * attempt behave the way it would on a CPU without the security extension,
+ * which is to say "like a normal branch". That means we can simply treat
+ * all branches as normal with no magic address behaviour.
+ */
+static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
+{
+ /* Generate the same code here as for a simple bx, but flag via
+ * s->base.is_jmp that we need to do the rest of the work later.
+ */
+ gen_bx(s, var);
+#ifndef CONFIG_USER_ONLY
+ if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
+ (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
+ s->base.is_jmp = DISAS_BX_EXCRET;
+ }
+#endif
+}
+
+static inline void gen_bx_excret_final_code(DisasContext *s)
+{
+ /* Generate the code to finish possible exception return and end the TB */
+ TCGLabel *excret_label = gen_new_label();
+ uint32_t min_magic;
+
+ if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
+ /* Covers FNC_RETURN and EXC_RETURN magic */
+ min_magic = FNC_RETURN_MIN_MAGIC;
+ } else {
+ /* EXC_RETURN magic only */
+ min_magic = EXC_RETURN_MIN_MAGIC;
+ }
+
+ /* Is the new PC value in the magic range indicating exception return? */
+ tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
+ /* No: end the TB as we would for a DISAS_JMP */
+ if (s->ss_active) {
+ gen_singlestep_exception(s);
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ gen_set_label(excret_label);
+ /* Yes: this is an exception return.
+ * At this point in runtime env->regs[15] and env->thumb will hold
+ * the exception-return magic number, which do_v7m_exception_exit()
+ * will read. Nothing else will be able to see those values because
+ * the cpu-exec main loop guarantees that we will always go straight
+ * from raising the exception to the exception-handling code.
+ *
+ * gen_ss_advance(s) does nothing on M profile currently but
+ * calling it is conceptually the right thing as we have executed
+ * this instruction (compare SWI, HVC, SMC handling).
+ */
+ gen_ss_advance(s);
+ gen_exception_internal(EXCP_EXCEPTION_EXIT);
+}
+
+static inline void gen_bxns(DisasContext *s, int rm)
+{
+ TCGv_i32 var = load_reg(s, rm);
+
+ /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
+ * we need to sync state before calling it, but:
+ * - we don't need to do gen_set_pc_im() because the bxns helper will
+ * always set the PC itself
+ * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
+ * unless it's outside an IT block or the last insn in an IT block,
+ * so we know that condexec == 0 (already set at the top of the TB)
+ * is correct in the non-UNPREDICTABLE cases, and we can choose
+ * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
+ */
+ gen_helper_v7m_bxns(cpu_env, var);
+ tcg_temp_free_i32(var);
+ s->base.is_jmp = DISAS_EXIT;
+}
+
+static inline void gen_blxns(DisasContext *s, int rm)
+{
+ TCGv_i32 var = load_reg(s, rm);
+
+ /* We don't need to sync condexec state, for the same reason as bxns.
+ * We do however need to set the PC, because the blxns helper reads it.
+ * The blxns helper may throw an exception.
+ */
+ gen_set_pc_im(s, s->base.pc_next);
+ gen_helper_v7m_blxns(cpu_env, var);
+ tcg_temp_free_i32(var);
+ s->base.is_jmp = DISAS_EXIT;
+}
+
+/* Variant of store_reg which uses branch&exchange logic when storing
+ to r15 in ARM architecture v7 and above. The source must be a temporary
+ and will be marked as dead. */
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15 && ENABLE_ARCH_7) {
+ gen_bx(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
+/* Variant of store_reg which uses branch&exchange logic when storing
+ * to r15 in ARM architecture v5T and above. This is used for storing
+ * the results of a LDR/LDM/POP into r15, and corresponds to the cases
+ * in the ARM ARM which use the LoadWritePC() pseudocode function. */
+static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15 && ENABLE_ARCH_5) {
+ gen_bx_excret(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
+#ifdef CONFIG_USER_ONLY
+#define IS_USER_ONLY 1
+#else
+#define IS_USER_ONLY 0
+#endif
+
+MemOp pow2_align(unsigned i)
+{
+ static const MemOp mop_align[] = {
+ 0, MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16,
+ /*
+ * FIXME: TARGET_PAGE_BITS_MIN affects TLB_FLAGS_MASK such
+ * that 256-bit alignment (MO_ALIGN_32) cannot be supported:
+ * see get_alignment_bits(). Enforce only 128-bit alignment for now.
+ */
+ MO_ALIGN_16
+ };
+ g_assert(i < ARRAY_SIZE(mop_align));
+ return mop_align[i];
+}
+
+/*
+ * Abstractions of "generate code to do a guest load/store for
+ * AArch32", where a vaddr is always 32 bits (and is zero
+ * extended if we're a 64 bit core) and data is also
+ * 32 bits unless specifically doing a 64 bit access.
+ * These functions work like tcg_gen_qemu_{ld,st}* except
+ * that the address argument is TCGv_i32 rather than TCGv.
+ */
+
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_extu_i32_tl(addr, a32);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
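+    /*
+     * The BE32 swizzle XORs the low address bits for sub-word accesses:
+     * e.g. a byte access XORs the address with 3, a halfword with 2.
+     */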
+ if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
+ tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
+ }
+ return addr;
+}
+
+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
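+        /* BE32: the two 32-bit halves of a doubleword are swapped. */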
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+ tcg_temp_free(addr);
+}
+
+void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
+ }
+ tcg_temp_free(addr);
+}
+
+void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}
+
+void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
+}
+
+void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_ld_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc)
+{
+ gen_aa32_st_internal_i64(s, val, a32, index, finalize_memop(s, opc));
+}
+
+#define DO_GEN_LD(SUFF, OPC) \
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
+ }
+
+#define DO_GEN_ST(SUFF, OPC) \
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
+ }
+
+static inline void gen_hvc(DisasContext *s, int imm16)
+{
+ /* The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration (ie before
+ * the insn really executes).
+ */
+ gen_set_pc_im(s, s->pc_curr);
+ gen_helper_pre_hvc(cpu_env);
+ /* Otherwise we will treat this as a real exception which
+ * happens after execution of the insn. (The distinction matters
+ * for the PC value reported to the exception handler and also
+ * for single stepping.)
+ */
+ s->svc_imm = imm16;
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_HVC;
+}
+
+static inline void gen_smc(DisasContext *s)
+{
+ /* As with HVC, we may take an exception either before or after
+ * the insn executes.
+ */
+ TCGv_i32 tmp;
+
+ gen_set_pc_im(s, s->pc_curr);
+ tmp = tcg_const_i32(syn_aa32_smc());
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_SMC;
+}
+
+static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, pc);
+ gen_exception_internal(excp);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
+ uint32_t syn, uint32_t target_el)
+{
+ if (s->aarch64) {
+ gen_a64_set_pc_im(pc);
+ } else {
+ gen_set_condexec(s);
+ gen_set_pc_im(s, pc);
+ }
+ gen_exception(excp, syn, target_el);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
+{
+ TCGv_i32 tcg_syn;
+
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tcg_syn = tcg_const_i32(syn);
+ gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
+ tcg_temp_free_i32(tcg_syn);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
+static void gen_exception_el(DisasContext *s, int excp, uint32_t syn,
+ TCGv_i32 tcg_el)
+{
+ TCGv_i32 tcg_excp;
+ TCGv_i32 tcg_syn;
+
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tcg_excp = tcg_const_i32(excp);
+ tcg_syn = tcg_const_i32(syn);
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn, tcg_el);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+void gen_lookup_tb(DisasContext *s)
+{
+ tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
+ s->base.is_jmp = DISAS_EXIT;
+}
+
+static inline void gen_hlt(DisasContext *s, int imm)
+{
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+     * Since QEMU doesn't implement external debug, we treat this as
+     * the architecture requires when halting debug is disabled: it
+     * will UNDEF.
+ * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
+ * and "HLT 0xF000" is an A32 semihosting syscall. These traps
+ * must trigger semihosting even for ARMv7 and earlier, where
+ * HLT was an undefined encoding.
+ * In system mode, we don't allow userspace access to
+ * semihosting, to provide some semblance of security
+ * (and for consistency with our 32-bit semihosting).
+ */
+ if (semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+ s->current_el != 0 &&
+#endif
+ (imm == (s->thumb ? 0x3c : 0xf000))) {
+ gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+ return;
+ }
+
+ unallocated_encoding(s);
+}
+
+/*
+ * Return the offset of a "full" NEON Dreg.
+ */
+long neon_full_reg_offset(unsigned reg)
+{
+ return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
+}
+
+/*
+ * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
+ * where 0 is the least significant end of the register.
+ */
+long neon_element_offset(int reg, int element, MemOp memop)
+{
+ int element_size = 1 << (memop & MO_SIZE);
+ int ofs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+ /*
+ * Calculate the offset assuming fully little-endian,
+ * then XOR to account for the order of the 8-byte units.
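+     * For example, element 0 of a 32-bit element access lands at byte
+     * offset 4 of its 8-byte unit: 0 ^ (8 - 4).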
+ */
+ if (element_size < 8) {
+ ofs ^= 8 - element_size;
+ }
+#endif
+ return neon_full_reg_offset(reg) + ofs;
+}
+
+/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
+long vfp_reg_offset(bool dp, unsigned reg)
+{
+ if (dp) {
+ return neon_element_offset(reg, 0, MO_64);
+ } else {
+ return neon_element_offset(reg >> 1, reg & 1, MO_32);
+ }
+}
+
+void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_SB:
+ tcg_gen_ld8s_i32(dest, cpu_env, off);
+ break;
+ case MO_UB:
+ tcg_gen_ld8u_i32(dest, cpu_env, off);
+ break;
+ case MO_SW:
+ tcg_gen_ld16s_i32(dest, cpu_env, off);
+ break;
+ case MO_UW:
+ tcg_gen_ld16u_i32(dest, cpu_env, off);
+ break;
+ case MO_UL:
+ case MO_SL:
+ tcg_gen_ld_i32(dest, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_SL:
+ tcg_gen_ld32s_i64(dest, cpu_env, off);
+ break;
+ case MO_UL:
+ tcg_gen_ld32u_i64(dest, cpu_env, off);
+ break;
+ case MO_Q:
+ tcg_gen_ld_i64(dest, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_8:
+ tcg_gen_st8_i32(src, cpu_env, off);
+ break;
+ case MO_16:
+ tcg_gen_st16_i32(src, cpu_env, off);
+ break;
+ case MO_32:
+ tcg_gen_st_i32(src, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
+{
+ long off = neon_element_offset(reg, ele, memop);
+
+ switch (memop) {
+ case MO_32:
+ tcg_gen_st32_i64(src, cpu_env, off);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(src, cpu_env, off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
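+/* Coprocessor insn bit 20: set for loads/reads, clear for stores/writes */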
+#define ARM_CP_RW_BIT (1 << 20)
+
+static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+static inline TCGv_i32 iwmmxt_load_creg(int reg)
+{
+ TCGv_i32 var = tcg_temp_new_i32();
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ return var;
+}
+
+static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ tcg_temp_free_i32(var);
+}
+
+static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
+{
+ iwmmxt_store_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+#define IWMMXT_OP(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
+
+#define IWMMXT_OP_ENV1(name) \
+static inline void gen_op_iwmmxt_##name##_M0(void) \
+{ \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+}
+
+IWMMXT_OP(maddsq)
+IWMMXT_OP(madduq)
+IWMMXT_OP(sadb)
+IWMMXT_OP(sadw)
+IWMMXT_OP(mulslw)
+IWMMXT_OP(mulshw)
+IWMMXT_OP(mululw)
+IWMMXT_OP(muluhw)
+IWMMXT_OP(macsw)
+IWMMXT_OP(macuw)
+
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
+
+IWMMXT_OP_ENV(packuw)
+IWMMXT_OP_ENV(packul)
+IWMMXT_OP_ENV(packuq)
+IWMMXT_OP_ENV(packsw)
+IWMMXT_OP_ENV(packsl)
+IWMMXT_OP_ENV(packsq)
+
+static void gen_op_iwmmxt_set_mup(void)
+{
+ TCGv_i32 tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 2);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_set_cup(void)
+{
+ TCGv_i32 tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 1);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_setpsr_nz(void)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
+}
+
+static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
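+/*
+ * Compute the effective address of an iwMMXt load/store into dest,
+ * handling pre/post-indexing and base register writeback.
+ * Returns nonzero for an invalid addressing form.
+ */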
+static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
+ TCGv_i32 dest)
+{
+ int rd;
+ uint32_t offset;
+ TCGv_i32 tmp;
+
+ rd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+
+ offset = (insn & 0xff) << ((insn >> 7) & 2);
+ if (insn & (1 << 24)) {
+ /* Pre indexed */
+ if (insn & (1 << 23))
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ else
+ tcg_gen_addi_i32(tmp, tmp, -offset);
+ tcg_gen_mov_i32(dest, tmp);
+ if (insn & (1 << 21))
+ store_reg(s, rd, tmp);
+ else
+ tcg_temp_free_i32(tmp);
+ } else if (insn & (1 << 21)) {
+ /* Post indexed */
+ tcg_gen_mov_i32(dest, tmp);
+ if (insn & (1 << 23))
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ else
+ tcg_gen_addi_i32(tmp, tmp, -offset);
+ store_reg(s, rd, tmp);
+ } else if (!(insn & (1 << 23)))
+ return 1;
+ return 0;
+}
+
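+/*
+ * Fetch the shift amount for an iwMMXt shift insn: from a wCGR control
+ * register if insn bit 8 is set, otherwise from the low half of wRd.
+ * The value is masked into dest; returns nonzero for an invalid register.
+ */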
+static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
+{
+ int rd = (insn >> 0) & 0xf;
+ TCGv_i32 tmp;
+
+ if (insn & (1 << 8)) {
+ if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
+ return 1;
+ } else {
+ tmp = iwmmxt_load_creg(rd);
+ }
+ } else {
+ tmp = tcg_temp_new_i32();
+ iwmmxt_load_reg(cpu_V0, rd);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ }
+ tcg_gen_andi_i32(tmp, tmp, mask);
+ tcg_gen_mov_i32(dest, tmp);
+ tcg_temp_free_i32(tmp);
+ return 0;
+}
+
+/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
+   (i.e. an undefined instruction). */
+static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
+{
+ int rd, wrd;
+ int rdhi, rdlo, rd0, rd1, i;
+ TCGv_i32 addr;
+ TCGv_i32 tmp, tmp2, tmp3;
+
+ if ((insn & 0x0e000e00) == 0x0c000000) {
+ if ((insn & 0x0fe00ff0) == 0x0c400000) {
+ wrd = insn & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ rdhi = (insn >> 16) & 0xf;
+ if (insn & ARM_CP_RW_BIT) { /* TMRRC */
+ iwmmxt_load_reg(cpu_V0, wrd);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
+ } else { /* TMCRR */
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
+ iwmmxt_store_reg(cpu_V0, wrd);
+ gen_op_iwmmxt_set_mup();
+ }
+ return 0;
+ }
+
+ wrd = (insn >> 12) & 0xf;
+ addr = tcg_temp_new_i32();
+ if (gen_iwmmxt_address(s, insn, addr)) {
+ tcg_temp_free_i32(addr);
+ return 1;
+ }
+ if (insn & ARM_CP_RW_BIT) {
+ if ((insn >> 28) == 0xf) { /* WLDRW wCx */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ iwmmxt_store_creg(wrd, tmp);
+ } else {
+ i = 1;
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WLDRD */
+ gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
+ i = 0;
+ } else { /* WLDRW wRd */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ }
+ } else {
+ tmp = tcg_temp_new_i32();
+ if (insn & (1 << 22)) { /* WLDRH */
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ } else { /* WLDRB */
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ }
+ }
+ if (i) {
+ tcg_gen_extu_i32_i64(cpu_M0, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ }
+ } else {
+ if ((insn >> 28) == 0xf) { /* WSTRW wCx */
+ tmp = iwmmxt_load_creg(wrd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ } else {
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = tcg_temp_new_i32();
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WSTRD */
+ gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
+ } else { /* WSTRW wRd */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ }
+ } else {
+ if (insn & (1 << 22)) { /* WSTRH */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ } else { /* WSTRB */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ }
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ return 0;
+ }
+
+ if ((insn & 0x0f000000) != 0x0e000000)
+ return 1;
+
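+    /* Dispatch key: insn bits [23:20] in key bits [11:8], insn [11:4] in [7:0] */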
+ switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
+ case 0x000: /* WOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_orq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x011: /* TMCR */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ switch (wrd) {
+ case ARM_IWMMXT_wCID:
+ case ARM_IWMMXT_wCASF:
+ break;
+ case ARM_IWMMXT_wCon:
+ gen_op_iwmmxt_set_cup();
+ /* Fall through. */
+ case ARM_IWMMXT_wCSSF:
+ tmp = iwmmxt_load_creg(wrd);
+ tmp2 = load_reg(s, rd);
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ iwmmxt_store_creg(wrd, tmp);
+ break;
+ case ARM_IWMMXT_wCGR0:
+ case ARM_IWMMXT_wCGR1:
+ case ARM_IWMMXT_wCGR2:
+ case ARM_IWMMXT_wCGR3:
+ gen_op_iwmmxt_set_cup();
+ tmp = load_reg(s, rd);
+ iwmmxt_store_creg(wrd, tmp);
+ break;
+ default:
+ return 1;
+ }
+ break;
+ case 0x100: /* WXOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_xorq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x111: /* TMRC */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = iwmmxt_load_creg(wrd);
+ store_reg(s, rd, tmp);
+ break;
+ case 0x300: /* WANDN */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+        tcg_gen_not_i64(cpu_M0, cpu_M0);    /* WANDN is wRn & ~wRm: complement, not negate */
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x200: /* WAND */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x810: case 0xa10: /* WMADD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maddsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_madduq_M0_wRn(rd1);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackll_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22))
+ gen_op_iwmmxt_sadw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_sadb_M0_wRn(rd1);
+ if (!(insn & (1 << 20)))
+ gen_op_iwmmxt_addl_M0_wRn(wrd);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_mulshw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mulslw_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_muluhw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mululw_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_macsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_macuw_M0_wRn(rd1);
+ if (!(insn & (1 << 20))) {
+ iwmmxt_load_reg(cpu_V1, wrd);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgw1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgw0_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgb1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgb0_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
+ tcg_gen_andi_i32(tmp, tmp, 7);
+ iwmmxt_load_reg(cpu_V1, rd1);
+ gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
+ if (((insn >> 6) & 3) == 3)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ tmp2 = tcg_const_i32(0xff);
+ tmp3 = tcg_const_i32((insn & 7) << 3);
+ break;
+ case 1:
+ tmp2 = tcg_const_i32(0xffff);
+ tmp3 = tcg_const_i32((insn & 3) << 4);
+ break;
+ case 2:
+ tmp2 = tcg_const_i32(0xffffffff);
+ tmp3 = tcg_const_i32((insn & 1) << 5);
+ break;
+ default:
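+                /* Unreachable: the size == 3 encoding was rejected above */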
+ tmp2 = NULL;
+ tmp3 = NULL;
+ }
+ gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ if (rd == 15 || ((insn >> 22) & 3) == 3)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 0:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ if (insn & 8) {
+ tcg_gen_ext8s_i32(tmp, tmp);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, 0xff);
+ }
+ break;
+ case 1:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ if (insn & 8) {
+ tcg_gen_ext16s_i32(tmp, tmp);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ }
+ break;
+ case 2:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
+ if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
+ break;
+ case 1:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
+ break;
+ case 2:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
+ break;
+ }
+ tcg_gen_shli_i32(tmp, tmp, 28);
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
+ if (((insn >> 6) & 3) == 3)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
+ break;
+ case 1:
+ gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
+ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ switch ((insn >> 22) & 3) {
+ case 0:
+                for (i = 0; i < 7; i++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 4);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 1:
+                for (i = 0; i < 3; i++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 8);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 2:
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ break;
+ }
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
+ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ switch ((insn >> 22) & 3) {
+ case 0:
+                for (i = 0; i < 7; i++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 4);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 1:
+                for (i = 0; i < 3; i++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 8);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 2:
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ break;
+ }
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
+ rd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_msbb(tmp, cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_msbw(tmp, cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_msbl(tmp, cpu_M0);
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
+ case 0x906: case 0xb06: case 0xd06: case 0xf06:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
+ case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsb_M0();
+ else
+ gen_op_iwmmxt_unpacklub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsw_M0();
+ else
+ gen_op_iwmmxt_unpackluw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsl_M0();
+ else
+ gen_op_iwmmxt_unpacklul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
+ case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsb_M0();
+ else
+ gen_op_iwmmxt_unpackhub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsw_M0();
+ else
+ gen_op_iwmmxt_unpackhuw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsl_M0();
+ else
+ gen_op_iwmmxt_unpackhul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
+ case 0x214: case 0x614: case 0xa14: case 0xe14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
+ case 0x014: case 0x414: case 0x814: case 0xc14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
+ case 0x114: case 0x514: case 0x914: case 0xd14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
+ case 0x314: case 0x714: case 0xb14: case 0xf14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 1:
+ if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
+ case 0x916: case 0xb16: case 0xd16: case 0xf16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
+ case 0x816: case 0xa16: case 0xc16: case 0xe16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
+ case 0x402: case 0x502: case 0x602: case 0x702:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_const_i32((insn >> 20) & 3);
+ iwmmxt_load_reg(cpu_V1, rd1);
+ gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
+ case 0x41a: case 0x51a: case 0x61a: case 0x71a:
+ case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
+ case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_subnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_subub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_subsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_subnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_subuw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_subsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_subnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_subul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_subsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
+ case 0x41e: case 0x51e: case 0x61e: case 0x71e:
+ case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
+ case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
+ gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
+ case 0x418: case 0x518: case 0x618: case 0x718:
+ case 0x818: case 0x918: case 0xa18: case 0xb18:
+ case 0xc18: case 0xd18: case 0xe18: case 0xf18:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_addnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_addub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_addsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_addnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_adduw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_addsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_addnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_addul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_addsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
+ case 0x408: case 0x508: case 0x608: case 0x708:
+ case 0x808: case 0x908: case 0xa08: case 0xb08:
+ case 0xc08: case 0xd08: case 0xe08: case 0xf08:
+ if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packul_M0_wRn(rd1);
+ break;
+ case 3:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuq_M0_wRn(rd1);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x201: case 0x203: case 0x205: case 0x207:
+ case 0x209: case 0x20b: case 0x20d: case 0x20f:
+ case 0x211: case 0x213: case 0x215: case 0x217:
+ case 0x219: case 0x21b: case 0x21d: case 0x21f:
+ wrd = (insn >> 5) & 0xf;
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ if (rd0 == 0xf || rd1 == 0xf)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = load_reg(s, rd0);
+ tmp2 = load_reg(s, rd1);
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* TMIA */
+ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0x8: /* TMIAPH */
+ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
+ if (insn & (1 << 16))
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ if (insn & (1 << 17))
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ default:
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
+ (i.e. an undefined instruction). */
+static int disas_dsp_insn(DisasContext *s, uint32_t insn)
+{
+ int acc, rd0, rd1, rdhi, rdlo;
+ TCGv_i32 tmp, tmp2;
+
+ if ((insn & 0x0ff00f10) == 0x0e200010) {
+ /* Multiply with Internal Accumulate Format */
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = insn & 0xf;
+ acc = (insn >> 5) & 7;
+
+ if (acc != 0)
+ return 1;
+
+ tmp = load_reg(s, rd0);
+ tmp2 = load_reg(s, rd1);
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* MIA */
+ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0x8: /* MIAPH */
+ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0xc: /* MIABB */
+ case 0xd: /* MIABT */
+ case 0xe: /* MIATB */
+ case 0xf: /* MIATT */
+ if (insn & (1 << 16))
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ if (insn & (1 << 17))
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ default:
+ return 1;
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+
+ gen_op_iwmmxt_movq_wRn_M0(acc);
+ return 0;
+ }
+
+ if ((insn & 0x0fe00ff8) == 0x0c400000) {
+ /* Internal Accumulator Access Format */
+ rdhi = (insn >> 16) & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ acc = insn & 7;
+
+ if (acc != 0)
+ return 1;
+
+ if (insn & ARM_CP_RW_BIT) { /* MRA */
+ iwmmxt_load_reg(cpu_V0, acc);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
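+ /* acc0 is a 40-bit accumulator: keep only bits [39:32] in rdhi. */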
+ tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
+ } else { /* MAR */
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
+ iwmmxt_store_reg(cpu_V0, acc);
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+static void gen_goto_ptr(void)
+{
+ tcg_gen_lookup_and_goto_ptr();
+}
+
+/* This will end the TB but doesn't guarantee we'll return to
+ * cpu_loop_exec. Any live exit_requests will be processed as we
+ * enter the next TB.
+ */
+static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&s->base, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_set_pc_im(s, dest);
+ tcg_gen_exit_tb(s->base.tb, n);
+ } else {
+ gen_set_pc_im(s, dest);
+ gen_goto_ptr();
+ }
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Jump, specifying which TB number to use if we gen_goto_tb() */
+static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
+{
+ if (unlikely(s->ss_active)) {
+ /* An indirect jump so that we still trigger the debug exception. */
+ gen_set_pc_im(s, dest);
+ s->base.is_jmp = DISAS_JUMP;
+ return;
+ }
+ switch (s->base.is_jmp) {
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ case DISAS_NORETURN:
+ /*
+ * The normal case: just go to the destination TB.
+ * NB: NORETURN happens if we generate code like
+ * gen_brcondi(l);
+ * gen_jmp();
+ * gen_set_label(l);
+ * gen_jmp();
+ * on the second call to gen_jmp().
+ */
+ gen_goto_tb(s, tbno, dest);
+ break;
+ case DISAS_UPDATE_NOCHAIN:
+ case DISAS_UPDATE_EXIT:
+ /*
+ * We already decided we're leaving the TB for some other reason.
+ * Avoid using goto_tb so we really do exit back to the main loop
+ * and don't chain to another TB.
+ */
+ gen_set_pc_im(s, dest);
+ gen_goto_ptr();
+ s->base.is_jmp = DISAS_NORETURN;
+ break;
+ default:
+ /*
+ * We shouldn't be emitting code for a jump and also have
+ * is_jmp set to one of the special cases like DISAS_SWI.
+ */
+ g_assert_not_reached();
+ }
+}
+
+static inline void gen_jmp(DisasContext *s, uint32_t dest)
+{
+ gen_jmp_tb(s, dest, 0);
+}
+
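+/*
+ * 16x16->32 signed multiply used by the SMULxy/SMLAxy family:
+ * x and y select the top (1) or bottom (0) halfword of each operand.
+ */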
+static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
+{
+ if (x)
+ tcg_gen_sari_i32(t0, t0, 16);
+ else
+ gen_sxth(t0);
+ if (y)
+ tcg_gen_sari_i32(t1, t1, 16);
+ else
+ gen_sxth(t1);
+ tcg_gen_mul_i32(t0, t0, t1);
+}
+
+/* Return the mask of PSR bits set by a MSR instruction. */
+static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
+{
+ uint32_t mask = 0;
+
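+ /*
+ * flags is the MSR <fields> specifier: bit 0 = c (PSR[7:0]),
+ * bit 1 = x (PSR[15:8]), bit 2 = s (PSR[23:16]), bit 3 = f (PSR[31:24]).
+ */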
+ if (flags & (1 << 0)) {
+ mask |= 0xff;
+ }
+ if (flags & (1 << 1)) {
+ mask |= 0xff00;
+ }
+ if (flags & (1 << 2)) {
+ mask |= 0xff0000;
+ }
+ if (flags & (1 << 3)) {
+ mask |= 0xff000000;
+ }
+
+ /* Mask out undefined and reserved bits. */
+ mask &= aarch32_cpsr_valid_mask(s->features, s->isar);
+
+ /* Mask out execution state. */
+ if (!spsr) {
+ mask &= ~CPSR_EXEC;
+ }
+
+ /* Mask out privileged bits. */
+ if (IS_USER(s)) {
+ mask &= CPSR_USER;
+ }
+ return mask;
+}
+
+/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
+static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
+{
+ TCGv_i32 tmp;
+ if (spsr) {
+ /* ??? This is also undefined in system mode. */
+ if (IS_USER(s))
+ return 1;
+
+ tmp = load_cpu_field(spsr);
+ tcg_gen_andi_i32(tmp, tmp, ~mask);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_or_i32(tmp, tmp, t0);
+ store_cpu_field(tmp, spsr);
+ } else {
+ gen_set_cpsr(t0, mask);
+ }
+ tcg_temp_free_i32(t0);
+ gen_lookup_tb(s);
+ return 0;
+}
+
+/* Returns nonzero if access to the PSR is not permitted. */
+static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
+{
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ return gen_set_psr(s, mask, spsr, tmp);
+}
+
+static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
+ int *tgtmode, int *regno)
+{
+ /* Decode the r and sysm fields of MSR/MRS banked accesses into
+ * the target mode and register number, and identify the various
+ * unpredictable cases.
+ * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
+ * + executed in user mode
+ * + using R15 as the src/dest register
+ * + accessing an unimplemented register
+ * + accessing a register that's inaccessible at current PL/security state
+ * + accessing a register that you could access with a different insn
+ * We choose to UNDEF in all these cases.
+ * Since we don't know which of the various AArch32 modes we are in
+ * we have to defer some checks to runtime.
+ * Accesses to Monitor mode registers from Secure EL1 (which implies
+ * that EL3 is AArch64) must trap to EL3.
+ *
+ * If the access checks fail this function will emit code to take
+ * an exception and return false. Otherwise it will return true,
+ * and set *tgtmode and *regno appropriately.
+ */
+ int exc_target = default_exception_el(s);
+
+ /* These instructions are present only in ARMv8, or in ARMv7 with the
+ * Virtualization Extensions.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
+ !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ goto undef;
+ }
+
+ if (IS_USER(s) || rn == 15) {
+ goto undef;
+ }
+
+ /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
+ * of registers into (r, sysm).
+ */
+ if (r) {
+ /* SPSRs for other modes */
+ switch (sysm) {
+ case 0xe: /* SPSR_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ break;
+ case 0x10: /* SPSR_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ break;
+ case 0x12: /* SPSR_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ break;
+ case 0x14: /* SPSR_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ break;
+ case 0x16: /* SPSR_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ break;
+ case 0x1c: /* SPSR_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ break;
+ case 0x1e: /* SPSR_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ /* We arbitrarily assign SPSR a register number of 16. */
+ *regno = 16;
+ } else {
+ /* general purpose registers for other modes */
+ switch (sysm) {
+ case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
+ *tgtmode = ARM_CPU_MODE_USR;
+ *regno = sysm + 8;
+ break;
+ case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ *regno = sysm;
+ break;
+ case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
+ *regno = sysm & 1 ? 13 : 17;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ }
+
+ /* Catch the 'accessing inaccessible register' cases we can detect
+ * at translate time.
+ */
+ switch (*tgtmode) {
+ case ARM_CPU_MODE_MON:
+ if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
+ goto undef;
+ }
+ if (s->current_el == 1) {
+ /* If we're in Secure EL1 (which implies that EL3 is AArch64)
+ * then accesses to Mon registers trap to Secure EL2, if it exists,
+ * otherwise EL3.
+ */
+ TCGv_i32 tcg_el;
+
+ if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
+ dc_isar_feature(aa64_sel2, s)) {
+ /* Target EL is EL<3 minus SCR_EL3.EEL2> */
+ tcg_el = load_cpu_field(cp15.scr_el3);
+ tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
+ tcg_gen_addi_i32(tcg_el, tcg_el, 3);
+ } else {
+ tcg_el = tcg_const_i32(3);
+ }
+
+ gen_exception_el(s, EXCP_UDEF, syn_uncategorized(), tcg_el);
+ tcg_temp_free_i32(tcg_el);
+ return false;
+ }
+ break;
+ case ARM_CPU_MODE_HYP:
+ /*
+ * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
+ * (and so we can forbid accesses from EL2 or below). elr_hyp
+ * can also be accessed from Hyp mode, so forbid accesses from
+ * EL0 or EL1.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
+ (s->current_el < 3 && *regno != 17)) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+undef:
+ /* If we get here then some access check did not pass */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_uncategorized(), exc_target);
+ return false;
+}
+
+static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because msr_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tcg_reg = load_reg(s, rn);
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ tcg_temp_free_i32(tcg_reg);
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+}
+
+static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because mrs_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tcg_reg = tcg_temp_new_i32();
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ store_reg(s, rn, tcg_reg);
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+}
+
+/* Store value to PC as for an exception return (i.e. don't
+ * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
+ * will do the masking based on the new value of the Thumb bit.
+ */
+static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
+{
+ tcg_gen_mov_i32(cpu_R[15], pc);
+ tcg_temp_free_i32(pc);
+}
+
+/* Generate a v6 exception return. Marks both values as dead. */
+static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
+{
+ store_pc_exc_ret(s, pc);
+ /* The cpsr_write_eret helper will mask the low bits of PC
+ * appropriately depending on the new Thumb bit, so it must
+ * be called after storing the new PC.
+ */
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_cpsr_write_eret(cpu_env, cpsr);
+ tcg_temp_free_i32(cpsr);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+}
+
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
+{
+ gen_rfe(s, pc, load_cpu_field(spsr));
+}
+
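+/*
+ * Expand a three-operand gvec helper that also takes a pointer to the
+ * QC (cumulative saturation) flag in CPUARMState.
+ */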
+static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz,
+ gen_helper_gvec_3_ptr *fn)
+{
+ TCGv_ptr qc_ptr = tcg_temp_new_ptr();
+
+ tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
+ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
+ opr_sz, max_sz, 0, fn);
+ tcg_temp_free_ptr(qc_ptr);
+}
+
+void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[2] = {
+ gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
+ };
+ tcg_debug_assert(vece >= 1 && vece <= 2);
+ gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
+void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3_ptr * const fns[2] = {
+ gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
+ };
+ tcg_debug_assert(vece >= 1 && vece <= 2);
+ gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
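+/*
+ * Compare-with-zero expanders: setcond produces 0/1, which is then
+ * negated to the 0/-1 element mask that the Neon comparisons define.
+ */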
+#define GEN_CMP0(NAME, COND) \
+ static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
+ { \
+ tcg_gen_setcondi_i32(COND, d, a, 0); \
+ tcg_gen_neg_i32(d, d); \
+ } \
+ static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
+ { \
+ tcg_gen_setcondi_i64(COND, d, a, 0); \
+ tcg_gen_neg_i64(d, d); \
+ } \
+ static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
+ { \
+ TCGv_vec zero = tcg_const_zeros_vec_matching(d); \
+ tcg_gen_cmp_vec(COND, vece, d, a, zero); \
+ tcg_temp_free_vec(zero); \
+ } \
+ void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
+ uint32_t opr_sz, uint32_t max_sz) \
+ { \
+ const GVecGen2 op[4] = { \
+ { .fno = gen_helper_gvec_##NAME##0_b, \
+ .fniv = gen_##NAME##0_vec, \
+ .opt_opc = vecop_list_cmp, \
+ .vece = MO_8 }, \
+ { .fno = gen_helper_gvec_##NAME##0_h, \
+ .fniv = gen_##NAME##0_vec, \
+ .opt_opc = vecop_list_cmp, \
+ .vece = MO_16 }, \
+ { .fni4 = gen_##NAME##0_i32, \
+ .fniv = gen_##NAME##0_vec, \
+ .opt_opc = vecop_list_cmp, \
+ .vece = MO_32 }, \
+ { .fni8 = gen_##NAME##0_i64, \
+ .fniv = gen_##NAME##0_vec, \
+ .opt_opc = vecop_list_cmp, \
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
+ .vece = MO_64 }, \
+ }; \
+ tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
+ }
+
+static const TCGOpcode vecop_list_cmp[] = {
+ INDEX_op_cmp_vec, 0
+};
+
+GEN_CMP0(ceq, TCG_COND_EQ)
+GEN_CMP0(cle, TCG_COND_LE)
+GEN_CMP0(cge, TCG_COND_GE)
+GEN_CMP0(clt, TCG_COND_LT)
+GEN_CMP0(cgt, TCG_COND_GT)
+
+#undef GEN_CMP0
+
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_sari_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_sari_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_sari_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_ssra8_i64,
+ .fniv = gen_ssra_vec,
+ .fno = gen_helper_gvec_ssra_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_ssra16_i64,
+ .fniv = gen_ssra_vec,
+ .fno = gen_helper_gvec_ssra_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_ssra32_i32,
+ .fniv = gen_ssra_vec,
+ .fno = gen_helper_gvec_ssra_s,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_ssra64_i64,
+ .fniv = gen_ssra_vec,
+ .fno = gen_helper_gvec_ssra_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize]. */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ /*
+ * Shifts larger than the element size are architecturally valid.
+ * A signed shift of that size produces a result of all sign bits.
+ */
+ shift = MIN(shift, (8 << vece) - 1);
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+}
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_shri_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_usra8_i64,
+ .fniv = gen_usra_vec,
+ .fno = gen_helper_gvec_usra_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8, },
+ { .fni8 = gen_usra16_i64,
+ .fniv = gen_usra_vec,
+ .fno = gen_helper_gvec_usra_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16, },
+ { .fni4 = gen_usra32_i32,
+ .fniv = gen_usra_vec,
+ .fno = gen_helper_gvec_usra_s,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32, },
+ { .fni8 = gen_usra64_i64,
+ .fniv = gen_usra_vec,
+ .fno = gen_helper_gvec_usra_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64, },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize]. */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ /*
+ * Shifts larger than the element size are architecturally valid.
+ * An unsigned shift produces all zeros, so as input to the
+ * accumulate the operation is a nop.
+ */
+ if (shift < (8 << vece)) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ } else {
+ /* Nop, but we do need to clear the tail. */
+ tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+ }
+}
+
+/*
+ * Rounding right shift: shift one less than the requested amount and
+ * add the low bit as the rounding increment. For the 8 and 16-bit
+ * operations, masking off everything but the rounding bit lets us
+ * use a normal 64-bit integer shift instead of a vector shift.
+ */
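+/* For example, a rounding shift of 23 by 3 is (23 >> 3) + ((23 >> 2) & 1) = 3. */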
+static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, sh - 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_sar8i_i64(d, a, sh);
+ tcg_gen_vec_add8_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, sh - 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_sar16i_i64(d, a, sh);
+ tcg_gen_vec_add16_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+ TCGv_i32 t;
+
+ /* Handle shift by the input size for the benefit of trans_SRSHR_ri */
+ if (sh == 32) {
+ tcg_gen_movi_i32(d, 0);
+ return;
+ }
+ t = tcg_temp_new_i32();
+ tcg_gen_extract_i32(t, a, sh - 1, 1);
+ tcg_gen_sari_i32(d, a, sh);
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_extract_i64(t, a, sh - 1, 1);
+ tcg_gen_sari_i64(d, a, sh);
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec ones = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_shri_vec(vece, t, a, sh - 1);
+ tcg_gen_dupi_vec(vece, ones, 1);
+ tcg_gen_and_vec(vece, t, t, ones);
+ tcg_gen_sari_vec(vece, d, a, sh);
+ tcg_gen_add_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(ones);
+}
+
+void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_srshr8_i64,
+ .fniv = gen_srshr_vec,
+ .fno = gen_helper_gvec_srshr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_srshr16_i64,
+ .fniv = gen_srshr_vec,
+ .fno = gen_helper_gvec_srshr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_srshr32_i32,
+ .fniv = gen_srshr_vec,
+ .fno = gen_helper_gvec_srshr_s,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_srshr64_i64,
+ .fniv = gen_srshr_vec,
+ .fno = gen_helper_gvec_srshr_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize] */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ if (shift == (8 << vece)) {
+ /*
+ * Shifts larger than the element size are architecturally valid.
+ * A signed shift produces all sign bits; with rounding this gives
+ * (-1 + 1) >> 1 == 0 or (0 + 1) >> 1 == 0, i.e. always zero.
+ */
+ tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ }
+}
+
+static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_srshr8_i64(t, a, sh);
+ tcg_gen_vec_add8_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_srshr16_i64(t, a, sh);
+ tcg_gen_vec_add16_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ gen_srshr32_i32(t, a, sh);
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ gen_srshr64_i64(t, a, sh);
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ gen_srshr_vec(vece, t, a, sh);
+ tcg_gen_add_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_srsra8_i64,
+ .fniv = gen_srsra_vec,
+ .fno = gen_helper_gvec_srsra_b,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fni8 = gen_srsra16_i64,
+ .fniv = gen_srsra_vec,
+ .fno = gen_helper_gvec_srsra_h,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_srsra32_i32,
+ .fniv = gen_srsra_vec,
+ .fno = gen_helper_gvec_srsra_s,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_srsra64_i64,
+ .fniv = gen_srsra_vec,
+ .fno = gen_helper_gvec_srsra_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize] */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ /*
+ * Shifts larger than the element size are architecturally valid.
+ * A signed shift produces all sign bits; with rounding this gives
+ * (-1 + 1) >> 1 == 0 or (0 + 1) >> 1 == 0, i.e. always zero.
+ * With accumulation, that leaves D unchanged.
+ */
+ if (shift == (8 << vece)) {
+ /* Nop, but we do need to clear the tail. */
+ tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ }
+}
+
+static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, sh - 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+ tcg_gen_vec_shr8i_i64(d, a, sh);
+ tcg_gen_vec_add8_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, sh - 1);
+ tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+ tcg_gen_vec_shr16i_i64(d, a, sh);
+ tcg_gen_vec_add16_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+ TCGv_i32 t;
+
+ /* Handle shift by the input size for the benefit of trans_URSHR_ri */
+ if (sh == 32) {
+ tcg_gen_extract_i32(d, a, sh - 1, 1);
+ return;
+ }
+ t = tcg_temp_new_i32();
+ tcg_gen_extract_i32(t, a, sh - 1, 1);
+ tcg_gen_shri_i32(d, a, sh);
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_extract_i64(t, a, sh - 1, 1);
+ tcg_gen_shri_i64(d, a, sh);
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec ones = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_shri_vec(vece, t, a, shift - 1);
+ tcg_gen_dupi_vec(vece, ones, 1);
+ tcg_gen_and_vec(vece, t, t, ones);
+ tcg_gen_shri_vec(vece, d, a, shift);
+ tcg_gen_add_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(ones);
+}
+
+void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_urshr8_i64,
+ .fniv = gen_urshr_vec,
+ .fno = gen_helper_gvec_urshr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_urshr16_i64,
+ .fniv = gen_urshr_vec,
+ .fno = gen_helper_gvec_urshr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_urshr32_i32,
+ .fniv = gen_urshr_vec,
+ .fno = gen_helper_gvec_urshr_s,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_urshr64_i64,
+ .fniv = gen_urshr_vec,
+ .fno = gen_helper_gvec_urshr_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize] */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ if (shift == (8 << vece)) {
+ /*
+ * Shifts larger than the element size are architecturally valid.
+ * An unsigned shift produces zero, and with rounding this yields
+ * a copy of the most significant bit.
+ */
+ tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ }
+}
+
+static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ if (sh == 8) {
+ tcg_gen_vec_shr8i_i64(t, a, 7);
+ } else {
+ gen_urshr8_i64(t, a, sh);
+ }
+ tcg_gen_vec_add8_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ if (sh == 16) {
+ tcg_gen_vec_shr16i_i64(t, a, 15);
+ } else {
+ gen_urshr16_i64(t, a, sh);
+ }
+ tcg_gen_vec_add16_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ if (sh == 32) {
+ tcg_gen_shri_i32(t, a, 31);
+ } else {
+ gen_urshr32_i32(t, a, sh);
+ }
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ if (sh == 64) {
+ tcg_gen_shri_i64(t, a, 63);
+ } else {
+ gen_urshr64_i64(t, a, sh);
+ }
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ if (sh == (8 << vece)) {
+ tcg_gen_shri_vec(vece, t, a, sh - 1);
+ } else {
+ gen_urshr_vec(vece, t, a, sh);
+ }
+ tcg_gen_add_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen2i ops[4] = {
+ { .fni8 = gen_ursra8_i64,
+ .fniv = gen_ursra_vec,
+ .fno = gen_helper_gvec_ursra_b,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fni8 = gen_ursra16_i64,
+ .fniv = gen_ursra_vec,
+ .fno = gen_helper_gvec_ursra_h,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_ursra32_i32,
+ .fniv = gen_ursra_vec,
+ .fno = gen_helper_gvec_ursra_s,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_ursra64_i64,
+ .fniv = gen_ursra_vec,
+ .fno = gen_helper_gvec_ursra_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize] */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+}
+
+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
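+ /* SRI leaves the top sh bits of each destination element unchanged. */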
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+ tcg_gen_shri_vec(vece, t, a, sh);
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+}
+
+void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
+ const GVecGen2i ops[4] = {
+ { .fni8 = gen_shr8_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .fno = gen_helper_gvec_sri_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_shr16_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .fno = gen_helper_gvec_sri_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_shr32_ins_i32,
+ .fniv = gen_shr_ins_vec,
+ .fno = gen_helper_gvec_sri_s,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_shr64_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .fno = gen_helper_gvec_sri_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [1..esize]. */
+ tcg_debug_assert(shift > 0);
+ tcg_debug_assert(shift <= (8 << vece));
+
+ /* Shift of esize leaves destination unchanged. */
+ if (shift < (8 << vece)) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ } else {
+ /* Nop, but we do need to clear the tail. */
+ tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
+ }
+}
+
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
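+ /* SLI leaves the low sh bits of each destination element unchanged. */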
+ tcg_gen_shli_vec(vece, t, a, sh);
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+}
+
+void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
+ const GVecGen2i ops[4] = {
+ { .fni8 = gen_shl8_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .fno = gen_helper_gvec_sli_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_shl16_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .fno = gen_helper_gvec_sli_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_shl32_ins_i32,
+ .fniv = gen_shl_ins_vec,
+ .fno = gen_helper_gvec_sli_s,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_shl64_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .fno = gen_helper_gvec_sli_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+
+ /* tszimm encoding produces immediates in the range [0..esize-1]. */
+ tcg_debug_assert(shift >= 0);
+ tcg_debug_assert(shift < (8 << vece));
+
+ if (shift == 0) {
+ tcg_gen_gvec_mov(vece, rd_ofs, rm_ofs, opr_sz, max_sz);
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
+ }
+}
+
+static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u8(a, a, b);
+ gen_helper_neon_add_u8(d, d, a);
+}
+
+static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u8(a, a, b);
+ gen_helper_neon_sub_u8(d, d, a);
+}
+
+static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u16(a, a, b);
+ gen_helper_neon_add_u16(d, d, a);
+}
+
+static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u16(a, a, b);
+ gen_helper_neon_sub_u16(d, d, a);
+}
+
+static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_mul_i32(a, a, b);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_mul_i32(a, a, b);
+ tcg_gen_sub_i32(d, d, a);
+}
+
+static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_mul_i64(a, a, b);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_mul_i64(a, a, b);
+ tcg_gen_sub_i64(d, d, a);
+}
+
+static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_mul_vec(vece, a, a, b);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_mul_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, d, d, a);
+}
+
+/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
+ * these tables are shared with AArch64, which does support them.
+ */
+void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fni4 = gen_mla8_i32,
+ .fniv = gen_mla_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni4 = gen_mla16_i32,
+ .fniv = gen_mla_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_mla32_i32,
+ .fniv = gen_mla_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_mla64_i64,
+ .fniv = gen_mla_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fni4 = gen_mls8_i32,
+ .fniv = gen_mls_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni4 = gen_mls16_i32,
+ .fniv = gen_mls_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_mls32_i32,
+ .fniv = gen_mls_vec,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_mls64_i64,
+ .fniv = gen_mls_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+/* CMTST : test is "if ((X & Y) != 0)". */
+static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_and_i32(d, a, b);
+ tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_and_i64(d, a, b);
+ tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_and_vec(vece, d, a, b);
+ tcg_gen_dupi_vec(vece, a, 0);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
+}
+
+void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
+ static const GVecGen3 ops[4] = {
+ { .fni4 = gen_helper_neon_tst_u8,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni4 = gen_helper_neon_tst_u16,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_cmtst_i32,
+ .fniv = gen_cmtst_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_cmtst_i64,
+ .fniv = gen_cmtst_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
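+/*
+ * Variable shifts (USHL and friends): only the low byte of each shift
+ * element is significant, and a negative count shifts right instead
+ * of left.
+ */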
+void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
+{
+ TCGv_i32 lval = tcg_temp_new_i32();
+ TCGv_i32 rval = tcg_temp_new_i32();
+ TCGv_i32 lsh = tcg_temp_new_i32();
+ TCGv_i32 rsh = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 max = tcg_const_i32(32);
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_ext8s_i32(lsh, shift);
+ tcg_gen_neg_i32(rsh, lsh);
+ tcg_gen_shl_i32(lval, src, lsh);
+ tcg_gen_shr_i32(rval, src, rsh);
+ tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
+ tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
+
+ tcg_temp_free_i32(lval);
+ tcg_temp_free_i32(rval);
+ tcg_temp_free_i32(lsh);
+ tcg_temp_free_i32(rsh);
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(max);
+}
+
+void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
+{
+ TCGv_i64 lval = tcg_temp_new_i64();
+ TCGv_i64 rval = tcg_temp_new_i64();
+ TCGv_i64 lsh = tcg_temp_new_i64();
+ TCGv_i64 rsh = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 max = tcg_const_i64(64);
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_ext8s_i64(lsh, shift);
+ tcg_gen_neg_i64(rsh, lsh);
+ tcg_gen_shl_i64(lval, src, lsh);
+ tcg_gen_shr_i64(rval, src, rsh);
+ tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
+ tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
+
+ tcg_temp_free_i64(lval);
+ tcg_temp_free_i64(rval);
+ tcg_temp_free_i64(lsh);
+ tcg_temp_free_i64(rsh);
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i64(max);
+}
+
+static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
+ TCGv_vec src, TCGv_vec shift)
+{
+ TCGv_vec lval = tcg_temp_new_vec_matching(dst);
+ TCGv_vec rval = tcg_temp_new_vec_matching(dst);
+ TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
+ TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
+ TCGv_vec msk, max;
+
+ tcg_gen_neg_vec(vece, rsh, shift);
+ if (vece == MO_8) {
+ tcg_gen_mov_vec(lsh, shift);
+ } else {
+ msk = tcg_temp_new_vec_matching(dst);
+ tcg_gen_dupi_vec(vece, msk, 0xff);
+ tcg_gen_and_vec(vece, lsh, shift, msk);
+ tcg_gen_and_vec(vece, rsh, rsh, msk);
+ tcg_temp_free_vec(msk);
+ }
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_shlv_vec(vece, lval, src, lsh);
+ tcg_gen_shrv_vec(vece, rval, src, rsh);
+
+ max = tcg_temp_new_vec_matching(dst);
+ tcg_gen_dupi_vec(vece, max, 8 << vece);
+
+ /*
+ * The choice of LT (signed) and GEU (unsigned) is biased toward
+ * the instructions of the x86_64 host. For MO_8 the whole byte
+ * is significant, so we must use an unsigned compare; otherwise
+ * we have already masked to a byte and a signed compare works.
+ * Other tcg hosts have a full set of comparisons and do not care.
+ */
+ if (vece == MO_8) {
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
+ tcg_gen_andc_vec(vece, lval, lval, lsh);
+ tcg_gen_andc_vec(vece, rval, rval, rsh);
+ } else {
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
+ tcg_gen_and_vec(vece, lval, lval, lsh);
+ tcg_gen_and_vec(vece, rval, rval, rsh);
+ }
+ tcg_gen_or_vec(vece, dst, lval, rval);
+
+ tcg_temp_free_vec(max);
+ tcg_temp_free_vec(lval);
+ tcg_temp_free_vec(rval);
+ tcg_temp_free_vec(lsh);
+ tcg_temp_free_vec(rsh);
+}
+
+void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_shlv_vec,
+ INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_ushl_vec,
+ .fno = gen_helper_gvec_ushl_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_ushl_vec,
+ .fno = gen_helper_gvec_ushl_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_ushl_i32,
+ .fniv = gen_ushl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_ushl_i64,
+ .fniv = gen_ushl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
+{
+ TCGv_i32 lval = tcg_temp_new_i32();
+ TCGv_i32 rval = tcg_temp_new_i32();
+ TCGv_i32 lsh = tcg_temp_new_i32();
+ TCGv_i32 rsh = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 max = tcg_const_i32(31);
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_ext8s_i32(lsh, shift);
+ tcg_gen_neg_i32(rsh, lsh);
+ tcg_gen_shl_i32(lval, src, lsh);
+ tcg_gen_umin_i32(rsh, rsh, max);
+ tcg_gen_sar_i32(rval, src, rsh);
+ tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
+ tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
+
+ tcg_temp_free_i32(lval);
+ tcg_temp_free_i32(rval);
+ tcg_temp_free_i32(lsh);
+ tcg_temp_free_i32(rsh);
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(max);
+}
+
+void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
+{
+ TCGv_i64 lval = tcg_temp_new_i64();
+ TCGv_i64 rval = tcg_temp_new_i64();
+ TCGv_i64 lsh = tcg_temp_new_i64();
+ TCGv_i64 rsh = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 max = tcg_const_i64(63);
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_ext8s_i64(lsh, shift);
+ tcg_gen_neg_i64(rsh, lsh);
+ tcg_gen_shl_i64(lval, src, lsh);
+ tcg_gen_umin_i64(rsh, rsh, max);
+ tcg_gen_sar_i64(rval, src, rsh);
+ tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
+ tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
+
+ tcg_temp_free_i64(lval);
+ tcg_temp_free_i64(rval);
+ tcg_temp_free_i64(lsh);
+ tcg_temp_free_i64(rsh);
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i64(max);
+}
+
+static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
+ TCGv_vec src, TCGv_vec shift)
+{
+ TCGv_vec lval = tcg_temp_new_vec_matching(dst);
+ TCGv_vec rval = tcg_temp_new_vec_matching(dst);
+ TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
+ TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
+ TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
+
+ /*
+ * Rely on the TCG guarantee that out of range shifts produce
+ * unspecified results, not undefined behaviour (i.e. no trap).
+ * Discard out-of-range results after the fact.
+ */
+ tcg_gen_neg_vec(vece, rsh, shift);
+ if (vece == MO_8) {
+ tcg_gen_mov_vec(lsh, shift);
+ } else {
+ tcg_gen_dupi_vec(vece, tmp, 0xff);
+ tcg_gen_and_vec(vece, lsh, shift, tmp);
+ tcg_gen_and_vec(vece, rsh, rsh, tmp);
+ }
+
+ /* Bound rsh so an out-of-range right shift yields -1. */
+ tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
+ tcg_gen_umin_vec(vece, rsh, rsh, tmp);
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
+
+ tcg_gen_shlv_vec(vece, lval, src, lsh);
+ tcg_gen_sarv_vec(vece, rval, src, rsh);
+
+ /* Select in-bound left shift. */
+ tcg_gen_andc_vec(vece, lval, lval, tmp);
+
+ /* Select between left and right shift. */
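+ /*
+ * For MO_8 the shift byte is unmasked, so a signed compare against 0
+ * detects a negative count; wider elements were masked to [0, 0xff],
+ * so a negative count shows up there as a value >= 0x80.
+ */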
+ if (vece == MO_8) {
+ tcg_gen_dupi_vec(vece, tmp, 0);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
+ } else {
+ tcg_gen_dupi_vec(vece, tmp, 0x80);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
+ }
+
+ tcg_temp_free_vec(lval);
+ tcg_temp_free_vec(rval);
+ tcg_temp_free_vec(lsh);
+ tcg_temp_free_vec(rsh);
+ tcg_temp_free_vec(tmp);
+}
+
+void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
+ INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_sshl_vec,
+ .fno = gen_helper_gvec_sshl_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_sshl_vec,
+ .fno = gen_helper_gvec_sshl_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_sshl_i32,
+ .fniv = gen_sshl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_sshl_i64,
+ .fniv = gen_sshl_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
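+/*
+ * The saturating ops must also set the QC flag: compute both the
+ * wrapping and the saturating result; any lane where they differ has
+ * saturated, and the all-ones compare mask is ORed into the QC vector.
+ */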
+static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
+ tcg_gen_add_vec(vece, x, a, b);
+ tcg_gen_usadd_vec(vece, t, a, b);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
+ tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_temp_free_vec(x);
+}
+
+void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_uqadd_vec,
+ .fno = gen_helper_gvec_uqadd_b,
+ .write_aofs = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_uqadd_vec,
+ .fno = gen_helper_gvec_uqadd_h,
+ .write_aofs = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fniv = gen_uqadd_vec,
+ .fno = gen_helper_gvec_uqadd_s,
+ .write_aofs = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fniv = gen_uqadd_vec,
+ .fno = gen_helper_gvec_uqadd_d,
+ .write_aofs = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
+ tcg_gen_add_vec(vece, x, a, b);
+ tcg_gen_ssadd_vec(vece, t, a, b);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
+ tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_temp_free_vec(x);
+}
+
+void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_sqadd_vec,
+ .fno = gen_helper_gvec_sqadd_b,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_8 },
+ { .fniv = gen_sqadd_vec,
+ .fno = gen_helper_gvec_sqadd_h,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_16 },
+ { .fniv = gen_sqadd_vec,
+ .fno = gen_helper_gvec_sqadd_s,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_32 },
+ { .fniv = gen_sqadd_vec,
+ .fno = gen_helper_gvec_sqadd_d,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
+ tcg_gen_sub_vec(vece, x, a, b);
+ tcg_gen_ussub_vec(vece, t, a, b);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
+ tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_temp_free_vec(x);
+}
+
+void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_uqsub_vec,
+ .fno = gen_helper_gvec_uqsub_b,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_8 },
+ { .fniv = gen_uqsub_vec,
+ .fno = gen_helper_gvec_uqsub_h,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_16 },
+ { .fniv = gen_uqsub_vec,
+ .fno = gen_helper_gvec_uqsub_s,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_32 },
+ { .fniv = gen_uqsub_vec,
+ .fno = gen_helper_gvec_uqsub_d,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+ TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec x = tcg_temp_new_vec_matching(t);
+ tcg_gen_sub_vec(vece, x, a, b);
+ tcg_gen_sssub_vec(vece, t, a, b);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
+ tcg_gen_or_vec(vece, sat, sat, x);
+ tcg_temp_free_vec(x);
+}
+
+void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ { .fniv = gen_sqsub_vec,
+ .fno = gen_helper_gvec_sqsub_b,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_8 },
+ { .fniv = gen_sqsub_vec,
+ .fno = gen_helper_gvec_sqsub_h,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_16 },
+ { .fniv = gen_sqsub_vec,
+ .fno = gen_helper_gvec_sqsub_s,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_32 },
+ { .fniv = gen_sqsub_vec,
+ .fno = gen_helper_gvec_sqsub_d,
+ .opt_opc = vecop_list,
+ .write_aofs = true,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
+ rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_sub_i32(t, a, b);
+ tcg_gen_sub_i32(d, b, a);
+ tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
+ tcg_temp_free_i32(t);
+}
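+
+/*
+ * Note: the scalar expansions compute |a - b| without a branch by
+ * forming both a - b and b - a and selecting with a movcond on a < b
+ * (signed here, unsigned in gen_uabd_i32 below). E.g. a = 3, b = 7
+ * gives t = -4 and d = 4, and since 3 < 7 the movcond keeps d = 4.
+ */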
+
+static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(t, a, b);
+ tcg_gen_sub_i64(d, b, a);
+ tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_smin_vec(vece, t, a, b);
+ tcg_gen_smax_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
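+
+/*
+ * The vector expansion instead uses the identity
+ * |a - b| == max(a, b) - min(a, b), which needs only element-wise
+ * min, max and subtract, all of which exist as vector opcodes.
+ */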
+
+void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_sabd_vec,
+ .fno = gen_helper_gvec_sabd_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_sabd_vec,
+ .fno = gen_helper_gvec_sabd_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_sabd_i32,
+ .fniv = gen_sabd_vec,
+ .fno = gen_helper_gvec_sabd_s,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_sabd_i64,
+ .fniv = gen_sabd_vec,
+ .fno = gen_helper_gvec_sabd_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_sub_i32(t, a, b);
+ tcg_gen_sub_i32(d, b, a);
+ tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(t, a, b);
+ tcg_gen_sub_i64(d, b, a);
+ tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_umin_vec(vece, t, a, b);
+ tcg_gen_umax_vec(vece, d, a, b);
+ tcg_gen_sub_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_uabd_vec,
+ .fno = gen_helper_gvec_uabd_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fniv = gen_uabd_vec,
+ .fno = gen_helper_gvec_uabd_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_uabd_i32,
+ .fniv = gen_uabd_vec,
+ .fno = gen_helper_gvec_uabd_s,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_uabd_i64,
+ .fniv = gen_uabd_vec,
+ .fno = gen_helper_gvec_uabd_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+ gen_sabd_i32(t, a, b);
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ gen_sabd_i64(t, a, b);
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ gen_sabd_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, INDEX_op_add_vec,
+ INDEX_op_smin_vec, INDEX_op_smax_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_saba_vec,
+ .fno = gen_helper_gvec_saba_b,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fniv = gen_saba_vec,
+ .fno = gen_helper_gvec_saba_h,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_saba_i32,
+ .fniv = gen_saba_vec,
+ .fno = gen_helper_gvec_saba_s,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_saba_i64,
+ .fniv = gen_saba_vec,
+ .fno = gen_helper_gvec_saba_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
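+
+/*
+ * Note: .load_dest = true in the table above makes the gvec expander
+ * feed the old destination lanes into the operation, which is what
+ * lets the accumulating forms compute d += |a - b| rather than
+ * overwriting d.
+ */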
+
+static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+ gen_uabd_i32(t, a, b);
+ tcg_gen_add_i32(d, d, t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ gen_uabd_i64(t, a, b);
+ tcg_gen_add_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ gen_uabd_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, d, d, t);
+ tcg_temp_free_vec(t);
+}
+
+void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, INDEX_op_add_vec,
+ INDEX_op_umin_vec, INDEX_op_umax_vec, 0
+ };
+ static const GVecGen3 ops[4] = {
+ { .fniv = gen_uaba_vec,
+ .fno = gen_helper_gvec_uaba_b,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fniv = gen_uaba_vec,
+ .fno = gen_helper_gvec_uaba_h,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_uaba_i32,
+ .fniv = gen_uaba_vec,
+ .fno = gen_helper_gvec_uaba_s,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_uaba_i64,
+ .fniv = gen_uaba_vec,
+ .fno = gen_helper_gvec_uaba_d,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64 },
+ };
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
+
+static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
+ int opc1, int crn, int crm, int opc2,
+ bool isread, int rt, int rt2)
+{
+ const ARMCPRegInfo *ri;
+
+ ri = get_arm_cp_reginfo(s->cp_regs,
+ ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
+ if (ri) {
+ bool need_exit_tb;
+
+ /* Check access permissions */
+ if (!cp_access_ok(s->current_el, ri, isread)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (s->hstr_active || ri->accessfn ||
+ (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
+ /* Emit code to perform further access permission checks at
+ * runtime; this may result in an exception.
+ * Note that on XScale all cp0..cp13 registers do an access check
+ * call in order to handle c15_cpar.
+ */
+ TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn, tcg_isread;
+ uint32_t syndrome;
+
+ /* Note that since we are an implementation which takes an
+ * exception on a trapped conditional instruction only if the
+ * instruction passes its condition code check, we can take
+ * advantage of the clause in the ARM ARM that allows us to set
+ * the COND field in the instruction to 0xE in all cases.
+ * We could fish the actual condition out of the insn (ARM)
+ * or the condexec bits (Thumb) but it isn't necessary.
+ */
+ switch (cpnum) {
+ case 14:
+ if (is64) {
+ syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, false);
+ } else {
+ syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, false);
+ }
+ break;
+ case 15:
+ if (is64) {
+ syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, false);
+ } else {
+ syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, false);
+ }
+ break;
+ default:
+ /* ARMv8 defines that only coprocessors 14 and 15 exist,
+ * so this can only happen if this is an ARMv7 or earlier CPU,
+ * in which case the syndrome information won't actually be
+ * guest visible.
+ */
+ assert(!arm_dc_feature(s, ARM_FEATURE_V8));
+ syndrome = syn_uncategorized();
+ break;
+ }
+
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ tmpptr = tcg_const_ptr(ri);
+ tcg_syn = tcg_const_i32(syndrome);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
+ tcg_isread);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
+ } else if (ri->type & ARM_CP_RAISES_EXC) {
+ /*
+ * The readfn or writefn might raise an exception;
+ * synchronize the CPU state in case it does.
+ */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ }
+
+ /* Handle special cases first */
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return;
+ case ARM_CP_WFI:
+ if (isread) {
+ unallocated_encoding(s);
+ return;
+ }
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_WFI;
+ return;
+ default:
+ break;
+ }
+
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ gen_io_start();
+ }
+
+ if (isread) {
+ /* Read */
+ if (is64) {
+ TCGv_i64 tmp64;
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp64 = tcg_const_i64(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmp64 = tcg_temp_new_i64();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ store_reg(s, rt, tmp);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrh_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ store_reg(s, rt2, tmp);
+ } else {
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp = tcg_const_i32(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmp = tcg_temp_new_i32();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp = load_cpu_offset(ri->fieldoffset);
+ }
+ if (rt == 15) {
+ /* A destination register of r15 for 32-bit loads sets
+ * the condition codes from the high 4 bits of the value.
+ */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rt, tmp);
+ }
+ }
+ } else {
+ /* Write */
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return;
+ }
+
+ if (is64) {
+ TCGv_i32 tmplo, tmphi;
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ tmplo = load_reg(s, rt);
+ tmphi = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
+ tcg_temp_free_i32(tmplo);
+ tcg_temp_free_i32(tmphi);
+ if (ri->writefn) {
+ TCGv_ptr tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (ri->writefn) {
+ TCGv_i32 tmp;
+ TCGv_ptr tmpptr;
+ tmp = load_reg(s, rt);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tmp);
+ } else {
+ TCGv_i32 tmp = load_reg(s, rt);
+ store_cpu_offset(tmp, ri->fieldoffset);
+ }
+ }
+ }
+
+ /* I/O operations must end the TB here (whether read or write) */
+ need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
+ (ri->type & ARM_CP_IO));
+
+ if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /*
+ * A write to any coprocessor register that ends a TB
+ * must rebuild the hflags for the next TB.
+ */
+ TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
+ } else {
+ if (ri->type & ARM_CP_NEWEL) {
+ gen_helper_rebuild_hflags_a32_newel(cpu_env);
+ } else {
+ gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+ }
+ }
+ tcg_temp_free_i32(tcg_el);
+ /*
+ * We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ need_exit_tb = true;
+ }
+ if (need_exit_tb) {
+ gen_lookup_tb(s);
+ }
+
+ return;
+ }
+
+ /* Unknown register; this might be a guest error or a QEMU
+ * unimplemented feature.
+ */
+ if (is64) {
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
+ } else {
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
+ }
+
+ unallocated_encoding(s);
+ return;
+}
+
+/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
+static void disas_xscale_insn(DisasContext *s, uint32_t insn)
+{
+ int cpnum = (insn >> 8) & 0xf;
+
+ if (extract32(s->c15_cpar, cpnum, 1) == 0) {
+ unallocated_encoding(s);
+ } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ if (disas_iwmmxt_insn(s, insn)) {
+ unallocated_encoding(s);
+ }
+ } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
+ if (disas_dsp_insn(s, insn)) {
+ unallocated_encoding(s);
+ }
+ }
+}
+
+/* Store a 64-bit value to a register pair. Clobbers val. */
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
+{
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, val);
+ store_reg(s, rlow, tmp);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrh_i64_i32(tmp, val);
+ store_reg(s, rhigh, tmp);
+}
+
+/* Load and add a 64-bit value from a register pair. */
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmpl;
+ TCGv_i32 tmph;
+
+ /* Load 64-bit value rd:rn. */
+ tmpl = load_reg(s, rlow);
+ tmph = load_reg(s, rhigh);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+ tcg_temp_free_i32(tmpl);
+ tcg_temp_free_i32(tmph);
+ tcg_gen_add_i64(val, val, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* Set N and Z flags from hi|lo. */
+static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
+{
+ tcg_gen_mov_i32(cpu_NF, hi);
+ tcg_gen_or_i32(cpu_ZF, lo, hi);
+}
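+
+/*
+ * This relies on QEMU's flag representation: N is bit 31 of cpu_NF,
+ * so copying the high word gives the sign of the 64-bit result, and
+ * Z is "cpu_ZF == 0", so the OR of both halves is zero exactly when
+ * the full 64-bit value is zero.
+ */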
+
+/* Load/Store exclusive instructions are implemented by remembering
+ the value/address loaded, and seeing if these are the same
+ when the store is performed. This should be sufficient to implement
+ the architecturally mandated semantics, and avoids having to monitor
+ regular stores. The compare vs the remembered value is done during
+ the cmpxchg operation, but we must compare the addresses manually. */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv_i32 addr, int size)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ MemOp opc = size | MO_ALIGN | s->be_data;
+
+ s->is_ldex = true;
+
+ if (size == 3) {
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. That means we don't want to do a
+ * gen_aa32_ld_i64(), which checks SCTLR_B as if for an
+ * architecturally 64-bit access, but instead do a 64-bit access
+ * using MO_BE if appropriate and then split the two halves.
+ */
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
+
+ tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
+ tcg_temp_free(taddr);
+ tcg_gen_mov_i64(cpu_exclusive_val, t64);
+ if (s->be_data == MO_BE) {
+ tcg_gen_extr_i64_i32(tmp2, tmp, t64);
+ } else {
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ }
+ tcg_temp_free_i64(t64);
+
+ store_reg(s, rt2, tmp2);
+ } else {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
+ tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
+ }
+
+ store_reg(s, rt, tmp);
+ tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
+}
+
+static void gen_clrex(DisasContext *s)
+{
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i32 addr, int size)
+{
+ TCGv_i32 t0, t1, t2;
+ TCGv_i64 extaddr;
+ TCGv taddr;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
+ MemOp opc = size | MO_ALIGN | s->be_data;
+
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
+ [addr] = {Rt};
+ {Rd} = 0;
+ } else {
+ {Rd} = 1;
+ } */
+ fail_label = gen_new_label();
+ done_label = gen_new_label();
+ extaddr = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(extaddr, addr);
+ tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
+ tcg_temp_free_i64(extaddr);
+
+ taddr = gen_aa32_addr(s, addr, opc);
+ t0 = tcg_temp_new_i32();
+ t1 = load_reg(s, rt);
+ if (size == 3) {
+ TCGv_i64 o64 = tcg_temp_new_i64();
+ TCGv_i64 n64 = tcg_temp_new_i64();
+
+ t2 = load_reg(s, rt2);
+
+ /*
+ * For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. Since we're going to treat this as a
+ * single 64-bit BE store, we need to put the two halves in the
+ * opposite order for BE to LE, so that they end up in the right
+ * places. We don't want gen_aa32_st_i64, because that checks
+ * SCTLR_B as if for an architectural 64-bit access.
+ */
+ if (s->be_data == MO_BE) {
+ tcg_gen_concat_i32_i64(n64, t2, t1);
+ } else {
+ tcg_gen_concat_i32_i64(n64, t1, t2);
+ }
+ tcg_temp_free_i32(t2);
+
+ tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
+ get_mem_index(s), opc);
+ tcg_temp_free_i64(n64);
+
+ tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
+ tcg_gen_extrl_i64_i32(t0, o64);
+
+ tcg_temp_free_i64(o64);
+ } else {
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
+ tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
+ tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
+ tcg_temp_free_i32(t2);
+ }
+ tcg_temp_free_i32(t1);
+ tcg_temp_free(taddr);
+ tcg_gen_mov_i32(cpu_R[rd], t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_br(done_label);
+
+ gen_set_label(fail_label);
+ tcg_gen_movi_i32(cpu_R[rd], 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+/* gen_srs:
+ * @s: DisasContext
+ * @mode: mode field from insn (which stack to store to)
+ * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
+ * @writeback: true if writeback bit set
+ *
+ * Generate code for the SRS (Store Return State) insn.
+ */
+static void gen_srs(DisasContext *s,
+ uint32_t mode, uint32_t amode, bool writeback)
+{
+ int32_t offset;
+ TCGv_i32 addr, tmp;
+ bool undef = false;
+
+ /* SRS is:
+ * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
+ * and specified mode is monitor mode
+ * - UNDEFINED in Hyp mode
+ * - UNPREDICTABLE in User or System mode
+ * - UNPREDICTABLE if the specified mode is:
+ * -- not implemented
+ * -- not a valid mode number
+ * -- a mode that's at a higher exception level
+ * -- Monitor, if we are Non-secure
+ * For the UNPREDICTABLE cases we choose to UNDEF.
+ */
+ if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
+ return;
+ }
+
+ if (s->current_el == 0 || s->current_el == 2) {
+ undef = true;
+ }
+
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ break;
+ case ARM_CPU_MODE_HYP:
+ if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ undef = true;
+ }
+ break;
+ case ARM_CPU_MODE_MON:
+ /* No need to check specifically for "are we non-secure" because
+ * we've already made EL0 UNDEF and handled the trap for S-EL1;
+ * so if this isn't EL3 then we must be non-secure.
+ */
+ if (s->current_el != 3) {
+ undef = true;
+ }
+ break;
+ default:
+ undef = true;
+ }
+
+ if (undef) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ addr = tcg_temp_new_i32();
+ tmp = tcg_const_i32(mode);
+ /* get_r13_banked() will raise an exception if called from System mode */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc_curr);
+ gen_helper_get_r13_banked(addr, cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ switch (amode) {
+ case 0: /* DA */
+ offset = -4;
+ break;
+ case 1: /* IA */
+ offset = 0;
+ break;
+ case 2: /* DB */
+ offset = -8;
+ break;
+ case 3: /* IB */
+ offset = 4;
+ break;
+ default:
+ abort();
+ }
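+ /*
+ * The offset chosen above positions addr at the lower of the two
+ * words to be stored (SRS writes LR then SPSR, 8 bytes in total);
+ * e.g. for DB it is SP - 8, so LR lands at SP - 8 and SPSR at
+ * SP - 4.
+ */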
+ tcg_gen_addi_i32(addr, addr, offset);
+ tmp = load_reg(s, 14);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+ tmp = load_cpu_field(spsr);
+ tcg_gen_addi_i32(addr, addr, 4);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+ if (writeback) {
+ switch (amode) {
+ case 0:
+ offset = -8;
+ break;
+ case 1:
+ offset = 4;
+ break;
+ case 2:
+ offset = -4;
+ break;
+ case 3:
+ offset = 0;
+ break;
+ default:
+ abort();
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ tmp = tcg_const_i32(mode);
+ gen_helper_set_r13_banked(cpu_env, tmp, addr);
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+}
+
+/* Skip this instruction if the ARM condition is false */
+static void arm_skip_unless(DisasContext *s, uint32_t cond)
+{
+ arm_gen_condlabel(s);
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
+}
+
+
+/*
+ * Constant expanders used by T16/T32 decode
+ */
+
+/* Return only the rotation part of T32ExpandImm. */
+static int t32_expandimm_rot(DisasContext *s, int x)
+{
+ return x & 0xc00 ? extract32(x, 7, 5) : 0;
+}
+
+/* Return the unrotated immediate from T32ExpandImm. */
+static int t32_expandimm_imm(DisasContext *s, int x)
+{
+ int imm = extract32(x, 0, 8);
+
+ switch (extract32(x, 8, 4)) {
+ case 0: /* XY */
+ /* Nothing to do. */
+ break;
+ case 1: /* 00XY00XY */
+ imm *= 0x00010001;
+ break;
+ case 2: /* XY00XY00 */
+ imm *= 0x01000100;
+ break;
+ case 3: /* XYXYXYXY */
+ imm *= 0x01010101;
+ break;
+ default:
+ /* Rotated constant. */
+ imm |= 0x80;
+ break;
+ }
+ return imm;
+}
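+
+/*
+ * Worked example: a 12-bit field of 0x1ab selects pattern 1 above, so
+ * the expanded immediate is 0x00ab00ab. Fields with bits [11:10]
+ * nonzero are rotated constants: t32_expandimm_rot() supplies the
+ * rotation and the implied high bit of the byte is OR-ed in here
+ * (imm |= 0x80).
+ */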
+
+static int t32_branch24(DisasContext *s, int x)
+{
+ /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
+ x ^= !(x < 0) * (3 << 21);
+ /* Append the final zero. */
+ return x << 1;
+}
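+
+/*
+ * Background: the T32 branch encoding stores I1/I2 as J1/J2 with
+ * I = NOT(J XOR S). After sign extension x < 0 exactly when S == 1,
+ * in which case I == J and no flip is needed; when S == 0 both bits
+ * must be inverted, hence the conditional XOR with 3 << 21 above.
+ */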
+
+static int t16_setflags(DisasContext *s)
+{
+ return s->condexec_mask == 0;
+}
+
+static int t16_push_list(DisasContext *s, int x)
+{
+ return (x & 0xff) | (x & 0x100) << (14 - 8);
+}
+
+static int t16_pop_list(DisasContext *s, int x)
+{
+ return (x & 0xff) | (x & 0x100) << (15 - 8);
+}
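+
+/*
+ * In the T16 encodings bit 8 of the register list selects the extra
+ * register: LR for PUSH and PC for POP. The shifts above move that
+ * bit to position 14 or 15 respectively so that the list matches the
+ * generic LDM/STM register-list layout.
+ */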
+
+/*
+ * Include the generated decoders.
+ */
+
+#include "decode-a32.c.inc"
+#include "decode-a32-uncond.c.inc"
+#include "decode-t32.c.inc"
+#include "decode-t16.c.inc"
+
+static bool valid_cp(DisasContext *s, int cp)
+{
+ /*
+ * Return true if this coprocessor field indicates something
+ * that's really a possible coprocessor.
+ * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
+ * and of those only cp14 and cp15 were used for registers.
+ * cp10 and cp11 were used for VFP and Neon, whose decode is
+ * dealt with elsewhere. With the advent of fp16, cp9 is also
+ * now part of VFP.
+ * For v8A and later, the encoding has been tightened so that
+ * only cp14 and cp15 are valid, and other values aren't considered
+ * to be in the coprocessor-instruction space at all. v8M still
+ * permits coprocessors 0..7.
+ * For XScale, we must not decode the XScale cp0, cp1 space as
+ * a standard coprocessor insn, because we want to fall through to
+ * the legacy disas_xscale_insn() decoder after decodetree is done.
+ */
+ if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
+ return false;
+ }
+
+ if (arm_dc_feature(s, ARM_FEATURE_V8) &&
+ !arm_dc_feature(s, ARM_FEATURE_M)) {
+ return cp >= 14;
+ }
+ return cp < 8 || cp >= 14;
+}
+
+static bool trans_MCR(DisasContext *s, arg_MCR *a)
+{
+ if (!valid_cp(s, a->cp)) {
+ return false;
+ }
+ do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
+ false, a->rt, 0);
+ return true;
+}
+
+static bool trans_MRC(DisasContext *s, arg_MRC *a)
+{
+ if (!valid_cp(s, a->cp)) {
+ return false;
+ }
+ do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
+ true, a->rt, 0);
+ return true;
+}
+
+static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
+{
+ if (!valid_cp(s, a->cp)) {
+ return false;
+ }
+ do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
+ false, a->rt, a->rt2);
+ return true;
+}
+
+static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
+{
+ if (!valid_cp(s, a->cp)) {
+ return false;
+ }
+ do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
+ true, a->rt, a->rt2);
+ return true;
+}
+
+/* Helpers to swap operands for reverse-subtract. */
+static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_sub_i32(dst, b, a);
+}
+
+static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_sub_CC(dst, b, a);
+}
+
+static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_sub_carry(dest, b, a);
+}
+
+static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_sbc_CC(dest, b, a);
+}
+
+/*
+ * Helpers for the data processing routines.
+ *
+ * After the computation store the results back.
+ * This may be suppressed altogether (STREG_NONE), require a runtime
+ * check against the stack limits (STREG_SP_CHECK), or generate an
+ * exception return. Oh, or store into a register.
+ *
+ * Always return true, indicating success for a trans_* function.
+ */
+typedef enum {
+ STREG_NONE,
+ STREG_NORMAL,
+ STREG_SP_CHECK,
+ STREG_EXC_RET,
+} StoreRegKind;
+
+static bool store_reg_kind(DisasContext *s, int rd,
+ TCGv_i32 val, StoreRegKind kind)
+{
+ switch (kind) {
+ case STREG_NONE:
+ tcg_temp_free_i32(val);
+ return true;
+ case STREG_NORMAL:
+ /* See ALUWritePC: Interworking only from a32 mode. */
+ if (s->thumb) {
+ store_reg(s, rd, val);
+ } else {
+ store_reg_bx(s, rd, val);
+ }
+ return true;
+ case STREG_SP_CHECK:
+ store_sp_checked(s, val);
+ return true;
+ case STREG_EXC_RET:
+ gen_exception_return(s, val);
+ return true;
+ }
+ g_assert_not_reached();
+}
+
+/*
+ * Data Processing (register)
+ *
+ * Operate, with set flags, one register source,
+ * one immediate shifted register source, and a destination.
+ */
+static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp1, tmp2;
+
+ tmp2 = load_reg(s, a->rm);
+ gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
+ tmp1 = load_reg(s, a->rn);
+
+ gen(tmp1, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+
+ if (logic_cc) {
+ gen_logic_CC(tmp1);
+ }
+ return store_reg_kind(s, a->rd, tmp1, kind);
+}
+
+static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
+ void (*gen)(TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp;
+
+ tmp = load_reg(s, a->rm);
+ gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
+
+ gen(tmp, tmp);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ return store_reg_kind(s, a->rd, tmp, kind);
+}
+
+/*
+ * Data-processing (register-shifted register)
+ *
+ * Operate, with set flags, one register source,
+ * one register shifted register source, and a destination.
+ */
+static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp1, tmp2;
+
+ tmp1 = load_reg(s, a->rs);
+ tmp2 = load_reg(s, a->rm);
+ gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
+ tmp1 = load_reg(s, a->rn);
+
+ gen(tmp1, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+
+ if (logic_cc) {
+ gen_logic_CC(tmp1);
+ }
+ return store_reg_kind(s, a->rd, tmp1, kind);
+}
+
+static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
+ void (*gen)(TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp1, tmp2;
+
+ tmp1 = load_reg(s, a->rs);
+ tmp2 = load_reg(s, a->rm);
+ gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
+
+ gen(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ return store_reg_kind(s, a->rd, tmp2, kind);
+}
+
+/*
+ * Data-processing (immediate)
+ *
+ * Operate, with set flags, one register source,
+ * one rotated immediate, and a destination.
+ *
+ * Note that logic_cc && a->rot setting CF based on the msb of the
+ * immediate is the reason why we must pass in the unrotated form
+ * of the immediate.
+ */
+static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp1, tmp2;
+ uint32_t imm;
+
+ imm = ror32(a->imm, a->rot);
+ if (logic_cc && a->rot) {
+ tcg_gen_movi_i32(cpu_CF, imm >> 31);
+ }
+ tmp2 = tcg_const_i32(imm);
+ tmp1 = load_reg(s, a->rn);
+
+ gen(tmp1, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+
+ if (logic_cc) {
+ gen_logic_CC(tmp1);
+ }
+ return store_reg_kind(s, a->rd, tmp1, kind);
+}
+
+static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
+ void (*gen)(TCGv_i32, TCGv_i32),
+ int logic_cc, StoreRegKind kind)
+{
+ TCGv_i32 tmp;
+ uint32_t imm;
+
+ imm = ror32(a->imm, a->rot);
+ if (logic_cc && a->rot) {
+ tcg_gen_movi_i32(cpu_CF, imm >> 31);
+ }
+ tmp = tcg_const_i32(imm);
+
+ gen(tmp, tmp);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ return store_reg_kind(s, a->rd, tmp, kind);
+}
+
+#define DO_ANY3(NAME, OP, L, K) \
+ static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a) \
+ { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a) \
+ { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }
+
+#define DO_ANY2(NAME, OP, L, K) \
+ static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a) \
+ { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); } \
+ static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a) \
+ { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }
+
+#define DO_CMP2(NAME, OP, L) \
+ static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a) \
+ { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); } \
+ static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a) \
+ { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); } \
+ static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a) \
+ { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
+
+DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
+DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
+DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
+DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
+
+DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
+DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
+DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
+DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
+
+DO_CMP2(TST, tcg_gen_and_i32, true)
+DO_CMP2(TEQ, tcg_gen_xor_i32, true)
+DO_CMP2(CMN, gen_add_CC, false)
+DO_CMP2(CMP, gen_sub_CC, false)
+
+DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
+ a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
+
+/*
+ * Note that, for the computation of StoreRegKind, we return out of
+ * the middle of the functions that are expanded by DO_ANY3, and that
+ * we modify a->s via that parameter before it is used by OP.
+ */
+DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
+ ({
+ StoreRegKind ret = STREG_NORMAL;
+ if (a->rd == 15 && a->s) {
+ /*
+ * See ALUExceptionReturn:
+ * In User mode, UNPREDICTABLE; we choose UNDEF.
+ * In Hyp mode, UNDEFINED.
+ */
+ if (IS_USER(s) || s->current_el == 2) {
+ unallocated_encoding(s);
+ return true;
+ }
+ /* There is no writeback of nzcv to PSTATE. */
+ a->s = 0;
+ ret = STREG_EXC_RET;
+ } else if (a->rd == 13 && a->rn == 13) {
+ ret = STREG_SP_CHECK;
+ }
+ ret;
+ }))
+
+DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
+ ({
+ StoreRegKind ret = STREG_NORMAL;
+ if (a->rd == 15 && a->s) {
+ /*
+ * See ALUExceptionReturn:
+ * In User mode, UNPREDICTABLE; we choose UNDEF.
+ * In Hyp mode, UNDEFINED.
+ */
+ if (IS_USER(s) || s->current_el == 2) {
+ unallocated_encoding(s);
+ return true;
+ }
+ /* There is no writeback of nzcv to PSTATE. */
+ a->s = 0;
+ ret = STREG_EXC_RET;
+ } else if (a->rd == 13) {
+ ret = STREG_SP_CHECK;
+ }
+ ret;
+ }))
+
+DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
+
+/*
+ * ORN is only available with T32, so there is no register-shifted-register
+ * form of the insn. Using the DO_ANY3 macro would create an unused function.
+ */
+static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
+{
+ return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
+}
+
+static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
+{
+ return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
+}
+
+#undef DO_ANY3
+#undef DO_ANY2
+#undef DO_CMP2
+
+static bool trans_ADR(DisasContext *s, arg_ri *a)
+{
+ store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
+ return true;
+}
+
+static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
+{
+ TCGv_i32 tmp;
+
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+
+ tmp = tcg_const_i32(a->imm);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
+{
+ TCGv_i32 tmp;
+
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+
+ tmp = load_reg(s, a->rd);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+/*
+ * v8.1M MVE wide shifts
+ */
+static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a,
+ WideShiftImmFn *fn)
+{
+ TCGv_i64 rda;
+ TCGv_i32 rdalo, rdahi;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+ return false;
+ }
+ if (a->rdahi == 15) {
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+ return false;
+ }
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+ a->rdahi == 13) {
+ /* RdaHi == 13 is UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ if (a->shim == 0) {
+ a->shim = 32;
+ }
+
+ rda = tcg_temp_new_i64();
+ rdalo = load_reg(s, a->rdalo);
+ rdahi = load_reg(s, a->rdahi);
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+ fn(rda, rda, a->shim);
+
+ tcg_gen_extrl_i64_i32(rdalo, rda);
+ tcg_gen_extrh_i64_i32(rdahi, rda);
+ store_reg(s, a->rdalo, rdalo);
+ store_reg(s, a->rdahi, rdahi);
+ tcg_temp_free_i64(rda);
+
+ return true;
+}
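+
+/*
+ * The MVE long shifts treat RdaHi:RdaLo as a single 64-bit value;
+ * e.g. ASRL #1 on RdaHi:RdaLo = 0x80000000:0x00000000 produces
+ * 0xc0000000:0x00000000.
+ */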
+
+static bool trans_ASRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_sari_i64);
+}
+
+static bool trans_LSLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_shli_i64);
+}
+
+static bool trans_LSRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, tcg_gen_shri_i64);
+}
+
+static void gen_mve_sqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+ gen_helper_mve_sqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_mve_sqshll);
+}
+
+static void gen_mve_uqshll(TCGv_i64 r, TCGv_i64 n, int64_t shift)
+{
+ gen_helper_mve_uqshll(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHLL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_mve_uqshll);
+}
+
+static bool trans_SRSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_srshr64_i64);
+}
+
+static bool trans_URSHRL_ri(DisasContext *s, arg_mve_shl_ri *a)
+{
+ return do_mve_shl_ri(s, a, gen_urshr64_i64);
+}
+
+static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn)
+{
+ TCGv_i64 rda;
+ TCGv_i32 rdalo, rdahi;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+ return false;
+ }
+ if (a->rdahi == 15) {
+ /* These are a different encoding (SQSHL/SRSHR/UQSHL/URSHR) */
+ return false;
+ }
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+ a->rdahi == 13 || a->rm == 13 || a->rm == 15 ||
+ a->rm == a->rdahi || a->rm == a->rdalo) {
+ /* These rdahi/rdalo/rm cases are UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ rda = tcg_temp_new_i64();
+ rdalo = load_reg(s, a->rdalo);
+ rdahi = load_reg(s, a->rdahi);
+ tcg_gen_concat_i32_i64(rda, rdalo, rdahi);
+
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+ fn(rda, cpu_env, rda, cpu_R[a->rm]);
+
+ tcg_gen_extrl_i64_i32(rdalo, rda);
+ tcg_gen_extrh_i64_i32(rdahi, rda);
+ store_reg(s, a->rdalo, rdalo);
+ store_reg(s, a->rdahi, rdahi);
+ tcg_temp_free_i64(rda);
+
+ return true;
+}
+
+static bool trans_LSLL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_ushll);
+}
+
+static bool trans_ASRL_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_sshrl);
+}
+
+static bool trans_UQRSHLL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll);
+}
+
+static bool trans_SQRSHRL64_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl);
+}
+
+static bool trans_UQRSHLL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_uqrshll48);
+}
+
+static bool trans_SQRSHRL48_rr(DisasContext *s, arg_mve_shl_rr *a)
+{
+ return do_mve_shl_rr(s, a, gen_helper_mve_sqrshrl48);
+}
+
+static bool do_mve_sh_ri(DisasContext *s, arg_mve_sh_ri *a, ShiftImmFn *fn)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+ return false;
+ }
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+ a->rda == 13 || a->rda == 15) {
+ /* These rda cases are UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ if (a->shim == 0) {
+ a->shim = 32;
+ }
+ fn(cpu_R[a->rda], cpu_R[a->rda], a->shim);
+
+ return true;
+}
+
+static bool trans_URSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+ return do_mve_sh_ri(s, a, gen_urshr32_i32);
+}
+
+static bool trans_SRSHR_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+ return do_mve_sh_ri(s, a, gen_srshr32_i32);
+}
+
+static void gen_mve_sqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+ gen_helper_mve_sqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_SQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+ return do_mve_sh_ri(s, a, gen_mve_sqshl);
+}
+
+static void gen_mve_uqshl(TCGv_i32 r, TCGv_i32 n, int32_t shift)
+{
+ gen_helper_mve_uqshl(r, cpu_env, n, tcg_constant_i32(shift));
+}
+
+static bool trans_UQSHL_ri(DisasContext *s, arg_mve_sh_ri *a)
+{
+ return do_mve_sh_ri(s, a, gen_mve_uqshl);
+}
+
+static bool do_mve_sh_rr(DisasContext *s, arg_mve_sh_rr *a, ShiftFn *fn)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ /* Decode falls through to ORR/MOV UNPREDICTABLE handling */
+ return false;
+ }
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !arm_dc_feature(s, ARM_FEATURE_M_MAIN) ||
+ a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 ||
+ a->rm == a->rda) {
+ /* These rda/rm cases are UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ /* The helper takes care of the sign-extension of the low 8 bits of Rm */
+ fn(cpu_R[a->rda], cpu_env, cpu_R[a->rda], cpu_R[a->rm]);
+ return true;
+}
+
+static bool trans_SQRSHR_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+ return do_mve_sh_rr(s, a, gen_helper_mve_sqrshr);
+}
+
+static bool trans_UQRSHL_rr(DisasContext *s, arg_mve_sh_rr *a)
+{
+ return do_mve_sh_rr(s, a, gen_helper_mve_uqrshl);
+}
+
+/*
+ * Multiply and multiply accumulate
+ */
+
+static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ tcg_gen_mul_i32(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+ if (add) {
+ t2 = load_reg(s, a->ra);
+ tcg_gen_add_i32(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+ }
+ if (a->s) {
+ gen_logic_CC(t1);
+ }
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool trans_MUL(DisasContext *s, arg_MUL *a)
+{
+ return op_mla(s, a, false);
+}
+
+static bool trans_MLA(DisasContext *s, arg_MLA *a)
+{
+ return op_mla(s, a, true);
+}
+
+static bool trans_MLS(DisasContext *s, arg_MLS *a)
+{
+ TCGv_i32 t1, t2;
+
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ tcg_gen_mul_i32(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+ t2 = load_reg(s, a->ra);
+ tcg_gen_sub_i32(t1, t2, t1);
+ tcg_temp_free_i32(t2);
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
+{
+ TCGv_i32 t0, t1, t2, t3;
+
+ t0 = load_reg(s, a->rm);
+ t1 = load_reg(s, a->rn);
+ if (uns) {
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ } else {
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+ }
+ if (add) {
+ t2 = load_reg(s, a->ra);
+ t3 = load_reg(s, a->rd);
+ tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ if (a->s) {
+ gen_logicq_cc(t0, t1);
+ }
+ store_reg(s, a->ra, t0);
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
+{
+ return op_mlal(s, a, true, false);
+}
+
+static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
+{
+ return op_mlal(s, a, false, false);
+}
+
+static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
+{
+ return op_mlal(s, a, true, true);
+}
+
+static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
+{
+ return op_mlal(s, a, false, true);
+}
+
+static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
+{
+ TCGv_i32 t0, t1, t2, zero;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rm);
+ t1 = load_reg(s, a->rn);
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ zero = tcg_const_i32(0);
+ t2 = load_reg(s, a->ra);
+ tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
+ tcg_temp_free_i32(t2);
+ t2 = load_reg(s, a->rd);
+ tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(zero);
+ store_reg(s, a->ra, t0);
+ store_reg(s, a->rd, t1);
+ return true;
+}
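+
+/*
+ * UMAAL computes Rn * Rm + Ra + Rd as a 64-bit value. This cannot
+ * overflow: the maximum is (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1,
+ * which is why the two add2 chains above need no carry-out handling.
+ */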
+
+/*
+ * Saturating addition and subtraction
+ */
+
+static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
+{
+ TCGv_i32 t0, t1;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_5TE) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rm);
+ t1 = load_reg(s, a->rn);
+ if (doub) {
+ gen_helper_add_saturate(t1, cpu_env, t1, t1);
+ }
+ if (add) {
+ gen_helper_add_saturate(t0, cpu_env, t0, t1);
+ } else {
+ gen_helper_sub_saturate(t0, cpu_env, t0, t1);
+ }
+ tcg_temp_free_i32(t1);
+ store_reg(s, a->rd, t0);
+ return true;
+}
+
+#define DO_QADDSUB(NAME, ADD, DOUB) \
+static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
+{ \
+ return op_qaddsub(s, a, ADD, DOUB); \
+}
+
+DO_QADDSUB(QADD, true, false)
+DO_QADDSUB(QSUB, false, false)
+DO_QADDSUB(QDADD, true, true)
+DO_QADDSUB(QDSUB, false, true)
+
+#undef DO_QADDSUB
+
+/*
+ * Halfword multiply and multiply accumulate
+ */
+
+static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
+ int add_long, bool nt, bool mt)
+{
+ TCGv_i32 t0, t1, tl, th;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_5TE) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rn);
+ t1 = load_reg(s, a->rm);
+ gen_mulxy(t0, t1, nt, mt);
+ tcg_temp_free_i32(t1);
+
+ switch (add_long) {
+ case 0:
+ store_reg(s, a->rd, t0);
+ break;
+ case 1:
+ t1 = load_reg(s, a->ra);
+ gen_helper_add_setq(t0, cpu_env, t0, t1);
+ tcg_temp_free_i32(t1);
+ store_reg(s, a->rd, t0);
+ break;
+ case 2:
+ tl = load_reg(s, a->ra);
+ th = load_reg(s, a->rd);
+ /* Sign-extend the 32-bit product to 64 bits. */
+ t1 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, t0, 31);
+ tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ store_reg(s, a->ra, tl);
+ store_reg(s, a->rd, th);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+#define DO_SMLAX(NAME, add, nt, mt) \
+static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
+{ \
+ return op_smlaxxx(s, a, add, nt, mt); \
+}
+
+DO_SMLAX(SMULBB, 0, 0, 0)
+DO_SMLAX(SMULBT, 0, 0, 1)
+DO_SMLAX(SMULTB, 0, 1, 0)
+DO_SMLAX(SMULTT, 0, 1, 1)
+
+DO_SMLAX(SMLABB, 1, 0, 0)
+DO_SMLAX(SMLABT, 1, 0, 1)
+DO_SMLAX(SMLATB, 1, 1, 0)
+DO_SMLAX(SMLATT, 1, 1, 1)
+
+DO_SMLAX(SMLALBB, 2, 0, 0)
+DO_SMLAX(SMLALBT, 2, 0, 1)
+DO_SMLAX(SMLALTB, 2, 1, 0)
+DO_SMLAX(SMLALTT, 2, 1, 1)
+
+#undef DO_SMLAX
+
+static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
+{
+ TCGv_i32 t0, t1;
+
+ if (!ENABLE_ARCH_5TE) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rn);
+ t1 = load_reg(s, a->rm);
+ /*
+ * Since the nominal result is product<47:16>, shift the 16-bit
+ * input up by 16 bits, so that the result is at product<63:32>.
+ */
+ if (mt) {
+ tcg_gen_andi_i32(t1, t1, 0xffff0000);
+ } else {
+ tcg_gen_shli_i32(t1, t1, 16);
+ }
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+ tcg_temp_free_i32(t0);
+ if (add) {
+ t0 = load_reg(s, a->ra);
+ gen_helper_add_setq(t1, cpu_env, t1, t0);
+ tcg_temp_free_i32(t0);
+ }
+ store_reg(s, a->rd, t1);
+ return true;
+}
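+
+/*
+ * E.g. for SMULWB with Rn = 0x00020000 and Rm<15:0> = 0x0004: the
+ * shifted multiply is 0x00020000 * 0x00040000 = 2^35, whose high word
+ * (product<63:32>) is 8 -- exactly product<47:16> of the unshifted
+ * 48-bit result.
+ */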
+
+#define DO_SMLAWX(NAME, add, mt) \
+static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
+{ \
+ return op_smlawx(s, a, add, mt); \
+}
+
+DO_SMLAWX(SMULWB, 0, 0)
+DO_SMLAWX(SMULWT, 0, 1)
+DO_SMLAWX(SMLAWB, 1, 0)
+DO_SMLAWX(SMLAWT, 1, 1)
+
+#undef DO_SMLAWX
+
+/*
+ * MSR (immediate) and hints
+ */
+
+static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
+{
+ /*
+ * When running single-threaded TCG code, use the helper to ensure that
+ * the next round-robin scheduled vCPU gets a crack. When running in
+ * MTTCG we don't generate jumps to the helper as it won't affect the
+ * scheduling of other vCPUs.
+ */
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_YIELD;
+ }
+ return true;
+}
+
+static bool trans_WFE(DisasContext *s, arg_WFE *a)
+{
+ /*
+ * When running single-threaded TCG code, use the helper to ensure that
+ * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
+ * just skip this instruction. Currently the SEV/SEVL instructions,
+ * which are *one* of many ways to wake the CPU from WFE, are not
+ * implemented so we can't sleep like WFI does.
+ */
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_WFE;
+ }
+ return true;
+}
+
+static bool trans_WFI(DisasContext *s, arg_WFI *a)
+{
+ /* For WFI, halt the vCPU until an IRQ. */
+ gen_set_pc_im(s, s->base.pc_next);
+ s->base.is_jmp = DISAS_WFI;
+ return true;
+}
+
+static bool trans_NOP(DisasContext *s, arg_NOP *a)
+{
+ return true;
+}
+
+static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
+{
+ uint32_t val = ror32(a->imm, a->rot * 2);
+ uint32_t mask = msr_mask(s, a->mask, a->r);
+
+ if (gen_set_psr_im(s, mask, a->r, val)) {
+ unallocated_encoding(s);
+ }
+ return true;
+}
+
+/*
+ * Cyclic Redundancy Check
+ */
+
+static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
+{
+ TCGv_i32 t1, t2, t3;
+
+ if (!dc_isar_feature(aa32_crc32, s)) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ switch (sz) {
+ case MO_8:
+ gen_uxtb(t2);
+ break;
+ case MO_16:
+ gen_uxth(t2);
+ break;
+ case MO_32:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ t3 = tcg_const_i32(1 << sz);
+ if (c) {
+ gen_helper_crc32c(t1, t1, t2, t3);
+ } else {
+ gen_helper_crc32(t1, t1, t2, t3);
+ }
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ store_reg(s, a->rd, t1);
+ return true;
+}
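+
+/*
+ * The third helper operand (1 << sz) is the operand width in bytes,
+ * so the helpers fold exactly one, two or four bytes of Rm into the
+ * CRC accumulator.
+ */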
+
+#define DO_CRC32(NAME, c, sz) \
+static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
+ { return op_crc32(s, a, c, sz); }
+
+DO_CRC32(CRC32B, false, MO_8)
+DO_CRC32(CRC32H, false, MO_16)
+DO_CRC32(CRC32W, false, MO_32)
+DO_CRC32(CRC32CB, true, MO_8)
+DO_CRC32(CRC32CH, true, MO_16)
+DO_CRC32(CRC32CW, true, MO_32)
+
+#undef DO_CRC32
+
+/*
+ * Miscellaneous instructions
+ */
+
+static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
+{
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ gen_mrs_banked(s, a->r, a->sysm, a->rd);
+ return true;
+}
+
+static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
+{
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ gen_msr_banked(s, a->r, a->sysm, a->rn);
+ return true;
+}
+
+static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
+{
+ TCGv_i32 tmp;
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (a->r) {
+ if (IS_USER(s)) {
+ unallocated_encoding(s);
+ return true;
+ }
+ tmp = load_cpu_field(spsr);
+ } else {
+ tmp = tcg_temp_new_i32();
+ gen_helper_cpsr_read(tmp, cpu_env);
+ }
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
+{
+ TCGv_i32 tmp;
+ uint32_t mask = msr_mask(s, a->mask, a->r);
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ tmp = load_reg(s, a->rn);
+ if (gen_set_psr(s, mask, a->r, tmp)) {
+ unallocated_encoding(s);
+ }
+ return true;
+}
+
+static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
+{
+ TCGv_i32 tmp;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ tmp = tcg_const_i32(a->sysm);
+ gen_helper_v7m_mrs(tmp, cpu_env, tmp);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
+{
+ TCGv_i32 addr, reg;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ addr = tcg_const_i32((a->mask << 10) | a->sysm);
+ reg = load_reg(s, a->rn);
+ gen_helper_v7m_msr(cpu_env, addr, reg);
+ tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(reg);
+ /* If we wrote to CONTROL, the EL might have changed */
+ gen_helper_rebuild_hflags_m32_newel(cpu_env);
+ gen_lookup_tb(s);
+ return true;
+}
+
+static bool trans_BX(DisasContext *s, arg_BX *a)
+{
+ if (!ENABLE_ARCH_4T) {
+ return false;
+ }
+ gen_bx_excret(s, load_reg(s, a->rm));
+ return true;
+}
+
+static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
+{
+ if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ /*
+ * v7A allows BXJ to be trapped via HSTR.TJDBX. We don't waste a
+ * TBFLAGS bit on a basically-never-happens case, so call a helper
+ * function to check for the trap and raise the exception if needed
+ * (passing it the register number for the syndrome value).
+ * v8A doesn't have this HSTR bit.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
+ arm_dc_feature(s, ARM_FEATURE_EL2) &&
+ s->current_el < 2 && s->ns) {
+ gen_helper_check_bxj_trap(cpu_env, tcg_constant_i32(a->rm));
+ }
+ /* Trivial implementation equivalent to bx. */
+ gen_bx(s, load_reg(s, a->rm));
+ return true;
+}
+
+static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
+{
+ TCGv_i32 tmp;
+
+ if (!ENABLE_ARCH_5) {
+ return false;
+ }
+ tmp = load_reg(s, a->rm);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+ gen_bx(s, tmp);
+ return true;
+}
+
+/*
+ * BXNS/BLXNS: only exist for v8M with the security extensions,
+ * and always UNDEF if NonSecure. We don't implement these in
+ * the user-only mode either (in theory you can use them from
+ * Secure User mode but they are too tied in to system emulation).
+ */
+static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
+{
+ if (!s->v8m_secure || IS_USER_ONLY) {
+ unallocated_encoding(s);
+ } else {
+ gen_bxns(s, a->rm);
+ }
+ return true;
+}
+
+static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
+{
+ if (!s->v8m_secure || IS_USER_ONLY) {
+ unallocated_encoding(s);
+ } else {
+ gen_blxns(s, a->rm);
+ }
+ return true;
+}
+
+static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
+{
+ TCGv_i32 tmp;
+
+ if (!ENABLE_ARCH_5) {
+ return false;
+ }
+ tmp = load_reg(s, a->rm);
+ tcg_gen_clzi_i32(tmp, tmp, 32);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_ERET(DisasContext *s, arg_ERET *a)
+{
+ TCGv_i32 tmp;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ unallocated_encoding(s);
+ return true;
+ }
+ if (s->current_el == 2) {
+ /* ERET from Hyp uses ELR_Hyp, not LR */
+ tmp = load_cpu_field(elr_el[2]);
+ } else {
+ tmp = load_reg(s, 14);
+ }
+ gen_exception_return(s, tmp);
+ return true;
+}
+
+static bool trans_HLT(DisasContext *s, arg_HLT *a)
+{
+ gen_hlt(s, a->imm);
+ return true;
+}
+
+static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
+{
+ if (!ENABLE_ARCH_5) {
+ return false;
+ }
+ /* BKPT is OK with ECI set and leaves it untouched */
+ s->eci_handled = true;
+ if (arm_dc_feature(s, ARM_FEATURE_M) &&
+ semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+ !IS_USER(s) &&
+#endif
+ (a->imm == 0xab)) {
+ gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+ } else {
+ gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
+ }
+ return true;
+}
+
+static bool trans_HVC(DisasContext *s, arg_HVC *a)
+{
+ if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ unallocated_encoding(s);
+ } else {
+ gen_hvc(s, a->imm);
+ }
+ return true;
+}
+
+static bool trans_SMC(DisasContext *s, arg_SMC *a)
+{
+ if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ unallocated_encoding(s);
+ } else {
+ gen_smc(s);
+ }
+ return true;
+}
+
+static bool trans_SG(DisasContext *s, arg_SG *a)
+{
+ if (!arm_dc_feature(s, ARM_FEATURE_M) ||
+ !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+ /*
+ * SG (v8M only)
+ * The bulk of the behaviour for this instruction is implemented
+ * in v7m_handle_execute_nsc(), which deals with the insn when
+ * it is executed by a CPU in non-secure state from memory
+ * which is Secure & NonSecure-Callable.
+ * Here we only need to handle the remaining cases:
+ * * in NS memory (including the "security extension not
+ * implemented" case) : NOP
+ * * in S memory but CPU already secure (clear IT bits)
+ * We know that the attribute for the memory this insn is
+ * in must match the current CPU state, because otherwise
+ * get_phys_addr_pmsav8 would have generated an exception.
+ */
+ if (s->v8m_secure) {
+ /* Like the IT insn, we don't need to generate any code */
+ s->condexec_cond = 0;
+ s->condexec_mask = 0;
+ }
+ return true;
+}
+
+static bool trans_TT(DisasContext *s, arg_TT *a)
+{
+ TCGv_i32 addr, tmp;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M) ||
+ !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return false;
+ }
+ if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
+ /* We UNDEF for these UNPREDICTABLE cases */
+ unallocated_encoding(s);
+ return true;
+ }
+ if (a->A && !s->v8m_secure) {
+ /* This case is UNDEFINED. */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ addr = load_reg(s, a->rn);
+ tmp = tcg_const_i32((a->A << 1) | a->T);
+ gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+/*
+ * Load/store register index
+ */
+
+static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
+{
+ ISSInfo ret;
+
+ /* ISS not valid if writeback */
+ if (p && !w) {
+ ret = rd;
+ if (s->base.pc_next - s->pc_curr == 2) {
+ ret |= ISSIs16Bit;
+ }
+ } else {
+ ret = ISSInvalid;
+ }
+ return ret;
+}
+
+static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
+{
+ TCGv_i32 addr = load_reg(s, a->rn);
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ if (a->p) {
+ TCGv_i32 ofs = load_reg(s, a->rm);
+ gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
+ if (a->u) {
+ tcg_gen_add_i32(addr, addr, ofs);
+ } else {
+ tcg_gen_sub_i32(addr, addr, ofs);
+ }
+ tcg_temp_free_i32(ofs);
+ }
+ return addr;
+}
+
+static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
+ TCGv_i32 addr, int address_offset)
+{
+ if (!a->p) {
+ TCGv_i32 ofs = load_reg(s, a->rm);
+ gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
+ if (a->u) {
+ tcg_gen_add_i32(addr, addr, ofs);
+ } else {
+ tcg_gen_sub_i32(addr, addr, ofs);
+ }
+ tcg_temp_free_i32(ofs);
+ } else if (!a->w) {
+ tcg_temp_free_i32(addr);
+ return;
+ }
+ tcg_gen_addi_i32(addr, addr, address_offset);
+ store_reg(s, a->rn, addr);
+}
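+
+/*
+ * Illustrative sketch, not part of this patch: how the P/U/W bits select
+ * the three AArch32 addressing forms that op_addr_rr_pre() and
+ * op_addr_rr_post() implement between them. 'base' stands in for Rn and
+ * 'ofs' for the already-shifted index value.
+ */
+static inline uint32_t a32_effective_addr_sketch(uint32_t *base, uint32_t ofs,
+ bool p, bool u, bool w)
+{
+ uint32_t delta = u ? ofs : -ofs;
+ /* P=1: offset or pre-indexed; P=0: post-indexed (access at old base) */
+ uint32_t addr = p ? *base + delta : *base;
+
+ if (!p || w) {
+ /* post-indexed, or pre-indexed with writeback */
+ *base += delta;
+ }
+ return addr;
+}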
+
+static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
+ MemOp mop, int mem_idx)
+{
+ ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
+ TCGv_i32 addr, tmp;
+
+ addr = op_addr_rr_pre(s, a);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
+ disas_set_da_iss(s, mop, issinfo);
+
+ /*
+ * Perform base writeback before the loaded value to
+ * ensure correct behavior with overlapping index registers.
+ */
+ op_addr_rr_post(s, a, addr, 0);
+ store_reg_from_load(s, a->rt, tmp);
+ return true;
+}
+
+static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
+ MemOp mop, int mem_idx)
+{
+ ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
+ TCGv_i32 addr, tmp;
+
+ /*
+ * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+ * is either UNPREDICTABLE or has defined behaviour
+ */
+ if (s->thumb && a->rn == 15) {
+ return false;
+ }
+
+ addr = op_addr_rr_pre(s, a);
+
+ tmp = load_reg(s, a->rt);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
+ disas_set_da_iss(s, mop, issinfo);
+ tcg_temp_free_i32(tmp);
+
+ op_addr_rr_post(s, a, addr, 0);
+ return true;
+}
+
+static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
+{
+ int mem_idx = get_mem_index(s);
+ TCGv_i32 addr, tmp;
+
+ if (!ENABLE_ARCH_5TE) {
+ return false;
+ }
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ addr = op_addr_rr_pre(s, a);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ store_reg(s, a->rt, tmp);
+
+ tcg_gen_addi_i32(addr, addr, 4);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ store_reg(s, a->rt + 1, tmp);
+
+ /* LDRD w/ base writeback is undefined if the registers overlap. */
+ op_addr_rr_post(s, a, addr, -4);
+ return true;
+}
+
+static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
+{
+ int mem_idx = get_mem_index(s);
+ TCGv_i32 addr, tmp;
+
+ if (!ENABLE_ARCH_5TE) {
+ return false;
+ }
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ addr = op_addr_rr_pre(s, a);
+
+ tmp = load_reg(s, a->rt);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+
+ tcg_gen_addi_i32(addr, addr, 4);
+
+ tmp = load_reg(s, a->rt + 1);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+
+ op_addr_rr_post(s, a, addr, -4);
+ return true;
+}
+
+/*
+ * Load/store immediate index
+ */
+
+static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
+{
+ int ofs = a->imm;
+
+ if (!a->u) {
+ ofs = -ofs;
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ /*
+ * Stackcheck. Here we know 'addr' is the current SP;
+ * U is set if we're moving SP up, else down. It is
+ * UNKNOWN whether the limit check triggers when SP starts
+ * below the limit and ends up above it; we chose to do so.
+ */
+ if (!a->u) {
+ TCGv_i32 newsp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
+ gen_helper_v8m_stackcheck(cpu_env, newsp);
+ tcg_temp_free_i32(newsp);
+ } else {
+ gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
+ }
+ }
+
+ return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
+}
+
+static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
+ TCGv_i32 addr, int address_offset)
+{
+ if (!a->p) {
+ if (a->u) {
+ address_offset += a->imm;
+ } else {
+ address_offset -= a->imm;
+ }
+ } else if (!a->w) {
+ tcg_temp_free_i32(addr);
+ return;
+ }
+ tcg_gen_addi_i32(addr, addr, address_offset);
+ store_reg(s, a->rn, addr);
+}
+
+static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
+ MemOp mop, int mem_idx)
+{
+ ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
+ TCGv_i32 addr, tmp;
+
+ addr = op_addr_ri_pre(s, a);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
+ disas_set_da_iss(s, mop, issinfo);
+
+ /*
+ * Perform base writeback before the loaded value to
+ * ensure correct behavior with overlapping index registers.
+ */
+ op_addr_ri_post(s, a, addr, 0);
+ store_reg_from_load(s, a->rt, tmp);
+ return true;
+}
+
+static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
+ MemOp mop, int mem_idx)
+{
+ ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
+ TCGv_i32 addr, tmp;
+
+ /*
+ * In Thumb encodings of stores Rn=1111 is UNDEF; for Arm it
+ * is either UNPREDICTABLE or has defined behaviour
+ */
+ if (s->thumb && a->rn == 15) {
+ return false;
+ }
+
+ addr = op_addr_ri_pre(s, a);
+
+ tmp = load_reg(s, a->rt);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
+ disas_set_da_iss(s, mop, issinfo);
+ tcg_temp_free_i32(tmp);
+
+ op_addr_ri_post(s, a, addr, 0);
+ return true;
+}
+
+static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
+{
+ int mem_idx = get_mem_index(s);
+ TCGv_i32 addr, tmp;
+
+ addr = op_addr_ri_pre(s, a);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ store_reg(s, a->rt, tmp);
+
+ tcg_gen_addi_i32(addr, addr, 4);
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ store_reg(s, rt2, tmp);
+
+ /* LDRD w/ base writeback is undefined if the registers overlap. */
+ op_addr_ri_post(s, a, addr, -4);
+ return true;
+}
+
+static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
+{
+ if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
+ return false;
+ }
+ return op_ldrd_ri(s, a, a->rt + 1);
+}
+
+static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
+{
+ arg_ldst_ri b = {
+ .u = a->u, .w = a->w, .p = a->p,
+ .rn = a->rn, .rt = a->rt, .imm = a->imm
+ };
+ return op_ldrd_ri(s, &b, a->rt2);
+}
+
+static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
+{
+ int mem_idx = get_mem_index(s);
+ TCGv_i32 addr, tmp;
+
+ addr = op_addr_ri_pre(s, a);
+
+ tmp = load_reg(s, a->rt);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+
+ tcg_gen_addi_i32(addr, addr, 4);
+
+ tmp = load_reg(s, rt2);
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+
+ op_addr_ri_post(s, a, addr, -4);
+ return true;
+}
+
+static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
+{
+ if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
+ return false;
+ }
+ return op_strd_ri(s, a, a->rt + 1);
+}
+
+static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
+{
+ arg_ldst_ri b = {
+ .u = a->u, .w = a->w, .p = a->p,
+ .rn = a->rn, .rt = a->rt, .imm = a->imm
+ };
+ return op_strd_ri(s, &b, a->rt2);
+}
+
+#define DO_LDST(NAME, WHICH, MEMOP) \
+static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
+{ \
+ return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
+} \
+static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
+{ \
+ return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
+} \
+static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
+{ \
+ return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
+} \
+static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
+{ \
+ return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
+}
+
+DO_LDST(LDR, load, MO_UL)
+DO_LDST(LDRB, load, MO_UB)
+DO_LDST(LDRH, load, MO_UW)
+DO_LDST(LDRSB, load, MO_SB)
+DO_LDST(LDRSH, load, MO_SW)
+
+DO_LDST(STR, store, MO_UL)
+DO_LDST(STRB, store, MO_UB)
+DO_LDST(STRH, store, MO_UW)
+
+#undef DO_LDST
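+
+/*
+ * For reference (illustrative, not part of this patch): each DO_LDST
+ * invocation above expands to four trans functions. DO_LDST(LDR, load,
+ * MO_UL), for example, produces code along these lines, with the 'T'
+ * variants (LDRT/STRT) using the unprivileged-access memory index:
+ *
+ * static bool trans_LDR_ri(DisasContext *s, arg_ldst_ri *a)
+ * {
+ * return op_load_ri(s, a, MO_UL, get_mem_index(s));
+ * }
+ * static bool trans_LDRT_ri(DisasContext *s, arg_ldst_ri *a)
+ * {
+ * return op_load_ri(s, a, MO_UL, get_a32_user_mem_index(s));
+ * }
+ * ...and likewise trans_LDR_rr/trans_LDRT_rr via op_load_rr.
+ */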
+
+/*
+ * Synchronization primitives
+ */
+
+static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
+{
+ TCGv_i32 addr, tmp;
+ TCGv taddr;
+
+ opc |= s->be_data;
+ addr = load_reg(s, a->rn);
+ taddr = gen_aa32_addr(s, addr, opc);
+ tcg_temp_free_i32(addr);
+
+ tmp = load_reg(s, a->rt2);
+ tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
+ tcg_temp_free(taddr);
+
+ store_reg(s, a->rt, tmp);
+ return true;
+}
+
+static bool trans_SWP(DisasContext *s, arg_SWP *a)
+{
+ return op_swp(s, a, MO_UL | MO_ALIGN);
+}
+
+static bool trans_SWPB(DisasContext *s, arg_SWP *a)
+{
+ return op_swp(s, a, MO_UB);
+}
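+
+/*
+ * Illustrative sketch, not part of this patch: the legacy SWP/SWPB insns
+ * are a plain atomic exchange, which is what tcg_gen_atomic_xchg_i32()
+ * provides above. In C11 terms (assuming <stdatomic.h>):
+ */
+static inline uint32_t swp_semantics_sketch(_Atomic uint32_t *mem,
+ uint32_t store_val)
+{
+ /* Rd gets the old memory value; memory gets Rt2, in one atomic step */
+ return atomic_exchange(mem, store_val);
+}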
+
+/*
+ * Load/Store Exclusive and Load-Acquire/Store-Release
+ */
+
+static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
+{
+ TCGv_i32 addr;
+ /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
+ bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
+
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rd == 15 || a->rn == 15 || a->rt == 15
+ || a->rd == a->rn || a->rd == a->rt
+ || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
+ || (mop == MO_64
+ && (a->rt2 == 15
+ || a->rd == a->rt2
+ || (!v8a && s->thumb && a->rt2 == 13)))) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ if (rel) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+
+ addr = tcg_temp_local_new_i32();
+ load_reg_var(s, addr, a->rn);
+ tcg_gen_addi_i32(addr, addr, a->imm);
+
+ gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
+ tcg_temp_free_i32(addr);
+ return true;
+}
+
+static bool trans_STREX(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ return op_strex(s, a, MO_32, false);
+}
+
+static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_6K) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ a->rt2 = a->rt + 1;
+ return op_strex(s, a, MO_64, false);
+}
+
+static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
+{
+ return op_strex(s, a, MO_64, false);
+}
+
+static bool trans_STREXB(DisasContext *s, arg_STREX *a)
+{
+ if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
+ return false;
+ }
+ return op_strex(s, a, MO_8, false);
+}
+
+static bool trans_STREXH(DisasContext *s, arg_STREX *a)
+{
+ if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
+ return false;
+ }
+ return op_strex(s, a, MO_16, false);
+}
+
+static bool trans_STLEX(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_strex(s, a, MO_32, true);
+}
+
+static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ a->rt2 = a->rt + 1;
+ return op_strex(s, a, MO_64, true);
+}
+
+static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_strex(s, a, MO_64, true);
+}
+
+static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_strex(s, a, MO_8, true);
+}
+
+static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_strex(s, a, MO_16, true);
+}
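+
+/*
+ * Illustrative sketch, not part of this patch: guest code pairs LDREX with
+ * STREX in a retry loop, which is why gen_store_exclusive() must report
+ * success or failure in Rd. The classic atomic-increment pattern, in C11
+ * terms (assuming <stdatomic.h>):
+ */
+static inline uint32_t ldrex_strex_usage_sketch(_Atomic uint32_t *p)
+{
+ uint32_t old, new_val;
+ do {
+ old = atomic_load(p); /* LDREX: load and mark exclusive */
+ new_val = old + 1;
+ } while (!atomic_compare_exchange_weak(p, &old, new_val));
+ /* STREX: store only if still exclusive, else retry */
+ return new_val;
+}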
+
+static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
+{
+ TCGv_i32 addr, tmp;
+
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rn == 15 || a->rt == 15) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ addr = load_reg(s, a->rn);
+ tmp = load_reg(s, a->rt);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
+ disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+ return true;
+}
+
+static bool trans_STL(DisasContext *s, arg_STL *a)
+{
+ return op_stl(s, a, MO_UL);
+}
+
+static bool trans_STLB(DisasContext *s, arg_STL *a)
+{
+ return op_stl(s, a, MO_UB);
+}
+
+static bool trans_STLH(DisasContext *s, arg_STL *a)
+{
+ return op_stl(s, a, MO_UW);
+}
+
+static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
+{
+ TCGv_i32 addr;
+ /* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
+ bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
+
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rn == 15 || a->rt == 15
+ || (!v8a && s->thumb && a->rt == 13)
+ || (mop == MO_64
+ && (a->rt2 == 15 || a->rt == a->rt2
+ || (!v8a && s->thumb && a->rt2 == 13)))) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ addr = tcg_temp_local_new_i32();
+ load_reg_var(s, addr, a->rn);
+ tcg_gen_addi_i32(addr, addr, a->imm);
+
+ gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
+ tcg_temp_free_i32(addr);
+
+ if (acq) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ return true;
+}
+
+static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_32, false);
+}
+
+static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_6K) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ a->rt2 = a->rt + 1;
+ return op_ldrex(s, a, MO_64, false);
+}
+
+static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
+{
+ return op_ldrex(s, a, MO_64, false);
+}
+
+static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
+{
+ if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_8, false);
+}
+
+static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
+{
+ if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_16, false);
+}
+
+static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_32, true);
+}
+
+static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rt & 1) {
+ unallocated_encoding(s);
+ return true;
+ }
+ a->rt2 = a->rt + 1;
+ return op_ldrex(s, a, MO_64, true);
+}
+
+static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_64, true);
+}
+
+static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_8, true);
+}
+
+static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
+{
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ return op_ldrex(s, a, MO_16, true);
+}
+
+static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
+{
+ TCGv_i32 addr, tmp;
+
+ if (!ENABLE_ARCH_8) {
+ return false;
+ }
+ /* We UNDEF for these UNPREDICTABLE cases. */
+ if (a->rn == 15 || a->rt == 15) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ addr = load_reg(s, a->rn);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN);
+ disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
+ tcg_temp_free_i32(addr);
+
+ store_reg(s, a->rt, tmp);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ return true;
+}
+
+static bool trans_LDA(DisasContext *s, arg_LDA *a)
+{
+ return op_lda(s, a, MO_UL);
+}
+
+static bool trans_LDAB(DisasContext *s, arg_LDA *a)
+{
+ return op_lda(s, a, MO_UB);
+}
+
+static bool trans_LDAH(DisasContext *s, arg_LDA *a)
+{
+ return op_lda(s, a, MO_UW);
+}
+
+/*
+ * Media instructions
+ */
+
+static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
+{
+ TCGv_i32 t1, t2;
+
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ gen_helper_usad8(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+ if (a->ra != 15) {
+ t2 = load_reg(s, a->ra);
+ tcg_gen_add_i32(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+ }
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
+{
+ TCGv_i32 tmp;
+ int width = a->widthm1 + 1;
+ int shift = a->lsb;
+
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+ if (shift + width > 32) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ tmp = load_reg(s, a->rn);
+ if (u) {
+ tcg_gen_extract_i32(tmp, tmp, shift, width);
+ } else {
+ tcg_gen_sextract_i32(tmp, tmp, shift, width);
+ }
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
+{
+ return op_bfx(s, a, false);
+}
+
+static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
+{
+ return op_bfx(s, a, true);
+}
+
+static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
+{
+ TCGv_i32 tmp;
+ int msb = a->msb, lsb = a->lsb;
+ int width;
+
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+ if (msb < lsb) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ unallocated_encoding(s);
+ return true;
+ }
+
+ width = msb + 1 - lsb;
+ if (a->rn == 15) {
+ /* BFC */
+ tmp = tcg_const_i32(0);
+ } else {
+ /* BFI */
+ tmp = load_reg(s, a->rn);
+ }
+ if (width != 32) {
+ TCGv_i32 tmp2 = load_reg(s, a->rd);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, a->rd, tmp);
+ return true;
+}
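+
+/*
+ * Illustrative sketch, not part of this patch: the bitfield insns above map
+ * directly onto TCG's extract and deposit operations. In plain C, for a
+ * 'width'-bit field at bit position 'lsb' (width >= 1):
+ */
+static inline uint32_t ubfx_sketch(uint32_t rn, int lsb, int width)
+{
+ return (rn >> lsb) & (0xffffffffu >> (32 - width));
+}
+
+static inline uint32_t bfi_sketch(uint32_t rd, uint32_t rn, int lsb, int width)
+{
+ uint32_t mask = (0xffffffffu >> (32 - width)) << lsb;
+ /* BFC is BFI with rn == 0 */
+ return (rd & ~mask) | ((rn << lsb) & mask);
+}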
+
+static bool trans_UDF(DisasContext *s, arg_UDF *a)
+{
+ unallocated_encoding(s);
+ return true;
+}
+
+/*
+ * Parallel addition and subtraction
+ */
+
+static bool op_par_addsub(DisasContext *s, arg_rrr *a,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 t0, t1;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rn);
+ t1 = load_reg(s, a->rm);
+
+ gen(t0, t0, t1);
+
+ tcg_temp_free_i32(t1);
+ store_reg(s, a->rd, t0);
+ return true;
+}
+
+static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
+ void (*gen)(TCGv_i32, TCGv_i32,
+ TCGv_i32, TCGv_ptr))
+{
+ TCGv_i32 t0, t1;
+ TCGv_ptr ge;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t0 = load_reg(s, a->rn);
+ t1 = load_reg(s, a->rm);
+
+ ge = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
+ gen(t0, t0, t1, ge);
+
+ tcg_temp_free_ptr(ge);
+ tcg_temp_free_i32(t1);
+ store_reg(s, a->rd, t0);
+ return true;
+}
+
+#define DO_PAR_ADDSUB(NAME, helper) \
+static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
+{ \
+ return op_par_addsub(s, a, helper); \
+}
+
+#define DO_PAR_ADDSUB_GE(NAME, helper) \
+static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
+{ \
+ return op_par_addsub_ge(s, a, helper); \
+}
+
+DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
+DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
+DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
+DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
+DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
+DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
+
+DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
+DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
+DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
+DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
+DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
+DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
+
+DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
+DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
+DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
+DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
+DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
+DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
+
+DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
+DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
+DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
+DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
+DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
+DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
+
+DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
+DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
+DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
+DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
+DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
+DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
+
+DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
+DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
+DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
+DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
+DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
+DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
+
+#undef DO_PAR_ADDSUB
+#undef DO_PAR_ADDSUB_GE
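+
+/*
+ * Illustrative sketch, not part of this patch: the lane-wise behaviour the
+ * helpers above implement, using UADD8 as the example. Each byte lane is
+ * added independently; for the GE-setting variants, GE[i] records the
+ * per-lane carry so a later SEL can pick results lane by lane.
+ */
+static inline uint32_t uadd8_sketch(uint32_t rn, uint32_t rm, uint32_t *ge)
+{
+ uint32_t res = 0;
+ *ge = 0;
+ for (int i = 0; i < 4; i++) {
+ uint32_t sum = ((rn >> (8 * i)) & 0xff) + ((rm >> (8 * i)) & 0xff);
+ res |= (sum & 0xff) << (8 * i);
+ if (sum >= 0x100) {
+ *ge |= 1 << i; /* unsigned carry out of this lane */
+ }
+ }
+ return res;
+}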
+
+/*
+ * Packing, unpacking, saturation, and reversal
+ */
+
+static bool trans_PKH(DisasContext *s, arg_PKH *a)
+{
+ TCGv_i32 tn, tm;
+ int shift = a->imm;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ tn = load_reg(s, a->rn);
+ tm = load_reg(s, a->rm);
+ if (a->tb) {
+ /* PKHTB */
+ if (shift == 0) {
+ shift = 31;
+ }
+ tcg_gen_sari_i32(tm, tm, shift);
+ tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
+ } else {
+ /* PKHBT */
+ tcg_gen_shli_i32(tm, tm, shift);
+ tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
+ }
+ tcg_temp_free_i32(tm);
+ store_reg(s, a->rd, tn);
+ return true;
+}
+
+static bool op_sat(DisasContext *s, arg_sat *a,
+ void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 tmp, satimm;
+ int shift = a->imm;
+
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+
+ tmp = load_reg(s, a->rn);
+ if (a->sh) {
+ tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
+ } else {
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+
+ satimm = tcg_const_i32(a->satimm);
+ gen(tmp, cpu_env, tmp, satimm);
+ tcg_temp_free_i32(satimm);
+
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_SSAT(DisasContext *s, arg_sat *a)
+{
+ return op_sat(s, a, gen_helper_ssat);
+}
+
+static bool trans_USAT(DisasContext *s, arg_sat *a)
+{
+ return op_sat(s, a, gen_helper_usat);
+}
+
+static bool trans_SSAT16(DisasContext *s, arg_sat *a)
+{
+ if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ return false;
+ }
+ return op_sat(s, a, gen_helper_ssat16);
+}
+
+static bool trans_USAT16(DisasContext *s, arg_sat *a)
+{
+ if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ return false;
+ }
+ return op_sat(s, a, gen_helper_usat16);
+}
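+
+/*
+ * Illustrative sketch, not part of this patch: the clamping the ssat/usat
+ * helpers perform. SSAT #n clamps to the signed n-bit range and sets
+ * FPSCR.Q (reported here via *q) if any clamping happened.
+ */
+static inline int32_t ssat_sketch(int32_t val, int n, bool *q)
+{
+ int32_t max = (int32_t)((1u << (n - 1)) - 1);
+ int32_t min = -max - 1;
+
+ if (val > max) {
+ *q = true;
+ return max;
+ }
+ if (val < min) {
+ *q = true;
+ return min;
+ }
+ return val;
+}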
+
+static bool op_xta(DisasContext *s, arg_rrr_rot *a,
+ void (*gen_extract)(TCGv_i32, TCGv_i32),
+ void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+
+ tmp = load_reg(s, a->rm);
+ /*
+ * TODO: In many cases we could do a shift instead of a rotate.
+ * Combined with a simple extend, that becomes an extract.
+ */
+ tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
+ gen_extract(tmp, tmp);
+
+ if (a->rn != 15) {
+ TCGv_i32 tmp2 = load_reg(s, a->rn);
+ gen_add(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
+{
+ return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
+}
+
+static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
+{
+ return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
+}
+
+static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
+{
+ if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ return false;
+ }
+ return op_xta(s, a, gen_helper_sxtb16, gen_add16);
+}
+
+static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
+{
+ return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
+}
+
+static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
+{
+ return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
+}
+
+static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
+{
+ if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ return false;
+ }
+ return op_xta(s, a, gen_helper_uxtb16, gen_add16);
+}
+
+static bool trans_SEL(DisasContext *s, arg_rrr *a)
+{
+ TCGv_i32 t1, t2, t3;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ t3 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
+ gen_helper_sel_flags(t1, t3, t1, t2);
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool op_rr(DisasContext *s, arg_rr *a,
+ void (*gen)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ tmp = load_reg(s, a->rm);
+ gen(tmp, tmp);
+ store_reg(s, a->rd, tmp);
+ return true;
+}
+
+static bool trans_REV(DisasContext *s, arg_rr *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ return op_rr(s, a, tcg_gen_bswap32_i32);
+}
+
+static bool trans_REV16(DisasContext *s, arg_rr *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ return op_rr(s, a, gen_rev16);
+}
+
+static bool trans_REVSH(DisasContext *s, arg_rr *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ return op_rr(s, a, gen_revsh);
+}
+
+static bool trans_RBIT(DisasContext *s, arg_rr *a)
+{
+ if (!ENABLE_ARCH_6T2) {
+ return false;
+ }
+ return op_rr(s, a, gen_helper_rbit);
+}
+
+/*
+ * Signed multiply, signed and unsigned divide
+ */
+
+static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
+{
+ TCGv_i32 t1, t2;
+
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ if (m_swap) {
+ gen_swap_half(t2, t2);
+ }
+ gen_smul_dual(t1, t2);
+
+ if (sub) {
+ /*
+ * This subtraction cannot overflow, so we can do a simple
+ * 32-bit subtraction and then a possible 32-bit saturating
+ * addition of Ra.
+ */
+ tcg_gen_sub_i32(t1, t1, t2);
+ tcg_temp_free_i32(t2);
+
+ if (a->ra != 15) {
+ t2 = load_reg(s, a->ra);
+ gen_helper_add_setq(t1, cpu_env, t1, t2);
+ tcg_temp_free_i32(t2);
+ }
+ } else if (a->ra == 15) {
+ /* Single saturation-checking addition */
+ gen_helper_add_setq(t1, cpu_env, t1, t2);
+ tcg_temp_free_i32(t2);
+ } else {
+ /*
+ * We need to add the products and Ra together and then
+ * determine whether the final result overflowed. Doing
+ * this as two separate add-and-check-overflow steps incorrectly
+ * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
+ * Do all the arithmetic at 64-bits and then check for overflow.
+ */
+ TCGv_i64 p64, q64;
+ TCGv_i32 t3, qf, one;
+
+ p64 = tcg_temp_new_i64();
+ q64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(p64, t1);
+ tcg_gen_ext_i32_i64(q64, t2);
+ tcg_gen_add_i64(p64, p64, q64);
+ load_reg_var(s, t2, a->ra);
+ tcg_gen_ext_i32_i64(q64, t2);
+ tcg_gen_add_i64(p64, p64, q64);
+ tcg_temp_free_i64(q64);
+
+ tcg_gen_extr_i64_i32(t1, t2, p64);
+ tcg_temp_free_i64(p64);
+ /*
+ * t1 is the low half of the result which goes into Rd.
+ * We have overflow and must set Q if the high half (t2)
+ * is different from the sign-extension of t1.
+ */
+ t3 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t3, t1, 31);
+ qf = load_cpu_field(QF);
+ one = tcg_constant_i32(1);
+ tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
+ store_cpu_field(qf, QF);
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ }
+ store_reg(s, a->rd, t1);
+ return true;
+}
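+
+/*
+ * Illustrative sketch, not part of this patch: the overflow test used in
+ * op_smlad() above. After computing the full sum at 64 bits, the 32-bit
+ * result overflowed exactly when the high half differs from the
+ * sign-extension of the low half.
+ */
+static inline bool smlad_overflowed_sketch(int64_t sum64)
+{
+ int32_t lo = (int32_t)sum64; /* the value written to Rd */
+ int32_t hi = (int32_t)(sum64 >> 32);
+ int32_t lo_sign = lo < 0 ? -1 : 0;
+ return hi != lo_sign; /* Q must be set if they differ */
+}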
+
+static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlad(s, a, false, false);
+}
+
+static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlad(s, a, true, false);
+}
+
+static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlad(s, a, false, true);
+}
+
+static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlad(s, a, true, true);
+}
+
+static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
+{
+ TCGv_i32 t1, t2;
+ TCGv_i64 l1, l2;
+
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ if (m_swap) {
+ gen_swap_half(t2, t2);
+ }
+ gen_smul_dual(t1, t2);
+
+ l1 = tcg_temp_new_i64();
+ l2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(l1, t1);
+ tcg_gen_ext_i32_i64(l2, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ if (sub) {
+ tcg_gen_sub_i64(l1, l1, l2);
+ } else {
+ tcg_gen_add_i64(l1, l1, l2);
+ }
+ tcg_temp_free_i64(l2);
+
+ gen_addq(s, l1, a->ra, a->rd);
+ gen_storeq_reg(s, a->ra, a->rd, l1);
+ tcg_temp_free_i64(l1);
+ return true;
+}
+
+static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlald(s, a, false, false);
+}
+
+static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlald(s, a, true, false);
+}
+
+static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlald(s, a, false, true);
+}
+
+static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
+{
+ return op_smlald(s, a, true, true);
+}
+
+static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
+{
+ TCGv_i32 t1, t2;
+
+ if (s->thumb
+ ? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
+ : !ENABLE_ARCH_6) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ tcg_gen_muls2_i32(t2, t1, t1, t2);
+
+ if (a->ra != 15) {
+ TCGv_i32 t3 = load_reg(s, a->ra);
+ if (sub) {
+ /*
+ * For SMMLS, we need a 64-bit subtract: a borrow can be caused
+ * by a non-zero multiplicand lowpart, and we need the correct
+ * result lowpart for rounding.
+ */
+ TCGv_i32 zero = tcg_const_i32(0);
+ tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
+ tcg_temp_free_i32(zero);
+ } else {
+ tcg_gen_add_i32(t1, t1, t3);
+ }
+ tcg_temp_free_i32(t3);
+ }
+ if (round) {
+ /*
+ * Adding 0x80000000 to the 64-bit quantity means that we have
+ * a carry into the high word when the low word has the msb set.
+ */
+ tcg_gen_shri_i32(t2, t2, 31);
+ tcg_gen_add_i32(t1, t1, t2);
+ }
+ tcg_temp_free_i32(t2);
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
+{
+ return op_smmla(s, a, false, false);
+}
+
+static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
+{
+ return op_smmla(s, a, true, false);
+}
+
+static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
+{
+ return op_smmla(s, a, false, true);
+}
+
+static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
+{
+ return op_smmla(s, a, true, true);
+}
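+
+/*
+ * Illustrative sketch, not part of this patch: the rounding step in
+ * op_smmla(). Adding 0x80000000 to the 64-bit product can only carry into
+ * the high word via bit 31 of the low word, so adding the low word's msb
+ * to the high word (the shri/add pair above) is equivalent.
+ */
+static inline int32_t smmlar_round_sketch(int32_t hi, uint32_t lo)
+{
+ return hi + (lo >> 31); /* high word of (value + 0x80000000) */
+}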
+
+static bool op_div(DisasContext *s, arg_rrr *a, bool u)
+{
+ TCGv_i32 t1, t2;
+
+ if (s->thumb
+ ? !dc_isar_feature(aa32_thumb_div, s)
+ : !dc_isar_feature(aa32_arm_div, s)) {
+ return false;
+ }
+
+ t1 = load_reg(s, a->rn);
+ t2 = load_reg(s, a->rm);
+ if (u) {
+ gen_helper_udiv(t1, cpu_env, t1, t2);
+ } else {
+ gen_helper_sdiv(t1, cpu_env, t1, t2);
+ }
+ tcg_temp_free_i32(t2);
+ store_reg(s, a->rd, t1);
+ return true;
+}
+
+static bool trans_SDIV(DisasContext *s, arg_rrr *a)
+{
+ return op_div(s, a, false);
+}
+
+static bool trans_UDIV(DisasContext *s, arg_rrr *a)
+{
+ return op_div(s, a, true);
+}
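+
+/*
+ * Illustrative sketch, not part of this patch: AArch32 SDIV/UDIV do not
+ * trap on A-profile; the helpers called above implement the architected
+ * edge cases (M-profile may instead take a UsageFault on divide-by-zero
+ * when CCR.DIV_0_TRP is set, which is why the helpers get cpu_env).
+ */
+static inline int32_t a32_sdiv_sketch(int32_t n, int32_t m)
+{
+ if (m == 0) {
+ return 0; /* architected divide-by-zero result */
+ }
+ if (n == INT32_MIN && m == -1) {
+ return INT32_MIN; /* the overflow case is defined to wrap */
+ }
+ return n / m;
+}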
+
+/*
+ * Block data transfer
+ */
+
+static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
+{
+ TCGv_i32 addr = load_reg(s, a->rn);
+
+ if (a->b) {
+ if (a->i) {
+ /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ } else if (!a->i && n != 1) {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+
+ if (s->v8m_stackcheck && a->rn == 13 && a->w) {
+ /*
+ * If the writeback is incrementing SP rather than
+ * decrementing it, and the initial SP is below the
+ * stack limit but the final written-back SP would
+ * be above, then we must not perform any memory
+ * accesses, but it is IMPDEF whether we generate
+ * an exception. We choose to do so in this case.
+ * At this point 'addr' is the lowest address, so
+ * either the original SP (if incrementing) or our
+ * final SP (if decrementing), so that's what we check.
+ */
+ gen_helper_v8m_stackcheck(cpu_env, addr);
+ }
+
+ return addr;
+}
+
+static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
+ TCGv_i32 addr, int n)
+{
+ if (a->w) {
+ /* write back */
+ if (!a->b) {
+ if (a->i) {
+ /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ } else if (!a->i && n != 1) {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+}
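+
+/*
+ * Illustrative sketch, not part of this patch: where the pre/post
+ * adjustments above come from. For an n-register LDM/STM, all four
+ * addressing modes transfer the same ascending block of words; only the
+ * start address (below) and the final writeback value (Rn +/- 4n) differ.
+ */
+static inline uint32_t block_start_addr_sketch(uint32_t rn, int n,
+ bool b, bool i)
+{
+ if (i) {
+ return b ? rn + 4 : rn; /* IB vs IA */
+ } else {
+ return b ? rn - 4 * n : rn - 4 * (n - 1); /* DB vs DA */
+ }
+}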
+
+static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
+{
+ int i, j, n, list, mem_idx;
+ bool user = a->u;
+ TCGv_i32 addr, tmp, tmp2;
+
+ if (user) {
+ /* STM (user) */
+ if (IS_USER(s)) {
+ /* Only usable in supervisor mode. */
+ unallocated_encoding(s);
+ return true;
+ }
+ }
+
+ list = a->list;
+ n = ctpop16(list);
+ if (n < min_n || a->rn == 15) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ s->eci_handled = true;
+
+ addr = op_addr_block_pre(s, a, n);
+ mem_idx = get_mem_index(s);
+
+ for (i = j = 0; i < 16; i++) {
+ if (!(list & (1 << i))) {
+ continue;
+ }
+
+ if (user && i != 15) {
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_const_i32(i);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ tmp = load_reg(s, i);
+ }
+ gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ tcg_temp_free_i32(tmp);
+
+ /* No need to add after the last transfer. */
+ if (++j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ op_addr_block_post(s, a, addr, n);
+ clear_eci_state(s);
+ return true;
+}
+
+static bool trans_STM(DisasContext *s, arg_ldst_block *a)
+{
+ /* BitCount(list) < 1 is UNPREDICTABLE */
+ return op_stm(s, a, 1);
+}
+
+static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
+{
+ /* Writeback register in register list is UNPREDICTABLE for T32. */
+ if (a->w && (a->list & (1 << a->rn))) {
+ unallocated_encoding(s);
+ return true;
+ }
+ /* BitCount(list) < 2 is UNPREDICTABLE */
+ return op_stm(s, a, 2);
+}
+
+static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
+{
+ int i, j, n, list, mem_idx;
+ bool loaded_base;
+ bool user = a->u;
+ bool exc_return = false;
+ TCGv_i32 addr, tmp, tmp2, loaded_var;
+
+ if (user) {
+ /* LDM (user), LDM (exception return) */
+ if (IS_USER(s)) {
+ /* Only usable in supervisor mode. */
+ unallocated_encoding(s);
+ return true;
+ }
+ if (extract32(a->list, 15, 1)) {
+ exc_return = true;
+ user = false;
+ } else {
+ /* LDM (user) does not allow writeback. */
+ if (a->w) {
+ unallocated_encoding(s);
+ return true;
+ }
+ }
+ }
+
+ list = a->list;
+ n = ctpop16(list);
+ if (n < min_n || a->rn == 15) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ s->eci_handled = true;
+
+ addr = op_addr_block_pre(s, a, n);
+ mem_idx = get_mem_index(s);
+ loaded_base = false;
+ loaded_var = NULL;
+
+ for (i = j = 0; i < 16; i++) {
+ if (!(list & (1 << i))) {
+ continue;
+ }
+
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN);
+ if (user) {
+ tmp2 = tcg_const_i32(i);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ } else if (i == a->rn) {
+ loaded_var = tmp;
+ loaded_base = true;
+ } else if (i == 15 && exc_return) {
+ store_pc_exc_ret(s, tmp);
+ } else {
+ store_reg_from_load(s, i, tmp);
+ }
+
+ /* No need to add after the last transfer. */
+ if (++j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ op_addr_block_post(s, a, addr, n);
+
+ if (loaded_base) {
+ /* Note that we reject base == pc above. */
+ store_reg(s, a->rn, loaded_var);
+ }
+
+ if (exc_return) {
+ /* Restore CPSR from SPSR. */
+ tmp = load_cpu_field(spsr);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_cpsr_write_eret(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ /* Must exit loop to check un-masked IRQs */
+ s->base.is_jmp = DISAS_EXIT;
+ }
+ clear_eci_state(s);
+ return true;
+}
+
+static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
+{
+ /*
+ * Writeback register in register list is UNPREDICTABLE
+ * for ArchVersion() >= 7. Prior to v7, A32 would write
+ * an UNKNOWN value to the base register.
+ */
+ if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
+ unallocated_encoding(s);
+ return true;
+ }
+ /* BitCount(list) < 1 is UNPREDICTABLE */
+ return do_ldm(s, a, 1);
+}
+
+static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
+{
+ /* Writeback register in register list is UNPREDICTABLE for T32. */
+ if (a->w && (a->list & (1 << a->rn))) {
+ unallocated_encoding(s);
+ return true;
+ }
+ /* BitCount(list) < 2 is UNPREDICTABLE */
+ return do_ldm(s, a, 2);
+}
+
+static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
+{
+ /* Writeback is conditional on the base register not being loaded. */
+ a->w = !(a->list & (1 << a->rn));
+ /* BitCount(list) < 1 is UNPREDICTABLE */
+ return do_ldm(s, a, 1);
+}
+
+static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
+{
+ int i;
+ TCGv_i32 zero;
+
+ if (!dc_isar_feature(aa32_m_sec_state, s)) {
+ return false;
+ }
+
+ if (extract32(a->list, 13, 1)) {
+ return false;
+ }
+
+ if (!a->list) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ return false;
+ }
+
+ s->eci_handled = true;
+
+ zero = tcg_const_i32(0);
+ for (i = 0; i < 15; i++) {
+ if (extract32(a->list, i, 1)) {
+ /* Clear R[i] */
+ tcg_gen_mov_i32(cpu_R[i], zero);
+ }
+ }
+ if (extract32(a->list, 15, 1)) {
+ /*
+ * Clear APSR (by calling the MSR helper with the same argument
+ * as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
+ */
+ TCGv_i32 maskreg = tcg_const_i32(0xc << 8);
+ gen_helper_v7m_msr(cpu_env, maskreg, zero);
+ tcg_temp_free_i32(maskreg);
+ }
+ tcg_temp_free_i32(zero);
+ clear_eci_state(s);
+ return true;
+}
+
+/*
+ * Branch, branch with link
+ */
+
+static bool trans_B(DisasContext *s, arg_i *a)
+{
+ gen_jmp(s, read_pc(s) + a->imm);
+ return true;
+}
+
+static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
+{
+ /* This has cond from encoding, required to be outside IT block. */
+ if (a->cond >= 0xe) {
+ return false;
+ }
+ if (s->condexec_mask) {
+ unallocated_encoding(s);
+ return true;
+ }
+ arm_skip_unless(s, a->cond);
+ gen_jmp(s, read_pc(s) + a->imm);
+ return true;
+}
+
+static bool trans_BL(DisasContext *s, arg_i *a)
+{
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+ gen_jmp(s, read_pc(s) + a->imm);
+ return true;
+}
+
+static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
+{
+ /*
+ * BLX <imm> would be useless on M-profile; the encoding space
+ * is used for other insns from v8.1M onward, and UNDEFs before that.
+ */
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+
+ /* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
+ if (s->thumb && (a->imm & 2)) {
+ return false;
+ }
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
+ store_cpu_field_constant(!s->thumb, thumb);
+ gen_jmp(s, (read_pc(s) & ~3) + a->imm);
+ return true;
+}
+
+static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
+{
+ assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+ tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
+ return true;
+}
+
+static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+ tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+ gen_bx(s, tmp);
+ return true;
+}
+
+static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
+{
+ TCGv_i32 tmp;
+
+ assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
+ if (!ENABLE_ARCH_5) {
+ return false;
+ }
+ tmp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
+ tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+ tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
+ gen_bx(s, tmp);
+ return true;
+}
+
+static bool trans_BF(DisasContext *s, arg_BF *a)
+{
+ /*
+ * M-profile branch future insns. The architecture permits an
+ * implementation to implement these as NOPs (equivalent to
+ * discarding the LO_BRANCH_INFO cache immediately), and we
+ * take that IMPDEF option because for QEMU a "real" implementation
+ * would be complicated and wouldn't execute any faster.
+ */
+ if (!dc_isar_feature(aa32_lob, s)) {
+ return false;
+ }
+ if (a->boff == 0) {
+ /* SEE "Related encodings" (loop insns) */
+ return false;
+ }
+ /* Handle as NOP */
+ return true;
+}
+
+static bool trans_DLS(DisasContext *s, arg_DLS *a)
+{
+ /* M-profile low-overhead loop start */
+ TCGv_i32 tmp;
+
+ if (!dc_isar_feature(aa32_lob, s)) {
+ return false;
+ }
+ if (a->rn == 13 || a->rn == 15) {
+ /*
+ * For DLSTP rn == 15 is a related encoding (LCTP); the
+ * other cases caught by this condition are all
+ * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
+ */
+ return false;
+ }
+
+ if (a->size != 4) {
+ /* DLSTP */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+ }
+
+ /* Not a while loop: set LR to the count, and set LTPSIZE for DLSTP */
+ tmp = load_reg(s, a->rn);
+ store_reg(s, 14, tmp);
+ if (a->size != 4) {
+ /* DLSTP: set FPSCR.LTPSIZE */
+ tmp = tcg_const_i32(a->size);
+ store_cpu_field(tmp, v7m.ltpsize);
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ }
+ return true;
+}
+
+static bool trans_WLS(DisasContext *s, arg_WLS *a)
+{
+ /* M-profile low-overhead while-loop start */
+ TCGv_i32 tmp;
+ TCGLabel *nextlabel;
+
+ if (!dc_isar_feature(aa32_lob, s)) {
+ return false;
+ }
+ if (a->rn == 13 || a->rn == 15) {
+ /*
+ * For WLSTP rn == 15 is a related encoding (LE); the
+ * other cases caught by this condition are all
+ * CONSTRAINED UNPREDICTABLE: we choose to UNDEF
+ */
+ return false;
+ }
+ if (s->condexec_mask) {
+ /*
+ * WLS in an IT block is CONSTRAINED UNPREDICTABLE;
+ * we choose to UNDEF, because otherwise our use of
+ * gen_goto_tb(1) would clash with the use of TB exit 1
+ * in the dc->condjmp condition-failed codepath in
+ * arm_tr_tb_stop() and we'd get an assertion.
+ */
+ return false;
+ }
+ if (a->size != 4) {
+ /* WLSTP */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+ /*
+ * We need to check that the FPU is enabled here, but mustn't
+ * call vfp_access_check() to do that because we don't want to
+ * do the lazy state preservation in the "loop count is zero" case.
+ * Do the check-and-raise-exception by hand.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
+ syn_uncategorized(), s->fp_excp_el);
+ return true;
+ }
+ }
+
+ nextlabel = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel);
+ tmp = load_reg(s, a->rn);
+ store_reg(s, 14, tmp);
+ if (a->size != 4) {
+ /*
+ * WLSTP: set FPSCR.LTPSIZE. This requires that we do the
+ * lazy state preservation, new FP context creation, etc,
+ * that vfp_access_check() does. We know that the actual
+ * access check will succeed (ie it won't generate code that
+ * throws an exception) because we did that check by hand earlier.
+ */
+ bool ok = vfp_access_check(s);
+ assert(ok);
+ tmp = tcg_const_i32(a->size);
+ store_cpu_field(tmp, v7m.ltpsize);
+ /*
+ * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0)
+ * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK.
+ */
+ }
+ gen_jmp_tb(s, s->base.pc_next, 1);
+
+ gen_set_label(nextlabel);
+ gen_jmp(s, read_pc(s) + a->imm);
+ return true;
+}
+
+static bool trans_LE(DisasContext *s, arg_LE *a)
+{
+ /*
+ * M-profile low-overhead loop end. The architecture permits an
+ * implementation to discard the LO_BRANCH_INFO cache at any time,
+ * and we take the IMPDEF option to never set it in the first place
+ * (equivalent to always discarding it immediately), because for QEMU
+ * a "real" implementation would be complicated and wouldn't execute
+ * any faster.
+ */
+ TCGv_i32 tmp;
+ TCGLabel *loopend;
+ bool fpu_active;
+
+ if (!dc_isar_feature(aa32_lob, s)) {
+ return false;
+ }
+ if (a->f && a->tp) {
+ return false;
+ }
+ if (s->condexec_mask) {
+ /*
+ * LE in an IT block is CONSTRAINED UNPREDICTABLE;
+ * we choose to UNDEF, because otherwise our use of
+ * gen_goto_tb(1) would clash with the use of TB exit 1
+ * in the dc->condjmp condition-failed codepath in
+ * arm_tr_tb_stop() and we'd get an assertion.
+ */
+ return false;
+ }
+ if (a->tp) {
+ /* LETP */
+ if (!dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+ if (!vfp_access_check(s)) {
+ s->eci_handled = true;
+ return true;
+ }
+ }
+
+ /* LE/LETP is OK with ECI set and leaves it untouched */
+ s->eci_handled = true;
+
+ /*
+ * With MVE, LTPSIZE might not be 4, and we must emit an INVSTATE
+ * UsageFault exception for the LE insn in that case. Note that we
+ * are not directly checking FPSCR.LTPSIZE but instead check the
+ * pseudocode LTPSIZE() function, which returns 4 if the FPU is
+ * not currently active (ie ActiveFPState() returns false). We
+ * can identify not-active purely from our TB state flags, as the
+ * FPU is active only if:
+ * the FPU is enabled
+ * AND lazy state preservation is not active
+ * AND we do not need a new fp context (this is the ASPEN/FPCA check)
+ *
+ * Usually we don't need to care about this distinction between
+ * LTPSIZE and FPSCR.LTPSIZE, because the code in vfp_access_check()
+ * will either take an exception or clear the conditions that make
+ * the FPU not active. But LE is an unusual case of a non-FP insn
+ * that looks at LTPSIZE.
+ */
+ fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed;
+
+ if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
+ /* Need to do a runtime check for LTPSIZE != 4 */
+ TCGLabel *skipexc = gen_new_label();
+ tmp = load_cpu_field(v7m.ltpsize);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc);
+ tcg_temp_free_i32(tmp);
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
+ default_exception_el(s));
+ gen_set_label(skipexc);
+ }
+
+ if (a->f) {
+ /* Loop-forever: just jump back to the loop start */
+ gen_jmp(s, read_pc(s) - a->imm);
+ return true;
+ }
+
+ /*
+ * Not loop-forever. If LR <= loop-decrement-value this is the last loop.
+ * For LE, we know at this point that LTPSIZE must be 4 and the
+ * loop decrement value is 1. For LETP we need to calculate the decrement
+ * value from LTPSIZE.
+ */
+ loopend = gen_new_label();
+ if (!a->tp) {
+ tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, loopend);
+ tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1);
+ } else {
+ /*
+ * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local
+ * so that decr stays live after the brcondi.
+ */
+ TCGv_i32 decr = tcg_temp_local_new_i32();
+ TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize);
+ tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize);
+ tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr);
+ tcg_temp_free_i32(ltpsize);
+
+ tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend);
+
+ tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr);
+ tcg_temp_free_i32(decr);
+ }
+ /* Jump back to the loop start */
+ gen_jmp(s, read_pc(s) - a->imm);
+
+ gen_set_label(loopend);
+ if (a->tp) {
+ /* Exits from tail-pred loops must reset LTPSIZE to 4 */
+ tmp = tcg_const_i32(4);
+ store_cpu_field(tmp, v7m.ltpsize);
+ }
+ /* End TB, continuing to following insn */
+ gen_jmp_tb(s, s->base.pc_next, 1);
+ return true;
+}
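+
+/*
+ * Illustrative sketch, not part of this patch: the LETP decrement computed
+ * above. LTPSIZE is the log2 of the element size, so the number of
+ * elements consumed per loop iteration is 1 << (4 - LTPSIZE): 16 bytes,
+ * 8 halfwords or 4 words of the 128-bit vector, or 1 for the plain-LE
+ * LTPSIZE == 4 case.
+ */
+static inline uint32_t letp_decrement_sketch(uint32_t ltpsize)
+{
+ return 1u << (4 - ltpsize); /* matches the sub/shl pair above */
+}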
+
+static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
+{
+ /*
+ * M-profile Loop Clear with Tail Predication. Since our implementation
+ * doesn't cache branch information, all we need to do is reset
+ * FPSCR.LTPSIZE to 4.
+ */
+
+ if (!dc_isar_feature(aa32_lob, s) ||
+ !dc_isar_feature(aa32_mve, s)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ store_cpu_field_constant(4, v7m.ltpsize);
+ return true;
+}
+
+static bool trans_VCTP(DisasContext *s, arg_VCTP *a)
+{
+ /*
+ * M-profile Create Vector Tail Predicate. This insn is itself
+ * predicated and is subject to beatwise execution.
+ */
+ TCGv_i32 rn_shifted, masklen;
+
+ if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) {
+ return false;
+ }
+
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ /*
+ * We pre-calculate the mask length here to avoid needing
+ * multiple helpers specialized for each element size.
+ * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16".
+ */
+ rn_shifted = tcg_temp_new_i32();
+ masklen = load_reg(s, a->rn);
+ tcg_gen_shli_i32(rn_shifted, masklen, a->size);
+ tcg_gen_movcond_i32(TCG_COND_LEU, masklen,
+ masklen, tcg_constant_i32(1 << (4 - a->size)),
+ rn_shifted, tcg_constant_i32(16));
+ gen_helper_mve_vctp(cpu_env, masklen);
+ tcg_temp_free_i32(masklen);
+ tcg_temp_free_i32(rn_shifted);
+ /* This insn updates predication bits */
+ s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
+ mve_update_eci(s);
+ return true;
+}
+
+static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
+{
+ TCGv_i32 addr, tmp;
+
+ tmp = load_reg(s, a->rm);
+ if (half) {
+ tcg_gen_add_i32(tmp, tmp, tmp);
+ }
+ addr = load_reg(s, a->rn);
+ tcg_gen_add_i32(addr, addr, tmp);
+
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
+ tcg_temp_free_i32(addr);
+
+ tcg_gen_add_i32(tmp, tmp, tmp);
+ tcg_gen_addi_i32(tmp, tmp, read_pc(s));
+ store_reg(s, 15, tmp);
+ return true;
+}
+
+static bool trans_TBB(DisasContext *s, arg_tbranch *a)
+{
+ return op_tbranch(s, a, false);
+}
+
+static bool trans_TBH(DisasContext *s, arg_tbranch *a)
+{
+ return op_tbranch(s, a, true);
+}
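+
+/*
+ * Illustrative sketch, not part of this patch: the target computed by
+ * op_tbranch(). TBB/TBH read a byte/halfword entry from a table at
+ * Rn + Rm (Rm scaled by 2 for TBH) and branch forward by twice that
+ * entry from the current PC (insn address + 4 in Thumb).
+ */
+static inline uint32_t tbranch_target_sketch(uint32_t pc, uint32_t entry)
+{
+ return pc + 2 * entry; /* 'entry' is the zero-extended table value */
+}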
+
+static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
+{
+ TCGv_i32 tmp = load_reg(s, a->rn);
+
+ arm_gen_condlabel(s);
+ tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
+ tmp, 0, s->condlabel);
+ tcg_temp_free_i32(tmp);
+ gen_jmp(s, read_pc(s) + a->imm);
+ return true;
+}
+
+/*
+ * Supervisor call - both T32 & A32 come here so we need to check
+ * which mode we are in when checking for semihosting.
+ */
+
+static bool trans_SVC(DisasContext *s, arg_SVC *a)
+{
+ const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+ !IS_USER(s) &&
+#endif
+ (a->imm == semihost_imm)) {
+ gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
+ } else {
+ gen_set_pc_im(s, s->base.pc_next);
+ s->svc_imm = a->imm;
+ s->base.is_jmp = DISAS_SWI;
+ }
+ return true;
+}
+
+/*
+ * Unconditional system instructions
+ */
+
+static bool trans_RFE(DisasContext *s, arg_RFE *a)
+{
+ static const int8_t pre_offset[4] = {
+ /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
+ };
+ static const int8_t post_offset[4] = {
+ /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
+ };
+ TCGv_i32 addr, t1, t2;
+
+ if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ addr = load_reg(s, a->rn);
+ tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
+
+ /* Load PC into tmp and CPSR into tmp2. */
+ t1 = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, t1, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+ tcg_gen_addi_i32(addr, addr, 4);
+ t2 = tcg_temp_new_i32();
+ gen_aa32_ld_i32(s, t2, addr, get_mem_index(s), MO_UL | MO_ALIGN);
+
+ if (a->w) {
+ /* Base writeback. */
+ tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
+ store_reg(s, a->rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ gen_rfe(s, t1, t2);
+ return true;
+}
+
+static bool trans_SRS(DisasContext *s, arg_SRS *a)
+{
+ if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ gen_srs(s, a->mode, a->pu, a->w);
+ return true;
+}
+
+static bool trans_CPS(DisasContext *s, arg_CPS *a)
+{
+ uint32_t mask, val;
+
+ if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ /* Implemented as NOP in user mode. */
+ return true;
+ }
+ /* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
+
+ mask = val = 0;
+ if (a->imod & 2) {
+ if (a->A) {
+ mask |= CPSR_A;
+ }
+ if (a->I) {
+ mask |= CPSR_I;
+ }
+ if (a->F) {
+ mask |= CPSR_F;
+ }
+ if (a->imod & 1) {
+ val |= mask;
+ }
+ }
+ if (a->M) {
+ mask |= CPSR_M;
+ val |= a->mode;
+ }
+ if (mask) {
+ gen_set_psr_im(s, mask, 0, val);
+ }
+ return true;
+}
+
+static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
+{
+ TCGv_i32 tmp, addr, el;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (IS_USER(s)) {
+ /* Implemented as NOP in user mode. */
+ return true;
+ }
+
+ tmp = tcg_const_i32(a->im);
+ /* FAULTMASK */
+ if (a->F) {
+ addr = tcg_const_i32(19);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ /* PRIMASK */
+ if (a->I) {
+ addr = tcg_const_i32(16);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ el = tcg_const_i32(s->current_el);
+ gen_helper_rebuild_hflags_m32(cpu_env, el);
+ tcg_temp_free_i32(el);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ return true;
+}
+
+/*
+ * Clear-Exclusive, Barriers
+ */
+
+static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
+{
+ if (s->thumb
+ ? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
+ : !ENABLE_ARCH_6K) {
+ return false;
+ }
+ gen_clrex(s);
+ return true;
+}
+
+static bool trans_DSB(DisasContext *s, arg_DSB *a)
+{
+ if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ return true;
+}
+
+static bool trans_DMB(DisasContext *s, arg_DMB *a)
+{
+ return trans_DSB(s, NULL);
+}
+
+static bool trans_ISB(DisasContext *s, arg_ISB *a)
+{
+ if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
+ return false;
+ }
+ /*
+ * We need to break the TB after this insn to execute
+ * self-modifying code correctly and also to take
+ * any pending interrupts immediately.
+ */
+ s->base.is_jmp = DISAS_TOO_MANY;
+ return true;
+}
+
+static bool trans_SB(DisasContext *s, arg_SB *a)
+{
+ if (!dc_isar_feature(aa32_sb, s)) {
+ return false;
+ }
+ /*
+ * TODO: There is no speculation barrier opcode
+ * for TCG; MB and end the TB instead.
+ */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ s->base.is_jmp = DISAS_TOO_MANY;
+ return true;
+}
+
+static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
+{
+ if (!ENABLE_ARCH_6) {
+ return false;
+ }
+ if (a->E != (s->be_data == MO_BE)) {
+ gen_helper_setend(cpu_env);
+ s->base.is_jmp = DISAS_UPDATE_EXIT;
+ }
+ return true;
+}
+
+/*
+ * Preload instructions
+ * All are nops, contingent on the appropriate arch level.
+ */
+
+static bool trans_PLD(DisasContext *s, arg_PLD *a)
+{
+ return ENABLE_ARCH_5TE;
+}
+
+static bool trans_PLDW(DisasContext *s, arg_PLD *a)
+{
+ return arm_dc_feature(s, ARM_FEATURE_V7MP);
+}
+
+static bool trans_PLI(DisasContext *s, arg_PLD *a)
+{
+ return ENABLE_ARCH_7;
+}
+
+/*
+ * If-then
+ */
+
+static bool trans_IT(DisasContext *s, arg_IT *a)
+{
+ int cond_mask = a->cond_mask;
+
+ /*
+ * No actual code generated for this insn, just setup state.
+ *
+ * Combinations of firstcond and mask which set up an 0b1111
+ * condition are UNPREDICTABLE; we take the CONSTRAINED
+ * UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
+ * i.e. both meaning "execute always".
+ */
+ s->condexec_cond = (cond_mask >> 4) & 0xe;
+ s->condexec_mask = cond_mask & 0x1f;
+ return true;
+}
+
+/* v8.1M CSEL/CSINC/CSNEG/CSINV */
+static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
+{
+ TCGv_i32 rn, rm, zero;
+ DisasCompare c;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
+ return false;
+ }
+
+ if (a->rm == 13) {
+ /* SEE "Related encodings" (MVE shifts) */
+ return false;
+ }
+
+ if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
+ /* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
+ return false;
+ }
+
+ /* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
+ if (a->rn == 15) {
+ rn = tcg_const_i32(0);
+ } else {
+ rn = load_reg(s, a->rn);
+ }
+ if (a->rm == 15) {
+ rm = tcg_const_i32(0);
+ } else {
+ rm = load_reg(s, a->rm);
+ }
+
+ switch (a->op) {
+ case 0: /* CSEL */
+ break;
+ case 1: /* CSINC */
+ tcg_gen_addi_i32(rm, rm, 1);
+ break;
+ case 2: /* CSINV */
+ tcg_gen_not_i32(rm, rm);
+ break;
+ case 3: /* CSNEG */
+ tcg_gen_neg_i32(rm, rm);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ arm_test_cc(&c, a->fcond);
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(c.cond, rn, c.value, zero, rn, rm);
+ arm_free_cc(&c);
+ tcg_temp_free_i32(zero);
+
+ store_reg(s, a->rd, rn);
+ tcg_temp_free_i32(rm);
+
+ return true;
+}
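+
+/*
+ * Illustrative sketch, not part of this patch: the whole CSEL family
+ * reduces to a single movcond once Rm has been pre-transformed, mirroring
+ * the switch on a->op above.
+ */
+static inline uint32_t csel_family_sketch(bool cond_true, uint32_t rn,
+ uint32_t rm, int op)
+{
+ switch (op) {
+ case 1: rm = rm + 1; break; /* CSINC */
+ case 2: rm = ~rm; break; /* CSINV */
+ case 3: rm = -rm; break; /* CSNEG */
+ default: break; /* CSEL */
+ }
+ return cond_true ? rn : rm;
+}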
+
+/*
+ * Legacy decoder.
+ */
+
+static void disas_arm_insn(DisasContext *s, unsigned int insn)
+{
+ unsigned int cond = insn >> 28;
+
+ /* M variants do not implement ARM mode; this must raise the INVSTATE
+ * UsageFault exception.
+ */
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
+ default_exception_el(s));
+ return;
+ }
+
+ if (s->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(s));
+ return;
+ }
+
+ if (cond == 0xf) {
+ /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
+ * choose to UNDEF. In ARMv5 and above the space is used
+ * for miscellaneous unconditional instructions.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Unconditional instructions. */
+ /* TODO: Perhaps merge these into one decodetree output file. */
+ if (disas_a32_uncond(s, insn) ||
+ disas_vfp_uncond(s, insn) ||
+ disas_neon_dp(s, insn) ||
+ disas_neon_ls(s, insn) ||
+ disas_neon_shared(s, insn)) {
+ return;
+ }
+ /* fall back to legacy decoder */
+
+ if ((insn & 0x0e000f00) == 0x0c000100) {
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ /* iWMMXt register transfer. */
+ if (extract32(s->c15_cpar, 1, 1)) {
+ if (!disas_iwmmxt_insn(s, insn)) {
+ return;
+ }
+ }
+ }
+ }
+ goto illegal_op;
+ }
+ if (cond != 0xe) {
+ /* If the condition is not "always", generate a conditional
+ jump to the next instruction */
+ arm_skip_unless(s, cond);
+ }
+
+ /* TODO: Perhaps merge these into one decodetree output file. */
+ if (disas_a32(s, insn) ||
+ disas_vfp(s, insn)) {
+ return;
+ }
+ /* fall back to legacy decoder */
+ /* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
+ if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
+ if (((insn & 0x0c000e00) == 0x0c000000)
+ && ((insn & 0x03000000) != 0x03000000)) {
+ /* Coprocessor insn, coprocessor 0 or 1 */
+ disas_xscale_insn(s, insn);
+ return;
+ }
+ }
+
+illegal_op:
+ unallocated_encoding(s);
+}
+
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
+{
+ /*
+ * Return true if this is a 16-bit instruction. We must be precise
+ * about this (matching the decode).
+ */
+ if ((insn >> 11) < 0x1d) {
+ /* Definitely a 16-bit instruction */
+ return true;
+ }
+
+ /* Top five bits 0b11101 / 0b11110 / 0b11111: this is the
+ * first half of a 32-bit Thumb insn. Thumb-1 cores might
+ * end up actually treating this as two 16-bit insns, though,
+ * if it's half of a bl/blx pair that might span a page boundary.
+ */
+ if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
+ arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* Thumb2 cores (including all M profile ones) always treat
+ * 32-bit insns as 32-bit.
+ */
+ return false;
+ }
+
+ if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
+ /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
+ * is not on the next page; we merge this into a 32-bit
+ * insn.
+ */
+ return false;
+ }
+ /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
+ * 0b1111_1xxx_xxxx_xxxx : BL suffix;
+ * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
+ * -- handle as single 16 bit insn
+ */
+ return true;
+}
+
+/* Translate a 32-bit thumb instruction. */
+static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
+{
+ /*
+ * ARMv6-M supports a limited subset of Thumb2 instructions.
+ * Other Thumb-1 architectures allow only the 32-bit
+ * combined BL/BLX prefix and suffix.
+ */
+ if (arm_dc_feature(s, ARM_FEATURE_M) &&
+ !arm_dc_feature(s, ARM_FEATURE_V7)) {
+ int i;
+ bool found = false;
+ static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
+ 0xf3b08040 /* dsb */,
+ 0xf3b08050 /* dmb */,
+ 0xf3b08060 /* isb */,
+ 0xf3e08000 /* mrs */,
+ 0xf000d000 /* bl */};
+ static const uint32_t armv6m_mask[] = {0xffe0d000,
+ 0xfff0d0f0,
+ 0xfff0d0f0,
+ 0xfff0d0f0,
+ 0xffe0d000,
+ 0xf800d000};
+
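+ /*
+ * Each mask/value pair above selects one permitted v6-M 32-bit
+ * encoding; e.g. (insn & 0xf800d000) == 0xf000d000 matches BL.
+ */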
+ for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
+ if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ goto illegal_op;
+ }
+ } else if ((insn & 0xf800e800) != 0xf000e800) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ /*
+ * NOCP takes precedence over any UNDEF for (almost) the
+ * entire wide range of coprocessor-space encodings, so check
+ * for it first before proceeding to actually decode eg VFP
+ * insns. This decode also handles the few insns which are
+ * in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
+ */
+ if (disas_m_nocp(s, insn)) {
+ return;
+ }
+ }
+
+ if ((insn & 0xef000000) == 0xef000000) {
+ /*
+ * T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+ * transform into
+ * A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
+ */
+ uint32_t a32_insn = (insn & 0xe2ffffff) |
+ ((insn & (1 << 28)) >> 4) | (1 << 28);
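+ /*
+ * That is: clear bits [28:26] and bit 24 (bit 25 is 1 in both
+ * encodings and is kept), move 'p' from bit 28 down to bit 24,
+ * and set bit 28 so the top nibble becomes 0b1111.
+ */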
+
+ if (disas_neon_dp(s, a32_insn)) {
+ return;
+ }
+ }
+
+ if ((insn & 0xff100000) == 0xf9000000) {
+ /*
+ * T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
+ * transform into
+ * A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
+ */
+ uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
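+ /* i.e. just replace the 0xf9 top byte with 0xf4. */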
+
+ if (disas_neon_ls(s, a32_insn)) {
+ return;
+ }
+ }
+
+ /*
+ * TODO: Perhaps merge these into one decodetree output file.
+ * Note disas_vfp is written for a32 with cond field in the
+ * top nibble. The t32 encoding requires 0xe in the top nibble.
+ */
+ if (disas_t32(s, insn) ||
+ disas_vfp_uncond(s, insn) ||
+ disas_neon_shared(s, insn) ||
+ disas_mve(s, insn) ||
+ ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
+ return;
+ }
+
+illegal_op:
+ unallocated_encoding(s);
+}
+
+static void disas_thumb_insn(DisasContext *s, uint32_t insn)
+{
+ if (!disas_t16(s, insn)) {
+ unallocated_encoding(s);
+ }
+}
+
+static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
+{
+ /* Return true if the insn at dc->base.pc_next might cross a page boundary.
+ * (False positives are OK, false negatives are not.)
+ * We know this is a Thumb insn, and our caller ensures we are
+ * only called if dc->base.pc_next is less than 4 bytes from the page
+ * boundary, so we cross the page if the first 16 bits indicate
+ * that this is a 32 bit insn.
+ */
+ uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);
+
+ return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
+}
+
+static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cs->env_ptr;
+ ARMCPU *cpu = env_archcpu(env);
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
+ uint32_t condexec, core_mmu_idx;
+
+ dc->isar = &cpu->isar;
+ dc->condjmp = 0;
+
+ dc->aarch64 = 0;
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
+ dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB);
+ dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
+ condexec = EX_TBFLAG_AM32(tb_flags, CONDEXEC);
+ /*
+ * The CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this
+ * is always the IT bits. On M-profile, some of the reserved encodings
+ * of IT are used instead to indicate either ICI or ECI, which
+ * indicate partial progress of a restartable insn that was interrupted
+ * partway through by an exception:
+ * * if CONDEXEC[3:0] != 0b0000 : CONDEXEC is IT bits
+ * * if CONDEXEC[3:0] == 0b0000 : CONDEXEC is ICI or ECI bits
+ * In all cases CONDEXEC == 0 means "not in IT block or restartable
+ * insn, behave normally".
+ */
+ dc->eci = dc->condexec_mask = dc->condexec_cond = 0;
+ dc->eci_handled = false;
+ dc->insn_eci_rewind = NULL;
+ if (condexec & 0xf) {
+ dc->condexec_mask = (condexec & 0xf) << 1;
+ dc->condexec_cond = condexec >> 4;
+ } else {
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ dc->eci = condexec >> 4;
+ }
+ }
+
+ core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
+ dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (dc->current_el == 0);
+#endif
+ dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
+ dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
+ dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ dc->vfp_enabled = 1;
+ dc->be_data = MO_TE;
+ dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER);
+ dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ regime_is_secure(env, dc->mmu_idx);
+ dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK);
+ dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG);
+ dc->v7m_new_fp_ctxt_needed =
+ EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED);
+ dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT);
+ dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED);
+ } else {
+ dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL);
+ dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B);
+ dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE);
+ dc->ns = EX_TBFLAG_A32(tb_flags, NS);
+ dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN);
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR);
+ } else {
+ dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN);
+ dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE);
+ }
+ }
+ dc->cp_regs = cpu->cp_regs;
+ dc->features = env->features;
+
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
+ dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
+ dc->is_ldex = false;
+
+ dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
+
+ /* If architectural single step active, limit to 1. */
+ if (dc->ss_active) {
+ dc->base.max_insns = 1;
+ }
+
+ /* ARM is a fixed-length ISA. Bound the number of insns to execute
+ to those left on the page. */
+ if (!dc->thumb) {
+ int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
+ }
+
+ cpu_V0 = tcg_temp_new_i64();
+ cpu_V1 = tcg_temp_new_i64();
+ cpu_M0 = tcg_temp_new_i64();
+}
+
+static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ /* A note on handling of the condexec (IT) bits:
+ *
+ * We want to avoid the overhead of having to write the updated condexec
+ * bits back to the CPUARMState for every instruction in an IT block. So:
+ * (1) if the condexec bits are not already zero then we write
+ * zero back into the CPUARMState now. This avoids complications trying
+ * to do it at the end of the block. (For example if we don't do this
+ * it's hard to identify whether we can safely skip writing condexec
+ * at the end of the TB, which we definitely want to do for the case
+ * where a TB doesn't do anything with the IT state at all.)
+ * (2) if we are going to leave the TB then we call gen_set_condexec()
+ * which will write the correct value into CPUARMState if zero is wrong.
+ * This is done both for leaving the TB at the end, and for leaving
+ * it because of an exception we know will happen, which is done in
+ * gen_exception_insn(). The latter is necessary because we need to
+ * leave the TB with the PC/IT state just prior to execution of the
+ * instruction which caused the exception.
+ * (3) if we leave the TB unexpectedly (eg a data abort on a load)
+ * then the CPUARMState will be wrong and we need to reset it.
+ * This is handled in the same way as restoration of the
+ * PC in these situations; we save the value of the condexec bits
+ * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
+ * then uses this to restore them after an exception.
+ *
+ * Note that there are no instructions which can read the condexec
+ * bits, and none which can write non-static values to them, so
+ * we don't need to care about whether CPUARMState is correct in the
+ * middle of a TB.
+ */
+
+ /* Reset the conditional execution bits immediately. This avoids
+ complications trying to do it at the end of the block. */
+ if (dc->condexec_mask || dc->condexec_cond) {
+ store_cpu_field_constant(0, condexec_bits);
+ }
+}
+
+static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ /*
+ * The ECI/ICI bits share PSR bits with the IT bits, so we
+ * need to reconstitute the bits from the split-out DisasContext
+ * fields here.
+ */
+ uint32_t condexec_bits;
+
+ if (dc->eci) {
+ condexec_bits = dc->eci << 4;
+ } else {
+ condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
+ }
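+ /*
+ * The three insn_start words are the PC, the reconstituted condexec
+ * bits, and a syndrome word which starts as 0 and may be filled in
+ * later via disas_set_insn_syndrome(); restore_state_to_opc()
+ * consumes the same layout.
+ */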
+ tcg_gen_insn_start(dc->base.pc_next, condexec_bits, 0);
+ dc->insn_start = tcg_last_op();
+}
+
+static bool arm_pre_translate_insn(DisasContext *dc)
+{
+#ifdef CONFIG_USER_ONLY
+ /* Intercept jump to the magic kernel page. */
+ if (dc->base.pc_next >= 0xffff0000) {
+ /* We always get here via a jump, so we know we are not in a
+ conditional execution block. */
+ gen_exception_internal(EXCP_KERNEL_TRAP);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
+#endif
+
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(dc->base.num_insns == 1);
+ gen_swstep_exception(dc, 0, 0);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
+
+ return false;
+}
+
+static void arm_post_translate_insn(DisasContext *dc)
+{
+ if (dc->condjmp && !dc->base.is_jmp) {
+ gen_set_label(dc->condlabel);
+ dc->condjmp = 0;
+ }
+ translator_loop_temp_check(&dc->base);
+}
+
+static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
+ unsigned int insn;
+
+ if (arm_pre_translate_insn(dc)) {
+ dc->base.pc_next += 4;
+ return;
+ }
+
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_ldl_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
+ dc->insn = insn;
+ dc->base.pc_next += 4;
+ disas_arm_insn(dc, insn);
+
+ arm_post_translate_insn(dc);
+
+ /* ARM is a fixed-length ISA. We performed the cross-page check
+ in init_disas_context by adjusting max_insns. */
+}
+
+static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
+{
+ /* Return true if this Thumb insn is always unconditional,
+ * even inside an IT block. This is true of only a very few
+ * instructions: BKPT, HLT, and SG.
+ *
+ * A larger class of instructions are UNPREDICTABLE if used
+ * inside an IT block; we do not need to detect those here, because
+ * what we do by default (perform the cc check and update the IT
+ * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
+ * choice for those situations.
+ *
+ * insn is either a 16-bit or a 32-bit instruction; the two are
+ * distinguishable because for the 16-bit case the top 16 bits
+ * are zeroes, and that isn't a valid 32-bit encoding.
+ */
+ if ((insn & 0xffffff00) == 0xbe00) {
+ /* BKPT */
+ return true;
+ }
+
+ if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
+ !arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* HLT: v8A only. This is unconditional even when it is going to
+ * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
+ * For v7 cores this was a plain old undefined encoding and so
+ * honours its cc check. (We might be using the encoding as
+ * a semihosting trap, but we don't change the cc check behaviour
+ * on that account, because a debugger connected to a real v7A
+ * core and emulating semihosting traps by catching the UNDEF
+ * exception would also only see cases where the cc check passed.)
+ * No guest code should be trying to do a HLT semihosting trap
+ * in an IT block anyway.
+ */
+ return true;
+ }
+
+ if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
+ arm_dc_feature(s, ARM_FEATURE_M)) {
+ /* SG: v8M only */
+ return true;
+ }
+
+ return false;
+}
+
+static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUARMState *env = cpu->env_ptr;
+ uint32_t insn;
+ bool is_16bit;
+
+ if (arm_pre_translate_insn(dc)) {
+ dc->base.pc_next += 2;
+ return;
+ }
+
+ dc->pc_curr = dc->base.pc_next;
+ insn = arm_lduw_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
+ is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
+ dc->base.pc_next += 2;
+ if (!is_16bit) {
+ uint32_t insn2 = arm_lduw_code(env, &dc->base, dc->base.pc_next,
+ dc->sctlr_b);
+
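+ /* The first halfword ends up in the high 16 bits, which is the
+ * order the t32 decoder expects. */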
+ insn = insn << 16 | insn2;
+ dc->base.pc_next += 2;
+ }
+ dc->insn = insn;
+
+ if (dc->pstate_il) {
+ /*
+ * Illegal execution state. This has priority over BTI
+ * exceptions, but comes after instruction abort exceptions.
+ */
+ gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF,
+ syn_illegalstate(), default_exception_el(dc));
+ return;
+ }
+
+ if (dc->eci) {
+ /*
+ * For M-profile continuable instructions, ECI/ICI handling
+ * falls into these cases:
+ * - interrupt-continuable instructions
+ * These are the various load/store multiple insns (both
+ * integer and fp). The ICI bits indicate the register
+ * where the load/store can resume. We make the IMPDEF
+ * choice to always do "instruction restart", ie ignore
+ * the ICI value and always execute the ldm/stm from the
+ * start. So all we need to do is zero PSR.ICI if the
+ * insn executes.
+ * - MVE instructions subject to beat-wise execution
+ * Here the ECI bits indicate which beats have already been
+ * executed, and we must honour this. Each insn of this
+ * type will handle it correctly. We will update PSR.ECI
+ * in the helper function for the insn (some ECI values
+ * mean that the following insn also has been partially
+ * executed).
+ * - Special cases which don't advance ECI
+ * The insns LE, LETP and BKPT leave the ECI/ICI state
+ * bits untouched.
+ * - all other insns (the common case)
+ * Non-zero ECI/ICI means an INVSTATE UsageFault.
+ * We place a rewind-marker here. Insns in the previous
+ * three categories will set a flag in the DisasContext.
+ * If the flag isn't set after we call disas_thumb_insn()
+ * or disas_thumb2_insn() then we know we have a "some other
+ * insn" case. We will rewind to the marker (ie throwing away
+ * all the generated code) and instead emit "take exception".
+ */
+ dc->insn_eci_rewind = tcg_last_op();
+ }
+
+ if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
+ uint32_t cond = dc->condexec_cond;
+
+ /*
+ * Conditionally skip the insn. Note that both 0xe and 0xf mean
+ * "always"; 0xf is not "never".
+ */
+ if (cond < 0x0e) {
+ arm_skip_unless(dc, cond);
+ }
+ }
+
+ if (is_16bit) {
+ disas_thumb_insn(dc, insn);
+ } else {
+ disas_thumb2_insn(dc, insn);
+ }
+
+ /* Advance the Thumb condexec condition. */
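+ /*
+ * This roughly mirrors the architectural ITAdvance(): bit 4 of the
+ * 5-bit mask shifts into the low bit of the condition for the next
+ * insn, and a mask of zero means any IT block has finished.
+ */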
+ if (dc->condexec_mask) {
+ dc->condexec_cond = ((dc->condexec_cond & 0xe) |
+ ((dc->condexec_mask >> 4) & 1));
+ dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
+ if (dc->condexec_mask == 0) {
+ dc->condexec_cond = 0;
+ }
+ }
+
+ if (dc->eci && !dc->eci_handled) {
+ /*
+ * Insn wasn't valid for ECI/ICI at all: undo what we
+ * just generated and instead emit an exception
+ */
+ tcg_remove_ops_after(dc->insn_eci_rewind);
+ dc->condjmp = 0;
+ gen_exception_insn(dc, dc->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
+ default_exception_el(dc));
+ }
+
+ arm_post_translate_insn(dc);
+
+ /* Thumb is a variable-length ISA. Stop translation when the next insn
+ * will touch a new page. This ensures that prefetch aborts occur at
+ * the right place.
+ *
+ * We want to stop the TB if the next insn starts in a new page,
+ * or if it spans between this page and the next. This means that
+ * if we're looking at the last halfword in the page we need to
+ * see if it's a 16-bit Thumb insn (which will fit in this TB)
+ * or a 32-bit Thumb insn (which won't).
+ * This is to avoid generating a silly TB with a single 16-bit insn
+ * in it at the end of this page (which would execute correctly
+ * but isn't very efficient).
+ */
+ if (dc->base.is_jmp == DISAS_NEXT
+ && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
+ || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
+ && insn_crosses_page(env, dc)))) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ /* At this stage dc->condjmp will only be set when the skipped
+ instruction was a conditional branch or trap, and the PC has
+ already been written. */
+ gen_set_condexec(dc);
+ if (dc->base.is_jmp == DISAS_BX_EXCRET) {
+ /* Exception return branches need some special case code at the
+ * end of the TB, which is complex enough that it has to
+ * handle the single-step vs not and the condition-failed
+ * insn codepath itself.
+ */
+ gen_bx_excret_final_code(dc);
+ } else if (unlikely(dc->ss_active)) {
+ /* Unconditional and "condition passed" instruction codepath. */
+ switch (dc->base.is_jmp) {
+ case DISAS_SWI:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ break;
+ case DISAS_HVC:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ break;
+ case DISAS_SMC:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ case DISAS_UPDATE_EXIT:
+ case DISAS_UPDATE_NOCHAIN:
+ gen_set_pc_im(dc, dc->base.pc_next);
+ /* fall through */
+ default:
+ /* FIXME: Single stepping a WFI insn will not halt the CPU. */
+ gen_singlestep_exception(dc);
+ break;
+ case DISAS_NORETURN:
+ break;
+ }
+ } else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+ the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+ Hardware breakpoints have already been handled and skip this code.
+ */
+ switch (dc->base.is_jmp) {
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ gen_goto_tb(dc, 1, dc->base.pc_next);
+ break;
+ case DISAS_UPDATE_NOCHAIN:
+ gen_set_pc_im(dc, dc->base.pc_next);
+ /* fall through */
+ case DISAS_JUMP:
+ gen_goto_ptr();
+ break;
+ case DISAS_UPDATE_EXIT:
+ gen_set_pc_im(dc, dc->base.pc_next);
+ /* fall through */
+ default:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ /* nothing more to generate */
+ break;
+ case DISAS_WFI:
+ {
+ TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
+ !(dc->insn & (1U << 31))) ? 2 : 4);
+
+ gen_helper_wfi(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ /* The helper doesn't necessarily throw an exception, but we
+ * must go back to the main loop to check for interrupts anyway.
+ */
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ }
+ case DISAS_WFE:
+ gen_helper_wfe(cpu_env);
+ break;
+ case DISAS_YIELD:
+ gen_helper_yield(cpu_env);
+ break;
+ case DISAS_SWI:
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ break;
+ case DISAS_HVC:
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ break;
+ case DISAS_SMC:
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ break;
+ }
+ }
+
+ if (dc->condjmp) {
+ /* "Condition failed" instruction codepath for the branch/trap insn */
+ gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
+ if (unlikely(dc->ss_active)) {
+ gen_set_pc_im(dc, dc->base.pc_next);
+ gen_singlestep_exception(dc);
+ } else {
+ gen_goto_tb(dc, 1, dc->base.pc_next);
+ }
+ }
+}
+
+static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
+}
+
+static const TranslatorOps arm_translator_ops = {
+ .init_disas_context = arm_tr_init_disas_context,
+ .tb_start = arm_tr_tb_start,
+ .insn_start = arm_tr_insn_start,
+ .translate_insn = arm_tr_translate_insn,
+ .tb_stop = arm_tr_tb_stop,
+ .disas_log = arm_tr_disas_log,
+};
+
+static const TranslatorOps thumb_translator_ops = {
+ .init_disas_context = arm_tr_init_disas_context,
+ .tb_start = arm_tr_tb_start,
+ .insn_start = arm_tr_insn_start,
+ .translate_insn = thumb_tr_translate_insn,
+ .tb_stop = arm_tr_tb_stop,
+ .disas_log = arm_tr_disas_log,
+};
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc = { };
+ const TranslatorOps *ops = &arm_translator_ops;
+ CPUARMTBFlags tb_flags = arm_tbflags_from_tb(tb);
+
+ if (EX_TBFLAG_AM32(tb_flags, THUMB)) {
+ ops = &thumb_translator_ops;
+ }
+#ifdef TARGET_AARCH64
+ if (EX_TBFLAG_ANY(tb_flags, AARCH64_STATE)) {
+ ops = &aarch64_translator_ops;
+ }
+#endif
+
+ translator_loop(ops, &dc.base, cpu, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
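+ /* The data[] words match those emitted via tcg_gen_insn_start(). */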
+ if (is_a64(env)) {
+ env->pc = data[0];
+ env->condexec_bits = 0;
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+ } else {
+ env->regs[15] = data[0];
+ env->condexec_bits = data[1];
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+ }
+}
diff --git a/target/arm/translate.h b/target/arm/translate.h
new file mode 100644
index 000000000..3a0db801d
--- /dev/null
+++ b/target/arm/translate.h
@@ -0,0 +1,586 @@
+#ifndef TARGET_ARM_TRANSLATE_H
+#define TARGET_ARM_TRANSLATE_H
+
+#include "exec/translator.h"
+#include "internals.h"
+
+
+/* internal defines */
+typedef struct DisasContext {
+ DisasContextBase base;
+ const ARMISARegisters *isar;
+
+ /* The address of the current instruction being translated. */
+ target_ulong pc_curr;
+ target_ulong page_start;
+ uint32_t insn;
+ /* Nonzero if this instruction has been conditionally skipped. */
+ int condjmp;
+ /* The label that will be jumped to when the instruction is skipped. */
+ TCGLabel *condlabel;
+ /* Thumb-2 conditional execution bits. */
+ int condexec_mask;
+ int condexec_cond;
+ /* M-profile ECI/ICI exception-continuable instruction state */
+ int eci;
+ /*
+ * trans_ functions for insns which are continuable should set this true
+ * after decode (ie after any UNDEF checks)
+ */
+ bool eci_handled;
+ /* TCG op to rewind to if this turns out to be an invalid ECI state */
+ TCGOp *insn_eci_rewind;
+ int thumb;
+ int sctlr_b;
+ MemOp be_data;
+#if !defined(CONFIG_USER_ONLY)
+ int user;
+#endif
+ ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
+ uint8_t tbii; /* TBI1|TBI0 for insns */
+ uint8_t tbid; /* TBI1|TBI0 for data */
+ uint8_t tcma; /* TCMA1|TCMA0 for MTE */
+ bool ns; /* Use non-secure CPREG bank on access */
+ int fp_excp_el; /* FP exception EL or 0 if enabled */
+ int sve_excp_el; /* SVE exception EL or 0 if enabled */
+ int sve_len; /* SVE vector length in bytes */
+ /* Flag indicating that exceptions from secure mode are routed to EL3. */
+ bool secure_routed_to_el3;
+ bool vfp_enabled; /* FP enabled via FPSCR.EN */
+ int vec_len;
+ int vec_stride;
+ bool v7m_handler_mode;
+ bool v8m_secure; /* true if v8M and we're in Secure mode */
+ bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
+ bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
+ bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
+ bool v7m_lspact; /* FPCCR.LSPACT set */
+ /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
+ * so that the top-level loop can generate correct syndrome information.
+ */
+ uint32_t svc_imm;
+ int aarch64;
+ int current_el;
+ /* Debug target exception level for single-step exceptions */
+ int debug_target_el;
+ GHashTable *cp_regs;
+ uint64_t features; /* CPU features bits */
+ /* Because unallocated encodings generate different exception syndrome
+ * information from traps due to FP being disabled, we can't do a single
+ * "is fp access disabled" check at a high level in the decode tree.
+ * To help in catching bugs where the access check was forgotten in some
+ * code path, we set this flag when the access check is done, and assert
+ * that it is set at the point where we actually touch the FP regs.
+ */
+ bool fp_access_checked;
+ bool sve_access_checked;
+ /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
+ * single-step support).
+ */
+ bool ss_active;
+ bool pstate_ss;
+ /* True if the insn just emitted was a load-exclusive instruction
+ * (necessary for syndrome information for single step exceptions),
+ * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
+ */
+ bool is_ldex;
+ /* True if AccType_UNPRIV should be used for LDTR et al */
+ bool unpriv;
+ /* True if v8.3-PAuth is active. */
+ bool pauth_active;
+ /* True if v8.5-MTE access to tags is enabled. */
+ bool ata;
+ /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
+ bool mte_active[2];
+ /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
+ bool bt;
+ /* True if any CP15 access is trapped by HSTR_EL2 */
+ bool hstr_active;
+ /* True if memory operations require alignment */
+ bool align_mem;
+ /* True if PSTATE.IL is set */
+ bool pstate_il;
+ /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
+ bool mve_no_pred;
+ /*
+ * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
+ * < 0, set by the current instruction.
+ */
+ int8_t btype;
+ /* A copy of cpu->dcz_blocksize. */
+ uint8_t dcz_blocksize;
+ /* True if this page is guarded. */
+ bool guarded_page;
+ /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
+ int c15_cpar;
+ /* TCG op of the current insn_start. */
+ TCGOp *insn_start;
+#define TMP_A64_MAX 16
+ int tmp_a64_count;
+ TCGv_i64 tmp_a64[TMP_A64_MAX];
+} DisasContext;
+
+typedef struct DisasCompare {
+ TCGCond cond;
+ TCGv_i32 value;
+ bool value_global;
+} DisasCompare;
+
+/* Share the TCG temporaries common between 32 and 64 bit modes. */
+extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
+extern TCGv_i64 cpu_exclusive_addr;
+extern TCGv_i64 cpu_exclusive_val;
+
+/*
+ * Constant expanders for the decoders.
+ */
+
+static inline int negate(DisasContext *s, int x)
+{
+ return -x;
+}
+
+static inline int plus_1(DisasContext *s, int x)
+{
+ return x + 1;
+}
+
+static inline int plus_2(DisasContext *s, int x)
+{
+ return x + 2;
+}
+
+static inline int times_2(DisasContext *s, int x)
+{
+ return x * 2;
+}
+
+static inline int times_4(DisasContext *s, int x)
+{
+ return x * 4;
+}
+
+static inline int times_2_plus_1(DisasContext *s, int x)
+{
+ return x * 2 + 1;
+}
+
+static inline int rsub_64(DisasContext *s, int x)
+{
+ return 64 - x;
+}
+
+static inline int rsub_32(DisasContext *s, int x)
+{
+ return 32 - x;
+}
+
+static inline int rsub_16(DisasContext *s, int x)
+{
+ return 16 - x;
+}
+
+static inline int rsub_8(DisasContext *s, int x)
+{
+ return 8 - x;
+}
+
+static inline int neon_3same_fp_size(DisasContext *s, int x)
+{
+ /* Convert 0==fp32, 1==fp16 into a MO_* value */
+ return MO_32 - x;
+}
+
+static inline int arm_dc_feature(DisasContext *dc, int feature)
+{
+ return (dc->features & (1ULL << feature)) != 0;
+}
+
+static inline int get_mem_index(DisasContext *s)
+{
+ return arm_to_core_mmu_idx(s->mmu_idx);
+}
+
+/* Function used to determine the target exception EL when otherwise not known
+ * or default.
+ */
+static inline int default_exception_el(DisasContext *s)
+{
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3. Otherwise,
+ * exceptions can only be routed to ELs above 1, so we target the higher of
+ * 1 or the current EL.
+ */
+ return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
+ ? 3 : MAX(1, s->current_el);
+}
+
+static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
+{
+ /* We don't need to save all of the syndrome, so we mask and shift
+ * out unneeded bits to help the sleb128 encoder do a better job.
+ */
+ syn &= ARM_INSN_START_WORD2_MASK;
+ syn >>= ARM_INSN_START_WORD2_SHIFT;
+
+ /* We check and clear s->insn_start to catch multiple updates. */
+ assert(s->insn_start != NULL);
+ tcg_set_insn_start_param(s->insn_start, 2, syn);
+ s->insn_start = NULL;
+}
+
+/* is_jmp field values */
+#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
+/* CPU state was modified dynamically; exit to main loop for interrupts. */
+#define DISAS_UPDATE_EXIT DISAS_TARGET_1
+/* These instructions trap after executing, so the A32/T32 decoder must
+ * defer them until after the conditional execution state has been updated.
+ * WFI also needs special handling when single-stepping.
+ */
+#define DISAS_WFI DISAS_TARGET_2
+#define DISAS_SWI DISAS_TARGET_3
+/* WFE */
+#define DISAS_WFE DISAS_TARGET_4
+#define DISAS_HVC DISAS_TARGET_5
+#define DISAS_SMC DISAS_TARGET_6
+#define DISAS_YIELD DISAS_TARGET_7
+/* M profile branch which might be an exception return (and so needs
+ * custom end-of-TB code)
+ */
+#define DISAS_BX_EXCRET DISAS_TARGET_8
+/*
+ * For instructions which want an immediate exit to the main loop, as opposed
+ * to attempting to use lookup_and_goto_ptr. Unlike DISAS_UPDATE_EXIT, this
+ * doesn't write the PC on exiting the translation loop so you need to ensure
+ * something (gen_a64_set_pc_im or runtime helper) has done so before we reach
+ * return from cpu_tb_exec.
+ */
+#define DISAS_EXIT DISAS_TARGET_9
+/* CPU state was modified dynamically; no need to exit, but do not chain. */
+#define DISAS_UPDATE_NOCHAIN DISAS_TARGET_10
+
+#ifdef TARGET_AARCH64
+void a64_translate_init(void);
+void gen_a64_set_pc_im(uint64_t val);
+extern const TranslatorOps aarch64_translator_ops;
+#else
+static inline void a64_translate_init(void)
+{
+}
+
+static inline void gen_a64_set_pc_im(uint64_t val)
+{
+}
+#endif
+
+void arm_test_cc(DisasCompare *cmp, int cc);
+void arm_free_cc(DisasCompare *cmp);
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
+void arm_gen_test_cc(int cc, TCGLabel *label);
+MemOp pow2_align(unsigned i);
+void unallocated_encoding(DisasContext *s);
+void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
+ uint32_t syn, uint32_t target_el);
+
+/* Return state of Alternate Half-precision flag, caller frees result */
+static inline TCGv_i32 get_ahp_flag(void)
+{
+ TCGv_i32 ret = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(ret, cpu_env,
+ offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
+ tcg_gen_extract_i32(ret, ret, 26, 1);
+
+ return ret;
+}
+
+/* Set bits within PSTATE. */
+static inline void set_pstate_bits(uint32_t bits)
+{
+ TCGv_i32 p = tcg_temp_new_i32();
+
+ tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
+
+ tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ori_i32(p, p, bits);
+ tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_temp_free_i32(p);
+}
+
+/* Clear bits within PSTATE. */
+static inline void clear_pstate_bits(uint32_t bits)
+{
+ TCGv_i32 p = tcg_temp_new_i32();
+
+ tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
+
+ tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_gen_andi_i32(p, p, ~bits);
+ tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
+ tcg_temp_free_i32(p);
+}
+
+/* If the singlestep state is Active-not-pending, advance to Active-pending. */
+static inline void gen_ss_advance(DisasContext *s)
+{
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ clear_pstate_bits(PSTATE_SS);
+ }
+}
+
+static inline void gen_exception(int excp, uint32_t syndrome,
+ uint32_t target_el)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+
+ tcg_temp_free_i32(tcg_el);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+/* Generate an architectural singlestep exception */
+static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
+{
+ bool same_el = (s->debug_target_el == s->current_el);
+
+ /*
+ * If singlestep is targeting a lower EL than the current one,
+ * then s->ss_active must be false and we can never get here.
+ */
+ assert(s->debug_target_el >= s->current_el);
+
+ gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
+}
+
+/*
+ * Given a VFP floating point constant encoded into an 8-bit immediate in an
+ * instruction, expand it to the actual constant value of the specified
+ * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
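+ *
+ * For example (assuming the MO_* size values its callers use),
+ * vfp_expand_imm(MO_32, 0x70) yields 0x3f800000, i.e. 1.0f.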
+ */
+uint64_t vfp_expand_imm(int size, uint8_t imm8);
+
+/* Vector operations shared between ARM and AArch64. */
+void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
+void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
+ int64_t shift, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
+/*
+ * Forward to the isar_feature_* tests given a DisasContext pointer.
+ */
+#define dc_isar_feature(name, ctx) \
+ ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
+
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
+typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
+ uint32_t, uint32_t);
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+
+/* Function prototype for gen_ functions for calling Neon helpers */
+typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
+typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
+ TCGv_i32, TCGv_i32);
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
+typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
+typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
+typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
+typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
+typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
+typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
+typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
+typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
+typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
+typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
+typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+
+/**
+ * arm_tbflags_from_tb:
+ * @tb: the TranslationBlock
+ *
+ * Extract the flag values from @tb.
+ */
+static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
+{
+ return (CPUARMTBFlags){ tb->flags, tb->cs_base };
+}
+
+/*
+ * Enum for argument to fpstatus_ptr().
+ */
+typedef enum ARMFPStatusFlavour {
+ FPST_FPCR,
+ FPST_FPCR_F16,
+ FPST_STD,
+ FPST_STD_F16,
+} ARMFPStatusFlavour;
+
+/**
+ * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
+ *
+ * We have multiple softfloat float_status fields in the Arm CPU state struct
+ * (see the comment in cpu.h for details). Return a TCGv_ptr which has
+ * been set up to point to the requested field in the CPU state struct.
+ * The options are:
+ *
+ * FPST_FPCR
+ * for non-FP16 operations controlled by the FPCR
+ * FPST_FPCR_F16
+ * for operations controlled by the FPCR where FPCR.FZ16 is to be used
+ * FPST_STD
+ * for A32/T32 Neon operations using the "standard FPSCR value"
+ * FPST_STD_F16
+ * as FPST_STD, but where FPCR.FZ16 is to be used
+ */
+static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+
+ switch (flavour) {
+ case FPST_FPCR:
+ offset = offsetof(CPUARMState, vfp.fp_status);
+ break;
+ case FPST_FPCR_F16:
+ offset = offsetof(CPUARMState, vfp.fp_status_f16);
+ break;
+ case FPST_STD:
+ offset = offsetof(CPUARMState, vfp.standard_fp_status);
+ break;
+ case FPST_STD_F16:
+ offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
+
+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (opc & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC. Otherwise, this is an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here. Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
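+ *
+ * For example, finalize_memop(s, MO_UL) with s->align_mem set and
+ * big-endian data returns MO_UL | MO_ALIGN | MO_BE.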
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+ if (s->align_mem && !(opc & MO_AMASK)) {
+ opc |= MO_ALIGN;
+ }
+ return opc | s->be_data;
+}
+
+/**
+ * asimd_imm_const: Expand an encoded SIMD constant value
+ *
+ * Expand a SIMD constant value. This is essentially the pseudocode
+ * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
+ * VMVN and VBIC (when cmode < 14 && op == 1).
+ *
+ * The combination cmode == 15, op == 1 is a reserved encoding for AArch32;
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
+ *
+ * cmode = 2,3,4,5,6,7,10,11,12,13 with imm = 0 was UNPREDICTABLE in v7A but
+ * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
+ * we produce an immediate constant value of 0 in these cases.
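+ *
+ * For example, cmode == 14 with op == 0 simply replicates imm8 into
+ * every byte: asimd_imm_const(0xab, 14, 0) == 0xabababababababab.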
+ */
+uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
+
+#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
new file mode 100644
index 000000000..17fb15836
--- /dev/null
+++ b/target/arm/vec_helper.c
@@ -0,0 +1,2666 @@
+/*
+ * ARM AdvSIMD / SVE Vector Operations
+ *
+ * Copyright (c) 2018 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "fpu/softfloat.h"
+#include "qemu/int128.h"
+#include "vec_internal.h"
+
+/*
+ * Data for expanding active predicate bits to bytes, for byte elements.
+ *
+ * for (i = 0; i < 256; ++i) {
+ * unsigned long m = 0;
+ * for (j = 0; j < 8; j++) {
+ * if ((i >> j) & 1) {
+ * m |= 0xfful << (j << 3);
+ * }
+ * }
+ * printf("0x%016lx,\n", m);
+ * }
+ */
+const uint64_t expand_pred_b_data[256] = {
+ 0x0000000000000000, 0x00000000000000ff, 0x000000000000ff00,
+ 0x000000000000ffff, 0x0000000000ff0000, 0x0000000000ff00ff,
+ 0x0000000000ffff00, 0x0000000000ffffff, 0x00000000ff000000,
+ 0x00000000ff0000ff, 0x00000000ff00ff00, 0x00000000ff00ffff,
+ 0x00000000ffff0000, 0x00000000ffff00ff, 0x00000000ffffff00,
+ 0x00000000ffffffff, 0x000000ff00000000, 0x000000ff000000ff,
+ 0x000000ff0000ff00, 0x000000ff0000ffff, 0x000000ff00ff0000,
+ 0x000000ff00ff00ff, 0x000000ff00ffff00, 0x000000ff00ffffff,
+ 0x000000ffff000000, 0x000000ffff0000ff, 0x000000ffff00ff00,
+ 0x000000ffff00ffff, 0x000000ffffff0000, 0x000000ffffff00ff,
+ 0x000000ffffffff00, 0x000000ffffffffff, 0x0000ff0000000000,
+ 0x0000ff00000000ff, 0x0000ff000000ff00, 0x0000ff000000ffff,
+ 0x0000ff0000ff0000, 0x0000ff0000ff00ff, 0x0000ff0000ffff00,
+ 0x0000ff0000ffffff, 0x0000ff00ff000000, 0x0000ff00ff0000ff,
+ 0x0000ff00ff00ff00, 0x0000ff00ff00ffff, 0x0000ff00ffff0000,
+ 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0x0000ff00ffffffff,
+ 0x0000ffff00000000, 0x0000ffff000000ff, 0x0000ffff0000ff00,
+ 0x0000ffff0000ffff, 0x0000ffff00ff0000, 0x0000ffff00ff00ff,
+ 0x0000ffff00ffff00, 0x0000ffff00ffffff, 0x0000ffffff000000,
+ 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0x0000ffffff00ffff,
+ 0x0000ffffffff0000, 0x0000ffffffff00ff, 0x0000ffffffffff00,
+ 0x0000ffffffffffff, 0x00ff000000000000, 0x00ff0000000000ff,
+ 0x00ff00000000ff00, 0x00ff00000000ffff, 0x00ff000000ff0000,
+ 0x00ff000000ff00ff, 0x00ff000000ffff00, 0x00ff000000ffffff,
+ 0x00ff0000ff000000, 0x00ff0000ff0000ff, 0x00ff0000ff00ff00,
+ 0x00ff0000ff00ffff, 0x00ff0000ffff0000, 0x00ff0000ffff00ff,
+ 0x00ff0000ffffff00, 0x00ff0000ffffffff, 0x00ff00ff00000000,
+ 0x00ff00ff000000ff, 0x00ff00ff0000ff00, 0x00ff00ff0000ffff,
+ 0x00ff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00,
+ 0x00ff00ff00ffffff, 0x00ff00ffff000000, 0x00ff00ffff0000ff,
+ 0x00ff00ffff00ff00, 0x00ff00ffff00ffff, 0x00ff00ffffff0000,
+ 0x00ff00ffffff00ff, 0x00ff00ffffffff00, 0x00ff00ffffffffff,
+ 0x00ffff0000000000, 0x00ffff00000000ff, 0x00ffff000000ff00,
+ 0x00ffff000000ffff, 0x00ffff0000ff0000, 0x00ffff0000ff00ff,
+ 0x00ffff0000ffff00, 0x00ffff0000ffffff, 0x00ffff00ff000000,
+ 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0x00ffff00ff00ffff,
+ 0x00ffff00ffff0000, 0x00ffff00ffff00ff, 0x00ffff00ffffff00,
+ 0x00ffff00ffffffff, 0x00ffffff00000000, 0x00ffffff000000ff,
+ 0x00ffffff0000ff00, 0x00ffffff0000ffff, 0x00ffffff00ff0000,
+ 0x00ffffff00ff00ff, 0x00ffffff00ffff00, 0x00ffffff00ffffff,
+ 0x00ffffffff000000, 0x00ffffffff0000ff, 0x00ffffffff00ff00,
+ 0x00ffffffff00ffff, 0x00ffffffffff0000, 0x00ffffffffff00ff,
+ 0x00ffffffffffff00, 0x00ffffffffffffff, 0xff00000000000000,
+ 0xff000000000000ff, 0xff0000000000ff00, 0xff0000000000ffff,
+ 0xff00000000ff0000, 0xff00000000ff00ff, 0xff00000000ffff00,
+ 0xff00000000ffffff, 0xff000000ff000000, 0xff000000ff0000ff,
+ 0xff000000ff00ff00, 0xff000000ff00ffff, 0xff000000ffff0000,
+ 0xff000000ffff00ff, 0xff000000ffffff00, 0xff000000ffffffff,
+ 0xff0000ff00000000, 0xff0000ff000000ff, 0xff0000ff0000ff00,
+ 0xff0000ff0000ffff, 0xff0000ff00ff0000, 0xff0000ff00ff00ff,
+ 0xff0000ff00ffff00, 0xff0000ff00ffffff, 0xff0000ffff000000,
+ 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0xff0000ffff00ffff,
+ 0xff0000ffffff0000, 0xff0000ffffff00ff, 0xff0000ffffffff00,
+ 0xff0000ffffffffff, 0xff00ff0000000000, 0xff00ff00000000ff,
+ 0xff00ff000000ff00, 0xff00ff000000ffff, 0xff00ff0000ff0000,
+ 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0xff00ff0000ffffff,
+ 0xff00ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00,
+ 0xff00ff00ff00ffff, 0xff00ff00ffff0000, 0xff00ff00ffff00ff,
+ 0xff00ff00ffffff00, 0xff00ff00ffffffff, 0xff00ffff00000000,
+ 0xff00ffff000000ff, 0xff00ffff0000ff00, 0xff00ffff0000ffff,
+ 0xff00ffff00ff0000, 0xff00ffff00ff00ff, 0xff00ffff00ffff00,
+ 0xff00ffff00ffffff, 0xff00ffffff000000, 0xff00ffffff0000ff,
+ 0xff00ffffff00ff00, 0xff00ffffff00ffff, 0xff00ffffffff0000,
+ 0xff00ffffffff00ff, 0xff00ffffffffff00, 0xff00ffffffffffff,
+ 0xffff000000000000, 0xffff0000000000ff, 0xffff00000000ff00,
+ 0xffff00000000ffff, 0xffff000000ff0000, 0xffff000000ff00ff,
+ 0xffff000000ffff00, 0xffff000000ffffff, 0xffff0000ff000000,
+ 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0xffff0000ff00ffff,
+ 0xffff0000ffff0000, 0xffff0000ffff00ff, 0xffff0000ffffff00,
+ 0xffff0000ffffffff, 0xffff00ff00000000, 0xffff00ff000000ff,
+ 0xffff00ff0000ff00, 0xffff00ff0000ffff, 0xffff00ff00ff0000,
+ 0xffff00ff00ff00ff, 0xffff00ff00ffff00, 0xffff00ff00ffffff,
+ 0xffff00ffff000000, 0xffff00ffff0000ff, 0xffff00ffff00ff00,
+ 0xffff00ffff00ffff, 0xffff00ffffff0000, 0xffff00ffffff00ff,
+ 0xffff00ffffffff00, 0xffff00ffffffffff, 0xffffff0000000000,
+ 0xffffff00000000ff, 0xffffff000000ff00, 0xffffff000000ffff,
+ 0xffffff0000ff0000, 0xffffff0000ff00ff, 0xffffff0000ffff00,
+ 0xffffff0000ffffff, 0xffffff00ff000000, 0xffffff00ff0000ff,
+ 0xffffff00ff00ff00, 0xffffff00ff00ffff, 0xffffff00ffff0000,
+ 0xffffff00ffff00ff, 0xffffff00ffffff00, 0xffffff00ffffffff,
+ 0xffffffff00000000, 0xffffffff000000ff, 0xffffffff0000ff00,
+ 0xffffffff0000ffff, 0xffffffff00ff0000, 0xffffffff00ff00ff,
+ 0xffffffff00ffff00, 0xffffffff00ffffff, 0xffffffffff000000,
+ 0xffffffffff0000ff, 0xffffffffff00ff00, 0xffffffffff00ffff,
+ 0xffffffffffff0000, 0xffffffffffff00ff, 0xffffffffffffff00,
+ 0xffffffffffffffff,
+};
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */
+int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
+ bool neg, bool round)
+{
+ /*
+ * Simplify:
+ * = ((a3 << 8) + ((e1 * e2) << 1) + (round << 7)) >> 8
+ * = ((a3 << 7) + (e1 * e2) + (round << 6)) >> 7
+ */
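+ /* e.g. e1 = e2 = 0x40 (64), a3 = 0, round: (64 * 64 + 64) >> 7 = 32 */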
+ int32_t ret = (int32_t)src1 * src2;
+ if (neg) {
+ ret = -ret;
+ }
+ ret += ((int32_t)src3 << 7) + (round << 6);
+ ret >>= 7;
+
+ if (ret != (int8_t)ret) {
+ ret = (ret < 0 ? INT8_MIN : INT8_MAX);
+ }
+ return ret;
+}
+
+void HELPER(sve2_sqrdmlah_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], a[i], false, true);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], a[i], true, true);
+ }
+}
+
+void HELPER(sve2_sqdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, false);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = do_sqrdmlah_b(n[i], m[i], 0, false, true);
+ }
+}
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
+int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
+ bool neg, bool round, uint32_t *sat)
+{
+ /* Simplify similarly to do_sqrdmlah_b above. */
+ int32_t ret = (int32_t)src1 * src2;
+ if (neg) {
+ ret = -ret;
+ }
+ ret += ((int32_t)src3 << 15) + (round << 14);
+ ret >>= 15;
+
+ if (ret != (int16_t)ret) {
+ *sat = 1;
+ ret = (ret < 0 ? INT16_MIN : INT16_MAX);
+ }
+ return ret;
+}
+
+uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
+ uint32_t src2, uint32_t src3)
+{
+ uint32_t *sat = &env->vfp.qc[0];
+ uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat);
+ uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
+ false, true, sat);
+ return deposit32(e1, 16, 16, e2);
+}
+
+void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ int16_t *d = vd;
+ int16_t *n = vn;
+ int16_t *m = vm;
+ uintptr_t i;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
+ uint32_t src2, uint32_t src3)
+{
+ uint32_t *sat = &env->vfp.qc[0];
+ uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat);
+ uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
+ true, true, sat);
+ return deposit32(e1, 16, 16, e2);
+}
+
+void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ int16_t *d = vd;
+ int16_t *n = vn;
+ int16_t *m = vm;
+ uintptr_t i;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqdmulh_h)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(sve2_sqrdmlah_h)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], a[i], false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], a[i], true, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int16_t *d = vd, *n = vn, *m = (int16_t *)vm + H2(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 2; i += 16 / 2) {
+ int16_t mm = m[i];
+ for (j = 0; j < 16 / 2; ++j) {
+ d[i + j] = do_sqrdmlah_h(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
+int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
+ bool neg, bool round, uint32_t *sat)
+{
+ /* Simplify similarly to do_sqrdmlah_b above. */
+ int64_t ret = (int64_t)src1 * src2;
+ if (neg) {
+ ret = -ret;
+ }
+ ret += ((int64_t)src3 << 31) + (round << 30);
+ ret >>= 31;
+
+ if (ret != (int32_t)ret) {
+ *sat = 1;
+ ret = (ret < 0 ? INT32_MIN : INT32_MAX);
+ }
+ return ret;
+}
+
+uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
+ int32_t src2, int32_t src3)
+{
+ uint32_t *sat = &env->vfp.qc[0];
+ return do_sqrdmlah_s(src1, src2, src3, false, true, sat);
+}
+
+void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ int32_t *d = vd;
+ int32_t *n = vn;
+ int32_t *m = vm;
+ uintptr_t i;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
+ int32_t src2, int32_t src3)
+{
+ uint32_t *sat = &env->vfp.qc[0];
+ return do_sqrdmlah_s(src1, src2, src3, true, true, sat);
+}
+
+void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ int32_t *d = vd;
+ int32_t *n = vn;
+ int32_t *m = vm;
+ uintptr_t i;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqdmulh_s)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
+ void *vq, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, vq);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(sve2_sqrdmlah_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], a[i], false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_s)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm, *a = va;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], a[i], true, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, &discard);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, &discard);
+ }
+}
+
+void HELPER(sve2_sqdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, false, &discard);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int32_t *d = vd, *n = vn, *m = (int32_t *)vm + H4(idx);
+ uint32_t discard;
+
+ for (i = 0; i < opr_sz / 4; i += 16 / 4) {
+ int32_t mm = m[i];
+ for (j = 0; j < 16 / 4; ++j) {
+ d[i + j] = do_sqrdmlah_s(n[i + j], mm, 0, false, true, &discard);
+ }
+ }
+}
+
+/* Signed saturating rounding doubling multiply-accumulate high half, 64-bit */
+static int64_t do_sat128_d(Int128 r)
+{
+ int64_t ls = int128_getlo(r);
+ int64_t hs = int128_gethi(r);
+
+ if (unlikely(hs != (ls >> 63))) {
+ return hs < 0 ? INT64_MIN : INT64_MAX;
+ }
+ return ls;
+}
+
+int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, bool neg, bool round)
+{
+ uint64_t l, h;
+ Int128 r, t;
+
+ /* As in do_sqrdmlah_b, but with 128-bit arithmetic. */
+ muls64(&l, &h, m, n);
+ r = int128_make128(l, h);
+ if (neg) {
+ r = int128_neg(r);
+ }
+ if (a) {
+ t = int128_exts64(a);
+ t = int128_lshift(t, 63);
+ r = int128_add(r, t);
+ }
+ if (round) {
+ t = int128_exts64(1ll << 62);
+ r = int128_add(r, t);
+ }
+ r = int128_rshift(r, 63);
+
+ return do_sat128_d(r);
+}
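+
+/*
+ * E.g. do_sqrdmlah_d(INT64_MIN, INT64_MIN, 0, false, true) forms the
+ * 128-bit product 2^126, adds the rounding constant 2^62 and shifts
+ * right by 63, giving 2^63.  That no longer sign-extends from the low
+ * 64 bits, so do_sat128_d clamps the result to INT64_MAX.
+ */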
+
+void HELPER(sve2_sqrdmlah_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], a[i], false, true);
+ }
+}
+
+void HELPER(sve2_sqrdmlsh_d)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm, *a = va;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], a[i], true, true);
+ }
+}
+
+void HELPER(sve2_sqdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, false);
+ }
+}
+
+void HELPER(sve2_sqrdmulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = do_sqrdmlah_d(n[i], m[i], 0, false, true);
+ }
+}
+
+void HELPER(sve2_sqdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, false);
+ }
+ }
+}
+
+void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ int idx = simd_data(desc);
+ int64_t *d = vd, *n = vn, *m = (int64_t *)vm + idx;
+
+ for (i = 0; i < opr_sz / 8; i += 16 / 8) {
+ int64_t mm = m[i];
+ for (j = 0; j < 16 / 8; ++j) {
+ d[i + j] = do_sqrdmlah_d(n[i + j], mm, 0, false, true);
+ }
+ }
+}
+
+/* Integer 8- and 16-bit dot-product.
+ *
+ * Note that for the loops herein, host endianness does not matter
+ * with respect to the ordering of data within the quad-width lanes.
+ * All elements are treated equally, no matter where they are.
+ */
+
+#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \
+ d[i] = (a[i] + \
+ (TYPED)n[i * 4 + 0] * m[i * 4 + 0] + \
+ (TYPED)n[i * 4 + 1] * m[i * 4 + 1] + \
+ (TYPED)n[i * 4 + 2] * m[i * 4 + 2] + \
+ (TYPED)n[i * 4 + 3] * m[i * 4 + 3]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
+DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
+DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t)
+DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
+DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)
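+
+/*
+ * Illustration: DO_DOT(gvec_sdot_b, ...) above accumulates four byte
+ * products into each 32-bit lane, e.g. with a[0] = 0, n = {1, 2, 3, 4}
+ * and m = {5, 6, 7, 8}, d[0] = 1*5 + 2*6 + 3*7 + 4*8 = 70.
+ */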
+
+#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i = 0, opr_sz = simd_oprsz(desc); \
+ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \
+ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \
+ intptr_t index = simd_data(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 4; \
+ do { \
+ TYPED m0 = m_indexed[i * 4 + 0]; \
+ TYPED m1 = m_indexed[i * 4 + 1]; \
+ TYPED m2 = m_indexed[i * 4 + 2]; \
+ TYPED m3 = m_indexed[i * 4 + 3]; \
+ do { \
+ d[i] = (a[i] + \
+ n[i * 4 + 0] * m0 + \
+ n[i * 4 + 1] * m1 + \
+ n[i * 4 + 2] * m2 + \
+ n[i * 4 + 3] * m3); \
+ } while (++i < segend); \
+ segend = i + 4; \
+ } while (i < opr_sz_n); \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4)
+DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4)
+DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, H8)
+DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, H8)
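+
+/*
+ * In the indexed forms above, the group of four TYPEM elements
+ * selected by the index is read from each 128-bit segment of vm in
+ * turn and multiplied against every group of n elements within that
+ * same segment.
+ */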
+
+void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float16 *d = vd;
+ float16 *n = vn;
+ float16 *m = vm;
+ float_status *fpst = vfpst;
+ uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = neg_real ^ 1;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 15;
+ neg_imag <<= 15;
+
+ for (i = 0; i < opr_sz / 2; i += 2) {
+ float16 e0 = n[H2(i)];
+ float16 e1 = m[H2(i + 1)] ^ neg_imag;
+ float16 e2 = n[H2(i + 1)];
+ float16 e3 = m[H2(i)] ^ neg_real;
+
+ d[H2(i)] = float16_add(e0, e1, fpst);
+ d[H2(i + 1)] = float16_add(e2, e3, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float32 *d = vd;
+ float32 *n = vn;
+ float32 *m = vm;
+ float_status *fpst = vfpst;
+ uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = neg_real ^ 1;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 31;
+ neg_imag <<= 31;
+
+ for (i = 0; i < opr_sz / 4; i += 2) {
+ float32 e0 = n[H4(i)];
+ float32 e1 = m[H4(i + 1)] ^ neg_imag;
+ float32 e2 = n[H4(i + 1)];
+ float32 e3 = m[H4(i)] ^ neg_real;
+
+ d[H4(i)] = float32_add(e0, e1, fpst);
+ d[H4(i + 1)] = float32_add(e2, e3, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float64 *d = vd;
+ float64 *n = vn;
+ float64 *m = vm;
+ float_status *fpst = vfpst;
+ uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t neg_imag = neg_real ^ 1;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 63;
+ neg_imag <<= 63;
+
+ for (i = 0; i < opr_sz / 8; i += 2) {
+ float64 e0 = n[i];
+ float64 e1 = m[i + 1] ^ neg_imag;
+ float64 e2 = n[i + 1];
+ float64 e3 = m[i] ^ neg_real;
+
+ d[i] = float64_add(e0, e1, fpst);
+ d[i + 1] = float64_add(e2, e3, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
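+
+/*
+ * In the three FCADD helpers above, the desc bit selects the rotation:
+ * neg_real == 0 computes d.re = n.re - m.im, d.im = n.im + m.re
+ * (FCADD #90), while neg_real == 1 computes d.re = n.re + m.im,
+ * d.im = n.im - m.re (FCADD #270).
+ */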
+
+void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm, void *va,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
+ float_status *fpst = vfpst;
+ intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t neg_real = flip ^ neg_imag;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 15;
+ neg_imag <<= 15;
+
+ for (i = 0; i < opr_sz / 2; i += 2) {
+ float16 e2 = n[H2(i + flip)];
+ float16 e1 = m[H2(i + flip)] ^ neg_real;
+ float16 e4 = e2;
+ float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;
+
+ d[H2(i)] = float16_muladd(e2, e1, a[H2(i)], 0, fpst);
+ d[H2(i + 1)] = float16_muladd(e4, e3, a[H2(i + 1)], 0, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm, void *va,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float16 *d = vd, *n = vn, *m = vm, *a = va;
+ float_status *fpst = vfpst;
+ intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
+ uint32_t neg_real = flip ^ neg_imag;
+ intptr_t elements = opr_sz / sizeof(float16);
+ intptr_t eltspersegment = 16 / sizeof(float16);
+ intptr_t i, j;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 15;
+ neg_imag <<= 15;
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ float16 mr = m[H2(i + 2 * index + 0)];
+ float16 mi = m[H2(i + 2 * index + 1)];
+ float16 e1 = neg_real ^ (flip ? mi : mr);
+ float16 e3 = neg_imag ^ (flip ? mr : mi);
+
+ for (j = i; j < i + eltspersegment; j += 2) {
+ float16 e2 = n[H2(j + flip)];
+ float16 e4 = e2;
+
+ d[H2(j)] = float16_muladd(e2, e1, a[H2(j)], 0, fpst);
+ d[H2(j + 1)] = float16_muladd(e4, e3, a[H2(j + 1)], 0, fpst);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm, void *va,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
+ float_status *fpst = vfpst;
+ intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint32_t neg_real = flip ^ neg_imag;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 31;
+ neg_imag <<= 31;
+
+ for (i = 0; i < opr_sz / 4; i += 2) {
+ float32 e2 = n[H4(i + flip)];
+ float32 e1 = m[H4(i + flip)] ^ neg_real;
+ float32 e4 = e2;
+ float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;
+
+ d[H4(i)] = float32_muladd(e2, e1, a[H4(i)], 0, fpst);
+ d[H4(i + 1)] = float32_muladd(e4, e3, a[H4(i + 1)], 0, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm, void *va,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float32 *d = vd, *n = vn, *m = vm, *a = va;
+ float_status *fpst = vfpst;
+ intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
+ uint32_t neg_real = flip ^ neg_imag;
+ intptr_t elements = opr_sz / sizeof(float32);
+ intptr_t eltspersegment = 16 / sizeof(float32);
+ intptr_t i, j;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 31;
+ neg_imag <<= 31;
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ float32 mr = m[H4(i + 2 * index + 0)];
+ float32 mi = m[H4(i + 2 * index + 1)];
+ float32 e1 = neg_real ^ (flip ? mi : mr);
+ float32 e3 = neg_imag ^ (flip ? mr : mi);
+
+ for (j = i; j < i + eltspersegment; j += 2) {
+ float32 e2 = n[H4(j + flip)];
+ float32 e4 = e2;
+
+ d[H4(j)] = float32_muladd(e2, e1, a[H4(j)], 0, fpst);
+ d[H4(j + 1)] = float32_muladd(e4, e3, a[H4(j + 1)], 0, fpst);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm, void *va,
+ void *vfpst, uint32_t desc)
+{
+ uintptr_t opr_sz = simd_oprsz(desc);
+ float64 *d = vd, *n = vn, *m = vm, *a = va;
+ float_status *fpst = vfpst;
+ intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
+ uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ uint64_t neg_real = flip ^ neg_imag;
+ uintptr_t i;
+
+ /* Shift boolean to the sign bit so we can xor to negate. */
+ neg_real <<= 63;
+ neg_imag <<= 63;
+
+ for (i = 0; i < opr_sz / 8; i += 2) {
+ float64 e2 = n[i + flip];
+ float64 e1 = m[i + flip] ^ neg_real;
+ float64 e4 = e2;
+ float64 e3 = m[i + 1 - flip] ^ neg_imag;
+
+ d[i] = float64_muladd(e2, e1, a[i], 0, fpst);
+ d[i + 1] = float64_muladd(e4, e3, a[i + 1], 0, fpst);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
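+
+/*
+ * Each FCMLA helper above accumulates one half of a complex multiply,
+ * decoded from desc: flip selects whether the real or imaginary part
+ * of n is used, and the negations give the sign for the rotation.
+ * E.g. rotation 0 (flip = 0, neg_imag = 0) computes
+ * d.re += n.re * m.re, d.im += n.re * m.im, while rotation 90
+ * (flip = 1, neg_imag = 0, hence neg_real = 1) computes
+ * d.re -= n.im * m.im, d.im += n.im * m.re; the two together form a
+ * full complex multiply-accumulate.
+ */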
+
+/*
+ * Floating point comparisons producing an integer result (all 1s or all 0s).
+ * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
+ * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
+ */
+static uint16_t float16_ceq(float16 op1, float16 op2, float_status *stat)
+{
+ return -float16_eq_quiet(op1, op2, stat);
+}
+
+static uint32_t float32_ceq(float32 op1, float32 op2, float_status *stat)
+{
+ return -float32_eq_quiet(op1, op2, stat);
+}
+
+static uint16_t float16_cge(float16 op1, float16 op2, float_status *stat)
+{
+ return -float16_le(op2, op1, stat);
+}
+
+static uint32_t float32_cge(float32 op1, float32 op2, float_status *stat)
+{
+ return -float32_le(op2, op1, stat);
+}
+
+static uint16_t float16_cgt(float16 op1, float16 op2, float_status *stat)
+{
+ return -float16_lt(op2, op1, stat);
+}
+
+static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
+{
+ return -float32_lt(op2, op1, stat);
+}
+
+static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
+{
+ return -float16_le(float16_abs(op2), float16_abs(op1), stat);
+}
+
+static uint32_t float32_acge(float32 op1, float32 op2, float_status *stat)
+{
+ return -float32_le(float32_abs(op2), float32_abs(op1), stat);
+}
+
+static uint16_t float16_acgt(float16 op1, float16 op2, float_status *stat)
+{
+ return -float16_lt(float16_abs(op2), float16_abs(op1), stat);
+}
+
+static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
+{
+ return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
+}
+
+static int16_t vfp_tosszh(float16 x, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ if (float16_is_any_nan(x)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_int16_round_to_zero(x, fpst);
+}
+
+static uint16_t vfp_touszh(float16 x, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ if (float16_is_any_nan(x)) {
+ float_raise(float_flag_invalid, fpst);
+ return 0;
+ }
+ return float16_to_uint16_round_to_zero(x, fpst);
+}
+
+#define DO_2OP(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(n[i], stat); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
+DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
+DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)
+
+DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
+DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
+DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)
+
+DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
+DO_2OP(gvec_vrintx_s, float32_round_to_int, float32)
+
+DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
+DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
+DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
+DO_2OP(gvec_touizs, helper_vfp_touizs, float32)
+DO_2OP(gvec_sstoh, int16_to_float16, int16_t)
+DO_2OP(gvec_ustoh, uint16_to_float16, uint16_t)
+DO_2OP(gvec_tosszh, vfp_tosszh, float16)
+DO_2OP(gvec_touszh, vfp_touszh, float16)
+
+#define WRAP_CMP0_FWD(FN, CMPOP, TYPE) \
+ static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
+ { \
+ return TYPE##_##CMPOP(op, TYPE##_zero, stat); \
+ }
+
+#define WRAP_CMP0_REV(FN, CMPOP, TYPE) \
+ static TYPE TYPE##_##FN##0(TYPE op, float_status *stat) \
+ { \
+ return TYPE##_##CMPOP(TYPE##_zero, op, stat); \
+ }
+
+#define DO_2OP_CMP0(FN, CMPOP, DIRN) \
+ WRAP_CMP0_##DIRN(FN, CMPOP, float16) \
+ WRAP_CMP0_##DIRN(FN, CMPOP, float32) \
+ DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16) \
+ DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)
+
+DO_2OP_CMP0(cgt, cgt, FWD)
+DO_2OP_CMP0(cge, cge, FWD)
+DO_2OP_CMP0(ceq, ceq, FWD)
+DO_2OP_CMP0(clt, cgt, REV)
+DO_2OP_CMP0(cle, cge, REV)
+
+#undef DO_2OP
+#undef DO_2OP_CMP0
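+
+/*
+ * The REV wrappers derive the remaining comparisons by swapping
+ * operands: x < 0 is 0 > x, so clt0 and cle0 reuse the cgt and cge
+ * routines with zero as the first argument.
+ */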
+
+/* Floating-point trigonometric starting value.
+ * See the ARM ARM pseudocode function FPTrigSMul.
+ */
+static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
+{
+ float16 result = float16_mul(op1, op1, stat);
+ if (!float16_is_any_nan(result)) {
+ result = float16_set_sign(result, op2 & 1);
+ }
+ return result;
+}
+
+static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
+{
+ float32 result = float32_mul(op1, op1, stat);
+ if (!float32_is_any_nan(result)) {
+ result = float32_set_sign(result, op2 & 1);
+ }
+ return result;
+}
+
+static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
+{
+ float64 result = float64_mul(op1, op1, stat);
+ if (!float64_is_any_nan(result)) {
+ result = float64_set_sign(result, op2 & 1);
+ }
+ return result;
+}
+
+static float16 float16_abd(float16 op1, float16 op2, float_status *stat)
+{
+ return float16_abs(float16_sub(op1, op2, stat));
+}
+
+static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
+{
+ return float32_abs(float32_sub(op1, op2, stat));
+}
+
+/*
+ * Reciprocal step. These are the AArch32 versions, which use a
+ * non-fused multiply-and-subtract.
+ */
+static float16 float16_recps_nf(float16 op1, float16 op2, float_status *stat)
+{
+ op1 = float16_squash_input_denormal(op1, stat);
+ op2 = float16_squash_input_denormal(op2, stat);
+
+ if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
+ (float16_is_infinity(op2) && float16_is_zero(op1))) {
+ return float16_two;
+ }
+ return float16_sub(float16_two, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
+{
+ op1 = float32_squash_input_denormal(op1, stat);
+ op2 = float32_squash_input_denormal(op2, stat);
+
+ if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
+ (float32_is_infinity(op2) && float32_is_zero(op1))) {
+ return float32_two;
+ }
+ return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
+}
+
+/* Reciprocal square-root step. AArch32 non-fused semantics. */
+static float16 float16_rsqrts_nf(float16 op1, float16 op2, float_status *stat)
+{
+ op1 = float16_squash_input_denormal(op1, stat);
+ op2 = float16_squash_input_denormal(op2, stat);
+
+ if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
+ (float16_is_infinity(op2) && float16_is_zero(op1))) {
+ return float16_one_point_five;
+ }
+ op1 = float16_sub(float16_three, float16_mul(op1, op2, stat), stat);
+ return float16_div(op1, float16_two, stat);
+}
+
+static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
+{
+ op1 = float32_squash_input_denormal(op1, stat);
+ op2 = float32_squash_input_denormal(op2, stat);
+
+ if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
+ (float32_is_infinity(op2) && float32_is_zero(op1))) {
+ return float32_one_point_five;
+ }
+ op1 = float32_sub(float32_three, float32_mul(op1, op2, stat), stat);
+ return float32_div(op1, float32_two, stat);
+}
+
+#define DO_3OP(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(n[i], m[i], stat); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_3OP(gvec_fadd_h, float16_add, float16)
+DO_3OP(gvec_fadd_s, float32_add, float32)
+DO_3OP(gvec_fadd_d, float64_add, float64)
+
+DO_3OP(gvec_fsub_h, float16_sub, float16)
+DO_3OP(gvec_fsub_s, float32_sub, float32)
+DO_3OP(gvec_fsub_d, float64_sub, float64)
+
+DO_3OP(gvec_fmul_h, float16_mul, float16)
+DO_3OP(gvec_fmul_s, float32_mul, float32)
+DO_3OP(gvec_fmul_d, float64_mul, float64)
+
+DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
+DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
+DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)
+
+DO_3OP(gvec_fabd_h, float16_abd, float16)
+DO_3OP(gvec_fabd_s, float32_abd, float32)
+
+DO_3OP(gvec_fceq_h, float16_ceq, float16)
+DO_3OP(gvec_fceq_s, float32_ceq, float32)
+
+DO_3OP(gvec_fcge_h, float16_cge, float16)
+DO_3OP(gvec_fcge_s, float32_cge, float32)
+
+DO_3OP(gvec_fcgt_h, float16_cgt, float16)
+DO_3OP(gvec_fcgt_s, float32_cgt, float32)
+
+DO_3OP(gvec_facge_h, float16_acge, float16)
+DO_3OP(gvec_facge_s, float32_acge, float32)
+
+DO_3OP(gvec_facgt_h, float16_acgt, float16)
+DO_3OP(gvec_facgt_s, float32_acgt, float32)
+
+DO_3OP(gvec_fmax_h, float16_max, float16)
+DO_3OP(gvec_fmax_s, float32_max, float32)
+
+DO_3OP(gvec_fmin_h, float16_min, float16)
+DO_3OP(gvec_fmin_s, float32_min, float32)
+
+DO_3OP(gvec_fmaxnum_h, float16_maxnum, float16)
+DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)
+
+DO_3OP(gvec_fminnum_h, float16_minnum, float16)
+DO_3OP(gvec_fminnum_s, float32_minnum, float32)
+
+DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
+DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)
+
+DO_3OP(gvec_rsqrts_nf_h, float16_rsqrts_nf, float16)
+DO_3OP(gvec_rsqrts_nf_s, float32_rsqrts_nf, float32)
+
+#ifdef TARGET_AARCH64
+
+DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
+DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
+DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)
+
+DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
+DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
+DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
+
+#endif
+#undef DO_3OP
+
+/* Non-fused multiply-add (unlike float16_muladd etc, which are fused) */
+static float16 float16_muladd_nf(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_add(dest, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_muladd_nf(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_add(dest, float32_mul(op1, op2, stat), stat);
+}
+
+static float16 float16_mulsub_nf(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_sub(dest, float16_mul(op1, op2, stat), stat);
+}
+
+static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_sub(dest, float32_mul(op1, op2, stat), stat);
+}
+
+/* Fused versions; these have the semantics Neon VFMA/VFMS want */
+static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_muladd(op1, op2, dest, 0, stat);
+}
+
+static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_muladd(op1, op2, dest, 0, stat);
+}
+
+static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
+ float_status *stat)
+{
+ return float16_muladd(float16_chs(op1), op2, dest, 0, stat);
+}
+
+static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
+ float_status *stat)
+{
+ return float32_muladd(float32_chs(op1), op2, dest, 0, stat);
+}
+
+#define DO_MULADD(NAME, FUNC, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(d[i], n[i], m[i], stat); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
+DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)
+
+DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
+DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)
+
+DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
+DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)
+
+DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
+DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
+
+/* For the indexed ops, SVE applies the index per 128-bit vector segment.
+ * For AdvSIMD, there is of course only one such vector segment.
+ */
+
+#define DO_MUL_IDX(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
+ intptr_t idx = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[H(i + idx)]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = n[i + j] * mm; \
+ } \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2)
+DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4)
+DO_MUL_IDX(gvec_mul_idx_d, uint64_t, H8)
+
+#undef DO_MUL_IDX
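+
+/*
+ * E.g. for 32-bit elements with oprsz = 32 (a 256-bit SVE vector) and
+ * idx = 1, the first 128-bit segment multiplies by element 1 of m and
+ * the second by element 5: the index is applied within each segment
+ * independently.
+ */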
+
+#define DO_MLA_IDX(NAME, TYPE, OP, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
+ intptr_t idx = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[H(i + idx)]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = a[i + j] OP n[i + j] * mm; \
+ } \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
+DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
+DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, H8)
+
+DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
+DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
+DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, H8)
+
+#undef DO_MLA_IDX
+
+#define DO_FMUL_IDX(NAME, ADD, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
+ intptr_t idx = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[H(i + idx)]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = TYPE##_##ADD(d[i + j], \
+ TYPE##_mul(n[i + j], mm, stat), stat); \
+ } \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+#define float16_nop(N, M, S) (M)
+#define float32_nop(N, M, S) (M)
+#define float64_nop(N, M, S) (M)
+
+DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16, H2)
+DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32, H4)
+DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64, H8)
+
+/*
+ * Non-fused multiply-accumulate operations, for Neon. NB that unlike
+ * the fused ops below, these accumulate both from and into Vd.
+ */
+DO_FMUL_IDX(gvec_fmla_nf_idx_h, add, float16, H2)
+DO_FMUL_IDX(gvec_fmla_nf_idx_s, add, float32, H4)
+DO_FMUL_IDX(gvec_fmls_nf_idx_h, sub, float16, H2)
+DO_FMUL_IDX(gvec_fmls_nf_idx_s, sub, float32, H4)
+
+#undef float16_nop
+#undef float32_nop
+#undef float64_nop
+#undef DO_FMUL_IDX
+
+#define DO_FMLA_IDX(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
+ void *stat, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc); \
+ intptr_t segment = MIN(16, oprsz) / sizeof(TYPE); \
+ TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1); \
+ intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1); \
+ TYPE *d = vd, *n = vn, *m = vm, *a = va; \
+ op1_neg <<= (8 * sizeof(TYPE) - 1); \
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[H(i + idx)]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg, \
+ mm, a[i + j], 0, stat); \
+ } \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
+DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
+DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8)
+
+#undef DO_FMLA_IDX
+
+#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \
+void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ TYPEN *d = vd, *n = vn; TYPEM *m = vm; \
+ bool q = false; \
+ for (i = 0; i < oprsz / sizeof(TYPEN); i++) { \
+ WTYPE dd = (WTYPE)n[i] OP m[i]; \
+ if (dd < MIN) { \
+ dd = MIN; \
+ q = true; \
+ } else if (dd > MAX) { \
+ dd = MAX; \
+ q = true; \
+ } \
+ d[i] = dd; \
+ } \
+ if (q) { \
+ uint32_t *qc = vq; \
+ qc[0] = 1; \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
+DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
+DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)
+
+DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
+DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
+DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)
+
+DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
+DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
+DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)
+
+DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
+DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
+DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)
+
+#undef DO_SAT
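+
+/*
+ * The helpers above compute the exact result in a wider type (WTYPE)
+ * and clamp, e.g. for gvec_uqadd_b, 200 + 100 = 300 exceeds UINT8_MAX,
+ * so the lane saturates to 255 and QC is set.  The 64-bit lanes below
+ * have no convenient wider type, so they detect saturation from the
+ * operand and result bits instead.
+ */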
+
+void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ uint64_t nn = n[i], mm = m[i], dd = nn + mm;
+ if (dd < nn) {
+ dd = UINT64_MAX;
+ q = true;
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ uint64_t nn = n[i], mm = m[i], dd = nn - mm;
+ if (nn < mm) {
+ dd = 0;
+ q = true;
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ int64_t nn = n[i], mm = m[i], dd = nn + mm;
+ if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
+ dd = (nn >> 63) ^ ~INT64_MIN;
+ q = true;
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
+ void *vm, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ int64_t *d = vd, *n = vn, *m = vm;
+ bool q = false;
+
+ for (i = 0; i < oprsz / 8; i++) {
+ int64_t nn = n[i], mm = m[i], dd = nn - mm;
+ if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
+ dd = (nn >> 63) ^ ~INT64_MIN;
+ q = true;
+ }
+ d[i] = dd;
+ }
+ if (q) {
+ uint32_t *qc = vq;
+ qc[0] = 1;
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
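+
+/*
+ * The signed 64-bit helpers above use the usual two's-complement
+ * identities: addition overflows iff the operands have the same sign
+ * and the result's sign differs, subtraction iff the operands differ
+ * in sign, which is what the expressions anded with INT64_MIN test.
+ * On overflow, (nn >> 63) ^ ~INT64_MIN is INT64_MAX for nn >= 0 and
+ * INT64_MIN for nn < 0, i.e. saturation in the direction of nn.
+ */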
+
+#define DO_SRA(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] += n[i] >> shift; \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_SRA(gvec_ssra_b, int8_t)
+DO_SRA(gvec_ssra_h, int16_t)
+DO_SRA(gvec_ssra_s, int32_t)
+DO_SRA(gvec_ssra_d, int64_t)
+
+DO_SRA(gvec_usra_b, uint8_t)
+DO_SRA(gvec_usra_h, uint16_t)
+DO_SRA(gvec_usra_s, uint32_t)
+DO_SRA(gvec_usra_d, uint64_t)
+
+#undef DO_SRA
+
+#define DO_RSHR(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ TYPE tmp = n[i] >> (shift - 1); \
+ d[i] = (tmp >> 1) + (tmp & 1); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_RSHR(gvec_srshr_b, int8_t)
+DO_RSHR(gvec_srshr_h, int16_t)
+DO_RSHR(gvec_srshr_s, int32_t)
+DO_RSHR(gvec_srshr_d, int64_t)
+
+DO_RSHR(gvec_urshr_b, uint8_t)
+DO_RSHR(gvec_urshr_h, uint16_t)
+DO_RSHR(gvec_urshr_s, uint32_t)
+DO_RSHR(gvec_urshr_d, uint64_t)
+
+#undef DO_RSHR
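+
+/*
+ * The rounding shift above (and in DO_RSRA below) is done in two
+ * steps: tmp keeps the last bit shifted out in bit 0, and
+ * (tmp >> 1) + (tmp & 1) adds it back.  This matches
+ * (n + (1 << (shift - 1))) >> shift without the addition being able
+ * to overflow TYPE; e.g. srshr of 5 by 1 gives 3, of -5 by 1 gives -2.
+ */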
+
+#define DO_RSRA(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ TYPE tmp = n[i] >> (shift - 1); \
+ d[i] += (tmp >> 1) + (tmp & 1); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_RSRA(gvec_srsra_b, int8_t)
+DO_RSRA(gvec_srsra_h, int16_t)
+DO_RSRA(gvec_srsra_s, int32_t)
+DO_RSRA(gvec_srsra_d, int64_t)
+
+DO_RSRA(gvec_ursra_b, uint8_t)
+DO_RSRA(gvec_ursra_h, uint16_t)
+DO_RSRA(gvec_ursra_s, uint32_t)
+DO_RSRA(gvec_ursra_d, uint64_t)
+
+#undef DO_RSRA
+
+#define DO_SRI(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_SRI(gvec_sri_b, uint8_t)
+DO_SRI(gvec_sri_h, uint16_t)
+DO_SRI(gvec_sri_s, uint32_t)
+DO_SRI(gvec_sri_d, uint64_t)
+
+#undef DO_SRI
+
+#define DO_SLI(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_SLI(gvec_sli_b, uint8_t)
+DO_SLI(gvec_sli_h, uint16_t)
+DO_SLI(gvec_sli_s, uint32_t)
+DO_SLI(gvec_sli_d, uint64_t)
+
+#undef DO_SLI
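+
+/*
+ * E.g. for byte elements with shift = 4, SRI computes
+ * d = (d & 0xf0) | (n >> 4), keeping the top 4 bits of d, while SLI
+ * computes d = (d & 0x0f) | (n << 4), keeping the bottom 4 bits.
+ */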
+
+/*
+ * Convert float16 to float32, raising no exceptions and
+ * preserving exceptional values, including SNaN.
+ * This is effectively an unpack+repack operation.
+ */
+static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
+{
+ const int f16_bias = 15;
+ const int f32_bias = 127;
+ uint32_t sign = extract32(f16, 15, 1);
+ uint32_t exp = extract32(f16, 10, 5);
+ uint32_t frac = extract32(f16, 0, 10);
+
+ if (exp == 0x1f) {
+ /* Inf or NaN */
+ exp = 0xff;
+ } else if (exp == 0) {
+ /* Zero or denormal. */
+ if (frac != 0) {
+ if (fz16) {
+ frac = 0;
+ } else {
+ /*
+ * Denormal; these are all normal float32.
+ * Shift the fraction so that the msb is at bit 11,
+ * then remove bit 11 as the implicit bit of the
+ * normalized float32. Note that we still go through
+ * the shift for normal numbers below, to put the
+ * float32 fraction at the right place.
+ */
+ int shift = clz32(frac) - 21;
+ frac = (frac << shift) & 0x3ff;
+ exp = f32_bias - f16_bias - shift + 1;
+ }
+ }
+ } else {
+ /* Normal number; adjust the bias. */
+ exp += f32_bias - f16_bias;
+ }
+ sign <<= 31;
+ exp <<= 23;
+ frac <<= 23 - 10;
+
+ return sign | exp | frac;
+}
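+
+/*
+ * E.g. float16 1.0 (0x3c00) has exp = 15, frac = 0; exp becomes
+ * 15 + 112 = 127 and the result is 0x3f800000, float32 1.0.  The
+ * smallest denormal 0x0001 renormalizes with shift = 10 to frac = 0,
+ * exp = 103, giving 0x33800000 = 2^-24.
+ */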
+
+static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
+{
+ /*
+ * Branchless load of u32[0], u64[0], u32[1], or u64[1].
+ * Load the 2nd qword iff is_q & is_2.
+ * Shift to the 2nd dword iff !is_q & is_2.
+ * For !is_q & !is_2, the upper bits of the result are garbage.
+ */
+ return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
+}
+
+/*
+ * Note that FMLAL requires oprsz == 8 or oprsz == 16,
+ * as there are not yet SVE versions that might use blocking.
+ */
+
+static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
+ uint32_t desc, bool fz16)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ int is_q = oprsz == 16;
+ uint64_t n_4, m_4;
+
+ /* Pre-load all of the f16 data, avoiding overlap issues. */
+ n_4 = load4_f16(vn, is_q, is_2);
+ m_4 = load4_f16(vm, is_q, is_2);
+
+ /* Negate all inputs for FMLSL at once. */
+ if (is_s) {
+ n_4 ^= 0x8000800080008000ull;
+ }
+
+ for (i = 0; i < oprsz / 4; i++) {
+ float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
+ float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
+ void *venv, uint32_t desc)
+{
+ CPUARMState *env = venv;
+ do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
+ get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+}
+
+void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
+ void *venv, uint32_t desc)
+{
+ CPUARMState *env = venv;
+ do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
+ get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+}
+
+void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
+ void *venv, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+ CPUARMState *env = venv;
+ float_status *status = &env->vfp.fp_status;
+ bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+ for (i = 0; i < oprsz; i += sizeof(float32)) {
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + sel)) ^ negn;
+ float16 mm_16 = *(float16 *)(vm + H1_2(i + sel));
+ float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+ float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+ float32 aa = *(float32 *)(va + H1_4(i));
+
+ *(float32 *)(vd + H1_4(i)) = float32_muladd(nn, mm, aa, 0, status);
+ }
+}
+
+static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
+ uint32_t desc, bool fz16)
+{
+ intptr_t i, oprsz = simd_oprsz(desc);
+ int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
+ int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
+ int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
+ int is_q = oprsz == 16;
+ uint64_t n_4;
+ float32 m_1;
+
+ /* Pre-load all of the f16 data, avoiding overlap issues. */
+ n_4 = load4_f16(vn, is_q, is_2);
+
+ /* Negate all inputs for FMLSL at once. */
+ if (is_s) {
+ n_4 ^= 0x8000800080008000ull;
+ }
+
+ m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);
+
+ for (i = 0; i < oprsz / 4; i++) {
+ float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
+ d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
+ }
+ clear_tail(d, oprsz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
+ void *venv, uint32_t desc)
+{
+ CPUARMState *env = venv;
+ do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
+ get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+}
+
+void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
+ void *venv, uint32_t desc)
+{
+ CPUARMState *env = venv;
+ do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
+ get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
+}
+
+void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
+ void *venv, uint32_t desc)
+{
+ intptr_t i, j, oprsz = simd_oprsz(desc);
+ uint16_t negn = extract32(desc, SIMD_DATA_SHIFT, 1) << 15;
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
+ CPUARMState *env = venv;
+ float_status *status = &env->vfp.fp_status;
+ bool fz16 = get_flush_inputs_to_zero(&env->vfp.fp_status_f16);
+
+ for (i = 0; i < oprsz; i += 16) {
+ float16 mm_16 = *(float16 *)(vm + i + idx);
+ float32 mm = float16_to_float32_by_bits(mm_16, fz16);
+
+ for (j = 0; j < 16; j += sizeof(float32)) {
+ float16 nn_16 = *(float16 *)(vn + H1_2(i + j + sel)) ^ negn;
+ float32 nn = float16_to_float32_by_bits(nn_16, fz16);
+ float32 aa = *(float32 *)(va + H1_4(i + j));
+
+ *(float32 *)(vd + H1_4(i + j)) =
+ float32_muladd(nn, mm, aa, 0, status);
+ }
+ }
+}
+
+void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ int8_t mm = m[i];
+ int8_t nn = n[i];
+ int8_t res = 0;
+ if (mm >= 0) {
+ if (mm < 8) {
+ res = nn << mm;
+ }
+ } else {
+ res = nn >> (mm > -8 ? -mm : 7);
+ }
+ d[i] = res;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
+ int16_t nn = n[i];
+ int16_t res = 0;
+ if (mm >= 0) {
+ if (mm < 16) {
+ res = nn << mm;
+ }
+ } else {
+ res = nn >> (mm > -16 ? -mm : 15);
+ }
+ d[i] = res;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ int8_t mm = m[i];
+ uint8_t nn = n[i];
+ uint8_t res = 0;
+ if (mm >= 0) {
+ if (mm < 8) {
+ res = nn << mm;
+ }
+ } else {
+ if (mm > -8) {
+ res = nn >> -mm;
+ }
+ }
+ d[i] = res;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ int8_t mm = m[i]; /* only 8 bits of shift are significant */
+ uint16_t nn = n[i];
+ uint16_t res = 0;
+ if (mm >= 0) {
+ if (mm < 16) {
+ res = nn << mm;
+ }
+ } else {
+ if (mm > -16) {
+ res = nn >> -mm;
+ }
+ }
+ d[i] = res;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
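+
+/*
+ * For these shifts, a negative count in m is a right shift.  The
+ * signed forms clamp the count at the element width minus one, so a
+ * large right shift leaves the sign replication (e.g. sshl_b of -64
+ * by -10 gives -64 >> 7 = -1), whereas the unsigned forms produce 0
+ * once the value is shifted out entirely.
+ */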
+
+/*
+ * 8x8->8 polynomial multiply.
+ *
+ * Polynomial multiplication is like integer multiplication except the
+ * partial products are XORed, not added.
+ *
+ * TODO: expose this as a generic vector operation, as it is a common
+ * crypto building block.
+ */
+void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint64_t nn = n[i];
+ uint64_t mm = m[i];
+ uint64_t rr = 0;
+
+ for (j = 0; j < 8; ++j) {
+ uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
+ rr ^= mm & mask;
+ mm = (mm << 1) & 0xfefefefefefefefeull;
+ nn >>= 1;
+ }
+ d[i] = rr;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
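+
+/*
+ * E.g. pmul of 0x03 by 0x03 is 0x05: (x + 1)^2 = x^2 + 1 over GF(2),
+ * the cross terms cancelling under XOR.  The loop above does this for
+ * eight bytes at once, using the mask to select the bytes whose
+ * current bit of nn is set.
+ */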
+
+/*
+ * 64x64->128 polynomial multiply.
+ * Because the lanes are not accessed in strict columns,
+ * this probably cannot be turned into a generic helper.
+ */
+void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ intptr_t hi = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; i += 2) {
+ uint64_t nn = n[i + hi];
+ uint64_t mm = m[i + hi];
+ uint64_t rhi = 0;
+ uint64_t rlo = 0;
+
+ /* Bit 0 can only influence the low 64-bit result. */
+ if (nn & 1) {
+ rlo = mm;
+ }
+
+ for (j = 1; j < 64; ++j) {
+ uint64_t mask = -((nn >> j) & 1);
+ rlo ^= (mm << j) & mask;
+ rhi ^= (mm >> (64 - j)) & mask;
+ }
+ d[i] = rlo;
+ d[i + 1] = rhi;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+/*
+ * 8x8->16 polynomial multiply.
+ *
+ * The byte inputs are expanded to (or extracted from) half-words.
+ * Note that Neon and SVE2 get the inputs from different positions.
+ * This allows 4 bytes to be processed in parallel with uint64_t.
+ */
+
+static uint64_t expand_byte_to_half(uint64_t x)
+{
+ return (x & 0x000000ff)
+ | ((x & 0x0000ff00) << 8)
+ | ((x & 0x00ff0000) << 16)
+ | ((x & 0xff000000) << 24);
+}
+
+uint64_t pmull_w(uint64_t op1, uint64_t op2)
+{
+ uint64_t result = 0;
+ int i;
+ for (i = 0; i < 16; ++i) {
+ uint64_t mask = (op1 & 0x0000000100000001ull) * 0xffffffff;
+ result ^= op2 & mask;
+ op1 >>= 1;
+ op2 <<= 1;
+ }
+ return result;
+}
+
+uint64_t pmull_h(uint64_t op1, uint64_t op2)
+{
+ uint64_t result = 0;
+ int i;
+ for (i = 0; i < 8; ++i) {
+ uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
+ result ^= op2 & mask;
+ op1 >>= 1;
+ op2 <<= 1;
+ }
+ return result;
+}
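+
+/*
+ * pmull_h and pmull_w are bit-sliced in the same way as gvec_pmul_b
+ * above: each iteration tests one multiplier bit in every lane at
+ * once (eight 16-bit lanes for pmull_h, two 32-bit lanes for
+ * pmull_w), XORing in the shifted multiplicand where that bit is set.
+ */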
+
+void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ int hi = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t nn = n[hi], mm = m[hi];
+
+ d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
+ nn >>= 32;
+ mm >>= 32;
+ d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
+
+ clear_tail(d, 16, simd_maxsz(desc));
+}
+
+#ifdef TARGET_AARCH64
+void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ int shift = simd_data(desc) * 8;
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
+ uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;
+
+ d[i] = pmull_h(nn, mm);
+ }
+}
+
+static uint64_t pmull_d(uint64_t op1, uint64_t op2)
+{
+ uint64_t result = 0;
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t mask = -((op1 >> i) & 1);
+ result ^= (op2 << i) & mask;
+ }
+ return result;
+}
+
+void HELPER(sve2_pmull_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t sel = H4(simd_data(desc));
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *n = vn, *m = vm;
+ uint64_t *d = vd;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ d[i] = pmull_d(n[2 * i + sel], m[2 * i + sel]);
+ }
+}
+#endif
+
+#define DO_CMP0(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \
+ TYPE nn = *(TYPE *)(vn + i); \
+ *(TYPE *)(vd + i) = -(nn OP 0); \
+ } \
+ clear_tail(vd, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_CMP0(gvec_ceq0_b, int8_t, ==)
+DO_CMP0(gvec_clt0_b, int8_t, <)
+DO_CMP0(gvec_cle0_b, int8_t, <=)
+DO_CMP0(gvec_cgt0_b, int8_t, >)
+DO_CMP0(gvec_cge0_b, int8_t, >=)
+
+DO_CMP0(gvec_ceq0_h, int16_t, ==)
+DO_CMP0(gvec_clt0_h, int16_t, <)
+DO_CMP0(gvec_cle0_h, int16_t, <=)
+DO_CMP0(gvec_cgt0_h, int16_t, >)
+DO_CMP0(gvec_cge0_h, int16_t, >=)
+
+#undef DO_CMP0
+
+#define DO_ABD(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ \
+ for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \
+ d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_ABD(gvec_sabd_b, int8_t)
+DO_ABD(gvec_sabd_h, int16_t)
+DO_ABD(gvec_sabd_s, int32_t)
+DO_ABD(gvec_sabd_d, int64_t)
+
+DO_ABD(gvec_uabd_b, uint8_t)
+DO_ABD(gvec_uabd_h, uint16_t)
+DO_ABD(gvec_uabd_s, uint32_t)
+DO_ABD(gvec_uabd_d, uint64_t)
+
+#undef DO_ABD
+
+#define DO_ABA(NAME, TYPE) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ \
+ for (i = 0; i < opr_sz / sizeof(TYPE); ++i) { \
+ d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i]; \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_ABA(gvec_saba_b, int8_t)
+DO_ABA(gvec_saba_h, int16_t)
+DO_ABA(gvec_saba_s, int32_t)
+DO_ABA(gvec_saba_d, int64_t)
+
+DO_ABA(gvec_uaba_b, uint8_t)
+DO_ABA(gvec_uaba_h, uint16_t)
+DO_ABA(gvec_uaba_s, uint32_t)
+DO_ABA(gvec_uaba_d, uint64_t)
+
+#undef DO_ABA
+
+#define DO_NEON_PAIRWISE(NAME, OP) \
+ void HELPER(NAME##s)(void *vd, void *vn, void *vm, \
+ void *stat, uint32_t oprsz) \
+ { \
+ float_status *fpst = stat; \
+ float32 *d = vd; \
+ float32 *n = vn; \
+ float32 *m = vm; \
+ float32 r0, r1; \
+ \
+ /* Read all inputs before writing outputs in case vm == vd */ \
+ r0 = float32_##OP(n[H4(0)], n[H4(1)], fpst); \
+ r1 = float32_##OP(m[H4(0)], m[H4(1)], fpst); \
+ \
+ d[H4(0)] = r0; \
+ d[H4(1)] = r1; \
+ } \
+ \
+ void HELPER(NAME##h)(void *vd, void *vn, void *vm, \
+ void *stat, uint32_t oprsz) \
+ { \
+ float_status *fpst = stat; \
+ float16 *d = vd; \
+ float16 *n = vn; \
+ float16 *m = vm; \
+ float16 r0, r1, r2, r3; \
+ \
+ /* Read all inputs before writing outputs in case vm == vd */ \
+ r0 = float16_##OP(n[H2(0)], n[H2(1)], fpst); \
+ r1 = float16_##OP(n[H2(2)], n[H2(3)], fpst); \
+ r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst); \
+ r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst); \
+ \
+ d[H2(0)] = r0; \
+ d[H2(1)] = r1; \
+ d[H2(2)] = r2; \
+ d[H2(3)] = r3; \
+ }
+
+DO_NEON_PAIRWISE(neon_padd, add)
+DO_NEON_PAIRWISE(neon_pmax, max)
+DO_NEON_PAIRWISE(neon_pmin, min)
+
+#undef DO_NEON_PAIRWISE
+
+#define DO_VCVT_FIXED(NAME, FUNC, TYPE) \
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ { \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ int shift = simd_data(desc); \
+ TYPE *d = vd, *n = vn; \
+ float_status *fpst = stat; \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(n[i], shift, fpst); \
+ } \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+ }
+
+DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
+DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
+DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)
+
+#undef DO_VCVT_FIXED
+
+#define DO_VCVT_RMODE(NAME, FUNC, TYPE) \
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ { \
+ float_status *fpst = stat; \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ uint32_t rmode = simd_data(desc); \
+ uint32_t prev_rmode = get_float_rounding_mode(fpst); \
+ TYPE *d = vd, *n = vn; \
+ set_float_rounding_mode(rmode, fpst); \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(n[i], 0, fpst); \
+ } \
+ set_float_rounding_mode(prev_rmode, fpst); \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+ }
+
+DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
+DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
+DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
+DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)
+
+#undef DO_VCVT_RMODE
+
+#define DO_VRINT_RMODE(NAME, FUNC, TYPE) \
+ void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc) \
+ { \
+ float_status *fpst = stat; \
+ intptr_t i, oprsz = simd_oprsz(desc); \
+ uint32_t rmode = simd_data(desc); \
+ uint32_t prev_rmode = get_float_rounding_mode(fpst); \
+ TYPE *d = vd, *n = vn; \
+ set_float_rounding_mode(rmode, fpst); \
+ for (i = 0; i < oprsz / sizeof(TYPE); i++) { \
+ d[i] = FUNC(n[i], fpst); \
+ } \
+ set_float_rounding_mode(prev_rmode, fpst); \
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+ }
+
+DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
+DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)
+
+#undef DO_VRINT_RMODE
+
+#ifdef TARGET_AARCH64
+void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
+{
+ const uint8_t *indices = vm;
+ CPUARMState *env = venv;
+ size_t oprsz = simd_oprsz(desc);
+ uint32_t rn = extract32(desc, SIMD_DATA_SHIFT, 5);
+ bool is_tbx = extract32(desc, SIMD_DATA_SHIFT + 5, 1);
+ uint32_t table_len = desc >> (SIMD_DATA_SHIFT + 6);
+ union {
+ uint8_t b[16];
+ uint64_t d[2];
+ } result;
+
+ /*
+ * We must construct the final result in a temp, lest the output
+ * overlaps the input table. For TBL, begin with zero; for TBX,
+ * begin with the original register contents. Note that we always
+ * copy 16 bytes here to avoid an extra branch; clearing the high
+ * bits of the register for oprsz == 8 is handled below.
+ */
+ if (is_tbx) {
+ memcpy(&result, vd, 16);
+ } else {
+ memset(&result, 0, 16);
+ }
+
+ for (size_t i = 0; i < oprsz; ++i) {
+ uint32_t index = indices[H1(i)];
+
+ if (index < table_len) {
+ /*
+ * Convert index (a byte offset into the virtual table
+ * which is a series of 128-bit vectors concatenated)
+ * into the correct register element, bearing in mind
+ * that the table can wrap around from V31 to V0.
+ */
+ const uint8_t *table = (const uint8_t *)
+ aa64_vfp_qreg(env, (rn + (index >> 4)) % 32);
+ result.b[H1(i)] = table[H1(index % 16)];
+ }
+ }
+
+ memcpy(vd, &result, 16);
+ clear_tail(vd, oprsz, simd_maxsz(desc));
+}
+#endif
+
+/*
+ * NxN -> N highpart multiply
+ *
+ * TODO: expose this as a generic vector operation.
+ */
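+
+/*
+ * For example, in the byte case 0x50 * 0x60 = 0x1e00, so the stored
+ * element is 0x1e; the low half of the product is discarded.
+ */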
+
+void HELPER(gvec_smulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ((int32_t)n[i] * m[i]) >> 8;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = ((int32_t)n[i] * m[i]) >> 16;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ int32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = ((int64_t)n[i] * m[i]) >> 32;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_smulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t discard;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ muls64(&discard, &d[i], n[i], m[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint8_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ((uint32_t)n[i] * m[i]) >> 8;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint16_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 2; ++i) {
+ d[i] = ((uint32_t)n[i] * m[i]) >> 16;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_s)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint32_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = ((uint64_t)n[i] * m[i]) >> 32;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_umulh_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+ uint64_t discard;
+
+ for (i = 0; i < opr_sz / 8; ++i) {
+ mulu64(&discard, &d[i], n[i], m[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_xar_d)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+ int shr = simd_data(desc);
+ uint64_t *d = vd, *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz; ++i) {
+ d[i] = ror64(n[i] ^ m[i], shr);
+ }
+ clear_tail(d, opr_sz * 8, simd_maxsz(desc));
+}
+
+/*
+ * Integer matrix-multiply accumulate
+ */
+
+static uint32_t do_smmla_b(uint32_t sum, void *vn, void *vm)
+{
+ int8_t *n = vn, *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static uint32_t do_ummla_b(uint32_t sum, void *vn, void *vm)
+{
+ uint8_t *n = vn, *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static uint32_t do_usmmla_b(uint32_t sum, void *vn, void *vm)
+{
+ uint8_t *n = vn;
+ int8_t *m = vm;
+
+ for (intptr_t k = 0; k < 8; ++k) {
+ sum += n[H1(k)] * m[H1(k)];
+ }
+ return sum;
+}
+
+static void do_mmla_b(void *vd, void *vn, void *vm, void *va, uint32_t desc,
+ uint32_t (*inner_loop)(uint32_t, void *, void *))
+{
+ intptr_t seg, opr_sz = simd_oprsz(desc);
+
+ for (seg = 0; seg < opr_sz; seg += 16) {
+ uint32_t *d = vd + seg;
+ uint32_t *a = va + seg;
+ uint32_t sum0, sum1, sum2, sum3;
+
+ /*
+ * Process the entire segment at once, writing back the
+ * results only after we've consumed all of the inputs.
+ *
+         * Key to indices by column (element 2*i + j pairs row i of
+         * n with row j of m):
+ * i j i j
+ */
+ sum0 = a[H4(0 + 0)];
+ sum0 = inner_loop(sum0, vn + seg + 0, vm + seg + 0);
+ sum1 = a[H4(0 + 1)];
+ sum1 = inner_loop(sum1, vn + seg + 0, vm + seg + 8);
+ sum2 = a[H4(2 + 0)];
+ sum2 = inner_loop(sum2, vn + seg + 8, vm + seg + 0);
+ sum3 = a[H4(2 + 1)];
+ sum3 = inner_loop(sum3, vn + seg + 8, vm + seg + 8);
+
+ d[H4(0)] = sum0;
+ d[H4(1)] = sum1;
+ d[H4(2)] = sum2;
+ d[H4(3)] = sum3;
+ }
+ clear_tail(vd, opr_sz, simd_maxsz(desc));
+}
+
+#define DO_MMLA_B(NAME, INNER) \
+ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+ { do_mmla_b(vd, vn, vm, va, desc, INNER); }
+
+DO_MMLA_B(gvec_smmla_b, do_smmla_b)
+DO_MMLA_B(gvec_ummla_b, do_ummla_b)
+DO_MMLA_B(gvec_usmmla_b, do_usmmla_b)
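+
+/*
+ * Each 16-byte segment holds a 2x2 matrix of int32 accumulators and
+ * two 2x8 matrices of byte operands, so e.g. SMMLA computes
+ * d[2*i + j] = a[2*i + j] + dot(row i of n, row j of m) for
+ * i,j in {0,1}, each dot product summing eight signed 8-bit products.
+ */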
+
+/*
+ * BFloat16 Dot Product
+ */
+
+static float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2)
+{
+ /* FPCR is ignored for BFDOT and BFMMLA. */
+ float_status bf_status = {
+ .tininess_before_rounding = float_tininess_before_rounding,
+ .float_rounding_mode = float_round_to_odd_inf,
+ .flush_to_zero = true,
+ .flush_inputs_to_zero = true,
+ .default_nan_mode = true,
+ };
+ float32 t1, t2;
+
+ /*
+ * Extract each BFloat16 from the element pair, and shift
+ * them such that they become float32.
+ */
+ t1 = float32_mul(e1 << 16, e2 << 16, &bf_status);
+ t2 = float32_mul(e1 & 0xffff0000u, e2 & 0xffff0000u, &bf_status);
+ t1 = float32_add(t1, t2, &bf_status);
+ t1 = float32_add(sum, t1, &bf_status);
+
+ return t1;
+}
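+
+/*
+ * A bfloat16 value is the high 16 bits of a float32, so the "<< 16"
+ * above reinterprets the low element of each pair as a float32:
+ * e.g. the bfloat16 0x3f80 widens to 0x3f800000 = 1.0f.
+ */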
+
+void HELPER(gvec_bfdot)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ float32 *d = vd, *a = va;
+ uint32_t *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ d[i] = bfdotadd(a[i], n[i], m[i]);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
+ void *va, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ intptr_t index = simd_data(desc);
+ intptr_t elements = opr_sz / 4;
+ intptr_t eltspersegment = MIN(16 / 4, elements);
+ float32 *d = vd, *a = va;
+ uint32_t *n = vn, *m = vm;
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(index)];
+
+ for (j = i; j < i + eltspersegment; j++) {
+ d[j] = bfdotadd(a[j], n[j], m_idx);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
+{
+ intptr_t s, opr_sz = simd_oprsz(desc);
+ float32 *d = vd, *a = va;
+ uint32_t *n = vn, *m = vm;
+
+ for (s = 0; s < opr_sz / 4; s += 4) {
+ float32 sum00, sum01, sum10, sum11;
+
+ /*
+ * Process the entire segment at once, writing back the
+ * results only after we've consumed all of the inputs.
+ *
+         * Key to indices by column (element 2*i + j sums the
+         * products n[2*i + k] * m[2*j + k] over k):
+ * i j i k j k
+ */
+ sum00 = a[s + H4(0 + 0)];
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]);
+ sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]);
+
+ sum01 = a[s + H4(0 + 1)];
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]);
+ sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]);
+
+ sum10 = a[s + H4(2 + 0)];
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]);
+ sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]);
+
+ sum11 = a[s + H4(2 + 1)];
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]);
+ sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]);
+
+ d[s + H4(0 + 0)] = sum00;
+ d[s + H4(0 + 1)] = sum01;
+ d[s + H4(2 + 0)] = sum10;
+ d[s + H4(2 + 1)] = sum11;
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
+ void *stat, uint32_t desc)
+{
+ intptr_t i, opr_sz = simd_oprsz(desc);
+ intptr_t sel = simd_data(desc);
+ float32 *d = vd, *a = va;
+ bfloat16 *n = vn, *m = vm;
+
+ for (i = 0; i < opr_sz / 4; ++i) {
+ float32 nn = n[H2(i * 2 + sel)] << 16;
+ float32 mm = m[H2(i * 2 + sel)] << 16;
+ d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], 0, stat);
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
+ void *va, void *stat, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
+ intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 1, 3);
+ intptr_t elements = opr_sz / 4;
+ intptr_t eltspersegment = MIN(16 / 4, elements);
+ float32 *d = vd, *a = va;
+ bfloat16 *n = vn, *m = vm;
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ float32 m_idx = m[H2(2 * i + index)] << 16;
+
+ for (j = i; j < i + eltspersegment; j++) {
+ float32 n_j = n[H2(2 * j + sel)] << 16;
+ d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], 0, stat);
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
diff --git a/target/arm/vec_internal.h b/target/arm/vec_internal.h
new file mode 100644
index 000000000..2a3355829
--- /dev/null
+++ b/target/arm/vec_internal.h
@@ -0,0 +1,220 @@
+/*
+ * ARM AdvSIMD / SVE Vector Helpers
+ *
+ * Copyright (c) 2020 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_VEC_INTERNALS_H
+#define TARGET_ARM_VEC_INTERNALS_H
+
+/*
+ * Note that vector data is stored in host-endian 64-bit chunks,
+ * so addressing units smaller than that needs a host-endian fixup.
+ *
+ * The H<N> macros are used when indexing an array of elements of size N.
+ *
+ * The H1_<N> macros are used when performing byte arithmetic and then
+ * casting the final pointer to a type of size N.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define H1(x) ((x) ^ 7)
+#define H1_2(x) ((x) ^ 6)
+#define H1_4(x) ((x) ^ 4)
+#define H2(x) ((x) ^ 3)
+#define H4(x) ((x) ^ 1)
+#else
+#define H1(x) (x)
+#define H1_2(x) (x)
+#define H1_4(x) (x)
+#define H2(x) (x)
+#define H4(x) (x)
+#endif
+/*
+ * Access to 64-bit elements isn't host-endian dependent; we provide H8
+ * and H1_8 so that when a function is being generated from a macro we
+ * can pass these rather than an empty macro argument, for clarity.
+ */
+#define H8(x) (x)
+#define H1_8(x) (x)
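+
+/*
+ * For example, on a big-endian host H1(0) == 7: byte 0 of the guest
+ * vector lives in byte 7 of the host uint64_t chunk, while on a
+ * little-endian host every H macro is the identity.
+ */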
+
+/* Data for expanding active predicate bits to bytes, for byte elements. */
+extern const uint64_t expand_pred_b_data[256];
+
+static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
+{
+ uint64_t *d = vd + opr_sz;
+ uintptr_t i;
+
+ for (i = opr_sz; i < max_sz; i += 8) {
+ *d++ = 0;
+ }
+}
+
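+/*
+ * do_sqrshl_bhs below is the common signed saturating (rounding) shift
+ * for 8/16/32-bit elements; negative shift counts shift right, and a
+ * NULL 'sat' pointer disables saturation.  The rounding idiom
+ * "(src >> 1) + (src & 1)" shifts one place less than requested and
+ * adds back the last bit shifted out, rounding to nearest (ties up).
+ */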
+static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -bits) {
+ /* Rounding the sign bit always produces 0. */
+ if (round) {
+ return 0;
+ }
+ return src >> 31;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < bits) {
+ int32_t val = src << shift;
+ if (bits == 32) {
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else {
+ int32_t extval = sextract32(val, 0, bits);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return (1u << (bits - 1)) - (src >= 0);
+}
+
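+/*
+ * Unsigned variant: the first test uses -(bits + round) rather than
+ * -bits because, with rounding enabled, a right shift by exactly
+ * 'bits' positions can still round up to 1 and must not be
+ * short-circuited to zero.
+ */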
+static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -(bits + round)) {
+ return 0;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < bits) {
+ uint32_t val = src << shift;
+ if (bits == 32) {
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else {
+ uint32_t extval = extract32(val, 0, bits);
+ if (!sat || val == extval) {
+ return extval;
+ }
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return MAKE_64BIT_MASK(0, bits);
+}
+
+static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits,
+ bool round, uint32_t *sat)
+{
+ if (sat && src < 0) {
+ *sat = 1;
+ return 0;
+ }
+ return do_uqrshl_bhs(src, shift, bits, round, sat);
+}
+
+static inline int64_t do_sqrshl_d(int64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -64) {
+ /* Rounding the sign bit always produces 0. */
+ if (round) {
+ return 0;
+ }
+ return src >> 63;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < 64) {
+ int64_t val = src << shift;
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return src < 0 ? INT64_MIN : INT64_MAX;
+}
+
+static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (shift <= -(64 + round)) {
+ return 0;
+ } else if (shift < 0) {
+ if (round) {
+ src >>= -shift - 1;
+ return (src >> 1) + (src & 1);
+ }
+ return src >> -shift;
+ } else if (shift < 64) {
+ uint64_t val = src << shift;
+ if (!sat || val >> shift == src) {
+ return val;
+ }
+ } else if (!sat || src == 0) {
+ return 0;
+ }
+
+ *sat = 1;
+ return UINT64_MAX;
+}
+
+static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
+ bool round, uint32_t *sat)
+{
+ if (sat && src < 0) {
+ *sat = 1;
+ return 0;
+ }
+ return do_uqrshl_d(src, shift, round, sat);
+}
+
+int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
+int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
+int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
+int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
+
+/*
+ * 8 x 8 -> 16 vector polynomial multiply where the inputs are
+ * in the low 8 bits of each 16-bit element
+ */
+uint64_t pmull_h(uint64_t op1, uint64_t op2);
+/*
+ * 16 x 16 -> 32 vector polynomial multiply where the inputs are
+ * in the low 16 bits of each 32-bit element
+ */
+uint64_t pmull_w(uint64_t op1, uint64_t op2);
+
+#endif /* TARGET_ARM_VEC_INTERNALS_H */
diff --git a/target/arm/vfp-uncond.decode b/target/arm/vfp-uncond.decode
new file mode 100644
index 000000000..5c50447a6
--- /dev/null
+++ b/target/arm/vfp-uncond.decode
@@ -0,0 +1,82 @@
+# AArch32 VFP instruction descriptions (unconditional insns)
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# Encodings for the unconditional VFP instructions are here:
+# generally anything matching A32
+# 1111 1110 .... .... .... 101. ...0 ....
+# and T32
+# 1111 110. .... .... .... 101. .... ....
+# 1111 1110 .... .... .... 101. .... ....
+# (but those patterns might also cover some Neon instructions,
+# which do not live in this file.)
+
+# VFP registers have an odd encoding with a four-bit field
+# and a one-bit field which are assembled in different orders
+# depending on whether the register is double or single precision.
+# Each individual instruction function must do the checks for
+# "double register selected but CPU does not have double support"
+# and "double register number has bit 4 set but CPU does not
+# support D16-D31" (which should UNDEF).
+%vm_dp 5:1 0:4
+%vm_sp 0:4 5:1
+%vn_dp 7:1 16:4
+%vn_sp 16:4 7:1
+%vd_dp 22:1 12:4
+%vd_sp 12:4 22:1
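+
+# For example, %vd_dp places bit 22 (D) above bits 15:12 (Vd), so D21
+# (0b10101) is encoded as D=1 Vd=0101, while %vd_sp puts the four Vd
+# bits on top, so S21 is encoded as Vd=1010 D=1.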
+
+@vfp_dnm_s ................................ vm=%vm_sp vn=%vn_sp vd=%vd_sp
+@vfp_dnm_d ................................ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+VSEL 1111 1110 0. cc:2 .... .... 1001 .0.0 .... \
+ vm=%vm_sp vn=%vn_sp vd=%vd_sp sz=1
+VSEL 1111 1110 0. cc:2 .... .... 1010 .0.0 .... \
+ vm=%vm_sp vn=%vn_sp vd=%vd_sp sz=2
+VSEL 1111 1110 0. cc:2 .... .... 1011 .0.0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp sz=3
+
+VMAXNM_hp 1111 1110 1.00 .... .... 1001 .0.0 .... @vfp_dnm_s
+VMINNM_hp 1111 1110 1.00 .... .... 1001 .1.0 .... @vfp_dnm_s
+
+VMAXNM_sp 1111 1110 1.00 .... .... 1010 .0.0 .... @vfp_dnm_s
+VMINNM_sp 1111 1110 1.00 .... .... 1010 .1.0 .... @vfp_dnm_s
+
+VMAXNM_dp 1111 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d
+VMINNM_dp 1111 1110 1.00 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VRINT 1111 1110 1.11 10 rm:2 .... 1001 01.0 .... \
+ vm=%vm_sp vd=%vd_sp sz=1
+VRINT 1111 1110 1.11 10 rm:2 .... 1010 01.0 .... \
+ vm=%vm_sp vd=%vd_sp sz=2
+VRINT 1111 1110 1.11 10 rm:2 .... 1011 01.0 .... \
+ vm=%vm_dp vd=%vd_dp sz=3
+
+# VCVT float to int with specified rounding mode; Vd is always single-precision
+VCVT 1111 1110 1.11 11 rm:2 .... 1001 op:1 1.0 .... \
+ vm=%vm_sp vd=%vd_sp sz=1
+VCVT 1111 1110 1.11 11 rm:2 .... 1010 op:1 1.0 .... \
+ vm=%vm_sp vd=%vd_sp sz=2
+VCVT 1111 1110 1.11 11 rm:2 .... 1011 op:1 1.0 .... \
+ vm=%vm_dp vd=%vd_sp sz=3
+
+VMOVX 1111 1110 1.11 0000 .... 1010 01 . 0 .... \
+ vd=%vd_sp vm=%vm_sp
+
+VINS 1111 1110 1.11 0000 .... 1010 11 . 0 .... \
+ vd=%vd_sp vm=%vm_sp
diff --git a/target/arm/vfp.decode b/target/arm/vfp.decode
new file mode 100644
index 000000000..5405e8019
--- /dev/null
+++ b/target/arm/vfp.decode
@@ -0,0 +1,247 @@
+# AArch32 VFP instruction descriptions (conditional insns)
+#
+# Copyright (c) 2019 Linaro, Ltd
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+#
+# This file is processed by scripts/decodetree.py
+#
+# Encodings for the conditional VFP instructions are here:
+# generally anything matching A32
+# cccc 11.. .... .... .... 101. .... ....
+# and T32
+# 1110 110. .... .... .... 101. .... ....
+# 1110 1110 .... .... .... 101. .... ....
+# (but those patterns might also cover some Neon instructions,
+# which do not live in this file.)
+
+# VFP registers have an odd encoding with a four-bit field
+# and a one-bit field which are assembled in different orders
+# depending on whether the register is double or single precision.
+# Each individual instruction function must do the checks for
+# "double register selected but CPU does not have double support"
+# and "double register number has bit 4 set but CPU does not
+# support D16-D31" (which should UNDEF).
+%vm_dp 5:1 0:4
+%vm_sp 0:4 5:1
+%vn_dp 7:1 16:4
+%vn_sp 16:4 7:1
+%vd_dp 22:1 12:4
+%vd_sp 12:4 22:1
+
+%vmov_idx_b 21:1 5:2
+%vmov_idx_h 21:1 6:1
+
+%vmov_imm 16:4 0:4
+
+@vfp_dnm_s ................................ vm=%vm_sp vn=%vn_sp vd=%vd_sp
+@vfp_dnm_d ................................ vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
+@vfp_dm_ss ................................ vm=%vm_sp vd=%vd_sp
+@vfp_dm_dd ................................ vm=%vm_dp vd=%vd_dp
+@vfp_dm_ds ................................ vm=%vm_sp vd=%vd_dp
+@vfp_dm_sd ................................ vm=%vm_dp vd=%vd_sp
+
+# VMOV scalar to general-purpose register; note that this does
+# include some Neon cases.
+VMOV_to_gp ---- 1110 u:1 1. 1 .... rt:4 1011 ... 1 0000 \
+ vn=%vn_dp size=0 index=%vmov_idx_b
+VMOV_to_gp ---- 1110 u:1 0. 1 .... rt:4 1011 ..1 1 0000 \
+ vn=%vn_dp size=1 index=%vmov_idx_h
+VMOV_to_gp ---- 1110 0 0 index:1 1 .... rt:4 1011 .00 1 0000 \
+ vn=%vn_dp size=2 u=0
+
+VMOV_from_gp ---- 1110 0 1. 0 .... rt:4 1011 ... 1 0000 \
+ vn=%vn_dp size=0 index=%vmov_idx_b
+VMOV_from_gp ---- 1110 0 0. 0 .... rt:4 1011 ..1 1 0000 \
+ vn=%vn_dp size=1 index=%vmov_idx_h
+VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \
+ vn=%vn_dp size=2
+
+VDUP ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
+ vn=%vn_dp
+
+VMSR_VMRS ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
+VMOV_half ---- 1110 000 l:1 .... rt:4 1001 . 001 0000 vn=%vn_sp
+VMOV_single ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 vn=%vn_sp
+
+VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... vm=%vm_sp
+VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... vm=%vm_dp
+
+VLDR_VSTR_hp ---- 1101 u:1 .0 l:1 rn:4 .... 1001 imm:8 vd=%vd_sp
+VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 vd=%vd_sp
+VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 vd=%vd_dp
+
+# We split the load/store multiple up into two patterns to avoid
+# overlap with other insns in the "Advanced SIMD load/store and 64-bit move"
+# grouping:
+# P=0 U=0 W=0 is 64-bit VMOV
+# P=1 W=0 is VLDR/VSTR
+# P=U W=1 is UNDEF
+# leaving P=0 U=1 W=x and P=1 U=0 W=1 for load/store multiple.
+# These include FSTM/FLDM.
+VLDM_VSTM_sp ---- 1100 1 . w:1 l:1 rn:4 .... 1010 imm:8 \
+ vd=%vd_sp p=0 u=1
+VLDM_VSTM_dp ---- 1100 1 . w:1 l:1 rn:4 .... 1011 imm:8 \
+ vd=%vd_dp p=0 u=1
+
+VLDM_VSTM_sp ---- 1101 0.1 l:1 rn:4 .... 1010 imm:8 \
+ vd=%vd_sp p=1 u=0 w=1
+VLDM_VSTM_dp ---- 1101 0.1 l:1 rn:4 .... 1011 imm:8 \
+ vd=%vd_dp p=1 u=0 w=1
+
+# 3-register VFP data-processing; bits [23,21:20,6] identify the operation.
+VMLA_hp ---- 1110 0.00 .... .... 1001 .0.0 .... @vfp_dnm_s
+VMLA_sp ---- 1110 0.00 .... .... 1010 .0.0 .... @vfp_dnm_s
+VMLA_dp ---- 1110 0.00 .... .... 1011 .0.0 .... @vfp_dnm_d
+
+VMLS_hp ---- 1110 0.00 .... .... 1001 .1.0 .... @vfp_dnm_s
+VMLS_sp ---- 1110 0.00 .... .... 1010 .1.0 .... @vfp_dnm_s
+VMLS_dp ---- 1110 0.00 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VNMLS_hp ---- 1110 0.01 .... .... 1001 .0.0 .... @vfp_dnm_s
+VNMLS_sp ---- 1110 0.01 .... .... 1010 .0.0 .... @vfp_dnm_s
+VNMLS_dp ---- 1110 0.01 .... .... 1011 .0.0 .... @vfp_dnm_d
+
+VNMLA_hp ---- 1110 0.01 .... .... 1001 .1.0 .... @vfp_dnm_s
+VNMLA_sp ---- 1110 0.01 .... .... 1010 .1.0 .... @vfp_dnm_s
+VNMLA_dp ---- 1110 0.01 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VMUL_hp ---- 1110 0.10 .... .... 1001 .0.0 .... @vfp_dnm_s
+VMUL_sp ---- 1110 0.10 .... .... 1010 .0.0 .... @vfp_dnm_s
+VMUL_dp ---- 1110 0.10 .... .... 1011 .0.0 .... @vfp_dnm_d
+
+VNMUL_hp ---- 1110 0.10 .... .... 1001 .1.0 .... @vfp_dnm_s
+VNMUL_sp ---- 1110 0.10 .... .... 1010 .1.0 .... @vfp_dnm_s
+VNMUL_dp ---- 1110 0.10 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VADD_hp ---- 1110 0.11 .... .... 1001 .0.0 .... @vfp_dnm_s
+VADD_sp ---- 1110 0.11 .... .... 1010 .0.0 .... @vfp_dnm_s
+VADD_dp ---- 1110 0.11 .... .... 1011 .0.0 .... @vfp_dnm_d
+
+VSUB_hp ---- 1110 0.11 .... .... 1001 .1.0 .... @vfp_dnm_s
+VSUB_sp ---- 1110 0.11 .... .... 1010 .1.0 .... @vfp_dnm_s
+VSUB_dp ---- 1110 0.11 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VDIV_hp ---- 1110 1.00 .... .... 1001 .0.0 .... @vfp_dnm_s
+VDIV_sp ---- 1110 1.00 .... .... 1010 .0.0 .... @vfp_dnm_s
+VDIV_dp ---- 1110 1.00 .... .... 1011 .0.0 .... @vfp_dnm_d
+
+VFMA_hp ---- 1110 1.10 .... .... 1001 .0. 0 .... @vfp_dnm_s
+VFMS_hp ---- 1110 1.10 .... .... 1001 .1. 0 .... @vfp_dnm_s
+VFNMA_hp ---- 1110 1.01 .... .... 1001 .0. 0 .... @vfp_dnm_s
+VFNMS_hp ---- 1110 1.01 .... .... 1001 .1. 0 .... @vfp_dnm_s
+
+VFMA_sp ---- 1110 1.10 .... .... 1010 .0. 0 .... @vfp_dnm_s
+VFMS_sp ---- 1110 1.10 .... .... 1010 .1. 0 .... @vfp_dnm_s
+VFNMA_sp ---- 1110 1.01 .... .... 1010 .0. 0 .... @vfp_dnm_s
+VFNMS_sp ---- 1110 1.01 .... .... 1010 .1. 0 .... @vfp_dnm_s
+
+VFMA_dp ---- 1110 1.10 .... .... 1011 .0.0 .... @vfp_dnm_d
+VFMS_dp ---- 1110 1.10 .... .... 1011 .1.0 .... @vfp_dnm_d
+VFNMA_dp ---- 1110 1.01 .... .... 1011 .0.0 .... @vfp_dnm_d
+VFNMS_dp ---- 1110 1.01 .... .... 1011 .1.0 .... @vfp_dnm_d
+
+VMOV_imm_hp ---- 1110 1.11 .... .... 1001 0000 .... \
+ vd=%vd_sp imm=%vmov_imm
+VMOV_imm_sp ---- 1110 1.11 .... .... 1010 0000 .... \
+ vd=%vd_sp imm=%vmov_imm
+VMOV_imm_dp ---- 1110 1.11 .... .... 1011 0000 .... \
+ vd=%vd_dp imm=%vmov_imm
+
+VMOV_reg_sp ---- 1110 1.11 0000 .... 1010 01.0 .... @vfp_dm_ss
+VMOV_reg_dp ---- 1110 1.11 0000 .... 1011 01.0 .... @vfp_dm_dd
+
+VABS_hp ---- 1110 1.11 0000 .... 1001 11.0 .... @vfp_dm_ss
+VABS_sp ---- 1110 1.11 0000 .... 1010 11.0 .... @vfp_dm_ss
+VABS_dp ---- 1110 1.11 0000 .... 1011 11.0 .... @vfp_dm_dd
+
+VNEG_hp ---- 1110 1.11 0001 .... 1001 01.0 .... @vfp_dm_ss
+VNEG_sp ---- 1110 1.11 0001 .... 1010 01.0 .... @vfp_dm_ss
+VNEG_dp ---- 1110 1.11 0001 .... 1011 01.0 .... @vfp_dm_dd
+
+VSQRT_hp ---- 1110 1.11 0001 .... 1001 11.0 .... @vfp_dm_ss
+VSQRT_sp ---- 1110 1.11 0001 .... 1010 11.0 .... @vfp_dm_ss
+VSQRT_dp ---- 1110 1.11 0001 .... 1011 11.0 .... @vfp_dm_dd
+
+VCMP_hp ---- 1110 1.11 010 z:1 .... 1001 e:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCMP_sp ---- 1110 1.11 010 z:1 .... 1010 e:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCMP_dp ---- 1110 1.11 010 z:1 .... 1011 e:1 1.0 .... \
+ vd=%vd_dp vm=%vm_dp
+
+# VCVTT and VCVTB from f16: Vd format depends on size bit; Vm is always vm_sp
+VCVT_f32_f16 ---- 1110 1.11 0010 .... 1010 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_f64_f16 ---- 1110 1.11 0010 .... 1011 t:1 1.0 .... \
+ vd=%vd_dp vm=%vm_sp
+
+# VCVTB and VCVTT to f16: Vd format is always vd_sp;
+# Vm format depends on size bit
+VCVT_b16_f32 ---- 1110 1.11 0011 .... 1001 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_f16_f32 ---- 1110 1.11 0011 .... 1010 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_f16_f64 ---- 1110 1.11 0011 .... 1011 t:1 1.0 .... \
+ vd=%vd_sp vm=%vm_dp
+
+VRINTR_hp ---- 1110 1.11 0110 .... 1001 01.0 .... @vfp_dm_ss
+VRINTR_sp ---- 1110 1.11 0110 .... 1010 01.0 .... @vfp_dm_ss
+VRINTR_dp ---- 1110 1.11 0110 .... 1011 01.0 .... @vfp_dm_dd
+
+VRINTZ_hp ---- 1110 1.11 0110 .... 1001 11.0 .... @vfp_dm_ss
+VRINTZ_sp ---- 1110 1.11 0110 .... 1010 11.0 .... @vfp_dm_ss
+VRINTZ_dp ---- 1110 1.11 0110 .... 1011 11.0 .... @vfp_dm_dd
+
+VRINTX_hp ---- 1110 1.11 0111 .... 1001 01.0 .... @vfp_dm_ss
+VRINTX_sp ---- 1110 1.11 0111 .... 1010 01.0 .... @vfp_dm_ss
+VRINTX_dp ---- 1110 1.11 0111 .... 1011 01.0 .... @vfp_dm_dd
+
+# VCVT between single and double:
+# Vm precision depends on size; Vd is its reverse
+VCVT_sp ---- 1110 1.11 0111 .... 1010 11.0 .... @vfp_dm_ds
+VCVT_dp ---- 1110 1.11 0111 .... 1011 11.0 .... @vfp_dm_sd
+
+# VCVT from integer to floating point: Vm always single; Vd depends on size
+VCVT_int_hp ---- 1110 1.11 1000 .... 1001 s:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_int_sp ---- 1110 1.11 1000 .... 1010 s:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_int_dp ---- 1110 1.11 1000 .... 1011 s:1 1.0 .... \
+ vd=%vd_dp vm=%vm_sp
+
+# VJCVT is always dp to sp
+VJCVT ---- 1110 1.11 1001 .... 1011 11.0 .... @vfp_dm_sd
+
+# VCVT between floating-point and fixed-point. The immediate value
+# is in the same format as a Vm single-precision register number.
+# We assemble bits 18 (op), 16 (u) and 7 (sx) into a single opc field
+# for the convenience of the trans_VCVT_fix functions.
+%vcvt_fix_op 18:1 16:1 7:1
+VCVT_fix_hp ---- 1110 1.11 1.1. .... 1001 .1.0 .... \
+ vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
+VCVT_fix_sp ---- 1110 1.11 1.1. .... 1010 .1.0 .... \
+ vd=%vd_sp imm=%vm_sp opc=%vcvt_fix_op
+VCVT_fix_dp ---- 1110 1.11 1.1. .... 1011 .1.0 .... \
+ vd=%vd_dp imm=%vm_sp opc=%vcvt_fix_op
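+
+# %vcvt_fix_op packs bit 18 as its most significant bit, so opc takes
+# the values 0..7 in the form op:u:sx, e.g. opc == 0b101 means
+# op=1, u=0, sx=1.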
+
+# VCVT float to integer (VCVT and VCVTR): Vd always single; Vm depends on size
+VCVT_hp_int ---- 1110 1.11 110 s:1 .... 1001 rz:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_sp_int ---- 1110 1.11 110 s:1 .... 1010 rz:1 1.0 .... \
+ vd=%vd_sp vm=%vm_sp
+VCVT_dp_int ---- 1110 1.11 110 s:1 .... 1011 rz:1 1.0 .... \
+ vd=%vd_sp vm=%vm_dp
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
new file mode 100644
index 000000000..24e3d820a
--- /dev/null
+++ b/target/arm/vfp_helper.c
@@ -0,0 +1,1348 @@
+/*
+ * ARM VFP floating-point operations
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "internals.h"
+#ifdef CONFIG_TCG
+#include "qemu/log.h"
+#include "fpu/softfloat.h"
+#endif
+
+/* VFP support. We follow the convention used for VFP instructions:
+   single-precision routines have an "s" suffix, double-precision
+   ones a "d" suffix. */
+
+#ifdef CONFIG_TCG
+
+/* Convert host exception flags to vfp form. */
+static inline int vfp_exceptbits_from_host(int host_bits)
+{
+ int target_bits = 0;
+
+ if (host_bits & float_flag_invalid) {
+ target_bits |= 1;
+ }
+ if (host_bits & float_flag_divbyzero) {
+ target_bits |= 2;
+ }
+ if (host_bits & float_flag_overflow) {
+ target_bits |= 4;
+ }
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
+ target_bits |= 8;
+ }
+ if (host_bits & float_flag_inexact) {
+ target_bits |= 0x10;
+ }
+ if (host_bits & float_flag_input_denormal) {
+ target_bits |= 0x80;
+ }
+ return target_bits;
+}
+
+/* Convert vfp exception flags to host form. */
+static inline int vfp_exceptbits_to_host(int target_bits)
+{
+ int host_bits = 0;
+
+ if (target_bits & 1) {
+ host_bits |= float_flag_invalid;
+ }
+ if (target_bits & 2) {
+ host_bits |= float_flag_divbyzero;
+ }
+ if (target_bits & 4) {
+ host_bits |= float_flag_overflow;
+ }
+ if (target_bits & 8) {
+ host_bits |= float_flag_underflow;
+ }
+ if (target_bits & 0x10) {
+ host_bits |= float_flag_inexact;
+ }
+ if (target_bits & 0x80) {
+ host_bits |= float_flag_input_denormal;
+ }
+ return host_bits;
+}
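+
+/*
+ * The bit positions used in both directions are the FPSCR cumulative
+ * exception flags: IOC (bit 0), DZC (bit 1), OFC (bit 2), UFC (bit 3),
+ * IXC (bit 4) and IDC (bit 7).
+ */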
+
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+ uint32_t i;
+
+ i = get_float_exception_flags(&env->vfp.fp_status);
+ i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+ /* FZ16 does not generate an input denormal exception. */
+ i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
+ & ~float_flag_input_denormal);
+ i |= (get_float_exception_flags(&env->vfp.standard_fp_status_f16)
+ & ~float_flag_input_denormal);
+ return vfp_exceptbits_from_host(i);
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
+
+ changed ^= val;
+ if (changed & (3 << 22)) {
+ i = (val >> 22) & 3;
+ switch (i) {
+ case FPROUNDING_TIEEVEN:
+ i = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ i = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ i = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ i = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(i, &env->vfp.fp_status);
+ set_float_rounding_mode(i, &env->vfp.fp_status_f16);
+ }
+ if (changed & FPCR_FZ16) {
+ bool ftz_enabled = val & FPCR_FZ16;
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
+ set_flush_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.standard_fp_status_f16);
+ }
+ if (changed & FPCR_FZ) {
+ bool ftz_enabled = val & FPCR_FZ;
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
+ }
+ if (changed & FPCR_DN) {
+ bool dnan_enabled = val & FPCR_DN;
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
+ set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
+ }
+
+ /*
+ * The exception flags are ORed together when we read fpscr so we
+ * only need to preserve the current state in one of our
+ * float_status values.
+ */
+ i = vfp_exceptbits_to_host(val);
+ set_float_exception_flags(i, &env->vfp.fp_status);
+ set_float_exception_flags(0, &env->vfp.fp_status_f16);
+ set_float_exception_flags(0, &env->vfp.standard_fp_status);
+ set_float_exception_flags(0, &env->vfp.standard_fp_status_f16);
+}
+
+#else
+
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+ return 0;
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+}
+
+#endif
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+ uint32_t i, fpscr;
+
+ fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+
+ /*
+ * M-profile LTPSIZE overlaps A-profile Stride; whichever of the
+ * two is not applicable to this CPU will always be zero.
+ */
+ fpscr |= env->v7m.ltpsize << 16;
+
+ fpscr |= vfp_get_fpscr_from_host(env);
+
+ i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
+ fpscr |= i ? FPCR_QC : 0;
+
+ return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+ return HELPER(vfp_get_fpscr)(env);
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
+ if (!cpu_isar_feature(any_fp16, cpu)) {
+ val &= ~FPCR_FZ16;
+ }
+
+ vfp_set_fpscr_to_host(env, val);
+
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * Short-vector length and stride; on M-profile these bits
+ * are used for different purposes.
+ * We can't make this conditional be "if MVFR0.FPShVec != 0",
+ * because in v7A no-short-vector-support cores still had to
+ * allow Stride/Len to be written with the only effect that
+ * some insns are required to UNDEF if the guest sets them.
+ */
+ env->vfp.vec_len = extract32(val, 16, 3);
+ env->vfp.vec_stride = extract32(val, 20, 2);
+ } else if (cpu_isar_feature(aa32_mve, cpu)) {
+ env->v7m.ltpsize = extract32(val, FPCR_LTPSIZE_SHIFT,
+ FPCR_LTPSIZE_LENGTH);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_NEON) ||
+ cpu_isar_feature(aa32_mve, cpu)) {
+ /*
+ * The bit we set within fpscr_q is arbitrary; the register as a
+ * whole being zero/non-zero is what counts.
+ * TODO: M-profile MVE also has a QC bit.
+ */
+ env->vfp.qc[0] = val & FPCR_QC;
+ env->vfp.qc[1] = 0;
+ env->vfp.qc[2] = 0;
+ env->vfp.qc[3] = 0;
+ }
+
+ /*
+ * We don't implement trapped exception handling, so the
+ * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
+ *
+ * The exception flags IOC|DZC|OFC|UFC|IXC|IDC are stored in
+ * fp_status; QC, Len and Stride are stored separately earlier.
+ * Clear out all of those and the RES0 bits: only NZCV, AHP, DN,
+ * FZ, RMode and FZ16 are kept in vfp.xregs[FPSCR].
+ */
+ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
+}
+
+void vfp_set_fpscr(CPUARMState *env, uint32_t val)
+{
+ HELPER(vfp_set_fpscr)(env, val);
+}
+
+#ifdef CONFIG_TCG
+
+#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
+
+#define VFP_BINOP(name) \
+dh_ctype_f16 VFP_HELPER(name, h)(dh_ctype_f16 a, dh_ctype_f16 b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float16_ ## name(a, b, fpst); \
+} \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float32_ ## name(a, b, fpst); \
+} \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float64_ ## name(a, b, fpst); \
+}
+VFP_BINOP(add)
+VFP_BINOP(sub)
+VFP_BINOP(mul)
+VFP_BINOP(div)
+VFP_BINOP(min)
+VFP_BINOP(max)
+VFP_BINOP(minnum)
+VFP_BINOP(maxnum)
+#undef VFP_BINOP
+
+dh_ctype_f16 VFP_HELPER(neg, h)(dh_ctype_f16 a)
+{
+ return float16_chs(a);
+}
+
+float32 VFP_HELPER(neg, s)(float32 a)
+{
+ return float32_chs(a);
+}
+
+float64 VFP_HELPER(neg, d)(float64 a)
+{
+ return float64_chs(a);
+}
+
+dh_ctype_f16 VFP_HELPER(abs, h)(dh_ctype_f16 a)
+{
+ return float16_abs(a);
+}
+
+float32 VFP_HELPER(abs, s)(float32 a)
+{
+ return float32_abs(a);
+}
+
+float64 VFP_HELPER(abs, d)(float64 a)
+{
+ return float64_abs(a);
+}
+
+dh_ctype_f16 VFP_HELPER(sqrt, h)(dh_ctype_f16 a, CPUARMState *env)
+{
+ return float16_sqrt(a, &env->vfp.fp_status_f16);
+}
+
+float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
+{
+ return float32_sqrt(a, &env->vfp.fp_status);
+}
+
+float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
+{
+ return float64_sqrt(a, &env->vfp.fp_status);
+}
+
+static void softfloat_to_vfp_compare(CPUARMState *env, FloatRelation cmp)
+{
+ uint32_t flags;
+ switch (cmp) {
+ case float_relation_equal:
+ flags = 0x6;
+ break;
+ case float_relation_less:
+ flags = 0x8;
+ break;
+ case float_relation_greater:
+ flags = 0x2;
+ break;
+ case float_relation_unordered:
+ flags = 0x3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ env->vfp.xregs[ARM_VFP_FPSCR] =
+ deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags);
+}
+
+/* XXX: check quiet/signaling case */
+#define DO_VFP_cmp(P, FLOATTYPE, ARGTYPE, FPST) \
+void VFP_HELPER(cmp, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
+{ \
+ softfloat_to_vfp_compare(env, \
+ FLOATTYPE ## _compare_quiet(a, b, &env->vfp.FPST)); \
+} \
+void VFP_HELPER(cmpe, P)(ARGTYPE a, ARGTYPE b, CPUARMState *env) \
+{ \
+ softfloat_to_vfp_compare(env, \
+ FLOATTYPE ## _compare(a, b, &env->vfp.FPST)); \
+}
+DO_VFP_cmp(h, float16, dh_ctype_f16, fp_status_f16)
+DO_VFP_cmp(s, float32, float32, fp_status)
+DO_VFP_cmp(d, float64, float64, fp_status)
+#undef DO_VFP_cmp
+
+/* Integer to float and float to integer conversions */
+
+#define CONV_ITOF(name, ftype, fsz, sign) \
+ftype HELPER(name)(uint32_t x, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
+}
+
+#define CONV_FTOI(name, ftype, fsz, sign, round) \
+sign##int32_t HELPER(name)(ftype x, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ if (float##fsz##_is_any_nan(x)) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ return float##fsz##_to_##sign##int32##round(x, fpst); \
+}
+
+#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
+ CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
+ CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
+ CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)
+
+FLOAT_CONVS(si, h, uint32_t, 16, )
+FLOAT_CONVS(si, s, float32, 32, )
+FLOAT_CONVS(si, d, float64, 64, )
+FLOAT_CONVS(ui, h, uint32_t, 16, u)
+FLOAT_CONVS(ui, s, float32, 32, u)
+FLOAT_CONVS(ui, d, float64, 64, u)
+
+#undef CONV_ITOF
+#undef CONV_FTOI
+#undef FLOAT_CONVS
+
+/* floating point conversion */
+float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
+{
+ return float32_to_float64(x, &env->vfp.fp_status);
+}
+
+float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
+{
+ return float64_to_float32(x, &env->vfp.fp_status);
+}
+
+uint32_t HELPER(bfcvt)(float32 x, void *status)
+{
+ return float32_to_bfloat16(x, status);
+}
+
+uint32_t HELPER(bfcvt_pair)(uint64_t pair, void *status)
+{
+ bfloat16 lo = float32_to_bfloat16(extract64(pair, 0, 32), status);
+ bfloat16 hi = float32_to_bfloat16(extract64(pair, 32, 32), status);
+ return deposit32(lo, 16, 16, hi);
+}
+
+/*
+ * VFP3 fixed point conversion. The AArch32 versions of fix-to-float
+ * must always round-to-nearest; the AArch64 ones honour the FPSCR
+ * rounding mode. (For AArch32 Neon the standard-FPSCR is set to
+ * round-to-nearest so either helper will work.) AArch32 float-to-fix
+ * must round-to-zero.
+ */
+#define VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+ftype HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
+ void *fpstp) \
+{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }
+
+#define VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
+ ftype HELPER(vfp_##name##to##p##_round_to_nearest)(uint##isz##_t x, \
+ uint32_t shift, \
+ void *fpstp) \
+ { \
+ ftype ret; \
+ float_status *fpst = fpstp; \
+ FloatRoundMode oldmode = fpst->float_rounding_mode; \
+ fpst->float_rounding_mode = float_round_nearest_even; \
+ ret = itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); \
+ fpst->float_rounding_mode = oldmode; \
+ return ret; \
+ }
+
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, ROUND, suff) \
+uint##isz##_t HELPER(vfp_to##name##p##suff)(ftype x, uint32_t shift, \
+ void *fpst) \
+{ \
+ if (unlikely(float##fsz##_is_any_nan(x))) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
+}
+
+#define VFP_CONV_FIX(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT_ROUND(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ float_round_to_zero, _round_to_zero) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ get_float_rounding_mode(fpst), )
+
+#define VFP_CONV_FIX_A64(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, ftype, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, ftype, isz, itype, \
+ get_float_rounding_mode(fpst), )
+
+VFP_CONV_FIX(sh, d, 64, float64, 64, int16)
+VFP_CONV_FIX(sl, d, 64, float64, 64, int32)
+VFP_CONV_FIX_A64(sq, d, 64, float64, 64, int64)
+VFP_CONV_FIX(uh, d, 64, float64, 64, uint16)
+VFP_CONV_FIX(ul, d, 64, float64, 64, uint32)
+VFP_CONV_FIX_A64(uq, d, 64, float64, 64, uint64)
+VFP_CONV_FIX(sh, s, 32, float32, 32, int16)
+VFP_CONV_FIX(sl, s, 32, float32, 32, int32)
+VFP_CONV_FIX_A64(sq, s, 32, float32, 64, int64)
+VFP_CONV_FIX(uh, s, 32, float32, 32, uint16)
+VFP_CONV_FIX(ul, s, 32, float32, 32, uint32)
+VFP_CONV_FIX_A64(uq, s, 32, float32, 64, uint64)
+VFP_CONV_FIX(sh, h, 16, dh_ctype_f16, 32, int16)
+VFP_CONV_FIX(sl, h, 16, dh_ctype_f16, 32, int32)
+VFP_CONV_FIX_A64(sq, h, 16, dh_ctype_f16, 64, int64)
+VFP_CONV_FIX(uh, h, 16, dh_ctype_f16, 32, uint16)
+VFP_CONV_FIX(ul, h, 16, dh_ctype_f16, 32, uint32)
+VFP_CONV_FIX_A64(uq, h, 16, dh_ctype_f16, 64, uint64)
+
+#undef VFP_CONV_FIX
+#undef VFP_CONV_FIX_FLOAT
+#undef VFP_CONV_FLOAT_FIX_ROUND
+#undef VFP_CONV_FIX_A64
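+
+/*
+ * As an example of the expansion, VFP_CONV_FIX(sl, s, 32, float32, 32,
+ * int32) generates helper_vfp_sltos, helper_vfp_sltos_round_to_nearest,
+ * helper_vfp_tosls_round_to_zero and helper_vfp_tosls.
+ */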
+
+/* Set the current fp rounding mode and return the old one.
+ * The argument is a softfloat float_round_ value.
+ */
+uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
+{
+ float_status *fp_status = fpstp;
+
+ uint32_t prev_rmode = get_float_rounding_mode(fp_status);
+ set_float_rounding_mode(rmode, fp_status);
+
+ return prev_rmode;
+}
+
+/* Half precision conversions. */
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
+{
+    /*
+     * Squash FZ16 to 0 for the duration of the conversion: were it
+     * left set, it would affect the flushing of input denormals.
+     */
+ float_status *fpst = fpstp;
+ bool save = get_flush_inputs_to_zero(fpst);
+ set_flush_inputs_to_zero(false, fpst);
+ float32 r = float16_to_float32(a, !ahp_mode, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
+{
+    /*
+     * Squash FZ16 to 0 for the duration of the conversion: were it
+     * left set, it would affect the flushing of output denormals.
+     */
+ float_status *fpst = fpstp;
+ bool save = get_flush_to_zero(fpst);
+ set_flush_to_zero(false, fpst);
+ float16 r = float32_to_float16(a, !ahp_mode, fpst);
+ set_flush_to_zero(save, fpst);
+ return r;
+}
+
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
+{
+    /*
+     * Squash FZ16 to 0 for the duration of the conversion: were it
+     * left set, it would affect the flushing of input denormals.
+     */
+ float_status *fpst = fpstp;
+ bool save = get_flush_inputs_to_zero(fpst);
+ set_flush_inputs_to_zero(false, fpst);
+ float64 r = float16_to_float64(a, !ahp_mode, fpst);
+ set_flush_inputs_to_zero(save, fpst);
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
+{
+    /*
+     * Squash FZ16 to 0 for the duration of the conversion: were it
+     * left set, it would affect the flushing of output denormals.
+     */
+ float_status *fpst = fpstp;
+ bool save = get_flush_to_zero(fpst);
+ set_flush_to_zero(false, fpst);
+ float16 r = float64_to_float16(a, !ahp_mode, fpst);
+ set_flush_to_zero(save, fpst);
+ return r;
+}
+
+/* NEON helpers. */
+
+/* Constants 256 and 512 are used in some helpers; we avoid relying on
+ * int->float conversions at run-time. */
+#define float64_256 make_float64(0x4070000000000000LL)
+#define float64_512 make_float64(0x4080000000000000LL)
+#define float16_maxnorm make_float16(0x7bff)
+#define float32_maxnorm make_float32(0x7f7fffff)
+#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
+
+/* Reciprocal functions
+ *
+ * The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
+ */
+
+/* See RecipEstimate()
+ *
+ * input is a 9 bit fixed point number
+ * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
+ * result range 256 .. 511 for a number from 1.0 to 511/256.
+ */
+
+static int recip_estimate(int input)
+{
+ int a, b, r;
+ assert(256 <= input && input < 512);
+ a = (input * 2) + 1;
+ b = (1 << 19) / a;
+ r = (b + 1) >> 1;
+ assert(256 <= r && r < 512);
+ return r;
+}
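+
+/*
+ * For example, recip_estimate(256) (0.5 in the 9-bit fixed-point
+ * format) computes a = 513, b = (1 << 19) / 513 = 1022 and returns
+ * r = 511, i.e. 511/256 ~= 1.996 ~= 1/0.5.
+ */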
+
+/*
+ * Common wrapper to call recip_estimate
+ *
+ * The parameters are exponent and 64 bit fraction (without implicit
+ * bit) where the binary point is nominally at bit 52. Returns a
+ * float64 which can then be rounded to the appropriate size by the
+ * callee.
+ */
+
+static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
+{
+ uint32_t scaled, estimate;
+ uint64_t result_frac;
+ int result_exp;
+
+ /* Handle sub-normals */
+ if (*exp == 0) {
+ if (extract64(frac, 51, 1) == 0) {
+ *exp = -1;
+ frac <<= 2;
+ } else {
+ frac <<= 1;
+ }
+ }
+
+ /* scaled = UInt('1':fraction<51:44>) */
+ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
+ estimate = recip_estimate(scaled);
+
+ result_exp = exp_off - *exp;
+ result_frac = deposit64(0, 44, 8, estimate);
+ if (result_exp == 0) {
+ result_frac = deposit64(result_frac >> 1, 51, 1, 1);
+ } else if (result_exp == -1) {
+ result_frac = deposit64(result_frac >> 2, 50, 2, 1);
+ result_exp = 0;
+ }
+
+ *exp = result_exp;
+
+ return result_frac;
+}
+
+static bool round_to_inf(float_status *fpst, bool sign_bit)
+{
+ switch (fpst->float_rounding_mode) {
+ case float_round_nearest_even: /* Round to Nearest */
+ return true;
+ case float_round_up: /* Round to +Inf */
+ return !sign_bit;
+ case float_round_down: /* Round to -Inf */
+ return sign_bit;
+ case float_round_to_zero: /* Round to Zero */
+ return false;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float16 f16 = float16_squash_input_denormal(input, fpst);
+ uint32_t f16_val = float16_val(f16);
+ uint32_t f16_sign = float16_is_neg(f16);
+ int f16_exp = extract32(f16_val, 10, 5);
+ uint32_t f16_frac = extract32(f16_val, 0, 10);
+ uint64_t f64_frac;
+
+ if (float16_is_any_nan(f16)) {
+ float16 nan = f16;
+ if (float16_is_signaling_nan(f16, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float16_silence_nan(f16, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float16_default_nan(fpst);
+ }
+ return nan;
+ } else if (float16_is_infinity(f16)) {
+ return float16_set_sign(float16_zero, float16_is_neg(f16));
+ } else if (float16_is_zero(f16)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float16_set_sign(float16_infinity, float16_is_neg(f16));
+ } else if (float16_abs(f16) < (1 << 8)) {
+ /* Abs(value) < 2.0^-16 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f16_sign)) {
+ return float16_set_sign(float16_infinity, f16_sign);
+ } else {
+ return float16_set_sign(float16_maxnorm, f16_sign);
+ }
+ } else if (f16_exp >= 29 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float16_set_sign(float16_zero, float16_is_neg(f16));
+ }
+
+ f64_frac = call_recip_estimate(&f16_exp, 29,
+ ((uint64_t) f16_frac) << (52 - 10));
+
+ /* result = sign : result_exp<4:0> : fraction<51:42> */
+ f16_val = deposit32(0, 15, 1, f16_sign);
+ f16_val = deposit32(f16_val, 10, 5, f16_exp);
+ f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
+ return make_float16(f16_val);
+}
+
+float32 HELPER(recpe_f32)(float32 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, fpst);
+ uint32_t f32_val = float32_val(f32);
+ bool f32_sign = float32_is_neg(f32);
+ int f32_exp = extract32(f32_val, 23, 8);
+ uint32_t f32_frac = extract32(f32_val, 0, 23);
+ uint64_t f64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float32_silence_nan(f32, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan(fpst);
+ }
+ return nan;
+ } else if (float32_is_infinity(f32)) {
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_abs(f32) < (1ULL << 21)) {
+ /* Abs(value) < 2.0^-128 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f32_sign)) {
+ return float32_set_sign(float32_infinity, f32_sign);
+ } else {
+ return float32_set_sign(float32_maxnorm, f32_sign);
+ }
+ } else if (f32_exp >= 253 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ }
+
+ f64_frac = call_recip_estimate(&f32_exp, 253,
+ ((uint64_t) f32_frac) << (52 - 23));
+
+ /* result = sign : result_exp<7:0> : fraction<51:29> */
+ f32_val = deposit32(0, 31, 1, f32_sign);
+ f32_val = deposit32(f32_val, 23, 8, f32_exp);
+ f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
+ return make_float32(f32_val);
+}
+
+float64 HELPER(recpe_f64)(float64 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, fpst);
+ uint64_t f64_val = float64_val(f64);
+ bool f64_sign = float64_is_neg(f64);
+ int f64_exp = extract64(f64_val, 52, 11);
+ uint64_t f64_frac = extract64(f64_val, 0, 52);
+
+ /* Deal with any special cases */
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ if (!fpst->default_nan_mode) {
+ nan = float64_silence_nan(f64, fpst);
+ }
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan(fpst);
+ }
+ return nan;
+ } else if (float64_is_infinity(f64)) {
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
+ /* Abs(value) < 2.0^-1024 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f64_sign)) {
+ return float64_set_sign(float64_infinity, f64_sign);
+ } else {
+ return float64_set_sign(float64_maxnorm, f64_sign);
+ }
+ } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ }
+
+ f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);
+
+ /* result = sign : result_exp<10:0> : fraction<51:0>; */
+ f64_val = deposit64(0, 63, 1, f64_sign);
+ f64_val = deposit64(f64_val, 52, 11, f64_exp);
+ f64_val = deposit64(f64_val, 0, 52, f64_frac);
+ return make_float64(f64_val);
+}
+
+/* The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM.
+ */
+
+static int do_recip_sqrt_estimate(int a)
+{
+ int b, estimate;
+
+ assert(128 <= a && a < 512);
+ if (a < 256) {
+ a = a * 2 + 1;
+ } else {
+ a = (a >> 1) << 1;
+ a = (a + 1) * 2;
+ }
+ b = 512;
+ while (a * (b + 1) * (b + 1) < (1 << 28)) {
+ b += 1;
+ }
+ estimate = (b + 1) / 2;
+ assert(256 <= estimate && estimate < 512);
+
+ return estimate;
+}
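+
+/*
+ * For example, do_recip_sqrt_estimate(256) (0.5 in the fixed-point
+ * input format) rescales a to 514, the loop terminates with b = 722,
+ * and the result is 361, i.e. 361/256 ~= 1.41 ~= 1/sqrt(0.5).
+ */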
+
+
+static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
+{
+ int estimate;
+ uint32_t scaled;
+
+ if (*exp == 0) {
+ while (extract64(frac, 51, 1) == 0) {
+ frac = frac << 1;
+ *exp -= 1;
+ }
+ frac = extract64(frac, 0, 51) << 1;
+ }
+
+ if (*exp & 1) {
+ /* scaled = UInt('01':fraction<51:45>) */
+ scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
+ } else {
+ /* scaled = UInt('1':fraction<51:44>) */
+ scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
+ }
+ estimate = do_recip_sqrt_estimate(scaled);
+
+ *exp = (exp_off - *exp) / 2;
+ return extract64(estimate, 0, 8) << 44;
+}
+
+uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
+{
+ float_status *s = fpstp;
+ float16 f16 = float16_squash_input_denormal(input, s);
+ uint16_t val = float16_val(f16);
+ bool f16_sign = float16_is_neg(f16);
+ int f16_exp = extract32(val, 10, 5);
+ uint16_t f16_frac = extract32(val, 0, 10);
+ uint64_t f64_frac;
+
+ if (float16_is_any_nan(f16)) {
+ float16 nan = f16;
+ if (float16_is_signaling_nan(f16, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float16_silence_nan(f16, fpstp);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float16_default_nan(s);
+ }
+ return nan;
+ } else if (float16_is_zero(f16)) {
+ float_raise(float_flag_divbyzero, s);
+ return float16_set_sign(float16_infinity, f16_sign);
+ } else if (f16_sign) {
+ float_raise(float_flag_invalid, s);
+ return float16_default_nan(s);
+ } else if (float16_is_infinity(f16)) {
+ return float16_zero;
+ }
+
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent. */
+
+ f64_frac = ((uint64_t) f16_frac) << (52 - 10);
+
+ f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);
+
+ /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
+ val = deposit32(0, 15, 1, f16_sign);
+ val = deposit32(val, 10, 5, f16_exp);
+ val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
+ return make_float16(val);
+}
+
+float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
+{
+ float_status *s = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, s);
+ uint32_t val = float32_val(f32);
+ uint32_t f32_sign = float32_is_neg(f32);
+ int f32_exp = extract32(val, 23, 8);
+ uint32_t f32_frac = extract32(val, 0, 23);
+ uint64_t f64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float32_silence_nan(f32, fpstp);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float32_default_nan(s);
+ }
+ return nan;
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, s);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_is_neg(f32)) {
+ float_raise(float_flag_invalid, s);
+ return float32_default_nan(s);
+ } else if (float32_is_infinity(f32)) {
+ return float32_zero;
+ }
+
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent. */
+
+ f64_frac = ((uint64_t) f32_frac) << 29;
+
+ f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);
+
+    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
+ val = deposit32(0, 31, 1, f32_sign);
+ val = deposit32(val, 23, 8, f32_exp);
+ val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
+ return make_float32(val);
+}
+
+float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
+{
+ float_status *s = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, s);
+ uint64_t val = float64_val(f64);
+ bool f64_sign = float64_is_neg(f64);
+ int f64_exp = extract64(val, 52, 11);
+ uint64_t f64_frac = extract64(val, 0, 52);
+
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, s)) {
+ float_raise(float_flag_invalid, s);
+ if (!s->default_nan_mode) {
+ nan = float64_silence_nan(f64, fpstp);
+ }
+ }
+ if (s->default_nan_mode) {
+ nan = float64_default_nan(s);
+ }
+ return nan;
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, s);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if (float64_is_neg(f64)) {
+ float_raise(float_flag_invalid, s);
+ return float64_default_nan(s);
+ } else if (float64_is_infinity(f64)) {
+ return float64_zero;
+ }
+
+ f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);
+
+ /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
+ val = deposit64(0, 63, 1, f64_sign);
+ val = deposit64(val, 52, 11, f64_exp);
+ val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
+ return make_float64(val);
+}
+
+uint32_t HELPER(recpe_u32)(uint32_t a)
+{
+ int input, estimate;
+
+ if ((a & 0x80000000) == 0) {
+ return 0xffffffff;
+ }
+
+ input = extract32(a, 23, 9);
+ estimate = recip_estimate(input);
+
+ return deposit32(0, (32 - 9), 9, estimate);
+}
+
+uint32_t HELPER(rsqrte_u32)(uint32_t a)
+{
+ int estimate;
+
+ if ((a & 0xc0000000) == 0) {
+ return 0xffffffff;
+ }
+
+ estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));
+
+ return deposit32(0, 23, 9, estimate);
+}
+
+/* VFPv4 fused multiply-accumulate */
+dh_ctype_f16 VFP_HELPER(muladd, h)(dh_ctype_f16 a, dh_ctype_f16 b,
+ dh_ctype_f16 c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float16_muladd(a, b, c, 0, fpst);
+}
+
+float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float32_muladd(a, b, c, 0, fpst);
+}
+
+float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float64_muladd(a, b, c, 0, fpst);
+}
+
+/* ARMv8 round to integral */
+dh_ctype_f16 HELPER(rinth_exact)(dh_ctype_f16 x, void *fp_status)
+{
+ return float16_round_to_int(x, fp_status);
+}
+
+float32 HELPER(rints_exact)(float32 x, void *fp_status)
+{
+ return float32_round_to_int(x, fp_status);
+}
+
+float64 HELPER(rintd_exact)(float64 x, void *fp_status)
+{
+ return float64_round_to_int(x, fp_status);
+}
+
+dh_ctype_f16 HELPER(rinth)(dh_ctype_f16 x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float16 ret;
+
+ ret = float16_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
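+
+/*
+ * For example, rounding 0.5 yields 0.0 under round-to-nearest-even in
+ * either family, but only the *_exact helpers above leave the Inexact
+ * flag set afterwards; the plain helpers clear it again unless it was
+ * already set before the call.
+ */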
+
+float32 HELPER(rints)(float32 x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float32 ret;
+
+ ret = float32_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+float64 HELPER(rintd)(float64 x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float64 ret;
+
+ ret = float64_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+/* Convert ARM rounding mode to softfloat */
+int arm_rmode_to_sf(int rmode)
+{
+ switch (rmode) {
+ case FPROUNDING_TIEAWAY:
+ rmode = float_round_ties_away;
+ break;
+ case FPROUNDING_ODD:
+ /* FIXME: add support for FPROUNDING_ODD */
+ qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
+ rmode);
+ /* fall through for now */
+ case FPROUNDING_TIEEVEN:
+ default:
+ rmode = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ rmode = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ rmode = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ rmode = float_round_to_zero;
+ break;
+ }
+ return rmode;
+}
+
+/*
+ * Implement float64 to int32_t conversion without saturation;
+ * the result is supplied modulo 2^32.
+ */
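+/*
+ * For example, converting 2^32 + 3 returns 3 (the result modulo 2^32),
+ * raises Invalid for the overflow, and reports Z clear in the high half
+ * of the returned pair.
+ */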
+uint64_t HELPER(fjcvtzs)(float64 value, void *vstatus)
+{
+ float_status *status = vstatus;
+ uint32_t exp, sign;
+ uint64_t frac;
+ uint32_t inexact = 1; /* !Z */
+
+ sign = extract64(value, 63, 1);
+ exp = extract64(value, 52, 11);
+ frac = extract64(value, 0, 52);
+
+ if (exp == 0) {
+ /* While not inexact for IEEE FP, -0.0 is inexact for JavaScript. */
+ inexact = sign;
+ if (frac != 0) {
+ if (status->flush_inputs_to_zero) {
+ float_raise(float_flag_input_denormal, status);
+ } else {
+ float_raise(float_flag_inexact, status);
+ inexact = 1;
+ }
+ }
+ frac = 0;
+ } else if (exp == 0x7ff) {
+ /* This operation raises Invalid for both NaN and overflow (Inf). */
+ float_raise(float_flag_invalid, status);
+ frac = 0;
+ } else {
+ int true_exp = exp - 1023;
+ int shift = true_exp - 52;
+
+ /* Restore implicit bit. */
+ frac |= 1ull << 52;
+
+ /* Shift the fraction into place. */
+ if (shift >= 0) {
+ /* The number is so large we must shift the fraction left. */
+ if (shift >= 64) {
+ /* The fraction is shifted out entirely. */
+ frac = 0;
+ } else {
+ frac <<= shift;
+ }
+ } else if (shift > -64) {
+ /* Normal case -- shift right and notice if bits shift out. */
+ inexact = (frac << (64 + shift)) != 0;
+ frac >>= -shift;
+ } else {
+ /* The fraction is shifted out entirely. */
+ frac = 0;
+ }
+
+ /* Notice overflow or inexact exceptions. */
+ if (true_exp > 31 || frac > (sign ? 0x80000000ull : 0x7fffffff)) {
+ /* Overflow, for which this operation raises invalid. */
+ float_raise(float_flag_invalid, status);
+ inexact = 1;
+ } else if (inexact) {
+ float_raise(float_flag_inexact, status);
+ }
+
+ /* Honor the sign. */
+ if (sign) {
+ frac = -frac;
+ }
+ }
+
+ /* Pack the result and the env->ZF representation of Z together. */
+ return deposit64(frac, 32, 32, inexact);
+}
+
+uint32_t HELPER(vjcvt)(float64 value, CPUARMState *env)
+{
+ uint64_t pair = HELPER(fjcvtzs)(value, &env->vfp.fp_status);
+ uint32_t result = pair;
+ uint32_t z = (pair >> 32) == 0;
+
+ /* Store Z, clear NCV, in FPSCR.NZCV. */
+ env->vfp.xregs[ARM_VFP_FPSCR]
+ = (env->vfp.xregs[ARM_VFP_FPSCR] & ~CPSR_NZCV) | (z * CPSR_Z);
+
+ return result;
+}
+
+/* Round a float32 to an integer that fits in int32_t or int64_t. */
+static float32 frint_s(float32 f, float_status *fpst, int intsize)
+{
+ int old_flags = get_float_exception_flags(fpst);
+ uint32_t exp = extract32(f, 23, 8);
+
+ if (unlikely(exp == 0xff)) {
+ /* NaN or Inf. */
+ goto overflow;
+ }
+
+ /* Round and re-extract the exponent. */
+ f = float32_round_to_int(f, fpst);
+ exp = extract32(f, 23, 8);
+
+ /* Validate the range of the result. */
+ if (exp < 126 + intsize) {
+ /* abs(F) <= INT{N}_MAX */
+ return f;
+ }
+ if (exp == 126 + intsize) {
+ uint32_t sign = extract32(f, 31, 1);
+ uint32_t frac = extract32(f, 0, 23);
+ if (sign && frac == 0) {
+ /* F == INT{N}_MIN */
+ return f;
+ }
+ }
+
+ overflow:
+ /*
+ * Raise Invalid and return INT{N}_MIN as a float. Revert any
+ * inexact exception float32_round_to_int may have raised.
+ */
+ set_float_exception_flags(old_flags | float_flag_invalid, fpst);
+ return (0x100u + 126u + intsize) << 23;
+}
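+
+/*
+ * For example, frint_s() on 2^31 (0x4f000000) with intsize 32 raises
+ * Invalid and returns INT32_MIN as a float (0xcf000000), since 2^31
+ * exceeds INT32_MAX.
+ */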
+
+float32 HELPER(frint32_s)(float32 f, void *fpst)
+{
+ return frint_s(f, fpst, 32);
+}
+
+float32 HELPER(frint64_s)(float32 f, void *fpst)
+{
+ return frint_s(f, fpst, 64);
+}
+
+/* Round a float64 to an integer that fits in int32_t or int64_t. */
+static float64 frint_d(float64 f, float_status *fpst, int intsize)
+{
+ int old_flags = get_float_exception_flags(fpst);
+ uint32_t exp = extract64(f, 52, 11);
+
+ if (unlikely(exp == 0x7ff)) {
+ /* NaN or Inf. */
+ goto overflow;
+ }
+
+ /* Round and re-extract the exponent. */
+ f = float64_round_to_int(f, fpst);
+ exp = extract64(f, 52, 11);
+
+ /* Validate the range of the result. */
+ if (exp < 1022 + intsize) {
+ /* abs(F) <= INT{N}_MAX */
+ return f;
+ }
+ if (exp == 1022 + intsize) {
+ uint64_t sign = extract64(f, 63, 1);
+ uint64_t frac = extract64(f, 0, 52);
+ if (sign && frac == 0) {
+ /* F == INT{N}_MIN */
+ return f;
+ }
+ }
+
+ overflow:
+ /*
+ * Raise Invalid and return INT{N}_MIN as a float. Revert any
+ * inexact exception float64_round_to_int may have raised.
+ */
+ set_float_exception_flags(old_flags | float_flag_invalid, fpst);
+ return (uint64_t)(0x800 + 1022 + intsize) << 52;
+}
+
+float64 HELPER(frint32_d)(float64 f, void *fpst)
+{
+ return frint_d(f, fpst, 32);
+}
+
+float64 HELPER(frint64_d)(float64 f, void *fpst)
+{
+ return frint_d(f, fpst, 64);
+}
+
+void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
+{
+ uint32_t syndrome;
+
+ switch (reg) {
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ case ARM_VFP_MVFR2:
+ if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
+ return;
+ }
+ break;
+ case ARM_VFP_FPSID:
+ if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
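+ /*
+ * Build the ISS for a trapped VMRS/MRC access: CV = 1, COND = 0b1110,
+ * Opc2 = 0, Opc1 = 7, CRn = reg, Rt = rt, CRm = 0, direction = read.
+ */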
+ syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL
+ | (1 << 24) | (0xe << 20) | (7 << 14)
+ | (reg << 10) | (rt << 5) | 1);
+
+ raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+}
+
+#endif
diff --git a/target/avr/Kconfig b/target/avr/Kconfig
new file mode 100644
index 000000000..155592d35
--- /dev/null
+++ b/target/avr/Kconfig
@@ -0,0 +1,2 @@
+config AVR
+ bool
diff --git a/target/avr/cpu-param.h b/target/avr/cpu-param.h
new file mode 100644
index 000000000..7ef4e7c67
--- /dev/null
+++ b/target/avr/cpu-param.h
@@ -0,0 +1,36 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef AVR_CPU_PARAM_H
+#define AVR_CPU_PARAM_H
+
+#define TARGET_LONG_BITS 32
+/*
+ * TARGET_PAGE_BITS cannot be more than 8 bits because
+ * 1. all IO registers occupy [0x0000 .. 0x00ff] address range, and they
+ * should be implemented as a device and not memory
+ * 2. SRAM starts at the address 0x0100
+ */
+#define TARGET_PAGE_BITS 8
+#define TARGET_PHYS_ADDR_SPACE_BITS 24
+#define TARGET_VIRT_ADDR_SPACE_BITS 24
+#define NB_MMU_MODES 2
+
+#endif
diff --git a/target/avr/cpu-qom.h b/target/avr/cpu-qom.h
new file mode 100644
index 000000000..9fa6989c1
--- /dev/null
+++ b/target/avr/cpu-qom.h
@@ -0,0 +1,49 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef QEMU_AVR_QOM_H
+#define QEMU_AVR_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_AVR_CPU "avr-cpu"
+
+OBJECT_DECLARE_TYPE(AVRCPU, AVRCPUClass,
+ AVR_CPU)
+
+/**
+ * AVRCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An AVR CPU model.
+ */
+struct AVRCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#endif /* !defined (QEMU_AVR_QOM_H) */
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
new file mode 100644
index 000000000..5d70e34dd
--- /dev/null
+++ b/target/avr/cpu.c
@@ -0,0 +1,378 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2019-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include "cpu.h"
+#include "disas/dis-asm.h"
+
+static void avr_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+
+ cpu->env.pc_w = value / 2; /* internally PC points to words */
+}
+
+static bool avr_cpu_has_work(CPUState *cs)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ return (cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET))
+ && cpu_interrupts_enabled(env);
+}
+
+static void avr_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ env->pc_w = tb->pc / 2; /* internally PC points to words */
+}
+
+static void avr_cpu_reset(DeviceState *ds)
+{
+ CPUState *cs = CPU(ds);
+ AVRCPU *cpu = AVR_CPU(cs);
+ AVRCPUClass *mcc = AVR_CPU_GET_CLASS(cpu);
+ CPUAVRState *env = &cpu->env;
+
+ mcc->parent_reset(ds);
+
+ env->pc_w = 0;
+ env->sregI = 1;
+ env->sregC = 0;
+ env->sregZ = 0;
+ env->sregN = 0;
+ env->sregV = 0;
+ env->sregS = 0;
+ env->sregH = 0;
+ env->sregT = 0;
+
+ env->rampD = 0;
+ env->rampX = 0;
+ env->rampY = 0;
+ env->rampZ = 0;
+ env->eind = 0;
+ env->sp = 0;
+
+ env->skip = 0;
+
+ memset(env->r, 0, sizeof(env->r));
+}
+
+static void avr_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_arch_avr;
+ info->print_insn = avr_print_insn;
+}
+
+static void avr_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ AVRCPUClass *mcc = AVR_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void avr_cpu_set_int(void *opaque, int irq, int level)
+{
+ AVRCPU *cpu = opaque;
+ CPUAVRState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint64_t mask = (1ull << irq);
+
+ if (level) {
+ env->intsrc |= mask;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ env->intsrc &= ~mask;
+ if (env->intsrc == 0) {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+}
+
+static void avr_cpu_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+
+ /* Set the number of interrupts supported by the CPU. */
+ qdev_init_gpio_in(DEVICE(cpu), avr_cpu_set_int,
+ sizeof(cpu->env.intsrc) * 8);
+}
+
+static ObjectClass *avr_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+
+ oc = object_class_by_name(cpu_model);
+ if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL ||
+ object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "\n");
+ qemu_fprintf(f, "PC: %06x\n", env->pc_w * 2); /* PC points to words */
+ qemu_fprintf(f, "SP: %04x\n", env->sp);
+ qemu_fprintf(f, "rampD: %02x\n", env->rampD >> 16);
+ qemu_fprintf(f, "rampX: %02x\n", env->rampX >> 16);
+ qemu_fprintf(f, "rampY: %02x\n", env->rampY >> 16);
+ qemu_fprintf(f, "rampZ: %02x\n", env->rampZ >> 16);
+ qemu_fprintf(f, "EIND: %02x\n", env->eind >> 16);
+ qemu_fprintf(f, "X: %02x%02x\n", env->r[27], env->r[26]);
+ qemu_fprintf(f, "Y: %02x%02x\n", env->r[29], env->r[28]);
+ qemu_fprintf(f, "Z: %02x%02x\n", env->r[31], env->r[30]);
+ qemu_fprintf(f, "SREG: [ %c %c %c %c %c %c %c %c ]\n",
+ env->sregI ? 'I' : '-',
+ env->sregT ? 'T' : '-',
+ env->sregH ? 'H' : '-',
+ env->sregS ? 'S' : '-',
+ env->sregV ? 'V' : '-',
+ env->sregN ? 'N' : '-',
+ env->sregZ ? 'Z' : '-',
+ env->sregC ? 'C' : '-');
+ qemu_fprintf(f, "SKIP: %02x\n", env->skip);
+
+ qemu_fprintf(f, "\n");
+ for (i = 0; i < ARRAY_SIZE(env->r); i++) {
+ qemu_fprintf(f, "R[%02d]: %02x ", i, env->r[i]);
+
+ if ((i % 8) == 7) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "\n");
+}
+
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps avr_sysemu_ops = {
+ .get_phys_page_debug = avr_cpu_get_phys_page_debug,
+};
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps avr_tcg_ops = {
+ .initialize = avr_cpu_tcg_init,
+ .synchronize_from_tb = avr_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = avr_cpu_exec_interrupt,
+ .tlb_fill = avr_cpu_tlb_fill,
+ .do_interrupt = avr_cpu_do_interrupt,
+};
+
+static void avr_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ AVRCPUClass *mcc = AVR_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, avr_cpu_realizefn, &mcc->parent_realize);
+ device_class_set_parent_reset(dc, avr_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = avr_cpu_class_by_name;
+
+ cc->has_work = avr_cpu_has_work;
+ cc->dump_state = avr_cpu_dump_state;
+ cc->set_pc = avr_cpu_set_pc;
+ cc->memory_rw_debug = avr_cpu_memory_rw_debug;
+ dc->vmsd = &vms_avr_cpu;
+ cc->sysemu_ops = &avr_sysemu_ops;
+ cc->disas_set_info = avr_cpu_disas_set_info;
+ cc->gdb_read_register = avr_cpu_gdb_read_register;
+ cc->gdb_write_register = avr_cpu_gdb_write_register;
+ cc->gdb_adjust_breakpoint = avr_cpu_gdb_adjust_breakpoint;
+ cc->gdb_num_core_regs = 35;
+ cc->gdb_core_xml_file = "avr-cpu.xml";
+ cc->tcg_ops = &avr_tcg_ops;
+}
+
+/*
+ * Setting features of AVR core type avr5
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * ata5702m322, ata5782, ata5790, ata5790n, ata5791, ata5795, ata5831, ata6613c,
+ * ata6614q, ata8210, ata8510, atmega16, atmega16a, atmega161, atmega162,
+ * atmega163, atmega164a, atmega164p, atmega164pa, atmega165, atmega165a,
+ * atmega165p, atmega165pa, atmega168, atmega168a, atmega168p, atmega168pa,
+ * atmega168pb, atmega169, atmega169a, atmega169p, atmega169pa, atmega16hvb,
+ * atmega16hvbrevb, atmega16m1, atmega16u4, atmega32a, atmega32, atmega323,
+ * atmega324a, atmega324p, atmega324pa, atmega325, atmega325a, atmega325p,
+ * atmega325pa, atmega3250, atmega3250a, atmega3250p, atmega3250pa, atmega328,
+ * atmega328p, atmega328pb, atmega329, atmega329a, atmega329p, atmega329pa,
+ * atmega3290, atmega3290a, atmega3290p, atmega3290pa, atmega32c1, atmega32m1,
+ * atmega32u4, atmega32u6, atmega406, atmega64, atmega64a, atmega640, atmega644,
+ * atmega644a, atmega644p, atmega644pa, atmega645, atmega645a, atmega645p,
+ * atmega6450, atmega6450a, atmega6450p, atmega649, atmega649a, atmega649p,
+ * atmega6490, atmega16hva, atmega16hva2, atmega32hvb, atmega6490a, atmega6490p,
+ * atmega64c1, atmega64m1, atmega64hve, atmega64hve2, atmega64rfr2,
+ * atmega644rfr2, atmega32hvbrevb, at90can32, at90can64, at90pwm161, at90pwm216,
+ * at90pwm316, at90scr100, at90usb646, at90usb647, at94k, m3000
+ */
+static void avr_avr5_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+/*
+ * Setting features of AVR core type avr51
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * atmega128, atmega128a, atmega1280, atmega1281, atmega1284, atmega1284p,
+ * atmega128rfa1, atmega128rfr2, atmega1284rfr2, at90can128, at90usb1286,
+ * at90usb1287
+ */
+static void avr_avr51_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_RAMPZ);
+ set_avr_feature(env, AVR_FEATURE_ELPMX);
+ set_avr_feature(env, AVR_FEATURE_ELPM);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+/*
+ * Setting features of AVR core type avr6
+ * --------------------------------------
+ *
+ * This type of AVR core is present in the following AVR MCUs:
+ *
+ * atmega2560, atmega2561, atmega256rfr2, atmega2564rfr2
+ */
+static void avr_avr6_initfn(Object *obj)
+{
+ AVRCPU *cpu = AVR_CPU(obj);
+ CPUAVRState *env = &cpu->env;
+
+ set_avr_feature(env, AVR_FEATURE_LPM);
+ set_avr_feature(env, AVR_FEATURE_IJMP_ICALL);
+ set_avr_feature(env, AVR_FEATURE_ADIW_SBIW);
+ set_avr_feature(env, AVR_FEATURE_SRAM);
+ set_avr_feature(env, AVR_FEATURE_BREAK);
+
+ set_avr_feature(env, AVR_FEATURE_3_BYTE_PC);
+ set_avr_feature(env, AVR_FEATURE_2_BYTE_SP);
+ set_avr_feature(env, AVR_FEATURE_RAMPZ);
+ set_avr_feature(env, AVR_FEATURE_EIJMP_EICALL);
+ set_avr_feature(env, AVR_FEATURE_ELPMX);
+ set_avr_feature(env, AVR_FEATURE_ELPM);
+ set_avr_feature(env, AVR_FEATURE_JMP_CALL);
+ set_avr_feature(env, AVR_FEATURE_LPMX);
+ set_avr_feature(env, AVR_FEATURE_MOVW);
+ set_avr_feature(env, AVR_FEATURE_MUL);
+}
+
+typedef struct AVRCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} AVRCPUInfo;
+
+
+static void avr_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ const char *typename = object_class_get_name(OBJECT_CLASS(data));
+
+ qemu_printf("%s\n", typename);
+}
+
+void avr_cpu_list(void)
+{
+ GSList *list;
+ list = object_class_get_list_sorted(TYPE_AVR_CPU, false);
+ g_slist_foreach(list, avr_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+#define DEFINE_AVR_CPU_TYPE(model, initfn) \
+ { \
+ .parent = TYPE_AVR_CPU, \
+ .instance_init = initfn, \
+ .name = AVR_CPU_TYPE_NAME(model), \
+ }
+
+static const TypeInfo avr_cpu_type_info[] = {
+ {
+ .name = TYPE_AVR_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(AVRCPU),
+ .instance_init = avr_cpu_initfn,
+ .class_size = sizeof(AVRCPUClass),
+ .class_init = avr_cpu_class_init,
+ .abstract = true,
+ },
+ DEFINE_AVR_CPU_TYPE("avr5", avr_avr5_initfn),
+ DEFINE_AVR_CPU_TYPE("avr51", avr_avr51_initfn),
+ DEFINE_AVR_CPU_TYPE("avr6", avr_avr6_initfn),
+};
+
+DEFINE_TYPES(avr_cpu_type_info)
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
new file mode 100644
index 000000000..dceacf3cd
--- /dev/null
+++ b/target/avr/cpu.h
@@ -0,0 +1,255 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef QEMU_AVR_CPU_H
+#define QEMU_AVR_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+#ifdef CONFIG_USER_ONLY
+#error "AVR 8-bit does not support user mode"
+#endif
+
+#define AVR_CPU_TYPE_SUFFIX "-" TYPE_AVR_CPU
+#define AVR_CPU_TYPE_NAME(name) (name AVR_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_AVR_CPU
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+/*
+ * AVR has two memory spaces, data & code, and both start at address 0:
+ * ST/LD instructions access the data space, while
+ * LPM/SPM and instruction fetching access the code memory space.
+ */
+#define MMU_CODE_IDX 0
+#define MMU_DATA_IDX 1
+
+#define EXCP_RESET 1
+#define EXCP_INT(n) (EXCP_RESET + (n) + 1)
+
+/* Number of CPU registers */
+#define NUMBER_OF_CPU_REGISTERS 32
+/* Number of IO registers accessible by ld/st/in/out */
+#define NUMBER_OF_IO_REGISTERS 64
+
+/*
+ * Offsets of AVR memory regions in host memory space.
+ *
+ * This is needed because the AVR has separate code and data address
+ * spaces that both start from zero but have to go somewhere in
+ * host memory.
+ *
+ * It's also useful to know where some things are, like the IO registers.
+ */
+/* Flash program memory */
+#define OFFSET_CODE 0x00000000
+/* CPU registers, IO registers, and SRAM */
+#define OFFSET_DATA 0x00800000
+/* CPU registers specifically, these are mapped at the start of data */
+#define OFFSET_CPU_REGISTERS OFFSET_DATA
+/*
+ * IO registers, including status register, stack pointer, and memory
+ * mapped peripherals, mapped just after CPU registers
+ */
+#define OFFSET_IO_REGISTERS (OFFSET_DATA + NUMBER_OF_CPU_REGISTERS)
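+/*
+ * For example, with this layout data address 0x0000 (r0) maps to host
+ * address 0x00800000, I/O port 0 to 0x00800020, and the first SRAM
+ * byte at data address 0x0100 to 0x00800100.
+ */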
+
+typedef enum AVRFeature {
+ AVR_FEATURE_SRAM,
+
+ AVR_FEATURE_1_BYTE_PC,
+ AVR_FEATURE_2_BYTE_PC,
+ AVR_FEATURE_3_BYTE_PC,
+
+ AVR_FEATURE_1_BYTE_SP,
+ AVR_FEATURE_2_BYTE_SP,
+
+ AVR_FEATURE_BREAK,
+ AVR_FEATURE_DES,
+ AVR_FEATURE_RMW, /* Read Modify Write - XCH LAC LAS LAT */
+
+ AVR_FEATURE_EIJMP_EICALL,
+ AVR_FEATURE_IJMP_ICALL,
+ AVR_FEATURE_JMP_CALL,
+
+ AVR_FEATURE_ADIW_SBIW,
+
+ AVR_FEATURE_SPM,
+ AVR_FEATURE_SPMX,
+
+ AVR_FEATURE_ELPMX,
+ AVR_FEATURE_ELPM,
+ AVR_FEATURE_LPMX,
+ AVR_FEATURE_LPM,
+
+ AVR_FEATURE_MOVW,
+ AVR_FEATURE_MUL,
+ AVR_FEATURE_RAMPD,
+ AVR_FEATURE_RAMPX,
+ AVR_FEATURE_RAMPY,
+ AVR_FEATURE_RAMPZ,
+} AVRFeature;
+
+typedef struct CPUAVRState CPUAVRState;
+
+struct CPUAVRState {
+ uint32_t pc_w; /* 0x003fffff up to 22 bits */
+
+ uint32_t sregC; /* 0x00000001 1 bit */
+ uint32_t sregZ; /* 0x00000001 1 bit */
+ uint32_t sregN; /* 0x00000001 1 bit */
+ uint32_t sregV; /* 0x00000001 1 bit */
+ uint32_t sregS; /* 0x00000001 1 bit */
+ uint32_t sregH; /* 0x00000001 1 bit */
+ uint32_t sregT; /* 0x00000001 1 bit */
+ uint32_t sregI; /* 0x00000001 1 bit */
+
+ uint32_t rampD; /* 0x00ff0000 8 bits */
+ uint32_t rampX; /* 0x00ff0000 8 bits */
+ uint32_t rampY; /* 0x00ff0000 8 bits */
+ uint32_t rampZ; /* 0x00ff0000 8 bits */
+ uint32_t eind; /* 0x00ff0000 8 bits */
+
+ uint32_t r[NUMBER_OF_CPU_REGISTERS]; /* 8 bits each */
+ uint32_t sp; /* 16 bits */
+
+ uint32_t skip; /* if set skip instruction */
+
+ uint64_t intsrc; /* interrupt sources */
+ bool fullacc; /* CPU/MEM if true MEM only otherwise */
+
+ uint64_t features;
+};
+
+/**
+ * AVRCPU:
+ * @env: #CPUAVRState
+ *
+ * An AVR CPU.
+ */
+typedef struct AVRCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUAVRState env;
+} AVRCPU;
+
+extern const struct VMStateDescription vms_avr_cpu;
+
+void avr_cpu_do_interrupt(CPUState *cpu);
+bool avr_cpu_exec_interrupt(CPUState *cpu, int int_req);
+hwaddr avr_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int avr_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int avr_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int avr_print_insn(bfd_vma addr, disassemble_info *info);
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr);
+
+static inline int avr_feature(CPUAVRState *env, AVRFeature feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+static inline void set_avr_feature(CPUAVRState *env, int feature)
+{
+ env->features |= (1ULL << feature);
+}
+
+#define cpu_list avr_cpu_list
+#define cpu_mmu_index avr_cpu_mmu_index
+
+static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch)
+{
+ return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
+}
+
+void avr_cpu_tcg_init(void);
+
+void avr_cpu_list(void);
+int cpu_avr_exec(CPUState *cpu);
+int avr_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf,
+ int len, bool is_write);
+
+enum {
+ TB_FLAGS_FULL_ACCESS = 1,
+ TB_FLAGS_SKIP = 2,
+};
+
+static inline void cpu_get_tb_cpu_state(CPUAVRState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ uint32_t flags = 0;
+
+ *pc = env->pc_w * 2;
+ *cs_base = 0;
+
+ if (env->fullacc) {
+ flags |= TB_FLAGS_FULL_ACCESS;
+ }
+ if (env->skip) {
+ flags |= TB_FLAGS_SKIP;
+ }
+
+ *pflags = flags;
+}
+
+static inline int cpu_interrupts_enabled(CPUAVRState *env)
+{
+ return env->sregI != 0;
+}
+
+static inline uint8_t cpu_get_sreg(CPUAVRState *env)
+{
+ uint8_t sreg;
+ sreg = (env->sregC) << 0
+ | (env->sregZ) << 1
+ | (env->sregN) << 2
+ | (env->sregV) << 3
+ | (env->sregS) << 4
+ | (env->sregH) << 5
+ | (env->sregT) << 6
+ | (env->sregI) << 7;
+ return sreg;
+}
+
+static inline void cpu_set_sreg(CPUAVRState *env, uint8_t sreg)
+{
+ env->sregC = (sreg >> 0) & 0x01;
+ env->sregZ = (sreg >> 1) & 0x01;
+ env->sregN = (sreg >> 2) & 0x01;
+ env->sregV = (sreg >> 3) & 0x01;
+ env->sregS = (sreg >> 4) & 0x01;
+ env->sregH = (sreg >> 5) & 0x01;
+ env->sregT = (sreg >> 6) & 0x01;
+ env->sregI = (sreg >> 7) & 0x01;
+}
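+
+/*
+ * For example, cpu_set_sreg(env, 0x82) sets sregI and sregZ and clears
+ * the other flags; a subsequent cpu_get_sreg(env) then returns 0x82.
+ */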
+
+bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+typedef CPUAVRState CPUArchState;
+typedef AVRCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+#endif /* !defined (QEMU_AVR_CPU_H) */
diff --git a/target/avr/disas.c b/target/avr/disas.c
new file mode 100644
index 000000000..b7689e8d7
--- /dev/null
+++ b/target/avr/disas.c
@@ -0,0 +1,245 @@
+/*
+ * AVR disassembler
+ *
+ * Copyright (c) 2019-2020 Richard Henderson <rth@twiddle.net>
+ * Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+typedef struct {
+ disassemble_info *info;
+ uint16_t next_word;
+ bool next_word_used;
+} DisasContext;
+
+static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 16);
+}
+
+static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 8);
+}
+
+static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
+{
+ return 24 + (indx % 4) * 2;
+}
+
+static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
+{
+ return (indx % 16) * 2;
+}
+
+static uint16_t next_word(DisasContext *ctx)
+{
+ ctx->next_word_used = true;
+ return ctx->next_word;
+}
+
+static int append_16(DisasContext *ctx, int x)
+{
+ return x << 16 | next_word(ctx);
+}
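+
+/*
+ * For example, a JMP/CALL whose opcode word contributes the bits 0b000001
+ * combined with a next word of 0x5678 yields append_16(ctx, 1) = 0x15678,
+ * a 22-bit word address (byte address 0x2acf0).
+ */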
+
+/* Include the auto-generated decoder. */
+static bool decode_insn(DisasContext *ctx, uint16_t insn);
+#include "decode-insn.c.inc"
+
+#define output(mnemonic, format, ...) \
+ (pctx->info->fprintf_func(pctx->info->stream, "%-9s " format, \
+ mnemonic, ##__VA_ARGS__))
+
+int avr_print_insn(bfd_vma addr, disassemble_info *info)
+{
+ DisasContext ctx;
+ DisasContext *pctx = &ctx;
+ bfd_byte buffer[4];
+ uint16_t insn;
+ int status;
+
+ ctx.info = info;
+
+ status = info->read_memory_func(addr, buffer, 4, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr, info);
+ return -1;
+ }
+ insn = bfd_getl16(buffer);
+ ctx.next_word = bfd_getl16(buffer + 2);
+ ctx.next_word_used = false;
+
+ if (!decode_insn(&ctx, insn)) {
+ output(".db", "0x%02x, 0x%02x", buffer[0], buffer[1]);
+ }
+
+ return ctx.next_word_used ? 4 : 2;
+}
+
+
+#define INSN(opcode, format, ...) \
+static bool trans_##opcode(DisasContext *pctx, arg_##opcode * a) \
+{ \
+ output(#opcode, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+#define INSN_MNEMONIC(opcode, mnemonic, format, ...) \
+static bool trans_##opcode(DisasContext *pctx, arg_##opcode * a) \
+{ \
+ output(mnemonic, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+/*
+ * C Z N V S H T I
+ * 0 1 2 3 4 5 6 7
+ */
+static const char brbc[][5] = {
+ "BRCC", "BRNE", "BRPL", "BRVC", "BRGE", "BRHC", "BRTC", "BRID"
+};
+
+static const char brbs[][5] = {
+ "BRCS", "BREQ", "BRMI", "BRVS", "BRLT", "BRHS", "BRTS", "BRIE"
+};
+
+static const char bset[][4] = {
+ "SEC", "SEZ", "SEN", "SEZ", "SES", "SEH", "SET", "SEI"
+};
+
+static const char bclr[][4] = {
+ "CLC", "CLZ", "CLN", "CLZ", "CLS", "CLH", "CLT", "CLI"
+};
+
+/*
+ * Arithmetic Instructions
+ */
+INSN(ADD, "r%d, r%d", a->rd, a->rr)
+INSN(ADC, "r%d, r%d", a->rd, a->rr)
+INSN(ADIW, "r%d:r%d, %d", a->rd + 1, a->rd, a->imm)
+INSN(SUB, "r%d, r%d", a->rd, a->rr)
+INSN(SUBI, "r%d, %d", a->rd, a->imm)
+INSN(SBC, "r%d, r%d", a->rd, a->rr)
+INSN(SBCI, "r%d, %d", a->rd, a->imm)
+INSN(SBIW, "r%d:r%d, %d", a->rd + 1, a->rd, a->imm)
+INSN(AND, "r%d, r%d", a->rd, a->rr)
+INSN(ANDI, "r%d, %d", a->rd, a->imm)
+INSN(OR, "r%d, r%d", a->rd, a->rr)
+INSN(ORI, "r%d, %d", a->rd, a->imm)
+INSN(EOR, "r%d, r%d", a->rd, a->rr)
+INSN(COM, "r%d", a->rd)
+INSN(NEG, "r%d", a->rd)
+INSN(INC, "r%d", a->rd)
+INSN(DEC, "r%d", a->rd)
+INSN(MUL, "r%d, r%d", a->rd, a->rr)
+INSN(MULS, "r%d, r%d", a->rd, a->rr)
+INSN(MULSU, "r%d, r%d", a->rd, a->rr)
+INSN(FMUL, "r%d, r%d", a->rd, a->rr)
+INSN(FMULS, "r%d, r%d", a->rd, a->rr)
+INSN(FMULSU, "r%d, r%d", a->rd, a->rr)
+INSN(DES, "%d", a->imm)
+
+/*
+ * Branch Instructions
+ */
+INSN(RJMP, ".%+d", a->imm * 2)
+INSN(IJMP, "")
+INSN(EIJMP, "")
+INSN(JMP, "0x%x", a->imm * 2)
+INSN(RCALL, ".%+d", a->imm * 2)
+INSN(ICALL, "")
+INSN(EICALL, "")
+INSN(CALL, "0x%x", a->imm * 2)
+INSN(RET, "")
+INSN(RETI, "")
+INSN(CPSE, "r%d, r%d", a->rd, a->rr)
+INSN(CP, "r%d, r%d", a->rd, a->rr)
+INSN(CPC, "r%d, r%d", a->rd, a->rr)
+INSN(CPI, "r%d, %d", a->rd, a->imm)
+INSN(SBRC, "r%d, %d", a->rr, a->bit)
+INSN(SBRS, "r%d, %d", a->rr, a->bit)
+INSN(SBIC, "$%d, %d", a->reg, a->bit)
+INSN(SBIS, "$%d, %d", a->reg, a->bit)
+INSN_MNEMONIC(BRBS, brbs[a->bit], ".%+d", a->imm * 2)
+INSN_MNEMONIC(BRBC, brbc[a->bit], ".%+d", a->imm * 2)
+
+/*
+ * Data Transfer Instructions
+ */
+INSN(MOV, "r%d, r%d", a->rd, a->rr)
+INSN(MOVW, "r%d:r%d, r%d:r%d", a->rd + 1, a->rd, a->rr + 1, a->rr)
+INSN(LDI, "r%d, %d", a->rd, a->imm)
+INSN(LDS, "r%d, %d", a->rd, a->imm)
+INSN(LDX1, "r%d, X", a->rd)
+INSN(LDX2, "r%d, X+", a->rd)
+INSN(LDX3, "r%d, -X", a->rd)
+INSN(LDY2, "r%d, Y+", a->rd)
+INSN(LDY3, "r%d, -Y", a->rd)
+INSN(LDZ2, "r%d, Z+", a->rd)
+INSN(LDZ3, "r%d, -Z", a->rd)
+INSN(LDDY, "r%d, Y+%d", a->rd, a->imm)
+INSN(LDDZ, "r%d, Z+%d", a->rd, a->imm)
+INSN(STS, "%d, r%d", a->imm, a->rd)
+INSN(STX1, "X, r%d", a->rr)
+INSN(STX2, "X+, r%d", a->rr)
+INSN(STX3, "-X, r%d", a->rr)
+INSN(STY2, "Y+, r%d", a->rd)
+INSN(STY3, "-Y, r%d", a->rd)
+INSN(STZ2, "Z+, r%d", a->rd)
+INSN(STZ3, "-Z, r%d", a->rd)
+INSN(STDY, "Y+%d, r%d", a->imm, a->rd)
+INSN(STDZ, "Z+%d, r%d", a->imm, a->rd)
+INSN(LPM1, "")
+INSN(LPM2, "r%d, Z", a->rd)
+INSN(LPMX, "r%d, Z+", a->rd)
+INSN(ELPM1, "")
+INSN(ELPM2, "r%d, Z", a->rd)
+INSN(ELPMX, "r%d, Z+", a->rd)
+INSN(SPM, "")
+INSN(SPMX, "Z+")
+INSN(IN, "r%d, $%d", a->rd, a->imm)
+INSN(OUT, "$%d, r%d", a->imm, a->rd)
+INSN(PUSH, "r%d", a->rd)
+INSN(POP, "r%d", a->rd)
+INSN(XCH, "Z, r%d", a->rd)
+INSN(LAC, "Z, r%d", a->rd)
+INSN(LAS, "Z, r%d", a->rd)
+INSN(LAT, "Z, r%d", a->rd)
+
+/*
+ * Bit and Bit-test Instructions
+ */
+INSN(LSR, "r%d", a->rd)
+INSN(ROR, "r%d", a->rd)
+INSN(ASR, "r%d", a->rd)
+INSN(SWAP, "r%d", a->rd)
+INSN(SBI, "$%d, %d", a->reg, a->bit)
+INSN(CBI, "$%d, %d", a->reg, a->bit)
+INSN(BST, "r%d, %d", a->rd, a->bit)
+INSN(BLD, "r%d, %d", a->rd, a->bit)
+INSN_MNEMONIC(BSET, bset[a->bit], "")
+INSN_MNEMONIC(BCLR, bclr[a->bit], "")
+
+/*
+ * MCU Control Instructions
+ */
+INSN(BREAK, "")
+INSN(NOP, "")
+INSN(SLEEP, "")
+INSN(WDR, "")
diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c
new file mode 100644
index 000000000..1c1b908c9
--- /dev/null
+++ b/target/avr/gdbstub.c
@@ -0,0 +1,97 @@
+/*
+ * QEMU AVR gdbstub
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "exec/gdbstub.h"
+
+int avr_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ /* R */
+ if (n < 32) {
+ return gdb_get_reg8(mem_buf, env->r[n]);
+ }
+
+ /* SREG */
+ if (n == 32) {
+ uint8_t sreg = cpu_get_sreg(env);
+
+ return gdb_get_reg8(mem_buf, sreg);
+ }
+
+ /* SP */
+ if (n == 33) {
+ return gdb_get_reg16(mem_buf, env->sp & 0x0000ffff);
+ }
+
+ /* PC */
+ if (n == 34) {
+ return gdb_get_reg32(mem_buf, env->pc_w * 2);
+ }
+
+ return 0;
+}
+
+int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ /* R */
+ if (n < 32) {
+ env->r[n] = *mem_buf;
+ return 1;
+ }
+
+ /* SREG */
+ if (n == 32) {
+ cpu_set_sreg(env, *mem_buf);
+ return 1;
+ }
+
+ /* SP */
+ if (n == 33) {
+ env->sp = lduw_p(mem_buf);
+ return 2;
+ }
+
+ /* PC */
+ if (n == 34) {
+ env->pc_w = ldl_p(mem_buf) / 2;
+ return 4;
+ }
+
+ return 0;
+}
+
+vaddr avr_cpu_gdb_adjust_breakpoint(CPUState *cpu, vaddr addr)
+{
+ /*
+ * This is due to some strange GDB behavior
+ * Let's assume main has address 0x100:
+ * b main - sets breakpoint at address 0x00000100 (code)
+ * b *0x100 - sets breakpoint at address 0x00800100 (data)
+ *
+ * Force all breakpoints into code space.
+ */
+ return addr % OFFSET_DATA;
+}
diff --git a/target/avr/helper.c b/target/avr/helper.c
new file mode 100644
index 000000000..981c29da4
--- /dev/null
+++ b/target/avr/helper.c
@@ -0,0 +1,347 @@
+/*
+ * QEMU AVR CPU helpers
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
+#include "exec/exec-all.h"
+#include "exec/address-spaces.h"
+#include "exec/helper-proto.h"
+
+bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ bool ret = false;
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
+ if (cpu_interrupts_enabled(env)) {
+ cs->exception_index = EXCP_RESET;
+ cc->tcg_ops->do_interrupt(cs);
+
+ cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
+
+ ret = true;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
+ int index = ctz32(env->intsrc);
+ cs->exception_index = EXCP_INT(index);
+ cc->tcg_ops->do_interrupt(cs);
+
+ env->intsrc &= env->intsrc - 1; /* clear the interrupt */
+ if (!env->intsrc) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+void avr_cpu_do_interrupt(CPUState *cs)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+
+ uint32_t ret = env->pc_w;
+ int vector = 0;
+ int size = avr_feature(env, AVR_FEATURE_JMP_CALL) ? 2 : 1;
+ int base = 0;
+
+ if (cs->exception_index == EXCP_RESET) {
+ vector = 0;
+ } else if (env->intsrc != 0) {
+ vector = ctz32(env->intsrc) + 1;
+ }
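+
+ /*
+ * For example, with 2-word vectors (AVR_FEATURE_JMP_CALL) a pending
+ * interrupt source 0 selects vector 1 and dispatches to word address 2,
+ * i.e. byte address 4.
+ */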
+
+ if (avr_feature(env, AVR_FEATURE_3_BYTE_PC)) {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
+ cpu_stb_data(env, env->sp--, (ret & 0xff0000) >> 16);
+ } else if (avr_feature(env, AVR_FEATURE_2_BYTE_PC)) {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ cpu_stb_data(env, env->sp--, (ret & 0x00ff00) >> 8);
+ } else {
+ cpu_stb_data(env, env->sp--, (ret & 0x0000ff));
+ }
+
+ env->pc_w = base + vector * size;
+ env->sregI = 0; /* clear Global Interrupt Flag */
+
+ cs->exception_index = -1;
+}
+
+int avr_cpu_memory_rw_debug(CPUState *cs, vaddr addr, uint8_t *buf,
+ int len, bool is_write)
+{
+ return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
+}
+
+hwaddr avr_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ return addr; /* assume a 1:1 address correspondence */
+}
+
+bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ int prot = 0;
+ MemTxAttrs attrs = {};
+ uint32_t paddr;
+
+ address &= TARGET_PAGE_MASK;
+
+ if (mmu_idx == MMU_CODE_IDX) {
+ /* access to code in flash */
+ paddr = OFFSET_CODE + address;
+ prot = PAGE_READ | PAGE_EXEC;
+ if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) {
+ error_report("execution left flash memory");
+ abort();
+ }
+ } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /*
+ * access to CPU registers: exit and rebuild this TB to use full access
+ * in case it touches specially handled registers like SREG or SP
+ */
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+ env->fullacc = 1;
+ cpu_loop_exit_restore(cs, retaddr);
+ } else {
+ /* access to memory. nothing special */
+ paddr = OFFSET_DATA + address;
+ prot = PAGE_READ | PAGE_WRITE;
+ }
+
+ tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+
+ return true;
+}
+
+/*
+ * helpers
+ */
+
+void helper_sleep(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void helper_unsupported(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /*
+ * I could not find what happens on the real platform, so
+ * it's EXCP_DEBUG for the time being
+ */
+ cs->exception_index = EXCP_DEBUG;
+ if (qemu_loglevel_mask(LOG_UNIMP)) {
+ qemu_log("UNSUPPORTED\n");
+ cpu_dump_state(cs, stderr, 0);
+ }
+ cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+void helper_break(CPUAVRState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+void helper_wdr(CPUAVRState *env)
+{
+ qemu_log_mask(LOG_UNIMP, "WDG reset (not implemented)\n");
+}
+
+/*
+ * This function implements the IN instruction.
+ *
+ * It does the following:
+ * a. if the IO register belongs to the CPU, its value is read and returned
+ * b. otherwise the IO address is translated to a memory address and
+ * physical memory is read
+ */
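+/* For example, "IN r24, 0x3f" reads port 0x3f (SREG), returning cpu_get_sreg(env). */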
+target_ulong helper_inb(CPUAVRState *env, uint32_t port)
+{
+ target_ulong data = 0;
+
+ switch (port) {
+ case 0x38: /* RAMPD */
+ data = 0xff & (env->rampD >> 16);
+ break;
+ case 0x39: /* RAMPX */
+ data = 0xff & (env->rampX >> 16);
+ break;
+ case 0x3a: /* RAMPY */
+ data = 0xff & (env->rampY >> 16);
+ break;
+ case 0x3b: /* RAMPZ */
+ data = 0xff & (env->rampZ >> 16);
+ break;
+ case 0x3c: /* EIND */
+ data = 0xff & (env->eind >> 16);
+ break;
+ case 0x3d: /* SPL */
+ data = env->sp & 0x00ff;
+ break;
+ case 0x3e: /* SPH */
+ data = env->sp >> 8;
+ break;
+ case 0x3f: /* SREG */
+ data = cpu_get_sreg(env);
+ break;
+ default:
+ /* not a special register, pass to normal memory access */
+ data = address_space_ldub(&address_space_memory,
+ OFFSET_IO_REGISTERS + port,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+
+ return data;
+}
+
+/*
+ * This function implements the OUT instruction.
+ *
+ * It does the following:
+ * a. if the IO register belongs to the CPU, the value is written into
+ * the register
+ * b. otherwise the IO address is translated to a memory address and
+ * physical memory is written
+ */
+void helper_outb(CPUAVRState *env, uint32_t port, uint32_t data)
+{
+ data &= 0x000000ff;
+
+ switch (port) {
+ case 0x38: /* RAMPD */
+ if (avr_feature(env, AVR_FEATURE_RAMPD)) {
+ env->rampD = (data & 0xff) << 16;
+ }
+ break;
+ case 0x39: /* RAMPX */
+ if (avr_feature(env, AVR_FEATURE_RAMPX)) {
+ env->rampX = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3a: /* RAMPY */
+ if (avr_feature(env, AVR_FEATURE_RAMPY)) {
+ env->rampY = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3b: /* RAMPZ */
+ if (avr_feature(env, AVR_FEATURE_RAMPZ)) {
+ env->rampZ = (data & 0xff) << 16;
+ }
+ break;
+ case 0x3c: /* EIND */
+ env->eind = (data & 0xff) << 16;
+ break;
+ case 0x3d: /* SPL */
+ env->sp = (env->sp & 0xff00) | (data);
+ break;
+ case 0x3e: /* SPH */
+ if (avr_feature(env, AVR_FEATURE_2_BYTE_SP)) {
+ env->sp = (env->sp & 0x00ff) | (data << 8);
+ }
+ break;
+ case 0x3f: /* SREG */
+ cpu_set_sreg(env, data);
+ break;
+ default:
+ /* not a special register, pass to normal memory access */
+ address_space_stb(&address_space_memory, OFFSET_IO_REGISTERS + port,
+ data, MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
+
+/*
+ * This function implements the LD instruction when there is a possibility
+ * of reading from a CPU register.
+ */
+target_ulong helper_fullrd(CPUAVRState *env, uint32_t addr)
+{
+ uint8_t data;
+
+ env->fullacc = false;
+
+ if (addr < NUMBER_OF_CPU_REGISTERS) {
+ /* CPU registers */
+ data = env->r[addr];
+ } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /* IO registers */
+ data = helper_inb(env, addr - NUMBER_OF_CPU_REGISTERS);
+ } else {
+ /* memory */
+ data = address_space_ldub(&address_space_memory, OFFSET_DATA + addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+ return data;
+}
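+
+/*
+ * For example, helper_fullrd(env, 0x05) returns CPU register r5, while
+ * helper_fullrd(env, 0x20) reads I/O port 0 via helper_inb().
+ */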
+
+/*
+ * This function implements the ST instruction when there is a possibility
+ * of writing into a CPU register.
+ */
+void helper_fullwr(CPUAVRState *env, uint32_t data, uint32_t addr)
+{
+ env->fullacc = false;
+
+ /* Following logic assumes this: */
+ assert(OFFSET_CPU_REGISTERS == OFFSET_DATA);
+ assert(OFFSET_IO_REGISTERS == OFFSET_CPU_REGISTERS +
+ NUMBER_OF_CPU_REGISTERS);
+
+ if (addr < NUMBER_OF_CPU_REGISTERS) {
+ /* CPU registers */
+ env->r[addr] = data;
+ } else if (addr < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /* IO registers */
+ helper_outb(env, addr - NUMBER_OF_CPU_REGISTERS, data);
+ } else {
+ /* memory */
+ address_space_stb(&address_space_memory, OFFSET_DATA + addr, data,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
diff --git a/target/avr/helper.h b/target/avr/helper.h
new file mode 100644
index 000000000..4d02e648f
--- /dev/null
+++ b/target/avr/helper.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU AVR CPU helpers
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+DEF_HELPER_1(wdr, void, env)
+DEF_HELPER_1(debug, noreturn, env)
+DEF_HELPER_1(break, noreturn, env)
+DEF_HELPER_1(sleep, noreturn, env)
+DEF_HELPER_1(unsupported, noreturn, env)
+DEF_HELPER_3(outb, void, env, i32, i32)
+DEF_HELPER_2(inb, tl, env, i32)
+DEF_HELPER_3(fullwr, void, env, i32, i32)
+DEF_HELPER_2(fullrd, tl, env, i32)
diff --git a/target/avr/insn.decode b/target/avr/insn.decode
new file mode 100644
index 000000000..482c23ad0
--- /dev/null
+++ b/target/avr/insn.decode
@@ -0,0 +1,187 @@
+#
+# AVR instruction decode definitions.
+#
+# Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# regs_16_31_by_one = [16 .. 31]
+# regs_16_23_by_one = [16 .. 23]
+# regs_24_30_by_two = [24, 26, 28, 30]
+# regs_00_30_by_two = [0, 2, 4, 6, 8, .. 30]
+
+%rd 4:5
+%rr 9:1 0:4
+
+%rd_a 4:4 !function=to_regs_16_31_by_one
+%rd_b 4:3 !function=to_regs_16_23_by_one
+%rd_c 4:2 !function=to_regs_24_30_by_two
+%rr_a 0:4 !function=to_regs_16_31_by_one
+%rr_b 0:3 !function=to_regs_16_23_by_one
+
+%imm6 6:2 0:4
+%imm8 8:4 0:4
+
+%io_imm 9:2 0:4
+%ldst_d_imm 13:1 10:2 0:3
+
+
+&rd_rr rd rr
+&rd_imm rd imm
+
+@op_rd_rr .... .. . ..... .... &rd_rr rd=%rd rr=%rr
+@op_rd_imm6 .... .... .. .. .... &rd_imm rd=%rd_c imm=%imm6
+@op_rd_imm8 .... .... .... .... &rd_imm rd=%rd_a imm=%imm8
+@fmul .... .... . ... . ... &rd_rr rd=%rd_b rr=%rr_b
+
+#
+# Arithmetic Instructions
+#
+ADD 0000 11 . ..... .... @op_rd_rr
+ADC 0001 11 . ..... .... @op_rd_rr
+ADIW 1001 0110 .. .. .... @op_rd_imm6
+SUB 0001 10 . ..... .... @op_rd_rr
+SUBI 0101 .... .... .... @op_rd_imm8
+SBC 0000 10 . ..... .... @op_rd_rr
+SBCI 0100 .... .... .... @op_rd_imm8
+SBIW 1001 0111 .. .. .... @op_rd_imm6
+AND 0010 00 . ..... .... @op_rd_rr
+ANDI 0111 .... .... .... @op_rd_imm8
+OR 0010 10 . ..... .... @op_rd_rr
+ORI 0110 .... .... .... @op_rd_imm8
+EOR 0010 01 . ..... .... @op_rd_rr
+COM 1001 010 rd:5 0000
+NEG 1001 010 rd:5 0001
+INC 1001 010 rd:5 0011
+DEC 1001 010 rd:5 1010
+MUL 1001 11 . ..... .... @op_rd_rr
+MULS 0000 0010 .... .... &rd_rr rd=%rd_a rr=%rr_a
+MULSU 0000 0011 0 ... 0 ... @fmul
+FMUL 0000 0011 0 ... 1 ... @fmul
+FMULS 0000 0011 1 ... 0 ... @fmul
+FMULSU 0000 0011 1 ... 1 ... @fmul
+DES 1001 0100 imm:4 1011
+
+#
+# Branch Instructions
+#
+
+# The 22-bit immediate is partially in the opcode word,
+# and partially in the next. Use append_16 to build the
+# complete 22-bit value.
+%imm_call 4:5 0:1 !function=append_16
+
+@op_bit .... .... . bit:3 ....
+@op_bit_imm .... .. imm:s7 bit:3
+
+RJMP 1100 imm:s12
+IJMP 1001 0100 0000 1001
+EIJMP 1001 0100 0001 1001
+JMP 1001 010 ..... 110 . imm=%imm_call
+RCALL 1101 imm:s12
+ICALL 1001 0101 0000 1001
+EICALL 1001 0101 0001 1001
+CALL 1001 010 ..... 111 . imm=%imm_call
+RET 1001 0101 0000 1000
+RETI 1001 0101 0001 1000
+CPSE 0001 00 . ..... .... @op_rd_rr
+CP 0001 01 . ..... .... @op_rd_rr
+CPC 0000 01 . ..... .... @op_rd_rr
+CPI 0011 .... .... .... @op_rd_imm8
+SBRC 1111 110 rr:5 0 bit:3
+SBRS 1111 111 rr:5 0 bit:3
+SBIC 1001 1001 reg:5 bit:3
+SBIS 1001 1011 reg:5 bit:3
+BRBS 1111 00 ....... ... @op_bit_imm
+BRBC 1111 01 ....... ... @op_bit_imm
+
+#
+# Data Transfer Instructions
+#
+
+%rd_d 4:4 !function=to_regs_00_30_by_two
+%rr_d 0:4 !function=to_regs_00_30_by_two
+
+@io_rd_imm .... . .. ..... .... &rd_imm rd=%rd imm=%io_imm
+@ldst_d .. . . .. . rd:5 . ... &rd_imm imm=%ldst_d_imm
+
+# The 16-bit immediate is completely in the next word.
+# Fields cannot be defined with no bits, so we cannot play
+# the same trick and append to a zero-bit value.
+# Defer reading the immediate until trans_{LDS,STS}.
+@ldst_s .... ... rd:5 .... imm=0
+
+MOV 0010 11 . ..... .... @op_rd_rr
+MOVW 0000 0001 .... .... &rd_rr rd=%rd_d rr=%rr_d
+LDI 1110 .... .... .... @op_rd_imm8
+LDS 1001 000 ..... 0000 @ldst_s
+LDX1 1001 000 rd:5 1100
+LDX2 1001 000 rd:5 1101
+LDX3 1001 000 rd:5 1110
+LDY2 1001 000 rd:5 1001
+LDY3 1001 000 rd:5 1010
+LDZ2 1001 000 rd:5 0001
+LDZ3 1001 000 rd:5 0010
+LDDY 10 . 0 .. 0 ..... 1 ... @ldst_d
+LDDZ 10 . 0 .. 0 ..... 0 ... @ldst_d
+STS 1001 001 ..... 0000 @ldst_s
+STX1 1001 001 rr:5 1100
+STX2 1001 001 rr:5 1101
+STX3 1001 001 rr:5 1110
+STY2 1001 001 rd:5 1001
+STY3 1001 001 rd:5 1010
+STZ2 1001 001 rd:5 0001
+STZ3 1001 001 rd:5 0010
+STDY 10 . 0 .. 1 ..... 1 ... @ldst_d
+STDZ 10 . 0 .. 1 ..... 0 ... @ldst_d
+LPM1 1001 0101 1100 1000
+LPM2 1001 000 rd:5 0100
+LPMX 1001 000 rd:5 0101
+ELPM1 1001 0101 1101 1000
+ELPM2 1001 000 rd:5 0110
+ELPMX 1001 000 rd:5 0111
+SPM 1001 0101 1110 1000
+SPMX 1001 0101 1111 1000
+IN 1011 0 .. ..... .... @io_rd_imm
+OUT 1011 1 .. ..... .... @io_rd_imm
+PUSH 1001 001 rd:5 1111
+POP 1001 000 rd:5 1111
+XCH 1001 001 rd:5 0100
+LAC 1001 001 rd:5 0110
+LAS 1001 001 rd:5 0101
+LAT 1001 001 rd:5 0111
+
+#
+# Bit and Bit-test Instructions
+#
+LSR 1001 010 rd:5 0110
+ROR 1001 010 rd:5 0111
+ASR 1001 010 rd:5 0101
+SWAP 1001 010 rd:5 0010
+SBI 1001 1010 reg:5 bit:3
+CBI 1001 1000 reg:5 bit:3
+BST 1111 101 rd:5 0 bit:3
+BLD 1111 100 rd:5 0 bit:3
+BSET 1001 0100 0 bit:3 1000
+BCLR 1001 0100 1 bit:3 1000
+
+#
+# MCU Control Instructions
+#
+BREAK 1001 0101 1001 1000
+NOP 0000 0000 0000 0000
+SLEEP 1001 0101 1000 1000
+WDR 1001 0101 1010 1000
diff --git a/target/avr/machine.c b/target/avr/machine.c
new file mode 100644
index 000000000..16f7a3e03
--- /dev/null
+++ b/target/avr/machine.c
@@ -0,0 +1,119 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2016-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+static int get_sreg(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUAVRState *env = opaque;
+ uint8_t sreg;
+
+ sreg = qemu_get_byte(f);
+ cpu_set_sreg(env, sreg);
+ return 0;
+}
+
+static int put_sreg(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUAVRState *env = opaque;
+ uint8_t sreg = cpu_get_sreg(env);
+
+ qemu_put_byte(f, sreg);
+ return 0;
+}
+
+static const VMStateInfo vms_sreg = {
+ .name = "sreg",
+ .get = get_sreg,
+ .put = put_sreg,
+};
+
+static int get_segment(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ uint32_t *ramp = opaque;
+ uint8_t temp;
+
+ temp = qemu_get_byte(f);
+ *ramp = ((uint32_t)temp) << 16;
+ return 0;
+}
+
+static int put_segment(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ uint32_t *ramp = opaque;
+ uint8_t temp = *ramp >> 16;
+
+ qemu_put_byte(f, temp);
+ return 0;
+}
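+
+/*
+ * For illustration: only the segment byte is migrated. Given the
+ * in-memory 0x00ff0000 format, a rampZ value of 0x00030000 travels as
+ * the single byte 0x03 and is shifted back into place on load.
+ */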
+
+static const VMStateInfo vms_rampD = {
+ .name = "rampD",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampX = {
+ .name = "rampX",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampY = {
+ .name = "rampY",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_rampZ = {
+ .name = "rampZ",
+ .get = get_segment,
+ .put = put_segment,
+};
+static const VMStateInfo vms_eind = {
+ .name = "eind",
+ .get = get_segment,
+ .put = put_segment,
+};
+
+const VMStateDescription vms_avr_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.pc_w, AVRCPU),
+ VMSTATE_UINT32(env.sp, AVRCPU),
+ VMSTATE_UINT32(env.skip, AVRCPU),
+
+ VMSTATE_UINT32_ARRAY(env.r, AVRCPU, NUMBER_OF_CPU_REGISTERS),
+
+ VMSTATE_SINGLE(env, AVRCPU, 0, vms_sreg, CPUAVRState),
+ VMSTATE_SINGLE(env.rampD, AVRCPU, 0, vms_rampD, uint32_t),
+ VMSTATE_SINGLE(env.rampX, AVRCPU, 0, vms_rampX, uint32_t),
+ VMSTATE_SINGLE(env.rampY, AVRCPU, 0, vms_rampY, uint32_t),
+ VMSTATE_SINGLE(env.rampZ, AVRCPU, 0, vms_rampZ, uint32_t),
+ VMSTATE_SINGLE(env.eind, AVRCPU, 0, vms_eind, uint32_t),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/avr/meson.build b/target/avr/meson.build
new file mode 100644
index 000000000..7e8e29c59
--- /dev/null
+++ b/target/avr/meson.build
@@ -0,0 +1,20 @@
+gen = [
+ decodetree.process('insn.decode', extra_args: [ '--decode', 'decode_insn',
+ '--insnwidth', '16' ])
+]
+
+avr_ss = ss.source_set()
+avr_softmmu_ss = ss.source_set()
+
+avr_ss.add(gen)
+avr_ss.add(files(
+ 'translate.c',
+ 'helper.c',
+ 'cpu.c',
+ 'gdbstub.c',
+ 'disas.c'))
+
+avr_softmmu_ss.add(files('machine.c'))
+
+target_arch += {'avr': avr_ss}
+target_softmmu_arch += {'avr': avr_softmmu_ss}
diff --git a/target/avr/translate.c b/target/avr/translate.c
new file mode 100644
index 000000000..af8a3e0f9
--- /dev/null
+++ b/target/avr/translate.c
@@ -0,0 +1,3043 @@
+/*
+ * QEMU AVR CPU
+ *
+ * Copyright (c) 2019-2020 Michael Rolnik
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "tcg/tcg.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+#include "exec/translator.h"
+#include "exec/gen-icount.h"
+
+/*
+ * Define if you want a BREAK instruction translated to a breakpoint.
+ * An active debugging connection is assumed.
+ * This is used by the instruction tests at
+ * https://github.com/seharris/qemu-avr-tests/tree/master/instruction-tests
+ */
+#undef BREAKPOINT_ON_BREAK
+
+static TCGv cpu_pc;
+
+static TCGv cpu_Cf;
+static TCGv cpu_Zf;
+static TCGv cpu_Nf;
+static TCGv cpu_Vf;
+static TCGv cpu_Sf;
+static TCGv cpu_Hf;
+static TCGv cpu_Tf;
+static TCGv cpu_If;
+
+static TCGv cpu_rampD;
+static TCGv cpu_rampX;
+static TCGv cpu_rampY;
+static TCGv cpu_rampZ;
+
+static TCGv cpu_r[NUMBER_OF_CPU_REGISTERS];
+static TCGv cpu_eind;
+static TCGv cpu_sp;
+
+static TCGv cpu_skip;
+
+static const char reg_names[NUMBER_OF_CPU_REGISTERS][8] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+};
+#define REG(x) (cpu_r[x])
+
+#define DISAS_EXIT DISAS_TARGET_0 /* We want to return to the cpu main loop. */
+#define DISAS_LOOKUP DISAS_TARGET_1 /* We have a variable condition exit. */
+#define DISAS_CHAIN DISAS_TARGET_2 /* We have a single condition exit. */
+
+typedef struct DisasContext DisasContext;
+
+/* This is the state at translation time. */
+struct DisasContext {
+ DisasContextBase base;
+
+ CPUAVRState *env;
+ CPUState *cs;
+
+ target_long npc;
+ uint32_t opcode;
+
+ /* Routine used to access memory */
+ int memidx;
+
+ /*
+ * Some AVR instructions can cause the following instruction to be skipped.
+ * Let's name those instructions:
+ * A - an instruction that can skip the next one
+ * B - an instruction that can be skipped; whether it is depends on execution of A
+ * There are two scenarios:
+ * 1. A and B belong to the same translation block
+ * 2. A is the last instruction in the translation block and B is the first
+ * instruction in the next one
+ *
+ * following variables are used to simplify the skipping logic, they are
+ * used in the following manner (sketch)
+ *
+ * TCGLabel *skip_label = NULL;
+ * if (ctx->skip_cond != TCG_COND_NEVER) {
+ * skip_label = gen_new_label();
+ * tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label);
+ * }
+ *
+ * if (free_skip_var0) {
+ * tcg_temp_free(skip_var0);
+ * free_skip_var0 = false;
+ * }
+ *
+ * translate(ctx);
+ *
+ * if (skip_label) {
+ * gen_set_label(skip_label);
+ * }
+ */
+ TCGv skip_var0;
+ TCGv skip_var1;
+ TCGCond skip_cond;
+ bool free_skip_var0;
+};
+
+void avr_cpu_tcg_init(void)
+{
+ int i;
+
+#define AVR_REG_OFFS(x) offsetof(CPUAVRState, x)
+ cpu_pc = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(pc_w), "pc");
+ cpu_Cf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregC), "Cf");
+ cpu_Zf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregZ), "Zf");
+ cpu_Nf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregN), "Nf");
+ cpu_Vf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregV), "Vf");
+ cpu_Sf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregS), "Sf");
+ cpu_Hf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregH), "Hf");
+ cpu_Tf = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregT), "Tf");
+ cpu_If = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sregI), "If");
+ cpu_rampD = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampD), "rampD");
+ cpu_rampX = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampX), "rampX");
+ cpu_rampY = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampY), "rampY");
+ cpu_rampZ = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(rampZ), "rampZ");
+ cpu_eind = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(eind), "eind");
+ cpu_sp = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(sp), "sp");
+ cpu_skip = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(skip), "skip");
+
+ for (i = 0; i < NUMBER_OF_CPU_REGISTERS; i++) {
+ cpu_r[i] = tcg_global_mem_new_i32(cpu_env, AVR_REG_OFFS(r[i]),
+ reg_names[i]);
+ }
+#undef AVR_REG_OFFS
+}
+
+static int to_regs_16_31_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 16);
+}
+
+static int to_regs_16_23_by_one(DisasContext *ctx, int indx)
+{
+ return 16 + (indx % 8);
+}
+
+static int to_regs_24_30_by_two(DisasContext *ctx, int indx)
+{
+ return 24 + (indx % 4) * 2;
+}
+
+static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
+{
+ return (indx % 16) * 2;
+}
+
+static uint16_t next_word(DisasContext *ctx)
+{
+ return cpu_lduw_code(ctx->env, ctx->npc++ * 2);
+}
+
+static int append_16(DisasContext *ctx, int x)
+{
+ return x << 16 | next_word(ctx);
+}
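+
+/*
+ * For illustration (assuming the two-word JMP/CALL forms reach this via
+ * !function in insn.decode): append_16 combines a first-word field x with
+ * the entire trailing word, e.g. x = 0x2 followed by the word 0x1234
+ * decodes as 0x2 << 16 | 0x1234 = 0x21234.
+ */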
+
+static bool avr_have_feature(DisasContext *ctx, int feature)
+{
+ if (!avr_feature(ctx->env, feature)) {
+ gen_helper_unsupported(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return false;
+ }
+ return true;
+}
+
+static bool decode_insn(DisasContext *ctx, uint16_t insn);
+#include "decode-insn.c.inc"
+
+/*
+ * Arithmetic Instructions
+ */
+
+/*
+ * Utility functions for updating status registers:
+ *
+ * - gen_add_CHf()
+ * - gen_add_Vf()
+ * - gen_sub_CHf()
+ * - gen_sub_Vf()
+ * - gen_NSf()
+ * - gen_ZNSf()
+ *
+ */
+
+static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ TCGv t3 = tcg_temp_new_i32();
+
+ tcg_gen_and_tl(t1, Rd, Rr); /* t1 = Rd & Rr */
+ tcg_gen_andc_tl(t2, Rd, R); /* t2 = Rd & ~R */
+ tcg_gen_andc_tl(t3, Rr, R); /* t3 = Rr & ~R */
+ tcg_gen_or_tl(t1, t1, t2); /* t1 = t1 | t2 | t3 */
+ tcg_gen_or_tl(t1, t1, t3);
+
+ tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */
+ tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */
+ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
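+
+/*
+ * A worked example of the carry computation above, with arbitrary values:
+ * Rd = 0x88 and Rr = 0x08 give R = 0x90, so
+ *   t1 = 0x88 & 0x08  = 0x08
+ *   t2 = 0x88 & ~0x90 = 0x08
+ *   t3 = 0x08 & ~0x90 = 0x08
+ * and t1 | t2 | t3 = 0x08: bit 3 is set (Hf = 1, a carry out of the low
+ * nibble) while bit 7 is clear (Cf = 0).
+ */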
+
+static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ /* t1 = Rd & Rr & ~R | ~Rd & ~Rr & R */
+ /* = (Rd ^ R) & ~(Rd ^ Rr) */
+ tcg_gen_xor_tl(t1, Rd, R);
+ tcg_gen_xor_tl(t2, Rd, Rr);
+ tcg_gen_andc_tl(t1, t1, t2);
+
+ tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
+
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
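+
+/*
+ * For example, adding 0x40 + 0x40 gives R = 0x80:
+ *   (Rd ^ R) & ~(Rd ^ Rr) = 0xc0 & ~0x00 = 0xc0
+ * so bit 7 yields Vf = 1, the expected signed overflow of 64 + 64.
+ */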
+
+static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ TCGv t3 = tcg_temp_new_i32();
+
+ tcg_gen_not_tl(t1, Rd); /* t1 = ~Rd */
+ tcg_gen_and_tl(t2, t1, Rr); /* t2 = ~Rd & Rr */
+ tcg_gen_or_tl(t3, t1, Rr); /* t3 = (~Rd | Rr) & R */
+ tcg_gen_and_tl(t3, t3, R);
+ tcg_gen_or_tl(t2, t2, t3); /* t2 = ~Rd & Rr | ~Rd & R | R & Rr */
+
+ tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */
+ tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */
+ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
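+
+/*
+ * For example, Rd = 0x10 and Rr = 0x01 give R = 0x0f:
+ *   ~Rd & Rr       = 0xef & 0x01 = 0x01
+ *   (~Rd | Rr) & R = 0xef & 0x0f = 0x0f
+ * so t2 = 0x0f: bit 3 set signals a borrow from bit 4 (Hf = 1), bit 7
+ * clear signals no borrow out of bit 7 (Cf = 0).
+ */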
+
+static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr)
+{
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ /* t1 = Rd & ~Rr & ~R | ~Rd & Rr & R */
+ /* = (Rd ^ R) & (Rd ^ Rr) */
+ tcg_gen_xor_tl(t1, Rd, R);
+ tcg_gen_xor_tl(t2, Rd, Rr);
+ tcg_gen_and_tl(t1, t1, t2);
+
+ tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */
+
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
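+
+/*
+ * For example, 0x80 - 0x01 (i.e. -128 - 1) gives R = 0x7f:
+ *   (Rd ^ R) & (Rd ^ Rr) = 0xff & 0x81 = 0x81
+ * so bit 7 yields Vf = 1, flagging the signed underflow.
+ */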
+
+static void gen_NSf(TCGv R)
+{
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+static void gen_ZNSf(TCGv R)
+{
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+/*
+ * Adds two registers without the C Flag and places the result in the
+ * destination register Rd.
+ */
+static bool trans_ADD(DisasContext *ctx, arg_ADD *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_add_tl(R, Rd, Rr); /* Rd = Rd + Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_add_CHf(R, Rd, Rr);
+ gen_add_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds two registers and the contents of the C Flag and places the result in
+ * the destination register Rd.
+ */
+static bool trans_ADC(DisasContext *ctx, arg_ADC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_add_tl(R, Rd, Rr); /* R = Rd + Rr + Cf */
+ tcg_gen_add_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_add_CHf(R, Rd, Rr);
+ gen_add_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds an immediate value (0-63) to a register pair and places the result
+ * in the register pair. This instruction operates on the upper four register
+ * pairs, and is well suited for operations on the pointer registers. This
+ * instruction is not available in all devices. Refer to the device specific
+ * instruction set summary.
+ */
+static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ int Imm = (a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv Rd = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
+ tcg_gen_addi_tl(R, Rd, Imm); /* R = Rd + Imm */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
+ tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15);
+ tcg_gen_andc_tl(cpu_Vf, R, Rd); /* Vf = R & ~Rd */
+ tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf);/* Sf = Nf ^ Vf */
+
+ /* update output registers */
+ tcg_gen_andi_tl(RdL, R, 0xff);
+ tcg_gen_shri_tl(RdH, R, 8);
+
+ tcg_temp_free_i32(Rd);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
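+
+/*
+ * A quick sanity check of the pairing above, with arbitrary values:
+ * ADIW r25:r24, 1 with r25:r24 = 0x00ff deposits Rd = 0x00ff, computes
+ * R = 0x0100 and writes back RdL = 0x00, RdH = 0x01; starting from
+ * 0xffff instead, the masked result is 0x0000, bit 15 of Rd & ~R is
+ * set, so Cf = 1 and Zf = 1.
+ */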
+
+/*
+ * Subtracts two registers and places the result in the destination
+ * register Rd.
+ */
+static bool trans_SUB(DisasContext *ctx, arg_SUB *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, Rd, R); /* Cf = Rd & ~R */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Subtracts a constant from a register and places the result in the
+ * destination register Rd. This instruction works on registers R16 to R31
+ * and is well suited for operations on the X, Y, and Z-pointers.
+ */
+static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = tcg_const_i32(a->imm);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Imm */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * Subtracts two registers and subtracts with the C Flag and places the
+ * result in the destination register Rd.
+ */
+static bool trans_SBC(DisasContext *ctx, arg_SBC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * The Z flag keeps its previous value when the result is zero;
+ * it is cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * SBCI -- Subtract Immediate with Carry
+ */
+static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = tcg_const_i32(a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * The Z flag keeps its previous value when the result is zero;
+ * it is cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * Subtracts an immediate value (0-63) from a register pair and places the
+ * result in the register pair. This instruction operates on the upper four
+ * register pairs, and is well suited for operations on the Pointer Registers.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ADIW_SBIW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ int Imm = (a->imm);
+ TCGv R = tcg_temp_new_i32();
+ TCGv Rd = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(Rd, RdL, RdH, 8, 8); /* Rd = RdH:RdL */
+ tcg_gen_subi_tl(R, Rd, Imm); /* R = Rd - Imm */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_andc_tl(cpu_Cf, R, Rd);
+ tcg_gen_shri_tl(cpu_Cf, cpu_Cf, 15); /* Cf = R & ~Rd */
+ tcg_gen_andc_tl(cpu_Vf, Rd, R);
+ tcg_gen_shri_tl(cpu_Vf, cpu_Vf, 15); /* Vf = Rd & ~R */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 15); /* Nf = R(15) */
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+
+ /* update output registers */
+ tcg_gen_andi_tl(RdL, R, 0xff);
+ tcg_gen_shri_tl(RdH, R, 8);
+
+ tcg_temp_free_i32(Rd);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical AND between the contents of register Rd and register
+ * Rr and places the result in the destination register Rd.
+ */
+static bool trans_AND(DisasContext *ctx, arg_AND *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_and_tl(R, Rd, Rr); /* Rd = Rd and Rr */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical AND between the contents of register Rd and a constant
+ * and places the result in the destination register Rd.
+ */
+static bool trans_ANDI(DisasContext *ctx, arg_ANDI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = (a->imm);
+
+ tcg_gen_andi_tl(Rd, Rd, Imm); /* Rd = Rd & Imm */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Performs the logical OR between the contents of register Rd and register
+ * Rr and places the result in the destination register Rd.
+ */
+static bool trans_OR(DisasContext *ctx, arg_OR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_or_tl(R, Rd, Rr);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Performs the logical OR between the contents of register Rd and a
+ * constant and places the result in the destination register Rd.
+ */
+static bool trans_ORI(DisasContext *ctx, arg_ORI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = (a->imm);
+
+ tcg_gen_ori_tl(Rd, Rd, Imm); /* Rd = Rd | Imm */
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0x00); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Performs the logical EOR between the contents of register Rd and
+ * register Rr and places the result in the destination register Rd.
+ */
+static bool trans_EOR(DisasContext *ctx, arg_EOR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+
+ tcg_gen_xor_tl(Rd, Rd, Rr);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Vf, 0);
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * This instruction performs a one's complement of register Rd:
+ * Rd <- $FF - Rd. The C Flag is set and the V Flag is cleared.
+ */
+static bool trans_COM(DisasContext *ctx, arg_COM *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_xori_tl(Rd, Rd, 0xff);
+
+ /* update status register */
+ tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */
+ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Replaces the contents of register Rd with its two's complement; the
+ * value $80 is left unchanged.
+ */
+static bool trans_NEG(DisasContext *ctx, arg_NEG *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_const_i32(0);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, t0, Rd); /* R = 0 - Rd */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, t0, Rd);
+ gen_sub_Vf(R, t0, Rd);
+ gen_ZNSf(R);
+
+ /* update output registers */
+ tcg_gen_mov_tl(Rd, R);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * Adds one (1) to the contents of register Rd and places the result in the
+ * destination register Rd. The C Flag in SREG is not affected by the
+ * operation, thus allowing the INC instruction to be used on a loop counter in
+ * multiple-precision computations. When operating on unsigned numbers, only
+ * BREQ and BRNE branches can be expected to perform consistently. When
+ * operating on two's complement values, all signed branches are available.
+ */
+static bool trans_INC(DisasContext *ctx, arg_INC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_addi_tl(Rd, Rd, 1);
+ tcg_gen_andi_tl(Rd, Rd, 0xff);
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x80); /* Vf = Rd == 0x80 */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * Subtracts one (1) from the contents of register Rd and places the result
+ * in the destination register Rd. The C Flag in SREG is not affected by the
+ * operation, thus allowing the DEC instruction to be used on a loop counter in
+ * multiple-precision computations. When operating on unsigned values, only
+ * BREQ and BRNE branches can be expected to perform consistently. When
+ * operating on two's complement values, all signed branches are available.
+ */
+static bool trans_DEC(DisasContext *ctx, arg_DEC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_subi_tl(Rd, Rd, 1); /* Rd = Rd - 1 */
+ tcg_gen_andi_tl(Rd, Rd, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Vf, Rd, 0x7f); /* Vf = Rd == 0x7f */
+ gen_ZNSf(Rd);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit unsigned multiplication.
+ */
+static bool trans_MUL(DisasContext *ctx, arg_MUL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
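+
+/*
+ * A numeric example of the above: MUL with Rd = Rr = 200 (0xc8) produces
+ * R = 40000 = 0x9c40, so R0 = 0x40, R1 = 0x9c, Cf = bit 15 = 1 and Zf = 0.
+ */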
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication.
+ */
+static bool trans_MULS(DisasContext *ctx, arg_MULS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
+ * signed and an unsigned number.
+ */
+static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make R 16 bits */
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit unsigned
+ * multiplication and shifts the result one bit left.
+ */
+static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_mul_tl(R, Rd, Rr); /* R = Rd * Rr */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
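+
+/*
+ * Read as 1.7 fixed point, 0x40 encodes 0.5, so FMUL on 0x40 and 0x40
+ * yields the product 0x1000; Cf comes from bit 15 before the shift (0
+ * here), and the shifted result 0x2000 in R1:R0 is 0.25 in 1.15 format.
+ */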
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit signed multiplication
+ * and shifts the result one bit left.
+ */
+static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_ext8s_tl(t1, Rr); /* make Rr full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, t1); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs 8-bit x 8-bit -> 16-bit multiplication of a
+ * signed and an unsigned number and shifts the result one bit left.
+ */
+static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MUL)) {
+ return true;
+ }
+
+ TCGv R0 = cpu_r[0];
+ TCGv R1 = cpu_r[1];
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_ext8s_tl(t0, Rd); /* make Rd full 32 bit signed */
+ tcg_gen_mul_tl(R, t0, Rr); /* R = Rd * Rr */
+ tcg_gen_andi_tl(R, R, 0xffff); /* make it 16 bits */
+
+ /* update status register */
+ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+
+ /* update output registers */
+ tcg_gen_shli_tl(R, R, 1);
+ tcg_gen_andi_tl(R0, R, 0xff);
+ tcg_gen_shri_tl(R1, R, 8);
+ tcg_gen_andi_tl(R1, R1, 0xff);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * The module is an instruction set extension to the AVR CPU, performing
+ * DES iterations. The 64-bit data block (plaintext or ciphertext) is placed in
+ * the CPU register file, registers R0-R7, where LSB of data is placed in LSB
+ * of R0 and MSB of data is placed in MSB of R7. The full 64-bit key (including
+ * parity bits) is placed in registers R8-R15, organized in the register file
+ * with LSB of key in LSB of R8 and MSB of key in MSB of R15. Executing one DES
+ * instruction performs one round in the DES algorithm. Sixteen rounds must be
+ * executed in increasing order to form the correct DES ciphertext or
+ * plaintext. Intermediate results are stored in the register file (R0-R15)
+ * after each DES instruction. The instruction's operand (K) determines which
+ * round is executed, and the half carry flag (H) determines whether encryption
+ * or decryption is performed. The DES algorithm is described in
+ * "Specifications for the Data Encryption Standard" (Federal Information
+ * Processing Standards Publication 46). Intermediate results in this
+ * implementation differ from the standard because the initial permutation and
+ * the inverse initial permutation are performed each iteration. This does not
+ * affect the result in the final ciphertext or plaintext, but reduces
+ * execution time.
+ */
+static bool trans_DES(DisasContext *ctx, arg_DES *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_DES)) {
+ return true;
+ }
+
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+
+ return true;
+}
+
+/*
+ * Branch Instructions
+ */
+static void gen_jmp_ez(DisasContext *ctx)
+{
+ tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
+ tcg_gen_or_tl(cpu_pc, cpu_pc, cpu_eind);
+ ctx->base.is_jmp = DISAS_LOOKUP;
+}
+
+static void gen_jmp_z(DisasContext *ctx)
+{
+ tcg_gen_deposit_tl(cpu_pc, cpu_r[30], cpu_r[31], 8, 8);
+ ctx->base.is_jmp = DISAS_LOOKUP;
+}
+
+static void gen_push_ret(DisasContext *ctx, int ret)
+{
+ if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
+
+ TCGv t0 = tcg_const_i32((ret & 0x0000ff));
+
+ tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(t0);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
+
+ TCGv t0 = tcg_const_i32((ret & 0x00ffff));
+
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(t0);
+
+ } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
+
+ TCGv lo = tcg_const_i32((ret & 0x0000ff));
+ TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8);
+
+ tcg_gen_qemu_st_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 2);
+ tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+}
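+
+/*
+ * A worked example of the 3-byte-PC case above: pushing ret = 0x123456
+ * with SP = S stores 0x56 at S, then the big-endian word 0x1234 at
+ * S-2/S-1, and leaves SP = S-3, matching AVR's post-decrement push.
+ */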
+
+static void gen_pop_ret(DisasContext *ctx, TCGv ret)
+{
+ if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) {
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_UB);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_2_BYTE_PC)) {
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(ret, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) {
+ TCGv lo = tcg_temp_new_i32();
+ TCGv hi = tcg_temp_new_i32();
+
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ tcg_gen_qemu_ld_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW);
+
+ tcg_gen_addi_tl(cpu_sp, cpu_sp, 2);
+ tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB);
+
+ tcg_gen_deposit_tl(ret, lo, hi, 8, 16);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ const TranslationBlock *tb = ctx->base.tb;
+
+ if (translator_use_goto_tb(&ctx->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb(tb, n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
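+
+/*
+ * translator_use_goto_tb() decides whether the destination may be reached
+ * by direct block chaining (goto_tb/exit_tb, patched in place once the
+ * target block exists) or must take the indirect lookup_and_goto_ptr()
+ * path, e.g. when the destination lies outside the current guest page.
+ */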
+
+/*
+ * Relative jump to an address within PC - 2K + 1 and PC + 2K (words). For
+ * AVR microcontrollers with Program memory not exceeding 4K words (8KB) this
+ * instruction can address the entire memory from every address location. See
+ * also JMP.
+ */
+static bool trans_RJMP(DisasContext *ctx, arg_RJMP *a)
+{
+ int dst = ctx->npc + a->imm;
+
+ gen_goto_tb(ctx, 0, dst);
+
+ return true;
+}
+
+/*
+ * Indirect jump to the address pointed to by the Z (16 bits) Pointer
+ * Register in the Register File. The Z-pointer Register is 16 bits wide and
+ * allows jump within the lowest 64K words (128KB) section of Program memory.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_IJMP(DisasContext *ctx, arg_IJMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
+ return true;
+ }
+
+ gen_jmp_z(ctx);
+
+ return true;
+}
+
+/*
+ * Indirect jump to the address pointed to by the Z (16 bits) Pointer
+ * Register in the Register File and the EIND Register in the I/O space. This
+ * instruction allows for indirect jumps to the entire 4M (words) Program
+ * memory space. See also IJMP. This instruction is not available in all
+ * devices. Refer to the device specific instruction set summary.
+ */
+static bool trans_EIJMP(DisasContext *ctx, arg_EIJMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
+ return true;
+ }
+
+ gen_jmp_ez(ctx);
+ return true;
+}
+
+/*
+ * Jump to an address within the entire 4M (words) Program memory. See also
+ * RJMP. This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
+ return true;
+ }
+
+ gen_goto_tb(ctx, 0, a->imm);
+
+ return true;
+}
+
+/*
+ * Relative call to an address within PC - 2K + 1 and PC + 2K (words). The
+ * return address (the instruction after the RCALL) is stored onto the Stack.
+ * See also CALL. For AVR microcontrollers with Program memory not exceeding 4K
+ * words (8KB) this instruction can address the entire memory from every
+ * address location. The Stack Pointer uses a post-decrement scheme during
+ * RCALL.
+ */
+static bool trans_RCALL(DisasContext *ctx, arg_RCALL *a)
+{
+ int ret = ctx->npc;
+ int dst = ctx->npc + a->imm;
+
+ gen_push_ret(ctx, ret);
+ gen_goto_tb(ctx, 0, dst);
+
+ return true;
+}
+
+/*
+ * Calls to a subroutine within the entire 4M (words) Program memory. The
+ * return address (to the instruction after the CALL) will be stored onto the
+ * Stack. See also RCALL. The Stack Pointer uses a post-decrement scheme during
+ * CALL. This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_ICALL(DisasContext *ctx, arg_ICALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_IJMP_ICALL)) {
+ return true;
+ }
+
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_jmp_z(ctx);
+
+ return true;
+}
+
+/*
+ * Indirect call of a subroutine pointed to by the Z (16 bits) Pointer
+ * Register in the Register File and the EIND Register in the I/O space. This
+ * instruction allows for indirect calls to the entire 4M (words) Program
+ * memory space. See also ICALL. The Stack Pointer uses a post-decrement scheme
+ * during EICALL. This instruction is not available in all devices. Refer to
+ * the device specific instruction set summary.
+ */
+static bool trans_EICALL(DisasContext *ctx, arg_EICALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_EIJMP_EICALL)) {
+ return true;
+ }
+
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_jmp_ez(ctx);
+ return true;
+}
+
+/*
+ * Calls to a subroutine within the entire Program memory. The return
+ * address (to the instruction after the CALL) will be stored onto the Stack.
+ * (See also RCALL). The Stack Pointer uses a post-decrement scheme during
+ * CALL. This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_CALL(DisasContext *ctx, arg_CALL *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_JMP_CALL)) {
+ return true;
+ }
+
+ int Imm = a->imm;
+ int ret = ctx->npc;
+
+ gen_push_ret(ctx, ret);
+ gen_goto_tb(ctx, 0, Imm);
+
+ return true;
+}
+
+/*
+ * Returns from subroutine. The return address is loaded from the STACK.
+ * The Stack Pointer uses a preincrement scheme during RET.
+ */
+static bool trans_RET(DisasContext *ctx, arg_RET *a)
+{
+ gen_pop_ret(ctx, cpu_pc);
+
+ ctx->base.is_jmp = DISAS_LOOKUP;
+ return true;
+}
+
+/*
+ * Returns from interrupt. The return address is loaded from the STACK and
+ * the Global Interrupt Flag is set. Note that the Status Register is not
+ * automatically stored when entering an interrupt routine, and it is not
+ * restored when returning from an interrupt routine. This must be handled by
+ * the application program. The Stack Pointer uses a pre-increment scheme
+ * during RETI.
+ */
+static bool trans_RETI(DisasContext *ctx, arg_RETI *a)
+{
+ gen_pop_ret(ctx, cpu_pc);
+ tcg_gen_movi_tl(cpu_If, 1);
+
+ /* Need to return to main loop to re-evaluate interrupts. */
+ ctx->base.is_jmp = DISAS_EXIT;
+ return true;
+}
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr, and
+ * skips the next instruction if Rd = Rr.
+ */
+static bool trans_CPSE(DisasContext *ctx, arg_CPSE *a)
+{
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = cpu_r[a->rd];
+ ctx->skip_var1 = cpu_r[a->rr];
+ return true;
+}
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr.
+ * None of the registers are changed. All conditional branches can be used
+ * after this instruction.
+ */
+static bool trans_CP(DisasContext *ctx, arg_CP *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs a compare between two registers Rd and Rr and
+ * also takes into account the previous carry. None of the registers are
+ * changed. All conditional branches can be used after this instruction.
+ */
+static bool trans_CPC(DisasContext *ctx, arg_CPC *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+ TCGv R = tcg_temp_new_i32();
+ TCGv zero = tcg_const_i32(0);
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr - Cf */
+ tcg_gen_sub_tl(R, R, cpu_Cf);
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_NSf(R);
+
+ /*
+ * The Z flag keeps its previous value when the result is zero;
+ * it is cleared otherwise.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(R);
+
+ return true;
+}
+
+/*
+ * This instruction performs a compare between register Rd and a constant.
+ * The register is not changed. All conditional branches can be used after this
+ * instruction.
+ */
+static bool trans_CPI(DisasContext *ctx, arg_CPI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int Imm = a->imm;
+ TCGv Rr = tcg_const_i32(Imm);
+ TCGv R = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(R, Rd, Rr); /* R = Rd - Rr */
+ tcg_gen_andi_tl(R, R, 0xff); /* make it 8 bits */
+
+ /* update status register */
+ gen_sub_CHf(R, Rd, Rr);
+ gen_sub_Vf(R, Rd, Rr);
+ gen_ZNSf(R);
+
+ tcg_temp_free_i32(R);
+ tcg_temp_free_i32(Rr);
+
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in a register and skips the next
+ * instruction if the bit is cleared.
+ */
+static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a)
+{
+ TCGv Rr = cpu_r[a->rr];
+
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = tcg_temp_new();
+ ctx->free_skip_var0 = true;
+
+ tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in a register and skips the next
+ * instruction if the bit is set.
+ */
+static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a)
+{
+ TCGv Rr = cpu_r[a->rr];
+
+ ctx->skip_cond = TCG_COND_NE;
+ ctx->skip_var0 = tcg_temp_new();
+ ctx->free_skip_var0 = true;
+
+ tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit);
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in an I/O Register and skips the
+ * next instruction if the bit is cleared. This instruction operates on the
+ * lower 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a)
+{
+ TCGv temp = tcg_const_i32(a->reg);
+
+ gen_helper_inb(temp, cpu_env, temp);
+ tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ ctx->skip_cond = TCG_COND_EQ;
+ ctx->skip_var0 = temp;
+ ctx->free_skip_var0 = true;
+
+ return true;
+}
+
+/*
+ * This instruction tests a single bit in an I/O Register and skips the
+ * next instruction if the bit is set. This instruction operates on the lower
+ * 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a)
+{
+ TCGv temp = tcg_const_i32(a->reg);
+
+ gen_helper_inb(temp, cpu_env, temp);
+ tcg_gen_andi_tl(temp, temp, 1 << a->bit);
+ ctx->skip_cond = TCG_COND_NE;
+ ctx->skip_var0 = temp;
+ ctx->free_skip_var0 = true;
+
+ return true;
+}
+
+/*
+ * Conditional relative branch. Tests a single bit in SREG and branches
+ * relatively to PC if the bit is cleared. This instruction branches relatively
+ * to PC in either direction (PC - 63 < = destination <= PC + 64). The
+ * parameter k is the offset from PC and is represented in two's complement
+ * form.
+ */
+static bool trans_BRBC(DisasContext *ctx, arg_BRBC *a)
+{
+ TCGLabel *not_taken = gen_new_label();
+
+ TCGv var;
+
+ switch (a->bit) {
+ case 0x00:
+ var = cpu_Cf;
+ break;
+ case 0x01:
+ var = cpu_Zf;
+ break;
+ case 0x02:
+ var = cpu_Nf;
+ break;
+ case 0x03:
+ var = cpu_Vf;
+ break;
+ case 0x04:
+ var = cpu_Sf;
+ break;
+ case 0x05:
+ var = cpu_Hf;
+ break;
+ case 0x06:
+ var = cpu_Tf;
+ break;
+ case 0x07:
+ var = cpu_If;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, var, 0, not_taken);
+ gen_goto_tb(ctx, 0, ctx->npc + a->imm);
+ gen_set_label(not_taken);
+
+ ctx->base.is_jmp = DISAS_CHAIN;
+ return true;
+}
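+
+/*
+ * Note that the familiar branch mnemonics are fixed-bit forms of
+ * BRBC/BRBS: e.g. "BRCC k" is BRBC 0, k (tests Cf) and "BRNE k" is
+ * BRBC 1, k (tests Zf), matching the bit numbering in the switch above.
+ */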
+
+/*
+ * Conditional relative branch. Tests a single bit in SREG and branches
+ * relatively to PC if the bit is set. This instruction branches relatively to
+ * PC in either direction (PC - 63 <= destination <= PC + 64). The parameter k
+ * is the offset from PC and is represented in two's complement form.
+ */
+static bool trans_BRBS(DisasContext *ctx, arg_BRBS *a)
+{
+ TCGLabel *not_taken = gen_new_label();
+
+ TCGv var;
+
+ switch (a->bit) {
+ case 0x00:
+ var = cpu_Cf;
+ break;
+ case 0x01:
+ var = cpu_Zf;
+ break;
+ case 0x02:
+ var = cpu_Nf;
+ break;
+ case 0x03:
+ var = cpu_Vf;
+ break;
+ case 0x04:
+ var = cpu_Sf;
+ break;
+ case 0x05:
+ var = cpu_Hf;
+ break;
+ case 0x06:
+ var = cpu_Tf;
+ break;
+ case 0x07:
+ var = cpu_If;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_brcondi_i32(TCG_COND_EQ, var, 0, not_taken);
+ gen_goto_tb(ctx, 0, ctx->npc + a->imm);
+ gen_set_label(not_taken);
+
+ ctx->base.is_jmp = DISAS_CHAIN;
+ return true;
+}
+
+/*
+ * Data Transfer Instructions
+ */
+
+/*
+ * In the gen_set_addr & gen_get_addr functions below:
+ * H is assumed to be in 0x00ff0000 format
+ * M is assumed to be in 0x000000ff format
+ * L is assumed to be in 0x000000ff format
+ */
+static void gen_set_addr(TCGv addr, TCGv H, TCGv M, TCGv L)
+{
+ tcg_gen_andi_tl(L, addr, 0x000000ff);
+
+ tcg_gen_andi_tl(M, addr, 0x0000ff00);
+ tcg_gen_shri_tl(M, M, 8);
+
+ tcg_gen_andi_tl(H, addr, 0x00ff0000);
+}
+
+static void gen_set_xaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampX, cpu_r[27], cpu_r[26]);
+}
+
+static void gen_set_yaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampY, cpu_r[29], cpu_r[28]);
+}
+
+static void gen_set_zaddr(TCGv addr)
+{
+ gen_set_addr(addr, cpu_rampZ, cpu_r[31], cpu_r[30]);
+}
+
+static TCGv gen_get_addr(TCGv H, TCGv M, TCGv L)
+{
+ TCGv addr = tcg_temp_new_i32();
+
+ tcg_gen_deposit_tl(addr, M, H, 8, 8);
+ tcg_gen_deposit_tl(addr, L, addr, 8, 16);
+
+ return addr;
+}
+
+static TCGv gen_get_xaddr(void)
+{
+ return gen_get_addr(cpu_rampX, cpu_r[27], cpu_r[26]);
+}
+
+static TCGv gen_get_yaddr(void)
+{
+ return gen_get_addr(cpu_rampY, cpu_r[29], cpu_r[28]);
+}
+
+static TCGv gen_get_zaddr(void)
+{
+ return gen_get_addr(cpu_rampZ, cpu_r[31], cpu_r[30]);
+}
+
+/*
+ * Helpers for data-space accesses. When the TB was built with
+ * TB_FLAGS_FULL_ACCESS, the access may touch the memory-mapped register file
+ * or an I/O register, so it is routed through the fullwr/fullrd helpers;
+ * otherwise a plain byte load/store to guest memory is generated.
+ */
+static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
+{
+ if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
+ gen_helper_fullwr(cpu_env, data, addr);
+ } else {
+ tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+ }
+}
+
+static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
+{
+ if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
+ gen_helper_fullrd(data, cpu_env, addr);
+ } else {
+ tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+ }
+}
+
+/*
+ * This instruction makes a copy of one register into another. The source
+ * register Rr is left unchanged, while the destination register Rd is loaded
+ * with a copy of Rr.
+ */
+static bool trans_MOV(DisasContext *ctx, arg_MOV *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv Rr = cpu_r[a->rr];
+
+ tcg_gen_mov_tl(Rd, Rr);
+
+ return true;
+}
+
+/*
+ * This instruction makes a copy of one register pair into another register
+ * pair. The source register pair Rr+1:Rr is left unchanged, while the
+ * destination register pair Rd+1:Rd is loaded with a copy of Rr + 1:Rr. This
+ * instruction is not available in all devices. Refer to the device specific
+ * instruction set summary.
+ */
+static bool trans_MOVW(DisasContext *ctx, arg_MOVW *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_MOVW)) {
+ return true;
+ }
+
+ TCGv RdL = cpu_r[a->rd];
+ TCGv RdH = cpu_r[a->rd + 1];
+ TCGv RrL = cpu_r[a->rr];
+ TCGv RrH = cpu_r[a->rr + 1];
+
+ tcg_gen_mov_tl(RdH, RrH);
+ tcg_gen_mov_tl(RdL, RrL);
+
+ return true;
+}
+
+/*
+ * Loads an 8-bit constant directly to registers 16 to 31.
+ */
+static bool trans_LDI(DisasContext *ctx, arg_LDI *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ int imm = a->imm;
+
+ tcg_gen_movi_tl(Rd, imm);
+
+ return true;
+}
+
+/*
+ * Loads one byte from the data space to a register. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the register file only. The EEPROM has a separate address space.
+ * A 16-bit address must be supplied. Memory access is limited to the current
+ * data segment of 64KB. The LDS instruction uses the RAMPD Register to access
+ * memory above 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPD Register in the I/O area has to be changed.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_LDS(DisasContext *ctx, arg_LDS *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_rampD;
+ a->imm = next_word(ctx);
+
+ tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
+ tcg_gen_shli_tl(addr, addr, 16);
+ tcg_gen_ori_tl(addr, addr, a->imm);
+
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
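+
+/*
+ * LDS is a two-word instruction: the decoder leaves imm = 0 (see the
+ * @ldst_s comment in insn.decode) and trans_LDS fetches the 16-bit
+ * address itself via next_word(). On a part with RAMPD = 0, "LDS r16,
+ * 0x0123" therefore loads r16 from data address 0x0123.
+ */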
+
+/*
+ * Loads one byte indirect from the data space to a register. For parts
+ * with SRAM, the data space consists of the Register File, I/O memory and
+ * internal SRAM (and external SRAM if applicable). For parts without SRAM, the
+ * data space consists of the Register File only. In some parts the Flash
+ * Memory has been mapped to the data space and can be read using this command.
+ * The EEPROM has a separate address space. The data location is pointed to by
+ * the X (16 bits) Pointer Register in the Register File. Memory access is
+ * limited to the current data segment of 64KB. To access another data segment
+ * in devices with more than 64KB data space, the RAMPX Register in the I/O
+ * area has to be changed. The X-pointer Register can either be left unchanged
+ * by the operation, or it can be post-incremented or pre-decremented. These
+ * features are especially suited for accessing arrays, tables, and Stack
+ * Pointer usage of the X-pointer Register. Note that only the low byte of the
+ * X-pointer is updated in devices with no more than 256 bytes data space. For
+ * such devices, the high byte of the pointer is not used by this instruction
+ * and can be used for other purposes. The RAMPX Register in the I/O area is
+ * updated in parts with more than 64KB data space or more than 64KB Program
+ * memory, and the increment/decrement is added to the entire 24-bit address on
+ * such devices. Not all variants of this instruction are available in all
+ * devices. Refer to the device specific instruction set summary. In the
+ * Reduced Core tinyAVR the LD instruction can be used to achieve the same
+ * operation as LPM since the program memory is mapped to the data memory
+ * space.
+ */
+static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_xaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect with or without displacement from the data space
+ * to a register. For parts with SRAM, the data space consists of the Register
+ * File, I/O memory and internal SRAM (and external SRAM if applicable). For
+ * parts without SRAM, the data space consists of the Register File only. In
+ * some parts the Flash Memory has been mapped to the data space and can be
+ * read using this command. The EEPROM has a separate address space. The data
+ * location is pointed to by the Y (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPY Register in the I/O area has to be changed. The Y-pointer Register
+ * can either be left unchanged by the operation, or it can be post-incremented
+ * or pre-decremented. These features are especially suited for accessing
+ * arrays, tables, and Stack Pointer usage of the Y-pointer Register. Note that
+ * only the low byte of the Y-pointer is updated in devices with no more than
+ * 256 bytes data space. For such devices, the high byte of the pointer is not
+ * used by this instruction and can be used for other purposes. The RAMPY
+ * Register in the I/O area is updated in parts with more than 64KB data space
+ * or more than 64KB Program memory, and the increment/decrement/displacement
+ * is added to the entire 24-bit address on such devices. Not all variants of
+ * this instruction are available in all devices. Refer to the device specific
+ * instruction set summary. In the Reduced Core tinyAVR the LD instruction can
+ * be used to achieve the same operation as LPM since the program memory is
+ * mapped to the data memory space.
+ */
+static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect with or without displacement from the data space
+ * to a register. For parts with SRAM, the data space consists of the Register
+ * File, I/O memory and internal SRAM (and external SRAM if applicable). For
+ * parts without SRAM, the data space consists of the Register File only. In
+ * some parts the Flash Memory has been mapped to the data space and can be
+ * read using this command. The EEPROM has a separate address space. The data
+ * location is pointed to by the Z (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed. The Z-pointer Register
+ * can either be left unchanged by the operation, or it can be post-incremented
+ * or pre-decremented. These features are especially suited for Stack Pointer
+ * usage of the Z-pointer Register, however because the Z-pointer Register can
+ * be used for indirect subroutine calls, indirect jumps and table lookup, it
+ * is often more convenient to use the X or Y-pointer as a dedicated Stack
+ * Pointer. Note that only the low byte of the Z-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPZ Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the
+ * increment/decrement/displacement is added to the entire 24-bit address on
+ * such devices. Not all variants of this instruction are available in all
+ * devices. Refer to the device specific instruction set summary. In the
+ * Reduced Core tinyAVR the LD instruction can be used to achieve the same
+ * operation as LPM since the program memory is mapped to the data memory
+ * space. For using the Z-pointer for table lookup in Program memory see the
+ * LPM and ELPM instructions.
+ */
+static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_load(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_load(ctx, Rd, addr);
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_load(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte from a Register to the data space. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the Register File only. The EEPROM has a separate address space.
+ * A 16-bit address must be supplied. Memory access is limited to the current
+ * data segment of 64KB. The STS instruction uses the RAMPD Register to access
+ * memory above 64KB. To access another data segment in devices with more than
+ * 64KB data space, the RAMPD Register in the I/O area has to be changed.
+ * This instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ */
+static bool trans_STS(DisasContext *ctx, arg_STS *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_rampD;
+ a->imm = next_word(ctx);
+
+ tcg_gen_mov_tl(addr, H); /* addr = H:M:L */
+ tcg_gen_shli_tl(addr, addr, 16);
+ tcg_gen_ori_tl(addr, addr, a->imm);
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect from a register to data space. For parts with SRAM,
+ * the data space consists of the Register File, I/O memory, and internal SRAM
+ * (and external SRAM if applicable). For parts without SRAM, the data space
+ * consists of the Register File only. The EEPROM has a separate address space.
+ *
+ * The data location is pointed to by the X (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPX Register in the I/O area has to be changed.
+ *
+ * The X-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the
+ * X-pointer Register. Note that only the low byte of the X-pointer is updated
+ * in devices with no more than 256 bytes data space. For such devices, the high
+ * byte of the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPX Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement is added to the entire 24-bit address on such devices.
+ */
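+/*
+ * The three store forms below in AVR assembly (operands are illustrative):
+ *
+ *   st  X, r16      ; trans_STX1: store, X left unchanged
+ *   st  X+, r16     ; trans_STX2: store, then X = X + 1
+ *   st  -X, r16     ; trans_STX3: X = X - 1, then store
+ */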
+static bool trans_STX1(DisasContext *ctx, arg_STX1 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STX2(DisasContext *ctx, arg_STX2 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STX3(DisasContext *ctx, arg_STX3 *a)
+{
+ TCGv Rd = cpu_r[a->rr];
+ TCGv addr = gen_get_xaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+ gen_set_xaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect with or without displacement from a register to data
+ * space. For parts with SRAM, the data space consists of the Register File, I/O
+ * memory, and internal SRAM (and external SRAM if applicable). For parts
+ * without SRAM, the data space consists of the Register File only. The EEPROM
+ * has a separate address space.
+ *
+ * The data location is pointed to by the Y (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPY Register in the I/O area has to be changed.
+ *
+ * The Y-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the Y-pointer
+ * Register. Note that only the low byte of the Y-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPY Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement / displacement is added to the entire 24-bit address on such
+ * devices.
+ */
+static bool trans_STY2(DisasContext *ctx, arg_STY2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STY3(DisasContext *ctx, arg_STY3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+ gen_set_yaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STDY(DisasContext *ctx, arg_STDY *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_yaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Stores one byte indirect with or without displacement from a register to data
+ * space. For parts with SRAM, the data space consists of the Register File, I/O
+ * memory, and internal SRAM (and external SRAM if applicable). For parts
+ * without SRAM, the data space consists of the Register File only. The EEPROM
+ * has a separate address space.
+ *
+ * The data location is pointed to by the Z (16 bits) Pointer Register in the
+ * Register File. Memory access is limited to the current data segment of 64KB.
+ * To access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register can either be left unchanged by the operation, or it
+ * can be post-incremented or pre-decremented. These features are especially
+ * suited for accessing arrays, tables, and Stack Pointer usage of the Z-pointer
+ * Register. Note that only the low byte of the Z-pointer is updated in devices
+ * with no more than 256 bytes data space. For such devices, the high byte of
+ * the pointer is not used by this instruction and can be used for other
+ * purposes. The RAMPZ Register in the I/O area is updated in parts with more
+ * than 64KB data space or more than 64KB Program memory, and the increment /
+ * decrement / displacement is added to the entire 24-bit address on such
+ * devices.
+ */
+static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */
+ gen_data_store(ctx, Rd, addr);
+
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */
+ gen_data_store(ctx, Rd, addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte pointed to by the Z-register into the destination
+ * register Rd. This instruction features a 100% space effective constant
+ * initialization or constant data fetch. The Program memory is organized in
+ * 16-bit words while the Z-pointer is a byte address. Thus, the least
+ * significant bit of the Z-pointer selects either low byte (ZLSB = 0) or high
+ * byte (ZLSB = 1). This instruction can address the first 64KB (32K words) of
+ * Program memory. The Z-pointer Register can either be left unchanged by the
+ * operation, or it can be incremented. The incrementation does not apply to
+ * the RAMPZ Register.
+ *
+ * Devices with Self-Programming capability can use the LPM instruction to read
+ * the Fuse and Lock bit values.
+ */
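+/*
+ * Example: with Z = 0x0101, the word at program address 0x0080 is
+ * selected and, since ZLSB = 1, its high byte is loaded.
+ */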
+static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[0];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_LPMX)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = tcg_temp_new_i32();
+ TCGv H = cpu_r[31];
+ TCGv L = cpu_r[30];
+
+ tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
+ tcg_gen_or_tl(addr, addr, L);
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ tcg_gen_andi_tl(L, addr, 0xff);
+ tcg_gen_shri_tl(addr, addr, 8);
+ tcg_gen_andi_tl(H, addr, 0xff);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte pointed to by the Z-register and the RAMPZ Register in
+ * the I/O space, and places this byte in the destination register Rd. This
+ * instruction features a 100% space effective constant initialization or
+ * constant data fetch. The Program memory is organized in 16-bit words while
+ * the Z-pointer is a byte address. Thus, the least significant bit of the
+ * Z-pointer selects either low byte (ZLSB = 0) or high byte (ZLSB = 1). This
+ * instruction can address the entire Program memory space. The Z-pointer
+ * Register can either be left unchanged by the operation, or it can be
+ * incremented. The incrementation applies to the entire 24-bit concatenation
+ * of the RAMPZ and Z-pointer Registers.
+ *
+ * Devices with Self-Programming capability can use the ELPM instruction to
+ * read the Fuse and Lock bit values.
+ */
+static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[0];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPM)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_ELPMX)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+
+ tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
+ gen_set_zaddr(addr);
+
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * SPM can be used to erase a page in the Program memory, to write a page
+ * in the Program memory (that is already erased), and to set Boot Loader Lock
+ * bits. In some devices, the Program memory can be written one word at a time,
+ * in other devices an entire page can be programmed simultaneously after first
+ * filling a temporary page buffer. In all cases, the Program memory must be
+ * erased one page at a time. When erasing the Program memory, the RAMPZ and
+ * Z-register are used as page address. When writing the Program memory, the
+ * RAMPZ and Z-register are used as page or word address, and the R1:R0
+ * register pair is used as data(1). When setting the Boot Loader Lock bits,
+ * the R1:R0 register pair is used as data. Refer to the device documentation
+ * for detailed description of SPM usage. This instruction can address the
+ * entire Program memory.
+ *
+ * The SPM instruction is not available in all devices. Refer to the device
+ * specific instruction set summary.
+ *
+ * Note: 1. R1 determines the instruction high byte, and R0 determines the
+ * instruction low byte.
+ */
+static bool trans_SPM(DisasContext *ctx, arg_SPM *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_SPM)) {
+ return true;
+ }
+
+ return true;
+}
+
+static bool trans_SPMX(DisasContext *ctx, arg_SPMX *a)
+{
+ /* TODO */
+ if (!avr_have_feature(ctx, AVR_FEATURE_SPMX)) {
+ return true;
+ }
+
+ return true;
+}
+
+/*
+ * Loads data from the I/O Space (Ports, Timers, Configuration Registers,
+ * etc.) into register Rd in the Register File.
+ */
+static bool trans_IN(DisasContext *ctx, arg_IN *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv port = tcg_const_i32(a->imm);
+
+ gen_helper_inb(Rd, cpu_env, port);
+
+ tcg_temp_free_i32(port);
+
+ return true;
+}
+
+/*
+ * Stores data from register Rr in the Register File to I/O Space (Ports,
+ * Timers, Configuration Registers, etc.).
+ */
+static bool trans_OUT(DisasContext *ctx, arg_OUT *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv port = tcg_const_i32(a->imm);
+
+ gen_helper_outb(cpu_env, port, Rd);
+
+ tcg_temp_free_i32(port);
+
+ return true;
+}
+
+/*
+ * This instruction stores the contents of register Rr on the STACK. The
+ * Stack Pointer is post-decremented by 1 after the PUSH. This instruction is
+ * not available in all devices. Refer to the device specific instruction set
+ * summary.
+ */
+static bool trans_PUSH(DisasContext *ctx, arg_PUSH *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ gen_data_store(ctx, Rd, cpu_sp);
+ tcg_gen_subi_tl(cpu_sp, cpu_sp, 1);
+
+ return true;
+}
+
+/*
+ * This instruction loads register Rd with a byte from the STACK. The Stack
+ * Pointer is pre-incremented by 1 before the POP. This instruction is not
+ * available in all devices. Refer to the device specific instruction set
+ * summary.
+ */
+static bool trans_POP(DisasContext *ctx, arg_POP *a)
+{
+ /*
+ * Using a temp to work around some strange behaviour:
+ * tcg_gen_addi_tl(cpu_sp, cpu_sp, 1);
+ * gen_data_load(ctx, Rd, cpu_sp);
+ * seems to cause the add to happen twice.
+ * This doesn't happen if either the add or the load is removed.
+ */
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_addi_tl(t1, cpu_sp, 1);
+ gen_data_load(ctx, Rd, t1);
+ tcg_gen_mov_tl(cpu_sp, t1);
+
+ return true;
+}
+
+/*
+ * Exchanges one byte indirect between register and data space. The data
+ * location is pointed to by the Z (16 bits) Pointer Register in the Register
+ * File. Memory access is limited to the current data segment of 64KB. To
+ * access another data segment in devices with more than 64KB data space, the
+ * RAMPZ Register in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for writing/reading status bits stored in SRAM.
+ */
+static bool trans_XCH(DisasContext *ctx, arg_XCH *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv addr = gen_get_zaddr();
+
+ gen_data_load(ctx, t0, addr);
+ gen_data_store(ctx, Rd, addr);
+ tcg_gen_mov_tl(Rd, t0);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect from data space into a register and sets the
+ * bits in data space specified by the register. The instruction can only
+ * be used towards internal SRAM. The data location is pointed to by the
+ * Z (16 bits) Pointer Register in the Register File. Memory access is
+ * limited to the current data segment of 64KB. To access another data
+ * segment in devices with more than 64KB data space, the RAMPZ Register
+ * in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for setting status bits stored in SRAM.
+ */
+static bool trans_LAS(DisasContext *ctx, arg_LAS *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rr = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_or_tl(t1, t0, Rr);
+ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect from data space into a register and clears the
+ * bits in data space specified by the register. The instruction can
+ * only be used towards internal SRAM. The data location is pointed to by
+ * the Z (16 bits) Pointer Register in the Register File. Memory access is
+ * limited to the current data segment of 64KB. To access another data
+ * segment in devices with more than 64KB data space, the RAMPZ Register
+ * in the I/O area has to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for clearing status bits stored in SRAM.
+ */
+static bool trans_LAC(DisasContext *ctx, arg_LAC *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rr = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_andc_tl(t1, t0, Rr); /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */
+ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Loads one byte indirect from data space into a register and toggles the
+ * bits in data space specified by the register. The instruction can only
+ * be used towards SRAM. The data location is pointed to by the Z (16 bits)
+ * Pointer Register in the Register File. Memory access is limited to the
+ * current data segment of 64KB. To access another data segment in devices
+ * with more than 64KB data space, the RAMPZ Register in the I/O area has
+ * to be changed.
+ *
+ * The Z-pointer Register is left unchanged by the operation. This instruction
+ * is especially suited for changing status bits stored in SRAM.
+ */
+static bool trans_LAT(DisasContext *ctx, arg_LAT *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_RMW)) {
+ return true;
+ }
+
+ TCGv Rd = cpu_r[a->rd];
+ TCGv addr = gen_get_zaddr();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ gen_data_load(ctx, t0, addr); /* t0 = mem[addr] */
+ tcg_gen_xor_tl(t1, t0, Rd);
+ tcg_gen_mov_tl(Rd, t0); /* Rd = t0 */
+ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(addr);
+
+ return true;
+}
+
+/*
+ * Bit and Bit-test Instructions
+ */
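+/*
+ * Update Z, N, V and S after a right-shift style operation. Assumes
+ * cpu_Cf already holds the bit shifted out, since V is computed as N ^ C.
+ */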
+static void gen_rshift_ZNVSf(TCGv R)
+{
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */
+ tcg_gen_shri_tl(cpu_Nf, R, 7); /* Nf = R(7) */
+ tcg_gen_xor_tl(cpu_Vf, cpu_Nf, cpu_Cf);
+ tcg_gen_xor_tl(cpu_Sf, cpu_Nf, cpu_Vf); /* Sf = Nf ^ Vf */
+}
+
+/*
+ * Shifts all bits in Rd one place to the right. Bit 7 is cleared. Bit 0 is
+ * loaded into the C Flag of the SREG. This operation effectively divides an
+ * unsigned value by two. The C Flag can be used to round the result.
+ */
+static bool trans_LSR(DisasContext *ctx, arg_LSR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1);
+ tcg_gen_shri_tl(Rd, Rd, 1);
+
+ /* update status register */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, Rd, 0); /* Zf = Rd == 0 */
+ tcg_gen_movi_tl(cpu_Nf, 0);
+ tcg_gen_mov_tl(cpu_Vf, cpu_Cf);
+ tcg_gen_mov_tl(cpu_Sf, cpu_Vf);
+
+ return true;
+}
+
+/*
+ * Shifts all bits in Rd one place to the right. The C Flag is shifted into
+ * bit 7 of Rd. Bit 0 is shifted into the C Flag. This operation, combined
+ * with ASR, effectively divides multi-byte signed values by two. Combined with
+ * LSR it effectively divides multi-byte unsigned values by two. The Carry Flag
+ * can be used to round the result.
+ */
+static bool trans_ROR(DisasContext *ctx, arg_ROR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+
+ tcg_gen_shli_tl(t0, cpu_Cf, 7);
+
+ /* update status register */
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1);
+
+ /* update output register */
+ tcg_gen_shri_tl(Rd, Rd, 1);
+ tcg_gen_or_tl(Rd, Rd, t0);
+
+ /* update status register */
+ gen_rshift_ZNVSf(Rd);
+
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
+
+/*
+ * Shifts all bits in Rd one place to the right. Bit 7 is held constant. Bit 0
+ * is loaded into the C Flag of the SREG. This operation effectively divides a
+ * signed value by two without changing its sign. The Carry Flag can be used to
+ * round the result.
+ */
+static bool trans_ASR(DisasContext *ctx, arg_ASR *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+
+ /* update status register */
+ tcg_gen_andi_tl(cpu_Cf, Rd, 1); /* Cf = Rd(0) */
+
+ /* update output register */
+ tcg_gen_andi_tl(t0, Rd, 0x80); /* Rd = (Rd & 0x80) | (Rd >> 1) */
+ tcg_gen_shri_tl(Rd, Rd, 1);
+ tcg_gen_or_tl(Rd, Rd, t0);
+
+ /* update status register */
+ gen_rshift_ZNVSf(Rd);
+
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
+
+/*
+ * Swaps high and low nibbles in a register.
+ */
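+/* For example, 0xA5 becomes 0x5A. */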
+static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(t0, Rd, 0x0f);
+ tcg_gen_shli_tl(t0, t0, 4);
+ tcg_gen_andi_tl(t1, Rd, 0xf0);
+ tcg_gen_shri_tl(t1, t1, 4);
+ tcg_gen_or_tl(Rd, t0, t1);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+
+ return true;
+}
+
+/*
+ * Sets a specified bit in an I/O Register. This instruction operates on
+ * the lower 32 I/O Registers -- addresses 0-31.
+ */
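+/*
+ * E.g. "sbi 0x05, 3" (illustrative operands) reads I/O port 5, ORs in
+ * 1 << 3, and writes the result back.
+ */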
+static bool trans_SBI(DisasContext *ctx, arg_SBI *a)
+{
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_const_i32(a->reg);
+
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_ori_tl(data, data, 1 << a->bit);
+ gen_helper_outb(cpu_env, port, data);
+
+ tcg_temp_free_i32(port);
+ tcg_temp_free_i32(data);
+
+ return true;
+}
+
+/*
+ * Clears a specified bit in an I/O Register. This instruction operates on
+ * the lower 32 I/O Registers -- addresses 0-31.
+ */
+static bool trans_CBI(DisasContext *ctx, arg_CBI *a)
+{
+ TCGv data = tcg_temp_new_i32();
+ TCGv port = tcg_const_i32(a->reg);
+
+ gen_helper_inb(data, cpu_env, port);
+ tcg_gen_andi_tl(data, data, ~(1 << a->bit));
+ gen_helper_outb(cpu_env, port, data);
+
+ tcg_temp_free_i32(data);
+ tcg_temp_free_i32(port);
+
+ return true;
+}
+
+/*
+ * Stores bit b from Rd to the T Flag in SREG (Status Register).
+ */
+static bool trans_BST(DisasContext *ctx, arg_BST *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+
+ tcg_gen_andi_tl(cpu_Tf, Rd, 1 << a->bit);
+ tcg_gen_shri_tl(cpu_Tf, cpu_Tf, a->bit);
+
+ return true;
+}
+
+/*
+ * Copies the T Flag in the SREG (Status Register) to bit b in register Rd.
+ */
+static bool trans_BLD(DisasContext *ctx, arg_BLD *a)
+{
+ TCGv Rd = cpu_r[a->rd];
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */
+ tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */
+ tcg_gen_or_tl(Rd, Rd, t1);
+
+ tcg_temp_free_i32(t1);
+
+ return true;
+}
+
+/*
+ * Sets a single Flag or bit in SREG.
+ */
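+/* SREG bit indices, LSB first: C, Z, N, V, S, H, T, I. */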
+static bool trans_BSET(DisasContext *ctx, arg_BSET *a)
+{
+ switch (a->bit) {
+ case 0x00:
+ tcg_gen_movi_tl(cpu_Cf, 0x01);
+ break;
+ case 0x01:
+ tcg_gen_movi_tl(cpu_Zf, 0x01);
+ break;
+ case 0x02:
+ tcg_gen_movi_tl(cpu_Nf, 0x01);
+ break;
+ case 0x03:
+ tcg_gen_movi_tl(cpu_Vf, 0x01);
+ break;
+ case 0x04:
+ tcg_gen_movi_tl(cpu_Sf, 0x01);
+ break;
+ case 0x05:
+ tcg_gen_movi_tl(cpu_Hf, 0x01);
+ break;
+ case 0x06:
+ tcg_gen_movi_tl(cpu_Tf, 0x01);
+ break;
+ case 0x07:
+ tcg_gen_movi_tl(cpu_If, 0x01);
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * Clears a single Flag in SREG.
+ */
+static bool trans_BCLR(DisasContext *ctx, arg_BCLR *a)
+{
+ switch (a->bit) {
+ case 0x00:
+ tcg_gen_movi_tl(cpu_Cf, 0x00);
+ break;
+ case 0x01:
+ tcg_gen_movi_tl(cpu_Zf, 0x00);
+ break;
+ case 0x02:
+ tcg_gen_movi_tl(cpu_Nf, 0x00);
+ break;
+ case 0x03:
+ tcg_gen_movi_tl(cpu_Vf, 0x00);
+ break;
+ case 0x04:
+ tcg_gen_movi_tl(cpu_Sf, 0x00);
+ break;
+ case 0x05:
+ tcg_gen_movi_tl(cpu_Hf, 0x00);
+ break;
+ case 0x06:
+ tcg_gen_movi_tl(cpu_Tf, 0x00);
+ break;
+ case 0x07:
+ tcg_gen_movi_tl(cpu_If, 0x00);
+ break;
+ }
+
+ return true;
+}
+
+/*
+ * MCU Control Instructions
+ */
+
+/*
+ * The BREAK instruction is used by the On-chip Debug system, and is
+ * normally not used in the application software. When the BREAK instruction is
+ * executed, the AVR CPU is set in the Stopped Mode. This gives the On-chip
+ * Debugger access to internal resources. If any Lock bits are set, or either
+ * the JTAGEN or OCDEN Fuses are unprogrammed, the CPU will treat the BREAK
+ * instruction as a NOP and will not enter the Stopped mode. This instruction
+ * is not available in all devices. Refer to the device specific instruction
+ * set summary.
+ */
+static bool trans_BREAK(DisasContext *ctx, arg_BREAK *a)
+{
+ if (!avr_have_feature(ctx, AVR_FEATURE_BREAK)) {
+ return true;
+ }
+
+#ifdef BREAKPOINT_ON_BREAK
+ tcg_gen_movi_tl(cpu_pc, ctx->npc - 1);
+ gen_helper_debug(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#else
+ /* NOP */
+#endif
+
+ return true;
+}
+
+/*
+ * This instruction performs a single cycle No Operation.
+ */
+static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
+{
+
+ /* NOP */
+
+ return true;
+}
+
+/*
+ * This instruction sets the circuit in sleep mode defined by the MCU
+ * Control Register.
+ */
+static bool trans_SLEEP(DisasContext *ctx, arg_SLEEP *a)
+{
+ gen_helper_sleep(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+/*
+ * This instruction resets the Watchdog Timer. This instruction must be
+ * executed within a limited time given by the WD prescaler. See the Watchdog
+ * Timer hardware specification.
+ */
+static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
+{
+ gen_helper_wdr(cpu_env);
+
+ return true;
+}
+
+/*
+ * Core translation mechanism functions:
+ *
+ * - translate()
+ * - canonicalize_skip()
+ * - gen_intermediate_code()
+ * - restore_state_to_opc()
+ *
+ */
+static void translate(DisasContext *ctx)
+{
+ uint32_t opcode = next_word(ctx);
+
+ if (!decode_insn(ctx, opcode)) {
+ gen_helper_unsupported(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+}
+
+/* Standardize the cpu_skip condition to NE. */
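+/*
+ * Returns true if the skip state is non-constant, in which case it has
+ * been copied into cpu_skip and the condition to test is cpu_skip != 0.
+ * Returns false if cpu_skip is known constant (never or always skip).
+ */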
+static bool canonicalize_skip(DisasContext *ctx)
+{
+ switch (ctx->skip_cond) {
+ case TCG_COND_NEVER:
+ /* Normal case: cpu_skip is known to be false. */
+ return false;
+
+ case TCG_COND_ALWAYS:
+ /*
+ * Breakpoint case: cpu_skip is known to be true, via TB_FLAGS_SKIP.
+ * The breakpoint is on the instruction being skipped, at the start
+ * of the TranslationBlock. No need to update.
+ */
+ return false;
+
+ case TCG_COND_NE:
+ if (ctx->skip_var1 == NULL) {
+ tcg_gen_mov_tl(cpu_skip, ctx->skip_var0);
+ } else {
+ tcg_gen_xor_tl(cpu_skip, ctx->skip_var0, ctx->skip_var1);
+ ctx->skip_var1 = NULL;
+ }
+ break;
+
+ default:
+ /* Convert to a NE condition vs 0. */
+ if (ctx->skip_var1 == NULL) {
+ tcg_gen_setcondi_tl(ctx->skip_cond, cpu_skip, ctx->skip_var0, 0);
+ } else {
+ tcg_gen_setcond_tl(ctx->skip_cond, cpu_skip,
+ ctx->skip_var0, ctx->skip_var1);
+ ctx->skip_var1 = NULL;
+ }
+ ctx->skip_cond = TCG_COND_NE;
+ break;
+ }
+ if (ctx->free_skip_var0) {
+ tcg_temp_free(ctx->skip_var0);
+ ctx->free_skip_var0 = false;
+ }
+ ctx->skip_var0 = cpu_skip;
+ return true;
+}
+
+static void avr_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUAVRState *env = cs->env_ptr;
+ uint32_t tb_flags = ctx->base.tb->flags;
+
+ ctx->cs = cs;
+ ctx->env = env;
+ ctx->npc = ctx->base.pc_first / 2;
+
+ ctx->skip_cond = TCG_COND_NEVER;
+ if (tb_flags & TB_FLAGS_SKIP) {
+ ctx->skip_cond = TCG_COND_ALWAYS;
+ ctx->skip_var0 = cpu_skip;
+ }
+
+ if (tb_flags & TB_FLAGS_FULL_ACCESS) {
+        /*
+         * This flag is set by the ST/LD instructions; regenerate such TBs
+         * one instruction at a time, going through the full cpu/memory
+         * access path instead of the plain memory access.
+         */
+ ctx->base.max_insns = 1;
+ }
+}
+
+static void avr_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
+
+static void avr_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->npc);
+}
+
+static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ TCGLabel *skip_label = NULL;
+
+ /* Conditionally skip the next instruction, if indicated. */
+ if (ctx->skip_cond != TCG_COND_NEVER) {
+ skip_label = gen_new_label();
+ if (ctx->skip_var0 == cpu_skip) {
+ /*
+ * Copy cpu_skip so that we may zero it before the branch.
+ * This ensures that cpu_skip is non-zero after the label
+ * if and only if the skipped insn itself sets a skip.
+ */
+ ctx->free_skip_var0 = true;
+ ctx->skip_var0 = tcg_temp_new();
+ tcg_gen_mov_tl(ctx->skip_var0, cpu_skip);
+ tcg_gen_movi_tl(cpu_skip, 0);
+ }
+ if (ctx->skip_var1 == NULL) {
+ tcg_gen_brcondi_tl(ctx->skip_cond, ctx->skip_var0, 0, skip_label);
+ } else {
+ tcg_gen_brcond_tl(ctx->skip_cond, ctx->skip_var0,
+ ctx->skip_var1, skip_label);
+ ctx->skip_var1 = NULL;
+ }
+ if (ctx->free_skip_var0) {
+ tcg_temp_free(ctx->skip_var0);
+ ctx->free_skip_var0 = false;
+ }
+ ctx->skip_cond = TCG_COND_NEVER;
+ ctx->skip_var0 = NULL;
+ }
+
+ translate(ctx);
+
+ ctx->base.pc_next = ctx->npc * 2;
+
+ if (skip_label) {
+ canonicalize_skip(ctx);
+ gen_set_label(skip_label);
+ if (ctx->base.is_jmp == DISAS_NORETURN) {
+ ctx->base.is_jmp = DISAS_CHAIN;
+ }
+ }
+
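+    /*
+     * Stop before an instruction could cross a page boundary: the 4 bytes
+     * of slack leave room for the largest (32-bit) AVR instruction.
+     */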
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ target_ulong page_first = ctx->base.pc_first & TARGET_PAGE_MASK;
+
+ if ((ctx->base.pc_next - page_first) >= TARGET_PAGE_SIZE - 4) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ bool nonconst_skip = canonicalize_skip(ctx);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_NORETURN:
+ assert(!nonconst_skip);
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ case DISAS_CHAIN:
+ if (!nonconst_skip) {
+ /* Note gen_goto_tb checks singlestep. */
+ gen_goto_tb(ctx, 1, ctx->npc);
+ break;
+ }
+ tcg_gen_movi_tl(cpu_pc, ctx->npc);
+ /* fall through */
+ case DISAS_LOOKUP:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void avr_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps avr_tr_ops = {
+ .init_disas_context = avr_tr_init_disas_context,
+ .tb_start = avr_tr_tb_start,
+ .insn_start = avr_tr_insn_start,
+ .translate_insn = avr_tr_translate_insn,
+ .tb_stop = avr_tr_tb_stop,
+ .disas_log = avr_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc = { };
+ translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc_w = data[0];
+}
diff --git a/target/cris/Kconfig b/target/cris/Kconfig
new file mode 100644
index 000000000..3fdc309fb
--- /dev/null
+++ b/target/cris/Kconfig
@@ -0,0 +1,2 @@
+config CRIS
+ bool
diff --git a/target/cris/cpu-param.h b/target/cris/cpu-param.h
new file mode 100644
index 000000000..36a305876
--- /dev/null
+++ b/target/cris/cpu-param.h
@@ -0,0 +1,17 @@
+/*
+ * CRIS cpu parameters for qemu.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef CRIS_CPU_PARAM_H
+#define CRIS_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 13
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define NB_MMU_MODES 2
+
+#endif
diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h
new file mode 100644
index 000000000..2596edc7e
--- /dev/null
+++ b/target/cris/cpu-qom.h
@@ -0,0 +1,51 @@
+/*
+ * QEMU CRIS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_CRIS_CPU_QOM_H
+#define QEMU_CRIS_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_CRIS_CPU "cris-cpu"
+
+OBJECT_DECLARE_TYPE(CRISCPU, CRISCPUClass,
+ CRIS_CPU)
+
+/**
+ * CRISCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @vr: Version Register value.
+ *
+ * A CRIS CPU model.
+ */
+struct CRISCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+
+ uint32_t vr;
+};
+
+
+#endif
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
new file mode 100644
index 000000000..ed6c78134
--- /dev/null
+++ b/target/cris/cpu.c
@@ -0,0 +1,338 @@
+/*
+ * QEMU CRIS CPU
+ *
+ * Copyright (c) 2008 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "mmu.h"
+
+
+static void cris_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool cris_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+static void cris_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ CRISCPU *cpu = CRIS_CPU(s);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(cpu);
+ CPUCRISState *env = &cpu->env;
+ uint32_t vr;
+
+ ccc->parent_reset(dev);
+
+ vr = env->pregs[PR_VR];
+ memset(env, 0, offsetof(CPUCRISState, end_reset_fields));
+ env->pregs[PR_VR] = vr;
+
+#if defined(CONFIG_USER_ONLY)
+ /* start in user mode with interrupts enabled. */
+ env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
+#else
+ cris_mmu_init(env);
+ env->pregs[PR_CCS] = 0;
+#endif
+}
+
+static ObjectClass *cris_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+#if defined(CONFIG_USER_ONLY)
+ if (strcasecmp(cpu_model, "any") == 0) {
+ return object_class_by_name(CRIS_CPU_TYPE_NAME("crisv32"));
+ }
+#endif
+
+ typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_CRIS_CPU) ||
+ object_class_is_abstract(oc))) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+/* Sort numerically by VR. */
+static gint cris_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ CRISCPUClass *ccc_a = CRIS_CPU_CLASS(a);
+ CRISCPUClass *ccc_b = CRIS_CPU_CLASS(b);
+
+ if (ccc_a->vr > ccc_b->vr) {
+ return 1;
+ } else if (ccc_a->vr < ccc_b->vr) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void cris_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ const char *typename = object_class_get_name(oc);
+ char *name;
+
+ name = g_strndup(typename, strlen(typename) - strlen(CRIS_CPU_TYPE_SUFFIX));
+ qemu_printf(" %s\n", name);
+ g_free(name);
+}
+
+void cris_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_CRIS_CPU, false);
+ list = g_slist_sort(list, cris_cpu_list_compare);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, cris_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+static void cris_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ ccc->parent_realize(dev, errp);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void cris_cpu_set_irq(void *opaque, int irq, int level)
+{
+ CRISCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int type = irq == CRIS_CPU_IRQ ? CPU_INTERRUPT_HARD : CPU_INTERRUPT_NMI;
+
+ if (irq == CRIS_CPU_IRQ) {
+ /*
+ * The PIC passes us the vector for the IRQ as the value it sends
+ * over the qemu_irq line
+ */
+ cpu->env.interrupt_vector = level;
+ }
+
+ if (level) {
+ cpu_interrupt(cs, type);
+ } else {
+ cpu_reset_interrupt(cs, type);
+ }
+}
+#endif
+
+static void cris_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ CRISCPU *cc = CRIS_CPU(cpu);
+ CPUCRISState *env = &cc->env;
+
+ if (env->pregs[PR_VR] != 32) {
+ info->mach = bfd_mach_cris_v0_v10;
+ info->print_insn = print_insn_crisv10;
+ } else {
+ info->mach = bfd_mach_cris_v32;
+ info->print_insn = print_insn_crisv32;
+ }
+}
+
+static void cris_cpu_initfn(Object *obj)
+{
+ CRISCPU *cpu = CRIS_CPU(obj);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
+ CPUCRISState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ env->pregs[PR_VR] = ccc->vr;
+
+#ifndef CONFIG_USER_ONLY
+ /* IRQ and NMI lines. */
+ qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2);
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps cris_sysemu_ops = {
+ .get_phys_page_debug = cris_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps crisv10_tcg_ops = {
+ .initialize = cris_initialize_crisv10_tcg,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = cris_cpu_tlb_fill,
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
+ .do_interrupt = crisv10_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static const struct TCGCPUOps crisv32_tcg_ops = {
+ .initialize = cris_initialize_tcg,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = cris_cpu_tlb_fill,
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
+ .do_interrupt = cris_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 8;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_ops = &crisv10_tcg_ops;
+}
+
+static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 9;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_ops = &crisv10_tcg_ops;
+}
+
+static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 10;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_ops = &crisv10_tcg_ops;
+}
+
+static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 11;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_ops = &crisv10_tcg_ops;
+}
+
+static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 17;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+ cc->tcg_ops = &crisv10_tcg_ops;
+}
+
+static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 32;
+ cc->tcg_ops = &crisv32_tcg_ops;
+}
+
+static void cris_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, cris_cpu_realizefn,
+ &ccc->parent_realize);
+
+ device_class_set_parent_reset(dc, cris_cpu_reset, &ccc->parent_reset);
+
+ cc->class_by_name = cris_cpu_class_by_name;
+ cc->has_work = cris_cpu_has_work;
+ cc->dump_state = cris_cpu_dump_state;
+ cc->set_pc = cris_cpu_set_pc;
+ cc->gdb_read_register = cris_cpu_gdb_read_register;
+ cc->gdb_write_register = cris_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ dc->vmsd = &vmstate_cris_cpu;
+ cc->sysemu_ops = &cris_sysemu_ops;
+#endif
+
+ cc->gdb_num_core_regs = 49;
+ cc->gdb_stop_before_watchpoint = true;
+
+ cc->disas_set_info = cris_disas_set_info;
+}
+
+#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \
+ { \
+ .parent = TYPE_CRIS_CPU, \
+ .class_init = initfn, \
+ .name = CRIS_CPU_TYPE_NAME(cpu_model), \
+ }
+
+static const TypeInfo cris_cpu_model_type_infos[] = {
+ {
+ .name = TYPE_CRIS_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(CRISCPU),
+ .instance_init = cris_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(CRISCPUClass),
+ .class_init = cris_cpu_class_init,
+ },
+ DEFINE_CRIS_CPU_TYPE("crisv8", crisv8_cpu_class_init),
+ DEFINE_CRIS_CPU_TYPE("crisv9", crisv9_cpu_class_init),
+ DEFINE_CRIS_CPU_TYPE("crisv10", crisv10_cpu_class_init),
+ DEFINE_CRIS_CPU_TYPE("crisv11", crisv11_cpu_class_init),
+ DEFINE_CRIS_CPU_TYPE("crisv17", crisv17_cpu_class_init),
+ DEFINE_CRIS_CPU_TYPE("crisv32", crisv32_cpu_class_init),
+};
+
+DEFINE_TYPES(cris_cpu_model_type_infos)
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
new file mode 100644
index 000000000..b445b194e
--- /dev/null
+++ b/target/cris/cpu.h
@@ -0,0 +1,286 @@
+/*
+ * CRIS virtual CPU header
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CRIS_CPU_H
+#define CRIS_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+#define EXCP_NMI 1
+#define EXCP_GURU 2
+#define EXCP_BUSFAULT 3
+#define EXCP_IRQ 4
+#define EXCP_BREAK 5
+
+/* CRIS-specific interrupt pending bits. */
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+/* CRIS CPU device object's interrupt lines. */
+/* The PIC passes the IRQ vector as the value it sends over the qemu_irq line. */
+#define CRIS_CPU_IRQ 0
+#define CRIS_CPU_NMI 1
+
+/* Register aliases. R0 - R15 */
+#define R_FP 8
+#define R_SP 14
+#define R_ACR 15
+
+/* Support regs, P0 - P15 */
+#define PR_BZ 0
+#define PR_VR 1
+#define PR_PID 2
+#define PR_SRS 3
+#define PR_WZ 4
+#define PR_EXS 5
+#define PR_EDA 6
+#define PR_PREFIX 6 /* On CRISv10 P6 is reserved, we use it as prefix. */
+#define PR_MOF 7
+#define PR_DZ 8
+#define PR_EBP 9
+#define PR_ERP 10
+#define PR_SRP 11
+#define PR_NRP 12
+#define PR_CCS 13
+#define PR_USP 14
+#define PRV10_BRP 14
+#define PR_SPC 15
+
+/* CPU flags. */
+#define Q_FLAG 0x80000000
+#define M_FLAG_V32 0x40000000
+#define PFIX_FLAG 0x800 /* CRISv10 Only. */
+#define F_FLAG_V10 0x400
+#define P_FLAG_V10 0x200
+#define S_FLAG 0x200
+#define R_FLAG 0x100
+#define P_FLAG 0x80
+#define M_FLAG_V10 0x80
+#define U_FLAG 0x40
+#define I_FLAG 0x20
+#define X_FLAG 0x10
+#define N_FLAG 0x08
+#define Z_FLAG 0x04
+#define V_FLAG 0x02
+#define C_FLAG 0x01
+#define ALU_FLAGS 0x1F
+
+/* Condition codes. */
+#define CC_CC 0
+#define CC_CS 1
+#define CC_NE 2
+#define CC_EQ 3
+#define CC_VC 4
+#define CC_VS 5
+#define CC_PL 6
+#define CC_MI 7
+#define CC_LS 8
+#define CC_HI 9
+#define CC_GE 10
+#define CC_LT 11
+#define CC_GT 12
+#define CC_LE 13
+#define CC_A 14
+#define CC_P 15
+
+typedef struct {
+ uint32_t hi;
+ uint32_t lo;
+} TLBSet;
+
+typedef struct CPUCRISState {
+ uint32_t regs[16];
+ /* P0 - P15 are referred to as special registers in the docs. */
+ uint32_t pregs[16];
+
+ /* Pseudo register for the PC. Not directly accessible on CRIS. */
+ uint32_t pc;
+
+ /* Pseudo register for the kernel stack. */
+ uint32_t ksp;
+
+ /* Branch. */
+ int dslot;
+ int btaken;
+ uint32_t btarget;
+
+ /* Condition flag tracking. */
+ uint32_t cc_op;
+ uint32_t cc_mask;
+ uint32_t cc_dest;
+ uint32_t cc_src;
+ uint32_t cc_result;
+ /* size of the operation, 1 = byte, 2 = word, 4 = dword. */
+ int cc_size;
+ /* X flag at the time of cc snapshot. */
+ int cc_x;
+
+ /* CRIS has certain insns that lockout interrupts. */
+ int locked_irq;
+ int interrupt_vector;
+ int fault_vector;
+ int trap_vector;
+
+ /* FIXME: add a check in the translator to avoid writing to support
+ register sets beyond the 4th. The ISA allows up to 256, but in
+ practice there is no core that implements more than 4.
+
+ Support function registers are used to control units close to the
+ core. Accesses do not pass down the normal hierarchy.
+ */
+ uint32_t sregs[4][16];
+
+ /* Linear feedback shift reg in the mmu. Used to provide pseudo
+ randomness for the 'hint' the mmu gives to sw for choosing valid
+ sets on TLB refills. */
+ uint32_t mmu_rand_lfsr;
+
+ /*
+ * We just store the stores to the tlbset here for later evaluation
+ * when the hw needs access to them.
+ *
+ * One for I and another for D.
+ */
+ TLBSet tlbsets[2][4][16];
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Members from load_info on are preserved across resets. */
+ void *load_info;
+} CPUCRISState;
+
+/**
+ * CRISCPU:
+ * @env: #CPUCRISState
+ *
+ * A CRIS CPU.
+ */
+struct CRISCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUCRISState env;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_cris_cpu;
+
+void cris_cpu_do_interrupt(CPUState *cpu);
+void crisv10_cpu_do_interrupt(CPUState *cpu);
+bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#endif
+
+void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);
+
+hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void cris_initialize_tcg(void);
+void cris_initialize_crisv10_tcg(void);
+
+/* Instead of computing the condition codes after each CRIS instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DEST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, the condition codes can be calculated
+ * using this information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
+enum {
+ CC_OP_DYNAMIC, /* Use env->cc_op */
+ CC_OP_FLAGS,
+ CC_OP_CMP,
+ CC_OP_MOVE,
+ CC_OP_ADD,
+ CC_OP_ADDC,
+ CC_OP_MCP,
+ CC_OP_ADDU,
+ CC_OP_SUB,
+ CC_OP_SUBU,
+ CC_OP_NEG,
+ CC_OP_BTST,
+ CC_OP_MULS,
+ CC_OP_MULU,
+ CC_OP_DSTEP,
+ CC_OP_MSTEP,
+ CC_OP_BOUND,
+
+ CC_OP_OR,
+ CC_OP_AND,
+ CC_OP_XOR,
+ CC_OP_LSL,
+ CC_OP_LSR,
+ CC_OP_ASR,
+ CC_OP_LZ
+};
+
+/* CRIS uses 8k pages. */
+#define MMAP_SHIFT TARGET_PAGE_BITS
+
+#define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU
+#define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_CRIS_CPU
+
+/* MMU modes definitions */
+#define MMU_USER_IDX 1
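+/* Index 0 is kernel mode; MMU_USER_IDX is selected by the U flag in CCS. */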
+static inline int cpu_mmu_index(CPUCRISState *env, bool ifetch)
+{
+ return !!(env->pregs[PR_CCS] & U_FLAG);
+}
+
+/* Support function regs. */
+#define SFR_RW_GC_CFG 0][0
+#define SFR_RW_MM_CFG env->pregs[PR_SRS]][0
+#define SFR_RW_MM_KBASE_LO env->pregs[PR_SRS]][1
+#define SFR_RW_MM_KBASE_HI env->pregs[PR_SRS]][2
+#define SFR_R_MM_CAUSE env->pregs[PR_SRS]][3
+#define SFR_RW_MM_TLB_SEL env->pregs[PR_SRS]][4
+#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5
+#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6
+
+typedef CPUCRISState CPUArchState;
+typedef CRISCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = env->dslot |
+ (env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG
+ | X_FLAG | PFIX_FLAG));
+}
+
+#define cpu_list cris_cpu_list
+void cris_cpu_list(void);
+
+#endif
diff --git a/target/cris/crisv10-decode.h b/target/cris/crisv10-decode.h
new file mode 100644
index 000000000..9c531f36b
--- /dev/null
+++ b/target/cris/crisv10-decode.h
@@ -0,0 +1,112 @@
+/*
+ * CRISv10 insn decoding macros.
+ *
+ * Copyright (c) 2010 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_CRIS_CRISV10_DECODE_H
+#define TARGET_CRIS_CRISV10_DECODE_H
+
+#define CRISV10_MODE_QIMMEDIATE 0
+#define CRISV10_MODE_REG 1
+#define CRISV10_MODE_INDIRECT 2
+#define CRISV10_MODE_AUTOINC 3
+
+/* Quick Immediate. */
+#define CRISV10_QIMM_BCC_R0 0
+#define CRISV10_QIMM_BCC_R1 1
+#define CRISV10_QIMM_BCC_R2 2
+#define CRISV10_QIMM_BCC_R3 3
+
+#define CRISV10_QIMM_BDAP_R0 4
+#define CRISV10_QIMM_BDAP_R1 5
+#define CRISV10_QIMM_BDAP_R2 6
+#define CRISV10_QIMM_BDAP_R3 7
+
+#define CRISV10_QIMM_ADDQ 8
+#define CRISV10_QIMM_MOVEQ 9
+#define CRISV10_QIMM_SUBQ 10
+#define CRISV10_QIMM_CMPQ 11
+#define CRISV10_QIMM_ANDQ 12
+#define CRISV10_QIMM_ORQ 13
+#define CRISV10_QIMM_ASHQ 14
+#define CRISV10_QIMM_LSHQ 15
+
+
+#define CRISV10_REG_ADDX 0
+#define CRISV10_REG_MOVX 1
+#define CRISV10_REG_SUBX 2
+#define CRISV10_REG_LSL 3
+#define CRISV10_REG_ADDI 4
+#define CRISV10_REG_BIAP 5
+#define CRISV10_REG_NEG 6
+#define CRISV10_REG_BOUND 7
+#define CRISV10_REG_ADD 8
+#define CRISV10_REG_MOVE_R 9
+#define CRISV10_REG_MOVE_SPR_R 9
+#define CRISV10_REG_MOVE_R_SPR 8
+#define CRISV10_REG_SUB 10
+#define CRISV10_REG_CMP 11
+#define CRISV10_REG_AND 12
+#define CRISV10_REG_OR 13
+#define CRISV10_REG_ASR 14
+#define CRISV10_REG_LSR 15
+
+#define CRISV10_REG_BTST 3
+#define CRISV10_REG_SCC 4
+#define CRISV10_REG_SETF 6
+#define CRISV10_REG_CLEARF 7
+#define CRISV10_REG_BIAP 5
+#define CRISV10_REG_ABS 10
+#define CRISV10_REG_DSTEP 11
+#define CRISV10_REG_LZ 12
+#define CRISV10_REG_NOT 13
+#define CRISV10_REG_SWAP 13
+#define CRISV10_REG_XOR 14
+#define CRISV10_REG_MSTEP 15
+
+/* Indirect, var size. */
+#define CRISV10_IND_TEST 14
+#define CRISV10_IND_MUL 4
+#define CRISV10_IND_BDAP_M 5
+#define CRISV10_IND_ADD 8
+#define CRISV10_IND_MOVE_M_R 9
+
+
+/* Indirect, fixed size. */
+#define CRISV10_IND_ADDX 0
+#define CRISV10_IND_MOVX 1
+#define CRISV10_IND_SUBX 2
+#define CRISV10_IND_CMPX 3
+#define CRISV10_IND_JUMP_M 4
+#define CRISV10_IND_DIP 5
+#define CRISV10_IND_JUMP_R 6
+#define CRISV17_IND_ADDC 6
+#define CRISV10_IND_BOUND 7
+#define CRISV10_IND_BCC_M 7
+#define CRISV10_IND_MOVE_M_SPR 8
+#define CRISV10_IND_MOVE_SPR_M 9
+#define CRISV10_IND_SUB 10
+#define CRISV10_IND_CMP 11
+#define CRISV10_IND_AND 12
+#define CRISV10_IND_OR 13
+#define CRISV10_IND_MOVE_R_M 15
+
+#define CRISV10_IND_MOVEM_M_R 14
+#define CRISV10_IND_MOVEM_R_M 15
+
+#endif
diff --git a/target/cris/crisv32-decode.h b/target/cris/crisv32-decode.h
new file mode 100644
index 000000000..fa0a7f0d6
--- /dev/null
+++ b/target/cris/crisv32-decode.h
@@ -0,0 +1,133 @@
+/*
+ * CRIS insn decoding macros.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CRISV32_DECODE_H
+#define CRISV32_DECODE_H
+
+/* Convenient binary macros. */
+#define HEX__(n) 0x##n##LU
+#define B8__(x) ((x&0x0000000FLU)?1:0) \
+ + ((x&0x000000F0LU)?2:0) \
+ + ((x&0x00000F00LU)?4:0) \
+ + ((x&0x0000F000LU)?8:0) \
+ + ((x&0x000F0000LU)?16:0) \
+ + ((x&0x00F00000LU)?32:0) \
+ + ((x&0x0F000000LU)?64:0) \
+ + ((x&0xF0000000LU)?128:0)
+#define B8(d) ((unsigned char)B8__(HEX__(d)))
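+/* E.g. B8(01100100) == 0x64: each digit of the binary literal is one bit. */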
+
+/* Quick imm. */
+#define DEC_BCCQ {B8(00000000), B8(11110000)}
+#define DEC_ADDOQ {B8(00010000), B8(11110000)}
+#define DEC_ADDQ {B8(00100000), B8(11111100)}
+#define DEC_MOVEQ {B8(00100100), B8(11111100)}
+#define DEC_SUBQ {B8(00101000), B8(11111100)}
+#define DEC_CMPQ {B8(00101100), B8(11111100)}
+#define DEC_ANDQ {B8(00110000), B8(11111100)}
+#define DEC_ORQ {B8(00110100), B8(11111100)}
+#define DEC_BTSTQ {B8(00111000), B8(11111110)}
+#define DEC_ASRQ {B8(00111010), B8(11111110)}
+#define DEC_LSLQ {B8(00111100), B8(11111110)}
+#define DEC_LSRQ {B8(00111110), B8(11111110)}
+
+/* Register. */
+#define DEC_MOVU_R {B8(01000100), B8(11111110)}
+#define DEC_MOVS_R {B8(01000110), B8(11111110)}
+#define DEC_MOVE_R {B8(01100100), B8(11111100)}
+#define DEC_MOVE_RP {B8(01100011), B8(11111111)}
+#define DEC_MOVE_PR {B8(01100111), B8(11111111)}
+#define DEC_DSTEP_R {B8(01101111), B8(11111111)}
+#define DEC_MOVE_RS {B8(10110111), B8(11111111)}
+#define DEC_MOVE_SR {B8(11110111), B8(11111111)}
+#define DEC_ADDU_R {B8(01000000), B8(11111110)}
+#define DEC_ADDS_R {B8(01000010), B8(11111110)}
+#define DEC_ADD_R {B8(01100000), B8(11111100)}
+#define DEC_ADDI_R {B8(01010000), B8(11111100)}
+#define DEC_MULS_R {B8(11010000), B8(11111100)}
+#define DEC_MULU_R {B8(10010000), B8(11111100)}
+#define DEC_ADDI_ACR {B8(01010100), B8(11111100)}
+#define DEC_NEG_R {B8(01011000), B8(11111100)}
+#define DEC_BOUND_R {B8(01011100), B8(11111100)}
+#define DEC_SUBU_R {B8(01001000), B8(11111110)}
+#define DEC_SUBS_R {B8(01001010), B8(11111110)}
+#define DEC_SUB_R {B8(01101000), B8(11111100)}
+#define DEC_CMP_R {B8(01101100), B8(11111100)}
+#define DEC_AND_R {B8(01110000), B8(11111100)}
+#define DEC_ABS_R {B8(01101011), B8(11111111)}
+#define DEC_LZ_R {B8(01110011), B8(11111111)}
+#define DEC_MCP_R {B8(01111111), B8(11111111)}
+#define DEC_SWAP_R {B8(01110111), B8(11111111)}
+#define DEC_XOR_R {B8(01111011), B8(11111111)}
+#define DEC_LSL_R {B8(01001100), B8(11111100)}
+#define DEC_LSR_R {B8(01111100), B8(11111100)}
+#define DEC_ASR_R {B8(01111000), B8(11111100)}
+#define DEC_OR_R {B8(01110100), B8(11111100)}
+#define DEC_BTST_R {B8(01001111), B8(11111111)}
+
+/* Fixed. */
+#define DEC_SETF {B8(01011011), B8(11111111)}
+#define DEC_CLEARF {B8(01011111), B8(11111111)}
+
+/* Memory. */
+#define DEC_ADDU_M {B8(10000000), B8(10111110)}
+#define DEC_ADDS_M {B8(10000010), B8(10111110)}
+#define DEC_MOVU_M {B8(10000100), B8(10111110)}
+#define DEC_MOVS_M {B8(10000110), B8(10111110)}
+#define DEC_SUBU_M {B8(10001000), B8(10111110)}
+#define DEC_SUBS_M {B8(10001010), B8(10111110)}
+#define DEC_CMPU_M {B8(10001100), B8(10111110)}
+#define DEC_CMPS_M {B8(10001110), B8(10111110)}
+#define DEC_ADDO_M {B8(10010100), B8(10111100)}
+#define DEC_BOUND_M {B8(10011100), B8(10111100)}
+#define DEC_ADD_M {B8(10100000), B8(10111100)}
+#define DEC_MOVE_MR {B8(10100100), B8(10111100)}
+#define DEC_SUB_M {B8(10101000), B8(10111100)}
+#define DEC_CMP_M {B8(10101100), B8(10111100)}
+#define DEC_AND_M {B8(10110000), B8(10111100)}
+#define DEC_OR_M {B8(10110100), B8(10111100)}
+#define DEC_TEST_M {B8(10111000), B8(10111100)}
+#define DEC_MOVE_RM {B8(10111100), B8(10111100)}
+
+#define DEC_ADDC_R {B8(01010111), B8(11111111)}
+#define DEC_ADDC_MR {B8(10011010), B8(10111111)}
+#define DEC_LAPCQ {B8(10010111), B8(11111111)}
+#define DEC_LAPC_IM {B8(11010111), B8(11111111)}
+
+#define DEC_MOVE_MP {B8(10100011), B8(10111111)}
+#define DEC_MOVE_PM {B8(10100111), B8(10111111)}
+
+#define DEC_SCC_R {B8(01010011), B8(11111111)}
+#define DEC_RFE_ETC {B8(10010011), B8(11111111)}
+#define DEC_JUMP_P {B8(10011111), B8(11111111)}
+#define DEC_BCC_IM {B8(11011111), B8(11111111)}
+#define DEC_JAS_R {B8(10011011), B8(11111111)}
+#define DEC_JASC_R {B8(10110011), B8(11111111)}
+#define DEC_JAS_IM {B8(11011011), B8(11111111)}
+#define DEC_JASC_IM {B8(11110011), B8(11111111)}
+#define DEC_BAS_IM {B8(11101011), B8(11111111)}
+#define DEC_BASC_IM {B8(11101111), B8(11111111)}
+#define DEC_MOVEM_MR {B8(10111011), B8(10111111)}
+#define DEC_MOVEM_RM {B8(10111111), B8(10111111)}
+
+#define DEC_FTAG_FIDX_D_M {B8(10101011), B8(11111111)}
+#define DEC_FTAG_FIDX_I_M {B8(11010011), B8(11111111)}
+
+#endif
diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c
new file mode 100644
index 000000000..2418d575b
--- /dev/null
+++ b/target/cris/gdbstub.c
@@ -0,0 +1,130 @@
+/*
+ * CRIS gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
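+/*
+ * Register-number layout assumed by the stubs below (inferred from
+ * the code itself, not from a gdb description file): 0..15 are the
+ * general registers (with $pc reported at 15 on CRISv10), 16..31 the
+ * special registers, 32 the CRISv32 $pc and 33..48 the currently
+ * banked support registers.
+ */
+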
+int crisv10_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+
+ if (n < 15) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+
+ if (n == 15) {
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+
+ if (n < 32) {
+ switch (n) {
+ case 16:
+ case 17:
+ return gdb_get_reg8(mem_buf, env->pregs[n - 16]);
+ case 20:
+ case 21:
+ return gdb_get_reg16(mem_buf, env->pregs[n - 16]);
+ default:
+ if (n >= 23) {
+ return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+int cris_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ uint8_t srs;
+
+ srs = env->pregs[PR_SRS];
+ if (n < 16) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+
+ if (n >= 21 && n < 32) {
+ return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
+ }
+ if (n >= 33 && n < 49) {
+ return gdb_get_reg32(mem_buf, env->sregs[srs][n - 33]);
+ }
+ switch (n) {
+ case 16:
+ return gdb_get_reg8(mem_buf, env->pregs[0]);
+ case 17:
+ return gdb_get_reg8(mem_buf, env->pregs[1]);
+ case 18:
+ return gdb_get_reg32(mem_buf, env->pregs[2]);
+ case 19:
+ return gdb_get_reg8(mem_buf, srs);
+ case 20:
+ return gdb_get_reg16(mem_buf, env->pregs[4]);
+ case 32:
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+
+ return 0;
+}
+
+int cris_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > 49) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 16) {
+ env->regs[n] = tmp;
+ }
+
+ if (n >= 21 && n < 32) {
+ env->pregs[n - 16] = tmp;
+ }
+
+ /* FIXME: Should the support-function registers be writable? */
+ switch (n) {
+ case 16:
+ return 1;
+ case 17:
+ return 1;
+ case 18:
+ env->pregs[PR_PID] = tmp;
+ break;
+ case 19:
+ return 1;
+ case 20:
+ return 2;
+ case 32:
+ env->pc = tmp;
+ break;
+ }
+
+ return 4;
+}
diff --git a/target/cris/helper.c b/target/cris/helper.c
new file mode 100644
index 000000000..a0d6ecdcd
--- /dev/null
+++ b/target/cris/helper.c
@@ -0,0 +1,290 @@
+/*
+ * CRIS helper routines.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
+#include "mmu.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+
+
+//#define CRIS_HELPER_DEBUG
+
+
+#ifdef CRIS_HELPER_DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x)
+#define D_LOG(...) do { } while (0)
+#endif
+
+static void cris_shift_ccs(CPUCRISState *env)
+{
+ uint32_t ccs;
+ /* Apply the ccs shift. */
+ ccs = env->pregs[PR_CCS];
+ ccs = ((ccs & 0xc0000000) | ((ccs << 12) >> 2)) & ~0x3ff;
+ env->pregs[PR_CCS] = ccs;
+}
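+
+/*
+ * A concrete illustration of the shift (an added note; the layout is
+ * inferred from the code, not quoted from a spec): bits 0..29 of CCS
+ * hold three stacked 10-bit flag fields, and taking an exception
+ * pushes the stack left, clearing the active (lowest) field:
+ *
+ * ccs = 0xc0000000 | (0x155 << 10) | 0x2aa
+ * becomes 0xc0000000 | (0x155 << 20) | (0x2aa << 10)
+ *
+ * cris_ccs_rshift() in op_helper.c pops the stack again on rfe/rfn.
+ */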
+
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ struct cris_mmu_result res;
+ int prot, miss;
+ target_ulong phy;
+
+ miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
+ access_type, mmu_idx, 0);
+ if (likely(!miss)) {
+ /*
+ * Mask off the cache selection bit. The ETRAX busses do not
+ * see the top bit.
+ */
+ phy = res.phy & ~0x80000000;
+ prot = res.prot;
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ if (probe) {
+ return false;
+ }
+
+ if (cs->exception_index == EXCP_BUSFAULT) {
+ cpu_abort(cs, "CRIS: Illegal recursive bus fault."
+ "addr=%" VADDR_PRIx " access_type=%d\n",
+ address, access_type);
+ }
+
+ env->pregs[PR_EDA] = address;
+ cs->exception_index = EXCP_BUSFAULT;
+ env->fault_vector = res.bf_vec;
+ if (retaddr) {
+ if (cpu_restore_state(cs, retaddr, true)) {
+ /* Evaluate flags after retranslation. */
+ helper_top_evaluate_flags(env);
+ }
+ }
+ cpu_loop_exit(cs);
+}
+
+void crisv10_cpu_do_interrupt(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ int ex_vec = -1;
+
+ D_LOG("exception index=%d interrupt_req=%d\n",
+ cs->exception_index,
+ cs->interrupt_request);
+
+ if (env->dslot) {
+ /* CRISv10 never takes interrupts while in a delay-slot. */
+ cpu_abort(cs, "CRIS: Interrupt on delay-slot\n");
+ }
+
+ assert(!(env->pregs[PR_CCS] & PFIX_FLAG));
+ switch (cs->exception_index) {
+ case EXCP_BREAK:
+ /* These exceptions are generated by the core itself.
+ ERP should point to the insn following the brk. */
+ ex_vec = env->trap_vector;
+ env->pregs[PRV10_BRP] = env->pc;
+ break;
+
+ case EXCP_NMI:
+ /* NMI is hardwired to vector zero. */
+ ex_vec = 0;
+ env->pregs[PR_CCS] &= ~M_FLAG_V10;
+ env->pregs[PRV10_BRP] = env->pc;
+ break;
+
+ case EXCP_BUSFAULT:
+ cpu_abort(cs, "Unhandled busfault");
+ break;
+
+ default:
+ /* The interrupt controller gives us the vector. */
+ ex_vec = env->interrupt_vector;
+ /* Normal interrupts are taken between
+ TBs; env->pc is valid here. */
+ env->pregs[PR_ERP] = env->pc;
+ break;
+ }
+
+ if (env->pregs[PR_CCS] & U_FLAG) {
+ /* Swap stack pointers. */
+ env->pregs[PR_USP] = env->regs[R_SP];
+ env->regs[R_SP] = env->ksp;
+ }
+
+ /* Now that we are in kernel mode, load the handler's address. */
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
+ env->locked_irq = 1;
+ env->pregs[PR_CCS] |= F_FLAG_V10; /* set F. */
+
+ qemu_log_mask(CPU_LOG_INT, "%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
+ __func__, env->pc, ex_vec,
+ env->pregs[PR_CCS],
+ env->pregs[PR_PID],
+ env->pregs[PR_ERP]);
+}
+
+void cris_cpu_do_interrupt(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ int ex_vec = -1;
+
+ D_LOG("exception index=%d interrupt_req=%d\n",
+ cs->exception_index,
+ cs->interrupt_request);
+
+ switch (cs->exception_index) {
+ case EXCP_BREAK:
+ /* These exceptions are generated by the core itself.
+ ERP should point to the insn following the brk. */
+ ex_vec = env->trap_vector;
+ env->pregs[PR_ERP] = env->pc;
+ break;
+
+ case EXCP_NMI:
+ /* NMI is hardwired to vector zero. */
+ ex_vec = 0;
+ env->pregs[PR_CCS] &= ~M_FLAG_V32;
+ env->pregs[PR_NRP] = env->pc;
+ break;
+
+ case EXCP_BUSFAULT:
+ ex_vec = env->fault_vector;
+ env->pregs[PR_ERP] = env->pc;
+ break;
+
+ default:
+ /* The interrupt controller gives us the vector. */
+ ex_vec = env->interrupt_vector;
+ /* Normal interrupts are taken between
+ TBs; env->pc is valid here. */
+ env->pregs[PR_ERP] = env->pc;
+ break;
+ }
+
+ /* Fill in the IDX field. */
+ env->pregs[PR_EXS] = (ex_vec & 0xff) << 8;
+
+ if (env->dslot) {
+ D_LOG("excp isr=%x PC=%x ds=%d SP=%x"
+ " ERP=%x pid=%x ccs=%x cc=%d %x\n",
+ ex_vec, env->pc, env->dslot,
+ env->regs[R_SP],
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->cc_op, env->cc_mask);
+ /* We lose the btarget and btaken state here, so re-execute
+ the branch. */
+ env->pregs[PR_ERP] -= env->dslot;
+ /* Exception starts with dslot cleared. */
+ env->dslot = 0;
+ }
+
+ if (env->pregs[PR_CCS] & U_FLAG) {
+ /* Swap stack pointers. */
+ env->pregs[PR_USP] = env->regs[R_SP];
+ env->regs[R_SP] = env->ksp;
+ }
+
+ /* Apply the CRIS CCS shift. Clears U if set. */
+ cris_shift_ccs(env);
+
+ /* Now that we are in kernel mode, load the handler's address.
+ This load may not fault; real hw leaves that behaviour
+ undefined. */
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
+
+ /* Clear the exception_index to avoid spurious hw_aborts for recursive
+ bus faults. */
+ cs->exception_index = -1;
+
+ D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
+ __func__, env->pc, ex_vec,
+ env->pregs[PR_CCS],
+ env->pregs[PR_PID],
+ env->pregs[PR_ERP]);
+}
+
+hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ uint32_t phy = addr;
+ struct cris_mmu_result res;
+ int miss;
+
+ miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_DATA_LOAD, 0, 1);
+ /* If D TLB misses, try I TLB. */
+ if (miss) {
+ miss = cris_mmu_translate(&res, &cpu->env, addr, MMU_INST_FETCH, 0, 1);
+ }
+
+ if (!miss) {
+ phy = res.phy;
+ }
+ D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
+ return phy;
+}
+
+bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && (env->pregs[PR_CCS] & I_FLAG)
+ && !env->locked_irq) {
+ cs->exception_index = EXCP_IRQ;
+ cc->tcg_ops->do_interrupt(cs);
+ ret = true;
+ }
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
+ unsigned int m_flag_archval;
+ if (env->pregs[PR_VR] < 32) {
+ m_flag_archval = M_FLAG_V10;
+ } else {
+ m_flag_archval = M_FLAG_V32;
+ }
+ if ((env->pregs[PR_CCS] & m_flag_archval)) {
+ cs->exception_index = EXCP_NMI;
+ cc->tcg_ops->do_interrupt(cs);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
diff --git a/target/cris/helper.h b/target/cris/helper.h
new file mode 100644
index 000000000..3abf60868
--- /dev/null
+++ b/target/cris/helper.h
@@ -0,0 +1,23 @@
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+DEF_HELPER_2(tlb_flush_pid, void, env, i32)
+DEF_HELPER_2(spc_write, void, env, i32)
+DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfn, void, env)
+
+DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
+DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_mcp, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_alu_4, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_sub_4, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_4, TCG_CALL_NO_SE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_2, TCG_CALL_NO_SE, i32, env, i32, i32)
+DEF_HELPER_1(evaluate_flags, void, env)
+DEF_HELPER_1(top_evaluate_flags, void, env)
diff --git a/target/cris/machine.c b/target/cris/machine.c
new file mode 100644
index 000000000..f370f3348
--- /dev/null
+++ b/target/cris/machine.c
@@ -0,0 +1,93 @@
+/*
+ * CRIS virtual CPU state save/load support
+ *
+ * Copyright (c) 2012 Red Hat, Inc.
+ * Written by Juan Quintela <quintela@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+static const VMStateDescription vmstate_tlbset = {
+ .name = "cpu/tlbset",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(lo, TLBSet),
+ VMSTATE_UINT32(hi, TLBSet),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cris_env = {
+ .name = "env",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16),
+ VMSTATE_UINT32(pc, CPUCRISState),
+ VMSTATE_UINT32(ksp, CPUCRISState),
+ VMSTATE_INT32(dslot, CPUCRISState),
+ VMSTATE_INT32(btaken, CPUCRISState),
+ VMSTATE_UINT32(btarget, CPUCRISState),
+ VMSTATE_UINT32(cc_op, CPUCRISState),
+ VMSTATE_UINT32(cc_mask, CPUCRISState),
+ VMSTATE_UINT32(cc_dest, CPUCRISState),
+ VMSTATE_UINT32(cc_src, CPUCRISState),
+ VMSTATE_UINT32(cc_result, CPUCRISState),
+ VMSTATE_INT32(cc_size, CPUCRISState),
+ VMSTATE_INT32(cc_x, CPUCRISState),
+ VMSTATE_INT32(locked_irq, CPUCRISState),
+ VMSTATE_INT32(interrupt_vector, CPUCRISState),
+ VMSTATE_INT32(fault_vector, CPUCRISState),
+ VMSTATE_INT32(trap_vector, CPUCRISState),
+ VMSTATE_UINT32_ARRAY(sregs[0], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[1], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[2], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[3], CPUCRISState, 16),
+ VMSTATE_UINT32(mmu_rand_lfsr, CPUCRISState),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][0], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][1], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][2], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][3], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][0], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][1], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][2], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][3], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cris_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/cris/meson.build b/target/cris/meson.build
new file mode 100644
index 000000000..c1e326d95
--- /dev/null
+++ b/target/cris/meson.build
@@ -0,0 +1,17 @@
+cris_ss = ss.source_set()
+cris_ss.add(files(
+ 'cpu.c',
+ 'gdbstub.c',
+ 'op_helper.c',
+ 'translate.c',
+))
+
+cris_softmmu_ss = ss.source_set()
+cris_softmmu_ss.add(files(
+ 'helper.c',
+ 'machine.c',
+ 'mmu.c',
+))
+
+target_arch += {'cris': cris_ss}
+target_softmmu_arch += {'cris': cris_softmmu_ss}
diff --git a/target/cris/mmu.c b/target/cris/mmu.c
new file mode 100644
index 000000000..b574ec6e5
--- /dev/null
+++ b/target/cris/mmu.c
@@ -0,0 +1,355 @@
+/*
+ * CRIS mmu emulation.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "mmu.h"
+
+#ifdef DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x) do { } while (0)
+#define D_LOG(...) do { } while (0)
+#endif
+
+void cris_mmu_init(CPUCRISState *env)
+{
+ env->mmu_rand_lfsr = 0xcccc;
+}
+
+#define SR_POLYNOM 0x8805
+static inline unsigned int compute_polynom(unsigned int sr)
+{
+ unsigned int i;
+ unsigned int f;
+
+ f = 0;
+ for (i = 0; i < 16; i++) {
+ f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
+ }
+
+ return f;
+}
+
+static void cris_mmu_update_rand_lfsr(CPUCRISState *env)
+{
+ unsigned int f;
+
+ /* Update lfsr at every fault. */
+ f = compute_polynom(env->mmu_rand_lfsr);
+ env->mmu_rand_lfsr >>= 1;
+ env->mmu_rand_lfsr |= (f << 15);
+ env->mmu_rand_lfsr &= 0xffff;
+}
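+
+/*
+ * Illustrative helper (an addition, not used by the code below):
+ * compute_polynom() folds the SR_POLYNOM taps of this 16-bit
+ * Fibonacci LFSR, and only the parity bit survives the shift-in and
+ * masking above. On a refill miss, the low two bits of the LFSR
+ * pick one of the four TLB sets.
+ */
+static inline unsigned int cris_mmu_pick_refill_set(CPUCRISState *env)
+{
+ cris_mmu_update_rand_lfsr(env);
+ return env->mmu_rand_lfsr & 3;
+}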
+
+static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
+{
+ return (rw_gc_cfg & 12) != 0;
+}
+
+static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
+{
+ return (1 << seg) & rw_mm_cfg;
+}
+
+static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg)
+{
+ uint32_t base;
+ int i;
+
+ if (seg < 8) {
+ base = env->sregs[SFR_RW_MM_KBASE_LO];
+ } else {
+ base = env->sregs[SFR_RW_MM_KBASE_HI];
+ }
+
+ i = seg & 7;
+ base >>= i * 4;
+ base &= 15;
+
+ base <<= 28;
+ return base;
+}
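+
+/*
+ * Worked example with made-up register values: if rw_mm_kbase_hi
+ * holds 0x87654321, segment 0x8 maps through its lowest nibble, so
+ * base == 0x10000000 and a segmented vaddr 0x8xxxxxxx translates to
+ * 0x1xxxxxxx in cris_mmu_translate() below.
+ */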
+
+/* Used by the tlb decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+static inline void set_field(uint32_t *dst, unsigned int val,
+ unsigned int offset, unsigned int width)
+{
+ uint32_t mask;
+
+ mask = (1 << width) - 1;
+ mask <<= offset;
+ val <<= offset;
+
+ val &= mask;
+ *dst &= ~(mask);
+ *dst |= val;
+}
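+
+/*
+ * Quick self-check of the two field helpers (illustrative values):
+ * EXTRACT_FIELD(0x0000e000, 13, 15) == 7, and set_field(&x, 5, 4, 3)
+ * deposits binary 101 into bits 6..4 of x, leaving all other bits
+ * untouched.
+ */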
+
+#ifdef DEBUG
+static void dump_tlb(CPUCRISState *env, int mmu)
+{
+ int set;
+ int idx;
+ uint32_t hi, lo, tlb_vpn, tlb_pfn;
+
+ for (set = 0; set < 4; set++) {
+ for (idx = 0; idx < 16; idx++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+ tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
+ tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
+
+ printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
+ set, idx, hi, lo, tlb_vpn, tlb_pfn);
+ }
+ }
+}
+#endif
+
+static int cris_mmu_translate_page(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ MMUAccessType access_type,
+ int usermode, int debug)
+{
+ unsigned int vpage;
+ unsigned int idx;
+ uint32_t pid, lo, hi;
+ uint32_t tlb_vpn, tlb_pfn = 0;
+ int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
+ int cfg_v, cfg_k, cfg_w, cfg_x;
+ int set, match = 0;
+ uint32_t r_cause;
+ uint32_t r_cfg;
+ int rwcause;
+ int mmu = 1; /* Data mmu is default. */
+ int vect_base;
+
+ r_cause = env->sregs[SFR_R_MM_CAUSE];
+ r_cfg = env->sregs[SFR_RW_MM_CFG];
+ pid = env->pregs[PR_PID] & 0xff;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ rwcause = CRIS_MMU_ERR_EXEC;
+ mmu = 0;
+ break;
+ case MMU_DATA_STORE:
+ rwcause = CRIS_MMU_ERR_WRITE;
+ break;
+ default:
+ case MMU_DATA_LOAD:
+ rwcause = CRIS_MMU_ERR_READ;
+ break;
+ }
+
+ /* I exception vectors 4 - 7, D 8 - 11. */
+ vect_base = (mmu + 1) * 4;
+
+ vpage = vaddr >> 13;
+
+ /*
+ * We know which index to check in each set; scan the four
+ * sets of the selected MMU.
+ */
+ idx = vpage & 15;
+ for (set = 0; set < 4; set++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+
+ tlb_vpn = hi >> 13;
+ tlb_pid = EXTRACT_FIELD(hi, 0, 7);
+ tlb_g = EXTRACT_FIELD(lo, 4, 4);
+
+ D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
+ mmu, set, idx, tlb_vpn, vpage, lo, hi);
+ if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) {
+ match = 1;
+ break;
+ }
+ }
+
+ res->bf_vec = vect_base;
+ if (match) {
+ cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
+ cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
+ cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
+ cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
+
+ tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
+ tlb_v = EXTRACT_FIELD(lo, 3, 3);
+ tlb_k = EXTRACT_FIELD(lo, 2, 2);
+ tlb_w = EXTRACT_FIELD(lo, 1, 1);
+ tlb_x = EXTRACT_FIELD(lo, 0, 0);
+
+ /*
+ * set_exception_vector(0x04, i_mmu_refill);
+ * set_exception_vector(0x05, i_mmu_invalid);
+ * set_exception_vector(0x06, i_mmu_access);
+ * set_exception_vector(0x07, i_mmu_execute);
+ * set_exception_vector(0x08, d_mmu_refill);
+ * set_exception_vector(0x09, d_mmu_invalid);
+ * set_exception_vector(0x0a, d_mmu_access);
+ * set_exception_vector(0x0b, d_mmu_write);
+ */
+ if (cfg_k && tlb_k && usermode) {
+ D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ res->bf_vec = vect_base + 2;
+ } else if (access_type == MMU_DATA_STORE && cfg_w && !tlb_w) {
+ D(printf("tlb: write protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ /* write accesses never go through the I mmu. */
+ res->bf_vec = vect_base + 3;
+ } else if (access_type == MMU_INST_FETCH && cfg_x && !tlb_x) {
+ D(printf("tlb: exec protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ res->bf_vec = vect_base + 3;
+ } else if (cfg_v && !tlb_v) {
+ D(printf("tlb: invalid %x\n", vaddr));
+ match = 0;
+ res->bf_vec = vect_base + 1;
+ }
+
+ res->prot = 0;
+ if (match) {
+ res->prot |= PAGE_READ;
+ if (tlb_w) {
+ res->prot |= PAGE_WRITE;
+ }
+ if (mmu == 0 && (cfg_x || tlb_x)) {
+ res->prot |= PAGE_EXEC;
+ }
+ } else {
+ D(dump_tlb(env, mmu));
+ }
+ } else {
+ /* If refill, provide a randomized set. */
+ set = env->mmu_rand_lfsr & 3;
+ }
+
+ if (!match && !debug) {
+ cris_mmu_update_rand_lfsr(env);
+
+ /* Compute index. */
+ idx = vpage & 15;
+
+ /* Update RW_MM_TLB_SEL. */
+ env->sregs[SFR_RW_MM_TLB_SEL] = 0;
+ set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
+ set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
+
+ /* Update RW_MM_CAUSE. */
+ set_field(&r_cause, rwcause, 8, 2);
+ set_field(&r_cause, vpage, 13, 19);
+ set_field(&r_cause, pid, 0, 8);
+ env->sregs[SFR_R_MM_CAUSE] = r_cause;
+ D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
+ }
+
+ D(printf("%s access=%u mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
+ " %x cause=%x sel=%x sp=%x %x %x\n",
+ __func__, access_type, match, env->pc,
+ vaddr, vpage,
+ tlb_vpn, tlb_pfn, tlb_pid,
+ pid,
+ r_cause,
+ env->sregs[SFR_RW_MM_TLB_SEL],
+ env->regs[R_SP], env->pregs[PR_USP], env->ksp));
+
+ res->phy = tlb_pfn << TARGET_PAGE_BITS;
+ return !match;
+}
+
+void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
+{
+ target_ulong vaddr;
+ unsigned int idx;
+ uint32_t lo, hi;
+ uint32_t tlb_vpn;
+ int tlb_pid, tlb_g, tlb_v;
+ unsigned int set;
+ unsigned int mmu;
+
+ pid &= 0xff;
+ for (mmu = 0; mmu < 2; mmu++) {
+ for (set = 0; set < 4; set++) {
+ for (idx = 0; idx < 16; idx++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+
+ tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
+ tlb_pid = EXTRACT_FIELD(hi, 0, 7);
+ tlb_g = EXTRACT_FIELD(lo, 4, 4);
+ tlb_v = EXTRACT_FIELD(lo, 3, 3);
+
+ if (tlb_v && !tlb_g && (tlb_pid == pid)) {
+ vaddr = tlb_vpn << TARGET_PAGE_BITS;
+ D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr);
+ tlb_flush_page(env_cpu(env), vaddr);
+ }
+ }
+ }
+ }
+}
+
+int cris_mmu_translate(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ MMUAccessType access_type, int mmu_idx, int debug)
+{
+ int seg;
+ int miss = 0;
+ int is_user = mmu_idx == MMU_USER_IDX;
+ uint32_t old_srs;
+
+ old_srs = env->pregs[PR_SRS];
+
+ env->pregs[PR_SRS] = access_type == MMU_INST_FETCH ? 1 : 2;
+
+ if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
+ res->phy = vaddr;
+ res->prot = PAGE_BITS;
+ goto done;
+ }
+
+ seg = vaddr >> 28;
+ if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
+ uint32_t base;
+
+ miss = 0;
+ base = cris_mmu_translate_seg(env, seg);
+ res->phy = base | (0x0fffffff & vaddr);
+ res->prot = PAGE_BITS;
+ } else {
+ miss = cris_mmu_translate_page(res, env, vaddr, access_type,
+ is_user, debug);
+ }
+ done:
+ env->pregs[PR_SRS] = old_srs;
+ return miss;
+}
diff --git a/target/cris/mmu.h b/target/cris/mmu.h
new file mode 100644
index 000000000..d57386ec6
--- /dev/null
+++ b/target/cris/mmu.h
@@ -0,0 +1,22 @@
+#ifndef TARGET_CRIS_MMU_H
+#define TARGET_CRIS_MMU_H
+
+#define CRIS_MMU_ERR_EXEC 0
+#define CRIS_MMU_ERR_READ 1
+#define CRIS_MMU_ERR_WRITE 2
+#define CRIS_MMU_ERR_FLUSH 3
+
+struct cris_mmu_result
+{
+ uint32_t phy;
+ int prot;
+ int bf_vec;
+};
+
+void cris_mmu_init(CPUCRISState *env);
+void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid);
+int cris_mmu_translate(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ MMUAccessType access_type, int mmu_idx, int debug);
+
+#endif
diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c
new file mode 100644
index 000000000..d55a18a21
--- /dev/null
+++ b/target/cris/op_helper.c
@@ -0,0 +1,581 @@
+/*
+ * CRIS helper routines
+ *
+ * Copyright (c) 2007 AXIS Communications
+ * Written by Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+//#define CRIS_OP_HELPER_DEBUG
+
+
+#ifdef CRIS_OP_HELPER_DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x)
+#define D_LOG(...) do { } while (0)
+#endif
+
+void helper_raise_exception(CPUCRISState *env, uint32_t index)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
+
+void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
+{
+#if !defined(CONFIG_USER_ONLY)
+ pid &= 0xff;
+ if (pid != (env->pregs[PR_PID] & 0xff)) {
+ cris_mmu_flush_pid(env, env->pregs[PR_PID]);
+ }
+#endif
+}
+
+void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CPUState *cs = env_cpu(env);
+
+ tlb_flush_page(cs, env->pregs[PR_SPC]);
+ tlb_flush_page(cs, new_spc);
+#endif
+}
+
+/* Used by the tlb decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
+{
+ uint32_t srs;
+ srs = env->pregs[PR_SRS];
+ srs &= 3;
+ env->sregs[srs][sreg] = env->regs[reg];
+
+#if !defined(CONFIG_USER_ONLY)
+ if (srs == 1 || srs == 2) {
+ if (sreg == 6) {
+ /* Writes to tlb-hi write to mm_cause as a side effect. */
+ env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg];
+ env->sregs[SFR_R_MM_CAUSE] = env->regs[reg];
+ } else if (sreg == 5) {
+ uint32_t set;
+ uint32_t idx;
+ uint32_t lo, hi;
+ uint32_t vaddr;
+ int tlb_v;
+
+ idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
+ set >>= 4;
+ set &= 3;
+
+ idx &= 15;
+ /* We've just made a write to tlb_lo. */
+ lo = env->sregs[SFR_RW_MM_TLB_LO];
+ /* Writes are done via r_mm_cause. */
+ hi = env->sregs[SFR_R_MM_CAUSE];
+
+ vaddr = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].hi, 13, 31);
+ vaddr <<= TARGET_PAGE_BITS;
+ tlb_v = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].lo, 3, 3);
+ env->tlbsets[srs - 1][set][idx].lo = lo;
+ env->tlbsets[srs - 1][set][idx].hi = hi;
+
+ D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
+ vaddr, tlb_v, env->pc);
+ if (tlb_v) {
+ tlb_flush_page(env_cpu(env), vaddr);
+ }
+ }
+ }
+#endif
+}
+
+void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg)
+{
+ uint32_t srs;
+ env->pregs[PR_SRS] &= 3;
+ srs = env->pregs[PR_SRS];
+
+#if !defined(CONFIG_USER_ONLY)
+ if (srs == 1 || srs == 2) {
+ uint32_t set;
+ uint32_t idx;
+ uint32_t lo, hi;
+
+ idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
+ set >>= 4;
+ set &= 3;
+ idx &= 15;
+
+ /* Update the mirror regs. */
+ hi = env->tlbsets[srs - 1][set][idx].hi;
+ lo = env->tlbsets[srs - 1][set][idx].lo;
+ env->sregs[SFR_RW_MM_TLB_HI] = hi;
+ env->sregs[SFR_RW_MM_TLB_LO] = lo;
+ }
+#endif
+ env->regs[reg] = env->sregs[srs][sreg];
+}
+
+static void cris_ccs_rshift(CPUCRISState *env)
+{
+ uint32_t ccs;
+
+ /* Apply the ccs shift. */
+ ccs = env->pregs[PR_CCS];
+ ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
+ if (ccs & U_FLAG) {
+ /* Enter user mode. */
+ env->ksp = env->regs[R_SP];
+ env->regs[R_SP] = env->pregs[PR_USP];
+ }
+
+ env->pregs[PR_CCS] = ccs;
+}
+
+void helper_rfe(CPUCRISState *env)
+{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
+ D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->btarget);
+
+ cris_ccs_rshift(env);
+
+ /* RFE sets the P_FLAG only if the R_FLAG is not set. */
+ if (!rflag) {
+ env->pregs[PR_CCS] |= P_FLAG;
+ }
+}
+
+void helper_rfn(CPUCRISState *env)
+{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
+ D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->btarget);
+
+ cris_ccs_rshift(env);
+
+ /* Set the P_FLAG only if the R_FLAG is not set. */
+ if (!rflag) {
+ env->pregs[PR_CCS] |= P_FLAG;
+ }
+
+ /* Always set the M flag. */
+ env->pregs[PR_CCS] |= M_FLAG_V32;
+}
+
+uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
+{
+ /* FIXME: clean this up. */
+
+ /*
+ * Design reference:
+ * The N flag is set according to the selected bit in the dest reg.
+ * The Z flag is set if the selected bit and all bits to the right are
+ * zero.
+ * The X flag is cleared.
+ * Other flags are left untouched.
+ * The destination reg is not affected.
+ */
+ unsigned int fz, sbit, bset, mask, masked_t0;
+
+ sbit = t1 & 31;
+ bset = !!(t0 & (1 << sbit));
+ mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1;
+ masked_t0 = t0 & mask;
+ fz = !(masked_t0 | bset);
+
+ /* Clear the X, N and Z flags. */
+ ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG);
+ if (env->pregs[PR_VR] < 32) {
+ ccs &= ~(V_FLAG | C_FLAG);
+ }
+ /* Set the N and Z flags accordingly. */
+ ccs |= (bset << 3) | (fz << 2);
+ return ccs;
+}
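+
+/*
+ * Two illustrative cases of the rules above: btst with t0 == 0x08,
+ * t1 == 3 selects a set bit, so N is set and Z is clear; with
+ * t0 == 0, t1 == 5 the selected bit and everything right of it are
+ * zero, so Z is set and N is clear.
+ */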
+
+static inline uint32_t evaluate_flags_writeback(CPUCRISState *env,
+ uint32_t flags, uint32_t ccs)
+{
+ unsigned int x, z, mask;
+
+ /* Extended arithmetic: leave the Z flag alone. */
+ x = env->cc_x;
+ mask = env->cc_mask | X_FLAG;
+ if (x) {
+ z = flags & Z_FLAG;
+ mask = mask & ~z;
+ }
+ flags &= mask;
+
+ /* All insns clear the X flag, except setf and clrf. */
+ ccs &= ~mask;
+ ccs |= flags;
+ return ccs;
+}
+
+uint32_t helper_evaluate_flags_muls(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
+{
+ uint32_t flags = 0;
+ int64_t tmp;
+ int dneg;
+
+ dneg = ((int32_t)res) < 0;
+
+ tmp = mof;
+ tmp <<= 32;
+ tmp |= res;
+ if (tmp == 0) {
+ flags |= Z_FLAG;
+ } else if (tmp < 0) {
+ flags |= N_FLAG;
+ }
+ if ((dneg && mof != -1) || (!dneg && mof != 0)) {
+ flags |= V_FLAG;
+ }
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_mulu(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
+{
+ uint32_t flags = 0;
+ uint64_t tmp;
+
+ tmp = mof;
+ tmp <<= 32;
+ tmp |= res;
+ if (tmp == 0) {
+ flags |= Z_FLAG;
+ } else if (tmp >> 63) {
+ flags |= N_FLAG;
+ }
+ if (mof) {
+ flags |= V_FLAG;
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = src & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L) {
+ flags |= N_FLAG;
+ if (!src && !dst) {
+ flags |= V_FLAG;
+ } else if (src & dst) {
+ flags |= R_FLAG;
+ }
+ } else {
+ if (res == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (src & dst) {
+ flags |= V_FLAG;
+ }
+ if (dst | src) {
+ flags |= R_FLAG;
+ }
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = src & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L) {
+ flags |= N_FLAG;
+ if (!src && !dst) {
+ flags |= V_FLAG;
+ } else if (src & dst) {
+ flags |= C_FLAG;
+ }
+ } else {
+ if (res == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (src & dst) {
+ flags |= V_FLAG;
+ }
+ if (dst | src) {
+ flags |= C_FLAG;
+ }
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = (~src) & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L) {
+ flags |= N_FLAG;
+ if (!src && !dst) {
+ flags |= V_FLAG;
+ } else if (src & dst) {
+ flags |= C_FLAG;
+ }
+ } else {
+ if (res == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (src & dst) {
+ flags |= V_FLAG;
+ }
+ if (dst | src) {
+ flags |= C_FLAG;
+ }
+ }
+
+ flags ^= C_FLAG;
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_move_4(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ if ((int32_t)res < 0) {
+ flags |= N_FLAG;
+ } else if (res == 0L) {
+ flags |= Z_FLAG;
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_move_2(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ if ((int16_t)res < 0L) {
+ flags |= N_FLAG;
+ } else if (res == 0) {
+ flags |= Z_FLAG;
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+/*
+ * TODO: This is expensive. We could split things up and only evaluate part of
+ * CCR on a need to know basis. For now, we simply re-evaluate everything.
+ */
+void helper_evaluate_flags(CPUCRISState *env)
+{
+ uint32_t src, dst, res;
+ uint32_t flags = 0;
+
+ src = env->cc_src;
+ dst = env->cc_dest;
+ res = env->cc_result;
+
+ if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
+ src = ~src;
+ }
+
+ /*
+ * Now, evaluate the flags. This stuff is based on
+ * Per Zander's CRISv10 simulator.
+ */
+ switch (env->cc_size) {
+ case 1:
+ if ((res & 0x80L) != 0L) {
+ flags |= N_FLAG;
+ if (((src & 0x80L) == 0L) && ((dst & 0x80L) == 0L)) {
+ flags |= V_FLAG;
+ } else if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
+ flags |= C_FLAG;
+ }
+ } else {
+ if ((res & 0xFFL) == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) {
+ flags |= V_FLAG;
+ }
+ if ((dst & 0x80L) != 0L || (src & 0x80L) != 0L) {
+ flags |= C_FLAG;
+ }
+ }
+ break;
+ case 2:
+ if ((res & 0x8000L) != 0L) {
+ flags |= N_FLAG;
+ if (((src & 0x8000L) == 0L) && ((dst & 0x8000L) == 0L)) {
+ flags |= V_FLAG;
+ } else if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
+ flags |= C_FLAG;
+ }
+ } else {
+ if ((res & 0xFFFFL) == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) {
+ flags |= V_FLAG;
+ }
+ if ((dst & 0x8000L) != 0L || (src & 0x8000L) != 0L) {
+ flags |= C_FLAG;
+ }
+ }
+ break;
+ case 4:
+ if ((res & 0x80000000L) != 0L) {
+ flags |= N_FLAG;
+ if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) {
+ flags |= V_FLAG;
+ } else if (((src & 0x80000000L) != 0L) &&
+ ((dst & 0x80000000L) != 0L)) {
+ flags |= C_FLAG;
+ }
+ } else {
+ if (res == 0L) {
+ flags |= Z_FLAG;
+ }
+ if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) {
+ flags |= V_FLAG;
+ }
+ if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) {
+ flags |= C_FLAG;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) {
+ flags ^= C_FLAG;
+ }
+
+ env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
+ env->pregs[PR_CCS]);
+}
+
+void helper_top_evaluate_flags(CPUCRISState *env)
+{
+ switch (env->cc_op) {
+ case CC_OP_MCP:
+ env->pregs[PR_CCS]
+ = helper_evaluate_flags_mcp(env, env->pregs[PR_CCS],
+ env->cc_src, env->cc_dest,
+ env->cc_result);
+ break;
+ case CC_OP_MULS:
+ env->pregs[PR_CCS]
+ = helper_evaluate_flags_muls(env, env->pregs[PR_CCS],
+ env->cc_result, env->pregs[PR_MOF]);
+ break;
+ case CC_OP_MULU:
+ env->pregs[PR_CCS]
+ = helper_evaluate_flags_mulu(env, env->pregs[PR_CCS],
+ env->cc_result, env->pregs[PR_MOF]);
+ break;
+ case CC_OP_MOVE:
+ case CC_OP_AND:
+ case CC_OP_OR:
+ case CC_OP_XOR:
+ case CC_OP_ASR:
+ case CC_OP_LSR:
+ case CC_OP_LSL:
+ switch (env->cc_size) {
+ case 4:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_move_4(env,
+ env->pregs[PR_CCS],
+ env->cc_result);
+ break;
+ case 2:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_move_2(env,
+ env->pregs[PR_CCS],
+ env->cc_result);
+ break;
+ default:
+ helper_evaluate_flags(env);
+ break;
+ }
+ break;
+ case CC_OP_FLAGS:
+ /* live. */
+ break;
+ case CC_OP_SUB:
+ case CC_OP_CMP:
+ if (env->cc_size == 4) {
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_sub_4(env,
+ env->pregs[PR_CCS],
+ env->cc_src, env->cc_dest,
+ env->cc_result);
+ } else {
+ helper_evaluate_flags(env);
+ }
+ break;
+ default:
+ switch (env->cc_size) {
+ case 4:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_alu_4(env,
+ env->pregs[PR_CCS],
+ env->cc_src, env->cc_dest,
+ env->cc_result);
+ break;
+ default:
+ helper_evaluate_flags(env);
+ break;
+ }
+ break;
+ }
+}
diff --git a/target/cris/opcode-cris.h b/target/cris/opcode-cris.h
new file mode 100644
index 000000000..40509c88d
--- /dev/null
+++ b/target/cris/opcode-cris.h
@@ -0,0 +1,355 @@
+/* cris.h -- Header file for CRIS opcode and register tables.
+ Copyright (C) 2000, 2001, 2004 Free Software Foundation, Inc.
+ Contributed by Axis Communications AB, Lund, Sweden.
+ Originally written for GAS 1.38.1 by Mikael Asker.
+ Updated, BFDized and GNUified by Hans-Peter Nilsson.
+
+This file is part of GAS, GDB and the GNU binutils.
+
+GAS, GDB, and GNU binutils is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2, or (at your
+option) any later version.
+
+GAS, GDB, and GNU binutils are distributed in the hope that they will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef TARGET_CRIS_OPCODE_CRIS_H
+#define TARGET_CRIS_OPCODE_CRIS_H
+
+#if !defined(__STDC__) && !defined(const)
+#define const
+#endif
+
+
+/* Registers. */
+#define MAX_REG (15)
+#define CRIS_REG_SP (14)
+#define CRIS_REG_PC (15)
+
+/* CPU version control of disassembly and assembly of instructions.
+ May affect how the instruction is assembled, at least the size of
+ immediate operands. */
+enum cris_insn_version_usage
+{
+ /* Any version. */
+ cris_ver_version_all=0,
+
+ /* Indeterminate (intended for disassembly only, or obsolete). */
+ cris_ver_warning,
+
+ /* Only for v0..3 (Etrax 1..4). */
+ cris_ver_v0_3,
+
+ /* Only for v3 or higher (ETRAX 4 and beyond). */
+ cris_ver_v3p,
+
+ /* Only for v8 (Etrax 100). */
+ cris_ver_v8,
+
+ /* Only for v8 or higher (ETRAX 100, ETRAX 100 LX). */
+ cris_ver_v8p,
+
+ /* Only for v0..10. FIXME: Not sure what to do with this. */
+ cris_ver_sim_v0_10,
+
+ /* Only for v0..10. */
+ cris_ver_v0_10,
+
+ /* Only for v3..10. (ETRAX 4, ETRAX 100 and ETRAX 100 LX). */
+ cris_ver_v3_10,
+
+ /* Only for v8..10 (ETRAX 100 and ETRAX 100 LX). */
+ cris_ver_v8_10,
+
+ /* Only for v10 (ETRAX 100 LX) and same series. */
+ cris_ver_v10,
+
+ /* Only for v10 (ETRAX 100 LX) and same series. */
+ cris_ver_v10p,
+
+ /* Only for v32 or higher (codename GUINNESS).
+ Of course some or all these of may change to cris_ver_v32p if/when
+ there's a new revision. */
+ cris_ver_v32p
+};
+
+
+/* Special registers. */
+struct cris_spec_reg
+{
+ const char *const name;
+ unsigned int number;
+
+ /* The size of the register. */
+ unsigned int reg_size;
+
+ /* What CPU version the special register of that name is implemented
+ in. If cris_ver_warning, emit an unimplemented-warning. */
+ enum cris_insn_version_usage applicable_version;
+
+ /* There might be a specific warning for using a special register
+ here. */
+ const char *const warning;
+};
+extern const struct cris_spec_reg cris_spec_regs[];
+
+
+/* Support registers (kind of special too, but not named as such). */
+struct cris_support_reg
+{
+ const char *const name;
+ unsigned int number;
+};
+extern const struct cris_support_reg cris_support_regs[];
+
+/* Opcode-dependent constants. */
+#define AUTOINCR_BIT (0x04)
+
+/* Prefixes. */
+#define BDAP_QUICK_OPCODE (0x0100)
+#define BDAP_QUICK_Z_BITS (0x0e00)
+
+#define BIAP_OPCODE (0x0540)
+#define BIAP_Z_BITS (0x0a80)
+
+#define DIP_OPCODE (0x0970)
+#define DIP_Z_BITS (0xf280)
+
+#define BDAP_INDIR_LOW (0x40)
+#define BDAP_INDIR_LOW_Z (0x80)
+#define BDAP_INDIR_HIGH (0x09)
+#define BDAP_INDIR_HIGH_Z (0x02)
+
+#define BDAP_INDIR_OPCODE (BDAP_INDIR_HIGH * 0x0100 + BDAP_INDIR_LOW)
+#define BDAP_INDIR_Z_BITS (BDAP_INDIR_HIGH_Z * 0x100 + BDAP_INDIR_LOW_Z)
+#define BDAP_PC_LOW (BDAP_INDIR_LOW + CRIS_REG_PC)
+#define BDAP_INCR_HIGH (BDAP_INDIR_HIGH + AUTOINCR_BIT)
+
+/* No prefix must have this code for its "match" bits in the
+ opcode-table. "BCC .+2" will do nicely. */
+#define NO_CRIS_PREFIX 0
+
+/* Definitions for condition codes. */
+#define CC_CC 0x0
+#define CC_HS 0x0
+#define CC_CS 0x1
+#define CC_LO 0x1
+#define CC_NE 0x2
+#define CC_EQ 0x3
+#define CC_VC 0x4
+#define CC_VS 0x5
+#define CC_PL 0x6
+#define CC_MI 0x7
+#define CC_LS 0x8
+#define CC_HI 0x9
+#define CC_GE 0xA
+#define CC_LT 0xB
+#define CC_GT 0xC
+#define CC_LE 0xD
+#define CC_A 0xE
+#define CC_EXT 0xF
+
+/* A table of strings "cc", "cs"... indexed with condition code
+ values as above. */
+extern const char *const cris_cc_strings[];
+
+/* Bcc quick. */
+#define BRANCH_QUICK_LOW (0)
+#define BRANCH_QUICK_HIGH (0)
+#define BRANCH_QUICK_OPCODE (BRANCH_QUICK_HIGH * 0x0100 + BRANCH_QUICK_LOW)
+#define BRANCH_QUICK_Z_BITS (0x0F00)
+
+/* BA quick. */
+#define BA_QUICK_HIGH (BRANCH_QUICK_HIGH + CC_A * 0x10)
+#define BA_QUICK_OPCODE (BA_QUICK_HIGH * 0x100 + BRANCH_QUICK_LOW)
+
+/* Bcc [PC+]. */
+#define BRANCH_PC_LOW (0xFF)
+#define BRANCH_INCR_HIGH (0x0D)
+#define BA_PC_INCR_OPCODE \
+ ((BRANCH_INCR_HIGH + CC_A * 0x10) * 0x0100 + BRANCH_PC_LOW)
+
+/* Jump. */
+/* Note that old versions generated special register 8 (in high bits)
+ and not-that-old versions recognized it as a jump-instruction.
+ That opcode now belongs to JUMPU. */
+#define JUMP_INDIR_OPCODE (0x0930)
+#define JUMP_INDIR_Z_BITS (0xf2c0)
+#define JUMP_PC_INCR_OPCODE \
+ (JUMP_INDIR_OPCODE + AUTOINCR_BIT * 0x0100 + CRIS_REG_PC)
+
+#define MOVE_M_TO_PREG_OPCODE 0x0a30
+#define MOVE_M_TO_PREG_ZBITS 0x01c0
+
+/* BDAP.D N,PC. */
+#define MOVE_PC_INCR_OPCODE_PREFIX \
+ (((BDAP_INCR_HIGH | (CRIS_REG_PC << 4)) << 8) | BDAP_PC_LOW | (2 << 4))
+#define MOVE_PC_INCR_OPCODE_SUFFIX \
+ (MOVE_M_TO_PREG_OPCODE | CRIS_REG_PC | (AUTOINCR_BIT << 8))
+
+#define JUMP_PC_INCR_OPCODE_V32 (0x0DBF)
+
+/* BA DWORD (V32). */
+#define BA_DWORD_OPCODE (0x0EBF)
+
+/* Nop. */
+#define NOP_OPCODE (0x050F)
+#define NOP_Z_BITS (0xFFFF ^ NOP_OPCODE)
+
+#define NOP_OPCODE_V32 (0x05B0)
+#define NOP_Z_BITS_V32 (0xFFFF ^ NOP_OPCODE_V32)
+
+/* For the compatibility mode, let's use "MOVE R0,P0". Doesn't affect
+ registers or flags. Unfortunately shuts off interrupts for one cycle
+ for < v32, but there doesn't seem to be any alternative without that
+ effect. */
+#define NOP_OPCODE_COMMON (0x630)
+#define NOP_OPCODE_ZBITS_COMMON (0xffff & ~NOP_OPCODE_COMMON)
+
+/* LAPC.D */
+#define LAPC_DWORD_OPCODE (0x0D7F)
+#define LAPC_DWORD_Z_BITS (0x0fff & ~LAPC_DWORD_OPCODE)
+
+/* Structure of an opcode table entry. */
+enum cris_imm_oprnd_size_type
+{
+ /* No size is applicable. */
+ SIZE_NONE,
+
+ /* Always 32 bits. */
+ SIZE_FIX_32,
+
+ /* Indicated by size of special register. */
+ SIZE_SPEC_REG,
+
+ /* Indicated by size field, signed. */
+ SIZE_FIELD_SIGNED,
+
+ /* Indicated by size field, unsigned. */
+ SIZE_FIELD_UNSIGNED,
+
+ /* Indicated by size field, no sign implied. */
+ SIZE_FIELD
+};
+
+/* For GDB. FIXME: Is this the best way to handle opcode
+ interpretation? */
+enum cris_op_type
+{
+ cris_not_implemented_op = 0,
+ cris_abs_op,
+ cris_addi_op,
+ cris_asr_op,
+ cris_asrq_op,
+ cris_ax_ei_setf_op,
+ cris_bdap_prefix,
+ cris_biap_prefix,
+ cris_break_op,
+ cris_btst_nop_op,
+ cris_clearf_di_op,
+ cris_dip_prefix,
+ cris_dstep_logshift_mstep_neg_not_op,
+ cris_eight_bit_offset_branch_op,
+ cris_move_mem_to_reg_movem_op,
+ cris_move_reg_to_mem_movem_op,
+ cris_move_to_preg_op,
+ cris_muls_op,
+ cris_mulu_op,
+ cris_none_reg_mode_add_sub_cmp_and_or_move_op,
+ cris_none_reg_mode_clear_test_op,
+ cris_none_reg_mode_jump_op,
+ cris_none_reg_mode_move_from_preg_op,
+ cris_quick_mode_add_sub_op,
+ cris_quick_mode_and_cmp_move_or_op,
+ cris_quick_mode_bdap_prefix,
+ cris_reg_mode_add_sub_cmp_and_or_move_op,
+ cris_reg_mode_clear_op,
+ cris_reg_mode_jump_op,
+ cris_reg_mode_move_from_preg_op,
+ cris_reg_mode_test_op,
+ cris_scc_op,
+ cris_sixteen_bit_offset_branch_op,
+ cris_three_operand_add_sub_cmp_and_or_op,
+ cris_three_operand_bound_op,
+ cris_two_operand_bound_op,
+ cris_xor_op
+};
+
+struct cris_opcode
+{
+ /* The name of the insn. */
+ const char *name;
+
+ /* Bits that must be 1 for a match. */
+ unsigned int match;
+
+ /* Bits that must be 0 for a match. */
+ unsigned int lose;
+
+ /* See the table in "opcodes/cris-opc.c". */
+ const char *args;
+
+ /* Nonzero if this is a delayed branch instruction. */
+ char delayed;
+
+ /* Size of immediate operands. */
+ enum cris_imm_oprnd_size_type imm_oprnd_size;
+
+ /* Indicates which version this insn was first implemented in. */
+ enum cris_insn_version_usage applicable_version;
+
+ /* What kind of operation this is. */
+ enum cris_op_type op;
+};
+extern const struct cris_opcode cris_opcodes[];
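+
+/*
+ * Presumed decode test, sketched from the match/lose comments above
+ * (this helper is an illustration, not part of the binutils API): an
+ * instruction word matches an entry when every "match" bit is set
+ * and every "lose" bit is clear.
+ */
+static inline int cris_opcode_matches(unsigned int insn,
+ const struct cris_opcode *op)
+{
+ return (insn & op->match) == op->match && (insn & op->lose) == 0;
+}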
+
+
+/* These macros are for the target-specific flags in disassemble_info
+ used at disassembly. */
+
+/* This insn accesses memory. This flag is more trustworthy than
+ checking insn_type for "dis_dref" which does not work for
+ e.g. "JSR [foo]". */
+#define CRIS_DIS_FLAG_MEMREF (1 << 0)
+
+/* The "target" field holds a register number. */
+#define CRIS_DIS_FLAG_MEM_TARGET_IS_REG (1 << 1)
+
+/* The "target2" field holds a register number; add it to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_IS_REG (1 << 2)
+
+/* Yet another add-on: the register in "target2" must be multiplied
+ by 2 before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MULT2 (1 << 3)
+
+/* Yet another add-on: the register in "target2" must be multiplied
+ by 4 (mutually exclusive with .._MULT2). */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MULT4 (1 << 4)
+
+/* The register in "target2" is an indirect memory reference (of the
+ register there), add to "target". Assumed size is dword (mutually
+ exclusive with .._MULT[24]). */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM (1 << 5)
+
+/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "byte";
+ sign-extended before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE (1 << 6)
+
+/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "word";
+ sign-extended before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD (1 << 7)
+
+#endif /* TARGET_CRIS_OPCODE_CRIS_H */
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/target/cris/translate.c b/target/cris/translate.c
new file mode 100644
index 000000000..59325b388
--- /dev/null
+++ b/target/cris/translate.c
@@ -0,0 +1,3398 @@
+/*
+ * CRIS emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2008 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * FIXME:
+ * The condition code translation is in need of attention.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-proto.h"
+#include "mmu.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+#include "crisv32-decode.h"
+#include "qemu/qemu-print.h"
+
+#include "exec/helper-gen.h"
+
+#include "exec/log.h"
+
+
+#define DISAS_CRIS 0
+#if DISAS_CRIS
+# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DIS(...) do { } while (0)
+#endif
+
+#define D(x)
+#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
+#define BUG_ON(x) ({if (x) BUG();})
+
+/*
+ * Target-specific is_jmp field values
+ */
+/* Only pc was modified dynamically */
+#define DISAS_JUMP DISAS_TARGET_0
+/* Cpu state was modified dynamically, including pc */
+#define DISAS_UPDATE DISAS_TARGET_1
+/* Cpu state was modified dynamically, excluding pc -- use npc */
+#define DISAS_UPDATE_NEXT DISAS_TARGET_2
+/* PC update for delayed branch, see cpustate_changed otherwise */
+#define DISAS_DBRANCH DISAS_TARGET_3
+
+/* Used by the decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+#define CC_MASK_NZ 0xc
+#define CC_MASK_NZV 0xe
+#define CC_MASK_NZVC 0xf
+#define CC_MASK_RNZV 0x10e
+
+static TCGv cpu_R[16];
+static TCGv cpu_PR[16];
+static TCGv cc_x;
+static TCGv cc_src;
+static TCGv cc_dest;
+static TCGv cc_result;
+static TCGv cc_op;
+static TCGv cc_size;
+static TCGv cc_mask;
+
+static TCGv env_btaken;
+static TCGv env_btarget;
+static TCGv env_pc;
+
+#include "exec/gen-icount.h"
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ DisasContextBase base;
+
+ CRISCPU *cpu;
+ target_ulong pc, ppc;
+
+ /* Decoder. */
+ unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc);
+ uint32_t ir;
+ uint32_t opcode;
+ unsigned int op1;
+ unsigned int op2;
+ unsigned int zsize, zzsize;
+ unsigned int mode;
+ unsigned int postinc;
+
+ unsigned int size;
+ unsigned int src;
+ unsigned int dst;
+ unsigned int cond;
+
+ int update_cc;
+ int cc_op;
+ int cc_size;
+ uint32_t cc_mask;
+
+ int cc_size_uptodate; /* -1 invalid or last written value. */
+
+ int cc_x_uptodate; /* 1 - ccs, 2 - known | X_FLAG. 0 not up-to-date. */
+ int flags_uptodate; /* Whether or not $ccs is up-to-date. */
+ int flags_x;
+
+ int clear_x; /* Clear x after this insn? */
+ int clear_prefix; /* Clear prefix after this insn? */
+ int clear_locked_irq; /* Clear the irq lockout. */
+ int cpustate_changed;
+ unsigned int tb_flags; /* tb dependent flags. */
+
+#define JMP_NOJMP 0
+#define JMP_DIRECT 1
+#define JMP_DIRECT_CC 2
+#define JMP_INDIRECT 3
+ int jmp; /* JMP_NOJMP, JMP_DIRECT, JMP_DIRECT_CC or JMP_INDIRECT. */
+ uint32_t jmp_pc;
+
+ int delayed_branch;
+} DisasContext;
+
+static void gen_BUG(DisasContext *dc, const char *file, int line)
+{
+ cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc);
+}
+
+static const char * const regnames_v32[] =
+{
+ "$r0", "$r1", "$r2", "$r3",
+ "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11",
+ "$r12", "$r13", "$sp", "$acr",
+};
+
+static const char * const pregnames_v32[] =
+{
+ "$bz", "$vr", "$pid", "$srs",
+ "$wz", "$exs", "$eda", "$mof",
+ "$dz", "$ebp", "$erp", "$srp",
+ "$nrp", "$ccs", "$usp", "$spc",
+};
+
+/* We need this table to handle preg-moves with implicit width. */
+static const int preg_sizes[] = {
+ 1, /* bz. */
+ 1, /* vr. */
+ 4, /* pid. */
+ 1, /* srs. */
+ 2, /* wz. */
+ 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+};
+
+#define t_gen_mov_TN_env(tn, member) \
+ tcg_gen_ld_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+#define t_gen_mov_env_TN(member, tn) \
+ tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+#define t_gen_movi_env_TN(member, c) \
+ do { \
+ TCGv tc = tcg_const_tl(c); \
+ t_gen_mov_env_TN(member, tc); \
+ tcg_temp_free(tc); \
+ } while (0)
+
+static inline void t_gen_mov_TN_preg(TCGv tn, int r)
+{
+ assert(r >= 0 && r <= 15);
+ if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
+ tcg_gen_movi_tl(tn, 0);
+ } else if (r == PR_VR) {
+ tcg_gen_movi_tl(tn, 32);
+ } else {
+ tcg_gen_mov_tl(tn, cpu_PR[r]);
+ }
+}
+static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
+{
+ assert(r >= 0 && r <= 15);
+ if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
+ return;
+ } else if (r == PR_SRS) {
+ tcg_gen_andi_tl(cpu_PR[r], tn, 3);
+ } else {
+ if (r == PR_PID) {
+ gen_helper_tlb_flush_pid(cpu_env, tn);
+ }
+ if (dc->tb_flags & S_FLAG && r == PR_SPC) {
+ gen_helper_spc_write(cpu_env, tn);
+ } else if (r == PR_CCS) {
+ dc->cpustate_changed = 1;
+ }
+ tcg_gen_mov_tl(cpu_PR[r], tn);
+ }
+}
+
+/* Sign extend at translation time. */
+static int sign_extend(unsigned int val, unsigned int width)
+{
+ int sval;
+
+ /* LSL. */
+ val <<= 31 - width;
+ sval = val;
+ /* ASR. */
+ sval >>= 31 - width;
+ return sval;
+}
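+
+/*
+ * Examples (illustrative): sign_extend(0xff, 7) == -1 and
+ * sign_extend(0x7f, 7) == 127; "width" names the sign-bit position,
+ * not the field width.
+ */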
+
+static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
+ unsigned int size, unsigned int sign)
+{
+ int r;
+
+ switch (size) {
+ case 4:
+ {
+ r = cpu_ldl_code(env, addr);
+ break;
+ }
+ case 2:
+ {
+ if (sign) {
+ r = cpu_ldsw_code(env, addr);
+ } else {
+ r = cpu_lduw_code(env, addr);
+ }
+ break;
+ }
+ case 1:
+ {
+ if (sign) {
+ r = cpu_ldsb_code(env, addr);
+ } else {
+ r = cpu_ldub_code(env, addr);
+ }
+ break;
+ }
+ default:
+ cpu_abort(CPU(dc->cpu), "Invalid fetch size %d\n", size);
+ break;
+ }
+ return r;
+}
+
+static void cris_lock_irq(DisasContext *dc)
+{
+ dc->clear_locked_irq = 0;
+ t_gen_movi_env_TN(locked_irq, 1);
+}
+
+static inline void t_gen_raise_exception(uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_const_tl(31);
+ tcg_gen_shl_tl(d, a, b);
+
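+    /* (31 - b) >> 31 is all-ones exactly when b > 31; the and/xor
+       below clear d for oversized shift counts, where the TCG shift
+       result would otherwise be undefined. */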
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_and_tl(t0, t0, d);
+ tcg_gen_xor_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_temp_new();
+ tcg_gen_shr_tl(d, a, b);
+
+ tcg_gen_movi_tl(t_31, 31);
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_and_tl(t0, t0, d);
+ tcg_gen_xor_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_asr(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_temp_new();
+ tcg_gen_sar_tl(d, a, b);
+
+ tcg_gen_movi_tl(t_31, 31);
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_or_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t = tcg_temp_new();
+
+ /*
+ * d <<= 1
+ * if (d >= s)
+ * d -= s;
+ */
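+    /* This is one step of a restoring unsigned division: shift the
+       partial remainder left and conditionally subtract the divisor. */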
+ tcg_gen_shli_tl(d, a, 1);
+ tcg_gen_sub_tl(t, d, b);
+ tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d);
+ tcg_temp_free(t);
+}
+
+static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
+{
+ TCGv t;
+
+ /*
+ * d <<= 1
+ * if (n)
+ * d += s;
+ */
+ t = tcg_temp_new();
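+    /* One multiply step: CCS bit 3 (the N flag) is replicated across
+       t by the shl/sar pair below and used to mask the addend. */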
+ tcg_gen_shli_tl(d, a, 1);
+ tcg_gen_shli_tl(t, ccs, 31 - 3);
+ tcg_gen_sari_tl(t, t, 31);
+ tcg_gen_and_tl(t, t, b);
+ tcg_gen_add_tl(d, d, t);
+ tcg_temp_free(t);
+}
+
+/* Extended arithmetic on CRIS. */
+static inline void t_gen_add_flag(TCGv d, int flag)
+{
+ TCGv c;
+
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* Propagate carry into d. */
+ tcg_gen_andi_tl(c, c, 1 << flag);
+ if (flag) {
+ tcg_gen_shri_tl(c, c, flag);
+ }
+ tcg_gen_add_tl(d, d, c);
+ tcg_temp_free(c);
+}
+
+static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
+{
+ if (dc->flags_x) {
+ TCGv c = tcg_temp_new();
+
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* C flag is already at bit 0. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_add_tl(d, d, c);
+ tcg_temp_free(c);
+ }
+}
+
+static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
+{
+ if (dc->flags_x) {
+ TCGv c = tcg_temp_new();
+
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* C flag is already at bit 0. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_sub_tl(d, d, c);
+ tcg_temp_free(c);
+ }
+}
+
+/* Swap the two bytes within each half word of the s operand.
+ T0 = ((T0 << 8) & 0xff00ff00) | ((T0 >> 8) & 0x00ff00ff) */
+static inline void t_gen_swapb(TCGv d, TCGv s)
+{
+ TCGv t, org_s;
+
+ t = tcg_temp_new();
+ org_s = tcg_temp_new();
+
+ /* d and s may refer to the same object. */
+ tcg_gen_mov_tl(org_s, s);
+ tcg_gen_shli_tl(t, org_s, 8);
+ tcg_gen_andi_tl(d, t, 0xff00ff00);
+ tcg_gen_shri_tl(t, org_s, 8);
+ tcg_gen_andi_tl(t, t, 0x00ff00ff);
+ tcg_gen_or_tl(d, d, t);
+ tcg_temp_free(t);
+ tcg_temp_free(org_s);
+}
+
+/* Swap the halfwords of the s operand. */
+static inline void t_gen_swapw(TCGv d, TCGv s)
+{
+ TCGv t;
+    /* d and s may refer to the same object. */
+ t = tcg_temp_new();
+ tcg_gen_mov_tl(t, s);
+ tcg_gen_shli_tl(d, t, 16);
+ tcg_gen_shri_tl(t, t, 16);
+ tcg_gen_or_tl(d, d, t);
+ tcg_temp_free(t);
+}
+
+/* Reverse the bits within each byte.
+ T0 = (((T0 << 7) & 0x80808080) |
+ ((T0 << 5) & 0x40404040) |
+ ((T0 << 3) & 0x20202020) |
+ ((T0 << 1) & 0x10101010) |
+ ((T0 >> 1) & 0x08080808) |
+ ((T0 >> 3) & 0x04040404) |
+ ((T0 >> 5) & 0x02020202) |
+ ((T0 >> 7) & 0x01010101));
+ */
+static void t_gen_swapr(TCGv d, TCGv s)
+{
+ static const struct {
+ int shift; /* LSL when positive, LSR when negative. */
+ uint32_t mask;
+ } bitrev[] = {
+ {7, 0x80808080},
+ {5, 0x40404040},
+ {3, 0x20202020},
+ {1, 0x10101010},
+ {-1, 0x08080808},
+ {-3, 0x04040404},
+ {-5, 0x02020202},
+ {-7, 0x01010101}
+ };
+ int i;
+ TCGv t, org_s;
+
+    /* d and s may refer to the same object. */
+ t = tcg_temp_new();
+ org_s = tcg_temp_new();
+ tcg_gen_mov_tl(org_s, s);
+
+ tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
+ tcg_gen_andi_tl(d, t, bitrev[0].mask);
+ for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
+ if (bitrev[i].shift >= 0) {
+ tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
+ } else {
+ tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
+ }
+ tcg_gen_andi_tl(t, t, bitrev[i].mask);
+ tcg_gen_or_tl(d, d, t);
+ }
+ tcg_temp_free(t);
+ tcg_temp_free(org_s);
+}
+
+static bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+ return translator_use_goto_tb(&dc->base, dest);
+}
+
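+/* Emit a jump to dest, chaining TBs directly when allowed and falling
+   back to the indirect tb lookup helper otherwise. */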
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(env_pc, dest);
+ tcg_gen_exit_tb(dc->base.tb, n);
+ } else {
+ tcg_gen_movi_tl(env_pc, dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
+
+static inline void cris_clear_x_flag(DisasContext *dc)
+{
+ if (dc->flags_x) {
+ dc->flags_uptodate = 0;
+ }
+ dc->flags_x = 0;
+}
+
+static void cris_flush_cc_state(DisasContext *dc)
+{
+ if (dc->cc_size_uptodate != dc->cc_size) {
+ tcg_gen_movi_tl(cc_size, dc->cc_size);
+ dc->cc_size_uptodate = dc->cc_size;
+ }
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+ tcg_gen_movi_tl(cc_mask, dc->cc_mask);
+}
+
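+/*
+ * Flags are evaluated lazily: most insns only record cc_op, cc_src,
+ * cc_dest and cc_result, and the live $ccs value is reconstructed
+ * here on demand.
+ */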
+static void cris_evaluate_flags(DisasContext *dc)
+{
+ if (dc->flags_uptodate) {
+ return;
+ }
+
+ cris_flush_cc_state(dc);
+
+ switch (dc->cc_op) {
+ case CC_OP_MCP:
+ gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src,
+ cc_dest, cc_result);
+ break;
+ case CC_OP_MULS:
+ gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_result,
+ cpu_PR[PR_MOF]);
+ break;
+ case CC_OP_MULU:
+ gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_result,
+ cpu_PR[PR_MOF]);
+ break;
+ case CC_OP_MOVE:
+ case CC_OP_AND:
+ case CC_OP_OR:
+ case CC_OP_XOR:
+ case CC_OP_ASR:
+ case CC_OP_LSR:
+ case CC_OP_LSL:
+ switch (dc->cc_size) {
+ case 4:
+ gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
+ break;
+ case 2:
+ gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
+ break;
+ default:
+ gen_helper_evaluate_flags(cpu_env);
+ break;
+ }
+ break;
+ case CC_OP_FLAGS:
+        /* The flags are already live in $ccs; nothing to do. */
+ break;
+ case CC_OP_SUB:
+ case CC_OP_CMP:
+ if (dc->cc_size == 4) {
+ gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
+ } else {
+ gen_helper_evaluate_flags(cpu_env);
+ }
+
+ break;
+ default:
+ switch (dc->cc_size) {
+ case 4:
+ gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
+ break;
+ default:
+ gen_helper_evaluate_flags(cpu_env);
+ break;
+ }
+ break;
+ }
+
+ if (dc->flags_x) {
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
+ } else if (dc->cc_op == CC_OP_FLAGS) {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
+ }
+ dc->flags_uptodate = 1;
+}
+
+static void cris_cc_mask(DisasContext *dc, unsigned int mask)
+{
+ uint32_t ovl;
+
+ if (!mask) {
+ dc->update_cc = 0;
+ return;
+ }
+
+ /* Check if we need to evaluate the condition codes due to
+ CC overlaying. */
+ ovl = (dc->cc_mask ^ mask) & ~mask;
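+    /* ovl holds the flags that were live under the old mask but are
+       not written by the new op; they must be materialized first. */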
+ if (ovl) {
+        /* TODO: optimize this case. It triggers all the time. */
+ cris_evaluate_flags(dc);
+ }
+ dc->cc_mask = mask;
+ dc->update_cc = 1;
+}
+
+static void cris_update_cc_op(DisasContext *dc, int op, int size)
+{
+ dc->cc_op = op;
+ dc->cc_size = size;
+ dc->flags_uptodate = 0;
+}
+
+static inline void cris_update_cc_x(DisasContext *dc)
+{
+ /* Save the x flag state at the time of the cc snapshot. */
+ if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
+ return;
+ }
+ tcg_gen_movi_tl(cc_x, dc->flags_x);
+ dc->cc_x_uptodate = 2 | dc->flags_x;
+}
+
+/* Update cc prior to executing ALU op. Needs source operands untouched. */
+static void cris_pre_alu_update_cc(DisasContext *dc, int op,
+ TCGv dst, TCGv src, int size)
+{
+ if (dc->update_cc) {
+ cris_update_cc_op(dc, op, size);
+ tcg_gen_mov_tl(cc_src, src);
+
+ if (op != CC_OP_MOVE
+ && op != CC_OP_AND
+ && op != CC_OP_OR
+ && op != CC_OP_XOR
+ && op != CC_OP_ASR
+ && op != CC_OP_LSR
+ && op != CC_OP_LSL) {
+ tcg_gen_mov_tl(cc_dest, dst);
+ }
+
+ cris_update_cc_x(dc);
+ }
+}
+
+/* Update cc after executing an ALU op. Needs the result. */
+static inline void cris_update_result(DisasContext *dc, TCGv res)
+{
+ if (dc->update_cc) {
+ tcg_gen_mov_tl(cc_result, res);
+ }
+}
+
+/* Emit the insns for an ALU op; whether the write back stage executes
+   is decided by the caller. */
+static void cris_alu_op_exec(DisasContext *dc, int op,
+                             TCGv dst, TCGv a, TCGv b, int size)
+{
+ switch (op) {
+ case CC_OP_ADD:
+ tcg_gen_add_tl(dst, a, b);
+        /* Extended arithmetic. */
+ t_gen_addx_carry(dc, dst);
+ break;
+ case CC_OP_ADDC:
+ tcg_gen_add_tl(dst, a, b);
+ t_gen_add_flag(dst, 0); /* C_FLAG. */
+ break;
+ case CC_OP_MCP:
+ tcg_gen_add_tl(dst, a, b);
+ t_gen_add_flag(dst, 8); /* R_FLAG. */
+ break;
+ case CC_OP_SUB:
+ tcg_gen_sub_tl(dst, a, b);
+        /* Extended arithmetic. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ case CC_OP_MOVE:
+ tcg_gen_mov_tl(dst, b);
+ break;
+ case CC_OP_OR:
+ tcg_gen_or_tl(dst, a, b);
+ break;
+ case CC_OP_AND:
+ tcg_gen_and_tl(dst, a, b);
+ break;
+ case CC_OP_XOR:
+ tcg_gen_xor_tl(dst, a, b);
+ break;
+ case CC_OP_LSL:
+ t_gen_lsl(dst, a, b);
+ break;
+ case CC_OP_LSR:
+ t_gen_lsr(dst, a, b);
+ break;
+ case CC_OP_ASR:
+ t_gen_asr(dst, a, b);
+ break;
+ case CC_OP_NEG:
+ tcg_gen_neg_tl(dst, b);
+        /* Extended arithmetic. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ case CC_OP_LZ:
+ tcg_gen_clzi_tl(dst, b, TARGET_LONG_BITS);
+ break;
+ case CC_OP_MULS:
+ tcg_gen_muls2_tl(dst, cpu_PR[PR_MOF], a, b);
+ break;
+ case CC_OP_MULU:
+ tcg_gen_mulu2_tl(dst, cpu_PR[PR_MOF], a, b);
+ break;
+ case CC_OP_DSTEP:
+ t_gen_cris_dstep(dst, a, b);
+ break;
+ case CC_OP_MSTEP:
+ t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
+ break;
+ case CC_OP_BOUND:
+ tcg_gen_movcond_tl(TCG_COND_LEU, dst, a, b, a, b);
+ break;
+ case CC_OP_CMP:
+ tcg_gen_sub_tl(dst, a, b);
+        /* Extended arithmetic. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "illegal ALU op.\n");
+ BUG();
+ break;
+ }
+
+ if (size == 1) {
+ tcg_gen_andi_tl(dst, dst, 0xff);
+ } else if (size == 2) {
+ tcg_gen_andi_tl(dst, dst, 0xffff);
+ }
+}
+
+static void cris_alu(DisasContext *dc, int op,
+ TCGv d, TCGv op_a, TCGv op_b, int size)
+{
+ TCGv tmp;
+ int writeback;
+
+ writeback = 1;
+
+ if (op == CC_OP_CMP) {
+ tmp = tcg_temp_new();
+ writeback = 0;
+ } else if (size == 4) {
+ tmp = d;
+ writeback = 0;
+ } else {
+ tmp = tcg_temp_new();
+ }
+
+ cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
+ cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
+ cris_update_result(dc, tmp);
+
+ /* Writeback. */
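+    /* Sub-dword ops replace only the low 8 or 16 bits of the
+       destination; the upper bits are preserved. */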
+ if (writeback) {
+ if (size == 1) {
+ tcg_gen_andi_tl(d, d, ~0xff);
+ } else {
+ tcg_gen_andi_tl(d, d, ~0xffff);
+ }
+ tcg_gen_or_tl(d, d, tmp);
+ }
+ if (tmp != d) {
+ tcg_temp_free(tmp);
+ }
+}
+
+static int arith_cc(DisasContext *dc)
+{
+ if (dc->update_cc) {
+ switch (dc->cc_op) {
+ case CC_OP_ADDC: return 1;
+ case CC_OP_ADD: return 1;
+ case CC_OP_SUB: return 1;
+ case CC_OP_DSTEP: return 1;
+ case CC_OP_LSL: return 1;
+ case CC_OP_LSR: return 1;
+ case CC_OP_ASR: return 1;
+ case CC_OP_CMP: return 1;
+ case CC_OP_NEG: return 1;
+ case CC_OP_OR: return 1;
+ case CC_OP_AND: return 1;
+ case CC_OP_XOR: return 1;
+ case CC_OP_MULU: return 1;
+ case CC_OP_MULS: return 1;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
+{
+ int arith_opt, move_opt;
+
+ /* TODO: optimize more condition codes. */
+
+ /*
+     * If the flags are live, we have to look at the bits of CCS.
+ * Otherwise, if we just did an arithmetic operation we try to
+ * evaluate the condition code faster.
+ *
+ * When this function is done, T0 should be non-zero if the condition
+ * code is true.
+ */
+ arith_opt = arith_cc(dc) && !dc->flags_uptodate;
+ move_opt = (dc->cc_op == CC_OP_MOVE);
+ switch (cond) {
+ case CC_EQ:
+ if ((arith_opt || move_opt)
+ && dc->cc_x_uptodate != (2 | X_FLAG)) {
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cc, cc_result, 0);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc,
+ cpu_PR[PR_CCS], Z_FLAG);
+ }
+ break;
+ case CC_NE:
+ if ((arith_opt || move_opt)
+ && dc->cc_x_uptodate != (2 | X_FLAG)) {
+ tcg_gen_mov_tl(cc, cc_result);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ Z_FLAG);
+ tcg_gen_andi_tl(cc, cc, Z_FLAG);
+ }
+ break;
+ case CC_CS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
+ break;
+ case CC_CC:
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
+ tcg_gen_andi_tl(cc, cc, C_FLAG);
+ break;
+ case CC_VS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
+ break;
+ case CC_VC:
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ V_FLAG);
+ tcg_gen_andi_tl(cc, cc, V_FLAG);
+ break;
+ case CC_PL:
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1) {
+ bits = 7;
+ } else if (dc->cc_size == 2) {
+ bits = 15;
+ }
+
+ tcg_gen_shri_tl(cc, cc_result, bits);
+ tcg_gen_xori_tl(cc, cc, 1);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ N_FLAG);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ }
+ break;
+ case CC_MI:
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1) {
+ bits = 7;
+ } else if (dc->cc_size == 2) {
+ bits = 15;
+ }
+
+ tcg_gen_shri_tl(cc, cc_result, bits);
+ tcg_gen_andi_tl(cc, cc, 1);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
+ N_FLAG);
+ }
+ break;
+ case CC_LS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
+ C_FLAG | Z_FLAG);
+ break;
+ case CC_HI:
+ cris_evaluate_flags(dc);
+ {
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
+ C_FLAG | Z_FLAG);
+ /* Overlay the C flag on top of the Z. */
+ tcg_gen_shli_tl(cc, tmp, 2);
+ tcg_gen_and_tl(cc, tmp, cc);
+ tcg_gen_andi_tl(cc, cc, Z_FLAG);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+ case CC_GE:
+ cris_evaluate_flags(dc);
+ /* Overlay the V flag on top of the N. */
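+        /* N is CCS bit 3 and V bit 1, so shifting CCS left by two
+           aligns V with N; GE is then !(N ^ V). */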
+ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
+ tcg_gen_xor_tl(cc,
+ cpu_PR[PR_CCS], cc);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ tcg_gen_xori_tl(cc, cc, N_FLAG);
+ break;
+ case CC_LT:
+ cris_evaluate_flags(dc);
+ /* Overlay the V flag on top of the N. */
+ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
+ tcg_gen_xor_tl(cc,
+ cpu_PR[PR_CCS], cc);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ break;
+ case CC_GT:
+ cris_evaluate_flags(dc);
+ {
+ TCGv n, z;
+
+ n = tcg_temp_new();
+ z = tcg_temp_new();
+
+ /* To avoid a shift we overlay everything on
+ the V flag. */
+ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
+ tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
+ /* invert Z. */
+ tcg_gen_xori_tl(z, z, 2);
+
+ tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
+ tcg_gen_xori_tl(n, n, 2);
+ tcg_gen_and_tl(cc, z, n);
+ tcg_gen_andi_tl(cc, cc, 2);
+
+ tcg_temp_free(n);
+ tcg_temp_free(z);
+ }
+ break;
+ case CC_LE:
+ cris_evaluate_flags(dc);
+ {
+ TCGv n, z;
+
+ n = tcg_temp_new();
+ z = tcg_temp_new();
+
+ /* To avoid a shift we overlay everything on
+ the V flag. */
+ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
+ tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
+
+ tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
+ tcg_gen_or_tl(cc, z, n);
+ tcg_gen_andi_tl(cc, cc, 2);
+
+ tcg_temp_free(n);
+ tcg_temp_free(z);
+ }
+ break;
+ case CC_P:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
+ break;
+ case CC_A:
+ tcg_gen_movi_tl(cc, 1);
+ break;
+ default:
+ BUG();
+ break;
+    }
+}
+
+static void cris_store_direct_jmp(DisasContext *dc)
+{
+ /* Store the direct jmp state into the cpu-state. */
+ if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
+ if (dc->jmp == JMP_DIRECT) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ dc->jmp = JMP_INDIRECT;
+ }
+}
+
+static void cris_prepare_cc_branch (DisasContext *dc,
+ int offset, int cond)
+{
+    /* This helps us re-schedule the micro-code for insns in delay
+       slots before the actual jump. */
+ dc->delayed_branch = 2;
+ dc->jmp = JMP_DIRECT_CC;
+ dc->jmp_pc = dc->pc + offset;
+
+ gen_tst_cc(dc, env_btaken, cond);
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+}
+
+
+/* Jumps, e.g. when the destination is in a live register. JMP_DIRECT
+   should be used when the destination address is constant, to allow
+   TB chaining. */
+static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
+{
+    /* This helps us re-schedule the micro-code for insns in delay
+       slots before the actual jump. */
+ dc->delayed_branch = 2;
+ dc->jmp = type;
+ if (type == JMP_INDIRECT) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+}
+
+static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+    /* If we get a fault in a delay slot, we must keep the jmp state
+       in the cpu state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEQ);
+}
+
+static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
+ unsigned int size, int sign)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+    /* If we get a fault in a delay slot, we must keep the jmp state
+       in the cpu state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ tcg_gen_qemu_ld_tl(dst, addr, mem_index,
+ MO_TE + ctz32(size) + (sign ? MO_SIGN : 0));
+}
+
+static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+    /* If we get a fault in a delay slot, we must keep the jmp state
+       in the cpu state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+    /* Conditional writes. We only support the kind where X and P are
+       known at translation time. */
+ if (dc->flags_x && (dc->tb_flags & P_FLAG)) {
+ dc->postinc = 0;
+ cris_evaluate_flags(dc);
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
+ return;
+ }
+
+ tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TE + ctz32(size));
+
+ if (dc->flags_x) {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
+ }
+}
+
+static inline void t_gen_sext(TCGv d, TCGv s, int size)
+{
+ if (size == 1) {
+ tcg_gen_ext8s_i32(d, s);
+ } else if (size == 2) {
+ tcg_gen_ext16s_i32(d, s);
+ } else {
+ tcg_gen_mov_tl(d, s);
+ }
+}
+
+static inline void t_gen_zext(TCGv d, TCGv s, int size)
+{
+ if (size == 1) {
+ tcg_gen_ext8u_i32(d, s);
+ } else if (size == 2) {
+ tcg_gen_ext16u_i32(d, s);
+ } else {
+ tcg_gen_mov_tl(d, s);
+ }
+}
+
+#if DISAS_CRIS
+static char memsize_char(int size)
+{
+ switch (size) {
+ case 1: return 'b';
+ case 2: return 'w';
+ case 4: return 'd';
+ default:
+ return 'x';
+ }
+}
+#endif
+
+static inline unsigned int memsize_z(DisasContext *dc)
+{
+ return dc->zsize + 1;
+}
+
+static inline unsigned int memsize_zz(DisasContext *dc)
+{
+ switch (dc->zzsize) {
+ case 0: return 1;
+ case 1: return 2;
+ default:
+ return 4;
+ }
+}
+
+static inline void do_postinc (DisasContext *dc, int size)
+{
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
+ }
+}
+
+static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
+ int size, int s_ext, TCGv dst)
+{
+ if (s_ext) {
+ t_gen_sext(dst, cpu_R[rs], size);
+ } else {
+ t_gen_zext(dst, cpu_R[rs], size);
+ }
+}
+
+/* Prepare T0 and T1 for a register ALU operation.
+   s_ext decides whether operand 1 is sign- or zero-extended when
+   needed. */
+static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
+ int size, int s_ext, TCGv dst, TCGv src)
+{
+ dec_prep_move_r(dc, rs, rd, size, s_ext, src);
+
+ if (s_ext) {
+ t_gen_sext(dst, cpu_R[rd], size);
+ } else {
+ t_gen_zext(dst, cpu_R[rd], size);
+ }
+}
+
+static int dec_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
+{
+ unsigned int rs;
+ uint32_t imm;
+ int is_imm;
+ int insn_len = 2;
+
+ rs = dc->op1;
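+    /* Encoding 15 with post-increment denotes an immediate operand,
+       fetched from the insn stream right after the opcode word. */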
+ is_imm = rs == 15 && dc->postinc;
+
+ /* Load [$rs] onto T1. */
+ if (is_imm) {
+ insn_len = 2 + memsize;
+ if (memsize == 1) {
+ insn_len++;
+ }
+
+ imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
+ tcg_gen_movi_tl(dst, imm);
+ dc->postinc = 0;
+ } else {
+ cris_flush_cc_state(dc);
+ gen_load(dc, dst, cpu_R[rs], memsize, 0);
+ if (s_ext) {
+ t_gen_sext(dst, dst, memsize);
+ } else {
+ t_gen_zext(dst, dst, memsize);
+ }
+ }
+ return insn_len;
+}
+
+/* Prepare T0 and T1 for a memory + ALU operation.
+   s_ext decides whether operand 1 is sign- or zero-extended when
+   needed. */
+static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst, TCGv src)
+{
+ int insn_len;
+
+ insn_len = dec_prep_move_m(env, dc, s_ext, memsize, src);
+ tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
+ return insn_len;
+}
+
+#if DISAS_CRIS
+static const char *cc_name(int cc)
+{
+ static const char * const cc_names[16] = {
+ "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
+ "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
+ };
+ assert(cc < 16);
+ return cc_names[cc];
+}
+#endif
+
+/* Start of insn decoders. */
+
+static int dec_bccq(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t offset;
+ int sign;
+ uint32_t cond = dc->op2;
+
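+    /* The quick branch displacement is even: insn bits 7..1 hold
+       offset >> 1 and bit 0 supplies bit 8 (the sign) of the 9-bit
+       signed offset. */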
+ offset = EXTRACT_FIELD(dc->ir, 1, 7);
+ sign = EXTRACT_FIELD(dc->ir, 0, 0);
+
+ offset *= 2;
+ offset |= sign << 8;
+ offset = sign_extend(offset, 8);
+
+ LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
+
+ /* op2 holds the condition-code. */
+ cris_cc_mask(dc, 0);
+ cris_prepare_cc_branch(dc, offset, cond);
+ return 2;
+}
+static int dec_addoq(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t imm;
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
+ imm = sign_extend(dc->op1, 7);
+
+ LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
+ cris_cc_mask(dc, 0);
+    /* Fetch the register operand. */
+ tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
+
+ return 2;
+}
+static int dec_addq(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c;
+ LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ c = tcg_const_tl(dc->op1);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
+ tcg_temp_free(c);
+ return 2;
+}
+static int dec_moveq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+ LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
+
+ tcg_gen_movi_tl(cpu_R[dc->op2], imm);
+ return 2;
+}
+static int dec_subq(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+
+ LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(dc->op1);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
+ tcg_temp_free(c);
+ return 2;
+}
+static int dec_cmpq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ TCGv c;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+
+ LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ c = tcg_const_tl(imm);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
+ tcg_temp_free(c);
+ return 2;
+}
+static int dec_andq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ TCGv c;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+
+ LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ c = tcg_const_tl(imm);
+ cris_alu(dc, CC_OP_AND,
+ cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
+ tcg_temp_free(c);
+ return 2;
+}
+static int dec_orq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ TCGv c;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+ LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ c = tcg_const_tl(imm);
+ cris_alu(dc, CC_OP_OR,
+ cpu_R[dc->op2], cpu_R[dc->op2], c, 4);
+ tcg_temp_free(c);
+ return 2;
+}
+static int dec_btstq(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ c = tcg_const_tl(dc->op1);
+ cris_evaluate_flags(dc);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ c, cpu_PR[PR_CCS]);
+ tcg_temp_free(c);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ return 2;
+}
+static int dec_asrq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+static int dec_lslq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+static int dec_lsrq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+
+static int dec_move_r(CPUCRISState *env, DisasContext *dc)
+{
+ int size = memsize_zz(dc);
+
+ LOG_DIS("move.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ if (size == 4) {
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_update_cc_op(dc, CC_OP_MOVE, 4);
+ cris_update_cc_x(dc);
+ cris_update_result(dc, cpu_R[dc->op2]);
+ } else {
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], t0, size);
+ tcg_temp_free(t0);
+ }
+ return 2;
+}
+
+static int dec_scc_r(CPUCRISState *env, DisasContext *dc)
+{
+ int cond = dc->op2;
+
+ LOG_DIS("s%s $r%u\n",
+ cc_name(cond), dc->op1);
+
+ gen_tst_cc(dc, cpu_R[dc->op1], cond);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->op1], cpu_R[dc->op1], 0);
+
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
+{
+ if (size == 4) {
+ t[0] = cpu_R[dc->op2];
+ t[1] = cpu_R[dc->op1];
+ } else {
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ }
+}
+
+static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
+{
+ if (size != 4) {
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+ }
+}
+
+static int dec_and_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("and.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_lz_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("lz $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
+ cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_lsl_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("lsl.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_lsr_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("lsr.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_asr_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("asr.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_muls_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("muls.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZV);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_mulu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("mulu.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZV);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_dstep_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_DSTEP,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
+ return 2;
+}
+
+static int dec_xor_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("xor.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ BUG_ON(size != 4); /* xor is dword. */
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_bound_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv l0;
+ int size = memsize_zz(dc);
+ LOG_DIS("bound.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ l0 = tcg_temp_local_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
+ tcg_temp_free(l0);
+ return 2;
+}
+
+static int dec_cmp_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("cmp.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_abs_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("abs $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_abs_tl(cpu_R[dc->op2], cpu_R[dc->op1]);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+
+static int dec_add_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("add.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("addc $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_evaluate_flags(dc);
+
+    /* Set the X flag for this insn. */
+ dc->flags_x = X_FLAG;
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADDC,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
+ return 2;
+}
+
+static int dec_mcp_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("mcp $p%u, $r%u\n",
+ dc->op2, dc->op1);
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_RNZV);
+ cris_alu(dc, CC_OP_MCP,
+ cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
+ return 2;
+}
+
+#if DISAS_CRIS
+static char *swapmode_name(int mode, char *modename)
+{
+ int i = 0;
+ if (mode & 8) {
+ modename[i++] = 'n';
+ }
+ if (mode & 4) {
+ modename[i++] = 'w';
+ }
+ if (mode & 2) {
+ modename[i++] = 'b';
+ }
+ if (mode & 1) {
+ modename[i++] = 'r';
+ }
+ modename[i++] = 0;
+ return modename;
+}
+#endif
+
+static int dec_swap_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+#if DISAS_CRIS
+ char modename[4];
+#endif
+ LOG_DIS("swap%s $r%u\n",
+ swapmode_name(dc->op2, modename), dc->op1);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_R[dc->op1]);
+ if (dc->op2 & 8) {
+ tcg_gen_not_tl(t0, t0);
+ }
+ if (dc->op2 & 4) {
+ t_gen_swapw(t0, t0);
+ }
+ if (dc->op2 & 2) {
+ t_gen_swapb(t0, t0);
+ }
+ if (dc->op2 & 1) {
+ t_gen_swapr(t0, t0);
+ }
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_or_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("or.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_addi_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("addi.%c $r%u, $r%u\n",
+ memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+ t0 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
+ tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_addi_acr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
+ memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+ t0 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize);
+ tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_neg_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("neg.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_btst_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("btst $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_evaluate_flags(dc);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ cpu_R[dc->op1], cpu_PR[PR_CCS]);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ return 2;
+}
+
+static int dec_sub_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("sub.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+/* Zero extension. From size to dword. */
+static int dec_movu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("movu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_movs_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("movs.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Zero extension. From size to dword. */
+static int dec_addu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("addu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_zext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_adds_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("adds.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Zero extension. From size to dword. */
+static int dec_subu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("subu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_zext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_subs_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("subs.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t flags;
+ int set = (~dc->opcode >> 2) & 1;
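+    /* setf and clearf differ only in opcode bit 2; the bit is clear
+       for setf. */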
+
+ flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
+ | EXTRACT_FIELD(dc->ir, 0, 3);
+ if (set && flags == 0) {
+ LOG_DIS("nop\n");
+ return 2;
+ } else if (!set && (flags & 0x20)) {
+ LOG_DIS("di\n");
+ } else {
+ LOG_DIS("%sf %x\n", set ? "set" : "clr", flags);
+ }
+
+ /* User space is not allowed to touch these. Silently ignore. */
+ if (dc->tb_flags & U_FLAG) {
+ flags &= ~(S_FLAG | I_FLAG | U_FLAG);
+ }
+
+ if (flags & X_FLAG) {
+ if (set) {
+ dc->flags_x = X_FLAG;
+ } else {
+ dc->flags_x = 0;
+ }
+ }
+
+    /* Break the TB if the S or P flags change. */
+ if (flags & (P_FLAG | S_FLAG)) {
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ dc->base.is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = 1;
+ }
+
+ /* For the I flag, only act on posedge. */
+ if ((flags & I_FLAG)) {
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ dc->base.is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = 1;
+ }
+
+ /* Simply decode the flags. */
+ cris_evaluate_flags(dc);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ cris_update_cc_x(dc);
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+
+ if (set) {
+ if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
+ /* Enter user mode. */
+ t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
+ tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
+ dc->cpustate_changed = 1;
+ }
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
+ } else {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
+ }
+
+ dc->flags_uptodate = 1;
+ dc->clear_x = 0;
+ return 2;
+}
+
+static int dec_move_rs(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c2, c1;
+ LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
+ c1 = tcg_const_tl(dc->op1);
+ c2 = tcg_const_tl(dc->op2);
+ cris_cc_mask(dc, 0);
+ gen_helper_movl_sreg_reg(cpu_env, c2, c1);
+ tcg_temp_free(c1);
+ tcg_temp_free(c2);
+ return 2;
+}
+static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c2, c1;
+ LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
+ c1 = tcg_const_tl(dc->op1);
+ c2 = tcg_const_tl(dc->op2);
+ cris_cc_mask(dc, 0);
+ gen_helper_movl_reg_sreg(cpu_env, c1, c2);
+ tcg_temp_free(c1);
+ tcg_temp_free(c2);
+ return 2;
+}
+
+static int dec_move_rp(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+
+ t[0] = tcg_temp_new();
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
+ if (dc->tb_flags & U_FLAG) {
+ t[1] = tcg_temp_new();
+ /* User space is not allowed to touch all flags. */
+ tcg_gen_andi_tl(t[0], t[0], 0x39f);
+ tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
+ tcg_gen_or_tl(t[0], t[1], t[0]);
+ tcg_temp_free(t[1]);
+ }
+ } else {
+ tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
+ }
+
+ t_gen_mov_preg_TN(dc, dc->op2, t[0]);
+ if (dc->op2 == PR_CCS) {
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ }
+ tcg_temp_free(t[0]);
+ return 2;
+}
+static int dec_move_pr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+
+ if (dc->op2 == PR_DZ) {
+ tcg_gen_movi_tl(cpu_R[dc->op1], 0);
+ } else {
+ t0 = tcg_temp_new();
+ t_gen_mov_TN_preg(t0, dc->op2);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op1], cpu_R[dc->op1], t0,
+ preg_sizes[dc->op2]);
+ tcg_temp_free(t0);
+ }
+ return 2;
+}
+
+static int dec_move_mr(CPUCRISState *env, DisasContext *dc)
+{
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("move.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ if (memsize == 4) {
+ insn_len = dec_prep_move_m(env, dc, 0, 4, cpu_R[dc->op2]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_update_cc_op(dc, CC_OP_MOVE, 4);
+ cris_update_cc_x(dc);
+ cris_update_result(dc, cpu_R[dc->op2]);
+ } else {
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ insn_len = dec_prep_move_m(env, dc, 0, memsize, t0);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
+ tcg_temp_free(t0);
+ }
+ do_postinc(dc, memsize);
+ return insn_len;
+}
+
+static inline void cris_alu_m_alloc_temps(TCGv *t)
+{
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+}
+
+static inline void cris_alu_m_free_temps(TCGv *t)
+{
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+static int dec_movs_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("movs.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_addu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("addu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+    /* zero extend. */
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_adds_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("adds.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_subu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("subu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+    /* zero extend. */
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_subs_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("subs.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_movu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+
+ LOG_DIS("movu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmps_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1],
+ memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmp_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1],
+ memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_test_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2], c;
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("test.%c [$r%u%s] op2=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_evaluate_flags(dc);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
+
+ c = tcg_const_tl(0);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], t[1], c, memsize_zz(dc));
+ tcg_temp_free(c);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_and_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("and.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_add_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("add.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_addo_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("add.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, 0);
+ cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_bound_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv l[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("bound.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ l[0] = tcg_temp_local_new();
+ l[1] = tcg_temp_local_new();
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
+ do_postinc(dc, memsize);
+ tcg_temp_free(l[0]);
+ tcg_temp_free(l[1]);
+ return insn_len;
+}
+
+static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int insn_len = 2;
+ LOG_DIS("addc [$r%u%s, $r%u\n",
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_evaluate_flags(dc);
+
+    /* Set the X flag for this insn. */
+ dc->flags_x = X_FLAG;
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, 4, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
+ do_postinc(dc, 4);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_sub_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2, dc->ir, dc->zzsize);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_or_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2, dc->pc);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_OR,
+ cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_move_mp(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len = 2;
+
+ LOG_DIS("move.%c [$r%u%s, $p%u\n",
+ memsize_char(memsize),
+ dc->op1,
+ dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, 0);
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ if (dc->tb_flags & U_FLAG) {
+ /* User space is not allowed to touch all flags. */
+ tcg_gen_andi_tl(t[1], t[1], 0x39f);
+ tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
+ tcg_gen_or_tl(t[1], t[0], t[1]);
+ }
+ }
+
+ t_gen_mov_preg_TN(dc, dc->op2, t[1]);
+
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_move_pm(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int memsize;
+
+ memsize = preg_sizes[dc->op2];
+
+ LOG_DIS("move.%c $p%u, [$r%u%s\n",
+ memsize_char(memsize),
+ dc->op2, dc->op1, dc->postinc ? "+]" : "]");
+
+    /* Prepare the store: the special register value goes through t0,
+       the address is in $r[op1]. */
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ t0 = tcg_temp_new();
+ t_gen_mov_TN_preg(t0, dc->op2);
+ cris_flush_cc_state(dc);
+ gen_store(dc, cpu_R[dc->op1], t0, memsize);
+ tcg_temp_free(t0);
+
+ cris_cc_mask(dc, 0);
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
+ }
+ return 2;
+}
+
+static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv_i64 tmp[16];
+ TCGv tmp32;
+ TCGv addr;
+ int i;
+ int nr = dc->op2 + 1;
+
+ LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
+ dc->postinc ? "+]" : "]", dc->op2);
+
+ addr = tcg_temp_new();
+ /* There are probably better ways of doing this. */
+ cris_flush_cc_state(dc);
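+    /* Load the registers pairwise as 64-bit quantities, with a final
+       32-bit load when the register count is odd. */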
+ for (i = 0; i < (nr >> 1); i++) {
+ tmp[i] = tcg_temp_new_i64();
+ tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
+ gen_load64(dc, tmp[i], addr);
+ }
+ if (nr & 1) {
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
+ gen_load(dc, tmp32, addr, 4, 0);
+ } else {
+ tmp32 = NULL;
+ }
+ tcg_temp_free(addr);
+
+ for (i = 0; i < (nr >> 1); i++) {
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
+ tcg_gen_shri_i64(tmp[i], tmp[i], 32);
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
+ tcg_temp_free_i64(tmp[i]);
+ }
+ if (nr & 1) {
+ tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
+ tcg_temp_free(tmp32);
+ }
+
+    /* Write back the updated pointer value. */
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
+ }
+
+    /* gen_load might want to evaluate the previous insn's flags. */
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static int dec_movem_rm(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv tmp;
+ TCGv addr;
+ int i;
+
+ LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
+ dc->postinc ? "+]" : "]");
+
+ cris_flush_cc_state(dc);
+
+ tmp = tcg_temp_new();
+ addr = tcg_temp_new();
+ tcg_gen_movi_tl(tmp, 4);
+ tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
+ for (i = 0; i <= dc->op2; i++) {
+        /* Perform the store, then displace addr to the next word. */
+ gen_store(dc, addr, cpu_R[i], 4);
+ tcg_gen_add_tl(addr, addr, tmp);
+ }
+ if (dc->postinc) {
+ tcg_gen_mov_tl(cpu_R[dc->op1], addr);
+ }
+ cris_cc_mask(dc, 0);
+ tcg_temp_free(tmp);
+ tcg_temp_free(addr);
+ return 2;
+}
+
+static int dec_move_rm(CPUCRISState *env, DisasContext *dc)
+{
+ int memsize;
+
+ memsize = memsize_zz(dc);
+
+ LOG_DIS("move.%c $r%u, [$r%u]\n",
+ memsize_char(memsize), dc->op2, dc->op1);
+
+ /* prepare store. */
+ cris_flush_cc_state(dc);
+ gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
+
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
+ }
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static int dec_lapcq(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("lapcq %x, $r%u\n",
+ dc->pc + dc->op1*2, dc->op2);
+ cris_cc_mask(dc, 0);
+ tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
+ return 2;
+}
+
+static int dec_lapc_im(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int rd;
+ int32_t imm;
+ int32_t pc;
+
+ rd = dc->op2;
+
+ cris_cc_mask(dc, 0);
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+ LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
+
+ pc = dc->pc;
+ pc += imm;
+ tcg_gen_movi_tl(cpu_R[rd], pc);
+ return 6;
+}
+
+/* Jump to special reg. */
+static int dec_jump_p(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("jump $p%u\n", dc->op2);
+
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ t_gen_mov_TN_preg(env_btarget, dc->op2);
+    /* rete will often have the low bit set to indicate a delay slot. */
+ tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
+ cris_cc_mask(dc, 0);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+/* Jump and save. */
+static int dec_jas_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c;
+ LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
+ if (dc->op2 > 15) {
+ abort();
+ }
+ c = tcg_const_tl(dc->pc + 4);
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+static int dec_jas_im(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ TCGv c;
+
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("jas 0x%x\n", imm);
+ cris_cc_mask(dc, 0);
+ c = tcg_const_tl(dc->pc + 8);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_jasc_im(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ TCGv c;
+
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("jasc 0x%x\n", imm);
+ cris_cc_mask(dc, 0);
+ c = tcg_const_tl(dc->pc + 8 + 4);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_jasc_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv c;
+ LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
+ c = tcg_const_tl(dc->pc + 4 + 4);
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+static int dec_bcc_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t offset;
+ uint32_t cond = dc->op2;
+
+ offset = cris_fetch(env, dc, dc->pc + 2, 2, 1);
+
+ LOG_DIS("b%s %d pc=%x dst=%x\n",
+ cc_name(cond), offset,
+ dc->pc, dc->pc + offset);
+
+ cris_cc_mask(dc, 0);
+ /* op2 holds the condition-code. */
+ cris_prepare_cc_branch(dc, offset, cond);
+ return 4;
+}
+
+static int dec_bas_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t simm;
+ TCGv c;
+
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
+ cris_cc_mask(dc, 0);
+ c = tcg_const_tl(dc->pc + 8);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_basc_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t simm;
+ TCGv c;
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
+ cris_cc_mask(dc, 0);
+ c = tcg_const_tl(dc->pc + 12);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, c);
+ tcg_temp_free(c);
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
+{
+ cris_cc_mask(dc, 0);
+
+ if (dc->op2 == 15) {
+ tcg_gen_st_i32(tcg_const_i32(1), cpu_env,
+ -offsetof(CRISCPU, env) + offsetof(CPUState, halted));
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ t_gen_raise_exception(EXCP_HLT);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return 2;
+ }
+
+ switch (dc->op2 & 7) {
+ case 2:
+ /* rfe. */
+ LOG_DIS("rfe\n");
+ cris_evaluate_flags(dc);
+ gen_helper_rfe(cpu_env);
+ dc->base.is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = true;
+ break;
+ case 5:
+ /* rfn. */
+ LOG_DIS("rfn\n");
+ cris_evaluate_flags(dc);
+ gen_helper_rfn(cpu_env);
+ dc->base.is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = true;
+ break;
+ case 6:
+ LOG_DIS("break %d\n", dc->op1);
+ cris_evaluate_flags(dc);
+ /* break. */
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+
+ /* Breaks start at 16 in the exception vector. */
+ t_gen_movi_env_TN(trap_vector, dc->op1 + 16);
+ t_gen_raise_exception(EXCP_BREAK);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ default:
+ printf("op2=%x\n", dc->op2);
+ BUG();
+ break;
+
+ }
+ return 2;
+}
+
+static int dec_ftag_fidx_d_m(CPUCRISState *env, DisasContext *dc)
+{
+ return 2;
+}
+
+static int dec_ftag_fidx_i_m(CPUCRISState *env, DisasContext *dc)
+{
+ return 2;
+}
+
+static int dec_null(CPUCRISState *env, DisasContext *dc)
+{
+ printf("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
+ dc->pc, dc->opcode, dc->op1, dc->op2);
+ fflush(NULL);
+ BUG();
+ return 2;
+}
+
+static const struct decoder_info {
+ struct {
+ uint32_t bits;
+ uint32_t mask;
+ };
+ int (*dec)(CPUCRISState *env, DisasContext *dc);
+} decinfo[] = {
+ /* Order matters here. */
+ {DEC_MOVEQ, dec_moveq},
+ {DEC_BTSTQ, dec_btstq},
+ {DEC_CMPQ, dec_cmpq},
+ {DEC_ADDOQ, dec_addoq},
+ {DEC_ADDQ, dec_addq},
+ {DEC_SUBQ, dec_subq},
+ {DEC_ANDQ, dec_andq},
+ {DEC_ORQ, dec_orq},
+ {DEC_ASRQ, dec_asrq},
+ {DEC_LSLQ, dec_lslq},
+ {DEC_LSRQ, dec_lsrq},
+ {DEC_BCCQ, dec_bccq},
+
+ {DEC_BCC_IM, dec_bcc_im},
+ {DEC_JAS_IM, dec_jas_im},
+ {DEC_JAS_R, dec_jas_r},
+ {DEC_JASC_IM, dec_jasc_im},
+ {DEC_JASC_R, dec_jasc_r},
+ {DEC_BAS_IM, dec_bas_im},
+ {DEC_BASC_IM, dec_basc_im},
+ {DEC_JUMP_P, dec_jump_p},
+ {DEC_LAPC_IM, dec_lapc_im},
+ {DEC_LAPCQ, dec_lapcq},
+
+ {DEC_RFE_ETC, dec_rfe_etc},
+ {DEC_ADDC_MR, dec_addc_mr},
+
+ {DEC_MOVE_MP, dec_move_mp},
+ {DEC_MOVE_PM, dec_move_pm},
+ {DEC_MOVEM_MR, dec_movem_mr},
+ {DEC_MOVEM_RM, dec_movem_rm},
+ {DEC_MOVE_PR, dec_move_pr},
+ {DEC_SCC_R, dec_scc_r},
+ {DEC_SETF, dec_setclrf},
+ {DEC_CLEARF, dec_setclrf},
+
+ {DEC_MOVE_SR, dec_move_sr},
+ {DEC_MOVE_RP, dec_move_rp},
+ {DEC_SWAP_R, dec_swap_r},
+ {DEC_ABS_R, dec_abs_r},
+ {DEC_LZ_R, dec_lz_r},
+ {DEC_MOVE_RS, dec_move_rs},
+ {DEC_BTST_R, dec_btst_r},
+ {DEC_ADDC_R, dec_addc_r},
+
+ {DEC_DSTEP_R, dec_dstep_r},
+ {DEC_XOR_R, dec_xor_r},
+ {DEC_MCP_R, dec_mcp_r},
+ {DEC_CMP_R, dec_cmp_r},
+
+ {DEC_ADDI_R, dec_addi_r},
+ {DEC_ADDI_ACR, dec_addi_acr},
+
+ {DEC_ADD_R, dec_add_r},
+ {DEC_SUB_R, dec_sub_r},
+
+ {DEC_ADDU_R, dec_addu_r},
+ {DEC_ADDS_R, dec_adds_r},
+ {DEC_SUBU_R, dec_subu_r},
+ {DEC_SUBS_R, dec_subs_r},
+ {DEC_LSL_R, dec_lsl_r},
+
+ {DEC_AND_R, dec_and_r},
+ {DEC_OR_R, dec_or_r},
+ {DEC_BOUND_R, dec_bound_r},
+ {DEC_ASR_R, dec_asr_r},
+ {DEC_LSR_R, dec_lsr_r},
+
+ {DEC_MOVU_R, dec_movu_r},
+ {DEC_MOVS_R, dec_movs_r},
+ {DEC_NEG_R, dec_neg_r},
+ {DEC_MOVE_R, dec_move_r},
+
+ {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
+ {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
+
+ {DEC_MULS_R, dec_muls_r},
+ {DEC_MULU_R, dec_mulu_r},
+
+ {DEC_ADDU_M, dec_addu_m},
+ {DEC_ADDS_M, dec_adds_m},
+ {DEC_SUBU_M, dec_subu_m},
+ {DEC_SUBS_M, dec_subs_m},
+
+ {DEC_CMPU_M, dec_cmpu_m},
+ {DEC_CMPS_M, dec_cmps_m},
+ {DEC_MOVU_M, dec_movu_m},
+ {DEC_MOVS_M, dec_movs_m},
+
+ {DEC_CMP_M, dec_cmp_m},
+ {DEC_ADDO_M, dec_addo_m},
+ {DEC_BOUND_M, dec_bound_m},
+ {DEC_ADD_M, dec_add_m},
+ {DEC_SUB_M, dec_sub_m},
+ {DEC_AND_M, dec_and_m},
+ {DEC_OR_M, dec_or_m},
+ {DEC_MOVE_RM, dec_move_rm},
+ {DEC_TEST_M, dec_test_m},
+ {DEC_MOVE_MR, dec_move_mr},
+
+ {{0, 0}, dec_null}
+};
+
+static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
+{
+ int insn_len = 2;
+ int i;
+
+ /* Load a halfword onto the instruction register. */
+ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
+
+ /* Now decode it. */
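+ /*
+ * Field layout: [15:12] op2, [11:4] opcode, [10] postinc,
+ * [5:4] zzsize, [4] zsize, [3:0] op1.
+ */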
+ dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
+ dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
+ dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
+ dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
+ dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
+
+ /* Large switch for all insns. */
+ for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
+ if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
+ insn_len = decinfo[i].dec(env, dc);
+ break;
+ }
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Single-stepping ? */
+ if (dc->tb_flags & S_FLAG) {
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
+ /* We treat SPC as a break with an odd trap vector. */
+ cris_evaluate_flags(dc);
+ t_gen_movi_env_TN(trap_vector, 3);
+ tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
+ tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
+ t_gen_raise_exception(EXCP_BREAK);
+ gen_set_label(l1);
+ }
+#endif
+ return insn_len;
+}
+
+#include "translate_v10.c.inc"
+
+/*
+ * Delay slots on QEMU/CRIS.
+ *
+ * If an exception hits on a delay slot, the core will let ERP (the Exception
+ * Return Pointer) point to the branch (the previous) insn and set the lsb
+ * to give SW a hint that the exception actually hit on the dslot.
+ *
+ * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
+ * the core and any jmp to an odd address will mask off that lsb. It is
+ * simply there to let SW know there was an exception on a dslot.
+ *
+ * When the software returns from an exception, the branch will re-execute.
+ * On QEMU care needs to be taken when a branch+delayslot sequence is broken
+ * and the branch and delayslot don't share pages.
+ *
+ * The TB containing the branch insn will set up env->btarget and evaluate
+ * env->btaken. When the translation loop exits we will note that the branch
+ * sequence is broken and let env->dslot be the size of the branch insn
+ * (those vary in length).
+ *
+ * The TB containing the delayslot will have the PC of its real insn (i.e. no
+ * lsb set). It will also expect to have env->dslot set up with the size of
+ * the delay slot so that env->pc - env->dslot points to the branch insn.
+ * This TB will execute the dslot and take the branch, either to btarget or
+ * just one insn ahead.
+ *
+ * When exceptions occur, we check for env->dslot in do_interrupt to detect
+ * broken branch sequences and set up $erp accordingly (i.e. let it point to
+ * the branch and set the lsb). Then env->dslot gets cleared so that the
+ * exception handler can enter. When returning from exceptions (jump $erp)
+ * the lsb gets masked off and we will re-execute the branch insn.
+ *
+ */
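+
+/*
+ * Worked example (illustrative numbers, not taken from any manual): a
+ * 4-byte branch insn at 0x100 has its delay slot at 0x104 and the pair
+ * ends up in separate TBs. The branch TB leaves env->dslot = 4. If an
+ * exception hits while the dslot insn executes, do_interrupt finds the
+ * branch at env->pc - env->dslot = 0x100 and sets $erp = 0x101 (lsb set);
+ * the eventual jump $erp masks the lsb off and re-executes the branch.
+ */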
+
+static void cris_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUCRISState *env = cs->env_ptr;
+ uint32_t tb_flags = dc->base.tb->flags;
+ uint32_t pc_start;
+
+ if (env->pregs[PR_VR] == 32) {
+ dc->decoder = crisv32_decoder;
+ dc->clear_locked_irq = 0;
+ } else {
+ dc->decoder = crisv10_decoder;
+ dc->clear_locked_irq = 1;
+ }
+
+ /*
+ * An odd PC indicates that the branch is re-executing due to an
+ * exception in the delay slot, as in real hw.
+ */
+ pc_start = dc->base.pc_first & ~1;
+ dc->base.pc_first = pc_start;
+ dc->base.pc_next = pc_start;
+
+ dc->cpu = env_archcpu(env);
+ dc->ppc = pc_start;
+ dc->pc = pc_start;
+ dc->flags_uptodate = 1;
+ dc->flags_x = tb_flags & X_FLAG;
+ dc->cc_x_uptodate = 0;
+ dc->cc_mask = 0;
+ dc->update_cc = 0;
+ dc->clear_prefix = 0;
+ dc->cpustate_changed = 0;
+
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->cc_size_uptodate = -1;
+
+ /* Decode TB flags. */
+ dc->tb_flags = tb_flags & (S_FLAG | P_FLAG | U_FLAG | X_FLAG | PFIX_FLAG);
+ dc->delayed_branch = !!(tb_flags & 7);
+ if (dc->delayed_branch) {
+ dc->jmp = JMP_INDIRECT;
+ } else {
+ dc->jmp = JMP_NOJMP;
+ }
+}
+
+static void cris_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void cris_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(dc->delayed_branch == 1 ? dc->ppc | 1 : dc->pc);
+}
+
+static void cris_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUCRISState *env = cs->env_ptr;
+ unsigned int insn_len;
+
+ /* Pretty disas. */
+ LOG_DIS("%8.8x:\t", dc->pc);
+
+ dc->clear_x = 1;
+
+ insn_len = dc->decoder(env, dc);
+ dc->ppc = dc->pc;
+ dc->pc += insn_len;
+ dc->base.pc_next += insn_len;
+
+ if (dc->base.is_jmp == DISAS_NORETURN) {
+ return;
+ }
+
+ if (dc->clear_x) {
+ cris_clear_x_flag(dc);
+ }
+
+ /*
+ * All branches are delayed branches, handled immediately below.
+ * We don't expect to see odd combinations of exit conditions.
+ */
+ assert(dc->base.is_jmp == DISAS_NEXT || dc->cpustate_changed);
+
+ if (dc->delayed_branch && --dc->delayed_branch == 0) {
+ dc->base.is_jmp = DISAS_DBRANCH;
+ return;
+ }
+
+ if (dc->base.is_jmp != DISAS_NEXT) {
+ return;
+ }
+
+ /* Force an update if the per-tb cpu state has changed. */
+ if (dc->cpustate_changed) {
+ dc->base.is_jmp = DISAS_UPDATE_NEXT;
+ return;
+ }
+
+ /*
+ * FIXME: Only the first insn in the TB should cross a page boundary.
+ * If we can detect the length of the next insn easily, we should.
+ * In the meantime, simply stop when we do cross.
+ */
+ if ((dc->pc ^ dc->base.pc_first) & TARGET_PAGE_MASK) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ DisasJumpType is_jmp = dc->base.is_jmp;
+ target_ulong npc = dc->pc;
+
+ if (is_jmp == DISAS_NORETURN) {
+ /* If we have a broken branch+delayslot sequence, it's too late. */
+ assert(dc->delayed_branch != 1);
+ return;
+ }
+
+ if (dc->clear_locked_irq) {
+ t_gen_movi_env_TN(locked_irq, 0);
+ }
+
+ /* Broken branch+delayslot sequence. */
+ if (dc->delayed_branch == 1) {
+ /* Set env->dslot to the size of the branch insn. */
+ t_gen_movi_env_TN(dslot, dc->pc - dc->ppc);
+ cris_store_direct_jmp(dc);
+ }
+
+ cris_evaluate_flags(dc);
+
+ /* Evaluate delayed branch destination and fold to another is_jmp case. */
+ if (is_jmp == DISAS_DBRANCH) {
+ if (dc->base.tb->flags & 7) {
+ t_gen_movi_env_TN(dslot, 0);
+ }
+
+ switch (dc->jmp) {
+ case JMP_DIRECT:
+ npc = dc->jmp_pc;
+ is_jmp = dc->cpustate_changed ? DISAS_UPDATE_NEXT : DISAS_TOO_MANY;
+ break;
+
+ case JMP_DIRECT_CC:
+ /*
+ * Use a conditional branch if either taken or not-taken path
+ * can use goto_tb. If neither can, then treat it as indirect.
+ */
+ if (likely(!dc->cpustate_changed)
+ && (use_goto_tb(dc, dc->jmp_pc) || use_goto_tb(dc, npc))) {
+ TCGLabel *not_taken = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, not_taken);
+ gen_goto_tb(dc, 1, dc->jmp_pc);
+ gen_set_label(not_taken);
+
+ /* not-taken case handled below. */
+ is_jmp = DISAS_TOO_MANY;
+ break;
+ }
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ /* fall through */
+
+ case JMP_INDIRECT:
+ tcg_gen_movcond_tl(TCG_COND_NE, env_pc,
+ env_btaken, tcg_constant_tl(0),
+ env_btarget, tcg_constant_tl(npc));
+ is_jmp = dc->cpustate_changed ? DISAS_UPDATE : DISAS_JUMP;
+
+ /*
+ * We have now consumed btaken and btarget. Hint to the
+ * tcg compiler that the writeback to env may be dropped.
+ */
+ tcg_gen_discard_tl(env_btaken);
+ tcg_gen_discard_tl(env_btarget);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ switch (is_jmp) {
+ case DISAS_TOO_MANY:
+ gen_goto_tb(dc, 0, npc);
+ break;
+ case DISAS_UPDATE_NEXT:
+ tcg_gen_movi_tl(env_pc, npc);
+ /* fall through */
+ case DISAS_JUMP:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_UPDATE:
+ /* Indicate that interrupts must be re-evaluated before the next TB. */
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void cris_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ if (!DISAS_CRIS) {
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+ }
+}
+
+static const TranslatorOps cris_tr_ops = {
+ .init_disas_context = cris_tr_init_disas_context,
+ .tb_start = cris_tr_tb_start,
+ .insn_start = cris_tr_insn_start,
+ .translate_insn = cris_tr_translate_insn,
+ .tb_stop = cris_tr_tb_stop,
+ .disas_log = cris_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+ translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ const char * const *regnames;
+ const char * const *pregnames;
+ int i;
+
+ if (!env) {
+ return;
+ }
+ if (env->pregs[PR_VR] < 32) {
+ pregnames = pregnames_v10;
+ regnames = regnames_v10;
+ } else {
+ pregnames = pregnames_v32;
+ regnames = regnames_v32;
+ }
+
+ qemu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
+ "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
+ env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
+ env->cc_op,
+ env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
+
+
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "%s=%8.8x ", regnames[i], env->regs[i]);
+ if ((i + 1) % 4 == 0) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "\nspecial regs:\n");
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
+ if ((i + 1) % 4 == 0) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ if (env->pregs[PR_VR] >= 32) {
+ uint32_t srs = env->pregs[PR_SRS];
+ qemu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
+ if (srs < ARRAY_SIZE(env->sregs)) {
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "s%2.2d=%8.8x ",
+ i, env->sregs[srs][i]);
+ if ((i + 1) % 4 == 0) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ }
+ }
+ qemu_fprintf(f, "\n\n");
+
+}
+
+void cris_initialize_tcg(void)
+{
+ int i;
+
+ cc_x = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_x), "cc_x");
+ cc_src = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_src), "cc_src");
+ cc_dest = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_dest),
+ "cc_dest");
+ cc_result = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_result),
+ "cc_result");
+ cc_op = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_op), "cc_op");
+ cc_size = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_size),
+ "cc_size");
+ cc_mask = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_mask),
+ "cc_mask");
+
+ env_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pc),
+ "pc");
+ env_btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btarget),
+ "btarget");
+ env_btaken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btaken),
+ "btaken");
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, regs[i]),
+ regnames_v32[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pregs[i]),
+ pregnames_v32[i]);
+ }
+}
+
+void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
new file mode 100644
index 000000000..f500e9344
--- /dev/null
+++ b/target/cris/translate_v10.c.inc
@@ -0,0 +1,1333 @@
+/*
+ * CRISv10 emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2010 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "crisv10-decode.h"
+
+static const char * const regnames_v10[] =
+{
+ "$r0", "$r1", "$r2", "$r3",
+ "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11",
+ "$r12", "$r13", "$sp", "$pc",
+};
+
+static const char * const pregnames_v10[] =
+{
+ "$bz", "$vr", "$p2", "$p3",
+ "$wz", "$ccr", "$p6-prefix", "$mof",
+ "$dz", "$ibr", "$irp", "$srp",
+ "$bar", "$dccr", "$brp", "$usp",
+};
+
+/* We need this table to handle preg-moves with implicit width. */
+static const int preg_sizes_v10[] = {
+ 1, /* bz. */
+ 1, /* vr. */
+ 1, /* pid. */
+ 1, /* srs. */
+ 2, /* wz. */
+ 2, 2, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+};
+
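+/* Decode the 2-bit size field: 0 -> 1 byte, 1 -> 2 bytes, 2 -> 4 bytes. */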
+static inline int dec10_size(unsigned int size)
+{
+ size++;
+ if (size == 3)
+ size++;
+ return size;
+}
+
+static inline void cris_illegal_insn(DisasContext *dc)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc);
+ t_gen_raise_exception(EXCP_BREAK);
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size, int mem_index)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv taddr = tcg_temp_local_new();
+ TCGv tval = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ dc->postinc = 0;
+ cris_evaluate_flags(dc);
+
+ tcg_gen_mov_tl(taddr, addr);
+ tcg_gen_mov_tl(tval, val);
+
+ /* Store only if F flag isn't set */
+ tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ if (size == 1) {
+ tcg_gen_qemu_st8(tval, taddr, mem_index);
+ } else if (size == 2) {
+ tcg_gen_qemu_st16(tval, taddr, mem_index);
+ } else {
+ tcg_gen_qemu_st32(tval, taddr, mem_index);
+ }
+ gen_set_label(l1);
+ tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
+ tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
+ tcg_temp_free(t1);
+ tcg_temp_free(tval);
+ tcg_temp_free(taddr);
+}
+
+static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ /*
+ * If we get a fault on a delay slot we must keep the jmp state in
+ * the cpu state to be able to re-execute the jmp.
+ */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ /* Conditional writes. */
+ if (dc->flags_x) {
+ gen_store_v10_conditional(dc, addr, val, size, mem_index);
+ return;
+ }
+
+ if (size == 1) {
+ tcg_gen_qemu_st8(val, addr, mem_index);
+ } else if (size == 2) {
+ tcg_gen_qemu_st16(val, addr, mem_index);
+ } else {
+ tcg_gen_qemu_st32(val, addr, mem_index);
+ }
+}
+
+
+/* Prefix flag and register are used to handle the more complex
+ addressing modes. */
+static void cris_set_prefix(DisasContext *dc)
+{
+ dc->clear_prefix = 0;
+ dc->tb_flags |= PFIX_FLAG;
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG);
+
+ /* prefix insns don't clear the x flag. */
+ dc->clear_x = 0;
+ cris_lock_irq(dc);
+}
+
+static void crisv10_prepare_memaddr(DisasContext *dc,
+ TCGv addr, unsigned int size)
+{
+ if (dc->tb_flags & PFIX_FLAG) {
+ tcg_gen_mov_tl(addr, cpu_PR[PR_PREFIX]);
+ } else {
+ tcg_gen_mov_tl(addr, cpu_R[dc->src]);
+ }
+}
+
+static unsigned int crisv10_post_memaddr(DisasContext *dc, unsigned int size)
+{
+ unsigned int insn_len = 0;
+
+ if (dc->tb_flags & PFIX_FLAG) {
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], cpu_PR[PR_PREFIX]);
+ }
+ } else {
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
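+ /*
+ * A post-increment on $pc means the operand was an inline
+ * immediate; fold the increment into the insn length instead
+ * of generating a runtime add.
+ */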
+ if (dc->src == 15) {
+ insn_len += size & ~1;
+ } else {
+ tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], size);
+ }
+ }
+ }
+ return insn_len;
+}
+
+static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
+{
+ unsigned int rs;
+ uint32_t imm;
+ int is_imm;
+ int insn_len = 0;
+
+ rs = dc->src;
+ is_imm = rs == 15 && !(dc->tb_flags & PFIX_FLAG);
+ LOG_DIS("rs=%d rd=%d is_imm=%d mode=%d pfix=%d\n",
+ rs, dc->dst, is_imm, dc->mode, dc->tb_flags & PFIX_FLAG);
+
+ /* Load [$rs] onto T1. */
+ if (is_imm) {
+ if (memsize != 4) {
+ if (s_ext) {
+ if (memsize == 1)
+ imm = cpu_ldsb_code(env, dc->pc + 2);
+ else
+ imm = cpu_ldsw_code(env, dc->pc + 2);
+ } else {
+ if (memsize == 1)
+ imm = cpu_ldub_code(env, dc->pc + 2);
+ else
+ imm = cpu_lduw_code(env, dc->pc + 2);
+ }
+ } else
+ imm = cpu_ldl_code(env, dc->pc + 2);
+
+ tcg_gen_movi_tl(dst, imm);
+
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
+ insn_len += memsize;
+ if (memsize == 1)
+ insn_len++;
+ tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len);
+ }
+ } else {
+ TCGv addr;
+
+ addr = tcg_temp_new();
+ cris_flush_cc_state(dc);
+ crisv10_prepare_memaddr(dc, addr, memsize);
+ gen_load(dc, dst, addr, memsize, 0);
+ if (s_ext)
+ t_gen_sext(dst, dst, memsize);
+ else
+ t_gen_zext(dst, dst, memsize);
+ insn_len += crisv10_post_memaddr(dc, memsize);
+ tcg_temp_free(addr);
+ }
+
+ if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) {
+ dc->dst = dc->src;
+ }
+ return insn_len;
+}
+
+static unsigned int dec10_quick_imm(DisasContext *dc)
+{
+ int32_t imm, simm;
+ int op;
+ TCGv c;
+
+ /*
+ * Sign-extend the 6-bit immediate: place it in the top of an int8_t,
+ * then arithmetic-shift it back down.
+ */
+ imm = dc->ir & ((1 << 6) - 1);
+ simm = (int8_t) (imm << 2);
+ simm >>= 2;
+ switch (dc->opcode) {
+ case CRISV10_QIMM_BDAP_R0:
+ case CRISV10_QIMM_BDAP_R1:
+ case CRISV10_QIMM_BDAP_R2:
+ case CRISV10_QIMM_BDAP_R3:
+ simm = (int8_t)dc->ir;
+ LOG_DIS("bdap %d $r%d\n", simm, dc->dst);
+ LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
+ dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
+ cris_set_prefix(dc);
+ if (dc->dst == 15) {
+ tcg_gen_movi_tl(cpu_PR[PR_PREFIX], dc->pc + 2 + simm);
+ } else {
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
+ }
+ break;
+
+ case CRISV10_QIMM_MOVEQ:
+ LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(simm);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_CMPQ:
+ LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(simm);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_ADDQ:
+ LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(imm);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_ANDQ:
+ LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(simm);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_ASHQ:
+ LOG_DIS("ashq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ op = imm & (1 << 5);
+ imm &= 0x1f;
+ c = tcg_const_tl(imm);
+ if (op) {
+ cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ } else {
+ /* BTST */
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ c, cpu_PR[PR_CCS]);
+ }
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_LSHQ:
+ LOG_DIS("lshq %d, $r%d\n", simm, dc->dst);
+
+ op = CC_OP_LSL;
+ if (imm & (1 << 5)) {
+ op = CC_OP_LSR;
+ }
+ imm &= 0x1f;
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(imm);
+ cris_alu(dc, op, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_SUBQ:
+ LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(imm);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+ case CRISV10_QIMM_ORQ:
+ LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ c = tcg_const_tl(simm);
+ cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
+ cpu_R[dc->dst], c, 4);
+ tcg_temp_free(c);
+ break;
+
+ case CRISV10_QIMM_BCC_R0:
+ case CRISV10_QIMM_BCC_R1:
+ case CRISV10_QIMM_BCC_R2:
+ case CRISV10_QIMM_BCC_R3:
+ imm = dc->ir & 0xff;
+ /* bit 0 is a sign bit. */
+ if (imm & 1) {
+ imm |= 0xffffff00; /* sign extend. */
+ imm &= ~1; /* get rid of the sign bit. */
+ }
+ imm += 2;
+ LOG_DIS("b%s %d\n", cc_name(dc->cond), imm);
+
+ cris_cc_mask(dc, 0);
+ cris_prepare_cc_branch(dc, imm, dc->cond);
+ break;
+
+ default:
+ LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
+ dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n");
+ break;
+ }
+ return 2;
+}
+
+static unsigned int dec10_setclrf(DisasContext *dc)
+{
+ uint32_t flags;
+ unsigned int set = ~dc->opcode & 1;
+
+ flags = EXTRACT_FIELD(dc->ir, 0, 3)
+ | (EXTRACT_FIELD(dc->ir, 12, 15) << 4);
+ LOG_DIS("%s set=%d flags=%x\n", __func__, set, flags);
+
+
+ if (flags & X_FLAG) {
+ if (set)
+ dc->flags_x = X_FLAG;
+ else
+ dc->flags_x = 0;
+ }
+
+ cris_evaluate_flags (dc);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ cris_update_cc_x(dc);
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+
+ if (set) {
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
+ } else {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS],
+ ~(flags|F_FLAG_V10|P_FLAG_V10));
+ }
+
+ dc->flags_uptodate = 1;
+ dc->clear_x = 0;
+ cris_lock_irq(dc);
+ return 2;
+}
+
+static inline void dec10_reg_prep_sext(DisasContext *dc, int size, int sext,
+ TCGv dd, TCGv ds, TCGv sd, TCGv ss)
+{
+ if (sext) {
+ t_gen_sext(dd, sd, size);
+ t_gen_sext(ds, ss, size);
+ } else {
+ t_gen_zext(dd, sd, size);
+ t_gen_zext(ds, ss, size);
+ }
+}
+
+static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext)
+{
+ TCGv t[2];
+
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ dec10_reg_prep_sext(dc, size, sext,
+ t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
+
+ if (op == CC_OP_LSL || op == CC_OP_LSR || op == CC_OP_ASR) {
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ }
+
+ assert(dc->dst != 15);
+ cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size);
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+static void dec10_reg_bound(DisasContext *dc, int size)
+{
+ TCGv t;
+
+ t = tcg_temp_local_new();
+ t_gen_zext(t, cpu_R[dc->src], size);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_mul(DisasContext *dc, int size, int sext)
+{
+ int op = sext ? CC_OP_MULS : CC_OP_MULU;
+ TCGv t[2];
+
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ dec10_reg_prep_sext(dc, size, sext,
+ t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
+
+ cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4);
+
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+
+static void dec10_reg_movs(DisasContext *dc)
+{
+ int size = (dc->size & 1) + 1;
+ TCGv t;
+
+ LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ t = tcg_temp_new();
+ if (dc->ir & 32)
+ t_gen_sext(t, cpu_R[dc->src], size);
+ else
+ t_gen_zext(t, cpu_R[dc->src], size);
+
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_alux(DisasContext *dc, int op)
+{
+ int size = (dc->size & 1) + 1;
+ TCGv t;
+
+ LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ t = tcg_temp_new();
+ if (dc->ir & 32)
+ t_gen_sext(t, cpu_R[dc->src], size);
+ else
+ t_gen_zext(t, cpu_R[dc->src], size);
+
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_mov_pr(DisasContext *dc)
+{
+ LOG_DIS("move p%d r%d sz=%d\n", dc->dst, dc->src, preg_sizes_v10[dc->dst]);
+ cris_lock_irq(dc);
+ if (dc->src == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_PR[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return;
+ }
+ if (dc->dst == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src],
+ cpu_R[dc->src], cpu_PR[dc->dst], preg_sizes_v10[dc->dst]);
+}
+
+static void dec10_reg_abs(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("abs $r%u, $r%u\n", dc->src, dc->dst);
+
+ assert(dc->dst != 15);
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, cpu_R[dc->src], 31);
+ tcg_gen_xor_tl(cpu_R[dc->dst], cpu_R[dc->src], t0);
+ tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0);
+
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4);
+ tcg_temp_free(t0);
+}
+
+static void dec10_reg_swap(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("not $r%d, $r%d\n", dc->src, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_R[dc->src]);
+ if (dc->dst & 8)
+ tcg_gen_not_tl(t0, t0);
+ if (dc->dst & 4)
+ t_gen_swapw(t0, t0);
+ if (dc->dst & 2)
+ t_gen_swapb(t0, t0);
+ if (dc->dst & 1)
+ t_gen_swapr(t0, t0);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4);
+ tcg_temp_free(t0);
+}
+
+static void dec10_reg_scc(DisasContext *dc)
+{
+ int cond = dc->dst;
+
+ LOG_DIS("s%s $r%u\n", cc_name(cond), dc->src);
+
+ gen_tst_cc(dc, cpu_R[dc->src], cond);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->src], cpu_R[dc->src], 0);
+
+ cris_cc_mask(dc, 0);
+}
+
+static unsigned int dec10_reg(DisasContext *dc)
+{
+ TCGv t;
+ unsigned int insn_len = 2;
+ unsigned int size = dec10_size(dc->size);
+ unsigned int tmp;
+
+ if (dc->size != 3) {
+ switch (dc->opcode) {
+ case CRISV10_REG_MOVE_R:
+ LOG_DIS("move.%d $r%d, $r%d\n", dc->size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_MOVE, size, 0);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ }
+ break;
+ case CRISV10_REG_MOVX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_movs(dc);
+ break;
+ case CRISV10_REG_ADDX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_ADD);
+ break;
+ case CRISV10_REG_SUBX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_SUB);
+ break;
+ case CRISV10_REG_ADD:
+ LOG_DIS("add $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_ADD, size, 0);
+ break;
+ case CRISV10_REG_SUB:
+ LOG_DIS("sub $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_SUB, size, 0);
+ break;
+ case CRISV10_REG_CMP:
+ LOG_DIS("cmp $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_CMP, size, 0);
+ break;
+ case CRISV10_REG_BOUND:
+ LOG_DIS("bound $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_bound(dc, size);
+ break;
+ case CRISV10_REG_AND:
+ LOG_DIS("and $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_AND, size, 0);
+ break;
+ case CRISV10_REG_ADDI:
+ if (dc->src == 15) {
+ /* nop. */
+ return 2;
+ }
+ t = tcg_temp_new();
+ LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size);
+ tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3);
+ tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t);
+ tcg_temp_free(t);
+ break;
+ case CRISV10_REG_LSL:
+ LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LSL, size, 0);
+ break;
+ case CRISV10_REG_LSR:
+ LOG_DIS("lsr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LSR, size, 0);
+ break;
+ case CRISV10_REG_ASR:
+ LOG_DIS("asr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_ASR, size, 1);
+ break;
+ case CRISV10_REG_OR:
+ LOG_DIS("or $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_OR, size, 0);
+ break;
+ case CRISV10_REG_NEG:
+ LOG_DIS("neg $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_NEG, size, 0);
+ break;
+ case CRISV10_REG_BIAP:
+ LOG_DIS("BIAP pc=%x reg %d r%d r%d size=%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst, size);
+ switch (size) {
+ case 4: tmp = 2; break;
+ case 2: tmp = 1; break;
+ case 1: tmp = 0; break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Unhandled BIAP");
+ break;
+ }
+
+ t = tcg_temp_new();
+ tcg_gen_shli_tl(t, cpu_R[dc->dst], tmp);
+ if (dc->src == 15) {
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], t, ((dc->pc + 2) | 1) + 1);
+ } else {
+ tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t);
+ }
+ tcg_temp_free(t);
+ cris_set_prefix(dc);
+ break;
+
+ default:
+ LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ } else {
+ switch (dc->opcode) {
+ case CRISV10_REG_MOVX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_movs(dc);
+ break;
+ case CRISV10_REG_ADDX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_ADD);
+ break;
+ case CRISV10_REG_SUBX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_SUB);
+ break;
+ case CRISV10_REG_MOVE_SPR_R:
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, 0);
+ dec10_reg_mov_pr(dc);
+ break;
+ case CRISV10_REG_MOVE_R_SPR:
+ LOG_DIS("move r%d p%d\n", dc->src, dc->dst);
+ cris_evaluate_flags(dc);
+ if (dc->src != 11) /* fast for srp. */
+ dc->cpustate_changed = 1;
+ t_gen_mov_preg_TN(dc, dc->dst, cpu_R[dc->src]);
+ break;
+ case CRISV10_REG_SETF:
+ case CRISV10_REG_CLEARF:
+ dec10_setclrf(dc);
+ break;
+ case CRISV10_REG_SWAP:
+ dec10_reg_swap(dc);
+ break;
+ case CRISV10_REG_ABS:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_abs(dc);
+ break;
+ case CRISV10_REG_LZ:
+ LOG_DIS("lz $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LZ, 4, 0);
+ break;
+ case CRISV10_REG_XOR:
+ LOG_DIS("xor $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_XOR, 4, 0);
+ break;
+ case CRISV10_REG_BTST:
+ LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ cpu_R[dc->src], cpu_PR[PR_CCS]);
+ break;
+ case CRISV10_REG_DSTEP:
+ LOG_DIS("dstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_DSTEP, cpu_R[dc->dst],
+ cpu_R[dc->dst], cpu_R[dc->src], 4);
+ break;
+ case CRISV10_REG_MSTEP:
+ LOG_DIS("mstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_MSTEP, cpu_R[dc->dst],
+ cpu_R[dc->dst], cpu_R[dc->src], 4);
+ break;
+ case CRISV10_REG_SCC:
+ dec10_reg_scc(dc);
+ break;
+ default:
+ LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ }
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
+{
+ unsigned int insn_len = 2;
+ TCGv t;
+
+ LOG_DIS("%s: move.%d [$r%d], $r%d\n", __func__,
+ size, dc->src, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t = tcg_temp_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, size);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size)
+{
+ unsigned int insn_len = 2;
+ TCGv addr;
+
+ LOG_DIS("move.%d $r%d, [$r%d]\n", dc->size, dc->src, dc->dst);
+ addr = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, size);
+ gen_store_v10(dc, addr, cpu_R[dc->dst], size);
+ insn_len += crisv10_post_memaddr(dc, size);
+ tcg_temp_free(addr);
+
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2, rd = dc->dst;
+ TCGv t, addr;
+
+ LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
+ cris_lock_irq(dc);
+
+ addr = tcg_temp_new();
+ t = tcg_temp_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, 4, t);
+ if (rd == 15) {
+ tcg_gen_mov_tl(env_btarget, t);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ } else {
+ tcg_gen_mov_tl(cpu_PR[rd], t);
+ dc->cpustate_changed = 1;
+ }
+ tcg_temp_free(addr);
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_pr_m(DisasContext *dc)
+{
+ unsigned int insn_len = 2, size = preg_sizes_v10[dc->dst];
+ TCGv addr, t0;
+
+ LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
+
+ addr = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, size);
+ if (dc->dst == PR_CCS) {
+ t0 = tcg_temp_new();
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG);
+ gen_store_v10(dc, addr, t0, size);
+ tcg_temp_free(t0);
+ } else {
+ gen_store_v10(dc, addr, cpu_PR[dc->dst], size);
+ }
+ insn_len += crisv10_post_memaddr(dc, size);
+ tcg_temp_free(addr);
+ cris_lock_irq(dc);
+
+ return insn_len;
+}
+
+static void dec10_movem_r_m(DisasContext *dc)
+{
+ int i, pfix = dc->tb_flags & PFIX_FLAG;
+ TCGv addr, t0;
+
+ LOG_DIS("%s r%d, [r%d] pi=%d ir=%x\n", __func__,
+ dc->dst, dc->src, dc->postinc, dc->ir);
+
+ addr = tcg_temp_new();
+ t0 = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, 4);
+ tcg_gen_mov_tl(t0, addr);
+ for (i = dc->dst; i >= 0; i--) {
+ if ((pfix && dc->mode == CRISV10_MODE_AUTOINC) && dc->src == i) {
+ gen_store_v10(dc, addr, t0, 4);
+ } else {
+ gen_store_v10(dc, addr, cpu_R[i], 4);
+ }
+ tcg_gen_addi_tl(addr, addr, 4);
+ }
+
+ if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], t0);
+ }
+
+ if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], addr);
+ }
+ tcg_temp_free(addr);
+ tcg_temp_free(t0);
+}
+
+static void dec10_movem_m_r(DisasContext *dc)
+{
+ int i, pfix = dc->tb_flags & PFIX_FLAG;
+ TCGv addr, t0;
+
+ LOG_DIS("%s [r%d], r%d pi=%d ir=%x\n", __func__,
+ dc->src, dc->dst, dc->postinc, dc->ir);
+
+ addr = tcg_temp_new();
+ t0 = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, 4);
+ tcg_gen_mov_tl(t0, addr);
+ for (i = dc->dst; i >= 0; i--) {
+ gen_load(dc, cpu_R[i], addr, 4, 0);
+ tcg_gen_addi_tl(addr, addr, 4);
+ }
+
+ if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], t0);
+ }
+
+ if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], addr);
+ }
+ tcg_temp_free(addr);
+ tcg_temp_free(t0);
+}
+
+static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc,
+ int op, unsigned int size)
+{
+ int insn_len = 0;
+ int rd = dc->dst;
+ TCGv t[2];
+
+ cris_alu_m_alloc_temps(t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t[0], size);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ cris_alu_m_free_temps(t);
+
+ return insn_len;
+}
+
+static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
+{
+ int insn_len = 0;
+ int rd = dc->dst;
+ TCGv t;
+
+ t = tcg_temp_local_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op)
+{
+ unsigned int size = (dc->size & 1) ? 2 : 1;
+ unsigned int sx = !!(dc->size & 2);
+ int insn_len = 2;
+ int rd = dc->dst;
+ TCGv t;
+
+ LOG_DIS("addx size=%d sx=%d op=%d %d\n", size, sx, dc->src, dc->dst);
+
+ t = tcg_temp_new();
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_prep_move_m(env, dc, sx, size, t);
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static int dec10_dip(CPUCRISState *env, DisasContext *dc)
+{
+ int insn_len = 2;
+ uint32_t imm;
+
+ LOG_DIS("dip pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->src, dc->dst);
+ if (dc->src == 15) {
+ imm = cpu_ldl_code(env, dc->pc + 2);
+ tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm);
+ if (dc->postinc)
+ insn_len += 4;
+ tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len - 2);
+ } else {
+ gen_load(dc, cpu_PR[PR_PREFIX], cpu_R[dc->src], 4, 0);
+ if (dc->postinc)
+ tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], 4);
+ }
+
+ cris_set_prefix(dc);
+ return insn_len;
+}
+
+static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
+{
+ int insn_len = 2;
+ int rd = dc->dst;
+
+ LOG_DIS("bdap_m pc=%x opcode=%d r%d r%d sz=%d\n",
+ dc->pc, dc->opcode, dc->src, dc->dst, size);
+
+ assert(dc->dst != 15);
+#if 0
+ /* 8bit embedded offset? */
+ if (!dc->postinc && (dc->ir & (1 << 11))) {
+ int simm = dc->ir & 0xff;
+
+ /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */
+ /* sign extended. */
+ simm = (int8_t)simm;
+
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
+
+ cris_set_prefix(dc);
+ return insn_len;
+ }
+#endif
+ /* Now the rest of the modes are truly indirect. */
+ insn_len += dec10_prep_move_m(env, dc, 1, size, cpu_PR[PR_PREFIX]);
+ tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]);
+ cris_set_prefix(dc);
+ return insn_len;
+}
+
+static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2;
+ unsigned int size = dec10_size(dc->size);
+ uint32_t imm;
+ int32_t simm;
+ TCGv t[2], c;
+
+ if (dc->size != 3) {
+ switch (dc->opcode) {
+ case CRISV10_IND_MOVE_M_R:
+ return dec10_ind_move_m_r(env, dc, size);
+ case CRISV10_IND_MOVE_R_M:
+ return dec10_ind_move_r_m(dc, size);
+ case CRISV10_IND_CMP:
+ LOG_DIS("cmp size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_CMP, size);
+ break;
+ case CRISV10_IND_TEST:
+ LOG_DIS("test size=%d op=%d %d\n", size, dc->src, dc->dst);
+
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_m_alloc_temps(t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
+ c = tcg_const_tl(0);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
+ t[0], c, size);
+ tcg_temp_free(c);
+ cris_alu_m_free_temps(t);
+ break;
+ case CRISV10_IND_ADD:
+ LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_ADD, size);
+ break;
+ case CRISV10_IND_SUB:
+ LOG_DIS("sub size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_SUB, size);
+ break;
+ case CRISV10_IND_BOUND:
+ LOG_DIS("bound size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_bound(env, dc, size);
+ break;
+ case CRISV10_IND_AND:
+ LOG_DIS("and size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_AND, size);
+ break;
+ case CRISV10_IND_OR:
+ LOG_DIS("or size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_OR, size);
+ break;
+ case CRISV10_IND_MOVX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
+ break;
+ case CRISV10_IND_ADDX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
+ break;
+ case CRISV10_IND_SUBX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
+ break;
+ case CRISV10_IND_CMPX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
+ break;
+ case CRISV10_IND_MUL:
+ /* This is a reg insn coded in the mem indir space. */
+ LOG_DIS("mul pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_mul(dc, size, dc->ir & (1 << 10));
+ break;
+ case CRISV10_IND_BDAP_M:
+ insn_len = dec10_bdap_m(env, dc, size);
+ break;
+ default:
+ /*
+ * ADDC for v17:
+ *
+ * Instruction format: ADDC [Rs],Rd
+ *
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ * |Destination(Rd)| 1 0 0 1 1 0 1 0 | Source(Rs)|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ *
+ * Instruction format: ADDC [Rs+],Rd
+ *
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ * |Destination(Rd)| 1 1 0 1 1 0 1 0 | Source(Rs)|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ */
+ if (dc->opcode == CRISV17_IND_ADDC && dc->size == 2 &&
+ env->pregs[PR_VR] == 17) {
+ LOG_DIS("addc op=%d %d\n", dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_ADDC, size);
+ break;
+ }
+
+ LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
+ dc->pc, size, dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ return insn_len;
+ }
+
+ switch (dc->opcode) {
+ case CRISV10_IND_MOVE_M_SPR:
+ insn_len = dec10_ind_move_m_pr(env, dc);
+ break;
+ case CRISV10_IND_MOVE_SPR_M:
+ insn_len = dec10_ind_move_pr_m(dc);
+ break;
+ case CRISV10_IND_JUMP_M:
+ if (dc->src == 15) {
+ LOG_DIS("jump.%d %d r%d r%d direct\n", size,
+ dc->opcode, dc->src, dc->dst);
+ imm = cpu_ldl_code(env, dc->pc + 2);
+ if (dc->mode == CRISV10_MODE_AUTOINC)
+ insn_len += size;
+
+ c = tcg_const_tl(dc->pc + insn_len);
+ t_gen_mov_preg_TN(dc, dc->dst, c);
+ tcg_temp_free(c);
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ } else {
+ if (dc->dst == 14) {
+ LOG_DIS("break %d\n", dc->src);
+ cris_evaluate_flags(dc);
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ c = tcg_const_tl(dc->src + 2);
+ t_gen_mov_env_TN(trap_vector, c);
+ tcg_temp_free(c);
+ t_gen_raise_exception(EXCP_BREAK);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return insn_len;
+ }
+ LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
+ dc->opcode, dc->src, dc->dst);
+ t[0] = tcg_temp_new();
+ c = tcg_const_tl(dc->pc + insn_len);
+ t_gen_mov_preg_TN(dc, dc->dst, c);
+ tcg_temp_free(c);
+ crisv10_prepare_memaddr(dc, t[0], size);
+ gen_load(dc, env_btarget, t[0], 4, 0);
+ insn_len += crisv10_post_memaddr(dc, size);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ tcg_temp_free(t[0]);
+ }
+ break;
+
+ case CRISV10_IND_MOVEM_R_M:
+ LOG_DIS("movem_r_m pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->dst, dc->src);
+ dec10_movem_r_m(dc);
+ break;
+ case CRISV10_IND_MOVEM_M_R:
+ LOG_DIS("movem_m_r pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ dec10_movem_m_r(dc);
+ break;
+ case CRISV10_IND_JUMP_R:
+ LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->dst, dc->src);
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]);
+ c = tcg_const_tl(dc->pc + insn_len);
+ t_gen_mov_preg_TN(dc, dc->dst, c);
+ tcg_temp_free(c);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ break;
+ case CRISV10_IND_MOVX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
+ break;
+ case CRISV10_IND_ADDX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
+ break;
+ case CRISV10_IND_SUBX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
+ break;
+ case CRISV10_IND_CMPX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
+ break;
+ case CRISV10_IND_DIP:
+ insn_len = dec10_dip(env, dc);
+ break;
+ case CRISV10_IND_BCC_M:
+
+ cris_cc_mask(dc, 0);
+ simm = cpu_ldsw_code(env, dc->pc + 2);
+ simm += 4;
+
+ LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
+ cris_prepare_cc_branch(dc, simm, dc->cond);
+ insn_len = 4;
+ break;
+ default:
+ LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+
+ return insn_len;
+}
+
+static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2;
+
+ /* Load a halfword onto the instruction register. */
+ dc->ir = cpu_lduw_code(env, dc->pc);
+
+ /* Now decode it. */
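+ /*
+ * Field layout: [15:12] dst/cond, [11:10] mode, [10] postinc,
+ * [9:6] opcode, [5:4] size, [3:0] src.
+ */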
+ dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9);
+ dc->mode = EXTRACT_FIELD(dc->ir, 10, 11);
+ dc->src = EXTRACT_FIELD(dc->ir, 0, 3);
+ dc->size = EXTRACT_FIELD(dc->ir, 4, 5);
+ dc->cond = dc->dst = EXTRACT_FIELD(dc->ir, 12, 15);
+ dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
+
+ dc->clear_prefix = 1;
+
+ /* FIXME: What if this insn insn't 2 in length?? */
+ if (dc->src == 15 || dc->dst == 15)
+ tcg_gen_movi_tl(cpu_R[15], dc->pc + 2);
+
+ switch (dc->mode) {
+ case CRISV10_MODE_QIMMEDIATE:
+ insn_len = dec10_quick_imm(dc);
+ break;
+ case CRISV10_MODE_REG:
+ insn_len = dec10_reg(dc);
+ break;
+ case CRISV10_MODE_AUTOINC:
+ case CRISV10_MODE_INDIRECT:
+ insn_len = dec10_ind(env, dc);
+ break;
+ }
+
+ if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) {
+ dc->tb_flags &= ~PFIX_FLAG;
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG);
+ if (dc->tb_flags != dc->base.tb->flags) {
+ dc->cpustate_changed = 1;
+ }
+ }
+
+ /* CRISv10 locks out interrupts on dslots. */
+ if (dc->delayed_branch == 2) {
+ cris_lock_irq(dc);
+ }
+ return insn_len;
+}
+
+void cris_initialize_crisv10_tcg(void)
+{
+ int i;
+
+ cc_x = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_x), "cc_x");
+ cc_src = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_src), "cc_src");
+ cc_dest = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_dest),
+ "cc_dest");
+ cc_result = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_result),
+ "cc_result");
+ cc_op = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_op), "cc_op");
+ cc_size = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_size),
+ "cc_size");
+ cc_mask = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_mask),
+ "cc_mask");
+
+ env_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pc),
+ "pc");
+ env_btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btarget),
+ "btarget");
+ env_btaken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btaken),
+ "btaken");
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, regs[i]),
+ regnames_v10[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pregs[i]),
+ pregnames_v10[i]);
+ }
+}
diff --git a/target/hexagon/README b/target/hexagon/README
new file mode 100644
index 000000000..372e24747
--- /dev/null
+++ b/target/hexagon/README
@@ -0,0 +1,314 @@
+Hexagon is Qualcomm's very long instruction word (VLIW) digital signal
+processor (DSP). We also support Hexagon Vector eXtensions (HVX). HVX
+is a wide vector coprocessor designed for high performance computer vision,
+image processing, machine learning, and other workloads.
+
+The following versions of the Hexagon core are supported
+ Scalar core: v67
+ https://developer.qualcomm.com/downloads/qualcomm-hexagon-v67-programmer-s-reference-manual
+ HVX extension: v66
+ https://developer.qualcomm.com/downloads/qualcomm-hexagon-v66-hvx-programmer-s-reference-manual
+
+We presented an overview of the project at the 2019 KVM Forum.
+ https://kvmforum2019.sched.com/event/Tmwc/qemu-hexagon-automatic-translation-of-the-isa-manual-pseudcode-to-tiny-code-instructions-of-a-vliw-architecture-niccolo-izzo-revng-taylor-simpson-qualcomm-innovation-center
+
+*** Tour of the code ***
+
+The qemu-hexagon implementation is a combination of qemu and the Hexagon
+architecture library (aka archlib). The three primary directories with
+Hexagon-specific code are
+
+ qemu/target/hexagon
+ This has all the instruction and packet semantics
+ qemu/target/hexagon/imported
+ These files are imported with very little modification from archlib
+ *.idef Instruction semantics definition
+ macros.def Mapping of macros to instruction attributes
+ encode*.def Encoding patterns for each instruction
+ iclass.def Instruction class definitions used to determine
+ legal VLIW slots for each instruction
+ qemu/linux-user/hexagon
+ Helpers for loading the ELF file and making Linux system calls,
+ signals, etc.
+
+We start with scripts that generate a bunch of include files. This
+is a two-step process. The first step is to use the C preprocessor to expand
+macros inside the architecture definition files. This is done in
+target/hexagon/gen_semantics.c. This step produces
+ <BUILD_DIR>/target/hexagon/semantics_generated.pyinc.
+That file is consumed by the following python scripts to produce the indicated
+header files in <BUILD_DIR>/target/hexagon
+ gen_opcodes_def.py -> opcodes_def_generated.h.inc
+ gen_op_regs.py -> op_regs_generated.h.inc
+ gen_printinsn.py -> printinsn_generated.h.inc
+ gen_op_attribs.py -> op_attribs_generated.h.inc
+ gen_helper_protos.py -> helper_protos_generated.h.inc
+ gen_shortcode.py -> shortcode_generated.h.inc
+ gen_tcg_funcs.py -> tcg_funcs_generated.c.inc
+ gen_tcg_func_table.py -> tcg_func_table_generated.c.inc
+ gen_helper_funcs.py -> helper_funcs_generated.c.inc
+
+QEMU helper functions have 3 parts
+ DEF_HELPER declaration indicates the signature of the helper
+ gen_helper_<NAME> will generate a TCG call to the helper function
+ The helper implementation
+
+Here's an example of the A2_add instruction.
+ Instruction tag A2_add
+ Assembly syntax "Rd32=add(Rs32,Rt32)"
+ Instruction semantics "{ RdV=RsV+RtV;}"
+
+By convention, the operands are identified by letter
+ RdV is the destination register
+ RsV, RtV are source registers
+
+The generator uses the operand naming conventions (see large comment in
+hex_common.py) to determine the signature of the helper function. Here are the
+results for A2_add
+
+helper_protos_generated.h.inc
+ DEF_HELPER_3(A2_add, s32, env, s32, s32)
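+ (by QEMU convention the first type is the return value; the remaining
+ ones are the argument types)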
+
+tcg_funcs_generated.c.inc
+ static void generate_A2_add(
+ CPUHexagonState *env,
+ DisasContext *ctx,
+ Insn *insn,
+ Packet *pkt)
+ {
+ TCGv RdV = tcg_temp_local_new();
+ const int RdN = insn->regno[0];
+ TCGv RsV = hex_gpr[insn->regno[1]];
+ TCGv RtV = hex_gpr[insn->regno[2]];
+ gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
+ gen_log_reg_write(RdN, RdV);
+ ctx_log_reg_write(ctx, RdN);
+ tcg_temp_free(RdV);
+ }
+
+helper_funcs_generated.c.inc
+ int32_t HELPER(A2_add)(CPUHexagonState *env, int32_t RsV, int32_t RtV)
+ {
+ uint32_t slot __attribute__((unused)) = 4;
+ int32_t RdV = 0;
+ { RdV=RsV+RtV;}
+ return RdV;
+ }
+
+Note that generate_A2_add updates the disassembly context to be processed
+when the packet commits (see "Packet Semantics" below).
+
+The generator checks for the fGEN_TCG_<tag> macro. This allows us to generate
+TCG code instead of a call to the helper. If defined, the macro takes 1
+argument.
+    C semantics (aka short code)
+
+This allows the code generator to override the auto-generated code. In some
+cases this is necessary for correct execution. We can also override for
+faster emulation. For example, calling a helper for add is more expensive
+than generating a TCG add operation.
+
+Any such overrides live in the gen_tcg.h file. For example, we could write
+ #define fGEN_TCG_A2_add(GENHLPR, SHORTCODE) \
+ tcg_gen_add_tl(RdV, RsV, RtV)
+
+The instruction semantics C code relies heavily on macros. In cases where the
+C semantics are specified only with macros, we can override the default with
+the short semantics option and #define the macros to generate TCG code. One
+example is L2_loadw_locked:
+ Instruction tag L2_loadw_locked
+ Assembly syntax "Rd32=memw_locked(Rs32)"
+ Instruction semantics "{ fEA_REG(RsV); fLOAD_LOCKED(1,4,u,EA,RdV) }"
+
+In gen_tcg.h, we use the shortcode
+#define fGEN_TCG_L2_loadw_locked(SHORTCODE) \
+ SHORTCODE
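+
+This works because, with QEMU_GENERATE defined (see the description of
+macros.h under "Key Files" below), the macros in the semantics expand to TCG
+generation calls, so the SHORTCODE itself emits the code. As a rough sketch
+(the real definition lives in macros.h and may differ in detail), fEA_REG
+could expand to
+    #define fEA_REG(REG) tcg_gen_mov_tl(EA, REG)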
+
+There are also cases where we brute force the TCG code generation.
+Instructions with multiple definitions are one example. These require special
+handling because qemu helpers can only return a single value.
+
+For HVX vectors, the generator behaves slightly differently. The wide vectors
+won't fit in a TCGv or TCGv_i64, so we use TCGv_ptr variables to pass the
+address of each vector to the helper functions. Here's an example for an HVX
+vector-add-word instruction.
+ static void generate_V6_vaddw(
+ CPUHexagonState *env,
+ DisasContext *ctx,
+ Insn *insn,
+ Packet *pkt)
+ {
+ const int VdN = insn->regno[0];
+ const intptr_t VdV_off =
+ ctx_future_vreg_off(ctx, VdN, 1, true);
+ TCGv_ptr VdV = tcg_temp_local_new_ptr();
+ tcg_gen_addi_ptr(VdV, cpu_env, VdV_off);
+ const int VuN = insn->regno[1];
+ const intptr_t VuV_off =
+ vreg_src_off(ctx, VuN);
+ TCGv_ptr VuV = tcg_temp_local_new_ptr();
+ const int VvN = insn->regno[2];
+ const intptr_t VvV_off =
+ vreg_src_off(ctx, VvN);
+ TCGv_ptr VvV = tcg_temp_local_new_ptr();
+ tcg_gen_addi_ptr(VuV, cpu_env, VuV_off);
+ tcg_gen_addi_ptr(VvV, cpu_env, VvV_off);
+ TCGv slot = tcg_constant_tl(insn->slot);
+ gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot);
+ tcg_temp_free(slot);
+ gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false);
+ ctx_log_vreg_write(ctx, VdN, EXT_DFL, false);
+ tcg_temp_free_ptr(VdV);
+ tcg_temp_free_ptr(VuV);
+ tcg_temp_free_ptr(VvV);
+ }
+
+Notice that we also generate a variable named <operand>_off for each operand of
+the instruction. This makes it easy to override the instruction semantics with
+functions from tcg-op-gvec.h. Here's the override for this instruction.
+ #define fGEN_TCG_V6_vaddw(SHORTCODE) \
+ tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
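+The gvec interface expands the operation inline, using host vector
+instructions where available, so it avoids a helper call for this common
+operation; this is the same speed rationale as the scalar add override above.
+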
+Finally, note that the override doesn't use the TCGv_ptr variables, so we
+don't generate them when an override is present. Here is what we generate
+in that case.
+ static void generate_V6_vaddw(
+ CPUHexagonState *env,
+ DisasContext *ctx,
+ Insn *insn,
+ Packet *pkt)
+ {
+ const int VdN = insn->regno[0];
+ const intptr_t VdV_off =
+ ctx_future_vreg_off(ctx, VdN, 1, true);
+ const int VuN = insn->regno[1];
+ const intptr_t VuV_off =
+ vreg_src_off(ctx, VuN);
+ const int VvN = insn->regno[2];
+ const intptr_t VvV_off =
+ vreg_src_off(ctx, VvN);
+ fGEN_TCG_V6_vaddw({ fHIDE(int i;) fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i] ; } });
+ gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false);
+ ctx_log_vreg_write(ctx, VdN, EXT_DFL, false);
+ }
+
+In addition to instruction semantics, we use a generator to create the decode
+tree. This generation is also a two-step process. The first step is to run
+target/hexagon/gen_dectree_import.c to produce
+ <BUILD_DIR>/target/hexagon/iset.py
+This file is imported by target/hexagon/dectree.py to produce
+ <BUILD_DIR>/target/hexagon/dectree_generated.h.inc
+
+*** Key Files ***
+
+cpu.h
+
+This file contains the definition of the CPUHexagonState struct. It holds the
+runtime state for each thread, including the general purpose and predicate
+registers.
+
+macros.h
+mmvec/macros.h
+
+The Hexagon arch lib relies heavily on macros for the instruction semantics.
+This is a great advantage for qemu because we can override them for different
+purposes. You will also notice there are sometimes two definitions of a macro.
+The QEMU_GENERATE variable determines whether we want the macro to generate TCG
+code. If QEMU_GENERATE is not defined, we want the macro to generate vanilla
+C code that will work in the helper implementation.
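+
+For example, a macro along these lines (a hypothetical one, shown only to
+illustrate the pattern) would be defined once for each mode
+    #ifdef QEMU_GENERATE
+    #define fABS_EXAMPLE(DST, SRC) tcg_gen_abs_tl(DST, SRC)
+    #else
+    #define fABS_EXAMPLE(DST, SRC) ((DST) = ((SRC) < 0 ? -(SRC) : (SRC)))
+    #endif
+When QEMU_GENERATE is defined, DST and SRC are TCG values and the macro emits
+TCG code; otherwise, they are ordinary integers and the macro is plain C.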
+
+translate.c
+
+The functions in this file generate TCG code for a translation block. Some
+important functions in this file are
+
+ gen_start_packet - initialize the data structures for packet semantics
+ gen_commit_packet - commit the register writes, stores, etc for a packet
+ decode_and_translate_packet - disassemble a packet and generate code
+
+genptr.c
+gen_tcg.h
+
+These files create a function for each instruction. They are mostly composed
+of fGEN_TCG_<tag> definitions (in gen_tcg.h) followed by the inclusion of
+tcg_funcs_generated.c.inc (in genptr.c).
+
+op_helper.c
+
+This file contains the implementations of all the helpers. There are a few
+general purpose helpers, but most of them are generated by including
+helper_funcs_generated.c.inc. There are also several helpers used for debugging.
+
+
+*** Packet Semantics ***
+
+VLIW packet semantics differ from serial semantics in that all input operands
+are read, then the operations are performed, then all the results are written.
+For example, this packet performs a swap of registers r0 and r1
+ { r0 = r1; r1 = r0 }
+Note that the result is different if the instructions are executed serially.
+
+Packet semantics dictate that we defer any changes of state until the entire
+packet is committed. We record the results of each instruction in a side data
+structure, and update the visible processor state when we commit the packet.
+
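+As a minimal sketch of the pattern (RdN and result are illustrative names,
+not the exact code), a register write is recorded during execution and only
+made visible at commit
+    /* while the packet executes: record the write */
+    env->new_value[RdN] = result;
+    /* when the packet commits: update the visible state */
+    env->gpr[RdN] = env->new_value[RdN];
+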
+The data structures are divided between the runtime state and the translation
+context.
+
+During the TCG generation (see translate.[ch]), we use the DisasContext to
+track what needs to be done during packet commit. Here are the relevant
+fields
+
+    reg_log                list of registers written
+    reg_log_idx            index into reg_log
+    pred_log               list of predicates written
+    pred_log_idx           index into pred_log
+    store_width            width of stores (indexed by slot)
+
+During runtime, the following fields in CPUHexagonState (see cpu.h) are used
+
+ new_value new value of a given register
+ reg_written boolean indicating if register was written
+ new_pred_value new value of a predicate register
+ pred_written boolean indicating if predicate was written
+ mem_log_stores record of the stores (indexed by slot)
+
+For Hexagon Vector eXtensions (HVX), the following fields are used
+ VRegs Vector registers
+ future_VRegs Registers to be stored during packet commit
+ tmp_VRegs Temporary registers *not* stored during commit
+ VRegs_updated Mask of predicated vector writes
+ QRegs Q (vector predicate) registers
+ future_QRegs Registers to be stored during packet commit
+ QRegs_updated Mask of predicated vector writes
+
+*** Debugging ***
+
+You can turn on extensive debugging output by changing the HEX_DEBUG macro to
+1 in internal.h. This will stream a lot of information as it generates TCG and
+executes the code.
+
+To track down nasty issues with Hexagon->TCG generation, we compare the
+execution results with actual hardware running on a Hexagon Linux target.
+Run qemu with the "-d cpu" option. Then, we can diff the results and figure
+out where qemu and hardware behave differently.
+
+The stacks are located at different locations. We handle this by changing
+env->stack_adjust in translate.c. First, set this to zero and run qemu.
+Then, change env->stack_adjust to the difference between the two stack
+locations. Then rebuild qemu and run again. That will produce a very
+clean diff.
+
+Here are some handy places to set breakpoints
+
+ At the call to gen_start_packet for a given PC (note that the line number
+ might change in the future)
+ br translate.c:602 if ctx->base.pc_next == 0xdeadbeef
+ The helper function for each instruction is named helper_<TAG>, so here's
+ an example that will set a breakpoint at the start
+ br helper_A2_add
+ If you have the HEX_DEBUG macro set, the following will be useful
+ At the start of execution of a packet for a given PC
+ br helper_debug_start_packet if env->gpr[41] == 0xdeadbeef
+ At the end of execution of a packet for a given PC
+ br helper_debug_commit_end if env->this_PC == 0xdeadbeef
diff --git a/target/hexagon/arch.c b/target/hexagon/arch.c
new file mode 100644
index 000000000..68a55b3bd
--- /dev/null
+++ b/target/hexagon/arch.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "fpu/softfloat.h"
+#include "cpu.h"
+#include "fma_emu.h"
+#include "arch.h"
+#include "macros.h"
+
+#define SF_BIAS 127
+#define SF_MAXEXP 254
+#define SF_MANTBITS 23
+#define float32_nan make_float32(0xffffffff)
+
+/*
+ * These three tables are used by the cabacdecbin instruction
+ */
+const uint8_t rLPS_table_64x4[64][4] = {
+ {128, 176, 208, 240},
+ {128, 167, 197, 227},
+ {128, 158, 187, 216},
+ {123, 150, 178, 205},
+ {116, 142, 169, 195},
+ {111, 135, 160, 185},
+ {105, 128, 152, 175},
+ {100, 122, 144, 166},
+ {95, 116, 137, 158},
+ {90, 110, 130, 150},
+ {85, 104, 123, 142},
+ {81, 99, 117, 135},
+ {77, 94, 111, 128},
+ {73, 89, 105, 122},
+ {69, 85, 100, 116},
+ {66, 80, 95, 110},
+ {62, 76, 90, 104},
+ {59, 72, 86, 99},
+ {56, 69, 81, 94},
+ {53, 65, 77, 89},
+ {51, 62, 73, 85},
+ {48, 59, 69, 80},
+ {46, 56, 66, 76},
+ {43, 53, 63, 72},
+ {41, 50, 59, 69},
+ {39, 48, 56, 65},
+ {37, 45, 54, 62},
+ {35, 43, 51, 59},
+ {33, 41, 48, 56},
+ {32, 39, 46, 53},
+ {30, 37, 43, 50},
+ {29, 35, 41, 48},
+ {27, 33, 39, 45},
+ {26, 31, 37, 43},
+ {24, 30, 35, 41},
+ {23, 28, 33, 39},
+ {22, 27, 32, 37},
+ {21, 26, 30, 35},
+ {20, 24, 29, 33},
+ {19, 23, 27, 31},
+ {18, 22, 26, 30},
+ {17, 21, 25, 28},
+ {16, 20, 23, 27},
+ {15, 19, 22, 25},
+ {14, 18, 21, 24},
+ {14, 17, 20, 23},
+ {13, 16, 19, 22},
+ {12, 15, 18, 21},
+ {12, 14, 17, 20},
+ {11, 14, 16, 19},
+ {11, 13, 15, 18},
+ {10, 12, 15, 17},
+ {10, 12, 14, 16},
+ {9, 11, 13, 15},
+ {9, 11, 12, 14},
+ {8, 10, 12, 14},
+ {8, 9, 11, 13},
+ {7, 9, 11, 12},
+ {7, 9, 10, 12},
+ {7, 8, 10, 11},
+ {6, 8, 9, 11},
+ {6, 7, 9, 10},
+ {6, 7, 8, 9},
+ {2, 2, 2, 2}
+};
+
+const uint8_t AC_next_state_MPS_64[64] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 62, 63
+};
+
+
+const uint8_t AC_next_state_LPS_64[64] = {
+ 0, 0, 1, 2, 2, 4, 4, 5, 6, 7,
+ 8, 9, 9, 11, 11, 12, 13, 13, 15, 15,
+ 16, 16, 18, 18, 19, 19, 21, 21, 22, 22,
+ 23, 24, 24, 25, 26, 26, 27, 27, 28, 29,
+ 29, 30, 30, 30, 31, 32, 32, 33, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 63
+};
+
+#define BITS_MASK_8 0x5555555555555555ULL
+#define PAIR_MASK_8 0x3333333333333333ULL
+#define NYBL_MASK_8 0x0f0f0f0f0f0f0f0fULL
+#define BYTE_MASK_8 0x00ff00ff00ff00ffULL
+#define HALF_MASK_8 0x0000ffff0000ffffULL
+#define WORD_MASK_8 0x00000000ffffffffULL
+
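+/* Bit-wise interleave two 32-bit values; bits of 'even' fill the even bits */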
+uint64_t interleave(uint32_t odd, uint32_t even)
+{
+ /* Convert to long long */
+ uint64_t myodd = odd;
+ uint64_t myeven = even;
+ /* First, spread bits out */
+ myodd = (myodd | (myodd << 16)) & HALF_MASK_8;
+ myeven = (myeven | (myeven << 16)) & HALF_MASK_8;
+ myodd = (myodd | (myodd << 8)) & BYTE_MASK_8;
+ myeven = (myeven | (myeven << 8)) & BYTE_MASK_8;
+ myodd = (myodd | (myodd << 4)) & NYBL_MASK_8;
+ myeven = (myeven | (myeven << 4)) & NYBL_MASK_8;
+ myodd = (myodd | (myodd << 2)) & PAIR_MASK_8;
+ myeven = (myeven | (myeven << 2)) & PAIR_MASK_8;
+ myodd = (myodd | (myodd << 1)) & BITS_MASK_8;
+ myeven = (myeven | (myeven << 1)) & BITS_MASK_8;
+ /* Now OR together */
+ return myeven | (myodd << 1);
+}
+
+uint64_t deinterleave(uint64_t src)
+{
+ /* Get odd and even bits */
+ uint64_t myodd = ((src >> 1) & BITS_MASK_8);
+ uint64_t myeven = (src & BITS_MASK_8);
+
+ /* Unspread bits */
+ myeven = (myeven | (myeven >> 1)) & PAIR_MASK_8;
+ myodd = (myodd | (myodd >> 1)) & PAIR_MASK_8;
+ myeven = (myeven | (myeven >> 2)) & NYBL_MASK_8;
+ myodd = (myodd | (myodd >> 2)) & NYBL_MASK_8;
+ myeven = (myeven | (myeven >> 4)) & BYTE_MASK_8;
+ myodd = (myodd | (myodd >> 4)) & BYTE_MASK_8;
+ myeven = (myeven | (myeven >> 8)) & HALF_MASK_8;
+ myodd = (myodd | (myodd >> 8)) & HALF_MASK_8;
+ myeven = (myeven | (myeven >> 16)) & WORD_MASK_8;
+ myodd = (myodd | (myodd >> 16)) & WORD_MASK_8;
+
+ /* Return odd bits in upper half */
+ return myeven | (myodd << 32);
+}
+
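+/* Round a >> n to the nearest integer, with ties rounded to even */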
+int32_t conv_round(int32_t a, int n)
+{
+ int64_t val;
+
+ if (n == 0) {
+ val = a;
+ } else if ((a & ((1 << (n - 1)) - 1)) == 0) { /* N-1..0 all zero? */
+ /* Add LSB from int part */
+ val = ((fSE32_64(a)) + (int64_t) (((uint32_t) ((1 << n) & a)) >> 1));
+ } else {
+ val = ((fSE32_64(a)) + (1 << (n - 1)));
+ }
+
+ val = val >> n;
+ return (int32_t)val;
+}
+
+/* Floating Point Stuff */
+
+static const FloatRoundMode softfloat_roundingmodes[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_down,
+ float_round_up,
+};
+
+void arch_fpop_start(CPUHexagonState *env)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ set_float_rounding_mode(
+ softfloat_roundingmodes[fREAD_REG_FIELD(USR, USR_FPRND)],
+ &env->fp_status);
+}
+
+#ifdef CONFIG_USER_ONLY
+/*
+ * Hexagon Linux kernel only sets the relevant bits in USR (user status
+ * register). The exception isn't raised to user mode, so we don't
+ * model it in qemu user mode.
+ */
+#define RAISE_FP_EXCEPTION do {} while (0)
+#endif
+
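+/*
+ * If softfloat raised FLAG, set the sticky USR status bit (MYF) if it is not
+ * already set, and raise an exception when the corresponding trap-enable bit
+ * (MYE) is set.
+ */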
+#define SOFTFLOAT_TEST_FLAG(FLAG, MYF, MYE) \
+ do { \
+ if (flags & FLAG) { \
+ if (GET_USR_FIELD(USR_##MYF) == 0) { \
+ SET_USR_FIELD(USR_##MYF, 1); \
+ if (GET_USR_FIELD(USR_##MYE)) { \
+ RAISE_FP_EXCEPTION; \
+ } \
+ } \
+ } \
+ } while (0)
+
+void arch_fpop_end(CPUHexagonState *env)
+{
+ int flags = get_float_exception_flags(&env->fp_status);
+ if (flags != 0) {
+ SOFTFLOAT_TEST_FLAG(float_flag_inexact, FPINPF, FPINPE);
+ SOFTFLOAT_TEST_FLAG(float_flag_divbyzero, FPDBZF, FPDBZE);
+ SOFTFLOAT_TEST_FLAG(float_flag_invalid, FPINVF, FPINVE);
+ SOFTFLOAT_TEST_FLAG(float_flag_overflow, FPOVFF, FPOVFE);
+ SOFTFLOAT_TEST_FLAG(float_flag_underflow, FPUNFF, FPUNFE);
+ }
+}
+
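+/*
+ * Handle the special cases and operand pre-scaling for the reciprocal
+ * refinement sequence. Returns 1 when the caller should continue with the
+ * estimate and refinement, 0 when a special case has already produced the
+ * result in *Rd.
+ */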
+int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd, int *adjust,
+ float_status *fp_status)
+{
+ int n_exp;
+ int d_exp;
+ int ret = 0;
+ float32 RsV, RtV, RdV;
+ int PeV = 0;
+ RsV = *Rs;
+ RtV = *Rt;
+ if (float32_is_any_nan(RsV) && float32_is_any_nan(RtV)) {
+ if (extract32(RsV & RtV, 22, 1) == 0) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ RdV = RsV = RtV = float32_nan;
+ } else if (float32_is_any_nan(RsV)) {
+ if (extract32(RsV, 22, 1) == 0) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ RdV = RsV = RtV = float32_nan;
+ } else if (float32_is_any_nan(RtV)) {
+ /* or put NaN in num/den fixup? */
+ if (extract32(RtV, 22, 1) == 0) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ RdV = RsV = RtV = float32_nan;
+ } else if (float32_is_infinity(RsV) && float32_is_infinity(RtV)) {
+ /* or put Inf in num fixup? */
+ RdV = RsV = RtV = float32_nan;
+ float_raise(float_flag_invalid, fp_status);
+ } else if (float32_is_zero(RsV) && float32_is_zero(RtV)) {
+ /* or put zero in num fixup? */
+ RdV = RsV = RtV = float32_nan;
+ float_raise(float_flag_invalid, fp_status);
+ } else if (float32_is_zero(RtV)) {
+ /* or put Inf in num fixup? */
+ uint8_t RsV_sign = float32_is_neg(RsV);
+ uint8_t RtV_sign = float32_is_neg(RtV);
+ /* Check that RsV is NOT infinite before we overwrite it */
+ if (!float32_is_infinity(RsV)) {
+ float_raise(float_flag_divbyzero, fp_status);
+ }
+ RsV = infinite_float32(RsV_sign ^ RtV_sign);
+ RtV = float32_one;
+ RdV = float32_one;
+ } else if (float32_is_infinity(RtV)) {
+ RsV = make_float32(0x80000000 & (RsV ^ RtV));
+ RtV = float32_one;
+ RdV = float32_one;
+ } else if (float32_is_zero(RsV)) {
+ /* Does this just work itself out? */
+ /* No, 0/Inf causes problems. */
+ RsV = make_float32(0x80000000 & (RsV ^ RtV));
+ RtV = float32_one;
+ RdV = float32_one;
+ } else if (float32_is_infinity(RsV)) {
+ uint8_t RsV_sign = float32_is_neg(RsV);
+ uint8_t RtV_sign = float32_is_neg(RtV);
+ RsV = infinite_float32(RsV_sign ^ RtV_sign);
+ RtV = float32_one;
+ RdV = float32_one;
+ } else {
+ PeV = 0x00;
+ /* Basic checks passed */
+ n_exp = float32_getexp(RsV);
+ d_exp = float32_getexp(RtV);
+ if ((n_exp - d_exp + SF_BIAS) <= SF_MANTBITS) {
+ /* Near quotient underflow / inexact Q */
+ PeV = 0x80;
+ RtV = float32_scalbn(RtV, -64, fp_status);
+ RsV = float32_scalbn(RsV, 64, fp_status);
+ } else if ((n_exp - d_exp + SF_BIAS) > (SF_MAXEXP - 24)) {
+ /* Near quotient overflow */
+ PeV = 0x40;
+ RtV = float32_scalbn(RtV, 32, fp_status);
+ RsV = float32_scalbn(RsV, -32, fp_status);
+ } else if (n_exp <= SF_MANTBITS + 2) {
+ RtV = float32_scalbn(RtV, 64, fp_status);
+ RsV = float32_scalbn(RsV, 64, fp_status);
+ } else if (d_exp <= 1) {
+ RtV = float32_scalbn(RtV, 32, fp_status);
+ RsV = float32_scalbn(RsV, 32, fp_status);
+ } else if (d_exp > 252) {
+ RtV = float32_scalbn(RtV, -32, fp_status);
+ RsV = float32_scalbn(RsV, -32, fp_status);
+ }
+ RdV = 0;
+ ret = 1;
+ }
+ *Rs = RsV;
+ *Rt = RtV;
+ *Rd = RdV;
+ *adjust = PeV;
+ return ret;
+}
+
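+/* As above: special cases and pre-scaling for reciprocal square root */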
+int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust,
+ float_status *fp_status)
+{
+ float32 RsV, RdV;
+ int PeV = 0;
+ int r_exp;
+ int ret = 0;
+ RsV = *Rs;
+ if (float32_is_any_nan(RsV)) {
+ if (extract32(RsV, 22, 1) == 0) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ RdV = RsV = float32_nan;
+ } else if (float32_lt(RsV, float32_zero, fp_status)) {
+ /* Negative nonzero values are NaN */
+ float_raise(float_flag_invalid, fp_status);
+ RsV = float32_nan;
+ RdV = float32_nan;
+ } else if (float32_is_infinity(RsV)) {
+ /* or put Inf in num fixup? */
+ RsV = infinite_float32(1);
+ RdV = infinite_float32(1);
+ } else if (float32_is_zero(RsV)) {
+ /* or put zero in num fixup? */
+ RdV = float32_one;
+ } else {
+ PeV = 0x00;
+ /* Basic checks passed */
+ r_exp = float32_getexp(RsV);
+ if (r_exp <= 24) {
+ RsV = float32_scalbn(RsV, 64, fp_status);
+ PeV = 0xe0;
+ }
+ RdV = 0;
+ ret = 1;
+ }
+ *Rs = RsV;
+ *Rd = RdV;
+ *adjust = PeV;
+ return ret;
+}
+
+const uint8_t recip_lookup_table[128] = {
+ 0x0fe, 0x0fa, 0x0f6, 0x0f2, 0x0ef, 0x0eb, 0x0e7, 0x0e4,
+ 0x0e0, 0x0dd, 0x0d9, 0x0d6, 0x0d2, 0x0cf, 0x0cc, 0x0c9,
+ 0x0c6, 0x0c2, 0x0bf, 0x0bc, 0x0b9, 0x0b6, 0x0b3, 0x0b1,
+ 0x0ae, 0x0ab, 0x0a8, 0x0a5, 0x0a3, 0x0a0, 0x09d, 0x09b,
+ 0x098, 0x096, 0x093, 0x091, 0x08e, 0x08c, 0x08a, 0x087,
+ 0x085, 0x083, 0x080, 0x07e, 0x07c, 0x07a, 0x078, 0x075,
+ 0x073, 0x071, 0x06f, 0x06d, 0x06b, 0x069, 0x067, 0x065,
+ 0x063, 0x061, 0x05f, 0x05e, 0x05c, 0x05a, 0x058, 0x056,
+ 0x054, 0x053, 0x051, 0x04f, 0x04e, 0x04c, 0x04a, 0x049,
+ 0x047, 0x045, 0x044, 0x042, 0x040, 0x03f, 0x03d, 0x03c,
+ 0x03a, 0x039, 0x037, 0x036, 0x034, 0x033, 0x032, 0x030,
+ 0x02f, 0x02d, 0x02c, 0x02b, 0x029, 0x028, 0x027, 0x025,
+ 0x024, 0x023, 0x021, 0x020, 0x01f, 0x01e, 0x01c, 0x01b,
+ 0x01a, 0x019, 0x017, 0x016, 0x015, 0x014, 0x013, 0x012,
+ 0x011, 0x00f, 0x00e, 0x00d, 0x00c, 0x00b, 0x00a, 0x009,
+ 0x008, 0x007, 0x006, 0x005, 0x004, 0x003, 0x002, 0x000,
+};
+
+const uint8_t invsqrt_lookup_table[128] = {
+ 0x069, 0x066, 0x063, 0x061, 0x05e, 0x05b, 0x059, 0x057,
+ 0x054, 0x052, 0x050, 0x04d, 0x04b, 0x049, 0x047, 0x045,
+ 0x043, 0x041, 0x03f, 0x03d, 0x03b, 0x039, 0x037, 0x036,
+ 0x034, 0x032, 0x030, 0x02f, 0x02d, 0x02c, 0x02a, 0x028,
+ 0x027, 0x025, 0x024, 0x022, 0x021, 0x01f, 0x01e, 0x01d,
+ 0x01b, 0x01a, 0x019, 0x017, 0x016, 0x015, 0x014, 0x012,
+ 0x011, 0x010, 0x00f, 0x00d, 0x00c, 0x00b, 0x00a, 0x009,
+ 0x008, 0x007, 0x006, 0x005, 0x004, 0x003, 0x002, 0x001,
+ 0x0fe, 0x0fa, 0x0f6, 0x0f3, 0x0ef, 0x0eb, 0x0e8, 0x0e4,
+ 0x0e1, 0x0de, 0x0db, 0x0d7, 0x0d4, 0x0d1, 0x0ce, 0x0cb,
+ 0x0c9, 0x0c6, 0x0c3, 0x0c0, 0x0be, 0x0bb, 0x0b8, 0x0b6,
+ 0x0b3, 0x0b1, 0x0af, 0x0ac, 0x0aa, 0x0a8, 0x0a5, 0x0a3,
+ 0x0a1, 0x09f, 0x09d, 0x09b, 0x099, 0x097, 0x095, 0x093,
+ 0x091, 0x08f, 0x08d, 0x08b, 0x089, 0x087, 0x086, 0x084,
+ 0x082, 0x080, 0x07f, 0x07d, 0x07b, 0x07a, 0x078, 0x077,
+ 0x075, 0x074, 0x072, 0x071, 0x06f, 0x06e, 0x06c, 0x06b,
+};
diff --git a/target/hexagon/arch.h b/target/hexagon/arch.h
new file mode 100644
index 000000000..70918065d
--- /dev/null
+++ b/target/hexagon/arch.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_ARCH_H
+#define HEXAGON_ARCH_H
+
+#include "qemu/int128.h"
+
+extern const uint8_t rLPS_table_64x4[64][4];
+extern const uint8_t AC_next_state_MPS_64[64];
+extern const uint8_t AC_next_state_LPS_64[64];
+
+uint64_t interleave(uint32_t odd, uint32_t even);
+uint64_t deinterleave(uint64_t src);
+int32_t conv_round(int32_t a, int n);
+void arch_fpop_start(CPUHexagonState *env);
+void arch_fpop_end(CPUHexagonState *env);
+int arch_sf_recip_common(float32 *Rs, float32 *Rt, float32 *Rd,
+ int *adjust, float_status *fp_status);
+int arch_sf_invsqrt_common(float32 *Rs, float32 *Rd, int *adjust,
+ float_status *fp_status);
+
+extern const uint8_t recip_lookup_table[128];
+
+extern const uint8_t invsqrt_lookup_table[128];
+
+#endif
diff --git a/target/hexagon/attribs.h b/target/hexagon/attribs.h
new file mode 100644
index 000000000..54576f414
--- /dev/null
+++ b/target/hexagon/attribs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_ATTRIBS_H
+#define HEXAGON_ATTRIBS_H
+
+#include "qemu/bitmap.h"
+#include "opcodes.h"
+
+enum {
+#define DEF_ATTRIB(NAME, ...) A_##NAME,
+#include "attribs_def.h.inc"
+#undef DEF_ATTRIB
+};
+
+extern DECLARE_BITMAP(opcode_attribs[XX_LAST_OPCODE], A_ZZ_LASTATTRIB);
+
+#define GET_ATTRIB(opcode, attrib) \
+ test_bit(attrib, opcode_attribs[opcode])
+
+#endif /* ATTRIBS_H */
diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc
new file mode 100644
index 000000000..dc890a557
--- /dev/null
+++ b/target/hexagon/attribs_def.h.inc
@@ -0,0 +1,120 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Keep this as the first attribute: */
+DEF_ATTRIB(AA_DUMMY, "Dummy Zeroth Attribute", "", "")
+
+/* Misc */
+DEF_ATTRIB(EXTENSION, "Extension instruction", "", "")
+
+DEF_ATTRIB(PRIV, "Not available in user or guest mode", "", "")
+DEF_ATTRIB(GUEST, "Not available in user mode", "", "")
+
+DEF_ATTRIB(FPOP, "Floating Point Operation", "", "")
+
+DEF_ATTRIB(EXTENDABLE, "Immediate may be extended", "", "")
+
+DEF_ATTRIB(ARCHV2, "V2 architecture", "", "")
+DEF_ATTRIB(ARCHV3, "V3 architecture", "", "")
+DEF_ATTRIB(ARCHV4, "V4 architecture", "", "")
+DEF_ATTRIB(ARCHV5, "V5 architecture", "", "")
+
+DEF_ATTRIB(SUBINSN, "sub-instruction", "", "")
+
+/* Load and Store attributes */
+DEF_ATTRIB(LOAD, "Loads from memory", "", "")
+DEF_ATTRIB(STORE, "Stores to memory", "", "")
+DEF_ATTRIB(MEMLIKE, "Memory-like instruction", "", "")
+DEF_ATTRIB(MEMLIKE_PACKET_RULES, "follows Memory-like packet rules", "", "")
+
+/* V6 Vector attributes */
+DEF_ATTRIB(CVI, "Executes on the HVX extension", "", "")
+
+DEF_ATTRIB(CVI_NEW, "New value memory instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VM, "Memory instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VP, "Permute instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VP_VS, "Double vector permute/shift insn executes on HVX", "", "")
+DEF_ATTRIB(CVI_VX, "Multiply instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VX_DV, "Double vector multiply insn executes on HVX", "", "")
+DEF_ATTRIB(CVI_VS, "Shift instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VS_VX, "Permute/shift and multiply insn executes on HVX", "", "")
+DEF_ATTRIB(CVI_VA, "ALU instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_VA_DV, "Double vector ALU instruction executes on HVX", "", "")
+DEF_ATTRIB(CVI_4SLOT, "Consumes all the vector execution resources", "", "")
+DEF_ATTRIB(CVI_TMP, "Transient Memory Load not written to register", "", "")
+DEF_ATTRIB(CVI_GATHER, "CVI Gather operation", "", "")
+DEF_ATTRIB(CVI_SCATTER, "CVI Scatter operation", "", "")
+DEF_ATTRIB(CVI_SCATTER_RELEASE, "CVI Store Release for scatter", "", "")
+DEF_ATTRIB(CVI_TMP_DST, "CVI instruction that doesn't write a register", "", "")
+DEF_ATTRIB(CVI_SLOT23, "Can execute in slot 2 or slot 3 (HVX)", "", "")
+
+
+/* Change-of-flow attributes */
+DEF_ATTRIB(JUMP, "Jump-type instruction", "", "")
+DEF_ATTRIB(INDIRECT, "Absolute register jump", "", "")
+DEF_ATTRIB(CALL, "Function call instruction", "", "")
+DEF_ATTRIB(COF, "Change-of-flow instruction", "", "")
+DEF_ATTRIB(CONDEXEC, "May be cancelled by a predicate", "", "")
+DEF_ATTRIB(DOTNEWVALUE, "Uses a register value generated in this pkt", "", "")
+DEF_ATTRIB(NEWCMPJUMP, "Compound compare and jump", "", "")
+
+/* access to implicit registers */
+DEF_ATTRIB(IMPLICIT_WRITES_LR, "Writes the link register", "", "UREG.LR")
+DEF_ATTRIB(IMPLICIT_WRITES_SP, "Writes the stack pointer", "", "UREG.SP")
+DEF_ATTRIB(IMPLICIT_WRITES_FP, "Writes the frame pointer", "", "UREG.FP")
+DEF_ATTRIB(IMPLICIT_WRITES_LC0, "Writes loop count for loop 0", "", "UREG.LC0")
+DEF_ATTRIB(IMPLICIT_WRITES_LC1, "Writes loop count for loop 1", "", "UREG.LC1")
+DEF_ATTRIB(IMPLICIT_WRITES_SA0, "Writes start addr for loop 0", "", "UREG.SA0")
+DEF_ATTRIB(IMPLICIT_WRITES_SA1, "Writes start addr for loop 1", "", "UREG.SA1")
+DEF_ATTRIB(IMPLICIT_WRITES_P0, "Writes Predicate 0", "", "UREG.P0")
+DEF_ATTRIB(IMPLICIT_WRITES_P1, "Writes Predicate 1", "", "UREG.P1")
+DEF_ATTRIB(IMPLICIT_WRITES_P2, "Writes Predicate 2", "", "UREG.P2")
+DEF_ATTRIB(IMPLICIT_WRITES_P3, "May write Predicate 3", "", "UREG.P3")
+DEF_ATTRIB(IMPLICIT_READS_PC, "Reads the PC register", "", "")
+DEF_ATTRIB(IMPLICIT_WRITES_USR, "May write USR", "", "")
+DEF_ATTRIB(WRITES_PRED_REG, "Writes a predicate register", "", "")
+
+DEF_ATTRIB(CRSLOT23, "Can execute in slot 2 or slot 3 (CR)", "", "")
+DEF_ATTRIB(IT_NOP, "nop instruction", "", "")
+DEF_ATTRIB(IT_EXTENDER, "constant extender instruction", "", "")
+
+
+/* Restrictions to make note of */
+DEF_ATTRIB(RESTRICT_SLOT0ONLY, "Must execute on slot0", "", "")
+DEF_ATTRIB(RESTRICT_SLOT1ONLY, "Must execute on slot1", "", "")
+DEF_ATTRIB(RESTRICT_SLOT2ONLY, "Must execute on slot2", "", "")
+DEF_ATTRIB(RESTRICT_SLOT3ONLY, "Must execute on slot3", "", "")
+DEF_ATTRIB(RESTRICT_NOSLOT1, "No slot 1 instruction in parallel", "", "")
+DEF_ATTRIB(RESTRICT_PREFERSLOT0, "Try to encode into slot 0", "", "")
+
+DEF_ATTRIB(ICOP, "Instruction cache op", "", "")
+
+DEF_ATTRIB(HWLOOP0_END, "Ends HW loop0", "", "")
+DEF_ATTRIB(HWLOOP1_END, "Ends HW loop1", "", "")
+DEF_ATTRIB(DCZEROA, "dczeroa type", "", "")
+DEF_ATTRIB(ICFLUSHOP, "icflush op type", "", "")
+DEF_ATTRIB(DCFLUSHOP, "dcflush op type", "", "")
+DEF_ATTRIB(L2FLUSHOP, "l2flush op type", "", "")
+DEF_ATTRIB(DCFETCH, "dcfetch type", "", "")
+
+DEF_ATTRIB(L2FETCH, "Instruction is l2fetch type", "", "")
+
+DEF_ATTRIB(ICINVA, "icinva", "", "")
+DEF_ATTRIB(DCCLEANINVA, "dccleaninva", "", "")
+
+/* Keep this as the last attribute: */
+DEF_ATTRIB(ZZ_LASTATTRIB, "Last attribute in the file", "", "")
diff --git a/target/hexagon/cpu-param.h b/target/hexagon/cpu-param.h
new file mode 100644
index 000000000..e8ed5468d
--- /dev/null
+++ b/target/hexagon/cpu-param.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_CPU_PARAM_H
+#define HEXAGON_CPU_PARAM_H
+
+#define TARGET_PAGE_BITS 16 /* 64K pages */
+#define TARGET_LONG_BITS 32
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define NB_MMU_MODES 1
+
+#endif
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
new file mode 100644
index 000000000..fa9bd702d
--- /dev/null
+++ b/target/hexagon/cpu.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "fpu/softfloat-helpers.h"
+
+static void hexagon_v67_cpu_init(Object *obj)
+{
+}
+
+static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ char **cpuname;
+
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf(HEXAGON_CPU_TYPE_NAME("%s"), cpuname[0]);
+ oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_HEXAGON_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static Property hexagon_lldb_compat_property =
+ DEFINE_PROP_BOOL("lldb-compat", HexagonCPU, lldb_compat, false);
+static Property hexagon_lldb_stack_adjust_property =
+ DEFINE_PROP_UNSIGNED("lldb-stack-adjust", HexagonCPU, lldb_stack_adjust,
+ 0, qdev_prop_uint32, target_ulong);
+
+const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "sa0", "lc0", "sa1", "lc1", "p3_0", "c5", "m0", "m1",
+ "usr", "pc", "ugp", "gp", "cs0", "cs1", "c14", "c15",
+ "c16", "c17", "c18", "c19", "pkt_cnt", "insn_cnt", "hvx_cnt", "c23",
+ "c24", "c25", "c26", "c27", "c28", "c29", "c30", "c31",
+};
+
+/*
+ * One of the main debugging techniques is to use "-d cpu" and compare against
+ * LLDB output when single stepping. However, the target and qemu put the
+ * stacks at different locations. This is used to compensate so the diff is
+ * cleaner.
+ */
+static target_ulong adjust_stack_ptrs(CPUHexagonState *env, target_ulong addr)
+{
+ HexagonCPU *cpu = env_archcpu(env);
+ target_ulong stack_adjust = cpu->lldb_stack_adjust;
+ target_ulong stack_start = env->stack_start;
+ target_ulong stack_size = 0x10000;
+
+ if (stack_adjust == 0) {
+ return addr;
+ }
+
+ if (stack_start + 0x1000 >= addr && addr >= (stack_start - stack_size)) {
+ return addr - stack_adjust;
+ }
+ return addr;
+}
+
+/* HEX_REG_P3_0 (aka C4) is an alias for the predicate registers */
+static target_ulong read_p3_0(CPUHexagonState *env)
+{
+ int32_t control_reg = 0;
+ int i;
+ for (i = NUM_PREGS - 1; i >= 0; i--) {
+ control_reg <<= 8;
+ control_reg |= env->pred[i] & 0xff;
+ }
+ return control_reg;
+}
+
+static void print_reg(FILE *f, CPUHexagonState *env, int regnum)
+{
+ target_ulong value;
+
+ if (regnum == HEX_REG_P3_0) {
+ value = read_p3_0(env);
+ } else {
+ value = regnum < 32 ? adjust_stack_ptrs(env, env->gpr[regnum])
+ : env->gpr[regnum];
+ }
+
+ qemu_fprintf(f, " %s = 0x" TARGET_FMT_lx "\n",
+ hexagon_regnames[regnum], value);
+}
+
+static void print_vreg(FILE *f, CPUHexagonState *env, int regnum,
+ bool skip_if_zero)
+{
+ if (skip_if_zero) {
+ bool nonzero_found = false;
+ for (int i = 0; i < MAX_VEC_SIZE_BYTES; i++) {
+ if (env->VRegs[regnum].ub[i] != 0) {
+ nonzero_found = true;
+ break;
+ }
+ }
+ if (!nonzero_found) {
+ return;
+ }
+ }
+
+ qemu_fprintf(f, " v%d = ( ", regnum);
+ qemu_fprintf(f, "0x%02x", env->VRegs[regnum].ub[MAX_VEC_SIZE_BYTES - 1]);
+ for (int i = MAX_VEC_SIZE_BYTES - 2; i >= 0; i--) {
+ qemu_fprintf(f, ", 0x%02x", env->VRegs[regnum].ub[i]);
+ }
+ qemu_fprintf(f, " )\n");
+}
+
+void hexagon_debug_vreg(CPUHexagonState *env, int regnum)
+{
+ print_vreg(stdout, env, regnum, false);
+}
+
+static void print_qreg(FILE *f, CPUHexagonState *env, int regnum,
+ bool skip_if_zero)
+{
+ if (skip_if_zero) {
+ bool nonzero_found = false;
+ for (int i = 0; i < MAX_VEC_SIZE_BYTES / 8; i++) {
+ if (env->QRegs[regnum].ub[i] != 0) {
+ nonzero_found = true;
+ break;
+ }
+ }
+ if (!nonzero_found) {
+ return;
+ }
+ }
+
+ qemu_fprintf(f, " q%d = ( ", regnum);
+ qemu_fprintf(f, "0x%02x",
+ env->QRegs[regnum].ub[MAX_VEC_SIZE_BYTES / 8 - 1]);
+ for (int i = MAX_VEC_SIZE_BYTES / 8 - 2; i >= 0; i--) {
+ qemu_fprintf(f, ", 0x%02x", env->QRegs[regnum].ub[i]);
+ }
+ qemu_fprintf(f, " )\n");
+}
+
+void hexagon_debug_qreg(CPUHexagonState *env, int regnum)
+{
+ print_qreg(stdout, env, regnum, false);
+}
+
+static void hexagon_dump(CPUHexagonState *env, FILE *f, int flags)
+{
+ HexagonCPU *cpu = env_archcpu(env);
+
+ if (cpu->lldb_compat) {
+ /*
+ * When comparing with LLDB, it doesn't step through single-cycle
+ * hardware loops the same way. So, we just skip them here
+ */
+ if (env->gpr[HEX_REG_PC] == env->last_pc_dumped) {
+ return;
+ }
+ env->last_pc_dumped = env->gpr[HEX_REG_PC];
+ }
+
+ qemu_fprintf(f, "General Purpose Registers = {\n");
+ for (int i = 0; i < 32; i++) {
+ print_reg(f, env, i);
+ }
+ print_reg(f, env, HEX_REG_SA0);
+ print_reg(f, env, HEX_REG_LC0);
+ print_reg(f, env, HEX_REG_SA1);
+ print_reg(f, env, HEX_REG_LC1);
+ print_reg(f, env, HEX_REG_M0);
+ print_reg(f, env, HEX_REG_M1);
+ print_reg(f, env, HEX_REG_USR);
+ print_reg(f, env, HEX_REG_P3_0);
+ print_reg(f, env, HEX_REG_GP);
+ print_reg(f, env, HEX_REG_UGP);
+ print_reg(f, env, HEX_REG_PC);
+#ifdef CONFIG_USER_ONLY
+ /*
+     * Not modelled in user mode, print junk to minimize the diffs
+ * with LLDB output
+ */
+ qemu_fprintf(f, " cause = 0x000000db\n");
+ qemu_fprintf(f, " badva = 0x00000000\n");
+ qemu_fprintf(f, " cs0 = 0x00000000\n");
+ qemu_fprintf(f, " cs1 = 0x00000000\n");
+#else
+ print_reg(f, env, HEX_REG_CAUSE);
+ print_reg(f, env, HEX_REG_BADVA);
+ print_reg(f, env, HEX_REG_CS0);
+ print_reg(f, env, HEX_REG_CS1);
+#endif
+ qemu_fprintf(f, "}\n");
+
+ if (flags & CPU_DUMP_FPU) {
+ qemu_fprintf(f, "Vector Registers = {\n");
+ for (int i = 0; i < NUM_VREGS; i++) {
+ print_vreg(f, env, i, true);
+ }
+ for (int i = 0; i < NUM_QREGS; i++) {
+ print_qreg(f, env, i, true);
+ }
+ qemu_fprintf(f, "}\n");
+ }
+}
+
+static void hexagon_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+
+ hexagon_dump(env, f, flags);
+}
+
+void hexagon_debug(CPUHexagonState *env)
+{
+ hexagon_dump(env, stdout, CPU_DUMP_FPU);
+}
+
+static void hexagon_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+ env->gpr[HEX_REG_PC] = value;
+}
+
+static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+ env->gpr[HEX_REG_PC] = tb->pc;
+}
+
+static bool hexagon_cpu_has_work(CPUState *cs)
+{
+ return true;
+}
+
+void restore_state_to_opc(CPUHexagonState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->gpr[HEX_REG_PC] = data[0];
+}
+
+static void hexagon_cpu_reset(DeviceState *dev)
+{
+ CPUState *cs = CPU(dev);
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(cpu);
+ CPUHexagonState *env = &cpu->env;
+
+ mcc->parent_reset(dev);
+
+ set_default_nan_mode(1, &env->fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status);
+}
+
+static void hexagon_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ info->print_insn = print_insn_hexagon;
+}
+
+static void hexagon_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ HexagonCPUClass *mcc = HEXAGON_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void hexagon_cpu_init(Object *obj)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+ qdev_property_add_static(DEVICE(obj), &hexagon_lldb_compat_property);
+ qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property);
+}
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps hexagon_tcg_ops = {
+ .initialize = hexagon_translate_init,
+ .synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
+};
+
+static void hexagon_cpu_class_init(ObjectClass *c, void *data)
+{
+ HexagonCPUClass *mcc = HEXAGON_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_parent_realize(dc, hexagon_cpu_realize,
+ &mcc->parent_realize);
+
+ device_class_set_parent_reset(dc, hexagon_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = hexagon_cpu_class_by_name;
+ cc->has_work = hexagon_cpu_has_work;
+ cc->dump_state = hexagon_dump_state;
+ cc->set_pc = hexagon_cpu_set_pc;
+ cc->gdb_read_register = hexagon_gdb_read_register;
+ cc->gdb_write_register = hexagon_gdb_write_register;
+ cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS;
+ cc->gdb_stop_before_watchpoint = true;
+ cc->disas_set_info = hexagon_cpu_disas_set_info;
+ cc->tcg_ops = &hexagon_tcg_ops;
+}
+
+#define DEFINE_CPU(type_name, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_HEXAGON_CPU, \
+ .instance_init = initfn \
+ }
+
+static const TypeInfo hexagon_cpu_type_infos[] = {
+ {
+ .name = TYPE_HEXAGON_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(HexagonCPU),
+ .instance_init = hexagon_cpu_init,
+ .abstract = true,
+ .class_size = sizeof(HexagonCPUClass),
+ .class_init = hexagon_cpu_class_init,
+ },
+ DEFINE_CPU(TYPE_HEXAGON_CPU_V67, hexagon_v67_cpu_init),
+};
+
+DEFINE_TYPES(hexagon_cpu_type_infos)
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
new file mode 100644
index 000000000..de121d950
--- /dev/null
+++ b/target/hexagon/cpu.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_CPU_H
+#define HEXAGON_CPU_H
+
+/* Forward declaration needed by some of the header files */
+typedef struct CPUHexagonState CPUHexagonState;
+
+#include "fpu/softfloat-types.h"
+
+#include "qemu-common.h"
+#include "exec/cpu-defs.h"
+#include "hex_regs.h"
+#include "mmvec/mmvec.h"
+
+#define NUM_PREGS 4
+#define TOTAL_PER_THREAD_REGS 64
+
+#define SLOTS_MAX 4
+#define STORES_MAX 2
+#define REG_WRITES_MAX 32
+#define PRED_WRITES_MAX 5 /* 4 insns + endloop */
+#define VSTORES_MAX 2
+
+#define TYPE_HEXAGON_CPU "hexagon-cpu"
+
+#define HEXAGON_CPU_TYPE_SUFFIX "-" TYPE_HEXAGON_CPU
+#define HEXAGON_CPU_TYPE_NAME(name) (name HEXAGON_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_HEXAGON_CPU
+
+#define TYPE_HEXAGON_CPU_V67 HEXAGON_CPU_TYPE_NAME("v67")
+
+#define MMU_USER_IDX 0
+
+typedef struct {
+ target_ulong va;
+ uint8_t width;
+ uint32_t data32;
+ uint64_t data64;
+} MemLog;
+
+typedef struct {
+ target_ulong va;
+ int size;
+ DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES) QEMU_ALIGNED(16);
+ MMVector data QEMU_ALIGNED(16);
+} VStoreLog;
+
+#define EXEC_STATUS_OK 0x0000
+#define EXEC_STATUS_STOP 0x0002
+#define EXEC_STATUS_REPLAY 0x0010
+#define EXEC_STATUS_LOCKED 0x0020
+#define EXEC_STATUS_EXCEPTION 0x0100
+
+
+#define EXCEPTION_DETECTED (env->status & EXEC_STATUS_EXCEPTION)
+#define REPLAY_DETECTED (env->status & EXEC_STATUS_REPLAY)
+#define CLEAR_EXCEPTION (env->status &= (~EXEC_STATUS_EXCEPTION))
+#define SET_EXCEPTION (env->status |= EXEC_STATUS_EXCEPTION)
+
+/* Maximum number of vector temps in a packet */
+#define VECTOR_TEMPS_MAX 4
+
+struct CPUHexagonState {
+ target_ulong gpr[TOTAL_PER_THREAD_REGS];
+ target_ulong pred[NUM_PREGS];
+ target_ulong branch_taken;
+ target_ulong next_PC;
+
+ /* For comparing with LLDB on target - see adjust_stack_ptrs function */
+ target_ulong last_pc_dumped;
+ target_ulong stack_start;
+
+ uint8_t slot_cancelled;
+ target_ulong new_value[TOTAL_PER_THREAD_REGS];
+
+ /*
+ * Only used when HEX_DEBUG is on, but unconditionally included
+ * to reduce recompile time when turning HEX_DEBUG on/off.
+ */
+ target_ulong this_PC;
+ target_ulong reg_written[TOTAL_PER_THREAD_REGS];
+
+ target_ulong new_pred_value[NUM_PREGS];
+ target_ulong pred_written;
+
+ MemLog mem_log_stores[STORES_MAX];
+ target_ulong pkt_has_store_s1;
+ target_ulong dczero_addr;
+
+ float_status fp_status;
+
+ target_ulong llsc_addr;
+ target_ulong llsc_val;
+ uint64_t llsc_val_i64;
+
+ MMVector VRegs[NUM_VREGS] QEMU_ALIGNED(16);
+ MMVector future_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16);
+ MMVector tmp_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16);
+
+ VRegMask VRegs_updated;
+
+ MMQReg QRegs[NUM_QREGS] QEMU_ALIGNED(16);
+ MMQReg future_QRegs[NUM_QREGS] QEMU_ALIGNED(16);
+ QRegMask QRegs_updated;
+
+ /* Temporaries used within instructions */
+ MMVectorPair VuuV QEMU_ALIGNED(16);
+ MMVectorPair VvvV QEMU_ALIGNED(16);
+ MMVectorPair VxxV QEMU_ALIGNED(16);
+ MMVector vtmp QEMU_ALIGNED(16);
+ MMQReg qtmp QEMU_ALIGNED(16);
+
+ VStoreLog vstore[VSTORES_MAX];
+ target_ulong vstore_pending[VSTORES_MAX];
+ bool vtcm_pending;
+ VTCMStoreLog vtcm_log;
+};
+
+#define HEXAGON_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(HexagonCPUClass, (klass), TYPE_HEXAGON_CPU)
+#define HEXAGON_CPU(obj) \
+ OBJECT_CHECK(HexagonCPU, (obj), TYPE_HEXAGON_CPU)
+#define HEXAGON_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(HexagonCPUClass, (obj), TYPE_HEXAGON_CPU)
+
+typedef struct HexagonCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+} HexagonCPUClass;
+
+typedef struct HexagonCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+ CPUNegativeOffsetState neg;
+ CPUHexagonState env;
+
+ bool lldb_compat;
+ target_ulong lldb_stack_adjust;
+} HexagonCPU;
+
+#include "cpu_bits.h"
+
+static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->gpr[HEX_REG_PC];
+ *cs_base = 0;
+#ifdef CONFIG_USER_ONLY
+ *flags = 0;
+#else
+#error System mode not supported on Hexagon yet
+#endif
+}
+
+static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+#error System mode not supported on Hexagon yet
+#endif
+}
+
+typedef struct CPUHexagonState CPUArchState;
+typedef HexagonCPU ArchCPU;
+
+void hexagon_translate_init(void);
+
+#include "exec/cpu-all.h"
+
+#endif /* HEXAGON_CPU_H */
diff --git a/target/hexagon/cpu_bits.h b/target/hexagon/cpu_bits.h
new file mode 100644
index 000000000..96fef7172
--- /dev/null
+++ b/target/hexagon/cpu_bits.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_CPU_BITS_H
+#define HEXAGON_CPU_BITS_H
+
+#include "qemu/bitops.h"
+
+#define HEX_EXCP_FETCH_NO_UPAGE 0x012
+#define HEX_EXCP_INVALID_PACKET 0x015
+#define HEX_EXCP_INVALID_OPCODE 0x015
+#define HEX_EXCP_PRIV_NO_UREAD 0x024
+#define HEX_EXCP_PRIV_NO_UWRITE 0x025
+
+#define HEX_EXCP_TRAP0 0x172
+
+#define PACKET_WORDS_MAX 4
+
+static inline uint32_t parse_bits(uint32_t encoding)
+{
+ /* The parse bits are [15:14] */
+ return extract32(encoding, 14, 2);
+}
+
+static inline uint32_t iclass_bits(uint32_t encoding)
+{
+ /* The instruction class is encoded in bits [31:28] */
+ uint32_t iclass = extract32(encoding, 28, 4);
+ /* If parse bits are zero, this is a duplex */
+ if (parse_bits(encoding) == 0) {
+ iclass += 16;
+ }
+ return iclass;
+}
+
+static inline bool is_packet_end(uint32_t encoding)
+{
+    uint32_t bits = parse_bits(encoding);
+ return ((bits == 0x3) || (bits == 0x0));
+}
+
+int disassemble_hexagon(uint32_t *words, int nwords, bfd_vma pc, GString *buf);
+
+#endif
diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c
new file mode 100644
index 000000000..6f0f27b4b
--- /dev/null
+++ b/target/hexagon/decode.c
@@ -0,0 +1,980 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "iclass.h"
+#include "attribs.h"
+#include "genptr.h"
+#include "decode.h"
+#include "insn.h"
+#include "printinsn.h"
+#include "mmvec/decode_ext_mmvec.h"
+
+#define fZXTN(N, M, VAL) ((VAL) & ((1LL << (N)) - 1))
+
+enum {
+ EXT_IDX_noext = 0,
+ EXT_IDX_noext_AFTER = 4,
+ EXT_IDX_mmvec = 4,
+ EXT_IDX_mmvec_AFTER = 8,
+ XX_LAST_EXT_IDX
+};
+
+/*
+ * Certain operand types represent a non-contiguous set of values.
+ * For example, the compound compare-and-jump instruction can only access
+ * registers R0-R7 and R16-23.
+ * This table represents the mapping from the encoding to the actual values.
+ */
+
+#define DEF_REGMAP(NAME, ELEMENTS, ...) \
+ static const unsigned int DECODE_REGISTER_##NAME[ELEMENTS] = \
+ { __VA_ARGS__ };
+ /* Name Num Table */
+DEF_REGMAP(R_16, 16, 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
+DEF_REGMAP(R__8, 8, 0, 2, 4, 6, 16, 18, 20, 22)
+DEF_REGMAP(R_8, 8, 0, 1, 2, 3, 4, 5, 6, 7)
+
+#define DECODE_MAPPED_REG(OPNUM, NAME) \
+ insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]];
+
+typedef struct {
+ const struct DectreeTable *table_link;
+ const struct DectreeTable *table_link_b;
+ Opcode opcode;
+ enum {
+ DECTREE_ENTRY_INVALID,
+ DECTREE_TABLE_LINK,
+ DECTREE_SUBINSNS,
+ DECTREE_EXTSPACE,
+ DECTREE_TERMINAL
+ } type;
+} DectreeEntry;
+
+typedef struct DectreeTable {
+ unsigned int (*lookup_function)(int startbit, int width, uint32_t opcode);
+ unsigned int size;
+ unsigned int startbit;
+ unsigned int width;
+ const DectreeEntry table[];
+} DectreeTable;
+
+#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) \
+ static const DectreeTable dectree_table_##TAG;
+#define TABLE_LINK(TABLE) /* NOTHING */
+#define TERMINAL(TAG, ENC) /* NOTHING */
+#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */
+#define EXTSPACE(TAG, ENC) /* NOTHING */
+#define INVALID() /* NOTHING */
+#define DECODE_END_TABLE(...) /* NOTHING */
+#define DECODE_MATCH_INFO(...) /* NOTHING */
+#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */
+#define DECODE_OPINFO(...) /* NOTHING */
+
+#include "dectree_generated.h.inc"
+
+#undef DECODE_OPINFO
+#undef DECODE_MATCH_INFO
+#undef DECODE_LEGACY_MATCH_INFO
+#undef DECODE_END_TABLE
+#undef INVALID
+#undef TERMINAL
+#undef SUBINSNS
+#undef EXTSPACE
+#undef TABLE_LINK
+#undef DECODE_NEW_TABLE
+#undef DECODE_SEPARATOR_BITS
+
+#define DECODE_SEPARATOR_BITS(START, WIDTH) NULL, START, WIDTH
+#define DECODE_NEW_TABLE_HELPER(TAG, SIZE, FN, START, WIDTH) \
+ static const DectreeTable dectree_table_##TAG = { \
+ .size = SIZE, \
+ .lookup_function = FN, \
+ .startbit = START, \
+ .width = WIDTH, \
+ .table = {
+#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) \
+ DECODE_NEW_TABLE_HELPER(TAG, SIZE, WHATNOT)
+
+#define TABLE_LINK(TABLE) \
+ { .type = DECTREE_TABLE_LINK, .table_link = &dectree_table_##TABLE },
+#define TERMINAL(TAG, ENC) \
+ { .type = DECTREE_TERMINAL, .opcode = TAG },
+#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) \
+ { \
+ .type = DECTREE_SUBINSNS, \
+ .table_link = &dectree_table_DECODE_SUBINSN_##CLASSA, \
+ .table_link_b = &dectree_table_DECODE_SUBINSN_##CLASSB \
+ },
+#define EXTSPACE(TAG, ENC) { .type = DECTREE_EXTSPACE },
+#define INVALID() { .type = DECTREE_ENTRY_INVALID, .opcode = XX_LAST_OPCODE },
+
+#define DECODE_END_TABLE(...) } };
+
+#define DECODE_MATCH_INFO(...) /* NOTHING */
+#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */
+#define DECODE_OPINFO(...) /* NOTHING */
+
+#include "dectree_generated.h.inc"
+
+#undef DECODE_OPINFO
+#undef DECODE_MATCH_INFO
+#undef DECODE_LEGACY_MATCH_INFO
+#undef DECODE_END_TABLE
+#undef INVALID
+#undef TERMINAL
+#undef SUBINSNS
+#undef EXTSPACE
+#undef TABLE_LINK
+#undef DECODE_NEW_TABLE
+#undef DECODE_NEW_TABLE_HELPER
+#undef DECODE_SEPARATOR_BITS
+
+static const DectreeTable dectree_table_DECODE_EXT_EXT_noext = {
+ .size = 1, .lookup_function = NULL, .startbit = 0, .width = 0,
+ .table = {
+ { .type = DECTREE_ENTRY_INVALID, .opcode = XX_LAST_OPCODE },
+ }
+};
+
+static const DectreeTable *ext_trees[XX_LAST_EXT_IDX];
+
+static void decode_ext_init(void)
+{
+ int i;
+ for (i = EXT_IDX_noext; i < EXT_IDX_noext_AFTER; i++) {
+ ext_trees[i] = &dectree_table_DECODE_EXT_EXT_noext;
+ }
+ for (i = EXT_IDX_mmvec; i < EXT_IDX_mmvec_AFTER; i++) {
+ ext_trees[i] = &dectree_table_DECODE_EXT_EXT_mmvec;
+ }
+}
+
+typedef struct {
+ uint32_t mask;
+ uint32_t match;
+} DecodeITableEntry;
+
+#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */
+#define TABLE_LINK(TABLE) /* NOTHING */
+#define TERMINAL(TAG, ENC) /* NOTHING */
+#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */
+#define EXTSPACE(TAG, ENC) /* NOTHING */
+#define INVALID() /* NOTHING */
+#define DECODE_END_TABLE(...) /* NOTHING */
+#define DECODE_OPINFO(...) /* NOTHING */
+
+#define DECODE_MATCH_INFO_NORMAL(TAG, MASK, MATCH) \
+ [TAG] = { \
+ .mask = MASK, \
+ .match = MATCH, \
+ },
+
+#define DECODE_MATCH_INFO_NULL(TAG, MASK, MATCH) \
+ [TAG] = { .match = ~0 },
+
+#define DECODE_MATCH_INFO(...) DECODE_MATCH_INFO_NORMAL(__VA_ARGS__)
+#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */
+
+static const DecodeITableEntry decode_itable[XX_LAST_OPCODE] = {
+#include "dectree_generated.h.inc"
+};
+
+#undef DECODE_MATCH_INFO
+#define DECODE_MATCH_INFO(...) DECODE_MATCH_INFO_NULL(__VA_ARGS__)
+
+#undef DECODE_LEGACY_MATCH_INFO
+#define DECODE_LEGACY_MATCH_INFO(...) DECODE_MATCH_INFO_NORMAL(__VA_ARGS__)
+
+static const DecodeITableEntry decode_legacy_itable[XX_LAST_OPCODE] = {
+#include "dectree_generated.h.inc"
+};
+
+#undef DECODE_OPINFO
+#undef DECODE_MATCH_INFO
+#undef DECODE_LEGACY_MATCH_INFO
+#undef DECODE_END_TABLE
+#undef INVALID
+#undef TERMINAL
+#undef SUBINSNS
+#undef EXTSPACE
+#undef TABLE_LINK
+#undef DECODE_NEW_TABLE
+#undef DECODE_SEPARATOR_BITS
+
+void decode_init(void)
+{
+ decode_ext_init();
+}
+
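+/* Move the insn at 'start' to 'newloc' by repeatedly swapping neighbors */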
+void decode_send_insn_to(Packet *packet, int start, int newloc)
+{
+ Insn tmpinsn;
+ int direction;
+ int i;
+ if (start == newloc) {
+ return;
+ }
+ if (start < newloc) {
+ /* Move towards end */
+ direction = 1;
+ } else {
+        /* Move towards beginning */
+ direction = -1;
+ }
+ for (i = start; i != newloc; i += direction) {
+ tmpinsn = packet->insn[i];
+ packet->insn[i] = packet->insn[i + direction];
+ packet->insn[i + direction] = tmpinsn;
+ }
+}
+
+/* Fill newvalue registers with the correct regno */
+static void
+decode_fill_newvalue_regno(Packet *packet)
+{
+ int i, use_regidx, offset, def_idx, dst_idx;
+ uint16_t def_opcode, use_opcode;
+ char *dststr;
+
+ for (i = 1; i < packet->num_insns; i++) {
+ if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE) &&
+ !GET_ATTRIB(packet->insn[i].opcode, A_EXTENSION)) {
+ use_opcode = packet->insn[i].opcode;
+
+ /* It's a store, so we're adjusting the Nt field */
+ if (GET_ATTRIB(use_opcode, A_STORE)) {
+ use_regidx = strchr(opcode_reginfo[use_opcode], 't') -
+ opcode_reginfo[use_opcode];
+ } else { /* It's a Jump, so we're adjusting the Ns field */
+ use_regidx = strchr(opcode_reginfo[use_opcode], 's') -
+ opcode_reginfo[use_opcode];
+ }
+
+ /*
+             * The value encoded in the N-field is the offset to the
+             * instruction that produces the value. Shift off the LSB,
+             * which selects the odd/even register, then walk backwards
+             * and skip over any constant extenders.
+ */
+ offset = packet->insn[i].regno[use_regidx] >> 1;
+ def_idx = i - offset;
+ for (int j = 0; j < offset; j++) {
+ if (GET_ATTRIB(packet->insn[i - j - 1].opcode, A_IT_EXTENDER)) {
+ def_idx--;
+ }
+ }
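+            /*
+             * For example, an N-field value of 4 encodes an offset of 2;
+             * if one of the two preceding instructions is a constant
+             * extender, the producer is three instructions back.
+             */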
+
+ /*
+ * Check for a badly encoded N-field which points to an instruction
+ * out-of-range
+ */
+ g_assert(!((def_idx < 0) || (def_idx > (packet->num_insns - 1))));
+
+ /*
+ * packet->insn[def_idx] is the producer
+ * Figure out which type of destination it produces
+ * and the corresponding index in the reginfo
+ */
+ def_opcode = packet->insn[def_idx].opcode;
+ dststr = strstr(opcode_wregs[def_opcode], "Rd");
+ if (dststr) {
+ dststr = strchr(opcode_reginfo[def_opcode], 'd');
+ } else {
+ dststr = strstr(opcode_wregs[def_opcode], "Rx");
+ if (dststr) {
+ dststr = strchr(opcode_reginfo[def_opcode], 'x');
+ } else {
+ dststr = strstr(opcode_wregs[def_opcode], "Re");
+ if (dststr) {
+ dststr = strchr(opcode_reginfo[def_opcode], 'e');
+ } else {
+ dststr = strstr(opcode_wregs[def_opcode], "Ry");
+ if (dststr) {
+ dststr = strchr(opcode_reginfo[def_opcode], 'y');
+ } else {
+ g_assert_not_reached();
+ }
+ }
+ }
+ }
+ g_assert(dststr != NULL);
+
+ /* Now patch up the consumer with the register number */
+ dst_idx = dststr - opcode_reginfo[def_opcode];
+ packet->insn[i].regno[use_regidx] =
+ packet->insn[def_idx].regno[dst_idx];
+ /*
+ * We need to remember who produces this value to later
+ * check if it was dynamically cancelled
+ */
+ packet->insn[i].new_value_producer_slot =
+ packet->insn[def_idx].slot;
+ }
+ }
+}
+
+/* Split CJ into a compare and a jump */
+static void decode_split_cmpjump(Packet *pkt)
+{
+ int last, i;
+ int numinsns = pkt->num_insns;
+
+ /*
+ * First, split all compare-jumps.
+ * The compare is sent to the end as a new instruction.
+ * Do it this way so we don't reorder dual jumps. Those need to stay in
+ * original order.
+ */
+ for (i = 0; i < numinsns; i++) {
+ /* It's a cmp-jump */
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_NEWCMPJUMP)) {
+ last = pkt->num_insns;
+ pkt->insn[last] = pkt->insn[i]; /* copy the instruction */
+ pkt->insn[last].part1 = true; /* last insn does the CMP */
+ pkt->insn[i].part1 = false; /* existing insn does the JUMP */
+ pkt->num_insns++;
+ }
+ }
+
+ /* Now re-shuffle all the compares back to the beginning */
+ for (i = 0; i < pkt->num_insns; i++) {
+ if (pkt->insn[i].part1) {
+ decode_send_insn_to(pkt, i, 0);
+ }
+ }
+}
+
+static bool decode_opcode_can_jump(int opcode)
+{
+ if ((GET_ATTRIB(opcode, A_JUMP)) ||
+ (GET_ATTRIB(opcode, A_CALL)) ||
+ (opcode == J2_trap0) ||
+ (opcode == J2_pause)) {
+ /* Exception to A_JUMP attribute */
+ if (opcode == J4_hintjumpr) {
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+static bool decode_opcode_ends_loop(int opcode)
+{
+ return GET_ATTRIB(opcode, A_HWLOOP0_END) ||
+ GET_ATTRIB(opcode, A_HWLOOP1_END);
+}
+
+/* Set the is_* fields in each instruction */
+static void decode_set_insn_attr_fields(Packet *pkt)
+{
+ int i;
+ int numinsns = pkt->num_insns;
+ uint16_t opcode;
+
+ pkt->pkt_has_cof = false;
+ pkt->pkt_has_endloop = false;
+ pkt->pkt_has_dczeroa = false;
+
+ for (i = 0; i < numinsns; i++) {
+ opcode = pkt->insn[i].opcode;
+ if (pkt->insn[i].part1) {
+ continue; /* Skip compare of cmp-jumps */
+ }
+
+ if (GET_ATTRIB(opcode, A_DCZEROA)) {
+ pkt->pkt_has_dczeroa = true;
+ }
+
+ if (GET_ATTRIB(opcode, A_STORE)) {
+ if (pkt->insn[i].slot == 0) {
+ pkt->pkt_has_store_s0 = true;
+ } else {
+ pkt->pkt_has_store_s1 = true;
+ }
+ }
+
+ pkt->pkt_has_cof |= decode_opcode_can_jump(opcode);
+
+ pkt->insn[i].is_endloop = decode_opcode_ends_loop(opcode);
+
+ pkt->pkt_has_endloop |= pkt->insn[i].is_endloop;
+
+ pkt->pkt_has_cof |= pkt->pkt_has_endloop;
+ }
+}
+
+/*
+ * Shuffle for execution
+ * Move stores to end (in same order as encoding)
+ * Move compares to beginning (for use by .new insns)
+ */
+static void decode_shuffle_for_execution(Packet *packet)
+{
+ bool changed = false;
+ int i;
+ bool flag; /* flag means we've seen a non-memory instruction */
+ int n_mems;
+ int last_insn = packet->num_insns - 1;
+
+ /*
+     * Skip endloop instructions; an endloop can end up here and must
+     * not be disturbed by the shuffle below
+ */
+ if (decode_opcode_ends_loop(packet->insn[last_insn].opcode)) {
+ last_insn--;
+ }
+
+ do {
+ changed = false;
+ /*
+ * Stores go last, must not reorder.
+ * Cannot shuffle stores past loads, either.
+ * Iterate backwards. If we see a non-memory instruction,
+ * then a store, shuffle the store to the front. Don't shuffle
+ * stores wrt each other or a load.
+ */
+ for (flag = false, n_mems = 0, i = last_insn; i >= 0; i--) {
+ int opcode = packet->insn[i].opcode;
+
+ if (flag && GET_ATTRIB(opcode, A_STORE)) {
+ decode_send_insn_to(packet, i, last_insn - n_mems);
+ n_mems++;
+ changed = true;
+ } else if (GET_ATTRIB(opcode, A_STORE)) {
+ n_mems++;
+ } else if (GET_ATTRIB(opcode, A_LOAD)) {
+ /*
+ * Don't set flag, since we don't want to shuffle a
+ * store past a load
+ */
+ n_mems++;
+ } else if (GET_ATTRIB(opcode, A_DOTNEWVALUE)) {
+ /*
+ * Don't set flag, since we don't want to shuffle past
+ * a .new value
+ */
+ } else {
+ flag = true;
+ }
+ }
+
+ if (changed) {
+ continue;
+ }
+ /* Compares go first, may be reordered wrt each other */
+ for (flag = false, i = 0; i < last_insn + 1; i++) {
+ int opcode = packet->insn[i].opcode;
+
+ if ((strstr(opcode_wregs[opcode], "Pd4") ||
+ strstr(opcode_wregs[opcode], "Pe4")) &&
+ GET_ATTRIB(opcode, A_STORE) == 0) {
+ /* This should be a compare (not a store conditional) */
+ if (flag) {
+ decode_send_insn_to(packet, i, 0);
+ changed = true;
+ continue;
+ }
+ } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P3) &&
+ !decode_opcode_ends_loop(packet->insn[i].opcode)) {
+ /*
+ * spNloop instruction
+ * Don't reorder endloops; they are not valid for .new uses,
+ * and we want to match HW
+ */
+ if (flag) {
+ decode_send_insn_to(packet, i, 0);
+ changed = true;
+ continue;
+ }
+ } else if (GET_ATTRIB(opcode, A_IMPLICIT_WRITES_P0) &&
+ !GET_ATTRIB(opcode, A_NEWCMPJUMP)) {
+ if (flag) {
+ decode_send_insn_to(packet, i, 0);
+ changed = true;
+ continue;
+ }
+ } else {
+ flag = true;
+ }
+ }
+ if (changed) {
+ continue;
+ }
+ } while (changed);
+
+ /*
+     * If we have a .new register compare/branch, move it to the very
+     * end, past the stores
+ */
+ for (i = 0; i < last_insn; i++) {
+ if (GET_ATTRIB(packet->insn[i].opcode, A_DOTNEWVALUE)) {
+ decode_send_insn_to(packet, i, last_insn);
+ break;
+ }
+ }
+}
+
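+/*
+ * A constant extender supplies the upper 26 bits of the immediate;
+ * only the low 6 bits come from the extended instruction itself
+ * (fZXTN(6, 32, ...)), OR'd together below.
+ */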
+static void
+apply_extender(Packet *pkt, int i, uint32_t extender)
+{
+ int immed_num;
+ uint32_t base_immed;
+
+ immed_num = opcode_which_immediate_is_extended(pkt->insn[i].opcode);
+ base_immed = pkt->insn[i].immed[immed_num];
+
+ pkt->insn[i].immed[immed_num] = extender | fZXTN(6, 32, base_immed);
+}
+
+static void decode_apply_extenders(Packet *packet)
+{
+ int i;
+ for (i = 0; i < packet->num_insns; i++) {
+ if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
+ packet->insn[i + 1].extension_valid = true;
+ apply_extender(packet, i + 1, packet->insn[i].immed[0]);
+ }
+ }
+}
+
+static void decode_remove_extenders(Packet *packet)
+{
+ int i, j;
+ for (i = 0; i < packet->num_insns; i++) {
+ if (GET_ATTRIB(packet->insn[i].opcode, A_IT_EXTENDER)) {
+ /* Remove this one by moving the remaining instructions down */
+ for (j = i;
+ (j < packet->num_insns - 1) && (j < INSTRUCTIONS_MAX - 1);
+ j++) {
+ packet->insn[j] = packet->insn[j + 1];
+ }
+ packet->num_insns--;
+ }
+ }
+}
+
+static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot)
+{
+ if (GET_ATTRIB(pkt->insn[slot].opcode, A_EXTENSION)) {
+ return mmvec_ext_decode_find_iclass_slots(pkt->insn[slot].opcode);
+ } else {
+ return find_iclass_slots(pkt->insn[slot].opcode,
+ pkt->insn[slot].iclass);
+ }
+}
+
+#define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */
+#define TABLE_LINK(TABLE) /* NOTHING */
+#define TERMINAL(TAG, ENC) /* NOTHING */
+#define SUBINSNS(TAG, CLASSA, CLASSB, ENC) /* NOTHING */
+#define EXTSPACE(TAG, ENC) /* NOTHING */
+#define INVALID() /* NOTHING */
+#define DECODE_END_TABLE(...) /* NOTHING */
+#define DECODE_MATCH_INFO(...) /* NOTHING */
+#define DECODE_LEGACY_MATCH_INFO(...) /* NOTHING */
+
+#define DECODE_REG(REGNO, WIDTH, STARTBIT) \
+ insn->regno[REGNO] = ((encoding >> STARTBIT) & ((1 << WIDTH) - 1));
+
+#define DECODE_IMPL_REG(REGNO, VAL) \
+ insn->regno[REGNO] = VAL;
+
+#define DECODE_IMM(IMMNO, WIDTH, STARTBIT, VALSTART) \
+ insn->immed[IMMNO] |= (((encoding >> STARTBIT) & ((1 << WIDTH) - 1))) << \
+ (VALSTART);
+
+#define DECODE_IMM_SXT(IMMNO, WIDTH) \
+ insn->immed[IMMNO] = ((((int32_t)insn->immed[IMMNO]) << (32 - WIDTH)) >> \
+ (32 - WIDTH));
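+/* e.g. with WIDTH 7, an immediate of 0x40 sign-extends to -64 */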
+
+#define DECODE_IMM_NEG(IMMNO, WIDTH) \
+ insn->immed[IMMNO] = -insn->immed[IMMNO];
+
+#define DECODE_IMM_SHIFT(IMMNO, SHAMT) \
+ if ((!insn->extension_valid) || \
+ (insn->which_extended != IMMNO)) { \
+ insn->immed[IMMNO] <<= SHAMT; \
+ }
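+/*
+ * Note: an immediate that was supplied by a constant extender is
+ * already a full 32-bit value, so it must not be scaled again.
+ */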
+
+#define DECODE_OPINFO(TAG, BEH) \
+ case TAG: \
+ { BEH } \
+ break; \
+
+/*
+ * Fill in the operands of the instruction
+ * dectree_generated.h.inc has a DECODE_OPINFO entry for each opcode
+ * For example,
+ * DECODE_OPINFO(A2_addi,
+ * DECODE_REG(0,5,0)
+ * DECODE_REG(1,5,16)
+ * DECODE_IMM(0,7,21,9)
+ * DECODE_IMM(0,9,5,0)
+ * DECODE_IMM_SXT(0,16)
+ * )
+ * with the macros defined above, we'll fill in a switch statement
+ * where each case is an opcode tag.
+ */
+static void
+decode_op(Insn *insn, Opcode tag, uint32_t encoding)
+{
+ insn->immed[0] = 0;
+ insn->immed[1] = 0;
+ insn->opcode = tag;
+ if (insn->extension_valid) {
+ insn->which_extended = opcode_which_immediate_is_extended(tag);
+ }
+
+ switch (tag) {
+#include "dectree_generated.h.inc"
+ default:
+ break;
+ }
+
+ insn->generate = opcode_genptr[tag];
+
+ insn->iclass = iclass_bits(encoding);
+}
+
+#undef DECODE_REG
+#undef DECODE_IMPL_REG
+#undef DECODE_IMM
+#undef DECODE_IMM_SHIFT
+#undef DECODE_OPINFO
+#undef DECODE_MATCH_INFO
+#undef DECODE_LEGACY_MATCH_INFO
+#undef DECODE_END_TABLE
+#undef INVALID
+#undef TERMINAL
+#undef SUBINSNS
+#undef EXTSPACE
+#undef TABLE_LINK
+#undef DECODE_NEW_TABLE
+#undef DECODE_SEPARATOR_BITS
+
+static unsigned int
+decode_subinsn_tablewalk(Insn *insn, const DectreeTable *table,
+ uint32_t encoding)
+{
+ unsigned int i;
+ Opcode opc;
+ if (table->lookup_function) {
+ i = table->lookup_function(table->startbit, table->width, encoding);
+ } else {
+ i = extract32(encoding, table->startbit, table->width);
+ }
+ if (table->table[i].type == DECTREE_TABLE_LINK) {
+ return decode_subinsn_tablewalk(insn, table->table[i].table_link,
+ encoding);
+ } else if (table->table[i].type == DECTREE_TERMINAL) {
+ opc = table->table[i].opcode;
+ if ((encoding & decode_itable[opc].mask) != decode_itable[opc].match) {
+ return 0;
+ }
+ decode_op(insn, opc, encoding);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
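+/*
+ * A duplex word holds two 13-bit sub-instruction encodings:
+ * sub-insn "a" in bits 0..12 and sub-insn "b" in bits 16..28.
+ */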
+static unsigned int get_insn_a(uint32_t encoding)
+{
+ return extract32(encoding, 0, 13);
+}
+
+static unsigned int get_insn_b(uint32_t encoding)
+{
+ return extract32(encoding, 16, 13);
+}
+
+static unsigned int
+decode_insns_tablewalk(Insn *insn, const DectreeTable *table,
+ uint32_t encoding)
+{
+ unsigned int i;
+ unsigned int a, b;
+ Opcode opc;
+ if (table->lookup_function) {
+ i = table->lookup_function(table->startbit, table->width, encoding);
+ } else {
+ i = extract32(encoding, table->startbit, table->width);
+ }
+ if (table->table[i].type == DECTREE_TABLE_LINK) {
+ return decode_insns_tablewalk(insn, table->table[i].table_link,
+ encoding);
+ } else if (table->table[i].type == DECTREE_SUBINSNS) {
+ a = get_insn_a(encoding);
+ b = get_insn_b(encoding);
+ b = decode_subinsn_tablewalk(insn, table->table[i].table_link_b, b);
+ a = decode_subinsn_tablewalk(insn + 1, table->table[i].table_link, a);
+ if ((a == 0) || (b == 0)) {
+ return 0;
+ }
+ return 2;
+ } else if (table->table[i].type == DECTREE_TERMINAL) {
+ opc = table->table[i].opcode;
+ if ((encoding & decode_itable[opc].mask) != decode_itable[opc].match) {
+ if ((encoding & decode_legacy_itable[opc].mask) !=
+ decode_legacy_itable[opc].match) {
+ return 0;
+ }
+ }
+ decode_op(insn, opc, encoding);
+ return 1;
+ } else if (table->table[i].type == DECTREE_EXTSPACE) {
+ /*
+ * For now, HVX will be the only coproc
+ */
+ return decode_insns_tablewalk(insn, ext_trees[EXT_IDX_mmvec], encoding);
+ } else {
+ return 0;
+ }
+}
+
+static unsigned int
+decode_insns(Insn *insn, uint32_t encoding)
+{
+ const DectreeTable *table;
+ if (parse_bits(encoding) != 0) {
+ /* Start with PP table - 32 bit instructions */
+ table = &dectree_table_DECODE_ROOT_32;
+ } else {
+ /* start with EE table - duplex instructions */
+ table = &dectree_table_DECODE_ROOT_EE;
+ }
+ return decode_insns_tablewalk(insn, table, encoding);
+}
+
+static void decode_add_endloop_insn(Insn *insn, int loopnum)
+{
+ if (loopnum == 10) {
+ insn->opcode = J2_endloop01;
+ insn->generate = opcode_genptr[J2_endloop01];
+ } else if (loopnum == 1) {
+ insn->opcode = J2_endloop1;
+ insn->generate = opcode_genptr[J2_endloop1];
+ } else if (loopnum == 0) {
+ insn->opcode = J2_endloop0;
+ insn->generate = opcode_genptr[J2_endloop0];
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static bool decode_parsebits_is_loopend(uint32_t encoding32)
+{
+ uint32_t bits = parse_bits(encoding32);
+ return bits == 0x2;
+}
+
+static void
+decode_set_slot_number(Packet *pkt)
+{
+ int slot;
+ int i;
+ bool hit_mem_insn = false;
+ bool hit_duplex = false;
+ bool slot0_found = false;
+ bool slot1_found = false;
+ int slot1_iidx = 0;
+
+ /*
+ * The slots are encoded in reverse order
+ * For each instruction, count down until you find a suitable slot
+ */
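+    /*
+     * For example, a packet of three instructions that are valid in
+     * any slot is assigned slots 3, 2, 1; the fixup passes below then
+     * move memory and duplex instructions into slots 0 and 1.
+     */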
+ for (i = 0, slot = 3; i < pkt->num_insns; i++) {
+ SlotMask valid_slots = get_valid_slots(pkt, i);
+
+ while (!(valid_slots & (1 << slot))) {
+ slot--;
+ }
+ pkt->insn[i].slot = slot;
+ if (slot) {
+ /* I've assigned the slot, now decrement it for the next insn */
+ slot--;
+ }
+ }
+
+ /* Fix the exceptions - mem insns to slot 0,1 */
+ for (i = pkt->num_insns - 1; i >= 0; i--) {
+ /* First memory instruction always goes to slot 0 */
+ if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
+ GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
+ !hit_mem_insn) {
+ hit_mem_insn = true;
+ pkt->insn[i].slot = 0;
+ continue;
+ }
+
+ /* Next memory instruction always goes to slot 1 */
+ if ((GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE) ||
+ GET_ATTRIB(pkt->insn[i].opcode, A_MEMLIKE_PACKET_RULES)) &&
+ hit_mem_insn) {
+ pkt->insn[i].slot = 1;
+ }
+ }
+
+ /* Fix the exceptions - duplex always slot 0,1 */
+ for (i = pkt->num_insns - 1; i >= 0; i--) {
+ /* First subinsn always goes to slot 0 */
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && !hit_duplex) {
+ hit_duplex = true;
+ pkt->insn[i].slot = 0;
+ continue;
+ }
+
+ /* Next subinsn always goes to slot 1 */
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN) && hit_duplex) {
+ pkt->insn[i].slot = 1;
+ }
+ }
+
+ /* Fix the exceptions - slot 1 is never empty, always aligns to slot 0 */
+ for (i = pkt->num_insns - 1; i >= 0; i--) {
+ /* Is slot0 used? */
+ if (pkt->insn[i].slot == 0) {
+ bool is_endloop = (pkt->insn[i].opcode == J2_endloop01);
+ is_endloop |= (pkt->insn[i].opcode == J2_endloop0);
+ is_endloop |= (pkt->insn[i].opcode == J2_endloop1);
+
+ /*
+             * Make sure it's not an endloop, since we're overloading
+             * slot 0 for endloop
+ */
+ if (!is_endloop) {
+ slot0_found = true;
+ }
+ }
+ /* Is slot1 used? */
+ if (pkt->insn[i].slot == 1) {
+ slot1_found = true;
+ slot1_iidx = i;
+ }
+ }
+ /* Is slot0 empty and slot1 used? */
+ if ((!slot0_found) && slot1_found) {
+ /* Then push it to slot0 */
+ pkt->insn[slot1_iidx].slot = 0;
+ }
+}
+
+/*
+ * decode_packet
+ * Decodes packet with given words
+ * Returns 0 on insufficient words,
+ * or number of words used on success
+ */
+
+int decode_packet(int max_words, const uint32_t *words, Packet *pkt,
+ bool disas_only)
+{
+ int num_insns = 0;
+ int words_read = 0;
+ bool end_of_packet = false;
+ int new_insns = 0;
+ int i;
+ uint32_t encoding32;
+
+ /* Initialize */
+ memset(pkt, 0, sizeof(*pkt));
+ /* Try to build packet */
+ while (!end_of_packet && (words_read < max_words)) {
+ encoding32 = words[words_read];
+ end_of_packet = is_packet_end(encoding32);
+ new_insns = decode_insns(&pkt->insn[num_insns], encoding32);
+ g_assert(new_insns > 0);
+ /*
+ * If we saw an extender, mark next word extended so immediate
+ * decode works
+ */
+ if (pkt->insn[num_insns].opcode == A4_ext) {
+ pkt->insn[num_insns + 1].extension_valid = true;
+ }
+ num_insns += new_insns;
+ words_read++;
+ }
+
+ pkt->num_insns = num_insns;
+ if (!end_of_packet) {
+ /* Ran out of words! */
+ return 0;
+ }
+ pkt->encod_pkt_size_in_bytes = words_read * 4;
+ pkt->pkt_has_hvx = false;
+ for (i = 0; i < num_insns; i++) {
+ pkt->pkt_has_hvx |=
+ GET_ATTRIB(pkt->insn[i].opcode, A_CVI);
+ }
+
+ /*
+ * Check for :endloop in the parse bits
+ * Section 10.6 of the Programmer's Reference describes the encoding
+ * The end of hardware loop 0 can be encoded with 2 words
+ * The end of hardware loop 1 needs 3 words
+ */
+ if ((words_read == 2) && (decode_parsebits_is_loopend(words[0]))) {
+ decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
+ }
+ if (words_read >= 3) {
+ bool has_loop0, has_loop1;
+ has_loop0 = decode_parsebits_is_loopend(words[0]);
+ has_loop1 = decode_parsebits_is_loopend(words[1]);
+ if (has_loop0 && has_loop1) {
+ decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 10);
+ } else if (has_loop1) {
+ decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 1);
+ } else if (has_loop0) {
+ decode_add_endloop_insn(&pkt->insn[pkt->num_insns++], 0);
+ }
+ }
+
+ decode_apply_extenders(pkt);
+ if (!disas_only) {
+ decode_remove_extenders(pkt);
+ }
+ decode_set_slot_number(pkt);
+ decode_fill_newvalue_regno(pkt);
+
+ if (pkt->pkt_has_hvx) {
+ mmvec_ext_decode_checks(pkt, disas_only);
+ }
+
+ if (!disas_only) {
+ decode_shuffle_for_execution(pkt);
+ decode_split_cmpjump(pkt);
+ decode_set_insn_attr_fields(pkt);
+ }
+
+ return words_read;
+}
+
+/* Used for "-d in_asm" logging */
+int disassemble_hexagon(uint32_t *words, int nwords, bfd_vma pc,
+ GString *buf)
+{
+ Packet pkt;
+
+ if (decode_packet(nwords, words, &pkt, true) > 0) {
+ snprint_a_pkt_disas(buf, &pkt, words, pc);
+ return pkt.encod_pkt_size_in_bytes;
+ } else {
+ g_string_assign(buf, "<invalid>");
+ return 0;
+ }
+}
diff --git a/target/hexagon/decode.h b/target/hexagon/decode.h
new file mode 100644
index 000000000..c66f5ea64
--- /dev/null
+++ b/target/hexagon/decode.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_DECODE_H
+#define HEXAGON_DECODE_H
+
+#include "cpu.h"
+#include "opcodes.h"
+#include "insn.h"
+
+void decode_init(void);
+
+void decode_send_insn_to(Packet *packet, int start, int newloc);
+
+int decode_packet(int max_words, const uint32_t *words, Packet *pkt,
+ bool disas_only);
+
+#endif
diff --git a/target/hexagon/dectree.py b/target/hexagon/dectree.py
new file mode 100755
index 000000000..29467ec7d
--- /dev/null
+++ b/target/hexagon/dectree.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import re
+import sys
+
+import iset
+
+encs = {tag : ''.join(reversed(iset.iset[tag]['enc'].replace(' ', '')))
+ for tag in iset.tags if iset.iset[tag]['enc'] != 'MISSING ENCODING'}
+
+enc_classes = set([iset.iset[tag]['enc_class'] for tag in encs.keys()])
+subinsn_enc_classes = \
+ set([enc_class for enc_class in enc_classes \
+ if enc_class.startswith('SUBINSN_')])
+ext_enc_classes = \
+ set([enc_class for enc_class in enc_classes \
+ if enc_class not in ('NORMAL', '16BIT') and \
+ not enc_class.startswith('SUBINSN_')])
+
+try:
+ subinsn_groupings = iset.subinsn_groupings
+except AttributeError:
+ subinsn_groupings = {}
+
+for (tag, subinsn_grouping) in subinsn_groupings.items():
+ encs[tag] = ''.join(reversed(subinsn_grouping['enc'].replace(' ', '')))
+
+dectree_normal = {'leaves' : set()}
+dectree_16bit = {'leaves' : set()}
+dectree_subinsn_groupings = {'leaves' : set()}
+dectree_subinsns = {name : {'leaves' : set()} for name in subinsn_enc_classes}
+dectree_extensions = {name : {'leaves' : set()} for name in ext_enc_classes}
+
+for tag in encs.keys():
+ if tag in subinsn_groupings:
+ dectree_subinsn_groupings['leaves'].add(tag)
+ continue
+ enc_class = iset.iset[tag]['enc_class']
+ if enc_class.startswith('SUBINSN_'):
+ if len(encs[tag]) != 32:
+ encs[tag] = encs[tag] + '0' * (32 - len(encs[tag]))
+ dectree_subinsns[enc_class]['leaves'].add(tag)
+ elif enc_class == '16BIT':
+ if len(encs[tag]) != 16:
+            raise Exception(('Tag "{}" has enc_class "{}" and not an ' +
+                             'encoding width of 16 bits!')
+                            .format(tag, enc_class))
+ dectree_16bit['leaves'].add(tag)
+ else:
+ if len(encs[tag]) != 32:
+            raise Exception(('Tag "{}" has enc_class "{}" and not an ' +
+                             'encoding width of 32 bits!')
+                            .format(tag, enc_class))
+ if enc_class == 'NORMAL':
+ dectree_normal['leaves'].add(tag)
+ else:
+ dectree_extensions[enc_class]['leaves'].add(tag)
+
+faketags = set()
+for (tag, enc) in iset.enc_ext_spaces.items():
+ faketags.add(tag)
+ encs[tag] = ''.join(reversed(enc.replace(' ', '')))
+ dectree_normal['leaves'].add(tag)
+
+faketags |= set(subinsn_groupings.keys())
+
+def every_bit_counts(bitset):
+ for i in range(1, len(next(iter(bitset)))):
+ if len(set([bits[:i] + bits[i+1:] for bits in bitset])) == len(bitset):
+ return False
+ return True
+
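+# Recursively pick a separator field of up to 4 bits that is a literal
+# 0/1 in every encoding of this node and whose values tell the tags
+# apart. For example, encodings 0110... and 0111... can be separated
+# on the single bit where they differ.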
+def auto_separate(node):
+ tags = node['leaves']
+ if len(tags) <= 1:
+ return
+ enc_width = len(encs[next(iter(tags))])
+ opcode_bit_for_all = \
+ [all([encs[tag][i] in '01' \
+ for tag in tags]) for i in range(enc_width)]
+ opcode_bit_is_0_for_all = \
+ [opcode_bit_for_all[i] and all([encs[tag][i] == '0' \
+ for tag in tags]) for i in range(enc_width)]
+ opcode_bit_is_1_for_all = \
+ [opcode_bit_for_all[i] and all([encs[tag][i] == '1' \
+ for tag in tags]) for i in range(enc_width)]
+ differentiator_opcode_bit = \
+ [opcode_bit_for_all[i] and \
+ not (opcode_bit_is_0_for_all[i] or \
+ opcode_bit_is_1_for_all[i]) \
+ for i in range(enc_width)]
+ best_width = 0
+ for width in range(4, 0, -1):
+ for lsb in range(enc_width - width, -1, -1):
+ bitset = set([encs[tag][lsb:lsb+width] for tag in tags])
+ if all(differentiator_opcode_bit[lsb:lsb+width]) and \
+ (len(bitset) == len(tags) or every_bit_counts(bitset)):
+ best_width = width
+ best_lsb = lsb
+ caught_all_tags = len(bitset) == len(tags)
+ break
+ if best_width != 0:
+ break
+ if best_width == 0:
+ raise Exception('Could not find a way to differentiate the encodings ' +
+ 'of the following tags:\n{}'.format('\n'.join(tags)))
+ if caught_all_tags:
+ for width in range(1, best_width):
+ for lsb in range(enc_width - width, -1, -1):
+ bitset = set([encs[tag][lsb:lsb+width] for tag in tags])
+ if all(differentiator_opcode_bit[lsb:lsb+width]) and \
+ len(bitset) == len(tags):
+ best_width = width
+ best_lsb = lsb
+ break
+ else:
+ continue
+ break
+ node['separator_lsb'] = best_lsb
+ node['separator_width'] = best_width
+ node['children'] = []
+ for value in range(2 ** best_width):
+ child = {}
+ bits = ''.join(reversed('{:0{}b}'.format(value, best_width)))
+ child['leaves'] = \
+ set([tag for tag in tags \
+ if encs[tag][best_lsb:best_lsb+best_width] == bits])
+ node['children'].append(child)
+ for child in node['children']:
+ auto_separate(child)
+
+auto_separate(dectree_normal)
+auto_separate(dectree_16bit)
+if subinsn_groupings:
+ auto_separate(dectree_subinsn_groupings)
+for dectree_subinsn in dectree_subinsns.values():
+ auto_separate(dectree_subinsn)
+for dectree_ext in dectree_extensions.values():
+ auto_separate(dectree_ext)
+
+for tag in faketags:
+ del encs[tag]
+
+def table_name(parents, node):
+ path = parents + [node]
+ root = path[0]
+ tag = next(iter(node['leaves']))
+ if tag in subinsn_groupings:
+ enc_width = len(subinsn_groupings[tag]['enc'].replace(' ', ''))
+ else:
+ tag = next(iter(node['leaves'] - faketags))
+ enc_width = len(encs[tag])
+ determining_bits = ['_'] * enc_width
+ for (parent, child) in zip(path[:-1], path[1:]):
+ lsb = parent['separator_lsb']
+ width = parent['separator_width']
+ value = parent['children'].index(child)
+ determining_bits[lsb:lsb+width] = \
+ list(reversed('{:0{}b}'.format(value, width)))
+ if tag in subinsn_groupings:
+ name = 'DECODE_ROOT_EE'
+ else:
+ enc_class = iset.iset[tag]['enc_class']
+ if enc_class in ext_enc_classes:
+ name = 'DECODE_EXT_{}'.format(enc_class)
+ elif enc_class in subinsn_enc_classes:
+ name = 'DECODE_SUBINSN_{}'.format(enc_class)
+ else:
+ name = 'DECODE_ROOT_{}'.format(enc_width)
+ if node != root:
+ name += '_' + ''.join(reversed(determining_bits))
+ return name
+
+def print_node(f, node, parents):
+ if len(node['leaves']) <= 1:
+ return
+ name = table_name(parents, node)
+ lsb = node['separator_lsb']
+ width = node['separator_width']
+ print('DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\
+ format(name, 2 ** width, lsb, width), file=f)
+ for child in node['children']:
+ if len(child['leaves']) == 0:
+ print('INVALID()', file=f)
+ elif len(child['leaves']) == 1:
+ (tag,) = child['leaves']
+ if tag in subinsn_groupings:
+ class_a = subinsn_groupings[tag]['class_a']
+ class_b = subinsn_groupings[tag]['class_b']
+ enc = subinsn_groupings[tag]['enc'].replace(' ', '')
+ if 'RESERVED' in tag:
+ print('INVALID()', file=f)
+ else:
+ print('SUBINSNS({},{},{},"{}")'.\
+ format(tag, class_a, class_b, enc), file=f)
+ elif tag in iset.enc_ext_spaces:
+ enc = iset.enc_ext_spaces[tag].replace(' ', '')
+ print('EXTSPACE({},"{}")'.format(tag, enc), file=f)
+ else:
+ enc = ''.join(reversed(encs[tag]))
+ print('TERMINAL({},"{}")'.format(tag, enc), file=f)
+ else:
+ print('TABLE_LINK({})'.format(table_name(parents + [node], child)),
+ file=f)
+ print('DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\
+ format(name, 2 ** width, lsb, width), file=f)
+ print(file=f)
+ parents.append(node)
+ for child in node['children']:
+ print_node(f, child, parents)
+ parents.pop()
+
+def print_tree(f, tree):
+ print_node(f, tree, [])
+
+def print_match_info(f):
+ for tag in sorted(encs.keys(), key=iset.tags.index):
+ enc = ''.join(reversed(encs[tag]))
+ mask = int(re.sub(r'[^1]', r'0', enc.replace('0', '1')), 2)
+ match = int(re.sub(r'[^01]', r'0', enc), 2)
+ suffix = ''
+ print('DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)'.\
+ format(suffix, tag, mask, match), file=f)
+
+regre = re.compile(
+ r'((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)')
+immre = re.compile(r'[#]([rRsSuUm])(\d+)(?:[:](\d+))?')
+
+def ordered_unique(l):
+ return sorted(set(l), key=l.index)
+
+implicit_registers = {
+ 'SP' : 29,
+ 'FP' : 30,
+ 'LR' : 31
+}
+
+num_registers = {
+ 'R' : 32,
+ 'V' : 32
+}
+
+def print_op_info(f):
+ for tag in sorted(encs.keys(), key=iset.tags.index):
+ enc = encs[tag]
+ print(file=f)
+ print('DECODE_OPINFO({},'.format(tag), file=f)
+ regs = ordered_unique(regre.findall(iset.iset[tag]['syntax']))
+ imms = ordered_unique(immre.findall(iset.iset[tag]['syntax']))
+ regno = 0
+ for reg in regs:
+ reg_type = reg[0]
+ reg_letter = reg[1][0]
+ reg_num_choices = int(reg[3].rstrip('S'))
+        reg_mapping = reg[0] + '_' * len(reg[1]) + reg[3]
+ reg_enc_fields = re.findall(reg_letter + '+', enc)
+ if len(reg_enc_fields) == 0:
+ raise Exception('Tag "{}" missing register field!'.format(tag))
+ if len(reg_enc_fields) > 1:
+ raise Exception('Tag "{}" has split register field!'.\
+ format(tag))
+ reg_enc_field = reg_enc_fields[0]
+ if 2 ** len(reg_enc_field) != reg_num_choices:
+ raise Exception('Tag "{}" has incorrect register field width!'.\
+ format(tag))
+ print(' DECODE_REG({},{},{})'.\
+ format(regno, len(reg_enc_field), enc.index(reg_enc_field)),
+ file=f)
+ if reg_type in num_registers and \
+ reg_num_choices != num_registers[reg_type]:
+ print(' DECODE_MAPPED_REG({},{})'.\
+ format(regno, reg_mapping), file=f)
+ regno += 1
+ def implicit_register_key(reg):
+ return implicit_registers[reg]
+ for reg in sorted(
+ set([r for r in (iset.iset[tag]['rregs'].split(',') + \
+ iset.iset[tag]['wregs'].split(',')) \
+ if r in implicit_registers]), key=implicit_register_key):
+ print(' DECODE_IMPL_REG({},{})'.\
+ format(regno, implicit_registers[reg]), file=f)
+ regno += 1
+ if imms and imms[0][0].isupper():
+ imms = reversed(imms)
+ for imm in imms:
+ if imm[0].isupper():
+ immno = 1
+ else:
+ immno = 0
+ imm_type = imm[0]
+ imm_width = int(imm[1])
+ imm_shift = imm[2]
+ if imm_shift:
+ imm_shift = int(imm_shift)
+ else:
+ imm_shift = 0
+ if imm_type.islower():
+ imm_letter = 'i'
+ else:
+ imm_letter = 'I'
+ remainder = imm_width
+ for m in reversed(list(re.finditer(imm_letter + '+', enc))):
+ remainder -= m.end() - m.start()
+ print(' DECODE_IMM({},{},{},{})'.\
+ format(immno, m.end() - m.start(), m.start(), remainder),
+ file=f)
+ if remainder != 0:
+            imm_str = imm[0] + imm[1]
+            if imm[2]:
+                imm_str += ':' + imm[2]
+            raise Exception(('Tag "{}" has an incorrect number of ' +
+                             'encoding bits for immediate "{}"')
+                            .format(tag, imm_str))
+ if imm_type.lower() in 'sr':
+ print(' DECODE_IMM_SXT({},{})'.\
+ format(immno, imm_width), file=f)
+ if imm_type.lower() == 'n':
+ print(' DECODE_IMM_NEG({},{})'.\
+ format(immno, imm_width), file=f)
+ if imm_shift:
+ print(' DECODE_IMM_SHIFT({},{})'.\
+ format(immno, imm_shift), file=f)
+ print(')', file=f)
+
+if __name__ == '__main__':
+ with open(sys.argv[1], 'w') as f:
+ print_tree(f, dectree_normal)
+ print_tree(f, dectree_16bit)
+ if subinsn_groupings:
+ print_tree(f, dectree_subinsn_groupings)
+ for (name, dectree_subinsn) in sorted(dectree_subinsns.items()):
+ print_tree(f, dectree_subinsn)
+ for (name, dectree_ext) in sorted(dectree_extensions.items()):
+ print_tree(f, dectree_ext)
+ print_match_info(f)
+ print_op_info(f)
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
new file mode 100644
index 000000000..d3b45d494
--- /dev/null
+++ b/target/hexagon/fma_emu.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/int128.h"
+#include "fpu/softfloat.h"
+#include "macros.h"
+#include "fma_emu.h"
+
+#define DF_INF_EXP 0x7ff
+#define DF_BIAS 1023
+#define DF_MANTBITS 52
+#define DF_NAN 0xffffffffffffffffULL
+#define DF_INF 0x7ff0000000000000ULL
+#define DF_MINUS_INF 0xfff0000000000000ULL
+#define DF_MAXF 0x7fefffffffffffffULL
+#define DF_MINUS_MAXF 0xffefffffffffffffULL
+
+#define SF_INF_EXP 0xff
+#define SF_BIAS 127
+#define SF_MANTBITS 23
+#define SF_INF 0x7f800000
+#define SF_MINUS_INF 0xff800000
+#define SF_MAXF 0x7f7fffff
+#define SF_MINUS_MAXF 0xff7fffff
+
+#define HF_INF_EXP 0x1f
+#define HF_BIAS 15
+
+#define WAY_BIG_EXP 4096
+
+typedef union {
+ double f;
+ uint64_t i;
+ struct {
+ uint64_t mant:52;
+ uint64_t exp:11;
+ uint64_t sign:1;
+ };
+} Double;
+
+typedef union {
+ float f;
+ uint32_t i;
+ struct {
+ uint32_t mant:23;
+ uint32_t exp:8;
+ uint32_t sign:1;
+ };
+} Float;
+
+static uint64_t float64_getmant(float64 f64)
+{
+ Double a = { .i = f64 };
+ if (float64_is_normal(f64)) {
+ return a.mant | 1ULL << 52;
+ }
+ if (float64_is_zero(f64)) {
+ return 0;
+ }
+ if (float64_is_denormal(f64)) {
+ return a.mant;
+ }
+ return ~0ULL;
+}
+
+int32_t float64_getexp(float64 f64)
+{
+ Double a = { .i = f64 };
+ if (float64_is_normal(f64)) {
+ return a.exp;
+ }
+ if (float64_is_denormal(f64)) {
+ return a.exp + 1;
+ }
+ return -1;
+}
+
+static uint64_t float32_getmant(float32 f32)
+{
+ Float a = { .i = f32 };
+ if (float32_is_normal(f32)) {
+ return a.mant | 1ULL << 23;
+ }
+ if (float32_is_zero(f32)) {
+ return 0;
+ }
+ if (float32_is_denormal(f32)) {
+ return a.mant;
+ }
+ return ~0ULL;
+}
+
+int32_t float32_getexp(float32 f32)
+{
+ Float a = { .i = f32 };
+ if (float32_is_normal(f32)) {
+ return a.exp;
+ }
+ if (float32_is_denormal(f32)) {
+ return a.exp + 1;
+ }
+ return -1;
+}
+
+static uint32_t int128_getw0(Int128 x)
+{
+ return int128_getlo(x);
+}
+
+static uint32_t int128_getw1(Int128 x)
+{
+ return int128_getlo(x) >> 32;
+}
+
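+/*
+ * Schoolbook 64x64->128 multiply built from 32-bit partial products:
+ * a * b = (pp2 << 64) + ((pp1a + pp1b) << 32) + pp0, with the carries
+ * folded into the high word. For example, 0xffffffffffffffff squared
+ * gives high word 0xfffffffffffffffe and low word 0x1.
+ */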
+static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
+{
+ Int128 a, b;
+ uint64_t pp0, pp1a, pp1b, pp1s, pp2;
+
+ a = int128_make64(ai);
+ b = int128_make64(bi);
+ pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
+ pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
+ pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
+ pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
+
+ pp1s = pp1a + pp1b;
+ if ((pp1s < pp1a) || (pp1s < pp1b)) {
+ pp2 += (1ULL << 32);
+ }
+ uint64_t ret_low = pp0 + (pp1s << 32);
+ if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
+ pp2 += 1;
+ }
+
+ return int128_make128(ret_low, pp2 + (pp1s >> 32));
+}
+
+static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
+{
+ Int128 ret = int128_sub(a, b);
+ if (borrow != 0) {
+ ret = int128_sub(ret, int128_one());
+ }
+ return ret;
+}
+
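+/*
+ * Wide accumulator for the emulated FMA: a 128-bit mantissa plus
+ * IEEE-style guard, round and sticky bits that remember what was
+ * shifted out, so the final result can still be rounded correctly.
+ */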
+typedef struct {
+ Int128 mant;
+ int32_t exp;
+ uint8_t sign;
+ uint8_t guard;
+ uint8_t round;
+ uint8_t sticky;
+} Accum;
+
+static void accum_init(Accum *p)
+{
+ p->mant = int128_zero();
+ p->exp = 0;
+ p->sign = 0;
+ p->guard = 0;
+ p->round = 0;
+ p->sticky = 0;
+}
+
+static Accum accum_norm_left(Accum a)
+{
+ a.exp--;
+ a.mant = int128_lshift(a.mant, 1);
+ a.mant = int128_or(a.mant, int128_make64(a.guard));
+ a.guard = a.round;
+ a.round = a.sticky;
+ return a;
+}
+
+/* This function is marked inline for performance reasons */
+static inline Accum accum_norm_right(Accum a, int amt)
+{
+ if (amt > 130) {
+ a.sticky |=
+ a.round | a.guard | int128_nz(a.mant);
+ a.guard = a.round = 0;
+ a.mant = int128_zero();
+ a.exp += amt;
+        return a;
+    }
+ while (amt >= 64) {
+ a.sticky |= a.round | a.guard | (int128_getlo(a.mant) != 0);
+ a.guard = (int128_getlo(a.mant) >> 63) & 1;
+ a.round = (int128_getlo(a.mant) >> 62) & 1;
+ a.mant = int128_make64(int128_gethi(a.mant));
+ a.exp += 64;
+ amt -= 64;
+ }
+ while (amt > 0) {
+ a.exp++;
+ a.sticky |= a.round;
+ a.round = a.guard;
+ a.guard = int128_getlo(a.mant) & 1;
+ a.mant = int128_rshift(a.mant, 1);
+ amt--;
+ }
+ return a;
+}
+
+/*
+ * On the add/sub, we need to be able to shift out lots of bits, and we
+ * need a sticky bit to record what was shifted out.
+ */
+static Accum accum_add(Accum a, Accum b);
+
+static Accum accum_sub(Accum a, Accum b, int negate)
+{
+ Accum ret;
+ accum_init(&ret);
+ int borrow;
+
+ if (a.sign != b.sign) {
+ b.sign = !b.sign;
+ return accum_add(a, b);
+ }
+ if (b.exp > a.exp) {
+ /* small - big == - (big - small) */
+ return accum_sub(b, a, !negate);
+ }
+ if ((b.exp == a.exp) && (int128_gt(b.mant, a.mant))) {
+ /* small - big == - (big - small) */
+ return accum_sub(b, a, !negate);
+ }
+
+ while (a.exp > b.exp) {
+        /* Try to normalize exponents: shrink a's exponent and grow its mantissa */
+ if (int128_gethi(a.mant) & (1ULL << 62)) {
+ /* Can't grow a any more */
+ break;
+ } else {
+ a = accum_norm_left(a);
+ }
+ }
+
+ while (a.exp > b.exp) {
+        /* Try to normalize exponents: grow b's exponent and shrink its mantissa */
+ /* Keep around shifted out bits... we might need those later */
+ b = accum_norm_right(b, a.exp - b.exp);
+ }
+
+ if ((int128_gt(b.mant, a.mant))) {
+ return accum_sub(b, a, !negate);
+ }
+
+ /* OK, now things should be normalized! */
+ ret.sign = a.sign;
+ ret.exp = a.exp;
+ assert(!int128_gt(b.mant, a.mant));
+ borrow = (b.round << 2) | (b.guard << 1) | b.sticky;
+ ret.mant = int128_sub_borrow(a.mant, b.mant, (borrow != 0));
+ borrow = 0 - borrow;
+ ret.guard = (borrow >> 2) & 1;
+ ret.round = (borrow >> 1) & 1;
+ ret.sticky = (borrow >> 0) & 1;
+ if (negate) {
+ ret.sign = !ret.sign;
+ }
+ return ret;
+}
+
+static Accum accum_add(Accum a, Accum b)
+{
+ Accum ret;
+ accum_init(&ret);
+ if (a.sign != b.sign) {
+ b.sign = !b.sign;
+ return accum_sub(a, b, 0);
+ }
+ if (b.exp > a.exp) {
+ /* small + big == (big + small) */
+ return accum_add(b, a);
+ }
+ if ((b.exp == a.exp) && int128_gt(b.mant, a.mant)) {
+ /* small + big == (big + small) */
+ return accum_add(b, a);
+ }
+
+ while (a.exp > b.exp) {
+        /* Try to normalize exponents: shrink a's exponent and grow its mantissa */
+ if (int128_gethi(a.mant) & (1ULL << 62)) {
+ /* Can't grow a any more */
+ break;
+ } else {
+ a = accum_norm_left(a);
+ }
+ }
+
+ while (a.exp > b.exp) {
+        /* Try to normalize exponents: grow b's exponent and shrink its mantissa */
+ /* Keep around shifted out bits... we might need those later */
+ b = accum_norm_right(b, a.exp - b.exp);
+ }
+
+ /* OK, now things should be normalized! */
+ if (int128_gt(b.mant, a.mant)) {
+ return accum_add(b, a);
+    }
+ ret.sign = a.sign;
+ ret.exp = a.exp;
+ assert(!int128_gt(b.mant, a.mant));
+ ret.mant = int128_add(a.mant, b.mant);
+ ret.guard = b.guard;
+ ret.round = b.round;
+ ret.sticky = b.sticky;
+ return ret;
+}
+
+/* Return an infinity with requested sign */
+static float64 infinite_float64(uint8_t sign)
+{
+ if (sign) {
+ return make_float64(DF_MINUS_INF);
+ } else {
+ return make_float64(DF_INF);
+ }
+}
+
+/* Return a maximum finite value with requested sign */
+static float64 maxfinite_float64(uint8_t sign)
+{
+ if (sign) {
+ return make_float64(DF_MINUS_MAXF);
+ } else {
+ return make_float64(DF_MAXF);
+ }
+}
+
+/* Return a zero value with requested sign */
+static float64 zero_float64(uint8_t sign)
+{
+ if (sign) {
+ return make_float64(0x8000000000000000);
+ } else {
+ return float64_zero;
+ }
+}
+
+/* Return an infinity with the requested sign */
+float32 infinite_float32(uint8_t sign)
+{
+ if (sign) {
+ return make_float32(SF_MINUS_INF);
+ } else {
+ return make_float32(SF_INF);
+ }
+}
+
+/* Return a maximum finite value with the requested sign */
+static float32 maxfinite_float32(uint8_t sign)
+{
+ if (sign) {
+ return make_float32(SF_MINUS_MAXF);
+ } else {
+ return make_float32(SF_MAXF);
+ }
+}
+
+/* Return a zero value with requested sign */
+static float32 zero_float32(uint8_t sign)
+{
+ if (sign) {
+ return make_float32(0x80000000);
+ } else {
+ return float32_zero;
+ }
+}
+
+#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
+static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
+{ \
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
+ && ((a.guard | a.round | a.sticky) == 0)) { \
+ /* result zero */ \
+ switch (fp_status->float_rounding_mode) { \
+ case float_round_down: \
+ return zero_##SUFFIX(1); \
+ default: \
+ return zero_##SUFFIX(0); \
+ } \
+ } \
+ /* Normalize right */ \
+ /* We want MANTBITS bits of mantissa plus the leading one. */ \
+ /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
+ /* So we need to normalize right while the high word is non-zero and \
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
+ while ((int128_gethi(a.mant) != 0) || \
+ ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
+ a = accum_norm_right(a, 1); \
+ } \
+ /* \
+ * OK, now normalize left \
+ * We want to normalize left until we have a leading one in bit 24 \
+     * Theoretically, we only need to shift a maximum of one to the left \
+     * if we shifted out lots of bits from B, or if we had no shift / 1 \
+     * shift; in that case sticky should be 0 \
+ */ \
+ while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
+ a = accum_norm_left(a); \
+ } \
+ /* \
+ * OK, now we might need to denormalize because of potential underflow. \
+ * We need to do this before rounding, and rounding might make us normal \
+ * again \
+ */ \
+ while (a.exp <= 0) { \
+ a = accum_norm_right(a, 1 - a.exp); \
+ /* \
+ * Do we have underflow? \
+ * That's when we get an inexact answer because we ran out of bits \
+ * in a denormal. \
+ */ \
+ if (a.guard || a.round || a.sticky) { \
+ float_raise(float_flag_underflow, fp_status); \
+ } \
+ } \
+ /* OK, we're relatively canonical... now we need to round */ \
+ if (a.guard || a.round || a.sticky) { \
+ float_raise(float_flag_inexact, fp_status); \
+ switch (fp_status->float_rounding_mode) { \
+ case float_round_to_zero: \
+ /* Chop and we're done */ \
+ break; \
+ case float_round_up: \
+ if (a.sign == 0) { \
+ a.mant = int128_add(a.mant, int128_one()); \
+ } \
+ break; \
+ case float_round_down: \
+ if (a.sign != 0) { \
+ a.mant = int128_add(a.mant, int128_one()); \
+ } \
+ break; \
+ default: \
+ if (a.round || a.sticky) { \
+ /* round up if guard is 1, down if guard is zero */ \
+ a.mant = int128_add(a.mant, int128_make64(a.guard)); \
+ } else if (a.guard) { \
+ /* exactly .5, round up if odd */ \
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
+ } \
+ break; \
+ } \
+ } \
+ /* \
+ * OK, now we might have carried all the way up. \
+ * So we might need to shr once \
+ * at least we know that the lsb should be zero if we rounded and \
+ * got a carry out... \
+ */ \
+ if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
+ a = accum_norm_right(a, 1); \
+ } \
+ /* Overflow? */ \
+ if (a.exp >= INF_EXP) { \
+ /* Yep, inf result */ \
+ float_raise(float_flag_overflow, fp_status); \
+ float_raise(float_flag_inexact, fp_status); \
+ switch (fp_status->float_rounding_mode) { \
+ case float_round_to_zero: \
+ return maxfinite_##SUFFIX(a.sign); \
+ case float_round_up: \
+ if (a.sign == 0) { \
+ return infinite_##SUFFIX(a.sign); \
+ } else { \
+ return maxfinite_##SUFFIX(a.sign); \
+ } \
+ case float_round_down: \
+ if (a.sign != 0) { \
+ return infinite_##SUFFIX(a.sign); \
+ } else { \
+ return maxfinite_##SUFFIX(a.sign); \
+ } \
+ default: \
+ return infinite_##SUFFIX(a.sign); \
+ } \
+ } \
+ /* Underflow? */ \
+ if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
+ /* Leading one means: No, we're normal. So, we should be done... */ \
+ INTERNAL_TYPE ret; \
+ ret.i = 0; \
+ ret.sign = a.sign; \
+ ret.exp = a.exp; \
+ ret.mant = int128_getlo(a.mant); \
+ return ret.i; \
+ } \
+ assert(a.exp == 1); \
+ INTERNAL_TYPE ret; \
+ ret.i = 0; \
+ ret.sign = a.sign; \
+ ret.exp = 0; \
+ ret.mant = int128_getlo(a.mant); \
+ return ret.i; \
+}
+
+GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
+GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
+
+static bool is_inf_prod(float64 a, float64 b)
+{
+ return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
+ (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
+ (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
+}
+
+static float64 special_fma(float64 a, float64 b, float64 c,
+ float_status *fp_status)
+{
+ float64 ret = make_float64(0);
+
+ /*
+ * If A multiplied by B is an exact infinity and C is also an infinity
+ * but with the opposite sign, FMA returns NaN and raises invalid.
+ */
+ uint8_t a_sign = float64_is_neg(a);
+ uint8_t b_sign = float64_is_neg(b);
+ uint8_t c_sign = float64_is_neg(c);
+ if (is_inf_prod(a, b) && float64_is_infinity(c)) {
+ if ((a_sign ^ b_sign) != c_sign) {
+ ret = make_float64(DF_NAN);
+ float_raise(float_flag_invalid, fp_status);
+ return ret;
+ }
+ }
+ if ((float64_is_infinity(a) && float64_is_zero(b)) ||
+ (float64_is_zero(a) && float64_is_infinity(b))) {
+ ret = make_float64(DF_NAN);
+ float_raise(float_flag_invalid, fp_status);
+ return ret;
+ }
+ /*
+ * If none of the above checks are true and C is a NaN,
+ * a NaN shall be returned
+ * If A or B are NaN, a NAN shall be returned.
+ */
+ if (float64_is_any_nan(a) ||
+ float64_is_any_nan(b) ||
+ float64_is_any_nan(c)) {
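+        /*
+         * Bit 51 is the quiet bit of a double-precision NaN: if it is
+         * clear, the operand is a signaling NaN and raises invalid.
+         */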
+ if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ ret = make_float64(DF_NAN);
+ return ret;
+ }
+ /*
+ * We have checked for adding opposite-signed infinities.
+ * Other infinities return infinity with the correct sign
+ */
+ if (float64_is_infinity(c)) {
+ ret = infinite_float64(c_sign);
+ return ret;
+ }
+ if (float64_is_infinity(a) || float64_is_infinity(b)) {
+ ret = infinite_float64(a_sign ^ b_sign);
+ return ret;
+ }
+ g_assert_not_reached();
+}
+
+static float32 special_fmaf(float32 a, float32 b, float32 c,
+ float_status *fp_status)
+{
+ float64 aa, bb, cc;
+ aa = float32_to_float64(a, fp_status);
+ bb = float32_to_float64(b, fp_status);
+ cc = float32_to_float64(c, fp_status);
+ return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
+}
+
+float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
+ float_status *fp_status)
+{
+ Accum prod;
+ Accum acc;
+ Accum result;
+ accum_init(&prod);
+ accum_init(&acc);
+ accum_init(&result);
+
+ uint8_t a_sign = float32_is_neg(a);
+ uint8_t b_sign = float32_is_neg(b);
+ uint8_t c_sign = float32_is_neg(c);
+ if (float32_is_infinity(a) ||
+ float32_is_infinity(b) ||
+ float32_is_infinity(c)) {
+ return special_fmaf(a, b, c, fp_status);
+ }
+ if (float32_is_any_nan(a) ||
+ float32_is_any_nan(b) ||
+ float32_is_any_nan(c)) {
+ return special_fmaf(a, b, c, fp_status);
+ }
+ if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
+ float32 tmp = float32_mul(a, b, fp_status);
+ tmp = float32_add(tmp, c, fp_status);
+ return tmp;
+ }
+
+ /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
+ prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
+
+ /*
+ * Note: extracting the mantissa into an int is multiplying by
+ * 2**23, so adjust here
+ */
+ prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
+ prod.sign = a_sign ^ b_sign;
+ if (float32_is_zero(a) || float32_is_zero(b)) {
+ prod.exp = -2 * WAY_BIG_EXP;
+ }
+ if ((scale > 0) && float32_is_denormal(c)) {
+ acc.mant = int128_mul_6464(0, 0);
+ acc.exp = -WAY_BIG_EXP;
+ acc.sign = c_sign;
+ acc.sticky = 1;
+ result = accum_add(prod, acc);
+ } else if (!float32_is_zero(c)) {
+ acc.mant = int128_mul_6464(float32_getmant(c), 1);
+ acc.exp = float32_getexp(c);
+ acc.sign = c_sign;
+ result = accum_add(prod, acc);
+ } else {
+ result = prod;
+ }
+ result.exp += scale;
+ return accum_round_float32(result, fp_status);
+}
+
+float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
+{
+ if (float32_is_zero(a) || float32_is_zero(b)) {
+ return float32_mul(a, b, fp_status);
+ }
+ return internal_fmafx(a, b, float32_zero, 0, fp_status);
+}
+
+float64 internal_mpyhh(float64 a, float64 b,
+ unsigned long long int accumulated,
+ float_status *fp_status)
+{
+ Accum x;
+ unsigned long long int prod;
+ unsigned int sticky;
+ uint8_t a_sign, b_sign;
+
+ sticky = accumulated & 1;
+ accumulated >>= 1;
+ accum_init(&x);
+ if (float64_is_zero(a) ||
+ float64_is_any_nan(a) ||
+ float64_is_infinity(a)) {
+ return float64_mul(a, b, fp_status);
+ }
+ if (float64_is_zero(b) ||
+ float64_is_any_nan(b) ||
+ float64_is_infinity(b)) {
+ return float64_mul(a, b, fp_status);
+ }
+ x.mant = int128_mul_6464(accumulated, 1);
+ x.sticky = sticky;
+ prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
+ x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
+ x.exp = float64_getexp(a) + float64_getexp(b) - DF_BIAS - 20;
+ if (!float64_is_normal(a) || !float64_is_normal(b)) {
+ /* crush to inexact zero */
+ x.sticky = 1;
+ x.exp = -4096;
+ }
+ a_sign = float64_is_neg(a);
+ b_sign = float64_is_neg(b);
+ x.sign = a_sign ^ b_sign;
+ return accum_round_float64(x, fp_status);
+}
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
new file mode 100644
index 000000000..e3b99a8cf
--- /dev/null
+++ b/target/hexagon/fma_emu.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_FMA_EMU_H
+#define HEXAGON_FMA_EMU_H
+
+static inline bool is_finite(float64 x)
+{
+ return !float64_is_any_nan(x) && !float64_is_infinity(x);
+}
+
+int32_t float64_getexp(float64 f64);
+int32_t float32_getexp(float32 f32);
+float32 infinite_float32(uint8_t sign);
+float32 internal_fmafx(float32 a, float32 b, float32 c,
+ int scale, float_status *fp_status);
+float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
+float64 internal_mpyhh(float64 a, float64 b,
+ unsigned long long int accumulated,
+ float_status *fp_status);
+
+#endif
diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c
new file mode 100644
index 000000000..9c8c04c96
--- /dev/null
+++ b/target/hexagon/gdbstub.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "cpu.h"
+#include "internal.h"
+
+int hexagon_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+
+ if (n < TOTAL_PER_THREAD_REGS) {
+ return gdb_get_regl(mem_buf, env->gpr[n]);
+ }
+
+ g_assert_not_reached();
+}
+
+int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+
+ if (n < TOTAL_PER_THREAD_REGS) {
+ env->gpr[n] = ldtul_p(mem_buf);
+ return sizeof(target_ulong);
+ }
+
+ g_assert_not_reached();
+}
diff --git a/target/hexagon/gen_dectree_import.c b/target/hexagon/gen_dectree_import.c
new file mode 100644
index 000000000..ee354677f
--- /dev/null
+++ b/target/hexagon/gen_dectree_import.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This program generates the encodings file that is processed by
+ * the dectree.py script to produce the decoding tree. We use the C
+ * preprocessor to manipulate the files imported from the Hexagon
+ * architecture library.
+ */
+#include <stdio.h>
+#include <string.h>
+#include "opcodes.h"
+
+#define STRINGIZE(X) #X
+
+const char * const opcode_names[] = {
+#define OPCODE(IID) STRINGIZE(IID)
+#include "opcodes_def_generated.h.inc"
+ NULL
+#undef OPCODE
+};
+
+/*
+ * Process the instruction definitions
+ * Scalar core instructions have the following form
+ * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(),
+ * "Add 32-bit registers",
+ * { RdV=RsV+RtV;})
+ * HVX instructions have the following form
+ * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)",
+ * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX,A_CVI_LATE),
+ * "Insert Word Scalar into Vector",
+ * VxV.uw[0] = RtV;)
+ */
+const char * const opcode_syntax[XX_LAST_OPCODE] = {
+#define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \
+ [TAG] = BEH,
+#define EXTINSN(TAG, BEH, ATTRIBS, DESCR, SEM) \
+ [TAG] = BEH,
+#include "imported/allidefs.def"
+#undef Q6INSN
+#undef EXTINSN
+};
+
+const char * const opcode_rregs[] = {
+#define REGINFO(TAG, REGINFO, RREGS, WREGS) RREGS,
+#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */
+#include "op_regs_generated.h.inc"
+ NULL
+#undef REGINFO
+#undef IMMINFO
+};
+
+const char * const opcode_wregs[] = {
+#define REGINFO(TAG, REGINFO, RREGS, WREGS) WREGS,
+#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */
+#include "op_regs_generated.h.inc"
+ NULL
+#undef REGINFO
+#undef IMMINFO
+};
+
+const OpcodeEncoding opcode_encodings[] = {
+#define DEF_ENC32(TAG, ENCSTR) \
+ [TAG] = { .encoding = ENCSTR },
+#define DEF_ENC_SUBINSN(TAG, CLASS, ENCSTR) \
+ [TAG] = { .encoding = ENCSTR, .enc_class = CLASS },
+#define DEF_EXT_ENC(TAG, CLASS, ENCSTR) \
+ [TAG] = { .encoding = ENCSTR, .enc_class = CLASS },
+#include "imported/encode.def"
+#undef DEF_ENC32
+#undef DEF_ENC_SUBINSN
+#undef DEF_EXT_ENC
+};
+
+static const char * const opcode_enc_class_names[XX_LAST_ENC_CLASS] = {
+ "NORMAL",
+ "16BIT",
+ "SUBINSN_A",
+ "SUBINSN_L1",
+ "SUBINSN_L2",
+ "SUBINSN_S1",
+ "SUBINSN_S2",
+ "EXT_noext",
+ "EXT_mmvec",
+};
+
+static const char *get_opcode_enc(int opcode)
+{
+ const char *tmp = opcode_encodings[opcode].encoding;
+ if (tmp == NULL) {
+ tmp = "MISSING ENCODING";
+ }
+ return tmp;
+}
+
+static const char *get_opcode_enc_class(int opcode)
+{
+ const char *tmp = opcode_encodings[opcode].encoding;
+ if (tmp == NULL) {
+ const char *test = "V6_"; /* HVX */
+ const char *name = opcode_names[opcode];
+ if (strncmp(name, test, strlen(test)) == 0) {
+ return "EXT_mmvec";
+ }
+ }
+ return opcode_enc_class_names[opcode_encodings[opcode].enc_class];
+}
+
+static void gen_iset_table(FILE *out)
+{
+ int i;
+
+ fprintf(out, "iset = {\n");
+ for (i = 0; i < XX_LAST_OPCODE; i++) {
+ fprintf(out, "\t\'%s\' : {\n", opcode_names[i]);
+ fprintf(out, "\t\t\'tag\' : \'%s\',\n", opcode_names[i]);
+ fprintf(out, "\t\t\'syntax\' : \'%s\',\n", opcode_syntax[i]);
+ fprintf(out, "\t\t\'rregs\' : \'%s\',\n", opcode_rregs[i]);
+ fprintf(out, "\t\t\'wregs\' : \'%s\',\n", opcode_wregs[i]);
+ fprintf(out, "\t\t\'enc\' : \'%s\',\n", get_opcode_enc(i));
+ fprintf(out, "\t\t\'enc_class\' : \'%s\',\n", get_opcode_enc_class(i));
+ fprintf(out, "\t},\n");
+ }
+ fprintf(out, "};\n\n");
+}
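+
+/*
+ * A sketch of one emitted entry, using the A2_add example above
+ * (rregs/wregs/enc values illustrative):
+ *     'A2_add' : {
+ *         'tag' : 'A2_add',
+ *         'syntax' : 'Rd32=add(Rs32,Rt32)',
+ *         'rregs' : 'Rs32,Rt32',
+ *         'wregs' : 'Rd32',
+ *         'enc' : '1111011...ddddd',
+ *         'enc_class' : 'NORMAL',
+ *     },
+ */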
+
+static void gen_tags_list(FILE *out)
+{
+ int i;
+
+ fprintf(out, "tags = [\n");
+ for (i = 0; i < XX_LAST_OPCODE; i++) {
+ fprintf(out, "\t\'%s\',\n", opcode_names[i]);
+ }
+ fprintf(out, "];\n\n");
+}
+
+static void gen_enc_ext_spaces_table(FILE *out)
+{
+ fprintf(out, "enc_ext_spaces = {\n");
+#define DEF_EXT_SPACE(SPACEID, ENCSTR) \
+ fprintf(out, "\t\'%s\' : \'%s\',\n", #SPACEID, ENCSTR);
+#include "imported/encode.def"
+#undef DEF_EXT_SPACE
+ fprintf(out, "};\n\n");
+}
+
+static void gen_subinsn_groupings_table(FILE *out)
+{
+ fprintf(out, "subinsn_groupings = {\n");
+#define DEF_PACKED32(TAG, TYPEA, TYPEB, ENCSTR) \
+ do { \
+ fprintf(out, "\t\'%s\' : {\n", #TAG); \
+ fprintf(out, "\t\t\'name\' : \'%s\',\n", #TAG); \
+ fprintf(out, "\t\t\'class_a\' : \'%s\',\n", #TYPEA); \
+ fprintf(out, "\t\t\'class_b\' : \'%s\',\n", #TYPEB); \
+ fprintf(out, "\t\t\'enc\' : \'%s\',\n", ENCSTR); \
+ fprintf(out, "\t},\n"); \
+ } while (0);
+#include "imported/encode.def"
+#undef DEF_PACKED32
+ fprintf(out, "};\n\n");
+}
+
+int main(int argc, char *argv[])
+{
+ FILE *outfile;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: gen_dectree_import ouptputfile\n");
+ return 1;
+ }
+ outfile = fopen(argv[1], "w");
+ if (outfile == NULL) {
+ fprintf(stderr, "Cannot open %s for writing\n", argv[1]);
+ return 1;
+ }
+
+ gen_iset_table(outfile);
+ gen_tags_list(outfile);
+ gen_enc_ext_spaces_table(outfile);
+ gen_subinsn_groupings_table(outfile);
+
+ fclose(outfile);
+ return 0;
+}
diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py
new file mode 100755
index 000000000..a446c4538
--- /dev/null
+++ b/target/hexagon/gen_helper_funcs.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Helpers for gen_helper_function
+##
+def gen_decl_ea(f):
+ f.write(" uint32_t EA;\n")
+
+def gen_helper_return_type(f,regtype,regid,regno):
+ if regno > 1 : f.write(", ")
+ f.write("int32_t")
+
+def gen_helper_return_type_pair(f,regtype,regid,regno):
+ if regno > 1 : f.write(", ")
+ f.write("int64_t")
+
+def gen_helper_arg(f,regtype,regid,regno):
+ if regno > 0 : f.write(", " )
+ f.write("int32_t %s%sV" % (regtype,regid))
+
+def gen_helper_arg_new(f,regtype,regid,regno):
+ if regno >= 0 : f.write(", " )
+ f.write("int32_t %s%sN" % (regtype,regid))
+
+def gen_helper_arg_pair(f,regtype,regid,regno):
+ if regno >= 0 : f.write(", ")
+ f.write("int64_t %s%sV" % (regtype,regid))
+
+def gen_helper_arg_ext(f,regtype,regid,regno):
+ if regno > 0 : f.write(", ")
+ f.write("void *%s%sV_void" % (regtype,regid))
+
+def gen_helper_arg_ext_pair(f,regtype,regid,regno):
+ if regno > 0 : f.write(", ")
+ f.write("void *%s%sV_void" % (regtype,regid))
+
+def gen_helper_arg_opn(f,regtype,regid,i,tag):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_arg_ext_pair(f,regtype,regid,i)
+ else:
+ gen_helper_arg_pair(f,regtype,regid,i)
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_arg_ext(f,regtype,regid,i)
+ else:
+ gen_helper_arg(f,regtype,regid,i)
+ elif hex_common.is_new_val(regtype, regid, tag):
+ gen_helper_arg_new(f,regtype,regid,i)
+        else:
+            print("Bad register parse: ", regtype, regid)
+    else:
+        print("Bad register parse: ", regtype, regid)
+
+def gen_helper_arg_imm(f,immlett):
+ f.write(", int32_t %s" % (hex_common.imm_name(immlett)))
+
+def gen_helper_dest_decl(f,regtype,regid,regno,subfield=""):
+ f.write(" int32_t %s%sV%s = 0;\n" % \
+ (regtype,regid,subfield))
+
+def gen_helper_dest_decl_pair(f,regtype,regid,regno,subfield=""):
+ f.write(" int64_t %s%sV%s = 0;\n" % \
+ (regtype,regid,subfield))
+
+def gen_helper_dest_decl_ext(f,regtype,regid):
+ if (regtype == "Q"):
+ f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \
+ (regtype,regid,regtype,regid))
+ else:
+ f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \
+ (regtype,regid,regtype,regid))
+
+def gen_helper_dest_decl_ext_pair(f,regtype,regid,regno):
+ f.write(" /* %s%sV is *(MMVectorPair *))%s%sV_void) */\n" % \
+ (regtype,regid,regtype, regid))
+
+def gen_helper_dest_decl_opn(f,regtype,regid,i):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_dest_decl_ext_pair(f,regtype,regid, i)
+ else:
+ gen_helper_dest_decl_pair(f,regtype,regid,i)
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_dest_decl_ext(f,regtype,regid)
+ else:
+ gen_helper_dest_decl(f,regtype,regid,i)
+    else:
+        print("Bad register parse: ", regtype, regid)
+
+def gen_helper_src_var_ext(f,regtype,regid):
+ if (regtype == "Q"):
+ f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \
+ (regtype,regid,regtype,regid))
+ else:
+ f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \
+ (regtype,regid,regtype,regid))
+
+def gen_helper_src_var_ext_pair(f,regtype,regid,regno):
+ f.write(" /* %s%sV%s is *(MMVectorPair *)(%s%sV%s_void) */\n" % \
+ (regtype,regid,regno,regtype,regid,regno))
+
+def gen_helper_return(f,regtype,regid,regno):
+ f.write(" return %s%sV;\n" % (regtype,regid))
+
+def gen_helper_return_pair(f,regtype,regid,regno):
+ f.write(" return %s%sV;\n" % (regtype,regid))
+
+def gen_helper_dst_write_ext(f,regtype,regid):
+ return
+
+def gen_helper_dst_write_ext_pair(f,regtype,regid):
+ return
+
+def gen_helper_return_opn(f, regtype, regid, i):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_dst_write_ext_pair(f,regtype,regid)
+ else:
+ gen_helper_return_pair(f,regtype,regid,i)
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_dst_write_ext(f,regtype,regid)
+ else:
+ gen_helper_return(f,regtype,regid,i)
+    else:
+        print("Bad register parse: ", regtype, regid)
+
+##
+## Generate the helper function
+## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
+## We produce:
+## int32_t HELPER(A2_add)(CPUHexagonState *env, int32_t RsV, int32_t RtV)
+## {
+##     uint32_t slot __attribute__((unused)) = 4;
+## int32_t RdV = 0;
+## { RdV=RsV+RtV;}
+## COUNT_HELPER(A2_add);
+## return RdV;
+## }
+##
+def gen_helper_function(f, tag, tagregs, tagimms):
+ regs = tagregs[tag]
+ imms = tagimms[tag]
+
+ numresults = 0
+ numscalarresults = 0
+ numscalarreadwrite = 0
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ numresults += 1
+ if (hex_common.is_scalar_reg(regtype)):
+ numscalarresults += 1
+ if (hex_common.is_readwrite(regid)):
+ if (hex_common.is_scalar_reg(regtype)):
+ numscalarreadwrite += 1
+
+ if (numscalarresults > 1):
+ ## The helper is bogus when there is more than one result
+ f.write("void HELPER(%s)(CPUHexagonState *env) { BOGUS_HELPER(%s); }\n"
+ % (tag, tag))
+ else:
+ ## The return type of the function is the type of the destination
+ ## register (if scalar)
+ i=0
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ continue
+ else:
+ gen_helper_return_type_pair(f,regtype,regid,i)
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ continue
+ else:
+ gen_helper_return_type(f,regtype,regid,i)
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+ i += 1
+
+ if (numscalarresults == 0):
+ f.write("void")
+ f.write(" HELPER(%s)(CPUHexagonState *env" % tag)
+
+ ## Arguments include the vector destination operands
+ i = 1
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_arg_ext_pair(f,regtype,regid,i)
+ else:
+ continue
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_arg_ext(f,regtype,regid,i)
+ else:
+ # This is the return value of the function
+ continue
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+ i += 1
+
+ ## Arguments to the helper function are the source regs and immediates
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_read(regid)):
+ if (hex_common.is_hvx_reg(regtype) and
+ hex_common.is_readwrite(regid)):
+ continue
+ gen_helper_arg_opn(f,regtype,regid,i,tag)
+ i += 1
+ for immlett,bits,immshift in imms:
+ gen_helper_arg_imm(f,immlett)
+ i += 1
+
+ if hex_common.need_slot(tag):
+ if i > 0: f.write(", ")
+ f.write("uint32_t slot")
+ i += 1
+ if hex_common.need_part1(tag):
+ if i > 0: f.write(", ")
+ f.write("uint32_t part1")
+ f.write(")\n{\n")
+ if (not hex_common.need_slot(tag)):
+ f.write(" uint32_t slot __attribute__((unused)) = 4;\n" )
+ if hex_common.need_ea(tag): gen_decl_ea(f)
+ ## Declare the return variable
+ i=0
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_writeonly(regid)):
+ gen_helper_dest_decl_opn(f,regtype,regid,i)
+ i += 1
+
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_read(regid)):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_src_var_ext_pair(f,regtype,regid,i)
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_helper_src_var_ext(f,regtype,regid)
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+
+ if 'A_FPOP' in hex_common.attribdict[tag]:
+        f.write('    arch_fpop_start(env);\n')
+
+ f.write(" %s\n" % hex_common.semdict[tag])
+
+ if 'A_FPOP' in hex_common.attribdict[tag]:
+        f.write('    arch_fpop_end(env);\n')
+
+ ## Save/return the return variable
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ gen_helper_return_opn(f, regtype, regid, i)
+ f.write("}\n\n")
+ ## End of the helper definition
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.read_overrides_file(sys.argv[3])
+ hex_common.read_overrides_file(sys.argv[4])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[5], 'w') as f:
+ for tag in hex_common.tags:
+ ## Skip the priv instructions
+ if ( "A_PRIV" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the guest instructions
+ if ( "A_GUEST" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the diag instructions
+ if ( tag == "Y6_diag" ) :
+ continue
+ if ( tag == "Y6_diag0" ) :
+ continue
+ if ( tag == "Y6_diag1" ) :
+ continue
+ if ( hex_common.skip_qemu_helper(tag) ):
+ continue
+
+ gen_helper_function(f, tag, tagregs, tagimms)
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py
new file mode 100755
index 000000000..3b4e993fd
--- /dev/null
+++ b/target/hexagon/gen_helper_protos.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Helpers for gen_helper_prototype
+##
+def_helper_types = {
+ 'N' : 's32',
+ 'O' : 's32',
+ 'P' : 's32',
+ 'M' : 's32',
+ 'C' : 's32',
+ 'R' : 's32',
+ 'V' : 'ptr',
+ 'Q' : 'ptr'
+}
+
+def_helper_types_pair = {
+ 'R' : 's64',
+ 'C' : 's64',
+ 'S' : 's64',
+ 'G' : 's64',
+ 'V' : 'ptr',
+ 'Q' : 'ptr'
+}
+
+def gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i):
+ if (hex_common.is_pair(regid)):
+ f.write(", %s" % (def_helper_types_pair[regtype]))
+ elif (hex_common.is_single(regid)):
+ f.write(", %s" % (def_helper_types[regtype]))
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+
+##
+## Generate the DEF_HELPER prototype for an instruction
+## For A2_add: Rd32=add(Rs32,Rt32)
+## We produce:
+## DEF_HELPER_3(A2_add, s32, env, s32, s32)
+##
+def gen_helper_prototype(f, tag, tagregs, tagimms):
+ regs = tagregs[tag]
+ imms = tagimms[tag]
+
+ numresults = 0
+ numscalarresults = 0
+ numscalarreadwrite = 0
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ numresults += 1
+ if (hex_common.is_scalar_reg(regtype)):
+ numscalarresults += 1
+ if (hex_common.is_readwrite(regid)):
+ if (hex_common.is_scalar_reg(regtype)):
+ numscalarreadwrite += 1
+
+ if (numscalarresults > 1):
+ ## The helper is bogus when there is more than one result
+ f.write('DEF_HELPER_1(%s, void, env)\n' % tag)
+ else:
+ ## Figure out how many arguments the helper will take
+ if (numscalarresults == 0):
+ def_helper_size = len(regs)+len(imms)+numscalarreadwrite+1
+ if hex_common.need_part1(tag): def_helper_size += 1
+ if hex_common.need_slot(tag): def_helper_size += 1
+ f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
+ ## The return type is void
+ f.write(', void' )
+ else:
+ def_helper_size = len(regs)+len(imms)+numscalarreadwrite
+ if hex_common.need_part1(tag): def_helper_size += 1
+ if hex_common.need_slot(tag): def_helper_size += 1
+ f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
+
+ ## Generate the qemu DEF_HELPER type for each result
+ ## Iterate over this list twice
+ ## - Emit the scalar result
+ ## - Emit the vector result
+ i=0
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (not hex_common.is_hvx_reg(regtype)):
+ gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+
+ ## Put the env between the outputs and inputs
+ f.write(', env' )
+ i += 1
+
+ # Second pass
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+
+ ## Generate the qemu type for each input operand (regs and immediates)
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_read(regid)):
+ if (hex_common.is_hvx_reg(regtype) and
+ hex_common.is_readwrite(regid)):
+ continue
+ gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+ for immlett,bits,immshift in imms:
+ f.write(", s32")
+
+ ## Add the arguments for the instruction slot and part1 (if needed)
+        if hex_common.need_slot(tag): f.write(', i32')
+        if hex_common.need_part1(tag): f.write(', i32')
+ f.write(')\n')
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.read_overrides_file(sys.argv[3])
+ hex_common.read_overrides_file(sys.argv[4])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[5], 'w') as f:
+ for tag in hex_common.tags:
+ ## Skip the priv instructions
+ if ( "A_PRIV" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the guest instructions
+ if ( "A_GUEST" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the diag instructions
+ if ( tag == "Y6_diag" ) :
+ continue
+ if ( tag == "Y6_diag0" ) :
+ continue
+ if ( tag == "Y6_diag1" ) :
+ continue
+
+ if ( hex_common.skip_qemu_helper(tag) ):
+ continue
+
+ gen_helper_prototype(f, tag, tagregs, tagimms)
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_op_attribs.py b/target/hexagon/gen_op_attribs.py
new file mode 100755
index 000000000..6a1a1ca21
--- /dev/null
+++ b/target/hexagon/gen_op_attribs.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.calculate_attribs()
+
+ ##
+ ## Generate all the attributes associated with each instruction
+ ##
+ with open(sys.argv[3], 'w') as f:
+ for tag in hex_common.tags:
+ f.write('OP_ATTRIB(%s,ATTRIBS(%s))\n' % \
+ (tag, ','.join(sorted(hex_common.attribdict[tag]))))
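+            ## e.g. (a sketch; the attribute pairing is illustrative):
+            ##     OP_ATTRIB(F2_sfadd,ATTRIBS(A_FPOP))
+            ## -- one line per tag, listing that tag's sorted attributes.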
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_op_regs.py b/target/hexagon/gen_op_regs.py
new file mode 100755
index 000000000..e8137d4a1
--- /dev/null
+++ b/target/hexagon/gen_op_regs.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Generate the register and immediate operands for each instruction
+##
+def calculate_regid_reg(tag):
+ def letter_inc(x): return chr(ord(x)+1)
+ ordered_implregs = [ 'SP','FP','LR' ]
+ srcdst_lett = 'X'
+ src_lett = 'S'
+ dst_lett = 'D'
+ retstr = ""
+ mapdict = {}
+ for reg in ordered_implregs:
+ reg_rd = 0
+ reg_wr = 0
+ if ('A_IMPLICIT_WRITES_'+reg) in hex_common.attribdict[tag]: reg_wr = 1
+ if reg_rd and reg_wr:
+ retstr += srcdst_lett
+ mapdict[srcdst_lett] = reg
+ srcdst_lett = letter_inc(srcdst_lett)
+ elif reg_rd:
+ retstr += src_lett
+ mapdict[src_lett] = reg
+ src_lett = letter_inc(src_lett)
+ elif reg_wr:
+ retstr += dst_lett
+ mapdict[dst_lett] = reg
+ dst_lett = letter_inc(dst_lett)
+ return retstr,mapdict
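+
+## For example (a sketch): a tag whose attributes include
+## A_IMPLICIT_WRITES_SP and A_IMPLICIT_WRITES_LR yields
+## retstr == "DE" and mapdict == {'D': 'SP', 'E': 'LR'};
+## since only implicit writes are tracked here, the src and
+## srcdst letters are never consumed.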
+
+def calculate_regid_letters(tag):
+ retstr,mapdict = calculate_regid_reg(tag)
+ return retstr
+
+def strip_reg_prefix(x):
+ y=x.replace('UREG.','')
+ y=y.replace('MREG.','')
+ return y.replace('GREG.','')
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[3], 'w') as f:
+ for tag in hex_common.tags:
+ regs = tagregs[tag]
+ rregs = []
+ wregs = []
+ regids = ""
+ for regtype,regid,toss,numregs in regs:
+ if hex_common.is_read(regid):
+ if regid[0] not in regids: regids += regid[0]
+ rregs.append(regtype+regid+numregs)
+ if hex_common.is_written(regid):
+ wregs.append(regtype+regid+numregs)
+ if regid[0] not in regids: regids += regid[0]
+            for attrib in hex_common.attribdict[tag]:
+                if hex_common.attribinfo[attrib]['rreg']:
+                    rregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]['rreg']))
+                if hex_common.attribinfo[attrib]['wreg']:
+                    wregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]['wreg']))
+ regids += calculate_regid_letters(tag)
+ f.write('REGINFO(%s,"%s",\t/*RD:*/\t"%s",\t/*WR:*/\t"%s")\n' % \
+ (tag,regids,",".join(rregs),",".join(wregs)))
+
+ for tag in hex_common.tags:
+ imms = tagimms[tag]
+ f.write( 'IMMINFO(%s' % tag)
+ if not imms:
+ f.write(''','u',0,0,'U',0,0''')
+ for sign,size,shamt in imms:
+ if sign == 'r': sign = 's'
+ if not shamt:
+ shamt = "0"
+ f.write(''','%s',%s,%s''' % (sign,size,shamt))
+ if len(imms) == 1:
+ if sign.isupper():
+ myu = 'u'
+ else:
+ myu = 'U'
+ f.write(''','%s',0,0''' % myu)
+ f.write(')\n')
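+            ## For example (a sketch): a tag whose only immediate is signed,
+            ## 16 bits wide and unshifted emits
+            ##     IMMINFO(tag,'s',16,0,'U',0,0)
+            ## where the trailing 'U',0,0 is padding for the absent second
+            ## immediate.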
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_opcodes_def.py b/target/hexagon/gen_opcodes_def.py
new file mode 100755
index 000000000..fa604a8db
--- /dev/null
+++ b/target/hexagon/gen_opcodes_def.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+
+ ##
+ ## Generate a list of all the opcodes
+ ##
+ with open(sys.argv[3], 'w') as f:
+ for tag in hex_common.tags:
+ f.write ( "OPCODE(%s),\n" % (tag) )
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_printinsn.py b/target/hexagon/gen_printinsn.py
new file mode 100755
index 000000000..12737bf8a
--- /dev/null
+++ b/target/hexagon/gen_printinsn.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Generate data for printing each instruction (format string + operands)
+##
+def regprinter(m):
+ str = m.group(1)
+ str += ":".join(["%d"]*len(m.group(2)))
+ str += m.group(3)
+ if ('S' in m.group(1)) and (len(m.group(2)) == 1):
+ str += "/%s"
+ elif ('C' in m.group(1)) and (len(m.group(2)) == 1):
+ str += "/%s"
+ return str
+
+def spacify(s):
+    # Regular expression that matches any operator containing the '=' character:
+ opswithequal_re = '[-+^&|!<>=]?='
+ # Regular expression that matches any assignment operator.
+ assignment_re = '[-+^&|]?='
+
+ # Out of the operators that contain the = sign, if the operator is also an
+ # assignment, spaces will be added around it, unless it's enclosed within
+ # parentheses, or spaces are already present.
+
+ equals = re.compile(opswithequal_re)
+ assign = re.compile(assignment_re)
+
+ slen = len(s)
+ paren_count = {}
+ i = 0
+ pc = 0
+ while i < slen:
+ c = s[i]
+ if c == '(':
+ pc += 1
+ elif c == ')':
+ pc -= 1
+ paren_count[i] = pc
+ i += 1
+
+ # Iterate over all operators that contain the equal sign. If any
+ # match is also an assignment operator, add spaces around it if
+ # the parenthesis count is 0.
+ pos = 0
+ out = []
+ for m in equals.finditer(s):
+ ms = m.start()
+ me = m.end()
+ # t is the string that matched opswithequal_re.
+ t = m.string[ms:me]
+ out += s[pos:ms]
+ pos = me
+ if paren_count[ms] == 0:
+ # Check if the entire string t is an assignment.
+ am = assign.match(t)
+ if am and len(am.group(0)) == me-ms:
+ # Don't add spaces if they are already there.
+ if ms > 0 and s[ms-1] != ' ':
+ out.append(' ')
+ out += t
+ if me < slen and s[me] != ' ':
+ out.append(' ')
+ continue
+ # If this is not an assignment, just append it to the output
+ # string.
+ out += t
+
+ # Append the remaining part of the string.
+ out += s[pos:len(s)]
+ return ''.join(out)
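+
+## For example, spacify("Rd32=add(Rs32,Rt32)") returns
+## "Rd32 = add(Rs32,Rt32)": the lone '=' is an assignment at parenthesis
+## depth 0, so spaces are added around it, while '==' and any '=' inside
+## parentheses are left untouched.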
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+
+ immext_casere = re.compile(r'IMMEXT\(([A-Za-z])')
+
+ with open(sys.argv[3], 'w') as f:
+ for tag in hex_common.tags:
+ if not hex_common.behdict[tag]: continue
+ extendable_upper_imm = False
+ extendable_lower_imm = False
+ m = immext_casere.search(hex_common.semdict[tag])
+ if m:
+ if m.group(1).isupper():
+ extendable_upper_imm = True
+ else:
+ extendable_lower_imm = True
+ beh = hex_common.behdict[tag]
+ beh = hex_common.regre.sub(regprinter,beh)
+ beh = hex_common.absimmre.sub(r"#%s0x%x",beh)
+ beh = hex_common.relimmre.sub(r"PC+%s%d",beh)
+ beh = spacify(beh)
+ # Print out a literal "%s" at the end, used to match empty string
+ # so C won't complain at us
+ if ("A_VECX" in hex_common.attribdict[tag]):
+ macname = "DEF_VECX_PRINTINFO"
+ else: macname = "DEF_PRINTINFO"
+ f.write('%s(%s,"%s%%s"' % (macname,tag,beh))
+ regs_or_imms = \
+ hex_common.reg_or_immre.findall(hex_common.behdict[tag])
+ ri = 0
+ seenregs = {}
+ for allregs,a,b,c,d,allimm,immlett,bits,immshift in regs_or_imms:
+ if a:
+ #register
+ if b in seenregs:
+ regno = seenregs[b]
+ else:
+ regno = ri
+ if len(b) == 1:
+ f.write(', insn->regno[%d]' % regno)
+ if 'S' in a:
+ f.write(', sreg2str(insn->regno[%d])' % regno)
+ elif 'C' in a:
+ f.write(', creg2str(insn->regno[%d])' % regno)
+ elif len(b) == 2:
+ f.write(', insn->regno[%d] + 1, insn->regno[%d]' % \
+ (regno,regno))
+ else:
+ print("Put some stuff to handle quads here")
+ if b not in seenregs:
+ seenregs[b] = ri
+ ri += 1
+ else:
+ #immediate
+ if (immlett.isupper()):
+ if extendable_upper_imm:
+ if immlett in 'rR':
+ f.write(',insn->extension_valid?"##":""')
+ else:
+ f.write(',insn->extension_valid?"#":""')
+ else:
+ f.write(',""')
+ ii = 1
+ else:
+ if extendable_lower_imm:
+ if immlett in 'rR':
+ f.write(',insn->extension_valid?"##":""')
+ else:
+ f.write(',insn->extension_valid?"#":""')
+ else:
+ f.write(',""')
+ ii = 0
+ f.write(', insn->immed[%d]' % ii)
+ # append empty string so there is at least one more arg
+ f.write(',"")\n')
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_semantics.c b/target/hexagon/gen_semantics.c
new file mode 100644
index 000000000..4a2bdd70e
--- /dev/null
+++ b/target/hexagon/gen_semantics.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This program generates the semantics file that is processed by
+ * the do_qemu.py script. We use the C preprocessor to manipulate the
+ * files imported from the Hexagon architecture library.
+ */
+
+#include <stdio.h>
+#define STRINGIZE(X) #X
+
+int main(int argc, char *argv[])
+{
+ FILE *outfile;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: gen_semantics ouptputfile\n");
+ return 1;
+ }
+ outfile = fopen(argv[1], "w");
+ if (outfile == NULL) {
+ fprintf(stderr, "Cannot open %s for writing\n", argv[1]);
+ return 1;
+ }
+
+/*
+ * Process the instruction definitions
+ * Scalar core instructions have the following form
+ * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(),
+ * "Add 32-bit registers",
+ * { RdV=RsV+RtV;})
+ * HVX instructions have the following form
+ * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)",
+ * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX),
+ * "Insert Word Scalar into Vector",
+ * VxV.uw[0] = RtV;)
+ */
+#define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \
+ do { \
+ fprintf(outfile, "SEMANTICS( \\\n" \
+ " \"%s\", \\\n" \
+ " %s, \\\n" \
+ " \"\"\"%s\"\"\" \\\n" \
+ ")\n", \
+ #TAG, STRINGIZE(BEH), STRINGIZE(SEM)); \
+ fprintf(outfile, "ATTRIBUTES( \\\n" \
+ " \"%s\", \\\n" \
+ " \"%s\" \\\n" \
+ ")\n", \
+ #TAG, STRINGIZE(ATTRIBS)); \
+ } while (0);
+#define EXTINSN(TAG, BEH, ATTRIBS, DESCR, SEM) \
+ do { \
+ fprintf(outfile, "SEMANTICS( \\\n" \
+ " \"%s\", \\\n" \
+ " %s, \\\n" \
+ " \"\"\"%s\"\"\" \\\n" \
+ ")\n", \
+ #TAG, STRINGIZE(BEH), STRINGIZE(SEM)); \
+ fprintf(outfile, "ATTRIBUTES( \\\n" \
+ " \"%s\", \\\n" \
+ " \"%s\" \\\n" \
+ ")\n", \
+ #TAG, STRINGIZE(ATTRIBS)); \
+ } while (0);
+#include "imported/allidefs.def"
+#undef Q6INSN
+#undef EXTINSN
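+
+/*
+ * For the A2_add example above this emits (a sketch):
+ *     SEMANTICS( \
+ *         "A2_add", \
+ *         "Rd32=add(Rs32,Rt32)", \
+ *         """{ RdV=RsV+RtV;}""" \
+ *     )
+ *     ATTRIBUTES( \
+ *         "A2_add", \
+ *         "ATTRIBS()" \
+ *     )
+ */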
+
+/*
+ * Process the macro definitions
+ * Macro definitions have the following form
+ * DEF_MACRO(
+ * fLSBNEW0,
+ * predlog_read(thread,0),
+ * ()
+ * )
+ * The important part here is the attributes. Whenever an instruction
+ * invokes a macro, we add the macro's attributes to the instruction.
+ */
+#define DEF_MACRO(MNAME, BEH, ATTRS) \
+ fprintf(outfile, "MACROATTRIB( \\\n" \
+ " \"%s\", \\\n" \
+ " \"\"\"%s\"\"\", \\\n" \
+ " \"%s\" \\\n" \
+ ")\n", \
+ #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS));
+#include "imported/macros.def"
+#undef DEF_MACRO
+
+/*
+ * Process the macros for HVX
+ */
+#define DEF_MACRO(MNAME, BEH, ATTRS) \
+ fprintf(outfile, "MACROATTRIB( \\\n" \
+ " \"%s\", \\\n" \
+ " \"\"\"%s\"\"\", \\\n" \
+ " \"%s\" \\\n" \
+ ")\n", \
+ #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS));
+#include "imported/allext_macros.def"
+#undef DEF_MACRO
+
+ fclose(outfile);
+ return 0;
+}
diff --git a/target/hexagon/gen_shortcode.py b/target/hexagon/gen_shortcode.py
new file mode 100755
index 000000000..9b589d018
--- /dev/null
+++ b/target/hexagon/gen_shortcode.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+def gen_shortcode(f, tag):
+ f.write('DEF_SHORTCODE(%s, %s)\n' % (tag, hex_common.semdict[tag]))
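+    ## e.g., for A2_add this emits (a sketch):
+    ##     DEF_SHORTCODE(A2_add, { RdV=RsV+RtV;})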
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[3], 'w') as f:
+ f.write("#ifndef DEF_SHORTCODE\n")
+ f.write("#define DEF_SHORTCODE(TAG,SHORTCODE) /* Nothing */\n")
+ f.write("#endif\n")
+
+ for tag in hex_common.tags:
+ ## Skip the priv instructions
+ if ( "A_PRIV" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the guest instructions
+ if ( "A_GUEST" in hex_common.attribdict[tag] ) :
+ continue
+ ## Skip the diag instructions
+ if ( tag == "Y6_diag" ) :
+ continue
+ if ( tag == "Y6_diag0" ) :
+ continue
+ if ( tag == "Y6_diag1" ) :
+ continue
+
+ gen_shortcode(f, tag)
+
+ f.write("#undef DEF_SHORTCODE\n")
+
+if __name__ == "__main__":
+ main()
diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h
new file mode 100644
index 000000000..c6f0879b6
--- /dev/null
+++ b/target/hexagon/gen_tcg.h
@@ -0,0 +1,743 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_GEN_TCG_H
+#define HEXAGON_GEN_TCG_H
+
+/*
+ * Here is a primer to understand the tag names for load/store instructions
+ *
+ * Data types
+ * b signed byte r0 = memb(r2+#0)
+ * ub unsigned byte r0 = memub(r2+#0)
+ * h signed half word (16 bits) r0 = memh(r2+#0)
+ * uh unsigned half word r0 = memuh(r2+#0)
+ * i integer (32 bits) r0 = memw(r2+#0)
+ * d double word (64 bits) r1:0 = memd(r2+#0)
+ *
+ * Addressing modes
+ * _io indirect with offset r0 = memw(r1+#4)
+ * _ur absolute with register offset r0 = memw(r1<<#4+##variable)
+ * _rr indirect with register offset r0 = memw(r1+r4<<#2)
+ * gp global pointer relative r0 = memw(gp+#200)
+ * _sp stack pointer relative r0 = memw(r29+#12)
+ * _ap absolute set r0 = memw(r1=##variable)
+ * _pr post increment register r0 = memw(r1++m1)
+ * _pbr post increment bit reverse r0 = memw(r1++m1:brev)
+ * _pi post increment immediate r0 = memb(r1++#1)
+ * _pci post increment circular immediate r0 = memw(r1++#4:circ(m0))
+ * _pcr post increment circular register r0 = memw(r1++I:circ(m0))
+ */
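+
+/*
+ * For example, the tag L2_loadrub_pi decodes as: load register,
+ * unsigned byte ("rub"), post increment immediate ("_pi"), i.e.
+ *     r0 = memub(r1++#1)
+ */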
+
+/* Macros for complex addressing modes */
+#define GET_EA_ap \
+ do { \
+ fEA_IMM(UiV); \
+ tcg_gen_movi_tl(ReV, UiV); \
+ } while (0)
+#define GET_EA_pr \
+ do { \
+ fEA_REG(RxV); \
+ fPM_M(RxV, MuV); \
+ } while (0)
+#define GET_EA_pbr \
+ do { \
+ gen_helper_fbrev(EA, RxV); \
+ tcg_gen_add_tl(RxV, RxV, MuV); \
+ } while (0)
+#define GET_EA_pi \
+ do { \
+ fEA_REG(RxV); \
+ fPM_I(RxV, siV); \
+ } while (0)
+#define GET_EA_pci \
+ do { \
+ TCGv tcgv_siV = tcg_constant_tl(siV); \
+ tcg_gen_mov_tl(EA, RxV); \
+ gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \
+ hex_gpr[HEX_REG_CS0 + MuN]); \
+ } while (0)
+#define GET_EA_pcr(SHIFT) \
+ do { \
+ TCGv ireg = tcg_temp_new(); \
+ tcg_gen_mov_tl(EA, RxV); \
+ gen_read_ireg(ireg, MuV, (SHIFT)); \
+ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
+ tcg_temp_free(ireg); \
+ } while (0)
+
+/* Instructions with multiple definitions */
+#define fGEN_TCG_LOAD_AP(RES, SIZE, SIGN) \
+ do { \
+ fMUST_IMMEXT(UiV); \
+ fEA_IMM(UiV); \
+ fLOAD(1, SIZE, SIGN, EA, RES); \
+ tcg_gen_movi_tl(ReV, UiV); \
+ } while (0)
+
+#define fGEN_TCG_L4_loadrub_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RdV, 1, u)
+#define fGEN_TCG_L4_loadrb_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RdV, 1, s)
+#define fGEN_TCG_L4_loadruh_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RdV, 2, u)
+#define fGEN_TCG_L4_loadrh_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RdV, 2, s)
+#define fGEN_TCG_L4_loadri_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RdV, 4, u)
+#define fGEN_TCG_L4_loadrd_ap(SHORTCODE) \
+ fGEN_TCG_LOAD_AP(RddV, 8, u)
+
+#define fGEN_TCG_L2_loadrub_pci(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrb_pci(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadruh_pci(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrh_pci(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadri_pci(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrd_pci(SHORTCODE) SHORTCODE
+
+#define fGEN_TCG_LOAD_pcr(SHIFT, LOAD) \
+ do { \
+ TCGv ireg = tcg_temp_new(); \
+ tcg_gen_mov_tl(EA, RxV); \
+ gen_read_ireg(ireg, MuV, SHIFT); \
+ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
+ LOAD; \
+ tcg_temp_free(ireg); \
+ } while (0)
+
+#define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, u, EA, RdV))
+#define fGEN_TCG_L2_loadrb_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, s, EA, RdV))
+#define fGEN_TCG_L2_loadruh_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, u, EA, RdV))
+#define fGEN_TCG_L2_loadrh_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, s, EA, RdV))
+#define fGEN_TCG_L2_loadri_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV))
+#define fGEN_TCG_L2_loadrd_pcr(SHORTCODE) \
+ fGEN_TCG_LOAD_pcr(3, fLOAD(1, 8, u, EA, RddV))
+
+#define fGEN_TCG_L2_loadrub_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrub_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrub_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrb_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrb_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrb_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadruh_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadruh_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadruh_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrh_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrh_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrh_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadri_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadri_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadri_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrd_pr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrd_pbr(SHORTCODE) SHORTCODE
+#define fGEN_TCG_L2_loadrd_pi(SHORTCODE) SHORTCODE
+
+/*
+ * These instructions load 2 bytes and place them in
+ * the two halves of the destination register.
+ * The GET_EA macro determines the addressing mode.
+ * The SIGN argument determines whether to zero-extend or
+ * sign-extend.
+ */
+#define fGEN_TCG_loadbXw2(GET_EA, SIGN) \
+ do { \
+ TCGv tmp = tcg_temp_new(); \
+ TCGv byte = tcg_temp_new(); \
+ GET_EA; \
+ fLOAD(1, 2, u, EA, tmp); \
+ tcg_gen_movi_tl(RdV, 0); \
+ for (int i = 0; i < 2; i++) { \
+ gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \
+ } \
+ tcg_temp_free(tmp); \
+ tcg_temp_free(byte); \
+ } while (0)
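+
+/*
+ * A worked sketch (assuming little-endian byte order): if the loaded
+ * half word is 0x0180 (bytes 0x80 and 0x01) and SIGN is true, each
+ * byte is sign-extended into its own half word and RdV becomes
+ * 0x0001ff80.
+ */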
+
+#define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \
+ fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), false)
+#define fGEN_TCG_L4_loadbzw2_ur(SHORTCODE) \
+ fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), false)
+#define fGEN_TCG_L2_loadbsw2_io(SHORTCODE) \
+ fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), true)
+#define fGEN_TCG_L4_loadbsw2_ur(SHORTCODE) \
+ fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), true)
+#define fGEN_TCG_L4_loadbzw2_ap(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_ap, false)
+#define fGEN_TCG_L2_loadbzw2_pr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pr, false)
+#define fGEN_TCG_L2_loadbzw2_pbr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pbr, false)
+#define fGEN_TCG_L2_loadbzw2_pi(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pi, false)
+#define fGEN_TCG_L4_loadbsw2_ap(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_ap, true)
+#define fGEN_TCG_L2_loadbsw2_pr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pr, true)
+#define fGEN_TCG_L2_loadbsw2_pbr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pbr, true)
+#define fGEN_TCG_L2_loadbsw2_pi(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pi, true)
+#define fGEN_TCG_L2_loadbzw2_pci(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pci, false)
+#define fGEN_TCG_L2_loadbsw2_pci(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pci, true)
+#define fGEN_TCG_L2_loadbzw2_pcr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pcr(1), false)
+#define fGEN_TCG_L2_loadbsw2_pcr(SHORTCODE) \
+ fGEN_TCG_loadbXw2(GET_EA_pcr(1), true)
+
+/*
+ * These instructions load 4 bytes and place them in
+ * the four halves of the destination register pair.
+ * The GET_EA macro determines the addressing mode.
+ * The SIGN argument determines whether to zero-extend or
+ * sign-extend.
+ */
+#define fGEN_TCG_loadbXw4(GET_EA, SIGN) \
+ do { \
+ TCGv tmp = tcg_temp_new(); \
+ TCGv byte = tcg_temp_new(); \
+ GET_EA; \
+ fLOAD(1, 4, u, EA, tmp); \
+ tcg_gen_movi_i64(RddV, 0); \
+ for (int i = 0; i < 4; i++) { \
+ gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \
+ } \
+ tcg_temp_free(tmp); \
+ tcg_temp_free(byte); \
+ } while (0)
+
+#define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \
+ fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), false)
+#define fGEN_TCG_L4_loadbzw4_ur(SHORTCODE) \
+ fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), false)
+#define fGEN_TCG_L2_loadbsw4_io(SHORTCODE) \
+ fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), true)
+#define fGEN_TCG_L4_loadbsw4_ur(SHORTCODE) \
+ fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), true)
+#define fGEN_TCG_L2_loadbzw4_pci(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pci, false)
+#define fGEN_TCG_L2_loadbsw4_pci(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pci, true)
+#define fGEN_TCG_L2_loadbzw4_pcr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pcr(2), false)
+#define fGEN_TCG_L2_loadbsw4_pcr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pcr(2), true)
+#define fGEN_TCG_L4_loadbzw4_ap(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_ap, false)
+#define fGEN_TCG_L2_loadbzw4_pr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pr, false)
+#define fGEN_TCG_L2_loadbzw4_pbr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pbr, false)
+#define fGEN_TCG_L2_loadbzw4_pi(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pi, false)
+#define fGEN_TCG_L4_loadbsw4_ap(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_ap, true)
+#define fGEN_TCG_L2_loadbsw4_pr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pr, true)
+#define fGEN_TCG_L2_loadbsw4_pbr(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pbr, true)
+#define fGEN_TCG_L2_loadbsw4_pi(SHORTCODE) \
+ fGEN_TCG_loadbXw4(GET_EA_pi, true)
+
+/*
+ * These instructions load a half word, shift the destination right by 16 bits
+ * and place the loaded value in the high half word of the destination pair.
+ * The GET_EA macro determines the addressing mode.
+ */
+#define fGEN_TCG_loadalignh(GET_EA) \
+ do { \
+ TCGv tmp = tcg_temp_new(); \
+ TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
+ GET_EA; \
+ fLOAD(1, 2, u, EA, tmp); \
+ tcg_gen_extu_i32_i64(tmp_i64, tmp); \
+ tcg_gen_shri_i64(RyyV, RyyV, 16); \
+ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \
+ tcg_temp_free(tmp); \
+ tcg_temp_free_i64(tmp_i64); \
+ } while (0)
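+
+/*
+ * A worked sketch: with RyyV = 0x1111222233334444 and a loaded half
+ * word of 0xabcd, the shift yields 0x0000111122223333 and the deposit
+ * into bits 63:48 produces RyyV = 0xabcd111122223333.
+ */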
+
+#define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \
+ fGEN_TCG_loadalignh(fEA_IRs(UiV, RtV, uiV))
+#define fGEN_TCG_L2_loadalignh_io(SHORTCODE) \
+ fGEN_TCG_loadalignh(fEA_RI(RsV, siV))
+#define fGEN_TCG_L2_loadalignh_pci(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_pci)
+#define fGEN_TCG_L2_loadalignh_pcr(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_pcr(1))
+#define fGEN_TCG_L4_loadalignh_ap(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_ap)
+#define fGEN_TCG_L2_loadalignh_pr(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_pr)
+#define fGEN_TCG_L2_loadalignh_pbr(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_pbr)
+#define fGEN_TCG_L2_loadalignh_pi(SHORTCODE) \
+ fGEN_TCG_loadalignh(GET_EA_pi)
+
+/* Same as above, but loads a byte instead of half word */
+#define fGEN_TCG_loadalignb(GET_EA) \
+ do { \
+ TCGv tmp = tcg_temp_new(); \
+ TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
+ GET_EA; \
+ fLOAD(1, 1, u, EA, tmp); \
+ tcg_gen_extu_i32_i64(tmp_i64, tmp); \
+ tcg_gen_shri_i64(RyyV, RyyV, 8); \
+ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \
+ tcg_temp_free(tmp); \
+ tcg_temp_free_i64(tmp_i64); \
+ } while (0)
+
+#define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \
+ fGEN_TCG_loadalignb(fEA_RI(RsV, siV))
+#define fGEN_TCG_L4_loadalignb_ur(SHORTCODE) \
+ fGEN_TCG_loadalignb(fEA_IRs(UiV, RtV, uiV))
+#define fGEN_TCG_L2_loadalignb_pci(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_pci)
+#define fGEN_TCG_L2_loadalignb_pcr(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_pcr(0))
+#define fGEN_TCG_L4_loadalignb_ap(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_ap)
+#define fGEN_TCG_L2_loadalignb_pr(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_pr)
+#define fGEN_TCG_L2_loadalignb_pbr(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_pbr)
+#define fGEN_TCG_L2_loadalignb_pi(SHORTCODE) \
+ fGEN_TCG_loadalignb(GET_EA_pi)
+
+/*
+ * Predicated loads
+ * Here is a primer to understand the tag names
+ *
+ * Predicate used
+ * t true "old" value if (p0) r0 = memb(r2+#0)
+ * f false "old" value if (!p0) r0 = memb(r2+#0)
+ * tnew true "new" value if (p0.new) r0 = memb(r2+#0)
+ * fnew false "new" value if (!p0.new) r0 = memb(r2+#0)
+ */
+#define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \
+ do { \
+ TCGv LSB = tcg_temp_local_new(); \
+ TCGLabel *label = gen_new_label(); \
+ GET_EA; \
+ PRED; \
+ PRED_LOAD_CANCEL(LSB, EA); \
+ tcg_gen_movi_tl(RdV, 0); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
+ fLOAD(1, SIZE, SIGN, EA, RdV); \
+ gen_set_label(label); \
+ tcg_temp_free(LSB); \
+ } while (0)
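+
+/*
+ * Control flow above: RdV is zeroed unconditionally, then the branch
+ * skips the fLOAD when the predicate LSB is 0, so a cancelled load
+ * leaves RdV = 0 instead of touching memory.
+ */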
+
+#define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, u)
+#define fGEN_TCG_L2_ploadrubf_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, u)
+#define fGEN_TCG_L2_ploadrubtnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, u)
+#define fGEN_TCG_L2_ploadrubfnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 1, u)
+#define fGEN_TCG_L2_ploadrbt_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, s)
+#define fGEN_TCG_L2_ploadrbf_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, s)
+#define fGEN_TCG_L2_ploadrbtnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, s)
+#define fGEN_TCG_L2_ploadrbfnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD({ fEA_REG(RxV); fPM_I(RxV, siV); }, \
+ fLSBNEWNOT(PtN), 1, s)
+
+#define fGEN_TCG_L2_ploadruht_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, u)
+#define fGEN_TCG_L2_ploadruhf_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, u)
+#define fGEN_TCG_L2_ploadruhtnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, u)
+#define fGEN_TCG_L2_ploadruhfnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, u)
+#define fGEN_TCG_L2_ploadrht_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, s)
+#define fGEN_TCG_L2_ploadrhf_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, s)
+#define fGEN_TCG_L2_ploadrhtnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, s)
+#define fGEN_TCG_L2_ploadrhfnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, s)
+
+#define fGEN_TCG_L2_ploadrit_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 4, u)
+#define fGEN_TCG_L2_ploadrif_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 4, u)
+#define fGEN_TCG_L2_ploadritnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 4, u)
+#define fGEN_TCG_L2_ploadrifnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 4, u)
+
+/* Predicated loads into a register pair */
+#define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \
+ do { \
+ TCGv LSB = tcg_temp_local_new(); \
+ TCGLabel *label = gen_new_label(); \
+ GET_EA; \
+ PRED; \
+ PRED_LOAD_CANCEL(LSB, EA); \
+ tcg_gen_movi_i64(RddV, 0); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
+ fLOAD(1, 8, u, EA, RddV); \
+ gen_set_label(label); \
+ tcg_temp_free(LSB); \
+ } while (0)
+
+#define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLD(PtV))
+#define fGEN_TCG_L2_ploadrdf_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLDNOT(PtV))
+#define fGEN_TCG_L2_ploadrdtnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEW(PtN))
+#define fGEN_TCG_L2_ploadrdfnew_pi(SHORTCODE) \
+ fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEWNOT(PtN))
+
+/* load-locked and store-locked */
+#define fGEN_TCG_L2_loadw_locked(SHORTCODE) \
+ SHORTCODE
+#define fGEN_TCG_L4_loadd_locked(SHORTCODE) \
+ SHORTCODE
+#define fGEN_TCG_S2_storew_locked(SHORTCODE) \
+ SHORTCODE
+#define fGEN_TCG_S4_stored_locked(SHORTCODE) \
+ SHORTCODE
+
+#define fGEN_TCG_STORE(SHORTCODE) \
+ do { \
+ TCGv HALF = tcg_temp_new(); \
+ TCGv BYTE = tcg_temp_new(); \
+ SHORTCODE; \
+ tcg_temp_free(HALF); \
+ tcg_temp_free(BYTE); \
+ } while (0)
+
+#define fGEN_TCG_STORE_pcr(SHIFT, STORE) \
+ do { \
+ TCGv ireg = tcg_temp_new(); \
+ TCGv HALF = tcg_temp_new(); \
+ TCGv BYTE = tcg_temp_new(); \
+ tcg_gen_mov_tl(EA, RxV); \
+ gen_read_ireg(ireg, MuV, SHIFT); \
+ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
+ STORE; \
+ tcg_temp_free(ireg); \
+ tcg_temp_free(HALF); \
+ tcg_temp_free(BYTE); \
+ } while (0)
+
+#define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerb_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerb_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV)))
+
+#define fGEN_TCG_S2_storerh_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerh_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerh_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, RtV)))
+
+#define fGEN_TCG_S2_storerf_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerf_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerf_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(1, RtV)))
+
+#define fGEN_TCG_S2_storeri_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storeri_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storeri_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, RtV))
+
+#define fGEN_TCG_S2_storerd_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerd_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerd_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(3, fSTORE(1, 8, EA, RttV))
+
+#define fGEN_TCG_S2_storerbnew_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerbnew_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerbnew_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, NtN)))
+
+#define fGEN_TCG_S2_storerhnew_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerhnew_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerhnew_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, NtN)))
+
+#define fGEN_TCG_S2_storerinew_pbr(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerinew_pci(SHORTCODE) \
+ fGEN_TCG_STORE(SHORTCODE)
+#define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \
+ fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN))
+
+/*
+ * Mathematical operations with more than one definition require
+ * special handling
+ */
+#define fGEN_TCG_A5_ACS(SHORTCODE) \
+ do { \
+ gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \
+ gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV); \
+ } while (0)
+
+/*
+ * Approximate reciprocal
+ * r3,p1 = sfrecipa(r0, r1)
+ *
+ * The helper packs the 2 32-bit results into a 64-bit value,
+ * so unpack them into the proper results.
+ */
+#define fGEN_TCG_F2_sfrecipa(SHORTCODE) \
+ do { \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \
+ tcg_gen_extrh_i64_i32(RdV, tmp); \
+ tcg_gen_extrl_i64_i32(PeV, tmp); \
+ tcg_temp_free_i64(tmp); \
+ } while (0)
+
+/*
+ * Approximation of the reciprocal square root
+ * r1,p0 = sfinvsqrta(r0)
+ *
+ * The helper packs the 2 32-bit results into a 64-bit value,
+ * so unpack them into the proper results.
+ */
+#define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \
+ do { \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \
+ tcg_gen_extrh_i64_i32(RdV, tmp); \
+ tcg_gen_extrl_i64_i32(PeV, tmp); \
+ tcg_temp_free_i64(tmp); \
+ } while (0)
+
+/*
+ * Add or subtract with carry.
+ * Predicate register is used as an extra input and output.
+ * r5:4 = add(r1:0, r3:2, p1):carry
+ */
+#define fGEN_TCG_A4_addp_c(SHORTCODE) \
+ do { \
+ TCGv_i64 carry = tcg_temp_new_i64(); \
+ TCGv_i64 zero = tcg_constant_i64(0); \
+ tcg_gen_extu_i32_i64(carry, PxV); \
+ tcg_gen_andi_i64(carry, carry, 1); \
+ tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
+ tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \
+ tcg_gen_extrl_i64_i32(PxV, carry); \
+ gen_8bitsof(PxV, PxV); \
+ tcg_temp_free_i64(carry); \
+ } while (0)
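+
+/*
+ * A worked sketch: with RssV = 0xffffffffffffffff, RttV = 0 and the
+ * predicate LSB set, the first add2 gives RddV = 0 with carry = 1,
+ * the second add2 leaves RddV = 0 with carry = 1, and gen_8bitsof
+ * replicates that carry-out across the 8 predicate bits of PxV.
+ */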
+
+/* r5:4 = sub(r1:0, r3:2, p1):carry */
+#define fGEN_TCG_A4_subp_c(SHORTCODE) \
+ do { \
+ TCGv_i64 carry = tcg_temp_new_i64(); \
+ TCGv_i64 zero = tcg_constant_i64(0); \
+ TCGv_i64 not_RttV = tcg_temp_new_i64(); \
+ tcg_gen_extu_i32_i64(carry, PxV); \
+ tcg_gen_andi_i64(carry, carry, 1); \
+ tcg_gen_not_i64(not_RttV, RttV); \
+ tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
+ tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \
+ tcg_gen_extrl_i64_i32(PxV, carry); \
+ gen_8bitsof(PxV, PxV); \
+ tcg_temp_free_i64(carry); \
+ tcg_temp_free_i64(not_RttV); \
+ } while (0)
+
+/*
+ * Compare each of the 8 unsigned bytes.
+ * The minimum is placed in each byte of the destination.
+ * Each bit of the predicate is set if the corresponding byte of the first
+ * operand is greater than the byte of the second operand.
+ * r5:4,p1 = vminub(r1:0, r3:2)
+ */
+#define fGEN_TCG_A6_vminub_RdP(SHORTCODE) \
+ do { \
+ TCGv left = tcg_temp_new(); \
+ TCGv right = tcg_temp_new(); \
+ TCGv tmp = tcg_temp_new(); \
+ tcg_gen_movi_tl(PeV, 0); \
+ tcg_gen_movi_i64(RddV, 0); \
+ for (int i = 0; i < 8; i++) { \
+ gen_get_byte_i64(left, i, RttV, false); \
+ gen_get_byte_i64(right, i, RssV, false); \
+ tcg_gen_setcond_tl(TCG_COND_GT, tmp, left, right); \
+ tcg_gen_deposit_tl(PeV, PeV, tmp, i, 1); \
+ tcg_gen_umin_tl(tmp, left, right); \
+ gen_set_byte_i64(i, RddV, tmp); \
+ } \
+ tcg_temp_free(left); \
+ tcg_temp_free(right); \
+ tcg_temp_free(tmp); \
+ } while (0)
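+
+/*
+ * A worked sketch for one byte lane: if byte i of RttV is 5 and byte i
+ * of RssV is 9, predicate bit i is cleared (5 > 9 is false) and byte i
+ * of RddV becomes min(5, 9) = 5.
+ */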
+
+/* Floating point */
+#define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
+ gen_helper_conv_sf2df(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_df2sf(SHORTCODE) \
+ gen_helper_conv_df2sf(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_uw2sf(SHORTCODE) \
+ gen_helper_conv_uw2sf(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_uw2df(SHORTCODE) \
+ gen_helper_conv_uw2df(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_w2sf(SHORTCODE) \
+ gen_helper_conv_w2sf(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_w2df(SHORTCODE) \
+ gen_helper_conv_w2df(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_ud2sf(SHORTCODE) \
+ gen_helper_conv_ud2sf(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_ud2df(SHORTCODE) \
+ gen_helper_conv_ud2df(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_d2sf(SHORTCODE) \
+ gen_helper_conv_d2sf(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_d2df(SHORTCODE) \
+ gen_helper_conv_d2df(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_sf2uw(SHORTCODE) \
+ gen_helper_conv_sf2uw(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2w(SHORTCODE) \
+ gen_helper_conv_sf2w(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2ud(SHORTCODE) \
+ gen_helper_conv_sf2ud(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2d(SHORTCODE) \
+ gen_helper_conv_sf2d(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_df2uw(SHORTCODE) \
+ gen_helper_conv_df2uw(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2w(SHORTCODE) \
+ gen_helper_conv_df2w(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2ud(SHORTCODE) \
+ gen_helper_conv_df2ud(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2d(SHORTCODE) \
+ gen_helper_conv_df2d(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_sf2uw_chop(SHORTCODE) \
+ gen_helper_conv_sf2uw_chop(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2w_chop(SHORTCODE) \
+ gen_helper_conv_sf2w_chop(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2ud_chop(SHORTCODE) \
+ gen_helper_conv_sf2ud_chop(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_sf2d_chop(SHORTCODE) \
+ gen_helper_conv_sf2d_chop(RddV, cpu_env, RsV)
+#define fGEN_TCG_F2_conv_df2uw_chop(SHORTCODE) \
+ gen_helper_conv_df2uw_chop(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2w_chop(SHORTCODE) \
+ gen_helper_conv_df2w_chop(RdV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2ud_chop(SHORTCODE) \
+ gen_helper_conv_df2ud_chop(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_conv_df2d_chop(SHORTCODE) \
+ gen_helper_conv_df2d_chop(RddV, cpu_env, RssV)
+#define fGEN_TCG_F2_sfadd(SHORTCODE) \
+ gen_helper_sfadd(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfsub(SHORTCODE) \
+ gen_helper_sfsub(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfcmpeq(SHORTCODE) \
+ gen_helper_sfcmpeq(PdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfcmpgt(SHORTCODE) \
+ gen_helper_sfcmpgt(PdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfcmpge(SHORTCODE) \
+ gen_helper_sfcmpge(PdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfcmpuo(SHORTCODE) \
+ gen_helper_sfcmpuo(PdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfmax(SHORTCODE) \
+ gen_helper_sfmax(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfmin(SHORTCODE) \
+ gen_helper_sfmin(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sfclass(SHORTCODE) \
+ do { \
+ TCGv imm = tcg_constant_tl(uiV); \
+ gen_helper_sfclass(PdV, cpu_env, RsV, imm); \
+ } while (0)
+#define fGEN_TCG_F2_sffixupn(SHORTCODE) \
+ gen_helper_sffixupn(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sffixupd(SHORTCODE) \
+ gen_helper_sffixupd(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sffixupr(SHORTCODE) \
+ gen_helper_sffixupr(RdV, cpu_env, RsV)
+#define fGEN_TCG_F2_dfadd(SHORTCODE) \
+ gen_helper_dfadd(RddV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfsub(SHORTCODE) \
+ gen_helper_dfsub(RddV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfmax(SHORTCODE) \
+ gen_helper_dfmax(RddV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfmin(SHORTCODE) \
+ gen_helper_dfmin(RddV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfcmpeq(SHORTCODE) \
+ gen_helper_dfcmpeq(PdV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfcmpgt(SHORTCODE) \
+ gen_helper_dfcmpgt(PdV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfcmpge(SHORTCODE) \
+ gen_helper_dfcmpge(PdV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfcmpuo(SHORTCODE) \
+ gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfclass(SHORTCODE) \
+ do { \
+ TCGv imm = tcg_constant_tl(uiV); \
+ gen_helper_dfclass(PdV, cpu_env, RssV, imm); \
+ } while (0)
+#define fGEN_TCG_F2_sfmpy(SHORTCODE) \
+ gen_helper_sfmpy(RdV, cpu_env, RsV, RtV)
+#define fGEN_TCG_F2_sffma(SHORTCODE) \
+ gen_helper_sffma(RxV, cpu_env, RxV, RsV, RtV)
+#define fGEN_TCG_F2_sffma_sc(SHORTCODE) \
+ gen_helper_sffma_sc(RxV, cpu_env, RxV, RsV, RtV, PuV)
+#define fGEN_TCG_F2_sffms(SHORTCODE) \
+ gen_helper_sffms(RxV, cpu_env, RxV, RsV, RtV)
+#define fGEN_TCG_F2_sffma_lib(SHORTCODE) \
+ gen_helper_sffma_lib(RxV, cpu_env, RxV, RsV, RtV)
+#define fGEN_TCG_F2_sffms_lib(SHORTCODE) \
+ gen_helper_sffms_lib(RxV, cpu_env, RxV, RsV, RtV)
+
+#define fGEN_TCG_F2_dfmpyfix(SHORTCODE) \
+ gen_helper_dfmpyfix(RddV, cpu_env, RssV, RttV)
+#define fGEN_TCG_F2_dfmpyhh(SHORTCODE) \
+ gen_helper_dfmpyhh(RxxV, cpu_env, RxxV, RssV, RttV)
+
+/*
+ * Nothing to do for these in QEMU; the self-assignments below merely
+ * suppress unused-variable compiler warnings.
+ */
+#define fGEN_TCG_Y4_l2fetch(SHORTCODE) \
+ do { \
+ RsV = RsV; \
+ RtV = RtV; \
+ } while (0)
+#define fGEN_TCG_Y5_l2fetch(SHORTCODE) \
+ do { \
+ RsV = RsV; \
+ } while (0)
+
+#endif
diff --git a/target/hexagon/gen_tcg_func_table.py b/target/hexagon/gen_tcg_func_table.py
new file mode 100755
index 000000000..4809d3273
--- /dev/null
+++ b/target/hexagon/gen_tcg_func_table.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[3], 'w') as f:
+ f.write("#ifndef HEXAGON_FUNC_TABLE_H\n")
+ f.write("#define HEXAGON_FUNC_TABLE_H\n\n")
+
+ f.write("const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {\n")
+ for tag in hex_common.tags:
+            ## Skip the priv instructions
+            if "A_PRIV" in hex_common.attribdict[tag]:
+                continue
+            ## Skip the guest instructions
+            if "A_GUEST" in hex_common.attribdict[tag]:
+                continue
+            ## Skip the diag instructions
+            if tag in {"Y6_diag", "Y6_diag0", "Y6_diag1"}:
+                continue
+
+ f.write(" [%s] = generate_%s,\n" % (tag, tag))
+ f.write("};\n\n")
+
+ f.write("#endif /* HEXAGON_FUNC_TABLE_H */\n")
+
+if __name__ == "__main__":
+ main()
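+
+## For reference, the emitted table has the following shape (the tags
+## shown are illustrative):
+##
+##     const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {
+##         [A2_add] = generate_A2_add,
+##         [A2_sub] = generate_A2_sub,
+##         ...
+##     };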
diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py
new file mode 100755
index 000000000..1fd9de95d
--- /dev/null
+++ b/target/hexagon/gen_tcg_funcs.py
@@ -0,0 +1,707 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+import hex_common
+
+##
+## Helpers for gen_tcg_func
+##
+def gen_decl_ea_tcg(f, tag):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag] or
+ 'A_LOAD' in hex_common.attribdict[tag]):
+ f.write(" TCGv EA = tcg_temp_local_new();\n")
+ else:
+ f.write(" TCGv EA = tcg_temp_new();\n")
+
+def gen_free_ea_tcg(f):
+ f.write(" tcg_temp_free(EA);\n")
+
+def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
+ regN="%s%sN" % (regtype,regid)
+ f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
+ (regtype, regid))
+ if (regtype == "C"):
+ f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ else:
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
+ f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
+ (regN, regN))
+ f.write(" }\n")
+ f.write(" if (!is_preloaded(ctx, %s + 1)) {\n" % regN)
+ f.write(" tcg_gen_mov_tl(hex_new_value[%s + 1], hex_gpr[%s + 1]);\n" % \
+ (regN, regN))
+ f.write(" }\n")
+
+def genptr_decl_writable(f, tag, regtype, regid, regno):
+ regN="%s%sN" % (regtype,regid)
+ f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
+ (regtype, regid))
+ if (regtype == "C"):
+ f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ else:
+ f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ f.write(" if (!is_preloaded(ctx, %s)) {\n" % regN)
+ f.write(" tcg_gen_mov_tl(hex_new_value[%s], hex_gpr[%s]);\n" % \
+ (regN, regN))
+ f.write(" }\n")
+
+def genptr_decl(f, tag, regtype, regid, regno):
+ regN="%s%sN" % (regtype,regid)
+ if (regtype == "R"):
+ if (regid in {"ss", "tt"}):
+ f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
+ (regtype, regid))
+ f.write(" const int %s = insn->regno[%d];\n" % \
+ (regN, regno))
+ elif (regid in {"dd", "ee", "xx", "yy"}):
+ genptr_decl_pair_writable(f, tag, regtype, regid, regno)
+ elif (regid in {"s", "t", "u", "v"}):
+ f.write(" TCGv %s%sV = hex_gpr[insn->regno[%d]];\n" % \
+ (regtype, regid, regno))
+ elif (regid in {"d", "e", "x", "y"}):
+ genptr_decl_writable(f, tag, regtype, regid, regno)
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid in {"s", "t", "u", "v"}):
+ f.write(" TCGv %s%sV = hex_pred[insn->regno[%d]];\n" % \
+ (regtype, regid, regno))
+ elif (regid in {"d", "e", "x"}):
+ genptr_decl_writable(f, tag, regtype, regid, regno)
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "C"):
+ if (regid == "ss"):
+ f.write(" TCGv_i64 %s%sV = tcg_temp_local_new_i64();\n" % \
+ (regtype, regid))
+ f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regN, regno))
+ elif (regid == "dd"):
+ genptr_decl_pair_writable(f, tag, regtype, regid, regno)
+ elif (regid == "s"):
+ f.write(" TCGv %s%sV = tcg_temp_local_new();\n" % \
+ (regtype, regid))
+ f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \
+ (regtype, regid, regno))
+ elif (regid == "d"):
+ genptr_decl_writable(f, tag, regtype, regid, regno)
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "M"):
+ if (regid == "u"):
+ f.write(" const int %s%sN = insn->regno[%d];\n"% \
+ (regtype, regid, regno))
+ f.write(" TCGv %s%sV = hex_gpr[%s%sN + HEX_REG_M0];\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "V"):
+ if (regid in {"dd"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" %\
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" %\
+ (regtype, regid))
+ if (hex_common.is_tmp_result(tag)):
+ f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 2, true);\n" % \
+ (regtype, regid))
+ else:
+ f.write(" ctx_future_vreg_off(ctx, %s%sN," % \
+ (regtype, regid))
+ f.write(" 2, true);\n")
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid in {"uu", "vv", "xx"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" % \
+ (regtype, regid))
+ f.write(" offsetof(CPUHexagonState, %s%sV);\n" % \
+ (regtype, regid))
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid in {"s", "u", "v", "w"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" % \
+ (regtype, regid))
+ f.write(" vreg_src_off(ctx, %s%sN);\n" % \
+ (regtype, regid))
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ elif (regid in {"d", "x", "y"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" % \
+ (regtype, regid))
+ if (hex_common.is_tmp_result(tag)):
+ f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 1, true);\n" % \
+ (regtype, regid))
+ else:
+ f.write(" ctx_future_vreg_off(ctx, %s%sN," % \
+ (regtype, regid))
+                f.write(" 1, true);\n")
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "Q"):
+ if (regid in {"d", "e", "x"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" % \
+ (regtype, regid))
+ f.write(" offsetof(CPUHexagonState,\n")
+ f.write(" future_QRegs[%s%sN]);\n" % \
+ (regtype, regid))
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid in {"s", "t", "u", "v"}):
+ f.write(" const int %s%sN = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ f.write(" const intptr_t %s%sV_off =\n" %\
+ (regtype, regid))
+ f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]);\n" % \
+ (regtype, regid))
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_decl_new(f, tag, regtype, regid, regno):
+ if (regtype == "N"):
+ if (regid in {"s", "t"}):
+ f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \
+ (regtype, regid, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid in {"t", "u", "v"}):
+ f.write(" TCGv %s%sN = hex_new_pred_value[insn->regno[%d]];\n" % \
+ (regtype, regid, regno))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "O"):
+ if (regid == "s"):
+ f.write(" const intptr_t %s%sN_num = insn->regno[%d];\n" % \
+ (regtype, regid, regno))
+ if (hex_common.skip_qemu_helper(tag)):
+ f.write(" const intptr_t %s%sN_off =\n" % \
+ (regtype, regid))
+ f.write(" ctx_future_vreg_off(ctx, %s%sN_num," % \
+ (regtype, regid))
+ f.write(" 1, true);\n")
+ else:
+ f.write(" TCGv %s%sN = tcg_constant_tl(%s%sN_num);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i):
+ if (hex_common.is_pair(regid)):
+ genptr_decl(f, tag, regtype, regid, i)
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+            genptr_decl(f, tag, regtype, regid, i)
+ elif hex_common.is_new_val(regtype, regid, tag):
+ genptr_decl_new(f, tag, regtype, regid, i)
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+
+def genptr_decl_imm(f,immlett):
+ if (immlett.isupper()):
+ i = 1
+ else:
+ i = 0
+ f.write(" int %s = insn->immed[%d];\n" % \
+ (hex_common.imm_name(immlett), i))
+
+def genptr_free(f, tag, regtype, regid, regno):
+ if (regtype == "R"):
+ if (regid in {"dd", "ss", "tt", "xx", "yy"}):
+ f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid))
+ elif (regid in {"d", "e", "x", "y"}):
+ f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
+ elif (regid not in {"s", "t", "u", "v"}):
+ print("Bad register parse: ",regtype,regid)
+ elif (regtype == "P"):
+ if (regid in {"d", "e", "x"}):
+ f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
+ elif (regid not in {"s", "t", "u", "v"}):
+ print("Bad register parse: ",regtype,regid)
+ elif (regtype == "C"):
+ if (regid in {"dd", "ss"}):
+ f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid))
+ elif (regid in {"d", "s"}):
+ f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid))
+ else:
+ print("Bad register parse: ",regtype,regid)
+ elif (regtype == "M"):
+ if (regid != "u"):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "V"):
+ if (regid in {"dd", "uu", "vv", "xx", \
+ "d", "s", "u", "v", "w", "x", "y"}):
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" tcg_temp_free_ptr(%s%sV);\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "Q"):
+ if (regid in {"d", "e", "s", "t", "u", "v", "x"}):
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" tcg_temp_free_ptr(%s%sV);\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_free_new(f, tag, regtype, regid, regno):
+ if (regtype == "N"):
+ if (regid not in {"s", "t"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid not in {"t", "u", "v"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "O"):
+ if (regid != "s"):
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_free_opn(f,regtype,regid,i,tag):
+ if (hex_common.is_pair(regid)):
+ genptr_free(f, tag, regtype, regid, i)
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+ genptr_free(f, tag, regtype, regid, i)
+ elif hex_common.is_new_val(regtype, regid, tag):
+ genptr_free_new(f, tag, regtype, regid, i)
+ else:
+            print("Bad register parse: ", regtype, regid)
+ else:
+        print("Bad register parse: ", regtype, regid)
+
+def genptr_src_read(f, tag, regtype, regid):
+ if (regtype == "R"):
+ if (regid in {"ss", "tt", "xx", "yy"}):
+ f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \
+ (regtype, regid, regtype, regid))
+ f.write(" hex_gpr[%s%sN + 1]);\n" % \
+ (regtype, regid))
+ elif (regid in {"x", "y"}):
+ f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \
+ (regtype,regid,regtype,regid))
+ elif (regid not in {"s", "t", "u", "v"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid == "x"):
+ f.write(" tcg_gen_mov_tl(%s%sV, hex_pred[%s%sN]);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid not in {"s", "t", "u", "v"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "C"):
+ if (regid == "ss"):
+ f.write(" gen_read_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid == "s"):
+ f.write(" gen_read_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "M"):
+ if (regid != "u"):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "V"):
+ if (regid in {"uu", "vv", "xx"}):
+ f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
+ (regtype, regid))
+ f.write(" vreg_src_off(ctx, %s%sN),\n" % \
+ (regtype, regid))
+ f.write(" sizeof(MMVector), sizeof(MMVector));\n")
+ f.write(" tcg_gen_gvec_mov(MO_64,\n")
+ f.write(" %s%sV_off + sizeof(MMVector),\n" % \
+ (regtype, regid))
+ f.write(" vreg_src_off(ctx, %s%sN ^ 1),\n" % \
+ (regtype, regid))
+ f.write(" sizeof(MMVector), sizeof(MMVector));\n")
+ elif (regid in {"s", "u", "v", "w"}):
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid in {"x", "y"}):
+ f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
+ (regtype, regid))
+ f.write(" vreg_src_off(ctx, %s%sN),\n" % \
+ (regtype, regid))
+ f.write(" sizeof(MMVector), sizeof(MMVector));\n")
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "Q"):
+ if (regid in {"s", "t", "u", "v"}):
+ if (not hex_common.skip_qemu_helper(tag)):
+ f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid in {"x"}):
+ f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
+ (regtype, regid))
+ f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]),\n" % \
+ (regtype, regid))
+ f.write(" sizeof(MMQReg), sizeof(MMQReg));\n")
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_src_read_new(f,regtype,regid):
+ if (regtype == "N"):
+ if (regid not in {"s", "t"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid not in {"t", "u", "v"}):
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "O"):
+ if (regid != "s"):
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_src_read_opn(f,regtype,regid,tag):
+ if (hex_common.is_pair(regid)):
+ genptr_src_read(f, tag, regtype, regid)
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+ genptr_src_read(f, tag, regtype, regid)
+ elif hex_common.is_new_val(regtype, regid, tag):
+ genptr_src_read_new(f,regtype,regid)
+ else:
+            print("Bad register parse: ", regtype, regid)
+ else:
+        print("Bad register parse: ", regtype, regid)
+
+def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i):
+ if (i > 0): f.write(", ")
+ if (hex_common.is_pair(regid)):
+ f.write("%s%sV" % (regtype,regid))
+ elif (hex_common.is_single(regid)):
+ if hex_common.is_old_val(regtype, regid, tag):
+ f.write("%s%sV" % (regtype,regid))
+ elif hex_common.is_new_val(regtype, regid, tag):
+ f.write("%s%sN" % (regtype,regid))
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+ else:
+ print("Bad register parse: ",regtype,regid,toss,numregs)
+
+def gen_helper_decl_imm(f,immlett):
+ f.write(" TCGv tcgv_%s = tcg_constant_tl(%s);\n" % \
+ (hex_common.imm_name(immlett), hex_common.imm_name(immlett)))
+
+def gen_helper_call_imm(f,immlett):
+ f.write(", tcgv_%s" % hex_common.imm_name(immlett))
+
+def genptr_dst_write_pair(f, tag, regtype, regid):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ f.write(" ctx_log_reg_write_pair(ctx, %s%sN);\n" % \
+ (regtype, regid))
+
+def genptr_dst_write(f, tag, regtype, regid):
+ if (regtype == "R"):
+ if (regid in {"dd", "xx", "yy"}):
+ genptr_dst_write_pair(f, tag, regtype, regid)
+ elif (regid in {"d", "e", "x", "y"}):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ f.write(" gen_log_predicated_reg_write(%s%sN, %s%sV,\n" % \
+ (regtype, regid, regtype, regid))
+ f.write(" insn->slot);\n")
+ else:
+ f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ f.write(" ctx_log_reg_write(ctx, %s%sN);\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "P"):
+ if (regid in {"d", "e", "x"}):
+ f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ f.write(" ctx_log_pred_write(ctx, %s%sN);\n" % \
+ (regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "C"):
+ if (regid == "dd"):
+ f.write(" gen_write_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ elif (regid == "d"):
+ f.write(" gen_write_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
+ (regtype, regid, regtype, regid))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"):
+ if (regtype == "V"):
+ if (regid in {"dd", "xx", "yy"}):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ is_predicated = "true"
+ else:
+ is_predicated = "false"
+ f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \
+ (regtype, regid, regtype, regid))
+ f.write("%s, insn->slot, %s);\n" % \
+ (newv, is_predicated))
+ f.write(" ctx_log_vreg_write_pair(ctx, %s%sN, %s,\n" % \
+ (regtype, regid, newv))
+ f.write(" %s);\n" % (is_predicated))
+ elif (regid in {"d", "x", "y"}):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ is_predicated = "true"
+ else:
+ is_predicated = "false"
+ f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s, " % \
+ (regtype, regid, regtype, regid, newv))
+ f.write("insn->slot, %s);\n" % \
+ (is_predicated))
+ f.write(" ctx_log_vreg_write(ctx, %s%sN, %s, %s);\n" % \
+ (regtype, regid, newv, is_predicated))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ elif (regtype == "Q"):
+ if (regid in {"d", "e", "x"}):
+ if ('A_CONDEXEC' in hex_common.attribdict[tag]):
+ is_predicated = "true"
+ else:
+ is_predicated = "false"
+ f.write(" gen_log_qreg_write(%s%sV_off, %s%sN, %s, " % \
+ (regtype, regid, regtype, regid, newv))
+ f.write("insn->slot, %s);\n" % (is_predicated))
+ f.write(" ctx_log_qreg_write(ctx, %s%sN, %s);\n" % \
+ (regtype, regid, is_predicated))
+ else:
+ print("Bad register parse: ", regtype, regid)
+ else:
+ print("Bad register parse: ", regtype, regid)
+
+def genptr_dst_write_opn(f,regtype, regid, tag):
+ if (hex_common.is_pair(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ if (hex_common.is_tmp_result(tag)):
+ genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP")
+ else:
+ genptr_dst_write_ext(f, tag, regtype, regid)
+ else:
+ genptr_dst_write(f, tag, regtype, regid)
+ elif (hex_common.is_single(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ if (hex_common.is_new_result(tag)):
+ genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW")
+            elif (hex_common.is_tmp_result(tag)):
+ genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP")
+ else:
+ genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL")
+ else:
+ genptr_dst_write(f, tag, regtype, regid)
+ else:
+        print("Bad register parse: ", regtype, regid)
+
+##
+## Generate the TCG code to call the helper
+## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
+## We produce:
+##     static void generate_A2_add(
+##         CPUHexagonState *env,
+##         DisasContext *ctx,
+##         Insn *insn,
+##         Packet *pkt)
+## {
+## TCGv RdV = tcg_temp_local_new();
+## const int RdN = insn->regno[0];
+## TCGv RsV = hex_gpr[insn->regno[1]];
+## TCGv RtV = hex_gpr[insn->regno[2]];
+## <GEN>
+## gen_log_reg_write(RdN, RdV);
+## ctx_log_reg_write(ctx, RdN);
+## tcg_temp_free(RdV);
+## }
+##
+## where <GEN> depends on hex_common.skip_qemu_helper(tag)
+## if hex_common.skip_qemu_helper(tag) is True
+## <GEN> is fGEN_TCG_A2_add({ RdV=RsV+RtV;});
+## if hex_common.skip_qemu_helper(tag) is False
+## <GEN> is gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
+##
+def gen_tcg_func(f, tag, regs, imms):
+ f.write("static void generate_%s(\n" %tag)
+ f.write(" CPUHexagonState *env,\n")
+ f.write(" DisasContext *ctx,\n")
+ f.write(" Insn *insn,\n")
+ f.write(" Packet *pkt)\n")
+ f.write('{\n')
+ if hex_common.need_ea(tag): gen_decl_ea_tcg(f, tag)
+ i=0
+ ## Declare all the operands (regs and immediates)
+ for regtype,regid,toss,numregs in regs:
+ genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+ for immlett,bits,immshift in imms:
+ genptr_decl_imm(f,immlett)
+
+ if 'A_PRIV' in hex_common.attribdict[tag]:
+ f.write(' fCHECKFORPRIV();\n')
+ if 'A_GUEST' in hex_common.attribdict[tag]:
+ f.write(' fCHECKFORGUEST();\n')
+
+ ## Read all the inputs
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_read(regid)):
+ genptr_src_read_opn(f,regtype,regid,tag)
+
+ if ( hex_common.skip_qemu_helper(tag) ):
+ f.write(" fGEN_TCG_%s(%s);\n" % (tag, hex_common.semdict[tag]))
+ else:
+ ## Generate the call to the helper
+ for immlett,bits,immshift in imms:
+ gen_helper_decl_imm(f,immlett)
+ if hex_common.need_part1(tag):
+ f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n")
+ if hex_common.need_slot(tag):
+ f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n")
+ f.write(" gen_helper_%s(" % (tag))
+ i=0
+ ## If there is a scalar result, it is the return type
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (hex_common.is_hvx_reg(regtype)):
+ continue
+ gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+ if (i > 0): f.write(", ")
+ f.write("cpu_env")
+ i=1
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ if (not hex_common.is_hvx_reg(regtype)):
+ continue
+ gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_read(regid)):
+ if (hex_common.is_hvx_reg(regtype) and
+ hex_common.is_readwrite(regid)):
+ continue
+ gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
+ i += 1
+ for immlett,bits,immshift in imms:
+ gen_helper_call_imm(f,immlett)
+
+ if hex_common.need_slot(tag): f.write(", slot")
+ if hex_common.need_part1(tag): f.write(", part1" )
+ f.write(");\n")
+
+ ## Write all the outputs
+ for regtype,regid,toss,numregs in regs:
+ if (hex_common.is_written(regid)):
+ genptr_dst_write_opn(f,regtype, regid, tag)
+
+ ## Free all the operands (regs and immediates)
+ if hex_common.need_ea(tag): gen_free_ea_tcg(f)
+ for regtype,regid,toss,numregs in regs:
+ genptr_free_opn(f,regtype,regid,i,tag)
+ i += 1
+
+ f.write("}\n\n")
+
+def gen_def_tcg_func(f, tag, tagregs, tagimms):
+ regs = tagregs[tag]
+ imms = tagimms[tag]
+
+ gen_tcg_func(f, tag, regs, imms)
+
+def main():
+ hex_common.read_semantics_file(sys.argv[1])
+ hex_common.read_attribs_file(sys.argv[2])
+ hex_common.read_overrides_file(sys.argv[3])
+ hex_common.read_overrides_file(sys.argv[4])
+ hex_common.calculate_attribs()
+ tagregs = hex_common.get_tagregs()
+ tagimms = hex_common.get_tagimms()
+
+ with open(sys.argv[5], 'w') as f:
+ f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
+ f.write("#define HEXAGON_TCG_FUNCS_H\n\n")
+
+ for tag in hex_common.tags:
+            ## Skip the priv instructions
+            if "A_PRIV" in hex_common.attribdict[tag]:
+                continue
+            ## Skip the guest instructions
+            if "A_GUEST" in hex_common.attribdict[tag]:
+                continue
+            ## Skip the diag instructions
+            if tag in {"Y6_diag", "Y6_diag0", "Y6_diag1"}:
+                continue
+
+ gen_def_tcg_func(f, tag, tagregs, tagimms)
+
+ f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n")
+
+if __name__ == "__main__":
+ main()
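+
+## Invocation sketch (argument order as consumed by main() above; the
+## file names are illustrative, not taken from the build scripts):
+##
+##     python3 gen_tcg_funcs.py semantics.pyinc attribs.def \
+##         gen_tcg.h gen_tcg_hvx.h tcg_funcs_generated.c.inc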
diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h
new file mode 100644
index 000000000..cdcc9382b
--- /dev/null
+++ b/target/hexagon/gen_tcg_hvx.h
@@ -0,0 +1,903 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_GEN_TCG_HVX_H
+#define HEXAGON_GEN_TCG_HVX_H
+
+/*
+ * Histogram instructions
+ *
+ * Note that these instructions operate directly on the vector registers
+ * and therefore happen after commit.
+ *
+ * The generate_<tag> function is called twice.  The first call happens
+ * during normal TCG generation (ctx->pre_commit is true); in the masked
+ * cases, we save the mask to the qtmp temporary, otherwise there is
+ * nothing to do.  The second call happens at the end of
+ * gen_commit_packet (ctx->pre_commit is false), where we generate the
+ * call to the helper.
+ */
+
+static inline void assert_vhist_tmp(DisasContext *ctx)
+{
+ /* vhist instructions require exactly one .tmp to be defined */
+ g_assert(ctx->tmp_vregs_idx == 1);
+}
+
+#define fGEN_TCG_V6_vhist(SHORTCODE) \
+ if (!ctx->pre_commit) { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vhist(cpu_env); \
+ }
+#define fGEN_TCG_V6_vhistq(SHORTCODE) \
+ do { \
+ if (ctx->pre_commit) { \
+ intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ } else { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vhistq(cpu_env); \
+ } \
+ } while (0)
+#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
+ if (!ctx->pre_commit) { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist256(cpu_env); \
+ }
+#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
+ do { \
+ if (ctx->pre_commit) { \
+ intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ } else { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist256q(cpu_env); \
+ } \
+ } while (0)
+#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
+ if (!ctx->pre_commit) { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist256_sat(cpu_env); \
+ }
+#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
+ do { \
+ if (ctx->pre_commit) { \
+ intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ } else { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist256q_sat(cpu_env); \
+ } \
+ } while (0)
+#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
+ if (!ctx->pre_commit) { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist128(cpu_env); \
+ }
+#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
+ do { \
+ if (ctx->pre_commit) { \
+ intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ } else { \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist128q(cpu_env); \
+ } \
+ } while (0)
+#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
+ if (!ctx->pre_commit) { \
+ TCGv tcgv_uiV = tcg_constant_tl(uiV); \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
+ }
+#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
+ do { \
+ if (ctx->pre_commit) { \
+ intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ } else { \
+ TCGv tcgv_uiV = tcg_constant_tl(uiV); \
+ assert_vhist_tmp(ctx); \
+ gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
+ } \
+ } while (0)
+
+
+#define fGEN_TCG_V6_vassign(SHORTCODE) \
+ tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+/* Vector conditional move */
+#define fGEN_TCG_VEC_CMOV(PRED) \
+ do { \
+ TCGv lsb = tcg_temp_new(); \
+ TCGLabel *false_label = gen_new_label(); \
+ TCGLabel *end_label = gen_new_label(); \
+ tcg_gen_andi_tl(lsb, PsV, 1); \
+ tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
+ tcg_temp_free(lsb); \
+ tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_gen_br(end_label); \
+ gen_set_label(false_label); \
+ tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
+ 1 << insn->slot); \
+ gen_set_label(end_label); \
+ } while (0)
+
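+/*
+ * Semantics sketch of the macro above (illustration only): the move
+ * happens when the predicate's LSB equals PRED; otherwise the slot is
+ * marked cancelled and no architectural write occurs.
+ *
+ *     if ((PsV & 1) == PRED) {
+ *         VdV = VuV;
+ *     } else {
+ *         hex_slot_cancelled |= 1 << insn->slot;
+ *     }
+ */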
+
+/* Vector conditional move (true) */
+#define fGEN_TCG_V6_vcmov(SHORTCODE) \
+ fGEN_TCG_VEC_CMOV(1)
+
+/* Vector conditional move (false) */
+#define fGEN_TCG_V6_vncmov(SHORTCODE) \
+ fGEN_TCG_VEC_CMOV(0)
+
+/* Vector add - various forms */
+#define fGEN_TCG_V6_vaddb(SHORTCODE) \
+ tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vaddh(SHORTCODE) \
+ tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vaddw(SHORTCODE) \
+ tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
+ tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
+ tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
+ tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+/* Vector sub - various forms */
+#define fGEN_TCG_V6_vsubb(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vsubh(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vsubw(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
+ tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
+ sizeof(MMVector) * 2, sizeof(MMVector) * 2)
+
+/* Vector shift right - various forms */
+#define fGEN_TCG_V6_vasrh(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 15); \
+ tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 15); \
+ tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vasrw(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 31); \
+ tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 31); \
+ tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 7); \
+ tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 15); \
+ tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 31); \
+ tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+/* Vector shift left - various forms */
+#define fGEN_TCG_V6_vaslb(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 7); \
+ tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vaslh(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 15); \
+ tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 15); \
+ tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vaslw(SHORTCODE) \
+ do { \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 31); \
+ tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ TCGv shift = tcg_temp_new(); \
+ tcg_gen_andi_tl(shift, RtV, 31); \
+ tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ tcg_temp_free(shift); \
+ } while (0)
+
+/* Vector max - various forms */
+#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
+ tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
+ tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
+ tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
+ tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
+ tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+/* Vector min - various forms */
+#define fGEN_TCG_V6_vminw(SHORTCODE) \
+ tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vminh(SHORTCODE) \
+ tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vminuh(SHORTCODE) \
+ tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vminb(SHORTCODE) \
+ tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+#define fGEN_TCG_V6_vminub(SHORTCODE) \
+ tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+/* Vector logical ops */
+#define fGEN_TCG_V6_vxor(SHORTCODE) \
+ tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vand(SHORTCODE) \
+ tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vor(SHORTCODE) \
+ tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vnot(SHORTCODE) \
+ tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+/* Q register logical ops */
+#define fGEN_TCG_V6_pred_or(SHORTCODE) \
+ tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+#define fGEN_TCG_V6_pred_and(SHORTCODE) \
+ tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
+ tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
+ tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
+ tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+#define fGEN_TCG_V6_pred_not(SHORTCODE) \
+ tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
+ sizeof(MMQReg), sizeof(MMQReg))
+
+/* Vector compares */
+#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ vec_to_qvec(SIZE, QdV_off, tmpoff); \
+ } while (0)
+
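+/*
+ * Illustrative reference for the word case (not part of the translator;
+ * the function name is made up for this sketch): each element compare
+ * produces all ones or all zeros, and vec_to_qvec condenses that to one
+ * predicate bit per vector byte, so a 4-byte word sets 4 identical bits
+ * in the destination QReg.
+ */
+static inline void gen_vgtw_ref(const int32_t *vu, const int32_t *vv,
+                                uint8_t *qd, int num_words)
+{
+    for (int i = 0; i < num_words; i++) {
+        for (int b = 0; b < 4; b++) {
+            int bit = 4 * i + b;
+            if (vu[i] > vv[i]) {
+                qd[bit / 8] |= 1 << (bit % 8);
+            } else {
+                qd[bit / 8] &= ~(1 << (bit % 8));
+            }
+        }
+    }
+}
+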
+#define fGEN_TCG_V6_vgtw(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
+#define fGEN_TCG_V6_vgth(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
+#define fGEN_TCG_V6_vgtb(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)
+
+#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
+#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
+#define fGEN_TCG_V6_vgtub(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)
+
+#define fGEN_TCG_V6_veqw(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
+#define fGEN_TCG_V6_veqh(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
+#define fGEN_TCG_V6_veqb(SHORTCODE) \
+ fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)
+
+#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
+ do { \
+ intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
+ intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
+ tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
+ sizeof(MMVector), sizeof(MMVector)); \
+ vec_to_qvec(SIZE, qoff, tmpoff); \
+ OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
+ } while (0)
+
+#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)
+
+#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
+#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
+#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
+ fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)
+
+/* Vector splat - various forms */
+#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
+ tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
+ sizeof(MMVector), sizeof(MMVector), RtV)
+
+#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
+ tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
+ sizeof(MMVector), sizeof(MMVector), RtV)
+
+#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
+ tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
+ sizeof(MMVector), sizeof(MMVector), RtV)
+
+/* Vector absolute value - various forms */
+#define fGEN_TCG_V6_vabsb(SHORTCODE) \
+ tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vabsh(SHORTCODE) \
+ tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+#define fGEN_TCG_V6_vabsw(SHORTCODE) \
+ tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
+ sizeof(MMVector), sizeof(MMVector))
+
+/* Vector loads */
+#define fGEN_TCG_V6_vL32b_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE) SHORTCODE
+
+/* Predicated vector loads */
+#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
+ do { \
+ TCGv LSB = tcg_temp_new(); \
+ TCGLabel *false_label = gen_new_label(); \
+ TCGLabel *end_label = gen_new_label(); \
+ GET_EA; \
+ PRED; \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
+ tcg_temp_free(LSB); \
+ gen_vreg_load(ctx, DSTOFF, EA, true); \
+ INC; \
+ tcg_gen_br(end_label); \
+ gen_set_label(false_label); \
+ tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
+ 1 << insn->slot); \
+ gen_set_label(end_label); \
+ } while (0)
+
+#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ VdV_off, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ VdV_off, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+
+#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_pi
+#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_pi
+
+#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ VdV_off, \
+ do {} while (0))
+#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ VdV_off, \
+ do {} while (0))
+
+#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ai
+#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ai
+
+#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ VdV_off, \
+ fPM_M(RxV, MuV))
+#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
+ fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ VdV_off, \
+ fPM_M(RxV, MuV))
+
+#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_pred_ppu
+#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_LOAD_npred_ppu
+
+/* Vector stores */
+#define fGEN_TCG_V6_vS32b_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE) SHORTCODE
+
+/* New value vector stores */
+#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
+ do { \
+ GET_EA; \
+ gen_vreg_store(ctx, insn, pkt, EA, OsN_off, insn->slot, true); \
+ INC; \
+ } while (0)
+
+#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
+ fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))
+
+#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_pi
+#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_pi
+
+#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
+ fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
+ do { } while (0))
+
+#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_ai
+#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_ai
+
+#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
+ fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))
+
+#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_ppu
+#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
+ fGEN_TCG_NEWVAL_VEC_STORE_ppu
+
+/* Predicated vector stores */
+#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \
+ do { \
+ TCGv LSB = tcg_temp_new(); \
+ TCGLabel *false_label = gen_new_label(); \
+ TCGLabel *end_label = gen_new_label(); \
+ GET_EA; \
+ PRED; \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
+ tcg_temp_free(LSB); \
+ gen_vreg_store(ctx, insn, pkt, EA, SRCOFF, insn->slot, ALIGN); \
+ INC; \
+ tcg_gen_br(end_label); \
+ gen_set_label(false_label); \
+ tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
+ 1 << insn->slot); \
+ gen_set_label(end_label); \
+ } while (0)
+
+#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ VsV_off, ALIGN, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ VsV_off, ALIGN, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ OsN_off, true, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ OsN_off, true, \
+ fPM_I(RxV, siV * sizeof(MMVector)))
+
+#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
+#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
+#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
+#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
+#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
+#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
+#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_pi
+#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_pi
+#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_pi
+#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_pi
+
+#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ VsV_off, ALIGN, \
+ do { } while (0))
+#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ VsV_off, ALIGN, \
+ do { } while (0))
+#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ OsN_off, true, \
+ do { } while (0))
+#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_RI(RtV, siV * sizeof(MMVector)), \
+ OsN_off, true, \
+ do { } while (0))
+
+#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
+#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
+#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
+#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
+#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
+#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
+#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_ai
+#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_ai
+#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_ai
+#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_ai
+
+#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ VsV_off, ALIGN, \
+ fPM_M(RxV, MuV))
+#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ VsV_off, ALIGN, \
+ fPM_M(RxV, MuV))
+#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
+ fEA_REG(RxV), \
+ OsN_off, true, \
+ fPM_M(RxV, MuV))
+#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
+ fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
+ fEA_REG(RxV), \
+ OsN_off, true, \
+ fPM_M(RxV, MuV))
+
+#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
+#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
+#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
+#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
+#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
+#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
+#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
+#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
+#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
+#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
+ fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
+
+/* Masked vector stores */
+#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE) SHORTCODE
+#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE) SHORTCODE
+
+/* Store release is not modelled in qemu; the self-assignments below just suppress compiler warnings */
+#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
+ do { \
+ siV = siV; \
+ } while (0)
+#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
+ do { \
+ RtV = RtV; \
+ siV = siV; \
+ } while (0)
+#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \
+ do { \
+ MuV = MuV; \
+ } while (0)
+
+#endif
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
new file mode 100644
index 000000000..4419d30e2
--- /dev/null
+++ b/target/hexagon/genptr.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "insn.h"
+#include "opcodes.h"
+#include "translate.h"
+#define QEMU_GENERATE /* Used internally by macros.h */
+#include "macros.h"
+#include "mmvec/macros.h"
+#undef QEMU_GENERATE
+#include "gen_tcg.h"
+#include "gen_tcg_hvx.h"
+
+static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot)
+{
+ TCGv zero = tcg_constant_tl(0);
+ TCGv slot_mask = tcg_temp_new();
+
+ tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
+ tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
+ val, hex_new_value[rnum]);
+ if (HEX_DEBUG) {
+ /*
+ * Do this so HELPER(debug_commit_end) will know
+ *
+ * Note that slot_mask indicates the value is not written
+ * (i.e., slot was cancelled), so we create a true/false value before
+ * or'ing with hex_reg_written[rnum].
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
+ tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
+ }
+
+ tcg_temp_free(slot_mask);
+}
+
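+/*
+ * A note on the scheme above (informal): predicated register writes are
+ * staged into hex_new_value[] rather than committed directly.  The movcond
+ * implements
+ *
+ *     new_value[rnum] = (slot_mask == 0) ? val : new_value[rnum];
+ *
+ * so a write from a cancelled slot leaves the staged value untouched and
+ * becomes a no-op at packet commit.
+ */
+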
+static inline void gen_log_reg_write(int rnum, TCGv val)
+{
+ tcg_gen_mov_tl(hex_new_value[rnum], val);
+ if (HEX_DEBUG) {
+ /* Do this so HELPER(debug_commit_end) will know */
+ tcg_gen_movi_tl(hex_reg_written[rnum], 1);
+ }
+}
+
+static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot)
+{
+ TCGv val32 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+ TCGv slot_mask = tcg_temp_new();
+
+ tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
+ /* Low word */
+ tcg_gen_extrl_i64_i32(val32, val);
+ tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
+ slot_mask, zero,
+ val32, hex_new_value[rnum]);
+ /* High word */
+ tcg_gen_extrh_i64_i32(val32, val);
+ tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
+ slot_mask, zero,
+ val32, hex_new_value[rnum + 1]);
+ if (HEX_DEBUG) {
+ /*
+ * Do this so HELPER(debug_commit_end) will know
+ *
+ * Note that slot_mask indicates the value is not written
+ * (i.e., slot was cancelled), so we create a true/false value before
+ * or'ing with hex_reg_written[rnum].
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
+ tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
+ tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
+ slot_mask);
+ }
+
+ tcg_temp_free(val32);
+ tcg_temp_free(slot_mask);
+}
+
+static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
+{
+ /* Low word */
+ tcg_gen_extrl_i64_i32(hex_new_value[rnum], val);
+ if (HEX_DEBUG) {
+ /* Do this so HELPER(debug_commit_end) will know */
+ tcg_gen_movi_tl(hex_reg_written[rnum], 1);
+ }
+
+ /* High word */
+ tcg_gen_extrh_i64_i32(hex_new_value[rnum + 1], val);
+ if (HEX_DEBUG) {
+ /* Do this so HELPER(debug_commit_end) will know */
+ tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
+ }
+}
+
+static inline void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
+{
+ TCGv base_val = tcg_temp_new();
+
+ tcg_gen_andi_tl(base_val, val, 0xff);
+
+ /*
+ * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
+ *
+ * Multiple writes to the same preg are and'ed together
+ * If this is the first predicate write in the packet, do a
+ * straight assignment. Otherwise, do an and.
+ */
+ if (!test_bit(pnum, ctx->pregs_written)) {
+ tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
+ } else {
+ tcg_gen_and_tl(hex_new_pred_value[pnum],
+ hex_new_pred_value[pnum], base_val);
+ }
+ tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
+
+ tcg_temp_free(base_val);
+}
+
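+/*
+ * Illustration (the assembly is only a plausible example): in a packet like
+ *
+ *     { p0 = cmp.eq(r0, #0); p0 = cmp.gt(r1, #10) }
+ *
+ * both writes to p0 are and'ed together per the rule above, which is why the
+ * second and later writes in a packet use tcg_gen_and_tl rather than a move.
+ */
+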
+static inline void gen_read_p3_0(TCGv control_reg)
+{
+ tcg_gen_movi_tl(control_reg, 0);
+ for (int i = 0; i < NUM_PREGS; i++) {
+ tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
+ }
+}
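+
+/*
+ * Resulting layout (from the deposit loop above): predicate register i
+ * occupies bits [8*i+7 : 8*i] of P3:0, i.e. P0 is the low byte and P3 the
+ * high byte.
+ */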
+
+/*
+ * Certain control registers require special handling on read
+ * HEX_REG_P3_0 aliased to the predicate registers
+ * -> concat the 4 predicate registers together
+ * HEX_REG_PC actual value stored in DisasContext
+ * -> assign from ctx->base.pc_next
+ * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
+ * -> add current TB changes to existing reg value
+ */
+static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
+ TCGv dest)
+{
+ if (reg_num == HEX_REG_P3_0) {
+ gen_read_p3_0(dest);
+ } else if (reg_num == HEX_REG_PC) {
+ tcg_gen_movi_tl(dest, ctx->base.pc_next);
+ } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
+ tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
+ ctx->num_packets);
+ } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
+ tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
+ ctx->num_insns);
+ } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
+ tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
+ ctx->num_hvx_insns);
+ } else {
+ tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
+ }
+}
+
+static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
+ TCGv_i64 dest)
+{
+ if (reg_num == HEX_REG_P3_0) {
+ TCGv p3_0 = tcg_temp_new();
+ gen_read_p3_0(p3_0);
+ tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
+ tcg_temp_free(p3_0);
+ } else if (reg_num == HEX_REG_PC - 1) {
+ TCGv pc = tcg_constant_tl(ctx->base.pc_next);
+ tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
+ } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
+ TCGv pkt_cnt = tcg_temp_new();
+ TCGv insn_cnt = tcg_temp_new();
+ tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
+ ctx->num_packets);
+ tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
+ ctx->num_insns);
+ tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
+ tcg_temp_free(pkt_cnt);
+ tcg_temp_free(insn_cnt);
+ } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
+ TCGv hvx_cnt = tcg_temp_new();
+ tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
+ ctx->num_hvx_insns);
+ tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
+ tcg_temp_free(hvx_cnt);
+ } else {
+ tcg_gen_concat_i32_i64(dest,
+ hex_gpr[reg_num],
+ hex_gpr[reg_num + 1]);
+ }
+}
+
+static inline void gen_write_p3_0(TCGv control_reg)
+{
+ for (int i = 0; i < NUM_PREGS; i++) {
+ tcg_gen_extract_tl(hex_pred[i], control_reg, i * 8, 8);
+ }
+}
+
+/*
+ * Certain control registers require special handling on write
+ * HEX_REG_P3_0 aliased to the predicate registers
+ * -> break the value across 4 predicate registers
+ * HEX_REG_QEMU_*_CNT changes in current TB in DisasContext
+ * -> clear the changes
+ */
+static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
+ TCGv val)
+{
+ if (reg_num == HEX_REG_P3_0) {
+ gen_write_p3_0(val);
+ } else {
+ gen_log_reg_write(reg_num, val);
+ ctx_log_reg_write(ctx, reg_num);
+ if (reg_num == HEX_REG_QEMU_PKT_CNT) {
+ ctx->num_packets = 0;
+ }
+ if (reg_num == HEX_REG_QEMU_INSN_CNT) {
+ ctx->num_insns = 0;
+ }
+ if (reg_num == HEX_REG_QEMU_HVX_CNT) {
+ ctx->num_hvx_insns = 0;
+ }
+ }
+}
+
+static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
+ TCGv_i64 val)
+{
+ if (reg_num == HEX_REG_P3_0) {
+ TCGv val32 = tcg_temp_new();
+ tcg_gen_extrl_i64_i32(val32, val);
+ gen_write_p3_0(val32);
+ tcg_gen_extrh_i64_i32(val32, val);
+ gen_log_reg_write(reg_num + 1, val32);
+ tcg_temp_free(val32);
+ ctx_log_reg_write(ctx, reg_num + 1);
+ } else {
+ gen_log_reg_write_pair(reg_num, val);
+ ctx_log_reg_write_pair(ctx, reg_num);
+ if (reg_num == HEX_REG_QEMU_PKT_CNT) {
+ ctx->num_packets = 0;
+ ctx->num_insns = 0;
+ }
+ if (reg_num == HEX_REG_QEMU_HVX_CNT) {
+ ctx->num_hvx_insns = 0;
+ }
+ }
+}
+
+static TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
+{
+ if (sign) {
+ tcg_gen_sextract_tl(result, src, N * 8, 8);
+ } else {
+ tcg_gen_extract_tl(result, src, N * 8, 8);
+ }
+ return result;
+}
+
+static TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
+{
+ TCGv_i64 res64 = tcg_temp_new_i64();
+ if (sign) {
+ tcg_gen_sextract_i64(res64, src, N * 8, 8);
+ } else {
+ tcg_gen_extract_i64(res64, src, N * 8, 8);
+ }
+ tcg_gen_extrl_i64_i32(result, res64);
+ tcg_temp_free_i64(res64);
+
+ return result;
+}
+
+static inline TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
+{
+ if (sign) {
+ tcg_gen_sextract_tl(result, src, N * 16, 16);
+ } else {
+ tcg_gen_extract_tl(result, src, N * 16, 16);
+ }
+ return result;
+}
+
+static inline void gen_set_half(int N, TCGv result, TCGv src)
+{
+ tcg_gen_deposit_tl(result, result, src, N * 16, 16);
+}
+
+static inline void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
+{
+ TCGv_i64 src64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(src64, src);
+ tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
+ tcg_temp_free_i64(src64);
+}
+
+static void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
+{
+ TCGv_i64 src64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(src64, src);
+ tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
+ tcg_temp_free_i64(src64);
+}
+
+static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
+{
+ tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
+ tcg_gen_mov_tl(hex_llsc_addr, vaddr);
+ tcg_gen_mov_tl(hex_llsc_val, dest);
+}
+
+static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
+{
+ tcg_gen_qemu_ld64(dest, vaddr, mem_index);
+ tcg_gen_mov_tl(hex_llsc_addr, vaddr);
+ tcg_gen_mov_i64(hex_llsc_val_i64, dest);
+}
+
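+/*
+ * Store-conditional is emulated with a compare-and-swap against the value
+ * captured by the matching load-locked (a common TCG idiom rather than
+ * anything Hexagon-specific): if the address still matches hex_llsc_addr,
+ * cmpxchg the remembered value with the new data and set the predicate to
+ * 0xff on success; otherwise the predicate is cleared.  hex_llsc_addr is
+ * invalidated on both paths.
+ */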
+static inline void gen_store_conditional4(DisasContext *ctx,
+ TCGv pred, TCGv vaddr, TCGv src)
+{
+ TCGLabel *fail = gen_new_label();
+ TCGLabel *done = gen_new_label();
+ TCGv one, zero, tmp;
+
+ tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);
+
+ one = tcg_constant_tl(0xff);
+ zero = tcg_constant_tl(0);
+ tmp = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
+ ctx->mem_idx, MO_32);
+ tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
+ one, zero);
+ tcg_temp_free(tmp);
+ tcg_gen_br(done);
+
+ gen_set_label(fail);
+ tcg_gen_movi_tl(pred, 0);
+
+ gen_set_label(done);
+ tcg_gen_movi_tl(hex_llsc_addr, ~0);
+}
+
+static inline void gen_store_conditional8(DisasContext *ctx,
+ TCGv pred, TCGv vaddr, TCGv_i64 src)
+{
+ TCGLabel *fail = gen_new_label();
+ TCGLabel *done = gen_new_label();
+ TCGv_i64 one, zero, tmp;
+
+ tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);
+
+ one = tcg_constant_i64(0xff);
+ zero = tcg_constant_i64(0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
+ ctx->mem_idx, MO_64);
+ tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
+ one, zero);
+ tcg_gen_extrl_i64_i32(pred, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_br(done);
+
+ gen_set_label(fail);
+ tcg_gen_movi_tl(pred, 0);
+
+ gen_set_label(done);
+ tcg_gen_movi_tl(hex_llsc_addr, ~0);
+}
+
+static inline void gen_store32(TCGv vaddr, TCGv src, int width, int slot)
+{
+ tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
+ tcg_gen_movi_tl(hex_store_width[slot], width);
+ tcg_gen_mov_tl(hex_store_val32[slot], src);
+}
+
+static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src,
+ DisasContext *ctx, int slot)
+{
+ gen_store32(vaddr, src, 1, slot);
+ ctx->store_width[slot] = 1;
+}
+
+static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
+ DisasContext *ctx, int slot)
+{
+ TCGv tmp = tcg_constant_tl(src);
+ gen_store1(cpu_env, vaddr, tmp, ctx, slot);
+}
+
+static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src,
+ DisasContext *ctx, int slot)
+{
+ gen_store32(vaddr, src, 2, slot);
+ ctx->store_width[slot] = 2;
+}
+
+static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
+ DisasContext *ctx, int slot)
+{
+ TCGv tmp = tcg_constant_tl(src);
+ gen_store2(cpu_env, vaddr, tmp, ctx, slot);
+}
+
+static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src,
+ DisasContext *ctx, int slot)
+{
+ gen_store32(vaddr, src, 4, slot);
+ ctx->store_width[slot] = 4;
+}
+
+static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
+ DisasContext *ctx, int slot)
+{
+ TCGv tmp = tcg_constant_tl(src);
+ gen_store4(cpu_env, vaddr, tmp, ctx, slot);
+}
+
+static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src,
+ DisasContext *ctx, int slot)
+{
+ tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
+ tcg_gen_movi_tl(hex_store_width[slot], 8);
+ tcg_gen_mov_i64(hex_store_val64[slot], src);
+ ctx->store_width[slot] = 8;
+}
+
+static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src,
+ DisasContext *ctx, int slot)
+{
+ TCGv_i64 tmp = tcg_constant_i64(src);
+ gen_store8(cpu_env, vaddr, tmp, ctx, slot);
+}
+
+static TCGv gen_8bitsof(TCGv result, TCGv value)
+{
+ TCGv zero = tcg_constant_tl(0);
+ TCGv ones = tcg_constant_tl(0xff);
+ tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);
+
+ return result;
+}
+
+static intptr_t vreg_src_off(DisasContext *ctx, int num)
+{
+ intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);
+
+ if (test_bit(num, ctx->vregs_select)) {
+ offset = ctx_future_vreg_off(ctx, num, 1, false);
+ }
+ if (test_bit(num, ctx->vregs_updated_tmp)) {
+ offset = ctx_tmp_vreg_off(ctx, num, 1, false);
+ }
+ return offset;
+}
+
+static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
+ VRegWriteType type, int slot_num,
+ bool is_predicated)
+{
+ TCGLabel *label_end = NULL;
+ intptr_t dstoff;
+
+ if (is_predicated) {
+ TCGv cancelled = tcg_temp_local_new();
+ label_end = gen_new_label();
+
+ /* Don't do anything if the slot was cancelled */
+ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
+ tcg_temp_free(cancelled);
+ }
+
+ if (type != EXT_TMP) {
+ dstoff = ctx_future_vreg_off(ctx, num, 1, true);
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
+ sizeof(MMVector), sizeof(MMVector));
+ tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
+ } else {
+ dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
+ sizeof(MMVector), sizeof(MMVector));
+ }
+
+ if (is_predicated) {
+ gen_set_label(label_end);
+ }
+}
+
+static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
+ VRegWriteType type, int slot_num,
+ bool is_predicated)
+{
+ gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
+ srcoff += sizeof(MMVector);
+ gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
+}
+
+static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
+ int slot_num, bool is_predicated)
+{
+ TCGLabel *label_end = NULL;
+ intptr_t dstoff;
+
+ if (is_predicated) {
+ TCGv cancelled = tcg_temp_local_new();
+ label_end = gen_new_label();
+
+ /* Don't do anything if the slot was cancelled */
+ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
+ tcg_temp_free(cancelled);
+ }
+
+ dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));
+
+ if (is_predicated) {
+ tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
+ gen_set_label(label_end);
+ }
+}
+
+static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
+ bool aligned)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ if (aligned) {
+ tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
+ }
+ for (int i = 0; i < sizeof(MMVector) / 8; i++) {
+ tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
+ tcg_gen_addi_tl(src, src, 8);
+ tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
+ }
+ tcg_temp_free_i64(tmp);
+}
+
+static void gen_vreg_store(DisasContext *ctx, Insn *insn, Packet *pkt,
+ TCGv EA, intptr_t srcoff, int slot, bool aligned)
+{
+ intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
+ intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);
+
+ if (is_gather_store_insn(insn, pkt)) {
+ TCGv sl = tcg_constant_tl(slot);
+ gen_helper_gather_store(cpu_env, EA, sl);
+ return;
+ }
+
+ tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
+ if (aligned) {
+ tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
+ ~((int32_t)sizeof(MMVector) - 1));
+ } else {
+ tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
+ }
+ tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));
+
+ /* Copy the data to the vstore buffer */
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
+ /* Set the mask to all 1's */
+ tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
+}
+
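+/*
+ * Informally: HVX stores are not issued to memory here; the data and an
+ * all-ones mask are staged in env->vstore[slot] and flushed at packet commit
+ * (see the commit_hvx_stores helper), so a store from a cancelled slot can
+ * still be dropped before anything reaches memory.
+ */
+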
+static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
+ intptr_t bitsoff, int slot, bool invert)
+{
+ intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
+ intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);
+
+ tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
+ tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
+ ~((int32_t)sizeof(MMVector) - 1));
+ tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));
+
+ /* Copy the data to the vstore buffer */
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
+ /* Copy the mask */
+ tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
+ if (invert) {
+ tcg_gen_gvec_not(MO_64, maskoff, maskoff,
+ sizeof(MMQReg), sizeof(MMQReg));
+ }
+}
+
+static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 word = tcg_temp_new_i64();
+ TCGv_i64 bits = tcg_temp_new_i64();
+ TCGv_i64 mask = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_constant_i64(0);
+ TCGv_i64 ones = tcg_constant_i64(~0);
+
+ for (int i = 0; i < sizeof(MMVector) / 8; i++) {
+ tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
+ tcg_gen_movi_i64(mask, 0);
+
+ for (int j = 0; j < 8; j += size) {
+ tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
+ tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
+ tcg_gen_deposit_i64(mask, mask, bits, j, size);
+ }
+
+ tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
+ }
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(word);
+ tcg_temp_free_i64(bits);
+ tcg_temp_free_i64(mask);
+}
+
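+/*
+ * Worked example (halfwords, size == 2): each 16-bit element of the source
+ * vector contributes 2 mask bits, all-ones if the element is nonzero and
+ * all-zeros otherwise.  In general the MMVector collapses into an MMQReg
+ * carrying one mask bit per source byte.
+ */
+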
+#include "tcg_funcs_generated.c.inc"
+#include "tcg_func_table_generated.c.inc"
diff --git a/target/hexagon/genptr.h b/target/hexagon/genptr.h
new file mode 100644
index 000000000..c158005d2
--- /dev/null
+++ b/target/hexagon/genptr.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_GENPTR_H
+#define HEXAGON_GENPTR_H
+
+#include "insn.h"
+
+extern const SemanticInsn opcode_genptr[];
+
+#endif
diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h
new file mode 100644
index 000000000..c89aa4ed4
--- /dev/null
+++ b/target/hexagon/helper.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "internal.h"
+#include "helper_protos_generated.h.inc"
+
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_RETURN, noreturn, env, i32)
+DEF_HELPER_1(debug_start_packet, void, env)
+DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int)
+DEF_HELPER_FLAGS_3(debug_commit_end, TCG_CALL_NO_WG, void, env, int, int)
+DEF_HELPER_2(commit_store, void, env, int)
+DEF_HELPER_3(gather_store, void, env, i32, int)
+DEF_HELPER_1(commit_hvx_stores, void, env)
+DEF_HELPER_FLAGS_4(fcircadd, TCG_CALL_NO_RWG_SE, s32, s32, s32, s32, s32)
+DEF_HELPER_FLAGS_1(fbrev, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_3(sfrecipa, i64, env, f32, f32)
+DEF_HELPER_2(sfinvsqrta, i64, env, f32)
+DEF_HELPER_4(vacsh_val, s64, env, s64, s64, s64)
+DEF_HELPER_FLAGS_4(vacsh_pred, TCG_CALL_NO_RWG_SE, s32, env, s64, s64, s64)
+
+/* Floating point */
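+/*
+ * Naming convention for the conversion helpers below (informal): sf/df are
+ * single/double precision floats, w/uw are signed/unsigned 32-bit words,
+ * d/ud are signed/unsigned 64-bit values, and the _chop suffix marks the
+ * truncating (round-toward-zero) variants.
+ */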
+DEF_HELPER_2(conv_sf2df, f64, env, f32)
+DEF_HELPER_2(conv_df2sf, f32, env, f64)
+DEF_HELPER_2(conv_uw2sf, f32, env, s32)
+DEF_HELPER_2(conv_uw2df, f64, env, s32)
+DEF_HELPER_2(conv_w2sf, f32, env, s32)
+DEF_HELPER_2(conv_w2df, f64, env, s32)
+DEF_HELPER_2(conv_ud2sf, f32, env, s64)
+DEF_HELPER_2(conv_ud2df, f64, env, s64)
+DEF_HELPER_2(conv_d2sf, f32, env, s64)
+DEF_HELPER_2(conv_d2df, f64, env, s64)
+DEF_HELPER_2(conv_sf2uw, i32, env, f32)
+DEF_HELPER_2(conv_sf2w, s32, env, f32)
+DEF_HELPER_2(conv_sf2ud, i64, env, f32)
+DEF_HELPER_2(conv_sf2d, s64, env, f32)
+DEF_HELPER_2(conv_df2uw, i32, env, f64)
+DEF_HELPER_2(conv_df2w, s32, env, f64)
+DEF_HELPER_2(conv_df2ud, i64, env, f64)
+DEF_HELPER_2(conv_df2d, s64, env, f64)
+DEF_HELPER_2(conv_sf2uw_chop, i32, env, f32)
+DEF_HELPER_2(conv_sf2w_chop, s32, env, f32)
+DEF_HELPER_2(conv_sf2ud_chop, i64, env, f32)
+DEF_HELPER_2(conv_sf2d_chop, s64, env, f32)
+DEF_HELPER_2(conv_df2uw_chop, i32, env, f64)
+DEF_HELPER_2(conv_df2w_chop, s32, env, f64)
+DEF_HELPER_2(conv_df2ud_chop, i64, env, f64)
+DEF_HELPER_2(conv_df2d_chop, s64, env, f64)
+DEF_HELPER_3(sfadd, f32, env, f32, f32)
+DEF_HELPER_3(sfsub, f32, env, f32, f32)
+DEF_HELPER_3(sfcmpeq, s32, env, f32, f32)
+DEF_HELPER_3(sfcmpgt, s32, env, f32, f32)
+DEF_HELPER_3(sfcmpge, s32, env, f32, f32)
+DEF_HELPER_3(sfcmpuo, s32, env, f32, f32)
+DEF_HELPER_3(sfmax, f32, env, f32, f32)
+DEF_HELPER_3(sfmin, f32, env, f32, f32)
+DEF_HELPER_3(sfclass, s32, env, f32, s32)
+DEF_HELPER_3(sffixupn, f32, env, f32, f32)
+DEF_HELPER_3(sffixupd, f32, env, f32, f32)
+DEF_HELPER_2(sffixupr, f32, env, f32)
+
+DEF_HELPER_3(dfadd, f64, env, f64, f64)
+DEF_HELPER_3(dfsub, f64, env, f64, f64)
+DEF_HELPER_3(dfmax, f64, env, f64, f64)
+DEF_HELPER_3(dfmin, f64, env, f64, f64)
+DEF_HELPER_3(dfcmpeq, s32, env, f64, f64)
+DEF_HELPER_3(dfcmpgt, s32, env, f64, f64)
+DEF_HELPER_3(dfcmpge, s32, env, f64, f64)
+DEF_HELPER_3(dfcmpuo, s32, env, f64, f64)
+DEF_HELPER_3(dfclass, s32, env, f64, s32)
+
+DEF_HELPER_3(sfmpy, f32, env, f32, f32)
+DEF_HELPER_4(sffma, f32, env, f32, f32, f32)
+DEF_HELPER_5(sffma_sc, f32, env, f32, f32, f32, f32)
+DEF_HELPER_4(sffms, f32, env, f32, f32, f32)
+DEF_HELPER_4(sffma_lib, f32, env, f32, f32, f32)
+DEF_HELPER_4(sffms_lib, f32, env, f32, f32, f32)
+
+DEF_HELPER_3(dfmpyfix, f64, env, f64, f64)
+DEF_HELPER_4(dfmpyhh, f64, env, f64, f64, f64)
+
+/* Histogram instructions */
+DEF_HELPER_1(vhist, void, env)
+DEF_HELPER_1(vhistq, void, env)
+DEF_HELPER_1(vwhist256, void, env)
+DEF_HELPER_1(vwhist256q, void, env)
+DEF_HELPER_1(vwhist256_sat, void, env)
+DEF_HELPER_1(vwhist256q_sat, void, env)
+DEF_HELPER_1(vwhist128, void, env)
+DEF_HELPER_1(vwhist128q, void, env)
+DEF_HELPER_2(vwhist128m, void, env, s32)
+DEF_HELPER_2(vwhist128qm, void, env, s32)
+
+DEF_HELPER_2(probe_pkt_scalar_store_s0, void, env, int)
+DEF_HELPER_2(probe_hvx_stores, void, env, int)
+DEF_HELPER_3(probe_pkt_scalar_hvx_stores, void, env, int, int)
diff --git a/target/hexagon/hex_arch_types.h b/target/hexagon/hex_arch_types.h
new file mode 100644
index 000000000..78ad607f5
--- /dev/null
+++ b/target/hexagon/hex_arch_types.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_ARCH_TYPES_H
+#define HEXAGON_ARCH_TYPES_H
+
+#include "qemu/osdep.h"
+#include "mmvec/mmvec.h"
+#include "qemu/int128.h"
+
+/*
+ * These types are used by the code imported from the Hexagon
+ * architecture library.
+ */
+typedef uint8_t size1u_t;
+typedef int8_t size1s_t;
+typedef uint16_t size2u_t;
+typedef int16_t size2s_t;
+typedef uint32_t size4u_t;
+typedef int32_t size4s_t;
+typedef uint64_t size8u_t;
+typedef int64_t size8s_t;
+typedef Int128 size16s_t;
+
+typedef MMVector mmvector_t;
+typedef MMVectorPair mmvector_pair_t;
+typedef MMQReg mmqreg_t;
+
+#endif
diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py
new file mode 100755
index 000000000..c81aca8d2
--- /dev/null
+++ b/target/hexagon/hex_common.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+
+##
+## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+import sys
+import re
+import string
+
+behdict = {} # tag -> behavior
+semdict = {} # tag -> semantics
+attribdict = {} # tag -> attributes
+macros = {} # macro -> macro information...
+attribinfo = {} # Register information and misc
+tags = [] # list of all tags
+overrides = {} # tags with helper overrides
+
+# A set by itself would be faster, but we return a list
+# so that the original order is preserved.
+def uniquify(seq):
+ seen = set()
+ seen_add = seen.add
+ return [x for x in seq if x not in seen and not seen_add(x)]
+
+regre = re.compile(
+ r"((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)")
+immre = re.compile(r"[#]([rRsSuUm])(\d+)(?:[:](\d+))?")
+reg_or_immre = \
+ re.compile(r"(((?<!DUP)[MNRCOPQXSGVZA])([stuvwxyzdefg]+)" + \
+ "([.]?[LlHh]?)(\d+S?))|([#]([rRsSuUm])(\d+)[:]?(\d+)?)")
+relimmre = re.compile(r"[#]([rR])(\d+)(?:[:](\d+))?")
+absimmre = re.compile(r"[#]([sSuUm])(\d+)(?:[:](\d+))?")
+
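+# For example (illustrative only), regre.findall("Rd32=add(Rs32,Rt32)")
+# yields [('R','d','','32'), ('R','s','','32'), ('R','t','','32')]:
+# regtype, regid, an optional .L/.H suffix, and the operand width.
+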
+finished_macros = set()
+
+def expand_macro_attribs(macro,allmac_re):
+ if macro.key not in finished_macros:
+ # Get a list of all things that might be macros
+ l = allmac_re.findall(macro.beh)
+ for submacro in l:
+ if not submacro: continue
+ if not macros[submacro]:
+ raise Exception("Couldn't find macro: <%s>" % submacro)
+ macro.attribs |= expand_macro_attribs(
+ macros[submacro], allmac_re)
+ finished_macros.add(macro.key)
+ return macro.attribs
+
+# When qemu needs an attribute that isn't in the imported files,
+# we'll add it here.
+def add_qemu_macro_attrib(name, attrib):
+ macros[name].attribs.add(attrib)
+
+immextre = re.compile(r'f(MUST_)?IMMEXT[(]([UuSsRr])')
+def calculate_attribs():
+ add_qemu_macro_attrib('fREAD_PC', 'A_IMPLICIT_READS_PC')
+ add_qemu_macro_attrib('fTRAP', 'A_IMPLICIT_READS_PC')
+ add_qemu_macro_attrib('fWRITE_P0', 'A_WRITES_PRED_REG')
+ add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG')
+ add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG')
+ add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG')
+ add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR')
+ add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR')
+
+ # Recurse down macros, find attributes from sub-macros
+ macroValues = list(macros.values())
+ allmacros_restr = "|".join(set([ m.re.pattern for m in macroValues ]))
+ allmacros_re = re.compile(allmacros_restr)
+ for macro in macroValues:
+ expand_macro_attribs(macro,allmacros_re)
+ # Append attributes to all instructions
+ for tag in tags:
+ for macname in allmacros_re.findall(semdict[tag]):
+ if not macname: continue
+ macro = macros[macname]
+ attribdict[tag] |= set(macro.attribs)
+ # Figure out which instructions write predicate registers
+ tagregs = get_tagregs()
+ for tag in tags:
+ regs = tagregs[tag]
+ for regtype, regid, toss, numregs in regs:
+ if regtype == "P" and is_written(regid):
+ attribdict[tag].add('A_WRITES_PRED_REG')
+
+def SEMANTICS(tag, beh, sem):
+ #print tag,beh,sem
+ behdict[tag] = beh
+ semdict[tag] = sem
+ attribdict[tag] = set()
+ tags.append(tag) # dicts have no order, this is for order
+
+def ATTRIBUTES(tag, attribstring):
+ attribstring = \
+ attribstring.replace("ATTRIBS","").replace("(","").replace(")","")
+ if not attribstring:
+ return
+ attribs = attribstring.split(",")
+ for attrib in attribs:
+ attribdict[tag].add(attrib.strip())
+
+class Macro(object):
+ __slots__ = ['key','name', 'beh', 'attribs', 're']
+ def __init__(self, name, beh, attribs):
+ self.key = name
+ self.name = name
+ self.beh = beh
+ self.attribs = set(attribs)
+ self.re = re.compile("\\b" + name + "\\b")
+
+def MACROATTRIB(macname,beh,attribstring):
+ attribstring = attribstring.replace("(","").replace(")","")
+ if attribstring:
+ attribs = attribstring.split(",")
+ else:
+ attribs = []
+ macros[macname] = Macro(macname,beh,attribs)
+
+def compute_tag_regs(tag):
+ return uniquify(regre.findall(behdict[tag]))
+
+def compute_tag_immediates(tag):
+ return uniquify(immre.findall(behdict[tag]))
+
+##
+## tagregs is the main data structure we'll use
+## tagregs[tag] will contain the registers used by an instruction
+## Within each entry, we'll use the regtype and regid fields
+## regtype can be one of the following
+## C control register
+## N new register value
+## P predicate register
+## R GPR register
+## M modifier register
+## Q HVX predicate vector
+## V HVX vector register
+## O HVX new vector register
+## regid can be one of the following
+## d, e destination register
+## dd destination register pair
+## s, t, u, v, w source register
+## ss, tt, uu, vv source register pair
+## x, y read-write register
+## xx, yy read-write register pair
+##
+def get_tagregs():
+ return dict(zip(tags, list(map(compute_tag_regs, tags))))
+
+def get_tagimms():
+ return dict(zip(tags, list(map(compute_tag_immediates, tags))))
+
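+# A sketch of the resulting shape: for A2_add, whose behavior string is
+# "Rd32=add(Rs32,Rt32)", get_tagregs()['A2_add'] would be
+# [('R','d','','32'), ('R','s','','32'), ('R','t','','32')].
+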
+def is_pair(regid):
+ return len(regid) == 2
+
+def is_single(regid):
+ return len(regid) == 1
+
+def is_written(regid):
+ return regid[0] in "dexy"
+
+def is_writeonly(regid):
+ return regid[0] in "de"
+
+def is_read(regid):
+ return regid[0] in "stuvwxy"
+
+def is_readwrite(regid):
+ return regid[0] in "xy"
+
+def is_scalar_reg(regtype):
+ return regtype in "RPC"
+
+def is_hvx_reg(regtype):
+ return regtype in "VQ"
+
+def is_old_val(regtype, regid, tag):
+ return regtype+regid+'V' in semdict[tag]
+
+def is_new_val(regtype, regid, tag):
+ return regtype+regid+'N' in semdict[tag]
+
+def need_slot(tag):
+ if ('A_CONDEXEC' in attribdict[tag] or
+ 'A_STORE' in attribdict[tag] or
+ 'A_LOAD' in attribdict[tag]):
+ return 1
+ else:
+ return 0
+
+def need_part1(tag):
+ return re.compile(r"fPART1").search(semdict[tag])
+
+def need_ea(tag):
+ return re.compile(r"\bEA\b").search(semdict[tag])
+
+def skip_qemu_helper(tag):
+ return tag in overrides.keys()
+
+def is_tmp_result(tag):
+ return ('A_CVI_TMP' in attribdict[tag] or
+ 'A_CVI_TMP_DST' in attribdict[tag])
+
+def is_new_result(tag):
+ return ('A_CVI_NEW' in attribdict[tag])
+
+def imm_name(immlett):
+ return "%siV" % immlett
+
+def read_semantics_file(name):
+ eval_line = ""
+ for line in open(name, 'rt').readlines():
+ if not line.startswith("#"):
+ eval_line += line
+ if line.endswith("\\\n"):
+ eval_line = eval_line.rstrip("\\\n")
+ else:
+ eval(eval_line.strip())
+ eval_line = ""
+
+def read_attribs_file(name):
+ attribre = re.compile(r'DEF_ATTRIB\(([A-Za-z0-9_]+), ([^,]*), ' +
+ r'"([A-Za-z0-9_\.]*)", "([A-Za-z0-9_\.]*)"\)')
+ for line in open(name, 'rt').readlines():
+ if not attribre.match(line):
+ continue
+ (attrib_base,descr,rreg,wreg) = attribre.findall(line)[0]
+ attrib_base = 'A_' + attrib_base
+ attribinfo[attrib_base] = {'rreg':rreg, 'wreg':wreg, 'descr':descr}
+
+def read_overrides_file(name):
+ overridere = re.compile(r"#define fGEN_TCG_([A-Za-z0-9_]+)\(.*")
+ for line in open(name, 'rt').readlines():
+ if not overridere.match(line):
+ continue
+ tag = overridere.findall(line)[0]
+ overrides[tag] = True
diff --git a/target/hexagon/hex_regs.h b/target/hexagon/hex_regs.h
new file mode 100644
index 000000000..e1b3149b0
--- /dev/null
+++ b/target/hexagon/hex_regs.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_REGS_H
+#define HEXAGON_REGS_H
+
+enum {
+ HEX_REG_R00 = 0,
+ HEX_REG_R01 = 1,
+ HEX_REG_R02 = 2,
+ HEX_REG_R03 = 3,
+ HEX_REG_R04 = 4,
+ HEX_REG_R05 = 5,
+ HEX_REG_R06 = 6,
+ HEX_REG_R07 = 7,
+ HEX_REG_R08 = 8,
+ HEX_REG_R09 = 9,
+ HEX_REG_R10 = 10,
+ HEX_REG_R11 = 11,
+ HEX_REG_R12 = 12,
+ HEX_REG_R13 = 13,
+ HEX_REG_R14 = 14,
+ HEX_REG_R15 = 15,
+ HEX_REG_R16 = 16,
+ HEX_REG_R17 = 17,
+ HEX_REG_R18 = 18,
+ HEX_REG_R19 = 19,
+ HEX_REG_R20 = 20,
+ HEX_REG_R21 = 21,
+ HEX_REG_R22 = 22,
+ HEX_REG_R23 = 23,
+ HEX_REG_R24 = 24,
+ HEX_REG_R25 = 25,
+ HEX_REG_R26 = 26,
+ HEX_REG_R27 = 27,
+ HEX_REG_R28 = 28,
+ HEX_REG_R29 = 29,
+ HEX_REG_SP = 29,
+ HEX_REG_FP = 30,
+ HEX_REG_R30 = 30,
+ HEX_REG_LR = 31,
+ HEX_REG_R31 = 31,
+ HEX_REG_SA0 = 32,
+ HEX_REG_LC0 = 33,
+ HEX_REG_SA1 = 34,
+ HEX_REG_LC1 = 35,
+ HEX_REG_P3_0 = 36,
+ HEX_REG_M0 = 38,
+ HEX_REG_M1 = 39,
+ HEX_REG_USR = 40,
+ HEX_REG_PC = 41,
+ HEX_REG_UGP = 42,
+ HEX_REG_GP = 43,
+ HEX_REG_CS0 = 44,
+ HEX_REG_CS1 = 45,
+ HEX_REG_UPCYCLELO = 46,
+ HEX_REG_UPCYCLEHI = 47,
+ HEX_REG_FRAMELIMIT = 48,
+ HEX_REG_FRAMEKEY = 49,
+ HEX_REG_PKTCNTLO = 50,
+ HEX_REG_PKTCNTHI = 51,
+ /* Use reserved control registers for qemu execution counts */
+ HEX_REG_QEMU_PKT_CNT = 52,
+ HEX_REG_QEMU_INSN_CNT = 53,
+ HEX_REG_QEMU_HVX_CNT = 54,
+ HEX_REG_UTIMERLO = 62,
+ HEX_REG_UTIMERHI = 63,
+};
+
+#endif
diff --git a/target/hexagon/iclass.c b/target/hexagon/iclass.c
new file mode 100644
index 000000000..609128699
--- /dev/null
+++ b/target/hexagon/iclass.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "iclass.h"
+
+static const SlotMask iclass_info[] = {
+
+#define DEF_PP_ICLASS32(TYPE, SLOTS, UNITS) \
+ [ICLASS_FROM_TYPE(TYPE)] = SLOTS_##SLOTS,
+#define DEF_EE_ICLASS32(TYPE, SLOTS, UNITS) \
+ [ICLASS_FROM_TYPE(TYPE)] = SLOTS_##SLOTS,
+#include "imported/iclass.def"
+#undef DEF_PP_ICLASS32
+#undef DEF_EE_ICLASS32
+};
+
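+/*
+ * The table above holds the default slot mask for each instruction class;
+ * find_iclass_slots() overrides that default for instructions whose
+ * attributes demand it, e.g. A_RESTRICT_SLOT0ONLY forces SLOTS_0 and
+ * A_CALL forces SLOTS_23.
+ */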
+SlotMask find_iclass_slots(Opcode opcode, int itype)
+{
+ /* There are some exceptions to what the iclass dictates */
+ if (GET_ATTRIB(opcode, A_ICOP)) {
+ return SLOTS_2;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT0ONLY)) {
+ return SLOTS_0;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT1ONLY)) {
+ return SLOTS_1;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT2ONLY)) {
+ return SLOTS_2;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT3ONLY)) {
+ return SLOTS_3;
+ } else if (GET_ATTRIB(opcode, A_COF) &&
+ GET_ATTRIB(opcode, A_INDIRECT) &&
+ !GET_ATTRIB(opcode, A_MEMLIKE) &&
+ !GET_ATTRIB(opcode, A_MEMLIKE_PACKET_RULES)) {
+ return SLOTS_2;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_NOSLOT1)) {
+ return SLOTS_0;
+ } else if ((opcode == J2_trap0) ||
+ (opcode == Y2_isync) ||
+ (opcode == J2_pause) || (opcode == J4_hintjumpr)) {
+ return SLOTS_2;
+ } else if (GET_ATTRIB(opcode, A_CRSLOT23)) {
+ return SLOTS_23;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_PREFERSLOT0)) {
+ return SLOTS_0;
+ } else if (GET_ATTRIB(opcode, A_SUBINSN)) {
+ return SLOTS_01;
+ } else if (GET_ATTRIB(opcode, A_CALL)) {
+ return SLOTS_23;
+ } else if ((opcode == J4_jumpseti) || (opcode == J4_jumpsetr)) {
+ return SLOTS_23;
+ } else {
+ return iclass_info[itype];
+ }
+}
diff --git a/target/hexagon/iclass.h b/target/hexagon/iclass.h
new file mode 100644
index 000000000..78d372621
--- /dev/null
+++ b/target/hexagon/iclass.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_ICLASS_H
+#define HEXAGON_ICLASS_H
+
+#include "attribs.h"
+
+#define ICLASS_FROM_TYPE(TYPE) ICLASS_##TYPE
+
+enum {
+
+#define DEF_PP_ICLASS32(TYPE, SLOTS, UNITS) ICLASS_FROM_TYPE(TYPE),
+#define DEF_EE_ICLASS32(TYPE, SLOTS, UNITS) ICLASS_FROM_TYPE(TYPE),
+#include "imported/iclass.def"
+#undef DEF_PP_ICLASS32
+#undef DEF_EE_ICLASS32
+
+ ICLASS_FROM_TYPE(COPROC_VX),
+ ICLASS_FROM_TYPE(COPROC_VMEM),
+ NUM_ICLASSES
+};
+
+typedef enum {
+ SLOTS_0 = (1 << 0),
+ SLOTS_1 = (1 << 1),
+ SLOTS_2 = (1 << 2),
+ SLOTS_3 = (1 << 3),
+ SLOTS_01 = SLOTS_0 | SLOTS_1,
+ SLOTS_23 = SLOTS_2 | SLOTS_3,
+ SLOTS_0123 = SLOTS_0 | SLOTS_1 | SLOTS_2 | SLOTS_3,
+} SlotMask;
+
+SlotMask find_iclass_slots(Opcode opcode, int itype);
+
+#endif
diff --git a/target/hexagon/imported/allext.idef b/target/hexagon/imported/allext.idef
new file mode 100644
index 000000000..9d4b23e9d
--- /dev/null
+++ b/target/hexagon/imported/allext.idef
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Top level file for all instruction set extensions
+ */
+#define EXTNAME mmvec
+#define EXTSTR "mmvec"
+#include "mmvec/ext.idef"
+#undef EXTNAME
+#undef EXTSTR
diff --git a/target/hexagon/imported/allext_macros.def b/target/hexagon/imported/allext_macros.def
new file mode 100644
index 000000000..9c9119915
--- /dev/null
+++ b/target/hexagon/imported/allext_macros.def
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Top level file for all instruction set extensions
+ */
+#define EXTNAME mmvec
+#define EXTSTR "mmvec"
+#include "mmvec/macros.def"
+#undef EXTNAME
+#undef EXTSTR
diff --git a/target/hexagon/imported/allextenc.def b/target/hexagon/imported/allextenc.def
new file mode 100644
index 000000000..39a3e93fa
--- /dev/null
+++ b/target/hexagon/imported/allextenc.def
@@ -0,0 +1,20 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define EXTNAME mmvec
+#include "mmvec/encode_ext.def"
+#undef EXTNAME
diff --git a/target/hexagon/imported/allidefs.def b/target/hexagon/imported/allidefs.def
new file mode 100644
index 000000000..ee253b83a
--- /dev/null
+++ b/target/hexagon/imported/allidefs.def
@@ -0,0 +1,31 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Top level instruction definition file
+ */
+
+#include "branch.idef"
+#include "ldst.idef"
+#include "compare.idef"
+#include "mpy.idef"
+#include "alu.idef"
+#include "float.idef"
+#include "shift.idef"
+#include "system.idef"
+#include "subinsns.idef"
+#include "allext.idef"
diff --git a/target/hexagon/imported/alu.idef b/target/hexagon/imported/alu.idef
new file mode 100644
index 000000000..58477ae40
--- /dev/null
+++ b/target/hexagon/imported/alu.idef
@@ -0,0 +1,1302 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * ALU Instructions
+ */
+
+
+/**********************************************/
+/* Add/Sub instructions */
+/**********************************************/
+
+Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(),
+"Add 32-bit registers",
+{ RdV=RsV+RtV;})
+
+Q6INSN(A2_sub,"Rd32=sub(Rt32,Rs32)",ATTRIBS(),
+"Subtract 32-bit registers",
+{ RdV=RtV-RsV;})
+
+#define COND_ALU(TAG,OPER,DESCR,SEMANTICS)\
+Q6INSN(TAG##t,"if (Pu4) "OPER,ATTRIBS(A_ARCHV2),DESCR,{if(fLSBOLD(PuV)){SEMANTICS;} else {CANCEL;}})\
+Q6INSN(TAG##f,"if (!Pu4) "OPER,ATTRIBS(A_ARCHV2),DESCR,{if(fLSBOLDNOT(PuV)){SEMANTICS;} else {CANCEL;}})\
+Q6INSN(TAG##tnew,"if (Pu4.new) " OPER,ATTRIBS(A_ARCHV2),DESCR,{if(fLSBNEW(PuN)){SEMANTICS;} else {CANCEL;}})\
+Q6INSN(TAG##fnew,"if (!Pu4.new) "OPER,ATTRIBS(A_ARCHV2),DESCR,{if(fLSBNEWNOT(PuN)){SEMANTICS;} else {CANCEL;}})
+
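+/*
+ * Each COND_ALU use below expands into four conditional variants of the
+ * base instruction: predicate true (t), predicate false (f), and the
+ * dot-new forms (tnew, fnew).  COND_ALU(A2_padd,...), for instance,
+ * defines A2_paddt, A2_paddf, A2_paddtnew and A2_paddfnew.
+ */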
+COND_ALU(A2_padd,"Rd32=add(Rs32,Rt32)","Conditionally Add 32-bit registers",RdV=RsV+RtV)
+COND_ALU(A2_psub,"Rd32=sub(Rt32,Rs32)","Conditionally Subtract 32-bit registers",RdV=RtV-RsV)
+COND_ALU(A2_paddi,"Rd32=add(Rs32,#s8)","Conditionally Add Register and immediate",fIMMEXT(siV); RdV=RsV+siV)
+COND_ALU(A2_pxor,"Rd32=xor(Rs32,Rt32)","Conditionally XOR registers",RdV=RsV^RtV)
+COND_ALU(A2_pand,"Rd32=and(Rs32,Rt32)","Conditionally AND registers",RdV=RsV&RtV)
+COND_ALU(A2_por,"Rd32=or(Rs32,Rt32)","Conditionally OR registers",RdV=RsV|RtV)
+
+COND_ALU(A4_psxtb,"Rd32=sxtb(Rs32)","Conditionally sign-extend byte", RdV=fSXTN(8,32,RsV))
+COND_ALU(A4_pzxtb,"Rd32=zxtb(Rs32)","Conditionally zero-extend byte", RdV=fZXTN(8,32,RsV))
+COND_ALU(A4_psxth,"Rd32=sxth(Rs32)","Conditionally sign-extend halfword", RdV=fSXTN(16,32,RsV))
+COND_ALU(A4_pzxth,"Rd32=zxth(Rs32)","Conditionally zero-extend halfword", RdV=fZXTN(16,32,RsV))
+COND_ALU(A4_paslh,"Rd32=aslh(Rs32)","Conditionally arithmetic shift left by halfword", RdV=RsV<<16)
+COND_ALU(A4_pasrh,"Rd32=asrh(Rs32)","Conditionally arithmetic shift right by halfword", RdV=RsV>>16)
+
+
+Q6INSN(A2_addsat,"Rd32=add(Rs32,Rt32):sat",ATTRIBS(),
+"Add 32-bit registers with saturation",
+{ RdV=fSAT(fSE32_64(RsV)+fSE32_64(RtV)); })
+
+Q6INSN(A2_subsat,"Rd32=sub(Rt32,Rs32):sat",ATTRIBS(),
+"Subtract 32-bit registers with saturation",
+{ RdV=fSAT(fSE32_64(RtV) - fSE32_64(RsV)); })
+
+
+Q6INSN(A2_addi,"Rd32=add(Rs32,#s16)",ATTRIBS(),
+"Add a signed immediate to a register",
+{ fIMMEXT(siV); RdV=RsV+siV;})
+
+
+Q6INSN(C4_addipc,"Rd32=add(pc,#u6)",ATTRIBS(),
+"Add immediate to PC",
+{ RdV=fREAD_PC()+fIMMEXT(uiV);})
+
+
+
+/**********************************************/
+/* Single-precision HL forms */
+/* These insns and the SP mpy are the ones */
+/* that can do .HL stuff */
+/**********************************************/
+#define STD_HL_INSN(TAG,OPER,AOPER,ATR,SEM)\
+Q6INSN(A2_##TAG##_ll, OPER"(Rt.L32,Rs.L32)"AOPER, ATR,"",{SEM(fGETHALF(0,RtV),fGETHALF(0,RsV));})\
+Q6INSN(A2_##TAG##_lh, OPER"(Rt.L32,Rs.H32)"AOPER, ATR,"",{SEM(fGETHALF(0,RtV),fGETHALF(1,RsV));})\
+Q6INSN(A2_##TAG##_hl, OPER"(Rt.H32,Rs.L32)"AOPER, ATR,"",{SEM(fGETHALF(1,RtV),fGETHALF(0,RsV));})\
+Q6INSN(A2_##TAG##_hh, OPER"(Rt.H32,Rs.H32)"AOPER, ATR,"",{SEM(fGETHALF(1,RtV),fGETHALF(1,RsV));})
+
+#define SUBSTD_HL_INSN(TAG,OPER,AOPER,ATR,SEM)\
+Q6INSN(A2_##TAG##_ll, OPER"(Rt.L32,Rs.L32)"AOPER, ATR,"",{SEM(fGETHALF(0,RtV),fGETHALF(0,RsV));})\
+Q6INSN(A2_##TAG##_hl, OPER"(Rt.L32,Rs.H32)"AOPER, ATR,"",{SEM(fGETHALF(0,RtV),fGETHALF(1,RsV));})
+
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=fSXTN(16,32,(A+B))
+SUBSTD_HL_INSN(addh_l16,"Rd32=add","",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=fSATH(A+B)
+SUBSTD_HL_INSN(addh_l16_sat,"Rd32=add",":sat",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=fSXTN(16,32,(A-B))
+SUBSTD_HL_INSN(subh_l16,"Rd32=sub","",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=fSATH(A-B)
+SUBSTD_HL_INSN(subh_l16_sat,"Rd32=sub",":sat",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=(A+B)<<16
+STD_HL_INSN(addh_h16,"Rd32=add",":<<16",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=(fSATH(A+B))<<16
+STD_HL_INSN(addh_h16_sat,"Rd32=add",":sat:<<16",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=(A-B)<<16
+STD_HL_INSN(subh_h16,"Rd32=sub",":<<16",ATTRIBS(),HLSEM)
+
+#undef HLSEM
+#define HLSEM(A,B) RdV=(fSATH(A-B))<<16
+STD_HL_INSN(subh_h16_sat,"Rd32=sub",":sat:<<16",ATTRIBS(),HLSEM)
+
+
+
+
+Q6INSN(A2_aslh,"Rd32=aslh(Rs32)",ATTRIBS(),
+"Arithmetic Shift Left by Halfword",{ RdV=RsV<<16; })
+
+Q6INSN(A2_asrh,"Rd32=asrh(Rs32)",ATTRIBS(),
+"Arithmetic Shift Right by Halfword",{ RdV=RsV>>16; })
+
+
+/* 64-bit versions */
+
+Q6INSN(A2_addp,"Rdd32=add(Rss32,Rtt32)",ATTRIBS(),
+"Add",
+{ RddV=RssV+RttV;})
+
+Q6INSN(A2_addpsat,"Rdd32=add(Rss32,Rtt32):sat",ATTRIBS(A_ARCHV3),
+"Add",
+{ fADDSAT64(RddV,RssV,RttV);})
+
+Q6INSN(A2_addspl,"Rdd32=add(Rss32,Rtt32):raw:lo",ATTRIBS(A_ARCHV3),
+"Add",
+{ RddV=RttV+fSXTN(32,64,fGETWORD(0,RssV));})
+
+Q6INSN(A2_addsph,"Rdd32=add(Rss32,Rtt32):raw:hi",ATTRIBS(A_ARCHV3),
+"Add",
+{ RddV=RttV+fSXTN(32,64,fGETWORD(1,RssV));})
+
+Q6INSN(A2_subp,"Rdd32=sub(Rtt32,Rss32)",ATTRIBS(),
+"Sub",
+{ RddV=RttV-RssV;})
+
+/* 64-bit with carry */
+
+Q6INSN(A4_addp_c,"Rdd32=add(Rss32,Rtt32,Px4):carry",ATTRIBS(),"Add with Carry",
+{
+ RddV = RssV + RttV + fLSBOLD(PxV);
+ PxV = f8BITSOF(fCARRY_FROM_ADD(RssV,RttV,fLSBOLD(PxV)));
+})
+
+Q6INSN(A4_subp_c,"Rdd32=sub(Rss32,Rtt32,Px4):carry",ATTRIBS(),"Sub with Carry",
+{
+ RddV = RssV + ~RttV + fLSBOLD(PxV);
+ PxV = f8BITSOF(fCARRY_FROM_ADD(RssV,~RttV,fLSBOLD(PxV)));
+})
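+
+/* A minimal C model of the carry predicate (assuming fCARRY_FROM_ADD
+   computes the carry-out of the full 64-bit a + b + carry-in; the
+   helper name is illustrative). Note how subtract-with-carry above is
+   expressed as a + ~b + carry-in over the same primitive. */
+#if 0
+#include <stdint.h>
+#include <stdbool.h>
+static inline bool carry_from_add64(uint64_t a, uint64_t b, bool cin)
+{
+    uint64_t s = a + b;
+    bool c = s < a;                 /* carry out of a + b */
+    s += cin;
+    return c || (cin && s == 0);    /* carry out of the +cin step */
+}
+#endif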
+
+
+/* NEG and ABS */
+
+Q6INSN(A2_negsat,"Rd32=neg(Rs32):sat",ATTRIBS(),
+"Arithmetic negate register", { RdV = fSAT(-fCAST8s(RsV)); })
+
+Q6INSN(A2_abs,"Rd32=abs(Rs32)",ATTRIBS(),
+"Absolute Value register", { RdV = fABS(RsV); })
+
+Q6INSN(A2_abssat,"Rd32=abs(Rs32):sat",ATTRIBS(),
+"Arithmetic negate register", { RdV = fSAT(fABS(fCAST4_8s(RsV))); })
+
+Q6INSN(A2_vconj,"Rdd32=vconj(Rss32):sat",ATTRIBS(A_ARCHV2),
+"Vector Complex conjugate of Rss",
+{ fSETHALF(1,RddV,fSATN(16,-fGETHALF(1,RssV)));
+ fSETHALF(0,RddV,fGETHALF(0,RssV));
+ fSETHALF(3,RddV,fSATN(16,-fGETHALF(3,RssV)));
+ fSETHALF(2,RddV,fGETHALF(2,RssV));
+})
+
+
+/* 64-bit versions */
+
+Q6INSN(A2_negp,"Rdd32=neg(Rss32)",ATTRIBS(),
+"Arithmetic negate register", { RddV = -RssV; })
+
+Q6INSN(A2_absp,"Rdd32=abs(Rss32)",ATTRIBS(),
+"Absolute Value register", { RddV = fABS(RssV); })
+
+
+/* MIN and MAX R */
+
+Q6INSN(A2_max,"Rd32=max(Rs32,Rt32)",ATTRIBS(),
+"Maximum of two registers",
+{ RdV = fMAX(RsV,RtV); })
+
+Q6INSN(A2_maxu,"Rd32=maxu(Rs32,Rt32)",ATTRIBS(),
+"Maximum of two registers (unsigned)",
+{ RdV = fMAX(fCAST4u(RsV),fCAST4u(RtV)); })
+
+Q6INSN(A2_min,"Rd32=min(Rt32,Rs32)",ATTRIBS(),
+"Minimum of two registers",
+{ RdV = fMIN(RtV,RsV); })
+
+Q6INSN(A2_minu,"Rd32=minu(Rt32,Rs32)",ATTRIBS(),
+"Minimum of two registers (unsigned)",
+{ RdV = fMIN(fCAST4u(RtV),fCAST4u(RsV)); })
+
+/* MIN and MAX Pairs */
+#if 1
+Q6INSN(A2_maxp,"Rdd32=max(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),
+"Maximum of two register pairs",
+{ RddV = fMAX(RssV,RttV); })
+
+Q6INSN(A2_maxup,"Rdd32=maxu(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),
+"Maximum of two register pairs (unsigned)",
+{ RddV = fMAX(fCAST8u(RssV),fCAST8u(RttV)); })
+
+Q6INSN(A2_minp,"Rdd32=min(Rtt32,Rss32)",ATTRIBS(A_ARCHV3),
+"Minimum of two register pairs",
+{ RddV = fMIN(RttV,RssV); })
+
+Q6INSN(A2_minup,"Rdd32=minu(Rtt32,Rss32)",ATTRIBS(A_ARCHV3),
+"Minimum of two register pairs (unsigned)",
+{ RddV = fMIN(fCAST8u(RttV),fCAST8u(RssV)); })
+#endif
+
+/**********************************************/
+/* Register and Immediate Transfers */
+/**********************************************/
+
+Q6INSN(A2_nop,"nop",ATTRIBS(A_IT_NOP),
+"Nop (32-bit encoding)",
+ fHIDE( { } ))
+
+
+Q6INSN(A4_ext,"immext(#u26:6)",ATTRIBS(A_IT_EXTENDER),
+"This instruction carries the 26 most-significant immediate bits for the next instruction",
+{ fHIDE(); })
+
+
+Q6INSN(A2_tfr,"Rd32=Rs32",ATTRIBS(),
+"tfr register",{ RdV=RsV;})
+
+Q6INSN(A2_tfrsi,"Rd32=#s16",ATTRIBS(),
+"transfer signed immediate to register",{ fIMMEXT(siV); RdV=siV;})
+
+Q6INSN(A2_sxtb,"Rd32=sxtb(Rs32)",ATTRIBS(),
+"Sign extend byte", {RdV = fSXTN(8,32,RsV);})
+
+Q6INSN(A2_zxth,"Rd32=zxth(Rs32)",ATTRIBS(),
+"Zero extend half", {RdV = fZXTN(16,32,RsV);})
+
+Q6INSN(A2_sxth,"Rd32=sxth(Rs32)",ATTRIBS(),
+"Sign extend half", {RdV = fSXTN(16,32,RsV);})
+
+Q6INSN(A2_combinew,"Rdd32=combine(Rs32,Rt32)",ATTRIBS(),
+"Combine two words into a register pair",
+{ fSETWORD(0,RddV,RtV);
+ fSETWORD(1,RddV,RsV);
+})
+
+Q6INSN(A4_combineri,"Rdd32=combine(Rs32,#s8)",ATTRIBS(),
+"Combine a word and an immediate into a register pair",
+{ fIMMEXT(siV); fSETWORD(0,RddV,siV);
+ fSETWORD(1,RddV,RsV);
+})
+
+Q6INSN(A4_combineir,"Rdd32=combine(#s8,Rs32)",ATTRIBS(),
+"Combine a word and an immediate into a register pair",
+{ fIMMEXT(siV); fSETWORD(0,RddV,RsV);
+ fSETWORD(1,RddV,siV);
+})
+
+
+
+Q6INSN(A2_combineii,"Rdd32=combine(#s8,#S8)",ATTRIBS(A_ARCHV2),
+"Set two small immediates",
+{ fIMMEXT(siV); fSETWORD(0,RddV,SiV); fSETWORD(1,RddV,siV); })
+
+Q6INSN(A4_combineii,"Rdd32=combine(#s8,#U6)",ATTRIBS(),"Set two small immediates",
+{ fIMMEXT(UiV); fSETWORD(0,RddV,UiV); fSETWORD(1,RddV,siV); })
+
+
+Q6INSN(A2_combine_hh,"Rd32=combine(Rt.H32,Rs.H32)",ATTRIBS(),
+"Combine two halfs into a register", {RdV = (fGETUHALF(1,RtV)<<16) | fGETUHALF(1,RsV);})
+
+Q6INSN(A2_combine_hl,"Rd32=combine(Rt.H32,Rs.L32)",ATTRIBS(),
+"Combine two halfs into a register", {RdV = (fGETUHALF(1,RtV)<<16) | fGETUHALF(0,RsV);})
+
+Q6INSN(A2_combine_lh,"Rd32=combine(Rt.L32,Rs.H32)",ATTRIBS(),
+"Combine two halfs into a register", {RdV = (fGETUHALF(0,RtV)<<16) | fGETUHALF(1,RsV);})
+
+Q6INSN(A2_combine_ll,"Rd32=combine(Rt.L32,Rs.L32)",ATTRIBS(),
+"Combine two halfs into a register", {RdV = (fGETUHALF(0,RtV)<<16) | fGETUHALF(0,RsV);})
+
+Q6INSN(A2_tfril,"Rx.L32=#u16",ATTRIBS(),
+"Set low 16-bits, leave upper 16 unchanged",{ fSETHALF(0,RxV,uiV);})
+
+Q6INSN(A2_tfrih,"Rx.H32=#u16",ATTRIBS(),
+"Set high 16-bits, leave low 16 unchanged",{ fSETHALF(1,RxV,uiV);})
+
+Q6INSN(A2_tfrcrr,"Rd32=Cs32",ATTRIBS(),
+"transfer control register to general register",{ RdV=CsV;})
+
+Q6INSN(A2_tfrrcr,"Cd32=Rs32",ATTRIBS(),
+"transfer general register to control register",{ CdV=RsV;})
+
+Q6INSN(A4_tfrcpp,"Rdd32=Css32",ATTRIBS(),
+"transfer control register to general register",{ RddV=CssV;})
+
+Q6INSN(A4_tfrpcp,"Cdd32=Rss32",ATTRIBS(),
+"transfer general register to control register",{ CddV=RssV;})
+
+
+/**********************************************/
+/* Logicals */
+/**********************************************/
+
+Q6INSN(A2_and,"Rd32=and(Rs32,Rt32)",ATTRIBS(),
+"logical AND",{ RdV=RsV&RtV;})
+
+Q6INSN(A2_or,"Rd32=or(Rs32,Rt32)",ATTRIBS(),
+"logical OR",{ RdV=RsV|RtV;})
+
+Q6INSN(A2_xor,"Rd32=xor(Rs32,Rt32)",ATTRIBS(),
+"logical XOR",{ RdV=RsV^RtV;})
+
+Q6INSN(M2_xor_xacc,"Rx32^=xor(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"logical XOR with XOR accumulation",{ RxV^=RsV^RtV;})
+
+Q6INSN(M4_xor_xacc,"Rxx32^=xor(Rss32,Rtt32)",,
+"logical XOR with XOR accumulation",{ RxxV^=RssV^RttV;})
+
+
+
+Q6INSN(A4_andn,"Rd32=and(Rt32,~Rs32)",,
+"And-Not", { RdV = (RtV & ~RsV); })
+
+Q6INSN(A4_orn,"Rd32=or(Rt32,~Rs32)",,
+"Or-Not", { RdV = (RtV | ~RsV); })
+
+
+Q6INSN(A4_andnp,"Rdd32=and(Rtt32,~Rss32)",,
+"And-Not", { RddV = (RttV & ~RssV); })
+
+Q6INSN(A4_ornp,"Rdd32=or(Rtt32,~Rss32)",,
+"Or-Not", { RddV = (RttV | ~RssV); })
+
+
+
+
+/********************/
+/* Compound add-add */
+/********************/
+
+Q6INSN(S4_addaddi,"Rd32=add(Rs32,add(Ru32,#s6))",ATTRIBS(),
+ "3-input add",
+ { RdV = RsV + RuV + fIMMEXT(siV); })
+
+
+Q6INSN(S4_subaddi,"Rd32=add(Rs32,sub(#s6,Ru32))",ATTRIBS(),
+ "3-input sub",
+ { RdV = RsV - RuV + fIMMEXT(siV); })
+
+
+
+/****************************/
+/* Compound logical-logical */
+/****************************/
+
+Q6INSN(M4_and_and,"Rx32&=and(Rs32,Rt32)",ATTRIBS(),
+"Compound And-And", { RxV &= (RsV & RtV); })
+
+Q6INSN(M4_and_andn,"Rx32&=and(Rs32,~Rt32)",ATTRIBS(),
+"Compound And-Andn", { RxV &= (RsV & ~RtV); })
+
+Q6INSN(M4_and_or,"Rx32&=or(Rs32,Rt32)",ATTRIBS(),
+"Compound And-Or", { RxV &= (RsV | RtV); })
+
+Q6INSN(M4_and_xor,"Rx32&=xor(Rs32,Rt32)",ATTRIBS(),
+"Compound And-xor", { RxV &= (RsV ^ RtV); })
+
+
+
+Q6INSN(M4_or_and,"Rx32|=and(Rs32,Rt32)",ATTRIBS(),
+"Compound Or-And", { RxV |= (RsV & RtV); })
+
+Q6INSN(M4_or_andn,"Rx32|=and(Rs32,~Rt32)",ATTRIBS(),
+"Compound Or-AndN", { RxV |= (RsV & ~RtV); })
+
+Q6INSN(M4_or_or,"Rx32|=or(Rs32,Rt32)",ATTRIBS(),
+"Compound Or-Or", { RxV |= (RsV | RtV); })
+
+Q6INSN(M4_or_xor,"Rx32|=xor(Rs32,Rt32)",ATTRIBS(),
+"Compound Or-xor", { RxV |= (RsV ^ RtV); })
+
+
+Q6INSN(S4_or_andix,"Rx32=or(Ru32,and(Rx32,#s10))",ATTRIBS(),
+"Compound Or-And", { RxV = RuV | (RxV & fIMMEXT(siV)); })
+
+Q6INSN(S4_or_andi,"Rx32|=and(Rs32,#s10)",ATTRIBS(),
+"Compound Or-And", { RxV = RxV | (RsV & fIMMEXT(siV)); })
+
+Q6INSN(S4_or_ori,"Rx32|=or(Rs32,#s10)",ATTRIBS(),
+"Compound Or-And", { RxV = RxV | (RsV | fIMMEXT(siV)); })
+
+
+
+
+Q6INSN(M4_xor_and,"Rx32^=and(Rs32,Rt32)",ATTRIBS(),
+"Compound Xor-And", { RxV ^= (RsV & RtV); })
+
+Q6INSN(M4_xor_or,"Rx32^=or(Rs32,Rt32)",ATTRIBS(),
+"Compound Xor-Or", { RxV ^= (RsV | RtV); })
+
+Q6INSN(M4_xor_andn,"Rx32^=and(Rs32,~Rt32)",ATTRIBS(),
+"Compound Xor-And", { RxV ^= (RsV & ~RtV); })
+
+
+
+
+
+
+Q6INSN(A2_subri,"Rd32=sub(#s10,Rs32)",ATTRIBS(A_ARCHV2),
+"Subtract register from immediate",{ fIMMEXT(siV); RdV=siV-RsV;})
+
+Q6INSN(A2_andir,"Rd32=and(Rs32,#s10)",ATTRIBS(A_ARCHV2),
+"logical AND with immediate",{ fIMMEXT(siV); RdV=RsV&siV;})
+
+Q6INSN(A2_orir,"Rd32=or(Rs32,#s10)",ATTRIBS(A_ARCHV2),
+"logical OR with immediate",{ fIMMEXT(siV); RdV=RsV|siV;})
+
+
+
+
+Q6INSN(A2_andp,"Rdd32=and(Rss32,Rtt32)",ATTRIBS(),
+"logical AND pair",{ RddV=RssV&RttV;})
+
+Q6INSN(A2_orp,"Rdd32=or(Rss32,Rtt32)",ATTRIBS(),
+"logical OR pair",{ RddV=RssV|RttV;})
+
+Q6INSN(A2_xorp,"Rdd32=xor(Rss32,Rtt32)",ATTRIBS(),
+"logical eXclusive OR pair",{ RddV=RssV^RttV;})
+
+Q6INSN(A2_notp,"Rdd32=not(Rss32)",ATTRIBS(),
+"logical NOT pair",{ RddV=~RssV;})
+
+Q6INSN(A2_sxtw,"Rdd32=sxtw(Rs32)",ATTRIBS(),
+"Sign extend 32-bit word to 64-bit pair",
+{ RddV = fCAST4_8s(RsV); })
+
+Q6INSN(A2_sat,"Rd32=sat(Rss32)",ATTRIBS(),
+"Saturate to 32-bit Signed",
+{ RdV = fSAT(RssV); })
+
+Q6INSN(A2_roundsat,"Rd32=round(Rss32):sat",ATTRIBS(),
+"Round & Saturate to 32-bit Signed",
+{ fHIDE(size8s_t tmp;) fADDSAT64(tmp,RssV,0x080000000ULL); RdV = fGETWORD(1,tmp); })
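+
+/* What A2_roundsat computes, as a small C sketch: add the rounding
+   constant 2^31 with 64-bit saturation, then keep the upper word.
+   Only the overflow direction needs a check since the constant is
+   positive; the helper name is illustrative. */
+#if 0
+#include <stdint.h>
+static inline int32_t round_sat_word(int64_t v)
+{
+    int64_t r = (v > INT64_MAX - 0x80000000LL)
+                ? INT64_MAX                   /* saturating fADDSAT64 */
+                : v + 0x80000000LL;           /* round to nearest */
+    return (int32_t)(r >> 32);                /* fGETWORD(1, ...) */
+}
+#endif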
+
+Q6INSN(A2_sath,"Rd32=sath(Rs32)",ATTRIBS(),
+"Saturate to 16-bit Signed",
+{ RdV = fSATH(RsV); })
+
+Q6INSN(A2_satuh,"Rd32=satuh(Rs32)",ATTRIBS(),
+"Saturate to 16-bit Unsigned",
+{ RdV = fSATUH(RsV); })
+
+Q6INSN(A2_satub,"Rd32=satub(Rs32)",ATTRIBS(),
+"Saturate to 8-bit Unsigned",
+{ RdV = fSATUB(RsV); })
+
+Q6INSN(A2_satb,"Rd32=satb(Rs32)",ATTRIBS(A_ARCHV2),
+"Saturate to 8-bit Signed",
+{ RdV = fSATB(RsV); })
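+
+/* The fSATH/fSATUH/fSATB/fSATUB family above are width-parameterized
+   clamps; a sketch of the two shapes (signed and unsigned), with
+   illustrative names: */
+#if 0
+#include <stdint.h>
+static inline int64_t satn(int bits, int64_t v)     /* signed clamp */
+{
+    int64_t max = (1LL << (bits - 1)) - 1;
+    int64_t min = -(1LL << (bits - 1));
+    return v > max ? max : v < min ? min : v;
+}
+static inline int64_t satun(int bits, int64_t v)    /* unsigned clamp */
+{
+    int64_t max = (1LL << bits) - 1;
+    return v > max ? max : v < 0 ? 0 : v;
+}
+#endif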
+
+/**********************************************/
+/* Vector Add */
+/**********************************************/
+
+Q6INSN(A2_vaddub,"Rdd32=vaddub(Rss32,Rtt32)",ATTRIBS(),
+"Add vector of bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,(fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vaddubs,"Rdd32=vaddub(Rss32,Rtt32):sat",ATTRIBS(),
+"Add vector of bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,fSATUN(8,fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vaddh,"Rdd32=vaddh(Rss32,Rtt32)",ATTRIBS(),
+"Add vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETHALF(i,RssV)+fGETHALF(i,RttV));
+ }
+})
+
+Q6INSN(A2_vaddhs,"Rdd32=vaddh(Rss32,Rtt32):sat",ATTRIBS(),
+"Add vector of half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATN(16,fGETHALF(i,RssV)+fGETHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vadduhs,"Rdd32=vadduh(Rss32,Rtt32):sat",ATTRIBS(),
+"Add vector of unsigned half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATUN(16,fGETUHALF(i,RssV)+fGETUHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A5_vaddhubs,"Rd32=vaddhub(Rss32,Rtt32):sat",ATTRIBS(),
+"Add vector of half integers with saturation and pack to unsigned bytes",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV,fSATUB(fGETHALF(i,RssV)+fGETHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vaddw,"Rdd32=vaddw(Rss32,Rtt32)",ATTRIBS(),
+"Add vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fGETWORD(i,RssV)+fGETWORD(i,RttV));
+ }
+})
+
+Q6INSN(A2_vaddws,"Rdd32=vaddw(Rss32,Rtt32):sat",ATTRIBS(),
+"Add vector of words with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSATN(32,fGETWORD(i,RssV)+fGETWORD(i,RttV)));
+ }
+})
+
+
+
+Q6INSN(S4_vxaddsubw,"Rdd32=vxaddsubw(Rss32,Rtt32):sat",ATTRIBS(),
+"Cross vector add-sub words with saturation",
+{
+ fSETWORD(0,RddV,fSAT(fGETWORD(0,RssV)+fGETWORD(1,RttV)));
+ fSETWORD(1,RddV,fSAT(fGETWORD(1,RssV)-fGETWORD(0,RttV)));
+})
+Q6INSN(S4_vxsubaddw,"Rdd32=vxsubaddw(Rss32,Rtt32):sat",ATTRIBS(),
+"Cross vector sub-add words with saturation",
+{
+ fSETWORD(0,RddV,fSAT(fGETWORD(0,RssV)-fGETWORD(1,RttV)));
+ fSETWORD(1,RddV,fSAT(fGETWORD(1,RssV)+fGETWORD(0,RttV)));
+})
+
+
+
+Q6INSN(S4_vxaddsubh,"Rdd32=vxaddsubh(Rss32,Rtt32):sat",ATTRIBS(),
+"Cross vector add-sub halfwords with saturation",
+{
+ fSETHALF(0,RddV,fSATH(fGETHALF(0,RssV)+fGETHALF(1,RttV)));
+ fSETHALF(1,RddV,fSATH(fGETHALF(1,RssV)-fGETHALF(0,RttV)));
+
+ fSETHALF(2,RddV,fSATH(fGETHALF(2,RssV)+fGETHALF(3,RttV)));
+ fSETHALF(3,RddV,fSATH(fGETHALF(3,RssV)-fGETHALF(2,RttV)));
+
+})
+Q6INSN(S4_vxsubaddh,"Rdd32=vxsubaddh(Rss32,Rtt32):sat",ATTRIBS(),
+"Cross vector sub-add halfwords with saturation",
+{
+ fSETHALF(0,RddV,fSATH(fGETHALF(0,RssV)-fGETHALF(1,RttV)));
+ fSETHALF(1,RddV,fSATH(fGETHALF(1,RssV)+fGETHALF(0,RttV)));
+
+ fSETHALF(2,RddV,fSATH(fGETHALF(2,RssV)-fGETHALF(3,RttV)));
+ fSETHALF(3,RddV,fSATH(fGETHALF(3,RssV)+fGETHALF(2,RttV)));
+})
+
+
+
+
+Q6INSN(S4_vxaddsubhr,"Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat",ATTRIBS(),
+"Cross vector add-sub halfwords with shift, round, and saturation",
+{
+ fSETHALF(0,RddV,fSATH((fGETHALF(0,RssV)+fGETHALF(1,RttV)+1)>>1));
+ fSETHALF(1,RddV,fSATH((fGETHALF(1,RssV)-fGETHALF(0,RttV)+1)>>1));
+
+ fSETHALF(2,RddV,fSATH((fGETHALF(2,RssV)+fGETHALF(3,RttV)+1)>>1));
+ fSETHALF(3,RddV,fSATH((fGETHALF(3,RssV)-fGETHALF(2,RttV)+1)>>1));
+
+})
+Q6INSN(S4_vxsubaddhr,"Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat",ATTRIBS(),
+"Cross vector sub-add halfwords with shift, round, and saturation",
+{
+ fSETHALF(0,RddV,fSATH((fGETHALF(0,RssV)-fGETHALF(1,RttV)+1)>>1));
+ fSETHALF(1,RddV,fSATH((fGETHALF(1,RssV)+fGETHALF(0,RttV)+1)>>1));
+
+ fSETHALF(2,RddV,fSATH((fGETHALF(2,RssV)-fGETHALF(3,RttV)+1)>>1));
+ fSETHALF(3,RddV,fSATH((fGETHALF(3,RssV)+fGETHALF(2,RttV)+1)>>1));
+})
+
+
+
+
+
+/**********************************************/
+/* 1/2 Vector operations */
+/**********************************************/
+
+
+Q6INSN(A2_svavgh,"Rd32=vavgh(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Avg vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,((fGETHALF(i,RsV)+fGETHALF(i,RtV))>>1));
+ }
+})
+
+Q6INSN(A2_svavghs,"Rd32=vavgh(Rs32,Rt32):rnd",ATTRIBS(A_ARCHV2),
+"Avg vector of half integers with rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,((fGETHALF(i,RsV)+fGETHALF(i,RtV)+1)>>1));
+ }
+})
+
+
+
+Q6INSN(A2_svnavgh,"Rd32=vnavgh(Rt32,Rs32)",ATTRIBS(A_ARCHV2),
+"Avg vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,((fGETHALF(i,RtV)-fGETHALF(i,RsV))>>1));
+ }
+})
+
+
+Q6INSN(A2_svaddh,"Rd32=vaddh(Rs32,Rt32)",ATTRIBS(),
+"Add vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(i,RsV)+fGETHALF(i,RtV));
+ }
+})
+
+Q6INSN(A2_svaddhs,"Rd32=vaddh(Rs32,Rt32):sat",ATTRIBS(),
+"Add vector of half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATN(16,fGETHALF(i,RsV)+fGETHALF(i,RtV)));
+ }
+})
+
+Q6INSN(A2_svadduhs,"Rd32=vadduh(Rs32,Rt32):sat",ATTRIBS(),
+"Add vector of unsigned half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATUN(16,fGETUHALF(i,RsV)+fGETUHALF(i,RtV)));
+ }
+})
+
+
+Q6INSN(A2_svsubh,"Rd32=vsubh(Rt32,Rs32)",ATTRIBS(),
+"Sub vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(i,RtV)-fGETHALF(i,RsV));
+ }
+})
+
+Q6INSN(A2_svsubhs,"Rd32=vsubh(Rt32,Rs32):sat",ATTRIBS(),
+"Sub vector of half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATN(16,fGETHALF(i,RtV)-fGETHALF(i,RsV)));
+ }
+})
+
+Q6INSN(A2_svsubuhs,"Rd32=vsubuh(Rt32,Rs32):sat",ATTRIBS(),
+"Sub vector of unsigned half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATUN(16,fGETUHALF(i,RtV)-fGETUHALF(i,RsV)));
+ }
+})
+
+
+
+
+/**********************************************/
+/* Vector Reduce Add */
+/**********************************************/
+
+Q6INSN(A2_vraddub,"Rdd32=vraddub(Rss32,Rtt32)",ATTRIBS(),
+"Sum: two vectors of unsigned bytes",
+{
+ fHIDE(int i;)
+ RddV = 0;
+ for (i=0;i<4;i++) {
+ fSETWORD(0,RddV,(fGETWORD(0,RddV) + (fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV))));
+ }
+ for (i=4;i<8;i++) {
+ fSETWORD(1,RddV,(fGETWORD(1,RddV) + (fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV))));
+ }
+})
+
+Q6INSN(A2_vraddub_acc,"Rxx32+=vraddub(Rss32,Rtt32)",ATTRIBS(),
+"Sum: two vectors of unsigned bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETWORD(0,RxxV,(fGETWORD(0,RxxV) + (fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV))));
+ }
+ for (i = 4; i < 8; i++) {
+ fSETWORD(1,RxxV,(fGETWORD(1,RxxV) + (fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV))));
+ }
+})
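+
+/* A C sketch of the vraddub reduction: the low word of the result
+   accumulates byte lanes 0..3, the high word lanes 4..7 (function
+   name illustrative; the _acc form starts from the previous
+   accumulator instead of zero). */
+#if 0
+#include <stdint.h>
+static inline uint64_t vraddub(uint64_t ss, uint64_t tt)
+{
+    uint32_t acc[2] = { 0, 0 };
+    for (int i = 0; i < 8; i++) {
+        uint32_t s = (ss >> (8 * i)) & 0xff;    /* fGETUBYTE(i, ss) */
+        uint32_t t = (tt >> (8 * i)) & 0xff;
+        acc[i / 4] += s + t;
+    }
+    return ((uint64_t)acc[1] << 32) | acc[0];
+}
+#endif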
+
+
+
+Q6INSN(M2_vraddh,"Rd32=vraddh(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),
+"Sum: two vectors of halves",
+{
+ fHIDE(int i;)
+ RdV = 0;
+ for (i=0;i<4;i++) {
+ RdV += (fGETHALF(i,RssV)+fGETHALF(i,RttV));
+ }
+})
+
+Q6INSN(M2_vradduh,"Rd32=vradduh(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),
+"Sum: two vectors of unsigned halves",
+{
+ fHIDE(int i;)
+ RdV = 0;
+ for (i=0;i<4;i++) {
+ RdV += (fGETUHALF(i,RssV)+fGETUHALF(i,RttV));
+ }
+})
+
+/**********************************************/
+/* Vector Sub */
+/**********************************************/
+
+Q6INSN(A2_vsubub,"Rdd32=vsubub(Rtt32,Rss32)",ATTRIBS(),
+"Sub vector of bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,(fGETUBYTE(i,RttV)-fGETUBYTE(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vsububs,"Rdd32=vsubub(Rtt32,Rss32):sat",ATTRIBS(),
+"Sub vector of bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,fSATUN(8,fGETUBYTE(i,RttV)-fGETUBYTE(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vsubh,"Rdd32=vsubh(Rtt32,Rss32)",ATTRIBS(),
+"Sub vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETHALF(i,RttV)-fGETHALF(i,RssV));
+ }
+})
+
+Q6INSN(A2_vsubhs,"Rdd32=vsubh(Rtt32,Rss32):sat",ATTRIBS(),
+"Sub vector of half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATN(16,fGETHALF(i,RttV)-fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vsubuhs,"Rdd32=vsubuh(Rtt32,Rss32):sat",ATTRIBS(),
+"Sub vector of unsigned half integers with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATUN(16,fGETUHALF(i,RttV)-fGETUHALF(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vsubw,"Rdd32=vsubw(Rtt32,Rss32)",ATTRIBS(),
+"Sub vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fGETWORD(i,RttV)-fGETWORD(i,RssV));
+ }
+})
+
+Q6INSN(A2_vsubws,"Rdd32=vsubw(Rtt32,Rss32):sat",ATTRIBS(),
+"Sub vector of words with saturation",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSATN(32,fGETWORD(i,RttV)-fGETWORD(i,RssV)));
+ }
+})
+
+
+
+
+/**********************************************/
+/* Vector Abs */
+/**********************************************/
+
+Q6INSN(A2_vabsh,"Rdd32=vabsh(Rss32)",ATTRIBS(),
+"Negate vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fABS(fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vabshsat,"Rdd32=vabsh(Rss32):sat",ATTRIBS(),
+"Negate vector of half integers",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATH(fABS(fGETHALF(i,RssV))));
+ }
+})
+
+Q6INSN(A2_vabsw,"Rdd32=vabsw(Rss32)",ATTRIBS(),
+"Absolute Value vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fABS(fGETWORD(i,RssV)));
+ }
+})
+
+Q6INSN(A2_vabswsat,"Rdd32=vabsw(Rss32):sat",ATTRIBS(),
+"Absolute Value vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSAT(fABS(fGETWORD(i,RssV))));
+ }
+})
+
+/**********************************************/
+/* Vector SAD */
+/**********************************************/
+
+
+Q6INSN(M2_vabsdiffw,"Rdd32=vabsdiffw(Rtt32,Rss32)",ATTRIBS(A_ARCHV2),
+"Absolute Differences: vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fABS(fGETWORD(i,RttV) - fGETWORD(i,RssV)));
+ }
+})
+
+Q6INSN(M2_vabsdiffh,"Rdd32=vabsdiffh(Rtt32,Rss32)",ATTRIBS(A_ARCHV2),
+"Absolute Differences: vector of halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fABS(fGETHALF(i,RttV) - fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(M6_vabsdiffb,"Rdd32=vabsdiffb(Rtt32,Rss32)",ATTRIBS(),
+"Absolute Differences: vector of halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<8;i++) {
+ fSETBYTE(i,RddV,fABS(fGETBYTE(i,RttV) - fGETBYTE(i,RssV)));
+ }
+})
+
+Q6INSN(M6_vabsdiffub,"Rdd32=vabsdiffub(Rtt32,Rss32)",ATTRIBS(),
+"Absolute Differences: vector of halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<8;i++) {
+ fSETBYTE(i,RddV,fABS(fGETUBYTE(i,RttV) - fGETUBYTE(i,RssV)));
+ }
+})
+
+
+
+Q6INSN(A2_vrsadub,"Rdd32=vrsadub(Rss32,Rtt32)",ATTRIBS(),
+"Sum of Absolute Differences: vector of unsigned bytes",
+{
+ fHIDE(int i;)
+ RddV = 0;
+ for (i = 0; i < 4; i++) {
+ fSETWORD(0,RddV,(fGETWORD(0,RddV) + fABS((fGETUBYTE(i,RssV) - fGETUBYTE(i,RttV)))));
+ }
+ for (i = 4; i < 8; i++) {
+ fSETWORD(1,RddV,(fGETWORD(1,RddV) + fABS((fGETUBYTE(i,RssV) - fGETUBYTE(i,RttV)))));
+ }
+})
+
+Q6INSN(A2_vrsadub_acc,"Rxx32+=vrsadub(Rss32,Rtt32)",ATTRIBS(),
+"Sum of Absolute Differences: vector of unsigned bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETWORD(0,RxxV,(fGETWORD(0,RxxV) + fABS((fGETUBYTE(i,RssV) - fGETUBYTE(i,RttV)))));
+ }
+ for (i = 4; i < 8; i++) {
+ fSETWORD(1,RxxV,(fGETWORD(1,RxxV) + fABS((fGETUBYTE(i,RssV) - fGETUBYTE(i,RttV)))));
+ }
+})
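+
+/* The SAD reduction differs from vraddub only in the per-lane term:
+   an absolute difference instead of a sum. Sketch with illustrative
+   names, same lane-to-word split as above: */
+#if 0
+#include <stdint.h>
+static inline uint64_t vrsadub(uint64_t ss, uint64_t tt)
+{
+    uint32_t acc[2] = { 0, 0 };
+    for (int i = 0; i < 8; i++) {
+        int s = (ss >> (8 * i)) & 0xff;
+        int t = (tt >> (8 * i)) & 0xff;
+        acc[i / 4] += s > t ? s - t : t - s;    /* fABS(s - t) */
+    }
+    return ((uint64_t)acc[1] << 32) | acc[0];
+}
+#endif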
+
+
+/**********************************************/
+/* Vector Average */
+/**********************************************/
+
+Q6INSN(A2_vavgub,"Rdd32=vavgub(Rss32,Rtt32)",ATTRIBS(),
+"Average vector of unsigned bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,((fGETUBYTE(i,RssV) + fGETUBYTE(i,RttV))>>1));
+ }
+})
+
+Q6INSN(A2_vavguh,"Rdd32=vavguh(Rss32,Rtt32)",ATTRIBS(),
+"Average vector of unsigned halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,(fGETUHALF(i,RssV)+fGETUHALF(i,RttV))>>1);
+ }
+})
+
+Q6INSN(A2_vavgh,"Rdd32=vavgh(Rss32,Rtt32)",ATTRIBS(),
+"Average vector of halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,(fGETHALF(i,RssV)+fGETHALF(i,RttV))>>1);
+ }
+})
+
+Q6INSN(A2_vnavgh,"Rdd32=vnavgh(Rtt32,Rss32)",ATTRIBS(),
+"Negative Average vector of halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,(fGETHALF(i,RttV)-fGETHALF(i,RssV))>>1);
+ }
+})
+
+Q6INSN(A2_vavgw,"Rdd32=vavgw(Rss32,Rtt32)",ATTRIBS(),
+"Average vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fSXTN(32,33,fGETWORD(i,RssV))+fSXTN(32,33,fGETWORD(i,RttV)))>>1);
+ }
+})
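+
+/* Why the word averages sign-extend to 33 bits first: (a + b) >> 1
+   evaluated in 32 bits would wrap for large operands. Doing the add
+   in a wider type keeps the intermediate exact, as in this sketch: */
+#if 0
+#include <stdint.h>
+static inline int32_t avg_word(int32_t a, int32_t b)
+{
+    return (int32_t)(((int64_t)a + (int64_t)b) >> 1);  /* exact sum, then halve */
+}
+#endif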
+
+Q6INSN(A2_vnavgw,"Rdd32=vnavgw(Rtt32,Rss32)",ATTRIBS(A_ARCHV2),
+"Average vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fSXTN(32,33,fGETWORD(i,RttV))-fSXTN(32,33,fGETWORD(i,RssV)))>>1);
+ }
+})
+
+Q6INSN(A2_vavgwr,"Rdd32=vavgw(Rss32,Rtt32):rnd",ATTRIBS(),
+"Average vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fSXTN(32,33,fGETWORD(i,RssV))+fSXTN(32,33,fGETWORD(i,RttV))+1)>>1);
+ }
+})
+
+Q6INSN(A2_vnavgwr,"Rdd32=vnavgw(Rtt32,Rss32):rnd:sat",ATTRIBS(A_ARCHV2),
+"Average vector of words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSAT((fSXTN(32,33,fGETWORD(i,RttV))-fSXTN(32,33,fGETWORD(i,RssV))+1)>>1));
+ }
+})
+
+Q6INSN(A2_vavgwcr,"Rdd32=vavgw(Rss32,Rtt32):crnd",ATTRIBS(A_ARCHV2),
+"Average vector of words with convergent rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fCRND(fSXTN(32,33,fGETWORD(i,RssV))+fSXTN(32,33,fGETWORD(i,RttV)))>>1));
+ }
+})
+
+Q6INSN(A2_vnavgwcr,"Rdd32=vnavgw(Rtt32,Rss32):crnd:sat",ATTRIBS(A_ARCHV2),
+"Average negative vector of words with convergent rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSAT(fCRND(fSXTN(32,33,fGETWORD(i,RttV))-fSXTN(32,33,fGETWORD(i,RssV)))>>1));
+ }
+})
+
+Q6INSN(A2_vavghcr,"Rdd32=vavgh(Rss32,Rtt32):crnd",ATTRIBS(A_ARCHV2),
+"Average vector of halfwords with conv rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fCRND(fGETHALF(i,RssV)+fGETHALF(i,RttV))>>1);
+ }
+})
+
+Q6INSN(A2_vnavghcr,"Rdd32=vnavgh(Rtt32,Rss32):crnd:sat",ATTRIBS(A_ARCHV2),
+"Average negative vector of halfwords with conv rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATH(fCRND(fGETHALF(i,RttV)-fGETHALF(i,RssV))>>1));
+ }
+})
+
+
+Q6INSN(A2_vavguw,"Rdd32=vavguw(Rss32,Rtt32)",ATTRIBS(),
+"Average vector of unsigned words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fZXTN(32,33,fGETUWORD(i,RssV))+fZXTN(32,33,fGETUWORD(i,RttV)))>>1);
+ }
+})
+
+Q6INSN(A2_vavguwr,"Rdd32=vavguw(Rss32,Rtt32):rnd",ATTRIBS(),
+"Average vector of unsigned words",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fZXTN(32,33,fGETUWORD(i,RssV))+fZXTN(32,33,fGETUWORD(i,RttV))+1)>>1);
+ }
+})
+
+Q6INSN(A2_vavgubr,"Rdd32=vavgub(Rss32,Rtt32):rnd",ATTRIBS(),
+"Average vector of unsigned bytes",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,((fGETUBYTE(i,RssV)+fGETUBYTE(i,RttV)+1)>>1));
+ }
+})
+
+Q6INSN(A2_vavguhr,"Rdd32=vavguh(Rss32,Rtt32):rnd",ATTRIBS(),
+"Average vector of unsigned halfwords with rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,(fGETUHALF(i,RssV)+fGETUHALF(i,RttV)+1)>>1);
+ }
+})
+
+Q6INSN(A2_vavghr,"Rdd32=vavgh(Rss32,Rtt32):rnd",ATTRIBS(),
+"Average vector of halfwords with rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,(fGETHALF(i,RssV)+fGETHALF(i,RttV)+1)>>1);
+ }
+})
+
+Q6INSN(A2_vnavghr,"Rdd32=vnavgh(Rtt32,Rss32):rnd:sat",ATTRIBS(A_ARCHV2),
+"Negative Average vector of halfwords with rounding",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATH((fGETHALF(i,RttV)-fGETHALF(i,RssV)+1)>>1));
+ }
+})
+
+
+/* Rounding Instruction */
+
+Q6INSN(A4_round_ri,"Rd32=round(Rs32,#u5)",ATTRIBS(),"Round", {RdV = fRNDN(RsV,uiV)>>uiV; })
+Q6INSN(A4_round_rr,"Rd32=round(Rs32,Rt32)",ATTRIBS(),"Round", {RdV = fRNDN(RsV,fZXTN(5,32,RtV))>>fZXTN(5,32,RtV); })
+Q6INSN(A4_round_ri_sat,"Rd32=round(Rs32,#u5):sat",ATTRIBS(),"Round", {RdV = (fSAT(fRNDN(RsV,uiV)))>>uiV; })
+Q6INSN(A4_round_rr_sat,"Rd32=round(Rs32,Rt32):sat",ATTRIBS(),"Round", {RdV = (fSAT(fRNDN(RsV,fZXTN(5,32,RtV))))>>fZXTN(5,32,RtV); })
+
+
+Q6INSN(A4_cround_ri,"Rd32=cround(Rs32,#u5)",ATTRIBS(),"Convergent Round", {RdV = fCRNDN(RsV,uiV); })
+Q6INSN(A4_cround_rr,"Rd32=cround(Rs32,Rt32)",ATTRIBS(),"Convergent Round", {RdV = fCRNDN(RsV,fZXTN(5,32,RtV)); })
+
+
+#define CROUND(DST,SRC,SHIFT) \
+ fHIDE(size16s_t rndbit_128;)\
+ fHIDE(size16s_t tmp128;)\
+ fHIDE(size16s_t src_128;)\
+ if (SHIFT == 0) { \
+ DST = SRC;\
+ } else if ((SRC & (size8s_t)((1LL << (SHIFT - 1)) - 1LL)) == 0) { \
+ src_128 = fCAST8S_16S(SRC);\
+ rndbit_128 = fCAST8S_16S(1LL);\
+ rndbit_128 = fSHIFTL128(rndbit_128, SHIFT);\
+ rndbit_128 = fAND128(rndbit_128, src_128);\
+ rndbit_128 = fSHIFTR128(rndbit_128, 1);\
+ tmp128 = fADD128(src_128, rndbit_128);\
+ tmp128 = fSHIFTR128(tmp128, SHIFT);\
+ DST = fCAST16S_8S(tmp128);\
+ } else {\
+ size16s_t rndbit_128 = fCAST8S_16S((1LL << (SHIFT - 1))); \
+ size16s_t src_128 = fCAST8S_16S(SRC); \
+ size16s_t tmp128 = fADD128(src_128, rndbit_128);\
+ tmp128 = fSHIFTR128(tmp128, SHIFT);\
+ DST = fCAST16S_8S(tmp128);\
+ }
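+
+/* CROUND implements round-half-to-even: when the bits below the round
+   point are an exact tie, the increment is taken from the result's
+   would-be LSB so ties land on even values. A narrow C sketch
+   (illustrative name; the macro uses 128-bit arithmetic so the
+   +rounding-constant step cannot overflow, which this sketch assumes
+   away for inputs far from INT64_MAX): */
+#if 0
+#include <stdint.h>
+static inline int64_t cround(int64_t src, int shift)
+{
+    if (shift == 0)
+        return src;
+    int64_t half = 1LL << (shift - 1);
+    if ((src & (half - 1)) == 0) {              /* exact value or exact tie */
+        int64_t lsb = (src >> shift) & 1;       /* result LSB decides */
+        return (src + (lsb << (shift - 1))) >> shift;
+    }
+    return (src + half) >> shift;               /* ordinary round-to-nearest */
+}
+#endif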
+
+Q6INSN(A7_croundd_ri,"Rdd32=cround(Rss32,#u6)",ATTRIBS(),"Convergent Round",
+{
+CROUND(RddV,RssV,uiV);
+})
+
+Q6INSN(A7_croundd_rr,"Rdd32=cround(Rss32,Rt32)",ATTRIBS(),"Convergent Round",
+{
+CROUND(RddV,RssV,fZXTN(6,32,RtV));
+})
+
+
+
+
+
+
+
+
+
+Q6INSN(A7_clip,"Rd32=clip(Rs32,#u5)",ATTRIBS(),"Clip to #s5", { fCLIP(RdV,RsV,uiV);})
+Q6INSN(A7_vclip,"Rdd32=vclip(Rss32,#u5)",ATTRIBS(),"Clip to #s5",
+{
+fHIDE(size4s_t tmp;)
+fCLIP(tmp, fGETWORD(0, RssV), uiV);
+fSETWORD(0, RddV, tmp);
+fCLIP(tmp,fGETWORD(1, RssV), uiV);
+fSETWORD(1, RddV, tmp);
+}
+)
+
+
+
+/**********************************************/
+/* V4: Cross Vector Min/Max */
+/**********************************************/
+
+
+#define VRMINORMAX(TAG,STR,OP,SHORTTYPE,SETTYPE,GETTYPE,NEL,SHIFT) \
+Q6INSN(A4_vr##TAG##SHORTTYPE,"Rxx32=vr"#TAG#SHORTTYPE"(Rss32,Ru32)",ATTRIBS(), \
+"Choose " STR " elements of a vector", \
+{ \
+ fHIDE(int i; size8s_t TAG; size4s_t addr;) \
+ TAG = fGET##GETTYPE(0,RxxV); \
+ addr = fGETWORD(1,RxxV); \
+ for (i = 0; i < NEL; i++) { \
+ if (TAG OP fGET##GETTYPE(i,RssV)) { \
+ TAG = fGET##GETTYPE(i,RssV); \
+ addr = RuV | i<<SHIFT; \
+ } \
+ } \
+ fSETWORD(0,RxxV,TAG); \
+ fSETWORD(1,RxxV,addr); \
+})
+
+#define RMINMAX(SHORTTYPE,SETTYPE,GETTYPE,NEL,SHIFT) \
+VRMINORMAX(min,"minimum",>,SHORTTYPE,SETTYPE,GETTYPE,NEL,SHIFT) \
+VRMINORMAX(max,"maximum",<,SHORTTYPE,SETTYPE,GETTYPE,NEL,SHIFT)
+
+
+RMINMAX(h,HALF,HALF,4,1)
+RMINMAX(uh,HALF,UHALF,4,1)
+RMINMAX(w,WORD,WORD,2,2)
+RMINMAX(uw,WORD,UWORD,2,2)
+
+#undef RMINMAX
+#undef VRMINORMAX
+
+/**********************************************/
+/* Vector Min/Max */
+/**********************************************/
+
+#define VMINORMAX(TAG,STR,FUNC,SHORTTYPE,SETTYPE,GETTYPE,NEL) \
+Q6INSN(A2_v##TAG##SHORTTYPE,"Rdd32=v"#TAG#SHORTTYPE"(Rtt32,Rss32)",ATTRIBS(), \
+"Choose " STR " elements of two vectors", \
+{ \
+ fHIDE(int i;) \
+ for (i = 0; i < NEL; i++) { \
+ fSET##SETTYPE(i,RddV,FUNC(fGET##GETTYPE(i,RttV),fGET##GETTYPE(i,RssV))); \
+ } \
+})
+
+#define VMINORMAX3(TAG,STR,FUNC,SHORTTYPE,SETTYPE,GETTYPE,NEL) \
+Q6INSN(A6_v##TAG##SHORTTYPE##3,"Rxx32=v"#TAG#SHORTTYPE"3(Rtt32,Rss32)",ATTRIBS(), \
+"Choose " STR " elements of two vectors", \
+{ \
+ fHIDE(int i;) \
+ for (i = 0; i < NEL; i++) { \
+ fSET##SETTYPE(i,RxxV,FUNC(fGET##GETTYPE(i,RxxV),FUNC(fGET##GETTYPE(i,RttV),fGET##GETTYPE(i,RssV)))); \
+ } \
+})
+
+#define MINMAX(SHORTTYPE,SETTYPE,GETTYPE,NEL) \
+VMINORMAX(min,"minimum",fMIN,SHORTTYPE,SETTYPE,GETTYPE,NEL) \
+VMINORMAX(max,"maximum",fMAX,SHORTTYPE,SETTYPE,GETTYPE,NEL)
+
+MINMAX(b,BYTE,BYTE,8)
+MINMAX(ub,BYTE,UBYTE,8)
+MINMAX(h,HALF,HALF,4)
+MINMAX(uh,HALF,UHALF,4)
+MINMAX(w,WORD,WORD,2)
+MINMAX(uw,WORD,UWORD,2)
+
+#undef MINMAX
+#undef VMINORMAX
+#undef VMINORMAX3
+
+
+Q6INSN(A5_ACS,"Rxx32,Pe4=vacsh(Rss32,Rtt32)",ATTRIBS(),
+"Add Compare and Select elements of two vectors, record the maximums and the decisions ",
+{
+ fHIDE(int i;)
+ fHIDE(int xv;)
+ fHIDE(int sv;)
+ fHIDE(int tv;)
+ for (i = 0; i < 4; i++) {
+ xv = (int) fGETHALF(i,RxxV);
+ sv = (int) fGETHALF(i,RssV);
+ tv = (int) fGETHALF(i,RttV);
+ xv = xv + tv; //assumes 17bit datapath
+ sv = sv - tv; //assumes 17bit datapath
+ fSETBIT(i*2, PeV, (xv > sv));
+ fSETBIT(i*2+1,PeV, (xv > sv));
+ fSETHALF(i, RxxV, fSATH(fMAX(xv,sv)));
+ }
+})
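+
+/* A5_ACS is one step of a Viterbi add-compare-select: per halfword
+   lane, the surviving path metric is max(x + t, s - t) and the
+   predicate records which candidate won. Sketch of a single lane
+   (illustrative name, decision returned separately): */
+#if 0
+#include <stdint.h>
+static inline int16_t acs_lane(int16_t x, int16_t s, int16_t t, int *dec)
+{
+    int a = x + t, b = s - t;          /* the two candidate metrics */
+    *dec = a > b;                      /* decision bit for traceback */
+    int m = a > b ? a : b;
+    return m > 32767 ? 32767 : m < -32768 ? -32768 : m;  /* fSATH */
+}
+#endif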
+
+Q6INSN(A6_vminub_RdP,"Rdd32,Pe4=vminub(Rtt32,Rss32)",ATTRIBS(),
+"Vector minimum of bytes, records minimum and decision vector",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i, PeV, (fGETUBYTE(i,RttV) > fGETUBYTE(i,RssV)));
+ fSETBYTE(i,RddV,fMIN(fGETUBYTE(i,RttV),fGETUBYTE(i,RssV)));
+ }
+})
+
+/**********************************************/
+/* Modulo wrap                                */
+/**********************************************/
+
+
+Q6INSN(A4_modwrapu,"Rd32=modwrap(Rs32,Rt32)",ATTRIBS(),
+"Wrap to an unsigned modulo buffer",
+{
+ if (RsV < 0) {
+ RdV = RsV + fCAST4u(RtV);
+ } else if (fCAST4u(RsV) >= fCAST4u(RtV)) {
+ RdV = RsV - fCAST4u(RtV);
+ } else {
+ RdV = RsV;
+ }
+})
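+
+/* A4_modwrapu in plain C; a single conditional add or subtract is
+   enough, which assumes the index never strays more than one buffer
+   length out of range (the usual circular-addressing pattern): */
+#if 0
+#include <stdint.h>
+static inline uint32_t modwrap(int32_t idx, uint32_t len)
+{
+    if (idx < 0) return idx + len;              /* stepped below zero */
+    if ((uint32_t)idx >= len) return idx - len; /* stepped past the end */
+    return idx;
+}
+#endif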
diff --git a/target/hexagon/imported/branch.idef b/target/hexagon/imported/branch.idef
new file mode 100644
index 000000000..88f5f48cc
--- /dev/null
+++ b/target/hexagon/imported/branch.idef
@@ -0,0 +1,326 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*********************************************/
+/* Jump instructions */
+/*********************************************/
+
+#define A_JDIR A_JUMP
+#define A_CJNEWDIR A_JUMP
+#define A_CJOLDDIR A_JUMP
+#define A_NEWVALUEJ A_JUMP,A_DOTNEWVALUE,A_MEMLIKE_PACKET_RULES
+#define A_JINDIR A_JUMP,A_INDIRECT
+#define A_JINDIRNEW A_JUMP,A_INDIRECT
+#define A_JINDIROLD A_JUMP,A_INDIRECT
+
+Q6INSN(J2_jump,"jump #r22:2",ATTRIBS(A_JDIR), "direct unconditional jump",
+{fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);})
+
+Q6INSN(J2_jumpr,"jumpr Rs32",ATTRIBS(A_JINDIR), "indirect unconditional jump",
+{fJUMPR(RsN,RsV,COF_TYPE_JUMPR);})
+
+#define OLDCOND_JUMP(TAG,OPER,OPER2,ATTRIB,DESCR,SEMANTICS) \
+Q6INSN(TAG##t,"if (Pu4) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLD(PuV),,SPECULATE_NOT_TAKEN,12,0); if (fLSBOLD(PuV)) { SEMANTICS; }}) \
+Q6INSN(TAG##f,"if (!Pu4) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_NOT_TAKEN,12,0); if (fLSBOLDNOT(PuV)) { SEMANTICS; }}) \
+Q6INSN(TAG##tpt,"if (Pu4) "OPER":t "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLD(PuV),,SPECULATE_TAKEN,12,0); if (fLSBOLD(PuV)) { SEMANTICS; }}) \
+Q6INSN(TAG##fpt,"if (!Pu4) "OPER":t "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_TAKEN,12,0); if (fLSBOLDNOT(PuV)) { SEMANTICS; }})
+
+OLDCOND_JUMP(J2_jump,"jump","#r15:2",ATTRIBS(A_CJOLDDIR),"direct conditional jump",
+fIMMEXT(riV);fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);)
+
+OLDCOND_JUMP(J2_jumpr,"jumpr","Rs32",ATTRIBS(A_JINDIROLD),"indirect conditional jump",
+fJUMPR(RsN,RsV,COF_TYPE_JUMPR);)
+
+#define NEWCOND_JUMP(TAG,OPER,OPER2,ATTRIB,DESCR,SEMANTICS)\
+Q6INSN(TAG##tnew,"if (Pu4.new) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBNEW(PuN),, SPECULATE_NOT_TAKEN , 12,0)} {if(fLSBNEW(PuN)){SEMANTICS;}})\
+Q6INSN(TAG##fnew,"if (!Pu4.new) "OPER":nt "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBNEWNOT(PuN),, SPECULATE_NOT_TAKEN , 12,0)} {if(fLSBNEWNOT(PuN)){SEMANTICS;}})\
+Q6INSN(TAG##tnewpt,"if (Pu4.new) "OPER":t "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBNEW(PuN),, SPECULATE_TAKEN , 12,0)} {if(fLSBNEW(PuN)){SEMANTICS;}})\
+Q6INSN(TAG##fnewpt,"if (!Pu4.new) "OPER":t "OPER2,ATTRIB,DESCR,{fBRANCH_SPECULATE_STALL(fLSBNEWNOT(PuN),, SPECULATE_TAKEN , 12,0)} {if(fLSBNEWNOT(PuN)){SEMANTICS;}})
+
+NEWCOND_JUMP(J2_jump,"jump","#r15:2",ATTRIBS(A_CJNEWDIR,A_ARCHV2),"direct conditional jump",
+fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMPNEW);)
+
+NEWCOND_JUMP(J2_jumpr,"jumpr","Rs32",ATTRIBS(A_JINDIRNEW,A_ARCHV3),"indirect conditional jump",
+fJUMPR(RsN,RsV,COF_TYPE_JUMPR);)
+
+
+
+Q6INSN(J4_hintjumpr,"hintjr(Rs32)",ATTRIBS(A_JINDIR),"hint for an indirect jump target",
+{fHINTJR(RsV);})
+
+
+/*********************************************/
+/* Compound Compare-Jumps */
+/*********************************************/
+Q6INSN(J2_jumprz,"if (Rs32!=#0) jump:nt #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register true",
+{fBRANCH_SPECULATE_STALL((RsV!=0), , SPECULATE_NOT_TAKEN,12,0) if (RsV != 0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprnz,"if (Rs32==#0) jump:nt #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register false",
+{fBRANCH_SPECULATE_STALL((RsV==0), , SPECULATE_NOT_TAKEN,12,0) if (RsV == 0) {fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprzpt,"if (Rs32!=#0) jump:t #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register true",
+{fBRANCH_SPECULATE_STALL((RsV!=0), , SPECULATE_TAKEN,12,0) if (RsV != 0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprnzpt,"if (Rs32==#0) jump:t #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register false",
+{fBRANCH_SPECULATE_STALL((RsV==0), , SPECULATE_TAKEN,12,0) if (RsV == 0) {fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprgtez,"if (Rs32>=#0) jump:nt #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register greater or equal to zero",
+{fBRANCH_SPECULATE_STALL((RsV>=0), , SPECULATE_NOT_TAKEN,12,0) if (RsV>=0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprgtezpt,"if (Rs32>=#0) jump:t #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register greater or equal to zero",
+{fBRANCH_SPECULATE_STALL((RsV>=0), , SPECULATE_TAKEN,12,0) if (RsV>=0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprltez,"if (Rs32<=#0) jump:nt #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register less than or equal to zero",
+{fBRANCH_SPECULATE_STALL((RsV<=0), , SPECULATE_NOT_TAKEN,12,0) if (RsV<=0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+Q6INSN(J2_jumprltezpt,"if (Rs32<=#0) jump:t #r13:2",ATTRIBS(A_CJNEWDIR,A_ARCHV3),"direct conditional jump if register less than or equal to zero",
+{fBRANCH_SPECULATE_STALL((RsV<=0), , SPECULATE_TAKEN,12,0) if (RsV<=0) { fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+
+
+/*********************************************/
+/* V4 Compound Compare-Jumps */
+/*********************************************/
+
+
+/* V4 compound compare jumps (CJ) */
+#define STD_CMPJUMP(TAG,TST,TSTSEM)\
+Q6INSN(J4_##TAG##_tp0_jump_nt, "p0="TST"; if (p0.new) jump:nt #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P0(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW0,,SPECULATE_NOT_TAKEN,13,0) if (fLSBNEW0) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_fp0_jump_nt, "p0="TST"; if (!p0.new) jump:nt #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump",{fPART1(fWRITE_P0(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,,SPECULATE_NOT_TAKEN,13,0) if (fLSBNEW0NOT) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_tp0_jump_t, "p0="TST"; if (p0.new) jump:t #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P0(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW0,,SPECULATE_TAKEN,13,0) if (fLSBNEW0) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_fp0_jump_t, "p0="TST"; if (!p0.new) jump:t #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P0(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,,SPECULATE_TAKEN,13,0) if (fLSBNEW0NOT) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_tp1_jump_nt, "p1="TST"; if (p1.new) jump:nt #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P1(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW1,,SPECULATE_NOT_TAKEN,13,0) if (fLSBNEW1) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_fp1_jump_nt, "p1="TST"; if (!p1.new) jump:nt #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump",{fPART1(fWRITE_P1(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW1NOT,,SPECULATE_NOT_TAKEN,13,0) if (fLSBNEW1NOT) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_tp1_jump_t, "p1="TST"; if (p1.new) jump:t #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P1(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW1,,SPECULATE_TAKEN,13,0) if (fLSBNEW1) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_fp1_jump_t, "p1="TST"; if (!p1.new) jump:t #r9:2", ATTRIBS(A_CJNEWDIR,A_NEWCMPJUMP),"compound compare-jump", {fPART1(fWRITE_P1(f8BITSOF(TSTSEM))) fBRANCH_SPECULATE_STALL(fLSBNEW1NOT,,SPECULATE_TAKEN,13,0) if (fLSBNEW1NOT) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+
+STD_CMPJUMP(cmpeqi,"cmp.eq(Rs16,#U5)",(RsV==UiV))
+STD_CMPJUMP(cmpgti,"cmp.gt(Rs16,#U5)",(RsV>UiV))
+STD_CMPJUMP(cmpgtui,"cmp.gtu(Rs16,#U5)",(fCAST4u(RsV)>UiV))
+
+STD_CMPJUMP(cmpeqn1,"cmp.eq(Rs16,#-1)",(RsV==-1))
+STD_CMPJUMP(cmpgtn1,"cmp.gt(Rs16,#-1)",(RsV>-1))
+STD_CMPJUMP(tstbit0,"tstbit(Rs16,#0)",(RsV & 1))
+
+STD_CMPJUMP(cmpeq,"cmp.eq(Rs16,Rt16)",(RsV==RtV))
+STD_CMPJUMP(cmpgt,"cmp.gt(Rs16,Rt16)",(RsV>RtV))
+STD_CMPJUMP(cmpgtu,"cmp.gtu(Rs16,Rt16)",(fCAST4u(RsV)>RtV))
+
+
+
+/* V4 jump and transfer (CJ) */
+Q6INSN(J4_jumpseti,"Rd16=#U6 ; jump #r9:2",ATTRIBS(A_JDIR), "direct unconditional jump and set register to immediate",
+{fIMMEXT(riV); fPCALIGN(riV); RdV=UiV; fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);})
+
+Q6INSN(J4_jumpsetr,"Rd16=Rs16 ; jump #r9:2",ATTRIBS(A_JDIR), "direct unconditional jump and transfer register",
+{fIMMEXT(riV); fPCALIGN(riV); RdV=RsV; fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);})
+
+
+/* V4 new-value jumps (NCJ) */
+#define STD_CMPJUMPNEWRS(TAG,TST,TSTSEM)\
+Q6INSN(J4_##TAG##_jumpnv_t, "if ("TST") jump:t #r9:2", ATTRIBS(A_NEWVALUEJ),"compound compare-jump",{fBRANCH_SPECULATE_STALL(TSTSEM,,SPECULATE_TAKEN,13,0);if (TSTSEM) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})\
+Q6INSN(J4_##TAG##_jumpnv_nt,"if ("TST") jump:nt #r9:2",ATTRIBS(A_NEWVALUEJ),"compound compare-jump",{fBRANCH_SPECULATE_STALL(TSTSEM,,SPECULATE_NOT_TAKEN,13,0); if (TSTSEM) {fIMMEXT(riV); fPCALIGN(riV); fBRANCH(fREAD_PC()+riV,COF_TYPE_JUMP);}})
+
+
+
+
+STD_CMPJUMPNEWRS(cmpeqi_t,"cmp.eq(Ns8.new,#U5)",(fNEWREG(NsN)==(UiV)))
+STD_CMPJUMPNEWRS(cmpeqi_f,"!cmp.eq(Ns8.new,#U5)",(fNEWREG(NsN)!=(UiV)))
+STD_CMPJUMPNEWRS(cmpgti_t,"cmp.gt(Ns8.new,#U5)",(fNEWREG(NsN)>(UiV)))
+STD_CMPJUMPNEWRS(cmpgti_f,"!cmp.gt(Ns8.new,#U5)",!(fNEWREG(NsN)>(UiV)))
+STD_CMPJUMPNEWRS(cmpgtui_t,"cmp.gtu(Ns8.new,#U5)",(fCAST4u(fNEWREG(NsN))>(UiV)))
+STD_CMPJUMPNEWRS(cmpgtui_f,"!cmp.gtu(Ns8.new,#U5)",!(fCAST4u(fNEWREG(NsN))>(UiV)))
+
+
+STD_CMPJUMPNEWRS(cmpeqn1_t,"cmp.eq(Ns8.new,#-1)",(fNEWREG(NsN)==(-1)))
+STD_CMPJUMPNEWRS(cmpeqn1_f,"!cmp.eq(Ns8.new,#-1)",(fNEWREG(NsN)!=(-1)))
+STD_CMPJUMPNEWRS(cmpgtn1_t,"cmp.gt(Ns8.new,#-1)",(fNEWREG(NsN)>(-1)))
+STD_CMPJUMPNEWRS(cmpgtn1_f,"!cmp.gt(Ns8.new,#-1)",!(fNEWREG(NsN)>(-1)))
+STD_CMPJUMPNEWRS(tstbit0_t,"tstbit(Ns8.new,#0)",((fNEWREG(NsN)) & 1))
+STD_CMPJUMPNEWRS(tstbit0_f,"!tstbit(Ns8.new,#0)",!((fNEWREG(NsN)) & 1))
+
+
+STD_CMPJUMPNEWRS(cmpeq_t, "cmp.eq(Ns8.new,Rt32)", (fNEWREG(NsN)==RtV))
+STD_CMPJUMPNEWRS(cmpgt_t, "cmp.gt(Ns8.new,Rt32)", (fNEWREG(NsN)>RtV))
+STD_CMPJUMPNEWRS(cmpgtu_t,"cmp.gtu(Ns8.new,Rt32)",(fCAST4u(fNEWREG(NsN))>fCAST4u(RtV)))
+STD_CMPJUMPNEWRS(cmplt_t, "cmp.gt(Rt32,Ns8.new)", (RtV>fNEWREG(NsN)))
+STD_CMPJUMPNEWRS(cmpltu_t,"cmp.gtu(Rt32,Ns8.new)",(fCAST4u(RtV)>fCAST4u(fNEWREG(NsN))))
+STD_CMPJUMPNEWRS(cmpeq_f, "!cmp.eq(Ns8.new,Rt32)", (fNEWREG(NsN)!=RtV))
+STD_CMPJUMPNEWRS(cmpgt_f, "!cmp.gt(Ns8.new,Rt32)", !(fNEWREG(NsN)>RtV))
+STD_CMPJUMPNEWRS(cmpgtu_f,"!cmp.gtu(Ns8.new,Rt32)",!(fCAST4u(fNEWREG(NsN))>fCAST4u(RtV)))
+STD_CMPJUMPNEWRS(cmplt_f, "!cmp.gt(Rt32,Ns8.new)", !(RtV>fNEWREG(NsN)))
+STD_CMPJUMPNEWRS(cmpltu_f,"!cmp.gtu(Rt32,Ns8.new)",!(fCAST4u(RtV)>fCAST4u(fNEWREG(NsN))))
+
+
+
+
+
+/*********************************************/
+/* Subroutine Call instructions */
+/*********************************************/
+
+#define CDIR_STD A_CALL
+#define CINDIR_STD A_CALL,A_INDIRECT
+
+Q6INSN(J2_call,"call #r22:2",ATTRIBS(CDIR_STD), "direct unconditional call",
+{fIMMEXT(riV); fPCALIGN(riV); fCALL(fREAD_PC()+riV); })
+
+Q6INSN(J2_callt,"if (Pu4) call #r15:2",ATTRIBS(CDIR_STD),"direct conditional call if true",
+{fIMMEXT(riV); fPCALIGN(riV); fBRANCH_SPECULATE_STALL(fLSBOLD(PuV),,SPECULATE_NOT_TAKEN,12,0); if (fLSBOLD(PuV)) { fCALL(fREAD_PC()+riV); }})
+
+Q6INSN(J2_callf,"if (!Pu4) call #r15:2",ATTRIBS(CDIR_STD),"direct conditional call if false",
+{fIMMEXT(riV); fPCALIGN(riV); fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_NOT_TAKEN,12,0);if (fLSBOLDNOT(PuV)) { fCALL(fREAD_PC()+riV); }})
+
+Q6INSN(J2_callr,"callr Rs32",ATTRIBS(CINDIR_STD), "indirect unconditional call",
+{ fCALLR(RsV); })
+
+Q6INSN(J2_callrt,"if (Pu4) callr Rs32",ATTRIBS(CINDIR_STD),"indirect conditional call if true",
+{fBRANCH_SPECULATE_STALL(fLSBOLD(PuV),,SPECULATE_NOT_TAKEN,12,0);if (fLSBOLD(PuV)) { fCALLR(RsV); }})
+
+Q6INSN(J2_callrf,"if (!Pu4) callr Rs32",ATTRIBS(CINDIR_STD),"indirect conditional call if false",
+{fBRANCH_SPECULATE_STALL(fLSBOLDNOT(PuV),,SPECULATE_NOT_TAKEN,12,0);if (fLSBOLDNOT(PuV)) { fCALLR(RsV); }})
+
+
+
+
+/*********************************************/
+/* HW Loop instructions */
+/*********************************************/
+
+Q6INSN(J2_loop0r,"loop0(#r7:2,Rs32)",ATTRIBS(),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, RsV);
+ fSET_LPCFG(0);
+})
+
+Q6INSN(J2_loop1r,"loop1(#r7:2,Rs32)",ATTRIBS(),"Initialize HW loop 1",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS1(/*sa,lc*/ fREAD_PC()+riV, RsV);
+})
+
+Q6INSN(J2_loop0i,"loop0(#r7:2,#U10)",ATTRIBS(),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, UiV);
+ fSET_LPCFG(0);
+})
+
+Q6INSN(J2_loop1i,"loop1(#r7:2,#U10)",ATTRIBS(),"Initialize HW loop 1",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS1(/*sa,lc*/ fREAD_PC()+riV, UiV);
+})
+
+
+Q6INSN(J2_ploop1sr,"p3=sp1loop0(#r7:2,Rs32)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, RsV);
+ fSET_LPCFG(1);
+ fWRITE_P3(0);
+})
+Q6INSN(J2_ploop1si,"p3=sp1loop0(#r7:2,#U10)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, UiV);
+ fSET_LPCFG(1);
+ fWRITE_P3(0);
+})
+
+Q6INSN(J2_ploop2sr,"p3=sp2loop0(#r7:2,Rs32)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, RsV);
+ fSET_LPCFG(2);
+ fWRITE_P3(0);
+})
+Q6INSN(J2_ploop2si,"p3=sp2loop0(#r7:2,#U10)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, UiV);
+ fSET_LPCFG(2);
+ fWRITE_P3(0);
+})
+
+Q6INSN(J2_ploop3sr,"p3=sp3loop0(#r7:2,Rs32)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, RsV);
+ fSET_LPCFG(3);
+ fWRITE_P3(0);
+})
+Q6INSN(J2_ploop3si,"p3=sp3loop0(#r7:2,#U10)",ATTRIBS(A_ARCHV2),"Initialize HW loop 0",
+{ fIMMEXT(riV); fPCALIGN(riV);
+ fWRITE_LOOP_REGS0(/*sa,lc*/ fREAD_PC()+riV, UiV);
+ fSET_LPCFG(3);
+ fWRITE_P3(0);
+})
+
+
+
+Q6INSN(J2_endloop01,"endloop01",ATTRIBS(A_HWLOOP0_END,A_HWLOOP1_END),"Loopend for inner and outer loop",
+{
+
+ /* V2: With predicate control */
+ if (fGET_LPCFG) {
+ fHIDE( if (fGET_LPCFG >= 2) { /* Nothing */ } else )
+ if (fGET_LPCFG==1) {
+ fWRITE_P3(0xff);
+ }
+ fSET_LPCFG(fGET_LPCFG-1);
+ }
+
+ /* check if iterate */
+ if (fREAD_LC0>1) {
+ fBRANCH(fREAD_SA0,COF_TYPE_LOOPEND0);
+ /* decrement loop count */
+ fWRITE_LC0(fREAD_LC0-1);
+ } else {
+ /* check if iterate */
+ if (fREAD_LC1>1) {
+ fBRANCH(fREAD_SA1,COF_TYPE_LOOPEND1);
+ /* decrement loop count */
+ fWRITE_LC1(fREAD_LC1-1);
+ }
+ }
+
+})
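+
+/* The control flow of endloop01 in miniature: the inner loop (LC0/SA0)
+   is tested first, and only once it is exhausted does the outer loop
+   (LC1/SA1) get to branch, so a single packet can close both loops.
+   The sketch omits the LPCFG/P3 predicate bookkeeping above: */
+#if 0
+#include <stdint.h>
+static void endloop01_model(uint32_t *lc0, uint32_t *lc1,
+                            uint32_t sa0, uint32_t sa1, uint32_t *pc)
+{
+    if (*lc0 > 1) {
+        *pc = sa0; (*lc0)--;           /* iterate inner loop */
+    } else if (*lc1 > 1) {
+        *pc = sa1; (*lc1)--;           /* inner done: iterate outer loop */
+    }                                  /* else fall through */
+}
+#endif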
+
+Q6INSN(J2_endloop0,"endloop0",ATTRIBS(A_HWLOOP0_END),"Loopend for inner loop",
+{
+
+ /* V2: With predicate control */
+ if (fGET_LPCFG) {
+ fHIDE( if (fGET_LPCFG >= 2) { /* Nothing */ } else )
+ if (fGET_LPCFG==1) {
+ fWRITE_P3(0xff);
+ }
+ fSET_LPCFG(fGET_LPCFG-1);
+ }
+
+ /* check if iterate */
+ if (fREAD_LC0>1) {
+ fBRANCH(fREAD_SA0,COF_TYPE_LOOPEND0);
+ /* decrement loop count */
+ fWRITE_LC0(fREAD_LC0-1);
+ }
+})
+
+Q6INSN(J2_endloop1,"endloop1",ATTRIBS(A_HWLOOP1_END),"Loopend for outer loop",
+{
+ /* check if iterate */
+ if (fREAD_LC1>1) {
+ fBRANCH(fREAD_SA1,COF_TYPE_LOOPEND1);
+ /* decrement loop count */
+ fWRITE_LC1(fREAD_LC1-1);
+ }
+})
diff --git a/target/hexagon/imported/compare.idef b/target/hexagon/imported/compare.idef
new file mode 100644
index 000000000..abd016ffb
--- /dev/null
+++ b/target/hexagon/imported/compare.idef
@@ -0,0 +1,619 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Compare Instructions
+ */
+
+
+
+/*********************************************/
+/* Scalar compare instructions */
+/*********************************************/
+
+Q6INSN(C2_cmpeq,"Pd4=cmp.eq(Rs32,Rt32)",ATTRIBS(),
+"Compare for Equal",
+{PdV=f8BITSOF(RsV==RtV);})
+
+Q6INSN(C2_cmpgt,"Pd4=cmp.gt(Rs32,Rt32)",ATTRIBS(),
+"Compare for signed Greater Than",
+{PdV=f8BITSOF(RsV>RtV);})
+
+Q6INSN(C2_cmpgtu,"Pd4=cmp.gtu(Rs32,Rt32)",ATTRIBS(),
+"Compare for Greater Than Unsigned",
+{PdV=f8BITSOF(fCAST4u(RsV)>fCAST4u(RtV));})
+
+Q6INSN(C2_cmpeqp,"Pd4=cmp.eq(Rss32,Rtt32)",ATTRIBS(),
+"Compare for Equal",
+{PdV=f8BITSOF(RssV==RttV);})
+
+Q6INSN(C2_cmpgtp,"Pd4=cmp.gt(Rss32,Rtt32)",ATTRIBS(),
+"Compare for signed Greater Than",
+{PdV=f8BITSOF(RssV>RttV);})
+
+Q6INSN(C2_cmpgtup,"Pd4=cmp.gtu(Rss32,Rtt32)",ATTRIBS(),
+"Compare for Greater Than Unsigned",
+{PdV=f8BITSOF(fCAST8u(RssV)>fCAST8u(RttV));})
+
+
+
+
+/*********************************************/
+/* Compare and put result in GPR */
+/* typically for function I/O */
+/*********************************************/
+
+Q6INSN(A4_rcmpeqi,"Rd32=cmp.eq(Rs32,#s8)",ATTRIBS(),
+"Compare for Equal",
+{fIMMEXT(siV); RdV=(RsV==siV); })
+
+Q6INSN(A4_rcmpneqi,"Rd32=!cmp.eq(Rs32,#s8)",ATTRIBS(),
+"Compare for Equal",
+{fIMMEXT(siV); RdV=(RsV!=siV); })
+
+
+Q6INSN(A4_rcmpeq,"Rd32=cmp.eq(Rs32,Rt32)",ATTRIBS(),
+"Compare for Equal",
+{RdV=(RsV==RtV); })
+
+Q6INSN(A4_rcmpneq,"Rd32=!cmp.eq(Rs32,Rt32)",ATTRIBS(),
+"Compare for Equal",
+{RdV=(RsV!=RtV); })
+
+
+
+/*********************************************/
+/* Scalar compare instructions */
+/*********************************************/
+
+
+Q6INSN(C2_bitsset,"Pd4=bitsset(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits set",
+{PdV=f8BITSOF((RsV&RtV)==RtV);})
+
+Q6INSN(C2_bitsclr,"Pd4=bitsclr(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits clear",
+{PdV=f8BITSOF((RsV&RtV)==0);})
+
+
+Q6INSN(C4_nbitsset,"Pd4=!bitsset(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits set",
+{PdV=f8BITSOF((RsV&RtV)!=RtV);})
+
+Q6INSN(C4_nbitsclr,"Pd4=!bitsclr(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits clear",
+{PdV=f8BITSOF((RsV&RtV)!=0);})
+
+
+
+/*********************************************/
+/* Scalar compare instructions W/ immediate */
+/*********************************************/
+
+Q6INSN(C2_cmpeqi,"Pd4=cmp.eq(Rs32,#s10)",ATTRIBS(),
+"Compare for Equal",
+{fIMMEXT(siV); PdV=f8BITSOF(RsV==siV);})
+
+Q6INSN(C2_cmpgti,"Pd4=cmp.gt(Rs32,#s10)",ATTRIBS(),
+"Compare for signed Greater Than",
+{fIMMEXT(siV); PdV=f8BITSOF(RsV>siV);})
+
+Q6INSN(C2_cmpgtui,"Pd4=cmp.gtu(Rs32,#u9)",ATTRIBS(),
+"Compare for Greater Than Unsigned",
+{fIMMEXT(uiV); PdV=f8BITSOF(fCAST4u(RsV)>fCAST4u(uiV));})
+
+Q6INSN(C2_bitsclri,"Pd4=bitsclr(Rs32,#u6)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits clear",
+{PdV=f8BITSOF((RsV&uiV)==0);})
+
+Q6INSN(C4_nbitsclri,"Pd4=!bitsclr(Rs32,#u6)",ATTRIBS(A_ARCHV2),
+"Compare for selected bits clear",
+{PdV=f8BITSOF((RsV&uiV)!=0);})
+
+
+
+
+Q6INSN(C4_cmpneqi,"Pd4=!cmp.eq(Rs32,#s10)",ATTRIBS(), "Compare for Not Equal", {fIMMEXT(siV); PdV=f8BITSOF(RsV!=siV);})
+Q6INSN(C4_cmpltei,"Pd4=!cmp.gt(Rs32,#s10)",ATTRIBS(), "Compare for Less Than or Equal", {fIMMEXT(siV); PdV=f8BITSOF(RsV<=siV);})
+Q6INSN(C4_cmplteui,"Pd4=!cmp.gtu(Rs32,#u9)",ATTRIBS(), "Compare for Less Than or Equal Unsigned", {fIMMEXT(uiV); PdV=f8BITSOF(fCAST4u(RsV)<=fCAST4u(uiV));})
+
+Q6INSN(C4_cmpneq,"Pd4=!cmp.eq(Rs32,Rt32)",ATTRIBS(), "Compare for Not Equal", {PdV=f8BITSOF(RsV!=RtV);})
+Q6INSN(C4_cmplte,"Pd4=!cmp.gt(Rs32,Rt32)",ATTRIBS(), "Compare for signed Less Than or Equal", {PdV=f8BITSOF(RsV<=RtV);})
+Q6INSN(C4_cmplteu,"Pd4=!cmp.gtu(Rs32,Rt32)",ATTRIBS(), "Compare for Less Than or Equal Unsigned", {PdV=f8BITSOF(fCAST4u(RsV)<=fCAST4u(RtV));})
+
+
+
+
+
+/* Predicate Logical Operations */
+
+Q6INSN(C2_and,"Pd4=and(Pt4,Ps4)",ATTRIBS(A_CRSLOT23),
+"Predicate AND",
+{PdV=PsV & PtV;})
+
+Q6INSN(C2_or,"Pd4=or(Pt4,Ps4)",ATTRIBS(A_CRSLOT23),
+"Predicate OR",
+{PdV=PsV | PtV;})
+
+Q6INSN(C2_xor,"Pd4=xor(Ps4,Pt4)",ATTRIBS(A_CRSLOT23),
+"Predicate XOR",
+{PdV=PsV ^ PtV;})
+
+Q6INSN(C2_andn,"Pd4=and(Pt4,!Ps4)",ATTRIBS(A_CRSLOT23),
+"Predicate AND NOT",
+{PdV=PtV & (~PsV);})
+
+Q6INSN(C2_not,"Pd4=not(Ps4)",ATTRIBS(A_CRSLOT23),
+"Logical NOT Predicate",
+{PdV=~PsV;})
+
+Q6INSN(C2_orn,"Pd4=or(Pt4,!Ps4)",ATTRIBS(A_ARCHV2,A_CRSLOT23),
+"Predicate OR NOT",
+{PdV=PtV | (~PsV);})
+
+
+
+
+
+Q6INSN(C4_and_and,"Pd4=and(Ps4,and(Pt4,Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound And-And", { PdV = PsV & PtV & PuV; })
+
+Q6INSN(C4_and_or,"Pd4=and(Ps4,or(Pt4,Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound And-Or", { PdV = PsV & (PtV | PuV); })
+
+Q6INSN(C4_or_and,"Pd4=or(Ps4,and(Pt4,Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound Or-And", { PdV = PsV | (PtV & PuV); })
+
+Q6INSN(C4_or_or,"Pd4=or(Ps4,or(Pt4,Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound Or-Or", { PdV = PsV | PtV | PuV; })
+
+
+
+Q6INSN(C4_and_andn,"Pd4=and(Ps4,and(Pt4,!Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound And-And", { PdV = PsV & PtV & (~PuV); })
+
+Q6INSN(C4_and_orn,"Pd4=and(Ps4,or(Pt4,!Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound And-Or", { PdV = PsV & (PtV | (~PuV)); })
+
+Q6INSN(C4_or_andn,"Pd4=or(Ps4,and(Pt4,!Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound Or-And", { PdV = PsV | (PtV & (~PuV)); })
+
+Q6INSN(C4_or_orn,"Pd4=or(Ps4,or(Pt4,!Pu4))",ATTRIBS(A_CRSLOT23),
+"Compound Or-Or", { PdV = PsV | PtV | (~PuV); })
+
+
+Q6INSN(C2_any8,"Pd4=any8(Ps4)",ATTRIBS(A_CRSLOT23),
+"Logical ANY of low 8 predicate bits",
+{ PdV = (PsV ? 0xff : 0x00); })
+
+Q6INSN(C2_all8,"Pd4=all8(Ps4)",ATTRIBS(A_CRSLOT23),
+"Logical ALL of low 8 predicate bits",
+{ PdV = (PsV == 0xff ? 0xff : 0x00); })
+
+Q6INSN(C2_vitpack,"Rd32=vitpack(Ps4,Pt4)",ATTRIBS(),
+"Pack the odd and even bits of two predicate registers",
+{ RdV = (PsV&0x55) | (PtV&0xAA); })
+
+/* Mux instructions */
+
+Q6INSN(C2_mux,"Rd32=mux(Pu4,Rs32,Rt32)",ATTRIBS(),
+"Scalar MUX",
+{ RdV = (fLSBOLD(PuV) ? RsV : RtV); })
+
+
+Q6INSN(C2_cmovenewit,"if (Pu4.new) Rd32=#s12",ATTRIBS(A_ARCHV2),
+"Scalar conditional move",
+{ fIMMEXT(siV); if (fLSBNEW(PuN)) RdV=siV; else CANCEL;})
+
+Q6INSN(C2_cmovenewif,"if (!Pu4.new) Rd32=#s12",ATTRIBS(A_ARCHV2),
+"Scalar conditional move",
+{ fIMMEXT(siV); if (fLSBNEWNOT(PuN)) RdV=siV; else CANCEL;})
+
+Q6INSN(C2_cmoveit,"if (Pu4) Rd32=#s12",ATTRIBS(A_ARCHV2),
+"Scalar conditional move",
+{ fIMMEXT(siV); if (fLSBOLD(PuV)) RdV=siV; else CANCEL;})
+
+Q6INSN(C2_cmoveif,"if (!Pu4) Rd32=#s12",ATTRIBS(A_ARCHV2),
+"Scalar conditional move",
+{ fIMMEXT(siV); if (fLSBOLDNOT(PuV)) RdV=siV; else CANCEL;})
+
+
+
+Q6INSN(C2_ccombinewnewt,"if (Pu4.new) Rdd32=combine(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Conditionally combine two words into a register pair",
+{ if (fLSBNEW(PuN)) {
+ fSETWORD(0,RddV,RtV);
+ fSETWORD(1,RddV,RsV);
+ } else {CANCEL;}
+})
+
+Q6INSN(C2_ccombinewnewf,"if (!Pu4.new) Rdd32=combine(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Conditionally combine two words into a register pair",
+{ if (fLSBNEWNOT(PuN)) {
+ fSETWORD(0,RddV,RtV);
+ fSETWORD(1,RddV,RsV);
+ } else {CANCEL;}
+})
+
+Q6INSN(C2_ccombinewt,"if (Pu4) Rdd32=combine(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Conditionally combine two words into a register pair",
+{ if (fLSBOLD(PuV)) {
+ fSETWORD(0,RddV,RtV);
+ fSETWORD(1,RddV,RsV);
+ } else {CANCEL;}
+})
+
+Q6INSN(C2_ccombinewf,"if (!Pu4) Rdd32=combine(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Conditionally combine two words into a register pair",
+{ if (fLSBOLDNOT(PuV)) {
+ fSETWORD(0,RddV,RtV);
+ fSETWORD(1,RddV,RsV);
+ } else {CANCEL;}
+})
+
+
+
+Q6INSN(C2_muxii,"Rd32=mux(Pu4,#s8,#S8)",ATTRIBS(A_ARCHV2),
+"Scalar MUX immediates",
+{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? siV : SiV); })
+
+
+
+Q6INSN(C2_muxir,"Rd32=mux(Pu4,Rs32,#s8)",ATTRIBS(A_ARCHV2),
+"Scalar MUX register immediate",
+{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? RsV : siV); })
+
+
+Q6INSN(C2_muxri,"Rd32=mux(Pu4,#s8,Rs32)",ATTRIBS(A_ARCHV2),
+"Scalar MUX register immediate",
+{ fIMMEXT(siV); RdV = (fLSBOLD(PuV) ? siV : RsV); })
+
+
+
+Q6INSN(C2_vmux,"Rdd32=vmux(Pu4,Rss32,Rtt32)",ATTRIBS(),
+"Vector MUX",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,(fGETBIT(i,PuV)?(fGETBYTE(i,RssV)):(fGETBYTE(i,RttV))));
+ }
+})
+
+Q6INSN(C2_mask,"Rdd32=mask(Pt4)",ATTRIBS(),
+"Vector Mask Generation",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBYTE(i,RddV,(fGETBIT(i,PtV)?(0xff):(0x00)));
+ }
+})
+
+/* VCMP */
+
+Q6INSN(A2_vcmpbeq,"Pd4=vcmpb.eq(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETBYTE(i,RssV) == fGETBYTE(i,RttV)));
+ }
+})
+
+Q6INSN(A4_vcmpbeqi,"Pd4=vcmpb.eq(Rss32,#u8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETUBYTE(i,RssV) == uiV));
+ }
+})
+
+Q6INSN(A4_vcmpbeq_any,"Pd4=any8(vcmpb.eq(Rss32,Rtt32))",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ PdV = 0;
+ for (i = 0; i < 8; i++) {
+ if (fGETBYTE(i,RssV) == fGETBYTE(i,RttV)) PdV = 0xff;
+ }
+})
+
+Q6INSN(A6_vcmpbeq_notany,"Pd4=!any8(vcmpb.eq(Rss32,Rtt32))",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ PdV = 0;
+ for (i = 0; i < 8; i++) {
+ if (fGETBYTE(i,RssV) == fGETBYTE(i,RttV)) PdV = 0xff;
+ }
+ PdV = ~PdV;
+})
+
+Q6INSN(A2_vcmpbgtu,"Pd4=vcmpb.gtu(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETUBYTE(i,RssV) > fGETUBYTE(i,RttV)));
+ }
+})
+
+Q6INSN(A4_vcmpbgtui,"Pd4=vcmpb.gtu(Rss32,#u7)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETUBYTE(i,RssV) > uiV));
+ }
+})
+
+Q6INSN(A4_vcmpbgt,"Pd4=vcmpb.gt(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETBYTE(i,RssV) > fGETBYTE(i,RttV)));
+ }
+})
+
+Q6INSN(A4_vcmpbgti,"Pd4=vcmpb.gt(Rss32,#s8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 8; i++) {
+ fSETBIT(i,PdV,(fGETBYTE(i,RssV) > siV));
+ }
+})
+
+
+
+Q6INSN(A4_cmpbeq,"Pd4=cmpb.eq(Rs32,Rt32)",ATTRIBS(),
+"Compare bytes ",
+{
+ PdV=f8BITSOF(fGETBYTE(0,RsV) == fGETBYTE(0,RtV));
+})
+
+Q6INSN(A4_cmpbeqi,"Pd4=cmpb.eq(Rs32,#u8)",ATTRIBS(),
+"Compare bytes ",
+{
+ PdV=f8BITSOF(fGETUBYTE(0,RsV) == uiV);
+})
+
+Q6INSN(A4_cmpbgtu,"Pd4=cmpb.gtu(Rs32,Rt32)",ATTRIBS(),
+"Compare bytes ",
+{
+ PdV=f8BITSOF(fGETUBYTE(0,RsV) > fGETUBYTE(0,RtV));
+})
+
+Q6INSN(A4_cmpbgtui,"Pd4=cmpb.gtu(Rs32,#u7)",ATTRIBS(),
+"Compare bytes ",
+{
+ fIMMEXT(uiV);
+ PdV=f8BITSOF(fGETUBYTE(0,RsV) > fCAST4u(uiV));
+})
+
+Q6INSN(A4_cmpbgt,"Pd4=cmpb.gt(Rs32,Rt32)",ATTRIBS(),
+"Compare bytes ",
+{
+ PdV=f8BITSOF(fGETBYTE(0,RsV) > fGETBYTE(0,RtV));
+})
+
+Q6INSN(A4_cmpbgti,"Pd4=cmpb.gt(Rs32,#s8)",ATTRIBS(),
+"Compare bytes ",
+{
+ PdV=f8BITSOF(fGETBYTE(0,RsV) > siV);
+})
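+
+/*
+ * Sketch of the f8BITSOF convention used by the scalar compares above: a
+ * single comparison result is broadcast into all eight bits of the
+ * destination predicate. Illustrative only; the sketch_* names are
+ * assumptions, and the block is under #if 0 so it stays documentation.
+ */
+#if 0
+#include <stdint.h>
+
+static uint8_t sketch_8bitsof(int cond)
+{
+    return cond ? 0xff : 0x00;
+}
+
+/* e.g. A4_cmpbeq: compare byte 0 only, then broadcast the result */
+static uint8_t sketch_cmpbeq(uint32_t rs, uint32_t rt)
+{
+    return sketch_8bitsof((rs & 0xff) == (rt & 0xff));
+}
+#endif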
+
+Q6INSN(A2_vcmpheq,"Pd4=vcmph.eq(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2,PdV, (fGETHALF(i,RssV) == fGETHALF(i,RttV)));
+ fSETBIT(i*2+1,PdV,(fGETHALF(i,RssV) == fGETHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vcmphgt,"Pd4=vcmph.gt(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2, PdV, (fGETHALF(i,RssV) > fGETHALF(i,RttV)));
+ fSETBIT(i*2+1,PdV, (fGETHALF(i,RssV) > fGETHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A2_vcmphgtu,"Pd4=vcmph.gtu(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2, PdV, (fGETUHALF(i,RssV) > fGETUHALF(i,RttV)));
+ fSETBIT(i*2+1,PdV, (fGETUHALF(i,RssV) > fGETUHALF(i,RttV)));
+ }
+})
+
+Q6INSN(A4_vcmpheqi,"Pd4=vcmph.eq(Rss32,#s8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2,PdV, (fGETHALF(i,RssV) == siV));
+ fSETBIT(i*2+1,PdV,(fGETHALF(i,RssV) == siV));
+ }
+})
+
+Q6INSN(A4_vcmphgti,"Pd4=vcmph.gt(Rss32,#s8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2, PdV, (fGETHALF(i,RssV) > siV));
+ fSETBIT(i*2+1,PdV, (fGETHALF(i,RssV) > siV));
+ }
+})
+
+
+Q6INSN(A4_vcmphgtui,"Pd4=vcmph.gtu(Rss32,#u7)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ fSETBIT(i*2, PdV, (fGETUHALF(i,RssV) > uiV));
+ fSETBIT(i*2+1,PdV, (fGETUHALF(i,RssV) > uiV));
+ }
+})
+
+Q6INSN(A4_cmpheq,"Pd4=cmph.eq(Rs32,Rt32)",ATTRIBS(),
+"Compare halfwords ",
+{
+ PdV=f8BITSOF(fGETHALF(0,RsV) == fGETHALF(0,RtV));
+})
+
+Q6INSN(A4_cmphgt,"Pd4=cmph.gt(Rs32,Rt32)",ATTRIBS(),
+"Compare halfwords ",
+{
+ PdV=f8BITSOF(fGETHALF(0,RsV) > fGETHALF(0,RtV));
+})
+
+Q6INSN(A4_cmphgtu,"Pd4=cmph.gtu(Rs32,Rt32)",ATTRIBS(),
+"Compare halfwords ",
+{
+ PdV=f8BITSOF(fGETUHALF(0,RsV) > fGETUHALF(0,RtV));
+})
+
+Q6INSN(A4_cmpheqi,"Pd4=cmph.eq(Rs32,#s8)",ATTRIBS(),
+"Compare halfwords ",
+{
+ fIMMEXT(siV);
+ PdV=f8BITSOF(fGETHALF(0,RsV) == siV);
+})
+
+Q6INSN(A4_cmphgti,"Pd4=cmph.gt(Rs32,#s8)",ATTRIBS(),
+"Compare halfwords ",
+{
+ fIMMEXT(siV);
+ PdV=f8BITSOF(fGETHALF(0,RsV) > siV);
+})
+
+Q6INSN(A4_cmphgtui,"Pd4=cmph.gtu(Rs32,#u7)",ATTRIBS(),
+"Compare halfwords ",
+{
+ fIMMEXT(uiV);
+ PdV=f8BITSOF(fGETUHALF(0,RsV) > fCAST4u(uiV));
+})
+
+Q6INSN(A2_vcmpweq,"Pd4=vcmpw.eq(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETWORD(0,RssV)==fGETWORD(0,RttV)));
+ fSETBITS(7,4,PdV,(fGETWORD(1,RssV)==fGETWORD(1,RttV)));
+})
+
+Q6INSN(A2_vcmpwgt,"Pd4=vcmpw.gt(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETWORD(0,RssV)>fGETWORD(0,RttV)));
+ fSETBITS(7,4,PdV,(fGETWORD(1,RssV)>fGETWORD(1,RttV)));
+})
+
+Q6INSN(A2_vcmpwgtu,"Pd4=vcmpw.gtu(Rss32,Rtt32)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETUWORD(0,RssV)>fGETUWORD(0,RttV)));
+ fSETBITS(7,4,PdV,(fGETUWORD(1,RssV)>fGETUWORD(1,RttV)));
+})
+
+Q6INSN(A4_vcmpweqi,"Pd4=vcmpw.eq(Rss32,#s8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETWORD(0,RssV)==siV));
+ fSETBITS(7,4,PdV,(fGETWORD(1,RssV)==siV));
+})
+
+Q6INSN(A4_vcmpwgti,"Pd4=vcmpw.gt(Rss32,#s8)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETWORD(0,RssV)>siV));
+ fSETBITS(7,4,PdV,(fGETWORD(1,RssV)>siV));
+})
+
+Q6INSN(A4_vcmpwgtui,"Pd4=vcmpw.gtu(Rss32,#u7)",ATTRIBS(),
+"Compare elements of two vectors ",
+{
+ fSETBITS(3,0,PdV,(fGETUWORD(0,RssV)>fCAST4u(uiV)));
+ fSETBITS(7,4,PdV,(fGETUWORD(1,RssV)>fCAST4u(uiV)));
+})
+
+Q6INSN(A4_boundscheck_hi,"Pd4=boundscheck(Rss32,Rtt32):raw:hi",ATTRIBS(),
+"Detect if a register is within bounds",
+{
+ fHIDE(size4u_t src;)
+ src = fGETUWORD(1,RssV);
+ PdV = f8BITSOF((fCAST4u(src) >= fGETUWORD(0,RttV)) && (fCAST4u(src) < fGETUWORD(1,RttV)));
+})
+
+Q6INSN(A4_boundscheck_lo,"Pd4=boundscheck(Rss32,Rtt32):raw:lo",ATTRIBS(),
+"Detect if a register is within bounds",
+{
+ fHIDE(size4u_t src;)
+ src = fGETUWORD(0,RssV);
+ PdV = f8BITSOF((fCAST4u(src) >= fGETUWORD(0,RttV)) && (fCAST4u(src) < fGETUWORD(1,RttV)));
+})
+
+Q6INSN(A4_tlbmatch,"Pd4=tlbmatch(Rss32,Rt32)",ATTRIBS(),
+"Detect if a VA/ASID matches a TLB entry",
+{
+ fHIDE(size4u_t TLBHI; size4u_t TLBLO; size4u_t MASK; size4u_t SIZE;)
+ MASK = 0x07ffffff;
+ TLBLO = fGETUWORD(0,RssV);
+ TLBHI = fGETUWORD(1,RssV);
+ SIZE = fMIN(6,fCL1_4(~fBREV_4(TLBLO)));
+ MASK &= (0xffffffff << 2*SIZE);
+ PdV = f8BITSOF(fGETBIT(31,TLBHI) && ((TLBHI & MASK) == (RtV & MASK)));
+})
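+
+/*
+ * Sketch of the SIZE computation in A4_tlbmatch above: counting the
+ * leading ones of the complemented bit-reverse, fCL1_4(~fBREV_4(x)),
+ * works out to counting the trailing zeros of x, so the position of the
+ * lowest set bit of TLBLO encodes the page size, capped at 6. The helper
+ * name is an assumption; the block is under #if 0 as documentation.
+ */
+#if 0
+#include <stdint.h>
+
+static unsigned sketch_tlb_size(uint32_t tlblo)
+{
+    unsigned n = 0;
+    while (n < 6 && !((tlblo >> n) & 1)) {
+        n++;    /* count trailing zeros, saturating at 6 */
+    }
+    return n;
+}
+#endif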
+
+Q6INSN(C2_tfrpr,"Rd32=Ps4",ATTRIBS(),
+"Transfer predicate to general register", { RdV = fZXTN(8,32,PsV); })
+
+Q6INSN(C2_tfrrp,"Pd4=Rs32",ATTRIBS(),
+"Transfer general register to Predicate", { PdV = fGETUBYTE(0,RsV); })
+
+Q6INSN(C4_fastcorner9,"Pd4=fastcorner9(Ps4,Pt4)",ATTRIBS(A_CRSLOT23),
+"Determine whether the predicate sources define a corner",
+{
+ fHIDE(size4u_t tmp = 0; size4u_t i;)
+ fSETHALF(0,tmp,(PsV<<8)|PtV);
+ fSETHALF(1,tmp,(PsV<<8)|PtV);
+ for (i = 1; i < 9; i++) {
+ tmp &= tmp >> 1;
+ }
+ PdV = f8BITSOF(tmp != 0);
+})
+
+Q6INSN(C4_fastcorner9_not,"Pd4=!fastcorner9(Ps4,Pt4)",ATTRIBS(A_CRSLOT23),
+"Determine whether the predicate sources define a corner",
+{
+ fHIDE(size4u_t tmp = 0; size4u_t i;)
+ fSETHALF(0,tmp,(PsV<<8)|PtV);
+ fSETHALF(1,tmp,(PsV<<8)|PtV);
+ for (i = 1; i < 9; i++) {
+ tmp &= tmp >> 1;
+ }
+ PdV = f8BITSOF(tmp == 0);
+})
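+
+/*
+ * Sketch of the bit trick in fastcorner9 above: the 16-bit Ps:Pt value is
+ * replicated into both halves of a 32-bit word so that a run wrapping
+ * around the predicate pair becomes contiguous; eight rounds of
+ * tmp &= tmp >> 1 then leave a nonzero word exactly when some run of nine
+ * consecutive 1 bits exists. Assumed names, kept under #if 0.
+ */
+#if 0
+#include <stdint.h>
+
+static int sketch_fastcorner9(uint8_t ps, uint8_t pt)
+{
+    uint16_t half = (uint16_t)((ps << 8) | pt);
+    uint32_t tmp = ((uint32_t)half << 16) | half;   /* both halfwords */
+    for (int i = 1; i < 9; i++) {
+        tmp &= tmp >> 1;  /* bit n survives iff bits n..n+i are all set */
+    }
+    /* e.g. ps=0xff, pt=0x80 (nine 1s) -> 1; ps=0xff, pt=0x00 -> 0 */
+    return tmp != 0;
+}
+#endif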
diff --git a/target/hexagon/imported/encode.def b/target/hexagon/imported/encode.def
new file mode 100644
index 000000000..e40e7fbff
--- /dev/null
+++ b/target/hexagon/imported/encode.def
@@ -0,0 +1,125 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This just includes all encoding files
+ */
+
+#ifndef DEF_FIELD32
+#define __SELF_DEF_FIELD32
+#define DEF_FIELD32(...) /* nothing */
+#endif
+
+#ifndef DEF_CLASS32
+#define __SELF_DEF_CLASS32
+#define DEF_CLASS32(...) /* nothing */
+#endif
+
+#ifndef DEF_ANTICLASS32
+#define __SELF_DEF_ANTICLASS32
+#define DEF_ANTICLASS32(...) /* nothing */
+#endif
+
+#ifndef LEGACY_DEF_ENC32
+#define __SELF_DEF_LEGACY_DEF_ENC32
+#define LEGACY_DEF_ENC32(...) /* nothing */
+#endif
+
+#ifndef DEF_FIELDROW_DESC32
+#define __SELF_DEF_FIELDROW_DESC32
+#define DEF_FIELDROW_DESC32(...) /* nothing */
+#endif
+
+#ifndef DEF_ENC32
+#define __SELF_DEF_ENC32
+#define DEF_ENC32(...) /* nothing */
+#endif
+
+#ifndef DEF_PACKED32
+#define __SELF_DEF_PACKED32
+#define DEF_PACKED32(...) /* nothing */
+#endif
+
+#ifndef DEF_ENC_SUBINSN
+#define __SELF_DEF_ENC_SUBINSN
+#define DEF_ENC_SUBINSN(...) /* nothing */
+#endif
+
+#ifndef DEF_EXT_ENC
+#define __SELF_DEF_EXT_ENC
+#define DEF_EXT_ENC(...) /* nothing */
+#endif
+
+#ifndef DEF_EXT_SPACE
+#define __SELF_DEF_EXT_SPACE
+#define DEF_EXT_SPACE(...) /* nothing */
+#endif
+
+#include "encode_pp.def"
+#include "encode_subinsn.def"
+#include "allextenc.def"
+
+#ifdef __SELF_DEF_FIELD32
+#undef __SELF_DEF_FIELD32
+#undef DEF_FIELD32
+#endif
+
+#ifdef __SELF_DEF_CLASS32
+#undef __SELF_DEF_CLASS32
+#undef DEF_CLASS32
+#endif
+
+#ifdef __SELF_DEF_ANTICLASS32
+#undef __SELF_DEF_ANTICLASS32
+#undef DEF_ANTICLASS32
+#endif
+
+#ifdef __SELF_DEF_LEGACY_DEF_ENC32
+#undef __SELF_DEF_LEGACY_DEF_ENC32
+#undef LEGACY_DEF_ENC32
+#endif
+
+#ifdef __SELF_DEF_FIELDROW_DESC32
+#undef __SELF_DEF_FIELDROW_DESC32
+#undef DEF_FIELDROW_DESC32
+#endif
+
+#ifdef __SELF_DEF_ENC32
+#undef __SELF_DEF_ENC32
+#undef DEF_ENC32
+#endif
+
+#ifdef __SELF_DEF_EXT_SPACE
+#undef __SELF_DEF_EXT_SPACE
+#undef DEF_EXT_SPACE
+#endif
+
+
+#ifdef __SELF_DEF_PACKED32
+#undef __SELF_DEF_PACKED32
+#undef DEF_PACKED32
+#endif
+
+#ifdef __SELF_DEF_ENC_SUBINSN
+#undef __SELF_DEF_ENC_SUBINSN
+#undef DEF_ENC_SUBINSN
+#endif
+
+#ifdef __SELF_DEF_EXT_ENC
+#undef __SELF_DEF_EXT_ENC
+#undef DEF_EXT_ENC
+#endif
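+
+/*
+ * A minimal sketch of how a consumer translation unit (not this header)
+ * can drive the X-macro scheme above: define only the DEF_* hooks you
+ * care about, include encode.def, and every hook you left undefined is
+ * stubbed out to nothing. Assumes the imported .def files are on the
+ * include path; kept under #if 0 here as documentation.
+ */
+#if 0
+#include <stdio.h>
+
+typedef struct { const char *tag; const char *enc; } enc_entry;
+
+static const enc_entry enc_table[] = {
+#define DEF_ENC32(tag, enc)  { #tag, enc },
+#include "encode.def"   /* all other DEF_* hooks default to nothing */
+#undef DEF_ENC32
+};
+
+int main(void)
+{
+    for (size_t i = 0; i < sizeof(enc_table) / sizeof(enc_table[0]); i++) {
+        printf("%-24s %s\n", enc_table[i].tag, enc_table[i].enc);
+    }
+    return 0;
+}
+#endif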
diff --git a/target/hexagon/imported/encode_pp.def b/target/hexagon/imported/encode_pp.def
new file mode 100644
index 000000000..939c6fc55
--- /dev/null
+++ b/target/hexagon/imported/encode_pp.def
@@ -0,0 +1,2143 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Encodings for 32 bit instructions
+ *
+ */
+
+
+
+
+DEF_CLASS32("---- ---- -------- PP------ --------",ALL_PP)
+DEF_FIELD32("---- ---- -------- !!------ --------",Parse,"Packet/Loop parse bits")
+DEF_FIELD32("!!!! ---- -------- PP------ --------",ICLASS,"Instruction Class")
+
+#define ICLASS_EXTENDER "0000"
+#define ICLASS_CJ "0001"
+#define ICLASS_NCJ "0010"
+#define ICLASS_V4LDST "0011"
+#define ICLASS_V2LDST "0100"
+#define ICLASS_J "0101"
+#define ICLASS_CR "0110"
+#define ICLASS_ALU2op "0111"
+#define ICLASS_S2op "1000"
+#define ICLASS_LD "1001"
+#define ICLASS_ST "1010"
+#define ICLASS_ADDI "1011"
+#define ICLASS_S3op "1100"
+#define ICLASS_ALU64 "1101"
+#define ICLASS_M "1110"
+#define ICLASS_ALU3op "1111"
+
+
+
+/*******************************/
+/* */
+/* */
+/* V4 Immediate Payload */
+/* */
+/* */
+/*******************************/
+
+DEF_CLASS32(ICLASS_EXTENDER" ---- -------- PP------ --------",EXTENDER)
+DEF_ENC32(A4_ext, ICLASS_EXTENDER "iiii iiiiiiii PPiiiiii iiiiiiii")
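+
+/*
+ * Sketch of how an encoding pattern string such as A4_ext's can be turned
+ * into a mask/match pair for a decoder: '0'/'1' are fixed bits; letters
+ * ('i', 's', 'P', ...) and '-' mark operand, parse, or don't-care
+ * positions; spaces are formatting only. The helper is an assumption,
+ * not part of this file's machinery, and is kept under #if 0.
+ */
+#if 0
+#include <stdint.h>
+
+static void sketch_pattern_to_mask(const char *pat,
+                                   uint32_t *mask, uint32_t *match)
+{
+    *mask = *match = 0;
+    for (; *pat; pat++) {
+        if (*pat == ' ') {
+            continue;               /* readability spacing */
+        }
+        *mask <<= 1;
+        *match <<= 1;
+        if (*pat == '0' || *pat == '1') {
+            *mask |= 1;
+            *match |= (*pat == '1');
+        }
+    }
+    /* For A4_ext this yields mask == 0xf0000000, match == 0x00000000 */
+}
+#endif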
+
+
+
+/*******************************/
+/* */
+/* */
+/* V2 PREDICATED LD/ST */
+/* */
+/* */
+/*******************************/
+
+DEF_CLASS32(ICLASS_V2LDST" ---- -------- PP------ --------",V2LDST)
+DEF_CLASS32(ICLASS_V2LDST" ---1 -------- PP------ --------",V2LD)
+DEF_CLASS32(ICLASS_V2LDST" ---0 -------- PP------ --------",V2ST)
+DEF_CLASS32(ICLASS_V2LDST" 0--1 -------- PP------ --------",PLD)
+DEF_CLASS32(ICLASS_V2LDST" 0--0 -------- PP------ --------",PST)
+DEF_CLASS32(ICLASS_V2LDST" 1--1 -------- PP------ --------",GPLD)
+DEF_CLASS32(ICLASS_V2LDST" 1--0 -------- PP------ --------",GPST)
+
+DEF_FIELD32(ICLASS_V2LDST" 0!-- -------- PP------ --------",PMEM_Sense,"Sense")
+DEF_FIELD32(ICLASS_V2LDST" 0-!- -------- PP------ --------",PMEM_PredNew,"PredNew")
+DEF_FIELD32(ICLASS_V2LDST" ---1 !!------ PP------ --------",PMEML_Type,"Type")
+DEF_FIELD32(ICLASS_V2LDST" ---1 --!----- PP------ --------",PMEML_UN,"Unsigned")
+DEF_FIELD32(ICLASS_V2LDST" ---0 !!!----- PP------ --------",PMEMS_Type,"Type")
+
+#define STD_PLD_IOENC(TAG,OPC) \
+DEF_ENC32(L2_pload##TAG##t_io, ICLASS_V2LDST" 0001 "OPC" sssss PP0ttiii iiiddddd")\
+DEF_ENC32(L2_pload##TAG##f_io, ICLASS_V2LDST" 0101 "OPC" sssss PP0ttiii iiiddddd")\
+DEF_ENC32(L2_pload##TAG##tnew_io,ICLASS_V2LDST" 0011 "OPC" sssss PP0ttiii iiiddddd")\
+DEF_ENC32(L2_pload##TAG##fnew_io,ICLASS_V2LDST" 0111 "OPC" sssss PP0ttiii iiiddddd")
+
+STD_PLD_IOENC(rb, "000")
+STD_PLD_IOENC(rub, "001")
+STD_PLD_IOENC(rh, "010")
+STD_PLD_IOENC(ruh, "011")
+STD_PLD_IOENC(ri, "100")
+STD_PLD_IOENC(rd, "110") /* note dest reg field LSB=0, 1 is reserved */
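+
+/*
+ * Stand-alone sketch of the token-pasting scheme used by STD_PLD_IOENC
+ * and the other families in this file: one invocation fans out into
+ * several DEF_ENC32 entries whose names share a TAG. The two-entry demo
+ * macro is an assumption for illustration; it would live in a separate
+ * demo translation unit, so it is kept under #if 0 here.
+ */
+#if 0
+#include <stdio.h>
+
+#define DEF_ENC32(tag, enc)  printf("%-22s %s\n", #tag, enc);
+
+#define DEMO_PLD(TAG, OPC) \
+    DEF_ENC32(L2_pload##TAG##t_io, "0100 0001 " OPC " sssss PP0ttiii iiiddddd") \
+    DEF_ENC32(L2_pload##TAG##f_io, "0100 0101 " OPC " sssss PP0ttiii iiiddddd")
+
+int main(void)
+{
+    DEMO_PLD(rb, "000")  /* expands to L2_ploadrbt_io / L2_ploadrbf_io */
+    return 0;
+}
+#endif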
+
+
+
+#define STD_PST_IOENC(TAG,OPC,SRC) \
+DEF_ENC32(S2_pstore##TAG##t_io, ICLASS_V2LDST" 0000 "OPC" sssss PPi"SRC" iiiii0vv")\
+DEF_ENC32(S2_pstore##TAG##f_io, ICLASS_V2LDST" 0100 "OPC" sssss PPi"SRC" iiiii0vv")\
+DEF_ENC32(S4_pstore##TAG##tnew_io,ICLASS_V2LDST" 0010 "OPC" sssss PPi"SRC" iiiii0vv")\
+DEF_ENC32(S4_pstore##TAG##fnew_io,ICLASS_V2LDST" 0110 "OPC" sssss PPi"SRC" iiiii0vv")
+
+STD_PST_IOENC(rb, "000","ttttt")
+STD_PST_IOENC(rh, "010","ttttt")
+STD_PST_IOENC(rf, "011","ttttt")
+STD_PST_IOENC(ri, "100","ttttt")
+STD_PST_IOENC(rd, "110","ttttt")
+STD_PST_IOENC(rbnew, "101","00ttt")
+STD_PST_IOENC(rhnew, "101","01ttt")
+STD_PST_IOENC(rinew, "101","10ttt")
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* V2 GP-RELATIVE LD/ST */
+/* */
+/* */
+/*******************************/
+#define STD_LD_GP(TAG,OPC) \
+DEF_ENC32(L2_load##TAG##gp, ICLASS_V2LDST" 1ii1 "OPC" iiiii PPiiiiii iiiddddd")
+
+STD_LD_GP(rb, "000")
+STD_LD_GP(rub, "001")
+STD_LD_GP(rh, "010")
+STD_LD_GP(ruh, "011")
+STD_LD_GP(ri, "100")
+STD_LD_GP(rd, "110") /* note dest reg field LSB=0, 1 is reserved */
+
+#define STD_ST_GP(TAG,OPC,SRC) \
+DEF_ENC32(S2_store##TAG##gp, ICLASS_V2LDST" 1ii0 "OPC" iiiii PPi"SRC" iiiiiiii")
+
+STD_ST_GP(rb, "000","ttttt")
+STD_ST_GP(rh, "010","ttttt")
+STD_ST_GP(rf, "011","ttttt")
+STD_ST_GP(ri, "100","ttttt")
+STD_ST_GP(rd, "110","ttttt")
+STD_ST_GP(rbnew,"101","00ttt")
+STD_ST_GP(rhnew,"101","01ttt")
+STD_ST_GP(rinew,"101","10ttt")
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* V4LDST */
+/* */
+/* */
+/*******************************/
+
+
+DEF_CLASS32(ICLASS_V4LDST" ---- -------- PP------ --------",V4LDST)
+DEF_CLASS32(ICLASS_V4LDST" 0--- -------- PP------ --------",Pred_RplusR)
+DEF_CLASS32(ICLASS_V4LDST" 100- -------- PP------ --------",Pred_StoreImmed)
+DEF_CLASS32(ICLASS_V4LDST" 101- -------- PP------ --------",RplusR)
+DEF_CLASS32(ICLASS_V4LDST" 110- -------- PP------ --------",StoreImmed)
+DEF_CLASS32(ICLASS_V4LDST" 111- -------- PP------ --------",MemOp)
+
+
+
+
+/*******************************/
+/* Pred (R+R) */
+/*******************************/
+
+#define STD_PLD_RRENC(TAG,OPC) \
+DEF_ENC32(L4_pload##TAG##t_rr, ICLASS_V4LDST" 00 00 "OPC" sssss PPittttt ivvddddd")\
+DEF_ENC32(L4_pload##TAG##f_rr, ICLASS_V4LDST" 00 01 "OPC" sssss PPittttt ivvddddd")\
+DEF_ENC32(L4_pload##TAG##tnew_rr,ICLASS_V4LDST" 00 10 "OPC" sssss PPittttt ivvddddd")\
+DEF_ENC32(L4_pload##TAG##fnew_rr,ICLASS_V4LDST" 00 11 "OPC" sssss PPittttt ivvddddd")
+
+STD_PLD_RRENC(rb, "000")
+STD_PLD_RRENC(rub, "001")
+STD_PLD_RRENC(rh, "010")
+STD_PLD_RRENC(ruh, "011")
+STD_PLD_RRENC(ri, "100")
+STD_PLD_RRENC(rd, "110")
+
+#define STD_PST_RRENC(TAG,OPC,SRC) \
+DEF_ENC32(S4_pstore##TAG##t_rr, ICLASS_V4LDST" 01 00 "OPC" sssss PPiuuuuu ivv"SRC)\
+DEF_ENC32(S4_pstore##TAG##f_rr, ICLASS_V4LDST" 01 01 "OPC" sssss PPiuuuuu ivv"SRC)\
+DEF_ENC32(S4_pstore##TAG##tnew_rr,ICLASS_V4LDST" 01 10 "OPC" sssss PPiuuuuu ivv"SRC)\
+DEF_ENC32(S4_pstore##TAG##fnew_rr,ICLASS_V4LDST" 01 11 "OPC" sssss PPiuuuuu ivv"SRC)
+
+STD_PST_RRENC(rb, "000","ttttt")
+STD_PST_RRENC(rh, "010","ttttt")
+STD_PST_RRENC(rf, "011","ttttt")
+STD_PST_RRENC(ri, "100","ttttt")
+STD_PST_RRENC(rd, "110","ttttt")
+STD_PST_RRENC(rbnew, "101","00ttt")
+STD_PST_RRENC(rhnew, "101","01ttt")
+STD_PST_RRENC(rinew, "101","10ttt")
+
+
+
+/*******************************/
+/* Pred Store immediates */
+/*******************************/
+
+#define V4_PSTI(TAG,OPC) \
+DEF_ENC32(S4_storei##TAG##t_io, ICLASS_V4LDST" 100 00 "OPC" sssss PPIiiiii ivvIIIII")\
+DEF_ENC32(S4_storei##TAG##f_io, ICLASS_V4LDST" 100 01 "OPC" sssss PPIiiiii ivvIIIII")\
+DEF_ENC32(S4_storei##TAG##tnew_io, ICLASS_V4LDST" 100 10 "OPC" sssss PPIiiiii ivvIIIII")\
+DEF_ENC32(S4_storei##TAG##fnew_io, ICLASS_V4LDST" 100 11 "OPC" sssss PPIiiiii ivvIIIII")
+
+V4_PSTI(rb, "00")
+V4_PSTI(rh, "01")
+V4_PSTI(ri, "10")
+
+
+
+/*******************************/
+/* (R+R) */
+/*******************************/
+
+#define STD_LD_RRENC(TAG,OPC) \
+DEF_ENC32(L4_load##TAG##_rr, ICLASS_V4LDST" 1010 "OPC" sssss PPittttt i--ddddd")
+
+STD_LD_RRENC(rb, "000")
+STD_LD_RRENC(rub, "001")
+STD_LD_RRENC(rh, "010")
+STD_LD_RRENC(ruh, "011")
+STD_LD_RRENC(ri, "100")
+STD_LD_RRENC(rd, "110")
+
+#define STD_ST_RRENC(TAG,OPC,SRC) \
+DEF_ENC32(S4_store##TAG##_rr, ICLASS_V4LDST" 1011 "OPC" sssss PPiuuuuu i--"SRC)
+
+STD_ST_RRENC(rb, "000","ttttt")
+STD_ST_RRENC(rh, "010","ttttt")
+STD_ST_RRENC(rf, "011","ttttt")
+STD_ST_RRENC(ri, "100","ttttt")
+STD_ST_RRENC(rd, "110","ttttt")
+STD_ST_RRENC(rbnew, "101","00ttt")
+STD_ST_RRENC(rhnew, "101","01ttt")
+STD_ST_RRENC(rinew, "101","10ttt")
+
+
+
+
+/*******************************/
+/* Store immediates */
+/*******************************/
+
+#define V4_STI(TAG,OPC) \
+DEF_ENC32(S4_storei##TAG##_io, ICLASS_V4LDST" 110 -- "OPC" sssss PPIiiiii iIIIIIII")
+
+
+V4_STI(rb, "00")
+V4_STI(rh, "01")
+V4_STI(ri, "10")
+
+
+/*******************************/
+/* Memops */
+/*******************************/
+
+#define MEMOPENC(TAG,OPC) \
+DEF_ENC32(L4_add_##TAG##_io, ICLASS_V4LDST" 111 0- " OPC "sssss PP0iiiii i00ttttt")\
+DEF_ENC32(L4_sub_##TAG##_io, ICLASS_V4LDST" 111 0- " OPC "sssss PP0iiiii i01ttttt")\
+DEF_ENC32(L4_and_##TAG##_io, ICLASS_V4LDST" 111 0- " OPC "sssss PP0iiiii i10ttttt")\
+DEF_ENC32(L4_or_##TAG##_io, ICLASS_V4LDST" 111 0- " OPC "sssss PP0iiiii i11ttttt")\
+\
+DEF_ENC32(L4_iadd_##TAG##_io, ICLASS_V4LDST" 111 1- " OPC "sssss PP0iiiii i00IIIII")\
+DEF_ENC32(L4_isub_##TAG##_io, ICLASS_V4LDST" 111 1- " OPC "sssss PP0iiiii i01IIIII")\
+DEF_ENC32(L4_iand_##TAG##_io, ICLASS_V4LDST" 111 1- " OPC "sssss PP0iiiii i10IIIII")\
+DEF_ENC32(L4_ior_##TAG##_io, ICLASS_V4LDST" 111 1- " OPC "sssss PP0iiiii i11IIIII")
+
+
+
+MEMOPENC(memopw,"10")
+MEMOPENC(memoph,"01")
+MEMOPENC(memopb,"00")
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* LOAD */
+/* */
+/* */
+/*******************************/
+DEF_CLASS32(ICLASS_LD" ---- -------- PP------ --------",LD)
+
+
+DEF_CLASS32(ICLASS_LD" 0--- -------- PP------ --------",LD_ADDR_ROFFSET)
+DEF_CLASS32(ICLASS_LD" 100- -------- PP----0- --------",LD_ADDR_POST_CIRC_IMMED)
+DEF_CLASS32(ICLASS_LD" 101- -------- PP00---- --------",LD_ADDR_POST_IMMED)
+DEF_CLASS32(ICLASS_LD" 101- -------- PP01---- --------",LD_ADDR_ABS_UPDATE_V4)
+DEF_CLASS32(ICLASS_LD" 101- -------- PP1----- --------",LD_ADDR_POST_IMMED_PRED_V2)
+DEF_CLASS32(ICLASS_LD" 110- -------- PP-0---- 0-------",LD_ADDR_POST_REG)
+DEF_CLASS32(ICLASS_LD" 110- -------- PP-1---- --------",LD_ADDR_ABS_PLUS_REG_V4)
+DEF_CLASS32(ICLASS_LD" 100- -------- PP----1- --------",LD_ADDR_POST_CREG_V2)
+DEF_CLASS32(ICLASS_LD" 111- -------- PP------ 0-------",LD_ADDR_POST_BREV_REG)
+DEF_CLASS32(ICLASS_LD" 111- -------- PP------ 1-------",LD_ADDR_PRED_ABS_V4)
+
+DEF_FIELD32(ICLASS_LD" !!!- -------- PP------ --------",LD_Amode,"Amode")
+DEF_FIELD32(ICLASS_LD" ---! !!------ PP------ --------",LD_Type,"Type")
+DEF_FIELD32(ICLASS_LD" ---- --!----- PP------ --------",LD_UN,"Unsigned")
+
+#define STD_LD_ENC(TAG,OPC) \
+DEF_ENC32(L2_load##TAG##_io, ICLASS_LD" 0 ii "OPC" sssss PPiiiiii iiiddddd")\
+DEF_ENC32(L2_load##TAG##_pci, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--0i iiiddddd")\
+DEF_ENC32(L2_load##TAG##_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP00---i iiiddddd")\
+DEF_ENC32(L4_load##TAG##_ap, ICLASS_LD" 1 01 "OPC" eeeee PP01IIII -IIddddd")\
+DEF_ENC32(L2_load##TAG##_pr, ICLASS_LD" 1 10 "OPC" xxxxx PPu0---- 0--ddddd")\
+DEF_ENC32(L4_load##TAG##_ur, ICLASS_LD" 1 10 "OPC" ttttt PPi1IIII iIIddddd")\
+DEF_ENC32(L2_load##TAG##_pcr, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--1- 0--ddddd")\
+DEF_ENC32(L2_load##TAG##_pbr, ICLASS_LD" 1 11 "OPC" xxxxx PPu0---- 0--ddddd")
+
+
+#define STD_LDX_ENC(TAG,OPC) \
+DEF_ENC32(L2_load##TAG##_io, ICLASS_LD" 0 ii "OPC" sssss PPiiiiii iiiyyyyy")\
+DEF_ENC32(L2_load##TAG##_pci, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--0i iiiyyyyy")\
+DEF_ENC32(L2_load##TAG##_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP00---i iiiyyyyy")\
+DEF_ENC32(L4_load##TAG##_ap, ICLASS_LD" 1 01 "OPC" eeeee PP01IIII -IIyyyyy")\
+DEF_ENC32(L2_load##TAG##_pr, ICLASS_LD" 1 10 "OPC" xxxxx PPu0---- 0--yyyyy")\
+DEF_ENC32(L4_load##TAG##_ur, ICLASS_LD" 1 10 "OPC" ttttt PPi1IIII iIIyyyyy")\
+DEF_ENC32(L2_load##TAG##_pcr, ICLASS_LD" 1 00 "OPC" xxxxx PPu0--1- 0--yyyyy")\
+DEF_ENC32(L2_load##TAG##_pbr, ICLASS_LD" 1 11 "OPC" xxxxx PPu0---- 0--yyyyy")
+
+
+#define STD_PLD_ENC(TAG,OPC) \
+DEF_ENC32(L2_pload##TAG##t_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP100tti iiiddddd")\
+DEF_ENC32(L2_pload##TAG##f_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP101tti iiiddddd")\
+DEF_ENC32(L2_pload##TAG##tnew_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP110tti iiiddddd")\
+DEF_ENC32(L2_pload##TAG##fnew_pi, ICLASS_LD" 1 01 "OPC" xxxxx PP111tti iiiddddd")\
+DEF_ENC32(L4_pload##TAG##t_abs, ICLASS_LD" 1 11 "OPC" iiiii PP100tti 1--ddddd")\
+DEF_ENC32(L4_pload##TAG##f_abs, ICLASS_LD" 1 11 "OPC" iiiii PP101tti 1--ddddd")\
+DEF_ENC32(L4_pload##TAG##tnew_abs,ICLASS_LD" 1 11 "OPC" iiiii PP110tti 1--ddddd")\
+DEF_ENC32(L4_pload##TAG##fnew_abs,ICLASS_LD" 1 11 "OPC" iiiii PP111tti 1--ddddd")
+
+
+/* 0 000 misc: dealloc, loadw_locked, dcfetch */
+STD_LD_ENC(bzw4,"0 101")
+STD_LD_ENC(bzw2,"0 011")
+
+STD_LD_ENC(bsw4,"0 111")
+STD_LD_ENC(bsw2,"0 001")
+
+STD_LDX_ENC(alignh,"0 010")
+STD_LDX_ENC(alignb,"0 100")
+
+STD_LD_ENC(rb, "1 000")
+STD_LD_ENC(rub, "1 001")
+STD_LD_ENC(rh, "1 010")
+STD_LD_ENC(ruh, "1 011")
+STD_LD_ENC(ri, "1 100")
+STD_LD_ENC(rd, "1 110") /* note dest reg field LSB=0, 1 is reserved */
+
+STD_PLD_ENC(rb, "1 000")
+STD_PLD_ENC(rub, "1 001")
+STD_PLD_ENC(rh, "1 010")
+STD_PLD_ENC(ruh, "1 011")
+STD_PLD_ENC(ri, "1 100")
+STD_PLD_ENC(rd, "1 110") /* note dest reg field LSB=0, 1 is reserved */
+
+
+DEF_CLASS32( ICLASS_LD" 0--0 000----- PP------ --------",LD_MISC)
+DEF_ANTICLASS32(ICLASS_LD" 0--0 000----- PP------ --------",LD_ADDR_ROFFSET)
+DEF_ANTICLASS32(ICLASS_LD" 1000 000----- PP------ --------",LD_ADDR_POST_CIRC_IMMED)
+DEF_ANTICLASS32(ICLASS_LD" 1010 000----- PP------ --------",LD_ADDR_POST_IMMED)
+DEF_ANTICLASS32(ICLASS_LD" 1100 000----- PP------ --------",LD_ADDR_POST_REG)
+DEF_ANTICLASS32(ICLASS_LD" 1110 000----- PP------ --------",LD_ADDR_POST_REG)
+
+DEF_ENC32(L2_deallocframe, ICLASS_LD" 000 0 000 sssss PP0----- ---ddddd")
+DEF_ENC32(L4_return, ICLASS_LD" 011 0 000 sssss PP0000-- ---ddddd")
+DEF_ENC32(L4_return_t, ICLASS_LD" 011 0 000 sssss PP0100vv ---ddddd")
+DEF_ENC32(L4_return_f, ICLASS_LD" 011 0 000 sssss PP1100vv ---ddddd")
+DEF_ENC32(L4_return_tnew_pt, ICLASS_LD" 011 0 000 sssss PP0110vv ---ddddd")
+DEF_ENC32(L4_return_fnew_pt, ICLASS_LD" 011 0 000 sssss PP1110vv ---ddddd")
+DEF_ENC32(L4_return_tnew_pnt, ICLASS_LD" 011 0 000 sssss PP0010vv ---ddddd")
+DEF_ENC32(L4_return_fnew_pnt, ICLASS_LD" 011 0 000 sssss PP1010vv ---ddddd")
+
+DEF_ENC32(L2_loadw_locked,ICLASS_LD" 001 0 000 sssss PP00---- -00ddddd")
+
+
+
+
+
+
+DEF_ENC32(L4_loadd_locked,ICLASS_LD" 001 0 000 sssss PP01---- -00ddddd")
+DEF_EXT_SPACE(EXTRACTW, ICLASS_LD" 001 0 000 iiiii PP0iiiii -01iiiii")
+DEF_ENC32(Y2_dcfetchbo, ICLASS_LD" 010 0 000 sssss PP0--iii iiiiiiii")
+
+
+
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* STORE */
+/* */
+/* */
+/*******************************/
+
+DEF_CLASS32(ICLASS_ST" ---- -------- PP------ --------",ST)
+
+DEF_FIELD32(ICLASS_ST" !!!- -------- PP------ --------",ST_Amode,"Amode")
+DEF_FIELD32(ICLASS_ST" ---! !!------ PP------ --------",ST_Type,"Type")
+DEF_FIELD32(ICLASS_ST" ---- --!----- PP------ --------",ST_UN,"Unsigned")
+
+DEF_CLASS32(ICLASS_ST" 0--1 -------- PP------ --------",ST_ADDR_ROFFSET)
+DEF_CLASS32(ICLASS_ST" 1001 -------- PP------ ------0-",ST_ADDR_POST_CIRC_IMMED)
+DEF_CLASS32(ICLASS_ST" 1011 -------- PP0----- 0-----0-",ST_ADDR_POST_IMMED)
+DEF_CLASS32(ICLASS_ST" 1011 -------- PP0----- 1-------",ST_ADDR_ABS_UPDATE_V4)
+DEF_CLASS32(ICLASS_ST" 1011 -------- PP1----- --------",ST_ADDR_POST_IMMED_PRED_V2)
+DEF_CLASS32(ICLASS_ST" 1111 -------- PP------ 1-------",ST_ADDR_PRED_ABS_V4)
+DEF_CLASS32(ICLASS_ST" 1101 -------- PP------ 0-------",ST_ADDR_POST_REG)
+DEF_CLASS32(ICLASS_ST" 1101 -------- PP------ 1-------",ST_ADDR_ABS_PLUS_REG_V4)
+DEF_CLASS32(ICLASS_ST" 1001 -------- PP------ ------1-",ST_ADDR_POST_CREG_V2)
+DEF_CLASS32(ICLASS_ST" 1111 -------- PP------ 0-------",ST_ADDR_POST_BREV_REG)
+DEF_CLASS32(ICLASS_ST" 0--0 1------- PP------ --------",ST_MISC_STORELIKE)
+DEF_CLASS32(ICLASS_ST" 1--0 0------- PP------ --------",ST_MISC_BUSOP)
+DEF_CLASS32(ICLASS_ST" 0--0 0------- PP------ --------",ST_MISC_CACHEOP)
+
+
+#define STD_ST_ENC(TAG,OPC,SRC) \
+DEF_ENC32(S2_store##TAG##_io, ICLASS_ST" 0 ii "OPC" sssss PPi"SRC" iiiiiiii")\
+DEF_ENC32(S2_store##TAG##_pci, ICLASS_ST" 1 00 "OPC" xxxxx PPu"SRC" 0iiii-0-")\
+DEF_ENC32(S2_store##TAG##_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP0"SRC" 0iiii-0-")\
+DEF_ENC32(S4_store##TAG##_ap, ICLASS_ST" 1 01 "OPC" eeeee PP0"SRC" 1-IIIIII")\
+DEF_ENC32(S2_store##TAG##_pr, ICLASS_ST" 1 10 "OPC" xxxxx PPu"SRC" 0-------")\
+DEF_ENC32(S4_store##TAG##_ur, ICLASS_ST" 1 10 "OPC" uuuuu PPi"SRC" 1iIIIIII")\
+DEF_ENC32(S2_store##TAG##_pcr, ICLASS_ST" 1 00 "OPC" xxxxx PPu"SRC" 0-----1-")\
+DEF_ENC32(S2_store##TAG##_pbr, ICLASS_ST" 1 11 "OPC" xxxxx PPu"SRC" 0-------")
+
+
+#define STD_PST_ENC(TAG,OPC,SRC) \
+DEF_ENC32(S2_pstore##TAG##t_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP1"SRC" 0iiii0vv")\
+DEF_ENC32(S2_pstore##TAG##f_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP1"SRC" 0iiii1vv")\
+DEF_ENC32(S2_pstore##TAG##tnew_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP1"SRC" 1iiii0vv")\
+DEF_ENC32(S2_pstore##TAG##fnew_pi, ICLASS_ST" 1 01 "OPC" xxxxx PP1"SRC" 1iiii1vv")\
+DEF_ENC32(S4_pstore##TAG##t_abs, ICLASS_ST" 1 11 "OPC" ---ii PP0"SRC" 1iiii0vv")\
+DEF_ENC32(S4_pstore##TAG##f_abs, ICLASS_ST" 1 11 "OPC" ---ii PP0"SRC" 1iiii1vv")\
+DEF_ENC32(S4_pstore##TAG##tnew_abs,ICLASS_ST" 1 11 "OPC" ---ii PP1"SRC" 1iiii0vv")\
+DEF_ENC32(S4_pstore##TAG##fnew_abs,ICLASS_ST" 1 11 "OPC" ---ii PP1"SRC" 1iiii1vv")
+
+
+/* 0 0-- Store Misc */
+/* 0 1xx Available */
+STD_ST_ENC(rb, "1 000","ttttt")
+STD_ST_ENC(rh, "1 010","ttttt")
+STD_ST_ENC(rf, "1 011","ttttt")
+STD_ST_ENC(ri, "1 100","ttttt")
+STD_ST_ENC(rd, "1 110","ttttt")
+STD_ST_ENC(rbnew, "1 101","00ttt")
+STD_ST_ENC(rhnew, "1 101","01ttt")
+STD_ST_ENC(rinew, "1 101","10ttt")
+
+STD_PST_ENC(rb, "1 000","ttttt")
+STD_PST_ENC(rh, "1 010","ttttt")
+STD_PST_ENC(rf, "1 011","ttttt")
+STD_PST_ENC(ri, "1 100","ttttt")
+STD_PST_ENC(rd, "1 110","ttttt")
+STD_PST_ENC(rbnew, "1 101","00ttt")
+STD_PST_ENC(rhnew, "1 101","01ttt")
+STD_PST_ENC(rinew, "1 101","10ttt")
+
+
+
+/* User */
+/* xx - st_misc */
+/* */
+/* x bus/cache */
+/* x store/cache */
+DEF_ENC32(S2_allocframe, ICLASS_ST" 000 01 00xxxxx PP000iii iiiiiiii")
+DEF_ENC32(S2_storew_locked,ICLASS_ST" 000 01 01sssss PP-ttttt ------dd")
+DEF_ENC32(S4_stored_locked,ICLASS_ST" 000 01 11sssss PP0ttttt ------dd")
+DEF_ENC32(Y2_dczeroa, ICLASS_ST" 000 01 10sssss PP0----- --------")
+
+
+DEF_ENC32(Y2_barrier, ICLASS_ST" 100 00 00----- PP------ 000-----")
+DEF_ENC32(Y2_syncht, ICLASS_ST" 100 00 10----- PP------ --------")
+
+
+
+DEF_ENC32(Y2_dccleana, ICLASS_ST" 000 00 00sssss PP------ --------")
+DEF_ENC32(Y2_dcinva, ICLASS_ST" 000 00 01sssss PP------ --------")
+DEF_ENC32(Y2_dccleaninva, ICLASS_ST" 000 00 10sssss PP------ --------")
+
+DEF_ENC32(Y4_l2fetch, ICLASS_ST" 011 00 00sssss PP-ttttt 000-----")
+DEF_ENC32(Y5_l2fetch, ICLASS_ST" 011 01 00sssss PP-ttttt --------")
+
+/*******************************/
+/* */
+/* */
+/* JUMP */
+/* */
+/* */
+/*******************************/
+
+DEF_CLASS32(ICLASS_J" ---- -------- PP------ --------",J)
+DEF_CLASS32(ICLASS_J" 0--- -------- PP------ --------",JUMPR_MISC)
+DEF_CLASS32(ICLASS_J" 10-- -------- PP------ --------",UCJUMP)
+DEF_CLASS32(ICLASS_J" 110- -------- PP------ --------",CJUMP)
+DEF_FIELD32(ICLASS_J" 110- -------- PP--!--- --------",J_DN,"Dot-new")
+DEF_FIELD32(ICLASS_J" 110- -------- PP-!---- --------",J_PT,"Predict-taken")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0000 -------- PP------ --------","[#0] PC=(Rs), R31=return")
+DEF_ENC32(J2_callr, ICLASS_J" 0000 101sssss PP------ --------")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0001 -------- PP------ --------","[#1] if (Pu) PC=(Rs), R31=return")
+DEF_ENC32(J2_callrt, ICLASS_J" 0001 000sssss PP----uu --------")
+DEF_ENC32(J2_callrf, ICLASS_J" 0001 001sssss PP----uu --------")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0010 -------- PP------ --------","[#2] PC=(Rs); ")
+DEF_ENC32(J2_jumpr, ICLASS_J" 0010 100sssss PP------ --------")
+DEF_ENC32(J4_hintjumpr, ICLASS_J" 0010 101sssss PP------ --------")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0011 -------- PP------ --------","[#3] if (Pu) PC=(Rs) ")
+DEF_ENC32(J2_jumprt, ICLASS_J" 0011 010sssss PP-00-uu --------")
+DEF_ENC32(J2_jumprf, ICLASS_J" 0011 011sssss PP-00-uu --------")
+DEF_ENC32(J2_jumprtpt, ICLASS_J" 0011 010sssss PP-10-uu --------")
+DEF_ENC32(J2_jumprfpt, ICLASS_J" 0011 011sssss PP-10-uu --------")
+DEF_ENC32(J2_jumprtnew, ICLASS_J" 0011 010sssss PP-01-uu --------")
+DEF_ENC32(J2_jumprfnew, ICLASS_J" 0011 011sssss PP-01-uu --------")
+DEF_ENC32(J2_jumprtnewpt, ICLASS_J" 0011 010sssss PP-11-uu --------")
+DEF_ENC32(J2_jumprfnewpt, ICLASS_J" 0011 011sssss PP-11-uu --------")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0100 -------- PP------ --------","[#4] (#u8) ")
+DEF_ENC32(J2_trap0, ICLASS_J" 0100 00------ PP-iiiii ---iii--")
+DEF_ENC32(J2_pause, ICLASS_J" 0100 01------ PP-iiiii ---iii--")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0110 -------- PP------ --------","[#6] icop(Rs) ")
+DEF_ENC32(Y2_icinva, ICLASS_J" 0110 110sssss PP000--- --------")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 0111 -------- PP------ --------","[#7] () ")
+DEF_ENC32(Y2_isync, ICLASS_J" 0111 11000000 PP0---00 00000010")
+
+/* JUMP */
+DEF_FIELDROW_DESC32(ICLASS_J" 100- -------- PP------ --------","[#8,9] PC=(#r22)")
+DEF_ENC32(J2_jump, ICLASS_J" 100i iiiiiiii PPiiiiii iiiiiii-")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 101- -------- PP------ --------","[#10,11] PC=(#r22), R31=return")
+DEF_ENC32(J2_call, ICLASS_J" 101i iiiiiiii PPiiiiii iiiiiii0")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 1100 -------- PP------ --------","[#12] if (Pu) PC=(#r15)")
+DEF_ENC32(J2_jumpt, ICLASS_J" 1100 ii0iiiii PPi00-uu iiiiiii-")
+DEF_ENC32(J2_jumpf, ICLASS_J" 1100 ii1iiiii PPi00-uu iiiiiii-")
+DEF_ENC32(J2_jumptpt, ICLASS_J" 1100 ii0iiiii PPi10-uu iiiiiii-")
+DEF_ENC32(J2_jumpfpt, ICLASS_J" 1100 ii1iiiii PPi10-uu iiiiiii-")
+DEF_ENC32(J2_jumptnew, ICLASS_J" 1100 ii0iiiii PPi01-uu iiiiiii-")
+DEF_ENC32(J2_jumpfnew, ICLASS_J" 1100 ii1iiiii PPi01-uu iiiiiii-")
+DEF_ENC32(J2_jumptnewpt,ICLASS_J" 1100 ii0iiiii PPi11-uu iiiiiii-")
+DEF_ENC32(J2_jumpfnewpt,ICLASS_J" 1100 ii1iiiii PPi11-uu iiiiiii-")
+
+DEF_FIELDROW_DESC32(ICLASS_J" 1101 -------- PP------ --------","[#13] if (Pu) PC=(#r15), R31=return")
+DEF_ENC32(J2_callt, ICLASS_J" 1101 ii0iiiii PPi-0-uu iiiiiii-")
+DEF_ENC32(J2_callf, ICLASS_J" 1101 ii1iiiii PPi-0-uu iiiiiii-")
+
+
+
+
+
+
+
+/*******************************/
+/* */
+/* V4 */
+/* COMPOUND COMPARE-JUMPS */
+/* */
+/* */
+/*******************************/
+
+
+/* EJP: this has to match what we have in htmldocs.py, so I will call it CJ; we can change it */
+DEF_CLASS32(ICLASS_CJ" 0--- -------- PP------ --------",CJ)
+
+DEF_FIELDROW_DESC32(ICLASS_CJ" 00-- -------- -------- --------","[#0-3] pd=cmp.xx(R,#u5) ; if ([!]p0.new) jump:[h] #s9:2 ")
+DEF_FIELDROW_DESC32(ICLASS_CJ" 010- -------- -------- --------","[#4,5] pd=cmp.eq(R,R) ; if ([!]p0.new) jump:[h] #s9:2 ")
+DEF_FIELDROW_DESC32(ICLASS_CJ" 0110 -------- -------- --------","[#6] Rd=#u6 ; jump #s9:2 ")
+DEF_FIELDROW_DESC32(ICLASS_CJ" 0111 -------- -------- --------","[#7] Rd=Rs ; jump #s9:2 ")
+
+
+#define CMPJMPI_ENC(TAG,OPC) \
+DEF_ENC32(TAG##i_tp0_jump_t, ICLASS_CJ" 00 0 "OPC" 0iissss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_fp0_jump_t, ICLASS_CJ" 00 0 "OPC" 1iissss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_tp0_jump_nt, ICLASS_CJ" 00 0 "OPC" 0iissss PP0IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_fp0_jump_nt, ICLASS_CJ" 00 0 "OPC" 1iissss PP0IIIII iiiiiii-") \
+\
+DEF_ENC32(TAG##i_tp1_jump_t, ICLASS_CJ" 00 1 "OPC" 0iissss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_fp1_jump_t, ICLASS_CJ" 00 1 "OPC" 1iissss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_tp1_jump_nt, ICLASS_CJ" 00 1 "OPC" 0iissss PP0IIIII iiiiiii-") \
+DEF_ENC32(TAG##i_fp1_jump_nt, ICLASS_CJ" 00 1 "OPC" 1iissss PP0IIIII iiiiiii-")
+
+CMPJMPI_ENC(J4_cmpeq,"00")
+CMPJMPI_ENC(J4_cmpgt,"01")
+CMPJMPI_ENC(J4_cmpgtu,"10")
+
+
+#define CMPJMP1I_ENC(TAG,OPC) \
+DEF_ENC32(TAG##_tp0_jump_t, ICLASS_CJ" 00 0 11 0iissss PP1---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_fp0_jump_t, ICLASS_CJ" 00 0 11 1iissss PP1---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_tp0_jump_nt, ICLASS_CJ" 00 0 11 0iissss PP0---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_fp0_jump_nt, ICLASS_CJ" 00 0 11 1iissss PP0---"OPC" iiiiiii-") \
+\
+DEF_ENC32(TAG##_tp1_jump_t, ICLASS_CJ" 00 1 11 0iissss PP1---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_fp1_jump_t, ICLASS_CJ" 00 1 11 1iissss PP1---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_tp1_jump_nt, ICLASS_CJ" 00 1 11 0iissss PP0---"OPC" iiiiiii-") \
+DEF_ENC32(TAG##_fp1_jump_nt, ICLASS_CJ" 00 1 11 1iissss PP0---"OPC" iiiiiii-")
+
+CMPJMP1I_ENC(J4_cmpeqn1,"00")
+CMPJMP1I_ENC(J4_cmpgtn1,"01")
+CMPJMP1I_ENC(J4_tstbit0,"11")
+
+
+
+#define CMPJMPR_ENC(TAG,OPC) \
+DEF_ENC32(TAG##_tp0_jump_t, ICLASS_CJ" 01 0 "OPC" 0iissss PP10tttt iiiiiii-") \
+DEF_ENC32(TAG##_fp0_jump_t, ICLASS_CJ" 01 0 "OPC" 1iissss PP10tttt iiiiiii-") \
+DEF_ENC32(TAG##_tp0_jump_nt, ICLASS_CJ" 01 0 "OPC" 0iissss PP00tttt iiiiiii-") \
+DEF_ENC32(TAG##_fp0_jump_nt, ICLASS_CJ" 01 0 "OPC" 1iissss PP00tttt iiiiiii-") \
+\
+DEF_ENC32(TAG##_tp1_jump_t, ICLASS_CJ" 01 0 "OPC" 0iissss PP11tttt iiiiiii-") \
+DEF_ENC32(TAG##_fp1_jump_t, ICLASS_CJ" 01 0 "OPC" 1iissss PP11tttt iiiiiii-") \
+DEF_ENC32(TAG##_tp1_jump_nt, ICLASS_CJ" 01 0 "OPC" 0iissss PP01tttt iiiiiii-") \
+DEF_ENC32(TAG##_fp1_jump_nt, ICLASS_CJ" 01 0 "OPC" 1iissss PP01tttt iiiiiii-")
+
+CMPJMPR_ENC(J4_cmpeq,"00")
+CMPJMPR_ENC(J4_cmpgt,"01")
+CMPJMPR_ENC(J4_cmpgtu,"10")
+
+
+DEF_ENC32(J4_jumpseti, ICLASS_CJ" 0110 --iidddd PPIIIIII iiiiiii-")
+DEF_ENC32(J4_jumpsetr, ICLASS_CJ" 0111 --iissss PP--dddd iiiiiii-")
+
+
+DEF_EXT_SPACE(EXT_CJ, ICLASS_CJ"1 iii iiiiiiii PPiiiiii iiiiiiii")
+
+
+
+DEF_CLASS32(ICLASS_NCJ" 0--- -------- PP------ --------",NCJ)
+DEF_FIELDROW_DESC32(ICLASS_NCJ" 00-- -------- -------- --------","[#0-3] if (cmp.xx(R.new,R)) jump:[h] #s9:2 ")
+DEF_FIELDROW_DESC32(ICLASS_NCJ" 01-- -------- -------- --------","[#4-7] if (cmp.xx(R.new,#U5)) jump:[h] #s9:2 ")
+
+#define OPRJMP_ENC(TAG,OPC) \
+DEF_ENC32(TAG##_t_jumpnv_t, ICLASS_NCJ" 00 "OPC" 0ii-sss PP1ttttt iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_t, ICLASS_NCJ" 00 "OPC" 1ii-sss PP1ttttt iiiiiii-") \
+DEF_ENC32(TAG##_t_jumpnv_nt, ICLASS_NCJ" 00 "OPC" 0ii-sss PP0ttttt iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_nt, ICLASS_NCJ" 00 "OPC" 1ii-sss PP0ttttt iiiiiii-")
+
+OPRJMP_ENC(J4_cmpeq, "000")
+OPRJMP_ENC(J4_cmpgt, "001")
+OPRJMP_ENC(J4_cmpgtu, "010")
+OPRJMP_ENC(J4_cmplt, "011")
+OPRJMP_ENC(J4_cmpltu, "100")
+
+
+#define OPIJMP_ENC(TAG,OPC) \
+DEF_ENC32(TAG##_t_jumpnv_t, ICLASS_NCJ" 01 "OPC" 0ii-sss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_t, ICLASS_NCJ" 01 "OPC" 1ii-sss PP1IIIII iiiiiii-") \
+DEF_ENC32(TAG##_t_jumpnv_nt, ICLASS_NCJ" 01 "OPC" 0ii-sss PP0IIIII iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_nt, ICLASS_NCJ" 01 "OPC" 1ii-sss PP0IIIII iiiiiii-")
+
+OPIJMP_ENC(J4_cmpeqi, "000")
+OPIJMP_ENC(J4_cmpgti, "001")
+OPIJMP_ENC(J4_cmpgtui, "010")
+
+
+#define OPI1JMP_ENC(TAG,OPC) \
+DEF_ENC32(TAG##_t_jumpnv_t, ICLASS_NCJ" 01 "OPC" 0ii-sss PP1----- iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_t, ICLASS_NCJ" 01 "OPC" 1ii-sss PP1----- iiiiiii-") \
+DEF_ENC32(TAG##_t_jumpnv_nt, ICLASS_NCJ" 01 "OPC" 0ii-sss PP0----- iiiiiii-") \
+DEF_ENC32(TAG##_f_jumpnv_nt, ICLASS_NCJ" 01 "OPC" 1ii-sss PP0----- iiiiiii-")
+
+OPI1JMP_ENC(J4_cmpeqn1, "100")
+OPI1JMP_ENC(J4_cmpgtn1, "101")
+OPI1JMP_ENC(J4_tstbit0, "011")
+
+
+DEF_EXT_SPACE(EXT_NCJ, ICLASS_NCJ"1 iii iiiiiiii PPiiiiii iiiiiiii")
+
+
+
+/*******************************/
+/* */
+/* */
+/* CR */
+/* */
+/* */
+/*******************************/
+
+
+
+DEF_CLASS32(ICLASS_CR" ---- -------- PP------ --------",CR)
+DEF_CLASS32(ICLASS_CR" -0-- -------- PP------ --------",CRUSER)
+DEF_CLASS32(ICLASS_CR" -1-- -------- PP------ --------",CRSUPER)
+
+DEF_FIELD32(ICLASS_CR" -!-- -------- PP------ --------",CR_sm,"Supervisor mode only")
+
+/* User CR ops */
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 0000 -------- PP------ --------","[#0] (Rs,#r8)")
+DEF_ENC32(J2_loop0r, ICLASS_CR" 0000 000sssss PP-iiiii ---ii---")
+DEF_ENC32(J2_loop1r, ICLASS_CR" 0000 001sssss PP-iiiii ---ii---")
+DEF_ENC32(J2_ploop1sr, ICLASS_CR" 0000 101sssss PP-iiiii ---ii---")
+DEF_ENC32(J2_ploop2sr, ICLASS_CR" 0000 110sssss PP-iiiii ---ii---")
+DEF_ENC32(J2_ploop3sr, ICLASS_CR" 0000 111sssss PP-iiiii ---ii---")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 0001 -------- PP------ --------","[#1] (Rs,#r13)")
+DEF_ENC32(J2_jumprz, ICLASS_CR" 0001 00isssss PPi0iiii iiiiiii-")
+DEF_ENC32(J2_jumprzpt, ICLASS_CR" 0001 00isssss PPi1iiii iiiiiii-")
+DEF_ENC32(J2_jumprnz, ICLASS_CR" 0001 10isssss PPi0iiii iiiiiii-")
+DEF_ENC32(J2_jumprnzpt, ICLASS_CR" 0001 10isssss PPi1iiii iiiiiii-")
+
+DEF_ENC32(J2_jumprgtez, ICLASS_CR" 0001 01isssss PPi0iiii iiiiiii-")
+DEF_ENC32(J2_jumprgtezpt,ICLASS_CR" 0001 01isssss PPi1iiii iiiiiii-")
+DEF_ENC32(J2_jumprltez, ICLASS_CR" 0001 11isssss PPi0iiii iiiiiii-")
+DEF_ENC32(J2_jumprltezpt,ICLASS_CR" 0001 11isssss PPi1iiii iiiiiii-")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 0010 -------- PP------ --------","[#2] Cd=Rs ")
+DEF_ENC32(A2_tfrrcr, ICLASS_CR" 0010 001sssss PP------ ---ddddd")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 0011 -------- PP------ --------","[#3] Cdd=Rss ")
+DEF_ENC32(A4_tfrpcp, ICLASS_CR" 0011 001sssss PP------ ---ddddd")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 1000 -------- PP------ --------","[#8] Rdd=Css ")
+DEF_ENC32(A4_tfrcpp, ICLASS_CR" 1000 000sssss PP------ ---ddddd")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 1001 -------- PP------ --------","[#9] (#r8,#U10)")
+DEF_ENC32(J2_ploop1si, ICLASS_CR" 1001 101IIIII PP-iiiii IIIii-II")
+DEF_ENC32(J2_ploop2si, ICLASS_CR" 1001 110IIIII PP-iiiii IIIii-II")
+DEF_ENC32(J2_ploop3si, ICLASS_CR" 1001 111IIIII PP-iiiii IIIii-II")
+DEF_ENC32(J2_loop0i, ICLASS_CR" 1001 000IIIII PP-iiiii IIIii-II")
+DEF_ENC32(J2_loop1i, ICLASS_CR" 1001 001IIIII PP-iiiii IIIii-II")
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 1010 -------- PP------ --------","[#10] Rd=Cs ")
+DEF_ENC32(A2_tfrcrr, ICLASS_CR" 1010 000sssss PP------ ---ddddd")
+DEF_ENC32(C4_addipc, ICLASS_CR" 1010 01001001 PP-iiiii i--ddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_CR" 1011 -------- PP0----- --------","[#11] Pd=(Ps,Pt,Pu)")
+DEF_ENC32(C2_and, ICLASS_CR" 1011 0000--ss PP0---tt ------dd")
+DEF_ENC32(C2_or, ICLASS_CR" 1011 0010--ss PP0---tt ------dd")
+DEF_ENC32(C2_xor, ICLASS_CR" 1011 0100--ss PP0---tt ------dd")
+DEF_ENC32(C2_andn, ICLASS_CR" 1011 0110--ss PP0---tt ------dd")
+DEF_ENC32(C2_any8, ICLASS_CR" 1011 1000--ss PP0----- ------dd")
+DEF_ENC32(C2_all8, ICLASS_CR" 1011 1010--ss PP0----- ------dd")
+DEF_ENC32(C2_not, ICLASS_CR" 1011 1100--ss PP0----- ------dd")
+DEF_ENC32(C2_orn, ICLASS_CR" 1011 1110--ss PP0---tt ------dd")
+
+DEF_ENC32(C4_and_and, ICLASS_CR" 1011 0001--ss PP0---tt uu----dd")
+DEF_ENC32(C4_and_or, ICLASS_CR" 1011 0011--ss PP0---tt uu----dd")
+DEF_ENC32(C4_or_and, ICLASS_CR" 1011 0101--ss PP0---tt uu----dd")
+DEF_ENC32(C4_or_or, ICLASS_CR" 1011 0111--ss PP0---tt uu----dd")
+DEF_ENC32(C4_and_andn, ICLASS_CR" 1011 1001--ss PP0---tt uu----dd")
+DEF_ENC32(C4_and_orn, ICLASS_CR" 1011 1011--ss PP0---tt uu----dd")
+DEF_ENC32(C4_or_andn, ICLASS_CR" 1011 1101--ss PP0---tt uu----dd")
+DEF_ENC32(C4_or_orn, ICLASS_CR" 1011 1111--ss PP0---tt uu----dd")
+
+DEF_ENC32(C4_fastcorner9, ICLASS_CR"1011 0000--ss PP1---tt 1--1--dd")
+DEF_ENC32(C4_fastcorner9_not, ICLASS_CR"1011 0001--ss PP1---tt 1--1--dd")
+
+
+
+/*******************************/
+/* */
+/* */
+/* M */
+/* */
+/* */
+/*******************************/
+
+
+DEF_CLASS32(ICLASS_M" ---- -------- PP------ --------",M)
+DEF_FIELD32(ICLASS_M" !!!! -------- PP------ --------",M_RegType,"Register Type")
+DEF_FIELD32(ICLASS_M" ---- !!!----- PP------ --------",M_MajOp,"Major Opcode")
+DEF_FIELD32(ICLASS_M" ---- -------- PP------ !!!-----",M_MinOp,"Minor Opcode")
+
+
+
+#define SP_MPY(TAG,REGTYPE,DSTCHARS,SAT,RND,UNS)\
+DEF_ENC32(TAG##_ll_s0, ICLASS_M REGTYPE "0" UNS RND"sssss PP-ttttt "SAT"00" DSTCHARS)\
+DEF_ENC32(TAG##_lh_s0, ICLASS_M REGTYPE "0" UNS RND"sssss PP-ttttt "SAT"01" DSTCHARS)\
+DEF_ENC32(TAG##_hl_s0, ICLASS_M REGTYPE "0" UNS RND"sssss PP-ttttt "SAT"10" DSTCHARS)\
+DEF_ENC32(TAG##_hh_s0, ICLASS_M REGTYPE "0" UNS RND"sssss PP-ttttt "SAT"11" DSTCHARS)\
+DEF_ENC32(TAG##_ll_s1, ICLASS_M REGTYPE "1" UNS RND"sssss PP-ttttt "SAT"00" DSTCHARS)\
+DEF_ENC32(TAG##_lh_s1, ICLASS_M REGTYPE "1" UNS RND"sssss PP-ttttt "SAT"01" DSTCHARS)\
+DEF_ENC32(TAG##_hl_s1, ICLASS_M REGTYPE "1" UNS RND"sssss PP-ttttt "SAT"10" DSTCHARS)\
+DEF_ENC32(TAG##_hh_s1, ICLASS_M REGTYPE "1" UNS RND"sssss PP-ttttt "SAT"11" DSTCHARS)
+
+/* Double precision */
+#define MPY_ENC(TAG,REGTYPE,DSTCHARS,SAT,RNDNAC,UNS,SHFT,VMIN2)\
+DEF_ENC32(TAG, ICLASS_M REGTYPE SHFT UNS RNDNAC"sssss PP0ttttt "SAT VMIN2 DSTCHARS)
+
+#define MPYI_ENC(TAG,REGTYPE,DSTCHARS,RNDNAC,UNS,SHFT)\
+DEF_ENC32(TAG, ICLASS_M REGTYPE SHFT UNS RNDNAC"sssss PP0iiiii iii" DSTCHARS)
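+
+/*
+ * Sketch of how SP_MPY assembles a full 32-bit pattern purely by
+ * string-literal concatenation of its arguments: substituting the
+ * arguments used for M2_mpy below ("1100","ddddd","0","0","0") into the
+ * _ll_s0 row yields the string printed here. Kept under #if 0.
+ */
+#if 0
+#include <stdio.h>
+
+int main(void)
+{
+    const char *m2_mpy_ll_s0 =
+        "1110" "1100" "0" "0" "0" "sssss PP-ttttt " "0" "00" "ddddd";
+    printf("%s\n", m2_mpy_ll_s0); /* 11101100000sssss PP-ttttt 000ddddd */
+    return 0;
+}
+#endif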
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0000 -------- PP------ --------","[#0] Rd=(Rs,#u8)")
+MPYI_ENC(M2_mpysip, "0000","ddddd","-","-","0" )
+MPYI_ENC(M2_mpysin, "0000","ddddd","-","-","1" )
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0001 -------- PP------ --------","[#1] Rx=(Rs,#u8)")
+MPYI_ENC(M2_macsip, "0001","xxxxx","-","-","0" )
+MPYI_ENC(M2_macsin, "0001","xxxxx","-","-","1" )
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0010 -------- PP------ --------","[#2] Rx=(Rs,#s8)")
+MPYI_ENC(M2_accii, "0010","xxxxx","-","-","0" )
+MPYI_ENC(M2_naccii, "0010","xxxxx","-","-","1" )
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0011 -------- PP------ --------","[#3] Ry=(Ru,(Rs,Ry)) ")
+DEF_ENC32(M4_mpyrr_addr,ICLASS_M" 0011 000sssss PP-yyyyy ---uuuuu")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0100 -------- PP------ --------","[#4] Rdd=(Rs,Rt)")
+DEF_FIELD32(ICLASS_M" 0100 -------- PP------ --!-----",Ma_tH,"Rt is High") /*Rt high */
+DEF_FIELD32(ICLASS_M" 0100 -------- PP------ -!------",Ma_sH,"Rs is High") /* Rs high */
+SP_MPY(M2_mpyd, "0100","ddddd","-","0","0")
+SP_MPY(M2_mpyd_rnd, "0100","ddddd","-","1","0")
+SP_MPY(M2_mpyud, "0100","ddddd","-","0","1")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0101 -------- PP------ --------","[#5] Rdd=(Rs,Rt)")
+MPY_ENC(M2_dpmpyss_s0, "0101","ddddd","0","0","0","0","00")
+MPY_ENC(M2_dpmpyuu_s0, "0101","ddddd","0","0","1","0","00")
+MPY_ENC(M2_vmpy2s_s0, "0101","ddddd","1","0","0","0","01")
+MPY_ENC(M2_vmpy2s_s1, "0101","ddddd","1","0","0","1","01")
+MPY_ENC(M2_cmpyi_s0, "0101","ddddd","0","0","0","0","01")
+MPY_ENC(M2_cmpyr_s0, "0101","ddddd","0","0","0","0","10")
+MPY_ENC(M2_cmpys_s0, "0101","ddddd","1","0","0","0","10")
+MPY_ENC(M2_cmpys_s1, "0101","ddddd","1","0","0","1","10")
+MPY_ENC(M2_cmpysc_s0, "0101","ddddd","1","0","1","0","10")
+MPY_ENC(M2_cmpysc_s1, "0101","ddddd","1","0","1","1","10")
+MPY_ENC(M2_vmpy2su_s0, "0101","ddddd","1","0","0","0","11")
+MPY_ENC(M2_vmpy2su_s1, "0101","ddddd","1","0","0","1","11")
+MPY_ENC(M4_pmpyw, "0101","ddddd","1","0","1","0","11")
+MPY_ENC(M4_vpmpyh, "0101","ddddd","1","0","1","1","11")
+MPY_ENC(M5_vmpybuu, "0101","ddddd","0","0","0","1","01")
+MPY_ENC(M5_vmpybsu, "0101","ddddd","0","0","1","0","01")
+
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0110 -------- PP------ --------","[#6] Rxx=(Rs,Rt)")
+DEF_FIELD32(ICLASS_M" 0110 -------- PP------ --!-----",Mb_tH,"Rt is High") /*Rt high */
+DEF_FIELD32(ICLASS_M" 0110 -------- PP------ -!------",Mb_sH,"Rs is High") /* Rs high */
+SP_MPY(M2_mpyd_acc, "0110","xxxxx","0","0","0")
+SP_MPY(M2_mpyud_acc, "0110","xxxxx","0","0","1")
+SP_MPY(M2_mpyd_nac, "0110","xxxxx","0","1","0")
+SP_MPY(M2_mpyud_nac, "0110","xxxxx","0","1","1")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 0111 -------- PP------ --------","[#7] Rxx=(Rs,Rt)")
+MPY_ENC(M2_dpmpyss_acc_s0, "0111","xxxxx","0","0","0","0","00")
+MPY_ENC(M2_dpmpyss_nac_s0, "0111","xxxxx","0","1","0","0","00")
+MPY_ENC(M2_dpmpyuu_acc_s0, "0111","xxxxx","0","0","1","0","00")
+MPY_ENC(M2_dpmpyuu_nac_s0, "0111","xxxxx","0","1","1","0","00")
+MPY_ENC(M2_vmac2s_s0, "0111","xxxxx","1","0","0","0","01")
+MPY_ENC(M2_vmac2s_s1, "0111","xxxxx","1","0","0","1","01")
+MPY_ENC(M2_cmaci_s0, "0111","xxxxx","0","0","0","0","01")
+MPY_ENC(M2_cmacr_s0, "0111","xxxxx","0","0","0","0","10")
+MPY_ENC(M2_cmacs_s0, "0111","xxxxx","1","0","0","0","10")
+MPY_ENC(M2_cmacs_s1, "0111","xxxxx","1","0","0","1","10")
+MPY_ENC(M2_cmacsc_s0, "0111","xxxxx","1","0","1","0","10")
+MPY_ENC(M2_cmacsc_s1, "0111","xxxxx","1","0","1","1","10")
+MPY_ENC(M2_vmac2, "0111","xxxxx","0","1","0","0","01")
+MPY_ENC(M2_cnacs_s0, "0111","xxxxx","1","0","0","0","11")
+MPY_ENC(M2_cnacs_s1, "0111","xxxxx","1","0","0","1","11")
+MPY_ENC(M2_cnacsc_s0, "0111","xxxxx","1","0","1","0","11")
+MPY_ENC(M2_cnacsc_s1, "0111","xxxxx","1","0","1","1","11")
+MPY_ENC(M2_vmac2su_s0, "0111","xxxxx","1","1","1","0","01")
+MPY_ENC(M2_vmac2su_s1, "0111","xxxxx","1","1","1","1","01")
+MPY_ENC(M4_pmpyw_acc, "0111","xxxxx","1","1","0","0","11")
+MPY_ENC(M4_vpmpyh_acc, "0111","xxxxx","1","1","0","1","11")
+MPY_ENC(M5_vmacbuu, "0111","xxxxx","0","0","0","1","01")
+MPY_ENC(M5_vmacbsu, "0111","xxxxx","0","0","1","1","01")
+
+
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1000 -------- PP------ --------","[#8] Rdd=(Rss,Rtt)")
+MPY_ENC(M2_vrcmpyi_s0, "1000","ddddd","0","0","0","0","00")
+MPY_ENC(M2_vdmpys_s0, "1000","ddddd","1","0","0","0","00")
+MPY_ENC(M2_vdmpys_s1, "1000","ddddd","1","0","0","1","00")
+MPY_ENC(M2_vrcmpyi_s0c, "1000","ddddd","0","0","1","0","00")
+MPY_ENC(M2_vabsdiffw, "1000","ddddd","0","1","0","0","00")
+MPY_ENC(M6_vabsdiffub, "1000","ddddd","0","1","0","1","00")
+MPY_ENC(M2_vabsdiffh, "1000","ddddd","0","1","1","0","00")
+MPY_ENC(M6_vabsdiffb, "1000","ddddd","0","1","1","1","00")
+MPY_ENC(M2_vrcmpys_s1_h, "1000","ddddd","1","1","0","1","00")
+MPY_ENC(M2_vrcmpys_s1_l, "1000","ddddd","1","1","1","1","00")
+MPY_ENC(M2_vrcmpyr_s0c, "1000","ddddd","0","1","1","0","01")
+MPY_ENC(M2_vrcmpyr_s0, "1000","ddddd","0","0","0","0","01")
+MPY_ENC(A2_vraddub, "1000","ddddd","0","0","1","0","01")
+MPY_ENC(M2_mmpyl_s0, "1000","ddddd","1","0","0","0","01")
+MPY_ENC(M2_mmpyl_s1, "1000","ddddd","1","0","0","1","01")
+MPY_ENC(M2_mmpyl_rs0, "1000","ddddd","1","1","0","0","01")
+MPY_ENC(M2_mmpyl_rs1, "1000","ddddd","1","1","0","1","01")
+MPY_ENC(M2_mmpyul_s0, "1000","ddddd","1","0","1","0","01")
+MPY_ENC(M2_mmpyul_s1, "1000","ddddd","1","0","1","1","01")
+MPY_ENC(M2_mmpyul_rs0, "1000","ddddd","1","1","1","0","01")
+MPY_ENC(M2_mmpyul_rs1, "1000","ddddd","1","1","1","1","01")
+MPY_ENC(M2_vrmpy_s0, "1000","ddddd","0","0","0","0","10")
+MPY_ENC(A2_vrsadub, "1000","ddddd","0","0","1","0","10")
+MPY_ENC(M2_vmpy2es_s0, "1000","ddddd","1","0","0","0","10")
+MPY_ENC(M2_vmpy2es_s1, "1000","ddddd","1","0","0","1","10")
+MPY_ENC(M2_vcmpy_s0_sat_i, "1000","ddddd","1","0","1","0","10")
+MPY_ENC(M2_vcmpy_s0_sat_r, "1000","ddddd","1","1","0","0","10")
+MPY_ENC(M2_vcmpy_s1_sat_i, "1000","ddddd","1","0","1","1","10")
+MPY_ENC(M2_vcmpy_s1_sat_r, "1000","ddddd","1","1","0","1","10")
+
+MPY_ENC(M2_mmpyh_s0, "1000","ddddd","1","0","0","0","11")
+MPY_ENC(M2_mmpyh_s1, "1000","ddddd","1","0","0","1","11")
+MPY_ENC(M2_mmpyh_rs0, "1000","ddddd","1","1","0","0","11")
+MPY_ENC(M2_mmpyh_rs1, "1000","ddddd","1","1","0","1","11")
+MPY_ENC(M2_mmpyuh_s0, "1000","ddddd","1","0","1","0","11")
+MPY_ENC(M2_mmpyuh_s1, "1000","ddddd","1","0","1","1","11")
+MPY_ENC(M2_mmpyuh_rs0, "1000","ddddd","1","1","1","0","11")
+MPY_ENC(M2_mmpyuh_rs1, "1000","ddddd","1","1","1","1","11")
+
+MPY_ENC(M4_vrmpyeh_s0, "1000","ddddd","1","0","1","0","00")
+MPY_ENC(M4_vrmpyeh_s1, "1000","ddddd","1","0","1","1","00")
+MPY_ENC(M4_vrmpyoh_s0, "1000","ddddd","0","1","0","0","10")
+MPY_ENC(M4_vrmpyoh_s1, "1000","ddddd","0","1","0","1","10")
+MPY_ENC(M5_vrmpybuu, "1000","ddddd","0","0","0","1","01")
+MPY_ENC(M5_vrmpybsu, "1000","ddddd","0","0","1","1","01")
+MPY_ENC(M5_vdmpybsu, "1000","ddddd","0","1","0","1","01")
+
+MPY_ENC(F2_dfadd, "1000","ddddd","0","0","0","0","11")
+MPY_ENC(F2_dfsub, "1000","ddddd","0","0","0","1","11")
+MPY_ENC(F2_dfmpyfix, "1000","ddddd","0","0","1","0","11")
+MPY_ENC(F2_dfmin, "1000","ddddd","0","0","1","1","11")
+MPY_ENC(F2_dfmax, "1000","ddddd","0","1","0","0","11")
+MPY_ENC(F2_dfmpyll, "1000","ddddd","0","1","0","1","11")
+#ifdef ADD_DP_OPS
+MPY_ENC(F2_dfdivcheat, "1000","ddddd","0","0","0","1","00")
+
+MPY_ENC(F2_dffixupn, "1000","ddddd","0","1","0","1","11")
+MPY_ENC(F2_dffixupd, "1000","ddddd","0","1","1","0","11")
+MPY_ENC(F2_dfrecipa, "1000","ddddd","0","1","1","1","ee")
+#endif
+
+MPY_ENC(M7_dcmpyrw, "1000","ddddd","0","0","0","1","10")
+MPY_ENC(M7_dcmpyrwc, "1000","ddddd","0","0","1","1","10")
+MPY_ENC(M7_dcmpyiw, "1000","ddddd","0","1","1","0","10")
+MPY_ENC(M7_dcmpyiwc, "1000","ddddd","0","1","1","1","10")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1001 -------- PP------ --------","[#9] Rd=(Rss,Rtt)")
+MPY_ENC(M2_vdmpyrs_s0, "1001","ddddd","0","0","0","0","00")
+MPY_ENC(M2_vdmpyrs_s1, "1001","ddddd","0","0","0","1","00")
+
+MPY_ENC(M7_wcmpyrw, "1001","ddddd","0","0","1","0","00")
+MPY_ENC(M7_wcmpyrw_rnd, "1001","ddddd","0","0","1","1","00")
+MPY_ENC(M7_wcmpyiw, "1001","ddddd","0","1","0","0","00")
+MPY_ENC(M7_wcmpyiw_rnd, "1001","ddddd","0","1","0","1","00")
+
+MPY_ENC(M7_wcmpyrwc, "1001","ddddd","0","1","1","0","00")
+MPY_ENC(M7_wcmpyrwc_rnd, "1001","ddddd","0","1","1","1","00")
+MPY_ENC(M7_wcmpyiwc, "1001","ddddd","1","0","0","0","00")
+MPY_ENC(M7_wcmpyiwc_rnd, "1001","ddddd","1","0","0","1","00")
+
+
+
+MPY_ENC(M2_vradduh, "1001","ddddd","-","-","-","0","01")
+MPY_ENC(M2_vrcmpys_s1rp_h, "1001","ddddd","1","1","-","1","10")
+MPY_ENC(M2_vrcmpys_s1rp_l, "1001","ddddd","1","1","-","1","11")
+MPY_ENC(M2_vraddh, "1001","ddddd","1","1","-","0","11")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1010 -------- PP------ --------","[#10] Rxx=(Rss,Rtt)")
+MPY_ENC(M2_vrcmaci_s0, "1010","xxxxx","0","0","0","0","00")
+MPY_ENC(M2_vdmacs_s0, "1010","xxxxx","1","0","0","0","00")
+MPY_ENC(M2_vdmacs_s1, "1010","xxxxx","1","0","0","1","00")
+MPY_ENC(M2_vrcmaci_s0c, "1010","xxxxx","0","0","1","0","00")
+MPY_ENC(M2_vcmac_s0_sat_i, "1010","xxxxx","1","0","1","0","00")
+MPY_ENC(M2_vcmac_s0_sat_r, "1010","xxxxx","1","1","0","0","00")
+MPY_ENC(M2_vrcmpys_acc_s1_h, "1010","xxxxx","1","1","0","1","00")
+MPY_ENC(M2_vrcmpys_acc_s1_l, "1010","xxxxx","1","1","1","1","00")
+MPY_ENC(M2_vrcmacr_s0, "1010","xxxxx","0","0","0","0","01")
+MPY_ENC(A2_vraddub_acc, "1010","xxxxx","0","0","1","0","01")
+MPY_ENC(M2_mmacls_s0, "1010","xxxxx","1","0","0","0","01")
+MPY_ENC(M2_mmacls_s1, "1010","xxxxx","1","0","0","1","01")
+MPY_ENC(M2_mmacls_rs0, "1010","xxxxx","1","1","0","0","01")
+MPY_ENC(M2_mmacls_rs1, "1010","xxxxx","1","1","0","1","01")
+MPY_ENC(M2_mmaculs_s0, "1010","xxxxx","1","0","1","0","01")
+MPY_ENC(M2_mmaculs_s1, "1010","xxxxx","1","0","1","1","01")
+MPY_ENC(M2_mmaculs_rs0, "1010","xxxxx","1","1","1","0","01")
+MPY_ENC(M2_mmaculs_rs1, "1010","xxxxx","1","1","1","1","01")
+MPY_ENC(M2_vrcmacr_s0c, "1010","xxxxx","0","1","1","0","01")
+MPY_ENC(M2_vrmac_s0, "1010","xxxxx","0","0","0","0","10")
+MPY_ENC(A2_vrsadub_acc, "1010","xxxxx","0","0","1","0","10")
+MPY_ENC(M2_vmac2es_s0, "1010","xxxxx","1","0","0","0","10")
+MPY_ENC(M2_vmac2es_s1, "1010","xxxxx","1","0","0","1","10")
+MPY_ENC(M2_vmac2es, "1010","xxxxx","0","1","0","0","10")
+MPY_ENC(M2_mmachs_s0, "1010","xxxxx","1","0","0","0","11")
+MPY_ENC(M2_mmachs_s1, "1010","xxxxx","1","0","0","1","11")
+MPY_ENC(M2_mmachs_rs0, "1010","xxxxx","1","1","0","0","11")
+MPY_ENC(M2_mmachs_rs1, "1010","xxxxx","1","1","0","1","11")
+MPY_ENC(M2_mmacuhs_s0, "1010","xxxxx","1","0","1","0","11")
+MPY_ENC(M2_mmacuhs_s1, "1010","xxxxx","1","0","1","1","11")
+MPY_ENC(M2_mmacuhs_rs0, "1010","xxxxx","1","1","1","0","11")
+MPY_ENC(M2_mmacuhs_rs1, "1010","xxxxx","1","1","1","1","11")
+MPY_ENC(M4_vrmpyeh_acc_s0, "1010","xxxxx","1","1","0","0","10")
+MPY_ENC(M4_vrmpyeh_acc_s1, "1010","xxxxx","1","1","0","1","10")
+MPY_ENC(M4_vrmpyoh_acc_s0, "1010","xxxxx","1","1","1","0","10")
+MPY_ENC(M4_vrmpyoh_acc_s1, "1010","xxxxx","1","1","1","1","10")
+MPY_ENC(M5_vrmacbuu, "1010","xxxxx","0","0","0","1","01")
+MPY_ENC(M5_vrmacbsu, "1010","xxxxx","0","0","1","1","01")
+MPY_ENC(M5_vdmacbsu, "1010","xxxxx","0","1","0","0","01")
+
+MPY_ENC(F2_dfmpylh, "1010","xxxxx","0","0","0","0","11")
+MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","0","1","11")
+#ifdef ADD_DP_OPS
+MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","1","0","11")
+MPY_ENC(F2_dffma, "1010","xxxxx","0","0","0","0","11")
+MPY_ENC(F2_dffms, "1010","xxxxx","0","0","0","1","11")
+
+MPY_ENC(F2_dffma_lib, "1010","xxxxx","0","0","1","0","11")
+MPY_ENC(F2_dffms_lib, "1010","xxxxx","0","0","1","1","11")
+MPY_ENC(F2_dffma_sc, "1010","xxxxx","0","1","1","1","uu")
+#endif
+
+
+MPY_ENC(M7_dcmpyrw_acc, "1010","xxxxx","0","0","0","1","10")
+MPY_ENC(M7_dcmpyrwc_acc, "1010","xxxxx","0","0","1","1","10")
+MPY_ENC(M7_dcmpyiw_acc, "1010","xxxxx","0","1","1","0","10")
+MPY_ENC(M7_dcmpyiwc_acc, "1010","xxxxx","1","0","1","0","10")
+
+
+
+
+MPY_ENC(A5_ACS, "1010","xxxxx","0","1","0","1","ee")
+MPY_ENC(A6_vminub_RdP, "1010","ddddd","0","1","1","1","ee")
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1011 -------- PP------ --------","[#11] Reserved")
+MPY_ENC(F2_sfadd, "1011","ddddd","0","0","0","0","00")
+MPY_ENC(F2_sfsub, "1011","ddddd","0","0","0","0","01")
+MPY_ENC(F2_sfmax, "1011","ddddd","0","0","0","1","00")
+MPY_ENC(F2_sfmin, "1011","ddddd","0","0","0","1","01")
+MPY_ENC(F2_sfmpy, "1011","ddddd","0","0","1","0","00")
+MPY_ENC(F2_sffixupn, "1011","ddddd","0","0","1","1","00")
+MPY_ENC(F2_sffixupd, "1011","ddddd","0","0","1","1","01")
+MPY_ENC(F2_sfrecipa, "1011","ddddd","1","1","1","1","ee")
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1100 -------- PP------ --------","[#12] Rd=(Rs,Rt)")
+DEF_FIELD32(ICLASS_M" 1100 -------- PP------ --!-----",Mc_tH,"Rt is High") /*Rt high */
+DEF_FIELD32(ICLASS_M" 1100 -------- PP------ -!------",Mc_sH,"Rs is High") /* Rs high */
+SP_MPY(M2_mpy, "1100","ddddd","0","0","0")
+SP_MPY(M2_mpy_sat, "1100","ddddd","1","0","0")
+SP_MPY(M2_mpy_rnd, "1100","ddddd","0","1","0")
+SP_MPY(M2_mpy_sat_rnd, "1100","ddddd","1","1","0")
+SP_MPY(M2_mpyu, "1100","ddddd","0","0","1")
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1101 -------- PP------ --------","[#13] Rd=(Rs,Rt)")
+/* EJP: same as mpyi MPY_ENC(M2_mpyui, "1101","ddddd","0","0","1","0","00") */
+MPY_ENC(M2_mpyi, "1101","ddddd","0","0","0","0","00")
+MPY_ENC(M2_mpy_up, "1101","ddddd","0","0","0","0","01")
+MPY_ENC(M2_mpyu_up, "1101","ddddd","0","0","1","0","01")
+MPY_ENC(M2_dpmpyss_rnd_s0, "1101","ddddd","0","1","0","0","01")
+MPY_ENC(M2_cmpyrs_s0, "1101","ddddd","1","1","0","0","10")
+MPY_ENC(M2_cmpyrs_s1, "1101","ddddd","1","1","0","1","10")
+MPY_ENC(M2_cmpyrsc_s0, "1101","ddddd","1","1","1","0","10")
+MPY_ENC(M2_cmpyrsc_s1, "1101","ddddd","1","1","1","1","10")
+MPY_ENC(M2_vmpy2s_s0pack, "1101","ddddd","1","1","0","0","11")
+MPY_ENC(M2_vmpy2s_s1pack, "1101","ddddd","1","1","0","1","11")
+MPY_ENC(M2_hmmpyh_rs1, "1101","ddddd","1","1","0","1","00")
+MPY_ENC(M2_hmmpyl_rs1, "1101","ddddd","1","1","1","1","00")
+
+MPY_ENC(M2_hmmpyh_s1, "1101","ddddd","0","1","0","1","00")
+MPY_ENC(M2_hmmpyl_s1, "1101","ddddd","0","1","0","1","01")
+MPY_ENC(M2_mpy_up_s1, "1101","ddddd","0","1","0","1","10")
+MPY_ENC(M2_mpy_up_s1_sat, "1101","ddddd","0","1","1","1","00")
+MPY_ENC(M2_mpysu_up, "1101","ddddd","0","1","1","0","01")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1110 -------- PP------ --------","[#14] Rx=(Rs,Rt)")
+DEF_FIELD32(ICLASS_M" 1110 -------- PP------ --!-----",Md_tH,"Rt is High") /*Rt high */
+DEF_FIELD32(ICLASS_M" 1110 -------- PP------ -!------",Md_sH,"Rs is High") /* Rs high */
+SP_MPY(M2_mpyu_acc, "1110","xxxxx","0","0","1")
+SP_MPY(M2_mpy_acc, "1110","xxxxx","0","0","0")
+SP_MPY(M2_mpy_acc_sat, "1110","xxxxx","1","0","0")
+SP_MPY(M2_mpyu_nac, "1110","xxxxx","0","1","1")
+SP_MPY(M2_mpy_nac, "1110","xxxxx","0","1","0")
+SP_MPY(M2_mpy_nac_sat, "1110","xxxxx","1","1","0")
+
+
+DEF_FIELDROW_DESC32(ICLASS_M" 1111 -------- PP------ --------","[#15] Rx=(Rs,Rt)")
+MPY_ENC(M2_maci, "1111","xxxxx","0","0","0","0","00")
+MPY_ENC(M2_mnaci, "1111","xxxxx","0","0","0","1","00")
+MPY_ENC(M2_acci, "1111","xxxxx","0","0","0","0","01")
+MPY_ENC(M2_nacci, "1111","xxxxx","0","0","0","1","01")
+MPY_ENC(M2_xor_xacc, "1111","xxxxx","0","0","0","1","11")
+MPY_ENC(M2_subacc, "1111","xxxxx","0","0","0","0","11")
+
+MPY_ENC(M4_mac_up_s1_sat, "1111","xxxxx","0","1","1","0","00")
+MPY_ENC(M4_nac_up_s1_sat, "1111","xxxxx","0","1","1","0","01")
+
+MPY_ENC(M4_and_and, "1111","xxxxx","0","0","1","0","00")
+MPY_ENC(M4_and_or, "1111","xxxxx","0","0","1","0","01")
+MPY_ENC(M4_and_xor, "1111","xxxxx","0","0","1","0","10")
+MPY_ENC(M4_or_and, "1111","xxxxx","0","0","1","0","11")
+MPY_ENC(M4_or_or, "1111","xxxxx","0","0","1","1","00")
+MPY_ENC(M4_or_xor, "1111","xxxxx","0","0","1","1","01")
+MPY_ENC(M4_xor_and, "1111","xxxxx","0","0","1","1","10")
+MPY_ENC(M4_xor_or, "1111","xxxxx","0","0","1","1","11")
+
+MPY_ENC(M4_or_andn, "1111","xxxxx","0","1","0","0","00")
+MPY_ENC(M4_and_andn, "1111","xxxxx","0","1","0","0","01")
+MPY_ENC(M4_xor_andn, "1111","xxxxx","0","1","0","0","10")
+
+MPY_ENC(F2_sffma, "1111","xxxxx","1","0","0","0","00")
+MPY_ENC(F2_sffms, "1111","xxxxx","1","0","0","0","01")
+
+MPY_ENC(F2_sffma_lib, "1111","xxxxx","1","0","0","0","10")
+MPY_ENC(F2_sffms_lib, "1111","xxxxx","1","0","0","0","11")
+
+MPY_ENC(F2_sffma_sc, "1111","xxxxx","1","1","1","0","uu")
+
+
+/*******************************/
+/* */
+/* */
+/* ALU32_2op */
+/* */
+/* */
+/*******************************/
+DEF_CLASS32(ICLASS_ADDI" ---- -------- PP------ --------",ALU32_ADDI)
+
+DEF_CLASS32(ICLASS_ALU2op" ---- -------- PP------ --------",ALU32_2op)
+DEF_FIELD32(ICLASS_ALU2op" !--- -------- PP------ --------",A2_Rs,"No Rs read")
+DEF_FIELD32(ICLASS_ALU2op" -!!! -------- PP------ --------",A2_MajOp,"Major Opcode")
+DEF_FIELD32(ICLASS_ALU2op" ---- !!!----- PP------ --------",A2_MinOp,"Minor Opcode")
+
+DEF_FIELD32(ICLASS_ALU3op" -!!! -------- PP------ --------",A3_MajOp,"Major Opcode")
+DEF_FIELD32(ICLASS_ALU3op" ---- !!!----- PP------ --------",A3_MinOp,"Minor Opcode")
+DEF_CLASS32(ICLASS_ALU3op" ---- -------- PP------ --------",ALU32_3op)
+DEF_FIELD32(ICLASS_ALU3op" !--- -------- PP------ --------",A3_P,"Predicated")
+DEF_FIELD32(ICLASS_ALU3op" ---- -------- PP!----- --------",A3_DN,"Dot-new")
+DEF_FIELD32(ICLASS_ALU3op" ---- -------- PP------ !-------",A3_PS,"Predicate sense")
+
+
+/*************************/
+/* Our good friend addi */
+/*************************/
+DEF_ENC32(A2_addi, ICLASS_ADDI" iiii iiisssss PPiiiiii iiiddddd")
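+/*
+ * Illustrative note (not part of the imported sources): reading the
+ * encoding string left to right, the sixteen scattered 'i' characters
+ * assemble the #s16 immediate (leftmost bit most significant, by the
+ * convention used throughout these tables); 'sssss' and 'ddddd' are the
+ * 5-bit Rs and Rd fields and 'PP' marks the packet parse bits.
+ */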
+
+
+/*******************************/
+/* Standard ALU32 insns */
+/*******************************/
+
+#define ALU32_IRR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG, ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"iiiii "VMIN3 DSTCHARS)
+
+#define ALU32_RR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG, ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"----- "VMIN3 DSTCHARS)
+
+#define CONDA32_RR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG##t, ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"-00uu "VMIN3 DSTCHARS)\
+DEF_ENC32(TAG##f, ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"-10uu "VMIN3 DSTCHARS)\
+DEF_ENC32(TAG##tnew,ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"-01uu "VMIN3 DSTCHARS)\
+DEF_ENC32(TAG##fnew,ICLASS_ALU2op" "MAJ4" "MIN3"sssss PP"SMOD1"-11uu "VMIN3 DSTCHARS)
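+/*
+ * Illustrative expansion (comment only, not an extra encoding; assumes
+ * whitespace inside encoding strings is insignificant, as elsewhere in
+ * this file): ALU32_RR_ENC(A2_aslh,"0000","000","0","---","ddddd")
+ * pastes into
+ *   DEF_ENC32(A2_aslh, ICLASS_ALU2op" 0000 000sssss PP0----- ---ddddd")
+ * and CONDA32_RR_ENC additionally emits the t/f/tnew/fnew predicated
+ * variants by varying two condition bits ahead of the 'uu' predicate
+ * register field.
+ */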
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0000 -------- PP------ --------","[#0] (Pu) Rd=(Rs)")
+DEF_FIELD32( ICLASS_ALU2op" 0000 -------- PP!----- --------",A32a_C,"Conditional")
+DEF_FIELD32( ICLASS_ALU2op" 0000 -------- PP--!--- --------",A32a_S,"Predicate sense")
+DEF_FIELD32( ICLASS_ALU2op" 0000 -------- PP---!-- --------",A32a_dn,"Dot-new")
+
+ALU32_RR_ENC(A2_aslh, "0000","000","0","---","ddddd")
+ALU32_RR_ENC(A2_asrh, "0000","001","0","---","ddddd")
+ALU32_RR_ENC(A2_tfr, "0000","011","0","---","ddddd")
+ALU32_RR_ENC(A2_sxtb, "0000","101","0","---","ddddd")
+ALU32_RR_ENC(A2_zxth, "0000","110","0","---","ddddd")
+ALU32_RR_ENC(A2_sxth, "0000","111","0","---","ddddd")
+
+CONDA32_RR_ENC(A4_paslh, "0000","000","1","---","ddddd")
+CONDA32_RR_ENC(A4_pasrh, "0000","001","1","---","ddddd")
+CONDA32_RR_ENC(A4_pzxtb, "0000","100","1","---","ddddd")
+CONDA32_RR_ENC(A4_psxtb, "0000","101","1","---","ddddd")
+CONDA32_RR_ENC(A4_pzxth, "0000","110","1","---","ddddd")
+CONDA32_RR_ENC(A4_psxth, "0000","111","1","---","ddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0001 -------- PP------ --------","[#1] Rx=(#u16)")
+DEF_ENC32(A2_tfril, ICLASS_ALU2op" 0001 ii1xxxxx PPiiiiii iiiiiiii")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0010 -------- PP------ --------","[#2] Rx=(#u16)")
+DEF_ENC32(A2_tfrih, ICLASS_ALU2op" 0010 ii1xxxxx PPiiiiii iiiiiiii")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0011 -------- PP------ --------","[#3] Rd=(Pu,Rs,#s8)")
+DEF_ENC32(C2_muxir, ICLASS_ALU2op" 0011 0uusssss PP0iiiii iiiddddd")
+DEF_ENC32(C2_muxri, ICLASS_ALU2op" 0011 1uusssss PP0iiiii iiiddddd")
+
+DEF_ENC32(A4_combineri, ICLASS_ALU2op" 0011 -00sssss PP1iiiii iiiddddd") /* Rdd = (Rs,#s8) */
+DEF_ENC32(A4_combineir, ICLASS_ALU2op" 0011 -01sssss PP1iiiii iiiddddd") /* Rdd = (Rs,#s8) */
+DEF_ENC32(A4_rcmpeqi, ICLASS_ALU2op" 0011 -10sssss PP1iiiii iiiddddd") /* Rd = (Rs,#s8) */
+DEF_ENC32(A4_rcmpneqi, ICLASS_ALU2op" 0011 -11sssss PP1iiiii iiiddddd") /* Rd = (Rs,#s8) */
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0100 -------- PP------ --------","[#4] (Pu) Rd=(Rs,#s8)")
+DEF_FIELD32( ICLASS_ALU2op" 0100 -------- PP!----- --------",A32a_DN,"Dot-new")
+DEF_FIELD32( ICLASS_ALU2op" 0100 !------- PP------ --------",A32a_PS,"Predicate sense")
+DEF_ENC32(A2_paddit, ICLASS_ALU2op" 0100 0uusssss PP0iiiii iiiddddd")
+DEF_ENC32(A2_padditnew, ICLASS_ALU2op" 0100 0uusssss PP1iiiii iiiddddd")
+DEF_ENC32(A2_paddif, ICLASS_ALU2op" 0100 1uusssss PP0iiiii iiiddddd")
+DEF_ENC32(A2_paddifnew, ICLASS_ALU2op" 0100 1uusssss PP1iiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0101 -------- PP------ --------","[#5] Pd=(Rs,#s10)")
+DEF_ENC32(C2_cmpeqi, ICLASS_ALU2op" 0101 00isssss PPiiiiii iii000dd")
+DEF_ENC32(C2_cmpgti, ICLASS_ALU2op" 0101 01isssss PPiiiiii iii000dd")
+DEF_ENC32(C2_cmpgtui, ICLASS_ALU2op" 0101 100sssss PPiiiiii iii000dd")
+
+DEF_ENC32(C4_cmpneqi, ICLASS_ALU2op" 0101 00isssss PPiiiiii iii100dd")
+DEF_ENC32(C4_cmpltei, ICLASS_ALU2op" 0101 01isssss PPiiiiii iii100dd")
+DEF_ENC32(C4_cmplteui, ICLASS_ALU2op" 0101 100sssss PPiiiiii iii100dd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0110 -------- PP------ --------","[#6] Rd=(Rs,#s10)")
+ALU32_IRR_ENC(A2_andir, "0110","00i","i","iii","ddddd")
+ALU32_IRR_ENC(A2_subri, "0110","01i","i","iii","ddddd")
+ALU32_IRR_ENC(A2_orir, "0110","10i","i","iii","ddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 0111 -------- PP------ --------","[#7] Reserved")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1000 -------- PP------ --------","[#8] Rd=#s16")
+DEF_ENC32(A2_tfrsi, ICLASS_ALU2op" 1000 ii-iiiii PPiiiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1001 -------- PP------ --------","[#9] Reserved")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 101- -------- PP------ --------","[#10,#11] Rd=(Pu,#s8,#S8)")
+DEF_ENC32(C2_muxii, ICLASS_ALU2op" 101u uIIIIIII PPIiiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1100 -------- PP------ --------","[#12] Rdd=(#s8,#S8)")
+DEF_ENC32(A2_combineii, ICLASS_ALU2op" 1100 0IIIIIII PPIiiiii iiiddddd")
+DEF_ENC32(A4_combineii, ICLASS_ALU2op" 1100 1--IIIII PPIiiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1101 -------- PP------ --------","[#13] Reserved")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1110 -------- PP------ --------","[#14] (Pu) Rd=#s12")
+DEF_FIELD32( ICLASS_ALU2op" 1110 ---0---- PP!----- --------",A32c_DN,"Dot-new")
+DEF_FIELD32( ICLASS_ALU2op" 1110 !--0---- PP------ --------",A32c_PS,"Predicate sense")
+DEF_ENC32(C2_cmovenewit,ICLASS_ALU2op" 1110 0uu0iiii PP1iiiii iiiddddd")
+DEF_ENC32(C2_cmovenewif,ICLASS_ALU2op" 1110 1uu0iiii PP1iiiii iiiddddd")
+DEF_ENC32(C2_cmoveit, ICLASS_ALU2op" 1110 0uu0iiii PP0iiiii iiiddddd")
+DEF_ENC32(C2_cmoveif, ICLASS_ALU2op" 1110 1uu0iiii PP0iiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32( ICLASS_ALU2op" 1111 -------- PP------ --------","[#15] nop")
+DEF_ENC32(A2_nop, ICLASS_ALU2op" 1111 -------- PP------ --------")
+
+
+
+
+
+
+
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* ALU32_3op */
+/* */
+/* */
+/*******************************/
+
+
+#define V2A32_RRR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG, ICLASS_ALU3op" "MAJ4" "MIN3"sssss PP"SMOD1"ttttt "VMIN3 DSTCHARS)
+
+#define V2A32_RR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG, ICLASS_ALU3op" "MAJ4" "MIN3"sssss PP"SMOD1"----- "VMIN3 DSTCHARS)
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0000 -------- PP------ --------","[#0] Reserved")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0001 -------- PP------ --------","[#1] Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_and, "0001","000","-","---","ddddd")
+V2A32_RRR_ENC(A2_or, "0001","001","-","---","ddddd")
+V2A32_RRR_ENC(A2_xor, "0001","011","-","---","ddddd")
+V2A32_RRR_ENC(A4_andn, "0001","100","-","---","ddddd")
+V2A32_RRR_ENC(A4_orn, "0001","101","-","---","ddddd")
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0010 -------- PP------ --------","[#2] Pd=(Rs,Rt)")
+V2A32_RRR_ENC(C2_cmpeq, "0010","-00","-","---","000dd")
+V2A32_RRR_ENC(C2_cmpgt, "0010","-10","-","---","000dd")
+V2A32_RRR_ENC(C2_cmpgtu, "0010","-11","-","---","000dd")
+
+V2A32_RRR_ENC(C4_cmpneq, "0010","-00","-","---","100dd")
+V2A32_RRR_ENC(C4_cmplte, "0010","-10","-","---","100dd")
+V2A32_RRR_ENC(C4_cmplteu, "0010","-11","-","---","100dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0011 -------- PP------ --------","[#3] Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_add, "0011","000","-","---","ddddd")
+V2A32_RRR_ENC(A2_sub, "0011","001","-","---","ddddd")
+V2A32_RRR_ENC(A2_combine_hh, "0011","100","-","---","ddddd")
+V2A32_RRR_ENC(A2_combine_hl, "0011","101","-","---","ddddd")
+V2A32_RRR_ENC(A2_combine_lh, "0011","110","-","---","ddddd")
+V2A32_RRR_ENC(A2_combine_ll, "0011","111","-","---","ddddd")
+V2A32_RRR_ENC(A4_rcmpeq, "0011","010","-","---","ddddd")
+V2A32_RRR_ENC(A4_rcmpneq, "0011","011","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0100 -------- PP------ --------","[#4] Rd=(Pu,Rs,Rt)")
+V2A32_RRR_ENC(C2_mux, "0100","---","-","-uu","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0101 -------- PP------ --------","[#5] Rdd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_combinew, "0101","0--","-","---","ddddd")
+V2A32_RRR_ENC(S2_packhl, "0101","1--","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0110 -------- PP------ --------","[#6] Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_svaddh, "0110","000","-","---","ddddd")
+V2A32_RRR_ENC(A2_svaddhs, "0110","001","-","---","ddddd")
+V2A32_RRR_ENC(A2_svadduhs, "0110","011","-","---","ddddd")
+V2A32_RRR_ENC(A2_svsubh, "0110","100","-","---","ddddd")
+V2A32_RRR_ENC(A2_svsubhs, "0110","101","-","---","ddddd")
+V2A32_RRR_ENC(A2_svsubuhs, "0110","111","-","---","ddddd")
+V2A32_RRR_ENC(A2_addsat, "0110","010","-","---","ddddd")
+V2A32_RRR_ENC(A2_subsat, "0110","110","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 0111 -------- PP------ --------","[#7] Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_svavgh, "0111","-00","-","---","ddddd")
+V2A32_RRR_ENC(A2_svavghs, "0111","-01","-","---","ddddd")
+V2A32_RRR_ENC(A2_svnavgh, "0111","-11","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1000 -------- PP------ --------","[#8] Reserved")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1001 -------- PP------ --------","[#9] (Pu) Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_pandt, "1001","-00","0","0uu","ddddd")
+V2A32_RRR_ENC(A2_pandtnew, "1001","-00","1","0uu","ddddd")
+V2A32_RRR_ENC(A2_pandf, "1001","-00","0","1uu","ddddd")
+V2A32_RRR_ENC(A2_pandfnew, "1001","-00","1","1uu","ddddd")
+V2A32_RRR_ENC(A2_port, "1001","-01","0","0uu","ddddd")
+V2A32_RRR_ENC(A2_portnew, "1001","-01","1","0uu","ddddd")
+V2A32_RRR_ENC(A2_porf, "1001","-01","0","1uu","ddddd")
+V2A32_RRR_ENC(A2_porfnew, "1001","-01","1","1uu","ddddd")
+V2A32_RRR_ENC(A2_pxort, "1001","-11","0","0uu","ddddd")
+V2A32_RRR_ENC(A2_pxortnew, "1001","-11","1","0uu","ddddd")
+V2A32_RRR_ENC(A2_pxorf, "1001","-11","0","1uu","ddddd")
+V2A32_RRR_ENC(A2_pxorfnew, "1001","-11","1","1uu","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1010 -------- PP------ --------","[#10] Reserved")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1011 -------- PP------ --------","[#11] (Pu) Rd=(Rs,Rt)")
+V2A32_RRR_ENC(A2_paddt, "1011","0-0","0","0uu","ddddd")
+V2A32_RRR_ENC(A2_paddtnew, "1011","0-0","1","0uu","ddddd")
+V2A32_RRR_ENC(A2_paddf, "1011","0-0","0","1uu","ddddd")
+V2A32_RRR_ENC(A2_paddfnew, "1011","0-0","1","1uu","ddddd")
+V2A32_RRR_ENC(A2_psubt, "1011","0-1","0","0uu","ddddd")
+V2A32_RRR_ENC(A2_psubtnew, "1011","0-1","1","0uu","ddddd")
+V2A32_RRR_ENC(A2_psubf, "1011","0-1","0","1uu","ddddd")
+V2A32_RRR_ENC(A2_psubfnew, "1011","0-1","1","1uu","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1100 -------- PP------ --------","[#12] Reserved")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1101 -------- PP------ --------","[#13] (Pu) Rdd=(Rs,Rt)")
+V2A32_RRR_ENC(C2_ccombinewnewt, "1101","000","1","0uu","ddddd")
+V2A32_RRR_ENC(C2_ccombinewnewf, "1101","000","1","1uu","ddddd")
+V2A32_RRR_ENC(C2_ccombinewt, "1101","000","0","0uu","ddddd")
+V2A32_RRR_ENC(C2_ccombinewf, "1101","000","0","1uu","ddddd")
+
+
+
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU3op" 1110 -------- PP------ --------","[#14] Reserved")
+
+
+
+
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* S */
+/* */
+/* */
+/*******************************/
+
+DEF_CLASS32(ICLASS_S2op" ---- -------- PP------ --------",S_2op)
+DEF_FIELD32(ICLASS_S2op" !!!! -------- PP------ --------",STYPEB_RegType,"Register Type")
+DEF_FIELD32(ICLASS_S2op" ---- !!------ PP------ --------",S2_MajOp,"Major Opcode")
+DEF_FIELD32(ICLASS_S2op" ---- -------- PP------ !!!-----",S2_MinOp,"Minor Opcode")
+
+DEF_CLASS32(ICLASS_S3op" ---- -------- PP------ --------",S_3op)
+DEF_FIELD32(ICLASS_S3op" !!!! -------- PP------ --------",STYPEA_RegType,"Register Type")
+DEF_FIELD32(ICLASS_S3op" ---- !!------ PP------ --------",S3_Maj,"Major Opcode")
+DEF_FIELD32(ICLASS_S3op" ---- -------- PP------ !!------",S3_Min,"Minor Opcode")
+
+
+#define SH_RRR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S3op" "MAJ4" "MIN3"sssss PP"SMOD1"ttttt "VMIN3 DSTCHARS)
+
+#define SH_RRRiENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S3op" "MAJ4" "MIN3"iiiii PP"SMOD1"ttttt "VMIN3 DSTCHARS)
+
+#define SH_RRR_ENCX(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S3op" "MAJ4" "MIN3"sssss PP"SMOD1"xxxxx "VMIN3 DSTCHARS)
+
+#define SH3_RR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S3op" "MAJ4" "MIN3"sssss PP"SMOD1"----- "VMIN3 DSTCHARS)
+
+#define SH_PPP_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S3op" "MAJ4" "MIN3"---ss PP"SMOD1"---tt "VMIN3 DSTCHARS)
+
+#define SH2_RR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S2op" "MAJ4" "MIN3"sssss PP"SMOD1"----- "VMIN3 DSTCHARS)
+
+#define SH2_PPP_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S2op" "MAJ4" "MIN3"---ss PP"SMOD1"---tt "VMIN3 DSTCHARS)
+
+#define SH_RRI4_ENC(TAG,MAJ4,MIN3,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S2op" "MAJ4" "MIN3 "sssss PP00iiii " VMIN3 DSTCHARS)
+
+#define SH_RRI5_ENC(TAG,MAJ4,MIN3,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S2op" "MAJ4" "MIN3 "sssss PP0iiiii " VMIN3 DSTCHARS)
+
+#define SH_RRI6_ENC(TAG,MAJ4,MIN3,VMIN3,DSTCHARS) \
+DEF_ENC32(TAG,ICLASS_S2op" "MAJ4" "MIN3 "sssss PPiiiiii " VMIN3 DSTCHARS)
+
+#define RSHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DMOD1,DSTCHARS) \
+SH_RRR_ENC(S2_asr_r_##TAGEND,MAJ4,MIN3,SMOD1,"00"DMOD1,DSTCHARS) \
+SH_RRR_ENC(S2_lsr_r_##TAGEND,MAJ4,MIN3,SMOD1,"01"DMOD1,DSTCHARS) \
+SH_RRR_ENC(S2_asl_r_##TAGEND,MAJ4,MIN3,SMOD1,"10"DMOD1,DSTCHARS) \
+SH_RRR_ENC(S2_lsl_r_##TAGEND,MAJ4,MIN3,SMOD1,"11"DMOD1,DSTCHARS)
+
+
+#define I5SHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI5_ENC(S2_asr_i_##TAGEND,MAJ4,MIN3,SMOD1 "00",DSTCHARS) \
+SH_RRI5_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI5_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS) \
+SH_RRI5_ENC(S6_rol_i_##TAGEND,MAJ4,MIN3,SMOD1 "11",DSTCHARS)
+
+#define I5SHIFTTYPES_NOROL(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI5_ENC(S2_asr_i_##TAGEND,MAJ4,MIN3,SMOD1 "00",DSTCHARS) \
+SH_RRI5_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI5_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS)
+
+#define I5SHIFTTYPES_NOASR(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI5_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI5_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS) \
+SH_RRI5_ENC(S6_rol_i_##TAGEND,MAJ4,MIN3,SMOD1 "11",DSTCHARS)
+
+#define I4SHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI4_ENC(S2_asr_i_##TAGEND,MAJ4,MIN3,SMOD1 "00",DSTCHARS) \
+SH_RRI4_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI4_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS)
+
+#define I5ASHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI5_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS)
+
+#define I4ASHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI4_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS)
+
+#define I6SHIFTTYPES(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI6_ENC(S2_asr_i_##TAGEND,MAJ4,MIN3,SMOD1 "00",DSTCHARS) \
+SH_RRI6_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI6_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS) \
+SH_RRI6_ENC(S6_rol_i_##TAGEND,MAJ4,MIN3,SMOD1 "11",DSTCHARS)
+
+#define I6SHIFTTYPES_NOASR(TAGEND,MAJ4,MIN3,SMOD1,DSTCHARS) \
+SH_RRI6_ENC(S2_lsr_i_##TAGEND,MAJ4,MIN3,SMOD1 "01",DSTCHARS) \
+SH_RRI6_ENC(S2_asl_i_##TAGEND,MAJ4,MIN3,SMOD1 "10",DSTCHARS) \
+SH_RRI6_ENC(S6_rol_i_##TAGEND,MAJ4,MIN3,SMOD1 "11",DSTCHARS)
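+/*
+ * Illustrative expansion (comment only): RSHIFTTYPES(r,"0110","01-","-","-","ddddd"),
+ * used further below, pastes its first line into
+ *   DEF_ENC32(S2_asr_r_r, ICLASS_S3op" 0110 01-sssss PP-ttttt 00-ddddd")
+ * with the lsr/asl/lsl siblings at minor bits 01/10/11; the I4/I5/I6
+ * families do the same for 4-, 5-, and 6-bit immediate shift amounts.
+ */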
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0000 -------- PP------ --------","[#0] Rdd=(Rss,#u6)")
+/* EJP: there is actually quite a bit of space here, look at the reserved bits */
+I6SHIFTTYPES(p, "0000","000","0","ddddd")
+I5SHIFTTYPES_NOROL(vw, "0000","010","0","ddddd")
+I4SHIFTTYPES(vh, "0000","100","0","ddddd")
+
+
+
+/* These encodings sit in the immediate-shift space but take no immediate */
+SH2_RR_ENC(S2_vsathub_nopack, "0000","000","-","1 00","ddddd")
+SH2_RR_ENC(S2_vsatwuh_nopack, "0000","000","-","1 01","ddddd")
+SH2_RR_ENC(S2_vsatwh_nopack, "0000","000","-","1 10","ddddd")
+SH2_RR_ENC(S2_vsathb_nopack, "0000","000","-","1 11","ddddd")
+
+SH_RRI4_ENC(S5_vasrhrnd, "0000","001", "0 00","ddddd")
+
+SH2_RR_ENC(A2_vabsh, "0000","010","-","1 00","ddddd")
+SH2_RR_ENC(A2_vabshsat, "0000","010","-","1 01","ddddd")
+SH2_RR_ENC(A2_vabsw, "0000","010","-","1 10","ddddd")
+SH2_RR_ENC(A2_vabswsat, "0000","010","-","1 11","ddddd")
+
+SH2_RR_ENC(A2_notp, "0000","100","-","1 00","ddddd")
+SH2_RR_ENC(A2_negp, "0000","100","-","1 01","ddddd")
+SH2_RR_ENC(A2_absp, "0000","100","-","1 10","ddddd")
+SH2_RR_ENC(A2_vconj, "0000","100","-","1 11","ddddd")
+
+SH2_RR_ENC(S2_deinterleave, "0000","110","-","1 00","ddddd")
+SH2_RR_ENC(S2_interleave, "0000","110","-","1 01","ddddd")
+SH2_RR_ENC(S2_brevp, "0000","110","-","1 10","ddddd")
+SH_RRI6_ENC(S2_asr_i_p_rnd, "0000","110", "1 11","ddddd")
+
+SH2_RR_ENC(F2_conv_df2d, "0000","111","0","0 00","ddddd")
+SH2_RR_ENC(F2_conv_df2ud, "0000","111","0","0 01","ddddd")
+SH2_RR_ENC(F2_conv_ud2df, "0000","111","0","0 10","ddddd")
+SH2_RR_ENC(F2_conv_d2df, "0000","111","0","0 11","ddddd")
+#ifdef ADD_DP_OPS
+SH2_RR_ENC(F2_dffixupr, "0000","111","0","1 00","ddddd")
+SH2_RR_ENC(F2_dfsqrtcheat, "0000","111","0","1 01","ddddd")
+#endif
+SH2_RR_ENC(F2_conv_df2d_chop, "0000","111","0","1 10","ddddd")
+SH2_RR_ENC(F2_conv_df2ud_chop,"0000","111","0","1 11","ddddd")
+#ifdef ADD_DP_OPS
+SH2_RR_ENC(F2_dfinvsqrta, "0000","111","1","0 ee","ddddd")
+#endif
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0001 -------- PP------ --------","[#1] Rdd=(Rss,#u6,#U6)")
+DEF_ENC32(S2_extractup,ICLASS_S2op" 0001 IIIsssss PPiiiiii IIIddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0010 -------- PP------ --------","[#2] Rxx=(Rss,#u6)")
+I6SHIFTTYPES(p_nac, "0010","00-","0","xxxxx")
+I6SHIFTTYPES(p_acc, "0010","00-","1","xxxxx")
+I6SHIFTTYPES(p_and, "0010","01-","0","xxxxx")
+I6SHIFTTYPES(p_or, "0010","01-","1","xxxxx")
+I6SHIFTTYPES_NOASR(p_xacc, "0010","10-","0","xxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0011 -------- PP------ --------","[#3] Rxx=(Rss,#u6,#U6)")
+DEF_ENC32(S2_insertp,ICLASS_S2op" 0011 IIIsssss PPiiiiii IIIxxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0100 -------- PP------ --------","[#4] Rdd=(Rs)")
+SH2_RR_ENC(S2_vsxtbh, "0100","00-","-","00-","ddddd")
+SH2_RR_ENC(S2_vzxtbh, "0100","00-","-","01-","ddddd")
+SH2_RR_ENC(S2_vsxthw, "0100","00-","-","10-","ddddd")
+SH2_RR_ENC(S2_vzxthw, "0100","00-","-","11-","ddddd")
+SH2_RR_ENC(A2_sxtw, "0100","01-","-","00-","ddddd")
+SH2_RR_ENC(S2_vsplatrh, "0100","01-","-","01-","ddddd")
+SH2_RR_ENC(S6_vsplatrbp, "0100","01-","-","10-","ddddd")
+
+SH2_RR_ENC(F2_conv_sf2df, "0100","1--","-","000","ddddd")
+SH2_RR_ENC(F2_conv_uw2df, "0100","1--","-","001","ddddd")
+SH2_RR_ENC(F2_conv_w2df, "0100","1--","-","010","ddddd")
+SH2_RR_ENC(F2_conv_sf2ud, "0100","1--","-","011","ddddd")
+SH2_RR_ENC(F2_conv_sf2d, "0100","1--","-","100","ddddd")
+SH2_RR_ENC(F2_conv_sf2ud_chop, "0100","1--","-","101","ddddd")
+SH2_RR_ENC(F2_conv_sf2d_chop, "0100","1--","-","110","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0101 -------- PP------ --------","[#5] Pd=(Rs,#u6)")
+DEF_ENC32(S2_tstbit_i,ICLASS_S2op" 0101 000sssss PP0iiiii ------dd")
+DEF_ENC32(C2_tfrrp, ICLASS_S2op" 0101 010sssss PP------ ------dd")
+DEF_ENC32(C2_bitsclri,ICLASS_S2op" 0101 100sssss PPiiiiii ------dd")
+DEF_ENC32(S4_ntstbit_i,ICLASS_S2op"0101 001sssss PP0iiiii ------dd")
+DEF_ENC32(C4_nbitsclri,ICLASS_S2op"0101 101sssss PPiiiiii ------dd")
+DEF_ENC32(F2_sfclass, ICLASS_S2op"0101 111sssss PP0iiiii ------dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0110 -------- PP------ --------","[#6] Rdd=(Pt)")
+DEF_ENC32(C2_mask, ICLASS_S2op" 0110 --- ----- PP----tt --- ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 0111 -------- PP------ --------","[#7] Rx=(Rs,#u4,#S6)")
+DEF_ENC32(S2_tableidxb,ICLASS_S2op" 0111 00isssss PPIIIIII iiixxxxx")
+DEF_ENC32(S2_tableidxh,ICLASS_S2op" 0111 01isssss PPIIIIII iiixxxxx")
+DEF_ENC32(S2_tableidxw,ICLASS_S2op" 0111 10isssss PPIIIIII iiixxxxx")
+DEF_ENC32(S2_tableidxd,ICLASS_S2op" 0111 11isssss PPIIIIII iiixxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1000 -------- PP------ --------","[#8] Rd=(Rss,#u6)")
+SH2_RR_ENC(S2_vsathub, "1000","000","-","000","ddddd")
+SH2_RR_ENC(S2_vsatwh, "1000","000","-","010","ddddd")
+SH2_RR_ENC(S2_vsatwuh, "1000","000","-","100","ddddd")
+SH2_RR_ENC(S2_vsathb, "1000","000","-","110","ddddd")
+SH2_RR_ENC(S2_clbp, "1000","010","-","000","ddddd")
+SH2_RR_ENC(S2_cl0p, "1000","010","-","010","ddddd")
+SH2_RR_ENC(S2_cl1p, "1000","010","-","100","ddddd")
+SH2_RR_ENC(S2_ct0p, "1000","111","-","010","ddddd")
+SH2_RR_ENC(S2_ct1p, "1000","111","-","100","ddddd")
+SH2_RR_ENC(S2_vtrunohb, "1000","100","-","000","ddddd")
+SH2_RR_ENC(S2_vtrunehb, "1000","100","-","010","ddddd")
+SH2_RR_ENC(S2_vrndpackwh, "1000","100","-","100","ddddd")
+SH2_RR_ENC(S2_vrndpackwhs, "1000","100","-","110","ddddd")
+SH2_RR_ENC(A2_sat, "1000","110","-","000","ddddd")
+SH2_RR_ENC(A2_roundsat, "1000","110","-","001","ddddd")
+SH_RRI5_ENC(S2_asr_i_svw_trun, "1000","110", "010","ddddd")
+SH_RRI5_ENC(A4_bitspliti, "1000","110", "100","ddddd")
+
+SH_RRI5_ENC(A7_clip, "1000","110", "101","ddddd")
+SH_RRI5_ENC(A7_vclip, "1000","110", "110","ddddd")
+
+
+SH2_RR_ENC(S4_clbpnorm, "1000","011","-","000","ddddd")
+SH_RRI6_ENC(S4_clbpaddi, "1000","011", "010","ddddd")
+SH2_RR_ENC(S5_popcountp, "1000","011","-","011","ddddd")
+
+SH_RRI4_ENC(S5_asrhub_rnd_sat, "1000","011", "100","ddddd")
+SH_RRI4_ENC(S5_asrhub_sat, "1000","011", "101","ddddd")
+
+SH2_RR_ENC(F2_conv_df2sf, "1000","000","-","001","ddddd")
+SH2_RR_ENC(F2_conv_ud2sf, "1000","001","-","001","ddddd")
+SH2_RR_ENC(F2_conv_d2sf, "1000","010","-","001","ddddd")
+SH2_RR_ENC(F2_conv_df2uw, "1000","011","-","001","ddddd")
+SH2_RR_ENC(F2_conv_df2w, "1000","100","-","001","ddddd")
+SH2_RR_ENC(F2_conv_df2uw_chop, "1000","101","-","001","ddddd")
+SH2_RR_ENC(F2_conv_df2w_chop, "1000","111","-","001","ddddd")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1001 -------- PP------ --------","[#9] Rd=(Ps,Pt)")
+DEF_ENC32(C2_vitpack, ICLASS_S2op" 1001 -00 ---ss PP----tt --- ddddd")
+DEF_ENC32(C2_tfrpr, ICLASS_S2op" 1001 -1- ---ss PP------ --- ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1010 -------- PP------ --------","[#10] Rdd=(Rss,#u6,#U6)")
+DEF_ENC32(S4_extractp,ICLASS_S2op" 1010 IIIsssss PPiiiiii IIIddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1011 -------- PP------ --------","[#11] Rd=(Rs)")
+SH2_RR_ENC(F2_conv_uw2sf, "1011","001","-","000","ddddd")
+SH2_RR_ENC(F2_conv_w2sf, "1011","010","-","000","ddddd")
+SH2_RR_ENC(F2_conv_sf2uw, "1011","011","-","000","ddddd")
+SH2_RR_ENC(F2_conv_sf2w, "1011","100","-","000","ddddd")
+SH2_RR_ENC(F2_conv_sf2uw_chop, "1011","011","-","001","ddddd")
+SH2_RR_ENC(F2_conv_sf2w_chop, "1011","100","-","001","ddddd")
+SH2_RR_ENC(F2_sffixupr, "1011","101","-","000","ddddd")
+SH2_RR_ENC(F2_sfinvsqrta, "1011","111","-","0ee","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1100 -------- PP------ --------","[#12] Rd=(Rs,#u6)")
+I5SHIFTTYPES(r, "1100","000", "0 ","ddddd")
+SH_RRI5_ENC(S2_asl_i_r_sat, "1100","010", "010","ddddd")
+SH_RRI5_ENC(S2_asr_i_r_rnd, "1100","010", "000","ddddd")
+
+SH2_RR_ENC(S2_svsathb, "1100","10-","-", "00-","ddddd")
+SH2_RR_ENC(S2_svsathub, "1100","10-","-", "01-","ddddd")
+
+SH_RRI5_ENC(A4_cround_ri, "1100","111", "00-","ddddd")
+SH_RRI6_ENC(A7_croundd_ri, "1100","111", "01-","ddddd")
+SH_RRI5_ENC(A4_round_ri, "1100","111", "10-","ddddd")
+SH_RRI5_ENC(A4_round_ri_sat, "1100","111", "11-","ddddd")
+
+DEF_ENC32(S2_setbit_i, ICLASS_S2op" 1100 110sssss PP0iiiii 000ddddd")
+DEF_ENC32(S2_clrbit_i, ICLASS_S2op" 1100 110sssss PP0iiiii 001ddddd")
+DEF_ENC32(S2_togglebit_i,ICLASS_S2op" 1100 110sssss PP0iiiii 010ddddd")
+
+DEF_ENC32(S4_clbaddi ,ICLASS_S2op" 1100 001sssss PPiiiiii 000ddddd")
+
+
+
+/* These encodings sit in the #u6 space but read no immediate */
+SH2_RR_ENC(S2_clb, "1100","000","-","1 00","ddddd")
+SH2_RR_ENC(S2_cl0, "1100","000","-","1 01","ddddd")
+SH2_RR_ENC(S2_cl1, "1100","000","-","1 10","ddddd")
+SH2_RR_ENC(S2_clbnorm, "1100","000","-","1 11","ddddd")
+SH2_RR_ENC(S2_ct0, "1100","010","-","1 00","ddddd")
+SH2_RR_ENC(S2_ct1, "1100","010","-","1 01","ddddd")
+SH2_RR_ENC(S2_brev, "1100","010","-","1 10","ddddd")
+SH2_RR_ENC(S2_vsplatrb, "1100","010","-","1 11","ddddd")
+SH2_RR_ENC(A2_abs, "1100","100","-","1 00","ddddd")
+SH2_RR_ENC(A2_abssat, "1100","100","-","1 01","ddddd")
+SH2_RR_ENC(A2_negsat, "1100","100","-","1 10","ddddd")
+SH2_RR_ENC(A2_swiz, "1100","100","-","1 11","ddddd")
+SH2_RR_ENC(A2_sath, "1100","110","-","1 00","ddddd")
+SH2_RR_ENC(A2_satuh, "1100","110","-","1 01","ddddd")
+SH2_RR_ENC(A2_satub, "1100","110","-","1 10","ddddd")
+SH2_RR_ENC(A2_satb, "1100","110","-","1 11","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1101 -------- PP------ --------","[#13] Rd=(Rs,#u6,#U6)")
+DEF_ENC32(S2_extractu, ICLASS_S2op" 1101 0IIsssss PP0iiiii IIIddddd")
+DEF_ENC32(S4_extract, ICLASS_S2op" 1101 1IIsssss PP0iiiii IIIddddd")
+DEF_ENC32(S2_mask, ICLASS_S2op" 1101 0II----- PP1iiiii IIIddddd")
+
+
+
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1110 -------- PP------ --------","[#14] Rx=(Rs,#u6)")
+I5SHIFTTYPES(r_nac, "1110","00-","0","xxxxx")
+I5SHIFTTYPES(r_acc, "1110","00-","1","xxxxx")
+I5SHIFTTYPES(r_and, "1110","01-","0","xxxxx")
+I5SHIFTTYPES(r_or, "1110","01-","1","xxxxx")
+I5SHIFTTYPES_NOASR(r_xacc,"1110","10-","0","xxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S2op" 1111 -------- PP------ --------","[#15] Rs=(Rs,#u6,#U6)")
+DEF_ENC32(S2_insert, ICLASS_S2op" 1111 0IIsssss PP0iiiii IIIxxxxx")
+
+
+
+
+
+/*************************/
+/* S_3_operand */
+/*************************/
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0000 -------- PP------ --------","[#0] Rdd=(Rss,Rtt,#u3)")
+SH_RRR_ENC(S2_valignib, "0000","0--","-","iii","ddddd")
+SH_RRR_ENC(S2_vspliceib, "0000","1--","-","iii","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0001 -------- PP------ --------","[#1] Rdd=(Rss,Rtt)")
+SH_RRR_ENC(S2_extractup_rp, "0001","00-","-","00-","ddddd")
+SH_RRR_ENC(S2_shuffeb, "0001","00-","-","01-","ddddd")
+SH_RRR_ENC(S2_shuffob, "0001","00-","-","10-","ddddd")
+SH_RRR_ENC(S2_shuffeh, "0001","00-","-","11-","ddddd")
+
+SH_RRR_ENC(S2_shuffoh, "0001","10-","-","000","ddddd")
+SH_RRR_ENC(S2_vtrunewh, "0001","10-","-","010","ddddd")
+SH_RRR_ENC(S6_vtrunehb_ppp, "0001","10-","-","011","ddddd")
+SH_RRR_ENC(S2_vtrunowh, "0001","10-","-","100","ddddd")
+SH_RRR_ENC(S6_vtrunohb_ppp, "0001","10-","-","101","ddddd")
+SH_RRR_ENC(S2_lfsp, "0001","10-","-","110","ddddd")
+
+SH_RRR_ENC(S4_vxaddsubw, "0001","01-","-","000","ddddd")
+SH_RRR_ENC(A5_vaddhubs, "0001","01-","-","001","ddddd")
+SH_RRR_ENC(S4_vxsubaddw, "0001","01-","-","010","ddddd")
+SH_RRR_ENC(S4_vxaddsubh, "0001","01-","-","100","ddddd")
+SH_RRR_ENC(S4_vxsubaddh, "0001","01-","-","110","ddddd")
+
+SH_RRR_ENC(S4_vxaddsubhr, "0001","11-","-","00-","ddddd")
+SH_RRR_ENC(S4_vxsubaddhr, "0001","11-","-","01-","ddddd")
+SH_RRR_ENC(S4_extractp_rp, "0001","11-","-","10-","ddddd")
+SH_RRR_ENC(S2_cabacdecbin, "0001","11-","-","11-","ddddd") /* implicit P0 write */
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0010 -------- PP------ --------","[#2] Rdd=(Rss,Rtt,Pu)")
+SH_RRR_ENC(S2_valignrb, "0010","0--","-","-uu","ddddd")
+SH_RRR_ENC(S2_vsplicerb, "0010","100","-","-uu","ddddd")
+SH_RRR_ENC(A4_addp_c, "0010","110","-","-xx","ddddd")
+SH_RRR_ENC(A4_subp_c, "0010","111","-","-xx","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0011 -------- PP------ --------","[#3] Rdd=(Rss,Rt)")
+RSHIFTTYPES(vw, "0011","00-","-","-","ddddd")
+RSHIFTTYPES(vh, "0011","01-","-","-","ddddd")
+RSHIFTTYPES(p, "0011","10-","-","-","ddddd")
+SH_RRR_ENC(S2_vcrotate, "0011","11-","-","00-","ddddd")
+SH_RRR_ENC(S2_vcnegh, "0011","11-","-","01-","ddddd")
+SH_RRR_ENC(S4_vrcrotate, "0011","11-","i","11i","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0100 -------- PP------ --------","[#4] Rd=(Rs,Rt,#u3)")
+DEF_ENC32(S2_addasl_rrri, ICLASS_S3op" 0100 000 sssss PP0ttttt iiiddddd")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0101 -------- PP------ --------","[#5] Rd=(Rss,Rt)")
+SH_RRR_ENC(S2_asr_r_svw_trun, "0101","---","-","010","ddddd")
+SH_RRR_ENC(M4_cmpyi_wh, "0101","---","-","100","ddddd")
+SH_RRR_ENC(M4_cmpyr_wh, "0101","---","-","110","ddddd")
+SH_RRR_ENC(M4_cmpyi_whc, "0101","---","-","101","ddddd")
+SH_RRR_ENC(M4_cmpyr_whc, "0101","---","-","111","ddddd")
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0110 -------- PP------ --------","[#6] Rd=(Rs,Rt)")
+SH_RRR_ENC(S2_asr_r_r_sat, "0110","00-","-","00-","ddddd")
+SH_RRR_ENC(S2_asl_r_r_sat, "0110","00-","-","10-","ddddd")
+
+RSHIFTTYPES(r, "0110","01-","-","-","ddddd")
+
+SH_RRR_ENC(S2_setbit_r, "0110","10-","-","00-","ddddd")
+SH_RRR_ENC(S2_clrbit_r, "0110","10-","-","01-","ddddd")
+SH_RRR_ENC(S2_togglebit_r, "0110","10-","-","10-","ddddd")
+SH_RRRiENC(S4_lsli, "0110","10-","-","11i","ddddd")
+
+SH_RRR_ENC(A4_cround_rr, "0110","11-","-","00-","ddddd")
+SH_RRR_ENC(A7_croundd_rr, "0110","11-","-","01-","ddddd")
+SH_RRR_ENC(A4_round_rr, "0110","11-","-","10-","ddddd")
+SH_RRR_ENC(A4_round_rr_sat, "0110","11-","-","11-","ddddd")
+
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 0111 -------- PP------ --------","[#7] Pd=(Rs,Rt)")
+SH_RRR_ENC(S2_tstbit_r, "0111","000","-","---","---dd")
+SH_RRR_ENC(C2_bitsset, "0111","010","-","---","---dd")
+SH_RRR_ENC(C2_bitsclr, "0111","100","-","---","---dd")
+SH_RRR_ENC(A4_cmpheq, "0111","110","-","011","---dd")
+SH_RRR_ENC(A4_cmphgt, "0111","110","-","100","---dd")
+SH_RRR_ENC(A4_cmphgtu, "0111","110","-","101","---dd")
+SH_RRR_ENC(A4_cmpbeq, "0111","110","-","110","---dd")
+SH_RRR_ENC(A4_cmpbgtu, "0111","110","-","111","---dd")
+SH_RRR_ENC(A4_cmpbgt, "0111","110","-","010","---dd")
+SH_RRR_ENC(S4_ntstbit_r, "0111","001","-","---","---dd")
+SH_RRR_ENC(C4_nbitsset, "0111","011","-","---","---dd")
+SH_RRR_ENC(C4_nbitsclr, "0111","101","-","---","---dd")
+
+SH_RRR_ENC(F2_sfcmpge, "0111","111","-","000","---dd")
+SH_RRR_ENC(F2_sfcmpuo, "0111","111","-","001","---dd")
+SH_RRR_ENC(F2_sfcmpeq, "0111","111","-","011","---dd")
+SH_RRR_ENC(F2_sfcmpgt, "0111","111","-","100","---dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1000 -------- PP------ --------","[#8] Rx=(Rs,Rtt)")
+SH_RRR_ENC(S2_insert_rp, "1000","---","-","---","xxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1001 -------- PP------ --------","[#9] Rd=(Rs,Rtt)")
+SH_RRR_ENC(S2_extractu_rp, "1001","00-","-","00-","ddddd")
+SH_RRR_ENC(S4_extract_rp, "1001","00-","-","01-","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1010 -------- PP------ --------","[#10] Rxx=(Rss,Rtt)")
+SH_RRR_ENC(S2_insertp_rp, "1010","0--","0","---","xxxxx")
+SH_RRR_ENC(M4_xor_xacc, "1010","10-","0","000","xxxxx")
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1011 -------- PP------ --------","[#11] Rxx=(Rss,Rt)")
+RSHIFTTYPES(p_or, "1011","000","-","-","xxxxx")
+RSHIFTTYPES(p_and, "1011","010","-","-","xxxxx")
+RSHIFTTYPES(p_nac, "1011","100","-","-","xxxxx")
+RSHIFTTYPES(p_acc, "1011","110","-","-","xxxxx")
+RSHIFTTYPES(p_xor, "1011","011","-","-","xxxxx")
+
+SH_RRR_ENCX(A4_vrmaxh, "1011","001","0","001","uuuuu")
+SH_RRR_ENCX(A4_vrmaxuh, "1011","001","1","001","uuuuu")
+SH_RRR_ENCX(A4_vrmaxw, "1011","001","0","010","uuuuu")
+SH_RRR_ENCX(A4_vrmaxuw, "1011","001","1","010","uuuuu")
+
+SH_RRR_ENCX(A4_vrminh, "1011","001","0","101","uuuuu")
+SH_RRR_ENCX(A4_vrminuh, "1011","001","1","101","uuuuu")
+SH_RRR_ENCX(A4_vrminw, "1011","001","0","110","uuuuu")
+SH_RRR_ENCX(A4_vrminuw, "1011","001","1","110","uuuuu")
+
+SH_RRR_ENC(S2_vrcnegh, "1011","001","1","111","xxxxx")
+
+SH_RRR_ENC(S4_vrcrotate_acc, "1011","101","i","--i","xxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1100 -------- PP------ --------","[#12] Rx=(Rs,Rt)")
+RSHIFTTYPES(r_or, "1100","00-","-","-","xxxxx")
+RSHIFTTYPES(r_and, "1100","01-","-","-","xxxxx")
+RSHIFTTYPES(r_nac, "1100","10-","-","-","xxxxx")
+RSHIFTTYPES(r_acc, "1100","11-","-","-","xxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1101 -------- PP------ --------","[#13] Reserved")
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1110 -------- PP------ --------","[#14] Reserved")
+
+
+DEF_FIELDROW_DESC32(ICLASS_S3op" 1111 -------- PP------ --------","[#14] User Instruction")
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*******************************/
+/* */
+/* */
+/* ALU64 */
+/* */
+/* */
+/*******************************/
+DEF_CLASS32(ICLASS_ALU64" ---- -------- PP------ --------",ALU64)
+DEF_FIELD32(ICLASS_ALU64" !!!! -------- PP------ --------",ALU64_RegType,"Register Type")
+DEF_FIELD32(ICLASS_ALU64" 0--- !!!----- PP------ --------",A_MajOp,"Major Opcode")
+DEF_FIELD32(ICLASS_ALU64" 0--- -------- PP------ !!!-----",A_MinOp,"Minor Opcode")
+DEF_FIELD32(ICLASS_ALU64" 11-- -------- PP------ ---!!!!!",A_MajOp,"Major Opcode")
+
+
+
+#define ALU64_RRR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+DEF_ENC32(TAG, ICLASS_ALU64" "MAJ4" "MIN3"sssss PP"SMOD1"ttttt "VMIN3 DSTCHARS)
+
+#define LEGACY_ALU64_RRR_ENC(TAG,MAJ4,MIN3,SMOD1,VMIN3,DSTCHARS)\
+LEGACY_DEF_ENC32(TAG, ICLASS_ALU64" "MAJ4" "MIN3"sssss PP"SMOD1"ttttt "VMIN3 DSTCHARS)
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0000 -------- PP------ --------","[#0] Rd=(Rss,Rtt)")
+ALU64_RRR_ENC(S2_parityp, "0000","---","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0001 -------- PP------ --------","[#1] Rdd=(Pu,Rss,Rtt)")
+ALU64_RRR_ENC(C2_vmux, "0001","---","-","-uu","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0010 -------- PP------ --------","[#2] Pd=(Rss,Rtt)")
+ALU64_RRR_ENC(A2_vcmpweq, "0010","0--","0","000","---dd")
+ALU64_RRR_ENC(A2_vcmpwgt, "0010","0--","0","001","---dd")
+ALU64_RRR_ENC(A2_vcmpwgtu, "0010","0--","0","010","---dd")
+ALU64_RRR_ENC(A2_vcmpheq, "0010","0--","0","011","---dd")
+ALU64_RRR_ENC(A2_vcmphgt, "0010","0--","0","100","---dd")
+ALU64_RRR_ENC(A2_vcmphgtu, "0010","0--","0","101","---dd")
+ALU64_RRR_ENC(A2_vcmpbeq, "0010","0--","0","110","---dd")
+ALU64_RRR_ENC(A2_vcmpbgtu, "0010","0--","0","111","---dd")
+
+ALU64_RRR_ENC(A4_vcmpbeq_any, "0010","0--","1","000","---dd")
+ALU64_RRR_ENC(A6_vcmpbeq_notany, "0010","0--","1","001","---dd")
+ALU64_RRR_ENC(A4_vcmpbgt, "0010","0--","1","010","---dd")
+ALU64_RRR_ENC(A4_tlbmatch, "0010","0--","1","011","---dd")
+ALU64_RRR_ENC(A4_boundscheck_lo, "0010","0--","1","100","---dd")
+ALU64_RRR_ENC(A4_boundscheck_hi, "0010","0--","1","101","---dd")
+
+ALU64_RRR_ENC(C2_cmpeqp, "0010","100","-","000","---dd")
+ALU64_RRR_ENC(C2_cmpgtp, "0010","100","-","010","---dd")
+ALU64_RRR_ENC(C2_cmpgtup, "0010","100","-","100","---dd")
+
+ALU64_RRR_ENC(F2_dfcmpeq, "0010","111","-","000","---dd")
+ALU64_RRR_ENC(F2_dfcmpgt, "0010","111","-","001","---dd")
+ALU64_RRR_ENC(F2_dfcmpge, "0010","111","-","010","---dd")
+ALU64_RRR_ENC(F2_dfcmpuo, "0010","111","-","011","---dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0011 -------- PP------ --------","[#3] Rdd=(Rss,Rtt)")
+ALU64_RRR_ENC(A2_vaddub, "0011","000","-","000","ddddd")
+ALU64_RRR_ENC(A2_vaddubs, "0011","000","-","001","ddddd")
+ALU64_RRR_ENC(A2_vaddh, "0011","000","-","010","ddddd")
+ALU64_RRR_ENC(A2_vaddhs, "0011","000","-","011","ddddd")
+ALU64_RRR_ENC(A2_vadduhs, "0011","000","-","100","ddddd")
+ALU64_RRR_ENC(A2_vaddw, "0011","000","-","101","ddddd")
+ALU64_RRR_ENC(A2_vaddws, "0011","000","-","110","ddddd")
+ALU64_RRR_ENC(A2_addp, "0011","000","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_vsubub, "0011","001","-","000","ddddd")
+ALU64_RRR_ENC(A2_vsububs, "0011","001","-","001","ddddd")
+ALU64_RRR_ENC(A2_vsubh, "0011","001","-","010","ddddd")
+ALU64_RRR_ENC(A2_vsubhs, "0011","001","-","011","ddddd")
+ALU64_RRR_ENC(A2_vsubuhs, "0011","001","-","100","ddddd")
+ALU64_RRR_ENC(A2_vsubw, "0011","001","-","101","ddddd")
+ALU64_RRR_ENC(A2_vsubws, "0011","001","-","110","ddddd")
+ALU64_RRR_ENC(A2_subp, "0011","001","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_vavgub, "0011","010","-","000","ddddd")
+ALU64_RRR_ENC(A2_vavgubr, "0011","010","-","001","ddddd")
+ALU64_RRR_ENC(A2_vavgh, "0011","010","-","010","ddddd")
+ALU64_RRR_ENC(A2_vavghr, "0011","010","-","011","ddddd")
+ALU64_RRR_ENC(A2_vavghcr, "0011","010","-","100","ddddd")
+ALU64_RRR_ENC(A2_vavguh, "0011","010","-","101","ddddd")
+ALU64_RRR_ENC(A2_vavguhr, "0011","010","-","11-","ddddd")
+
+ALU64_RRR_ENC(A2_vavgw, "0011","011","-","000","ddddd")
+ALU64_RRR_ENC(A2_vavgwr, "0011","011","-","001","ddddd")
+ALU64_RRR_ENC(A2_vavgwcr, "0011","011","-","010","ddddd")
+ALU64_RRR_ENC(A2_vavguw, "0011","011","-","011","ddddd")
+ALU64_RRR_ENC(A2_vavguwr, "0011","011","-","100","ddddd")
+ALU64_RRR_ENC(A2_addpsat, "0011","011","-","101","ddddd")
+ALU64_RRR_ENC(A2_addspl, "0011","011","-","110","ddddd")
+ALU64_RRR_ENC(A2_addsph, "0011","011","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_vnavgh, "0011","100","-","000","ddddd")
+ALU64_RRR_ENC(A2_vnavghr, "0011","100","-","001","ddddd")
+ALU64_RRR_ENC(A2_vnavghcr, "0011","100","-","010","ddddd")
+ALU64_RRR_ENC(A2_vnavgw, "0011","100","-","011","ddddd")
+ALU64_RRR_ENC(A2_vnavgwr, "0011","100","-","10-","ddddd")
+ALU64_RRR_ENC(A2_vnavgwcr, "0011","100","-","11-","ddddd")
+
+ALU64_RRR_ENC(A2_vminub, "0011","101","-","000","ddddd")
+ALU64_RRR_ENC(A2_vminh, "0011","101","-","001","ddddd")
+ALU64_RRR_ENC(A2_vminuh, "0011","101","-","010","ddddd")
+ALU64_RRR_ENC(A2_vminw, "0011","101","-","011","ddddd")
+ALU64_RRR_ENC(A2_vminuw, "0011","101","-","100","ddddd")
+ALU64_RRR_ENC(A2_vmaxuw, "0011","101","-","101","ddddd")
+ALU64_RRR_ENC(A2_minp, "0011","101","-","110","ddddd")
+ALU64_RRR_ENC(A2_minup, "0011","101","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_vmaxub, "0011","110","-","000","ddddd")
+ALU64_RRR_ENC(A2_vmaxh, "0011","110","-","001","ddddd")
+ALU64_RRR_ENC(A2_vmaxuh, "0011","110","-","010","ddddd")
+ALU64_RRR_ENC(A2_vmaxw, "0011","110","-","011","ddddd")
+ALU64_RRR_ENC(A2_maxp, "0011","110","-","100","ddddd")
+ALU64_RRR_ENC(A2_maxup, "0011","110","-","101","ddddd")
+ALU64_RRR_ENC(A2_vmaxb, "0011","110","-","110","ddddd")
+ALU64_RRR_ENC(A2_vminb, "0011","110","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_andp, "0011","111","-","000","ddddd")
+ALU64_RRR_ENC(A2_orp, "0011","111","-","010","ddddd")
+ALU64_RRR_ENC(A2_xorp, "0011","111","-","100","ddddd")
+ALU64_RRR_ENC(A4_andnp, "0011","111","-","001","ddddd")
+ALU64_RRR_ENC(A4_ornp, "0011","111","-","011","ddddd")
+
+ALU64_RRR_ENC(A4_modwrapu, "0011","111","-","111","ddddd")
+
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0100 -------- PP------ --------","[#4] Rdd=(Rs,Rt)")
+LEGACY_ALU64_RRR_ENC(S2_packhl, "0100","--0","-","---","ddddd")
+ALU64_RRR_ENC(A4_bitsplit, "0100","--1","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0101 -------- PP------ --------","[#5] Rd=(Rs,Rt)")
+ALU64_RRR_ENC(A2_addh_l16_ll, "0101","000","-","00-","ddddd")
+ALU64_RRR_ENC(A2_addh_l16_hl, "0101","000","-","01-","ddddd")
+ALU64_RRR_ENC(A2_addh_l16_sat_ll,"0101","000","-","10-","ddddd")
+ALU64_RRR_ENC(A2_addh_l16_sat_hl,"0101","000","-","11-","ddddd")
+
+ALU64_RRR_ENC(A2_subh_l16_ll, "0101","001","-","00-","ddddd")
+ALU64_RRR_ENC(A2_subh_l16_hl, "0101","001","-","01-","ddddd")
+ALU64_RRR_ENC(A2_subh_l16_sat_ll,"0101","001","-","10-","ddddd")
+ALU64_RRR_ENC(A2_subh_l16_sat_hl,"0101","001","-","11-","ddddd")
+
+ALU64_RRR_ENC(A2_addh_h16_ll, "0101","010","-","000","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_lh, "0101","010","-","001","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_hl, "0101","010","-","010","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_hh, "0101","010","-","011","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_sat_ll,"0101","010","-","100","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_sat_lh,"0101","010","-","101","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_sat_hl,"0101","010","-","110","ddddd")
+ALU64_RRR_ENC(A2_addh_h16_sat_hh,"0101","010","-","111","ddddd")
+
+ALU64_RRR_ENC(A2_subh_h16_ll, "0101","011","-","000","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_lh, "0101","011","-","001","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_hl, "0101","011","-","010","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_hh, "0101","011","-","011","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_sat_ll,"0101","011","-","100","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_sat_lh,"0101","011","-","101","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_sat_hl,"0101","011","-","110","ddddd")
+ALU64_RRR_ENC(A2_subh_h16_sat_hh,"0101","011","-","111","ddddd")
+
+LEGACY_ALU64_RRR_ENC(A2_addsat, "0101","100","-","0--","ddddd")
+LEGACY_ALU64_RRR_ENC(A2_subsat, "0101","100","-","1--","ddddd")
+
+ALU64_RRR_ENC(A2_min, "0101","101","-","0--","ddddd")
+ALU64_RRR_ENC(A2_minu, "0101","101","-","1--","ddddd")
+
+ALU64_RRR_ENC(A2_max, "0101","110","-","0--","ddddd")
+ALU64_RRR_ENC(A2_maxu, "0101","110","-","1--","ddddd")
+
+ALU64_RRR_ENC(S4_parity, "0101","111","-","---","ddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0110 -------- PP------ --------","[#6] Rd=#u10 ")
+DEF_ENC32(F2_sfimm_p, ICLASS_ALU64" 0110 00i ----- PPiiiiii iiiddddd")
+DEF_ENC32(F2_sfimm_n, ICLASS_ALU64" 0110 01i ----- PPiiiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 0111 -------- PP------ --------","[#7] Rd=(Rs,Rt,#u6)")
+DEF_ENC32(M4_mpyrr_addi, ICLASS_ALU64" 0111 0ii sssss PPittttt iiiddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1000 -------- PP------ --------","[#8] Rd=(Rs,#u6,#U6)")
+DEF_ENC32(M4_mpyri_addi, ICLASS_ALU64" 1000 Iii sssss PPiddddd iiiIIIII")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1001 -------- PP------ --------","[#9] Rdd=#u10 ")
+DEF_ENC32(F2_dfimm_p, ICLASS_ALU64" 1001 00i ----- PPiiiiii iiiddddd")
+DEF_ENC32(F2_dfimm_n, ICLASS_ALU64" 1001 01i ----- PPiiiiii iiiddddd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1010 -------- PP------ --------","[#10] Rx=(Rs,Rx,#s10)")
+DEF_ENC32(S4_or_andix, ICLASS_ALU64" 1010 01i xxxxx PPiiiiii iiiuuuuu")
+DEF_ENC32(S4_or_andi, ICLASS_ALU64" 1010 00i sssss PPiiiiii iiixxxxx")
+DEF_ENC32(S4_or_ori, ICLASS_ALU64" 1010 10i sssss PPiiiiii iiixxxxx")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1011 -------- PP------ --------","[#11] Rd=(Rs,Rd,#s6)")
+DEF_ENC32(S4_addaddi, ICLASS_ALU64" 1011 0ii sssss PPiddddd iiiuuuuu")
+DEF_ENC32(S4_subaddi, ICLASS_ALU64" 1011 1ii sssss PPiddddd iiiuuuuu")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1100 -------- PP------ --------","[#12] Pd=(Rss,#s8)")
+DEF_ENC32(A4_vcmpbeqi, ICLASS_ALU64"1100 000sssss PP-iiiii iii00-dd")
+DEF_ENC32(A4_vcmpbgti, ICLASS_ALU64"1100 001sssss PP-iiiii iii00-dd")
+DEF_ENC32(A4_vcmpbgtui, ICLASS_ALU64"1100 010sssss PP-0iiii iii00-dd")
+DEF_ENC32(A4_vcmpheqi, ICLASS_ALU64"1100 000sssss PP-iiiii iii01-dd")
+DEF_ENC32(A4_vcmphgti, ICLASS_ALU64"1100 001sssss PP-iiiii iii01-dd")
+DEF_ENC32(A4_vcmphgtui, ICLASS_ALU64"1100 010sssss PP-0iiii iii01-dd")
+DEF_ENC32(A4_vcmpweqi, ICLASS_ALU64"1100 000sssss PP-iiiii iii10-dd")
+DEF_ENC32(A4_vcmpwgti, ICLASS_ALU64"1100 001sssss PP-iiiii iii10-dd")
+DEF_ENC32(A4_vcmpwgtui, ICLASS_ALU64"1100 010sssss PP-0iiii iii10-dd")
+
+DEF_ENC32(F2_dfclass, ICLASS_ALU64"1100 100sssss PP-000ii iii10-dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1101 -------- PP------ --------","[#13] Pd=(Rs,#s8)")
+DEF_ENC32(A4_cmpbeqi, ICLASS_ALU64"1101 -00sssss PP-iiiii iii00-dd")
+DEF_ENC32(A4_cmpbgti, ICLASS_ALU64"1101 -01sssss PP-iiiii iii00-dd")
+DEF_ENC32(A4_cmpbgtui, ICLASS_ALU64"1101 -10sssss PP-0iiii iii00-dd")
+DEF_ENC32(A4_cmpheqi, ICLASS_ALU64"1101 -00sssss PP-iiiii iii01-dd")
+DEF_ENC32(A4_cmphgti, ICLASS_ALU64"1101 -01sssss PP-iiiii iii01-dd")
+DEF_ENC32(A4_cmphgtui, ICLASS_ALU64"1101 -10sssss PP-0iiii iii01-dd")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1110 -------- PP------ --------","[#14] Rx=(#u9,op(Rx,#u5))")
+
+#define OP_OPI_RI(TAG,OPB)\
+DEF_ENC32(S4_andi_##TAG##_ri,ICLASS_ALU64" 1110 iiixxxxx PPiIIIII iii"OPB"i00-")\
+DEF_ENC32(S4_ori_##TAG##_ri, ICLASS_ALU64" 1110 iiixxxxx PPiIIIII iii"OPB"i01-")\
+DEF_ENC32(S4_addi_##TAG##_ri,ICLASS_ALU64" 1110 iiixxxxx PPiIIIII iii"OPB"i10-")\
+DEF_ENC32(S4_subi_##TAG##_ri,ICLASS_ALU64" 1110 iiixxxxx PPiIIIII iii"OPB"i11-")
+
+OP_OPI_RI(asl,"0")
+OP_OPI_RI(lsr,"1")
+
+
+DEF_FIELDROW_DESC32(ICLASS_ALU64" 1111 -------- PP------ --------","[#15] Rd=(Rs,Ru,#u6:2)")
+DEF_ENC32(M4_mpyri_addr_u2, ICLASS_ALU64" 1111 0ii sssss PPiddddd iiiuuuuu")
+DEF_ENC32(M4_mpyri_addr, ICLASS_ALU64" 1111 1ii sssss PPiddddd iiiuuuuu")
diff --git a/target/hexagon/imported/encode_subinsn.def b/target/hexagon/imported/encode_subinsn.def
new file mode 100644
index 000000000..742fb50ef
--- /dev/null
+++ b/target/hexagon/imported/encode_subinsn.def
@@ -0,0 +1,149 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/* DEF_ENC_SUBINSN(TAG, CLASS, ENCSTR) */
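+/*
+ * Illustrative note (not part of the imported sources): each encoding
+ * string below is 13 bits wide once spaces are dropped. 'i' bits form
+ * the immediate, and the 4-bit 'd'/'s'/'t'/'x' fields select registers
+ * from the duplex-accessible subset (presumably R0-R7 and R16-R23, per
+ * the usual Hexagon duplex convention).
+ */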
+
+
+
+
+/*********************/
+/* Ld1-type subinsns */
+/*********************/
+DEF_ENC_SUBINSN(SL1_loadri_io, SUBINSN_L1, "0iiiissssdddd")
+DEF_ENC_SUBINSN(SL1_loadrub_io, SUBINSN_L1, "1iiiissssdddd")
+
+/*********************/
+/* St1-type subinsns */
+/*********************/
+DEF_ENC_SUBINSN(SS1_storew_io, SUBINSN_S1, "0ii iisssstttt")
+DEF_ENC_SUBINSN(SS1_storeb_io, SUBINSN_S1, "1ii iisssstttt")
+
+
+/*********************/
+/* Ld2-type subinsns */
+/*********************/
+DEF_ENC_SUBINSN(SL2_loadrh_io, SUBINSN_L2, "00i iissssdddd")
+DEF_ENC_SUBINSN(SL2_loadruh_io, SUBINSN_L2, "01i iissssdddd")
+DEF_ENC_SUBINSN(SL2_loadrb_io, SUBINSN_L2, "10i iissssdddd")
+DEF_ENC_SUBINSN(SL2_loadri_sp, SUBINSN_L2, "111 0iiiiidddd")
+DEF_ENC_SUBINSN(SL2_loadrd_sp, SUBINSN_L2, "111 10iiiiiddd")
+
+DEF_ENC_SUBINSN(SL2_deallocframe,SUBINSN_L2, "111 1100---0--")
+
+DEF_ENC_SUBINSN(SL2_return, SUBINSN_L2, "111 1101---0--")
+DEF_ENC_SUBINSN(SL2_return_t, SUBINSN_L2, "111 1101---100")
+DEF_ENC_SUBINSN(SL2_return_f, SUBINSN_L2, "111 1101---101")
+DEF_ENC_SUBINSN(SL2_return_tnew, SUBINSN_L2, "111 1101---110")
+DEF_ENC_SUBINSN(SL2_return_fnew, SUBINSN_L2, "111 1101---111")
+
+DEF_ENC_SUBINSN(SL2_jumpr31, SUBINSN_L2, "111 1111---0--")
+DEF_ENC_SUBINSN(SL2_jumpr31_t, SUBINSN_L2, "111 1111---100")
+DEF_ENC_SUBINSN(SL2_jumpr31_f, SUBINSN_L2, "111 1111---101")
+DEF_ENC_SUBINSN(SL2_jumpr31_tnew,SUBINSN_L2, "111 1111---110")
+DEF_ENC_SUBINSN(SL2_jumpr31_fnew,SUBINSN_L2, "111 1111---111")
+
+
+/*********************/
+/* St2-type subinsns */
+/*********************/
+DEF_ENC_SUBINSN(SS2_storeh_io, SUBINSN_S2, "00i iisssstttt")
+DEF_ENC_SUBINSN(SS2_storew_sp, SUBINSN_S2, "010 0iiiiitttt")
+DEF_ENC_SUBINSN(SS2_stored_sp, SUBINSN_S2, "010 1iiiiiittt")
+
+DEF_ENC_SUBINSN(SS2_storewi0, SUBINSN_S2, "100 00ssssiiii")
+DEF_ENC_SUBINSN(SS2_storewi1, SUBINSN_S2, "100 01ssssiiii")
+DEF_ENC_SUBINSN(SS2_storebi0, SUBINSN_S2, "100 10ssssiiii")
+DEF_ENC_SUBINSN(SS2_storebi1, SUBINSN_S2, "100 11ssssiiii")
+
+DEF_ENC_SUBINSN(SS2_allocframe, SUBINSN_S2, "111 0iiiii----")
+
+
+
+/*******************/
+/* A-type subinsns */
+/*******************/
+DEF_ENC_SUBINSN(SA1_addi, SUBINSN_A, "00i iiiiiixxxx")
+DEF_ENC_SUBINSN(SA1_seti, SUBINSN_A, "010 iiiiiidddd")
+DEF_ENC_SUBINSN(SA1_addsp, SUBINSN_A, "011 iiiiiidddd")
+
+DEF_ENC_SUBINSN(SA1_tfr, SUBINSN_A, "100 00ssssdddd")
+DEF_ENC_SUBINSN(SA1_inc, SUBINSN_A, "100 01ssssdddd")
+DEF_ENC_SUBINSN(SA1_and1, SUBINSN_A, "100 10ssssdddd")
+DEF_ENC_SUBINSN(SA1_dec, SUBINSN_A, "100 11ssssdddd")
+
+DEF_ENC_SUBINSN(SA1_sxth, SUBINSN_A, "101 00ssssdddd")
+DEF_ENC_SUBINSN(SA1_sxtb, SUBINSN_A, "101 01ssssdddd")
+DEF_ENC_SUBINSN(SA1_zxth, SUBINSN_A, "101 10ssssdddd")
+DEF_ENC_SUBINSN(SA1_zxtb, SUBINSN_A, "101 11ssssdddd")
+
+
+DEF_ENC_SUBINSN(SA1_addrx, SUBINSN_A, "110 00ssssxxxx")
+DEF_ENC_SUBINSN(SA1_cmpeqi, SUBINSN_A, "110 01ssss--ii")
+DEF_ENC_SUBINSN(SA1_setin1, SUBINSN_A, "110 1--0--dddd")
+DEF_ENC_SUBINSN(SA1_clrtnew, SUBINSN_A, "110 1--100dddd")
+DEF_ENC_SUBINSN(SA1_clrfnew, SUBINSN_A, "110 1--101dddd")
+DEF_ENC_SUBINSN(SA1_clrt, SUBINSN_A, "110 1--110dddd")
+DEF_ENC_SUBINSN(SA1_clrf, SUBINSN_A, "110 1--111dddd")
+
+
+DEF_ENC_SUBINSN(SA1_combine0i, SUBINSN_A, "111 -0-ii00ddd")
+DEF_ENC_SUBINSN(SA1_combine1i, SUBINSN_A, "111 -0-ii01ddd")
+DEF_ENC_SUBINSN(SA1_combine2i, SUBINSN_A, "111 -0-ii10ddd")
+DEF_ENC_SUBINSN(SA1_combine3i, SUBINSN_A, "111 -0-ii11ddd")
+DEF_ENC_SUBINSN(SA1_combinezr, SUBINSN_A, "111 -1ssss0ddd")
+DEF_ENC_SUBINSN(SA1_combinerz, SUBINSN_A, "111 -1ssss1ddd")
+
+
+
+
+/* maybe R=cmpeq ? */
+
+
+/* Add a group of NCJ: if (R.new==#0) jump:hint #r9 */
+/* Add a group of NCJ: if (R.new!=#0) jump:hint #r9 */
+/* NCJ goes with LD1, LD2 */
+
+
+
+
+DEF_FIELD32("---! !!!! !!!!!!!! EE------ --------",SUBFIELD_B_SLOT1,"B: Slot1 Instruction")
+DEF_FIELD32("---- ---- -------- EE-!!!!! !!!!!!!!",SUBFIELD_A_SLOT0,"A: Slot0 Instruction")
+
+
+/* DEF_PACKED32(TAG, CLASSA, CLASSB, ENCSTR) */
+
+DEF_PACKED32(P2_PACKED_L1_L1, SUBINSN_L1, SUBINSN_L1, "000B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_L1_L2, SUBINSN_L2, SUBINSN_L1, "000B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_L2_L2, SUBINSN_L2, SUBINSN_L2, "001B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_A_A, SUBINSN_A, SUBINSN_A, "001B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+
+DEF_PACKED32(P2_PACKED_L1_A, SUBINSN_L1, SUBINSN_A, "010B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_L2_A, SUBINSN_L2, SUBINSN_A, "010B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S1_A, SUBINSN_S1, SUBINSN_A, "011B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S2_A, SUBINSN_S2, SUBINSN_A, "011B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+
+DEF_PACKED32(P2_PACKED_S1_L1, SUBINSN_S1, SUBINSN_L1, "100B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S1_L2, SUBINSN_S1, SUBINSN_L2, "100B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S1_S1, SUBINSN_S1, SUBINSN_S1, "101B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S1_S2, SUBINSN_S2, SUBINSN_S1, "101B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+
+DEF_PACKED32(P2_PACKED_S2_L1, SUBINSN_S2, SUBINSN_L1, "110B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S2_L2, SUBINSN_S2, SUBINSN_L2, "110B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
+DEF_PACKED32(P2_PACKED_S2_S2, SUBINSN_S2, SUBINSN_S2, "111B BBBB BBBB BBBB EE0A AAAA AAAA AAAA")
+
+DEF_PACKED32(P2_PACKED_RESERVED, SUBINSN_INVALID, SUBINSN_INVALID, "111B BBBB BBBB BBBB EE1A AAAA AAAA AAAA")
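+/*
+ * Illustrative note (not part of the imported sources): per the SUBFIELD
+ * definitions above, a packed duplex word carries sub-instruction B in
+ * bits 28:16 and sub-instruction A in bits 12:0; the top three bits
+ * select the class pairing from this table. The 'EE' parse bits occupy
+ * bits 15:14, and a value of 00 there is what distinguishes a duplex
+ * from a regular 32-bit instruction word.
+ */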
diff --git a/target/hexagon/imported/float.idef b/target/hexagon/imported/float.idef
new file mode 100644
index 000000000..3e75bc460
--- /dev/null
+++ b/target/hexagon/imported/float.idef
@@ -0,0 +1,344 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Floating-Point Instructions
+ */
+
+/*************************************/
+/* Scalar FP */
+/*************************************/
+Q6INSN(F2_sfadd,"Rd32=sfadd(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Add",
+{ RdV=fUNFLOAT(fFLOAT(RsV)+fFLOAT(RtV));})
+
+Q6INSN(F2_sfsub,"Rd32=sfsub(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Subtract",
+{ RdV=fUNFLOAT(fFLOAT(RsV)-fFLOAT(RtV));})
+
+Q6INSN(F2_sfmpy,"Rd32=sfmpy(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Multiply",
+{ RdV=fUNFLOAT(fSFMPY(fFLOAT(RsV),fFLOAT(RtV)));})
+
+Q6INSN(F2_sffma,"Rx32+=sfmpy(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Fused Multiply Add",
+{ RxV=fUNFLOAT(fFMAF(fFLOAT(RsV),fFLOAT(RtV),fFLOAT(RxV)));})
+
+Q6INSN(F2_sffma_sc,"Rx32+=sfmpy(Rs32,Rt32,Pu4):scale",ATTRIBS(),
+"Floating-Point Fused Multiply Add w/ Additional Scaling (2**Pu)",
+{
+ fHIDE(size4s_t tmp;)
+ fCHECKSFNAN3(RxV,RxV,RsV,RtV);
+ tmp=fUNFLOAT(fFMAFX(fFLOAT(RsV),fFLOAT(RtV),fFLOAT(RxV),PuV));
+ if (!((fFLOAT(RxV) == 0.0) && fISZEROPROD(fFLOAT(RsV),fFLOAT(RtV)))) RxV = tmp;
+})
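+/*
+ * Illustrative note (not part of the imported sources): fFMAFX scales
+ * the product by 2**Pu before the fused accumulate, and the guard above
+ * skips the writeback when Rx is zero and the product is an exact zero
+ * product, presumably to keep the sign of zero well defined.
+ */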
+
+Q6INSN(F2_sffms,"Rx32-=sfmpy(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Fused Multiply Add",
+{ RxV=fUNFLOAT(fFMAF(-fFLOAT(RsV),fFLOAT(RtV),fFLOAT(RxV))); })
+
+Q6INSN(F2_sffma_lib,"Rx32+=sfmpy(Rs32,Rt32):lib",ATTRIBS(),
+"Floating-Point Fused Multiply Add for Library Routines",
+{ fFPSETROUND_NEAREST(); fHIDE(int infinp; int infminusinf; size4s_t tmp;)
+ infminusinf = ((isinf(fFLOAT(RxV))) &&
+ (fISINFPROD(fFLOAT(RsV),fFLOAT(RtV))) &&
+ (fGETBIT(31,RsV ^ RxV ^ RtV) != 0));
+ infinp = (isinf(fFLOAT(RxV))) || (isinf(fFLOAT(RtV))) || (isinf(fFLOAT(RsV)));
+ fCHECKSFNAN3(RxV,RxV,RsV,RtV);
+ tmp=fUNFLOAT(fFMAF(fFLOAT(RsV),fFLOAT(RtV),fFLOAT(RxV)));
+ if (!((fFLOAT(RxV) == 0.0) && fISZEROPROD(fFLOAT(RsV),fFLOAT(RtV)))) RxV = tmp;
+ fFPCANCELFLAGS();
+ if (isinf(fFLOAT(RxV)) && !infinp) RxV = RxV - 1;
+ if (infminusinf) RxV = 0;
+})
+
+Q6INSN(F2_sffms_lib,"Rx32-=sfmpy(Rs32,Rt32):lib",ATTRIBS(),
+"Floating-Point Fused Multiply Add for Library Routines",
+{ fFPSETROUND_NEAREST(); fHIDE(int infinp; int infminusinf; size4s_t tmp;)
+ infminusinf = ((isinf(fFLOAT(RxV))) &&
+ (fISINFPROD(fFLOAT(RsV),fFLOAT(RtV))) &&
+ (fGETBIT(31,RsV ^ RxV ^ RtV) == 0));
+ infinp = (isinf(fFLOAT(RxV))) || (isinf(fFLOAT(RtV))) || (isinf(fFLOAT(RsV)));
+ fCHECKSFNAN3(RxV,RxV,RsV,RtV);
+ tmp=fUNFLOAT(fFMAF(-fFLOAT(RsV),fFLOAT(RtV),fFLOAT(RxV)));
+ if (!((fFLOAT(RxV) == 0.0) && fISZEROPROD(fFLOAT(RsV),fFLOAT(RtV)))) RxV = tmp;
+ fFPCANCELFLAGS();
+ if (isinf(fFLOAT(RxV)) && !infinp) RxV = RxV - 1;
+ if (infminusinf) RxV = 0;
+})
+
+
+Q6INSN(F2_sfcmpeq,"Pd4=sfcmp.eq(Rs32,Rt32)",ATTRIBS(),
+"Floating Point Compare for Equal",
+{PdV=f8BITSOF(fFLOAT(RsV)==fFLOAT(RtV));})
+
+Q6INSN(F2_sfcmpgt,"Pd4=sfcmp.gt(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Compare for Greater Than",
+{PdV=f8BITSOF(fFLOAT(RsV)>fFLOAT(RtV));})
+
+/* cmpge is not the same as !cmpgt(swapops) in IEEE */
+
+Q6INSN(F2_sfcmpge,"Pd4=sfcmp.ge(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Compare for Greater Than / Equal To",
+{PdV=f8BITSOF(fFLOAT(RsV)>=fFLOAT(RtV));})
+
+/* Everyone seems to have this... */
+
+Q6INSN(F2_sfcmpuo,"Pd4=sfcmp.uo(Rs32,Rt32)",ATTRIBS(),
+"Floating-Point Compare for Unordered",
+{PdV=f8BITSOF(isunordered(fFLOAT(RsV),fFLOAT(RtV)));})
+
+
+Q6INSN(F2_sfmax,"Rd32=sfmax(Rs32,Rt32)",ATTRIBS(),
+"Maximum of Floating-Point values",
+{ RdV = fUNFLOAT(fSF_MAX(fFLOAT(RsV),fFLOAT(RtV))); })
+
+Q6INSN(F2_sfmin,"Rd32=sfmin(Rs32,Rt32)",ATTRIBS(),
+"Minimum of Floating-Point values",
+{ RdV = fUNFLOAT(fSF_MIN(fFLOAT(RsV),fFLOAT(RtV))); })
+
+
+Q6INSN(F2_sfclass,"Pd4=sfclass(Rs32,#u5)",ATTRIBS(),
+"Classify Floating-Point Value",
+{
+ fHIDE(int class;)
+ PdV = 0;
+ class = fpclassify(fFLOAT(RsV));
+ /* Is the value zero? */
+ if (fGETBIT(0,uiV) && (class == FP_ZERO)) PdV = 0xff;
+ if (fGETBIT(1,uiV) && (class == FP_NORMAL)) PdV = 0xff;
+ if (fGETBIT(2,uiV) && (class == FP_SUBNORMAL)) PdV = 0xff;
+ if (fGETBIT(3,uiV) && (class == FP_INFINITE)) PdV = 0xff;
+ if (fGETBIT(4,uiV) && (class == FP_NAN)) PdV = 0xff;
+ fFPCANCELFLAGS();
+})
+
+/* Range: +/- (1.0 .. 1+(63/64)) * 2**(-6 .. +9) */
+/* More immediate bits should probably be used for more precision? */
+
+Q6INSN(F2_sfimm_p,"Rd32=sfmake(#u10):pos",ATTRIBS(),
+"Make Floating Point Value",
+{
+ RdV = (127 - 6) << 23;
+ RdV += uiV << 17;
+})
+
+Q6INSN(F2_sfimm_n,"Rd32=sfmake(#u10):neg",ATTRIBS(),
+"Make Floating Point Value",
+{
+ RdV = (127 - 6) << 23;
+ RdV += (uiV << 17);
+ RdV |= (1 << 31);
+})
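+
+/*
+ * A worked decode of sfmake (illustrative C, not part of the imported
+ * definitions): the high 4 bits of #u10 bump the exponent up from 2**-6
+ * and the low 6 bits become the top of the mantissa.
+ *
+ *   unsigned sfmake_pos(unsigned u10) {          // u10 in [0,1023]
+ *       return ((127 - 6) << 23) + (u10 << 17);
+ *   }
+ *   // sfmake_pos(0x000) == 0x3C800000 == 1.0      * 2**-6
+ *   // sfmake_pos(0x040) == 0x3D000000 == 1.0      * 2**-5
+ *   // sfmake_pos(0x03f) == 0x3CFE0000 == 1.984375 * 2**-6
+ */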
+
+
+Q6INSN(F2_sfrecipa,"Rd32,Pe4=sfrecipa(Rs32,Rt32)",ATTRIBS(),
+"Reciprocal Approximation for Division",
+{
+ fHIDE(int idx;)
+ fHIDE(int adjust;)
+ fHIDE(int mant;)
+ fHIDE(int exp;)
+ if (fSF_RECIP_COMMON(RsV,RtV,RdV,adjust)) {
+ PeV = adjust;
+ idx = (RtV >> 16) & 0x7f;
+ mant = (fSF_RECIP_LOOKUP(idx) << 15) | 1;
+ exp = fSF_BIAS() - (fSF_GETEXP(RtV) - fSF_BIAS()) - 1;
+ RdV = fMAKESF(fGETBIT(31,RtV),exp,mant);
+ }
+})
+
+Q6INSN(F2_sffixupn,"Rd32=sffixupn(Rs32,Rt32)",ATTRIBS(),
+"Fix Up Numerator",
+{
+ fHIDE(int adjust;)
+ fSF_RECIP_COMMON(RsV,RtV,RdV,adjust);
+ RdV = RsV;
+})
+
+Q6INSN(F2_sffixupd,"Rd32=sffixupd(Rs32,Rt32)",ATTRIBS(),
+"Fix Up Denominator",
+{
+ fHIDE(int adjust;)
+ fSF_RECIP_COMMON(RsV,RtV,RdV,adjust);
+ RdV = RtV;
+})
+
+Q6INSN(F2_sfinvsqrta,"Rd32,Pe4=sfinvsqrta(Rs32)",ATTRIBS(),
+"Reciprocal Square Root Approximation",
+{
+ fHIDE(int idx;)
+ fHIDE(int adjust;)
+ fHIDE(int mant;)
+ fHIDE(int exp;)
+ if (fSF_INVSQRT_COMMON(RsV,RdV,adjust)) {
+ PeV = adjust;
+ idx = (RsV >> 17) & 0x7f;
+ mant = (fSF_INVSQRT_LOOKUP(idx) << 15);
+ exp = fSF_BIAS() - ((fSF_GETEXP(RsV) - fSF_BIAS()) >> 1) - 1;
+ RdV = fMAKESF(fGETBIT(31,RsV),exp,mant);
+ }
+})
+
+Q6INSN(F2_sffixupr,"Rd32=sffixupr(Rs32)",ATTRIBS(),
+"Fix Up Radicand",
+{
+ fHIDE(int adjust;)
+ fSF_INVSQRT_COMMON(RsV,RdV,adjust);
+ RdV = RsV;
+})
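+
+/*
+ * The four instructions above are the building blocks of a software
+ * divide.  A rough Newton-Raphson sketch (library sequences may differ
+ * in detail): sffixupn/sffixupd prescale numerator and denominator into
+ * the range the refinement expects; sfrecipa seeds a ~7-bit table-lookup
+ * reciprocal estimate r of the denominator d, plus an exponent
+ * adjustment in Pe; sffma/sffms then iterate  e = 1 - d*r;  r = r + r*e
+ * and a final multiply by the fixed-up numerator forms the quotient.
+ */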
+
+/*************************************/
+/* Scalar DP */
+/*************************************/
+Q6INSN(F2_dfadd,"Rdd32=dfadd(Rss32,Rtt32)",ATTRIBS(),
+"Floating-Point Add",
+{ RddV=fUNDOUBLE(fDOUBLE(RssV)+fDOUBLE(RttV));})
+
+Q6INSN(F2_dfsub,"Rdd32=dfsub(Rss32,Rtt32)",ATTRIBS(),
+"Floating-Point Subtract",
+{ RddV=fUNDOUBLE(fDOUBLE(RssV)-fDOUBLE(RttV));})
+
+Q6INSN(F2_dfmax,"Rdd32=dfmax(Rss32,Rtt32)",ATTRIBS(),
+"Maximum of Floating-Point values",
+{ RddV = fUNDOUBLE(fDF_MAX(fDOUBLE(RssV),fDOUBLE(RttV))); })
+
+Q6INSN(F2_dfmin,"Rdd32=dfmin(Rss32,Rtt32)",ATTRIBS(),
+"Minimum of Floating-Point values",
+{ RddV = fUNDOUBLE(fDF_MIN(fDOUBLE(RssV),fDOUBLE(RttV))); })
+
+Q6INSN(F2_dfmpyfix,"Rdd32=dfmpyfix(Rss32,Rtt32)",ATTRIBS(),
+"Fix Up Multiplicand for Multiplication",
+{
+ if (fDF_ISDENORM(RssV) && fDF_ISBIG(RttV) && fDF_ISNORMAL(RttV)) RddV = fUNDOUBLE(fDOUBLE(RssV) * 0x1.0p52);
+ else if (fDF_ISDENORM(RttV) && fDF_ISBIG(RssV) && fDF_ISNORMAL(RssV)) RddV = fUNDOUBLE(fDOUBLE(RssV) * 0x1.0p-52);
+ else RddV = RssV;
+})
+
+Q6INSN(F2_dfmpyll,"Rdd32=dfmpyll(Rss32,Rtt32)",ATTRIBS(),
+"Multiply low*low and shift off low 32 bits into sticky (in MSB)",
+{
+ fHIDE(size8u_t prod;)
+ prod = fMPY32UU(fGETUWORD(0,RssV),fGETUWORD(0,RttV));
+ RddV = (prod >> 32) << 1;
+ if (fGETUWORD(0,prod) != 0) fSETBIT(0,RddV,1);
+})
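+
+/*
+ * Example of the sticky behavior above (no extra semantics): with
+ * RssV.w0 = 0x80000001 and RttV.w0 = 0x00000002 the 64-bit product is
+ * 0x0000000100000002; the high word shifts left one (giving 2) and the
+ * nonzero low word collapses into bit 0, so RddV = 3.  The discarded
+ * bits survive only as a single "inexact" indicator for later rounding.
+ */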
+
+Q6INSN(F2_dfmpylh,"Rxx32+=dfmpylh(Rss32,Rtt32)",ATTRIBS(),
+"Multiply low*high and accumulate",
+{
+ RxxV += (fGETUWORD(0,RssV) * (0x00100000 | fZXTN(20,64,fGETUWORD(1,RttV)))) << 1;
+})
+
+Q6INSN(F2_dfmpyhh,"Rxx32+=dfmpyhh(Rss32,Rtt32)",ATTRIBS(),
+"Multiply high*high and accumulate with L*H value",
+{
+ RxxV = fUNDOUBLE(fDF_MPY_HH(fDOUBLE(RssV),fDOUBLE(RttV),RxxV));
+})
+
+
+
+Q6INSN(F2_dfcmpeq,"Pd4=dfcmp.eq(Rss32,Rtt32)",ATTRIBS(),
+"Floating Point Compare for Equal",
+{PdV=f8BITSOF(fDOUBLE(RssV)==fDOUBLE(RttV));})
+
+Q6INSN(F2_dfcmpgt,"Pd4=dfcmp.gt(Rss32,Rtt32)",ATTRIBS(),
+"Floating-Point Compare for Greater Than",
+{PdV=f8BITSOF(fDOUBLE(RssV)>fDOUBLE(RttV));})
+
+
+/* cmpge is not the same as !cmpgt(swapops) in IEEE */
+
+Q6INSN(F2_dfcmpge,"Pd4=dfcmp.ge(Rss32,Rtt32)",ATTRIBS(),
+"Floating-Point Compare for Greater Than / Equal To",
+{PdV=f8BITSOF(fDOUBLE(RssV)>=fDOUBLE(RttV));})
+
+/* Everyone seems to have this... */
+
+Q6INSN(F2_dfcmpuo,"Pd4=dfcmp.uo(Rss32,Rtt32)",ATTRIBS(),
+"Floating-Point Compare for Unordered",
+{PdV=f8BITSOF(isunordered(fDOUBLE(RssV),fDOUBLE(RttV)));})
+
+
+Q6INSN(F2_dfclass,"Pd4=dfclass(Rss32,#u5)",ATTRIBS(),
+"Classify Floating-Point Value",
+{
+ fHIDE(int class;)
+ PdV = 0;
+ class = fpclassify(fDOUBLE(RssV));
+ /* Is the value zero? */
+ if (fGETBIT(0,uiV) && (class == FP_ZERO)) PdV = 0xff;
+ if (fGETBIT(1,uiV) && (class == FP_NORMAL)) PdV = 0xff;
+ if (fGETBIT(2,uiV) && (class == FP_SUBNORMAL)) PdV = 0xff;
+ if (fGETBIT(3,uiV) && (class == FP_INFINITE)) PdV = 0xff;
+ if (fGETBIT(4,uiV) && (class == FP_NAN)) PdV = 0xff;
+ fFPCANCELFLAGS();
+})
+
+
+/* Range: +/- (1.0 .. 1+(63/64)) * 2**(-6 .. +9) */
+/* More immediate bits should probably be used for more precision? */
+
+Q6INSN(F2_dfimm_p,"Rdd32=dfmake(#u10):pos",ATTRIBS(),
+"Make Floating Point Value",
+{
+ RddV = (1023ULL - 6) << 52;
+ RddV += (fHIDE((size8u_t))uiV) << 46;
+})
+
+Q6INSN(F2_dfimm_n,"Rdd32=dfmake(#u10):neg",ATTRIBS(),
+"Make Floating Point Value",
+{
+ RddV = (1023ULL - 6) << 52;
+ RddV += (fHIDE((size8u_t))uiV) << 46;
+ RddV |= ((1ULL) << 63);
+})
+
+
+/* CONVERSION */
+
+#define CONVERT(TAG,DEST,DESTV,SRC,SRCV,OUTCAST,OUTTYPE,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+ Q6INSN(F2_conv_##TAG##MODETAG,#DEST"=convert_"#TAG"("#SRC")"#MODESYN,ATTRIBS(), \
+ "Floating point format conversion", \
+ { MODEBEH DESTV = OUTCAST(conv_##INTYPE##_to_##OUTTYPE(INCAST(SRCV))); })
+
+CONVERT(sf2df,Rdd32,RddV,Rs32,RsV,fUNDOUBLE,df,fFLOAT,sf,,,)
+CONVERT(df2sf,Rd32,RdV,Rss32,RssV,fUNFLOAT,sf,fDOUBLE,df,,,)
+
+#define ALLINTDST(TAGSTART,SRC,SRCV,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##uw,Rd32,RdV,SRC,SRCV,fCAST4u,4u,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##w,Rd32,RdV,SRC,SRCV,fCAST4s,4s,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##ud,Rdd32,RddV,SRC,SRCV,fCAST8u,8u,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##d,Rdd32,RddV,SRC,SRCV,fCAST8s,8s,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH)
+
+#define ALLFPDST(TAGSTART,SRC,SRCV,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##sf,Rd32,RdV,SRC,SRCV,fUNFLOAT,sf,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH) \
+CONVERT(TAGSTART##df,Rdd32,RddV,SRC,SRCV,fUNDOUBLE,df,INCAST,INTYPE,MODETAG,MODESYN,MODEBEH)
+
+#define ALLINTSRC(GEN,MODETAG,MODESYN,MODEBEH) \
+GEN(uw##2,Rs32,RsV,fCAST4u,4u,MODETAG,MODESYN,MODEBEH) \
+GEN(w##2,Rs32,RsV,fCAST4s,4s,MODETAG,MODESYN,MODEBEH) \
+GEN(ud##2,Rss32,RssV,fCAST8u,8u,MODETAG,MODESYN,MODEBEH) \
+GEN(d##2,Rss32,RssV,fCAST8s,8s,MODETAG,MODESYN,MODEBEH)
+
+#define ALLFPSRC(GEN,MODETAG,MODESYN,MODEBEH) \
+GEN(sf##2,Rs32,RsV,fFLOAT,sf,MODETAG,MODESYN,MODEBEH) \
+GEN(df##2,Rss32,RssV,fDOUBLE,df,MODETAG,MODESYN,MODEBEH)
+
+ALLINTSRC(ALLFPDST,,,)
+ALLFPSRC(ALLINTDST,,,)
+ALLFPSRC(ALLINTDST,_chop,:chop,fFPSETROUND_CHOP();)
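+
+/*
+ * For reference, one concrete expansion of the generators above --
+ * from ALLINTSRC(ALLFPDST,,,) -- is equivalent to:
+ *
+ *   Q6INSN(F2_conv_w2sf,"Rd32=convert_w2sf(Rs32)",ATTRIBS(),
+ *   "Floating point format conversion",
+ *   { RdV = fUNFLOAT(conv_4s_to_sf(fCAST4s(RsV))); })
+ *
+ * The :chop variants differ only in prepending fFPSETROUND_CHOP() so
+ * the conversion truncates toward zero.
+ */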
diff --git a/target/hexagon/imported/iclass.def b/target/hexagon/imported/iclass.def
new file mode 100644
index 000000000..fb57968c6
--- /dev/null
+++ b/target/hexagon/imported/iclass.def
@@ -0,0 +1,51 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* DEF_*(TYPE,SLOTS,UNITS) */
+DEF_PP_ICLASS32(EXTENDER,0123,LDST|SUNIT|MUNIT) /* 0 */
+DEF_PP_ICLASS32(CJ,0123,CTRLFLOW) /* 1 */
+DEF_PP_ICLASS32(NCJ,01,LDST|CTRLFLOW) /* 2 */
+DEF_PP_ICLASS32(V4LDST,01,LDST) /* 3 */
+DEF_PP_ICLASS32(V2LDST,01,LDST) /* 4 */
+DEF_PP_ICLASS32(J,0123,CTRLFLOW) /* 5 */
+DEF_PP_ICLASS32(CR,3,SUNIT) /* 6 */
+DEF_PP_ICLASS32(ALU32_2op,0123,LDST|SUNIT|MUNIT) /* 7 */
+DEF_PP_ICLASS32(S_2op,23,SUNIT|MUNIT) /* 8 */
+DEF_PP_ICLASS32(LD,01,LDST) /* 9 */
+DEF_PP_ICLASS32(ST,01,LDST) /* 10 */
+DEF_PP_ICLASS32(ALU32_ADDI,0123,LDST|SUNIT|MUNIT) /* 11 */
+DEF_PP_ICLASS32(S_3op,23,SUNIT|MUNIT) /* 12 */
+DEF_PP_ICLASS32(ALU64,23,SUNIT|MUNIT) /* 13 */
+DEF_PP_ICLASS32(M,23,SUNIT|MUNIT) /* 14 */
+DEF_PP_ICLASS32(ALU32_3op,0123,LDST|SUNIT|MUNIT) /* 15 */
+
+DEF_EE_ICLASS32(EE0,01,INVALID) /* 0 */
+DEF_EE_ICLASS32(EE1,01,INVALID) /* 1 */
+DEF_EE_ICLASS32(EE2,01,INVALID) /* 2 */
+DEF_EE_ICLASS32(EE3,01,INVALID) /* 3 */
+DEF_EE_ICLASS32(EE4,01,INVALID) /* 4 */
+DEF_EE_ICLASS32(EE5,01,INVALID) /* 5 */
+DEF_EE_ICLASS32(EE6,01,INVALID) /* 6 */
+DEF_EE_ICLASS32(EE7,01,INVALID) /* 7 */
+DEF_EE_ICLASS32(EE8,01,INVALID) /* 8 */
+DEF_EE_ICLASS32(EE9,01,INVALID) /* 9 */
+DEF_EE_ICLASS32(EEA,01,INVALID) /* 10 */
+DEF_EE_ICLASS32(EEB,01,INVALID) /* 11 */
+DEF_EE_ICLASS32(EEC,01,INVALID) /* 12 */
+DEF_EE_ICLASS32(EED,01,INVALID) /* 13 */
+DEF_EE_ICLASS32(EEE,01,INVALID) /* 14 */
+DEF_EE_ICLASS32(EEF,01,INVALID) /* 15 */
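+
+/* Read each row as DEF_*(TYPE,SLOTS,UNITS): e.g. the LD row says class 9
+ * may only issue in packet slots 0 or 1 and needs a load/store unit.
+ * The EE rows are placeholders whose units are marked INVALID. */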
diff --git a/target/hexagon/imported/ldst.idef b/target/hexagon/imported/ldst.idef
new file mode 100644
index 000000000..359d3b744
--- /dev/null
+++ b/target/hexagon/imported/ldst.idef
@@ -0,0 +1,354 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Load and Store instruction definitions
+ */
+
+/* The set of addressing modes standard to all Load instructions */
+#define STD_LD_AMODES(TAG,OPER,DESCR,ATTRIB,SHFT,SEMANTICS,SCALE)\
+Q6INSN(L2_##TAG##_io, OPER"(Rs32+#s11:"SHFT")", ATTRIB,DESCR,{fIMMEXT(siV); fEA_RI(RsV,siV); SEMANTICS; })\
+Q6INSN(L4_##TAG##_ur, OPER"(Rt32<<#u2+#U6)", ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IRs(UiV,RtV,uiV); SEMANTICS;})\
+Q6INSN(L4_##TAG##_ap, OPER"(Re32=#U6)", ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IMM(UiV); SEMANTICS; ReV=UiV; })\
+Q6INSN(L2_##TAG##_pr, OPER"(Rx32++Mu2)", ATTRIB,DESCR,{fEA_REG(RxV); fPM_M(RxV,MuV); SEMANTICS;})\
+Q6INSN(L2_##TAG##_pbr, OPER"(Rx32++Mu2:brev)", ATTRIB,DESCR,{fEA_BREVR(RxV); fPM_M(RxV,MuV); SEMANTICS;})\
+Q6INSN(L2_##TAG##_pi, OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); fPM_I(RxV,siV); SEMANTICS;})\
+Q6INSN(L2_##TAG##_pci, OPER"(Rx32++#s4:"SHFT":circ(Mu2))",ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRI(RxV,siV,MuV); SEMANTICS;})\
+Q6INSN(L2_##TAG##_pcr, OPER"(Rx32++I:circ(Mu2))", ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRR(RxV,fREAD_IREG(MuV)<<SCALE,MuV); SEMANTICS;})
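+
+/*
+ * Each STD_LD_AMODES use below mints eight instructions, one per
+ * addressing mode.  For instance the _io row of loadrub expands to the
+ * equivalent of:
+ *
+ *   Q6INSN(L2_loadrub_io,"Rd32=memub(Rs32+#s11:0)",ATTRIBS(A_LOAD),
+ *   "Load Unsigned Byte",
+ *   {fIMMEXT(siV); fEA_RI(RsV,siV); fLOAD(1,1,u,EA,RdV); })
+ */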
+
+/* The set of 32-bit load instructions */
+STD_LD_AMODES(loadrub,"Rd32=memub","Load Unsigned Byte",ATTRIBS(A_LOAD),"0",fLOAD(1,1,u,EA,RdV),0)
+STD_LD_AMODES(loadrb, "Rd32=memb", "Load signed Byte",ATTRIBS(A_LOAD),"0",fLOAD(1,1,s,EA,RdV),0)
+STD_LD_AMODES(loadruh,"Rd32=memuh","Load unsigned Half integer",ATTRIBS(A_LOAD),"1",fLOAD(1,2,u,EA,RdV),1)
+STD_LD_AMODES(loadrh, "Rd32=memh", "Load signed Half integer",ATTRIBS(A_LOAD),"1",fLOAD(1,2,s,EA,RdV),1)
+STD_LD_AMODES(loadri, "Rd32=memw", "Load Word",ATTRIBS(A_LOAD),"2",fLOAD(1,4,u,EA,RdV),2)
+STD_LD_AMODES(loadrd, "Rdd32=memd","Load Double integer",ATTRIBS(A_LOAD),"3",fLOAD(1,8,u,EA,RddV),3)
+
+/* These instructions do a load and unpack */
+STD_LD_AMODES(loadbzw2, "Rd32=memubh", "Load Bytes and Vector Zero-Extend (unpack)",
+ATTRIBS(A_LOAD),"1",
+{fHIDE(size2u_t tmpV; int i;)
+ fLOAD(1,2,u,EA,tmpV);
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETUBYTE(i,tmpV));
+ }
+},1)
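+
+/* Example: a loaded halfword 0xBBAA unpacks to RdV = 0x00BB00AA --
+ * byte i of the loaded value widens into halfword i of the result. */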
+
+STD_LD_AMODES(loadbzw4, "Rdd32=memubh", "Load Bytes and Vector Zero-Extend (unpack)",
+ATTRIBS(A_LOAD),"2",
+{fHIDE(size4u_t tmpV; int i;)
+ fLOAD(1,4,u,EA,tmpV);
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETUBYTE(i,tmpV));
+ }
+},2)
+
+
+
+/* These instructions do a load and unpack */
+STD_LD_AMODES(loadbsw2, "Rd32=membh", "Load Bytes and Vector Sign-Extend (unpack)",
+ATTRIBS(A_LOAD),"1",
+{fHIDE(size2u_t tmpV; int i;)
+ fLOAD(1,2,u,EA,tmpV);
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETBYTE(i,tmpV));
+ }
+},1)
+
+STD_LD_AMODES(loadbsw4, "Rdd32=membh", "Load Bytes and Vector Sign-Extend (unpack)",
+ATTRIBS(A_LOAD),"2",
+{fHIDE(size4u_t tmpV; int i;)
+ fLOAD(1,4,u,EA,tmpV);
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETBYTE(i,tmpV));
+ }
+},2)
+
+
+
+STD_LD_AMODES(loadalignh, "Ryy32=memh_fifo", "Load Half-word into shifted vector",
+ATTRIBS(A_LOAD),"1",
+{
+ fHIDE(size8u_t tmpV;)
+ fLOAD(1,2,u,EA,tmpV);
+ RyyV = (((size8u_t)RyyV)>>16)|(tmpV<<48);
+},1)
+
+
+STD_LD_AMODES(loadalignb, "Ryy32=memb_fifo", "Load byte into shifted vector",
+ATTRIBS(A_LOAD),"0",
+{
+ fHIDE(size8u_t tmpV;)
+ fLOAD(1,1,u,EA,tmpV);
+ RyyV = (((size8u_t)RyyV)>>8)|(tmpV<<56);
+},0)
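+
+/* The _fifo loads behave like a byte/halfword-wide shift register: each
+ * load shifts the 64-bit Ryy right by the element size and inserts the
+ * new element at the top, so eight byte loads (or four halfword loads)
+ * assemble a doubleword from an arbitrarily aligned stream. */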
+
+
+
+
+/* The set of addressing modes standard to all Store instructions */
+#define STD_ST_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SEMANTICS,SCALE)\
+Q6INSN(S2_##TAG##_io, OPER"(Rs32+#s11:"SHFT")="DEST, ATTRIB,DESCR,{fIMMEXT(siV); fEA_RI(RsV,siV); SEMANTICS; })\
+Q6INSN(S2_##TAG##_pi, OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_I(RxV,siV); SEMANTICS; })\
+Q6INSN(S4_##TAG##_ap, OPER"(Re32=#U6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IMM(UiV); SEMANTICS; ReV=UiV; })\
+Q6INSN(S2_##TAG##_pr, OPER"(Rx32++Mu2)="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_M(RxV,MuV); SEMANTICS; })\
+Q6INSN(S4_##TAG##_ur, OPER"(Ru32<<#u2+#U6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(UiV); fEA_IRs(UiV,RuV,uiV); SEMANTICS;})\
+Q6INSN(S2_##TAG##_pbr, OPER"(Rx32++Mu2:brev)="DEST, ATTRIB,DESCR,{fEA_BREVR(RxV); fPM_M(RxV,MuV); SEMANTICS; })\
+Q6INSN(S2_##TAG##_pci, OPER"(Rx32++#s4:"SHFT":circ(Mu2))="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRI(RxV,siV,MuV); SEMANTICS;})\
+Q6INSN(S2_##TAG##_pcr, OPER"(Rx32++I:circ(Mu2))="DEST, ATTRIB,DESCR,{fEA_REG(RxV); fPM_CIRR(RxV,fREAD_IREG(MuV)<<SCALE,MuV); SEMANTICS;})
+
+
+/* The set of 32-bit store instructions */
+STD_ST_AMODES(storerb, "Rt32", "memb","Store Byte",ATTRIBS(A_STORE),"0",fSTORE(1,1,EA,fGETBYTE(0,RtV)),0)
+STD_ST_AMODES(storerh, "Rt32", "memh","Store Half integer",ATTRIBS(A_STORE),"1",fSTORE(1,2,EA,fGETHALF(0,RtV)),1)
+STD_ST_AMODES(storerf, "Rt.H32", "memh","Store Upper Half integer",ATTRIBS(A_STORE),"1",fSTORE(1,2,EA,fGETHALF(1,RtV)),1)
+STD_ST_AMODES(storeri, "Rt32", "memw","Store Word",ATTRIBS(A_STORE),"2",fSTORE(1,4,EA,RtV),2)
+STD_ST_AMODES(storerd, "Rtt32","memd","Store Double integer",ATTRIBS(A_STORE),"3",fSTORE(1,8,EA,RttV),3)
+STD_ST_AMODES(storerinew, "Nt8.new", "memw","Store Word",ATTRIBS(A_STORE),"2",fSTORE(1,4,EA,fNEWREG_ST(NtN)),2)
+STD_ST_AMODES(storerbnew, "Nt8.new", "memb","Store Byte",ATTRIBS(A_STORE),"0",fSTORE(1,1,EA,fGETBYTE(0,fNEWREG_ST(NtN))),0)
+STD_ST_AMODES(storerhnew, "Nt8.new", "memh","Store Half integer",ATTRIBS(A_STORE),"1",fSTORE(1,2,EA,fGETHALF(0,fNEWREG_ST(NtN))),1)
+
+
+Q6INSN(S2_allocframe,"allocframe(Rx32,#u11:3):raw", ATTRIBS(A_STORE,A_RESTRICT_SLOT0ONLY), "Allocate stack frame",
+{ fEA_RI(RxV,-8); fSTORE(1,8,EA,fFRAME_SCRAMBLE((fCAST8_8u(fREAD_LR()) << 32) | fCAST4_4u(fREAD_FP()))); fWRITE_FP(EA); fFRAMECHECK(EA-uiV,EA); RxV = EA-uiV; })
+
+#define A_RETURN A_RESTRICT_SLOT0ONLY
+
+Q6INSN(L2_deallocframe,"Rdd32=deallocframe(Rs32):raw", ATTRIBS(A_LOAD), "Deallocate stack frame",
+{ fHIDE(size8u_t tmp;) fEA_REG(RsV);
+ fLOAD(1,8,u,EA,tmp);
+ RddV = fFRAME_UNSCRAMBLE(tmp);
+ fWRITE_SP(EA+8); })
+
+Q6INSN(L4_return,"Rdd32=dealloc_return(Rs32):raw", ATTRIBS(A_JINDIR,A_LOAD,A_RETURN), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;) fEA_REG(RsV);
+ fLOAD(1,8,u,EA,tmp);
+ RddV = fFRAME_UNSCRAMBLE(tmp);
+ fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,RddV),COF_TYPE_JUMPR);})
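+
+/*
+ * Stack frame shape implied by the pair above (a sketch):
+ *
+ *   allocframe(Rx,#u11:3):  mem[Rx-8] = scramble(LR:FP); FP = Rx-8;
+ *                           Rx = Rx - 8 - #u11:3
+ *   deallocframe(Rs):       Rdd = unscramble(mem[Rs]); SP = Rs + 8
+ *                           (word 1 is the saved LR, word 0 the saved FP)
+ *
+ * The scramble is an XOR with FRAMEKEY in the upper word (see
+ * fFRAME_SCRAMBLE in macros.def), applied symmetrically both ways.
+ */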
+
+#define CONDSEM(SRCREG,STALLBITS0,STALLBITS1,PREDFUNC,PREDARG,STALLSPEC,PREDCOND) \
+{ \
+ fHIDE(size8u_t tmp;) \
+ fBRANCH_SPECULATE_STALL(PREDFUNC##PREDCOND(PREDARG),,STALLSPEC,STALLBITS0,STALLBITS1); \
+ fEA_REG(SRCREG); \
+ if (PREDFUNC##PREDCOND(PREDARG)) { \
+ fLOAD(1,8,u,EA,tmp); \
+ RddV = fFRAME_UNSCRAMBLE(tmp); \
+ fWRITE_SP(EA+8); \
+ fJUMPR(REG_LR,fGETWORD(1,RddV),COF_TYPE_JUMPR); \
+ } else { \
+ LOAD_CANCEL(EA); \
+ } \
+}
+
+#define COND_RETURN_TF(TG,TG2,DOTNEW,STALLBITS0,STALLBITS1,STALLSPEC,ATTRIBS,PREDFUNC,PREDARG,T_NT) \
+ Q6INSN(TG##_t##TG2,"if (Pv4"DOTNEW") Rdd32=dealloc_return(Rs32)"T_NT":raw",ATTRIBS,"deallocate stack frame and return", \
+ CONDSEM(RsV,STALLBITS0,STALLBITS1,PREDFUNC,PREDARG,STALLSPEC,)) \
+ Q6INSN(TG##_f##TG2,"if (!Pv4"DOTNEW") Rdd32=dealloc_return(Rs32)"T_NT":raw",ATTRIBS,"deallocate stack frame and return", \
+ CONDSEM(RsV,STALLBITS0,STALLBITS1,PREDFUNC##NOT,PREDARG,STALLSPEC,))
+
+#define COND_RETURN_NEW(TG,STALLBITS0,STALLBITS1,ATTRIBS) \
+ COND_RETURN_TF(TG,new_pt,".new",12,0,SPECULATE_TAKEN,ATTRIBS,fLSBNEW,PvN,":t") \
+ COND_RETURN_TF(TG,new_pnt,".new",12,0,SPECULATE_NOT_TAKEN,ATTRIBS,fLSBNEW,PvN,":nt") \
+
+#define RETURN_ATTRIBS A_LOAD,A_RETURN
+
+COND_RETURN_TF(L4_return,,,7,0,SPECULATE_NOT_TAKEN,ATTRIBS(RETURN_ATTRIBS,A_JINDIROLD),fLSBOLD,PvV,)
+COND_RETURN_NEW(L4_return,12,0,ATTRIBS(RETURN_ATTRIBS,A_JINDIRNEW))
+
+
+
+
+Q6INSN(L2_loadw_locked,"Rd32=memw_locked(Rs32)", ATTRIBS(A_LOAD,A_RESTRICT_SLOT0ONLY), "Load word with lock",
+{ fEA_REG(RsV); fLOAD_LOCKED(1,4,u,EA,RdV) })
+
+
+Q6INSN(S2_storew_locked,"memw_locked(Rs32,Pd4)=Rt32", ATTRIBS(A_STORE,A_RESTRICT_SLOT0ONLY), "Store word with lock",
+{ fEA_REG(RsV); fSTORE_LOCKED(1,4,EA,RtV,PdV) })
+
+
+Q6INSN(L4_loadd_locked,"Rdd32=memd_locked(Rs32)", ATTRIBS(A_LOAD,A_RESTRICT_SLOT0ONLY), "Load double with lock",
+{ fEA_REG(RsV); fLOAD_LOCKED(1,8,u,EA,RddV) })
+
+Q6INSN(S4_stored_locked,"memd_locked(Rs32,Pd4)=Rtt32", ATTRIBS(A_STORE,A_RESTRICT_SLOT0ONLY), "Store double with lock",
+{ fEA_REG(RsV); fSTORE_LOCKED(1,8,EA,RttV,PdV) })
+
+
+
+
+
+/*****************************************************************/
+/* */
+/* Predicated LDST */
+/* */
+/*****************************************************************/
+
+#define STD_PLD_AMODES(TAG,OPER,DESCR,ATTRIB,SHFT,SHFTNUM,SEMANTICS)\
+Q6INSN(L4_##TAG##_rr, OPER"(Rs32+Rt32<<#u2)", ATTRIB,DESCR,{fEA_RRs(RsV,RtV,uiV); SEMANTICS;})\
+Q6INSN(L2_p##TAG##t_io, "if (Pt4) "OPER"(Rs32+#u6:"SHFT")", ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if(fLSBOLD(PtV)){SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##t_pi, "if (Pt4) "OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); if(fLSBOLD(PtV)){ fPM_I(RxV,siV); SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##f_io, "if (!Pt4) "OPER"(Rs32+#u6:"SHFT")", ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if(fLSBOLDNOT(PtV)){ SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##f_pi, "if (!Pt4) "OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); if(fLSBOLDNOT(PtV)){ fPM_I(RxV,siV); SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##tnew_io,"if (Pt4.new) "OPER"(Rs32+#u6:"SHFT")",ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if (fLSBNEW(PtN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##fnew_io,"if (!Pt4.new) "OPER"(Rs32+#u6:"SHFT")",ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if (fLSBNEWNOT(PtN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##t_rr, "if (Pv4) "OPER"(Rs32+Rt32<<#u2)", ATTRIB,DESCR,{fEA_RRs(RsV,RtV,uiV); if(fLSBOLD(PvV)){ SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##f_rr, "if (!Pv4) "OPER"(Rs32+Rt32<<#u2)", ATTRIB,DESCR,{fEA_RRs(RsV,RtV,uiV); if(fLSBOLDNOT(PvV)){ SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##tnew_rr,"if (Pv4.new) "OPER"(Rs32+Rt32<<#u2)",ATTRIB,DESCR,{fEA_RRs(RsV,RtV,uiV); if (fLSBNEW(PvN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##fnew_rr,"if (!Pv4.new) "OPER"(Rs32+Rt32<<#u2)",ATTRIB,DESCR,{fEA_RRs(RsV,RtV,uiV); if (fLSBNEWNOT(PvN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##tnew_pi, "if (Pt4.new) "OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); if(fLSBNEW(PtN)){ fPM_I(RxV,siV); SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L2_p##TAG##fnew_pi, "if (!Pt4.new) "OPER"(Rx32++#s4:"SHFT")", ATTRIB,DESCR,{fEA_REG(RxV); if(fLSBNEWNOT(PtN)){ fPM_I(RxV,siV); SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##t_abs, "if (Pt4) "OPER"(#u6)", ATTRIB,DESCR,{fMUST_IMMEXT(uiV); fEA_IMM(uiV); if(fLSBOLD(PtV)){ SEMANTICS;} else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##f_abs, "if (!Pt4) "OPER"(#u6)", ATTRIB,DESCR,{fMUST_IMMEXT(uiV); fEA_IMM(uiV); if(fLSBOLDNOT(PtV)){ SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##tnew_abs,"if (Pt4.new) "OPER"(#u6)",ATTRIB,DESCR,{fMUST_IMMEXT(uiV); fEA_IMM(uiV);if (fLSBNEW(PtN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})\
+Q6INSN(L4_p##TAG##fnew_abs,"if (!Pt4.new) "OPER"(#u6)",ATTRIB,DESCR,{fMUST_IMMEXT(uiV); fEA_IMM(uiV);if (fLSBNEWNOT(PtN)) { SEMANTICS; } else {LOAD_CANCEL(EA);}})
+
+
+
+/* The set of 32-bit predicated load instructions */
+STD_PLD_AMODES(loadrub,"Rd32=memub","Load Unsigned Byte",ATTRIBS(A_ARCHV2,A_LOAD),"0",0,fLOAD(1,1,u,EA,RdV))
+STD_PLD_AMODES(loadrb, "Rd32=memb", "Load signed Byte",ATTRIBS(A_ARCHV2,A_LOAD),"0",0,fLOAD(1,1,s,EA,RdV))
+STD_PLD_AMODES(loadruh,"Rd32=memuh","Load unsigned Half integer",ATTRIBS(A_ARCHV2,A_LOAD),"1",1,fLOAD(1,2,u,EA,RdV))
+STD_PLD_AMODES(loadrh, "Rd32=memh", "Load signed Half integer",ATTRIBS(A_ARCHV2,A_LOAD),"1",1,fLOAD(1,2,s,EA,RdV))
+STD_PLD_AMODES(loadri, "Rd32=memw", "Load Word",ATTRIBS(A_ARCHV2,A_LOAD),"2",2,fLOAD(1,4,u,EA,RdV))
+STD_PLD_AMODES(loadrd, "Rdd32=memd","Load Double integer",ATTRIBS(A_ARCHV2,A_LOAD),"3",3,fLOAD(1,8,u,EA,RddV))
+
+/* The set of addressing modes standard to all predicated store instructions */
+#define STD_PST_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SHFTNUM,SEMANTICS)\
+Q6INSN(S4_##TAG##_rr, OPER"(Rs32+Ru32<<#u2)="DEST, ATTRIB,DESCR,{fEA_RRs(RsV,RuV,uiV); SEMANTICS;})\
+Q6INSN(S2_p##TAG##t_io, "if (Pv4) "OPER"(Rs32+#u6:"SHFT")="DEST, ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if (fLSBOLD(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S2_p##TAG##t_pi, "if (Pv4) "OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG(RxV); if (fLSBOLD(PvV)){ fPM_I(RxV,siV); SEMANTICS;} else {STORE_CANCEL(EA);}})\
+Q6INSN(S2_p##TAG##f_io, "if (!Pv4) "OPER"(Rs32+#u6:"SHFT")="DEST, ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if (fLSBOLDNOT(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S2_p##TAG##f_pi, "if (!Pv4) "OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG(RxV); if (fLSBOLDNOT(PvV)){ fPM_I(RxV,siV); SEMANTICS;} else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##t_rr, "if (Pv4) "OPER"(Rs32+Ru32<<#u2)="DEST, ATTRIB,DESCR,{fEA_RRs(RsV,RuV,uiV); if (fLSBOLD(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##f_rr, "if (!Pv4) "OPER"(Rs32+Ru32<<#u2)="DEST, ATTRIB,DESCR,{fEA_RRs(RsV,RuV,uiV); if (fLSBOLDNOT(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##tnew_io,"if (Pv4.new) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if ( fLSBNEW(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##fnew_io,"if (!Pv4.new) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); if (fLSBNEWNOT(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##tnew_rr,"if (Pv4.new) "OPER"(Rs32+Ru32<<#u2)="DEST,ATTRIB,DESCR,{fEA_RRs(RsV,RuV,uiV); if ( fLSBNEW(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##fnew_rr,"if (!Pv4.new) "OPER"(Rs32+Ru32<<#u2)="DEST,ATTRIB,DESCR,{fEA_RRs(RsV,RuV,uiV); if (fLSBNEWNOT(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S2_p##TAG##tnew_pi, "if (Pv4.new) "OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG(RxV); if (fLSBNEW(PvN)){ fPM_I(RxV,siV); SEMANTICS;} else {STORE_CANCEL(EA);}})\
+Q6INSN(S2_p##TAG##fnew_pi, "if (!Pv4.new) "OPER"(Rx32++#s4:"SHFT")="DEST, ATTRIB,DESCR,{fEA_REG(RxV); if (fLSBNEWNOT(PvN)){ fPM_I(RxV,siV); SEMANTICS;} else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##t_abs, "if (Pv4) "OPER"(#u6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(uiV); fEA_IMM(uiV); if (fLSBOLD(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##f_abs, "if (!Pv4) "OPER"(#u6)="DEST, ATTRIB,DESCR,{fMUST_IMMEXT(uiV);fEA_IMM(uiV); if (fLSBOLDNOT(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##tnew_abs,"if (Pv4.new) "OPER"(#u6)="DEST,ATTRIB,DESCR,{fMUST_IMMEXT(uiV);fEA_IMM(uiV); if ( fLSBNEW(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_p##TAG##fnew_abs,"if (!Pv4.new) "OPER"(#u6)="DEST,ATTRIB,DESCR,{fMUST_IMMEXT(uiV);fEA_IMM(uiV); if (fLSBNEWNOT(PvN)) { SEMANTICS; } else {STORE_CANCEL(EA);}})
+
+
+
+
+/* The set of 32-bit predicated store instructions */
+STD_PST_AMODES(storerb,"Rt32","memb","Store Byte",ATTRIBS(A_ARCHV2,A_STORE),"0",0,fSTORE(1,1,EA,fGETBYTE(0,RtV)))
+STD_PST_AMODES(storerh,"Rt32","memh","Store Half integer",ATTRIBS(A_ARCHV2,A_STORE),"1",1,fSTORE(1,2,EA,fGETHALF(0,RtV)))
+STD_PST_AMODES(storerf,"Rt.H32","memh","Store Upper Half integer",ATTRIBS(A_ARCHV2,A_STORE),"1",1,fSTORE(1,2,EA,fGETHALF(1,RtV)))
+STD_PST_AMODES(storeri,"Rt32","memw","Store Word",ATTRIBS(A_ARCHV2,A_STORE),"2",2,fSTORE(1,4,EA,RtV))
+STD_PST_AMODES(storerd,"Rtt32","memd","Store Double integer",ATTRIBS(A_ARCHV2,A_STORE),"3",3,fSTORE(1,8,EA,RttV))
+STD_PST_AMODES(storerinew,"Nt8.new","memw","Store Word",ATTRIBS(A_ARCHV2,A_STORE),"2",2,fSTORE(1,4,EA,fNEWREG_ST(NtN)))
+STD_PST_AMODES(storerbnew,"Nt8.new","memb","Store Byte",ATTRIBS(A_ARCHV2,A_STORE),"0",0,fSTORE(1,1,EA,fGETBYTE(0,fNEWREG_ST(NtN))))
+STD_PST_AMODES(storerhnew,"Nt8.new","memh","Store Half integer",ATTRIBS(A_ARCHV2,A_STORE),"1",1,fSTORE(1,2,EA,fGETHALF(0,fNEWREG_ST(NtN))))
+
+
+
+
+/*****************************************************************/
+/* */
+/* Mem-Ops (Load-op-Store) */
+/* */
+/*****************************************************************/
+
+/* The set of 32-bit non-predicated mem-ops */
+#define STD_MEMOP_AMODES(TAG,OPER,DESCR,SEMANTICS)\
+Q6INSN(L4_##TAG##w_io, "memw(Rs32+#u6:2)"OPER, ATTRIBS(A_RESTRICT_SLOT0ONLY),DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); fHIDE(size4s_t tmp;) fLOAD(1,4,s,EA,tmp); SEMANTICS; fSTORE(1,4,EA,tmp); })\
+Q6INSN(L4_##TAG##b_io, "memb(Rs32+#u6:0)"OPER, ATTRIBS(A_RESTRICT_SLOT0ONLY),DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); fHIDE(size4s_t tmp;) fLOAD(1,1,s,EA,tmp); SEMANTICS; fSTORE(1,1,EA,tmp); })\
+Q6INSN(L4_##TAG##h_io, "memh(Rs32+#u6:1)"OPER, ATTRIBS(A_RESTRICT_SLOT0ONLY),DESCR,{fIMMEXT(uiV); fEA_RI(RsV,uiV); fHIDE(size4s_t tmp;) fLOAD(1,2,s,EA,tmp); SEMANTICS; fSTORE(1,2,EA,tmp); })
+
+
+
+STD_MEMOP_AMODES(add_memop, "+=Rt32", "Add Register to Memory Word", tmp += RtV)
+STD_MEMOP_AMODES(sub_memop, "-=Rt32", "Subtract Register from Memory Word", tmp -= RtV)
+STD_MEMOP_AMODES(and_memop, "&=Rt32", "Logical AND Register to Memory Word", tmp &= RtV)
+STD_MEMOP_AMODES(or_memop, "|=Rt32", "Logical OR Register to Memory Word", tmp |= RtV)
+
+
+STD_MEMOP_AMODES(iadd_memop, "+=#U5", "Add Immediate to Memory Word", tmp += UiV)
+STD_MEMOP_AMODES(isub_memop, "-=#U5", "Subtract Immediate from Memory Word", tmp -= UiV)
+STD_MEMOP_AMODES(iand_memop, "=clrbit(#U5)", "Clear a bit in memory", tmp &= (~(1<<UiV)))
+STD_MEMOP_AMODES(ior_memop, "=setbit(#U5)", "Set a bit in memory", tmp |= (1<<UiV))
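+
+/*
+ * Model of a mem-op, e.g. memw(Rs32+#u6:2)+=Rt32 (illustrative C,
+ * ignoring immediate extension and the slot-0 restriction):
+ *
+ *   int32_t tmp = *(int32_t *)(RsV + uiV);   // load
+ *   tmp += RtV;                              // op
+ *   *(int32_t *)(RsV + uiV) = tmp;           // store
+ */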
+
+
+/*****************************************************************/
+/* */
+/* V4 store immediates */
+/* */
+/*****************************************************************/
+/* Predicated Store immediates */
+#define V4_PSTI_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SEMANTICS)\
+Q6INSN(S4_##TAG##t_io,"if (Pv4) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fEA_RI(RsV,uiV); if (fLSBOLD(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_##TAG##f_io,"if (!Pv4) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fEA_RI(RsV,uiV); if (fLSBOLDNOT(PvV)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_##TAG##tnew_io,"if (Pv4.new) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fEA_RI(RsV,uiV); if (fLSBNEW(PvN)){ SEMANTICS; } else {STORE_CANCEL(EA);}})\
+Q6INSN(S4_##TAG##fnew_io,"if (!Pv4.new) "OPER"(Rs32+#u6:"SHFT")="DEST,ATTRIB,DESCR,{fEA_RI(RsV,uiV); if (fLSBNEWNOT(PvN)){ SEMANTICS; } else {STORE_CANCEL(EA);}})
+
+/* The set of 32-bit store immediate instructions */
+V4_PSTI_AMODES(storeirb,"#S6","memb","Store Immediate Byte",ATTRIBS(A_ARCHV2,A_STORE),"0",fIMMEXT(SiV); fSTORE(1,1,EA,SiV))
+V4_PSTI_AMODES(storeirh,"#S6","memh","Store Immediate Half integer",ATTRIBS(A_ARCHV2,A_STORE),"1",fIMMEXT(SiV); fSTORE(1,2,EA,SiV))
+V4_PSTI_AMODES(storeiri,"#S6","memw","Store Immediate Word",ATTRIBS(A_ARCHV2,A_STORE),"2",fIMMEXT(SiV); fSTORE(1,4,EA,SiV))
+
+
+/* Non-predicated store immediates */
+#define V4_STI_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SEMANTICS)\
+Q6INSN(S4_##TAG##_io, OPER"(Rs32+#u6:"SHFT")="DEST, ATTRIB,DESCR,{fEA_RI(RsV,uiV); SEMANTICS; })
+
+/* The set of 32-bit store immediate instructions */
+V4_STI_AMODES(storeirb,"#S8","memb","Store Immediate Byte",ATTRIBS(A_ARCHV2,A_STORE),"0",fIMMEXT(SiV); fSTORE(1,1,EA,SiV))
+V4_STI_AMODES(storeirh,"#S8","memh","Store Immediate Half integer",ATTRIBS(A_ARCHV2,A_STORE),"1",fIMMEXT(SiV); fSTORE(1,2,EA,SiV))
+V4_STI_AMODES(storeiri,"#S8","memw","Store Immediate Word",ATTRIBS(A_ARCHV2,A_STORE),"2",fIMMEXT(SiV); fSTORE(1,4,EA,SiV))
+
+
+
+
+
+
+
+/*****************************************************************/
+/* */
+/* V2 GP-relative LD/ST */
+/* */
+/*****************************************************************/
+
+#define STD_GPLD_AMODES(TAG,OPER,DESCR,ATTRIB,SHFT,SEMANTICS)\
+Q6INSN(L2_##TAG##gp, OPER"(gp+#u16:"SHFT")", ATTRIB,DESCR,{fIMMEXT(uiV); fEA_GPI(uiV); SEMANTICS; })
+
+/* The set of 32-bit load instructions */
+STD_GPLD_AMODES(loadrub,"Rd32=memub","Load Unsigned Byte",ATTRIBS(A_LOAD,A_ARCHV2),"0",fLOAD(1,1,u,EA,RdV))
+STD_GPLD_AMODES(loadrb, "Rd32=memb", "Load signed Byte",ATTRIBS(A_LOAD,A_ARCHV2),"0",fLOAD(1,1,s,EA,RdV))
+STD_GPLD_AMODES(loadruh,"Rd32=memuh","Load unsigned Half integer",ATTRIBS(A_LOAD,A_ARCHV2),"1",fLOAD(1,2,u,EA,RdV))
+STD_GPLD_AMODES(loadrh, "Rd32=memh", "Load signed Half integer",ATTRIBS(A_LOAD,A_ARCHV2),"1",fLOAD(1,2,s,EA,RdV))
+STD_GPLD_AMODES(loadri, "Rd32=memw", "Load Word",ATTRIBS(A_LOAD,A_ARCHV2),"2",fLOAD(1,4,u,EA,RdV))
+STD_GPLD_AMODES(loadrd, "Rdd32=memd","Load Double integer",ATTRIBS(A_LOAD,A_ARCHV2),"3",fLOAD(1,8,u,EA,RddV))
+
+
+#define STD_GPST_AMODES(TAG,DEST,OPER,DESCR,ATTRIB,SHFT,SEMANTICS)\
+Q6INSN(S2_##TAG##gp, OPER"(gp+#u16:"SHFT")="DEST, ATTRIB,DESCR,{fIMMEXT(uiV); fEA_GPI(uiV); SEMANTICS; })
+
+/* The set of 32-bit store instructions */
+STD_GPST_AMODES(storerb, "Rt32", "memb","Store Byte",ATTRIBS(A_STORE,A_ARCHV2),"0",fSTORE(1,1,EA,fGETBYTE(0,RtV)))
+STD_GPST_AMODES(storerh, "Rt32", "memh","Store Half integer",ATTRIBS(A_STORE,A_ARCHV2),"1",fSTORE(1,2,EA,fGETHALF(0,RtV)))
+STD_GPST_AMODES(storerf, "Rt.H32", "memh","Store Upper Half integer",ATTRIBS(A_STORE,A_ARCHV2),"1",fSTORE(1,2,EA,fGETHALF(1,RtV)))
+STD_GPST_AMODES(storeri, "Rt32", "memw","Store Word",ATTRIBS(A_STORE,A_ARCHV2),"2",fSTORE(1,4,EA,RtV))
+STD_GPST_AMODES(storerd, "Rtt32","memd","Store Double integer",ATTRIBS(A_STORE,A_ARCHV2),"3",fSTORE(1,8,EA,RttV))
+STD_GPST_AMODES(storerinew, "Nt8.new", "memw","Store Word",ATTRIBS(A_STORE,A_ARCHV2),"2",fSTORE(1,4,EA,fNEWREG_ST(NtN)))
+STD_GPST_AMODES(storerbnew, "Nt8.new", "memb","Store Byte",ATTRIBS(A_STORE,A_ARCHV2),"0",fSTORE(1,1,EA,fGETBYTE(0,fNEWREG_ST(NtN))))
+STD_GPST_AMODES(storerhnew, "Nt8.new", "memh","Store Half integer",ATTRIBS(A_STORE,A_ARCHV2),"1",fSTORE(1,2,EA,fGETHALF(0,fNEWREG_ST(NtN))))
diff --git a/target/hexagon/imported/macros.def b/target/hexagon/imported/macros.def
new file mode 100755
index 000000000..e23f91562
--- /dev/null
+++ b/target/hexagon/imported/macros.def
@@ -0,0 +1,1666 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+DEF_MACRO(
+ LIKELY, /* NAME */
+ __builtin_expect((X),1), /* BEH */
+ () /* attribs */
+)
+
+DEF_MACRO(
+ UNLIKELY, /* NAME */
+ __builtin_expect((X),0), /* BEH */
+ () /* attribs */
+)
+
+DEF_MACRO(
+ CANCEL, /* macro name */
+ {if (thread->last_pkt) thread->last_pkt->slot_cancelled |= (1<<insn->slot); return;} , /* behavior */
+ (A_CONDEXEC)
+)
+
+DEF_MACRO(
+ LOAD_CANCEL, /* macro name */
+ {mem_general_load_cancelled(thread,EA,insn);CANCEL;} , /* behavior */
+ (A_CONDEXEC)
+)
+
+DEF_MACRO(
+ STORE_CANCEL, /* macro name */
+ {mem_general_store_cancelled(thread,EA,insn);CANCEL;} , /* behavior */
+ (A_CONDEXEC)
+)
+
+DEF_MACRO(
+ fMAX, /* macro name */
+ (((A) > (B)) ? (A) : (B)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fMIN, /* macro name */
+ (((A) < (B)) ? (A) : (B)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fABS, /* macro name */
+ (((A)<0)?(-(A)):(A)), /* behavior */
+ /* optional attributes */
+)
+
+
+/* Bit insert */
+DEF_MACRO(
+ fINSERT_BITS,
+ {
+ REG = ((REG) & ~(((fCONSTLL(1)<<(WIDTH))-1)<<(OFFSET))) | (((INVAL) & ((fCONSTLL(1)<<(WIDTH))-1)) << (OFFSET));
+ },
+ /* attribs */
+)
+
+/* Bit extract */
+DEF_MACRO(
+ fEXTRACTU_BITS,
+ (fZXTN(WIDTH,32,(INREG >> OFFSET))),
+ /* attribs */
+)
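+
+/* Example: extracting a WIDTH=8 field at OFFSET=16 from 0x12345678
+ * yields (0x12345678 >> 16) & 0xff = 0x34; fINSERT_BITS above is the
+ * inverse -- clear the field, then OR in the masked new value. */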
+
+DEF_MACRO(
+ fEXTRACTU_BIDIR,
+ (fZXTN(WIDTH,32,fBIDIR_LSHIFTR((INREG),(OFFSET),4_8))),
+ /* attribs */
+)
+
+DEF_MACRO(
+ fEXTRACTU_RANGE,
+ (fZXTN((HIBIT-LOWBIT+1),32,(INREG >> LOWBIT))),
+ /* attribs */
+)
+
+
+DEF_MACRO(
+ fINSERT_RANGE,
+ {
+ int offset=LOWBIT;
+ int width=HIBIT-LOWBIT+1;
+ /* clear bits where new bits go */
+ INREG &= ~(((fCONSTLL(1)<<width)-1)<<offset);
+ /* OR in new bits */
+ INREG |= ((INVAL & ((fCONSTLL(1)<<width)-1)) << offset);
+ },
+ /* attribs */
+)
+
+
+DEF_MACRO(
+ f8BITSOF,
+ ( (VAL) ? 0xff : 0x00),
+ /* attribs */
+)
+
+DEF_MACRO(
+ fLSBOLD,
+ ((VAL) & 1),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEW,
+ predlog_read(thread,PNUM),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEW0,
+ predlog_read(thread,0),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEW1,
+ predlog_read(thread,1),
+ ()
+)
+
+DEF_MACRO(
+ fLSBOLDNOT,
+ (!fLSBOLD(VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEWNOT,
+ (!fLSBNEW(PNUM)),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEW0NOT,
+ (!fLSBNEW0),
+ ()
+)
+
+DEF_MACRO(
+ fLSBNEW1NOT,
+ (!fLSBNEW1),
+ ()
+)
+
+DEF_MACRO(
+ fNEWREG,
+ ({if (newvalue_missing(thread,RNUM) ||
+ IS_CANCELLED(insn->new_value_producer_slot)) CANCEL; reglog_read(thread,RNUM);}),
+ (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY)
+)
+// A store-new whose new-value is missing or cancelled goes out as a zero-byte store in V65;
+// take advantage of the fact that reglog_read returns zero for an invalid rnum.
+DEF_MACRO(
+ fNEWREG_ST,
+ ({if (newvalue_missing(thread,RNUM) ||
+ IS_CANCELLED(insn->new_value_producer_slot)) { STORE_ZERO; RNUM = -1; }; reglog_read(thread,RNUM);}),
+ (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY)
+)
+
+DEF_MACRO(
+ fVSATUVALN,
+ ({ ((VAL) < 0) ? 0 : ((1LL<<(N))-1);}),
+ ()
+)
+
+DEF_MACRO(
+ fSATUVALN,
+ ({fSET_OVERFLOW(); ((VAL) < 0) ? 0 : ((1LL<<(N))-1);}),
+ ()
+)
+
+DEF_MACRO(
+ fSATVALN,
+ ({fSET_OVERFLOW(); ((VAL) < 0) ? (-(1LL<<((N)-1))) : ((1LL<<((N)-1))-1);}),
+ ()
+)
+
+DEF_MACRO(
+ fVSATVALN,
+ ({((VAL) < 0) ? (-(1LL<<((N)-1))) : ((1LL<<((N)-1))-1);}),
+ ()
+)
+
+DEF_MACRO(
+ fZXTN, /* macro name */
+ ((VAL) & ((1LL<<(N))-1)),
+ /* attribs */
+)
+
+DEF_MACRO(
+ fSXTN, /* macro name */
+ ((fZXTN(N,M,VAL) ^ (1LL<<((N)-1))) - (1LL<<((N)-1))),
+ /* attribs */
+)
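+
+/* fSXTN is the classic xor/subtract sign extension: mask to N bits,
+ * flip bit N-1, subtract 2**(N-1).  E.g. fSXTN(8,32,0xff) =
+ * (0xff ^ 0x80) - 0x80 = 0x7f - 0x80 = -1. */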
+
+DEF_MACRO(
+ fSATN,
+ ((fSXTN(N,64,VAL) == (VAL)) ? (VAL) : fSATVALN(N,VAL)),
+ ()
+)
+DEF_MACRO(
+ fVSATN,
+ ((fSXTN(N,64,VAL) == (VAL)) ? (VAL) : fVSATVALN(N,VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fADDSAT64,
+ {
+ size8u_t __a = fCAST8u(A);
+ size8u_t __b = fCAST8u(B);
+ size8u_t __sum = __a + __b;
+ size8u_t __xor = __a ^ __b;
+ const size8u_t __mask = 0x8000000000000000ULL;
+ if (__xor & __mask) {
+ /* Opposite signs, OK */
+ DST = __sum;
+ } else if ((__a ^ __sum) & __mask) {
+ /* Signs mismatch */
+ if (__sum & __mask) {
+ /* overflowed to negative, make max pos */
+ DST=0x7FFFFFFFFFFFFFFFLL; fSET_OVERFLOW();
+ } else {
+ /* overflowed to positive, make max neg */
+ DST=0x8000000000000000LL; fSET_OVERFLOW();
+ }
+ } else {
+ /* signs did not mismatch, OK */
+ DST = __sum;
+ }
+ },
+ ()
+)
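+
+/* The rule above: a+b can only overflow when the operands share a sign
+ * and the sum's sign differs, e.g. 0x7fffffffffffffffLL + 1 saturates
+ * back to 0x7fffffffffffffffLL and sets USR.OVF. */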
+
+DEF_MACRO(
+ fVSATUN,
+ ((fZXTN(N,64,VAL) == (VAL)) ? (VAL) : fVSATUVALN(N,VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fSATUN,
+ ((fZXTN(N,64,VAL) == (VAL)) ? (VAL) : fSATUVALN(N,VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fSATH,
+ (fSATN(16,VAL)),
+ ()
+)
+
+
+DEF_MACRO(
+ fSATUH,
+ (fSATUN(16,VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fVSATH,
+ (fVSATN(16,VAL)),
+ ()
+)
+
+DEF_MACRO(
+ fVSATUH,
+ (fVSATUN(16,VAL)),
+ ()
+)
+
+
+DEF_MACRO(
+ fSATUB,
+ (fSATUN(8,VAL)),
+ ()
+)
+DEF_MACRO(
+ fSATB,
+ (fSATN(8,VAL)),
+ ()
+)
+
+
+DEF_MACRO(
+ fVSATUB,
+ (fVSATUN(8,VAL)),
+ ()
+)
+DEF_MACRO(
+ fVSATB,
+ (fVSATN(8,VAL)),
+ ()
+)
+
+
+
+
+/*************************************/
+/* immediate extension */
+/*************************************/
+
+DEF_MACRO(
+ fIMMEXT,
+ (IMM = IMM),
+ (A_EXTENDABLE)
+)
+
+DEF_MACRO(
+ fMUST_IMMEXT,
+ fIMMEXT(IMM),
+ (A_EXTENDABLE)
+)
+
+DEF_MACRO(
+ fPCALIGN,
+ IMM=(IMM & ~PCALIGN_MASK),
+ (A_EXTENDABLE)
+)
+
+/*************************************/
+/* Read and Write Implicit Regs */
+/*************************************/
+
+DEF_MACRO(
+ fREAD_IREG, /* read modifier register */
+ (fSXTN(11,64,(((VAL) & 0xf0000000)>>21) | ((VAL>>17)&0x7f) )), /* behavior */
+ ()
+)
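+
+/* The I increment is split across the M register: bits 31:28 supply the
+ * top four bits and bits 23:17 the low seven of an 11-bit signed value.
+ * E.g. VAL = 0x10000000 gives fREAD_IREG = 0x080 = 128. */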
+
+DEF_MACRO(
+ fREAD_LR, /* read link register */
+ (READ_RREG(REG_LR)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fWRITE_LR, /* write lr */
+ WRITE_RREG(REG_LR,A), /* behavior */
+ (A_IMPLICIT_WRITES_LR)
+)
+
+DEF_MACRO(
+    fWRITE_FP,           /* write fp */
+ WRITE_RREG(REG_FP,A), /* behavior */
+ (A_IMPLICIT_WRITES_FP)
+)
+
+DEF_MACRO(
+ fWRITE_SP, /* write sp */
+ WRITE_RREG(REG_SP,A), /* behavior */
+ (A_IMPLICIT_WRITES_SP)
+)
+
+DEF_MACRO(
+ fREAD_SP, /* read stack pointer */
+ (READ_RREG(REG_SP)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_CSREG, /* read CS register */
+ (READ_RREG(REG_CSA+N)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_LC0, /* read loop count */
+ (READ_RREG(REG_LC0)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_LC1, /* read loop count */
+ (READ_RREG(REG_LC1)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_SA0, /* read start addr */
+ (READ_RREG(REG_SA0)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_SA1, /* read start addr */
+ (READ_RREG(REG_SA1)), /* behavior */
+ ()
+)
+
+
+DEF_MACRO(
+ fREAD_FP, /* read frame pointer */
+ (READ_RREG(REG_FP)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_GP, /* read global pointer */
+ (insn->extension_valid ? 0 : READ_RREG(REG_GP)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_PC, /* read PC */
+ (READ_RREG(REG_PC)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_NPC, /* read next PC */
+ (thread->next_PC & (0xfffffffe)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_P0, /* read Predicate 0 */
+ (READ_PREG(0)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fREAD_P3, /* read Predicate 3 */
+ (READ_PREG(3)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fCHECK_PCALIGN,
+ if (((A) & PCALIGN_MASK)) {
+ register_error_exception(thread,PRECISE_CAUSE_PC_NOT_ALIGNED,thread->Regs[REG_BADVA0],thread->Regs[REG_BADVA1],GET_SSR_FIELD(SSR_BVS),GET_SSR_FIELD(SSR_V0),GET_SSR_FIELD(SSR_V1),0);
+ },
+ ()
+)
+
+DEF_MACRO(
+ fWRITE_NPC, /* write next PC */
+ if (!thread->branch_taken) {
+ if (A != thread->next_PC) {
+ thread->next_pkt_guess=thread->last_pkt->taken_ptr;
+ }
+ fCHECK_PCALIGN(A);
+ thread->branched = 1; thread->branch_taken = 1; thread->next_PC = A; \
+ thread->branch_offset = insn->encoding_offset; thread->branch_opcode = insn->opcode;
+ }, /* behavior */
+ (A_COF)
+)
+
+DEF_MACRO(
+ fBRANCH,
+ fWRITE_NPC(LOC); fCOF_CALLBACK(LOC,TYPE),
+ ()
+)
+
+DEF_MACRO(
+ fJUMPR, /* A jumpr has executed */
+ {fBRANCH(TARGET,COF_TYPE_JUMPR);},
+ (A_INDIRECT)
+)
+
+DEF_MACRO(
+ fHINTJR, /* A hintjr instruction has executed */
+ { },
+)
+
+DEF_MACRO(
+ fCALL, /* Do a call */
+ if (!thread->branch_taken) {fBP_RAS_CALL(A); fWRITE_LR(fREAD_NPC()); fBRANCH(A,COF_TYPE_CALL);},
+ (A_COF,A_IMPLICIT_WRITES_LR,A_CALL)
+)
+
+DEF_MACRO(
+ fCALLR, /* Do a call Register */
+ if (!thread->branch_taken) {fBP_RAS_CALL(A); fWRITE_LR(fREAD_NPC()); fBRANCH(A,COF_TYPE_CALLR);},
+ (A_COF,A_IMPLICIT_WRITES_LR,A_CALL)
+)
+
+DEF_MACRO(
+    fWRITE_LOOP_REGS0,          /* write sa0,lc0 */
+ {WRITE_RREG(REG_LC0,COUNT);
+ WRITE_RREG(REG_SA0,START);},
+ (A_IMPLICIT_WRITES_LC0,A_IMPLICIT_WRITES_SA0)
+)
+
+DEF_MACRO(
+    fWRITE_LOOP_REGS1,          /* write sa1,lc1 */
+ {WRITE_RREG(REG_LC1,COUNT);
+ WRITE_RREG(REG_SA1,START);},
+ (A_IMPLICIT_WRITES_LC1,A_IMPLICIT_WRITES_SA1)
+)
+
+DEF_MACRO(
+ fWRITE_LC0,
+ WRITE_RREG(REG_LC0,VAL),
+ (A_IMPLICIT_WRITES_LC0)
+)
+
+DEF_MACRO(
+ fWRITE_LC1,
+ WRITE_RREG(REG_LC1,VAL),
+ (A_IMPLICIT_WRITES_LC1)
+)
+
+DEF_MACRO(
+ fCARRY_FROM_ADD,
+ carry_from_add64(A,B,C),
+ /* NOTHING */
+)
+
+DEF_MACRO(
+ fSET_OVERFLOW,
+ SET_USR_FIELD(USR_OVF,1),
+ ()
+)
+
+DEF_MACRO(
+ fSET_LPCFG,
+ SET_USR_FIELD(USR_LPCFG,(VAL)),
+ ()
+)
+
+
+DEF_MACRO(
+ fGET_LPCFG,
+ (GET_USR_FIELD(USR_LPCFG)),
+ ()
+)
+
+
+
+DEF_MACRO(
+ fWRITE_P0, /* write Predicate 0 */
+ WRITE_PREG(0,VAL), /* behavior */
+ (A_IMPLICIT_WRITES_P0)
+)
+
+DEF_MACRO(
+    fWRITE_P1,           /* write Predicate 1 */
+ WRITE_PREG(1,VAL), /* behavior */
+ (A_IMPLICIT_WRITES_P1)
+)
+
+DEF_MACRO(
+    fWRITE_P2,           /* write Predicate 2 */
+ WRITE_PREG(2,VAL), /* behavior */
+ (A_IMPLICIT_WRITES_P2)
+)
+
+DEF_MACRO(
+    fWRITE_P3,           /* write Predicate 3 */
+ WRITE_PREG(3,VAL), /* behavior */
+ (A_IMPLICIT_WRITES_P3)
+)
+
+DEF_MACRO(
+    fPART1,              /* execute only the first phase of a two-phase insn */
+ if (insn->part1) { WORK; return; }, /* behavior */
+ /* optional attributes */
+)
+
+
+/*************************************/
+/* Casting, Sign-Zero extension, etc */
+/*************************************/
+
+DEF_MACRO(
+ fCAST4u, /* macro name */
+ ((size4u_t)(A)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST4s, /* macro name */
+ ((size4s_t)(A)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST8u, /* macro name */
+ ((size8u_t)(A)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST8s, /* macro name */
+ ((size8s_t)(A)), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST2_2s, /* macro name */
+ ((size2s_t)(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST2_2u, /* macro name */
+ ((size2u_t)(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST4_4s, /* macro name */
+ ((size4s_t)(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST4_4u, /* macro name */
+ ((size4u_t)(A)),
+ /* optional attributes */
+)
+
+
+DEF_MACRO(
+ fCAST4_8s, /* macro name */
+ ((size8s_t)((size4s_t)(A))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST4_8u, /* macro name */
+ ((size8u_t)((size4u_t)(A))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST8_8s, /* macro name */
+ ((size8s_t)(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST8_8u, /* macro name */
+ ((size8u_t)(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCAST2_8s, /* macro name */
+ ((size8s_t)((size2s_t)(A))),
+ /* optional attributes */
+)
+DEF_MACRO(
+ fCAST2_8u, /* macro name */
+ ((size8u_t)((size2u_t)(A))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fZE8_16, /* zero-extend 8 to 16 */
+ ((size2s_t)((size1u_t)(A))),
+ /* optional attributes */
+)
+DEF_MACRO(
+ fSE8_16, /* sign-extend 8 to 16 */
+ ((size2s_t)((size1s_t)(A))),
+ /* optional attributes */
+)
+
+
+DEF_MACRO(
+ fSE16_32, /* sign-extend 16 to 32 */
+ ((size4s_t)((size2s_t)(A))), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fZE16_32, /* zero-extend 16 to 32 */
+ ((size4u_t)((size2u_t)(A))), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fSE32_64,
+ ( (size8s_t)((size4s_t)(A)) ), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fZE32_64,
+ ( (size8u_t)((size4u_t)(A)) ), /* behavior */
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fSE8_32, /* sign-extend 8 to 32 */
+ ((size4s_t)((size1s_t)(A))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fZE8_32, /* zero-extend 8 to 32 */
+ ((size4s_t)((size1u_t)(A))),
+ /* optional attributes */
+)
+
+/*************************************/
+/* DSP arithmetic support */
+/************************************/
+DEF_MACRO(
+    fMPY8UU,             /* multiply unsigned bytes */
+ (int)(fZE8_16(A)*fZE8_16(B)), /* behavior */
+ ()
+)
+DEF_MACRO(
+    fMPY8US,             /* multiply unsigned byte by signed byte */
+ (int)(fZE8_16(A)*fSE8_16(B)), /* behavior */
+ ()
+)
+DEF_MACRO(
+    fMPY8SU,             /* multiply signed byte by unsigned byte */
+ (int)(fSE8_16(A)*fZE8_16(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+    fMPY8SS,             /* multiply signed bytes */
+ (int)((short)(A)*(short)(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY16SS, /* multiply half integer */
+ fSE32_64(fSE16_32(A)*fSE16_32(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY16UU, /* multiply unsigned half integer */
+ fZE32_64(fZE16_32(A)*fZE16_32(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY16SU, /* multiply half integer */
+ fSE32_64(fSE16_32(A)*fZE16_32(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY16US, /* multiply half integer */
+ fMPY16SU(B,A),
+ ()
+)
+
+DEF_MACRO(
+    fMPY32SS,            /* multiply signed words */
+ (fSE32_64(A)*fSE32_64(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+    fMPY32UU,            /* multiply unsigned words */
+ (fZE32_64(A)*fZE32_64(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+    fMPY32SU,            /* multiply signed word by unsigned word */
+ (fSE32_64(A)*fZE32_64(B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY3216SS, /* multiply mixed precision */
+ (fSE32_64(A)*fSXTN(16,64,B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fMPY3216SU, /* multiply mixed precision */
+ (fSE32_64(A)*fZXTN(16,64,B)), /* behavior */
+ ()
+)
+
+DEF_MACRO(
+ fROUND, /* optional rounding */
+ (A+0x8000),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fCLIP,               /* clip to [-(2**U), 2**U - 1] */
+ { size4s_t maxv = (1<<U)-1;
+ size4s_t minv = -(1<<U);
+ DST = fMIN(maxv,fMAX(SRC,minv));
+ },
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fCRND,               /* convergent rounding adjust */
+ ((((A)&0x3)==0x3)?((A)+1):((A))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fRNDN, /* Rounding to a boundary */
+ ((((N)==0)?(A):(((fSE32_64(A))+(1<<((N)-1)))))),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fCRNDN, /* Rounding to a boundary */
+ (conv_round(A,N)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fADD128,             /* 128-bit add */
+ (add128(A, B)),
+ /* optional attributes */
+)
+DEF_MACRO(
+    fSUB128,             /* 128-bit subtract */
+ (sub128(A, B)),
+ /* optional attributes */
+)
+DEF_MACRO(
+    fSHIFTR128,          /* 128-bit shift right */
+ (shiftr128(A, B)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fSHIFTL128,          /* 128-bit shift left */
+ (shiftl128(A, B)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fAND128,             /* 128-bit bitwise and */
+ (and128(A, B)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+    fCAST8S_16S,         /* sign-extend 64-bit to 128-bit */
+ (cast8s_to_16s(A)),
+ /* optional attributes */
+)
+DEF_MACRO(
+    fCAST16S_8S,         /* truncate 128-bit to 64-bit */
+ (cast16s_to_8s(A)),
+ /* optional attributes */
+)
+
+DEF_MACRO(
+ fEA_RI, /* Calculate EA with Register + Immediate Offset */
+ do { EA=REG+IMM; fDOCHKPAGECROSS(REG,EA); } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fEA_RRs, /* Calculate EA with Register + Registers scaled Offset */
+ do { EA=REG+(REG2<<SCALE); fDOCHKPAGECROSS(REG,EA); } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fEA_IRs, /* Calculate EA with Immediate + Registers scaled Offset */
+ do { EA=IMM+(REG<<SCALE); fDOCHKPAGECROSS(IMM,EA); } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fEA_IMM, /* Calculate EA with Immediate */
+ EA=IMM,
+ ()
+)
+
+DEF_MACRO(
+ fEA_REG, /* Calculate EA with REGISTER */
+ EA=REG,
+ ()
+)
+
+DEF_MACRO(
+ fEA_BREVR, /* Calculate EA with bit reversed bottom of REGISTER */
+ EA=fbrev(REG),
+ ()
+)
+
+DEF_MACRO(
+    fEA_GPI,             /* Calculate EA with Global Pointer + Immediate */
+ do { EA=fREAD_GP()+IMM; fGP_DOCHKPAGECROSS(fREAD_GP(),EA); } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fPM_I, /* Post Modify Register by Immediate*/
+ do { REG = REG + IMM; } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fPM_M, /* Post Modify Register by M register */
+ do { REG = REG + MVAL; } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fPM_CIRI, /* Post Modify Register using Circular arithmetic by Immediate */
+ do { fcirc_add(REG,siV,MuV); } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fPM_CIRR, /* Post Modify Register using Circular arithmetic by register */
+ do { fcirc_add(REG,VAL,MuV); } while (0),
+ ()
+)
+
+
+
+DEF_MACRO(
+ fSCALE, /* scale by N */
+ (((size8s_t)(A))<<N),
+ /* optional attributes */
+)
+DEF_MACRO(
+ fVSATW, /* saturating to 32-bits*/
+ fVSATN(32,((long long)A)),
+ ()
+)
+
+DEF_MACRO(
+ fSATW, /* saturating to 32-bits*/
+ fSATN(32,((long long)A)),
+ ()
+)
+
+DEF_MACRO(
+ fVSAT, /* saturating to 32-bits*/
+ fVSATN(32,(A)),
+ ()
+)
+
+DEF_MACRO(
+ fSAT, /* saturating to 32-bits*/
+ fSATN(32,(A)),
+ ()
+)
+
+DEF_MACRO(
+ fSAT_ORIG_SHL, /* Saturating to 32-bits, with original value, for shift left */
+ ((((size4s_t)((fSAT(A)) ^ ((size4s_t)(ORIG_REG)))) < 0) ?
+ fSATVALN(32,((size4s_t)(ORIG_REG))) :
+ ((((ORIG_REG) > 0) && ((A) == 0)) ?
+ fSATVALN(32,(ORIG_REG)) :
+ fSAT(A))),
+ ()
+)
+
+DEF_MACRO(
+ fPASS,
+ A,
+)
+
+DEF_MACRO(
+    fRND,                /* round: ((A)+1)>>1 */
+ (((A)+1)>>1),
+)
+
+
+DEF_MACRO(
+ fBIDIR_SHIFTL,
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) >> ((-(SHAMT))-1)) >>1) : (fCAST##REGSTYPE(SRC) << (SHAMT))),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_ASHIFTL,
+ fBIDIR_SHIFTL(SRC,SHAMT,REGSTYPE##s),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_LSHIFTL,
+ fBIDIR_SHIFTL(SRC,SHAMT,REGSTYPE##u),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_ASHIFTL_SAT,
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE##s(SRC) >> ((-(SHAMT))-1)) >>1) : fSAT_ORIG_SHL(fCAST##REGSTYPE##s(SRC) << (SHAMT),(SRC))),
+ ()
+)
+
+
+DEF_MACRO(
+ fBIDIR_SHIFTR,
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) << ((-(SHAMT))-1)) << 1) : (fCAST##REGSTYPE(SRC) >> (SHAMT))),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_ASHIFTR,
+ fBIDIR_SHIFTR(SRC,SHAMT,REGSTYPE##s),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_LSHIFTR,
+ fBIDIR_SHIFTR(SRC,SHAMT,REGSTYPE##u),
+ ()
+)
+
+DEF_MACRO(
+ fBIDIR_ASHIFTR_SAT,
+ (((SHAMT) < 0) ? fSAT_ORIG_SHL((fCAST##REGSTYPE##s(SRC) << ((-(SHAMT))-1)) << 1,(SRC)) : (fCAST##REGSTYPE##s(SRC) >> (SHAMT))),
+ ()
+)
+
+DEF_MACRO(
+ fASHIFTR,
+ (fCAST##REGSTYPE##s(SRC) >> (SHAMT)),
+ /* */
+)
+
+DEF_MACRO(
+ fLSHIFTR,
+ (((SHAMT) >= 64)?0:(fCAST##REGSTYPE##u(SRC) >> (SHAMT))),
+ /* */
+)
+
+DEF_MACRO(
+ fROTL,
+ (((SHAMT)==0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) << (SHAMT)) | \
+ ((fCAST##REGSTYPE##u(SRC) >> ((sizeof(SRC)*8)-(SHAMT)))))),
+ /* */
+)
+
+DEF_MACRO(
+ fROTR,
+ (((SHAMT)==0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) >> (SHAMT)) | \
+ ((fCAST##REGSTYPE##u(SRC) << ((sizeof(SRC)*8)-(SHAMT)))))),
+ /* */
+)
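+
+/* The SHAMT==0 special case in fROTL/fROTR is not an optimization: it
+ * avoids the undefined C shift by the full type width that
+ * (sizeof(SRC)*8)-(SHAMT) would otherwise request. */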
+
+DEF_MACRO(
+ fASHIFTL,
+ (((SHAMT) >= 64)?0:(fCAST##REGSTYPE##s(SRC) << (SHAMT))),
+ /* */
+)
+
+/*************************************/
+/* Floating-Point Support */
+/************************************/
+
+DEF_MACRO(
+    fFLOAT,              /* raw bits to float value */
+ ({ union { float f; size4u_t i; } _fipun; _fipun.i = (A); _fipun.f; }), /* behavior */
+ (A_FPOP)
+)
+
+DEF_MACRO(
+    fUNFLOAT,            /* float value to raw bits */
+ ({ union { float f; size4u_t i; } _fipun; _fipun.f = (A); isnan(_fipun.f) ? 0xFFFFFFFFU : _fipun.i; }), /* behavior */
+ (A_FPOP)
+)
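+
+/* fFLOAT/fUNFLOAT move raw bits through a union (the usual type-punning
+ * idiom).  Note fUNFLOAT canonicalizes every NaN to 0xFFFFFFFF, the
+ * architecture's single NaN encoding, so fUNFLOAT(0.0f/0.0f) ==
+ * fSFNANVAL() below. */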
+
+DEF_MACRO(
+ fSFNANVAL,
+ 0xffffffff,
+ ()
+)
+
+DEF_MACRO(
+ fSFINFVAL,
+ (((A) & 0x80000000) | 0x7f800000),
+ ()
+)
+
+DEF_MACRO(
+ fSFONEVAL,
+ (((A) & 0x80000000) | fUNFLOAT(1.0)),
+ ()
+)
+
+DEF_MACRO(
+ fCHECKSFNAN,
+ do {
+ if (isnan(fFLOAT(A))) {
+ if ((fGETBIT(22,A)) == 0) fRAISEFLAGS(FE_INVALID);
+ DST = fSFNANVAL();
+ }
+ } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fCHECKSFNAN3,
+ do {
+ fCHECKSFNAN(DST,A);
+ fCHECKSFNAN(DST,B);
+ fCHECKSFNAN(DST,C);
+ } while (0),
+ ()
+)
+
+DEF_MACRO(
+ fSF_BIAS,
+ 127,
+ ()
+)
+
+DEF_MACRO(
+ fSF_MANTBITS,
+ 23,
+ ()
+)
+
+DEF_MACRO(
+ fSF_MUL_POW2,
+ (fUNFLOAT(fFLOAT(A) * fFLOAT((fSF_BIAS() + (B)) << fSF_MANTBITS()))),
+ ()
+)
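+
+/* fSF_MUL_POW2 synthesizes the float 2**B directly -- an exponent-only
+ * encoding (fSF_BIAS()+B) << fSF_MANTBITS() -- so the scaling multiply
+ * is exact whenever the result stays in range. */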
+
+DEF_MACRO(
+ fSF_GETEXP,
+ (((A) >> fSF_MANTBITS()) & 0xff),
+ ()
+)
+
+DEF_MACRO(
+ fSF_MAXEXP,
+ (254),
+ ()
+)
+
+DEF_MACRO(
+ fSF_RECIP_COMMON,
+ arch_sf_recip_common(&N,&D,&O,&A),
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fSF_INVSQRT_COMMON,
+ arch_sf_invsqrt_common(&N,&O,&A),
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fFMAFX,
+ internal_fmafx(A,B,C,fSXTN(8,64,ADJ)),
+ ()
+)
+
+DEF_MACRO(
+ fFMAF,
+ internal_fmafx(A,B,C,0),
+ ()
+)
+
+DEF_MACRO(
+ fSFMPY,
+ internal_mpyf(A,B),
+ ()
+)
+
+DEF_MACRO(
+ fMAKESF,
+ ((((SIGN) & 1) << 31) | (((EXP) & 0xff) << fSF_MANTBITS()) |
+ ((MANT) & ((1<<fSF_MANTBITS())-1))),
+ ()
+)
+
+
+DEF_MACRO(
+    fDOUBLE,             /* raw bits to double value */
+ ({ union { double f; size8u_t i; } _fipun; _fipun.i = (A); _fipun.f; }), /* behavior */
+ (A_FPOP)
+)
+
+DEF_MACRO(
+    fUNDOUBLE,           /* double value to raw bits */
+ ({ union { double f; size8u_t i; } _fipun; _fipun.f = (A); isnan(_fipun.f) ? 0xFFFFFFFFFFFFFFFFULL : _fipun.i; }), /* behavior */
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fDFNANVAL,
+ 0xffffffffffffffffULL,
+ ()
+)
+
+DEF_MACRO(
+ fDF_ISNORMAL,
+ (fpclassify(fDOUBLE(X)) == FP_NORMAL),
+ ()
+)
+
+DEF_MACRO(
+ fDF_ISDENORM,
+ (fpclassify(fDOUBLE(X)) == FP_SUBNORMAL),
+ ()
+)
+
+DEF_MACRO(
+ fDF_ISBIG,
+ (fDF_GETEXP(X) >= 512),
+ ()
+)
+
+DEF_MACRO(
+ fDF_MANTBITS,
+ 52,
+ ()
+)
+
+DEF_MACRO(
+ fDF_GETEXP,
+ (((A) >> fDF_MANTBITS()) & 0x7ff),
+ ()
+)
+
+DEF_MACRO(
+ fFMA,
+ internal_fma(A,B,C),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fDF_MPY_HH,
+ internal_mpyhh(A,B,ACC),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fFPOP_START,
+ arch_fpop_start(thread),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fFPOP_END,
+ arch_fpop_end(thread),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fFPSETROUND_NEAREST,
+ fesetround(FE_TONEAREST),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fFPSETROUND_CHOP,
+ fesetround(FE_TOWARDZERO),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fFPCANCELFLAGS,
+ feclearexcept(FE_ALL_EXCEPT),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fISINFPROD,
+ ((isinf(A) && isinf(B)) ||
+ (isinf(A) && isfinite(B) && ((B) != 0.0)) ||
+ (isinf(B) && isfinite(A) && ((A) != 0.0))),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fISZEROPROD,
+ ((((A) == 0.0) && isfinite(B)) || (((B) == 0.0) && isfinite(A))),
+ /* nothing */
+)
+
+DEF_MACRO(
+ fRAISEFLAGS,
+ arch_raise_fpflag(A),
+ /* NOTHING */
+)
+
+DEF_MACRO(
+ fDF_MAX,
+ (((A)==(B))
+ ? fDOUBLE(fUNDOUBLE(A) & fUNDOUBLE(B))
+ : fmax(A,B)),
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fDF_MIN,
+ (((A)==(B))
+ ? fDOUBLE(fUNDOUBLE(A) | fUNDOUBLE(B))
+ : fmin(A,B)),
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fSF_MAX,
+ (((A)==(B))
+ ? fFLOAT(fUNFLOAT(A) & fUNFLOAT(B))
+ : fmaxf(A,B)),
+ (A_FPOP)
+)
+
+DEF_MACRO(
+ fSF_MIN,
+ (((A)==(B))
+ ? fFLOAT(fUNFLOAT(A) | fUNFLOAT(B))
+ : fminf(A,B)),
+ (A_FPOP)
+)
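+
+/*
+ * The (A)==(B) arms of the min/max macros above exist for signed
+ * zeros: +0.0 and -0.0 compare equal, yet max must return +0.0 and
+ * min must return -0.0.  ANDing the bit patterns yields -0.0 only if
+ * both inputs are -0.0, and ORing yields -0.0 if either is:
+ *
+ *   max: 0x00000000 & 0x80000000 == 0x00000000   (+0.0)
+ *   min: 0x00000000 | 0x80000000 == 0x80000000   (-0.0)
+ */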
+
+/*************************************/
+/* Load/Store support */
+/*************************************/
+
+DEF_MACRO(fLOAD,
+ { DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE(thread,EA,insn); },
+ (A_LOAD,A_MEMLIKE)
+)
+
+DEF_MACRO(fMEMOP,
+ { memop##SIZE##_##FNTYPE(thread,EA,VALUE); },
+ (A_LOAD,A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fGET_FRAMEKEY,
+ READ_RREG(REG_FRAMEKEY),
+ ()
+)
+
+DEF_MACRO(fFRAME_SCRAMBLE,
+ ((VAL) ^ (fCAST8u(fGET_FRAMEKEY()) << 32)),
+ /* ATTRIBS */
+)
+
+DEF_MACRO(fFRAME_UNSCRAMBLE,
+ fFRAME_SCRAMBLE(VAL),
+ /* ATTRIBS */
+)
+
+DEF_MACRO(fFRAMECHECK,
+ sys_check_framelimit(thread,ADDR,EA),
+ ()
+)
+
+DEF_MACRO(fLOAD_LOCKED,
+ { DST = (size##SIZE##SIGN##_t)mem_load_locked(thread,EA,SIZE,insn); },
+ (A_LOAD,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTORE,
+ { MEM_STORE##SIZE(thread,EA,SRC,insn); },
+ (A_STORE,A_MEMLIKE)
+)
+
+
+DEF_MACRO(fSTORE_LOCKED,
+ { PRED = (mem_store_conditional(thread,EA,SRC,SIZE,insn) ? 0xff : 0); },
+ (A_STORE,A_MEMLIKE)
+)
+
+/*************************************/
+/* Functions to help with bytes */
+/*************************************/
+
+DEF_MACRO(fGETBYTE,
+ ((size1s_t)((SRC>>((N)*8))&0xff)),
+ /* nothing */
+)
+
+DEF_MACRO(fGETUBYTE,
+ ((size1u_t)((SRC>>((N)*8))&0xff)),
+ /* nothing */
+)
+
+DEF_MACRO(fSETBYTE,
+ {
+ DST = (DST & ~(0x0ffLL<<((N)*8))) | (((size8u_t)((VAL) & 0x0ffLL)) << ((N)*8));
+ },
+ /* nothing */
+)
+
+DEF_MACRO(fGETHALF,
+ ((size2s_t)((SRC>>((N)*16))&0xffff)),
+ /* nothing */
+)
+
+DEF_MACRO(fGETUHALF,
+ ((size2u_t)((SRC>>((N)*16))&0xffff)),
+ /* nothing */
+)
+
+DEF_MACRO(fSETHALF,
+ {
+ DST = (DST & ~(0x0ffffLL<<((N)*16))) | (((size8u_t)((VAL) & 0x0ffff)) << ((N)*16));
+ },
+ /* nothing */
+)
+
+
+
+DEF_MACRO(fGETWORD,
+ ((size8s_t)((size4s_t)((SRC>>((N)*32))&0x0ffffffffLL))),
+ /* nothing */
+)
+
+DEF_MACRO(fGETUWORD,
+ ((size8u_t)((size4u_t)((SRC>>((N)*32))&0x0ffffffffLL))),
+ /* nothing */
+)
+
+DEF_MACRO(fSETWORD,
+ {
+ DST = (DST & ~(0x0ffffffffLL<<((N)*32))) | (((VAL) & 0x0ffffffffLL) << ((N)*32));
+ },
+ /* nothing */
+)
+
+DEF_MACRO(fSETBIT,
+ {
+ DST = (DST & ~(1ULL<<(N))) | (((size8u_t)(VAL))<<(N));
+ },
+ /* nothing */
+)
+
+DEF_MACRO(fGETBIT,
+ (((SRC)>>N)&1),
+ /* nothing */
+)
+
+
+DEF_MACRO(fSETBITS,
+ do {
+ int j;
+ for (j=LO;j<=HI;j++) {
+ fSETBIT(j,DST,VAL);
+ }
+ } while (0),
+ /* nothing */
+)
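+
+/*
+ * All of the lane accessors above share the same clear-then-insert
+ * shape.  A generic C sketch of fSETBYTE (names and the 64-bit width
+ * are illustrative):
+ *
+ *   static inline uint64_t set_byte(uint64_t dst, int n, uint8_t val)
+ *   {
+ *       // Clear byte lane n, then OR the new value into position.
+ *       return (dst & ~(0xffULL << (n * 8)))
+ *              | ((uint64_t)val << (n * 8));
+ *   }
+ */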
+
+/*************************************/
+/* Used for parity, etc. */
+/*************************************/
+DEF_MACRO(fCOUNTONES_2,
+ count_ones_2(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fCOUNTONES_4,
+ count_ones_4(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fCOUNTONES_8,
+ count_ones_8(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fBREV_8,
+ reverse_bits_8(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fBREV_4,
+ reverse_bits_4(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fCL1_8,
+ count_leading_ones_8(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fCL1_4,
+ count_leading_ones_4(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fCL1_2,
+ count_leading_ones_2(VAL),
+ /* nothing */
+)
+
+DEF_MACRO(fINTERLEAVE,
+ interleave(ODD,EVEN),
+ /* nothing */
+)
+
+DEF_MACRO(fDEINTERLEAVE,
+ deinterleave(MIXED),
+ /* nothing */
+)
+
+DEF_MACRO(fHIDE,
+ A,
+ ()
+)
+
+DEF_MACRO(fCONSTLL,
+ A##LL,
+)
+
+/* Do the things in the parens, but don't print the parens. */
+DEF_MACRO(fECHO,
+ (A),
+ /* nothing */
+)
+
+
+/********************************************/
+/* OS interface and stop/wait */
+/********************************************/
+
+DEF_MACRO(fPAUSE,
+ {sys_pause(thread, insn->slot, IMM);},
+ ()
+)
+
+DEF_MACRO(fTRAP,
+ warn("Trap NPC=%x ",fREAD_NPC());
+ warn("Trap exception, PCYCLE=%lld TYPE=%d NPC=%x IMM=0x%x",thread->processor_ptr->pstats[pcycles],TRAPTYPE,fREAD_NPC(),IMM);
+ register_trap_exception(thread,fREAD_NPC(),TRAPTYPE,IMM);,
+ ()
+)
+
+DEF_MACRO(fALIGN_REG_FIELD_VALUE,
+ ((VAL)<<reg_field_info[FIELD].offset),
+ /* */
+)
+
+DEF_MACRO(fGET_REG_FIELD_MASK,
+ (((1<<reg_field_info[FIELD].width)-1)<<reg_field_info[FIELD].offset),
+ /* */
+)
+
+DEF_MACRO(fREAD_REG_FIELD,
+ fEXTRACTU_BITS(thread->Regs[REG_##REG],
+ reg_field_info[FIELD].width,
+ reg_field_info[FIELD].offset),
+ /* ATTRIBS */
+)
+
+DEF_MACRO(fGET_FIELD,
+ fEXTRACTU_BITS(VAL,
+ reg_field_info[FIELD].width,
+ reg_field_info[FIELD].offset),
+ /* ATTRIBS */
+)
+
+DEF_MACRO(fSET_FIELD,
+ fINSERT_BITS(VAL,
+ reg_field_info[FIELD].width,
+ reg_field_info[FIELD].offset,
+ (NEWVAL)),
+ /* ATTRIBS */
+)
+
+/********************************************/
+/* Cache Management */
+/********************************************/
+
+DEF_MACRO(fBARRIER,
+ {
+ sys_barrier(thread, insn->slot);
+ },
+ ()
+)
+
+DEF_MACRO(fSYNCH,
+ {
+ sys_sync(thread, insn->slot);
+ },
+ ()
+)
+
+DEF_MACRO(fISYNC,
+ {
+ sys_isync(thread, insn->slot);
+ },
+ ()
+)
+
+
+DEF_MACRO(fDCFETCH,
+ sys_dcfetch(thread, (REG), insn->slot),
+ (A_MEMLIKE)
+)
+
+DEF_MACRO(fICINVA,
+ {
+ arch_internal_flush(thread->processor_ptr, 0, 0xffffffff);
+ sys_icinva(thread, (REG),insn->slot);
+ },
+ (A_ICINVA)
+)
+
+DEF_MACRO(fL2FETCH,
+ sys_l2fetch(thread, ADDR,HEIGHT,WIDTH,STRIDE,FLAGS, insn->slot),
+ (A_MEMLIKE,A_L2FETCH)
+)
+
+DEF_MACRO(fDCCLEANA,
+ sys_dccleana(thread, (REG)),
+ (A_MEMLIKE)
+)
+
+DEF_MACRO(fDCCLEANINVA,
+ sys_dccleaninva(thread, (REG), insn->slot),
+ (A_MEMLIKE,A_DCCLEANINVA)
+)
+
+DEF_MACRO(fDCZEROA,
+ sys_dczeroa(thread, (REG)),
+ (A_MEMLIKE)
+)
+
+DEF_MACRO(fCHECKFORPRIV,
+ {sys_check_privs(thread); if (EXCEPTION_DETECTED) return; },
+ ()
+)
+
+DEF_MACRO(fCHECKFORGUEST,
+ {sys_check_guest(thread); if (EXCEPTION_DETECTED) return; },
+ ()
+)
+
+DEF_MACRO(fBRANCH_SPECULATE_STALL,
+ {
+ sys_speculate_branch_stall(thread, insn->slot, JUMP_COND(JUMP_PRED_SET),
+ SPEC_DIR,
+ DOTNEWVAL,
+ HINTBITNUM,
+ STRBITNUM,
+ 0,
+ thread->last_pkt->pkt_has_dual_jump,
+ insn->is_2nd_jump,
+ (thread->fetch_access.vaddr + insn->encoding_offset*4));
+ },
+ ()
+)
+
+DEF_MACRO(IV1DEAD,
+ ,
+ ()
+)
diff --git a/target/hexagon/imported/mmvec/encode_ext.def b/target/hexagon/imported/mmvec/encode_ext.def
new file mode 100644
index 000000000..6fbbe2c42
--- /dev/null
+++ b/target/hexagon/imported/mmvec/encode_ext.def
@@ -0,0 +1,794 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define CONCAT(A,B) A##B
+#define EXTEXTNAME(X) CONCAT(EXT_,X)
+#define DEF_ENC(TAG,STR) DEF_EXT_ENC(TAG,EXTEXTNAME(EXTNAME),STR)
+
+
+#ifndef NO_MMVEC
+DEF_ENC(V6_extractw, ICLASS_LD" 001 0 000sssss PP0uuuuu --1ddddd") /* coproc insn, returns Rd */
+#endif
+
+
+#ifndef NO_MMVEC
+
+
+
+DEF_CLASS32(ICLASS_NCJ" 1--- -------- PP------ --------",COPROC_VMEM)
+DEF_CLASS32(ICLASS_NCJ" 1000 0-0ttttt PPi--iii ---ddddd",BaseOffset_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPivviii ---ddddd",BaseOffset_if_Pv_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1000 0-1ttttt PPi--iii --------",BaseOffset_VMEM_Stores1)
+DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPi--iii 00------",BaseOffset_VMEM_Stores2)
+DEF_CLASS32(ICLASS_NCJ" 1000 1-1ttttt PPivviii --------",BaseOffset_if_Pv_VMEM_Stores)
+
+DEF_CLASS32(ICLASS_NCJ" 1001 0-0xxxxx PP---iii ---ddddd",PostImm_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP-vviii ---ddddd",PostImm_if_Pv_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1001 0-1xxxxx PP---iii --------",PostImm_VMEM_Stores1)
+DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP---iii 00------",PostImm_VMEM_Stores2)
+DEF_CLASS32(ICLASS_NCJ" 1001 1-1xxxxx PP-vviii --------",PostImm_if_Pv_VMEM_Stores)
+
+DEF_CLASS32(ICLASS_NCJ" 1011 0-0xxxxx PPu----- ---ddddd",PostM_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPuvv--- ---ddddd",PostM_if_Pv_VMEM_Loads)
+DEF_CLASS32(ICLASS_NCJ" 1011 0-1xxxxx PPu----- --------",PostM_VMEM_Stores1)
+DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPu----- 00------",PostM_VMEM_Stores2)
+DEF_CLASS32(ICLASS_NCJ" 1011 1-1xxxxx PPuvv--- --------",PostM_if_Pv_VMEM_Stores)
+
+DEF_CLASS32(ICLASS_NCJ" 110- 0------- PP------ --------",Z_Load)
+DEF_CLASS32(ICLASS_NCJ" 110- 1------- PP------ --------",Z_Load_if_Pv)
+
+DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--0-- ---vvvvv",Gather)
+DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--1-- -ssvvvvv",Gather_if_Qs)
+DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv ---wwwww",Scatter)
+DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv -----sss",Scatter_New)
+DEF_CLASS32(ICLASS_NCJ" 1111 1--ttttt PPuvvvvv -sswwwww",Scatter_if_Qs)
+
+
+DEF_FIELD32(ICLASS_NCJ" 1--- -!------ PP------ --------",NT,"NonTemporal")
+
+
+
+DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 000 --- ----- PP i --iii ----- ---","[#0] vmem(Rt+#s4)[:nt]")
+
+#define LDST_ENC(TAG,MAJ3,MID3,RREG,TINY6,MIN3,VREG) DEF_ENC(TAG, ICLASS_NCJ "1" #MAJ3 #MID3 #RREG "PP" #TINY6 #MIN3 #VREG)
+
+#define LDST_BO(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ai, 000,MID3,ttttt,i PRED iii,MIN3,VREG)
+#define LDST_PI(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_pi, 001,MID3,xxxxx,- PRED iii,MIN3,VREG)
+#define LDST_PM(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ppu,011,MID3,xxxxx,u PRED ---,MIN3,VREG)
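+
+// These helpers paste the 32-bit encoding together field by field.  An
+// informal expansion of the first plain load generated by
+// LDST_BASICLD(LDST_BO,V6_vL32b) below:
+//
+//   LDST_BO(V6_vL32b, 000,00,000,ddddd)
+//   -> LDST_ENC(V6_vL32b_ai, 000,000,ttttt,i 00 iii,000,ddddd)
+//   -> DEF_ENC(V6_vL32b_ai,
+//              ICLASS_NCJ "1" "000" "000" "ttttt" "PP" "i 00 iii"
+//                         "000" "ddddd")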
+
+#define LDST_BASICLD(OP,TAGPRE) \
+ OP(TAGPRE, 000,00,000,ddddd) \
+ OP(TAGPRE##_nt, 010,00,000,ddddd) \
+ OP(TAGPRE##_cur, 000,00,001,ddddd) \
+ OP(TAGPRE##_nt_cur, 010,00,001,ddddd) \
+ OP(TAGPRE##_tmp, 000,00,010,ddddd) \
+ OP(TAGPRE##_nt_tmp, 010,00,010,ddddd)
+
+#define LDST_BASICST(OP,TAGPRE) \
+ OP(TAGPRE, 001,--,000,sssss) \
+ OP(TAGPRE##_nt, 011,--,000,sssss) \
+ OP(TAGPRE##_new, 001,--,001,-0sss) \
+ OP(TAGPRE##_srls, 001,--,001,-1---) \
+ OP(TAGPRE##_nt_new, 011,--,001,--sss) \
+
+
+#define LDST_QPREDST(OP,TAGPRE) \
+ OP(TAGPRE##_qpred, 100,vv,000,sssss) \
+ OP(TAGPRE##_nt_qpred, 110,vv,000,sssss) \
+ OP(TAGPRE##_nqpred, 100,vv,001,sssss) \
+ OP(TAGPRE##_nt_nqpred,110,vv,001,sssss) \
+
+#define LDST_CONDLD(OP,TAGPRE) \
+ OP(TAGPRE##_pred, 100,vv,010,ddddd) \
+ OP(TAGPRE##_nt_pred, 110,vv,010,ddddd) \
+ OP(TAGPRE##_npred, 100,vv,011,ddddd) \
+ OP(TAGPRE##_nt_npred, 110,vv,011,ddddd) \
+ OP(TAGPRE##_cur_pred, 100,vv,100,ddddd) \
+ OP(TAGPRE##_nt_cur_pred, 110,vv,100,ddddd) \
+ OP(TAGPRE##_cur_npred, 100,vv,101,ddddd) \
+ OP(TAGPRE##_nt_cur_npred, 110,vv,101,ddddd) \
+ OP(TAGPRE##_tmp_pred, 100,vv,110,ddddd) \
+ OP(TAGPRE##_nt_tmp_pred, 110,vv,110,ddddd) \
+ OP(TAGPRE##_tmp_npred, 100,vv,111,ddddd) \
+ OP(TAGPRE##_nt_tmp_npred, 110,vv,111,ddddd) \
+
+#define LDST_PREDST(OP,TAGPRE,NT,MIN2) \
+ OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,sssss) \
+ OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,sssss)
+
+#define LDST_PREDSTNEW(OP,TAGPRE,NT,MIN2) \
+ OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,NT 0 sss) \
+ OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,NT 1 sss)
+
+// 0.0,vv,0--,sssss: pred st
+#define LDST_BASICPREDST(OP,TAGPRE) \
+ LDST_PREDST(OP,TAGPRE, 0,00) \
+ LDST_PREDST(OP,TAGPRE##_nt, 1,00) \
+ LDST_PREDSTNEW(OP,TAGPRE##_new, 0,01) \
+ LDST_PREDSTNEW(OP,TAGPRE##_nt_new, 1,01)
+
+
+
+LDST_BASICLD(LDST_BO,V6_vL32b)
+LDST_CONDLD(LDST_BO,V6_vL32b)
+LDST_BASICLD(LDST_PI,V6_vL32b)
+LDST_CONDLD(LDST_PI,V6_vL32b)
+LDST_BASICLD(LDST_PM,V6_vL32b)
+LDST_CONDLD(LDST_PM,V6_vL32b)
+
+// Loads
+
+LDST_BO(V6_vL32Ub,000,00,111,ddddd)
+// Stores
+LDST_BASICST(LDST_BO,V6_vS32b)
+
+
+LDST_BO(V6_vS32Ub,001,--,111,sssss)
+
+
+
+
+// Byte Enabled Stores
+LDST_QPREDST(LDST_BO,V6_vS32b)
+
+// Scalar Predicated Stores
+LDST_BASICPREDST(LDST_BO,V6_vS32b)
+
+
+LDST_PREDST(LDST_BO,V6_vS32Ub,0,11)
+
+
+
+
+DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 001 --- ----- PP - ----- ddddd ---","[#1] vmem(Rx++#s3)[:nt]")
+
+// Loads
+LDST_PI(V6_vL32Ub,000,00,111,ddddd)
+
+// Stores
+LDST_BASICST(LDST_PI,V6_vS32b)
+
+
+
+LDST_PI(V6_vS32Ub,001,--,111,sssss)
+
+
+// Byte Enabled Stores
+LDST_QPREDST(LDST_PI,V6_vS32b)
+
+
+// Scalar Predicated Stores
+LDST_BASICPREDST(LDST_PI,V6_vS32b)
+
+
+LDST_PREDST(LDST_PI,V6_vS32Ub,0,11)
+
+
+
+DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 011 --- ----- PP - ----- ----- ---","[#3] vmem(Rx++#M)[:nt]")
+
+// Loads
+LDST_PM(V6_vL32Ub,000,00,111,ddddd)
+
+// Stores
+LDST_BASICST(LDST_PM,V6_vS32b)
+
+
+
+LDST_PM(V6_vS32Ub,001,--,111,sssss)
+
+// Byte Enabled Stores
+LDST_QPREDST(LDST_PM,V6_vS32b)
+
+// Scalar Predicated Stores
+LDST_BASICPREDST(LDST_PM,V6_vS32b)
+
+
+LDST_PREDST(LDST_PM,V6_vS32Ub,0,11)
+
+
+
+DEF_ENC(V6_vaddcarrysat, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 0ss ddddd") //
+DEF_ENC(V6_vaddcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 0ee ddddd") //
+DEF_ENC(V6_vsubcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 1ee ddddd") //
+DEF_ENC(V6_vsatdw, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 111 ddddd") //
+
+DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 111 --- ----- PP - ----- ----- ---","[#6] vgather,vscatter")
+DEF_ENC(V6_vgathermw, ICLASS_NCJ" 1 111 000 ttttt PP u --000 --- vvvvv") // vtmp.w=vmem(Rt32,Mu2,Vv32.w).w
+DEF_ENC(V6_vgathermh, ICLASS_NCJ" 1 111 000 ttttt PP u --001 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vv32.h).h
+DEF_ENC(V6_vgathermhw, ICLASS_NCJ" 1 111 000 ttttt PP u --010 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h
+
+
+DEF_ENC(V6_vgathermwq, ICLASS_NCJ" 1 111 000 ttttt PP u --100 -ss vvvvv") // if (Qs4) vtmp.w=vmem(Rt32,Mu2,Vv32.w).w
+DEF_ENC(V6_vgathermhq, ICLASS_NCJ" 1 111 000 ttttt PP u --101 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vv32.h).h
+DEF_ENC(V6_vgathermhwq, ICLASS_NCJ" 1 111 000 ttttt PP u --110 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h
+
+
+
+DEF_ENC(V6_vscattermw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 000 wwwww") // vmem(Rt32,Mu2,Vv32.w)=Vw32.w
+DEF_ENC(V6_vscattermh, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 001 wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h
+DEF_ENC(V6_vscattermhw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 010 wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h
+
+DEF_ENC(V6_vscattermw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 100 wwwww") // vmem(Rt32,Mu2,Vv32.w) += Vw32.w
+DEF_ENC(V6_vscattermh_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 101 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h
+DEF_ENC(V6_vscattermhw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 110 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h
+
+
+DEF_ENC(V6_vscattermwq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.w)=Vw32.w
+DEF_ENC(V6_vscattermhq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 1ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h
+DEF_ENC(V6_vscattermhwq, ICLASS_NCJ" 1 111 101 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h
+
+
+
+
+
+DEF_CLASS32(ICLASS_CJ" 1--- -------- PP------ --------",COPROC_VX)
+
+
+
+/***************************************************************
+*
+* Group #0, Uses Q6 Rt8: new in v61
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 000 --- ----- PP - ----- ----- ---","[#0] Vd32=(Vu32, Vv32, Rt8)")
+DEF_ENC(V6_vasrhbsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vasruwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vasrwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vlutvvb_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vlutvwh_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vasruhubrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vasruwuhsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 100 ddddd") //
+DEF_ENC(V6_vasruhubsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 101 ddddd") //
+
+/***************************************************************
+*
+* Group #1, Uses Q6 Rt32
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vd32=(Vu32, Rt32)")
+DEF_ENC(V6_vtmpyb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vtmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vdmpyhb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vrmpyub, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vrmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vdsaduh, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vdmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vdmpybus_dv, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vdmpyhsusat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vdmpyhsuisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vdmpyhsat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vdmpyhisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vdmpyhb_dv, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vmpybus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vmpabus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vmpahb, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vmpyh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vmpyhss, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vmpyhsrs, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vmpyuh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vrmpybusi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 10i ddddd") //
+DEF_ENC(V6_vrsadubi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 11i ddddd") //
+
+DEF_ENC(V6_vmpyihb, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vror, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vmpyuhe, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vmpabuu, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vlut4, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 100 ddddd") //
+
+
+DEF_ENC(V6_vasrw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vasrh, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vaslw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vaslh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vlsrw, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vlsrh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vlsrb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 011 ddddd") //
+
+DEF_ENC(V6_vmpauhb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vmpyiwub, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vmpyiwh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vmpyiwb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_lvsplatw, ICLASS_CJ" 1 001 101 ttttt PP 0 ----0 001 ddddd") //
+
+
+
+DEF_ENC(V6_pred_scalar2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -01dd") //
+DEF_ENC(V6_vandvrt, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 010 -10dd") //
+DEF_ENC(V6_pred_scalar2v2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -11dd") //
+
+DEF_ENC(V6_vtmpyhb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vandqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --0uu 101 ddddd") //
+DEF_ENC(V6_vandnqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --1uu 101 ddddd") //
+
+DEF_ENC(V6_vrmpyubi, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 11i ddddd") //
+
+DEF_ENC(V6_vmpyub, ICLASS_CJ" 1 001 110 ttttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_lvsplath, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 001 ddddd") //
+DEF_ENC(V6_lvsplatb, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 010 ddddd") //
+
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vx32=(Vu32, Rt32)")
+DEF_ENC(V6_vtmpyb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vtmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vtmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vdmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 011 xxxxx") //
+DEF_ENC(V6_vrmpyub_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vrmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vdmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vdmpybus_dv_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 111 xxxxx") //
+
+DEF_ENC(V6_vdmpyhsusat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vdmpyhsuisat_acc,ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vdmpyhisat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vdmpyhsat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 011 xxxxx") //
+DEF_ENC(V6_vdmpyhb_dv_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vmpybus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vmpabus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vmpahb_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 111 xxxxx") //
+
+DEF_ENC(V6_vmpyhsat_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vmpyuh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vmpyiwb_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vmpyiwh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 011 xxxxx") //
+DEF_ENC(V6_vrmpybusi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 10i xxxxx") //
+DEF_ENC(V6_vrsadubi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 11i xxxxx") //
+
+DEF_ENC(V6_vdsaduh_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vmpyihb_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vaslw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vandqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --0uu 011 xxxxx") //
+DEF_ENC(V6_vandnqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --1uu 011 xxxxx") //
+DEF_ENC(V6_vandvrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 100 ---xx") //
+DEF_ENC(V6_vasrw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vrmpyubi_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 11i xxxxx") //
+
+DEF_ENC(V6_vmpyub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vmpyiwub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vmpauhb_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vmpyuhe_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 011 xxxxx")
+DEF_ENC(V6_vmpahhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vmpauhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vmpsuhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vasrh_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 111 xxxxx") //
+
+
+
+
+DEF_ENC(V6_vinsertwr, ICLASS_CJ" 1 001 101 ttttt PP 1 ----- 001 xxxxx")
+
+DEF_ENC(V6_vmpabuu_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vaslh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vmpyh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 110 xxxxx") //
+
+
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] (Vx32, Vy32, Rt32)")
+DEF_ENC(V6_vshuff, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 001 xxxxx") //
+DEF_ENC(V6_vdeal, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 010 xxxxx") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 010 --- ----- PP - ----- ----- ---","[#2] if (Ps) Vd=Vu")
+DEF_ENC(V6_vcmov, ICLASS_CJ" 1 010 000 ----- PP - uuuuu -ss ddddd")
+DEF_ENC(V6_vncmov, ICLASS_CJ" 1 010 001 ----- PP - uuuuu -ss ddddd")
+DEF_ENC(V6_vnccombine, ICLASS_CJ" 1 010 010 vvvvv PP - uuuuu -ss ddddd")
+DEF_ENC(V6_vccombine, ICLASS_CJ" 1 010 011 vvvvv PP - uuuuu -ss ddddd")
+
+DEF_ENC(V6_vrotr, ICLASS_CJ" 1 010 100 vvvvv PP 1 uuuuu 111 ddddd")
+DEF_ENC(V6_vasr_into, ICLASS_CJ" 1 010 101 vvvvv PP 1 uuuuu 111 xxxxx")
+
+/***************************************************************
+*
+* Group #3, Uses Q6 Rt8
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 011 --- ----- PP - ----- ----- ---","[#3] Vd32=(Vu32, Vv32, Rt8)")
+DEF_ENC(V6_valignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vlalignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vasrwh, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vasrwhsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vasrwhrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vasrwuhsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vasrhubsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vasrhubrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vasrhbrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 000 ddddd") //
+DEF_ENC(V6_vlutvvb, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 001 ddddd")
+DEF_ENC(V6_vshuffvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 011 ddddd") //
+DEF_ENC(V6_vdealvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 100 ddddd") //
+DEF_ENC(V6_vlutvvb_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 101 xxxxx")
+DEF_ENC(V6_vlutvwh, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 110 ddddd")
+DEF_ENC(V6_vlutvwh_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 111 xxxxx")
+
+
+
+/***************************************************************
+*
+* Group #4, No Q6 regs
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 0 ----- ----- ---","[#4] Vd32=(Vu32, Vv32)")
+DEF_ENC(V6_vrmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vrmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vrmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vdmpyhvsat, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vmpyhv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vmpyuhv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vmpyhvsrs, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vmpyhus, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vmpabusv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vmpyih, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vand, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vxor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vaddw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vaddubsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vadduhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vaddhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vaddwsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vsubb, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vsubh, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vsubw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vsububsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vsubuhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vsubhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vsubwsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vaddb_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vaddh_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vaddw_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vaddubsat_dv,ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vadduhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vaddhsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vaddwsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vsubb_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vsubh_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vsubw_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vsububsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vsubuhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vsubhsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vsubwsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vaddubh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vadduhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vaddhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vsububh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vsubuhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vsubhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vabsdiffub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vabsdiffh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vabsdiffuh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vabsdiffw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vavgub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vavguh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vavgh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vavgw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vnavgub, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vnavgh, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vnavgw, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vavgubrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vavguhrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vavghrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vavgwrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vmpabuuv, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 1 ----- ----- ---","[#4] Vx32=(Vu32, Vv32)")
+DEF_ENC(V6_vrmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vrmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vrmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vdmpyhvsat_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 011 xxxxx") //
+DEF_ENC(V6_vmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vmpyhv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 111 xxxxx") //
+
+DEF_ENC(V6_vmpyuhv_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vmpyhus_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vaddhw_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vmpyowh_64_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 011 xxxxx")
+DEF_ENC(V6_vmpyih_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vmpyiewuh_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vmpyowh_sacc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vmpyowh_rnd_sacc,ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 111 xxxxx") //
+
+DEF_ENC(V6_vmpyiewh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 000 xxxxx") //
+
+DEF_ENC(V6_vadduhw_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vaddubh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 101 xxxxx") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 100 ----- PP 1 ----- ----- ---","[#4] Qx4=(Vu32, Vv32)")
+// Grouped by element size (lsbs), comparison operation (next-lsbs), and combining operation (msbs)
+DEF_ENC(V6_veqb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 000xx") //
+DEF_ENC(V6_veqh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 001xx") //
+DEF_ENC(V6_veqw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 010xx") //
+
+DEF_ENC(V6_vgtb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 100xx") //
+DEF_ENC(V6_vgth_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 101xx") //
+DEF_ENC(V6_vgtw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 110xx") //
+
+DEF_ENC(V6_vgtub_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 000xx") //
+DEF_ENC(V6_vgtuh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 001xx") //
+DEF_ENC(V6_vgtuw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 010xx") //
+
+DEF_ENC(V6_veqb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 000xx") //
+DEF_ENC(V6_veqh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 001xx") //
+DEF_ENC(V6_veqw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 010xx") //
+
+DEF_ENC(V6_vgtb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 100xx") //
+DEF_ENC(V6_vgth_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 101xx") //
+DEF_ENC(V6_vgtw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 110xx") //
+
+DEF_ENC(V6_vgtub_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 000xx") //
+DEF_ENC(V6_vgtuh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 001xx") //
+DEF_ENC(V6_vgtuw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 010xx") //
+
+DEF_ENC(V6_veqb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 000xx") //
+DEF_ENC(V6_veqh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 001xx") //
+DEF_ENC(V6_veqw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 010xx") //
+
+DEF_ENC(V6_vgtb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 100xx") //
+DEF_ENC(V6_vgth_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 101xx") //
+DEF_ENC(V6_vgtw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 110xx") //
+
+DEF_ENC(V6_vgtub_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 000xx") //
+DEF_ENC(V6_vgtuh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 001xx") //
+DEF_ENC(V6_vgtuw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 010xx") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 101 ----- PP 1 ----- ----- ---","[#4] Qx4,Vd32=(Vu32, Vv32)")
+DEF_ENC(V6_vaddcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 0xx ddddd") //
+DEF_ENC(V6_vsubcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 1xx ddddd") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 11- ----- PP 1 ----- ----- ---","[#4] Vx32|=(Vu32, Vv32,#)")
+DEF_ENC(V6_vlutvvb_oracci, ICLASS_CJ" 1 100 110 vvvvv PP 1 uuuuu iii xxxxx") //
+DEF_ENC(V6_vlutvwh_oracci, ICLASS_CJ" 1 100 111 vvvvv PP 1 uuuuu iii xxxxx") //
+
+
+
+/***************************************************************
+*
+* Group #5, Reserved/Deprecated. Uses Q6 Rx. Stupid FFT.
+*
+****************************************************************/
+
+
+
+
+/***************************************************************
+*
+* Group #6, No Q6 regs
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32")
+DEF_ENC(V6_vabsh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vabsh_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vabsw, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vabsw_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vnot, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vdealh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vdealb, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vunpackub, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vunpackuh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vunpackb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vunpackh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vabsb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vabsb_sat, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vshuffh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vshuffb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vzb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vzh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vsb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vsh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vcl0w, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vpopcounth, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vcl0h, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 111 ddddd") //
+
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ---11 PP 0 ----- ----- ---","[#6] Qd4=Qt4, Qs4")
+DEF_ENC(V6_pred_and, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 000dd") //
+DEF_ENC(V6_pred_or, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 001dd") //
+DEF_ENC(V6_pred_not, ICLASS_CJ" 1 110 --0 ---11 PP 0 ---ss 000 010dd") //
+DEF_ENC(V6_pred_xor, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 011dd") //
+DEF_ENC(V6_pred_or_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 100dd") //
+DEF_ENC(V6_pred_and_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 101dd") //
+DEF_ENC(V6_shuffeqh, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 110dd") //
+DEF_ENC(V6_shuffeqw, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 111dd") //
+
+DEF_ENC(V6_vnormamtw, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vnormamth, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 101 ddddd") //
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --1 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32,Vv32")
+DEF_ENC(V6_vlutvvbi, ICLASS_CJ" 1 110 001 vvvvv PP 0 uuuuu iii ddddd")
+DEF_ENC(V6_vlutvwhi, ICLASS_CJ" 1 110 011 vvvvv PP 0 uuuuu iii ddddd")
+
+DEF_ENC(V6_vaddbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 000 ddddd")
+DEF_ENC(V6_vsubbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 001 ddddd")
+DEF_ENC(V6_vadduwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 010 ddddd")
+DEF_ENC(V6_vsubuwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 011 ddddd")
+DEF_ENC(V6_vaddububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 100 ddddd")
+DEF_ENC(V6_vsubububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 101 ddddd")
+DEF_ENC(V6_vmpyewuh_64, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 110 ddddd")
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","[#6] Vx32=Vu32")
+DEF_ENC(V6_vunpackob, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vunpackoh, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 001 xxxxx") //
+//DEF_ENC(V6_vunpackow, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 010 xxxxx") //
+
+DEF_ENC(V6_vhist, ICLASS_CJ" 1 110 --0 ---00 PP 1 -000- 100 -----")
+DEF_ENC(V6_vwhist256, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0010 100 -----")
+DEF_ENC(V6_vwhist256_sat, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0011 100 -----")
+DEF_ENC(V6_vwhist128, ICLASS_CJ" 1 110 --0 ---00 PP 1 -010- 100 -----")
+DEF_ENC(V6_vwhist128m, ICLASS_CJ" 1 110 --0 ---00 PP 1 -011i 100 -----")
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","[#6] if (Qv4) Vx32=Vu32")
+DEF_ENC(V6_vaddbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vaddhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vaddwq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vaddbnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 011 xxxxx") //
+DEF_ENC(V6_vaddhnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 100 xxxxx") //
+DEF_ENC(V6_vaddwnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 101 xxxxx") //
+DEF_ENC(V6_vsubbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 110 xxxxx") //
+DEF_ENC(V6_vsubhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 111 xxxxx") //
+
+DEF_ENC(V6_vsubwq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 000 xxxxx") //
+DEF_ENC(V6_vsubbnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 001 xxxxx") //
+DEF_ENC(V6_vsubhnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 010 xxxxx") //
+DEF_ENC(V6_vsubwnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 011 xxxxx") //
+
+DEF_ENC(V6_vhistq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --00- 100 -----")
+DEF_ENC(V6_vwhist256q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --010 100 -----")
+DEF_ENC(V6_vwhist256q_sat, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --011 100 -----")
+DEF_ENC(V6_vwhist128q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --10- 100 -----")
+DEF_ENC(V6_vwhist128qm, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --11i 100 -----")
+
+
+DEF_ENC(V6_vandvqv, ICLASS_CJ" 1 110 vv0 ---11 PP 1 uuuuu 000 ddddd")
+DEF_ENC(V6_vandvnqv, ICLASS_CJ" 1 110 vv0 ---11 PP 1 uuuuu 001 ddddd")
+
+
+DEF_ENC(V6_vprefixqb, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --000 010 ddddd") //
+DEF_ENC(V6_vprefixqh, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --001 010 ddddd") //
+DEF_ENC(V6_vprefixqw, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --010 010 ddddd") //
+
+
+
+
+DEF_ENC(V6_vassign, ICLASS_CJ" 1 110 --0 ---11 PP 1 uuuuu 111 ddddd")
+
+DEF_ENC(V6_valignbi, ICLASS_CJ" 1 110 001 vvvvv PP 1 uuuuu iii ddddd")
+DEF_ENC(V6_vlalignbi, ICLASS_CJ" 1 110 011 vvvvv PP 1 uuuuu iii ddddd")
+DEF_ENC(V6_vswap, ICLASS_CJ" 1 110 101 vvvvv PP 1 uuuuu -tt ddddd") //
+DEF_ENC(V6_vmux, ICLASS_CJ" 1 110 111 vvvvv PP 1 uuuuu -tt ddddd") //
+
+
+
+/***************************************************************
+*
+* Group #7, No Q6 regs
+*
+****************************************************************/
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 --- ----- PP 0 ----- ----- ---","[#7] Vd32=(Vu32, Vv32)")
+DEF_ENC(V6_vaddbsat, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vminub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vminuh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vminh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vminw, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vmaxub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vmaxuh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vmaxh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 111 ddddd") //
+
+
+DEF_ENC(V6_vaddclbh, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 000 ddddd") //
+DEF_ENC(V6_vaddclbw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 001 ddddd") //
+
+DEF_ENC(V6_vavguw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 010 ddddd") //
+DEF_ENC(V6_vavguwrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 011 ddddd") //
+DEF_ENC(V6_vavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 100 ddddd") //
+DEF_ENC(V6_vavgbrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 101 ddddd") //
+DEF_ENC(V6_vnavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 110 ddddd") //
+
+
+DEF_ENC(V6_vmaxw, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vsubbsat, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vrdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vminb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vmaxb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vsatuwuh, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vdealb4w, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 111 ddddd") //
+
+
+DEF_ENC(V6_vmpyowh_rnd, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vshuffeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vshuffob, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vshufeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vshufoh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vshufoeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vshufoeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vcombine, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vmpyieoh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vadduwsat, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vsathub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vsatwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vroundwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 100 ddddd")
+DEF_ENC(V6_vroundwuh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 101 ddddd")
+DEF_ENC(V6_vroundhb, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 110 ddddd")
+DEF_ENC(V6_vroundhub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 111 ddddd")
+
+DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 100 ----- PP - ----- ----- ---","[#7] Qd4=(Vu32, Vv32)")
+DEF_ENC(V6_veqb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 000dd") //
+DEF_ENC(V6_veqh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 001dd") //
+DEF_ENC(V6_veqw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 010dd") //
+
+DEF_ENC(V6_vgtb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 100dd") //
+DEF_ENC(V6_vgth, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 101dd") //
+DEF_ENC(V6_vgtw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 110dd") //
+
+DEF_ENC(V6_vgtub, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 000dd") //
+DEF_ENC(V6_vgtuh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 001dd") //
+DEF_ENC(V6_vgtuw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 010dd") //
+
+
+DEF_ENC(V6_vasrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vlsrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vlsrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vasrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vaslwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vaslhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vaddb, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vaddh, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 111 ddddd") //
+
+
+DEF_ENC(V6_vmpyiewuh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 000 ddddd")
+DEF_ENC(V6_vmpyiowh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 001 ddddd")
+DEF_ENC(V6_vpackeb, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vpackeh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vsubuwsat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vpackhub_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 101 ddddd") //
+DEF_ENC(V6_vpackhb_sat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 110 ddddd") //
+DEF_ENC(V6_vpackwuh_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 111 ddddd") //
+
+DEF_ENC(V6_vpackwh_sat, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 000 ddddd") //
+DEF_ENC(V6_vpackob, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 001 ddddd") //
+DEF_ENC(V6_vpackoh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 010 ddddd") //
+DEF_ENC(V6_vrounduhub, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 011 ddddd") //
+DEF_ENC(V6_vrounduwuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 100 ddddd") //
+DEF_ENC(V6_vmpyewuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 101 ddddd")
+DEF_ENC(V6_vmpyowh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 111 ddddd")
+
+
+#endif /* NO MMVEC */
diff --git a/target/hexagon/imported/mmvec/ext.idef b/target/hexagon/imported/mmvec/ext.idef
new file mode 100644
index 000000000..8ca5a606e
--- /dev/null
+++ b/target/hexagon/imported/mmvec/ext.idef
@@ -0,0 +1,2606 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/******************************************************************************
+ *
+ * HOYA: MULTIMEDIA INSTRUCTIONS
+ *
+ ******************************************************************************/
+
+#ifndef EXTINSN
+#define EXTINSN Q6INSN
+#define __SELF_DEF_EXTINSN 1
+#endif
+
+#ifndef NO_MMVEC
+
+#define DO_FOR_EACH_CODE(WIDTH, CODE) \
+{ \
+ fHIDE(int i;) \
+ fVFOREACH(WIDTH, i) {\
+ CODE ;\
+ } \
+}
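+
+/*
+ * DO_FOR_EACH_CODE lets each instruction be written element-wise: it
+ * wraps one lane's behavior in a loop over every WIDTH-bit element of
+ * the vector.  Informally, a 32-bit lane body such as
+ *
+ *   VdV.w[i] = VuV.w[i] + VvV.w[i]
+ *
+ * expands to roughly
+ *
+ *   { int i; fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i]; } }
+ */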
+
+
+
+
+#define ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+
+#define ITERATOR_INSN2_ANY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+#define ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+
+#define ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+
+#define ITERATOR_INSN_SHIFT_SLOT_VV_LATE(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_SHIFT_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+#define ITERATOR_INSN2_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_MPY_SLOT(WIDTH,TAG, SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, \
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, \
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_MPY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_MPY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN2_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX2,DESCR,CODE)
+
+
+#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+
+
+
+#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC2(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_CVI_VX_VSRC0_IS_DST), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_SLOT2_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_RESTRICT_SLOT2ONLY), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_VHISTLIKE(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), \
+DESCR, fHIDE(mmvector_t input;) input = fTMPVDATA(); DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+
+
+
+/******************************************************************************************
+*
+* MMVECTOR MEMORY OPERATIONS - NO NAPALI V1
+*
+*******************************************************************************************/
+
+
+
+#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+
+
+#define ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+
+#define ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+
+#define ITERATOR_INSN_MPY_SLOT_NOV1(WIDTH,TAG, SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, \
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_PERMUTE_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_PERMUTE_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DEP_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+
+#define ITERATOR_INSN2_PERMUTE_SLOT_DEP_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT_DEP_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEPT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \
+EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \
+DESCR, DO_FOR_EACH_CODE(WIDTH, CODE))
+
+#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE)
+
+#define NARROWING_SHIFT_NOV1(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \
+ITERATOR_INSN_SHIFT_SLOT_NOV1(ITERSIZE,TAG, \
+"Vd32." #DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \
+"Vector shift right and shuffle", \
+ fHIDE(int )shamt = RtV & SHAMTMASK; \
+ DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \
+ DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt)))
+
+#define MMVEC_AVGS_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average & Round "DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i]))
+
+#define MMVEC_AVGU_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average & Round "DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i]))
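+
+/* Illustrative note (not part of the imported file): fVAVGU/fVAVGS compute
+ * (a + b) >> 1 in a widened type, and the :rnd forms add 1 before the shift.
+ * Assuming 8-bit lanes: vavgub(3, 4) -> 3, while vavgub(3, 4):rnd -> 4.
+ */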
+
+
+
+/******************************************************************************************
+*
+* MMVECTOR MEMORY OPERATIONS
+*
+*******************************************************************************************/
+
+#define MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,BEH) \
+EXTINSN(V6_##TAG##_pi, SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_I(RxV,VEC_SCALE(siV)); }) \
+EXTINSN(V6_##TAG##_ai, SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_RI(RtV,VEC_SCALE(siV)); BEH;}) \
+EXTINSN(V6_##TAG##_ppu, SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_M(RxV,MuV); }) \
+
+
+#define MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \
+EXTINSN(V6_##TAG##_pred_pi, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \
+EXTINSN(V6_##TAG##_pred_ai, "if (" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \
+EXTINSN(V6_##TAG##_pred_ppu, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}}) \
+
+#define MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \
+EXTINSN(V6_##TAG##_npred_pi, "if (!" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \
+EXTINSN(V6_##TAG##_npred_ai, "if (!" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLDNOT(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \
+EXTINSN(V6_##TAG##_npred_ppu, "if (!" #SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}})
+
+#define MMVEC_COND_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \
+MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \
+MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH)
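+
+/* Expansion sketch (illustrative, not part of the imported file): each use of
+ * MMVEC_COND_EACH_EA(vL32b, ...) emits six predicated addressing variants:
+ *   if (Pv4) Vd32=vmem(Rx32++#s3)   -- post-increment by scaled immediate
+ *   if (Pv4) Vd32=vmem(Rt32+#s4)    -- base plus scaled offset
+ *   if (Pv4) Vd32=vmem(Rx32++Mu2)   -- post-increment by modifier register
+ * plus the same three under "if (!Pv4)"; a false predicate CANCELs the slot.
+ */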
+
+
+#define VEC_SCALE(X) ((X)*fVECSIZE())
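+
+/* Usage note (illustrative): vector memory immediates are in units of whole
+ * vectors, so an #s3 value of 2 advances the pointer by 2*fVECSIZE() bytes
+ * (256 bytes, assuming a 128-byte HVX vector). The parentheses above keep the
+ * expansion correct if X is ever a compound expression such as siV+1.
+ */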
+
+
+#define MMVEC_LD(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmem","",fLOADMMV(EA,VdV))
+#define MMVEC_LDC(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_cur,DESCR,ATTRIB,NT,"Vd32.cur=vmem","",fLOADMMV(EA,VdV))
+#define MMVEC_LDT(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_tmp,DESCR,ATTRIB,NT,"Vd32.tmp=vmem","",fLOADMMV(EA,VdV))
+#define MMVEC_LDU(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmemu","",fLOADMMVU(EA,VdV))
+
+
+#define MMVEC_STQ(TAG,DESCR,ATTRIB,NT) \
+MMVEC_EACH_EA(TAG##_qpred,DESCR,ATTRIB,NT,"if (Qv4) vmem","=Vs32",fSTOREMMVQ(EA,VsV,QvV)) \
+MMVEC_EACH_EA(TAG##_nqpred,DESCR,ATTRIB,NT,"if (!Qv4) vmem","=Vs32",fSTOREMMVNQ(EA,VsV,QvV))
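+
+/* Behavior sketch (illustrative, not part of the imported file): the _qpred
+ * form stores only the byte lanes whose Qv4 predicate bit is set (fSTOREMMVQ),
+ * while _nqpred stores the complementary byte lanes (fSTOREMMVNQ); the
+ * remaining bytes of the destination line are left untouched.
+ */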
+
+/****************************************************************
+* MAPPING FOR VMEMs
+****************************************************************/
+
+#define ATTR_VMEM A_EXTENSION,A_CVI,A_CVI_VM
+#define ATTR_VMEMU A_EXTENSION,A_CVI,A_CVI_VM,A_CVI_VP
+
+
+MMVEC_LD(vL32b, "Aligned Vector Load", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),)
+MMVEC_LDC(vL32b, "Aligned Vector Load Cur", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_NEW,A_CVI_VA),)
+MMVEC_LDT(vL32b, "Aligned Vector Load Tmp", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),)
+
+MMVEC_COND_EACH_EA(vL32b,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),,"Vd32=vmem",,Pv,fLOADMMV(EA,VdV);)
+MMVEC_COND_EACH_EA(vL32b_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",,Pv,fLOADMMV(EA,VdV);)
+MMVEC_COND_EACH_EA(vL32b_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),,"Vd32.tmp=vmem",,Pv,fLOADMMV(EA,VdV);)
+
+MMVEC_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",fSTOREMMV(EA,VsV))
+MMVEC_COND_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",Pv,fSTOREMMV(EA,VsV))
+
+
+MMVEC_STQ(vS32b, "Aligned Vector Store", ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),)
+
+MMVEC_LDU(vL32Ub, "Unaligned Vector Load", ATTRIBS(ATTR_VMEMU,A_LOAD,A_RESTRICT_NOSLOT1),)
+
+MMVEC_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",fSTOREMMVU(EA,VsV))
+
+MMVEC_COND_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",Pv,fSTOREMMVU(EA,VsV))
+
+MMVEC_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN)))
+
+// V65 store release, zero-byte store
+MMVEC_EACH_EA(vS32b_srls,"Aligned Vector Scatter Release",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_SCATTER_RELEASE,A_CVI_NEW,A_RESTRICT_SLOT0ONLY),,"vmem",":scatter_release",fSTORERELEASE(EA,0))
+
+
+
+MMVEC_COND_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN)))
+
+
+/******************************************************************************************
+*
+* MMVECTOR MEMORY OPERATIONS - NON TEMPORAL
+*
+*******************************************************************************************/
+
+#define ATTR_VMEM_NT A_EXTENSION,A_CVI,A_CVI_VM
+
+MMVEC_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",fSTOREMMV(EA,VsV))
+MMVEC_COND_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",Pv,fSTOREMMV(EA,VsV))
+
+MMVEC_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN)))
+MMVEC_COND_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN)))
+
+
+MMVEC_STQ(vS32b_nt, "Aligned Vector Store - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt")
+
+MMVEC_LD(vL32b_nt, "Aligned Vector Load - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_VA),":nt")
+MMVEC_LDC(vL32b_nt, "Aligned Vector Load Cur - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_NEW,A_CVI_VA),":nt")
+MMVEC_LDT(vL32b_nt, "Aligned Vector Load Tmp - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_TMP),":nt")
+
+MMVEC_COND_EACH_EA(vL32b_nt,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA),,"Vd32=vmem",":nt",Pv,fLOADMMV(EA,VdV);)
+MMVEC_COND_EACH_EA(vL32b_nt_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",":nt",Pv,fLOADMMV(EA,VdV);)
+MMVEC_COND_EACH_EA(vL32b_nt_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM_NT,A_CVI_TMP),,"Vd32.tmp=vmem",":nt",Pv,fLOADMMV(EA,VdV);)
+
+
+#undef VEC_SCALE
+
+
+/***************************************************
+ * Vector Alignment
+ ************************************************/
+
+#define VALIGNB(SHIFT) \
+ fHIDE(int i;) \
+ for(i = 0; i < fVBYTES(); i++) {\
+ VdV.ub[i] = (i+SHIFT>=fVBYTES()) ? VuV.ub[i+SHIFT-fVBYTES()] : VvV.ub[i+SHIFT];\
+ }
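+
+/* Worked example (illustrative, assuming fVBYTES() == 128): VALIGNB(3) reads
+ * bytes starting at offset 3 of the {Vu:Vv} byte concatenation:
+ *   VdV.ub[0..124]   = VvV.ub[3..127];
+ *   VdV.ub[125..127] = VuV.ub[0..2];
+ * valign takes the shift from the low bits of Rt8/#u3; vlalign uses
+ * fVBYTES() minus that amount, i.e. it aligns from the left.
+ */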
+
+EXTINSN(V6_valignb, "Vd32=valign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = RtV & (fVBYTES()-1);
+ VALIGNB(shift)
+})
+EXTINSN(V6_vlalignb, "Vd32=vlalign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = fVBYTES() - (RtV & (fVBYTES()-1));
+ VALIGNB(shift)
+})
+EXTINSN(V6_valignbi, "Vd32=valign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ VALIGNB(uiV)
+})
+EXTINSN(V6_vlalignbi,"Vd32=vlalign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ unsigned shift = fVBYTES() - uiV;
+ VALIGNB(shift)
+})
+
+EXTINSN(V6_vror, "Vd32=vror(Vu32,Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),
+"Align Two vectors by Rt32 as control",
+{
+ fHIDE(int k;)
+ for (k=0;k<fVBYTES();k++) {
+ VdV.ub[k] = VuV.ub[(k+RtV)&(fVBYTES()-1)];
+ }
+ })
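+
+/* Worked example (illustrative, assuming fVBYTES() == 128): vror rotates the
+ * byte lanes right by RtV modulo the vector length, e.g. with RtV == 1:
+ *   VdV.ub[0] = VuV.ub[1], ..., VdV.ub[126] = VuV.ub[127],
+ *   VdV.ub[127] = VuV.ub[0].
+ */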
+
+
+
+
+
+
+
+/**************************************************************
+* Unpack elements with zero/sign extend and cross lane permute
+***************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(8,vunpackub, "Vdd32=vunpackub(Vu32)", "Vdd32.uh=vunpack(Vu32.ub)", "Unpack byte with zero-extend", fVARRAY_ELEMENT_ACCESS(VddV, uh, i) = fZE8_16( VuV.ub[i]))
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(8,vunpackb, "Vdd32=vunpackb(Vu32)", "Vdd32.h=vunpack(Vu32.b)", "Unpack bytes with sign-extend", fVARRAY_ELEMENT_ACCESS(VddV, h, i) = fSE8_16( VuV.b[i] ))
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(16,vunpackuh, "Vdd32=vunpackuh(Vu32)", "Vdd32.uw=vunpack(Vu32.uh)", "Unpack halves with zero-extend", fVARRAY_ELEMENT_ACCESS(VddV, uw, i) = fZE16_32(VuV.uh[i]))
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(16,vunpackh, "Vdd32=vunpackh(Vu32)", "Vdd32.w=vunpack(Vu32.h)", "Unpack halves with sign-extend", fVARRAY_ELEMENT_ACCESS(VddV, w, i) = fSE16_32(VuV.h[i] ))
+
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(8, vunpackob, "Vxx32|=vunpackob(Vu32)", "Vxx32.h|=vunpacko(Vu32.b)", "Unpack byte to odd bytes ", fVARRAY_ELEMENT_ACCESS(VxxV, uh, i) |= fZE8_16( VuV.ub[i])<<8)
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(16,vunpackoh, "Vxx32|=vunpackoh(Vu32)", "Vxx32.w|=vunpacko(Vu32.h)", "Unpack halves to odd halves", fVARRAY_ELEMENT_ACCESS(VxxV, uw, i) |= fZE16_32(VuV.uh[i])<<16)
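+
+/* Worked example (illustrative): vunpackub widens each unsigned byte into a
+ * halfword lane of the vector pair, while vunpackb sign-extends instead:
+ *   VuV.ub[i] = 0x80  ->  Vdd uh lane = 0x0080  (vunpackub)
+ *   VuV.b[i]  = 0x80  ->  Vdd h lane  = 0xFF80  (vunpackb)
+ * fVARRAY_ELEMENT_ACCESS performs the cross-lane placement across the Vdd pair.
+ */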
+
+
+/**************************************************************
+* Pack elements and cross lane permute
+***************************************************************/
+
+ ITERATOR_INSN2_PERMUTE_SLOT(16, vpackeb, "Vd32=vpackeb(Vu32,Vv32)", "Vd32.b=vpacke(Vu32.h,Vv32.h)",
+ "Pack bytes",
+ VdV.ub[i] = fGETUBYTE(0, VvV.uh[i]);
+ VdV.ub[i+fVELEM(16)] = fGETUBYTE(0, VuV.uh[i]))
+
+ ITERATOR_INSN2_PERMUTE_SLOT(32, vpackeh, "Vd32=vpackeh(Vu32,Vv32)", "Vd32.h=vpacke(Vu32.w,Vv32.w)",
+ "Pack halfwords",
+ VdV.uh[i] = fGETUHALF(0, VvV.uw[i]);
+ VdV.uh[i+fVELEM(32)] = fGETUHALF(0, VuV.uw[i]))
+
+ ITERATOR_INSN2_PERMUTE_SLOT(16, vpackob, "Vd32=vpackob(Vu32,Vv32)", "Vd32.b=vpacko(Vu32.h,Vv32.h)",
+ "Pack bytes",
+ VdV.ub[i] = fGETUBYTE(1, VvV.uh[i]);
+ VdV.ub[i+fVELEM(16)] = fGETUBYTE(1, VuV.uh[i]))
+
+ ITERATOR_INSN2_PERMUTE_SLOT(32, vpackoh, "Vd32=vpackoh(Vu32,Vv32)", "Vd32.h=vpacko(Vu32.w,Vv32.w)",
+ "Pack halfwords",
+ VdV.uh[i] = fGETUHALF(1, VvV.uw[i]);
+ VdV.uh[i+fVELEM(32)] = fGETUHALF(1, VuV.uw[i]))
+
+
+
+ITERATOR_INSN2_PERMUTE_SLOT(16, vpackhub_sat, "Vd32=vpackhub(Vu32,Vv32):sat", "Vd32.ub=vpack(Vu32.h,Vv32.h):sat",
+ "Pack ubytes with saturation",
+ VdV.ub[i] = fVSATUB(VvV.h[i]);
+ VdV.ub[i+fVELEM(16)] = fVSATUB(VuV.h[i]))
+
+
+ITERATOR_INSN2_PERMUTE_SLOT(16, vpackhb_sat, "Vd32=vpackhb(Vu32,Vv32):sat", "Vd32.b=vpack(Vu32.h,Vv32.h):sat",
+ "Pack bytes with saturation",
+ VdV.b[i] = fVSATB(VvV.h[i]);
+ VdV.b[i+fVELEM(16)] = fVSATB(VuV.h[i]))
+
+
+ITERATOR_INSN2_PERMUTE_SLOT(32, vpackwuh_sat, "Vd32=vpackwuh(Vu32,Vv32):sat", "Vd32.uh=vpack(Vu32.w,Vv32.w):sat",
+ "Pack ubytes with saturation",
+ VdV.uh[i] = fVSATUH(VvV.w[i]);
+ VdV.uh[i+fVELEM(32)] = fVSATUH(VuV.w[i]))
+
+ITERATOR_INSN2_PERMUTE_SLOT(32, vpackwh_sat, "Vd32=vpackwh(Vu32,Vv32):sat", "Vd32.h=vpack(Vu32.w,Vv32.w):sat",
+ "Pack bytes with saturation",
+ VdV.h[i] = fVSATH(VvV.w[i]);
+ VdV.h[i+fVELEM(32)] = fVSATH(VuV.w[i]))
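+
+/* Worked example (illustrative): the :sat packs clamp each wider source lane
+ * into the narrower destination type before shuffling, e.g. for vpackhub_sat:
+ *   h = 0x01FF (511) -> ub = 0xFF;   h = 0xFFFF (-1) -> ub = 0x00.
+ * The plain vpacke/vpacko forms instead select the even (low) or odd (high)
+ * sub-element with no saturation.
+ */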
+
+
+
+
+
+/**************************************************************
+* Zero/Sign Extend with in-lane permute
+***************************************************************/
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(16,vzb,"Vdd32=vzxtb(Vu32)","Vdd32.uh=vzxt(Vu32.ub)",
+"Vector Zero Extend Bytes",
+ VddV.v[0].uh[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i]));
+ VddV.v[1].uh[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])))
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(16,vsb,"Vdd32=vsxtb(Vu32)","Vdd32.h=vsxt(Vu32.b)",
+"Vector Sign Extend Bytes",
+ VddV.v[0].h[i] = fSE8_16(fGETBYTE(0, VuV.h[i]));
+ VddV.v[1].h[i] = fSE8_16(fGETBYTE(1, VuV.h[i])))
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(32,vzh,"Vdd32=vzxth(Vu32)","Vdd32.uw=vzxt(Vu32.uh)",
+"Vector Zero Extend halfwords",
+ VddV.v[0].uw[i] = fZE16_32(fGETUHALF(0, VuV.uw[i]));
+ VddV.v[1].uw[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])))
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(32,vsh,"Vdd32=vsxth(Vu32)","Vdd32.w=vsxt(Vu32.h)",
+"Vector Sign Extend halfwords",
+ VddV.v[0].w[i] = fSE16_32(fGETHALF(0, VuV.w[i]));
+ VddV.v[1].w[i] = fSE16_32(fGETHALF(1, VuV.w[i])))
+
+
+/**********************************************************************
+*
+*
+*
+* MMVECTOR REDUCTION
+*
+*
+*
+**********************************************************************/
+
+/********************************************
+* 2-WAY REDUCTION - UNSIGNED BYTE BY BYTE
+********************************************/
+
+
+ITERATOR_INSN2_MPY_SLOT(16,vdmpybus,"Vd32=vdmpybus(Vu32,Rt32)","Vd32.h=vdmpy(Vu32.ub,Rt32.b)",
+"Vector Dual Multiply-Accumulates unsigned bytes by bytes",
+ VdV.h[i] = fMPY8US( fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i) % 4, RtV));
+ VdV.h[i] += fMPY8US( fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV)))
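+
+/* Worked example (illustrative): each output halfword is a 2-tap dot product
+ * of adjacent unsigned bytes with signed coefficient bytes from Rt32, the
+ * coefficient pair alternating between {b0,b1} and {b2,b3}:
+ *   VdV.h[0] = VuV.ub[0]*Rt.b[0] + VuV.ub[1]*Rt.b[1];
+ *   VdV.h[1] = VuV.ub[2]*Rt.b[2] + VuV.ub[3]*Rt.b[3];
+ */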
+
+ITERATOR_INSN2_MPY_SLOT(16,vdmpybus_acc,"Vx32+=vdmpybus(Vu32,Rt32)","Vx32.h+=vdmpy(Vu32.ub,Rt32.b)",
+"Vector Dual Multiply-Accumulates unsigned bytes by bytes, and accumulate",
+ VxV.h[i] += fMPY8US( fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i) % 4, RtV));
+ VxV.h[i] += fMPY8US( fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV)))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vdmpybus_dv,"Vdd32=vdmpybus(Vuu32,Rt32)","Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)",
+"Vector Dual Multiply-Accumulates unsigned bytes by bytes, and accumulate Sliding Window Reduction",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]),fGETBYTE((2*i) % 4, RtV));
+ VddV.v[0].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]),fGETBYTE((2*i+1)%4, RtV));
+
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]),fGETBYTE((2*i) % 4, RtV));
+ VddV.v[1].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[1].uh[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vdmpybus_dv_acc,"Vxx32+=vdmpybus(Vuu32,Rt32)","Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)",
+"Vector Dual Multiply-Accumulates unsigned bytes by bytes, and accumulate Sliding Window Reduction",
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]),fGETBYTE((2*i) % 4, RtV));
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]),fGETBYTE((2*i+1)%4, RtV));
+
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]),fGETBYTE((2*i) % 4, RtV));
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[1].uh[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+
+
+/********************************************
+* 2-WAY REDUCTION - HALF BY BYTE
+********************************************/
+ITERATOR_INSN2_MPY_SLOT(32,vdmpyhb,"Vd32=vdmpyhb(Vu32,Rt32)","Vd32.w=vdmpy(Vu32.h,Rt32.b)",
+"Dual-Vector 2-Element Half x Byte Reduction with Sliding Window Overlap",
+ VdV.w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VdV.w[i] += fMPY16SS(fGETHALF(1, VuV.w[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT(32,vdmpyhb_acc,"Vx32+=vdmpyhb(Vu32,Rt32)","Vx32.w+=vdmpy(Vu32.h,Rt32.b)",
+"Dual-Vector 2-Element Half x Byte Reduction with Sliding Window Overlap",
+ VxV.w[i] += fMPY16SS(fGETHALF(0, VuV.w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VxV.w[i] += fMPY16SS(fGETHALF(1, VuV.w[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhb_dv,"Vdd32=vdmpyhb(Vuu32,Rt32)","Vdd32.w=vdmpy(Vuu32.h,Rt32.b)",
+"Dual-Vector 2-Element Half x Byte Reduction with Sliding Window Overlap",
+ VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VddV.v[0].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]),fGETBYTE((2*i+1)%4, RtV));
+
+ VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VddV.v[1].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhb_dv_acc,"Vxx32+=vdmpyhb(Vuu32,Rt32)","Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)",
+"Dual-Vector 2-Element Half x Byte Reduction with Sliding Window Overlap",
+ VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VxxV.v[0].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]),fGETBYTE((2*i+1)%4, RtV));
+
+ VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]),fGETBYTE((2*i+0)%4, RtV));
+ VxxV.v[1].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]),fGETBYTE((2*i+1)%4, RtV)))
+
+
+
+
+
+/********************************************
+* 2-WAY REDUCTION - HALF BY HALF
+********************************************/
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhvsat,"Vd32=vdmpyh(Vu32,Vv32):sat","Vd32.w=vdmpy(Vu32.h,Vv32.h):sat",
+"Vector halfword multiply, accumulate pairs, sat to word",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0, VvV.w[i]));
+ accum += fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1, VvV.w[i]));
+ VdV.w[i] = fVSATW(accum))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhvsat_acc,"Vx32+=vdmpyh(Vu32,Vv32):sat","Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat",
+"Vector halfword multiply, accumulate pairs, sat to word",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0, VvV.w[i]));
+ accum += fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1, VvV.w[i]));
+ VxV.w[i] = fVSATW(VxV.w[i]+accum))
+
+
+/* VDMPYH */
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsat,"Vd32=vdmpyh(Vu32,Rt32):sat","Vd32.w=vdmpy(Vu32.h,Rt32.h):sat",
+"Vector halfword multiply, accumulate pairs, saturate to word",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SS(fGETHALF(0, VuV.w[i]),fGETHALF(0, RtV));
+ accum += fMPY16SS(fGETHALF(1, VuV.w[i]),fGETHALF(1, RtV));
+ VdV.w[i] = fVSATW(accum))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsat_acc,"Vx32+=vdmpyh(Vu32,Rt32):sat","Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat",
+"Vector halfword multiply, accumulate pairs, saturate to word",
+ fHIDE(size8s_t) accum = VxV.w[i];
+ accum += fMPY16SS(fGETHALF(0, VuV.w[i]),fGETHALF(0, RtV));
+ accum += fMPY16SS(fGETHALF(1, VuV.w[i]),fGETHALF(1, RtV));
+ VxV.w[i] = fVSATW(accum))
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhisat,"Vd32=vdmpyh(Vuu32,Rt32):sat","Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat",
+"Dual Vector Signed Halfword by Signed Halfword 2-Way Reduction to Halfword with saturation",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]),fGETHALF(0,RtV));
+ accum += fMPY16SS(fGETHALF(0,VuuV.v[1].w[i]),fGETHALF(1,RtV));
+ VdV.w[i] = fVSATW(accum))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhisat_acc,"Vx32+=vdmpyh(Vuu32,Rt32):sat","Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat",
+"Dual Vector Signed Halfword by Signed Halfword 2-Way Reduction to Halfword with accumulation and saturation",
+ fHIDE(size8s_t) accum = VxV.w[i];
+ accum += fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]),fGETHALF(0,RtV));
+ accum += fMPY16SS(fGETHALF(0,VuuV.v[1].w[i]),fGETHALF(1,RtV));
+ VxV.w[i] = fVSATW(accum))
+
+
+
+
+
+
+
+/* VDMPYHSU */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsusat,"Vd32=vdmpyhsu(Vu32,Rt32):sat","Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat",
+"Vector halfword multiply, accumulate pairs, saturate to word",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SU(fGETHALF(0, VuV.w[i]),fGETUHALF(0, RtV));
+ accum += fMPY16SU(fGETHALF(1, VuV.w[i]),fGETUHALF(1, RtV));
+ VdV.w[i] = fVSATW(accum))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsusat_acc,"Vx32+=vdmpyhsu(Vu32,Rt32):sat","Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat",
+"Vector halfword multiply, accumulate pairs, saturate to word",
+ fHIDE(size8s_t) accum=VxV.w[i];
+ accum += fMPY16SU(fGETHALF(0, VuV.w[i]),fGETUHALF(0, RtV));
+ accum += fMPY16SU(fGETHALF(1, VuV.w[i]),fGETUHALF(1, RtV));
+ VxV.w[i] = fVSATW(accum))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsuisat,"Vd32=vdmpyhsu(Vuu32,Rt32,#1):sat","Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat",
+"Dual Vector Signed Halfword by Signed Halfword 2-Way Reduction to Halfword with saturation",
+ fHIDE(size8s_t accum;)
+ accum = fMPY16SU(fGETHALF(1,VuuV.v[0].w[i]),fGETUHALF(0,RtV));
+ accum += fMPY16SU(fGETHALF(0,VuuV.v[1].w[i]),fGETUHALF(1,RtV));
+ VdV.w[i] = fVSATW(accum))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdmpyhsuisat_acc,"Vx32+=vdmpyhsu(Vuu32,Rt32,#1):sat","Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat",
+"Dual Vector Signed Halfword by Signed Halfword 2-Way Reduction to Halfword with accumulation and saturation",
+ fHIDE(size8s_t) accum=VxV.w[i];
+ accum += fMPY16SU(fGETHALF(1, VuuV.v[0].w[i]),fGETUHALF(0,RtV));
+ accum += fMPY16SU(fGETHALF(0, VuuV.v[1].w[i]),fGETUHALF(1,RtV));
+ VxV.w[i] = fVSATW(accum))
+
+
+
+/********************************************
+* 3-WAY REDUCTION - UNSIGNED BYTE BY BYTE
+********************************************/
+
+ ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vtmpyb, "Vdd32=vtmpyb(Vuu32,Rt32)", "Vdd32.h=vtmpy(Vuu32.b,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VddV.v[0].h[i] = fMPY8SS(fGETBYTE(0,VuuV.v[0].h[i]), fGETBYTE((2*i )%4, RtV));
+ VddV.v[0].h[i] += fMPY8SS(fGETBYTE(1,VuuV.v[0].h[i]), fGETBYTE((2*i+1)%4, RtV));
+ VddV.v[0].h[i] += fGETBYTE(0,VuuV.v[1].h[i]);
+
+ VddV.v[1].h[i] = fMPY8SS(fGETBYTE(1,VuuV.v[0].h[i]), fGETBYTE((2*i )%4, RtV));
+ VddV.v[1].h[i] += fMPY8SS(fGETBYTE(0,VuuV.v[1].h[i]), fGETBYTE((2*i+1)%4, RtV));
+ VddV.v[1].h[i] += fGETBYTE(1,VuuV.v[1].h[i]))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vtmpyb_acc, "Vxx32+=vtmpyb(Vuu32,Rt32)", "Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VxxV.v[0].h[i] += fMPY8SS(fGETBYTE(0,VuuV.v[0].h[i]), fGETBYTE((2*i )%4, RtV));
+ VxxV.v[0].h[i] += fMPY8SS(fGETBYTE(1,VuuV.v[0].h[i]), fGETBYTE((2*i+1)%4, RtV));
+ VxxV.v[0].h[i] += fGETBYTE(0,VuuV.v[1].h[i]);
+
+ VxxV.v[1].h[i] += fMPY8SS(fGETBYTE(1,VuuV.v[0].h[i]), fGETBYTE((2*i )%4, RtV));
+ VxxV.v[1].h[i] += fMPY8SS(fGETBYTE(0,VuuV.v[1].h[i]), fGETBYTE((2*i+1)%4, RtV));
+ VxxV.v[1].h[i] += fGETBYTE(1,VuuV.v[1].h[i]))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vtmpybus, "Vdd32=vtmpybus(Vuu32,Rt32)", "Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0,VuuV.v[0].uh[i]), fGETBYTE((2*i )%4, RtV));
+ VddV.v[0].h[i] += fMPY8US(fGETUBYTE(1,VuuV.v[0].uh[i]), fGETBYTE((2*i+1)%4, RtV));
+ VddV.v[0].h[i] += fGETUBYTE(0,VuuV.v[1].uh[i]);
+
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1,VuuV.v[0].uh[i]), fGETBYTE((2*i )%4, RtV));
+ VddV.v[1].h[i] += fMPY8US(fGETUBYTE(0,VuuV.v[1].uh[i]), fGETBYTE((2*i+1)%4, RtV));
+ VddV.v[1].h[i] += fGETUBYTE(1,VuuV.v[1].uh[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vtmpybus_acc, "Vxx32+=vtmpybus(Vuu32,Rt32)", "Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0,VuuV.v[0].uh[i]), fGETBYTE((2*i )%4, RtV));
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(1,VuuV.v[0].uh[i]), fGETBYTE((2*i+1)%4, RtV));
+ VxxV.v[0].h[i] += fGETUBYTE(0,VuuV.v[1].uh[i]);
+
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1,VuuV.v[0].uh[i]), fGETBYTE((2*i )%4, RtV));
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(0,VuuV.v[1].uh[i]), fGETBYTE((2*i+1)%4, RtV));
+ VxxV.v[1].h[i] += fGETUBYTE(1,VuuV.v[1].uh[i]))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vtmpyhb, "Vdd32=vtmpyhb(Vuu32,Rt32)", "Vdd32.w=vtmpy(Vuu32.h,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VddV.v[0].w[i] = fMPY16SS(fGETHALF(0,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+0)%4, RtV)));
+ VddV.v[0].w[i]+= fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+1)%4, RtV)));
+ VddV.v[0].w[i]+= fGETHALF(0,VuuV.v[1].w[i]);
+
+ VddV.v[1].w[i] = fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+0)%4, RtV)));
+ VddV.v[1].w[i]+= fMPY16SS(fGETHALF(0,VuuV.v[1].w[i]), fSE8_16(fGETBYTE((2*i+1)%4, RtV)));
+ VddV.v[1].w[i]+= fGETHALF(1,VuuV.v[1].w[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vtmpyhb_acc, "Vxx32+=vtmpyhb(Vuu32,Rt32)", "Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)",
+"Dual Vector 3x1 Reduction",
+ VxxV.v[0].w[i]+= fMPY16SS(fGETHALF(0,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+0)%4, RtV)));
+ VxxV.v[0].w[i]+= fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+1)%4, RtV)));
+ VxxV.v[0].w[i]+= fGETHALF(0,VuuV.v[1].w[i]);
+
+ VxxV.v[1].w[i]+= fMPY16SS(fGETHALF(1,VuuV.v[0].w[i]), fSE8_16(fGETBYTE((2*i+0)%4, RtV)));
+ VxxV.v[1].w[i]+= fMPY16SS(fGETHALF(0,VuuV.v[1].w[i]), fSE8_16(fGETBYTE((2*i+1)%4, RtV)));
+ VxxV.v[1].w[i]+= fGETHALF(1,VuuV.v[1].w[i]))
+
+
+/********************************************
+* 4-WAY REDUCTION - UNSIGNED BYTE BY UNSIGNED BYTE
+********************************************/
+
+
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpyub,"Vd32=vrmpyub(Vu32,Rt32)","Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VdV.uw[i] = fMPY8UU(fGETUBYTE(0,VuV.uw[i]), fGETUBYTE(0,RtV));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(1,VuV.uw[i]), fGETUBYTE(1,RtV));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(2,VuV.uw[i]), fGETUBYTE(2,RtV));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(3,VuV.uw[i]), fGETUBYTE(3,RtV)))
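+
+/* Worked example (illustrative): each word lane reduces four byte products,
+ * e.g. with the VuV word holding bytes {1,2,3,4} and RtV bytes {10,20,30,40}:
+ *   VdV.uw[i] = 1*10 + 2*20 + 3*30 + 4*40 = 300.
+ */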
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpyub_acc,"Vx32+=vrmpyub(Vu32,Rt32)","Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients Accumulate",
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(0,VuV.uw[i]), fGETUBYTE(0,RtV));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(1,VuV.uw[i]), fGETUBYTE(1,RtV));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(2,VuV.uw[i]), fGETUBYTE(2,RtV));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(3,VuV.uw[i]), fGETUBYTE(3,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpyubv,"Vd32=vrmpyub(Vu32,Vv32)","Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VdV.uw[i] = fMPY8UU(fGETUBYTE(0,VuV.uw[i]), fGETUBYTE(0,VvV.uw[i]));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(1,VuV.uw[i]), fGETUBYTE(1,VvV.uw[i]));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(2,VuV.uw[i]), fGETUBYTE(2,VvV.uw[i]));
+ VdV.uw[i] += fMPY8UU(fGETUBYTE(3,VuV.uw[i]), fGETUBYTE(3,VvV.uw[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpyubv_acc,"Vx32+=vrmpyub(Vu32,Vv32)","Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients Accumulate",
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(0,VuV.uw[i]), fGETUBYTE(0,VvV.uw[i]));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(1,VuV.uw[i]), fGETUBYTE(1,VvV.uw[i]));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(2,VuV.uw[i]), fGETUBYTE(2,VvV.uw[i]));
+ VxV.uw[i] += fMPY8UU(fGETUBYTE(3,VuV.uw[i]), fGETUBYTE(3,VvV.uw[i])))
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpybv,"Vd32=vrmpyb(Vu32,Vv32)","Vd32.w=vrmpy(Vu32.b,Vv32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VdV.w[i] = fMPY8SS(fGETBYTE(0, VuV.w[i]), fGETBYTE(0, VvV.w[i]));
+ VdV.w[i] += fMPY8SS(fGETBYTE(1, VuV.w[i]), fGETBYTE(1, VvV.w[i]));
+ VdV.w[i] += fMPY8SS(fGETBYTE(2, VuV.w[i]), fGETBYTE(2, VvV.w[i]));
+ VdV.w[i] += fMPY8SS(fGETBYTE(3, VuV.w[i]), fGETBYTE(3, VvV.w[i])))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpybv_acc,"Vx32+=vrmpyb(Vu32,Vv32)","Vx32.w+=vrmpy(Vu32.b,Vv32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VxV.w[i] += fMPY8SS(fGETBYTE(0, VuV.w[i]), fGETBYTE(0, VvV.w[i]));
+ VxV.w[i] += fMPY8SS(fGETBYTE(1, VuV.w[i]), fGETBYTE(1, VvV.w[i]));
+ VxV.w[i] += fMPY8SS(fGETBYTE(2, VuV.w[i]), fGETBYTE(2, VvV.w[i]));
+ VxV.w[i] += fMPY8SS(fGETBYTE(3, VuV.w[i]), fGETBYTE(3, VvV.w[i])))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpyubi,"Vdd32=vrmpyub(Vuu32,Rt32,#u1)","Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)",
+"Dual Vector Unsigned Byte By Signed Byte 4-way Reduction to Word",
+ VddV.v[0].uw[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[uiV ? 1:0].uw[i]),fGETUBYTE((0-uiV) & 0x3,RtV));
+ VddV.v[0].uw[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[0 ].uw[i]),fGETUBYTE((1-uiV) & 0x3,RtV));
+ VddV.v[0].uw[i] += fMPY8UU(fGETUBYTE(2, VuuV.v[0 ].uw[i]),fGETUBYTE((2-uiV) & 0x3,RtV));
+ VddV.v[0].uw[i] += fMPY8UU(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETUBYTE((3-uiV) & 0x3,RtV));
+
+ VddV.v[1].uw[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[1 ].uw[i]),fGETUBYTE((2-uiV) & 0x3,RtV));
+ VddV.v[1].uw[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[1 ].uw[i]),fGETUBYTE((3-uiV) & 0x3,RtV));
+ VddV.v[1].uw[i] += fMPY8UU(fGETUBYTE(2, VuuV.v[uiV ? 1:0].uw[i]),fGETUBYTE((0-uiV) & 0x3,RtV));
+ VddV.v[1].uw[i] += fMPY8UU(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETUBYTE((1-uiV) & 0x3,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpyubi_acc,"Vxx32+=vrmpyub(Vuu32,Rt32,#u1)","Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)",
+"Dual Vector Unsigned Byte By Signed Byte 4-way Reduction with accumulate and saturation to Word",
+ VxxV.v[0].uw[i] += fMPY8UU(fGETUBYTE(0, VuuV.v[uiV ? 1:0].uw[i]),fGETUBYTE((0-uiV) & 0x3,RtV));
+ VxxV.v[0].uw[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[0 ].uw[i]),fGETUBYTE((1-uiV) & 0x3,RtV));
+ VxxV.v[0].uw[i] += fMPY8UU(fGETUBYTE(2, VuuV.v[0 ].uw[i]),fGETUBYTE((2-uiV) & 0x3,RtV));
+ VxxV.v[0].uw[i] += fMPY8UU(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETUBYTE((3-uiV) & 0x3,RtV));
+
+ VxxV.v[1].uw[i] += fMPY8UU(fGETUBYTE(0, VuuV.v[1 ].uw[i]),fGETUBYTE((2-uiV) & 0x3,RtV));
+ VxxV.v[1].uw[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[1 ].uw[i]),fGETUBYTE((3-uiV) & 0x3,RtV));
+ VxxV.v[1].uw[i] += fMPY8UU(fGETUBYTE(2, VuuV.v[uiV ? 1:0].uw[i]),fGETUBYTE((0-uiV) & 0x3,RtV));
+ VxxV.v[1].uw[i] += fMPY8UU(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETUBYTE((1-uiV) & 0x3,RtV)))
+
+
+
+
+/********************************************
+* 4-WAY REDUCTION - UNSIGNED BYTE BY BYTE
+********************************************/
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpybus,"Vd32=vrmpybus(Vu32,Rt32)","Vd32.w=vrmpy(Vu32.ub,Rt32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VdV.w[i] = fMPY8US(fGETUBYTE(0,VuV.uw[i]), fGETBYTE(0,RtV));
+ VdV.w[i] += fMPY8US(fGETUBYTE(1,VuV.uw[i]), fGETBYTE(1,RtV));
+ VdV.w[i] += fMPY8US(fGETUBYTE(2,VuV.uw[i]), fGETBYTE(2,RtV));
+ VdV.w[i] += fMPY8US(fGETUBYTE(3,VuV.uw[i]), fGETBYTE(3,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpybus_acc,"Vx32+=vrmpybus(Vu32,Rt32)","Vx32.w+=vrmpy(Vu32.ub,Rt32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VxV.w[i] += fMPY8US(fGETUBYTE(0,VuV.uw[i]), fGETBYTE(0,RtV));
+ VxV.w[i] += fMPY8US(fGETUBYTE(1,VuV.uw[i]), fGETBYTE(1,RtV));
+ VxV.w[i] += fMPY8US(fGETUBYTE(2,VuV.uw[i]), fGETBYTE(2,RtV));
+ VxV.w[i] += fMPY8US(fGETUBYTE(3,VuV.uw[i]), fGETBYTE(3,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpybusi,"Vdd32=vrmpybus(Vuu32,Rt32,#u1)","Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)",
+"Dual Vector Unsigned Byte By Signed Byte 4-way Reduction to Word",
+ VddV.v[0].w[i] = fMPY8US(fGETUBYTE(0, VuuV.v[uiV ? 1:0].uw[i]),fGETBYTE((0-uiV) & 0x3,RtV));
+ VddV.v[0].w[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0 ].uw[i]),fGETBYTE((1-uiV) & 0x3,RtV));
+ VddV.v[0].w[i] += fMPY8US(fGETUBYTE(2, VuuV.v[0 ].uw[i]),fGETBYTE((2-uiV) & 0x3,RtV));
+ VddV.v[0].w[i] += fMPY8US(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETBYTE((3-uiV) & 0x3,RtV));
+
+ VddV.v[1].w[i] = fMPY8US(fGETUBYTE(0, VuuV.v[1 ].uw[i]),fGETBYTE((2-uiV) & 0x3,RtV));
+ VddV.v[1].w[i] += fMPY8US(fGETUBYTE(1, VuuV.v[1 ].uw[i]),fGETBYTE((3-uiV) & 0x3,RtV));
+ VddV.v[1].w[i] += fMPY8US(fGETUBYTE(2, VuuV.v[uiV ? 1:0].uw[i]),fGETBYTE((0-uiV) & 0x3,RtV));
+ VddV.v[1].w[i] += fMPY8US(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETBYTE((1-uiV) & 0x3,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpybusi_acc,"Vxx32+=vrmpybus(Vuu32,Rt32,#u1)","Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)",
+"Dual Vector Unsigned Byte By Signed Byte 4-way Reduction with accumulate and saturation to Word",
+ VxxV.v[0].w[i] += fMPY8US(fGETUBYTE(0, VuuV.v[uiV ? 1:0].uw[i]),fGETBYTE((0-uiV) & 0x3,RtV));
+ VxxV.v[0].w[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0 ].uw[i]),fGETBYTE((1-uiV) & 0x3,RtV));
+ VxxV.v[0].w[i] += fMPY8US(fGETUBYTE(2, VuuV.v[0 ].uw[i]),fGETBYTE((2-uiV) & 0x3,RtV));
+ VxxV.v[0].w[i] += fMPY8US(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETBYTE((3-uiV) & 0x3,RtV));
+
+ VxxV.v[1].w[i] += fMPY8US(fGETUBYTE(0, VuuV.v[1 ].uw[i]),fGETBYTE((2-uiV) & 0x3,RtV));
+ VxxV.v[1].w[i] += fMPY8US(fGETUBYTE(1, VuuV.v[1 ].uw[i]),fGETBYTE((3-uiV) & 0x3,RtV));
+ VxxV.v[1].w[i] += fMPY8US(fGETUBYTE(2, VuuV.v[uiV ? 1:0].uw[i]),fGETBYTE((0-uiV) & 0x3,RtV));
+ VxxV.v[1].w[i] += fMPY8US(fGETUBYTE(3, VuuV.v[0 ].uw[i]),fGETBYTE((1-uiV) & 0x3,RtV)))
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT(32,vrmpybusv,"Vd32=vrmpybus(Vu32,Vv32)","Vd32.w=vrmpy(Vu32.ub,Vv32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VdV.w[i] = fMPY8US(fGETUBYTE(0,VuV.uw[i]), fGETBYTE(0,VvV.w[i]));
+ VdV.w[i] += fMPY8US(fGETUBYTE(1,VuV.uw[i]), fGETBYTE(1,VvV.w[i]));
+ VdV.w[i] += fMPY8US(fGETUBYTE(2,VuV.uw[i]), fGETBYTE(2,VvV.w[i]));
+ VdV.w[i] += fMPY8US(fGETUBYTE(3,VuV.uw[i]), fGETBYTE(3,VvV.w[i])))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrmpybusv_acc,"Vx32+=vrmpybus(Vu32,Vv32)","Vx32.w+=vrmpy(Vu32.ub,Vv32.b)",
+"Vector Multiply-Accumulate Reduce with 4 byte coefficients",
+ VxV.w[i] += fMPY8US(fGETUBYTE(0,VuV.uw[i]), fGETBYTE(0,VvV.w[i]));
+ VxV.w[i] += fMPY8US(fGETUBYTE(1,VuV.uw[i]), fGETBYTE(1,VvV.w[i]));
+ VxV.w[i] += fMPY8US(fGETUBYTE(2,VuV.uw[i]), fGETBYTE(2,VvV.w[i]));
+ VxV.w[i] += fMPY8US(fGETUBYTE(3,VuV.uw[i]), fGETBYTE(3,VvV.w[i])))
+
+
+
+
+
+
+
+
+
+
+
+/********************************************
+* 2-WAY REDUCTION - SAD
+********************************************/
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdsaduh,"Vdd32=vdsaduh(Vuu32,Rt32)","Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)",
+"Dual Vector Halfword by Byte 4-Way Reduction to Word",
+ VddV.v[0].uw[i] = fABS(fGETUHALF(0, VuuV.v[0].uw[i]) - fGETUHALF(0,RtV));
+ VddV.v[0].uw[i] += fABS(fGETUHALF(1, VuuV.v[0].uw[i]) - fGETUHALF(1,RtV));
+ VddV.v[1].uw[i] = fABS(fGETUHALF(1, VuuV.v[0].uw[i]) - fGETUHALF(0,RtV));
+ VddV.v[1].uw[i] += fABS(fGETUHALF(0, VuuV.v[1].uw[i]) - fGETUHALF(1,RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vdsaduh_acc,"Vxx32+=vdsaduh(Vuu32,Rt32)","Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)",
+"Dual Vector Halfword by Byte 4-Way Reduction to Word",
+ VxxV.v[0].uw[i] += fABS(fGETUHALF(0, VuuV.v[0].uw[i]) - fGETUHALF(0,RtV));
+ VxxV.v[0].uw[i] += fABS(fGETUHALF(1, VuuV.v[0].uw[i]) - fGETUHALF(1,RtV));
+ VxxV.v[1].uw[i] += fABS(fGETUHALF(1, VuuV.v[0].uw[i]) - fGETUHALF(0,RtV));
+ VxxV.v[1].uw[i] += fABS(fGETUHALF(0, VuuV.v[1].uw[i]) - fGETUHALF(1,RtV)))
+
+
+
+
+/********************************************
+* 4-WAY REDUCTION - SAD
+********************************************/
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrsadubi,"Vdd32=vrsadub(Vuu32,Rt32,#u1)","Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)",
+"Dual Vector Halfword by Byte 4-Way Reduction to Word",
+ VddV.v[0].uw[i] = fABS(fZE8_16(fGETUBYTE(0, VuuV.v[uiV?1:0].uw[i])) - fZE8_16(fGETUBYTE((0-uiV)&3,RtV)));
+ VddV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(1, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((1-uiV)&3,RtV)));
+ VddV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(2, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((2-uiV)&3,RtV)));
+ VddV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(3, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((3-uiV)&3,RtV)));
+
+ VddV.v[1].uw[i] = fABS(fZE8_16(fGETUBYTE(0, VuuV.v[1 ].uw[i])) - fZE8_16(fGETUBYTE((2-uiV)&3,RtV)));
+ VddV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(1, VuuV.v[1 ].uw[i])) - fZE8_16(fGETUBYTE((3-uiV)&3,RtV)));
+ VddV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(2, VuuV.v[uiV?1:0].uw[i])) - fZE8_16(fGETUBYTE((0-uiV)&3,RtV)));
+ VddV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(3, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((1-uiV)&3,RtV))))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vrsadubi_acc,"Vxx32+=vrsadub(Vuu32,Rt32,#u1)","Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)",
+"Dual Vector Halfword by Byte 4-Way Reduction to Word",
+ VxxV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(0, VuuV.v[uiV?1:0].uw[i])) - fZE8_16(fGETUBYTE((0-uiV)&3,RtV)));
+ VxxV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(1, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((1-uiV)&3,RtV)));
+ VxxV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(2, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((2-uiV)&3,RtV)));
+ VxxV.v[0].uw[i] += fABS(fZE8_16(fGETUBYTE(3, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((3-uiV)&3,RtV)));
+
+ VxxV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(0, VuuV.v[1 ].uw[i])) - fZE8_16(fGETUBYTE((2-uiV)&3,RtV)));
+ VxxV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(1, VuuV.v[1 ].uw[i])) - fZE8_16(fGETUBYTE((3-uiV)&3,RtV)));
+ VxxV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(2, VuuV.v[uiV?1:0].uw[i])) - fZE8_16(fGETUBYTE((0-uiV)&3,RtV)));
+ VxxV.v[1].uw[i] += fABS(fZE8_16(fGETUBYTE(3, VuuV.v[0 ].uw[i])) - fZE8_16(fGETUBYTE((1-uiV)&3,RtV))))
+
+
+
+
+
+
+
+
+
+
+/*********************************************************************
+ * MMVECTOR SHIFTING
+ * ******************************************************************/
+// Macros to shift left or right, arithmetic or logical, with the shift amount taken from either Rt32 or per-element Vv32
+
+#define V_SHIFT(TYPE, DESC, SIZE, LOGSIZE, CASTTYPE) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE, "Vd32=vasr" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Rt32)", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = (VuV.TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE, "Vd32=vasl" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Rt32)", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = (VuV.TYPE[i] << (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE, "Vd32=vlsr" #TYPE "(Vu32,Rt32)","Vd32.u"#TYPE"=vlsr(Vu32.u"#TYPE",Rt32)", "Vector logical shift right " DESC, VdV.u##TYPE[i] = (VuV.u##TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE##v,"Vd32=vasr" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTR(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE##v,"Vd32=vasl" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTL(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE##v,"Vd32=vlsr" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vlsr(Vu32."#TYPE",Vv32."#TYPE")", "Vector logical shift right " DESC, VdV.u##TYPE[i] = fBIDIR_LSHIFTR(VuV.u##TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+
+V_SHIFT(w, "word", 32,5,4_4)
+V_SHIFT(h, "halfword", 16,4,2_2)
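+
+/* Behavior sketch (illustrative, not part of the imported file): the Vv32
+ * forms sign-extend the low LOGSIZE+1 bits of each control element, so the
+ * shift is bidirectional: for halfwords, VvV.h[i] = 0x1E sign-extends to -2
+ * and fBIDIR_ASHIFTR then shifts left by 2 instead of right.
+ */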
+
+ITERATOR_INSN_SHIFT_SLOT(8,vlsrb,"Vd32.ub=vlsr(Vu32.ub,Rt32)","Vector logical shift right bytes", VdV.ub[i] = VuV.ub[i] >> (RtV & 0x7))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vrotr,"Vd32=vrotr(Vu32,Vv32)","Vd32.uw=vrotr(Vu32.uw,Vv32.uw)","Vector word rotate right", VdV.uw[i] = ((VuV.uw[i] >> (VvV.uw[i] & 0x1f)) | (VuV.uw[i] << (32 - (VvV.uw[i] & 0x1f)))))
+
+/*********************************************************************
+ * MMVECTOR SHIFT AND PERMUTE
+ * ******************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(32,vasr_into,"Vxx32=vasrinto(Vu32,Vv32)","Vxx32.w=vasrinto(Vu32.w,Vv32.w)","Arithmetic shift right of vector-1 elements, overlaying the shifted-out bits into the MSBs of the vector-2 elements",
+ fHIDE(int64_t ) shift = (fSE32_64(VuV.w[i]) << 32);
+ fHIDE(int64_t ) mask = (((fSE32_64(VxxV.v[0].w[i])) << 32) | fZE32_64(VxxV.v[0].w[i]));
+ fHIDE(int64_t) lomask = (((fSE32_64(1)) << 32) - 1);
+ fHIDE(int ) count = -(0x40 & VvV.w[i]) + (VvV.w[i] & 0x3f);
+ fHIDE(int64_t ) result = (count == -0x40) ? 0 : (((count < 0) ? ((shift << -(count)) | (mask & (lomask << -(count)))) : ((shift >> count) | (mask & (lomask >> count)))));
+ VxxV.v[1].w[i] = ((result >> 32) & 0xffffffff);
+ VxxV.v[0].w[i] = (result & 0xffffffff))
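+
+/* Worked example (illustrative): with count == 8, VuV.w[i] == 0x12345678 and
+ * VxxV.v[0].w[i] == 0xAABBCCDD, the 64-bit funnel {Vu.w,0} shifts right by 8:
+ *   VxxV.v[1].w[i] = 0x00123456   (Vu.w arithmetically shifted right)
+ *   VxxV.v[0].w[i] = 0x78BBCCDD   (dropped byte lands in the MSBs; the low
+ *                                  24 bits of the old value survive)
+ * Negative counts (bit 6 of VvV.w[i] set) shift in the other direction.
+ */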
+
+#define NEW_NARROWING_SHIFT 1
+
+#if NEW_NARROWING_SHIFT
+#define NARROWING_SHIFT(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \
+ITERATOR_INSN_SHIFT_SLOT(ITERSIZE,TAG, \
+"Vd32." #DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \
+"Vector shift right and shuffle", \
+ fHIDE(int )shamt = RtV & SHAMTMASK; \
+ DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \
+ DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt)))
+
+
+
+
+
+/* WORD TO HALF*/
+
+NARROWING_SHIFT(32,vasrwh,fSETHALF,h,w,,fECHO,fVNOROUND,0xF)
+NARROWING_SHIFT(32,vasrwhsat,fSETHALF,h,w,:sat,fVSATH,fVNOROUND,0xF)
+NARROWING_SHIFT(32,vasrwhrndsat,fSETHALF,h,w,:rnd:sat,fVSATH,fVROUND,0xF)
+NARROWING_SHIFT(32,vasrwuhrndsat,fSETHALF,uh,w,:rnd:sat,fVSATUH,fVROUND,0xF)
+NARROWING_SHIFT(32,vasrwuhsat,fSETHALF,uh,w,:sat,fVSATUH,fVNOROUND,0xF)
+NARROWING_SHIFT(32,vasruwuhrndsat,fSETHALF,uh,uw,:rnd:sat,fVSATUH,fVROUND,0xF)
+
+NARROWING_SHIFT_NOV1(32,vasruwuhsat,fSETHALF,uh,uw,:sat,fVSATUH,fVNOROUND,0xF)
+NARROWING_SHIFT(16,vasrhubsat,fSETBYTE,ub,h,:sat,fVSATUB,fVNOROUND,0x7)
+NARROWING_SHIFT(16,vasrhubrndsat,fSETBYTE,ub,h,:rnd:sat,fVSATUB,fVROUND,0x7)
+NARROWING_SHIFT(16,vasrhbsat,fSETBYTE,b,h,:sat,fVSATB,fVNOROUND,0x7)
+NARROWING_SHIFT(16,vasrhbrndsat,fSETBYTE,b,h,:rnd:sat,fVSATB,fVROUND,0x7)
+
+NARROWING_SHIFT_NOV1(16,vasruhubsat,fSETBYTE,ub,uh,:sat,fVSATUB,fVNOROUND,0x7)
+NARROWING_SHIFT_NOV1(16,vasruhubrndsat,fSETBYTE,ub,uh,:rnd:sat,fVSATUB,fVROUND,0x7)
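+
+/* Worked example (illustrative): for vasrwhrndsat with shamt == 8 and a source
+ * word of 0x00012380, fVROUND adds 1 << 7 before shifting:
+ *   (0x00012380 + 0x80) >> 8 = 0x124, which fVSATH leaves as 0x0124.
+ * fSETHALF(0,...) packs the VvV results into the even halfwords and
+ * fSETHALF(1,...) packs the VuV results into the odd halfwords.
+ */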
+
+#else
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrwh,"Vd32=vasrwh(Vu32,Vv32,Rt8)","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fSETHALF(0,VdV.w[i], (VvV.w[i] >> (RtV & 0xF)));
+ fSETHALF(1,VdV.w[i], (VuV.w[i] >> (RtV & 0xF))))
+
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fSETHALF(0,VdV.w[i], fVSATH(VvV.w[i] >> (RtV & 0xF)));
+ fSETHALF(1,VdV.w[i], fVSATH(VuV.w[i] >> (RtV & 0xF))))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhrndsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):rnd:sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fHIDE(int ) shamt = RtV & 0xF;
+ fSETHALF(0,VdV.w[i], fVSATH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt));
+ fSETHALF(1,VdV.w[i], fVSATH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhrndsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fHIDE(int ) shamt = RtV & 0xF;
+ fSETHALF(0,VdV.w[i], fVSATUH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt));
+ fSETHALF(1,VdV.w[i], fVSATUH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fSETHALF(0, VdV.uw[i], fVSATUH(VvV.w[i] >> (RtV & 0xF)));
+ fSETHALF(1, VdV.uw[i], fVSATUH(VuV.w[i] >> (RtV & 0xF))))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasruwuhrndsat,"Vd32=vasruwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat",
+"Vector arithmetic shift right words, shuffle even halfwords",
+ fHIDE(int ) shamt = RtV & 0xF;
+ fSETHALF(0,VdV.w[i], fVSATUH( (VvV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt));
+ fSETHALF(1,VdV.w[i], fVSATUH( (VuV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)))
+#endif
+
+
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vroundwh,"Vd32=vroundwh(Vu32,Vv32):sat","Vd32.h=vround(Vu32.w,Vv32.w):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETHALF(0, VdV.uw[i], fVSATH((VvV.w[i] + fCONSTLL(0x8000)) >> 16));
+ fSETHALF(1, VdV.uw[i], fVSATH((VuV.w[i] + fCONSTLL(0x8000)) >> 16)))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vroundwuh,"Vd32=vroundwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.w,Vv32.w):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETHALF(0, VdV.uw[i], fVSATUH((VvV.w[i] + fCONSTLL(0x8000)) >> 16));
+ fSETHALF(1, VdV.uw[i], fVSATUH((VuV.w[i] + fCONSTLL(0x8000)) >> 16)))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vrounduwuh,"Vd32=vrounduwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.uw,Vv32.uw):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETHALF(0, VdV.uw[i], fVSATUH((VvV.uw[i] + fCONSTLL(0x8000)) >> 16));
+ fSETHALF(1, VdV.uw[i], fVSATUH((VuV.uw[i] + fCONSTLL(0x8000)) >> 16)))
+
+
+
+
+
+/* HALF TO BYTE*/
+
+ITERATOR_INSN2_SHIFT_SLOT(16,vroundhb,"Vd32=vroundhb(Vu32,Vv32):sat","Vd32.b=vround(Vu32.h,Vv32.h):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETBYTE(0, VdV.uh[i], fVSATB((VvV.h[i] + 0x80) >> 8));
+ fSETBYTE(1, VdV.uh[i], fVSATB((VuV.h[i] + 0x80) >> 8)))
+
+ITERATOR_INSN2_SHIFT_SLOT(16,vroundhub,"Vd32=vroundhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.h,Vv32.h):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.h[i] + 0x80) >> 8));
+ fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.h[i] + 0x80) >> 8)))
+
+ITERATOR_INSN2_SHIFT_SLOT(16,vrounduhub,"Vd32=vrounduhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.uh,Vv32.uh):sat",
+"Vector round words to halves, shuffle resultant halfwords",
+ fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.uh[i] + 0x80) >> 8));
+ fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.uh[i] + 0x80) >> 8)))
+
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vaslw_acc,"Vx32+=vaslw(Vu32,Rt32)","Vx32.w+=vasl(Vu32.w,Rt32)",
+"Vector shift add word",
+ VxV.w[i] += (VuV.w[i] << (RtV & (32-1))))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vasrw_acc,"Vx32+=vasrw(Vu32,Rt32)","Vx32.w+=vasr(Vu32.w,Rt32)",
+"Vector shift add word",
+ VxV.w[i] += (VuV.w[i] >> (RtV & (32-1))))
+
+ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vaslh_acc,"Vx32+=vaslh(Vu32,Rt32)","Vx32.h+=vasl(Vu32.h,Rt32)",
+"Vector shift add halfword",
+ VxV.h[i] += (VuV.h[i] << (RtV & (16-1))))
+
+ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vasrh_acc,"Vx32+=vasrh(Vu32,Rt32)","Vx32.h+=vasr(Vu32.h,Rt32)",
+"Vector shift add halfword",
+ VxV.h[i] += (VuV.h[i] >> (RtV & (16-1))))
+
+/**************************************************************************
+*
+* MMVECTOR ELEMENT-WISE ARITHMETIC
+*
+**************************************************************************/
+
+/**************************************************************************
+* MACROS GO IN MACROS.DEF NOT HERE!!!
+**************************************************************************/
+
+
+#define MMVEC_ABSDIFF(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_MPY_SLOT(WIDTH, vabsdiff##TYPE, "Vd32=vabsdiff"TYPE2"(Vu32,Vv32)" ,"Vd32."#DEST"=vabsdiff(Vu32."#SRC",Vv32."#SRC")" , "Vector Absolute Difference "DESCR, VdV.DEST[i] = (VuV.SRC[i] > VvV.SRC[i]) ? (VuV.SRC[i] - VvV.SRC[i]) : (VvV.SRC[i] - VuV.SRC[i]))
+
+#define MMVEC_ADDU_SAT(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVUADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Sub & Saturate "DESCR, VdV.DEST[i] = fVUSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Sub & Saturate "DESCR, VddV.v[0].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\
+
+#define MMVEC_ADDS_SAT(TYPE,TYPE2,DESCR, WIDTH,DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVSADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Sub & Saturate "DESCR, VdV.DEST[i] = fVSSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Sub & Saturate "DESCR, VddV.v[0].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\
+
+#define MMVEC_AVGU(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average & Round "DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i]))
+
+
+
+#define MMVEC_AVGS(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average & Round "DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i]))
+
+
+
+
+
+
+
+#define MMVEC_ADDWRAP(TYPE,TYPE2, DESCR, WIDTH , DEST,SRC)\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE, "Vd32=vadd"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC")", "Vector Add "DESCR, VdV.DEST[i] = VuV.SRC[i] + VvV.SRC[i])\
+ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE, "Vd32=vsub"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC")", "Vector Sub "DESCR, VdV.DEST[i] = VuV.SRC[i] - VvV.SRC[i])\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Add "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] + VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] + VvvV.v[1].SRC[i])\
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Sub "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] - VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] - VvvV.v[1].SRC[i]) \
+
+
+
+
+
+/* Wrapping Adds */
+MMVEC_ADDWRAP(b, "b", "Byte", 8, b, b)
+MMVEC_ADDWRAP(h, "h", "Halfword", 16, h, h)
+MMVEC_ADDWRAP(w, "w", "Word", 32, w, w)
+
+/* Saturating Adds */
+MMVEC_ADDU_SAT(ub, "ub", "Unsigned Byte", 8, ub, ub)
+MMVEC_ADDU_SAT(uh, "uh", "Unsigned Halfword", 16, uh, uh)
+MMVEC_ADDU_SAT(uw, "uw", "Unsigned Word", 32, uw, uw)
+MMVEC_ADDS_SAT(b, "b", "Byte", 8, b, b)
+MMVEC_ADDS_SAT(h, "h", "Halfword", 16, h, h)
+MMVEC_ADDS_SAT(w, "w", "Word", 32, w, w)
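+
+/* Worked example (illustrative): the wrapping and saturating forms differ
+ * only at the type bounds, e.g. for signed bytes:
+ *   vaddb:    0x7F + 1 -> 0x80 (wraps to -128)
+ *   vaddbsat: 0x7F + 1 -> 0x7F (clamps at +127)
+ */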
+
+
+/* Averaging Instructions */
+MMVEC_AVGU(ub,"ub", "Unsigned Byte", 8, ub, ub)
+MMVEC_AVGU(uh,"uh", "Unsigned Halfword", 16, uh, uh)
+MMVEC_AVGU_NOV1(uw,"uw", "Unsigned Word", 32, uw, uw)
+MMVEC_AVGS_NOV1(b, "b", "Byte", 8, b, b)
+MMVEC_AVGS(h, "h", "Halfword", 16, h, h)
+MMVEC_AVGS(w, "w", "Word", 32, w, w)
+
+
+/* Absolute Difference */
+MMVEC_ABSDIFF(ub,"ub", "Unsigned Byte", 8, ub, ub)
+MMVEC_ABSDIFF(uh,"uh", "Unsigned Halfword", 16, uh, uh)
+MMVEC_ABSDIFF(h,"h", "Halfword", 16, uh, h)
+MMVEC_ABSDIFF(w,"w", "Word", 32, uw, w)
+
+ITERATOR_INSN2_ANY_SLOT(8,vnavgub, "Vd32=vnavgub(Vu32,Vv32)", "Vd32.b=vnavg(Vu32.ub,Vv32.ub)",
+"Vector Negative Average Unsigned Byte", VdV.b[i] = fVNAVGU(8, VuV.ub[i], VvV.ub[i]))
+
+ITERATOR_INSN_ANY_SLOT(32,vaddcarrysat,"Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat","add w/carry and saturate",
+VdV.w[i] = fVSATW(VuV.w[i]+VvV.w[i]+fGETQBIT(QsV,i*4)))
+
+ITERATOR_INSN_ANY_SLOT(32,vaddcarry,"Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry","add w/carry",
+VdV.w[i] = VuV.w[i]+VvV.w[i]+fGETQBIT(QxV,i*4);
+fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],fGETQBIT(QxV,i*4))))
+
+ITERATOR_INSN_ANY_SLOT(32,vsubcarry,"Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry","subtract w/carry",
+VdV.w[i] = VuV.w[i]+~VvV.w[i]+fGETQBIT(QxV,i*4);
+fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],fGETQBIT(QxV,i*4))))
+
+ITERATOR_INSN_ANY_SLOT(32,vaddcarryo,"Vd32.w,Qe4=vadd(Vu32.w,Vv32.w):carry","add w/carry out-only",
+VdV.w[i] = VuV.w[i]+VvV.w[i];
+fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],0)))
+
+ITERATOR_INSN_ANY_SLOT(32,vsubcarryo,"Vd32.w,Qe4=vsub(Vu32.w,Vv32.w):carry","subtract w/carry out-only",
+VdV.w[i] = VuV.w[i]+~VvV.w[i]+1;
+fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],1)))
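+
+/* Usage sketch (illustrative, not part of the imported file; register names
+ * hypothetical): the carry forms enable multi-word arithmetic, with one
+ * predicate bit per word lane (read via fGETQBIT at bit 4*i) carrying between
+ * instructions:
+ *   Vlo32.w,Qc4 = vadd(Va32.w,Vb32.w):carry    // low words, carry out
+ *   Vhi32.w = vadd(Vc32.w,Vd32.w,Qc4):carry    // high words, carry in/out
+ * e.g. 0xFFFFFFFF + 1 in a lane gives Vd.w[i] == 0 with that lane's Q bit set.
+ */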
+
+
+ITERATOR_INSN_ANY_SLOT(32,vsatdw,"Vd32.w=vsatdw(Vu32.w,Vv32.w)","Saturate from 64-bits (higher 32-bits come from first vector) to 32-bits",VdV.w[i] = fVSATDW(VuV.w[i],VvV.w[i]))
+
+
+#define MMVEC_ADDSAT_MIX(TAGEND,SATF,WIDTH,DEST,SRC1,SRC2)\
+ITERATOR_INSN_ANY_SLOT(WIDTH, vadd##TAGEND,"Vd32."#DEST"=vadd(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Add mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] + VvV.SRC2[i]))\
+ITERATOR_INSN_ANY_SLOT(WIDTH, vsub##TAGEND,"Vd32."#DEST"=vsub(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Sub mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] - VvV.SRC2[i]))\
+
+MMVEC_ADDSAT_MIX(ububb_sat,fVSATUB,8,ub,ub,b)
+
+/****************************
+* WIDENING
+****************************/
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh,"Vdd32=vaddub(Vu32,Vv32)","Vdd32.h=vadd(Vu32.ub,Vv32.ub)",
+"Vector addition with widen into two vectors",
+ VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) + fZE8_16(fGETUBYTE(0, VvV.uh[i]));
+ VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) + fZE8_16(fGETUBYTE(1, VvV.uh[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vsububh,"Vdd32=vsubub(Vu32,Vv32)","Vdd32.h=vsub(Vu32.ub,Vv32.ub)",
+"Vector subtraction with widen into two vectors",
+ VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) - fZE8_16(fGETUBYTE(0, VvV.uh[i]));
+ VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) - fZE8_16(fGETUBYTE(1, VvV.uh[i])))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw,"Vdd32=vaddh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.h,Vv32.h)",
+"Vector addition with widen into two vectors",
+ VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]);
+ VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubhw,"Vdd32=vsubh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.h,Vv32.h)",
+"Vector subtraction with widen into two vectors",
+ VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) - fGETHALF(0, VvV.w[i]);
+ VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) - fGETHALF(1, VvV.w[i]))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw,"Vdd32=vadduh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.uh,Vv32.uh)",
+"Vector addition with widen into two vectors",
+ VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) + fZE16_32(fGETUHALF(0, VvV.uw[i]));
+ VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) + fZE16_32(fGETUHALF(1, VvV.uw[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubuhw,"Vdd32=vsubuh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.uh,Vv32.uh)",
+"Vector subtraction with widen into two vectors",
+ VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) - fZE16_32(fGETUHALF(0, VvV.uw[i]));
+ VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) - fZE16_32(fGETUHALF(1, VvV.uw[i])))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw_acc,"Vxx32+=vaddh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.h,Vv32.h)",
+"Vector addition with widen into two vectors",
+ VxxV.v[0].w[i] += fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]);
+ VxxV.v[1].w[i] += fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw_acc,"Vxx32+=vadduh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.uh,Vv32.uh)",
+"Vector addition with widen into two vectors",
+ VxxV.v[0].w[i] += fGETUHALF(0, VuV.w[i]) + fGETUHALF(0, VvV.w[i]);
+ VxxV.v[1].w[i] += fGETUHALF(1, VuV.w[i]) + fGETUHALF(1, VvV.w[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh_acc,"Vxx32+=vaddub(Vu32,Vv32)","Vxx32.h+=vadd(Vu32.ub,Vv32.ub)",
+"Vector addition with widen into two vectors",
+ VxxV.v[0].h[i] += fGETUBYTE(0, VuV.h[i]) + fGETUBYTE(0, VvV.h[i]);
+ VxxV.v[1].h[i] += fGETUBYTE(1, VuV.h[i]) + fGETUBYTE(1, VvV.h[i]))
+
+
+/****************************
+* Conditional
+****************************/
+
+#define CONDADDSUB(WIDTH,TAGEND,LHSYN,RHSYN,DESCR,LHBEH,RHBEH) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (Qv4) "LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH+RHBEH,LHBEH)) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH-RHBEH,LHBEH)) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (!Qv4) "LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH+RHBEH)) \
+ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (!Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH-RHBEH)) \
+
+CONDADDSUB(8,b,"Vx32.b","Vu32.b","Conditional add/sub Byte",VxV.ub[i],VuV.ub[i])
+CONDADDSUB(16,h,"Vx32.h","Vu32.h","Conditional add/sub Half",VxV.h[i],VuV.h[i])
+CONDADDSUB(32,w,"Vx32.w","Vu32.w","Conditional add/sub Word",VxV.w[i],VuV.w[i])
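+
+/* For reference, CONDADDSUB(8,b,...) above expands, for the vaddbq case, to
+ * per-lane behavior roughly like:
+ *
+ *   VxV.ub[i] = fCONDMASK8(QvV, i, VxV.ub[i] + VuV.ub[i], VxV.ub[i]);
+ *
+ * Lanes whose predicate bit is set take the sum; the others keep their old
+ * value.  The nq variants swap the two arms of the condition mask.
+ */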
+
+/*****************************************************
+ ABSOLUTE VALUES
+*****************************************************/
+// V65
+ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb, "Vd32=vabsb(Vu32)", "Vd32.b=vabs(Vu32.b)", "Vector absolute value of bytes", VdV.b[i] = fABS(VuV.b[i]))
+ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb_sat, "Vd32=vabsb(Vu32):sat", "Vd32.b=vabs(Vu32.b):sat", "Vector absolute value of bytes", VdV.b[i] = fVSATB(fABS(fSE8_16(VuV.b[i]))))
+
+
+ITERATOR_INSN2_ANY_SLOT(16,vabsh, "Vd32=vabsh(Vu32)", "Vd32.h=vabs(Vu32.h)", "Vector absolute value of halfwords", VdV.h[i] = fABS(VuV.h[i]))
+ITERATOR_INSN2_ANY_SLOT(16,vabsh_sat, "Vd32=vabsh(Vu32):sat", "Vd32.h=vabs(Vu32.h):sat", "Vector absolute value of halfwords", VdV.h[i] = fVSATH(fABS(fSE16_32(VuV.h[i]))))
+ITERATOR_INSN2_ANY_SLOT(32,vabsw, "Vd32=vabsw(Vu32)", "Vd32.w=vabs(Vu32.w)", "Vector absolute value of words", VdV.w[i] = fABS(VuV.w[i]))
+ITERATOR_INSN2_ANY_SLOT(32,vabsw_sat, "Vd32=vabsw(Vu32):sat", "Vd32.w=vabs(Vu32.w):sat", "Vector absolute value of words", VdV.w[i] = fVSATW(fABS(fSE32_64(VuV.w[i]))))
+
+
+/**************************************************************************
+ * MMVECTOR MULTIPLICATIONS
+ * ************************************************************************/
+
+
+/* Byte by Byte */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv,"Vdd32=vmpyb(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.b,Vv32.b)",
+"Vector absolute value of words",
+ VddV.v[0].h[i] = fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i]));
+ VddV.v[1].h[i] = fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv_acc,"Vxx32+=vmpyb(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.b,Vv32.b)",
+"Vector absolute value of words",
+ VxxV.v[0].h[i] += fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i]));
+ VxxV.v[1].h[i] += fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i])))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv,"Vdd32=vmpyub(Vu32,Vv32)","Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)",
+"Vector absolute value of words",
+ VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) );
+ VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) ))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv_acc,"Vxx32+=vmpyub(Vu32,Vv32)","Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)",
+"Vector absolute value of words",
+ VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) );
+ VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) ))
+
+
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv,"Vdd32=vmpybus(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.ub,Vv32.b)",
+"Vector absolute value of words",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i]));
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv_acc,"Vxx32+=vmpybus(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.ub,Vv32.b)",
+"Vector absolute value of words",
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i]));
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i])))
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabusv,"Vdd32=vmpabus(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)",
+"Vertical Byte Multiply",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(0, VvvV.v[1].uh[i]));
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(1, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(1, VvvV.v[1].uh[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabuuv,"Vdd32=vmpabuu(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)",
+"Vertical Byte Multiply",
+ VddV.v[0].h[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(0, VvvV.v[1].uh[i]));
+ VddV.v[1].h[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(1, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(1, VvvV.v[1].uh[i])))
+
+
+
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv,"Vdd32=vmpyh(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.h)",
+"Vector by Vector Halfword Multiply",
+ VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i]));
+ VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv_acc,"Vxx32+=vmpyh(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.h)",
+"Vector by Vector Halfword Multiply",
+ VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i]));
+ VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv,"Vdd32=vmpyuh(Vu32,Vv32)","Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)",
+"Vector by Vector Unsigned Halfword Multiply",
+ VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i]));
+ VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i])))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv_acc,"Vxx32+=vmpyuh(Vu32,Vv32)","Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)",
+"Vector by Vector Unsigned Halfword Multiply",
+ VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i]));
+ VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i])))
+
+
+
+/* Vector by Vector */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyhvsrs,"Vd32=vmpyh(Vu32,Vv32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat",
+"Vector halfword multiply with round, shift, and sat16",
+ VdV.h[i] = fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(VuV.h[i],VvV.h[i] )<<1))))))
+
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus, "Vdd32=vmpyhus(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.uh)",
+"Vector by Vector Halfword Multiply",
+ VddV.v[0].w[i] = fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i]));
+ VddV.v[1].w[i] = fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i])))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus_acc, "Vxx32+=vmpyhus(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.uh)",
+"Vector by Vector Halfword Multiply",
+ VxxV.v[0].w[i] += fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i]));
+ VxxV.v[1].w[i] += fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i])))
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih,"Vd32=vmpyih(Vu32,Vv32)","Vd32.h=vmpyi(Vu32.h,Vv32.h)",
+"Vector by Vector Halfword Multiply",
+ VdV.h[i] = fMPY16SS(VuV.h[i], VvV.h[i]))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih_acc,"Vx32+=vmpyih(Vu32,Vv32)","Vx32.h+=vmpyi(Vu32.h,Vv32.h)",
+"Vector by Vector Halfword Multiply",
+ VxV.h[i] += fMPY16SS(VuV.h[i], VvV.h[i]))
+
+
+
+/* 32x32 high half / frac */
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh,"Vd32=vmpyewuh(Vu32,Vv32)","Vd32.w=vmpye(Vu32.w,Vv32.uh)",
+"Vector by Vector Halfword Multiply",
+VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) >> 16)
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh,"Vd32=vmpyowh(Vu32,Vv32):<<1:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat",
+"Vector by Vector Halfword Multiply",
+VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 0) >> 1)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd,"Vd32=vmpyowh(Vu32,Vv32):<<1:rnd:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat",
+"Vector by Vector Halfword Multiply",
+VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 1) >> 1)))
+
+ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh_64,"Vdd32=vmpye(Vu32.w,Vv32.uh)",
+"Word times Halfword Multiply, 64-bit result",
+ fHIDE(size8s_t prod;)
+ prod = fMPY32SU(VuV.w[i],fGETUHALF(0,VvV.w[i]));
+ VddV.v[1].w[i] = prod >> 16;
+ VddV.v[0].w[i] = prod << 16)
+
+ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_64_acc,"Vxx32+=vmpyo(Vu32.w,Vv32.h)",
+"Word times Halfword Multiply, 64-bit result",
+ fHIDE(size8s_t prod;)
+ prod = fMPY32SS(VuV.w[i],fGETHALF(1,VvV.w[i])) + fSE32_64(VxxV.v[1].w[i]);
+ VxxV.v[1].w[i] = prod >> 16;
+ fSETHALF(0, VxxV.v[0].w[i], VxxV.v[0].w[i] >> 16);
+ fSETHALF(1, VxxV.v[0].w[i], prod & 0x0000ffff))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift",
+"Vector by Vector Halfword Multiply",
+IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 0) >> 1)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:rnd:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift",
+"Vector by Vector Halfword Multiply",
+IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 1) >> 1)))
+
+/* For 32x32 integer / low half */
+
+ITERATOR_INSN_MPY_SLOT(32,vmpyieoh,"Vd32.w=vmpyieo(Vu32.h,Vv32.h)","Odd/Even multiply for 32x32 low half",
+ VdV.w[i] = (fGETHALF(0,VuV.w[i])*fGETHALF(1,VvV.w[i])) << 16)
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh,"Vd32=vmpyiewuh(Vu32,Vv32)","Vd32.w=vmpyie(Vu32.w,Vv32.uh)",
+"Vector by Vector Word by Halfword Multiply",
+IV1DEAD() VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) )
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiowh,"Vd32=vmpyiowh(Vu32,Vv32)","Vd32.w=vmpyio(Vu32.w,Vv32.h)",
+"Vector by Vector Word by Halfword Multiply",
+IV1DEAD() VdV.w[i] = fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) )
+
+/* Add back these... */
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewh_acc,"Vx32+=vmpyiewh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.h)",
+"Vector by Vector Word by Halfword Multiply",
+VxV.w[i] = VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(0, VvV.w[i])) )
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh_acc,"Vx32+=vmpyiewuh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.uh)",
+"Vector by Vector Word by Halfword Multiply",
+VxV.w[i] = VxV.w[i] + fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) )
+
+
+
+
+
+
+
+/* Vector by Scalar */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub,"Vdd32=vmpyub(Vu32,Rt32)","Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)",
+"Vector absolute value of words",
+ VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV));
+ VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub_acc,"Vxx32+=vmpyub(Vu32,Rt32)","Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)",
+"Vector absolute value of words",
+ VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV));
+ VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus,"Vdd32=vmpybus(Vu32,Rt32)","Vdd32.h=vmpy(Vu32.ub,Rt32.b)",
+"Vector absolute value of words",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV));
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus_acc,"Vxx32+=vmpybus(Vu32,Rt32)","Vxx32.h+=vmpy(Vu32.ub,Rt32.b)",
+"Vector absolute value of words",
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV));
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus,"Vdd32=vmpabus(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.b)",
+"Vertical Byte Multiply",
+ VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV));
+ VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus_acc,"Vxx32+=vmpabus(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)",
+"Vertical Byte Multiply",
+ VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV));
+ VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV)))
+
+// V65
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu,"Vdd32=vmpabuu(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)",
+"Vertical Byte Multiply",
+ VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV));
+ VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu_acc,"Vxx32+=vmpabuu(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)",
+"Vertical Byte Multiply",
+ VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV));
+ VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV)))
+
+
+
+
+/* Half by Byte */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb,"Vdd32=vmpahb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.h,Rt32.b)",
+"Vertical Byte Multiply",
+ VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV)));
+ VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV))))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb_acc,"Vxx32+=vmpahb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.h,Rt32.b)",
+"Vertical Byte Multiply",
+ VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV)));
+ VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV))))
+
+/* Half by Byte */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb,"Vdd32=vmpauhb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.uh,Rt32.b)",
+"Vertical Byte Multiply",
+ VddV.v[0].w[i] = fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV)));
+ VddV.v[1].w[i] = fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV))))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb_acc,"Vxx32+=vmpauhb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)",
+"Vertical Byte Multiply",
+ VxxV.v[0].w[i] += fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV)));
+ VxxV.v[1].w[i] += fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV))))
+
+
+
+
+
+
+
+/* Half by Half */
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyh,"Vdd32=vmpyh(Vu32,Rt32)","Vdd32.w=vmpy(Vu32.h,Rt32.h)",
+"Vector absolute value of words",
+ VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV));
+ VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(32,vmpyh_acc,"Vxx32+=vmpyh(Vu32,Rt32)","Vxx32.w+=vmpy(Vu32.h,Rt32.h)",
+"Vector even halfwords with scalar lower halfword multiply with shift and sat32",
+ VxxV.v[0].w[i] = fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV));
+ VxxV.v[1].w[i] = fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsat_acc,"Vxx32+=vmpyh(Vu32,Rt32):sat","Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat",
+"Vector even halfwords with scalar lower halfword multiply with shift and sat32",
+ VxxV.v[0].w[i] = fVSATW(fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV)));
+ VxxV.v[1].w[i] = fVSATW(fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV))))
+
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhss,"Vd32=vmpyh(Vu32,Rt32):<<1:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat",
+"Vector halfword by halfword multiply, shift by 1, and take upper 16 msb",
+ fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1)))));
+ fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1)))));
+)
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsrs,"Vd32=vmpyh(Vu32,Rt32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat",
+"Vector halfword with scalar halfword multiply with round, shift, and sat16",
+ fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1))))));
+ fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1))))));
+)
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh,"Vdd32=vmpyuh(Vu32,Rt32)","Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)",
+"Vector even halfword unsigned multiply by scalar",
+ VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV));
+ VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV)))
+
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh_acc,"Vxx32+=vmpyuh(Vu32,Rt32)","Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)",
+"Vector even halfword unsigned multiply by scalar",
+ VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV));
+ VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV)))
+
+
+
+
+/********************************************
+* HALF BY BYTE
+********************************************/
+ITERATOR_INSN2_MPY_SLOT(16,vmpyihb,"Vd32=vmpyihb(Vu32,Rt32)","Vd32.h=vmpyi(Vu32.h,Rt32.b)",
+"Vector word by byte multiply, keep lower result",
+VdV.h[i] = fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) ))
+
+ITERATOR_INSN2_MPY_SLOT(16,vmpyihb_acc,"Vx32+=vmpyihb(Vu32,Rt32)","Vx32.h+=vmpyi(Vu32.h,Rt32.b)",
+"Vector word by byte multiply, keep lower result",
+VxV.h[i] += fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) ))
+
+
+/********************************************
+* WORD BY BYTE
+********************************************/
+ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb,"Vd32=vmpyiwb(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.b)",
+"Vector word by byte multiply, keep lower result",
+VdV.w[i] = fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) ))
+
+ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb_acc,"Vx32+=vmpyiwb(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.b)",
+"Vector word by byte multiply, keep lower result",
+VxV.w[i] += fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) ))
+
+ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub,"Vd32=vmpyiwub(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.ub)",
+"Vector word by byte multiply, keep lower result",
+VdV.w[i] = fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) ))
+
+ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub_acc,"Vx32+=vmpyiwub(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.ub)",
+"Vector word by byte multiply, keep lower result",
+VxV.w[i] += fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) ))
+
+
+/********************************************
+* WORD BY HALF
+********************************************/
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh,"Vd32=vmpyiwh(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.h)",
+"Vector word by byte multiply, keep lower result",
+VdV.w[i] = fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV)))
+
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh_acc,"Vx32+=vmpyiwh(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.h)",
+"Vector word by byte multiply, keep lower result",
+VxV.w[i] += fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV)))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**************************************************************************
+ * MMVECTOR LOGICAL OPERATIONS
+ * ************************************************************************/
+ITERATOR_INSN_ANY_SLOT(16,vand,"Vd32=vand(Vu32,Vv32)", "Vector Logical And", VdV.uh[i] = VuV.uh[i] & VvV.h[i])
+ITERATOR_INSN_ANY_SLOT(16,vor, "Vd32=vor(Vu32,Vv32)", "Vector Logical Or", VdV.uh[i] = VuV.uh[i] | VvV.h[i])
+ITERATOR_INSN_ANY_SLOT(16,vxor,"Vd32=vxor(Vu32,Vv32)", "Vector Logical XOR", VdV.uh[i] = VuV.uh[i] ^ VvV.h[i])
+ITERATOR_INSN_ANY_SLOT(16,vnot,"Vd32=vnot(Vu32)", "Vector Logical NOT", VdV.uh[i] = ~VuV.uh[i])
+
+
+
+
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt,
+"Vd32.ub=vand(Qu4.ub,Rt32.ub)", "Vd32=vand(Qu4,Rt32)", "Insert Predicate into Vector",
+ VdV.ub[i] = fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0)
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt_acc,
+"Vx32.ub|=vand(Qu4.ub,Rt32.ub)", "Vx32|=vand(Qu4,Rt32)", "Insert Predicate into Vector",
+ VxV.ub[i] |= (fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0)
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt,
+"Vd32.ub=vand(!Qu4.ub,Rt32.ub)", "Vd32=vand(!Qu4,Rt32)", "Insert Predicate into Vector",
+ VdV.ub[i] = !fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0)
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt_acc,
+"Vx32.ub|=vand(!Qu4.ub,Rt32.ub)", "Vx32|=vand(!Qu4,Rt32)", "Insert Predicate into Vector",
+ VxV.ub[i] |= !(fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0)
+
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt,
+"Qd4.ub=vand(Vu32.ub,Rt32.ub)", "Qd4=vand(Vu32,Rt32)", "Insert into Predicate",
+ fSETQBIT(QdV,i,((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0))
+
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt_acc,
+"Qx4.ub|=vand(Vu32.ub,Rt32.ub)", "Qx4|=vand(Vu32,Rt32)", "Insert into Predicate ",
+ fSETQBIT(QxV,i,fGETQBIT(QxV,i)|(((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0)))
+
+ITERATOR_INSN_ANY_SLOT(8,vandvqv,"Vd32=vand(Qv4,Vu32)","Mask off bytes",
+VdV.b[i] = fGETQBIT(QvV,i) ? VuV.b[i] : 0)
+ITERATOR_INSN_ANY_SLOT(8,vandvnqv,"Vd32=vand(!Qv4,Vu32)","Mask off bytes",
+VdV.b[i] = !fGETQBIT(QvV,i) ? VuV.b[i] : 0)
+
+
+ /***************************************************
+ * Compare Vector with Vector
+ ***************************************************/
+#define VCMP(DEST, ASRC, ASRCOP, CMP, N, SRC, MASK, WIDTH) \
+{ \
+ for(fHIDE(int) i = 0; i < fVBYTES(); i += WIDTH) { \
+ fSETQBITS(DEST,WIDTH,MASK,i,ASRC ASRCOP ((VuV.SRC[i/WIDTH] CMP VvV.SRC[i/WIDTH]) ? MASK : 0)); \
+ } \
+ }
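+
+/* VCMP steps through the vector in WIDTH-byte elements and writes WIDTH
+ * predicate bits per element, so a word compare sets 4 Q bits per lane
+ * (MASK 0xF), a halfword compare 2 (0x3), and a byte compare 1 (0x1).  The
+ * plain form leaves the ASRC/ASRCOP slots empty; the _and/_or/_xor forms
+ * pass the old predicate bits and a combining operator, e.g. for a word
+ * compare with predicate-and the update is roughly:
+ *
+ *   fSETQBITS(QxV, 4, 0xF, i,
+ *             fGETQBITS(QxV,4,0xF,i) & ((VuV.w[i/4] > VvV.w[i/4]) ? 0xF : 0));
+ */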
+
+
+#define MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \
+EXTINSN(V6_vgt##TYPE, "Qd4=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than", \
+ VCMP(QdV, , , >, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_vgt##TYPE##_and, "Qx4&=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-and", \
+ VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, >, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_vgt##TYPE##_or, "Qx4|=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-or", \
+ VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, >, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_vgt##TYPE##_xor, "Qx4^=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-xor", \
+ VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, >, N, SRC, MASK, WIDTH))
+
+#define MMVEC_CMP(TYPE,TYPE2,TYPE3,DESCR,N,MASK, WIDTH, SRC)\
+MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \
+EXTINSN(V6_veq##TYPE, "Qd4=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to", \
+ VCMP(QdV, , , ==, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_veq##TYPE##_and, "Qx4&=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to with predicate-and", \
+	VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, ==, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_veq##TYPE##_or, "Qx4|=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to with predicate-or", \
+	VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, ==, N, SRC, MASK, WIDTH)) \
+EXTINSN(V6_veq##TYPE##_xor, "Qx4^=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to with predicate-xor", \
+	VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, ==, N, SRC, MASK, WIDTH))
+
+
+MMVEC_CMP(w,"w","","Vector Word Compare ", fVELEM(32), 0xF, 4, w)
+MMVEC_CMP(h,"h","","Vector Half Compare ", fVELEM(16), 0x3, 2, h)
+MMVEC_CMP(b,"b","","Vector Half Compare ", fVELEM(8), 0x1, 1, b)
+MMVEC_CMPGT(uw,"uw","","Vector Unsigned Word Compare ", fVELEM(32), 0xF, 4,uw)
+MMVEC_CMPGT(uh,"uh","","Vector Unsigned Half Compare ", fVELEM(16), 0x3, 2,uh)
+MMVEC_CMPGT(ub,"ub","","Vector Unsigned Byte Compare ", fVELEM(8), 0x1, 1,ub)
+
+/***************************************************
+* Predicate Operations
+***************************************************/
+
+EXTINSN(V6_pred_scalar2, "Qd4=vsetq(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ",
+{
+ fHIDE(int i;)
+ for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i < (RtV & (fVBYTES()-1))) ? 1 : 0);
+})
+
+EXTINSN(V6_pred_scalar2v2, "Qd4=vsetq2(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ",
+{
+ fHIDE(int i;)
+ for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i <= ((RtV-1) & (fVBYTES()-1))) ? 1 : 0);
+})
+
+
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqw, "Qd4.h=vshuffe(Qs4.w,Qt4.w)","Shrink Predicate", fSETQBIT(QdV,i, (i & 2) ? fGETQBIT(QsV,i-2) : fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqh, "Qd4.b=vshuffe(Qs4.h,Qt4.h)","Shrink Predicate", fSETQBIT(QdV,i, (i & 1) ? fGETQBIT(QsV,i-1) : fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or, "Qd4=or(Qs4,Qt4)","Vector Predicate Or", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and, "Qd4=and(Qs4,Qt4)","Vector Predicate And", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_xor, "Qd4=xor(Qs4,Qt4)","Vector Predicate Xor", fSETQBIT(QdV,i,fGETQBIT(QsV,i) ^ fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or_n, "Qd4=or(Qs4,!Qt4)","Vector Predicate Or with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || !fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and_n, "Qd4=and(Qs4,!Qt4)","Vector Predicate And with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && !fGETQBIT(QtV,i) ) )
+ITERATOR_INSN_ANY_SLOT(8, pred_not, "Qd4=not(Qs4)","Vector Predicate Not", fSETQBIT(QdV,i,!fGETQBIT(QsV,i) ) )
+
+
+
+EXTINSN(V6_vcmov, "if (Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov",
+{
+if (fLSBOLD(PsV)) {
+ fHIDE(int i;)
+ fVFOREACH(8, i) {
+ VdV.ub[i] = VuV.ub[i];
+ }
+ } else {CANCEL;}
+})
+
+EXTINSN(V6_vncmov, "if (!Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov",
+{
+if (fLSBOLDNOT(PsV)) {
+ fHIDE(int i;)
+ fVFOREACH(8, i) {
+ VdV.ub[i] = VuV.ub[i];
+ }
+ } else {CANCEL;}
+})
+
+EXTINSN(V6_vccombine, "if (Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine",
+{
+if (fLSBOLD(PsV)) {
+ fHIDE(int i;)
+ fVFOREACH(8, i) {
+ VddV.v[0].ub[i] = VvV.ub[i];
+ VddV.v[1].ub[i] = VuV.ub[i];
+ }
+ } else {CANCEL;}
+})
+
+EXTINSN(V6_vnccombine, "if (!Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine",
+{
+if (fLSBOLDNOT(PsV)) {
+ fHIDE(int i;)
+ fVFOREACH(8, i) {
+ VddV.v[0].ub[i] = VvV.ub[i];
+ VddV.v[1].ub[i] = VuV.ub[i];
+ }
+ } else {CANCEL;}
+})
+
+
+
+ITERATOR_INSN_ANY_SLOT(8,vmux,"Vd32=vmux(Qt4,Vu32,Vv32)",
+"Vector Select Element 8-bit",
+ VdV.ub[i] = fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i])
+
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vswap,"Vdd32=vswap(Qt4,Vu32,Vv32)",
+"Vector Swap Element 8-bit",
+ VddV.v[0].ub[i] = fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i];
+ VddV.v[1].ub[i] = !fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i])
+
+
+/***************************************************************************
+*
+* MMVECTOR SORTING
+*
+****************************************************************************/
+
+#define MMVEC_SORT(TYPE,TYPE2,DESCR,ELEMENTSIZE,SRC)\
+ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmax##TYPE, "Vd32=vmax" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmax(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " max", VdV.SRC[i] = (VuV.SRC[i] > VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i]) \
+ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmin##TYPE, "Vd32=vmin" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmin(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " min", VdV.SRC[i] = (VuV.SRC[i] < VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i])
+
+MMVEC_SORT(b,"b", "signed byte", 8, b)
+MMVEC_SORT(ub,"ub", "unsigned byte", 8, ub)
+MMVEC_SORT(uh,"uh", "unsigned halfword",16, uh)
+MMVEC_SORT(h, "h", "halfword", 16, h)
+MMVEC_SORT(w, "w", "word", 32, w)
+
+
+
+
+
+
+
+
+
+/*************************************************************
+* SHUFFLES
+****************************************************************/
+
+ITERATOR_INSN2_ANY_SLOT(16,vsathub,"Vd32=vsathub(Vu32,Vv32)","Vd32.ub=vsat(Vu32.h,Vv32.h)",
+"Saturate and pack 32 halfwords to 32 unsigned bytes, and interleave them",
+ fSETBYTE(0, VdV.uh[i], fVSATUB(VvV.h[i]));
+ fSETBYTE(1, VdV.uh[i], fVSATUB(VuV.h[i])))
+
+ITERATOR_INSN2_ANY_SLOT(32,vsatwh,"Vd32=vsatwh(Vu32,Vv32)","Vd32.h=vsat(Vu32.w,Vv32.w)",
+"Saturate and pack 16 words to 16 halfwords, and interleave them",
+ fSETHALF(0, VdV.w[i], fVSATH(VvV.w[i]));
+ fSETHALF(1, VdV.w[i], fVSATH(VuV.w[i])))
+
+ITERATOR_INSN2_ANY_SLOT(32,vsatuwuh,"Vd32=vsatuwuh(Vu32,Vv32)","Vd32.uh=vsat(Vu32.uw,Vv32.uw)",
+"Saturate and pack 16 words to 16 halfwords, and interleave them",
+ fSETHALF(0, VdV.w[i], fVSATUH(VvV.uw[i]));
+ fSETHALF(1, VdV.w[i], fVSATUH(VuV.uw[i])))
+
+ITERATOR_INSN2_ANY_SLOT(16,vshuffeb,"Vd32=vshuffeb(Vu32,Vv32)","Vd32.b=vshuffe(Vu32.b,Vv32.b)",
+"Shuffle half words with in a lane",
+ fSETBYTE(0, VdV.uh[i], fGETUBYTE(0, VvV.uh[i]));
+ fSETBYTE(1, VdV.uh[i], fGETUBYTE(0, VuV.uh[i])))
+
+ITERATOR_INSN2_ANY_SLOT(16,vshuffob,"Vd32=vshuffob(Vu32,Vv32)","Vd32.b=vshuffo(Vu32.b,Vv32.b)",
+"Shuffle half words with in a lane",
+ fSETBYTE(0, VdV.uh[i], fGETUBYTE(1, VvV.uh[i]));
+ fSETBYTE(1, VdV.uh[i], fGETUBYTE(1, VuV.uh[i])))
+
+ITERATOR_INSN2_ANY_SLOT(32,vshufeh,"Vd32=vshuffeh(Vu32,Vv32)","Vd32.h=vshuffe(Vu32.h,Vv32.h)",
+"Shuffle half words with in a lane",
+ fSETHALF(0, VdV.uw[i], fGETUHALF(0, VvV.uw[i]));
+ fSETHALF(1, VdV.uw[i], fGETUHALF(0, VuV.uw[i])))
+
+ITERATOR_INSN2_ANY_SLOT(32,vshufoh,"Vd32=vshuffoh(Vu32,Vv32)","Vd32.h=vshuffo(Vu32.h,Vv32.h)",
+"Shuffle half words with in a lane",
+ fSETHALF(0, VdV.uw[i], fGETUHALF(1, VvV.uw[i]));
+ fSETHALF(1, VdV.uw[i], fGETUHALF(1, VuV.uw[i])))
+
+
+
+
+/**************************************************************************
+* Double Vector Shuffles
+**************************************************************************/
+
+EXTINSN(V6_vshuff, "vshuff(Vy32,Vx32,Rt32)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS),
+"2x2->2x2 transpose, for multiple data sizes, inplace",
+{
+ fHIDE(int offset;)
+ for (offset=1; offset<fVBYTES(); offset<<=1) {
+ if ( RtV & offset) {
+ fHIDE(int k;) \
+ fVFOREACH(8, k) {\
+ if (!( k & offset)) {
+ fSWAPB(VyV.ub[k], VxV.ub[k+offset]);
+ }
+ }
+ }
+ }
+ })
+
+EXTINSN(V6_vshuffvdd, "Vdd32=vshuff(Vu32,Vv32,Rt8)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS),
+"2x2->2x2 transpose for multiple data sizes",
+{
+ fHIDE(int offset;)
+ VddV.v[0] = VvV;
+ VddV.v[1] = VuV;
+ for (offset=1; offset<fVBYTES(); offset<<=1) {
+ if ( RtV & offset) {
+ fHIDE(int k;) \
+ fVFOREACH(8, k) {\
+ if (!( k & offset)) {
+ fSWAPB(VddV.v[1].ub[k], VddV.v[0].ub[k+offset]);
+ }
+ }
+ }
+ }
+ })
+
+EXTINSN(V6_vdeal, "vdeal(Vy32,Vx32,Rt32)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS),
+" vector - vector deal - or deinterleave, for multiple data sizes, inplace",
+{
+ fHIDE(int offset;)
+ for (offset=fVBYTES()>>1; offset>0; offset>>=1) {
+ if ( RtV & offset) {
+ fHIDE(int k;) \
+ fVFOREACH(8, k) {\
+ if (!( k & offset)) {
+ fSWAPB(VyV.ub[k], VxV.ub[k+offset]);
+ }
+ }
+ }
+ }
+ })
+
+EXTINSN(V6_vdealvdd, "Vdd32=vdeal(Vu32,Vv32,Rt8)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS),
+" vector - vector deal - or deinterleave, for multiple data sizes",
+{
+ fHIDE(int offset;)
+ VddV.v[0] = VvV;
+ VddV.v[1] = VuV;
+ for (offset=fVBYTES()>>1; offset>0; offset>>=1) {
+ if ( RtV & offset) {
+ fHIDE(int k;) \
+ fVFOREACH(8, k) {\
+ if (!( k & offset)) {
+ fSWAPB(VddV.v[1].ub[k], VddV.v[0].ub[k+offset]);
+ }
+ }
+ }
+ }
+ })
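+
+/* In the four shuffle/deal ops above, each set bit `offset` in Rt enables
+ * one butterfly stage that swaps element k (bit `offset` of k clear) of one
+ * vector with element k+offset of the other.  vshuff applies stages from
+ * stride 1 upward (interleaving), vdeal from the largest stride downward
+ * (deinterleaving); e.g. RtV==1 swaps each even byte of Vy with the
+ * following odd byte of Vx, pairing adjacent bytes across the two vectors.
+ */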
+
+/**************************************************************************/
+
+
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(32,vshufoeh,"Vdd32=vshuffoeh(Vu32,Vv32)","Vdd32.h=vshuffoe(Vu32.h,Vv32.h)",
+"Vector Shuffle half words",
+ fSETHALF(0, VddV.v[0].uw[i], fGETUHALF(0, VvV.uw[i]));
+ fSETHALF(1, VddV.v[0].uw[i], fGETUHALF(0, VuV.uw[i]));
+ fSETHALF(0, VddV.v[1].uw[i], fGETUHALF(1, VvV.uw[i]));
+ fSETHALF(1, VddV.v[1].uw[i], fGETUHALF(1, VuV.uw[i])))
+
+ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(16,vshufoeb,"Vdd32=vshuffoeb(Vu32,Vv32)","Vdd32.b=vshuffoe(Vu32.b,Vv32.b)",
+"Vector Shuffle bytes",
+ fSETBYTE(0, VddV.v[0].uh[i], fGETUBYTE(0, VvV.uh[i]));
+ fSETBYTE(1, VddV.v[0].uh[i], fGETUBYTE(0, VuV.uh[i]));
+ fSETBYTE(0, VddV.v[1].uh[i], fGETUBYTE(1, VvV.uh[i]));
+ fSETBYTE(1, VddV.v[1].uh[i], fGETUBYTE(1, VuV.uh[i])))
+
+
+/***************************************************************
+* Deal
+***************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT(32, vdealh, "Vd32=vdealh(Vu32)", "Vd32.h=vdeal(Vu32.h)",
+"Deal Halfwords",
+ VdV.uh[i ] = fGETUHALF(0, VuV.uw[i]);
+ VdV.uh[i+fVELEM(32)] = fGETUHALF(1, VuV.uw[i]))
+
+ITERATOR_INSN2_PERMUTE_SLOT(16, vdealb, "Vd32=vdealb(Vu32)", "Vd32.b=vdeal(Vu32.b)",
+"Deal Halfwords",
+ VdV.ub[i ] = fGETUBYTE(0, VuV.uh[i]);
+ VdV.ub[i+fVELEM(16)] = fGETUBYTE(1, VuV.uh[i]))
+
+ITERATOR_INSN2_PERMUTE_SLOT(32, vdealb4w, "Vd32=vdealb4w(Vu32,Vv32)", "Vd32.b=vdeale(Vu32.b,Vv32.b)",
+"Deal Two Vectors Bytes",
+ VdV.ub[0+i ] = fGETUBYTE(0, VvV.uw[i]);
+ VdV.ub[fVELEM(32)+i ] = fGETUBYTE(2, VvV.uw[i]);
+ VdV.ub[2*fVELEM(32)+i] = fGETUBYTE(0, VuV.uw[i]);
+ VdV.ub[3*fVELEM(32)+i] = fGETUBYTE(2, VuV.uw[i]))
+
+/***************************************************************
+* shuffle
+***************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT(32, vshuffh, "Vd32=vshuffh(Vu32)", "Vd32.h=vshuff(Vu32.h)",
+"Deal Halfwords",
+ fSETHALF(0, VdV.uw[i], VuV.uh[i]);
+ fSETHALF(1, VdV.uw[i], VuV.uh[i+fVELEM(32)]))
+
+ITERATOR_INSN2_PERMUTE_SLOT(16, vshuffb, "Vd32=vshuffb(Vu32)", "Vd32.b=vshuff(Vu32.b)",
+"Deal Halfwords",
+ fSETBYTE(0, VdV.uh[i], VuV.ub[i]);
+ fSETBYTE(1, VdV.uh[i], VuV.ub[i+fVELEM(16)]))
+
+
+
+
+
+/***********************************************************
+* INSERT AND EXTRACT
+*********************************************************/
+EXTINSN(V6_extractw, "Rd32=vextract(Vu32,Rs32)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA,A_MEMLIKE,A_RESTRICT_SLOT0ONLY),
+"Extract an element from a vector to scalar",
+fHIDE(warn("RdN=%d VuN=%d RsN=%d RsV=0x%08x widx=%d",RdN,VuN,RsN,RsV,((RsV & (fVBYTES()-1)) >> 2));)
+RdV = VuV.uw[ (RsV & (fVBYTES()-1)) >> 2];
+fHIDE(warn("RdV=0x%08x",RdV);))
+
+EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)",
+ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX),
+"Insert Word Scalar into Vector",
+VxV.uw[0] = RtV;)
+
+
+
+
+ITERATOR_INSN_MPY_SLOT_LATE(32,lvsplatw, "Vd32=vsplat(Rt32)", "Replicates scalar across words in vector", VdV.uw[i] = RtV)
+
+ITERATOR_INSN_MPY_SLOT_LATE(16,lvsplath, "Vd32.h=vsplat(Rt32)", "Replicates scalar across halves in vector", VdV.uh[i] = RtV)
+
+ITERATOR_INSN_MPY_SLOT_LATE(8,lvsplatb, "Vd32.b=vsplat(Rt32)", "Replicates scalar across bytes in vector", VdV.ub[i] = RtV)
+
+
+ITERATOR_INSN_ANY_SLOT(32,vassign,"Vd32=Vu32","Copy a vector",VdV.w[i]=VuV.w[i])
+
+
+ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vcombine,"Vdd32=vcombine(Vu32,Vv32)",
+"Vector assign, Any two to Vector Pair",
+ VddV.v[0].ub[i] = VvV.ub[i];
+ VddV.v[1].ub[i] = VuV.ub[i])
+
+
+
+///////////////////////////////////////////////////////////////////////////
+
+
+/*********************************************************
+* GENERAL PERMUTE NETWORKS
+*********************************************************/
+
+
+EXTINSN(V6_vdelta, "Vd32=vdelta(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),
+"Reverse Benes Butterfly network ",
+{
+ fHIDE(int offset;)
+ fHIDE(int k;)
+ fHIDE(mmvector_t tmp;)
+ tmp = VuV;
+ for (offset=fVBYTES(); (offset>>=1)>0; ) {
+ for (k = 0; k<fVBYTES(); k++) {
+ VdV.ub[k] = (VvV.ub[k]&offset) ? tmp.ub[k^offset] : tmp.ub[k];
+ }
+ for (k = 0; k<fVBYTES(); k++) {
+ tmp.ub[k] = VdV.ub[k];
+ }
+ }
+})
+
+
+EXTINSN(V6_vrdelta, "Vd32=vrdelta(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),
+"Forward Benes Butterfly network ",
+{
+ fHIDE(int offset;)
+ fHIDE(int k;)
+ fHIDE(mmvector_t tmp;)
+ tmp = VuV;
+ for (offset=1; offset<fVBYTES(); offset<<=1){
+ for (k = 0; k<fVBYTES(); k++) {
+ VdV.ub[k] = (VvV.ub[k]&offset) ? tmp.ub[k^offset] : tmp.ub[k];
+ }
+ for (k = 0; k<fVBYTES(); k++) {
+ tmp.ub[k] = VdV.ub[k];
+ }
+ }
+})
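+
+/* Each vdelta/vrdelta stage lets destination byte k select either tmp[k] or
+ * tmp[k^offset], controlled by bit `offset` of the control byte VvV.ub[k];
+ * vdelta walks the strides from fVBYTES()/2 down to 1 and vrdelta from 1
+ * up, so chained control bytes route elements through a Benes-style
+ * network.  For instance, setting every control byte to fVBYTES()-1 (all
+ * stage bits set) should reverse the byte order of Vu.
+ */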
+
+
+
+
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vcl0w,"Vd32=vcl0w(Vu32)","Vd32.uw=vcl0(Vu32.uw)", "Count Leading Zeros in Word", VdV.uw[i]=fCL1_4(~VuV.uw[i]))
+ITERATOR_INSN2_SHIFT_SLOT(16,vcl0h,"Vd32=vcl0h(Vu32)","Vd32.uh=vcl0(Vu32.uh)", "Count Leading Zeros in Halfword", VdV.uh[i]=fCL1_2(~VuV.uh[i]))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vnormamtw,"Vd32=vnormamtw(Vu32)","Vd32.w=vnormamt(Vu32.w)","Norm Amount Word",
+VdV.w[i]=fMAX(fCL1_4(~VuV.w[i]),fCL1_4(VuV.w[i]))-1; fHIDE(IV1DEAD();))
+ITERATOR_INSN2_SHIFT_SLOT(16,vnormamth,"Vd32=vnormamth(Vu32)","Vd32.h=vnormamt(Vu32.h)","Norm Amount Halfword",
+VdV.h[i]=fMAX(fCL1_2(~VuV.h[i]),fCL1_2(VuV.h[i]))-1; fHIDE(IV1DEAD();))
+
+ITERATOR_INSN_SHIFT_SLOT_VV_LATE(32,vaddclbw,"Vd32.w=vadd(vclb(Vu32.w),Vv32.w)",
+"Count leading bits and add",
+VdV.w[i] = fMAX(fCL1_4(~VuV.w[i]),fCL1_4(VuV.w[i])) + VvV.w[i])
+
+ITERATOR_INSN_SHIFT_SLOT_VV_LATE(16,vaddclbh,"Vd32.h=vadd(vclb(Vu32.h),Vv32.h)",
+"Count leading bits and add",
+VdV.h[i] = fMAX(fCL1_2(~VuV.h[i]),fCL1_2(VuV.h[i])) + VvV.h[i])
+
+
+ITERATOR_INSN2_SHIFT_SLOT(16,vpopcounth,"Vd32=vpopcounth(Vu32)","Vd32.h=vpopcount(Vu32.h)", "Count Ones in Halfword", VdV.uh[i]=fCOUNTONES_2(VuV.uh[i]))
+
+
+#define fHIST(INPUTVEC) \
+ fUARCH_NOTE_PUMP_4X(); \
+ fHIDE(int lane;) \
+ fHIDE(mmvector_t tmp;) \
+ fVFOREACH(128, lane) { \
+ for (fHIDE(int )i=0; i<128/8; ++i) { \
+ unsigned char value = INPUTVEC.ub[(128/8)*lane+i]; \
+ unsigned char regno = value>>3; \
+ unsigned char element = value & 7; \
+ READ_EXT_VREG(regno,tmp,0); \
+ tmp.uh[(128/16)*lane+(element)]++; \
+ WRITE_EXT_VREG(regno,tmp,EXT_NEW); \
+ } \
+ }
+
+#define fHISTQ(INPUTVEC,QVAL) \
+ fUARCH_NOTE_PUMP_4X(); \
+ fHIDE(int lane;) \
+ fHIDE(mmvector_t tmp;) \
+ fVFOREACH(128, lane) { \
+ for (fHIDE(int )i=0; i<128/8; ++i) { \
+ unsigned char value = INPUTVEC.ub[(128/8)*lane+i]; \
+ unsigned char regno = value>>3; \
+ unsigned char element = value & 7; \
+ READ_EXT_VREG(regno,tmp,0); \
+ if (fGETQBIT(QVAL,128/8*lane+i)) tmp.uh[(128/16)*lane+(element)]++; \
+ WRITE_EXT_VREG(regno,tmp,EXT_NEW); \
+ } \
+ }
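+
+/* Sketch of the histogram semantics: within each 128-bit lane, each of the
+ * 16 input bytes is a bin number whose bits 7:3 select one of 32 vector
+ * registers and whose bits 2:0 select one of the lane's eight halfword
+ * counters in that register; the counter is incremented (per-byte
+ * predicated in the Q form).
+ */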
+
+
+
+EXTINSN(V6_vhist, "vhist",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHIST(inputVec); })
+EXTINSN(V6_vhistq, "vhist(Qv4)",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHISTQ(inputVec,QvV); })
+
+#undef fHIST
+#undef fHISTQ
+
+
+/* **** WEIGHTED HISTOGRAM **** */
+
+
+#if 1
+#define WHIST(EL,MASK,BSHIFT,COND,SATF) \
+ fHIDE(unsigned int) bucket = fGETUBYTE(0,input.h[i]); \
+ fHIDE(unsigned int) weight = fGETUBYTE(1,input.h[i]); \
+ fHIDE(unsigned int) vindex = (bucket >> 3) & 0x1F; \
+ fHIDE(unsigned int) elindex = ((i>>BSHIFT) & (~MASK)) | ((bucket>>BSHIFT) & MASK); \
+ fHIDE(mmvector_t tmp;) \
+ READ_EXT_VREG(vindex,tmp,0); \
+ COND tmp.EL[elindex] = SATF(tmp.EL[elindex] + weight); \
+ WRITE_EXT_VREG(vindex,tmp,EXT_NEW); \
+ fUARCH_NOTE_PUMP_2X();
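+
+/* Sketch of the weighted-histogram step: each input halfword packs a bucket
+ * (low byte) and a weight (high byte).  Bucket bits 7:3 pick one of 32
+ * vector registers, the low bucket bits replace part of the counter index,
+ * and the weight is added to the selected counter -- saturating in the :sat
+ * forms, and predicated by Q (or by the bucket LSB in the #u1 forms) below.
+ */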
+
+ITERATOR_INSN_VHISTLIKE(16,vwhist256,"vwhist256","vector weighted histogram halfword counters", WHIST(uh,7,0,,))
+ITERATOR_INSN_VHISTLIKE(16,vwhist256q,"vwhist256(Qv4)","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),))
+ITERATOR_INSN_VHISTLIKE(16,vwhist256_sat,"vwhist256:sat","vector weighted histogram halfword counters", WHIST(uh,7,0,,fVSATUH))
+ITERATOR_INSN_VHISTLIKE(16,vwhist256q_sat,"vwhist256(Qv4):sat","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),fVSATUH))
+ITERATOR_INSN_VHISTLIKE(16,vwhist128,"vwhist128","vector weighted histogram word counters", WHIST(uw,3,1,,))
+ITERATOR_INSN_VHISTLIKE(16,vwhist128q,"vwhist128(Qv4)","vector weighted histogram word counters", WHIST(uw,3,1,if (fGETQBIT(QvV,2*i)),))
+ITERATOR_INSN_VHISTLIKE(16,vwhist128m,"vwhist128(#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if ((bucket & 1) == uiV),))
+ITERATOR_INSN_VHISTLIKE(16,vwhist128qm,"vwhist128(Qv4,#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if (((bucket & 1) == uiV) && fGETQBIT(QvV,2*i)),))
+
+
+#endif
+
+
+
+/* ****** lookup table instructions *********** */
+
+/* Use low bits from idx to choose next-bigger elements from vector, then use LSB from idx to choose odd or even element */
+
+ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = RtV & 0x7;
+oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = VuV.ub[i];
+VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0)
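+
+/* Worked example, assuming 128-byte vector mode (fVECLOGSIZE()==7): the
+ * oddhalf select is (RtV>>1)&1, and each vlut32 covers one 32-entry slice
+ * of a 256-entry byte table -- a lane contributes only when the top three
+ * bits of its index equal matchval, so eight OR-accumulated vlut32 passes
+ * (matchval 0..7) assemble a full 256-entry lookup.
+ */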
+
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracc,"Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = RtV & 0x7;
+oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = VuV.ub[i];
+VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = RtV & 0xF;
+oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = fGETUBYTE(0,VuV.uh[i]);
+VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0;
+idx = fGETUBYTE(1,VuV.uh[i]);
+VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracc,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = fGETUBYTE(0,RtV) & 0xF;
+oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = fGETUBYTE(0,VuV.uh[i]);
+VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0;
+idx = fGETUBYTE(1,VuV.uh[i]);
+VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvbi,"Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = uiV & 0x7;
+oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = VuV.ub[i];
+VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0)
+
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracci,"Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = uiV & 0x7;
+oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = VuV.ub[i];
+VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwhi,"Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = uiV & 0xF;
+oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = fGETUBYTE(0,VuV.uh[i]);
+VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0;
+idx = fGETUBYTE(1,VuV.uh[i]);
+VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracci,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;)
+matchval = uiV & 0xF;
+oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1;
+idx = fGETUBYTE(0,VuV.uh[i]);
+VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0;
+idx = fGETUBYTE(1,VuV.uh[i]);
+VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0)
+
+ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb_nm,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;)
+ matchval = RtV & 0x7;
+ oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+ idx = VuV.ub[i];
+ idx = (idx&0x1F) | (matchval<<5);
+ VdV.b[i] = fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]))
+
+ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_nm,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch","vector-vector table lookup",
+fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;)
+ matchval = RtV & 0xF;
+ oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1;
+ idx = fGETUBYTE(0,VuV.uh[i]);
+ idx = (idx&0x0F) | (matchval<<4);
+ VddV.v[0].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]);
+ idx = fGETUBYTE(1,VuV.uh[i]);
+ idx = (idx&0x0F) | (matchval<<4);
+ VddV.v[1].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]))
+
+
+
+
+/******************************************************************************
+NON LINEAR - V65
+ ******************************************************************************/
+
+ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpahhsat,"Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat","piecewise linear approximation",
+ VxV.h[i]= fVSATH( ( ( fMPY16SS(VxV.h[i],VuV.h[i])<<1) + (fGETHALF(( (VuV.h[i]>>14)&0x3), RttV )<<15))>>16))
+
+
+ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpauhuhsat,"Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation",
+ VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) + (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16))
+
+ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpsuhuhsat,"Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation",
+ VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) - (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16))
+
+
+ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vlut4,"Vd32.h=vlut4(Vu32.uh,Rtt32.h)","4 entry lookup table",
+ VdV.h[i]= fGETHALF( ((VuV.h[i]>>14)&0x3), RttV ))
+
+
+
+/******************************************************************************
+V65
+ ******************************************************************************/
+
+ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe,"Vd32.uw=vmpye(Vu32.uh,Rt32.uh)",
+"Vector even halfword unsigned multiply by scalar",
+ VdV.uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)))
+
+
+ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe_acc,"Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)",
+"Vector even halfword unsigned multiply by scalar",
+ VxV.uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)))
+
+
+
+
+EXTINSN(V6_vgathermw, "vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 4;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ EA = RtV+VvV.uw[i];
+ fVLOG_VTCM_GATHER_WORD(EA, VvV.uw[i], i,MuV);
+ }
+ fGATHER_FINISH()
+})
+EXTINSN(V6_vgathermh, "vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(16, i) {
+ EA = RtV+VvV.uh[i];
+ fVLOG_VTCM_GATHER_HALFWORD(EA, VvV.uh[i], i,MuV);
+ }
+ fGATHER_FINISH()
+})
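+
+/* Gather addressing sketch: RtV is roughly the VTCM region base and MuV the
+ * region length; fVALIGN/fVLASTBYTE clamp the base down and the limit up to
+ * element granularity, then each lane conceptually performs
+ *
+ *   vtmp.uw[i] = mem32(RtV + VvV.uw[i]);   // word form
+ *
+ * with the fVLOG_VTCM_GATHER_* macros logging the accesses for the VTCM
+ * model.
+ */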
+
+
+
+EXTINSN(V6_vgathermhw, "vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords",
+{
+ fHIDE(int i;)
+ fHIDE(int j;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ for(j = 0; j < 2; j++) {
+ EA = RtV+VvvV.v[j].uw[i];
+ fVLOG_VTCM_GATHER_HALFWORD_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,MuV);
+ }
+ }
+ fGATHER_FINISH()
+})
+
+
+EXTINSN(V6_vgathermwq, "if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 4;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ EA = RtV+VvV.uw[i];
+ fVLOG_VTCM_GATHER_WORDQ(EA, VvV.uw[i], i,QsV,MuV);
+ }
+ fGATHER_FINISH()
+})
+EXTINSN(V6_vgathermhq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(16, i) {
+ EA = RtV+VvV.uh[i];
+ fVLOG_VTCM_GATHER_HALFWORDQ(EA, VvV.uh[i], i,QsV,MuV);
+ }
+ fGATHER_FINISH()
+})
+
+
+
+EXTINSN(V6_vgathermhwq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords",
+{
+ fHIDE(int i;)
+ fHIDE(int j;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fGATHER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ for(j = 0; j < 2; j++) {
+ EA = RtV+VvvV.v[j].uw[i];
+ fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,QsV,MuV);
+ }
+ }
+ fGATHER_FINISH()
+})
+
+
+
+EXTINSN(V6_vscattermw , "vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 4;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ EA = RtV+VvV.uw[i];
+ fVLOG_VTCM_WORD(EA, VvV.uw[i], VwV,i,MuV);
+ }
+ fSCATTER_FINISH(0)
+})
+
+
+
+EXTINSN(V6_vscattermh , "vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfWords",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(16, i) {
+ EA = RtV+VvV.uh[i];
+ fVLOG_VTCM_HALFWORD(EA,VvV.uh[i],VwV,i,MuV);
+ }
+ fSCATTER_FINISH(0)
+})
+
+
+EXTINSN(V6_vscattermw_add, "vscatter(Rt32,Mu2,Vv32.w).w+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words-Add",
+{
+ fHIDE(int i;)
+ fHIDE(int ALIGNMENT=4;)
+ fHIDE(int element_size = 4;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ EA = (RtV+fVALIGN(VvV.uw[i],ALIGNMENT));
+ fVLOG_VTCM_WORD_INCREMENT(EA,VvV.uw[i],VwV,i,ALIGNMENT,MuV);
+ }
+ fHIDE(fLOG_SCATTER_OP(4);)
+ fSCATTER_FINISH(1)
+})
+
+EXTINSN(V6_vscattermh_add, "vscatter(Rt32,Mu2,Vv32.h).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfword-Add",
+{
+ fHIDE(int i;)
+ fHIDE(int ALIGNMENT=2;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(16, i) {
+ EA = (RtV+fVALIGN(VvV.uh[i],ALIGNMENT));
+ fVLOG_VTCM_HALFWORD_INCREMENT(EA,VvV.uh[i],VwV,i,ALIGNMENT,MuV);
+ }
+ fHIDE(fLOG_SCATTER_OP(2);)
+ fSCATTER_FINISH(1)
+})
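+
+/* The -add scatter forms are a read-modify-write per element: conceptually
+ *
+ *   mem16(RtV + VvV.uh[i]) += VwV.h[i];   // halfword form
+ *
+ * with offsets first aligned down to the element size and fLOG_SCATTER_OP
+ * recording the operation width for the VTCM model.
+ */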
+
+
+EXTINSN(V6_vscattermwq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words conditional",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 4;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ EA = RtV+VvV.uw[i];
+ fVLOG_VTCM_WORDQ(EA,VvV.uw[i], VwV,i,QsV,MuV);
+ }
+ fSCATTER_FINISH(0)
+})
+
+EXTINSN(V6_vscattermhq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter HalfWords conditional",
+{
+ fHIDE(int i;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(16, i) {
+ EA = RtV+VvV.uh[i];
+ fVLOG_VTCM_HALFWORDQ(EA,VvV.uh[i],VwV,i,QsV,MuV);
+ }
+ fSCATTER_FINISH(0)
+})
+
+
+
+
+EXTINSN(V6_vscattermhw , "vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter Words",
+{
+ fHIDE(int i;)
+ fHIDE(int j;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ for(j = 0; j < 2; j++) {
+ EA = RtV+VvvV.v[j].uw[i];
+ fVLOG_VTCM_HALFWORD_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,MuV);
+ }
+ }
+ fSCATTER_FINISH(0)
+})
+
+
+
+EXTINSN(V6_vscattermhwq, "if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords conditional",
+{
+ fHIDE(int i;)
+ fHIDE(int j;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ for(j = 0; j < 2; j++) {
+ EA = RtV+VvvV.v[j].uw[i];
+ fVLOG_VTCM_HALFWORDQ_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),QsV,i,j,MuV);
+ }
+ }
+ fSCATTER_FINISH(0)
+})
+
+EXTINSN(V6_vscattermhw_add, "vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords-add",
+{
+ fHIDE(int i;)
+ fHIDE(int j;)
+ fHIDE(int ALIGNMENT=2;)
+ fHIDE(int element_size = 2;)
+ fHIDE(fSCATTER_INIT( RtV, MuV, element_size);)
+ fVLASTBYTE(MuV, element_size);
+ fVALIGN(RtV, element_size);
+ fVFOREACH(32, i) {
+ for(j = 0; j < 2; j++) {
+            EA = RtV + fVALIGN(VvvV.v[j].uw[i],ALIGNMENT);
+ fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,ALIGNMENT,MuV);
+ }
+ }
+ fHIDE(fLOG_SCATTER_OP(2);)
+ fSCATTER_FINISH(1)
+})
+
+EXTINSN(V6_vprefixqb,"Vd32.b=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into bytes",
+{
+ fHIDE(int i;)
+ fHIDE(size1u_t acc = 0;)
+ fVFOREACH(8, i) {
+ acc += fGETQBIT(QvV,i);
+ VdV.ub[i] = acc;
+ }
+ } )
+EXTINSN(V6_vprefixqh,"Vd32.h=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into halfwords",
+{
+ fHIDE(int i;)
+ fHIDE(size2u_t acc = 0;)
+ fVFOREACH(16, i) {
+ acc += fGETQBIT(QvV,i*2+0);
+ acc += fGETQBIT(QvV,i*2+1);
+ VdV.uh[i] = acc;
+ }
+ } )
+EXTINSN(V6_vprefixqw,"Vd32.w=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into words",
+{
+ fHIDE(int i;)
+ fHIDE(size4u_t acc = 0;)
+ fVFOREACH(32, i) {
+ acc += fGETQBIT(QvV,i*4+0);
+ acc += fGETQBIT(QvV,i*4+1);
+ acc += fGETQBIT(QvV,i*4+2);
+ acc += fGETQBIT(QvV,i*4+3);
+ VdV.uw[i] = acc;
+ }
+ } )
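+
+/* Example of the prefix-sum semantics: for predicate bits 1,0,1,1,... the
+ * byte form produces the running (inclusive) counts 1,1,2,3,...; the
+ * halfword and word forms accumulate 2 and 4 Q bits per element
+ * respectively.
+ */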
+
+
+
+
+
+/******************************************************************************
+ DEBUG Vector/Register Printing
+ ******************************************************************************/
+
+#define PRINT_VU(TYPE, TYPE2, COUNT)\
+ int i; \
+ size4u_t vec_len = fVBYTES();\
+ fprintf(stdout,"V%2d: ",VuN); \
+ for (i=0;i<vec_len>>COUNT;i++) { \
+ fprintf(stdout,TYPE2 " ", VuV.TYPE[i]); \
+ }; \
+ fprintf(stdout,"\\n"); \
+ fflush(stdout);\
+
+#undef ATTR_VMEM
+#undef ATTR_VMEMU
+#undef ATTR_VMEM_NT
+
+#endif /* NO_MMVEC */
+
+#ifdef __SELF_DEF_EXTINSN
+#undef EXTINSN
+#undef __SELF_DEF_EXTINSN
+#endif
diff --git a/target/hexagon/imported/mmvec/macros.def b/target/hexagon/imported/mmvec/macros.def
new file mode 100755
index 000000000..7e5438a99
--- /dev/null
+++ b/target/hexagon/imported/mmvec/macros.def
@@ -0,0 +1,842 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+DEF_MACRO(fDUMPQ,
+ do {
+ printf(STR ":" #REG ": 0x%016llx\n",REG.ud[0]);
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fUSE_LOOKUP_ADDRESS_BY_REV,
+ PROC->arch_proc_options->mmvec_use_full_va_for_lookup,
+ ()
+)
+
+DEF_MACRO(fUSE_LOOKUP_ADDRESS,
+ 1,
+ ()
+)
+
+DEF_MACRO(fNOTQ,
+ ({mmqreg_t _ret = {0}; int _i_; for (_i_ = 0; _i_ < fVECSIZE()/64; _i_++) _ret.ud[_i_] = ~VAL.ud[_i_]; _ret;}),
+ ()
+)
+
+DEF_MACRO(fGETQBITS,
+ ((MASK) & (REG.w[(BITNO)>>5] >> ((BITNO) & 0x1f))),
+ ()
+)
+
+DEF_MACRO(fGETQBIT,
+ fGETQBITS(REG,1,1,BITNO),
+ ()
+)
+
+DEF_MACRO(fGENMASKW,
+ (((fGETQBIT(QREG,(IDX*4+0)) ? 0xFF : 0x0) << 0)
+ |((fGETQBIT(QREG,(IDX*4+1)) ? 0xFF : 0x0) << 8)
+ |((fGETQBIT(QREG,(IDX*4+2)) ? 0xFF : 0x0) << 16)
+ |((fGETQBIT(QREG,(IDX*4+3)) ? 0xFF : 0x0) << 24)),
+ ()
+)
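+
+/*
+ * Expansion sketch (illustrative): the predicate register is indexed as
+ * 32-bit words, so fGETQBIT(REG, BITNO) reduces to
+ *
+ *     (REG.w[BITNO >> 5] >> (BITNO & 0x1f)) & 1;
+ *
+ * and fGENMASKW widens qbits 4*IDX..4*IDX+3 into a 32-bit byte mask,
+ * e.g. qbits {1,0,1,1} -> 0xFFFF00FF.
+ */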
+DEF_MACRO(fGET10BIT,
+ {
+ COE = (((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6);
+ COE >>= 6;
+ },
+ ()
+)
+
+DEF_MACRO(fVMAX,
+ (X>Y) ? X : Y,
+ ()
+)
+
+
+DEF_MACRO(fGETNIBBLE,
+ ( fSXTN(4,8,(SRC >> (4*IDX)) & 0xF) ),
+ ()
+)
+
+DEF_MACRO(fGETCRUMB,
+ ( fSXTN(2,8,(SRC >> (2*IDX)) & 0x3) ),
+ ()
+)
+
+DEF_MACRO(fGETCRUMB_SYMMETRIC,
+ ( (fGETCRUMB(IDX,SRC)>=0 ? (2-fGETCRUMB(IDX,SRC)) : fGETCRUMB(IDX,SRC) ) ),
+ ()
+)
+
+#define ZERO_OFFSET_2B +
+
+DEF_MACRO(fGENMASKH,
+ (((fGETQBIT(QREG,(IDX*2+0)) ? 0xFF : 0x0) << 0)
+ |((fGETQBIT(QREG,(IDX*2+1)) ? 0xFF : 0x0) << 8)),
+ ()
+)
+
+DEF_MACRO(fGETMASKW,
+ (VREG.w[IDX] & fGENMASKW((QREG),IDX)),
+ ()
+)
+
+DEF_MACRO(fGETMASKH,
+ (VREG.h[IDX] & fGENMASKH((QREG),IDX)),
+ ()
+)
+
+DEF_MACRO(fCONDMASK8,
+ (fGETQBIT(QREG,IDX) ? (YESVAL) : (NOVAL)),
+ ()
+)
+
+DEF_MACRO(fCONDMASK16,
+ ((fGENMASKH(QREG,IDX) & (YESVAL)) | (fGENMASKH(fNOTQ(QREG),IDX) & (NOVAL))),
+ ()
+)
+
+DEF_MACRO(fCONDMASK32,
+ ((fGENMASKW(QREG,IDX) & (YESVAL)) | (fGENMASKW(fNOTQ(QREG),IDX) & (NOVAL))),
+ ()
+)
+
+
+DEF_MACRO(fSETQBITS,
+ do {
+ size4u_t __TMP = (VAL);
+ REG.w[(BITNO)>>5] &= ~((MASK) << ((BITNO) & 0x1f));
+ REG.w[(BITNO)>>5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f));
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fSETQBIT,
+ fSETQBITS(REG,1,1,BITNO,VAL),
+ ()
+)
+
+DEF_MACRO(fVBYTES,
+ (fVECSIZE()),
+ ()
+)
+
+DEF_MACRO(fVHALVES,
+ (fVECSIZE()/2),
+ ()
+)
+
+DEF_MACRO(fVWORDS,
+ (fVECSIZE()/4),
+ ()
+)
+
+DEF_MACRO(fVDWORDS,
+ (fVECSIZE()/8),
+ ()
+)
+
+DEF_MACRO(fVALIGN,
+ ( ADDR = ADDR & ~(LOG2_ALIGNMENT-1)),
+ ()
+)
+
+DEF_MACRO(fVLASTBYTE,
+ ( ADDR = ADDR | (LOG2_ALIGNMENT-1)),
+ ()
+)
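+
+/*
+ * Usage note (from the callers in ext.idef): despite the LOG2_ALIGNMENT
+ * name, the second argument is the byte alignment itself (the element
+ * size), so fVALIGN rounds ADDR down to the aligned base and fVLASTBYTE
+ * rounds it up to the last byte of that aligned unit.
+ */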
+
+
+DEF_MACRO(fVELEM,
+ ((fVECSIZE()*8)/WIDTH),
+ ()
+)
+
+DEF_MACRO(fVECLOGSIZE,
+ (mmvec_current_veclogsize(thread)),
+ ()
+)
+
+DEF_MACRO(fVECSIZE,
+ (1<<fVECLOGSIZE()),
+ ()
+)
+
+DEF_MACRO(fSWAPB,
+ {
+ size1u_t tmp = A;
+ A = B;
+ B = tmp;
+ },
+ /* NOTHING */
+)
+
+DEF_MACRO(
+ fVZERO,
+ mmvec_zero_vector(),
+ ()
+)
+
+DEF_MACRO(
+ fNEWVREG,
+ ((THREAD2STRUCT->VRegs_updated & (((VRegMask)1)<<VNUM)) ? THREAD2STRUCT->future_VRegs[VNUM] : mmvec_zero_vector()),
+ (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY)
+)
+
+DEF_MACRO(
+ fV_AL_CHECK,
+ if ((EA) & (MASK)) {
+ warn("aligning misaligned vector. PC=%08x EA=%08x",thread->Regs[REG_PC],(EA));
+ },
+ ()
+)
+DEF_MACRO(fSCATTER_INIT,
+ {
+ mem_vector_scatter_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
+ if (EXCEPTION_DETECTED) return;
+ },
+ (A_STORE,A_MEMLIKE,A_RESTRICT_SLOT0ONLY)
+)
+
+DEF_MACRO(fGATHER_INIT,
+ {
+ mem_vector_gather_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
+ if (EXCEPTION_DETECTED) return;
+ },
+ (A_LOAD,A_MEMLIKE,A_RESTRICT_SLOT1ONLY)
+)
+
+DEF_MACRO(fSCATTER_FINISH,
+ {
+ if (EXCEPTION_DETECTED) return;
+ mem_vector_scatter_finish(thread, insn, OP);
+ },
+ ()
+)
+
+DEF_MACRO(fGATHER_FINISH,
+ {
+ if (EXCEPTION_DETECTED) return;
+ mem_vector_gather_finish(thread, insn);
+ },
+ ()
+)
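+
+/*
+ * Lifecycle sketch (illustrative): the scatter/gather instructions in
+ * ext.idef bracket their per-element logging between these hooks:
+ *
+ *     fSCATTER_INIT(RtV, MuV, element_size);  // validate region, may except
+ *     ...per-element LOG_VTCM_BYTE/LOG_VTCM_BANK logging...
+ *     fSCATTER_FINISH(op);                    // commit unless an exception hit
+ */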
+
+
+DEF_MACRO(CHECK_VTCM_PAGE,
+ {
+ int slot = insn->slot;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ pa = pa & ~(ALIGNMENT-1);
+ FLAG = (pa < (thread->mem_access[slot].paddr+LENGTH));
+ },
+ ()
+)
+DEF_MACRO(COUNT_OUT_OF_BOUNDS,
+ {
+ if (!FLAG)
+ {
+ THREAD2STRUCT->vtcm_log.oob_access += SIZE;
+ warn("Scatter/Gather out of bounds of region");
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fLOG_SCATTER_OP,
+ {
+ // Log the operation size and flag that ext.c must apply the increment right before the memory write
+ THREAD2STRUCT->vtcm_log.op = 1;
+ THREAD2STRUCT->vtcm_log.op_size = SIZE;
+ },
+ ()
+)
+
+
+
+DEF_MACRO(fVLOG_VTCM_WORD_INCREMENT,
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte = 0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 4; i0++)
+ {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[4*IDX+i0],4*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank, IDX); }
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT,
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte = 0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 2; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank,IDX); }
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT_DV,
+ {
+ int slot = insn->slot;
+ int log_bank = 0;
+ int log_byte = 0;
+ paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ for(int i0 = 0; i0 < 2; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
+ }
+ { LOG_VTCM_BANK(pa, log_bank,(2*IDX2+IDX_H));}
+ },
+ ()
+)
+
+
+
+DEF_MACRO(GATHER_FUNCTION,
+{
+ int slot = insn->slot;
+ int i0;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ int log_bank = 0;
+ int log_byte = 0;
+ for(i0 = 0; i0 < ELEMENT_SIZE; i0++)
+ {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
+ log_bank |= (log_byte<<i0);
+ size1u_t B = sim_mem_read1(thread->system_ptr, thread->threadId, thread->mem_access[slot].paddr+OFFSET+i0);
+ THREAD2STRUCT->tmp_VRegs[0].ub[ELEMENT_SIZE*IDX+i0] = B;
+ LOG_VTCM_BYTE(pa+i0,log_byte,B,ELEMENT_SIZE*IDX+i0);
+ }
+ LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
+},
+()
+)
+
+
+
+DEF_MACRO(fVLOG_VTCM_GATHER_WORD,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD_DV,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_WORDQ,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0));
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0));
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ_DV,
+ {
+ GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0));
+ },
+ ()
+)
+
+
+DEF_MACRO(DEBUG_LOG_ADDR,
+ {
+
+ if (thread->processor_ptr->arch_proc_options->mmvec_network_addr_log2)
+ {
+
+ int slot = insn->slot;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ }
+ },
+ ()
+)
+
+
+
+
+
+
+
+DEF_MACRO(SCATTER_OP_WRITE_TO_MEM,
+ {
+ for (int i = 0; i < mmvecx->vtcm_log.size; i+=sizeof(TYPE))
+ {
+ if ( mmvecx->vtcm_log.mask.ub[i] != 0) {
+ TYPE dst = 0;
+ TYPE inc = 0;
+ for(int j = 0; j < sizeof(TYPE); j++) {
+ dst |= (sim_mem_read1(thread->system_ptr, thread->threadId, mmvecx->vtcm_log.pa[i+j]) << (8*j));
+ inc |= mmvecx->vtcm_log.data.ub[j+i] << (8*j);
+
+ mmvecx->vtcm_log.mask.ub[j+i] = 0;
+ mmvecx->vtcm_log.data.ub[j+i] = 0;
+ mmvecx->vtcm_log.offsets.ub[j+i] = 0;
+ }
+ dst += inc;
+ for(int j = 0; j < sizeof(TYPE); j++) {
+ sim_mem_write1(thread->system_ptr,thread->threadId, mmvecx->vtcm_log.pa[i+j], (dst >> (8*j))& 0xFF );
+ }
+ }
+
+ }
+ },
+ ()
+)
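+
+/*
+ * Read-modify-write sketch (illustrative, for TYPE = size2u_t): each masked
+ * element is rebuilt byte-by-byte, incremented, and stored back:
+ *
+ *     size2u_t dst = mem[pa] | (mem[pa + 1] << 8);  // little-endian load
+ *     dst += inc;                                   // add the logged data
+ *     mem[pa]     = dst & 0xff;                     // write low byte back
+ *     mem[pa + 1] = dst >> 8;                       // then the high byte
+ */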
+
+DEF_MACRO(SCATTER_FUNCTION,
+{
+ int slot = insn->slot;
+ int i0;
+ paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
+ paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
+ int log_bank = 0;
+ int log_byte = 0;
+ for(i0 = 0; i0 < ELEMENT_SIZE; i0++) {
+ log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
+ log_bank |= (log_byte<<i0);
+ LOG_VTCM_BYTE(pa+i0,log_byte,IN.ub[ELEMENT_SIZE*IDX+i0],ELEMENT_SIZE*IDX+i0);
+ }
+ LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
+
+},
+()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, 1, IN);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_WORD,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, 1, IN);
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORDQ,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0), IN);
+ },
+ ()
+)
+DEF_MACRO(fVLOG_VTCM_WORDQ,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0), IN);
+ },
+ ()
+)
+
+
+
+
+
+DEF_MACRO(fVLOG_VTCM_HALFWORD_DV,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1, IN);
+ },
+ ()
+)
+
+DEF_MACRO(fVLOG_VTCM_HALFWORDQ_DV,
+ {
+ SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0), IN);
+ },
+ ()
+)
+
+
+
+
+
+
+DEF_MACRO(fSTORERELEASE,
+ {
+ fV_AL_CHECK(EA,fVECSIZE()-1);
+
+ mem_store_release(thread, insn, fVECSIZE(), EA&~(fVECSIZE()-1), EA, TYPE, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fVFETCH_AL,
+ {
+ fV_AL_CHECK(EA,fVECSIZE()-1);
+ mem_fetch_vector(thread, insn, EA&~(fVECSIZE()-1), insn->slot, fVECSIZE());
+ },
+ (A_LOAD,A_MEMLIKE)
+)
+
+
+DEF_MACRO(fLOADMMV_AL,
+ {
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+ thread->last_pkt->double_access_vec = 0;
+ mem_load_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &DST.ub[0], LEN, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_LOAD,A_MEMLIKE)
+)
+
+DEF_MACRO(fLOADMMV,
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST),
+ ()
+)
+
+DEF_MACRO(fLOADMMVQ,
+ do {
+ int __i;
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ fVFOREACH(8,__i) if (!fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fLOADMMVNQ,
+ do {
+ int __i;
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ fVFOREACH(8,__i) if (fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
+ } while (0),
+ ()
+)
+
+DEF_MACRO(fLOADMMVU_AL,
+ {
+ size4u_t size2 = (EA)&(ALIGNMENT-1);
+ size4u_t size1 = LEN-size2;
+ thread->last_pkt->double_access_vec = 1;
+ mem_load_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &DST.ub[size1], size2, fUSE_LOOKUP_ADDRESS());
+ mem_load_vector_oddva(thread, insn, EA, EA,/* slot */ 0, size1, &DST.ub[0], size1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_LOAD,A_MEMLIKE)
+)
+
+DEF_MACRO(fLOADMMVU,
+ {
+ /* if address happens to be aligned, only do aligned load */
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->pkt_has_vmemu_access = 0;
+ thread->last_pkt->double_access = 0;
+
+ fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ } else {
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ thread->last_pkt->double_access = 1;
+
+ fLOADMMVU_AL(EA,fVECSIZE(),fVECSIZE(),DST);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMV_AL,
+ {
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+ mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTOREMMV,
+ fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVQ_AL,
+ do {
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ } while (0),
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTOREMMVQ,
+ fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVNQ_AL,
+ {
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ fV_AL_CHECK(EA,ALIGNMENT-1);
+ mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTOREMMVNQ,
+ fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
+ ()
+)
+
+DEF_MACRO(fSTOREMMVU_AL,
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+ mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], 0, 0, fUSE_LOOKUP_ADDRESS());
+ mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
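+
+/*
+ * Split arithmetic (illustrative, assuming a 128-byte vector): for
+ * EA = 0x1010, size1 = 128 - (0x1010 & 127) = 112 bytes are stored on
+ * slot 0 starting at EA, and size2 = LEN - size1 = 16 bytes on slot 1
+ * starting at EA + size1, so the two accesses cover one unaligned vector.
+ */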
+
+DEF_MACRO(fSTOREMMVU,
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVU_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMVQU_AL,
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+ mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(),/* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 0, fUSE_LOOKUP_ADDRESS());
+ mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTOREMMVQU,
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ }
+ },
+ ()
+)
+
+DEF_MACRO(fSTOREMMVNQU_AL,
+ {
+ size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
+ size4u_t size2;
+ mmvector_t maskvec;
+ int i;
+ for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
+ if (size1>LEN) size1 = LEN;
+ size2 = LEN-size1;
+ mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 1, fUSE_LOOKUP_ADDRESS());
+ mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
+ },
+ (A_STORE,A_MEMLIKE)
+)
+
+DEF_MACRO(fSTOREMMVNQU,
+ {
+ thread->last_pkt->pkt_has_vtcm_access = 0;
+ thread->last_pkt->pkt_access_count = 0;
+ if ( (EA & (fVECSIZE()-1)) == 0) {
+ thread->last_pkt->double_access = 0;
+ fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ } else {
+ thread->last_pkt->double_access = 1;
+ thread->last_pkt->pkt_has_vmemu_access = 1;
+ fSTOREMMVNQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
+ }
+ },
+ ()
+)
+
+
+
+
+DEF_MACRO(fVFOREACH,
+ for (VAR = 0; VAR < fVELEM(WIDTH); VAR++),
+ /* NOTHING */
+)
+
+DEF_MACRO(fVARRAY_ELEMENT_ACCESS,
+ ARRAY.v[(INDEX) / (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))],
+ ()
+)
+
+DEF_MACRO(fVNEWCANCEL,
+ do { THREAD2STRUCT->VRegs_select &= ~(1<<(REGNUM)); } while (0),
+ ()
+)
+
+DEF_MACRO(fTMPVDATA,
+ mmvec_vtmp_data(thread),
+ (A_CVI)
+)
+
+DEF_MACRO(fVSATDW,
+ fVSATW( ( ( ((long long)U)<<32 ) | fZXTN(32,64,V) ) ),
+ /* attribs */
+)
+
+DEF_MACRO(fVASL_SATHI,
+ fVSATW(((U)<<1) | ((V)>>31)),
+ /* attribs */
+)
+
+DEF_MACRO(fVUADDSAT,
+ fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVSADDSAT,
+ fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVUSUBSAT,
+ fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVSSUBSAT,
+ fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGU,
+ ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGURND,
+ ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGU,
+ ((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGURNDSAT,
+ fVSATUN(WIDTH,((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGS,
+ ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVAVGSRND,
+ ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGS,
+ ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V))>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGSRND,
+ ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
+ /* attribs */
+)
+
+DEF_MACRO(fVNAVGSRNDSAT,
+ fVSATN(WIDTH,((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
+ /* attribs */
+)
+
+
+DEF_MACRO(fVNOROUND,
+ VAL,
+ /* NOTHING */
+)
+DEF_MACRO(fVNOSAT,
+ VAL,
+ /* NOTHING */
+)
+
+DEF_MACRO(fVROUND,
+ ((VAL) + (((SHAMT)>0)?(1LL<<((SHAMT)-1)):0)),
+ /* NOTHING */
+)
+
+DEF_MACRO(fCARRY_FROM_ADD32,
+ (((fZXTN(32,64,A)+fZXTN(32,64,B)+C) >> 32) & 1),
+ /* NOTHING */
+)
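+
+/*
+ * Example (illustrative): fCARRY_FROM_ADD32(0xffffffff, 1, 0) zero-extends
+ * both operands to 64 bits, adds the carry-in, and returns bit 32 of the
+ * sum: (0xffffffffULL + 1 + 0) >> 32 == 1.
+ */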
+
+DEF_MACRO(fUARCH_NOTE_PUMP_4X,
+ ,
+ ()
+)
+
+DEF_MACRO(fUARCH_NOTE_PUMP_2X,
+ ,
+ ()
+)
diff --git a/target/hexagon/imported/mpy.idef b/target/hexagon/imported/mpy.idef
new file mode 100644
index 000000000..8744f6596
--- /dev/null
+++ b/target/hexagon/imported/mpy.idef
@@ -0,0 +1,1208 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Multiply Instructions
+ */
+
+
+#define STD_SP_MODES(TAG,OPER,ATR,DST,ACCSEM,SEM,OSEM,SATSEM,RNDSEM)\
+Q6INSN(M2_##TAG##_hh_s0, OPER"(Rs.H32,Rt.H32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETHALF(1,RsV),fGETHALF(1,RtV))));})\
+Q6INSN(M2_##TAG##_hh_s1, OPER"(Rs.H32,Rt.H32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETHALF(1,RsV),fGETHALF(1,RtV)))));})\
+Q6INSN(M2_##TAG##_hl_s0, OPER"(Rs.H32,Rt.L32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETHALF(1,RsV),fGETHALF(0,RtV))));})\
+Q6INSN(M2_##TAG##_hl_s1, OPER"(Rs.H32,Rt.L32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETHALF(1,RsV),fGETHALF(0,RtV)))));})\
+Q6INSN(M2_##TAG##_lh_s0, OPER"(Rs.L32,Rt.H32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETHALF(0,RsV),fGETHALF(1,RtV))));})\
+Q6INSN(M2_##TAG##_lh_s1, OPER"(Rs.L32,Rt.H32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETHALF(0,RsV),fGETHALF(1,RtV)))));})\
+Q6INSN(M2_##TAG##_ll_s0, OPER"(Rs.L32,Rt.L32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETHALF(0,RsV),fGETHALF(0,RtV))));})\
+Q6INSN(M2_##TAG##_ll_s1, OPER"(Rs.L32,Rt.L32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETHALF(0,RsV),fGETHALF(0,RtV)))));})
+
+/*****************************************************/
+/* multiply 16x16->32 signed instructions */
+/*****************************************************/
+STD_SP_MODES(mpy_acc, "Rx32+=mpy", ,RxV,RxV+ ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpy_nac, "Rx32-=mpy", ,RxV,RxV- ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpy_acc_sat,"Rx32+=mpy", ,RxV,RxV+ ,fMPY16SS,":sat" ,fSAT, fPASS)
+STD_SP_MODES(mpy_nac_sat,"Rx32-=mpy", ,RxV,RxV- ,fMPY16SS,":sat" ,fSAT, fPASS)
+STD_SP_MODES(mpy, "Rd32=mpy", ,RdV, ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpy_sat, "Rd32=mpy", ,RdV, ,fMPY16SS,":sat" ,fSAT, fPASS)
+STD_SP_MODES(mpy_rnd, "Rd32=mpy", ,RdV, ,fMPY16SS,":rnd" ,fPASS,fROUND)
+STD_SP_MODES(mpy_sat_rnd,"Rd32=mpy", ,RdV, ,fMPY16SS,":rnd:sat",fSAT, fROUND)
+STD_SP_MODES(mpyd_acc, "Rxx32+=mpy",,RxxV,RxxV+ ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpyd_nac, "Rxx32-=mpy",,RxxV,RxxV- ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpyd, "Rdd32=mpy", ,RddV, ,fMPY16SS, ,fPASS,fPASS)
+STD_SP_MODES(mpyd_rnd, "Rdd32=mpy", ,RddV, ,fMPY16SS,":rnd" ,fPASS,fROUND)
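+
+/*
+ * Expansion sketch (illustrative): each STD_SP_MODES line emits eight
+ * Q6INSNs, one per halfword pairing and scale. The _ll_s0 instance of
+ * STD_SP_MODES(mpy, ...) behaves like
+ *
+ *     int16_t a = fGETHALF(0, RsV), b = fGETHALF(0, RtV);
+ *     RdV = (int32_t)a * (int32_t)b;   // fMPY16SS; no accumulate/round/sat
+ *
+ * while the _s1 variants left-shift the product by one (fSCALE(1, ...)).
+ */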
+
+
+/*****************************************************/
+/* multiply 16x16->32 unsigned instructions */
+/*****************************************************/
+#define STD_USP_MODES(TAG,OPER,ATR,DST,ACCSEM,SEM,OSEM,SATSEM,RNDSEM)\
+Q6INSN(M2_##TAG##_hh_s0, OPER"(Rs.H32,Rt.H32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETUHALF(1,RsV),fGETUHALF(1,RtV))));})\
+Q6INSN(M2_##TAG##_hh_s1, OPER"(Rs.H32,Rt.H32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETUHALF(1,RsV),fGETUHALF(1,RtV)))));})\
+Q6INSN(M2_##TAG##_hl_s0, OPER"(Rs.H32,Rt.L32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETUHALF(1,RsV),fGETUHALF(0,RtV))));})\
+Q6INSN(M2_##TAG##_hl_s1, OPER"(Rs.H32,Rt.L32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETUHALF(1,RsV),fGETUHALF(0,RtV)))));})\
+Q6INSN(M2_##TAG##_lh_s0, OPER"(Rs.L32,Rt.H32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETUHALF(0,RsV),fGETUHALF(1,RtV))));})\
+Q6INSN(M2_##TAG##_lh_s1, OPER"(Rs.L32,Rt.H32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETUHALF(0,RsV),fGETUHALF(1,RtV)))));})\
+Q6INSN(M2_##TAG##_ll_s0, OPER"(Rs.L32,Rt.L32)"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM SEM( fGETUHALF(0,RsV),fGETUHALF(0,RtV))));})\
+Q6INSN(M2_##TAG##_ll_s1, OPER"(Rs.L32,Rt.L32):<<1"OSEM, ATR,"",{DST=SATSEM(RNDSEM(ACCSEM fSCALE(1,SEM(fGETUHALF(0,RsV),fGETUHALF(0,RtV)))));})
+
+STD_USP_MODES(mpyu_acc, "Rx32+=mpyu", ,RxV,RxV+ ,fMPY16UU, ,fPASS,fPASS)
+STD_USP_MODES(mpyu_nac, "Rx32-=mpyu", ,RxV,RxV- ,fMPY16UU, ,fPASS,fPASS)
+STD_USP_MODES(mpyu, "Rd32=mpyu", ATTRIBS() ,RdV, ,fMPY16UU, ,fPASS,fPASS)
+STD_USP_MODES(mpyud_acc, "Rxx32+=mpyu",,RxxV,RxxV+,fMPY16UU, ,fPASS,fPASS)
+STD_USP_MODES(mpyud_nac, "Rxx32-=mpyu",,RxxV,RxxV-,fMPY16UU, ,fPASS,fPASS)
+STD_USP_MODES(mpyud, "Rdd32=mpyu", ATTRIBS() ,RddV, ,fMPY16UU, ,fPASS,fPASS)
+
+/**********************************************/
+/* mpy 16x#s8->32 */
+/**********************************************/
+
+Q6INSN(M2_mpysip,"Rd32=+mpyi(Rs32,#u8)",ATTRIBS(A_ARCHV2),
+"32-bit Multiply by unsigned immediate",
+{ fIMMEXT(uiV); RdV=RsV*uiV; })
+
+Q6INSN(M2_mpysin,"Rd32=-mpyi(Rs32,#u8)",ATTRIBS(A_ARCHV2),
+"32-bit Multiply by unsigned immediate, negate result",
+{ RdV=RsV*-uiV; })
+
+Q6INSN(M2_macsip,"Rx32+=mpyi(Rs32,#u8)",ATTRIBS(A_ARCHV2),
+"32-bit Multiply-Add by unsigned immediate",
+{ fIMMEXT(uiV); RxV=RxV + (RsV*uiV);})
+
+Q6INSN(M2_macsin,"Rx32-=mpyi(Rs32,#u8)",ATTRIBS(A_ARCHV2),
+"32-bit Multiply-Subtract by unsigned immediate",
+{ fIMMEXT(uiV); RxV=RxV - (RsV*uiV);})
+
+
+/**********************************************/
+/* multiply/mac 32x32->64 instructions */
+/**********************************************/
+Q6INSN(M2_dpmpyss_s0, "Rdd32=mpy(Rs32,Rt32)", ATTRIBS(),"Multiply 32x32",{RddV=fMPY32SS(RsV,RtV);})
+Q6INSN(M2_dpmpyss_acc_s0,"Rxx32+=mpy(Rs32,Rt32)",ATTRIBS(),"Multiply 32x32",{RxxV= RxxV + fMPY32SS(RsV,RtV);})
+Q6INSN(M2_dpmpyss_nac_s0,"Rxx32-=mpy(Rs32,Rt32)",ATTRIBS(),"Multiply 32x32",{RxxV= RxxV - fMPY32SS(RsV,RtV);})
+
+Q6INSN(M2_dpmpyuu_s0, "Rdd32=mpyu(Rs32,Rt32)", ATTRIBS(),"Multiply 32x32",{RddV=fMPY32UU(fCAST4u(RsV),fCAST4u(RtV));})
+Q6INSN(M2_dpmpyuu_acc_s0,"Rxx32+=mpyu(Rs32,Rt32)",ATTRIBS(),"Multiply 32x32",{RxxV= RxxV + fMPY32UU(fCAST4u(RsV),fCAST4u(RtV));})
+Q6INSN(M2_dpmpyuu_nac_s0,"Rxx32-=mpyu(Rs32,Rt32)",ATTRIBS(),"Multiply 32x32",{RxxV= RxxV - fMPY32UU(fCAST4u(RsV),fCAST4u(RtV));})
+
+
+/******************************************************/
+/* multiply/mac 32x32->32 (upper) instructions */
+/******************************************************/
+Q6INSN(M2_mpy_up, "Rd32=mpy(Rs32,Rt32)", ATTRIBS(),"Multiply 32x32",{RdV=fMPY32SS(RsV,RtV)>>32;})
+Q6INSN(M2_mpy_up_s1, "Rd32=mpy(Rs32,Rt32):<<1", ATTRIBS(),"Multiply 32x32",{RdV=fMPY32SS(RsV,RtV)>>31;})
+Q6INSN(M2_mpy_up_s1_sat, "Rd32=mpy(Rs32,Rt32):<<1:sat", ATTRIBS(),"Multiply 32x32",{RdV=fSAT(fMPY32SS(RsV,RtV)>>31);})
+Q6INSN(M2_mpyu_up, "Rd32=mpyu(Rs32,Rt32)", ATTRIBS(),"Multiply 32x32",{RdV=fMPY32UU(fCAST4u(RsV),fCAST4u(RtV))>>32;})
+Q6INSN(M2_mpysu_up, "Rd32=mpysu(Rs32,Rt32)", ATTRIBS(),"Multiply 32x32",{RdV=fMPY32SU(RsV,fCAST4u(RtV))>>32;})
+Q6INSN(M2_dpmpyss_rnd_s0,"Rd32=mpy(Rs32,Rt32):rnd", ATTRIBS(),"Multiply 32x32",{RdV=(fMPY32SS(RsV,RtV)+fCONSTLL(0x80000000))>>32;})
+
+Q6INSN(M4_mac_up_s1_sat, "Rx32+=mpy(Rs32,Rt32):<<1:sat", ATTRIBS(),"Multiply 32x32",{RxV=fSAT( (fSE32_64(RxV)) + (fMPY32SS(RsV,RtV)>>31));})
+Q6INSN(M4_nac_up_s1_sat, "Rx32-=mpy(Rs32,Rt32):<<1:sat", ATTRIBS(),"Multiply 32x32",{RxV=fSAT( (fSE32_64(RxV)) - (fMPY32SS(RsV,RtV)>>31));})
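+
+/*
+ * Rounding sketch (illustrative): M2_dpmpyss_rnd_s0 adds half an output
+ * LSB before truncating to the upper word, e.g. with RsV = 0x8000 and
+ * RtV = 0x10000:
+ *
+ *     (0x8000LL * 0x10000 + 0x80000000LL) >> 32 == 1   // plain >>32 gives 0
+ */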
+
+
+/**********************************************/
+/* 32x32->32 multiply (lower) */
+/**********************************************/
+
+Q6INSN(M2_mpyi,"Rd32=mpyi(Rs32,Rt32)",ATTRIBS(),
+"Multiply Integer",
+{ RdV=RsV*RtV;})
+
+Q6INSN(M2_maci,"Rx32+=mpyi(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Multiply-Accumulate Integer",
+{ RxV=RxV + RsV*RtV;})
+
+Q6INSN(M2_mnaci,"Rx32-=mpyi(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Multiply-Neg-Accumulate Integer",
+{ RxV=RxV - RsV*RtV;})
+
+/****** WHY ARE THESE IN MPY.IDEF? **********/
+
+Q6INSN(M2_acci,"Rx32+=add(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Add with accumulate",
+{ RxV=RxV + RsV + RtV;})
+
+Q6INSN(M2_accii,"Rx32+=add(Rs32,#s8)",ATTRIBS(A_ARCHV2),
+"Add with accumulate",
+{ fIMMEXT(siV); RxV=RxV + RsV + siV;})
+
+Q6INSN(M2_nacci,"Rx32-=add(Rs32,Rt32)",ATTRIBS(A_ARCHV2),
+"Add with neg accumulate",
+{ RxV=RxV - (RsV + RtV);})
+
+Q6INSN(M2_naccii,"Rx32-=add(Rs32,#s8)",ATTRIBS(A_ARCHV2),
+"Add with neg accumulate",
+{ fIMMEXT(siV); RxV=RxV - (RsV + siV);})
+
+Q6INSN(M2_subacc,"Rx32+=sub(Rt32,Rs32)",ATTRIBS(A_ARCHV2),
+"Sub with accumulate",
+{ RxV=RxV + RtV - RsV;})
+
+
+
+
+Q6INSN(M4_mpyrr_addr,"Ry32=add(Ru32,mpyi(Ry32,Rs32))",ATTRIBS(),
+"Mpy by immed and add immed",
+{ RyV = RuV + RsV*RyV;})
+
+Q6INSN(M4_mpyri_addr_u2,"Rd32=add(Ru32,mpyi(#u6:2,Rs32))",ATTRIBS(),
+"Mpy by immed and add immed",
+{ RdV = RuV + RsV*uiV;})
+
+Q6INSN(M4_mpyri_addr,"Rd32=add(Ru32,mpyi(Rs32,#u6))",ATTRIBS(),
+"Mpy by immed and add immed",
+{ fIMMEXT(uiV); RdV = RuV + RsV*uiV;})
+
+
+
+Q6INSN(M4_mpyri_addi,"Rd32=add(#u6,mpyi(Rs32,#U6))",ATTRIBS(),
+"Mpy by immed and add immed",
+{ fIMMEXT(uiV); RdV = uiV + RsV*UiV;})
+
+
+
+Q6INSN(M4_mpyrr_addi,"Rd32=add(#u6,mpyi(Rs32,Rt32))",ATTRIBS(),
+"Mpy by immed and add immed",
+{ fIMMEXT(uiV); RdV = uiV + RsV*RtV;})
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**********************************************/
+/* vector mac 2x[16x16 -> 32] */
+/**********************************************/
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV)))));\
+ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV)))));\
+}
+Q6INSN(M2_vmpy2s_s0,"Rdd32=vmpyh(Rs32,Rt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmpy2s_s1,"Rdd32=vmpyh(Rs32,Rt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
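+
+/*
+ * Lane example (illustrative): vmpyh multiplies corresponding halfwords
+ * into the two words of Rdd, so RsV = 0x00030002 and RtV = 0x00050004
+ * give Rdd.w[0] = 2*4 = 8 and Rdd.w[1] = 3*5 = 15 (saturated; the :<<1
+ * form doubles each product first).
+ */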
+
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV)))));\
+ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV)))));\
+}
+Q6INSN(M2_vmac2s_s0,"Rxx32+=vmpyh(Rs32,Rt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmac2s_s1,"Rxx32+=vmpyh(Rs32,Rt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SU(fGETHALF(0,RsV),fGETUHALF(0,RtV)))));\
+ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SU(fGETHALF(1,RsV),fGETUHALF(1,RtV)))));\
+}
+Q6INSN(M2_vmpy2su_s0,"Rdd32=vmpyhsu(Rs32,Rt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmpy2su_s1,"Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
+
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SU(fGETHALF(0,RsV),fGETUHALF(0,RtV)))));\
+ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SU(fGETHALF(1,RsV),fGETUHALF(1,RtV)))));\
+}
+Q6INSN(M2_vmac2su_s0,"Rxx32+=vmpyhsu(Rs32,Rt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmac2su_s1,"Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
+
+
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETHALF(1,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV))) + 0x8000))));\
+ fSETHALF(0,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV))) + 0x8000))));\
+}
+Q6INSN(M2_vmpy2s_s0pack,"Rd32=vmpyh(Rs32,Rt32):rnd:sat",ATTRIBS(A_ARCHV2),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmpy2s_s1pack,"Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat",ATTRIBS(A_ARCHV2),"Vector Multiply",vmac_sema(1))
+
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RxxV,fGETWORD(0,RxxV) + fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV)));\
+ fSETWORD(1,RxxV,fGETWORD(1,RxxV) + fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV)));\
+}
+Q6INSN(M2_vmac2,"Rxx32+=vmpyh(Rs32,Rt32)",ATTRIBS(A_ARCHV2),"Vector Multiply",vmac_sema(0))
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)))));\
+ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)))));\
+}
+Q6INSN(M2_vmpy2es_s0,"Rdd32=vmpyeh(Rss32,Rtt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmpy2es_s1,"Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)))));\
+ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)))));\
+}
+Q6INSN(M2_vmac2es_s0,"Rxx32+=vmpyeh(Rss32,Rtt32):sat",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+Q6INSN(M2_vmac2es_s1,"Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Vector Multiply",vmac_sema(1))
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ fSETWORD(0,RxxV,fGETWORD(0,RxxV) + fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)));\
+ fSETWORD(1,RxxV,fGETWORD(1,RxxV) + fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)));\
+}
+Q6INSN(M2_vmac2es,"Rxx32+=vmpyeh(Rss32,Rtt32)",ATTRIBS(A_ARCHV2),"Vector Multiply",vmac_sema(0))
+
+
+
+
+/********************************************************/
+/* vrmpyh, aka Big Mac, aka Mac Daddy, aka Mac-ac-ac-ac */
+/* vector mac 4x[16x16] + 64 -> 64 */
+/********************************************************/
+
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ RxxV = RxxV + fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV))\
+ + fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV))\
+ + fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV))\
+ + fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+}
+Q6INSN(M2_vrmac_s0,"Rxx32+=vrmpyh(Rss32,Rtt32)",ATTRIBS(),"Vector Multiply",vmac_sema(0))
+
+#undef vmac_sema
+#define vmac_sema(N)\
+{ RddV = fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV))\
+ + fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV))\
+ + fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV))\
+ + fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+}
+Q6INSN(M2_vrmpy_s0,"Rdd32=vrmpyh(Rss32,Rtt32)",ATTRIBS(),"Vector Multiply",vmac_sema(0))
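+
+/*
+ * Dot-product sketch (illustrative): for halfword lanes a[0..3] of Rss and
+ * b[0..3] of Rtt,
+ *
+ *     Rdd = a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3];
+ *
+ * and the accumulating form adds that sum into Rxx without saturation.
+ */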
+
+
+
+/******************************************************/
+/* vector dual macs, structured like the complex forms */
+/******************************************************/
+
+
+/* With round&pack */
+#undef dmpy_sema
+#define dmpy_sema(N)\
+{ fSETHALF(0,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV))) + 0x8000))));\
+ fSETHALF(1,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV))) + 0x8000))));\
+}
+Q6INSN(M2_vdmpyrs_s0,"Rd32=vdmpy(Rss32,Rtt32):rnd:sat",ATTRIBS(), "vector dual mac w/ round&pack",dmpy_sema(0))
+Q6INSN(M2_vdmpyrs_s1,"Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"vector dual mac w/ round&pack",dmpy_sema(1))
+
+
+
+
+
+/******************************************************/
+/* vector byte multiplies */
+/******************************************************/
+
+
+Q6INSN(M5_vrmpybuu,"Rdd32=vrmpybu(Rss32,Rtt32)",ATTRIBS(),
+ "vector dual mpy bytes",
+{
+ fSETWORD(0,RddV,(fMPY16SS(fGETUBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETUBYTE(1,RssV),fGETUBYTE(1,RttV)) +
+ fMPY16SS(fGETUBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETUBYTE(3,RssV),fGETUBYTE(3,RttV))));
+ fSETWORD(1,RddV,(fMPY16SS(fGETUBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETUBYTE(5,RssV),fGETUBYTE(5,RttV)) +
+ fMPY16SS(fGETUBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETUBYTE(7,RssV),fGETUBYTE(7,RttV))));
+ })
+
+Q6INSN(M5_vrmacbuu,"Rxx32+=vrmpybu(Rss32,Rtt32)",ATTRIBS(),
+ "vector dual mac bytes",
+{
+ fSETWORD(0,RxxV,(fGETWORD(0,RxxV) +
+ fMPY16SS(fGETUBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETUBYTE(1,RssV),fGETUBYTE(1,RttV)) +
+ fMPY16SS(fGETUBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETUBYTE(3,RssV),fGETUBYTE(3,RttV))));
+ fSETWORD(1,RxxV,(fGETWORD(1,RxxV) +
+ fMPY16SS(fGETUBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETUBYTE(5,RssV),fGETUBYTE(5,RttV)) +
+ fMPY16SS(fGETUBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETUBYTE(7,RssV),fGETUBYTE(7,RttV))));
+ })
+
+
+Q6INSN(M5_vrmpybsu,"Rdd32=vrmpybsu(Rss32,Rtt32)",ATTRIBS(),
+ "vector dual mpy bytes",
+{
+ fSETWORD(0,RddV,(fMPY16SS(fGETBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETBYTE(1,RssV),fGETUBYTE(1,RttV)) +
+ fMPY16SS(fGETBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETBYTE(3,RssV),fGETUBYTE(3,RttV))));
+ fSETWORD(1,RddV,(fMPY16SS(fGETBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETBYTE(5,RssV),fGETUBYTE(5,RttV)) +
+ fMPY16SS(fGETBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETBYTE(7,RssV),fGETUBYTE(7,RttV))));
+ })
+
+Q6INSN(M5_vrmacbsu,"Rxx32+=vrmpybsu(Rss32,Rtt32)",ATTRIBS(),
+ "vector dual mac bytes",
+{
+ fSETWORD(0,RxxV,(fGETWORD(0,RxxV) +
+ fMPY16SS(fGETBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETBYTE(1,RssV),fGETUBYTE(1,RttV)) +
+ fMPY16SS(fGETBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETBYTE(3,RssV),fGETUBYTE(3,RttV))));
+ fSETWORD(1,RxxV,(fGETWORD(1,RxxV) +
+ fMPY16SS(fGETBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETBYTE(5,RssV),fGETUBYTE(5,RttV)) +
+ fMPY16SS(fGETBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETBYTE(7,RssV),fGETUBYTE(7,RttV))));
+ })
+
+
+Q6INSN(M5_vmpybuu,"Rdd32=vmpybu(Rs32,Rt32)",ATTRIBS(),
+ "vector mpy bytes",
+{
+ fSETHALF(0,RddV,(fMPY16SS(fGETUBYTE(0,RsV),fGETUBYTE(0,RtV))));
+ fSETHALF(1,RddV,(fMPY16SS(fGETUBYTE(1,RsV),fGETUBYTE(1,RtV))));
+ fSETHALF(2,RddV,(fMPY16SS(fGETUBYTE(2,RsV),fGETUBYTE(2,RtV))));
+ fSETHALF(3,RddV,(fMPY16SS(fGETUBYTE(3,RsV),fGETUBYTE(3,RtV))));
+ })
+
+Q6INSN(M5_vmpybsu,"Rdd32=vmpybsu(Rs32,Rt32)",ATTRIBS(),
+ "vector mpy bytes",
+{
+ fSETHALF(0,RddV,(fMPY16SS(fGETBYTE(0,RsV),fGETUBYTE(0,RtV))));
+ fSETHALF(1,RddV,(fMPY16SS(fGETBYTE(1,RsV),fGETUBYTE(1,RtV))));
+ fSETHALF(2,RddV,(fMPY16SS(fGETBYTE(2,RsV),fGETUBYTE(2,RtV))));
+ fSETHALF(3,RddV,(fMPY16SS(fGETBYTE(3,RsV),fGETUBYTE(3,RtV))));
+ })
+
+
+Q6INSN(M5_vmacbuu,"Rxx32+=vmpybu(Rs32,Rt32)",ATTRIBS(),
+ "vector mac bytes",
+{
+ fSETHALF(0,RxxV,(fGETHALF(0,RxxV)+fMPY16SS(fGETUBYTE(0,RsV),fGETUBYTE(0,RtV))));
+ fSETHALF(1,RxxV,(fGETHALF(1,RxxV)+fMPY16SS(fGETUBYTE(1,RsV),fGETUBYTE(1,RtV))));
+ fSETHALF(2,RxxV,(fGETHALF(2,RxxV)+fMPY16SS(fGETUBYTE(2,RsV),fGETUBYTE(2,RtV))));
+ fSETHALF(3,RxxV,(fGETHALF(3,RxxV)+fMPY16SS(fGETUBYTE(3,RsV),fGETUBYTE(3,RtV))));
+ })
+
+Q6INSN(M5_vmacbsu,"Rxx32+=vmpybsu(Rs32,Rt32)",ATTRIBS(),
+ "vector mac bytes",
+{
+ fSETHALF(0,RxxV,(fGETHALF(0,RxxV)+fMPY16SS(fGETBYTE(0,RsV),fGETUBYTE(0,RtV))));
+ fSETHALF(1,RxxV,(fGETHALF(1,RxxV)+fMPY16SS(fGETBYTE(1,RsV),fGETUBYTE(1,RtV))));
+ fSETHALF(2,RxxV,(fGETHALF(2,RxxV)+fMPY16SS(fGETBYTE(2,RsV),fGETUBYTE(2,RtV))));
+ fSETHALF(3,RxxV,(fGETHALF(3,RxxV)+fMPY16SS(fGETBYTE(3,RsV),fGETUBYTE(3,RtV))));
+ })
+
+
+
+Q6INSN(M5_vdmpybsu,"Rdd32=vdmpybsu(Rss32,Rtt32):sat",ATTRIBS(),
+ "vector quad mpy bytes",
+{
+ fSETHALF(0,RddV,fSATN(16,(fMPY16SS(fGETBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETBYTE(1,RssV),fGETUBYTE(1,RttV)))));
+ fSETHALF(1,RddV,fSATN(16,(fMPY16SS(fGETBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETBYTE(3,RssV),fGETUBYTE(3,RttV)))));
+ fSETHALF(2,RddV,fSATN(16,(fMPY16SS(fGETBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETBYTE(5,RssV),fGETUBYTE(5,RttV)))));
+ fSETHALF(3,RddV,fSATN(16,(fMPY16SS(fGETBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETBYTE(7,RssV),fGETUBYTE(7,RttV)))));
+ })
+
+
+Q6INSN(M5_vdmacbsu,"Rxx32+=vdmpybsu(Rss32,Rtt32):sat",ATTRIBS(),
+ "vector quad mac bytes",
+{
+ fSETHALF(0,RxxV,fSATN(16,(fGETHALF(0,RxxV) +
+ fMPY16SS(fGETBYTE(0,RssV),fGETUBYTE(0,RttV)) +
+ fMPY16SS(fGETBYTE(1,RssV),fGETUBYTE(1,RttV)))));
+ fSETHALF(1,RxxV,fSATN(16,(fGETHALF(1,RxxV) +
+ fMPY16SS(fGETBYTE(2,RssV),fGETUBYTE(2,RttV)) +
+ fMPY16SS(fGETBYTE(3,RssV),fGETUBYTE(3,RttV)))));
+ fSETHALF(2,RxxV,fSATN(16,(fGETHALF(2,RxxV) +
+ fMPY16SS(fGETBYTE(4,RssV),fGETUBYTE(4,RttV)) +
+ fMPY16SS(fGETBYTE(5,RssV),fGETUBYTE(5,RttV)))));
+ fSETHALF(3,RxxV,fSATN(16,(fGETHALF(3,RxxV) +
+ fMPY16SS(fGETBYTE(6,RssV),fGETUBYTE(6,RttV)) +
+ fMPY16SS(fGETBYTE(7,RssV),fGETUBYTE(7,RttV)))));
+ })
+
+
+
+/* Full version */
+#undef dmpy_sema
+#define dmpy_sema(N)\
+{ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)))));\
+ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV)))));\
+}
+Q6INSN(M2_vdmacs_s0,"Rxx32+=vdmpy(Rss32,Rtt32):sat",ATTRIBS(), "",dmpy_sema(0))
+Q6INSN(M2_vdmacs_s1,"Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat",ATTRIBS(),"",dmpy_sema(1))
+
+#undef dmpy_sema
+#define dmpy_sema(N)\
+{ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)))));\
+ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV)))));\
+}
+
+Q6INSN(M2_vdmpys_s0,"Rdd32=vdmpy(Rss32,Rtt32):sat",ATTRIBS(), "",dmpy_sema(0))
+Q6INSN(M2_vdmpys_s1,"Rdd32=vdmpy(Rss32,Rtt32):<<1:sat",ATTRIBS(),"",dmpy_sema(1))
+
+
+
+/******************************************************/
+/* complex multiply/mac with */
+/* real&imag are packed together and always saturated */
+/* to protect against overflow. */
+/******************************************************/
+
+#undef cmpy_sema
+#define cmpy_sema(N,CONJMINUS,CONJPLUS)\
+{ fSETHALF(1,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV))) CONJMINUS \
+ fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV))) + 0x8000))));\
+ fSETHALF(0,RdV,fGETHALF(1,(fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV))) CONJPLUS \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV))) + 0x8000))));\
+}
+Q6INSN(M2_cmpyrs_s0,"Rd32=cmpy(Rs32,Rt32):rnd:sat",ATTRIBS(), "Complex Multiply",cmpy_sema(0,+,-))
+Q6INSN(M2_cmpyrs_s1,"Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat",ATTRIBS(),"Complex Multiply",cmpy_sema(1,+,-))
+
+
+Q6INSN(M2_cmpyrsc_s0,"Rd32=cmpy(Rs32,Rt32*):rnd:sat",ATTRIBS(A_ARCHV2), "Complex Multiply",cmpy_sema(0,-,+))
+Q6INSN(M2_cmpyrsc_s1,"Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat",ATTRIBS(A_ARCHV2),"Complex Multiply",cmpy_sema(1,-,+))
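+
+/*
+ * Complex mapping (illustrative): with the real part in the low halfword
+ * and the imaginary part in the high halfword, cmpy computes
+ * (a + bi)(c + di) = (ac - bd) + (ad + bc)i, while the Rt32* conjugate
+ * forms flip the inner signs to give (ac + bd) + (bc - ad)i.
+ */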
+
+
+#undef cmpy_sema
+#define cmpy_sema(N,CONJMINUS,CONJPLUS)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV))) CONJMINUS \
+ fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV)))));\
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV))) CONJPLUS \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV)))));\
+}
+Q6INSN(M2_cmacs_s0,"Rxx32+=cmpy(Rs32,Rt32):sat",ATTRIBS(), "Complex Multiply",cmpy_sema(0,+,-))
+Q6INSN(M2_cmacs_s1,"Rxx32+=cmpy(Rs32,Rt32):<<1:sat",ATTRIBS(),"Complex Multiply",cmpy_sema(1,+,-))
+
+/* EJP: Need mac versions w/ CONJ T? */
+Q6INSN(M2_cmacsc_s0,"Rxx32+=cmpy(Rs32,Rt32*):sat",ATTRIBS(A_ARCHV2), "Complex Multiply",cmpy_sema(0,-,+))
+Q6INSN(M2_cmacsc_s1,"Rxx32+=cmpy(Rs32,Rt32*):<<1:sat",ATTRIBS(A_ARCHV2),"Complex Multiply",cmpy_sema(1,-,+))
+
+
+#undef cmpy_sema
+#define cmpy_sema(N,CONJMINUS,CONJPLUS)\
+{ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV))) CONJMINUS \
+ fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV)))));\
+ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV))) CONJPLUS \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV)))));\
+}
+
+Q6INSN(M2_cmpys_s0,"Rdd32=cmpy(Rs32,Rt32):sat",ATTRIBS(), "Complex Multiply",cmpy_sema(0,+,-))
+Q6INSN(M2_cmpys_s1,"Rdd32=cmpy(Rs32,Rt32):<<1:sat",ATTRIBS(),"Complex Multiply",cmpy_sema(1,+,-))
+
+Q6INSN(M2_cmpysc_s0,"Rdd32=cmpy(Rs32,Rt32*):sat",ATTRIBS(A_ARCHV2), "Complex Multiply",cmpy_sema(0,-,+))
+Q6INSN(M2_cmpysc_s1,"Rdd32=cmpy(Rs32,Rt32*):<<1:sat",ATTRIBS(A_ARCHV2),"Complex Multiply",cmpy_sema(1,-,+))
+
+
+
+#undef cmpy_sema
+#define cmpy_sema(N,CONJMINUS,CONJPLUS)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) - (fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV))) CONJMINUS \
+ fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV))))));\
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) - (fSCALE(N,fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV))) CONJPLUS \
+ fSCALE(N,fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV))))));\
+}
+Q6INSN(M2_cnacs_s0,"Rxx32-=cmpy(Rs32,Rt32):sat",ATTRIBS(A_ARCHV2), "Complex Multiply",cmpy_sema(0,+,-))
+Q6INSN(M2_cnacs_s1,"Rxx32-=cmpy(Rs32,Rt32):<<1:sat",ATTRIBS(A_ARCHV2),"Complex Multiply",cmpy_sema(1,+,-))
+
+/* EJP: need CONJ versions? */
+Q6INSN(M2_cnacsc_s0,"Rxx32-=cmpy(Rs32,Rt32*):sat",ATTRIBS(A_ARCHV2), "Complex Multiply",cmpy_sema(0,-,+))
+Q6INSN(M2_cnacsc_s1,"Rxx32-=cmpy(Rs32,Rt32*):<<1:sat",ATTRIBS(A_ARCHV2),"Complex Multiply",cmpy_sema(1,-,+))
+
+
+/******************************************************/
+/* complex interpolation */
+/* Given a pair of complex values, scale them by (a,b) and sum, */
+/* then saturate, shift by 1, and round/pack */
+/******************************************************/
+
+#undef vrcmpys_sema
+#define vrcmpys_sema(N,INWORD) \
+{ fSETWORD(1,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(1,INWORD)))));\
+ fSETWORD(0,RddV,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(1,INWORD)))));\
+}
+
+
+
+Q6INSN(M2_vrcmpys_s1_h,"Rdd32=vrcmpys(Rss32,Rtt32):<<1:sat:raw:hi",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(1,RttV)))
+Q6INSN(M2_vrcmpys_s1_l,"Rdd32=vrcmpys(Rss32,Rtt32):<<1:sat:raw:lo",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(0,RttV)))
+
+#undef vrcmpys_sema
+#define vrcmpys_sema(N,INWORD) \
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(1,INWORD)))));\
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(1,INWORD)))));\
+}
+
+
+
+Q6INSN(M2_vrcmpys_acc_s1_h,"Rxx32+=vrcmpys(Rss32,Rtt32):<<1:sat:raw:hi",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(1,RttV)))
+Q6INSN(M2_vrcmpys_acc_s1_l,"Rxx32+=vrcmpys(Rss32,Rtt32):<<1:sat:raw:lo",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(0,RttV)))
+
+#undef vrcmpys_sema
+#define vrcmpys_sema(N,INWORD) \
+{ fSETHALF(1,RdV,fGETHALF(1,fSAT(fSCALE(N,fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(3,RssV),fGETHALF(1,INWORD))) + 0x8000)));\
+ fSETHALF(0,RdV,fGETHALF(1,fSAT(fSCALE(N,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,INWORD))) + \
+ fSCALE(N,fMPY16SS(fGETHALF(2,RssV),fGETHALF(1,INWORD))) + 0x8000)));\
+}
+
+Q6INSN(M2_vrcmpys_s1rp_h,"Rd32=vrcmpys(Rss32,Rtt32):<<1:rnd:sat:raw:hi",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(1,RttV)))
+Q6INSN(M2_vrcmpys_s1rp_l,"Rd32=vrcmpys(Rss32,Rtt32):<<1:rnd:sat:raw:lo",ATTRIBS(A_ARCHV3), "Vector Reduce Complex Multiply by Scalar",vrcmpys_sema(1,fGETWORD(0,RttV)))
+
+/**************************************************************/
+/* mixed mode 32x16 vector dual multiplies */
+/* */
+/**************************************************************/
+
+/* SIGNED 32 x SIGNED 16 */
+
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(2,RttV))))>>16)) ); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RttV))))>>16)) ); \
+}
+Q6INSN(M2_mmacls_s0,"Rxx32+=vmpyweh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmacls_s1,"Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(3,RttV))))>>16) )); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RttV))))>>16 ))); \
+}
+Q6INSN(M2_mmachs_s0,"Rxx32+=vmpywoh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmachs_s1,"Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(2,RttV))))>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RttV))))>>16)); \
+}
+Q6INSN(M2_mmpyl_s0,"Rdd32=vmpyweh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyl_s1,"Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(3,RttV))))>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RttV))))>>16)); \
+}
+Q6INSN(M2_mmpyh_s0,"Rdd32=vmpywoh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyh_s1,"Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+
+/* With rounding */
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(2,RttV)))+0x8000)>>16)) ); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RttV)))+0x8000)>>16)) ); \
+}
+Q6INSN(M2_mmacls_rs0,"Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmacls_rs1,"Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(3,RttV)))+0x8000)>>16) )); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RttV)))+0x8000)>>16 ))); \
+}
+Q6INSN(M2_mmachs_rs0,"Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmachs_rs1,"Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(2,RttV)))+0x8000)>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RttV)))+0x8000)>>16)); \
+}
+Q6INSN(M2_mmpyl_rs0,"Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyl_rs1,"Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(3,RttV)))+0x8000)>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RttV)))+0x8000)>>16)); \
+}
+Q6INSN(M2_mmpyh_rs0,"Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyh_rs1,"Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+
+#undef mixmpy_sema
+#define mixmpy_sema(DEST,EQUALS,N)\
+{ DEST EQUALS fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(2,RttV))) + fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RttV)));}
+
+Q6INSN(M4_vrmpyeh_s0,"Rdd32=vrmpyweh(Rss32,Rtt32)",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(RddV,=,0))
+Q6INSN(M4_vrmpyeh_s1,"Rdd32=vrmpyweh(Rss32,Rtt32):<<1",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(RddV,=,1))
+Q6INSN(M4_vrmpyeh_acc_s0,"Rxx32+=vrmpyweh(Rss32,Rtt32)",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(RxxV,+=,0))
+Q6INSN(M4_vrmpyeh_acc_s1,"Rxx32+=vrmpyweh(Rss32,Rtt32):<<1",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(RxxV,+=,1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(DEST,EQUALS,N)\
+{ DEST EQUALS fSCALE(N,fMPY3216SS(fGETWORD(1,RssV),fGETHALF(3,RttV))) + fSCALE(N,fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RttV)));}
+
+Q6INSN(M4_vrmpyoh_s0,"Rdd32=vrmpywoh(Rss32,Rtt32)",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(RddV,=,0))
+Q6INSN(M4_vrmpyoh_s1,"Rdd32=vrmpywoh(Rss32,Rtt32):<<1",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(RddV,=,1))
+Q6INSN(M4_vrmpyoh_acc_s0,"Rxx32+=vrmpywoh(Rss32,Rtt32)",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(RxxV,+=,0))
+Q6INSN(M4_vrmpyoh_acc_s1,"Rxx32+=vrmpywoh(Rss32,Rtt32):<<1",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(RxxV,+=,1))
+
+
+
+
+
+
+#undef mixmpy_sema
+#define mixmpy_sema(N,H,RND)\
+{ RdV = fSAT((fSCALE(N,fMPY3216SS(RsV,fGETHALF(H,RtV)))RND)>>16); \
+}
+Q6INSN(M2_hmmpyl_rs1,"Rd32=mpy(Rs32,Rt.L32):<<1:rnd:sat",ATTRIBS(A_ARCHV2),"Mixed Precision Multiply",mixmpy_sema(1,0,+0x8000))
+Q6INSN(M2_hmmpyh_rs1,"Rd32=mpy(Rs32,Rt.H32):<<1:rnd:sat",ATTRIBS(A_ARCHV2),"Mixed Precision Multiply",mixmpy_sema(1,1,+0x8000))
+Q6INSN(M2_hmmpyl_s1,"Rd32=mpy(Rs32,Rt.L32):<<1:sat",ATTRIBS(A_ARCHV2),"Mixed Precision Multiply",mixmpy_sema(1,0,))
+Q6INSN(M2_hmmpyh_s1,"Rd32=mpy(Rs32,Rt.H32):<<1:sat",ATTRIBS(A_ARCHV2),"Mixed Precision Multiply",mixmpy_sema(1,1,))
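+
+/*
+ * Arithmetic sketch (illustrative): M2_hmmpyh_rs1 forms the 48-bit 32x16
+ * product, doubles it (:<<1), rounds, and keeps the upper word:
+ *
+ *     RdV = fSAT(((((int64_t)RsV * fGETHALF(1, RtV)) << 1) + 0x8000) >> 16);
+ */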
+
+
+
+
+
+
+
+
+
+/* SIGNED 32 x UNSIGNED 16 */
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(2,RttV))))>>16)) ); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(0,RttV))))>>16)) ); \
+}
+Q6INSN(M2_mmaculs_s0,"Rxx32+=vmpyweuh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmaculs_s1,"Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(3,RttV))))>>16) )); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(1,RttV))))>>16 ))); \
+}
+Q6INSN(M2_mmacuhs_s0,"Rxx32+=vmpywouh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmacuhs_s1,"Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(2,RttV))))>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(0,RttV))))>>16)); \
+}
+Q6INSN(M2_mmpyul_s0,"Rdd32=vmpyweuh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyul_s1,"Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(3,RttV))))>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(1,RttV))))>>16)); \
+}
+Q6INSN(M2_mmpyuh_s0,"Rdd32=vmpywouh(Rss32,Rtt32):sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyuh_s1,"Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+
+/* With rounding */
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(2,RttV)))+0x8000)>>16)) ); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(0,RttV)))+0x8000)>>16)) ); \
+}
+Q6INSN(M2_mmaculs_rs0,"Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmaculs_rs1,"Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RxxV,fSAT(fGETWORD(1,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(3,RttV)))+0x8000)>>16) )); \
+ fSETWORD(0,RxxV,fSAT(fGETWORD(0,RxxV) + ((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(1,RttV)))+0x8000)>>16 ))); \
+}
+Q6INSN(M2_mmacuhs_rs0,"Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmacuhs_rs1,"Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(2,RttV)))+0x8000)>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(0,RttV)))+0x8000)>>16)); \
+}
+Q6INSN(M2_mmpyul_rs0,"Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyul_rs1,"Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+#undef mixmpy_sema
+#define mixmpy_sema(N)\
+{ fSETWORD(1,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(1,RssV),fGETUHALF(3,RttV)))+0x8000)>>16)); \
+ fSETWORD(0,RddV,fSAT((fSCALE(N,fMPY3216SU(fGETWORD(0,RssV),fGETUHALF(1,RttV)))+0x8000)>>16)); \
+}
+Q6INSN(M2_mmpyuh_rs0,"Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat",ATTRIBS(), "Mixed Precision Multiply",mixmpy_sema(0))
+Q6INSN(M2_mmpyuh_rs1,"Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Multiply",mixmpy_sema(1))
+
+
+/**************************************************************/
+/* complex mac with full 64-bit accum - no sat, no shift */
+/* either do real or imag, never both */
+/**************************************************************/
+
+Q6INSN(M2_vrcmaci_s0,"Rxx32+=vrcmpyi(Rss32,Rtt32)",ATTRIBS(),"Vector Complex Mac Imaginary",
+{
+RxxV = RxxV + fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,RttV)) + \
+ fMPY16SS(fGETHALF(0,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(2,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmacr_s0,"Rxx32+=vrcmpyr(Rss32,Rtt32)",ATTRIBS(),"Vector Complex Mac Real",
+{ RxxV = RxxV + fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)) - \
+ fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)) - \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmaci_s0c,"Rxx32+=vrcmpyi(Rss32,Rtt32*)",ATTRIBS(A_ARCHV2),"Vector Complex Mac Imaginary",
+{
+RxxV = RxxV + fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,RttV)) - \
+ fMPY16SS(fGETHALF(0,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(2,RttV)) - \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmacr_s0c,"Rxx32+=vrcmpyr(Rss32,Rtt32*)",ATTRIBS(A_ARCHV2),"Vector Complex Mac Real",
+{ RxxV = RxxV + fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)) + \
+ fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_cmaci_s0,"Rxx32+=cmpyi(Rs32,Rt32)",ATTRIBS(),"Vector Complex Mac Imaginary",
+{
+RxxV = RxxV + fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV)) + \
+ fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV));
+})
+
+Q6INSN(M2_cmacr_s0,"Rxx32+=cmpyr(Rs32,Rt32)",ATTRIBS(),"Vector Complex Mac Real",
+{ RxxV = RxxV + fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV)) - \
+ fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV));
+})
+
+
+Q6INSN(M2_vrcmpyi_s0,"Rdd32=vrcmpyi(Rss32,Rtt32)",ATTRIBS(),"Vector Complex Mpy Imaginary",
+{
+RddV = fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,RttV)) + \
+ fMPY16SS(fGETHALF(0,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(2,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmpyr_s0,"Rdd32=vrcmpyr(Rss32,Rtt32)",ATTRIBS(),"Vector Complex Mpy Real",
+{ RddV = fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)) - \
+ fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)) - \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmpyi_s0c,"Rdd32=vrcmpyi(Rss32,Rtt32*)",ATTRIBS(A_ARCHV2),"Vector Complex Mpy Imaginary",
+{
+RddV = fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,RttV)) - \
+ fMPY16SS(fGETHALF(0,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(2,RttV)) - \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_vrcmpyr_s0c,"Rdd32=vrcmpyr(Rss32,Rtt32*)",ATTRIBS(A_ARCHV2),"Vector Complex Mpy Real",
+{ RddV = fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)) + \
+ fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)) + \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV));\
+})
+
+Q6INSN(M2_cmpyi_s0,"Rdd32=cmpyi(Rs32,Rt32)",ATTRIBS(),"Vector Complex Mpy Imaginary",
+{
+RddV = fMPY16SS(fGETHALF(1,RsV),fGETHALF(0,RtV)) + \
+ fMPY16SS(fGETHALF(0,RsV),fGETHALF(1,RtV));
+})
+
+Q6INSN(M2_cmpyr_s0,"Rdd32=cmpyr(Rs32,Rt32)",ATTRIBS(),"Vector Complex Mpy Real",
+{ RddV = fMPY16SS(fGETHALF(0,RsV),fGETHALF(0,RtV)) - \
+ fMPY16SS(fGETHALF(1,RsV),fGETHALF(1,RtV));
+})
+
+
+/**************************************************************/
+/* Complex mpy with 32-bit result, rnd, sat, shift */
+/* 32x16 real or imag */
+/**************************************************************/
+
+#if 1
+
+Q6INSN(M4_cmpyi_wh,"Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Complex Multiply",
+{
+ RdV = fSAT( ( fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RtV))
+ + fMPY3216SS(fGETWORD(1,RssV),fGETHALF(0,RtV))
+ + 0x4000)>>15);
+})
+
+
+Q6INSN(M4_cmpyr_wh,"Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Complex Multiply",
+{
+ RdV = fSAT( ( fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RtV))
+ - fMPY3216SS(fGETWORD(1,RssV),fGETHALF(1,RtV))
+ + 0x4000)>>15);
+})
+
+Q6INSN(M4_cmpyi_whc,"Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Complex Multiply",
+{
+ RdV = fSAT( ( fMPY3216SS(fGETWORD(1,RssV),fGETHALF(0,RtV))
+ - fMPY3216SS(fGETWORD(0,RssV),fGETHALF(1,RtV))
+ + 0x4000)>>15);
+})
+
+
+Q6INSN(M4_cmpyr_whc,"Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat",ATTRIBS(),"Mixed Precision Complex Multiply",
+{
+ RdV = fSAT( ( fMPY3216SS(fGETWORD(0,RssV),fGETHALF(0,RtV))
+ + fMPY3216SS(fGETWORD(1,RssV),fGETHALF(1,RtV))
+ + 0x4000)>>15);
+})
+
+
+#endif
+
+/**************************************************************/
+/* Vector mpy/mac with 2x32 bit accum, sat, shift */
+/* either do real or imag, never both */
+/**************************************************************/
+
+#undef VCMPYSEMI
+#define VCMPYSEMI(DST,ACC0,ACC1,SHIFT,SAT) \
+ fSETWORD(0,DST,SAT(ACC0 fSCALE(SHIFT,fMPY16SS(fGETHALF(1,RssV),fGETHALF(0,RttV)) + \
+ fMPY16SS(fGETHALF(0,RssV),fGETHALF(1,RttV))))); \
+ fSETWORD(1,DST,SAT(ACC1 fSCALE(SHIFT,fMPY16SS(fGETHALF(3,RssV),fGETHALF(2,RttV)) + \
+ fMPY16SS(fGETHALF(2,RssV),fGETHALF(3,RttV))))); \
+
+#undef VCMPYSEMR
+#define VCMPYSEMR(DST,ACC0,ACC1,SHIFT,SAT) \
+ fSETWORD(0,DST,SAT(ACC0 fSCALE(SHIFT,fMPY16SS(fGETHALF(0,RssV),fGETHALF(0,RttV)) - \
+ fMPY16SS(fGETHALF(1,RssV),fGETHALF(1,RttV))))); \
+ fSETWORD(1,DST,SAT(ACC1 fSCALE(SHIFT,fMPY16SS(fGETHALF(2,RssV),fGETHALF(2,RttV)) - \
+ fMPY16SS(fGETHALF(3,RssV),fGETHALF(3,RttV))))); \
+
+
+#undef VCMPYIR
+#define VCMPYIR(TAGBASE,DSTSYN,DSTVAL,ACCSEM,ACCVAL0,ACCVAL1,SHIFTSYN,SHIFTVAL,SATSYN,SATVAL) \
+Q6INSN(M2_##TAGBASE##i,DSTSYN ACCSEM "vcmpyi(Rss32,Rtt32)" SHIFTSYN SATSYN,ATTRIBS(A_ARCHV2), \
+ "Vector Complex Multiply Imaginary", { VCMPYSEMI(DSTVAL,ACCVAL0,ACCVAL1,SHIFTVAL,SATVAL); }) \
+Q6INSN(M2_##TAGBASE##r,DSTSYN ACCSEM "vcmpyr(Rss32,Rtt32)" SHIFTSYN SATSYN,ATTRIBS(A_ARCHV2), \
+ "Vector Complex Multiply Imaginary", { VCMPYSEMR(DSTVAL,ACCVAL0,ACCVAL1,SHIFTVAL,SATVAL); })
+
+
+VCMPYIR(vcmpy_s0_sat_,"Rdd32",RddV,"=",,,"",0,":sat",fSAT)
+VCMPYIR(vcmpy_s1_sat_,"Rdd32",RddV,"=",,,":<<1",1,":sat",fSAT)
+VCMPYIR(vcmac_s0_sat_,"Rxx32",RxxV,"+=",fGETWORD(0,RxxV) + ,fGETWORD(1,RxxV) + ,"",0,":sat",fSAT)
+
+
+/**********************************************************************
+ * Rotation -- by 0, 90, 180, or 270 means mult by 1, J, -1, -J *
+ *********************************************************************/
+
+Q6INSN(S2_vcrotate,"Rdd32=vcrotate(Rss32,Rt32)",ATTRIBS(A_ARCHV2),"Rotate complex value by multiple of PI/2",
+{
+ fHIDE(size1u_t tmp;)
+ tmp = fEXTRACTU_RANGE(RtV,1,0);
+ if (tmp == 0) { /* No rotation */
+ fSETHALF(0,RddV,fGETHALF(0,RssV));
+ fSETHALF(1,RddV,fGETHALF(1,RssV));
+ } else if (tmp == 1) { /* Multiply by -J */
+ fSETHALF(0,RddV,fGETHALF(1,RssV));
+ fSETHALF(1,RddV,fSATH(-fGETHALF(0,RssV)));
+ } else if (tmp == 2) { /* Multiply by J */
+ fSETHALF(0,RddV,fSATH(-fGETHALF(1,RssV)));
+ fSETHALF(1,RddV,fGETHALF(0,RssV));
+ } else { /* Multiply by -1 */
+ fHIDE(if (tmp != 3) fatal("C is broken");)
+ fSETHALF(0,RddV,fSATH(-fGETHALF(0,RssV)));
+ fSETHALF(1,RddV,fSATH(-fGETHALF(1,RssV)));
+ }
+ tmp = fEXTRACTU_RANGE(RtV,3,2);
+ if (tmp == 0) { /* No rotation */
+ fSETHALF(2,RddV,fGETHALF(2,RssV));
+ fSETHALF(3,RddV,fGETHALF(3,RssV));
+ } else if (tmp == 1) { /* Multiply by -J */
+ fSETHALF(2,RddV,fGETHALF(3,RssV));
+ fSETHALF(3,RddV,fSATH(-fGETHALF(2,RssV)));
+ } else if (tmp == 2) { /* Multiply by J */
+ fSETHALF(2,RddV,fSATH(-fGETHALF(3,RssV)));
+ fSETHALF(3,RddV,fGETHALF(2,RssV));
+ } else { /* Multiply by -1 */
+ fHIDE(if (tmp != 3) fatal("C is broken");)
+ fSETHALF(2,RddV,fSATH(-fGETHALF(2,RssV)));
+ fSETHALF(3,RddV,fSATH(-fGETHALF(3,RssV)));
+ }
+})
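+
+/*
+ * Illustrative sketch (not part of the imported file): the per-pair rule
+ * used above, for one complex halfword with re in the low half and im in
+ * the high half.  Multiplying by J maps (re,im) to (-im,re), by -J maps
+ * (re,im) to (im,-re), and by -1 negates both parts; negating -32768
+ * saturates.  sath() is a stand-in for fSATH().
+ */
+#if 0 /* illustration only */
+static short sath(int v) { return v > 32767 ? 32767 : v < -32768 ? -32768 : v; }
+
+static void crot1(short re, short im, int ctl, short *ro, short *io)
+{
+    switch (ctl & 3) {
+    case 0: *ro = re;        *io = im;        break; /* x  1 */
+    case 1: *ro = im;        *io = sath(-re); break; /* x -J */
+    case 2: *ro = sath(-im); *io = re;        break; /* x  J */
+    case 3: *ro = sath(-re); *io = sath(-im); break; /* x -1 */
+    }
+}
+#endif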
+
+
+Q6INSN(S4_vrcrotate_acc,"Rxx32+=vrcrotate(Rss32,Rt32,#u2)",ATTRIBS(),"Rotate and Reduce Bytes",
+{
+ fHIDE(int i; int tmpr; int tmpi; unsigned int control;)
+ fHIDE(int sumr; int sumi;)
+ sumr = 0;
+ sumi = 0;
+ control = fGETUBYTE(uiV,RtV);
+ for (i = 0; i < 8; i += 2) {
+ tmpr = fGETBYTE(i ,RssV);
+ tmpi = fGETBYTE(i+1,RssV);
+ switch (control & 3) {
+ case 0: /* No Rotation */
+ sumr += tmpr;
+ sumi += tmpi;
+ break;
+ case 1: /* Multiply by -J */
+ sumr += tmpi;
+ sumi -= tmpr;
+ break;
+ case 2: /* Multiply by J */
+ sumr -= tmpi;
+ sumi += tmpr;
+ break;
+ case 3: /* Multiply by -1 */
+ sumr -= tmpr;
+ sumi -= tmpi;
+ break;
+ fHIDE(default: fatal("C is broken!");)
+ }
+ control = control >> 2;
+ }
+ fSETWORD(0,RxxV,fGETWORD(0,RxxV) + sumr);
+ fSETWORD(1,RxxV,fGETWORD(1,RxxV) + sumi);
+})
+
+Q6INSN(S4_vrcrotate,"Rdd32=vrcrotate(Rss32,Rt32,#u2)",ATTRIBS(),"Rotate and Reduce Bytes",
+{
+ fHIDE(int i; int tmpr; int tmpi; unsigned int control;)
+ fHIDE(int sumr; int sumi;)
+ sumr = 0;
+ sumi = 0;
+ control = fGETUBYTE(uiV,RtV);
+ for (i = 0; i < 8; i += 2) {
+ tmpr = fGETBYTE(i ,RssV);
+ tmpi = fGETBYTE(i+1,RssV);
+ switch (control & 3) {
+ case 0: /* No Rotation */
+ sumr += tmpr;
+ sumi += tmpi;
+ break;
+ case 1: /* Multiply by -J */
+ sumr += tmpi;
+ sumi -= tmpr;
+ break;
+ case 2: /* Multiply by J */
+ sumr -= tmpi;
+ sumi += tmpr;
+ break;
+ case 3: /* Multiply by -1 */
+ sumr -= tmpr;
+ sumi -= tmpi;
+ break;
+ fHIDE(default: fatal("C is broken!");)
+ }
+ control = control >> 2;
+ }
+ fSETWORD(0,RddV,sumr);
+ fSETWORD(1,RddV,sumi);
+})
+
+
+Q6INSN(S2_vcnegh,"Rdd32=vcnegh(Rss32,Rt32)",ATTRIBS(),"Conditional Negate halfwords",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ if (fGETBIT(i,RtV)) {
+ fSETHALF(i,RddV,fSATH(-fGETHALF(i,RssV)));
+ } else {
+ fSETHALF(i,RddV,fGETHALF(i,RssV));
+ }
+ }
+})
+
+Q6INSN(S2_vrcnegh,"Rxx32+=vrcnegh(Rss32,Rt32)",ATTRIBS(),"Vector Reduce Conditional Negate halfwords",
+{
+ fHIDE(int i;)
+ for (i = 0; i < 4; i++) {
+ if (fGETBIT(i,RtV)) {
+ RxxV += -fGETHALF(i,RssV);
+ } else {
+ RxxV += fGETHALF(i,RssV);
+ }
+ }
+})
+
+
+/**********************************************************************
+ * Finite-field multiplies. Written by David Hoyle *
+ *********************************************************************/
+
+Q6INSN(M4_pmpyw,"Rdd32=pmpyw(Rs32,Rt32)",ATTRIBS(),"Polynomial 32bit Multiplication with Addition in GF(2)",
+{
+ fHIDE(int i; unsigned int y;)
+ fHIDE(unsigned long long x; unsigned long long prod;)
+ x = fGETUWORD(0, RsV);
+ y = fGETUWORD(0, RtV);
+
+ prod = 0;
+ for(i=0; i < 32; i++) {
+ if((y >> i) & 1) prod ^= (x << i);
+ }
+ RddV = prod;
+})
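+
+/*
+ * Illustrative sketch (not part of the imported file): the loop above is a
+ * carry-less (polynomial, GF(2)) multiply -- partial products combine with
+ * XOR instead of ADD.  E.g. clmul32(0x5,0x3) == 0xF, matching
+ * (x^2+1)(x+1) = x^3+x^2+x+1, while clmul32(0x3,0x3) == 0x5 even though
+ * 3*3 == 9.
+ */
+#if 0 /* illustration only */
+static unsigned long long clmul32(unsigned int x, unsigned int y)
+{
+    unsigned long long prod = 0;
+    int i;
+    for (i = 0; i < 32; i++) {
+        if ((y >> i) & 1) {
+            prod ^= (unsigned long long)x << i;   /* XOR: no carries */
+        }
+    }
+    return prod;
+}
+#endif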
+
+Q6INSN(M4_vpmpyh,"Rdd32=vpmpyh(Rs32,Rt32)",ATTRIBS(),"Dual Polynomial 16bit Multiplication with Addition in GF(2)",
+{
+ fHIDE(int i; unsigned int x0; unsigned int x1;)
+ fHIDE(unsigned int y0; unsigned int y1;)
+ fHIDE(unsigned int prod0; unsigned int prod1;)
+
+ x0 = fGETUHALF(0, RsV);
+ x1 = fGETUHALF(1, RsV);
+ y0 = fGETUHALF(0, RtV);
+ y1 = fGETUHALF(1, RtV);
+
+ prod0 = prod1 = 0;
+ for(i=0; i < 16; i++) {
+ if((y0 >> i) & 1) prod0 ^= (x0 << i);
+ if((y1 >> i) & 1) prod1 ^= (x1 << i);
+ }
+ fSETHALF(0,RddV,fGETUHALF(0,prod0));
+ fSETHALF(1,RddV,fGETUHALF(0,prod1));
+ fSETHALF(2,RddV,fGETUHALF(1,prod0));
+ fSETHALF(3,RddV,fGETUHALF(1,prod1));
+})
+
+Q6INSN(M4_pmpyw_acc,"Rxx32^=pmpyw(Rs32,Rt32)",ATTRIBS(),"Polynomial 32bit Multiplication with Addition in GF(2)",
+{
+ fHIDE(int i; unsigned int y;)
+ fHIDE(unsigned long long x; unsigned long long prod;)
+ x = fGETUWORD(0, RsV);
+ y = fGETUWORD(0, RtV);
+
+ prod = 0;
+ for(i=0; i < 32; i++) {
+ if((y >> i) & 1) prod ^= (x << i);
+ }
+ RxxV ^= prod;
+})
+
+Q6INSN(M4_vpmpyh_acc,"Rxx32^=vpmpyh(Rs32,Rt32)",ATTRIBS(),"Dual Polynomial 16bit Multiplication with Addition in GF(2)",
+{
+ fHIDE(int i; unsigned int x0; unsigned int x1;)
+ fHIDE(unsigned int y0; unsigned int y1;)
+ fHIDE(unsigned int prod0; unsigned int prod1;)
+
+ x0 = fGETUHALF(0, RsV);
+ x1 = fGETUHALF(1, RsV);
+ y0 = fGETUHALF(0, RtV);
+ y1 = fGETUHALF(1, RtV);
+
+ prod0 = prod1 = 0;
+ for(i=0; i < 16; i++) {
+ if((y0 >> i) & 1) prod0 ^= (x0 << i);
+ if((y1 >> i) & 1) prod1 ^= (x1 << i);
+ }
+ fSETHALF(0,RxxV,fGETUHALF(0,RxxV) ^ fGETUHALF(0,prod0));
+ fSETHALF(1,RxxV,fGETUHALF(1,RxxV) ^ fGETUHALF(0,prod1));
+ fSETHALF(2,RxxV,fGETUHALF(2,RxxV) ^ fGETUHALF(1,prod0));
+ fSETHALF(3,RxxV,fGETUHALF(3,RxxV) ^ fGETUHALF(1,prod1));
+})
+
+
+/* V70: TINY CORE */
+
+#define CMPY64(TAG,NAME,DESC,OPERAND1,OP,W0,W1,W2,W3) \
+Q6INSN(M7_##TAG,"Rdd32=" NAME "(Rss32," OPERAND1 ")",ATTRIBS(A_RESTRICT_SLOT3ONLY),"Complex Multiply 64-bit " DESC, { RddV = (fMPY32SS(fGETWORD(W0, RssV), fGETWORD(W1, RttV)) OP fMPY32SS(fGETWORD(W2, RssV), fGETWORD(W3, RttV)));})\
+Q6INSN(M7_##TAG##_acc,"Rxx32+=" NAME "(Rss32,"OPERAND1")",ATTRIBS(A_RESTRICT_SLOT3ONLY),"Complex Multiply-Accumulate 64-bit " DESC, { RxxV += (fMPY32SS(fGETWORD(W0, RssV), fGETWORD(W1, RttV)) OP fMPY32SS(fGETWORD(W2, RssV), fGETWORD(W3, RttV)));})
+
+CMPY64(dcmpyrw, "cmpyrw","Real","Rtt32" ,-,0,0,1,1)
+CMPY64(dcmpyrwc,"cmpyrw","Real","Rtt32*",+,0,0,1,1)
+CMPY64(dcmpyiw, "cmpyiw","Imag","Rtt32" ,+,0,1,1,0)
+CMPY64(dcmpyiwc,"cmpyiw","Imag","Rtt32*",-,1,0,0,1)
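+
+/*
+ * Illustrative note (not part of the imported file): taking the low word as
+ * the real part (the usual Hexagon convention), the four variants above
+ * compute, as full 64-bit sums with no shift or saturation:
+ *   dcmpyrw :  a_re*b_re - a_im*b_im   (real part)
+ *   dcmpyrwc:  a_re*b_re + a_im*b_im   (real part, b conjugated)
+ *   dcmpyiw :  a_re*b_im + a_im*b_re   (imaginary part)
+ *   dcmpyiwc:  a_im*b_re - a_re*b_im   (imaginary part, b conjugated)
+ */
+#if 0 /* illustration only */
+static long long dcmpyrw(int a_re, int a_im, int b_re, int b_im)
+{
+    return (long long)a_re * b_re - (long long)a_im * b_im;
+}
+#endif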
+
+#define CMPY128(TAG, NAME, OPERAND1, WORD0, WORD1, WORD2, WORD3, OP) \
+Q6INSN(M7_##TAG,"Rd32=" NAME "(Rss32,"OPERAND1"):<<1:sat",ATTRIBS(A_RESTRICT_SLOT3ONLY),"Complex Multiply 32-bit result real", \
+{ \
+fHIDE(size16s_t acc128;)\
+fHIDE(size16s_t tmp128;)\
+fHIDE(size8s_t acc64;)\
+tmp128 = fCAST8S_16S(fMPY32SS(fGETWORD(WORD0, RssV), fGETWORD(WORD1, RttV)));\
+acc128 = fCAST8S_16S(fMPY32SS(fGETWORD(WORD2, RssV), fGETWORD(WORD3, RttV)));\
+acc128 = OP(tmp128,acc128);\
+acc128 = fSHIFTR128(acc128, 31);\
+acc64 = fCAST16S_8S(acc128);\
+RdV = fSATW(acc64);\
+})
+
+
+CMPY128(wcmpyrw, "cmpyrw", "Rtt32", 0, 0, 1, 1, fSUB128)
+CMPY128(wcmpyrwc, "cmpyrw", "Rtt32*", 0, 0, 1, 1, fADD128)
+CMPY128(wcmpyiw, "cmpyiw", "Rtt32", 0, 1, 1, 0, fADD128)
+CMPY128(wcmpyiwc, "cmpyiw", "Rtt32*", 1, 0, 0, 1, fSUB128)
+
+
+#define CMPY128RND(TAG, NAME, OPERAND1, WORD0, WORD1, WORD2, WORD3, OP) \
+Q6INSN(M7_##TAG##_rnd,"Rd32=" NAME "(Rss32,"OPERAND1"):<<1:rnd:sat",ATTRIBS(A_RESTRICT_SLOT3ONLY),"Complex Multiply 32-bit result with round", \
+{ \
+fHIDE(size16s_t acc128;)\
+fHIDE(size16s_t tmp128;)\
+fHIDE(size16s_t const128;)\
+fHIDE(size8s_t acc64;)\
+tmp128 = fCAST8S_16S(fMPY32SS(fGETWORD(WORD0, RssV), fGETWORD(WORD1, RttV)));\
+acc128 = fCAST8S_16S(fMPY32SS(fGETWORD(WORD2, RssV), fGETWORD(WORD3, RttV)));\
+const128 = fCAST8S_16S(fCONSTLL(0x40000000));\
+acc128 = OP(tmp128,acc128);\
+acc128 = fADD128(acc128,const128);\
+acc128 = fSHIFTR128(acc128, 31);\
+acc64 = fCAST16S_8S(acc128);\
+RdV = fSATW(acc64);\
+})
+
+CMPY128RND(wcmpyrw, "cmpyrw", "Rtt32", 0, 0, 1, 1, fSUB128)
+CMPY128RND(wcmpyrwc, "cmpyrw", "Rtt32*", 0, 0, 1, 1, fADD128)
+CMPY128RND(wcmpyiw, "cmpyiw", "Rtt32", 0, 1, 1, 0, fADD128)
+CMPY128RND(wcmpyiwc, "cmpyiw", "Rtt32*", 1, 0, 0, 1, fSUB128)
diff --git a/target/hexagon/imported/shift.idef b/target/hexagon/imported/shift.idef
new file mode 100644
index 000000000..b32c4e04d
--- /dev/null
+++ b/target/hexagon/imported/shift.idef
@@ -0,0 +1,1113 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * S-type Instructions
+ */
+
+/**********************************************/
+/* SHIFTS */
+/**********************************************/
+
+/* NOTE: Rdd = Rs *right* shifts don't make sense */
+/* NOTE: Rd[d] = Rs[s] *right* shifts with saturation don't make sense */
+
+
+#define RSHIFTTYPES(TAGEND,REGD,REGS,REGSTYPE,ACC,ACCSRC,SAT,SATOPT,ATTRS) \
+Q6INSN(S2_asr_r_##TAGEND,#REGD "32" #ACC "=asr(" #REGS "32,Rt32)" #SATOPT,ATTRIBS(ATTRS), \
+ "Arithmetic Shift Right by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = SAT(ACCSRC ACC fBIDIR_ASHIFTR(REGS##V,shamt,REGSTYPE)); \
+ })\
+\
+Q6INSN(S2_asl_r_##TAGEND,#REGD "32" #ACC "=asl(" #REGS "32,Rt32)" #SATOPT,ATTRIBS(ATTRS), \
+ "Arithmetic Shift Left by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = SAT(ACCSRC ACC fBIDIR_ASHIFTL(REGS##V,shamt,REGSTYPE)); \
+ })\
+\
+Q6INSN(S2_lsr_r_##TAGEND,#REGD "32" #ACC "=lsr(" #REGS "32,Rt32)" #SATOPT,ATTRIBS(ATTRS), \
+ "Logical Shift Right by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = SAT(ACCSRC ACC fBIDIR_LSHIFTR(REGS##V,shamt,REGSTYPE)); \
+ })\
+\
+Q6INSN(S2_lsl_r_##TAGEND,#REGD "32" #ACC "=lsl(" #REGS "32,Rt32)" #SATOPT,ATTRIBS(ATTRS), \
+ "Logical Shift Left by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = SAT(ACCSRC ACC fBIDIR_LSHIFTL(REGS##V,shamt,REGSTYPE)); \
+ })
+
+RSHIFTTYPES(r,Rd,Rs,4_8,,,fECHO,,)
+RSHIFTTYPES(p,Rdd,Rss,8_8,,,fECHO,,)
+RSHIFTTYPES(r_acc,Rx,Rs,4_8,+,RxV,fECHO,,)
+RSHIFTTYPES(p_acc,Rxx,Rss,8_8,+,RxxV,fECHO,,)
+RSHIFTTYPES(r_nac,Rx,Rs,4_8,-,RxV,fECHO,,)
+RSHIFTTYPES(p_nac,Rxx,Rss,8_8,-,RxxV,fECHO,,)
+
+RSHIFTTYPES(r_and,Rx,Rs,4_8,&,RxV,fECHO,,)
+RSHIFTTYPES(r_or,Rx,Rs,4_8,|,RxV,fECHO,,)
+RSHIFTTYPES(p_and,Rxx,Rss,8_8,&,RxxV,fECHO,,)
+RSHIFTTYPES(p_or,Rxx,Rss,8_8,|,RxxV,fECHO,,)
+RSHIFTTYPES(p_xor,Rxx,Rss,8_8,^,RxxV,fECHO,,)
+
+
+#undef RSHIFTTYPES
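+
+/*
+ * Illustrative sketch (not part of the imported file): the fBIDIR_* helpers
+ * above sign-extend the low 7 bits of Rt, so a negative amount shifts the
+ * other way.  A plain-C rendering of the arithmetic-right case; clamping
+ * out-of-range amounts to 63 is an assumption here, the macros own the
+ * exact rule.
+ */
+#if 0 /* illustration only */
+static int bidir_asr32(int val, int rt)
+{
+    int shamt = (rt & 0x40) ? (rt | ~0x7f) : (rt & 0x3f); /* sxt 7 bits */
+    long long wide = val;             /* widen so +/-63 stays defined */
+    if (shamt < 0) {
+        return (int)(wide << (-shamt > 63 ? 63 : -shamt)); /* go left */
+    }
+    return (int)(wide >> shamt);
+}
+#endif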
+
+/* Register shift with saturation */
+#define RSATSHIFTTYPES(TAGEND,REGD,REGS,REGSTYPE) \
+Q6INSN(S2_asr_r_##TAGEND,#REGD "32" "=asr(" #REGS "32,Rt32):sat",ATTRIBS(), \
+ "Arithmetic Shift Right by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = fBIDIR_ASHIFTR_SAT(REGS##V,shamt,REGSTYPE); \
+ })\
+\
+Q6INSN(S2_asl_r_##TAGEND,#REGD "32" "=asl(" #REGS "32,Rt32):sat",ATTRIBS(), \
+ "Arithmetic Shift Left by Register", \
+ { \
+ fHIDE(size4s_t) shamt=fSXTN(7,32,RtV);\
+ REGD##V = fBIDIR_ASHIFTL_SAT(REGS##V,shamt,REGSTYPE); \
+ })
+
+RSATSHIFTTYPES(r_sat,Rd,Rs,4_8)
+
+
+
+
+
+#define ISHIFTTYPES(TAGEND,SIZE,REGD,REGS,REGSTYPE,ACC,ACCSRC,SAT,SATOPT,ATTRS) \
+Q6INSN(S2_asr_i_##TAGEND,#REGD "32" #ACC "=asr(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(ATTRS), \
+ "Arithmetic Shift Right by Immediate", \
+ { REGD##V = SAT(ACCSRC ACC fASHIFTR(REGS##V,uiV,REGSTYPE)); }) \
+\
+Q6INSN(S2_lsr_i_##TAGEND,#REGD "32" #ACC "=lsr(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(ATTRS), \
+ "Logical Shift Right by Immediate", \
+ { REGD##V = SAT(ACCSRC ACC fLSHIFTR(REGS##V,uiV,REGSTYPE)); }) \
+\
+Q6INSN(S2_asl_i_##TAGEND,#REGD "32" #ACC "=asl(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(ATTRS), \
+ "Shift Left by Immediate", \
+ { REGD##V = SAT(ACCSRC ACC fASHIFTL(REGS##V,uiV,REGSTYPE)); }) \
+Q6INSN(S6_rol_i_##TAGEND,#REGD "32" #ACC "=rol(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(ATTRS), \
+ "Rotate Left by Immediate", \
+ { REGD##V = SAT(ACCSRC ACC fROTL(REGS##V,uiV,REGSTYPE)); })
+
+
+#define ISHIFTTYPES_ONLY_ASL(TAGEND,SIZE,REGD,REGS,REGSTYPE,ACC,ACCSRC,SAT,SATOPT) \
+Q6INSN(S2_asl_i_##TAGEND,#REGD "32" #ACC "=asl(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(), \
+ "", \
+ { REGD##V = SAT(ACCSRC ACC fASHIFTL(REGS##V,uiV,REGSTYPE)); })
+
+#define ISHIFTTYPES_ONLY_ASR(TAGEND,SIZE,REGD,REGS,REGSTYPE,ACC,ACCSRC,SAT,SATOPT) \
+Q6INSN(S2_asr_i_##TAGEND,#REGD "32" #ACC "=asr(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(), \
+ "", \
+ { REGD##V = SAT(ACCSRC ACC fASHIFTR(REGS##V,uiV,REGSTYPE)); })
+
+
+#define ISHIFTTYPES_NOASR(TAGEND,SIZE,REGD,REGS,REGSTYPE,ACC,ACCSRC,SAT,SATOPT) \
+Q6INSN(S2_lsr_i_##TAGEND,#REGD "32" #ACC "=lsr(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(), \
+ "Logical Shift Right by Register", \
+ { REGD##V = SAT(ACCSRC ACC fLSHIFTR(REGS##V,uiV,REGSTYPE)); }) \
+Q6INSN(S2_asl_i_##TAGEND,#REGD "32" #ACC "=asl(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(), \
+ "Shift Left by Register", \
+ { REGD##V = SAT(ACCSRC ACC fASHIFTL(REGS##V,uiV,REGSTYPE)); }) \
+Q6INSN(S6_rol_i_##TAGEND,#REGD "32" #ACC "=rol(" #REGS "32,#u" #SIZE ")" #SATOPT,ATTRIBS(), \
+ "Rotate Left by Immediate", \
+ { REGD##V = SAT(ACCSRC ACC fROTL(REGS##V,uiV,REGSTYPE)); })
+
+
+
+ISHIFTTYPES(r,5,Rd,Rs,4_4,,,fECHO,,)
+ISHIFTTYPES(p,6,Rdd,Rss,8_8,,,fECHO,,)
+ISHIFTTYPES(r_acc,5,Rx,Rs,4_4,+,RxV,fECHO,,)
+ISHIFTTYPES(p_acc,6,Rxx,Rss,8_8,+,RxxV,fECHO,,)
+ISHIFTTYPES(r_nac,5,Rx,Rs,4_4,-,RxV,fECHO,,)
+ISHIFTTYPES(p_nac,6,Rxx,Rss,8_8,-,RxxV,fECHO,,)
+
+ISHIFTTYPES_NOASR(r_xacc,5,Rx,Rs,4_4,^, RxV,fECHO,)
+ISHIFTTYPES_NOASR(p_xacc,6,Rxx,Rss,8_8,^, RxxV,fECHO,)
+
+ISHIFTTYPES(r_and,5,Rx,Rs,4_4,&,RxV,fECHO,,)
+ISHIFTTYPES(r_or,5,Rx,Rs,4_4,|,RxV,fECHO,,)
+ISHIFTTYPES(p_and,6,Rxx,Rss,8_8,&,RxxV,fECHO,,)
+ISHIFTTYPES(p_or,6,Rxx,Rss,8_8,|,RxxV,fECHO,,)
+
+ISHIFTTYPES_ONLY_ASL(r_sat,5,Rd,Rs,4_8,,,fSAT,:sat)
+
+
+Q6INSN(S2_asr_i_r_rnd,"Rd32=asr(Rs32,#u5):rnd",ATTRIBS(),
+ "Shift right with round",
+ { RdV = fASHIFTR(((fASHIFTR(RsV,uiV,4_8))+1),1,8_8); })
+
+
+Q6INSN(S2_asr_i_p_rnd,"Rdd32=asr(Rss32,#u6):rnd",ATTRIBS(), "Shift right with round",
+{ fHIDE(size8u_t tmp;)
+ fHIDE(size8u_t rnd;)
+ tmp = fASHIFTR(RssV,uiV,8_8);
+ rnd = tmp & 1;
+ RddV = fASHIFTR(tmp,1,8_8) + rnd; })
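+
+/*
+ * Illustrative note (not part of the imported file): the ((x>>u)+1)>>1
+ * pattern above rounds x / 2^(u+1) to nearest with ties toward +infinity:
+ * for u = 1, x = 5 gives 1, x = 6 gives 2, and x = -6 gives -1.
+ */
+#if 0 /* illustration only */
+static int asr_rnd(int x, int u) { return ((x >> u) + 1) >> 1; }
+#endif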
+
+
+Q6INSN(S4_lsli,"Rd32=lsl(#s6,Rt32)",ATTRIBS(), "Shift an immediate left by register amount",
+{
+ fHIDE(size4s_t) shamt = fSXTN(7,32,RtV);
+ RdV = fBIDIR_LSHIFTL(siV,shamt,4_8);
+})
+
+
+
+
+Q6INSN(S2_addasl_rrri,"Rd32=addasl(Rt32,Rs32,#u3)",ATTRIBS(),
+ "Shift left by small amount and add",
+ { RdV = RtV + fASHIFTL(RsV,uiV,4_4); })
+
+
+
+#define SHIFTOPI(TAGEND,INNEROP,INNERSEM)\
+Q6INSN(S4_andi_##TAGEND,"Rx32=and(#u8,"INNEROP")",,"Shift-op",{RxV=fIMMEXT(uiV)&INNERSEM;})\
+Q6INSN(S4_ori_##TAGEND, "Rx32=or(#u8,"INNEROP")",,"Shift-op",{RxV=fIMMEXT(uiV)|INNERSEM;})\
+Q6INSN(S4_addi_##TAGEND,"Rx32=add(#u8,"INNEROP")",,"Shift-op",{RxV=fIMMEXT(uiV)+INNERSEM;})\
+Q6INSN(S4_subi_##TAGEND,"Rx32=sub(#u8,"INNEROP")",,"Shift-op",{RxV=fIMMEXT(uiV)-INNERSEM;})
+
+
+SHIFTOPI(asl_ri,"asl(Rx32,#U5)",(RxV<<UiV))
+SHIFTOPI(lsr_ri,"lsr(Rx32,#U5)",(((unsigned int)RxV)>>UiV))
+
+
+/**********************************************/
+/* PERMUTES */
+/**********************************************/
+Q6INSN(S2_valignib,"Rdd32=valignb(Rtt32,Rss32,#u3)",
+ATTRIBS(), "Vector align bytes",
+{
+ RddV = (fLSHIFTR(RssV,uiV*8,8_8))|(fASHIFTL(RttV,((8-uiV)*8),8_8));
+})
+
+Q6INSN(S2_valignrb,"Rdd32=valignb(Rtt32,Rss32,Pu4)",
+ATTRIBS(), "Align with register",
+{ RddV = fLSHIFTR(RssV,(PuV&0x7)*8,8_8)|(fASHIFTL(RttV,(8-(PuV&0x7))*8,8_8));})
+
+Q6INSN(S2_vspliceib,"Rdd32=vspliceb(Rss32,Rtt32,#u3)",
+ATTRIBS(), "Vector splice bytes",
+{ RddV = fASHIFTL(RttV,uiV*8,8_8) | fZXTN(uiV*8,64,RssV); })
+
+Q6INSN(S2_vsplicerb,"Rdd32=vspliceb(Rss32,Rtt32,Pu4)",
+ATTRIBS(), "Splice with register",
+{ RddV = fASHIFTL(RttV,(PuV&7)*8,8_8) | fZXTN((PuV&7)*8,64,RssV); })
+
+Q6INSN(S2_vsplatrh,"Rdd32=vsplath(Rs32)",
+ATTRIBS(), "Vector splat halfwords from register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, fGETHALF(0,RsV));
+ }
+})
+
+
+Q6INSN(S2_vsplatrb,"Rd32=vsplatb(Rs32)",
+ATTRIBS(), "Vector splat bytes from register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV, fGETBYTE(0,RsV));
+ }
+})
+
+Q6INSN(S6_vsplatrbp,"Rdd32=vsplatb(Rs32)",
+ATTRIBS(), "Vector splat bytes from register",
+{
+ fHIDE(int i;)
+ for (i=0;i<8;i++) {
+ fSETBYTE(i,RddV, fGETBYTE(0,RsV));
+ }
+})
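+
+/*
+ * Illustrative note (not part of the imported file): a byte splat is the
+ * same as a multiply by the 0x01...01 pattern, a common strength reduction
+ * for the loop above:
+ */
+#if 0 /* illustration only */
+static unsigned long long splatb64(unsigned char b)
+{
+    return b * 0x0101010101010101ULL;  /* b copied into all 8 byte lanes */
+}
+#endif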
+
+
+
+/**********************************************/
+/* Insert/Extract[u] */
+/**********************************************/
+
+Q6INSN(S2_insert,"Rx32=insert(Rs32,#u5,#U5)",
+ATTRIBS(), "Insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ /* clear bits in Rxx where new bits go */
+ RxV &= ~(((fCONSTLL(1)<<width)-1)<<offset);
+ /* OR in new bits */
+ RxV |= ((RsV & ((fCONSTLL(1)<<width)-1)) << offset);
+})
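+
+/*
+ * Illustrative sketch (not part of the imported file): the same
+ * clear-then-OR insert in plain C, plus one worked value.
+ */
+#if 0 /* illustration only */
+static unsigned insert_bits(unsigned rx, unsigned rs, int width, int offset)
+{
+    unsigned long long mask = (1ULL << width) - 1;
+    rx &= ~(unsigned)(mask << offset);        /* clear the target field */
+    rx |= (unsigned)((rs & mask) << offset);  /* OR in the new bits */
+    return rx;
+}
+/* insert_bits(0xFFFFFFFF, 0x0, 8, 4) == 0xFFFFF00F */
+#endif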
+
+
+Q6INSN(S2_tableidxb,"Rx32=tableidxb(Rs32,#u4,#S6):raw",
+ATTRIBS(A_ARCHV2), "Extract and insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=SiV;
+ fHIDE(int) field = fEXTRACTU_BIDIR(RsV,width,offset);
+ fINSERT_BITS(RxV,width,0,field);
+})
+
+Q6INSN(S2_tableidxh,"Rx32=tableidxh(Rs32,#u4,#S6):raw",
+ATTRIBS(A_ARCHV2), "Extract and insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=SiV+1;
+ fHIDE(int) field = fEXTRACTU_BIDIR(RsV,width,offset);
+ fINSERT_BITS(RxV,width,1,field);
+})
+
+Q6INSN(S2_tableidxw,"Rx32=tableidxw(Rs32,#u4,#S6):raw",
+ATTRIBS(A_ARCHV2), "Extract and insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=SiV+2;
+ fHIDE(int) field = fEXTRACTU_BIDIR(RsV,width,offset);
+ fINSERT_BITS(RxV,width,2,field);
+})
+
+Q6INSN(S2_tableidxd,"Rx32=tableidxd(Rs32,#u4,#S6):raw",
+ATTRIBS(A_ARCHV2), "Extract and insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=SiV+3;
+ fHIDE(int) field = fEXTRACTU_BIDIR(RsV,width,offset);
+ fINSERT_BITS(RxV,width,3,field);
+})
+
+
+Q6INSN(A4_bitspliti,"Rdd32=bitsplit(Rs32,#u5)",
+ATTRIBS(), "Split a bitfield into two registers",
+{
+ fSETWORD(1,RddV,(fCAST4_4u(RsV)>>uiV));
+ fSETWORD(0,RddV,fZXTN(uiV,32,RsV));
+})
+
+Q6INSN(A4_bitsplit,"Rdd32=bitsplit(Rs32,Rt32)",
+ATTRIBS(), "Split a bitfield into two registers",
+{
+ fHIDE(size4u_t) shamt = fZXTN(5,32,RtV);
+ fSETWORD(1,RddV,(fCAST4_4u(RsV)>>shamt));
+ fSETWORD(0,RddV,fZXTN(shamt,32,RsV));
+})
+
+
+
+
+Q6INSN(S4_extract,"Rd32=extract(Rs32,#u5,#U5)",
+ATTRIBS(), "Extract signed bitfield",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ RdV = fSXTN(width,32,(fCAST4_4u(RsV) >> offset));
+})
+
+
+Q6INSN(S2_extractu,"Rd32=extractu(Rs32,#u5,#U5)",
+ATTRIBS(), "Extract unsigned bitfield",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ RdV = fZXTN(width,32,(fCAST4_4u(RsV) >> offset));
+})
+
+Q6INSN(S2_insertp,"Rxx32=insert(Rss32,#u6,#U6)",
+ATTRIBS(), "Insert bits",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ /* clear bits in Rxx where new bits go */
+ RxxV &= ~(((fCONSTLL(1)<<width)-1)<<offset);
+ /* OR in new bits */
+ RxxV |= ((RssV & ((fCONSTLL(1)<<width)-1)) << offset);
+})
+
+
+Q6INSN(S4_extractp,"Rdd32=extract(Rss32,#u6,#U6)",
+ATTRIBS(), "Extract signed bitfield",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ RddV = fSXTN(width,64,(fCAST8_8u(RssV) >> offset));
+})
+
+
+Q6INSN(S2_extractup,"Rdd32=extractu(Rss32,#u6,#U6)",
+ATTRIBS(), "Extract unsigned bitfield",
+{
+ fHIDE(int) width=uiV;
+ fHIDE(int) offset=UiV;
+ RddV = fZXTN(width,64,(fCAST8_8u(RssV) >> offset));
+})
+
+
+
+
+Q6INSN(S2_mask,"Rd32=mask(#u5,#U5)",
+ATTRIBS(), "Form mask from immediate",
+{
+ RdV = ((1<<uiV)-1) << UiV;
+})
+
+
+
+
+
+Q6INSN(S2_insert_rp,"Rx32=insert(Rs32,Rtt32)",
+ATTRIBS(), "Insert bits",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ fHIDE(size8u_t) mask = ((fCONSTLL(1)<<width)-1);
+ if (offset < 0) {
+ RxV = 0;
+ } else {
+ /* clear bits in Rxx where new bits go */
+ RxV &= ~(mask<<offset);
+ /* OR in new bits */
+ RxV |= ((RsV & mask) << offset);
+ }
+})
+
+
+Q6INSN(S4_extract_rp,"Rd32=extract(Rs32,Rtt32)",
+ATTRIBS(), "Extract signed bitfield",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ RdV = fSXTN(width,64,fBIDIR_LSHIFTR(fCAST4_8u(RsV),offset,4_8));
+})
+
+
+
+Q6INSN(S2_extractu_rp,"Rd32=extractu(Rs32,Rtt32)",
+ATTRIBS(), "Extract unsigned bitfield",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ RdV = fZXTN(width,64,fBIDIR_LSHIFTR(fCAST4_8u(RsV),offset,4_8));
+})
+
+Q6INSN(S2_insertp_rp,"Rxx32=insert(Rss32,Rtt32)",
+ATTRIBS(), "Insert bits",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ fHIDE(size8u_t) mask = ((fCONSTLL(1)<<width)-1);
+ if (offset < 0) {
+ RxxV = 0;
+ } else {
+ /* clear bits in Rxx where new bits go */
+ RxxV &= ~(mask<<offset);
+ /* OR in new bits */
+ RxxV |= ((RssV & mask) << offset);
+ }
+})
+
+
+Q6INSN(S4_extractp_rp,"Rdd32=extract(Rss32,Rtt32)",
+ATTRIBS(), "Extract signed bitfield",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ RddV = fSXTN(width,64,fBIDIR_LSHIFTR(fCAST8_8u(RssV),offset,8_8));
+})
+
+
+Q6INSN(S2_extractup_rp,"Rdd32=extractu(Rss32,Rtt32)",
+ATTRIBS(), "Extract unsigned bitfield",
+{
+ fHIDE(int) width=fZXTN(6,32,(fGETWORD(1,RttV)));
+ fHIDE(int) offset=fSXTN(7,32,(fGETWORD(0,RttV)));
+ RddV = fZXTN(width,64,fBIDIR_LSHIFTR(fCAST8_8u(RssV),offset,8_8));
+})
+
+/**********************************************/
+/* tstbit/setbit/clrbit */
+/**********************************************/
+
+Q6INSN(S2_tstbit_i,"Pd4=tstbit(Rs32,#u5)",
+ATTRIBS(), "Test a bit",
+{
+ PdV = f8BITSOF((RsV & (1<<uiV)) != 0);
+})
+
+Q6INSN(S4_ntstbit_i,"Pd4=!tstbit(Rs32,#u5)",
+ATTRIBS(), "Test a bit",
+{
+ PdV = f8BITSOF((RsV & (1<<uiV)) == 0);
+})
+
+Q6INSN(S2_setbit_i,"Rd32=setbit(Rs32,#u5)",
+ATTRIBS(), "Set a bit",
+{
+ RdV = (RsV | (1<<uiV));
+})
+
+Q6INSN(S2_togglebit_i,"Rd32=togglebit(Rs32,#u5)",
+ATTRIBS(), "Toggle a bit",
+{
+ RdV = (RsV ^ (1<<uiV));
+})
+
+Q6INSN(S2_clrbit_i,"Rd32=clrbit(Rs32,#u5)",
+ATTRIBS(), "Clear a bit",
+{
+ RdV = (RsV & (~(1<<uiV)));
+})
+
+
+
+/* using a register */
+Q6INSN(S2_tstbit_r,"Pd4=tstbit(Rs32,Rt32)",
+ATTRIBS(), "Test a bit",
+{
+ PdV = f8BITSOF((fCAST4_8u(RsV) & fBIDIR_LSHIFTL(1,fSXTN(7,32,RtV),4_8)) != 0);
+})
+
+Q6INSN(S4_ntstbit_r,"Pd4=!tstbit(Rs32,Rt32)",
+ATTRIBS(), "Test a bit",
+{
+ PdV = f8BITSOF((fCAST4_8u(RsV) & fBIDIR_LSHIFTL(1,fSXTN(7,32,RtV),4_8)) == 0);
+})
+
+Q6INSN(S2_setbit_r,"Rd32=setbit(Rs32,Rt32)",
+ATTRIBS(), "Set a bit",
+{
+ RdV = (RsV | fBIDIR_LSHIFTL(1,fSXTN(7,32,RtV),4_8));
+})
+
+Q6INSN(S2_togglebit_r,"Rd32=togglebit(Rs32,Rt32)",
+ATTRIBS(), "Toggle a bit",
+{
+ RdV = (RsV ^ fBIDIR_LSHIFTL(1,fSXTN(7,32,RtV),4_8));
+})
+
+Q6INSN(S2_clrbit_r,"Rd32=clrbit(Rs32,Rt32)",
+ATTRIBS(), "Clear a bit",
+{
+ RdV = (RsV & (~(fBIDIR_LSHIFTL(1,fSXTN(7,32,RtV),4_8))));
+})
+
+
+/**********************************************/
+/* vector shifting */
+/**********************************************/
+
+/* Half Vector Immediate Shifts */
+
+Q6INSN(S2_asr_i_vh,"Rdd32=vasrh(Rss32,#u4)",ATTRIBS(),
+ "Vector Arithmetic Shift Right by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, (fGETHALF(i,RssV)>>uiV));
+ }
+})
+
+
+Q6INSN(S2_lsr_i_vh,"Rdd32=vlsrh(Rss32,#u4)",ATTRIBS(),
+ "Vector Logical Shift Right by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, (fGETUHALF(i,RssV)>>uiV));
+ }
+})
+
+Q6INSN(S2_asl_i_vh,"Rdd32=vaslh(Rss32,#u4)",ATTRIBS(),
+ "Vector Arithmetic Shift Left by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, (fGETHALF(i,RssV)<<uiV));
+ }
+})
+
+/* Half Vector Register Shifts */
+
+Q6INSN(S2_asr_r_vh,"Rdd32=vasrh(Rss32,Rt32)",ATTRIBS(),
+ "Vector Arithmetic Shift Right by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, fBIDIR_ASHIFTR(fGETHALF(i,RssV),fSXTN(7,32,RtV),2_8));
+ }
+})
+
+Q6INSN(S5_asrhub_rnd_sat,"Rd32=vasrhub(Rss32,#u4):raw",,
+ "Vector Arithmetic Shift Right by Immediate with Round, Saturate, and Pack",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV, fSATUB( ((fGETHALF(i,RssV) >> uiV )+1)>>1 ));
+ }
+})
+
+Q6INSN(S5_asrhub_sat,"Rd32=vasrhub(Rss32,#u4):sat",,
+ "Vector Arithmetic Shift Right by Immediate with Saturate and Pack",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV, fSATUB( fGETHALF(i,RssV) >> uiV ));
+ }
+})
+
+
+
+Q6INSN(S5_vasrhrnd,"Rdd32=vasrh(Rss32,#u4):raw",,
+ "Vector Arithmetic Shift Right by Immediate with Round",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, ( ((fGETHALF(i,RssV) >> uiV)+1)>>1 ));
+ }
+})
+
+
+Q6INSN(S2_asl_r_vh,"Rdd32=vaslh(Rss32,Rt32)",ATTRIBS(),
+ "Vector Arithmetic Shift Left by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, fBIDIR_ASHIFTL(fGETHALF(i,RssV),fSXTN(7,32,RtV),2_8));
+ }
+})
+
+
+
+Q6INSN(S2_lsr_r_vh,"Rdd32=vlsrh(Rss32,Rt32)",ATTRIBS(),
+ "Vector Logical Shift Right by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, fBIDIR_LSHIFTR(fGETUHALF(i,RssV),fSXTN(7,32,RtV),2_8));
+ }
+})
+
+
+Q6INSN(S2_lsl_r_vh,"Rdd32=vlslh(Rss32,Rt32)",ATTRIBS(),
+ "Vector Logical Shift Left by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV, fBIDIR_LSHIFTL(fGETUHALF(i,RssV),fSXTN(7,32,RtV),2_8));
+ }
+})
+
+
+
+
+/* Word Vector Immediate Shifts */
+
+Q6INSN(S2_asr_i_vw,"Rdd32=vasrw(Rss32,#u5)",ATTRIBS(),
+ "Vector Arithmetic Shift Right by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fGETWORD(i,RssV)>>uiV));
+ }
+})
+
+
+
+Q6INSN(S2_asr_i_svw_trun,"Rd32=vasrw(Rss32,#u5)",ATTRIBS(A_ARCHV2),
+ "Vector Arithmetic Shift Right by Immediate with Truncate and Pack",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(0,(fGETWORD(i,RssV)>>uiV)));
+ }
+})
+
+Q6INSN(S2_asr_r_svw_trun,"Rd32=vasrw(Rss32,Rt32)",ATTRIBS(A_ARCHV2),
+ "Vector Arithmetic Shift Right truncate and Pack",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(0,fBIDIR_ASHIFTR(fGETWORD(i,RssV),fSXTN(7,32,RtV),4_8)));
+ }
+})
+
+
+Q6INSN(S2_lsr_i_vw,"Rdd32=vlsrw(Rss32,#u5)",ATTRIBS(),
+ "Vector Logical Shift Right by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fGETUWORD(i,RssV)>>uiV));
+ }
+})
+
+Q6INSN(S2_asl_i_vw,"Rdd32=vaslw(Rss32,#u5)",ATTRIBS(),
+ "Vector Arithmetic Shift Left by Immediate",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,(fGETWORD(i,RssV)<<uiV));
+ }
+})
+
+/* Word Vector Register Shifts */
+
+Q6INSN(S2_asr_r_vw,"Rdd32=vasrw(Rss32,Rt32)",ATTRIBS(),
+ "Vector Arithmetic Shift Right by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV, fBIDIR_ASHIFTR(fGETWORD(i,RssV),fSXTN(7,32,RtV),4_8));
+ }
+})
+
+
+
+Q6INSN(S2_asl_r_vw,"Rdd32=vaslw(Rss32,Rt32)",ATTRIBS(),
+ "Vector Arithmetic Shift Left by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV, fBIDIR_ASHIFTL(fGETWORD(i,RssV),fSXTN(7,32,RtV),4_8));
+ }
+})
+
+
+Q6INSN(S2_lsr_r_vw,"Rdd32=vlsrw(Rss32,Rt32)",ATTRIBS(),
+ "Vector Logical Shift Right by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV, fBIDIR_LSHIFTR(fGETUWORD(i,RssV),fSXTN(7,32,RtV),4_8));
+ }
+})
+
+
+
+Q6INSN(S2_lsl_r_vw,"Rdd32=vlslw(Rss32,Rt32)",ATTRIBS(),
+ "Vector Logical Shift Left by Register",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV, fBIDIR_LSHIFTL(fGETUWORD(i,RssV),fSXTN(7,32,RtV),4_8));
+ }
+})
+
+
+
+
+
+/**********************************************/
+/* Vector SXT/ZXT/SAT/TRUN/RNDPACK */
+/**********************************************/
+
+
+Q6INSN(S2_vrndpackwh,"Rd32=vrndwh(Rss32)",ATTRIBS(),
+"Round and Pack vector of words to Halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(1,(fGETWORD(i,RssV)+0x08000)));
+ }
+})
+
+Q6INSN(S2_vrndpackwhs,"Rd32=vrndwh(Rss32):sat",ATTRIBS(),
+"Round and Pack vector of words to Halfwords",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fGETHALF(1,fSAT(fGETWORD(i,RssV)+0x08000)));
+ }
+})
+
+Q6INSN(S2_vsxtbh,"Rdd32=vsxtbh(Rs32)",ATTRIBS(A_ARCHV2),
+"Vector sign extend byte to half",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETBYTE(i,RsV));
+ }
+})
+
+Q6INSN(S2_vzxtbh,"Rdd32=vzxtbh(Rs32)",ATTRIBS(),
+"Vector zero extend byte to half",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fGETUBYTE(i,RsV));
+ }
+})
+
+Q6INSN(S2_vsathub,"Rd32=vsathub(Rss32)",ATTRIBS(),
+"Vector saturate half to unsigned byte",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV,fSATUN(8,fGETHALF(i,RssV)));
+ }
+})
+
+
+
+
+
+Q6INSN(S2_svsathub,"Rd32=vsathub(Rs32)",ATTRIBS(A_ARCHV2),
+"Vector saturate half to unsigned byte",
+{
+ fSETBYTE(0,RdV,fSATUN(8,fGETHALF(0,RsV)));
+ fSETBYTE(1,RdV,fSATUN(8,fGETHALF(1,RsV)));
+ fSETBYTE(2,RdV,0);
+ fSETBYTE(3,RdV,0);
+})
+
+Q6INSN(S2_svsathb,"Rd32=vsathb(Rs32)",ATTRIBS(A_ARCHV2),
+"Vector saturate half to signed byte",
+{
+ fSETBYTE(0,RdV,fSATN(8,fGETHALF(0,RsV)));
+ fSETBYTE(1,RdV,fSATN(8,fGETHALF(1,RsV)));
+ fSETBYTE(2,RdV,0);
+ fSETBYTE(3,RdV,0);
+})
+
+
+Q6INSN(S2_vsathb,"Rd32=vsathb(Rss32)",ATTRIBS(A_ARCHV2),
+"Vector saturate half to signed byte",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV,fSATN(8,fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(S2_vtrunohb,"Rd32=vtrunohb(Rss32)",ATTRIBS(A_ARCHV2),
+"Vector truncate half to byte: take high",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV,fGETBYTE(i*2+1,RssV));
+ }
+})
+
+Q6INSN(S2_vtrunewh,"Rdd32=vtrunewh(Rss32,Rtt32)",ATTRIBS(A_ARCHV2),
+"Vector truncate word to half: take low",
+{
+ fSETHALF(0,RddV,fGETHALF(0,RttV));
+ fSETHALF(1,RddV,fGETHALF(2,RttV));
+ fSETHALF(2,RddV,fGETHALF(0,RssV));
+ fSETHALF(3,RddV,fGETHALF(2,RssV));
+})
+
+Q6INSN(S2_vtrunowh,"Rdd32=vtrunowh(Rss32,Rtt32)",ATTRIBS(A_ARCHV2),
+"Vector truncate word to half: take high",
+{
+ fSETHALF(0,RddV,fGETHALF(1,RttV));
+ fSETHALF(1,RddV,fGETHALF(3,RttV));
+ fSETHALF(2,RddV,fGETHALF(1,RssV));
+ fSETHALF(3,RddV,fGETHALF(3,RssV));
+})
+
+
+Q6INSN(S2_vtrunehb,"Rd32=vtrunehb(Rss32)",ATTRIBS(),
+"Vector truncate half to byte: take low",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RdV,fGETBYTE(i*2,RssV));
+ }
+})
+
+Q6INSN(S6_vtrunehb_ppp,"Rdd32=vtrunehb(Rss32,Rtt32)",ATTRIBS(),
+"Vector truncate half to byte: take low",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RddV,fGETBYTE(i*2,RttV));
+ fSETBYTE(i+4,RddV,fGETBYTE(i*2,RssV));
+ }
+})
+
+Q6INSN(S6_vtrunohb_ppp,"Rdd32=vtrunohb(Rss32,Rtt32)",ATTRIBS(),
+"Vector truncate half to byte: take high",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i,RddV,fGETBYTE(i*2+1,RttV));
+ fSETBYTE(i+4,RddV,fGETBYTE(i*2+1,RssV));
+ }
+})
+
+Q6INSN(S2_vsxthw,"Rdd32=vsxthw(Rs32)",ATTRIBS(),
+"Vector sign extend half to word",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fGETHALF(i,RsV));
+ }
+})
+
+Q6INSN(S2_vzxthw,"Rdd32=vzxthw(Rs32)",ATTRIBS(),
+"Vector zero extend half to word",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fGETUHALF(i,RsV));
+ }
+})
+
+
+Q6INSN(S2_vsatwh,"Rd32=vsatwh(Rss32)",ATTRIBS(),
+"Vector saturate word to signed half",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATN(16,fGETWORD(i,RssV)));
+ }
+})
+
+Q6INSN(S2_vsatwuh,"Rd32=vsatwuh(Rss32)",ATTRIBS(),
+"Vector saturate word to unsigned half",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i,RdV,fSATUN(16,fGETWORD(i,RssV)));
+ }
+})
+
+/* Other misc insns of this type */
+
+Q6INSN(S2_packhl,"Rdd32=packhl(Rs32,Rt32)",ATTRIBS(),
+"Pack high halfwords and low halfwords together",
+{
+ fSETHALF(0,RddV,fGETHALF(0,RtV));
+ fSETHALF(1,RddV,fGETHALF(0,RsV));
+ fSETHALF(2,RddV,fGETHALF(1,RtV));
+ fSETHALF(3,RddV,fGETHALF(1,RsV));
+})
+
+Q6INSN(A2_swiz,"Rd32=swiz(Rs32)",ATTRIBS(A_ARCHV2),
+"Endian swap the bytes of Rs",
+{
+ fSETBYTE(0,RdV,fGETBYTE(3,RsV));
+ fSETBYTE(1,RdV,fGETBYTE(2,RsV));
+ fSETBYTE(2,RdV,fGETBYTE(1,RsV));
+ fSETBYTE(3,RdV,fGETBYTE(0,RsV));
+})
+
+
+
+/* Vector Sat without Packing */
+Q6INSN(S2_vsathub_nopack,"Rdd32=vsathub(Rss32)",ATTRIBS(),
+"Vector saturate half to unsigned byte",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATUN(8,fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(S2_vsathb_nopack,"Rdd32=vsathb(Rss32)",ATTRIBS(A_ARCHV2),
+"Vector saturate half to signed byte without pack",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETHALF(i,RddV,fSATN(8,fGETHALF(i,RssV)));
+ }
+})
+
+Q6INSN(S2_vsatwh_nopack,"Rdd32=vsatwh(Rss32)",ATTRIBS(),
+"Vector saturate word to signed half",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSATN(16,fGETWORD(i,RssV)));
+ }
+})
+
+Q6INSN(S2_vsatwuh_nopack,"Rdd32=vsatwuh(Rss32)",ATTRIBS(),
+"Vector saturate word to unsigned half",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETWORD(i,RddV,fSATUN(16,fGETWORD(i,RssV)));
+ }
+})
+
+
+/**********************************************/
+/* Shuffle */
+/**********************************************/
+
+
+Q6INSN(S2_shuffob,"Rdd32=shuffob(Rtt32,Rss32)",ATTRIBS(),
+"Shuffle high bytes together",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i*2 ,RddV,fGETBYTE(i*2+1,RssV));
+ fSETBYTE(i*2+1,RddV,fGETBYTE(i*2+1,RttV));
+ }
+})
+
+Q6INSN(S2_shuffeb,"Rdd32=shuffeb(Rss32,Rtt32)",ATTRIBS(),
+"Shuffle low bytes together",
+{
+ fHIDE(int i;)
+ for (i=0;i<4;i++) {
+ fSETBYTE(i*2 ,RddV,fGETBYTE(i*2,RttV));
+ fSETBYTE(i*2+1,RddV,fGETBYTE(i*2,RssV));
+ }
+})
+
+Q6INSN(S2_shuffoh,"Rdd32=shuffoh(Rtt32,Rss32)",ATTRIBS(),
+"Shuffle high halves together",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i*2 ,RddV,fGETHALF(i*2+1,RssV));
+ fSETHALF(i*2+1,RddV,fGETHALF(i*2+1,RttV));
+ }
+})
+
+Q6INSN(S2_shuffeh,"Rdd32=shuffeh(Rss32,Rtt32)",ATTRIBS(),
+"Shuffle low halves together",
+{
+ fHIDE(int i;)
+ for (i=0;i<2;i++) {
+ fSETHALF(i*2 ,RddV,fGETHALF(i*2,RttV));
+ fSETHALF(i*2+1,RddV,fGETHALF(i*2,RssV));
+ }
+})
+
+
+/**********************************************/
+/* Strange bit instructions */
+/**********************************************/
+
+Q6INSN(S5_popcountp,"Rd32=popcount(Rss32)",ATTRIBS(),
+"Population Count", { RdV = fCOUNTONES_8(RssV); })
+
+Q6INSN(S4_parity,"Rd32=parity(Rs32,Rt32)",,
+"Parity of Masked Value", { RdV = 1&fCOUNTONES_4(RsV & RtV); })
+
+Q6INSN(S2_parityp,"Rd32=parity(Rss32,Rtt32)",ATTRIBS(A_ARCHV2),
+"Parity of Masked Value", { RdV = 1&fCOUNTONES_8(RssV & RttV); })
+
+Q6INSN(S2_lfsp,"Rdd32=lfs(Rss32,Rtt32)",ATTRIBS(A_ARCHV2),
+"Parity of Masked Value", { RddV = (fCAST8u(RssV) >> 1) | (fCAST8u((1&fCOUNTONES_8(RssV & RttV)))<<63) ; })
+
+Q6INSN(S2_clbnorm,"Rd32=normamt(Rs32)",ATTRIBS(A_ARCHV2),
+"Count leading sign bits - 1", { if (RsV == 0) { RdV = 0; } else { RdV = (fMAX(fCL1_4(RsV),fCL1_4(~RsV)))-1;} })
+
+Q6INSN(S4_clbaddi,"Rd32=add(clb(Rs32),#s6)",ATTRIBS(A_ARCHV2),
+"Count leading sign bits then add signed number",
+{ RdV = (fMAX(fCL1_4(RsV),fCL1_4(~RsV)))+siV;} )
+
+Q6INSN(S4_clbpnorm,"Rd32=normamt(Rss32)",ATTRIBS(A_ARCHV2),
+"Count leading sign bits - 1", { if (RssV == 0) { RdV = 0; }
+else { RdV = (fMAX(fCL1_8(RssV),fCL1_8(~RssV)))-1;}})
+
+Q6INSN(S4_clbpaddi,"Rd32=add(clb(Rss32),#s6)",ATTRIBS(A_ARCHV2),
+"Count leading sign bits then add signed number",
+{ RdV = (fMAX(fCL1_8(RssV),fCL1_8(~RssV)))+siV;})
+
+
+
+Q6INSN(S2_cabacdecbin,"Rdd32=decbin(Rss32,Rtt32)",ATTRIBS(A_ARCHV3),"CABAC decode bin",
+{
+ fHIDE(size4u_t state;)
+ fHIDE(size4u_t valMPS;)
+ fHIDE(size4u_t bitpos;)
+ fHIDE(size4u_t range;)
+ fHIDE(size4u_t offset;)
+ fHIDE(size4u_t rLPS;)
+ fHIDE(size4u_t rMPS;)
+
+ state = fEXTRACTU_RANGE( fGETWORD(1,RttV) ,5,0);
+ valMPS = fEXTRACTU_RANGE( fGETWORD(1,RttV) ,8,8);
+ bitpos = fEXTRACTU_RANGE( fGETWORD(0,RttV) ,4,0);
+ range = fGETWORD(0,RssV);
+ offset = fGETWORD(1,RssV);
+
+ /* calculate rLPS */
+ range <<= bitpos;
+ offset <<= bitpos;
+ rLPS = rLPS_table_64x4[state][ (range >>29)&3];
+ rLPS = rLPS << 23; /* left aligned */
+
+ /* calculate rMPS */
+ rMPS= (range&0xff800000) - rLPS;
+
+ /* most probable region */
+ if (offset < rMPS) {
+ RddV = AC_next_state_MPS_64[state];
+ fINSERT_RANGE(RddV,8,8,valMPS);
+ fINSERT_RANGE(RddV,31,23,(rMPS>>23));
+ fSETWORD(1,RddV,offset);
+ fWRITE_P0(valMPS);
+
+
+ }
+ /* least probable region */
+ else {
+ RddV = AC_next_state_LPS_64[state];
+ fINSERT_RANGE(RddV,8,8,((!state)?(1-valMPS):(valMPS)));
+ fINSERT_RANGE(RddV,31,23,(rLPS>>23));
+ fSETWORD(1,RddV,(offset-rMPS));
+ fWRITE_P0((valMPS^1));
+ }
+})
+
+
+Q6INSN(S2_clb,"Rd32=clb(Rs32)",ATTRIBS(),
+"Count leading bits", {RdV = fMAX(fCL1_4(RsV),fCL1_4(~RsV));})
+
+
+Q6INSN(S2_cl0,"Rd32=cl0(Rs32)",ATTRIBS(),
+"Count leading bits", {RdV = fCL1_4(~RsV);})
+
+Q6INSN(S2_cl1,"Rd32=cl1(Rs32)",ATTRIBS(),
+"Count leading bits", {RdV = fCL1_4(RsV);})
+
+Q6INSN(S2_clbp,"Rd32=clb(Rss32)",ATTRIBS(),
+"Count leading bits", {RdV = fMAX(fCL1_8(RssV),fCL1_8(~RssV));})
+
+Q6INSN(S2_cl0p,"Rd32=cl0(Rss32)",ATTRIBS(),
+"Count leading bits", {RdV = fCL1_8(~RssV);})
+
+Q6INSN(S2_cl1p,"Rd32=cl1(Rss32)",ATTRIBS(),
+"Count leading bits", {RdV = fCL1_8(RssV);})
+
+
+
+
+Q6INSN(S2_brev, "Rd32=brev(Rs32)", ATTRIBS(A_ARCHV2), "Bit Reverse",{RdV = fBREV_4(RsV);})
+Q6INSN(S2_brevp,"Rdd32=brev(Rss32)", ATTRIBS(), "Bit Reverse",{RddV = fBREV_8(RssV);})
+Q6INSN(S2_ct0, "Rd32=ct0(Rs32)", ATTRIBS(A_ARCHV2), "Count Trailing Zeros",{RdV = fCL1_4(~fBREV_4(RsV));})
+Q6INSN(S2_ct1, "Rd32=ct1(Rs32)", ATTRIBS(A_ARCHV2), "Count Trailing Ones",{RdV = fCL1_4(fBREV_4(RsV));})
+Q6INSN(S2_ct0p, "Rd32=ct0(Rss32)", ATTRIBS(), "Count Trailing Zeros",{RdV = fCL1_8(~fBREV_8(RssV));})
+Q6INSN(S2_ct1p, "Rd32=ct1(Rss32)", ATTRIBS(), "Count Trailing Ones",{RdV = fCL1_8(fBREV_8(RssV));})
+
+
+Q6INSN(S2_interleave,"Rdd32=interleave(Rss32)",ATTRIBS(A_ARCHV2),"Interleave bits",
+{RddV = fINTERLEAVE(fGETWORD(1,RssV),fGETWORD(0,RssV));})
+
+Q6INSN(S2_deinterleave,"Rdd32=deinterleave(Rss32)",ATTRIBS(A_ARCHV2),"Deinterleave bits",
+{RddV = fDEINTERLEAVE(RssV);})
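+
+/*
+ * Illustrative sketch (not part of the imported file): interleave forms a
+ * Morton code, alternating bits of the two words.  Sending the low word to
+ * the even bit positions is an assumption here; fINTERLEAVE is the
+ * authoritative definition.
+ */
+#if 0 /* illustration only */
+static unsigned long long interleave32(unsigned int hi, unsigned int lo)
+{
+    unsigned long long r = 0;
+    int i;
+    for (i = 0; i < 32; i++) {
+        r |= (unsigned long long)((lo >> i) & 1) << (2 * i);
+        r |= (unsigned long long)((hi >> i) & 1) << (2 * i + 1);
+    }
+    return r;
+}
+#endif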
diff --git a/target/hexagon/imported/subinsns.idef b/target/hexagon/imported/subinsns.idef
new file mode 100644
index 000000000..ec1c74f47
--- /dev/null
+++ b/target/hexagon/imported/subinsns.idef
@@ -0,0 +1,149 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * sub-instructions
+ */
+
+
+
+/*****************************************************************/
+/* */
+/* A-type subinsns */
+/* */
+/*****************************************************************/
+
+Q6INSN(SA1_addi, "Rx16=add(Rx16,#s7)", ATTRIBS(A_SUBINSN),"Add", { fIMMEXT(siV); RxV=RxV+siV;})
+Q6INSN(SA1_tfr, "Rd16=Rs16", ATTRIBS(A_SUBINSN),"Tfr", { RdV=RsV;})
+Q6INSN(SA1_seti, "Rd16=#u6", ATTRIBS(A_SUBINSN),"Set immed", { fIMMEXT(uiV); RdV=uiV;})
+Q6INSN(SA1_setin1, "Rd16=#-1", ATTRIBS(A_SUBINSN),"Set to -1", { RdV=-1;})
+Q6INSN(SA1_clrtnew, "if (p0.new) Rd16=#0", ATTRIBS(A_SUBINSN),"clear if true", { if (fLSBNEW0) {RdV=0;} else {CANCEL;} })
+Q6INSN(SA1_clrfnew, "if (!p0.new) Rd16=#0", ATTRIBS(A_SUBINSN),"clear if false",{ if (fLSBNEW0NOT) {RdV=0;} else {CANCEL;} })
+Q6INSN(SA1_clrt, "if (p0) Rd16=#0", ATTRIBS(A_SUBINSN),"clear if true", { if (fLSBOLD(fREAD_P0())) {RdV=0;} else {CANCEL;} })
+Q6INSN(SA1_clrf, "if (!p0) Rd16=#0", ATTRIBS(A_SUBINSN),"clear if false",{ if (fLSBOLDNOT(fREAD_P0())) {RdV=0;} else {CANCEL;} })
+
+Q6INSN(SA1_addsp, "Rd16=add(r29,#u6:2)", ATTRIBS(A_SUBINSN),"Add", { RdV=fREAD_SP()+uiV; })
+Q6INSN(SA1_inc, "Rd16=add(Rs16,#1)", ATTRIBS(A_SUBINSN),"Inc", { RdV=RsV+1;})
+Q6INSN(SA1_dec, "Rd16=add(Rs16,#-1)", ATTRIBS(A_SUBINSN),"Dec", { RdV=RsV-1;})
+Q6INSN(SA1_addrx, "Rx16=add(Rx16,Rs16)", ATTRIBS(A_SUBINSN),"Add", { RxV=RxV+RsV; })
+Q6INSN(SA1_zxtb, "Rd16=and(Rs16,#255)", ATTRIBS(A_SUBINSN),"Zxtb", { RdV= fZXTN(8,32,RsV);})
+Q6INSN(SA1_and1, "Rd16=and(Rs16,#1)", ATTRIBS(A_SUBINSN),"And #1", { RdV= RsV&1;})
+Q6INSN(SA1_sxtb, "Rd16=sxtb(Rs16)", ATTRIBS(A_SUBINSN),"Sxtb", { RdV= fSXTN(8,32,RsV);})
+Q6INSN(SA1_zxth, "Rd16=zxth(Rs16)", ATTRIBS(A_SUBINSN),"Zxth", { RdV= fZXTN(16,32,RsV);})
+Q6INSN(SA1_sxth, "Rd16=sxth(Rs16)", ATTRIBS(A_SUBINSN),"Sxth", { RdV= fSXTN(16,32,RsV);})
+Q6INSN(SA1_combinezr,"Rdd8=combine(#0,Rs16)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,RsV); fSETWORD(1,RddV,0); })
+Q6INSN(SA1_combinerz,"Rdd8=combine(Rs16,#0)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,0); fSETWORD(1,RddV,RsV); })
+Q6INSN(SA1_combine0i,"Rdd8=combine(#0,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,0); })
+Q6INSN(SA1_combine1i,"Rdd8=combine(#1,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,1); })
+Q6INSN(SA1_combine2i,"Rdd8=combine(#2,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,2); })
+Q6INSN(SA1_combine3i,"Rdd8=combine(#3,#u2)", ATTRIBS(A_SUBINSN),"Combines", { fSETWORD(0,RddV,uiV); fSETWORD(1,RddV,3); })
+Q6INSN(SA1_cmpeqi, "p0=cmp.eq(Rs16,#u2)", ATTRIBS(A_SUBINSN),"CompareImmed",{fWRITE_P0(f8BITSOF(RsV==uiV));})
+
+
+
+
+/*****************************************************************/
+/* */
+/* Ld1/2 subinsns */
+/* */
+/*****************************************************************/
+
+Q6INSN(SL1_loadri_io, "Rd16=memw(Rs16+#u4:2)", ATTRIBS(A_LOAD,A_SUBINSN),"load word", {fEA_RI(RsV,uiV); fLOAD(1,4,u,EA,RdV);})
+Q6INSN(SL1_loadrub_io, "Rd16=memub(Rs16+#u4:0)",ATTRIBS(A_LOAD,A_SUBINSN),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,u,EA,RdV);})
+
+Q6INSN(SL2_loadrh_io, "Rd16=memh(Rs16+#u3:1)", ATTRIBS(A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,s,EA,RdV);})
+Q6INSN(SL2_loadruh_io, "Rd16=memuh(Rs16+#u3:1)",ATTRIBS(A_LOAD,A_SUBINSN),"load half", {fEA_RI(RsV,uiV); fLOAD(1,2,u,EA,RdV);})
+Q6INSN(SL2_loadrb_io, "Rd16=memb(Rs16+#u3:0)", ATTRIBS(A_LOAD,A_SUBINSN),"load byte", {fEA_RI(RsV,uiV); fLOAD(1,1,s,EA,RdV);})
+Q6INSN(SL2_loadri_sp, "Rd16=memw(r29+#u5:2)", ATTRIBS(A_LOAD,A_SUBINSN),"load word", {fEA_RI(fREAD_SP(),uiV); fLOAD(1,4,u,EA,RdV);})
+Q6INSN(SL2_loadrd_sp, "Rdd8=memd(r29+#u5:3)", ATTRIBS(A_LOAD,A_SUBINSN),"load dword",{fEA_RI(fREAD_SP(),uiV); fLOAD(1,8,u,EA,RddV);})
+
+Q6INSN(SL2_deallocframe,"deallocframe", ATTRIBS(A_SUBINSN,A_LOAD), "Deallocate stack frame",
+{ fHIDE(size8u_t tmp;) fEA_REG(fREAD_FP());
+ fLOAD(1,8,u,EA,tmp);
+ tmp = fFRAME_UNSCRAMBLE(tmp);
+ fWRITE_LR(fGETWORD(1,tmp));
+ fWRITE_FP(fGETWORD(0,tmp));
+ fWRITE_SP(EA+8); })
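+
+/*
+ * Illustrative note (not part of the imported file): the record that
+ * deallocframe (and dealloc_return below) unwinds is the pair stored by
+ * allocframe at the address held in FP -- caller's FP in the low word,
+ * return address in the high word -- modulo fFRAME_UNSCRAMBLE.
+ */
+#if 0 /* illustration only, scrambling ignored */
+struct frame_rec {
+    unsigned int fp_save;   /* word 0: caller's frame pointer */
+    unsigned int lr_save;   /* word 1: return address */
+};
+/* deallocframe: LR = rec.lr_save; FP = rec.fp_save; SP = entry FP + 8 */
+#endif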
+
+Q6INSN(SL2_return,"dealloc_return", ATTRIBS(A_JINDIR,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;) fEA_REG(fREAD_FP());
+ fLOAD(1,8,u,EA,tmp);
+ tmp = fFRAME_UNSCRAMBLE(tmp);
+ fWRITE_LR(fGETWORD(1,tmp));
+ fWRITE_FP(fGETWORD(0,tmp));
+ fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);})
+
+Q6INSN(SL2_return_t,"if (p0) dealloc_return", ATTRIBS(A_JINDIROLD,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;); fBRANCH_SPECULATE_STALL(fLSBOLD(fREAD_P0()),, SPECULATE_NOT_TAKEN,4,0); fEA_REG(fREAD_FP()); if (fLSBOLD(fREAD_P0())) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} })
+
+Q6INSN(SL2_return_f,"if (!p0) dealloc_return", ATTRIBS(A_JINDIROLD,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;);fBRANCH_SPECULATE_STALL(fLSBOLDNOT(fREAD_P0()),, SPECULATE_NOT_TAKEN,4,0); fEA_REG(fREAD_FP()); if (fLSBOLDNOT(fREAD_P0())) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} })
+
+
+
+Q6INSN(SL2_return_tnew,"if (p0.new) dealloc_return:nt", ATTRIBS(A_JINDIRNEW,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;) fBRANCH_SPECULATE_STALL(fLSBNEW0,, SPECULATE_NOT_TAKEN , 4,3); fEA_REG(fREAD_FP()); if (fLSBNEW0) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} })
+
+Q6INSN(SL2_return_fnew,"if (!p0.new) dealloc_return:nt", ATTRIBS(A_JINDIRNEW,A_SUBINSN,A_LOAD,A_RETURN,A_RESTRICT_SLOT0ONLY), "Deallocate stack frame and return",
+{ fHIDE(size8u_t tmp;) fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,, SPECULATE_NOT_TAKEN , 4,3); fEA_REG(fREAD_FP()); if (fLSBNEW0NOT) { fLOAD(1,8,u,EA,tmp); tmp = fFRAME_UNSCRAMBLE(tmp); fWRITE_LR(fGETWORD(1,tmp)); fWRITE_FP(fGETWORD(0,tmp)); fWRITE_SP(EA+8);
+ fJUMPR(REG_LR,fGETWORD(1,tmp),COF_TYPE_JUMPR);} else {LOAD_CANCEL(EA);} })
+
+
+Q6INSN(SL2_jumpr31,"jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIR,A_RESTRICT_SLOT0ONLY),"indirect unconditional jump",
+{ fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);})
+
+Q6INSN(SL2_jumpr31_t,"if (p0) jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if true",
+{fBRANCH_SPECULATE_STALL(fLSBOLD(fREAD_P0()),, SPECULATE_TAKEN,4,0); if (fLSBOLD(fREAD_P0())) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}})
+
+Q6INSN(SL2_jumpr31_f,"if (!p0) jumpr r31",ATTRIBS(A_SUBINSN,A_JINDIROLD,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if false",
+{fBRANCH_SPECULATE_STALL(fLSBOLDNOT(fREAD_P0()),, SPECULATE_TAKEN,4,0); if (fLSBOLDNOT(fREAD_P0())) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}})
+
+
+
+Q6INSN(SL2_jumpr31_tnew,"if (p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if true",
+{fBRANCH_SPECULATE_STALL(fLSBNEW0,, SPECULATE_NOT_TAKEN , 4,3); if (fLSBNEW0) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}})
+
+Q6INSN(SL2_jumpr31_fnew,"if (!p0.new) jumpr:nt r31",ATTRIBS(A_SUBINSN,A_JINDIRNEW,A_RESTRICT_SLOT0ONLY),"indirect conditional jump if false",
+{fBRANCH_SPECULATE_STALL(fLSBNEW0NOT,, SPECULATE_NOT_TAKEN , 4,3); if (fLSBNEW0NOT) {fJUMPR(REG_LR,fREAD_LR(),COF_TYPE_JUMPR);}})
+
+
+
+
+
+/*****************************************************************/
+/* */
+/* St1/2 subinsns */
+/* */
+/*****************************************************************/
+
+Q6INSN(SS1_storew_io, "memw(Rs16+#u4:2)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,RtV);})
+Q6INSN(SS1_storeb_io, "memb(Rs16+#u4:0)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,fGETBYTE(0,RtV));})
+Q6INSN(SS2_storeh_io, "memh(Rs16+#u3:1)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store half", {fEA_RI(RsV,uiV); fSTORE(1,2,EA,fGETHALF(0,RtV));})
+Q6INSN(SS2_stored_sp, "memd(r29+#s6:3)=Rtt8", ATTRIBS(A_STORE,A_SUBINSN), "store dword",{fEA_RI(fREAD_SP(),siV); fSTORE(1,8,EA,RttV);})
+Q6INSN(SS2_storew_sp, "memw(r29+#u5:2)=Rt16", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(fREAD_SP(),uiV); fSTORE(1,4,EA,RtV);})
+Q6INSN(SS2_storewi0, "memw(Rs16+#u4:2)=#0", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,0);})
+Q6INSN(SS2_storebi0, "memb(Rs16+#u4:0)=#0", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,0);})
+Q6INSN(SS2_storewi1, "memw(Rs16+#u4:2)=#1", ATTRIBS(A_STORE,A_SUBINSN), "store word", {fEA_RI(RsV,uiV); fSTORE(1,4,EA,1);})
+Q6INSN(SS2_storebi1, "memb(Rs16+#u4:0)=#1", ATTRIBS(A_STORE,A_SUBINSN), "store byte", {fEA_RI(RsV,uiV); fSTORE(1,1,EA,1);})
+
+
+Q6INSN(SS2_allocframe,"allocframe(#u5:3)", ATTRIBS(A_SUBINSN,A_STORE,A_RESTRICT_SLOT0ONLY), "Allocate stack frame",
+{ fEA_RI(fREAD_SP(),-8); fSTORE(1,8,EA,fFRAME_SCRAMBLE((fCAST8_8u(fREAD_LR()) << 32) | fCAST4_4u(fREAD_FP()))); fWRITE_FP(EA); fFRAMECHECK(EA-uiV,EA); fWRITE_SP(EA-uiV); })
diff --git a/target/hexagon/imported/system.idef b/target/hexagon/imported/system.idef
new file mode 100644
index 000000000..7c6568e75
--- /dev/null
+++ b/target/hexagon/imported/system.idef
@@ -0,0 +1,68 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * System Interface Instructions
+ */
+
+
+
+/********************************************/
+/* User->OS interface */
+/********************************************/
+
+Q6INSN(J2_trap0,"trap0(#u8)",ATTRIBS(A_COF),
+"Trap to Operating System",
+ fTRAP(0,uiV);
+)
+
+Q6INSN(J2_pause,"pause(#u8)",ATTRIBS(A_COF),
+"Enter low-power state for #u8 cycles",{fPAUSE(uiV);})
+
+Q6INSN(Y2_icinva,"icinva(Rs32)",ATTRIBS(A_ICOP,A_ICFLUSHOP),"Instruction Cache Invalidate Address",{fEA_REG(RsV); fICINVA(EA);})
+
+Q6INSN(Y2_isync,"isync",ATTRIBS(),"Memory Synchronization",{fISYNC();})
+Q6INSN(Y2_barrier,"barrier",ATTRIBS(A_RESTRICT_SLOT0ONLY),"Memory Barrier",{fBARRIER();})
+Q6INSN(Y2_syncht,"syncht",ATTRIBS(A_RESTRICT_SLOT0ONLY),"Memory Synchronization",{fSYNCH();})
+
+
+Q6INSN(Y2_dcfetchbo,"dcfetch(Rs32+#u11:3)",ATTRIBS(A_RESTRICT_PREFERSLOT0,A_DCFETCH),"Data Cache Prefetch",{fEA_RI(RsV,uiV); fDCFETCH(EA);})
+
+
+Q6INSN(Y2_dczeroa,"dczeroa(Rs32)",ATTRIBS(A_STORE,A_RESTRICT_SLOT0ONLY,A_DCZEROA),"Zero an aligned 32-byte cacheline",{fEA_REG(RsV); fDCZEROA(EA);})
+Q6INSN(Y2_dccleana,"dccleana(Rs32)",ATTRIBS(A_RESTRICT_SLOT0ONLY,A_DCFLUSHOP),"Data Cache Clean Address",{fEA_REG(RsV); fDCCLEANA(EA);})
+Q6INSN(Y2_dccleaninva,"dccleaninva(Rs32)",ATTRIBS(A_RESTRICT_SLOT0ONLY,A_DCFLUSHOP),"Data Cache Clean and Invalidate Address",{fEA_REG(RsV); fDCCLEANINVA(EA);})
+Q6INSN(Y2_dcinva,"dcinva(Rs32)",ATTRIBS(A_RESTRICT_SLOT0ONLY,A_DCFLUSHOP),"Data Cache Invalidate Address",{fEA_REG(RsV); fDCCLEANINVA(EA);})
+
+
+Q6INSN(Y4_l2fetch,"l2fetch(Rs32,Rt32)",ATTRIBS(A_RESTRICT_SLOT0ONLY),"L2 Cache Prefetch",
+{ fL2FETCH(RsV,
+ (RtV&0xff), /*height*/
+ ((RtV>>8)&0xff), /*width*/
+ ((RtV>>16)&0xffff), /*stride*/
+ 0); /*extra attrib flags*/
+})
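+
+/*
+ * Illustrative note (not part of the imported file): Rt packs the prefetch
+ * region as stride[31:16], width[15:8], height[7:0].  E.g. four 64-byte
+ * lines spaced 256 bytes apart:
+ */
+#if 0 /* illustration only */
+unsigned rt = (256u << 16) | (64u << 8) | 4u; /* stride | width | height */
+#endif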
+
+
+
+Q6INSN(Y5_l2fetch,"l2fetch(Rs32,Rtt32)",ATTRIBS(A_RESTRICT_SLOT0ONLY),"L2 Cache Prefetch",
+{ fL2FETCH(RsV,
+ fGETUHALF(0,RttV), /*height*/
+ fGETUHALF(1,RttV), /*width*/
+ fGETUHALF(2,RttV), /*stride*/
+ fGETUHALF(3,RttV)); /*flags*/
+})
diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h
new file mode 100644
index 000000000..aa2638914
--- /dev/null
+++ b/target/hexagon/insn.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_INSN_H
+#define HEXAGON_INSN_H
+
+#include "cpu.h"
+
+#define INSTRUCTIONS_MAX 7 /* 2 pairs + loopend */
+#define REG_OPERANDS_MAX 5
+#define IMMEDS_MAX 2
+
+struct Instruction;
+struct Packet;
+struct DisasContext;
+
+typedef void (*SemanticInsn)(CPUHexagonState *env,
+ struct DisasContext *ctx,
+ struct Instruction *insn,
+ struct Packet *pkt);
+
+struct Instruction {
+ SemanticInsn generate; /* pointer to genptr routine */
+ uint8_t regno[REG_OPERANDS_MAX]; /* reg operands including predicates */
+ uint16_t opcode;
+
+ uint32_t iclass:6;
+ uint32_t slot:3;
+ uint32_t which_extended:1; /* If has an extender, which immediate */
+ uint32_t new_value_producer_slot:4;
+
+ bool part1; /*
+ * cmp-jumps are split into two insns.
+ * set for the compare and clear for the jump
+ */
+ bool extension_valid; /* Has a constant extender attached */
+ bool is_endloop; /* This is an end of loop */
+ int32_t immed[IMMEDS_MAX]; /* immediate field */
+};
+
+typedef struct Instruction Insn;
+
+struct Packet {
+ uint16_t num_insns;
+ uint16_t encod_pkt_size_in_bytes;
+
+    /* Pre-decoded change-of-flow (COF) information */
+ bool pkt_has_cof; /* Has any change-of-flow */
+ bool pkt_has_endloop;
+
+ bool pkt_has_dczeroa;
+
+ bool pkt_has_store_s0;
+ bool pkt_has_store_s1;
+
+ bool pkt_has_hvx;
+ Insn *vhist_insn;
+
+ Insn insn[INSTRUCTIONS_MAX];
+};
+
+typedef struct Packet Packet;
+
+#endif
diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h
new file mode 100644
index 000000000..82ac3042a
--- /dev/null
+++ b/target/hexagon/internal.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_INTERNAL_H
+#define HEXAGON_INTERNAL_H
+
+/*
+ * Change HEX_DEBUG to 1 to turn on debugging output
+ */
+#define HEX_DEBUG 0
+#define HEX_DEBUG_LOG(...) \
+ do { \
+ if (HEX_DEBUG) { \
+ qemu_log(__VA_ARGS__); \
+ } \
+ } while (0)
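+
+/*
+ * Example usage (illustrative only; the format string shown here is a
+ * hypothetical caller, not taken from this patch):
+ *
+ *     HEX_DEBUG_LOG("%s: pc = 0x" TARGET_FMT_lx "\n",
+ *                   __func__, env->gpr[HEX_REG_PC]);
+ *
+ * Because HEX_DEBUG is a compile-time constant, the compiler removes the
+ * qemu_log call entirely when debugging is disabled while still
+ * type-checking the arguments.
+ */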
+
+int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void hexagon_debug_vreg(CPUHexagonState *env, int regnum);
+void hexagon_debug_qreg(CPUHexagonState *env, int regnum);
+void hexagon_debug(CPUHexagonState *env);
+
+extern const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS];
+
+#endif
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
new file mode 100644
index 000000000..19d103cad
--- /dev/null
+++ b/target/hexagon/macros.h
@@ -0,0 +1,716 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_MACROS_H
+#define HEXAGON_MACROS_H
+
+#include "cpu.h"
+#include "hex_regs.h"
+#include "reg_fields.h"
+
+#ifdef QEMU_GENERATE
+#define READ_REG(dest, NUM) gen_read_reg(dest, NUM)
+#else
+#define READ_REG(NUM) (env->gpr[(NUM)])
+#define READ_PREG(NUM) (env->pred[NUM])
+
+#define WRITE_RREG(NUM, VAL) log_reg_write(env, NUM, VAL, slot)
+#define WRITE_PREG(NUM, VAL) log_pred_write(env, NUM, VAL)
+#endif
+
+#define PCALIGN 4
+#define PCALIGN_MASK (PCALIGN - 1)
+
+#define GET_FIELD(FIELD, REGIN) \
+ fEXTRACTU_BITS(REGIN, reg_field_info[FIELD].width, \
+ reg_field_info[FIELD].offset)
+
+#ifdef QEMU_GENERATE
+#define GET_USR_FIELD(FIELD, DST) \
+ tcg_gen_extract_tl(DST, hex_gpr[HEX_REG_USR], \
+ reg_field_info[FIELD].offset, \
+ reg_field_info[FIELD].width)
+
+#define TYPE_INT(X) __builtin_types_compatible_p(typeof(X), int)
+#define TYPE_TCGV(X) __builtin_types_compatible_p(typeof(X), TCGv)
+#define TYPE_TCGV_I64(X) __builtin_types_compatible_p(typeof(X), TCGv_i64)
+
+#define SET_USR_FIELD_FUNC(X) \
+ __builtin_choose_expr(TYPE_INT(X), \
+ gen_set_usr_fieldi, \
+ __builtin_choose_expr(TYPE_TCGV(X), \
+ gen_set_usr_field, (void)0))
+#define SET_USR_FIELD(FIELD, VAL) \
+ SET_USR_FIELD_FUNC(VAL)(FIELD, VAL)
+#else
+#define GET_USR_FIELD(FIELD) \
+ fEXTRACTU_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \
+ reg_field_info[FIELD].offset)
+
+#define SET_USR_FIELD(FIELD, VAL) \
+ fINSERT_BITS(env->new_value[HEX_REG_USR], reg_field_info[FIELD].width, \
+ reg_field_info[FIELD].offset, (VAL))
+#endif
+
+#ifdef QEMU_GENERATE
+/*
+ * Section 5.5 of the Hexagon V67 Programmer's Reference Manual
+ *
+ * Slot 1 store with slot 0 load
+ * A slot 1 store operation with a slot 0 load operation can appear in a packet.
+ * The packet attribute :mem_noshuf inhibits the instruction reordering that
+ * would otherwise be done by the assembler. For example:
+ * {
+ * memw(R5) = R2 // slot 1 store
+ * R3 = memh(R6) // slot 0 load
+ * }:mem_noshuf
+ * Unlike most packetized operations, these memory operations are not executed
+ * in parallel (Section 3.3.1). Instead, the store instruction in Slot 1
+ * effectively executes first, followed by the load instruction in Slot 0. If
+ * the addresses of the two operations are overlapping, the load will receive
+ * the newly stored data. This feature is supported in processor versions
+ * V65 or greater.
+ *
+ *
+ * For qemu, we look for a load in slot 0 when there is a store in slot 1
+ * in the same packet. When we see this, we call a helper that merges the
+ * bytes from the store buffer with the value loaded from memory.
+ */
+#define CHECK_NOSHUF \
+ do { \
+ if (insn->slot == 0 && pkt->pkt_has_store_s1) { \
+ process_store(ctx, pkt, 1); \
+ } \
+ } while (0)
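+
+/*
+ * Worked example (illustrative, following the manual excerpt above): in the
+ * packet { memw(R5) = R2; R3 = memh(R6) }:mem_noshuf with R6 == R5, the
+ * slot 0 load must observe the bytes stored by slot 1. CHECK_NOSHUF makes
+ * this happen by committing the pending slot 1 store (process_store) before
+ * any of the MEM_LOAD* macros below emit the load.
+ */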
+
+#define MEM_LOAD1s(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \
+ } while (0)
+#define MEM_LOAD1u(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \
+ } while (0)
+#define MEM_LOAD2s(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \
+ } while (0)
+#define MEM_LOAD2u(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \
+ } while (0)
+#define MEM_LOAD4s(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+ } while (0)
+#define MEM_LOAD4u(DST, VA) \
+    do { \
+        CHECK_NOSHUF; \
+        tcg_gen_qemu_ld32u(DST, VA, ctx->mem_idx); \
+    } while (0)
+#define MEM_LOAD8u(DST, VA) \
+ do { \
+ CHECK_NOSHUF; \
+ tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \
+ } while (0)
+
+#define MEM_STORE1_FUNC(X) \
+ __builtin_choose_expr(TYPE_INT(X), \
+ gen_store1i, \
+ __builtin_choose_expr(TYPE_TCGV(X), \
+ gen_store1, (void)0))
+#define MEM_STORE1(VA, DATA, SLOT) \
+ MEM_STORE1_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)
+
+#define MEM_STORE2_FUNC(X) \
+ __builtin_choose_expr(TYPE_INT(X), \
+ gen_store2i, \
+ __builtin_choose_expr(TYPE_TCGV(X), \
+ gen_store2, (void)0))
+#define MEM_STORE2(VA, DATA, SLOT) \
+ MEM_STORE2_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)
+
+#define MEM_STORE4_FUNC(X) \
+ __builtin_choose_expr(TYPE_INT(X), \
+ gen_store4i, \
+ __builtin_choose_expr(TYPE_TCGV(X), \
+ gen_store4, (void)0))
+#define MEM_STORE4(VA, DATA, SLOT) \
+ MEM_STORE4_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)
+
+#define MEM_STORE8_FUNC(X) \
+ __builtin_choose_expr(TYPE_INT(X), \
+ gen_store8i, \
+ __builtin_choose_expr(TYPE_TCGV_I64(X), \
+ gen_store8, (void)0))
+#define MEM_STORE8(VA, DATA, SLOT) \
+ MEM_STORE8_FUNC(DATA)(cpu_env, VA, DATA, ctx, SLOT)
+#else
+#define MEM_LOAD1s(VA) ((int8_t)mem_load1(env, slot, VA))
+#define MEM_LOAD1u(VA) ((uint8_t)mem_load1(env, slot, VA))
+#define MEM_LOAD2s(VA) ((int16_t)mem_load2(env, slot, VA))
+#define MEM_LOAD2u(VA) ((uint16_t)mem_load2(env, slot, VA))
+#define MEM_LOAD4s(VA) ((int32_t)mem_load4(env, slot, VA))
+#define MEM_LOAD4u(VA) ((uint32_t)mem_load4(env, slot, VA))
+#define MEM_LOAD8s(VA) ((int64_t)mem_load8(env, slot, VA))
+#define MEM_LOAD8u(VA) ((uint64_t)mem_load8(env, slot, VA))
+
+#define MEM_STORE1(VA, DATA, SLOT) log_store32(env, VA, DATA, 1, SLOT)
+#define MEM_STORE2(VA, DATA, SLOT) log_store32(env, VA, DATA, 2, SLOT)
+#define MEM_STORE4(VA, DATA, SLOT) log_store32(env, VA, DATA, 4, SLOT)
+#define MEM_STORE8(VA, DATA, SLOT) log_store64(env, VA, DATA, 8, SLOT)
+#endif
+
+#define CANCEL cancel_slot(env, slot)
+
+#define LOAD_CANCEL(EA) do { CANCEL; } while (0)
+
+#ifdef QEMU_GENERATE
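+/*
+ * Cancel a slot when the LSB of the predicate is 0. The movcond below
+ * implements: if ((pred & 1) == 0) hex_slot_cancelled |= 1 << slot_num;
+ * otherwise hex_slot_cancelled is left unchanged.
+ */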
+static inline void gen_pred_cancel(TCGv pred, int slot_num)
+{
+    TCGv slot_mask = tcg_temp_new();
+    TCGv tmp = tcg_temp_new();
+    TCGv zero = tcg_constant_tl(0);
+    tcg_gen_ori_tl(slot_mask, hex_slot_cancelled, 1 << slot_num);
+    tcg_gen_andi_tl(tmp, pred, 1);
+    tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero,
+                       slot_mask, hex_slot_cancelled);
+    tcg_temp_free(slot_mask);
+    tcg_temp_free(tmp);
+}
+#define PRED_LOAD_CANCEL(PRED, EA) \
+ gen_pred_cancel(PRED, insn->is_endloop ? 4 : insn->slot)
+#endif
+
+#define STORE_CANCEL(EA) { env->slot_cancelled |= (1 << slot); }
+
+#define fMAX(A, B) (((A) > (B)) ? (A) : (B))
+
+#define fMIN(A, B) (((A) < (B)) ? (A) : (B))
+
+#define fABS(A) (((A) < 0) ? (-(A)) : (A))
+#define fINSERT_BITS(REG, WIDTH, OFFSET, INVAL) \
+ REG = ((WIDTH) ? deposit64(REG, (OFFSET), (WIDTH), (INVAL)) : REG)
+#define fEXTRACTU_BITS(INREG, WIDTH, OFFSET) \
+ ((WIDTH) ? extract64((INREG), (OFFSET), (WIDTH)) : 0LL)
+#define fEXTRACTU_BIDIR(INREG, WIDTH, OFFSET) \
+ (fZXTN(WIDTH, 32, fBIDIR_LSHIFTR((INREG), (OFFSET), 4_8)))
+#define fEXTRACTU_RANGE(INREG, HIBIT, LOWBIT) \
+ (((HIBIT) - (LOWBIT) + 1) ? \
+ extract64((INREG), (LOWBIT), ((HIBIT) - (LOWBIT) + 1)) : \
+ 0LL)
+#define fINSERT_RANGE(INREG, HIBIT, LOWBIT, INVAL) \
+ do { \
+ int width = ((HIBIT) - (LOWBIT) + 1); \
+ INREG = (width >= 0 ? \
+ deposit64((INREG), (LOWBIT), width, (INVAL)) : \
+ INREG); \
+ } while (0)
+
+#define f8BITSOF(VAL) ((VAL) ? 0xff : 0x00)
+
+#ifdef QEMU_GENERATE
+#define fLSBOLD(VAL) tcg_gen_andi_tl(LSB, (VAL), 1)
+#else
+#define fLSBOLD(VAL) ((VAL) & 1)
+#endif
+
+#ifdef QEMU_GENERATE
+#define fLSBNEW(PVAL) tcg_gen_andi_tl(LSB, (PVAL), 1)
+#define fLSBNEW0 tcg_gen_andi_tl(LSB, hex_new_pred_value[0], 1)
+#define fLSBNEW1 tcg_gen_andi_tl(LSB, hex_new_pred_value[1], 1)
+#else
+#define fLSBNEW(PVAL) ((PVAL) & 1)
+#define fLSBNEW0 (env->new_pred_value[0] & 1)
+#define fLSBNEW1 (env->new_pred_value[1] & 1)
+#endif
+
+#ifdef QEMU_GENERATE
+#define fLSBOLDNOT(VAL) \
+ do { \
+ tcg_gen_andi_tl(LSB, (VAL), 1); \
+ tcg_gen_xori_tl(LSB, LSB, 1); \
+ } while (0)
+#define fLSBNEWNOT(PNUM) \
+ do { \
+ tcg_gen_andi_tl(LSB, (PNUM), 1); \
+ tcg_gen_xori_tl(LSB, LSB, 1); \
+ } while (0)
+#else
+#define fLSBNEWNOT(PNUM) (!fLSBNEW(PNUM))
+#define fLSBOLDNOT(VAL) (!fLSBOLD(VAL))
+#define fLSBNEW0NOT (!fLSBNEW0)
+#define fLSBNEW1NOT (!fLSBNEW1)
+#endif
+
+#define fNEWREG(VAL) ((int32_t)(VAL))
+
+#define fNEWREG_ST(VAL) (VAL)
+
+#define fVSATUVALN(N, VAL) \
+ ({ \
+ (((int)(VAL)) < 0) ? 0 : ((1LL << (N)) - 1); \
+ })
+#define fSATUVALN(N, VAL) \
+ ({ \
+ fSET_OVERFLOW(); \
+ ((VAL) < 0) ? 0 : ((1LL << (N)) - 1); \
+ })
+#define fSATVALN(N, VAL) \
+ ({ \
+ fSET_OVERFLOW(); \
+ ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \
+ })
+#define fVSATVALN(N, VAL) \
+ ({ \
+ ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \
+ })
+#define fZXTN(N, M, VAL) (((N) != 0) ? extract64((VAL), 0, (N)) : 0LL)
+#define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL)
+#define fSATN(N, VAL) \
+ ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATVALN(N, VAL))
+#define fVSATN(N, VAL) \
+ ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATVALN(N, VAL))
+#define fADDSAT64(DST, A, B) \
+ do { \
+ uint64_t __a = fCAST8u(A); \
+ uint64_t __b = fCAST8u(B); \
+ uint64_t __sum = __a + __b; \
+ uint64_t __xor = __a ^ __b; \
+ const uint64_t __mask = 0x8000000000000000ULL; \
+ if (__xor & __mask) { \
+ DST = __sum; \
+ } \
+ else if ((__a ^ __sum) & __mask) { \
+ if (__sum & __mask) { \
+ DST = 0x7FFFFFFFFFFFFFFFLL; \
+ fSET_OVERFLOW(); \
+ } else { \
+ DST = 0x8000000000000000LL; \
+ fSET_OVERFLOW(); \
+ } \
+ } else { \
+ DST = __sum; \
+ } \
+ } while (0)
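+/*
+ * Worked example for fADDSAT64 (illustrative): A = 0x7fffffffffffffff,
+ * B = 1. The operands have the same sign (the __xor sign bit is clear),
+ * the sum 0x8000000000000000 differs in sign from A, and the sum is
+ * negative, so DST saturates to 0x7fffffffffffffff and the USR overflow
+ * bit is set. Operands of opposite sign can never overflow, which is why
+ * the first branch stores the raw sum.
+ */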
+#define fVSATUN(N, VAL) \
+ ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATUVALN(N, VAL))
+#define fSATUN(N, VAL) \
+ ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATUVALN(N, VAL))
+#define fSATH(VAL) (fSATN(16, VAL))
+#define fSATUH(VAL) (fSATUN(16, VAL))
+#define fVSATH(VAL) (fVSATN(16, VAL))
+#define fVSATUH(VAL) (fVSATUN(16, VAL))
+#define fSATUB(VAL) (fSATUN(8, VAL))
+#define fSATB(VAL) (fSATN(8, VAL))
+#define fVSATUB(VAL) (fVSATUN(8, VAL))
+#define fVSATB(VAL) (fVSATN(8, VAL))
+#define fIMMEXT(IMM) (IMM = IMM)
+#define fMUST_IMMEXT(IMM) fIMMEXT(IMM)
+
+#define fPCALIGN(IMM) IMM = (IMM & ~PCALIGN_MASK)
+
+#ifdef QEMU_GENERATE
+static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift)
+{
+ /*
+ * Section 2.2.4 of the Hexagon V67 Programmer's Reference Manual
+ *
+ * The "I" value from a modifier register is divided into two pieces
+ * LSB bits 23:17
+ * MSB bits 31:28
+ * The value is signed
+ *
+ * At the end we shift the result according to the shift argument
+ */
+ TCGv msb = tcg_temp_new();
+ TCGv lsb = tcg_temp_new();
+
+ tcg_gen_extract_tl(lsb, val, 17, 7);
+ tcg_gen_sari_tl(msb, val, 21);
+ tcg_gen_deposit_tl(result, msb, lsb, 0, 7);
+
+ tcg_gen_shli_tl(result, result, shift);
+
+ tcg_temp_free(msb);
+ tcg_temp_free(lsb);
+
+ return result;
+}
+#define fREAD_IREG(VAL, SHIFT) gen_read_ireg(ireg, (VAL), (SHIFT))
+#else
+#define fREAD_IREG(VAL) \
+ (fSXTN(11, 64, (((VAL) & 0xf0000000) >> 21) | ((VAL >> 17) & 0x7f)))
+#endif
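+
+/*
+ * Worked example for the modifier "I" field (illustrative): for
+ * VAL = 0x80220000, bits 31:28 are 0b1000 and bits 23:17 are 0b0010001,
+ * so the raw 11-bit value is 0x411, which sign-extends to -1007. With a
+ * shift of 2 the resulting increment is -4028. The TCG version computes
+ * the same value using a sign-extending shift plus a deposit of the low
+ * seven bits.
+ */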
+
+#define fREAD_LR() (READ_REG(HEX_REG_LR))
+
+#define fWRITE_LR(A) WRITE_RREG(HEX_REG_LR, A)
+#define fWRITE_FP(A) WRITE_RREG(HEX_REG_FP, A)
+#define fWRITE_SP(A) WRITE_RREG(HEX_REG_SP, A)
+
+#define fREAD_SP() (READ_REG(HEX_REG_SP))
+#define fREAD_LC0 (READ_REG(HEX_REG_LC0))
+#define fREAD_LC1 (READ_REG(HEX_REG_LC1))
+#define fREAD_SA0 (READ_REG(HEX_REG_SA0))
+#define fREAD_SA1 (READ_REG(HEX_REG_SA1))
+#define fREAD_FP() (READ_REG(HEX_REG_FP))
+#ifdef FIXME
+/* Figure out how to get insn->extension_valid to helper */
+#define fREAD_GP() \
+ (insn->extension_valid ? 0 : READ_REG(HEX_REG_GP))
+#else
+#define fREAD_GP() READ_REG(HEX_REG_GP)
+#endif
+#define fREAD_PC() (READ_REG(HEX_REG_PC))
+
+#define fREAD_NPC() (env->next_PC & (0xfffffffe))
+
+#define fREAD_P0() (READ_PREG(0))
+#define fREAD_P3() (READ_PREG(3))
+
+#define fCHECK_PCALIGN(A)
+
+#define fWRITE_NPC(A) write_new_pc(env, A)
+
+#define fBRANCH(LOC, TYPE) fWRITE_NPC(LOC)
+#define fJUMPR(REGNO, TARGET, TYPE) fBRANCH(TARGET, COF_TYPE_JUMPR)
+#define fHINTJR(TARGET) { /* Not modelled in qemu */}
+#define fCALL(A) \
+ do { \
+ fWRITE_LR(fREAD_NPC()); \
+ fBRANCH(A, COF_TYPE_CALL); \
+ } while (0)
+#define fCALLR(A) \
+ do { \
+ fWRITE_LR(fREAD_NPC()); \
+ fBRANCH(A, COF_TYPE_CALLR); \
+ } while (0)
+#define fWRITE_LOOP_REGS0(START, COUNT) \
+ do { \
+ WRITE_RREG(HEX_REG_LC0, COUNT); \
+ WRITE_RREG(HEX_REG_SA0, START); \
+ } while (0)
+#define fWRITE_LOOP_REGS1(START, COUNT) \
+ do { \
+ WRITE_RREG(HEX_REG_LC1, COUNT); \
+ WRITE_RREG(HEX_REG_SA1, START);\
+ } while (0)
+#define fWRITE_LC0(VAL) WRITE_RREG(HEX_REG_LC0, VAL)
+#define fWRITE_LC1(VAL) WRITE_RREG(HEX_REG_LC1, VAL)
+
+#define fSET_OVERFLOW() SET_USR_FIELD(USR_OVF, 1)
+#define fSET_LPCFG(VAL) SET_USR_FIELD(USR_LPCFG, (VAL))
+#define fGET_LPCFG (GET_USR_FIELD(USR_LPCFG))
+#define fWRITE_P0(VAL) WRITE_PREG(0, VAL)
+#define fWRITE_P1(VAL) WRITE_PREG(1, VAL)
+#define fWRITE_P2(VAL) WRITE_PREG(2, VAL)
+#define fWRITE_P3(VAL) WRITE_PREG(3, VAL)
+#define fPART1(WORK) if (part1) { WORK; return; }
+#define fCAST4u(A) ((uint32_t)(A))
+#define fCAST4s(A) ((int32_t)(A))
+#define fCAST8u(A) ((uint64_t)(A))
+#define fCAST8s(A) ((int64_t)(A))
+#define fCAST2_2s(A) ((int16_t)(A))
+#define fCAST2_2u(A) ((uint16_t)(A))
+#define fCAST4_4s(A) ((int32_t)(A))
+#define fCAST4_4u(A) ((uint32_t)(A))
+#define fCAST4_8s(A) ((int64_t)((int32_t)(A)))
+#define fCAST4_8u(A) ((uint64_t)((uint32_t)(A)))
+#define fCAST8_8s(A) ((int64_t)(A))
+#define fCAST8_8u(A) ((uint64_t)(A))
+#define fCAST2_8s(A) ((int64_t)((int16_t)(A)))
+#define fCAST2_8u(A) ((uint64_t)((uint16_t)(A)))
+#define fZE8_16(A) ((int16_t)((uint8_t)(A)))
+#define fSE8_16(A) ((int16_t)((int8_t)(A)))
+#define fSE16_32(A) ((int32_t)((int16_t)(A)))
+#define fZE16_32(A) ((uint32_t)((uint16_t)(A)))
+#define fSE32_64(A) ((int64_t)((int32_t)(A)))
+#define fZE32_64(A) ((uint64_t)((uint32_t)(A)))
+#define fSE8_32(A) ((int32_t)((int8_t)(A)))
+#define fZE8_32(A) ((int32_t)((uint8_t)(A)))
+#define fMPY8UU(A, B) (int)(fZE8_16(A) * fZE8_16(B))
+#define fMPY8US(A, B) (int)(fZE8_16(A) * fSE8_16(B))
+#define fMPY8SU(A, B) (int)(fSE8_16(A) * fZE8_16(B))
+#define fMPY8SS(A, B) (int)((short)(A) * (short)(B))
+#define fMPY16SS(A, B) fSE32_64(fSE16_32(A) * fSE16_32(B))
+#define fMPY16UU(A, B) fZE32_64(fZE16_32(A) * fZE16_32(B))
+#define fMPY16SU(A, B) fSE32_64(fSE16_32(A) * fZE16_32(B))
+#define fMPY16US(A, B) fMPY16SU(B, A)
+#define fMPY32SS(A, B) (fSE32_64(A) * fSE32_64(B))
+#define fMPY32UU(A, B) (fZE32_64(A) * fZE32_64(B))
+#define fMPY32SU(A, B) (fSE32_64(A) * fZE32_64(B))
+#define fMPY3216SS(A, B) (fSE32_64(A) * fSXTN(16, 64, B))
+#define fMPY3216SU(A, B) (fSE32_64(A) * fZXTN(16, 64, B))
+#define fROUND(A) (A + 0x8000)
+#define fCLIP(DST, SRC, U) \
+ do { \
+ int32_t maxv = (1 << U) - 1; \
+ int32_t minv = -(1 << U); \
+ DST = fMIN(maxv, fMAX(SRC, minv)); \
+ } while (0)
+#define fCRND(A) ((((A) & 0x3) == 0x3) ? ((A) + 1) : ((A)))
+#define fRNDN(A, N) ((((N) == 0) ? (A) : (((fSE32_64(A)) + (1 << ((N) - 1))))))
+#define fCRNDN(A, N) (conv_round(A, N))
+#define fADD128(A, B) (int128_add(A, B))
+#define fSUB128(A, B) (int128_sub(A, B))
+#define fSHIFTR128(A, B) (int128_rshift(A, B))
+#define fSHIFTL128(A, B) (int128_lshift(A, B))
+#define fAND128(A, B) (int128_and(A, B))
+#define fCAST8S_16S(A) (int128_exts64(A))
+#define fCAST16S_8S(A) (int128_getlo(A))
+
+#ifdef QEMU_GENERATE
+#define fEA_RI(REG, IMM) tcg_gen_addi_tl(EA, REG, IMM)
+#define fEA_RRs(REG, REG2, SCALE) \
+ do { \
+ TCGv tmp = tcg_temp_new(); \
+ tcg_gen_shli_tl(tmp, REG2, SCALE); \
+ tcg_gen_add_tl(EA, REG, tmp); \
+ tcg_temp_free(tmp); \
+ } while (0)
+#define fEA_IRs(IMM, REG, SCALE) \
+ do { \
+ tcg_gen_shli_tl(EA, REG, SCALE); \
+ tcg_gen_addi_tl(EA, EA, IMM); \
+ } while (0)
+#else
+#define fEA_RI(REG, IMM) \
+ do { \
+ EA = REG + IMM; \
+ } while (0)
+#define fEA_RRs(REG, REG2, SCALE) \
+ do { \
+ EA = REG + (REG2 << SCALE); \
+ } while (0)
+#define fEA_IRs(IMM, REG, SCALE) \
+ do { \
+ EA = IMM + (REG << SCALE); \
+ } while (0)
+#endif
+
+#ifdef QEMU_GENERATE
+#define fEA_IMM(IMM) tcg_gen_movi_tl(EA, IMM)
+#define fEA_REG(REG) tcg_gen_mov_tl(EA, REG)
+#define fEA_BREVR(REG) gen_helper_fbrev(EA, REG)
+#define fPM_I(REG, IMM) tcg_gen_addi_tl(REG, REG, IMM)
+#define fPM_M(REG, MVAL) tcg_gen_add_tl(REG, REG, MVAL)
+#define fPM_CIRI(REG, IMM, MVAL) \
+ do { \
+ TCGv tcgv_siV = tcg_constant_tl(siV); \
+ gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \
+ hex_gpr[HEX_REG_CS0 + MuN]); \
+ } while (0)
+#else
+#define fEA_IMM(IMM) do { EA = (IMM); } while (0)
+#define fEA_REG(REG) do { EA = (REG); } while (0)
+#define fEA_GPI(IMM) do { EA = (fREAD_GP() + (IMM)); } while (0)
+#define fPM_I(REG, IMM) do { REG = REG + (IMM); } while (0)
+#define fPM_M(REG, MVAL) do { REG = REG + (MVAL); } while (0)
+#endif
+#define fSCALE(N, A) (((int64_t)(A)) << N)
+#define fVSATW(A) fVSATN(32, ((long long)A))
+#define fSATW(A) fSATN(32, ((long long)A))
+#define fVSAT(A) fVSATN(32, (A))
+#define fSAT(A) fSATN(32, (A))
+#define fSAT_ORIG_SHL(A, ORIG_REG) \
+ ((((int32_t)((fSAT(A)) ^ ((int32_t)(ORIG_REG)))) < 0) \
+ ? fSATVALN(32, ((int32_t)(ORIG_REG))) \
+ : ((((ORIG_REG) > 0) && ((A) == 0)) ? fSATVALN(32, (ORIG_REG)) \
+ : fSAT(A)))
+#define fPASS(A) A
+#define fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) >> ((-(SHAMT)) - 1)) >> 1) \
+ : (fCAST##REGSTYPE(SRC) << (SHAMT)))
+#define fBIDIR_ASHIFTL(SRC, SHAMT, REGSTYPE) \
+ fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE##s)
+#define fBIDIR_LSHIFTL(SRC, SHAMT, REGSTYPE) \
+ fBIDIR_SHIFTL(SRC, SHAMT, REGSTYPE##u)
+#define fBIDIR_ASHIFTL_SAT(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE##s(SRC) >> ((-(SHAMT)) - 1)) >> 1) \
+ : fSAT_ORIG_SHL(fCAST##REGSTYPE##s(SRC) << (SHAMT), (SRC)))
+#define fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) < 0) ? ((fCAST##REGSTYPE(SRC) << ((-(SHAMT)) - 1)) << 1) \
+ : (fCAST##REGSTYPE(SRC) >> (SHAMT)))
+#define fBIDIR_ASHIFTR(SRC, SHAMT, REGSTYPE) \
+ fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE##s)
+#define fBIDIR_LSHIFTR(SRC, SHAMT, REGSTYPE) \
+ fBIDIR_SHIFTR(SRC, SHAMT, REGSTYPE##u)
+#define fBIDIR_ASHIFTR_SAT(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) < 0) ? fSAT_ORIG_SHL((fCAST##REGSTYPE##s(SRC) \
+ << ((-(SHAMT)) - 1)) << 1, (SRC)) \
+ : (fCAST##REGSTYPE##s(SRC) >> (SHAMT)))
+#define fASHIFTR(SRC, SHAMT, REGSTYPE) (fCAST##REGSTYPE##s(SRC) >> (SHAMT))
+#define fLSHIFTR(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
+#define fROTL(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) << (SHAMT)) | \
+ ((fCAST##REGSTYPE##u(SRC) >> \
+ ((sizeof(SRC) * 8) - (SHAMT))))))
+#define fROTR(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) >> (SHAMT)) | \
+ ((fCAST##REGSTYPE##u(SRC) << \
+ ((sizeof(SRC) * 8) - (SHAMT))))))
+#define fASHIFTL(SRC, SHAMT, REGSTYPE) \
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
+
+#ifdef QEMU_GENERATE
+#define fLOAD(NUM, SIZE, SIGN, EA, DST) MEM_LOAD##SIZE##SIGN(DST, EA)
+#else
+#define fLOAD(NUM, SIZE, SIGN, EA, DST) \
+ DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE##SIGN(EA)
+#endif
+
+#define fMEMOP(NUM, SIZE, SIGN, EA, FNTYPE, VALUE)
+
+#define fGET_FRAMEKEY() READ_REG(HEX_REG_FRAMEKEY)
+#define fFRAME_SCRAMBLE(VAL) ((VAL) ^ (fCAST8u(fGET_FRAMEKEY()) << 32))
+#define fFRAME_UNSCRAMBLE(VAL) fFRAME_SCRAMBLE(VAL)
+
+#ifdef CONFIG_USER_ONLY
+#define fFRAMECHECK(ADDR, EA) do { } while (0) /* Not modelled in linux-user */
+#else
+/* System mode not implemented yet */
+#define fFRAMECHECK(ADDR, EA) g_assert_not_reached();
+#endif
+
+#ifdef QEMU_GENERATE
+#define fLOAD_LOCKED(NUM, SIZE, SIGN, EA, DST) \
+ gen_load_locked##SIZE##SIGN(DST, EA, ctx->mem_idx);
+#endif
+
+#ifdef QEMU_GENERATE
+#define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, insn->slot)
+#else
+#define fSTORE(NUM, SIZE, EA, SRC) MEM_STORE##SIZE(EA, SRC, slot)
+#endif
+
+#ifdef QEMU_GENERATE
+#define fSTORE_LOCKED(NUM, SIZE, EA, SRC, PRED) \
+ gen_store_conditional##SIZE(ctx, PRED, EA, SRC);
+#endif
+
+#ifdef QEMU_GENERATE
+#define GETBYTE_FUNC(X) \
+ __builtin_choose_expr(TYPE_TCGV(X), \
+ gen_get_byte, \
+ __builtin_choose_expr(TYPE_TCGV_I64(X), \
+ gen_get_byte_i64, (void)0))
+#define fGETBYTE(N, SRC) GETBYTE_FUNC(SRC)(BYTE, N, SRC, true)
+#define fGETUBYTE(N, SRC) GETBYTE_FUNC(SRC)(BYTE, N, SRC, false)
+#else
+#define fGETBYTE(N, SRC) ((int8_t)((SRC >> ((N) * 8)) & 0xff))
+#define fGETUBYTE(N, SRC) ((uint8_t)((SRC >> ((N) * 8)) & 0xff))
+#endif
+
+#define fSETBYTE(N, DST, VAL) \
+ do { \
+ DST = (DST & ~(0x0ffLL << ((N) * 8))) | \
+ (((uint64_t)((VAL) & 0x0ffLL)) << ((N) * 8)); \
+ } while (0)
+
+#ifdef QEMU_GENERATE
+#define fGETHALF(N, SRC) gen_get_half(HALF, N, SRC, true)
+#define fGETUHALF(N, SRC) gen_get_half(HALF, N, SRC, false)
+#else
+#define fGETHALF(N, SRC) ((int16_t)((SRC >> ((N) * 16)) & 0xffff))
+#define fGETUHALF(N, SRC) ((uint16_t)((SRC >> ((N) * 16)) & 0xffff))
+#endif
+#define fSETHALF(N, DST, VAL) \
+ do { \
+ DST = (DST & ~(0x0ffffLL << ((N) * 16))) | \
+ (((uint64_t)((VAL) & 0x0ffff)) << ((N) * 16)); \
+ } while (0)
+#define fSETHALFw fSETHALF
+#define fSETHALFd fSETHALF
+
+#define fGETWORD(N, SRC) \
+ ((int64_t)((int32_t)((SRC >> ((N) * 32)) & 0x0ffffffffLL)))
+#define fGETUWORD(N, SRC) \
+ ((uint64_t)((uint32_t)((SRC >> ((N) * 32)) & 0x0ffffffffLL)))
+
+#define fSETWORD(N, DST, VAL) \
+ do { \
+ DST = (DST & ~(0x0ffffffffLL << ((N) * 32))) | \
+ (((VAL) & 0x0ffffffffLL) << ((N) * 32)); \
+ } while (0)
+
+#define fSETBIT(N, DST, VAL) \
+ do { \
+ DST = (DST & ~(1ULL << (N))) | (((uint64_t)(VAL)) << (N)); \
+ } while (0)
+
+#define fGETBIT(N, SRC) (((SRC) >> N) & 1)
+#define fSETBITS(HI, LO, DST, VAL) \
+ do { \
+ int j; \
+ for (j = LO; j <= HI; j++) { \
+ fSETBIT(j, DST, VAL); \
+ } \
+ } while (0)
+#define fCOUNTONES_2(VAL) ctpop16(VAL)
+#define fCOUNTONES_4(VAL) ctpop32(VAL)
+#define fCOUNTONES_8(VAL) ctpop64(VAL)
+#define fBREV_8(VAL) revbit64(VAL)
+#define fBREV_4(VAL) revbit32(VAL)
+#define fCL1_8(VAL) clo64(VAL)
+#define fCL1_4(VAL) clo32(VAL)
+#define fCL1_2(VAL) (clz32(~(uint16_t)(VAL) & 0xffff) - 16)
+#define fINTERLEAVE(ODD, EVEN) interleave(ODD, EVEN)
+#define fDEINTERLEAVE(MIXED) deinterleave(MIXED)
+#define fHIDE(A) A
+#define fCONSTLL(A) A##LL
+#define fECHO(A) (A)
+
+#define fTRAP(TRAPTYPE, IMM) helper_raise_exception(env, HEX_EXCP_TRAP0)
+#define fPAUSE(IMM)
+
+#define fALIGN_REG_FIELD_VALUE(FIELD, VAL) \
+ ((VAL) << reg_field_info[FIELD].offset)
+#define fGET_REG_FIELD_MASK(FIELD) \
+ (((1 << reg_field_info[FIELD].width) - 1) << reg_field_info[FIELD].offset)
+#define fREAD_REG_FIELD(REG, FIELD) \
+ fEXTRACTU_BITS(env->gpr[HEX_REG_##REG], \
+ reg_field_info[FIELD].width, \
+ reg_field_info[FIELD].offset)
+#define fGET_FIELD(VAL, FIELD)
+#define fSET_FIELD(VAL, FIELD, NEWVAL)
+#define fBARRIER()
+#define fSYNCH()
+#define fISYNC()
+#define fDCFETCH(REG) \
+ do { (void)REG; } while (0) /* Nothing to do in qemu */
+#define fICINVA(REG) \
+ do { (void)REG; } while (0) /* Nothing to do in qemu */
+#define fL2FETCH(ADDR, HEIGHT, WIDTH, STRIDE, FLAGS)
+#define fDCCLEANA(REG) \
+ do { (void)REG; } while (0) /* Nothing to do in qemu */
+#define fDCCLEANINVA(REG) \
+ do { (void)REG; } while (0) /* Nothing to do in qemu */
+
+#define fDCZEROA(REG) do { env->dczero_addr = (REG); } while (0)
+
+#define fBRANCH_SPECULATE_STALL(DOTNEWVAL, JUMP_COND, SPEC_DIR, HINTBITNUM, \
+ STRBITNUM) /* Nothing */
+
+
+#endif
diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build
new file mode 100644
index 000000000..b61243103
--- /dev/null
+++ b/target/hexagon/meson.build
@@ -0,0 +1,182 @@
+##
+## Copyright(c) 2020-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; either version 2 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, see <http://www.gnu.org/licenses/>.
+##
+
+hexagon_ss = ss.source_set()
+
+hex_common_py = 'hex_common.py'
+attribs_def = meson.current_source_dir() / 'attribs_def.h.inc'
+gen_tcg_h = meson.current_source_dir() / 'gen_tcg.h'
+gen_tcg_hvx_h = meson.current_source_dir() / 'gen_tcg_hvx.h'
+
+#
+# Step 1
+# We use a C program to create semantics_generated.pyinc
+#
+gen_semantics = executable(
+ 'gen_semantics',
+ 'gen_semantics.c',
+ native: true, build_by_default: false)
+
+semantics_generated = custom_target(
+ 'semantics_generated.pyinc',
+ output: 'semantics_generated.pyinc',
+ command: [gen_semantics, '@OUTPUT@'],
+)
+hexagon_ss.add(semantics_generated)
+
+#
+# Step 2
+# We use Python scripts to generate the following files
+# shortcode_generated.h.inc
+# helper_protos_generated.h.inc
+# tcg_funcs_generated.c.inc
+# tcg_func_table_generated.c.inc
+# helper_funcs_generated.c.inc
+# printinsn_generated.h.inc
+# op_regs_generated.h.inc
+# op_attribs_generated.h.inc
+# opcodes_def_generated.h.inc
+#
+shortcode_generated = custom_target(
+ 'shortcode_generated.h.inc',
+ output: 'shortcode_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_shortcode.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(shortcode_generated)
+
+helper_protos_generated = custom_target(
+ 'helper_protos_generated.h.inc',
+ output: 'helper_protos_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h],
+ command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'],
+)
+hexagon_ss.add(helper_protos_generated)
+
+tcg_funcs_generated = custom_target(
+ 'tcg_funcs_generated.c.inc',
+ output: 'tcg_funcs_generated.c.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h],
+ command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'],
+)
+hexagon_ss.add(tcg_funcs_generated)
+
+tcg_func_table_generated = custom_target(
+ 'tcg_func_table_generated.c.inc',
+ output: 'tcg_func_table_generated.c.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_tcg_func_table.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(tcg_func_table_generated)
+
+helper_funcs_generated = custom_target(
+ 'helper_funcs_generated.c.inc',
+ output: 'helper_funcs_generated.c.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h],
+ command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'],
+)
+hexagon_ss.add(helper_funcs_generated)
+
+printinsn_generated = custom_target(
+ 'printinsn_generated.h.inc',
+ output: 'printinsn_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_printinsn.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(printinsn_generated)
+
+op_regs_generated = custom_target(
+ 'op_regs_generated.h.inc',
+ output: 'op_regs_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_op_regs.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(op_regs_generated)
+
+op_attribs_generated = custom_target(
+ 'op_attribs_generated.h.inc',
+ output: 'op_attribs_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_op_attribs.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(op_attribs_generated)
+
+opcodes_def_generated = custom_target(
+ 'opcodes_def_generated.h.inc',
+ output: 'opcodes_def_generated.h.inc',
+ depends: [semantics_generated],
+ depend_files: [hex_common_py, attribs_def],
+ command: [python, files('gen_opcodes_def.py'), semantics_generated, attribs_def, '@OUTPUT@'],
+)
+hexagon_ss.add(opcodes_def_generated)
+
+#
+# Step 3
+# We use a C program to create iset.py which is imported into dectree.py
+# to create the decode tree
+#
+gen_dectree_import = executable(
+ 'gen_dectree_import',
+ 'gen_dectree_import.c', opcodes_def_generated, op_regs_generated,
+ native: true, build_by_default: false)
+
+iset_py = custom_target(
+ 'iset.py',
+ output: 'iset.py',
+ command: [gen_dectree_import, '@OUTPUT@'],
+)
+hexagon_ss.add(iset_py)
+
+#
+# Step 4
+# We use the dectree.py script to generate the decode tree header file
+#
+dectree_generated = custom_target(
+ 'dectree_generated.h.inc',
+ output: 'dectree_generated.h.inc',
+ depends: [iset_py],
+ env: {'PYTHONPATH': meson.current_build_dir()},
+ command: [python, files('dectree.py'), '@OUTPUT@'],
+)
+hexagon_ss.add(dectree_generated)
+
+hexagon_ss.add(files(
+ 'cpu.c',
+ 'translate.c',
+ 'op_helper.c',
+ 'gdbstub.c',
+ 'genptr.c',
+ 'reg_fields.c',
+ 'decode.c',
+ 'iclass.c',
+ 'opcodes.c',
+ 'printinsn.c',
+ 'arch.c',
+ 'fma_emu.c',
+ 'mmvec/decode_ext_mmvec.c',
+ 'mmvec/system_ext_mmvec.c',
+))
+
+target_arch += {'hexagon': hexagon_ss}
diff --git a/target/hexagon/mmvec/decode_ext_mmvec.c b/target/hexagon/mmvec/decode_ext_mmvec.c
new file mode 100644
index 000000000..061a65ab8
--- /dev/null
+++ b/target/hexagon/mmvec/decode_ext_mmvec.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "decode.h"
+#include "opcodes.h"
+#include "insn.h"
+#include "iclass.h"
+#include "mmvec/mmvec.h"
+#include "mmvec/decode_ext_mmvec.h"
+
+static void
+check_new_value(Packet *pkt)
+{
+ /* .new value for a MMVector store */
+ int i, j;
+ const char *reginfo;
+ const char *destletters;
+ const char *dststr = NULL;
+ uint16_t def_opcode;
+ char letter;
+ int def_regnum;
+
+ for (i = 1; i < pkt->num_insns; i++) {
+ uint16_t use_opcode = pkt->insn[i].opcode;
+ if (GET_ATTRIB(use_opcode, A_DOTNEWVALUE) &&
+ GET_ATTRIB(use_opcode, A_CVI) &&
+ GET_ATTRIB(use_opcode, A_STORE)) {
+ int use_regidx = strchr(opcode_reginfo[use_opcode], 's') -
+ opcode_reginfo[use_opcode];
+            /*
+             * The N-field encodes the distance back to the instruction
+             * that produces the value.
+             * Shift off the LSB, which selects the odd/even register of
+             * the producing pair.
+             */
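+            /*
+             * Example (illustrative): regno == 3 gives def_off = 1 and
+             * def_oreg = 1, i.e. the producer is the first HVX instruction
+             * found scanning backwards, and the consumer reads the odd
+             * register of the producing pair.
+             */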
+ int def_off = ((pkt->insn[i].regno[use_regidx]) >> 1);
+ int def_oreg = pkt->insn[i].regno[use_regidx] & 1;
+ int def_idx = -1;
+ for (j = i - 1; (j >= 0) && (def_off >= 0); j--) {
+ if (!GET_ATTRIB(pkt->insn[j].opcode, A_CVI)) {
+ continue;
+ }
+ def_off--;
+ if (def_off == 0) {
+ def_idx = j;
+ break;
+ }
+ }
+ /*
+ * Check for a badly encoded N-field which points to an instruction
+ * out-of-range
+ */
+            g_assert(def_off == 0 && def_idx >= 0 &&
+                     def_idx <= pkt->num_insns - 1);
+
+ /* def_idx is the index of the producer */
+ def_opcode = pkt->insn[def_idx].opcode;
+ reginfo = opcode_reginfo[def_opcode];
+ destletters = "dexy";
+ for (j = 0; (letter = destletters[j]) != 0; j++) {
+ dststr = strchr(reginfo, letter);
+ if (dststr != NULL) {
+ break;
+ }
+ }
+ if ((dststr == NULL) && GET_ATTRIB(def_opcode, A_CVI_GATHER)) {
+ def_regnum = 0;
+ pkt->insn[i].regno[use_regidx] = def_oreg;
+ pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot;
+ } else {
+ if (dststr == NULL) {
+ /* still not there, we have a bad packet */
+ g_assert_not_reached();
+ }
+ def_regnum = pkt->insn[def_idx].regno[dststr - reginfo];
+ /* Now patch up the consumer with the register number */
+ pkt->insn[i].regno[use_regidx] = def_regnum ^ def_oreg;
+ /* special case for (Vx,Vy) */
+ dststr = strchr(reginfo, 'y');
+ if (def_oreg && strchr(reginfo, 'x') && dststr) {
+ def_regnum = pkt->insn[def_idx].regno[dststr - reginfo];
+ pkt->insn[i].regno[use_regidx] = def_regnum;
+ }
+ /*
+ * We need to remember who produces this value to later
+ * check if it was dynamically cancelled
+ */
+ pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot;
+ }
+ }
+ }
+}
+
+/*
+ * We don't want to reorder slot1/slot0 with respect to each other.
+ * So in our shuffling, we don't want to move the .cur / .tmp vmem earlier
+ * Instead, we should move the producing instruction later
+ * But the producing instruction might feed a .new store!
+ * So we may need to move that even later.
+ */
+
+static void
+decode_mmvec_move_cvi_to_end(Packet *pkt, int max)
+{
+ int i;
+ for (i = 0; i < max; i++) {
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) {
+ int last_inst = pkt->num_insns - 1;
+ uint16_t last_opcode = pkt->insn[last_inst].opcode;
+
+            /*
+             * If the last instruction is an endloop, insert before it;
+             * the endloop must always stay last in the packet.
+             */
+ if ((last_opcode == J2_endloop0) ||
+ (last_opcode == J2_endloop1) ||
+ (last_opcode == J2_endloop01)) {
+ last_inst--;
+ }
+
+ decode_send_insn_to(pkt, i, last_inst);
+ max--;
+ i--; /* Retry this index now that packet has rotated */
+ }
+ }
+}
+
+static void
+decode_shuffle_for_execution_vops(Packet *pkt)
+{
+ /*
+ * Sort for .new
+ */
+ int i;
+ for (i = 0; i < pkt->num_insns; i++) {
+ uint16_t opcode = pkt->insn[i].opcode;
+ if (GET_ATTRIB(opcode, A_LOAD) &&
+ (GET_ATTRIB(opcode, A_CVI_NEW) ||
+ GET_ATTRIB(opcode, A_CVI_TMP))) {
+ /*
+ * Find prior consuming vector instructions
+ * Move to end of packet
+ */
+ decode_mmvec_move_cvi_to_end(pkt, i);
+ break;
+ }
+ }
+
+ /* Move HVX new value stores to the end of the packet */
+ for (i = 0; i < pkt->num_insns - 1; i++) {
+ uint16_t opcode = pkt->insn[i].opcode;
+ if (GET_ATTRIB(opcode, A_STORE) &&
+ GET_ATTRIB(opcode, A_CVI_NEW) &&
+ !GET_ATTRIB(opcode, A_CVI_SCATTER_RELEASE)) {
+ int last_inst = pkt->num_insns - 1;
+ uint16_t last_opcode = pkt->insn[last_inst].opcode;
+
+            /*
+             * If the last instruction is an endloop, insert before it;
+             * the endloop must always stay last in the packet.
+             */
+ if ((last_opcode == J2_endloop0) ||
+ (last_opcode == J2_endloop1) ||
+ (last_opcode == J2_endloop01)) {
+ last_inst--;
+ }
+
+ decode_send_insn_to(pkt, i, last_inst);
+ break;
+ }
+ }
+}
+
+static void
+check_for_vhist(Packet *pkt)
+{
+ pkt->vhist_insn = NULL;
+ for (int i = 0; i < pkt->num_insns; i++) {
+ Insn *insn = &pkt->insn[i];
+ int opcode = insn->opcode;
+ if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_CVI_4SLOT)) {
+ pkt->vhist_insn = insn;
+ return;
+ }
+ }
+}
+
+/*
+ * Public Functions
+ */
+
+SlotMask mmvec_ext_decode_find_iclass_slots(int opcode)
+{
+ if (GET_ATTRIB(opcode, A_CVI_VM)) {
+ /* HVX memory instruction */
+ if (GET_ATTRIB(opcode, A_RESTRICT_SLOT0ONLY)) {
+ return SLOTS_0;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT1ONLY)) {
+ return SLOTS_1;
+ }
+ return SLOTS_01;
+ } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT2ONLY)) {
+ return SLOTS_2;
+ } else if (GET_ATTRIB(opcode, A_CVI_VX)) {
+ /* HVX multiply instruction */
+ return SLOTS_23;
+ } else if (GET_ATTRIB(opcode, A_CVI_VS_VX)) {
+ /* HVX permute/shift instruction */
+ return SLOTS_23;
+ } else {
+ return SLOTS_0123;
+ }
+}
+
+void mmvec_ext_decode_checks(Packet *pkt, bool disas_only)
+{
+ check_new_value(pkt);
+ if (!disas_only) {
+ decode_shuffle_for_execution_vops(pkt);
+ }
+ check_for_vhist(pkt);
+}
diff --git a/target/hexagon/mmvec/decode_ext_mmvec.h b/target/hexagon/mmvec/decode_ext_mmvec.h
new file mode 100644
index 000000000..3664b68cf
--- /dev/null
+++ b/target/hexagon/mmvec/decode_ext_mmvec.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_DECODE_EXT_MMVEC_H
+#define HEXAGON_DECODE_EXT_MMVEC_H
+
+void mmvec_ext_decode_checks(Packet *pkt, bool disas_only);
+SlotMask mmvec_ext_decode_find_iclass_slots(int opcode);
+
+#endif
diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
new file mode 100644
index 000000000..10f463036
--- /dev/null
+++ b/target/hexagon/mmvec/macros.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_MMVEC_MACROS_H
+#define HEXAGON_MMVEC_MACROS_H
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "arch.h"
+#include "mmvec/system_ext_mmvec.h"
+
+#ifndef QEMU_GENERATE
+#define VdV (*(MMVector *)(VdV_void))
+#define VsV (*(MMVector *)(VsV_void))
+#define VuV (*(MMVector *)(VuV_void))
+#define VvV (*(MMVector *)(VvV_void))
+#define VwV (*(MMVector *)(VwV_void))
+#define VxV (*(MMVector *)(VxV_void))
+#define VyV (*(MMVector *)(VyV_void))
+
+#define VddV (*(MMVectorPair *)(VddV_void))
+#define VuuV (*(MMVectorPair *)(VuuV_void))
+#define VvvV (*(MMVectorPair *)(VvvV_void))
+#define VxxV (*(MMVectorPair *)(VxxV_void))
+
+#define QeV (*(MMQReg *)(QeV_void))
+#define QdV (*(MMQReg *)(QdV_void))
+#define QsV (*(MMQReg *)(QsV_void))
+#define QtV (*(MMQReg *)(QtV_void))
+#define QuV (*(MMQReg *)(QuV_void))
+#define QvV (*(MMQReg *)(QvV_void))
+#define QxV (*(MMQReg *)(QxV_void))
+#endif
+
+#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \
+ do { \
+ env->vtcm_log.data.ub[IDX] = (VAL); \
+ if (MASK) { \
+ set_bit((IDX), env->vtcm_log.mask); \
+ } else { \
+ clear_bit((IDX), env->vtcm_log.mask); \
+ } \
+ env->vtcm_log.va[IDX] = (VA); \
+ } while (0)
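+
+/*
+ * Each LOG_VTCM_BYTE invocation records one byte of a scatter/gather in
+ * the store log: the byte value, the VTCM virtual address it targets, and
+ * a mask bit saying whether the byte is active. The log is written out to
+ * memory later, once the whole packet is known to commit.
+ */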
+
+#define fNOTQ(VAL) \
+ ({ \
+ MMQReg _ret; \
+ int _i_; \
+ for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \
+ _ret.ud[_i_] = ~VAL.ud[_i_]; \
+ } \
+ _ret;\
+ })
+#define fGETQBITS(REG, WIDTH, MASK, BITNO) \
+ ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f)))
+#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO)
+#define fGENMASKW(QREG, IDX) \
+ (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0) | \
+ ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8) | \
+ ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \
+ ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24))
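+/*
+ * Worked example (illustrative): if qbits 3:0 of QREG are 0b0101, then
+ * fGENMASKW(QREG, 0) == 0x00ff00ff: each predicate bit expands to a full
+ * byte of the 32-bit lane mask.
+ */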
+#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF))
+#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3))
+#define fGETCRUMB_SYMMETRIC(IDX, SRC) \
+ ((fGETCRUMB(IDX, SRC) >= 0 ? (2 - fGETCRUMB(IDX, SRC)) \
+ : fGETCRUMB(IDX, SRC)))
+#define fGENMASKH(QREG, IDX) \
+ (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \
+ ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8))
+#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX))
+#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX))
+#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \
+ (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL))
+#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \
+ ((fGENMASKH(QREG, IDX) & (YESVAL)) | \
+ (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \
+ ((fGENMASKW(QREG, IDX) & (YESVAL)) | \
+ (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL)))
+#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \
+ do { \
+ uint32_t __TMP = (VAL); \
+ REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \
+ REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \
+ } while (0)
+#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL)
+#define fVBYTES() (fVECSIZE())
+#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1))
+#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1))
+#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH)
+#define fVECLOGSIZE() (7)
+#define fVECSIZE() (1 << fVECLOGSIZE())
+#define fSWAPB(A, B) do { uint8_t tmp = A; A = B; B = tmp; } while (0)
+#define fV_AL_CHECK(EA, MASK) \
+ if ((EA) & (MASK)) { \
+ warn("aligning misaligned vector. EA=%08x", (EA)); \
+ }
+#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+ mem_vector_scatter_init(env)
+#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \
+ mem_vector_gather_init(env)
+#define fSCATTER_FINISH(OP)
+#define fGATHER_FINISH()
+#define fLOG_SCATTER_OP(SIZE) \
+ do { \
+ env->vtcm_log.op = true; \
+ env->vtcm_log.op_size = SIZE; \
+ } while (0)
+#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+ do { \
+ int log_byte = 0; \
+ target_ulong va = EA; \
+ target_ulong va_high = EA + LEN; \
+ for (int i0 = 0; i0 < 4; i0++) { \
+ log_byte = (va + i0) <= va_high; \
+            LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[4 * IDX + i0], \
+ 4 * IDX + i0); \
+ } \
+ } while (0)
+#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \
+ do { \
+ int log_byte = 0; \
+ target_ulong va = EA; \
+ target_ulong va_high = EA + LEN; \
+ for (int i0 = 0; i0 < 2; i0++) { \
+ log_byte = (va + i0) <= va_high; \
+ LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+ 2 * IDX + i0); \
+ } \
+ } while (0)
+
+#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \
+ ALIGNMENT, LEN) \
+ do { \
+ int log_byte = 0; \
+ target_ulong va = EA; \
+ target_ulong va_high = EA + LEN; \
+ for (int i0 = 0; i0 < 2; i0++) { \
+ log_byte = (va + i0) <= va_high; \
+ LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \
+ 2 * IDX + i0); \
+ } \
+ } while (0)
+
+/* NOTE - Will this always be tmp_VRegs[0]? */
+#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \
+ do { \
+ int i0; \
+ target_ulong va = EA; \
+ target_ulong va_high = EA + LEN; \
+ uintptr_t ra = GETPC(); \
+ int log_bank = 0; \
+ int log_byte = 0; \
+ for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \
+ log_byte = ((va + i0) <= va_high) && QVAL; \
+ log_bank |= (log_byte << i0); \
+ uint8_t B; \
+ B = cpu_ldub_data_ra(env, EA + i0, ra); \
+ env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \
+ LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \
+ } \
+ } while (0)
+#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \
+ } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \
+ } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \
+ } while (0)
+#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+ fGETQBIT(QsV, 4 * IDX + i0)); \
+ } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+ fGETQBIT(QsV, 2 * IDX + i0)); \
+ } while (0)
+#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \
+ do { \
+ GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+ fGETQBIT(QsV, 2 * IDX + i0)); \
+ } while (0)
+#define SCATTER_OP_WRITE_TO_MEM(TYPE) \
+ do { \
+ uintptr_t ra = GETPC(); \
+ for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \
+ if (test_bit(i, env->vtcm_log.mask)) { \
+ TYPE dst = 0; \
+ TYPE inc = 0; \
+ for (int j = 0; j < sizeof(TYPE); j++) { \
+ uint8_t val; \
+ val = cpu_ldub_data_ra(env, env->vtcm_log.va[i + j], ra); \
+ dst |= val << (8 * j); \
+ inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \
+ clear_bit(j + i, env->vtcm_log.mask); \
+ env->vtcm_log.data.ub[j + i] = 0; \
+ } \
+ dst += inc; \
+ for (int j = 0; j < sizeof(TYPE); j++) { \
+ cpu_stb_data_ra(env, env->vtcm_log.va[i + j], \
+ (dst >> (8 * j)) & 0xFF, ra); \
+ } \
+ } \
+ } \
+ } while (0)
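+/*
+ * For example (illustrative), SCATTER_OP_WRITE_TO_MEM(uint16_t) walks the
+ * log two bytes at a time: for every halfword whose mask bit is set it
+ * reads the current halfword at the logged address, adds the logged
+ * increment, and writes the result back, implementing the accumulating
+ * (add) form of scatter.
+ */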
+#define SCATTER_OP_PROBE_MEM(TYPE, MMU_IDX, RETADDR) \
+ do { \
+ for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \
+ if (test_bit(i, env->vtcm_log.mask)) { \
+ for (int j = 0; j < sizeof(TYPE); j++) { \
+ probe_read(env, env->vtcm_log.va[i + j], 1, \
+ MMU_IDX, RETADDR); \
+ probe_write(env, env->vtcm_log.va[i + j], 1, \
+ MMU_IDX, RETADDR); \
+ } \
+ } \
+ } \
+ } while (0)
+#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \
+ do { \
+ int i0; \
+ target_ulong va = EA; \
+ target_ulong va_high = EA + LEN; \
+ int log_bank = 0; \
+ int log_byte = 0; \
+ for (i0 = 0; i0 < ELEM_SIZE; i0++) { \
+ log_byte = ((va + i0) <= va_high) && QVAL; \
+ log_bank |= (log_byte << i0); \
+ LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \
+ ELEM_SIZE * IDX + i0); \
+ } \
+ } while (0)
+#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \
+ } while (0)
+#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \
+ } while (0)
+#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \
+ fGETQBIT(QsV, 2 * IDX + i0), IN); \
+ } while (0)
+#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \
+ fGETQBIT(QsV, 4 * IDX + i0), IN); \
+ } while (0)
+#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \
+ (2 * IDX2 + IDX_H), 1, IN); \
+ } while (0)
+#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \
+ do { \
+ SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \
+ fGETQBIT(QsV, 2 * IDX + i0), IN); \
+ } while (0)
+#define fSTORERELEASE(EA, TYPE) \
+ do { \
+ fV_AL_CHECK(EA, fVECSIZE() - 1); \
+ } while (0)
+#ifdef QEMU_GENERATE
+#define fLOADMMV(EA, DST) gen_vreg_load(ctx, DST##_off, EA, true)
+#endif
+#ifdef QEMU_GENERATE
+#define fLOADMMVU(EA, DST) gen_vreg_load(ctx, DST##_off, EA, false)
+#endif
+#ifdef QEMU_GENERATE
+#define fSTOREMMV(EA, SRC) \
+ gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, true)
+#endif
+#ifdef QEMU_GENERATE
+#define fSTOREMMVQ(EA, SRC, MASK) \
+ gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, false)
+#endif
+#ifdef QEMU_GENERATE
+#define fSTOREMMVNQ(EA, SRC, MASK) \
+ gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, true)
+#endif
+#ifdef QEMU_GENERATE
+#define fSTOREMMVU(EA, SRC) \
+ gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, false)
+#endif
+#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++)
+#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \
+ ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \
+ (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))]
+
+#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V)))
+#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31))
+#define fVUADDSAT(WIDTH, U, V) \
+ fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSADDSAT(WIDTH, U, V) \
+ fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVUSUBSAT(WIDTH, U, V) \
+ fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V))
+#define fVSSUBSAT(WIDTH, U, V) \
+ fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V))
+#define fVAVGU(WIDTH, U, V) \
+ ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGURND(WIDTH, U, V) \
+ ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGU(WIDTH, U, V) \
+ ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGURNDSAT(WIDTH, U, V) \
+ fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \
+ fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVAVGS(WIDTH, U, V) \
+ ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVAVGSRND(WIDTH, U, V) \
+ ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGS(WIDTH, U, V) \
+ ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1)
+#define fVNAVGSRND(WIDTH, U, V) \
+ ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)
+#define fVNAVGSRNDSAT(WIDTH, U, V) \
+ fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \
+ fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1))
+#define fVNOROUND(VAL, SHAMT) VAL
+#define fVNOSAT(VAL) VAL
+#define fVROUND(VAL, SHAMT) \
+ ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0))
+#define fCARRY_FROM_ADD32(A, B, C) \
+ (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1)
+#define fUARCH_NOTE_PUMP_4X()
+#define fUARCH_NOTE_PUMP_2X()
+
+#define IV1DEAD()
+#endif
diff --git a/target/hexagon/mmvec/mmvec.h b/target/hexagon/mmvec/mmvec.h
new file mode 100644
index 000000000..52d470709
--- /dev/null
+++ b/target/hexagon/mmvec/mmvec.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_MMVEC_H
+#define HEXAGON_MMVEC_H
+
+#define MAX_VEC_SIZE_LOGBYTES 7
+#define MAX_VEC_SIZE_BYTES (1 << MAX_VEC_SIZE_LOGBYTES)
+
+#define NUM_VREGS 32
+#define NUM_QREGS 4
+
+typedef uint32_t VRegMask; /* at least NUM_VREGS bits */
+typedef uint32_t QRegMask; /* at least NUM_QREGS bits */
+
+#define VECTOR_SIZE_BYTE (fVECSIZE())
+
+typedef union {
+ uint64_t ud[MAX_VEC_SIZE_BYTES / 8];
+ int64_t d[MAX_VEC_SIZE_BYTES / 8];
+ uint32_t uw[MAX_VEC_SIZE_BYTES / 4];
+ int32_t w[MAX_VEC_SIZE_BYTES / 4];
+ uint16_t uh[MAX_VEC_SIZE_BYTES / 2];
+ int16_t h[MAX_VEC_SIZE_BYTES / 2];
+ uint8_t ub[MAX_VEC_SIZE_BYTES / 1];
+ int8_t b[MAX_VEC_SIZE_BYTES / 1];
+} MMVector;
+
+typedef union {
+ uint64_t ud[2 * MAX_VEC_SIZE_BYTES / 8];
+ int64_t d[2 * MAX_VEC_SIZE_BYTES / 8];
+ uint32_t uw[2 * MAX_VEC_SIZE_BYTES / 4];
+ int32_t w[2 * MAX_VEC_SIZE_BYTES / 4];
+ uint16_t uh[2 * MAX_VEC_SIZE_BYTES / 2];
+ int16_t h[2 * MAX_VEC_SIZE_BYTES / 2];
+ uint8_t ub[2 * MAX_VEC_SIZE_BYTES / 1];
+ int8_t b[2 * MAX_VEC_SIZE_BYTES / 1];
+ MMVector v[2];
+} MMVectorPair;
+
+typedef union {
+ uint64_t ud[MAX_VEC_SIZE_BYTES / 8 / 8];
+ int64_t d[MAX_VEC_SIZE_BYTES / 8 / 8];
+ uint32_t uw[MAX_VEC_SIZE_BYTES / 4 / 8];
+ int32_t w[MAX_VEC_SIZE_BYTES / 4 / 8];
+ uint16_t uh[MAX_VEC_SIZE_BYTES / 2 / 8];
+ int16_t h[MAX_VEC_SIZE_BYTES / 2 / 8];
+ uint8_t ub[MAX_VEC_SIZE_BYTES / 1 / 8];
+ int8_t b[MAX_VEC_SIZE_BYTES / 1 / 8];
+} MMQReg;
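+
+/*
+ * An MMQReg holds one predicate bit per vector byte, packed 8 bits per
+ * byte, so it occupies MAX_VEC_SIZE_BYTES / 8 == 16 bytes for a 128-byte
+ * vector.
+ */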
+
+typedef struct {
+ MMVector data;
+ DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES);
+ target_ulong va[MAX_VEC_SIZE_BYTES];
+ bool op; /* Scatter accumulate (read/modify/write) op? */
+ int op_size; /* Size of each accumulated element: 2 or 4 bytes */
+} VTCMStoreLog;
+
+
+/* Types of vector register assignment */
+typedef enum {
+ EXT_DFL, /* Default */
+ EXT_NEW, /* New - value used in the same packet */
+ EXT_TMP /* Temp - value used but not stored to register */
+} VRegWriteType;
+
+#endif
diff --git a/target/hexagon/mmvec/system_ext_mmvec.c b/target/hexagon/mmvec/system_ext_mmvec.c
new file mode 100644
index 000000000..8351f2cc0
--- /dev/null
+++ b/target/hexagon/mmvec/system_ext_mmvec.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmvec/system_ext_mmvec.h"
+
+void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot)
+{
+ size_t size = sizeof(MMVector);
+
+ env->vstore_pending[slot] = 1;
+ env->vstore[slot].va = vaddr;
+ env->vstore[slot].size = size;
+ memcpy(&env->vstore[slot].data.ub[0], &env->tmp_VRegs[0], size);
+
+ /* On a gather store, overwrite the store mask to emulate dropped gathers */
+ bitmap_copy(env->vstore[slot].mask, env->vtcm_log.mask, size);
+}
+
+void mem_vector_scatter_init(CPUHexagonState *env)
+{
+ bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES);
+
+ env->vtcm_pending = true;
+ env->vtcm_log.op = false;
+ env->vtcm_log.op_size = 0;
+}
+
+void mem_vector_gather_init(CPUHexagonState *env)
+{
+ bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES);
+}
diff --git a/target/hexagon/mmvec/system_ext_mmvec.h b/target/hexagon/mmvec/system_ext_mmvec.h
new file mode 100644
index 000000000..bcefbffdf
--- /dev/null
+++ b/target/hexagon/mmvec/system_ext_mmvec.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_SYSTEM_EXT_MMVEC_H
+#define HEXAGON_SYSTEM_EXT_MMVEC_H
+
+void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot);
+void mem_vector_scatter_init(CPUHexagonState *env);
+void mem_vector_gather_init(CPUHexagonState *env);
+
+#endif
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
new file mode 100644
index 000000000..057baf9a4
--- /dev/null
+++ b/target/hexagon/op_helper.c
@@ -0,0 +1,1475 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "cpu.h"
+#include "internal.h"
+#include "macros.h"
+#include "arch.h"
+#include "hex_arch_types.h"
+#include "fma_emu.h"
+#include "mmvec/mmvec.h"
+#include "mmvec/macros.h"
+
+#define SF_BIAS 127
+#define SF_MANTBITS 23
+
+/* Exceptions processing helpers */
+static void QEMU_NORETURN do_raise_exception_err(CPUHexagonState *env,
+ uint32_t exception,
+ uintptr_t pc)
+{
+ CPUState *cs = env_cpu(env);
+ qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
+ cs->exception_index = exception;
+ cpu_loop_exit_restore(cs, pc);
+}
+
+void QEMU_NORETURN HELPER(raise_exception)(CPUHexagonState *env, uint32_t excp)
+{
+ do_raise_exception_err(env, excp, 0);
+}
+
+static void log_reg_write(CPUHexagonState *env, int rnum,
+ target_ulong val, uint32_t slot)
+{
+ HEX_DEBUG_LOG("log_reg_write[%d] = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")",
+ rnum, val, val);
+ if (val == env->gpr[rnum]) {
+ HEX_DEBUG_LOG(" NO CHANGE");
+ }
+ HEX_DEBUG_LOG("\n");
+
+ env->new_value[rnum] = val;
+ if (HEX_DEBUG) {
+ /* Do this so HELPER(debug_commit_end) will know */
+ env->reg_written[rnum] = 1;
+ }
+}
+
+static void log_pred_write(CPUHexagonState *env, int pnum, target_ulong val)
+{
+ HEX_DEBUG_LOG("log_pred_write[%d] = " TARGET_FMT_ld
+ " (0x" TARGET_FMT_lx ")\n",
+ pnum, val, val);
+
+ /* Multiple writes to the same preg are and'ed together */
+ if (env->pred_written & (1 << pnum)) {
+ env->new_pred_value[pnum] &= val & 0xff;
+ } else {
+ env->new_pred_value[pnum] = val & 0xff;
+ env->pred_written |= 1 << pnum;
+ }
+}
+
+static void log_store32(CPUHexagonState *env, target_ulong addr,
+ target_ulong val, int width, int slot)
+{
+ HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx
+ ", %" PRId32 " [0x08%" PRIx32 "])\n",
+ width, addr, val, val);
+ env->mem_log_stores[slot].va = addr;
+ env->mem_log_stores[slot].width = width;
+ env->mem_log_stores[slot].data32 = val;
+}
+
+static void log_store64(CPUHexagonState *env, target_ulong addr,
+ int64_t val, int width, int slot)
+{
+ HEX_DEBUG_LOG("log_store%d(0x" TARGET_FMT_lx
+ ", %" PRId64 " [0x016%" PRIx64 "])\n",
+ width, addr, val, val);
+ env->mem_log_stores[slot].va = addr;
+ env->mem_log_stores[slot].width = width;
+ env->mem_log_stores[slot].data64 = val;
+}
+
+static void write_new_pc(CPUHexagonState *env, target_ulong addr)
+{
+ HEX_DEBUG_LOG("write_new_pc(0x" TARGET_FMT_lx ")\n", addr);
+
+ /*
+ * If more than one branch is taken in a packet, only the first one
+ * is actually done.
+ */
+ if (env->branch_taken) {
+ HEX_DEBUG_LOG("INFO: multiple branches taken in same packet, "
+ "ignoring the second one\n");
+ } else {
+ fCHECK_PCALIGN(addr);
+ env->branch_taken = 1;
+ env->next_PC = addr;
+ }
+}
+
+/* Handy place to set a breakpoint */
+void HELPER(debug_start_packet)(CPUHexagonState *env)
+{
+ HEX_DEBUG_LOG("Start packet: pc = 0x" TARGET_FMT_lx "\n",
+ env->gpr[HEX_REG_PC]);
+
+ for (int i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
+ env->reg_written[i] = 0;
+ }
+}
+
+/* Checks for bookkeeping errors between disassembly context and runtime */
+void HELPER(debug_check_store_width)(CPUHexagonState *env, int slot, int check)
+{
+ if (env->mem_log_stores[slot].width != check) {
+ HEX_DEBUG_LOG("ERROR: %d != %d\n",
+ env->mem_log_stores[slot].width, check);
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(commit_store)(CPUHexagonState *env, int slot_num)
+{
+ uintptr_t ra = GETPC();
+ uint8_t width = env->mem_log_stores[slot_num].width;
+ target_ulong va = env->mem_log_stores[slot_num].va;
+
+ switch (width) {
+ case 1:
+ cpu_stb_data_ra(env, va, env->mem_log_stores[slot_num].data32, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, va, env->mem_log_stores[slot_num].data32, ra);
+ break;
+ case 4:
+ cpu_stl_data_ra(env, va, env->mem_log_stores[slot_num].data32, ra);
+ break;
+ case 8:
+ cpu_stq_data_ra(env, va, env->mem_log_stores[slot_num].data64, ra);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(gather_store)(CPUHexagonState *env, uint32_t addr, int slot)
+{
+ mem_gather_store(env, addr, slot);
+}
+
+void HELPER(commit_hvx_stores)(CPUHexagonState *env)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ /* Normal (possibly masked) vector store */
+ for (i = 0; i < VSTORES_MAX; i++) {
+ if (env->vstore_pending[i]) {
+ env->vstore_pending[i] = 0;
+ target_ulong va = env->vstore[i].va;
+ int size = env->vstore[i].size;
+ for (int j = 0; j < size; j++) {
+ if (test_bit(j, env->vstore[i].mask)) {
+ cpu_stb_data_ra(env, va + j, env->vstore[i].data.ub[j], ra);
+ }
+ }
+ }
+ }
+
+ /* Scatter store */
+ if (env->vtcm_pending) {
+ env->vtcm_pending = false;
+ if (env->vtcm_log.op) {
+ /* Need to perform the scatter read/modify/write at commit time */
+ if (env->vtcm_log.op_size == 2) {
+ SCATTER_OP_WRITE_TO_MEM(uint16_t);
+ } else if (env->vtcm_log.op_size == 4) {
+ /* Word Scatter += */
+ SCATTER_OP_WRITE_TO_MEM(uint32_t);
+ } else {
+ g_assert_not_reached();
+ }
+ } else {
+ for (i = 0; i < sizeof(MMVector); i++) {
+ if (test_bit(i, env->vtcm_log.mask)) {
+ cpu_stb_data_ra(env, env->vtcm_log.va[i],
+ env->vtcm_log.data.ub[i], ra);
+ clear_bit(i, env->vtcm_log.mask);
+ env->vtcm_log.data.ub[i] = 0;
+ }
+ }
+ }
+ }
+}
+
+static void print_store(CPUHexagonState *env, int slot)
+{
+ if (!(env->slot_cancelled & (1 << slot))) {
+ uint8_t width = env->mem_log_stores[slot].width;
+ if (width == 1) {
+ uint32_t data = env->mem_log_stores[slot].data32 & 0xff;
+ HEX_DEBUG_LOG("\tmemb[0x" TARGET_FMT_lx "] = %" PRId32
+ " (0x%02" PRIx32 ")\n",
+ env->mem_log_stores[slot].va, data, data);
+ } else if (width == 2) {
+ uint32_t data = env->mem_log_stores[slot].data32 & 0xffff;
+ HEX_DEBUG_LOG("\tmemh[0x" TARGET_FMT_lx "] = %" PRId32
+ " (0x%04" PRIx32 ")\n",
+ env->mem_log_stores[slot].va, data, data);
+ } else if (width == 4) {
+ uint32_t data = env->mem_log_stores[slot].data32;
+ HEX_DEBUG_LOG("\tmemw[0x" TARGET_FMT_lx "] = %" PRId32
+ " (0x%08" PRIx32 ")\n",
+ env->mem_log_stores[slot].va, data, data);
+ } else if (width == 8) {
+ HEX_DEBUG_LOG("\tmemd[0x" TARGET_FMT_lx "] = %" PRId64
+ " (0x%016" PRIx64 ")\n",
+ env->mem_log_stores[slot].va,
+ env->mem_log_stores[slot].data64,
+ env->mem_log_stores[slot].data64);
+ } else {
+ HEX_DEBUG_LOG("\tBad store width %d\n", width);
+ g_assert_not_reached();
+ }
+ }
+}
+
+/* This function is a handy place to set a breakpoint */
+void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1)
+{
+ bool reg_printed = false;
+ bool pred_printed = false;
+ int i;
+
+ HEX_DEBUG_LOG("Packet committed: pc = 0x" TARGET_FMT_lx "\n",
+ env->this_PC);
+ HEX_DEBUG_LOG("slot_cancelled = %d\n", env->slot_cancelled);
+
+ for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
+ if (env->reg_written[i]) {
+ if (!reg_printed) {
+ HEX_DEBUG_LOG("Regs written\n");
+ reg_printed = true;
+ }
+ HEX_DEBUG_LOG("\tr%d = " TARGET_FMT_ld " (0x" TARGET_FMT_lx ")\n",
+ i, env->new_value[i], env->new_value[i]);
+ }
+ }
+
+ for (i = 0; i < NUM_PREGS; i++) {
+ if (env->pred_written & (1 << i)) {
+ if (!pred_printed) {
+ HEX_DEBUG_LOG("Predicates written\n");
+ pred_printed = true;
+ }
+ HEX_DEBUG_LOG("\tp%d = 0x" TARGET_FMT_lx "\n",
+ i, env->new_pred_value[i]);
+ }
+ }
+
+ if (has_st0 || has_st1) {
+ HEX_DEBUG_LOG("Stores\n");
+ if (has_st0) {
+ print_store(env, 0);
+ }
+ if (has_st1) {
+ print_store(env, 1);
+ }
+ }
+
+ HEX_DEBUG_LOG("Next PC = " TARGET_FMT_lx "\n", env->next_PC);
+ HEX_DEBUG_LOG("Exec counters: pkt = " TARGET_FMT_lx
+ ", insn = " TARGET_FMT_lx
+ ", hvx = " TARGET_FMT_lx "\n",
+ env->gpr[HEX_REG_QEMU_PKT_CNT],
+ env->gpr[HEX_REG_QEMU_INSN_CNT],
+ env->gpr[HEX_REG_QEMU_HVX_CNT]);
+}
+
+int32_t HELPER(fcircadd)(int32_t RxV, int32_t offset, int32_t M, int32_t CS)
+{
+ int32_t K_const = sextract32(M, 24, 4);
+ int32_t length = sextract32(M, 0, 17);
+ uint32_t new_ptr = RxV + offset;
+ uint32_t start_addr;
+ uint32_t end_addr;
+
+ if (K_const == 0 && length >= 4) {
+ start_addr = CS;
+ end_addr = start_addr + length;
+ } else {
+ /*
+ * Versions v3 and earlier used the K value to specify a power-of-2 size
+ * 2^(K+2) that is greater than the buffer length
+ */
+ int32_t mask = (1 << (K_const + 2)) - 1;
+ start_addr = RxV & (~mask);
+ end_addr = start_addr | length;
+ }
+
+ if (new_ptr >= end_addr) {
+ new_ptr -= length;
+ } else if (new_ptr < start_addr) {
+ new_ptr += length;
+ }
+
+ return new_ptr;
+}
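+
+/*
+ * Worked example (hypothetical values): with K_const == 0, length == 16
+ * and CS == 0x1000, the buffer spans [0x1000, 0x1010). Starting from
+ * RxV == 0x100c, an offset of 8 yields new_ptr == 0x1014, which is past
+ * end_addr, so it wraps to 0x1014 - 16 == 0x1004.
+ */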
+
+uint32_t HELPER(fbrev)(uint32_t addr)
+{
+ /*
+ * Bit reverse the low 16 bits of the address
+ */
+ return deposit32(addr, 0, 16, revbit16(addr));
+}
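+
+/*
+ * Example (illustrative): fbrev(0x00000001) == 0x00008000, since only the
+ * low 16 bits are reversed and the upper half is preserved. This backs the
+ * bit-reversed addressing mode commonly used by FFT-style kernels.
+ */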
+
+static float32 build_float32(uint8_t sign, uint32_t exp, uint32_t mant)
+{
+ return make_float32(
+ ((sign & 1) << 31) |
+ ((exp & 0xff) << SF_MANTBITS) |
+ (mant & ((1 << SF_MANTBITS) - 1)));
+}
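+
+/*
+ * Example (illustrative): build_float32(0, SF_BIAS, 0) assembles the bit
+ * pattern 0x3f800000, i.e. 1.0f.
+ */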
+
+/*
+ * sfrecipa, sfinvsqrta have two 32-bit results
+ * r0,p0=sfrecipa(r1,r2)
+ * r0,p0=sfinvsqrta(r1)
+ *
+ * Since helpers can only return a single value, we pack the two results
+ * into a 64-bit value.
+ */
+uint64_t HELPER(sfrecipa)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ int32_t PeV = 0;
+ float32 RdV;
+ int idx;
+ int adjust;
+ int mant;
+ int exp;
+
+ arch_fpop_start(env);
+ if (arch_sf_recip_common(&RsV, &RtV, &RdV, &adjust, &env->fp_status)) {
+ PeV = adjust;
+ idx = (RtV >> 16) & 0x7f;
+ mant = (recip_lookup_table[idx] << 15) | 1;
+ exp = SF_BIAS - (float32_getexp(RtV) - SF_BIAS) - 1;
+ RdV = build_float32(extract32(RtV, 31, 1), exp, mant);
+ }
+ arch_fpop_end(env);
+ return ((uint64_t)RdV << 32) | PeV;
+}
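+
+/*
+ * Unpacking sketch for a hypothetical caller (names are illustrative):
+ *
+ *     uint64_t packed = helper_sfrecipa(env, RsV, RtV);
+ *     float32 RdV = (float32)(packed >> 32);           // estimate
+ *     int32_t PeV = (int32_t)(packed & 0xffffffff);    // exponent adjust
+ */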
+
+uint64_t HELPER(sfinvsqrta)(CPUHexagonState *env, float32 RsV)
+{
+ int PeV = 0;
+ float32 RdV;
+ int idx;
+ int adjust;
+ int mant;
+ int exp;
+
+ arch_fpop_start(env);
+ if (arch_sf_invsqrt_common(&RsV, &RdV, &adjust, &env->fp_status)) {
+ PeV = adjust;
+ idx = (RsV >> 17) & 0x7f;
+ mant = (invsqrt_lookup_table[idx] << 15);
+ exp = SF_BIAS - ((float32_getexp(RsV) - SF_BIAS) >> 1) - 1;
+ RdV = build_float32(extract32(RsV, 31, 1), exp, mant);
+ }
+ arch_fpop_end(env);
+ return ((uint64_t)RdV << 32) | PeV;
+}
+
+int64_t HELPER(vacsh_val)(CPUHexagonState *env,
+ int64_t RxxV, int64_t RssV, int64_t RttV)
+{
+ for (int i = 0; i < 4; i++) {
+ int xv = sextract64(RxxV, i * 16, 16);
+ int sv = sextract64(RssV, i * 16, 16);
+ int tv = sextract64(RttV, i * 16, 16);
+ int max;
+ xv = xv + tv;
+ sv = sv - tv;
+ max = xv > sv ? xv : sv;
+ /* Note that fSATH can set the OVF bit in usr */
+ RxxV = deposit64(RxxV, i * 16, 16, fSATH(max));
+ }
+ return RxxV;
+}
+
+int32_t HELPER(vacsh_pred)(CPUHexagonState *env,
+ int64_t RxxV, int64_t RssV, int64_t RttV)
+{
+ int32_t PeV = 0;
+ for (int i = 0; i < 4; i++) {
+ int xv = sextract64(RxxV, i * 16, 16);
+ int sv = sextract64(RssV, i * 16, 16);
+ int tv = sextract64(RttV, i * 16, 16);
+ xv = xv + tv;
+ sv = sv - tv;
+ PeV = deposit32(PeV, i * 2, 1, (xv > sv));
+ PeV = deposit32(PeV, i * 2 + 1, 1, (xv > sv));
+ }
+ return PeV;
+}
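+
+/*
+ * Worked lane example (illustrative): with xv == 100, sv == 50 and
+ * tv == 30, the add-compare-select computes xv + tv == 130 and
+ * sv - tv == 20; vacsh_val stores fSATH(130) == 130 and vacsh_pred sets
+ * both of the lane's predicate bits because 130 > 20.
+ */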
+
+static void probe_store(CPUHexagonState *env, int slot, int mmu_idx)
+{
+ if (!(env->slot_cancelled & (1 << slot))) {
+ size1u_t width = env->mem_log_stores[slot].width;
+ target_ulong va = env->mem_log_stores[slot].va;
+ uintptr_t ra = GETPC();
+ probe_write(env, va, width, mmu_idx, ra);
+ }
+}
+
+/* Called during packet commit when there are two scalar stores */
+void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx)
+{
+ probe_store(env, 0, mmu_idx);
+}
+
+void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx)
+{
+ uintptr_t retaddr = GETPC();
+ int i;
+
+ /* Normal (possibly masked) vector store */
+ for (i = 0; i < VSTORES_MAX; i++) {
+ if (env->vstore_pending[i]) {
+ target_ulong va = env->vstore[i].va;
+ int size = env->vstore[i].size;
+ for (int j = 0; j < size; j++) {
+ if (test_bit(j, env->vstore[i].mask)) {
+ probe_write(env, va + j, 1, mmu_idx, retaddr);
+ }
+ }
+ }
+ }
+
+ /* Scatter store */
+ if (env->vtcm_pending) {
+ if (env->vtcm_log.op) {
+ /* Need to perform the scatter read/modify/write at commit time */
+ if (env->vtcm_log.op_size == 2) {
+ SCATTER_OP_PROBE_MEM(size2u_t, mmu_idx, retaddr);
+ } else if (env->vtcm_log.op_size == 4) {
+ /* Word Scatter += */
+ SCATTER_OP_PROBE_MEM(size4u_t, mmu_idx, retaddr);
+ } else {
+ g_assert_not_reached();
+ }
+ } else {
+ for (i = 0; i < sizeof(MMVector); i++) {
+ if (test_bit(i, env->vtcm_log.mask)) {
+ probe_write(env, env->vtcm_log.va[i], 1, mmu_idx, retaddr);
+ }
+ }
+ }
+ }
+}
+
+void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask,
+ int mmu_idx)
+{
+ bool has_st0 = (mask >> 0) & 1;
+ bool has_st1 = (mask >> 1) & 1;
+ bool has_hvx_stores = (mask >> 2) & 1;
+
+ if (has_st0) {
+ probe_store(env, 0, mmu_idx);
+ }
+ if (has_st1) {
+ probe_store(env, 1, mmu_idx);
+ }
+ if (has_hvx_stores) {
+ HELPER(probe_hvx_stores)(env, mmu_idx);
+ }
+}
+
+/*
+ * mem_noshuf
+ * Section 5.5 of the Hexagon V67 Programmer's Reference Manual
+ *
+ * If the load is in slot 0 and there is a store in slot1 (that
+ * wasn't cancelled), we have to do the store first.
+ */
+static void check_noshuf(CPUHexagonState *env, uint32_t slot)
+{
+ if (slot == 0 && env->pkt_has_store_s1 &&
+ ((env->slot_cancelled & (1 << 1)) == 0)) {
+ HELPER(commit_store)(env, 1);
+ }
+}
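+
+/*
+ * Example (illustrative): in a packet such as
+ *     { r2 = memw(r5); memw(r6) = r3 }
+ * where the load lands in slot 0 and the store in slot 1, check_noshuf
+ * commits the slot 1 store first, so a load from an aliasing address
+ * observes the stored value, as the mem_noshuf semantics require.
+ */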
+
+static uint8_t mem_load1(CPUHexagonState *env, uint32_t slot,
+ target_ulong vaddr)
+{
+ uintptr_t ra = GETPC();
+ check_noshuf(env, slot);
+ return cpu_ldub_data_ra(env, vaddr, ra);
+}
+
+static uint16_t mem_load2(CPUHexagonState *env, uint32_t slot,
+ target_ulong vaddr)
+{
+ uintptr_t ra = GETPC();
+ check_noshuf(env, slot);
+ return cpu_lduw_data_ra(env, vaddr, ra);
+}
+
+static uint32_t mem_load4(CPUHexagonState *env, uint32_t slot,
+ target_ulong vaddr)
+{
+ uintptr_t ra = GETPC();
+ check_noshuf(env, slot);
+ return cpu_ldl_data_ra(env, vaddr, ra);
+}
+
+static uint64_t mem_load8(CPUHexagonState *env, uint32_t slot,
+ target_ulong vaddr)
+{
+ uintptr_t ra = GETPC();
+ check_noshuf(env, slot);
+ return cpu_ldq_data_ra(env, vaddr, ra);
+}
+
+/* Floating point */
+float64 HELPER(conv_sf2df)(CPUHexagonState *env, float32 RsV)
+{
+ float64 out_f64;
+ arch_fpop_start(env);
+ out_f64 = float32_to_float64(RsV, &env->fp_status);
+ arch_fpop_end(env);
+ return out_f64;
+}
+
+float32 HELPER(conv_df2sf)(CPUHexagonState *env, float64 RssV)
+{
+ float32 out_f32;
+ arch_fpop_start(env);
+ out_f32 = float64_to_float32(RssV, &env->fp_status);
+ arch_fpop_end(env);
+ return out_f32;
+}
+
+float32 HELPER(conv_uw2sf)(CPUHexagonState *env, int32_t RsV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = uint32_to_float32(RsV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float64 HELPER(conv_uw2df)(CPUHexagonState *env, int32_t RsV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = uint32_to_float64(RsV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float32 HELPER(conv_w2sf)(CPUHexagonState *env, int32_t RsV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = int32_to_float32(RsV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float64 HELPER(conv_w2df)(CPUHexagonState *env, int32_t RsV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = int32_to_float64(RsV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float32 HELPER(conv_ud2sf)(CPUHexagonState *env, int64_t RssV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = uint64_to_float32(RssV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float64 HELPER(conv_ud2df)(CPUHexagonState *env, int64_t RssV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = uint64_to_float64(RssV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float32 HELPER(conv_d2sf)(CPUHexagonState *env, int64_t RssV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = int64_to_float32(RssV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float64 HELPER(conv_d2df)(CPUHexagonState *env, int64_t RssV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = int64_to_float64(RssV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+uint32_t HELPER(conv_sf2uw)(CPUHexagonState *env, float32 RsV)
+{
+ uint32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = 0;
+ } else {
+ RdV = float32_to_uint32(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(conv_sf2w)(CPUHexagonState *env, float32 RsV)
+{
+ int32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = -1;
+ } else {
+ RdV = float32_to_int32(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+uint64_t HELPER(conv_sf2ud)(CPUHexagonState *env, float32 RsV)
+{
+ uint64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = 0;
+ } else {
+ RddV = float32_to_uint64(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+int64_t HELPER(conv_sf2d)(CPUHexagonState *env, float32 RsV)
+{
+ int64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = -1;
+ } else {
+ RddV = float32_to_int64(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+uint32_t HELPER(conv_df2uw)(CPUHexagonState *env, float64 RssV)
+{
+ uint32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = 0;
+ } else {
+ RdV = float64_to_uint32(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(conv_df2w)(CPUHexagonState *env, float64 RssV)
+{
+ int32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = -1;
+ } else {
+ RdV = float64_to_int32(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+uint64_t HELPER(conv_df2ud)(CPUHexagonState *env, float64 RssV)
+{
+ uint64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = 0;
+ } else {
+ RddV = float64_to_uint64(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+int64_t HELPER(conv_df2d)(CPUHexagonState *env, float64 RssV)
+{
+ int64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = -1;
+ } else {
+ RddV = float64_to_int64(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+uint32_t HELPER(conv_sf2uw_chop)(CPUHexagonState *env, float32 RsV)
+{
+ uint32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = 0;
+ } else {
+ RdV = float32_to_uint32_round_to_zero(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(conv_sf2w_chop)(CPUHexagonState *env, float32 RsV)
+{
+ int32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = -1;
+ } else {
+ RdV = float32_to_int32_round_to_zero(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+uint64_t HELPER(conv_sf2ud_chop)(CPUHexagonState *env, float32 RsV)
+{
+ uint64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float32_is_neg(RsV) && !float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = 0;
+ } else {
+ RddV = float32_to_uint64_round_to_zero(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+int64_t HELPER(conv_sf2d_chop)(CPUHexagonState *env, float32 RsV)
+{
+ int64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float32_is_any_nan(RsV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = -1;
+ } else {
+ RddV = float32_to_int64_round_to_zero(RsV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+uint32_t HELPER(conv_df2uw_chop)(CPUHexagonState *env, float64 RssV)
+{
+ uint32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = 0;
+ } else {
+ RdV = float64_to_uint32_round_to_zero(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(conv_df2w_chop)(CPUHexagonState *env, float64 RssV)
+{
+ int32_t RdV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RdV = -1;
+ } else {
+ RdV = float64_to_int32_round_to_zero(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RdV;
+}
+
+uint64_t HELPER(conv_df2ud_chop)(CPUHexagonState *env, float64 RssV)
+{
+ uint64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon checks the sign before rounding */
+ if (float64_is_neg(RssV) && !float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = 0;
+ } else {
+ RddV = float64_to_uint64_round_to_zero(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+int64_t HELPER(conv_df2d_chop)(CPUHexagonState *env, float64 RssV)
+{
+ int64_t RddV;
+ arch_fpop_start(env);
+ /* Hexagon returns -1 for NaN */
+ if (float64_is_any_nan(RssV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ RddV = -1;
+ } else {
+ RddV = float64_to_int64_round_to_zero(RssV, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float32 HELPER(sfadd)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = float32_add(RsV, RtV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float32 HELPER(sfsub)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = float32_sub(RsV, RtV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(sfcmpeq)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ int32_t PdV;
+ arch_fpop_start(env);
+ PdV = f8BITSOF(float32_eq_quiet(RsV, RtV, &env->fp_status));
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(sfcmpgt)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ int cmp;
+ int32_t PdV;
+ arch_fpop_start(env);
+ cmp = float32_compare_quiet(RsV, RtV, &env->fp_status);
+ PdV = f8BITSOF(cmp == float_relation_greater);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(sfcmpge)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ int cmp;
+ int32_t PdV;
+ arch_fpop_start(env);
+ cmp = float32_compare_quiet(RsV, RtV, &env->fp_status);
+ PdV = f8BITSOF(cmp == float_relation_greater ||
+ cmp == float_relation_equal);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(sfcmpuo)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ int32_t PdV;
+ arch_fpop_start(env);
+ PdV = f8BITSOF(float32_is_any_nan(RsV) ||
+ float32_is_any_nan(RtV));
+ arch_fpop_end(env);
+ return PdV;
+}
+
+float32 HELPER(sfmax)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = float32_maxnum(RsV, RtV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float32 HELPER(sfmin)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = float32_minnum(RsV, RtV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+int32_t HELPER(sfclass)(CPUHexagonState *env, float32 RsV, int32_t uiV)
+{
+ int32_t PdV = 0;
+ arch_fpop_start(env);
+ if (fGETBIT(0, uiV) && float32_is_zero(RsV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(1, uiV) && float32_is_normal(RsV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(2, uiV) && float32_is_denormal(RsV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(3, uiV) && float32_is_infinity(RsV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(4, uiV) && float32_is_any_nan(RsV)) {
+ PdV = 0xff;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+float32 HELPER(sffixupn)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV = 0;
+ int adjust;
+ arch_fpop_start(env);
+ arch_sf_recip_common(&RsV, &RtV, &RdV, &adjust, &env->fp_status);
+ RdV = RsV;
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float32 HELPER(sffixupd)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV = 0;
+ int adjust;
+ arch_fpop_start(env);
+ arch_sf_recip_common(&RsV, &RtV, &RdV, &adjust, &env->fp_status);
+ RdV = RtV;
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float32 HELPER(sffixupr)(CPUHexagonState *env, float32 RsV)
+{
+ float32 RdV = 0;
+ int adjust;
+ arch_fpop_start(env);
+ arch_sf_invsqrt_common(&RsV, &RdV, &adjust, &env->fp_status);
+ RdV = RsV;
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float64 HELPER(dfadd)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = float64_add(RssV, RttV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float64 HELPER(dfsub)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = float64_sub(RssV, RttV, &env->fp_status);
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float64 HELPER(dfmax)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = float64_maxnum(RssV, RttV, &env->fp_status);
+ if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+float64 HELPER(dfmin)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ RddV = float64_minnum(RssV, RttV, &env->fp_status);
+ if (float64_is_any_nan(RssV) || float64_is_any_nan(RttV)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
+
+int32_t HELPER(dfcmpeq)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ int32_t PdV;
+ arch_fpop_start(env);
+ PdV = f8BITSOF(float64_eq_quiet(RssV, RttV, &env->fp_status));
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(dfcmpgt)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ int cmp;
+ int32_t PdV;
+ arch_fpop_start(env);
+ cmp = float64_compare_quiet(RssV, RttV, &env->fp_status);
+ PdV = f8BITSOF(cmp == float_relation_greater);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(dfcmpge)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ int cmp;
+ int32_t PdV;
+ arch_fpop_start(env);
+ cmp = float64_compare_quiet(RssV, RttV, &env->fp_status);
+ PdV = f8BITSOF(cmp == float_relation_greater ||
+ cmp == float_relation_equal);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(dfcmpuo)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ int32_t PdV;
+ arch_fpop_start(env);
+ PdV = f8BITSOF(float64_is_any_nan(RssV) ||
+ float64_is_any_nan(RttV));
+ arch_fpop_end(env);
+ return PdV;
+}
+
+int32_t HELPER(dfclass)(CPUHexagonState *env, float64 RssV, int32_t uiV)
+{
+ int32_t PdV = 0;
+ arch_fpop_start(env);
+ if (fGETBIT(0, uiV) && float64_is_zero(RssV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(1, uiV) && float64_is_normal(RssV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(2, uiV) && float64_is_denormal(RssV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(3, uiV) && float64_is_infinity(RssV)) {
+ PdV = 0xff;
+ }
+ if (fGETBIT(4, uiV) && float64_is_any_nan(RssV)) {
+ PdV = 0xff;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+ arch_fpop_end(env);
+ return PdV;
+}
+
+float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
+{
+ float32 RdV;
+ arch_fpop_start(env);
+ RdV = internal_mpyf(RsV, RtV, &env->fp_status);
+ arch_fpop_end(env);
+ return RdV;
+}
+
+float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV)
+{
+ arch_fpop_start(env);
+ RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
+ arch_fpop_end(env);
+ return RxV;
+}
+
+static bool is_zero_prod(float32 a, float32 b)
+{
+ return ((float32_is_zero(a) && is_finite(b)) ||
+ (float32_is_zero(b) && is_finite(a)));
+}
+
+static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
+{
+ float32 ret = dst;
+ if (float32_is_any_nan(x)) {
+ /* A signaling NaN (quiet bit clear) raises invalid */
+ if (extract32(x, 22, 1) == 0) {
+ float_raise(float_flag_invalid, fp_status);
+ }
+ ret = make_float32(0xffffffff); /* nan */
+ }
+ return ret;
+}
+
+float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV, float32 PuV)
+{
+ size4s_t tmp;
+ arch_fpop_start(env);
+ RxV = check_nan(RxV, RxV, &env->fp_status);
+ RxV = check_nan(RxV, RsV, &env->fp_status);
+ RxV = check_nan(RxV, RtV, &env->fp_status);
+ tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
+ if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
+ RxV = tmp;
+ }
+ arch_fpop_end(env);
+ return RxV;
+}
+
+float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV)
+{
+ float32 neg_RsV;
+ arch_fpop_start(env);
+ neg_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
+ RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
+ arch_fpop_end(env);
+ return RxV;
+}
+
+static bool is_inf_prod(float32 a, float32 b)
+{
+ return (float32_is_infinity(a) && float32_is_infinity(b)) ||
+ (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
+ (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
+}
+
+float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV)
+{
+ bool infinp;
+ bool infminusinf;
+ float32 tmp;
+
+ arch_fpop_start(env);
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ infminusinf = float32_is_infinity(RxV) &&
+ is_inf_prod(RsV, RtV) &&
+ (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
+ infinp = float32_is_infinity(RxV) ||
+ float32_is_infinity(RtV) ||
+ float32_is_infinity(RsV);
+ RxV = check_nan(RxV, RxV, &env->fp_status);
+ RxV = check_nan(RxV, RsV, &env->fp_status);
+ RxV = check_nan(RxV, RtV, &env->fp_status);
+ tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
+ if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
+ RxV = tmp;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+ /* Overflow without an infinite input steps back to the largest finite value */
+ if (float32_is_infinity(RxV) && !infinp) {
+ RxV = RxV - 1;
+ }
+ if (infminusinf) {
+ RxV = 0;
+ }
+ arch_fpop_end(env);
+ return RxV;
+}
+
+float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
+ float32 RsV, float32 RtV)
+{
+ bool infinp;
+ bool infminusinf;
+ float32 tmp;
+
+ arch_fpop_start(env);
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ infminusinf = float32_is_infinity(RxV) &&
+ is_inf_prod(RsV, RtV) &&
+ (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
+ infinp = float32_is_infinity(RxV) ||
+ float32_is_infinity(RtV) ||
+ float32_is_infinity(RsV);
+ RxV = check_nan(RxV, RxV, &env->fp_status);
+ RxV = check_nan(RxV, RsV, &env->fp_status);
+ RxV = check_nan(RxV, RtV, &env->fp_status);
+ float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
+ tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
+ if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
+ RxV = tmp;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+ /* Overflow without an infinite input steps back to the largest finite value */
+ if (float32_is_infinity(RxV) && !infinp) {
+ RxV = RxV - 1;
+ }
+ if (infminusinf) {
+ RxV = 0;
+ }
+ arch_fpop_end(env);
+ return RxV;
+}
+
+float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
+{
+ float64 RddV;
+ arch_fpop_start(env);
+ if (float64_is_denormal(RssV) &&
+ (float64_getexp(RttV) >= 512) &&
+ float64_is_normal(RttV)) {
+ RddV = float64_mul(RssV, make_float64(0x4330000000000000),
+ &env->fp_status);
+ } else if (float64_is_denormal(RttV) &&
+ (float64_getexp(RssV) >= 512) &&
+ float64_is_normal(RssV)) {
+ RddV = float64_mul(RssV, make_float64(0x3cb0000000000000),
+ &env->fp_status);
+ } else {
+ RddV = RssV;
+ }
+ arch_fpop_end(env);
+ return RddV;
+}
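+
+/*
+ * Note (illustrative): 0x4330000000000000 is 2.0^52 and 0x3cb0000000000000
+ * is 2.0^-52, so applying dfmpyfix to both operands of a multiply scales
+ * one up and the other down by the same factor, leaving the product
+ * unchanged while keeping denormal inputs out of the multiply.
+ */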
+
+float64 HELPER(dfmpyhh)(CPUHexagonState *env, float64 RxxV,
+ float64 RssV, float64 RttV)
+{
+ arch_fpop_start(env);
+ RxxV = internal_mpyhh(RssV, RttV, RxxV, &env->fp_status);
+ arch_fpop_end(env);
+ return RxxV;
+}
+
+/* Histogram instructions */
+
+void HELPER(vhist)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int lane = 0; lane < 8; lane++) {
+ for (int i = 0; i < sizeof(MMVector) / 8; ++i) {
+ unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i];
+ unsigned char regno = value >> 3;
+ unsigned char element = value & 7;
+
+ env->VRegs[regno].uh[(sizeof(MMVector) / 16) * lane + element]++;
+ }
+ }
+}
+
+void HELPER(vhistq)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int lane = 0; lane < 8; lane++) {
+ for (int i = 0; i < sizeof(MMVector) / 8; ++i) {
+ unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i];
+ unsigned char regno = value >> 3;
+ unsigned char element = value & 7;
+
+ if (fGETQBIT(env->qtmp, sizeof(MMVector) / 8 * lane + i)) {
+ env->VRegs[regno].uh[
+ (sizeof(MMVector) / 16) * lane + element]++;
+ }
+ }
+ }
+}
+
+void HELPER(vwhist256)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7);
+
+ env->VRegs[vindex].uh[elindex] =
+ env->VRegs[vindex].uh[elindex] + weight;
+ }
+}
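+
+/*
+ * Index computation example (illustrative): for i == 10 with
+ * bucket == 0x2b, vindex == (0x2b >> 3) & 0x1f == 5 and
+ * elindex == (10 & ~7) | (0x2b & 7) == 11, so the weight is accumulated
+ * into VRegs[5].uh[11].
+ */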
+
+void HELPER(vwhist256q)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7);
+
+ if (fGETQBIT(env->qtmp, 2 * i)) {
+ env->VRegs[vindex].uh[elindex] =
+ env->VRegs[vindex].uh[elindex] + weight;
+ }
+ }
+}
+
+void HELPER(vwhist256_sat)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7);
+
+ env->VRegs[vindex].uh[elindex] =
+ fVSATUH(env->VRegs[vindex].uh[elindex] + weight);
+ }
+}
+
+void HELPER(vwhist256q_sat)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7);
+
+ if (fGETQBIT(env->qtmp, 2 * i)) {
+ env->VRegs[vindex].uh[elindex] =
+ fVSATUH(env->VRegs[vindex].uh[elindex] + weight);
+ }
+ }
+}
+
+void HELPER(vwhist128)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3);
+
+ env->VRegs[vindex].uw[elindex] =
+ env->VRegs[vindex].uw[elindex] + weight;
+ }
+}
+
+void HELPER(vwhist128q)(CPUHexagonState *env)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3);
+
+ if (fGETQBIT(env->qtmp, 2 * i)) {
+ env->VRegs[vindex].uw[elindex] =
+ env->VRegs[vindex].uw[elindex] + weight;
+ }
+ }
+}
+
+void HELPER(vwhist128m)(CPUHexagonState *env, int32_t uiV)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3);
+
+ if ((bucket & 1) == uiV) {
+ env->VRegs[vindex].uw[elindex] =
+ env->VRegs[vindex].uw[elindex] + weight;
+ }
+ }
+}
+
+void HELPER(vwhist128qm)(CPUHexagonState *env, int32_t uiV)
+{
+ MMVector *input = &env->tmp_VRegs[0];
+
+ for (int i = 0; i < (sizeof(MMVector) / 2); i++) {
+ unsigned int bucket = fGETUBYTE(0, input->h[i]);
+ unsigned int weight = fGETUBYTE(1, input->h[i]);
+ unsigned int vindex = (bucket >> 3) & 0x1F;
+ unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3);
+
+ if (((bucket & 1) == uiV) && fGETQBIT(env->qtmp, 2 * i)) {
+ env->VRegs[vindex].uw[elindex] =
+ env->VRegs[vindex].uw[elindex] + weight;
+ }
+ }
+}
+
+static void cancel_slot(CPUHexagonState *env, uint32_t slot)
+{
+ HEX_DEBUG_LOG("Slot %d cancelled\n", slot);
+ env->slot_cancelled |= (1 << slot);
+}
+
+/* These macros can be referenced in the generated helper functions */
+#define warn(...) /* Nothing */
+#define fatal(...) g_assert_not_reached();
+
+#define BOGUS_HELPER(tag) \
+ printf("ERROR: bogus helper: " #tag "\n")
+
+#include "helper_funcs_generated.c.inc"
diff --git a/target/hexagon/opcodes.c b/target/hexagon/opcodes.c
new file mode 100644
index 000000000..35d790cdd
--- /dev/null
+++ b/target/hexagon/opcodes.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * opcodes.c
+ *
+ * Data tables generated automatically, plus a few
+ * hand-written support functions.
+ */
+
+#include "qemu/osdep.h"
+#include "attribs.h"
+#include "decode.h"
+
+#define VEC_DESCR(A, B, C) DESCR(A, B, C)
+#define DONAME(X) #X
+
+const char * const opcode_names[] = {
+#define OPCODE(IID) DONAME(IID)
+#include "opcodes_def_generated.h.inc"
+ NULL
+#undef OPCODE
+};
+
+const char * const opcode_reginfo[] = {
+#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */
+#define REGINFO(TAG, REGINFO, RREGS, WREGS) REGINFO,
+#include "op_regs_generated.h.inc"
+ NULL
+#undef REGINFO
+#undef IMMINFO
+};
+
+
+const char * const opcode_rregs[] = {
+#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */
+#define REGINFO(TAG, REGINFO, RREGS, WREGS) RREGS,
+#include "op_regs_generated.h.inc"
+ NULL
+#undef REGINFO
+#undef IMMINFO
+};
+
+
+const char * const opcode_wregs[] = {
+#define IMMINFO(TAG, SIGN, SIZE, SHAMT, SIGN2, SIZE2, SHAMT2) /* nothing */
+#define REGINFO(TAG, REGINFO, RREGS, WREGS) WREGS,
+#include "op_regs_generated.h.inc"
+ NULL
+#undef REGINFO
+#undef IMMINFO
+};
+
+const char * const opcode_short_semantics[] = {
+#define DEF_SHORTCODE(TAG, SHORTCODE) [TAG] = #SHORTCODE,
+#include "shortcode_generated.h.inc"
+#undef DEF_SHORTCODE
+ NULL
+};
+
+DECLARE_BITMAP(opcode_attribs[XX_LAST_OPCODE], A_ZZ_LASTATTRIB);
+
+static void init_attribs(int tag, ...)
+{
+ va_list ap;
+ int attr;
+ va_start(ap, tag);
+ while ((attr = va_arg(ap, int)) != 0) {
+ set_bit(attr, opcode_attribs[tag]);
+ }
+ va_end(ap);
+}
+
+const OpcodeEncoding opcode_encodings[] = {
+#define DEF_ENC32(OPCODE, ENCSTR) \
+ [OPCODE] = { .encoding = ENCSTR },
+
+#define DEF_ENC_SUBINSN(OPCODE, CLASS, ENCSTR) \
+ [OPCODE] = { .encoding = ENCSTR, .enc_class = CLASS },
+
+#define DEF_EXT_ENC(OPCODE, CLASS, ENCSTR) \
+ [OPCODE] = { .encoding = ENCSTR, .enc_class = CLASS },
+
+#include "imported/encode.def"
+
+#undef DEF_ENC32
+#undef DEF_ENC_SUBINSN
+#undef DEF_EXT_ENC
+};
+
+void opcode_init(void)
+{
+ init_attribs(0, 0);
+
+#define ATTRIBS(...) , ## __VA_ARGS__, 0
+#define OP_ATTRIB(TAG, ARGS) init_attribs(TAG ARGS);
+#include "op_attribs_generated.h.inc"
+#undef OP_ATTRIB
+#undef ATTRIBS
+
+ decode_init();
+}
+
+
+#define NEEDLE "IMMEXT("
+
+int opcode_which_immediate_is_extended(Opcode opcode)
+{
+ const char *p;
+
+ g_assert(opcode < XX_LAST_OPCODE);
+ g_assert(GET_ATTRIB(opcode, A_EXTENDABLE));
+
+ p = opcode_short_semantics[opcode];
+ p = strstr(p, NEEDLE);
+ g_assert(p);
+ p += strlen(NEEDLE);
+ while (isspace(*p)) {
+ p++;
+ }
+ /* lower is always imm 0, upper always imm 1. */
+ if (islower(*p)) {
+ return 0;
+ } else if (isupper(*p)) {
+ return 1;
+ } else {
+ g_assert_not_reached();
+ }
+}
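+
+/*
+ * Example (illustrative): if an opcode's short semantics contain
+ * "IMMEXT(uiV)", the character after the needle is lowercase, so
+ * immediate 0 is the extended one; "IMMEXT(UiV)" would select
+ * immediate 1.
+ */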
diff --git a/target/hexagon/opcodes.h b/target/hexagon/opcodes.h
new file mode 100644
index 000000000..6e90e00fe
--- /dev/null
+++ b/target/hexagon/opcodes.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_OPCODES_H
+#define HEXAGON_OPCODES_H
+
+typedef enum {
+#define OPCODE(IID) IID
+#include "opcodes_def_generated.h.inc"
+ XX_LAST_OPCODE
+#undef OPCODE
+} Opcode;
+
+typedef enum {
+ NORMAL,
+ HALF,
+ SUBINSN_A,
+ SUBINSN_L1,
+ SUBINSN_L2,
+ SUBINSN_S1,
+ SUBINSN_S2,
+ EXT_noext,
+ EXT_mmvec,
+ XX_LAST_ENC_CLASS
+} EncClass;
+
+extern const char * const opcode_names[];
+
+extern const char * const opcode_reginfo[];
+extern const char * const opcode_rregs[];
+extern const char * const opcode_wregs[];
+
+typedef struct {
+ const char * const encoding;
+ const EncClass enc_class;
+} OpcodeEncoding;
+
+extern const OpcodeEncoding opcode_encodings[XX_LAST_OPCODE];
+
+void opcode_init(void);
+
+int opcode_which_immediate_is_extended(Opcode opcode);
+
+#endif
diff --git a/target/hexagon/printinsn.c b/target/hexagon/printinsn.c
new file mode 100644
index 000000000..4865cdd13
--- /dev/null
+++ b/target/hexagon/printinsn.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "attribs.h"
+#include "printinsn.h"
+#include "insn.h"
+#include "reg_fields.h"
+#include "internal.h"
+
+static const char *sreg2str(unsigned int reg)
+{
+ if (reg < TOTAL_PER_THREAD_REGS) {
+ return hexagon_regnames[reg];
+ } else {
+ return "???";
+ }
+}
+
+static const char *creg2str(unsigned int reg)
+{
+ return sreg2str(reg + HEX_REG_SA0);
+}
+
+static void snprintinsn(GString *buf, Insn *insn)
+{
+ switch (insn->opcode) {
+#define DEF_VECX_PRINTINFO(TAG, FMT, ...) DEF_PRINTINFO(TAG, FMT, __VA_ARGS__)
+#define DEF_PRINTINFO(TAG, FMT, ...) \
+ case TAG: \
+ g_string_append_printf(buf, FMT, __VA_ARGS__); \
+ break;
+#include "printinsn_generated.h.inc"
+#undef DEF_VECX_PRINTINFO
+#undef DEF_PRINTINFO
+ }
+}
+
+void snprint_a_pkt_disas(GString *buf, Packet *pkt, uint32_t *words,
+ target_ulong pc)
+{
+ bool has_endloop0 = false;
+ bool has_endloop1 = false;
+ bool has_endloop01 = false;
+
+ for (int i = 0; i < pkt->num_insns; i++) {
+ if (pkt->insn[i].part1) {
+ continue;
+ }
+
+ /* We'll print the endloops at the end of the packet */
+ if (pkt->insn[i].opcode == J2_endloop0) {
+ has_endloop0 = true;
+ continue;
+ }
+ if (pkt->insn[i].opcode == J2_endloop1) {
+ has_endloop1 = true;
+ continue;
+ }
+ if (pkt->insn[i].opcode == J2_endloop01) {
+ has_endloop01 = true;
+ continue;
+ }
+
+ g_string_append_printf(buf, "0x" TARGET_FMT_lx "\t", words[i]);
+
+ if (i == 0) {
+ g_string_append(buf, "{");
+ }
+
+ g_string_append(buf, "\t");
+ snprintinsn(buf, &(pkt->insn[i]));
+
+ if (i < pkt->num_insns - 1) {
+ /*
+ * Subinstructions are two instructions encoded
+ * in the same word. Print them on the same line.
+ */
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN)) {
+ g_string_append(buf, "; ");
+ snprintinsn(buf, &(pkt->insn[i + 1]));
+ i++;
+ } else if (pkt->insn[i + 1].opcode != J2_endloop0 &&
+ pkt->insn[i + 1].opcode != J2_endloop1 &&
+ pkt->insn[i + 1].opcode != J2_endloop01) {
+ pc += 4;
+ g_string_append_printf(buf, "\n0x" TARGET_FMT_lx ": ", pc);
+ }
+ }
+ }
+ g_string_append(buf, " }");
+ if (has_endloop0) {
+ g_string_append(buf, " :endloop0");
+ }
+ if (has_endloop1) {
+ g_string_append(buf, " :endloop1");
+ }
+ if (has_endloop01) {
+ g_string_append(buf, " :endloop01");
+ }
+}
+
+void snprint_a_pkt_debug(GString *buf, Packet *pkt)
+{
+ int slot, opcode;
+
+ if (pkt->num_insns > 1) {
+ g_string_append(buf, "\n{\n");
+ }
+
+ for (int i = 0; i < pkt->num_insns; i++) {
+ if (pkt->insn[i].part1) {
+ continue;
+ }
+ g_string_append(buf, "\t");
+ snprintinsn(buf, &(pkt->insn[i]));
+
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_SUBINSN)) {
+ g_string_append(buf, " //subinsn");
+ }
+ if (pkt->insn[i].extension_valid) {
+ g_string_append(buf, " //constant extended");
+ }
+ slot = pkt->insn[i].slot;
+ opcode = pkt->insn[i].opcode;
+ g_string_append_printf(buf, " //slot=%d:tag=%s\n",
+ slot, opcode_names[opcode]);
+ }
+ if (pkt->num_insns > 1) {
+ g_string_append(buf, "}\n");
+ }
+}
diff --git a/target/hexagon/printinsn.h b/target/hexagon/printinsn.h
new file mode 100644
index 000000000..2ecd1731d
--- /dev/null
+++ b/target/hexagon/printinsn.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_PRINTINSN_H
+#define HEXAGON_PRINTINSN_H
+
+#include "insn.h"
+
+void snprint_a_pkt_disas(GString *buf, Packet *pkt, uint32_t *words,
+ target_ulong pc);
+void snprint_a_pkt_debug(GString *buf, Packet *pkt);
+
+#endif
diff --git a/target/hexagon/reg_fields.c b/target/hexagon/reg_fields.c
new file mode 100644
index 000000000..671320372
--- /dev/null
+++ b/target/hexagon/reg_fields.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "reg_fields.h"
+
+const RegField reg_field_info[NUM_REG_FIELDS] = {
+#define DEF_REG_FIELD(TAG, START, WIDTH) \
+ { START, WIDTH },
+#include "reg_fields_def.h.inc"
+#undef DEF_REG_FIELD
+};
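+
+/*
+ * Usage sketch (hypothetical, for illustration): a field such as USR_OVF
+ * can be read with
+ *
+ *     uint32_t ovf = extract32(usr, reg_field_info[USR_OVF].offset,
+ *                              reg_field_info[USR_OVF].width);
+ */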
diff --git a/target/hexagon/reg_fields.h b/target/hexagon/reg_fields.h
new file mode 100644
index 000000000..9e2ad5d99
--- /dev/null
+++ b/target/hexagon/reg_fields.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_REG_FIELDS_H
+#define HEXAGON_REG_FIELDS_H
+
+typedef struct {
+ int offset;
+ int width;
+} RegField;
+
+enum {
+#define DEF_REG_FIELD(TAG, START, WIDTH) \
+ TAG,
+#include "reg_fields_def.h.inc"
+ NUM_REG_FIELDS
+#undef DEF_REG_FIELD
+};
+
+extern const RegField reg_field_info[NUM_REG_FIELDS];
+
+#endif
diff --git a/target/hexagon/reg_fields_def.h.inc b/target/hexagon/reg_fields_def.h.inc
new file mode 100644
index 000000000..f2a58d486
--- /dev/null
+++ b/target/hexagon/reg_fields_def.h.inc
@@ -0,0 +1,41 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * For registers that have individual fields, describe them here:
+ * DEF_REG_FIELD(tag,
+ * bit start offset,
+ * width)
+ */
+
+/* USR fields */
+DEF_REG_FIELD(USR_OVF, 0, 1)
+DEF_REG_FIELD(USR_FPINVF, 1, 1)
+DEF_REG_FIELD(USR_FPDBZF, 2, 1)
+DEF_REG_FIELD(USR_FPOVFF, 3, 1)
+DEF_REG_FIELD(USR_FPUNFF, 4, 1)
+DEF_REG_FIELD(USR_FPINPF, 5, 1)
+
+DEF_REG_FIELD(USR_LPCFG, 8, 2)
+
+DEF_REG_FIELD(USR_FPRND, 22, 2)
+
+DEF_REG_FIELD(USR_FPINVE, 25, 1)
+DEF_REG_FIELD(USR_FPDBZE, 26, 1)
+DEF_REG_FIELD(USR_FPOVFE, 27, 1)
+DEF_REG_FIELD(USR_FPUNFE, 28, 1)
+DEF_REG_FIELD(USR_FPINPE, 29, 1)
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
new file mode 100644
index 000000000..b6f541ecb
--- /dev/null
+++ b/target/hexagon/translate.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define QEMU_GENERATE
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "internal.h"
+#include "attribs.h"
+#include "insn.h"
+#include "decode.h"
+#include "translate.h"
+#include "printinsn.h"
+
+TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
+TCGv hex_pred[NUM_PREGS];
+TCGv hex_next_PC;
+TCGv hex_this_PC;
+TCGv hex_slot_cancelled;
+TCGv hex_branch_taken;
+TCGv hex_new_value[TOTAL_PER_THREAD_REGS];
+TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
+TCGv hex_new_pred_value[NUM_PREGS];
+TCGv hex_pred_written;
+TCGv hex_store_addr[STORES_MAX];
+TCGv hex_store_width[STORES_MAX];
+TCGv hex_store_val32[STORES_MAX];
+TCGv_i64 hex_store_val64[STORES_MAX];
+TCGv hex_pkt_has_store_s1;
+TCGv hex_dczero_addr;
+TCGv hex_llsc_addr;
+TCGv hex_llsc_val;
+TCGv_i64 hex_llsc_val_i64;
+TCGv hex_VRegs_updated;
+TCGv hex_QRegs_updated;
+TCGv hex_vstore_addr[VSTORES_MAX];
+TCGv hex_vstore_size[VSTORES_MAX];
+TCGv hex_vstore_pending[VSTORES_MAX];
+
+static const char * const hexagon_prednames[] = {
+ "p0", "p1", "p2", "p3"
+};
+
+intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum,
+ int num, bool alloc_ok)
+{
+ intptr_t offset;
+
+ /* See if it is already allocated */
+ for (int i = 0; i < ctx->future_vregs_idx; i++) {
+ if (ctx->future_vregs_num[i] == regnum) {
+ return offsetof(CPUHexagonState, future_VRegs[i]);
+ }
+ }
+
+ g_assert(alloc_ok);
+ offset = offsetof(CPUHexagonState, future_VRegs[ctx->future_vregs_idx]);
+ for (int i = 0; i < num; i++) {
+ ctx->future_vregs_num[ctx->future_vregs_idx + i] = regnum++;
+ }
+ ctx->future_vregs_idx += num;
+ g_assert(ctx->future_vregs_idx <= VECTOR_TEMPS_MAX);
+ return offset;
+}
+
+intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum,
+ int num, bool alloc_ok)
+{
+ intptr_t offset;
+
+ /* See if it is already allocated */
+ for (int i = 0; i < ctx->tmp_vregs_idx; i++) {
+ if (ctx->tmp_vregs_num[i] == regnum) {
+ return offsetof(CPUHexagonState, tmp_VRegs[i]);
+ }
+ }
+
+ g_assert(alloc_ok);
+ offset = offsetof(CPUHexagonState, tmp_VRegs[ctx->tmp_vregs_idx]);
+ for (int i = 0; i < num; i++) {
+ ctx->tmp_vregs_num[ctx->tmp_vregs_idx + i] = regnum++;
+ }
+ ctx->tmp_vregs_idx += num;
+ g_assert(ctx->tmp_vregs_idx <= VECTOR_TEMPS_MAX);
+ return offset;
+}
+
+static void gen_exception_raw(int excp)
+{
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+}
+
+static void gen_exec_counters(DisasContext *ctx)
+{
+ tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_PKT_CNT],
+ hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets);
+ tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT],
+ hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns);
+ tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_HVX_CNT],
+ hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns);
+}
+
+static void gen_end_tb(DisasContext *ctx)
+{
+ gen_exec_counters(ctx);
+ tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception_end_tb(DisasContext *ctx, int excp)
+{
+ gen_exec_counters(ctx);
+ tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
+ gen_exception_raw(excp);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+#define PACKET_BUFFER_LEN 1028
+static void print_pkt(Packet *pkt)
+{
+ GString *buf = g_string_sized_new(PACKET_BUFFER_LEN);
+ snprint_a_pkt_debug(buf, pkt);
+ HEX_DEBUG_LOG("%s", buf->str);
+ g_string_free(buf, true);
+}
+#define HEX_DEBUG_PRINT_PKT(pkt) \
+ do { \
+ if (HEX_DEBUG) { \
+ print_pkt(pkt); \
+ } \
+ } while (0)
+
+static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
+ uint32_t words[])
+{
+ bool found_end = false;
+ int nwords, max_words;
+
+ memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
+ for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
+ words[nwords] =
+ translator_ldl(env, &ctx->base,
+ ctx->base.pc_next + nwords * sizeof(uint32_t));
+ found_end = is_packet_end(words[nwords]);
+ }
+ if (!found_end) {
+ /* Read too many words without finding the end */
+ return 0;
+ }
+
+ /* Check for page boundary crossing */
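+    /* -(pc | TARGET_PAGE_MASK) is the number of bytes remaining on this page */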
+ max_words = -(ctx->base.pc_next | TARGET_PAGE_MASK) / sizeof(uint32_t);
+ if (nwords > max_words) {
+ /* We can only cross a page boundary at the beginning of a TB */
+ g_assert(ctx->base.num_insns == 1);
+ }
+
+ HEX_DEBUG_LOG("decode_packet: pc = 0x%x\n", ctx->base.pc_next);
+ HEX_DEBUG_LOG(" words = { ");
+ for (int i = 0; i < nwords; i++) {
+ HEX_DEBUG_LOG("0x%x, ", words[i]);
+ }
+ HEX_DEBUG_LOG("}\n");
+
+ return nwords;
+}
+
+static bool check_for_attrib(Packet *pkt, int attrib)
+{
+ for (int i = 0; i < pkt->num_insns; i++) {
+ if (GET_ATTRIB(pkt->insn[i].opcode, attrib)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool need_pc(Packet *pkt)
+{
+ return check_for_attrib(pkt, A_IMPLICIT_READS_PC);
+}
+
+static bool need_slot_cancelled(Packet *pkt)
+{
+ return check_for_attrib(pkt, A_CONDEXEC);
+}
+
+static bool need_pred_written(Packet *pkt)
+{
+ return check_for_attrib(pkt, A_WRITES_PRED_REG);
+}
+
+static void gen_start_packet(DisasContext *ctx, Packet *pkt)
+{
+ target_ulong next_PC = ctx->base.pc_next + pkt->encod_pkt_size_in_bytes;
+ int i;
+
+ /* Clear out the disassembly context */
+ ctx->reg_log_idx = 0;
+ bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS);
+ ctx->preg_log_idx = 0;
+ bitmap_zero(ctx->pregs_written, NUM_PREGS);
+ ctx->future_vregs_idx = 0;
+ ctx->tmp_vregs_idx = 0;
+ ctx->vreg_log_idx = 0;
+ bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS);
+ bitmap_zero(ctx->vregs_updated, NUM_VREGS);
+ bitmap_zero(ctx->vregs_select, NUM_VREGS);
+ ctx->qreg_log_idx = 0;
+ for (i = 0; i < STORES_MAX; i++) {
+ ctx->store_width[i] = 0;
+ }
+ tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1);
+ ctx->s1_store_processed = false;
+ ctx->pre_commit = true;
+
+ if (HEX_DEBUG) {
+ /* Handy place to set a breakpoint before the packet executes */
+ gen_helper_debug_start_packet(cpu_env);
+ tcg_gen_movi_tl(hex_this_PC, ctx->base.pc_next);
+ }
+
+ /* Initialize the runtime state for packet semantics */
+ if (need_pc(pkt)) {
+ tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
+ }
+ if (need_slot_cancelled(pkt)) {
+ tcg_gen_movi_tl(hex_slot_cancelled, 0);
+ }
+ if (pkt->pkt_has_cof) {
+ tcg_gen_movi_tl(hex_branch_taken, 0);
+ tcg_gen_movi_tl(hex_next_PC, next_PC);
+ }
+ if (need_pred_written(pkt)) {
+ tcg_gen_movi_tl(hex_pred_written, 0);
+ }
+
+ if (pkt->pkt_has_hvx) {
+ tcg_gen_movi_tl(hex_VRegs_updated, 0);
+ tcg_gen_movi_tl(hex_QRegs_updated, 0);
+ }
+}
+
+bool is_gather_store_insn(Insn *insn, Packet *pkt)
+{
+ if (GET_ATTRIB(insn->opcode, A_CVI_NEW) &&
+ insn->new_value_producer_slot == 1) {
+ /* Look for gather instruction */
+ for (int i = 0; i < pkt->num_insns; i++) {
+ Insn *in = &pkt->insn[i];
+ if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/*
+ * The LOG_*_WRITE macros mark most of the writes in a packet.
+ * However, there are some implicit writes marked as attributes
+ * of the applicable instructions.
+ */
+static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn,
+ int attrib, int rnum)
+{
+ if (GET_ATTRIB(insn->opcode, attrib)) {
+ /*
+ * USR is used to set overflow and FP exceptions,
+ * so treat it as conditional
+ */
+ bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC) ||
+ rnum == HEX_REG_USR;
+ if (is_predicated && !is_preloaded(ctx, rnum)) {
+ tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]);
+ }
+
+ ctx_log_reg_write(ctx, rnum);
+ }
+}
+
+static void mark_implicit_pred_write(DisasContext *ctx, Insn *insn,
+ int attrib, int pnum)
+{
+ if (GET_ATTRIB(insn->opcode, attrib)) {
+ ctx_log_pred_write(ctx, pnum);
+ }
+}
+
+static void mark_implicit_reg_writes(DisasContext *ctx, Insn *insn)
+{
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_FP, HEX_REG_FP);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SP, HEX_REG_SP);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LR, HEX_REG_LR);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC0, HEX_REG_LC0);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1);
+ mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_USR, HEX_REG_USR);
+ mark_implicit_reg_write(ctx, insn, A_FPOP, HEX_REG_USR);
+}
+
+static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn)
+{
+ mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P0, 0);
+ mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P1, 1);
+ mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P2, 2);
+ mark_implicit_pred_write(ctx, insn, A_IMPLICIT_WRITES_P3, 3);
+}
+
+static void gen_insn(CPUHexagonState *env, DisasContext *ctx,
+ Insn *insn, Packet *pkt)
+{
+ if (insn->generate) {
+ mark_implicit_reg_writes(ctx, insn);
+ insn->generate(env, ctx, insn, pkt);
+ mark_implicit_pred_writes(ctx, insn);
+ } else {
+ gen_exception_end_tb(ctx, HEX_EXCP_INVALID_OPCODE);
+ }
+}
+
+/*
+ * Helpers for generating the packet commit
+ */
+static void gen_reg_writes(DisasContext *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->reg_log_idx; i++) {
+ int reg_num = ctx->reg_log[i];
+
+ tcg_gen_mov_tl(hex_gpr[reg_num], hex_new_value[reg_num]);
+ }
+}
+
+static void gen_pred_writes(DisasContext *ctx, Packet *pkt)
+{
+ int i;
+
+ /* Early exit if the log is empty */
+ if (!ctx->preg_log_idx) {
+ return;
+ }
+
+ /*
+ * Only endloop instructions will conditionally
+ * write a predicate. If there are no endloop
+ * instructions, we can use the non-conditional
+ * write of the predicates.
+ */
+ if (pkt->pkt_has_endloop) {
+ TCGv zero = tcg_constant_tl(0);
+ TCGv pred_written = tcg_temp_new();
+ for (i = 0; i < ctx->preg_log_idx; i++) {
+ int pred_num = ctx->preg_log[i];
+
+ tcg_gen_andi_tl(pred_written, hex_pred_written, 1 << pred_num);
+ tcg_gen_movcond_tl(TCG_COND_NE, hex_pred[pred_num],
+ pred_written, zero,
+ hex_new_pred_value[pred_num],
+ hex_pred[pred_num]);
+ }
+ tcg_temp_free(pred_written);
+ } else {
+ for (i = 0; i < ctx->preg_log_idx; i++) {
+ int pred_num = ctx->preg_log[i];
+ tcg_gen_mov_tl(hex_pred[pred_num], hex_new_pred_value[pred_num]);
+ if (HEX_DEBUG) {
+ /* Do this so HELPER(debug_commit_end) will know */
+ tcg_gen_ori_tl(hex_pred_written, hex_pred_written,
+ 1 << pred_num);
+ }
+ }
+ }
+}
+
+static void gen_check_store_width(DisasContext *ctx, int slot_num)
+{
+ if (HEX_DEBUG) {
+ TCGv slot = tcg_constant_tl(slot_num);
+ TCGv check = tcg_constant_tl(ctx->store_width[slot_num]);
+ gen_helper_debug_check_store_width(cpu_env, slot, check);
+ }
+}
+
+static bool slot_is_predicated(Packet *pkt, int slot_num)
+{
+ for (int i = 0; i < pkt->num_insns; i++) {
+ if (pkt->insn[i].slot == slot_num) {
+ return GET_ATTRIB(pkt->insn[i].opcode, A_CONDEXEC);
+ }
+ }
+ /* If we get to here, we didn't find an instruction in the requested slot */
+ g_assert_not_reached();
+}
+
+void process_store(DisasContext *ctx, Packet *pkt, int slot_num)
+{
+ bool is_predicated = slot_is_predicated(pkt, slot_num);
+ TCGLabel *label_end = NULL;
+
+ /*
+ * We may have already processed this store
+ * See CHECK_NOSHUF in macros.h
+ */
+ if (slot_num == 1 && ctx->s1_store_processed) {
+ return;
+ }
+ ctx->s1_store_processed = true;
+
+ if (is_predicated) {
+ TCGv cancelled = tcg_temp_new();
+ label_end = gen_new_label();
+
+ /* Don't do anything if the slot was cancelled */
+ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
+ tcg_temp_free(cancelled);
+ }
+ {
+ TCGv address = tcg_temp_local_new();
+ tcg_gen_mov_tl(address, hex_store_addr[slot_num]);
+
+ /*
+ * If we know the width from the DisasContext, we can
+ * generate much cleaner code.
+ * Unfortunately, not all instructions execute the fSTORE
+ * macro during code generation. Anything that uses the
+ * generic helper will have this problem. Instructions
+ * that use fWRAP to generate proper TCG code will be OK.
+ */
+ switch (ctx->store_width[slot_num]) {
+ case 1:
+ gen_check_store_width(ctx, slot_num);
+ tcg_gen_qemu_st8(hex_store_val32[slot_num],
+ hex_store_addr[slot_num],
+ ctx->mem_idx);
+ break;
+ case 2:
+ gen_check_store_width(ctx, slot_num);
+ tcg_gen_qemu_st16(hex_store_val32[slot_num],
+ hex_store_addr[slot_num],
+ ctx->mem_idx);
+ break;
+ case 4:
+ gen_check_store_width(ctx, slot_num);
+ tcg_gen_qemu_st32(hex_store_val32[slot_num],
+ hex_store_addr[slot_num],
+ ctx->mem_idx);
+ break;
+ case 8:
+ gen_check_store_width(ctx, slot_num);
+ tcg_gen_qemu_st64(hex_store_val64[slot_num],
+ hex_store_addr[slot_num],
+ ctx->mem_idx);
+ break;
+ default:
+ {
+ /*
+ * If we get to here, we don't know the width at
+                 * TCG generation time, so we'll use a helper to
+ * avoid branching based on the width at runtime.
+ */
+ TCGv slot = tcg_constant_tl(slot_num);
+ gen_helper_commit_store(cpu_env, slot);
+ }
+ }
+ tcg_temp_free(address);
+ }
+ if (is_predicated) {
+ gen_set_label(label_end);
+ }
+}
+
+static void process_store_log(DisasContext *ctx, Packet *pkt)
+{
+ /*
+ * When a packet has two stores, the hardware processes
+ * slot 1 and then slot 0. This will be important when
+ * the memory accesses overlap.
+ */
+ if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) {
+ process_store(ctx, pkt, 1);
+ }
+ if (pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa) {
+ process_store(ctx, pkt, 0);
+ }
+}
+
+/* Zero out a 32-byte cache line */
+static void process_dczeroa(DisasContext *ctx, Packet *pkt)
+{
+ if (pkt->pkt_has_dczeroa) {
+ /* Store 32 bytes of zero starting at (addr & ~0x1f) */
+ TCGv addr = tcg_temp_new();
+ TCGv_i64 zero = tcg_constant_i64(0);
+
+ tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
+ tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+
+ tcg_temp_free(addr);
+ }
+}
+
+static bool pkt_has_hvx_store(Packet *pkt)
+{
+ int i;
+ for (i = 0; i < pkt->num_insns; i++) {
+ int opcode = pkt->insn[i].opcode;
+ if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_STORE)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static void gen_commit_hvx(DisasContext *ctx, Packet *pkt)
+{
+ int i;
+
+ /*
+ * for (i = 0; i < ctx->vreg_log_idx; i++) {
+ * int rnum = ctx->vreg_log[i];
+ * if (ctx->vreg_is_predicated[i]) {
+ * if (env->VRegs_updated & (1 << rnum)) {
+ * env->VRegs[rnum] = env->future_VRegs[rnum];
+ * }
+ * } else {
+ * env->VRegs[rnum] = env->future_VRegs[rnum];
+ * }
+ * }
+ */
+ for (i = 0; i < ctx->vreg_log_idx; i++) {
+ int rnum = ctx->vreg_log[i];
+ bool is_predicated = ctx->vreg_is_predicated[i];
+ intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]);
+ intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false);
+ size_t size = sizeof(MMVector);
+
+ if (is_predicated) {
+ TCGv cmp = tcg_temp_new();
+ TCGLabel *label_skip = gen_new_label();
+
+ tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
+ tcg_temp_free(cmp);
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
+ gen_set_label(label_skip);
+ } else {
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
+ }
+ }
+
+ /*
+ * for (i = 0; i < ctx->qreg_log_idx; i++) {
+ * int rnum = ctx->qreg_log[i];
+ * if (ctx->qreg_is_predicated[i]) {
+ *         if (env->QRegs_updated & (1 << rnum)) {
+ * env->QRegs[rnum] = env->future_QRegs[rnum];
+ * }
+ * } else {
+ * env->QRegs[rnum] = env->future_QRegs[rnum];
+ * }
+ * }
+ */
+ for (i = 0; i < ctx->qreg_log_idx; i++) {
+ int rnum = ctx->qreg_log[i];
+ bool is_predicated = ctx->qreg_is_predicated[i];
+ intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]);
+ intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]);
+ size_t size = sizeof(MMQReg);
+
+ if (is_predicated) {
+ TCGv cmp = tcg_temp_new();
+ TCGLabel *label_skip = gen_new_label();
+
+ tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip);
+ tcg_temp_free(cmp);
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
+ gen_set_label(label_skip);
+ } else {
+ tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size);
+ }
+ }
+
+ if (pkt_has_hvx_store(pkt)) {
+ gen_helper_commit_hvx_stores(cpu_env);
+ }
+}
+
+static void update_exec_counters(DisasContext *ctx, Packet *pkt)
+{
+ int num_insns = pkt->num_insns;
+ int num_real_insns = 0;
+ int num_hvx_insns = 0;
+
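+    /*
+     * Count only "real" instructions: skip endloop markers, instructions
+     * flagged as part1, and NOPs.
+     */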
+ for (int i = 0; i < num_insns; i++) {
+ if (!pkt->insn[i].is_endloop &&
+ !pkt->insn[i].part1 &&
+ !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) {
+ num_real_insns++;
+ }
+ if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) {
+ num_hvx_insns++;
+ }
+ }
+
+ ctx->num_packets++;
+ ctx->num_insns += num_real_insns;
+ ctx->num_hvx_insns += num_hvx_insns;
+}
+
+static void gen_commit_packet(CPUHexagonState *env, DisasContext *ctx,
+ Packet *pkt)
+{
+ /*
+ * If there is more than one store in a packet, make sure they are all OK
+ * before proceeding with the rest of the packet commit.
+ *
+ * dczeroa has to be the only store operation in the packet, so we go
+ * ahead and process that first.
+ *
+ * When there is an HVX store, there can also be a scalar store in either
+ * slot 0 or slot 1, so we create a mask for the helper to indicate what
+ * work to do.
+ *
+ * When there are two scalar stores, we probe the one in slot 0.
+ *
+ * Note that we don't call the probe helper for packets with only one
+ * store. Therefore, we call process_store_log before anything else
+ * involved in committing the packet.
+ */
+ bool has_store_s0 = pkt->pkt_has_store_s0;
+ bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed);
+ bool has_hvx_store = pkt_has_hvx_store(pkt);
+ if (pkt->pkt_has_dczeroa) {
+ /*
+         * The dczeroa will be the store in slot 0; check that we don't have
+ * a store in slot 1 or an HVX store.
+ */
+ g_assert(has_store_s0 && !has_store_s1 && !has_hvx_store);
+ process_dczeroa(ctx, pkt);
+ } else if (has_hvx_store) {
+ TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);
+
+ if (!has_store_s0 && !has_store_s1) {
+ gen_helper_probe_hvx_stores(cpu_env, mem_idx);
+ } else {
+ int mask = 0;
+ TCGv mask_tcgv;
+
+ if (has_store_s0) {
+ mask |= (1 << 0);
+ }
+ if (has_store_s1) {
+ mask |= (1 << 1);
+ }
+ if (has_hvx_store) {
+ mask |= (1 << 2);
+ }
+ mask_tcgv = tcg_constant_tl(mask);
+ gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx);
+ }
+ } else if (has_store_s0 && has_store_s1) {
+ /*
+ * process_store_log will execute the slot 1 store first,
+ * so we only have to probe the store in slot 0
+ */
+ TCGv mem_idx = tcg_constant_tl(ctx->mem_idx);
+ gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx);
+ }
+
+ process_store_log(ctx, pkt);
+
+ gen_reg_writes(ctx);
+ gen_pred_writes(ctx, pkt);
+ if (pkt->pkt_has_hvx) {
+ gen_commit_hvx(ctx, pkt);
+ }
+ update_exec_counters(ctx, pkt);
+ if (HEX_DEBUG) {
+ TCGv has_st0 =
+ tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa);
+ TCGv has_st1 =
+ tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa);
+
+ /* Handy place to set a breakpoint at the end of execution */
+ gen_helper_debug_commit_end(cpu_env, has_st0, has_st1);
+ }
+
+ if (pkt->vhist_insn != NULL) {
+ ctx->pre_commit = false;
+ pkt->vhist_insn->generate(env, ctx, pkt->vhist_insn, pkt);
+ }
+
+ if (pkt->pkt_has_cof) {
+ gen_end_tb(ctx);
+ }
+}
+
+static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx)
+{
+ uint32_t words[PACKET_WORDS_MAX];
+ int nwords;
+ Packet pkt;
+ int i;
+
+ nwords = read_packet_words(env, ctx, words);
+ if (!nwords) {
+ gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
+ return;
+ }
+
+ if (decode_packet(nwords, words, &pkt, false) > 0) {
+ HEX_DEBUG_PRINT_PKT(&pkt);
+ gen_start_packet(ctx, &pkt);
+ for (i = 0; i < pkt.num_insns; i++) {
+ gen_insn(env, ctx, &pkt.insn[i], &pkt);
+ }
+ gen_commit_packet(env, ctx, &pkt);
+ ctx->base.pc_next += pkt.encod_pkt_size_in_bytes;
+ } else {
+ gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET);
+ }
+}
+
+static void hexagon_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ ctx->mem_idx = MMU_USER_IDX;
+ ctx->num_packets = 0;
+ ctx->num_insns = 0;
+ ctx->num_hvx_insns = 0;
+}
+
+static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void hexagon_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next);
+}
+
+static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
+{
+ target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ bool found_end = false;
+ int nwords;
+
+ for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
+ uint32_t word = cpu_ldl_code(env,
+ ctx->base.pc_next + nwords * sizeof(uint32_t));
+ found_end = is_packet_end(word);
+ }
+ uint32_t next_ptr = ctx->base.pc_next + nwords * sizeof(uint32_t);
+ return found_end && next_ptr - page_start >= TARGET_PAGE_SIZE;
+}
+
+static void hexagon_tr_translate_packet(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUHexagonState *env = cpu->env_ptr;
+
+ decode_and_translate_packet(env, ctx);
+
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ target_ulong page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ target_ulong bytes_max = PACKET_WORDS_MAX * sizeof(target_ulong);
+
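+        /*
+         * End the TB when execution leaves this page, or when the next
+         * packet might cross into the following page.
+         */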
+ if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE ||
+ (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - bytes_max &&
+ pkt_crosses_page(env, ctx))) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+
+ /*
+ * The CPU log is used to compare against LLDB single stepping,
+         * so end the TB after every packet.
+ */
+ HexagonCPU *hex_cpu = env_archcpu(env);
+ if (hex_cpu->lldb_compat && qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_TOO_MANY:
+ gen_exec_counters(ctx);
+ tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void hexagon_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps hexagon_tr_ops = {
+ .init_disas_context = hexagon_tr_init_disas_context,
+ .tb_start = hexagon_tr_tb_start,
+ .insn_start = hexagon_tr_insn_start,
+ .translate_insn = hexagon_tr_translate_packet,
+ .tb_stop = hexagon_tr_tb_stop,
+ .disas_log = hexagon_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+#define NAME_LEN 64
+static char new_value_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
+static char reg_written_names[TOTAL_PER_THREAD_REGS][NAME_LEN];
+static char new_pred_value_names[NUM_PREGS][NAME_LEN];
+static char store_addr_names[STORES_MAX][NAME_LEN];
+static char store_width_names[STORES_MAX][NAME_LEN];
+static char store_val32_names[STORES_MAX][NAME_LEN];
+static char store_val64_names[STORES_MAX][NAME_LEN];
+static char vstore_addr_names[VSTORES_MAX][NAME_LEN];
+static char vstore_size_names[VSTORES_MAX][NAME_LEN];
+static char vstore_pending_names[VSTORES_MAX][NAME_LEN];
+
+void hexagon_translate_init(void)
+{
+ int i;
+
+ opcode_init();
+
+ if (HEX_DEBUG) {
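+        /* make sure HEX_DEBUG_LOG output has somewhere to go */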
+ if (!qemu_logfile) {
+ qemu_set_log(qemu_loglevel);
+ }
+ }
+
+ for (i = 0; i < TOTAL_PER_THREAD_REGS; i++) {
+ hex_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, gpr[i]),
+ hexagon_regnames[i]);
+
+ snprintf(new_value_names[i], NAME_LEN, "new_%s", hexagon_regnames[i]);
+ hex_new_value[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, new_value[i]),
+ new_value_names[i]);
+
+ if (HEX_DEBUG) {
+ snprintf(reg_written_names[i], NAME_LEN, "reg_written_%s",
+ hexagon_regnames[i]);
+ hex_reg_written[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, reg_written[i]),
+ reg_written_names[i]);
+ }
+ }
+ for (i = 0; i < NUM_PREGS; i++) {
+ hex_pred[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, pred[i]),
+ hexagon_prednames[i]);
+
+ snprintf(new_pred_value_names[i], NAME_LEN, "new_pred_%s",
+ hexagon_prednames[i]);
+ hex_new_pred_value[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, new_pred_value[i]),
+ new_pred_value_names[i]);
+ }
+ hex_pred_written = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, pred_written), "pred_written");
+ hex_next_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, next_PC), "next_PC");
+ hex_this_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, this_PC), "this_PC");
+ hex_slot_cancelled = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, slot_cancelled), "slot_cancelled");
+ hex_branch_taken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, branch_taken), "branch_taken");
+ hex_pkt_has_store_s1 = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, pkt_has_store_s1), "pkt_has_store_s1");
+ hex_dczero_addr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, dczero_addr), "dczero_addr");
+ hex_llsc_addr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, llsc_addr), "llsc_addr");
+ hex_llsc_val = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, llsc_val), "llsc_val");
+ hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64");
+ hex_VRegs_updated = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated");
+ hex_QRegs_updated = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated");
+ for (i = 0; i < STORES_MAX; i++) {
+ snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i);
+ hex_store_addr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, mem_log_stores[i].va),
+ store_addr_names[i]);
+
+ snprintf(store_width_names[i], NAME_LEN, "store_width_%d", i);
+ hex_store_width[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, mem_log_stores[i].width),
+ store_width_names[i]);
+
+ snprintf(store_val32_names[i], NAME_LEN, "store_val32_%d", i);
+ hex_store_val32[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, mem_log_stores[i].data32),
+ store_val32_names[i]);
+
+ snprintf(store_val64_names[i], NAME_LEN, "store_val64_%d", i);
+ hex_store_val64[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHexagonState, mem_log_stores[i].data64),
+ store_val64_names[i]);
+ }
+ for (int i = 0; i < VSTORES_MAX; i++) {
+ snprintf(vstore_addr_names[i], NAME_LEN, "vstore_addr_%d", i);
+ hex_vstore_addr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, vstore[i].va),
+ vstore_addr_names[i]);
+
+ snprintf(vstore_size_names[i], NAME_LEN, "vstore_size_%d", i);
+ hex_vstore_size[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, vstore[i].size),
+ vstore_size_names[i]);
+
+ snprintf(vstore_pending_names[i], NAME_LEN, "vstore_pending_%d", i);
+ hex_vstore_pending[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHexagonState, vstore_pending[i]),
+ vstore_pending_names[i]);
+ }
+}
diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h
new file mode 100644
index 000000000..fccfb9434
--- /dev/null
+++ b/target/hexagon/translate.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HEXAGON_TRANSLATE_H
+#define HEXAGON_TRANSLATE_H
+
+#include "qemu/bitmap.h"
+#include "cpu.h"
+#include "exec/translator.h"
+#include "tcg/tcg-op.h"
+#include "internal.h"
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ uint32_t mem_idx;
+ uint32_t num_packets;
+ uint32_t num_insns;
+ uint32_t num_hvx_insns;
+ int reg_log[REG_WRITES_MAX];
+ int reg_log_idx;
+ DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS);
+ int preg_log[PRED_WRITES_MAX];
+ int preg_log_idx;
+ DECLARE_BITMAP(pregs_written, NUM_PREGS);
+ uint8_t store_width[STORES_MAX];
+ bool s1_store_processed;
+ int future_vregs_idx;
+ int future_vregs_num[VECTOR_TEMPS_MAX];
+ int tmp_vregs_idx;
+ int tmp_vregs_num[VECTOR_TEMPS_MAX];
+ int vreg_log[NUM_VREGS];
+ bool vreg_is_predicated[NUM_VREGS];
+ int vreg_log_idx;
+ DECLARE_BITMAP(vregs_updated_tmp, NUM_VREGS);
+ DECLARE_BITMAP(vregs_updated, NUM_VREGS);
+ DECLARE_BITMAP(vregs_select, NUM_VREGS);
+ int qreg_log[NUM_QREGS];
+ bool qreg_is_predicated[NUM_QREGS];
+ int qreg_log_idx;
+ bool pre_commit;
+} DisasContext;
+
+static inline void ctx_log_reg_write(DisasContext *ctx, int rnum)
+{
+ if (test_bit(rnum, ctx->regs_written)) {
+ HEX_DEBUG_LOG("WARNING: Multiple writes to r%d\n", rnum);
+ }
+ ctx->reg_log[ctx->reg_log_idx] = rnum;
+ ctx->reg_log_idx++;
+ set_bit(rnum, ctx->regs_written);
+}
+
+static inline void ctx_log_reg_write_pair(DisasContext *ctx, int rnum)
+{
+ ctx_log_reg_write(ctx, rnum);
+ ctx_log_reg_write(ctx, rnum + 1);
+}
+
+static inline void ctx_log_pred_write(DisasContext *ctx, int pnum)
+{
+ ctx->preg_log[ctx->preg_log_idx] = pnum;
+ ctx->preg_log_idx++;
+ set_bit(pnum, ctx->pregs_written);
+}
+
+static inline bool is_preloaded(DisasContext *ctx, int num)
+{
+ return test_bit(num, ctx->regs_written);
+}
+
+intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum,
+ int num, bool alloc_ok);
+intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum,
+ int num, bool alloc_ok);
+
+static inline void ctx_log_vreg_write(DisasContext *ctx,
+ int rnum, VRegWriteType type,
+ bool is_predicated)
+{
+ if (type != EXT_TMP) {
+ ctx->vreg_log[ctx->vreg_log_idx] = rnum;
+ ctx->vreg_is_predicated[ctx->vreg_log_idx] = is_predicated;
+ ctx->vreg_log_idx++;
+
+ set_bit(rnum, ctx->vregs_updated);
+ }
+ if (type == EXT_NEW) {
+ set_bit(rnum, ctx->vregs_select);
+ }
+ if (type == EXT_TMP) {
+ set_bit(rnum, ctx->vregs_updated_tmp);
+ }
+}
+
+static inline void ctx_log_vreg_write_pair(DisasContext *ctx,
+ int rnum, VRegWriteType type,
+ bool is_predicated)
+{
+ ctx_log_vreg_write(ctx, rnum ^ 0, type, is_predicated);
+ ctx_log_vreg_write(ctx, rnum ^ 1, type, is_predicated);
+}
+
+static inline void ctx_log_qreg_write(DisasContext *ctx,
+ int rnum, bool is_predicated)
+{
+ ctx->qreg_log[ctx->qreg_log_idx] = rnum;
+ ctx->qreg_is_predicated[ctx->qreg_log_idx] = is_predicated;
+ ctx->qreg_log_idx++;
+}
+
+extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS];
+extern TCGv hex_pred[NUM_PREGS];
+extern TCGv hex_next_PC;
+extern TCGv hex_this_PC;
+extern TCGv hex_slot_cancelled;
+extern TCGv hex_branch_taken;
+extern TCGv hex_new_value[TOTAL_PER_THREAD_REGS];
+extern TCGv hex_reg_written[TOTAL_PER_THREAD_REGS];
+extern TCGv hex_new_pred_value[NUM_PREGS];
+extern TCGv hex_pred_written;
+extern TCGv hex_store_addr[STORES_MAX];
+extern TCGv hex_store_width[STORES_MAX];
+extern TCGv hex_store_val32[STORES_MAX];
+extern TCGv_i64 hex_store_val64[STORES_MAX];
+extern TCGv hex_dczero_addr;
+extern TCGv hex_llsc_addr;
+extern TCGv hex_llsc_val;
+extern TCGv_i64 hex_llsc_val_i64;
+extern TCGv hex_VRegs_updated;
+extern TCGv hex_QRegs_updated;
+extern TCGv hex_vstore_addr[VSTORES_MAX];
+extern TCGv hex_vstore_size[VSTORES_MAX];
+extern TCGv hex_vstore_pending[VSTORES_MAX];
+
+bool is_gather_store_insn(Insn *insn, Packet *pkt);
+void process_store(DisasContext *ctx, Packet *pkt, int slot_num);
+#endif
diff --git a/target/hppa/Kconfig b/target/hppa/Kconfig
new file mode 100644
index 000000000..395a35d79
--- /dev/null
+++ b/target/hppa/Kconfig
@@ -0,0 +1,2 @@
+config HPPA
+ bool
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
new file mode 100644
index 000000000..a97d1428d
--- /dev/null
+++ b/target/hppa/cpu-param.h
@@ -0,0 +1,34 @@
+/*
+ * PA-RISC cpu parameters for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef HPPA_CPU_PARAM_H
+#define HPPA_CPU_PARAM_H 1
+
+#ifdef TARGET_HPPA64
+# define TARGET_LONG_BITS 64
+# define TARGET_REGISTER_BITS 64
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 64
+#elif defined(CONFIG_USER_ONLY)
+# define TARGET_LONG_BITS 32
+# define TARGET_REGISTER_BITS 32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 32
+#else
+/*
+ * In order to form the GVA from space:offset,
+ * we need a 64-bit virtual address space.
+ */
+# define TARGET_LONG_BITS 64
+# define TARGET_REGISTER_BITS 32
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 32
+#endif
+#define TARGET_PAGE_BITS 12
+#define NB_MMU_MODES 5
+
+#endif
diff --git a/target/hppa/cpu-qom.h b/target/hppa/cpu-qom.h
new file mode 100644
index 000000000..d424f8837
--- /dev/null
+++ b/target/hppa/cpu-qom.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU HPPA CPU
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_HPPA_CPU_QOM_H
+#define QEMU_HPPA_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_HPPA_CPU "hppa-cpu"
+
+OBJECT_DECLARE_TYPE(HPPACPU, HPPACPUClass,
+ HPPA_CPU)
+
+/**
+ * HPPACPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An HPPA CPU model.
+ */
+struct HPPACPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+#endif
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
new file mode 100644
index 000000000..23eb25422
--- /dev/null
+++ b/target/hppa/cpu.c
@@ -0,0 +1,196 @@
+/*
+ * QEMU HPPA CPU
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "qemu/module.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+
+static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ cpu->env.iaoq_f = value;
+ cpu->env.iaoq_b = value + 4;
+}
+
+static void hppa_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.iaoq_f = tb->pc;
+ cpu->env.iaoq_b = tb->cs_base;
+#else
+ /* Recover the IAOQ values from the GVA + PRIV. */
+ uint32_t priv = (tb->flags >> TB_FLAG_PRIV_SHIFT) & 3;
+ target_ulong cs_base = tb->cs_base;
+ target_ulong iasq_f = cs_base & ~0xffffffffull;
+ int32_t diff = cs_base;
+
+ cpu->env.iasq_f = iasq_f;
+ cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
+ if (diff) {
+ cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
+ }
+#endif
+
+ cpu->env.psw_n = (tb->flags & PSW_N) != 0;
+}
+
+static bool hppa_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ info->mach = bfd_mach_hppa20;
+ info->print_insn = print_insn_hppa;
+}
+
+#ifndef CONFIG_USER_ONLY
+static void QEMU_NORETURN
+hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+
+ cs->exception_index = EXCP_UNALIGN;
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* CONFIG_USER_ONLY */
+
+static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ HPPACPUClass *acc = HPPA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ acc->parent_realize(dev, errp);
+
+#ifndef CONFIG_USER_ONLY
+ {
+ HPPACPU *cpu = HPPA_CPU(cs);
+ cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ hppa_cpu_alarm_timer, cpu);
+ }
+#endif
+}
+
+static void hppa_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ HPPACPU *cpu = HPPA_CPU(obj);
+ CPUHPPAState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+ cs->exception_index = -1;
+ cpu_hppa_loaded_fr0(env);
+ cpu_hppa_put_psw(env, PSW_W);
+}
+
+static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
+{
+ return object_class_by_name(TYPE_HPPA_CPU);
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps hppa_sysemu_ops = {
+ .get_phys_page_debug = hppa_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps hppa_tcg_ops = {
+ .initialize = hppa_translate_init,
+ .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = hppa_cpu_tlb_fill,
+ .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
+ .do_interrupt = hppa_cpu_do_interrupt,
+ .do_unaligned_access = hppa_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void hppa_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, hppa_cpu_realizefn,
+ &acc->parent_realize);
+
+ cc->class_by_name = hppa_cpu_class_by_name;
+ cc->has_work = hppa_cpu_has_work;
+ cc->dump_state = hppa_cpu_dump_state;
+ cc->set_pc = hppa_cpu_set_pc;
+ cc->gdb_read_register = hppa_cpu_gdb_read_register;
+ cc->gdb_write_register = hppa_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ dc->vmsd = &vmstate_hppa_cpu;
+ cc->sysemu_ops = &hppa_sysemu_ops;
+#endif
+ cc->disas_set_info = hppa_cpu_disas_set_info;
+ cc->gdb_num_core_regs = 128;
+ cc->tcg_ops = &hppa_tcg_ops;
+}
+
+static const TypeInfo hppa_cpu_type_info = {
+ .name = TYPE_HPPA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(HPPACPU),
+ .instance_init = hppa_cpu_initfn,
+ .abstract = false,
+ .class_size = sizeof(HPPACPUClass),
+ .class_init = hppa_cpu_class_init,
+};
+
+static void hppa_cpu_register_types(void)
+{
+ type_register_static(&hppa_cpu_type_info);
+}
+
+type_init(hppa_cpu_register_types)
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
new file mode 100644
index 000000000..294fd7297
--- /dev/null
+++ b/target/hppa/cpu.h
@@ -0,0 +1,341 @@
+/*
+ * PA-RISC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HPPA_CPU_H
+#define HPPA_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+/* PA-RISC 1.x processors have a strong memory model. */
+/* ??? While we do not yet implement PA-RISC 2.0, those processors have
+ a weak memory model, but with TLB bits that force ordering on a per-page
+ basis. It's probably easier to fall back to a strong memory model. */
+#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
+
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 3
+#define MMU_PHYS_IDX 4
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+/* Hardware exceptions, interrupts, faults, and traps. */
+#define EXCP_HPMC 1 /* high priority machine check */
+#define EXCP_POWER_FAIL 2
+#define EXCP_RC 3 /* recovery counter */
+#define EXCP_EXT_INTERRUPT 4 /* external interrupt */
+#define EXCP_LPMC 5 /* low priority machine check */
+#define EXCP_ITLB_MISS 6 /* itlb miss / instruction page fault */
+#define EXCP_IMP 7 /* instruction memory protection trap */
+#define EXCP_ILL 8 /* illegal instruction trap */
+#define EXCP_BREAK 9 /* break instruction */
+#define EXCP_PRIV_OPR 10 /* privileged operation trap */
+#define EXCP_PRIV_REG 11 /* privileged register trap */
+#define EXCP_OVERFLOW 12 /* signed overflow trap */
+#define EXCP_COND 13 /* trap-on-condition */
+#define EXCP_ASSIST 14 /* assist exception trap */
+#define EXCP_DTLB_MISS 15 /* dtlb miss / data page fault */
+#define EXCP_NA_ITLB_MISS 16 /* non-access itlb miss */
+#define EXCP_NA_DTLB_MISS 17 /* non-access dtlb miss */
+#define EXCP_DMP 18 /* data memory protection trap */
+#define EXCP_DMB 19 /* data memory break trap */
+#define EXCP_TLB_DIRTY 20 /* tlb dirty bit trap */
+#define EXCP_PAGE_REF 21 /* page reference trap */
+#define EXCP_ASSIST_EMU 22 /* assist emulation trap */
+#define EXCP_HPT 23 /* high-privilege transfer trap */
+#define EXCP_LPT 24 /* low-privilege transfer trap */
+#define EXCP_TB 25 /* taken branch trap */
+#define EXCP_DMAR 26 /* data memory access rights trap */
+#define EXCP_DMPI 27 /* data memory protection id trap */
+#define EXCP_UNALIGN 28 /* unaligned data reference trap */
+#define EXCP_PER_INTERRUPT 29 /* performance monitor interrupt */
+
+/* Exceptions for linux-user emulation. */
+#define EXCP_SYSCALL 30
+#define EXCP_SYSCALL_LWS 31
+
+/* Taken from Linux kernel: arch/parisc/include/asm/psw.h */
+#define PSW_I 0x00000001
+#define PSW_D 0x00000002
+#define PSW_P 0x00000004
+#define PSW_Q 0x00000008
+#define PSW_R 0x00000010
+#define PSW_F 0x00000020
+#define PSW_G 0x00000040 /* PA1.x only */
+#define PSW_O 0x00000080 /* PA2.0 only */
+#define PSW_CB 0x0000ff00
+#define PSW_M 0x00010000
+#define PSW_V 0x00020000
+#define PSW_C 0x00040000
+#define PSW_B 0x00080000
+#define PSW_X 0x00100000
+#define PSW_N 0x00200000
+#define PSW_L 0x00400000
+#define PSW_H 0x00800000
+#define PSW_T 0x01000000
+#define PSW_S 0x02000000
+#define PSW_E 0x04000000
+#ifdef TARGET_HPPA64
+#define PSW_W 0x08000000 /* PA2.0 only */
+#else
+#define PSW_W 0
+#endif
+#define PSW_Z 0x40000000 /* PA1.x only */
+#define PSW_Y 0x80000000 /* PA1.x only */
+
+#define PSW_SM (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
+ | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)
+
+/* ssm/rsm instructions number PSW_W and PSW_E differently */
+#define PSW_SM_I PSW_I /* Enable External Interrupts */
+#define PSW_SM_D PSW_D
+#define PSW_SM_P PSW_P
+#define PSW_SM_Q PSW_Q /* Enable Interrupt State Collection */
+#define PSW_SM_R PSW_R /* Enable Recover Counter Trap */
+#ifdef TARGET_HPPA64
+#define PSW_SM_E 0x100
+#define PSW_SM_W 0x200 /* PA2.0 only : Enable Wide Mode */
+#else
+#define PSW_SM_E 0
+#define PSW_SM_W 0
+#endif
+
+#define CR_RC 0
+#define CR_PID1 8
+#define CR_PID2 9
+#define CR_PID3 12
+#define CR_PID4 13
+#define CR_SCRCCR 10
+#define CR_SAR 11
+#define CR_IVA 14
+#define CR_EIEM 15
+#define CR_IT 16
+#define CR_IIASQ 17
+#define CR_IIAOQ 18
+#define CR_IIR 19
+#define CR_ISR 20
+#define CR_IOR 21
+#define CR_IPSW 22
+#define CR_EIRR 23
+
+typedef struct CPUHPPAState CPUHPPAState;
+
+#if TARGET_REGISTER_BITS == 32
+typedef uint32_t target_ureg;
+typedef int32_t target_sreg;
+#define TREG_FMT_lx "%08"PRIx32
+#define TREG_FMT_ld "%"PRId32
+#else
+typedef uint64_t target_ureg;
+typedef int64_t target_sreg;
+#define TREG_FMT_lx "%016"PRIx64
+#define TREG_FMT_ld "%"PRId64
+#endif
+
+typedef struct {
+ uint64_t va_b;
+ uint64_t va_e;
+ target_ureg pa;
+ unsigned u : 1;
+ unsigned t : 1;
+ unsigned d : 1;
+ unsigned b : 1;
+ unsigned page_size : 4;
+ unsigned ar_type : 3;
+ unsigned ar_pl1 : 2;
+ unsigned ar_pl2 : 2;
+ unsigned entry_valid : 1;
+ unsigned access_id : 16;
+} hppa_tlb_entry;
+
+struct CPUHPPAState {
+ target_ureg gr[32];
+ uint64_t fr[32];
+ uint64_t sr[8]; /* stored shifted into place for gva */
+
+ target_ureg psw; /* All psw bits except the following: */
+ target_ureg psw_n; /* boolean */
+ target_sreg psw_v; /* in most significant bit */
+
+    /* Splitting the carry-borrow field into the MSB and "the rest" allows
+ * for "the rest" to be deleted when it is unused, but the MSB is in use.
+ * In addition, it's easier to compute carry-in for bit B+1 than it is to
+ * compute carry-out for bit B (3 vs 4 insns for addition, assuming the
+ * host has the appropriate add-with-carry insn to compute the msb).
+ * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
+ */
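+    /* For example, the carry out of bit 3 is kept in bit 4 of psw_cb, the
+     * carry out of bit 7 in bit 8, and so on, while the carry out of the
+     * most-significant bit is kept in psw_cb_msb.
+     */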
+ target_ureg psw_cb; /* in least significant bit of next nibble */
+ target_ureg psw_cb_msb; /* boolean */
+
+ target_ureg iaoq_f; /* front */
+ target_ureg iaoq_b; /* back, aka next instruction */
+ uint64_t iasq_f;
+ uint64_t iasq_b;
+
+ uint32_t fr0_shadow; /* flags, c, ca/cq, rm, d, enables */
+ float_status fp_status;
+
+ target_ureg cr[32]; /* control registers */
+ target_ureg cr_back[2]; /* back of cr17/cr18 */
+ target_ureg shadow[7]; /* shadow registers */
+
+ /* ??? The number of entries isn't specified by the architecture. */
+#define HPPA_TLB_ENTRIES 256
+#define HPPA_BTLB_ENTRIES 0
+
+ /* ??? Implement a unified itlb/dtlb for the moment. */
+ /* ??? We should use a more intelligent data structure. */
+ hppa_tlb_entry tlb[HPPA_TLB_ENTRIES];
+ uint32_t tlb_last;
+};
+
+/**
+ * HPPACPU:
+ * @env: #CPUHPPAState
+ *
+ * An HPPA CPU.
+ */
+struct HPPACPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUHPPAState env;
+ QEMUTimer *alarm_timer;
+};
+
+typedef CPUHPPAState CPUArchState;
+typedef HPPACPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
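+    /* With code (PSW_C) or data (PSW_D) translation enabled, the MMU index
+       is the current privilege level, held in the low bits of IAOQ_F. */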
+ if (env->psw & (ifetch ? PSW_C : PSW_D)) {
+ return env->iaoq_f & 3;
+ }
+ return MMU_PHYS_IDX; /* mmu disabled */
+#endif
+}
+
+void hppa_translate_init(void);
+
+#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
+
+static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
+ target_ureg off)
+{
+#ifdef CONFIG_USER_ONLY
+ return off;
+#else
+ off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
+ return spc | off;
+#endif
+}
+
+static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
+ target_ureg off)
+{
+ return hppa_form_gva_psw(env->psw, spc, off);
+}
+
+/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+ * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
+ * same value.
+ */
+#define TB_FLAG_SR_SAME PSW_I
+#define TB_FLAG_PRIV_SHIFT 8
+
+static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
+ target_ulong *cs_base,
+ uint32_t *pflags)
+{
+ uint32_t flags = env->psw_n * PSW_N;
+
+ /* TB lookup assumes that PC contains the complete virtual address.
+ If we leave space+offset separate, we'll get ITLB misses to an
+ incomplete virtual address. This also means that we must separate
+       out current cpu privilege from the low bits of IAOQ_F. */
+#ifdef CONFIG_USER_ONLY
+ *pc = env->iaoq_f & -4;
+ *cs_base = env->iaoq_b & -4;
+#else
+ /* ??? E, T, H, L, B, P bits need to be here, when implemented. */
+ flags |= env->psw & (PSW_W | PSW_C | PSW_D);
+ flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
+
+ *pc = (env->psw & PSW_C
+ ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
+ : env->iaoq_f & -4);
+ *cs_base = env->iasq_f;
+
+ /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
+ low 32-bits of CS_BASE. This will succeed for all direct branches,
+ which is the primary case we care about -- using goto_tb within a page.
+ Failure is indicated by a zero difference. */
+ if (env->iasq_f == env->iasq_b) {
+ target_sreg diff = env->iaoq_b - env->iaoq_f;
+ if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
+ *cs_base |= (uint32_t)diff;
+ }
+ }
+ if ((env->sr[4] == env->sr[5])
+ & (env->sr[4] == env->sr[6])
+ & (env->sr[4] == env->sr[7])) {
+ flags |= TB_FLAG_SR_SAME;
+ }
+#endif
+
+ *pflags = flags;
+}
+
+target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
+void cpu_hppa_loaded_fr0(CPUHPPAState *env);
+
+#ifdef CONFIG_USER_ONLY
+static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { }
+#else
+void cpu_hppa_change_prot_id(CPUHPPAState *env);
+#endif
+
+hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
+int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
+#ifndef CONFIG_USER_ONLY
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void hppa_cpu_do_interrupt(CPUState *cpu);
+bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
+int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
+ int type, hwaddr *pphys, int *pprot);
+extern const MemoryRegionOps hppa_io_eir_ops;
+extern const VMStateDescription vmstate_hppa_cpu;
+void hppa_cpu_alarm_timer(void *);
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
+#endif
+void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
+
+#endif /* HPPA_CPU_H */
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
new file mode 100644
index 000000000..729c37b2c
--- /dev/null
+++ b/target/hppa/gdbstub.c
@@ -0,0 +1,282 @@
+/*
+ * HPPA gdb server stub
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int hppa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ target_ureg val;
+
+ switch (n) {
+ case 0:
+ val = cpu_hppa_get_psw(env);
+ break;
+ case 1 ... 31:
+ val = env->gr[n];
+ break;
+ case 32:
+ val = env->cr[CR_SAR];
+ break;
+ case 33:
+ val = env->iaoq_f;
+ break;
+ case 34:
+ val = env->iasq_f >> 32;
+ break;
+ case 35:
+ val = env->iaoq_b;
+ break;
+ case 36:
+ val = env->iasq_b >> 32;
+ break;
+ case 37:
+ val = env->cr[CR_EIEM];
+ break;
+ case 38:
+ val = env->cr[CR_IIR];
+ break;
+ case 39:
+ val = env->cr[CR_ISR];
+ break;
+ case 40:
+ val = env->cr[CR_IOR];
+ break;
+ case 41:
+ val = env->cr[CR_IPSW];
+ break;
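+    /* The space registers are stored pre-shifted for GVA formation
+       (see CPUHPPAState), so the architectural value is the high half. */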
+ case 43:
+ val = env->sr[4] >> 32;
+ break;
+ case 44:
+ val = env->sr[0] >> 32;
+ break;
+ case 45:
+ val = env->sr[1] >> 32;
+ break;
+ case 46:
+ val = env->sr[2] >> 32;
+ break;
+ case 47:
+ val = env->sr[3] >> 32;
+ break;
+ case 48:
+ val = env->sr[5] >> 32;
+ break;
+ case 49:
+ val = env->sr[6] >> 32;
+ break;
+ case 50:
+ val = env->sr[7] >> 32;
+ break;
+ case 51:
+ val = env->cr[CR_RC];
+ break;
+ case 52:
+ val = env->cr[CR_PID1];
+ break;
+ case 53:
+ val = env->cr[CR_PID2];
+ break;
+ case 54:
+ val = env->cr[CR_SCRCCR];
+ break;
+ case 55:
+ val = env->cr[CR_PID3];
+ break;
+ case 56:
+ val = env->cr[CR_PID4];
+ break;
+ case 57:
+ val = env->cr[24];
+ break;
+ case 58:
+ val = env->cr[25];
+ break;
+ case 59:
+ val = env->cr[26];
+ break;
+ case 60:
+ val = env->cr[27];
+ break;
+ case 61:
+ val = env->cr[28];
+ break;
+ case 62:
+ val = env->cr[29];
+ break;
+ case 63:
+ val = env->cr[30];
+ break;
+ case 64 ... 127:
+ val = extract64(env->fr[(n - 64) / 2], (n & 1 ? 0 : 32), 32);
+ break;
+ default:
+ if (n < 128) {
+ val = 0;
+ } else {
+ return 0;
+ }
+ break;
+ }
+
+ if (TARGET_REGISTER_BITS == 64) {
+ return gdb_get_reg64(mem_buf, val);
+ } else {
+ return gdb_get_reg32(mem_buf, val);
+ }
+}
+
+int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ target_ureg val;
+
+ if (TARGET_REGISTER_BITS == 64) {
+ val = ldq_p(mem_buf);
+ } else {
+ val = ldl_p(mem_buf);
+ }
+
+ switch (n) {
+ case 0:
+ cpu_hppa_put_psw(env, val);
+ break;
+ case 1 ... 31:
+ env->gr[n] = val;
+ break;
+ case 32:
+ env->cr[CR_SAR] = val;
+ break;
+ case 33:
+ env->iaoq_f = val;
+ break;
+ case 34:
+ env->iasq_f = (uint64_t)val << 32;
+ break;
+ case 35:
+ env->iaoq_b = val;
+ break;
+ case 36:
+ env->iasq_b = (uint64_t)val << 32;
+ break;
+ case 37:
+ env->cr[CR_EIEM] = val;
+ break;
+ case 38:
+ env->cr[CR_IIR] = val;
+ break;
+ case 39:
+ env->cr[CR_ISR] = val;
+ break;
+ case 40:
+ env->cr[CR_IOR] = val;
+ break;
+ case 41:
+ env->cr[CR_IPSW] = val;
+ break;
+ case 43:
+ env->sr[4] = (uint64_t)val << 32;
+ break;
+ case 44:
+ env->sr[0] = (uint64_t)val << 32;
+ break;
+ case 45:
+ env->sr[1] = (uint64_t)val << 32;
+ break;
+ case 46:
+ env->sr[2] = (uint64_t)val << 32;
+ break;
+ case 47:
+ env->sr[3] = (uint64_t)val << 32;
+ break;
+ case 48:
+ env->sr[5] = (uint64_t)val << 32;
+ break;
+ case 49:
+ env->sr[6] = (uint64_t)val << 32;
+ break;
+ case 50:
+ env->sr[7] = (uint64_t)val << 32;
+ break;
+ case 51:
+ env->cr[CR_RC] = val;
+ break;
+ case 52:
+ env->cr[CR_PID1] = val;
+ cpu_hppa_change_prot_id(env);
+ break;
+ case 53:
+ env->cr[CR_PID2] = val;
+ cpu_hppa_change_prot_id(env);
+ break;
+ case 54:
+ env->cr[CR_SCRCCR] = val;
+ break;
+ case 55:
+ env->cr[CR_PID3] = val;
+ cpu_hppa_change_prot_id(env);
+ break;
+ case 56:
+ env->cr[CR_PID4] = val;
+ cpu_hppa_change_prot_id(env);
+ break;
+ case 57:
+ env->cr[24] = val;
+ break;
+ case 58:
+ env->cr[25] = val;
+ break;
+ case 59:
+ env->cr[26] = val;
+ break;
+ case 60:
+ env->cr[27] = val;
+ break;
+ case 61:
+ env->cr[28] = val;
+ break;
+ case 62:
+ env->cr[29] = val;
+ break;
+ case 63:
+ env->cr[30] = val;
+ break;
+ case 64:
+ env->fr[0] = deposit64(env->fr[0], 32, 32, val);
+ cpu_hppa_loaded_fr0(env);
+ break;
+ case 65 ... 127:
+ {
+ uint64_t *fr = &env->fr[(n - 64) / 2];
+ *fr = deposit64(*fr, (n & 1 ? 0 : 32), 32, val);
+ }
+ break;
+ default:
+ if (n >= 128) {
+ return 0;
+ }
+ break;
+ }
+ return sizeof(target_ureg);
+}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
new file mode 100644
index 000000000..1ccff5765
--- /dev/null
+++ b/target/hppa/helper.c
@@ -0,0 +1,129 @@
+/*
+ * HPPA emulation cpu helpers for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "fpu/softfloat.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/qemu-print.h"
+
+target_ureg cpu_hppa_get_psw(CPUHPPAState *env)
+{
+ target_ureg psw;
+
+ /* Fold carry bits down to 8 consecutive bits. */
+ /* ??? Needs tweaking for hppa64. */
+ /* .......b...c...d...e...f...g...h */
+ psw = (env->psw_cb >> 4) & 0x01111111;
+ /* .......b..bc..cd..de..ef..fg..gh */
+ psw |= psw >> 3;
+ /* .............bcd............efgh */
+ psw |= (psw >> 6) & 0x000f000f;
+ /* .........................bcdefgh */
+ psw |= (psw >> 12) & 0xf;
+ psw |= env->psw_cb_msb << 7;
+ psw = (psw & 0xff) << 8;
+
+ psw |= env->psw_n * PSW_N;
+ psw |= (env->psw_v < 0) * PSW_V;
+ psw |= env->psw;
+
+ return psw;
+}
+
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
+{
+ target_ureg old_psw = env->psw;
+ target_ureg cb = 0;
+
+ env->psw = psw & ~(PSW_N | PSW_V | PSW_CB);
+ env->psw_n = (psw / PSW_N) & 1;
+ env->psw_v = -((psw / PSW_V) & 1);
+ env->psw_cb_msb = (psw >> 15) & 1;
+
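+ /* Scatter the remaining seven C/B bits back out, one bit per nibble,
+ into the internal psw_cb representation. */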
+ cb |= ((psw >> 14) & 1) << 28;
+ cb |= ((psw >> 13) & 1) << 24;
+ cb |= ((psw >> 12) & 1) << 20;
+ cb |= ((psw >> 11) & 1) << 16;
+ cb |= ((psw >> 10) & 1) << 12;
+ cb |= ((psw >> 9) & 1) << 8;
+ cb |= ((psw >> 8) & 1) << 4;
+ env->psw_cb = cb;
+
+ /* If PSW_P changes, it affects how we translate addresses. */
+ if ((psw ^ old_psw) & PSW_P) {
+#ifndef CONFIG_USER_ONLY
+ tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+#endif
+ }
+}
+
+void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ target_ureg psw = cpu_hppa_get_psw(env);
+ target_ureg psw_cb;
+ char psw_c[20];
+ int i;
+
+ qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx "\n",
+ hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f),
+ hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b));
+
+ psw_c[0] = (psw & PSW_W ? 'W' : '-');
+ psw_c[1] = (psw & PSW_E ? 'E' : '-');
+ psw_c[2] = (psw & PSW_S ? 'S' : '-');
+ psw_c[3] = (psw & PSW_T ? 'T' : '-');
+ psw_c[4] = (psw & PSW_H ? 'H' : '-');
+ psw_c[5] = (psw & PSW_L ? 'L' : '-');
+ psw_c[6] = (psw & PSW_N ? 'N' : '-');
+ psw_c[7] = (psw & PSW_X ? 'X' : '-');
+ psw_c[8] = (psw & PSW_B ? 'B' : '-');
+ psw_c[9] = (psw & PSW_C ? 'C' : '-');
+ psw_c[10] = (psw & PSW_V ? 'V' : '-');
+ psw_c[11] = (psw & PSW_M ? 'M' : '-');
+ psw_c[12] = (psw & PSW_F ? 'F' : '-');
+ psw_c[13] = (psw & PSW_R ? 'R' : '-');
+ psw_c[14] = (psw & PSW_Q ? 'Q' : '-');
+ psw_c[15] = (psw & PSW_P ? 'P' : '-');
+ psw_c[16] = (psw & PSW_D ? 'D' : '-');
+ psw_c[17] = (psw & PSW_I ? 'I' : '-');
+ psw_c[18] = '\0';
+ psw_cb = ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28);
+
+ qemu_fprintf(f, "PSW " TREG_FMT_lx " CB " TREG_FMT_lx " %s\n",
+ psw, psw_cb, psw_c);
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "GR%02d " TREG_FMT_lx "%c", i, env->gr[i],
+ (i & 3) == 3 ? '\n' : ' ');
+ }
+#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 8; i++) {
+ qemu_fprintf(f, "SR%02d %08x%c", i, (uint32_t)(env->sr[i] >> 32),
+ (i & 3) == 3 ? '\n' : ' ');
+ }
+#endif
+ qemu_fprintf(f, "\n");
+
+ /* ??? FR */
+}
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
new file mode 100644
index 000000000..0a629ffa7
--- /dev/null
+++ b/target/hppa/helper.h
@@ -0,0 +1,95 @@
+#if TARGET_REGISTER_BITS == 64
+# define dh_alias_tr i64
+#else
+# define dh_alias_tr i32
+#endif
+#define dh_ctype_tr target_ureg
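+
+/* With the aliases above, "tr" may be used in the DEF_HELPER signatures
+ below as an argument or return type the width of target_ureg. */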
+
+DEF_HELPER_2(excp, noreturn, env, int)
+DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tr)
+DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tr)
+
+DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
+
+DEF_HELPER_FLAGS_1(ldc_check, TCG_CALL_NO_RWG, void, tl)
+
+DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tr, env, tl, i32, i32)
+
+DEF_HELPER_FLAGS_1(loaded_fr0, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_2(frnd_s, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmpy_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+
+DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_2(frnd_d, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fmpy_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_d, TCG_CALL_NO_RWG, f64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_s, TCG_CALL_NO_RWG, f32, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_w_s, TCG_CALL_NO_RWG, f32, env, s32)
+DEF_HELPER_FLAGS_2(fcnv_dw_s, TCG_CALL_NO_RWG, f32, env, s64)
+DEF_HELPER_FLAGS_2(fcnv_w_d, TCG_CALL_NO_RWG, f64, env, s32)
+DEF_HELPER_FLAGS_2(fcnv_dw_d, TCG_CALL_NO_RWG, f64, env, s64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_w, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_w, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_s_dw, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_dw, TCG_CALL_NO_RWG, s64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_t_s_w, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_w, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_t_s_dw, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_dw, TCG_CALL_NO_RWG, s64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_uw_s, TCG_CALL_NO_RWG, f32, env, i32)
+DEF_HELPER_FLAGS_2(fcnv_udw_s, TCG_CALL_NO_RWG, f32, env, i64)
+DEF_HELPER_FLAGS_2(fcnv_uw_d, TCG_CALL_NO_RWG, f64, env, i32)
+DEF_HELPER_FLAGS_2(fcnv_udw_d, TCG_CALL_NO_RWG, f64, env, i64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_uw, TCG_CALL_NO_RWG, i32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_uw, TCG_CALL_NO_RWG, i32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_s_udw, TCG_CALL_NO_RWG, i64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_udw, TCG_CALL_NO_RWG, i64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_t_s_uw, TCG_CALL_NO_RWG, i32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_uw, TCG_CALL_NO_RWG, i32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_t_s_udw, TCG_CALL_NO_RWG, i64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_udw, TCG_CALL_NO_RWG, i64, env, f64)
+
+DEF_HELPER_FLAGS_5(fcmp_s, TCG_CALL_NO_RWG, void, env, f32, f32, i32, i32)
+DEF_HELPER_FLAGS_5(fcmp_d, TCG_CALL_NO_RWG, void, env, f64, f64, i32, i32)
+
+DEF_HELPER_FLAGS_4(fmpyfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(fmpynfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(fmpyfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmpynfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+
+DEF_HELPER_FLAGS_0(read_interval_timer, TCG_CALL_NO_RWG, tr)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(halt, noreturn, env)
+DEF_HELPER_1(reset, noreturn, env)
+DEF_HELPER_1(rfi, void, env)
+DEF_HELPER_1(rfi_r, void, env)
+DEF_HELPER_FLAGS_2(write_interval_timer, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(write_eirr, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(write_eiem, TCG_CALL_NO_RWG, void, env, tr)
+DEF_HELPER_FLAGS_2(swap_system_mask, TCG_CALL_NO_RWG, tr, env, tr)
+DEF_HELPER_FLAGS_3(itlba, TCG_CALL_NO_RWG, void, env, tl, tr)
+DEF_HELPER_FLAGS_3(itlbp, TCG_CALL_NO_RWG, void, env, tl, tr)
+DEF_HELPER_FLAGS_2(ptlb, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(ptlbe, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(lpa, TCG_CALL_NO_WG, tr, env, tl)
+DEF_HELPER_FLAGS_1(change_prot_id, TCG_CALL_NO_RWG, void, env)
+#endif
diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode
new file mode 100644
index 000000000..d4eefc0d4
--- /dev/null
+++ b/target/hppa/insns.decode
@@ -0,0 +1,533 @@
+#
+# HPPA instruction decode definitions.
+#
+# Copyright (c) 2018 Richard Henderson <rth@twiddle.net>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+####
+# Field definitions
+####
+
+%assemble_sr3 13:1 14:2
+%assemble_sr3x 13:1 14:2 !function=expand_sr3x
+
+%assemble_11a 0:s1 4:10 !function=expand_shl3
+%assemble_12 0:s1 2:1 3:10 !function=expand_shl2
+%assemble_12a 0:s1 3:11 !function=expand_shl2
+%assemble_17 0:s1 16:5 2:1 3:10 !function=expand_shl2
+%assemble_22 0:s1 16:10 2:1 3:10 !function=expand_shl2
+
+%assemble_21 0:s1 1:11 14:2 16:5 12:2 !function=expand_shl11
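+# The 21-bit immediate of ldil/addil is stored permuted within the
+# instruction word; expand_shl11 reassembles the fields and shifts the
+# result left by 11 bits.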
+
+%lowsign_11 0:s1 1:10
+%lowsign_14 0:s1 1:13
+
+%sm_imm 16:10 !function=expand_sm_imm
+
+%rm64 1:1 16:5
+%rt64 6:1 0:5
+%ra64 7:1 21:5
+%rb64 12:1 16:5
+%rc64 8:1 13:3 9:2
+%rc32 13:3 9:2
+
+%im5_0 0:s1 1:4
+%im5_16 16:s1 17:4
+%ma_to_m 5:1 13:1 !function=ma_to_m
+%ma2_to_m 2:2 !function=ma_to_m
+%pos_to_m 0:1 !function=pos_to_m
+%neg_to_m 0:1 !function=neg_to_m
+%a_to_m 2:1 !function=neg_to_m
+
+####
+# Argument set definitions
+####
+
+# All insns that need to form a virtual address should use this set.
+&ldst t b x disp sp m scale size
+
+&rr_cf t r cf
+&rrr_cf t r1 r2 cf
+&rrr_cf_sh t r1 r2 cf sh
+&rri_cf t r i cf
+
+&rrb_c_f disp n c f r1 r2
+&rib_c_f disp n c f r i
+
+####
+# Format definitions
+####
+
+@rr_cf ...... r:5 ..... cf:4 ....... t:5 &rr_cf
+@rrr_cf ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf
+@rrr_cf_sh ...... r2:5 r1:5 cf:4 .... sh:2 . t:5 &rrr_cf_sh
+@rrr_cf_sh0 ...... r2:5 r1:5 cf:4 ....... t:5 &rrr_cf_sh sh=0
+@rri_cf ...... r:5 t:5 cf:4 . ........... &rri_cf i=%lowsign_11
+
+@rrb_cf ...... r2:5 r1:5 c:3 ........... n:1 . \
+ &rrb_c_f disp=%assemble_12
+@rib_cf ...... r:5 ..... c:3 ........... n:1 . \
+ &rib_c_f disp=%assemble_12 i=%im5_16
+
+####
+# System
+####
+
+break 000000 ----- ----- --- 00000000 -----
+
+mtsp 000000 ----- r:5 ... 11000001 00000 sp=%assemble_sr3
+mtctl 000000 t:5 r:5 --- 11000010 00000
+mtsarcm 000000 01011 r:5 --- 11000110 00000
+mtsm 000000 00000 r:5 000 11000011 00000
+
+mfia 000000 ----- 00000 --- 10100101 t:5
+mfsp 000000 ----- 00000 ... 00100101 t:5 sp=%assemble_sr3
+mfctl 000000 r:5 00000- e:1 -01000101 t:5
+
+sync 000000 ----- ----- 000 00100000 00000 # sync, syncdma
+
+ldsid 000000 b:5 ----- sp:2 0 10000101 t:5
+
+rsm 000000 .......... 000 01110011 t:5 i=%sm_imm
+ssm 000000 .......... 000 01101011 t:5 i=%sm_imm
+
+rfi 000000 ----- ----- --- 01100000 00000
+rfi_r 000000 ----- ----- --- 01100101 00000
+
+# These are artificial instructions used by QEMU firmware.
+# They are allocated from the unassigned instruction space.
+halt 1111 1111 1111 1101 1110 1010 1101 0000
+reset 1111 1111 1111 1101 1110 1010 1101 0001
+
+####
+# Memory Management
+####
+
+@addrx ...... b:5 x:5 .. ........ m:1 ..... \
+ &ldst disp=0 scale=0 t=0 sp=0 size=0
+
+nop 000001 ----- ----- -- 11001010 0 ----- # fdc, disp
+nop_addrx 000001 ..... ..... -- 01001010 . ----- @addrx # fdc, index
+nop_addrx 000001 ..... ..... -- 01001011 . ----- @addrx # fdce
+nop_addrx 000001 ..... ..... --- 0001010 . ----- @addrx # fic 0x0a
+nop_addrx 000001 ..... ..... -- 01001111 . 00000 @addrx # fic 0x4f
+nop_addrx 000001 ..... ..... --- 0001011 . ----- @addrx # fice
+nop_addrx 000001 ..... ..... -- 01001110 . 00000 @addrx # pdc
+
+probe 000001 b:5 ri:5 sp:2 imm:1 100011 write:1 0 t:5
+
+ixtlbx 000001 b:5 r:5 sp:2 0100000 addr:1 0 00000 data=1
+ixtlbx 000001 b:5 r:5 ... 000000 addr:1 0 00000 \
+ sp=%assemble_sr3x data=0
+
+# pcxl and pcxl2 Fast TLB Insert instructions
+ixtlbxf 000001 00000 r:5 00 0 data:1 01000 addr:1 0 00000
+
+pxtlbx 000001 b:5 x:5 sp:2 0100100 local:1 m:1 ----- data=1
+pxtlbx 000001 b:5 x:5 ... 000100 local:1 m:1 ----- \
+ sp=%assemble_sr3x data=0
+
+lpa 000001 b:5 x:5 sp:2 01001101 m:1 t:5 \
+ &ldst disp=0 scale=0 size=0
+
+lci 000001 ----- ----- -- 01001100 0 t:5
+
+####
+# Arith/Log
+####
+
+andcm 000010 ..... ..... .... 000000 - ..... @rrr_cf
+and 000010 ..... ..... .... 001000 - ..... @rrr_cf
+or 000010 ..... ..... .... 001001 - ..... @rrr_cf
+xor 000010 ..... ..... .... 001010 0 ..... @rrr_cf
+uxor 000010 ..... ..... .... 001110 0 ..... @rrr_cf
+ds 000010 ..... ..... .... 010001 0 ..... @rrr_cf
+cmpclr 000010 ..... ..... .... 100010 0 ..... @rrr_cf
+uaddcm 000010 ..... ..... .... 100110 0 ..... @rrr_cf
+uaddcm_tc 000010 ..... ..... .... 100111 0 ..... @rrr_cf
+dcor 000010 ..... 00000 .... 101110 0 ..... @rr_cf
+dcor_i 000010 ..... 00000 .... 101111 0 ..... @rr_cf
+
+add 000010 ..... ..... .... 0110.. - ..... @rrr_cf_sh
+add_l 000010 ..... ..... .... 1010.. 0 ..... @rrr_cf_sh
+add_tsv 000010 ..... ..... .... 1110.. 0 ..... @rrr_cf_sh
+add_c 000010 ..... ..... .... 011100 0 ..... @rrr_cf_sh0
+add_c_tsv 000010 ..... ..... .... 111100 0 ..... @rrr_cf_sh0
+
+sub 000010 ..... ..... .... 010000 - ..... @rrr_cf
+sub_tsv 000010 ..... ..... .... 110000 0 ..... @rrr_cf
+sub_tc 000010 ..... ..... .... 010011 0 ..... @rrr_cf
+sub_tsv_tc 000010 ..... ..... .... 110011 0 ..... @rrr_cf
+sub_b 000010 ..... ..... .... 010100 0 ..... @rrr_cf
+sub_b_tsv 000010 ..... ..... .... 110100 0 ..... @rrr_cf
+
+ldil 001000 t:5 ..................... i=%assemble_21
+addil 001010 r:5 ..................... i=%assemble_21
+ldo 001101 b:5 t:5 -- .............. i=%lowsign_14
+
+addi 101101 ..... ..... .... 0 ........... @rri_cf
+addi_tsv 101101 ..... ..... .... 1 ........... @rri_cf
+addi_tc 101100 ..... ..... .... 0 ........... @rri_cf
+addi_tc_tsv 101100 ..... ..... .... 1 ........... @rri_cf
+
+subi 100101 ..... ..... .... 0 ........... @rri_cf
+subi_tsv 100101 ..... ..... .... 1 ........... @rri_cf
+
+cmpiclr 100100 ..... ..... .... 0 ........... @rri_cf
+
+####
+# Index Mem
+####
+
+@ldstx ...... b:5 x:5 sp:2 scale:1 ....... m:1 t:5 &ldst disp=0
+@ldim5 ...... b:5 ..... sp:2 ......... t:5 \
+ &ldst disp=%im5_16 x=0 scale=0 m=%ma_to_m
+@stim5 ...... b:5 t:5 sp:2 ......... ..... \
+ &ldst disp=%im5_0 x=0 scale=0 m=%ma_to_m
+
+ld 000011 ..... ..... .. . 1 -- 00 size:2 ...... @ldim5
+ld 000011 ..... ..... .. . 0 -- 00 size:2 ...... @ldstx
+st 000011 ..... ..... .. . 1 -- 10 size:2 ...... @stim5
+ldc 000011 ..... ..... .. . 1 -- 0111 ...... @ldim5 size=2
+ldc 000011 ..... ..... .. . 0 -- 0111 ...... @ldstx size=2
+lda 000011 ..... ..... .. . 1 -- 0110 ...... @ldim5 size=2
+lda 000011 ..... ..... .. . 0 -- 0110 ...... @ldstx size=2
+sta 000011 ..... ..... .. . 1 -- 1110 ...... @stim5 size=2
+stby 000011 b:5 r:5 sp:2 a:1 1 -- 1100 m:1 ..... disp=%im5_0
+
+@fldstwx ...... b:5 x:5 sp:2 scale:1 ....... m:1 ..... \
+ &ldst t=%rt64 disp=0 size=2
+@fldstwi ...... b:5 ..... sp:2 . ....... . ..... \
+ &ldst t=%rt64 disp=%im5_16 m=%ma_to_m x=0 scale=0 size=2
+
+fldw 001001 ..... ..... .. . 0 -- 000 . . ..... @fldstwx
+fldw 001001 ..... ..... .. . 1 -- 000 . . ..... @fldstwi
+fstw 001001 ..... ..... .. . 0 -- 100 . . ..... @fldstwx
+fstw 001001 ..... ..... .. . 1 -- 100 . . ..... @fldstwi
+
+@fldstdx ...... b:5 x:5 sp:2 scale:1 ....... m:1 t:5 \
+ &ldst disp=0 size=3
+@fldstdi ...... b:5 ..... sp:2 . ....... . t:5 \
+ &ldst disp=%im5_16 m=%ma_to_m x=0 scale=0 size=3
+
+fldd 001011 ..... ..... .. . 0 -- 000 0 . ..... @fldstdx
+fldd 001011 ..... ..... .. . 1 -- 000 0 . ..... @fldstdi
+fstd 001011 ..... ..... .. . 0 -- 100 0 . ..... @fldstdx
+fstd 001011 ..... ..... .. . 1 -- 100 0 . ..... @fldstdi
+
+####
+# Offset Mem
+####
+
+@ldstim14 ...... b:5 t:5 sp:2 .............. \
+ &ldst disp=%lowsign_14 x=0 scale=0 m=0
+@ldstim14m ...... b:5 t:5 sp:2 .............. \
+ &ldst disp=%lowsign_14 x=0 scale=0 m=%neg_to_m
+@ldstim12m ...... b:5 t:5 sp:2 .............. \
+ &ldst disp=%assemble_12a x=0 scale=0 m=%pos_to_m
+
+# LDB, LDH, LDW, LDWM
+ld 010000 ..... ..... .. .............. @ldstim14 size=0
+ld 010001 ..... ..... .. .............. @ldstim14 size=1
+ld 010010 ..... ..... .. .............. @ldstim14 size=2
+ld 010011 ..... ..... .. .............. @ldstim14m size=2
+ld 010111 ..... ..... .. ...........10. @ldstim12m size=2
+
+# STB, STH, STW, STWM
+st 011000 ..... ..... .. .............. @ldstim14 size=0
+st 011001 ..... ..... .. .............. @ldstim14 size=1
+st 011010 ..... ..... .. .............. @ldstim14 size=2
+st 011011 ..... ..... .. .............. @ldstim14m size=2
+st 011111 ..... ..... .. ...........10. @ldstim12m size=2
+
+fldw 010110 b:5 ..... sp:2 .............. \
+ &ldst disp=%assemble_12a t=%rm64 m=%a_to_m x=0 scale=0 size=2
+fldw 010111 b:5 ..... sp:2 ...........0.. \
+ &ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2
+
+fstw 011110 b:5 ..... sp:2 .............. \
+ &ldst disp=%assemble_12a t=%rm64 m=%a_to_m x=0 scale=0 size=2
+fstw 011111 b:5 ..... sp:2 ...........0.. \
+ &ldst disp=%assemble_12a t=%rm64 m=0 x=0 scale=0 size=2
+
+fldd 010100 b:5 t:5 sp:2 .......... .. 1 . \
+ &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3
+
+fstd 011100 b:5 t:5 sp:2 .......... .. 1 . \
+ &ldst disp=%assemble_11a m=%ma2_to_m x=0 scale=0 size=3
+
+####
+# Floating-point Multiply Add
+####
+
+&mpyadd rm1 rm2 ta ra tm
+@mpyadd ...... rm1:5 rm2:5 ta:5 ra:5 . tm:5 &mpyadd
+
+fmpyadd_f 000110 ..... ..... ..... ..... 0 ..... @mpyadd
+fmpyadd_d 000110 ..... ..... ..... ..... 1 ..... @mpyadd
+fmpysub_f 100110 ..... ..... ..... ..... 0 ..... @mpyadd
+fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd
+
+####
+# Conditional Branches
+####
+
+bb_sar 110000 00000 r:5 c:1 10 ........... n:1 . disp=%assemble_12
+bb_imm 110001 p:5 r:5 c:1 10 ........... n:1 . disp=%assemble_12
+
+movb 110010 ..... ..... ... ........... . . @rrb_cf f=0
+movbi 110011 ..... ..... ... ........... . . @rib_cf f=0
+
+cmpb 100000 ..... ..... ... ........... . . @rrb_cf f=0
+cmpb 100010 ..... ..... ... ........... . . @rrb_cf f=1
+cmpbi 100001 ..... ..... ... ........... . . @rib_cf f=0
+cmpbi 100011 ..... ..... ... ........... . . @rib_cf f=1
+
+addb 101000 ..... ..... ... ........... . . @rrb_cf f=0
+addb 101010 ..... ..... ... ........... . . @rrb_cf f=1
+addbi 101001 ..... ..... ... ........... . . @rib_cf f=0
+addbi 101011 ..... ..... ... ........... . . @rib_cf f=1
+
+####
+# Shift, Extract, Deposit
+####
+
+shrpw_sar 110100 r2:5 r1:5 c:3 00 0 00000 t:5
+shrpw_imm 110100 r2:5 r1:5 c:3 01 0 cpos:5 t:5
+
+extrw_sar 110100 r:5 t:5 c:3 10 se:1 00000 clen:5
+extrw_imm 110100 r:5 t:5 c:3 11 se:1 pos:5 clen:5
+
+depw_sar 110101 t:5 r:5 c:3 00 nz:1 00000 clen:5
+depw_imm 110101 t:5 r:5 c:3 01 nz:1 cpos:5 clen:5
+depwi_sar 110101 t:5 ..... c:3 10 nz:1 00000 clen:5 i=%im5_16
+depwi_imm 110101 t:5 ..... c:3 11 nz:1 cpos:5 clen:5 i=%im5_16
+
+####
+# Branch External
+####
+
+&BE b l n disp sp
+@be ...... b:5 ..... ... ........... n:1 . \
+ &BE disp=%assemble_17 sp=%assemble_sr3
+
+be 111000 ..... ..... ... ........... . . @be l=0
+be 111001 ..... ..... ... ........... . . @be l=31
+
+####
+# Branch
+####
+
+&BL l n disp
+@bl ...... l:5 ..... ... ........... n:1 . &BL disp=%assemble_17
+
+# B,L and B,L,PUSH
+bl 111010 ..... ..... 000 ........... . . @bl
+bl 111010 ..... ..... 100 ........... . . @bl
+# B,L (long displacement)
+bl 111010 ..... ..... 101 ........... n:1 . &BL l=2 \
+ disp=%assemble_22
+b_gate 111010 ..... ..... 001 ........... . . @bl
+blr 111010 l:5 x:5 010 00000000000 n:1 0
+bv 111010 b:5 x:5 110 00000000000 n:1 0
+bve 111010 b:5 00000 110 10000000000 n:1 - l=0
+bve 111010 b:5 00000 111 10000000000 n:1 - l=2
+
+####
+# FP Fused Multiply-Add
+####
+
+fmpyfadd_f 101110 ..... ..... ... . 0 ... . . neg:1 ..... \
+ rm1=%ra64 rm2=%rb64 ra3=%rc64 t=%rt64
+fmpyfadd_d 101110 rm1:5 rm2:5 ... 0 1 ..0 0 0 neg:1 t:5 ra3=%rc32
+
+####
+# FP operations
+####
+
+&fclass01 r t
+&fclass2 r1 r2 c y
+&fclass3 r1 r2 t
+
+@f0c_0 ...... r:5 00000 ..... 00 000 0 t:5 &fclass01
+@f0c_1 ...... r:5 000.. ..... 01 000 0 t:5 &fclass01
+@f0c_2 ...... r1:5 r2:5 y:3 .. 10 000 . c:5 &fclass2
+@f0c_3 ...... r1:5 r2:5 ..... 11 000 0 t:5 &fclass3
+
+@f0e_f_0 ...... ..... 00000 ... 0 0 000 .. 0 ..... \
+ &fclass01 r=%ra64 t=%rt64
+@f0e_d_0 ...... r:5 00000 ... 0 1 000 00 0 t:5 &fclass01
+
+@f0e_ff_1 ...... ..... 000 ... 0000 010 .. 0 ..... \
+ &fclass01 r=%ra64 t=%rt64
+@f0e_fd_1 ...... ..... 000 ... 0100 010 .0 0 t:5 &fclass01 r=%ra64
+@f0e_df_1 ...... r:5 000 ... 0001 010 0. 0 ..... &fclass01 t=%rt64
+@f0e_dd_1 ...... r:5 000 ... 0101 010 00 0 t:5 &fclass01
+
+@f0e_f_2 ...... ..... ..... y:3 .0 100 .00 c:5 \
+ &fclass2 r1=%ra64 r2=%rb64
+@f0e_d_2 ...... r1:5 r2:5 y:3 01 100 000 c:5 &fclass2
+
+@f0e_f_3 ...... ..... ..... ... .0 110 ..0 ..... \
+ &fclass3 r1=%ra64 r2=%rb64 t=%rt64
+@f0e_d_3 ...... r1:5 r2:5 ... 01 110 000 t:5
+
+# Floating point class 0
+
+# FID. Decoded as fcpy with r = t = 0, which via fcpy puts 0 into fr0.
+# This reports machine/revision = 0, which is reserved for the simulator.
+fcpy_f 001100 00000 00000 00000 000000 00000 \
+ &fclass01 r=0 t=0
+
+fcpy_f 001100 ..... ..... 010 00 ...... ..... @f0c_0
+fabs_f 001100 ..... ..... 011 00 ...... ..... @f0c_0
+fsqrt_f 001100 ..... ..... 100 00 ...... ..... @f0c_0
+frnd_f 001100 ..... ..... 101 00 ...... ..... @f0c_0
+fneg_f 001100 ..... ..... 110 00 ...... ..... @f0c_0
+fnegabs_f 001100 ..... ..... 111 00 ...... ..... @f0c_0
+
+fcpy_d 001100 ..... ..... 010 01 ...... ..... @f0c_0
+fabs_d 001100 ..... ..... 011 01 ...... ..... @f0c_0
+fsqrt_d 001100 ..... ..... 100 01 ...... ..... @f0c_0
+frnd_d 001100 ..... ..... 101 01 ...... ..... @f0c_0
+fneg_d 001100 ..... ..... 110 01 ...... ..... @f0c_0
+fnegabs_d 001100 ..... ..... 111 01 ...... ..... @f0c_0
+
+fcpy_f 001110 ..... ..... 010 ........ ..... @f0e_f_0
+fabs_f 001110 ..... ..... 011 ........ ..... @f0e_f_0
+fsqrt_f 001110 ..... ..... 100 ........ ..... @f0e_f_0
+frnd_f 001110 ..... ..... 101 ........ ..... @f0e_f_0
+fneg_f 001110 ..... ..... 110 ........ ..... @f0e_f_0
+fnegabs_f 001110 ..... ..... 111 ........ ..... @f0e_f_0
+
+fcpy_d 001110 ..... ..... 010 ........ ..... @f0e_d_0
+fabs_d 001110 ..... ..... 011 ........ ..... @f0e_d_0
+fsqrt_d 001110 ..... ..... 100 ........ ..... @f0e_d_0
+frnd_d 001110 ..... ..... 101 ........ ..... @f0e_d_0
+fneg_d 001110 ..... ..... 110 ........ ..... @f0e_d_0
+fnegabs_d 001110 ..... ..... 111 ........ ..... @f0e_d_0
+
+# Floating point class 1
+
+# float/float
+fcnv_d_f 001100 ..... ... 000 00 01 ...... ..... @f0c_1
+fcnv_f_d 001100 ..... ... 000 01 00 ...... ..... @f0c_1
+
+fcnv_d_f 001110 ..... ... 000 .......... ..... @f0e_df_1
+fcnv_f_d 001110 ..... ... 000 .......... ..... @f0e_fd_1
+
+# int/float
+fcnv_w_f 001100 ..... ... 001 00 00 ...... ..... @f0c_1
+fcnv_q_f 001100 ..... ... 001 00 01 ...... ..... @f0c_1
+fcnv_w_d 001100 ..... ... 001 01 00 ...... ..... @f0c_1
+fcnv_q_d 001100 ..... ... 001 01 01 ...... ..... @f0c_1
+
+fcnv_w_f 001110 ..... ... 001 .......... ..... @f0e_ff_1
+fcnv_q_f 001110 ..... ... 001 .......... ..... @f0e_df_1
+fcnv_w_d 001110 ..... ... 001 .......... ..... @f0e_fd_1
+fcnv_q_d 001110 ..... ... 001 .......... ..... @f0e_dd_1
+
+# float/int
+fcnv_f_w 001100 ..... ... 010 00 00 ...... ..... @f0c_1
+fcnv_d_w 001100 ..... ... 010 00 01 ...... ..... @f0c_1
+fcnv_f_q 001100 ..... ... 010 01 00 ...... ..... @f0c_1
+fcnv_d_q 001100 ..... ... 010 01 01 ...... ..... @f0c_1
+
+fcnv_f_w 001110 ..... ... 010 .......... ..... @f0e_ff_1
+fcnv_d_w 001110 ..... ... 010 .......... ..... @f0e_df_1
+fcnv_f_q 001110 ..... ... 010 .......... ..... @f0e_fd_1
+fcnv_d_q 001110 ..... ... 010 .......... ..... @f0e_dd_1
+
+# float/int truncate
+fcnv_t_f_w 001100 ..... ... 011 00 00 ...... ..... @f0c_1
+fcnv_t_d_w 001100 ..... ... 011 00 01 ...... ..... @f0c_1
+fcnv_t_f_q 001100 ..... ... 011 01 00 ...... ..... @f0c_1
+fcnv_t_d_q 001100 ..... ... 011 01 01 ...... ..... @f0c_1
+
+fcnv_t_f_w 001110 ..... ... 011 .......... ..... @f0e_ff_1
+fcnv_t_d_w 001110 ..... ... 011 .......... ..... @f0e_df_1
+fcnv_t_f_q 001110 ..... ... 011 .......... ..... @f0e_fd_1
+fcnv_t_d_q 001110 ..... ... 011 .......... ..... @f0e_dd_1
+
+# uint/float
+fcnv_uw_f 001100 ..... ... 101 00 00 ...... ..... @f0c_1
+fcnv_uq_f 001100 ..... ... 101 00 01 ...... ..... @f0c_1
+fcnv_uw_d 001100 ..... ... 101 01 00 ...... ..... @f0c_1
+fcnv_uq_d 001100 ..... ... 101 01 01 ...... ..... @f0c_1
+
+fcnv_uw_f 001110 ..... ... 101 .......... ..... @f0e_ff_1
+fcnv_uq_f 001110 ..... ... 101 .......... ..... @f0e_df_1
+fcnv_uw_d 001110 ..... ... 101 .......... ..... @f0e_fd_1
+fcnv_uq_d 001110 ..... ... 101 .......... ..... @f0e_dd_1
+
+# float/int
+fcnv_f_uw 001100 ..... ... 110 00 00 ...... ..... @f0c_1
+fcnv_d_uw 001100 ..... ... 110 00 01 ...... ..... @f0c_1
+fcnv_f_uq 001100 ..... ... 110 01 00 ...... ..... @f0c_1
+fcnv_d_uq 001100 ..... ... 110 01 01 ...... ..... @f0c_1
+
+fcnv_f_uw 001110 ..... ... 110 .......... ..... @f0e_ff_1
+fcnv_d_uw 001110 ..... ... 110 .......... ..... @f0e_df_1
+fcnv_f_uq 001110 ..... ... 110 .......... ..... @f0e_fd_1
+fcnv_d_uq 001110 ..... ... 110 .......... ..... @f0e_dd_1
+
+# float/int truncate
+fcnv_t_f_uw 001100 ..... ... 111 00 00 ...... ..... @f0c_1
+fcnv_t_d_uw 001100 ..... ... 111 00 01 ...... ..... @f0c_1
+fcnv_t_f_uq 001100 ..... ... 111 01 00 ...... ..... @f0c_1
+fcnv_t_d_uq 001100 ..... ... 111 01 01 ...... ..... @f0c_1
+
+fcnv_t_f_uw 001110 ..... ... 111 .......... ..... @f0e_ff_1
+fcnv_t_d_uw 001110 ..... ... 111 .......... ..... @f0e_df_1
+fcnv_t_f_uq 001110 ..... ... 111 .......... ..... @f0e_fd_1
+fcnv_t_d_uq 001110 ..... ... 111 .......... ..... @f0e_dd_1
+
+# Floating point class 2
+
+ftest 001100 00000 00000 y:3 00 10000 1 c:5
+
+fcmp_f 001100 ..... ..... ... 00 ..... 0 ..... @f0c_2
+fcmp_d 001100 ..... ..... ... 01 ..... 0 ..... @f0c_2
+
+fcmp_f 001110 ..... ..... ... ..... ... ..... @f0e_f_2
+fcmp_d 001110 ..... ..... ... ..... ... ..... @f0e_d_2
+
+# Floating point class 3
+
+fadd_f 001100 ..... ..... 000 00 ...... ..... @f0c_3
+fsub_f 001100 ..... ..... 001 00 ...... ..... @f0c_3
+fmpy_f 001100 ..... ..... 010 00 ...... ..... @f0c_3
+fdiv_f 001100 ..... ..... 011 00 ...... ..... @f0c_3
+
+fadd_d 001100 ..... ..... 000 01 ...... ..... @f0c_3
+fsub_d 001100 ..... ..... 001 01 ...... ..... @f0c_3
+fmpy_d 001100 ..... ..... 010 01 ...... ..... @f0c_3
+fdiv_d 001100 ..... ..... 011 01 ...... ..... @f0c_3
+
+fadd_f 001110 ..... ..... 000 ..... ... ..... @f0e_f_3
+fsub_f 001110 ..... ..... 001 ..... ... ..... @f0e_f_3
+fmpy_f 001110 ..... ..... 010 ..... ... ..... @f0e_f_3
+fdiv_f 001110 ..... ..... 011 ..... ... ..... @f0e_f_3
+
+fadd_d 001110 ..... ..... 000 ..... ... ..... @f0e_d_3
+fsub_d 001110 ..... ..... 001 ..... ... ..... @f0e_d_3
+fmpy_d 001110 ..... ..... 010 ..... ... ..... @f0e_d_3
+fdiv_d 001110 ..... ..... 011 ..... ... ..... @f0e_d_3
+
+xmpyu 001110 ..... ..... 010 .0111 .00 t:5 r1=%ra64 r2=%rb64
+
+# diag
+diag 000101 ----- ----- ---- ---- ---- ----
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
new file mode 100644
index 000000000..13073ae2b
--- /dev/null
+++ b/target/hppa/int_helper.c
@@ -0,0 +1,260 @@
+/*
+ * HPPA interrupt helper routines
+ *
+ * Copyright (c) 2017 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "hw/core/cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+static void eval_interrupt(HPPACPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ if (cpu->env.cr[CR_EIRR] & cpu->env.cr[CR_EIEM]) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+/* Each CPU has a word mapped into the GSC bus. Anything on the GSC bus
+ * can write to this word to raise an external interrupt on the target CPU.
+ * This includes the system controller (DINO) for regular devices, or
+ * another CPU for SMP interprocessor interrupts.
+ */
+static uint64_t io_eir_read(void *opaque, hwaddr addr, unsigned size)
+{
+ HPPACPU *cpu = opaque;
+
+ /* ??? What does a read of this register over the GSC bus do? */
+ return cpu->env.cr[CR_EIRR];
+}
+
+static void io_eir_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ HPPACPU *cpu = opaque;
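+ /* PA-RISC numbers bits from the MSB down; invert the written bit
+ number to get a conventional left-shift count. */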
+ int le_bit = ~data & (TARGET_REGISTER_BITS - 1);
+
+ cpu->env.cr[CR_EIRR] |= (target_ureg)1 << le_bit;
+ eval_interrupt(cpu);
+}
+
+const MemoryRegionOps hppa_io_eir_ops = {
+ .read = io_eir_read,
+ .write = io_eir_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 4,
+ .impl.max_access_size = 4,
+};
+
+void hppa_cpu_alarm_timer(void *opaque)
+{
+ /* Raise interrupt 0. */
+ io_eir_write(opaque, 0, 0, 4);
+}
+
+void HELPER(write_eirr)(CPUHPPAState *env, target_ureg val)
+{
+ env->cr[CR_EIRR] &= ~val;
+ qemu_mutex_lock_iothread();
+ eval_interrupt(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
+{
+ env->cr[CR_EIEM] = val;
+ qemu_mutex_lock_iothread();
+ eval_interrupt(env_archcpu(env));
+ qemu_mutex_unlock_iothread();
+}
+
+void hppa_cpu_do_interrupt(CPUState *cs)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ int i = cs->exception_index;
+ target_ureg iaoq_f = env->iaoq_f;
+ target_ureg iaoq_b = env->iaoq_b;
+ uint64_t iasq_f = env->iasq_f;
+ uint64_t iasq_b = env->iasq_b;
+
+ target_ureg old_psw;
+
+ /* As documented in pa2.0 -- interruption handling. */
+ /* step 1 */
+ env->cr[CR_IPSW] = old_psw = cpu_hppa_get_psw(env);
+
+ /* step 2 -- note PSW_W == 0 for !HPPA64. */
+ cpu_hppa_put_psw(env, PSW_W | (i == EXCP_HPMC ? PSW_M : 0));
+
+ /* step 3 */
+ env->cr[CR_IIASQ] = iasq_f >> 32;
+ env->cr_back[0] = iasq_b >> 32;
+ env->cr[CR_IIAOQ] = iaoq_f;
+ env->cr_back[1] = iaoq_b;
+
+ if (old_psw & PSW_Q) {
+ /* step 5 */
+ /* ISR and IOR will be set elsewhere. */
+ switch (i) {
+ case EXCP_ILL:
+ case EXCP_BREAK:
+ case EXCP_PRIV_REG:
+ case EXCP_PRIV_OPR:
+ /* IIR set via translate.c. */
+ break;
+
+ case EXCP_OVERFLOW:
+ case EXCP_COND:
+ case EXCP_ASSIST:
+ case EXCP_DTLB_MISS:
+ case EXCP_NA_ITLB_MISS:
+ case EXCP_NA_DTLB_MISS:
+ case EXCP_DMAR:
+ case EXCP_DMPI:
+ case EXCP_UNALIGN:
+ case EXCP_DMP:
+ case EXCP_DMB:
+ case EXCP_TLB_DIRTY:
+ case EXCP_PAGE_REF:
+ case EXCP_ASSIST_EMU:
+ {
+ /* Avoid reading directly from the virtual address, lest we
+ raise another exception from some sort of TLB issue. */
+ /* ??? An alternate fool-proof method would be to store the
+ instruction data into the unwind info. That's probably
+ a bit too much in the way of extra storage required. */
+ vaddr vaddr;
+ hwaddr paddr;
+
+ paddr = vaddr = iaoq_f & -4;
+ if (old_psw & PSW_C) {
+ int prot, t;
+
+ vaddr = hppa_form_gva_psw(old_psw, iasq_f, vaddr);
+ t = hppa_get_physical_address(env, vaddr, MMU_KERNEL_IDX,
+ 0, &paddr, &prot);
+ if (t >= 0) {
+ /* We can't re-load the instruction. */
+ env->cr[CR_IIR] = 0;
+ break;
+ }
+ }
+ env->cr[CR_IIR] = ldl_phys(cs->as, paddr);
+ }
+ break;
+
+ default:
+ /* Other exceptions do not set IIR. */
+ break;
+ }
+
+ /* step 6 */
+ env->shadow[0] = env->gr[1];
+ env->shadow[1] = env->gr[8];
+ env->shadow[2] = env->gr[9];
+ env->shadow[3] = env->gr[16];
+ env->shadow[4] = env->gr[17];
+ env->shadow[5] = env->gr[24];
+ env->shadow[6] = env->gr[25];
+ }
+
+ /* step 7 */
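+ /* Each interruption vector slot is 8 insns (32 bytes) long. */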
+ env->iaoq_f = env->cr[CR_IVA] + 32 * i;
+ env->iaoq_b = env->iaoq_f + 4;
+ env->iasq_f = 0;
+ env->iasq_b = 0;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static const char * const names[] = {
+ [EXCP_HPMC] = "high priority machine check",
+ [EXCP_POWER_FAIL] = "power fail interrupt",
+ [EXCP_RC] = "recovery counter trap",
+ [EXCP_EXT_INTERRUPT] = "external interrupt",
+ [EXCP_LPMC] = "low priority machine check",
+ [EXCP_ITLB_MISS] = "instruction tlb miss fault",
+ [EXCP_IMP] = "instruction memory protection trap",
+ [EXCP_ILL] = "illegal instruction trap",
+ [EXCP_BREAK] = "break instruction trap",
+ [EXCP_PRIV_OPR] = "privileged operation trap",
+ [EXCP_PRIV_REG] = "privileged register trap",
+ [EXCP_OVERFLOW] = "overflow trap",
+ [EXCP_COND] = "conditional trap",
+ [EXCP_ASSIST] = "assist exception trap",
+ [EXCP_DTLB_MISS] = "data tlb miss fault",
+ [EXCP_NA_ITLB_MISS] = "non-access instruction tlb miss",
+ [EXCP_NA_DTLB_MISS] = "non-access data tlb miss",
+ [EXCP_DMP] = "data memory protection trap",
+ [EXCP_DMB] = "data memory break trap",
+ [EXCP_TLB_DIRTY] = "tlb dirty bit trap",
+ [EXCP_PAGE_REF] = "page reference trap",
+ [EXCP_ASSIST_EMU] = "assist emulation trap",
+ [EXCP_HPT] = "high-privilege transfer trap",
+ [EXCP_LPT] = "low-privilege transfer trap",
+ [EXCP_TB] = "taken branch trap",
+ [EXCP_DMAR] = "data memory access rights trap",
+ [EXCP_DMPI] = "data memory protection id trap",
+ [EXCP_UNALIGN] = "unaligned data reference trap",
+ [EXCP_PER_INTERRUPT] = "performance monitor interrupt",
+ [EXCP_SYSCALL] = "syscall",
+ [EXCP_SYSCALL_LWS] = "syscall-lws",
+ };
+ static int count;
+ const char *name = NULL;
+ char unknown[16];
+
+ if (i >= 0 && i < ARRAY_SIZE(names)) {
+ name = names[i];
+ }
+ if (!name) {
+ snprintf(unknown, sizeof(unknown), "unknown %d", i);
+ name = unknown;
+ }
+ qemu_log("INT %6d: %s @ " TARGET_FMT_lx "," TARGET_FMT_lx
+ " -> " TREG_FMT_lx " " TARGET_FMT_lx "\n",
+ ++count, name,
+ hppa_form_gva(env, iasq_f, iaoq_f),
+ hppa_form_gva(env, iasq_b, iaoq_b),
+ env->iaoq_f,
+ hppa_form_gva(env, (uint64_t)env->cr[CR_ISR] << 32,
+ env->cr[CR_IOR]));
+ }
+ cs->exception_index = -1;
+}
+
+bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+
+ /* If interrupts are requested and enabled, raise them. */
+ if ((env->psw & PSW_I) && (interrupt_request & CPU_INTERRUPT_HARD)) {
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ hppa_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
new file mode 100644
index 000000000..905991d7f
--- /dev/null
+++ b/target/hppa/machine.c
@@ -0,0 +1,180 @@
+/*
+ * HPPA machine state (migration) helpers
+ *
+ * Copyright (c) 2017 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+#if TARGET_REGISTER_BITS == 64
+#define qemu_put_betr qemu_put_be64
+#define qemu_get_betr qemu_get_be64
+#define VMSTATE_UINTTR_V(_f, _s, _v) \
+ VMSTATE_UINT64_V(_f, _s, _v)
+#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v)
+#else
+#define qemu_put_betr qemu_put_be32
+#define qemu_get_betr qemu_get_be32
+#define VMSTATE_UINTTR_V(_f, _s, _v) \
+ VMSTATE_UINT32_V(_f, _s, _v)
+#define VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v)
+#endif
+
+#define VMSTATE_UINTTR(_f, _s) \
+ VMSTATE_UINTTR_V(_f, _s, 0)
+#define VMSTATE_UINTTR_ARRAY(_f, _s, _n) \
+ VMSTATE_UINTTR_ARRAY_V(_f, _s, _n, 0)
+
+
+static int get_psw(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUHPPAState *env = opaque;
+ cpu_hppa_put_psw(env, qemu_get_betr(f));
+ return 0;
+}
+
+static int put_psw(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUHPPAState *env = opaque;
+ qemu_put_betr(f, cpu_hppa_get_psw(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_psw = {
+ .name = "psw",
+ .get = get_psw,
+ .put = put_psw,
+};
+
+/* FIXME: Use the PA2.0 format, which is a superset of the PA1.1 format. */
+static int get_tlb(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ hppa_tlb_entry *ent = opaque;
+ uint32_t val;
+
+ memset(ent, 0, sizeof(*ent));
+
+ ent->va_b = qemu_get_be64(f);
+ ent->pa = qemu_get_betr(f);
+ val = qemu_get_be32(f);
+
+ ent->entry_valid = extract32(val, 0, 1);
+ ent->access_id = extract32(val, 1, 18);
+ ent->u = extract32(val, 19, 1);
+ ent->ar_pl2 = extract32(val, 20, 2);
+ ent->ar_pl1 = extract32(val, 22, 2);
+ ent->ar_type = extract32(val, 24, 3);
+ ent->b = extract32(val, 27, 1);
+ ent->d = extract32(val, 28, 1);
+ ent->t = extract32(val, 29, 1);
+
+ ent->va_e = ent->va_b + TARGET_PAGE_SIZE - 1;
+ return 0;
+}
+
+static int put_tlb(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ hppa_tlb_entry *ent = opaque;
+ uint32_t val = 0;
+
+ if (ent->entry_valid) {
+ val = 1;
+ val = deposit32(val, 1, 18, ent->access_id);
+ val = deposit32(val, 19, 1, ent->u);
+ val = deposit32(val, 20, 2, ent->ar_pl2);
+ val = deposit32(val, 22, 2, ent->ar_pl1);
+ val = deposit32(val, 24, 3, ent->ar_type);
+ val = deposit32(val, 27, 1, ent->b);
+ val = deposit32(val, 28, 1, ent->d);
+ val = deposit32(val, 29, 1, ent->t);
+ }
+
+ qemu_put_be64(f, ent->va_b);
+ qemu_put_betr(f, ent->pa);
+ qemu_put_be32(f, val);
+ return 0;
+}
+
+static const VMStateInfo vmstate_tlb = {
+ .name = "tlb entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+static VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINTTR_ARRAY(gr, CPUHPPAState, 32),
+ VMSTATE_UINT64_ARRAY(fr, CPUHPPAState, 32),
+ VMSTATE_UINT64_ARRAY(sr, CPUHPPAState, 8),
+ VMSTATE_UINTTR_ARRAY(cr, CPUHPPAState, 32),
+ VMSTATE_UINTTR_ARRAY(cr_back, CPUHPPAState, 2),
+ VMSTATE_UINTTR_ARRAY(shadow, CPUHPPAState, 7),
+
+ /* Save the architectural value of the psw, not the internally
+ expanded version. Since this architectural value does not
+ exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "psw",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &vmstate_psw,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+
+ VMSTATE_UINTTR(iaoq_f, CPUHPPAState),
+ VMSTATE_UINTTR(iaoq_b, CPUHPPAState),
+ VMSTATE_UINT64(iasq_f, CPUHPPAState),
+ VMSTATE_UINT64(iasq_b, CPUHPPAState),
+
+ VMSTATE_UINT32(fr0_shadow, CPUHPPAState),
+
+ VMSTATE_ARRAY(tlb, CPUHPPAState, ARRAY_SIZE(((CPUHPPAState *)0)->tlb),
+ 0, vmstate_tlb, hppa_tlb_entry),
+ VMSTATE_UINT32(tlb_last, CPUHPPAState),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_env_fields,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_hppa_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_cpu_fields,
+};
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
new file mode 100644
index 000000000..bf07445cd
--- /dev/null
+++ b/target/hppa/mem_helper.c
@@ -0,0 +1,380 @@
+/*
+ * HPPA memory access helper routines
+ *
+ * Copyright (c) 2017 Helge Deller
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "hw/core/cpu.h"
+#include "trace.h"
+
+static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ hppa_tlb_entry *ent = &env->tlb[i];
+ if (ent->va_b <= addr && addr <= ent->va_e) {
+ trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
+ ent->va_b, ent->va_e, ent->pa);
+ return ent;
+ }
+ }
+ trace_hppa_tlb_find_entry_not_found(env, addr);
+ return NULL;
+}
+
+static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+{
+ CPUState *cs = env_cpu(env);
+ unsigned i, n = 1 << (2 * ent->page_size);
+ uint64_t addr = ent->va_b;
+
+ trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);
+
+ for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
+ /* Do not flush MMU_PHYS_IDX. */
+ tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+ }
+
+ memset(ent, 0, sizeof(*ent));
+ ent->va_b = -1;
+}
+
+static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+{
+ hppa_tlb_entry *ent;
+ uint32_t i = env->tlb_last;
+
+ env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
+ ent = &env->tlb[i];
+
+ hppa_flush_tlb_ent(env, ent);
+ return ent;
+}
+
+int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
+ int type, hwaddr *pphys, int *pprot)
+{
+ hwaddr phys;
+ int prot, r_prot, w_prot, x_prot;
+ hppa_tlb_entry *ent;
+ int ret = -1;
+
+ /* Virtual translation disabled. Direct map virtual to physical. */
+ if (mmu_idx == MMU_PHYS_IDX) {
+ phys = addr;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ goto egress;
+ }
+
+ /* Find a valid tlb entry that matches the virtual address. */
+ ent = hppa_find_tlb(env, addr);
+ if (ent == NULL || !ent->entry_valid) {
+ phys = 0;
+ prot = 0;
+ ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
+ goto egress;
+ }
+
+ /* We now know the physical address. */
+ phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
+
+ /* Map TLB access_rights field to QEMU protection. */
+ r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
+ w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
+ x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
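+ /* A smaller mmu_idx is more privileged; execute is permitted only
+ within the [ar_pl2, ar_pl1] privilege window. */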
+ switch (ent->ar_type) {
+ case 0: /* read-only: data page */
+ prot = r_prot;
+ break;
+ case 1: /* read/write: dynamic data page */
+ prot = r_prot | w_prot;
+ break;
+ case 2: /* read/execute: normal code page */
+ prot = r_prot | x_prot;
+ break;
+ case 3: /* read/write/execute: dynamic code page */
+ prot = r_prot | w_prot | x_prot;
+ break;
+ default: /* execute: promote to privilege level type & 3 */
+ prot = x_prot;
+ break;
+ }
+
+ /* access_id == 0 means public page and no check is performed */
+ if ((env->psw & PSW_P) && ent->access_id) {
+ /* If bits [31:1] match, and bit 0 is set, suppress write. */
+ int match = ent->access_id * 2 + 1;
+
+ if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
+ match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
+ prot &= PAGE_READ | PAGE_EXEC;
+ if (type == PAGE_WRITE) {
+ ret = EXCP_DMPI;
+ goto egress;
+ }
+ }
+ }
+
+ /* No guest access type indicates a non-architectural access from
+ within QEMU. Bypass checks for access, D, B and T bits. */
+ if (type == 0) {
+ goto egress;
+ }
+
+ if (unlikely(!(prot & type))) {
+ /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
+ ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
+ goto egress;
+ }
+
+ /* In reverse priority order, check for conditions which raise faults.
+ As we go, remove PROT bits that cover the condition we want to check.
+ In this way, the resulting PROT will force a re-check of the
+ architectural TLB entry for the next access. */
+ if (unlikely(!ent->d)) {
+ if (type & PAGE_WRITE) {
+ /* The D bit is not set -- TLB Dirty Bit Fault. */
+ ret = EXCP_TLB_DIRTY;
+ }
+ prot &= PAGE_READ | PAGE_EXEC;
+ }
+ if (unlikely(ent->b)) {
+ if (type & PAGE_WRITE) {
+ /* The B bit is set -- Data Memory Break Fault. */
+ ret = EXCP_DMB;
+ }
+ prot &= PAGE_READ | PAGE_EXEC;
+ }
+ if (unlikely(ent->t)) {
+ if (!(type & PAGE_EXEC)) {
+ /* The T bit is set -- Page Reference Fault. */
+ ret = EXCP_PAGE_REF;
+ }
+ prot &= PAGE_EXEC;
+ }
+
+ egress:
+ *pphys = phys;
+ *pprot = prot;
+ trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
+ return ret;
+}
+
+hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ hwaddr phys;
+ int prot, excp;
+
+ /* If the (data) mmu is disabled, bypass translation. */
+ /* ??? We really ought to know if the code mmu is disabled too,
+ in order to get the correct debugging dumps. */
+ if (!(cpu->env.psw & PSW_D)) {
+ return addr;
+ }
+
+ excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
+ &phys, &prot);
+
+ /* Since we're translating for debugging, the only error that is a
+ hard error is no translation at all. Otherwise, while a real cpu
+ access might not have permission, the debugger does. */
+ return excp == EXCP_DTLB_MISS ? -1 : phys;
+}
+
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ int prot, excp, a_prot;
+ hwaddr phys;
+
+ switch (type) {
+ case MMU_INST_FETCH:
+ a_prot = PAGE_EXEC;
+ break;
+ case MMU_DATA_STORE:
+ a_prot = PAGE_WRITE;
+ break;
+ default:
+ a_prot = PAGE_READ;
+ break;
+ }
+
+ excp = hppa_get_physical_address(env, addr, mmu_idx,
+ a_prot, &phys, &prot);
+ if (unlikely(excp >= 0)) {
+ if (probe) {
+ return false;
+ }
+ trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
+ /* Failure. Raise the indicated exception. */
+ cs->exception_index = excp;
+ if (cpu->env.psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ cpu->env.cr[CR_IOR] = addr;
+ cpu->env.cr[CR_ISR] = addr >> 32;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+
+ trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
+ phys & TARGET_PAGE_MASK, size, type, mmu_idx);
+ /* Success! Store the translation into the QEMU TLB. */
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+}
+
+/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
+void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+ hppa_tlb_entry *empty = NULL;
+ int i;
+
+ /* Zap any old entries covering ADDR; notice empty entries on the way. */
+ for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+ hppa_tlb_entry *ent = &env->tlb[i];
+ if (ent->va_b <= addr && addr <= ent->va_e) {
+ if (ent->entry_valid) {
+ hppa_flush_tlb_ent(env, ent);
+ }
+ if (!empty) {
+ empty = ent;
+ }
+ }
+ }
+
+ /* If we didn't see an empty entry, evict one. */
+ if (empty == NULL) {
+ empty = hppa_alloc_tlb_ent(env);
+ }
+
+ /* Note that empty->entry_valid == 0 already. */
+ empty->va_b = addr & TARGET_PAGE_MASK;
+ empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
+ empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+ trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
+}
+
+/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
+void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+ hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+ if (unlikely(ent == NULL)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+ return;
+ }
+
+ ent->access_id = extract32(reg, 1, 18);
+ ent->u = extract32(reg, 19, 1);
+ ent->ar_pl2 = extract32(reg, 20, 2);
+ ent->ar_pl1 = extract32(reg, 22, 2);
+ ent->ar_type = extract32(reg, 24, 3);
+ ent->b = extract32(reg, 27, 1);
+ ent->d = extract32(reg, 28, 1);
+ ent->t = extract32(reg, 29, 1);
+ ent->entry_valid = 1;
+ trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
+ ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
+}
+
+/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
+ synchronous across all processors. */
+static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
+{
+ CPUHPPAState *env = cpu->env_ptr;
+ target_ulong addr = (target_ulong) data.target_ptr;
+ hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+ if (ent && ent->entry_valid) {
+ hppa_flush_tlb_ent(env, ent);
+ }
+}
+
+void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
+{
+ CPUState *src = env_cpu(env);
+ CPUState *cpu;
+ run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
+
+ trace_hppa_tlb_ptlb(env);
+
+ CPU_FOREACH(cpu) {
+ if (cpu != src) {
+ async_run_on_cpu(cpu, ptlb_work, data);
+ }
+ }
+ async_safe_run_on_cpu(src, ptlb_work, data);
+}
+
+/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
+ number of pages/entries (we choose all), and is local to the cpu. */
+void HELPER(ptlbe)(CPUHPPAState *env)
+{
+ trace_hppa_tlb_ptlbe(env);
+ memset(env->tlb, 0, sizeof(env->tlb));
+ tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+}
+
+void cpu_hppa_change_prot_id(CPUHPPAState *env)
+{
+ if (env->psw & PSW_P) {
+ tlb_flush_by_mmuidx(env_cpu(env), 0xf);
+ }
+}
+
+void HELPER(change_prot_id)(CPUHPPAState *env)
+{
+ cpu_hppa_change_prot_id(env);
+}
+
+target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
+{
+ hwaddr phys;
+ int prot, excp;
+
+ excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+ &phys, &prot);
+ if (excp >= 0) {
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+ if (excp == EXCP_DTLB_MISS) {
+ excp = EXCP_NA_DTLB_MISS;
+ }
+ trace_hppa_tlb_lpa_failed(env, addr);
+ hppa_dynamic_excp(env, excp, GETPC());
+ }
+ trace_hppa_tlb_lpa_success(env, addr, phys);
+ return phys;
+}
+
+/* Return the ar_type of the TLB at VADDR, or -1. */
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
+{
+ hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
+ return ent ? ent->ar_type : -1;
+}
diff --git a/target/hppa/meson.build b/target/hppa/meson.build
new file mode 100644
index 000000000..021e42a2d
--- /dev/null
+++ b/target/hppa/meson.build
@@ -0,0 +1,21 @@
+gen = decodetree.process('insns.decode')
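+# decodetree turns insns.decode into a generated decoder used by
+# translate.c.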
+
+hppa_ss = ss.source_set()
+hppa_ss.add(gen)
+hppa_ss.add(files(
+ 'cpu.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'int_helper.c',
+ 'op_helper.c',
+ 'translate.c',
+))
+
+hppa_softmmu_ss = ss.source_set()
+hppa_softmmu_ss.add(files(
+ 'machine.c',
+ 'mem_helper.c',
+))
+
+target_arch += {'hppa': hppa_ss}
+target_softmmu_arch += {'hppa': hppa_softmmu_ss}
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
new file mode 100644
index 000000000..96d9391c3
--- /dev/null
+++ b/target/hppa/op_helper.c
@@ -0,0 +1,705 @@
+/*
+ * Helpers for HPPA instructions.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/timer.h"
+#include "sysemu/runstate.h"
+#include "fpu/softfloat.h"
+#include "trace.h"
+
+void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+ cpu_loop_exit_restore(cs, ra);
+}
+
+void HELPER(tsv)(CPUHPPAState *env, target_ureg cond)
+{
+ if (unlikely((target_sreg)cond < 0)) {
+ hppa_dynamic_excp(env, EXCP_OVERFLOW, GETPC());
+ }
+}
+
+void HELPER(tcond)(CPUHPPAState *env, target_ureg cond)
+{
+ if (unlikely(cond)) {
+ hppa_dynamic_excp(env, EXCP_COND, GETPC());
+ }
+}
+
+static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
+ uint32_t mask, uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ uint32_t old, new, cmp;
+
+ /* Round ADDR down to the containing aligned word; this is correct
+ for both callers: stby_b passes ADDR with (addr & 3) == 1, and
+ stby_e passes an already-aligned ADDR. */
+ uint32_t *haddr = g2h(env_cpu(env), addr & -4);
+ old = *haddr;
+ while (1) {
+ new = (old & ~mask) | (val & mask);
+ cmp = qatomic_cmpxchg(haddr, old, new);
+ if (cmp == old) {
+ return;
+ }
+ old = cmp;
+ }
+#else
+ /* FIXME -- we can do better. */
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+#endif
+}
+
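+/* Store By Bytes, Begin case: store VAL's low-order bytes from ADDR
+ through the end of the containing word. */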
+static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ureg val,
+ bool parallel, uintptr_t ra)
+{
+ switch (addr & 3) {
+ case 3:
+ cpu_stb_data_ra(env, addr, val, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr, val, ra);
+ break;
+ case 1:
+ /* The 3 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_3(env, addr, val, 0x00ffffffu, ra);
+ } else {
+ cpu_stb_data_ra(env, addr, val >> 16, ra);
+ cpu_stw_data_ra(env, addr + 1, val, ra);
+ }
+ break;
+ default:
+ cpu_stl_data_ra(env, addr, val, ra);
+ break;
+ }
+}
+
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ureg val)
+{
+ do_stby_b(env, addr, val, false, GETPC());
+}
+
+void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ureg val)
+{
+ do_stby_b(env, addr, val, true, GETPC());
+}
+
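+/* Store By Bytes, End case: store VAL's (ADDR & 3) high-order bytes
+ just below ADDR; when ADDR is word-aligned nothing is stored, but
+ write permission is still probed. */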
+static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ureg val,
+ bool parallel, uintptr_t ra)
+{
+ switch (addr & 3) {
+ case 3:
+ /* The 3 byte store must appear atomic. */
+ if (parallel) {
+ atomic_store_3(env, addr - 3, val, 0xffffff00u, ra);
+ } else {
+ cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
+ cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
+ }
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
+ break;
+ case 1:
+ cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
+ break;
+ default:
+ /* Nothing is stored, but protection is checked and the
+ cacheline is marked dirty. */
+ probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
+ break;
+ }
+}
+
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ureg val)
+{
+ do_stby_e(env, addr, val, false, GETPC());
+}
+
+void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
+ target_ureg val)
+{
+ do_stby_e(env, addr, val, true, GETPC());
+}
+
+void HELPER(ldc_check)(target_ulong addr)
+{
+ if (unlikely(addr & 0xf)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Undefined ldc to unaligned address mod 16: "
+ TARGET_FMT_lx "\n", addr);
+ }
+}
+
+target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
+ uint32_t level, uint32_t want)
+{
+#ifdef CONFIG_USER_ONLY
+ return page_check_range(addr, 1, want);
+#else
+ int prot, excp;
+ hwaddr phys;
+
+ trace_hppa_tlb_probe(addr, level, want);
+ /* Fail if the requested privilege level is more privileged,
+ i.e. numerically lower, than the current level. */
+ if (level < (env->iaoq_f & 3)) {
+ return 0;
+ }
+
+ excp = hppa_get_physical_address(env, addr, level, 0, &phys, &prot);
+ if (excp >= 0) {
+ if (env->psw & PSW_Q) {
+ /* ??? Needs tweaking for hppa64. */
+ env->cr[CR_IOR] = addr;
+ env->cr[CR_ISR] = addr >> 32;
+ }
+ if (excp == EXCP_DTLB_MISS) {
+ excp = EXCP_NA_DTLB_MISS;
+ }
+ hppa_dynamic_excp(env, excp, GETPC());
+ }
+ return (want & prot) != 0;
+#endif
+}
+
+void HELPER(loaded_fr0)(CPUHPPAState *env)
+{
+ uint32_t shadow = env->fr[0] >> 32;
+ int rm, d;
+
+ env->fr0_shadow = shadow;
+
+ switch (extract32(shadow, 9, 2)) {
+ default:
+ rm = float_round_nearest_even;
+ break;
+ case 1:
+ rm = float_round_to_zero;
+ break;
+ case 2:
+ rm = float_round_up;
+ break;
+ case 3:
+ rm = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rm, &env->fp_status);
+
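+ /* FPSR D bit: when set, flush denormal inputs and results to zero. */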
+ d = extract32(shadow, 5, 1);
+ set_flush_to_zero(d, &env->fp_status);
+ set_flush_inputs_to_zero(d, &env->fp_status);
+}
+
+void cpu_hppa_loaded_fr0(CPUHPPAState *env)
+{
+ helper_loaded_fr0(env);
+}
+
+#define CONVERT_BIT(X, SRC, DST) \
+ ((SRC) > (DST) \
+ ? (X) / ((SRC) / (DST)) & (DST) \
+ : ((X) & (SRC)) * ((DST) / (SRC)))
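+
+/*
+ * For example, CONVERT_BIT(x, 1u << 5, 1u << 1) moves bit 5 of x down to
+ * bit 1. SRC and DST are single-bit masks, so their ratio is a power of
+ * two and the division or multiplication reduces to a shift.
+ */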
+
+static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
+{
+ uint32_t soft_exp = get_float_exception_flags(&env->fp_status);
+ uint32_t hard_exp = 0;
+ uint32_t shadow = env->fr0_shadow;
+
+ if (likely(soft_exp == 0)) {
+ env->fr[0] = (uint64_t)shadow << 32;
+ return;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, 1u << 0);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, 1u << 1);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, 1u << 2);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, 1u << 3);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, 1u << 4);
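+ /* The accumulated flags land in FPSR[27:31]; the low five bits of
+ the shadow hold the corresponding trap enables, tested below. */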
+ shadow |= hard_exp << (32 - 5);
+ env->fr0_shadow = shadow;
+ env->fr[0] = (uint64_t)shadow << 32;
+
+ if (hard_exp & shadow) {
+ hppa_dynamic_excp(env, EXCP_ASSIST, ra);
+ }
+}
+
+float32 HELPER(fsqrt_s)(CPUHPPAState *env, float32 arg)
+{
+ float32 ret = float32_sqrt(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(frnd_s)(CPUHPPAState *env, float32 arg)
+{
+ float32 ret = float32_round_to_int(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fadd_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_add(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fsub_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_sub(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fmpy_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_mul(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fdiv_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_div(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fsqrt_d)(CPUHPPAState *env, float64 arg)
+{
+ float64 ret = float64_sqrt(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(frnd_d)(CPUHPPAState *env, float64 arg)
+{
+ float64 ret = float64_round_to_int(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fadd_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_add(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fsub_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_sub(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpy_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_mul(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fdiv_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_div(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_s_d)(CPUHPPAState *env, float32 arg)
+{
+ float64 ret = float32_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_d_s)(CPUHPPAState *env, float64 arg)
+{
+ float32 ret = float64_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_w_s)(CPUHPPAState *env, int32_t arg)
+{
+ float32 ret = int32_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_dw_s)(CPUHPPAState *env, int64_t arg)
+{
+ float32 ret = int64_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_w_d)(CPUHPPAState *env, int32_t arg)
+{
+ float64 ret = int32_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_dw_d)(CPUHPPAState *env, int64_t arg)
+{
+ float64 ret = int64_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_s_w)(CPUHPPAState *env, float32 arg)
+{
+ int32_t ret = float32_to_int32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_d_w)(CPUHPPAState *env, float64 arg)
+{
+ int32_t ret = float64_to_int32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_s_dw)(CPUHPPAState *env, float32 arg)
+{
+ int64_t ret = float32_to_int64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_d_dw)(CPUHPPAState *env, float64 arg)
+{
+ int64_t ret = float64_to_int64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_t_s_w)(CPUHPPAState *env, float32 arg)
+{
+ int32_t ret = float32_to_int32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_t_d_w)(CPUHPPAState *env, float64 arg)
+{
+ int32_t ret = float64_to_int32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_t_s_dw)(CPUHPPAState *env, float32 arg)
+{
+ int64_t ret = float32_to_int64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_t_d_dw)(CPUHPPAState *env, float64 arg)
+{
+ int64_t ret = float64_to_int64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_uw_s)(CPUHPPAState *env, uint32_t arg)
+{
+ float32 ret = uint32_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_udw_s)(CPUHPPAState *env, uint64_t arg)
+{
+ float32 ret = uint64_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_uw_d)(CPUHPPAState *env, uint32_t arg)
+{
+ float64 ret = uint32_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_udw_d)(CPUHPPAState *env, uint64_t arg)
+{
+ float64 ret = uint64_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_s_uw)(CPUHPPAState *env, float32 arg)
+{
+ uint32_t ret = float32_to_uint32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_d_uw)(CPUHPPAState *env, float64 arg)
+{
+ uint32_t ret = float64_to_uint32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_s_udw)(CPUHPPAState *env, float32 arg)
+{
+ uint64_t ret = float32_to_uint64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_d_udw)(CPUHPPAState *env, float64 arg)
+{
+ uint64_t ret = float64_to_uint64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_t_s_uw)(CPUHPPAState *env, float32 arg)
+{
+ uint32_t ret = float32_to_uint32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_t_d_uw)(CPUHPPAState *env, float64 arg)
+{
+ uint32_t ret = float64_to_uint32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_t_s_udw)(CPUHPPAState *env, float32 arg)
+{
+ uint64_t ret = float32_to_uint64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_t_d_udw)(CPUHPPAState *env, float64 arg)
+{
+ uint64_t ret = float64_to_uint64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+static void update_fr0_cmp(CPUHPPAState *env, uint32_t y,
+ uint32_t c, FloatRelation r)
+{
+ uint32_t shadow = env->fr0_shadow;
+
+ switch (r) {
+ case float_relation_greater:
+ c = extract32(c, 4, 1);
+ break;
+ case float_relation_less:
+ c = extract32(c, 3, 1);
+ break;
+ case float_relation_equal:
+ c = extract32(c, 2, 1);
+ break;
+ case float_relation_unordered:
+ c = extract32(c, 1, 1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (y) {
+ /* targeted comparison */
+ /* set fpsr[ca[y - 1]] to current compare */
+ shadow = deposit32(shadow, 21 - (y - 1), 1, c);
+ } else {
+ /* queued comparison */
+ /* shift cq right by one place */
+ shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10));
+ /* move fpsr[c] to fpsr[cq[0]] */
+ shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1));
+ /* set fpsr[c] to current compare */
+ shadow = deposit32(shadow, 26, 1, c);
+ }
+
+ env->fr0_shadow = shadow;
+ env->fr[0] = (uint64_t)shadow << 32;
+}
+
+void HELPER(fcmp_s)(CPUHPPAState *env, float32 a, float32 b,
+ uint32_t y, uint32_t c)
+{
+ FloatRelation r;
+ if (c & 1) {
+ r = float32_compare(a, b, &env->fp_status);
+ } else {
+ r = float32_compare_quiet(a, b, &env->fp_status);
+ }
+ update_fr0_op(env, GETPC());
+ update_fr0_cmp(env, y, c, r);
+}
+
+void HELPER(fcmp_d)(CPUHPPAState *env, float64 a, float64 b,
+ uint32_t y, uint32_t c)
+{
+ FloatRelation r;
+ if (c & 1) {
+ r = float64_compare(a, b, &env->fp_status);
+ } else {
+ r = float64_compare_quiet(a, b, &env->fp_status);
+ }
+ update_fr0_op(env, GETPC());
+ update_fr0_cmp(env, y, c, r);
+}
+
+float32 HELPER(fmpyfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c)
+{
+ float32 ret = float32_muladd(a, b, c, 0, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fmpynfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c)
+{
+ float32 ret = float32_muladd(a, b, c, float_muladd_negate_product,
+ &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpyfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c)
+{
+ float64 ret = float64_muladd(a, b, c, 0, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpynfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c)
+{
+ float64 ret = float64_muladd(a, b, c, float_muladd_negate_product,
+ &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+target_ureg HELPER(read_interval_timer)(void)
+{
+#ifdef CONFIG_USER_ONLY
+ /* In user mode, QEMU_CLOCK_VIRTUAL doesn't exist.
+ Just pass through the host CPU clock ticks. */
+ return cpu_get_host_ticks();
+#else
+ /* In system mode we have access to a decent high-resolution clock.
+ In order to make OS-level time accounting work with the cr16,
+ present it with a well-timed clock fixed at 250MHz. */
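+ /* qemu_clock_get_ns counts nanoseconds, so >> 2 gives one tick
+ per 4 ns. */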
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val)
+{
+ HPPACPU *cpu = env_archcpu(env);
+ uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t timeout;
+
+ /* Even in 64-bit mode, the comparator is always 32-bit. But the
+ value we expose to the guest is 1/4 of the speed of the clock,
+ so fold the scaled value into the low 34 bits. */
+ timeout = deposit64(current, 0, 34, (uint64_t)val << 2);
+
+ /* If the folded value lands in the past, advance to the next period. */
+ if (timeout < current + 1000) {
+ timeout += 1ULL << 34;
+ }
+
+ cpu->env.cr[CR_IT] = timeout;
+ timer_mod(cpu->alarm_timer, timeout);
+}
+
+void HELPER(halt)(CPUHPPAState *env)
+{
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ helper_excp(env, EXCP_HLT);
+}
+
+void HELPER(reset)(CPUHPPAState *env)
+{
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ helper_excp(env, EXCP_HLT);
+}
+
+target_ureg HELPER(swap_system_mask)(CPUHPPAState *env, target_ureg nsm)
+{
+ target_ulong psw = env->psw;
+ /*
+ * Setting the PSW Q bit to 1, if it was not already 1, is an
+ * undefined operation.
+ *
+ * However, HP-UX 10.20 does this with the SSM instruction.
+ * Tested this on HP9000/712 and HP9000/785/C3750 and both
+ * machines set the Q bit from 0 to 1 without an exception,
+ * so let this go without comment.
+ */
+ env->psw = (psw & ~PSW_SM) | (nsm & PSW_SM);
+ return psw & PSW_SM;
+}
+
+void HELPER(rfi)(CPUHPPAState *env)
+{
+ env->iasq_f = (uint64_t)env->cr[CR_IIASQ] << 32;
+ env->iasq_b = (uint64_t)env->cr_back[0] << 32;
+ env->iaoq_f = env->cr[CR_IIAOQ];
+ env->iaoq_b = env->cr_back[1];
+ cpu_hppa_put_psw(env, env->cr[CR_IPSW]);
+}
+
+void HELPER(rfi_r)(CPUHPPAState *env)
+{
+ env->gr[1] = env->shadow[0];
+ env->gr[8] = env->shadow[1];
+ env->gr[9] = env->shadow[2];
+ env->gr[16] = env->shadow[3];
+ env->gr[17] = env->shadow[4];
+ env->gr[24] = env->shadow[5];
+ env->gr[25] = env->shadow[6];
+ helper_rfi(env);
+}
+#endif
diff --git a/target/hppa/trace-events b/target/hppa/trace-events
new file mode 100644
index 000000000..893151789
--- /dev/null
+++ b/target/hppa/trace-events
@@ -0,0 +1,18 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# mem_helper.c
+disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_find_entry_not_found(void *env, uint64_t addr) "env=%p addr=0x%lx"
+disable hppa_tlb_get_physical_address(void *env, int ret, int prot, uint64_t addr, uint64_t phys) "env=%p ret=%d prot=%d addr=0x%lx phys=0x%lx"
+disable hppa_tlb_fill_excp(void *env, uint64_t addr, int size, int type, int mmu_idx) "env=%p addr=0x%lx size=%d type=%d mmu_idx=%d"
+disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size, int type, int mmu_idx) "env=%p addr=0x%lx phys=0x%lx size=%d type=%d mmu_idx=%d"
+disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx"
+disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d"
+disable hppa_tlb_ptlb(void *env) "env=%p"
+disable hppa_tlb_ptlbe(void *env) "env=%p"
+disable hppa_tlb_lpa_success(void *env, uint64_t addr, uint64_t phys) "env=%p addr=0x%lx phys=0x%lx"
+disable hppa_tlb_lpa_failed(void *env, uint64_t addr) "env=%p addr=0x%lx"
+
+# op_helper.c
+disable hppa_tlb_probe(uint64_t addr, int level, int want) "addr=0x%lx level=%d want=%d"
diff --git a/target/hppa/trace.h b/target/hppa/trace.h
new file mode 100644
index 000000000..810cc0969
--- /dev/null
+++ b/target/hppa/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_hppa.h"
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
new file mode 100644
index 000000000..3b9744deb
--- /dev/null
+++ b/target/hppa/translate.c
@@ -0,0 +1,4333 @@
+/*
+ * HPPA emulation cpu translation for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+/* Since we have a distinction between register size and address size,
+ we need to redefine all of these. */
+
+#undef TCGv
+#undef tcg_temp_new
+#undef tcg_global_mem_new
+#undef tcg_temp_local_new
+#undef tcg_temp_free
+
+#if TARGET_LONG_BITS == 64
+#define TCGv_tl TCGv_i64
+#define tcg_temp_new_tl tcg_temp_new_i64
+#define tcg_temp_free_tl tcg_temp_free_i64
+#if TARGET_REGISTER_BITS == 64
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
+#else
+#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
+#endif
+#else
+#define TCGv_tl TCGv_i32
+#define tcg_temp_new_tl tcg_temp_new_i32
+#define tcg_temp_free_tl tcg_temp_free_i32
+#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
+#endif
+
+#if TARGET_REGISTER_BITS == 64
+#define TCGv_reg TCGv_i64
+
+#define tcg_temp_new tcg_temp_new_i64
+#define tcg_global_mem_new tcg_global_mem_new_i64
+#define tcg_temp_local_new tcg_temp_local_new_i64
+#define tcg_temp_free tcg_temp_free_i64
+
+#define tcg_gen_movi_reg tcg_gen_movi_i64
+#define tcg_gen_mov_reg tcg_gen_mov_i64
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
+#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
+#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
+#define tcg_gen_ld_reg tcg_gen_ld_i64
+#define tcg_gen_st8_reg tcg_gen_st8_i64
+#define tcg_gen_st16_reg tcg_gen_st16_i64
+#define tcg_gen_st32_reg tcg_gen_st32_i64
+#define tcg_gen_st_reg tcg_gen_st_i64
+#define tcg_gen_add_reg tcg_gen_add_i64
+#define tcg_gen_addi_reg tcg_gen_addi_i64
+#define tcg_gen_sub_reg tcg_gen_sub_i64
+#define tcg_gen_neg_reg tcg_gen_neg_i64
+#define tcg_gen_subfi_reg tcg_gen_subfi_i64
+#define tcg_gen_subi_reg tcg_gen_subi_i64
+#define tcg_gen_and_reg tcg_gen_and_i64
+#define tcg_gen_andi_reg tcg_gen_andi_i64
+#define tcg_gen_or_reg tcg_gen_or_i64
+#define tcg_gen_ori_reg tcg_gen_ori_i64
+#define tcg_gen_xor_reg tcg_gen_xor_i64
+#define tcg_gen_xori_reg tcg_gen_xori_i64
+#define tcg_gen_not_reg tcg_gen_not_i64
+#define tcg_gen_shl_reg tcg_gen_shl_i64
+#define tcg_gen_shli_reg tcg_gen_shli_i64
+#define tcg_gen_shr_reg tcg_gen_shr_i64
+#define tcg_gen_shri_reg tcg_gen_shri_i64
+#define tcg_gen_sar_reg tcg_gen_sar_i64
+#define tcg_gen_sari_reg tcg_gen_sari_i64
+#define tcg_gen_brcond_reg tcg_gen_brcond_i64
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
+#define tcg_gen_setcond_reg tcg_gen_setcond_i64
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
+#define tcg_gen_mul_reg tcg_gen_mul_i64
+#define tcg_gen_muli_reg tcg_gen_muli_i64
+#define tcg_gen_div_reg tcg_gen_div_i64
+#define tcg_gen_rem_reg tcg_gen_rem_i64
+#define tcg_gen_divu_reg tcg_gen_divu_i64
+#define tcg_gen_remu_reg tcg_gen_remu_i64
+#define tcg_gen_discard_reg tcg_gen_discard_i64
+#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
+#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
+#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
+#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
+#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
+#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
+#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
+#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i64
+#define tcg_gen_eqv_reg tcg_gen_eqv_i64
+#define tcg_gen_nand_reg tcg_gen_nand_i64
+#define tcg_gen_nor_reg tcg_gen_nor_i64
+#define tcg_gen_orc_reg tcg_gen_orc_i64
+#define tcg_gen_clz_reg tcg_gen_clz_i64
+#define tcg_gen_ctz_reg tcg_gen_ctz_i64
+#define tcg_gen_clzi_reg tcg_gen_clzi_i64
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
+#define tcg_gen_rotl_reg tcg_gen_rotl_i64
+#define tcg_gen_rotli_reg tcg_gen_rotli_i64
+#define tcg_gen_rotr_reg tcg_gen_rotr_i64
+#define tcg_gen_rotri_reg tcg_gen_rotri_i64
+#define tcg_gen_deposit_reg tcg_gen_deposit_i64
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
+#define tcg_gen_extract_reg tcg_gen_extract_i64
+#define tcg_gen_sextract_reg tcg_gen_sextract_i64
+#define tcg_const_reg tcg_const_i64
+#define tcg_const_local_reg tcg_const_local_i64
+#define tcg_constant_reg tcg_constant_i64
+#define tcg_gen_movcond_reg tcg_gen_movcond_i64
+#define tcg_gen_add2_reg tcg_gen_add2_i64
+#define tcg_gen_sub2_reg tcg_gen_sub2_i64
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
+#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
+#else
+#define TCGv_reg TCGv_i32
+#define tcg_temp_new tcg_temp_new_i32
+#define tcg_global_mem_new tcg_global_mem_new_i32
+#define tcg_temp_local_new tcg_temp_local_new_i32
+#define tcg_temp_free tcg_temp_free_i32
+
+#define tcg_gen_movi_reg tcg_gen_movi_i32
+#define tcg_gen_mov_reg tcg_gen_mov_i32
+#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
+#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
+#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
+#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
+#define tcg_gen_ld32u_reg tcg_gen_ld_i32
+#define tcg_gen_ld32s_reg tcg_gen_ld_i32
+#define tcg_gen_ld_reg tcg_gen_ld_i32
+#define tcg_gen_st8_reg tcg_gen_st8_i32
+#define tcg_gen_st16_reg tcg_gen_st16_i32
+#define tcg_gen_st32_reg tcg_gen_st32_i32
+#define tcg_gen_st_reg tcg_gen_st_i32
+#define tcg_gen_add_reg tcg_gen_add_i32
+#define tcg_gen_addi_reg tcg_gen_addi_i32
+#define tcg_gen_sub_reg tcg_gen_sub_i32
+#define tcg_gen_neg_reg tcg_gen_neg_i32
+#define tcg_gen_subfi_reg tcg_gen_subfi_i32
+#define tcg_gen_subi_reg tcg_gen_subi_i32
+#define tcg_gen_and_reg tcg_gen_and_i32
+#define tcg_gen_andi_reg tcg_gen_andi_i32
+#define tcg_gen_or_reg tcg_gen_or_i32
+#define tcg_gen_ori_reg tcg_gen_ori_i32
+#define tcg_gen_xor_reg tcg_gen_xor_i32
+#define tcg_gen_xori_reg tcg_gen_xori_i32
+#define tcg_gen_not_reg tcg_gen_not_i32
+#define tcg_gen_shl_reg tcg_gen_shl_i32
+#define tcg_gen_shli_reg tcg_gen_shli_i32
+#define tcg_gen_shr_reg tcg_gen_shr_i32
+#define tcg_gen_shri_reg tcg_gen_shri_i32
+#define tcg_gen_sar_reg tcg_gen_sar_i32
+#define tcg_gen_sari_reg tcg_gen_sari_i32
+#define tcg_gen_brcond_reg tcg_gen_brcond_i32
+#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
+#define tcg_gen_setcond_reg tcg_gen_setcond_i32
+#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
+#define tcg_gen_mul_reg tcg_gen_mul_i32
+#define tcg_gen_muli_reg tcg_gen_muli_i32
+#define tcg_gen_div_reg tcg_gen_div_i32
+#define tcg_gen_rem_reg tcg_gen_rem_i32
+#define tcg_gen_divu_reg tcg_gen_divu_i32
+#define tcg_gen_remu_reg tcg_gen_remu_i32
+#define tcg_gen_discard_reg tcg_gen_discard_i32
+#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
+#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
+#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
+#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
+#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
+#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
+#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
+#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
+#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
+#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
+#define tcg_gen_ext32u_reg tcg_gen_mov_i32
+#define tcg_gen_ext32s_reg tcg_gen_mov_i32
+#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
+#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
+#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
+#define tcg_gen_andc_reg tcg_gen_andc_i32
+#define tcg_gen_eqv_reg tcg_gen_eqv_i32
+#define tcg_gen_nand_reg tcg_gen_nand_i32
+#define tcg_gen_nor_reg tcg_gen_nor_i32
+#define tcg_gen_orc_reg tcg_gen_orc_i32
+#define tcg_gen_clz_reg tcg_gen_clz_i32
+#define tcg_gen_ctz_reg tcg_gen_ctz_i32
+#define tcg_gen_clzi_reg tcg_gen_clzi_i32
+#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
+#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
+#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
+#define tcg_gen_rotl_reg tcg_gen_rotl_i32
+#define tcg_gen_rotli_reg tcg_gen_rotli_i32
+#define tcg_gen_rotr_reg tcg_gen_rotr_i32
+#define tcg_gen_rotri_reg tcg_gen_rotri_i32
+#define tcg_gen_deposit_reg tcg_gen_deposit_i32
+#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
+#define tcg_gen_extract_reg tcg_gen_extract_i32
+#define tcg_gen_sextract_reg tcg_gen_sextract_i32
+#define tcg_const_reg tcg_const_i32
+#define tcg_const_local_reg tcg_const_local_i32
+#define tcg_constant_reg tcg_constant_i32
+#define tcg_gen_movcond_reg tcg_gen_movcond_i32
+#define tcg_gen_add2_reg tcg_gen_add2_i32
+#define tcg_gen_sub2_reg tcg_gen_sub2_i32
+#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
+#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
+#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
+#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
+#endif /* TARGET_REGISTER_BITS */
+
+typedef struct DisasCond {
+ TCGCond c;
+ TCGv_reg a0, a1;
+} DisasCond;
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ CPUState *cs;
+
+ target_ureg iaoq_f;
+ target_ureg iaoq_b;
+ target_ureg iaoq_n;
+ TCGv_reg iaoq_n_var;
+
+ int ntempr, ntempl;
+ TCGv_reg tempr[8];
+ TCGv_tl templ[4];
+
+ DisasCond null_cond;
+ TCGLabel *null_lab;
+
+ uint32_t insn;
+ uint32_t tb_flags;
+ int mmu_idx;
+ int privilege;
+ bool psw_n_nonzero;
+} DisasContext;
+
+/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
+static int expand_sm_imm(DisasContext *ctx, int val)
+{
+ if (val & PSW_SM_E) {
+ val = (val & ~PSW_SM_E) | PSW_E;
+ }
+ if (val & PSW_SM_W) {
+ val = (val & ~PSW_SM_W) | PSW_W;
+ }
+ return val;
+}
+
+/* The space register field is stored inverted: a negative value marks an
+ explicit 3-bit specifier, for which 0 means sr0 rather than "infer the
+ space from the base register". */
+static int expand_sr3x(DisasContext *ctx, int val)
+{
+ return ~val;
+}
+
+/* Convert the M:A bits within a memory insn to the tri-state value
+ we use for the final M: 0 = no update, 1 = post-modify, -1 = pre-modify. */
+static int ma_to_m(DisasContext *ctx, int val)
+{
+ return val & 2 ? (val & 1 ? -1 : 1) : 0;
+}
+
+/* Convert the sign of the displacement to a pre or post-modify. */
+static int pos_to_m(DisasContext *ctx, int val)
+{
+ return val ? 1 : -1;
+}
+
+static int neg_to_m(DisasContext *ctx, int val)
+{
+ return val ? -1 : 1;
+}
+
+/* Used for branch targets and fp memory ops. */
+static int expand_shl2(DisasContext *ctx, int val)
+{
+ return val << 2;
+}
+
+/* Used for fp memory ops. */
+static int expand_shl3(DisasContext *ctx, int val)
+{
+ return val << 3;
+}
+
+/* Used for assemble_21. */
+static int expand_shl11(DisasContext *ctx, int val)
+{
+ return val << 11;
+}
+
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+/* We are not using a goto_tb, but have already updated the iaq,
+ so don't update it again on exit. */
+#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
+
+/* We are exiting the TB, but have neither emitted a goto_tb, nor
+ updated the iaq for the next instruction to be executed. */
+#define DISAS_IAQ_N_STALE DISAS_TARGET_1
+
+/* Similarly, but we want to return to the main loop immediately
+ to recognize unmasked interrupts. */
+#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
+#define DISAS_EXIT DISAS_TARGET_3
+
+/* global register indexes */
+static TCGv_reg cpu_gr[32];
+static TCGv_i64 cpu_sr[4];
+static TCGv_i64 cpu_srH;
+static TCGv_reg cpu_iaoq_f;
+static TCGv_reg cpu_iaoq_b;
+static TCGv_i64 cpu_iasq_f;
+static TCGv_i64 cpu_iasq_b;
+static TCGv_reg cpu_sar;
+static TCGv_reg cpu_psw_n;
+static TCGv_reg cpu_psw_v;
+static TCGv_reg cpu_psw_cb;
+static TCGv_reg cpu_psw_cb_msb;
+
+#include "exec/gen-icount.h"
+
+void hppa_translate_init(void)
+{
+#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
+
+ typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
+ static const GlobalVar vars[] = {
+ { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
+ DEF_VAR(psw_n),
+ DEF_VAR(psw_v),
+ DEF_VAR(psw_cb),
+ DEF_VAR(psw_cb_msb),
+ DEF_VAR(iaoq_f),
+ DEF_VAR(iaoq_b),
+ };
+
+#undef DEF_VAR
+
+ /* Use the symbolic register names that match the disassembler. */
+ static const char gr_names[32][4] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+ };
+ /* SR[4-7] are not global registers so that we can index them. */
+ static const char sr_names[5][4] = {
+ "sr0", "sr1", "sr2", "sr3", "srH"
+ };
+
+ int i;
+
+ cpu_gr[0] = NULL;
+ for (i = 1; i < 32; i++) {
+ cpu_gr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHPPAState, gr[i]),
+ gr_names[i]);
+ }
+ for (i = 0; i < 4; i++) {
+ cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, sr[i]),
+ sr_names[i]);
+ }
+ cpu_srH = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, sr[4]),
+ sr_names[4]);
+
+ for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+ const GlobalVar *v = &vars[i];
+ *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
+ }
+
+ cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_f),
+ "iasq_f");
+ cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_b),
+ "iasq_b");
+}
+
+static DisasCond cond_make_f(void)
+{
+ return (DisasCond){
+ .c = TCG_COND_NEVER,
+ .a0 = NULL,
+ .a1 = NULL,
+ };
+}
+
+static DisasCond cond_make_t(void)
+{
+ return (DisasCond){
+ .c = TCG_COND_ALWAYS,
+ .a0 = NULL,
+ .a1 = NULL,
+ };
+}
+
+static DisasCond cond_make_n(void)
+{
+ return (DisasCond){
+ .c = TCG_COND_NE,
+ .a0 = cpu_psw_n,
+ .a1 = tcg_constant_reg(0)
+ };
+}
+
+static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
+{
+ assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
+ return (DisasCond){
+ .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
+ };
+}
+
+static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
+{
+ TCGv_reg tmp = tcg_temp_new();
+ tcg_gen_mov_reg(tmp, a0);
+ return cond_make_0_tmp(c, tmp);
+}
+
+static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
+{
+ DisasCond r = { .c = c };
+
+ assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
+ r.a0 = tcg_temp_new();
+ tcg_gen_mov_reg(r.a0, a0);
+ r.a1 = tcg_temp_new();
+ tcg_gen_mov_reg(r.a1, a1);
+
+ return r;
+}
+
+static void cond_free(DisasCond *cond)
+{
+ switch (cond->c) {
+ default:
+ if (cond->a0 != cpu_psw_n) {
+ tcg_temp_free(cond->a0);
+ }
+ tcg_temp_free(cond->a1);
+ cond->a0 = NULL;
+ cond->a1 = NULL;
+ /* fallthru */
+ case TCG_COND_ALWAYS:
+ cond->c = TCG_COND_NEVER;
+ break;
+ case TCG_COND_NEVER:
+ break;
+ }
+}
+
+static TCGv_reg get_temp(DisasContext *ctx)
+{
+ unsigned i = ctx->ntempr++;
+ g_assert(i < ARRAY_SIZE(ctx->tempr));
+ return ctx->tempr[i] = tcg_temp_new();
+}
+
+#ifndef CONFIG_USER_ONLY
+static TCGv_tl get_temp_tl(DisasContext *ctx)
+{
+ unsigned i = ctx->ntempl++;
+ g_assert(i < ARRAY_SIZE(ctx->templ));
+ return ctx->templ[i] = tcg_temp_new_tl();
+}
+#endif
+
+static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
+{
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, v);
+ return t;
+}
+
+static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (reg == 0) {
+ TCGv_reg t = get_temp(ctx);
+ tcg_gen_movi_reg(t, 0);
+ return t;
+ } else {
+ return cpu_gr[reg];
+ }
+}
+
+static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
+ return get_temp(ctx);
+ } else {
+ return cpu_gr[reg];
+ }
+}
+
+static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
+{
+ if (ctx->null_cond.c != TCG_COND_NEVER) {
+ tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
+ ctx->null_cond.a1, dest, t);
+ } else {
+ tcg_gen_mov_reg(dest, t);
+ }
+}
+
+static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
+{
+ if (reg != 0) {
+ save_or_nullify(ctx, cpu_gr[reg], t);
+ }
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+# define HI_OFS 0
+# define LO_OFS 4
+#else
+# define HI_OFS 4
+# define LO_OFS 0
+#endif
+
+static TCGv_i32 load_frw_i32(unsigned rt)
+{
+ TCGv_i32 ret = tcg_temp_new_i32();
+ tcg_gen_ld_i32(ret, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+ return ret;
+}
+
+static TCGv_i32 load_frw0_i32(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i32(0);
+ } else {
+ return load_frw_i32(rt);
+ }
+}
+
+static TCGv_i64 load_frw0_i64(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i64(0);
+ } else {
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_ld32u_i64(ret, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+ return ret;
+ }
+}
+
+static void save_frw_i32(unsigned rt, TCGv_i32 val)
+{
+ tcg_gen_st_i32(val, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+}
+
+#undef HI_OFS
+#undef LO_OFS
+
+static TCGv_i64 load_frd(unsigned rt)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+ return ret;
+}
+
+static TCGv_i64 load_frd0(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i64(0);
+ } else {
+ return load_frd(rt);
+ }
+}
+
+static void save_frd(unsigned rt, TCGv_i64 val)
+{
+ tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+}
+
+static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
+{
+#ifdef CONFIG_USER_ONLY
+ tcg_gen_movi_i64(dest, 0);
+#else
+ if (reg < 4) {
+ tcg_gen_mov_i64(dest, cpu_sr[reg]);
+ } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ tcg_gen_mov_i64(dest, cpu_srH);
+ } else {
+ tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
+ }
+#endif
+}
+
+/* Skip over the implementation of an insn that has been nullified.
+ Use this when the insn is too complex for a conditional move. */
+static void nullify_over(DisasContext *ctx)
+{
+ if (ctx->null_cond.c != TCG_COND_NEVER) {
+ /* The always condition should have been handled in the main loop. */
+ assert(ctx->null_cond.c != TCG_COND_ALWAYS);
+
+ ctx->null_lab = gen_new_label();
+
+ /* If we're using PSW[N], copy it to a temp because... */
+ if (ctx->null_cond.a0 == cpu_psw_n) {
+ ctx->null_cond.a0 = tcg_temp_new();
+ tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
+ }
+ /* ... we clear it before branching over the implementation,
+ so that (1) it's clear after nullifying this insn and
+ (2) if this insn nullifies the next, PSW[N] is valid. */
+ if (ctx->psw_n_nonzero) {
+ ctx->psw_n_nonzero = false;
+ tcg_gen_movi_reg(cpu_psw_n, 0);
+ }
+
+ tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
+ ctx->null_cond.a1, ctx->null_lab);
+ cond_free(&ctx->null_cond);
+ }
+}
+
+/* Save the current nullification state to PSW[N]. */
+static void nullify_save(DisasContext *ctx)
+{
+ if (ctx->null_cond.c == TCG_COND_NEVER) {
+ if (ctx->psw_n_nonzero) {
+ tcg_gen_movi_reg(cpu_psw_n, 0);
+ }
+ return;
+ }
+ if (ctx->null_cond.a0 != cpu_psw_n) {
+ tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
+ ctx->null_cond.a0, ctx->null_cond.a1);
+ ctx->psw_n_nonzero = true;
+ }
+ cond_free(&ctx->null_cond);
+}
+
+/* Set PSW[N] to X. The intention is that this is used immediately
+ before a goto_tb/exit_tb, so that there is no fallthru path to other
+ code within the TB. Therefore we do not update psw_n_nonzero. */
+static void nullify_set(DisasContext *ctx, bool x)
+{
+ if (ctx->psw_n_nonzero || x) {
+ tcg_gen_movi_reg(cpu_psw_n, x);
+ }
+}
+
+/* Mark the end of an instruction that may have been nullified.
+ This is the pair to nullify_over. Always returns true so that
+ it may be tail-called from a translate function. */
+static bool nullify_end(DisasContext *ctx)
+{
+ TCGLabel *null_lab = ctx->null_lab;
+ DisasJumpType status = ctx->base.is_jmp;
+
+ /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
+ For UPDATED, we cannot update on the nullified path. */
+ assert(status != DISAS_IAQ_N_UPDATED);
+
+ if (likely(null_lab == NULL)) {
+ /* The current insn wasn't conditional or handled the condition
+ applied to it without a branch, so the (new) setting of
+ NULL_COND can be applied directly to the next insn. */
+ return true;
+ }
+ ctx->null_lab = NULL;
+
+ if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
+ /* The next instruction will be unconditional,
+ and NULL_COND already reflects that. */
+ gen_set_label(null_lab);
+ } else {
+ /* The insn that we just executed is itself nullifying the next
+ instruction. Store the condition in the PSW[N] global.
+ We asserted PSW[N] = 0 in nullify_over, so that after the
+ label we have the proper value in place. */
+ nullify_save(ctx);
+ gen_set_label(null_lab);
+ ctx->null_cond = cond_make_n();
+ }
+ if (status == DISAS_NORETURN) {
+ ctx->base.is_jmp = DISAS_NEXT;
+ }
+ return true;
+}
+
+static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
+{
+ if (unlikely(ival == -1)) {
+ tcg_gen_mov_reg(dest, vval);
+ } else {
+ tcg_gen_movi_reg(dest, ival);
+ }
+}
+
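+/* Branch displacements are relative to iaoq_f + 8, i.e. to the
+ instruction following the branch's delay slot. */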
+static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
+{
+ return ctx->iaoq_f + disp + 8;
+}
+
+static void gen_excp_1(int exception)
+{
+ gen_helper_excp(cpu_env, tcg_constant_i32(exception));
+}
+
+static void gen_excp(DisasContext *ctx, int exception)
+{
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ nullify_save(ctx);
+ gen_excp_1(exception);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static bool gen_excp_iir(DisasContext *ctx, int exc)
+{
+ nullify_over(ctx);
+ tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
+ cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
+ gen_excp(ctx, exc);
+ return nullify_end(ctx);
+}
+
+static bool gen_illegal(DisasContext *ctx)
+{
+ return gen_excp_iir(ctx, EXCP_ILL);
+}
+
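+/* System instructions trap unless executing at the most privileged
+ level (0); under user-only emulation they always trap. */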
+#ifdef CONFIG_USER_ONLY
+#define CHECK_MOST_PRIVILEGED(EXCP) \
+ return gen_excp_iir(ctx, EXCP)
+#else
+#define CHECK_MOST_PRIVILEGED(EXCP) \
+ do { \
+ if (ctx->privilege != 0) { \
+ return gen_excp_iir(ctx, EXCP); \
+ } \
+ } while (0)
+#endif
+
+static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
+{
+ return translator_use_goto_tb(&ctx->base, dest);
+}
+
+/* If the next insn is to be nullified, and it's on the same page,
+ and we're not attempting to set a breakpoint on it, then we can
+ totally skip the nullified insn. This avoids creating and
+ executing a TB that merely branches to the next TB. */
+static bool use_nullify_skip(DisasContext *ctx)
+{
+ return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
+ && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
+}
+
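+/* Transfer control to front/back queue values F and B: chain directly
+ via goto_tb when both are known constants and goto_tb chaining is
+ allowed, otherwise jump indirectly through the TB lookup helper. */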
+static void gen_goto_tb(DisasContext *ctx, int which,
+ target_ureg f, target_ureg b)
+{
+ if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
+ tcg_gen_goto_tb(which);
+ tcg_gen_movi_reg(cpu_iaoq_f, f);
+ tcg_gen_movi_reg(cpu_iaoq_b, b);
+ tcg_gen_exit_tb(ctx->base.tb, which);
+ } else {
+ copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
+
+static bool cond_need_sv(int c)
+{
+ return c == 2 || c == 3 || c == 6;
+}
+
+static bool cond_need_cb(int c)
+{
+ return c == 4 || c == 5;
+}
+
+/*
+ * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
+ * the PA-RISC 1.1 Architecture Reference Manual for details.
+ */
+
+static DisasCond do_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg cb_msb, TCGv_reg sv)
+{
+ DisasCond cond;
+ TCGv_reg tmp;
+
+ switch (cf >> 1) {
+ case 0: /* Never / TR (0 / 1) */
+ cond = cond_make_f();
+ break;
+ case 1: /* = / <> (Z / !Z) */
+ cond = cond_make_0(TCG_COND_EQ, res);
+ break;
+ case 2: /* < / >= (N ^ V / !(N ^ V)) */
+ tmp = tcg_temp_new();
+ tcg_gen_xor_reg(tmp, res, sv);
+ cond = cond_make_0_tmp(TCG_COND_LT, tmp);
+ break;
+ case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
+ /*
+ * Simplify:
+ * (N ^ V) | Z
+ * ((res < 0) ^ (sv < 0)) | !res
+ * ((res ^ sv) < 0) | !res
+ * (~(res ^ sv) >= 0) | !res
+ * !(~(res ^ sv) >> 31) | !res
+ * !(~(res ^ sv) >> 31 & res)
+ */
+ tmp = tcg_temp_new();
+ tcg_gen_eqv_reg(tmp, res, sv);
+ tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
+ tcg_gen_and_reg(tmp, tmp, res);
+ cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
+ break;
+ case 4: /* NUV / UV (!C / C) */
+ cond = cond_make_0(TCG_COND_EQ, cb_msb);
+ break;
+ case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
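+ /* tmp = C ? res : 0, so tmp == 0 iff !C | Z. */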
+ tmp = tcg_temp_new();
+ tcg_gen_neg_reg(tmp, cb_msb);
+ tcg_gen_and_reg(tmp, tmp, res);
+ cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
+ break;
+ case 6: /* SV / NSV (V / !V) */
+ cond = cond_make_0(TCG_COND_LT, sv);
+ break;
+ case 7: /* OD / EV */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_reg(tmp, res, 1);
+ cond = cond_make_0_tmp(TCG_COND_NE, tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/* Similar, but for the special case of subtraction without borrow, we
+ can use the inputs directly. This can allow other computation to be
+ deleted as unused. */
+
+static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
+{
+ DisasCond cond;
+
+ switch (cf >> 1) {
+ case 1: /* = / <> */
+ cond = cond_make(TCG_COND_EQ, in1, in2);
+ break;
+ case 2: /* < / >= */
+ cond = cond_make(TCG_COND_LT, in1, in2);
+ break;
+ case 3: /* <= / > */
+ cond = cond_make(TCG_COND_LE, in1, in2);
+ break;
+ case 4: /* << / >>= */
+ cond = cond_make(TCG_COND_LTU, in1, in2);
+ break;
+ case 5: /* <<= / >> */
+ cond = cond_make(TCG_COND_LEU, in1, in2);
+ break;
+ default:
+ return do_cond(cf, res, NULL, sv);
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/*
+ * Similar, but for logicals, where the carry and overflow bits are not
+ * computed, and use of them is undefined.
+ *
+ * Undefined or not, hardware does not trap. It seems reasonable to
+ * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
+ * how cases c={2,3} are treated.
+ */
+
+static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
+{
+ switch (cf) {
+ case 0: /* never */
+ case 9: /* undef, C */
+ case 11: /* undef, C & !Z */
+ case 12: /* undef, V */
+ return cond_make_f();
+
+ case 1: /* true */
+ case 8: /* undef, !C */
+ case 10: /* undef, !C | Z */
+ case 13: /* undef, !V */
+ return cond_make_t();
+
+ case 2: /* == */
+ return cond_make_0(TCG_COND_EQ, res);
+ case 3: /* <> */
+ return cond_make_0(TCG_COND_NE, res);
+ case 4: /* < */
+ return cond_make_0(TCG_COND_LT, res);
+ case 5: /* >= */
+ return cond_make_0(TCG_COND_GE, res);
+ case 6: /* <= */
+ return cond_make_0(TCG_COND_LE, res);
+ case 7: /* > */
+ return cond_make_0(TCG_COND_GT, res);
+
+ case 14: /* OD */
+ case 15: /* EV */
+ return do_cond(cf, res, NULL, NULL);
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Similar, but for shift/extract/deposit conditions. */
+
+static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
+{
+ unsigned c, f;
+
+ /* Convert the compressed condition codes to standard.
+ 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
+ 4-7 are the reverse of 0-3. */
+ c = orig & 3;
+ if (c == 3) {
+ c = 7;
+ }
+ f = (orig & 4) / 4;
+
+ return do_log_cond(c * 2 + f, res);
+}
+
+/* Similar, but for unit conditions. */
+
+static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
+{
+ DisasCond cond;
+ TCGv_reg tmp, cb = NULL;
+
+ if (cf & 8) {
+ /* Since we want to test lots of carry-out bits all at once, do not
+ * do our normal thing and compute carry-in of bit B+1 since that
+ * leaves us with carry bits spread across two words.
+ */
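+ /* cb = (in1 & in2) | ((in1 | in2) & ~res), the per-bit carry-out
+ of res = in1 + in2. */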
+ cb = tcg_temp_new();
+ tmp = tcg_temp_new();
+ tcg_gen_or_reg(cb, in1, in2);
+ tcg_gen_and_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(cb, cb, res);
+ tcg_gen_or_reg(cb, cb, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ switch (cf >> 1) {
+ case 0: /* never / TR */
+ case 1: /* undefined */
+ case 5: /* undefined */
+ cond = cond_make_f();
+ break;
+
+ case 2: /* SBZ / NBZ */
+ /* See hasless(v,1) from
+ * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ */
+ tmp = tcg_temp_new();
+ tcg_gen_subi_reg(tmp, res, 0x01010101u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
+ cond = cond_make_0(TCG_COND_NE, tmp);
+ tcg_temp_free(tmp);
+ break;
+
+ case 3: /* SHZ / NHZ */
+ tmp = tcg_temp_new();
+ tcg_gen_subi_reg(tmp, res, 0x00010001u);
+ tcg_gen_andc_reg(tmp, tmp, res);
+ tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
+ cond = cond_make_0(TCG_COND_NE, tmp);
+ tcg_temp_free(tmp);
+ break;
+
+ case 4: /* SDC / NDC */
+ tcg_gen_andi_reg(cb, cb, 0x88888888u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ case 6: /* SBC / NBC */
+ tcg_gen_andi_reg(cb, cb, 0x80808080u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ case 7: /* SHC / NHC */
+ tcg_gen_andi_reg(cb, cb, 0x80008000u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ if (cf & 8) {
+ tcg_temp_free(cb);
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/* Compute signed overflow for addition. */
+static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
+{
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
+
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_andc_reg(sv, sv, tmp);
+ tcg_temp_free(tmp);
+
+ return sv;
+}
+
+/* Compute signed overflow for subtraction. */
+static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
+ TCGv_reg in1, TCGv_reg in2)
+{
+ TCGv_reg sv = get_temp(ctx);
+ TCGv_reg tmp = tcg_temp_new();
+
+ tcg_gen_xor_reg(sv, res, in1);
+ tcg_gen_xor_reg(tmp, in1, in2);
+ tcg_gen_and_reg(sv, sv, tmp);
+ tcg_temp_free(tmp);
+
+ return sv;
+}
+
+static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned shift, bool is_l,
+ bool is_tsv, bool is_tc, bool is_c, unsigned cf)
+{
+ TCGv_reg dest, cb, cb_msb, sv, tmp;
+ unsigned c = cf >> 1;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ cb = NULL;
+ cb_msb = NULL;
+
+ if (shift) {
+ tmp = get_temp(ctx);
+ tcg_gen_shli_reg(tmp, in1, shift);
+ in1 = tmp;
+ }
+
+ if (!is_l || cond_need_cb(c)) {
+ TCGv_reg zero = tcg_constant_reg(0);
+ cb_msb = get_temp(ctx);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
+ if (is_c) {
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
+ }
+ if (!is_l) {
+ cb = get_temp(ctx);
+ tcg_gen_xor_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
+ }
+ } else {
+ tcg_gen_add_reg(dest, in1, in2);
+ if (is_c) {
+ tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
+ }
+ }
+
+ /* Compute signed overflow if required. */
+ sv = NULL;
+ if (is_tsv || cond_need_sv(c)) {
+ sv = do_add_sv(ctx, dest, in1, in2);
+ if (is_tsv) {
+ /* ??? Need to include overflow from shift. */
+ gen_helper_tsv(cpu_env, sv);
+ }
+ }
+
+ /* Emit any conditional trap before any writeback. */
+ cond = do_cond(cf, dest, cb_msb, sv);
+ if (is_tc) {
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ /* Write back the result. */
+ if (!is_l) {
+ save_or_nullify(ctx, cpu_psw_cb, cb);
+ save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
+ }
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+}
+
+static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
+ bool is_l, bool is_tsv, bool is_tc, bool is_c)
+{
+ TCGv_reg tcg_r1, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
+ return nullify_end(ctx);
+}
+
+static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
+ bool is_tsv, bool is_tc)
+{
+ TCGv_reg tcg_im, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_im = load_const(ctx, a->i);
+ tcg_r2 = load_gpr(ctx, a->r);
+ do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
+ return nullify_end(ctx);
+}
+
+static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, bool is_tsv, bool is_b,
+ bool is_tc, unsigned cf)
+{
+ TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
+ unsigned c = cf >> 1;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ cb = tcg_temp_new();
+ cb_msb = tcg_temp_new();
+
+ zero = tcg_constant_reg(0);
+ if (is_b) {
+ /* DEST,C = IN1 + ~IN2 + C. */
+ tcg_gen_not_reg(cb, in2);
+ tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
+ tcg_gen_xor_reg(cb, cb, in1);
+ tcg_gen_xor_reg(cb, cb, dest);
+ } else {
+ /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
+ operations by seeding the high word with 1 and subtracting. */
+ tcg_gen_movi_reg(cb_msb, 1);
+ tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
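+ /* I.e. {cb_msb, dest} = {1, in1} - {0, in2}; the high word is the
+ carry-out of in1 + ~in2 + 1. */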
+ tcg_gen_eqv_reg(cb, in1, in2);
+ tcg_gen_xor_reg(cb, cb, dest);
+ }
+
+ /* Compute signed overflow if required. */
+ sv = NULL;
+ if (is_tsv || cond_need_sv(c)) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ if (is_tsv) {
+ gen_helper_tsv(cpu_env, sv);
+ }
+ }
+
+ /* Compute the condition. We cannot use the special case for borrow. */
+ if (!is_b) {
+ cond = do_sub_cond(cf, dest, in1, in2, sv);
+ } else {
+ cond = do_cond(cf, dest, cb_msb, sv);
+ }
+
+ /* Emit any conditional trap before any writeback. */
+ if (is_tc) {
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ /* Write back the result. */
+ save_or_nullify(ctx, cpu_psw_cb, cb);
+ save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+ tcg_temp_free(cb);
+ tcg_temp_free(cb_msb);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+}
+
+static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
+ bool is_tsv, bool is_b, bool is_tc)
+{
+ TCGv_reg tcg_r1, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
+ return nullify_end(ctx);
+}
+
+static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
+{
+ TCGv_reg tcg_im, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_im = load_const(ctx, a->i);
+ tcg_r2 = load_gpr(ctx, a->r);
+ do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
+ return nullify_end(ctx);
+}
+
+static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf)
+{
+ TCGv_reg dest, sv;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ tcg_gen_sub_reg(dest, in1, in2);
+
+ /* Compute signed overflow if required. */
+ sv = NULL;
+ if (cond_need_sv(cf >> 1)) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ }
+
+ /* Form the condition for the compare. */
+ cond = do_sub_cond(cf, dest, in1, in2, sv);
+
+ /* Clear. */
+ tcg_gen_movi_reg(dest, 0);
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+}
+
+static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+{
+ TCGv_reg dest = dest_gpr(ctx, rt);
+
+ /* Perform the operation, and writeback. */
+ fn(dest, in1, in2);
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (cf) {
+ ctx->null_cond = do_log_cond(cf, dest);
+ }
+}
+
+static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+{
+ TCGv_reg tcg_r1, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
+ return nullify_end(ctx);
+}
+
+static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
+ TCGv_reg in2, unsigned cf, bool is_tc,
+ void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
+{
+ TCGv_reg dest;
+ DisasCond cond;
+
+ if (cf == 0) {
+ dest = dest_gpr(ctx, rt);
+ fn(dest, in1, in2);
+ save_gpr(ctx, rt, dest);
+ cond_free(&ctx->null_cond);
+ } else {
+ dest = tcg_temp_new();
+ fn(dest, in1, in2);
+
+ cond = do_unit_cond(cf, dest, in1, in2);
+
+ if (is_tc) {
+ TCGv_reg tmp = tcg_temp_new();
+ tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+ save_gpr(ctx, rt, dest);
+
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
+ from the top 2 bits of the base register. There are a few system
+ instructions that have a 3-bit space specifier, for which SR0 is
+ not special. To handle this, pass ~SP. */
+static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
+{
+ TCGv_ptr ptr;
+ TCGv_reg tmp;
+ TCGv_i64 spc;
+
+ if (sp != 0) {
+ if (sp < 0) {
+ sp = ~sp;
+ }
+ spc = get_temp_tl(ctx);
+ load_spr(ctx, spc, sp);
+ return spc;
+ }
+ if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ return cpu_srH;
+ }
+
+ ptr = tcg_temp_new_ptr();
+ tmp = tcg_temp_new();
+ spc = get_temp_tl(ctx);
+
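+ /* The top two bits of the base select sr[4 + top2]. Keep them as a
+ byte offset: 030 == 0b11000, i.e. top2 * 8, with 8 bytes per
+ space register. */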
+ tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
+ tcg_gen_andi_reg(tmp, tmp, 030);
+ tcg_gen_trunc_reg_ptr(ptr, tmp);
+ tcg_temp_free(tmp);
+
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
+ tcg_temp_free_ptr(ptr);
+
+ return spc;
+}
+#endif
+
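+/* Form the full guest virtual address for a memory insn: the offset is
+ base plus a scaled index or displacement; in system mode the space
+ register value is OR'd into the high bits, with PSW_W masking the
+ offset to 62 bits. */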
+static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
+ unsigned rb, unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, bool is_phys)
+{
+ TCGv_reg base = load_gpr(ctx, rb);
+ TCGv_reg ofs;
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ ofs = get_temp(ctx);
+ tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
+ tcg_gen_add_reg(ofs, ofs, base);
+ } else if (disp || modify) {
+ ofs = get_temp(ctx);
+ tcg_gen_addi_reg(ofs, base, disp);
+ } else {
+ ofs = base;
+ }
+
+ *pofs = ofs;
+#ifdef CONFIG_USER_ONLY
+ *pgva = (modify <= 0 ? ofs : base);
+#else
+ TCGv_tl addr = get_temp_tl(ctx);
+ tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
+ if (ctx->tb_flags & PSW_W) {
+ tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
+ }
+ if (!is_phys) {
+ tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
+ }
+ *pgva = addr;
+#endif
+}
+
+/* Emit a memory load. The modify parameter should be
+ * < 0 for pre-modify,
+ * > 0 for post-modify,
+ * = 0 for no base register update.
+ */
+static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, MemOp mop)
+{
+ TCGv_reg ofs;
+ TCGv_tl addr;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
+ }
+}
+
+static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, MemOp mop)
+{
+ TCGv_reg ofs;
+ TCGv_tl addr;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
+ }
+}
+
+static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, MemOp mop)
+{
+ TCGv_reg ofs;
+ TCGv_tl addr;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
+ }
+}
+
+static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, MemOp mop)
+{
+ TCGv_reg ofs;
+ TCGv_tl addr;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
+ }
+}
+
+#if TARGET_REGISTER_BITS == 64
+#define do_load_reg do_load_64
+#define do_store_reg do_store_64
+#else
+#define do_load_reg do_load_32
+#define do_store_reg do_store_32
+#endif
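+/* On a 32-bit hppa target these resolve to the _32 variants; a
+   TARGET_HPPA64 build (TARGET_REGISTER_BITS == 64) would select the
+   _64 forms instead. */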
+
+static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, MemOp mop)
+{
+ TCGv_reg dest;
+
+ nullify_over(ctx);
+
+ if (modify == 0) {
+ /* No base register update. */
+ dest = dest_gpr(ctx, rt);
+ } else {
+        /* Make sure that if RT == RB, we see the result of the load. */
+ dest = get_temp(ctx);
+ }
+ do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
+ save_gpr(ctx, rt, dest);
+
+ return nullify_end(ctx);
+}
+
+static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new_i32();
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ save_frw_i32(rt, tmp);
+ tcg_temp_free_i32(tmp);
+
+ if (rt == 0) {
+ gen_helper_loaded_fr0(cpu_env);
+ }
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
+{
+ return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
+ a->disp, a->sp, a->m);
+}
+
+static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new_i64();
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ save_frd(rt, tmp);
+ tcg_temp_free_i64(tmp);
+
+ if (rt == 0) {
+ gen_helper_loaded_fr0(cpu_env);
+ }
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
+{
+ return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
+ a->disp, a->sp, a->m);
+}
+
+static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
+ target_sreg disp, unsigned sp,
+ int modify, MemOp mop)
+{
+ nullify_over(ctx);
+ do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
+ return nullify_end(ctx);
+}
+
+static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+
+ tmp = load_frw_i32(rt);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
+ tcg_temp_free_i32(tmp);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
+{
+ return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
+ a->disp, a->sp, a->m);
+}
+
+static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify)
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+
+ tmp = load_frd(rt);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ tcg_temp_free_i64(tmp);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
+{
+ return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
+ a->disp, a->sp, a->m);
+}
+
+static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+ tmp = load_frw0_i32(ra);
+
+ func(tmp, cpu_env, tmp);
+
+ save_frw_i32(rt, tmp);
+ tcg_temp_free_i32(tmp);
+ return nullify_end(ctx);
+}
+
+static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ nullify_over(ctx);
+ src = load_frd(ra);
+ dst = tcg_temp_new_i32();
+
+ func(dst, cpu_env, src);
+
+ tcg_temp_free_i64(src);
+ save_frw_i32(rt, dst);
+ tcg_temp_free_i32(dst);
+ return nullify_end(ctx);
+}
+
+static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+ tmp = load_frd0(ra);
+
+ func(tmp, cpu_env, tmp);
+
+ save_frd(rt, tmp);
+ tcg_temp_free_i64(tmp);
+ return nullify_end(ctx);
+}
+
+static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 src;
+ TCGv_i64 dst;
+
+ nullify_over(ctx);
+ src = load_frw0_i32(ra);
+ dst = tcg_temp_new_i64();
+
+ func(dst, cpu_env, src);
+
+ tcg_temp_free_i32(src);
+ save_frd(rt, dst);
+ tcg_temp_free_i64(dst);
+ return nullify_end(ctx);
+}
+
+static bool do_fop_weww(DisasContext *ctx, unsigned rt,
+ unsigned ra, unsigned rb,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 a, b;
+
+ nullify_over(ctx);
+ a = load_frw0_i32(ra);
+ b = load_frw0_i32(rb);
+
+ func(a, cpu_env, a, b);
+
+ tcg_temp_free_i32(b);
+ save_frw_i32(rt, a);
+ tcg_temp_free_i32(a);
+ return nullify_end(ctx);
+}
+
+static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
+ unsigned ra, unsigned rb,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 a, b;
+
+ nullify_over(ctx);
+ a = load_frd0(ra);
+ b = load_frd0(rb);
+
+ func(a, cpu_env, a, b);
+
+ tcg_temp_free_i64(b);
+ save_frd(rt, a);
+ tcg_temp_free_i64(a);
+ return nullify_end(ctx);
+}
+
+/* Emit an unconditional branch to a direct target, which may or may not
+ have already had nullification handled. */
+static bool do_dbranch(DisasContext *ctx, target_ureg dest,
+ unsigned link, bool is_n)
+{
+ if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ ctx->iaoq_n = dest;
+ if (is_n) {
+ ctx->null_cond.c = TCG_COND_ALWAYS;
+ }
+ } else {
+ nullify_over(ctx);
+
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+
+ if (is_n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 0, dest, dest + 4);
+ } else {
+ nullify_set(ctx, is_n);
+ gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
+ }
+
+ nullify_end(ctx);
+
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+ return true;
+}
+
+/* Emit a conditional branch to a direct target. If the branch itself
+ is nullified, we should have already used nullify_over. */
+static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
+ DisasCond *cond)
+{
+ target_ureg dest = iaoq_dest(ctx, disp);
+ TCGLabel *taken = NULL;
+ TCGCond c = cond->c;
+ bool n;
+
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ /* Handle TRUE and NEVER as direct branches. */
+ if (c == TCG_COND_ALWAYS) {
+ return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
+ }
+ if (c == TCG_COND_NEVER) {
+ return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
+ }
+
+ taken = gen_new_label();
+ tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
+ cond_free(cond);
+
+ /* Not taken: Condition not satisfied; nullify on backward branches. */
+ n = is_n && disp < 0;
+ if (n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
+ } else {
+ if (!n && ctx->null_lab) {
+ gen_set_label(ctx->null_lab);
+ ctx->null_lab = NULL;
+ }
+ nullify_set(ctx, n);
+ if (ctx->iaoq_n == -1) {
+ /* The temporary iaoq_n_var died at the branch above.
+ Regenerate it here instead of saving it. */
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ }
+ gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
+ }
+
+ gen_set_label(taken);
+
+ /* Taken: Condition satisfied; nullify on forward branches. */
+ n = is_n && disp >= 0;
+ if (n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 1, dest, dest + 4);
+ } else {
+ nullify_set(ctx, n);
+ gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
+ }
+
+ /* Not taken: the branch itself was nullified. */
+ if (ctx->null_lab) {
+ gen_set_label(ctx->null_lab);
+ ctx->null_lab = NULL;
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ } else {
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+ return true;
+}
+
+/* Emit an unconditional branch to an indirect target. This handles
+ nullification of the branch itself. */
+static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
+ unsigned link, bool is_n)
+{
+ TCGv_reg a0, a1, next, tmp;
+ TCGCond c;
+
+ assert(ctx->null_lab == NULL);
+
+ if (ctx->null_cond.c == TCG_COND_NEVER) {
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ next = get_temp(ctx);
+ tcg_gen_mov_reg(next, dest);
+ if (is_n) {
+ if (use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, next);
+ tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
+ nullify_set(ctx, 0);
+ ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
+ return true;
+ }
+ ctx->null_cond.c = TCG_COND_ALWAYS;
+ }
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
+ } else if (is_n && use_nullify_skip(ctx)) {
+ /* The (conditional) branch, B, nullifies the next insn, N,
+           and we're allowed to skip executing N (no single-step or
+ tracepoint in effect). Since the goto_ptr that we must use
+ for the indirect branch consumes no special resources, we
+ can (conditionally) skip B and continue execution. */
+ /* The use_nullify_skip test implies we have a known control path. */
+ tcg_debug_assert(ctx->iaoq_b != -1);
+ tcg_debug_assert(ctx->iaoq_n != -1);
+
+ /* We do have to handle the non-local temporary, DEST, before
+           branching.  Since IAOQ_F is not really live at this point, we
+ can simply store DEST optimistically. Similarly with IAOQ_B. */
+ tcg_gen_mov_reg(cpu_iaoq_f, dest);
+ tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
+
+ nullify_over(ctx);
+ if (link != 0) {
+ tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
+ }
+ tcg_gen_lookup_and_goto_ptr();
+ return nullify_end(ctx);
+ } else {
+ c = ctx->null_cond.c;
+ a0 = ctx->null_cond.a0;
+ a1 = ctx->null_cond.a1;
+
+ tmp = tcg_temp_new();
+ next = get_temp(ctx);
+
+ copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
+ tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
+
+ if (link != 0) {
+ tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
+ }
+
+ if (is_n) {
+ /* The branch nullifies the next insn, which means the state of N
+ after the branch is the inverse of the state of N that applied
+ to the branch. */
+ tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_n();
+ ctx->psw_n_nonzero = true;
+ } else {
+ cond_free(&ctx->null_cond);
+ }
+ }
+ return true;
+}
+
+/* Implement
+ * if (IAOQ_Front{30..31} < GR[b]{30..31})
+ * IAOQ_Next{30..31} ← GR[b]{30..31};
+ * else
+ * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
+ * which keeps the privilege level from being increased.
+ */
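+/* Worked example: at privilege 2, a branch target with privilege nub 0
+   is rewritten to nub 2 (no increase), while a target with nub 3 is
+   kept as-is, since the GTU movcond below selects whichever nub is
+   numerically larger, i.e. less privileged. */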
+static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
+{
+ TCGv_reg dest;
+ switch (ctx->privilege) {
+ case 0:
+ /* Privilege 0 is maximum and is allowed to decrease. */
+ return offset;
+ case 3:
+ /* Privilege 3 is minimum and is never allowed to increase. */
+ dest = get_temp(ctx);
+ tcg_gen_ori_reg(dest, offset, 3);
+ break;
+ default:
+ dest = get_temp(ctx);
+ tcg_gen_andi_reg(dest, offset, -4);
+ tcg_gen_ori_reg(dest, dest, ctx->privilege);
+ tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
+ break;
+ }
+ return dest;
+}
+
+#ifdef CONFIG_USER_ONLY
+/* On Linux, page zero is normally marked execute only + gateway.
+ Therefore normal read or write is supposed to fail, but specific
+ offsets have kernel code mapped to raise permissions to implement
+ system calls. Handling this via an explicit check here, rather
+   than in the "be disp(sr2,r0)" instruction that probably sent us
+ here, is the easiest way to handle the branch delay slot on the
+ aforementioned BE. */
+static void do_page_zero(DisasContext *ctx)
+{
+ /* If by some means we get here with PSW[N]=1, that implies that
+ the B,GATE instruction would be skipped, and we'd fault on the
+       next insn within the privileged page. */
+ switch (ctx->null_cond.c) {
+ case TCG_COND_NEVER:
+ break;
+ case TCG_COND_ALWAYS:
+ tcg_gen_movi_reg(cpu_psw_n, 0);
+ goto do_sigill;
+ default:
+ /* Since this is always the first (and only) insn within the
+ TB, we should know the state of PSW[N] from TB->FLAGS. */
+ g_assert_not_reached();
+ }
+
+ /* Check that we didn't arrive here via some means that allowed
+ non-sequential instruction execution. Normally the PSW[B] bit
+       detects this by preventing the B,GATE instruction from executing
+ under such conditions. */
+ if (ctx->iaoq_b != ctx->iaoq_f + 4) {
+ goto do_sigill;
+ }
+
+ switch (ctx->iaoq_f & -4) {
+ case 0x00: /* Null pointer call */
+ gen_excp_1(EXCP_IMP);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+
+ case 0xb0: /* LWS */
+ gen_excp_1(EXCP_SYSCALL_LWS);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+
+ case 0xe0: /* SET_THREAD_POINTER */
+ tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
+ tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
+ break;
+
+ case 0x100: /* SYSCALL */
+ gen_excp_1(EXCP_SYSCALL);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+
+ default:
+ do_sigill:
+ gen_excp_1(EXCP_ILL);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+ }
+}
+#endif
+
+static bool trans_nop(DisasContext *ctx, arg_nop *a)
+{
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_break(DisasContext *ctx, arg_break *a)
+{
+ return gen_excp_iir(ctx, EXCP_BREAK);
+}
+
+static bool trans_sync(DisasContext *ctx, arg_sync *a)
+{
+ /* No point in nullifying the memory barrier. */
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
+{
+ unsigned rt = a->t;
+ TCGv_reg tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(tmp, ctx->iaoq_f);
+ save_gpr(ctx, rt, tmp);
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
+{
+ unsigned rt = a->t;
+ unsigned rs = a->sp;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_reg t1 = tcg_temp_new();
+
+ load_spr(ctx, t0, rs);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_reg(t1, t0);
+
+ save_gpr(ctx, rt, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free_i64(t0);
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
+{
+ unsigned rt = a->t;
+ unsigned ctl = a->r;
+ TCGv_reg tmp;
+
+ switch (ctl) {
+ case CR_SAR:
+#ifdef TARGET_HPPA64
+ if (a->e == 0) {
+ /* MFSAR without ,W masks low 5 bits. */
+ tmp = dest_gpr(ctx, rt);
+ tcg_gen_andi_reg(tmp, cpu_sar, 31);
+ save_gpr(ctx, rt, tmp);
+ goto done;
+ }
+#endif
+ save_gpr(ctx, rt, cpu_sar);
+ goto done;
+ case CR_IT: /* Interval Timer */
+ /* FIXME: Respect PSW_S bit. */
+ nullify_over(ctx);
+ tmp = dest_gpr(ctx, rt);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ gen_helper_read_interval_timer(tmp);
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ } else {
+ gen_helper_read_interval_timer(tmp);
+ }
+ save_gpr(ctx, rt, tmp);
+ return nullify_end(ctx);
+ case 26:
+ case 27:
+ break;
+ default:
+ /* All other control registers are privileged. */
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+ break;
+ }
+
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ save_gpr(ctx, rt, tmp);
+
+ done:
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
+{
+ unsigned rr = a->r;
+ unsigned rs = a->sp;
+ TCGv_i64 t64;
+
+ if (rs >= 5) {
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+ }
+ nullify_over(ctx);
+
+ t64 = tcg_temp_new_i64();
+ tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
+ tcg_gen_shli_i64(t64, t64, 32);
+
+ if (rs >= 4) {
+ tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
+ ctx->tb_flags &= ~TB_FLAG_SR_SAME;
+ } else {
+ tcg_gen_mov_i64(cpu_sr[rs], t64);
+ }
+ tcg_temp_free_i64(t64);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
+{
+ unsigned ctl = a->t;
+ TCGv_reg reg;
+ TCGv_reg tmp;
+
+ if (ctl == CR_SAR) {
+ reg = load_gpr(ctx, a->r);
+ tmp = tcg_temp_new();
+ tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
+ save_or_nullify(ctx, cpu_sar, tmp);
+ tcg_temp_free(tmp);
+
+ cond_free(&ctx->null_cond);
+ return true;
+ }
+
+ /* All other control registers are privileged or read-only. */
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+ reg = load_gpr(ctx, a->r);
+
+ switch (ctl) {
+ case CR_IT:
+ gen_helper_write_interval_timer(cpu_env, reg);
+ break;
+ case CR_EIRR:
+ gen_helper_write_eirr(cpu_env, reg);
+ break;
+ case CR_EIEM:
+ gen_helper_write_eiem(cpu_env, reg);
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ break;
+
+ case CR_IIASQ:
+ case CR_IIAOQ:
+ /* FIXME: Respect PSW_Q bit */
+ /* The write advances the queue and stores to the back element. */
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env,
+ offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
+ tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ tcg_gen_st_reg(reg, cpu_env,
+ offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
+ break;
+
+ case CR_PID1:
+ case CR_PID2:
+ case CR_PID3:
+ case CR_PID4:
+ tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+#ifndef CONFIG_USER_ONLY
+ gen_helper_change_prot_id(cpu_env);
+#endif
+ break;
+
+ default:
+ tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
+ break;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
+{
+ TCGv_reg tmp = tcg_temp_new();
+
+ tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
+ tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
+ save_or_nullify(ctx, cpu_sar, tmp);
+ tcg_temp_free(tmp);
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
+{
+ TCGv_reg dest = dest_gpr(ctx, a->t);
+
+#ifdef CONFIG_USER_ONLY
+ /* We don't implement space registers in user mode. */
+ tcg_gen_movi_reg(dest, 0);
+#else
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_reg(dest, t0);
+
+ tcg_temp_free_i64(t0);
+#endif
+ save_gpr(ctx, a->t, dest);
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_reg tmp;
+
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_andi_reg(tmp, tmp, ~a->i);
+ gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ save_gpr(ctx, a->t, tmp);
+
+ /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_reg tmp;
+
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
+ tcg_gen_ori_reg(tmp, tmp, a->i);
+ gen_helper_swap_system_mask(tmp, cpu_env, tmp);
+ save_gpr(ctx, a->t, tmp);
+
+ /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_reg tmp, reg;
+ nullify_over(ctx);
+
+ reg = load_gpr(ctx, a->r);
+ tmp = get_temp(ctx);
+ gen_helper_swap_system_mask(tmp, cpu_env, reg);
+
+ /* Exit the TB to recognize new interrupts. */
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool do_rfi(DisasContext *ctx, bool rfi_r)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+
+ if (rfi_r) {
+ gen_helper_rfi_r(cpu_env);
+ } else {
+ gen_helper_rfi(cpu_env);
+ }
+ /* Exit the TB to recognize new interrupts. */
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
+{
+ return do_rfi(ctx, false);
+}
+
+static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
+{
+ return do_rfi(ctx, true);
+}
+
+static bool trans_halt(DisasContext *ctx, arg_halt *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+ gen_helper_halt(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_reset(DisasContext *ctx, arg_reset *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ nullify_over(ctx);
+ gen_helper_reset(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
+{
+ if (a->m) {
+ TCGv_reg dest = dest_gpr(ctx, a->b);
+ TCGv_reg src1 = load_gpr(ctx, a->b);
+ TCGv_reg src2 = load_gpr(ctx, a->x);
+
+ /* The only thing we need to do is the base register modification. */
+ tcg_gen_add_reg(dest, src1, src2);
+ save_gpr(ctx, a->b, dest);
+ }
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_probe(DisasContext *ctx, arg_probe *a)
+{
+ TCGv_reg dest, ofs;
+ TCGv_i32 level, want;
+ TCGv_tl addr;
+
+ nullify_over(ctx);
+
+ dest = dest_gpr(ctx, a->t);
+ form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
+
+ if (a->imm) {
+ level = tcg_constant_i32(a->ri);
+ } else {
+ level = tcg_temp_new_i32();
+ tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
+ tcg_gen_andi_i32(level, level, 3);
+ }
+ want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
+
+ gen_helper_probe(dest, cpu_env, addr, level, want);
+
+ tcg_temp_free_i32(level);
+
+ save_gpr(ctx, a->t, dest);
+ return nullify_end(ctx);
+}
+
+static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_tl addr;
+ TCGv_reg ofs, reg;
+
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
+ reg = load_gpr(ctx, a->r);
+ if (a->addr) {
+ gen_helper_itlba(cpu_env, addr, reg);
+ } else {
+ gen_helper_itlbp(cpu_env, addr, reg);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ if (ctx->tb_flags & PSW_C) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_tl addr;
+ TCGv_reg ofs;
+
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
+ if (a->m) {
+ save_gpr(ctx, a->b, ofs);
+ }
+ if (a->local) {
+ gen_helper_ptlbe(cpu_env);
+ } else {
+ gen_helper_ptlb(cpu_env, addr);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ if (ctx->tb_flags & PSW_C) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
+/*
+ * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
+ * See
+ * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
+ * page 13-9 (195/206)
+ */
+static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_tl addr, atl, stl;
+ TCGv_reg reg;
+
+ nullify_over(ctx);
+
+ /*
+ * FIXME:
+ * if (not (pcxl or pcxl2))
+ * return gen_illegal(ctx);
+ *
+ * Note for future: these are 32-bit systems; no hppa64.
+ */
+
+ atl = tcg_temp_new_tl();
+ stl = tcg_temp_new_tl();
+ addr = tcg_temp_new_tl();
+
+ tcg_gen_ld32u_i64(stl, cpu_env,
+ a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
+ : offsetof(CPUHPPAState, cr[CR_IIASQ]));
+ tcg_gen_ld32u_i64(atl, cpu_env,
+ a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
+ : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
+ tcg_gen_shli_i64(stl, stl, 32);
+ tcg_gen_or_tl(addr, atl, stl);
+ tcg_temp_free_tl(atl);
+ tcg_temp_free_tl(stl);
+
+ reg = load_gpr(ctx, a->r);
+ if (a->addr) {
+ gen_helper_itlba(cpu_env, addr, reg);
+ } else {
+ gen_helper_itlbp(cpu_env, addr, reg);
+ }
+ tcg_temp_free_tl(addr);
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ if (ctx->tb_flags & PSW_C) {
+ ctx->base.is_jmp = DISAS_IAQ_N_STALE;
+ }
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+#ifndef CONFIG_USER_ONLY
+ TCGv_tl vaddr;
+ TCGv_reg ofs, paddr;
+
+ nullify_over(ctx);
+
+ form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
+
+ paddr = tcg_temp_new();
+ gen_helper_lpa(paddr, cpu_env, vaddr);
+
+    /* Note that the physical address result overrides the base modification. */
+ if (a->m) {
+ save_gpr(ctx, a->b, ofs);
+ }
+ save_gpr(ctx, a->t, paddr);
+ tcg_temp_free(paddr);
+
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_lci(DisasContext *ctx, arg_lci *a)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* The Coherence Index is an implementation-defined function of the
+ physical address. Two addresses with the same CI have a coherent
+ view of the cache. Our implementation is to return 0 for all,
+ since the entire address space is coherent. */
+ save_gpr(ctx, a->t, tcg_constant_reg(0));
+
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
+{
+ return do_add_reg(ctx, a, false, false, false, false);
+}
+
+static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
+{
+ return do_add_reg(ctx, a, true, false, false, false);
+}
+
+static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
+{
+ return do_add_reg(ctx, a, false, true, false, false);
+}
+
+static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
+{
+ return do_add_reg(ctx, a, false, false, false, true);
+}
+
+static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
+{
+ return do_add_reg(ctx, a, false, true, false, true);
+}
+
+static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, false, false, false);
+}
+
+static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, true, false, false);
+}
+
+static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, false, false, true);
+}
+
+static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, true, false, true);
+}
+
+static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, false, true, false);
+}
+
+static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_sub_reg(ctx, a, true, true, false);
+}
+
+static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_log_reg(ctx, a, tcg_gen_andc_reg);
+}
+
+static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_log_reg(ctx, a, tcg_gen_and_reg);
+}
+
+static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
+{
+ if (a->cf == 0) {
+ unsigned r2 = a->r2;
+ unsigned r1 = a->r1;
+ unsigned rt = a->t;
+
+ if (rt == 0) { /* NOP */
+ cond_free(&ctx->null_cond);
+ return true;
+ }
+ if (r2 == 0) { /* COPY */
+ if (r1 == 0) {
+ TCGv_reg dest = dest_gpr(ctx, rt);
+ tcg_gen_movi_reg(dest, 0);
+ save_gpr(ctx, rt, dest);
+ } else {
+ save_gpr(ctx, rt, cpu_gr[r1]);
+ }
+ cond_free(&ctx->null_cond);
+ return true;
+ }
+#ifndef CONFIG_USER_ONLY
+ /* These are QEMU extensions and are nops in the real architecture:
+ *
+ * or %r10,%r10,%r10 -- idle loop; wait for interrupt
+ * or %r31,%r31,%r31 -- death loop; offline cpu
+ * currently implemented as idle.
+ */
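+        /* A guest that knows about this extension can thus place
+           "or %r10,%r10,%r10" in its idle loop to halt the virtual cpu
+           until the next interrupt instead of spinning. */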
+ if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
+ /* No need to check for supervisor, as userland can only pause
+ until the next timer interrupt. */
+ nullify_over(ctx);
+
+ /* Advance the instruction queue. */
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ nullify_set(ctx, 0);
+
+ /* Tell the qemu main loop to halt until this cpu has work. */
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
+ offsetof(CPUState, halted) - offsetof(HPPACPU, env));
+ gen_excp_1(EXCP_HALTED);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ return nullify_end(ctx);
+ }
+#endif
+ }
+ return do_log_reg(ctx, a, tcg_gen_or_reg);
+}
+
+static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_log_reg(ctx, a, tcg_gen_xor_reg);
+}
+
+static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
+{
+ TCGv_reg tcg_r1, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
+ return nullify_end(ctx);
+}
+
+static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
+{
+ TCGv_reg tcg_r1, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
+ return nullify_end(ctx);
+}
+
+static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
+{
+ TCGv_reg tcg_r1, tcg_r2, tmp;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, a->r1);
+ tcg_r2 = load_gpr(ctx, a->r2);
+ tmp = get_temp(ctx);
+ tcg_gen_not_reg(tmp, tcg_r2);
+ do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
+ return nullify_end(ctx);
+}
+
+static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_uaddcm(ctx, a, false);
+}
+
+static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
+{
+ return do_uaddcm(ctx, a, true);
+}
+
+static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
+{
+ TCGv_reg tmp;
+
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
+ if (!is_i) {
+ tcg_gen_not_reg(tmp, tmp);
+ }
+ tcg_gen_andi_reg(tmp, tmp, 0x11111111);
+ tcg_gen_muli_reg(tmp, tmp, 6);
+ do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
+ is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
+ return nullify_end(ctx);
+}
+
+static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
+{
+ return do_dcor(ctx, a, false);
+}
+
+static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
+{
+ return do_dcor(ctx, a, true);
+}
+
+static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
+{
+ TCGv_reg dest, add1, add2, addc, zero, in1, in2;
+
+ nullify_over(ctx);
+
+ in1 = load_gpr(ctx, a->r1);
+ in2 = load_gpr(ctx, a->r2);
+
+ add1 = tcg_temp_new();
+ add2 = tcg_temp_new();
+ addc = tcg_temp_new();
+ dest = tcg_temp_new();
+ zero = tcg_constant_reg(0);
+
+ /* Form R1 << 1 | PSW[CB]{8}. */
+ tcg_gen_add_reg(add1, in1, in1);
+ tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
+
+ /* Add or subtract R2, depending on PSW[V]. Proper computation of
+ carry{8} requires that we subtract via + ~R2 + 1, as described in
+ the manual. By extracting and masking V, we can produce the
+ proper inputs to the addition without movcond. */
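+    /* Concretely: the sari below yields 0 or -1 from PSW[V]'s sign bit,
+       so add2 becomes R2 or ~R2 and addc becomes 0 or 1 -- the pair then
+       adds either R2 or ~R2 + 1 (a subtraction) without a branch. */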
+ tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
+ tcg_gen_xor_reg(add2, in2, addc);
+ tcg_gen_andi_reg(addc, addc, 1);
+ /* ??? This is only correct for 32-bit. */
+ tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
+ tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
+
+ tcg_temp_free(addc);
+
+ /* Write back the result register. */
+ save_gpr(ctx, a->t, dest);
+
+ /* Write back PSW[CB]. */
+ tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
+ tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
+
+ /* Write back PSW[V] for the division step. */
+ tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
+ tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
+
+ /* Install the new nullification. */
+ if (a->cf) {
+ TCGv_reg sv = NULL;
+ if (cond_need_sv(a->cf >> 1)) {
+ /* ??? The lshift is supposed to contribute to overflow. */
+ sv = do_add_sv(ctx, dest, add1, add2);
+ }
+ ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
+ }
+
+ tcg_temp_free(add1);
+ tcg_temp_free(add2);
+ tcg_temp_free(dest);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_add_imm(ctx, a, false, false);
+}
+
+static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_add_imm(ctx, a, true, false);
+}
+
+static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_add_imm(ctx, a, false, true);
+}
+
+static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_add_imm(ctx, a, true, true);
+}
+
+static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_sub_imm(ctx, a, false);
+}
+
+static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
+{
+ return do_sub_imm(ctx, a, true);
+}
+
+static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
+{
+ TCGv_reg tcg_im, tcg_r2;
+
+ if (a->cf) {
+ nullify_over(ctx);
+ }
+
+ tcg_im = load_const(ctx, a->i);
+ tcg_r2 = load_gpr(ctx, a->r);
+ do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_ld(DisasContext *ctx, arg_ldst *a)
+{
+ return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
+ a->disp, a->sp, a->m, a->size | MO_TE);
+}
+
+static bool trans_st(DisasContext *ctx, arg_ldst *a)
+{
+ assert(a->x == 0 && a->scale == 0);
+ return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
+}
+
+static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
+{
+ MemOp mop = MO_TE | MO_ALIGN | a->size;
+ TCGv_reg zero, dest, ofs;
+ TCGv_tl addr;
+
+ nullify_over(ctx);
+
+ if (a->m) {
+        /* Base register modification.  Make sure that if RT == RB,
+           we see the result of the load. */
+ dest = get_temp(ctx);
+ } else {
+ dest = dest_gpr(ctx, a->t);
+ }
+
+ form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
+ a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
+
+ /*
+ * For hppa1.1, LDCW is undefined unless aligned mod 16.
+     * However, actual hardware succeeds when aligned mod 4.
+ * Detect this case and log a GUEST_ERROR.
+ *
+ * TODO: HPPA64 relaxes the over-alignment requirement
+ * with the ,co completer.
+ */
+ gen_helper_ldc_check(addr);
+
+ zero = tcg_constant_reg(0);
+ tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
+
+ if (a->m) {
+ save_gpr(ctx, a->b, ofs);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_stby(DisasContext *ctx, arg_stby *a)
+{
+ TCGv_reg ofs, val;
+ TCGv_tl addr;
+
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ val = load_gpr(ctx, a->r);
+ if (a->a) {
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stby_e_parallel(cpu_env, addr, val);
+ } else {
+ gen_helper_stby_e(cpu_env, addr, val);
+ }
+ } else {
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ gen_helper_stby_b_parallel(cpu_env, addr, val);
+ } else {
+ gen_helper_stby_b(cpu_env, addr, val);
+ }
+ }
+ if (a->m) {
+ tcg_gen_andi_reg(ofs, ofs, ~3);
+ save_gpr(ctx, a->b, ofs);
+ }
+
+ return nullify_end(ctx);
+}
+
+static bool trans_lda(DisasContext *ctx, arg_ldst *a)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ trans_ld(ctx, a);
+ ctx->mmu_idx = hold_mmu_idx;
+ return true;
+}
+
+static bool trans_sta(DisasContext *ctx, arg_ldst *a)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ trans_st(ctx, a);
+ ctx->mmu_idx = hold_mmu_idx;
+ return true;
+}
+
+static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
+{
+ TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
+
+ tcg_gen_movi_reg(tcg_rt, a->i);
+ save_gpr(ctx, a->t, tcg_rt);
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_addil(DisasContext *ctx, arg_addil *a)
+{
+ TCGv_reg tcg_rt = load_gpr(ctx, a->r);
+ TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
+
+ tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
+ save_gpr(ctx, 1, tcg_r1);
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
+{
+ TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
+
+    /* Special case rb == 0 for the LDI pseudo-op.
+ The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
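+    /* E.g. "ldi 5,%r3" assembles as "ldo 5(%r0),%r3" and takes this path. */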
+ if (a->b == 0) {
+ tcg_gen_movi_reg(tcg_rt, a->i);
+ } else {
+ tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
+ }
+ save_gpr(ctx, a->t, tcg_rt);
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
+ unsigned c, unsigned f, unsigned n, int disp)
+{
+ TCGv_reg dest, in2, sv;
+ DisasCond cond;
+
+ in2 = load_gpr(ctx, r);
+ dest = get_temp(ctx);
+
+ tcg_gen_sub_reg(dest, in1, in2);
+
+ sv = NULL;
+ if (cond_need_sv(c)) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ }
+
+ cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
+{
+ nullify_over(ctx);
+ return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
+}
+
+static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
+{
+ nullify_over(ctx);
+ return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
+}
+
+static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
+ unsigned c, unsigned f, unsigned n, int disp)
+{
+ TCGv_reg dest, in2, sv, cb_msb;
+ DisasCond cond;
+
+ in2 = load_gpr(ctx, r);
+ dest = tcg_temp_new();
+ sv = NULL;
+ cb_msb = NULL;
+
+ if (cond_need_cb(c)) {
+ cb_msb = get_temp(ctx);
+ tcg_gen_movi_reg(cb_msb, 0);
+ tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ } else {
+ tcg_gen_add_reg(dest, in1, in2);
+ }
+ if (cond_need_sv(c)) {
+ sv = do_add_sv(ctx, dest, in1, in2);
+ }
+
+ cond = do_cond(c * 2 + f, dest, cb_msb, sv);
+ save_gpr(ctx, r, dest);
+ tcg_temp_free(dest);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static bool trans_addb(DisasContext *ctx, arg_addb *a)
+{
+ nullify_over(ctx);
+ return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
+}
+
+static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
+{
+ nullify_over(ctx);
+ return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
+}
+
+static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
+{
+ TCGv_reg tmp, tcg_r;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new();
+ tcg_r = load_gpr(ctx, a->r);
+ tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
+
+ cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ tcg_temp_free(tmp);
+ return do_cbranch(ctx, a->disp, a->n, &cond);
+}
+
+static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
+{
+ TCGv_reg tmp, tcg_r;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new();
+ tcg_r = load_gpr(ctx, a->r);
+ tcg_gen_shli_reg(tmp, tcg_r, a->p);
+
+ cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ tcg_temp_free(tmp);
+ return do_cbranch(ctx, a->disp, a->n, &cond);
+}
+
+static bool trans_movb(DisasContext *ctx, arg_movb *a)
+{
+ TCGv_reg dest;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ dest = dest_gpr(ctx, a->r2);
+ if (a->r1 == 0) {
+ tcg_gen_movi_reg(dest, 0);
+ } else {
+ tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
+ }
+
+ cond = do_sed_cond(a->c, dest);
+ return do_cbranch(ctx, a->disp, a->n, &cond);
+}
+
+static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
+{
+ TCGv_reg dest;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ dest = dest_gpr(ctx, a->r);
+ tcg_gen_movi_reg(dest, a->i);
+
+ cond = do_sed_cond(a->c, dest);
+ return do_cbranch(ctx, a->disp, a->n, &cond);
+}
+
+static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
+{
+ TCGv_reg dest;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ if (a->r1 == 0) {
+ tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
+ tcg_gen_shr_reg(dest, dest, cpu_sar);
+ } else if (a->r1 == a->r2) {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
+ tcg_gen_rotr_i32(t32, t32, cpu_sar);
+ tcg_gen_extu_i32_reg(dest, t32);
+ tcg_temp_free_i32(t32);
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 s = tcg_temp_new_i64();
+
+ tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
+ tcg_gen_extu_reg_i64(s, cpu_sar);
+ tcg_gen_shr_i64(t, t, s);
+ tcg_gen_trunc_i64_reg(dest, t);
+
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(s);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
+{
+ unsigned sa = 31 - a->cpos;
+ TCGv_reg dest, t2;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ t2 = load_gpr(ctx, a->r2);
+ if (a->r1 == a->r2) {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ tcg_gen_trunc_reg_i32(t32, t2);
+ tcg_gen_rotri_i32(t32, t32, sa);
+ tcg_gen_extu_i32_reg(dest, t32);
+ tcg_temp_free_i32(t32);
+ } else if (a->r1 == 0) {
+ tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
+ } else {
+ TCGv_reg t0 = tcg_temp_new();
+ tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
+ tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
+ tcg_temp_free(t0);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
+{
+ unsigned len = 32 - a->clen;
+ TCGv_reg dest, src, tmp;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ src = load_gpr(ctx, a->r);
+ tmp = tcg_temp_new();
+
+    /* Recall that SAR uses big-endian bit numbering. */
+ tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
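+    /* Illustrative, for 32-bit registers: SAR == 31 names the rightmost
+       bit, and the xori above turns it into a shift count of 0. */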
+ if (a->se) {
+ tcg_gen_sar_reg(dest, src, tmp);
+ tcg_gen_sextract_reg(dest, dest, 0, len);
+ } else {
+ tcg_gen_shr_reg(dest, src, tmp);
+ tcg_gen_extract_reg(dest, dest, 0, len);
+ }
+ tcg_temp_free(tmp);
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
+{
+ unsigned len = 32 - a->clen;
+ unsigned cpos = 31 - a->pos;
+ TCGv_reg dest, src;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ src = load_gpr(ctx, a->r);
+ if (a->se) {
+ tcg_gen_sextract_reg(dest, src, cpos, len);
+ } else {
+ tcg_gen_extract_reg(dest, src, cpos, len);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
+{
+ unsigned len = 32 - a->clen;
+ target_sreg mask0, mask1;
+ TCGv_reg dest;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+ if (a->cpos + len > 32) {
+ len = 32 - a->cpos;
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ mask0 = deposit64(0, a->cpos, len, a->i);
+ mask1 = deposit64(-1, a->cpos, len, a->i);
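+    /* Illustrative: for cpos = 4, len = 8, i = 0xa5, mask0 == 0xa50 (the
+       field over zeros) and mask1 == ~0xff0 | 0xa50 (the field over
+       ones); the nz flag below then selects merge versus overwrite. */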
+
+ if (a->nz) {
+ TCGv_reg src = load_gpr(ctx, a->t);
+ if (mask1 != -1) {
+ tcg_gen_andi_reg(dest, src, mask1);
+ src = dest;
+ }
+ tcg_gen_ori_reg(dest, src, mask0);
+ } else {
+ tcg_gen_movi_reg(dest, mask0);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
+{
+ unsigned rs = a->nz ? a->t : 0;
+ unsigned len = 32 - a->clen;
+ TCGv_reg dest, val;
+
+ if (a->c) {
+ nullify_over(ctx);
+ }
+ if (a->cpos + len > 32) {
+ len = 32 - a->cpos;
+ }
+
+ dest = dest_gpr(ctx, a->t);
+ val = load_gpr(ctx, a->r);
+ if (rs == 0) {
+ tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
+ } else {
+ tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
+ }
+ save_gpr(ctx, a->t, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (a->c) {
+ ctx->null_cond = do_sed_cond(a->c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
+ unsigned nz, unsigned clen, TCGv_reg val)
+{
+ unsigned rs = nz ? rt : 0;
+ unsigned len = 32 - clen;
+ TCGv_reg mask, tmp, shift, dest;
+ unsigned msb = 1U << (len - 1);
+
+ dest = dest_gpr(ctx, rt);
+ shift = tcg_temp_new();
+ tmp = tcg_temp_new();
+
+ /* Convert big-endian bit numbering in SAR to left-shift. */
+ tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
+
+ mask = tcg_const_reg(msb + (msb - 1));
+ tcg_gen_and_reg(tmp, val, mask);
+ if (rs) {
+ tcg_gen_shl_reg(mask, mask, shift);
+ tcg_gen_shl_reg(tmp, tmp, shift);
+ tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
+ tcg_gen_or_reg(dest, dest, tmp);
+ } else {
+ tcg_gen_shl_reg(dest, tmp, shift);
+ }
+ tcg_temp_free(shift);
+ tcg_temp_free(mask);
+ tcg_temp_free(tmp);
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx);
+}
+
+static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
+{
+ if (a->c) {
+ nullify_over(ctx);
+ }
+ return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
+}
+
+static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
+{
+ if (a->c) {
+ nullify_over(ctx);
+ }
+ return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
+}
+
+static bool trans_be(DisasContext *ctx, arg_be *a)
+{
+ TCGv_reg tmp;
+
+#ifdef CONFIG_USER_ONLY
+    /* ??? It seems like there should be a good way of using
+       "be disp(sr2, r0)", the canonical gateway entry mechanism,
+       to our advantage.  But that appears to be inconvenient to
+       manage alongside branch delay slots.  Therefore we handle
+       entry into the gateway page via absolute address. */
+    /* Since we don't implement spaces, just branch.  Note the special
+       case of "be disp(*,r0)", which uses a direct branch to disp so
+       that we can goto_tb to the TB containing the syscall. */
+ if (a->b == 0) {
+ return do_dbranch(ctx, a->disp, a->l, a->n);
+ }
+#else
+ nullify_over(ctx);
+#endif
+
+ tmp = get_temp(ctx);
+ tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
+ tmp = do_ibranch_priv(ctx, tmp);
+
+#ifdef CONFIG_USER_ONLY
+ return do_ibranch(ctx, tmp, a->l, a->n);
+#else
+ TCGv_i64 new_spc = tcg_temp_new_i64();
+
+ load_spr(ctx, new_spc, a->sp);
+ if (a->l) {
+ copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
+ tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
+ }
+ if (a->n && use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, tmp);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_mov_i64(cpu_iasq_f, new_spc);
+ tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
+ } else {
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ tcg_gen_mov_reg(cpu_iaoq_b, tmp);
+ tcg_gen_mov_i64(cpu_iasq_b, new_spc);
+ nullify_set(ctx, a->n);
+ }
+ tcg_temp_free_i64(new_spc);
+ tcg_gen_lookup_and_goto_ptr();
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return nullify_end(ctx);
+#endif
+}
+
+static bool trans_bl(DisasContext *ctx, arg_bl *a)
+{
+ return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
+}
+
+static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
+{
+ target_ureg dest = iaoq_dest(ctx, a->disp);
+
+ nullify_over(ctx);
+
+ /* Make sure the caller hasn't done something weird with the queue.
+ * ??? This is not quite the same as the PSW[B] bit, which would be
+ * expensive to track. Real hardware will trap for
+ * b gateway
+ * b gateway+4 (in delay slot of first branch)
+ * However, checking for a non-sequential instruction queue *will*
+ * diagnose the security hole
+ * b gateway
+ * b evil
+ * in which instructions at evil would run with increased privs.
+ */
+ if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
+ return gen_illegal(ctx);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb_flags & PSW_C) {
+ CPUHPPAState *env = ctx->cs->env_ptr;
+ int type = hppa_artype_for_page(env, ctx->base.pc_next);
+ /* If we could not find a TLB entry, then we need to generate an
+ ITLB miss exception so the kernel will provide it.
+ The resulting TLB fill operation will invalidate this TB and
+ we will re-translate, at which point we *will* be able to find
+ the TLB entry and determine if this is in fact a gateway page. */
+ if (type < 0) {
+ gen_excp(ctx, EXCP_ITLB_MISS);
+ return true;
+ }
+ /* No change for non-gateway pages or for priv decrease. */
+ if (type >= 4 && type - 4 < ctx->privilege) {
+ dest = deposit32(dest, 0, 2, type - 4);
+ }
+ } else {
+ dest &= -4; /* priv = 0 */
+ }
+#endif
+
+ if (a->l) {
+ TCGv_reg tmp = dest_gpr(ctx, a->l);
+ if (ctx->privilege < 3) {
+ tcg_gen_andi_reg(tmp, tmp, -4);
+ }
+ tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
+ save_gpr(ctx, a->l, tmp);
+ }
+
+ return do_dbranch(ctx, dest, 0, a->n);
+}
+
+static bool trans_blr(DisasContext *ctx, arg_blr *a)
+{
+ if (a->x) {
+ TCGv_reg tmp = get_temp(ctx);
+ tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
+ tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
+ /* The computation here never changes privilege level. */
+ return do_ibranch(ctx, tmp, a->l, a->n);
+ } else {
+ /* BLR R0,RX is a good way to load PC+8 into RX. */
+ return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
+ }
+}
+
+static bool trans_bv(DisasContext *ctx, arg_bv *a)
+{
+ TCGv_reg dest;
+
+ if (a->x == 0) {
+ dest = load_gpr(ctx, a->b);
+ } else {
+ dest = get_temp(ctx);
+ tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
+ tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
+ }
+ dest = do_ibranch_priv(ctx, dest);
+ return do_ibranch(ctx, dest, 0, a->n);
+}
+
+static bool trans_bve(DisasContext *ctx, arg_bve *a)
+{
+ TCGv_reg dest;
+
+#ifdef CONFIG_USER_ONLY
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
+ return do_ibranch(ctx, dest, a->l, a->n);
+#else
+ nullify_over(ctx);
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
+
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ copy_iaoq_entry(cpu_iaoq_b, -1, dest);
+ tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
+ if (a->l) {
+ copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ nullify_set(ctx, a->n);
+ tcg_gen_lookup_and_goto_ptr();
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return nullify_end(ctx);
+#endif
+}
+
+/*
+ * Float class 0
+ */
+
+static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_mov_i32(dst, src);
+}
+
+static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
+}
+
+static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_mov_i64(dst, src);
+}
+
+static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
+}
+
+static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_andi_i32(dst, src, INT32_MAX);
+}
+
+static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
+}
+
+static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_andi_i64(dst, src, INT64_MAX);
+}
+
+static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
+}
+
+static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
+}
+
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
+}
+
+static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
+}
+
+static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
+}
+
+static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_xori_i32(dst, src, INT32_MIN);
+}
+
+static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
+}
+
+static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_xori_i64(dst, src, INT64_MIN);
+}
+
+static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
+}
+
+static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_ori_i32(dst, src, INT32_MIN);
+}
+
+static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
+}
+
+static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_ori_i64(dst, src, INT64_MIN);
+}
+
+static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
+}
+
+/*
+ * Float class 1
+ */
+
+static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
+}
+
+static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
+}
+
+static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
+}
+
+static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
+}
+
+static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
+}
+
+static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
+}
+
+static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
+}
+
+static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
+}
+
+static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
+}
+
+static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
+}
+
+static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
+}
+
+static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
+}
+
+static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
+}
+
+static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
+}
+
+static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
+}
+
+static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
+}
+
+static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
+}
+
+static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
+}
+
+static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
+}
+
+static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
+}
+
+static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
+}
+
+static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
+}
+
+static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
+}
+
+static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
+}
+
+static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
+}
+
+static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
+{
+ return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
+}
+
+/*
+ * Float class 2
+ */
+
+static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
+{
+ TCGv_i32 ta, tb, tc, ty;
+
+ nullify_over(ctx);
+
+ ta = load_frw0_i32(a->r1);
+ tb = load_frw0_i32(a->r2);
+ ty = tcg_constant_i32(a->y);
+ tc = tcg_constant_i32(a->c);
+
+ gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
+
+ tcg_temp_free_i32(ta);
+ tcg_temp_free_i32(tb);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
+{
+ TCGv_i64 ta, tb;
+ TCGv_i32 tc, ty;
+
+ nullify_over(ctx);
+
+ ta = load_frd0(a->r1);
+ tb = load_frd0(a->r2);
+ ty = tcg_constant_i32(a->y);
+ tc = tcg_constant_i32(a->c);
+
+ gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
+
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
+{
+ TCGv_reg t;
+
+ nullify_over(ctx);
+
+ t = get_temp(ctx);
+ tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+
+ if (a->y == 1) {
+ int mask;
+ bool inv = false;
+
+ switch (a->c) {
+ case 0: /* simple */
+ tcg_gen_andi_reg(t, t, 0x4000000);
+ ctx->null_cond = cond_make_0(TCG_COND_NE, t);
+ goto done;
+ case 2: /* rej */
+ inv = true;
+ /* fallthru */
+ case 1: /* acc */
+ mask = 0x43ff800;
+ break;
+ case 6: /* rej8 */
+ inv = true;
+ /* fallthru */
+ case 5: /* acc8 */
+ mask = 0x43f8000;
+ break;
+ case 9: /* acc6 */
+ mask = 0x43e0000;
+ break;
+ case 13: /* acc4 */
+ mask = 0x4380000;
+ break;
+ case 17: /* acc2 */
+ mask = 0x4200000;
+ break;
+ default:
+ gen_illegal(ctx);
+ return true;
+ }
+ if (inv) {
+ TCGv_reg c = load_const(ctx, mask);
+ tcg_gen_or_reg(t, t, c);
+ ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
+ } else {
+ tcg_gen_andi_reg(t, t, mask);
+ ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
+ }
+ } else {
+ unsigned cbit = (a->y ^ 1) - 1;
+
+ tcg_gen_extract_reg(t, t, 21 - cbit, 1);
+ ctx->null_cond = cond_make_0(TCG_COND_NE, t);
+ tcg_temp_free(t);
+ }
+
+ done:
+ return nullify_end(ctx);
+}
+
+/*
+ * Float class 3
+ */
+
+static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
+}
+
+static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
+}
+
+static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
+}
+
+static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
+}
+
+static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
+}
+
+static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
+}
+
+static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
+}
+
+static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
+{
+ return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
+}
+
+static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
+{
+ TCGv_i64 x, y;
+
+ nullify_over(ctx);
+
+ x = load_frw0_i64(a->r1);
+ y = load_frw0_i64(a->r2);
+ tcg_gen_mul_i64(x, x, y);
+ save_frd(a->t, x);
+ tcg_temp_free_i64(x);
+ tcg_temp_free_i64(y);
+
+ return nullify_end(ctx);
+}
+
+/* Convert the fmpyadd single-precision register encodings to standard. */
+static inline int fmpyadd_s_reg(unsigned r)
+{
+ return (r & 16) * 2 + 16 + (r & 15);
+}
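The fold above packs the 5-bit single-precision encodings onto QEMU's internal register numbering; a standalone sketch (not part of the patch) that re-checks the arithmetic:

    #include <assert.h>

    static int fmpyadd_s_reg(unsigned r)
    {
        return (r & 16) * 2 + 16 + (r & 15);
    }

    int main(void)
    {
        assert(fmpyadd_s_reg(0)  == 16);  /* encodings 0..15  map to 16..31 */
        assert(fmpyadd_s_reg(15) == 31);
        assert(fmpyadd_s_reg(16) == 48);  /* encodings 16..31 map to 48..63 */
        assert(fmpyadd_s_reg(31) == 63);
        return 0;
    }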
+
+static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
+{
+ int tm = fmpyadd_s_reg(a->tm);
+ int ra = fmpyadd_s_reg(a->ra);
+ int ta = fmpyadd_s_reg(a->ta);
+ int rm2 = fmpyadd_s_reg(a->rm2);
+ int rm1 = fmpyadd_s_reg(a->rm1);
+
+ nullify_over(ctx);
+
+ do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
+ do_fop_weww(ctx, ta, ta, ra,
+ is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
+{
+ return do_fmpyadd_s(ctx, a, false);
+}
+
+static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
+{
+ return do_fmpyadd_s(ctx, a, true);
+}
+
+static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
+{
+ nullify_over(ctx);
+
+ do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
+ do_fop_dedd(ctx, a->ta, a->ta, a->ra,
+ is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
+
+ return nullify_end(ctx);
+}
+
+static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
+{
+ return do_fmpyadd_d(ctx, a, false);
+}
+
+static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
+{
+ return do_fmpyadd_d(ctx, a, true);
+}
+
+static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
+{
+ TCGv_i32 x, y, z;
+
+ nullify_over(ctx);
+ x = load_frw0_i32(a->rm1);
+ y = load_frw0_i32(a->rm2);
+ z = load_frw0_i32(a->ra3);
+
+ if (a->neg) {
+ gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
+ } else {
+ gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
+ }
+
+ tcg_temp_free_i32(y);
+ tcg_temp_free_i32(z);
+ save_frw_i32(a->t, x);
+ tcg_temp_free_i32(x);
+ return nullify_end(ctx);
+}
+
+static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
+{
+ TCGv_i64 x, y, z;
+
+ nullify_over(ctx);
+ x = load_frd0(a->rm1);
+ y = load_frd0(a->rm2);
+ z = load_frd0(a->ra3);
+
+ if (a->neg) {
+ gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
+ } else {
+ gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
+ }
+
+ tcg_temp_free_i64(y);
+ tcg_temp_free_i64(z);
+ save_frd(a->t, x);
+ tcg_temp_free_i64(x);
+ return nullify_end(ctx);
+}
+
+static bool trans_diag(DisasContext *ctx, arg_diag *a)
+{
+ qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
+ cond_free(&ctx->null_cond);
+ return true;
+}
+
+static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ int bound;
+
+ ctx->cs = cs;
+ ctx->tb_flags = ctx->base.tb->flags;
+
+#ifdef CONFIG_USER_ONLY
+ ctx->privilege = MMU_USER_IDX;
+ ctx->mmu_idx = MMU_USER_IDX;
+ ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
+ ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
+#else
+ ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
+ ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
+
+ /* Recover the IAOQ values from the GVA + PRIV. */
+ uint64_t cs_base = ctx->base.tb->cs_base;
+ uint64_t iasq_f = cs_base & ~0xffffffffull;
+ int32_t diff = cs_base;
+
+ ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
+ ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
+#endif
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = NULL;
+
+ /* Bound the number of instructions by those left on the page. */
+ bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
+ ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
+ memset(ctx->tempr, 0, sizeof(ctx->tempr));
+ memset(ctx->templ, 0, sizeof(ctx->templ));
+}
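The page-bound expression above is terse; a standalone sketch of one evaluation, assuming 4 KiB pages (so TARGET_PAGE_MASK == ~0xfffull) and 4-byte instructions:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t page_mask = ~(uint64_t)0xfff; /* assumed TARGET_PAGE_MASK */
        uint64_t pc_first = 0x10ff8;

        /* (pc | mask) is the negated byte offset from the end of the page,
         * so negating it and dividing by 4 counts the insns left. */
        int bound = -(pc_first | page_mask) / 4;
        assert(bound == 2); /* 0x10ff8 and 0x10ffc remain on this page */
        return 0;
    }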
+
+static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
+ ctx->null_cond = cond_make_f();
+ ctx->psw_n_nonzero = false;
+ if (ctx->tb_flags & PSW_N) {
+ ctx->null_cond.c = TCG_COND_ALWAYS;
+ ctx->psw_n_nonzero = true;
+ }
+ ctx->null_lab = NULL;
+}
+
+static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
+}
+
+static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUHPPAState *env = cs->env_ptr;
+ DisasJumpType ret;
+ int i, n;
+
+ /* Execute one insn. */
+#ifdef CONFIG_USER_ONLY
+ if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
+ do_page_zero(ctx);
+ ret = ctx->base.is_jmp;
+ assert(ret != DISAS_NEXT);
+ } else
+#endif
+ {
+ /* Always fetch the insn, even if nullified, so that we check
+ the page permissions for execute. */
+ uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
+
+ /* Set up the IA queue for the next insn.
+ This will be overwritten by a branch. */
+ if (ctx->iaoq_b == -1) {
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = get_temp(ctx);
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ } else {
+ ctx->iaoq_n = ctx->iaoq_b + 4;
+ ctx->iaoq_n_var = NULL;
+ }
+
+ if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
+ ctx->null_cond.c = TCG_COND_NEVER;
+ ret = DISAS_NEXT;
+ } else {
+ ctx->insn = insn;
+ if (!decode(ctx, insn)) {
+ gen_illegal(ctx);
+ }
+ ret = ctx->base.is_jmp;
+ assert(ctx->null_lab == NULL);
+ }
+ }
+
+ /* Free any temporaries allocated. */
+ for (i = 0, n = ctx->ntempr; i < n; ++i) {
+ tcg_temp_free(ctx->tempr[i]);
+ ctx->tempr[i] = NULL;
+ }
+ for (i = 0, n = ctx->ntempl; i < n; ++i) {
+ tcg_temp_free_tl(ctx->templ[i]);
+ ctx->templ[i] = NULL;
+ }
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
+
+ /* Advance the insn queue. Note that this check also detects
+ a priority change within the instruction queue. */
+ if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
+ if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
+ && use_goto_tb(ctx, ctx->iaoq_b)
+ && (ctx->null_cond.c == TCG_COND_NEVER
+ || ctx->null_cond.c == TCG_COND_ALWAYS)) {
+ nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
+ gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
+ ctx->base.is_jmp = ret = DISAS_NORETURN;
+ } else {
+ ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
+ }
+ }
+ ctx->iaoq_f = ctx->iaoq_b;
+ ctx->iaoq_b = ctx->iaoq_n;
+ ctx->base.pc_next += 4;
+
+ switch (ret) {
+ case DISAS_NORETURN:
+ case DISAS_IAQ_N_UPDATED:
+ break;
+
+ case DISAS_NEXT:
+ case DISAS_IAQ_N_STALE:
+ case DISAS_IAQ_N_STALE_EXIT:
+ if (ctx->iaoq_f == -1) {
+ tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+#endif
+ nullify_save(ctx);
+ ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
+ ? DISAS_EXIT
+ : DISAS_IAQ_N_UPDATED);
+ } else if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ DisasJumpType is_jmp = ctx->base.is_jmp;
+
+ switch (is_jmp) {
+ case DISAS_NORETURN:
+ break;
+ case DISAS_TOO_MANY:
+ case DISAS_IAQ_N_STALE:
+ case DISAS_IAQ_N_STALE_EXIT:
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ nullify_save(ctx);
+ /* FALLTHRU */
+ case DISAS_IAQ_N_UPDATED:
+ if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ }
+ /* FALLTHRU */
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ target_ulong pc = dcbase->pc_first;
+
+#ifdef CONFIG_USER_ONLY
+ switch (pc) {
+ case 0x00:
+ qemu_log("IN:\n0x00000000: (null)\n");
+ return;
+ case 0xb0:
+ qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
+ return;
+ case 0xe0:
+ qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
+ return;
+ case 0x100:
+ qemu_log("IN:\n0x00000100: syscall\n");
+ return;
+ }
+#endif
+
+ qemu_log("IN: %s\n", lookup_symbol(pc));
+ log_target_disas(cs, pc, dcbase->tb->size);
+}
+
+static const TranslatorOps hppa_tr_ops = {
+ .init_disas_context = hppa_tr_init_disas_context,
+ .tb_start = hppa_tr_tb_start,
+ .insn_start = hppa_tr_insn_start,
+ .translate_insn = hppa_tr_translate_insn,
+ .tb_stop = hppa_tr_tb_stop,
+ .disas_log = hppa_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+ translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
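+ /* data[0] and data[1] are the iaoq_f/iaoq_b pair recorded for each
+ insn by tcg_gen_insn_start in hppa_tr_insn_start above. */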
+ env->iaoq_f = data[0];
+ if (data[1] != (target_ureg)-1) {
+ env->iaoq_b = data[1];
+ }
+ /* Since we were executing the instruction at IAOQ_F, and took some
+ sort of action that provoked the cpu_restore_state, we can infer
+ that the instruction was not nullified. */
+ env->psw_n = 0;
+}
diff --git a/target/i386/Kconfig b/target/i386/Kconfig
new file mode 100644
index 000000000..ce6968906
--- /dev/null
+++ b/target/i386/Kconfig
@@ -0,0 +1,5 @@
+config I386
+ bool
+
+config X86_64
+ bool
diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c
new file mode 100644
index 000000000..004141fc0
--- /dev/null
+++ b/target/i386/arch_dump.c
@@ -0,0 +1,461 @@
+/*
+ * i386 memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/dump.h"
+#include "elf.h"
+#include "sysemu/memory_mapping.h"
+
+#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
+ ((DIV_ROUND_UP((hdr_size), 4) \
+ + DIV_ROUND_UP((name_size), 4) \
+ + DIV_ROUND_UP((desc_size), 4)) * 4)
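A worked instance of the rounding (standalone sketch; the 336-byte descsz assumes the LP64 layout of the x86_64_elf_prstatus defined below):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
        ((DIV_ROUND_UP((hdr_size), 4)                     \
          + DIV_ROUND_UP((name_size), 4)                  \
          + DIV_ROUND_UP((desc_size), 4)) * 4)

    int main(void)
    {
        /* 12-byte Elf64_Nhdr, 5-byte "CORE" (with NUL), 336-byte prstatus:
         * each part rounds up to 4 bytes, giving (3 + 2 + 84) * 4 = 356. */
        printf("%d\n", ELF_NOTE_SIZE(12, 5, 336));
        return 0;
    }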
+
+#ifdef TARGET_X86_64
+typedef struct {
+ target_ulong r15, r14, r13, r12, rbp, rbx, r11, r10;
+ target_ulong r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
+ target_ulong rip, cs, eflags;
+ target_ulong rsp, ss;
+ target_ulong fs_base, gs_base;
+ target_ulong ds, es, fs, gs;
+} x86_64_user_regs_struct;
+
+typedef struct {
+ char pad1[32];
+ uint32_t pid;
+ char pad2[76];
+ x86_64_user_regs_struct regs;
+ char pad3[8];
+} x86_64_elf_prstatus;
+
+static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
+ CPUX86State *env, int id,
+ void *opaque)
+{
+ x86_64_user_regs_struct regs;
+ Elf64_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ regs.r15 = env->regs[15];
+ regs.r14 = env->regs[14];
+ regs.r13 = env->regs[13];
+ regs.r12 = env->regs[12];
+ regs.r11 = env->regs[11];
+ regs.r10 = env->regs[10];
+ regs.r9 = env->regs[9];
+ regs.r8 = env->regs[8];
+ regs.rbp = env->regs[R_EBP];
+ regs.rsp = env->regs[R_ESP];
+ regs.rdi = env->regs[R_EDI];
+ regs.rsi = env->regs[R_ESI];
+ regs.rdx = env->regs[R_EDX];
+ regs.rcx = env->regs[R_ECX];
+ regs.rbx = env->regs[R_EBX];
+ regs.rax = env->regs[R_EAX];
+ regs.rip = env->eip;
+ regs.eflags = env->eflags;
+
+ regs.orig_rax = 0; /* FIXME */
+ regs.cs = env->segs[R_CS].selector;
+ regs.ss = env->segs[R_SS].selector;
+ regs.fs_base = env->segs[R_FS].base;
+ regs.gs_base = env->segs[R_GS].base;
+ regs.ds = env->segs[R_DS].selector;
+ regs.es = env->segs[R_ES].selector;
+ regs.fs = env->segs[R_FS].selector;
+ regs.gs = env->segs[R_GS].selector;
+
+ descsz = sizeof(x86_64_elf_prstatus);
+ note_size = ELF_NOTE_SIZE(sizeof(Elf64_Nhdr), name_size, descsz);
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ROUND_UP(sizeof(Elf64_Nhdr), 4);
+ memcpy(buf, name, name_size);
+ buf += ROUND_UP(name_size, 4);
+ memcpy(buf + 32, &id, 4); /* pr_pid */
+ buf += descsz - sizeof(x86_64_user_regs_struct) - sizeof(target_ulong);
+ memcpy(buf, &regs, sizeof(x86_64_user_regs_struct));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+typedef struct {
+ uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
+ unsigned short ds, __ds, es, __es;
+ unsigned short fs, __fs, gs, __gs;
+ uint32_t orig_eax, eip;
+ unsigned short cs, __cs;
+ uint32_t eflags, esp;
+ unsigned short ss, __ss;
+} x86_user_regs_struct;
+
+typedef struct {
+ char pad1[24];
+ uint32_t pid;
+ char pad2[44];
+ x86_user_regs_struct regs;
+ char pad3[4];
+} x86_elf_prstatus;
+
+static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUX86State *env,
+ int id)
+{
+ memset(prstatus, 0, sizeof(x86_elf_prstatus));
+ prstatus->regs.ebp = env->regs[R_EBP] & 0xffffffff;
+ prstatus->regs.esp = env->regs[R_ESP] & 0xffffffff;
+ prstatus->regs.edi = env->regs[R_EDI] & 0xffffffff;
+ prstatus->regs.esi = env->regs[R_ESI] & 0xffffffff;
+ prstatus->regs.edx = env->regs[R_EDX] & 0xffffffff;
+ prstatus->regs.ecx = env->regs[R_ECX] & 0xffffffff;
+ prstatus->regs.ebx = env->regs[R_EBX] & 0xffffffff;
+ prstatus->regs.eax = env->regs[R_EAX] & 0xffffffff;
+ prstatus->regs.eip = env->eip & 0xffffffff;
+ prstatus->regs.eflags = env->eflags & 0xffffffff;
+
+ prstatus->regs.cs = env->segs[R_CS].selector;
+ prstatus->regs.ss = env->segs[R_SS].selector;
+ prstatus->regs.ds = env->segs[R_DS].selector;
+ prstatus->regs.es = env->segs[R_ES].selector;
+ prstatus->regs.fs = env->segs[R_FS].selector;
+ prstatus->regs.gs = env->segs[R_GS].selector;
+
+ prstatus->pid = id;
+}
+
+static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
+ int id, void *opaque)
+{
+ x86_elf_prstatus prstatus;
+ Elf64_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ x86_fill_elf_prstatus(&prstatus, env, id);
+ descsz = sizeof(x86_elf_prstatus);
+ note_size = ELF_NOTE_SIZE(sizeof(Elf64_Nhdr), name_size, descsz);
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ROUND_UP(sizeof(Elf64_Nhdr), 4);
+ memcpy(buf, name, name_size);
+ buf += ROUND_UP(name_size, 4);
+ memcpy(buf, &prstatus, sizeof(prstatus));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ int ret;
+#ifdef TARGET_X86_64
+ X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+ bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
+
+ if (lma) {
+ ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque);
+ } else {
+#endif
+ ret = x86_write_elf64_note(f, &cpu->env, cpuid, opaque);
+#ifdef TARGET_X86_64
+ }
+#endif
+
+ return ret;
+}
+
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ x86_elf_prstatus prstatus;
+ Elf32_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ x86_fill_elf_prstatus(&prstatus, &cpu->env, cpuid);
+ descsz = sizeof(x86_elf_prstatus);
+ note_size = ELF_NOTE_SIZE(sizeof(Elf32_Nhdr), name_size, descsz);
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ROUND_UP(sizeof(Elf32_Nhdr), 4);
+ memcpy(buf, name, name_size);
+ buf += ROUND_UP(name_size, 4);
+ memcpy(buf, &prstatus, sizeof(prstatus));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Please increment QEMUCPUSTATE_VERSION if you change the definition of
+ * QEMUCPUState, and update the tools that consume this information accordingly.
+ */
+#define QEMUCPUSTATE_VERSION (1)
+
+struct QEMUCPUSegment {
+ uint32_t selector;
+ uint32_t limit;
+ uint32_t flags;
+ uint32_t pad;
+ uint64_t base;
+};
+
+typedef struct QEMUCPUSegment QEMUCPUSegment;
+
+struct QEMUCPUState {
+ uint32_t version;
+ uint32_t size;
+ uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
+ uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
+ uint64_t rip, rflags;
+ QEMUCPUSegment cs, ds, es, fs, gs, ss;
+ QEMUCPUSegment ldt, tr, gdt, idt;
+ uint64_t cr[5];
+ /*
+ * Fields below are optional and are being added at the end without
+ * changing the version. External tools may identify their presence
+ * by checking 'size' field.
+ */
+ uint64_t kernel_gs_base;
+};
+
+typedef struct QEMUCPUState QEMUCPUState;
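A minimal sketch of the consumer-side check the comment above implies; qemu_note_has_kernel_gs_base is a hypothetical helper name, and the QEMUCPUState definition above is assumed to be in scope:

    #include <stdbool.h>
    #include <stddef.h>

    static bool qemu_note_has_kernel_gs_base(const QEMUCPUState *s)
    {
        /* Optional trailing fields are detected via 'size', not 'version'. */
        return s->size >= offsetof(QEMUCPUState, kernel_gs_base)
                          + sizeof(s->kernel_gs_base);
    }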
+
+static void copy_segment(QEMUCPUSegment *d, SegmentCache *s)
+{
+ d->pad = 0;
+ d->selector = s->selector;
+ d->limit = s->limit;
+ d->flags = s->flags;
+ d->base = s->base;
+}
+
+static void qemu_get_cpustate(QEMUCPUState *s, CPUX86State *env)
+{
+ memset(s, 0, sizeof(QEMUCPUState));
+
+ s->version = QEMUCPUSTATE_VERSION;
+ s->size = sizeof(QEMUCPUState);
+
+ s->rax = env->regs[R_EAX];
+ s->rbx = env->regs[R_EBX];
+ s->rcx = env->regs[R_ECX];
+ s->rdx = env->regs[R_EDX];
+ s->rsi = env->regs[R_ESI];
+ s->rdi = env->regs[R_EDI];
+ s->rsp = env->regs[R_ESP];
+ s->rbp = env->regs[R_EBP];
+#ifdef TARGET_X86_64
+ s->r8 = env->regs[8];
+ s->r9 = env->regs[9];
+ s->r10 = env->regs[10];
+ s->r11 = env->regs[11];
+ s->r12 = env->regs[12];
+ s->r13 = env->regs[13];
+ s->r14 = env->regs[14];
+ s->r15 = env->regs[15];
+#endif
+ s->rip = env->eip;
+ s->rflags = env->eflags;
+
+ copy_segment(&s->cs, &env->segs[R_CS]);
+ copy_segment(&s->ds, &env->segs[R_DS]);
+ copy_segment(&s->es, &env->segs[R_ES]);
+ copy_segment(&s->fs, &env->segs[R_FS]);
+ copy_segment(&s->gs, &env->segs[R_GS]);
+ copy_segment(&s->ss, &env->segs[R_SS]);
+ copy_segment(&s->ldt, &env->ldt);
+ copy_segment(&s->tr, &env->tr);
+ copy_segment(&s->gdt, &env->gdt);
+ copy_segment(&s->idt, &env->idt);
+
+ s->cr[0] = env->cr[0];
+ s->cr[1] = env->cr[1];
+ s->cr[2] = env->cr[2];
+ s->cr[3] = env->cr[3];
+ s->cr[4] = env->cr[4];
+
+#ifdef TARGET_X86_64
+ s->kernel_gs_base = env->kernelgsbase;
+#endif
+}
+
+static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
+ CPUX86State *env,
+ void *opaque,
+ int type)
+{
+ QEMUCPUState state;
+ Elf64_Nhdr *note64;
+ Elf32_Nhdr *note32;
+ void *note;
+ char *buf;
+ int descsz, note_size, name_size = 5, note_head_size;
+ const char *name = "QEMU";
+ int ret;
+
+ qemu_get_cpustate(&state, env);
+
+ descsz = sizeof(state);
+ if (type == 0) {
+ note_head_size = sizeof(Elf32_Nhdr);
+ } else {
+ note_head_size = sizeof(Elf64_Nhdr);
+ }
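+ /* This open-codes ELF_NOTE_SIZE(note_head_size, name_size, descsz). */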
+ note_size = (DIV_ROUND_UP(note_head_size, 4) + DIV_ROUND_UP(name_size, 4) +
+ DIV_ROUND_UP(descsz, 4)) * 4;
+ note = g_malloc0(note_size);
+ if (type == 0) {
+ note32 = note;
+ note32->n_namesz = cpu_to_le32(name_size);
+ note32->n_descsz = cpu_to_le32(descsz);
+ note32->n_type = 0;
+ } else {
+ note64 = note;
+ note64->n_namesz = cpu_to_le32(name_size);
+ note64->n_descsz = cpu_to_le32(descsz);
+ note64->n_type = 0;
+ }
+ buf = note;
+ buf += ROUND_UP(note_head_size, 4);
+ memcpy(buf, name, name_size);
+ buf += ROUND_UP(name_size, 4);
+ memcpy(buf, &state, sizeof(state));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+ void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu_write_qemu_note(f, &cpu->env, opaque, 1);
+}
+
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+ void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu_write_qemu_note(f, &cpu->env, opaque, 0);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ bool lma = false;
+ GuestPhysBlock *block;
+
+#ifdef TARGET_X86_64
+ X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+ lma = first_cpu && (first_x86_cpu->env.hflags & HF_LMA_MASK);
+#endif
+
+ if (lma) {
+ info->d_machine = EM_X86_64;
+ } else {
+ info->d_machine = EM_386;
+ }
+ info->d_endian = ELFDATA2LSB;
+
+ if (lma) {
+ info->d_class = ELFCLASS64;
+ } else {
+ info->d_class = ELFCLASS32;
+
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_end > UINT_MAX) {
+ /* The memory size is greater than 4G */
+ info->d_class = ELFCLASS64;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 5; /* "CORE" or "QEMU" */
+ size_t elf_note_size = 0;
+ size_t qemu_note_size = 0;
+ int elf_desc_size = 0;
+ int qemu_desc_size = 0;
+ int note_head_size;
+
+ if (class == ELFCLASS32) {
+ note_head_size = sizeof(Elf32_Nhdr);
+ } else {
+ note_head_size = sizeof(Elf64_Nhdr);
+ }
+
+ if (machine == EM_386) {
+ elf_desc_size = sizeof(x86_elf_prstatus);
+ }
+#ifdef TARGET_X86_64
+ else {
+ elf_desc_size = sizeof(x86_64_elf_prstatus);
+ }
+#endif
+ qemu_desc_size = sizeof(QEMUCPUState);
+
+ elf_note_size = ELF_NOTE_SIZE(note_head_size, name_size, elf_desc_size);
+ qemu_note_size = ELF_NOTE_SIZE(note_head_size, name_size, qemu_desc_size);
+
+ return (elf_note_size + qemu_note_size) * nr_cpus;
+}
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
new file mode 100644
index 000000000..271cb5e41
--- /dev/null
+++ b/target/i386/arch_memory_mapping.c
@@ -0,0 +1,314 @@
+/*
+ * i386 memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/memory_mapping.h"
+
+/* PAE Paging or IA-32e Paging */
+static void walk_pte(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pte_start_addr,
+ int32_t a20_mask, target_ulong start_line_addr)
+{
+ hwaddr pte_addr, start_paddr;
+ uint64_t pte;
+ target_ulong start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pte_addr = (pte_start_addr + i * 8) & a20_mask;
+ pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pte & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+
+ start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 12);
+ }
+}
+
+/* 32-bit Paging */
+static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pte_start_addr, int32_t a20_mask,
+ target_ulong start_line_addr)
+{
+ hwaddr pte_addr, start_paddr;
+ uint32_t pte;
+ target_ulong start_vaddr;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ pte_addr = (pte_start_addr + i * 4) & a20_mask;
+ pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pte & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ start_paddr = pte & ~0xfff;
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+
+ start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 12);
+ }
+}
+
+/* PAE Paging or IA-32e Paging */
+#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
+
+static void walk_pde(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pde_start_addr,
+ int32_t a20_mask, target_ulong start_line_addr)
+{
+ hwaddr pde_addr, pte_start_addr, start_paddr;
+ uint64_t pde;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pde_addr = (pde_start_addr + i * 8) & a20_mask;
+ pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pde & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = start_line_addr | ((i & 0x1ff) << 21);
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 21);
+ continue;
+ }
+
+ pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
+ walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
+ }
+}
+
+/* 32-bit Paging */
+static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pde_start_addr, int32_t a20_mask,
+ bool pse)
+{
+ hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
+ uint32_t pde;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ pde_addr = (pde_start_addr + i * 4) & a20_mask;
+ pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pde & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = (((unsigned int)i & 0x3ff) << 22);
+ if ((pde & PG_PSE_MASK) && pse) {
+ /*
+ * 4 MB page:
+ * bits 39:32 are bits 20:13 of the PDE
+ * bits 31:22 are bits 31:22 of the PDE
+ */
+ high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
+ start_paddr = (pde & ~0x3fffff) | high_paddr;
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 22);
+ continue;
+ }
+
+ pte_start_addr = (pde & ~0xfff) & a20_mask;
+ walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
+ }
+}
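The PSE36 bit-splicing in the 4 MB branch above is easy to misread; a standalone sketch with one concrete PDE value (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* PSE PDE: frame bits 31:22 = 0x001, plus bit 13 set so that
         * physical address bit 32 appears in the result. */
        uint32_t pde   = 0x004000e3 | 0x2000;
        uint64_t high  = (uint64_t)(pde & 0x1fe000) << 19; /* paddr 39:32 */
        uint64_t paddr = (pde & ~0x3fffffu) | high;        /* paddr 31:22 */

        printf("%#llx\n", (unsigned long long)paddr); /* prints 0x100400000 */
        return 0;
    }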
+
+/* PAE Paging */
+static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pdpe_start_addr, int32_t a20_mask)
+{
+ hwaddr pdpe_addr, pde_start_addr;
+ uint64_t pdpe;
+ target_ulong line_addr;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+ pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = (((unsigned int)i & 0x3) << 30);
+ pde_start_addr = (pdpe & ~0xfff) & a20_mask;
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+ }
+}
+
+#ifdef TARGET_X86_64
+/* IA-32e Paging */
+static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pdpe_start_addr, int32_t a20_mask,
+ target_ulong start_line_addr)
+{
+ hwaddr pdpe_addr, pde_start_addr, start_paddr;
+ uint64_t pdpe;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+ pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
+ if (pdpe & PG_PSE_MASK) {
+ /* 1 GB page */
+ start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 30);
+ continue;
+ }
+
+ pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+ }
+}
+
+/* IA-32e Paging */
+static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pml4e_start_addr, int32_t a20_mask,
+ target_ulong start_line_addr)
+{
+ hwaddr pml4e_addr, pdpe_start_addr;
+ uint64_t pml4e;
+ target_ulong line_addr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
+ pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = start_line_addr | ((i & 0x1ffULL) << 39);
+ pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
+ walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
+ }
+}
+
+static void walk_pml5e(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pml5e_start_addr, int32_t a20_mask)
+{
+ hwaddr pml5e_addr, pml4e_start_addr;
+ uint64_t pml5e;
+ target_ulong line_addr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pml5e_addr = (pml5e_start_addr + i * 8) & a20_mask;
+ pml5e = address_space_ldq(as, pml5e_addr, MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (!(pml5e & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = (0x7fULL << 57) | ((i & 0x1ffULL) << 48);
+ pml4e_start_addr = (pml5e & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml4e(list, as, pml4e_start_addr, a20_mask, line_addr);
+ }
+}
+#endif
+
+void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int32_t a20_mask;
+
+ if (!cpu_paging_enabled(cs)) {
+ /* paging is disabled */
+ return;
+ }
+
+ a20_mask = x86_get_a20_mask(env);
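+ /* a20_mask stays int32_t so that it sign-extends and keeps the high
+ bits set when ANDed with 64-bit table addresses in the walkers. */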
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ if (env->cr[4] & CR4_LA57_MASK) {
+ hwaddr pml5e_addr;
+
+ pml5e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml5e(list, cs->as, pml5e_addr, a20_mask);
+ } else {
+ hwaddr pml4e_addr;
+
+ pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & a20_mask;
+ walk_pml4e(list, cs->as, pml4e_addr, a20_mask,
+ 0xffffULL << 48);
+ }
+ } else
+#endif
+ {
+ hwaddr pdpe_addr;
+
+ pdpe_addr = (env->cr[3] & ~0x1f) & a20_mask;
+ walk_pdpe2(list, cs->as, pdpe_addr, a20_mask);
+ }
+ } else {
+ hwaddr pde_addr;
+ bool pse;
+
+ pde_addr = (env->cr[3] & ~0xfff) & a20_mask;
+ pse = !!(env->cr[4] & CR4_PSE_MASK);
+ walk_pde2(list, cs->as, pde_addr, a20_mask, pse);
+ }
+}
+
diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c
new file mode 100644
index 000000000..08ac957e9
--- /dev/null
+++ b/target/i386/cpu-dump.c
@@ -0,0 +1,570 @@
+/*
+ * i386 CPU dump to FILE
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/qemu-print.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/i386/apic_internal.h"
+#endif
+
+/***********************************************************/
+/* x86 debug */
+
+static const char *cc_op_str[CC_OP_NB] = {
+ "DYNAMIC",
+ "EFLAGS",
+
+ "MULB",
+ "MULW",
+ "MULL",
+ "MULQ",
+
+ "ADDB",
+ "ADDW",
+ "ADDL",
+ "ADDQ",
+
+ "ADCB",
+ "ADCW",
+ "ADCL",
+ "ADCQ",
+
+ "SUBB",
+ "SUBW",
+ "SUBL",
+ "SUBQ",
+
+ "SBBB",
+ "SBBW",
+ "SBBL",
+ "SBBQ",
+
+ "LOGICB",
+ "LOGICW",
+ "LOGICL",
+ "LOGICQ",
+
+ "INCB",
+ "INCW",
+ "INCL",
+ "INCQ",
+
+ "DECB",
+ "DECW",
+ "DECL",
+ "DECQ",
+
+ "SHLB",
+ "SHLW",
+ "SHLL",
+ "SHLQ",
+
+ "SARB",
+ "SARW",
+ "SARL",
+ "SARQ",
+
+ "BMILGB",
+ "BMILGW",
+ "BMILGL",
+ "BMILGQ",
+
+ "ADCX",
+ "ADOX",
+ "ADCOX",
+
+ "CLR",
+};
+
+static void
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
+ const char *name, struct SegmentCache *sc)
+{
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ qemu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
+ sc->selector, sc->base, sc->limit,
+ sc->flags & 0x00ffff00);
+ } else
+#endif
+ {
+ qemu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
+ (uint32_t)sc->base, sc->limit,
+ sc->flags & 0x00ffff00);
+ }
+
+ if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
+ goto done;
+
+ qemu_fprintf(f, " DPL=%d ",
+ (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
+ if (sc->flags & DESC_S_MASK) {
+ if (sc->flags & DESC_CS_MASK) {
+ qemu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
+ ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
+ qemu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
+ (sc->flags & DESC_R_MASK) ? 'R' : '-');
+ } else {
+ qemu_fprintf(f, (sc->flags & DESC_B_MASK
+ || env->hflags & HF_LMA_MASK)
+ ? "DS " : "DS16");
+ qemu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
+ (sc->flags & DESC_W_MASK) ? 'W' : '-');
+ }
+ qemu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
+ } else {
+ static const char *sys_type_name[2][16] = {
+ { /* 32 bit mode */
+ "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
+ "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
+ "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
+ "CallGate32", "Reserved", "IntGate32", "TrapGate32"
+ },
+ { /* 64 bit mode */
+ "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved",
+ "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
+ "Reserved", "IntGate64", "TrapGate64"
+ }
+ };
+ qemu_fprintf(f, "%s",
+ sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
+ [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
+ }
+done:
+ qemu_fprintf(f, "\n");
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* ARRAY_SIZE check is not required because
+ * DeliveryMode (dm) is only 3 bits wide.
+ */
+static inline const char *dm2str(uint32_t dm)
+{
+ static const char *str[] = {
+ "Fixed",
+ "...",
+ "SMI",
+ "...",
+ "NMI",
+ "INIT",
+ "...",
+ "ExtINT"
+ };
+ return str[dm];
+}
+
+static void dump_apic_lvt(const char *name, uint32_t lvt, bool is_timer)
+{
+ uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
+ qemu_printf("%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
+ name, lvt,
+ lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
+ lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
+ lvt & APIC_LVT_MASKED ? "masked" : "",
+ lvt & APIC_LVT_DELIV_STS ? "pending" : "",
+ !is_timer ?
+ "" : lvt & APIC_LVT_TIMER_PERIODIC ?
+ "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
+ "tsc-deadline" : "one-shot",
+ dm2str(dm));
+ if (dm != APIC_DM_NMI) {
+ qemu_printf(" (vec %u)\n", lvt & APIC_VECTOR_MASK);
+ } else {
+ qemu_printf("\n");
+ }
+}
+
+/* ARRAY_SIZE check is not required because
+ * the destination shorthand is only 2 bits wide.
+ */
+static inline const char *shorthand2str(uint32_t shorthand)
+{
+ const char *str[] = {
+ "no-shorthand", "self", "all-self", "all"
+ };
+ return str[shorthand];
+}
+
+static inline uint8_t divider_conf(uint32_t divide_conf)
+{
+ uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);
+
+ return divide_val == 7 ? 1 : 2 << divide_val;
+}
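Sample decodings of the LVT timer divide configuration, re-evaluating the helper above (standalone sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t divider_conf(uint32_t divide_conf)
    {
        uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

        return divide_val == 7 ? 1 : 2 << divide_val;
    }

    int main(void)
    {
        assert(divider_conf(0x0) == 2);  /* encoding 000 -> divide by 2  */
        assert(divider_conf(0x8) == 32); /* encoding 100 -> divide by 32 */
        assert(divider_conf(0xb) == 1);  /* encoding 111 -> divide by 1  */
        return 0;
    }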
+
+static inline void mask2str(char *str, uint32_t val, uint8_t size)
+{
+ while (size--) {
+ *str++ = (val >> size) & 1 ? '1' : '0';
+ }
+ *str = 0;
+}
+
+#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
+
+static void dump_apic_icr(APICCommonState *s, CPUX86State *env)
+{
+ uint32_t icr = s->icr[0], icr2 = s->icr[1];
+ uint8_t dest_shorthand = \
+ (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
+ bool logical_mod = icr & APIC_ICR_DEST_MOD;
+ char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
+ uint32_t dest_field;
+ bool x2apic;
+
+ qemu_printf("ICR\t 0x%08x %s %s %s %s\n",
+ icr,
+ logical_mod ? "logical" : "physical",
+ icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
+ icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
+ shorthand2str(dest_shorthand));
+
+ qemu_printf("ICR2\t 0x%08x", icr2);
+ if (dest_shorthand != 0) {
+ qemu_printf("\n");
+ return;
+ }
+ x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
+ dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;
+
+ if (!logical_mod) {
+ if (x2apic) {
+ qemu_printf(" cpu %u (X2APIC ID)\n", dest_field);
+ } else {
+ qemu_printf(" cpu %u (APIC ID)\n",
+ dest_field & APIC_LOGDEST_XAPIC_ID);
+ }
+ return;
+ }
+
+ if (s->dest_mode == 0xf) { /* flat mode */
+ mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
+ qemu_printf(" mask %s (APIC ID)\n", apic_id_str);
+ } else if (s->dest_mode == 0) { /* cluster mode */
+ if (x2apic) {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
+ qemu_printf(" cluster %u mask %s (X2APIC ID)\n",
+ dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
+ } else {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
+ qemu_printf(" cluster %u mask %s (APIC ID)\n",
+ dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
+ }
+ }
+}
+
+static void dump_apic_interrupt(const char *name, uint32_t *ireg_tab,
+ uint32_t *tmr_tab)
+{
+ int i, empty = true;
+
+ qemu_printf("%s\t ", name);
+ for (i = 0; i < 256; i++) {
+ if (apic_get_bit(ireg_tab, i)) {
+ qemu_printf("%u%s ", i,
+ apic_get_bit(tmr_tab, i) ? "(level)" : "");
+ empty = false;
+ }
+ }
+ qemu_printf("%s\n", empty ? "(none)" : "");
+}
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ APICCommonState *s = APIC_COMMON(cpu->apic_state);
+ if (!s) {
+ qemu_printf("local apic state not available\n");
+ return;
+ }
+ uint32_t *lvt = s->lvt;
+
+ qemu_printf("dumping local APIC state for CPU %-2u\n\n",
+ CPU(cpu)->cpu_index);
+ dump_apic_lvt("LVT0", lvt[APIC_LVT_LINT0], false);
+ dump_apic_lvt("LVT1", lvt[APIC_LVT_LINT1], false);
+ dump_apic_lvt("LVTPC", lvt[APIC_LVT_PERFORM], false);
+ dump_apic_lvt("LVTERR", lvt[APIC_LVT_ERROR], false);
+ dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
+ dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);
+
+ qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u"
+ " current_count = %u\n",
+ s->divide_conf & APIC_DCR_MASK,
+ divider_conf(s->divide_conf),
+ s->initial_count, apic_get_current_count(s));
+
+ qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
+ s->spurious_vec,
+ s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
+ s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
+ s->spurious_vec & APIC_VECTOR_MASK);
+
+ dump_apic_icr(s, &cpu->env);
+
+ qemu_printf("ESR\t 0x%08x\n", s->esr);
+
+ dump_apic_interrupt("ISR", s->isr, s->tmr);
+ dump_apic_interrupt("IRR", s->irr, s->tmr);
+
+ qemu_printf("\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
+ s->arb_id, s->tpr, s->dest_mode, s->log_dest);
+ if (s->dest_mode == 0) {
+ qemu_printf("(cluster %u: id %u)",
+ s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
+ s->log_dest & APIC_LOGDEST_XAPIC_ID);
+ }
+ qemu_printf(" PPR 0x%02x\n", apic_get_ppr(s));
+}
+#else
+void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
+{
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#define DUMP_CODE_BYTES_TOTAL 50
+#define DUMP_CODE_BYTES_BACKWARD 20
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int eflags, i, nb;
+ char cc_op_name[32];
+ static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+
+ eflags = cpu_compute_eflags(env);
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ qemu_fprintf(f, "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
+ "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
+ "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
+ "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
+ "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+ env->regs[R_EAX],
+ env->regs[R_EBX],
+ env->regs[R_ECX],
+ env->regs[R_EDX],
+ env->regs[R_ESI],
+ env->regs[R_EDI],
+ env->regs[R_EBP],
+ env->regs[R_ESP],
+ env->regs[8],
+ env->regs[9],
+ env->regs[10],
+ env->regs[11],
+ env->regs[12],
+ env->regs[13],
+ env->regs[14],
+ env->regs[15],
+ env->eip, eflags,
+ eflags & DF_MASK ? 'D' : '-',
+ eflags & CC_O ? 'O' : '-',
+ eflags & CC_S ? 'S' : '-',
+ eflags & CC_Z ? 'Z' : '-',
+ eflags & CC_A ? 'A' : '-',
+ eflags & CC_P ? 'P' : '-',
+ eflags & CC_C ? 'C' : '-',
+ env->hflags & HF_CPL_MASK,
+ (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+ (env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
+ cs->halted);
+ } else
+#endif
+ {
+ qemu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
+ "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
+ "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+ (uint32_t)env->regs[R_EAX],
+ (uint32_t)env->regs[R_EBX],
+ (uint32_t)env->regs[R_ECX],
+ (uint32_t)env->regs[R_EDX],
+ (uint32_t)env->regs[R_ESI],
+ (uint32_t)env->regs[R_EDI],
+ (uint32_t)env->regs[R_EBP],
+ (uint32_t)env->regs[R_ESP],
+ (uint32_t)env->eip, eflags,
+ eflags & DF_MASK ? 'D' : '-',
+ eflags & CC_O ? 'O' : '-',
+ eflags & CC_S ? 'S' : '-',
+ eflags & CC_Z ? 'Z' : '-',
+ eflags & CC_A ? 'A' : '-',
+ eflags & CC_P ? 'P' : '-',
+ eflags & CC_C ? 'C' : '-',
+ env->hflags & HF_CPL_MASK,
+ (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+ (env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
+ cs->halted);
+ }
+
+ for(i = 0; i < 6; i++) {
+ cpu_x86_dump_seg_cache(env, f, seg_name[i], &env->segs[i]);
+ }
+ cpu_x86_dump_seg_cache(env, f, "LDT", &env->ldt);
+ cpu_x86_dump_seg_cache(env, f, "TR", &env->tr);
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ qemu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
+ env->gdt.base, env->gdt.limit);
+ qemu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
+ env->idt.base, env->idt.limit);
+ qemu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
+ (uint32_t)env->cr[0],
+ env->cr[2],
+ env->cr[3],
+ (uint32_t)env->cr[4]);
+ for(i = 0; i < 4; i++)
+ qemu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
+ qemu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
+ env->dr[6], env->dr[7]);
+ } else
+#endif
+ {
+ qemu_fprintf(f, "GDT= %08x %08x\n",
+ (uint32_t)env->gdt.base, env->gdt.limit);
+ qemu_fprintf(f, "IDT= %08x %08x\n",
+ (uint32_t)env->idt.base, env->idt.limit);
+ qemu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
+ (uint32_t)env->cr[0],
+ (uint32_t)env->cr[2],
+ (uint32_t)env->cr[3],
+ (uint32_t)env->cr[4]);
+ for(i = 0; i < 4; i++) {
+ qemu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
+ }
+ qemu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
+ env->dr[6], env->dr[7]);
+ }
+ if (flags & CPU_DUMP_CCOP) {
+ if ((unsigned)env->cc_op < CC_OP_NB)
+ snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
+ else
+ snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n",
+ env->cc_src, env->cc_dst,
+ cc_op_name);
+ } else
+#endif
+ {
+ qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%s\n",
+ (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
+ cc_op_name);
+ }
+ }
+ qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
+ if (flags & CPU_DUMP_FPU) {
+ int fptag;
+ const uint64_t avx512_mask = XSTATE_OPMASK_MASK | \
+ XSTATE_ZMM_Hi256_MASK | \
+ XSTATE_Hi16_ZMM_MASK | \
+ XSTATE_YMM_MASK | XSTATE_SSE_MASK,
+ avx_mask = XSTATE_YMM_MASK | XSTATE_SSE_MASK;
+ fptag = 0;
+ for(i = 0; i < 8; i++) {
+ fptag |= ((!env->fptags[i]) << i);
+ }
+ update_mxcsr_from_sse_status(env);
+ qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
+ env->fpuc,
+ (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
+ env->fpstt,
+ fptag,
+ env->mxcsr);
+ for (i = 0; i < 8; i++) {
+ CPU_LDoubleU u;
+ u.d = env->fpregs[i].d;
+ qemu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
+ i, u.l.lower, u.l.upper);
+ if ((i & 1) == 1)
+ qemu_fprintf(f, "\n");
+ else
+ qemu_fprintf(f, " ");
+ }
+
+ if ((env->xcr0 & avx512_mask) == avx512_mask) {
+ /* XSAVE enabled AVX512 */
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ qemu_fprintf(f, "Opmask%02d=%016"PRIx64"%s", i,
+ env->opmask_regs[i], ((i & 3) == 3) ? "\n" : " ");
+ }
+
+ nb = (env->hflags & HF_CS64_MASK) ? 32 : 8;
+ for (i = 0; i < nb; i++) {
+ qemu_fprintf(f, "ZMM%02d=%016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64" %016"PRIx64"\n",
+ i,
+ env->xmm_regs[i].ZMM_Q(7),
+ env->xmm_regs[i].ZMM_Q(6),
+ env->xmm_regs[i].ZMM_Q(5),
+ env->xmm_regs[i].ZMM_Q(4),
+ env->xmm_regs[i].ZMM_Q(3),
+ env->xmm_regs[i].ZMM_Q(2),
+ env->xmm_regs[i].ZMM_Q(1),
+ env->xmm_regs[i].ZMM_Q(0));
+ }
+ } else if ((env->xcr0 & avx_mask) == avx_mask) {
+ /* XSAVE enabled AVX */
+ nb = env->hflags & HF_CS64_MASK ? 16 : 8;
+ for (i = 0; i < nb; i++) {
+ qemu_fprintf(f, "YMM%02d=%016"PRIx64" %016"PRIx64" %016"PRIx64
+ " %016"PRIx64"\n", i,
+ env->xmm_regs[i].ZMM_Q(3),
+ env->xmm_regs[i].ZMM_Q(2),
+ env->xmm_regs[i].ZMM_Q(1),
+ env->xmm_regs[i].ZMM_Q(0));
+ }
+ } else { /* SSE and below cases */
+ nb = env->hflags & HF_CS64_MASK ? 16 : 8;
+ for (i = 0; i < nb; i++) {
+ qemu_fprintf(f, "XMM%02d=%016"PRIx64" %016"PRIx64"%s",
+ i,
+ env->xmm_regs[i].ZMM_Q(1),
+ env->xmm_regs[i].ZMM_Q(0),
+ (i & 1) ? "\n" : " ");
+ }
+ }
+ }
+ if (flags & CPU_DUMP_CODE) {
+ target_ulong base = env->segs[R_CS].base + env->eip;
+ target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
+ uint8_t code;
+ char codestr[3];
+
+ qemu_fprintf(f, "Code=");
+ for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
+ if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
+ snprintf(codestr, sizeof(codestr), "%02x", code);
+ } else {
+ snprintf(codestr, sizeof(codestr), "??");
+ }
+ qemu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
+ i == offs ? "<" : "", codestr, i == offs ? ">" : "");
+ }
+ qemu_fprintf(f, "\n");
+ }
+}
diff --git a/target/i386/cpu-internal.h b/target/i386/cpu-internal.h
new file mode 100644
index 000000000..9baac5c0b
--- /dev/null
+++ b/target/i386/cpu-internal.h
@@ -0,0 +1,70 @@
+/*
+ * i386 CPU internal definitions to be shared between cpu.c and cpu-sysemu.c
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef I386_CPU_INTERNAL_H
+#define I386_CPU_INTERNAL_H
+
+typedef enum FeatureWordType {
+ CPUID_FEATURE_WORD,
+ MSR_FEATURE_WORD,
+} FeatureWordType;
+
+typedef struct FeatureWordInfo {
+ FeatureWordType type;
+ /* feature flag names are taken from "Intel Processor Identification and
+ * the CPUID Instruction" and AMD's "CPUID Specification".
+ * In cases of disagreement between feature naming conventions,
+ * aliases may be added.
+ */
+ const char *feat_names[64];
+ union {
+ /* If type==CPUID_FEATURE_WORD */
+ struct {
+ uint32_t eax; /* Input EAX for CPUID */
+ bool needs_ecx; /* CPUID instruction uses ECX as input */
+ uint32_t ecx; /* Input ECX value for CPUID */
+ int reg; /* output register (R_* constant) */
+ } cpuid;
+ /* If type==MSR_FEATURE_WORD */
+ struct {
+ uint32_t index;
+ } msr;
+ };
+ uint64_t tcg_features; /* Feature flags supported by TCG */
+ uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
+ uint64_t migratable_flags; /* Feature flags known to be migratable */
+ /* Features that shouldn't be auto-enabled by "-cpu host" */
+ uint64_t no_autoenable_flags;
+} FeatureWordInfo;
+
+extern FeatureWordInfo feature_word_info[];
+
+void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
+
+#ifndef CONFIG_USER_ONLY
+GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs);
+void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp);
+
+void x86_cpu_apic_create(X86CPU *cpu, Error **errp);
+void x86_cpu_apic_realize(X86CPU *cpu, Error **errp);
+void x86_cpu_machine_reset_cb(void *opaque);
+#endif /* !CONFIG_USER_ONLY */
+
+#endif /* I386_CPU_INTERNAL_H */
diff --git a/target/i386/cpu-param.h b/target/i386/cpu-param.h
new file mode 100644
index 000000000..57abc64c0
--- /dev/null
+++ b/target/i386/cpu-param.h
@@ -0,0 +1,28 @@
+/*
+ * i386 cpu parameters for qemu.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef I386_CPU_PARAM_H
+#define I386_CPU_PARAM_H 1
+
+#ifdef TARGET_X86_64
+# define TARGET_LONG_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 52
+/*
+ * ??? This is really 48 bits, sign-extended, but the only thing
+ * accessible to userland with bit 48 set is the VSYSCALL, and that
+ * is handled via other mechanisms.
+ */
+# define TARGET_VIRT_ADDR_SPACE_BITS 47
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 36
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define TARGET_PAGE_BITS 12
+#define NB_MMU_MODES 3
+
+#endif
diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h
new file mode 100644
index 000000000..f9923cee0
--- /dev/null
+++ b/target/i386/cpu-qom.h
@@ -0,0 +1,75 @@
+/*
+ * QEMU x86 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_I386_CPU_QOM_H
+#define QEMU_I386_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qemu/notify.h"
+#include "qom/object.h"
+
+#ifdef TARGET_X86_64
+#define TYPE_X86_CPU "x86_64-cpu"
+#else
+#define TYPE_X86_CPU "i386-cpu"
+#endif
+
+OBJECT_DECLARE_TYPE(X86CPU, X86CPUClass,
+ X86_CPU)
+
+typedef struct X86CPUModel X86CPUModel;
+
+/**
+ * X86CPUClass:
+ * @cpu_def: CPU model definition
+ * @host_cpuid_required: Whether CPU model requires cpuid from host.
+ * @ordering: Ordering on the "-cpu help" CPU model list.
+ * @migration_safe: See CpuDefinitionInfo::migration_safe
+ * @static_model: See CpuDefinitionInfo::static
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An x86 CPU model or family.
+ */
+struct X86CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ /* CPU definition, automatically loaded by instance_init if not NULL.
+ * Should be eventually replaced by subclass-specific property defaults.
+ */
+ X86CPUModel *model;
+
+ bool host_cpuid_required;
+ int ordering;
+ bool migration_safe;
+ bool static_model;
+
+ /* Optional description of CPU model.
+ * If unavailable, cpu_def->model_id is used */
+ const char *model_description;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ DeviceReset parent_reset;
+};
+
+
+#endif
diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c
new file mode 100644
index 000000000..37b7c562f
--- /dev/null
+++ b/target/i386/cpu-sysemu.c
@@ -0,0 +1,352 @@
+/*
+ * i386 CPUID, CPU class, definitions, models: sysemu-only code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/xen.h"
+#include "sysemu/whpx.h"
+#include "kvm/kvm_i386.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-run-state.h"
+#include "qapi/qmp/qdict.h"
+#include "qom/qom-qobject.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "hw/qdev-properties.h"
+
+#include "exec/address-spaces.h"
+#include "hw/i386/apic_internal.h"
+
+#include "cpu-internal.h"
+
+/* Return a QDict containing keys for all properties that can be included
+ * in static expansion of CPU models. All properties set by x86_cpu_load_model()
+ * must be included in the dictionary.
+ */
+static QDict *x86_cpu_static_props(void)
+{
+ FeatureWord w;
+ int i;
+ static const char *props[] = {
+ "min-level",
+ "min-xlevel",
+ "family",
+ "model",
+ "stepping",
+ "model-id",
+ "vendor",
+ "lmce",
+ NULL,
+ };
+ static QDict *d;
+
+ if (d) {
+ return d;
+ }
+
+ d = qdict_new();
+ for (i = 0; props[i]; i++) {
+ qdict_put_null(d, props[i]);
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ FeatureWordInfo *fi = &feature_word_info[w];
+ int bit;
+ for (bit = 0; bit < 64; bit++) {
+ if (!fi->feat_names[bit]) {
+ continue;
+ }
+ qdict_put_null(d, fi->feat_names[bit]);
+ }
+ }
+
+ return d;
+}
+
+/* Add an entry to the @props dict with the value of the given property. */
+static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
+{
+ QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
+ &error_abort);
+
+ qdict_put_obj(props, prop, value);
+}
+
+/* Convert CPU model data from X86CPU object to a property dictionary
+ * that can recreate exactly the same CPU model.
+ */
+static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
+{
+ QDict *sprops = x86_cpu_static_props();
+ const QDictEntry *e;
+
+ for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
+ const char *prop = qdict_entry_key(e);
+ x86_cpu_expand_prop(cpu, props, prop);
+ }
+}
+
+/* Convert CPU model data from X86CPU object to a property dictionary
+ * that can recreate exactly the same CPU model, including every
+ * writeable QOM property.
+ */
+static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
+{
+ ObjectPropertyIterator iter;
+ ObjectProperty *prop;
+
+ object_property_iter_init(&iter, OBJECT(cpu));
+ while ((prop = object_property_iter_next(&iter))) {
+ /* skip read-only or write-only properties */
+ if (!prop->get || !prop->set) {
+ continue;
+ }
+
+ /* "hotplugged" is the only property that is configurable
+ * on the command line but will be set differently on CPUs
+ * created using "-cpu ... -smp ..." and by CPUs created
+ * on the fly by x86_cpu_from_model() for querying. Skip it.
+ */
+ if (!strcmp(prop->name, "hotplugged")) {
+ continue;
+ }
+ x86_cpu_expand_prop(cpu, props, prop->name);
+ }
+}
+
+static void object_apply_props(Object *obj, QDict *props, Error **errp)
+{
+ const QDictEntry *prop;
+
+ for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
+ if (!object_property_set_qobject(obj, qdict_entry_key(prop),
+ qdict_entry_value(prop), errp)) {
+ break;
+ }
+ }
+}
+
+/* Create X86CPU object according to model+props specification */
+static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
+{
+ X86CPU *xc = NULL;
+ X86CPUClass *xcc;
+ Error *err = NULL;
+
+ xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
+ if (xcc == NULL) {
+ error_setg(&err, "CPU model '%s' not found", model);
+ goto out;
+ }
+
+ xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
+ if (props) {
+ object_apply_props(OBJECT(xc), props, &err);
+ if (err) {
+ goto out;
+ }
+ }
+
+ x86_cpu_expand_features(xc, &err);
+ if (err) {
+ goto out;
+ }
+
+out:
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(OBJECT(xc));
+ xc = NULL;
+ }
+ return xc;
+}
+
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ X86CPU *xc = NULL;
+ Error *err = NULL;
+ CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
+ QDict *props = NULL;
+ const char *base_name;
+
+ xc = x86_cpu_from_model(model->name,
+ model->has_props ?
+ qobject_to(QDict, model->props) :
+ NULL, &err);
+ if (err) {
+ goto out;
+ }
+
+ props = qdict_new();
+ ret->model = g_new0(CpuModelInfo, 1);
+ ret->model->props = QOBJECT(props);
+ ret->model->has_props = true;
+
+ switch (type) {
+ case CPU_MODEL_EXPANSION_TYPE_STATIC:
+ /* Static expansion will be based on "base" only */
+ base_name = "base";
+ x86_cpu_to_dict(xc, props);
+ break;
+ case CPU_MODEL_EXPANSION_TYPE_FULL:
+ /* As we don't return every single property, full expansion needs
+ * to keep the original model name+props, and add extra
+ * properties on top of that.
+ */
+ base_name = model->name;
+ x86_cpu_to_dict_full(xc, props);
+ break;
+ default:
+ error_setg(&err, "Unsupported expansion type");
+ goto out;
+ }
+
+ x86_cpu_to_dict(xc, props);
+
+ ret->model->name = g_strdup(base_name);
+
+out:
+ object_unref(OBJECT(xc));
+ if (err) {
+ error_propagate(errp, err);
+ qapi_free_CpuModelExpansionInfo(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
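+/*
+ * Illustrative QMP usage (an editor's sketch, not part of this patch):
+ * a static expansion request and the shape of its reply. The exact
+ * property list in the reply varies by host and accelerator.
+ *
+ *   -> { "execute": "query-cpu-model-expansion",
+ *        "arguments": { "type": "static",
+ *                       "model": { "name": "qemu64" } } }
+ *   <- { "return": { "model": { "name": "base",
+ *                               "props": { "family": 15, ... } } } }
+ */
+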
+void cpu_clear_apic_feature(CPUX86State *env)
+{
+ env->features[FEAT_1_EDX] &= ~CPUID_APIC;
+}
+
+bool cpu_is_bsp(X86CPU *cpu)
+{
+ return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
+}
+
+/* TODO: remove me when reset over QOM tree is implemented */
+void x86_cpu_machine_reset_cb(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ cpu_reset(CPU(cpu));
+}
+
+APICCommonClass *apic_get_class(void)
+{
+ const char *apic_type = "apic";
+
+ /* TODO: in-kernel irqchip for hvf */
+ if (kvm_apic_in_kernel()) {
+ apic_type = "kvm-apic";
+ } else if (xen_enabled()) {
+ apic_type = "xen-apic";
+ } else if (whpx_apic_in_platform()) {
+ apic_type = "whpx-apic";
+ }
+
+ return APIC_COMMON_CLASS(object_class_by_name(apic_type));
+}
+
+void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
+{
+ APICCommonState *apic;
+ ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
+
+ cpu->apic_state = DEVICE(object_new_with_class(apic_class));
+
+ object_property_add_child(OBJECT(cpu), "lapic",
+ OBJECT(cpu->apic_state));
+ object_unref(OBJECT(cpu->apic_state));
+
+ qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
+ /* TODO: convert to link<> */
+ apic = APIC_COMMON(cpu->apic_state);
+ apic->cpu = cpu;
+ apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
+}
+
+void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
+{
+ APICCommonState *apic;
+ static bool apic_mmio_map_once;
+
+ if (cpu->apic_state == NULL) {
+ return;
+ }
+ qdev_realize(DEVICE(cpu->apic_state), NULL, errp);
+
+ /* Map APIC MMIO area */
+ apic = APIC_COMMON(cpu->apic_state);
+ if (!apic_mmio_map_once) {
+ memory_region_add_subregion_overlap(get_system_memory(),
+ apic->apicbase &
+ MSR_IA32_APICBASE_BASE,
+ &apic->io_memory,
+ 0x1000);
+ apic_mmio_map_once = true;
+ }
+}
+
+GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ GuestPanicInformation *panic_info = NULL;
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_CRASH)) {
+ panic_info = g_malloc0(sizeof(GuestPanicInformation));
+
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
+
+ assert(HV_CRASH_PARAMS >= 5);
+ panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
+ panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
+ panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
+ panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
+ panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
+ }
+
+ return panic_info;
+}
+
+void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CPUState *cs = CPU(obj);
+ GuestPanicInformation *panic_info;
+
+ if (!cs->crash_occurred) {
+ error_setg(errp, "No crash occurred");
+ return;
+ }
+
+ panic_info = x86_cpu_get_crash_info(cs);
+ if (panic_info == NULL) {
+ error_setg(errp, "No crash information");
+ return;
+ }
+
+ visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
+ errp);
+ qapi_free_GuestPanicInformation(panic_info);
+}
+
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
new file mode 100644
index 000000000..aa9e63680
--- /dev/null
+++ b/target/i386/cpu.c
@@ -0,0 +1,7055 @@
+/*
+ * i386 CPUID, CPU class, definitions, models
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/cutils.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "tcg/helper-tcg.h"
+#include "sysemu/reset.h"
+#include "sysemu/hvf.h"
+#include "kvm/kvm_i386.h"
+#include "sev.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-machine.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "standard-headers/asm-x86/kvm_para.h"
+#include "hw/qdev-properties.h"
+#include "hw/i386/topology.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "hw/i386/sgx-epc.h"
+#endif
+
+#include "disas/capstone.h"
+#include "cpu-internal.h"
+
+/* Helpers for building CPUID[2] descriptors: */
+
+struct CPUID2CacheDescriptorInfo {
+ enum CacheType type;
+ int level;
+ int size;
+ int line_size;
+ int associativity;
+};
+
+/*
+ * Known CPUID 2 cache descriptors.
+ * From Intel SDM Volume 2A, CPUID instruction
+ */
+struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
+ [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
+ .associativity = 2, .line_size = 32, },
+ [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
+ .associativity = 6, .line_size = 64, },
+ [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
+ .associativity = 2, .line_size = 64, },
+ [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 8, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x22, 0x23 are not included
+ */
+ [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 16, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x25, 0x20 are not included
+ */
+ [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 32, },
+ [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 4, .line_size = 32, },
+ [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
+ .associativity = 12, .line_size = 64, },
+ /* Descriptor 0x49 depends on CPU family/model, so it is not included */
+ [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
+ */
+ [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 2, .line_size = 64, },
+ [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 8, .line_size = 32, },
+ [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 8, .line_size = 32, },
+ [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 32, },
+ [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 32, },
+ [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
+ .associativity = 24, .line_size = 64, },
+};
+
+/*
+ * "CPUID leaf 2 does not report cache descriptor information,
+ * use CPUID leaf 4 to query cache parameters"
+ */
+#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
+
+/*
+ * Return a CPUID 2 cache descriptor for a given cache.
+ * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
+ */
+static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
+{
+ int i;
+
+ assert(cache->size > 0);
+ assert(cache->level > 0);
+ assert(cache->line_size > 0);
+ assert(cache->associativity > 0);
+ for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
+ struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
+ if (d->level == cache->level && d->type == cache->type &&
+ d->size == cache->size && d->line_size == cache->line_size &&
+ d->associativity == cache->associativity) {
+ return i;
+ }
+ }
+
+ return CACHE_DESCRIPTOR_UNAVAILABLE;
+}
+
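+/*
+ * Worked example (an editor's sketch, not part of this patch): the
+ * legacy_l2_cache_cpuid2 entry defined below (unified, level 2, 2 MiB,
+ * 8-way, 64-byte lines) matches table index 0x7D, so:
+ *
+ *   uint8_t desc = cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2);
+ *   assert(desc == 0x7D);
+ */
+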
+/* CPUID Leaf 4 constants: */
+
+/* EAX: */
+#define CACHE_TYPE_D 1
+#define CACHE_TYPE_I 2
+#define CACHE_TYPE_UNIFIED 3
+
+#define CACHE_LEVEL(l) (l << 5)
+
+#define CACHE_SELF_INIT_LEVEL (1 << 8)
+
+/* EDX: */
+#define CACHE_NO_INVD_SHARING (1 << 0)
+#define CACHE_INCLUSIVE (1 << 1)
+#define CACHE_COMPLEX_IDX (1 << 2)
+
+/* Encode CacheType for CPUID[4].EAX */
+#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
+ ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
+ ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
+ 0 /* Invalid value */)
+
+
+/* Encode cache info for CPUID[4] */
+static void encode_cache_cpuid4(CPUCacheInfo *cache,
+ int num_apic_ids, int num_cores,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ assert(cache->size == cache->line_size * cache->associativity *
+ cache->partitions * cache->sets);
+
+ assert(num_apic_ids > 0);
+ *eax = CACHE_TYPE(cache->type) |
+ CACHE_LEVEL(cache->level) |
+ (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
+ ((num_cores - 1) << 26) |
+ ((num_apic_ids - 1) << 14);
+
+ assert(cache->line_size > 0);
+ assert(cache->partitions > 0);
+ assert(cache->associativity > 0);
+ /* We don't implement fully-associative caches */
+ assert(cache->associativity < cache->sets);
+ *ebx = (cache->line_size - 1) |
+ ((cache->partitions - 1) << 12) |
+ ((cache->associativity - 1) << 22);
+
+ assert(cache->sets > 0);
+ *ecx = cache->sets - 1;
+
+ *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
+ (cache->inclusive ? CACHE_INCLUSIVE : 0) |
+ (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
+}
+
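+/*
+ * Worked example (an editor's sketch, not part of this patch): encoding
+ * the legacy_l1d_cache defined below (data, level 1, self-initializing,
+ * 64-byte lines, 1 partition, 8-way, 64 sets) for a single-core,
+ * single-thread topology (num_apic_ids = num_cores = 1) yields:
+ *
+ *   EAX = CACHE_TYPE_D | CACHE_LEVEL(1) | CACHE_SELF_INIT_LEVEL = 0x121
+ *   EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01c0003f
+ *   ECX = 64 - 1 = 0x3f
+ *   EDX = CACHE_NO_INVD_SHARING = 0x1
+ */
+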
+/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
+static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
+{
+ assert(cache->size % 1024 == 0);
+ assert(cache->lines_per_tag > 0);
+ assert(cache->associativity > 0);
+ assert(cache->line_size > 0);
+ return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
+ (cache->lines_per_tag << 8) | (cache->line_size);
+}
+
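+/*
+ * Worked example (an editor's sketch, not part of this patch): for the
+ * legacy_l1d_cache_amd entry defined below (64 KiB, 2-way, 1 line per
+ * tag, 64-byte lines) the encoding is:
+ *
+ *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
+ */
+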
+#define ASSOC_FULL 0xFF
+
+/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
+#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
+ a == 2 ? 0x2 : \
+ a == 4 ? 0x4 : \
+ a == 8 ? 0x6 : \
+ a == 16 ? 0x8 : \
+ a == 32 ? 0xA : \
+ a == 48 ? 0xB : \
+ a == 64 ? 0xC : \
+ a == 96 ? 0xD : \
+ a == 128 ? 0xE : \
+ a == ASSOC_FULL ? 0xF : \
+ 0 /* invalid value */)
+
+/*
+ * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
+ * @l3 can be NULL.
+ */
+static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
+ CPUCacheInfo *l3,
+ uint32_t *ecx, uint32_t *edx)
+{
+ assert(l2->size % 1024 == 0);
+ assert(l2->associativity > 0);
+ assert(l2->lines_per_tag > 0);
+ assert(l2->line_size > 0);
+ *ecx = ((l2->size / 1024) << 16) |
+ (AMD_ENC_ASSOC(l2->associativity) << 12) |
+ (l2->lines_per_tag << 8) | (l2->line_size);
+
+ if (l3) {
+ assert(l3->size % (512 * 1024) == 0);
+ assert(l3->associativity > 0);
+ assert(l3->lines_per_tag > 0);
+ assert(l3->line_size > 0);
+ *edx = ((l3->size / (512 * 1024)) << 18) |
+ (AMD_ENC_ASSOC(l3->associativity) << 12) |
+ (l3->lines_per_tag << 8) | (l3->line_size);
+ } else {
+ *edx = 0;
+ }
+}
+
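+/*
+ * Worked example (an editor's sketch, not part of this patch): for the
+ * legacy_l2_cache_amd entry defined below (512 KiB, 16-way, 1 line per
+ * tag, 64-byte lines), AMD_ENC_ASSOC(16) is 0x8, so:
+ *
+ *   ECX = (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140
+ */
+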
+/* Encode cache info for CPUID[8000001D] */
+static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
+ X86CPUTopoInfo *topo_info,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ uint32_t l3_threads;
+ assert(cache->size == cache->line_size * cache->associativity *
+ cache->partitions * cache->sets);
+
+ *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
+ (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
+
+ /* L3 is shared among multiple cores */
+ if (cache->level == 3) {
+ l3_threads = topo_info->cores_per_die * topo_info->threads_per_core;
+ *eax |= (l3_threads - 1) << 14;
+ } else {
+ *eax |= ((topo_info->threads_per_core - 1) << 14);
+ }
+
+ assert(cache->line_size > 0);
+ assert(cache->partitions > 0);
+ assert(cache->associativity > 0);
+ /* We don't implement fully-associative caches */
+ assert(cache->associativity < cache->sets);
+ *ebx = (cache->line_size - 1) |
+ ((cache->partitions - 1) << 12) |
+ ((cache->associativity - 1) << 22);
+
+ assert(cache->sets > 0);
+ *ecx = cache->sets - 1;
+
+ *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
+ (cache->inclusive ? CACHE_INCLUSIVE : 0) |
+ (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
+}
+
+/* Encode cache info for CPUID[8000001E] */
+static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ X86CPUTopoIDs topo_ids;
+
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
+
+ *eax = cpu->apic_id;
+
+ /*
+ * CPUID_Fn8000001E_EBX [Core Identifiers] (CoreId)
+ * Read-only. Reset: 0000_XXXXh.
+ * See Core::X86::Cpuid::ExtApicId.
+ * Core::X86::Cpuid::CoreId_lthree[1:0]_core[3:0]_thread[1:0];
+ * Bits Description
+ * 31:16 Reserved.
+ * 15:8 ThreadsPerCore: threads per core. Read-only. Reset: XXh.
+ * The number of threads per core is ThreadsPerCore+1.
+ * 7:0 CoreId: core ID. Read-only. Reset: XXh.
+ *
+ * NOTE: CoreId is already part of apic_id. Just use it. We can
+ * use all 8 bits to represent the core_id here.
+ */
+ *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.core_id & 0xFF);
+
+ /*
+ * CPUID_Fn8000001E_ECX [Node Identifiers] (NodeId)
+ * Read-only. Reset: 0000_0XXXh.
+ * Core::X86::Cpuid::NodeId_lthree[1:0]_core[3:0]_thread[1:0];
+ * Bits Description
+ * 31:11 Reserved.
+ * 10:8 NodesPerProcessor: Nodes per processor. Read-only. Reset: XXXb.
+ * ValidValues:
+ * Value Description
+ * 000b 1 node per processor.
+ * 001b 2 nodes per processor.
+ * 010b Reserved.
+ * 011b 4 nodes per processor.
+ * 111b-100b Reserved.
+ * 7:0 NodeId: Node ID. Read-only. Reset: XXh.
+ *
+ * NOTE: Hardware reserves 3 bits for the number of nodes per processor.
+ * But users can create more nodes than the actual hardware can
+ * support. To generalize, we can use all the upper 8 bits for nodes.
+ * NodeId is a combination of node and socket_id, which is already
+ * decoded in apic_id. Just use it by shifting.
+ */
+ *ecx = ((topo_info->dies_per_pkg - 1) << 8) |
+ ((cpu->apic_id >> apicid_die_offset(topo_info)) & 0xFF);
+
+ *edx = 0;
+}
+
+/*
+ * Definitions of the hardcoded cache entries we expose:
+ * These are legacy cache values. If there is a need to change any
+ * of these values, please use builtin_x86_defs.
+ */
+
+/* L1 data cache: */
+static CPUCacheInfo legacy_l1d_cache = {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
+/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l1d_cache_amd = {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .sets = 512,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+};
+
+/* L1 instruction cache: */
+static CPUCacheInfo legacy_l1i_cache = {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
+/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l1i_cache_amd = {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .sets = 512,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+};
+
+/* Level 2 unified cache: */
+static CPUCacheInfo legacy_l2_cache = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 4 * MiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 4096,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
+/* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
+static CPUCacheInfo legacy_l2_cache_cpuid2 = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 2 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+};
+
+
+/* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
+static CPUCacheInfo legacy_l2_cache_amd = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .lines_per_tag = 1,
+ .associativity = 16,
+ .sets = 512,
+ .partitions = 1,
+};
+
+/* Level 3 unified cache: */
+static CPUCacheInfo legacy_l3_cache = {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 16384,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+};
+
+/* TLB definitions: */
+
+#define L1_DTLB_2M_ASSOC 1
+#define L1_DTLB_2M_ENTRIES 255
+#define L1_DTLB_4K_ASSOC 1
+#define L1_DTLB_4K_ENTRIES 255
+
+#define L1_ITLB_2M_ASSOC 1
+#define L1_ITLB_2M_ENTRIES 255
+#define L1_ITLB_4K_ASSOC 1
+#define L1_ITLB_4K_ENTRIES 255
+
+#define L2_DTLB_2M_ASSOC 0 /* disabled */
+#define L2_DTLB_2M_ENTRIES 0 /* disabled */
+#define L2_DTLB_4K_ASSOC 4
+#define L2_DTLB_4K_ENTRIES 512
+
+#define L2_ITLB_2M_ASSOC 0 /* disabled */
+#define L2_ITLB_2M_ENTRIES 0 /* disabled */
+#define L2_ITLB_4K_ASSOC 4
+#define L2_ITLB_4K_ENTRIES 512
+
+/* CPUID Leaf 0x14 constants: */
+#define INTEL_PT_MAX_SUBLEAF 0x1
+/*
+ * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
+ * MSR can be accessed;
+ * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
+ * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
+ * of Intel PT MSRs across warm reset;
+ * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
+ */
+#define INTEL_PT_MINIMAL_EBX 0xf
+/*
+ * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
+ * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
+ * accessed;
+ * bit[01]: ToPA tables can hold any number of output entries, up to the
+ * maximum allowed by the MaskOrTableOffset field of
+ * IA32_RTIT_OUTPUT_MASK_PTRS;
+ * bit[02]: Support Single-Range Output scheme;
+ */
+#define INTEL_PT_MINIMAL_ECX 0x7
+/* generated packets which contain IP payloads have LIP values */
+#define INTEL_PT_IP_LIP (1 << 31)
+#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
+#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
+#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
+#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
+#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
+
+void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
+ uint32_t vendor2, uint32_t vendor3)
+{
+ int i;
+ for (i = 0; i < 4; i++) {
+ dst[i] = vendor1 >> (8 * i);
+ dst[i + 4] = vendor2 >> (8 * i);
+ dst[i + 8] = vendor3 >> (8 * i);
+ }
+ dst[CPUID_VENDOR_SZ] = '\0';
+}
+
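+/*
+ * Worked example (an editor's sketch, not part of this patch): the
+ * vendor string is stored byte-wise, least-significant byte first, so
+ *
+ *   char dst[CPUID_VENDOR_SZ + 1];
+ *   x86_cpu_vendor_words2str(dst, 0x68747541, 0x69746e65, 0x444d4163);
+ *
+ * produces "AuthenticAMD" ("Auth" + "enti" + "cAMD").
+ */
+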
+#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
+#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
+ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
+#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
+ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+ CPUID_PSE36 | CPUID_FXSR)
+#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
+#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
+ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
+ CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
+ CPUID_PAE | CPUID_SEP | CPUID_APIC)
+
+#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
+ CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
+ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+ CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+ /* partly implemented:
+ CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
+ /* missing:
+ CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
+#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
+ CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
+ CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
+ CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
+ CPUID_EXT_RDRAND)
+ /* missing:
+ CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
+ CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
+ CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
+ CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
+ CPUID_EXT_F16C */
+
+#ifdef TARGET_X86_64
+#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
+#else
+#define TCG_EXT2_X86_64_FEATURES 0
+#endif
+
+#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
+ CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
+ CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
+ TCG_EXT2_X86_64_FEATURES)
+#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
+#define TCG_EXT4_FEATURES 0
+#define TCG_SVM_FEATURES (CPUID_SVM_NPT | CPUID_SVM_VGIF | \
+ CPUID_SVM_SVME_ADDR_CHK)
+#define TCG_KVM_FEATURES 0
+#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
+ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
+ CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
+ CPUID_7_0_EBX_ERMS)
+ /* missing:
+ CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
+ CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
+ CPUID_7_0_EBX_RDSEED */
+#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
+ /* CPUID_7_0_ECX_OSPKE is dynamic */ \
+ CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS)
+#define TCG_7_0_EDX_FEATURES 0
+#define TCG_7_1_EAX_FEATURES 0
+#define TCG_APM_FEATURES 0
+#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
+#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
+ /* missing:
+ CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
+#define TCG_14_0_ECX_FEATURES 0
+#define TCG_SGX_12_0_EAX_FEATURES 0
+#define TCG_SGX_12_0_EBX_FEATURES 0
+#define TCG_SGX_12_1_EAX_FEATURES 0
+
+FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
+ [FEAT_1_EDX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "fpu", "vme", "de", "pse",
+ "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep",
+ "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
+ NULL, "ds" /* Intel dts */, "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss",
+ "ht" /* Intel htt */, "tm", "ia64", "pbe",
+ },
+ .cpuid = {.eax = 1, .reg = R_EDX, },
+ .tcg_features = TCG_FEATURES,
+ },
+ [FEAT_1_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
+ "ds-cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL,
+ "fma", "cx16", "xtpr", "pdcm",
+ NULL, "pcid", "dca", "sse4.1",
+ "sse4.2", "x2apic", "movbe", "popcnt",
+ "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
+ "avx", "f16c", "rdrand", "hypervisor",
+ },
+ .cpuid = { .eax = 1, .reg = R_ECX, },
+ .tcg_features = TCG_EXT_FEATURES,
+ },
+ /* Feature names that are already defined in feature_name[] but
+ * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
+ * names in feat_names below. They are copied automatically
+ * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
+ */
+ [FEAT_8000_0001_EDX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
+ NULL, "lm", "3dnowext", "3dnow",
+ },
+ .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
+ .tcg_features = TCG_EXT2_FEATURES,
+ },
+ [FEAT_8000_0001_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "lahf-lm", "cmp-legacy", "svm", "extapic",
+ "cr8legacy", "abm", "sse4a", "misalignsse",
+ "3dnowprefetch", "osvw", "ibs", "xop",
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid-msr",
+ NULL, "tbm", "topoext", "perfctr-core",
+ "perfctr-nb", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
+ .tcg_features = TCG_EXT3_FEATURES,
+ /*
+ * TOPOEXT is always allowed but can't be enabled blindly by
+ * "-cpu host", as it requires consistent cache topology info
+ * to be provided so it doesn't confuse guests.
+ */
+ .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
+ },
+ [FEAT_C000_0001_EDX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, "xstore", "xstore-en",
+ NULL, NULL, "xcrypt", "xcrypt-en",
+ "ace2", "ace2-en", "phe", "phe-en",
+ "pmm", "pmm-en", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
+ .tcg_features = TCG_EXT4_FEATURES,
+ },
+ [FEAT_KVM] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
+ "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
+ NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
+ "kvm-poll-control", "kvm-pv-sched-yield", "kvm-asyncpf-int", "kvm-msi-ext-dest-id",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "kvmclock-stable-bit", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
+ .tcg_features = TCG_KVM_FEATURES,
+ },
+ [FEAT_KVM_HINTS] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "kvm-hint-dedicated", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
+ .tcg_features = TCG_KVM_FEATURES,
+ /*
+ * KVM hints aren't auto-enabled by "-cpu host"; they need to be
+ * explicitly enabled on the command line.
+ */
+ .no_autoenable_flags = ~0U,
+ },
+ [FEAT_SVM] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "npt", "lbrv", "svm-lock", "nrip-save",
+ "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
+ NULL, NULL, "pause-filter", NULL,
+ "pfthreshold", "avic", NULL, "v-vmsave-vmload",
+ "vgif", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "svme-addr-chk", NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
+ .tcg_features = TCG_SVM_FEATURES,
+ },
+ [FEAT_7_0_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "fsgsbase", "tsc-adjust", "sgx", "bmi1",
+ "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm",
+ NULL, NULL, "mpx", NULL,
+ "avx512f", "avx512dq", "rdseed", "adx",
+ "smap", "avx512ifma", "pcommit", "clflushopt",
+ "clwb", "intel-pt", "avx512pf", "avx512er",
+ "avx512cd", "sha-ni", "avx512bw", "avx512vl",
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EBX,
+ },
+ .tcg_features = TCG_7_0_EBX_FEATURES,
+ },
+ [FEAT_7_0_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, "avx512vbmi", "umip", "pku",
+ NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
+ "gfni", "vaes", "vpclmulqdq", "avx512vnni",
+ "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
+ "la57", NULL, NULL, NULL,
+ NULL, NULL, "rdpid", NULL,
+ "bus-lock-detect", "cldemote", NULL, "movdiri",
+ "movdir64b", NULL, "sgxlc", "pks",
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_7_0_ECX_FEATURES,
+ },
+ [FEAT_7_0_EDX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
+ "fsrm", NULL, NULL, NULL,
+ "avx512-vp2intersect", NULL, "md-clear", NULL,
+ NULL, NULL, "serialize", NULL,
+ "tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
+ NULL, NULL, NULL, "avx512-fp16",
+ NULL, NULL, "spec-ctrl", "stibp",
+ NULL, "arch-capabilities", "core-capability", "ssbd",
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EDX,
+ },
+ .tcg_features = TCG_7_0_EDX_FEATURES,
+ },
+ [FEAT_7_1_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ "avx-vnni", "avx512-bf16", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_EAX,
+ },
+ .tcg_features = TCG_7_1_EAX_FEATURES,
+ },
+ [FEAT_8000_0007_EDX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "invtsc", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
+ .tcg_features = TCG_APM_FEATURES,
+ .unmigratable_flags = CPUID_APM_INVTSC,
+ },
+ [FEAT_8000_0008_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "clzero", NULL, "xsaveerptr", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, "wbnoinvd", NULL, NULL,
+ "ibpb", NULL, "ibrs", "amd-stibp",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
+ .tcg_features = 0,
+ .unmigratable_flags = 0,
+ },
+ [FEAT_XSAVE] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0xd,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_EAX,
+ },
+ .tcg_features = TCG_XSAVE_FEATURES,
+ },
+ [FEAT_6_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, "arat", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 6, .reg = R_EAX, },
+ .tcg_features = TCG_6_EAX_FEATURES,
+ },
+ [FEAT_XSAVE_COMP_LO] = {
+ .type = CPUID_FEATURE_WORD,
+ .cpuid = {
+ .eax = 0xD,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EAX,
+ },
+ .tcg_features = ~0U,
+ .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+ XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
+ XSTATE_PKRU_MASK,
+ },
+ [FEAT_XSAVE_COMP_HI] = {
+ .type = CPUID_FEATURE_WORD,
+ .cpuid = {
+ .eax = 0xD,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EDX,
+ },
+ .tcg_features = ~0U,
+ },
+ /* Below are MSR-exposed features */
+ [FEAT_ARCH_CAPABILITIES] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
+ "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
+ "taa-no", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_ARCH_CAPABILITIES,
+ },
+ },
+ [FEAT_CORE_CAPABILITY] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "split-lock-detect", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_CORE_CAPABILITY,
+ },
+ },
+ [FEAT_PERF_CAPABILITIES] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, "full-width-write", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_PERF_CAPABILITIES,
+ },
+ },
+
+ [FEAT_VMX_PROCBASED_CTLS] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
+ NULL, NULL, NULL, "vmx-hlt-exit",
+ NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
+ "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
+ "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
+ "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
+ "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
+ "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ }
+ },
+
+ [FEAT_VMX_SECONDARY_CTLS] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
+ "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
+ "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
+ "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
+ "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
+ "vmx-xsaves", NULL, NULL, NULL,
+ NULL, "vmx-tsc-scaling", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_PROCBASED_CTLS2,
+ }
+ },
+
+ [FEAT_VMX_PINBASED_CTLS] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
+ NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ }
+ },
+
+ [FEAT_VMX_EXIT_CTLS] = {
+ .type = MSR_FEATURE_WORD,
+ /*
+ * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
+ * the LM CPUID bit.
+ */
+ .feat_names = {
+ NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
+ "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
+ NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
+ "vmx-exit-save-efer", "vmx-exit-load-efer",
+ "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
+ NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
+ NULL, "vmx-exit-load-pkrs", NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ }
+ },
+
+ [FEAT_VMX_ENTRY_CTLS] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, "vmx-entry-noload-debugctl", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, "vmx-entry-ia32e-mode", NULL, NULL,
+ NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
+ "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
+ NULL, NULL, "vmx-entry-load-pkrs", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ }
+ },
+
+ [FEAT_VMX_MISC] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
+ "vmx-activity-wait-sipi", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_MISC,
+ }
+ },
+
+ [FEAT_VMX_EPT_VPID_CAPS] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ "vmx-ept-execonly", NULL, NULL, NULL,
+ NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
+ "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
+ NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
+ NULL, NULL, NULL, NULL,
+ "vmx-invvpid", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "vmx-invvpid-single-addr", "vmx-invept-single-context",
+ "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_EPT_VPID_CAP,
+ }
+ },
+
+ [FEAT_VMX_BASIC] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ [54] = "vmx-ins-outs",
+ [55] = "vmx-true-ctls",
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_BASIC,
+ },
+ /* Just to be safe - we don't support setting the MSEG version field. */
+ .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
+ },
+
+ [FEAT_VMX_VMFUNC] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ [0] = "vmx-eptp-switching",
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_VMFUNC,
+ }
+ },
+
+ [FEAT_14_0_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "intel-pt-lip",
+ },
+ .cpuid = {
+ .eax = 0x14,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_14_0_ECX_FEATURES,
+ },
+
+ [FEAT_SGX_12_0_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "sgx1", "sgx2", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0x12,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EAX,
+ },
+ .tcg_features = TCG_SGX_12_0_EAX_FEATURES,
+ },
+
+ [FEAT_SGX_12_0_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "sgx-exinfo" , NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0x12,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EBX,
+ },
+ .tcg_features = TCG_SGX_12_0_EBX_FEATURES,
+ },
+
+ [FEAT_SGX_12_1_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, "sgx-debug", "sgx-mode64", NULL,
+ "sgx-provisionkey", "sgx-tokenkey", NULL, "sgx-kss",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0x12,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_EAX,
+ },
+ .tcg_features = TCG_SGX_12_1_EAX_FEATURES,
+ },
+};
+
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
+static FeatureDep feature_dependencies[] = {
+ {
+ .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
+ .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
+ },
+ {
+ .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
+ .to = { FEAT_CORE_CAPABILITY, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
+ .to = { FEAT_PERF_CAPABILITIES, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_MISC, ~0ull },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_VMX },
+ .to = { FEAT_VMX_BASIC, ~0ull },
+ },
+ {
+ .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
+ .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
+ },
+ {
+ .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
+ .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
+ },
+ {
+ .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
+ },
+ {
+ .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
+ },
+ {
+ .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT },
+ .to = { FEAT_14_0_ECX, ~0ull },
+ },
+ {
+ .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
+ },
+ {
+ .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
+ .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
+ },
+ {
+ .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
+ },
+ {
+ .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
+ .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
+ },
+ {
+ .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
+ .to = { FEAT_VMX_VMFUNC, ~0ull },
+ },
+ {
+ .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM },
+ .to = { FEAT_SVM, ~0ull },
+ },
+};
+
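+/*
+ * Illustrative sketch (an editor's note, not part of this patch) of how
+ * a table like this is consumed: when a "from" bit is absent, the
+ * dependent "to" bits have to be cleared as well, conceptually:
+ *
+ *   for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
+ *       FeatureDep *d = &feature_dependencies[i];
+ *       if (!(env->features[d->from.index] & d->from.mask)) {
+ *           env->features[d->to.index] &= ~d->to.mask;
+ *       }
+ *   }
+ */
+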
+typedef struct X86RegisterInfo32 {
+ /* Name of register */
+ const char *name;
+ /* QAPI enum value register */
+ X86CPURegister32 qapi_enum;
+} X86RegisterInfo32;
+
+#define REGISTER(reg) \
+ [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
+static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
+ REGISTER(EAX),
+ REGISTER(ECX),
+ REGISTER(EDX),
+ REGISTER(EBX),
+ REGISTER(ESP),
+ REGISTER(EBP),
+ REGISTER(ESI),
+ REGISTER(EDI),
+};
+#undef REGISTER
+
+ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
+ [XSTATE_FP_BIT] = {
+ /* x87 FP state component is always enabled if XSAVE is supported */
+ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+ .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ },
+ [XSTATE_SSE_BIT] = {
+ /* SSE state component is always enabled if XSAVE is supported */
+ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+ .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ },
+ [XSTATE_YMM_BIT] =
+ { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
+ .size = sizeof(XSaveAVX) },
+ [XSTATE_BNDREGS_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ .size = sizeof(XSaveBNDREG) },
+ [XSTATE_BNDCSR_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ .size = sizeof(XSaveBNDCSR) },
+ [XSTATE_OPMASK_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .size = sizeof(XSaveOpmask) },
+ [XSTATE_ZMM_Hi256_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .size = sizeof(XSaveZMM_Hi256) },
+ [XSTATE_Hi16_ZMM_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .size = sizeof(XSaveHi16_ZMM) },
+ [XSTATE_PKRU_BIT] =
+ { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
+ .size = sizeof(XSavePKRU) },
+};
+
+static uint32_t xsave_area_size(uint64_t mask)
+{
+ int i;
+ uint64_t ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((mask >> i) & 1) {
+ ret = MAX(ret, esa->offset + esa->size);
+ }
+ }
+ return ret;
+}
+
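+/*
+ * Worked example (an editor's sketch, not part of this patch): with only
+ * the x87 and SSE components enabled (XSTATE_FP_MASK | XSTATE_SSE_MASK),
+ * both components live at offset 0 inside the legacy area, so
+ * xsave_area_size() returns sizeof(X86LegacyXSaveArea) +
+ * sizeof(X86XSaveHeader), i.e. the traditional 512 + 64 = 576 bytes.
+ */
+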
+static inline bool accel_uses_host_cpuid(void)
+{
+ return kvm_enabled() || hvf_enabled();
+}
+
+static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
+{
+ return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
+ cpu->env.features[FEAT_XSAVE_COMP_LO];
+}
+
+/* Return name of 32-bit register, from a R_* constant */
+static const char *get_register_name_32(unsigned int reg)
+{
+ if (reg >= CPU_NB_REGS32) {
+ return NULL;
+ }
+ return x86_reg_info_32[reg].name;
+}
+
+/*
+ * Returns the set of feature flags that are supported and migratable by
+ * QEMU, for a given FeatureWord.
+ */
+static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
+{
+ FeatureWordInfo *wi = &feature_word_info[w];
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ uint64_t f = 1ULL << i;
+
+ /* If the feature name is known, it is implicitly considered migratable,
+ * unless it is explicitly set in unmigratable_flags */
+ if ((wi->migratable_flags & f) ||
+ (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
+ r |= f;
+ }
+ }
+ return r;
+}
+
+void host_cpuid(uint32_t function, uint32_t count,
+ uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+ uint32_t vec[4];
+
+#ifdef __x86_64__
+ asm volatile("cpuid"
+ : "=a"(vec[0]), "=b"(vec[1]),
+ "=c"(vec[2]), "=d"(vec[3])
+ : "0"(function), "c"(count) : "cc");
+#elif defined(__i386__)
+ asm volatile("pusha \n\t"
+ "cpuid \n\t"
+ "mov %%eax, 0(%2) \n\t"
+ "mov %%ebx, 4(%2) \n\t"
+ "mov %%ecx, 8(%2) \n\t"
+ "mov %%edx, 12(%2) \n\t"
+ "popa"
+ : : "a"(function), "c"(count), "S"(vec)
+ : "memory", "cc");
+#else
+ abort();
+#endif
+
+ if (eax)
+ *eax = vec[0];
+ if (ebx)
+ *ebx = vec[1];
+ if (ecx)
+ *ecx = vec[2];
+ if (edx)
+ *edx = vec[3];
+}
+
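+/*
+ * Usage sketch (an editor's note, not part of this patch): reading the
+ * host vendor string via CPUID leaf 0. Note the EBX, EDX, ECX register
+ * order of the vendor string:
+ *
+ *   uint32_t ebx, ecx, edx;
+ *   char vendor[CPUID_VENDOR_SZ + 1];
+ *
+ *   host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
+ *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+ */
+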
+/* CPU class name definitions: */
+
+/* Return type name for a given CPU model name
+ * Caller is responsible for freeing the returned string.
+ */
+static char *x86_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
+}
+
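+/*
+ * Example (an editor's note, not part of this patch): on a 64-bit
+ * target, x86_cpu_type_name("qemu64") returns the freshly allocated
+ * string "qemu64-x86_64-cpu", which is the QOM type name registered
+ * for that model.
+ */
+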
+static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
+{
+ g_autofree char *typename = x86_cpu_type_name(cpu_model);
+ return object_class_by_name(typename);
+}
+
+static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
+{
+ const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
+ assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
+ return g_strndup(class_name,
+ strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
+}
+
+typedef struct X86CPUVersionDefinition {
+ X86CPUVersion version;
+ const char *alias;
+ const char *note;
+ PropValue *props;
+} X86CPUVersionDefinition;
+
+/* Base definition for a CPU model */
+typedef struct X86CPUDefinition {
+ const char *name;
+ uint32_t level;
+ uint32_t xlevel;
+ /* vendor is a zero-terminated, 12-character ASCII string */
+ char vendor[CPUID_VENDOR_SZ + 1];
+ int family;
+ int model;
+ int stepping;
+ FeatureWordArray features;
+ const char *model_id;
+ const CPUCaches *const cache_info;
+ /*
+ * Definitions for alternative versions of CPU model.
+ * List is terminated by item with version == 0.
+ * If NULL, version 1 will be registered automatically.
+ */
+ const X86CPUVersionDefinition *versions;
+ const char *deprecation_note;
+} X86CPUDefinition;
+
+/* Reference to a specific CPU model version */
+struct X86CPUModel {
+ /* Base CPU definition */
+ const X86CPUDefinition *cpudef;
+ /* CPU model version */
+ X86CPUVersion version;
+ const char *note;
+ /*
+ * If true, this is an alias CPU model.
+ * This matters only for "-cpu help" and query-cpu-definitions
+ */
+ bool is_alias;
+};
+
+/* Get full model name for CPU version */
+static char *x86_cpu_versioned_model_name(const X86CPUDefinition *cpudef,
+ X86CPUVersion version)
+{
+ assert(version > 0);
+ return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
+}
+
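+/*
+ * Example (an editor's note, not part of this patch): for a definition
+ * named "EPYC" and version 3 this returns the freshly allocated string
+ * "EPYC-v3", which the caller must g_free().
+ */
+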
+static const X86CPUVersionDefinition *
+x86_cpu_def_get_versions(const X86CPUDefinition *def)
+{
+ /* When X86CPUDefinition::versions is NULL, we register only v1 */
+ static const X86CPUVersionDefinition default_version_list[] = {
+ { 1 },
+ { /* end of list */ }
+ };
+
+ return def->versions ?: default_version_list;
+}
+
+static const CPUCaches epyc_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .line_size = 64,
+ .associativity = 4,
+ .partitions = 1,
+ .sets = 256,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 8 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 8192,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ },
+};
+
+static const CPUCaches epyc_rome_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 16384,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ },
+};
+
+static const CPUCaches epyc_milan_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = 1,
+ .no_invd_sharing = true,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ },
+};
+
+/* The following VMX features are not supported by KVM and are left out of
+ * the CPU definitions:
+ *
+ * Dual-monitor support (all processors)
+ * Entry to SMM
+ * Deactivate dual-monitor treatment
+ * Number of CR3-target values
+ * Shutdown activity state
+ * Wait-for-SIPI activity state
+ * PAUSE-loop exiting (Westmere and newer)
+ * EPT-violation #VE (Broadwell and newer)
+ * Inject event with insn length=0 (Skylake and newer)
+ * Conceal non-root operation from PT
+ * Conceal VM exits from PT
+ * Conceal VM entries from PT
+ * Enable ENCLS exiting
+ * Mode-based execute control (XS/XU)
+ * TSC scaling (Skylake Server and newer)
+ * GPA translation for PT (IceLake and newer)
+ * User wait and pause
+ * ENCLV exiting
+ * Load IA32_RTIT_CTL
+ * Clear IA32_RTIT_CTL
+ * Advanced VM-exit information for EPT violations
+ * Sub-page write permissions
+ * PT in VMX operation
+ */
+
+static const X86CPUDefinition builtin_x86_defs[] = {
+ {
+ .name = "qemu64",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 15,
+ .model = 107,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
+ .xlevel = 0x8000000A,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
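+ /*
+ * Reading these entries: .level is the highest basic CPUID leaf exposed
+ * to the guest and .xlevel the highest extended (0x80000000-range) leaf;
+ * each .features[] word is an OR-ed bit mask for one CPUID register, so
+ * qemu64 above advertises long mode via CPUID_EXT2_LM in leaf 0x80000001
+ * EDX.
+ */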
+ {
+ .name = "phenom",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 16,
+ .model = 2,
+ .stepping = 3,
+ /* Missing: CPUID_HT */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36 | CPUID_VME,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
+ CPUID_EXT_POPCNT,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
+ CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
+ /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+ CPUID_EXT3_CR8LEG,
+ CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
+ CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+ /* Missing: CPUID_SVM_LBRV */
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
+ },
+ {
+ .name = "core2duo",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 15,
+ .stepping = 11,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
+ /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
+ * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+ CPUID_EXT_CX16,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
+ .xlevel = 0x80000008,
+ .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
+ },
+ {
+ .name = "kvm64",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ /* Missing: CPUID_HT */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36,
+ /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+ /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+ CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
+ CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
+ .features[FEAT_8000_0001_ECX] =
+ 0,
+ /* VMX features from Cedar Mill/Prescott */
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
+ .xlevel = 0x80000008,
+ .model_id = "Common KVM processor"
+ },
+ {
+ .name = "qemu32",
+ .level = 4,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 6,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .xlevel = 0x80000004,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
+ {
+ .name = "kvm32",
+ .level = 5,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_ECX] =
+ 0,
+ /* VMX features from Yonah */
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
+ VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
+ VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
+ .xlevel = 0x80000008,
+ .model_id = "Common 32-bit KVM processor"
+ },
+ {
+ .name = "coreduo",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 14,
+ .stepping = 8,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
+ CPUID_SS,
+ /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
+ * CPUID_EXT_PDCM, CPUID_EXT_VMX */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_NX,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
+ VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
+ VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
+ .xlevel = 0x80000008,
+ .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
+ },
+ {
+ .name = "486",
+ .level = 1,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 4,
+ .model = 8,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ I486_FEATURES,
+ .xlevel = 0,
+ .model_id = "",
+ },
+ {
+ .name = "pentium",
+ .level = 1,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 5,
+ .model = 4,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PENTIUM_FEATURES,
+ .xlevel = 0,
+ .model_id = "",
+ },
+ {
+ .name = "pentium2",
+ .level = 2,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 5,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ PENTIUM2_FEATURES,
+ .xlevel = 0,
+ .model_id = "",
+ },
+ {
+ .name = "pentium3",
+ .level = 3,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 7,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PENTIUM3_FEATURES,
+ .xlevel = 0,
+ .model_id = "",
+ },
+ {
+ .name = "athlon",
+ .level = 2,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
+ CPUID_MCA,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
+ .xlevel = 0x80000008,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
+ {
+ .name = "n270",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 28,
+ .stepping = 2,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
+ CPUID_ACPI | CPUID_SS,
+ /* Some CPUs lack CPUID_SEP */
+ /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
+ * CPUID_EXT_XTPR */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MOVBE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
+ },
+ {
+ .name = "Conroe",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 15,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
+ },
+ {
+ .name = "Penryn",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 23,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+ .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
+ },
+ {
+ .name = "Nehalem",
+ .level = 11,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 26,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Nehalem-IBRS",
+ .props = (PropValue[]) {
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
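+ /*
+ * Usage note (QEMU behavior summary, not introduced by this patch): with
+ * the list above, "-cpu Nehalem-v2" and its alias "-cpu Nehalem-IBRS"
+ * select the same model, while plain "-cpu Nehalem" resolves to a default
+ * version chosen by the machine type.
+ */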
+ {
+ .name = "Westmere",
+ .level = 11,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 44,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
+ .xlevel = 0x80000008,
+ .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Westmere-IBRS",
+ .props = (PropValue[]) {
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Westmere E56xx/L56xx/X56xx (IBRS update)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "SandyBridge",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 42,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon E312xx (Sandy Bridge)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "SandyBridge-IBRS",
+ .props = (PropValue[]) {
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "IvyBridge",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 58,
+ .stepping = 9,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_ERMS,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "IvyBridge-IBRS",
+ .props = (PropValue[]) {
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Haswell",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 60,
+ .stepping = 4,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Haswell)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Haswell-noTSX",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { "stepping", "1" },
+ { "model-id", "Intel Core Processor (Haswell, no TSX)", },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 3,
+ .alias = "Haswell-IBRS",
+ .props = (PropValue[]) {
+ /* Restore TSX features removed by -v2 above */
+ { "hle", "on" },
+ { "rtm", "on" },
+ /*
+ * Haswell and Haswell-IBRS had stepping=4 in
+ * QEMU 4.0 and older
+ */
+ { "stepping", "4" },
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Core Processor (Haswell, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 4,
+ .alias = "Haswell-noTSX-IBRS",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ /* spec-ctrl was already enabled by -v3 above */
+ { "stepping", "1" },
+ { "model-id",
+ "Intel Core Processor (Haswell, no TSX, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Broadwell",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 61,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Broadwell)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Broadwell-noTSX",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 3,
+ .alias = "Broadwell-IBRS",
+ .props = (PropValue[]) {
+ /* Restore TSX features removed by -v2 above */
+ { "hle", "on" },
+ { "rtm", "on" },
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Core Processor (Broadwell, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 4,
+ .alias = "Broadwell-noTSX-IBRS",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ /* spec-ctrl was already enabled by -v3 above */
+ { "model-id",
+ "Intel Core Processor (Broadwell, no TSX, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Skylake-Client",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 94,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP,
+ /* XSAVES is added in version 4 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Skylake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Skylake-Client-IBRS",
+ .props = (PropValue[]) {
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Core Processor (Skylake, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 3,
+ .alias = "Skylake-Client-noTSX-IBRS",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { "model-id",
+ "Intel Core Processor (Skylake, IBRS, no TSX)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 4,
+ .note = "IBRS, XSAVES, no TSX",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Skylake-Server",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 85,
+ .stepping = 4,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_PKU,
+ /* XSAVES is added in version 5 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (Skylake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "Skylake-Server-IBRS",
+ .props = (PropValue[]) {
+ /* clflushopt was not added to Skylake-Server-IBRS */
+ /* TODO: add -v3 including clflushopt */
+ { "clflushopt", "off" },
+ { "spec-ctrl", "on" },
+ { "model-id",
+ "Intel Xeon Processor (Skylake, IBRS)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 3,
+ .alias = "Skylake-Server-noTSX-IBRS",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { "model-id",
+ "Intel Xeon Processor (Skylake, IBRS, no TSX)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 4,
+ .props = (PropValue[]) {
+ { "vmx-eptp-switching", "on" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 5,
+ .note = "IBRS, XSAVES, EPT switching, no TSX",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Cascadelake-Server",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 85,
+ .stepping = 6,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512VNNI,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* XSAVES is added in version 5 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (Cascadelake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ { .version = 2,
+ .note = "ARCH_CAPABILITIES",
+ .props = (PropValue[]) {
+ { "arch-capabilities", "on" },
+ { "rdctl-no", "on" },
+ { "ibrs-all", "on" },
+ { "skip-l1dfl-vmentry", "on" },
+ { "mds-no", "on" },
+ { /* end of list */ }
+ },
+ },
+ { .version = 3,
+ .alias = "Cascadelake-Server-noTSX",
+ .note = "ARCH_CAPABILITIES, no TSX",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { /* end of list */ }
+ },
+ },
+ { .version = 4,
+ .note = "ARCH_CAPABILITIES, no TSX",
+ .props = (PropValue[]) {
+ { "vmx-eptp-switching", "on" },
+ { /* end of list */ }
+ },
+ },
+ { .version = 5,
+ .note = "ARCH_CAPABILITIES, EPT switching, XSAVES, no TSX",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ },
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Cooperlake",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 85,
+ .stepping = 10,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512VNNI,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
+ .features[FEAT_ARCH_CAPABILITIES] =
+ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
+ MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
+ MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX512_BF16,
+ /* XSAVES is added in version 2 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (Cooperlake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ { .version = 2,
+ .note = "XSAVES",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ },
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Icelake-Client",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 126,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_WBNOINVD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* XSAVES is added in version 3 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Icelake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ {
+ .version = 1,
+ .note = "deprecated"
+ },
+ {
+ .version = 2,
+ .note = "no TSX, deprecated",
+ .alias = "Icelake-Client-noTSX",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 3,
+ .note = "no TSX, XSAVES, deprecated",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ },
+ },
+ { /* end of list */ }
+ },
+ .deprecation_note = "use Icelake-Server instead"
+ },
+ {
+ .name = "Icelake-Server",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 134,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_WBNOINVD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* XSAVES is added in version 5 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (Icelake)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .note = "no TSX",
+ .alias = "Icelake-Server-noTSX",
+ .props = (PropValue[]) {
+ { "hle", "off" },
+ { "rtm", "off" },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "arch-capabilities", "on" },
+ { "rdctl-no", "on" },
+ { "ibrs-all", "on" },
+ { "skip-l1dfl-vmentry", "on" },
+ { "mds-no", "on" },
+ { "pschange-mc-no", "on" },
+ { "taa-no", "on" },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 4,
+ .props = (PropValue[]) {
+ { "sha-ni", "on" },
+ { "avx512ifma", "on" },
+ { "rdpid", "on" },
+ { "fsrm", "on" },
+ { "vmx-rdseed-exit", "on" },
+ { "vmx-pml", "on" },
+ { "vmx-eptp-switching", "on" },
+ { "model", "106" },
+ { /* end of list */ }
+ },
+ },
+ {
+ .version = 5,
+ .note = "XSAVES",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ }
+ },
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Denverton",
+ .level = 21,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 95,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
+ CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* XSAVES is added in version 3 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_ARCH_CAPABILITIES] =
+ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Atom Processor (Denverton)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .note = "no MPX, no MONITOR",
+ .props = (PropValue[]) {
+ { "monitor", "off" },
+ { "mpx", "off" },
+ { /* end of list */ },
+ },
+ },
+ {
+ .version = 3,
+ .note = "XSAVES, no MPX, no MONITOR",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ },
+ },
+ },
+ { /* end of list */ },
+ },
+ },
+ {
+ .name = "Snowridge",
+ .level = 27,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 134,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ /* missing: CPUID_PN CPUID_IA64 */
+ /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
+ CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
+ CPUID_CX8 | CPUID_APIC | CPUID_SEP |
+ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
+ CPUID_MMX |
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSSE3 |
+ CPUID_EXT_CX16 |
+ CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_NX |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_LM,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE |
+ CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
+ CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_SHA_NI,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_UMIP |
+ /* missing bit 5 */
+ CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
+ CPUID_7_0_EDX_CORE_CAPABILITY,
+ .features[FEAT_CORE_CAPABILITY] =
+ MSR_CORE_CAP_SPLIT_LOCK_DETECT,
+        /* XSAVES is added in version 3 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
+ MSR_VMX_BASIC_TRUE_CTLS,
+ .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
+ .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
+ MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
+ VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
+ .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
+ VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
+ VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
+ VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
+ VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
+ VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Atom Processor (SnowRidge)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "mpx", "off" },
+ { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
+ { /* end of list */ },
+ },
+ },
+ {
+ .version = 3,
+ .note = "XSAVES, no MPX",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { "vmx-xsaves", "on" },
+ { /* end of list */ },
+ },
+ },
+ {
+ .version = 4,
+ .note = "no split lock detect, no core-capability",
+ .props = (PropValue[]) {
+ { "split-lock-detect", "off" },
+ { "core-capability", "off" },
+ { /* end of list */ },
+ },
+ },
+ { /* end of list */ },
+ },
+ },
+ {
+ .name = "KnightsMill",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 133,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
+ CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
+ CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
+ CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
+ CPUID_PSE | CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
+ CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
+ CPUID_7_0_EBX_AVX512ER,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Phi Processor (Knights Mill)",
+ },
+ {
+ .name = "Opteron_G1",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G2",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G3",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 16,
+ .model = 2,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_RDTSCP,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
+ CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G4",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 21,
+ .model = 1,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+ /* no xsaveopt! */
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 62xx class CPU",
+ },
+ {
+ .name = "Opteron_G5",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 21,
+ .model = 2,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+ /* no xsaveopt! */
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 63xx class CPU",
+ },
+ {
+ .name = "EPYC",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 23,
+ .model = 1,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_SHA_NI,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+ .xlevel = 0x8000001E,
+ .model_id = "AMD EPYC Processor",
+ .cache_info = &epyc_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .alias = "EPYC-IBPB",
+ .props = (PropValue[]) {
+ { "ibpb", "on" },
+ { "model-id",
+ "AMD EPYC Processor (with IBPB)" },
+ { /* end of list */ }
+ }
+ },
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "ibpb", "on" },
+ { "perfctr-core", "on" },
+ { "clzero", "on" },
+ { "xsaveerptr", "on" },
+ { "xsaves", "on" },
+ { "model-id",
+ "AMD EPYC Processor" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "Dhyana",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_HYGON,
+ .family = 24,
+ .model = 0,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_IBPB,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
+ /* XSAVES is added in version 2 */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+ .xlevel = 0x8000001E,
+ .model_id = "Hygon Dhyana Processor",
+ .cache_info = &epyc_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ { .version = 2,
+ .note = "XSAVES",
+ .props = (PropValue[]) {
+ { "xsaves", "on" },
+ { /* end of list */ }
+ },
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "EPYC-Rome",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 23,
+ .model = 49,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_STIBP,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
+ .xlevel = 0x8000001E,
+ .model_id = "AMD EPYC-Rome Processor",
+ .cache_info = &epyc_rome_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "ibrs", "on" },
+ { "amd-ssbd", "on" },
+ { /* end of list */ }
+ }
+ },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "EPYC-Milan",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 25,
+ .model = 1,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_PCID,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
+ CPUID_8000_0008_EBX_AMD_SSBD,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_INVPCID,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE | CPUID_SVM_SVME_ADDR_CHK,
+ .xlevel = 0x8000001E,
+ .model_id = "AMD EPYC-Milan Processor",
+ .cache_info = &epyc_milan_cache_info,
+ },
+};
+
+/*
+ * We resolve CPU model aliases to -v1 when using "-machine
+ * none", but this is just for compatibility while libvirt isn't
+ * adapted to resolve CPU model versions before creating VMs.
+ * See "Runnability guarantee of CPU models" at
+ * docs/about/deprecated.rst.
+ */
+X86CPUVersion default_cpu_version = 1;
+
+void x86_cpu_set_default_version(X86CPUVersion version)
+{
+ /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
+ assert(version != CPU_VERSION_AUTO);
+ default_cpu_version = version;
+}
+
+static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
+{
+ int v = 0;
+ const X86CPUVersionDefinition *vdef =
+ x86_cpu_def_get_versions(model->cpudef);
+ while (vdef->version) {
+ v = vdef->version;
+ vdef++;
+ }
+ return v;
+}
+
+/* Return the actual version being used for a specific CPU model */
+static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
+{
+ X86CPUVersion v = model->version;
+ if (v == CPU_VERSION_AUTO) {
+ v = default_cpu_version;
+ }
+ if (v == CPU_VERSION_LATEST) {
+ return x86_cpu_model_last_version(model);
+ }
+ return v;
+}
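+
+/*
+ * For illustration: with the definitions above, "Cascadelake-Server"
+ * has versions 1..5, so CPU_VERSION_LATEST resolves to 5, while
+ * CPU_VERSION_AUTO falls back to default_cpu_version (1, unless the
+ * machine type overrides it via x86_cpu_set_default_version()).
+ */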
+
+static Property max_x86_cpu_properties[] = {
+ DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
+ DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+ xcc->ordering = 9;
+
+ xcc->model_description =
+ "Enables all features supported by the accelerator in the current host";
+
+ device_class_set_props(dc, max_x86_cpu_properties);
+}
+
+static void max_x86_cpu_initfn(Object *obj)
+{
+ X86CPU *cpu = X86_CPU(obj);
+
+ /* We can't fill the features array here because we don't know yet if
+ * "migratable" is true or false.
+ */
+ cpu->max_features = true;
+ object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort);
+
+ /*
+     * These defaults are used for TCG and all other accelerators
+     * besides KVM and HVF, which overwrite these values.
+ */
+ object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
+ &error_abort);
+#ifdef TARGET_X86_64
+ object_property_set_int(OBJECT(cpu), "family", 15, &error_abort);
+ object_property_set_int(OBJECT(cpu), "model", 107, &error_abort);
+ object_property_set_int(OBJECT(cpu), "stepping", 1, &error_abort);
+#else
+ object_property_set_int(OBJECT(cpu), "family", 6, &error_abort);
+ object_property_set_int(OBJECT(cpu), "model", 6, &error_abort);
+ object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort);
+#endif
+ object_property_set_str(OBJECT(cpu), "model-id",
+ "QEMU TCG CPU version " QEMU_HW_VERSION,
+ &error_abort);
+}
+
+static const TypeInfo max_x86_cpu_type_info = {
+ .name = X86_CPU_TYPE_NAME("max"),
+ .parent = TYPE_X86_CPU,
+ .instance_init = max_x86_cpu_initfn,
+ .class_init = max_x86_cpu_class_init,
+};
+
+static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
+{
+ assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
+
+ switch (f->type) {
+ case CPUID_FEATURE_WORD:
+ {
+ const char *reg = get_register_name_32(f->cpuid.reg);
+ assert(reg);
+ return g_strdup_printf("CPUID.%02XH:%s",
+ f->cpuid.eax, reg);
+ }
+ case MSR_FEATURE_WORD:
+ return g_strdup_printf("MSR(%02XH)",
+ f->msr.index);
+ }
+
+ return NULL;
+}
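+
+/*
+ * For illustration: a CPUID-type word such as FEAT_7_0_EBX is
+ * described as "CPUID.07H:EBX", while an MSR-type word such as
+ * FEAT_ARCH_CAPABILITIES (IA32_ARCH_CAPABILITIES, index 0x10A) is
+ * described as "MSR(10AH)".
+ */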
+
+static bool x86_cpu_have_filtered_features(X86CPU *cpu)
+{
+ FeatureWord w;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ if (cpu->filtered_features[w]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ if (!cpu->force_features) {
+ env->features[w] &= ~mask;
+ }
+ cpu->filtered_features[w] |= mask;
+
+ if (!verbose_prefix) {
+ return;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ if ((1ULL << i) & mask) {
+ g_autofree char *feat_word_str = feature_word_description(f, i);
+ warn_report("%s: %s%s%s [bit %d]",
+ verbose_prefix,
+ feat_word_str,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
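+
+/*
+ * For illustration: masking out bit 10 of FEAT_7_0_EBX (invpcid) with
+ * a verbose prefix produces a warning of the form
+ * "<prefix>: CPUID.07H:EBX.invpcid [bit 10]".
+ */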
+
+static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
+ value = (env->cpuid_version >> 8) & 0xf;
+ if (value == 0xf) {
+ value += (env->cpuid_version >> 20) & 0xff;
+ }
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xff + 0xf;
+ int64_t value;
+
+ if (!visit_type_int(v, name, &value, errp)) {
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xff00f00;
+ if (value > 0x0f) {
+ env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
+ } else {
+ env->cpuid_version |= value << 8;
+ }
+}
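+
+/*
+ * Worked example: EPYC above uses family 23.  Since 23 > 0x0f, the
+ * setter stores base family 0xF in bits [11:8] and extended family
+ * 23 - 15 = 8 in bits [27:20]; the getter reconstructs 0xf + 8 = 23,
+ * matching the CPUID leaf 1 EAX family encoding.
+ */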
+
+static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
+ value = (env->cpuid_version >> 4) & 0xf;
+ value |= ((env->cpuid_version >> 16) & 0xf) << 4;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xff;
+ int64_t value;
+
+ if (!visit_type_int(v, name, &value, errp)) {
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xf00f0;
+ env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
+}
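+
+/*
+ * Worked example: Cooperlake above uses model 85 (0x55), stored as
+ * base model 0x5 in bits [7:4] and extended model 0x5 in bits
+ * [19:16]; the getter recombines them as (ext << 4) | base = 0x55.
+ */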
+
+static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
+ value = env->cpuid_version & 0xf;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xf;
+ int64_t value;
+
+ if (!visit_type_int(v, name, &value, errp)) {
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xf;
+ env->cpuid_version |= value & 0xf;
+}
+
+static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ char *value;
+
+ value = g_malloc(CPUID_VENDOR_SZ + 1);
+ x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
+ env->cpuid_vendor3);
+ return value;
+}
+
+static void x86_cpuid_set_vendor(Object *obj, const char *value,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (strlen(value) != CPUID_VENDOR_SZ) {
+ error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
+ return;
+ }
+
+ env->cpuid_vendor1 = 0;
+ env->cpuid_vendor2 = 0;
+ env->cpuid_vendor3 = 0;
+ for (i = 0; i < 4; i++) {
+ env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
+ env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
+ env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
+ }
+}
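+
+/*
+ * Worked example: "AuthenticAMD" packs little-endian, four bytes per
+ * register: cpuid_vendor1 = 0x68747541 ("Auth"), cpuid_vendor2 =
+ * 0x69746e65 ("enti"), cpuid_vendor3 = 0x444d4163 ("cAMD"), i.e. the
+ * EBX/EDX/ECX values reported by CPUID leaf 0.
+ */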
+
+static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ char *value;
+ int i;
+
+ value = g_malloc(48 + 1);
+ for (i = 0; i < 48; i++) {
+ value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
+ }
+ value[48] = '\0';
+ return value;
+}
+
+static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int c, len, i;
+
+ if (model_id == NULL) {
+ model_id = "";
+ }
+ len = strlen(model_id);
+ memset(env->cpuid_model, 0, 48);
+ for (i = 0; i < 48; i++) {
+ if (i >= len) {
+ c = '\0';
+ } else {
+ c = (uint8_t)model_id[i];
+ }
+ env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
+ }
+}
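+
+/*
+ * Worked example: a model_id starting with "QEMU" stores
+ * 'Q' | 'E' << 8 | 'M' << 16 | 'U' << 24 = 0x554d4551 in
+ * cpuid_model[0]; the 48 bytes span the twelve dwords reported by
+ * CPUID leaves 0x80000002..0x80000004.
+ */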
+
+static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ int64_t value;
+
+ value = cpu->env.tsc_khz * 1000;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ const int64_t min = 0;
+ const int64_t max = INT64_MAX;
+ int64_t value;
+
+ if (!visit_type_int(v, name, &value, errp)) {
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
+}
+
+/* Generic getter for "feature-words" and "filtered-features" properties */
+static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ uint64_t *array = (uint64_t *)opaque;
+ FeatureWord w;
+ X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
+ X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
+ X86CPUFeatureWordInfoList *list = NULL;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ FeatureWordInfo *wi = &feature_word_info[w];
+ /*
+ * We didn't have MSR features when "feature-words" was
+         * introduced, so skip entries of any other type.
+ */
+ if (wi->type != CPUID_FEATURE_WORD) {
+ continue;
+ }
+ X86CPUFeatureWordInfo *qwi = &word_infos[w];
+ qwi->cpuid_input_eax = wi->cpuid.eax;
+ qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
+ qwi->cpuid_input_ecx = wi->cpuid.ecx;
+ qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
+ qwi->features = array[w];
+
+ /* List will be in reverse order, but order shouldn't matter */
+ list_entries[w].next = list;
+ list_entries[w].value = &word_infos[w];
+ list = &list_entries[w];
+ }
+
+ visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
+}
+
+/* Convert all '_' in a feature string option name to '-', to make the
+ * feature name conform to the QOM property naming rule, which uses '-'
+ * instead of '_'.
+ */
+static inline void feat2prop(char *s)
+{
+ while ((s = strchr(s, '_'))) {
+ *s = '-';
+ }
+}
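+
+/* For illustration: feat2prop() rewrites "tsc_deadline" in place to
+ * "tsc-deadline".
+ */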
+
+/* Return the feature property name for a feature flag bit */
+static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
+{
+ const char *name;
+ /* XSAVE components are automatically enabled by other features,
+ * so return the original feature name instead
+ */
+ if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
+ int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
+
+ if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
+ x86_ext_save_areas[comp].bits) {
+ w = x86_ext_save_areas[comp].feature;
+ bitnr = ctz32(x86_ext_save_areas[comp].bits);
+ }
+ }
+
+ assert(bitnr < 64);
+ assert(w < FEATURE_WORDS);
+ name = feature_word_info[w].feat_names[bitnr];
+ assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
+ return name;
+}
+
+/* Compatibility hack to maintain the legacy +-feat semantics,
+ * where +-feat overwrites any feature set by
+ * feat=on|feat even if the latter is parsed after +-feat
+ * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
+ */
+static GList *plus_features, *minus_features;
+
+static gint compare_string(gconstpointer a, gconstpointer b)
+{
+ return g_strcmp0(a, b);
+}
+
+/* Parse "+feature,-feature,feature=foo" CPU feature string
+ */
+static void x86_cpu_parse_featurestr(const char *typename, char *features,
+ Error **errp)
+{
+    char *featurestr; /* Single "key=value" string being parsed */
+ static bool cpu_globals_initialized;
+ bool ambiguous = false;
+
+ if (cpu_globals_initialized) {
+ return;
+ }
+ cpu_globals_initialized = true;
+
+ if (!features) {
+ return;
+ }
+
+ for (featurestr = strtok(features, ",");
+ featurestr;
+ featurestr = strtok(NULL, ",")) {
+ const char *name;
+ const char *val = NULL;
+ char *eq = NULL;
+ char num[32];
+ GlobalProperty *prop;
+
+ /* Compatibility syntax: */
+ if (featurestr[0] == '+') {
+ plus_features = g_list_append(plus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ } else if (featurestr[0] == '-') {
+ minus_features = g_list_append(minus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ }
+
+ eq = strchr(featurestr, '=');
+ if (eq) {
+ *eq++ = 0;
+ val = eq;
+ } else {
+ val = "on";
+ }
+
+ feat2prop(featurestr);
+ name = featurestr;
+
+ if (g_list_find_custom(plus_features, name, compare_string)) {
+ warn_report("Ambiguous CPU model string. "
+ "Don't mix both \"+%s\" and \"%s=%s\"",
+ name, name, val);
+ ambiguous = true;
+ }
+ if (g_list_find_custom(minus_features, name, compare_string)) {
+ warn_report("Ambiguous CPU model string. "
+ "Don't mix both \"-%s\" and \"%s=%s\"",
+ name, name, val);
+ ambiguous = true;
+ }
+
+ /* Special case: */
+ if (!strcmp(name, "tsc-freq")) {
+ int ret;
+ uint64_t tsc_freq;
+
+ ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
+ if (ret < 0 || tsc_freq > INT64_MAX) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
+ val = num;
+ name = "tsc-frequency";
+ }
+
+ prop = g_new0(typeof(*prop), 1);
+ prop->driver = typename;
+ prop->property = g_strdup(name);
+ prop->value = g_strdup(val);
+ qdev_prop_register_global(prop);
+ }
+
+ if (ambiguous) {
+ warn_report("Compatibility of ambiguous CPU model "
+ "strings won't be kept on future QEMU versions");
+ }
+}
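+
+/*
+ * For illustration: parsing "-smap,+avx2,tsc_freq=2.5G" appends "smap"
+ * to minus_features and "avx2" to plus_features, then hits the
+ * special case: qemu_strtosz_metric() turns "2.5G" into 2500000000,
+ * and a global "tsc-frequency=2500000000" property is registered for
+ * the CPU type.
+ */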
+
+static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
+
+/* Build a list with the names of all features in a feature word array */
+static void x86_cpu_list_feature_names(FeatureWordArray features,
+ strList **list)
+{
+ strList **tail = list;
+ FeatureWord w;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ uint64_t filtered = features[w];
+ int i;
+ for (i = 0; i < 64; i++) {
+ if (filtered & (1ULL << i)) {
+ QAPI_LIST_APPEND(tail, g_strdup(x86_cpu_feature_name(w, i)));
+ }
+ }
+ }
+}
+
+static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *xc = X86_CPU(obj);
+ strList *result = NULL;
+
+ x86_cpu_list_feature_names(xc->filtered_features, &result);
+ visit_type_strList(v, "unavailable-features", &result, errp);
+}
+
+/* Check for missing features that may prevent the CPU class from
+ * running using the current machine and accelerator.
+ */
+static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
+ strList **list)
+{
+ strList **tail = list;
+ X86CPU *xc;
+ Error *err = NULL;
+
+ if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
+ QAPI_LIST_APPEND(tail, g_strdup("kvm"));
+ return;
+ }
+
+ xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
+
+ x86_cpu_expand_features(xc, &err);
+ if (err) {
+        /* Errors at x86_cpu_expand_features should never happen,
+         * but in case they do, just report the model as not
+ * runnable at all using the "type" property.
+ */
+ QAPI_LIST_APPEND(tail, g_strdup("type"));
+ error_free(err);
+ }
+
+ x86_cpu_filter_features(xc, false);
+
+ x86_cpu_list_feature_names(xc->filtered_features, tail);
+
+ object_unref(OBJECT(xc));
+}
+
+/* Print all CPUID feature names in the feature set
+ */
+static void listflags(GList *features)
+{
+ size_t len = 0;
+ GList *tmp;
+
+ for (tmp = features; tmp; tmp = tmp->next) {
+ const char *name = tmp->data;
+ if ((len + strlen(name) + 1) >= 75) {
+ qemu_printf("\n");
+ len = 0;
+ }
+        qemu_printf("%s%s", len == 0 ? "  " : " ", name);
+ len += strlen(name) + 1;
+ }
+ qemu_printf("\n");
+}
+
+/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
+ X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
+ int ret;
+
+ if (cc_a->ordering != cc_b->ordering) {
+ ret = cc_a->ordering - cc_b->ordering;
+ } else {
+ g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
+ g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
+ ret = strcmp(name_a, name_b);
+ }
+ return ret;
+}
+
+static GSList *get_sorted_cpu_model_list(void)
+{
+ GSList *list = object_class_get_list(TYPE_X86_CPU, false);
+ list = g_slist_sort(list, x86_cpu_list_compare);
+ return list;
+}
+
+static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
+{
+ Object *obj = object_new_with_class(OBJECT_CLASS(xc));
+ char *r = object_property_get_str(obj, "model-id", &error_abort);
+ object_unref(obj);
+ return r;
+}
+
+static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
+{
+ X86CPUVersion version;
+
+ if (!cc->model || !cc->model->is_alias) {
+ return NULL;
+ }
+ version = x86_cpu_model_resolve_version(cc->model);
+ if (version <= 0) {
+ return NULL;
+ }
+ return x86_cpu_versioned_model_name(cc->model->cpudef, version);
+}
+
+static void x86_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ X86CPUClass *cc = X86_CPU_CLASS(oc);
+ g_autofree char *name = x86_cpu_class_get_model_name(cc);
+ g_autofree char *desc = g_strdup(cc->model_description);
+ g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
+ g_autofree char *model_id = x86_cpu_class_get_model_id(cc);
+
+ if (!desc && alias_of) {
+ if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
+ desc = g_strdup("(alias configured by machine type)");
+ } else {
+ desc = g_strdup_printf("(alias of %s)", alias_of);
+ }
+ }
+ if (!desc && cc->model && cc->model->note) {
+ desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
+ }
+ if (!desc) {
+ desc = g_strdup_printf("%s", model_id);
+ }
+
+ qemu_printf("x86 %-20s %s\n", name, desc);
+}
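+
+/*
+ * Illustrative output lines (model names and descriptions vary):
+ *   x86 EPYC                 (alias configured by machine type)
+ *   x86 EPYC-v1              AMD EPYC Processor
+ */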
+
+/* list available CPU models and flags */
+void x86_cpu_list(void)
+{
+ int i, j;
+ GSList *list;
+ GList *names = NULL;
+
+ qemu_printf("Available CPUs:\n");
+ list = get_sorted_cpu_model_list();
+ g_slist_foreach(list, x86_cpu_list_entry, NULL);
+ g_slist_free(list);
+
+ names = NULL;
+ for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
+ FeatureWordInfo *fw = &feature_word_info[i];
+ for (j = 0; j < 64; j++) {
+ if (fw->feat_names[j]) {
+ names = g_list_append(names, (gpointer)fw->feat_names[j]);
+ }
+ }
+ }
+
+ names = g_list_sort(names, (GCompareFunc)strcmp);
+
+ qemu_printf("\nRecognized CPUID flags:\n");
+ listflags(names);
+ qemu_printf("\n");
+ g_list_free(names);
+}
+
+static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ X86CPUClass *cc = X86_CPU_CLASS(oc);
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfo *info;
+
+ info = g_malloc0(sizeof(*info));
+ info->name = x86_cpu_class_get_model_name(cc);
+ x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
+ info->has_unavailable_features = true;
+ info->q_typename = g_strdup(object_class_get_name(oc));
+ info->migration_safe = cc->migration_safe;
+ info->has_migration_safe = true;
+ info->q_static = cc->static_model;
+ if (cc->model && cc->model->cpudef->deprecation_note) {
+ info->deprecated = true;
+ } else {
+ info->deprecated = false;
+ }
+ /*
+ * Old machine types won't report aliases, so that alias translation
+ * doesn't break compatibility with previous QEMU versions.
+ */
+ if (default_cpu_version != CPU_VERSION_LEGACY) {
+ info->alias_of = x86_cpu_class_get_alias_of(cc);
+ info->has_alias_of = !!info->alias_of;
+ }
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list = get_sorted_cpu_model_list();
+ g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
+ g_slist_free(list);
+ return cpu_list;
+}
+
+static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only)
+{
+ FeatureWordInfo *wi = &feature_word_info[w];
+ uint64_t r = 0;
+
+ if (kvm_enabled()) {
+ switch (wi->type) {
+ case CPUID_FEATURE_WORD:
+ r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
+ wi->cpuid.ecx,
+ wi->cpuid.reg);
+ break;
+ case MSR_FEATURE_WORD:
+ r = kvm_arch_get_supported_msr_feature(kvm_state,
+ wi->msr.index);
+ break;
+ }
+ } else if (hvf_enabled()) {
+ if (wi->type != CPUID_FEATURE_WORD) {
+ return 0;
+ }
+ r = hvf_get_supported_cpuid(wi->cpuid.eax,
+ wi->cpuid.ecx,
+ wi->cpuid.reg);
+ } else if (tcg_enabled()) {
+ r = wi->tcg_features;
+ } else {
+ return ~0;
+ }
+#ifndef TARGET_X86_64
+ if (w == FEAT_8000_0001_EDX) {
+ r &= ~CPUID_EXT2_LM;
+ }
+#endif
+ if (migratable_only) {
+ r &= x86_cpu_get_migratable_flags(w);
+ }
+ return r;
+}
+
+/*
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ */
+void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
+{
+ PropValue *pv;
+ for (pv = props; pv->prop; pv++) {
+ if (!pv->value) {
+ continue;
+ }
+ object_property_parse(OBJECT(cpu), pv->prop, pv->value,
+ &error_abort);
+ }
+}
+
+/*
+ * Apply properties for the CPU model version specified in model.
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ */
+
+static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
+{
+ const X86CPUVersionDefinition *vdef;
+ X86CPUVersion version = x86_cpu_model_resolve_version(model);
+
+ if (version == CPU_VERSION_LEGACY) {
+ return;
+ }
+
+ for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
+ PropValue *p;
+
+ for (p = vdef->props; p && p->prop; p++) {
+ object_property_parse(OBJECT(cpu), p->prop, p->value,
+ &error_abort);
+ }
+
+ if (vdef->version == version) {
+ break;
+ }
+ }
+
+ /*
+ * If we reached the end of the list, the requested version number
+ * was invalid.
+ */
+ assert(vdef->version == version);
+}
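+
+/*
+ * E.g. resolving a "-v3" model applies each version's property
+ * overrides in order (v1, then v2, then v3) and stops once
+ * vdef->version matches; falling off the end of the list means the
+ * requested version does not exist.
+ */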
+
+/*
+ * Load data from X86CPUDefinition into a X86CPU object.
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ */
+static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
+{
+ const X86CPUDefinition *def = model->cpudef;
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+
+ /* NOTE: any property set by this function should be returned by
+ * x86_cpu_static_props(), so static expansion of
+ * query-cpu-model-expansion is always complete.
+ */
+
+ /* CPU models only set _minimum_ values for level/xlevel: */
+ object_property_set_uint(OBJECT(cpu), "min-level", def->level,
+ &error_abort);
+ object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel,
+ &error_abort);
+
+ object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort);
+ object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort);
+ object_property_set_int(OBJECT(cpu), "stepping", def->stepping,
+ &error_abort);
+ object_property_set_str(OBJECT(cpu), "model-id", def->model_id,
+ &error_abort);
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] = def->features[w];
+ }
+
+ /* legacy-cache defaults to 'off' if CPU model provides cache info */
+ cpu->legacy_cache = !def->cache_info;
+
+ env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
+
+ /* sysenter isn't supported in compatibility mode on AMD, and
+ * syscall isn't supported in compatibility mode on Intel.
+ * Normally we advertise the actual CPU vendor, but you can
+ * override this using the 'vendor' property if you want to use
+ * KVM's sysenter/syscall emulation in compatibility mode and
+ * when doing cross-vendor migration.
+ */
+
+ /*
+ * vendor property is set here but then overloaded with the
+ * host cpu vendor for KVM and HVF.
+ */
+ object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);
+
+ x86_cpu_apply_version_props(cpu, model);
+
+ /*
+ * Properties in versioned CPU model are not user specified features.
+ * We can simply clear env->user_features here since it will be filled later
+ * in x86_cpu_expand_features() based on plus_features and minus_features.
+ */
+ memset(&env->user_features, 0, sizeof(env->user_features));
+}
+
+static gchar *x86_gdb_arch_name(CPUState *cs)
+{
+#ifdef TARGET_X86_64
+ return g_strdup("i386:x86-64");
+#else
+ return g_strdup("i386");
+#endif
+}
+
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUModel *model = data;
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ xcc->model = model;
+ xcc->migration_safe = true;
+ cc->deprecation_note = model->cpudef->deprecation_note;
+}
+
+static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
+{
+ g_autofree char *typename = x86_cpu_type_name(name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_X86_CPU,
+ .class_init = x86_cpu_cpudef_class_init,
+ .class_data = model,
+ };
+
+ type_register(&ti);
+}
+
+
+/*
+ * register builtin_x86_defs;
+ * "max", "base" and subclasses ("host") are not registered here.
+ * See x86_cpu_register_types for all model registrations.
+ */
+static void x86_register_cpudef_types(const X86CPUDefinition *def)
+{
+ X86CPUModel *m;
+ const X86CPUVersionDefinition *vdef;
+
+ /* AMD aliases are handled at runtime based on CPUID vendor, so
+ * they shouldn't be set in the CPU model table.
+ */
+ assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
+ /* catch mistakes instead of silently truncating model_id when too long */
+ assert(def->model_id && strlen(def->model_id) <= 48);
+
+ /* Unversioned model: */
+ m = g_new0(X86CPUModel, 1);
+ m->cpudef = def;
+ m->version = CPU_VERSION_AUTO;
+ m->is_alias = true;
+ x86_register_cpu_model_type(def->name, m);
+
+ /* Versioned models: */
+
+ for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
+ X86CPUModel *m = g_new0(X86CPUModel, 1);
+ g_autofree char *name =
+ x86_cpu_versioned_model_name(def, vdef->version);
+ m->cpudef = def;
+ m->version = vdef->version;
+ m->note = vdef->note;
+ x86_register_cpu_model_type(name, m);
+
+ if (vdef->alias) {
+ X86CPUModel *am = g_new0(X86CPUModel, 1);
+ am->cpudef = def;
+ am->version = vdef->version;
+ am->is_alias = true;
+ x86_register_cpu_model_type(vdef->alias, am);
+ }
+ }
+
+}
+
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
+{
+ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
+ return 57; /* LA57: 5-level paging, 57-bit virtual addresses */
+ } else {
+ return 48; /* 4-level paging, 48-bit virtual addresses */
+ }
+}
+
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ X86CPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ uint32_t die_offset;
+ uint32_t limit;
+ uint32_t signature[3];
+ X86CPUTopoInfo topo_info;
+
+ topo_info.dies_per_pkg = env->nr_dies;
+ topo_info.cores_per_die = cs->nr_cores;
+ topo_info.threads_per_core = cs->nr_threads;
+
+ /* Calculate & apply limits for different index ranges */
+ if (index >= 0xC0000000) {
+ limit = env->cpuid_xlevel2;
+ } else if (index >= 0x80000000) {
+ limit = env->cpuid_xlevel;
+ } else if (index >= 0x40000000) {
+ limit = 0x40000001;
+ } else {
+ limit = env->cpuid_level;
+ }
+
+ if (index > limit) {
+ /* Intel documentation states that invalid EAX input will
+ * return the same information as EAX=cpuid_level
+ * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
+ */
+ index = env->cpuid_level;
+ }
+
+ switch (index) {
+ case 0:
+ *eax = env->cpuid_level;
+ *ebx = env->cpuid_vendor1;
+ *edx = env->cpuid_vendor2;
+ *ecx = env->cpuid_vendor3;
+ break;
+ case 1:
+ *eax = env->cpuid_version;
+ *ebx = (cpu->apic_id << 24) |
+ 8 << 8; /* CLFLUSH size in quadwords (8 * 8 = 64 bytes); Linux wants it. */
+ *ecx = env->features[FEAT_1_ECX];
+ if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
+ *ecx |= CPUID_EXT_OSXSAVE;
+ }
+ *edx = env->features[FEAT_1_EDX];
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
+ *edx |= CPUID_HT;
+ }
+ if (!cpu->enable_pmu) {
+ *ecx &= ~CPUID_EXT_PDCM;
+ }
+ break;
+ case 2:
+ /* cache info: needed for Pentium Pro compatibility */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+ *eax = 1; /* Number of CPUID[EAX=2] calls required */
+ *ebx = 0;
+ if (!cpu->enable_l3_cache) {
+ *ecx = 0;
+ } else {
+ *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
+ }
+ *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
+ (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
+ (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
+ break;
+ case 4:
+ /* cache info: needed for Core compatibility */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, count, eax, ebx, ecx, edx);
+ /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
+ *eax &= ~0xFC000000;
+ if ((*eax & 31) && cs->nr_cores > 1) {
+ *eax |= (cs->nr_cores - 1) << 26;
+ }
+ } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ } else {
+ *eax = 0;
+ switch (count) {
+ case 0: /* L1 dcache info */
+ encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
+ 1, cs->nr_cores,
+ eax, ebx, ecx, edx);
+ break;
+ case 1: /* L1 icache info */
+ encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
+ 1, cs->nr_cores,
+ eax, ebx, ecx, edx);
+ break;
+ case 2: /* L2 cache info */
+ encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
+ cs->nr_threads, cs->nr_cores,
+ eax, ebx, ecx, edx);
+ break;
+ case 3: /* L3 cache info */
+ die_offset = apicid_die_offset(&topo_info);
+ if (cpu->enable_l3_cache) {
+ encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
+ (1 << die_offset), cs->nr_cores,
+ eax, ebx, ecx, edx);
+ break;
+ }
+ /* fall through */
+ default: /* end of info */
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+ }
+ break;
+ case 5:
+ /* MONITOR/MWAIT Leaf */
+ *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
+ *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
+ *ecx = cpu->mwait.ecx; /* flags */
+ *edx = cpu->mwait.edx; /* mwait substates */
+ break;
+ case 6:
+ /* Thermal and Power Leaf */
+ *eax = env->features[FEAT_6_EAX];
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 7:
+ /* Structured Extended Feature Flags Enumeration Leaf */
+ if (count == 0) {
+ /* Maximum ECX value for sub-leaves */
+ *eax = env->cpuid_level_func7;
+ *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
+ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
+ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
+ *ecx |= CPUID_7_0_ECX_OSPKE;
+ }
+ *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
+
+ /*
+ * SGX cannot be emulated in software. If hardware does not
+ * support enabling SGX and/or SGX flexible launch control,
+ * then we need to update the VM's CPUID values accordingly.
+ */
+ if ((*ebx & CPUID_7_0_EBX_SGX) &&
+ (!kvm_enabled() ||
+ !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_EBX) &
+ CPUID_7_0_EBX_SGX))) {
+ *ebx &= ~CPUID_7_0_EBX_SGX;
+ }
+
+ if ((*ecx & CPUID_7_0_ECX_SGX_LC) &&
+ (!(*ebx & CPUID_7_0_EBX_SGX) || !kvm_enabled() ||
+ !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_ECX) &
+ CPUID_7_0_ECX_SGX_LC))) {
+ *ecx &= ~CPUID_7_0_ECX_SGX_LC;
+ }
+ } else if (count == 1) {
+ *eax = env->features[FEAT_7_1_EAX];
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 9:
+ /* Direct Cache Access Information Leaf */
+ *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0xA:
+ /* Architectural Performance Monitoring Leaf */
+ if (kvm_enabled() && cpu->enable_pmu) {
+ KVMState *s = cs->kvm_state;
+
+ *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
+ *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
+ *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
+ *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
+ } else if (hvf_enabled() && cpu->enable_pmu) {
+ *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
+ *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
+ *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
+ *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0xB:
+ /* Extended Topology Enumeration Leaf */
+ if (!cpu->enable_cpuid_0xb) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ *ecx = count & 0xff;
+ *edx = cpu->apic_id;
+
+ switch (count) {
+ case 0:
+ *eax = apicid_core_offset(&topo_info);
+ *ebx = cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+ break;
+ case 1:
+ *eax = apicid_pkg_offset(&topo_info);
+ *ebx = cs->nr_cores * cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+ break;
+ default:
+ *eax = 0;
+ *ebx = 0;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+ }
+
+ assert(!(*eax & ~0x1f));
+ *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+ break;
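+ /*
+ * Worked example for leaf 0xB above (illustrative, assuming
+ * -smp cores=4,threads=2 on a single die): apicid_core_offset() is 1
+ * and apicid_pkg_offset() is 3, so subleaf 0 reports EAX=1/EBX=2
+ * (SMT level) and subleaf 1 reports EAX=3/EBX=8 (core level).
+ */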
+ case 0x1F:
+ /* V2 Extended Topology Enumeration Leaf */
+ if (env->nr_dies < 2) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ *ecx = count & 0xff;
+ *edx = cpu->apic_id;
+ switch (count) {
+ case 0:
+ *eax = apicid_core_offset(&topo_info);
+ *ebx = cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+ break;
+ case 1:
+ *eax = apicid_die_offset(&topo_info);
+ *ebx = cs->nr_cores * cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+ break;
+ case 2:
+ *eax = apicid_pkg_offset(&topo_info);
+ *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
+ break;
+ default:
+ *eax = 0;
+ *ebx = 0;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+ }
+ assert(!(*eax & ~0x1f));
+ *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+ break;
+ case 0xD: {
+ /* Processor Extended State */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ break;
+ }
+
+ if (count == 0) {
+ *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
+ *eax = env->features[FEAT_XSAVE_COMP_LO];
+ *edx = env->features[FEAT_XSAVE_COMP_HI];
+ /*
+ * The initial value of both XCR0 and EBX is 0. On a host without
+ * KVM commit 412a3c41 (e.g. CentOS 6), EBX stays 0 even after the
+ * guest updates XCR0, which crashes some legacy guests (e.g.
+ * CentOS 6). Work around it by setting EBX == ECX.
+ */
+ *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
+ } else if (count == 1) {
+ *eax = env->features[FEAT_XSAVE];
+ } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+ if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+ *eax = esa->size;
+ *ebx = esa->offset;
+ }
+ }
+ break;
+ }
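+ /*
+ * Illustrative values for leaf 0xD above, assuming x87, SSE and AVX
+ * are the enabled components: subleaf 0 reports EAX=0x7 (component
+ * bitmap) and subleaf 2 reports the AVX state area as EAX=256 bytes
+ * at EBX=offset 576, the conventional layout.
+ */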
+ case 0x12:
+#ifndef CONFIG_USER_ONLY
+ if (!kvm_enabled() ||
+ !(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ /*
+ * SGX sub-leafs CPUID.0x12.{0x2..N} enumerate EPC sections. Retrieve
+ * the EPC properties, e.g. confidentiality and integrity, from the
+ * host's first EPC section, i.e. assume there is one EPC section or
+ * that all EPC sections have the same security properties.
+ */
+ if (count > 1) {
+ uint64_t epc_addr, epc_size;
+
+ if (sgx_epc_get_section(count - 2, &epc_addr, &epc_size)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+ host_cpuid(index, 2, eax, ebx, ecx, edx);
+ *eax = (uint32_t)(epc_addr & 0xfffff000) | 0x1;
+ *ebx = (uint32_t)(epc_addr >> 32);
+ *ecx = (uint32_t)(epc_size & 0xfffff000) | (*ecx & 0xf);
+ *edx = (uint32_t)(epc_size >> 32);
+ break;
+ }
+
+ /*
+ * SGX sub-leafs CPUID.0x12.{0x0,0x1} are heavily dependent on hardware
+ * and KVM, i.e. QEMU cannot emulate features to override what KVM
+ * supports. Features can be further restricted by userspace, but not
+ * made more permissive.
+ */
+ *eax = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EAX);
+ *ebx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EBX);
+ *ecx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_ECX);
+ *edx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EDX);
+
+ if (count == 0) {
+ *eax &= env->features[FEAT_SGX_12_0_EAX];
+ *ebx &= env->features[FEAT_SGX_12_0_EBX];
+ } else {
+ *eax &= env->features[FEAT_SGX_12_1_EAX];
+ *ebx &= 0; /* EBX is reserved */
+ *ecx &= env->features[FEAT_XSAVE_COMP_LO];
+ *edx &= env->features[FEAT_XSAVE_COMP_HI];
+
+ /* FP and SSE are always allowed regardless of XSAVE/XCR0. */
+ *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK;
+
+ /* Access to PROVISIONKEY requires additional credentials. */
+ if ((*eax & (1U << 4)) &&
+ !kvm_enable_sgx_provisioning(cs->kvm_state)) {
+ *eax &= ~(1U << 4);
+ }
+ }
+#endif
+ break;
+ case 0x14: {
+ /* Intel Processor Trace Enumeration */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
+ !kvm_enabled()) {
+ break;
+ }
+
+ if (count == 0) {
+ *eax = INTEL_PT_MAX_SUBLEAF;
+ *ebx = INTEL_PT_MINIMAL_EBX;
+ *ecx = INTEL_PT_MINIMAL_ECX;
+ if (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP) {
+ *ecx |= CPUID_14_0_ECX_LIP;
+ }
+ } else if (count == 1) {
+ *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
+ *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
+ }
+ break;
+ }
+ case 0x40000000:
+ /*
+ * CPUID code in kvm_arch_init_vcpu() ignores stuff
+ * set here, but we restrict to TCG nonetheless.
+ */
+ if (tcg_enabled() && cpu->expose_tcg) {
+ memcpy(signature, "TCGTCGTCGTCG", 12);
+ *eax = 0x40000001;
+ *ebx = signature[0];
+ *ecx = signature[1];
+ *edx = signature[2];
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0x40000001:
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0x80000000:
+ *eax = env->cpuid_xlevel;
+ *ebx = env->cpuid_vendor1;
+ *edx = env->cpuid_vendor2;
+ *ecx = env->cpuid_vendor3;
+ break;
+ case 0x80000001:
+ *eax = env->cpuid_version;
+ *ebx = 0;
+ *ecx = env->features[FEAT_8000_0001_ECX];
+ *edx = env->features[FEAT_8000_0001_EDX];
+
+ /* The Linux kernel checks for the CMPLegacy bit and
+ * discards multiple thread information if it is set.
+ * So don't set it here for Intel to make Linux guests happy.
+ */
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
+ env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
+ env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
+ *ecx |= 1 << 1; /* CmpLegacy bit */
+ }
+ }
+ break;
+ case 0x80000002:
+ case 0x80000003:
+ case 0x80000004:
+ *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
+ *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
+ *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
+ *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
+ break;
+ case 0x80000005:
+ /* cache info (L1 cache) */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ }
+ *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
+ (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
+ *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) |
+ (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
+ *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
+ *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
+ break;
+ case 0x80000006:
+ /* cache info (L2 cache) */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ }
+ *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
+ (L2_DTLB_2M_ENTRIES << 16) |
+ (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
+ (L2_ITLB_2M_ENTRIES);
+ *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
+ (L2_DTLB_4K_ENTRIES << 16) |
+ (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
+ (L2_ITLB_4K_ENTRIES);
+ encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
+ cpu->enable_l3_cache ?
+ env->cache_info_amd.l3_cache : NULL,
+ ecx, edx);
+ break;
+ case 0x80000007:
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = env->features[FEAT_8000_0007_EDX];
+ break;
+ case 0x80000008:
+ /* virtual & phys address size in low 2 bytes. */
+ *eax = cpu->phys_bits;
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ /* 64 bit processor */
+ *eax |= (cpu_x86_virtual_addr_width(env) << 8);
+ }
+ *ebx = env->features[FEAT_8000_0008_EBX];
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ /*
+ * Bits 15:12 are "The number of bits in the initial
+ * Core::X86::Apic::ApicId[ApicId] value that indicate
+ * thread ID within a package".
+ * Bits 7:0 are "The number of threads in the package is NC+1".
+ */
+ *ecx = (apicid_pkg_offset(&topo_info) << 12) |
+ ((cs->nr_cores * cs->nr_threads) - 1);
+ } else {
+ *ecx = 0;
+ }
+ *edx = 0;
+ break;
+ case 0x8000000A:
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ *eax = 0x00000001; /* SVM Revision */
+ *ebx = 0x00000010; /* nr of ASIDs */
+ *ecx = 0;
+ *edx = env->features[FEAT_SVM]; /* optional features */
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0x8000001D:
+ *eax = 0;
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, count, eax, ebx, ecx, edx);
+ break;
+ }
+ switch (count) {
+ case 0: /* L1 dcache info */
+ encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
+ &topo_info, eax, ebx, ecx, edx);
+ break;
+ case 1: /* L1 icache info */
+ encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
+ &topo_info, eax, ebx, ecx, edx);
+ break;
+ case 2: /* L2 cache info */
+ encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
+ &topo_info, eax, ebx, ecx, edx);
+ break;
+ case 3: /* L3 cache info */
+ encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
+ &topo_info, eax, ebx, ecx, edx);
+ break;
+ default: /* end of info */
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+ break;
+ case 0x8000001E:
+ if (cpu->core_id <= 255) {
+ encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx);
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0xC0000000:
+ *eax = env->cpuid_xlevel2;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0xC0000001:
+ /* Support for VIA CPU's CPUID instruction */
+ *eax = env->cpuid_version;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = env->features[FEAT_C000_0001_EDX];
+ break;
+ case 0xC0000002:
+ case 0xC0000003:
+ case 0xC0000004:
+ /* Reserved for future use; currently filled with zero */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0x8000001F:
+ *eax = *ebx = *ecx = *edx = 0;
+ if (sev_enabled()) {
+ *eax = 0x2;
+ *eax |= sev_es_enabled() ? 0x8 : 0;
+ *ebx = sev_get_cbit_position();
+ *ebx |= sev_get_reduced_phys_bits() << 6;
+ }
+ break;
+ default:
+ /* reserved values: zero */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+}
+
+static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env)
+{
+#ifndef CONFIG_USER_ONLY
+ /* Default IA32_SGXLEPUBKEYHASH[0..3] values, as defined in Skylake hardware */
+ env->msr_ia32_sgxlepubkeyhash[0] = 0xa6053e051270b7acULL;
+ env->msr_ia32_sgxlepubkeyhash[1] = 0x6cfbe8ba8b3b413dULL;
+ env->msr_ia32_sgxlepubkeyhash[2] = 0xc4916d99f2b3735dULL;
+ env->msr_ia32_sgxlepubkeyhash[3] = 0xd4f8c05909f9bb3bULL;
+#endif
+}
+
+static void x86_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ X86CPU *cpu = X86_CPU(s);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+ CPUX86State *env = &cpu->env;
+ target_ulong cr4;
+ uint64_t xcr0;
+ int i;
+
+ xcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUX86State, end_reset_fields));
+
+ env->old_exception = -1;
+
+ /* init to reset state */
+ env->int_ctl = 0;
+ env->hflags2 |= HF2_GIF_MASK;
+ env->hflags2 |= HF2_VGIF_MASK;
+ env->hflags &= ~HF_GUEST_MASK;
+
+ cpu_x86_update_cr0(env, 0x60000010);
+ env->a20_mask = ~0x0;
+ env->smbase = 0x30000;
+ env->msr_smi_count = 0;
+
+ env->idt.limit = 0xffff;
+ env->gdt.limit = 0xffff;
+ env->ldt.limit = 0xffff;
+ env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
+ env->tr.limit = 0xffff;
+ env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
+
+ cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
+ DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+
+ env->eip = 0xfff0;
+ env->regs[R_EDX] = env->cpuid_version;
+
+ env->eflags = 0x2;
+
+ /* FPU init */
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = 1;
+ }
+ cpu_set_fpuc(env, 0x37f);
+
+ env->mxcsr = 0x1f80;
+ /* All units are in INIT state. */
+ env->xstate_bv = 0;
+
+ env->pat = 0x0007040600070406ULL;
+ env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
+ env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
+ }
+
+ memset(env->dr, 0, sizeof(env->dr));
+ env->dr[6] = DR6_FIXED_1;
+ env->dr[7] = DR7_FIXED_1;
+ cpu_breakpoint_remove_all(s, BP_CPU);
+ cpu_watchpoint_remove_all(s, BP_CPU);
+
+ cr4 = 0;
+ xcr0 = XSTATE_FP_MASK;
+
+#ifdef CONFIG_USER_ONLY
+ /* Enable all the features for user-mode. */
+ if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+ xcr0 |= XSTATE_SSE_MASK;
+ }
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ xcr0 |= 1ull << i;
+ }
+ }
+
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+ cr4 |= CR4_FSGSBASE_MASK;
+ }
+#endif
+
+ env->xcr0 = xcr0;
+ cpu_x86_update_cr4(env, cr4);
+
+ /*
+ * SDM 11.11.5 requires:
+ * - IA32_MTRR_DEF_TYPE MSR.E = 0
+ * - IA32_MTRR_PHYSMASKn.V = 0
+ * All other bits are undefined. For simplification, zero it all.
+ */
+ env->mtrr_deftype = 0;
+ memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+ memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
+ env->interrupt_injected = -1;
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
+ env->exception_has_payload = false;
+ env->exception_payload = 0;
+ env->nmi_injected = false;
+#if !defined(CONFIG_USER_ONLY)
+ /* We hard-wire the BSP to the first CPU. */
+ apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
+
+ s->halted = !cpu_is_bsp(cpu);
+
+ if (kvm_enabled()) {
+ kvm_arch_reset_vcpu(cpu);
+ }
+
+ x86_cpu_set_sgxlepubkeyhash(env);
+
+ if (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE) {
+ env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT;
+ }
+
+#endif
+}
+
+static void mce_init(X86CPU *cpu)
+{
+ CPUX86State *cenv = &cpu->env;
+ unsigned int bank;
+
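+ /*
+ * ((cpuid_version >> 8) & 0xf) extracts the family field, so MCE
+ * banks are only set up for family >= 6 CPUs that advertise both
+ * the MCE and MCA CPUID bits.
+ */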
+ if (((cenv->cpuid_version >> 8) & 0xf) >= 6
+ && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+ (CPUID_MCE | CPUID_MCA)) {
+ cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
+ (cpu->enable_lmce ? MCG_LMCE_P : 0);
+ cenv->mcg_ctl = ~(uint64_t)0;
+ for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
+ cenv->mce_banks[bank * 4] = ~(uint64_t)0;
+ }
+ }
+}
+
+static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
+{
+ if (*min < value) {
+ *min = value;
+ }
+}
+
+/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
+static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *fi = &feature_word_info[w];
+ uint32_t eax = fi->cpuid.eax;
+ uint32_t region = eax & 0xF0000000;
+
+ assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
+ if (!env->features[w]) {
+ return;
+ }
+
+ switch (region) {
+ case 0x00000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
+ break;
+ case 0x80000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
+ break;
+ case 0xC0000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
+ break;
+ }
+
+ if (eax == 7) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
+ fi->cpuid.ecx);
+ }
+}
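+
+/*
+ * E.g. a bit set in FEAT_7_1_EAX (leaf 7, subleaf 1) raises
+ * cpuid_min_level to at least 7 and cpuid_min_level_func7 to 1.
+ */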
+
+/* Calculate XSAVE components based on the configured CPU feature flags */
+static void x86_cpu_enable_xsave_components(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int i;
+ uint64_t mask;
+
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ env->features[FEAT_XSAVE_COMP_LO] = 0;
+ env->features[FEAT_XSAVE_COMP_HI] = 0;
+ return;
+ }
+
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ mask |= (1ULL << i);
+ }
+ }
+
+ env->features[FEAT_XSAVE_COMP_LO] = mask;
+ env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
+}
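+
+/*
+ * E.g. with x87, SSE and AVX enabled (XCR0 bits 0..2), mask becomes
+ * 0x7, so FEAT_XSAVE_COMP_LO = 0x7 and FEAT_XSAVE_COMP_HI = 0.
+ */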
+
+/***** Steps involved in loading and filtering CPUID data
+ *
+ * When initializing and realizing a CPU object, the steps
+ * involved in setting up CPUID data are:
+ *
+ * 1) Loading CPU model definition (X86CPUDefinition). This is
+ * implemented by x86_cpu_load_model() and should be completely
+ * transparent, as it is done automatically by instance_init.
+ * No code should need to look at X86CPUDefinition structs
+ * outside instance_init.
+ *
+ * 2) CPU expansion. This is done by realize before CPUID
+ * filtering, and will make sure host/accelerator data is
+ * loaded for CPU models that depend on host capabilities
+ * (e.g. "host"). Done by x86_cpu_expand_features().
+ *
+ * 3) CPUID filtering. This initializes extra data related to
+ * CPUID, and checks if the host supports all capabilities
+ * required by the CPU. Runnability of a CPU model is
+ * determined at this step. Done by x86_cpu_filter_features().
+ *
+ * Some operations don't require all steps to be performed.
+ * More precisely:
+ *
+ * - CPU instance creation (instance_init) will run only CPU
+ * model loading. CPU expansion can't run at instance_init time
+ * because host/accelerator data may not be available yet.
+ * - CPU realization will perform both CPU model expansion and CPUID
+ * filtering, and return an error in case one of them fails.
+ * - query-cpu-definitions needs to run all 3 steps. It needs
+ * to run CPUID filtering, as the 'unavailable-features'
+ * field is set based on the filtering results.
+ * - The query-cpu-model-expansion QMP command only needs to run
+ * CPU model loading and CPU expansion. It should not filter
+ * any CPUID data based on host capabilities.
+ */
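+
+/*
+ * In call-graph terms, the steps above are (sketch):
+ *   instance_init -> x86_cpu_load_model()
+ *   realize       -> x86_cpu_expand_features() -> x86_cpu_filter_features()
+ * query-cpu-definitions runs all three steps per model
+ * (see x86_cpu_class_check_missing_features()).
+ */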
+
+/* Expand CPU configuration data, based on configured features
+ * and host/accelerator capabilities when appropriate.
+ */
+void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+ int i;
+ GList *l;
+
+ for (l = plus_features; l; l = l->next) {
+ const char *prop = l->data;
+ if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) {
+ return;
+ }
+ }
+
+ for (l = minus_features; l; l = l->next) {
+ const char *prop = l->data;
+ if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) {
+ return;
+ }
+ }
+
+ /* TODO: Now cpu->max_features doesn't overwrite features
+ * set using QOM properties, and we can convert
+ * plus_features & minus_features to global properties
+ * inside x86_cpu_parse_featurestr() too.
+ */
+ if (cpu->max_features) {
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ /* Override only features that weren't set explicitly
+ * by the user.
+ */
+ env->features[w] |=
+ x86_cpu_get_supported_feature_word(w, cpu->migratable) &
+ ~env->user_features[w] &
+ ~feature_word_info[w].no_autoenable_flags;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
+ FeatureDep *d = &feature_dependencies[i];
+ if (!(env->features[d->from.index] & d->from.mask)) {
+ uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
+
+ /* Not an error unless the dependent feature was added explicitly. */
+ mark_unavailable_features(cpu, d->to.index,
+ unavailable_features & env->user_features[d->to.index],
+ "This feature depends on other features that were not requested");
+
+ env->features[d->to.index] &= ~unavailable_features;
+ }
+ }
+
+ if (!kvm_enabled() || !cpu->expose_kvm) {
+ env->features[FEAT_KVM] = 0;
+ }
+
+ x86_cpu_enable_xsave_components(cpu);
+
+ /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+ if (cpu->full_cpuid_auto_level) {
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+ x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+
+ /* Intel Processor Trace requires CPUID[0x14] */
+ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
+ if (cpu->intel_pt_auto_level) {
+ x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
+ } else if (cpu->env.cpuid_min_level < 0x14) {
+ mark_unavailable_features(cpu, FEAT_7_0_EBX,
+ CPUID_7_0_EBX_INTEL_PT,
+ "Intel PT needs CPUID leaf 0x14; please set it with \"-cpu ...,intel-pt=on,min-level=0x14\"");
+ }
+ }
+
+ /*
+ * Intel CPU topology with multi-die support requires CPUID[0x1F].
+ * For AMD Rome/Milan, the cpuid level is 0x10, and the guest OS
+ * should detect extended topology via leaf 0xB. Only adjust it for
+ * Intel CPUs, unless cpu->vendor_cpuid_only has been unset for
+ * compatibility with older machine types.
+ */
+ if ((env->nr_dies > 1) &&
+ (IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
+ }
+
+ /* SVM requires CPUID[0x8000000A] */
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+ }
+
+ /* SEV requires CPUID[0x8000001F] */
+ if (sev_enabled()) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
+ }
+
+ /* SGX requires CPUID[0x12] for EPC enumeration */
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12);
+ }
+ }
+
+ /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
+ if (env->cpuid_level_func7 == UINT32_MAX) {
+ env->cpuid_level_func7 = env->cpuid_min_level_func7;
+ }
+ if (env->cpuid_level == UINT32_MAX) {
+ env->cpuid_level = env->cpuid_min_level;
+ }
+ if (env->cpuid_xlevel == UINT32_MAX) {
+ env->cpuid_xlevel = env->cpuid_min_xlevel;
+ }
+ if (env->cpuid_xlevel2 == UINT32_MAX) {
+ env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
+ }
+
+ if (kvm_enabled()) {
+ kvm_hyperv_expand_features(cpu, errp);
+ }
+}
+
+/*
+ * Finishes initialization of CPUID data, filtering CPU feature
+ * words based on host availability of each feature. Features the
+ * host can't provide are recorded in cpu->filtered_features.
+ */
+static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+ const char *prefix = NULL;
+
+ if (verbose) {
+ prefix = accel_uses_host_cpuid()
+ ? "host doesn't support requested feature"
+ : "TCG doesn't support requested feature";
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ uint64_t host_feat =
+ x86_cpu_get_supported_feature_word(w, false);
+ uint64_t requested_features = env->features[w];
+ uint64_t unavailable_features = requested_features & ~host_feat;
+ mark_unavailable_features(cpu, w, unavailable_features, prefix);
+ }
+
+ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
+ kvm_enabled()) {
+ KVMState *s = CPU(cpu)->kvm_state;
+ uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
+ uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
+ uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
+ uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
+ uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
+
+ if (!eax_0 ||
+ ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
+ ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
+ ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
+ ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
+ INTEL_PT_ADDR_RANGES_NUM) ||
+ ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
+ (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
+ ((ecx_0 & CPUID_14_0_ECX_LIP) !=
+ (env->features[FEAT_14_0_ECX] & CPUID_14_0_ECX_LIP))) {
+ /*
+ * Processor Trace capabilities aren't configurable, so if the
+ * host can't emulate the capabilities we report in
+ * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
+ */
+ mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
+ }
+ }
+}
+
+static void x86_cpu_hyperv_realize(X86CPU *cpu)
+{
+ size_t len;
+
+ /* Hyper-V vendor id */
+ if (!cpu->hyperv_vendor) {
+ object_property_set_str(OBJECT(cpu), "hv-vendor-id", "Microsoft Hv",
+ &error_abort);
+ }
+ len = strlen(cpu->hyperv_vendor);
+ if (len > 12) {
+ warn_report("hv-vendor-id truncated to 12 characters");
+ len = 12;
+ }
+ memset(cpu->hyperv_vendor_id, 0, 12);
+ memcpy(cpu->hyperv_vendor_id, cpu->hyperv_vendor, len);
+
+ /* 'Hv#1' interface identification: 0x31237648 is "Hv#1" in little-endian ASCII */
+ cpu->hyperv_interface_id[0] = 0x31237648;
+ cpu->hyperv_interface_id[1] = 0;
+ cpu->hyperv_interface_id[2] = 0;
+ cpu->hyperv_interface_id[3] = 0;
+
+ /* Hypervisor implementation limits */
+ cpu->hyperv_limits[0] = 64;
+ cpu->hyperv_limits[1] = 0;
+ cpu->hyperv_limits[2] = 0;
+}
+
+static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ X86CPU *cpu = X86_CPU(dev);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+ CPUX86State *env = &cpu->env;
+ Error *local_err = NULL;
+ static bool ht_warned;
+
+ if (cpu->apic_id == UNASSIGNED_APIC_ID) {
+ error_setg(errp, "apic-id property was not initialized properly");
+ return;
+ }
+
+ /*
+ * Process Hyper-V enlightenments.
+ * Note: this currently has to happen before the expansion of CPU features.
+ */
+ x86_cpu_hyperv_realize(cpu);
+
+ x86_cpu_expand_features(cpu, &local_err);
+ if (local_err) {
+ goto out;
+ }
+
+ x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
+
+ if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
+ error_setg(&local_err,
+ accel_uses_host_cpuid() ?
+ "Host doesn't support requested features" :
+ "TCG doesn't support requested features");
+ goto out;
+ }
+
+ /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
+ * CPUID[1].EDX.
+ */
+ if (IS_AMD_CPU(env)) {
+ env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
+ env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
+ & CPUID_EXT2_AMD_ALIASES);
+ }
+
+ x86_cpu_set_sgxlepubkeyhash(env);
+
+ /*
+ * note: the call to the framework needs to happen after feature expansion,
+ * but before the checks/modifications to ucode_rev, mwait, phys_bits.
+ * These may be set by the accel-specific code,
+ * and the results are subsequently checked / assumed in this function.
+ */
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
+ g_autofree char *name = x86_cpu_class_get_model_name(xcc);
+ error_setg(&local_err, "CPU model '%s' requires KVM or HVF", name);
+ goto out;
+ }
+
+ if (cpu->ucode_rev == 0) {
+ /*
+ * The default is the same as KVM's. Note that this check
+ * needs to happen after the eventual setting of ucode_rev in
+ * accel-specific code in cpu_exec_realizefn.
+ */
+ if (IS_AMD_CPU(env)) {
+ cpu->ucode_rev = 0x01000065;
+ } else {
+ cpu->ucode_rev = 0x100000000ULL;
+ }
+ }
+
+ /*
+ * mwait extended info: needed for Core compatibility
+ * We always wake on interrupt even if host does not have the capability.
+ *
+ * requires the accel-specific code in cpu_exec_realizefn to
+ * have already acquired the CPUID data into cpu->mwait.
+ */
+ cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
+
+ /* For 64-bit systems, think about the number of physical bits to present.
+ * Ideally this should be the same as the host; anything other than matching
+ * the host can cause incorrect guest behaviour.
+ * QEMU used to pick the magic value of 40 bits that corresponds to
+ * consumer AMD devices but nothing else.
+ *
+ * Note that this code assumes features expansion has already been done
+ * (as it checks for CPUID_EXT2_LM), and also assumes that potential
+ * phys_bits adjustments to match the host have already been done in
+ * accel-specific code in cpu_exec_realizefn.
+ */
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ if (cpu->phys_bits &&
+ (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
+ cpu->phys_bits < 32)) {
+ error_setg(errp, "phys-bits should be between 32 and %u "
+ " (but is %u)",
+ TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
+ return;
+ }
+ /*
+ * 0 means it was not explicitly set by the user (or by machine
+ * compat_props or by the host code in host-cpu.c).
+ * In this case, the default is the value used by TCG (40).
+ */
+ if (cpu->phys_bits == 0) {
+ cpu->phys_bits = TCG_PHYS_ADDR_BITS;
+ }
+ } else {
+ /* For 32-bit systems, don't use the user-set value, but keep
+ * phys_bits consistent with what we tell the guest.
+ */
+ if (cpu->phys_bits != 0) {
+ error_setg(errp, "phys-bits is not user-configurable in 32 bit");
+ return;
+ }
+
+ if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+ cpu->phys_bits = 36;
+ } else {
+ cpu->phys_bits = 32;
+ }
+ }
+
+ /* Cache information initialization */
+ if (!cpu->legacy_cache) {
+ if (!xcc->model || !xcc->model->cpudef->cache_info) {
+ g_autofree char *name = x86_cpu_class_get_model_name(xcc);
+ error_setg(errp,
+ "CPU model '%s' doesn't support legacy-cache=off", name);
+ return;
+ }
+ env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
+ *xcc->model->cpudef->cache_info;
+ } else {
+ /* Build legacy cache information */
+ env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
+ env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
+ env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
+ env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
+
+ env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
+ env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
+ env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
+ env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
+
+ env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
+ env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
+ env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
+ env->cache_info_amd.l3_cache = &legacy_l3_cache;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ MachineState *ms = MACHINE(qdev_get_machine());
+ qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+ if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
+ x86_cpu_apic_create(cpu, &local_err);
+ if (local_err != NULL) {
+ goto out;
+ }
+ }
+#endif
+
+ mce_init(cpu);
+
+ qemu_init_vcpu(cs);
+
+ /*
+ * Most Intel and certain AMD CPUs support hyperthreading, but AMD
+ * families without TOPOEXT do not. Even though QEMU works around this
+ * by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on
+ * the inputs (sockets, cores, threads), it is still better to give
+ * users a warning.
+ *
+ * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
+ * cs->nr_threads hasn't been populated yet and the checking is incorrect.
+ */
+ if (IS_AMD_CPU(env) &&
+ !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
+ cs->nr_threads > 1 && !ht_warned) {
+ warn_report("This family of AMD CPU doesn't support "
+ "hyperthreading(%d)",
+ cs->nr_threads);
+ error_printf("Please configure -smp options properly"
+ " or try enabling topoext feature.\n");
+ ht_warned = true;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ x86_cpu_apic_realize(cpu, &local_err);
+ if (local_err != NULL) {
+ goto out;
+ }
+#endif /* !CONFIG_USER_ONLY */
+ cpu_reset(cs);
+
+ xcc->parent_realize(dev, &local_err);
+
+out:
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+}
+
+static void x86_cpu_unrealizefn(DeviceState *dev)
+{
+ X86CPU *cpu = X86_CPU(dev);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+
+#ifndef CONFIG_USER_ONLY
+ cpu_remove_sync(CPU(dev));
+ qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
+#endif
+
+ if (cpu->apic_state) {
+ object_unparent(OBJECT(cpu->apic_state));
+ cpu->apic_state = NULL;
+ }
+
+ xcc->parent_unrealize(dev);
+}
+
+typedef struct BitProperty {
+ FeatureWord w;
+ uint64_t mask;
+} BitProperty;
+
+static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ BitProperty *fp = opaque;
+ uint64_t f = cpu->env.features[fp->w];
+ bool value = (f & fp->mask) == fp->mask;
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ X86CPU *cpu = X86_CPU(obj);
+ BitProperty *fp = opaque;
+ bool value;
+
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
+ }
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value) {
+ cpu->env.features[fp->w] |= fp->mask;
+ } else {
+ cpu->env.features[fp->w] &= ~fp->mask;
+ }
+ cpu->env.user_features[fp->w] |= fp->mask;
+}
+
+/* Register a boolean property to get/set a single bit in a uint64_t field.
+ *
+ * The same property name can be registered multiple times to make it affect
+ * multiple bits in the same FeatureWord. In that case, the getter will return
+ * true only if all bits are set.
+ */
+static void x86_cpu_register_bit_prop(X86CPUClass *xcc,
+ const char *prop_name,
+ FeatureWord w,
+ int bitnr)
+{
+ ObjectClass *oc = OBJECT_CLASS(xcc);
+ BitProperty *fp;
+ ObjectProperty *op;
+ uint64_t mask = (1ULL << bitnr);
+
+ op = object_class_property_find(oc, prop_name);
+ if (op) {
+ fp = op->opaque;
+ assert(fp->w == w);
+ fp->mask |= mask;
+ } else {
+ fp = g_new0(BitProperty, 1);
+ fp->w = w;
+ fp->mask = mask;
+ object_class_property_add(oc, prop_name, "bool",
+ x86_cpu_get_bit_prop,
+ x86_cpu_set_bit_prop,
+ NULL, fp);
+ }
+}
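+
+/*
+ * E.g. if a name were registered for both bit 3 and bit 5 of the same
+ * feature word, fp->mask would accumulate to 0x28 and the getter would
+ * return true only when both bits are set.
+ */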
+
+static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
+ FeatureWord w,
+ int bitnr)
+{
+ FeatureWordInfo *fi = &feature_word_info[w];
+ const char *name = fi->feat_names[bitnr];
+
+ if (!name) {
+ return;
+ }
+
+ /* Property names should use "-" instead of "_".
+ * Old names containing underscores are registered as aliases
+ * using object_property_add_alias()
+ */
+ assert(!strchr(name, '_'));
+ /* aliases don't use "|" delimiters anymore; they are registered
+ * manually using object_property_add_alias() */
+ assert(!strchr(name, '|'));
+ x86_cpu_register_bit_prop(xcc, name, w, bitnr);
+}
+
+static void x86_cpu_post_initfn(Object *obj)
+{
+ accel_cpu_instance_init(CPU(obj));
+}
+
+static void x86_cpu_initfn(Object *obj)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
+ CPUX86State *env = &cpu->env;
+
+ env->nr_dies = 1;
+ cpu_set_cpustate_pointers(cpu);
+
+ object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
+ x86_cpu_get_feature_words,
+ NULL, NULL, (void *)env->features);
+ object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
+ x86_cpu_get_feature_words,
+ NULL, NULL, (void *)cpu->filtered_features);
+
+ object_property_add_alias(obj, "sse3", obj, "pni");
+ object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
+ object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
+ object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
+ object_property_add_alias(obj, "xd", obj, "nx");
+ object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
+ object_property_add_alias(obj, "i64", obj, "lm");
+
+ object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
+ object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
+ object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
+ object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
+ object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
+ object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
+ object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
+ object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
+ object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
+ object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
+ object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
+ object_property_add_alias(obj, "kvm_asyncpf_int", obj, "kvm-asyncpf-int");
+ object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
+ object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
+ object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
+ object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control");
+ object_property_add_alias(obj, "svm_lock", obj, "svm-lock");
+ object_property_add_alias(obj, "nrip_save", obj, "nrip-save");
+ object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale");
+ object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean");
+ object_property_add_alias(obj, "pause_filter", obj, "pause-filter");
+ object_property_add_alias(obj, "sse4_1", obj, "sse4.1");
+ object_property_add_alias(obj, "sse4_2", obj, "sse4.2");
+
+ object_property_add_alias(obj, "hv-apicv", obj, "hv-avic");
+
+ if (xcc->model) {
+ x86_cpu_load_model(cpu, xcc->model);
+ }
+}
+
+static int64_t x86_cpu_get_arch_id(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu->apic_id;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static bool x86_cpu_get_paging_enabled(const CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu->env.cr[0] & CR0_PG_MASK;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static void x86_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ cpu->env.eip = value;
+}
+
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
+ return CPU_INTERRUPT_POLL;
+ }
+#endif
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
+ return CPU_INTERRUPT_SIPI;
+ }
+
+ if (env->hflags2 & HF2_GIF_MASK) {
+ if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ return CPU_INTERRUPT_SMI;
+ } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+ !(env->hflags2 & HF2_NMI_MASK)) {
+ return CPU_INTERRUPT_NMI;
+ } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+ return CPU_INTERRUPT_MCE;
+ } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
+ (env->hflags2 & HF2_HIF_MASK)) ||
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+ return CPU_INTERRUPT_HARD;
+#if !defined(CONFIG_USER_ONLY)
+ } else if (env->hflags2 & HF2_VGIF_MASK) {
+ if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+ (env->eflags & IF_MASK) &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+ return CPU_INTERRUPT_VIRQ;
+ }
+#endif
+ }
+ }
+
+ return 0;
+}
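+
+/*
+ * Note the fixed priority order encoded above: POLL and SIPI are
+ * reported unconditionally, then (only with GIF set) SMI, NMI, MCE,
+ * regular interrupts, and finally virtual interrupts when VGIF
+ * allows them.
+ */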
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+ return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
+}
+
+static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
+ : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
+ : bfd_mach_i386_i8086);
+ info->print_insn = print_insn_i386;
+
+ info->cap_arch = CS_ARCH_X86;
+ info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
+ : env->hflags & HF_CS32_MASK ? CS_MODE_32
+ : CS_MODE_16);
+ info->cap_insn_unit = 1;
+ info->cap_insn_split = 8;
+}
+
+void x86_update_hflags(CPUX86State *env)
+{
+ uint32_t hflags;
+#define HFLAG_COPY_MASK \
+ ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+ hflags = env->hflags & HFLAG_COPY_MASK;
+ hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+ hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+ (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+ hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
+
+ if (env->efer & MSR_EFER_LMA) {
+ hflags |= HF_LMA_MASK;
+ }
+
+ if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+ hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ } else {
+ hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_CS32_SHIFT);
+ hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
+ !(hflags & HF_CS32_MASK)) {
+ hflags |= HF_ADDSEG_MASK;
+ } else {
+ hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+ }
+ }
+ env->hflags = hflags;
+}
+
+static Property x86_cpu_properties[] = {
+#ifdef CONFIG_USER_ONLY
+ /* apic_id = 0 by default for *-user, see commit 9886e834 */
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
+ DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
+#else
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
+ DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
+#endif
+ DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
+ DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+
+ DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
+ HYPERV_SPINLOCK_NEVER_NOTIFY),
+ DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
+ HYPERV_FEAT_RELAXED, 0),
+ DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
+ HYPERV_FEAT_VAPIC, 0),
+ DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
+ HYPERV_FEAT_TIME, 0),
+ DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
+ HYPERV_FEAT_CRASH, 0),
+ DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
+ HYPERV_FEAT_RESET, 0),
+ DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
+ HYPERV_FEAT_VPINDEX, 0),
+ DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
+ HYPERV_FEAT_RUNTIME, 0),
+ DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
+ HYPERV_FEAT_SYNIC, 0),
+ DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
+ HYPERV_FEAT_STIMER, 0),
+ DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
+ HYPERV_FEAT_FREQUENCIES, 0),
+ DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
+ HYPERV_FEAT_REENLIGHTENMENT, 0),
+ DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
+ HYPERV_FEAT_TLBFLUSH, 0),
+ DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
+ HYPERV_FEAT_EVMCS, 0),
+ DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
+ HYPERV_FEAT_IPI, 0),
+ DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
+ HYPERV_FEAT_STIMER_DIRECT, 0),
+ DEFINE_PROP_BIT64("hv-avic", X86CPU, hyperv_features,
+ HYPERV_FEAT_AVIC, 0),
+ DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
+ hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
+ DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
+ DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false),
+
+ /* Identify as WS2008R2 by default */
+ DEFINE_PROP_UINT32("hv-version-id-build", X86CPU, hyperv_ver_id_build,
+ 0x3839),
+ DEFINE_PROP_UINT16("hv-version-id-major", X86CPU, hyperv_ver_id_major,
+ 0x000A),
+ DEFINE_PROP_UINT16("hv-version-id-minor", X86CPU, hyperv_ver_id_minor,
+ 0x0000),
+ DEFINE_PROP_UINT32("hv-version-id-spack", X86CPU, hyperv_ver_id_sp, 0),
+ DEFINE_PROP_UINT8("hv-version-id-sbranch", X86CPU, hyperv_ver_id_sb, 0),
+ DEFINE_PROP_UINT32("hv-version-id-snumber", X86CPU, hyperv_ver_id_sn, 0),
+
+ DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
+ DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
+ DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
+ DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
+ DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
+ DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
+ DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
+ DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
+ DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
+ UINT32_MAX),
+ DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
+ DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
+ DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
+ DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
+ DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
+ DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
+ DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
+ DEFINE_PROP_BOOL("x-vendor-cpuid-only", X86CPU, vendor_cpuid_only, true),
+ DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
+ DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
+ DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
+ false),
+ DEFINE_PROP_BOOL("kvm-pv-enforce-cpuid", X86CPU, kvm_pv_enforce_cpuid,
+ false),
+ DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
+ DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
+ DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
+ true),
+ /*
+ * legacy_cache defaults to true unless the CPU model provides its
+ * own cache information (see x86_cpu_load_def()).
+ */
+ DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
+
+ /*
+ * From "Requirements for Implementing the Microsoft
+ * Hypervisor Interface":
+ * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+ *
+ * "Starting with Windows Server 2012 and Windows 8, if
+ * CPUID.40000005.EAX contains a value of -1, Windows assumes that
+ * the hypervisor imposes no specific limit to the number of VPs.
+ * In this case, Windows Server 2012 guest VMs may use more than
+ * 64 VPs, up to the maximum supported number of processors applicable
+ * to the specific Windows version being used."
+ */
+ DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
+ DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
+ false),
+ DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
+ true),
+ DEFINE_PROP_END_OF_LIST()
+};
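+/*
+ * These properties back the -cpu command-line options; for example
+ * (illustrative only), a common set of Hyper-V enlightenments for a
+ * Windows guest is enabled with:
+ *   -cpu host,hv-relaxed=on,hv-vapic=on,hv-time=on,hv-spinlocks=0x1fff
+ */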
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps i386_sysemu_ops = {
+ .get_memory_mapping = x86_cpu_get_memory_mapping,
+ .get_paging_enabled = x86_cpu_get_paging_enabled,
+ .get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
+ .asidx_from_attrs = x86_asidx_from_attrs,
+ .get_crash_info = x86_cpu_get_crash_info,
+ .write_elf32_note = x86_cpu_write_elf32_note,
+ .write_elf64_note = x86_cpu_write_elf64_note,
+ .write_elf32_qemunote = x86_cpu_write_elf32_qemunote,
+ .write_elf64_qemunote = x86_cpu_write_elf64_qemunote,
+ .legacy_vmsd = &vmstate_x86_cpu,
+};
+#endif
+
+static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ FeatureWord w;
+
+ device_class_set_parent_realize(dc, x86_cpu_realizefn,
+ &xcc->parent_realize);
+ device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
+ &xcc->parent_unrealize);
+ device_class_set_props(dc, x86_cpu_properties);
+
+ device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
+ cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
+
+ cc->class_by_name = x86_cpu_class_by_name;
+ cc->parse_features = x86_cpu_parse_featurestr;
+ cc->has_work = x86_cpu_has_work;
+ cc->dump_state = x86_cpu_dump_state;
+ cc->set_pc = x86_cpu_set_pc;
+ cc->gdb_read_register = x86_cpu_gdb_read_register;
+ cc->gdb_write_register = x86_cpu_gdb_write_register;
+ cc->get_arch_id = x86_cpu_get_arch_id;
+
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &i386_sysemu_ops;
+#endif /* !CONFIG_USER_ONLY */
+
+ cc->gdb_arch_name = x86_gdb_arch_name;
+#ifdef TARGET_X86_64
+ cc->gdb_core_xml_file = "i386-64bit.xml";
+ cc->gdb_num_core_regs = 66;
+#else
+ cc->gdb_core_xml_file = "i386-32bit.xml";
+ cc->gdb_num_core_regs = 50;
+#endif
+ cc->disas_set_info = x86_disas_set_info;
+
+ dc->user_creatable = true;
+
+ object_class_property_add(oc, "family", "int",
+ x86_cpuid_version_get_family,
+ x86_cpuid_version_set_family, NULL, NULL);
+ object_class_property_add(oc, "model", "int",
+ x86_cpuid_version_get_model,
+ x86_cpuid_version_set_model, NULL, NULL);
+ object_class_property_add(oc, "stepping", "int",
+ x86_cpuid_version_get_stepping,
+ x86_cpuid_version_set_stepping, NULL, NULL);
+ object_class_property_add_str(oc, "vendor",
+ x86_cpuid_get_vendor,
+ x86_cpuid_set_vendor);
+ object_class_property_add_str(oc, "model-id",
+ x86_cpuid_get_model_id,
+ x86_cpuid_set_model_id);
+ object_class_property_add(oc, "tsc-frequency", "int",
+ x86_cpuid_get_tsc_freq,
+ x86_cpuid_set_tsc_freq, NULL, NULL);
+ /*
+ * The "unavailable-features" property has the same semantics as
+ * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
+ * QMP command: they list the features that would have prevented the
+ * CPU from running if the "enforce" flag was set.
+ */
+ object_class_property_add(oc, "unavailable-features", "strList",
+ x86_cpu_get_unavailable_features,
+ NULL, NULL, NULL);
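+ /*
+ * Illustrative example (not actual output): on a host lacking AVX-512,
+ * query-cpu-definitions might report something like
+ * { "name": "Skylake-Server", "unavailable-features": [ "avx512f" ] }
+ * for a model whose definition requires that feature.
+ */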
+
+#if !defined(CONFIG_USER_ONLY)
+ object_class_property_add(oc, "crash-information", "GuestPanicInformation",
+ x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
+#endif
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ int bitnr;
+ for (bitnr = 0; bitnr < 64; bitnr++) {
+ x86_cpu_register_feature_bit_props(xcc, w, bitnr);
+ }
+ }
+}
+
+static const TypeInfo x86_cpu_type_info = {
+ .name = TYPE_X86_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(X86CPU),
+ .instance_init = x86_cpu_initfn,
+ .instance_post_init = x86_cpu_post_initfn,
+
+ .abstract = true,
+ .class_size = sizeof(X86CPUClass),
+ .class_init = x86_cpu_common_class_init,
+};
+
+/* "base" CPU model, used by query-cpu-model-expansion */
+static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+ xcc->static_model = true;
+ xcc->migration_safe = true;
+ xcc->model_description = "base CPU model type with no features enabled";
+ xcc->ordering = 8;
+}
+
+static const TypeInfo x86_base_cpu_type_info = {
+ .name = X86_CPU_TYPE_NAME("base"),
+ .parent = TYPE_X86_CPU,
+ .class_init = x86_cpu_base_class_init,
+};
+
+static void x86_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&x86_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
+ x86_register_cpudef_types(&builtin_x86_defs[i]);
+ }
+ type_register_static(&max_x86_cpu_type_info);
+ type_register_static(&x86_base_cpu_type_info);
+}
+
+type_init(x86_cpu_register_types)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
new file mode 100644
index 000000000..04f2b790c
--- /dev/null
+++ b/target/i386/cpu.h
@@ -0,0 +1,2298 @@
+/*
+ * i386 virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef I386_CPU_H
+#define I386_CPU_H
+
+#include "sysemu/tcg.h"
+#include "cpu-qom.h"
+#include "kvm/hyperv-proto.h"
+#include "exec/cpu-defs.h"
+#include "qapi/qapi-types-common.h"
+
+/* The x86 has a strong memory model with some store-after-load re-ordering */
+#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
+#define KVM_HAVE_MCE_INJECTION 1
+
+/* support for self-modifying code even if the modified instruction is
+ close to the modifying instruction */
+#define TARGET_HAS_PRECISE_SMC
+
+#ifdef TARGET_X86_64
+#define I386_ELF_MACHINE EM_X86_64
+#define ELF_MACHINE_UNAME "x86_64"
+#else
+#define I386_ELF_MACHINE EM_386
+#define ELF_MACHINE_UNAME "i686"
+#endif
+
+enum {
+ R_EAX = 0,
+ R_ECX = 1,
+ R_EDX = 2,
+ R_EBX = 3,
+ R_ESP = 4,
+ R_EBP = 5,
+ R_ESI = 6,
+ R_EDI = 7,
+ R_R8 = 8,
+ R_R9 = 9,
+ R_R10 = 10,
+ R_R11 = 11,
+ R_R12 = 12,
+ R_R13 = 13,
+ R_R14 = 14,
+ R_R15 = 15,
+
+ R_AL = 0,
+ R_CL = 1,
+ R_DL = 2,
+ R_BL = 3,
+ R_AH = 4,
+ R_CH = 5,
+ R_DH = 6,
+ R_BH = 7,
+};
+
+typedef enum X86Seg {
+ R_ES = 0,
+ R_CS = 1,
+ R_SS = 2,
+ R_DS = 3,
+ R_FS = 4,
+ R_GS = 5,
+ R_LDTR = 6,
+ R_TR = 7,
+} X86Seg;
+
+/* segment descriptor fields */
+#define DESC_G_SHIFT 23
+#define DESC_G_MASK (1 << DESC_G_SHIFT)
+#define DESC_B_SHIFT 22
+#define DESC_B_MASK (1 << DESC_B_SHIFT)
+#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
+#define DESC_L_MASK (1 << DESC_L_SHIFT)
+#define DESC_AVL_SHIFT 20
+#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
+#define DESC_P_SHIFT 15
+#define DESC_P_MASK (1 << DESC_P_SHIFT)
+#define DESC_DPL_SHIFT 13
+#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
+#define DESC_S_SHIFT 12
+#define DESC_S_MASK (1 << DESC_S_SHIFT)
+#define DESC_TYPE_SHIFT 8
+#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
+#define DESC_A_MASK (1 << 8)
+
+#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
+#define DESC_C_MASK (1 << 10) /* code: conforming */
+#define DESC_R_MASK (1 << 9) /* code: readable */
+
+#define DESC_E_MASK (1 << 10) /* data: expansion direction */
+#define DESC_W_MASK (1 << 9) /* data: writable */
+
+#define DESC_TSS_BUSY_MASK (1 << 9)
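+/*
+ * For example, a flat 32-bit code segment is cached with
+ * DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK
+ * (typically plus DESC_R_MASK and DESC_A_MASK) set in its flags.
+ */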
+
+/* eflags masks */
+#define CC_C 0x0001
+#define CC_P 0x0004
+#define CC_A 0x0010
+#define CC_Z 0x0040
+#define CC_S 0x0080
+#define CC_O 0x0800
+
+#define TF_SHIFT 8
+#define IOPL_SHIFT 12
+#define VM_SHIFT 17
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define DF_MASK 0x00000400
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define RF_MASK 0x00010000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000
+#define VIP_MASK 0x00100000
+#define ID_MASK 0x00200000
+
+/* hidden flags - used internally by qemu to represent additional cpu
+ states. Only the INHIBIT_IRQ, SMM and GUEST are not redundant. We
+ avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
+ positions to ease ORing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT 0
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 segments */
+#define HF_CS32_SHIFT 4
+#define HF_SS32_SHIFT 5
+/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
+#define HF_ADDSEG_SHIFT 6
+/* copy of CR0.PE (protected mode) */
+#define HF_PE_SHIFT 7
+#define HF_TF_SHIFT 8 /* must be same as eflags */
+#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
+#define HF_EM_SHIFT 10
+#define HF_TS_SHIFT 11
+#define HF_IOPL_SHIFT 12 /* must be same as eflags */
+#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
+#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
+#define HF_RF_SHIFT 16 /* must be same as eflags */
+#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
+#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
+#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_GUEST_SHIFT 21 /* SVM intercepts are active */
+#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
+#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
+
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
+#define HF_PE_MASK (1 << HF_PE_SHIFT)
+#define HF_TF_MASK (1 << HF_TF_SHIFT)
+#define HF_MP_MASK (1 << HF_MP_SHIFT)
+#define HF_EM_MASK (1 << HF_EM_SHIFT)
+#define HF_TS_MASK (1 << HF_TS_SHIFT)
+#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
+#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
+#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
+#define HF_RF_MASK (1 << HF_RF_SHIFT)
+#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
+#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
+#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
+#define HF_GUEST_MASK (1 << HF_GUEST_SHIFT)
+#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
+#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
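+/*
+ * For example, the current privilege level is read as
+ * (env->hflags & HF_CPL_MASK) (HF_CPL_SHIFT is 0), and 64-bit code
+ * execution is detected with (env->hflags & HF_CS64_MASK).
+ */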
+
+/* hflags2 */
+
+#define HF2_GIF_SHIFT 0 /* if set, CPU takes interrupts */
+#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
+#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
+#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
+#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */
+#define HF2_VGIF_SHIFT 8 /* Can take VIRQ */
+
+#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
+#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
+#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
+#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)
+#define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT)
+
+#define CR0_PE_SHIFT 0
+#define CR0_MP_SHIFT 1
+
+#define CR0_PE_MASK (1U << 0)
+#define CR0_MP_MASK (1U << 1)
+#define CR0_EM_MASK (1U << 2)
+#define CR0_TS_MASK (1U << 3)
+#define CR0_ET_MASK (1U << 4)
+#define CR0_NE_MASK (1U << 5)
+#define CR0_WP_MASK (1U << 16)
+#define CR0_AM_MASK (1U << 18)
+#define CR0_NW_MASK (1U << 29)
+#define CR0_CD_MASK (1U << 30)
+#define CR0_PG_MASK (1U << 31)
+
+#define CR4_VME_MASK (1U << 0)
+#define CR4_PVI_MASK (1U << 1)
+#define CR4_TSD_MASK (1U << 2)
+#define CR4_DE_MASK (1U << 3)
+#define CR4_PSE_MASK (1U << 4)
+#define CR4_PAE_MASK (1U << 5)
+#define CR4_MCE_MASK (1U << 6)
+#define CR4_PGE_MASK (1U << 7)
+#define CR4_PCE_MASK (1U << 8)
+#define CR4_OSFXSR_SHIFT 9
+#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
+#define CR4_OSXMMEXCPT_MASK (1U << 10)
+#define CR4_UMIP_MASK (1U << 11)
+#define CR4_LA57_MASK (1U << 12)
+#define CR4_VMXE_MASK (1U << 13)
+#define CR4_SMXE_MASK (1U << 14)
+#define CR4_FSGSBASE_MASK (1U << 16)
+#define CR4_PCIDE_MASK (1U << 17)
+#define CR4_OSXSAVE_MASK (1U << 18)
+#define CR4_SMEP_MASK (1U << 20)
+#define CR4_SMAP_MASK (1U << 21)
+#define CR4_PKE_MASK (1U << 22)
+#define CR4_PKS_MASK (1U << 24)
+
+#define CR4_RESERVED_MASK \
+(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
+ | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
+ | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
+ | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
+ | CR4_LA57_MASK \
+ | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
+ | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK))
+
+#define DR6_BD (1 << 13)
+#define DR6_BS (1 << 14)
+#define DR6_BT (1 << 15)
+#define DR6_FIXED_1 0xffff0ff0
+
+#define DR7_GD (1 << 13)
+#define DR7_TYPE_SHIFT 16
+#define DR7_LEN_SHIFT 18
+#define DR7_FIXED_1 0x00000400
+#define DR7_GLOBAL_BP_MASK 0xaa
+#define DR7_LOCAL_BP_MASK 0x55
+#define DR7_MAX_BP 4
+#define DR7_TYPE_BP_INST 0x0
+#define DR7_TYPE_DATA_WR 0x1
+#define DR7_TYPE_IO_RW 0x2
+#define DR7_TYPE_DATA_RW 0x3
+
+#define DR_RESERVED_MASK 0xffffffff00000000ULL
+
+#define PG_PRESENT_BIT 0
+#define PG_RW_BIT 1
+#define PG_USER_BIT 2
+#define PG_PWT_BIT 3
+#define PG_PCD_BIT 4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT 6
+#define PG_PSE_BIT 7
+#define PG_GLOBAL_BIT 8
+#define PG_PSE_PAT_BIT 12
+#define PG_PKRU_BIT 59
+#define PG_NX_BIT 63
+
+#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
+#define PG_RW_MASK (1 << PG_RW_BIT)
+#define PG_USER_MASK (1 << PG_USER_BIT)
+#define PG_PWT_MASK (1 << PG_PWT_BIT)
+#define PG_PCD_MASK (1 << PG_PCD_BIT)
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
+#define PG_ADDRESS_MASK 0x000ffffffffff000LL
+#define PG_HI_USER_MASK 0x7ff0000000000000LL
+#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
+#define PG_NX_MASK (1ULL << PG_NX_BIT)
+
+#define PG_ERROR_W_BIT 1
+
+#define PG_ERROR_P_MASK 0x01
+#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
+#define PG_ERROR_U_MASK 0x04
+#define PG_ERROR_RSVD_MASK 0x08
+#define PG_ERROR_I_D_MASK 0x10
+#define PG_ERROR_PK_MASK 0x20
+
+#define PG_MODE_PAE (1 << 0)
+#define PG_MODE_LMA (1 << 1)
+#define PG_MODE_NXE (1 << 2)
+#define PG_MODE_PSE (1 << 3)
+#define PG_MODE_LA57 (1 << 4)
+#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)
+
+/* Bits of CR4 that do not affect the NPT page format. */
+#define PG_MODE_WP (1 << 16)
+#define PG_MODE_PKE (1 << 17)
+#define PG_MODE_PKS (1 << 18)
+#define PG_MODE_SMEP (1 << 19)
+
+#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
+
+#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
+#define MCE_BANKS_DEF 10
+
+#define MCG_CAP_BANKS_MASK 0xff
+
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */
+
+#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */
+
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+
+/* MISC register defines */
+#define MCM_ADDR_SEGOFF 0 /* segment offset */
+#define MCM_ADDR_LINEAR 1 /* linear address */
+#define MCM_ADDR_PHYS 2 /* physical address */
+#define MCM_ADDR_MEM 3 /* memory address */
+#define MCM_ADDR_GENERIC 7 /* generic */
+
+#define MSR_IA32_TSC 0x10
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_EXTD (1 << 10)
+#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define MSR_TSC_ADJUST 0x0000003b
+#define MSR_IA32_SPEC_CTRL 0x48
+#define MSR_VIRT_SSBD 0xc001011f
+#define MSR_IA32_PRED_CMD 0x49
+#define MSR_IA32_UCODE_REV 0x8b
+#define MSR_IA32_CORE_CAPABILITY 0xcf
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x10a
+#define ARCH_CAP_TSX_CTRL_MSR (1<<7)
+
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+
+#define MSR_IA32_TSX_CTRL 0x122
+#define MSR_IA32_TSCDEADLINE 0x6e0
+#define MSR_IA32_PKRS 0x6e1
+
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+#define FEATURE_CONTROL_SGX_LC (1ULL << 17)
+#define FEATURE_CONTROL_SGX (1ULL << 18)
+#define FEATURE_CONTROL_LMCE (1<<20)
+
+#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c
+#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d
+#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e
+#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f
+
+#define MSR_P6_PERFCTR0 0xc1
+
+#define MSR_IA32_SMBASE 0x9e
+#define MSR_SMI_COUNT 0x34
+#define MSR_CORE_THREAD_COUNT 0x35
+#define MSR_MTRRcap 0xfe
+#define MSR_MTRRcap_VCNT 8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+
+#define MSR_IA32_SYSENTER_CS 0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
+#define MSR_MCG_CAP 0x179
+#define MSR_MCG_STATUS 0x17a
+#define MSR_MCG_CTL 0x17b
+#define MSR_MCG_EXT_CTL 0x4d0
+
+#define MSR_P6_EVNTSEL0 0x186
+
+#define MSR_IA32_PERF_STATUS 0x198
+
+#define MSR_IA32_MISC_ENABLE 0x1a0
+/* Indicates good rep/movs microcode on some processors: */
+#define MSR_IA32_MISC_ENABLE_DEFAULT 1
+#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
+
+#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
+
+#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
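+/* e.g. MSR_MTRRphysBase(1) == 0x202 and MSR_MTRRphysIndex(0x202) == 1 */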
+
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix16K_A0000 0x259
+#define MSR_MTRRfix4K_C0000 0x268
+#define MSR_MTRRfix4K_C8000 0x269
+#define MSR_MTRRfix4K_D0000 0x26a
+#define MSR_MTRRfix4K_D8000 0x26b
+#define MSR_MTRRfix4K_E0000 0x26c
+#define MSR_MTRRfix4K_E8000 0x26d
+#define MSR_MTRRfix4K_F0000 0x26e
+#define MSR_MTRRfix4K_F8000 0x26f
+
+#define MSR_PAT 0x277
+
+#define MSR_MTRRdefType 0x2ff
+
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
+#define MSR_MC0_CTL 0x400
+#define MSR_MC0_STATUS 0x401
+#define MSR_MC0_ADDR 0x402
+#define MSR_MC0_MISC 0x403
+
+#define MSR_IA32_RTIT_OUTPUT_BASE 0x560
+#define MSR_IA32_RTIT_OUTPUT_MASK 0x561
+#define MSR_IA32_RTIT_CTL 0x570
+#define MSR_IA32_RTIT_STATUS 0x571
+#define MSR_IA32_RTIT_CR3_MATCH 0x572
+#define MSR_IA32_RTIT_ADDR0_A 0x580
+#define MSR_IA32_RTIT_ADDR0_B 0x581
+#define MSR_IA32_RTIT_ADDR1_A 0x582
+#define MSR_IA32_RTIT_ADDR1_B 0x583
+#define MSR_IA32_RTIT_ADDR2_A 0x584
+#define MSR_IA32_RTIT_ADDR2_B 0x585
+#define MSR_IA32_RTIT_ADDR3_A 0x586
+#define MSR_IA32_RTIT_ADDR3_B 0x587
+#define MAX_RTIT_ADDRS 8
+
+#define MSR_EFER 0xc0000080
+
+#define MSR_EFER_SCE (1 << 0)
+#define MSR_EFER_LME (1 << 8)
+#define MSR_EFER_LMA (1 << 10)
+#define MSR_EFER_NXE (1 << 11)
+#define MSR_EFER_SVME (1 << 12)
+#define MSR_EFER_FFXSR (1 << 14)
+
+#define MSR_EFER_RESERVED\
+ (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\
+ | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\
+ | MSR_EFER_FFXSR))
+
+#define MSR_STAR 0xc0000081
+#define MSR_LSTAR 0xc0000082
+#define MSR_CSTAR 0xc0000083
+#define MSR_FMASK 0xc0000084
+#define MSR_FSBASE 0xc0000100
+#define MSR_GSBASE 0xc0000101
+#define MSR_KERNELGSBASE 0xc0000102
+#define MSR_TSC_AUX 0xc0000103
+#define MSR_AMD64_TSC_RATIO 0xc0000104
+
+#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL
+
+#define MSR_VM_HSAVE_PA 0xc0010117
+
+#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_XSS 0x00000da0
+#define MSR_IA32_UMWAIT_CONTROL 0xe1
+
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+#define MSR_IA32_VMX_VMFUNC 0x00000491
+
+#define XSTATE_FP_BIT 0
+#define XSTATE_SSE_BIT 1
+#define XSTATE_YMM_BIT 2
+#define XSTATE_BNDREGS_BIT 3
+#define XSTATE_BNDCSR_BIT 4
+#define XSTATE_OPMASK_BIT 5
+#define XSTATE_ZMM_Hi256_BIT 6
+#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PKRU_BIT 9
+
+#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
+#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
+#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
+#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
+#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
+#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
+#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
+#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
+
+/* CPUID feature words */
+typedef enum FeatureWord {
+ FEAT_1_EDX, /* CPUID[1].EDX */
+ FEAT_1_ECX, /* CPUID[1].ECX */
+ FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
+ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
+ FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
+ FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
+ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
+ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
+ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
+ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
+ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
+ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
+ FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
+ FEAT_SVM, /* CPUID[8000_000A].EDX */
+ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
+ FEAT_6_EAX, /* CPUID[6].EAX */
+ FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+ FEAT_ARCH_CAPABILITIES,
+ FEAT_CORE_CAPABILITY,
+ FEAT_PERF_CAPABILITIES,
+ FEAT_VMX_PROCBASED_CTLS,
+ FEAT_VMX_SECONDARY_CTLS,
+ FEAT_VMX_PINBASED_CTLS,
+ FEAT_VMX_EXIT_CTLS,
+ FEAT_VMX_ENTRY_CTLS,
+ FEAT_VMX_MISC,
+ FEAT_VMX_EPT_VPID_CAPS,
+ FEAT_VMX_BASIC,
+ FEAT_VMX_VMFUNC,
+ FEAT_14_0_ECX,
+ FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
+ FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
+ FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
+ FEATURE_WORDS,
+} FeatureWord;
+
+typedef uint64_t FeatureWordArray[FEATURE_WORDS];
+
+/* cpuid_features bits */
+#define CPUID_FP87 (1U << 0)
+#define CPUID_VME (1U << 1)
+#define CPUID_DE (1U << 2)
+#define CPUID_PSE (1U << 3)
+#define CPUID_TSC (1U << 4)
+#define CPUID_MSR (1U << 5)
+#define CPUID_PAE (1U << 6)
+#define CPUID_MCE (1U << 7)
+#define CPUID_CX8 (1U << 8)
+#define CPUID_APIC (1U << 9)
+#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1U << 12)
+#define CPUID_PGE (1U << 13)
+#define CPUID_MCA (1U << 14)
+#define CPUID_CMOV (1U << 15)
+#define CPUID_PAT (1U << 16)
+#define CPUID_PSE36 (1U << 17)
+#define CPUID_PN (1U << 18)
+#define CPUID_CLFLUSH (1U << 19)
+#define CPUID_DTS (1U << 21)
+#define CPUID_ACPI (1U << 22)
+#define CPUID_MMX (1U << 23)
+#define CPUID_FXSR (1U << 24)
+#define CPUID_SSE (1U << 25)
+#define CPUID_SSE2 (1U << 26)
+#define CPUID_SS (1U << 27)
+#define CPUID_HT (1U << 28)
+#define CPUID_TM (1U << 29)
+#define CPUID_IA64 (1U << 30)
+#define CPUID_PBE (1U << 31)
+
+#define CPUID_EXT_SSE3 (1U << 0)
+#define CPUID_EXT_PCLMULQDQ (1U << 1)
+#define CPUID_EXT_DTES64 (1U << 2)
+#define CPUID_EXT_MONITOR (1U << 3)
+#define CPUID_EXT_DSCPL (1U << 4)
+#define CPUID_EXT_VMX (1U << 5)
+#define CPUID_EXT_SMX (1U << 6)
+#define CPUID_EXT_EST (1U << 7)
+#define CPUID_EXT_TM2 (1U << 8)
+#define CPUID_EXT_SSSE3 (1U << 9)
+#define CPUID_EXT_CID (1U << 10)
+#define CPUID_EXT_FMA (1U << 12)
+#define CPUID_EXT_CX16 (1U << 13)
+#define CPUID_EXT_XTPR (1U << 14)
+#define CPUID_EXT_PDCM (1U << 15)
+#define CPUID_EXT_PCID (1U << 17)
+#define CPUID_EXT_DCA (1U << 18)
+#define CPUID_EXT_SSE41 (1U << 19)
+#define CPUID_EXT_SSE42 (1U << 20)
+#define CPUID_EXT_X2APIC (1U << 21)
+#define CPUID_EXT_MOVBE (1U << 22)
+#define CPUID_EXT_POPCNT (1U << 23)
+#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
+#define CPUID_EXT_AES (1U << 25)
+#define CPUID_EXT_XSAVE (1U << 26)
+#define CPUID_EXT_OSXSAVE (1U << 27)
+#define CPUID_EXT_AVX (1U << 28)
+#define CPUID_EXT_F16C (1U << 29)
+#define CPUID_EXT_RDRAND (1U << 30)
+#define CPUID_EXT_HYPERVISOR (1U << 31)
+
+#define CPUID_EXT2_FPU (1U << 0)
+#define CPUID_EXT2_VME (1U << 1)
+#define CPUID_EXT2_DE (1U << 2)
+#define CPUID_EXT2_PSE (1U << 3)
+#define CPUID_EXT2_TSC (1U << 4)
+#define CPUID_EXT2_MSR (1U << 5)
+#define CPUID_EXT2_PAE (1U << 6)
+#define CPUID_EXT2_MCE (1U << 7)
+#define CPUID_EXT2_CX8 (1U << 8)
+#define CPUID_EXT2_APIC (1U << 9)
+#define CPUID_EXT2_SYSCALL (1U << 11)
+#define CPUID_EXT2_MTRR (1U << 12)
+#define CPUID_EXT2_PGE (1U << 13)
+#define CPUID_EXT2_MCA (1U << 14)
+#define CPUID_EXT2_CMOV (1U << 15)
+#define CPUID_EXT2_PAT (1U << 16)
+#define CPUID_EXT2_PSE36 (1U << 17)
+#define CPUID_EXT2_MP (1U << 19)
+#define CPUID_EXT2_NX (1U << 20)
+#define CPUID_EXT2_MMXEXT (1U << 22)
+#define CPUID_EXT2_MMX (1U << 23)
+#define CPUID_EXT2_FXSR (1U << 24)
+#define CPUID_EXT2_FFXSR (1U << 25)
+#define CPUID_EXT2_PDPE1GB (1U << 26)
+#define CPUID_EXT2_RDTSCP (1U << 27)
+#define CPUID_EXT2_LM (1U << 29)
+#define CPUID_EXT2_3DNOWEXT (1U << 30)
+#define CPUID_EXT2_3DNOW (1U << 31)
+
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
+#define CPUID_EXT3_LAHF_LM (1U << 0)
+#define CPUID_EXT3_CMP_LEG (1U << 1)
+#define CPUID_EXT3_SVM (1U << 2)
+#define CPUID_EXT3_EXTAPIC (1U << 3)
+#define CPUID_EXT3_CR8LEG (1U << 4)
+#define CPUID_EXT3_ABM (1U << 5)
+#define CPUID_EXT3_SSE4A (1U << 6)
+#define CPUID_EXT3_MISALIGNSSE (1U << 7)
+#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
+#define CPUID_EXT3_OSVW (1U << 9)
+#define CPUID_EXT3_IBS (1U << 10)
+#define CPUID_EXT3_XOP (1U << 11)
+#define CPUID_EXT3_SKINIT (1U << 12)
+#define CPUID_EXT3_WDT (1U << 13)
+#define CPUID_EXT3_LWP (1U << 15)
+#define CPUID_EXT3_FMA4 (1U << 16)
+#define CPUID_EXT3_TCE (1U << 17)
+#define CPUID_EXT3_NODEID (1U << 19)
+#define CPUID_EXT3_TBM (1U << 21)
+#define CPUID_EXT3_TOPOEXT (1U << 22)
+#define CPUID_EXT3_PERFCORE (1U << 23)
+#define CPUID_EXT3_PERFNB (1U << 24)
+
+#define CPUID_SVM_NPT (1U << 0)
+#define CPUID_SVM_LBRV (1U << 1)
+#define CPUID_SVM_SVMLOCK (1U << 2)
+#define CPUID_SVM_NRIPSAVE (1U << 3)
+#define CPUID_SVM_TSCSCALE (1U << 4)
+#define CPUID_SVM_VMCBCLEAN (1U << 5)
+#define CPUID_SVM_FLUSHASID (1U << 6)
+#define CPUID_SVM_DECODEASSIST (1U << 7)
+#define CPUID_SVM_PAUSEFILTER (1U << 10)
+#define CPUID_SVM_PFTHRESHOLD (1U << 12)
+#define CPUID_SVM_AVIC (1U << 13)
+#define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15)
+#define CPUID_SVM_VGIF (1U << 16)
+#define CPUID_SVM_SVME_ADDR_CHK (1U << 28)
+
+/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
+#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+/* Support SGX */
+#define CPUID_7_0_EBX_SGX (1U << 2)
+/* 1st Group of Advanced Bit Manipulation Extensions */
+#define CPUID_7_0_EBX_BMI1 (1U << 3)
+/* Hardware Lock Elision */
+#define CPUID_7_0_EBX_HLE (1U << 4)
+/* Intel Advanced Vector Extensions 2 */
+#define CPUID_7_0_EBX_AVX2 (1U << 5)
+/* Supervisor-mode Execution Prevention */
+#define CPUID_7_0_EBX_SMEP (1U << 7)
+/* 2nd Group of Advanced Bit Manipulation Extensions */
+#define CPUID_7_0_EBX_BMI2 (1U << 8)
+/* Enhanced REP MOVSB/STOSB */
+#define CPUID_7_0_EBX_ERMS (1U << 9)
+/* Invalidate Process-Context Identifier */
+#define CPUID_7_0_EBX_INVPCID (1U << 10)
+/* Restricted Transactional Memory */
+#define CPUID_7_0_EBX_RTM (1U << 11)
+/* Memory Protection Extension */
+#define CPUID_7_0_EBX_MPX (1U << 14)
+/* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512F (1U << 16)
+/* AVX-512 Doubleword & Quadword Instruction */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17)
+/* Read Random SEED */
+#define CPUID_7_0_EBX_RDSEED (1U << 18)
+/* ADCX and ADOX instructions */
+#define CPUID_7_0_EBX_ADX (1U << 19)
+/* Supervisor Mode Access Prevention */
+#define CPUID_7_0_EBX_SMAP (1U << 20)
+/* AVX-512 Integer Fused Multiply Add */
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
+/* Persistent Commit */
+#define CPUID_7_0_EBX_PCOMMIT (1U << 22)
+/* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
+/* Cache Line Write Back */
+#define CPUID_7_0_EBX_CLWB (1U << 24)
+/* Intel Processor Trace */
+#define CPUID_7_0_EBX_INTEL_PT (1U << 25)
+/* AVX-512 Prefetch */
+#define CPUID_7_0_EBX_AVX512PF (1U << 26)
+/* AVX-512 Exponential and Reciprocal */
+#define CPUID_7_0_EBX_AVX512ER (1U << 27)
+/* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512CD (1U << 28)
+/* SHA1/SHA256 Instruction Extensions */
+#define CPUID_7_0_EBX_SHA_NI (1U << 29)
+/* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30)
+/* AVX-512 Vector Length Extensions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31)
+
+/* AVX-512 Vector Byte Manipulation Instruction */
+#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
+/* User-Mode Instruction Prevention */
+#define CPUID_7_0_ECX_UMIP (1U << 2)
+/* Protection Keys for User-mode Pages */
+#define CPUID_7_0_ECX_PKU (1U << 3)
+/* OS Enable Protection Keys */
+#define CPUID_7_0_ECX_OSPKE (1U << 4)
+/* UMONITOR/UMWAIT/TPAUSE Instructions */
+#define CPUID_7_0_ECX_WAITPKG (1U << 5)
+/* Additional AVX-512 Vector Byte Manipulation Instruction */
+#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
+/* Galois Field New Instructions */
+#define CPUID_7_0_ECX_GFNI (1U << 8)
+/* Vector AES Instructions */
+#define CPUID_7_0_ECX_VAES (1U << 9)
+/* Carry-Less Multiplication Quadword */
+#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
+/* Vector Neural Network Instructions */
+#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
+/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
+#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
+/* POPCNT for vectors of DW/QW */
+#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
+/* 5-level Page Tables */
+#define CPUID_7_0_ECX_LA57 (1U << 16)
+/* Read Processor ID */
+#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* Bus Lock Debug Exception */
+#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
+/* Cache Line Demote Instruction */
+#define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
+/* Move Doubleword as Direct Store Instruction */
+#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
+/* Move 64 Bytes as Direct Store Instruction */
+#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
+/* Support SGX Launch Control */
+#define CPUID_7_0_ECX_SGX_LC (1U << 30)
+/* Protection Keys for Supervisor-mode Pages */
+#define CPUID_7_0_ECX_PKS (1U << 31)
+
+/* AVX512 Neural Network Instructions */
+#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
+/* AVX512 Multiply Accumulation Single Precision */
+#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
+/* Fast Short Rep Mov */
+#define CPUID_7_0_EDX_FSRM (1U << 4)
+/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
+#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+/* SERIALIZE instruction */
+#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
+/* TSX Suspend Load Address Tracking instruction */
+#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
+/* AVX512_FP16 instruction */
+#define CPUID_7_0_EDX_AVX512_FP16 (1U << 23)
+/* Speculation Control */
+#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
+/* Single Thread Indirect Branch Predictors */
+#define CPUID_7_0_EDX_STIBP (1U << 27)
+/* Arch Capabilities */
+#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
+/* Core Capability */
+#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
+/* Speculative Store Bypass Disable */
+#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
+
+/* AVX VNNI Instruction */
+#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
+/* AVX512 BFloat16 Instruction */
+#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+
+/* Packets which contain IP payload have LIP values */
+#define CPUID_14_0_ECX_LIP (1U << 31)
+
+/* CLZERO instruction */
+#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
+/* Always save/restore FP error pointers */
+#define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
+/* Write back and do not invalidate cache */
+#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
+/* Indirect Branch Prediction Barrier */
+#define CPUID_8000_0008_EBX_IBPB (1U << 12)
+/* Indirect Branch Restricted Speculation */
+#define CPUID_8000_0008_EBX_IBRS (1U << 14)
+/* Single Thread Indirect Branch Predictors */
+#define CPUID_8000_0008_EBX_STIBP (1U << 15)
+/* Speculative Store Bypass Disable */
+#define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24)
+
+#define CPUID_XSAVE_XSAVEOPT (1U << 0)
+#define CPUID_XSAVE_XSAVEC (1U << 1)
+#define CPUID_XSAVE_XGETBV1 (1U << 2)
+#define CPUID_XSAVE_XSAVES (1U << 3)
+
+#define CPUID_6_EAX_ARAT (1U << 2)
+
+/* CPUID[0x80000007].EDX flags: */
+#define CPUID_APM_INVTSC (1U << 8)
+
+#define CPUID_VENDOR_SZ 12
+
+#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
+#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
+#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+#define CPUID_VENDOR_INTEL "GenuineIntel"
+
+#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
+#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
+#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
+#define CPUID_VENDOR_AMD "AuthenticAMD"
+
+#define CPUID_VENDOR_VIA "CentaurHauls"
+
+#define CPUID_VENDOR_HYGON "HygonGenuine"
+
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+
+#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
+
+/* CPUID[0xB].ECX level types */
+#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
+#define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
+#define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
+#define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8)
+
+/* MSR Feature Bits */
+#define MSR_ARCH_CAP_RDCL_NO (1U << 0)
+#define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
+#define MSR_ARCH_CAP_RSBA (1U << 2)
+#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
+#define MSR_ARCH_CAP_SSB_NO (1U << 4)
+#define MSR_ARCH_CAP_MDS_NO (1U << 5)
+#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
+#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
+#define MSR_ARCH_CAP_TAA_NO (1U << 8)
+
+#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
+
+/* VMX MSR features */
+#define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
+#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
+#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
+#define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
+#define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
+#define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
+
+#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
+#define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
+#define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
+#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
+#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
+#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
+#define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
+#define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)
+
+#define MSR_VMX_EPT_EXECONLY (1ULL << 0)
+#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
+#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
+#define MSR_VMX_EPT_UC (1ULL << 8)
+#define MSR_VMX_EPT_WB (1ULL << 14)
+#define MSR_VMX_EPT_2MB (1ULL << 16)
+#define MSR_VMX_EPT_1GB (1ULL << 17)
+#define MSR_VMX_EPT_INVEPT (1ULL << 20)
+#define MSR_VMX_EPT_AD_BITS (1ULL << 21)
+#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
+#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
+#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
+#define MSR_VMX_EPT_INVVPID (1ULL << 32)
+#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
+#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
+#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
+#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)
+
+#define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)
+
+
+/* VMX controls */
+#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+#define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define VMX_CPU_BASED_HLT_EXITING 0x00000080
+#define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
+#define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
+#define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
+#define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
+#define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
+#define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
+#define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
+#define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
+#define VMX_CPU_BASED_TPR_SHADOW 0x00200000
+#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
+#define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
+#define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
+#define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
+#define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
+#define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
+#define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
+#define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
+#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
+
+#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define VMX_SECONDARY_EXEC_DESC 0x00000004
+#define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
+#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
+#define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
+#define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
+#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
+#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
+#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
+#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
+#define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
+#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
+#define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
+#define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
+#define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
+#define VMX_SECONDARY_EXEC_XSAVES 0x00100000
+#define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000
+
+#define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
+#define VMX_PIN_BASED_NMI_EXITING 0x00000008
+#define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
+#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
+#define VMX_PIN_BASED_POSTED_INTR 0x00000080
+
+#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
+#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
+#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
+#define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
+#define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
+#define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
+#define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
+#define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
+#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
+#define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
+#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
+#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
+#define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
+
+#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
+#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
+#define VMX_VM_ENTRY_SMM 0x00000400
+#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
+#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
+#define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
+#define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
+#define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
+#define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
+#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
+#define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000
+
+/* Supported Hyper-V Enlightenments */
+#define HYPERV_FEAT_RELAXED 0
+#define HYPERV_FEAT_VAPIC 1
+#define HYPERV_FEAT_TIME 2
+#define HYPERV_FEAT_CRASH 3
+#define HYPERV_FEAT_RESET 4
+#define HYPERV_FEAT_VPINDEX 5
+#define HYPERV_FEAT_RUNTIME 6
+#define HYPERV_FEAT_SYNIC 7
+#define HYPERV_FEAT_STIMER 8
+#define HYPERV_FEAT_FREQUENCIES 9
+#define HYPERV_FEAT_REENLIGHTENMENT 10
+#define HYPERV_FEAT_TLBFLUSH 11
+#define HYPERV_FEAT_EVMCS 12
+#define HYPERV_FEAT_IPI 13
+#define HYPERV_FEAT_STIMER_DIRECT 14
+#define HYPERV_FEAT_AVIC 15
+
+#ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
+#define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF
+#endif
+
+#define EXCP00_DIVZ 0
+#define EXCP01_DB 1
+#define EXCP02_NMI 2
+#define EXCP03_INT3 3
+#define EXCP04_INTO 4
+#define EXCP05_BOUND 5
+#define EXCP06_ILLOP 6
+#define EXCP07_PREX 7
+#define EXCP08_DBLE 8
+#define EXCP09_XERR 9
+#define EXCP0A_TSS 10
+#define EXCP0B_NOSEG 11
+#define EXCP0C_STACK 12
+#define EXCP0D_GPF 13
+#define EXCP0E_PAGE 14
+#define EXCP10_COPR 16
+#define EXCP11_ALGN 17
+#define EXCP12_MCHK 18
+
+#define EXCP_VMEXIT 0x100 /* only for system emulation */
+#define EXCP_SYSCALL 0x101 /* only for user emulation */
+#define EXCP_VSYSCALL 0x102 /* only for user emulation */
+
+/* i386-specific interrupt pending bits. */
+#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
+#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
+
+/* Use a clearer name for this. */
+#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
+
+/* Instead of computing the condition codes after each x86 instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, the condition codes can be calculated
+ * using this information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
+typedef enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
+
+ CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
+ CC_OP_MULW,
+ CC_OP_MULL,
+ CC_OP_MULQ,
+
+ CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDW,
+ CC_OP_ADDL,
+ CC_OP_ADDQ,
+
+ CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADCW,
+ CC_OP_ADCL,
+ CC_OP_ADCQ,
+
+ CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBW,
+ CC_OP_SUBL,
+ CC_OP_SUBQ,
+
+ CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SBBW,
+ CC_OP_SBBL,
+ CC_OP_SBBQ,
+
+ CC_OP_LOGICB, /* modify all flags, CC_DST = res */
+ CC_OP_LOGICW,
+ CC_OP_LOGICL,
+ CC_OP_LOGICQ,
+
+ CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
+ CC_OP_INCW,
+ CC_OP_INCL,
+ CC_OP_INCQ,
+
+ CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
+ CC_OP_DECW,
+ CC_OP_DECL,
+ CC_OP_DECQ,
+
+ CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
+ CC_OP_SHLW,
+ CC_OP_SHLL,
+ CC_OP_SHLQ,
+
+ CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
+ CC_OP_SARW,
+ CC_OP_SARL,
+ CC_OP_SARQ,
+
+ CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
+ CC_OP_BMILGW,
+ CC_OP_BMILGL,
+ CC_OP_BMILGQ,
+
+ CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
+ CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+ CC_OP_CLR, /* Z set, all other flags clear. */
+ CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */
+
+ CC_OP_NB,
+} CCOp;
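+/*
+ * For example, after a 32-bit ADD the translator records
+ * CC_OP = CC_OP_ADDL, CC_DST = result and CC_SRC = src1; if ZF is
+ * needed later it is derived as (CC_DST == 0) and CF as the 32-bit
+ * unsigned comparison (CC_DST < CC_SRC), with no eager flag
+ * computation at translation time.
+ */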
+
+typedef struct SegmentCache {
+ uint32_t selector;
+ target_ulong base;
+ uint32_t limit;
+ uint32_t flags;
+} SegmentCache;
+
+#define MMREG_UNION(n, bits) \
+ union n { \
+ uint8_t _b_##n[(bits)/8]; \
+ uint16_t _w_##n[(bits)/16]; \
+ uint32_t _l_##n[(bits)/32]; \
+ uint64_t _q_##n[(bits)/64]; \
+ float32 _s_##n[(bits)/32]; \
+ float64 _d_##n[(bits)/64]; \
+ }
+
+typedef union {
+ uint8_t _b[16];
+ uint16_t _w[8];
+ uint32_t _l[4];
+ uint64_t _q[2];
+} XMMReg;
+
+typedef union {
+ uint8_t _b[32];
+ uint16_t _w[16];
+ uint32_t _l[8];
+ uint64_t _q[4];
+} YMMReg;
+
+typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
+typedef MMREG_UNION(MMXReg, 64) MMXReg;
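+/*
+ * E.g. MMREG_UNION(MMXReg, 64) expands to a union exposing the same
+ * 64 bits as _b_MMXReg[8], _w_MMXReg[4], _l_MMXReg[2], _q_MMXReg[1],
+ * _s_MMXReg[2] and _d_MMXReg[1].
+ */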
+
+typedef struct BNDReg {
+ uint64_t lb;
+ uint64_t ub;
+} BNDReg;
+
+typedef struct BNDCSReg {
+ uint64_t cfgu;
+ uint64_t sts;
+} BNDCSReg;
+
+#define BNDCFG_ENABLE 1ULL
+#define BNDCFG_BNDPRESERVE 2ULL
+#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define ZMM_B(n) _b_ZMMReg[63 - (n)]
+#define ZMM_W(n) _w_ZMMReg[31 - (n)]
+#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_S(n) _s_ZMMReg[15 - (n)]
+#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
+#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+
+#define MMX_B(n) _b_MMXReg[7 - (n)]
+#define MMX_W(n) _w_MMXReg[3 - (n)]
+#define MMX_L(n) _l_MMXReg[1 - (n)]
+#define MMX_S(n) _s_MMXReg[1 - (n)]
+#else
+#define ZMM_B(n) _b_ZMMReg[n]
+#define ZMM_W(n) _w_ZMMReg[n]
+#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_S(n) _s_ZMMReg[n]
+#define ZMM_Q(n) _q_ZMMReg[n]
+#define ZMM_D(n) _d_ZMMReg[n]
+
+#define MMX_B(n) _b_MMXReg[n]
+#define MMX_W(n) _w_MMXReg[n]
+#define MMX_L(n) _l_MMXReg[n]
+#define MMX_S(n) _s_MMXReg[n]
+#endif
+#define MMX_Q(n) _q_MMXReg[n]
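+/*
+ * On big-endian hosts the indices are mirrored so that e.g. ZMM_L(0)
+ * always names the architecturally lowest 32 bits of the register,
+ * independent of host byte order.
+ */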
+
+typedef union {
+ floatx80 d __attribute__((aligned(16)));
+ MMXReg mmx;
+} FPReg;
+
+typedef struct {
+ uint64_t base;
+ uint64_t mask;
+} MTRRVar;
+
+#define CPU_NB_REGS64 16
+#define CPU_NB_REGS32 8
+
+#ifdef TARGET_X86_64
+#define CPU_NB_REGS CPU_NB_REGS64
+#else
+#define CPU_NB_REGS CPU_NB_REGS32
+#endif
+
+#define MAX_FIXED_COUNTERS 3
+#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
+
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+#define NB_OPMASK_REGS 8
+
+/* A CPU can't have the APIC ID 0xFFFFFFFF; that value is used to indicate
+ * that the APIC ID hasn't been set yet
+ */
+#define UNASSIGNED_APIC_ID 0xFFFFFFFF
+
+typedef union X86LegacyXSaveArea {
+ struct {
+ uint16_t fcw;
+ uint16_t fsw;
+ uint8_t ftw;
+ uint8_t reserved;
+ uint16_t fpop;
+ uint64_t fpip;
+ uint64_t fpdp;
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ FPReg fpregs[8];
+ uint8_t xmm_regs[16][16];
+ };
+ uint8_t data[512];
+} X86LegacyXSaveArea;
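+/* The legacy region mirrors the 512-byte FXSAVE image byte for byte. */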
+
+typedef struct X86XSaveHeader {
+ uint64_t xstate_bv;
+ uint64_t xcomp_bv;
+ uint64_t reserve0;
+ uint8_t reserved[40];
+} X86XSaveHeader;
+
+/* Ext. save area 2: AVX State */
+typedef struct XSaveAVX {
+ uint8_t ymmh[16][16];
+} XSaveAVX;
+
+/* Ext. save area 3: BNDREG */
+typedef struct XSaveBNDREG {
+ BNDReg bnd_regs[4];
+} XSaveBNDREG;
+
+/* Ext. save area 4: BNDCSR */
+typedef union XSaveBNDCSR {
+ BNDCSReg bndcsr;
+ uint8_t data[64];
+} XSaveBNDCSR;
+
+/* Ext. save area 5: Opmask */
+typedef struct XSaveOpmask {
+ uint64_t opmask_regs[NB_OPMASK_REGS];
+} XSaveOpmask;
+
+/* Ext. save area 6: ZMM_Hi256 */
+typedef struct XSaveZMM_Hi256 {
+ uint8_t zmm_hi256[16][32];
+} XSaveZMM_Hi256;
+
+/* Ext. save area 7: Hi16_ZMM */
+typedef struct XSaveHi16_ZMM {
+ uint8_t hi16_zmm[16][64];
+} XSaveHi16_ZMM;
+
+/* Ext. save area 9: PKRU state */
+typedef struct XSavePKRU {
+ uint32_t pkru;
+ uint32_t padding;
+} XSavePKRU;
+
+QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
+QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
+QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
+QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
+QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
+
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)
+
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+
+typedef enum TPRAccess {
+ TPR_ACCESS_READ,
+ TPR_ACCESS_WRITE,
+} TPRAccess;
+
+/* Cache information data structures: */
+
+enum CacheType {
+ DATA_CACHE,
+ INSTRUCTION_CACHE,
+ UNIFIED_CACHE
+};
+
+typedef struct CPUCacheInfo {
+ enum CacheType type;
+ uint8_t level;
+ /* Size in bytes */
+ uint32_t size;
+ /* Line size, in bytes */
+ uint16_t line_size;
+ /*
+ * Associativity.
+ * Note: representation of fully-associative caches is not implemented
+ */
+ uint8_t associativity;
+ /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
+ uint8_t partitions;
+ /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
+ uint32_t sets;
+ /*
+ * Lines per tag.
+ * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
+ * (Is this a synonym for @partitions?)
+ */
+ uint8_t lines_per_tag;
+
+ /* Self-initializing cache */
+ bool self_init;
+ /*
+ * WBINVD/INVD is not guaranteed to act upon lower level caches of
+ * non-originating threads sharing this cache.
+ * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
+ */
+ bool no_invd_sharing;
+ /*
+ * Cache is inclusive of lower cache levels.
+ * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
+ */
+ bool inclusive;
+ /*
+ * A complex function is used to index the cache, potentially using all
+ * address bits. CPUID[4].EDX[bit 2].
+ */
+ bool complex_indexing;
+} CPUCacheInfo;
+
+
+typedef struct CPUCaches {
+ CPUCacheInfo *l1d_cache;
+ CPUCacheInfo *l1i_cache;
+ CPUCacheInfo *l2_cache;
+ CPUCacheInfo *l3_cache;
+} CPUCaches;
+
+typedef struct HVFX86LazyFlags {
+ target_ulong result;
+ target_ulong auxbits;
+} HVFX86LazyFlags;
+
+typedef struct CPUX86State {
+ /* standard registers */
+ target_ulong regs[CPU_NB_REGS];
+ target_ulong eip;
+ target_ulong eflags; /* eflags register. During CPU emulation, CC
+ flags and DF are set to zero because they are
+ stored elsewhere */
+
+ /* emulator internal eflags handling */
+ target_ulong cc_dst;
+ target_ulong cc_src;
+ target_ulong cc_src2;
+ uint32_t cc_op;
+ int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+ uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+ are known at translation time. */
+ uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
+
+ /* segments */
+ SegmentCache segs[6]; /* selector values */
+ SegmentCache ldt;
+ SegmentCache tr;
+ SegmentCache gdt; /* only base and limit are used */
+ SegmentCache idt; /* only base and limit are used */
+
+ target_ulong cr[5]; /* NOTE: cr1 is unused */
+ int32_t a20_mask;
+
+ BNDReg bnd_regs[4];
+ BNDCSReg bndcs_regs;
+ uint64_t msr_bndcfgs;
+ uint64_t efer;
+
+ /* Beginning of state preserved by INIT (dummy marker). */
+ struct {} start_init_save;
+
+ /* FPU state */
+ unsigned int fpstt; /* top of stack index */
+ uint16_t fpus;
+ uint16_t fpuc;
+ uint8_t fptags[8]; /* 0 = valid, 1 = empty */
+ FPReg fpregs[8];
+ /* KVM-only so far */
+ uint16_t fpop;
+ uint16_t fpcs;
+ uint16_t fpds;
+ uint64_t fpip;
+ uint64_t fpdp;
+
+ /* emulator internal variables */
+ float_status fp_status;
+ floatx80 ft0;
+
+ float_status mmx_status; /* for 3DNow! float ops */
+ float_status sse_status;
+ uint32_t mxcsr;
+ ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
+ ZMMReg xmm_t0;
+ MMXReg mmx_t0;
+
+ XMMReg ymmh_regs[CPU_NB_REGS];
+
+ uint64_t opmask_regs[NB_OPMASK_REGS];
+ YMMReg zmmh_regs[CPU_NB_REGS];
+ ZMMReg hi16_zmm_regs[CPU_NB_REGS];
+
+ /* sysenter registers */
+ uint32_t sysenter_cs;
+ target_ulong sysenter_esp;
+ target_ulong sysenter_eip;
+ uint64_t star;
+
+ uint64_t vm_hsave;
+
+#ifdef TARGET_X86_64
+ target_ulong lstar;
+ target_ulong cstar;
+ target_ulong fmask;
+ target_ulong kernelgsbase;
+#endif
+
+ uint64_t tsc;
+ uint64_t tsc_adjust;
+ uint64_t tsc_deadline;
+ uint64_t tsc_aux;
+
+ uint64_t xcr0;
+
+ uint64_t mcg_status;
+ uint64_t msr_ia32_misc_enable;
+ uint64_t msr_ia32_feature_control;
+ uint64_t msr_ia32_sgxlepubkeyhash[4];
+
+ uint64_t msr_fixed_ctr_ctrl;
+ uint64_t msr_global_ctrl;
+ uint64_t msr_global_status;
+ uint64_t msr_global_ovf_ctrl;
+ uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
+ uint64_t msr_gp_counters[MAX_GP_COUNTERS];
+ uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
+
+ uint64_t pat;
+ uint32_t smbase;
+ uint64_t msr_smi_count;
+
+ uint32_t pkru;
+ uint32_t pkrs;
+ uint32_t tsx_ctrl;
+
+ uint64_t spec_ctrl;
+ uint64_t amd_tsc_scale_msr;
+ uint64_t virt_ssbd;
+
+ /* End of state preserved by INIT (dummy marker). */
+ struct {} end_init_save;
+
+ uint64_t system_time_msr;
+ uint64_t wall_clock_msr;
+ uint64_t steal_time_msr;
+ uint64_t async_pf_en_msr;
+ uint64_t async_pf_int_msr;
+ uint64_t pv_eoi_en_msr;
+ uint64_t poll_control_msr;
+
+ /* Partition-wide HV MSRs, will be updated only on the first vcpu */
+ uint64_t msr_hv_hypercall;
+ uint64_t msr_hv_guest_os_id;
+ uint64_t msr_hv_tsc;
+
+ /* Per-VCPU HV MSRs */
+ uint64_t msr_hv_vapic;
+ uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
+ uint64_t msr_hv_runtime;
+ uint64_t msr_hv_synic_control;
+ uint64_t msr_hv_synic_evt_page;
+ uint64_t msr_hv_synic_msg_page;
+ uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
+ uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
+ uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
+ uint64_t msr_hv_reenlightenment_control;
+ uint64_t msr_hv_tsc_emulation_control;
+ uint64_t msr_hv_tsc_emulation_status;
+
+ uint64_t msr_rtit_ctrl;
+ uint64_t msr_rtit_status;
+ uint64_t msr_rtit_output_base;
+ uint64_t msr_rtit_output_mask;
+ uint64_t msr_rtit_cr3_match;
+ uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
+
+ /* exception/interrupt handling */
+ int error_code;
+ int exception_is_int;
+ target_ulong exception_next_eip;
+ target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
+ union {
+ struct CPUBreakpoint *cpu_breakpoint[4];
+ struct CPUWatchpoint *cpu_watchpoint[4];
+ }; /* break/watchpoints for dr[0..3] */
+ int old_exception; /* exception in flight */
+
+ uint64_t vm_vmcb;
+ uint64_t tsc_offset;
+ uint64_t intercept;
+ uint16_t intercept_cr_read;
+ uint16_t intercept_cr_write;
+ uint16_t intercept_dr_read;
+ uint16_t intercept_dr_write;
+ uint32_t intercept_exceptions;
+ uint64_t nested_cr3;
+ uint32_t nested_pg_mode;
+ uint8_t v_tpr;
+ uint32_t int_ctl;
+
+ /* KVM states, automatically cleared on reset */
+ uint8_t nmi_injected;
+ uint8_t nmi_pending;
+
+ uintptr_t retaddr;
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields after this point are preserved across CPU reset. */
+
+ /* processor features (e.g. for CPUID insn) */
+ /* Actual cpuid leaf 7 value */
+ uint32_t cpuid_level_func7;
+ /* Minimum cpuid leaf 7 value */
+ uint32_t cpuid_min_level_func7;
+ /* Minimum level/xlevel/xlevel2, based on CPU model + features */
+ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
+ /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
+ uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
+ /* Actual level/xlevel/xlevel2 value: */
+ uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
+ uint32_t cpuid_vendor1;
+ uint32_t cpuid_vendor2;
+ uint32_t cpuid_vendor3;
+ uint32_t cpuid_version;
+ FeatureWordArray features;
+ /* Features that were explicitly enabled/disabled */
+ FeatureWordArray user_features;
+ uint32_t cpuid_model[12];
+ /* Cache information for CPUID. When legacy-cache=on, the cache data
+ * on each CPUID leaf will be different, because we keep compatibility
+ * with old QEMU versions.
+ */
+ CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
+
+ /* MTRRs */
+ uint64_t mtrr_fixed[11];
+ uint64_t mtrr_deftype;
+ MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
+
+ /* For KVM */
+ uint32_t mp_state;
+ int32_t exception_nr;
+ int32_t interrupt_injected;
+ uint8_t soft_interrupt;
+ uint8_t exception_pending;
+ uint8_t exception_injected;
+ uint8_t has_error_code;
+ uint8_t exception_has_payload;
+ uint64_t exception_payload;
+ uint32_t ins_len;
+ uint32_t sipi_vector;
+ bool tsc_valid;
+ int64_t tsc_khz;
+ int64_t user_tsc_khz; /* for sanity check only */
+ uint64_t apic_bus_freq;
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
+ void *xsave_buf;
+ uint32_t xsave_buf_len;
+#endif
+#if defined(CONFIG_KVM)
+ struct kvm_nested_state *nested_state;
+#endif
+#if defined(CONFIG_HVF)
+ HVFX86LazyFlags hvf_lflags;
+ void *hvf_mmio_buf;
+#endif
+
+ uint64_t mcg_cap;
+ uint64_t mcg_ctl;
+ uint64_t mcg_ext_ctl;
+ uint64_t mce_banks[MCE_BANKS_DEF*4];
+ uint64_t xstate_bv;
+
+ /* vmstate */
+ uint16_t fpus_vmstate;
+ uint16_t fptag_vmstate;
+ uint16_t fpregs_format_vmstate;
+
+ uint64_t xss;
+ uint32_t umwait;
+
+ TPRAccess tpr_access_type;
+
+ unsigned nr_dies;
+} CPUX86State;
+
+struct kvm_msrs;
+
+/**
+ * X86CPU:
+ * @env: #CPUX86State
+ * @migratable: If set, only migratable flags will be accepted when "enforce"
+ * mode is used, and only migratable flags will be included in the "host"
+ * CPU model.
+ *
+ * An x86 CPU.
+ */
+struct X86CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUX86State env;
+ VMChangeStateEntry *vmsentry;
+
+ uint64_t ucode_rev;
+
+ uint32_t hyperv_spinlock_attempts;
+ char *hyperv_vendor;
+ bool hyperv_synic_kvm_only;
+ uint64_t hyperv_features;
+ bool hyperv_passthrough;
+ OnOffAuto hyperv_no_nonarch_cs;
+ uint32_t hyperv_vendor_id[3];
+ uint32_t hyperv_interface_id[4];
+ uint32_t hyperv_limits[3];
+ uint32_t hyperv_nested[4];
+ bool hyperv_enforce_cpuid;
+ uint32_t hyperv_ver_id_build;
+ uint16_t hyperv_ver_id_major;
+ uint16_t hyperv_ver_id_minor;
+ uint32_t hyperv_ver_id_sp;
+ uint8_t hyperv_ver_id_sb;
+ uint32_t hyperv_ver_id_sn;
+
+ bool check_cpuid;
+ bool enforce_cpuid;
+ /*
+ * Force features to be enabled even if the host doesn't support them.
+ * This is dangerous and should be done only for testing CPUID
+ * compatibility.
+ */
+ bool force_features;
+ bool expose_kvm;
+ bool expose_tcg;
+ bool migratable;
+ bool migrate_smi_count;
+ bool max_features; /* Enable all supported features automatically */
+ uint32_t apic_id;
+
+ /* Enables publishing of TSC increment and Local APIC bus frequencies to
+ * the guest OS in CPUID leaf 0x40000010, the same way that VMware does. */
+ bool vmware_cpuid_freq;
+
+ /* if true the CPUID code directly forwards host cache leaves to the guest */
+ bool cache_info_passthrough;
+
+ /* if true the CPUID code directly forwards
+ * host monitor/mwait leaves to the guest */
+ struct {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ } mwait;
+
+ /* Features that were filtered out because of missing host capabilities */
+ FeatureWordArray filtered_features;
+
+ /* Enable PMU CPUID bits. This can't be enabled by default yet because
+ * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
+ * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
+ * capabilities) directly to the guest.
+ */
+ bool enable_pmu;
+
+ /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
+ * disabled by default to avoid breaking migration between QEMU with
+ * different LMCE configurations.
+ */
+ bool enable_lmce;
+
+ /* Compatibility bits for old machine types.
+ * If true, present a virtual L3 cache to the VM; the vcpus in the same
+ * virtual socket share one virtual L3 cache.
+ */
+ bool enable_l3_cache;
+
+ /* Compatibility bits for old machine types.
+ * If true, present the old cache topology information.
+ */
+ bool legacy_cache;
+
+ /* Compatibility bits for old machine types: */
+ bool enable_cpuid_0xb;
+
+ /* Enable auto level-increase for all CPUID leaves */
+ bool full_cpuid_auto_level;
+
+ /* Only advertise CPUID leaves defined by the vendor */
+ bool vendor_cpuid_only;
+
+ /* Enable auto level-increase for the Intel Processor Trace leaf */
+ bool intel_pt_auto_level;
+
+ /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
+ bool fill_mtrr_mask;
+
+ /* if true override the phys_bits value with a value read from the host */
+ bool host_phys_bits;
+
+ /* if set, limit maximum value for phys_bits when host_phys_bits is true */
+ uint8_t host_phys_bits_limit;
+
+ /* Stop SMI delivery for migration compatibility with old machines */
+ bool kvm_no_smi_migration;
+
+ /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
+ bool kvm_pv_enforce_cpuid;
+
+ /* Number of physical address bits supported */
+ uint32_t phys_bits;
+
+ /* in order to simplify APIC support, we leave this pointer to the
+ user */
+ struct DeviceState *apic_state;
+ struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
+ Notifier machine_done;
+
+ struct kvm_msrs *kvm_msr_buf;
+
+ int32_t node_id; /* NUMA node this CPU belongs to */
+ int32_t socket_id;
+ int32_t die_id;
+ int32_t core_id;
+ int32_t thread_id;
+
+ int32_t hv_max_vps;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_x86_cpu;
+#endif
+
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque);
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque);
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque);
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque);
+
+void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
+
+hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+
+int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void x86_cpu_list(void);
+int cpu_x86_support_mca_broadcast(CPUX86State *env);
+
+#ifndef CONFIG_USER_ONLY
+int cpu_get_pic_interrupt(CPUX86State *s);
+
+/* MSDOS compatibility mode FPU exception support */
+void x86_register_ferr_irq(qemu_irq irq);
+void fpu_check_raise_ferr_irq(CPUX86State *s);
+void cpu_set_ignne(void);
+void cpu_clear_ignne(void);
+#endif
+
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
+
+/* this function must always be used to load data in the segment
+ cache: it synchronizes the hflags with the segment cache values */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+ X86Seg seg_reg, unsigned int selector,
+ target_ulong base,
+ unsigned int limit,
+ unsigned int flags)
+{
+ SegmentCache *sc;
+ unsigned int new_hflags;
+
+ sc = &env->segs[seg_reg];
+ sc->selector = selector;
+ sc->base = base;
+ sc->limit = limit;
+ sc->flags = flags;
+
+ /* update the hidden flags */
+ {
+ if (seg_reg == R_CS) {
+#ifdef TARGET_X86_64
+ if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
+ /* long mode */
+ env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ env->hflags &= ~(HF_ADDSEG_MASK);
+ } else
+#endif
+ {
+ /* legacy / compatibility case */
+ new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+ env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
+ new_hflags;
+ }
+ }
+ if (seg_reg == R_SS) {
+ int cpl = (flags >> DESC_DPL_SHIFT) & 3;
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+ env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
+ /* Possibly switch between BNDCFGS and BNDCFGU */
+ cpu_sync_bndcs_hflags(env);
+ }
+ new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (env->hflags & HF_CS64_MASK) {
+ /* zero base assumed for DS, ES and SS in long mode */
+ } else if (!(env->cr[0] & CR0_PE_MASK) ||
+ (env->eflags & VM_MASK) ||
+ !(env->hflags & HF_CS32_MASK)) {
+ /* XXX: try to avoid this test. The problem comes from the
+ fact that in real mode or vm86 mode we only modify the
+ 'base' and 'selector' fields of the segment cache to go
+ faster. A solution may be to force addseg to one in
+ translate-i386.c. */
+ new_hflags |= HF_ADDSEG_MASK;
+ } else {
+ new_hflags |= ((env->segs[R_DS].base |
+ env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) <<
+ HF_ADDSEG_SHIFT;
+ }
+ env->hflags = (env->hflags &
+ ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
+ }
+}
+
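+/* Handle a startup IPI: CS selector = vector << 8, CS base = vector << 12,
+ * and execution resumes at EIP 0 with the vcpu unhalted. */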
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
+ uint8_t sipi_vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
+ env->eip = 0;
+ cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
+ sipi_vector << 12,
+ env->segs[R_CS].limit,
+ env->segs[R_CS].flags);
+ cs->halted = 0;
+}
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+ target_ulong *base, unsigned int *limit,
+ unsigned int *flags);
+
+/* op_helper.c */
+/* used for debug or cpu save/restore */
+
+/* cpu-exec.c */
+/* the following helpers are only usable in user mode simulation as
+ they can trigger unexpected exceptions */
+void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector);
+void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
+void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
+void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
+void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
+
+/* cpu.c */
+void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
+ uint32_t vendor2, uint32_t vendor3);
+typedef struct PropValue {
+ const char *prop, *value;
+} PropValue;
+void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
+
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
+
+/* cpu.c other functions (cpuid) */
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+void cpu_clear_apic_feature(CPUX86State *env);
+void host_cpuid(uint32_t function, uint32_t count,
+ uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+
+/* helper.c */
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
+
+#ifndef CONFIG_USER_ONLY
+static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return !!attrs.secure;
+}
+
+static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
+}
+
+/*
+ * load efer and update the corresponding hflags. XXX: do consistency
+ * checks with cpuid bits?
+ */
+void cpu_load_efer(CPUX86State *env, uint64_t val);
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
+#endif
+
+/* these will be removed eventually */
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
+
+/* hw/pc.c */
+uint64_t cpu_get_tsc(CPUX86State *env);
+
+#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
+#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_X86_CPU
+
+#ifdef TARGET_X86_64
+#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
+#else
+#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
+#endif
+
+#define cpu_list x86_cpu_list
+
+/* MMU mode definitions */
+#define MMU_KSMAP_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KNOSMAP_IDX 2
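+/* Pick the MMU index for an access: user mode at CPL 3, otherwise kernel
+ * mode with SMAP checks either enforced, or bypassed when SMAP is
+ * disabled or EFLAGS.AC is set. */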
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
+{
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
+static inline int cpu_mmu_index_kernel(CPUX86State *env)
+{
+ return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
+ ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
+#define CC_DST (env->cc_dst)
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_OP (env->cc_op)
+
+typedef CPUX86State CPUArchState;
+typedef X86CPU ArchCPU;
+
+#include "exec/cpu-all.h"
+#include "svm.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/i386/apic.h"
+#endif
+
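+/* Gather the state that identifies a translation block: pc is the CS base
+ * plus EIP, and flags combine hflags with the eflags bits that affect
+ * code generation. */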
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *cs_base = env->segs[R_CS].base;
+ *pc = *cs_base + env->eip;
+ *flags = env->hflags |
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+}
+
+void do_cpu_init(X86CPU *cpu);
+void do_cpu_sipi(X86CPU *cpu);
+
+#define MCE_INJECT_BROADCAST 1
+#define MCE_INJECT_UNCOND_AO 2
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+ uint64_t status, uint64_t mcg_status, uint64_t addr,
+ uint64_t misc, int flags);
+
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+
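+/* Under TCG the CC flags and DF are maintained outside env->eflags; fold
+ * them back in to produce the architectural EFLAGS value. */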
+static inline uint32_t cpu_compute_eflags(CPUX86State *env)
+{
+ uint32_t eflags = env->eflags;
+ if (tcg_enabled()) {
+ eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+ }
+ return eflags;
+}
+
+static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
+{
+ return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
+}
+
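+/* The A20 line is always treated as enabled while the CPU is in SMM */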
+static inline int32_t x86_get_a20_mask(CPUX86State *env)
+{
+ if (env->hflags & HF_SMM_MASK) {
+ return -1;
+ } else {
+ return env->a20_mask;
+ }
+}
+
+static inline bool cpu_has_vmx(CPUX86State *env)
+{
+ return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
+}
+
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+ return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
+}
+
+/*
+ * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
+ * Once set, CR4.VMXE must remain set as long as the vCPU is in
+ * VMX operation. This is because CR4.VMXE is one of the bits set
+ * in MSR_IA32_VMX_CR4_FIXED1.
+ *
+ * There is one exception to the above statement: when a vCPU enters
+ * SMM mode, it temporarily exits VMX operation and may also reset
+ * CR4.VMXE during execution in SMM mode.
+ * When the vCPU exits SMM mode, the vCPU state is restored to be in
+ * VMX operation and CR4.VMXE is restored to its original value of
+ * being set.
+ *
+ * Therefore, when the vCPU is not in SMM mode, we can infer whether
+ * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
+ * know for certain.
+ */
+static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
+{
+ return cpu_has_vmx(env) &&
+ ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
+}
+
+/* excp_helper.c */
+int get_pg_mode(CPUX86State *env);
+
+/* fpu_helper.c */
+void update_fp_status(CPUX86State *env);
+void update_mxcsr_status(CPUX86State *env);
+void update_mxcsr_from_sse_status(CPUX86State *env);
+
+static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+{
+ env->mxcsr = mxcsr;
+ if (tcg_enabled()) {
+ update_mxcsr_status(env);
+ }
+}
+
+static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
+{
+ env->fpuc = fpuc;
+ if (tcg_enabled()) {
+ update_fp_status(env);
+ }
+}
+
+/* mem_helper.c */
+void helper_lock_init(void);
+
+/* svm_helper.c */
+#ifdef CONFIG_USER_ONLY
+static inline void
+cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param, uintptr_t retaddr)
+{ /* no-op */ }
+static inline bool
+cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
+{ return false; }
+#else
+void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param, uintptr_t retaddr);
+bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type);
+#endif
+
+/* apic.c */
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
+ TPRAccess access);
+
+/* Special values for X86CPUVersion: */
+
+/* Resolve to latest CPU version */
+#define CPU_VERSION_LATEST -1
+
+/*
+ * Resolve to version defined by current machine type.
+ * See x86_cpu_set_default_version()
+ */
+#define CPU_VERSION_AUTO -2
+
+/* Don't resolve to any versioned CPU models, like old QEMU versions did */
+#define CPU_VERSION_LEGACY 0
+
+typedef int X86CPUVersion;
+
+/*
+ * Set default CPU model version for CPU models having
+ * version == CPU_VERSION_AUTO.
+ */
+void x86_cpu_set_default_version(X86CPUVersion version);
+
+#define APIC_DEFAULT_ADDRESS 0xfee00000
+#define APIC_SPACE_SIZE 0x100000
+
+/* cpu-dump.c */
+void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
+
+/* cpu.c */
+bool cpu_is_bsp(X86CPU *cpu);
+
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
+void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
+void x86_update_hflags(CPUX86State* env);
+
+static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
+{
+ return !!(cpu->hyperv_features & BIT(feat));
+}
+
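+/* Compute the CR4 bits that must stay zero given the enabled CPUID features */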
+static inline uint64_t cr4_reserved_bits(CPUX86State *env)
+{
+ uint64_t reserved_bits = CR4_RESERVED_MASK;
+ if (!env->features[FEAT_XSAVE]) {
+ reserved_bits |= CR4_OSXSAVE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
+ reserved_bits |= CR4_SMEP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+ reserved_bits |= CR4_SMAP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
+ reserved_bits |= CR4_FSGSBASE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+ reserved_bits |= CR4_PKE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
+ reserved_bits |= CR4_LA57_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
+ reserved_bits |= CR4_UMIP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
+ reserved_bits |= CR4_PKS_MASK;
+ }
+ return reserved_bits;
+}
+
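+/* A virtual interrupt (SVM V_IRQ) can be taken if one is pending and
+ * either the TPR is ignored or the interrupt priority is at least the
+ * virtual TPR. */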
+static inline bool ctl_has_irq(CPUX86State *env)
+{
+ uint32_t int_prio;
+ uint32_t tpr;
+
+ int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
+ tpr = env->int_ctl & V_TPR_MASK;
+
+ if (env->int_ctl & V_IGN_TPR_MASK) {
+ return (env->int_ctl & V_IRQ_MASK);
+ }
+
+ return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+}
+
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+ int *prot);
+#if defined(TARGET_X86_64) && \
+ defined(CONFIG_USER_ONLY) && \
+ defined(CONFIG_LINUX)
+# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
+#endif
+
+#endif /* I386_CPU_H */
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
new file mode 100644
index 000000000..098a2ad15
--- /dev/null
+++ b/target/i386/gdbstub.c
@@ -0,0 +1,397 @@
+/*
+ * x86 gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_X86_64
+static const int gpr_map[16] = {
+ R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
+ 8, 9, 10, 11, 12, 13, 14, 15
+};
+#else
+#define gpr_map gpr_map32
+#endif
+static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
+/*
+ * Keep these in sync with assignment to
+ * gdb_num_core_regs in target/i386/cpu.c
+ * and with the machine description
+ */
+
+/*
+ * SEG: 6 segments, plus fs_base, gs_base, kernel_gs_base
+ */
+
+/*
+ * general regs -----> 8 or 16
+ */
+#define IDX_NB_IP 1
+#define IDX_NB_FLAGS 1
+#define IDX_NB_SEG (6 + 3)
+#define IDX_NB_CTL 6
+#define IDX_NB_FP 16
+/*
+ * fpu regs ----------> 8 or 16
+ */
+#define IDX_NB_MXCSR 1
+/*
+ * total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
+ */
+
+#define IDX_IP_REG CPU_NB_REGS
+#define IDX_FLAGS_REG (IDX_IP_REG + IDX_NB_IP)
+#define IDX_SEG_REGS (IDX_FLAGS_REG + IDX_NB_FLAGS)
+#define IDX_CTL_REGS (IDX_SEG_REGS + IDX_NB_SEG)
+#define IDX_FP_REGS (IDX_CTL_REGS + IDX_NB_CTL)
+#define IDX_XMM_REGS (IDX_FP_REGS + IDX_NB_FP)
+#define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
+
+#define IDX_CTL_CR0_REG (IDX_CTL_REGS + 0)
+#define IDX_CTL_CR2_REG (IDX_CTL_REGS + 1)
+#define IDX_CTL_CR3_REG (IDX_CTL_REGS + 2)
+#define IDX_CTL_CR4_REG (IDX_CTL_REGS + 3)
+#define IDX_CTL_CR8_REG (IDX_CTL_REGS + 4)
+#define IDX_CTL_EFER_REG (IDX_CTL_REGS + 5)
+
+#ifdef TARGET_X86_64
+#define GDB_FORCE_64 1
+#else
+#define GDB_FORCE_64 0
+#endif
+
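+/* Report registers as 64-bit when CS is 64-bit, or always on 64-bit
+ * builds, since GDB cannot change register sizes mid-session. */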
+static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
+{
+ if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) {
+ return gdb_get_reg64(buf, val);
+ }
+ return gdb_get_reg32(buf, val);
+}
+
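+/* Counterpart of gdb_read_reg_cs64: consume 8 bytes when CS is 64-bit,
+ * otherwise 4. */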
+static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
+{
+ if (hflags & HF_CS64_MASK) {
+ *val = ldq_p(buf);
+ return 8;
+ }
+ *val = ldl_p(buf);
+ return 4;
+}
+
+int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ uint64_t tpr;
+
+ /* N.B. GDB can't deal with changes in registers or sizes in the middle
+ of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
+ as if we're on a 64-bit cpu. */
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
+ } else if (n < CPU_NB_REGS32) {
+ return gdb_get_reg64(mem_buf,
+ env->regs[gpr_map[n]] & 0xffffffffUL);
+ } else {
+ return gdb_get_regl(mem_buf, 0);
+ }
+ } else {
+ return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+ floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
+ int len = gdb_get_reg64(mem_buf, cpu_to_le64(fp->low));
+ len += gdb_get_reg16(mem_buf, cpu_to_le16(fp->high));
+ return len;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
+ return gdb_get_reg128(mem_buf,
+ env->xmm_regs[n].ZMM_Q(0),
+ env->xmm_regs[n].ZMM_Q(1));
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->eip);
+ } else {
+ return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
+ }
+ } else {
+ return gdb_get_reg32(mem_buf, env->eip);
+ }
+ case IDX_FLAGS_REG:
+ return gdb_get_reg32(mem_buf, env->eflags);
+
+ case IDX_SEG_REGS:
+ return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
+ case IDX_SEG_REGS + 1:
+ return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
+ case IDX_SEG_REGS + 2:
+ return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
+ case IDX_SEG_REGS + 3:
+ return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
+ case IDX_SEG_REGS + 4:
+ return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
+ case IDX_SEG_REGS + 5:
+ return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
+ case IDX_SEG_REGS + 6:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base);
+ case IDX_SEG_REGS + 7:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base);
+
+ case IDX_SEG_REGS + 8:
+#ifdef TARGET_X86_64
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase);
+#else
+ return gdb_get_reg32(mem_buf, 0);
+#endif
+
+ case IDX_FP_REGS + 8:
+ return gdb_get_reg32(mem_buf, env->fpuc);
+ case IDX_FP_REGS + 9:
+ return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
+ (env->fpstt & 0x7) << 11);
+ case IDX_FP_REGS + 10:
+ return gdb_get_reg32(mem_buf, 0); /* ftag */
+ case IDX_FP_REGS + 11:
+ return gdb_get_reg32(mem_buf, 0); /* fiseg */
+ case IDX_FP_REGS + 12:
+ return gdb_get_reg32(mem_buf, 0); /* fioff */
+ case IDX_FP_REGS + 13:
+ return gdb_get_reg32(mem_buf, 0); /* foseg */
+ case IDX_FP_REGS + 14:
+ return gdb_get_reg32(mem_buf, 0); /* fooff */
+ case IDX_FP_REGS + 15:
+ return gdb_get_reg32(mem_buf, 0); /* fop */
+
+ case IDX_MXCSR_REG:
+ update_mxcsr_from_sse_status(env);
+ return gdb_get_reg32(mem_buf, env->mxcsr);
+
+ case IDX_CTL_CR0_REG:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]);
+ case IDX_CTL_CR2_REG:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]);
+ case IDX_CTL_CR3_REG:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]);
+ case IDX_CTL_CR4_REG:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]);
+ case IDX_CTL_CR8_REG:
+#ifndef CONFIG_USER_ONLY
+ tpr = cpu_get_apic_tpr(cpu->apic_state);
+#else
+ tpr = 0;
+#endif
+ return gdb_read_reg_cs64(env->hflags, mem_buf, tpr);
+
+ case IDX_CTL_EFER_REG:
+ return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);
+ }
+ }
+ return 0;
+}
+
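+/* Write back a segment selector from GDB, reloading the cached
+ * base/limit/flags so they stay consistent with the new selector. */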
+static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
+{
+ CPUX86State *env = &cpu->env;
+ uint16_t selector = ldl_p(mem_buf);
+
+ if (selector != env->segs[sreg].selector) {
+#if defined(CONFIG_USER_ONLY)
+ cpu_x86_load_seg(env, sreg, selector);
+#else
+ unsigned int limit, flags;
+ target_ulong base;
+
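+ /* In real or vm86 mode, synthesize a 64 KiB real-mode style
+ descriptor from the selector; otherwise look the descriptor up
+ in the descriptor tables. */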
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+ base = selector << 4;
+ limit = 0xffff;
+ flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
+ } else {
+ if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
+ &flags)) {
+ return 4;
+ }
+ }
+ cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
+#endif
+ }
+ return 4;
+}
+
+int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ target_ulong tmp;
+ int len;
+
+ /* N.B. GDB can't deal with changes in registers or sizes in the middle
+ of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
+ as if we're on a 64-bit cpu. */
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ env->regs[gpr_map[n]] = ldtul_p(mem_buf);
+ } else if (n < CPU_NB_REGS32) {
+ env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
+ }
+ return sizeof(target_ulong);
+ } else if (n < CPU_NB_REGS32) {
+ n = gpr_map32[n];
+ env->regs[n] &= ~0xffffffffUL;
+ env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+ floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
+ fp->low = le64_to_cpu(* (uint64_t *) mem_buf);
+ fp->high = le16_to_cpu(* (uint16_t *) (mem_buf + 8));
+ return 10;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
+ env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
+ env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
+ return 16;
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64) {
+ if (env->hflags & HF_CS64_MASK) {
+ env->eip = ldq_p(mem_buf);
+ } else {
+ env->eip = ldq_p(mem_buf) & 0xffffffffUL;
+ }
+ return 8;
+ } else {
+ env->eip &= ~0xffffffffUL;
+ env->eip |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ case IDX_FLAGS_REG:
+ env->eflags = ldl_p(mem_buf);
+ return 4;
+
+ case IDX_SEG_REGS:
+ return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
+ case IDX_SEG_REGS + 1:
+ return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
+ case IDX_SEG_REGS + 2:
+ return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
+ case IDX_SEG_REGS + 3:
+ return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
+ case IDX_SEG_REGS + 4:
+ return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
+ case IDX_SEG_REGS + 5:
+ return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
+ case IDX_SEG_REGS + 6:
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base);
+ case IDX_SEG_REGS + 7:
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base);
+ case IDX_SEG_REGS + 8:
+#ifdef TARGET_X86_64
+ return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase);
+#endif
+ return 4;
+
+ case IDX_FP_REGS + 8:
+ cpu_set_fpuc(env, ldl_p(mem_buf));
+ return 4;
+ case IDX_FP_REGS + 9:
+ tmp = ldl_p(mem_buf);
+ env->fpstt = (tmp >> 11) & 7;
+ env->fpus = tmp & ~0x3800;
+ return 4;
+ case IDX_FP_REGS + 10: /* ftag */
+ return 4;
+ case IDX_FP_REGS + 11: /* fiseg */
+ return 4;
+ case IDX_FP_REGS + 12: /* fioff */
+ return 4;
+ case IDX_FP_REGS + 13: /* foseg */
+ return 4;
+ case IDX_FP_REGS + 14: /* fooff */
+ return 4;
+ case IDX_FP_REGS + 15: /* fop */
+ return 4;
+
+ case IDX_MXCSR_REG:
+ cpu_set_mxcsr(env, ldl_p(mem_buf));
+ return 4;
+
+ case IDX_CTL_CR0_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_x86_update_cr0(env, tmp);
+#endif
+ return len;
+
+ case IDX_CTL_CR2_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ env->cr[2] = tmp;
+#endif
+ return len;
+
+ case IDX_CTL_CR3_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_x86_update_cr3(env, tmp);
+#endif
+ return len;
+
+ case IDX_CTL_CR4_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_x86_update_cr4(env, tmp);
+#endif
+ return len;
+
+ case IDX_CTL_CR8_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_set_apic_tpr(cpu->apic_state, tmp);
+#endif
+ return len;
+
+ case IDX_CTL_EFER_REG:
+ len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
+#ifndef CONFIG_USER_ONLY
+ cpu_load_efer(env, tmp);
+#endif
+ return len;
+ }
+ }
+ /* Unrecognised register. */
+ return 0;
+}
diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c
new file mode 100644
index 000000000..136630e9b
--- /dev/null
+++ b/target/i386/hax/hax-accel-ops.c
@@ -0,0 +1,102 @@
+/*
+ * QEMU HAX support
+ *
+ * Copyright IBM, Corp. 2008
+ * Red Hat, Inc. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Glauber Costa <gcosta@redhat.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "sysemu/runstate.h"
+#include "sysemu/cpus.h"
+#include "qemu/guest-random.h"
+
+#include "hax-accel-ops.h"
+
+static void *hax_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+
+ rcu_register_thread();
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ current_cpu = cpu;
+ hax_init_vcpu(cpu);
+ cpu_thread_signal_created(cpu);
+ qemu_guest_random_seed_thread_part2(cpu->random_seed);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = hax_smp_cpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+
+ qemu_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void hax_start_vcpu_thread(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, hax_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+#ifdef _WIN32
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
+}
+
+static void hax_accel_ops_class_init(ObjectClass *oc, void *data)
+{
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
+
+ ops->create_vcpu_thread = hax_start_vcpu_thread;
+ ops->kick_vcpu_thread = hax_kick_vcpu_thread;
+
+ ops->synchronize_post_reset = hax_cpu_synchronize_post_reset;
+ ops->synchronize_post_init = hax_cpu_synchronize_post_init;
+ ops->synchronize_state = hax_cpu_synchronize_state;
+ ops->synchronize_pre_loadvm = hax_cpu_synchronize_pre_loadvm;
+}
+
+static const TypeInfo hax_accel_ops_type = {
+ .name = ACCEL_OPS_NAME("hax"),
+
+ .parent = TYPE_ACCEL_OPS,
+ .class_init = hax_accel_ops_class_init,
+ .abstract = true,
+};
+
+static void hax_accel_ops_register_types(void)
+{
+ type_register_static(&hax_accel_ops_type);
+}
+type_init(hax_accel_ops_register_types);
diff --git a/target/i386/hax/hax-accel-ops.h b/target/i386/hax/hax-accel-ops.h
new file mode 100644
index 000000000..c7698519c
--- /dev/null
+++ b/target/i386/hax/hax-accel-ops.h
@@ -0,0 +1,31 @@
+/*
+ * Accelerator CPUS Interface
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HAX_CPUS_H
+#define HAX_CPUS_H
+
+#include "sysemu/cpus.h"
+
+#include "hax-interface.h"
+#include "hax-i386.h"
+
+int hax_init_vcpu(CPUState *cpu);
+int hax_smp_cpu_exec(CPUState *cpu);
+int hax_populate_ram(uint64_t va, uint64_t size);
+
+void hax_cpu_synchronize_state(CPUState *cpu);
+void hax_cpu_synchronize_post_reset(CPUState *cpu);
+void hax_cpu_synchronize_post_init(CPUState *cpu);
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu);
+
+int hax_vcpu_destroy(CPUState *cpu);
+void hax_raise_event(CPUState *cpu);
+void hax_reset_vcpu_state(void *opaque);
+
+#endif /* HAX_CPUS_H */
diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c
new file mode 100644
index 000000000..bf65ed6fa
--- /dev/null
+++ b/target/i386/hax/hax-all.c
@@ -0,0 +1,1140 @@
+/*
+ * QEMU HAX support
+ *
+ * Copyright IBM, Corp. 2008
+ * Red Hat, Inc. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Glauber Costa <gcosta@redhat.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * HAX common code for both Windows and Darwin
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+
+#include "qemu-common.h"
+#include "qemu/accel.h"
+#include "sysemu/reset.h"
+#include "sysemu/runstate.h"
+#include "hw/boards.h"
+
+#include "hax-accel-ops.h"
+
+#define DEBUG_HAX 0
+
+#define DPRINTF(fmt, ...) \
+ do { \
+ if (DEBUG_HAX) { \
+ fprintf(stdout, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/* Current version */
+const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */
+/* Minimum HAX kernel version */
+const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */
+
+static bool hax_allowed;
+
+struct hax_state hax_global;
+
+static void hax_vcpu_sync_state(CPUArchState *env, int modified);
+static int hax_arch_get_registers(CPUArchState *env);
+
+int hax_enabled(void)
+{
+ return hax_allowed;
+}
+
+int valid_hax_tunnel_size(uint16_t size)
+{
+ return size >= sizeof(struct hax_tunnel);
+}
+
+hax_fd hax_vcpu_get_fd(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = env_cpu(env)->hax_vcpu;
+ if (!vcpu) {
+ return HAX_INVALID_FD;
+ }
+ return vcpu->fd;
+}
+
+static int hax_get_capability(struct hax_state *hax)
+{
+ int ret;
+ struct hax_capabilityinfo capinfo, *cap = &capinfo;
+
+ ret = hax_capability(hax, cap);
+ if (ret) {
+ return ret;
+ }
+
+ if ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == HAX_CAP_STATUS_NOTWORKING) {
+ if (cap->winfo & HAX_CAP_FAILREASON_VT) {
+ DPRINTF("VT-x feature is not enabled, HAX driver will not work.\n");
+ } else if (cap->winfo & HAX_CAP_FAILREASON_NX) {
+ DPRINTF("NX feature is not enabled, HAX driver will not work.\n");
+ }
+ return -ENXIO;
+ }
+
+ if (!(cap->winfo & HAX_CAP_UG)) {
+ fprintf(stderr, "UG mode is not supported by the hardware.\n");
+ return -ENOTSUP;
+ }
+
+ hax->supports_64bit_ramblock = !!(cap->winfo & HAX_CAP_64BIT_RAMBLOCK);
+
+ if (cap->wstatus & HAX_CAP_MEMQUOTA) {
+ if (cap->mem_quota < hax->mem_quota) {
+ fprintf(stderr, "The VM memory needed exceeds the driver limit.\n");
+ return -ENOSPC;
+ }
+ }
+ return 0;
+}
+
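+/* Reject HAX modules older than hax_min_version, and modules that require
+ * a newer QEMU-side API than hax_cur_version. */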
+static int hax_version_support(struct hax_state *hax)
+{
+ int ret;
+ struct hax_module_version version;
+
+ ret = hax_mod_version(hax, &version);
+ if (ret < 0) {
+ return 0;
+ }
+
+ if (hax_min_version > version.cur_version) {
+ fprintf(stderr, "Incompatible HAX module version %d,",
+ version.cur_version);
+ fprintf(stderr, "requires minimum version %d\n", hax_min_version);
+ return 0;
+ }
+ if (hax_cur_version < version.compat_version) {
+ fprintf(stderr, "Incompatible QEMU HAX API version %x,",
+ hax_cur_version);
+ fprintf(stderr, "requires minimum HAX API version %x\n",
+ version.compat_version);
+ return 0;
+ }
+
+ return 1;
+}
+
+int hax_vcpu_create(int id)
+{
+ struct hax_vcpu_state *vcpu = NULL;
+ int ret;
+
+ if (!hax_global.vm) {
+ fprintf(stderr, "vcpu %x created failed, vm is null\n", id);
+ return -1;
+ }
+
+ if (hax_global.vm->vcpus[id]) {
+ fprintf(stderr, "vcpu %x allocated already\n", id);
+ return 0;
+ }
+
+ vcpu = g_new0(struct hax_vcpu_state, 1);
+
+ ret = hax_host_create_vcpu(hax_global.vm->fd, id);
+ if (ret) {
+ fprintf(stderr, "Failed to create vcpu %x\n", id);
+ goto error;
+ }
+
+ vcpu->vcpu_id = id;
+ vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
+ if (hax_invalid_fd(vcpu->fd)) {
+ fprintf(stderr, "Failed to open the vcpu\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hax_global.vm->vcpus[id] = vcpu;
+
+ ret = hax_host_setup_vcpu_channel(vcpu);
+ if (ret) {
+ fprintf(stderr, "Invalid hax tunnel size\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ return 0;
+
+ error:
+ /* vcpu and tunnel will be closed automatically */
+ if (vcpu && !hax_invalid_fd(vcpu->fd)) {
+ hax_close_fd(vcpu->fd);
+ }
+
+ hax_global.vm->vcpus[id] = NULL;
+ g_free(vcpu);
+ return -1;
+}
+
+int hax_vcpu_destroy(CPUState *cpu)
+{
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+
+ if (!hax_global.vm) {
+ fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
+ return -1;
+ }
+
+ if (!vcpu) {
+ return 0;
+ }
+
+ /*
+ * 1. The hax_tunnel is also destroyed when vcpu is destroyed
+ * 2. closing the fd causes the hax module's vcpu to be cleaned up
+ */
+ hax_close_fd(vcpu->fd);
+ hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
+ g_free(vcpu);
+ return 0;
+}
+
+int hax_init_vcpu(CPUState *cpu)
+{
+ int ret;
+
+ ret = hax_vcpu_create(cpu->cpu_index);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to create HAX vcpu\n");
+ exit(-1);
+ }
+
+ cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
+ cpu->vcpu_dirty = true;
+ qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
+
+ return ret;
+}
+
+struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus)
+{
+ struct hax_vm *vm;
+ int vm_id = 0, ret, i;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return NULL;
+ }
+
+ if (hax->vm) {
+ return hax->vm;
+ }
+
+ if (max_cpus > HAX_MAX_VCPU) {
+ fprintf(stderr, "Maximum VCPU number QEMU supported is %d\n", HAX_MAX_VCPU);
+ return NULL;
+ }
+
+ vm = g_new0(struct hax_vm, 1);
+
+ ret = hax_host_create_vm(hax, &vm_id);
+ if (ret) {
+ fprintf(stderr, "Failed to create vm %x\n", ret);
+ goto error;
+ }
+ vm->id = vm_id;
+ vm->fd = hax_host_open_vm(hax, vm_id);
+ if (hax_invalid_fd(vm->fd)) {
+ fprintf(stderr, "Failed to open vm %d\n", vm_id);
+ goto error;
+ }
+
+ vm->numvcpus = max_cpus;
+ vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus);
+ for (i = 0; i < vm->numvcpus; i++) {
+ vm->vcpus[i] = NULL;
+ }
+
+ hax->vm = vm;
+ return vm;
+
+ error:
+ g_free(vm);
+ hax->vm = NULL;
+ return NULL;
+}
+
+int hax_vm_destroy(struct hax_vm *vm)
+{
+ int i;
+
+ for (i = 0; i < vm->numvcpus; i++)
+ if (vm->vcpus[i]) {
+ fprintf(stderr, "VCPU should be cleaned before vm clean\n");
+ return -1;
+ }
+ hax_close_fd(vm->fd);
+ vm->numvcpus = 0;
+ g_free(vm->vcpus);
+ g_free(vm);
+ hax_global.vm = NULL;
+ return 0;
+}
+
+static int hax_init(ram_addr_t ram_size, int max_cpus)
+{
+ struct hax_state *hax = NULL;
+ struct hax_qemu_version qversion;
+ int ret;
+
+ hax = &hax_global;
+
+ memset(hax, 0, sizeof(struct hax_state));
+ hax->mem_quota = ram_size;
+
+ hax->fd = hax_mod_open();
+ if (hax_invalid_fd(hax->fd)) {
+ hax->fd = 0;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = hax_get_capability(hax);
+
+ if (ret) {
+ if (ret != -ENOSPC) {
+ ret = -EINVAL;
+ }
+ goto error;
+ }
+
+ if (!hax_version_support(hax)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax->vm = hax_vm_create(hax, max_cpus);
+ if (!hax->vm) {
+ fprintf(stderr, "Failed to create HAX VM\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax_memory_init();
+
+ qversion.cur_version = hax_cur_version;
+ qversion.min_version = hax_min_version;
+ hax_notify_qemu_version(hax->vm->fd, &qversion);
+
+ return ret;
+ error:
+ if (hax->vm) {
+ hax_vm_destroy(hax->vm);
+ }
+ if (hax->fd) {
+ hax_mod_close(hax);
+ }
+
+ return ret;
+}
+
+static int hax_accel_init(MachineState *ms)
+{
+ int ret = hax_init(ms->ram_size, (int)ms->smp.max_cpus);
+
+ if (ret && (ret != -ENOSPC)) {
+ fprintf(stderr, "No accelerator found.\n");
+ } else {
+ fprintf(stdout, "HAX is %s and emulator runs in %s mode.\n",
+ !ret ? "working" : "not working",
+ !ret ? "fast virt" : "emulation");
+ }
+ return ret;
+}
+
+static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft)
+{
+ if (hft->direction < 2) {
+ cpu_physical_memory_rw(hft->gpa, &hft->value, hft->size,
+ hft->direction);
+ } else {
+ /*
+ * HAX API v4 supports transferring data between two MMIO addresses,
+ * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
+ * hft->direction == 2: gpa ==> gpa2
+ */
+ uint64_t value;
+ cpu_physical_memory_read(hft->gpa, &value, hft->size);
+ cpu_physical_memory_write(hft->gpa2, &value, hft->size);
+ }
+
+ return 0;
+}
+
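+/* Replay a port I/O exit: transfer 'count' items of 'size' bytes, walking
+ * the buffer forwards or backwards according to the direction flag DF. */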
+static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
+ int direction, int size, int count, void *buffer)
+{
+ uint8_t *ptr;
+ int i;
+ MemTxAttrs attrs = { 0 };
+
+ if (!df) {
+ ptr = (uint8_t *) buffer;
+ } else {
+ ptr = buffer + size * count - size;
+ }
+ for (i = 0; i < count; i++) {
+ address_space_rw(&address_space_io, port, attrs,
+ ptr, size, direction == HAX_EXIT_IO_OUT);
+ if (!df) {
+ ptr += size;
+ } else {
+ ptr -= size;
+ }
+ }
+
+ return 0;
+}
+
+static int hax_vcpu_interrupt(CPUArchState *env)
+{
+ CPUState *cpu = env_cpu(env);
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+ /*
+ * Try to inject an interrupt if the guest can accept it
+ * Unlike KVM, the HAX kernel checks eflags itself, instead of QEMU
+ */
+ if (ht->ready_for_interrupt_injection &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ int irq;
+
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ hax_inject_interrupt(env, irq);
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+ }
+
+ /* If we have an interrupt but the guest is not ready to receive an
+ * interrupt, request an interrupt window exit. This will
+ * cause a return to userspace as soon as the guest is ready to
+ * receive interrupts. */
+ if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ ht->request_interrupt_window = 1;
+ } else {
+ ht->request_interrupt_window = 0;
+ }
+ return 0;
+}
+
+void hax_raise_event(CPUState *cpu)
+{
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+
+ if (!vcpu) {
+ return;
+ }
+ vcpu->tunnel->user_event_pending = 1;
+}
+
+/*
+ * Ask the hax kernel module to run the CPU for us until:
+ * 1. The guest crashes or shuts down
+ * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO instruction
+ * 3. The guest executes HLT
+ * 4. QEMU has a signal/event pending
+ * 5. An unknown VMX exit happens
+ */
+static int hax_vcpu_hax_exec(CPUArchState *env)
+{
+ int ret = 0;
+ CPUState *cpu = env_cpu(env);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+ if (!hax_enabled()) {
+ DPRINTF("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip);
+ return 0;
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
+
+ /* After a vcpu is halted (either because it is an AP and has just been
+ * reset, or because it has executed the HLT instruction), it will not be
+ * run (hax_vcpu_run()) until it is unhalted. The next few if blocks check
+ * for events that may change the halted state of this vcpu:
+ * a) Maskable interrupt, when RFLAGS.IF is 1;
+ * Note: env->eflags may not reflect the current RFLAGS state, because
+ * it is not updated after each hax_vcpu_run(). We cannot afford
+ * to fail to recognize any unhalt-by-maskable-interrupt event
+ * (in which case the vcpu will halt forever), and yet we cannot
+ * afford the overhead of hax_vcpu_sync_state(). The current
+ * solution is to err on the side of caution and have the HLT
+ * handler (see case HAX_EXIT_HLT below) unconditionally set the
+ * IF_MASK bit in env->eflags, which, in effect, disables the
+ * RFLAGS.IF check.
+ * b) NMI;
+ * c) INIT signal;
+ * d) SIPI signal.
+ */
+ if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->halted = 0;
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
+ cpu->cpu_index);
+ do_cpu_init(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
+ cpu->cpu_index);
+ hax_vcpu_sync_state(env, 0);
+ do_cpu_sipi(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
+ if (cpu->halted) {
+ /* If this vcpu is halted, we must not ask HAXM to run it. Instead, we
+ * break out of hax_smp_cpu_exec() as if this vcpu had executed HLT.
+ * That way, this vcpu thread will be trapped in qemu_wait_io_event(),
+ * until the vcpu is unhalted.
+ */
+ cpu->exception_index = EXCP_HLT;
+ return 0;
+ }
+
+ do {
+ int hax_ret;
+
+ if (cpu->exit_request) {
+ ret = 1;
+ break;
+ }
+
+ hax_vcpu_interrupt(env);
+
+ qemu_mutex_unlock_iothread();
+ cpu_exec_start(cpu);
+ hax_ret = hax_vcpu_run(vcpu);
+ cpu_exec_end(cpu);
+ qemu_mutex_lock_iothread();
+
+ /* Simply continue vcpu_run if the system call was interrupted */
+ if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
+ DPRINTF("io window interrupted\n");
+ continue;
+ }
+
+ if (hax_ret < 0) {
+ fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
+ abort();
+ }
+ switch (ht->_exit_status) {
+ case HAX_EXIT_IO:
+ ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
+ ht->pio._direction,
+ ht->pio._size, ht->pio._count, vcpu->iobuf);
+ break;
+ case HAX_EXIT_FAST_MMIO:
+ ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf);
+ break;
+ /* Guest state changed, currently only for shutdown */
+ case HAX_EXIT_STATECHANGE:
+ fprintf(stdout, "VCPU shutdown request\n");
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ hax_vcpu_sync_state(env, 0);
+ ret = 1;
+ break;
+ case HAX_EXIT_UNKNOWN_VMEXIT:
+ fprintf(stderr, "Unknown VMX exit %x from guest\n",
+ ht->_exit_reason);
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ hax_vcpu_sync_state(env, 0);
+ cpu_dump_state(cpu, stderr, 0);
+ ret = -1;
+ break;
+ case HAX_EXIT_HLT:
+ if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ /* a hlt instruction with interrupts disabled halts the vcpu */
+ env->eflags |= IF_MASK;
+ cpu->halted = 1;
+ cpu->exception_index = EXCP_HLT;
+ ret = 1;
+ }
+ break;
+ /* for these exits, simply continue running in the hax module */
+ case HAX_EXIT_INTERRUPT:
+ case HAX_EXIT_PAUSED:
+ break;
+ case HAX_EXIT_MMIO:
+ /* Should not happen on UG system */
+ fprintf(stderr, "HAX: unsupported MMIO emulation\n");
+ ret = -1;
+ break;
+ case HAX_EXIT_REAL:
+ /* Should not happen on UG system */
+ fprintf(stderr, "HAX: unimplemented real mode emulation\n");
+ ret = -1;
+ break;
+ default:
+ fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status);
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ hax_vcpu_sync_state(env, 0);
+ cpu_dump_state(cpu, stderr, 0);
+ ret = 1;
+ break;
+ }
+ } while (!ret);
+
+ if (cpu->exit_request) {
+ cpu->exit_request = 0;
+ cpu->exception_index = EXCP_INTERRUPT;
+ }
+ return ret < 0;
+}
+
+static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_arch_get_registers(env);
+ cpu->vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_state(CPUState *cpu)
+{
+ if (!cpu->vcpu_dirty) {
+ run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ }
+}
+
+static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_vcpu_sync_state(env, 1);
+ cpu->vcpu_dirty = false;
+}
+
+void hax_cpu_synchronize_post_reset(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_vcpu_sync_state(env, 1);
+ cpu->vcpu_dirty = false;
+}
+
+void hax_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+ cpu->vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
+int hax_smp_cpu_exec(CPUState *cpu)
+{
+ CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
+ int fatal;
+ int ret;
+
+ while (1) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
+ ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ break;
+ }
+
+ fatal = hax_vcpu_hax_exec(env);
+
+ if (fatal) {
+ fprintf(stderr, "Unsupported HAX vcpu return\n");
+ abort();
+ }
+ }
+
+ return ret;
+}
+
+static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->operand_size = 0;
+ lhs->desc = 1;
+ lhs->long_mode = 0;
+ lhs->granularity = 0;
+ lhs->available = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
+ | (rhs->present * DESC_P_MASK)
+ | (rhs->dpl << DESC_DPL_SHIFT)
+ | (rhs->operand_size << DESC_B_SHIFT)
+ | (rhs->desc * DESC_S_MASK)
+ | (rhs->long_mode << DESC_L_SHIFT)
+ | (rhs->granularity * DESC_G_MASK) | (rhs->available * DESC_AVL_MASK);
+}
+
+static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = rhs->selector & 3;
+ lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
+ lhs->desc = (flags & DESC_S_MASK) != 0;
+ lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
+ lhs->granularity = (flags & DESC_G_MASK) != 0;
+ lhs->available = (flags & DESC_AVL_MASK) != 0;
+}
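
get_seg()/set_seg() translate between HAX's explicit bitfields and QEMU's
packed descriptor flags word (note that set_seg() derives dpl from the
selector's RPL rather than from the flags). A minimal round-trip sketch,
assuming the DESC_* shift/mask values below mirror QEMU's target/i386/cpu.h:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed to mirror QEMU's target/i386/cpu.h */
    #define DESC_TYPE_SHIFT 8
    #define DESC_S_MASK     (1u << 12)
    #define DESC_DPL_SHIFT  13
    #define DESC_P_MASK     (1u << 15)
    #define DESC_B_SHIFT    22
    #define DESC_G_MASK     (1u << 23)

    int main(void)
    {
        /* flat 32-bit ring-3 code segment: type 0xb, S=1, DPL=3, P=1, D/B=1, G=1 */
        uint32_t flags = (0xbu << DESC_TYPE_SHIFT) | DESC_S_MASK
                       | (3u << DESC_DPL_SHIFT) | DESC_P_MASK
                       | (1u << DESC_B_SHIFT) | DESC_G_MASK;

        /* unpack as set_seg() would, then repack as get_seg() does */
        unsigned type = (flags >> DESC_TYPE_SHIFT) & 15;
        unsigned dpl = (flags >> DESC_DPL_SHIFT) & 3;
        uint32_t back = (type << DESC_TYPE_SHIFT) | DESC_S_MASK
                      | (dpl << DESC_DPL_SHIFT) | DESC_P_MASK
                      | (1u << DESC_B_SHIFT) | DESC_G_MASK;

        assert(back == flags); /* lossless: every field has a disjoint bit range */
        return 0;
    }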
+
+static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
+{
+ target_ulong reg = *hax_reg;
+
+ if (set) {
+ *hax_reg = *qemu_reg;
+ } else {
+ *qemu_reg = reg;
+ }
+}
+
+/* The sregs have already been synced with the HAX kernel before this call */
+static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ get_seg(&env->segs[R_CS], &sregs->_cs);
+ get_seg(&env->segs[R_DS], &sregs->_ds);
+ get_seg(&env->segs[R_ES], &sregs->_es);
+ get_seg(&env->segs[R_FS], &sregs->_fs);
+ get_seg(&env->segs[R_GS], &sregs->_gs);
+ get_seg(&env->segs[R_SS], &sregs->_ss);
+
+ get_seg(&env->tr, &sregs->_tr);
+ get_seg(&env->ldt, &sregs->_ldt);
+ env->idt.limit = sregs->_idt.limit;
+ env->idt.base = sregs->_idt.base;
+ env->gdt.limit = sregs->_gdt.limit;
+ env->gdt.base = sregs->_gdt.base;
+ return 0;
+}
+
+static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_seg(&sregs->_es, &env->segs[R_ES]);
+ set_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_seg(&sregs->_ss, &env->segs[R_SS]);
+
+ if (env->cr[0] & CR0_PE_MASK) {
+ /* force ss cpl to cs cpl */
+ sregs->_ss.selector = (sregs->_ss.selector & ~3) |
+ (sregs->_cs.selector & 3);
+ sregs->_ss.dpl = sregs->_ss.selector & 3;
+ }
+ }
+
+ set_seg(&sregs->_tr, &env->tr);
+ set_seg(&sregs->_ldt, &env->ldt);
+ sregs->_idt.limit = env->idt.limit;
+ sregs->_idt.base = env->idt.base;
+ sregs->_gdt.limit = env->gdt.limit;
+ sregs->_gdt.base = env->gdt.base;
+ return 0;
+}
+
+static int hax_sync_vcpu_register(CPUArchState *env, int set)
+{
+ struct vcpu_state_t regs;
+ int ret;
+ memset(&regs, 0, sizeof(struct vcpu_state_t));
+
+ if (!set) {
+ ret = hax_sync_vcpu_state(env, &regs, 0);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+
+ /* general-purpose registers */
+ hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
+ hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
+ hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
+ hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
+ hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
+ hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
+ hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
+ hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
+#ifdef TARGET_X86_64
+ hax_getput_reg(&regs._r8, &env->regs[8], set);
+ hax_getput_reg(&regs._r9, &env->regs[9], set);
+ hax_getput_reg(&regs._r10, &env->regs[10], set);
+ hax_getput_reg(&regs._r11, &env->regs[11], set);
+ hax_getput_reg(&regs._r12, &env->regs[12], set);
+ hax_getput_reg(&regs._r13, &env->regs[13], set);
+ hax_getput_reg(&regs._r14, &env->regs[14], set);
+ hax_getput_reg(&regs._r15, &env->regs[15], set);
+#endif
+ hax_getput_reg(&regs._rflags, &env->eflags, set);
+ hax_getput_reg(&regs._rip, &env->eip, set);
+
+ if (set) {
+ regs._cr0 = env->cr[0];
+ regs._cr2 = env->cr[2];
+ regs._cr3 = env->cr[3];
+ regs._cr4 = env->cr[4];
+ hax_set_segments(env, &regs);
+ } else {
+ env->cr[0] = regs._cr0;
+ env->cr[2] = regs._cr2;
+ env->cr[3] = regs._cr3;
+ env->cr[4] = regs._cr4;
+ hax_get_segments(env, &regs);
+ }
+
+ if (set) {
+ ret = hax_sync_vcpu_state(env, &regs, 1);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void hax_msr_entry_set(struct vmx_msr *item, uint32_t index,
+ uint64_t value)
+{
+ item->entry = index;
+ item->value = value;
+}
+
+static int hax_get_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs = md.entries;
+ int ret, i, n;
+
+ n = 0;
+ msrs[n++].entry = MSR_IA32_SYSENTER_CS;
+ msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
+ msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
+ msrs[n++].entry = MSR_IA32_TSC;
+#ifdef TARGET_X86_64
+ msrs[n++].entry = MSR_EFER;
+ msrs[n++].entry = MSR_STAR;
+ msrs[n++].entry = MSR_LSTAR;
+ msrs[n++].entry = MSR_CSTAR;
+ msrs[n++].entry = MSR_FMASK;
+ msrs[n++].entry = MSR_KERNELGSBASE;
+#endif
+ md.nr_msr = n;
+ ret = hax_sync_msr(env, &md, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < md.done; i++) {
+ switch (msrs[i].entry) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].value;
+ break;
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].value;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_EFER:
+ env->efer = msrs[i].value;
+ break;
+ case MSR_STAR:
+ env->star = msrs[i].value;
+ break;
+ case MSR_LSTAR:
+ env->lstar = msrs[i].value;
+ break;
+ case MSR_CSTAR:
+ env->cstar = msrs[i].value;
+ break;
+ case MSR_FMASK:
+ env->fmask = msrs[i].value;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = msrs[i].value;
+ break;
+#endif
+ }
+ }
+
+ return 0;
+}
+
+static int hax_set_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs = md.entries;
+ int n = 0;
+
+ memset(&md, 0, sizeof(struct hax_msr_data));
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+#ifdef TARGET_X86_64
+ hax_msr_entry_set(&msrs[n++], MSR_EFER, env->efer);
+ hax_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+ hax_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
+ hax_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
+ hax_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
+ hax_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
+#endif
+ md.nr_msr = n;
+ md.done = 0;
+
+ return hax_sync_msr(env, &md, 1);
+}
+
+static int hax_get_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i, ret;
+
+ ret = hax_sync_fpu(env, &fpu, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((fpu.ftw >> i) & 1);
+ }
+ memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));
+
+ for (i = 0; i < 8; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.mmx_1[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.mmx_1[i][8]);
+ if (CPU_NB_REGS > 8) {
+ env->xmm_regs[i + 8].ZMM_Q(0) = ldq_p(&fpu.mmx_2[i][0]);
+ env->xmm_regs[i + 8].ZMM_Q(1) = ldq_p(&fpu.mmx_2[i][8]);
+ }
+ }
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
+
+static int hax_set_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof(fpu));
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+
+ for (i = 0; i < 8; ++i) {
+ fpu.ftw |= (!env->fptags[i]) << i;
+ }
+
+ memcpy(fpu.st_mm, env->fpregs, sizeof(env->fpregs));
+ for (i = 0; i < 8; i++) {
+ stq_p(&fpu.mmx_1[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.mmx_1[i][8], env->xmm_regs[i].ZMM_Q(1));
+ if (CPU_NB_REGS > 8) {
+ stq_p(&fpu.mmx_2[i][0], env->xmm_regs[i + 8].ZMM_Q(0));
+ stq_p(&fpu.mmx_2[i][8], env->xmm_regs[i + 8].ZMM_Q(1));
+ }
+ }
+
+ fpu.mxcsr = env->mxcsr;
+
+ return hax_sync_fpu(env, &fpu, 1);
+}
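
The tag-word handling above bridges two conventions: FXSAVE's abridged FTW
has one bit per x87 register, set when that register is valid, while QEMU's
env->fptags[i] is 1 when register i is empty, hence the inversion in both
directions. A small self-contained sketch of the round trip:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t ftw = 0x03;   /* abridged FTW: ST0/ST1 valid, ST2..ST7 empty */
        uint8_t fptags[8];
        uint8_t back = 0;
        int i;

        for (i = 0; i < 8; i++) {
            fptags[i] = !((ftw >> i) & 1);   /* as in hax_get_fpu(): 1 = empty */
        }
        assert(fptags[0] == 0 && fptags[2] == 1);

        for (i = 0; i < 8; i++) {
            back |= (!fptags[i]) << i;       /* as in hax_set_fpu() */
        }
        assert(back == ftw);
        return 0;
    }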
+
+static int hax_arch_get_registers(CPUArchState *env)
+{
+ int ret;
+
+ ret = hax_sync_vcpu_register(env, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = hax_get_fpu(env);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = hax_get_msrs(env);
+ if (ret < 0) {
+ return ret;
+ }
+
+ x86_update_hflags(env);
+ return 0;
+}
+
+static int hax_arch_set_registers(CPUArchState *env)
+{
+ int ret;
+ ret = hax_sync_vcpu_register(env, 1);
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sync vcpu reg\n");
+ return ret;
+ }
+ ret = hax_set_fpu(env);
+ if (ret < 0) {
+ fprintf(stderr, "FPU failed\n");
+ return ret;
+ }
+ ret = hax_set_msrs(env);
+ if (ret < 0) {
+ fprintf(stderr, "MSR failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hax_vcpu_sync_state(CPUArchState *env, int modified)
+{
+ if (hax_enabled()) {
+ if (modified) {
+ hax_arch_set_registers(env);
+ } else {
+ hax_arch_get_registers(env);
+ }
+ }
+}
+
+/*
+ * Much simpler than KVM, at least at this first stage, because
+ * we don't need to consider device pass-through or the framebuffer,
+ * and we may even be able to drop the BIOS entirely.
+ */
+int hax_sync_vcpus(void)
+{
+ if (hax_enabled()) {
+ CPUState *cpu;
+
+ cpu = first_cpu;
+ if (!cpu) {
+ return 0;
+ }
+
+ for (; cpu != NULL; cpu = CPU_NEXT(cpu)) {
+ int ret;
+
+ ret = hax_arch_set_registers(cpu->env_ptr);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void hax_reset_vcpu_state(void *opaque)
+{
+ CPUState *cpu;
+ for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
+ cpu->hax_vcpu->tunnel->user_event_pending = 0;
+ cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
+ }
+}
+
+static void hax_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "HAX";
+ ac->init_machine = hax_accel_init;
+ ac->allowed = &hax_allowed;
+}
+
+static const TypeInfo hax_accel_type = {
+ .name = ACCEL_CLASS_NAME("hax"),
+ .parent = TYPE_ACCEL,
+ .class_init = hax_accel_class_init,
+};
+
+static void hax_type_init(void)
+{
+ type_register_static(&hax_accel_type);
+}
+
+type_init(hax_type_init);
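
With the AccelClass registered above, HAX becomes selectable like any other
accelerator. A typical invocation for this era of QEMU, assuming the HAXM
kernel module is installed (disk.img is a placeholder):

    qemu-system-x86_64 -accel hax -m 2048 -drive file=disk.img,format=raw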
diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h
new file mode 100644
index 000000000..efbb34623
--- /dev/null
+++ b/target/i386/hax/hax-i386.h
@@ -0,0 +1,96 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HAX_I386_H
+#define HAX_I386_H
+
+#include "cpu.h"
+#include "sysemu/hax.h"
+
+#ifdef CONFIG_POSIX
+typedef int hax_fd;
+#endif
+
+#ifdef CONFIG_WIN32
+typedef HANDLE hax_fd;
+#endif
+
+extern struct hax_state hax_global;
+struct hax_vcpu_state {
+ hax_fd fd;
+ int vcpu_id;
+ struct hax_tunnel *tunnel;
+ unsigned char *iobuf;
+};
+
+struct hax_state {
+ hax_fd fd; /* the global hax device interface */
+ uint32_t version;
+ struct hax_vm *vm;
+ uint64_t mem_quota;
+ bool supports_64bit_ramblock;
+};
+
+#define HAX_MAX_VCPU 0x10
+
+struct hax_vm {
+ hax_fd fd;
+ int id;
+ int numvcpus;
+ struct hax_vcpu_state **vcpus;
+};
+
+#ifdef NEED_CPU_H
+/* Functions exported to host specific mode */
+hax_fd hax_vcpu_get_fd(CPUArchState *env);
+int valid_hax_tunnel_size(uint16_t size);
+
+/* Host specific functions */
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
+int hax_inject_interrupt(CPUArchState *env, int vector);
+struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus);
+int hax_vcpu_run(struct hax_vcpu_state *vcpu);
+int hax_vcpu_create(int id);
+void hax_kick_vcpu_thread(CPUState *cpu);
+
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state,
+ int set);
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set);
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set);
+#endif
+
+int hax_vm_destroy(struct hax_vm *vm);
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap);
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion);
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags);
+
+/* Common host function */
+int hax_host_create_vm(struct hax_state *hax, int *vm_id);
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id);
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid);
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid);
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu);
+hax_fd hax_mod_open(void);
+void hax_memory_init(void);
+
+
+#ifdef CONFIG_POSIX
+#include "hax-posix.h"
+#endif
+
+#ifdef CONFIG_WIN32
+#include "hax-windows.h"
+#endif
+
+#include "hax-interface.h"
+
+#endif
diff --git a/target/i386/hax/hax-interface.h b/target/i386/hax/hax-interface.h
new file mode 100644
index 000000000..537ae084e
--- /dev/null
+++ b/target/i386/hax/hax-interface.h
@@ -0,0 +1,369 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* Interface with HAX kernel module */
+
+#ifndef HAX_INTERFACE_H
+#define HAX_INTERFACE_H
+
+/* The 512-byte FXSAVE area; it has three formats (see SDM Table 3-56) */
+struct fx_layout {
+ uint16_t fcw;
+ uint16_t fsw;
+ uint8_t ftw;
+ uint8_t res1;
+ uint16_t fop;
+ union {
+ struct {
+ uint32_t fip;
+ uint16_t fcs;
+ uint16_t res2;
+ };
+ uint64_t fpu_ip;
+ };
+ union {
+ struct {
+ uint32_t fdp;
+ uint16_t fds;
+ uint16_t res3;
+ };
+ uint64_t fpu_dp;
+ };
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ uint8_t st_mm[8][16];
+ uint8_t mmx_1[8][16];
+ uint8_t mmx_2[8][16];
+ uint8_t pad[96];
+} __attribute__ ((aligned(8)));
+
+struct vmx_msr {
+ uint64_t entry;
+ uint64_t value;
+} __attribute__ ((__packed__));
+
+/*
+ * A fixed array is not ideal, but it makes Mac support a bit easier by
+ * avoiding memory-mapping or copyin work.
+ */
+#define HAX_MAX_MSR_ARRAY 0x20
+struct hax_msr_data {
+ uint16_t nr_msr;
+ uint16_t done;
+ uint16_t pad[2];
+ struct vmx_msr entries[HAX_MAX_MSR_ARRAY];
+} __attribute__ ((__packed__));
+
+union interruptibility_state_t {
+ uint32_t raw;
+ struct {
+ uint32_t sti_blocking:1;
+ uint32_t movss_blocking:1;
+ uint32_t smi_blocking:1;
+ uint32_t nmi_blocking:1;
+ uint32_t reserved:28;
+ };
+ uint64_t pad;
+};
+
+typedef union interruptibility_state_t interruptibility_state_t;
+
+/* Segment descriptor */
+struct segment_desc_t {
+ uint16_t selector;
+ uint16_t _dummy;
+ uint32_t limit;
+ uint64_t base;
+ union {
+ struct {
+ uint32_t type:4;
+ uint32_t desc:1;
+ uint32_t dpl:2;
+ uint32_t present:1;
+ uint32_t:4;
+ uint32_t available:1;
+ uint32_t long_mode:1;
+ uint32_t operand_size:1;
+ uint32_t granularity:1;
+ uint32_t null:1;
+ uint32_t:15;
+ };
+ uint32_t ar;
+ };
+ uint32_t ipad;
+};
+
+typedef struct segment_desc_t segment_desc_t;
+
+struct vcpu_state_t {
+ union {
+ uint64_t _regs[16];
+ struct {
+ union {
+ struct {
+ uint8_t _al, _ah;
+ };
+ uint16_t _ax;
+ uint32_t _eax;
+ uint64_t _rax;
+ };
+ union {
+ struct {
+ uint8_t _cl, _ch;
+ };
+ uint16_t _cx;
+ uint32_t _ecx;
+ uint64_t _rcx;
+ };
+ union {
+ struct {
+ uint8_t _dl, _dh;
+ };
+ uint16_t _dx;
+ uint32_t _edx;
+ uint64_t _rdx;
+ };
+ union {
+ struct {
+ uint8_t _bl, _bh;
+ };
+ uint16_t _bx;
+ uint32_t _ebx;
+ uint64_t _rbx;
+ };
+ union {
+ uint16_t _sp;
+ uint32_t _esp;
+ uint64_t _rsp;
+ };
+ union {
+ uint16_t _bp;
+ uint32_t _ebp;
+ uint64_t _rbp;
+ };
+ union {
+ uint16_t _si;
+ uint32_t _esi;
+ uint64_t _rsi;
+ };
+ union {
+ uint16_t _di;
+ uint32_t _edi;
+ uint64_t _rdi;
+ };
+
+ uint64_t _r8;
+ uint64_t _r9;
+ uint64_t _r10;
+ uint64_t _r11;
+ uint64_t _r12;
+ uint64_t _r13;
+ uint64_t _r14;
+ uint64_t _r15;
+ };
+ };
+
+ union {
+ uint32_t _eip;
+ uint64_t _rip;
+ };
+
+ union {
+ uint32_t _eflags;
+ uint64_t _rflags;
+ };
+
+ segment_desc_t _cs;
+ segment_desc_t _ss;
+ segment_desc_t _ds;
+ segment_desc_t _es;
+ segment_desc_t _fs;
+ segment_desc_t _gs;
+ segment_desc_t _ldt;
+ segment_desc_t _tr;
+
+ segment_desc_t _gdt;
+ segment_desc_t _idt;
+
+ uint64_t _cr0;
+ uint64_t _cr2;
+ uint64_t _cr3;
+ uint64_t _cr4;
+
+ uint64_t _dr0;
+ uint64_t _dr1;
+ uint64_t _dr2;
+ uint64_t _dr3;
+ uint64_t _dr6;
+ uint64_t _dr7;
+ uint64_t _pde;
+
+ uint32_t _efer;
+
+ uint32_t _sysenter_cs;
+ uint64_t _sysenter_eip;
+ uint64_t _sysenter_esp;
+
+ uint32_t _activity_state;
+ uint32_t pad;
+ interruptibility_state_t _interruptibility_state;
+};
+
+/* HAX exit status */
+enum exit_status {
+ /* IO port request */
+ HAX_EXIT_IO = 1,
+ /* MMIO instruction emulation */
+ HAX_EXIT_MMIO,
+ /* QEMU emulation mode request; currently means the guest entered non-paged mode */
+ HAX_EXIT_REAL,
+ /*
+ * Interrupt window open: QEMU can inject an interrupt now.
+ * Also used when a signal is pending, since QEMU usually needs to
+ * check for interrupts at that point.
+ */
+ HAX_EXIT_INTERRUPT,
+ /* Unknown vmexit; usually triggers a guest reset */
+ HAX_EXIT_UNKNOWN_VMEXIT,
+ /* HALT from guest */
+ HAX_EXIT_HLT,
+ /* Reboot request, e.g. because of a triple fault in the guest */
+ HAX_EXIT_STATECHANGE,
+ /* the vcpu is only paused while being destroyed, so simply return to HAX */
+ HAX_EXIT_PAUSED,
+ HAX_EXIT_FAST_MMIO,
+};
+
+/*
+ * The interface definition:
+ * 1. vcpu_run returns 0 on success and non-zero on failure
+ * 2. exit_status holds the exit reason, as listed in enum exit_status
+ * 3. exit_reason is the raw VMX exit reason
+ */
+struct hax_tunnel {
+ uint32_t _exit_reason;
+ uint32_t _exit_flag;
+ uint32_t _exit_status;
+ uint32_t user_event_pending;
+ int ready_for_interrupt_injection;
+ int request_interrupt_window;
+ union {
+ struct {
+ /* 0: read, 1: write */
+#define HAX_EXIT_IO_IN 1
+#define HAX_EXIT_IO_OUT 0
+ uint8_t _direction;
+ uint8_t _df;
+ uint16_t _size;
+ uint16_t _port;
+ uint16_t _count;
+ uint8_t _flags;
+ uint8_t _pad0;
+ uint16_t _pad1;
+ uint32_t _pad2;
+ uint64_t _vaddr;
+ } pio;
+ struct {
+ uint64_t gla;
+ } mmio;
+ struct {
+ } state;
+ };
+} __attribute__ ((__packed__));
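
To make the three rules above concrete, here is a minimal consumer sketch
(run_until_io is a hypothetical name; struct hax_vcpu_state and
hax_vcpu_run() come from hax-i386.h later in this patch, and the real loop
in hax-all.c handles many more exit kinds):

    /* Abbreviated sketch of the run/dispatch protocol described above. */
    static int run_until_io(struct hax_vcpu_state *vcpu)
    {
        for (;;) {
            if (hax_vcpu_run(vcpu) < 0) {
                return -1;                        /* rule 1: non-zero is failure */
            }
            switch (vcpu->tunnel->_exit_status) { /* rule 2: why we exited */
            case HAX_EXIT_IO:
                return 0;                         /* service pio via vcpu->iobuf */
            case HAX_EXIT_INTERRUPT:
            case HAX_EXIT_PAUSED:
                break;                            /* nothing to do; re-enter */
            default:
                return -1;                        /* rule 3: _exit_reason holds
                                                     the raw VMX exit code */
            }
        }
    }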
+
+struct hax_module_version {
+ uint32_t compat_version;
+ uint32_t cur_version;
+} __attribute__ ((__packed__));
+
+/* This interface is supported only from API version 2 onwards */
+struct hax_qemu_version {
+ /* Current API version in QEMU */
+ uint32_t cur_version;
+ /* The minimum API version supported by QEMU */
+ uint32_t min_version;
+} __attribute__ ((__packed__));
+
+/* The Mac-specific interface to QEMU, mostly ioctl related */
+struct hax_tunnel_info {
+ uint64_t va;
+ uint64_t io_va;
+ uint16_t size;
+ uint16_t pad[3];
+} __attribute__ ((__packed__));
+
+struct hax_alloc_ram_info {
+ uint32_t size;
+ uint32_t pad;
+ uint64_t va;
+} __attribute__ ((__packed__));
+
+struct hax_ramblock_info {
+ uint64_t start_va;
+ uint64_t size;
+ uint64_t reserved;
+} __attribute__ ((__packed__));
+
+#define HAX_RAM_INFO_ROM 0x01 /* Read-Only */
+#define HAX_RAM_INFO_INVALID 0x80 /* Unmapped, usually used for MMIO */
+struct hax_set_ram_info {
+ uint64_t pa_start;
+ uint32_t size;
+ uint8_t flags;
+ uint8_t pad[3];
+ uint64_t va;
+} __attribute__ ((__packed__));
+
+#define HAX_CAP_STATUS_WORKING 0x1
+#define HAX_CAP_STATUS_NOTWORKING 0x0
+#define HAX_CAP_WORKSTATUS_MASK 0x1
+
+#define HAX_CAP_FAILREASON_VT 0x1
+#define HAX_CAP_FAILREASON_NX 0x2
+
+#define HAX_CAP_MEMQUOTA 0x2
+#define HAX_CAP_UG 0x4
+#define HAX_CAP_64BIT_RAMBLOCK 0x8
+
+struct hax_capabilityinfo {
+ /* bit 0: 1 - working
+ * 0 - not working, possibly because VT/NX is disabled
+ * bit 1: 1 - memory limitation working
+ * 0 - no memory limitation
+ */
+ uint16_t wstatus;
+ /* valid when not working
+ * bit 0: VT not enabled
+ * bit 1: NX not enabled */
+ uint16_t winfo;
+ uint32_t pad;
+ uint64_t mem_quota;
+} __attribute__ ((__packed__));
+
+struct hax_fastmmio {
+ uint64_t gpa;
+ union {
+ uint64_t value;
+ uint64_t gpa2; /* since HAX API v4 */
+ };
+ uint8_t size;
+ uint8_t direction;
+ uint16_t reg_index;
+ uint32_t pad0;
+ uint64_t _cr0;
+ uint64_t _cr2;
+ uint64_t _cr3;
+ uint64_t _cr4;
+} __attribute__ ((__packed__));
+#endif
diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c
new file mode 100644
index 000000000..a226d174d
--- /dev/null
+++ b/target/i386/hax/hax-mem.c
@@ -0,0 +1,323 @@
+/*
+ * HAX memory mapping operations
+ *
+ * Copyright (c) 2015-16 Intel Corporation
+ * Copyright 2016 Google, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+
+#include "hax-accel-ops.h"
+#include "qemu/queue.h"
+
+#define DEBUG_HAX_MEM 0
+
+#define DPRINTF(fmt, ...) \
+ do { \
+ if (DEBUG_HAX_MEM) { \
+ fprintf(stdout, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/**
+ * HAXMapping: describes a pending guest physical memory mapping
+ *
+ * @start_pa: a guest physical address marking the start of the region; must be
+ * page-aligned
+ * @size: the size of the region in bytes; must be page-aligned
+ * @host_va: the host virtual address of the start of the mapping
+ * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
+ * @entry: additional fields for linking #HAXMapping instances together
+ */
+typedef struct HAXMapping {
+ uint64_t start_pa;
+ uint32_t size;
+ uint64_t host_va;
+ int flags;
+ QTAILQ_ENTRY(HAXMapping) entry;
+} HAXMapping;
+
+/*
+ * A doubly-linked list (actually a tail queue) of the pending page mappings
+ * for the ongoing memory transaction.
+ *
+ * It is used to optimize the number of page mapping updates done through the
+ * kernel module. For example, it's effective when a driver is digging an MMIO
+ * hole inside an existing memory mapping. It will get a deletion of the whole
+ * region, then the addition of the 2 remaining RAM areas around the hole and
+ * finally the memory transaction commit. During the commit, it will effectively
+ * send to the kernel only the removal of the pages from the MMIO hole after
+ * having computed locally the result of the deletion and additions.
+ */
+static QTAILQ_HEAD(, HAXMapping) mappings =
+ QTAILQ_HEAD_INITIALIZER(mappings);
+
+/**
+ * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
+ */
+static void hax_mapping_dump_list(void)
+{
+ HAXMapping *entry;
+
+ DPRINTF("%s updates:\n", __func__);
+ QTAILQ_FOREACH(entry, &mappings, entry) {
+ DPRINTF("\t%c 0x%016" PRIx64 "->0x%016" PRIx64 " VA 0x%016" PRIx64
+ "%s\n", entry->flags & HAX_RAM_INFO_INVALID ? '-' : '+',
+ entry->start_pa, entry->start_pa + entry->size, entry->host_va,
+ entry->flags & HAX_RAM_INFO_ROM ? " ROM" : "");
+ }
+}
+
+static void hax_insert_mapping_before(HAXMapping *next, uint64_t start_pa,
+ uint32_t size, uint64_t host_va,
+ uint8_t flags)
+{
+ HAXMapping *entry;
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->start_pa = start_pa;
+ entry->size = size;
+ entry->host_va = host_va;
+ entry->flags = flags;
+ if (!next) {
+ QTAILQ_INSERT_TAIL(&mappings, entry, entry);
+ } else {
+ QTAILQ_INSERT_BEFORE(next, entry, entry);
+ }
+}
+
+static bool hax_mapping_is_opposite(HAXMapping *entry, uint64_t host_va,
+ uint8_t flags)
+{
+ /* removed then added without change for the read-only flag */
+ bool nop_flags = (entry->flags ^ flags) == HAX_RAM_INFO_INVALID;
+
+ return (entry->host_va == host_va) && nop_flags;
+}
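
The XOR test above encodes "removed and re-added with identical parameters":
two flag words that differ only in HAX_RAM_INFO_INVALID XOR to exactly that
bit, so a pending removal cancelled by a matching re-add (same host_va, same
ROM-ness) is a no-op. A truth-table sketch using the HAX_RAM_INFO_* values
from hax-interface.h above:

    #include <assert.h>

    #define HAX_RAM_INFO_ROM     0x01
    #define HAX_RAM_INFO_INVALID 0x80

    int main(void)
    {
        /* pending removal vs. re-add of the same RAM page: nop */
        assert((HAX_RAM_INFO_INVALID ^ 0) == HAX_RAM_INFO_INVALID);
        /* removal vs. re-add as ROM: the ROM bit also differs, not a nop */
        assert((HAX_RAM_INFO_INVALID ^ HAX_RAM_INFO_ROM) != HAX_RAM_INFO_INVALID);
        /* ROM removal vs. ROM re-add: still a nop */
        assert(((HAX_RAM_INFO_INVALID | HAX_RAM_INFO_ROM) ^ HAX_RAM_INFO_ROM)
               == HAX_RAM_INFO_INVALID);
        return 0;
    }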
+
+static void hax_update_mapping(uint64_t start_pa, uint32_t size,
+ uint64_t host_va, uint8_t flags)
+{
+ uint64_t end_pa = start_pa + size;
+ HAXMapping *entry, *next;
+
+ QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
+ uint32_t chunk_sz;
+ if (start_pa >= entry->start_pa + entry->size) {
+ continue;
+ }
+ if (start_pa < entry->start_pa) {
+ chunk_sz = end_pa <= entry->start_pa ? size
+ : entry->start_pa - start_pa;
+ hax_insert_mapping_before(entry, start_pa, chunk_sz,
+ host_va, flags);
+ start_pa += chunk_sz;
+ host_va += chunk_sz;
+ size -= chunk_sz;
+ } else if (start_pa > entry->start_pa) {
+ /* split the existing chunk at start_pa */
+ chunk_sz = start_pa - entry->start_pa;
+ hax_insert_mapping_before(entry, entry->start_pa, chunk_sz,
+ entry->host_va, entry->flags);
+ entry->start_pa += chunk_sz;
+ entry->host_va += chunk_sz;
+ entry->size -= chunk_sz;
+ }
+ /* now start_pa == entry->start_pa */
+ chunk_sz = MIN(size, entry->size);
+ if (chunk_sz) {
+ bool nop = hax_mapping_is_opposite(entry, host_va, flags);
+ bool partial = chunk_sz < entry->size;
+ if (partial) {
+ /* remove the beginning of the existing chunk */
+ entry->start_pa += chunk_sz;
+ entry->host_va += chunk_sz;
+ entry->size -= chunk_sz;
+ if (!nop) {
+ hax_insert_mapping_before(entry, start_pa, chunk_sz,
+ host_va, flags);
+ }
+ } else { /* affects the full mapping entry */
+ if (nop) { /* no change to this mapping, remove it */
+ QTAILQ_REMOVE(&mappings, entry, entry);
+ g_free(entry);
+ } else { /* update mapping properties */
+ entry->host_va = host_va;
+ entry->flags = flags;
+ }
+ }
+ start_pa += chunk_sz;
+ host_va += chunk_sz;
+ size -= chunk_sz;
+ }
+ if (!size) { /* we are done */
+ break;
+ }
+ }
+ if (size) { /* add the leftover */
+ hax_insert_mapping_before(NULL, start_pa, size, host_va, flags);
+ }
+}
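
To make the MMIO-hole scenario from the list comment above concrete, here is
a hypothetical transaction against a 16-page RAM region (PAGE = 0x1000;
example_dig_mmio_hole and the addresses are illustrative only):

    /* Models region_del of the whole range plus region_add of the two
     * halves around a one-page hole at 0x8000. */
    static void example_dig_mmio_hole(uint64_t host_va)
    {
        hax_update_mapping(0x0000, 16 * 0x1000, host_va, HAX_RAM_INFO_INVALID);
        hax_update_mapping(0x0000,  8 * 0x1000, host_va, 0);
        hax_update_mapping(0x9000,  7 * 0x1000, host_va + 0x9000, 0);
        /* 'mappings' now holds a single entry {0x8000, 0x1000, INVALID};
         * hax_transaction_commit() will issue exactly one hax_set_ram(). */
    }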
+
+static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
+{
+ MemoryRegion *mr = section->mr;
+ hwaddr start_pa = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ unsigned int delta;
+ uint64_t host_va;
+ uint32_t max_mapping_size;
+
+ /* We only care about RAM and ROM regions */
+ if (!memory_region_is_ram(mr)) {
+ if (memory_region_is_romd(mr)) {
+ /* HAXM kernel module does not support ROMD yet */
+ warn_report("Ignoring ROMD region 0x%016" PRIx64 "->0x%016" PRIx64,
+ start_pa, start_pa + size);
+ }
+ return;
+ }
+
+ /* Adjust start_pa and size so that they are page-aligned. (Cf
+ * kvm_set_phys_mem() in kvm-all.c).
+ */
+ delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
+ delta &= ~qemu_real_host_page_mask;
+ if (delta > size) {
+ return;
+ }
+ start_pa += delta;
+ size -= delta;
+ size &= qemu_real_host_page_mask;
+ if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ return;
+ }
+
+ host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
+ + section->offset_within_region + delta;
+ if (memory_region_is_rom(section->mr)) {
+ flags |= HAX_RAM_INFO_ROM;
+ }
+
+ /*
+ * The kernel module interface uses 32-bit sizes:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_set_ram
+ *
+ * If the mapping size is longer than 32 bits, we can't process it in one
+ * call into the kernel. Instead, we split the mapping into smaller ones,
+ * and call hax_update_mapping() on each.
+ */
+ max_mapping_size = UINT32_MAX & qemu_real_host_page_mask;
+ while (size > max_mapping_size) {
+ hax_update_mapping(start_pa, max_mapping_size, host_va, flags);
+ start_pa += max_mapping_size;
+ size -= max_mapping_size;
+ host_va += max_mapping_size;
+ }
+ /* Now size <= max_mapping_size */
+ hax_update_mapping(start_pa, (uint32_t)size, host_va, flags);
+}
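
Because the kernel interface takes a 32-bit size, the loop above caps each
call at UINT32_MAX rounded down to a page boundary (0xfffff000 with 4 KiB
pages). A worked sketch for a hypothetical 6 GiB region, which ends up as one
maximal chunk plus a ~2 GiB remainder:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t page_mask = ~(uint64_t)0xfff;       /* 4 KiB pages assumed */
        const uint32_t max_chunk = UINT32_MAX & page_mask; /* 0xfffff000 */
        uint64_t size = 6ull << 30;                        /* a 6 GiB region */
        int calls = 0;

        while (size > max_chunk) {                         /* mirrors the loop above */
            size -= max_chunk;
            calls++;
        }
        calls++;                                           /* final 32-bit-sized call */
        printf("%d calls, final chunk 0x%" PRIx64 "\n", calls, size);
        /* prints: 2 calls, final chunk 0x80001000 */
        return 0;
    }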
+
+static void hax_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ memory_region_ref(section->mr);
+ hax_process_section(section, 0);
+}
+
+static void hax_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hax_process_section(section, HAX_RAM_INFO_INVALID);
+ memory_region_unref(section->mr);
+}
+
+static void hax_transaction_begin(MemoryListener *listener)
+{
+ g_assert(QTAILQ_EMPTY(&mappings));
+}
+
+static void hax_transaction_commit(MemoryListener *listener)
+{
+ if (!QTAILQ_EMPTY(&mappings)) {
+ HAXMapping *entry, *next;
+
+ if (DEBUG_HAX_MEM) {
+ hax_mapping_dump_list();
+ }
+ QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
+ if (entry->flags & HAX_RAM_INFO_INVALID) {
+ /* for unmapping, put the values expected by the kernel */
+ entry->flags = HAX_RAM_INFO_INVALID;
+ entry->host_va = 0;
+ }
+ if (hax_set_ram(entry->start_pa, entry->size,
+ entry->host_va, entry->flags)) {
+ fprintf(stderr, "%s: Failed mapping @0x%016" PRIx64 "+0x%"
+ PRIx32 " flags %02x\n", __func__, entry->start_pa,
+ entry->size, entry->flags);
+ }
+ QTAILQ_REMOVE(&mappings, entry, entry);
+ g_free(entry);
+ }
+ }
+}
+
+/* we currently fake dirty bitmap sync by always reporting everything dirty */
+static void hax_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+
+ if (!memory_region_is_ram(mr)) {
+ /* Skip MMIO regions */
+ return;
+ }
+
+ memory_region_set_dirty(mr, 0, int128_get64(section->size));
+}
+
+static MemoryListener hax_memory_listener = {
+ .name = "hax",
+ .begin = hax_transaction_begin,
+ .commit = hax_transaction_commit,
+ .region_add = hax_region_add,
+ .region_del = hax_region_del,
+ .log_sync = hax_log_sync,
+ .priority = 10,
+};
+
+static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size)
+{
+ /*
+ * We must register each RAM block with the HAXM kernel module, or
+ * hax_set_ram() will fail for any mapping into the RAM block:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_alloc_ram
+ *
+ * Old versions of the HAXM kernel module (< 6.2.0) used to preallocate all
+ * host physical pages for the RAM block as part of this registration
+ * process, hence the name hax_populate_ram().
+ */
+ if (hax_populate_ram((uint64_t)(uintptr_t)host, max_size) < 0) {
+ fprintf(stderr, "HAX failed to populate RAM\n");
+ abort();
+ }
+}
+
+static struct RAMBlockNotifier hax_ram_notifier = {
+ .ram_block_added = hax_ram_block_added,
+};
+
+void hax_memory_init(void)
+{
+ ram_block_notifier_add(&hax_ram_notifier);
+ memory_listener_register(&hax_memory_listener, &address_space_memory);
+}
diff --git a/target/i386/hax/hax-posix.c b/target/i386/hax/hax-posix.c
new file mode 100644
index 000000000..ac1a51096
--- /dev/null
+++ b/target/i386/hax/hax-posix.c
@@ -0,0 +1,305 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* HAX module interface - POSIX (originally Darwin) version */
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include "sysemu/cpus.h"
+#include "hax-accel-ops.h"
+
+hax_fd hax_mod_open(void)
+{
+ int fd = open("/dev/HAX", O_RDWR);
+ if (fd == -1) {
+ fprintf(stderr, "Failed to open the hax module\n");
+ return fd; /* don't call qemu_set_cloexec() on an invalid fd */
+ }
+
+ qemu_set_cloexec(fd);
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint64_t size)
+{
+ int ret;
+
+ if (!hax_global.vm || !hax_global.vm->fd) {
+ fprintf(stderr, "Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ if (hax_global.supports_64bit_ramblock) {
+ struct hax_ramblock_info ramblock = {
+ .start_va = va,
+ .size = size,
+ .reserved = 0
+ };
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
+ } else {
+ struct hax_alloc_ram_info info = {
+ .size = (uint32_t)size,
+ .pad = 0,
+ .va = va
+ };
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
+ }
+ if (ret < 0) {
+ fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
+ ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
+ hax_global.supports_64bit_ramblock ? "new" : "legacy");
+ return ret;
+ }
+ return 0;
+}
+
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
+{
+ struct hax_set_ram_info info;
+ int ret;
+
+ info.pa_start = start_pa;
+ info.size = size;
+ info.va = host_va;
+ info.flags = (uint8_t) flags;
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
+ if (ret < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
+{
+ int ret;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
+ if (ret == -1) {
+ fprintf(stderr, "Failed to get HAX capability\n");
+ return -errno;
+ }
+
+ return 0;
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
+ if (ret == -1) {
+ fprintf(stderr, "Failed to get HAX version\n");
+ return -errno;
+ }
+
+ return 0;
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
+}
+
+int hax_host_create_vm(struct hax_state *hax, int *vmid)
+{
+ int ret;
+ int vm_id = 0;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return -EINVAL;
+ }
+
+ if (hax->vm) {
+ return 0;
+ }
+
+ ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
+ *vmid = vm_id;
+ return ret;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ hax_fd fd;
+ char *vm_name = NULL;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name) {
+ return -1;
+ }
+
+ fd = open(vm_name, O_RDWR);
+ g_free(vm_name);
+
+ if (fd >= 0) {
+ qemu_set_cloexec(fd);
+ }
+
+ return fd;
+}
+
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
+{
+ int ret;
+
+ if (hax_invalid_fd(vm_fd)) {
+ return -EINVAL;
+ }
+
+ ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to notify qemu API version\n");
+ return ret;
+ }
+ return 0;
+}
+
+/* Simply require the size to be no smaller than struct hax_tunnel,
+ * since hax_tunnel may be extended later in a compatible way
+ */
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+
+ ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
+ }
+
+ return ret;
+}
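
valid_hax_tunnel_size() itself is implemented in hax-all.c, outside the hunks
shown here; given the comment above, it presumably reduces to a lower-bound
check along these lines (a sketch, not necessarily the verbatim implementation):

    int valid_hax_tunnel_size(uint16_t size)
    {
        /* anything at least as large as the struct we know about is fine */
        return size >= sizeof(struct hax_tunnel);
    }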
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd fd;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path) {
+ fprintf(stderr, "Failed to get the devfs\n");
+ return -EINVAL;
+ }
+
+ fd = open(devfs_path, O_RDWR);
+ g_free(devfs_path);
+ if (fd < 0) {
+ fprintf(stderr, "Failed to open the vcpu devfs\n");
+ return fd;
+ }
+ qemu_set_cloexec(fd);
+ return fd;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+ struct hax_tunnel_info info;
+
+ ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
+ if (ret) {
+ fprintf(stderr, "Failed to setup the hax tunnel\n");
+ return ret;
+ }
+
+ if (!valid_hax_tunnel_size(info.size)) {
+ fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
+ vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+{
+ return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
+}
+
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
+ }
+ return ret;
+}
+
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
+ }
+ return ret;
+}
+
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
+ }
+ return ret;
+}
+
+int hax_inject_interrupt(CPUArchState *env, int vector)
+{
+ int fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
+}
+
+void hax_kick_vcpu_thread(CPUState *cpu)
+{
+ /*
+ * FIXME: race condition with the exit_request check in
+ * hax_vcpu_hax_exec
+ */
+ cpu->exit_request = 1;
+ cpus_kick_thread(cpu);
+}
diff --git a/target/i386/hax/hax-posix.h b/target/i386/hax/hax-posix.h
new file mode 100644
index 000000000..fb7c64426
--- /dev/null
+++ b/target/i386/hax/hax-posix.h
@@ -0,0 +1,61 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HAX_POSIX_H
+#define TARGET_I386_HAX_POSIX_H
+
+#include <sys/ioctl.h>
+
+#define HAX_INVALID_FD (-1)
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return fd <= 0;
+}
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ close(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ close(fd);
+}
+
+/* HAX model level ioctl */
+#define HAX_IOCTL_VERSION _IOWR(0, 0x20, struct hax_module_version)
+#define HAX_IOCTL_CREATE_VM _IOWR(0, 0x21, uint32_t)
+#define HAX_IOCTL_DESTROY_VM _IOW(0, 0x22, uint32_t)
+#define HAX_IOCTL_CAPABILITY _IOR(0, 0x23, struct hax_capabilityinfo)
+
+#define HAX_VM_IOCTL_VCPU_CREATE _IOWR(0, 0x80, uint32_t)
+#define HAX_VM_IOCTL_ALLOC_RAM _IOWR(0, 0x81, struct hax_alloc_ram_info)
+#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info)
+#define HAX_VM_IOCTL_VCPU_DESTROY _IOW(0, 0x83, uint32_t)
+#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION _IOW(0, 0x84, struct hax_qemu_version)
+#define HAX_VM_IOCTL_ADD_RAMBLOCK _IOW(0, 0x85, struct hax_ramblock_info)
+
+#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0)
+#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data)
+#define HAX_VCPU_IOCTL_GET_MSRS _IOWR(0, 0xc2, struct hax_msr_data)
+
+#define HAX_VCPU_IOCTL_SET_FPU _IOW(0, 0xc3, struct fx_layout)
+#define HAX_VCPU_IOCTL_GET_FPU _IOR(0, 0xc4, struct fx_layout)
+
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL _IOWR(0, 0xc5, struct hax_tunnel_info)
+#define HAX_VCPU_IOCTL_INTERRUPT _IOWR(0, 0xc6, uint32_t)
+#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t)
+#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t)
+
+#endif /* TARGET_I386_HAX_POSIX_H */
diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c
new file mode 100644
index 000000000..59afa213a
--- /dev/null
+++ b/target/i386/hax/hax-windows.c
@@ -0,0 +1,485 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hax-accel-ops.h"
+
+/*
+ * Returns 0 on success, -1 when the driver is not loaded,
+ * and another negative value for any other failure.
+ */
+static int hax_open_device(hax_fd *fd)
+{
+ uint32_t errNum = 0;
+ HANDLE hDevice;
+
+ if (!fd) {
+ return -2;
+ }
+
+ hDevice = CreateFile("\\\\.\\HAX",
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ if (hDevice == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Failed to open the HAX device!\n");
+ errNum = GetLastError();
+ if (errNum == ERROR_FILE_NOT_FOUND) {
+ return -1;
+ }
+ return -2;
+ }
+ *fd = hDevice;
+ return 0;
+}
+
+hax_fd hax_mod_open(void)
+{
+ int ret;
+ hax_fd fd = NULL;
+
+ ret = hax_open_device(&fd);
+ if (ret != 0) {
+ fprintf(stderr, "Open HAX device failed\n");
+ }
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint64_t size)
+{
+ int ret;
+ HANDLE hDeviceVM;
+ DWORD dSize = 0;
+
+ if (!hax_global.vm || !hax_global.vm->fd) {
+ fprintf(stderr, "Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ hDeviceVM = hax_global.vm->fd;
+ if (hax_global.supports_64bit_ramblock) {
+ struct hax_ramblock_info ramblock = {
+ .start_va = va,
+ .size = size,
+ .reserved = 0
+ };
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ADD_RAMBLOCK,
+ &ramblock, sizeof(ramblock), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ } else {
+ struct hax_alloc_ram_info info = {
+ .size = (uint32_t) size,
+ .pad = 0,
+ .va = va
+ };
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ALLOC_RAM,
+ &info, sizeof(info), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+
+ if (!ret) {
+ fprintf(stderr, "Failed to register RAM block: va=0x%" PRIx64
+ ", size=0x%" PRIx64 ", method=%s\n", va, size,
+ hax_global.supports_64bit_ramblock ? "new" : "legacy");
+ return ret;
+ }
+
+ return 0;
+}
+
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
+{
+ struct hax_set_ram_info info;
+ HANDLE hDeviceVM = hax_global.vm->fd;
+ DWORD dSize = 0;
+ int ret;
+
+ info.pa_start = start_pa;
+ info.size = size;
+ info.va = host_va;
+ info.flags = (uint8_t) flags;
+
+ ret = DeviceIoControl(hDeviceVM, HAX_VM_IOCTL_SET_RAM,
+ &info, sizeof(info), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
+{
+ int ret;
+ HANDLE hDevice = hax->fd; /* handle to hax module */
+ DWORD dSize = 0;
+ DWORD err = 0;
+
+ if (hax_invalid_fd(hDevice)) {
+ fprintf(stderr, "Invalid fd for hax device!\n");
+ return -ENODEV;
+ }
+
+ ret = DeviceIoControl(hDevice, HAX_IOCTL_CAPABILITY, NULL, 0, cap,
+ sizeof(*cap), &dSize, (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ err = GetLastError();
+ if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
+ fprintf(stderr, "hax capability is too long to hold.\n");
+ }
+ fprintf(stderr, "Failed to get Hax capability:%luu\n", err);
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+ HANDLE hDevice = hax->fd; /* handle to hax module */
+ DWORD dSize = 0;
+ DWORD err = 0;
+
+ if (hax_invalid_fd(hDevice)) {
+ fprintf(stderr, "Invalid fd for hax device!\n");
+ return -ENODEV;
+ }
+
+ ret = DeviceIoControl(hDevice,
+ HAX_IOCTL_VERSION,
+ NULL, 0,
+ version, sizeof(*version), &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ err = GetLastError();
+ if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
+ fprintf(stderr, "hax module verion is too long to hold.\n");
+ }
+ fprintf(stderr, "Failed to get Hax module version:%lu\n", err);
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ return g_strdup_printf("\\\\.\\hax_vm%02d", vm_id);
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ return g_strdup_printf("\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id);
+}
+
+int hax_host_create_vm(struct hax_state *hax, int *vmid)
+{
+ int ret;
+ int vm_id = 0;
+ DWORD dSize = 0;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return -EINVAL;
+ }
+
+ if (hax->vm) {
+ return 0;
+ }
+
+ ret = DeviceIoControl(hax->fd,
+ HAX_IOCTL_CREATE_VM,
+ NULL, 0, &vm_id, sizeof(vm_id), &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to create VM. Error code: %lu\n",
+ GetLastError());
+ return -1;
+ }
+ *vmid = vm_id;
+ return 0;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ char *vm_name = NULL;
+ hax_fd hDeviceVM;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name) {
+ fprintf(stderr, "Failed to open VM. VM name is null\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVM = CreateFile(vm_name,
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (hDeviceVM == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Open the vm device error:%s, ec:%lu\n",
+ vm_name, GetLastError());
+ }
+
+ g_free(vm_name);
+ return hDeviceVM;
+}
+
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
+{
+ int ret;
+ DWORD dSize = 0;
+ if (hax_invalid_fd(vm_fd)) {
+ return -EINVAL;
+ }
+ ret = DeviceIoControl(vm_fd,
+ HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
+ qversion, sizeof(struct hax_qemu_version),
+ NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to notify qemu API version\n");
+ return -1;
+ }
+ return 0;
+}
+
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(vm_fd,
+ HAX_VM_IOCTL_VCPU_CREATE,
+ &vcpuid, sizeof(vcpuid), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
+ return -1;
+ }
+
+ return 0;
+}
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd hDeviceVCPU;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path) {
+ fprintf(stderr, "Failed to get the devfs\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVCPU = CreateFile(devfs_path,
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (hDeviceVCPU == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Failed to open the vcpu devfs\n");
+ }
+ g_free(devfs_path);
+ return hDeviceVCPU;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ hax_fd hDeviceVCPU = vcpu->fd;
+ int ret;
+ struct hax_tunnel_info info;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SETUP_TUNNEL,
+ NULL, 0, &info, sizeof(info), &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to setup the hax tunnel\n");
+ return -1;
+ }
+
+ if (!valid_hax_tunnel_size(info.size)) {
+ fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+ vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
+ vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+ HANDLE hDeviceVCPU = vcpu->fd;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_RUN,
+ NULL, 0, NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_FPU,
+ fl, sizeof(*fl), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_FPU,
+ NULL, 0, fl, sizeof(*fl), &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_SET_REGS,
+ state, sizeof(*state),
+ NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_GET_REGS,
+ NULL, 0,
+ state, sizeof(*state), &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_inject_interrupt(CPUArchState *env, int vector)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_INTERRUPT,
+ &vector, sizeof(vector), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+static void CALLBACK dummy_apc_func(ULONG_PTR unused)
+{
+}
+
+void hax_kick_vcpu_thread(CPUState *cpu)
+{
+ /*
+ * FIXME: race condition with the exit_request check in
+ * hax_vcpu_hax_exec
+ */
+ cpu->exit_request = 1;
+ if (!qemu_cpu_is_self(cpu)) {
+ if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
+ fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
+ __func__, GetLastError());
+ exit(1);
+ }
+ }
+}
diff --git a/target/i386/hax/hax-windows.h b/target/i386/hax/hax-windows.h
new file mode 100644
index 000000000..b1f5d4f32
--- /dev/null
+++ b/target/i386/hax/hax-windows.h
@@ -0,0 +1,88 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HAX_WINDOWS_H
+#define TARGET_I386_HAX_WINDOWS_H
+
+#include <winioctl.h>
+#include <windef.h>
+
+#include "hax-accel-ops.h"
+
+#define HAX_INVALID_FD INVALID_HANDLE_VALUE
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ CloseHandle(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ CloseHandle(fd);
+}
+
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return (fd == INVALID_HANDLE_VALUE);
+}
+
+#define HAX_DEVICE_TYPE 0x4000
+
+#define HAX_IOCTL_VERSION CTL_CODE(HAX_DEVICE_TYPE, 0x900, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_IOCTL_CREATE_VM CTL_CODE(HAX_DEVICE_TYPE, 0x901, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_IOCTL_CAPABILITY CTL_CODE(HAX_DEVICE_TYPE, 0x910, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VM_IOCTL_VCPU_CREATE CTL_CODE(HAX_DEVICE_TYPE, 0x902, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_ALLOC_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x903, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_SET_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x904, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_VCPU_DESTROY CTL_CODE(HAX_DEVICE_TYPE, 0x905, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_ADD_RAMBLOCK CTL_CODE(HAX_DEVICE_TYPE, 0x913, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_RUN CTL_CODE(HAX_DEVICE_TYPE, 0x906, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_SET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x907, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x908, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_SET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x909, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x90a, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL CTL_CODE(HAX_DEVICE_TYPE, 0x90b, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_INTERRUPT CTL_CODE(HAX_DEVICE_TYPE, 0x90c, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_SET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90d, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_GET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90e, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION CTL_CODE(HAX_DEVICE_TYPE, 0x910, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+#endif /* TARGET_I386_HAX_WINDOWS_H */
diff --git a/target/i386/hax/meson.build b/target/i386/hax/meson.build
new file mode 100644
index 000000000..d6c520fb6
--- /dev/null
+++ b/target/i386/hax/meson.build
@@ -0,0 +1,7 @@
+i386_softmmu_ss.add(when: 'CONFIG_HAX', if_true: files(
+ 'hax-all.c',
+ 'hax-mem.c',
+ 'hax-accel-ops.c',
+))
+i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_POSIX'], if_true: files('hax-posix.c'))
+i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_WIN32'], if_true: files('hax-windows.c'))
diff --git a/target/i386/helper.c b/target/i386/helper.c
new file mode 100644
index 000000000..533b29cb9
--- /dev/null
+++ b/target/i386/helper.c
@@ -0,0 +1,679 @@
+/*
+ * i386 helpers (without register variable usage)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-events-run-state.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/runstate.h"
+#include "kvm/kvm_i386.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/hw_accel.h"
+#include "monitor/monitor.h"
+#endif
+
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+ uint32_t hflags = env->hflags;
+ uint32_t hflags2 = env->hflags2;
+ uint32_t bndcsr;
+
+ if ((hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ if ((env->cr[4] & CR4_OSXSAVE_MASK)
+ && (env->xcr0 & XSTATE_BNDCSR_MASK)
+ && (bndcsr & BNDCFG_ENABLE)) {
+ hflags |= HF_MPX_EN_MASK;
+ } else {
+ hflags &= ~HF_MPX_EN_MASK;
+ }
+
+ if (bndcsr & BNDCFG_BNDPRESERVE) {
+ hflags2 |= HF2_MPX_PR_MASK;
+ } else {
+ hflags2 &= ~HF2_MPX_PR_MASK;
+ }
+
+ env->hflags = hflags;
+ env->hflags2 = hflags2;
+}
+
+static void cpu_x86_version(CPUX86State *env, int *family, int *model)
+{
+ int cpuver = env->cpuid_version;
+
+ if (family == NULL || model == NULL) {
+ return;
+ }
+
+ *family = (cpuver >> 8) & 0x0f;
+ *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
+}
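
cpu_x86_version() decodes the standard CPUID leaf-1 signature: base family in
bits 11:8, base model in bits 7:4, and the extended model in bits 19:16 folded
in as the model's high nibble. A worked example with a hypothetical
cpuid_version of 0x000306a9 (an Ivy Bridge signature):

    #include <assert.h>

    int main(void)
    {
        int cpuver = 0x000306a9;
        int family = (cpuver >> 8) & 0x0f;                             /* 6 */
        int model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);  /* 0x3a */

        assert(family == 6 && model == 0x3a);
        return 0;
    }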
+
+/* Broadcast MCA signal for processors of family 06H, model 0EH and above */
+int cpu_x86_support_mca_broadcast(CPUX86State *env)
+{
+ int family = 0;
+ int model = 0;
+
+ cpu_x86_version(env, &family, &model);
+ if ((family == 6 && model >= 14) || family > 6) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
+{
+ CPUX86State *env = &cpu->env;
+
+ a20_state = (a20_state != 0);
+ if (a20_state != ((env->a20_mask >> 20) & 1)) {
+ CPUState *cs = CPU(cpu);
+
+ qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
+ /* if the cpu is currently executing code, we must unlink it and
+ all the potentially executing TBs */
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+
+ /* when a20 is changed, all the MMU mappings are invalid, so
+ we must flush everything */
+ tlb_flush(cs);
+ env->a20_mask = ~(1 << 20) | (a20_state << 20);
+ }
+}
+
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
+{
+ X86CPU *cpu = env_archcpu(env);
+ int pe_state;
+
+ qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
+ if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
+ (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
+ tlb_flush(CPU(cpu));
+ }
+
+#ifdef TARGET_X86_64
+ if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
+ (env->efer & MSR_EFER_LME)) {
+ /* entering long mode */
+ /* XXX: generate an exception */
+ if (!(env->cr[4] & CR4_PAE_MASK))
+ return;
+ env->efer |= MSR_EFER_LMA;
+ env->hflags |= HF_LMA_MASK;
+ } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
+ (env->efer & MSR_EFER_LMA)) {
+ /* exit long mode */
+ env->efer &= ~MSR_EFER_LMA;
+ env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
+ env->eip &= 0xffffffff;
+ }
+#endif
+ env->cr[0] = new_cr0 | CR0_ET_MASK;
+
+ /* update PE flag in hidden flags */
+ pe_state = (env->cr[0] & CR0_PE_MASK);
+ env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
+ /* ensure that ADDSEG is always set in real mode */
+ env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
+ /* update FPU flags */
+ env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
+ ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
+}
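+
+/*
+ * Illustrative note (editorial): the final shift works because CR0.MP,
+ * CR0.EM and CR0.TS sit in bits 1..3 while HF_MP/HF_EM/HF_TS occupy three
+ * consecutive hflags bits starting at HF_MP_SHIFT, so one left-shift by
+ * (HF_MP_SHIFT - 1) lines all three flags up in a single operation.
+ */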
+
+/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
+ the PDPT */
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
+{
+ env->cr[3] = new_cr3;
+ if (env->cr[0] & CR0_PG_MASK) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
+{
+ uint32_t hflags;
+
+#if defined(DEBUG_MMU)
+ printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
+#endif
+ if ((new_cr4 ^ env->cr[4]) &
+ (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+ CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
+ tlb_flush(env_cpu(env));
+ }
+
+ /* Clear bits we're going to recompute. */
+ hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
+
+ /* SSE handling */
+ if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
+ new_cr4 &= ~CR4_OSFXSR_MASK;
+ }
+ if (new_cr4 & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
+
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+ new_cr4 &= ~CR4_SMAP_MASK;
+ }
+ if (new_cr4 & CR4_SMAP_MASK) {
+ hflags |= HF_SMAP_MASK;
+ }
+
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+ new_cr4 &= ~CR4_PKE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
+ new_cr4 &= ~CR4_PKS_MASK;
+ }
+
+ env->cr[4] = new_cr4;
+ env->hflags = hflags;
+
+ cpu_sync_bndcs_hflags(env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ target_ulong pde_addr, pte_addr;
+ uint64_t pte;
+ int32_t a20_mask;
+ uint32_t page_offset;
+ int page_size;
+
+ *attrs = cpu_get_mem_attrs(env);
+
+ a20_mask = x86_get_a20_mask(env);
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ pte = addr & a20_mask;
+ page_size = 4096;
+ } else if (env->cr[4] & CR4_PAE_MASK) {
+ target_ulong pdpe_addr;
+ uint64_t pde, pdpe;
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ bool la57 = env->cr[4] & CR4_LA57_MASK;
+ uint64_t pml5e_addr, pml5e;
+ uint64_t pml4e_addr, pml4e;
+ int32_t sext;
+
+ /* test virtual address sign extension */
+ sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
+ if (sext != 0 && sext != -1) {
+ return -1;
+ }
+
+ if (la57) {
+ pml5e_addr = ((env->cr[3] & ~0xfff) +
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
+ pml5e = x86_ldq_phys(cs, pml5e_addr);
+ if (!(pml5e & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ } else {
+ pml5e = env->cr[3];
+ }
+
+ pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
+ pml4e = x86_ldq_phys(cs, pml4e_addr);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
+ (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ if (pdpe & PG_PSE_MASK) {
+ page_size = 1024 * 1024 * 1024;
+ pte = pdpe;
+ goto out;
+ }
+
+ } else
+#endif
+ {
+ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+ a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK))
+ return -1;
+ }
+
+ pde_addr = ((pdpe & PG_ADDRESS_MASK) +
+ (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
+ pde = x86_ldq_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ page_size = 2048 * 1024;
+ pte = pde;
+ } else {
+ /* 4 KB page */
+ pte_addr = ((pde & PG_ADDRESS_MASK) +
+ (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
+ page_size = 4096;
+ pte = x86_ldq_phys(cs, pte_addr);
+ }
+ if (!(pte & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ } else {
+ uint32_t pde;
+
+ /* page directory entry */
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
+ pde = x86_ldl_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK))
+ return -1;
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+ page_size = 4096 * 1024;
+ } else {
+            /* page table entry */
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
+ pte = x86_ldl_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ page_size = 4096;
+ }
+ pte = pte & a20_mask;
+ }
+
+#ifdef TARGET_X86_64
+out:
+#endif
+ pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+ page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
+ return pte | page_offset;
+}
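+
+/*
+ * Illustrative note (editorial): each level of the long-mode walk above
+ * consumes 9 bits of the virtual address, so the table indices are
+ *   PML4: (addr >> 39) & 0x1ff     PDPT: (addr >> 30) & 0x1ff
+ *   PD:   (addr >> 21) & 0x1ff     PT:   (addr >> 12) & 0x1ff
+ * each scaled by 8 (<< 3) to address 64-bit entries; LA57 prepends one
+ * more level indexed by (addr >> 48) & 0x1ff.
+ */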
+
+typedef struct MCEInjectionParams {
+ Monitor *mon;
+ int bank;
+ uint64_t status;
+ uint64_t mcg_status;
+ uint64_t addr;
+ uint64_t misc;
+ int flags;
+} MCEInjectionParams;
+
+static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
+ bool recursive)
+{
+ MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};
+
+ qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
+ &mff);
+}
+
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
+{
+ MCEInjectionParams *params = data.host_ptr;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *cenv = &cpu->env;
+ uint64_t *banks = cenv->mce_banks + 4 * params->bank;
+ g_autofree char *msg = NULL;
+ bool need_reset = false;
+ bool recursive;
+ bool ar = !!(params->status & MCI_STATUS_AR);
+
+ cpu_synchronize_state(cs);
+ recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);
+
+ /*
+ * If there is an MCE exception being processed, ignore this SRAO MCE
+ * unless unconditional injection was requested.
+ */
+ if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
+ emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
+ return;
+ }
+
+ if (params->status & MCI_STATUS_UC) {
+ /*
+ * if MSR_MCG_CTL is not all 1s, the uncorrected error
+ * reporting is disabled
+ */
+ if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
+ monitor_printf(params->mon,
+ "CPU %d: Uncorrected error reporting disabled\n",
+ cs->cpu_index);
+ return;
+ }
+
+ /*
+ * if MSR_MCi_CTL is not all 1s, the uncorrected error
+ * reporting is disabled for the bank
+ */
+ if (banks[0] != ~(uint64_t)0) {
+ monitor_printf(params->mon,
+ "CPU %d: Uncorrected error reporting disabled for"
+ " bank %d\n",
+ cs->cpu_index, params->bank);
+ return;
+ }
+
+ if (!(cenv->cr[4] & CR4_MCE_MASK)) {
+ need_reset = true;
+ msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
+ "raising triple fault", cs->cpu_index);
+ } else if (recursive) {
+ need_reset = true;
+ msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
+ "raising triple fault", cs->cpu_index);
+ }
+
+ if (need_reset) {
+ emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
+ recursive);
+ monitor_printf(params->mon, "%s", msg);
+ qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ return;
+ }
+
+ if (banks[1] & MCI_STATUS_VAL) {
+ params->status |= MCI_STATUS_OVER;
+ }
+ banks[2] = params->addr;
+ banks[3] = params->misc;
+ cenv->mcg_status = params->mcg_status;
+ banks[1] = params->status;
+ cpu_interrupt(cs, CPU_INTERRUPT_MCE);
+ } else if (!(banks[1] & MCI_STATUS_VAL)
+ || !(banks[1] & MCI_STATUS_UC)) {
+ if (banks[1] & MCI_STATUS_VAL) {
+ params->status |= MCI_STATUS_OVER;
+ }
+ banks[2] = params->addr;
+ banks[3] = params->misc;
+ banks[1] = params->status;
+ } else {
+ banks[1] |= MCI_STATUS_OVER;
+ }
+
+ emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
+}
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+ uint64_t status, uint64_t mcg_status, uint64_t addr,
+ uint64_t misc, int flags)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *cenv = &cpu->env;
+ MCEInjectionParams params = {
+ .mon = mon,
+ .bank = bank,
+ .status = status,
+ .mcg_status = mcg_status,
+ .addr = addr,
+ .misc = misc,
+ .flags = flags,
+ };
+ unsigned bank_num = cenv->mcg_cap & 0xff;
+
+ if (!cenv->mcg_cap) {
+ monitor_printf(mon, "MCE injection not supported\n");
+ return;
+ }
+ if (bank >= bank_num) {
+ monitor_printf(mon, "Invalid MCE bank number\n");
+ return;
+ }
+ if (!(status & MCI_STATUS_VAL)) {
+ monitor_printf(mon, "Invalid MCE status code\n");
+ return;
+ }
+ if ((flags & MCE_INJECT_BROADCAST)
+ && !cpu_x86_support_mca_broadcast(cenv)) {
+ monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
+ return;
+ }
+
+ run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+ if (flags & MCE_INJECT_BROADCAST) {
+ CPUState *other_cs;
+
+ params.bank = 1;
+ params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
+ params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
+ params.addr = 0;
+ params.misc = 0;
+ CPU_FOREACH(other_cs) {
+ if (other_cs == cs) {
+ continue;
+ }
+ run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+ }
+ }
+}
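+
+/*
+ * Illustrative usage (editorial, operand values are made up): this
+ * function is reached from the HMP monitor's "mce" command, e.g.
+ *   (qemu) mce 0 9 0xbd000000000000c0 0x5 0x1234 0x0
+ * which asks for an MCE in bank 9 of CPU 0 with the given status,
+ * mcg_status, addr and misc values.
+ */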
+
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
+{
+ X86CPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
+ env->tpr_access_type = access;
+
+ cpu_interrupt(cs, CPU_INTERRUPT_TPR);
+ } else if (tcg_enabled()) {
+ cpu_restore_state(cs, cs->mem_io_pc, false);
+
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+ target_ulong *base, unsigned int *limit,
+ unsigned int *flags)
+{
+ CPUState *cs = env_cpu(env);
+ SegmentCache *dt;
+ target_ulong ptr;
+ uint32_t e1, e2;
+ int index;
+
+ if (selector & 0x4)
+ dt = &env->ldt;
+ else
+ dt = &env->gdt;
+ index = selector & ~7;
+ ptr = dt->base + index;
+ if ((index + 7) > dt->limit
+ || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
+ || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
+ return 0;
+
+ *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+ *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK)
+ *limit = (*limit << 12) | 0xfff;
+ *flags = e2;
+
+ return 1;
+}
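+
+/*
+ * Illustrative note (editorial): e1/e2 are the low and high 32-bit words
+ * of a legacy segment descriptor; the base is stitched together from
+ * e1[31:16], e2[7:0] and e2[31:24], and a set G bit scales the 20-bit
+ * limit to 4 KiB granularity via (limit << 12) | 0xfff.
+ */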
+
+#if !defined(CONFIG_USER_ONLY)
+void do_cpu_init(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ CPUX86State *save = g_new(CPUX86State, 1);
+ int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
+
+ *save = *env;
+
+ cpu_reset(cs);
+ cs->interrupt_request = sipi;
+ memcpy(&env->start_init_save, &save->start_init_save,
+ offsetof(CPUX86State, end_init_save) -
+ offsetof(CPUX86State, start_init_save));
+ g_free(save);
+
+ if (kvm_enabled()) {
+ kvm_arch_do_init_vcpu(cpu);
+ }
+ apic_init_reset(cpu->apic_state);
+}
+
+void do_cpu_sipi(X86CPU *cpu)
+{
+ apic_sipi(cpu->apic_state);
+}
+#else
+void do_cpu_init(X86CPU *cpu)
+{
+}
+void do_cpu_sipi(X86CPU *cpu)
+{
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+
+void cpu_load_efer(CPUX86State *env, uint64_t val)
+{
+ env->efer = val;
+ env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+ if (env->efer & MSR_EFER_LMA) {
+ env->hflags |= HF_LMA_MASK;
+ }
+ if (env->efer & MSR_EFER_SVME) {
+ env->hflags |= HF_SVME_MASK;
+ }
+}
+
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ return address_space_ldub(as, addr, attrs, NULL);
+}
+
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ return address_space_lduw(as, addr, attrs, NULL);
+}
+
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ return address_space_ldl(as, addr, attrs, NULL);
+}
+
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ return address_space_ldq(as, addr, attrs, NULL);
+}
+
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ address_space_stb(as, addr, val, attrs, NULL);
+}
+
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ address_space_stl_notdirty(as, addr, val, attrs, NULL);
+}
+
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ address_space_stw(as, addr, val, attrs, NULL);
+}
+
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ address_space_stl(as, addr, val, attrs, NULL);
+}
+
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ MemTxAttrs attrs = cpu_get_mem_attrs(env);
+ AddressSpace *as = cpu_addressspace(cs, attrs);
+
+ address_space_stq(as, addr, val, attrs, NULL);
+}
+#endif
diff --git a/target/i386/helper.h b/target/i386/helper.h
new file mode 100644
index 000000000..ac3b4d1ee
--- /dev/null
+++ b/target/i386/helper.h
@@ -0,0 +1,233 @@
+DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+
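+/*
+ * Editorial note (hedged): each DEF_HELPER_n(name, ret, t1, ..., tn)
+ * entry below declares a TCG helper of the shape
+ *     ret helper_name(t1, ..., tn);
+ * so e.g. DEF_HELPER_2(divb_AL, void, env, tl) corresponds to
+ *     void helper_divb_AL(CPUX86State *env, target_ulong t0);
+ * and the TCG_CALL_NO_* flags describe side effects to the optimizer.
+ */
+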
+DEF_HELPER_3(write_eflags, void, env, tl, i32)
+DEF_HELPER_1(read_eflags, tl, env)
+DEF_HELPER_2(divb_AL, void, env, tl)
+DEF_HELPER_2(idivb_AL, void, env, tl)
+DEF_HELPER_2(divw_AX, void, env, tl)
+DEF_HELPER_2(idivw_AX, void, env, tl)
+DEF_HELPER_2(divl_EAX, void, env, tl)
+DEF_HELPER_2(idivl_EAX, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(divq_EAX, void, env, tl)
+DEF_HELPER_2(idivq_EAX, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_1(bnd_jmp, void, env)
+
+DEF_HELPER_2(aam, void, env, int)
+DEF_HELPER_2(aad, void, env, int)
+DEF_HELPER_1(aaa, void, env)
+DEF_HELPER_1(aas, void, env)
+DEF_HELPER_1(daa, void, env)
+DEF_HELPER_1(das, void, env)
+
+DEF_HELPER_2(lsl, tl, env, tl)
+DEF_HELPER_2(lar, tl, env, tl)
+DEF_HELPER_2(verr, void, env, tl)
+DEF_HELPER_2(verw, void, env, tl)
+DEF_HELPER_2(lldt, void, env, int)
+DEF_HELPER_2(ltr, void, env, int)
+DEF_HELPER_3(load_seg, void, env, int, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
+DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
+DEF_HELPER_2(iret_real, void, env, int)
+DEF_HELPER_3(iret_protected, void, env, int, int)
+DEF_HELPER_3(lret_protected, void, env, int, int)
+DEF_HELPER_1(clts, void, env)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
+DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
+#endif /* !CONFIG_USER_ONLY */
+
+DEF_HELPER_1(sysenter, void, env)
+DEF_HELPER_2(sysexit, void, env, int)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(syscall, void, env, int)
+DEF_HELPER_2(sysret, void, env, int)
+#endif
+DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_1(reset_rf, void, env)
+DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_1(cli, void, env)
+DEF_HELPER_1(sti, void, env)
+DEF_HELPER_1(clac, void, env)
+DEF_HELPER_1(stac, void, env)
+DEF_HELPER_3(boundw, void, env, tl, int)
+DEF_HELPER_3(boundl, void, env, tl, int)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(rsm, void, env)
+#endif /* !CONFIG_USER_ONLY */
+
+DEF_HELPER_2(into, void, env, int)
+DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg8b, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg16b, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_1(single_step, TCG_CALL_NO_WG, noreturn, env)
+DEF_HELPER_1(rechecking_single_step, void, env)
+DEF_HELPER_1(cpuid, void, env)
+DEF_HELPER_1(rdtsc, void, env)
+DEF_HELPER_1(rdtscp, void, env)
+DEF_HELPER_FLAGS_1(rdpmc, TCG_CALL_NO_WG, noreturn, env)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(outb, void, env, i32, i32)
+DEF_HELPER_2(inb, tl, env, i32)
+DEF_HELPER_3(outw, void, env, i32, i32)
+DEF_HELPER_2(inw, tl, env, i32)
+DEF_HELPER_3(outl, void, env, i32, i32)
+DEF_HELPER_2(inl, tl, env, i32)
+DEF_HELPER_FLAGS_3(check_io, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
+DEF_HELPER_2(svm_check_intercept, void, env, i32)
+DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
+DEF_HELPER_3(vmrun, void, env, int, int)
+DEF_HELPER_1(vmmcall, void, env)
+DEF_HELPER_2(vmload, void, env, int)
+DEF_HELPER_2(vmsave, void, env, int)
+DEF_HELPER_1(stgi, void, env)
+DEF_HELPER_1(clgi, void, env)
+DEF_HELPER_FLAGS_2(flush_page, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(hlt, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_FLAGS_2(monitor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(mwait, TCG_CALL_NO_WG, noreturn, env, int)
+DEF_HELPER_1(rdmsr, void, env)
+DEF_HELPER_1(wrmsr, void, env)
+DEF_HELPER_FLAGS_2(read_crN, TCG_CALL_NO_RWG, tl, env, int)
+DEF_HELPER_FLAGS_3(write_crN, TCG_CALL_NO_RWG, void, env, int, tl)
+#endif /* !CONFIG_USER_ONLY */
+
+/* x86 FPU */
+
+DEF_HELPER_2(flds_FT0, void, env, i32)
+DEF_HELPER_2(fldl_FT0, void, env, i64)
+DEF_HELPER_2(fildl_FT0, void, env, s32)
+DEF_HELPER_2(flds_ST0, void, env, i32)
+DEF_HELPER_2(fldl_ST0, void, env, i64)
+DEF_HELPER_2(fildl_ST0, void, env, s32)
+DEF_HELPER_2(fildll_ST0, void, env, s64)
+DEF_HELPER_1(fsts_ST0, i32, env)
+DEF_HELPER_1(fstl_ST0, i64, env)
+DEF_HELPER_1(fist_ST0, s32, env)
+DEF_HELPER_1(fistl_ST0, s32, env)
+DEF_HELPER_1(fistll_ST0, s64, env)
+DEF_HELPER_1(fistt_ST0, s32, env)
+DEF_HELPER_1(fisttl_ST0, s32, env)
+DEF_HELPER_1(fisttll_ST0, s64, env)
+DEF_HELPER_2(fldt_ST0, void, env, tl)
+DEF_HELPER_2(fstt_ST0, void, env, tl)
+DEF_HELPER_1(fpush, void, env)
+DEF_HELPER_1(fpop, void, env)
+DEF_HELPER_1(fdecstp, void, env)
+DEF_HELPER_1(fincstp, void, env)
+DEF_HELPER_2(ffree_STN, void, env, int)
+DEF_HELPER_1(fmov_ST0_FT0, void, env)
+DEF_HELPER_2(fmov_FT0_STN, void, env, int)
+DEF_HELPER_2(fmov_ST0_STN, void, env, int)
+DEF_HELPER_2(fmov_STN_ST0, void, env, int)
+DEF_HELPER_2(fxchg_ST0_STN, void, env, int)
+DEF_HELPER_1(fcom_ST0_FT0, void, env)
+DEF_HELPER_1(fucom_ST0_FT0, void, env)
+DEF_HELPER_1(fcomi_ST0_FT0, void, env)
+DEF_HELPER_1(fucomi_ST0_FT0, void, env)
+DEF_HELPER_1(fadd_ST0_FT0, void, env)
+DEF_HELPER_1(fmul_ST0_FT0, void, env)
+DEF_HELPER_1(fsub_ST0_FT0, void, env)
+DEF_HELPER_1(fsubr_ST0_FT0, void, env)
+DEF_HELPER_1(fdiv_ST0_FT0, void, env)
+DEF_HELPER_1(fdivr_ST0_FT0, void, env)
+DEF_HELPER_2(fadd_STN_ST0, void, env, int)
+DEF_HELPER_2(fmul_STN_ST0, void, env, int)
+DEF_HELPER_2(fsub_STN_ST0, void, env, int)
+DEF_HELPER_2(fsubr_STN_ST0, void, env, int)
+DEF_HELPER_2(fdiv_STN_ST0, void, env, int)
+DEF_HELPER_2(fdivr_STN_ST0, void, env, int)
+DEF_HELPER_1(fchs_ST0, void, env)
+DEF_HELPER_1(fabs_ST0, void, env)
+DEF_HELPER_1(fxam_ST0, void, env)
+DEF_HELPER_1(fld1_ST0, void, env)
+DEF_HELPER_1(fldl2t_ST0, void, env)
+DEF_HELPER_1(fldl2e_ST0, void, env)
+DEF_HELPER_1(fldpi_ST0, void, env)
+DEF_HELPER_1(fldlg2_ST0, void, env)
+DEF_HELPER_1(fldln2_ST0, void, env)
+DEF_HELPER_1(fldz_ST0, void, env)
+DEF_HELPER_1(fldz_FT0, void, env)
+DEF_HELPER_1(fnstsw, i32, env)
+DEF_HELPER_1(fnstcw, i32, env)
+DEF_HELPER_2(fldcw, void, env, i32)
+DEF_HELPER_1(fclex, void, env)
+DEF_HELPER_1(fwait, void, env)
+DEF_HELPER_1(fninit, void, env)
+DEF_HELPER_2(fbld_ST0, void, env, tl)
+DEF_HELPER_2(fbst_ST0, void, env, tl)
+DEF_HELPER_1(f2xm1, void, env)
+DEF_HELPER_1(fyl2x, void, env)
+DEF_HELPER_1(fptan, void, env)
+DEF_HELPER_1(fpatan, void, env)
+DEF_HELPER_1(fxtract, void, env)
+DEF_HELPER_1(fprem1, void, env)
+DEF_HELPER_1(fprem, void, env)
+DEF_HELPER_1(fyl2xp1, void, env)
+DEF_HELPER_1(fsqrt, void, env)
+DEF_HELPER_1(fsincos, void, env)
+DEF_HELPER_1(frndint, void, env)
+DEF_HELPER_1(fscale, void, env)
+DEF_HELPER_1(fsin, void, env)
+DEF_HELPER_1(fcos, void, env)
+DEF_HELPER_3(fstenv, void, env, tl, int)
+DEF_HELPER_3(fldenv, void, env, tl, int)
+DEF_HELPER_3(fsave, void, env, tl, int)
+DEF_HELPER_3(frstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
+
+DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+/* MMX/SSE */
+
+DEF_HELPER_2(ldmxcsr, void, env, i32)
+DEF_HELPER_1(update_mxcsr, void, env)
+DEF_HELPER_1(enter_mmx, void, env)
+DEF_HELPER_1(emms, void, env)
+DEF_HELPER_3(movq, void, env, ptr, ptr)
+
+#define SHIFT 0
+#include "ops_sse_header.h"
+#define SHIFT 1
+#include "ops_sse_header.h"
+
+DEF_HELPER_3(rclb, tl, env, tl, tl)
+DEF_HELPER_3(rclw, tl, env, tl, tl)
+DEF_HELPER_3(rcll, tl, env, tl, tl)
+DEF_HELPER_3(rcrb, tl, env, tl, tl)
+DEF_HELPER_3(rcrw, tl, env, tl, tl)
+DEF_HELPER_3(rcrl, tl, env, tl, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_3(rclq, tl, env, tl, tl)
+DEF_HELPER_3(rcrq, tl, env, tl, tl)
+#endif
+
+DEF_HELPER_1(rdrand, tl, env)
diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c
new file mode 100644
index 000000000..10f8aba86
--- /dev/null
+++ b/target/i386/host-cpu.c
@@ -0,0 +1,207 @@
+/*
+ * x86 host CPU functions, and "host" cpu type initialization
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "host-cpu.h"
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+
+/* Note: Only safe for use on x86(-64) hosts */
+static uint32_t host_cpu_phys_bits(void)
+{
+ uint32_t eax;
+ uint32_t host_phys_bits;
+
+ host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
+ if (eax >= 0x80000008) {
+ host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
+ /*
+         * Note: according to AMD doc 25481 rev 2.34 there is a field at
+         * 23:16 that can specify the maximum physical address bits for
+         * the guest, overriding this value; but I've not seen anything
+         * with that set.
+ */
+ host_phys_bits = eax & 0xff;
+ } else {
+ /*
+         * It's an odd 64-bit machine that doesn't have the leaf for
+         * physical address bits; fall back to 36, which covers most
+         * older Intel parts.
+ */
+ host_phys_bits = 36;
+ }
+
+ return host_phys_bits;
+}
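+
+/*
+ * Illustrative example (editorial): on a host where CPUID.80000008H:EAX
+ * reads, say, 0x00003028, bits 7:0 give 0x28 = 40 physical address bits
+ * (bits 15:8 hold the linear address width, 0x30 = 48 here).
+ */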
+
+static void host_cpu_enable_cpu_pm(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
+ &cpu->mwait.ecx, &cpu->mwait.edx);
+ env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
+}
+
+static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu)
+{
+ uint32_t host_phys_bits = host_cpu_phys_bits();
+ uint32_t phys_bits = cpu->phys_bits;
+ static bool warned;
+
+ /*
+ * Print a warning if the user set it to a value that's not the
+ * host value.
+ */
+ if (phys_bits != host_phys_bits && phys_bits != 0 &&
+ !warned) {
+ warn_report("Host physical bits (%u)"
+ " does not match phys-bits property (%u)",
+ host_phys_bits, phys_bits);
+ warned = true;
+ }
+
+ if (cpu->host_phys_bits) {
+ /* The user asked for us to use the host physical bits */
+ phys_bits = host_phys_bits;
+ if (cpu->host_phys_bits_limit &&
+ phys_bits > cpu->host_phys_bits_limit) {
+ phys_bits = cpu->host_phys_bits_limit;
+ }
+ }
+
+ return phys_bits;
+}
+
+bool host_cpu_realizefn(CPUState *cs, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cpu->max_features && enable_cpu_pm) {
+ host_cpu_enable_cpu_pm(cpu);
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ uint32_t phys_bits = host_cpu_adjust_phys_bits(cpu);
+
+ if (phys_bits &&
+ (phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
+ phys_bits < 32)) {
+ error_setg(errp, "phys-bits should be between 32 and %u "
+ " (but is %u)",
+ TARGET_PHYS_ADDR_SPACE_BITS, phys_bits);
+ return false;
+ }
+ cpu->phys_bits = phys_bits;
+ }
+ return true;
+}
+
+#define CPUID_MODEL_ID_SZ 48
+/**
+ * host_cpu_fill_model_id:
+ * Get CPUID model ID string from host CPU.
+ *
+ * @str should have at least CPUID_MODEL_ID_SZ bytes
+ *
+ * The function does NOT add a null terminator to the string
+ * automatically.
+ */
+static int host_cpu_fill_model_id(char *str)
+{
+ uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
+ memcpy(str + i * 16 + 0, &eax, 4);
+ memcpy(str + i * 16 + 4, &ebx, 4);
+ memcpy(str + i * 16 + 8, &ecx, 4);
+ memcpy(str + i * 16 + 12, &edx, 4);
+ }
+ return 0;
+}
+
+void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
+ x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+
+ host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
+ if (family) {
+ *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
+ }
+ if (model) {
+ *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
+ }
+ if (stepping) {
+ *stepping = eax & 0x0F;
+ }
+}
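+
+/*
+ * Illustrative example (editorial): for CPUID.01H:EAX = 0x000306c3 this
+ * yields family 6 (base family 6 plus extended family 0), model 0x3c
+ * (extended model 3 in the high nibble, base model 0xc in the low one)
+ * and stepping 3.
+ */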
+
+void host_cpu_instance_init(X86CPU *cpu)
+{
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+
+ if (xcc->model) {
+ uint32_t ebx = 0, ecx = 0, edx = 0;
+ char vendor[CPUID_VENDOR_SZ + 1];
+
+ host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
+ x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
+ object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
+ }
+}
+
+void host_cpu_max_instance_init(X86CPU *cpu)
+{
+ char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
+ char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
+ int family, model, stepping;
+
+ /* Use max host physical address bits if -cpu max option is applied */
+ object_property_set_bool(OBJECT(cpu), "host-phys-bits", true, &error_abort);
+
+ host_cpu_vendor_fms(vendor, &family, &model, &stepping);
+ host_cpu_fill_model_id(model_id);
+
+ object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort);
+ object_property_set_int(OBJECT(cpu), "family", family, &error_abort);
+ object_property_set_int(OBJECT(cpu), "model", model, &error_abort);
+ object_property_set_int(OBJECT(cpu), "stepping", stepping,
+ &error_abort);
+ object_property_set_str(OBJECT(cpu), "model-id", model_id,
+ &error_abort);
+}
+
+static void host_cpu_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+ xcc->host_cpuid_required = true;
+ xcc->ordering = 8;
+ xcc->model_description =
+ g_strdup_printf("processor with all supported host features ");
+}
+
+static const TypeInfo host_cpu_type_info = {
+ .name = X86_CPU_TYPE_NAME("host"),
+ .parent = X86_CPU_TYPE_NAME("max"),
+ .class_init = host_cpu_class_init,
+};
+
+static void host_cpu_type_init(void)
+{
+ type_register_static(&host_cpu_type_info);
+}
+
+type_init(host_cpu_type_init);
diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h
new file mode 100644
index 000000000..6a9bc918b
--- /dev/null
+++ b/target/i386/host-cpu.h
@@ -0,0 +1,19 @@
+/*
+ * x86 host CPU type initialization and host CPU functions
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HOST_CPU_H
+#define HOST_CPU_H
+
+void host_cpu_instance_init(X86CPU *cpu);
+void host_cpu_max_instance_init(X86CPU *cpu);
+bool host_cpu_realizefn(CPUState *cs, Error **errp);
+
+void host_cpu_vendor_fms(char *vendor, int *family, int *model, int *stepping);
+
+#endif /* HOST_CPU_H */
diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md
new file mode 100644
index 000000000..2d33477ac
--- /dev/null
+++ b/target/i386/hvf/README.md
@@ -0,0 +1,7 @@
+# OS X Hypervisor.framework support in QEMU
+
+These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor) (last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were:
+
+1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets.
+2. Removal of `apic_page` and hyperv-related functionality.
+3. More relaxed use of `qemu_mutex_lock_iothread`.
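+
+## Trying it out (illustrative)
+
+Assuming QEMU was configured with HVF enabled (`--enable-hvf` on a macOS
+host), a guest can then be started with an accelerator selection along the
+lines of `qemu-system-x86_64 -machine q35 -accel hvf -cpu host ...`; this
+command line is an editorial illustration, not taken from the original
+sources.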
diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c
new file mode 100644
index 000000000..333db5989
--- /dev/null
+++ b/target/i386/hvf/hvf-cpu.c
@@ -0,0 +1,97 @@
+/*
+ * x86 HVF CPU type initialization
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "host-cpu.h"
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "sysemu/hvf.h"
+#include "hw/core/accel-cpu.h"
+
+static void hvf_cpu_max_instance_init(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ host_cpu_max_instance_init(cpu);
+
+ env->cpuid_min_level =
+ hvf_get_supported_cpuid(0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
+}
+
+static void hvf_cpu_xsave_init(void)
+{
+ static bool first = true;
+ int i;
+
+ if (!first) {
+ return;
+ }
+ first = false;
+
+ /* x87 and SSE states are in the legacy region of the XSAVE area. */
+ x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+ x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+ for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+ ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+ if (esa->size) {
+ int sz = hvf_get_supported_cpuid(0xd, i, R_EAX);
+ if (sz != 0) {
+ assert(esa->size == sz);
+ esa->offset = hvf_get_supported_cpuid(0xd, i, R_EBX);
+ }
+ }
+ }
+}
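+
+/*
+ * Illustrative note (editorial): CPUID leaf 0xd with ECX set to the
+ * component index reports, per XSAVE component, the save-area size in EAX
+ * and its offset within the XSAVE area in EBX; e.g. component 2 (AVX)
+ * conventionally reports size 256 at offset 576 in the standard format.
+ */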
+
+static void hvf_cpu_instance_init(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ host_cpu_instance_init(cpu);
+
+ /* Special cases not set in the X86CPUDefinition structs: */
+ /* TODO: in-kernel irqchip for hvf */
+
+ if (cpu->max_features) {
+ hvf_cpu_max_instance_init(cpu);
+ }
+
+ hvf_cpu_xsave_init();
+}
+
+static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
+
+ acc->cpu_realizefn = host_cpu_realizefn;
+ acc->cpu_instance_init = hvf_cpu_instance_init;
+}
+
+static const TypeInfo hvf_cpu_accel_type_info = {
+ .name = ACCEL_CPU_NAME("hvf"),
+
+ .parent = TYPE_ACCEL_CPU,
+ .class_init = hvf_cpu_accel_class_init,
+ .abstract = true,
+};
+
+static void hvf_cpu_accel_register_types(void)
+{
+ type_register_static(&hvf_cpu_accel_type_info);
+}
+
+type_init(hvf_cpu_accel_register_types);
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
new file mode 100644
index 000000000..76e923552
--- /dev/null
+++ b/target/i386/hvf/hvf-i386.h
@@ -0,0 +1,34 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * Copyright 2017 Google Inc
+ *
+ * Adapted from target-i386/hax-i386.h:
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HVF_I386_H
+#define HVF_I386_H
+
+#include "qemu/accel.h"
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
+#include "cpu.h"
+#include "x86.h"
+
+void hvf_handle_io(CPUArchState *, uint16_t, void *, int, int, int);
+
+#ifdef NEED_CPU_H
+/* Functions exported to host specific mode */
+
+/* Host specific functions */
+int hvf_inject_interrupt(CPUArchState *env, int vector);
+#endif
+
+#endif
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
new file mode 100644
index 000000000..4ba6e82fa
--- /dev/null
+++ b/target/i386/hvf/hvf.c
@@ -0,0 +1,673 @@
+/* Copyright 2008 IBM Corporation
+ * 2008 Red Hat, Inc.
+ * Copyright 2011 Intel Corporation
+ * Copyright 2016 Veertu, Inc.
+ * Copyright 2017 The Android Open Source Project
+ *
+ * QEMU Hypervisor.framework support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file contain code under public domain from the hvdos project:
+ * https://github.com/mist64/hvdos
+ *
+ * Parts Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
+#include "sysemu/runstate.h"
+#include "sysemu/cpus.h"
+#include "hvf-i386.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86.h"
+#include "x86_descr.h"
+#include "x86_mmu.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "x86_task.h"
+#include "x86hvf.h"
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+#include <sys/sysctl.h>
+
+#include "hw/i386/apic_internal.h"
+#include "qemu/main-loop.h"
+#include "qemu/accel.h"
+#include "target/i386/cpu.h"
+
+void vmx_update_tpr(CPUState *cpu)
+{
+    /* TODO: need to integrate APIC handling */
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
+ int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
+
+ wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
+ if (irr == -1) {
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
+ } else {
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
+ irr >> 4);
+ }
+}
+
+static void update_apic_tpr(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+}
+
+#define VECTORING_INFO_VECTOR_MASK 0xff
+
+void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
+ int direction, int size, int count)
+{
+ int i;
+ uint8_t *ptr = buffer;
+
+ for (i = 0; i < count; i++) {
+ address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
+ ptr, size,
+ direction);
+ ptr += size;
+ }
+}
+
+static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
+{
+ int read, write;
+
+ /* EPT fault on an instruction fetch doesn't make sense here */
+ if (ept_qual & EPT_VIOLATION_INST_FETCH) {
+ return false;
+ }
+
+ /* EPT fault must be a read fault or a write fault */
+ read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
+ write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
+ if ((read | write) == 0) {
+ return false;
+ }
+
+ if (write && slot) {
+ if (slot->flags & HVF_SLOT_LOG) {
+ memory_region_set_dirty(slot->region, gpa - slot->start, 1);
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ | HV_MEMORY_WRITE);
+ }
+ }
+
+ /*
+ * The EPT violation must have been caused by accessing a
+ * guest-physical address that is a translation of a guest-linear
+ * address.
+ */
+ if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
+ (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
+ return false;
+ }
+
+ if (!slot) {
+ return true;
+ }
+ if (!memory_region_is_ram(slot->region) &&
+ !(read && memory_region_is_romd(slot->region))) {
+ return true;
+ }
+ return false;
+}
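+
+/*
+ * Editorial note: a "true" return above means the faulting access cannot
+ * be satisfied by RAM or ROMD backing (or no slot covers the address at
+ * all), so the caller treats the exit as MMIO and falls back to decoding
+ * and emulating the instruction.
+ */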
+
+void hvf_arch_vcpu_destroy(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ g_free(env->hvf_mmio_buf);
+}
+
+static void init_tsc_freq(CPUX86State *env)
+{
+ size_t length;
+ uint64_t tsc_freq;
+
+ if (env->tsc_khz != 0) {
+ return;
+ }
+
+ length = sizeof(uint64_t);
+ if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
+ return;
+ }
+ env->tsc_khz = tsc_freq / 1000; /* Hz to KHz */
+}
+
+static void init_apic_bus_freq(CPUX86State *env)
+{
+ size_t length;
+ uint64_t bus_freq;
+
+ if (env->apic_bus_freq != 0) {
+ return;
+ }
+
+ length = sizeof(uint64_t);
+ if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
+ return;
+ }
+ env->apic_bus_freq = bus_freq;
+}
+
+static inline bool tsc_is_known(CPUX86State *env)
+{
+ return env->tsc_khz != 0;
+}
+
+static inline bool apic_bus_freq_is_known(CPUX86State *env)
+{
+ return env->apic_bus_freq != 0;
+}
+
+void hvf_kick_vcpu_thread(CPUState *cpu)
+{
+ cpus_kick_thread(cpu);
+}
+
+int hvf_arch_init(void)
+{
+ return 0;
+}
+
+int hvf_arch_init_vcpu(CPUState *cpu)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+
+ init_emu();
+ init_decoder();
+
+ hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
+ env->hvf_mmio_buf = g_new(char, 4096);
+
+ if (x86cpu->vmware_cpuid_freq) {
+ init_tsc_freq(env);
+ init_apic_bus_freq(env);
+
+ if (!tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
+ error_report("vmware-cpuid-freq: feature couldn't be enabled");
+ }
+ }
+
+ if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
+ &hvf_state->hvf_caps->vmx_cap_pinbased)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
+ &hvf_state->hvf_caps->vmx_cap_procbased)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
+ &hvf_state->hvf_caps->vmx_cap_procbased2)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
+ &hvf_state->hvf_caps->vmx_cap_entry)) {
+ abort();
+ }
+
+ /* set VMCS control fields */
+ wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
+ VMCS_PIN_BASED_CTLS_EXTINT |
+ VMCS_PIN_BASED_CTLS_NMI |
+ VMCS_PIN_BASED_CTLS_VNMI));
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
+ VMCS_PRI_PROC_BASED_CTLS_HLT |
+ VMCS_PRI_PROC_BASED_CTLS_MWAIT |
+ VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
+ VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
+ VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
+ wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
+ VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
+
+ wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
+ 0));
+ wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
+
+ wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
+
+ x86cpu = X86_CPU(cpu);
+ x86cpu->env.xsave_buf_len = 4096;
+ x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);
+
+ /*
+ * The allocated storage must be large enough for all of the
+ * possible XSAVE state components.
+ */
+ assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);
+
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);
+
+ return 0;
+}
+
+static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
+ env->interrupt_injected = -1;
+ env->nmi_injected = false;
+ env->ins_len = 0;
+ env->has_error_code = false;
+ if (idtvec_info & VMCS_IDT_VEC_VALID) {
+ switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
+ case VMCS_IDT_VEC_HWINTR:
+ case VMCS_IDT_VEC_SWINTR:
+ env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ break;
+ case VMCS_IDT_VEC_NMI:
+ env->nmi_injected = true;
+ break;
+ case VMCS_IDT_VEC_HWEXCEPTION:
+ case VMCS_IDT_VEC_SWEXCEPTION:
+ env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ env->exception_injected = 1;
+ break;
+ case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
+ default:
+ abort();
+ }
+ if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
+ (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
+ env->ins_len = ins_len;
+ }
+ if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
+ env->has_error_code = true;
+ env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
+ }
+ }
+ if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
+ VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+ if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
+ (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+ } else {
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ }
+}
+
+static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ /*
+     * A wrapper that extends cpu_x86_cpuid with the 0x40000000 and
+     * 0x40000010 leaves; leaves 0x40000001-0x4000000F are filled with
+     * zeros. This provides vmware-cpuid-freq support to hvf.
+     *
+     * Note: leaf 0x40000000 does not expose HVF itself, leaving the
+     * hypervisor signature empty.
+ */
+
+ if (index < 0x40000000 || index > 0x40000010 ||
+ !tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
+
+ cpu_x86_cpuid(env, index, count, eax, ebx, ecx, edx);
+ return;
+ }
+
+ switch (index) {
+ case 0x40000000:
+ *eax = 0x40000010; /* Max available cpuid leaf */
+ *ebx = 0; /* Leave signature empty */
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0x40000010:
+ *eax = env->tsc_khz;
+ *ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
+ *ecx = 0;
+ *edx = 0;
+ break;
+ default:
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+}
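+
+/*
+ * Illustrative guest-side usage (editorial): a guest that understands the
+ * VMware convention executes CPUID with EAX = 0x40000010 and reads the
+ * TSC frequency in kHz from EAX and the APIC bus frequency in kHz from
+ * EBX, avoiding run-time calibration of either clock.
+ */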
+
+int hvf_vcpu_exec(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ int ret = 0;
+ uint64_t rip = 0;
+
+ if (hvf_process_events(cpu)) {
+ return EXCP_HLT;
+ }
+
+ do {
+ if (cpu->vcpu_dirty) {
+ hvf_put_registers(cpu);
+ cpu->vcpu_dirty = false;
+ }
+
+ if (hvf_inject_interrupts(cpu)) {
+ return EXCP_INTERRUPT;
+ }
+ vmx_update_tpr(cpu);
+
+ qemu_mutex_unlock_iothread();
+ if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
+ qemu_mutex_lock_iothread();
+ return EXCP_HLT;
+ }
+
+ hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
+ assert_hvf_ok(r);
+
+ /* handle VMEXIT */
+ uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
+ uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
+ uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
+ VMCS_EXIT_INSTRUCTION_LENGTH);
+
+ uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
+
+ hvf_store_events(cpu, ins_len, idtvec_info);
+ rip = rreg(cpu->hvf->fd, HV_X86_RIP);
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
+
+ qemu_mutex_lock_iothread();
+
+ update_apic_tpr(cpu);
+ current_cpu = cpu;
+
+ ret = 0;
+ switch (exit_reason) {
+ case EXIT_REASON_HLT: {
+ macvm_set_rip(cpu, rip + ins_len);
+ if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK))
+ && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ !(idtvec_info & VMCS_IDT_VEC_VALID)) {
+ cpu->halted = 1;
+ ret = EXCP_HLT;
+ break;
+ }
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ case EXIT_REASON_MWAIT: {
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ /* Need to check if MMIO or unmapped fault */
+ case EXIT_REASON_EPT_FAULT:
+ {
+ hvf_slot *slot;
+ uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);
+
+ if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
+ ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
+ vmx_set_nmi_blocking(cpu);
+ }
+
+ slot = hvf_find_overlap_slot(gpa, 1);
+ /* mmio */
+ if (ept_emulation_fault(slot, gpa, exit_qual)) {
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ decode_instruction(env, &decode);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+ break;
+ }
+ break;
+ }
+ case EXIT_REASON_INOUT:
+ {
+ uint32_t in = (exit_qual & 8) != 0;
+ uint32_t size = (exit_qual & 7) + 1;
+ uint32_t string = (exit_qual & 16) != 0;
+ uint32_t port = exit_qual >> 16;
+ /*uint32_t rep = (exit_qual & 0x20) != 0;*/
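+            /*
+             * Editorial note: per the Intel SDM, the I/O exit
+             * qualification packs size-1 in bits 2:0, direction (IN)
+             * in bit 3, string-op in bit 4, REP prefix in bit 5 and
+             * the port number in bits 31:16, which is what the masks
+             * above decode.
+             */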
+
+ if (!string && in) {
+ uint64_t val = 0;
+ load_regs(cpu);
+ hvf_handle_io(env, port, &val, 0, size, 1);
+ if (size == 1) {
+ AL(env) = val;
+ } else if (size == 2) {
+ AX(env) = val;
+ } else if (size == 4) {
+ RAX(env) = (uint32_t)val;
+ } else {
+ RAX(env) = (uint64_t)val;
+ }
+ env->eip += ins_len;
+ store_regs(cpu);
+ break;
+ } else if (!string && !in) {
+ RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
+ hvf_handle_io(env, port, &RAX(env), 1, size, 1);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ decode_instruction(env, &decode);
+ assert(ins_len == decode.len);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+
+ break;
+ }
+ case EXIT_REASON_CPUID: {
+ uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
+ uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
+ uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
+ uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
+
+ if (rax == 1) {
+ /* CPUID1.ecx.OSXSAVE needs to know CR4 */
+ env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
+ }
+ hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
+
+ wreg(cpu->hvf->fd, HV_X86_RAX, rax);
+ wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
+ wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
+ wreg(cpu->hvf->fd, HV_X86_RDX, rdx);
+
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ case EXIT_REASON_XSETBV: {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
+ uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
+ uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);
+
+ if (ecx) {
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ env->xcr0 = ((uint64_t)edx << 32) | eax;
+ wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ case EXIT_REASON_INTR_WINDOW:
+ vmx_clear_int_window_exiting(cpu);
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_NMI_WINDOW:
+ vmx_clear_nmi_window_exiting(cpu);
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_EXT_INTR:
+ /* force exit and allow io handling */
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_RDMSR:
+ case EXIT_REASON_WRMSR:
+ {
+ load_regs(cpu);
+ if (exit_reason == EXIT_REASON_RDMSR) {
+ simulate_rdmsr(cpu);
+ } else {
+ simulate_wrmsr(cpu);
+ }
+ env->eip += ins_len;
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_CR_ACCESS: {
+ int cr;
+ int reg;
+
+ load_regs(cpu);
+ cr = exit_qual & 15;
+ reg = (exit_qual >> 8) & 15;
+
+ switch (cr) {
+ case 0x0: {
+ macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
+ break;
+ }
+ case 4: {
+ macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
+ break;
+ }
+ case 8: {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ if (exit_qual & 0x10) {
+ RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
+ } else {
+ int tpr = RRX(env, reg);
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+ ret = EXCP_INTERRUPT;
+ }
+ break;
+ }
+ default:
+ error_report("Unrecognized CR %d", cr);
+ abort();
+ }
+ env->eip += ins_len;
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_APIC_ACCESS: { /* TODO */
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ decode_instruction(env, &decode);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_TPR: {
+ ret = 1;
+ break;
+ }
+ case EXIT_REASON_TASK_SWITCH: {
+ uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
+ x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+ vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
+ vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
+ & VMCS_INTR_T_MASK);
+ break;
+ }
+ case EXIT_REASON_TRIPLE_FAULT: {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ case EXIT_REASON_RDPMC:
+ wreg(cpu->hvf->fd, HV_X86_RAX, 0);
+ wreg(cpu->hvf->fd, HV_X86_RDX, 0);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ case VMX_REASON_VMCALL:
+ env->exception_nr = EXCP0D_GPF;
+ env->exception_injected = 1;
+ env->has_error_code = true;
+ env->error_code = 0;
+ break;
+ default:
+ error_report("%llx: unhandled exit %llx", rip, exit_reason);
+ }
+ } while (ret == 0);
+
+ return ret;
+}
diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build
new file mode 100644
index 000000000..f6d4c394d
--- /dev/null
+++ b/target/i386/hvf/meson.build
@@ -0,0 +1,13 @@
+i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
+ 'hvf.c',
+ 'x86.c',
+ 'x86_cpuid.c',
+ 'x86_decode.c',
+ 'x86_descr.c',
+ 'x86_emu.c',
+ 'x86_flags.c',
+ 'x86_mmu.c',
+ 'x86_task.c',
+ 'x86hvf.c',
+ 'hvf-cpu.c',
+))
diff --git a/target/i386/hvf/panic.h b/target/i386/hvf/panic.h
new file mode 100644
index 000000000..a3eabebbb
--- /dev/null
+++ b/target/i386/hvf/panic.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HVF_PANIC_H
+#define HVF_PANIC_H
+
+#define VM_PANIC(x) {\
+ printf("%s\n", x); \
+ abort(); \
+}
+
+#define VM_PANIC_ON(x) {\
+ if (x) { \
+ printf("%s\n", #x); \
+ abort(); \
+ } \
+}
+
+#define VM_PANIC_EX(...) {\
+ printf(__VA_ARGS__); \
+ abort(); \
+}
+
+#define VM_PANIC_ON_EX(x, ...) {\
+ if (x) { \
+ printf(__VA_ARGS__); \
+ abort(); \
+ } \
+}
+
+#endif
diff --git a/target/i386/hvf/vmcs.h b/target/i386/hvf/vmcs.h
new file mode 100644
index 000000000..42de7ebc3
--- /dev/null
+++ b/target/i386/hvf/vmcs.h
@@ -0,0 +1,374 @@
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef VMCS_H
+#define VMCS_H
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+#define VMCS_INITIAL 0xffffffffffffffff
+
+#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)
+/*
+ * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
+ */
+#define VMCS_INVALID_ENCODING 0xffffffff
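+
+/*
+ * Editorial note (hedged): in these encodings bits 14:13 give the field
+ * width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bits
+ * 11:10 the field type (0 = control, 1 = read-only data, 2 = guest state,
+ * 3 = host state); e.g. VMCS_EXIT_REASON (0x4402) decodes as a 32-bit
+ * read-only field.
+ */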
+
+/* 16-bit control fields */
+#define VMCS_VPID 0x00000000
+#define VMCS_PIR_VECTOR 0x00000002
+
+/* 16-bit guest-state fields */
+#define VMCS_GUEST_ES_SELECTOR 0x00000800
+#define VMCS_GUEST_CS_SELECTOR 0x00000802
+#define VMCS_GUEST_SS_SELECTOR 0x00000804
+#define VMCS_GUEST_DS_SELECTOR 0x00000806
+#define VMCS_GUEST_FS_SELECTOR 0x00000808
+#define VMCS_GUEST_GS_SELECTOR 0x0000080A
+#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
+#define VMCS_GUEST_TR_SELECTOR 0x0000080E
+#define VMCS_GUEST_INTR_STATUS 0x00000810
+
+/* 16-bit host-state fields */
+#define VMCS_HOST_ES_SELECTOR 0x00000C00
+#define VMCS_HOST_CS_SELECTOR 0x00000C02
+#define VMCS_HOST_SS_SELECTOR 0x00000C04
+#define VMCS_HOST_DS_SELECTOR 0x00000C06
+#define VMCS_HOST_FS_SELECTOR 0x00000C08
+#define VMCS_HOST_GS_SELECTOR 0x00000C0A
+#define VMCS_HOST_TR_SELECTOR 0x00000C0C
+
+/* 64-bit control fields */
+#define VMCS_IO_BITMAP_A 0x00002000
+#define VMCS_IO_BITMAP_B 0x00002002
+#define VMCS_MSR_BITMAP 0x00002004
+#define VMCS_EXIT_MSR_STORE 0x00002006
+#define VMCS_EXIT_MSR_LOAD 0x00002008
+#define VMCS_ENTRY_MSR_LOAD 0x0000200A
+#define VMCS_EXECUTIVE_VMCS 0x0000200C
+#define VMCS_TSC_OFFSET 0x00002010
+#define VMCS_VIRTUAL_APIC 0x00002012
+#define VMCS_APIC_ACCESS 0x00002014
+#define VMCS_PIR_DESC 0x00002016
+#define VMCS_EPTP 0x0000201A
+#define VMCS_EOI_EXIT0 0x0000201C
+#define VMCS_EOI_EXIT1 0x0000201E
+#define VMCS_EOI_EXIT2 0x00002020
+#define VMCS_EOI_EXIT3 0x00002022
+#define VMCS_EOI_EXIT(vector) (VMCS_EOI_EXIT0 + ((vector) / 64) * 2)
+
+/* 64-bit read-only fields */
+#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
+
+/* 64-bit guest-state fields */
+#define VMCS_LINK_POINTER 0x00002800
+#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
+#define VMCS_GUEST_IA32_PAT 0x00002804
+#define VMCS_GUEST_IA32_EFER 0x00002806
+#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
+#define VMCS_GUEST_PDPTE0 0x0000280A
+#define VMCS_GUEST_PDPTE1 0x0000280C
+#define VMCS_GUEST_PDPTE2 0x0000280E
+#define VMCS_GUEST_PDPTE3 0x00002810
+
+/* 64-bit host-state fields */
+#define VMCS_HOST_IA32_PAT 0x00002C00
+#define VMCS_HOST_IA32_EFER 0x00002C02
+#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
+
+/* 32-bit control fields */
+#define VMCS_PIN_BASED_CTLS 0x00004000
+#define VMCS_PRI_PROC_BASED_CTLS 0x00004002
+#define VMCS_EXCEPTION_BITMAP 0x00004004
+#define VMCS_PF_ERROR_MASK 0x00004006
+#define VMCS_PF_ERROR_MATCH 0x00004008
+#define VMCS_CR3_TARGET_COUNT 0x0000400A
+#define VMCS_EXIT_CTLS 0x0000400C
+#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
+#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
+#define VMCS_ENTRY_CTLS 0x00004012
+#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
+#define VMCS_ENTRY_INTR_INFO 0x00004016
+#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
+#define VMCS_ENTRY_INST_LENGTH 0x0000401A
+#define VMCS_TPR_THRESHOLD 0x0000401C
+#define VMCS_SEC_PROC_BASED_CTLS 0x0000401E
+#define VMCS_PLE_GAP 0x00004020
+#define VMCS_PLE_WINDOW 0x00004022
+
+/* 32-bit read-only data fields */
+#define VMCS_INSTRUCTION_ERROR 0x00004400
+#define VMCS_EXIT_REASON 0x00004402
+#define VMCS_EXIT_INTR_INFO 0x00004404
+#define VMCS_EXIT_INTR_ERRCODE 0x00004406
+#define VMCS_IDT_VECTORING_INFO 0x00004408
+#define VMCS_IDT_VECTORING_ERROR 0x0000440A
+#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
+#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
+
+/* 32-bit guest-state fields */
+#define VMCS_GUEST_ES_LIMIT 0x00004800
+#define VMCS_GUEST_CS_LIMIT 0x00004802
+#define VMCS_GUEST_SS_LIMIT 0x00004804
+#define VMCS_GUEST_DS_LIMIT 0x00004806
+#define VMCS_GUEST_FS_LIMIT 0x00004808
+#define VMCS_GUEST_GS_LIMIT 0x0000480A
+#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
+#define VMCS_GUEST_TR_LIMIT 0x0000480E
+#define VMCS_GUEST_GDTR_LIMIT 0x00004810
+#define VMCS_GUEST_IDTR_LIMIT 0x00004812
+#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
+#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
+#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
+#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
+#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
+#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
+#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
+#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
+#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
+#define VMCS_GUEST_ACTIVITY 0x00004826
+#define VMCS_GUEST_SMBASE 0x00004828
+#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
+#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
+
+/* 32-bit host state fields */
+#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
+
+/* Natural Width control fields */
+#define VMCS_CR0_MASK 0x00006000
+#define VMCS_CR4_MASK 0x00006002
+#define VMCS_CR0_SHADOW 0x00006004
+#define VMCS_CR4_SHADOW 0x00006006
+#define VMCS_CR3_TARGET0 0x00006008
+#define VMCS_CR3_TARGET1 0x0000600A
+#define VMCS_CR3_TARGET2 0x0000600C
+#define VMCS_CR3_TARGET3 0x0000600E
+
+/* Natural Width read-only fields */
+#define VMCS_EXIT_QUALIFICATION 0x00006400
+#define VMCS_IO_RCX 0x00006402
+#define VMCS_IO_RSI 0x00006404
+#define VMCS_IO_RDI 0x00006406
+#define VMCS_IO_RIP 0x00006408
+#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
+
+/* Natural Width guest-state fields */
+#define VMCS_GUEST_CR0 0x00006800
+#define VMCS_GUEST_CR3 0x00006802
+#define VMCS_GUEST_CR4 0x00006804
+#define VMCS_GUEST_ES_BASE 0x00006806
+#define VMCS_GUEST_CS_BASE 0x00006808
+#define VMCS_GUEST_SS_BASE 0x0000680A
+#define VMCS_GUEST_DS_BASE 0x0000680C
+#define VMCS_GUEST_FS_BASE 0x0000680E
+#define VMCS_GUEST_GS_BASE 0x00006810
+#define VMCS_GUEST_LDTR_BASE 0x00006812
+#define VMCS_GUEST_TR_BASE 0x00006814
+#define VMCS_GUEST_GDTR_BASE 0x00006816
+#define VMCS_GUEST_IDTR_BASE 0x00006818
+#define VMCS_GUEST_DR7 0x0000681A
+#define VMCS_GUEST_RSP 0x0000681C
+#define VMCS_GUEST_RIP 0x0000681E
+#define VMCS_GUEST_RFLAGS 0x00006820
+#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
+#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
+#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
+
+/* Natural Width host-state fields */
+#define VMCS_HOST_CR0 0x00006C00
+#define VMCS_HOST_CR3 0x00006C02
+#define VMCS_HOST_CR4 0x00006C04
+#define VMCS_HOST_FS_BASE 0x00006C06
+#define VMCS_HOST_GS_BASE 0x00006C08
+#define VMCS_HOST_TR_BASE 0x00006C0A
+#define VMCS_HOST_GDTR_BASE 0x00006C0C
+#define VMCS_HOST_IDTR_BASE 0x00006C0E
+#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
+#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
+#define VMCS_HOST_RSP 0x00006C14
+#define VMCS_HOST_RIP 0x00006C16
+
+/*
+ * VM instruction error numbers
+ */
+#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5
+
+/*
+ * VMCS exit reasons
+ */
+#define EXIT_REASON_EXCEPTION 0
+#define EXIT_REASON_EXT_INTR 1
+#define EXIT_REASON_TRIPLE_FAULT 2
+#define EXIT_REASON_INIT 3
+#define EXIT_REASON_SIPI 4
+#define EXIT_REASON_IO_SMI 5
+#define EXIT_REASON_SMI 6
+#define EXIT_REASON_INTR_WINDOW 7
+#define EXIT_REASON_NMI_WINDOW 8
+#define EXIT_REASON_TASK_SWITCH 9
+#define EXIT_REASON_CPUID 10
+#define EXIT_REASON_GETSEC 11
+#define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
+#define EXIT_REASON_INVLPG 14
+#define EXIT_REASON_RDPMC 15
+#define EXIT_REASON_RDTSC 16
+#define EXIT_REASON_RSM 17
+#define EXIT_REASON_VMCALL 18
+#define EXIT_REASON_VMCLEAR 19
+#define EXIT_REASON_VMLAUNCH 20
+#define EXIT_REASON_VMPTRLD 21
+#define EXIT_REASON_VMPTRST 22
+#define EXIT_REASON_VMREAD 23
+#define EXIT_REASON_VMRESUME 24
+#define EXIT_REASON_VMWRITE 25
+#define EXIT_REASON_VMXOFF 26
+#define EXIT_REASON_VMXON 27
+#define EXIT_REASON_CR_ACCESS 28
+#define EXIT_REASON_DR_ACCESS 29
+#define EXIT_REASON_INOUT 30
+#define EXIT_REASON_RDMSR 31
+#define EXIT_REASON_WRMSR 32
+#define EXIT_REASON_INVAL_VMCS 33
+#define EXIT_REASON_INVAL_MSR 34
+#define EXIT_REASON_MWAIT 36
+#define EXIT_REASON_MTF 37
+#define EXIT_REASON_MONITOR 39
+#define EXIT_REASON_PAUSE 40
+#define EXIT_REASON_MCE_DURING_ENTR 41
+#define EXIT_REASON_TPR 43
+#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_VIRTUALIZED_EOI 45
+#define EXIT_REASON_GDTR_IDTR 46
+#define EXIT_REASON_LDTR_TR 47
+#define EXIT_REASON_EPT_FAULT 48
+#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
+#define EXIT_REASON_VMX_PREEMPT 52
+#define EXIT_REASON_INVVPID 53
+#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_APIC_WRITE 56
+
+/*
+ * NMI unblocking due to IRET.
+ *
+ * Applies to VM-exits due to hardware exception or EPT fault.
+ */
+#define EXIT_QUAL_NMIUDTI (1 << 12)
+/*
+ * VMCS interrupt information fields
+ */
+#define VMCS_INTR_VALID (1U << 31)
+#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */
+#define VMCS_INTR_T_HWINTR (0 << 8)
+#define VMCS_INTR_T_NMI (2 << 8)
+#define VMCS_INTR_T_HWEXCEPTION (3 << 8)
+#define VMCS_INTR_T_SWINTR (4 << 8)
+#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
+#define VMCS_INTR_T_SWEXCEPTION (6 << 8)
+#define VMCS_INTR_DEL_ERRCODE (1 << 11)
+
+/*
+ * VMCS IDT-Vectoring information fields
+ */
+#define VMCS_IDT_VEC_VECNUM 0xFF
+#define VMCS_IDT_VEC_VALID (1U << 31)
+#define VMCS_IDT_VEC_TYPE 0x700
+#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)
+#define VMCS_IDT_VEC_HWINTR (0 << 8)
+#define VMCS_IDT_VEC_NMI (2 << 8)
+#define VMCS_IDT_VEC_HWEXCEPTION (3 << 8)
+#define VMCS_IDT_VEC_SWINTR (4 << 8)
+#define VMCS_IDT_VEC_PRIV_SWEXCEPTION (5 << 8)
+#define VMCS_IDT_VEC_SWEXCEPTION (6 << 8)
+
+/*
+ * VMCS Guest interruptibility field
+ */
+#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)
+#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)
+#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)
+#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)
+
+/*
+ * Exit qualification for EXIT_REASON_INVAL_VMCS
+ */
+#define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3
+
+/*
+ * Exit qualification for EPT violation
+ */
+#define EPT_VIOLATION_DATA_READ (1UL << 0)
+#define EPT_VIOLATION_DATA_WRITE (1UL << 1)
+#define EPT_VIOLATION_INST_FETCH (1UL << 2)
+#define EPT_VIOLATION_GPA_READABLE (1UL << 3)
+#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)
+#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)
+#define EPT_VIOLATION_GLA_VALID (1UL << 7)
+#define EPT_VIOLATION_XLAT_VALID (1UL << 8)
+
+/*
+ * Exit qualification for APIC-access VM exit
+ */
+#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFF)
+#define APIC_ACCESS_TYPE(qual) (((qual) >> 12) & 0xF)
+
+/*
+ * Exit qualification for APIC-write VM exit
+ */
+#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)
+
+#define VMCS_PIN_BASED_CTLS_EXTINT (1 << 0)
+#define VMCS_PIN_BASED_CTLS_NMI (1 << 3)
+#define VMCS_PIN_BASED_CTLS_VNMI (1 << 5)
+
+#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)
+#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)
+#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)
+#define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10)
+#define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12)
+#define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19)
+#define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE (1 << 20)
+#define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW (1 << 21)
+#define VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING (1 << 22)
+#define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL (1 << 31)
+
+#define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0)
+#define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4)
+
+enum task_switch_reason {
+ TSR_CALL,
+ TSR_IRET,
+ TSR_JMP,
+ TSR_IDT_GATE, /* task gate in IDT */
+};
+
+#endif
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
new file mode 100644
index 000000000..6df87116f
--- /dev/null
+++ b/target/i386/hvf/vmx.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ * Based on Veertu vddh/vmm/vmx.h
+ *
+ * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file contains code under public domain from the hvdos project:
+ * https://github.com/mist64/hvdos
+ */
+
+#ifndef VMX_H
+#define VMX_H
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+#include "vmcs.h"
+#include "cpu.h"
+#include "x86.h"
+#include "sysemu/hvf.h"
+#include "sysemu/hvf_int.h"
+
+#include "exec/address-spaces.h"
+
+static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
+{
+ uint64_t v;
+
+ if (hv_vcpu_read_register(vcpu, reg, &v)) {
+ abort();
+ }
+
+ return v;
+}
+
+/* write GPR */
+static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
+{
+ if (hv_vcpu_write_register(vcpu, reg, v)) {
+ abort();
+ }
+}
+
+/* read VMCS field */
+static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
+{
+ uint64_t v;
+
+ hv_vmx_vcpu_read_vmcs(vcpu, field, &v);
+
+ return v;
+}
+
+/* write VMCS field */
+static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
+{
+ hv_vmx_vcpu_write_vmcs(vcpu, field, v);
+}
+
+/* desired control word constrained by hardware/hypervisor capabilities */
+static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
+{
+ return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
+}
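
As a minimal standalone sketch of what cap2ctrl() computes, assuming the
usual VMX capability-MSR layout (allowed-0 settings in the low 32 bits,
allowed-1 settings in the high 32 bits); main() and its sample values are
hypothetical:

#include <assert.h>
#include <stdint.h>

/* Mirror of cap2ctrl() above: mandatory bits (low half of cap) are forced
 * on, bits unsupported by the hypervisor (clear in the high half) are
 * masked off. */
static uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

int main(void)
{
    /* Hypothetical capability word: bit 0 must be 1; bits 0-3 may be 1. */
    uint64_t cap = ((uint64_t)0xf << 32) | 0x1;

    assert(cap2ctrl(cap, 0x6) == 0x7);  /* mandatory bit 0 forced on */
    assert(cap2ctrl(cap, 0x10) == 0x1); /* unsupported bit 4 dropped */
    return 0;
}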
+
+#define VM_ENTRY_GUEST_LMA (1LL << 9)
+
+#define AR_TYPE_ACCESSES_MASK 1
+#define AR_TYPE_READABLE_MASK (1 << 1)
+#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define AR_TYPE_CODE_MASK (1 << 3)
+#define AR_TYPE_MASK 0x0f
+#define AR_TYPE_BUSY_64_TSS 11
+#define AR_TYPE_BUSY_32_TSS 11
+#define AR_TYPE_BUSY_16_TSS 3
+#define AR_TYPE_LDT 2
+
+static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
+{
+ uint64_t entry_ctls;
+
+ efer |= MSR_EFER_LMA;
+ wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
+ entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+ wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);
+
+ uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
+ if ((efer & MSR_EFER_LME) &&
+ (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+ wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
+ (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
+ }
+}
+
+static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
+{
+ uint64_t entry_ctls;
+
+ entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+ wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
+
+ efer &= ~MSR_EFER_LMA;
+ wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
+}
+
+static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
+{
+ int i;
+ uint64_t pdpte[4] = {0, 0, 0, 0};
+ uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
+ uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
+ uint64_t changed_cr0 = old_cr0 ^ cr0;
+ uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
+ uint64_t entry_ctls;
+
+ if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
+ !(efer & MSR_EFER_LME)) {
+ address_space_read(&address_space_memory,
+ rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
+ MEMTXATTRS_UNSPECIFIED, pdpte, 32);
+ /* Only set PDPTE when appropriate. */
+ for (i = 0; i < 4; i++) {
+ wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+ }
+ }
+
+ wvmcs(vcpu, VMCS_CR0_MASK, mask);
+ wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
+
+ if (efer & MSR_EFER_LME) {
+ if (changed_cr0 & CR0_PG) {
+ if (cr0 & CR0_PG) {
+ enter_long_mode(vcpu, cr0, efer);
+ } else {
+ exit_long_mode(vcpu, cr0, efer);
+ }
+ }
+ } else {
+ entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+ wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
+ }
+
+ /* Filter new CR0 after we are finished examining it above. */
+ cr0 = (cr0 & ~(mask & ~CR0_PG));
+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+
+ hv_vcpu_invalidate_tlb(vcpu);
+ hv_vcpu_flush(vcpu);
+}
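
A small sketch of the read semantics that VMCS_CR0_MASK and VMCS_CR0_SHADOW
set up above, assuming standard VMX behaviour (guest reads of masked bits
return the shadow, other bits come from the real guest CR0);
guest_visible_cr0() is a hypothetical helper, not part of the patch:

#include <assert.h>
#include <stdint.h>

/* What a guest MOV-from-CR0 observes under the usual read-shadow rule. */
static uint64_t guest_visible_cr0(uint64_t cr0, uint64_t shadow,
                                  uint64_t mask)
{
    return (cr0 & ~mask) | (shadow & mask);
}

int main(void)
{
    uint64_t mask   = 0x80000001; /* intercept PG and PE, for example */
    uint64_t cr0    = 0x80000021; /* real value: PG | NE | PE */
    uint64_t shadow = 0x00000001; /* guest last wrote PE only */

    /* Masked bits (PG, PE) come from the shadow, the rest from CR0. */
    assert(guest_visible_cr0(cr0, shadow, mask) == 0x21);
    return 0;
}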
+
+static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
+{
+ uint64_t guest_cr4 = cr4 | CR4_VMXE;
+
+ wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
+ wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
+ wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);
+
+ hv_vcpu_invalidate_tlb(vcpu);
+ hv_vcpu_flush(vcpu);
+}
+
+static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint64_t val;
+
+ /* BUG: should take possible overlap into consideration */
+ wreg(cpu->hvf->fd, HV_X86_RIP, rip);
+ env->eip = rip;
+
+ /* after moving forward in rip, we need to clear INTERRUPTIBILITY */
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
+ if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
+ val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
+ }
+}
+
+static inline void vmx_clear_nmi_blocking(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->hflags2 &= ~HF2_NMI_MASK;
+ uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
+ gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static inline void vmx_set_nmi_blocking(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->hflags2 |= HF2_NMI_MASK;
+ uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
+ gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
+ VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
+}
+
+static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
+ ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
+}
+
+#endif
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
new file mode 100644
index 000000000..2898bb70a
--- /dev/null
+++ b/target/i386/hvf/x86.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86_mmu.h"
+#include "x86_descr.h"
+
+/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
+{
+ uint32_t ar;
+
+ if (!var->p) {
+ ar = 1 << 16;
+ return ar;
+ }
+
+ ar = var->type & 15;
+ ar |= (var->s & 1) << 4;
+ ar |= (var->dpl & 3) << 5;
+ ar |= (var->p & 1) << 7;
+ ar |= (var->avl & 1) << 12;
+ ar |= (var->l & 1) << 13;
+ ar |= (var->db & 1) << 14;
+ ar |= (var->g & 1) << 15;
+ return ar;
+}*/
+
+bool x86_read_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel)
+{
+ target_ulong base;
+ uint32_t limit;
+
+ memset(desc, 0, sizeof(*desc));
+
+ /* valid gdt descriptors start from index 1 */
+ if (!sel.index && GDT_SEL == sel.ti) {
+ return false;
+ }
+
+ if (GDT_SEL == sel.ti) {
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+ } else {
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
+ }
+
+ if (sel.index * 8 >= limit) {
+ return false;
+ }
+
+ vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
+ return true;
+}
+
+bool x86_write_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel)
+{
+ target_ulong base;
+ uint32_t limit;
+
+ if (GDT_SEL == sel.ti) {
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+ } else {
+ base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
+ }
+
+ if (sel.index * 8 >= limit) {
+ printf("%s: gdt limit\n", __func__);
+ return false;
+ }
+ vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
+ return true;
+}
+
+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate)
+{
+ target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
+ uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
+
+ memset(idt_desc, 0, sizeof(*idt_desc));
+ if (gate * 8 >= limit) {
+ printf("%s: idt limit\n", __func__);
+ return false;
+ }
+
+ vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
+ return true;
+}
+
+bool x86_is_protected(struct CPUState *cpu)
+{
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
+ return cr0 & CR0_PE;
+}
+
+bool x86_is_real(struct CPUState *cpu)
+{
+ return !x86_is_protected(cpu);
+}
+
+bool x86_is_v8086(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ return x86_is_protected(cpu) && (env->eflags & VM_MASK);
+}
+
+bool x86_is_long_mode(struct CPUState *cpu)
+{
+ return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
+}
+
+bool x86_is_long64_mode(struct CPUState *cpu)
+{
+ struct vmx_segment desc;
+ vmx_read_segment_descriptor(cpu, &desc, R_CS);
+
+ return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
+}
+
+bool x86_is_paging_mode(struct CPUState *cpu)
+{
+ uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
+ return cr0 & CR0_PG;
+}
+
+bool x86_is_pae_enabled(struct CPUState *cpu)
+{
+ uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
+ return cr4 & CR4_PAE;
+}
+
+target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
+{
+ return vmx_read_segment_base(cpu, seg) + addr;
+}
+
+target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
+ X86Seg seg)
+{
+ switch (size) {
+ case 2:
+ addr = (uint16_t)addr;
+ break;
+ case 4:
+ addr = (uint32_t)addr;
+ break;
+ default:
+ break;
+ }
+ return linear_addr(cpu, addr, seg);
+}
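
A standalone sketch of the truncation step in linear_addr_size();
truncate_addr() is a hypothetical mirror of the switch above:

#include <assert.h>
#include <stdint.h>

/* Effective addresses wrap at the address-size boundary before the
 * segment base is added. */
static uint64_t truncate_addr(uint64_t addr, int size)
{
    switch (size) {
    case 2:
        return (uint16_t)addr;
    case 4:
        return (uint32_t)addr;
    default:
        return addr;
    }
}

int main(void)
{
    /* A 16-bit effective address wraps modulo 64 KiB. */
    assert(truncate_addr(0x1FFFF, 2) == 0xFFFF);
    /* A 32-bit effective address wraps modulo 4 GiB. */
    assert(truncate_addr(0x100000000ULL, 4) == 0);
    return 0;
}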
+
+target_ulong linear_rip(struct CPUState *cpu, target_ulong rip)
+{
+ return linear_addr(cpu, rip, R_CS);
+}
diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h
new file mode 100644
index 000000000..782664c2e
--- /dev/null
+++ b/target/i386/hvf/x86.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Veertu Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_H
+#define HVF_X86_H
+
+typedef struct x86_register {
+ union {
+ struct {
+ uint64_t rrx; /* full 64 bit */
+ };
+ struct {
+ uint32_t erx; /* low 32 bit part */
+ uint32_t hi32_unused1;
+ };
+ struct {
+ uint16_t rx; /* low 16 bit part */
+ uint16_t hi16_unused1;
+ uint32_t hi32_unused2;
+ };
+ struct {
+ uint8_t lx; /* low 8 bit part */
+ uint8_t hx; /* high 8 bit */
+ uint16_t hi16_unused2;
+ uint32_t hi32_unused3;
+ };
+ };
+} __attribute__ ((__packed__)) x86_register;
+
+typedef enum x86_reg_cr0 {
+ CR0_PE = (1L << 0),
+ CR0_MP = (1L << 1),
+ CR0_EM = (1L << 2),
+ CR0_TS = (1L << 3),
+ CR0_ET = (1L << 4),
+ CR0_NE = (1L << 5),
+ CR0_WP = (1L << 16),
+ CR0_AM = (1L << 18),
+ CR0_NW = (1L << 29),
+ CR0_CD = (1L << 30),
+ CR0_PG = (1L << 31),
+} x86_reg_cr0;
+
+typedef enum x86_reg_cr4 {
+ CR4_VME = (1L << 0),
+ CR4_PVI = (1L << 1),
+ CR4_TSD = (1L << 2),
+ CR4_DE = (1L << 3),
+ CR4_PSE = (1L << 4),
+ CR4_PAE = (1L << 5),
+ CR4_MSE = (1L << 6),
+ CR4_PGE = (1L << 7),
+ CR4_PCE = (1L << 8),
+ CR4_OSFXSR = (1L << 9),
+ CR4_OSXMMEXCPT = (1L << 10),
+ CR4_VMXE = (1L << 13),
+ CR4_SMXE = (1L << 14),
+ CR4_FSGSBASE = (1L << 16),
+ CR4_PCIDE = (1L << 17),
+ CR4_OSXSAVE = (1L << 18),
+ CR4_SMEP = (1L << 20),
+} x86_reg_cr4;
+
+/* 16 bit Task State Segment */
+typedef struct x86_tss_segment16 {
+ uint16_t link;
+ uint16_t sp0;
+ uint16_t ss0;
+ uint32_t sp1;
+ uint16_t ss1;
+ uint32_t sp2;
+ uint16_t ss2;
+ uint16_t ip;
+ uint16_t flags;
+ uint16_t ax;
+ uint16_t cx;
+ uint16_t dx;
+ uint16_t bx;
+ uint16_t sp;
+ uint16_t bp;
+ uint16_t si;
+ uint16_t di;
+ uint16_t es;
+ uint16_t cs;
+ uint16_t ss;
+ uint16_t ds;
+ uint16_t ldtr;
+} __attribute__((packed)) x86_tss_segment16;
+
+/* 32 bit Task State Segment */
+typedef struct x86_tss_segment32 {
+ uint32_t prev_tss;
+ uint32_t esp0;
+ uint32_t ss0;
+ uint32_t esp1;
+ uint32_t ss1;
+ uint32_t esp2;
+ uint32_t ss2;
+ uint32_t cr3;
+ uint32_t eip;
+ uint32_t eflags;
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t es;
+ uint32_t cs;
+ uint32_t ss;
+ uint32_t ds;
+ uint32_t fs;
+ uint32_t gs;
+ uint32_t ldt;
+ uint16_t trap;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment32;
+
+/* 64 bit Task State Segment */
+typedef struct x86_tss_segment64 {
+ uint32_t unused;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t unused1;
+ uint64_t ist1;
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t unused2;
+ uint16_t unused3;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment64;
+
+/* segment descriptors */
+typedef struct x86_segment_descriptor {
+ uint64_t limit0:16;
+ uint64_t base0:16;
+ uint64_t base1:8;
+ uint64_t type:4;
+ uint64_t s:1;
+ uint64_t dpl:2;
+ uint64_t p:1;
+ uint64_t limit1:4;
+ uint64_t avl:1;
+ uint64_t l:1;
+ uint64_t db:1;
+ uint64_t g:1;
+ uint64_t base2:8;
+} __attribute__ ((__packed__)) x86_segment_descriptor;
+
+static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
+{
+ return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
+}
+
+static inline void x86_set_segment_base(x86_segment_descriptor *desc,
+ uint32_t base)
+{
+ desc->base2 = base >> 24;
+ desc->base1 = (base >> 16) & 0xff;
+ desc->base0 = base & 0xffff;
+}
+
+static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
+{
+ uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
+ if (desc->g) {
+ return (limit << 12) | 0xfff;
+ }
+ return limit;
+}
+
+static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
+ uint32_t limit)
+{
+ desc->limit0 = limit & 0xffff;
+ desc->limit1 = limit >> 16;
+}
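
A minimal sketch of the granularity scaling done by x86_segment_limit();
effective_limit() is a hypothetical copy of the arithmetic above:

#include <assert.h>
#include <stdint.h>

/* With the granularity bit set, the 20-bit raw limit counts 4 KiB pages,
 * so the byte limit is (raw << 12) | 0xfff. */
static uint32_t effective_limit(uint32_t raw_limit20, int g)
{
    return g ? (raw_limit20 << 12) | 0xfff : raw_limit20;
}

int main(void)
{
    /* A flat 4 GiB segment: raw limit 0xFFFFF, page granular. */
    assert(effective_limit(0xFFFFF, 1) == 0xFFFFFFFFu);
    /* Byte-granular descriptor: the limit is taken literally. */
    assert(effective_limit(0x0FFF, 0) == 0x0FFF);
    return 0;
}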
+
+typedef struct x86_call_gate {
+ uint64_t offset0:16;
+ uint64_t selector:16;
+ uint64_t param_count:4;
+ uint64_t reserved:3;
+ uint64_t type:4;
+ uint64_t dpl:1;
+ uint64_t p:1;
+ uint64_t offset1:16;
+} __attribute__ ((__packed__)) x86_call_gate;
+
+static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
+{
+ return (uint32_t)((gate->offset1 << 16) | gate->offset0);
+}
+
+#define GDT_SEL 0
+#define LDT_SEL 1
+
+typedef struct x68_segment_selector {
+ union {
+ uint16_t sel;
+ struct {
+ uint16_t rpl:2;
+ uint16_t ti:1;
+ uint16_t index:13;
+ };
+ };
+} __attribute__ ((__packed__)) x68_segment_selector;
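
A standalone sketch of the selector layout encoded by x68_segment_selector,
assuming the little-endian GCC/Clang bitfield layout the header relies on;
sel_t and the sample value are hypothetical:

#include <assert.h>
#include <stdint.h>

/* Same layout as x68_segment_selector: RPL in bits 0-1, table indicator
 * in bit 2, descriptor index in bits 3-15. */
typedef struct {
    union {
        uint16_t sel;
        struct {
            uint16_t rpl:2;
            uint16_t ti:1;
            uint16_t index:13;
        };
    };
} sel_t;

int main(void)
{
    sel_t s;

    s.sel = 0x2B;         /* a typical 64-bit user data selector */
    assert(s.rpl == 3);   /* ring 3 */
    assert(s.ti == 0);    /* 0 selects the GDT (GDT_SEL) */
    assert(s.index == 5); /* sixth descriptor slot */
    return 0;
}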
+
+/* useful register access macros */
+#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])
+
+#define RRX(cpu, reg) (x86_reg(cpu, reg)->rrx)
+#define RAX(cpu) RRX(cpu, R_EAX)
+#define RCX(cpu) RRX(cpu, R_ECX)
+#define RDX(cpu) RRX(cpu, R_EDX)
+#define RBX(cpu) RRX(cpu, R_EBX)
+#define RSP(cpu) RRX(cpu, R_ESP)
+#define RBP(cpu) RRX(cpu, R_EBP)
+#define RSI(cpu) RRX(cpu, R_ESI)
+#define RDI(cpu) RRX(cpu, R_EDI)
+#define R8(cpu) RRX(cpu, R_R8)
+#define R9(cpu) RRX(cpu, R_R9)
+#define R10(cpu) RRX(cpu, R_R10)
+#define R11(cpu) RRX(cpu, R_R11)
+#define R12(cpu) RRX(cpu, R_R12)
+#define R13(cpu) RRX(cpu, R_R13)
+#define R14(cpu) RRX(cpu, R_R14)
+#define R15(cpu) RRX(cpu, R_R15)
+
+#define ERX(cpu, reg) (x86_reg(cpu, reg)->erx)
+#define EAX(cpu) ERX(cpu, R_EAX)
+#define ECX(cpu) ERX(cpu, R_ECX)
+#define EDX(cpu) ERX(cpu, R_EDX)
+#define EBX(cpu) ERX(cpu, R_EBX)
+#define ESP(cpu) ERX(cpu, R_ESP)
+#define EBP(cpu) ERX(cpu, R_EBP)
+#define ESI(cpu) ERX(cpu, R_ESI)
+#define EDI(cpu) ERX(cpu, R_EDI)
+
+#define RX(cpu, reg) (x86_reg(cpu, reg)->rx)
+#define AX(cpu) RX(cpu, R_EAX)
+#define CX(cpu) RX(cpu, R_ECX)
+#define DX(cpu) RX(cpu, R_EDX)
+#define BP(cpu) RX(cpu, R_EBP)
+#define SP(cpu) RX(cpu, R_ESP)
+#define BX(cpu) RX(cpu, R_EBX)
+#define SI(cpu) RX(cpu, R_ESI)
+#define DI(cpu) RX(cpu, R_EDI)
+
+#define RL(cpu, reg) (x86_reg(cpu, reg)->lx)
+#define AL(cpu) RL(cpu, R_EAX)
+#define CL(cpu) RL(cpu, R_ECX)
+#define DL(cpu) RL(cpu, R_EDX)
+#define BL(cpu) RL(cpu, R_EBX)
+
+#define RH(cpu, reg) (x86_reg(cpu, reg)->hx)
+#define AH(cpu) RH(cpu, R_EAX)
+#define CH(cpu) RH(cpu, R_ECX)
+#define DH(cpu) RH(cpu, R_EDX)
+#define BH(cpu) RH(cpu, R_EBX)
+
+/* deal with GDT/LDT descriptors in memory */
+bool x86_read_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel);
+bool x86_write_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel);
+
+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate);
+
+/* helpers */
+bool x86_is_protected(struct CPUState *cpu);
+bool x86_is_real(struct CPUState *cpu);
+bool x86_is_v8086(struct CPUState *cpu);
+bool x86_is_long_mode(struct CPUState *cpu);
+bool x86_is_long64_mode(struct CPUState *cpu);
+bool x86_is_paging_mode(struct CPUState *cpu);
+bool x86_is_pae_enabled(struct CPUState *cpu);
+
+enum X86Seg;
+target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
+target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
+ enum X86Seg seg);
+target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
+
+static inline uint64_t rdtscp(void)
+{
+ uint64_t tsc;
+ __asm__ __volatile__("rdtscp; " /* serializing read of tsc */
+ "shl $32,%%rdx; " /* shift higher 32 bits stored in rdx up */
+ "or %%rdx,%%rax" /* and or onto rax */
+ : "=a"(tsc) /* output to tsc variable */
+ :
+ : "%rcx", "%rdx"); /* rcx and rdx are clobbered */
+
+ return tsc;
+}
+
+#endif
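
A usage sketch of the rdtscp() helper defined above; x86-64 hosts only, and
the elapsed count is in TSC cycles, not wall-clock time:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same pattern as the header's rdtscp(): serializing TSC read with the
 * high half shifted down from rdx. */
static inline uint64_t rdtscp(void)
{
    uint64_t tsc;
    __asm__ __volatile__("rdtscp; shl $32,%%rdx; or %%rdx,%%rax"
                         : "=a"(tsc) : : "%rcx", "%rdx");
    return tsc;
}

int main(void)
{
    uint64_t start = rdtscp();
    /* ... code under measurement ... */
    uint64_t end = rdtscp();

    printf("elapsed cycles: %" PRIu64 "\n", end - start);
    return 0;
}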
diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c
new file mode 100644
index 000000000..32b0d131d
--- /dev/null
+++ b/target/i386/hvf/x86_cpuid.c
@@ -0,0 +1,164 @@
+/*
+ * i386 CPUID helper functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * cpuid
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "x86.h"
+#include "vmx.h"
+#include "sysemu/hvf.h"
+
+static bool xgetbv(uint32_t cpuid_ecx, uint32_t idx, uint64_t *xcr)
+{
+ uint32_t xcrl, xcrh;
+
+ if (cpuid_ecx & CPUID_EXT_OSXSAVE) {
+ /*
+ * The xgetbv instruction is not available to older versions of
+ * the assembler, so we encode the instruction manually.
+ */
+ asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (idx));
+
+ *xcr = (((uint64_t)xcrh) << 32) | xcrl;
+ return true;
+ }
+
+ return false;
+}
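
For reference, a standalone sketch of the same manually encoded XGETBV read
(.byte 0x0f, 0x01, 0xd0 is the XGETBV instruction); read_xcr() is
hypothetical and, as in the code above, only safe to call after CPUID has
reported OSXSAVE:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read extended control register 'idx' (0 selects XCR0). */
static uint64_t read_xcr(uint32_t idx)
{
    uint32_t lo, hi;

    asm(".byte 0x0f, 0x01, 0xd0" : "=a"(lo), "=d"(hi) : "c"(idx));
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    printf("XCR0 = 0x%" PRIx64 "\n", read_xcr(0));
    return 0;
}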
+
+uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
+ int reg)
+{
+ uint64_t cap;
+ uint32_t eax, ebx, ecx, edx;
+
+ host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);
+
+ switch (func) {
+ case 0:
+ eax = eax < (uint32_t)0xd ? eax : (uint32_t)0xd;
+ break;
+ case 1:
+ edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
+ ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
+ CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
+ CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
+ ecx |= CPUID_EXT_HYPERVISOR;
+ break;
+ case 6:
+ eax = CPUID_6_EAX_ARAT;
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ break;
+ case 7:
+ if (idx == 0) {
+ ebx &= CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 |
+ CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RTM |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF |
+ CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL |
+ CPUID_7_0_EBX_INVPCID;
+
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
+ if (!(cap & CPU_BASED2_INVPCID)) {
+ ebx &= ~CPUID_7_0_EBX_INVPCID;
+ }
+
+ ecx &= CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ;
+ edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS;
+ } else {
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ }
+ eax = 0;
+ break;
+ case 0xD:
+ if (idx == 0) {
+ uint64_t host_xcr0;
+ if (xgetbv(ecx, 0, &host_xcr0)) {
+ uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK |
+ XSTATE_SSE_MASK | XSTATE_YMM_MASK |
+ XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+ XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK |
+ XSTATE_Hi16_ZMM_MASK);
+ eax &= supp_xcr0;
+ }
+ } else if (idx == 1) {
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
+ eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
+ if (!(cap & CPU_BASED2_XSAVES_XRSTORS)) {
+ eax &= ~CPUID_XSAVE_XSAVES;
+ }
+ }
+ break;
+ case 0x80000001:
+ /* LM only if HVF in 64-bit mode */
+ edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_EXT2_SYSCALL | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_EXT2_MMXEXT | CPUID_MMX |
+ CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT |
+ CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX;
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
+ if (!(cap & CPU_BASED2_RDTSCP)) {
+ edx &= ~CPUID_EXT2_RDTSCP;
+ }
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap);
+ if (!(cap & CPU_BASED_TSC_OFFSET)) {
+ edx &= ~CPUID_EXT2_RDTSCP;
+ }
+ ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG |
+ CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_OSVW | CPUID_EXT3_XOP |
+ CPUID_EXT3_FMA4 | CPUID_EXT3_TBM;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (reg) {
+ case R_EAX:
+ return eax;
+ case R_EBX:
+ return ebx;
+ case R_ECX:
+ return ecx;
+ case R_EDX:
+ return edx;
+ default:
+ return 0;
+ }
+}
diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
new file mode 100644
index 000000000..062713b1a
--- /dev/null
+++ b/target/i386/hvf/x86_decode.c
@@ -0,0 +1,2197 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "panic.h"
+#include "x86_decode.h"
+#include "vmx.h"
+#include "x86_mmu.h"
+#include "x86_descr.h"
+
+#define OPCODE_ESCAPE 0xf
+
+static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
+{
+ printf("%llx: failed to decode instruction ", env->eip);
+ for (int i = 0; i < decode->opcode_len; i++) {
+ printf("%x ", decode->opcode[i]);
+ }
+ printf("\n");
+ VM_PANIC("decoder failed\n");
+}
+
+uint64_t sign(uint64_t val, int size)
+{
+ switch (size) {
+ case 1:
+ val = (int8_t)val;
+ break;
+ case 2:
+ val = (int16_t)val;
+ break;
+ case 4:
+ val = (int32_t)val;
+ break;
+ case 8:
+ val = (int64_t)val;
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ return val;
+}
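
A minimal standalone mirror of sign(), showing the sign extension it
performs; sign_extend() and the sample values are hypothetical:

#include <assert.h>
#include <stdint.h>

/* Round-trip through the signed type of the given width to sign-extend
 * into 64 bits, as sign() does above. */
static uint64_t sign_extend(uint64_t val, int size)
{
    switch (size) {
    case 1:
        return (uint64_t)(int64_t)(int8_t)val;
    case 2:
        return (uint64_t)(int64_t)(int16_t)val;
    case 4:
        return (uint64_t)(int64_t)(int32_t)val;
    default:
        return val;
    }
}

int main(void)
{
    /* An 8-bit displacement of 0xF0 (-16) becomes a 64-bit -16. */
    assert(sign_extend(0xF0, 1) == UINT64_MAX - 15);
    assert(sign_extend(0x7F, 1) == 0x7F);
    return 0;
}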
+
+static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
+ int size)
+{
+ target_ulong val = 0;
+
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
+ vmx_read_mem(env_cpu(env), &val, va, size);
+ decode->len += size;
+
+ return val;
+}
+
+static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint8_t)decode_bytes(env, decode, 1);
+}
+
+static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint16_t)decode_bytes(env, decode, 2);
+}
+
+static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint32_t)decode_bytes(env, decode, 4);
+}
+
+static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode)
+{
+ return decode_bytes(env, decode, 8);
+}
+
+static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_RM;
+}
+
+static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = decode->modrm.reg;
+ op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
+ decode->operand_size);
+}
+
+static void decode_rax(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_EAX;
+ /* Since reg is always AX, REX prefix has no impact. */
+ op->ptr = get_reg_ref(env, op->reg, false, 0,
+ decode->operand_size);
+}
+
+static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *var, int size)
+{
+ var->type = X86_VAR_IMMEDIATE;
+ var->size = size;
+ switch (size) {
+ case 1:
+ var->val = decode_byte(env, decode);
+ break;
+ case 2:
+ var->val = decode_word(env, decode);
+ break;
+ case 4:
+ var->val = decode_dword(env, decode);
+ break;
+ case 8:
+ var->val = decode_qword(env, decode);
+ break;
+ default:
+ VM_PANIC_EX("bad size %d\n", size);
+ }
+}
+
+static void decode_imm8(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->val = sign(op->val, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 2);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (8 == decode->operand_size) {
+ decode_immediate(env, decode, op, 4);
+ op->val = sign(op->val, decode->operand_size);
+ } else {
+ decode_immediate(env, decode, op, decode->operand_size);
+ }
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, decode->operand_size);
+ op->val = sign(op->val, decode->operand_size);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_1(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 1;
+}
+
+static void decode_imm_0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 0;
+}
+
+static void decode_pushseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xe:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x16:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1e:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x06:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa0:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa8:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_popseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xf:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x17:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1f:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x07:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa1:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa9:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x40;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x48;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
+{
+ if (!decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_INC;
+ } else if (1 == decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_DEC;
+ }
+}
+
+static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x50;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x58;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->displacement = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement_size = decode->operand_size;
+}
+
+static void decode_farjmp(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement = decode_word(env, decode);
+}
+
+static void decode_addgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAR
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_f7group(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV
+ };
+ decode->cmd = group[decode->modrm.reg];
+ decode_modrm_rm(env, decode, &decode->op[0]);
+
+ switch (decode->modrm.reg) {
+ case 0:
+ case 1:
+ decode_imm(env, decode, &decode->op[1]);
+ break;
+ case 2:
+ break;
+ case 3:
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x90;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb8;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_OFFSET;
+ op->ptr = decode_bytes(env, decode, decode->addressing_size);
+}
+
+static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb0;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_ECX;
+ op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
+ decode->operand_size);
+}
+
+struct decode_tbl {
+ uint8_t opcode;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool is_modrm;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_op3)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op3);
+ void (*decode_op4)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op4);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+ uint32_t flags_mask;
+};
+
+struct decode_x87_tbl {
+ uint8_t opcode;
+ uint8_t modrm_reg;
+ uint8_t modrm_mod;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool rev;
+ bool pop;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+ uint32_t flags_mask;
+};
+
+struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL,
+ decode_invalid};
+
+struct decode_tbl _decode_tbl1[256];
+struct decode_tbl _decode_tbl2[256];
+struct decode_x87_tbl _decode_tbl3[256];
+
+static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode)
+{
+ struct decode_x87_tbl *decoder;
+
+ decode->is_fpu = true;
+ int mode = decode->modrm.mod == 3 ? 1 : 0;
+ int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) |
+ decode->modrm.reg;
+
+ decoder = &_decode_tbl3[index];
+
+ decode->cmd = decoder->cmd;
+ if (decoder->operand_size) {
+ decode->operand_size = decoder->operand_size;
+ }
+ decode->flags_mask = decoder->flags_mask;
+ decode->fpop_stack = decoder->pop;
+ decode->frev = decoder->rev;
+
+ if (decoder->decode_op1) {
+ decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (decoder->decode_op2) {
+ decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (decoder->decode_postfix) {
+ decoder->decode_postfix(env, decode);
+ }
+
+ VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n",
+ decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg,
+ decoder->modrm_mod);
+}
+
+static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+ if (decode->modrm.reg > 2) {
+ decode->flags_mask = 0;
+ }
+}
+
+static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_INVLPG
+ };
+ decode->cmd = group[decode->modrm.reg];
+ if (0xf9 == decode->modrm.modrm) {
+ decode->opcode[decode->len++] = decode->modrm.modrm;
+ decode->cmd = X86_DECODE_CMD_RDTSCP;
+ }
+}
+
+static void decode_btgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BTC
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_x87_general(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+}
+
+static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_FLOATP;
+}
+
+static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_INTP;
+}
+
+static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_BYTEP;
+}
+
+static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = 0;
+}
+
+static void decode_decode_x87_modrm_st0(CPUX86State *env,
+ struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = decode->modrm.modrm & 7;
+}
+
+static void decode_aegroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+ switch (decode->modrm.reg) {
+ case 0:
+ decode->cmd = X86_DECODE_CMD_FXSAVE;
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ break;
+ case 1:
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ decode->cmd = X86_DECODE_CMD_FXRSTOR;
+ break;
+ case 5:
+ if (decode->modrm.modrm == 0xe8) {
+ decode->cmd = X86_DECODE_CMD_LFENCE;
+ } else {
+ VM_PANIC("xrstor");
+ }
+ break;
+ case 6:
+ VM_PANIC_ON(decode->modrm.modrm != 0xf0);
+ decode->cmd = X86_DECODE_CMD_MFENCE;
+ break;
+ case 7:
+ if (decode->modrm.modrm == 0xf8) {
+ decode->cmd = X86_DECODE_CMD_SFENCE;
+ } else {
+ decode->cmd = X86_DECODE_CMD_CLFLUSH;
+ }
+ break;
+ default:
+ VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg);
+ break;
+ }
+}
+
+static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[1] - 0xc8;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+}
+
+static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ /* FCHS */
+ decode->cmd = X86_DECODE_CMD_FCHS;
+ break;
+ case 0xe1:
+ decode->cmd = X86_DECODE_CMD_FABS;
+ break;
+ case 0xe4:
+ VM_PANIC("FTST");
+ break;
+ case 0xe5:
+ /* FXAM */
+ decode->cmd = X86_DECODE_CMD_FXAM;
+ break;
+ default:
+ VM_PANIC("FLDENV");
+ break;
+ }
+}
+
+static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe1:
+ VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe2:
+ VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe3:
+ decode->cmd = X86_DECODE_CMD_FNINIT;
+ break;
+ case 0xe4:
+ decode->cmd = X86_DECODE_CMD_FNSETPM;
+ break;
+ default:
+ VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ }
+}
+
+#define RFLAGS_MASK_NONE 0
+#define RFLAGS_MASK_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define RFLAGS_MASK_LAHF (CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define RFLAGS_MASK_CF (CC_C)
+#define RFLAGS_MASK_IF (IF_MASK)
+#define RFLAGS_MASK_TF (TF_MASK)
+#define RFLAGS_MASK_DF (DF_MASK)
+#define RFLAGS_MASK_ZF (CC_Z)
+
+struct decode_tbl _1op_inst[] = {
+ {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL,
+ NULL, RFLAGS_MASK_OSZAPC},
+ {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL,
+ NULL, RFLAGS_MASK_OSZAPC},
+ {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL,
+ decode_pushseg, RFLAGS_MASK_NONE},
+ {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL,
+ decode_popseg, RFLAGS_MASK_NONE},
+ {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2f, X86_DECODE_CMD_DAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x3f, X86_DECODE_CMD_AAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x40, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x41, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x42, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x43, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x44, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x45, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x46, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x47, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+
+ {0x48, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x49, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4a, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4b, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4c, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4d, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4e, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4f, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+
+ {0x50, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x51, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x52, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x53, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x54, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x55, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x56, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x57, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+
+ {0x58, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x59, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5a, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5b, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5c, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5d, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5e, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5f, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+
+ {0x60, X86_DECODE_CMD_PUSHA, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x61, X86_DECODE_CMD_POPA, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg,
+ decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm,
+ decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x6c, X86_DECODE_CMD_INS, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6d, X86_DECODE_CMD_INS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6e, X86_DECODE_CMD_OUTS, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6f, X86_DECODE_CMD_OUTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x70, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x71, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x72, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x73, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x74, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x75, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x76, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x77, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x78, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x79, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7a, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7b, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7c, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7d, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7e, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7f, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+
+ {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x90, X86_DECODE_CMD_NOP, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+
+ {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL,
+ NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
+
+ {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_POPF},*/
+ {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_LAHF},
+
+ {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+
+ {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xba, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+
+ {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+
+ {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_IRET},*/
+
+ {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+
+ {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xd7, X86_DECODE_CMD_XLAT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xda, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xde, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+
+ {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xe3, X86_DECODE_CMD_JCXZ, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+
+ {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xea, X86_DECODE_CMD_JMP_FAR, 0, false,
+ NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
+ {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xec, X86_DECODE_CMD_IN, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xed, X86_DECODE_CMD_IN, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xee, X86_DECODE_CMD_OUT, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xef, X86_DECODE_CMD_OUT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xf4, X86_DECODE_CMD_HLT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xf5, X86_DECODE_CMD_CMC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+
+ {0xf6, X86_DECODE_CMD_INVL, 1, true,
+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
+ {0xf7, X86_DECODE_CMD_INVL, 0, true,
+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
+
+ {0xf8, X86_DECODE_CMD_CLC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xf9, X86_DECODE_CMD_STC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+
+ {0xfa, X86_DECODE_CMD_CLI, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
+ {0xfb, X86_DECODE_CMD_STI, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
+ {0xfc, X86_DECODE_CMD_CLD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
+ {0xfd, X86_DECODE_CMD_STD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
+ {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},
+ {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},
+};
+
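+/*
+ * Two-byte opcode map, reached through the 0x0f escape byte (see
+ * decode_opcodes); init_decoder installs these into _decode_tbl2.
+ */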
+struct decode_tbl _2op_inst[] = {
+ {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},
+ {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},
+ {0x6, X86_DECODE_CMD_CLTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},
+ {0x9, X86_DECODE_CMD_WBINVD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x18, X86_DECODE_CMD_PREFETCH, 0, true,
+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
+ {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x30, X86_DECODE_CMD_WRMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x31, X86_DECODE_CMD_RDTSC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x32, X86_DECODE_CMD_RDMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x77, X86_DECODE_CMD_EMMS, 0, false,
+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
+ {0x82, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x83, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x84, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x85, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x86, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x87, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x88, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x89, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8a, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8b, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8c, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8d, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8e, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8f, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+ {0xa2, X86_DECODE_CMD_CPUID, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+ {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},
+
+ {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},
+ {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},
+
+ {0xc8, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xc9, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xca, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcb, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcc, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcd, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xce, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcf, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+};
+
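+/*
+ * Template for invalid x87 table slots; init_decoder copies it over
+ * every _decode_tbl3 entry before installing the real ones.
+ */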
+struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,
+ NULL, decode_invalid, 0};
+
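+/*
+ * x87 escape opcodes (0xd8-0xdf), keyed by the opcode's low nibble,
+ * the ModRM reg field, and whether mod == 3 (register form) or not
+ * (memory form); see the index computation in init_decoder.
+ */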
+struct decode_x87_tbl _x87_inst[] = {
+ {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+
+ {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},
+ {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,
+ decode_db_4, RFLAGS_MASK_NONE},
+ {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+};
+
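+/*
+ * 16-bit effective address: the classic BX/BP + SI/DI combinations.
+ * The default segment is DS, or SS whenever BP contributes; mod == 0
+ * with rm == 6 encodes a bare 16-bit displacement.
+ */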
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ target_ulong ptr = 0;
+ X86Seg seg = R_DS;
+
+ if (!decode->modrm.mod && 6 == decode->modrm.rm) {
+ ptr = decode->displacement;
+ goto calc_addr;
+ }
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ switch (decode->modrm.rm) {
+ case 0:
+ ptr += BX(env) + SI(env);
+ break;
+ case 1:
+ ptr += BX(env) + DI(env);
+ break;
+ case 2:
+ ptr += BP(env) + SI(env);
+ seg = R_SS;
+ break;
+ case 3:
+ ptr += BP(env) + DI(env);
+ seg = R_SS;
+ break;
+ case 4:
+ ptr += SI(env);
+ break;
+ case 5:
+ ptr += DI(env);
+ break;
+ case 6:
+ ptr += BP(env);
+ seg = R_SS;
+ break;
+ case 7:
+ ptr += BX(env);
+ break;
+ }
+calc_addr:
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = (uint16_t)ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
+ }
+}
+
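+/*
+ * Return the host address of a guest register field as a target_ulong.
+ * For byte operands without a REX prefix, registers 4-7 select the
+ * legacy high-byte registers (AH, CH, DH, BH).
+ */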
+target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
+{
+ target_ulong ptr = 0;
+
+ if (is_extended) {
+ reg |= R_R8;
+ }
+
+ switch (size) {
+ case 1:
+ if (is_extended || reg < 4 || rex_present) {
+ ptr = (target_ulong)&RL(env, reg);
+ } else {
+ ptr = (target_ulong)&RH(env, reg - 4);
+ }
+ break;
+ default:
+ ptr = (target_ulong)&RRX(env, reg);
+ break;
+ }
+ return ptr;
+}
+
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size)
+{
+ target_ulong val = 0;
+ memcpy(&val,
+ (void *)get_reg_ref(env, reg, rex_present, is_extended, size),
+ size);
+ return val;
+}
+
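+/*
+ * Decode the SIB byte: base + (index << scale).  An EBP base with
+ * mod == 0 means "no base, disp32 follows", ESP never acts as an
+ * index, and an ESP/EBP base makes SS the default segment.
+ */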
+static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode,
+ X86Seg *sel)
+{
+ target_ulong base = 0;
+ target_ulong scaled_index = 0;
+ int addr_size = decode->addressing_size;
+ int base_reg = decode->sib.base;
+ int index_reg = decode->sib.index;
+
+ *sel = R_DS;
+
+ if (decode->modrm.mod || base_reg != R_EBP) {
+ if (decode->rex.b) {
+ base_reg |= R_R8;
+ }
+ if (base_reg == R_ESP || base_reg == R_EBP) {
+ *sel = R_SS;
+ }
+ base = get_reg_val(env, decode->sib.base, decode->rex.rex,
+ decode->rex.b, addr_size);
+ }
+
+ if (decode->rex.x) {
+ index_reg |= R_R8;
+ }
+
+ if (index_reg != R_ESP) {
+ scaled_index = get_reg_val(env, index_reg, decode->rex.rex,
+ decode->rex.x, addr_size) <<
+ decode->sib.scale;
+ }
+ return base + scaled_index;
+}
+
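+/*
+ * 32-bit effective address: rm == 4 takes a SIB byte; mod == 0 with
+ * rm == 5 is RIP-relative in long mode, an absolute disp32 otherwise.
+ */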
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ target_ulong ptr = 0;
+ int addr_size = decode->addressing_size;
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == decode->modrm.rm) {
+ ptr += get_sib_val(env, decode, &seg);
+ } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
+ if (x86_is_long_mode(env_cpu(env))) {
+ ptr += env->eip + decode->len;
+ } else {
+ ptr = decode->displacement;
+ }
+ } else {
+ if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) {
+ seg = R_SS;
+ }
+ ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex,
+ decode->rex.b, addr_size);
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = (uint32_t)ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
+ }
+}
+
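+/*
+ * 64-bit effective address: like the 32-bit form, except mod == 0
+ * with rm == 5 is always RIP-relative.
+ */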
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ int32_t offset = 0;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+ target_ulong ptr;
+ int src = decode->modrm.rm;
+
+ if (decode->displacement_size) {
+ offset = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == rm) {
+ ptr = get_sib_val(env, decode, &seg) + offset;
+ } else if (0 == mod && 5 == rm) {
+ ptr = env->eip + decode->len + (int32_t) offset;
+ } else {
+ ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) +
+ (int64_t) offset;
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, ptr, seg);
+ }
+}
+
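+/*
+ * Decode a ModRM operand: a register when mod == 3, otherwise a memory
+ * operand computed by the mode-specific helper above.
+ */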
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (3 == decode->modrm.mod) {
+ op->reg = decode->modrm.reg;
+ op->type = X86_VAR_REG;
+ op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
+ return;
+ }
+
+ switch (decode->addressing_size) {
+ case 2:
+ calc_modrm_operand16(env, decode, op);
+ break;
+ case 4:
+ calc_modrm_operand32(env, decode, op);
+ break;
+ case 8:
+ calc_modrm_operand64(env, decode, op);
+ break;
+ default:
+ VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size);
+ break;
+ }
+}
+
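+/* Consume legacy and REX prefixes until the first non-prefix byte. */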
+static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
+{
+ while (1) {
+ /*
+ * A REX prefix only takes effect as the last prefix before the
+ * opcode; a REX byte followed by a legacy prefix is ignored, so
+ * clear rex.rex whenever a legacy prefix is seen.
+ */
+ uint8_t byte = decode_byte(env, decode);
+ switch (byte) {
+ case PREFIX_LOCK:
+ decode->lock = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_REPN:
+ case PREFIX_REP:
+ decode->rep = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_CS_SEG_OVERRIDE:
+ case PREFIX_SS_SEG_OVERRIDE:
+ case PREFIX_DS_SEG_OVERRIDE:
+ case PREFIX_ES_SEG_OVERRIDE:
+ case PREFIX_FS_SEG_OVERRIDE:
+ case PREFIX_GS_SEG_OVERRIDE:
+ decode->segment_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_OP_SIZE_OVERRIDE:
+ decode->op_size_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_ADDR_SIZE_OVERRIDE:
+ decode->addr_size_override = byte;
+ decode->rex.rex = 0;
+ break;
+ case PREFIX_REX ... (PREFIX_REX + 0xf):
+ if (x86_is_long_mode(env_cpu(env))) {
+ decode->rex.rex = byte;
+ break;
+ }
+ /* fall through when not in long mode */
+ default:
+ decode->len--;
+ return;
+ }
+ }
+}
+
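+/*
+ * Pick the address size (2, 4 or 8 bytes) from the CPU mode, the CS
+ * D/B bit and the 0x67 address-size override prefix.
+ */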
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->addressing_size = -1;
+ if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ } else if (!x86_is_long_mode(env_cpu(env))) {
+ /* protected */
+ struct vmx_segment cs;
+ vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
+ /* check the CS D/B (default operation size) bit */
+ if ((cs.ar >> 14) & 1) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 2;
+ } else {
+ decode->addressing_size = 4;
+ }
+ } else {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 8;
+ }
+ }
+}
+
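+/*
+ * Pick the operand size (2 or 4 bytes, or 8 with REX.W in long mode)
+ * from the CPU mode, the CS D/B bit and the 0x66 override prefix.
+ */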
+void set_operand_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->operand_size = -1;
+ if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ } else if (!x86_is_long_mode(env_cpu(env))) {
+ /* protected */
+ struct vmx_segment cs;
+ vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS);
+ /* check the CS D/B (default operation size) bit */
+ if ((cs.ar >> 14) & 1) {
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+ } else {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+
+ if (decode->rex.w) {
+ decode->operand_size = 8;
+ }
+ }
+}
+
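+/*
+ * A SIB byte follows ModRM only when mod != 3, rm == 4 and the
+ * addressing size is not 16-bit.
+ */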
+static void decode_sib(CPUX86State *env, struct x86_decode *decode)
+{
+ if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&
+ (decode->addressing_size != 2)) {
+ decode->sib.sib = decode_byte(env, decode);
+ decode->sib_present = true;
+ }
+}
+
+/*
+ * Displacement size in bytes for 16-bit ModRM addressing, indexed by
+ * [modrm.mod][modrm.rm]; mod == 0, rm == 6 encodes a bare disp16.
+ */
+int disp16_tbl[4][8] = {
+ {0, 0, 0, 0, 0, 0, 2, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+/*
+ * Displacement size in bytes for 32/64-bit ModRM addressing, indexed by
+ * [modrm.mod][modrm.rm]; -1 means the size depends on the SIB base
+ * (disp32 when base == 5), and mod == 0, rm == 5 encodes a bare disp32.
+ */
+int disp32_tbl[4][8] = {
+ {0, 0, 0, 0, -1, 4, 0, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {4, 4, 4, 4, 4, 4, 4, 4},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
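+/*
+ * Fetch the displacement bytes called for by the tables above; the -1
+ * entry defers to the SIB base (disp32 when sib.base == 5).
+ */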
+static inline void decode_displacement(CPUX86State *env,
+ struct x86_decode *decode)
+{
+ int addressing_size = decode->addressing_size;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+
+ decode->displacement_size = 0;
+ switch (addressing_size) {
+ case 2:
+ decode->displacement_size = disp16_tbl[mod][rm];
+ if (decode->displacement_size) {
+ decode->displacement = (uint16_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ case 4:
+ case 8:
+ if (-1 == disp32_tbl[mod][rm]) {
+ if (5 == decode->sib.base) {
+ decode->displacement_size = 4;
+ }
+ } else {
+ decode->displacement_size = disp32_tbl[mod][rm];
+ }
+
+ if (decode->displacement_size) {
+ decode->displacement = (uint32_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ }
+}
+
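+/* Read the ModRM byte, then any SIB byte and displacement it implies. */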
+static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->modrm.modrm = decode_byte(env, decode);
+ decode->is_modrm = true;
+
+ decode_sib(env, decode);
+ decode_displacement(env, decode);
+}
+
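+/*
+ * Apply one decode table entry: set the command and flags mask, decode
+ * ModRM if present, then run the operand decoders and postfix handler.
+ */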
+static inline void decode_opcode_general(CPUX86State *env,
+ struct x86_decode *decode,
+ uint8_t opcode,
+ struct decode_tbl *inst_decoder)
+{
+ decode->cmd = inst_decoder->cmd;
+ if (inst_decoder->operand_size) {
+ decode->operand_size = inst_decoder->operand_size;
+ }
+ decode->flags_mask = inst_decoder->flags_mask;
+
+ if (inst_decoder->is_modrm) {
+ decode_modrm(env, decode);
+ }
+ if (inst_decoder->decode_op1) {
+ inst_decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (inst_decoder->decode_op2) {
+ inst_decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (inst_decoder->decode_op3) {
+ inst_decoder->decode_op3(env, decode, &decode->op[2]);
+ }
+ if (inst_decoder->decode_op4) {
+ inst_decoder->decode_op4(env, decode, &decode->op[3]);
+ }
+ if (inst_decoder->decode_postfix) {
+ inst_decoder->decode_postfix(env, decode);
+ }
+}
+
+static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
+static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
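+/* Dispatch on the first opcode byte; 0x0f escapes to the two-byte map. */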
+static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t opcode;
+
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ if (opcode != OPCODE_ESCAPE) {
+ decode_opcode_1(env, decode, opcode);
+ } else {
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ decode_opcode_2(env, decode, opcode);
+ }
+}
+
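+/* Top-level decoder entry point; returns the instruction length in bytes. */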
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
+{
+ memset(decode, 0, sizeof(*decode));
+ decode_prefix(env, decode);
+ set_addressing_size(env, decode);
+ set_operand_size(env, decode);
+
+ decode_opcodes(env, decode);
+
+ return decode->len;
+}
+
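+/*
+ * Fill every table slot with the "invalid" template, then install the
+ * known entries; x87 entries are indexed by opcode low nibble, a
+ * mod == 3 flag and the ModRM reg field.
+ */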
+void init_decoder(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) {
+ memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
+ memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {
+ memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));
+ }
+ for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {
+ _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) {
+ _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {
+ int index = ((_x87_inst[i].opcode & 0xf) << 4) |
+ ((_x87_inst[i].modrm_mod & 1) << 3) |
+ _x87_inst[i].modrm_reg;
+ _decode_tbl3[index] = _x87_inst[i];
+ }
+}
+
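+/* Command names, indexed by and kept in sync with enum x86_decode_cmd. */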
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd)
+{
+ static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG",
+ "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT",
+ "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD",
+ "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST",
+ "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR",
+ "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG",
+ "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN",
+ "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW",
+ "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR",
+ "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR",
+ "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", "DIV", "IDIV", "MUL",
+ "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS",
+ "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT",
+ "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA",
+ "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT",
+ "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES",
+ "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT",
+ "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW",
+ "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV",
+ "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE",
+ "PREFETCH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW",
+ "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"};
+ return cmds[cmd];
+}
+
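+/*
+ * Turn an effective address into a linear address, honouring any
+ * segment override prefix before applying the segment base.
+ */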
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, X86Seg seg)
+{
+ switch (decode->segment_override) {
+ case PREFIX_CS_SEG_OVERRIDE:
+ seg = R_CS;
+ break;
+ case PREFIX_SS_SEG_OVERRIDE:
+ seg = R_SS;
+ break;
+ case PREFIX_DS_SEG_OVERRIDE:
+ seg = R_DS;
+ break;
+ case PREFIX_ES_SEG_OVERRIDE:
+ seg = R_ES;
+ break;
+ case PREFIX_FS_SEG_OVERRIDE:
+ seg = R_FS;
+ break;
+ case PREFIX_GS_SEG_OVERRIDE:
+ seg = R_GS;
+ break;
+ default:
+ break;
+ }
+ return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg);
+}
diff --git a/target/i386/hvf/x86_decode.h b/target/i386/hvf/x86_decode.h
new file mode 100644
index 000000000..a2d7a2a27
--- /dev/null
+++ b/target/i386/hvf/x86_decode.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2016 Veertu Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_DECODE_H
+#define HVF_X86_DECODE_H
+
+#include "cpu.h"
+#include "x86.h"
+
+typedef enum x86_prefix {
+ /* group 1 */
+ PREFIX_LOCK = 0xf0,
+ PREFIX_REPN = 0xf2,
+ PREFIX_REP = 0xf3,
+ /* group 2 */
+ PREFIX_CS_SEG_OVERRIDE = 0x2e,
+ PREFIX_SS_SEG_OVERRIDE = 0x36,
+ PREFIX_DS_SEG_OVERRIDE = 0x3e,
+ PREFIX_ES_SEG_OVERRIDE = 0x26,
+ PREFIX_FS_SEG_OVERRIDE = 0x64,
+ PREFIX_GS_SEG_OVERRIDE = 0x65,
+ /* group 3 */
+ PREFIX_OP_SIZE_OVERRIDE = 0x66,
+ /* group 4 */
+ PREFIX_ADDR_SIZE_OVERRIDE = 0x67,
+
+ PREFIX_REX = 0x40,
+} x86_prefix;
+
+enum x86_decode_cmd {
+ X86_DECODE_CMD_INVL = 0,
+
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_PUSH_SEG,
+ X86_DECODE_CMD_POP,
+ X86_DECODE_CMD_POP_SEG,
+ X86_DECODE_CMD_MOV,
+ X86_DECODE_CMD_MOVSX,
+ X86_DECODE_CMD_MOVZX,
+ X86_DECODE_CMD_CALL_NEAR,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR,
+ X86_DECODE_RET_NEAR,
+ X86_DECODE_RET_FAR,
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP,
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_JMP_NEAR,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_LEA,
+ X86_DECODE_CMD_JXX,
+ X86_DECODE_CMD_JCXZ,
+ X86_DECODE_CMD_SETXX,
+ X86_DECODE_CMD_MOV_TO_SEG,
+ X86_DECODE_CMD_MOV_FROM_SEG,
+ X86_DECODE_CMD_CLI,
+ X86_DECODE_CMD_STI,
+ X86_DECODE_CMD_CLD,
+ X86_DECODE_CMD_STD,
+ X86_DECODE_CMD_STC,
+ X86_DECODE_CMD_CLC,
+ X86_DECODE_CMD_OUT,
+ X86_DECODE_CMD_IN,
+ X86_DECODE_CMD_INS,
+ X86_DECODE_CMD_OUTS,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_RDTSCP,
+ X86_DECODE_CMD_INVLPG,
+ X86_DECODE_CMD_MOV_TO_CR,
+ X86_DECODE_CMD_MOV_FROM_CR,
+ X86_DECODE_CMD_MOV_TO_DR,
+ X86_DECODE_CMD_MOV_FROM_DR,
+ X86_DECODE_CMD_PUSHF,
+ X86_DECODE_CMD_POPF,
+ X86_DECODE_CMD_CPUID,
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHRD,
+ X86_DECODE_CMD_SHLD,
+ X86_DECODE_CMD_SAR,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_3,
+ X86_DECODE_CMD_IMUL_2,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_MOVS,
+ X86_DECODE_CMD_CMPS,
+ X86_DECODE_CMD_SCAS,
+ X86_DECODE_CMD_LODS,
+ X86_DECODE_CMD_STOS,
+ X86_DECODE_CMD_BSWAP,
+ X86_DECODE_CMD_XCHG,
+ X86_DECODE_CMD_RDTSC,
+ X86_DECODE_CMD_RDMSR,
+ X86_DECODE_CMD_WRMSR,
+ X86_DECODE_CMD_ENTER,
+ X86_DECODE_CMD_LEAVE,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTC,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BSF,
+ X86_DECODE_CMD_BSR,
+ X86_DECODE_CMD_IRET,
+ X86_DECODE_CMD_INT,
+ X86_DECODE_CMD_POPA,
+ X86_DECODE_CMD_PUSHA,
+ X86_DECODE_CMD_CWD,
+ X86_DECODE_CMD_CBW,
+ X86_DECODE_CMD_DAS,
+ X86_DECODE_CMD_AAD,
+ X86_DECODE_CMD_AAM,
+ X86_DECODE_CMD_AAS,
+ X86_DECODE_CMD_LOOP,
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_SAHF,
+ X86_DECODE_CMD_LAHF,
+ X86_DECODE_CMD_WBINVD,
+ X86_DECODE_CMD_LDS,
+ X86_DECODE_CMD_LSS,
+ X86_DECODE_CMD_LES,
+ X86_DECODE_XMD_LGS,
+ X86_DECODE_CMD_LFS,
+ X86_DECODE_CMD_CMC,
+ X86_DECODE_CMD_XLAT,
+ X86_DECODE_CMD_NOP,
+ X86_DECODE_CMD_CMOV,
+ X86_DECODE_CMD_CLTS,
+ X86_DECODE_CMD_XADD,
+ X86_DECODE_CMD_HLT,
+ X86_DECODE_CMD_CMPXCHG8B,
+ X86_DECODE_CMD_CMPXCHG,
+ X86_DECODE_CMD_POPCNT,
+
+ X86_DECODE_CMD_FNINIT,
+ X86_DECODE_CMD_FLD,
+ X86_DECODE_CMD_FLDxx,
+ X86_DECODE_CMD_FNSTCW,
+ X86_DECODE_CMD_FNSTSW,
+ X86_DECODE_CMD_FNSETPM,
+ X86_DECODE_CMD_FSAVE,
+ X86_DECODE_CMD_FRSTOR,
+ X86_DECODE_CMD_FXSAVE,
+ X86_DECODE_CMD_FXRSTOR,
+ X86_DECODE_CMD_FDIV,
+ X86_DECODE_CMD_FMUL,
+ X86_DECODE_CMD_FSUB,
+ X86_DECODE_CMD_FADD,
+ X86_DECODE_CMD_EMMS,
+ X86_DECODE_CMD_MFENCE,
+ X86_DECODE_CMD_SFENCE,
+ X86_DECODE_CMD_LFENCE,
+ X86_DECODE_CMD_PREFETCH,
+ X86_DECODE_CMD_CLFLUSH,
+ X86_DECODE_CMD_FST,
+ X86_DECODE_CMD_FABS,
+ X86_DECODE_CMD_FUCOM,
+ X86_DECODE_CMD_FUCOMI,
+ X86_DECODE_CMD_FLDCW,
+ X86_DECODE_CMD_FXCH,
+ X86_DECODE_CMD_FCHS,
+ X86_DECODE_CMD_FCMOV,
+ X86_DECODE_CMD_FRNDINT,
+ X86_DECODE_CMD_FXAM,
+
+ X86_DECODE_CMD_LAST,
+};
+
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
+
+typedef struct x86_modrm {
+ union {
+ uint8_t modrm;
+ struct {
+ uint8_t rm:3;
+ uint8_t reg:3;
+ uint8_t mod:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_modrm;
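+
+/*
+ * Example (illustrative): ModRM byte 0xc8 is 11 001 000 in binary, so
+ * under this layout mod = 3 (register operand), reg = 1 and rm = 0.
+ */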
+
+typedef struct x86_sib {
+ union {
+ uint8_t sib;
+ struct {
+ uint8_t base:3;
+ uint8_t index:3;
+ uint8_t scale:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_sib;
+
+typedef struct x86_rex {
+ union {
+ uint8_t rex;
+ struct {
+ uint8_t b:1;
+ uint8_t x:1;
+ uint8_t r:1;
+ uint8_t w:1;
+ uint8_t unused:4;
+ };
+ };
+} __attribute__ ((__packed__)) x86_rex;
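+
+/*
+ * Example (illustrative): the REX prefix 0x48 (REX.W) decodes here as
+ * w = 1 with r = x = b = 0, widening the operand size to 64 bit.
+ */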
+
+typedef enum x86_var_type {
+ X86_VAR_IMMEDIATE,
+ X86_VAR_OFFSET,
+ X86_VAR_REG,
+ X86_VAR_RM,
+
+ /* for floating point computations */
+ X87_VAR_REG,
+ X87_VAR_FLOATP,
+ X87_VAR_INTP,
+ X87_VAR_BYTEP,
+} x86_var_type;
+
+typedef struct x86_decode_op {
+ enum x86_var_type type;
+ int size;
+
+ int reg;
+ target_ulong val;
+
+ target_ulong ptr;
+} x86_decode_op;
+
+typedef struct x86_decode {
+ int len;
+ uint8_t opcode[4];
+ uint8_t opcode_len;
+ enum x86_decode_cmd cmd;
+ int addressing_size;
+ int operand_size;
+ int lock;
+ int rep;
+ int op_size_override;
+ int addr_size_override;
+ int segment_override;
+ int control_change_inst;
+ bool fwait;
+ bool fpop_stack;
+ bool frev;
+
+ uint32_t displacement;
+ uint8_t displacement_size;
+ struct x86_rex rex;
+ bool is_modrm;
+ bool sib_present;
+ struct x86_sib sib;
+ struct x86_modrm modrm;
+ struct x86_decode_op op[4];
+ bool is_fpu;
+ uint32_t flags_mask;
+
+} x86_decode;
+
+uint64_t sign(uint64_t val, int size);
+
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
+
+target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
+target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, enum X86Seg seg);
+
+void init_decoder(void);
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
+void set_operand_size(CPUX86State *env, struct x86_decode *decode);
+
+#endif
diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c
new file mode 100644
index 000000000..af15c06ac
--- /dev/null
+++ b/target/i386/hvf/x86_descr.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "vmx.h"
+#include "x86_descr.h"
+
+#define VMX_SEGMENT_FIELD(seg) \
+ [R_##seg] = { \
+ .selector = VMCS_GUEST_##seg##_SELECTOR, \
+ .base = VMCS_GUEST_##seg##_BASE, \
+ .limit = VMCS_GUEST_##seg##_LIMIT, \
+ .ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS, \
+}
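+
+/*
+ * For illustration: VMX_SEGMENT_FIELD(CS) expands to an entry at [R_CS]
+ * holding VMCS_GUEST_CS_SELECTOR, VMCS_GUEST_CS_BASE, VMCS_GUEST_CS_LIMIT
+ * and VMCS_GUEST_CS_ACCESS_RIGHTS, keyed by the QEMU segment index.
+ */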
+
+static const struct vmx_segment_field {
+ int selector;
+ int base;
+ int limit;
+ int ar_bytes;
+} vmx_segment_fields[] = {
+ VMX_SEGMENT_FIELD(ES),
+ VMX_SEGMENT_FIELD(CS),
+ VMX_SEGMENT_FIELD(SS),
+ VMX_SEGMENT_FIELD(DS),
+ VMX_SEGMENT_FIELD(FS),
+ VMX_SEGMENT_FIELD(GS),
+ VMX_SEGMENT_FIELD(LDTR),
+ VMX_SEGMENT_FIELD(TR),
+};
+
+uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
+{
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
+}
+
+uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
+{
+ return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
+}
+
+uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
+{
+ return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
+}
+
+x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
+{
+ x68_segment_selector sel;
+ sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
+ return sel;
+}
+
+void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
+{
+ wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel);
+}
+
+void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
+{
+ desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);
+ desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);
+ desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);
+ desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);
+}
+
+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
+{
+ const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
+
+ wvmcs(cpu->hvf->fd, sf->base, desc->base);
+ wvmcs(cpu->hvf->fd, sf->limit, desc->limit);
+ wvmcs(cpu->hvf->fd, sf->selector, desc->sel);
+ wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar);
+}
+
+void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
+{
+ vmx_desc->sel = selector.sel;
+ vmx_desc->base = x86_segment_base(desc);
+ vmx_desc->limit = x86_segment_limit(desc);
+
+ vmx_desc->ar = (selector.sel ? 0 : 1) << 16 |
+ desc->g << 15 |
+ desc->db << 14 |
+ desc->l << 13 |
+ desc->avl << 12 |
+ desc->p << 7 |
+ desc->dpl << 5 |
+ desc->s << 4 |
+ desc->type;
+}
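+
+/*
+ * Worked example (illustrative): a flat 32-bit ring-0 code segment with
+ * type = 0xb, s = 1, dpl = 0, p = 1, db = 1 and g = 1 packs to
+ * ar = 0xc09b under the shifts above; the "unusable" bit (bit 16) is
+ * only set when the selector is null.
+ */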
+
+void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc)
+{
+ x86_set_segment_limit(desc, vmx_desc->limit);
+ x86_set_segment_base(desc, vmx_desc->base);
+
+ desc->type = vmx_desc->ar & 15;
+ desc->s = (vmx_desc->ar >> 4) & 1;
+ desc->dpl = (vmx_desc->ar >> 5) & 3;
+ desc->p = (vmx_desc->ar >> 7) & 1;
+ desc->avl = (vmx_desc->ar >> 12) & 1;
+ desc->l = (vmx_desc->ar >> 13) & 1;
+ desc->db = (vmx_desc->ar >> 14) & 1;
+ desc->g = (vmx_desc->ar >> 15) & 1;
+}
+
diff --git a/target/i386/hvf/x86_descr.h b/target/i386/hvf/x86_descr.h
new file mode 100644
index 000000000..c356932fa
--- /dev/null
+++ b/target/i386/hvf/x86_descr.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_DESCR_H
+#define HVF_X86_DESCR_H
+
+#include "x86.h"
+
+typedef struct vmx_segment {
+ uint16_t sel;
+ uint64_t base;
+ uint64_t limit;
+ uint64_t ar;
+} vmx_segment;
+
+/* deal with guest segment descriptor state held in the VMCS */
+void vmx_read_segment_descriptor(struct CPUState *cpu,
+ struct vmx_segment *desc, enum X86Seg seg);
+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
+ enum X86Seg seg);
+
+x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu,
+ enum X86Seg seg);
+void vmx_write_segment_selector(struct CPUState *cpu,
+ x68_segment_selector selector,
+ enum X86Seg seg);
+
+uint64_t vmx_read_segment_base(struct CPUState *cpu, enum X86Seg seg);
+void vmx_write_segment_base(struct CPUState *cpu, enum X86Seg seg,
+ uint64_t base);
+
+void x86_segment_descriptor_to_vmx(struct CPUState *cpu,
+ x68_segment_selector selector,
+ struct x86_segment_descriptor *desc,
+ struct vmx_segment *vmx_desc);
+
+uint32_t vmx_read_segment_limit(CPUState *cpu, enum X86Seg seg);
+uint32_t vmx_read_segment_ar(CPUState *cpu, enum X86Seg seg);
+void vmx_segment_to_x86_descriptor(struct CPUState *cpu,
+ struct vmx_segment *vmx_desc,
+ struct x86_segment_descriptor *desc);
+
+#endif
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
new file mode 100644
index 000000000..7c8203b21
--- /dev/null
+++ b/target/i386/hvf/x86_emu.c
@@ -0,0 +1,1489 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "qemu-common.h"
+#include "x86_decode.h"
+#include "x86.h"
+#include "x86_emu.h"
+#include "x86_mmu.h"
+#include "x86_flags.h"
+#include "vmcs.h"
+#include "vmx.h"
+
+void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
+ int direction, int size, uint32_t count);
+
+#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
+{ \
+ fetch_operands(env, decode, 2, true, true, false); \
+ switch (decode->operand_size) { \
+ case 1: \
+ { \
+ uint8_t v1 = (uint8_t)decode->op[0].val; \
+ uint8_t v2 = (uint8_t)decode->op[1].val; \
+ uint8_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 1); \
+ } \
+ FLAGS_FUNC##8(env, v1, v2, diff); \
+ break; \
+ } \
+ case 2: \
+ { \
+ uint16_t v1 = (uint16_t)decode->op[0].val; \
+ uint16_t v2 = (uint16_t)decode->op[1].val; \
+ uint16_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 2); \
+ } \
+ FLAGS_FUNC##16(env, v1, v2, diff); \
+ break; \
+ } \
+ case 4: \
+ { \
+ uint32_t v1 = (uint32_t)decode->op[0].val; \
+ uint32_t v2 = (uint32_t)decode->op[1].val; \
+ uint32_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 4); \
+ } \
+ FLAGS_FUNC##32(env, v1, v2, diff); \
+ break; \
+ } \
+ default: \
+ VM_PANIC("bad size\n"); \
+ } \
+}
+
+target_ulong read_reg(CPUX86State *env, int reg, int size)
+{
+ switch (size) {
+ case 1:
+ return x86_reg(env, reg)->lx;
+ case 2:
+ return x86_reg(env, reg)->rx;
+ case 4:
+ return x86_reg(env, reg)->erx;
+ case 8:
+ return x86_reg(env, reg)->rrx;
+ default:
+ abort();
+ }
+ return 0;
+}
+
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ x86_reg(env, reg)->lx = val;
+ break;
+ case 2:
+ x86_reg(env, reg)->rx = val;
+ break;
+ case 4:
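+ /* a 32-bit write zero-extends into the full 64-bit register */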
+ x86_reg(env, reg)->rrx = (uint32_t)val;
+ break;
+ case 8:
+ x86_reg(env, reg)->rrx = val;
+ break;
+ default:
+ abort();
+ }
+}
+
+target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
+{
+ target_ulong val;
+
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)reg_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)reg_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)reg_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)reg_ptr;
+ break;
+ default:
+ abort();
+ }
+ return val;
+}
+
+void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ *(uint8_t *)reg_ptr = val;
+ break;
+ case 2:
+ *(uint16_t *)reg_ptr = val;
+ break;
+ case 4:
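+ /* store 64 bits: 32-bit writes zero-extend per x86-64 rules */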
+ *(uint64_t *)reg_ptr = (uint32_t)val;
+ break;
+ case 8:
+ *(uint64_t *)reg_ptr = val;
+ break;
+ default:
+ abort();
+ }
+}
+
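+/*
+ * Note (added for clarity): operand "pointers" in this emulator are
+ * either host addresses into env->regs[] (register operands) or guest
+ * linear addresses (memory operands); is_host_reg() tells the two apart
+ * with a range check against the register file.
+ */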
+static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
+{
+ return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
+}
+
+void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
+{
+ if (is_host_reg(env, ptr)) {
+ write_val_to_reg(ptr, val, size);
+ return;
+ }
+ vmx_write_mem(env_cpu(env), ptr, &val, size);
+}
+
+uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
+{
+ vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, ptr, bytes);
+ return env->hvf_mmio_buf;
+}
+
+
+target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size)
+{
+ target_ulong val;
+ uint8_t *mmio_ptr;
+
+ if (is_host_reg(env, ptr)) {
+ return read_val_from_reg(ptr, size);
+ }
+
+ mmio_ptr = read_mmio(env, ptr, size);
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)mmio_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)mmio_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)mmio_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)mmio_ptr;
+ break;
+ default:
+ VM_PANIC("bad size\n");
+ break;
+ }
+ return val;
+}
+
+static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
+ int n, bool val_op0, bool val_op1, bool val_op2)
+{
+ int i;
+ bool calc_val[3] = {val_op0, val_op1, val_op2};
+
+ for (i = 0; i < n; i++) {
+ switch (decode->op[i].type) {
+ case X86_VAR_IMMEDIATE:
+ break;
+ case X86_VAR_REG:
+ VM_PANIC_ON(!decode->op[i].ptr);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_RM:
+ calc_modrm_operand(env, decode, &decode->op[i]);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_OFFSET:
+ decode->op[i].ptr = decode_linear_addr(env, decode,
+ decode->op[i].ptr,
+ R_DS);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, false, true, false);
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ env->eip += decode->len;
+}
+
+static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
+ env->eip += decode->len;
+}
+
+static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
+ env->eip += decode->len;
+}
+
+static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
+{
+ /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
+ int32_t val;
+ fetch_operands(env, decode, 2, true, true, false);
+
+ val = 0 - sign(decode->op[1].val, decode->operand_size);
+ write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
+
+ if (4 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
+ } else if (2 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
+ } else if (1 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
+ } else {
+ VM_PANIC("bad op size\n");
+ }
+
+ /*lflags_to_rflags(env);*/
+ env->eip += decode->len;
+}
+
+static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ env->eip += decode->len;
+}
+
+static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
+
+ env->eip += decode->len;
+}
+
+static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
+ env->eip += decode->len;
+}
+
+static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
+ env->eip += decode->len;
+}
+
+static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 1, true, false, false);
+
+ write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
+ decode->operand_size);
+ env->eip += decode->len;
+}
+
+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 1, false, false, false);
+
+ if (0xb6 == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->opcode[0]) {
+ case 0xe6:
+ hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1);
+ break;
+ case 0xe7:
+ hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1,
+ decode->operand_size, 1);
+ break;
+ case 0xee:
+ hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1);
+ break;
+ case 0xef:
+ hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1,
+ decode->operand_size, 1);
+ break;
+ default:
+ VM_PANIC("Bad out opcode\n");
+ break;
+ }
+ env->eip += decode->len;
+}
+
+static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong val = 0;
+ switch (decode->opcode[0]) {
+ case 0xe4:
+ hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1);
+ break;
+ case 0xe5:
+ hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0,
+ decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+ break;
+ case 0xec:
+ hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1);
+ break;
+ case 0xed:
+ hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+
+ break;
+ default:
+ VM_PANIC("Bad in opcode\n");
+ break;
+ }
+
+ env->eip += decode->len;
+}
+
+static inline void string_increment_reg(struct CPUX86State *env, int reg,
+ struct x86_decode *decode)
+{
+ target_ulong val = read_reg(env, reg, decode->addressing_size);
+ if (env->eflags & DF_MASK) {
+ val -= decode->operand_size;
+ } else {
+ val += decode->operand_size;
+ }
+ write_reg(env, reg, val, decode->addressing_size);
+}
+
+static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
+ void (*func)(struct CPUX86State *env,
+ struct x86_decode *ins), int rep)
+{
+ target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
+ while (rcx--) {
+ func(env, decode);
+ write_reg(env, R_ECX, rcx, decode->addressing_size);
+ if ((PREFIX_REP == rep) && !get_ZF(env)) {
+ break;
+ }
+ if ((PREFIX_REPN == rep) && get_ZF(env)) {
+ break;
+ }
+ }
+}
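+
+/*
+ * Illustration (not from the original patch): for "repe cmpsb", rep is
+ * PREFIX_REP, so the loop above decrements RCX after each compare and
+ * stops early once ZF clears; "repne scasb" passes PREFIX_REPN and
+ * stops as soon as ZF becomes set.
+ */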
+
+static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 0,
+ decode->operand_size, 1);
+ vmx_write_mem(env_cpu(env), addr, env->hvf_mmio_buf,
+ decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_ins_single, 0);
+ } else {
+ exec_ins_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+
+ vmx_read_mem(env_cpu(env), env->hvf_mmio_buf, addr,
+ decode->operand_size);
+ hvf_handle_io(env_cpu(env), DX(env), env->hvf_mmio_buf, 1,
+ decode->operand_size, 1);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_outs_single, 0);
+ } else {
+ exec_outs_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+ target_ulong val;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ val = read_val_ext(env, src_addr, decode->operand_size);
+ write_val_ext(env, dst_addr, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_movs_single, 0);
+ } else {
+ exec_movs_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_cmps_single, decode->rep);
+ } else {
+ exec_cmps_single(env, decode);
+ }
+ env->eip += decode->len;
+}
+
+
+static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val;
+
+ addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+ val = read_reg(env, R_EAX, decode->operand_size);
+ vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+
+static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_stos_single, 0);
+ } else {
+ exec_stos_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+
+ addr = linear_addr_size(env_cpu(env), RDI(env),
+ decode->addressing_size, R_ES);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = R_EAX;
+ if (decode->rep) {
+ string_rep(env, decode, exec_scas_single, decode->rep);
+ } else {
+ exec_scas_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val = 0;
+
+ addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size);
+ write_reg(env, R_EAX, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_lods_single, 0);
+ } else {
+ exec_lods_single(env, decode);
+ }
+
+ env->eip += decode->len;
+}
+
+void simulate_rdmsr(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ CPUState *cs = env_cpu(env);
+ uint32_t msr = ECX(env);
+ uint64_t val = 0;
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET);
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
+ break;
+ case MSR_IA32_UCODE_REV:
+ val = x86_cpu->ucode_rev;
+ break;
+ case MSR_EFER:
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER);
+ break;
+ case MSR_FSBASE:
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE);
+ break;
+ case MSR_GSBASE:
+ val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE);
+ break;
+ case MSR_KERNELGSBASE:
+ val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_CORE_THREAD_COUNT:
+ val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
+ val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
+ break;
+ default:
+ /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
+ val = 0;
+ break;
+ }
+
+ RAX(env) = (uint32_t)val;
+ RDX(env) = (uint32_t)(val >> 32);
+}
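+
+/*
+ * Worked example (illustrative): if the handled MSR yields
+ * val = 0x1122334455667788, the guest sees RAX = 0x55667788 and
+ * RDX = 0x11223344, matching the EDX:EAX split of the RDMSR ABI.
+ */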
+
+static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ simulate_rdmsr(env_cpu(env));
+ env->eip += decode->len;
+}
+
+void simulate_wrmsr(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t msr = ECX(env);
+ uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ break;
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
+ break;
+ case MSR_FSBASE:
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data);
+ break;
+ case MSR_GSBASE:
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data);
+ break;
+ case MSR_KERNELGSBASE:
+ wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_EFER:
+ /*printf("new efer %llx\n", EFER(cpu));*/
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data);
+ if (data & MSR_EFER_NXE) {
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
+ }
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = data;
+ break;
+ default:
+ break;
+ }
+
+ /* Hook for supporting a known hypervisor interface; kept for reference: */
+ /* if (g_hypervisor_iface)
+ g_hypervisor_iface->wrmsr_handler(cpu, msr, data);
+
+ printf("write msr %llx\n", RCX(cpu));*/
+}
+
+static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ simulate_wrmsr(env_cpu(env));
+ env->eip += decode->len;
+}
+
+/*
+ * flag:
+ * 0 - bt, 1 - btc, 2 - bts, 3 - btr
+ */
+static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
+{
+ int32_t displacement;
+ uint8_t index;
+ bool cf;
+ int mask = (4 == decode->operand_size) ? 0x1f : 0xf;
+
+ VM_PANIC_ON(decode->rex.rex);
+
+ fetch_operands(env, decode, 2, false, true, false);
+ index = decode->op[1].val & mask;
+
+ if (decode->op[0].type != X86_VAR_REG) {
+ if (4 == decode->operand_size) {
+ displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
+ decode->op[0].ptr += 4 * displacement;
+ } else if (2 == decode->operand_size) {
+ displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
+ decode->op[0].ptr += 2 * displacement;
+ } else {
+ VM_PANIC("bt 64bit\n");
+ }
+ }
+ decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
+ decode->operand_size);
+ cf = (decode->op[0].val >> index) & 0x01;
+
+ switch (flag) {
+ case 0:
+ set_CF(env, cf);
+ return;
+ case 1:
+ decode->op[0].val ^= (1u << index);
+ break;
+ case 2:
+ decode->op[0].val |= (1u << index);
+ break;
+ case 3:
+ decode->op[0].val &= ~(1u << index);
+ break;
+ }
+ write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
+ decode->operand_size);
+ set_CF(env, cf);
+}
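+
+/*
+ * Worked example (illustrative): "bt dword [mem], 37" with a memory
+ * operand gives index = 37 & 0x1f = 5 and displacement = 32 / 32 = 1,
+ * so the code above tests bit 5 of the dword at mem + 4, i.e. overall
+ * bit 37, matching the Intel bit-string semantics.
+ */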
+
+static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 0);
+ env->eip += decode->len;
+}
+
+static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 1);
+ env->eip += decode->len;
+}
+
+static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 3);
+ env->eip += decode->len;
+}
+
+static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 2);
+ env->eip += decode->len;
+}
+
+void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+
+ count = decode->op[1].val;
+ count &= 0x1f; /* count is masked to 5 bits */
+ if (!count) {
+ goto exit;
+ }
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t res = 0;
+ if (count <= 8) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (8 - count)) & 0x1;
+ of = cf ^ (res >> 7);
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res = 0;
+
+ /* from bochs */
+ if (count <= 16) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+ SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res = decode->op[0].val << count;
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+ SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
+ cf = (decode->op[0].val >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ default:
+ abort();
+ }
+
+exit:
+ /* lflags_to_rflags(env); */
+ env->eip += decode->len;
+}
+
+void exec_movsx(CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 2, false, false, false);
+
+ if (0xbe == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
+ src_op_size);
+
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+
+ env->eip += decode->len;
+}
+
+void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit6, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
+ bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ } else {
+ count &= 0x7; /* use only bottom 3 bits */
+ res = ((uint8_t)decode->op[0].val >> count) |
+ ((uint8_t)decode->op[0].val << (8 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ bit6 = (res >> 6) & 1;
+ bit7 = (res >> 7) & 1;
+ /* set eflags: ROR count affects the following flags: C, O */
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit14, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
+ bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ } else {
+ count &= 0x0f; /* use only 4 LSB's */
+ res = ((uint16_t)decode->op[0].val >> count) |
+ ((uint16_t)decode->op[0].val << (16 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ bit14 = (res >> 14) & 1;
+ bit15 = (res >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit31, bit30;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val >> count) |
+ ((uint32_t)decode->op[0].val << (32 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ bit31 = (res >> 31) & 1;
+ bit30 = (res >> 30) & 1;
+ /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
+ }
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit0, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit0 = ((uint8_t)decode->op[0].val & 1);
+ bit7 = ((uint8_t)decode->op[0].val >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ } else {
+ count &= 0x7; /* use only lowest 3 bits */
+ res = ((uint8_t)decode->op[0].val << count) |
+ ((uint8_t)decode->op[0].val >> (8 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ /* set eflags:
+ * ROL count affects the following flags: C, O
+ */
+ bit0 = (res & 1);
+ bit7 = (res >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit0, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit0 = ((uint16_t)decode->op[0].val & 0x1);
+ bit15 = ((uint16_t)decode->op[0].val >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ } else {
+ count &= 0x0f; /* only use bottom 4 bits */
+ res = ((uint16_t)decode->op[0].val << count) |
+ ((uint16_t)decode->op[0].val >> (16 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+ bit0 = (res & 0x1);
+ bit15 = (res >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit0, bit31;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val << count) |
+ ((uint32_t)decode->op[0].val >> (32 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+ bit0 = (res & 0x1);
+ bit31 = (res >> 31);
+ /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
+ }
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+
+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+ count %= 9;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_8 << 1) | get_CF(env);
+ } else {
+ res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
+ (op1_8 >> (9 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+
+ cf = (op1_8 >> (8 - count)) & 0x01;
+ of = cf ^ (res >> 7); /* of = cf ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res;
+ uint16_t op1_16 = decode->op[0].val;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_16 << 1) | get_CF(env);
+ } else if (count == 16) {
+ res = (get_CF(env) << 15) | (op1_16 >> 1);
+ } else { /* 2..15 */
+ res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
+ (op1_16 >> (17 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ cf = (op1_16 >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 << 1) | get_CF(env);
+ } else {
+ res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
+ (op1_32 >> (33 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ cf = (op1_32 >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+
+ count %= 9;
+ if (!count) {
+ break;
+ }
+ res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
+ (op1_8 << (9 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+
+ cf = (op1_8 >> (count - 1)) & 0x1;
+ of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t op1_16 = decode->op[0].val;
+ uint16_t res;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+ res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
+ (op1_16 << (17 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ cf = (op1_16 >> (count - 1)) & 0x1;
+ of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
+ result14 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 >> 1) | (get_CF(env) << 31);
+ } else {
+ res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
+ (op1_32 << (33 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ cf = (op1_32 >> (count - 1)) & 0x1;
+ of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ env->eip += decode->len;
+}
+
+static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, true, true, false);
+
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ decode->operand_size);
+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ decode->operand_size);
+
+ env->eip += decode->len;
+}
+
+static struct cmd_handler {
+ enum x86_decode_cmd cmd;
+ void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
+} handlers[] = {
+ {X86_DECODE_CMD_INVL, NULL,},
+ {X86_DECODE_CMD_MOV, exec_mov},
+ {X86_DECODE_CMD_ADD, exec_add},
+ {X86_DECODE_CMD_OR, exec_or},
+ {X86_DECODE_CMD_ADC, exec_adc},
+ {X86_DECODE_CMD_SBB, exec_sbb},
+ {X86_DECODE_CMD_AND, exec_and},
+ {X86_DECODE_CMD_SUB, exec_sub},
+ {X86_DECODE_CMD_NEG, exec_neg},
+ {X86_DECODE_CMD_XOR, exec_xor},
+ {X86_DECODE_CMD_CMP, exec_cmp},
+ {X86_DECODE_CMD_INC, exec_inc},
+ {X86_DECODE_CMD_DEC, exec_dec},
+ {X86_DECODE_CMD_TST, exec_tst},
+ {X86_DECODE_CMD_NOT, exec_not},
+ {X86_DECODE_CMD_MOVZX, exec_movzx},
+ {X86_DECODE_CMD_OUT, exec_out},
+ {X86_DECODE_CMD_IN, exec_in},
+ {X86_DECODE_CMD_INS, exec_ins},
+ {X86_DECODE_CMD_OUTS, exec_outs},
+ {X86_DECODE_CMD_RDMSR, exec_rdmsr},
+ {X86_DECODE_CMD_WRMSR, exec_wrmsr},
+ {X86_DECODE_CMD_BT, exec_bt},
+ {X86_DECODE_CMD_BTR, exec_btr},
+ {X86_DECODE_CMD_BTC, exec_btc},
+ {X86_DECODE_CMD_BTS, exec_bts},
+ {X86_DECODE_CMD_SHL, exec_shl},
+ {X86_DECODE_CMD_ROL, exec_rol},
+ {X86_DECODE_CMD_ROR, exec_ror},
+ {X86_DECODE_CMD_RCR, exec_rcr},
+ {X86_DECODE_CMD_RCL, exec_rcl},
+ /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
+ {X86_DECODE_CMD_MOVS, exec_movs},
+ {X86_DECODE_CMD_CMPS, exec_cmps},
+ {X86_DECODE_CMD_STOS, exec_stos},
+ {X86_DECODE_CMD_SCAS, exec_scas},
+ {X86_DECODE_CMD_LODS, exec_lods},
+ {X86_DECODE_CMD_MOVSX, exec_movsx},
+ {X86_DECODE_CMD_XCHG, exec_xchg},
+ {X86_DECODE_CMD_XADD, exec_xadd},
+};
+
+static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
+
+static void init_cmd_handler(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ _cmd_handler[handlers[i].cmd] = handlers[i];
+ }
+}
+
+void load_regs(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ int i = 0;
+ RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX);
+ RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX);
+ RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX);
+ RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX);
+ RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI);
+ RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI);
+ RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP);
+ RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP);
+ for (i = 8; i < 16; i++) {
+ RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i);
+ }
+
+ env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);
+ rflags_to_lflags(env);
+ env->eip = rreg(cpu->hvf->fd, HV_X86_RIP);
+}
+
+void store_regs(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ int i = 0;
+ wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env));
+ wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env));
+ wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env));
+ wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env));
+ wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env));
+ wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env));
+ wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env));
+ wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env));
+ for (i = 8; i < 16; i++) {
+ wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i));
+ }
+
+ lflags_to_rflags(env);
+ wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags);
+ macvm_set_rip(cpu, env->eip);
+}
+
+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
+{
+ /*if (hvf_vcpu_id(cpu))
+ printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), env->eip,
+ decode_cmd_to_string(ins->cmd));*/
+
+ if (!_cmd_handler[ins->cmd].handler) {
+ printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
+ ins->cmd, ins->opcode[0],
+ ins->opcode_len > 1 ? ins->opcode[1] : 0);
+ env->eip += ins->len;
+ return true;
+ }
+
+ _cmd_handler[ins->cmd].handler(env, ins);
+ return true;
+}
+
+void init_emu(void)
+{
+ init_cmd_handler();
+}
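+
+/*
+ * Usage sketch (hedged; the exact call site lives in the hvf exit
+ * handler elsewhere in this series): an MMIO-faulting instruction is
+ * emulated roughly as
+ *
+ *   struct x86_decode decode;
+ *   load_regs(cpu);
+ *   decode_instruction(env, &decode);
+ *   exec_instruction(env, &decode);
+ *   store_regs(cpu);
+ */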
diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h
new file mode 100644
index 000000000..233f7b8da
--- /dev/null
+++ b/target/i386/hvf/x86_emu.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_EMU_H
+#define X86_EMU_H
+
+#include "x86.h"
+#include "x86_decode.h"
+#include "cpu.h"
+
+void init_emu(void);
+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins);
+
+void load_regs(struct CPUState *cpu);
+void store_regs(struct CPUState *cpu);
+
+void simulate_rdmsr(struct CPUState *cpu);
+void simulate_wrmsr(struct CPUState *cpu);
+
+target_ulong read_reg(CPUX86State *env, int reg, int size);
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
+target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
+void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
+void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size);
+uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes);
+target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size);
+
+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode);
+void exec_shl(struct CPUX86State *env, struct x86_decode *decode);
+void exec_movsx(struct CPUX86State *env, struct x86_decode *decode);
+void exec_ror(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rol(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode);
+#endif
diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c
new file mode 100644
index 000000000..fecbca751
--- /dev/null
+++ b/target/i386/hvf/x86_flags.c
@@ -0,0 +1,314 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+/*
+ * flags functions
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "panic.h"
+#include "cpu.h"
+#include "x86_flags.h"
+#include "x86.h"
+
+
+/* this is basically Bochs code */
+
+#define LF_SIGN_BIT 31
+
+#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
+#define LF_BIT_AF (3) /* lazy Adjust flag */
+#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
+#define LF_BIT_CF (31) /* lazy Carry Flag */
+#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
+
+#define LF_MASK_SD (0x01 << LF_BIT_SD)
+#define LF_MASK_AF (0x01 << LF_BIT_AF)
+#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
+#define LF_MASK_CF (0x01 << LF_BIT_CF)
+#define LF_MASK_PO (0x01 << LF_BIT_PO)
+
+#define ADD_COUT_VEC(op1, op2, result) \
+ (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
+
+#define SUB_COUT_VEC(op1, op2, result) \
+ (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))
+
+#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
+ ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
+
+/* ******************* */
+/* OSZAPC */
+/* ******************* */
+
+/* size, carries, result */
+#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
+ target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
+ (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
+ env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ if ((size) == 32) { \
+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
+ } else if ((size) == 16) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
+ } else if ((size) == 8) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ } else { \
+ VM_PANIC("unimplemented"); \
+ } \
+ env->hvf_lflags.auxbits = (target_ulong)(uint32_t)temp; \
+}
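+
+/*
+ * Worked example (illustrative): for the 8-bit add 0xff + 0x01 = 0x00,
+ * ADD_COUT_VEC(0xff, 0x01, 0x00) = 0xff, and after the << 24 shift in
+ * the 8-bit branch above, bit 31 (LF_BIT_CF) of auxbits is set, so the
+ * lazily computed CF reads back as 1 without eflags being materialized.
+ */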
+
+/* carries, result */
+#define SET_FLAGS_OSZAPC_8(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAPC_16(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAPC_32(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(32, carries, result)
+
+/* ******************* */
+/* OSZAP */
+/* ******************* */
+/* size, carries, result */
+#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
+ target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
+ (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
+ if ((size) == 32) { \
+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
+ } else if ((size) == 16) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
+ } else if ((size) == 8) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ } else { \
+ VM_PANIC("unimplemented"); \
+ } \
+ env->hvf_lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong delta_c = (env->hvf_lflags.auxbits ^ temp) & LF_MASK_CF; \
+ delta_c ^= (delta_c >> 1); \
+ env->hvf_lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
+}
+
+/* carries, result */
+#define SET_FLAGS_OSZAP_8(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAP_16(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAP_32(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(32, carries, result)
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
+{
+ uint32_t temp_po = new_of ^ new_cf;
+ env->hvf_lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
+ env->hvf_lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);
+}
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(0, diff);
+}
+
+bool get_PF(CPUX86State *env)
+{
+ uint32_t temp = (255 & env->hvf_lflags.result);
+ temp = temp ^ (255 & (env->hvf_lflags.auxbits >> LF_BIT_PDB));
+ temp = (temp ^ (temp >> 4)) & 0x0F;
+ return (0x9669U >> temp) & 1;
+}
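get_PF folds the flagged byte down to four bits and indexes the 16-bit constant 0x9669, whose bit n is set exactly when the 4-bit value n contains an even number of one bits. A standalone check of the trick (editorial sketch, not part of this patch):

#include <stdint.h>
#include <assert.h>

static int even_parity8(uint8_t v)
{
    v ^= v >> 4;                        /* fold 8 bits down to 4 */
    return (0x9669U >> (v & 0x0f)) & 1; /* bit n set iff n has even parity */
}

int main(void)
{
    assert(even_parity8(0x00) == 1);    /* zero set bits: even */
    assert(even_parity8(0x01) == 0);    /* one set bit: odd */
    assert(even_parity8(0xff) == 1);    /* eight set bits: even */
    return 0;
}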
+
+void set_PF(CPUX86State *env, bool val)
+{
+ uint32_t temp = (255 & env->hvf_lflags.result) ^ (!val);
+ env->hvf_lflags.auxbits &= ~(LF_MASK_PDB);
+ env->hvf_lflags.auxbits |= (temp << LF_BIT_PDB);
+}
+
+bool get_OF(CPUX86State *env)
+{
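+    /*
+     * auxbits keeps CF at bit 31 and PO = CF ^ OF at bit 30; the add
+     * carries into bit 31 exactly when PO is set, so the extracted bit
+     * is CF ^ PO = OF.
+     */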
+ return ((env->hvf_lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
+}
+
+bool get_CF(CPUX86State *env)
+{
+ return (env->hvf_lflags.auxbits >> LF_BIT_CF) & 1;
+}
+
+void set_OF(CPUX86State *env, bool val)
+{
+ bool old_cf = get_CF(env);
+ SET_FLAGS_OxxxxC(env, val, old_cf);
+}
+
+void set_CF(CPUX86State *env, bool val)
+{
+ bool old_of = get_OF(env);
+ SET_FLAGS_OxxxxC(env, old_of, val);
+}
+
+bool get_AF(CPUX86State *env)
+{
+ return (env->hvf_lflags.auxbits >> LF_BIT_AF) & 1;
+}
+
+void set_AF(CPUX86State *env, bool val)
+{
+ env->hvf_lflags.auxbits &= ~(LF_MASK_AF);
+ env->hvf_lflags.auxbits |= val << LF_BIT_AF;
+}
+
+bool get_ZF(CPUX86State *env)
+{
+ return !env->hvf_lflags.result;
+}
+
+void set_ZF(CPUX86State *env, bool val)
+{
+ if (val) {
+ env->hvf_lflags.auxbits ^=
+ (((env->hvf_lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
+ /* merge the parity bits into the Parity Delta Byte */
+ uint32_t temp_pdb = (255 & env->hvf_lflags.result);
+ env->hvf_lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
+ /* now zero the .result value */
+ env->hvf_lflags.result = 0;
+ } else {
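+        /* bit 8 makes .result non-zero without touching SF (bit 31)
+         * or PF (the low byte) */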
+ env->hvf_lflags.result |= (1 << 8);
+ }
+}
+
+bool get_SF(CPUX86State *env)
+{
+ return ((env->hvf_lflags.result >> LF_SIGN_BIT) ^
+ (env->hvf_lflags.auxbits >> LF_BIT_SD)) & 1;
+}
+
+void set_SF(CPUX86State *env, bool val)
+{
+ bool temp_sf = get_SF(env);
+ env->hvf_lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
+}
+
+void lflags_to_rflags(CPUX86State *env)
+{
+ env->eflags |= get_CF(env) ? CC_C : 0;
+ env->eflags |= get_PF(env) ? CC_P : 0;
+ env->eflags |= get_AF(env) ? CC_A : 0;
+ env->eflags |= get_ZF(env) ? CC_Z : 0;
+ env->eflags |= get_SF(env) ? CC_S : 0;
+ env->eflags |= get_OF(env) ? CC_O : 0;
+}
+
+void rflags_to_lflags(CPUX86State *env)
+{
+ env->hvf_lflags.auxbits = env->hvf_lflags.result = 0;
+ set_OF(env, env->eflags & CC_O);
+ set_SF(env, env->eflags & CC_S);
+ set_ZF(env, env->eflags & CC_Z);
+ set_AF(env, env->eflags & CC_A);
+ set_PF(env, env->eflags & CC_P);
+ set_CF(env, env->eflags & CC_C);
+}
diff --git a/target/i386/hvf/x86_flags.h b/target/i386/hvf/x86_flags.h
new file mode 100644
index 000000000..75c2a7fea
--- /dev/null
+++ b/target/i386/hvf/x86_flags.h
@@ -0,0 +1,81 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+/*
+ * x86 eflags functions
+ */
+
+#ifndef X86_FLAGS_H
+#define X86_FLAGS_H
+
+#include "cpu.h"
+void lflags_to_rflags(CPUX86State *env);
+void rflags_to_lflags(CPUX86State *env);
+
+bool get_PF(CPUX86State *env);
+void set_PF(CPUX86State *env, bool val);
+bool get_CF(CPUX86State *env);
+void set_CF(CPUX86State *env, bool val);
+bool get_AF(CPUX86State *env);
+void set_AF(CPUX86State *env, bool val);
+bool get_ZF(CPUX86State *env);
+void set_ZF(CPUX86State *env, bool val);
+bool get_SF(CPUX86State *env);
+void set_SF(CPUX86State *env, bool val);
+bool get_OF(CPUX86State *env);
+void set_OF(CPUX86State *env, bool val);
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+#endif /* X86_FLAGS_H */
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
new file mode 100644
index 000000000..e9ed0f5aa
--- /dev/null
+++ b/target/i386/hvf/x86_mmu.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "x86.h"
+#include "x86_mmu.h"
+#include "vmcs.h"
+#include "vmx.h"
+
+#define pte_present(pte) (pte & PT_PRESENT)
+#define pte_write_access(pte) (pte & PT_WRITE)
+#define pte_user_access(pte) (pte & PT_USER)
+#define pte_exec_access(pte) (!(pte & PT_NX))
+
+#define pte_large_page(pte) (pte & PT_PS)
+#define pte_global_access(pte) (pte & PT_GLOBAL)
+
+#define PAE_CR3_MASK (~0x1fllu)
+#define LEGACY_CR3_MASK (0xffffffff)
+
+#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
+#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
+#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
+
+struct gpt_translation {
+ target_ulong gva;
+ uint64_t gpa;
+ int err_code;
+ uint64_t pte[5];
+ bool write_access;
+ bool user_access;
+ bool exec_access;
+};
+
+static int gpt_top_level(struct CPUState *cpu, bool pae)
+{
+ if (!pae) {
+ return 2;
+ }
+ if (x86_is_long_mode(cpu)) {
+ return 4;
+ }
+
+ return 3;
+}
+
+static inline int gpt_entry(target_ulong addr, int level, bool pae)
+{
+ int level_shift = pae ? 9 : 10;
+ return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
+}
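gpt_entry slices the guest virtual address into 9-bit (PAE) or 10-bit (legacy) index fields, with level 1 starting at bit 12. A standalone sketch (not part of this patch) printing the four long-mode indices of an address:

#include <stdint.h>
#include <stdio.h>

static int gpt_entry(uint64_t addr, int level, int pae)
{
    int level_shift = pae ? 9 : 10;
    return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
}

int main(void)
{
    uint64_t gva = 0x00007f1234567000ULL;
    /* PML4/PDPT/PD/PT indices for a canonical user-space address */
    printf("PML4=%d PDPT=%d PD=%d PT=%d\n",
           gpt_entry(gva, 4, 1), gpt_entry(gva, 3, 1),
           gpt_entry(gva, 2, 1), gpt_entry(gva, 1, 1));
    return 0;
}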
+
+static inline int pte_size(bool pae)
+{
+ return pae ? 8 : 4;
+}
+
+
+static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
+ int level, bool pae)
+{
+ int index;
+ uint64_t pte = 0;
+ uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
+ uint64_t gpa = pt->pte[level] & page_mask;
+
+ if (level == 3 && !x86_is_long_mode(cpu)) {
+ gpa = pt->pte[level];
+ }
+
+ index = gpt_entry(pt->gva, level, pae);
+ address_space_read(&address_space_memory, gpa + index * pte_size(pae),
+ MEMTXATTRS_UNSPECIFIED, &pte, pte_size(pae));
+
+ pt->pte[level - 1] = pte;
+
+ return true;
+}
+
+/* test page table entry */
+static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
+ int level, bool *is_large, bool pae)
+{
+ uint64_t pte = pt->pte[level];
+
+ if (pt->write_access) {
+ pt->err_code |= MMU_PAGE_WT;
+ }
+ if (pt->user_access) {
+ pt->err_code |= MMU_PAGE_US;
+ }
+ if (pt->exec_access) {
+ pt->err_code |= MMU_PAGE_NX;
+ }
+
+ if (!pte_present(pte)) {
+ return false;
+ }
+
+ if (pae && !x86_is_long_mode(cpu) && 2 == level) {
+ goto exit;
+ }
+
+ if (1 == level && pte_large_page(pte)) {
+ pt->err_code |= MMU_PAGE_PT;
+ *is_large = true;
+ }
+ if (!level) {
+ pt->err_code |= MMU_PAGE_PT;
+ }
+
+ uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
+ /* check protection */
+ if (cr0 & CR0_WP) {
+ if (pt->write_access && !pte_write_access(pte)) {
+ return false;
+ }
+ }
+
+ if (pt->user_access && !pte_user_access(pte)) {
+ return false;
+ }
+
+ if (pae && pt->exec_access && !pte_exec_access(pte)) {
+ return false;
+ }
+
+exit:
+ /* TODO: check reserved bits */
+ return true;
+}
+
+static inline uint64_t pse_pte_to_page(uint64_t pte)
+{
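+    /* PSE(36): PDE bits 13..20 supply physical address bits 32..39;
+     * bits 22..31 pass through as address bits 22..31 */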
+ return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
+}
+
+static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
+{
+ VM_PANIC_ON(!pte_large_page(pt->pte[1]))
+ /* 2Mb large page */
+ if (pae) {
+ return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
+ }
+
+ /* 4Mb large page */
+ return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
+}
+
+
+
+static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
+ struct gpt_translation *pt, bool pae)
+{
+ int top_level, level;
+ bool is_large = false;
+ target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);
+ uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
+
+ memset(pt, 0, sizeof(*pt));
+ top_level = gpt_top_level(cpu, pae);
+
+ pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
+ pt->gva = addr;
+ pt->user_access = (err_code & MMU_PAGE_US);
+ pt->write_access = (err_code & MMU_PAGE_WT);
+ pt->exec_access = (err_code & MMU_PAGE_NX);
+
+ for (level = top_level; level > 0; level--) {
+ get_pt_entry(cpu, pt, level, pae);
+
+ if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
+ return false;
+ }
+
+ if (is_large) {
+ break;
+ }
+ }
+
+ if (!is_large) {
+ pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
+ } else {
+ pt->gpa = large_page_gpa(pt, pae);
+ }
+
+ return true;
+}
+
+
+bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
+{
+ bool res;
+ struct gpt_translation pt;
+ int err_code = 0;
+
+ if (!x86_is_paging_mode(cpu)) {
+ *gpa = gva;
+ return true;
+ }
+
+ res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
+ if (res) {
+ *gpa = pt.gpa;
+ return true;
+ }
+
+ return false;
+}
+
+void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
+{
+ uint64_t gpa;
+
+ while (bytes > 0) {
+ /* copy page */
+ int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
+
+ if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
+ VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
+ } else {
+ address_space_write(&address_space_memory, gpa,
+ MEMTXATTRS_UNSPECIFIED, data, copy);
+ }
+
+ bytes -= copy;
+ gva += copy;
+ data += copy;
+ }
+}
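The copy loop clamps each transfer to the end of the current 4 KiB page, so a buffer that straddles a page boundary is translated and copied one page at a time; vmx_read_mem below uses the same pattern. A standalone sketch (not part of this patch) of the chunking arithmetic:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned long gva = 0x2ff0;   /* 16 bytes before a page boundary */
    int bytes = 40;

    while (bytes > 0) {
        /* never cross the 4 KiB page that gva currently points into */
        int copy = MIN(bytes, (int)(0x1000 - (gva & 0xfff)));
        printf("copy %2d bytes at gva 0x%lx\n", copy, gva);
        bytes -= copy;
        gva += copy;
    }
    return 0;   /* prints: 16 bytes at 0x2ff0, then 24 bytes at 0x3000 */
}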
+
+void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
+{
+ uint64_t gpa;
+
+ while (bytes > 0) {
+ /* copy page */
+ int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
+
+ if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
+ VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
+ }
+ address_space_read(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
+ data, copy);
+
+ bytes -= copy;
+ gva += copy;
+ data += copy;
+ }
+}
diff --git a/target/i386/hvf/x86_mmu.h b/target/i386/hvf/x86_mmu.h
new file mode 100644
index 000000000..9ae8a548d
--- /dev/null
+++ b/target/i386/hvf/x86_mmu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_MMU_H
+#define X86_MMU_H
+
+#define PT_PRESENT (1 << 0)
+#define PT_WRITE (1 << 1)
+#define PT_USER (1 << 2)
+#define PT_WT (1 << 3)
+#define PT_CD (1 << 4)
+#define PT_ACCESSED (1 << 5)
+#define PT_DIRTY (1 << 6)
+#define PT_PS (1 << 7)
+#define PT_GLOBAL (1 << 8)
+#define PT_NX (1llu << 63)
+
+/* error codes */
+#define MMU_PAGE_PT (1 << 0)
+#define MMU_PAGE_WT (1 << 1)
+#define MMU_PAGE_US (1 << 2)
+#define MMU_PAGE_NX (1 << 3)
+
+bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa);
+
+void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes);
+void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes);
+
+#endif /* X86_MMU_H */
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
new file mode 100644
index 000000000..422156128
--- /dev/null
+++ b/target/i386/hvf/x86_task.c
@@ -0,0 +1,185 @@
+// This software is licensed under the terms of the GNU General Public
+// License version 2, as published by the Free Software Foundation, and
+// may be copied, distributed, and modified under those terms.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/hvf.h"
+#include "hvf-i386.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86.h"
+#include "x86_descr.h"
+#include "x86_mmu.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "x86_task.h"
+#include "x86hvf.h"
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+#include "hw/i386/apic_internal.h"
+#include "qemu/main-loop.h"
+#include "qemu/accel.h"
+#include "target/i386/cpu.h"
+
+// TODO: taskswitch handling
+static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+    /* CR3 and the LDT selector are intentionally not saved */
+ tss->eip = (uint32_t)env->eip;
+ tss->eflags = (uint32_t)env->eflags;
+ tss->eax = EAX(env);
+ tss->ecx = ECX(env);
+ tss->edx = EDX(env);
+ tss->ebx = EBX(env);
+ tss->esp = ESP(env);
+ tss->ebp = EBP(env);
+ tss->esi = ESI(env);
+ tss->edi = EDI(env);
+
+ tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
+ tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
+ tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
+ tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
+ tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
+ tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
+}
+
+static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);
+
+ env->eip = tss->eip;
+ env->eflags = tss->eflags | 2;
+
+ /* General purpose registers */
+ RAX(env) = tss->eax;
+ RCX(env) = tss->ecx;
+ RDX(env) = tss->edx;
+ RBX(env) = tss->ebx;
+ RSP(env) = tss->esp;
+ RBP(env) = tss->ebp;
+ RSI(env) = tss->esi;
+ RDI(env) = tss->edi;
+
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
+}
+
+static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
+ uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
+{
+ struct x86_tss_segment32 tss_seg;
+ uint32_t new_tss_base = x86_segment_base(new_desc);
+ uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
+ uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);
+
+ vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
+ save_state_to_tss32(cpu, &tss_seg);
+
+ vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
+ vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));
+
+ if (old_tss_sel.sel != 0xffff) {
+ tss_seg.prev_tss = old_tss_sel.sel;
+
+ vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
+ }
+ load_state_from_tss32(cpu, &tss_seg);
+ return 0;
+}
+
+void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
+{
+ uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);
+ if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
+ gate_type != VMCS_INTR_T_HWINTR &&
+ gate_type != VMCS_INTR_T_NMI)) {
+ int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
+ macvm_set_rip(cpu, rip + ins_len);
+ return;
+ }
+
+ load_regs(cpu);
+
+ struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
+ int ret;
+ x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
+ uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
+ uint32_t desc_limit;
+ struct x86_call_gate task_gate_desc;
+ struct vmx_segment vmx_seg;
+
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
+ x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
+
+ if (reason == TSR_IDT_GATE && gate_valid) {
+ int dpl;
+
+ ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
+
+ dpl = task_gate_desc.dpl;
+ x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
+ if (tss_sel.rpl > dpl || cs.rpl > dpl)
+            ; /* DPRINTF("emulate_gp"); */
+ }
+
+ desc_limit = x86_segment_limit(&next_tss_desc);
+ if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
+ VM_PANIC("emulate_ts");
+ }
+
+ if (reason == TSR_IRET || reason == TSR_JMP) {
+ curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+ x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
+ }
+
+ if (reason == TSR_IRET)
+ env->eflags &= ~NT_MASK;
+
+ if (reason != TSR_CALL && reason != TSR_IDT_GATE)
+ old_tss_sel.sel = 0xffff;
+
+ if (reason != TSR_IRET) {
+ next_tss_desc.type |= (1 << 1); /* set busy flag */
+ x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
+ }
+
+ if (next_tss_desc.type & 8)
+ ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+ else
+ //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+ VM_PANIC("task_switch_16");
+
+ macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
+ x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
+ vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
+
+ store_regs(cpu);
+
+ hv_vcpu_invalidate_tlb(cpu->hvf->fd);
+ hv_vcpu_flush(cpu->hvf->fd);
+}
diff --git a/target/i386/hvf/x86_task.h b/target/i386/hvf/x86_task.h
new file mode 100644
index 000000000..4eaa61a7d
--- /dev/null
+++ b/target/i386/hvf/x86_task.h
@@ -0,0 +1,20 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_TASK_H
+#define HVF_X86_TASK_H
+
+void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
+ int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
+#endif
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
new file mode 100644
index 000000000..05ec1bddc
--- /dev/null
+++ b/target/i386/hvf/x86hvf.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "x86hvf.h"
+#include "vmx.h"
+#include "vmcs.h"
+#include "cpu.h"
+#include "x86_descr.h"
+#include "x86_decode.h"
+#include "sysemu/hw_accel.h"
+
+#include "hw/i386/apic_internal.h"
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
+ SegmentCache *qseg, bool is_tr)
+{
+ vmx_seg->sel = qseg->selector;
+ vmx_seg->base = qseg->base;
+ vmx_seg->limit = qseg->limit;
+
+ if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
+ /* the TR register is usable after processor reset despite
+ * having a null selector */
+ vmx_seg->ar = 1 << 16;
+ return;
+ }
+ vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
+ vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
+ vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
+ vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
+ vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
+ vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
+ vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
+ vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
+}
+
+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
+{
+ qseg->limit = vmx_seg->limit;
+ qseg->base = vmx_seg->base;
+ qseg->selector = vmx_seg->sel;
+ qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
+ (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
+ (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
+ (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
+ (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
+ (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
+ (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
+ (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
+}
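hvf_set_segment and hvf_get_segment are inverses: they shuttle the same attribute bits between QEMU's SegmentCache flags layout and the VMX access-rights word. A standalone round-trip sketch, not part of this patch; the DESC_*_SHIFT values are restated from QEMU's cpu.h only to keep the example self-contained, so treat them as assumptions and check cpu.h for the authoritative definitions:

#include <stdint.h>
#include <assert.h>

/* assumed stand-ins mirroring QEMU's DESC_*_SHIFT constants */
enum { DESC_TYPE_SHIFT = 8, DESC_S_SHIFT = 12, DESC_DPL_SHIFT = 13,
       DESC_P_SHIFT = 15, DESC_AVL_SHIFT = 20, DESC_L_SHIFT = 21,
       DESC_B_SHIFT = 22, DESC_G_SHIFT = 23 };

static uint32_t flags_to_ar(uint32_t f)       /* as in hvf_set_segment */
{
    return ((f >> DESC_TYPE_SHIFT) & 0xf) |
           (((f >> DESC_S_SHIFT) & 1) << 4) |
           (((f >> DESC_DPL_SHIFT) & 3) << 5) |
           (((f >> DESC_P_SHIFT) & 1) << 7) |
           (((f >> DESC_AVL_SHIFT) & 1) << 12) |
           (((f >> DESC_L_SHIFT) & 1) << 13) |
           (((f >> DESC_B_SHIFT) & 1) << 14) |
           (((f >> DESC_G_SHIFT) & 1) << 15);
}

static uint32_t ar_to_flags(uint32_t ar)      /* as in hvf_get_segment */
{
    return ((ar & 0xf) << DESC_TYPE_SHIFT) |
           (((ar >> 4) & 1) << DESC_S_SHIFT) |
           (((ar >> 5) & 3) << DESC_DPL_SHIFT) |
           (((ar >> 7) & 1) << DESC_P_SHIFT) |
           (((ar >> 12) & 1) << DESC_AVL_SHIFT) |
           (((ar >> 13) & 1) << DESC_L_SHIFT) |
           (((ar >> 14) & 1) << DESC_B_SHIFT) |
           (((ar >> 15) & 1) << DESC_G_SHIFT);
}

int main(void)
{
    /* 32-bit ring-0 code segment: type=0xb, S=1, DPL=0, P=1, B=1, G=1 */
    uint32_t flags = (0xbu << DESC_TYPE_SHIFT) | (1u << DESC_S_SHIFT) |
                     (1u << DESC_P_SHIFT) | (1u << DESC_B_SHIFT) |
                     (1u << DESC_G_SHIFT);
    assert(ar_to_flags(flags_to_ar(flags)) == flags);
    return 0;
}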
+
+void hvf_put_xsave(CPUState *cpu_state)
+{
+ void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
+ uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;
+
+ x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
+
+ if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) {
+ abort();
+ }
+}
+
+void hvf_put_segments(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+ struct vmx_segment seg;
+
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
+
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
+
+ /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
+ vmx_update_tpr(cpu_state);
+ wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
+
+ macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
+ macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_DS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_ES);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_SS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_FS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_GS);
+
+ hvf_set_segment(cpu_state, &seg, &env->tr, true);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_TR);
+
+ hvf_set_segment(cpu_state, &seg, &env->ldt, false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
+
+ hv_vcpu_flush(cpu_state->hvf->fd);
+}
+
+void hvf_put_msrs(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,
+ env->sysenter_cs);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,
+ env->sysenter_esp);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,
+ env->sysenter_eip);
+
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);
+
+#ifdef TARGET_X86_64
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);
+#endif
+
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);
+ hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);
+}
+
+
+void hvf_get_xsave(CPUState *cpu_state)
+{
+ void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
+ uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;
+
+ if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) {
+ abort();
+ }
+
+ x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
+}
+
+void hvf_get_segments(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+
+ struct vmx_segment seg;
+
+ env->interrupt_injected = -1;
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
+ hvf_get_segment(&env->segs[R_CS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
+ hvf_get_segment(&env->segs[R_DS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
+ hvf_get_segment(&env->segs[R_ES], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
+ hvf_get_segment(&env->segs[R_FS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
+ hvf_get_segment(&env->segs[R_GS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
+ hvf_get_segment(&env->segs[R_SS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
+ hvf_get_segment(&env->tr, &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
+ hvf_get_segment(&env->ldt, &seg);
+
+ env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
+ env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);
+ env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
+ env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);
+
+ env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);
+ env->cr[2] = 0;
+ env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);
+ env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);
+
+ env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);
+}
+
+void hvf_get_msrs(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+ uint64_t tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);
+ env->sysenter_cs = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);
+ env->sysenter_esp = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);
+ env->sysenter_eip = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);
+
+#ifdef TARGET_X86_64
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);
+#endif
+
+ hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);
+
+ env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);
+}
+
+int hvf_put_registers(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+ wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);
+ wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);
+ wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);
+ wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);
+ wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);
+ wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);
+ wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);
+ wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);
+ wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);
+ wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);
+ wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);
+ wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);
+ wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);
+ wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);
+ wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);
+ wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);
+ wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);
+ wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);
+
+ wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);
+
+ hvf_put_xsave(cpu_state);
+
+ hvf_put_segments(cpu_state);
+
+ hvf_put_msrs(cpu_state);
+
+ wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);
+ wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);
+
+ return 0;
+}
+
+int hvf_get_registers(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+ env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);
+ env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);
+ env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);
+ env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);
+ env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);
+ env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);
+ env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);
+ env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);
+ env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);
+ env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);
+ env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);
+ env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);
+ env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);
+ env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);
+ env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);
+ env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);
+
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
+ env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);
+
+ hvf_get_xsave(cpu_state);
+ env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);
+
+ hvf_get_segments(cpu_state);
+ hvf_get_msrs(cpu_state);
+
+ env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);
+ env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);
+ env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);
+ env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);
+ env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);
+ env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);
+ env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);
+ env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);
+
+ x86_update_hflags(env);
+ return 0;
+}
+
+static void vmx_set_int_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
+ VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
+}
+
+void vmx_clear_int_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
+ ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
+}
+
+bool hvf_inject_interrupts(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+ uint8_t vector;
+ uint64_t intr_type;
+ bool have_event = true;
+ if (env->interrupt_injected != -1) {
+ vector = env->interrupt_injected;
+ if (env->ins_len) {
+ intr_type = VMCS_INTR_T_SWINTR;
+ } else {
+ intr_type = VMCS_INTR_T_HWINTR;
+ }
+ } else if (env->exception_nr != -1) {
+ vector = env->exception_nr;
+ if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
+ intr_type = VMCS_INTR_T_SWEXCEPTION;
+ } else {
+ intr_type = VMCS_INTR_T_HWEXCEPTION;
+ }
+ } else if (env->nmi_injected) {
+ vector = EXCP02_NMI;
+ intr_type = VMCS_INTR_T_NMI;
+ } else {
+ have_event = false;
+ }
+
+ uint64_t info = 0;
+ if (have_event) {
+ info = vector | intr_type | VMCS_INTR_VALID;
+ uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);
+ if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
+ vmx_clear_nmi_blocking(cpu_state);
+ }
+
+ if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
+ info &= ~(1 << 12); /* clear undefined bit */
+ if (intr_type == VMCS_INTR_T_SWINTR ||
+ intr_type == VMCS_INTR_T_SWEXCEPTION) {
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
+ }
+
+ if (env->has_error_code) {
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,
+ env->error_code);
+ /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
+ info |= VMCS_INTR_DEL_ERRCODE;
+ }
+ /*printf("reinject %lx err %d\n", info, err);*/
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
+        }
+ }
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
+ } else {
+ vmx_set_nmi_window_exiting(cpu_state);
+ }
+ }
+
+ if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
+ (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
+ int line = cpu_get_pic_interrupt(&x86cpu->env);
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ if (line >= 0) {
+ wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
+ VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
+ }
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
+ vmx_set_int_window_exiting(cpu_state);
+ }
+ return (cpu_state->interrupt_request
+ & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+}
+
+int hvf_process_events(CPUState *cpu_state)
+{
+ X86CPU *cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &cpu->env;
+
+ if (!cpu_state->vcpu_dirty) {
+        /* lightweight sync for CPU_INTERRUPT_HARD and IF_MASK */
+ env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
+ }
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
+ cpu_synchronize_state(cpu_state);
+ do_cpu_init(cpu);
+ }
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ }
+ if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_state->halted = 0;
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
+ cpu_synchronize_state(cpu_state);
+ do_cpu_sipi(cpu);
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_synchronize_state(cpu_state);
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+ return cpu_state->halted;
+}
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
new file mode 100644
index 000000000..99ed8d608
--- /dev/null
+++ b/target/i386/hvf/x86hvf.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef X86HVF_H
+#define X86HVF_H
+#include "cpu.h"
+#include "x86_descr.h"
+
+int hvf_process_events(CPUState *);
+bool hvf_inject_interrupts(CPUState *);
+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
+ SegmentCache *qseg, bool is_tr);
+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);
+void hvf_put_xsave(CPUState *cpu_state);
+void hvf_put_segments(CPUState *cpu_state);
+void hvf_put_msrs(CPUState *cpu_state);
+void hvf_get_xsave(CPUState *cpu_state);
+void hvf_get_msrs(CPUState *cpu_state);
+void vmx_clear_int_window_exiting(CPUState *cpu);
+void hvf_get_segments(CPUState *cpu_state);
+void vmx_update_tpr(CPUState *cpu);
+#endif
diff --git a/target/i386/kvm/hyperv-proto.h b/target/i386/kvm/hyperv-proto.h
new file mode 100644
index 000000000..89f81afda
--- /dev/null
+++ b/target/i386/kvm/hyperv-proto.h
@@ -0,0 +1,172 @@
+/*
+ * Definitions for Hyper-V guest/hypervisor interaction - x86-specific part
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TARGET_I386_HYPERV_PROTO_H
+#define TARGET_I386_HYPERV_PROTO_H
+
+#include "hw/hyperv/hyperv-proto.h"
+
+#define HV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HV_CPUID_INTERFACE 0x40000001
+#define HV_CPUID_VERSION 0x40000002
+#define HV_CPUID_FEATURES 0x40000003
+#define HV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HV_CPUID_NESTED_FEATURES 0x4000000A
+#define HV_CPUID_MIN 0x40000005
+#define HV_CPUID_MAX 0x4000ffff
+#define HV_HYPERVISOR_PRESENT_BIT 0x80000000
+
+/*
+ * HV_CPUID_FEATURES.EAX bits
+ */
+#define HV_VP_RUNTIME_AVAILABLE (1u << 0)
+#define HV_TIME_REF_COUNT_AVAILABLE (1u << 1)
+#define HV_SYNIC_AVAILABLE (1u << 2)
+#define HV_SYNTIMERS_AVAILABLE (1u << 3)
+#define HV_APIC_ACCESS_AVAILABLE (1u << 4)
+#define HV_HYPERCALL_AVAILABLE (1u << 5)
+#define HV_VP_INDEX_AVAILABLE (1u << 6)
+#define HV_RESET_AVAILABLE (1u << 7)
+#define HV_REFERENCE_TSC_AVAILABLE (1u << 9)
+#define HV_ACCESS_FREQUENCY_MSRS (1u << 11)
+#define HV_ACCESS_REENLIGHTENMENTS_CONTROL (1u << 13)
+
+/*
+ * HV_CPUID_FEATURES.EBX bits
+ */
+#define HV_POST_MESSAGES (1u << 4)
+#define HV_SIGNAL_EVENTS (1u << 5)
+
+/*
+ * HV_CPUID_FEATURES.EDX bits
+ */
+#define HV_MWAIT_AVAILABLE (1u << 0)
+#define HV_GUEST_DEBUGGING_AVAILABLE (1u << 1)
+#define HV_PERF_MONITOR_AVAILABLE (1u << 2)
+#define HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1u << 3)
+#define HV_HYPERCALL_PARAMS_XMM_AVAILABLE (1u << 4)
+#define HV_GUEST_IDLE_STATE_AVAILABLE (1u << 5)
+#define HV_FREQUENCY_MSRS_AVAILABLE (1u << 8)
+#define HV_GUEST_CRASH_MSR_AVAILABLE (1u << 10)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE (1u << 19)
+
+/*
+ * HV_CPUID_ENLIGHTMENT_INFO.EAX bits
+ */
+#define HV_AS_SWITCH_RECOMMENDED (1u << 0)
+#define HV_LOCAL_TLB_FLUSH_RECOMMENDED (1u << 1)
+#define HV_REMOTE_TLB_FLUSH_RECOMMENDED (1u << 2)
+#define HV_APIC_ACCESS_RECOMMENDED (1u << 3)
+#define HV_SYSTEM_RESET_RECOMMENDED (1u << 4)
+#define HV_RELAXED_TIMING_RECOMMENDED (1u << 5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED (1u << 9)
+#define HV_CLUSTER_IPI_RECOMMENDED (1u << 10)
+#define HV_EX_PROCESSOR_MASKS_RECOMMENDED (1u << 11)
+#define HV_ENLIGHTENED_VMCS_RECOMMENDED (1u << 14)
+#define HV_NO_NONARCH_CORESHARING (1u << 18)
+
+/*
+ * Basic virtualized MSRs
+ */
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+#define HV_X64_MSR_HYPERCALL 0x40000001
+#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_X64_MSR_RESET 0x40000003
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+
+/*
+ * Virtual APIC MSRs
+ */
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
+
+/*
+ * Synthetic interrupt controller MSRs
+ */
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+
+/*
+ * Synthetic timer MSRs
+ */
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+
+/*
+ * Guest crash notification MSRs
+ */
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_CRASH_PARAMS (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 + 1)
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+#define HV_CRASH_CTL_NOTIFY (1ull << 63)
+
+/*
+ * Reenlightenment notification MSRs
+ */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_REENLIGHTENMENT_ENABLE_BIT (1u << 16)
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+
+/*
+ * Hypercall MSR bits
+ */
+#define HV_HYPERCALL_ENABLE (1u << 0)
+
+/*
+ * Synthetic interrupt controller definitions
+ */
+#define HV_SYNIC_VERSION 1
+#define HV_SYNIC_ENABLE (1u << 0)
+#define HV_SIMP_ENABLE (1u << 0)
+#define HV_SIEFP_ENABLE (1u << 0)
+#define HV_SINT_MASKED (1u << 16)
+#define HV_SINT_AUTO_EOI (1u << 17)
+#define HV_SINT_VECTOR_MASK 0xff
+
+#define HV_STIMER_COUNT 4
+
+
+#endif
diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c
new file mode 100644
index 000000000..0028527e7
--- /dev/null
+++ b/target/i386/kvm/hyperv-stub.c
@@ -0,0 +1,48 @@
+/*
+ * Stubs for CONFIG_HYPERV=n
+ *
+ * Copyright (c) 2015-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hyperv.h"
+
+#ifdef CONFIG_KVM
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+ switch (exit->type) {
+ case KVM_EXIT_HYPERV_SYNIC:
+ if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ return -1;
+ }
+
+ /*
+ * Tracking the changes in the MSRs is unnecessary as there are no
+         * users for them besides save/load, which is handled nicely by the
+ * generic MSR save/load code
+ */
+ return 0;
+ case KVM_EXIT_HYPERV_HCALL:
+ exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+ return 0;
+ default:
+ return -1;
+ }
+}
+#endif
+
+int hyperv_x86_synic_add(X86CPU *cpu)
+{
+ return -ENOSYS;
+}
+
+void hyperv_x86_synic_reset(X86CPU *cpu)
+{
+}
+
+void hyperv_x86_synic_update(X86CPU *cpu)
+{
+}
diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c
new file mode 100644
index 000000000..26efc1e0e
--- /dev/null
+++ b/target/i386/kvm/hyperv.c
@@ -0,0 +1,101 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "hyperv.h"
+#include "hw/hyperv/hyperv.h"
+#include "hyperv-proto.h"
+
+int hyperv_x86_synic_add(X86CPU *cpu)
+{
+ hyperv_synic_add(CPU(cpu));
+ return 0;
+}
+
+void hyperv_x86_synic_reset(X86CPU *cpu)
+{
+ hyperv_synic_reset(CPU(cpu));
+}
+
+void hyperv_x86_synic_update(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ bool enable = env->msr_hv_synic_control & HV_SYNIC_ENABLE;
+ hwaddr msg_page_addr = (env->msr_hv_synic_msg_page & HV_SIMP_ENABLE) ?
+ (env->msr_hv_synic_msg_page & TARGET_PAGE_MASK) : 0;
+ hwaddr event_page_addr = (env->msr_hv_synic_evt_page & HV_SIEFP_ENABLE) ?
+ (env->msr_hv_synic_evt_page & TARGET_PAGE_MASK) : 0;
+ hyperv_synic_update(CPU(cpu), enable, msg_page_addr, event_page_addr);
+}
+
+static void async_synic_update(CPUState *cs, run_on_cpu_data data)
+{
+ qemu_mutex_lock_iothread();
+ hyperv_x86_synic_update(X86_CPU(cs));
+ qemu_mutex_unlock_iothread();
+}
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+ CPUX86State *env = &cpu->env;
+
+ switch (exit->type) {
+ case KVM_EXIT_HYPERV_SYNIC:
+ if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ return -1;
+ }
+
+ switch (exit->u.synic.msr) {
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = exit->u.synic.control;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
+ break;
+ default:
+ return -1;
+ }
+
+ /*
+ * this will run in this cpu thread before it returns to KVM, but in a
+ * safe environment (i.e. when all cpus are quiescent) -- this is
+ * necessary because memory hierarchy is being changed
+ */
+ async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
+
+ return 0;
+ case KVM_EXIT_HYPERV_HCALL: {
+ uint16_t code = exit->u.hcall.input & 0xffff;
+ bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST;
+ uint64_t param = exit->u.hcall.params[0];
+
+ switch (code) {
+ case HV_POST_MESSAGE:
+ exit->u.hcall.result = hyperv_hcall_post_message(param, fast);
+ break;
+ case HV_SIGNAL_EVENT:
+ exit->u.hcall.result = hyperv_hcall_signal_event(param, fast);
+ break;
+ default:
+ exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+ }
+ return 0;
+ }
+ default:
+ return -1;
+ }
+}
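The hypercall decoding above follows the Hyper-V TLFS layout: the call code sits in the low 16 bits of the input value, with the fast-call flag immediately above it. A standalone decode sketch (not part of this patch; HV_HYPERCALL_FAST is assumed here to be bit 16, mirroring hw/hyperv/hyperv-proto.h):

#include <stdint.h>
#include <stdio.h>

#define HV_HYPERCALL_FAST (1u << 16)   /* assumed value, see hyperv-proto.h */

int main(void)
{
    uint64_t input = 0x1005cULL;       /* hypothetical guest hypercall input */
    unsigned code = input & 0xffff;    /* low 16 bits: the call code */
    int fast = !!(input & HV_HYPERCALL_FAST);
    printf("code=0x%x fast=%d\n", code, fast);   /* code=0x5c fast=1 */
    return 0;
}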
diff --git a/target/i386/kvm/hyperv.h b/target/i386/kvm/hyperv.h
new file mode 100644
index 000000000..67543296c
--- /dev/null
+++ b/target/i386/kvm/hyperv.h
@@ -0,0 +1,29 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HYPERV_H
+#define TARGET_I386_HYPERV_H
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "hw/hyperv/hyperv.h"
+
+#ifdef CONFIG_KVM
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit);
+#endif
+
+int hyperv_x86_synic_add(X86CPU *cpu);
+void hyperv_x86_synic_reset(X86CPU *cpu);
+void hyperv_x86_synic_update(X86CPU *cpu);
+
+#endif
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
new file mode 100644
index 000000000..d95028018
--- /dev/null
+++ b/target/i386/kvm/kvm-cpu.c
@@ -0,0 +1,201 @@
+/*
+ * x86 KVM CPU type initialization
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "host-cpu.h"
+#include "kvm-cpu.h"
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+
+#include "kvm_i386.h"
+#include "hw/core/accel-cpu.h"
+
+static bool kvm_cpu_realizefn(CPUState *cs, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /*
+ * The realize order is important, since x86_cpu_realize() checks if
+ * nothing else has been set by the user (or by accelerators) in
+ * cpu->ucode_rev and cpu->phys_bits, and updates the CPUID results in
+ * mwait.ecx.
+ * This accel realization code also assumes cpu features are already expanded.
+ *
+ * realize order:
+ *
+ * x86_cpu_realize():
+ * -> x86_cpu_expand_features()
+ * -> cpu_exec_realizefn():
+ * -> accel_cpu_realizefn()
+ * kvm_cpu_realizefn() -> host_cpu_realizefn()
+ * -> check/update ucode_rev, phys_bits, mwait
+ */
+ if (cpu->max_features) {
+ if (enable_cpu_pm && kvm_has_waitpkg()) {
+ env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
+ }
+ if (cpu->ucode_rev == 0) {
+ cpu->ucode_rev =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_UCODE_REV);
+ }
+ }
+ return host_cpu_realizefn(cs, errp);
+}
+
+static bool lmce_supported(void)
+{
+ uint64_t mce_cap = 0;
+
+ if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
+ return false;
+ }
+ return !!(mce_cap & MCG_LMCE_P);
+}
+
+static void kvm_cpu_max_instance_init(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ KVMState *s = kvm_state;
+
+ host_cpu_max_instance_init(cpu);
+
+ if (lmce_supported()) {
+ object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort);
+ }
+
+ env->cpuid_min_level =
+ kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+}
+
+static void kvm_cpu_xsave_init(void)
+{
+ static bool first = true;
+ KVMState *s = kvm_state;
+ int i;
+
+ if (!first) {
+ return;
+ }
+ first = false;
+
+ /* x87 and SSE states are in the legacy region of the XSAVE area. */
+ x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+ x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+ for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+ ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+ if (esa->size) {
+ int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
+ if (sz != 0) {
+ assert(esa->size == sz);
+ esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
+ }
+ }
+ }
+}
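The size/offset pairs this loop validates come straight from CPUID leaf 0xD: for state component i, sub-leaf i reports the component's size in EAX and its offset within the XSAVE area in EBX. A host-side sketch (not part of this patch) using GCC/clang's <cpuid.h>, x86 hosts only:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    /* state component 2 = AVX (upper halves of the YMM registers) */
    if (__get_cpuid_count(0xd, 2, &eax, &ebx, &ecx, &edx)) {
        printf("AVX state: size=%u bytes, offset=%u\n", eax, ebx);
    }
    return 0;
}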
+
+/*
+ * KVM-specific features that are automatically added/removed
+ * from cpudef models when KVM is enabled.
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ *
+ * NOTE: features can be enabled by default only if they were
+ * already available in the oldest kernel version supported
+ * by the KVM accelerator (see "OS requirements" section at
+ * docs/system/target-i386.rst)
+ */
+static PropValue kvm_default_props[] = {
+ { "kvmclock", "on" },
+ { "kvm-nopiodelay", "on" },
+ { "kvm-asyncpf", "on" },
+ { "kvm-steal-time", "on" },
+ { "kvm-pv-eoi", "on" },
+ { "kvmclock-stable-bit", "on" },
+ { "x2apic", "on" },
+ { "kvm-msi-ext-dest-id", "off" },
+ { "acpi", "off" },
+ { "monitor", "off" },
+ { "svm", "off" },
+ { NULL, NULL },
+};
+
+/*
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value)
+{
+ PropValue *pv;
+ for (pv = kvm_default_props; pv->prop; pv++) {
+ if (!strcmp(pv->prop, prop)) {
+ pv->value = value;
+ break;
+ }
+ }
+
+ /*
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+ assert(pv->prop);
+}
+
+static void kvm_cpu_instance_init(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+
+ host_cpu_instance_init(cpu);
+
+ if (xcc->model) {
+ /* only applies to builtin_x86_defs cpus */
+ if (!kvm_irqchip_in_kernel()) {
+ x86_cpu_change_kvm_default("x2apic", "off");
+ } else if (kvm_irqchip_is_split() && kvm_enable_x2apic()) {
+ x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on");
+ }
+
+ /* Special cases not set in the X86CPUDefinition structs: */
+ x86_cpu_apply_props(cpu, kvm_default_props);
+ }
+
+ if (cpu->max_features) {
+ kvm_cpu_max_instance_init(cpu);
+ }
+
+ kvm_cpu_xsave_init();
+}
+
+static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
+
+ acc->cpu_realizefn = kvm_cpu_realizefn;
+ acc->cpu_instance_init = kvm_cpu_instance_init;
+}
+
+static const TypeInfo kvm_cpu_accel_type_info = {
+    .name = ACCEL_CPU_NAME("kvm"),
+    .parent = TYPE_ACCEL_CPU,
+    .class_init = kvm_cpu_accel_class_init,
+    .abstract = true,
+};
+
+static void kvm_cpu_accel_register_types(void)
+{
+ type_register_static(&kvm_cpu_accel_type_info);
+}
+type_init(kvm_cpu_accel_register_types);
diff --git a/target/i386/kvm/kvm-cpu.h b/target/i386/kvm/kvm-cpu.h
new file mode 100644
index 000000000..e858ca21e
--- /dev/null
+++ b/target/i386/kvm/kvm-cpu.h
@@ -0,0 +1,41 @@
+/*
+ * i386 KVM CPU type and functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef KVM_CPU_H
+#define KVM_CPU_H
+
+#ifdef CONFIG_KVM
+/*
+ * Change the value of a KVM-specific default
+ *
+ * If value is NULL, no default will be set and the original
+ * value from the CPU model table will be kept.
+ *
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value);
+
+#else /* !CONFIG_KVM */
+
+#define x86_cpu_change_kvm_default(a, b)
+
+#endif /* CONFIG_KVM */
+
+#endif /* KVM_CPU_H */
diff --git a/target/i386/kvm/kvm-stub.c b/target/i386/kvm/kvm-stub.c
new file mode 100644
index 000000000..f6e7e4466
--- /dev/null
+++ b/target/i386/kvm/kvm-stub.c
@@ -0,0 +1,46 @@
+/*
+ * QEMU KVM x86 specific function stubs
+ *
+ * Copyright Linaro Limited 2012
+ *
+ * Author: Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "kvm_i386.h"
+
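+/*
+ * With optimization enabled, calls guarded by kvm_enabled() are compiled
+ * out entirely, so the stubs below are only needed to satisfy the linker
+ * in non-optimized builds.
+ */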
+#ifndef __OPTIMIZE__
+bool kvm_has_smm(void)
+{
+    return true;
+}
+
+bool kvm_enable_x2apic(void)
+{
+ return false;
+}
+
+/* This function is only called inside conditionals which we
+ * rely on the compiler to optimize out when CONFIG_KVM is not
+ * defined.
+ */
+uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
+                                      uint32_t index, int reg)
+{
+ abort();
+}
+#endif
+
+bool kvm_hv_vpindex_settable(void)
+{
+ return false;
+}
+
+bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
+{
+ abort();
+}
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
new file mode 100644
index 000000000..5a698bde1
--- /dev/null
+++ b/target/i386/kvm/kvm.c
@@ -0,0 +1,5052 @@
+/*
+ * QEMU KVM support
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-events-run-state.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+
+#include <linux/kvm.h>
+#include "standard-headers/asm-x86/kvm_para.h"
+
+#include "cpu.h"
+#include "host-cpu.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/kvm_int.h"
+#include "sysemu/runstate.h"
+#include "kvm_i386.h"
+#include "sev.h"
+#include "hyperv.h"
+#include "hyperv-proto.h"
+
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/i386/intel_iommu.h"
+#include "hw/i386/x86-iommu.h"
+#include "hw/i386/e820_memory_layout.h"
+
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "migration/blocker.h"
+#include "exec/memattrs.h"
+#include "trace.h"
+
+//#define DEBUG_KVM
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+/* From arch/x86/kvm/lapic.h */
+#define KVM_APIC_BUS_CYCLE_NS 1
+#define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 255 kvm_msr_entry structs */
+#define MSR_BUF_SIZE 4096
+
+static void kvm_init_msrs(X86CPU *cpu);
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_INFO(SET_TSS_ADDR),
+ KVM_CAP_INFO(EXT_CPUID),
+ KVM_CAP_INFO(MP_STATE),
+ KVM_CAP_LAST_INFO
+};
+
+static bool has_msr_star;
+static bool has_msr_hsave_pa;
+static bool has_msr_tsc_aux;
+static bool has_msr_tsc_adjust;
+static bool has_msr_tsc_deadline;
+static bool has_msr_feature_control;
+static bool has_msr_misc_enable;
+static bool has_msr_smbase;
+static bool has_msr_bndcfgs;
+static int lm_capable_kernel;
+static bool has_msr_hv_hypercall;
+static bool has_msr_hv_crash;
+static bool has_msr_hv_reset;
+static bool has_msr_hv_vpindex;
+static bool hv_vpindex_settable;
+static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
+static bool has_msr_hv_stimer;
+static bool has_msr_hv_frequencies;
+static bool has_msr_hv_reenlightenment;
+static bool has_msr_xss;
+static bool has_msr_umwait;
+static bool has_msr_spec_ctrl;
+static bool has_tsc_scale_msr;
+static bool has_msr_tsx_ctrl;
+static bool has_msr_virt_ssbd;
+static bool has_msr_smi_count;
+static bool has_msr_arch_capabs;
+static bool has_msr_core_capabs;
+static bool has_msr_vmx_vmfunc;
+static bool has_msr_ucode_rev;
+static bool has_msr_vmx_procbased_ctls2;
+static bool has_msr_perf_capabs;
+static bool has_msr_pkrs;
+
+static uint32_t has_architectural_pmu_version;
+static uint32_t num_architectural_pmu_gp_counters;
+static uint32_t num_architectural_pmu_fixed_counters;
+
+static int has_xsave;
+static int has_xcrs;
+static int has_pit_state2;
+static int has_exception_payload;
+
+static bool has_msr_mcg_ext_ctl;
+
+static struct kvm_cpuid2 *cpuid_cache;
+static struct kvm_cpuid2 *hv_cpuid_cache;
+static struct kvm_msr_list *kvm_feature_msrs;
+
+#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
+static RateLimit bus_lock_ratelimit_ctrl;
+
+int kvm_has_pit_state2(void)
+{
+ return has_pit_state2;
+}
+
+bool kvm_has_smm(void)
+{
+ return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
+}
+
+bool kvm_has_adjust_clock_stable(void)
+{
+ int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
+
+ return (ret == KVM_CLOCK_TSC_STABLE);
+}
+
+bool kvm_has_adjust_clock(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
+}
+
+bool kvm_has_exception_payload(void)
+{
+ return has_exception_payload;
+}
+
+static bool kvm_x2apic_api_set_flags(uint64_t flags)
+{
+ KVMState *s = KVM_STATE(current_accel());
+
+ return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
+}
+
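+/*
+ * Evaluate 'fn' at most once and cache its value in '_result'.  The early
+ * 'return' exits the *enclosing* function, so this is only suitable as the
+ * body of a thin wrapper such as kvm_enable_x2apic() below.
+ */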
+#define MEMORIZE(fn, _result) \
+ ({ \
+ static bool _memorized; \
+ \
+ if (_memorized) { \
+ return _result; \
+ } \
+ _memorized = true; \
+ _result = fn; \
+ })
+
+static bool has_x2apic_api;
+
+bool kvm_has_x2apic_api(void)
+{
+ return has_x2apic_api;
+}
+
+bool kvm_enable_x2apic(void)
+{
+ return MEMORIZE(
+ kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
+ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
+ has_x2apic_api);
+}
+
+bool kvm_hv_vpindex_settable(void)
+{
+ return hv_vpindex_settable;
+}
+
+static int kvm_get_tsc(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[1];
+ } msr_data = {};
+ int ret;
+
+ if (env->tsc_valid) {
+ return 0;
+ }
+
+ memset(&msr_data, 0, sizeof(msr_data));
+ msr_data.info.nmsrs = 1;
+ msr_data.entries[0].index = MSR_IA32_TSC;
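+    /* While the VM is stopped the TSC cannot change, so cache this read */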
+ env->tsc_valid = !runstate_is_running();
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ env->tsc = msr_data.entries[0].data;
+ return 0;
+}
+
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
+{
+ kvm_get_tsc(cpu);
+}
+
+void kvm_synchronize_all_tsc(void)
+{
+ CPUState *cpu;
+
+ if (kvm_enabled()) {
+ CPU_FOREACH(cpu) {
+ run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
+ }
+ }
+}
+
+static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = g_malloc0(size);
+ cpuid->nent = max;
+ r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
+ if (r == 0 && cpuid->nent >= max) {
+ r = -E2BIG;
+ }
+ if (r < 0) {
+ if (r == -E2BIG) {
+ g_free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+ struct kvm_cpuid2 *cpuid;
+ int max = 1;
+
+ if (cpuid_cache != NULL) {
+ return cpuid_cache;
+ }
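+    /* Double the buffer size until KVM stops failing with -E2BIG */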
+ while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+ max *= 2;
+ }
+ cpuid_cache = cpuid;
+ return cpuid;
+}
+
+static bool host_tsx_broken(void)
+{
+    int family, model, stepping;
+ char vendor[CPUID_VENDOR_SZ + 1];
+
+ host_cpu_vendor_fms(vendor, &family, &model, &stepping);
+
+ /* Check if we are running on a Haswell host known to have broken TSX */
+ return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
+ (family == 6) &&
+ ((model == 63 && stepping < 4) ||
+ model == 60 || model == 69 || model == 70);
+}
+
+/* Return the value of the given register of a cpuid entry */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+ uint32_t ret = 0;
+ switch (reg) {
+ case R_EAX:
+ ret = entry->eax;
+ break;
+ case R_EBX:
+ ret = entry->ebx;
+ break;
+ case R_ECX:
+ ret = entry->ecx;
+ break;
+ case R_EDX:
+ ret = entry->edx;
+ break;
+ }
+ return ret;
+}
+
+/* Find the matching entry for function/index in a kvm_cpuid2 struct */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
+{
+ int i;
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ return &cpuid->entries[i];
+ }
+ }
+ /* not found: */
+ return NULL;
+}
+
+uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
+ uint32_t index, int reg)
+{
+ struct kvm_cpuid2 *cpuid;
+ uint32_t ret = 0;
+ uint32_t cpuid_1_edx;
+
+ cpuid = get_supported_cpuid(s);
+
+ struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+ if (entry) {
+ ret = cpuid_entry_get_reg(entry, reg);
+ }
+
+ /* Fixups for the data returned by KVM, below */
+
+ if (function == 1 && reg == R_EDX) {
+ /* KVM before 2.6.30 misreports the following features */
+ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+ } else if (function == 1 && reg == R_ECX) {
+ /* We can set the hypervisor flag, even if KVM does not return it on
+ * GET_SUPPORTED_CPUID
+ */
+ ret |= CPUID_EXT_HYPERVISOR;
+ /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+ * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+ * and the irqchip is in the kernel.
+ */
+ if (kvm_irqchip_in_kernel() &&
+ kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+ ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
+ }
+
+ /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+ * without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~CPUID_EXT_X2APIC;
+ }
+
+ if (enable_cpu_pm) {
+ int disable_exits = kvm_check_extension(s,
+ KVM_CAP_X86_DISABLE_EXITS);
+
+ if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
+ ret |= CPUID_EXT_MONITOR;
+ }
+ }
+ } else if (function == 6 && reg == R_EAX) {
+ ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
+ } else if (function == 7 && index == 0 && reg == R_EBX) {
+ if (host_tsx_broken()) {
+ ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
+ }
+ } else if (function == 7 && index == 0 && reg == R_EDX) {
+ /*
+ * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
+ * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
+ * returned by KVM_GET_MSR_INDEX_LIST.
+ */
+ if (!has_msr_arch_capabs) {
+ ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
+ }
+ } else if (function == 0x80000001 && reg == R_ECX) {
+ /*
+ * It's safe to enable TOPOEXT even if it's not returned by
+ * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
+ * us to keep CPU models including TOPOEXT runnable on older kernels.
+ */
+ ret |= CPUID_EXT3_TOPOEXT;
+ } else if (function == 0x80000001 && reg == R_EDX) {
+ /* On Intel, kvm returns cpuid according to the Intel spec,
+ * so add missing bits according to the AMD spec:
+ */
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
+ } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
+ /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
+ * be enabled without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
+ }
+ if (kvm_irqchip_is_split()) {
+ ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
+ }
+ } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
+ ret |= 1U << KVM_HINTS_REALTIME;
+ }
+
+ return ret;
+}
+
+uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
+{
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[1];
+ } msr_data = {};
+ uint64_t value;
+ uint32_t ret, can_be_one, must_be_one;
+
+ if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
+ return 0;
+ }
+
+    /* Check whether the requested MSR is a supported feature MSR */
+    int i;
+    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
+        if (kvm_feature_msrs->indices[i] == index) {
+            break;
+        }
+    }
+ if (i == kvm_feature_msrs->nmsrs) {
+ return 0; /* if the feature MSR is not supported, simply return 0 */
+ }
+
+ msr_data.info.nmsrs = 1;
+ msr_data.entries[0].index = index;
+
+ ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
+ if (ret != 1) {
+ error_report("KVM get MSR (index=0x%x) feature failed, %s",
+ index, strerror(-ret));
+ exit(1);
+ }
+
+ value = msr_data.entries[0].data;
+ switch (index) {
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ if (!has_msr_vmx_procbased_ctls2) {
+ /* KVM forgot to add these bits for some time, do this ourselves. */
+ if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
+ CPUID_XSAVE_XSAVES) {
+ value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
+ }
+ if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
+ CPUID_EXT_RDRAND) {
+ value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
+ }
+ if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
+ CPUID_7_0_EBX_INVPCID) {
+ value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
+ }
+ if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
+ CPUID_7_0_EBX_RDSEED) {
+ value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
+ }
+ if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
+ CPUID_EXT2_RDTSCP) {
+ value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
+ }
+ }
+ /* fall through */
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ /*
+ * Return true for bits that can be one, but do not have to be one.
+ * The SDM tells us which bits could have a "must be one" setting,
+ * so we can do the opposite transformation in make_vmx_msr_value.
+ */
+ must_be_one = (uint32_t)value;
+ can_be_one = (uint32_t)(value >> 32);
+ return can_be_one & ~must_be_one;
+
+ default:
+ return value;
+ }
+}
+
+static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
+ int *max_banks)
+{
+ int r;
+
+ r = kvm_check_extension(s, KVM_CAP_MCE);
+ if (r > 0) {
+ *max_banks = r;
+ return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
+ }
+ return -ENOSYS;
+}
+
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+ MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
+ uint64_t mcg_status = MCG_STATUS_MCIP;
+ int flags = 0;
+
+ if (code == BUS_MCEERR_AR) {
+ status |= MCI_STATUS_AR | 0x134;
+ mcg_status |= MCG_STATUS_EIPV;
+ } else {
+ status |= 0xc0;
+ mcg_status |= MCG_STATUS_RIPV;
+ }
+
+ flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
+ /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
+ * guest kernel back into env->mcg_ext_ctl.
+ */
+ cpu_synchronize_state(cs);
+ if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
+ mcg_status |= MCG_STATUS_LMCE;
+ flags = 0;
+ }
+
+ cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
+ (MCM_ADDR_PHYS << 6) | 0xc, flags);
+}
+
+static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
+{
+ MemoryFailureFlags mff = {.action_required = ar, .recursive = false};
+
+ qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
+ &mff);
+}
+
+static void hardware_memory_error(void *host_addr)
+{
+ emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
+ error_report("QEMU got Hardware memory error at addr %p", host_addr);
+ exit(1);
+}
+
+void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+{
+ X86CPU *cpu = X86_CPU(c);
+ CPUX86State *env = &cpu->env;
+ ram_addr_t ram_addr;
+ hwaddr paddr;
+
+ /* If we get an action required MCE, it has been injected by KVM
+ * while the VM was running. An action optional MCE instead should
+ * be coming from the main thread, which qemu_init_sigbus identifies
+ * as the "early kill" thread.
+ */
+ assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
+
+ if ((env->mcg_cap & MCG_SER_P) && addr) {
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr != RAM_ADDR_INVALID &&
+ kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+ kvm_hwpoison_page_add(ram_addr);
+ kvm_mce_inject(cpu, paddr, code);
+
+ /*
+ * Use different logging severity based on error type.
+ * If there is additional MCE reporting on the hypervisor, QEMU VA
+ * could be another source to identify the PA and MCE details.
+ */
+ if (code == BUS_MCEERR_AR) {
+ error_report("Guest MCE Memory Error at QEMU addr %p and "
+ "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+ addr, paddr, "BUS_MCEERR_AR");
+ } else {
+ warn_report("Guest MCE Memory Error at QEMU addr %p and "
+ "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+ addr, paddr, "BUS_MCEERR_AO");
+ }
+
+ return;
+ }
+
+ if (code == BUS_MCEERR_AO) {
+ warn_report("Hardware memory error at addr %p of type %s "
+ "for memory used by QEMU itself instead of guest system!",
+ addr, "BUS_MCEERR_AO");
+ }
+ }
+
+ if (code == BUS_MCEERR_AR) {
+ hardware_memory_error(addr);
+ }
+
+    /* For an AO MCE, hope we are lucky and just emit a notification event */
+ emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
+}
+
+static void kvm_reset_exception(CPUX86State *env)
+{
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
+ env->exception_has_payload = false;
+ env->exception_payload = 0;
+}
+
+static void kvm_queue_exception(CPUX86State *env,
+ int32_t exception_nr,
+ uint8_t exception_has_payload,
+ uint64_t exception_payload)
+{
+ assert(env->exception_nr == -1);
+ assert(!env->exception_pending);
+ assert(!env->exception_injected);
+ assert(!env->exception_has_payload);
+
+ env->exception_nr = exception_nr;
+
+ if (has_exception_payload) {
+ env->exception_pending = 1;
+
+ env->exception_has_payload = exception_has_payload;
+ env->exception_payload = exception_payload;
+ } else {
+ env->exception_injected = 1;
+
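+        /*
+         * Without the exception-payload capability we must deliver the
+         * payload by hand: DR6 for #DB, CR2 for #PF.
+         */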
+ if (exception_nr == EXCP01_DB) {
+ assert(exception_has_payload);
+ env->dr[6] = exception_payload;
+ } else if (exception_nr == EXCP0E_PAGE) {
+ assert(exception_has_payload);
+ env->cr[2] = exception_payload;
+ } else {
+ assert(!exception_has_payload);
+ }
+ }
+}
+
+static int kvm_inject_mce_oldstyle(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
+ unsigned int bank, bank_num = env->mcg_cap & 0xff;
+ struct kvm_x86_mce mce;
+
+ kvm_reset_exception(env);
+
+ /*
+ * There must be at least one bank in use if an MCE is pending.
+ * Find it and use its values for the event injection.
+ */
+ for (bank = 0; bank < bank_num; bank++) {
+ if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
+ break;
+ }
+ }
+ assert(bank < bank_num);
+
+ mce.bank = bank;
+ mce.status = env->mce_banks[bank * 4 + 1];
+ mce.mcg_status = env->mcg_status;
+ mce.addr = env->mce_banks[bank * 4 + 2];
+ mce.misc = env->mce_banks[bank * 4 + 3];
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
+ }
+ return 0;
+}
+
+static void cpu_update_state(void *opaque, bool running, RunState state)
+{
+ CPUX86State *env = opaque;
+
+ if (running) {
+ env->tsc_valid = false;
+ }
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ return cpu->apic_id;
+}
+
+#ifndef KVM_CPUID_SIGNATURE_NEXT
+#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
+#endif
+
+static bool hyperv_enabled(X86CPU *cpu)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
+ ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
+ cpu->hyperv_features || cpu->hyperv_passthrough);
+}
+
+/*
+ * Check whether target_freq is within conservative
+ * ntp correctable bounds (250ppm) of freq
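+ * (e.g. for freq = 1000000 kHz the window is 999750..1000250 kHz)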
+ */
+static inline bool freq_within_bounds(int freq, int target_freq)
+{
+ int max_freq = freq + (freq * 250 / 1000000);
+ int min_freq = freq - (freq * 250 / 1000000);
+
+ if (target_freq >= min_freq && target_freq <= max_freq) {
+ return true;
+ }
+
+ return false;
+}
+
+static int kvm_arch_set_tsc_khz(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int r, cur_freq;
+ bool set_ioctl = false;
+
+ if (!env->tsc_khz) {
+ return 0;
+ }
+
+ cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;
+
+ /*
+ * If TSC scaling is supported, attempt to set TSC frequency.
+ */
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
+ set_ioctl = true;
+ }
+
+ /*
+ * If desired TSC frequency is within bounds of NTP correction,
+ * attempt to set TSC frequency.
+ */
+ if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
+ set_ioctl = true;
+ }
+
+ r = set_ioctl ?
+ kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
+ -ENOTSUP;
+
+ if (r < 0) {
+ /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
+ * TSC frequency doesn't match the one we want.
+ */
+ cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
+ warn_report("TSC frequency mismatch between "
+ "VM (%" PRId64 " kHz) and host (%d kHz), "
+ "and TSC scaling unavailable",
+ env->tsc_khz, cur_freq);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static bool tsc_is_stable_and_known(CPUX86State *env)
+{
+ if (!env->tsc_khz) {
+ return false;
+ }
+ return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
+ || env->user_tsc_khz;
+}
+
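+/*
+ * Table of Hyper-V enlightenments: for each feature, the CPUID bits the
+ * host must expose and a bitmap of the HYPERV_FEAT_* it depends on.
+ */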
+static struct {
+ const char *desc;
+ struct {
+ uint32_t func;
+ int reg;
+ uint32_t bits;
+ } flags[2];
+ uint64_t dependencies;
+} kvm_hyperv_properties[] = {
+ [HYPERV_FEAT_RELAXED] = {
+ .desc = "relaxed timing (hv-relaxed)",
+ .flags = {
+ {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
+ .bits = HV_RELAXED_TIMING_RECOMMENDED}
+ }
+ },
+ [HYPERV_FEAT_VAPIC] = {
+ .desc = "virtual APIC (hv-vapic)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_APIC_ACCESS_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_TIME] = {
+ .desc = "clocksources (hv-time)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_CRASH] = {
+ .desc = "crash MSRs (hv-crash)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EDX,
+ .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_RESET] = {
+ .desc = "reset MSR (hv-reset)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_RESET_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_VPINDEX] = {
+ .desc = "VP_INDEX MSR (hv-vpindex)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_VP_INDEX_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_RUNTIME] = {
+ .desc = "VP_RUNTIME MSR (hv-runtime)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_VP_RUNTIME_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_SYNIC] = {
+ .desc = "synthetic interrupt controller (hv-synic)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_SYNIC_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_STIMER] = {
+ .desc = "synthetic timers (hv-stimer)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_SYNTIMERS_AVAILABLE}
+ },
+ .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
+ },
+ [HYPERV_FEAT_FREQUENCIES] = {
+ .desc = "frequency MSRs (hv-frequencies)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_ACCESS_FREQUENCY_MSRS},
+ {.func = HV_CPUID_FEATURES, .reg = R_EDX,
+ .bits = HV_FREQUENCY_MSRS_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_REENLIGHTENMENT] = {
+ .desc = "reenlightenment MSRs (hv-reenlightenment)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EAX,
+ .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
+ }
+ },
+ [HYPERV_FEAT_TLBFLUSH] = {
+ .desc = "paravirtualized TLB flush (hv-tlbflush)",
+ .flags = {
+ {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
+ .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
+ HV_EX_PROCESSOR_MASKS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VPINDEX)
+ },
+ [HYPERV_FEAT_EVMCS] = {
+ .desc = "enlightened VMCS (hv-evmcs)",
+ .flags = {
+ {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
+ .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VAPIC)
+ },
+ [HYPERV_FEAT_IPI] = {
+ .desc = "paravirtualized IPI (hv-ipi)",
+ .flags = {
+ {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
+ .bits = HV_CLUSTER_IPI_RECOMMENDED |
+ HV_EX_PROCESSOR_MASKS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VPINDEX)
+ },
+ [HYPERV_FEAT_STIMER_DIRECT] = {
+ .desc = "direct mode synthetic timers (hv-stimer-direct)",
+ .flags = {
+ {.func = HV_CPUID_FEATURES, .reg = R_EDX,
+ .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
+ },
+ .dependencies = BIT(HYPERV_FEAT_STIMER)
+ },
+ [HYPERV_FEAT_AVIC] = {
+ .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
+ .flags = {
+ {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
+ .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
+ }
+ },
+};
+
+static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
+ bool do_sys_ioctl)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = g_malloc0(size);
+ cpuid->nent = max;
+
+ if (do_sys_ioctl) {
+ r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ }
+ if (r == 0 && cpuid->nent >= max) {
+ r = -E2BIG;
+ }
+ if (r < 0) {
+ if (r == -E2BIG) {
+ g_free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+/*
+ * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
+{
+ struct kvm_cpuid2 *cpuid;
+    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
+ int max = 10;
+ int i;
+ bool do_sys_ioctl;
+
+ do_sys_ioctl =
+ kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;
+
+ /*
+ * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
+ * unsupported, kvm_hyperv_expand_features() checks for that.
+ */
+ assert(do_sys_ioctl || cs->kvm_state);
+
+ /*
+ * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
+ * -E2BIG, however, it doesn't report back the right size. Keep increasing
+ * it and re-trying until we succeed.
+ */
+ while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
+ max++;
+ }
+
+ /*
+ * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
+ * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
+ * information early, just check for the capability and set the bit
+ * manually.
+ */
+ if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
+ for (i = 0; i < cpuid->nent; i++) {
+ if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
+ cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
+ }
+ }
+ }
+
+ return cpuid;
+}
+
+/*
+ * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
+ * leaves from KVM_CAP_HYPERV* and present MSRs data.
+ */
+static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;
+
+ /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
+ cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
+ cpuid->nent = 2;
+
+    /* entries[0]: HV_CPUID_FEATURES */
+ entry_feat = &cpuid->entries[0];
+ entry_feat->function = HV_CPUID_FEATURES;
+
+ entry_recomm = &cpuid->entries[1];
+ entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
+ entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
+ entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
+ entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
+ entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
+ entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
+ }
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+ entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
+ entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
+ }
+
+ if (has_msr_hv_frequencies) {
+ entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
+ entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
+ }
+
+ if (has_msr_hv_crash) {
+ entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
+ }
+
+ if (has_msr_hv_reenlightenment) {
+ entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
+ }
+
+ if (has_msr_hv_reset) {
+ entry_feat->eax |= HV_RESET_AVAILABLE;
+ }
+
+ if (has_msr_hv_vpindex) {
+ entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
+ }
+
+ if (has_msr_hv_runtime) {
+ entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
+ }
+
+ if (has_msr_hv_synic) {
+ unsigned int cap = cpu->hyperv_synic_kvm_only ?
+ KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
+
+ if (kvm_check_extension(cs->kvm_state, cap) > 0) {
+ entry_feat->eax |= HV_SYNIC_AVAILABLE;
+ }
+ }
+
+ if (has_msr_hv_stimer) {
+ entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_TLBFLUSH) > 0) {
+ entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
+ entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
+ entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_SEND_IPI) > 0) {
+ entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
+ entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
+ }
+
+ return cpuid;
+}
+
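+/*
+ * Return the host-supported value of the given Hyper-V CPUID
+ * leaf/register, caching the full CPUID data in hv_cpuid_cache.
+ */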
+static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
+{
+ struct kvm_cpuid_entry2 *entry;
+ struct kvm_cpuid2 *cpuid;
+
+ if (hv_cpuid_cache) {
+ cpuid = hv_cpuid_cache;
+ } else {
+ if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
+ cpuid = get_supported_hv_cpuid(cs);
+ } else {
+ /*
+ * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
+ * before KVM context is created but this is only done when
+ * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
+ * KVM_CAP_HYPERV_CPUID.
+ */
+ assert(cs->kvm_state);
+
+ cpuid = get_supported_hv_cpuid_legacy(cs);
+ }
+ hv_cpuid_cache = cpuid;
+ }
+
+ if (!cpuid) {
+ return 0;
+ }
+
+ entry = cpuid_find_entry(cpuid, func, 0);
+ if (!entry) {
+ return 0;
+ }
+
+ return cpuid_entry_get_reg(entry, reg);
+}
+
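+/* Check that every CPUID bit required by 'feature' is exposed by the host */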
+static bool hyperv_feature_supported(CPUState *cs, int feature)
+{
+ uint32_t func, bits;
+ int i, reg;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
+
+ func = kvm_hyperv_properties[feature].flags[i].func;
+ reg = kvm_hyperv_properties[feature].flags[i].reg;
+ bits = kvm_hyperv_properties[feature].flags[i].bits;
+
+ if (!func) {
+ continue;
+ }
+
+ if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Checks that all feature dependencies are enabled */
+static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
+{
+ uint64_t deps;
+ int dep_feat;
+
+ deps = kvm_hyperv_properties[feature].dependencies;
+ while (deps) {
+ dep_feat = ctz64(deps);
+ if (!(hyperv_feat_enabled(cpu, dep_feat))) {
+ error_setg(errp, "Hyper-V %s requires Hyper-V %s",
+ kvm_hyperv_properties[feature].desc,
+ kvm_hyperv_properties[dep_feat].desc);
+ return false;
+ }
+ deps &= ~(1ull << dep_feat);
+ }
+
+ return true;
+}
+
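+/*
+ * Collect, for the given CPUID leaf/register, the feature bits of all
+ * enlightenments that are enabled on this vCPU.
+ */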
+static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ uint32_t r = 0;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
+ if (!hyperv_feat_enabled(cpu, i)) {
+ continue;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
+ if (kvm_hyperv_properties[i].flags[j].func != func) {
+ continue;
+ }
+ if (kvm_hyperv_properties[i].flags[j].reg != reg) {
+ continue;
+ }
+
+ r |= kvm_hyperv_properties[i].flags[j].bits;
+ }
+ }
+
+ return r;
+}
+
+/*
+ * Expand Hyper-V CPU features. In particular, check that all the requested
+ * features are supported by the host and that the configuration is sane
+ * (i.e. all the required dependencies are included). Also, this takes care
+ * of 'hv_passthrough' mode and fills the environment with all supported
+ * Hyper-V features.
+ */
+bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
+{
+ CPUState *cs = CPU(cpu);
+ Error *local_err = NULL;
+ int feat;
+
+    if (!hyperv_enabled(cpu)) {
+        return true;
+    }
+
+ /*
+ * When kvm_hyperv_expand_features is called at CPU feature expansion
+ * time per-CPU kvm_state is not available yet so we can only proceed
+ * when KVM_CAP_SYS_HYPERV_CPUID is supported.
+ */
+    if (!cs->kvm_state &&
+        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
+        return true;
+    }
+
+ if (cpu->hyperv_passthrough) {
+ cpu->hyperv_vendor_id[0] =
+ hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
+ cpu->hyperv_vendor_id[1] =
+ hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
+ cpu->hyperv_vendor_id[2] =
+ hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
+ cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
+ sizeof(cpu->hyperv_vendor_id) + 1);
+ memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
+ sizeof(cpu->hyperv_vendor_id));
+ cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;
+
+ cpu->hyperv_interface_id[0] =
+ hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
+ cpu->hyperv_interface_id[1] =
+ hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
+ cpu->hyperv_interface_id[2] =
+ hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
+ cpu->hyperv_interface_id[3] =
+ hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);
+
+ cpu->hyperv_ver_id_build =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
+ cpu->hyperv_ver_id_major =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
+ cpu->hyperv_ver_id_minor =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
+ cpu->hyperv_ver_id_sp =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
+ cpu->hyperv_ver_id_sb =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
+ cpu->hyperv_ver_id_sn =
+ hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;
+
+ cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
+ R_EAX);
+ cpu->hyperv_limits[0] =
+ hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
+ cpu->hyperv_limits[1] =
+ hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
+ cpu->hyperv_limits[2] =
+ hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);
+
+ cpu->hyperv_spinlock_attempts =
+ hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
+
+ /*
+ * Mark feature as enabled in 'cpu->hyperv_features' as
+ * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
+ */
+ for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
+ if (hyperv_feature_supported(cs, feat)) {
+ cpu->hyperv_features |= BIT(feat);
+ }
+ }
+ } else {
+ /* Check features availability and dependencies */
+ for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
+ /* If the feature was not requested skip it. */
+ if (!hyperv_feat_enabled(cpu, feat)) {
+ continue;
+ }
+
+ /* Check if the feature is supported by KVM */
+ if (!hyperv_feature_supported(cs, feat)) {
+ error_setg(errp, "Hyper-V %s is not supported by kernel",
+ kvm_hyperv_properties[feat].desc);
+ return false;
+ }
+
+ /* Check dependencies */
+ if (!hv_feature_check_deps(cpu, feat, &local_err)) {
+ error_propagate(errp, local_err);
+ return false;
+ }
+ }
+ }
+
+ /* Additional dependencies not covered by kvm_hyperv_properties[] */
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
+ !cpu->hyperv_synic_kvm_only &&
+ !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
+ error_setg(errp, "Hyper-V %s requires Hyper-V %s",
+ kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
+ kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
+ */
+static int hyperv_fill_cpuids(CPUState *cs,
+ struct kvm_cpuid_entry2 *cpuid_ent)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ struct kvm_cpuid_entry2 *c;
+ uint32_t cpuid_i = 0;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+ c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
+ HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
+ c->ebx = cpu->hyperv_vendor_id[0];
+ c->ecx = cpu->hyperv_vendor_id[1];
+ c->edx = cpu->hyperv_vendor_id[2];
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_INTERFACE;
+ c->eax = cpu->hyperv_interface_id[0];
+ c->ebx = cpu->hyperv_interface_id[1];
+ c->ecx = cpu->hyperv_interface_id[2];
+ c->edx = cpu->hyperv_interface_id[3];
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_VERSION;
+ c->eax = cpu->hyperv_ver_id_build;
+ c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
+ cpu->hyperv_ver_id_minor;
+ c->ecx = cpu->hyperv_ver_id_sp;
+ c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
+ (cpu->hyperv_ver_id_sn & 0xffffff);
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_FEATURES;
+ c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
+ c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
+ c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);
+
+ /* Unconditionally required with any Hyper-V enlightenment */
+ c->eax |= HV_HYPERCALL_AVAILABLE;
+
+ /* SynIC and Vmbus devices require messages/signals hypercalls */
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
+ !cpu->hyperv_synic_kvm_only) {
+ c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
+ }
+
+ /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
+ c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_ENLIGHTMENT_INFO;
+ c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
+ c->ebx = cpu->hyperv_spinlock_attempts;
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
+ !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
+ c->eax |= HV_APIC_ACCESS_RECOMMENDED;
+ }
+
+ if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
+ c->eax |= HV_NO_NONARCH_CORESHARING;
+ } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
+ c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
+ HV_NO_NONARCH_CORESHARING;
+ }
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_IMPLEMENT_LIMITS;
+ c->eax = cpu->hv_max_vps;
+ c->ebx = cpu->hyperv_limits[0];
+ c->ecx = cpu->hyperv_limits[1];
+ c->edx = cpu->hyperv_limits[2];
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
+ __u32 function;
+
+ /* Create zeroed 0x40000006..0x40000009 leaves */
+ for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
+ function < HV_CPUID_NESTED_FEATURES; function++) {
+ c = &cpuid_ent[cpuid_i++];
+ c->function = function;
+ }
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_NESTED_FEATURES;
+ c->eax = cpu->hyperv_nested[0];
+ }
+
+ return cpuid_i;
+}
+
+static Error *hv_passthrough_mig_blocker;
+static Error *hv_no_nonarch_cs_mig_blocker;
+
+/* Checks that the exposed eVMCS version range is supported by KVM */
+static bool evmcs_version_supported(uint16_t evmcs_version,
+ uint16_t supported_evmcs_version)
+{
+ uint8_t min_version = evmcs_version & 0xff;
+ uint8_t max_version = evmcs_version >> 8;
+ uint8_t min_supported_version = supported_evmcs_version & 0xff;
+ uint8_t max_supported_version = supported_evmcs_version >> 8;
+
+ return (min_version >= min_supported_version) &&
+ (max_version <= max_supported_version);
+}
+
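+/* An eVMCS version range [min..max] is encoded as (max << 8) | min */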
+#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
+
+static int hyperv_init_vcpu(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ Error *local_err = NULL;
+ int ret;
+
+ if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
+ error_setg(&hv_passthrough_mig_blocker,
+ "'hv-passthrough' CPU flag prevents migration, use explicit"
+ " set of hv-* flags instead");
+ ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
+ hv_no_nonarch_cs_mig_blocker == NULL) {
+ error_setg(&hv_no_nonarch_cs_mig_blocker,
+ "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
+ " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
+ " make sure SMT is disabled and/or that vCPUs are properly"
+ " pinned)");
+ ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
+ /*
+ * the kernel doesn't support setting vp_index; assert that its value
+ * is in sync
+ */
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[1];
+ } msr_data = {
+ .info.nmsrs = 1,
+ .entries[0].index = HV_X64_MSR_VP_INDEX,
+ };
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+ assert(ret == 1);
+
+ if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
+ error_report("kernel's vp_index != QEMU's vp_index");
+ return -ENXIO;
+ }
+ }
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
+ KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
+ ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
+ if (ret < 0) {
+ error_report("failed to turn on HyperV SynIC in KVM: %s",
+ strerror(-ret));
+ return ret;
+ }
+
+ if (!cpu->hyperv_synic_kvm_only) {
+ ret = hyperv_x86_synic_add(cpu);
+ if (ret < 0) {
+ error_report("failed to create HyperV SynIC: %s",
+ strerror(-ret));
+ return ret;
+ }
+ }
+ }
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
+ uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
+ uint16_t supported_evmcs_version;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
+ (uintptr_t)&supported_evmcs_version);
+
+        /*
+         * KVM is required to support EVMCS ver.1, as that's what the
+         * 'hv-evmcs' option sets. Note: we hardcode the maximum supported
+         * eVMCS version to '1' as well so the 'hv-evmcs' feature stays
+         * migratable even when (and if) ver.2 is implemented. A new option
+         * (e.g. 'hv-evmcs=2') will then have to be added.
+         */
+ if (ret < 0) {
+ error_report("Hyper-V %s is not supported by kernel",
+ kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
+ return ret;
+ }
+
+ if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
+ error_report("eVMCS version range [%d..%d] is not supported by "
+ "kernel (supported: [%d..%d])", evmcs_version & 0xff,
+ evmcs_version >> 8, supported_evmcs_version & 0xff,
+ supported_evmcs_version >> 8);
+ return -ENOTSUP;
+ }
+
+ cpu->hyperv_nested[0] = evmcs_version;
+ }
+
+ if (cpu->hyperv_enforce_cpuid) {
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
+ if (ret < 0) {
+ error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
+ strerror(-ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static Error *invtsc_mig_blocker;
+
+#define KVM_MAX_CPUID_ENTRIES 100
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ struct {
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+ } cpuid_data;
+ /*
+ * The kernel defines these structs with padding fields so there
+ * should be no extra padding in our cpuid_data struct.
+ */
+ QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
+ sizeof(struct kvm_cpuid2) +
+ sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
+
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t limit, i, j, cpuid_i;
+ uint32_t unused;
+ struct kvm_cpuid_entry2 *c;
+ uint32_t signature[3];
+ int kvm_base = KVM_CPUID_SIGNATURE;
+ int max_nested_state_len;
+ int r;
+ Error *local_err = NULL;
+
+ memset(&cpuid_data, 0, sizeof(cpuid_data));
+
+ cpuid_i = 0;
+
+ r = kvm_arch_set_tsc_khz(cs);
+ if (r < 0) {
+ return r;
+ }
+
+    /* The vCPU's TSC frequency is either specified by the user, or follows
+     * the value used by KVM if the former is not present. In the latter
+     * case, we query it from KVM and record it in env->tsc_khz, so that
+     * the vCPU's TSC frequency can be migrated later via this field.
+     */
+ if (!env->tsc_khz) {
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (r > 0) {
+ env->tsc_khz = r;
+ }
+ }
+
+ env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
+
+ /*
+ * kvm_hyperv_expand_features() is called here for the second time in case
+ * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
+ * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
+ * check which Hyper-V enlightenments are supported and which are not, we
+ * can still proceed and check/expand Hyper-V enlightenments here so legacy
+ * behavior is preserved.
+ */
+ if (!kvm_hyperv_expand_features(cpu, &local_err)) {
+ error_report_err(local_err);
+ return -ENOSYS;
+ }
+
+ if (hyperv_enabled(cpu)) {
+ r = hyperv_init_vcpu(cpu);
+ if (r) {
+ return r;
+ }
+
+ cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
+ kvm_base = KVM_CPUID_SIGNATURE_NEXT;
+ has_msr_hv_hypercall = true;
+ }
+
+ if (cpu->expose_kvm) {
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_SIGNATURE | kvm_base;
+ c->eax = KVM_CPUID_FEATURES | kvm_base;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_FEATURES | kvm_base;
+ c->eax = env->features[FEAT_KVM];
+ c->edx = env->features[FEAT_KVM_HINTS];
+ }
+
+ cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
+
+ if (cpu->kvm_pv_enforce_cpuid) {
+ r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
+ if (r < 0) {
+ fprintf(stderr,
+                    "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s\n",
+ strerror(-r));
+ abort();
+ }
+ }
+
+ for (i = 0; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported level value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ switch (i) {
+ case 2: {
+            /* Keep reading function 2 until all of its output has been
+             * read; the low byte of EAX gives the iteration count. */
+ int times;
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+ KVM_CPUID_FLAG_STATE_READ_NEXT;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax & 0xff;
+
+ for (j = 1; j < times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ case 0x1f:
+ if (env->nr_dies < 2) {
+ break;
+ }
+ /* fallthrough */
+ case 4:
+ case 0xb:
+ case 0xd:
+ for (j = 0; ; j++) {
+ if (i == 0xd && j == 64) {
+ break;
+ }
+
+ if (i == 0x1f && j == 64) {
+ break;
+ }
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (i == 4 && c->eax == 0) {
+ break;
+ }
+ if (i == 0xb && !(c->ecx & 0xff00)) {
+ break;
+ }
+ if (i == 0x1f && !(c->ecx & 0xff00)) {
+ break;
+ }
+ if (i == 0xd && c->eax == 0) {
+ continue;
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ }
+ break;
+ case 0x7:
+ case 0x12:
+ for (j = 0; ; j++) {
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (j > 1 && (c->eax & 0xf) != 1) {
+ break;
+ }
+
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x12,ecx:0x%x)\n", j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ }
+ break;
+ case 0x14: {
+ uint32_t times;
+
+ c->function = i;
+ c->index = 0;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax;
+
+ for (j = 1; j <= times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = i;
+ c->index = j;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+ /*
+ * KVM already returns all zeroes if a CPUID entry is missing,
+ * so we can omit it and avoid hitting KVM's 80-entry limit.
+ */
+ cpuid_i--;
+ }
+ break;
+ }
+ }
+
+ if (limit >= 0x0a) {
+ uint32_t eax, edx;
+
+ cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+ has_architectural_pmu_version = eax & 0xff;
+ if (has_architectural_pmu_version > 0) {
+ num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
+
+ /* Shouldn't be more than 32, since that's the number of bits
+ * available in EBX to tell us _which_ counters are available.
+ * Play it safe.
+ */
+ if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+ }
+
+ if (has_architectural_pmu_version > 1) {
+ num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+ if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+ num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+ }
+ }
+ }
+ }
+
+ cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0x80000000; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ switch (i) {
+ case 0x8000001d:
+ /* Query for all AMD cache information leaves */
+ for (j = 0; ; j++) {
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (c->eax == 0) {
+ break;
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ }
+ break;
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+ /*
+ * KVM already returns all zeroes if a CPUID entry is missing,
+ * so we can omit it and avoid hitting KVM's 80-entry limit.
+ */
+ cpuid_i--;
+ }
+ break;
+ }
+ }
+
+    /* Query Centaur's CPUID leaves if they are supported. */
+ if (env->cpuid_xlevel2 > 0) {
+ cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0xC0000000; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ }
+
+ cpuid_data.cpuid.nent = cpuid_i;
+
+ if (((env->cpuid_version >> 8)&0xF) >= 6
+ && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+ (CPUID_MCE | CPUID_MCA)
+ && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+ uint64_t mcg_cap, unsupported_caps;
+ int banks;
+ int ret;
+
+ ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
+ if (ret < 0) {
+        fprintf(stderr, "kvm_get_mce_cap_supported: %s\n", strerror(-ret));
+ return ret;
+ }
+
+ if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
+ error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
+ (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
+ return -ENOTSUP;
+ }
+
+ unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
+ if (unsupported_caps) {
+ if (unsupported_caps & MCG_LMCE_P) {
+ error_report("kvm: LMCE not supported");
+ return -ENOTSUP;
+ }
+ warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
+ unsupported_caps);
+ }
+
+ env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
+ ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
+ if (ret < 0) {
+        fprintf(stderr, "KVM_X86_SETUP_MCE: %s\n", strerror(-ret));
+ return ret;
+ }
+ }
+
+ cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
+
+ c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
+ if (c) {
+ has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
+ !!(c->ecx & CPUID_EXT_SMX);
+ }
+
+ c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
+ if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
+ has_msr_feature_control = true;
+ }
+
+ if (env->mcg_cap & MCG_LMCE_P) {
+ has_msr_mcg_ext_ctl = has_msr_feature_control = true;
+ }
+
+ if (!env->user_tsc_khz) {
+ if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
+ invtsc_mig_blocker == NULL) {
+ error_setg(&invtsc_mig_blocker,
+ "State blocked by non-migratable CPU device"
+ " (invtsc flag)");
+ r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
+ if (r < 0) {
+ error_report_err(local_err);
+ return r;
+ }
+ }
+ }
+
+ if (cpu->vmware_cpuid_freq
+ /* Guests depend on 0x40000000 to detect this feature, so only expose
+ * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
+ && cpu->expose_kvm
+ && kvm_base == KVM_CPUID_SIGNATURE
+ /* TSC clock must be stable and known for this feature. */
+ && tsc_is_stable_and_known(env)) {
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_SIGNATURE | 0x10;
+ c->eax = env->tsc_khz;
+ c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
+ c->ecx = c->edx = 0;
+
+ c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
+ c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
+ }
+
+ cpuid_data.cpuid.nent = cpuid_i;
+
+ cpuid_data.cpuid.padding = 0;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
+ if (r) {
+ goto fail;
+ }
+
+ if (has_xsave) {
+ env->xsave_buf_len = sizeof(struct kvm_xsave);
+ env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
+ memset(env->xsave_buf, 0, env->xsave_buf_len);
+
+ /*
+ * The allocated storage must be large enough for all of the
+ * possible XSAVE state components.
+ */
+ assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
+ <= env->xsave_buf_len);
+ }
+
+ max_nested_state_len = kvm_max_nested_state_length();
+ if (max_nested_state_len > 0) {
+ assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
+
+ if (cpu_has_vmx(env) || cpu_has_svm(env)) {
+ struct kvm_vmx_nested_state_hdr *vmx_hdr;
+
+ env->nested_state = g_malloc0(max_nested_state_len);
+ env->nested_state->size = max_nested_state_len;
+
+ if (cpu_has_vmx(env)) {
+ env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+ vmx_hdr = &env->nested_state->hdr.vmx;
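+                /* -1 marks the VMXON/VMCS12 areas as absent. */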
+ vmx_hdr->vmxon_pa = -1ull;
+ vmx_hdr->vmcs12_pa = -1ull;
+ } else {
+ env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
+ }
+ }
+ }
+
+ cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
+
+ if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+ has_msr_tsc_aux = false;
+ }
+
+ kvm_init_msrs(cpu);
+
+ return 0;
+
+ fail:
+ migrate_del_blocker(invtsc_mig_blocker);
+
+ return r;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cpu->kvm_msr_buf) {
+ g_free(cpu->kvm_msr_buf);
+ cpu->kvm_msr_buf = NULL;
+ }
+
+ if (env->nested_state) {
+ g_free(env->nested_state);
+ env->nested_state = NULL;
+ }
+
+ qemu_del_vm_change_state_handler(cpu->vmsentry);
+
+ return 0;
+}
+
+void kvm_arch_reset_vcpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
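+    /* XCR0 resets to 1: only x87 state is enabled. */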
+ env->xcr0 = 1;
+ if (kvm_irqchip_in_kernel()) {
+ env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
+ KVM_MP_STATE_UNINITIALIZED;
+ } else {
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+ }
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
+ }
+
+ hyperv_x86_synic_reset(cpu);
+ }
+ /* enabled by default */
+ env->poll_control_msr = 1;
+
+ sev_es_set_reset_vector(CPU(cpu));
+}
+
+void kvm_arch_do_init_vcpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ /* APs get directly into wait-for-SIPI state. */
+ if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
+ env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ }
+}
+
+static int kvm_get_supported_feature_msrs(KVMState *s)
+{
+ int ret = 0;
+
+ if (kvm_feature_msrs != NULL) {
+ return 0;
+ }
+
+ if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
+ return 0;
+ }
+
+ struct kvm_msr_list msr_list;
+
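+    /*
+     * Probe with nmsrs == 0 first: KVM fills in the required count and
+     * returns -E2BIG, which is expected here.
+     */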
+ msr_list.nmsrs = 0;
+ ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
+ if (ret < 0 && ret != -E2BIG) {
+ error_report("Fetch KVM feature MSR list failed: %s",
+ strerror(-ret));
+ return ret;
+ }
+
+ assert(msr_list.nmsrs > 0);
+ kvm_feature_msrs = (struct kvm_msr_list *) \
+ g_malloc0(sizeof(msr_list) +
+ msr_list.nmsrs * sizeof(msr_list.indices[0]));
+
+ kvm_feature_msrs->nmsrs = msr_list.nmsrs;
+ ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
+
+ if (ret < 0) {
+ error_report("Fetch KVM feature MSR list failed: %s",
+ strerror(-ret));
+ g_free(kvm_feature_msrs);
+ kvm_feature_msrs = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int kvm_get_supported_msrs(KVMState *s)
+{
+ int ret = 0;
+ struct kvm_msr_list msr_list, *kvm_msr_list;
+
+ /*
+ * Obtain MSR list from KVM. These are the MSRs that we must
+ * save/restore.
+ */
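+    /* As above, probe with nmsrs == 0; -E2BIG is the expected result. */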
+ msr_list.nmsrs = 0;
+ ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
+ if (ret < 0 && ret != -E2BIG) {
+ return ret;
+ }
+ /*
+ * Old kernel modules had a bug and could write beyond the provided
+     * memory. Allocate at least 1K to be safe.
+ */
+ kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
+ msr_list.nmsrs *
+ sizeof(msr_list.indices[0])));
+
+ kvm_msr_list->nmsrs = msr_list.nmsrs;
+ ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
+ if (ret >= 0) {
+ int i;
+
+ for (i = 0; i < kvm_msr_list->nmsrs; i++) {
+ switch (kvm_msr_list->indices[i]) {
+ case MSR_STAR:
+ has_msr_star = true;
+ break;
+ case MSR_VM_HSAVE_PA:
+ has_msr_hsave_pa = true;
+ break;
+ case MSR_TSC_AUX:
+ has_msr_tsc_aux = true;
+ break;
+ case MSR_TSC_ADJUST:
+ has_msr_tsc_adjust = true;
+ break;
+ case MSR_IA32_TSCDEADLINE:
+ has_msr_tsc_deadline = true;
+ break;
+ case MSR_IA32_SMBASE:
+ has_msr_smbase = true;
+ break;
+ case MSR_SMI_COUNT:
+ has_msr_smi_count = true;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ has_msr_misc_enable = true;
+ break;
+ case MSR_IA32_BNDCFGS:
+ has_msr_bndcfgs = true;
+ break;
+ case MSR_IA32_XSS:
+ has_msr_xss = true;
+ break;
+ case MSR_IA32_UMWAIT_CONTROL:
+ has_msr_umwait = true;
+ break;
+ case HV_X64_MSR_CRASH_CTL:
+ has_msr_hv_crash = true;
+ break;
+ case HV_X64_MSR_RESET:
+ has_msr_hv_reset = true;
+ break;
+ case HV_X64_MSR_VP_INDEX:
+ has_msr_hv_vpindex = true;
+ break;
+ case HV_X64_MSR_VP_RUNTIME:
+ has_msr_hv_runtime = true;
+ break;
+ case HV_X64_MSR_SCONTROL:
+ has_msr_hv_synic = true;
+ break;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ has_msr_hv_stimer = true;
+ break;
+ case HV_X64_MSR_TSC_FREQUENCY:
+ has_msr_hv_frequencies = true;
+ break;
+ case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+ has_msr_hv_reenlightenment = true;
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ has_msr_spec_ctrl = true;
+ break;
+ case MSR_AMD64_TSC_RATIO:
+ has_tsc_scale_msr = true;
+ break;
+ case MSR_IA32_TSX_CTRL:
+ has_msr_tsx_ctrl = true;
+ break;
+ case MSR_VIRT_SSBD:
+ has_msr_virt_ssbd = true;
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ has_msr_arch_capabs = true;
+ break;
+ case MSR_IA32_CORE_CAPABILITY:
+ has_msr_core_capabs = true;
+ break;
+ case MSR_IA32_PERF_CAPABILITIES:
+ has_msr_perf_capabs = true;
+ break;
+ case MSR_IA32_VMX_VMFUNC:
+ has_msr_vmx_vmfunc = true;
+ break;
+ case MSR_IA32_UCODE_REV:
+ has_msr_ucode_rev = true;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ has_msr_vmx_procbased_ctls2 = true;
+ break;
+ case MSR_IA32_PKRS:
+ has_msr_pkrs = true;
+ break;
+ }
+ }
+ }
+
+ g_free(kvm_msr_list);
+
+ return ret;
+}
+
+static Notifier smram_machine_done;
+static KVMMemoryListener smram_listener;
+static AddressSpace smram_address_space;
+static MemoryRegion smram_as_root;
+static MemoryRegion smram_as_mem;
+
+static void register_smram_listener(Notifier *n, void *unused)
+{
+ MemoryRegion *smram =
+ (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+ /* Outer container... */
+ memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
+ memory_region_set_enabled(&smram_as_root, true);
+
+ /* ... with two regions inside: normal system memory with low
+ * priority, and...
+ */
+ memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
+ get_system_memory(), 0, ~0ull);
+ memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
+ memory_region_set_enabled(&smram_as_mem, true);
+
+ if (smram) {
+ /* ... SMRAM with higher priority */
+ memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
+ memory_region_set_enabled(smram, true);
+ }
+
+ address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
+ kvm_memory_listener_register(kvm_state, &smram_listener,
+ &smram_address_space, 1, "kvm-smram");
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ uint64_t identity_base = 0xfffbc000;
+ uint64_t shadow_mem;
+ int ret;
+ struct utsname utsname;
+ Error *local_err = NULL;
+
+ /*
+ * Initialize SEV context, if required
+ *
+ * If no memory encryption is requested (ms->cgs == NULL) this is
+ * a no-op.
+ *
+ * It's also a no-op if a non-SEV confidential guest support
+ * mechanism is selected. SEV is the only mechanism available to
+ * select on x86 at present, so this doesn't arise, but if new
+ * mechanisms are supported in future (e.g. TDX), they'll need
+ * their own initialization either here or elsewhere.
+ */
+ ret = sev_kvm_init(ms->cgs, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+
+ if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+ error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
+ return -ENOTSUP;
+ }
+
+ has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
+ has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+ has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
+
+ hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
+
+ has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
+ if (has_exception_payload) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable exception payload cap: %s",
+ strerror(-ret));
+ return ret;
+ }
+ }
+
+ ret = kvm_get_supported_msrs(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_get_supported_feature_msrs(s);
+
+ uname(&utsname);
+ lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
+
+ /*
+ * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
+ * In order to use vm86 mode, an EPT identity map and a TSS are needed.
+ * Since these must be part of guest physical memory, we need to allocate
+ * them, both by setting their start addresses in the kernel and by
+ * creating a corresponding e820 entry. We need 4 pages before the BIOS.
+ *
+ * Older KVM versions may not support setting the identity map base. In
+ * that case we need to stick with the default, i.e. a 256K maximum BIOS
+ * size.
+ */
+ if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
+ /* Allows up to 16M BIOSes. */
+ identity_base = 0xfeffc000;
+
+ ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ /* Set TSS base one page after EPT identity map. */
+ ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Tell fw_cfg to notify the BIOS to reserve the range. */
+ ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
+ if (ret < 0) {
+ fprintf(stderr, "e820_add_entry() table is full\n");
+ return ret;
+ }
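+
+    /*
+     * Resulting layout when KVM_CAP_SET_IDENTITY_MAP_ADDR is available:
+     *   0xfeffc000: EPT identity map (1 page)
+     *   0xfeffd000: TSS (3 pages)
+     * 0xfeffc000 + 0x4000 == 0xff000000, leaving 16M for the BIOS below 4G.
+     */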
+
+ shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
+ if (shadow_mem != -1) {
+ shadow_mem /= 4096;
+ ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
+ object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
+ x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
+ smram_machine_done.notify = register_smram_listener;
+ qemu_add_machine_init_done_notifier(&smram_machine_done);
+ }
+
+ if (enable_cpu_pm) {
+ int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
+ int ret;
+
+/* Workaround for a kernel header with a typo. TODO: fix header and drop. */
+#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
+#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
+#endif
+ if (disable_exits) {
+ disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
+ KVM_X86_DISABLE_EXITS_HLT |
+ KVM_X86_DISABLE_EXITS_PAUSE |
+ KVM_X86_DISABLE_EXITS_CSTATE);
+ }
+
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
+ disable_exits);
+ if (ret < 0) {
+ error_report("kvm: guest stopping CPU not supported: %s",
+ strerror(-ret));
+ }
+ }
+
+ if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
+ X86MachineState *x86ms = X86_MACHINE(ms);
+
+ if (x86ms->bus_lock_ratelimit > 0) {
+ ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
+ if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
+ error_report("kvm: bus lock detection unsupported");
+ return -ENOTSUP;
+ }
+ ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
+ KVM_BUS_LOCK_DETECTION_EXIT);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable bus lock detection cap: %s",
+ strerror(-ret));
+ return ret;
+ }
+ ratelimit_init(&bus_lock_ratelimit_ctrl);
+ ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
+ x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
+ }
+ }
+
+ return 0;
+}
+
+static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
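+    /* vm86 segments have fixed attributes: present, DPL 3, R/W data. */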
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->db = 0;
+ lhs->s = 1;
+ lhs->l = 0;
+ lhs->g = 0;
+ lhs->avl = 0;
+ lhs->unusable = 0;
+}
+
+static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
+ lhs->db = (flags >> DESC_B_SHIFT) & 1;
+ lhs->s = (flags & DESC_S_MASK) != 0;
+ lhs->l = (flags >> DESC_L_SHIFT) & 1;
+ lhs->g = (flags & DESC_G_MASK) != 0;
+ lhs->avl = (flags & DESC_AVL_MASK) != 0;
+ lhs->unusable = !lhs->present;
+ lhs->padding = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
+}
+
+static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
+{
+ if (set) {
+ *kvm_reg = *qemu_reg;
+ } else {
+ *qemu_reg = *kvm_reg;
+ }
+}
+
+static int kvm_getput_regs(X86CPU *cpu, int set)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret = 0;
+
+ if (!set) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
+ kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
+ kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
+ kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
+ kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
+ kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
+ kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
+ kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
+#ifdef TARGET_X86_64
+ kvm_getput_reg(&regs.r8, &env->regs[8], set);
+ kvm_getput_reg(&regs.r9, &env->regs[9], set);
+ kvm_getput_reg(&regs.r10, &env->regs[10], set);
+ kvm_getput_reg(&regs.r11, &env->regs[11], set);
+ kvm_getput_reg(&regs.r12, &env->regs[12], set);
+ kvm_getput_reg(&regs.r13, &env->regs[13], set);
+ kvm_getput_reg(&regs.r14, &env->regs[14], set);
+ kvm_getput_reg(&regs.r15, &env->regs[15], set);
+#endif
+
+ kvm_getput_reg(&regs.rflags, &env->eflags, set);
+ kvm_getput_reg(&regs.rip, &env->eip, set);
+
+ if (set) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
+ }
+
+ return ret;
+}
+
+static int kvm_put_fpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_fpu fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof fpu);
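+    /* FSW bits 13:11 hold the top-of-stack pointer (fpstt). */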
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+ fpu.last_opcode = env->fpop;
+ fpu.last_ip = env->fpip;
+ fpu.last_dp = env->fpdp;
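+    /* ftwx is the abridged tag word, as in FXSAVE: 1 = valid, 0 = empty. */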
+ for (i = 0; i < 8; ++i) {
+ fpu.ftwx |= (!env->fptags[i]) << i;
+ }
+ memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
+ }
+ fpu.mxcsr = env->mxcsr;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
+}
+
+static int kvm_put_xsave(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ void *xsave = env->xsave_buf;
+
+ if (!has_xsave) {
+ return kvm_put_fpu(cpu);
+ }
+ x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
+}
+
+static int kvm_put_xcrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_xcrs xcrs = {};
+
+ if (!has_xcrs) {
+ return 0;
+ }
+
+ xcrs.nr_xcrs = 1;
+ xcrs.flags = 0;
+ xcrs.xcrs[0].xcr = 0;
+ xcrs.xcrs[0].value = env->xcr0;
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
+}
+
+static int kvm_put_sregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_sregs sregs;
+
+ memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
+ if (env->interrupt_injected >= 0) {
+ sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
+ (uint64_t)1 << (env->interrupt_injected % 64);
+ }
+
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs.es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs.cs, &env->segs[R_CS]);
+ set_seg(&sregs.ds, &env->segs[R_DS]);
+ set_seg(&sregs.es, &env->segs[R_ES]);
+ set_seg(&sregs.fs, &env->segs[R_FS]);
+ set_seg(&sregs.gs, &env->segs[R_GS]);
+ set_seg(&sregs.ss, &env->segs[R_SS]);
+ }
+
+ set_seg(&sregs.tr, &env->tr);
+ set_seg(&sregs.ldt, &env->ldt);
+
+ sregs.idt.limit = env->idt.limit;
+ sregs.idt.base = env->idt.base;
+ memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
+ sregs.gdt.limit = env->gdt.limit;
+ sregs.gdt.base = env->gdt.base;
+ memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
+
+ sregs.cr0 = env->cr[0];
+ sregs.cr2 = env->cr[2];
+ sregs.cr3 = env->cr[3];
+ sregs.cr4 = env->cr[4];
+
+ sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
+ sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
+
+ sregs.efer = env->efer;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
+static void kvm_msr_buf_reset(X86CPU *cpu)
+{
+ memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
+static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
+{
+ struct kvm_msrs *msrs = cpu->kvm_msr_buf;
+ void *limit = ((void *)msrs) + MSR_BUF_SIZE;
+ struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
+
+ assert((void *)(entry + 1) <= limit);
+
+ entry->index = index;
+ entry->reserved = 0;
+ entry->data = value;
+ msrs->nmsrs++;
+}
+
+static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
+{
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, index, value);
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+}
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
+{
+ int ret;
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
+ assert(ret == 1);
+}
+
+static int kvm_put_tscdeadline_msr(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int ret;
+
+ if (!has_msr_tsc_deadline) {
+ return 0;
+ }
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
+}
+
+/*
+ * Provide a separate write service for the feature control MSR in order to
+ * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
+ * before writing any other state because forcibly leaving nested mode
+ * invalidates the VCPU state.
+ */
+static int kvm_put_msr_feature_control(X86CPU *cpu)
+{
+ int ret;
+
+ if (!has_msr_feature_control) {
+ return 0;
+ }
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
+ cpu->env.msr_ia32_feature_control);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
+}
+
+static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
+{
+ uint32_t default1, can_be_one, can_be_zero;
+ uint32_t must_be_one;
+
+ switch (index) {
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ default1 = 0x00000016;
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ default1 = 0x0401e172;
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ default1 = 0x000011ff;
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ default1 = 0x00036dff;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ default1 = 0;
+ break;
+ default:
+ abort();
+ }
+
+ /* If a feature bit is set, the control can be either set or clear.
+ * Otherwise the value is limited to either 0 or 1 by default1.
+ */
+ can_be_one = features | default1;
+ can_be_zero = features | ~default1;
+ must_be_one = ~can_be_zero;
+
+ /*
+     * Bits 0:31  -> 0 if the control bit can be zero (i.e. 1 if it must be one).
+     * Bits 32:63 -> 1 if the control bit can be one.
+ */
+ return must_be_one | (((uint64_t)can_be_one) << 32);
+}
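+
+/*
+ * Worked example with a hypothetical feature word: for
+ * MSR_IA32_VMX_TRUE_PINBASED_CTLS, default1 is 0x00000016.  With
+ * features = 0x00000049:
+ *   can_be_one  = 0x49 | 0x16   = 0x0000005f
+ *   can_be_zero = 0x49 | ~0x16  = 0xffffffe9
+ *   must_be_one = ~can_be_zero  = 0x00000016
+ * yielding 0x0000005f00000016: the default1 bits read back as must-be-one,
+ * every other supported bit as allowed-zero.
+ */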
+
+static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
+{
+ uint64_t kvm_vmx_basic =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_VMX_BASIC);
+
+ if (!kvm_vmx_basic) {
+        /* If the kernel doesn't support the VMX feature (kvm_intel.nested=0),
+ * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
+ */
+ return;
+ }
+
+ uint64_t kvm_vmx_misc =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_VMX_MISC);
+ uint64_t kvm_vmx_ept_vpid =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_VMX_EPT_VPID_CAP);
+
+ /*
+ * If the guest is 64-bit, a value of 1 is allowed for the host address
+ * space size vmexit control.
+ */
+ uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
+ ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
+
+ /*
+ * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
+ * not change them for backwards compatibility.
+ */
+ uint64_t fixed_vmx_basic = kvm_vmx_basic &
+ (MSR_VMX_BASIC_VMCS_REVISION_MASK |
+ MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
+ MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
+
+ /*
+ * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
+ * change in the future but are always zero for now, clear them to be
+ * future proof. Bits 32-63 in theory could change, though KVM does
+ * not support dual-monitor treatment and probably never will; mask
+ * them out as well.
+ */
+ uint64_t fixed_vmx_misc = kvm_vmx_misc &
+ (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
+ MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
+
+ /*
+ * EPT memory types should not change either, so we do not bother
+ * adding features for them.
+ */
+ uint64_t fixed_vmx_ept_mask =
+ (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
+ MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
+ uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
+
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ f[FEAT_VMX_PROCBASED_CTLS]));
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ f[FEAT_VMX_PINBASED_CTLS]));
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ f[FEAT_VMX_ENTRY_CTLS]));
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
+ make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
+ f[FEAT_VMX_SECONDARY_CTLS]));
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
+ f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
+ f[FEAT_VMX_BASIC] | fixed_vmx_basic);
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
+ f[FEAT_VMX_MISC] | fixed_vmx_misc);
+ if (has_msr_vmx_vmfunc) {
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
+ }
+
+ /*
+ * Just to be safe, write these with constant values. The CRn_FIXED1
+ * MSRs are generated by KVM based on the vCPU's CPUID.
+ */
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
+ CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
+ CR4_VMXE_MASK);
+
+ if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
+ /* TSC multiplier (0x2032). */
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
+ } else {
+ /* Preemption timer (0x482E). */
+ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
+ }
+}
+
+static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
+{
+ uint64_t kvm_perf_cap =
+ kvm_arch_get_supported_msr_feature(kvm_state,
+ MSR_IA32_PERF_CAPABILITIES);
+
+ if (kvm_perf_cap) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
+ kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
+ }
+}
+
+static int kvm_buf_set_msrs(X86CPU *cpu)
+{
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
+
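+    /* A short count means KVM refused the MSR at entries[ret]. */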
+ if (ret < cpu->kvm_msr_buf->nmsrs) {
+ struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
+ error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
+ (uint32_t)e->index, (uint64_t)e->data);
+ }
+
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
+ return 0;
+}
+
+static void kvm_init_msrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ kvm_msr_buf_reset(cpu);
+ if (has_msr_arch_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
+ env->features[FEAT_ARCH_CAPABILITIES]);
+ }
+
+ if (has_msr_core_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+ env->features[FEAT_CORE_CAPABILITY]);
+ }
+
+ if (has_msr_perf_capabs && cpu->enable_pmu) {
+ kvm_msr_entry_add_perf(cpu, env->features);
+ }
+
+ if (has_msr_ucode_rev) {
+ kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
+ }
+
+ /*
+ * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
+            fprintf(stderr, "KVM_X86_SETUP_MCE: %s\n", strerror(-ret));
+ */
+ if (kvm_feature_msrs && cpu_has_vmx(env)) {
+ kvm_msr_entry_add_vmx(cpu, env->features);
+ }
+
+ assert(kvm_buf_set_msrs(cpu) == 0);
+}
+
+static int kvm_put_msrs(X86CPU *cpu, int level)
+{
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ kvm_msr_buf_reset(cpu);
+
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
+ if (has_msr_star) {
+ kvm_msr_entry_add(cpu, MSR_STAR, env->star);
+ }
+ if (has_msr_hsave_pa) {
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
+ }
+ if (has_msr_tsc_aux) {
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
+ }
+ if (has_msr_tsc_adjust) {
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
+ }
+ if (has_msr_misc_enable) {
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
+ env->msr_ia32_misc_enable);
+ }
+ if (has_msr_smbase) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
+ }
+ if (has_msr_smi_count) {
+ kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
+ }
+ if (has_msr_pkrs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
+ }
+ if (has_msr_bndcfgs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
+ }
+ if (has_msr_xss) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
+ }
+ if (has_msr_umwait) {
+ kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
+ }
+ if (has_msr_spec_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
+ }
+ if (has_tsc_scale_msr) {
+ kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
+ }
+
+ if (has_msr_tsx_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
+ }
+ if (has_msr_virt_ssbd) {
+ kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
+ }
+
+#ifdef TARGET_X86_64
+ if (lm_capable_kernel) {
+ kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
+ kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
+ }
+#endif
+
+ /*
+ * The following MSRs have side effects on the guest or are too heavy
+ * for normal writeback. Limit them to reset or full state updates.
+ */
+ if (level >= KVM_PUT_RESET_STATE) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
+ }
+
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
+ }
+
+ if (has_architectural_pmu_version > 0) {
+ if (has_architectural_pmu_version > 1) {
+ /* Stop the counter. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ }
+
+ /* Set the counter values. */
+ for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
+ env->msr_fixed_counters[i]);
+ }
+ for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
+ env->msr_gp_counters[i]);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
+ env->msr_gp_evtsel[i]);
+ }
+ if (has_architectural_pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+ env->msr_global_status);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ env->msr_global_ovf_ctrl);
+
+ /* Now start the PMU. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+ env->msr_fixed_ctr_ctrl);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ env->msr_global_ctrl);
+ }
+ }
+ /*
+ * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
+         * only sync them to KVM on the first cpu.
+ */
+ if (current_cpu == first_cpu) {
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+ env->msr_hv_guest_os_id);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+ env->msr_hv_hypercall);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+ env->msr_hv_tsc);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
+ env->msr_hv_reenlightenment_control);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
+ env->msr_hv_tsc_emulation_control);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
+ env->msr_hv_tsc_emulation_status);
+ }
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
+ env->msr_hv_vapic);
+ }
+ if (has_msr_hv_crash) {
+ int j;
+
+            for (j = 0; j < HV_CRASH_PARAMS; j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
+                                  env->msr_hv_crash_params[j]);
+            }
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
+ }
+ if (has_msr_hv_runtime) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
+ && hv_vpindex_settable) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
+ hyperv_vp_index(CPU(cpu)));
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ int j;
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
+ env->msr_hv_synic_control);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
+ env->msr_hv_synic_evt_page);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
+ env->msr_hv_synic_msg_page);
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
+ env->msr_hv_synic_sint[j]);
+ }
+ }
+ if (has_msr_hv_stimer) {
+ int j;
+
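+            /* CONFIG and COUNT MSRs are interleaved, hence the stride of 2. */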
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
+ env->msr_hv_stimer_config[j]);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
+ env->msr_hv_stimer_count[j]);
+ }
+ }
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
+
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ /* The CPU GPs if we write to a bit above the physical limit of
+             * the host CPU (and KVM emulates that).
+ */
+ uint64_t mask = env->mtrr_var[i].mask;
+ mask &= phys_mask;
+
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
+ env->mtrr_var[i].base);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
+ }
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
+ int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
+ 0x14, 1, R_EAX) & 0x7;
+
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
+ env->msr_rtit_ctrl);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
+ env->msr_rtit_status);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
+ env->msr_rtit_output_base);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
+ env->msr_rtit_output_mask);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
+ env->msr_rtit_cr3_match);
+ for (i = 0; i < addr_num; i++) {
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
+ env->msr_rtit_addrs[i]);
+ }
+ }
+
+ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
+ env->msr_ia32_sgxlepubkeyhash[0]);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
+ env->msr_ia32_sgxlepubkeyhash[1]);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
+ env->msr_ia32_sgxlepubkeyhash[2]);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
+ env->msr_ia32_sgxlepubkeyhash[3]);
+ }
+
+ /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
+ * kvm_put_msr_feature_control. */
+ }
+
+ if (env->mcg_cap) {
+ int i;
+
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
+ if (has_msr_mcg_ext_ctl) {
+ kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
+ }
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
+ }
+ }
+
+ return kvm_buf_set_msrs(cpu);
+}
+
+static int kvm_get_fpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_fpu fpu;
+ int i, ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
+ if (ret < 0) {
+ return ret;
+ }
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ env->fpop = fpu.last_opcode;
+ env->fpip = fpu.last_ip;
+ env->fpdp = fpu.last_dp;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((fpu.ftwx >> i) & 1);
+ }
+ memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
+ }
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
+
+static int kvm_get_xsave(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ void *xsave = env->xsave_buf;
+ int ret;
+
+ if (!has_xsave) {
+ return kvm_get_fpu(cpu);
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
+ if (ret < 0) {
+ return ret;
+ }
+ x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
+
+ return 0;
+}
+
+static int kvm_get_xcrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int i, ret;
+ struct kvm_xcrs xcrs;
+
+ if (!has_xcrs) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < xcrs.nr_xcrs; i++) {
+ /* Only support xcr0 now */
+ if (xcrs.xcrs[i].xcr == 0) {
+ env->xcr0 = xcrs.xcrs[i].value;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int kvm_get_sregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int bit, i, ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* There can only be one pending IRQ set in the bitmap at a time, so try
+ to find it and save its number instead (-1 for none). */
+ env->interrupt_injected = -1;
+ for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
+ if (sregs.interrupt_bitmap[i]) {
+ bit = ctz64(sregs.interrupt_bitmap[i]);
+ env->interrupt_injected = i * 64 + bit;
+ break;
+ }
+ }
+
+ get_seg(&env->segs[R_CS], &sregs.cs);
+ get_seg(&env->segs[R_DS], &sregs.ds);
+ get_seg(&env->segs[R_ES], &sregs.es);
+ get_seg(&env->segs[R_FS], &sregs.fs);
+ get_seg(&env->segs[R_GS], &sregs.gs);
+ get_seg(&env->segs[R_SS], &sregs.ss);
+
+ get_seg(&env->tr, &sregs.tr);
+ get_seg(&env->ldt, &sregs.ldt);
+
+ env->idt.limit = sregs.idt.limit;
+ env->idt.base = sregs.idt.base;
+ env->gdt.limit = sregs.gdt.limit;
+ env->gdt.base = sregs.gdt.base;
+
+ env->cr[0] = sregs.cr0;
+ env->cr[2] = sregs.cr2;
+ env->cr[3] = sregs.cr3;
+ env->cr[4] = sregs.cr4;
+
+ env->efer = sregs.efer;
+
+ /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
+ x86_update_hflags(env);
+
+ return 0;
+}
+
+static int kvm_get_msrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
+ int ret, i;
+ uint64_t mtrr_top_bits;
+
+ kvm_msr_buf_reset(cpu);
+
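+    /* The value arguments are placeholders; KVM fills in the data on read. */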
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
+ kvm_msr_entry_add(cpu, MSR_PAT, 0);
+ if (has_msr_star) {
+ kvm_msr_entry_add(cpu, MSR_STAR, 0);
+ }
+ if (has_msr_hsave_pa) {
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
+ }
+ if (has_msr_tsc_aux) {
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
+ }
+ if (has_msr_tsc_adjust) {
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
+ }
+ if (has_msr_tsc_deadline) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
+ }
+ if (has_msr_misc_enable) {
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
+ }
+ if (has_msr_smbase) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
+ }
+ if (has_msr_smi_count) {
+ kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
+ }
+ if (has_msr_feature_control) {
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
+ }
+ if (has_msr_pkrs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
+ }
+ if (has_msr_bndcfgs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
+ }
+ if (has_msr_xss) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
+ }
+ if (has_msr_umwait) {
+ kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
+ }
+ if (has_msr_spec_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
+ }
+ if (has_tsc_scale_msr) {
+ kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
+ }
+
+ if (has_msr_tsx_ctrl) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
+ }
+ if (has_msr_virt_ssbd) {
+ kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
+ }
+ if (!env->tsc_valid) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
+ env->tsc_valid = !runstate_is_running();
+ }
+
+#ifdef TARGET_X86_64
+ if (lm_capable_kernel) {
+ kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
+ kvm_msr_entry_add(cpu, MSR_FMASK, 0);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
+ }
+#endif
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
+ }
+ if (has_architectural_pmu_version > 0) {
+ if (has_architectural_pmu_version > 1) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+ }
+ for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
+ }
+ for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
+ }
+ }
+
+ if (env->mcg_cap) {
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
+ if (has_msr_mcg_ext_ctl) {
+ kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
+ }
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
+ }
+ }
+
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
+ }
+ if (has_msr_hv_crash) {
+ int j;
+
+ for (j = 0; j < HV_CRASH_PARAMS; j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
+ }
+ }
+ if (has_msr_hv_runtime) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
+ }
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
+ uint32_t msr;
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
+ for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+ kvm_msr_entry_add(cpu, msr, 0);
+ }
+ }
+ if (has_msr_hv_stimer) {
+ uint32_t msr;
+
+ for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
+ msr++) {
+ kvm_msr_entry_add(cpu, msr, 0);
+ }
+ }
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
+ }
+ }
+
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
+ int addr_num =
+ kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
+
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
+ for (i = 0; i < addr_num; i++) {
+ kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
+ }
+ }
+
+ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (ret < cpu->kvm_msr_buf->nmsrs) {
+ struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
+ error_report("error: failed to get MSR 0x%" PRIx32,
+ (uint32_t)e->index);
+ }
+
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
+ /*
+     * MTRR masks: each mask consists of 5 parts:
+     *   a  10..0 : must be zero
+     *   b  11    : valid bit
+     *   c n-1..12: actual mask bits
+     *   d  51..n : reserved, must be zero
+     *   e  63..52: reserved, must be zero
+     *
+     * 'n' is the number of physical bits supported by the CPU and is
+     * apparently always <= 52.  We know our 'n' but don't know what
+     * the destination's 'n' is; it might be smaller, in which case
+     * it masks (c) on loading.  It might be larger, in which case
+     * we fill 'd' so that d..c is consistent irrespective of the 'n'
+     * we're migrating to.
+ */
+
+ if (cpu->fill_mtrr_mask) {
+ QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
+ assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
+ mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
+ } else {
+ mtrr_top_bits = 0;
+ }
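+
+    /*
+     * For example, with cpu->phys_bits == 40 this is
+     * MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000: bits 51..40 are forced
+     * on, so a destination with a larger 'n' sees a consistent mask.
+     */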
+
+ for (i = 0; i < ret; i++) {
+ uint32_t index = msrs[i].index;
+ switch (index) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].data;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].data;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].data;
+ break;
+ case MSR_PAT:
+ env->pat = msrs[i].data;
+ break;
+ case MSR_STAR:
+ env->star = msrs[i].data;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_CSTAR:
+ env->cstar = msrs[i].data;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = msrs[i].data;
+ break;
+ case MSR_FMASK:
+ env->fmask = msrs[i].data;
+ break;
+ case MSR_LSTAR:
+ env->lstar = msrs[i].data;
+ break;
+#endif
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].data;
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = msrs[i].data;
+ break;
+ case MSR_TSC_ADJUST:
+ env->tsc_adjust = msrs[i].data;
+ break;
+ case MSR_IA32_TSCDEADLINE:
+ env->tsc_deadline = msrs[i].data;
+ break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = msrs[i].data;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ env->system_time_msr = msrs[i].data;
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ env->wall_clock_msr = msrs[i].data;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = msrs[i].data;
+ break;
+ case MSR_MCG_CTL:
+ env->mcg_ctl = msrs[i].data;
+ break;
+ case MSR_MCG_EXT_CTL:
+ env->mcg_ext_ctl = msrs[i].data;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = msrs[i].data;
+ break;
+ case MSR_IA32_SMBASE:
+ env->smbase = msrs[i].data;
+ break;
+ case MSR_SMI_COUNT:
+ env->msr_smi_count = msrs[i].data;
+ break;
+ case MSR_IA32_FEATURE_CONTROL:
+ env->msr_ia32_feature_control = msrs[i].data;
+ break;
+ case MSR_IA32_BNDCFGS:
+ env->msr_bndcfgs = msrs[i].data;
+ break;
+ case MSR_IA32_XSS:
+ env->xss = msrs[i].data;
+ break;
+ case MSR_IA32_UMWAIT_CONTROL:
+ env->umwait = msrs[i].data;
+ break;
+ case MSR_IA32_PKRS:
+ env->pkrs = msrs[i].data;
+ break;
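+        /*
+         * Note: the default label sits mid-switch on purpose; the MCE bank
+         * MSRs cover a runtime-variable range that cannot be expressed as
+         * case labels.
+         */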
+ default:
+ if (msrs[i].index >= MSR_MC0_CTL &&
+ msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
+ env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
+ }
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ env->async_pf_en_msr = msrs[i].data;
+ break;
+ case MSR_KVM_ASYNC_PF_INT:
+ env->async_pf_int_msr = msrs[i].data;
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ env->pv_eoi_en_msr = msrs[i].data;
+ break;
+ case MSR_KVM_STEAL_TIME:
+ env->steal_time_msr = msrs[i].data;
+ break;
+ case MSR_KVM_POLL_CONTROL: {
+ env->poll_control_msr = msrs[i].data;
+ break;
+ }
+ case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ env->msr_fixed_ctr_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ env->msr_global_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ env->msr_global_status = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ env->msr_global_ovf_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
+ env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
+ break;
+ case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
+ env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
+ break;
+ case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
+ env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_HYPERCALL:
+ env->msr_hv_hypercall = msrs[i].data;
+ break;
+ case HV_X64_MSR_GUEST_OS_ID:
+ env->msr_hv_guest_os_id = msrs[i].data;
+ break;
+ case HV_X64_MSR_APIC_ASSIST_PAGE:
+ env->msr_hv_vapic = msrs[i].data;
+ break;
+ case HV_X64_MSR_REFERENCE_TSC:
+ env->msr_hv_tsc = msrs[i].data;
+ break;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_VP_RUNTIME:
+ env->msr_hv_runtime = msrs[i].data;
+ break;
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
+ msrs[i].data;
+ break;
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
+ msrs[i].data;
+ break;
+ case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+ env->msr_hv_reenlightenment_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_TSC_EMULATION_CONTROL:
+ env->msr_hv_tsc_emulation_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ env->msr_hv_tsc_emulation_status = msrs[i].data;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = msrs[i].data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[0] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ env->mtrr_fixed[1] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[2] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ env->mtrr_fixed[3] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C8000:
+ env->mtrr_fixed[4] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D0000:
+ env->mtrr_fixed[5] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D8000:
+ env->mtrr_fixed[6] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E0000:
+ env->mtrr_fixed[7] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E8000:
+ env->mtrr_fixed[8] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F0000:
+ env->mtrr_fixed[9] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[10] = msrs[i].data;
+ break;
+ case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
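+            /* Base/mask MSRs are interleaved; odd MSR indices are masks. */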
+ if (index & 1) {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
+ mtrr_top_bits;
+ } else {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+ }
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ env->spec_ctrl = msrs[i].data;
+ break;
+ case MSR_AMD64_TSC_RATIO:
+ env->amd_tsc_scale_msr = msrs[i].data;
+ break;
+ case MSR_IA32_TSX_CTRL:
+ env->tsx_ctrl = msrs[i].data;
+ break;
+ case MSR_VIRT_SSBD:
+ env->virt_ssbd = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_CTL:
+ env->msr_rtit_ctrl = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_STATUS:
+ env->msr_rtit_status = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_BASE:
+ env->msr_rtit_output_base = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_MASK:
+ env->msr_rtit_output_mask = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_CR3_MATCH:
+ env->msr_rtit_cr3_match = msrs[i].data;
+ break;
+ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+ env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
+ break;
+ case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
+ env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
+ msrs[i].data;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_put_mp_state(X86CPU *cpu)
+{
+ struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+}
+
+static int kvm_get_mp_state(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ struct kvm_mp_state mp_state;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
+ if (ret < 0) {
+ return ret;
+ }
+ env->mp_state = mp_state.mp_state;
+ if (kvm_irqchip_in_kernel()) {
+ cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+ }
+ return 0;
+}
+
+static int kvm_get_apic(X86CPU *cpu)
+{
+ DeviceState *apic = cpu->apic_state;
+ struct kvm_lapic_state kapic;
+ int ret;
+
+ if (apic && kvm_irqchip_in_kernel()) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_get_apic_state(apic, &kapic);
+ }
+ return 0;
+}
+
+static int kvm_put_vcpu_events(X86CPU *cpu, int level)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ struct kvm_vcpu_events events = {};
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ events.flags = 0;
+
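+    /*
+     * With KVM_CAP_EXCEPTION_PAYLOAD, pending and injected exceptions are
+     * reported separately and the payload (e.g. DR6 or CR2 bits) travels
+     * with the event instead of being applied to the register eagerly.
+     */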
+ if (has_exception_payload) {
+ events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
+ events.exception.pending = env->exception_pending;
+ events.exception_has_payload = env->exception_has_payload;
+ events.exception_payload = env->exception_payload;
+ }
+ events.exception.nr = env->exception_nr;
+ events.exception.injected = env->exception_injected;
+ events.exception.has_error_code = env->has_error_code;
+ events.exception.error_code = env->error_code;
+
+ events.interrupt.injected = (env->interrupt_injected >= 0);
+ events.interrupt.nr = env->interrupt_injected;
+ events.interrupt.soft = env->soft_interrupt;
+
+ events.nmi.injected = env->nmi_injected;
+ events.nmi.pending = env->nmi_pending;
+ events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+
+ events.sipi_vector = env->sipi_vector;
+
+ if (has_msr_smbase) {
+ events.smi.smm = !!(env->hflags & HF_SMM_MASK);
+ events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
+ if (kvm_irqchip_in_kernel()) {
+ /* As soon as these are moved to the kernel, remove them
+ * from cs->interrupt_request.
+ */
+ events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
+ events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+ cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+ } else {
+ /* Keep these in cs->interrupt_request. */
+ events.smi.pending = 0;
+ events.smi.latched_init = 0;
+ }
+ /* Stop SMI delivery on old machine types to avoid a reboot
+         * on an incoming migration of an old VM.
+ */
+ if (!cpu->kvm_no_smi_migration) {
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+ }
+ }
+
+ if (level >= KVM_PUT_RESET_STATE) {
+ events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
+ if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+ events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+ }
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+static int kvm_get_vcpu_events(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
+ env->exception_pending = events.exception.pending;
+ env->exception_has_payload = events.exception_has_payload;
+ env->exception_payload = events.exception_payload;
+ } else {
+ env->exception_pending = 0;
+ env->exception_has_payload = false;
+ }
+ env->exception_injected = events.exception.injected;
+ env->exception_nr =
+ (env->exception_pending || env->exception_injected) ?
+ events.exception.nr : -1;
+ env->has_error_code = events.exception.has_error_code;
+ env->error_code = events.exception.error_code;
+
+ env->interrupt_injected =
+ events.interrupt.injected ? events.interrupt.nr : -1;
+ env->soft_interrupt = events.interrupt.soft;
+
+ env->nmi_injected = events.nmi.injected;
+ env->nmi_pending = events.nmi.pending;
+ if (events.nmi.masked) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+
+ if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
+ if (events.smi.smm) {
+ env->hflags |= HF_SMM_MASK;
+ } else {
+ env->hflags &= ~HF_SMM_MASK;
+ }
+ if (events.smi.pending) {
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+ } else {
+ cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+ }
+ if (events.smi.smm_inside_nmi) {
+ env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+ }
+ if (events.smi.latched_init) {
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+ } else {
+ cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+ }
+ }
+
+ env->sipi_vector = events.sipi_vector;
+
+ return 0;
+}
+
+static int kvm_guest_debug_workarounds(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int ret = 0;
+ unsigned long reinject_trap = 0;
+
+ if (!kvm_has_vcpu_events()) {
+ if (env->exception_nr == EXCP01_DB) {
+ reinject_trap = KVM_GUESTDBG_INJECT_DB;
+ } else if (env->exception_injected == EXCP03_INT3) {
+ reinject_trap = KVM_GUESTDBG_INJECT_BP;
+ }
+ kvm_reset_exception(env);
+ }
+
+ /*
+ * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+ * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+ * by updating the debug state once again if single-stepping is on.
+ * Another reason to call kvm_update_guest_debug here is a pending debug
+ * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
+ * reinject it via SET_GUEST_DEBUG.
+ */
+ if (reinject_trap ||
+ (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
+ ret = kvm_update_guest_debug(cs, reinject_trap);
+ }
+ return ret;
+}
+
+static int kvm_put_debugregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_debugregs dbgregs;
+ int i;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ memset(&dbgregs, 0, sizeof(dbgregs));
+ for (i = 0; i < 4; i++) {
+ dbgregs.db[i] = env->dr[i];
+ }
+ dbgregs.dr6 = env->dr[6];
+ dbgregs.dr7 = env->dr[7];
+ dbgregs.flags = 0;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
+}
+
+static int kvm_get_debugregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_debugregs dbgregs;
+ int i, ret;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
+ if (ret < 0) {
+ return ret;
+ }
+ for (i = 0; i < 4; i++) {
+ env->dr[i] = dbgregs.db[i];
+ }
+ env->dr[4] = env->dr[6] = dbgregs.dr6;
+ env->dr[5] = env->dr[7] = dbgregs.dr7;
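+ /*
+ * DR4/DR5 mirror DR6/DR7 here: on real hardware they alias the
+ * latter when CR4.DE is clear, and KVM only transfers db[0-3],
+ * dr6 and dr7, so the aliases are kept in sync by hand.
+ */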
+
+ return 0;
+}
+
+static int kvm_put_nested_state(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int max_nested_state_len = kvm_max_nested_state_length();
+
+ if (!env->nested_state) {
+ return 0;
+ }
+
+ /*
+ * Copy flags that are affected by reset from env->hflags and env->hflags2.
+ */
+ if (env->hflags & HF_GUEST_MASK) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
+ }
+
+ /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
+ if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
+ }
+
+ assert(env->nested_state->size <= max_nested_state_len);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
+}
+
+static int kvm_get_nested_state(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int max_nested_state_len = kvm_max_nested_state_length();
+ int ret;
+
+ if (!env->nested_state) {
+ return 0;
+ }
+
+ /*
+ * It is possible that migration restored a smaller size into
+ * nested_state->hdr.size than what our kernel supports.
+ * We preserve the migration origin's nested_state->hdr.size for
+ * the call to KVM_SET_NESTED_STATE, but want our next call to
+ * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
+ */
+ env->nested_state->size = max_nested_state_len;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /*
+ * Copy flags that are affected by reset to env->hflags and env->hflags2.
+ */
+ if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
+ env->hflags |= HF_GUEST_MASK;
+ } else {
+ env->hflags &= ~HF_GUEST_MASK;
+ }
+
+ /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
+ if (cpu_has_svm(env)) {
+ if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
+ env->hflags2 |= HF2_GIF_MASK;
+ } else {
+ env->hflags2 &= ~HF2_GIF_MASK;
+ }
+ }
+
+ return ret;
+}
+
+int kvm_arch_put_registers(CPUState *cpu, int level)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int ret;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ /* must be before kvm_put_nested_state so that EFER.SVME is set */
+ ret = kvm_put_sregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_nested_state(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_put_msr_feature_control(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (level == KVM_PUT_FULL_STATE) {
+ /* We don't check for kvm_arch_set_tsc_khz() errors here,
+ * because TSC frequency mismatch shouldn't abort migration,
+ * unless the user explicitly asked for a more strict TSC
+ * setting (e.g. using an explicit "tsc-freq" option).
+ */
+ kvm_arch_set_tsc_khz(cpu);
+ }
+
+ ret = kvm_getput_regs(x86_cpu, 1);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_xsave(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_xcrs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ /* must be before kvm_put_msrs */
+ ret = kvm_inject_mce_oldstyle(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_msrs(x86_cpu, level);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_vcpu_events(x86_cpu, level);
+ if (ret < 0) {
+ return ret;
+ }
+ if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_mp_state(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = kvm_put_tscdeadline_msr(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_debugregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ /* must be last */
+ ret = kvm_guest_debug_workarounds(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ int ret;
+
+ assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
+
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ /*
+ * KVM_GET_MPSTATE can modify CS and RIP, call it before
+ * KVM_GET_REGS and KVM_GET_SREGS.
+ */
+ ret = kvm_get_mp_state(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_getput_regs(cpu, 0);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_xsave(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_xcrs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_sregs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_msrs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_apic(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_debugregs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_nested_state(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = 0;
+ out:
+ cpu_sync_bndcs_hflags(&cpu->env);
+ return ret;
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ int ret;
+
+ /* Inject NMI */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ qemu_mutex_lock_iothread();
+ cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ qemu_mutex_unlock_iothread();
+ DPRINTF("injected NMI\n");
+ ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ qemu_mutex_lock_iothread();
+ cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ qemu_mutex_unlock_iothread();
+ DPRINTF("injected SMI\n");
+ ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ }
+
+ if (!kvm_pic_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
+
+ /* Force the VCPU out of its inner loop to process any INIT requests
+ * or (for userspace APIC, but it is cheap to combine the checks here)
+ * pending TPR access reports.
+ */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ cpu->exit_request = 1;
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu->exit_request = 1;
+ }
+ }
+
+ if (!kvm_pic_in_kernel()) {
+ /* Try to inject an interrupt if the guest can accept it */
+ if (run->ready_for_interrupt_injection &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) {
+ int irq;
+
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ struct kvm_interrupt intr;
+
+ intr.irq = irq;
+ DPRINTF("injected interrupt %d\n", irq);
+ ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
+ if (ret < 0) {
+ fprintf(stderr,
+ "KVM: injection failed, interrupt lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ }
+
+ /* If we have an interrupt but the guest is not ready to receive an
+ * interrupt, request an interrupt window exit. This will
+ * cause a return to userspace as soon as the guest is ready to
+ * receive interrupts. */
+ if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ run->request_interrupt_window = 1;
+ } else {
+ run->request_interrupt_window = 0;
+ }
+
+ DPRINTF("setting tpr\n");
+ run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
+
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+static void kvm_rate_limit_on_bus_lock(void)
+{
+ uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
+
+ if (delay_ns) {
+ g_usleep(delay_ns / SCALE_US);
+ }
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ if (run->flags & KVM_RUN_X86_SMM) {
+ env->hflags |= HF_SMM_MASK;
+ } else {
+ env->hflags &= ~HF_SMM_MASK;
+ }
+ if (run->if_flag) {
+ env->eflags |= IF_MASK;
+ } else {
+ env->eflags &= ~IF_MASK;
+ }
+ if (run->flags & KVM_RUN_X86_BUS_LOCK) {
+ kvm_rate_limit_on_bus_lock();
+ }
+
+ /* We need to protect the apic state against concurrent accesses from
+ * different threads in case the userspace irqchip is used. */
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
+ cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
+ cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_unlock_iothread();
+ }
+ return cpu_get_mem_attrs(env);
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+ /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
+ assert(env->mcg_cap);
+
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+
+ kvm_cpu_synchronize_state(cs);
+
+ if (env->exception_nr == EXCP08_DBLE) {
+ /* this means triple fault */
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ cs->exit_request = 1;
+ return 0;
+ }
+ kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
+ env->has_error_code = 0;
+
+ cs->halted = 0;
+ if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+ }
+ }
+
+ if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ kvm_cpu_synchronize_state(cs);
+ do_cpu_init(cpu);
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ return 0;
+ }
+
+ if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ }
+ if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cs->halted = 0;
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ kvm_cpu_synchronize_state(cs);
+ do_cpu_sipi(cpu);
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ kvm_cpu_synchronize_state(cs);
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+
+ return cs->halted;
+}
+
+static int kvm_handle_halt(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
+ if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) &&
+ !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cs->halted = 1;
+ return EXCP_HLT;
+ }
+
+ return 0;
+}
+
+static int kvm_handle_tpr_access(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
+ run->tpr_access.is_write ? TPR_ACCESS_WRITE
+ : TPR_ACCESS_READ);
+ return 1;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static const uint8_t int3 = 0xcc;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint8_t int3;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
+ return -EINVAL;
+ }
+ if (int3 != 0xcc) {
+ return 0;
+ }
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct {
+ target_ulong addr;
+ int len;
+ int type;
+} hw_breakpoint[4];
+
+static int nb_hw_breakpoint;
+
+static int find_hw_breakpoint(target_ulong addr, int len, int type)
+{
+ int n;
+
+ for (n = 0; n < nb_hw_breakpoint; n++) {
+ if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
+ (hw_breakpoint[n].len == len || len == -1)) {
+ return n;
+ }
+ }
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ len = 1;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ case 4:
+ case 8:
+ if (addr & (len - 1)) {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ if (nb_hw_breakpoint == 4) {
+ return -ENOBUFS;
+ }
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
+ return -EEXIST;
+ }
+ hw_breakpoint[nb_hw_breakpoint].addr = addr;
+ hw_breakpoint[nb_hw_breakpoint].len = len;
+ hw_breakpoint[nb_hw_breakpoint].type = type;
+ nb_hw_breakpoint++;
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+ if (n < 0) {
+ return -ENOENT;
+ }
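+ /* Compact the table: move the last entry into the slot being freed. */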
+ nb_hw_breakpoint--;
+ hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = 0;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+static int kvm_handle_debug(X86CPU *cpu,
+ struct kvm_debug_exit_arch *arch_info)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int ret = 0;
+ int n;
+
+ if (arch_info->exception == EXCP01_DB) {
+ if (arch_info->dr6 & DR6_BS) {
+ if (cs->singlestep_enabled) {
+ ret = EXCP_DEBUG;
+ }
+ } else {
+ for (n = 0; n < 4; n++) {
+ if (arch_info->dr6 & (1 << n)) {
+ switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
+ case 0x0:
+ ret = EXCP_DEBUG;
+ break;
+ case 0x1:
+ ret = EXCP_DEBUG;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ break;
+ case 0x3:
+ ret = EXCP_DEBUG;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_ACCESS;
+ break;
+ }
+ }
+ }
+ }
+ } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
+ ret = EXCP_DEBUG;
+ }
+ if (ret == 0) {
+ cpu_synchronize_state(cs);
+ assert(env->exception_nr == -1);
+
+ /* pass to guest */
+ kvm_queue_exception(env, arch_info->exception,
+ arch_info->exception == EXCP01_DB,
+ arch_info->dr6);
+ env->has_error_code = 0;
+ }
+
+ return ret;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+ const uint8_t type_code[] = {
+ [GDB_BREAKPOINT_HW] = 0x0,
+ [GDB_WATCHPOINT_WRITE] = 0x1,
+ [GDB_WATCHPOINT_ACCESS] = 0x3
+ };
+ const uint8_t len_code[] = {
+ [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
+ };
+ int n;
+
+ if (kvm_sw_breakpoints_active(cpu)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+ if (nb_hw_breakpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ dbg->arch.debugreg[7] = 0x0600;
+ for (n = 0; n < nb_hw_breakpoint; n++) {
+ dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
+ dbg->arch.debugreg[7] |= (2 << (n * 2)) |
+ (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
+ ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
+ }
+ }
+}
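+
+/*
+ * A rough worked example of the DR7 encoding above, for illustration only:
+ * installing a 4-byte write watchpoint as hardware breakpoint 0 sets the
+ * global-enable bit G0 (bit 1), R/W0 = 0b01 (data write) at bits 17:16 and
+ * LEN0 = 0b11 (4 bytes) at bits 19:18, so together with the initial 0x0600
+ * the register ends up as 0x0600 | 0x2 | (0x1 << 16) | (0x3 << 18)
+ * = 0x000d0602.
+ */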
+
+static bool has_sgx_provisioning;
+
+static bool __kvm_enable_sgx_provisioning(KVMState *s)
+{
+ int fd, ret;
+
+ if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
+ return false;
+ }
+
+ fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
+ if (fd < 0) {
+ return false;
+ }
+
+ ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
+ if (ret) {
+ error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
+ exit(1);
+ }
+ close(fd);
+ return true;
+}
+
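+/*
+ * MEMORIZE (defined earlier in this file) caches the first result in
+ * has_sgx_provisioning, so the /dev/sgx_provision probe and the cap
+ * enable above run at most once.
+ */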
+bool kvm_enable_sgx_provisioning(KVMState *s)
+{
+ return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
+}
+
+static bool host_supports_vmx(void)
+{
+ uint32_t ecx, unused;
+
+ host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
+ return ecx & CPUID_EXT_VMX;
+}
+
+#define VMX_INVALID_GUEST_STATE 0x80000021
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ uint64_t code;
+ int ret;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_HLT:
+ DPRINTF("handle_hlt\n");
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_halt(cpu);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_SET_TPR:
+ ret = 0;
+ break;
+ case KVM_EXIT_TPR_ACCESS:
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_tpr_access(cpu);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_FAIL_ENTRY:
+ code = run->fail_entry.hardware_entry_failure_reason;
+ fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
+ code);
+ if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
+ fprintf(stderr,
+ "\nIf you're running a guest on an Intel machine without "
+ "unrestricted mode\n"
+ "support, the failure can be most likely due to the guest "
+ "entering an invalid\n"
+ "state for Intel VT. For example, the guest maybe running "
+ "in big real mode\n"
+ "which is not supported on less recent Intel processors."
+ "\n\n");
+ }
+ ret = -1;
+ break;
+ case KVM_EXIT_EXCEPTION:
+ fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
+ run->ex.exception, run->ex.error_code);
+ ret = -1;
+ break;
+ case KVM_EXIT_DEBUG:
+ DPRINTF("kvm_exit_debug\n");
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_debug(cpu, &run->debug.arch);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_HYPERV:
+ ret = kvm_hv_handle_exit(cpu, &run->hyperv);
+ break;
+ case KVM_EXIT_IOAPIC_EOI:
+ ioapic_eoi_broadcast(run->eoi.vector);
+ ret = 0;
+ break;
+ case KVM_EXIT_X86_BUS_LOCK:
+ /* already handled in kvm_arch_post_run */
+ ret = 0;
+ break;
+ default:
+ fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ kvm_cpu_synchronize_state(cs);
+ return !(env->cr[0] & CR0_PE_MASK) ||
+ ((env->segs[R_CS].selector & 3) != 3);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+ /* We know at this point that we're using the in-kernel
+ * irqchip, so we can use irqfds, and on x86 we know
+ * we can use msi via irqfd and GSI routing.
+ */
+ kvm_msi_via_irqfd_allowed = true;
+ kvm_gsi_routing_allowed = true;
+
+ if (kvm_irqchip_is_split()) {
+ int i;
+
+ /* If the ioapic is in QEMU and the lapics are in KVM, reserve
+ * MSI routes for signaling interrupts to the local apics. */
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
+ error_report("Could not enable split IRQ mode.");
+ exit(1);
+ }
+ }
+ }
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ int ret;
+ if (kvm_kernel_irqchip_split()) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
+ if (ret) {
+ error_report("Could not enable split irqchip mode: %s",
+ strerror(-ret));
+ exit(1);
+ } else {
+ DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
+ kvm_split_irqchip = true;
+ return 1;
+ }
+ } else {
+ return 0;
+ }
+}
+
+uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
+{
+ CPUX86State *env;
+ uint64_t ext_id;
+
+ if (!first_cpu) {
+ return address;
+ }
+ env = &X86_CPU(first_cpu)->env;
+ if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
+ return address;
+ }
+
+ /*
+ * If the remappable format bit is set, or the upper bits are
+ * already set in address_hi, or the low extended bits aren't
+ * there anyway, do nothing.
+ */
+ ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
+ if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
+ return address;
+ }
+
+ address &= ~ext_id;
+ address |= ext_id << 35;
+ return address;
+}
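+
+/*
+ * Illustration only, assuming the KVM_FEATURE_MSI_EXT_DEST_ID layout in
+ * which APIC ID bits 14:8 travel in MSI address bits 11:5: for destination
+ * APIC ID 0x123, ID bit 8 arrives as address bit 5 (ext_id = 0x20) and the
+ * swizzle above moves it to bit 40 (0x20 << 35), i.e. into address_hi,
+ * where the extended destination ID bits are expected.
+ */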
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ X86IOMMUState *iommu = x86_iommu_get_default();
+
+ if (iommu) {
+ X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
+
+ if (class->int_remap) {
+ int ret;
+ MSIMessage src, dst;
+
+ src.address = route->u.msi.address_hi;
+ src.address <<= VTD_MSI_ADDR_HI_SHIFT;
+ src.address |= route->u.msi.address_lo;
+ src.data = route->u.msi.data;
+
+ ret = class->int_remap(iommu, &src, &dst, dev ? \
+ pci_requester_id(dev) : \
+ X86_IOMMU_SID_INVALID);
+ if (ret) {
+ trace_kvm_x86_fixup_msi_error(route->gsi);
+ return 1;
+ }
+
+ /*
+ * Handle an untranslated compatibility format interrupt with
+ * extended destination ID in the low bits 11-5.
+ */
+ dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
+
+ route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
+ route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
+ route->u.msi.data = dst.data;
+ return 0;
+ }
+ }
+
+ address = kvm_swizzle_msi_ext_dest_id(address);
+ route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
+ route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
+ return 0;
+}
+
+typedef struct MSIRouteEntry MSIRouteEntry;
+
+struct MSIRouteEntry {
+ PCIDevice *dev; /* Device pointer */
+ int vector; /* MSI/MSIX vector index */
+ int virq; /* Virtual IRQ index */
+ QLIST_ENTRY(MSIRouteEntry) list;
+};
+
+/* List of used GSI routes */
+static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
+ QLIST_HEAD_INITIALIZER(msi_route_list);
+
+static void kvm_update_msi_routes_all(void *private, bool global,
+ uint32_t index, uint32_t mask)
+{
+ int cnt = 0, vector;
+ MSIRouteEntry *entry;
+ MSIMessage msg;
+ PCIDevice *dev;
+
+ /* TODO: explicit route update */
+ QLIST_FOREACH(entry, &msi_route_list, list) {
+ cnt++;
+ vector = entry->vector;
+ dev = entry->dev;
+ if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
+ msg = msix_get_message(dev, vector);
+ } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
+ msg = msi_get_message(dev, vector);
+ } else {
+ /*
+ * Either MSI/MSIX is disabled for the device, or the
+ * specific message was masked out. Skip this one.
+ */
+ continue;
+ }
+ kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
+ }
+ kvm_irqchip_commit_routes(kvm_state);
+ trace_kvm_x86_update_msi_routes(cnt);
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ static bool notify_list_inited = false;
+ MSIRouteEntry *entry;
+
+ if (!dev) {
+ /* These are (possibly) IOAPIC routes only used for split
+ * kernel irqchip mode, while we only keep track of
+ * PCI devices here. */
+ return 0;
+ }
+
+ entry = g_new0(MSIRouteEntry, 1);
+ entry->dev = dev;
+ entry->vector = vector;
+ entry->virq = route->gsi;
+ QLIST_INSERT_HEAD(&msi_route_list, entry, list);
+
+ trace_kvm_x86_add_msi_route(route->gsi);
+
+ if (!notify_list_inited) {
+ /* The first time we add a route, register ourselves in the
+ * IOMMU's IEC notifier list if needed. */
+ X86IOMMUState *iommu = x86_iommu_get_default();
+ if (iommu) {
+ x86_iommu_iec_register_notifier(iommu,
+ kvm_update_msi_routes_all,
+ NULL);
+ }
+ notify_list_inited = true;
+ }
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ MSIRouteEntry *entry, *next;
+ QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
+ if (entry->virq == virq) {
+ trace_kvm_x86_remove_msi_route(virq);
+ QLIST_REMOVE(entry, list);
+ g_free(entry);
+ break;
+ }
+ }
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
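+ /*
+ * Should be unreachable on x86, where MSIs are set up through
+ * explicit GSI routes rather than a direct data->GSI mapping.
+ */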
+ abort();
+}
+
+bool kvm_has_waitpkg(void)
+{
+ return has_msr_umwait;
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+ return !sev_es_enabled();
+}
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
new file mode 100644
index 000000000..a978509d5
--- /dev/null
+++ b/target/i386/kvm/kvm_i386.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU KVM support -- x86 specific functions.
+ *
+ * Copyright (c) 2012 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_I386_H
+#define QEMU_KVM_I386_H
+
+#include "sysemu/kvm.h"
+
+#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
+
+#ifdef CONFIG_KVM
+
+#define kvm_pit_in_kernel() \
+ (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
+#define kvm_pic_in_kernel() \
+ (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
+#define kvm_ioapic_in_kernel() \
+ (kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
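+
+/*
+ * With a split irqchip only the local APIC is emulated in the kernel;
+ * the PIC, PIT and IOAPIC stay in QEMU, hence the
+ * !kvm_irqchip_is_split() checks above.
+ */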
+
+#else
+
+#define kvm_pit_in_kernel() 0
+#define kvm_pic_in_kernel() 0
+#define kvm_ioapic_in_kernel() 0
+
+#endif /* CONFIG_KVM */
+
+bool kvm_has_smm(void);
+bool kvm_has_adjust_clock(void);
+bool kvm_has_adjust_clock_stable(void);
+bool kvm_has_exception_payload(void);
+void kvm_synchronize_all_tsc(void);
+void kvm_arch_reset_vcpu(X86CPU *cs);
+void kvm_arch_do_init_vcpu(X86CPU *cs);
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
+
+bool kvm_enable_x2apic(void);
+bool kvm_has_x2apic_api(void);
+bool kvm_has_waitpkg(void);
+
+bool kvm_hv_vpindex_settable(void);
+bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp);
+
+uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
+
+bool kvm_enable_sgx_provisioning(KVMState *s);
+
+#endif
diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build
new file mode 100644
index 000000000..736df8b72
--- /dev/null
+++ b/target/i386/kvm/meson.build
@@ -0,0 +1,14 @@
+i386_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+
+i386_softmmu_kvm_ss = ss.source_set()
+
+i386_softmmu_kvm_ss.add(files(
+ 'kvm.c',
+ 'kvm-cpu.c',
+))
+
+i386_softmmu_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c'))
+
+i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))
+
+i386_softmmu_ss.add_all(when: 'CONFIG_KVM', if_true: i386_softmmu_kvm_ss)
diff --git a/target/i386/kvm/sev-stub.c b/target/i386/kvm/sev-stub.c
new file mode 100644
index 000000000..6080c007a
--- /dev/null
+++ b/target/i386/kvm/sev-stub.c
@@ -0,0 +1,22 @@
+/*
+ * QEMU SEV stub
+ *
+ * Copyright Advanced Micro Devices 2018
+ *
+ * Authors:
+ * Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "sev.h"
+
+int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ /* If we get here, cgs must be some non-SEV thing */
+ return 0;
+}
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
new file mode 100644
index 000000000..7c369db1e
--- /dev/null
+++ b/target/i386/kvm/trace-events
@@ -0,0 +1,7 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# kvm.c
+kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %" PRIu32
+kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d"
+kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d"
+kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
diff --git a/target/i386/kvm/trace.h b/target/i386/kvm/trace.h
new file mode 100644
index 000000000..46b75c694
--- /dev/null
+++ b/target/i386/kvm/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_i386_kvm.h"
diff --git a/target/i386/machine.c b/target/i386/machine.c
new file mode 100644
index 000000000..83c2b9152
--- /dev/null
+++ b/target/i386/machine.c
@@ -0,0 +1,1598 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/isa/isa.h"
+#include "migration/cpu.h"
+#include "kvm/hyperv.h"
+#include "hw/i386/x86.h"
+#include "kvm/kvm_i386.h"
+
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+
+#include "qemu/error-report.h"
+
+static const VMStateDescription vmstate_segment = {
+ .name = "segment",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(selector, SegmentCache),
+ VMSTATE_UINTTL(base, SegmentCache),
+ VMSTATE_UINT32(limit, SegmentCache),
+ VMSTATE_UINT32(flags, SegmentCache),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_SEGMENT(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(SegmentCache), \
+ .vmsd = &vmstate_segment, \
+ .flags = VMS_STRUCT, \
+ .offset = offsetof(_state, _field) \
+ + type_check(SegmentCache,typeof_field(_state, _field)) \
+}
+
+#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
+
+static const VMStateDescription vmstate_xmm_reg = {
+ .name = "xmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_XMM_REGS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_xmm_reg, ZMMReg)
+
+/* YMMH format is the same as XMM, but for bits 128-255 */
+static const VMStateDescription vmstate_ymmh_reg = {
+ .name = "ymmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \
+ vmstate_ymmh_reg, ZMMReg)
+
+static const VMStateDescription vmstate_zmmh_reg = {
+ .name = "zmmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_zmmh_reg, ZMMReg)
+
+#ifdef TARGET_X86_64
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+ .name = "hi16_zmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_hi16_zmm_reg, ZMMReg)
+#endif
+
+static const VMStateDescription vmstate_bnd_regs = {
+ .name = "bnd_regs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(lb, BNDReg),
+ VMSTATE_UINT64(ub, BNDReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_BND_REGS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
+
+static const VMStateDescription vmstate_mtrr_var = {
+ .name = "mtrr_var",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(base, MTRRVar),
+ VMSTATE_UINT64(mask, MTRRVar),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
+
+typedef struct x86_FPReg_tmp {
+ FPReg *parent;
+ uint64_t tmp_mant;
+ uint16_t tmp_exp;
+} x86_FPReg_tmp;
+
+static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ *pmant = temp.l.lower;
+ *pexp = temp.l.upper;
+}
+
+static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.upper = upper;
+ temp.l.lower = mant;
+ return temp.d;
+}
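+
+/*
+ * For reference (standard x87 80-bit extended format, not specific to
+ * this code): 'mant' is the 64-bit significand with an explicit integer
+ * bit, and the upper 16 bits pack the sign and the 15-bit biased exponent
+ * (bias 16383); e.g. 1.0 is mant 0x8000000000000000, exp 0x3fff.
+ */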
+
+static int fpreg_pre_save(void *opaque)
+{
+ x86_FPReg_tmp *tmp = opaque;
+
+ /* We save the real CPU data (in case of MMX usage, only 'mant'
+ * contains the MMX register). */
+ cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
+
+ return 0;
+}
+
+static int fpreg_post_load(void *opaque, int version)
+{
+ x86_FPReg_tmp *tmp = opaque;
+
+ tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
+ return 0;
+}
+
+static const VMStateDescription vmstate_fpreg_tmp = {
+ .name = "fpreg_tmp",
+ .post_load = fpreg_post_load,
+ .pre_save = fpreg_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
+ VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_fpreg = {
+ .name = "fpreg",
+ .fields = (VMStateField[]) {
+ VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
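+
+/*
+ * VMSTATE_WITH_TMP allocates an x86_FPReg_tmp during save/load, points
+ * tmp->parent at the FPReg being migrated and runs the pre_save/post_load
+ * hooks above, so the floatx80 goes on the wire as a stable mant/exp pair
+ * instead of a host-dependent long double.
+ */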
+
+static int cpu_pre_save(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+ env->v_tpr = env->int_ctl & V_TPR_MASK;
+ /* FPU */
+ env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ env->fptag_vmstate = 0;
+ for (i = 0; i < 8; i++) {
+ env->fptag_vmstate |= ((!env->fptags[i]) << i);
+ }
+
+ env->fpregs_format_vmstate = 0;
+
+ /*
+ * Real mode guest segment registers' DPL should be zero.
+ * Older KVM versions were setting it wrongly.
+ * Fixing it allows live migration to hosts with unrestricted guest
+ * support (otherwise the migration fails with an invalid guest state
+ * error).
+ */
+ if (!(env->cr[0] & CR0_PE_MASK) &&
+ (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+ env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
+ env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
+ env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
+ env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
+ env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
+ env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
+ }
+
+#ifdef CONFIG_KVM
+ /*
+ * In case the vCPU may have enabled VMX, we need to make sure the kernel
+ * has the required capabilities in order to perform migration correctly:
+ *
+ * 1) We must be able to extract the vCPU's nested state from KVM.
+ *
+ * 2) In case the vCPU is running in guest mode and has a pending exception,
+ * we must be able to determine whether it is in a pending or an injected
+ * state. Note that if KVM doesn't have the required capability to do so,
+ * a pending/injected exception will always appear as an
+ * injected exception.
+ */
+ if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
+ (!env->nested_state ||
+ (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
+ env->exception_injected))) {
+ error_report("Guest maybe enabled nested virtualization but kernel "
+ "does not support required capabilities to save vCPU "
+ "nested state");
+ return -EINVAL;
+ }
+#endif
+
+ /*
+ * When the vCPU is running L2 and an exception is still pending,
+ * it can potentially be intercepted by the L1 hypervisor, in
+ * contrast to an injected exception, which cannot be
+ * intercepted anymore.
+ *
+ * Furthermore, when an L2 exception is intercepted by the L1
+ * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
+ * should not yet be set in the respective vCPU register.
+ * Thus, in case an exception is pending, it is
+ * important to save the exception payload separately.
+ *
+ * Therefore, if an exception is not in a pending state
+ * or the vCPU is not in guest mode, it is not important to
+ * distinguish between a pending and an injected exception,
+ * and we don't need to store the exception payload separately.
+ *
+ * In order to preserve better backwards-compatible migration,
+ * convert a pending exception to an injected exception
+ * when it is not important to distinguish between them,
+ * as described above.
+ */
+ if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
+ env->exception_pending = 0;
+ env->exception_injected = 1;
+
+ if (env->exception_has_payload) {
+ if (env->exception_nr == EXCP01_DB) {
+ env->dr[6] = env->exception_payload;
+ } else if (env->exception_nr == EXCP0E_PAGE) {
+ env->cr[2] = env->exception_payload;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->tsc_khz && env->user_tsc_khz &&
+ env->tsc_khz != env->user_tsc_khz) {
+ error_report("Mismatch between user-specified TSC frequency and "
+ "migrated TSC frequency");
+ return -EINVAL;
+ }
+
+ if (env->fpregs_format_vmstate) {
+ error_report("Unsupported old non-softfloat CPU state");
+ return -EINVAL;
+ }
+ /*
+ * Real mode guest segment registers' DPL should be zero.
+ * Older KVM versions were setting it wrongly.
+ * Fixing it allows live migration from a host that doesn't have
+ * unrestricted guest support to a host with unrestricted guest support
+ * (otherwise the migration fails with an invalid guest state
+ * error).
+ */
+ if (!(env->cr[0] & CR0_PE_MASK) &&
+ (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+ env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
+ env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
+ env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
+ env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
+ env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
+ env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
+ }
+
+ /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+ * running under KVM. This is wrong for conforming code segments.
+ * Luckily, in our implementation the CPL field of hflags is redundant
+ * and we can get the right value from the SS descriptor privilege level.
+ */
+ env->hflags &= ~HF_CPL_MASK;
+ env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
+#ifdef CONFIG_KVM
+ if ((env->hflags & HF_GUEST_MASK) &&
+ (!env->nested_state ||
+ !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
+ error_report("vCPU set in guest-mode inconsistent with "
+ "migrated kernel nested state");
+ return -EINVAL;
+ }
+#endif
+
+ /*
+ * There are cases in which we can get a valid exception_nr with both
+ * exception_pending and exception_injected being cleared.
+ * This can happen in one of the following scenarios:
+ * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
+ * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD support.
+ * 3) "cpu/exception_info" subsection not sent because there is no exception
+ * pending or the guest wasn't running L2 (see comment in cpu_pre_save()).
+ *
+ * In those cases, we can deduce that a valid exception_nr means
+ * we can treat the exception as already injected.
+ */
+ if ((env->exception_nr != -1) &&
+ !env->exception_pending && !env->exception_injected) {
+ env->exception_injected = 1;
+ }
+
+ env->fpstt = (env->fpus_vmstate >> 11) & 7;
+ env->fpus = env->fpus_vmstate & ~0x3800;
+ env->fptag_vmstate ^= 0xff;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = (env->fptag_vmstate >> i) & 1;
+ }
+ if (tcg_enabled()) {
+ target_ulong dr7;
+ update_fp_status(env);
+ update_mxcsr_status(env);
+
+ cpu_breakpoint_remove_all(cs, BP_CPU);
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+
+ /* Indicate all breakpoints disabled, as they are, then
+ * let the helper re-enable them. */
+ dr7 = env->dr[7];
+ env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
+ cpu_x86_update_dr7(env, dr7);
+ }
+ tlb_flush(cs);
+ return 0;
+}
+
+static bool async_pf_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.async_pf_en_msr != 0;
+}
+
+static bool async_pf_int_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.async_pf_int_msr != 0;
+}
+
+static bool pv_eoi_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.pv_eoi_en_msr != 0;
+}
+
+static bool steal_time_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.steal_time_msr != 0;
+}
+
+static bool exception_info_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ /*
+ * It is important to save exception info only in case
+ * we need to distinguish between a pending and an injected
+ * exception, which is only required when there is a
+ * pending exception and the vCPU is running L2.
+ * For more info, refer to the comment in cpu_pre_save().
+ */
+ return env->exception_pending && (env->hflags & HF_GUEST_MASK);
+}
+
+static const VMStateDescription vmstate_exception_info = {
+ .name = "cpu/exception_info",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = exception_info_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(env.exception_pending, X86CPU),
+ VMSTATE_UINT8(env.exception_injected, X86CPU),
+ VMSTATE_UINT8(env.exception_has_payload, X86CPU),
+ VMSTATE_UINT64(env.exception_payload, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
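+
+/*
+ * Like the other subsections in this file, this one only goes on the
+ * wire when its .needed callback returns true, so streams sent to older
+ * QEMU versions that don't know the subsection remain loadable.
+ */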
+
+/* Poll control MSR enabled by default */
+static bool poll_control_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.poll_control_msr != 1;
+}
+
+static const VMStateDescription vmstate_steal_time_msr = {
+ .name = "cpu/steal_time_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = steal_time_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.steal_time_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_async_pf_msr = {
+ .name = "cpu/async_pf_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = async_pf_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_async_pf_int_msr = {
+ .name = "cpu/async_pf_int_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = async_pf_int_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.async_pf_int_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_pv_eoi_msr = {
+ .name = "cpu/async_pv_eoi_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pv_eoi_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_poll_control_msr = {
+ .name = "cpu/poll_control_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = poll_control_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.poll_control_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool fpop_ip_dp_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
+}
+
+static const VMStateDescription vmstate_fpop_ip_dp = {
+ .name = "cpu/fpop_ip_dp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpop_ip_dp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(env.fpop, X86CPU),
+ VMSTATE_UINT64(env.fpip, X86CPU),
+ VMSTATE_UINT64(env.fpdp, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool tsc_adjust_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->tsc_adjust != 0;
+}
+
+static const VMStateDescription vmstate_msr_tsc_adjust = {
+ .name = "cpu/msr_tsc_adjust",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tsc_adjust_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.tsc_adjust, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool msr_smi_count_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return cpu->migrate_smi_count && env->msr_smi_count != 0;
+}
+
+static const VMStateDescription vmstate_msr_smi_count = {
+ .name = "cpu/msr_smi_count",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = msr_smi_count_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_smi_count, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool tscdeadline_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->tsc_deadline != 0;
+}
+
+static const VMStateDescription vmstate_msr_tscdeadline = {
+ .name = "cpu/msr_tscdeadline",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tscdeadline_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.tsc_deadline, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool misc_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
+}
+
+static bool feature_control_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_ia32_feature_control != 0;
+}
+
+static const VMStateDescription vmstate_msr_ia32_misc_enable = {
+ .name = "cpu/msr_ia32_misc_enable",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = misc_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_msr_ia32_feature_control = {
+ .name = "cpu/msr_ia32_feature_control",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = feature_control_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmu_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
+ env->msr_global_status || env->msr_global_ovf_ctrl) {
+ return true;
+ }
+ for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ if (env->msr_fixed_counters[i]) {
+ return true;
+ }
+ }
+ for (i = 0; i < MAX_GP_COUNTERS; i++) {
+ if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_architectural_pmu = {
+ .name = "cpu/msr_architectural_pmu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmu_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
+ VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
+ VMSTATE_UINT64(env.msr_global_status, X86CPU),
+ VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
+ VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
+ VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool mpx_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
+ return true;
+ }
+ }
+
+ if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
+ return true;
+ }
+
+ return !!env->msr_bndcfgs;
+}
+
+static const VMStateDescription vmstate_mpx = {
+ .name = "cpu/mpx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = mpx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
+ VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
+ VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
+ VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_hypercall_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_hypercall = {
+ .name = "cpu/msr_hyperv_hypercall",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_hypercall_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_vapic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_vapic != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_vapic = {
+ .name = "cpu/msr_hyperv_vapic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_vapic_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_time_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+ .name = "cpu/msr_hyperv_time",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_time_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_crash_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < HV_CRASH_PARAMS; i++) {
+ if (env->msr_hv_crash_params[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_crash = {
+ .name = "cpu/msr_hyperv_crash",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_crash_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_runtime_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
+ return false;
+ }
+
+ return env->msr_hv_runtime != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_runtime = {
+ .name = "cpu/msr_hyperv_runtime",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_runtime_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_hv_synic_control != 0 ||
+ env->msr_hv_synic_evt_page != 0 ||
+ env->msr_hv_synic_msg_page != 0) {
+ return true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ if (env->msr_hv_synic_sint[i] != 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hyperv_synic_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ hyperv_x86_synic_update(cpu);
+ return 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+ .name = "cpu/msr_hyperv_synic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_synic_enable_needed,
+ .post_load = hyperv_synic_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_stimer_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
+ if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_stimer = {
+ .name = "cpu/msr_hyperv_stimer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_stimer_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
+ HV_STIMER_COUNT),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_reenlightenment_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_reenlightenment_control != 0 ||
+ env->msr_hv_tsc_emulation_control != 0 ||
+ env->msr_hv_tsc_emulation_status != 0;
+}
+
+static int hyperv_reenlightenment_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ /*
+ * KVM doesn't fully support re-enlightenment notifications so we need to
+ * make sure TSC frequency doesn't change upon migration.
+ */
+ if ((env->msr_hv_reenlightenment_control & HV_REENLIGHTENMENT_ENABLE_BIT) &&
+ !env->user_tsc_khz) {
+ error_report("Guest enabled re-enlightenment notifications, "
+ "'tsc-frequency=' has to be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
+ .name = "cpu/msr_hyperv_reenlightenment",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_reenlightenment_enable_needed,
+ .post_load = hyperv_reenlightenment_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool avx512_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ if (env->opmask_regs[i]) {
+ return true;
+ }
+ }
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
+ if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
+ ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
+ return true;
+ }
+#ifdef TARGET_X86_64
+ if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
+ ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
+ ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
+ ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+ .name = "cpu/avx512",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = avx512_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+ VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
+#ifdef TARGET_X86_64
+ VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xss_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool umwait_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->umwait != 0;
+}
+
+static const VMStateDescription vmstate_umwait = {
+ .name = "cpu/umwait",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = umwait_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.umwait, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pkru_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->pkru != 0;
+}
+
+static const VMStateDescription vmstate_pkru = {
+ .name = "cpu/pkru",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pkru_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32(env.pkru, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pkrs_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->pkrs != 0;
+}
+
+static const VMStateDescription vmstate_pkrs = {
+ .name = "cpu/pkrs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pkrs_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32(env.pkrs, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool tsc_khz_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
+ return env->tsc_khz && x86mc->save_tsc_khz;
+}
+
+static const VMStateDescription vmstate_tsc_khz = {
+ .name = "cpu/tsc_khz",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tsc_khz_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(env.tsc_khz, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#ifdef CONFIG_KVM
+
+static bool vmx_vmcs12_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+ return (nested_state->size >
+ offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
+}
+
+static const VMStateDescription vmstate_vmx_vmcs12 = {
+ .name = "cpu/kvm_nested_state/vmx/vmcs12",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_vmcs12_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_VMX_VMCS_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vmx_shadow_vmcs12_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+ return (nested_state->size >
+ offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
+}
+
+static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
+ .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_shadow_vmcs12_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_VMX_VMCS_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vmx_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
+ nested_state->hdr.vmx.vmxon_pa != -1ull);
+}
+
+static const VMStateDescription vmstate_vmx_nested_state = {
+ .name = "cpu/kvm_nested_state/vmx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
+ VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
+ VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vmx_vmcs12,
+ &vmstate_vmx_shadow_vmcs12,
+ NULL,
+ }
+};
+
+static bool svm_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ /*
+ * HF_GUEST_MASK and HF2_GIF_MASK are already serialized
+ * via hflags and hflags2, all that's left is the opaque
+ * nested state blob.
+ */
+ return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
+ nested_state->size > offsetof(struct kvm_nested_state, data));
+}
+
+static const VMStateDescription vmstate_svm_nested_state = {
+ .name = "cpu/kvm_nested_state/svm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
+ VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_SVM_VMCB_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool nested_state_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return (env->nested_state &&
+ (vmx_nested_state_needed(env->nested_state) ||
+ svm_nested_state_needed(env->nested_state)));
+}
+
+static int nested_state_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ struct kvm_nested_state *nested_state = env->nested_state;
+ int min_nested_state_len = offsetof(struct kvm_nested_state, data);
+ int max_nested_state_len = kvm_max_nested_state_length();
+
+    /*
+     * If our kernel doesn't support setting nested state
+     * and we have received nested state from the migration stream,
+     * we need to fail the migration.
+     */
+ if (max_nested_state_len <= 0) {
+ error_report("Received nested state when kernel cannot restore it");
+ return -EINVAL;
+ }
+
+    /*
+     * Verify that the size of the received nested_state struct
+     * at least covers the required header and is not larger
+     * than the max size that our kernel supports.
+     */
+ if (nested_state->size < min_nested_state_len) {
+ error_report("Received nested state size less than min: "
+ "len=%d, min=%d",
+ nested_state->size, min_nested_state_len);
+ return -EINVAL;
+ }
+ if (nested_state->size > max_nested_state_len) {
+ error_report("Received unsupported nested state size: "
+ "nested_state->size=%d, max=%d",
+ nested_state->size, max_nested_state_len);
+ return -EINVAL;
+ }
+
+ /* Verify format is valid */
+ if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
+ (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
+ error_report("Received invalid nested state format: %d",
+ nested_state->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_kvm_nested_state = {
+ .name = "cpu/kvm_nested_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_U16(flags, struct kvm_nested_state),
+ VMSTATE_U16(format, struct kvm_nested_state),
+ VMSTATE_U32(size, struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vmx_nested_state,
+ &vmstate_svm_nested_state,
+ NULL
+ }
+};
+
+static const VMStateDescription vmstate_nested_state = {
+ .name = "cpu/nested_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = nested_state_needed,
+ .post_load = nested_state_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
+ vmstate_kvm_nested_state,
+ struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#endif
+
+static bool mcg_ext_ctl_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ return cpu->enable_lmce && env->mcg_ext_ctl;
+}
+
+static const VMStateDescription vmstate_mcg_ext_ctl = {
+ .name = "cpu/mcg_ext_ctl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = mcg_ext_ctl_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool spec_ctrl_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->spec_ctrl != 0;
+}
+
+static const VMStateDescription vmstate_spec_ctrl = {
+ .name = "cpu/spec_ctrl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = spec_ctrl_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT64(env.spec_ctrl, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool amd_tsc_scale_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE);
+}
+
+static const VMStateDescription amd_tsc_scale_msr_ctrl = {
+ .name = "cpu/amd_tsc_scale_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = amd_tsc_scale_msr_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool intel_pt_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_rtit_ctrl || env->msr_rtit_status ||
+ env->msr_rtit_output_base || env->msr_rtit_output_mask ||
+ env->msr_rtit_cr3_match) {
+ return true;
+ }
+
+ for (i = 0; i < MAX_RTIT_ADDRS; i++) {
+ if (env->msr_rtit_addrs[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_intel_pt = {
+ .name = "cpu/intel_pt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = intel_pt_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
+ VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
+ VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
+ VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
+ VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool virt_ssbd_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->virt_ssbd != 0;
+}
+
+static const VMStateDescription vmstate_msr_virt_ssbd = {
+ .name = "cpu/virt_ssbd",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = virt_ssbd_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT64(env.virt_ssbd, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool svm_npt_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return !!(env->hflags2 & HF2_NPT_MASK);
+}
+
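+/*
+ * Note: "svn_npt" below appears to be a misspelling of "svm_npt", but
+ * the name string is part of the migration stream format, so it cannot
+ * be corrected without breaking compatibility.
+ */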
+static const VMStateDescription vmstate_svm_npt = {
+ .name = "cpu/svn_npt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_npt_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT64(env.nested_cr3, X86CPU),
+ VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool svm_guest_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return tcg_enabled() && env->int_ctl;
+}
+
+static const VMStateDescription vmstate_svm_guest = {
+ .name = "cpu/svm_guest",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_guest_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32(env.int_ctl, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#ifndef TARGET_X86_64
+static bool intel_efer32_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->efer != 0;
+}
+
+static const VMStateDescription vmstate_efer32 = {
+ .name = "cpu/efer32",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = intel_efer32_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.efer, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+static bool msr_tsx_ctrl_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR;
+}
+
+static const VMStateDescription vmstate_msr_tsx_ctrl = {
+ .name = "cpu/msr_tsx_ctrl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = msr_tsx_ctrl_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool intel_sgx_msrs_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC);
+}
+
+static const VMStateDescription vmstate_msr_intel_sgx = {
+ .name = "cpu/intel_sgx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = intel_sgx_msrs_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_x86_cpu = {
+ .name = "cpu",
+ .version_id = 12,
+ .minimum_version_id = 11,
+ .pre_save = cpu_pre_save,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
+ VMSTATE_UINTTL(env.eip, X86CPU),
+ VMSTATE_UINTTL(env.eflags, X86CPU),
+ VMSTATE_UINT32(env.hflags, X86CPU),
+ /* FPU */
+ VMSTATE_UINT16(env.fpuc, X86CPU),
+ VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
+ VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
+ VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
+
+ VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),
+
+ VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
+ VMSTATE_SEGMENT(env.ldt, X86CPU),
+ VMSTATE_SEGMENT(env.tr, X86CPU),
+ VMSTATE_SEGMENT(env.gdt, X86CPU),
+ VMSTATE_SEGMENT(env.idt, X86CPU),
+
+ VMSTATE_UINT32(env.sysenter_cs, X86CPU),
+ VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
+ VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
+
+ VMSTATE_UINTTL(env.cr[0], X86CPU),
+ VMSTATE_UINTTL(env.cr[2], X86CPU),
+ VMSTATE_UINTTL(env.cr[3], X86CPU),
+ VMSTATE_UINTTL(env.cr[4], X86CPU),
+ VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
+ /* MMU */
+ VMSTATE_INT32(env.a20_mask, X86CPU),
+ /* XMM */
+ VMSTATE_UINT32(env.mxcsr, X86CPU),
+ VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),
+
+#ifdef TARGET_X86_64
+ VMSTATE_UINT64(env.efer, X86CPU),
+ VMSTATE_UINT64(env.star, X86CPU),
+ VMSTATE_UINT64(env.lstar, X86CPU),
+ VMSTATE_UINT64(env.cstar, X86CPU),
+ VMSTATE_UINT64(env.fmask, X86CPU),
+ VMSTATE_UINT64(env.kernelgsbase, X86CPU),
+#endif
+ VMSTATE_UINT32(env.smbase, X86CPU),
+
+ VMSTATE_UINT64(env.pat, X86CPU),
+ VMSTATE_UINT32(env.hflags2, X86CPU),
+
+ VMSTATE_UINT64(env.vm_hsave, X86CPU),
+ VMSTATE_UINT64(env.vm_vmcb, X86CPU),
+ VMSTATE_UINT64(env.tsc_offset, X86CPU),
+ VMSTATE_UINT64(env.intercept, X86CPU),
+ VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
+ VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
+ VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
+ VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
+ VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
+ VMSTATE_UINT8(env.v_tpr, X86CPU),
+ /* MTRRs */
+ VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
+ VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
+ /* KVM-related states */
+ VMSTATE_INT32(env.interrupt_injected, X86CPU),
+ VMSTATE_UINT32(env.mp_state, X86CPU),
+ VMSTATE_UINT64(env.tsc, X86CPU),
+ VMSTATE_INT32(env.exception_nr, X86CPU),
+ VMSTATE_UINT8(env.soft_interrupt, X86CPU),
+ VMSTATE_UINT8(env.nmi_injected, X86CPU),
+ VMSTATE_UINT8(env.nmi_pending, X86CPU),
+ VMSTATE_UINT8(env.has_error_code, X86CPU),
+ VMSTATE_UINT32(env.sipi_vector, X86CPU),
+ /* MCE */
+ VMSTATE_UINT64(env.mcg_cap, X86CPU),
+ VMSTATE_UINT64(env.mcg_status, X86CPU),
+ VMSTATE_UINT64(env.mcg_ctl, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
+ /* rdtscp */
+ VMSTATE_UINT64(env.tsc_aux, X86CPU),
+ /* KVM pvclock msr */
+ VMSTATE_UINT64(env.system_time_msr, X86CPU),
+ VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
+ /* XSAVE related fields */
+ VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
+ VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
+ VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
+ VMSTATE_END_OF_LIST()
+        /* The above list is not sorted w.r.t. version numbers, watch out! */
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_exception_info,
+ &vmstate_async_pf_msr,
+ &vmstate_async_pf_int_msr,
+ &vmstate_pv_eoi_msr,
+ &vmstate_steal_time_msr,
+ &vmstate_poll_control_msr,
+ &vmstate_fpop_ip_dp,
+ &vmstate_msr_tsc_adjust,
+ &vmstate_msr_tscdeadline,
+ &vmstate_msr_ia32_misc_enable,
+ &vmstate_msr_ia32_feature_control,
+ &vmstate_msr_architectural_pmu,
+ &vmstate_mpx,
+ &vmstate_msr_hyperv_hypercall,
+ &vmstate_msr_hyperv_vapic,
+ &vmstate_msr_hyperv_time,
+ &vmstate_msr_hyperv_crash,
+ &vmstate_msr_hyperv_runtime,
+ &vmstate_msr_hyperv_synic,
+ &vmstate_msr_hyperv_stimer,
+ &vmstate_msr_hyperv_reenlightenment,
+ &vmstate_avx512,
+ &vmstate_xss,
+ &vmstate_umwait,
+ &vmstate_tsc_khz,
+ &vmstate_msr_smi_count,
+ &vmstate_pkru,
+ &vmstate_pkrs,
+ &vmstate_spec_ctrl,
+ &amd_tsc_scale_msr_ctrl,
+ &vmstate_mcg_ext_ctl,
+ &vmstate_msr_intel_pt,
+ &vmstate_msr_virt_ssbd,
+ &vmstate_svm_npt,
+ &vmstate_svm_guest,
+#ifndef TARGET_X86_64
+ &vmstate_efer32,
+#endif
+#ifdef CONFIG_KVM
+ &vmstate_nested_state,
+#endif
+ &vmstate_msr_tsx_ctrl,
+ &vmstate_msr_intel_sgx,
+ NULL
+ }
+};
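+
+/*
+ * A note on the _V field macros above: VMSTATE_UINT64_V(field, state, 12)
+ * marks a field that only exists in streams of version 12 or newer; when
+ * a version-11 stream is loaded, the field is skipped and keeps its
+ * post-reset value.  Together with .minimum_version_id = 11 this is what
+ * lets a version-12 QEMU still accept a version-11 "cpu" section.  A
+ * minimal sketch with a hypothetical device (FooDevState and its fields
+ * are illustrative only):
+ *
+ *     static const VMStateDescription vmstate_foo_dev = {
+ *         .name = "foo-dev",
+ *         .version_id = 2,
+ *         .minimum_version_id = 1,
+ *         .fields = (VMStateField[]) {
+ *             VMSTATE_UINT32(regs, FooDevState),
+ *             VMSTATE_UINT32_V(ctrl, FooDevState, 2),
+ *             VMSTATE_END_OF_LIST()
+ *         }
+ *     };
+ *
+ * Here "regs" has been present since version 1, while "ctrl" was added
+ * in version 2 and is only expected in version-2 streams.
+ */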
diff --git a/target/i386/meson.build b/target/i386/meson.build
new file mode 100644
index 000000000..ae38dc956
--- /dev/null
+++ b/target/i386/meson.build
@@ -0,0 +1,36 @@
+i386_ss = ss.source_set()
+i386_ss.add(files(
+ 'cpu.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'xsave_helper.c',
+ 'cpu-dump.c',
+))
+i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c'))
+
+# x86 cpu type
+i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c'))
+i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c'))
+
+i386_softmmu_ss = ss.source_set()
+i386_softmmu_ss.add(files(
+ 'arch_dump.c',
+ 'arch_memory_mapping.c',
+ 'machine.c',
+ 'monitor.c',
+ 'cpu-sysemu.c',
+))
+i386_softmmu_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-sysemu-stub.c'))
+
+i386_user_ss = ss.source_set()
+
+subdir('kvm')
+subdir('hax')
+subdir('whpx')
+subdir('nvmm')
+subdir('hvf')
+subdir('tcg')
+
+target_arch += {'i386': i386_ss}
+target_softmmu_arch += {'i386': i386_softmmu_ss}
+target_user_arch += {'i386': i386_user_ss}
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
new file mode 100644
index 000000000..8e4b4d600
--- /dev/null
+++ b/target/i386/monitor.c
@@ -0,0 +1,669 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qerror.h"
+#include "sysemu/kvm.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-misc.h"
+#include "hw/i386/pc.h"
+
+/* Perform linear address sign extension */
+static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
+{
+#ifdef TARGET_X86_64
+ if (env->cr[4] & CR4_LA57_MASK) {
+ if (addr & (1ULL << 56)) {
+ addr |= (hwaddr)-(1LL << 57);
+ }
+ } else {
+ if (addr & (1ULL << 47)) {
+ addr |= (hwaddr)-(1LL << 48);
+ }
+ }
+#endif
+ return addr;
+}
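+
+/*
+ * For example, with 4-level paging (48-bit linear addresses), the
+ * address 0x0000800000000000 has bit 47 set and is sign-extended to
+ * 0xffff800000000000; with LA57 enabled the sign bit moves up to
+ * bit 56.
+ */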
+
+static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
+ hwaddr pte, hwaddr mask)
+{
+ addr = addr_canonical(env, addr);
+
+ monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+ " %c%c%c%c%c%c%c%c%c\n",
+ addr,
+ pte & mask,
+ pte & PG_NX_MASK ? 'X' : '-',
+ pte & PG_GLOBAL_MASK ? 'G' : '-',
+ pte & PG_PSE_MASK ? 'P' : '-',
+ pte & PG_DIRTY_MASK ? 'D' : '-',
+ pte & PG_ACCESSED_MASK ? 'A' : '-',
+ pte & PG_PCD_MASK ? 'C' : '-',
+ pte & PG_PWT_MASK ? 'T' : '-',
+ pte & PG_USER_MASK ? 'U' : '-',
+ pte & PG_RW_MASK ? 'W' : '-');
+}
+
+static void tlb_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ uint32_t pgd, pde, pte;
+
+ pgd = env->cr[3] & ~0xfff;
+    for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ /* 4M pages */
+ print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
+ } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, env, (l1 << 22) + (l2 << 12),
+ pte & ~PG_PSE_MASK,
+ ~0xfff);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages with PAE, CR4.PSE is ignored */
+ print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
+ ~((hwaddr)(1 << 20) - 1));
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, env, (l1 << 30) + (l2 << 21)
+ + (l3 << 12),
+ pte & ~PG_PSE_MASK,
+ ~(hwaddr)0xfff);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#ifdef TARGET_X86_64
+static void tlb_info_la48(Monitor *mon, CPUArchState *env,
+ uint64_t l0, uint64_t pml4_addr)
+{
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ continue;
+ }
+
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ continue;
+ }
+
+ if (pdpe & PG_PSE_MASK) {
+ /* 1G pages, CR4.PSE is ignored */
+ print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
+ pdpe, 0x3ffffc0000000ULL);
+ continue;
+ }
+
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (!(pde & PG_PRESENT_MASK)) {
+ continue;
+ }
+
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages, CR4.PSE is ignored */
+ print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
+ (l3 << 21), pde, 0x3ffffffe00000ULL);
+ continue;
+ }
+
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, env, (l0 << 48) + (l1 << 39) +
+ (l2 << 30) + (l3 << 21) + (l4 << 12),
+ pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void tlb_info_la57(Monitor *mon, CPUArchState *env)
+{
+ uint64_t l0;
+ uint64_t pml5e;
+ uint64_t pml5_addr;
+
+ pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
+ for (l0 = 0; l0 < 512; l0++) {
+ cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
+ pml5e = le64_to_cpu(pml5e);
+ if (pml5e & PG_PRESENT_MASK) {
+ tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
+ }
+ }
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env(mon);
+ if (!env) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ if (env->cr[4] & CR4_LA57_MASK) {
+ tlb_info_la57(mon, env);
+ } else {
+ tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
+ }
+ } else
+#endif
+ {
+ tlb_info_pae32(mon, env);
+ }
+ } else {
+ tlb_info_32(mon, env);
+ }
+}
+
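+/*
+ * Coalesce and print mapped ranges: mem_print() is called once per
+ * mapping boundary with the protection of the range that starts at
+ * 'end'.  It only prints the previous range when the protection
+ * changes, so runs of identically-protected pages collapse into a
+ * single output line.
+ */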
+static void mem_print(Monitor *mon, CPUArchState *env,
+ hwaddr *pstart, int *plast_prot,
+ hwaddr end, int prot)
+{
+ int prot1;
+ prot1 = *plast_prot;
+ if (prot != prot1) {
+ if (*pstart != -1) {
+ monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
+ TARGET_FMT_plx " %c%c%c\n",
+ addr_canonical(env, *pstart),
+ addr_canonical(env, end),
+ addr_canonical(env, end - *pstart),
+ prot1 & PG_USER_MASK ? 'u' : '-',
+ 'r',
+ prot1 & PG_RW_MASK ? 'w' : '-');
+ }
+        if (prot != 0) {
+            *pstart = end;
+        } else {
+            *pstart = -1;
+        }
+ *plast_prot = prot;
+ }
+}
+
+static void mem_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ int prot, last_prot;
+ uint32_t pgd, pde, pte;
+ hwaddr start, end;
+
+ pgd = env->cr[3] & ~0xfff;
+ last_prot = 0;
+ start = -1;
+    for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ end = l1 << 22;
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ } else {
+                for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ end = (l1 << 22) + (l2 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde &
+ (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ int prot, last_prot;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+ hwaddr start, end;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = l1 << 30;
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 30) + (l2 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 30) + (l2 << 21) + (l3 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+#ifdef TARGET_X86_64
+static void mem_info_la48(Monitor *mon, CPUArchState *env)
+{
+ int prot, last_prot;
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ end = l1 << 39;
+ if (pml4e & PG_PRESENT_MASK) {
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = (l1 << 39) + (l2 << 30);
+ if (pdpe & PG_PRESENT_MASK) {
+ if (pdpe & PG_PSE_MASK) {
+ prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ } else {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 39) + (l2 << 30) + (l3 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe;
+ mem_print(mon, env, &start,
+ &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 39) + (l2 << 30) +
+ (l3 << 21) + (l4 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe & pde;
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, env, &start,
+ &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start,
+ &last_prot, end, prot);
+ }
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
+}
+
+static void mem_info_la57(Monitor *mon, CPUArchState *env)
+{
+ int prot, last_prot;
+ uint64_t l0, l1, l2, l3, l4;
+ uint64_t pml5e, pml4e, pdpe, pde, pte;
+ uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+ pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
+ last_prot = 0;
+ start = -1;
+ for (l0 = 0; l0 < 512; l0++) {
+ cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
+ pml5e = le64_to_cpu(pml5e);
+ end = l0 << 48;
+ if (!(pml5e & PG_PRESENT_MASK)) {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ pml4_addr = pml5e & 0x3fffffffff000ULL;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ end = (l0 << 48) + (l1 << 39);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = (l0 << 48) + (l1 << 39) + (l2 << 30);
+                if (!(pdpe & PG_PRESENT_MASK)) {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ if (pdpe & PG_PSE_MASK) {
+ prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml5e & pml4e;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
+                    if (!(pde & PG_PRESENT_MASK)) {
+ prot = 0;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml5e & pml4e & pdpe;
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ continue;
+ }
+
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
+ (l3 << 21) + (l4 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml5e & pml4e & pdpe & pde;
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, env, &start, &last_prot, end, prot);
+ }
+ }
+ }
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env(mon);
+ if (!env) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ if (env->cr[4] & CR4_LA57_MASK) {
+ mem_info_la57(mon, env);
+ } else {
+ mem_info_la48(mon, env);
+ }
+ } else
+#endif
+ {
+ mem_info_pae32(mon, env);
+ }
+ } else {
+ mem_info_32(mon, env);
+ }
+}
+
+void hmp_mce(Monitor *mon, const QDict *qdict)
+{
+ X86CPU *cpu;
+ CPUState *cs;
+ int cpu_index = qdict_get_int(qdict, "cpu_index");
+ int bank = qdict_get_int(qdict, "bank");
+ uint64_t status = qdict_get_int(qdict, "status");
+ uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
+ uint64_t addr = qdict_get_int(qdict, "addr");
+ uint64_t misc = qdict_get_int(qdict, "misc");
+ int flags = MCE_INJECT_UNCOND_AO;
+
+ if (qdict_get_try_bool(qdict, "broadcast", false)) {
+ flags |= MCE_INJECT_BROADCAST;
+ }
+ cs = qemu_get_cpu(cpu_index);
+ if (cs != NULL) {
+ cpu = X86_CPU(cs);
+ cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
+ flags);
+ }
+}
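+
+/*
+ * The matching HMP command (declared in hmp-commands.hx) is expected to
+ * be invoked as:
+ *
+ *     mce [-b] cpu bank status mcg_status addr misc
+ *
+ * where -b requests broadcasting the MCE to all CPUs.
+ */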
+
+static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return env->eip + env->segs[R_CS].base;
+}
+
+const MonitorDef monitor_defs[] = {
+#define SEG(name, seg) \
+ { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
+ { name ".base", offsetof(CPUX86State, segs[seg].base) },\
+ { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
+
+ { "eax", offsetof(CPUX86State, regs[0]) },
+ { "ecx", offsetof(CPUX86State, regs[1]) },
+ { "edx", offsetof(CPUX86State, regs[2]) },
+ { "ebx", offsetof(CPUX86State, regs[3]) },
+ { "esp|sp", offsetof(CPUX86State, regs[4]) },
+ { "ebp|fp", offsetof(CPUX86State, regs[5]) },
+ { "esi", offsetof(CPUX86State, regs[6]) },
+ { "edi", offsetof(CPUX86State, regs[7]) },
+#ifdef TARGET_X86_64
+ { "r8", offsetof(CPUX86State, regs[8]) },
+ { "r9", offsetof(CPUX86State, regs[9]) },
+ { "r10", offsetof(CPUX86State, regs[10]) },
+ { "r11", offsetof(CPUX86State, regs[11]) },
+ { "r12", offsetof(CPUX86State, regs[12]) },
+ { "r13", offsetof(CPUX86State, regs[13]) },
+ { "r14", offsetof(CPUX86State, regs[14]) },
+ { "r15", offsetof(CPUX86State, regs[15]) },
+#endif
+ { "eflags", offsetof(CPUX86State, eflags) },
+ { "eip", offsetof(CPUX86State, eip) },
+ SEG("cs", R_CS)
+ SEG("ds", R_DS)
+ SEG("es", R_ES)
+ SEG("ss", R_SS)
+ SEG("fs", R_FS)
+ SEG("gs", R_GS)
+ { "pc", 0, monitor_get_pc, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
+{
+ CPUState *cs;
+
+ if (qdict_haskey(qdict, "apic-id")) {
+ int id = qdict_get_try_int(qdict, "apic-id", 0);
+ cs = cpu_by_arch_id(id);
+ } else {
+ cs = mon_get_cpu(mon);
+ }
+
+ if (!cs) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+ x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
+}
diff --git a/target/i386/nvmm/meson.build b/target/i386/nvmm/meson.build
new file mode 100644
index 000000000..733e33408
--- /dev/null
+++ b/target/i386/nvmm/meson.build
@@ -0,0 +1,8 @@
+i386_softmmu_ss.add(when: 'CONFIG_NVMM', if_true:
+ files(
+ 'nvmm-all.c',
+ 'nvmm-accel-ops.c',
+ )
+)
+
+i386_softmmu_ss.add(when: 'CONFIG_NVMM', if_true: nvmm)
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
new file mode 100644
index 000000000..f788f7528
--- /dev/null
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/kvm_int.h"
+#include "qemu/main-loop.h"
+#include "sysemu/cpus.h"
+#include "qemu/guest-random.h"
+
+#include "sysemu/nvmm.h"
+#include "nvmm-accel-ops.h"
+
+static void *qemu_nvmm_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+
+ assert(nvmm_enabled());
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+ cpu->thread_id = qemu_get_thread_id();
+ current_cpu = cpu;
+
+ r = nvmm_init_vcpu(cpu);
+ if (r < 0) {
+ fprintf(stderr, "nvmm_init_vcpu failed: %s\n", strerror(-r));
+ exit(1);
+ }
+
+ /* signal CPU creation */
+ cpu_thread_signal_created(cpu);
+ qemu_guest_random_seed_thread_part2(cpu->random_seed);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = nvmm_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait_iothread(cpu->halt_cond);
+ }
+ qemu_wait_io_event_common(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ nvmm_destroy_vcpu(cpu);
+ cpu_thread_signal_destroyed(cpu);
+ qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void nvmm_start_vcpu_thread(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+}
+
+/*
+ * Abort another thread's call to run the virtual processor, and return
+ * control to that thread.
+ */
+static void nvmm_kick_vcpu_thread(CPUState *cpu)
+{
+ cpu->exit_request = 1;
+ cpus_kick_thread(cpu);
+}
+
+static void nvmm_accel_ops_class_init(ObjectClass *oc, void *data)
+{
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
+
+ ops->create_vcpu_thread = nvmm_start_vcpu_thread;
+ ops->kick_vcpu_thread = nvmm_kick_vcpu_thread;
+
+ ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset;
+ ops->synchronize_post_init = nvmm_cpu_synchronize_post_init;
+ ops->synchronize_state = nvmm_cpu_synchronize_state;
+ ops->synchronize_pre_loadvm = nvmm_cpu_synchronize_pre_loadvm;
+}
+
+static const TypeInfo nvmm_accel_ops_type = {
+ .name = ACCEL_OPS_NAME("nvmm"),
+
+ .parent = TYPE_ACCEL_OPS,
+ .class_init = nvmm_accel_ops_class_init,
+ .abstract = true,
+};
+
+static void nvmm_accel_ops_register_types(void)
+{
+ type_register_static(&nvmm_accel_ops_type);
+}
+type_init(nvmm_accel_ops_register_types);
diff --git a/target/i386/nvmm/nvmm-accel-ops.h b/target/i386/nvmm/nvmm-accel-ops.h
new file mode 100644
index 000000000..43e24adca
--- /dev/null
+++ b/target/i386/nvmm/nvmm-accel-ops.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef NVMM_CPUS_H
+#define NVMM_CPUS_H
+
+#include "sysemu/cpus.h"
+
+int nvmm_init_vcpu(CPUState *cpu);
+int nvmm_vcpu_exec(CPUState *cpu);
+void nvmm_destroy_vcpu(CPUState *cpu);
+
+void nvmm_cpu_synchronize_state(CPUState *cpu);
+void nvmm_cpu_synchronize_post_reset(CPUState *cpu);
+void nvmm_cpu_synchronize_post_init(CPUState *cpu);
+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu);
+
+#endif /* NVMM_CPUS_H */
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
new file mode 100644
index 000000000..9af261eea
--- /dev/null
+++ b/target/i386/nvmm/nvmm-all.c
@@ -0,0 +1,1236 @@
+/*
+ * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
+ *
+ * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "exec/ioport.h"
+#include "qemu-common.h"
+#include "qemu/accel.h"
+#include "sysemu/nvmm.h"
+#include "sysemu/cpus.h"
+#include "sysemu/runstate.h"
+#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qemu/queue.h"
+#include "migration/blocker.h"
+#include "strings.h"
+
+#include "nvmm-accel-ops.h"
+
+#include <nvmm.h>
+
+struct qemu_vcpu {
+ struct nvmm_vcpu vcpu;
+ uint8_t tpr;
+ bool stop;
+
+ /* Window-exiting for INTs/NMIs. */
+ bool int_window_exit;
+ bool nmi_window_exit;
+
+ /* The guest is in an interrupt shadow (POP SS, etc). */
+ bool int_shadow;
+};
+
+struct qemu_machine {
+ struct nvmm_capability cap;
+ struct nvmm_machine mach;
+};
+
+/* -------------------------------------------------------------------------- */
+
+static bool nvmm_allowed;
+static struct qemu_machine qemu_mach;
+
+static struct qemu_vcpu *
+get_qemu_vcpu(CPUState *cpu)
+{
+ return (struct qemu_vcpu *)cpu->hax_vcpu;
+}
+
+static struct nvmm_machine *
+get_nvmm_mach(void)
+{
+ return &qemu_mach.mach;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
+{
+ uint32_t attrib = qseg->flags;
+
+ nseg->selector = qseg->selector;
+ nseg->limit = qseg->limit;
+ nseg->base = qseg->base;
+ nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK);
+ nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK);
+ nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
+ nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
+ nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
+ nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK);
+ nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK);
+ nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK);
+}
+
+static void
+nvmm_set_registers(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ struct nvmm_x64_state *state = vcpu->state;
+ uint64_t bitmap;
+ size_t i;
+ int ret;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ /* GPRs. */
+ state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
+ state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
+ state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
+ state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
+ state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
+ state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
+ state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
+ state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
+#ifdef TARGET_X86_64
+ state->gprs[NVMM_X64_GPR_R8] = env->regs[R_R8];
+ state->gprs[NVMM_X64_GPR_R9] = env->regs[R_R9];
+ state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
+ state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
+ state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
+ state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
+ state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
+ state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
+#endif
+
+ /* RIP and RFLAGS. */
+ state->gprs[NVMM_X64_GPR_RIP] = env->eip;
+ state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;
+
+ /* Segments. */
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);
+
+ /* Special segments. */
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
+ nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);
+
+ /* Control registers. */
+ state->crs[NVMM_X64_CR_CR0] = env->cr[0];
+ state->crs[NVMM_X64_CR_CR2] = env->cr[2];
+ state->crs[NVMM_X64_CR_CR3] = env->cr[3];
+ state->crs[NVMM_X64_CR_CR4] = env->cr[4];
+ state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
+ state->crs[NVMM_X64_CR_XCR0] = env->xcr0;
+
+ /* Debug registers. */
+ state->drs[NVMM_X64_DR_DR0] = env->dr[0];
+ state->drs[NVMM_X64_DR_DR1] = env->dr[1];
+ state->drs[NVMM_X64_DR_DR2] = env->dr[2];
+ state->drs[NVMM_X64_DR_DR3] = env->dr[3];
+ state->drs[NVMM_X64_DR_DR6] = env->dr[6];
+ state->drs[NVMM_X64_DR_DR7] = env->dr[7];
+
+ /* FPU. */
+ state->fpu.fx_cw = env->fpuc;
+ state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
+ state->fpu.fx_tw = 0;
+ for (i = 0; i < 8; i++) {
+ state->fpu.fx_tw |= (!env->fptags[i]) << i;
+ }
+ state->fpu.fx_opcode = env->fpop;
+ state->fpu.fx_ip.fa_64 = env->fpip;
+ state->fpu.fx_dp.fa_64 = env->fpdp;
+ state->fpu.fx_mxcsr = env->mxcsr;
+ state->fpu.fx_mxcsr_mask = 0x0000FFFF;
+ assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
+ memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
+ &env->xmm_regs[i].ZMM_Q(0), 8);
+ memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
+ &env->xmm_regs[i].ZMM_Q(1), 8);
+ }
+
+ /* MSRs. */
+ state->msrs[NVMM_X64_MSR_EFER] = env->efer;
+ state->msrs[NVMM_X64_MSR_STAR] = env->star;
+#ifdef TARGET_X86_64
+ state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
+ state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
+ state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
+ state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
+#endif
+ state->msrs[NVMM_X64_MSR_SYSENTER_CS] = env->sysenter_cs;
+ state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
+ state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
+ state->msrs[NVMM_X64_MSR_PAT] = env->pat;
+ state->msrs[NVMM_X64_MSR_TSC] = env->tsc;
+
+ bitmap =
+ NVMM_X64_STATE_SEGS |
+ NVMM_X64_STATE_GPRS |
+ NVMM_X64_STATE_CRS |
+ NVMM_X64_STATE_DRS |
+ NVMM_X64_STATE_MSRS |
+ NVMM_X64_STATE_FPU;
+
+ ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
+ if (ret == -1) {
+ error_report("NVMM: Failed to set virtual processor context,"
+ " error=%d", errno);
+ }
+}
+
+static void
+nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg)
+{
+ qseg->selector = nseg->selector;
+ qseg->limit = nseg->limit;
+ qseg->base = nseg->base;
+
+ qseg->flags =
+ __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) |
+ __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK);
+}
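+
+/*
+ * __SHIFTOUT(x, mask) and __SHIFTIN(x, mask) above are NetBSD
+ * <sys/cdefs.h> helpers (NVMM is NetBSD-only): __SHIFTOUT extracts the
+ * bit-field selected by 'mask' and shifts it down to bit 0, and
+ * __SHIFTIN places a value into that bit-field.  Roughly equivalent
+ * portable definitions would be:
+ *
+ *     #define __LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask))
+ *     #define __SHIFTOUT(x, mask)    (((x) & (mask)) / __LOWEST_SET_BIT(mask))
+ *     #define __SHIFTIN(x, mask)     ((x) * __LOWEST_SET_BIT(mask))
+ */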
+
+static void
+nvmm_get_registers(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct nvmm_x64_state *state = vcpu->state;
+ uint64_t bitmap, tpr;
+ size_t i;
+ int ret;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ bitmap =
+ NVMM_X64_STATE_SEGS |
+ NVMM_X64_STATE_GPRS |
+ NVMM_X64_STATE_CRS |
+ NVMM_X64_STATE_DRS |
+ NVMM_X64_STATE_MSRS |
+ NVMM_X64_STATE_FPU;
+
+ ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
+ if (ret == -1) {
+ error_report("NVMM: Failed to get virtual processor context,"
+ " error=%d", errno);
+ }
+
+ /* GPRs. */
+ env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
+ env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
+ env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
+ env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
+ env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
+ env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
+ env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
+ env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
+#ifdef TARGET_X86_64
+ env->regs[R_R8] = state->gprs[NVMM_X64_GPR_R8];
+ env->regs[R_R9] = state->gprs[NVMM_X64_GPR_R9];
+ env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
+ env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
+ env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
+ env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
+ env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
+ env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];
+#endif
+
+ /* RIP and RFLAGS. */
+ env->eip = state->gprs[NVMM_X64_GPR_RIP];
+ env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];
+
+ /* Segments. */
+ nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
+ nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
+ nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
+ nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
+ nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
+ nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);
+
+ /* Special segments. */
+ nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
+ nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
+ nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
+ nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);
+
+ /* Control registers. */
+ env->cr[0] = state->crs[NVMM_X64_CR_CR0];
+ env->cr[2] = state->crs[NVMM_X64_CR_CR2];
+ env->cr[3] = state->crs[NVMM_X64_CR_CR3];
+ env->cr[4] = state->crs[NVMM_X64_CR_CR4];
+ tpr = state->crs[NVMM_X64_CR_CR8];
+ if (tpr != qcpu->tpr) {
+ qcpu->tpr = tpr;
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+ }
+ env->xcr0 = state->crs[NVMM_X64_CR_XCR0];
+
+ /* Debug registers. */
+ env->dr[0] = state->drs[NVMM_X64_DR_DR0];
+ env->dr[1] = state->drs[NVMM_X64_DR_DR1];
+ env->dr[2] = state->drs[NVMM_X64_DR_DR2];
+ env->dr[3] = state->drs[NVMM_X64_DR_DR3];
+ env->dr[6] = state->drs[NVMM_X64_DR_DR6];
+ env->dr[7] = state->drs[NVMM_X64_DR_DR7];
+
+ /* FPU. */
+ env->fpuc = state->fpu.fx_cw;
+ env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
+ env->fpus = state->fpu.fx_sw & ~0x3800;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
+ }
+ env->fpop = state->fpu.fx_opcode;
+ env->fpip = state->fpu.fx_ip.fa_64;
+ env->fpdp = state->fpu.fx_dp.fa_64;
+ env->mxcsr = state->fpu.fx_mxcsr;
+ assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
+ memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ memcpy(&env->xmm_regs[i].ZMM_Q(0),
+ &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
+ memcpy(&env->xmm_regs[i].ZMM_Q(1),
+ &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
+ }
+
+ /* MSRs. */
+ env->efer = state->msrs[NVMM_X64_MSR_EFER];
+ env->star = state->msrs[NVMM_X64_MSR_STAR];
+#ifdef TARGET_X86_64
+ env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
+ env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
+ env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
+ env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
+#endif
+ env->sysenter_cs = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
+ env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
+ env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
+ env->pat = state->msrs[NVMM_X64_MSR_PAT];
+ env->tsc = state->msrs[NVMM_X64_MSR_TSC];
+
+ x86_update_hflags(env);
+}
+
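+/*
+ * An interrupt cannot be injected while the guest is in an interrupt
+ * shadow or has RFLAGS.IF clear.  In that case we ask the hypervisor
+ * to exit as soon as an injection window opens (int_window_exiting),
+ * and retry the injection from nvmm_vcpu_pre_run() when the
+ * NVMM_VCPU_EXIT_INT_READY exit arrives.
+ */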
+static bool
+nvmm_can_take_int(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ struct nvmm_machine *mach = get_nvmm_mach();
+
+ if (qcpu->int_window_exit) {
+ return false;
+ }
+
+ if (qcpu->int_shadow || !(env->eflags & IF_MASK)) {
+ struct nvmm_x64_state *state = vcpu->state;
+
+ /* Exit on interrupt window. */
+ nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
+ state->intr.int_window_exiting = 1;
+ nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);
+
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+nvmm_can_take_nmi(CPUState *cpu)
+{
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+
+    /*
+     * Unlike INTs, NMIs always schedule an exit when they complete.
+     * Therefore, if window-exiting is enabled, it means NMIs are
+     * currently blocked.
+     */
+ if (qcpu->nmi_window_exit) {
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Called before the VCPU is run. We inject events generated by the I/O
+ * thread, and synchronize the guest TPR.
+ */
+static void
+nvmm_vcpu_pre_run(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct nvmm_x64_state *state = vcpu->state;
+ struct nvmm_vcpu_event *event = vcpu->event;
+ bool has_event = false;
+ bool sync_tpr = false;
+ uint8_t tpr;
+ int ret;
+
+ qemu_mutex_lock_iothread();
+
+ tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+ if (tpr != qcpu->tpr) {
+ qcpu->tpr = tpr;
+ sync_tpr = true;
+ }
+
+ /*
+ * Force the VCPU out of its inner loop to process any INIT requests
+ * or commit pending TPR access.
+ */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ cpu->exit_request = 1;
+ }
+
+ if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ if (nvmm_can_take_nmi(cpu)) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ event->type = NVMM_VCPU_EVENT_INTR;
+ event->vector = 2;
+ has_event = true;
+ }
+ }
+
+ if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if (nvmm_can_take_int(cpu)) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ event->type = NVMM_VCPU_EVENT_INTR;
+ event->vector = cpu_get_pic_interrupt(env);
+ has_event = true;
+ }
+ }
+
+ /* Don't want SMIs. */
+ if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ }
+
+ if (sync_tpr) {
+ ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
+ if (ret == -1) {
+ error_report("NVMM: Failed to get CPU state,"
+ " error=%d", errno);
+ }
+
+ state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
+
+ ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
+ if (ret == -1) {
+ error_report("NVMM: Failed to set CPU state,"
+ " error=%d", errno);
+ }
+ }
+
+ if (has_event) {
+ ret = nvmm_vcpu_inject(mach, vcpu);
+ if (ret == -1) {
+ error_report("NVMM: Failed to inject event,"
+ " error=%d", errno);
+ }
+ }
+
+ qemu_mutex_unlock_iothread();
+}
+
+/*
+ * Called after the VCPU ran. We synchronize the host view of the TPR and
+ * RFLAGS.
+ */
+static void
+nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
+{
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ uint64_t tpr;
+
+ env->eflags = exit->exitstate.rflags;
+ qcpu->int_shadow = exit->exitstate.int_shadow;
+ qcpu->int_window_exit = exit->exitstate.int_window_exiting;
+ qcpu->nmi_window_exit = exit->exitstate.nmi_window_exiting;
+
+ tpr = exit->exitstate.cr8;
+ if (qcpu->tpr != tpr) {
+ qcpu->tpr = tpr;
+ qemu_mutex_lock_iothread();
+ cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void
+nvmm_io_callback(struct nvmm_io *io)
+{
+ MemTxAttrs attrs = { 0 };
+ int ret;
+
+ ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
+ io->size, !io->in);
+ if (ret != MEMTX_OK) {
+ error_report("NVMM: I/O Transaction Failed "
+ "[%s, port=%u, size=%zu]", (io->in ? "in" : "out"),
+ io->port, io->size);
+ }
+
+ /* Needed, otherwise infinite loop. */
+ current_cpu->vcpu_dirty = false;
+}
+
+static void
+nvmm_mem_callback(struct nvmm_mem *mem)
+{
+ cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
+
+ /* Needed, otherwise infinite loop. */
+ current_cpu->vcpu_dirty = false;
+}
+
+static struct nvmm_assist_callbacks nvmm_callbacks = {
+ .io = nvmm_io_callback,
+ .mem = nvmm_mem_callback
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int
+nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = nvmm_assist_mem(mach, vcpu);
+ if (ret == -1) {
+ error_report("NVMM: Mem Assist Failed [gpa=%p]",
+ (void *)vcpu->exit->u.mem.gpa);
+ }
+
+ return ret;
+}
+
+static int
+nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = nvmm_assist_io(mach, vcpu);
+ if (ret == -1) {
+ error_report("NVMM: I/O Assist Failed [port=%d]",
+ (int)vcpu->exit->u.io.port);
+ }
+
+ return ret;
+}
+
+static int
+nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu,
+ struct nvmm_vcpu_exit *exit)
+{
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct nvmm_x64_state *state = vcpu->state;
+ uint64_t val;
+ int ret;
+
+ switch (exit->u.rdmsr.msr) {
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(x86_cpu->apic_state);
+ break;
+ case MSR_MTRRcap:
+ case MSR_MTRRdefType:
+ case MSR_MCG_CAP:
+ case MSR_MCG_STATUS:
+ val = 0;
+ break;
+ default: /* More MSRs to add? */
+ val = 0;
+ error_report("NVMM: Unexpected RDMSR 0x%x, ignored",
+ exit->u.rdmsr.msr);
+ break;
+ }
+
+ ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+ if (ret == -1) {
+ return -1;
+ }
+
+ state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
+ state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
+ state->gprs[NVMM_X64_GPR_RIP] = exit->u.rdmsr.npc;
+
+ ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+ if (ret == -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu,
+ struct nvmm_vcpu_exit *exit)
+{
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct nvmm_x64_state *state = vcpu->state;
+ uint64_t val;
+ int ret;
+
+ val = exit->u.wrmsr.val;
+
+ switch (exit->u.wrmsr.msr) {
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(x86_cpu->apic_state, val);
+ break;
+ case MSR_MTRRdefType:
+ case MSR_MCG_STATUS:
+ break;
+ default: /* More MSRs to add? */
+ error_report("NVMM: Unexpected WRMSR 0x%x [val=0x%lx], ignored",
+ exit->u.wrmsr.msr, val);
+ break;
+ }
+
+ ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+ if (ret == -1) {
+ return -1;
+ }
+
+ state->gprs[NVMM_X64_GPR_RIP] = exit->u.wrmsr.npc;
+
+ ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
+ if (ret == -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
+ struct nvmm_vcpu_exit *exit)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ int ret = 0;
+
+ qemu_mutex_lock_iothread();
+
+ if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) &&
+ !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->exception_index = EXCP_HLT;
+ cpu->halted = true;
+ ret = 1;
+ }
+
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static int
+nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
+{
+ struct nvmm_vcpu_event *event = vcpu->event;
+
+ event->type = NVMM_VCPU_EVENT_EXCP;
+ event->vector = 6;
+ event->u.excp.error = 0;
+
+ return nvmm_vcpu_inject(mach, vcpu);
+}
+
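+/*
+ * Main execution loop for one vCPU: handle asynchronous events, flush
+ * dirty register state to the kernel, run the vCPU, and dispatch on the
+ * exit reason until something requires returning to the main loop.
+ */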
+static int
+nvmm_vcpu_loop(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)cpu->env_ptr;
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct nvmm_vcpu_exit *exit = vcpu->exit;
+ int ret;
+
+ /*
+ * Some asynchronous events (INIT, SIPI, interrupt windows, TPR
+ * accesses) must be handled outside of the inner VCPU loop, so deal
+ * with them here before entering it.
+ */
+ if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ nvmm_cpu_synchronize_state(cpu);
+ do_cpu_init(x86_cpu);
+ /* Set the interrupt/NMI windows back to their reset state. */
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
+ if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->halted = false;
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ nvmm_cpu_synchronize_state(cpu);
+ do_cpu_sipi(x86_cpu);
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ nvmm_cpu_synchronize_state(cpu);
+ apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+
+ if (cpu->halted) {
+ cpu->exception_index = EXCP_HLT;
+ qatomic_set(&cpu->exit_request, false);
+ return 0;
+ }
+
+ qemu_mutex_unlock_iothread();
+ cpu_exec_start(cpu);
+
+ /*
+ * Inner VCPU loop.
+ */
+ do {
+ if (cpu->vcpu_dirty) {
+ nvmm_set_registers(cpu);
+ cpu->vcpu_dirty = false;
+ }
+
+ if (qcpu->stop) {
+ cpu->exception_index = EXCP_INTERRUPT;
+ qcpu->stop = false;
+ ret = 1;
+ break;
+ }
+
+ nvmm_vcpu_pre_run(cpu);
+
+ if (qatomic_read(&cpu->exit_request)) {
+#if NVMM_USER_VERSION >= 2
+ nvmm_vcpu_stop(vcpu);
+#else
+ qemu_cpu_kick_self();
+#endif
+ }
+
+ /* Read exit_request before the kernel reads the immediate exit flag */
+ smp_rmb();
+ ret = nvmm_vcpu_run(mach, vcpu);
+ if (ret == -1) {
+ error_report("NVMM: Failed to exec a virtual processor,"
+ " error=%d", errno);
+ break;
+ }
+
+ nvmm_vcpu_post_run(cpu, exit);
+
+ switch (exit->reason) {
+ case NVMM_VCPU_EXIT_NONE:
+ break;
+#if NVMM_USER_VERSION >= 2
+ case NVMM_VCPU_EXIT_STOPPED:
+ /*
+ * The kernel cleared the immediate exit flag; cpu->exit_request
+ * must only be cleared afterwards, hence the write barrier below.
+ */
+ smp_wmb();
+ qcpu->stop = true;
+ break;
+#endif
+ case NVMM_VCPU_EXIT_MEMORY:
+ ret = nvmm_handle_mem(mach, vcpu);
+ break;
+ case NVMM_VCPU_EXIT_IO:
+ ret = nvmm_handle_io(mach, vcpu);
+ break;
+ case NVMM_VCPU_EXIT_INT_READY:
+ case NVMM_VCPU_EXIT_NMI_READY:
+ case NVMM_VCPU_EXIT_TPR_CHANGED:
+ break;
+ case NVMM_VCPU_EXIT_HALTED:
+ ret = nvmm_handle_halted(mach, cpu, exit);
+ break;
+ case NVMM_VCPU_EXIT_SHUTDOWN:
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ cpu->exception_index = EXCP_INTERRUPT;
+ ret = 1;
+ break;
+ case NVMM_VCPU_EXIT_RDMSR:
+ ret = nvmm_handle_rdmsr(mach, cpu, exit);
+ break;
+ case NVMM_VCPU_EXIT_WRMSR:
+ ret = nvmm_handle_wrmsr(mach, cpu, exit);
+ break;
+ case NVMM_VCPU_EXIT_MONITOR:
+ case NVMM_VCPU_EXIT_MWAIT:
+ ret = nvmm_inject_ud(mach, vcpu);
+ break;
+ default:
+ error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
+ exit->reason, exit->u.inv.hwcode);
+ nvmm_get_registers(cpu);
+ qemu_mutex_lock_iothread();
+ qemu_system_guest_panicked(cpu_get_crash_info(cpu));
+ qemu_mutex_unlock_iothread();
+ ret = -1;
+ break;
+ }
+ } while (ret == 0);
+
+ cpu_exec_end(cpu);
+ qemu_mutex_lock_iothread();
+
+ qatomic_set(&cpu->exit_request, false);
+
+ return ret < 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
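+/*
+ * State synchronization. 'vcpu_dirty' tracks whether QEMU's cached copy
+ * of the registers is newer than the kernel's: a dirty vCPU must have
+ * its registers pushed with nvmm_set_registers() before it runs again.
+ */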
+static void
+do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ nvmm_get_registers(cpu);
+ cpu->vcpu_dirty = true;
+}
+
+static void
+do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
+{
+ nvmm_set_registers(cpu);
+ cpu->vcpu_dirty = false;
+}
+
+static void
+do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+{
+ nvmm_set_registers(cpu);
+ cpu->vcpu_dirty = false;
+}
+
+static void
+do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+ cpu->vcpu_dirty = true;
+}
+
+void nvmm_cpu_synchronize_state(CPUState *cpu)
+{
+ if (!cpu->vcpu_dirty) {
+ run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ }
+}
+
+void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+void nvmm_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
+/* -------------------------------------------------------------------------- */
+
+static Error *nvmm_migration_blocker;
+
+/*
+ * The nvmm_vcpu_stop() mechanism breaks races between entering the VMM
+ * and another thread signaling the vCPU thread to exit.
+ */
+
+static void
+nvmm_ipi_signal(int sigcpu)
+{
+ if (current_cpu) {
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu);
+#if NVMM_USER_VERSION >= 2
+ struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+ nvmm_vcpu_stop(vcpu);
+#else
+ qcpu->stop = true;
+#endif
+ }
+}
+
+static void
+nvmm_init_cpu_signals(void)
+{
+ struct sigaction sigact;
+ sigset_t set;
+
+ /* Install the IPI handler. */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = nvmm_ipi_signal;
+ sigaction(SIG_IPI, &sigact, NULL);
+
+ /* Allow IPIs on the current thread. */
+ sigprocmask(SIG_BLOCK, NULL, &set);
+ sigdelset(&set, SIG_IPI);
+ pthread_sigmask(SIG_SETMASK, &set, NULL);
+}
+
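+/*
+ * Create and configure one NVMM vCPU: force-set the MCE/MCA/MTRR CPUID
+ * bits, register the assist callbacks, and enable TPR-change exits when
+ * the kernel supports them.
+ */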
+int
+nvmm_init_vcpu(CPUState *cpu)
+{
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct nvmm_vcpu_conf_cpuid cpuid;
+ struct nvmm_vcpu_conf_tpr tpr;
+ Error *local_error = NULL;
+ struct qemu_vcpu *qcpu;
+ int ret, err;
+
+ nvmm_init_cpu_signals();
+
+ if (nvmm_migration_blocker == NULL) {
+ error_setg(&nvmm_migration_blocker,
+ "NVMM: Migration not supported");
+
+ if (migrate_add_blocker(nvmm_migration_blocker, &local_error) < 0) {
+ error_report_err(local_error);
+ error_free(nvmm_migration_blocker);
+ return -EINVAL;
+ }
+ }
+
+ qcpu = g_malloc0(sizeof(*qcpu));
+ if (qcpu == NULL) {
+ error_report("NVMM: Failed to allocate VCPU context.");
+ return -ENOMEM;
+ }
+
+ ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Failed to create a virtual processor,"
+ " error=%d", err);
+ g_free(qcpu);
+ return -err;
+ }
+
+ memset(&cpuid, 0, sizeof(cpuid));
+ cpuid.mask = 1;
+ cpuid.leaf = 0x00000001;
+ cpuid.u.mask.set.edx = CPUID_MCE | CPUID_MCA | CPUID_MTRR;
+ ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CPUID,
+ &cpuid);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Failed to configure a virtual processor,"
+ " error=%d", err);
+ g_free(qcpu);
+ return -err;
+ }
+
+ ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CALLBACKS,
+ &nvmm_callbacks);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Failed to configure a virtual processor,"
+ " error=%d", err);
+ g_free(qcpu);
+ return -err;
+ }
+
+ if (qemu_mach.cap.arch.vcpu_conf_support & NVMM_CAP_ARCH_VCPU_CONF_TPR) {
+ memset(&tpr, 0, sizeof(tpr));
+ tpr.exit_changed = 1;
+ ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_TPR, &tpr);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Failed to configure a virtual processor,"
+ " error=%d", err);
+ g_free(qcpu);
+ return -err;
+ }
+ }
+
+ cpu->vcpu_dirty = true;
+ cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
+
+ return 0;
+}
+
+int
+nvmm_vcpu_exec(CPUState *cpu)
+{
+ int ret, fatal;
+
+ while (1) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
+ ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ break;
+ }
+
+ fatal = nvmm_vcpu_loop(cpu);
+
+ if (fatal) {
+ error_report("NVMM: Failed to execute a VCPU.");
+ abort();
+ }
+ }
+
+ return ret;
+}
+
+void
+nvmm_destroy_vcpu(CPUState *cpu)
+{
+ struct nvmm_machine *mach = get_nvmm_mach();
+ struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+
+ nvmm_vcpu_destroy(mach, &qcpu->vcpu);
+ g_free(cpu->hax_vcpu);
+}
+
+/* -------------------------------------------------------------------------- */
+
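+/*
+ * Memory mapping. RAM sections are mapped into the guest physical address
+ * space with nvmm_gpa_map(); ROM sections are mapped without PROT_WRITE,
+ * so that guest writes trap and can be emulated.
+ */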
+static void
+nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva,
+ bool add, bool rom, const char *name)
+{
+ struct nvmm_machine *mach = get_nvmm_mach();
+ int ret, prot;
+
+ if (add) {
+ prot = PROT_READ | PROT_EXEC;
+ if (!rom) {
+ prot |= PROT_WRITE;
+ }
+ ret = nvmm_gpa_map(mach, hva, start_pa, size, prot);
+ } else {
+ ret = nvmm_gpa_unmap(mach, hva, start_pa, size);
+ }
+
+ if (ret == -1) {
+ error_report("NVMM: Failed to %s GPA range '%s' PA:%p, "
+ "Size:%p bytes, HostVA:%p, error=%d",
+ (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa,
+ (void *)size, (void *)hva, errno);
+ }
+}
+
+static void
+nvmm_process_section(MemoryRegionSection *section, int add)
+{
+ MemoryRegion *mr = section->mr;
+ hwaddr start_pa = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ unsigned int delta;
+ uintptr_t hva;
+
+ if (!memory_region_is_ram(mr)) {
+ return;
+ }
+
+ /* Adjust start_pa and size so that they are page-aligned. */
+ delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
+ delta &= ~qemu_real_host_page_mask;
+ if (delta > size) {
+ return;
+ }
+ start_pa += delta;
+ size -= delta;
+ size &= qemu_real_host_page_mask;
+ if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ return;
+ }
+
+ hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
+ section->offset_within_region + delta;
+
+ nvmm_update_mapping(start_pa, size, hva, add,
+ memory_region_is_rom(mr), mr->name);
+}
+
+static void
+nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section)
+{
+ memory_region_ref(section->mr);
+ nvmm_process_section(section, 1);
+}
+
+static void
+nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section)
+{
+ nvmm_process_section(section, 0);
+ memory_region_unref(section->mr);
+}
+
+static void
+nvmm_transaction_begin(MemoryListener *listener)
+{
+ /* nothing */
+}
+
+static void
+nvmm_transaction_commit(MemoryListener *listener)
+{
+ /* nothing */
+}
+
+static void
+nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+
+ if (!memory_region_is_ram(mr)) {
+ return;
+ }
+
+ memory_region_set_dirty(mr, 0, int128_get64(section->size));
+}
+
+static MemoryListener nvmm_memory_listener = {
+ .name = "nvmm",
+ .begin = nvmm_transaction_begin,
+ .commit = nvmm_transaction_commit,
+ .region_add = nvmm_region_add,
+ .region_del = nvmm_region_del,
+ .log_sync = nvmm_log_sync,
+ .priority = 10,
+};
+
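+/*
+ * Every new RAM block's host virtual range must be registered with
+ * nvmm_hva_map() before any part of it can be mapped into the guest.
+ */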
+static void
+nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size)
+{
+ struct nvmm_machine *mach = get_nvmm_mach();
+ uintptr_t hva = (uintptr_t)host;
+ int ret;
+
+ ret = nvmm_hva_map(mach, hva, max_size);
+
+ if (ret == -1) {
+ error_report("NVMM: Failed to map HVA, HostVA:%p "
+ "Size:%p bytes, error=%d",
+ (void *)hva, (void *)size, errno);
+ }
+}
+
+static struct RAMBlockNotifier nvmm_ram_notifier = {
+ .ram_block_added = nvmm_ram_block_added
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int
+nvmm_accel_init(MachineState *ms)
+{
+ int ret, err;
+
+ ret = nvmm_init();
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Initialization failed, error=%d", errno);
+ return -err;
+ }
+
+ ret = nvmm_capability(&qemu_mach.cap);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Unable to fetch capability, error=%d", errno);
+ return -err;
+ }
+ if (qemu_mach.cap.version < NVMM_KERN_VERSION) {
+ error_report("NVMM: Unsupported version %u", qemu_mach.cap.version);
+ return -EPROGMISMATCH;
+ }
+ if (qemu_mach.cap.state_size != sizeof(struct nvmm_x64_state)) {
+ error_report("NVMM: Wrong state size %u", qemu_mach.cap.state_size);
+ return -EPROGMISMATCH;
+ }
+
+ ret = nvmm_machine_create(&qemu_mach.mach);
+ if (ret == -1) {
+ err = errno;
+ error_report("NVMM: Machine creation failed, error=%d", errno);
+ return -err;
+ }
+
+ memory_listener_register(&nvmm_memory_listener, &address_space_memory);
+ ram_block_notifier_add(&nvmm_ram_notifier);
+
+ printf("NetBSD Virtual Machine Monitor accelerator is operational\n");
+ return 0;
+}
+
+int
+nvmm_enabled(void)
+{
+ return nvmm_allowed;
+}
+
+static void
+nvmm_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "NVMM";
+ ac->init_machine = nvmm_accel_init;
+ ac->allowed = &nvmm_allowed;
+}
+
+static const TypeInfo nvmm_accel_type = {
+ .name = ACCEL_CLASS_NAME("nvmm"),
+ .parent = TYPE_ACCEL,
+ .class_init = nvmm_accel_class_init,
+};
+
+static void
+nvmm_type_init(void)
+{
+ type_register_static(&nvmm_accel_type);
+}
+
+type_init(nvmm_type_init);
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
new file mode 100644
index 000000000..6f1fc174b
--- /dev/null
+++ b/target/i386/ops_sse.h
@@ -0,0 +1,2326 @@
+/*
+ * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "crypto/aes.h"
+
+#if SHIFT == 0
+#define Reg MMXReg
+#define XMM_ONLY(...)
+#define B(n) MMX_B(n)
+#define W(n) MMX_W(n)
+#define L(n) MMX_L(n)
+#define Q(n) MMX_Q(n)
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define XMM_ONLY(...) __VA_ARGS__
+#define B(n) ZMM_B(n)
+#define W(n) ZMM_W(n)
+#define L(n) ZMM_L(n)
+#define Q(n) ZMM_Q(n)
+#define SUFFIX _xmm
+#endif
+
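+/*
+ * This header is compiled twice: once with SHIFT == 0 to generate the
+ * 64-bit MMX helpers (suffix _mmx) and once with SHIFT == 1 for the
+ * 128-bit SSE helpers (suffix _xmm), as set up by the macros above.
+ */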
+void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) >>= shift;
+ d->W(1) >>= shift;
+ d->W(2) >>= shift;
+ d->W(3) >>= shift;
+#if SHIFT == 1
+ d->W(4) >>= shift;
+ d->W(5) >>= shift;
+ d->W(6) >>= shift;
+ d->W(7) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ shift = 15;
+ } else {
+ shift = s->B(0);
+ }
+ d->W(0) = (int16_t)d->W(0) >> shift;
+ d->W(1) = (int16_t)d->W(1) >> shift;
+ d->W(2) = (int16_t)d->W(2) >> shift;
+ d->W(3) = (int16_t)d->W(3) >> shift;
+#if SHIFT == 1
+ d->W(4) = (int16_t)d->W(4) >> shift;
+ d->W(5) = (int16_t)d->W(5) >> shift;
+ d->W(6) = (int16_t)d->W(6) >> shift;
+ d->W(7) = (int16_t)d->W(7) >> shift;
+#endif
+}
+
+void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) <<= shift;
+ d->W(1) <<= shift;
+ d->W(2) <<= shift;
+ d->W(3) <<= shift;
+#if SHIFT == 1
+ d->W(4) <<= shift;
+ d->W(5) <<= shift;
+ d->W(6) <<= shift;
+ d->W(7) <<= shift;
+#endif
+ }
+}
+
+void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) >>= shift;
+ d->L(1) >>= shift;
+#if SHIFT == 1
+ d->L(2) >>= shift;
+ d->L(3) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ shift = 31;
+ } else {
+ shift = s->B(0);
+ }
+ d->L(0) = (int32_t)d->L(0) >> shift;
+ d->L(1) = (int32_t)d->L(1) >> shift;
+#if SHIFT == 1
+ d->L(2) = (int32_t)d->L(2) >> shift;
+ d->L(3) = (int32_t)d->L(3) >> shift;
+#endif
+}
+
+void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) <<= shift;
+ d->L(1) <<= shift;
+#if SHIFT == 1
+ d->L(2) <<= shift;
+ d->L(3) <<= shift;
+#endif
+ }
+}
+
+void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) >>= shift;
+#if SHIFT == 1
+ d->Q(1) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) <<= shift;
+#if SHIFT == 1
+ d->Q(1) <<= shift;
+#endif
+ }
+}
+
+#if SHIFT == 1
+void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift, i;
+
+ shift = s->L(0);
+ if (shift > 16) {
+ shift = 16;
+ }
+ for (i = 0; i < 16 - shift; i++) {
+ d->B(i) = d->B(i + shift);
+ }
+ for (i = 16 - shift; i < 16; i++) {
+ d->B(i) = 0;
+ }
+}
+
+void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift, i;
+
+ shift = s->L(0);
+ if (shift > 16) {
+ shift = 16;
+ }
+ for (i = 15; i >= shift; i--) {
+ d->B(i) = d->B(i - shift);
+ }
+ for (i = 0; i < shift; i++) {
+ d->B(i) = 0;
+ }
+}
+#endif
+
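+/*
+ * Element-wise helper generators: each expands F across all byte, word,
+ * dword or qword lanes, with XMM_ONLY() covering the upper half that
+ * only exists in the 128-bit variant.
+ */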
+#define SSE_HELPER_B(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->B(0) = F(d->B(0), s->B(0)); \
+ d->B(1) = F(d->B(1), s->B(1)); \
+ d->B(2) = F(d->B(2), s->B(2)); \
+ d->B(3) = F(d->B(3), s->B(3)); \
+ d->B(4) = F(d->B(4), s->B(4)); \
+ d->B(5) = F(d->B(5), s->B(5)); \
+ d->B(6) = F(d->B(6), s->B(6)); \
+ d->B(7) = F(d->B(7), s->B(7)); \
+ XMM_ONLY( \
+ d->B(8) = F(d->B(8), s->B(8)); \
+ d->B(9) = F(d->B(9), s->B(9)); \
+ d->B(10) = F(d->B(10), s->B(10)); \
+ d->B(11) = F(d->B(11), s->B(11)); \
+ d->B(12) = F(d->B(12), s->B(12)); \
+ d->B(13) = F(d->B(13), s->B(13)); \
+ d->B(14) = F(d->B(14), s->B(14)); \
+ d->B(15) = F(d->B(15), s->B(15)); \
+ ) \
+ }
+
+#define SSE_HELPER_W(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->W(0) = F(d->W(0), s->W(0)); \
+ d->W(1) = F(d->W(1), s->W(1)); \
+ d->W(2) = F(d->W(2), s->W(2)); \
+ d->W(3) = F(d->W(3), s->W(3)); \
+ XMM_ONLY( \
+ d->W(4) = F(d->W(4), s->W(4)); \
+ d->W(5) = F(d->W(5), s->W(5)); \
+ d->W(6) = F(d->W(6), s->W(6)); \
+ d->W(7) = F(d->W(7), s->W(7)); \
+ ) \
+ }
+
+#define SSE_HELPER_L(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->L(0) = F(d->L(0), s->L(0)); \
+ d->L(1) = F(d->L(1), s->L(1)); \
+ XMM_ONLY( \
+ d->L(2) = F(d->L(2), s->L(2)); \
+ d->L(3) = F(d->L(3), s->L(3)); \
+ ) \
+ }
+
+#define SSE_HELPER_Q(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->Q(0) = F(d->Q(0), s->Q(0)); \
+ XMM_ONLY( \
+ d->Q(1) = F(d->Q(1), s->Q(1)); \
+ ) \
+ }
+
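+/*
+ * Saturating narrowing helpers, e.g. satub(300) == 255 and
+ * satsb(-200) == -128. They are defined only on the SHIFT == 0 pass but
+ * remain visible to the SHIFT == 1 pass, since both inclusions end up in
+ * the same translation unit.
+ */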
+#if SHIFT == 0
+static inline int satub(int x)
+{
+ if (x < 0) {
+ return 0;
+ } else if (x > 255) {
+ return 255;
+ } else {
+ return x;
+ }
+}
+
+static inline int satuw(int x)
+{
+ if (x < 0) {
+ return 0;
+ } else if (x > 65535) {
+ return 65535;
+ } else {
+ return x;
+ }
+}
+
+static inline int satsb(int x)
+{
+ if (x < -128) {
+ return -128;
+ } else if (x > 127) {
+ return 127;
+ } else {
+ return x;
+ }
+}
+
+static inline int satsw(int x)
+{
+ if (x < -32768) {
+ return -32768;
+ } else if (x > 32767) {
+ return 32767;
+ } else {
+ return x;
+ }
+}
+
+#define FADD(a, b) ((a) + (b))
+#define FADDUB(a, b) satub((a) + (b))
+#define FADDUW(a, b) satuw((a) + (b))
+#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
+#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
+
+#define FSUB(a, b) ((a) - (b))
+#define FSUBUB(a, b) satub((a) - (b))
+#define FSUBUW(a, b) satuw((a) - (b))
+#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
+#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
+#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
+#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
+#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
+#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))
+
+#define FAND(a, b) ((a) & (b))
+#define FANDN(a, b) ((~(a)) & (b))
+#define FOR(a, b) ((a) | (b))
+#define FXOR(a, b) ((a) ^ (b))
+
+#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
+#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
+#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
+#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)
+
+#define FMULLW(a, b) ((a) * (b))
+#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
+#define FMULHUW(a, b) ((a) * (b) >> 16)
+#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
+
+#define FAVG(a, b) (((a) + (b) + 1) >> 1)
+#endif
+
+SSE_HELPER_B(helper_paddb, FADD)
+SSE_HELPER_W(helper_paddw, FADD)
+SSE_HELPER_L(helper_paddl, FADD)
+SSE_HELPER_Q(helper_paddq, FADD)
+
+SSE_HELPER_B(helper_psubb, FSUB)
+SSE_HELPER_W(helper_psubw, FSUB)
+SSE_HELPER_L(helper_psubl, FSUB)
+SSE_HELPER_Q(helper_psubq, FSUB)
+
+SSE_HELPER_B(helper_paddusb, FADDUB)
+SSE_HELPER_B(helper_paddsb, FADDSB)
+SSE_HELPER_B(helper_psubusb, FSUBUB)
+SSE_HELPER_B(helper_psubsb, FSUBSB)
+
+SSE_HELPER_W(helper_paddusw, FADDUW)
+SSE_HELPER_W(helper_paddsw, FADDSW)
+SSE_HELPER_W(helper_psubusw, FSUBUW)
+SSE_HELPER_W(helper_psubsw, FSUBSW)
+
+SSE_HELPER_B(helper_pminub, FMINUB)
+SSE_HELPER_B(helper_pmaxub, FMAXUB)
+
+SSE_HELPER_W(helper_pminsw, FMINSW)
+SSE_HELPER_W(helper_pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(helper_pand, FAND)
+SSE_HELPER_Q(helper_pandn, FANDN)
+SSE_HELPER_Q(helper_por, FOR)
+SSE_HELPER_Q(helper_pxor, FXOR)
+
+SSE_HELPER_B(helper_pcmpgtb, FCMPGTB)
+SSE_HELPER_W(helper_pcmpgtw, FCMPGTW)
+SSE_HELPER_L(helper_pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(helper_pcmpeqb, FCMPEQ)
+SSE_HELPER_W(helper_pcmpeqw, FCMPEQ)
+SSE_HELPER_L(helper_pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(helper_pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(helper_pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(helper_pmulhuw, FMULHUW)
+SSE_HELPER_W(helper_pmulhw, FMULHW)
+
+SSE_HELPER_B(helper_pavgb, FAVG)
+SSE_HELPER_W(helper_pavgw, FAVG)
+
+void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
+#if SHIFT == 1
+ d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);
+#endif
+}
+
+void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+
+ for (i = 0; i < (2 << SHIFT); i++) {
+ d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) +
+ (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1);
+ }
+}
+
+#if SHIFT == 0
+static inline int abs1(int a)
+{
+ if (a < 0) {
+ return -a;
+ } else {
+ return a;
+ }
+}
+#endif
+void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ unsigned int val;
+
+ val = 0;
+ val += abs1(d->B(0) - s->B(0));
+ val += abs1(d->B(1) - s->B(1));
+ val += abs1(d->B(2) - s->B(2));
+ val += abs1(d->B(3) - s->B(3));
+ val += abs1(d->B(4) - s->B(4));
+ val += abs1(d->B(5) - s->B(5));
+ val += abs1(d->B(6) - s->B(6));
+ val += abs1(d->B(7) - s->B(7));
+ d->Q(0) = val;
+#if SHIFT == 1
+ val = 0;
+ val += abs1(d->B(8) - s->B(8));
+ val += abs1(d->B(9) - s->B(9));
+ val += abs1(d->B(10) - s->B(10));
+ val += abs1(d->B(11) - s->B(11));
+ val += abs1(d->B(12) - s->B(12));
+ val += abs1(d->B(13) - s->B(13));
+ val += abs1(d->B(14) - s->B(14));
+ val += abs1(d->B(15) - s->B(15));
+ d->Q(1) = val;
+#endif
+}
+
+void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ target_ulong a0)
+{
+ int i;
+
+ for (i = 0; i < (8 << SHIFT); i++) {
+ if (s->B(i) & 0x80) {
+ cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC());
+ }
+ }
+}
+
+void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val)
+{
+ d->L(0) = val;
+ d->L(1) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+}
+
+#ifdef TARGET_X86_64
+void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val)
+{
+ d->Q(0) = val;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+}
+#endif
+
+#if SHIFT == 0
+void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ *d = r;
+}
+#else
+void helper_shufps(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.L(0) = d->L(order & 3);
+ r.L(1) = d->L((order >> 2) & 3);
+ r.L(2) = s->L((order >> 4) & 3);
+ r.L(3) = s->L((order >> 6) & 3);
+ *d = r;
+}
+
+void helper_shufpd(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.Q(0) = d->Q(order & 1);
+ r.Q(1) = s->Q((order >> 1) & 1);
+ *d = r;
+}
+
+void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.L(0) = s->L(order & 3);
+ r.L(1) = s->L((order >> 2) & 3);
+ r.L(2) = s->L((order >> 4) & 3);
+ r.L(3) = s->L((order >> 6) & 3);
+ *d = r;
+}
+
+void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ r.Q(1) = s->Q(1);
+ *d = r;
+}
+
+void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.Q(0) = s->Q(0);
+ r.W(4) = s->W(4 + (order & 3));
+ r.W(5) = s->W(4 + ((order >> 2) & 3));
+ r.W(6) = s->W(4 + ((order >> 4) & 3));
+ r.W(7) = s->W(4 + ((order >> 6) & 3));
+ *d = r;
+}
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_HELPER_S(name, F) \
+ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ }
+
+#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
+#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
+#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
+#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
+#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)
+
+/* Note that the choice of comparison op here is important to get the
+ * special cases right: for min and max Intel specifies that (-0,0),
+ * (NaN, anything) and (anything, NaN) return the second argument.
+ */
+#define FPU_MIN(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
+#define FPU_MAX(size, a, b) \
+ (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+
+/* float to float conversions */
+void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+ float32 s0, s1;
+
+ s0 = s->ZMM_S(0);
+ s1 = s->ZMM_S(1);
+ d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status);
+ d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status);
+}
+
+void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status);
+ d->Q(1) = 0;
+}
+
+void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status);
+}
+
+void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+}
+
+/* integer to float */
+void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status);
+ d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status);
+ d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status);
+}
+
+void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+ int32_t l0, l1;
+
+ l0 = (int32_t)s->ZMM_L(0);
+ l1 = (int32_t)s->ZMM_L(1);
+ d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status);
+}
+
+void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+ d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+ d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+ d->ZMM_S(0) = int32_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+ d->ZMM_D(0) = int32_to_float64(val, &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+ d->ZMM_S(0) = int64_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+ d->ZMM_D(0) = int64_to_float64(val, &env->sse_status);
+}
+#endif
+
+/* float to integer */
+
+/*
+ * x86 mandates that we return the indefinite integer value for the result
+ * of any float-to-integer conversion that raises the 'invalid' exception.
+ * Wrap the softfloat functions to get this behaviour.
+ */
+#define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \
+ static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \
+ { \
+ int oldflags, newflags; \
+ RETTYPE r; \
+ \
+ oldflags = get_float_exception_flags(s); \
+ set_float_exception_flags(0, s); \
+ r = FN(a, s); \
+ newflags = get_float_exception_flags(s); \
+ if (newflags & float_flag_invalid) { \
+ r = INDEFVALUE; \
+ } \
+ set_float_exception_flags(newflags | oldflags, s); \
+ return r; \
+ }
+
+WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN)
+WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN)
+WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN)
+WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN)
+WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN)
+WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN)
+WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN)
+WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN)
+
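+/*
+ * For example, converting a NaN or an out-of-range float32 with
+ * x86_float32_to_int32() yields INT32_MIN (0x80000000), the x86
+ * "integer indefinite" value.
+ */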
+void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = x86_float32_to_int32(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = x86_float32_to_int32(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
+}
+
+void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+/* float to integer truncated */
+void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = x86_float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = x86_float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
+}
+
+void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
+{
+ return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(1) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(1), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(2) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(2), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(3) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(3), &env->sse_status),
+ &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
+}
+
+void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
+ &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
+}
+
+void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
+}
+
+void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
+}
+
+static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
+{
+ uint64_t mask;
+
+ if (len == 0) {
+ mask = ~0LL;
+ } else {
+ mask = (1ULL << len) - 1;
+ }
+ return (src >> shift) & mask;
+}
+
+void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0));
+}
+
+void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
+}
+
+static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
+{
+ uint64_t mask;
+
+ if (len == 0) {
+ mask = ~0ULL;
+ } else {
+ mask = (1ULL << len) - 1;
+ }
+ return (src & ~(mask << shift)) | ((src & mask) << shift);
+}
+
+void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8));
+}
+
+void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+ d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length);
+}
+
+void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+ *d = r;
+}
+
+void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+ *d = r;
+}
+
+void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+ *d = r;
+}
+
+void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+ *d = r;
+}
+
+void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+}
+
+/* XXX: unordered */
+#define SSE_HELPER_CMP(name, F) \
+ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ }
+
+#define FPU_CMPEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPUNORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPNEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
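+/*
+ * COMIS/UCOMIS set ZF/PF/CF from the comparison result. FloatRelation is
+ * -1 (less), 0 (equal), 1 (greater) or 2 (unordered), so 'ret + 1' below
+ * indexes this table: less -> CF, equal -> ZF, greater -> none,
+ * unordered -> ZF|PF|CF.
+ */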
+static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s)
+{
+ FloatRelation ret;
+ float32 s0, s1;
+
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
+ ret = float32_compare_quiet(s0, s1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comiss(CPUX86State *env, Reg *d, Reg *s)
+{
+ FloatRelation ret;
+ float32 s0, s1;
+
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
+ ret = float32_compare(s0, s1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s)
+{
+ FloatRelation ret;
+ float64 d0, d1;
+
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
+ ret = float64_compare_quiet(d0, d1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comisd(CPUX86State *env, Reg *d, Reg *s)
+{
+ FloatRelation ret;
+ float64 d0, d1;
+
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
+ ret = float64_compare(d0, d1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+uint32_t helper_movmskps(CPUX86State *env, Reg *s)
+{
+ int b0, b1, b2, b3;
+
+ b0 = s->ZMM_L(0) >> 31;
+ b1 = s->ZMM_L(1) >> 31;
+ b2 = s->ZMM_L(2) >> 31;
+ b3 = s->ZMM_L(3) >> 31;
+ return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
+}
+
+uint32_t helper_movmskpd(CPUX86State *env, Reg *s)
+{
+ int b0, b1;
+
+ b0 = s->ZMM_L(1) >> 31;
+ b1 = s->ZMM_L(3) >> 31;
+ return b0 | (b1 << 1);
+}
+
+#endif
+
+uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s)
+{
+ uint32_t val;
+
+ val = 0;
+ val |= (s->B(0) >> 7);
+ val |= (s->B(1) >> 6) & 0x02;
+ val |= (s->B(2) >> 5) & 0x04;
+ val |= (s->B(3) >> 4) & 0x08;
+ val |= (s->B(4) >> 3) & 0x10;
+ val |= (s->B(5) >> 2) & 0x20;
+ val |= (s->B(6) >> 1) & 0x40;
+ val |= (s->B(7)) & 0x80;
+#if SHIFT == 1
+ val |= (s->B(8) << 1) & 0x0100;
+ val |= (s->B(9) << 2) & 0x0200;
+ val |= (s->B(10) << 3) & 0x0400;
+ val |= (s->B(11) << 4) & 0x0800;
+ val |= (s->B(12) << 5) & 0x1000;
+ val |= (s->B(13) << 6) & 0x2000;
+ val |= (s->B(14) << 7) & 0x4000;
+ val |= (s->B(15) << 8) & 0x8000;
+#endif
+ return val;
+}
+
+void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.B(0) = satsb((int16_t)d->W(0));
+ r.B(1) = satsb((int16_t)d->W(1));
+ r.B(2) = satsb((int16_t)d->W(2));
+ r.B(3) = satsb((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satsb((int16_t)d->W(4));
+ r.B(5) = satsb((int16_t)d->W(5));
+ r.B(6) = satsb((int16_t)d->W(6));
+ r.B(7) = satsb((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satsb((int16_t)s->W(4));
+ r.B(13) = satsb((int16_t)s->W(5));
+ r.B(14) = satsb((int16_t)s->W(6));
+ r.B(15) = satsb((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.B(0) = satub((int16_t)d->W(0));
+ r.B(1) = satub((int16_t)d->W(1));
+ r.B(2) = satub((int16_t)d->W(2));
+ r.B(3) = satub((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satub((int16_t)d->W(4));
+ r.B(5) = satub((int16_t)d->W(5));
+ r.B(6) = satub((int16_t)d->W(6));
+ r.B(7) = satub((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satub((int16_t)s->W(4));
+ r.B(13) = satub((int16_t)s->W(5));
+ r.B(14) = satub((int16_t)s->W(6));
+ r.B(15) = satub((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.W(0) = satsw(d->L(0));
+ r.W(1) = satsw(d->L(1));
+#if SHIFT == 1
+ r.W(2) = satsw(d->L(2));
+ r.W(3) = satsw(d->L(3));
+#endif
+ r.W((2 << SHIFT) + 0) = satsw(s->L(0));
+ r.W((2 << SHIFT) + 1) = satsw(s->L(1));
+#if SHIFT == 1
+ r.W(6) = satsw(s->L(2));
+ r.W(7) = satsw(s->L(3));
+#endif
+ *d = r;
+}
+
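+/*
+ * Generates the punpckl/punpckh helper families: 'base' selects the low
+ * (0) or high (1) half of each operand, whose elements are then
+ * interleaved destination-first.
+ */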
+#define UNPCK_OP(base_name, base) \
+ \
+ void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
+ r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
+ r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
+ r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
+ r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
+ r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
+ r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
+ r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
+ XMM_ONLY( \
+ r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
+ r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
+ r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
+ r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
+ r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
+ r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
+ r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
+ r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
+ r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
+ r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
+ r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
+ XMM_ONLY( \
+ r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
+ r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
+ r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
+ r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.L(0) = d->L((base << SHIFT) + 0); \
+ r.L(1) = s->L((base << SHIFT) + 0); \
+ XMM_ONLY( \
+ r.L(2) = d->L((base << SHIFT) + 1); \
+ r.L(3) = s->L((base << SHIFT) + 1); \
+ ) \
+ *d = r; \
+ } \
+ \
+ XMM_ONLY( \
+ void glue(helper_punpck ## base_name ## qdq, SUFFIX)(CPUX86State \
+ *env, \
+ Reg *d, \
+ Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.Q(0) = d->Q(base); \
+ r.Q(1) = s->Q(base); \
+ *d = r; \
+ } \
+ )
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+/* 3DNow! float ops */
+#if SHIFT == 0
+void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
+ d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
+}
+
+void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
+ d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
+}
+
+void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
+ d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0),
+ &env->mmx_status));
+ d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1),
+ &env->mmx_status));
+}
+
+void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) {
+ d->MMX_S(0) = s->MMX_S(0);
+ }
+ if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) {
+ d->MMX_S(1) = s->MMX_S(1);
+ }
+}
+
+void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) {
+ d->MMX_S(0) = s->MMX_S(0);
+ }
+ if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) {
+ d->MMX_S(1) = s->MMX_S(1);
+ }
+}
+
+void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = d->MMX_S(0);
+}
+
+void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
+ d->MMX_S(1) = float32_div(float32_one,
+ float32_sqrt(d->MMX_S(1), &env->mmx_status),
+ &env->mmx_status);
+ d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
+ d->MMX_L(0) = d->MMX_L(1);
+}
+
+void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_L(0) = s->MMX_L(1);
+ r.MMX_L(1) = s->MMX_L(0);
+ *d = r;
+}
+#endif
+
+/* SSSE3 op helpers */
+void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg r;
+
+ for (i = 0; i < (8 << SHIFT); i++) {
+ r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
+ }
+
+ *d = r;
+}
+
+void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
+ r.W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
+ XMM_ONLY(r.W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
+ XMM_ONLY(r.W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
+ r.W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
+ r.W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
+ XMM_ONLY(r.W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
+ XMM_ONLY(r.W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
+
+ *d = r;
+}
+
+void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
+ XMM_ONLY(r.L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
+ r.L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
+ XMM_ONLY(r.L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
+
+ *d = r;
+}
+
+void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
+ r.W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
+ XMM_ONLY(r.W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
+ XMM_ONLY(r.W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
+ r.W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
+ r.W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
+ XMM_ONLY(r.W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
+ XMM_ONLY(r.W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
+
+ *d = r;
+}
+
+void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) +
+ (int8_t)s->B(1) * (uint8_t)d->B(1));
+ d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) +
+ (int8_t)s->B(3) * (uint8_t)d->B(3));
+ d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) +
+ (int8_t)s->B(5) * (uint8_t)d->B(5));
+ d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) +
+ (int8_t)s->B(7) * (uint8_t)d->B(7));
+#if SHIFT == 1
+ d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) +
+ (int8_t)s->B(9) * (uint8_t)d->B(9));
+ d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
+ (int8_t)s->B(11) * (uint8_t)d->B(11));
+ d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
+ (int8_t)s->B(13) * (uint8_t)d->B(13));
+ d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
+ (int8_t)s->B(15) * (uint8_t)d->B(15));
+#endif
+}
+
+void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
+ d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
+ XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
+ XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
+ d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
+ d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
+ XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
+ XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
+}
+
+void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
+ XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
+ d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
+ XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
+}
+
+void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
+ d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
+ XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
+ XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
+ d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
+ d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
+ XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
+ XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
+}
+
+#define FABSB(_, x) ((x) > INT8_MAX ? -(int8_t)(x) : (x))
+#define FABSW(_, x) ((x) > INT16_MAX ? -(int16_t)(x) : (x))
+#define FABSL(_, x) ((x) > INT32_MAX ? -(int32_t)(x) : (x))
+SSE_HELPER_B(helper_pabsb, FABSB)
+SSE_HELPER_W(helper_pabsw, FABSW)
+SSE_HELPER_L(helper_pabsd, FABSL)
+
+#define FMULHRSW(d, s) (((int16_t)(d) * (int16_t)(s) + 0x4000) >> 15)
+SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
+
+#define FSIGNB(d, s) ((s) <= INT8_MAX ? ((s) ? (d) : 0) : -(int8_t)(d))
+#define FSIGNW(d, s) ((s) <= INT16_MAX ? ((s) ? (d) : 0) : -(int16_t)(d))
+#define FSIGNL(d, s) ((s) <= INT32_MAX ? ((s) ? (d) : 0) : -(int32_t)(d))
+SSE_HELPER_B(helper_psignb, FSIGNB)
+SSE_HELPER_W(helper_psignw, FSIGNW)
+SSE_HELPER_L(helper_psignd, FSIGNL)
+
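+/*
+ * PALIGNR concatenates s (low) and d (high) and extracts a byte-aligned
+ * window. The SHR macro below evaluates to 0 for shift counts outside
+ * (-64, 64), so each result lane ORs together only the quadwords that
+ * actually overlap the window.
+ */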
+void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ int32_t shift)
+{
+ Reg r;
+
+ /* XXX could be checked during translation */
+ if (shift >= (16 << SHIFT)) {
+ r.Q(0) = 0;
+ XMM_ONLY(r.Q(1) = 0);
+ } else {
+ shift <<= 3;
+#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
+#if SHIFT == 0
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(d->Q(0), shift - 64);
+#else
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(s->Q(1), shift - 64) |
+ SHR(d->Q(0), shift - 128) |
+ SHR(d->Q(1), shift - 192);
+ r.Q(1) = SHR(s->Q(0), shift + 64) |
+ SHR(s->Q(1), shift - 0) |
+ SHR(d->Q(0), shift - 64) |
+ SHR(d->Q(1), shift - 128);
+#endif
+#undef SHR
+ }
+
+ *d = r;
+}
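
The SHR macro above builds one byte-granular shift of the concatenated source out of guarded per-lane 64-bit shifts: counts outside (-64, 64) contribute nothing and negative counts become left shifts, so ORing the parts reassembles the result. A minimal standalone sketch of the same composition on a 128-bit value held as two uint64_t halves (illustrative only, not QEMU code):

/* Compose a right shift of the 128-bit pair {hi,lo} from guarded
 * 64-bit lane shifts, as the SHR macro in helper_palignr does. */
#include <stdint.h>
#include <stdio.h>

static uint64_t shr_part(uint64_t v, int i)
{
    /* out-of-range counts contribute nothing; negative counts shift left */
    return (i < 64 && i > -64) ? (i > 0 ? v >> i : v << -i) : 0;
}

int main(void)
{
    uint64_t lo = 0x0706050403020100ULL, hi = 0x0f0e0d0c0b0a0908ULL;
    int shift = 5 * 8;                          /* right by five bytes */
    uint64_t r_lo = shr_part(lo, shift) | shr_part(hi, shift - 64);
    uint64_t r_hi = shr_part(hi, shift);        /* zeroes shift in on top */

    /* prints 00000000000f0e0d0c0b0a0908070605 */
    printf("%016llx%016llx\n",
           (unsigned long long)r_hi, (unsigned long long)r_lo);
    return 0;
}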
+
+#define XMM0 (env->xmm_regs[0])
+
+#if SHIFT == 1
+#define SSE_HELPER_V(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \
+ } \
+ } \
+ } \
+ }
+
+#define SSE_HELPER_I(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), \
+ ((imm >> 10) & 1)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), \
+ ((imm >> 11) & 1)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), \
+ ((imm >> 12) & 1)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), \
+ ((imm >> 13) & 1)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), \
+ ((imm >> 14) & 1)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), \
+ ((imm >> 15) & 1)); \
+ } \
+ } \
+ } \
+ }
+
+/* SSE4.1 op helpers */
+#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d)
+#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d)
+#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d)
+SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
+SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
+SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
+
+void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
+ uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
+
+ CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
+}
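
PTEST reduces its two 128-bit operands to a pair of flags: ZF is set when src AND dst is all zeroes and CF when src AND NOT dst is, with the other arithmetic flags cleared. A standalone sketch of the same reduction (type and function names are illustrative, not QEMU's):

/* PTEST flag computation on a 128-bit pair, mirroring the helper above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ptest_flags { bool zf, cf; };

static struct ptest_flags ptest128(const uint64_t d[2], const uint64_t s[2])
{
    uint64_t zf = (s[0] & d[0]) | (s[1] & d[1]);
    uint64_t cf = (s[0] & ~d[0]) | (s[1] & ~d[1]);

    return (struct ptest_flags){ .zf = zf == 0, .cf = cf == 0 };
}

int main(void)
{
    uint64_t d[2] = { ~0ULL, ~0ULL };
    uint64_t s[2] = { 0x1234ULL, 0 };
    struct ptest_flags f = ptest128(d, s);

    printf("zf=%d cf=%d\n", f.zf, f.cf);   /* zf=0 (overlap), cf=1 (subset) */
    return 0;
}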
+
+#define SSE_HELPER_F(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ if (num > 2) { \
+ if (num > 4) { \
+ d->elem(7) = F(7); \
+ d->elem(6) = F(6); \
+ d->elem(5) = F(5); \
+ d->elem(4) = F(4); \
+ } \
+ d->elem(3) = F(3); \
+ d->elem(2) = F(2); \
+ } \
+ d->elem(1) = F(1); \
+ d->elem(0) = F(0); \
+ }
+
+SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
+SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
+SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
+SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
+SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
+SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
+SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
+
+void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0);
+ d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2);
+}
+
+#define FCMPEQQ(d, s) (d == s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
+
+void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.W(0) = satuw((int32_t) d->L(0));
+ r.W(1) = satuw((int32_t) d->L(1));
+ r.W(2) = satuw((int32_t) d->L(2));
+ r.W(3) = satuw((int32_t) d->L(3));
+ r.W(4) = satuw((int32_t) s->L(0));
+ r.W(5) = satuw((int32_t) s->L(1));
+ r.W(6) = satuw((int32_t) s->L(2));
+ r.W(7) = satuw((int32_t) s->L(3));
+ *d = r;
+}
+
+#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s)
+#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s)
+#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s)
+#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s)
+SSE_HELPER_B(helper_pminsb, FMINSB)
+SSE_HELPER_L(helper_pminsd, FMINSD)
+SSE_HELPER_W(helper_pminuw, MIN)
+SSE_HELPER_L(helper_pminud, MIN)
+SSE_HELPER_B(helper_pmaxsb, FMAXSB)
+SSE_HELPER_L(helper_pmaxsd, FMAXSD)
+SSE_HELPER_W(helper_pmaxuw, MAX)
+SSE_HELPER_L(helper_pmaxud, MAX)
+
+#define FMULLD(d, s) ((int32_t)d * (int32_t)s)
+SSE_HELPER_L(helper_pmulld, FMULLD)
+
+void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int idx = 0;
+
+ if (s->W(1) < s->W(idx)) {
+ idx = 1;
+ }
+ if (s->W(2) < s->W(idx)) {
+ idx = 2;
+ }
+ if (s->W(3) < s->W(idx)) {
+ idx = 3;
+ }
+ if (s->W(4) < s->W(idx)) {
+ idx = 4;
+ }
+ if (s->W(5) < s->W(idx)) {
+ idx = 5;
+ }
+ if (s->W(6) < s->W(idx)) {
+ idx = 6;
+ }
+ if (s->W(7) < s->W(idx)) {
+ idx = 7;
+ }
+
+ d->W(0) = s->W(idx);
+ d->W(1) = idx;
+ d->L(1) = 0;
+ d->Q(1) = 0;
+}
+
+void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);
+
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
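
All four round helpers share this immediate decode: when bit 2 is set the current MXCSR rounding mode is kept, otherwise bits 1:0 select one of the four IEEE modes, and bit 3 asks for the inexact (precision) exception to be suppressed, which is why the helpers restore the pre-call inexact flag afterwards. A standalone decode sketch of that imm8 layout (function name and strings are illustrative):

/* Decode the ROUNDPS/ROUNDPD/ROUNDSS/ROUNDSD imm8 fields. */
#include <stdio.h>

static const char *round_mode_name(unsigned imm8)
{
    static const char *const names[4] = {
        "nearest-even", "down", "up", "toward-zero"
    };

    if (imm8 & (1u << 2)) {
        return "MXCSR.RC";              /* keep the current rounding mode */
    }
    return names[imm8 & 3];
}

int main(void)
{
    for (unsigned imm = 0; imm < 16; imm++) {
        printf("imm=%2u mode=%-12s inexact %s\n", imm, round_mode_name(imm),
               (imm & (1u << 3)) ? "suppressed" : "reported");
    }
    return 0;
}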
+
+void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
+
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+#define FBLENDP(d, s, m) (m ? s : d)
+SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
+SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
+SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
+
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+ float32 iresult = float32_zero;
+
+ if (mask & (1 << 4)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(0), s->ZMM_S(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(1), s->ZMM_S(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 6)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(2), s->ZMM_S(2),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 7)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(3), s->ZMM_S(3),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
+ d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
+ d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
+ d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
+}
+
+void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+ float64 iresult = float64_zero;
+
+ if (mask & (1 << 4)) {
+ iresult = float64_add(iresult,
+ float64_mul(d->ZMM_D(0), s->ZMM_D(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
+ iresult = float64_add(iresult,
+ float64_mul(d->ZMM_D(1), s->ZMM_D(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
+ d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
+}
+
+void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t offset)
+{
+ int s0 = (offset & 3) << 2;
+ int d0 = (offset & 4) << 0;
+ int i;
+ Reg r;
+
+ for (i = 0; i < 8; i++, d0++) {
+ r.W(i) = 0;
+ r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
+ r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
+ r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
+ r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
+ }
+
+ *d = r;
+}
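
MPSADBW splits its immediate into two block selectors: bits 1:0 pick the aligned 4-byte source block and bit 2 the starting byte of eight overlapping destination windows, each window yielding one 16-bit sum of absolute differences. A standalone reference on plain byte arrays under the same imm layout (function name is illustrative):

/* MPSADBW reference: eight overlapping 4-byte SADs. */
#include <stdint.h>
#include <stdlib.h>

static void mpsadbw_ref(uint16_t w[8], const uint8_t d[16],
                        const uint8_t s[16], unsigned imm)
{
    int s0 = (imm & 3) * 4;     /* aligned source block */
    int d0 = imm & 4;           /* destination start: 0 or 4 */

    for (int i = 0; i < 8; i++, d0++) {
        w[i] = 0;
        for (int j = 0; j < 4; j++) {
            w[i] += abs(d[d0 + j] - s[s0 + j]);
        }
    }
}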
+
+/* SSE4.2 op helpers */
+#define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
+
+static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl)
+{
+ int val;
+
+ /* Presence of REX.W is indicated by a set bit above bit 7 */
+ if (ctrl >> 8) {
+ val = abs1((int64_t)env->regs[reg]);
+ } else {
+ val = abs1((int32_t)env->regs[reg]);
+ }
+
+ if (ctrl & 1) {
+ if (val > 8) {
+ return 8;
+ }
+ } else {
+ if (val > 16) {
+ return 16;
+ }
+ }
+ return val;
+}
+
+static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
+{
+ int val = 0;
+
+ if (ctrl & 1) {
+ while (val < 8 && r->W(val)) {
+ val++;
+ }
+ } else {
+ while (val < 16 && r->B(val)) {
+ val++;
+ }
+ }
+
+ return val;
+}
+
+static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
+{
+ switch ((ctrl >> 0) & 3) {
+ case 0:
+ return r->B(i);
+ case 1:
+ return r->W(i);
+ case 2:
+ return (int8_t)r->B(i);
+ case 3:
+ default:
+ return (int16_t)r->W(i);
+ }
+}
+
+static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
+ int8_t ctrl, int valids, int validd)
+{
+ unsigned int res = 0;
+ int v;
+ int j, i;
+ int upper = (ctrl & 1) ? 7 : 15;
+
+ valids--;
+ validd--;
+
+ CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);
+
+ switch ((ctrl >> 2) & 3) {
+ case 0:
+ for (j = valids; j >= 0; j--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, j);
+ for (i = validd; i >= 0; i--) {
+ res |= (v == pcmp_val(d, ctrl, i));
+ }
+ }
+ break;
+ case 1:
+ for (j = valids; j >= 0; j--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, j);
+ for (i = ((validd - 1) | 1); i >= 0; i -= 2) {
+ res |= (pcmp_val(d, ctrl, i - 0) >= v &&
+ pcmp_val(d, ctrl, i - 1) <= v);
+ }
+ }
+ break;
+ case 2:
+ res = (1 << (upper - MAX(valids, validd))) - 1;
+ res <<= MAX(valids, validd) - MIN(valids, validd);
+ for (i = MIN(valids, validd); i >= 0; i--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, i);
+ res |= (v == pcmp_val(d, ctrl, i));
+ }
+ break;
+ case 3:
+ if (validd == -1) {
+ res = (2 << upper) - 1;
+ break;
+ }
+ for (j = valids == upper ? valids : valids - validd; j >= 0; j--) {
+ res <<= 1;
+ v = 1;
+ for (i = MIN(valids - j, validd); i >= 0; i--) {
+ v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
+ }
+ res |= v;
+ }
+ break;
+ }
+
+ switch ((ctrl >> 4) & 3) {
+ case 1:
+ res ^= (2 << upper) - 1;
+ break;
+ case 3:
+ res ^= (1 << (valids + 1)) - 1;
+ break;
+ }
+
+ if (res) {
+ CC_SRC |= CC_C;
+ }
+ if (res & 1) {
+ CC_SRC |= CC_O;
+ }
+
+ return res;
+}
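
The ctrl byte steers every stage of pcmpxstrx: bits 1:0 give the element format (consumed by pcmp_val above), bits 3:2 select the aggregation run by the first switch, bits 5:4 the polarity applied by the second switch, and bit 6 chooses the output form in the four instruction helpers below. A standalone decode sketch following the Intel SDM field layout (the descriptive strings are paraphrased):

/* Describe a PCMPxSTRx imm8 control byte. */
#include <stdio.h>

static void pcmpxstrx_describe(unsigned ctrl)
{
    static const char *const fmt[4] = {
        "unsigned bytes", "unsigned words", "signed bytes", "signed words"
    };
    static const char *const agg[4] = {
        "equal any", "ranges", "equal each", "equal ordered"
    };
    static const char *const pol[4] = {
        "positive", "negative", "masked positive", "masked negative"
    };

    printf("fmt=%s agg=%s pol=%s out=%s\n",
           fmt[ctrl & 3], agg[(ctrl >> 2) & 3], pol[(ctrl >> 4) & 3],
           (ctrl & 0x40) ? "MSB index / expanded mask"
                         : "LSB index / bit mask");
}

int main(void)
{
    pcmpxstrx_describe(0x0c);   /* unsigned bytes, equal ordered: strstr */
    return 0;
}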
+
+void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_elen(env, R_EDX, ctrl),
+ pcmp_elen(env, R_EAX, ctrl));
+
+ if (res) {
+ env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+ } else {
+ env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
+}
+
+void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_elen(env, R_EDX, ctrl),
+ pcmp_elen(env, R_EAX, ctrl));
+
+ if ((ctrl >> 6) & 1) {
+ if (ctrl & 1) {
+ for (i = 0; i < 8; i++, res >>= 1) {
+ env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+ }
+ } else {
+ for (i = 0; i < 16; i++, res >>= 1) {
+ env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+ }
+ }
+ } else {
+ env->xmm_regs[0].Q(1) = 0;
+ env->xmm_regs[0].Q(0) = res;
+ }
+}
+
+void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
+
+ if (res) {
+ env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+ } else {
+ env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
+}
+
+void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
+
+ if ((ctrl >> 6) & 1) {
+ if (ctrl & 1) {
+ for (i = 0; i < 8; i++, res >>= 1) {
+ env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+ }
+ } else {
+ for (i = 0; i < 16; i++, res >>= 1) {
+ env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+ }
+ }
+ } else {
+ env->xmm_regs[0].Q(1) = 0;
+ env->xmm_regs[0].Q(0) = res;
+ }
+}
+
+#define CRCPOLY 0x1edc6f41
+#define CRCPOLY_BITREV 0x82f63b78
+target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
+{
+ target_ulong crc = (msg & ((target_ulong) -1 >>
+ (TARGET_LONG_BITS - len))) ^ crc1;
+
+ while (len--) {
+ crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
+ }
+
+ return crc;
+}
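
helper_crc32 masks the len low message bits into the accumulator and then runs a bit-serial polynomial division with CRCPOLY_BITREV, the bit-reflected form of the CRC-32C polynomial CRCPOLY. A self-contained byte-sized sketch, checked against the standard CRC-32C test vector; note the instruction itself performs neither the initial nor the final XOR, software does (not QEMU code):

/* Byte-at-a-time CRC-32C with the reflected polynomial 0x82f63b78. */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c_byte(uint32_t crc, uint8_t byte)
{
    crc ^= byte;
    for (int i = 0; i < 8; i++) {
        crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78u : 0);
    }
    return crc;
}

int main(void)
{
    uint32_t crc = 0xffffffffu;                 /* usual init value */

    for (const char *p = "123456789"; *p; p++) {
        crc = crc32c_byte(crc, (uint8_t)*p);
    }
    printf("%08x\n", crc ^ 0xffffffffu);        /* prints e3069283 */
    return 0;
}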
+
+void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ uint64_t ah, al, b, resh, resl;
+
+ ah = 0;
+ al = d->Q((ctrl & 1) != 0);
+ b = s->Q((ctrl & 16) != 0);
+ resh = resl = 0;
+
+ while (b) {
+ if (b & 1) {
+ resl ^= al;
+ resh ^= ah;
+ }
+ ah = (ah << 1) | (al >> 63);
+ al <<= 1;
+ b >>= 1;
+ }
+
+ d->Q(0) = resl;
+ d->Q(1) = resh;
+}
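
PCLMULQDQ selects one quadword from each operand (ctrl bits 0 and 4) and multiplies them in GF(2)[x]: each set bit of b XORs a progressively left-shifted copy of the other operand into the 128-bit result, with {ah,al} tracking that copy across the 64-bit boundary. A standalone sketch with a tiny polynomial check (illustrative only):

/* 64x64 -> 128 carry-less multiply, same shift-and-XOR loop as above. */
#include <stdint.h>
#include <stdio.h>

static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
    uint64_t ah = 0, al = a, resh = 0, resl = 0;

    while (b) {
        if (b & 1) {
            resl ^= al;
            resh ^= ah;
        }
        ah = (ah << 1) | (al >> 63);    /* 128-bit left shift of {ah,al} */
        al <<= 1;
        b >>= 1;
    }
    *hi = resh;
    *lo = resl;
}

int main(void)
{
    uint64_t hi, lo;

    /* (x + 1)(x^2 + x) = x^3 + x in GF(2)[x]: 0b11 * 0b110 = 0b1010 */
    clmul64(0x3, 0x6, &hi, &lo);
    printf("%016llx %016llx\n", (unsigned long long)hi,
           (unsigned long long)lo);     /* prints ...0000 ...000a */
    return 0;
}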
+
+void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^
+ AES_Td1[st.B(AES_ishifts[4*i+1])] ^
+ AES_Td2[st.B(AES_ishifts[4*i+2])] ^
+ AES_Td3[st.B(AES_ishifts[4*i+3])]);
+ }
+}
+
+void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0; i < 16; i++) {
+ d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]);
+ }
+}
+
+void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^
+ AES_Te1[st.B(AES_shifts[4*i+1])] ^
+ AES_Te2[st.B(AES_shifts[4*i+2])] ^
+ AES_Te3[st.B(AES_shifts[4*i+3])]);
+ }
+}
+
+void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0; i < 16; i++) {
+ d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]);
+ }
+}
+
+void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg tmp = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^
+ AES_imc[tmp.B(4*i+1)][1] ^
+ AES_imc[tmp.B(4*i+2)][2] ^
+ AES_imc[tmp.B(4*i+3)][3]);
+ }
+}
+
+void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ Reg tmp = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->B(i) = AES_sbox[tmp.B(i + 4)];
+ d->B(i + 8) = AES_sbox[tmp.B(i + 12)];
+ }
+ d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl;
+ d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl;
+}
+#endif
+
+#undef SHIFT
+#undef XMM_ONLY
+#undef Reg
+#undef B
+#undef W
+#undef L
+#undef Q
+#undef SUFFIX
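
The AES helpers above are table-driven: AES_Te0..Te3 and AES_Td0..Td3 fold SubBytes (or InvSubBytes), ShiftRows and MixColumns into four lookups per output column, the *last variants fall back to the plain S-boxes because the final round omits MixColumns, and helper_aeskeygenassist emits the SubWord/RotWord-xor-rcon dwords that key expansion consumes. A hedged sketch of how AES-128 key expansion would use that output; keygen_w stands for the result's top dword (d->L(3) above, i.e. RotWord(SubWord(X3)) ^ rcon when the previous round key is the source), and the function name is illustrative:

/* Derive the next AES-128 round key from the previous one plus the
 * AESKEYGENASSIST dword. */
#include <stdint.h>

static void aes128_next_round_key(uint32_t rk[4], const uint32_t prev[4],
                                  uint32_t keygen_w)
{
    rk[0] = prev[0] ^ keygen_w;  /* w4 = w0 ^ RotWord(SubWord(w3)) ^ rcon */
    rk[1] = prev[1] ^ rk[0];     /* w5 = w1 ^ w4 */
    rk[2] = prev[2] ^ rk[1];     /* w6 = w2 ^ w5 */
    rk[3] = prev[3] ^ rk[2];     /* w7 = w3 ^ w6 */
}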
diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h
new file mode 100644
index 000000000..e68af5c40
--- /dev/null
+++ b/target/i386/ops_sse_header.h
@@ -0,0 +1,356 @@
+/*
+ * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if SHIFT == 0
+#define Reg MMXReg
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define SUFFIX _xmm
+#endif
+
+#define dh_alias_Reg ptr
+#define dh_alias_ZMMReg ptr
+#define dh_alias_MMXReg ptr
+#define dh_ctype_Reg Reg *
+#define dh_ctype_ZMMReg ZMMReg *
+#define dh_ctype_MMXReg MMXReg *
+
+DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+#define SSE_HELPER_B(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_W(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_L(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_Q(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+SSE_HELPER_B(paddb, FADD)
+SSE_HELPER_W(paddw, FADD)
+SSE_HELPER_L(paddl, FADD)
+SSE_HELPER_Q(paddq, FADD)
+
+SSE_HELPER_B(psubb, FSUB)
+SSE_HELPER_W(psubw, FSUB)
+SSE_HELPER_L(psubl, FSUB)
+SSE_HELPER_Q(psubq, FSUB)
+
+SSE_HELPER_B(paddusb, FADDUB)
+SSE_HELPER_B(paddsb, FADDSB)
+SSE_HELPER_B(psubusb, FSUBUB)
+SSE_HELPER_B(psubsb, FSUBSB)
+
+SSE_HELPER_W(paddusw, FADDUW)
+SSE_HELPER_W(paddsw, FADDSW)
+SSE_HELPER_W(psubusw, FSUBUW)
+SSE_HELPER_W(psubsw, FSUBSW)
+
+SSE_HELPER_B(pminub, FMINUB)
+SSE_HELPER_B(pmaxub, FMAXUB)
+
+SSE_HELPER_W(pminsw, FMINSW)
+SSE_HELPER_W(pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(pand, FAND)
+SSE_HELPER_Q(pandn, FANDN)
+SSE_HELPER_Q(por, FOR)
+SSE_HELPER_Q(pxor, FXOR)
+
+SSE_HELPER_B(pcmpgtb, FCMPGTB)
+SSE_HELPER_W(pcmpgtw, FCMPGTW)
+SSE_HELPER_L(pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(pcmpeqb, FCMPEQ)
+SSE_HELPER_W(pcmpeqw, FCMPEQ)
+SSE_HELPER_L(pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(pmulhuw, FMULHUW)
+SSE_HELPER_W(pmulhw, FMULHW)
+
+SSE_HELPER_B(pavgb, FAVG)
+SSE_HELPER_W(pavgw, FAVG)
+
+DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg)
+
+DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl)
+DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64)
+#endif
+
+#if SHIFT == 0
+DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int)
+#else
+DEF_HELPER_3(shufps, void, Reg, Reg, int)
+DEF_HELPER_3(shufpd, void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int)
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_HELPER_S(name, F) \
+ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+
+DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32)
+DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32)
+
+#ifdef TARGET_X86_64
+DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64)
+DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64)
+#endif
+
+DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvtss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvttss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg)
+
+#define SSE_HELPER_CMP(name, F) \
+ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
+DEF_HELPER_3(ucomiss, void, env, Reg, Reg)
+DEF_HELPER_3(comiss, void, env, Reg, Reg)
+DEF_HELPER_3(ucomisd, void, env, Reg, Reg)
+DEF_HELPER_3(comisd, void, env, Reg, Reg)
+DEF_HELPER_2(movmskps, i32, env, Reg)
+DEF_HELPER_2(movmskpd, i32, env, Reg)
+#endif
+
+DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg)
+DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg)
+#define UNPCK_OP(base_name, base) \
+ DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \
+ DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \
+ DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg)
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+/* 3DNow! float ops */
+#if SHIFT == 0
+DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg)
+#endif
+
+/* SSSE3 op helpers */
+DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32)
+
+/* SSE4.1 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+/* SSE4.2 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_3(crc32, tl, i32, tl, i32)
+#endif
+
+/* AES-NI op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+#undef SHIFT
+#undef Reg
+#undef SUFFIX
+
+#undef SSE_HELPER_B
+#undef SSE_HELPER_W
+#undef SSE_HELPER_L
+#undef SSE_HELPER_Q
+#undef SSE_HELPER_S
+#undef SSE_HELPER_CMP
+#undef UNPCK_OP
diff --git a/target/i386/sev-sysemu-stub.c b/target/i386/sev-sysemu-stub.c
new file mode 100644
index 000000000..7a29295d1
--- /dev/null
+++ b/target/i386/sev-sysemu-stub.c
@@ -0,0 +1,70 @@
+/*
+ * QEMU SEV system stub
+ *
+ * Copyright Advanced Micro Devices 2018
+ *
+ * Authors:
+ * Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/error.h"
+#include "sev.h"
+
+SevInfo *qmp_query_sev(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+SevCapability *qmp_query_sev_capabilities(Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret,
+ bool has_gpa, uint64_t gpa, Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+}
+
+int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void sev_es_set_reset_vector(CPUState *cpu)
+{
+}
+
+int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
+{
+ g_assert_not_reached();
+}
+
+SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
+ Error **errp)
+{
+ error_setg(errp, "SEV is not available in this QEMU");
+ return NULL;
+}
+
+void hmp_info_sev(Monitor *mon, const QDict *qdict)
+{
+ monitor_printf(mon, "SEV is not available in this QEMU\n");
+}
diff --git a/target/i386/sev.c b/target/i386/sev.c
new file mode 100644
index 000000000..025ff7a6f
--- /dev/null
+++ b/target/i386/sev.c
@@ -0,0 +1,1341 @@
+/*
+ * QEMU SEV support
+ *
+ * Copyright Advanced Micro Devices 2016-2018
+ *
+ * Author:
+ * Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include <linux/kvm.h>
+#include <linux/psp-sev.h>
+
+#include <sys/ioctl.h>
+
+#include "qapi/error.h"
+#include "qom/object_interfaces.h"
+#include "qemu/base64.h"
+#include "qemu/module.h"
+#include "qemu/uuid.h"
+#include "crypto/hash.h"
+#include "sysemu/kvm.h"
+#include "sev.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "trace.h"
+#include "migration/blocker.h"
+#include "qom/object.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qmp/qerror.h"
+#include "exec/confidential-guest-support.h"
+#include "hw/i386/pc.h"
+#include "exec/address-spaces.h"
+
+#define TYPE_SEV_GUEST "sev-guest"
+OBJECT_DECLARE_SIMPLE_TYPE(SevGuestState, SEV_GUEST)
+
+
+/**
+ * SevGuestState:
+ *
+ * The SevGuestState object is used for creating and managing a SEV
+ * guest.
+ *
+ * # $QEMU \
+ * -object sev-guest,id=sev0 \
+ * -machine ...,memory-encryption=sev0
+ */
+struct SevGuestState {
+ ConfidentialGuestSupport parent_obj;
+
+ /* configuration parameters */
+ char *sev_device;
+ uint32_t policy;
+ char *dh_cert_file;
+ char *session_file;
+ uint32_t cbitpos;
+ uint32_t reduced_phys_bits;
+ bool kernel_hashes;
+
+ /* runtime state */
+ uint32_t handle;
+ uint8_t api_major;
+ uint8_t api_minor;
+ uint8_t build_id;
+ int sev_fd;
+ SevState state;
+ gchar *measurement;
+
+ uint32_t reset_cs;
+ uint32_t reset_ip;
+ bool reset_data_valid;
+};
+
+#define DEFAULT_GUEST_POLICY 0x1 /* disable debug */
+#define DEFAULT_SEV_DEVICE "/dev/sev"
+
+#define SEV_INFO_BLOCK_GUID "00f771de-1a7e-4fcb-890e-68c77e2fb44e"
+typedef struct __attribute__((__packed__)) SevInfoBlock {
+ /* SEV-ES Reset Vector Address */
+ uint32_t reset_addr;
+} SevInfoBlock;
+
+#define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454"
+typedef struct QEMU_PACKED SevHashTableDescriptor {
+ /* SEV hash table area guest address */
+ uint32_t base;
+ /* SEV hash table area size (in bytes) */
+ uint32_t size;
+} SevHashTableDescriptor;
+
+/* hard code sha256 digest size */
+#define HASH_SIZE 32
+
+typedef struct QEMU_PACKED SevHashTableEntry {
+ QemuUUID guid;
+ uint16_t len;
+ uint8_t hash[HASH_SIZE];
+} SevHashTableEntry;
+
+typedef struct QEMU_PACKED SevHashTable {
+ QemuUUID guid;
+ uint16_t len;
+ SevHashTableEntry cmdline;
+ SevHashTableEntry initrd;
+ SevHashTableEntry kernel;
+} SevHashTable;
+
+/*
+ * Data encrypted by sev_encrypt_flash() must be padded to a multiple of
+ * 16 bytes.
+ */
+typedef struct QEMU_PACKED PaddedSevHashTable {
+ SevHashTable ht;
+ uint8_t padding[ROUND_UP(sizeof(SevHashTable), 16) - sizeof(SevHashTable)];
+} PaddedSevHashTable;
+
+QEMU_BUILD_BUG_ON(sizeof(PaddedSevHashTable) % 16 != 0);
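
Everything handed to sev_encrypt_flash() must be a whole number of 16-byte blocks, so the hash table is wrapped in a struct whose trailing array absorbs the remainder, and the build-time assertion catches any drift if the table layout changes. The same idiom in a self-contained C11 sketch (struct names are illustrative):

/* Pad a struct to a 16-byte multiple and verify it statically. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ROUND_UP_16(n) (((n) + 15) & ~(size_t)15)

struct payload { uint8_t data[42]; };

struct padded_payload {
    struct payload p;
    uint8_t padding[ROUND_UP_16(sizeof(struct payload))
                    - sizeof(struct payload)];
};

static_assert(sizeof(struct padded_payload) % 16 == 0,
              "encrypted data size must be a multiple of 16");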
+
+static SevGuestState *sev_guest;
+static Error *sev_mig_blocker;
+
+static const char *const sev_fw_errlist[] = {
+ [SEV_RET_SUCCESS] = "",
+ [SEV_RET_INVALID_PLATFORM_STATE] = "Platform state is invalid",
+ [SEV_RET_INVALID_GUEST_STATE] = "Guest state is invalid",
+ [SEV_RET_INAVLID_CONFIG] = "Platform configuration is invalid",
+ [SEV_RET_INVALID_LEN] = "Buffer too small",
+ [SEV_RET_ALREADY_OWNED] = "Platform is already owned",
+ [SEV_RET_INVALID_CERTIFICATE] = "Certificate is invalid",
+ [SEV_RET_POLICY_FAILURE] = "Policy is not allowed",
+ [SEV_RET_INACTIVE] = "Guest is not active",
+ [SEV_RET_INVALID_ADDRESS] = "Invalid address",
+ [SEV_RET_BAD_SIGNATURE] = "Bad signature",
+ [SEV_RET_BAD_MEASUREMENT] = "Bad measurement",
+ [SEV_RET_ASID_OWNED] = "ASID is already owned",
+ [SEV_RET_INVALID_ASID] = "Invalid ASID",
+ [SEV_RET_WBINVD_REQUIRED] = "WBINVD is required",
+ [SEV_RET_DFFLUSH_REQUIRED] = "DF_FLUSH is required",
+ [SEV_RET_INVALID_GUEST] = "Guest handle is invalid",
+ [SEV_RET_INVALID_COMMAND] = "Invalid command",
+ [SEV_RET_ACTIVE] = "Guest is active",
+ [SEV_RET_HWSEV_RET_PLATFORM] = "Hardware error",
+ [SEV_RET_HWSEV_RET_UNSAFE] = "Hardware unsafe",
+ [SEV_RET_UNSUPPORTED] = "Feature not supported",
+ [SEV_RET_INVALID_PARAM] = "Invalid parameter",
+ [SEV_RET_RESOURCE_LIMIT] = "Required firmware resource depleted",
+ [SEV_RET_SECURE_DATA_INVALID] = "Part-specific integrity check failure",
+};
+
+#define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
+
+static int
+sev_ioctl(int fd, int cmd, void *data, int *error)
+{
+ int r;
+ struct kvm_sev_cmd input;
+
+ memset(&input, 0x0, sizeof(input));
+
+ input.id = cmd;
+ input.sev_fd = fd;
+ input.data = (__u64)(unsigned long)data;
+
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);
+
+ if (error) {
+ *error = input.error;
+ }
+
+ return r;
+}
+
+static int
+sev_platform_ioctl(int fd, int cmd, void *data, int *error)
+{
+ int r;
+ struct sev_issue_cmd arg;
+
+ arg.cmd = cmd;
+ arg.data = (unsigned long)data;
+ r = ioctl(fd, SEV_ISSUE_CMD, &arg);
+ if (error) {
+ *error = arg.error;
+ }
+
+ return r;
+}
+
+static const char *
+fw_error_to_str(int code)
+{
+ if (code < 0 || code >= SEV_FW_MAX_ERROR) {
+ return "unknown error";
+ }
+
+ return sev_fw_errlist[code];
+}
+
+static bool
+sev_check_state(const SevGuestState *sev, SevState state)
+{
+ assert(sev);
+ return sev->state == state;
+}
+
+static void
+sev_set_guest_state(SevGuestState *sev, SevState new_state)
+{
+ assert(new_state < SEV_STATE__MAX);
+ assert(sev);
+
+ trace_kvm_sev_change_state(SevState_str(sev->state),
+ SevState_str(new_state));
+ sev->state = new_state;
+}
+
+static void
+sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size)
+{
+ int r;
+ struct kvm_enc_region range;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ /*
+ * The RAM device presents a memory region that should be treated
+ * as an IO region and should not be pinned.
+ */
+ mr = memory_region_from_host(host, &offset);
+ if (mr && memory_region_is_ram_device(mr)) {
+ return;
+ }
+
+ range.addr = (__u64)(unsigned long)host;
+ range.size = max_size;
+
+ trace_kvm_memcrypt_register_region(host, max_size);
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
+ if (r) {
+ error_report("%s: failed to register region (%p+%#zx) error '%s'",
+ __func__, host, max_size, strerror(errno));
+ exit(1);
+ }
+}
+
+static void
+sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
+ size_t max_size)
+{
+ int r;
+ struct kvm_enc_region range;
+ ram_addr_t offset;
+ MemoryRegion *mr;
+
+ /*
+ * The RAM device presents a memory region that should be treated
+ * as an IO region and should not have been pinned.
+ */
+ mr = memory_region_from_host(host, &offset);
+ if (mr && memory_region_is_ram_device(mr)) {
+ return;
+ }
+
+ range.addr = (__u64)(unsigned long)host;
+ range.size = max_size;
+
+ trace_kvm_memcrypt_unregister_region(host, max_size);
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
+ if (r) {
+ error_report("%s: failed to unregister region (%p+%#zx)",
+ __func__, host, max_size);
+ }
+}
+
+static struct RAMBlockNotifier sev_ram_notifier = {
+ .ram_block_added = sev_ram_block_added,
+ .ram_block_removed = sev_ram_block_removed,
+};
+
+static void
+sev_guest_finalize(Object *obj)
+{
+}
+
+static char *
+sev_guest_get_session_file(Object *obj, Error **errp)
+{
+ SevGuestState *s = SEV_GUEST(obj);
+
+ return s->session_file ? g_strdup(s->session_file) : NULL;
+}
+
+static void
+sev_guest_set_session_file(Object *obj, const char *value, Error **errp)
+{
+ SevGuestState *s = SEV_GUEST(obj);
+
+ s->session_file = g_strdup(value);
+}
+
+static char *
+sev_guest_get_dh_cert_file(Object *obj, Error **errp)
+{
+ SevGuestState *s = SEV_GUEST(obj);
+
+ return g_strdup(s->dh_cert_file);
+}
+
+static void
+sev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
+{
+ SevGuestState *s = SEV_GUEST(obj);
+
+ s->dh_cert_file = g_strdup(value);
+}
+
+static char *
+sev_guest_get_sev_device(Object *obj, Error **errp)
+{
+ SevGuestState *sev = SEV_GUEST(obj);
+
+ return g_strdup(sev->sev_device);
+}
+
+static void
+sev_guest_set_sev_device(Object *obj, const char *value, Error **errp)
+{
+ SevGuestState *sev = SEV_GUEST(obj);
+
+ sev->sev_device = g_strdup(value);
+}
+
+static bool sev_guest_get_kernel_hashes(Object *obj, Error **errp)
+{
+ SevGuestState *sev = SEV_GUEST(obj);
+
+ return sev->kernel_hashes;
+}
+
+static void sev_guest_set_kernel_hashes(Object *obj, bool value, Error **errp)
+{
+ SevGuestState *sev = SEV_GUEST(obj);
+
+ sev->kernel_hashes = value;
+}
+
+static void
+sev_guest_class_init(ObjectClass *oc, void *data)
+{
+ object_class_property_add_str(oc, "sev-device",
+ sev_guest_get_sev_device,
+ sev_guest_set_sev_device);
+ object_class_property_set_description(oc, "sev-device",
+ "SEV device to use");
+ object_class_property_add_str(oc, "dh-cert-file",
+ sev_guest_get_dh_cert_file,
+ sev_guest_set_dh_cert_file);
+ object_class_property_set_description(oc, "dh-cert-file",
+ "guest owners DH certificate (encoded with base64)");
+ object_class_property_add_str(oc, "session-file",
+ sev_guest_get_session_file,
+ sev_guest_set_session_file);
+ object_class_property_set_description(oc, "session-file",
+ "guest owners session parameters (encoded with base64)");
+ object_class_property_add_bool(oc, "kernel-hashes",
+ sev_guest_get_kernel_hashes,
+ sev_guest_set_kernel_hashes);
+ object_class_property_set_description(oc, "kernel-hashes",
+ "add kernel hashes to guest firmware for measured Linux boot");
+}
+
+static void
+sev_guest_instance_init(Object *obj)
+{
+ SevGuestState *sev = SEV_GUEST(obj);
+
+ sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
+ sev->policy = DEFAULT_GUEST_POLICY;
+ object_property_add_uint32_ptr(obj, "policy", &sev->policy,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_uint32_ptr(obj, "handle", &sev->handle,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_uint32_ptr(obj, "cbitpos", &sev->cbitpos,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_uint32_ptr(obj, "reduced-phys-bits",
+ &sev->reduced_phys_bits,
+ OBJ_PROP_FLAG_READWRITE);
+}
+
+/* sev guest info */
+static const TypeInfo sev_guest_info = {
+ .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT,
+ .name = TYPE_SEV_GUEST,
+ .instance_size = sizeof(SevGuestState),
+ .instance_finalize = sev_guest_finalize,
+ .class_init = sev_guest_class_init,
+ .instance_init = sev_guest_instance_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+bool
+sev_enabled(void)
+{
+ return !!sev_guest;
+}
+
+bool
+sev_es_enabled(void)
+{
+ return sev_enabled() && (sev_guest->policy & SEV_POLICY_ES);
+}
+
+uint32_t
+sev_get_cbit_position(void)
+{
+ return sev_guest ? sev_guest->cbitpos : 0;
+}
+
+uint32_t
+sev_get_reduced_phys_bits(void)
+{
+ return sev_guest ? sev_guest->reduced_phys_bits : 0;
+}
+
+static SevInfo *sev_get_info(void)
+{
+ SevInfo *info;
+
+ info = g_new0(SevInfo, 1);
+ info->enabled = sev_enabled();
+
+ if (info->enabled) {
+ info->api_major = sev_guest->api_major;
+ info->api_minor = sev_guest->api_minor;
+ info->build_id = sev_guest->build_id;
+ info->policy = sev_guest->policy;
+ info->state = sev_guest->state;
+ info->handle = sev_guest->handle;
+ }
+
+ return info;
+}
+
+SevInfo *qmp_query_sev(Error **errp)
+{
+ SevInfo *info;
+
+ info = sev_get_info();
+ if (!info) {
+ error_setg(errp, "SEV feature is not available");
+ return NULL;
+ }
+
+ return info;
+}
+
+void hmp_info_sev(Monitor *mon, const QDict *qdict)
+{
+ SevInfo *info = sev_get_info();
+
+ if (info && info->enabled) {
+ monitor_printf(mon, "handle: %d\n", info->handle);
+ monitor_printf(mon, "state: %s\n", SevState_str(info->state));
+ monitor_printf(mon, "build: %d\n", info->build_id);
+ monitor_printf(mon, "api version: %d.%d\n",
+ info->api_major, info->api_minor);
+ monitor_printf(mon, "debug: %s\n",
+ info->policy & SEV_POLICY_NODBG ? "off" : "on");
+ monitor_printf(mon, "key-sharing: %s\n",
+ info->policy & SEV_POLICY_NOKS ? "off" : "on");
+ } else {
+ monitor_printf(mon, "SEV is not enabled\n");
+ }
+
+ qapi_free_SevInfo(info);
+}
+
+static int
+sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
+ size_t *cert_chain_len, Error **errp)
+{
+ guchar *pdh_data = NULL;
+ guchar *cert_chain_data = NULL;
+ struct sev_user_data_pdh_cert_export export = {};
+ int err, r;
+
+ /* query the certificate length */
+ r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
+ if (r < 0) {
+ if (err != SEV_RET_INVALID_LEN) {
+ error_setg(errp, "SEV: Failed to export PDH cert"
+ " ret=%d fw_err=%d (%s)",
+ r, err, fw_error_to_str(err));
+ return 1;
+ }
+ }
+
+ pdh_data = g_new(guchar, export.pdh_cert_len);
+ cert_chain_data = g_new(guchar, export.cert_chain_len);
+ export.pdh_cert_address = (unsigned long)pdh_data;
+ export.cert_chain_address = (unsigned long)cert_chain_data;
+
+ r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
+ if (r < 0) {
+ error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)",
+ r, err, fw_error_to_str(err));
+ goto e_free;
+ }
+
+ *pdh = pdh_data;
+ *pdh_len = export.pdh_cert_len;
+ *cert_chain = cert_chain_data;
+ *cert_chain_len = export.cert_chain_len;
+ return 0;
+
+e_free:
+ g_free(pdh_data);
+ g_free(cert_chain_data);
+ return 1;
+}
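
sev_get_pdh_info() follows the SEV firmware's two-pass calling convention, which sev_get_attestation_report() below repeats: a first call with empty buffers fails with SEV_RET_INVALID_LEN but fills in the required lengths, after which the caller allocates and reissues the command. A self-contained sketch of the pattern with a stubbed command (issue_cmd and ERR_INVALID_LEN are stand-ins, not real APIs):

/* Query-then-fetch: probe the length, allocate, reissue. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sized_buf { void *addr; size_t len; };

#define ERR_INVALID_LEN 1               /* stand-in for SEV_RET_INVALID_LEN */

/* Stub "firmware": reports the needed length on the zero-length pass. */
static int issue_cmd(struct sized_buf *buf, int *fw_err)
{
    if (buf->len < 32) {
        buf->len = 32;
        *fw_err = ERR_INVALID_LEN;
        return -1;
    }
    memset(buf->addr, 0xab, buf->len);
    *fw_err = 0;
    return 0;
}

static void *fetch_blob(size_t *out_len)
{
    struct sized_buf buf = { 0 };
    int fw_err = 0;

    if (issue_cmd(&buf, &fw_err) < 0 && fw_err != ERR_INVALID_LEN) {
        return NULL;            /* a real failure, not just a length probe */
    }
    buf.addr = malloc(buf.len); /* firmware filled in buf.len */
    if (!buf.addr || issue_cmd(&buf, &fw_err) < 0) {
        free(buf.addr);
        return NULL;
    }
    *out_len = buf.len;
    return buf.addr;
}

int main(void)
{
    size_t len = 0;
    void *blob = fetch_blob(&len);
    int ok = blob != NULL && len == 32;

    free(blob);
    return !ok;
}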
+
+static SevCapability *sev_get_capabilities(Error **errp)
+{
+ SevCapability *cap = NULL;
+ guchar *pdh_data = NULL;
+ guchar *cert_chain_data = NULL;
+ size_t pdh_len = 0, cert_chain_len = 0;
+ uint32_t ebx;
+ int fd;
+
+ if (!kvm_enabled()) {
+ error_setg(errp, "KVM not enabled");
+ return NULL;
+ }
+ if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
+ error_setg(errp, "SEV is not enabled in KVM");
+ return NULL;
+ }
+
+ fd = open(DEFAULT_SEV_DEVICE, O_RDWR);
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "SEV: Failed to open %s",
+ DEFAULT_SEV_DEVICE);
+ return NULL;
+ }
+
+ if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
+ &cert_chain_data, &cert_chain_len, errp)) {
+ goto out;
+ }
+
+ cap = g_new0(SevCapability, 1);
+ cap->pdh = g_base64_encode(pdh_data, pdh_len);
+ cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
+
+ host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
+ cap->cbitpos = ebx & 0x3f;
+
+ /*
+ * When the SEV feature is enabled, we lose one bit of guest physical
+ * addressing.
+ */
+ cap->reduced_phys_bits = 1;
+
+out:
+ g_free(pdh_data);
+ g_free(cert_chain_data);
+ close(fd);
+ return cap;
+}
+
+SevCapability *qmp_query_sev_capabilities(Error **errp)
+{
+ return sev_get_capabilities(errp);
+}
+
+static SevAttestationReport *sev_get_attestation_report(const char *mnonce,
+ Error **errp)
+{
+ struct kvm_sev_attestation_report input = {};
+ SevAttestationReport *report = NULL;
+ SevGuestState *sev = sev_guest;
+ g_autofree guchar *data = NULL;
+ g_autofree guchar *buf = NULL;
+ gsize len;
+ int err = 0, ret;
+
+ if (!sev_enabled()) {
+ error_setg(errp, "SEV is not enabled");
+ return NULL;
+ }
+
+ /* decode the base64-encoded mnonce string */
+ buf = g_base64_decode(mnonce, &len);
+ if (!buf) {
+ error_setg(errp, "SEV: failed to decode mnonce input");
+ return NULL;
+ }
+
+ /* verify the input mnonce length */
+ if (len != sizeof(input.mnonce)) {
+ error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")",
+ sizeof(input.mnonce), len);
+ return NULL;
+ }
+
+ /* Query the report length */
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
+ &input, &err);
+ if (ret < 0) {
+ if (err != SEV_RET_INVALID_LEN) {
+ error_setg(errp, "SEV: Failed to query the attestation report"
+ " length ret=%d fw_err=%d (%s)",
+ ret, err, fw_error_to_str(err));
+ return NULL;
+ }
+ }
+
+ data = g_malloc(input.len);
+ input.uaddr = (unsigned long)data;
+ memcpy(input.mnonce, buf, sizeof(input.mnonce));
+
+ /* Query the report */
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT,
+ &input, &err);
+ if (ret) {
+ error_setg_errno(errp, errno, "SEV: Failed to get attestation report"
+ " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err));
+ return NULL;
+ }
+
+ report = g_new0(SevAttestationReport, 1);
+ report->data = g_base64_encode(data, input.len);
+
+ trace_kvm_sev_attestation_report(mnonce, report->data);
+
+ return report;
+}
+
+SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
+ Error **errp)
+{
+ return sev_get_attestation_report(mnonce, errp);
+}
+
+static int
+sev_read_file_base64(const char *filename, guchar **data, gsize *len)
+{
+ gsize sz;
+ g_autofree gchar *base64 = NULL;
+ GError *error = NULL;
+
+ if (!g_file_get_contents(filename, &base64, &sz, &error)) {
+ error_report("SEV: Failed to read '%s' (%s)", filename, error->message);
+ g_error_free(error);
+ return -1;
+ }
+
+ *data = g_base64_decode(base64, len);
+ return 0;
+}
+
+static int
+sev_launch_start(SevGuestState *sev)
+{
+ gsize sz;
+ int ret = 1;
+ int fw_error, rc;
+ struct kvm_sev_launch_start start = {
+ .handle = sev->handle, .policy = sev->policy
+ };
+ guchar *session = NULL, *dh_cert = NULL;
+
+ if (sev->session_file) {
+ if (sev_read_file_base64(sev->session_file, &session, &sz) < 0) {
+ goto out;
+ }
+ start.session_uaddr = (unsigned long)session;
+ start.session_len = sz;
+ }
+
+ if (sev->dh_cert_file) {
+ if (sev_read_file_base64(sev->dh_cert_file, &dh_cert, &sz) < 0) {
+ goto out;
+ }
+ start.dh_uaddr = (unsigned long)dh_cert;
+ start.dh_len = sz;
+ }
+
+ trace_kvm_sev_launch_start(start.policy, session, dh_cert);
+ rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error);
+ if (rc < 0) {
+ error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
+ __func__, rc, fw_error, fw_error_to_str(fw_error));
+ goto out;
+ }
+
+ sev_set_guest_state(sev, SEV_STATE_LAUNCH_UPDATE);
+ sev->handle = start.handle;
+ ret = 0;
+
+out:
+ g_free(session);
+ g_free(dh_cert);
+ return ret;
+}
+
+static int
+sev_launch_update_data(SevGuestState *sev, uint8_t *addr, uint64_t len)
+{
+ int ret, fw_error;
+ struct kvm_sev_launch_update_data update;
+
+ if (!addr || !len) {
+ return 1;
+ }
+
+ update.uaddr = (__u64)(unsigned long)addr;
+ update.len = len;
+ trace_kvm_sev_launch_update_data(addr, len);
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
+ &update, &fw_error);
+ if (ret) {
+ error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
+ __func__, ret, fw_error, fw_error_to_str(fw_error));
+ }
+
+ return ret;
+}
+
+static int
+sev_launch_update_vmsa(SevGuestState *sev)
+{
+ int ret, fw_error;
+
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL, &fw_error);
+ if (ret) {
+ error_report("%s: LAUNCH_UPDATE_VMSA ret=%d fw_error=%d '%s'",
+ __func__, ret, fw_error, fw_error_to_str(fw_error));
+ }
+
+ return ret;
+}
+
+static void
+sev_launch_get_measure(Notifier *notifier, void *unused)
+{
+ SevGuestState *sev = sev_guest;
+ int ret, error;
+ g_autofree guchar *data = NULL;
+ struct kvm_sev_launch_measure measurement = {};
+
+ if (!sev_check_state(sev, SEV_STATE_LAUNCH_UPDATE)) {
+ return;
+ }
+
+ if (sev_es_enabled()) {
+ /* measure all the VM save areas before getting launch_measure */
+ ret = sev_launch_update_vmsa(sev);
+ if (ret) {
+ exit(1);
+ }
+ }
+
+ /* query the measurement blob length */
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE,
+ &measurement, &error);
+ if (!measurement.len) {
+ error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
+ __func__, ret, error, fw_error_to_str(error));
+ return;
+ }
+
+ data = g_new0(guchar, measurement.len);
+ measurement.uaddr = (unsigned long)data;
+
+ /* get the measurement blob */
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE,
+ &measurement, &error);
+ if (ret) {
+ error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
+ __func__, ret, error, fw_error_to_str(error));
+ return;
+ }
+
+ sev_set_guest_state(sev, SEV_STATE_LAUNCH_SECRET);
+
+ /* encode the measurement value and emit the event */
+ sev->measurement = g_base64_encode(data, measurement.len);
+ trace_kvm_sev_launch_measurement(sev->measurement);
+}
+
+static char *sev_get_launch_measurement(void)
+{
+ if (sev_guest &&
+ sev_guest->state >= SEV_STATE_LAUNCH_SECRET) {
+ return g_strdup(sev_guest->measurement);
+ }
+
+ return NULL;
+}
+
+SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
+{
+ char *data;
+ SevLaunchMeasureInfo *info;
+
+ data = sev_get_launch_measurement();
+ if (!data) {
+ error_setg(errp, "SEV launch measurement is not available");
+ return NULL;
+ }
+
+ info = g_malloc0(sizeof(*info));
+ info->data = data;
+
+ return info;
+}
+
+static Notifier sev_machine_done_notify = {
+ .notify = sev_launch_get_measure,
+};
+
+static void
+sev_launch_finish(SevGuestState *sev)
+{
+ int ret, error;
+
+ trace_kvm_sev_launch_finish();
+ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, &error);
+ if (ret) {
+ error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
+ __func__, ret, error, fw_error_to_str(error));
+ exit(1);
+ }
+
+ sev_set_guest_state(sev, SEV_STATE_RUNNING);
+
+ /* add migration blocker */
+ error_setg(&sev_mig_blocker,
+ "SEV: Migration is not implemented");
+ migrate_add_blocker(sev_mig_blocker, &error_fatal);
+}
+
+static void
+sev_vm_state_change(void *opaque, bool running, RunState state)
+{
+ SevGuestState *sev = opaque;
+
+ if (running) {
+ if (!sev_check_state(sev, SEV_STATE_RUNNING)) {
+ sev_launch_finish(sev);
+ }
+ }
+}
+
+int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ SevGuestState *sev
+ = (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST);
+ char *devname;
+ int ret, fw_error, cmd;
+ uint32_t ebx;
+ uint32_t host_cbitpos;
+ struct sev_user_data_status status = {};
+
+ if (!sev) {
+ return 0;
+ }
+
+ ret = ram_block_discard_disable(true);
+ if (ret) {
+ error_report("%s: cannot disable RAM discard", __func__);
+ return -1;
+ }
+
+ sev_guest = sev;
+ sev->state = SEV_STATE_UNINIT;
+
+ host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
+ host_cbitpos = ebx & 0x3f;
+
+ if (host_cbitpos != sev->cbitpos) {
+ error_setg(errp, "%s: cbitpos check failed, host '%d' requested '%d'",
+ __func__, host_cbitpos, sev->cbitpos);
+ goto err;
+ }
+
+ if (sev->reduced_phys_bits < 1) {
+ error_setg(errp, "%s: reduced_phys_bits check failed, it should be >=1,"
+ " requested '%d'", __func__, sev->reduced_phys_bits);
+ goto err;
+ }
+
+ devname = object_property_get_str(OBJECT(sev), "sev-device", NULL);
+ sev->sev_fd = open(devname, O_RDWR);
+ if (sev->sev_fd < 0) {
+ error_setg(errp, "%s: Failed to open %s '%s'", __func__,
+ devname, strerror(errno));
+ g_free(devname);
+ goto err;
+ }
+ g_free(devname);
+
+ ret = sev_platform_ioctl(sev->sev_fd, SEV_PLATFORM_STATUS, &status,
+ &fw_error);
+ if (ret) {
+ error_setg(errp, "%s: failed to get platform status ret=%d "
+ "fw_error='%d: %s'", __func__, ret, fw_error,
+ fw_error_to_str(fw_error));
+ goto err;
+ }
+ sev->build_id = status.build;
+ sev->api_major = status.api_major;
+ sev->api_minor = status.api_minor;
+
+ if (sev_es_enabled()) {
+ if (!kvm_kernel_irqchip_allowed()) {
+ error_report("%s: SEV-ES guests require in-kernel irqchip support",
+ __func__);
+ goto err;
+ }
+
+ if (!(status.flags & SEV_STATUS_FLAGS_CONFIG_ES)) {
+ error_report("%s: guest policy requires SEV-ES, but "
+ "host SEV-ES support unavailable",
+ __func__);
+ goto err;
+ }
+ cmd = KVM_SEV_ES_INIT;
+ } else {
+ cmd = KVM_SEV_INIT;
+ }
+
+ trace_kvm_sev_init();
+ ret = sev_ioctl(sev->sev_fd, cmd, NULL, &fw_error);
+ if (ret) {
+ error_setg(errp, "%s: failed to initialize ret=%d fw_error=%d '%s'",
+ __func__, ret, fw_error, fw_error_to_str(fw_error));
+ goto err;
+ }
+
+ ret = sev_launch_start(sev);
+ if (ret) {
+ error_setg(errp, "%s: failed to create encryption context", __func__);
+ goto err;
+ }
+
+ ram_block_notifier_add(&sev_ram_notifier);
+ qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
+ qemu_add_vm_change_state_handler(sev_vm_state_change, sev);
+
+ cgs->ready = true;
+
+ return 0;
+err:
+ sev_guest = NULL;
+ ram_block_discard_disable(false);
+ return -1;
+}
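+
+/*
+ * Sketch of how this init path is reached from the command line; the
+ * values are illustrative, and cbitpos must match the host CPUID value
+ * checked above (47 on EPYC, for example):
+ *
+ *   qemu-system-x86_64 \
+ *     -object sev-guest,id=sev0,cbitpos=47,reduced-phys-bits=1 \
+ *     -machine ...,confidential-guest-support=sev0
+ */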
+
+int
+sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp)
+{
+ if (!sev_guest) {
+ return 0;
+ }
+
+ /* if SEV is in update state then encrypt the data else do nothing */
+ if (sev_check_state(sev_guest, SEV_STATE_LAUNCH_UPDATE)) {
+ int ret = sev_launch_update_data(sev_guest, ptr, len);
+ if (ret < 0) {
+ error_setg(errp, "SEV: Failed to encrypt pflash rom");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
+ uint64_t gpa, Error **errp)
+{
+ struct kvm_sev_launch_secret input;
+ g_autofree guchar *data = NULL, *hdr = NULL;
+ int error, ret = 1;
+ void *hva;
+ gsize hdr_sz = 0, data_sz = 0;
+ MemoryRegion *mr = NULL;
+
+ if (!sev_guest) {
+ error_setg(errp, "SEV not enabled for guest");
+ return 1;
+ }
+
+ /* secret can be injected only in this state */
+ if (!sev_check_state(sev_guest, SEV_STATE_LAUNCH_SECRET)) {
+        error_setg(errp, "SEV: not in LAUNCH_SECRET state (current state %x)",
+ sev_guest->state);
+ return 1;
+ }
+
+ hdr = g_base64_decode(packet_hdr, &hdr_sz);
+ if (!hdr || !hdr_sz) {
+ error_setg(errp, "SEV: Failed to decode sequence header");
+ return 1;
+ }
+
+ data = g_base64_decode(secret, &data_sz);
+ if (!data || !data_sz) {
+ error_setg(errp, "SEV: Failed to decode data");
+ return 1;
+ }
+
+ hva = gpa2hva(&mr, gpa, data_sz, errp);
+ if (!hva) {
+ error_prepend(errp, "SEV: Failed to calculate guest address: ");
+ return 1;
+ }
+
+ input.hdr_uaddr = (uint64_t)(unsigned long)hdr;
+ input.hdr_len = hdr_sz;
+
+ input.trans_uaddr = (uint64_t)(unsigned long)data;
+ input.trans_len = data_sz;
+
+ input.guest_uaddr = (uint64_t)(unsigned long)hva;
+ input.guest_len = data_sz;
+
+ trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
+ input.trans_uaddr, input.trans_len);
+
+ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_LAUNCH_SECRET,
+ &input, &error);
+ if (ret) {
+ error_setg(errp, "SEV: failed to inject secret ret=%d fw_error=%d '%s'",
+ ret, error, fw_error_to_str(error));
+ return ret;
+ }
+
+ return 0;
+}
+
+#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
+struct sev_secret_area {
+ uint32_t base;
+ uint32_t size;
+};
+
+void qmp_sev_inject_launch_secret(const char *packet_hdr,
+ const char *secret,
+ bool has_gpa, uint64_t gpa,
+ Error **errp)
+{
+ if (!sev_enabled()) {
+ error_setg(errp, "SEV not enabled for guest");
+ return;
+ }
+ if (!has_gpa) {
+ uint8_t *data;
+ struct sev_secret_area *area;
+
+ if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
+ error_setg(errp, "SEV: no secret area found in OVMF,"
+ " gpa must be specified.");
+ return;
+ }
+ area = (struct sev_secret_area *)data;
+ gpa = area->base;
+ }
+
+ sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
+}
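+
+/*
+ * Illustrative QMP usage; packet-header and secret carry the base64
+ * blobs produced by the remote attestation tooling (placeholders here),
+ * and gpa may be omitted when OVMF publishes a secret area:
+ *
+ *   -> { "execute": "sev-inject-launch-secret",
+ *        "arguments": { "packet-header": "<base64>", "secret": "<base64>" } }
+ *   <- { "return": {} }
+ */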
+
+static int
+sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr)
+{
+ if (!info->reset_addr) {
+ error_report("SEV-ES reset address is zero");
+ return 1;
+ }
+
+ *addr = info->reset_addr;
+
+ return 0;
+}
+
+static int
+sev_es_find_reset_vector(void *flash_ptr, uint64_t flash_size,
+ uint32_t *addr)
+{
+ QemuUUID info_guid, *guid;
+ SevInfoBlock *info;
+ uint8_t *data;
+ uint16_t *len;
+
+ /*
+ * Initialize the address to zero. An address of zero with a successful
+ * return code indicates that SEV-ES is not active.
+ */
+ *addr = 0;
+
+ /*
+ * Extract the AP reset vector for SEV-ES guests by locating the SEV GUID.
+ * The SEV GUID is located on its own (original implementation) or within
+     * the Firmware GUID Table (new implementation), either of which is
+     * located 32 bytes from the end of the flash.
+ *
+ * Check the Firmware GUID Table first.
+ */
+ if (pc_system_ovmf_table_find(SEV_INFO_BLOCK_GUID, &data, NULL)) {
+ return sev_es_parse_reset_block((SevInfoBlock *)data, addr);
+ }
+
+ /*
+ * SEV info block not found in the Firmware GUID Table (or there isn't
+ * a Firmware GUID Table), fall back to the original implementation.
+ */
+ data = flash_ptr + flash_size - 0x20;
+
+ qemu_uuid_parse(SEV_INFO_BLOCK_GUID, &info_guid);
+ info_guid = qemu_uuid_bswap(info_guid); /* GUIDs are LE */
+
+ guid = (QemuUUID *)(data - sizeof(info_guid));
+ if (!qemu_uuid_is_equal(guid, &info_guid)) {
+        error_report("SEV information block/Firmware GUID Table block "
+                     "not found in pflash rom");
+ return 1;
+ }
+
+ len = (uint16_t *)((uint8_t *)guid - sizeof(*len));
+ info = (SevInfoBlock *)(data - le16_to_cpu(*len));
+
+ return sev_es_parse_reset_block(info, addr);
+}
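+
+/*
+ * Footer layout walked by the legacy path above, with
+ * data = flash_ptr + flash_size - 0x20:
+ *
+ *   data - *len : SevInfoBlock ('len' counts back from 'data', covering
+ *                 the block, the len field and the GUID)
+ *   data - 18   : uint16_t len, little-endian
+ *   data - 16   : 16-byte SEV info block GUID
+ */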
+
+void sev_es_set_reset_vector(CPUState *cpu)
+{
+ X86CPU *x86;
+ CPUX86State *env;
+
+ /* Only update if we have valid reset information */
+ if (!sev_guest || !sev_guest->reset_data_valid) {
+ return;
+ }
+
+ /* Do not update the BSP reset state */
+ if (cpu->cpu_index == 0) {
+ return;
+ }
+
+ x86 = X86_CPU(cpu);
+ env = &x86->env;
+
+ cpu_x86_load_seg_cache(env, R_CS, 0xf000, sev_guest->reset_cs, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
+ DESC_R_MASK | DESC_A_MASK);
+
+ env->eip = sev_guest->reset_ip;
+}
+
+int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
+{
+ CPUState *cpu;
+ uint32_t addr;
+ int ret;
+
+ if (!sev_es_enabled()) {
+ return 0;
+ }
+
+ addr = 0;
+ ret = sev_es_find_reset_vector(flash_ptr, flash_size,
+ &addr);
+ if (ret) {
+ return ret;
+ }
+
+ if (addr) {
+ sev_guest->reset_cs = addr & 0xffff0000;
+ sev_guest->reset_ip = addr & 0x0000ffff;
+ sev_guest->reset_data_valid = true;
+
+ CPU_FOREACH(cpu) {
+ sev_es_set_reset_vector(cpu);
+ }
+ }
+
+ return 0;
+}
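+
+/*
+ * Worked example of the split above: a reset vector of 0xfffffff0 yields
+ * reset_cs = 0xffff0000 (used as the CS base) and reset_ip = 0xfff0, so
+ * CS.base + IP reproduces the original 32-bit address.
+ */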
+
+static const QemuUUID sev_hash_table_header_guid = {
+ .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93,
+ 0xd4, 0x11, 0xfd, 0x21)
+};
+
+static const QemuUUID sev_kernel_entry_guid = {
+ .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1,
+ 0x72, 0xd2, 0x04, 0x5b)
+};
+static const QemuUUID sev_initrd_entry_guid = {
+ .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2,
+ 0x91, 0x69, 0x78, 0x1d)
+};
+static const QemuUUID sev_cmdline_entry_guid = {
+ .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71,
+ 0x4d, 0x36, 0xab, 0x2a)
+};
+
+/*
+ * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page
+ * which is included in SEV's initial memory measurement.
+ */
+bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
+{
+ uint8_t *data;
+ SevHashTableDescriptor *area;
+ SevHashTable *ht;
+ PaddedSevHashTable *padded_ht;
+ uint8_t cmdline_hash[HASH_SIZE];
+ uint8_t initrd_hash[HASH_SIZE];
+ uint8_t kernel_hash[HASH_SIZE];
+ uint8_t *hashp;
+ size_t hash_len = HASH_SIZE;
+ hwaddr mapped_len = sizeof(*padded_ht);
+ MemTxAttrs attrs = { 0 };
+ bool ret = true;
+
+ /*
+ * Only add the kernel hashes if the sev-guest configuration explicitly
+ * stated kernel-hashes=on.
+ */
+ if (!sev_guest->kernel_hashes) {
+ return false;
+ }
+
+ if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) {
+ error_setg(errp, "SEV: kernel specified but guest firmware "
+ "has no hashes table GUID");
+ return false;
+ }
+ area = (SevHashTableDescriptor *)data;
+ if (!area->base || area->size < sizeof(PaddedSevHashTable)) {
+ error_setg(errp, "SEV: guest firmware hashes table area is invalid "
+ "(base=0x%x size=0x%x)", area->base, area->size);
+ return false;
+ }
+
+ /*
+ * Calculate hash of kernel command-line with the terminating null byte. If
+ * the user doesn't supply a command-line via -append, the 1-byte "\0" will
+ * be used.
+ */
+ hashp = cmdline_hash;
+ if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data,
+ ctx->cmdline_size, &hashp, &hash_len, errp) < 0) {
+ return false;
+ }
+ assert(hash_len == HASH_SIZE);
+
+ /*
+ * Calculate hash of initrd. If the user doesn't supply an initrd via
+ * -initrd, an empty buffer will be used (ctx->initrd_size == 0).
+ */
+ hashp = initrd_hash;
+ if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data,
+ ctx->initrd_size, &hashp, &hash_len, errp) < 0) {
+ return false;
+ }
+ assert(hash_len == HASH_SIZE);
+
+ /* Calculate hash of the kernel */
+ hashp = kernel_hash;
+ struct iovec iov[2] = {
+ { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size },
+ { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size }
+ };
+ if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov),
+ &hashp, &hash_len, errp) < 0) {
+ return false;
+ }
+ assert(hash_len == HASH_SIZE);
+
+ /*
+ * Populate the hashes table in the guest's memory at the OVMF-designated
+ * area for the SEV hashes table
+ */
+ padded_ht = address_space_map(&address_space_memory, area->base,
+ &mapped_len, true, attrs);
+ if (!padded_ht || mapped_len != sizeof(*padded_ht)) {
+ error_setg(errp, "SEV: cannot map hashes table guest memory area");
+ return false;
+ }
+ ht = &padded_ht->ht;
+
+ ht->guid = sev_hash_table_header_guid;
+ ht->len = sizeof(*ht);
+
+ ht->cmdline.guid = sev_cmdline_entry_guid;
+ ht->cmdline.len = sizeof(ht->cmdline);
+ memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash));
+
+ ht->initrd.guid = sev_initrd_entry_guid;
+ ht->initrd.len = sizeof(ht->initrd);
+ memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash));
+
+ ht->kernel.guid = sev_kernel_entry_guid;
+ ht->kernel.len = sizeof(ht->kernel);
+ memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash));
+
+ /* zero the excess data so the measurement can be reliably calculated */
+ memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
+
+ if (sev_encrypt_flash((uint8_t *)padded_ht, sizeof(*padded_ht), errp) < 0) {
+ ret = false;
+ }
+
+ address_space_unmap(&address_space_memory, padded_ht,
+ mapped_len, true, mapped_len);
+
+ return ret;
+}
+
+static void
+sev_register_types(void)
+{
+ type_register_static(&sev_guest_info);
+}
+
+type_init(sev_register_types);
diff --git a/target/i386/sev.h b/target/i386/sev.h
new file mode 100644
index 000000000..83e82aa42
--- /dev/null
+++ b/target/i386/sev.h
@@ -0,0 +1,62 @@
+/*
+ * QEMU Secure Encrypted Virtualization (SEV) support
+ *
+ * Copyright: Advanced Micro Devices, 2016-2018
+ *
+ * Authors:
+ * Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_SEV_I386_H
+#define QEMU_SEV_I386_H
+
+#ifndef CONFIG_USER_ONLY
+#include CONFIG_DEVICES /* CONFIG_SEV */
+#endif
+
+#include "exec/confidential-guest-support.h"
+
+#define SEV_POLICY_NODBG 0x1
+#define SEV_POLICY_NOKS 0x2
+#define SEV_POLICY_ES 0x4
+#define SEV_POLICY_NOSEND 0x8
+#define SEV_POLICY_DOMAIN 0x10
+#define SEV_POLICY_SEV 0x20
+
+typedef struct SevKernelLoaderContext {
+ char *setup_data;
+ size_t setup_size;
+ char *kernel_data;
+ size_t kernel_size;
+ char *initrd_data;
+ size_t initrd_size;
+ char *cmdline_data;
+ size_t cmdline_size;
+} SevKernelLoaderContext;
+
+#ifdef CONFIG_SEV
+bool sev_enabled(void);
+bool sev_es_enabled(void);
+#else
+#define sev_enabled() 0
+#define sev_es_enabled() 0
+#endif
+
+extern uint32_t sev_get_cbit_position(void);
+extern uint32_t sev_get_reduced_phys_bits(void);
+extern bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx,
+                                         Error **errp);
+
+int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp);
+int sev_inject_launch_secret(const char *hdr, const char *secret,
+ uint64_t gpa, Error **errp);
+
+int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size);
+void sev_es_set_reset_vector(CPUState *cpu);
+
+int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp);
+
+#endif
diff --git a/target/i386/shift_helper_template.h b/target/i386/shift_helper_template.h
new file mode 100644
index 000000000..54f15d6e0
--- /dev/null
+++ b/target/i386/shift_helper_template.h
@@ -0,0 +1,108 @@
+/*
+ * x86 shift helpers
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+#define SHIFT_MASK (DATA_BITS - 1)
+#if DATA_BITS <= 32
+#define SHIFT1_MASK 0x1f
+#else
+#define SHIFT1_MASK 0x3f
+#endif
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_MASK 0xff
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_MASK 0xffff
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_MASK 0xffffffff
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_MASK 0xffffffffffffffffULL
+#else
+#error unhandled operand size
+#endif
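+
+/*
+ * This header is included once per operand size with SHIFT predefined
+ * (0..3), so for SHIFT == 1 glue(helper_rcl, SUFFIX) below expands to
+ * helper_rclw, operating on 16-bit values under DATA_MASK 0xffff.
+ */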
+
+target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
+ target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = env->cc_src;
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
+ if (count > 1) {
+ res |= t0 >> (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_src = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (DATA_BITS - count)) & CC_C);
+ }
+ return t0;
+}
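+
+/*
+ * Worked example (8-bit instantiation): t0 = 0x81, count = 1, CF set
+ * rotates [CF:t0] = 1:10000001 left by one, so the low byte of the
+ * result is 0x03 and the new CF is the old bit 7, i.e. CF = 1.
+ */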
+
+target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
+ target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = env->cc_src;
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 >> count) |
+ ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
+ if (count > 1) {
+ res |= t0 << (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_src = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (count - 1)) & CC_C);
+ }
+ return t0;
+}
+
+#undef DATA_BITS
+#undef SHIFT_MASK
+#undef SHIFT1_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
diff --git a/target/i386/svm.h b/target/i386/svm.h
new file mode 100644
index 000000000..f9a785489
--- /dev/null
+++ b/target/i386/svm.h
@@ -0,0 +1,240 @@
+#ifndef SVM_H
+#define SVM_H
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_GIF_ENABLED_SHIFT 25
+#define V_GIF_ENABLED_MASK (1 << V_GIF_ENABLED_SHIFT)
+
+#define V_GIF_SHIFT 9
+#define V_GIF_MASK (1 << V_GIF_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define V_VMLOAD_VMSAVE_ENABLED_MASK (1 << 1)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+
+#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR3 0x003
+#define SVM_EXIT_READ_CR4 0x004
+#define SVM_EXIT_READ_CR8 0x008
+#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR3 0x013
+#define SVM_EXIT_WRITE_CR4 0x014
+#define SVM_EXIT_WRITE_CR8 0x018
+#define SVM_EXIT_READ_DR0 0x020
+#define SVM_EXIT_READ_DR1 0x021
+#define SVM_EXIT_READ_DR2 0x022
+#define SVM_EXIT_READ_DR3 0x023
+#define SVM_EXIT_READ_DR4 0x024
+#define SVM_EXIT_READ_DR5 0x025
+#define SVM_EXIT_READ_DR6 0x026
+#define SVM_EXIT_READ_DR7 0x027
+#define SVM_EXIT_WRITE_DR0 0x030
+#define SVM_EXIT_WRITE_DR1 0x031
+#define SVM_EXIT_WRITE_DR2 0x032
+#define SVM_EXIT_WRITE_DR3 0x033
+#define SVM_EXIT_WRITE_DR4 0x034
+#define SVM_EXIT_WRITE_DR5 0x035
+#define SVM_EXIT_WRITE_DR6 0x036
+#define SVM_EXIT_WRITE_DR7 0x037
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_INTR 0x060
+#define SVM_EXIT_NMI 0x061
+#define SVM_EXIT_SMI 0x062
+#define SVM_EXIT_INIT 0x063
+#define SVM_EXIT_VINTR 0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ 0x066
+#define SVM_EXIT_GDTR_READ 0x067
+#define SVM_EXIT_LDTR_READ 0x068
+#define SVM_EXIT_TR_READ 0x069
+#define SVM_EXIT_IDTR_WRITE 0x06a
+#define SVM_EXIT_GDTR_WRITE 0x06b
+#define SVM_EXIT_LDTR_WRITE 0x06c
+#define SVM_EXIT_TR_WRITE 0x06d
+#define SVM_EXIT_RDTSC 0x06e
+#define SVM_EXIT_RDPMC 0x06f
+#define SVM_EXIT_PUSHF 0x070
+#define SVM_EXIT_POPF 0x071
+#define SVM_EXIT_CPUID 0x072
+#define SVM_EXIT_RSM 0x073
+#define SVM_EXIT_IRET 0x074
+#define SVM_EXIT_SWINT 0x075
+#define SVM_EXIT_INVD 0x076
+#define SVM_EXIT_PAUSE 0x077
+#define SVM_EXIT_HLT 0x078
+#define SVM_EXIT_INVLPG 0x079
+#define SVM_EXIT_INVLPGA 0x07a
+#define SVM_EXIT_IOIO 0x07b
+#define SVM_EXIT_MSR 0x07c
+#define SVM_EXIT_TASK_SWITCH 0x07d
+#define SVM_EXIT_FERR_FREEZE 0x07e
+#define SVM_EXIT_SHUTDOWN 0x07f
+#define SVM_EXIT_VMRUN 0x080
+#define SVM_EXIT_VMMCALL 0x081
+#define SVM_EXIT_VMLOAD 0x082
+#define SVM_EXIT_VMSAVE 0x083
+#define SVM_EXIT_STGI 0x084
+#define SVM_EXIT_CLGI 0x085
+#define SVM_EXIT_SKINIT 0x086
+#define SVM_EXIT_RDTSCP 0x087
+#define SVM_EXIT_ICEBP 0x088
+#define SVM_EXIT_WBINVD 0x089
+/* these exit codes appear only in the documentation and may be wrong */
+#define SVM_EXIT_MONITOR 0x08a
+#define SVM_EXIT_MWAIT 0x08b
+#define SVM_EXIT_NPF 0x400
+
+#define SVM_EXIT_ERR -1
+
+#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
+
+#define SVM_NPT_ENABLED (1 << 0)
+
+#define SVM_NPTEXIT_GPA (1ULL << 32)
+#define SVM_NPTEXIT_GPT (1ULL << 33)
+
+#define SVM_CR0_RESERVED_MASK 0xffffffff00000000U
+
+#define SVM_MSRPM_SIZE (1ULL << 13)
+#define SVM_IOPM_SIZE ((1ULL << 13) + 1)
+
+struct QEMU_PACKED vmcb_control_area {
+ uint16_t intercept_cr_read;
+ uint16_t intercept_cr_write;
+ uint16_t intercept_dr_read;
+ uint16_t intercept_dr_write;
+ uint32_t intercept_exceptions;
+ uint64_t intercept;
+ uint8_t reserved_1[44];
+ uint64_t iopm_base_pa;
+ uint64_t msrpm_base_pa;
+ uint64_t tsc_offset;
+ uint32_t asid;
+ uint8_t tlb_ctl;
+ uint8_t reserved_2[3];
+ uint32_t int_ctl;
+ uint32_t int_vector;
+ uint32_t int_state;
+ uint8_t reserved_3[4];
+ uint64_t exit_code;
+ uint64_t exit_info_1;
+ uint64_t exit_info_2;
+ uint32_t exit_int_info;
+ uint32_t exit_int_info_err;
+ uint64_t nested_ctl;
+ uint8_t reserved_4[16];
+ uint32_t event_inj;
+ uint32_t event_inj_err;
+ uint64_t nested_cr3;
+ uint64_t lbr_ctl;
+ uint8_t reserved_5[832];
+};
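+
+/*
+ * With the reserved padding above, the control area is exactly 0x400
+ * bytes, so the save area in struct vmcb below begins at VMCB offset
+ * 0x400, matching the layout in the AMD APM.
+ */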
+
+struct QEMU_PACKED vmcb_seg {
+ uint16_t selector;
+ uint16_t attrib;
+ uint32_t limit;
+ uint64_t base;
+};
+
+struct QEMU_PACKED vmcb_save_area {
+ struct vmcb_seg es;
+ struct vmcb_seg cs;
+ struct vmcb_seg ss;
+ struct vmcb_seg ds;
+ struct vmcb_seg fs;
+ struct vmcb_seg gs;
+ struct vmcb_seg gdtr;
+ struct vmcb_seg ldtr;
+ struct vmcb_seg idtr;
+ struct vmcb_seg tr;
+ uint8_t reserved_1[43];
+ uint8_t cpl;
+ uint8_t reserved_2[4];
+ uint64_t efer;
+ uint8_t reserved_3[112];
+ uint64_t cr4;
+ uint64_t cr3;
+ uint64_t cr0;
+ uint64_t dr7;
+ uint64_t dr6;
+ uint64_t rflags;
+ uint64_t rip;
+ uint8_t reserved_4[88];
+ uint64_t rsp;
+ uint8_t reserved_5[24];
+ uint64_t rax;
+ uint64_t star;
+ uint64_t lstar;
+ uint64_t cstar;
+ uint64_t sfmask;
+ uint64_t kernel_gs_base;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ uint64_t cr2;
+ uint8_t reserved_6[32];
+ uint64_t g_pat;
+ uint64_t dbgctl;
+ uint64_t br_from;
+ uint64_t br_to;
+ uint64_t last_excp_from;
+ uint64_t last_excp_to;
+};
+
+struct QEMU_PACKED vmcb {
+ struct vmcb_control_area control;
+ struct vmcb_save_area save;
+};
+
+#endif
diff --git a/target/i386/tcg/bpt_helper.c b/target/i386/tcg/bpt_helper.c
new file mode 100644
index 000000000..b6c1fff16
--- /dev/null
+++ b/target/i386/tcg/bpt_helper.c
@@ -0,0 +1,39 @@
+/*
+ * i386 breakpoint helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "helper-tcg.h"
+
+void QEMU_NORETURN helper_single_step(CPUX86State *env)
+{
+#ifndef CONFIG_USER_ONLY
+ check_hw_breakpoints(env, true);
+ env->dr[6] |= DR6_BS;
+#endif
+ raise_exception(env, EXCP01_DB);
+}
+
+void helper_rechecking_single_step(CPUX86State *env)
+{
+ if ((env->eflags & TF_MASK) != 0) {
+ helper_single_step(env);
+ }
+}
diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c
new file mode 100644
index 000000000..cc7ea9e8b
--- /dev/null
+++ b/target/i386/tcg/cc_helper.c
@@ -0,0 +1,389 @@
+/*
+ * x86 condition code helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "helper-tcg.h"
+
+const uint8_t parity_table[256] = {
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+};
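+
+/*
+ * parity_table[v] is CC_P exactly when the 8-bit value v has an even
+ * number of set bits, matching the x86 PF definition: e.g. 0x03 (two
+ * bits set) maps to CC_P, while 0x01 maps to 0.
+ */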
+
+#define SHIFT 0
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+
+#define SHIFT 3
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#endif
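+
+/*
+ * The inclusions above stamp out the per-size flag helpers used by
+ * helper_cc_compute_all() below, e.g. compute_all_addb/addw/addl (plus
+ * compute_all_addq on TARGET_X86_64) from compute_all_add.
+ */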
+
+static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~CC_C) | (dst * CC_C);
+}
+
+static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~CC_O) | (src2 * CC_O);
+}
+
+static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
+}
+
+target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
+ target_ulong src2, int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ return 0;
+
+ case CC_OP_EFLAGS:
+ return src1;
+ case CC_OP_CLR:
+ return CC_Z | CC_P;
+ case CC_OP_POPCNT:
+ return src1 ? 0 : CC_Z;
+
+ case CC_OP_MULB:
+ return compute_all_mulb(dst, src1);
+ case CC_OP_MULW:
+ return compute_all_mulw(dst, src1);
+ case CC_OP_MULL:
+ return compute_all_mull(dst, src1);
+
+ case CC_OP_ADDB:
+ return compute_all_addb(dst, src1);
+ case CC_OP_ADDW:
+ return compute_all_addw(dst, src1);
+ case CC_OP_ADDL:
+ return compute_all_addl(dst, src1);
+
+ case CC_OP_ADCB:
+ return compute_all_adcb(dst, src1, src2);
+ case CC_OP_ADCW:
+ return compute_all_adcw(dst, src1, src2);
+ case CC_OP_ADCL:
+ return compute_all_adcl(dst, src1, src2);
+
+ case CC_OP_SUBB:
+ return compute_all_subb(dst, src1);
+ case CC_OP_SUBW:
+ return compute_all_subw(dst, src1);
+ case CC_OP_SUBL:
+ return compute_all_subl(dst, src1);
+
+ case CC_OP_SBBB:
+ return compute_all_sbbb(dst, src1, src2);
+ case CC_OP_SBBW:
+ return compute_all_sbbw(dst, src1, src2);
+ case CC_OP_SBBL:
+ return compute_all_sbbl(dst, src1, src2);
+
+ case CC_OP_LOGICB:
+ return compute_all_logicb(dst, src1);
+ case CC_OP_LOGICW:
+ return compute_all_logicw(dst, src1);
+ case CC_OP_LOGICL:
+ return compute_all_logicl(dst, src1);
+
+ case CC_OP_INCB:
+ return compute_all_incb(dst, src1);
+ case CC_OP_INCW:
+ return compute_all_incw(dst, src1);
+ case CC_OP_INCL:
+ return compute_all_incl(dst, src1);
+
+ case CC_OP_DECB:
+ return compute_all_decb(dst, src1);
+ case CC_OP_DECW:
+ return compute_all_decw(dst, src1);
+ case CC_OP_DECL:
+ return compute_all_decl(dst, src1);
+
+ case CC_OP_SHLB:
+ return compute_all_shlb(dst, src1);
+ case CC_OP_SHLW:
+ return compute_all_shlw(dst, src1);
+ case CC_OP_SHLL:
+ return compute_all_shll(dst, src1);
+
+ case CC_OP_SARB:
+ return compute_all_sarb(dst, src1);
+ case CC_OP_SARW:
+ return compute_all_sarw(dst, src1);
+ case CC_OP_SARL:
+ return compute_all_sarl(dst, src1);
+
+ case CC_OP_BMILGB:
+ return compute_all_bmilgb(dst, src1);
+ case CC_OP_BMILGW:
+ return compute_all_bmilgw(dst, src1);
+ case CC_OP_BMILGL:
+ return compute_all_bmilgl(dst, src1);
+
+ case CC_OP_ADCX:
+ return compute_all_adcx(dst, src1, src2);
+ case CC_OP_ADOX:
+ return compute_all_adox(dst, src1, src2);
+ case CC_OP_ADCOX:
+ return compute_all_adcox(dst, src1, src2);
+
+#ifdef TARGET_X86_64
+ case CC_OP_MULQ:
+ return compute_all_mulq(dst, src1);
+ case CC_OP_ADDQ:
+ return compute_all_addq(dst, src1);
+ case CC_OP_ADCQ:
+ return compute_all_adcq(dst, src1, src2);
+ case CC_OP_SUBQ:
+ return compute_all_subq(dst, src1);
+ case CC_OP_SBBQ:
+ return compute_all_sbbq(dst, src1, src2);
+ case CC_OP_LOGICQ:
+ return compute_all_logicq(dst, src1);
+ case CC_OP_INCQ:
+ return compute_all_incq(dst, src1);
+ case CC_OP_DECQ:
+ return compute_all_decq(dst, src1);
+ case CC_OP_SHLQ:
+ return compute_all_shlq(dst, src1);
+ case CC_OP_SARQ:
+ return compute_all_sarq(dst, src1);
+ case CC_OP_BMILGQ:
+ return compute_all_bmilgq(dst, src1);
+#endif
+ }
+}
+
+uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
+{
+ return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
+}
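+
+/*
+ * Flag evaluation is lazy: translated code only records CC_DST, CC_SRC,
+ * CC_SRC2 and the CC_OP of the last flag-setting instruction, and the
+ * switch above reconstructs EFLAGS on demand; after an 8-bit add, for
+ * instance, CC_OP is CC_OP_ADDB with CC_DST holding the result.
+ */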
+
+target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
+ target_ulong src2, int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_LOGICQ:
+ case CC_OP_CLR:
+ case CC_OP_POPCNT:
+ return 0;
+
+ case CC_OP_EFLAGS:
+ case CC_OP_SARB:
+ case CC_OP_SARW:
+ case CC_OP_SARL:
+ case CC_OP_SARQ:
+ case CC_OP_ADOX:
+ return src1 & 1;
+
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_INCQ:
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_DECQ:
+ return src1;
+
+ case CC_OP_MULB:
+ case CC_OP_MULW:
+ case CC_OP_MULL:
+ case CC_OP_MULQ:
+ return src1 != 0;
+
+ case CC_OP_ADCX:
+ case CC_OP_ADCOX:
+ return dst;
+
+ case CC_OP_ADDB:
+ return compute_c_addb(dst, src1);
+ case CC_OP_ADDW:
+ return compute_c_addw(dst, src1);
+ case CC_OP_ADDL:
+ return compute_c_addl(dst, src1);
+
+ case CC_OP_ADCB:
+ return compute_c_adcb(dst, src1, src2);
+ case CC_OP_ADCW:
+ return compute_c_adcw(dst, src1, src2);
+ case CC_OP_ADCL:
+ return compute_c_adcl(dst, src1, src2);
+
+ case CC_OP_SUBB:
+ return compute_c_subb(dst, src1);
+ case CC_OP_SUBW:
+ return compute_c_subw(dst, src1);
+ case CC_OP_SUBL:
+ return compute_c_subl(dst, src1);
+
+ case CC_OP_SBBB:
+ return compute_c_sbbb(dst, src1, src2);
+ case CC_OP_SBBW:
+ return compute_c_sbbw(dst, src1, src2);
+ case CC_OP_SBBL:
+ return compute_c_sbbl(dst, src1, src2);
+
+ case CC_OP_SHLB:
+ return compute_c_shlb(dst, src1);
+ case CC_OP_SHLW:
+ return compute_c_shlw(dst, src1);
+ case CC_OP_SHLL:
+ return compute_c_shll(dst, src1);
+
+ case CC_OP_BMILGB:
+ return compute_c_bmilgb(dst, src1);
+ case CC_OP_BMILGW:
+ return compute_c_bmilgw(dst, src1);
+ case CC_OP_BMILGL:
+ return compute_c_bmilgl(dst, src1);
+
+#ifdef TARGET_X86_64
+ case CC_OP_ADDQ:
+ return compute_c_addq(dst, src1);
+ case CC_OP_ADCQ:
+ return compute_c_adcq(dst, src1, src2);
+ case CC_OP_SUBQ:
+ return compute_c_subq(dst, src1);
+ case CC_OP_SBBQ:
+ return compute_c_sbbq(dst, src1, src2);
+ case CC_OP_SHLQ:
+ return compute_c_shlq(dst, src1);
+ case CC_OP_BMILGQ:
+ return compute_c_bmilgq(dst, src1);
+#endif
+ }
+}
+
+void helper_write_eflags(CPUX86State *env, target_ulong t0,
+ uint32_t update_mask)
+{
+ cpu_load_eflags(env, t0, update_mask);
+}
+
+target_ulong helper_read_eflags(CPUX86State *env)
+{
+ uint32_t eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags |= (env->df & DF_MASK);
+ eflags |= env->eflags & ~(VM_MASK | RF_MASK);
+ return eflags;
+}
+
+void helper_clts(CPUX86State *env)
+{
+ env->cr[0] &= ~CR0_TS_MASK;
+ env->hflags &= ~HF_TS_MASK;
+}
+
+void helper_reset_rf(CPUX86State *env)
+{
+ env->eflags &= ~RF_MASK;
+}
+
+void helper_cli(CPUX86State *env)
+{
+ env->eflags &= ~IF_MASK;
+}
+
+void helper_sti(CPUX86State *env)
+{
+ env->eflags |= IF_MASK;
+}
+
+void helper_clac(CPUX86State *env)
+{
+ env->eflags &= ~AC_MASK;
+}
+
+void helper_stac(CPUX86State *env)
+{
+ env->eflags |= AC_MASK;
+}
+
+#if 0
+/* vm86plus instructions */
+void helper_cli_vm(CPUX86State *env)
+{
+ env->eflags &= ~VIF_MASK;
+}
+
+void helper_sti_vm(CPUX86State *env)
+{
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+}
+#endif
diff --git a/target/i386/tcg/cc_helper_template.h b/target/i386/tcg/cc_helper_template.h
new file mode 100644
index 000000000..bb611feb0
--- /dev/null
+++ b/target/i386/tcg/cc_helper_template.h
@@ -0,0 +1,242 @@
+/*
+ * x86 condition code helpers
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_TYPE uint8_t
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_TYPE uint16_t
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_TYPE uint64_t
+#else
+#error unhandled operand size
+#endif
+
+#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1))
+
+/* dynamic flags computation */
+
+static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2 = dst - src1;
+
+ cf = dst < src1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
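+
+/*
+ * Worked example (8-bit): dst = 0x00 produced by src1 = 0x80 + src2 =
+ * 0x80 gives cf = 1 (0x00 < 0x80), zf = CC_Z, sf = 0, and of = CC_O,
+ * since the operands share a sign that differs from the result's.
+ */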
+
+static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return dst < src1;
+}
+
+static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+ DATA_TYPE src3)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2 = dst - src1 - src3;
+
+ cf = (src3 ? dst <= src1 : dst < src1);
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & 0x10;
+ zf = (dst == 0) << 6;
+ sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+ DATA_TYPE src3)
+{
+ return src3 ? dst <= src1 : dst < src1;
+}
+
+static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src1 = dst + src2;
+
+ cf = src1 < src2;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+ DATA_TYPE src1 = dst + src2;
+
+ return src1 < src2;
+}
+
+static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+ DATA_TYPE src3)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src1 = dst + src2 + src3;
+
+ cf = (src3 ? src1 <= src2 : src1 < src2);
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & 0x10;
+ zf = (dst == 0) << 6;
+ sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+ DATA_TYPE src3)
+{
+ DATA_TYPE src1 = dst + src2 + src3;
+
+ return (src3 ? src1 <= src2 : src1 < src2);
+}
+
+static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = 0;
+ pf = parity_table[(uint8_t)dst];
+ af = 0;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = 0;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2;
+
+ cf = src1;
+ src1 = dst - 1;
+ src2 = 1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = (dst == SIGN_MASK) * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2;
+
+ cf = src1;
+ src1 = dst + 1;
+ src2 = 1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = (dst == SIGN_MASK - 1) * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 >> (DATA_BITS - 1)) & CC_C;
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ /* of is defined iff shift count == 1 */
+ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return (src1 >> (DATA_BITS - 1)) & CC_C;
+}
+
+static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = src1 & 1;
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ /* of is defined iff shift count == 1 */
+ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+/* NOTE: we compute the flags like the P4. On older CPUs, only OF and
+   CF are modified, and emulating that would be slower. Note as well
+   that SRC1 is not truncated to DATA_TYPE when computing the carry. */
+static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 != 0);
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = cf * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 == 0);
+ pf = 0; /* undefined */
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = 0;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return src1 == 0;
+}
+
+#undef DATA_BITS
+#undef SIGN_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
new file mode 100644
index 000000000..bdae887d0
--- /dev/null
+++ b/target/i386/tcg/excp_helper.c
@@ -0,0 +1,141 @@
+/*
+ * x86 exception helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/log.h"
+#include "sysemu/runstate.h"
+#include "exec/helper-proto.h"
+#include "helper-tcg.h"
+
+void QEMU_NORETURN helper_raise_interrupt(CPUX86State *env, int intno,
+ int next_eip_addend)
+{
+ raise_interrupt(env, intno, 1, 0, next_eip_addend);
+}
+
+void QEMU_NORETURN helper_raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_exception(env, exception_index);
+}
+
+/*
+ * Check nested exceptions and change to double or triple fault if
+ * needed. It should only be called, if this is not an interrupt.
+ * Returns the new exception number.
+ */
+static int check_exception(CPUX86State *env, int intno, int *error_code,
+ uintptr_t retaddr)
+{
+ int first_contributory = env->old_exception == 0 ||
+ (env->old_exception >= 10 &&
+ env->old_exception <= 13);
+ int second_contributory = intno == 0 ||
+ (intno >= 10 && intno <= 13);
+
+ qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
+ env->old_exception, intno);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->old_exception == EXCP08_DBLE) {
+ if (env->hflags & HF_GUEST_MASK) {
+ cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
+ }
+
+ qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ return EXCP_HLT;
+ }
+#endif
+
+ if ((first_contributory && second_contributory)
+ || (env->old_exception == EXCP0E_PAGE &&
+ (second_contributory || (intno == EXCP0E_PAGE)))) {
+ intno = EXCP08_DBLE;
+ *error_code = 0;
+ }
+
+ if (second_contributory || (intno == EXCP0E_PAGE) ||
+ (intno == EXCP08_DBLE)) {
+ env->old_exception = intno;
+ }
+
+ return intno;
+}
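+
+/*
+ * Example of the escalation above: a #GP (13, contributory) raised while
+ * delivering a #NP (11, contributory) is promoted to #DF (8), and any
+ * further exception while old_exception == #DF takes the triple-fault
+ * reset path.
+ */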
+
+/*
+ * Signal an interruption. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip_addend is
+ * added to env->eip to form the address AFTER the interrupt instruction;
+ * it is only relevant if is_int is TRUE.
+ */
+static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
+ int is_int, int error_code,
+ int next_eip_addend,
+ uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (!is_int) {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
+ error_code, retaddr);
+ intno = check_exception(env, intno, &error_code, retaddr);
+ } else {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
+ }
+
+ cs->exception_index = intno;
+ env->error_code = error_code;
+ env->exception_is_int = is_int;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+/* shortcuts to generate exceptions */
+
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+ int error_code, int next_eip_addend)
+{
+ raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
+}
+
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
+}
+
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
+}
+
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0, 0);
+}
+
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+ uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
+}
diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
new file mode 100644
index 000000000..cdd8e9f94
--- /dev/null
+++ b/target/i386/tcg/fpu_helper.c
@@ -0,0 +1,3041 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <math.h>
+#include "cpu.h"
+#include "tcg-cpu.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "fpu/softfloat-macros.h"
+#include "helper-tcg.h"
+
+/* float macros */
+#define FT0 (env->ft0)
+#define ST0 (env->fpregs[env->fpstt].d)
+#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
+#define ST1 ST(1)
+
+#define FPU_RC_MASK 0xc00
+#define FPU_RC_NEAR 0x000
+#define FPU_RC_DOWN 0x400
+#define FPU_RC_UP 0x800
+#define FPU_RC_CHOP 0xc00
+
+#define MAXTAN 9223372036854775808.0
+
+/* the following deal with x86 long double-precision numbers */
+#define MAXEXPD 0x7fff
+#define EXPBIAS 16383
+#define EXPD(fp) (fp.l.upper & 0x7fff)
+#define SIGND(fp) ((fp.l.upper) & 0x8000)
+#define MANTD(fp) (fp.l.lower)
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
+
+#define FPUS_IE (1 << 0)
+#define FPUS_DE (1 << 1)
+#define FPUS_ZE (1 << 2)
+#define FPUS_OE (1 << 3)
+#define FPUS_UE (1 << 4)
+#define FPUS_PE (1 << 5)
+#define FPUS_SF (1 << 6)
+#define FPUS_SE (1 << 7)
+#define FPUS_B (1 << 15)
+
+#define FPUC_EM 0x3f
+
+#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
+#define floatx80_lg2_d make_floatx80(0x3ffd, 0x9a209a84fbcff798LL)
+#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
+#define floatx80_l2e_d make_floatx80(0x3fff, 0xb8aa3b295c17f0bbLL)
+#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
+#define floatx80_l2t_u make_floatx80(0x4000, 0xd49a784bcd1b8affLL)
+#define floatx80_ln2_d make_floatx80(0x3ffe, 0xb17217f7d1cf79abLL)
+#define floatx80_pi_d make_floatx80(0x4000, 0xc90fdaa22168c234LL)
+
+static inline void fpush(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fptags[env->fpstt] = 0; /* validate stack entry */
+}
+
+static inline void fpop(CPUX86State *env)
+{
+ env->fptags[env->fpstt] = 1; /* invalidate stack entry */
+ env->fpstt = (env->fpstt + 1) & 7;
+}
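+
+/*
+ * The x87 register stack is the fixed array fpregs[8] indexed modulo 8
+ * by the top-of-stack pointer fpstt, so ST(i) is fpregs[(fpstt + i) & 7];
+ * with fpstt == 6, for example, ST0 is fpregs[6] and ST(1) is fpregs[7].
+ */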
+
+static floatx80 do_fldt(CPUX86State *env, target_ulong ptr, uintptr_t retaddr)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr);
+ temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+ return temp.d;
+}
+
+static void do_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr);
+}
+
+/* x87 FPU helpers */
+
+static inline double floatx80_to_double(CPUX86State *env, floatx80 a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.f64 = floatx80_to_float64(a, &env->fp_status);
+ return u.d;
+}
+
+static inline floatx80 double_to_floatx80(CPUX86State *env, double a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.d = a;
+ return float64_to_floatx80(u.f64, &env->fp_status);
+}
+
+static void fpu_set_exception(CPUX86State *env, int mask)
+{
+ env->fpus |= mask;
+ if (env->fpus & (~env->fpuc & FPUC_EM)) {
+ env->fpus |= FPUS_SE | FPUS_B;
+ }
+}
+
+static inline uint8_t save_exception_flags(CPUX86State *env)
+{
+ uint8_t old_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+ return old_flags;
+}
+
+static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
+{
+ uint8_t new_flags = get_float_exception_flags(&env->fp_status);
+ float_raise(old_flags, &env->fp_status);
+ fpu_set_exception(env,
+ ((new_flags & float_flag_invalid ? FPUS_IE : 0) |
+ (new_flags & float_flag_divbyzero ? FPUS_ZE : 0) |
+ (new_flags & float_flag_overflow ? FPUS_OE : 0) |
+ (new_flags & float_flag_underflow ? FPUS_UE : 0) |
+ (new_flags & float_flag_inexact ? FPUS_PE : 0) |
+ (new_flags & float_flag_input_denormal ? FPUS_DE : 0)));
+}
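+
+/*
+ * Pattern used by the arithmetic helpers below: softfloat's accumulated
+ * flags are saved and cleared before the operation, the freshly raised
+ * flags are folded into the x87 status word, and the saved flags are
+ * then restored, so earlier pending exceptions are never lost.
+ */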
+
+static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ floatx80 ret = floatx80_div(a, b, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+ return ret;
+}
+
+static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
+{
+ if (env->cr[0] & CR0_NE_MASK) {
+ raise_exception_ra(env, EXCP10_COPR, retaddr);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ else {
+ fpu_check_raise_ferr_irq(env);
+ }
+#endif
+}
+
+void helper_flds_FT0(CPUX86State *env, uint32_t val)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float32_to_floatx80(u.f, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fldl_FT0(CPUX86State *env, uint64_t val)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float64_to_floatx80(u.f, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fildl_FT0(CPUX86State *env, int32_t val)
+{
+ FT0 = int32_to_floatx80(val, &env->fp_status);
+}
+
+void helper_flds_ST0(CPUX86State *env, uint32_t val)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int new_fpstt;
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fldl_ST0(CPUX86State *env, uint64_t val)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int new_fpstt;
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fildl_ST0(CPUX86State *env, int32_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildll_ST0(CPUX86State *env, int64_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+uint32_t helper_fsts_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.f = floatx80_to_float32(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+ return u.i;
+}
+
+uint64_t helper_fstl_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.f = floatx80_to_float64(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+ return u.i;
+}
+
+int32_t helper_fist_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int32_t val;
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
+ val = -32768;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+int32_t helper_fistl_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int32_t val;
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x80000000;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+int64_t helper_fistll_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int64_t val;
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x8000000000000000ULL;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+int32_t helper_fistt_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
+ val = -32768;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+int32_t helper_fisttl_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x80000000;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+int64_t helper_fisttll_ST0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int64_t val;
+
+ val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x8000000000000000ULL;
+ }
+ merge_exception_flags(env, old_flags);
+ return val;
+}
+
+void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = do_fldt(env, ptr, GETPC());
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fstt_ST0(CPUX86State *env, target_ulong ptr)
+{
+ do_fstt(env, ST0, ptr, GETPC());
+}
+
+void helper_fpush(CPUX86State *env)
+{
+ fpush(env);
+}
+
+void helper_fpop(CPUX86State *env)
+{
+ fpop(env);
+}
+
+void helper_fdecstp(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+void helper_fincstp(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt + 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+/* FPU move */
+
+void helper_ffree_STN(CPUX86State *env, int st_index)
+{
+ env->fptags[(env->fpstt + st_index) & 7] = 1;
+}
+
+void helper_fmov_ST0_FT0(CPUX86State *env)
+{
+ ST0 = FT0;
+}
+
+void helper_fmov_FT0_STN(CPUX86State *env, int st_index)
+{
+ FT0 = ST(st_index);
+}
+
+void helper_fmov_ST0_STN(CPUX86State *env, int st_index)
+{
+ ST0 = ST(st_index);
+}
+
+void helper_fmov_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = ST0;
+}
+
+void helper_fxchg_ST0_STN(CPUX86State *env, int st_index)
+{
+ floatx80 tmp;
+
+ tmp = ST(st_index);
+ ST(st_index) = ST0;
+ ST0 = tmp;
+}
+
+/* FPU operations */
+
+static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
+
+void helper_fcom_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ FloatRelation ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fucom_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ FloatRelation ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+ merge_exception_flags(env, old_flags);
+}
+
+static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_fcomi_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int eflags;
+ FloatRelation ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fucomi_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int eflags;
+ FloatRelation ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fadd_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST0 = floatx80_add(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fmul_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsub_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsubr_ST0_FT0(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fdiv_ST0_FT0(CPUX86State *env)
+{
+ ST0 = helper_fdiv(env, ST0, FT0);
+}
+
+void helper_fdivr_ST0_FT0(CPUX86State *env)
+{
+ ST0 = helper_fdiv(env, FT0, ST0);
+}
+
+/* fp operations between STN and ST0 */
+
+void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fdiv_STN_ST0(CPUX86State *env, int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(env, *p, ST0);
+}
+
+void helper_fdivr_STN_ST0(CPUX86State *env, int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(env, ST0, *p);
+}
+
+/* misc FPU operations */
+void helper_fchs_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_chs(ST0);
+}
+
+void helper_fabs_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_abs(ST0);
+}
+
+void helper_fld1_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_one;
+}
+
+void helper_fldl2t_ST0(CPUX86State *env)
+{
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_UP:
+ ST0 = floatx80_l2t_u;
+ break;
+ default:
+ ST0 = floatx80_l2t;
+ break;
+ }
+}
+
+void helper_fldl2e_ST0(CPUX86State *env)
+{
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_l2e_d;
+ break;
+ default:
+ ST0 = floatx80_l2e;
+ break;
+ }
+}
+
+void helper_fldpi_ST0(CPUX86State *env)
+{
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_pi_d;
+ break;
+ default:
+ ST0 = floatx80_pi;
+ break;
+ }
+}
+
+void helper_fldlg2_ST0(CPUX86State *env)
+{
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_lg2_d;
+ break;
+ default:
+ ST0 = floatx80_lg2;
+ break;
+ }
+}
+
+void helper_fldln2_ST0(CPUX86State *env)
+{
+ switch (env->fpuc & FPU_RC_MASK) {
+ case FPU_RC_DOWN:
+ case FPU_RC_CHOP:
+ ST0 = floatx80_ln2_d;
+ break;
+ default:
+ ST0 = floatx80_ln2;
+ break;
+ }
+}
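+
+/*
+ * The constant-load helpers above follow the documented x87
+ * behaviour: the irrational constants are held to more than 64 bits
+ * internally, so the value loaded depends on the rounding-control
+ * field.  log2(10) differs only when rounding up; the other inexact
+ * constants differ only when rounding down or toward zero.
+ */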
+
+void helper_fldz_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_zero;
+}
+
+void helper_fldz_FT0(CPUX86State *env)
+{
+ FT0 = floatx80_zero;
+}
+
+uint32_t helper_fnstsw(CPUX86State *env)
+{
+ return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+}
+
+uint32_t helper_fnstcw(CPUX86State *env)
+{
+ return env->fpuc;
+}
+
+void update_fp_status(CPUX86State *env)
+{
+ FloatRoundMode rnd_mode;
+ FloatX80RoundPrec rnd_prec;
+
+ /* set rounding mode */
+ switch (env->fpuc & FPU_RC_MASK) {
+ default:
+ case FPU_RC_NEAR:
+ rnd_mode = float_round_nearest_even;
+ break;
+ case FPU_RC_DOWN:
+ rnd_mode = float_round_down;
+ break;
+ case FPU_RC_UP:
+ rnd_mode = float_round_up;
+ break;
+ case FPU_RC_CHOP:
+ rnd_mode = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_mode, &env->fp_status);
+
+ switch ((env->fpuc >> 8) & 3) {
+ case 0:
+ rnd_prec = floatx80_precision_s;
+ break;
+ case 2:
+ rnd_prec = floatx80_precision_d;
+ break;
+ case 3:
+ default:
+ rnd_prec = floatx80_precision_x;
+ break;
+ }
+ set_floatx80_rounding_precision(rnd_prec, &env->fp_status);
+}
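+
+/*
+ * Control-word fields decoded above: RC is bits 11:10 (00 nearest,
+ * 01 down, 10 up, 11 toward zero) and PC is bits 9:8 (00 single,
+ * 10 double, 11 extended; the reserved value 01 falls through to
+ * extended).
+ */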
+
+void helper_fldcw(CPUX86State *env, uint32_t val)
+{
+ cpu_set_fpuc(env, val);
+}
+
+void helper_fclex(CPUX86State *env)
+{
+ env->fpus &= 0x7f00;
+}
+
+void helper_fwait(CPUX86State *env)
+{
+ if (env->fpus & FPUS_SE) {
+ fpu_raise_exception(env, GETPC());
+ }
+}
+
+static void do_fninit(CPUX86State *env)
+{
+ env->fpus = 0;
+ env->fpstt = 0;
+ env->fpcs = 0;
+ env->fpds = 0;
+ env->fpip = 0;
+ env->fpdp = 0;
+ cpu_set_fpuc(env, 0x37f);
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+void helper_fninit(CPUX86State *env)
+{
+ do_fninit(env);
+}
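+
+/*
+ * The FNINIT state above matches the architectural reset: control
+ * word 0x037f (all exceptions masked, extended precision, round to
+ * nearest), cleared status and pointer registers, and an empty stack
+ * (a fptags[] entry of 1 means "empty" in this representation).
+ */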
+
+/* BCD ops */
+
+void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
+{
+ floatx80 tmp;
+ uint64_t val;
+ unsigned int v;
+ int i;
+
+ val = 0;
+ for (i = 8; i >= 0; i--) {
+ v = cpu_ldub_data_ra(env, ptr + i, GETPC());
+ val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
+ }
+ tmp = int64_to_floatx80(val, &env->fp_status);
+ if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) {
+ tmp = floatx80_chs(tmp);
+ }
+ fpush(env);
+ ST0 = tmp;
+}
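+
+/*
+ * A worked example of the ten-byte packed-BCD layout read above, for
+ * the decimal value -1234: bytes 0..8 hold eighteen BCD digits, least
+ * significant byte first, two digits per byte with the lower digit in
+ * the low nibble, and bit 7 of byte 9 is the sign.  So:
+ *
+ *   byte 0 = 0x34, byte 1 = 0x12, bytes 2..8 = 0x00, byte 9 = 0x80
+ */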
+
+void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ int v;
+ target_ulong mem_ref, mem_end;
+ int64_t val;
+ CPU_LDoubleU temp;
+
+ temp.d = ST0;
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ mem_ref = ptr;
+ if (val >= 1000000000000000000LL || val <= -1000000000000000000LL) {
+ set_float_exception_flags(float_flag_invalid, &env->fp_status);
+ while (mem_ref < ptr + 7) {
+ cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
+ }
+ cpu_stb_data_ra(env, mem_ref++, 0xc0, GETPC());
+ cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
+ cpu_stb_data_ra(env, mem_ref++, 0xff, GETPC());
+ merge_exception_flags(env, old_flags);
+ return;
+ }
+ mem_end = mem_ref + 9;
+ if (SIGND(temp)) {
+ cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
+ val = -val;
+ } else {
+ cpu_stb_data_ra(env, mem_end, 0x00, GETPC());
+ }
+ while (mem_ref < mem_end) {
+ if (val == 0) {
+ break;
+ }
+ v = val % 100;
+ val = val / 100;
+ v = ((v / 10) << 4) | (v % 10);
+ cpu_stb_data_ra(env, mem_ref++, v, GETPC());
+ }
+ while (mem_ref < mem_end) {
+ cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
+ }
+ merge_exception_flags(env, old_flags);
+}
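+
+/*
+ * A source that does not fit in eighteen packed digits stores the
+ * packed-BCD "indefinite" encoding above: bytes 0..6 are 0x00, byte 7
+ * is 0xc0 and bytes 8..9 are 0xff, and the invalid-operation
+ * exception is raised.
+ */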
+
+/* 128-bit significand of log(2). */
+#define ln2_sig_high 0xb17217f7d1cf79abULL
+#define ln2_sig_low 0xc9e3b39803f2f6afULL
+
+/*
+ * Polynomial coefficients for an approximation to (2^x - 1) / x, on
+ * the interval [-1/64, 1/64].
+ */
+#define f2xm1_coeff_0 make_floatx80(0x3ffe, 0xb17217f7d1cf79acULL)
+#define f2xm1_coeff_0_low make_floatx80(0xbfbc, 0xd87edabf495b3762ULL)
+#define f2xm1_coeff_1 make_floatx80(0x3ffc, 0xf5fdeffc162c7543ULL)
+#define f2xm1_coeff_2 make_floatx80(0x3ffa, 0xe35846b82505fcc7ULL)
+#define f2xm1_coeff_3 make_floatx80(0x3ff8, 0x9d955b7dd273b899ULL)
+#define f2xm1_coeff_4 make_floatx80(0x3ff5, 0xaec3ff3c4ef4ac0cULL)
+#define f2xm1_coeff_5 make_floatx80(0x3ff2, 0xa184897c3a7f0de9ULL)
+#define f2xm1_coeff_6 make_floatx80(0x3fee, 0xffe634d0ec30d504ULL)
+#define f2xm1_coeff_7 make_floatx80(0x3feb, 0xb160111d2db515e4ULL)
+
+struct f2xm1_data {
+ /*
+ * A value very close to a multiple of 1/32, such that 2^t and 2^t - 1
+ * are very close to exact floatx80 values.
+ */
+ floatx80 t;
+ /* The value of 2^t. */
+ floatx80 exp2;
+ /* The value of 2^t - 1. */
+ floatx80 exp2m1;
+};
+
+static const struct f2xm1_data f2xm1_table[65] = {
+ { make_floatx80_init(0xbfff, 0x8000000000000000ULL),
+ make_floatx80_init(0x3ffe, 0x8000000000000000ULL),
+ make_floatx80_init(0xbffe, 0x8000000000000000ULL) },
+ { make_floatx80_init(0xbffe, 0xf800000000002e7eULL),
+ make_floatx80_init(0x3ffe, 0x82cd8698ac2b9160ULL),
+ make_floatx80_init(0xbffd, 0xfa64f2cea7a8dd40ULL) },
+ { make_floatx80_init(0xbffe, 0xefffffffffffe960ULL),
+ make_floatx80_init(0x3ffe, 0x85aac367cc488345ULL),
+ make_floatx80_init(0xbffd, 0xf4aa7930676ef976ULL) },
+ { make_floatx80_init(0xbffe, 0xe800000000006f10ULL),
+ make_floatx80_init(0x3ffe, 0x88980e8092da5c14ULL),
+ make_floatx80_init(0xbffd, 0xeecfe2feda4b47d8ULL) },
+ { make_floatx80_init(0xbffe, 0xe000000000008a45ULL),
+ make_floatx80_init(0x3ffe, 0x8b95c1e3ea8ba2a5ULL),
+ make_floatx80_init(0xbffd, 0xe8d47c382ae8bab6ULL) },
+ { make_floatx80_init(0xbffe, 0xd7ffffffffff8a9eULL),
+ make_floatx80_init(0x3ffe, 0x8ea4398b45cd8116ULL),
+ make_floatx80_init(0xbffd, 0xe2b78ce97464fdd4ULL) },
+ { make_floatx80_init(0xbffe, 0xd0000000000019a0ULL),
+ make_floatx80_init(0x3ffe, 0x91c3d373ab11b919ULL),
+ make_floatx80_init(0xbffd, 0xdc785918a9dc8dceULL) },
+ { make_floatx80_init(0xbffe, 0xc7ffffffffff14dfULL),
+ make_floatx80_init(0x3ffe, 0x94f4efa8fef76836ULL),
+ make_floatx80_init(0xbffd, 0xd61620ae02112f94ULL) },
+ { make_floatx80_init(0xbffe, 0xc000000000006530ULL),
+ make_floatx80_init(0x3ffe, 0x9837f0518db87fbbULL),
+ make_floatx80_init(0xbffd, 0xcf901f5ce48f008aULL) },
+ { make_floatx80_init(0xbffe, 0xb7ffffffffff1723ULL),
+ make_floatx80_init(0x3ffe, 0x9b8d39b9d54eb74cULL),
+ make_floatx80_init(0xbffd, 0xc8e58c8c55629168ULL) },
+ { make_floatx80_init(0xbffe, 0xb00000000000b5e1ULL),
+ make_floatx80_init(0x3ffe, 0x9ef5326091a0c366ULL),
+ make_floatx80_init(0xbffd, 0xc2159b3edcbe7934ULL) },
+ { make_floatx80_init(0xbffe, 0xa800000000006f8aULL),
+ make_floatx80_init(0x3ffe, 0xa27043030c49370aULL),
+ make_floatx80_init(0xbffd, 0xbb1f79f9e76d91ecULL) },
+ { make_floatx80_init(0xbffe, 0x9fffffffffff816aULL),
+ make_floatx80_init(0x3ffe, 0xa5fed6a9b15171cfULL),
+ make_floatx80_init(0xbffd, 0xb40252ac9d5d1c62ULL) },
+ { make_floatx80_init(0xbffe, 0x97ffffffffffb621ULL),
+ make_floatx80_init(0x3ffe, 0xa9a15ab4ea7c30e6ULL),
+ make_floatx80_init(0xbffd, 0xacbd4a962b079e34ULL) },
+ { make_floatx80_init(0xbffe, 0x8fffffffffff162bULL),
+ make_floatx80_init(0x3ffe, 0xad583eea42a1b886ULL),
+ make_floatx80_init(0xbffd, 0xa54f822b7abc8ef4ULL) },
+ { make_floatx80_init(0xbffe, 0x87ffffffffff4d34ULL),
+ make_floatx80_init(0x3ffe, 0xb123f581d2ac7b51ULL),
+ make_floatx80_init(0xbffd, 0x9db814fc5aa7095eULL) },
+ { make_floatx80_init(0xbffe, 0x800000000000227dULL),
+ make_floatx80_init(0x3ffe, 0xb504f333f9de539dULL),
+ make_floatx80_init(0xbffd, 0x95f619980c4358c6ULL) },
+ { make_floatx80_init(0xbffd, 0xefffffffffff3978ULL),
+ make_floatx80_init(0x3ffe, 0xb8fbaf4762fbd0a1ULL),
+ make_floatx80_init(0xbffd, 0x8e08a1713a085ebeULL) },
+ { make_floatx80_init(0xbffd, 0xe00000000000df81ULL),
+ make_floatx80_init(0x3ffe, 0xbd08a39f580bfd8cULL),
+ make_floatx80_init(0xbffd, 0x85eeb8c14fe804e8ULL) },
+ { make_floatx80_init(0xbffd, 0xd00000000000bccfULL),
+ make_floatx80_init(0x3ffe, 0xc12c4cca667062f6ULL),
+ make_floatx80_init(0xbffc, 0xfb4eccd6663e7428ULL) },
+ { make_floatx80_init(0xbffd, 0xc00000000000eff0ULL),
+ make_floatx80_init(0x3ffe, 0xc5672a1155069abeULL),
+ make_floatx80_init(0xbffc, 0xea6357baabe59508ULL) },
+ { make_floatx80_init(0xbffd, 0xb000000000000fe6ULL),
+ make_floatx80_init(0x3ffe, 0xc9b9bd866e2f234bULL),
+ make_floatx80_init(0xbffc, 0xd91909e6474372d4ULL) },
+ { make_floatx80_init(0xbffd, 0x9fffffffffff2172ULL),
+ make_floatx80_init(0x3ffe, 0xce248c151f84bf00ULL),
+ make_floatx80_init(0xbffc, 0xc76dcfab81ed0400ULL) },
+ { make_floatx80_init(0xbffd, 0x8fffffffffffafffULL),
+ make_floatx80_init(0x3ffe, 0xd2a81d91f12afb2bULL),
+ make_floatx80_init(0xbffc, 0xb55f89b83b541354ULL) },
+ { make_floatx80_init(0xbffc, 0xffffffffffff81a3ULL),
+ make_floatx80_init(0x3ffe, 0xd744fccad69d7d5eULL),
+ make_floatx80_init(0xbffc, 0xa2ec0cd4a58a0a88ULL) },
+ { make_floatx80_init(0xbffc, 0xdfffffffffff1568ULL),
+ make_floatx80_init(0x3ffe, 0xdbfbb797daf25a44ULL),
+ make_floatx80_init(0xbffc, 0x901121a0943696f0ULL) },
+ { make_floatx80_init(0xbffc, 0xbfffffffffff68daULL),
+ make_floatx80_init(0x3ffe, 0xe0ccdeec2a94f811ULL),
+ make_floatx80_init(0xbffb, 0xf999089eab583f78ULL) },
+ { make_floatx80_init(0xbffc, 0x9fffffffffff4690ULL),
+ make_floatx80_init(0x3ffe, 0xe5b906e77c83657eULL),
+ make_floatx80_init(0xbffb, 0xd237c8c41be4d410ULL) },
+ { make_floatx80_init(0xbffb, 0xffffffffffff8aeeULL),
+ make_floatx80_init(0x3ffe, 0xeac0c6e7dd24427cULL),
+ make_floatx80_init(0xbffb, 0xa9f9c8c116ddec20ULL) },
+ { make_floatx80_init(0xbffb, 0xbfffffffffff2d18ULL),
+ make_floatx80_init(0x3ffe, 0xefe4b99bdcdb06ebULL),
+ make_floatx80_init(0xbffb, 0x80da33211927c8a8ULL) },
+ { make_floatx80_init(0xbffa, 0xffffffffffff8ccbULL),
+ make_floatx80_init(0x3ffe, 0xf5257d152486d0f4ULL),
+ make_floatx80_init(0xbffa, 0xada82eadb792f0c0ULL) },
+ { make_floatx80_init(0xbff9, 0xffffffffffff11feULL),
+ make_floatx80_init(0x3ffe, 0xfa83b2db722a0846ULL),
+ make_floatx80_init(0xbff9, 0xaf89a491babef740ULL) },
+ { floatx80_zero_init,
+ make_floatx80_init(0x3fff, 0x8000000000000000ULL),
+ floatx80_zero_init },
+ { make_floatx80_init(0x3ff9, 0xffffffffffff2680ULL),
+ make_floatx80_init(0x3fff, 0x82cd8698ac2b9f6fULL),
+ make_floatx80_init(0x3ff9, 0xb361a62b0ae7dbc0ULL) },
+ { make_floatx80_init(0x3ffb, 0x800000000000b500ULL),
+ make_floatx80_init(0x3fff, 0x85aac367cc488345ULL),
+ make_floatx80_init(0x3ffa, 0xb5586cf9891068a0ULL) },
+ { make_floatx80_init(0x3ffb, 0xbfffffffffff4b67ULL),
+ make_floatx80_init(0x3fff, 0x88980e8092da7cceULL),
+ make_floatx80_init(0x3ffb, 0x8980e8092da7cce0ULL) },
+ { make_floatx80_init(0x3ffb, 0xffffffffffffff57ULL),
+ make_floatx80_init(0x3fff, 0x8b95c1e3ea8bd6dfULL),
+ make_floatx80_init(0x3ffb, 0xb95c1e3ea8bd6df0ULL) },
+ { make_floatx80_init(0x3ffc, 0x9fffffffffff811fULL),
+ make_floatx80_init(0x3fff, 0x8ea4398b45cd4780ULL),
+ make_floatx80_init(0x3ffb, 0xea4398b45cd47800ULL) },
+ { make_floatx80_init(0x3ffc, 0xbfffffffffff9980ULL),
+ make_floatx80_init(0x3fff, 0x91c3d373ab11b919ULL),
+ make_floatx80_init(0x3ffc, 0x8e1e9b9d588dc8c8ULL) },
+ { make_floatx80_init(0x3ffc, 0xdffffffffffff631ULL),
+ make_floatx80_init(0x3fff, 0x94f4efa8fef70864ULL),
+ make_floatx80_init(0x3ffc, 0xa7a77d47f7b84320ULL) },
+ { make_floatx80_init(0x3ffc, 0xffffffffffff2499ULL),
+ make_floatx80_init(0x3fff, 0x9837f0518db892d4ULL),
+ make_floatx80_init(0x3ffc, 0xc1bf828c6dc496a0ULL) },
+ { make_floatx80_init(0x3ffd, 0x8fffffffffff80fbULL),
+ make_floatx80_init(0x3fff, 0x9b8d39b9d54e3a79ULL),
+ make_floatx80_init(0x3ffc, 0xdc69cdceaa71d3c8ULL) },
+ { make_floatx80_init(0x3ffd, 0x9fffffffffffbc23ULL),
+ make_floatx80_init(0x3fff, 0x9ef5326091a10313ULL),
+ make_floatx80_init(0x3ffc, 0xf7a993048d081898ULL) },
+ { make_floatx80_init(0x3ffd, 0xafffffffffff20ecULL),
+ make_floatx80_init(0x3fff, 0xa27043030c49370aULL),
+ make_floatx80_init(0x3ffd, 0x89c10c0c3124dc28ULL) },
+ { make_floatx80_init(0x3ffd, 0xc00000000000fd2cULL),
+ make_floatx80_init(0x3fff, 0xa5fed6a9b15171cfULL),
+ make_floatx80_init(0x3ffd, 0x97fb5aa6c545c73cULL) },
+ { make_floatx80_init(0x3ffd, 0xd0000000000093beULL),
+ make_floatx80_init(0x3fff, 0xa9a15ab4ea7c30e6ULL),
+ make_floatx80_init(0x3ffd, 0xa6856ad3a9f0c398ULL) },
+ { make_floatx80_init(0x3ffd, 0xe00000000000c2aeULL),
+ make_floatx80_init(0x3fff, 0xad583eea42a17876ULL),
+ make_floatx80_init(0x3ffd, 0xb560fba90a85e1d8ULL) },
+ { make_floatx80_init(0x3ffd, 0xefffffffffff1e3fULL),
+ make_floatx80_init(0x3fff, 0xb123f581d2abef6cULL),
+ make_floatx80_init(0x3ffd, 0xc48fd6074aafbdb0ULL) },
+ { make_floatx80_init(0x3ffd, 0xffffffffffff1c23ULL),
+ make_floatx80_init(0x3fff, 0xb504f333f9de2cadULL),
+ make_floatx80_init(0x3ffd, 0xd413cccfe778b2b4ULL) },
+ { make_floatx80_init(0x3ffe, 0x8800000000006344ULL),
+ make_floatx80_init(0x3fff, 0xb8fbaf4762fbd0a1ULL),
+ make_floatx80_init(0x3ffd, 0xe3eebd1d8bef4284ULL) },
+ { make_floatx80_init(0x3ffe, 0x9000000000005d67ULL),
+ make_floatx80_init(0x3fff, 0xbd08a39f580c668dULL),
+ make_floatx80_init(0x3ffd, 0xf4228e7d60319a34ULL) },
+ { make_floatx80_init(0x3ffe, 0x9800000000009127ULL),
+ make_floatx80_init(0x3fff, 0xc12c4cca6670e042ULL),
+ make_floatx80_init(0x3ffe, 0x82589994cce1c084ULL) },
+ { make_floatx80_init(0x3ffe, 0x9fffffffffff06f9ULL),
+ make_floatx80_init(0x3fff, 0xc5672a11550655c3ULL),
+ make_floatx80_init(0x3ffe, 0x8ace5422aa0cab86ULL) },
+ { make_floatx80_init(0x3ffe, 0xa7fffffffffff80dULL),
+ make_floatx80_init(0x3fff, 0xc9b9bd866e2f234bULL),
+ make_floatx80_init(0x3ffe, 0x93737b0cdc5e4696ULL) },
+ { make_floatx80_init(0x3ffe, 0xafffffffffff1470ULL),
+ make_floatx80_init(0x3fff, 0xce248c151f83fd69ULL),
+ make_floatx80_init(0x3ffe, 0x9c49182a3f07fad2ULL) },
+ { make_floatx80_init(0x3ffe, 0xb800000000000e0aULL),
+ make_floatx80_init(0x3fff, 0xd2a81d91f12aec5cULL),
+ make_floatx80_init(0x3ffe, 0xa5503b23e255d8b8ULL) },
+ { make_floatx80_init(0x3ffe, 0xc00000000000b7faULL),
+ make_floatx80_init(0x3fff, 0xd744fccad69dd630ULL),
+ make_floatx80_init(0x3ffe, 0xae89f995ad3bac60ULL) },
+ { make_floatx80_init(0x3ffe, 0xc800000000003aa6ULL),
+ make_floatx80_init(0x3fff, 0xdbfbb797daf25a44ULL),
+ make_floatx80_init(0x3ffe, 0xb7f76f2fb5e4b488ULL) },
+ { make_floatx80_init(0x3ffe, 0xd00000000000a6aeULL),
+ make_floatx80_init(0x3fff, 0xe0ccdeec2a954685ULL),
+ make_floatx80_init(0x3ffe, 0xc199bdd8552a8d0aULL) },
+ { make_floatx80_init(0x3ffe, 0xd800000000004165ULL),
+ make_floatx80_init(0x3fff, 0xe5b906e77c837155ULL),
+ make_floatx80_init(0x3ffe, 0xcb720dcef906e2aaULL) },
+ { make_floatx80_init(0x3ffe, 0xe00000000000582cULL),
+ make_floatx80_init(0x3fff, 0xeac0c6e7dd24713aULL),
+ make_floatx80_init(0x3ffe, 0xd5818dcfba48e274ULL) },
+ { make_floatx80_init(0x3ffe, 0xe800000000001a5dULL),
+ make_floatx80_init(0x3fff, 0xefe4b99bdcdb06ebULL),
+ make_floatx80_init(0x3ffe, 0xdfc97337b9b60dd6ULL) },
+ { make_floatx80_init(0x3ffe, 0xefffffffffffc1efULL),
+ make_floatx80_init(0x3fff, 0xf5257d152486a2faULL),
+ make_floatx80_init(0x3ffe, 0xea4afa2a490d45f4ULL) },
+ { make_floatx80_init(0x3ffe, 0xf800000000001069ULL),
+ make_floatx80_init(0x3fff, 0xfa83b2db722a0e5cULL),
+ make_floatx80_init(0x3ffe, 0xf50765b6e4541cb8ULL) },
+ { make_floatx80_init(0x3fff, 0x8000000000000000ULL),
+ make_floatx80_init(0x4000, 0x8000000000000000ULL),
+ make_floatx80_init(0x3fff, 0x8000000000000000ULL) },
+};
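+
+/*
+ * The table above is indexed by n = 32 + round(32 * x) for an
+ * argument x in [-1, 1], so entry n corresponds to t = n/32 - 1;
+ * entry 32 is the exact-zero row in the middle.
+ */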
+
+void helper_f2xm1(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ uint64_t sig = extractFloatx80Frac(ST0);
+ int32_t exp = extractFloatx80Exp(ST0);
+ bool sign = extractFloatx80Sign(ST0);
+
+ if (floatx80_invalid_encoding(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST0)) {
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_silence_nan(ST0, &env->fp_status);
+ }
+ } else if (exp > 0x3fff ||
+ (exp == 0x3fff && sig != (0x8000000000000000ULL))) {
+ /* Out of range for the instruction, treat as invalid. */
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else if (exp == 0x3fff) {
+ /* Argument 1 or -1, exact result 1 or -0.5. */
+ if (sign) {
+ ST0 = make_floatx80(0xbffe, 0x8000000000000000ULL);
+ }
+ } else if (exp < 0x3fb0) {
+ if (!floatx80_is_zero(ST0)) {
+ /*
+ * Multiplying the argument by an extra-precision version
+ * of log(2) is sufficiently precise. Zero arguments are
+ * returned unchanged.
+ */
+ uint64_t sig0, sig1, sig2;
+ if (exp == 0) {
+ normalizeFloatx80Subnormal(sig, &exp, &sig);
+ }
+ mul128By64To192(ln2_sig_high, ln2_sig_low, sig, &sig0, &sig1,
+ &sig2);
+ /* This result is inexact. */
+ sig1 |= 1;
+ ST0 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ sign, exp, sig0, sig1,
+ &env->fp_status);
+ }
+ } else {
+ floatx80 tmp, y, accum;
+ bool asign, bsign;
+ int32_t n, aexp, bexp;
+ uint64_t asig0, asig1, asig2, bsig0, bsig1;
+ FloatRoundMode save_mode = env->fp_status.float_rounding_mode;
+ FloatX80RoundPrec save_prec =
+ env->fp_status.floatx80_rounding_precision;
+ env->fp_status.float_rounding_mode = float_round_nearest_even;
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+
+ /* Find the nearest multiple of 1/32 to the argument. */
+ tmp = floatx80_scalbn(ST0, 5, &env->fp_status);
+ n = 32 + floatx80_to_int32(tmp, &env->fp_status);
+ y = floatx80_sub(ST0, f2xm1_table[n].t, &env->fp_status);
+
+ if (floatx80_is_zero(y)) {
+ /*
+ * Use the value of 2^t - 1 from the table, to avoid
+ * needing to special-case zero as a result of
+ * multiplication below.
+ */
+ ST0 = f2xm1_table[n].exp2m1;
+ set_float_exception_flags(float_flag_inexact, &env->fp_status);
+ env->fp_status.float_rounding_mode = save_mode;
+ } else {
+ /*
+ * Compute the lower parts of a polynomial expansion for
+ * (2^y - 1) / y.
+ */
+ accum = floatx80_mul(f2xm1_coeff_7, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_6, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_5, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_4, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_3, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_2, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_1, accum, &env->fp_status);
+ accum = floatx80_mul(accum, y, &env->fp_status);
+ accum = floatx80_add(f2xm1_coeff_0_low, accum, &env->fp_status);
+
+ /*
+ * The full polynomial expansion is f2xm1_coeff_0 + accum
+ * (where accum has much lower magnitude, and so, in
+ * particular, carry out of the addition is not possible).
+ * (This expansion is only accurate to about 70 bits, not
+ * 128 bits.)
+ */
+ aexp = extractFloatx80Exp(f2xm1_coeff_0);
+ asign = extractFloatx80Sign(f2xm1_coeff_0);
+ shift128RightJamming(extractFloatx80Frac(accum), 0,
+ aexp - extractFloatx80Exp(accum),
+ &asig0, &asig1);
+ bsig0 = extractFloatx80Frac(f2xm1_coeff_0);
+ bsig1 = 0;
+ if (asign == extractFloatx80Sign(accum)) {
+ add128(bsig0, bsig1, asig0, asig1, &asig0, &asig1);
+ } else {
+ sub128(bsig0, bsig1, asig0, asig1, &asig0, &asig1);
+ }
+ /* And thus compute an approximation to 2^y - 1. */
+ mul128By64To192(asig0, asig1, extractFloatx80Frac(y),
+ &asig0, &asig1, &asig2);
+ aexp += extractFloatx80Exp(y) - 0x3ffe;
+ asign ^= extractFloatx80Sign(y);
+ if (n != 32) {
+ /*
+ * Multiply this by the precomputed value of 2^t and
+ * add that of 2^t - 1.
+ */
+ mul128By64To192(asig0, asig1,
+ extractFloatx80Frac(f2xm1_table[n].exp2),
+ &asig0, &asig1, &asig2);
+ aexp += extractFloatx80Exp(f2xm1_table[n].exp2) - 0x3ffe;
+ bexp = extractFloatx80Exp(f2xm1_table[n].exp2m1);
+ bsig0 = extractFloatx80Frac(f2xm1_table[n].exp2m1);
+ bsig1 = 0;
+ if (bexp < aexp) {
+ shift128RightJamming(bsig0, bsig1, aexp - bexp,
+ &bsig0, &bsig1);
+ } else if (aexp < bexp) {
+ shift128RightJamming(asig0, asig1, bexp - aexp,
+ &asig0, &asig1);
+ aexp = bexp;
+ }
+ /* The sign of 2^t - 1 is always that of the result. */
+ bsign = extractFloatx80Sign(f2xm1_table[n].exp2m1);
+ if (asign == bsign) {
+ /* Avoid possible carry out of the addition. */
+ shift128RightJamming(asig0, asig1, 1,
+ &asig0, &asig1);
+ shift128RightJamming(bsig0, bsig1, 1,
+ &bsig0, &bsig1);
+ ++aexp;
+ add128(asig0, asig1, bsig0, bsig1, &asig0, &asig1);
+ } else {
+ sub128(bsig0, bsig1, asig0, asig1, &asig0, &asig1);
+ asign = bsign;
+ }
+ }
+ env->fp_status.float_rounding_mode = save_mode;
+ /* This result is inexact. */
+ asig1 |= 1;
+ ST0 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ asign, aexp, asig0, asig1,
+ &env->fp_status);
+ }
+
+ env->fp_status.floatx80_rounding_precision = save_prec;
+ }
+ merge_exception_flags(env, old_flags);
+}
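+
+/*
+ * A minimal double-precision sketch (illustration only, kept out of
+ * the build; assumes a C99 <math.h>) of the decomposition used by
+ * helper_f2xm1() above: with t the nearest multiple of 1/32 to x and
+ * y = x - t, 2^x - 1 = 2^t * (2^y - 1) + (2^t - 1).  The real code
+ * takes 2^t and 2^t - 1 from the table and evaluates (2^y - 1) / y
+ * with the polynomial coefficients above.
+ */
+#if 0
+#include <math.h>
+static double f2xm1_sketch(double x) /* assumes -1.0 <= x <= 1.0 */
+{
+    double t = nearbyint(x * 32.0) / 32.0; /* nearest multiple of 1/32 */
+    double y = x - t;                      /* |y| <= 1/64 */
+    double exp2t = exp2(t);                /* tabulated in the real code */
+    return exp2t * expm1(y * log(2.0)) + (exp2t - 1.0);
+}
+#endif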
+
+void helper_fptan(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ fptemp = tan(fptemp);
+ ST0 = double_to_floatx80(env, fptemp);
+ fpush(env);
+ ST0 = floatx80_one;
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**52 only */
+ }
+}
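+
+/*
+ * Setting C2 above signals that the operand was out of range and left
+ * unchanged, so the guest is expected to reduce the argument itself
+ * (typically with FPREM) and retry; fsin, fcos and fsincos below
+ * follow the same pattern.
+ */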
+
+/* Values of pi/4, pi/2, 3pi/4 and pi, with 128-bit precision. */
+#define pi_4_exp 0x3ffe
+#define pi_4_sig_high 0xc90fdaa22168c234ULL
+#define pi_4_sig_low 0xc4c6628b80dc1cd1ULL
+#define pi_2_exp 0x3fff
+#define pi_2_sig_high 0xc90fdaa22168c234ULL
+#define pi_2_sig_low 0xc4c6628b80dc1cd1ULL
+#define pi_34_exp 0x4000
+#define pi_34_sig_high 0x96cbe3f9990e91a7ULL
+#define pi_34_sig_low 0x9394c9e8a0a5159dULL
+#define pi_exp 0x4000
+#define pi_sig_high 0xc90fdaa22168c234ULL
+#define pi_sig_low 0xc4c6628b80dc1cd1ULL
+
+/*
+ * Polynomial coefficients for an approximation to atan(x), with only
+ * odd powers of x used, for x in the interval [-1/16, 1/16]. (Unlike
+ * for some other approximations, no low part is needed for the first
+ * coefficient here to achieve a sufficiently accurate result, because
+ * the coefficient in this minimax approximation is very close to
+ * exactly 1.)
+ */
+#define fpatan_coeff_0 make_floatx80(0x3fff, 0x8000000000000000ULL)
+#define fpatan_coeff_1 make_floatx80(0xbffd, 0xaaaaaaaaaaaaaa43ULL)
+#define fpatan_coeff_2 make_floatx80(0x3ffc, 0xccccccccccbfe4f8ULL)
+#define fpatan_coeff_3 make_floatx80(0xbffc, 0x92492491fbab2e66ULL)
+#define fpatan_coeff_4 make_floatx80(0x3ffb, 0xe38e372881ea1e0bULL)
+#define fpatan_coeff_5 make_floatx80(0xbffb, 0xba2c0104bbdd0615ULL)
+#define fpatan_coeff_6 make_floatx80(0x3ffb, 0x9baf7ebf898b42efULL)
+
+struct fpatan_data {
+ /* High and low parts of atan(x). */
+ floatx80 atan_high, atan_low;
+};
+
+static const struct fpatan_data fpatan_table[9] = {
+ { floatx80_zero_init,
+ floatx80_zero_init },
+ { make_floatx80_init(0x3ffb, 0xfeadd4d5617b6e33ULL),
+ make_floatx80_init(0xbfb9, 0xdda19d8305ddc420ULL) },
+ { make_floatx80_init(0x3ffc, 0xfadbafc96406eb15ULL),
+ make_floatx80_init(0x3fbb, 0xdb8f3debef442fccULL) },
+ { make_floatx80_init(0x3ffd, 0xb7b0ca0f26f78474ULL),
+ make_floatx80_init(0xbfbc, 0xeab9bdba460376faULL) },
+ { make_floatx80_init(0x3ffd, 0xed63382b0dda7b45ULL),
+ make_floatx80_init(0x3fbc, 0xdfc88bd978751a06ULL) },
+ { make_floatx80_init(0x3ffe, 0x8f005d5ef7f59f9bULL),
+ make_floatx80_init(0x3fbd, 0xb906bc2ccb886e90ULL) },
+ { make_floatx80_init(0x3ffe, 0xa4bc7d1934f70924ULL),
+ make_floatx80_init(0x3fbb, 0xcd43f9522bed64f8ULL) },
+ { make_floatx80_init(0x3ffe, 0xb8053e2bc2319e74ULL),
+ make_floatx80_init(0xbfbc, 0xd3496ab7bd6eef0cULL) },
+ { make_floatx80_init(0x3ffe, 0xc90fdaa22168c235ULL),
+ make_floatx80_init(0xbfbc, 0xece675d1fc8f8cbcULL) },
+};
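+
+/*
+ * Entry n of the table above holds atan(n/8), split into a high part
+ * and a small correction so that their sum carries well over 64 bits
+ * of precision; n is obtained by rounding 8 * x to the nearest
+ * integer for the reduced argument x in (0, 1].
+ */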
+
+void helper_fpatan(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ uint64_t arg0_sig = extractFloatx80Frac(ST0);
+ int32_t arg0_exp = extractFloatx80Exp(ST0);
+ bool arg0_sign = extractFloatx80Sign(ST0);
+ uint64_t arg1_sig = extractFloatx80Frac(ST1);
+ int32_t arg1_exp = extractFloatx80Exp(ST1);
+ bool arg1_sign = extractFloatx80Sign(ST1);
+
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST0, &env->fp_status);
+ } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST1, &env->fp_status);
+ } else if (floatx80_invalid_encoding(ST0) ||
+ floatx80_invalid_encoding(ST1)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST0)) {
+ ST1 = ST0;
+ } else if (floatx80_is_any_nan(ST1)) {
+ /* Pass this NaN through. */
+ } else if (floatx80_is_zero(ST1) && !arg0_sign) {
+ /* Pass this zero through. */
+ } else if (((floatx80_is_infinity(ST0) && !floatx80_is_infinity(ST1)) ||
+ arg0_exp - arg1_exp >= 80) &&
+ !arg0_sign) {
+ /*
+ * Dividing ST1 by ST0 gives the correct result up to
+ * rounding, and avoids spurious underflow exceptions that
+ * might result from passing some small values through the
+ * polynomial approximation, but if a finite nonzero result of
+ * division is exact, the result of fpatan is still inexact
+ * (and underflowing where appropriate).
+ */
+ FloatX80RoundPrec save_prec =
+ env->fp_status.floatx80_rounding_precision;
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+ ST1 = floatx80_div(ST1, ST0, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = save_prec;
+ if (!floatx80_is_zero(ST1) &&
+ !(get_float_exception_flags(&env->fp_status) &
+ float_flag_inexact)) {
+ /*
+ * The mathematical result is very slightly closer to zero
+ * than this exact result. Round a value with the
+ * significand adjusted accordingly to get the correct
+ * exceptions, and possibly an adjusted result depending
+ * on the rounding mode.
+ */
+ uint64_t sig = extractFloatx80Frac(ST1);
+ int32_t exp = extractFloatx80Exp(ST1);
+ bool sign = extractFloatx80Sign(ST1);
+ if (exp == 0) {
+ normalizeFloatx80Subnormal(sig, &exp, &sig);
+ }
+ ST1 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ sign, exp, sig - 1,
+ -1, &env->fp_status);
+ }
+ } else {
+ /* The result is inexact. */
+ bool rsign = arg1_sign;
+ int32_t rexp;
+ uint64_t rsig0, rsig1;
+ if (floatx80_is_zero(ST1)) {
+ /*
+ * ST0 is negative. The result is pi with the sign of
+ * ST1.
+ */
+ rexp = pi_exp;
+ rsig0 = pi_sig_high;
+ rsig1 = pi_sig_low;
+ } else if (floatx80_is_infinity(ST1)) {
+ if (floatx80_is_infinity(ST0)) {
+ if (arg0_sign) {
+ rexp = pi_34_exp;
+ rsig0 = pi_34_sig_high;
+ rsig1 = pi_34_sig_low;
+ } else {
+ rexp = pi_4_exp;
+ rsig0 = pi_4_sig_high;
+ rsig1 = pi_4_sig_low;
+ }
+ } else {
+ rexp = pi_2_exp;
+ rsig0 = pi_2_sig_high;
+ rsig1 = pi_2_sig_low;
+ }
+ } else if (floatx80_is_zero(ST0) || arg1_exp - arg0_exp >= 80) {
+ rexp = pi_2_exp;
+ rsig0 = pi_2_sig_high;
+ rsig1 = pi_2_sig_low;
+ } else if (floatx80_is_infinity(ST0) || arg0_exp - arg1_exp >= 80) {
+ /* ST0 is negative. */
+ rexp = pi_exp;
+ rsig0 = pi_sig_high;
+ rsig1 = pi_sig_low;
+ } else {
+ /*
+ * ST0 and ST1 are finite, nonzero and with exponents not
+ * too far apart.
+ */
+ int32_t adj_exp, num_exp, den_exp, xexp, yexp, n, texp, zexp, aexp;
+ int32_t azexp, axexp;
+ bool adj_sub, ysign, zsign;
+ uint64_t adj_sig0, adj_sig1, num_sig, den_sig, xsig0, xsig1;
+ uint64_t msig0, msig1, msig2, remsig0, remsig1, remsig2;
+ uint64_t ysig0, ysig1, tsig, zsig0, zsig1, asig0, asig1;
+ uint64_t azsig0, azsig1;
+ uint64_t azsig2, azsig3, axsig0, axsig1;
+ floatx80 x8;
+ FloatRoundMode save_mode = env->fp_status.float_rounding_mode;
+ FloatX80RoundPrec save_prec =
+ env->fp_status.floatx80_rounding_precision;
+ env->fp_status.float_rounding_mode = float_round_nearest_even;
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+
+ if (arg0_exp == 0) {
+ normalizeFloatx80Subnormal(arg0_sig, &arg0_exp, &arg0_sig);
+ }
+ if (arg1_exp == 0) {
+ normalizeFloatx80Subnormal(arg1_sig, &arg1_exp, &arg1_sig);
+ }
+ if (arg0_exp > arg1_exp ||
+ (arg0_exp == arg1_exp && arg0_sig >= arg1_sig)) {
+ /* Work with abs(ST1) / abs(ST0). */
+ num_exp = arg1_exp;
+ num_sig = arg1_sig;
+ den_exp = arg0_exp;
+ den_sig = arg0_sig;
+ if (arg0_sign) {
+ /* The result is subtracted from pi. */
+ adj_exp = pi_exp;
+ adj_sig0 = pi_sig_high;
+ adj_sig1 = pi_sig_low;
+ adj_sub = true;
+ } else {
+ /* The result is used as-is. */
+ adj_exp = 0;
+ adj_sig0 = 0;
+ adj_sig1 = 0;
+ adj_sub = false;
+ }
+ } else {
+ /* Work with abs(ST0) / abs(ST1). */
+ num_exp = arg0_exp;
+ num_sig = arg0_sig;
+ den_exp = arg1_exp;
+ den_sig = arg1_sig;
+ /* The result is added to or subtracted from pi/2. */
+ adj_exp = pi_2_exp;
+ adj_sig0 = pi_2_sig_high;
+ adj_sig1 = pi_2_sig_low;
+ adj_sub = !arg0_sign;
+ }
+
+ /*
+ * Compute x = num/den, where 0 < x <= 1 and x is not too
+ * small.
+ */
+ xexp = num_exp - den_exp + 0x3ffe;
+ remsig0 = num_sig;
+ remsig1 = 0;
+ if (den_sig <= remsig0) {
+ shift128Right(remsig0, remsig1, 1, &remsig0, &remsig1);
+ ++xexp;
+ }
+ xsig0 = estimateDiv128To64(remsig0, remsig1, den_sig);
+ mul64To128(den_sig, xsig0, &msig0, &msig1);
+ sub128(remsig0, remsig1, msig0, msig1, &remsig0, &remsig1);
+ while ((int64_t) remsig0 < 0) {
+ --xsig0;
+ add128(remsig0, remsig1, 0, den_sig, &remsig0, &remsig1);
+ }
+ xsig1 = estimateDiv128To64(remsig1, 0, den_sig);
+ /*
+ * No need to correct any estimation error in xsig1; even
+ * with such error, it is accurate enough.
+ */
+
+ /*
+ * Split x as x = t + y, where t = n/8 is the nearest
+ * multiple of 1/8 to x.
+ */
+ x8 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ false, xexp + 3, xsig0,
+ xsig1, &env->fp_status);
+ n = floatx80_to_int32(x8, &env->fp_status);
+ if (n == 0) {
+ ysign = false;
+ yexp = xexp;
+ ysig0 = xsig0;
+ ysig1 = xsig1;
+ texp = 0;
+ tsig = 0;
+ } else {
+ int shift = clz32(n) + 32;
+ texp = 0x403b - shift;
+ tsig = n;
+ tsig <<= shift;
+ if (texp == xexp) {
+ sub128(xsig0, xsig1, tsig, 0, &ysig0, &ysig1);
+ if ((int64_t) ysig0 >= 0) {
+ ysign = false;
+ if (ysig0 == 0) {
+ if (ysig1 == 0) {
+ yexp = 0;
+ } else {
+ shift = clz64(ysig1) + 64;
+ yexp = xexp - shift;
+ shift128Left(ysig0, ysig1, shift,
+ &ysig0, &ysig1);
+ }
+ } else {
+ shift = clz64(ysig0);
+ yexp = xexp - shift;
+ shift128Left(ysig0, ysig1, shift, &ysig0, &ysig1);
+ }
+ } else {
+ ysign = true;
+ sub128(0, 0, ysig0, ysig1, &ysig0, &ysig1);
+ if (ysig0 == 0) {
+ shift = clz64(ysig1) + 64;
+ } else {
+ shift = clz64(ysig0);
+ }
+ yexp = xexp - shift;
+ shift128Left(ysig0, ysig1, shift, &ysig0, &ysig1);
+ }
+ } else {
+ /*
+ * t's exponent must be greater than x's because t
+ * is positive and the nearest multiple of 1/8 to
+ * x, and if x has a greater exponent, the power
+ * of 2 with that exponent is also a multiple of
+ * 1/8.
+ */
+ uint64_t usig0, usig1;
+ shift128RightJamming(xsig0, xsig1, texp - xexp,
+ &usig0, &usig1);
+ ysign = true;
+ sub128(tsig, 0, usig0, usig1, &ysig0, &ysig1);
+ if (ysig0 == 0) {
+ shift = clz64(ysig1) + 64;
+ } else {
+ shift = clz64(ysig0);
+ }
+ yexp = texp - shift;
+ shift128Left(ysig0, ysig1, shift, &ysig0, &ysig1);
+ }
+ }
+
+ /*
+ * Compute z = y/(1+tx), so arctan(x) = arctan(t) +
+ * arctan(z).
+ */
+ zsign = ysign;
+ if (texp == 0 || yexp == 0) {
+ zexp = yexp;
+ zsig0 = ysig0;
+ zsig1 = ysig1;
+ } else {
+ /*
+ * t <= 1, x <= 1 and if both are 1 then y is 0, so tx < 1.
+ */
+ int32_t dexp = texp + xexp - 0x3ffe;
+ uint64_t dsig0, dsig1, dsig2;
+ mul128By64To192(xsig0, xsig1, tsig, &dsig0, &dsig1, &dsig2);
+ /*
+ * dexp <= 0x3fff (and if equal, dsig0 has a leading 0
+ * bit). Add 1 to produce the denominator 1+tx.
+ */
+ shift128RightJamming(dsig0, dsig1, 0x3fff - dexp,
+ &dsig0, &dsig1);
+ dsig0 |= 0x8000000000000000ULL;
+ zexp = yexp - 1;
+ remsig0 = ysig0;
+ remsig1 = ysig1;
+ remsig2 = 0;
+ if (dsig0 <= remsig0) {
+ shift128Right(remsig0, remsig1, 1, &remsig0, &remsig1);
+ ++zexp;
+ }
+ zsig0 = estimateDiv128To64(remsig0, remsig1, dsig0);
+ mul128By64To192(dsig0, dsig1, zsig0, &msig0, &msig1, &msig2);
+ sub192(remsig0, remsig1, remsig2, msig0, msig1, msig2,
+ &remsig0, &remsig1, &remsig2);
+ while ((int64_t) remsig0 < 0) {
+ --zsig0;
+ add192(remsig0, remsig1, remsig2, 0, dsig0, dsig1,
+ &remsig0, &remsig1, &remsig2);
+ }
+ zsig1 = estimateDiv128To64(remsig1, remsig2, dsig0);
+ /* No need to correct any estimation error in zsig1. */
+ }
+
+ if (zexp == 0) {
+ azexp = 0;
+ azsig0 = 0;
+ azsig1 = 0;
+ } else {
+ floatx80 z2, accum;
+ uint64_t z2sig0, z2sig1, z2sig2, z2sig3;
+ /* Compute z^2. */
+ mul128To256(zsig0, zsig1, zsig0, zsig1,
+ &z2sig0, &z2sig1, &z2sig2, &z2sig3);
+ z2 = normalizeRoundAndPackFloatx80(floatx80_precision_x, false,
+ zexp + zexp - 0x3ffe,
+ z2sig0, z2sig1,
+ &env->fp_status);
+
+ /* Compute the lower parts of the polynomial expansion. */
+ accum = floatx80_mul(fpatan_coeff_6, z2, &env->fp_status);
+ accum = floatx80_add(fpatan_coeff_5, accum, &env->fp_status);
+ accum = floatx80_mul(accum, z2, &env->fp_status);
+ accum = floatx80_add(fpatan_coeff_4, accum, &env->fp_status);
+ accum = floatx80_mul(accum, z2, &env->fp_status);
+ accum = floatx80_add(fpatan_coeff_3, accum, &env->fp_status);
+ accum = floatx80_mul(accum, z2, &env->fp_status);
+ accum = floatx80_add(fpatan_coeff_2, accum, &env->fp_status);
+ accum = floatx80_mul(accum, z2, &env->fp_status);
+ accum = floatx80_add(fpatan_coeff_1, accum, &env->fp_status);
+ accum = floatx80_mul(accum, z2, &env->fp_status);
+
+ /*
+ * The full polynomial expansion is z*(fpatan_coeff_0 + accum).
+ * fpatan_coeff_0 is 1, and accum is negative and much smaller.
+ */
+ aexp = extractFloatx80Exp(fpatan_coeff_0);
+ shift128RightJamming(extractFloatx80Frac(accum), 0,
+ aexp - extractFloatx80Exp(accum),
+ &asig0, &asig1);
+ sub128(extractFloatx80Frac(fpatan_coeff_0), 0, asig0, asig1,
+ &asig0, &asig1);
+ /* Multiply by z to compute arctan(z). */
+ azexp = aexp + zexp - 0x3ffe;
+ mul128To256(asig0, asig1, zsig0, zsig1, &azsig0, &azsig1,
+ &azsig2, &azsig3);
+ }
+
+ /* Add arctan(t) (positive or zero) and arctan(z) (sign zsign). */
+ if (texp == 0) {
+ /* z is positive. */
+ axexp = azexp;
+ axsig0 = azsig0;
+ axsig1 = azsig1;
+ } else {
+ bool low_sign = extractFloatx80Sign(fpatan_table[n].atan_low);
+ int32_t low_exp = extractFloatx80Exp(fpatan_table[n].atan_low);
+ uint64_t low_sig0 =
+ extractFloatx80Frac(fpatan_table[n].atan_low);
+ uint64_t low_sig1 = 0;
+ axexp = extractFloatx80Exp(fpatan_table[n].atan_high);
+ axsig0 = extractFloatx80Frac(fpatan_table[n].atan_high);
+ axsig1 = 0;
+ shift128RightJamming(low_sig0, low_sig1, axexp - low_exp,
+ &low_sig0, &low_sig1);
+ if (low_sign) {
+ sub128(axsig0, axsig1, low_sig0, low_sig1,
+ &axsig0, &axsig1);
+ } else {
+ add128(axsig0, axsig1, low_sig0, low_sig1,
+ &axsig0, &axsig1);
+ }
+ if (azexp >= axexp) {
+ shift128RightJamming(axsig0, axsig1, azexp - axexp + 1,
+ &axsig0, &axsig1);
+ axexp = azexp + 1;
+ shift128RightJamming(azsig0, azsig1, 1,
+ &azsig0, &azsig1);
+ } else {
+ shift128RightJamming(axsig0, axsig1, 1,
+ &axsig0, &axsig1);
+ shift128RightJamming(azsig0, azsig1, axexp - azexp + 1,
+ &azsig0, &azsig1);
+ ++axexp;
+ }
+ if (zsign) {
+ sub128(axsig0, axsig1, azsig0, azsig1,
+ &axsig0, &axsig1);
+ } else {
+ add128(axsig0, axsig1, azsig0, azsig1,
+ &axsig0, &axsig1);
+ }
+ }
+
+ if (adj_exp == 0) {
+ rexp = axexp;
+ rsig0 = axsig0;
+ rsig1 = axsig1;
+ } else {
+ /*
+ * Add or subtract arctan(x) (exponent axexp,
+ * significand axsig0 and axsig1, positive, not
+ * necessarily normalized) to the number given by
+ * adj_exp, adj_sig0 and adj_sig1, according to
+ * adj_sub.
+ */
+ if (adj_exp >= axexp) {
+ shift128RightJamming(axsig0, axsig1, adj_exp - axexp + 1,
+ &axsig0, &axsig1);
+ rexp = adj_exp + 1;
+ shift128RightJamming(adj_sig0, adj_sig1, 1,
+ &adj_sig0, &adj_sig1);
+ } else {
+ shift128RightJamming(axsig0, axsig1, 1,
+ &axsig0, &axsig1);
+ shift128RightJamming(adj_sig0, adj_sig1,
+ axexp - adj_exp + 1,
+ &adj_sig0, &adj_sig1);
+ rexp = axexp + 1;
+ }
+ if (adj_sub) {
+ sub128(adj_sig0, adj_sig1, axsig0, axsig1,
+ &rsig0, &rsig1);
+ } else {
+ add128(adj_sig0, adj_sig1, axsig0, axsig1,
+ &rsig0, &rsig1);
+ }
+ }
+
+ env->fp_status.float_rounding_mode = save_mode;
+ env->fp_status.floatx80_rounding_precision = save_prec;
+ }
+ /* This result is inexact. */
+ rsig1 |= 1;
+ ST1 = normalizeRoundAndPackFloatx80(floatx80_precision_x, rsign, rexp,
+ rsig0, rsig1, &env->fp_status);
+ }
+
+ fpop(env);
+ merge_exception_flags(env, old_flags);
+}
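+
+/*
+ * The reduction above rests on two identities: atan2 is first reduced
+ * to atan(r) with 0 < r <= 1 by dividing the smaller magnitude by the
+ * larger and adjusting by 0, pi/2 or pi, and then, with t = n/8 close
+ * to r, atan(r) = atan(t) + atan((r - t) / (1 + t*r)).
+ */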
+
+void helper_fxtract(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ CPU_LDoubleU temp;
+
+ temp.d = ST0;
+
+ if (floatx80_is_zero(ST0)) {
+ /* Easy way to generate -inf and raise the divide-by-zero exception */
+ ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero,
+ &env->fp_status);
+ fpush(env);
+ ST0 = temp.d;
+ } else if (floatx80_invalid_encoding(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ fpush(env);
+ ST0 = ST1;
+ } else if (floatx80_is_any_nan(ST0)) {
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_silence_nan(ST0, &env->fp_status);
+ }
+ fpush(env);
+ ST0 = ST1;
+ } else if (floatx80_is_infinity(ST0)) {
+ fpush(env);
+ ST0 = ST1;
+ ST1 = floatx80_infinity;
+ } else {
+ int expdif;
+
+ if (EXPD(temp) == 0) {
+ int shift = clz64(temp.l.lower);
+ temp.l.lower <<= shift;
+ expdif = 1 - EXPBIAS - shift;
+ float_raise(float_flag_input_denormal, &env->fp_status);
+ } else {
+ expdif = EXPD(temp) - EXPBIAS;
+ }
+ /* Store the unbiased exponent as a floatx80 integer value. */
+ ST0 = int32_to_floatx80(expdif, &env->fp_status);
+ fpush(env);
+ BIASEXPONENT(temp);
+ ST0 = temp.d;
+ }
+ merge_exception_flags(env, old_flags);
+}
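+
+/*
+ * After FXTRACT, ST1 holds the unbiased exponent and ST0 the
+ * significand scaled into [1, 2): an input of 6.0 (1.5 * 2^2) leaves
+ * ST0 = 1.5 and ST1 = 2.0.  A zero input yields ST1 = -inf and
+ * raises the divide-by-zero exception, matching the architectural
+ * definition.
+ */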
+
+static void helper_fprem_common(CPUX86State *env, bool mod)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ uint64_t quotient;
+ CPU_LDoubleU temp0, temp1;
+ int exp0, exp1, expdiff;
+
+ temp0.d = ST0;
+ temp1.d = ST1;
+ exp0 = EXPD(temp0);
+ exp1 = EXPD(temp1);
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ if (floatx80_is_zero(ST0) || floatx80_is_zero(ST1) ||
+ exp0 == 0x7fff || exp1 == 0x7fff ||
+ floatx80_invalid_encoding(ST0) || floatx80_invalid_encoding(ST1)) {
+ ST0 = floatx80_modrem(ST0, ST1, mod, &quotient, &env->fp_status);
+ } else {
+ if (exp0 == 0) {
+ exp0 = 1 - clz64(temp0.l.lower);
+ }
+ if (exp1 == 0) {
+ exp1 = 1 - clz64(temp1.l.lower);
+ }
+ expdiff = exp0 - exp1;
+ if (expdiff < 64) {
+ ST0 = floatx80_modrem(ST0, ST1, mod, &quotient, &env->fp_status);
+ env->fpus |= (quotient & 0x4) << (8 - 2); /* (C0) <-- q2 */
+ env->fpus |= (quotient & 0x2) << (14 - 1); /* (C3) <-- q1 */
+ env->fpus |= (quotient & 0x1) << (9 - 0); /* (C1) <-- q0 */
+ } else {
+ /*
+ * Partial remainder. This choice of how many bits to
+ * process at once is specified in AMD instruction set
+ * manuals, and empirically is followed by Intel
+ * processors as well; it ensures that the final remainder
+ * operation in a loop does produce the correct low three
+ * bits of the quotient. AMD manuals specify that the
+ * flags other than C2 are cleared, and empirically Intel
+ * processors clear them as well.
+ */
+ int n = 32 + (expdiff % 32);
+ temp1.d = floatx80_scalbn(temp1.d, expdiff - n, &env->fp_status);
+ ST0 = floatx80_mod(ST0, temp1.d, &env->fp_status);
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ }
+ }
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fprem1(CPUX86State *env)
+{
+ helper_fprem_common(env, false);
+}
+
+void helper_fprem(CPUX86State *env)
+{
+ helper_fprem_common(env, true);
+}
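+
+/*
+ * The mod flag selects the quotient rounding: FPREM (mod == true)
+ * truncates the quotient toward zero, giving C's fmod() semantics,
+ * while FPREM1 (mod == false) rounds it to nearest, giving the IEEE
+ * remainder.  Both report a partial reduction by setting C2, in which
+ * case the instruction must be executed again.
+ */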
+
+/* 128-bit significand of log2(e). */
+#define log2_e_sig_high 0xb8aa3b295c17f0bbULL
+#define log2_e_sig_low 0xbe87fed0691d3e89ULL
+
+/*
+ * Polynomial coefficients for an approximation to log2((1+x)/(1-x)),
+ * with only odd powers of x used, for x in the interval [2*sqrt(2)-3,
+ * 3-2*sqrt(2)], which corresponds to logarithms of numbers in the
+ * interval [sqrt(2)/2, sqrt(2)].
+ */
+#define fyl2x_coeff_0 make_floatx80(0x4000, 0xb8aa3b295c17f0bcULL)
+#define fyl2x_coeff_0_low make_floatx80(0xbfbf, 0x834972fe2d7bab1bULL)
+#define fyl2x_coeff_1 make_floatx80(0x3ffe, 0xf6384ee1d01febb8ULL)
+#define fyl2x_coeff_2 make_floatx80(0x3ffe, 0x93bb62877cdfa2e3ULL)
+#define fyl2x_coeff_3 make_floatx80(0x3ffd, 0xd30bb153d808f269ULL)
+#define fyl2x_coeff_4 make_floatx80(0x3ffd, 0xa42589eaf451499eULL)
+#define fyl2x_coeff_5 make_floatx80(0x3ffd, 0x864d42c0f8f17517ULL)
+#define fyl2x_coeff_6 make_floatx80(0x3ffc, 0xe3476578adf26272ULL)
+#define fyl2x_coeff_7 make_floatx80(0x3ffc, 0xc506c5f874e6d80fULL)
+#define fyl2x_coeff_8 make_floatx80(0x3ffc, 0xac5cf50cc57d6372ULL)
+#define fyl2x_coeff_9 make_floatx80(0x3ffc, 0xb1ed0066d971a103ULL)
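+
+/*
+ * The expansion is applied to t = arg / (2 + arg), for which
+ * (1 + t) / (1 - t) == 1 + arg, so log2(1 + arg) equals
+ * log2((1 + t) / (1 - t)) and the odd-powers series in t converges
+ * quickly on the interval above.
+ */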
+
+/*
+ * Compute an approximation of log2(1+arg), where 1+arg is in the
+ * interval [sqrt(2)/2, sqrt(2)]. It is assumed that when this
+ * function is called, rounding precision is set to 80 and the
+ * round-to-nearest mode is in effect. arg must not be exactly zero,
+ * and must not be so close to zero that underflow might occur.
+ */
+static void helper_fyl2x_common(CPUX86State *env, floatx80 arg, int32_t *exp,
+ uint64_t *sig0, uint64_t *sig1)
+{
+ uint64_t arg0_sig = extractFloatx80Frac(arg);
+ int32_t arg0_exp = extractFloatx80Exp(arg);
+ bool arg0_sign = extractFloatx80Sign(arg);
+ bool asign;
+ int32_t dexp, texp, aexp;
+ uint64_t dsig0, dsig1, tsig0, tsig1, rsig0, rsig1, rsig2;
+ uint64_t msig0, msig1, msig2, t2sig0, t2sig1, t2sig2, t2sig3;
+ uint64_t asig0, asig1, asig2, asig3, bsig0, bsig1;
+ floatx80 t2, accum;
+
+ /*
+ * Compute an approximation of arg/(2+arg), with extra precision,
+ * as the argument to a polynomial approximation. The extra
+ * precision is only needed for the first term of the
+ * approximation, with subsequent terms being significantly
+ * smaller; the approximation only uses odd exponents, and the
+ * square of arg/(2+arg) is at most 17-12*sqrt(2) = 0.029....
+ */
+ if (arg0_sign) {
+ dexp = 0x3fff;
+ shift128RightJamming(arg0_sig, 0, dexp - arg0_exp, &dsig0, &dsig1);
+ sub128(0, 0, dsig0, dsig1, &dsig0, &dsig1);
+ } else {
+ dexp = 0x4000;
+ shift128RightJamming(arg0_sig, 0, dexp - arg0_exp, &dsig0, &dsig1);
+ dsig0 |= 0x8000000000000000ULL;
+ }
+ texp = arg0_exp - dexp + 0x3ffe;
+ rsig0 = arg0_sig;
+ rsig1 = 0;
+ rsig2 = 0;
+ if (dsig0 <= rsig0) {
+ shift128Right(rsig0, rsig1, 1, &rsig0, &rsig1);
+ ++texp;
+ }
+ tsig0 = estimateDiv128To64(rsig0, rsig1, dsig0);
+ mul128By64To192(dsig0, dsig1, tsig0, &msig0, &msig1, &msig2);
+ sub192(rsig0, rsig1, rsig2, msig0, msig1, msig2,
+ &rsig0, &rsig1, &rsig2);
+ while ((int64_t) rsig0 < 0) {
+ --tsig0;
+ add192(rsig0, rsig1, rsig2, 0, dsig0, dsig1,
+ &rsig0, &rsig1, &rsig2);
+ }
+ tsig1 = estimateDiv128To64(rsig1, rsig2, dsig0);
+ /*
+ * No need to correct any estimation error in tsig1; even with
+ * such error, it is accurate enough. Now compute the square of
+ * that approximation.
+ */
+ mul128To256(tsig0, tsig1, tsig0, tsig1,
+ &t2sig0, &t2sig1, &t2sig2, &t2sig3);
+ t2 = normalizeRoundAndPackFloatx80(floatx80_precision_x, false,
+ texp + texp - 0x3ffe,
+ t2sig0, t2sig1, &env->fp_status);
+
+ /* Compute the lower parts of the polynomial expansion. */
+ accum = floatx80_mul(fyl2x_coeff_9, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_8, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_7, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_6, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_5, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_4, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_3, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_2, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_1, accum, &env->fp_status);
+ accum = floatx80_mul(accum, t2, &env->fp_status);
+ accum = floatx80_add(fyl2x_coeff_0_low, accum, &env->fp_status);
+
+ /*
+ * The full polynomial expansion is fyl2x_coeff_0 + accum (where
+ * accum has much lower magnitude, and so, in particular, carry
+ * out of the addition is not possible), multiplied by t. (This
+ * expansion is only accurate to about 70 bits, not 128 bits.)
+ */
+ aexp = extractFloatx80Exp(fyl2x_coeff_0);
+ asign = extractFloatx80Sign(fyl2x_coeff_0);
+ shift128RightJamming(extractFloatx80Frac(accum), 0,
+ aexp - extractFloatx80Exp(accum),
+ &asig0, &asig1);
+ bsig0 = extractFloatx80Frac(fyl2x_coeff_0);
+ bsig1 = 0;
+ if (asign == extractFloatx80Sign(accum)) {
+ add128(bsig0, bsig1, asig0, asig1, &asig0, &asig1);
+ } else {
+ sub128(bsig0, bsig1, asig0, asig1, &asig0, &asig1);
+ }
+ /* Multiply by t to compute the required result. */
+ mul128To256(asig0, asig1, tsig0, tsig1,
+ &asig0, &asig1, &asig2, &asig3);
+ aexp += texp - 0x3ffe;
+ *exp = aexp;
+ *sig0 = asig0;
+ *sig1 = asig1;
+}
+
+void helper_fyl2xp1(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ uint64_t arg0_sig = extractFloatx80Frac(ST0);
+ int32_t arg0_exp = extractFloatx80Exp(ST0);
+ bool arg0_sign = extractFloatx80Sign(ST0);
+ uint64_t arg1_sig = extractFloatx80Frac(ST1);
+ int32_t arg1_exp = extractFloatx80Exp(ST1);
+ bool arg1_sign = extractFloatx80Sign(ST1);
+
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST0, &env->fp_status);
+ } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST1, &env->fp_status);
+ } else if (floatx80_invalid_encoding(ST0) ||
+ floatx80_invalid_encoding(ST1)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST0)) {
+ ST1 = ST0;
+ } else if (floatx80_is_any_nan(ST1)) {
+ /* Pass this NaN through. */
+ } else if (arg0_exp > 0x3ffd ||
+ (arg0_exp == 0x3ffd && arg0_sig > (arg0_sign ?
+ 0x95f619980c4336f7ULL :
+ 0xd413cccfe7799211ULL))) {
+ /*
+ * Out of range for the instruction (ST0 must have absolute
+ * value less than 1 - sqrt(2)/2 = 0.292..., according to
+ * Intel manuals; AMD manuals allow a range from sqrt(2)/2 - 1
+ * to sqrt(2) - 1, which we allow here), treat as invalid.
+ */
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_zero(ST0) || floatx80_is_zero(ST1) ||
+ arg1_exp == 0x7fff) {
+ /*
+ * One argument is zero, or multiplying by infinity; correct
+ * result is exact and can be obtained by multiplying the
+ * arguments.
+ */
+ ST1 = floatx80_mul(ST0, ST1, &env->fp_status);
+ } else if (arg0_exp < 0x3fb0) {
+ /*
+ * Multiplying both arguments and an extra-precision version
+ * of log2(e) is sufficiently precise.
+ */
+ uint64_t sig0, sig1, sig2;
+ int32_t exp;
+ if (arg0_exp == 0) {
+ normalizeFloatx80Subnormal(arg0_sig, &arg0_exp, &arg0_sig);
+ }
+ if (arg1_exp == 0) {
+ normalizeFloatx80Subnormal(arg1_sig, &arg1_exp, &arg1_sig);
+ }
+ mul128By64To192(log2_e_sig_high, log2_e_sig_low, arg0_sig,
+ &sig0, &sig1, &sig2);
+ exp = arg0_exp + 1;
+ mul128By64To192(sig0, sig1, arg1_sig, &sig0, &sig1, &sig2);
+ exp += arg1_exp - 0x3ffe;
+ /* This result is inexact. */
+ sig1 |= 1;
+ ST1 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ arg0_sign ^ arg1_sign, exp,
+ sig0, sig1, &env->fp_status);
+ } else {
+ int32_t aexp;
+ uint64_t asig0, asig1, asig2;
+ FloatRoundMode save_mode = env->fp_status.float_rounding_mode;
+ FloatX80RoundPrec save_prec =
+ env->fp_status.floatx80_rounding_precision;
+ env->fp_status.float_rounding_mode = float_round_nearest_even;
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+
+ helper_fyl2x_common(env, ST0, &aexp, &asig0, &asig1);
+ /*
+ * Multiply by the second argument to compute the required
+ * result.
+ */
+ if (arg1_exp == 0) {
+ normalizeFloatx80Subnormal(arg1_sig, &arg1_exp, &arg1_sig);
+ }
+ mul128By64To192(asig0, asig1, arg1_sig, &asig0, &asig1, &asig2);
+ aexp += arg1_exp - 0x3ffe;
+ /* This result is inexact. */
+ asig1 |= 1;
+ env->fp_status.float_rounding_mode = save_mode;
+ ST1 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ arg0_sign ^ arg1_sign, aexp,
+ asig0, asig1, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = save_prec;
+ }
+ fpop(env);
+ merge_exception_flags(env, old_flags);
+}
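+
+/*
+ * FYL2XP1 exists alongside FYL2X because forming 1 + x explicitly
+ * would lose the low bits of a tiny x before the logarithm is taken;
+ * the helper above instead feeds x directly into the log2(1 + arg)
+ * expansion, or for very small x multiplies by an extra-precision
+ * log2(e).
+ */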
+
+void helper_fyl2x(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ uint64_t arg0_sig = extractFloatx80Frac(ST0);
+ int32_t arg0_exp = extractFloatx80Exp(ST0);
+ bool arg0_sign = extractFloatx80Sign(ST0);
+ uint64_t arg1_sig = extractFloatx80Frac(ST1);
+ int32_t arg1_exp = extractFloatx80Exp(ST1);
+ bool arg1_sign = extractFloatx80Sign(ST1);
+
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST0, &env->fp_status);
+ } else if (floatx80_is_signaling_nan(ST1, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_silence_nan(ST1, &env->fp_status);
+ } else if (floatx80_invalid_encoding(ST0) ||
+ floatx80_invalid_encoding(ST1)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST0)) {
+ ST1 = ST0;
+ } else if (floatx80_is_any_nan(ST1)) {
+ /* Pass this NaN through. */
+ } else if (arg0_sign && !floatx80_is_zero(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_infinity(ST1)) {
+ FloatRelation cmp = floatx80_compare(ST0, floatx80_one,
+ &env->fp_status);
+ switch (cmp) {
+ case float_relation_less:
+ ST1 = floatx80_chs(ST1);
+ break;
+ case float_relation_greater:
+ /* Result is infinity of the same sign as ST1. */
+ break;
+ default:
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ break;
+ }
+ } else if (floatx80_is_infinity(ST0)) {
+ if (floatx80_is_zero(ST1)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else if (arg1_sign) {
+ ST1 = floatx80_chs(ST0);
+ } else {
+ ST1 = ST0;
+ }
+ } else if (floatx80_is_zero(ST0)) {
+ if (floatx80_is_zero(ST1)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST1 = floatx80_default_nan(&env->fp_status);
+ } else {
+ /* Result is infinity with opposite sign to ST1. */
+ float_raise(float_flag_divbyzero, &env->fp_status);
+ ST1 = make_floatx80(arg1_sign ? 0x7fff : 0xffff,
+ 0x8000000000000000ULL);
+ }
+ } else if (floatx80_is_zero(ST1)) {
+ if (floatx80_lt(ST0, floatx80_one, &env->fp_status)) {
+ ST1 = floatx80_chs(ST1);
+ }
+ /* Otherwise, ST1 is already the correct result. */
+ } else if (floatx80_eq(ST0, floatx80_one, &env->fp_status)) {
+ if (arg1_sign) {
+ ST1 = floatx80_chs(floatx80_zero);
+ } else {
+ ST1 = floatx80_zero;
+ }
+ } else {
+ int32_t int_exp;
+ floatx80 arg0_m1;
+ FloatRoundMode save_mode = env->fp_status.float_rounding_mode;
+ FloatX80RoundPrec save_prec =
+ env->fp_status.floatx80_rounding_precision;
+ env->fp_status.float_rounding_mode = float_round_nearest_even;
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+
+ if (arg0_exp == 0) {
+ normalizeFloatx80Subnormal(arg0_sig, &arg0_exp, &arg0_sig);
+ }
+ if (arg1_exp == 0) {
+ normalizeFloatx80Subnormal(arg1_sig, &arg1_exp, &arg1_sig);
+ }
+ int_exp = arg0_exp - 0x3fff;
+ if (arg0_sig > 0xb504f333f9de6484ULL) {
+ ++int_exp;
+ }
+ arg0_m1 = floatx80_sub(floatx80_scalbn(ST0, -int_exp,
+ &env->fp_status),
+ floatx80_one, &env->fp_status);
+ if (floatx80_is_zero(arg0_m1)) {
+ /* Exact power of 2; multiply by ST1. */
+ env->fp_status.float_rounding_mode = save_mode;
+ ST1 = floatx80_mul(int32_to_floatx80(int_exp, &env->fp_status),
+ ST1, &env->fp_status);
+ } else {
+ bool asign = extractFloatx80Sign(arg0_m1);
+ int32_t aexp;
+ uint64_t asig0, asig1, asig2;
+ helper_fyl2x_common(env, arg0_m1, &aexp, &asig0, &asig1);
+ if (int_exp != 0) {
+ bool isign = (int_exp < 0);
+ int32_t iexp;
+ uint64_t isig;
+ int shift;
+ int_exp = isign ? -int_exp : int_exp;
+ shift = clz32(int_exp) + 32;
+ isig = int_exp;
+ isig <<= shift;
+ iexp = 0x403e - shift;
+ shift128RightJamming(asig0, asig1, iexp - aexp,
+ &asig0, &asig1);
+ if (asign == isign) {
+ add128(isig, 0, asig0, asig1, &asig0, &asig1);
+ } else {
+ sub128(isig, 0, asig0, asig1, &asig0, &asig1);
+ }
+ aexp = iexp;
+ asign = isign;
+ }
+ /*
+ * Multiply by the second argument to compute the required
+ * result.
+ */
+ if (arg1_exp == 0) {
+ normalizeFloatx80Subnormal(arg1_sig, &arg1_exp, &arg1_sig);
+ }
+ mul128By64To192(asig0, asig1, arg1_sig, &asig0, &asig1, &asig2);
+ aexp += arg1_exp - 0x3ffe;
+ /* This result is inexact. */
+ asig1 |= 1;
+ env->fp_status.float_rounding_mode = save_mode;
+ ST1 = normalizeRoundAndPackFloatx80(floatx80_precision_x,
+ asign ^ arg1_sign, aexp,
+ asig0, asig1, &env->fp_status);
+ }
+
+ env->fp_status.floatx80_rounding_precision = save_prec;
+ }
+ fpop(env);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsqrt(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ if (floatx80_is_neg(ST0)) {
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ env->fpus |= 0x400;
+ }
+ ST0 = floatx80_sqrt(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsincos(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, sin(fptemp));
+ fpush(env);
+ ST0 = double_to_floatx80(env, cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_frndint(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ ST0 = floatx80_round_to_int(ST0, &env->fp_status);
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fscale(CPUX86State *env)
+{
+ uint8_t old_flags = save_exception_flags(env);
+ if (floatx80_invalid_encoding(ST1) || floatx80_invalid_encoding(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else if (floatx80_is_any_nan(ST1)) {
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ }
+ ST0 = ST1;
+ if (floatx80_is_signaling_nan(ST0, &env->fp_status)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_silence_nan(ST0, &env->fp_status);
+ }
+ } else if (floatx80_is_infinity(ST1) &&
+ !floatx80_invalid_encoding(ST0) &&
+ !floatx80_is_any_nan(ST0)) {
+ if (floatx80_is_neg(ST1)) {
+ if (floatx80_is_infinity(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else {
+ ST0 = (floatx80_is_neg(ST0) ?
+ floatx80_chs(floatx80_zero) :
+ floatx80_zero);
+ }
+ } else {
+ if (floatx80_is_zero(ST0)) {
+ float_raise(float_flag_invalid, &env->fp_status);
+ ST0 = floatx80_default_nan(&env->fp_status);
+ } else {
+ ST0 = (floatx80_is_neg(ST0) ?
+ floatx80_chs(floatx80_infinity) :
+ floatx80_infinity);
+ }
+ }
+ } else {
+ int n;
+ FloatX80RoundPrec save = env->fp_status.floatx80_rounding_precision;
+ uint8_t save_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+ n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+ set_float_exception_flags(save_flags, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = floatx80_precision_x;
+ ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
+ env->fp_status.floatx80_rounding_precision = save;
+ }
+ merge_exception_flags(env, old_flags);
+}
+
+void helper_fsin(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, sin(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**53 only */
+ }
+}
+
+void helper_fcos(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_fxam_ST0(CPUX86State *env)
+{
+ CPU_LDoubleU temp;
+ int expdif;
+
+ temp.d = ST0;
+
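+    /*
+     * FXAM reports the class of ST0 in C3:C2:C0 -- 000 unsupported,
+     * 001 NaN, 010 normal, 011 infinity, 100 zero, 101 empty,
+     * 110 denormal -- while C1 reflects the sign.
+     */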
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ if (SIGND(temp)) {
+ env->fpus |= 0x200; /* C1 <-- 1 */
+ }
+
+ if (env->fptags[env->fpstt]) {
+ env->fpus |= 0x4100; /* Empty */
+ return;
+ }
+
+ expdif = EXPD(temp);
+ if (expdif == MAXEXPD) {
+ if (MANTD(temp) == 0x8000000000000000ULL) {
+ env->fpus |= 0x500; /* Infinity */
+ } else if (MANTD(temp) & 0x8000000000000000ULL) {
+ env->fpus |= 0x100; /* NaN */
+ }
+ } else if (expdif == 0) {
+ if (MANTD(temp) == 0) {
+ env->fpus |= 0x4000; /* Zero */
+ } else {
+ env->fpus |= 0x4400; /* Denormal */
+ }
+ } else if (MANTD(temp) & 0x8000000000000000ULL) {
+ env->fpus |= 0x400;
+ }
+}
+
+static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ int fpus, fptag, exp, i;
+ uint64_t mant;
+ CPU_LDoubleU tmp;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
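+    /*
+     * Build the x87 tag word: two bits per register, 00 = valid,
+     * 01 = zero, 10 = special (NaN, infinity or denormal), 11 = empty.
+     */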
+ for (i = 7; i >= 0; i--) {
+ fptag <<= 2;
+ if (env->fptags[i]) {
+ fptag |= 3;
+ } else {
+ tmp.d = env->fpregs[i].d;
+ exp = EXPD(tmp);
+ mant = MANTD(tmp);
+ if (exp == 0 && mant == 0) {
+ /* zero */
+ fptag |= 1;
+ } else if (exp == 0 || exp == MAXEXPD
+ || (mant & (1LL << 63)) == 0) {
+ /* NaNs, infinity, denormal */
+ fptag |= 2;
+ }
+ }
+ }
+ if (data32) {
+ /* 32 bit */
+ cpu_stl_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stl_data_ra(env, ptr + 4, fpus, retaddr);
+ cpu_stl_data_ra(env, ptr + 8, fptag, retaddr);
+ cpu_stl_data_ra(env, ptr + 12, env->fpip, retaddr); /* fpip */
+ cpu_stl_data_ra(env, ptr + 16, env->fpcs, retaddr); /* fpcs */
+        cpu_stl_data_ra(env, ptr + 20, env->fpdp, retaddr); /* fpdp */
+        cpu_stl_data_ra(env, ptr + 24, env->fpds, retaddr); /* fpds */
+ } else {
+ /* 16 bit */
+ cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
+ cpu_stw_data_ra(env, ptr + 4, fptag, retaddr);
+ cpu_stw_data_ra(env, ptr + 6, env->fpip, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, env->fpcs, retaddr);
+ cpu_stw_data_ra(env, ptr + 10, env->fpdp, retaddr);
+ cpu_stw_data_ra(env, ptr + 12, env->fpds, retaddr);
+ }
+}
+
+void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fstenv(env, ptr, data32, GETPC());
+}
+
+static void cpu_set_fpus(CPUX86State *env, uint16_t fpus)
+{
+ env->fpstt = (fpus >> 11) & 7;
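+    /*
+     * On the 387 and later, the busy bit (B) simply mirrors the
+     * exception summary bit (ES).
+     */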
+ env->fpus = fpus & ~0x3800 & ~FPUS_B;
+ env->fpus |= env->fpus & FPUS_SE ? FPUS_B : 0;
+#if !defined(CONFIG_USER_ONLY)
+ if (!(env->fpus & FPUS_SE)) {
+ /*
+         * Here the processor deasserts FERR#; in response, the chipset
+         * deasserts IGNNE#.
+ */
+ cpu_clear_ignne();
+ }
+#endif
+}
+
+static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ int i, fpus, fptag;
+
+ if (data32) {
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+ } else {
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+ }
+ cpu_set_fpus(env, fpus);
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag & 3) == 3);
+ fptag >>= 2;
+ }
+}
+
+void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fldenv(env, ptr, data32, GETPC());
+}
+
+static void do_fsave(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ floatx80 tmp;
+ int i;
+
+ do_fstenv(env, ptr, data32, retaddr);
+
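+    /*
+     * do_fstenv() wrote 28 bytes in 32-bit mode or 14 bytes in 16-bit
+     * mode; the eight 80-bit registers are stored right after it.
+     */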
+ ptr += (14 << data32);
+ for (i = 0; i < 8; i++) {
+ tmp = ST(i);
+ do_fstt(env, tmp, ptr, retaddr);
+ ptr += 10;
+ }
+
+ do_fninit(env);
+}
+
+void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fsave(env, ptr, data32, GETPC());
+}
+
+static void do_frstor(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ floatx80 tmp;
+ int i;
+
+ do_fldenv(env, ptr, data32, retaddr);
+ ptr += (14 << data32);
+
+ for (i = 0; i < 8; i++) {
+ tmp = do_fldt(env, ptr, retaddr);
+ ST(i) = tmp;
+ ptr += 10;
+ }
+}
+
+void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_frstor(env, ptr, data32, GETPC());
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fsave(env, ptr, data32, 0);
+}
+
+void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_frstor(env, ptr, data32, 0);
+}
+#endif
+
+#define XO(X) offsetof(X86XSaveArea, X)
+
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int fpus, fptag, i;
+ target_ulong addr;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i = 0; i < 8; i++) {
+ fptag |= (env->fptags[i] << i);
+ }
+
+ cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra);
+
+ /* In 32-bit mode this is eip, sel, dp, sel.
+ In 64-bit mode this is rip, rdp.
+ But in either case we don't write actual data, just zeros. */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpip), 0, ra); /* eip+sel; rip */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */
+
+ addr = ptr + XO(legacy.fpregs);
+ for (i = 0; i < 8; i++) {
+ floatx80 tmp = ST(i);
+ do_fstt(env, tmp, addr, ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ update_mxcsr_from_sse_status(env);
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + XO(legacy.xmm_regs);
+ for (i = 0; i < nb_xmm_regs; i++) {
+ cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+ cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra);
+ }
+}
+
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu),
+ env->bndcs_regs.cfgu, ra);
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts),
+ env->bndcs_regs.sts, ra);
+}
+
+static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, ptr, env->pkru, ra);
+}
+
+static void do_fxsave(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xsave_fpu(env, ptr, ra);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ do_xsave_mxcsr(env, ptr, ra);
+ /* Fast FXSAVE leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ }
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+ do_fxsave(env, ptr, GETPC());
+}
+
+static uint64_t get_xinuse(CPUX86State *env)
+{
+ uint64_t inuse = -1;
+
+    /* For the most part, we don't track XINUSE. We could calculate it
+       here for all components, but it's probably less work to simply
+       report everything as in use. That said, the state of BNDREGS is
+       important enough to track in HFLAGS, so we might as well use
+       that here. */
+ if ((env->hflags & HF_MPX_IU_MASK) == 0) {
+ inuse &= ~XSTATE_BNDREGS_MASK;
+ }
+ return inuse;
+}
+
+static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
+ uint64_t inuse, uint64_t opt, uintptr_t ra)
+{
+ uint64_t old_bv, new_bv;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Never save anything not enabled by XCR0. */
+ rfbm &= env->xcr0;
+ opt &= rfbm;
+
+ if (opt & XSTATE_FP_MASK) {
+ do_xsave_fpu(env, ptr, ra);
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that saving MXCSR is not suppressed by XSAVEOPT. */
+ do_xsave_mxcsr(env, ptr, ra);
+ }
+ if (opt & XSTATE_SSE_MASK) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ if (opt & XSTATE_BNDREGS_MASK) {
+ do_xsave_bndregs(env, ptr + XO(bndreg_state), ra);
+ }
+ if (opt & XSTATE_BNDCSR_MASK) {
+ do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra);
+ }
+ if (opt & XSTATE_PKRU_MASK) {
+ do_xsave_pkru(env, ptr + XO(pkru_state), ra);
+ }
+
+ /* Update the XSTATE_BV field. */
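+    /*
+     * Bits covered by rfbm take their current in-use value; bits
+     * outside rfbm keep whatever the header in memory already had.
+     */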
+ old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+ new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
+ cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra);
+}
+
+void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC());
+}
+
+void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uint64_t inuse = get_xinuse(env);
+ do_xsave(env, ptr, rfbm, inuse, inuse, GETPC());
+}
+
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, fpuc, fpus, fptag;
+ target_ulong addr;
+
+ fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra);
+ fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra);
+ fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra);
+ cpu_set_fpuc(env, fpuc);
+ cpu_set_fpus(env, fpus);
+ fptag ^= 0xff;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag >> i) & 1);
+ }
+
+ addr = ptr + XO(legacy.fpregs);
+ for (i = 0; i < 8; i++) {
+ floatx80 tmp = do_fldt(env, addr, ra);
+ ST(i) = tmp;
+ addr += 16;
+ }
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + XO(legacy.xmm_regs);
+ for (i = 0; i < nb_xmm_regs; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+ env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+ addr += 16;
+ }
+}
+
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra);
+ env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+}
+
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->bndcs_regs.cfgu
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra);
+ env->bndcs_regs.sts
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra);
+}
+
+static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ env->pkru = cpu_ldq_data_ra(env, ptr, ra);
+}
+
+static void do_fxrstor(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xrstor_fpu(env, ptr, ra);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ do_xrstor_mxcsr(env, ptr, ra);
+ /* Fast FXRSTOR leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ do_xrstor_sse(env, ptr, ra);
+ }
+ }
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+ do_fxrstor(env, ptr, GETPC());
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_fxsave(CPUX86State *env, target_ulong ptr)
+{
+ do_fxsave(env, ptr, 0);
+}
+
+void cpu_x86_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+ do_fxrstor(env, ptr, 0);
+}
+#endif
+
+void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uintptr_t ra = GETPC();
+ uint64_t xstate_bv, xcomp_bv, reserve0;
+
+ rfbm &= env->xcr0;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+
+ if ((int64_t)xstate_bv < 0) {
+ /* FIXME: Compact form. */
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Standard form. */
+
+ /* The XSTATE_BV field must not set bits not present in XCR0. */
+ if (xstate_bv & ~env->xcr0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* The XCOMP_BV field must be zero. Note that, as of the April 2016
+ revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2)
+ describes only XCOMP_BV, but the description of the standard form
+ of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which
+ includes the next 64-bit field. */
+ xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra);
+ reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra);
+ if (xcomp_bv || reserve0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ if (rfbm & XSTATE_FP_MASK) {
+ if (xstate_bv & XSTATE_FP_MASK) {
+ do_xrstor_fpu(env, ptr, ra);
+ } else {
+ do_fninit(env);
+ memset(env->fpregs, 0, sizeof(env->fpregs));
+ }
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that the standard form of XRSTOR loads MXCSR from memory
+ whether or not the XSTATE_BV bit is set. */
+ do_xrstor_mxcsr(env, ptr, ra);
+ if (xstate_bv & XSTATE_SSE_MASK) {
+ do_xrstor_sse(env, ptr, ra);
+ } else {
+ /* ??? When AVX is implemented, we may have to be more
+ selective in the clearing. */
+ memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
+ }
+ }
+ if (rfbm & XSTATE_BNDREGS_MASK) {
+ if (xstate_bv & XSTATE_BNDREGS_MASK) {
+ do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra);
+ env->hflags |= HF_MPX_IU_MASK;
+ } else {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+ }
+ if (rfbm & XSTATE_BNDCSR_MASK) {
+ if (xstate_bv & XSTATE_BNDCSR_MASK) {
+ do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra);
+ } else {
+ memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
+ }
+ cpu_sync_bndcs_hflags(env);
+ }
+ if (rfbm & XSTATE_PKRU_MASK) {
+ uint64_t old_pkru = env->pkru;
+ if (xstate_bv & XSTATE_PKRU_MASK) {
+ do_xrstor_pkru(env, ptr + XO(pkru_state), ra);
+ } else {
+ env->pkru = 0;
+ }
+ if (env->pkru != old_pkru) {
+ CPUState *cs = env_cpu(env);
+ tlb_flush(cs);
+ }
+ }
+}
+
+#undef XO
+
+uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
+{
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ switch (ecx) {
+ case 0:
+ return env->xcr0;
+ case 1:
+ if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) {
+ return env->xcr0 & get_xinuse(env);
+ }
+ break;
+ }
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
+{
+ uint32_t dummy, ena_lo, ena_hi;
+ uint64_t ena;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ /* Only XCR0 is defined at present; the FPU may not be disabled. */
+ if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling unimplemented features. */
+ cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi);
+ ena = ((uint64_t)ena_hi << 32) | ena_lo;
+ if (mask & ~ena) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling only half of MPX. */
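+    /*
+     * Multiplying by XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK shifts
+     * the BNDREGS bit up into the BNDCSR position, so the XOR below is
+     * nonzero in that position exactly when one of the two bits is set
+     * without the other.
+     */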
+ if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK)))
+ & XSTATE_BNDCSR_MASK) {
+ goto do_gpf;
+ }
+
+ env->xcr0 = mask;
+ cpu_sync_bndcs_hflags(env);
+ return;
+
+ do_gpf:
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+/* MMX/SSE */
+/* XXX: optimize by storing fpstt and fptags in the static cpu state */
+
+#define SSE_DAZ 0x0040
+#define SSE_RC_MASK 0x6000
+#define SSE_RC_NEAR 0x0000
+#define SSE_RC_DOWN 0x2000
+#define SSE_RC_UP 0x4000
+#define SSE_RC_CHOP 0x6000
+#define SSE_FZ 0x8000
+
+void update_mxcsr_status(CPUX86State *env)
+{
+ uint32_t mxcsr = env->mxcsr;
+ int rnd_type;
+
+ /* set rounding mode */
+ switch (mxcsr & SSE_RC_MASK) {
+ default:
+ case SSE_RC_NEAR:
+ rnd_type = float_round_nearest_even;
+ break;
+ case SSE_RC_DOWN:
+ rnd_type = float_round_down;
+ break;
+ case SSE_RC_UP:
+ rnd_type = float_round_up;
+ break;
+ case SSE_RC_CHOP:
+ rnd_type = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->sse_status);
+
+ /* Set exception flags. */
+ set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
+ (mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
+ (mxcsr & FPUS_OE ? float_flag_overflow : 0) |
+ (mxcsr & FPUS_UE ? float_flag_underflow : 0) |
+ (mxcsr & FPUS_PE ? float_flag_inexact : 0),
+ &env->sse_status);
+
+ /* set denormals are zero */
+ set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
+
+ /* set flush to zero */
+ set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
+}
+
+void update_mxcsr_from_sse_status(CPUX86State *env)
+{
+ uint8_t flags = get_float_exception_flags(&env->sse_status);
+ /*
+ * The MXCSR denormal flag has opposite semantics to
+ * float_flag_input_denormal (the softfloat code sets that flag
+ * only when flushing input denormals to zero, but SSE sets it
+ * only when not flushing them to zero), so is not converted
+ * here.
+ */
+ env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
+ (flags & float_flag_divbyzero ? FPUS_ZE : 0) |
+ (flags & float_flag_overflow ? FPUS_OE : 0) |
+ (flags & float_flag_underflow ? FPUS_UE : 0) |
+ (flags & float_flag_inexact ? FPUS_PE : 0) |
+ (flags & float_flag_output_denormal ? FPUS_UE | FPUS_PE :
+ 0));
+}
+
+void helper_update_mxcsr(CPUX86State *env)
+{
+ update_mxcsr_from_sse_status(env);
+}
+
+void helper_ldmxcsr(CPUX86State *env, uint32_t val)
+{
+ cpu_set_mxcsr(env, val);
+}
+
+void helper_enter_mmx(CPUX86State *env)
+{
+ env->fpstt = 0;
+ *(uint32_t *)(env->fptags) = 0;
+ *(uint32_t *)(env->fptags + 4) = 0;
+}
+
+void helper_emms(CPUX86State *env)
+{
+ /* set to empty state */
+ *(uint32_t *)(env->fptags) = 0x01010101;
+ *(uint32_t *)(env->fptags + 4) = 0x01010101;
+}
+
+/* XXX: suppress */
+void helper_movq(CPUX86State *env, void *d, void *s)
+{
+ *(uint64_t *)d = *(uint64_t *)s;
+}
+
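+/*
+ * ops_sse.h is included twice: SHIFT 0 instantiates the 64-bit MMX
+ * helpers and SHIFT 1 the 128-bit SSE helpers.
+ */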
+#define SHIFT 0
+#include "ops_sse.h"
+
+#define SHIFT 1
+#include "ops_sse.h"
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
new file mode 100644
index 000000000..0a4401e91
--- /dev/null
+++ b/target/i386/tcg/helper-tcg.h
@@ -0,0 +1,110 @@
+/*
+ * TCG specific prototypes for helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef I386_HELPER_TCG_H
+#define I386_HELPER_TCG_H
+
+#include "exec/exec-all.h"
+
+/* Maximum instruction code size */
+#define TARGET_MAX_INSN_SIZE 16
+
+#if defined(TARGET_X86_64)
+# define TCG_PHYS_ADDR_BITS 40
+#else
+# define TCG_PHYS_ADDR_BITS 36
+#endif
+
+QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS);
+
+/**
+ * x86_cpu_do_interrupt:
+ * @cpu: vCPU the interrupt is to be handled by.
+ */
+void x86_cpu_do_interrupt(CPUState *cpu);
+#ifndef CONFIG_USER_ONLY
+bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif
+
+/* helper.c */
+#ifdef CONFIG_USER_ONLY
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#endif
+
+void breakpoint_handler(CPUState *cs);
+
+/* n must be a constant to be efficient */
+static inline target_long lshift(target_long x, int n)
+{
+ if (n >= 0) {
+ return x << n;
+ } else {
+ return x >> (-n);
+ }
+}
+
+/* translate.c */
+void tcg_x86_init(void);
+
+/* excp_helper.c */
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+ uintptr_t retaddr);
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr);
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+ int error_code, int next_eip_addend);
+
+/* cc_helper.c */
+extern const uint8_t parity_table[256];
+
+/* misc_helper.c */
+void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
+void do_pause(CPUX86State *env) QEMU_NORETURN;
+
+/* sysemu/svm_helper.c */
+#ifndef CONFIG_USER_ONLY
+void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
+ uint64_t exit_info_1, uintptr_t retaddr);
+void do_vmexit(CPUX86State *env);
+#endif
+
+/* seg_helper.c */
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
+void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw);
+void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm);
+int exception_has_error_code(int intno);
+
+/* smm_helper.c */
+void do_smm_enter(X86CPU *cpu);
+
+/* bpt_helper.c */
+bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
+
+#endif /* I386_HELPER_TCG_H */
diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c
new file mode 100644
index 000000000..87fa7280e
--- /dev/null
+++ b/target/i386/tcg/int_helper.c
@@ -0,0 +1,494 @@
+/*
+ * x86 integer helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+#include "helper-tcg.h"
+
+//#define DEBUG_MULDIV
+
+/* modulo 9 table */
+static const uint8_t rclb_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 0, 1, 2, 3, 4,
+};
+
+/* modulo 17 table */
+static const uint8_t rclw_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14,
+};
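+/*
+ * RCL rotates through an (n+1)-bit quantity (the operand plus CF), so
+ * the shift count is reduced modulo 9 for byte operands and modulo 17
+ * for word operands; the tables above fold the 5-bit count directly.
+ */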
+
+/* division, flags are undefined */
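+/*
+ * Each helper divides the double-width dividend (AX, DX:AX or EDX:EAX)
+ * by the operand; e.g. for DIV r/m8, AX = 100 with divisor 7 leaves
+ * AL = 14 (quotient) and AH = 2 (remainder).
+ */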
+
+void helper_divb_AL(CPUX86State *env, target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff);
+ den = (t0 & 0xff);
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q > 0xff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_idivb_AL(CPUX86State *env, target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (int16_t)env->regs[R_EAX];
+ den = (int8_t)t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q != (int8_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_divw_AX(CPUX86State *env, target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+ den = (t0 & 0xffff);
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q > 0xffff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+ env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_idivw_AX(CPUX86State *env, target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+ den = (int16_t)t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q != (int16_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+ env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_divl_EAX(CPUX86State *env, target_ulong t0)
+{
+ unsigned int den, r;
+ uint64_t num, q;
+
+ num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q > 0xffffffff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = (uint32_t)q;
+ env->regs[R_EDX] = (uint32_t)r;
+}
+
+void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
+{
+ int den, r;
+ int64_t num, q;
+
+ num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q != (int32_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = (uint32_t)q;
+ env->regs[R_EDX] = (uint32_t)r;
+}
+
+/* bcd */
+
+/* XXX: exception */
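+/*
+ * AAM splits AL into digits: AH = AL / base and AL = AL % base, where
+ * base is 10 unless an immediate is given; e.g. AL = 23 yields AH = 2,
+ * AL = 3.  AAD folds them back: AL = AH * base + AL.
+ */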
+void helper_aam(CPUX86State *env, int base)
+{
+ int al, ah;
+
+ al = env->regs[R_EAX] & 0xff;
+ ah = al / base;
+ al = al % base;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_DST = al;
+}
+
+void helper_aad(CPUX86State *env, int base)
+{
+ int al, ah;
+
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+ al = ((ah * base) + al) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al;
+ CC_DST = al;
+}
+
+void helper_aaa(CPUX86State *env)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+ icarry = (al > 0xf9);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0x0f;
+ ah = (ah + 1 + icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void helper_aas(CPUX86State *env)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+ icarry = (al < 6);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al - 6) & 0x0f;
+ ah = (ah - 1 - icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
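+/*
+ * DAA adjusts AL after adding two packed-BCD bytes: e.g. 0x38 + 0x45
+ * produces 0x7d, and the +6 low-nibble correction below turns it into
+ * the expected 0x83.
+ */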
+void helper_daa(CPUX86State *env)
+{
+ int old_al, al, af, cf;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ old_al = al = env->regs[R_EAX] & 0xff;
+
+ eflags = 0;
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0xff;
+ eflags |= CC_A;
+ }
+ if ((old_al > 0x99) || cf) {
+ al = (al + 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+void helper_das(CPUX86State *env)
+{
+ int al, al1, af, cf;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+
+ eflags = 0;
+ al1 = al;
+ if (((al & 0x0f) > 9) || af) {
+ eflags |= CC_A;
+ if (al < 6 || cf) {
+ eflags |= CC_C;
+ }
+ al = (al - 6) & 0xff;
+ }
+ if ((al1 > 0x99) || cf) {
+ al = (al - 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+#ifdef TARGET_X86_64
+static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
+{
+ *plow += a;
+ /* carry test */
+ if (*plow < a) {
+ (*phigh)++;
+ }
+ *phigh += b;
+}
+
+static void neg128(uint64_t *plow, uint64_t *phigh)
+{
+ *plow = ~*plow;
+ *phigh = ~*phigh;
+ add128(plow, phigh, 1, 0);
+}
+
+/* return TRUE if overflow */
+static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
+{
+ uint64_t q, r, a1, a0;
+ int i, qb, ab;
+
+ a0 = *plow;
+ a1 = *phigh;
+ if (a1 == 0) {
+ q = a0 / b;
+ r = a0 % b;
+ *plow = q;
+ *phigh = r;
+ } else {
+ if (a1 >= b) {
+ return 1;
+ }
+ /* XXX: use a better algorithm */
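+        /*
+         * Plain restoring division: each iteration shifts the 128-bit
+         * dividend (a1:a0) left one bit, subtracts b when it fits, and
+         * shifts the new quotient bit into a0; after 64 rounds a0
+         * holds the quotient and a1 the remainder.
+         */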
+ for (i = 0; i < 64; i++) {
+ ab = a1 >> 63;
+ a1 = (a1 << 1) | (a0 >> 63);
+ if (ab || a1 >= b) {
+ a1 -= b;
+ qb = 1;
+ } else {
+ qb = 0;
+ }
+ a0 = (a0 << 1) | qb;
+ }
+#if defined(DEBUG_MULDIV)
+ printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
+ ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
+ *phigh, *plow, b, a0, a1);
+#endif
+ *plow = a0;
+ *phigh = a1;
+ }
+ return 0;
+}
+
+/* return TRUE if overflow */
+static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
+{
+ int sa, sb;
+
+ sa = ((int64_t)*phigh < 0);
+ if (sa) {
+ neg128(plow, phigh);
+ }
+ sb = (b < 0);
+ if (sb) {
+ b = -b;
+ }
+ if (div64(plow, phigh, b) != 0) {
+ return 1;
+ }
+ if (sa ^ sb) {
+ if (*plow > (1ULL << 63)) {
+ return 1;
+ }
+ *plow = -*plow;
+ } else {
+ if (*plow >= (1ULL << 63)) {
+ return 1;
+ }
+ }
+ if (sa) {
+ *phigh = -*phigh;
+ }
+ return 0;
+}
+
+void helper_divq_EAX(CPUX86State *env, target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ r0 = env->regs[R_EAX];
+ r1 = env->regs[R_EDX];
+ if (div64(&r0, &r1, t0)) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = r0;
+ env->regs[R_EDX] = r1;
+}
+
+void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ r0 = env->regs[R_EAX];
+ r1 = env->regs[R_EDX];
+ if (idiv64(&r0, &r1, t0)) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = r0;
+ env->regs[R_EDX] = r1;
+}
+#endif
+
+#if TARGET_LONG_BITS == 32
+# define ctztl ctz32
+# define clztl clz32
+#else
+# define ctztl ctz64
+# define clztl clz64
+#endif
+
+target_ulong helper_pdep(target_ulong src, target_ulong mask)
+{
+ target_ulong dest = 0;
+ int i, o;
+
+ for (i = 0; mask != 0; i++) {
+ o = ctztl(mask);
+ mask &= mask - 1;
+ dest |= ((src >> i) & 1) << o;
+ }
+ return dest;
+}
+
+target_ulong helper_pext(target_ulong src, target_ulong mask)
+{
+ target_ulong dest = 0;
+ int i, o;
+
+ for (o = 0; mask != 0; o++) {
+ i = ctztl(mask);
+ mask &= mask - 1;
+ dest |= ((src >> i) & 1) << o;
+ }
+ return dest;
+}
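+/*
+ * Example: with mask 0b11010, PDEP scatters src bits 0..2 to positions
+ * 1, 3 and 4 (src 0b101 -> 0b10010), while PEXT gathers those same
+ * positions back down into the low bits (src 0b10110 -> 0b101).
+ */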
+
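+/*
+ * shift_helper_template.h is instantiated once per operand size:
+ * SHIFT 0/1/2/3 build the 8-, 16-, 32- and 64-bit variants.
+ */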
+#define SHIFT 0
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+#define SHIFT 3
+#include "shift_helper_template.h"
+#undef SHIFT
+#endif
+
+/* Test that BIT is enabled in CR4. If not, raise an illegal opcode
+ exception. This reduces the requirements for rare CR4 bits being
+ mapped into HFLAGS. */
+void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
+{
+ if (unlikely((env->cr[4] & bit) == 0)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+}
+
+target_ulong HELPER(rdrand)(CPUX86State *env)
+{
+ Error *err = NULL;
+ target_ulong ret;
+
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+        qemu_log_mask(LOG_UNIMP, "rdrand: Crypto failure: %s\n",
+ error_get_pretty(err));
+ error_free(err);
+ /* Failure clears CF and all other flags, and returns 0. */
+ env->cc_src = 0;
+ return 0;
+ }
+
+ /* Success sets CF and clears all others. */
+ env->cc_src = CC_C;
+ return ret;
+}
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
new file mode 100644
index 000000000..a207e624c
--- /dev/null
+++ b/target/i386/tcg/mem_helper.c
@@ -0,0 +1,183 @@
+/*
+ * x86 memory access helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "qemu/atomic128.h"
+#include "tcg/tcg.h"
+#include "helper-tcg.h"
+
+void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+ uint64_t oldv, cmpv, newv;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+ newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+ oldv = cpu_ldq_data_ra(env, a0, ra);
+ newv = (cmpv == oldv ? newv : oldv);
+ /* always do the store */
+ cpu_stq_data_ra(env, a0, newv, ra);
+
+ if (oldv == cmpv) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = (uint32_t)oldv;
+ env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
+{
+#ifdef CONFIG_ATOMIC64
+ uint64_t oldv, cmpv, newv;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+ newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+ {
+ uintptr_t ra = GETPC();
+ int mem_idx = cpu_mmu_index(env, false);
+ MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+ oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
+ }
+
+ if (oldv == cmpv) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = (uint32_t)oldv;
+ env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+#else
+ cpu_loop_exit_atomic(env_cpu(env), GETPC());
+#endif /* CONFIG_ATOMIC64 */
+}
+
+#ifdef TARGET_X86_64
+void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+ Int128 oldv, cmpv, newv;
+ uint64_t o0, o1;
+ int eflags;
+ bool success;
+
+ if ((a0 & 0xf) != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+ newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+ o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
+ o1 = cpu_ldq_data_ra(env, a0 + 8, ra);
+
+ oldv = int128_make128(o0, o1);
+ success = int128_eq(oldv, cmpv);
+ if (!success) {
+ newv = oldv;
+ }
+
+ cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
+ cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);
+
+ if (success) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = int128_getlo(oldv);
+ env->regs[R_EDX] = int128_gethi(oldv);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+
+ if ((a0 & 0xf) != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ } else if (HAVE_CMPXCHG128) {
+ int eflags = cpu_cc_compute_all(env, CC_OP);
+
+ Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+ Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+ int mem_idx = cpu_mmu_index(env, false);
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
+ Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
+
+ if (int128_eq(oldv, cmpv)) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = int128_getlo(oldv);
+ env->regs[R_EDX] = int128_gethi(oldv);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+ } else {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+}
+#endif
+
+void helper_boundw(CPUX86State *env, target_ulong a0, int v)
+{
+ int low, high;
+
+ low = cpu_ldsw_data_ra(env, a0, GETPC());
+ high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
+ v = (int16_t)v;
+ if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+void helper_boundl(CPUX86State *env, target_ulong a0, int v)
+{
+ int low, high;
+
+ low = cpu_ldl_data_ra(env, a0, GETPC());
+ high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
+ if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
diff --git a/target/i386/tcg/meson.build b/target/i386/tcg/meson.build
new file mode 100644
index 000000000..f9110e890
--- /dev/null
+++ b/target/i386/tcg/meson.build
@@ -0,0 +1,15 @@
+i386_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'bpt_helper.c',
+ 'cc_helper.c',
+ 'excp_helper.c',
+ 'fpu_helper.c',
+ 'int_helper.c',
+ 'mem_helper.c',
+ 'misc_helper.c',
+ 'mpx_helper.c',
+ 'seg_helper.c',
+ 'tcg-cpu.c',
+ 'translate.c'), if_false: files('tcg-stub.c'))
+
+subdir('sysemu')
+subdir('user')
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
new file mode 100644
index 000000000..5769db5ac
--- /dev/null
+++ b/target/i386/tcg/misc_helper.c
@@ -0,0 +1,138 @@
+/*
+ * x86 misc helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "helper-tcg.h"
+
+/*
+ * NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
+ * after generating a call to a helper that uses this.
+ */
+void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask)
+{
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ CC_OP = CC_OP_EFLAGS;
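+    /*
+     * DF is stored as a stride: +1 when the flag is clear, -1 when it
+     * is set, so string operations can advance by a multiple of df.
+     */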
+ env->df = 1 - (2 * ((eflags >> 10) & 1));
+ env->eflags = (env->eflags & ~update_mask) |
+ (eflags & update_mask) | 0x2;
+}
+
+void helper_into(CPUX86State *env, int next_eip_addend)
+{
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if (eflags & CC_O) {
+ raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
+ }
+}
+
+void helper_cpuid(CPUX86State *env)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0, GETPC());
+
+ cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
+ &eax, &ebx, &ecx, &edx);
+ env->regs[R_EAX] = eax;
+ env->regs[R_EBX] = ebx;
+ env->regs[R_ECX] = ecx;
+ env->regs[R_EDX] = edx;
+}
+
+void helper_rdtsc(CPUX86State *env)
+{
+ uint64_t val;
+
+ if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0, GETPC());
+
+ val = cpu_get_tsc(env) + env->tsc_offset;
+ env->regs[R_EAX] = (uint32_t)(val);
+ env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+
+void helper_rdtscp(CPUX86State *env)
+{
+ helper_rdtsc(env);
+ env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
+}
+
+void QEMU_NORETURN helper_rdpmc(CPUX86State *env)
+{
+    if (((env->cr[4] & CR4_PCE_MASK) == 0) &&
+ ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0, GETPC());
+
+ /* currently unimplemented */
+ qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
+ raise_exception_err(env, EXCP06_ILLOP, 0);
+}
+
+void QEMU_NORETURN do_pause(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* Just let another CPU run. */
+ cs->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ do_pause(env);
+}
+
+uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
+{
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ return env->pkru;
+}
+
+void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
+{
+ CPUState *cs = env_cpu(env);
+
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ env->pkru = val;
+ tlb_flush(cs);
+}
diff --git a/target/i386/tcg/mpx_helper.c b/target/i386/tcg/mpx_helper.c
new file mode 100644
index 000000000..22423eedc
--- /dev/null
+++ b/target/i386/tcg/mpx_helper.c
@@ -0,0 +1,139 @@
+/*
+ * x86 MPX helpers
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "helper-tcg.h"
+
+
+void helper_bndck(CPUX86State *env, uint32_t fail)
+{
+ if (unlikely(fail)) {
+ env->bndcs_regs.sts = 1;
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
+{
+ uint64_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
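+    /*
+     * Bits 47:20 of the pointer index the bound directory (8-byte
+     * entries, base taken from BNDCFGx above) and bits 19:3 index the
+     * 32-byte entries of the bound table it points to.
+     */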
+ bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
+ bt = cpu_ldq_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract64(base, 3, 17) << 5) + (bt & ~7);
+}
+
+static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
+{
+ uint32_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
+ bt = cpu_ldl_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract32(base, 2, 10) << 4) + (bt & ~3);
+}
+
+uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte, lb, ub, pt;
+
+ bte = lookup_bte64(env, base, ra);
+ lb = cpu_ldq_data_ra(env, bte, ra);
+ ub = cpu_ldq_data_ra(env, bte + 8, ra);
+ pt = cpu_ldq_data_ra(env, bte + 16, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ env->mmx_t0.MMX_Q(0) = ub;
+ return lb;
+}
+
+uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte, lb, ub, pt;
+
+ bte = lookup_bte32(env, base, ra);
+ lb = cpu_ldl_data_ra(env, bte, ra);
+ ub = cpu_ldl_data_ra(env, bte + 4, ra);
+ pt = cpu_ldl_data_ra(env, bte + 8, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ return ((uint64_t)ub << 32) | lb;
+}
+
+void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte;
+
+ bte = lookup_bte64(env, base, ra);
+ cpu_stq_data_ra(env, bte, lb, ra);
+ cpu_stq_data_ra(env, bte + 8, ub, ra);
+ cpu_stq_data_ra(env, bte + 16, ptr, ra);
+}
+
+void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte;
+
+ bte = lookup_bte32(env, base, ra);
+ cpu_stl_data_ra(env, bte, lb, ra);
+ cpu_stl_data_ra(env, bte + 4, ub, ra);
+ cpu_stl_data_ra(env, bte + 8, ptr, ra);
+}
+
+void helper_bnd_jmp(CPUX86State *env)
+{
+ if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+}
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
new file mode 100644
index 000000000..baa905a0c
--- /dev/null
+++ b/target/i386/tcg/seg_helper.c
@@ -0,0 +1,2343 @@
+/*
+ * x86 segmentation related helpers:
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "helper-tcg.h"
+#include "seg_helper.h"
+
+/* return non-zero on error */
+static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector,
+ uintptr_t retaddr)
+{
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ return -1;
+ }
+ ptr = dt->base + index;
+ *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ return 0;
+}
+
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector)
+{
+ return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
+}
+
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+ unsigned int limit;
+
+ limit = (e1 & 0xffff) | (e2 & 0x000f0000);
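+    /*
+     * With the granularity bit set, the 20-bit limit counts 4 KiB
+     * units, so e.g. a raw limit of 0xfffff expands to 0xffffffff.
+     */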
+ if (e2 & DESC_G_MASK) {
+ limit = (limit << 12) | 0xfff;
+ }
+ return limit;
+}
+
+static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
+{
+ return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
+}
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
+ uint32_t e2)
+{
+ sc->base = get_seg_base(e1, e2);
+ sc->limit = get_seg_limit(e1, e2);
+ sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
+{
+ selector &= 0xffff;
+
+ cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (3 << DESC_DPL_SHIFT));
+}
+
+static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
+ uint32_t *esp_ptr, int dpl,
+ uintptr_t retaddr)
+{
+ X86CPU *cpu = env_archcpu(env);
+ int type, index, shift;
+
+#if 0
+ {
+ int i;
+ printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+ for (i = 0; i < env->tr.limit; i++) {
+ printf("%02x ", env->tr.base[i]);
+ if ((i & 7) == 7) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+ }
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(CPU(cpu), "invalid tss");
+ }
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ cpu_abort(CPU(cpu), "invalid tss type");
+ }
+ shift = type >> 3;
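+    /*
+     * The per-privilege stack pointers live at offset 2 + 4 * dpl in a
+     * 16-bit TSS and 4 + 8 * dpl in a 32-bit one; shift selects which.
+     */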
+ index = (dpl * 4 + 2) << shift;
+ if (index + (4 << shift) - 1 > env->tr.limit) {
+ raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
+ }
+ if (shift == 0) {
+ *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
+ } else {
+ *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
+ }
+}
+
+static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
+ int cpl, uintptr_t retaddr)
+{
+ uint32_t e1, e2;
+ int rpl, dpl;
+
+ if ((selector & 0xfffc) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (seg_reg == R_CS) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ } else if (seg_reg == R_SS) {
+ /* SS must be writable data */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (dpl != cpl || dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ } else {
+            /* a code segment loaded here must be readable */
+ if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+            /* if data or non-conforming code, check the rights */
+ if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ if (seg_reg == R_SS || seg_reg == R_CS) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ }
+}
+
+#define SWITCH_TSS_JMP 0
+#define SWITCH_TSS_IRET 1
+#define SWITCH_TSS_CALL 2
+
+/* XXX: restore CPU state in registers (PowerPC case) */
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, uintptr_t retaddr)
+{
+ int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
+ target_ulong tss_base;
+ uint32_t new_regs[8], new_segs[6];
+ uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
+ uint32_t old_eflags, eflags_mask;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
+ source);
+
+ /* if task gate, we read the TSS segment and we load it */
+ if (type == 5) {
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+ }
+ tss_selector = e1 >> 16;
+ if (tss_selector & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ if (e2 & DESC_S_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+ }
+
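+ /* NOTE: 103 (0x67) and 43 (0x2b) are the minimal limits of a valid
+ 32-bit and 16-bit TSS respectively: the size of the fixed part
+ of the TSS minus one (104 and 44 bytes) */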
+ if (type & 8) {
+ tss_limit_max = 103;
+ } else {
+ tss_limit_max = 43;
+ }
+ tss_limit = get_seg_limit(e1, e2);
+ tss_base = get_seg_base(e1, e2);
+ if ((tss_selector & 4) != 0 ||
+ tss_limit < tss_limit_max) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+ }
+ old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if (old_type & 8) {
+ old_tss_limit_max = 103;
+ } else {
+ old_tss_limit_max = 43;
+ }
+
+ /* read all the registers from the new TSS */
+ if (type & 8) {
+ /* 32 bit */
+ new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
+ new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
+ new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
+ retaddr);
+ }
+ for (i = 0; i < 6; i++) {
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
+ retaddr);
+ }
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
+ new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
+ } else {
+ /* 16 bit */
+ new_cr3 = 0;
+ new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
+ new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
+ }
+ for (i = 0; i < 4; i++) {
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
+ retaddr);
+ }
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
+ new_segs[R_FS] = 0;
+ new_segs[R_GS] = 0;
+ new_trap = 0;
+ }
+ /* XXX: avoid a compiler warning, see
+ http://support.amd.com/us/Processor_TechDocs/24593.pdf
+ chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
+ (void)new_trap;
+
+ /* NOTE: we must avoid memory exceptions during the task switch,
+ so we make dummy accesses beforehand */
+ /* XXX: this can still fail in some cases, so a bigger hack is
+ necessary to validate the TLB after the accesses have been done */
+
+ v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
+ v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
+
+ /* clear the old TSS busy bit: the outgoing task is no longer active */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (env->tr.selector & ~7);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ e2 &= ~DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ }
+ old_eflags = cpu_compute_eflags(env);
+ if (source == SWITCH_TSS_IRET) {
+ old_eflags &= ~NT_MASK;
+ }
+
+ /* save the current state in the old TSS */
+ if (old_type & 8) {
+ /* 32 bit */
+ cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
+ for (i = 0; i < 6; i++) {
+ cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
+ env->segs[i].selector, retaddr);
+ }
+ } else {
+ /* 16 bit */
+ cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
+ for (i = 0; i < 4; i++) {
+ cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
+ env->segs[i].selector, retaddr);
+ }
+ }
+
+ /* from now on, if an exception occurs, it will occur in the next
+ task's context */
+
+ if (source == SWITCH_TSS_CALL) {
+ cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
+ new_eflags |= NT_MASK;
+ }
+
+ /* set busy bit */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (tss_selector & ~7);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ e2 |= DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ }
+
+ /* set the new CPU state */
+ /* from this point on, any exception that occurs can cause problems */
+ env->cr[0] |= CR0_TS_MASK;
+ env->hflags |= HF_TS_MASK;
+ env->tr.selector = tss_selector;
+ env->tr.base = tss_base;
+ env->tr.limit = tss_limit;
+ env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
+
+ if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
+ cpu_x86_update_cr3(env, new_cr3);
+ }
+
+ /* first load all registers without raising an exception, then
+ reload those which may raise exceptions */
+ env->eip = new_eip;
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
+ if (type & 8) {
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = new_regs[i];
+ }
+ } else {
+ cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
+ }
+ }
+ if (new_eflags & VM_MASK) {
+ for (i = 0; i < 6; i++) {
+ load_seg_vm(env, i, new_segs[i]);
+ }
+ } else {
+ /* first just the selectors, as loading the rest may trigger exceptions */
+ for (i = 0; i < 6; i++) {
+ cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
+ }
+ }
+
+ env->ldt.selector = new_ldt & ~4;
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ env->ldt.flags = 0;
+
+ /* load the LDT */
+ if (new_ldt & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+
+ if ((new_ldt & 0xfffc) != 0) {
+ dt = &env->gdt;
+ index = new_ldt & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+
+ /* load the segments */
+ if (!(new_eflags & VM_MASK)) {
+ int cpl = new_segs[R_CS] & 3;
+ tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
+ tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
+ tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
+ tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
+ tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
+ tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
+ }
+
+ /* check that env->eip is within the CS segment limit */
+ if (new_eip > env->segs[R_CS].limit) {
+ /* XXX: different exception if CALL? */
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* reset local breakpoints */
+ if (env->dr[7] & DR7_LOCAL_BP_MASK) {
+ cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
+ }
+#endif
+}
+
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
+{
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+}
+
+static inline unsigned int get_sp_mask(unsigned int e2)
+{
+#ifdef TARGET_X86_64
+ if (e2 & DESC_L_MASK) {
+ return 0;
+ } else
+#endif
+ if (e2 & DESC_B_MASK) {
+ return 0xffffffff;
+ } else {
+ return 0xffff;
+ }
+}
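+
+/* NOTE: a zero mask from get_sp_mask() flags the 64-bit case: the
+ * masking push/pop macros are not used there and SET_ESP() stores the
+ * full value unmasked. DESC_B_MASK selects a 32-bit ESP; otherwise
+ * only the low 16 bits (SP) of the stack pointer are significant. */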
+
+int exception_has_error_code(int intno)
+{
+ switch (intno) {
+ case 8:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 17:
+ return 1;
+ }
+ return 0;
+}
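+
+/* NOTE: the vectors above are #DF(8), #TS(10), #NP(11), #SS(12),
+ * #GP(13), #PF(14) and #AC(17), the x86 exceptions which push an
+ * error code onto the stack. */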
+
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask) \
+ do { \
+ if ((sp_mask) == 0xffff) { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
+ ((val) & 0xffff); \
+ } else if ((sp_mask) == 0xffffffffLL) { \
+ env->regs[R_ESP] = (uint32_t)(val); \
+ } else { \
+ env->regs[R_ESP] = (val); \
+ } \
+ } while (0)
+#else
+#define SET_ESP(val, sp_mask) \
+ do { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
+ ((val) & (sp_mask)); \
+ } while (0)
+#endif
+
+/* On 64-bit machines the segment base + stack offset addition can
+ * overflow, so this segment addition macro trims the result to 32 bits
+ * whenever needed. */
+#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
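+
+/* Example: with ssp = 0xfffff000 and (sp & sp_mask) = 0x2000, the full
+ * sum 0x100001000 does not fit in 32 bits; SEG_ADDL wraps it to the
+ * 32-bit address 0x00001000, matching segmented address arithmetic. */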
+
+/* XXX: add an is_user flag to have proper security support */
+#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ sp -= 2; \
+ cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
+ }
+
+#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ sp -= 4; \
+ cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
+ }
+
+#define POPW_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
+ sp += 2; \
+ }
+
+#define POPL_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
+ sp += 4; \
+ }
+
+#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
+#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
+#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
+#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
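+
+/* NOTE: these macros update the local 'sp' variable in place; the real
+ * ESP register is only committed afterwards via SET_ESP(), so a
+ * faulting memory access leaves ESP untouched and the trapping
+ * instruction restartable. */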
+
+/* protected mode interrupt */
+static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip,
+ int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int type, dpl, selector, ss_dpl, cpl;
+ int has_error_code, new_stack, shift;
+ uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
+ uint32_t old_eip, sp_mask;
+ int vm86 = env->eflags & VM_MASK;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 8 + 7 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 8;
+ e1 = cpu_ldl_kernel(env, ptr);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 5: /* task gate */
+ case 6: /* 286 interrupt gate */
+ case 7: /* 286 trap gate */
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+
+ if (type == 5) {
+ /* task gate */
+ /* must do that check here to return the correct error code */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ if (has_error_code) {
+ int type;
+ uint32_t mask;
+
+ /* push the error code */
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ shift = type >> 3;
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ mask = 0xffffffff;
+ } else {
+ mask = 0xffff;
+ }
+ esp = (env->regs[R_ESP] - (2 << shift)) & mask;
+ ssp = env->segs[R_SS].base + esp;
+ if (shift) {
+ cpu_stl_kernel(env, ssp, error_code);
+ } else {
+ cpu_stw_kernel(env, ssp, error_code);
+ }
+ SET_ESP(esp, mask);
+ }
+ return;
+ }
+
+ /* Otherwise, trap or interrupt gate */
+
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ if (load_segment(env, &e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (e2 & DESC_C_MASK) {
+ dpl = cpl;
+ }
+ if (dpl < cpl) {
+ /* to inner privilege */
+ get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ new_stack = 1;
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ } else {
+ /* to same privilege */
+ if (vm86) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ esp = env->regs[R_ESP];
+ }
+
+ shift = type >> 3;
+
+#if 0
+ /* XXX: check that enough room is available */
+ push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+ if (vm86) {
+ push_size += 8;
+ }
+ push_size <<= shift;
+#endif
+ if (shift == 1) {
+ if (new_stack) {
+ if (vm86) {
+ PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
+ }
+ PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHL(ssp, esp, sp_mask, error_code);
+ }
+ } else {
+ if (new_stack) {
+ if (vm86) {
+ PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
+ }
+ PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHW(ssp, esp, sp_mask, error_code);
+ }
+ }
+
+ /* interrupt gates clear the IF mask */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+ if (new_stack) {
+ if (vm86) {
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
+ }
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
+ }
+ SET_ESP(esp, sp_mask);
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ env->eip = offset;
+}
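+
+/* NOTE: the frame built above is the architectural interrupt frame:
+ * GS, FS, DS and ES (vm86 only), then SS and ESP (on a stack switch),
+ * then EFLAGS, CS, EIP and, for some exceptions, an error code. */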
+
+#ifdef TARGET_X86_64
+
+#define PUSHQ_RA(sp, val, ra) \
+ { \
+ sp -= 8; \
+ cpu_stq_kernel_ra(env, sp, (val), ra); \
+ }
+
+#define POPQ_RA(sp, val, ra) \
+ { \
+ val = cpu_ldq_kernel_ra(env, sp, ra); \
+ sp += 8; \
+ }
+
+#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
+#define POPQ(sp, val) POPQ_RA(sp, val, 0)
+
+static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
+{
+ X86CPU *cpu = env_archcpu(env);
+ int index;
+
+#if 0
+ printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
+ env->tr.base, env->tr.limit);
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(CPU(cpu), "invalid tss");
+ }
+ index = 8 * level + 4;
+ if ((index + 7) > env->tr.limit) {
+ raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+ }
+ return cpu_ldq_kernel(env, env->tr.base + index);
+}
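+
+/* NOTE: in the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
+ * IST1..IST7 start at offset 36, so "8 * level + 4" selects RSPn for
+ * level = dpl and ISTn for level = ist + 3. */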
+
+/* 64 bit interrupt */
+static void do_interrupt64(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr;
+ int type, dpl, selector, cpl, ist;
+ int has_error_code, new_stack;
+ uint32_t e1, e2, e3, ss;
+ target_ulong old_eip, esp, offset;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 16 + 15 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ ptr = dt->base + intno * 16;
+ e1 = cpu_ldl_kernel(env, ptr);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+ e3 = cpu_ldl_kernel(env, ptr + 8);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
+ }
+ selector = e1 >> 16;
+ offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ ist = e2 & 7;
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+
+ if (load_segment(env, &e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (e2 & DESC_C_MASK) {
+ dpl = cpl;
+ }
+ if (dpl < cpl || ist != 0) {
+ /* to inner privilege */
+ new_stack = 1;
+ esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
+ ss = 0;
+ } else {
+ /* to same privilege */
+ if (env->eflags & VM_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ esp = env->regs[R_ESP];
+ }
+ esp &= ~0xfLL; /* align stack */
+
+ PUSHQ(esp, env->segs[R_SS].selector);
+ PUSHQ(esp, env->regs[R_ESP]);
+ PUSHQ(esp, cpu_compute_eflags(env));
+ PUSHQ(esp, env->segs[R_CS].selector);
+ PUSHQ(esp, old_eip);
+ if (has_error_code) {
+ PUSHQ(esp, error_code);
+ }
+
+ /* interrupt gates clear the IF mask */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+ if (new_stack) {
+ ss = 0 | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
+ }
+ env->regs[R_ESP] = esp;
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ env->eip = offset;
+}
+
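+/* NOTE: MSR_STAR[63:48] holds the SYSRET base selector: CS is loaded
+ * with base + 16 when returning to 64-bit mode, or with base itself
+ * for compatibility mode, and SS with base + 8 in both cases. */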
+void helper_sysret(CPUX86State *env, int dflag)
+{
+ int cpl, selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ selector = (env->star >> 48) & 0xffff;
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
+ | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
+ NT_MASK);
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ env->eip = env->regs[R_ECX];
+ } else {
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->regs[R_ECX];
+ }
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ } else {
+ env->eflags |= IF_MASK;
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->regs[R_ECX];
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ }
+}
+#endif /* TARGET_X86_64 */
+
+/* real mode interrupt */
+static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int selector;
+ uint32_t offset, esp;
+ uint32_t old_cs, old_eip;
+
+ /* real mode (simpler!) */
+ dt = &env->idt;
+ if (intno * 4 + 3 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 4;
+ offset = cpu_lduw_kernel(env, ptr);
+ selector = cpu_lduw_kernel(env, ptr + 2);
+ esp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+ old_cs = env->segs[R_CS].selector;
+ /* XXX: use SS segment size? */
+ PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, 0xffff, old_cs);
+ PUSHW(ssp, esp, 0xffff, old_eip);
+
+ /* update processor state */
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+ env->eip = offset;
+ env->segs[R_CS].selector = selector;
+ env->segs[R_CS].base = (selector << 4);
+ env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the env->eip value AFTER the
+ * interrupt instruction; it is only relevant if is_int is TRUE.
+ */
+void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
+{
+ CPUX86State *env = &cpu->env;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ if ((env->cr[0] & CR0_PE_MASK)) {
+ static int count;
+
+ qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
+ " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
+ count, intno, error_code, is_int,
+ env->hflags & HF_CPL_MASK,
+ env->segs[R_CS].selector, env->eip,
+ (int)env->segs[R_CS].base + env->eip,
+ env->segs[R_SS].selector, env->regs[R_ESP]);
+ if (intno == 0x0e) {
+ qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
+ } else {
+ qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
+ }
+ qemu_log("\n");
+ log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
+#if 0
+ {
+ int i;
+ target_ulong ptr;
+
+ qemu_log(" code=");
+ ptr = env->segs[R_CS].base + env->eip;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+ }
+ if (env->cr[0] & CR0_PE_MASK) {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_GUEST_MASK) {
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
+ }
+#endif
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
+ } else
+#endif
+ {
+ do_interrupt_protected(env, intno, is_int, error_code, next_eip,
+ is_hw);
+ }
+ } else {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_GUEST_MASK) {
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
+ }
+#endif
+ do_interrupt_real(env, intno, is_int, error_code, next_eip);
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_GUEST_MASK) {
+ CPUState *cs = CPU(cpu);
+ uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj));
+
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj & ~SVM_EVTINJ_VALID);
+ }
+#endif
+}
+
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
+{
+ do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
+}
+
+void helper_lldt(CPUX86State *env, int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* XXX: NULL selector case: invalid LDT */
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3;
+
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ env->ldt.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+ }
+ env->ldt.selector = selector;
+}
+
+void helper_ltr(CPUX86State *env, int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, type, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* NULL selector case: invalid TR */
+ env->tr.base = 0;
+ env->tr.limit = 0;
+ env->tr.flags = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((e2 & DESC_S_MASK) ||
+ (type != 1 && type != 9)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3, e4;
+
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+ e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
+ if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ env->tr.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ }
+ e2 |= DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+ }
+ env->tr.selector = selector;
+}
+
+/* only works in protected mode, outside VM86; seg_reg must be != R_CS */
+void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
+{
+ uint32_t e1, e2;
+ int cpl, dpl, rpl;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ cpl = env->hflags & HF_CPL_MASK;
+ if ((selector & 0xfffc) == 0) {
+ /* null selector case */
+ if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+ && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+ ) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
+ } else {
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (seg_reg == R_SS) {
+ /* must be writable segment */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (rpl != cpl || dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ } else {
+ /* must be readable segment */
+ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+ /* if not conforming code, test rights */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ if (seg_reg == R_SS) {
+ raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
+ } else {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+ }
+
+ /* set the access bit if not already set */
+ if (!(e2 & DESC_A_MASK)) {
+ e2 |= DESC_A_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+ }
+
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+#if 0
+ qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+ selector, (unsigned long)sc->base, sc->limit, sc->flags);
+#endif
+ }
+}
+
+/* protected mode jump */
+void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+ target_ulong next_eip)
+{
+ int gate_cs, type;
+ uint32_t e1, e2, cpl, dpl, rpl, limit;
+
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit &&
+ (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ } else {
+ /* jump to call or task gate */
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (type != 12) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
+ switch (type) {
+ case 1: /* 286 TSS */
+ case 9: /* 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+ break;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ if ((dpl < cpl) || (dpl < rpl)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+ gate_cs = e1 >> 16;
+ new_eip = (e1 & 0xffff);
+ if (type == 12) {
+ new_eip |= (e2 & 0xffff0000);
+ }
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ /* load the upper 8 bytes of the 64-bit call gate */
+ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ if (type != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ new_eip |= ((target_ulong)e1) << 32;
+ }
+#endif
+
+ if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ /* must be code segment */
+ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
+ (DESC_S_MASK | DESC_CS_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
+ (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (!(e2 & DESC_L_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ if (e2 & DESC_B_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit &&
+ (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ break;
+ default:
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ break;
+ }
+ }
+}
+
+/* real mode call */
+void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
+ int shift, int next_eip)
+{
+ int new_eip;
+ uint32_t esp, esp_mask;
+ target_ulong ssp;
+
+ new_eip = new_eip1;
+ esp = env->regs[R_ESP];
+ esp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ }
+
+ SET_ESP(esp, esp_mask);
+ env->eip = new_eip;
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+ int shift, target_ulong next_eip)
+{
+ int new_stack, i;
+ uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
+ uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
+ uint32_t val, limit, old_sp_mask;
+ target_ulong ssp, old_ssp, offset, sp;
+
+ LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
+ LOG_PCALL_STATE(env_cpu(env));
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ LOG_PCALL("desc=%08x:%08x\n", e1, e2);
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+
+#ifdef TARGET_X86_64
+ /* XXX: check 16/32 bit cases in long mode */
+ if (shift == 2) {
+ target_ulong rsp;
+
+ /* 64 bit case */
+ rsp = env->regs[R_ESP];
+ PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
+ PUSHQ_RA(rsp, next_eip, GETPC());
+ /* from this point, not restartable */
+ env->regs[R_ESP] = rsp;
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2), e2);
+ env->eip = new_eip;
+ } else
+#endif
+ {
+ sp = env->regs[R_ESP];
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ }
+
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ /* from this point, not restartable */
+ SET_ESP(sp, sp_mask);
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ }
+ } else {
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (type != 12) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+#endif
+
+ switch (type) {
+ case 1: /* available 286 TSS */
+ case 9: /* available 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+ return;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ break;
+ default:
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ break;
+ }
+ shift = type >> 3;
+
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+ selector = e1 >> 16;
+ param_count = e2 & 0x1f;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ /* load the upper 8 bytes of the 64-bit call gate */
+ if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ if (type != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
+ GETPC());
+ }
+ offset |= ((target_ulong)e1) << 32;
+ }
+#endif
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->efer & MSR_EFER_LMA) {
+ if (!(e2 & DESC_L_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (e2 & DESC_B_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ shift++;
+ }
+#endif
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ sp = get_rsp_from_tss(env, dpl);
+ ss = dpl; /* SS = NULL selector with RPL = new CPL */
+ new_stack = 1;
+ sp_mask = 0;
+ ssp = 0; /* SS base is always zero in IA-32e mode */
+ LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
+ } else
+#endif
+ {
+ uint32_t sp32;
+ get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
+ LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp32, param_count,
+ env->regs[R_ESP]);
+ sp = sp32;
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ }
+
+ /* push_size = ((param_count * 2) + 8) << shift; */
+
+ old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ old_ssp = env->segs[R_SS].base;
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ /* XXX: verify if new stack address is canonical */
+ PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
+ PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
+ /* parameters aren't supported for 64-bit call gates */
+ } else
+#endif
+ if (shift == 1) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ for (i = param_count - 1; i >= 0; i--) {
+ val = cpu_ldl_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 4) &
+ old_sp_mask), GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
+ }
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ for (i = param_count - 1; i >= 0; i--) {
+ val = cpu_lduw_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 2) &
+ old_sp_mask), GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
+ }
+ }
+ new_stack = 1;
+ } else {
+ /* to same privilege */
+ sp = env->regs[R_ESP];
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ /* push_size = (4 << shift); */
+ new_stack = 0;
+ }
+
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
+ PUSHQ_RA(sp, next_eip, GETPC());
+ } else
+#endif
+ if (shift == 1) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ }
+
+ /* from this point, not restartable */
+
+ if (new_stack) {
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+ } else
+#endif
+ {
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp,
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+ }
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ SET_ESP(sp, sp_mask);
+ env->eip = offset;
+ }
+}
+
+/* real and vm86 mode iret */
+void helper_iret_real(CPUX86State *env, int shift)
+{
+ uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
+ target_ulong ssp;
+ int eflags_mask;
+
+ sp_mask = 0xffff; /* XXX: use SS segment size? */
+ sp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
+ new_cs &= 0xffff;
+ POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ }
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+ env->eip = new_eip;
+ if (env->eflags & VM_MASK) {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
+ NT_MASK;
+ } else {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
+ RF_MASK | NT_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
+{
+ int dpl;
+ uint32_t e2;
+
+ /* XXX: on x86_64, we do not want to nullify FS and GS because
+ they may still contain a valid base. I would be interested to
+ know how a real x86_64 CPU behaves */
+ if ((seg_reg == R_FS || seg_reg == R_GS) &&
+ (env->segs[seg_reg].selector & 0xfffc) == 0) {
+ return;
+ }
+
+ e2 = env->segs[seg_reg].flags;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+ /* data or non conforming code segment */
+ if (dpl < cpl) {
+ cpu_x86_load_seg_cache(env, seg_reg, 0,
+ env->segs[seg_reg].base,
+ env->segs[seg_reg].limit,
+ env->segs[seg_reg].flags & ~DESC_P_MASK);
+ }
+ }
+}
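+
+/* NOTE: as on real hardware, data segments which are not accessible at
+ * the new (outer) privilege level are nullified on return, so the
+ * outer ring cannot keep using a more privileged segment. */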
+
+/* protected mode iret */
+static inline void helper_ret_protected(CPUX86State *env, int shift,
+ int is_iret, int addend,
+ uintptr_t retaddr)
+{
+ uint32_t new_cs, new_eflags, new_ss;
+ uint32_t new_es, new_ds, new_fs, new_gs;
+ uint32_t e1, e2, ss_e1, ss_e2;
+ int cpl, dpl, rpl, eflags_mask, iopl;
+ target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ }
+ sp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ_RA(sp, new_eip, retaddr);
+ POPQ_RA(sp, new_cs, retaddr);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPQ_RA(sp, new_eflags, retaddr);
+ }
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ if (new_eflags & VM_MASK) {
+ goto return_to_vm86;
+ }
+ }
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
+ if (is_iret) {
+ POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ }
+ }
+ }
+ LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
+ new_cs, new_eip, shift, addend);
+ LOG_PCALL_STATE(env_cpu(env));
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_S_MASK) ||
+ !(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ rpl = new_cs & 3;
+ if (rpl < cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ if (dpl > rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ } else {
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
+ }
+
+ sp += addend;
+ if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+ ((env->hflags & HF_CS64_MASK) && !is_iret))) {
+ /* return to same privilege level */
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ /* return to different privilege level */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ_RA(sp, new_esp, retaddr);
+ POPQ_RA(sp, new_ss, retaddr);
+ new_ss &= 0xffff;
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ new_ss &= 0xffff;
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ }
+ }
+ LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
+ new_ss, new_esp);
+ if ((new_ss & 0xfffc) == 0) {
+#ifdef TARGET_X86_64
+ /* NULL ss is allowed in long mode if cpl != 3 */
+ /* XXX: test CS64? */
+ if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
+ } else
+#endif
+ {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+ } else {
+ if ((new_ss & 3) != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ sp = new_esp;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(ss_e2);
+ }
+
+ /* validate data segments */
+ validate_seg(env, R_ES, rpl);
+ validate_seg(env, R_DS, rpl);
+ validate_seg(env, R_FS, rpl);
+ validate_seg(env, R_GS, rpl);
+
+ sp += addend;
+ }
+ SET_ESP(sp, sp_mask);
+ env->eip = new_eip;
+ if (is_iret) {
+ /* NOTE: 'cpl' is the _old_ CPL */
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+ if (cpl == 0) {
+ eflags_mask |= IOPL_MASK;
+ }
+ iopl = (env->eflags >> IOPL_SHIFT) & 3;
+ if (cpl <= iopl) {
+ eflags_mask |= IF_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ }
+ return;
+
+ return_to_vm86:
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
+
+ /* modify processor state */
+ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
+ VIP_MASK);
+ load_seg_vm(env, R_CS, new_cs & 0xffff);
+ load_seg_vm(env, R_SS, new_ss & 0xffff);
+ load_seg_vm(env, R_ES, new_es & 0xffff);
+ load_seg_vm(env, R_DS, new_ds & 0xffff);
+ load_seg_vm(env, R_FS, new_fs & 0xffff);
+ load_seg_vm(env, R_GS, new_gs & 0xffff);
+
+ env->eip = new_eip & 0xffff;
+ env->regs[R_ESP] = new_esp;
+}
+
+void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
+{
+ int tss_selector, type;
+ uint32_t e1, e2;
+
+ /* specific case for TSS */
+ if (env->eflags & NT_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+#endif
+ tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
+ if (tss_selector & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
+ /* NOTE: the 0x17 mask checks both the S bit (system segment) and
+ the busy TSS type, whether 16- or 32-bit */
+ if (type != 3) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+ } else {
+ helper_ret_protected(env, shift, 1, 0, GETPC());
+ }
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+void helper_lret_protected(CPUX86State *env, int shift, int addend)
+{
+ helper_ret_protected(env, shift, 0, addend, GETPC());
+}
+
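+/* NOTE: SYSENTER/SYSEXIT derive all selectors from IA32_SYSENTER_CS:
+ * SYSENTER uses CS = base and SS = base + 8, while SYSEXIT returns to
+ * CS = base + 16, SS = base + 24 (32-bit) or CS = base + 32,
+ * SS = base + 40 (64-bit). */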
+void helper_sysenter(CPUX86State *env)
+{
+ if (env->sysenter_cs == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->regs[R_ESP] = env->sysenter_esp;
+ env->eip = env->sysenter_eip;
+}
+
+void helper_sysexit(CPUX86State *env, int dflag)
+{
+ int cpl;
+
+ cpl = env->hflags & HF_CPL_MASK;
+ if (env->sysenter_cs == 0 || cpl != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ }
+ env->regs[R_ESP] = env->regs[R_ECX];
+ env->eip = env->regs[R_EDX];
+}
+
+target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
+{
+ unsigned int limit;
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1:
+ case 2:
+ case 3:
+ case 9:
+ case 11:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ limit = get_seg_limit(e1, e2);
+ CC_SRC = eflags | CC_Z;
+ return limit;
+}
+
+target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
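+ /* Unlike LSL, LAR also accepts call gates (4, 12) and task gates (5). */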
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 9:
+ case 11:
+ case 12:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+ return e2 & 0x00f0ff00;
+}
+
+void helper_verr(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ if (!(e2 & DESC_R_MASK)) {
+ goto fail;
+ }
+ if (!(e2 & DESC_C_MASK)) {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
+
+void helper_verw(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ goto fail;
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ if (!(e2 & DESC_W_MASK)) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
diff --git a/target/i386/tcg/seg_helper.h b/target/i386/tcg/seg_helper.h
new file mode 100644
index 000000000..ebf103527
--- /dev/null
+++ b/target/i386/tcg/seg_helper.h
@@ -0,0 +1,66 @@
+/*
+ * x86 segmentation related helpers macros
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SEG_HELPER_H
+#define SEG_HELPER_H
+
+//#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
+# define LOG_PCALL_STATE(cpu) \
+ log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
+#else
+# define LOG_PCALL(...) do { } while (0)
+# define LOG_PCALL_STATE(cpu) do { } while (0)
+#endif
+
+/*
+ * TODO: Convert callers to compute cpu_mmu_index_kernel once
+ * and use *_mmuidx_ra directly.
+ */
+#define cpu_ldub_kernel_ra(e, p, r) \
+ cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
+#define cpu_lduw_kernel_ra(e, p, r) \
+ cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
+#define cpu_ldl_kernel_ra(e, p, r) \
+ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
+#define cpu_ldq_kernel_ra(e, p, r) \
+ cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
+
+#define cpu_stb_kernel_ra(e, p, v, r) \
+ cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
+#define cpu_stw_kernel_ra(e, p, v, r) \
+ cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
+#define cpu_stl_kernel_ra(e, p, v, r) \
+ cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
+#define cpu_stq_kernel_ra(e, p, v, r) \
+ cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
+
+#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
+#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
+#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
+#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
+
+#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
+#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
+#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
+#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
+
+#endif /* SEG_HELPER_H */
diff --git a/target/i386/tcg/sysemu/bpt_helper.c b/target/i386/tcg/sysemu/bpt_helper.c
new file mode 100644
index 000000000..4d96a48a3
--- /dev/null
+++ b/target/i386/tcg/sysemu/bpt_helper.c
@@ -0,0 +1,298 @@
+/*
+ * i386 breakpoint helpers - sysemu code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "tcg/helper-tcg.h"
+
+
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 2;
+}
+
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return hw_global_breakpoint_enabled(dr7, index) ||
+ hw_local_breakpoint_enabled(dr7, index);
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+ return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
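+ /* DR7 LEN encoding: 0 -> 1 byte, 1 -> 2, 3 -> 4; 2 -> 8 (long mode only). */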
+ int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
+ return (len == 2) ? 8 : len + 1;
+}
+
+static int hw_breakpoint_insert(CPUX86State *env, int index)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong dr7 = env->dr[7];
+ target_ulong drN = env->dr[index];
+ int err = 0;
+
+ switch (hw_breakpoint_type(dr7, index)) {
+ case DR7_TYPE_BP_INST:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_breakpoint_insert(cs, drN, BP_CPU,
+ &env->cpu_breakpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* Notice when we should enable calls to bpt_io. */
+ return hw_breakpoint_enabled(env->dr[7], index)
+ ? HF_IOBPT_MASK : 0;
+
+ case DR7_TYPE_DATA_WR:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_DATA_RW:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+ }
+ if (err) {
+ env->cpu_breakpoint[index] = NULL;
+ }
+ return 0;
+}
+
+static void hw_breakpoint_remove(CPUX86State *env, int index)
+{
+ CPUState *cs = env_cpu(env);
+
+ switch (hw_breakpoint_type(env->dr[7], index)) {
+ case DR7_TYPE_BP_INST:
+ if (env->cpu_breakpoint[index]) {
+ cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[index]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
+ env->cpu_watchpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* HF_IOBPT_MASK cleared elsewhere. */
+ break;
+ }
+}
+
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
+{
+ target_ulong old_dr7 = env->dr[7];
+ int iobpt = 0;
+ int i;
+
+ new_dr7 |= DR7_FIXED_1;
+
+ /* If nothing is changing except the global/local enable bits,
+ then we can make the change more efficient. */
+ if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
+ /* Fold the global and local enable bits together into the
+ global fields, then xor to show which registers have
+ changed collective enable state. */
+ int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
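+ /*
+ * Example: if only L0 (bit 0) was set in old_dr7, old|old*2 sets
+ * bits 0-1 while new|new*2 sets neither, so bit 1 of mod marks
+ * breakpoint 0 as having changed enable state.
+ */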
+
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
+ hw_breakpoint_remove(env, i);
+ }
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= HF_IOBPT_MASK;
+ }
+ }
+ } else {
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ hw_breakpoint_remove(env, i);
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ }
+ }
+
+ env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
+}
+
+bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
+{
+ target_ulong dr6;
+ int reg;
+ bool hit_enabled = false;
+
+ dr6 = env->dr[6] & ~0xf;
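+ /* B0-B3 (the low four bits) are recomputed from scratch below. */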
+ for (reg = 0; reg < DR7_MAX_BP; reg++) {
+ bool bp_match = false;
+ bool wp_match = false;
+
+ switch (hw_breakpoint_type(env->dr[7], reg)) {
+ case DR7_TYPE_BP_INST:
+ if (env->dr[reg] == env->eip) {
+ bp_match = true;
+ }
+ break;
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[reg] &&
+ env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+ wp_match = true;
+ }
+ break;
+ case DR7_TYPE_IO_RW:
+ break;
+ }
+ if (bp_match || wp_match) {
+ dr6 |= 1 << reg;
+ if (hw_breakpoint_enabled(env->dr[7], reg)) {
+ hit_enabled = true;
+ }
+ }
+ }
+
+ if (hit_enabled || force_dr6_update) {
+ env->dr[6] = dr6;
+ }
+
+ return hit_enabled;
+}
+
+void breakpoint_handler(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_hw_breakpoints(env, false)) {
+ raise_exception(env, EXCP01_DB);
+ } else {
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+ } else {
+ if (cpu_breakpoint_test(cs, env->eip, BP_CPU)) {
+ check_hw_breakpoints(env, true);
+ raise_exception(env, EXCP01_DB);
+ }
+ }
+}
+
+target_ulong helper_get_dr(CPUX86State *env, int reg)
+{
+ if (reg >= 4 && reg < 6) {
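+ /* DR4/DR5 alias DR6/DR7 unless CR4.DE demands #UD. */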
+ if (env->cr[4] & CR4_DE_MASK) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ } else {
+ reg += 2;
+ }
+ }
+
+ return env->dr[reg];
+}
+
+void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
+{
+ if (reg >= 4 && reg < 6) {
+ if (env->cr[4] & CR4_DE_MASK) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ } else {
+ reg += 2;
+ }
+ }
+
+ if (reg < 4) {
+ if (hw_breakpoint_enabled(env->dr[7], reg)
+ && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
+ hw_breakpoint_remove(env, reg);
+ env->dr[reg] = t0;
+ hw_breakpoint_insert(env, reg);
+ } else {
+ env->dr[reg] = t0;
+ }
+ } else {
+ if (t0 & DR_RESERVED_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (reg == 6) {
+ env->dr[6] = t0 | DR6_FIXED_1;
+ } else {
+ cpu_x86_update_dr7(env, t0);
+ }
+ }
+}
+
+/* Check if Port I/O is trapped by a breakpoint. */
+void helper_bpt_io(CPUX86State *env, uint32_t port,
+ uint32_t size, target_ulong next_eip)
+{
+ target_ulong dr7 = env->dr[7];
+ int i, hit = 0;
+
+ for (i = 0; i < DR7_MAX_BP; ++i) {
+ if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(dr7, i)) {
+ int bpt_len = hw_breakpoint_len(dr7, i);
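+ /* Hit if [port, port + size) overlaps [dr[i], dr[i] + bpt_len). */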
+ if (port + size - 1 >= env->dr[i]
+ && port <= env->dr[i] + bpt_len - 1) {
+ hit |= 1 << i;
+ }
+ }
+ }
+
+ if (hit) {
+ env->dr[6] = (env->dr[6] & ~0xf) | hit;
+ env->eip = next_eip;
+ raise_exception(env, EXCP01_DB);
+ }
+}
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
new file mode 100644
index 000000000..5ba739fbe
--- /dev/null
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -0,0 +1,474 @@
+/*
+ * x86 exception helpers - sysemu code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/helper-tcg.h"
+
+int get_pg_mode(CPUX86State *env)
+{
+ int pg_mode = 0;
+ if (env->cr[0] & CR0_WP_MASK) {
+ pg_mode |= PG_MODE_WP;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+ pg_mode |= PG_MODE_PAE;
+ }
+ if (env->cr[4] & CR4_PSE_MASK) {
+ pg_mode |= PG_MODE_PSE;
+ }
+ if (env->cr[4] & CR4_PKE_MASK) {
+ pg_mode |= PG_MODE_PKE;
+ }
+ if (env->cr[4] & CR4_PKS_MASK) {
+ pg_mode |= PG_MODE_PKS;
+ }
+ if (env->cr[4] & CR4_SMEP_MASK) {
+ pg_mode |= PG_MODE_SMEP;
+ }
+ if (env->cr[4] & CR4_LA57_MASK) {
+ pg_mode |= PG_MODE_LA57;
+ }
+ if (env->hflags & HF_LMA_MASK) {
+ pg_mode |= PG_MODE_LMA;
+ }
+ if (env->efer & MSR_EFER_NXE) {
+ pg_mode |= PG_MODE_NXE;
+ }
+ return pg_mode;
+}
+
+#define PG_ERROR_OK (-1)
+
+typedef hwaddr (*MMUTranslateFunc)(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+ int *prot);
+
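+/*
+ * Without a get_hphys_func callback (no nested paging active), the
+ * guest-physical address is already the final translation; otherwise
+ * it is walked through the nested page tables as well.
+ */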
+#define GET_HPHYS(cs, gpa, access_type, prot) \
+ (get_hphys_func ? get_hphys_func(cs, gpa, access_type, prot) : gpa)
+
+static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_func,
+ uint64_t cr3, int is_write1, int mmu_idx, int pg_mode,
+ hwaddr *xlat, int *page_size, int *prot)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t ptep, pte;
+ int32_t a20_mask;
+ target_ulong pde_addr, pte_addr;
+ int error_code = 0;
+ int is_dirty, is_write, is_user;
+ uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
+ uint32_t page_offset;
+ uint32_t pkr;
+
+ is_user = (mmu_idx == MMU_USER_IDX);
+ is_write = is_write1 & 1;
+ a20_mask = x86_get_a20_mask(env);
+
+ if (!(pg_mode & PG_MODE_NXE)) {
+ rsvd_mask |= PG_NX_MASK;
+ }
+
+ if (pg_mode & PG_MODE_PAE) {
+ uint64_t pde, pdpe;
+ target_ulong pdpe_addr;
+
+#ifdef TARGET_X86_64
+ if (pg_mode & PG_MODE_LMA) {
+ bool la57 = pg_mode & PG_MODE_LA57;
+ uint64_t pml5e_addr, pml5e;
+ uint64_t pml4e_addr, pml4e;
+
+ if (la57) {
+ pml5e_addr = ((cr3 & ~0xfff) +
+ (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
+ pml5e_addr = GET_HPHYS(cs, pml5e_addr, MMU_DATA_STORE, NULL);
+ pml5e = x86_ldq_phys(cs, pml5e_addr);
+ if (!(pml5e & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!(pml5e & PG_ACCESSED_MASK)) {
+ pml5e |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
+ }
+ ptep = pml5e ^ PG_NX_MASK;
+ } else {
+ pml5e = cr3;
+ ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+ }
+
+ pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
+ (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
+ pml4e_addr = GET_HPHYS(cs, pml4e_addr, MMU_DATA_STORE, NULL);
+ pml4e = x86_ldq_phys(cs, pml4e_addr);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!(pml4e & PG_ACCESSED_MASK)) {
+ pml4e |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
+ }
+ ptep &= pml4e ^ PG_NX_MASK;
+ pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
+ a20_mask;
+ pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pdpe & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep &= pdpe ^ PG_NX_MASK;
+ if (!(pdpe & PG_ACCESSED_MASK)) {
+ pdpe |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
+ }
+ if (pdpe & PG_PSE_MASK) {
+ /* 1 GB page */
+ *page_size = 1024 * 1024 * 1024;
+ pte_addr = pdpe_addr;
+ pte = pdpe;
+ goto do_check_protect;
+ }
+ } else
+#endif
+ {
+ /* XXX: load them when cr3 is loaded ? */
+ pdpe_addr = ((cr3 & ~0x1f) + ((addr >> 27) & 0x18)) &
+ a20_mask;
+ pdpe_addr = GET_HPHYS(cs, pdpe_addr, MMU_DATA_STORE, NULL);
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ rsvd_mask |= PG_HI_USER_MASK;
+ if (pdpe & (rsvd_mask | PG_NX_MASK)) {
+ goto do_fault_rsvd;
+ }
+ ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+ }
+
+ pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
+ a20_mask;
+ pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
+ pde = x86_ldq_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pde & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep &= pde ^ PG_NX_MASK;
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ *page_size = 2048 * 1024;
+ pte_addr = pde_addr;
+ pte = pde;
+ goto do_check_protect;
+ }
+ /* 4 KB page */
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pde_addr, pde);
+ }
+ pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
+ a20_mask;
+ pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
+ pte = x86_ldq_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ /* combine pde and pte nx, user and rw protections */
+ ptep &= pte ^ PG_NX_MASK;
+ *page_size = 4096;
+ } else {
+ uint32_t pde;
+
+ /* page directory entry */
+ pde_addr = ((cr3 & ~0xfff) + ((addr >> 20) & 0xffc)) &
+ a20_mask;
+ pde_addr = GET_HPHYS(cs, pde_addr, MMU_DATA_STORE, NULL);
+ pde = x86_ldl_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ ptep = pde | PG_NX_MASK;
+
+ /* if PSE bit is set, then we use a 4MB page */
+ if ((pde & PG_PSE_MASK) && (pg_mode & PG_MODE_PSE)) {
+ *page_size = 4096 * 1024;
+ pte_addr = pde_addr;
+
+ /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
+ * Leave bits 20-13 in place for setting accessed/dirty bits below.
+ */
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+ rsvd_mask = 0x200000;
+ goto do_check_protect_pse36;
+ }
+
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pde_addr, pde);
+ }
+
+ /* page table entry */
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
+ a20_mask;
+ pte_addr = GET_HPHYS(cs, pte_addr, MMU_DATA_STORE, NULL);
+ pte = x86_ldl_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ /* combine pde and pte user and rw protections */
+ ptep &= pte | PG_NX_MASK;
+ *page_size = 4096;
+ rsvd_mask = 0;
+ }
+
+do_check_protect:
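+ /* Address bits inside a large page, other than PAT, are reserved. */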
+ rsvd_mask |= (*page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
+do_check_protect_pse36:
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep ^= PG_NX_MASK;
+
+ /* can the page be put in the TLB? prot will tell us */
+ if (is_user && !(ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+
+ *prot = 0;
+ if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+ *prot |= PAGE_READ;
+ if ((ptep & PG_RW_MASK) || !(is_user || (pg_mode & PG_MODE_WP))) {
+ *prot |= PAGE_WRITE;
+ }
+ }
+ if (!(ptep & PG_NX_MASK) &&
+ (mmu_idx == MMU_USER_IDX ||
+ !((pg_mode & PG_MODE_SMEP) && (ptep & PG_USER_MASK)))) {
+ *prot |= PAGE_EXEC;
+ }
+
+ if (!(pg_mode & PG_MODE_LMA)) {
+ pkr = 0;
+ } else if (ptep & PG_USER_MASK) {
+ pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0;
+ } else {
+ pkr = pg_mode & PG_MODE_PKS ? env->pkrs : 0;
+ }
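+ /*
+ * Each 4-bit protection key selects two PKRU/PKRS bits:
+ * bit 2*key is Access-Disable, bit 2*key + 1 is Write-Disable.
+ */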
+ if (pkr) {
+ uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+ uint32_t pkr_ad = (pkr >> pk * 2) & 1;
+ uint32_t pkr_wd = (pkr >> pk * 2) & 2;
+ uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ if (pkr_ad) {
+ pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
+ } else if (pkr_wd && (is_user || (pg_mode & PG_MODE_WP))) {
+ pkr_prot &= ~PAGE_WRITE;
+ }
+
+ *prot &= pkr_prot;
+ if ((pkr_prot & (1 << is_write1)) == 0) {
+ assert(is_write1 != 2);
+ error_code |= PG_ERROR_PK_MASK;
+ goto do_fault_protect;
+ }
+ }
+
+ if ((*prot & (1 << is_write1)) == 0) {
+ goto do_fault_protect;
+ }
+
+ /* yes, it can! */
+ is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+ if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+ pte |= PG_ACCESSED_MASK;
+ if (is_dirty) {
+ pte |= PG_DIRTY_MASK;
+ }
+ x86_stl_phys_notdirty(cs, pte_addr, pte);
+ }
+
+ if (!(pte & PG_DIRTY_MASK)) {
+ /* only set write access if already dirty... otherwise wait
+ for dirty access */
+ assert(!is_write);
+ *prot &= ~PAGE_WRITE;
+ }
+
+ pte = pte & a20_mask;
+
+ /* align to page_size */
+ pte &= PG_ADDRESS_MASK & ~(*page_size - 1);
+ page_offset = addr & (*page_size - 1);
+ *xlat = GET_HPHYS(cs, pte + page_offset, is_write1, prot);
+ return PG_ERROR_OK;
+
+ do_fault_rsvd:
+ error_code |= PG_ERROR_RSVD_MASK;
+ do_fault_protect:
+ error_code |= PG_ERROR_P_MASK;
+ do_fault:
+ error_code |= (is_write << PG_ERROR_W_BIT);
+ if (is_user) {
+ error_code |= PG_ERROR_U_MASK;
+ }
+ if (is_write1 == 2 &&
+ (((pg_mode & PG_MODE_NXE) && (pg_mode & PG_MODE_PAE)) ||
+ (pg_mode & PG_MODE_SMEP))) {
+ error_code |= PG_ERROR_I_D_MASK;
+ }
+ return error_code;
+}
+
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+ int *prot)
+{
+ CPUX86State *env = &X86_CPU(cs)->env;
+ uint64_t exit_info_1;
+ int page_size;
+ int next_prot;
+ hwaddr hphys;
+
+ if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
+ return gphys;
+ }
+
+ exit_info_1 = mmu_translate(cs, gphys, NULL, env->nested_cr3,
+ access_type, MMU_USER_IDX, env->nested_pg_mode,
+ &hphys, &page_size, &next_prot);
+ if (exit_info_1 == PG_ERROR_OK) {
+ if (prot) {
+ *prot &= next_prot;
+ }
+ return hphys;
+ }
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ gphys);
+ if (prot) {
+ exit_info_1 |= SVM_NPTEXIT_GPA;
+ } else { /* page table access */
+ exit_info_1 |= SVM_NPTEXIT_GPT;
+ }
+ cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
+}
+
+/* return value:
+ * -1 = cannot handle fault
+ * 0 = nothing more to do
+ * 1 = generate PF fault
+ */
+static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
+ int is_write1, int mmu_idx)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int error_code = PG_ERROR_OK;
+ int pg_mode, prot, page_size;
+ hwaddr paddr;
+ hwaddr vaddr;
+
+#if defined(DEBUG_MMU)
+ printf("MMU fault: addr=%" VADDR_PRIx " w=%d mmu=%d eip=" TARGET_FMT_lx "\n",
+ addr, is_write1, mmu_idx, env->eip);
+#endif
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ paddr = addr;
+#ifdef TARGET_X86_64
+ if (!(env->hflags & HF_LMA_MASK)) {
+ /* Without long mode we can address only 32 bits when paging is off */
+ paddr = (uint32_t)paddr;
+ }
+#endif
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ page_size = 4096;
+ } else {
+ pg_mode = get_pg_mode(env);
+ if (pg_mode & PG_MODE_LMA) {
+ int32_t sext;
+
+ /* test virtual address sign extension */
+ sext = (int64_t)addr >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
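+ /* e.g. with 48-bit VAs, bits 63:47 must all equal bit 47 */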
+ if (sext != 0 && sext != -1) {
+ env->error_code = 0;
+ cs->exception_index = EXCP0D_GPF;
+ return 1;
+ }
+ }
+
+ error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1,
+ mmu_idx, pg_mode,
+ &paddr, &page_size, &prot);
+ }
+
+ if (error_code == PG_ERROR_OK) {
+ /* Even with 4 MB pages, we map only one 4 KB page in the TLB to
+ avoid filling it too quickly */
+ vaddr = addr & TARGET_PAGE_MASK;
+ paddr &= TARGET_PAGE_MASK;
+
+ assert(prot & (1 << is_write1));
+ tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
+ prot, mmu_idx, page_size);
+ return 0;
+ } else {
+ if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
+ /* cr2 is not modified in case of exceptions */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ addr);
+ } else {
+ env->cr[2] = addr;
+ }
+ env->error_code = error_code;
+ cs->exception_index = EXCP0E_PAGE;
+ return 1;
+ }
+}
+
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->retaddr = retaddr;
+ if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
+ /* FIXME: On error in get_hphys we have already jumped out. */
+ g_assert(!probe);
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, retaddr);
+ }
+ return true;
+}
diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c
new file mode 100644
index 000000000..1c3610da3
--- /dev/null
+++ b/target/i386/tcg/sysemu/fpu_helper.c
@@ -0,0 +1,57 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers (sysemu code)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/irq.h"
+
+static qemu_irq ferr_irq;
+
+void x86_register_ferr_irq(qemu_irq irq)
+{
+ ferr_irq = irq;
+}
+
+void fpu_check_raise_ferr_irq(CPUX86State *env)
+{
+ if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) {
+ qemu_irq_raise(ferr_irq);
+ return;
+ }
+}
+
+void cpu_clear_ignne(void)
+{
+ CPUX86State *env = &X86_CPU(first_cpu)->env;
+ env->hflags2 &= ~HF2_IGNNE_MASK;
+}
+
+void cpu_set_ignne(void)
+{
+ CPUX86State *env = &X86_CPU(first_cpu)->env;
+ env->hflags2 |= HF2_IGNNE_MASK;
+ /*
+ * We get here in response to a write to port F0h. The chipset should
+ * deassert FP_IRQ, while FERR# should stay signaled until FPSW_SE is
+ * cleared, because FERR# and FP_IRQ are two separate pins on real
+ * hardware. However, we don't model FERR# as a qemu_irq, so we just
+ * do directly what the chipset would do, i.e. deassert FP_IRQ.
+ */
+ qemu_irq_lower(ferr_irq);
+}
diff --git a/target/i386/tcg/sysemu/meson.build b/target/i386/tcg/sysemu/meson.build
new file mode 100644
index 000000000..2e444e766
--- /dev/null
+++ b/target/i386/tcg/sysemu/meson.build
@@ -0,0 +1,10 @@
+i386_softmmu_ss.add(when: ['CONFIG_TCG', 'CONFIG_SOFTMMU'], if_true: files(
+ 'tcg-cpu.c',
+ 'smm_helper.c',
+ 'excp_helper.c',
+ 'bpt_helper.c',
+ 'misc_helper.c',
+ 'fpu_helper.c',
+ 'svm_helper.c',
+ 'seg_helper.c',
+))
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
new file mode 100644
index 000000000..9ccaa054c
--- /dev/null
+++ b/target/i386/tcg/sysemu/misc_helper.c
@@ -0,0 +1,516 @@
+/*
+ * x86 misc helpers - sysemu code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/address-spaces.h"
+#include "tcg/helper-tcg.h"
+
+void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stb(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inb(CPUX86State *env, uint32_t port)
+{
+ return address_space_ldub(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stw(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inw(CPUX86State *env, uint32_t port)
+{
+ return address_space_lduw(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
+{
+ address_space_stl(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_inl(CPUX86State *env, uint32_t port)
+{
+ return address_space_ldl(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+}
+
+target_ulong helper_read_crN(CPUX86State *env, int reg)
+{
+ target_ulong val;
+
+ switch (reg) {
+ default:
+ val = env->cr[reg];
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
+ } else {
+ val = env->int_ctl & V_TPR_MASK;
+ }
+ break;
+ }
+ return val;
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+ switch (reg) {
+ case 0:
+ /*
+ * If we reach this point, the CR0 write intercept is disabled.
+ * But we could still exit if the hypervisor has requested the selective
+ * intercept for bits other than TS and MP
+ */
+ if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
+ ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
+ cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
+ }
+ cpu_x86_update_cr0(env, t0);
+ break;
+ case 3:
+ if ((env->efer & MSR_EFER_LMA) &&
+ (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (!(env->efer & MSR_EFER_LMA)) {
+ t0 &= 0xffffffffUL;
+ }
+ cpu_x86_update_cr3(env, t0);
+ break;
+ case 4:
+ if (t0 & cr4_reserved_bits(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
+ (env->hflags & HF_CS64_MASK)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_x86_update_cr4(env, t0);
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ qemu_mutex_lock_iothread();
+ cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
+ qemu_mutex_unlock_iothread();
+ }
+ env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);
+
+ CPUState *cs = env_cpu(env);
+ if (ctl_has_irq(env)) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
+ }
+ break;
+ default:
+ env->cr[reg] = t0;
+ break;
+ }
+}
+
+void helper_wrmsr(CPUX86State *env)
+{
+ uint64_t val;
+ CPUState *cs = env_cpu(env);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());
+
+ val = ((uint32_t)env->regs[R_EAX]) |
+ ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = val & 0xffff;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = val;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = val;
+ break;
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(env_archcpu(env)->apic_state, val);
+ break;
+ case MSR_EFER:
+ {
+ uint64_t update_mask;
+
+ update_mask = 0;
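+ /* Only EFER bits backed by advertised CPUID features are writable. */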
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
+ update_mask |= MSR_EFER_SCE;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ update_mask |= MSR_EFER_LME;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
+ update_mask |= MSR_EFER_NXE;
+ }
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ update_mask |= MSR_EFER_SVME;
+ }
+ cpu_load_efer(env, (env->efer & ~update_mask) |
+ (val & update_mask));
+ }
+ break;
+ case MSR_STAR:
+ env->star = val;
+ break;
+ case MSR_PAT:
+ env->pat = val;
+ break;
+ case MSR_IA32_PKRS:
+ if (val & 0xFFFFFFFF00000000ull) {
+ goto error;
+ }
+ env->pkrs = val;
+ tlb_flush(cs);
+ break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = val;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ env->lstar = val;
+ break;
+ case MSR_CSTAR:
+ env->cstar = val;
+ break;
+ case MSR_FMASK:
+ env->fmask = val;
+ break;
+ case MSR_FSBASE:
+ env->segs[R_FS].base = val;
+ break;
+ case MSR_GSBASE:
+ env->segs[R_GS].base = val;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = val;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base = val;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask = val;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix64K_00000] = val;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1] = val;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3] = val;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = val;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = val;
+ break;
+ case MSR_MCG_CTL:
+ if ((env->mcg_cap & MCG_CTL_P)
+ && (val == 0 || val == ~(uint64_t)0)) {
+ env->mcg_ctl = val;
+ }
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = val;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = val;
+ break;
+ case MSR_IA32_BNDCFGS:
+ /* FIXME: #GP if reserved bits are set. */
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->msr_bndcfgs = val;
+ cpu_sync_bndcs_hflags(env);
+ break;
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ if ((offset & 0x3) != 0
+ || (val == 0 || val == ~(uint64_t)0)) {
+ env->mce_banks[offset] = val;
+ }
+ break;
+ }
+ /* XXX: exception? */
+ break;
+ }
+ return;
+error:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+ X86CPU *x86_cpu = env_archcpu(env);
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ val = env->sysenter_cs;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ val = env->sysenter_esp;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ val = env->sysenter_eip;
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(env_archcpu(env)->apic_state);
+ break;
+ case MSR_EFER:
+ val = env->efer;
+ break;
+ case MSR_STAR:
+ val = env->star;
+ break;
+ case MSR_PAT:
+ val = env->pat;
+ break;
+ case MSR_IA32_PKRS:
+ val = env->pkrs;
+ break;
+ case MSR_VM_HSAVE_PA:
+ val = env->vm_hsave;
+ break;
+ case MSR_IA32_PERF_STATUS:
+ /* tsc_increment_by_tick */
+ val = 1000ULL;
+ /* CPU multiplier */
+ val |= (((uint64_t)4ULL) << 40);
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ val = env->lstar;
+ break;
+ case MSR_CSTAR:
+ val = env->cstar;
+ break;
+ case MSR_FMASK:
+ val = env->fmask;
+ break;
+ case MSR_FSBASE:
+ val = env->segs[R_FS].base;
+ break;
+ case MSR_GSBASE:
+ val = env->segs[R_GS].base;
+ break;
+ case MSR_KERNELGSBASE:
+ val = env->kernelgsbase;
+ break;
+ case MSR_TSC_AUX:
+ val = env->tsc_aux;
+ break;
+#endif
+ case MSR_SMI_COUNT:
+ val = env->msr_smi_count;
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_MTRRcap:
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
+ MSR_MTRRcap_WC_SUPPORTED;
+ } else {
+ /* XXX: exception? */
+ val = 0;
+ }
+ break;
+ case MSR_MCG_CAP:
+ val = env->mcg_cap;
+ break;
+ case MSR_MCG_CTL:
+ if (env->mcg_cap & MCG_CTL_P) {
+ val = env->mcg_ctl;
+ } else {
+ val = 0;
+ }
+ break;
+ case MSR_MCG_STATUS:
+ val = env->mcg_status;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_IA32_BNDCFGS:
+ val = env->msr_bndcfgs;
+ break;
+ case MSR_IA32_UCODE_REV:
+ val = x86_cpu->ucode_rev;
+ break;
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ val = env->mce_banks[offset];
+ break;
+ }
+ /* XXX: exception? */
+ val = 0;
+ break;
+ }
+ env->regs[R_EAX] = (uint32_t)(val);
+ env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+
+void helper_flush_page(CPUX86State *env, target_ulong addr)
+{
+ tlb_flush_page(env_cpu(env), addr);
+}
+
+static void QEMU_NORETURN do_hlt(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ do_hlt(env);
+}
+
+void helper_monitor(CPUX86State *env, target_ulong ptr)
+{
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ /* XXX: store address? */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
+}
+
+void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
+ env->eip += next_eip_addend;
+
+ /* XXX: not complete but not completely erroneous */
+ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
+ do_pause(env);
+ } else {
+ do_hlt(env);
+ }
+}
diff --git a/target/i386/tcg/sysemu/seg_helper.c b/target/i386/tcg/sysemu/seg_helper.c
new file mode 100644
index 000000000..bf3444c26
--- /dev/null
+++ b/target/i386/tcg/sysemu/seg_helper.c
@@ -0,0 +1,216 @@
+/*
+ * x86 segmentation related helpers: (sysemu-only code)
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "tcg/helper-tcg.h"
+#include "../seg_helper.h"
+
+#ifdef TARGET_X86_64
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+ int selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ selector = (env->star >> 32) & 0xffff;
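+ /* STAR[47:32] holds the SYSCALL CS selector; SS is loaded from CS + 8. */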
+ if (env->hflags & HF_LMA_MASK) {
+ int code64;
+
+ env->regs[R_ECX] = env->eip + next_eip_addend;
+ env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
+
+ code64 = env->hflags & HF_CS64_MASK;
+
+ env->eflags &= ~(env->fmask | RF_MASK);
+ cpu_load_eflags(env, env->eflags, 0);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ if (code64) {
+ env->eip = env->lstar;
+ } else {
+ env->eip = env->cstar;
+ }
+ } else {
+ env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
+
+ env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->star;
+ }
+}
+#endif /* TARGET_X86_64 */
+
+void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+
+ if (!(event_inj & SVM_EVTINJ_VALID)) {
+ int type;
+
+ if (is_int) {
+ type = SVM_EVTINJ_TYPE_SOFT;
+ } else {
+ type = SVM_EVTINJ_TYPE_EXEPT;
+ }
+ event_inj = intno | type | SVM_EVTINJ_VALID;
+ if (!rm && exception_has_error_code(intno)) {
+ event_inj |= SVM_EVTINJ_VALID_ERR;
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err),
+ error_code);
+ }
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj);
+ }
+}
+
+void x86_cpu_do_interrupt(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->exception_index == EXCP_VMEXIT) {
+ assert(env->old_exception == -1);
+ do_vmexit(env);
+ } else {
+ do_interrupt_all(cpu, cs->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip, 0);
+ /* successfully delivered */
+ env->old_exception = -1;
+ }
+}
+
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int intno;
+
+ interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+ if (!interrupt_request) {
+ return false;
+ }
+
+ /* Don't process multiple interrupt requests in a single call.
+ * This is required to make icount-driven execution deterministic.
+ */
+ switch (interrupt_request) {
+ case CPU_INTERRUPT_POLL:
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ break;
+ case CPU_INTERRUPT_SIPI:
+ do_cpu_sipi(cpu);
+ break;
+ case CPU_INTERRUPT_SMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ break;
+ case CPU_INTERRUPT_NMI:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ break;
+ case CPU_INTERRUPT_MCE:
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ break;
+ case CPU_INTERRUPT_HARD:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ break;
+ case CPU_INTERRUPT_VIRQ:
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+ intno = x86_ldl_phys(cs, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl &= ~V_IRQ_MASK;
+ break;
+ }
+
+ /* Ensure that no TB jump will be modified, as the program flow was changed. */
+ return true;
+}
+
+/* check if Port I/O is allowed in TSS */
+void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
+{
+ uintptr_t retaddr = GETPC();
+ uint32_t io_offset, val, mask;
+
+ /* TSS must be a valid 32 bit one */
+ if (!(env->tr.flags & DESC_P_MASK) ||
+ ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+ env->tr.limit < 103) {
+ goto fail;
+ }
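+ /* The I/O permission bitmap offset lives at byte 0x66 of the TSS. */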
+ io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
+ io_offset += (addr >> 3);
+ /* Note: the check needs two bytes */
+ if ((io_offset + 1) > env->tr.limit) {
+ goto fail;
+ }
+ val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
+ val >>= (addr & 7);
+ mask = (1 << size) - 1;
+ /* all bits must be zero to allow the I/O */
+ if ((val & mask) != 0) {
+ fail:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+}
diff --git a/target/i386/tcg/sysemu/smm_helper.c b/target/i386/tcg/sysemu/smm_helper.c
new file mode 100644
index 000000000..a45b5651c
--- /dev/null
+++ b/target/i386/tcg/sysemu/smm_helper.c
@@ -0,0 +1,319 @@
+/*
+ * x86 SMM helpers (sysemu-only)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+#include "tcg/helper-tcg.h"
+
+
+/* SMM support */
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void do_smm_enter(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ target_ulong sm_state;
+ SegmentCache *dt;
+ int i, offset;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+
+ env->msr_smi_count++;
+ env->hflags |= HF_SMM_MASK;
+ if (env->hflags2 & HF2_NMI_MASK) {
+ env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+ } else {
+ env->hflags2 |= HF2_NMI_MASK;
+ }
+
+ sm_state = env->smbase + 0x8000;
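+ /* All save-area offsets below are relative to SMBASE + 0x8000. */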
+
+#ifdef TARGET_X86_64
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ offset = 0x7e00 + i * 16;
+ x86_stw_phys(cs, sm_state + offset, dt->selector);
+ x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stq_phys(cs, sm_state + offset + 8, dt->base);
+ }
+
+ x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
+ x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
+ x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
+ x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
+ x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+ /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+ is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
+ 7EA0-7ED7 as "reserved". What's this, and what's really
+ supposed to happen? */
+ x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
+
+ x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
+ x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
+ x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
+ x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
+ x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
+ x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
+ x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
+ x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
+ for (i = 8; i < 16; i++) {
+ x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ }
+ x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
+ x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
+#else
+ x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
+ x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
+ x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
+ x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
+ x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
+ x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
+ x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
+ x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
+ x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
+ x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
+ x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
+ x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
+ x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
+ x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
+
+ x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
+
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
+ x86_stl_phys(cs, sm_state + offset + 8, dt->base);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ }
+ x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
+#endif
+ /* init SMM cpu state */
+
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, 0);
+#endif
+ cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
+ DF_MASK));
+ env->eip = 0x00008000;
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+ CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+
+ cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+ 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+}
+
+void helper_rsm(CPUX86State *env)
+{
+ X86CPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ target_ulong sm_state;
+ int i, offset;
+ uint32_t val;
+
+ sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
+
+ env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
+
+ env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
+ env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
+ env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
+
+ env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
+ env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
+ env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
+ env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
+ env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
+ env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
+ for (i = 8; i < 16; i++) {
+ env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
+ }
+ env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
+
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
+
+ for (i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ x86_lduw_phys(cs, sm_state + offset),
+ x86_ldq_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_lduw_phys(cs, sm_state + offset + 2) &
+ 0xf0ff) << 8);
+ }
+
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
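+ /* Bit 17 of the revision ID advertises SMBASE relocation support. */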
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
+ }
+#else
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
+ env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
+ env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
+ env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
+ env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+
+ env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
+ env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
+ env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
+
+ env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
+
+ for (i = 0; i < 6; i++) {
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ cpu_x86_load_seg_cache(env, i,
+ x86_ldl_phys(cs,
+ sm_state + 0x7fa8 + i * 4) & 0xffff,
+ x86_ldl_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_ldl_phys(cs,
+ sm_state + offset) & 0xf0ff) << 8);
+ }
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
+
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
+ }
+#endif
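+ /* Leaving SMM re-enables NMIs, unless this RSM executes inside an
+ NMI handler that was interrupted by the SMI. */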
+ if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+ env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+ env->hflags &= ~HF_SMM_MASK;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+}
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
new file mode 100644
index 000000000..6d39611eb
--- /dev/null
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -0,0 +1,863 @@
+/*
+ * x86 SVM helpers (sysemu only)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "tcg/helper-tcg.h"
+
+/* Secure Virtual Machine helpers */
+
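+/*
+ * The VMCB stores segment attributes in a packed 12-bit form: bits 7:0
+ * hold descriptor bits 47:40 and bits 11:8 hold descriptor bits 55:52,
+ * hence the shifting below when converting to and from the flags word
+ * kept in SegmentCache.
+ */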
+static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
+ const SegmentCache *sc)
+{
+ CPUState *cs = env_cpu(env);
+
+ x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
+ sc->selector);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
+ sc->base);
+ x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
+ sc->limit);
+ x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
+ ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+}
+
+/*
+ * VMRUN and VMLOAD canonicalize (i.e., sign-extend from the virtual
+ * address width up to bit 63) the base addresses of the segment
+ * registers they load. For example, with a 48-bit virtual address
+ * width a base of 0x0000800000000000 becomes 0xffff800000000000.
+ */
+static inline void svm_canonicalization(CPUX86State *env,
+ target_ulong *seg_base)
+{
+ uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
+
+ /* Use int64_t rather than long for the arithmetic right shift:
+ long is only 32 bits on LLP64 hosts such as 64-bit Windows. */
+ *seg_base = (((int64_t) *seg_base) << shift_amt) >> shift_amt;
+}
+
+static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
+ SegmentCache *sc)
+{
+ CPUState *cs = env_cpu(env);
+ unsigned int flags;
+
+ sc->selector = x86_lduw_phys(cs,
+ addr + offsetof(struct vmcb_seg, selector));
+ sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
+ sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
+ flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
+ sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+ svm_canonicalization(env, &sc->base);
+}
+
+static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
+ int seg_reg)
+{
+ SegmentCache sc1, *sc = &sc1;
+
+ svm_load_seg(env, addr, sc);
+ cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
+ sc->base, sc->limit, sc->flags);
+}
+
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+ if (!(env->efer & MSR_EFER_SVME)) {
+ return true;
+ }
+
+ if (env->efer & MSR_EFER_RESERVED) {
+ return true;
+ }
+
+ if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+ !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[4] & CR4_PAE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[0] & CR0_PE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && (env->cr[4] & CR4_PAE_MASK)
+ && (env->segs[R_CS].flags & DESC_L_MASK)
+ && (env->segs[R_CS].flags & DESC_B_MASK)) {
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool virtual_gif_enabled(CPUX86State *env)
+{
+ if (likely(env->hflags & HF_GUEST_MASK)) {
+ return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
+ && (env->int_ctl & V_GIF_ENABLED_MASK);
+ }
+ return false;
+}
+
+static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
+ uint32_t exit_code,
+ uintptr_t retaddr)
+{
+ uint64_t lbr_ctl;
+
+ if (likely(env->hflags & HF_GUEST_MASK)) {
+ if (likely(!(env->hflags2 & HF2_NPT_MASK))
+ || !(env->efer & MSR_EFER_LMA)) {
+ cpu_vmexit(env, exit_code, 0, retaddr);
+ }
+
+ lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
+ control.lbr_ctl));
+ return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
+ && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
+ }
+
+ return false;
+}
+
+static inline bool virtual_gif_set(CPUX86State *env)
+{
+ return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
+}
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+ X86CPU *cpu = env_archcpu(env);
+ target_ulong addr;
+ uint64_t nested_ctl;
+ uint32_t event_inj;
+ uint32_t asid;
+ uint64_t new_cr0;
+ uint64_t new_cr3;
+ uint64_t new_cr4;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
+
+ env->vm_vmcb = addr;
+
+ /* save the current CPU state in the hsave page */
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
+ env->eip + next_eip_addend);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+
+ /* load the interception bitmaps so we do not need to access the
+ vmcb in svm mode */
+ env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.intercept));
+ env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_read));
+ env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_write));
+ env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_read));
+ env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_write));
+ env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_exceptions));
+
+ nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.nested_ctl));
+ asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.asid));
+
+ uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+
+ if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ env->nested_pg_mode = 0;
+
+ if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (asid == 0) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ if (nested_ctl & SVM_NPT_ENABLED) {
+ env->nested_cr3 = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ control.nested_cr3));
+ env->hflags2 |= HF2_NPT_MASK;
+
+ env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
+ }
+
+ /* enable intercepts */
+ env->hflags |= HF_GUEST_MASK;
+
+ env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.tsc_offset));
+
+ new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
+ if (new_cr0 & SVM_CR0_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
+ if ((env->efer & MSR_EFER_LMA) &&
+ (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
+ if (new_cr4 & cr4_reserved_bits(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ /* clear exit_info_2 so we behave like the real hardware */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+ cpu_x86_update_cr0(env, new_cr0);
+ cpu_x86_update_cr4(env, new_cr4);
+ cpu_x86_update_cr3(env, new_cr3);
+ env->cr[2] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+ env->int_ctl = x86_ldl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
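+ /* With V_INTR_MASKING set, the guest's EFLAGS.IF only gates virtual
+ interrupts; remember the host's IF in HF2_HIF so it can be
+ restored at #VMEXIT. */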
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ if (env->int_ctl & V_INTR_MASKING_MASK) {
+ env->hflags2 |= HF2_VINTR_MASK;
+ if (env->eflags & IF_MASK) {
+ env->hflags2 |= HF2_HIF_MASK;
+ }
+ }
+
+ cpu_load_efer(env,
+ x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+ R_ES);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ R_CS);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ R_SS);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ R_DS);
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
+ &env->idt);
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
+ &env->gdt);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rip));
+
+ env->regs[R_ESP] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax));
+ env->dr[7] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+ env->dr[6] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+
+#ifdef TARGET_X86_64
+ if (env->dr[6] & DR_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if (env->dr[7] & DR_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+#endif
+
+ if (is_efer_invalid_state(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
+ switch (x86_ldub_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+ case TLB_CONTROL_DO_NOTHING:
+ break;
+ case TLB_CONTROL_FLUSH_ALL_ASID:
+ /* FIXME: this is not 100% correct but should work for now */
+ tlb_flush(cs);
+ break;
+ }
+
+ env->hflags2 |= HF2_GIF_MASK;
+
+ if (ctl_has_irq(env)) {
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ }
+
+ if (virtual_gif_set(env)) {
+ env->hflags2 |= HF2_VGIF_MASK;
+ }
+
+ /* maybe we need to inject an event */
+ event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+ if (event_inj & SVM_EVTINJ_VALID) {
+ uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+ uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+ uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj_err));
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
+ /* FIXME: need to implement valid_err */
+ switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+ case SVM_EVTINJ_TYPE_INTR:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
+ /* XXX: is it always correct? */
+ do_interrupt_x86_hardirq(env, vector, 1);
+ break;
+ case SVM_EVTINJ_TYPE_NMI:
+ cs->exception_index = EXCP02_NMI;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_EXEPT:
+ if (vector == EXCP02_NMI || vector >= 31) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_SOFT:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 1;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
+ cpu_loop_exit(cs);
+ break;
+ default:
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ break;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
+ env->error_code);
+ }
+}
+
+void helper_vmmcall(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong addr;
+ int prot;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
+ addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
+ svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
+ svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
+ svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
+
+#ifdef TARGET_X86_64
+ env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.kernel_gs_base));
+ env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
+ env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
+ env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+ svm_canonicalization(env, &env->kernelgsbase);
+#endif
+ env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
+ env->sysenter_cs = x86_ldq_phys(cs,
+ addr + offsetof(struct vmcb, save.sysenter_cs));
+ env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.sysenter_esp));
+ env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.sysenter_eip));
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong addr;
+ int prot;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
+ addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, x86_ldq_phys(cs,
+ addr + offsetof(struct vmcb, save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
+ &env->segs[R_FS]);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
+ &env->segs[R_GS]);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
+ &env->tr);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
+ &env->ldt);
+
+#ifdef TARGET_X86_64
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
+ env->kernelgsbase);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+#endif
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
+ x86_stq_phys(cs,
+ addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
+ env->sysenter_esp);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
+ env->sysenter_eip);
+}
+
+void helper_stgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
+
+ if (virtual_gif_enabled(env)) {
+ env->int_ctl |= V_GIF_MASK;
+ env->hflags2 |= HF2_VGIF_MASK;
+ } else {
+ env->hflags2 |= HF2_GIF_MASK;
+ }
+}
+
+void helper_clgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
+
+ if (virtual_gif_enabled(env)) {
+ env->int_ctl &= ~V_GIF_MASK;
+ env->hflags2 &= ~HF2_VGIF_MASK;
+ } else {
+ env->hflags2 &= ~HF2_GIF_MASK;
+ }
+}
+
+bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
+{
+ switch (type) {
+ case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+ if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+ if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+ if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+ if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+ return true;
+ }
+ break;
+ case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+ if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+ return true;
+ }
+ break;
+ default:
+ if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
+ return true;
+ }
+ break;
+ }
+ return false;
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param, uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (likely(!(env->hflags & HF_GUEST_MASK))) {
+ return;
+ }
+
+ if (!cpu_svm_has_intercept(env, type)) {
+ return;
+ }
+
+ if (type == SVM_EXIT_MSR) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint32_t t0, t1;
+
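+ /*
+ * The MSR permission map allocates two bits per MSR (read, then
+ * write), in three 2K chunks covering the 0x00000000, 0xc0000000
+ * and 0xc0010000 MSR ranges; t1 is the byte offset into the map
+ * and t0 the bit within that byte, with 'param' (0 for read,
+ * 1 for write) selecting between the pair.
+ */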
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case 0 ... 0x1fff:
+ t0 = (env->regs[R_ECX] * 2) % 8;
+ t1 = (env->regs[R_ECX] * 2) / 8;
+ break;
+ case 0xc0000000 ... 0xc0001fff:
+ t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ case 0xc0010000 ... 0xc0011fff:
+ t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ default:
+ cpu_vmexit(env, type, param, retaddr);
+ t0 = 0;
+ t1 = 0;
+ break;
+ }
+ if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
+ cpu_vmexit(env, type, param, retaddr);
+ }
+ return;
+ }
+
+ cpu_vmexit(env, type, param, retaddr);
+}
+
+void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
+{
+ cpu_svm_check_intercept_param(env, type, 0, GETPC());
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+ uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
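+ /* One permission bit per I/O port; bits 6:4 of param encode the
+ access size, so widen the mask to cover every byte accessed. */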
+
+ if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
+ /* next env->eip */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ env->eip + next_eip_addend);
+ cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
+ }
+ }
+}
+
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
+ uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cpu_restore_state(cs, retaddr, true);
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
+ PRIx64 ", " TARGET_FMT_lx ")!\n",
+ exit_code, exit_info_1,
+ x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_2)),
+ env->eip);
+
+ cs->exception_index = EXCP_VMEXIT;
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+ exit_code);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_1), exit_info_1);
+
+ /* remove any pending exception */
+ env->old_exception = -1;
+ cpu_loop_exit(cs);
+}
+
+void do_vmexit(CPUX86State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+ SVM_INTERRUPT_SHADOW_MASK);
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ } else {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+ }
+ env->hflags2 &= ~HF2_NPT_MASK;
+
+ /* Save the VM state in the vmcb */
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
+ env->eip);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+ env->hflags & HF_CPL_MASK);
+
+ /* Reload the host state from vm_hsave */
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ env->hflags &= ~HF_GUEST_MASK;
+ env->intercept = 0;
+ env->intercept_exceptions = 0;
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->int_ctl = 0;
+ env->tsc_offset = 0;
+
+ env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr0)) |
+ CR0_PE_MASK);
+ cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr4)));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr3)));
+ /* we need to set the efer after the crs so the hidden flags get
+ set properly */
+ cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+ VM_MASK));
+
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+ R_ES);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+ R_CS);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+ R_SS);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+ R_DS);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rip));
+ env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rax));
+
+ env->dr[6] = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6));
+ env->dr[7] = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7));
+
+ /* other setups */
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+
+ env->hflags2 &= ~HF2_GIF_MASK;
+ env->hflags2 &= ~HF2_VGIF_MASK;
+ /* FIXME: Resets the current ASID register to zero (host ASID). */
+
+ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+
+ /* Clears the TSC_OFFSET inside the processor. */
+
+ /* If the host is in PAE mode, the processor reloads the host's PDPEs
+ from the page table indicated by the host's CR3. If the PDPEs contain
+ illegal state, the processor causes a shutdown. */
+
+ /* Disables all breakpoints in the host DR7 register. */
+
+ /* Checks the reloaded host state for consistency. */
+
+ /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
+ host's code segment or non-canonical (in the case of long mode), a
+ #GP fault is delivered inside the host. */
+}
diff --git a/target/i386/tcg/sysemu/tcg-cpu.c b/target/i386/tcg/sysemu/tcg-cpu.c
new file mode 100644
index 000000000..c223c0fe9
--- /dev/null
+++ b/target/i386/tcg/sysemu/tcg-cpu.c
@@ -0,0 +1,83 @@
+/*
+ * i386 TCG cpu class initialization functions specific to sysemu
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/helper-tcg.h"
+
+#include "sysemu/sysemu.h"
+#include "qemu/units.h"
+#include "exec/address-spaces.h"
+
+#include "tcg/tcg-cpu.h"
+
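+/*
+ * Once the machine is fully built, alias its SMRAM region (if any) into
+ * the CPU's SMM address space at higher priority than normal memory.
+ */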
+static void tcg_cpu_machine_done(Notifier *n, void *unused)
+{
+ X86CPU *cpu = container_of(n, X86CPU, machine_done);
+ MemoryRegion *smram =
+ (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+ if (smram) {
+ cpu->smram = g_new(MemoryRegion, 1);
+ memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
+ smram, 0, 4 * GiB);
+ memory_region_set_enabled(cpu->smram, true);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
+ cpu->smram, 1);
+ }
+}
+
+bool tcg_cpu_realizefn(CPUState *cs, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ /*
+ * The realize order is important, since x86_cpu_realize() checks if
+ * nothing else has been set by the user (or by accelerators) in
+ * cpu->ucode_rev and cpu->phys_bits, and the memory regions
+ * initialized here are needed for the vcpu initialization.
+ *
+ * realize order:
+ * tcg_cpu -> host_cpu -> x86_cpu
+ */
+ cpu->cpu_as_mem = g_new(MemoryRegion, 1);
+ cpu->cpu_as_root = g_new(MemoryRegion, 1);
+
+ /* Outer container... */
+ memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
+ memory_region_set_enabled(cpu->cpu_as_root, true);
+
+ /*
+ * ... with two regions inside: normal system memory with low
+ * priority, and...
+ */
+ memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
+ get_system_memory(), 0, ~0ull);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
+ memory_region_set_enabled(cpu->cpu_as_mem, true);
+
+ cs->num_ases = 2;
+ cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
+ cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
+
+ /* ... SMRAM with higher priority, linked from /machine/smram. */
+ cpu->machine_done.notify = tcg_cpu_machine_done;
+ qemu_add_machine_init_done_notifier(&cpu->machine_done);
+ return true;
+}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
new file mode 100644
index 000000000..6fdfdf959
--- /dev/null
+++ b/target/i386/tcg/tcg-cpu.c
@@ -0,0 +1,160 @@
+/*
+ * i386 TCG cpu class initialization
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "helper-tcg.h"
+#include "qemu/accel.h"
+#include "hw/core/accel-cpu.h"
+
+#include "tcg-cpu.h"
+
+/* Frob eflags into and out of the CPU temporary format. */
+
+static void x86_cpu_exec_enter(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ env->df = 1 - (2 * ((env->eflags >> 10) & 1));
+ CC_OP = CC_OP_EFLAGS;
+ env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+}
+
+static void x86_cpu_exec_exit(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->eflags = cpu_compute_eflags(env);
+}
+
+static void x86_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ cpu->env.eip = tb->pc - tb->cs_base;
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool x86_debug_check_breakpoint(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /* RF disables all architectural breakpoints. */
+ return !(env->eflags & RF_MASK);
+}
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps x86_tcg_ops = {
+ .initialize = tcg_x86_init,
+ .synchronize_from_tb = x86_cpu_synchronize_from_tb,
+ .cpu_exec_enter = x86_cpu_exec_enter,
+ .cpu_exec_exit = x86_cpu_exec_exit,
+#ifdef CONFIG_USER_ONLY
+ .fake_user_interrupt = x86_cpu_do_interrupt,
+ .record_sigsegv = x86_cpu_record_sigsegv,
+#else
+ .tlb_fill = x86_cpu_tlb_fill,
+ .do_interrupt = x86_cpu_do_interrupt,
+ .cpu_exec_interrupt = x86_cpu_exec_interrupt,
+ .debug_excp_handler = breakpoint_handler,
+ .debug_check_breakpoint = x86_debug_check_breakpoint,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
+{
+ /* for x86, all cpus use the same set of operations */
+ cc->tcg_ops = &x86_tcg_ops;
+}
+
+static void tcg_cpu_class_init(CPUClass *cc)
+{
+ cc->init_accel_cpu = tcg_cpu_init_ops;
+}
+
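+/*
+ * Register the standard-format (non-compacted) XSAVE area offsets; for
+ * TCG these are fixed at build time by the X86XSaveArea layout in
+ * tcg-cpu.h.
+ */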
+static void tcg_cpu_xsave_init(void)
+{
+#define XO(bit, field) \
+ x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
+
+ XO(XSTATE_FP_BIT, legacy);
+ XO(XSTATE_SSE_BIT, legacy);
+ XO(XSTATE_YMM_BIT, avx_state);
+ XO(XSTATE_BNDREGS_BIT, bndreg_state);
+ XO(XSTATE_BNDCSR_BIT, bndcsr_state);
+ XO(XSTATE_OPMASK_BIT, opmask_state);
+ XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
+ XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
+ XO(XSTATE_PKRU_BIT, pkru_state);
+
+#undef XO
+}
+
+/*
+ * TCG-specific defaults that override cpudef models when using TCG.
+ * Only for builtin_x86_defs models initialized with x86_register_cpudef_types.
+ */
+static PropValue tcg_default_props[] = {
+ { "vme", "off" },
+ { NULL, NULL },
+};
+
+static void tcg_cpu_instance_init(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+
+ if (xcc->model) {
+ /* Special cases not set in the X86CPUDefinition structs: */
+ x86_cpu_apply_props(cpu, tcg_default_props);
+ }
+
+ tcg_cpu_xsave_init();
+}
+
+static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
+
+#ifndef CONFIG_USER_ONLY
+ acc->cpu_realizefn = tcg_cpu_realizefn;
+#endif /* CONFIG_USER_ONLY */
+
+ acc->cpu_class_init = tcg_cpu_class_init;
+ acc->cpu_instance_init = tcg_cpu_instance_init;
+}
+
+static const TypeInfo tcg_cpu_accel_type_info = {
+ .name = ACCEL_CPU_NAME("tcg"),
+ .parent = TYPE_ACCEL_CPU,
+ .class_init = tcg_cpu_accel_class_init,
+ .abstract = true,
+};
+
+static void tcg_cpu_accel_register_types(void)
+{
+ type_register_static(&tcg_cpu_accel_type_info);
+}
+type_init(tcg_cpu_accel_register_types);
diff --git a/target/i386/tcg/tcg-cpu.h b/target/i386/tcg/tcg-cpu.h
new file mode 100644
index 000000000..53a849445
--- /dev/null
+++ b/target/i386/tcg/tcg-cpu.h
@@ -0,0 +1,81 @@
+/*
+ * i386 TCG cpu class initialization functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TCG_CPU_H
+#define TCG_CPU_H
+
+#define XSAVE_FCW_FSW_OFFSET 0x000
+#define XSAVE_FTW_FOP_OFFSET 0x004
+#define XSAVE_CWD_RIP_OFFSET 0x008
+#define XSAVE_CWD_RDP_OFFSET 0x010
+#define XSAVE_MXCSR_OFFSET 0x018
+#define XSAVE_ST_SPACE_OFFSET 0x020
+#define XSAVE_XMM_SPACE_OFFSET 0x0a0
+#define XSAVE_XSTATE_BV_OFFSET 0x200
+#define XSAVE_AVX_OFFSET 0x240
+#define XSAVE_BNDREG_OFFSET 0x3c0
+#define XSAVE_BNDCSR_OFFSET 0x400
+#define XSAVE_OPMASK_OFFSET 0x440
+#define XSAVE_ZMM_HI256_OFFSET 0x480
+#define XSAVE_HI16_ZMM_OFFSET 0x680
+#define XSAVE_PKRU_OFFSET 0xa80
+
+typedef struct X86XSaveArea {
+ X86LegacyXSaveArea legacy;
+ X86XSaveHeader header;
+
+ /* Extended save areas: */
+
+ /* AVX State: */
+ XSaveAVX avx_state;
+
+ /* Ensure that XSaveBNDREG is properly aligned. */
+ uint8_t padding[XSAVE_BNDREG_OFFSET
+ - sizeof(X86LegacyXSaveArea)
+ - sizeof(X86XSaveHeader)
+ - sizeof(XSaveAVX)];
+
+ /* MPX State: */
+ XSaveBNDREG bndreg_state;
+ XSaveBNDCSR bndcsr_state;
+ /* AVX-512 State: */
+ XSaveOpmask opmask_state;
+ XSaveZMM_Hi256 zmm_hi256_state;
+ XSaveHi16_ZMM hi16_zmm_state;
+ /* PKRU State: */
+ XSavePKRU pkru_state;
+} X86XSaveArea;
+
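+/*
+ * These build-time checks pin the structure layout to the architectural
+ * standard-format XSAVE offsets defined above.
+ */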
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fcw) != XSAVE_FCW_FSW_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.ftw) != XSAVE_FTW_FOP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpip) != XSAVE_CWD_RIP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpdp) != XSAVE_CWD_RDP_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.mxcsr) != XSAVE_MXCSR_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.fpregs) != XSAVE_ST_SPACE_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, legacy.xmm_regs) != XSAVE_XMM_SPACE_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != XSAVE_AVX_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != XSAVE_BNDREG_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != XSAVE_BNDCSR_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != XSAVE_OPMASK_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != XSAVE_ZMM_HI256_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != XSAVE_HI16_ZMM_OFFSET);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != XSAVE_PKRU_OFFSET);
+
+bool tcg_cpu_realizefn(CPUState *cs, Error **errp);
+
+#endif /* TCG_CPU_H */
diff --git a/target/i386/tcg/tcg-stub.c b/target/i386/tcg/tcg-stub.c
new file mode 100644
index 000000000..8d45579ad
--- /dev/null
+++ b/target/i386/tcg/tcg-stub.c
@@ -0,0 +1,25 @@
+/*
+ * x86 TCG helper stubs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+void update_mxcsr_from_sse_status(CPUX86State *env)
+{
+}
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
new file mode 100644
index 000000000..e9e145154
--- /dev/null
+++ b/target/i386/tcg/translate.c
@@ -0,0 +1,8717 @@
+/*
+ * i386 translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "helper-tcg.h"
+
+#include "exec/log.h"
+
+#define PREFIX_REPZ 0x01
+#define PREFIX_REPNZ 0x02
+#define PREFIX_LOCK 0x04
+#define PREFIX_DATA 0x08
+#define PREFIX_ADR 0x10
+#define PREFIX_VEX 0x20
+#define PREFIX_REX 0x40
+
+#ifdef TARGET_X86_64
+# define ctztl ctz64
+# define clztl clz64
+#else
+# define ctztl ctz32
+# define clztl clz32
+#endif
+
+/* For a switch indexed by MODRM, match all memory operands for a given OP. */
+#define CASE_MODRM_MEM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
+
+#define CASE_MODRM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
+ case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
+
+//#define MACRO_TEST 1
+
+/* global register indexes */
+static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
+static TCGv_i32 cpu_cc_op;
+static TCGv cpu_regs[CPU_NB_REGS];
+static TCGv cpu_seg_base[6];
+static TCGv_i64 cpu_bndl[4];
+static TCGv_i64 cpu_bndu[4];
+
+#include "exec/gen-icount.h"
+
+typedef struct DisasContext {
+ DisasContextBase base;
+
+ target_ulong pc; /* pc = eip + cs_base */
+ target_ulong pc_start; /* pc at TB entry */
+ target_ulong cs_base; /* base of CS segment */
+
+ MemOp aflag;
+ MemOp dflag;
+
+ int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
+ uint8_t prefix;
+
+#ifndef CONFIG_USER_ONLY
+ uint8_t cpl; /* code priv level */
+ uint8_t iopl; /* i/o priv level */
+#endif
+ uint8_t vex_l; /* vex vector length */
+ uint8_t vex_v; /* vex vvvv register, without 1's complement. */
+ uint8_t popl_esp_hack; /* for correct popl with esp base handling */
+ uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
+
+#ifdef TARGET_X86_64
+ uint8_t rex_r;
+ uint8_t rex_x;
+ uint8_t rex_b;
+ bool rex_w;
+#endif
+ bool jmp_opt; /* use direct block chaining for direct jumps */
+ bool repz_opt; /* optimize jumps within repz instructions */
+ bool cc_op_dirty;
+
+ CCOp cc_op; /* current CC operation */
+ int mem_index; /* select memory access functions */
+ uint32_t flags; /* all execution flags */
+ int cpuid_features;
+ int cpuid_ext_features;
+ int cpuid_ext2_features;
+ int cpuid_ext3_features;
+ int cpuid_7_0_ebx_features;
+ int cpuid_xsave_features;
+
+ /* TCG local temps */
+ TCGv cc_srcT;
+ TCGv A0;
+ TCGv T0;
+ TCGv T1;
+
+ /* TCG local register indexes (only used inside old micro ops) */
+ TCGv tmp0;
+ TCGv tmp4;
+ TCGv_ptr ptr0;
+ TCGv_ptr ptr1;
+ TCGv_i32 tmp2_i32;
+ TCGv_i32 tmp3_i32;
+ TCGv_i64 tmp1_i64;
+
+ sigjmp_buf jmpbuf;
+} DisasContext;
+
+/* The environment in which user-only runs is constrained. */
+#ifdef CONFIG_USER_ONLY
+#define PE(S) true
+#define CPL(S) 3
+#define IOPL(S) 0
+#define SVME(S) false
+#define GUEST(S) false
+#else
+#define PE(S) (((S)->flags & HF_PE_MASK) != 0)
+#define CPL(S) ((S)->cpl)
+#define IOPL(S) ((S)->iopl)
+#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
+#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
+#endif
+#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
+#define VM86(S) false
+#define CODE32(S) true
+#define SS32(S) true
+#define ADDSEG(S) false
+#else
+#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
+#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
+#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
+#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
+#endif
+#if !defined(TARGET_X86_64)
+#define CODE64(S) false
+#define LMA(S) false
+#elif defined(CONFIG_USER_ONLY)
+#define CODE64(S) true
+#define LMA(S) true
+#else
+#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
+#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
+#endif
+
+#ifdef TARGET_X86_64
+#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
+#define REX_W(S) ((S)->rex_w)
+#define REX_R(S) ((S)->rex_r + 0)
+#define REX_X(S) ((S)->rex_x + 0)
+#define REX_B(S) ((S)->rex_b + 0)
+#else
+#define REX_PREFIX(S) false
+#define REX_W(S) false
+#define REX_R(S) 0
+#define REX_X(S) 0
+#define REX_B(S) 0
+#endif
+
+/*
+ * Many sysemu-only helpers are not reachable for user-only.
+ * Define stub generators here, so that we need neither sprinkle
+ * ifdefs through the translator nor provide the helper function.
+ */
+#define STUB_HELPER(NAME, ...) \
+ static inline void gen_helper_##NAME(__VA_ARGS__) \
+ { qemu_build_not_reached(); }
+
+#ifdef CONFIG_USER_ONLY
+STUB_HELPER(clgi, TCGv_env env)
+STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
+STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
+STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
+STUB_HELPER(monitor, TCGv_env env, TCGv addr)
+STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
+STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
+STUB_HELPER(rdmsr, TCGv_env env)
+STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
+STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
+STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
+STUB_HELPER(stgi, TCGv_env env)
+STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
+STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
+STUB_HELPER(vmmcall, TCGv_env env)
+STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
+STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
+STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
+STUB_HELPER(wrmsr, TCGv_env env)
+#endif
+
+static void gen_eob(DisasContext *s);
+static void gen_jr(DisasContext *s, TCGv dest);
+static void gen_jmp(DisasContext *s, target_ulong eip);
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
+static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
+static void gen_exception_gpf(DisasContext *s);
+
+/* i386 arith/logic operations */
+enum {
+ OP_ADDL,
+ OP_ORL,
+ OP_ADCL,
+ OP_SBBL,
+ OP_ANDL,
+ OP_SUBL,
+ OP_XORL,
+ OP_CMPL,
+};
+
+/* i386 shift ops */
+enum {
+ OP_ROL,
+ OP_ROR,
+ OP_RCL,
+ OP_RCR,
+ OP_SHL,
+ OP_SHR,
+ OP_SHL1, /* undocumented */
+ OP_SAR = 7,
+};
+
+enum {
+ JCC_O,
+ JCC_B,
+ JCC_Z,
+ JCC_BE,
+ JCC_S,
+ JCC_P,
+ JCC_L,
+ JCC_LE,
+};
+
+enum {
+ /* I386 int registers */
+ OR_EAX, /* MUST be even numbered */
+ OR_ECX,
+ OR_EDX,
+ OR_EBX,
+ OR_ESP,
+ OR_EBP,
+ OR_ESI,
+ OR_EDI,
+
+ OR_TMP0 = 16, /* temporary operand register */
+ OR_TMP1,
+ OR_A0, /* temporary register used when doing address evaluation */
+};
+
+enum {
+ USES_CC_DST = 1,
+ USES_CC_SRC = 2,
+ USES_CC_SRC2 = 4,
+ USES_CC_SRCT = 8,
+};
+
+/* Bit set if the global variable is live after setting CC_OP to X. */
+static const uint8_t cc_op_live[CC_OP_NB] = {
+ [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_EFLAGS] = USES_CC_SRC,
+ [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
+ [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
+ [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_CLR] = 0,
+ [CC_OP_POPCNT] = USES_CC_SRC,
+};
+
+static void set_cc_op(DisasContext *s, CCOp op)
+{
+ int dead;
+
+ if (s->cc_op == op) {
+ return;
+ }
+
+ /* Discard CC computation that will no longer be used. */
+ dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
+ if (dead & USES_CC_DST) {
+ tcg_gen_discard_tl(cpu_cc_dst);
+ }
+ if (dead & USES_CC_SRC) {
+ tcg_gen_discard_tl(cpu_cc_src);
+ }
+ if (dead & USES_CC_SRC2) {
+ tcg_gen_discard_tl(cpu_cc_src2);
+ }
+ if (dead & USES_CC_SRCT) {
+ tcg_gen_discard_tl(s->cc_srcT);
+ }
+
+ if (op == CC_OP_DYNAMIC) {
+ /* The DYNAMIC setting is translator only, and should never be
+ stored. Thus we always consider it clean. */
+ s->cc_op_dirty = false;
+ } else {
+ /* Discard any computed CC_OP value (see shifts). */
+ if (s->cc_op == CC_OP_DYNAMIC) {
+ tcg_gen_discard_i32(cpu_cc_op);
+ }
+ s->cc_op_dirty = true;
+ }
+ s->cc_op = op;
+}
+
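+/*
+ * Write the lazily tracked cc_op back to the CPU state, e.g. before a
+ * helper call or at the end of a translation block.
+ */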
+static void gen_update_cc_op(DisasContext *s)
+{
+ if (s->cc_op_dirty) {
+ tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
+ s->cc_op_dirty = false;
+ }
+}
+
+#ifdef TARGET_X86_64
+
+#define NB_OP_SIZES 4
+
+#else /* !TARGET_X86_64 */
+
+#define NB_OP_SIZES 3
+
+#endif /* !TARGET_X86_64 */
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define REG_B_OFFSET (sizeof(target_ulong) - 1)
+#define REG_H_OFFSET (sizeof(target_ulong) - 2)
+#define REG_W_OFFSET (sizeof(target_ulong) - 2)
+#define REG_L_OFFSET (sizeof(target_ulong) - 4)
+#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
+#else
+#define REG_B_OFFSET 0
+#define REG_H_OFFSET 1
+#define REG_W_OFFSET 0
+#define REG_L_OFFSET 0
+#define REG_LH_OFFSET 4
+#endif
+
+/* In instruction encodings for byte register accesses the
+ * register number usually indicates "low 8 bits of register N";
+ * however there are some special cases where N 4..7 indicates
+ * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
+ * true for this special case, false otherwise.
+ */
+static inline bool byte_reg_is_xH(DisasContext *s, int reg)
+{
+ /* Any time the REX prefix is present, byte registers are uniform */
+ if (reg < 4 || REX_PREFIX(s)) {
+ return false;
+ }
+ return true;
+}
+
+/* Select the size of a push/pop operation. */
+static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
+{
+ if (CODE64(s)) {
+ return ot == MO_16 ? MO_16 : MO_64;
+ } else {
+ return ot;
+ }
+}
+
+/* Select the size of the stack pointer. */
+static inline MemOp mo_stacksize(DisasContext *s)
+{
+ return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
+}
+
+/* Select only size 64 else 32. Used for SSE operand sizes. */
+static inline MemOp mo_64_32(MemOp ot)
+{
+#ifdef TARGET_X86_64
+ return ot == MO_64 ? MO_64 : MO_32;
+#else
+ return MO_32;
+#endif
+}
+
+/* Select size 8 if lsb of B is clear, else OT. Used for decoding
+ byte vs word opcodes. */
+static inline MemOp mo_b_d(int b, MemOp ot)
+{
+ return b & 1 ? ot : MO_8;
+}
+
+/* Select size 8 if lsb of B is clear, else OT capped at 32.
+ Used for decoding operand size of port opcodes. */
+static inline MemOp mo_b_d32(int b, MemOp ot)
+{
+ return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
+}
+
+static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
+{
+ switch(ot) {
+ case MO_8:
+ if (!byte_reg_is_xH(s, reg)) {
+ tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
+ } else {
+ tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
+ }
+ break;
+ case MO_16:
+ tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
+ break;
+ case MO_32:
+ /* For x86_64, this sets the higher half of the register to zero.
+ For i386, this is equivalent to a mov. */
+ tcg_gen_ext32u_tl(cpu_regs[reg], t0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mov_tl(cpu_regs[reg], t0);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+}
+
+static inline
+void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
+{
+ if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
+ tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
+ } else {
+ tcg_gen_mov_tl(t0, cpu_regs[reg]);
+ }
+}
+
+static void gen_add_A0_im(DisasContext *s, int val)
+{
+ tcg_gen_addi_tl(s->A0, s->A0, val);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(s->A0, s->A0);
+ }
+}
+
+static inline void gen_op_jmp_v(TCGv dest)
+{
+ tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
+}
+
+static inline
+void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
+{
+ tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
+ gen_op_mov_reg_v(s, size, reg, s->tmp0);
+}
+
+static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
+{
+ tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
+ gen_op_mov_reg_v(s, size, reg, s->tmp0);
+}
+
+static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+ tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+ tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
+{
+ if (d == OR_TMP0) {
+ gen_op_st_v(s, idx, s->T0, s->A0);
+ } else {
+ gen_op_mov_reg_v(s, idx, d, s->T0);
+ }
+}
+
+static inline void gen_jmp_im(DisasContext *s, target_ulong pc)
+{
+ tcg_gen_movi_tl(s->tmp0, pc);
+ gen_op_jmp_v(s->tmp0);
+}
+
+/* Compute SEG:REG into A0. SEG is selected from the override segment
+ (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
+ indicate no override. */
+static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
+ int def_seg, int ovr_seg)
+{
+ switch (aflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ if (ovr_seg < 0) {
+ tcg_gen_mov_tl(s->A0, a0);
+ return;
+ }
+ break;
+#endif
+ case MO_32:
+ /* 32 bit address */
+ if (ovr_seg < 0 && ADDSEG(s)) {
+ ovr_seg = def_seg;
+ }
+ if (ovr_seg < 0) {
+ tcg_gen_ext32u_tl(s->A0, a0);
+ return;
+ }
+ break;
+ case MO_16:
+ /* 16 bit address */
+ tcg_gen_ext16u_tl(s->A0, a0);
+ a0 = s->A0;
+ if (ovr_seg < 0) {
+ if (ADDSEG(s)) {
+ ovr_seg = def_seg;
+ } else {
+ return;
+ }
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+ if (ovr_seg >= 0) {
+ TCGv seg = cpu_seg_base[ovr_seg];
+
+ if (aflag == MO_64) {
+ tcg_gen_add_tl(s->A0, a0, seg);
+ } else if (CODE64(s)) {
+ tcg_gen_ext32u_tl(s->A0, a0);
+ tcg_gen_add_tl(s->A0, s->A0, seg);
+ } else {
+ tcg_gen_add_tl(s->A0, a0, seg);
+ tcg_gen_ext32u_tl(s->A0, s->A0);
+ }
+ }
+}
+
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
+}
+
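+/*
+ * Load the direction flag (stored as 1 or -1) scaled by the operand
+ * size, yielding the +/-1, 2, 4 or 8 step used to advance ESI/EDI in
+ * string operations.
+ */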
+static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
+{
+ tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_shli_tl(s->T0, s->T0, ot);
+}
+
+static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
+{
+ switch (size) {
+ case MO_8:
+ if (sign) {
+ tcg_gen_ext8s_tl(dst, src);
+ } else {
+ tcg_gen_ext8u_tl(dst, src);
+ }
+ return dst;
+ case MO_16:
+ if (sign) {
+ tcg_gen_ext16s_tl(dst, src);
+ } else {
+ tcg_gen_ext16u_tl(dst, src);
+ }
+ return dst;
+#ifdef TARGET_X86_64
+ case MO_32:
+ if (sign) {
+ tcg_gen_ext32s_tl(dst, src);
+ } else {
+ tcg_gen_ext32u_tl(dst, src);
+ }
+ return dst;
+#endif
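+ /* MO_64 (and MO_32 when target_ulong is 32 bits) already fill the
+ register, so no extension is needed and src is returned unchanged. */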
+ default:
+ return src;
+ }
+}
+
+static void gen_extu(MemOp ot, TCGv reg)
+{
+ gen_ext_tl(reg, reg, ot, false);
+}
+
+static void gen_exts(MemOp ot, TCGv reg)
+{
+ gen_ext_tl(reg, reg, ot, true);
+}
+
+static inline
+void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
+{
+ tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
+ gen_extu(size, s->tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_NE, s->tmp0, 0, label1);
+}
+
+static inline
+void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
+{
+ tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
+ gen_extu(size, s->tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
+}
+
+static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
+{
+ switch (ot) {
+ case MO_8:
+ gen_helper_inb(v, cpu_env, n);
+ break;
+ case MO_16:
+ gen_helper_inw(v, cpu_env, n);
+ break;
+ case MO_32:
+ gen_helper_inl(v, cpu_env, n);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
+{
+ switch (ot) {
+ case MO_8:
+ gen_helper_outb(cpu_env, v, n);
+ break;
+ case MO_16:
+ gen_helper_outw(cpu_env, v, n);
+ break;
+ case MO_32:
+ gen_helper_outl(cpu_env, v, n);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+/*
+ * Validate that access to [port, port + 1<<ot) is allowed.
+ * Raise #GP, or VMM exit if not.
+ */
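+/*
+ * For example (illustrative), a 16-bit access (ot == MO_16) to port 0x3f8
+ * is allowed only if the TSS I/O bitmap permits both 0x3f8 and 0x3f9.
+ */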
+static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
+ uint32_t svm_flags)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * We do not implement the ioperm(2) syscall, so the TSS check
+ * will always fail.
+ */
+ gen_exception_gpf(s);
+ return false;
+#else
+ if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
+ gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
+ }
+ if (GUEST(s)) {
+ target_ulong cur_eip = s->base.pc_next - s->cs_base;
+ target_ulong next_eip = s->pc - s->cs_base;
+
+ gen_update_cc_op(s);
+ gen_jmp_im(s, cur_eip);
+ if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ svm_flags |= SVM_IOIO_REP_MASK;
+ }
+ svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
+ gen_helper_svm_check_io(cpu_env, port,
+ tcg_constant_i32(svm_flags),
+ tcg_constant_i32(next_eip - cur_eip));
+ }
+ return true;
+#endif
+}
+
+static inline void gen_movs(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_ESI);
+ gen_op_add_reg_T0(s, s->aflag, R_EDI);
+}
+
+static void gen_op_update1_cc(DisasContext *s)
+{
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+}
+
+static void gen_op_update2_cc(DisasContext *s)
+{
+ tcg_gen_mov_tl(cpu_cc_src, s->T1);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+}
+
+static void gen_op_update3_cc(DisasContext *s, TCGv reg)
+{
+ tcg_gen_mov_tl(cpu_cc_src2, reg);
+ tcg_gen_mov_tl(cpu_cc_src, s->T1);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+}
+
+static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
+{
+ tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
+}
+
+static void gen_op_update_neg_cc(DisasContext *s)
+{
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_neg_tl(cpu_cc_src, s->T0);
+ tcg_gen_movi_tl(s->cc_srcT, 0);
+}
+
+/* compute all eflags to cc_src */
+static void gen_compute_eflags(DisasContext *s)
+{
+ TCGv zero, dst, src1, src2;
+ int live, dead;
+
+ if (s->cc_op == CC_OP_EFLAGS) {
+ return;
+ }
+ if (s->cc_op == CC_OP_CLR) {
+ tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
+ set_cc_op(s, CC_OP_EFLAGS);
+ return;
+ }
+
+ zero = NULL;
+ dst = cpu_cc_dst;
+ src1 = cpu_cc_src;
+ src2 = cpu_cc_src2;
+
+ /* Take care to not read values that are not live. */
+ live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
+ dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
+ if (dead) {
+ zero = tcg_const_tl(0);
+ if (dead & USES_CC_DST) {
+ dst = zero;
+ }
+ if (dead & USES_CC_SRC) {
+ src1 = zero;
+ }
+ if (dead & USES_CC_SRC2) {
+ src2 = zero;
+ }
+ }
+
+ gen_update_cc_op(s);
+ gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
+ set_cc_op(s, CC_OP_EFLAGS);
+
+ if (dead) {
+ tcg_temp_free(zero);
+ }
+}
+
+typedef struct CCPrepare {
+ TCGCond cond;
+ TCGv reg;
+ TCGv reg2;
+ target_ulong imm;
+ target_ulong mask;
+ bool use_reg2;
+ bool no_setcond;
+} CCPrepare;
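+/*
+ * For example (illustrative), { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ * .mask = CC_C } describes the condition (cpu_cc_src & CC_C) != 0
+ * without emitting any TCG ops yet; the consumer decides how to test it.
+ */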
+
+/* compute eflags.C to reg */
+static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
+{
+ TCGv t0, t1;
+ int size, shift;
+
+ switch (s->cc_op) {
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
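+ /*
+ * A sketch of the idea (illustrative): SUB sets the carry (borrow)
+ * exactly when the zero-extended minuend is below the zero-extended
+ * subtrahend, e.g. 8-bit 0x01 - 0x02 borrows because 1 < 2 unsigned.
+ */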
+ size = s->cc_op - CC_OP_SUBB;
+ t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
+ /* If no temporary was used, be careful not to alias t1 and t0. */
+ t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
+ tcg_gen_mov_tl(t0, s->cc_srcT);
+ gen_extu(size, t0);
+ goto add_sub;
+
+ case CC_OP_ADDB ... CC_OP_ADDQ:
+ /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
+ size = s->cc_op - CC_OP_ADDB;
+ t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
+ t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+ add_sub:
+ return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
+ .reg2 = t1, .mask = -1, .use_reg2 = true };
+
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
+ case CC_OP_CLR:
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+
+ case CC_OP_INCB ... CC_OP_INCQ:
+ case CC_OP_DECB ... CC_OP_DECQ:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = -1, .no_setcond = true };
+
+ case CC_OP_SHLB ... CC_OP_SHLQ:
+ /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
+ size = s->cc_op - CC_OP_SHLB;
+ shift = (8 << size) - 1;
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = (target_ulong)1 << shift };
+
+ case CC_OP_MULB ... CC_OP_MULQ:
+ return (CCPrepare) { .cond = TCG_COND_NE,
+ .reg = cpu_cc_src, .mask = -1 };
+
+ case CC_OP_BMILGB ... CC_OP_BMILGQ:
+ size = s->cc_op - CC_OP_BMILGB;
+ t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+
+ case CC_OP_ADCX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
+ .mask = -1, .no_setcond = true };
+
+ case CC_OP_EFLAGS:
+ case CC_OP_SARB ... CC_OP_SARQ:
+ /* CC_SRC & 1 */
+ return (CCPrepare) { .cond = TCG_COND_NE,
+ .reg = cpu_cc_src, .mask = CC_C };
+
+ default:
+ /* The need to compute only C from CC_OP_DYNAMIC is important
+ in efficiently implementing e.g. INC at the start of a TB. */
+ gen_update_cc_op(s);
+ gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
+ cpu_cc_src2, cpu_cc_op);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = -1, .no_setcond = true };
+ }
+}
+
+/* compute eflags.P to reg */
+static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
+{
+ gen_compute_eflags(s);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_P };
+}
+
+/* compute eflags.S to reg */
+static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_DYNAMIC:
+ gen_compute_eflags(s);
+ /* FALLTHRU */
+ case CC_OP_EFLAGS:
+ case CC_OP_ADCX:
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_S };
+ case CC_OP_CLR:
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+ default:
+ {
+ MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+ TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
+ return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
+ }
+ }
+}
+
+/* compute eflags.O to reg */
+static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
+ .mask = -1, .no_setcond = true };
+ case CC_OP_CLR:
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+ default:
+ gen_compute_eflags(s);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_O };
+ }
+}
+
+/* compute eflags.Z to reg */
+static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_DYNAMIC:
+ gen_compute_eflags(s);
+ /* FALLTHRU */
+ case CC_OP_EFLAGS:
+ case CC_OP_ADCX:
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_Z };
+ case CC_OP_CLR:
+ return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
+ .mask = -1 };
+ default:
+ {
+ MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+ TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+ }
+ }
+}
+
+/* Perform a conditional store into register 'reg' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
+{
+ int inv, jcc_op, cond;
+ MemOp size;
+ CCPrepare cc;
+ TCGv t0;
+
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
+
+ switch (s->cc_op) {
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ /* We optimize relational operators for the cmp/jcc case. */
+ size = s->cc_op - CC_OP_SUBB;
+ switch (jcc_op) {
+ case JCC_BE:
+ tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
+ gen_extu(size, s->tmp4);
+ t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
+ cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
+ .reg2 = t0, .mask = -1, .use_reg2 = true };
+ break;
+
+ case JCC_L:
+ cond = TCG_COND_LT;
+ goto fast_jcc_l;
+ case JCC_LE:
+ cond = TCG_COND_LE;
+ fast_jcc_l:
+ tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
+ gen_exts(size, s->tmp4);
+ t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
+ cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
+ .reg2 = t0, .mask = -1, .use_reg2 = true };
+ break;
+
+ default:
+ goto slow_jcc;
+ }
+ break;
+
+ default:
+ slow_jcc:
+ /* This actually generates good code for JC, JZ and JS. */
+ switch (jcc_op) {
+ case JCC_O:
+ cc = gen_prepare_eflags_o(s, reg);
+ break;
+ case JCC_B:
+ cc = gen_prepare_eflags_c(s, reg);
+ break;
+ case JCC_Z:
+ cc = gen_prepare_eflags_z(s, reg);
+ break;
+ case JCC_BE:
+ gen_compute_eflags(s);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_Z | CC_C };
+ break;
+ case JCC_S:
+ cc = gen_prepare_eflags_s(s, reg);
+ break;
+ case JCC_P:
+ cc = gen_prepare_eflags_p(s, reg);
+ break;
+ case JCC_L:
+ gen_compute_eflags(s);
+ if (reg == cpu_cc_src) {
+ reg = s->tmp0;
+ }
+ tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+ tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = CC_S };
+ break;
+ default:
+ case JCC_LE:
+ gen_compute_eflags(s);
+ if (reg == cpu_cc_src) {
+ reg = s->tmp0;
+ }
+ tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+ tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = CC_S | CC_Z };
+ break;
+ }
+ break;
+ }
+
+ if (inv) {
+ cc.cond = tcg_invert_cond(cc.cond);
+ }
+ return cc;
+}
+
+static void gen_setcc1(DisasContext *s, int b, TCGv reg)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, reg);
+
+ if (cc.no_setcond) {
+ if (cc.cond == TCG_COND_EQ) {
+ tcg_gen_xori_tl(reg, cc.reg, 1);
+ } else {
+ tcg_gen_mov_tl(reg, cc.reg);
+ }
+ return;
+ }
+
+ if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
+ cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
+ tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
+ tcg_gen_andi_tl(reg, reg, 1);
+ return;
+ }
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(reg, cc.reg, cc.mask);
+ cc.reg = reg;
+ }
+ if (cc.use_reg2) {
+ tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
+ } else {
+ tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
+ }
+}
+
+static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
+{
+ gen_setcc1(s, JCC_B << 1, reg);
+}
+
+/* Generate a conditional jump to label 'l1' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, s->T0);
+
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
+ cc.reg = s->T0;
+ }
+ if (cc.use_reg2) {
+ tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+ } else {
+ tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+ }
+}
+
+/* Generate a conditional jump to label 'l1' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used.
+ A translation block must end soon. */
+static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, s->T0);
+
+ gen_update_cc_op(s);
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
+ cc.reg = s->T0;
+ }
+ set_cc_op(s, CC_OP_DYNAMIC);
+ if (cc.use_reg2) {
+ tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+ } else {
+ tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+ }
+}
+
+/* XXX: does not work with gdbstub "ice" single step - not a
+ serious problem */
+static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ gen_op_jnz_ecx(s, s->aflag, l1);
+ gen_set_label(l2);
+ gen_jmp_tb(s, next_eip, 1);
+ gen_set_label(l1);
+ return l2;
+}
+
+static inline void gen_stos(DisasContext *s, MemOp ot)
+{
+ gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_EDI);
+}
+
+static inline void gen_lods(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_ESI);
+}
+
+static inline void gen_scas(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ gen_op(s, OP_CMPL, ot, R_EAX);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_EDI);
+}
+
+static inline void gen_cmps(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ gen_string_movl_A0_ESI(s);
+ gen_op(s, OP_CMPL, ot, OR_TMP0);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_ESI);
+ gen_op_add_reg_T0(s, s->aflag, R_EDI);
+}
+
+static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
+{
+ if (s->flags & HF_IOBPT_MASK) {
+#ifdef CONFIG_USER_ONLY
+ /* user-mode cpu should not be in IOBPT mode */
+ g_assert_not_reached();
+#else
+ TCGv_i32 t_size = tcg_const_i32(1 << ot);
+ TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
+
+ gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
+ tcg_temp_free_i32(t_size);
+ tcg_temp_free(t_next);
+#endif /* CONFIG_USER_ONLY */
+ }
+}
+
+static inline void gen_ins(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_EDI(s);
+ /* Note: we must do this dummy write first to be restartable in
+ case of a page fault. */
+ tcg_gen_movi_tl(s->T0, 0);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
+ gen_helper_in_func(ot, s->T0, s->tmp2_i32);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_EDI);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+}
+
+static inline void gen_outs(DisasContext *s, MemOp ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
+ gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
+ gen_op_movl_T0_Dshift(s, ot);
+ gen_op_add_reg_T0(s, s->aflag, R_ESI);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+}
+
+/* Same method as Valgrind: we generate jumps to the current or next
+ instruction. */
+#define GEN_REPZ(op) \
+static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
+ target_ulong cur_eip, target_ulong next_eip) \
+{ \
+ TCGLabel *l2; \
+ gen_update_cc_op(s); \
+ l2 = gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
+ /* A loop would cause two single-step exceptions if ECX == 1 \
+ before the rep string instruction. */ \
+ if (s->repz_opt) { \
+ gen_op_jz_ecx(s, s->aflag, l2); \
+ } \
+ gen_jmp(s, cur_eip); \
+}
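+/*
+ * A sketch (illustrative, not literal TCG output) of the control flow that
+ * GEN_REPZ(movs) expands to:
+ *
+ *     if (ECX == 0) jump to next_eip;    // via gen_jz_ecx_string
+ *     gen_movs(s, ot);                   // one iteration of the string op
+ *     ECX -= 1;
+ *     if (repz_opt && ECX == 0) jump to next_eip;
+ *     jump to cur_eip;                   // loop by re-executing the insn
+ */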
+
+#define GEN_REPZ2(op) \
+static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
+ target_ulong cur_eip, \
+ target_ulong next_eip, \
+ int nz) \
+{ \
+ TCGLabel *l2; \
+ gen_update_cc_op(s); \
+ l2 = gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
+ gen_update_cc_op(s); \
+ gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
+ if (s->repz_opt) { \
+ gen_op_jz_ecx(s, s->aflag, l2); \
+ } \
+ gen_jmp(s, cur_eip); \
+}
+
+GEN_REPZ(movs)
+GEN_REPZ(stos)
+GEN_REPZ(lods)
+GEN_REPZ(ins)
+GEN_REPZ(outs)
+GEN_REPZ2(scas)
+GEN_REPZ2(cmps)
+
+static void gen_helper_fp_arith_ST0_FT0(int op)
+{
+ switch (op) {
+ case 0:
+ gen_helper_fadd_ST0_FT0(cpu_env);
+ break;
+ case 1:
+ gen_helper_fmul_ST0_FT0(cpu_env);
+ break;
+ case 2:
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 3:
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 4:
+ gen_helper_fsub_ST0_FT0(cpu_env);
+ break;
+ case 5:
+ gen_helper_fsubr_ST0_FT0(cpu_env);
+ break;
+ case 6:
+ gen_helper_fdiv_ST0_FT0(cpu_env);
+ break;
+ case 7:
+ gen_helper_fdivr_ST0_FT0(cpu_env);
+ break;
+ }
+}
+
+/* NOTE the exception in "r" op ordering */
+static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
+{
+ TCGv_i32 tmp = tcg_const_i32(opreg);
+ switch (op) {
+ case 0:
+ gen_helper_fadd_STN_ST0(cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_fmul_STN_ST0(cpu_env, tmp);
+ break;
+ case 4:
+ gen_helper_fsubr_STN_ST0(cpu_env, tmp);
+ break;
+ case 5:
+ gen_helper_fsub_STN_ST0(cpu_env, tmp);
+ break;
+ case 6:
+ gen_helper_fdivr_STN_ST0(cpu_env, tmp);
+ break;
+ case 7:
+ gen_helper_fdiv_STN_ST0(cpu_env, tmp);
+ break;
+ }
+}
+
+static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
+{
+ gen_update_cc_op(s);
+ gen_jmp_im(s, cur_eip);
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Generate #UD for the current instruction. The assumption here is that
+ the instruction is known, but it isn't allowed in the current cpu mode. */
+static void gen_illegal_opcode(DisasContext *s)
+{
+ gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
+}
+
+/* Generate #GP for the current instruction. */
+static void gen_exception_gpf(DisasContext *s)
+{
+ gen_exception(s, EXCP0D_GPF, s->pc_start - s->cs_base);
+}
+
+/* Check for cpl == 0; if not, raise #GP and return false. */
+static bool check_cpl0(DisasContext *s)
+{
+ if (CPL(s) == 0) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
+/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
+static bool check_vm86_iopl(DisasContext *s)
+{
+ if (!VM86(s) || IOPL(s) == 3) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
+/* Check for iopl allowing access; if not, raise #GP and return false. */
+static bool check_iopl(DisasContext *s)
+{
+ if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
+ return true;
+ }
+ gen_exception_gpf(s);
+ return false;
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
+{
+ if (d != OR_TMP0) {
+ if (s1->prefix & PREFIX_LOCK) {
+ /* Lock prefix when destination is not memory. */
+ gen_illegal_opcode(s1);
+ return;
+ }
+ gen_op_mov_v_reg(s1, ot, s1->T0, d);
+ } else if (!(s1->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s1, ot, s1->T0, s1->A0);
+ }
+ switch(op) {
+ case OP_ADCL:
+ gen_compute_eflags_c(s1, s1->tmp4);
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
+ tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
+ tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update3_cc(s1, s1->tmp4);
+ set_cc_op(s1, CC_OP_ADCB + ot);
+ break;
+ case OP_SBBL:
+ gen_compute_eflags_c(s1, s1->tmp4);
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
+ tcg_gen_neg_tl(s1->T0, s1->T0);
+ tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
+ tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update3_cc(s1, s1->tmp4);
+ set_cc_op(s1, CC_OP_SBBB + ot);
+ break;
+ case OP_ADDL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update2_cc(s1);
+ set_cc_op(s1, CC_OP_ADDB + ot);
+ break;
+ case OP_SUBL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_neg_tl(s1->T0, s1->T1);
+ tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
+ s1->mem_index, ot | MO_LE);
+ tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
+ } else {
+ tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
+ tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update2_cc(s1);
+ set_cc_op(s1, CC_OP_SUBB + ot);
+ break;
+ default:
+ case OP_ANDL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc(s1);
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_ORL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc(s1);
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_XORL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc(s1);
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_CMPL:
+ tcg_gen_mov_tl(cpu_cc_src, s1->T1);
+ tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
+ tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
+ set_cc_op(s1, CC_OP_SUBB + ot);
+ break;
+ }
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
+{
+ if (s1->prefix & PREFIX_LOCK) {
+ if (d != OR_TMP0) {
+ /* Lock prefix when destination is not memory */
+ gen_illegal_opcode(s1);
+ return;
+ }
+ tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
+ tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ if (d != OR_TMP0) {
+ gen_op_mov_v_reg(s1, ot, s1->T0, d);
+ } else {
+ gen_op_ld_v(s1, ot, s1->T0, s1->A0);
+ }
+ tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+
+ gen_compute_eflags_c(s1, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
+ set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
+}
+
+static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
+ TCGv shm1, TCGv count, bool is_right)
+{
+ TCGv_i32 z32, s32, oldop;
+ TCGv z_tl;
+
+ /* Store the results into the CC variables. If we know that the
+ variable must be dead, store unconditionally. Otherwise we must
+ take care not to disrupt the current contents, since a shift by
+ zero leaves the flags unchanged. */
+ z_tl = tcg_const_tl(0);
+ if (cc_op_live[s->cc_op] & USES_CC_DST) {
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
+ result, cpu_cc_dst);
+ } else {
+ tcg_gen_mov_tl(cpu_cc_dst, result);
+ }
+ if (cc_op_live[s->cc_op] & USES_CC_SRC) {
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
+ shm1, cpu_cc_src);
+ } else {
+ tcg_gen_mov_tl(cpu_cc_src, shm1);
+ }
+ tcg_temp_free(z_tl);
+
+ /* Get the two potential CC_OP values into temporaries. */
+ tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+ if (s->cc_op == CC_OP_DYNAMIC) {
+ oldop = cpu_cc_op;
+ } else {
+ tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
+ oldop = s->tmp3_i32;
+ }
+
+ /* Conditionally store the CC_OP value. */
+ z32 = tcg_const_i32(0);
+ s32 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(s32, count);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
+ tcg_temp_free_i32(z32);
+ tcg_temp_free_i32(s32);
+
+ /* The CC_OP value is no longer predictable. */
+ set_cc_op(s, CC_OP_DYNAMIC);
+}
+
+static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
+ int is_right, int is_arith)
+{
+ target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ tcg_gen_andi_tl(s->T1, s->T1, mask);
+ tcg_gen_subi_tl(s->tmp0, s->T1, 1);
+
+ if (is_right) {
+ if (is_arith) {
+ gen_exts(ot, s->T0);
+ tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_sar_tl(s->T0, s->T0, s->T1);
+ } else {
+ gen_extu(ot, s->T0);
+ tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shr_tl(s->T0, s->T0, s->T1);
+ }
+ } else {
+ tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shl_tl(s->T0, s->T0, s->T1);
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
+}
+
+static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
+ int is_right, int is_arith)
+{
+ int mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ op2 &= mask;
+ if (op2 != 0) {
+ if (is_right) {
+ if (is_arith) {
+ gen_exts(ot, s->T0);
+ tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
+ tcg_gen_sari_tl(s->T0, s->T0, op2);
+ } else {
+ gen_extu(ot, s->T0);
+ tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
+ tcg_gen_shri_tl(s->T0, s->T0, op2);
+ }
+ } else {
+ tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
+ tcg_gen_shli_tl(s->T0, s->T0, op2);
+ }
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ /* Update eflags only for a non-zero shift count. */
+ if (op2 != 0) {
+ tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+ }
+}
+
+static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
+{
+ target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+ TCGv_i32 t0, t1;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ tcg_gen_andi_tl(s->T1, s->T1, mask);
+
+ switch (ot) {
+ case MO_8:
+ /* Replicate the 8-bit input so that a 32-bit rotate works. */
+ tcg_gen_ext8u_tl(s->T0, s->T0);
+ tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
+ goto do_long;
+ case MO_16:
+ /* Replicate the 16-bit input so that a 32-bit rotate works. */
+ tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
+ goto do_long;
+ do_long:
+#ifdef TARGET_X86_64
+ case MO_32:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
+ if (is_right) {
+ tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
+ } else {
+ tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
+ }
+ tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ break;
+#endif
+ default:
+ if (is_right) {
+ tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
+ } else {
+ tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
+ }
+ break;
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ /* We'll need the flags computed into CC_SRC. */
+ gen_compute_eflags(s);
+
+ /* The value that was "rotated out" is now present at the other end
+ of the word. Compute C into CC_DST and O into CC_SRC2. Note that
+ since we've computed the flags into CC_SRC, these variables are
+ currently dead. */
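+ /*
+ * For example (illustrative), after a 32-bit ROL the bit rotated out
+ * of the top has landed in bit 0, so C = T0 & 1, and O is computed as
+ * MSB ^ C, which matches the architectural definition of OF for
+ * rotates by one (OF is undefined for larger counts).
+ */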
+ if (is_right) {
+ tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+ } else {
+ tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
+ }
+ tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+ tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+
+ /* Now conditionally store the new CC_OP value. If the rotate count
+ is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
+ Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
+ exactly as we computed above. */
+ t0 = tcg_const_i32(0);
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t1, s->T1);
+ tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
+ tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
+ s->tmp2_i32, s->tmp3_i32);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+
+ /* The CC_OP value is no longer predictable. */
+ set_cc_op(s, CC_OP_DYNAMIC);
+}
+
+static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
+ int is_right)
+{
+ int mask = (ot == MO_64 ? 0x3f : 0x1f);
+ int shift;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ op2 &= mask;
+ if (op2 != 0) {
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_32:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ if (is_right) {
+ tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
+ } else {
+ tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
+ }
+ tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ break;
+#endif
+ default:
+ if (is_right) {
+ tcg_gen_rotri_tl(s->T0, s->T0, op2);
+ } else {
+ tcg_gen_rotli_tl(s->T0, s->T0, op2);
+ }
+ break;
+ case MO_8:
+ mask = 7;
+ goto do_shifts;
+ case MO_16:
+ mask = 15;
+ do_shifts:
+ shift = op2 & mask;
+ if (is_right) {
+ shift = mask + 1 - shift;
+ }
+ gen_extu(ot, s->T0);
+ tcg_gen_shli_tl(s->tmp0, s->T0, shift);
+ tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
+ tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
+ break;
+ }
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ if (op2 != 0) {
+ /* Compute the flags into CC_SRC. */
+ gen_compute_eflags(s);
+
+ /* The value that was "rotated out" is now present at the other end
+ of the word. Compute C into CC_DST and O into CC_SRC2. Note that
+ since we've computed the flags into CC_SRC, these variables are
+ currently dead. */
+ if (is_right) {
+ tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+ } else {
+ tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
+ }
+ tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+ tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+ set_cc_op(s, CC_OP_ADCOX);
+ }
+}
+
+/* XXX: add faster immediate = 1 case */
+static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
+ int is_right)
+{
+ gen_compute_eflags(s);
+ assert(s->cc_op == CC_OP_EFLAGS);
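+ /*
+ * The rcl/rcr helpers read and write the carry flag directly in
+ * cc_src, which is only meaningful once every flag has been
+ * materialized there, hence the CC_OP_EFLAGS assertion above.
+ */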
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ if (is_right) {
+ switch (ot) {
+ case MO_8:
+ gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
+ break;
+ case MO_16:
+ gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
+ break;
+ case MO_32:
+ gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+ } else {
+ switch (ot) {
+ case MO_8:
+ gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
+ break;
+ case MO_16:
+ gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
+ break;
+ case MO_32:
+ gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+ }
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+}
+
+/* XXX: add faster immediate case */
+static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
+ bool is_right, TCGv count_in)
+{
+ target_ulong mask = (ot == MO_64 ? 63 : 31);
+ TCGv count;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, op1);
+ }
+
+ count = tcg_temp_new();
+ tcg_gen_andi_tl(count, count_in, mask);
+
+ switch (ot) {
+ case MO_16:
+ /* Note: we implement the Intel behaviour for shift count > 16.
+ This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
+ portion by constructing it as a 32-bit value. */
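+ /*
+ * Worked example (operand values assumed for illustration): shrdw
+ * with A = 0x1234, B = 0xabcd and count 20 shifts the 48-bit value
+ * 0x1234_abcd_1234 right by 20, leaving 0x4abc in the low word.
+ */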
+ if (is_right) {
+ tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
+ tcg_gen_mov_tl(s->T1, s->T0);
+ tcg_gen_mov_tl(s->T0, s->tmp0);
+ } else {
+ tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
+ }
+ /*
+ * If TARGET_X86_64 is defined, fall through into the MO_32 case;
+ * otherwise fall through to the default case.
+ */
+ case MO_32:
+#ifdef TARGET_X86_64
+ /* Concatenate the two 32-bit values and use a 64-bit shift. */
+ tcg_gen_subi_tl(s->tmp0, count, 1);
+ if (is_right) {
+ tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
+ tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shr_i64(s->T0, s->T0, count);
+ } else {
+ tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
+ tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
+ tcg_gen_shl_i64(s->T0, s->T0, count);
+ tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
+ tcg_gen_shri_i64(s->T0, s->T0, 32);
+ }
+ break;
+#endif
+ default:
+ tcg_gen_subi_tl(s->tmp0, count, 1);
+ if (is_right) {
+ tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
+
+ tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
+ tcg_gen_shr_tl(s->T0, s->T0, count);
+ tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
+ } else {
+ tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
+ if (ot == MO_16) {
+ /* Only needed if count > 16, for Intel behaviour. */
+ tcg_gen_subfi_tl(s->tmp4, 33, count);
+ tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
+ tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
+ }
+
+ tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
+ tcg_gen_shl_tl(s->T0, s->T0, count);
+ tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
+ }
+ tcg_gen_movi_tl(s->tmp4, 0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
+ s->tmp4, s->T1);
+ tcg_gen_or_tl(s->T0, s->T0, s->T1);
+ break;
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
+ tcg_temp_free(count);
+}
+
+static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
+{
+ if (s != OR_TMP1) {
+ gen_op_mov_v_reg(s1, ot, s1->T1, s);
+ }
+ switch(op) {
+ case OP_ROL:
+ gen_rot_rm_T1(s1, ot, d, 0);
+ break;
+ case OP_ROR:
+ gen_rot_rm_T1(s1, ot, d, 1);
+ break;
+ case OP_SHL:
+ case OP_SHL1:
+ gen_shift_rm_T1(s1, ot, d, 0, 0);
+ break;
+ case OP_SHR:
+ gen_shift_rm_T1(s1, ot, d, 1, 0);
+ break;
+ case OP_SAR:
+ gen_shift_rm_T1(s1, ot, d, 1, 1);
+ break;
+ case OP_RCL:
+ gen_rotc_rm_T1(s1, ot, d, 0);
+ break;
+ case OP_RCR:
+ gen_rotc_rm_T1(s1, ot, d, 1);
+ break;
+ }
+}
+
+static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
+{
+ switch(op) {
+ case OP_ROL:
+ gen_rot_rm_im(s1, ot, d, c, 0);
+ break;
+ case OP_ROR:
+ gen_rot_rm_im(s1, ot, d, c, 1);
+ break;
+ case OP_SHL:
+ case OP_SHL1:
+ gen_shift_rm_im(s1, ot, d, c, 0, 0);
+ break;
+ case OP_SHR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 0);
+ break;
+ case OP_SAR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 1);
+ break;
+ default:
+ /* currently not optimized */
+ tcg_gen_movi_tl(s1->T1, c);
+ gen_shift(s1, op, ot, d, OR_TMP1);
+ break;
+ }
+}
+
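+/* The architectural limit: an x86 instruction longer than 15 bytes,
+ however it comes to be that long, must raise #GP. */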
+#define X86_MAX_INSN_LENGTH 15
+
+static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
+{
+ uint64_t pc = s->pc;
+
+ s->pc += num_bytes;
+ if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
+ /* If the instruction's 16th byte is on a different page than the 1st, a
+ * page fault on the second page wins over the general protection fault
+ * caused by the instruction being too long.
+ * This can happen even if the operand is only one byte long!
+ */
+ if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
+ volatile uint8_t unused =
+ cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
+ (void) unused;
+ }
+ siglongjmp(s->jmpbuf, 1);
+ }
+
+ return pc;
+}
+
+static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
+{
+ return translator_ldub(env, &s->base, advance_pc(env, s, 1));
+}
+
+static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
+{
+ return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
+}
+
+static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
+{
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
+}
+
+static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
+{
+ return translator_ldl(env, &s->base, advance_pc(env, s, 4));
+}
+
+#ifdef TARGET_X86_64
+static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
+{
+ return translator_ldq(env, &s->base, advance_pc(env, s, 8));
+}
+#endif
+
+/* Decompose an address. */
+
+typedef struct AddressParts {
+ int def_seg;
+ int base;
+ int index;
+ int scale;
+ target_long disp;
+} AddressParts;
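+/*
+ * For example (illustrative), "mov eax, [esi + ebx*4 + 0x10]"
+ * (8B 44 9E 10) decomposes to def_seg = R_DS, base = R_ESI,
+ * index = R_EBX, scale = 2, disp = 0x10.
+ */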
+
+static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
+ int modrm)
+{
+ int def_seg, base, index, scale, mod, rm;
+ target_long disp;
+ bool havesib;
+
+ def_seg = R_DS;
+ index = -1;
+ scale = 0;
+ disp = 0;
+
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ base = rm | REX_B(s);
+
+ if (mod == 3) {
+ /* Normally filtered out earlier, but including this path
+ simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
+ goto done;
+ }
+
+ switch (s->aflag) {
+ case MO_64:
+ case MO_32:
+ havesib = 0;
+ if (rm == 4) {
+ int code = x86_ldub_code(env, s);
+ scale = (code >> 6) & 3;
+ index = ((code >> 3) & 7) | REX_X(s);
+ if (index == 4) {
+ index = -1; /* no index */
+ }
+ base = (code & 7) | REX_B(s);
+ havesib = 1;
+ }
+
+ switch (mod) {
+ case 0:
+ if ((base & 7) == 5) {
+ base = -1;
+ disp = (int32_t)x86_ldl_code(env, s);
+ if (CODE64(s) && !havesib) {
+ base = -2;
+ disp += s->pc + s->rip_offset;
+ }
+ }
+ break;
+ case 1:
+ disp = (int8_t)x86_ldub_code(env, s);
+ break;
+ default:
+ case 2:
+ disp = (int32_t)x86_ldl_code(env, s);
+ break;
+ }
+
+ /* For correct popl handling with esp. */
+ if (base == R_ESP && s->popl_esp_hack) {
+ disp += s->popl_esp_hack;
+ }
+ if (base == R_EBP || base == R_ESP) {
+ def_seg = R_SS;
+ }
+ break;
+
+ case MO_16:
+ if (mod == 0) {
+ if (rm == 6) {
+ base = -1;
+ disp = x86_lduw_code(env, s);
+ break;
+ }
+ } else if (mod == 1) {
+ disp = (int8_t)x86_ldub_code(env, s);
+ } else {
+ disp = (int16_t)x86_lduw_code(env, s);
+ }
+
+ switch (rm) {
+ case 0:
+ base = R_EBX;
+ index = R_ESI;
+ break;
+ case 1:
+ base = R_EBX;
+ index = R_EDI;
+ break;
+ case 2:
+ base = R_EBP;
+ index = R_ESI;
+ def_seg = R_SS;
+ break;
+ case 3:
+ base = R_EBP;
+ index = R_EDI;
+ def_seg = R_SS;
+ break;
+ case 4:
+ base = R_ESI;
+ break;
+ case 5:
+ base = R_EDI;
+ break;
+ case 6:
+ base = R_EBP;
+ def_seg = R_SS;
+ break;
+ default:
+ case 7:
+ base = R_EBX;
+ break;
+ }
+ break;
+
+ default:
+ tcg_abort();
+ }
+
+ done:
+ return (AddressParts){ def_seg, base, index, scale, disp };
+}
+
+/* Compute the address, with a minimum number of TCG ops. */
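+/* For a bare register base such as [ebx] with no displacement, no op is
+ emitted at all: cpu_regs[R_EBX] itself is returned as the address
+ (illustrative of the fast path below). */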
+static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a)
+{
+ TCGv ea = NULL;
+
+ if (a.index >= 0) {
+ if (a.scale == 0) {
+ ea = cpu_regs[a.index];
+ } else {
+ tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
+ ea = s->A0;
+ }
+ if (a.base >= 0) {
+ tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
+ ea = s->A0;
+ }
+ } else if (a.base >= 0) {
+ ea = cpu_regs[a.base];
+ }
+ if (!ea) {
+ tcg_gen_movi_tl(s->A0, a.disp);
+ ea = s->A0;
+ } else if (a.disp != 0) {
+ tcg_gen_addi_tl(s->A0, ea, a.disp);
+ ea = s->A0;
+ }
+
+ return ea;
+}
+
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(s, a);
+ gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+}
+
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ (void)gen_lea_modrm_0(env, s, modrm);
+}
+
+/* Used for BNDCL, BNDCU, BNDCN. */
+static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+ TCGCond cond, TCGv_i64 bndv)
+{
+ TCGv ea = gen_lea_modrm_1(s, gen_lea_modrm_0(env, s, modrm));
+
+ tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
+ }
+ tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
+ tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
+ gen_helper_bndck(cpu_env, s->tmp2_i32);
+}
+
+/* used for LEA and MOV AX, mem */
+static void gen_add_A0_ds_seg(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
+}
+
+/* Generate a modrm memory load or store of 'reg'. TMP0 is used if
+ reg == OR_TMP0. */
+static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ MemOp ot, int reg, int is_store)
+{
+ int mod, rm;
+
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod == 3) {
+ if (is_store) {
+ if (reg != OR_TMP0) {
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ }
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ if (reg != OR_TMP0) {
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ }
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (is_store) {
+ if (reg != OR_TMP0) {
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ }
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ if (reg != OR_TMP0) {
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ }
+ }
+ }
+}
+
+static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
+{
+ uint32_t ret;
+
+ switch (ot) {
+ case MO_8:
+ ret = x86_ldub_code(env, s);
+ break;
+ case MO_16:
+ ret = x86_lduw_code(env, s);
+ break;
+ case MO_32:
+#ifdef TARGET_X86_64
+ case MO_64:
+#endif
+ ret = x86_ldl_code(env, s);
+ break;
+ default:
+ tcg_abort();
+ }
+ return ret;
+}
+
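+/*
+ * Immediates are at most 32 bits wide even at 64-bit operand size (they
+ * are then sign-extended); the one exception, MOV r64, imm64, is decoded
+ * separately with x86_ldq_code.
+ */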
+static inline int insn_const_size(MemOp ot)
+{
+ if (ot <= MO_32) {
+ return 1 << ot;
+ } else {
+ return 4;
+ }
+}
+
+static void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
+{
+ target_ulong pc = s->cs_base + eip;
+
+ if (translator_use_goto_tb(&s->base, pc)) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ gen_jmp_im(s, eip);
+ tcg_gen_exit_tb(s->base.tb, tb_num);
+ s->base.is_jmp = DISAS_NORETURN;
+ } else {
+ /* jump to another page */
+ gen_jmp_im(s, eip);
+ gen_jr(s, s->tmp0);
+ }
+}
+
+static inline void gen_jcc(DisasContext *s, int b,
+ target_ulong val, target_ulong next_eip)
+{
+ TCGLabel *l1, *l2;
+
+ if (s->jmp_opt) {
+ l1 = gen_new_label();
+ gen_jcc1(s, b, l1);
+
+ gen_goto_tb(s, 0, next_eip);
+
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, val);
+ } else {
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ gen_jcc1(s, b, l1);
+
+ gen_jmp_im(s, next_eip);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ gen_jmp_im(s, val);
+ gen_set_label(l2);
+ gen_eob(s);
+ }
+}
+
+static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
+ int modrm, int reg)
+{
+ CCPrepare cc;
+
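+ /*
+ * The memory operand, if any, is loaded unconditionally: an x86 CMOV
+ * performs its read even when the condition is false.
+ */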
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ cc = gen_prepare_cc(s, b, s->T1);
+ if (cc.mask != -1) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cc.reg, cc.mask);
+ cc.reg = t0;
+ }
+ if (!cc.use_reg2) {
+ cc.reg2 = tcg_const_tl(cc.imm);
+ }
+
+ tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
+ s->T0, cpu_regs[reg]);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+
+ if (cc.mask != -1) {
+ tcg_temp_free(cc.reg);
+ }
+ if (!cc.use_reg2) {
+ tcg_temp_free(cc.reg2);
+ }
+}
+
+static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
+{
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
+static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
+{
+ tcg_gen_ext16u_tl(s->T0, s->T0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+ tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
+}
+
+/* Move T0 to seg_reg and compute whether the CPU state may change. Never
+ call this function with seg_reg == R_CS. */
+static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
+{
+ if (PE(s) && !VM86(s)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), s->tmp2_i32);
+ /* Abort translation because the addseg or ss32 value may change.
+ For R_SS, translation must always stop, as special handling must
+ be done to disable hardware interrupts for the next
+ instruction. */
+ if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
+ s->base.is_jmp = DISAS_TOO_MANY;
+ }
+ } else {
+ gen_op_movl_seg_T0_vm(s, seg_reg);
+ if (seg_reg == R_SS) {
+ s->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
+{
+ /* no SVM activated; fast case */
+ if (likely(!GUEST(s))) {
+ return;
+ }
+ gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
+}
+
+static inline void gen_stack_update(DisasContext *s, int addend)
+{
+ gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
+}
+
+/* Generate a push. It depends on ss32, addseg and dflag. */
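+/*
+ * For example (illustrative), a 16-bit push in 32-bit code stores a
+ * two-byte value (d_ot == MO_16) while ESP is still decremented and
+ * wrapped at 32 bits (a_ot == MO_32).
+ */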
+static void gen_push_v(DisasContext *s, TCGv val)
+{
+ MemOp d_ot = mo_pushpop(s, s->dflag);
+ MemOp a_ot = mo_stacksize(s);
+ int size = 1 << d_ot;
+ TCGv new_esp = s->A0;
+
+ tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
+
+ if (!CODE64(s)) {
+ if (ADDSEG(s)) {
+ new_esp = s->tmp4;
+ tcg_gen_mov_tl(new_esp, s->A0);
+ }
+ gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+ }
+
+ gen_op_st_v(s, d_ot, val, s->A0);
+ gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
+}
+
+/* A two-step pop is necessary for precise exceptions. */
+static MemOp gen_pop_T0(DisasContext *s)
+{
+ MemOp d_ot = mo_pushpop(s, s->dflag);
+
+ gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, s->T0, s->A0);
+
+ return d_ot;
+}
+
+static inline void gen_pop_update(DisasContext *s, MemOp ot)
+{
+ gen_stack_update(s, 1 << ot);
+}
+
+static inline void gen_stack_A0(DisasContext *s)
+{
+ gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
+}
+
+static void gen_pusha(DisasContext *s)
+{
+ MemOp s_ot = SS32(s) ? MO_32 : MO_16;
+ MemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
+ gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
+ }
+
+ gen_stack_update(s, -8 * size);
+}
+
+static void gen_popa(DisasContext *s)
+{
+ MemOp s_ot = SS32(s) ? MO_32 : MO_16;
+ MemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ /* ESP is not reloaded */
+ if (7 - i == R_ESP) {
+ continue;
+ }
+ tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
+ gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, s->T0, s->A0);
+ gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
+ }
+
+ gen_stack_update(s, 8 * size);
+}
+
+static void gen_enter(DisasContext *s, int esp_addend, int level)
+{
+ MemOp d_ot = mo_pushpop(s, s->dflag);
+ MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
+ int size = 1 << d_ot;
+
+ /* Push BP; compute FrameTemp into T1. */
+ tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
+ gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
+
+ level &= 31;
+ if (level != 0) {
+ int i;
+
+ /* Copy level-1 pointers from the previous frame. */
+ for (i = 1; i < level; ++i) {
+ tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
+ gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
+
+ tcg_gen_subi_tl(s->A0, s->T1, size * i);
+ gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, s->tmp0, s->A0);
+ }
+
+ /* Push the current FrameTemp as the last level. */
+ tcg_gen_subi_tl(s->A0, s->T1, size * level);
+ gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, s->T1, s->A0);
+ }
+
+ /* Copy the FrameTemp value to EBP. */
+ gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
+
+ /* Compute the final value of ESP. */
+ tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
+ gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
+}
+
+static void gen_leave(DisasContext *s)
+{
+ MemOp d_ot = mo_pushpop(s, s->dflag);
+ MemOp a_ot = mo_stacksize(s);
+
+ gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, s->T0, s->A0);
+
+ tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
+
+ gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
+ gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
+}
+
+/* Similarly, except that the assumption here is that we don't decode
+ the instruction at all -- either a missing opcode, an unimplemented
+ feature, or just a bogus instruction stream. */
+static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
+{
+ gen_illegal_opcode(s);
+
+ if (qemu_loglevel_mask(LOG_UNIMP)) {
+ FILE *logfile = qemu_log_lock();
+ target_ulong pc = s->pc_start, end = s->pc;
+
+ qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
+ for (; pc < end; ++pc) {
+ qemu_log(" %02x", cpu_ldub_code(env, pc));
+ }
+ qemu_log("\n");
+ qemu_log_unlock(logfile);
+ }
+}
+
+/* An interrupt is different from an exception because of the
+ privilege checks. */
+static void gen_interrupt(DisasContext *s, int intno,
+ target_ulong cur_eip, target_ulong next_eip)
+{
+ gen_update_cc_op(s);
+ gen_jmp_im(s, cur_eip);
+ gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
+ tcg_const_i32(next_eip - cur_eip));
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+ if ((s->flags & mask) == 0) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ori_i32(t, t, mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags |= mask;
+ }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+ if (s->flags & mask) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_andi_i32(t, t, ~mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags &= ~mask;
+ }
+}
+
+/* Clear BND registers during legacy branches. */
+static void gen_bnd_jmp(DisasContext *s)
+{
+ /* Clear the registers only if BND prefix is missing, MPX is enabled,
+ and if the BNDREGs are known to be in use (non-zero) already.
+ The helper itself will check BNDPRESERVE at runtime. */
+ if ((s->prefix & PREFIX_REPNZ) == 0
+ && (s->flags & HF_MPX_EN_MASK) != 0
+ && (s->flags & HF_MPX_IU_MASK) != 0) {
+ gen_helper_bnd_jmp(cpu_env);
+ }
+}
+
+/* Generate an end of block. Trace exception is also generated if needed.
+ If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
+ If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
+ S->TF. This is used by the syscall/sysret insns. */
+static void
+do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
+{
+ gen_update_cc_op(s);
+
+ /* If several instructions disable interrupts, only the first does it. */
+ if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
+ } else {
+ gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
+ }
+
+ if (s->base.tb->flags & HF_RF_MASK) {
+ gen_helper_reset_rf(cpu_env);
+ }
+ if (recheck_tf) {
+ gen_helper_rechecking_single_step(cpu_env);
+ tcg_gen_exit_tb(NULL, 0);
+ } else if (s->flags & HF_TF_MASK) {
+ gen_helper_single_step(cpu_env);
+ } else if (jr) {
+ tcg_gen_lookup_and_goto_ptr();
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static inline void
+gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
+{
+ do_gen_eob_worker(s, inhibit, recheck_tf, false);
+}
+
+/* End of block.
+ If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
+static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
+{
+ gen_eob_worker(s, inhibit, false);
+}
+
+/* End of block, resetting the inhibit irq flag. */
+static void gen_eob(DisasContext *s)
+{
+ gen_eob_worker(s, false, false);
+}
+
+/* Jump to register */
+static void gen_jr(DisasContext *s, TCGv dest)
+{
+ do_gen_eob_worker(s, false, false, true);
+}
+
+/* Generate a jump to eip. No segment change must happen before, as a
+ direct jump to the next block may occur. */
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
+{
+ gen_update_cc_op(s);
+ set_cc_op(s, CC_OP_DYNAMIC);
+ if (s->jmp_opt) {
+ gen_goto_tb(s, tb_num, eip);
+ } else {
+ gen_jmp_im(s, eip);
+ gen_eob(s);
+ }
+}
+
+static void gen_jmp(DisasContext *s, target_ulong eip)
+{
+ gen_jmp_tb(s, eip, 0);
+}
+
+static inline void gen_ldq_env_A0(DisasContext *s, int offset)
+{
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
+}
+
+static inline void gen_stq_env_A0(DisasContext *s, int offset)
+{
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+}
+
+static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+{
+ int mem_index = s->mem_index;
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_addi_tl(s->tmp0, s->A0, 8);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_sto_env_A0(DisasContext *s, int offset)
+{
+ int mem_index = s->mem_index;
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_addi_tl(s->tmp0, s->A0, 8);
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+}
+
+static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
+{
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset)
+{
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset);
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
+}
+
+static inline void gen_op_movl(DisasContext *s, int d_offset, int s_offset)
+{
+ tcg_gen_ld_i32(s->tmp2_i32, cpu_env, s_offset);
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env, d_offset);
+}
+
+static inline void gen_op_movq_env_0(DisasContext *s, int d_offset)
+{
+ tcg_gen_movi_i64(s->tmp1_i64, 0);
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset);
+}
+
+typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
+typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
+typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
+typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+ TCGv_i32 val);
+typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
+typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+ TCGv val);
+
+#define SSE_SPECIAL ((void *)1)
+#define SSE_DUMMY ((void *)2)
+
+#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
+#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
+ gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
+
+static const SSEFunc_0_epp sse_op_table1[256][4] = {
+ /* 3DNow! extensions */
+ [0x0e] = { SSE_DUMMY }, /* femms */
+ [0x0f] = { SSE_DUMMY }, /* pf... */
+ /* pure SSE operations */
+ [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
+ [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
+ [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
+ [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
+ [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
+ [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
+
+ [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
+ [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
+ [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
+ [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
+ [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
+ [0x2f] = { gen_helper_comiss, gen_helper_comisd },
+ [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
+ [0x51] = SSE_FOP(sqrt),
+ [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
+ [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
+ [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
+ [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
+ [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
+ [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
+ [0x58] = SSE_FOP(add),
+ [0x59] = SSE_FOP(mul),
+ [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
+ gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
+ [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
+ [0x5c] = SSE_FOP(sub),
+ [0x5d] = SSE_FOP(min),
+ [0x5e] = SSE_FOP(div),
+ [0x5f] = SSE_FOP(max),
+
+ [0xc2] = SSE_FOP(cmpeq),
+ [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
+ (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
+
+ /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
+ [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+ [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+
+ /* MMX ops and their SSE extensions */
+ [0x60] = MMX_OP2(punpcklbw),
+ [0x61] = MMX_OP2(punpcklwd),
+ [0x62] = MMX_OP2(punpckldq),
+ [0x63] = MMX_OP2(packsswb),
+ [0x64] = MMX_OP2(pcmpgtb),
+ [0x65] = MMX_OP2(pcmpgtw),
+ [0x66] = MMX_OP2(pcmpgtl),
+ [0x67] = MMX_OP2(packuswb),
+ [0x68] = MMX_OP2(punpckhbw),
+ [0x69] = MMX_OP2(punpckhwd),
+ [0x6a] = MMX_OP2(punpckhdq),
+ [0x6b] = MMX_OP2(packssdw),
+ [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
+ [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
+ [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+ [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
+ (SSEFunc_0_epp)gen_helper_pshufd_xmm,
+ (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
+ (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
+ [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+ [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+ [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+ [0x74] = MMX_OP2(pcmpeqb),
+ [0x75] = MMX_OP2(pcmpeqw),
+ [0x76] = MMX_OP2(pcmpeql),
+ [0x77] = { SSE_DUMMY }, /* emms */
+ [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
+ [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
+ [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
+ [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
+ [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
+ [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+ [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+ [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
+ [0xd1] = MMX_OP2(psrlw),
+ [0xd2] = MMX_OP2(psrld),
+ [0xd3] = MMX_OP2(psrlq),
+ [0xd4] = MMX_OP2(paddq),
+ [0xd5] = MMX_OP2(pmullw),
+ [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+ [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+ [0xd8] = MMX_OP2(psubusb),
+ [0xd9] = MMX_OP2(psubusw),
+ [0xda] = MMX_OP2(pminub),
+ [0xdb] = MMX_OP2(pand),
+ [0xdc] = MMX_OP2(paddusb),
+ [0xdd] = MMX_OP2(paddusw),
+ [0xde] = MMX_OP2(pmaxub),
+ [0xdf] = MMX_OP2(pandn),
+ [0xe0] = MMX_OP2(pavgb),
+ [0xe1] = MMX_OP2(psraw),
+ [0xe2] = MMX_OP2(psrad),
+ [0xe3] = MMX_OP2(pavgw),
+ [0xe4] = MMX_OP2(pmulhuw),
+ [0xe5] = MMX_OP2(pmulhw),
+ [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
+ [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
+ [0xe8] = MMX_OP2(psubsb),
+ [0xe9] = MMX_OP2(psubsw),
+ [0xea] = MMX_OP2(pminsw),
+ [0xeb] = MMX_OP2(por),
+ [0xec] = MMX_OP2(paddsb),
+ [0xed] = MMX_OP2(paddsw),
+ [0xee] = MMX_OP2(pmaxsw),
+ [0xef] = MMX_OP2(pxor),
+ [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
+ [0xf1] = MMX_OP2(psllw),
+ [0xf2] = MMX_OP2(pslld),
+ [0xf3] = MMX_OP2(psllq),
+ [0xf4] = MMX_OP2(pmuludq),
+ [0xf5] = MMX_OP2(pmaddwd),
+ [0xf6] = MMX_OP2(psadbw),
+ [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
+ (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
+ [0xf8] = MMX_OP2(psubb),
+ [0xf9] = MMX_OP2(psubw),
+ [0xfa] = MMX_OP2(psubl),
+ [0xfb] = MMX_OP2(psubq),
+ [0xfc] = MMX_OP2(paddb),
+ [0xfd] = MMX_OP2(paddw),
+ [0xfe] = MMX_OP2(paddl),
+};
+
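+/* sse_op_table2 handles the 0f 71/72/73 shift-by-immediate groups:
+ the row is (opcode - 0x71) * 8 plus the modrm reg field, the column
+ selects MMX vs XMM. */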
+static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
+ [0 + 2] = MMX_OP2(psrlw),
+ [0 + 4] = MMX_OP2(psraw),
+ [0 + 6] = MMX_OP2(psllw),
+ [8 + 2] = MMX_OP2(psrld),
+ [8 + 4] = MMX_OP2(psrad),
+ [8 + 6] = MMX_OP2(pslld),
+ [16 + 2] = MMX_OP2(psrlq),
+ [16 + 3] = { NULL, gen_helper_psrldq_xmm },
+ [16 + 6] = MMX_OP2(psllq),
+ [16 + 7] = { NULL, gen_helper_pslldq_xmm },
+};
+
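+/* sse_op_table3a converts an integer to a float (cvtsi2ss/sd) and
+ sse_op_table3b a float to an integer; the 'i' variants take a 32-bit
+ integer and the 'q' variants a 64-bit one. */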
+static const SSEFunc_0_epi sse_op_table3ai[] = {
+ gen_helper_cvtsi2ss,
+ gen_helper_cvtsi2sd
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_0_epl sse_op_table3aq[] = {
+ gen_helper_cvtsq2ss,
+ gen_helper_cvtsq2sd
+};
+#endif
+
+static const SSEFunc_i_ep sse_op_table3bi[] = {
+ gen_helper_cvttss2si,
+ gen_helper_cvtss2si,
+ gen_helper_cvttsd2si,
+ gen_helper_cvtsd2si
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_l_ep sse_op_table3bq[] = {
+ gen_helper_cvttss2sq,
+ gen_helper_cvtss2sq,
+ gen_helper_cvttsd2sq,
+ gen_helper_cvtsd2sq
+};
+#endif
+
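+/* sse_op_table4 is indexed by the imm8 comparison predicate (0..7) of
+ cmpps/cmppd/cmpss/cmpsd and by the mandatory prefix. */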
+static const SSEFunc_0_epp sse_op_table4[8][4] = {
+ SSE_FOP(cmpeq),
+ SSE_FOP(cmplt),
+ SSE_FOP(cmple),
+ SSE_FOP(cmpunord),
+ SSE_FOP(cmpneq),
+ SSE_FOP(cmpnlt),
+ SSE_FOP(cmpnle),
+ SSE_FOP(cmpord),
+};
+
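+/* sse_op_table5 maps the 3DNow! opcode suffix byte, read after the
+ operands, to its helper. */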
+static const SSEFunc_0_epp sse_op_table5[256] = {
+ [0x0c] = gen_helper_pi2fw,
+ [0x0d] = gen_helper_pi2fd,
+ [0x1c] = gen_helper_pf2iw,
+ [0x1d] = gen_helper_pf2id,
+ [0x8a] = gen_helper_pfnacc,
+ [0x8e] = gen_helper_pfpnacc,
+ [0x90] = gen_helper_pfcmpge,
+ [0x94] = gen_helper_pfmin,
+ [0x96] = gen_helper_pfrcp,
+ [0x97] = gen_helper_pfrsqrt,
+ [0x9a] = gen_helper_pfsub,
+ [0x9e] = gen_helper_pfadd,
+ [0xa0] = gen_helper_pfcmpgt,
+ [0xa4] = gen_helper_pfmax,
+ [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
+ [0xa7] = gen_helper_movq, /* pfrsqit1 */
+ [0xaa] = gen_helper_pfsubr,
+ [0xae] = gen_helper_pfacc,
+ [0xb0] = gen_helper_pfcmpeq,
+ [0xb4] = gen_helper_pfmul,
+ [0xb6] = gen_helper_movq, /* pfrcpit2 */
+ [0xb7] = gen_helper_pmulhrw_mmx,
+ [0xbb] = gen_helper_pswapd,
+ [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
+};
+
+struct SSEOpHelper_epp {
+ SSEFunc_0_epp op[2];
+ uint32_t ext_mask;
+};
+
+struct SSEOpHelper_eppi {
+ SSEFunc_0_eppi op[2];
+ uint32_t ext_mask;
+};
+
+#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
+#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
+#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
+#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
+#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
+ CPUID_EXT_PCLMULQDQ }
+#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
+
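+/* sse_op_table6 is indexed by the third opcode byte of 0f 38 xx;
+ ext_mask is the CPUID feature bit required for the row. */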
+static const struct SSEOpHelper_epp sse_op_table6[256] = {
+ [0x00] = SSSE3_OP(pshufb),
+ [0x01] = SSSE3_OP(phaddw),
+ [0x02] = SSSE3_OP(phaddd),
+ [0x03] = SSSE3_OP(phaddsw),
+ [0x04] = SSSE3_OP(pmaddubsw),
+ [0x05] = SSSE3_OP(phsubw),
+ [0x06] = SSSE3_OP(phsubd),
+ [0x07] = SSSE3_OP(phsubsw),
+ [0x08] = SSSE3_OP(psignb),
+ [0x09] = SSSE3_OP(psignw),
+ [0x0a] = SSSE3_OP(psignd),
+ [0x0b] = SSSE3_OP(pmulhrsw),
+ [0x10] = SSE41_OP(pblendvb),
+ [0x14] = SSE41_OP(blendvps),
+ [0x15] = SSE41_OP(blendvpd),
+ [0x17] = SSE41_OP(ptest),
+ [0x1c] = SSSE3_OP(pabsb),
+ [0x1d] = SSSE3_OP(pabsw),
+ [0x1e] = SSSE3_OP(pabsd),
+ [0x20] = SSE41_OP(pmovsxbw),
+ [0x21] = SSE41_OP(pmovsxbd),
+ [0x22] = SSE41_OP(pmovsxbq),
+ [0x23] = SSE41_OP(pmovsxwd),
+ [0x24] = SSE41_OP(pmovsxwq),
+ [0x25] = SSE41_OP(pmovsxdq),
+ [0x28] = SSE41_OP(pmuldq),
+ [0x29] = SSE41_OP(pcmpeqq),
+ [0x2a] = SSE41_SPECIAL, /* movntdqa */
+ [0x2b] = SSE41_OP(packusdw),
+ [0x30] = SSE41_OP(pmovzxbw),
+ [0x31] = SSE41_OP(pmovzxbd),
+ [0x32] = SSE41_OP(pmovzxbq),
+ [0x33] = SSE41_OP(pmovzxwd),
+ [0x34] = SSE41_OP(pmovzxwq),
+ [0x35] = SSE41_OP(pmovzxdq),
+ [0x37] = SSE42_OP(pcmpgtq),
+ [0x38] = SSE41_OP(pminsb),
+ [0x39] = SSE41_OP(pminsd),
+ [0x3a] = SSE41_OP(pminuw),
+ [0x3b] = SSE41_OP(pminud),
+ [0x3c] = SSE41_OP(pmaxsb),
+ [0x3d] = SSE41_OP(pmaxsd),
+ [0x3e] = SSE41_OP(pmaxuw),
+ [0x3f] = SSE41_OP(pmaxud),
+ [0x40] = SSE41_OP(pmulld),
+ [0x41] = SSE41_OP(phminposuw),
+ [0xdb] = AESNI_OP(aesimc),
+ [0xdc] = AESNI_OP(aesenc),
+ [0xdd] = AESNI_OP(aesenclast),
+ [0xde] = AESNI_OP(aesdec),
+ [0xdf] = AESNI_OP(aesdeclast),
+};
+
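+/* sse_op_table7 is indexed by the third opcode byte of 0f 3a xx; all
+ of these insns also take an immediate byte. */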
+static const struct SSEOpHelper_eppi sse_op_table7[256] = {
+ [0x08] = SSE41_OP(roundps),
+ [0x09] = SSE41_OP(roundpd),
+ [0x0a] = SSE41_OP(roundss),
+ [0x0b] = SSE41_OP(roundsd),
+ [0x0c] = SSE41_OP(blendps),
+ [0x0d] = SSE41_OP(blendpd),
+ [0x0e] = SSE41_OP(pblendw),
+ [0x0f] = SSSE3_OP(palignr),
+ [0x14] = SSE41_SPECIAL, /* pextrb */
+ [0x15] = SSE41_SPECIAL, /* pextrw */
+ [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
+ [0x17] = SSE41_SPECIAL, /* extractps */
+ [0x20] = SSE41_SPECIAL, /* pinsrb */
+ [0x21] = SSE41_SPECIAL, /* insertps */
+ [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
+ [0x40] = SSE41_OP(dpps),
+ [0x41] = SSE41_OP(dppd),
+ [0x42] = SSE41_OP(mpsadbw),
+ [0x44] = PCLMULQDQ_OP(pclmulqdq),
+ [0x60] = SSE42_OP(pcmpestrm),
+ [0x61] = SSE42_OP(pcmpestri),
+ [0x62] = SSE42_OP(pcmpistrm),
+ [0x63] = SSE42_OP(pcmpistri),
+ [0xdf] = AESNI_OP(aeskeygenassist),
+};
+
+static void gen_sse(CPUX86State *env, DisasContext *s, int b,
+ target_ulong pc_start)
+{
+ int b1, op1_offset, op2_offset, is_xmm, val;
+ int modrm, mod, rm, reg;
+ SSEFunc_0_epp sse_fn_epp;
+ SSEFunc_0_eppi sse_fn_eppi;
+ SSEFunc_0_ppi sse_fn_ppi;
+ SSEFunc_0_eppt sse_fn_eppt;
+ MemOp ot;
+
+ b &= 0xff;
+ if (s->prefix & PREFIX_DATA)
+ b1 = 1;
+ else if (s->prefix & PREFIX_REPZ)
+ b1 = 2;
+ else if (s->prefix & PREFIX_REPNZ)
+ b1 = 3;
+ else
+ b1 = 0;
+ sse_fn_epp = sse_op_table1[b][b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
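+ /* Opcodes 0f 10..5f plus 0f c2 and 0f c6 are always SSE; anything
+ else is MMX unless a mandatory prefix is present. */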
+ if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
+ is_xmm = 1;
+ } else {
+ if (b1 == 0) {
+ /* MMX case */
+ is_xmm = 0;
+ } else {
+ is_xmm = 1;
+ }
+ }
+ /* simple MMX/SSE operation */
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ return;
+ }
+ if (s->flags & HF_EM_MASK) {
+ illegal_op:
+ gen_illegal_opcode(s);
+ return;
+ }
+ if (is_xmm
+ && !(s->flags & HF_OSFXSR_MASK)
+ && (b != 0x38 && b != 0x3a)) {
+ goto unknown_op;
+ }
+ if (b == 0x0e) {
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+ /* If we were fully decoding this we might use illegal_op. */
+ goto unknown_op;
+ }
+ /* femms */
+ gen_helper_emms(cpu_env);
+ return;
+ }
+ if (b == 0x77) {
+ /* emms */
+ gen_helper_emms(cpu_env);
+ return;
+ }
+ /* prepare MMX state (XXX: optimize by storing fptt and fptags in
+ the static cpu state) */
+ if (!is_xmm) {
+ gen_helper_enter_mmx(cpu_env);
+ }
+
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7);
+ if (is_xmm) {
+ reg |= REX_R(s);
+ }
+ mod = (modrm >> 6) & 3;
+ if (sse_fn_epp == SSE_SPECIAL) {
+ b |= (b1 << 8);
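+ /* Fold the prefix into bits 8..9 of b, so that e.g. case 0x1e7
+ below means 66 0f e7 and case 0x3f0 means f2 0f f0. */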
+ switch(b) {
+ case 0x0e7: /* movntq */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ break;
+ case 0x1e7: /* movntdq */
+ case 0x02b: /* movntps */
+ case 0x12b: /* movntpd */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ break;
+ case 0x3f0: /* lddqu */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ break;
+ case 0x22b: /* movntss */
+ case 0x32b: /* movntsd */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ if (b1 & 1) {
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, s->T0, s->A0);
+ }
+ break;
+ case 0x6e: /* movd mm, ea */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+ tcg_gen_st_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, fpregs[reg].mmx));
+ } else
+#endif
+ {
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_movl_mm_T0_mmx(s->ptr0, s->tmp2_i32);
+ }
+ break;
+ case 0x16e: /* movd xmm, ea */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ gen_helper_movq_mm_T0_xmm(s->ptr0, s->T0);
+ } else
+#endif
+ {
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_movl_mm_T0_xmm(s->ptr0, s->tmp2_i32);
+ }
+ break;
+ case 0x6f: /* movq mm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env,
+ offsetof(CPUX86State,fpregs[rm].mmx));
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ }
+ break;
+ case 0x010: /* movups */
+ case 0x110: /* movupd */
+ case 0x028: /* movaps */
+ case 0x128: /* movapd */
+ case 0x16f: /* movdqa xmm, ea */
+ case 0x26f: /* movdqu xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(s, offsetof(CPUX86State, xmm_regs[reg]),
+ offsetof(CPUX86State,xmm_regs[rm]));
+ }
+ break;
+ case 0x210: /* movss xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_32, s->T0, s->A0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)));
+ tcg_gen_movi_tl(s->T0, 0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)));
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+ }
+ break;
+ case 0x310: /* movsd xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ tcg_gen_movi_tl(s->T0, 0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ break;
+ case 0x012: /* movlps */
+ case 0x112: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ /* movhlps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
+ }
+ break;
+ case 0x212: /* movsldup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
+ }
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ break;
+ case 0x312: /* movddup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ break;
+ case 0x016: /* movhps */
+ case 0x116: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(1)));
+ } else {
+ /* movlhps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ break;
+ case 0x216: /* movshdup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
+ }
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ break;
+ case 0x178:
+ case 0x378:
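+ /* SSE4a: 66 0f 78 is extrq and f2 0f 78 insertq, both with the
+ field length and bit index taken from immediate bytes. */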
+ {
+ int bit_index, field_length;
+
+ if (b1 == 1 && reg != 0) {
+ goto illegal_op;
+ }
+ field_length = x86_ldub_code(env, s) & 0x3F;
+ bit_index = x86_ldub_code(env, s) & 0x3F;
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ if (b1 == 1) {
+ gen_helper_extrq_i(cpu_env, s->ptr0,
+ tcg_const_i32(bit_index),
+ tcg_const_i32(field_length));
+ } else {
+ gen_helper_insertq_i(cpu_env, s->ptr0,
+ tcg_const_i32(bit_index),
+ tcg_const_i32(field_length));
+ }
+ }
+ break;
+ case 0x7e: /* movd ea, mm */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ tcg_gen_ld_i64(s->T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+ } else
+#endif
+ {
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+ }
+ break;
+ case 0x17e: /* movd ea, xmm */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ tcg_gen_ld_i64(s->T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+ } else
+#endif
+ {
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+ }
+ break;
+ case 0x27e: /* movq xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)));
+ break;
+ case 0x7f: /* movq ea, mm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ gen_op_movq(s, offsetof(CPUX86State, fpregs[rm].mmx),
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ }
+ break;
+ case 0x011: /* movups */
+ case 0x111: /* movupd */
+ case 0x029: /* movaps */
+ case 0x129: /* movapd */
+ case 0x17f: /* movdqa ea, xmm */
+ case 0x27f: /* movdqu ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(s, offsetof(CPUX86State, xmm_regs[rm]),
+ offsetof(CPUX86State,xmm_regs[reg]));
+ }
+ break;
+ case 0x211: /* movss ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, s->T0, s->A0);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ }
+ break;
+ case 0x311: /* movsd ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ }
+ break;
+ case 0x013: /* movlps */
+ case 0x113: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x017: /* movhps */
+ case 0x117: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(1)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x71: /* shift mm, im */
+ case 0x72:
+ case 0x73:
+ case 0x171: /* shift xmm, im */
+ case 0x172:
+ case 0x173:
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+ val = x86_ldub_code(env, s);
+ if (is_xmm) {
+ tcg_gen_movi_tl(s->T0, val);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
+ tcg_gen_movi_tl(s->T0, 0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_t0.ZMM_L(1)));
+ op1_offset = offsetof(CPUX86State,xmm_t0);
+ } else {
+ tcg_gen_movi_tl(s->T0, val);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_L(0)));
+ tcg_gen_movi_tl(s->T0, 0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_L(1)));
+ op1_offset = offsetof(CPUX86State,mmx_t0);
+ }
+ sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
+ (((modrm >> 3)) & 7)][b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (is_xmm) {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op1_offset);
+ sse_fn_epp(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0x050: /* movmskps */
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]));
+ gen_helper_movmskps(s->tmp2_i32, cpu_env, s->ptr0);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
+ break;
+ case 0x150: /* movmskpd */
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]));
+ gen_helper_movmskpd(s->tmp2_i32, cpu_env, s->ptr0);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
+ break;
+ case 0x02a: /* cvtpi2ps */
+ case 0x12a: /* cvtpi2pd */
+ gen_helper_enter_mmx(cpu_env);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ switch(b >> 8) {
+ case 0x0:
+ gen_helper_cvtpi2ps(cpu_env, s->ptr0, s->ptr1);
+ break;
+ default:
+ case 0x1:
+ gen_helper_cvtpi2pd(cpu_env, s->ptr0, s->ptr1);
+ break;
+ }
+ break;
+ case 0x22a: /* cvtsi2ss */
+ case 0x32a: /* cvtsi2sd */
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ if (ot == MO_32) {
+ SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ sse_fn_epi(cpu_env, s->ptr0, s->tmp2_i32);
+ } else {
+#ifdef TARGET_X86_64
+ SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
+ sse_fn_epl(cpu_env, s->ptr0, s->T0);
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ case 0x02c: /* cvttps2pi */
+ case 0x12c: /* cvttpd2pi */
+ case 0x02d: /* cvtps2pi */
+ case 0x12d: /* cvtpd2pi */
+ gen_helper_enter_mmx(cpu_env);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_ldo_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ switch(b) {
+ case 0x02c:
+ gen_helper_cvttps2pi(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0x12c:
+ gen_helper_cvttpd2pi(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0x02d:
+ gen_helper_cvtps2pi(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0x12d:
+ gen_helper_cvtpd2pi(cpu_env, s->ptr0, s->ptr1);
+ break;
+ }
+ break;
+ case 0x22c: /* cvttss2si */
+ case 0x32c: /* cvttsd2si */
+ case 0x22d: /* cvtss2si */
+ case 0x32d: /* cvtsd2si */
+ ot = mo_64_32(s->dflag);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ if ((b >> 8) & 1) {
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
+ } else {
+ gen_op_ld_v(s, MO_32, s->T0, s->A0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, xmm_t0.ZMM_L(0)));
+ }
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op2_offset);
+ if (ot == MO_32) {
+ SSEFunc_i_ep sse_fn_i_ep =
+ sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
+ sse_fn_i_ep(s->tmp2_i32, cpu_env, s->ptr0);
+ tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ } else {
+#ifdef TARGET_X86_64
+ SSEFunc_l_ep sse_fn_l_ep =
+ sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
+ sse_fn_l_ep(s->T0, cpu_env, s->ptr0);
+#else
+ goto illegal_op;
+#endif
+ }
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ case 0xc4: /* pinsrw */
+ case 0x1c4:
+ s->rip_offset = 1;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ val = x86_ldub_code(env, s);
+ if (b1) {
+ val &= 7;
+ tcg_gen_st16_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
+ } else {
+ val &= 3;
+ tcg_gen_st16_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
+ }
+ break;
+ case 0xc5: /* pextrw */
+ case 0x1c5:
+ if (mod != 3) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ val = x86_ldub_code(env, s);
+ if (b1) {
+ val &= 7;
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_ld16u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
+ } else {
+ val &= 3;
+ rm = (modrm & 7);
+ tcg_gen_ld16u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
+ }
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ case 0x1d6: /* movq ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ gen_op_movq_env_0(s,
+ offsetof(CPUX86State, xmm_regs[rm].ZMM_Q(1)));
+ }
+ break;
+ case 0x2d6: /* movq2dq */
+ gen_helper_enter_mmx(cpu_env);
+ rm = (modrm & 7);
+ gen_op_movq(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,fpregs[rm].mmx));
+ gen_op_movq_env_0(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_Q(1)));
+ break;
+ case 0x3d6: /* movdq2q */
+ gen_helper_enter_mmx(cpu_env);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(s, offsetof(CPUX86State, fpregs[reg & 7].mmx),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ break;
+ case 0xd7: /* pmovmskb */
+ case 0x1d7:
+ if (mod != 3) {
+ goto illegal_op;
+ }
+ if (b1) {
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State, xmm_regs[rm]));
+ gen_helper_pmovmskb_xmm(s->tmp2_i32, cpu_env, s->ptr0);
+ } else {
+ rm = (modrm & 7);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env,
+ offsetof(CPUX86State, fpregs[rm].mmx));
+ gen_helper_pmovmskb_mmx(s->tmp2_i32, cpu_env, s->ptr0);
+ }
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
+ break;
+
+ case 0x138:
+ case 0x038:
+ b = modrm;
+ if ((b & 0xf0) == 0xf0) {
+ goto do_0f_38_fx;
+ }
+ modrm = x86_ldub_code(env, s);
+ rm = modrm & 7;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+
+ sse_fn_epp = sse_op_table6[b].op[b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) {
+ goto illegal_op;
+ }
+
+ if (b1) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+ } else {
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_lea_modrm(env, s, modrm);
+ switch (b) {
+ case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
+ case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
+ case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
+ gen_ldq_env_A0(s, op2_offset +
+ offsetof(ZMMReg, ZMM_Q(0)));
+ break;
+ case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
+ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env, op2_offset +
+ offsetof(ZMMReg, ZMM_L(0)));
+ break;
+ case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
+ tcg_gen_qemu_ld_tl(s->tmp0, s->A0,
+ s->mem_index, MO_LEUW);
+ tcg_gen_st16_tl(s->tmp0, cpu_env, op2_offset +
+ offsetof(ZMMReg, ZMM_W(0)));
+ break;
+ case 0x2a: /* movntdqa */
+ gen_ldo_env_A0(s, op1_offset);
+ return;
+ default:
+ gen_ldo_env_A0(s, op2_offset);
+ }
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ } else {
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, op2_offset);
+ }
+ }
+ if (sse_fn_epp == SSE_SPECIAL) {
+ goto unknown_op;
+ }
+
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, s->ptr0, s->ptr1);
+
+ if (b == 0x17) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ break;
+
+ case 0x238:
+ case 0x338:
+ do_0f_38_fx:
+ /* Various integer extensions at 0f 38 f[0-f]. */
+ b = modrm | (b1 << 8);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ switch (b) {
+ case 0x3f0: /* crc32 Gd,Eb */
+ case 0x3f1: /* crc32 Gd,Ey */
+ do_crc32:
+ if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
+ goto illegal_op;
+ }
+ if ((b & 0xff) == 0xf0) {
+ ot = MO_8;
+ } else if (s->dflag != MO_64) {
+ ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+ } else {
+ ot = MO_64;
+ }
+
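+ /* The final helper argument is the source operand width in bits. */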
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[reg]);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_helper_crc32(s->T0, s->tmp2_i32,
+ s->T0, tcg_const_i32(8 << ot));
+
+ ot = mo_64_32(s->dflag);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+
+ case 0x1f0: /* crc32 or movbe */
+ case 0x1f1:
+ /* For these insns, the f3 prefix is supposed to have priority
+ over the 66 prefix, which is not how b1 is computed above, so
+ check for it explicitly here. */
+ if (s->prefix & PREFIX_REPNZ) {
+ goto do_crc32;
+ }
+ /* FALLTHRU */
+ case 0x0f0: /* movbe Gy,My */
+ case 0x0f1: /* movbe My,Gy */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
+ goto illegal_op;
+ }
+ if (s->dflag != MO_64) {
+ ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+ } else {
+ ot = MO_64;
+ }
+
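+ /* movbe is a byte-swapped load or store, implemented by adding
+ MO_BE to the memory operation. */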
+ gen_lea_modrm(env, s, modrm);
+ if ((b & 1) == 0) {
+ tcg_gen_qemu_ld_tl(s->T0, s->A0,
+ s->mem_index, ot | MO_BE);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ } else {
+ tcg_gen_qemu_st_tl(cpu_regs[reg], s->A0,
+ s->mem_index, ot | MO_BE);
+ }
+ break;
+
+ case 0x0f2: /* andn Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ tcg_gen_andc_tl(s->T0, s->T0, cpu_regs[s->vex_v]);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ gen_op_update1_cc(s);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0x0f7: /* bextr Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ {
+ TCGv bound, zero;
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Extract START, and shift the operand.
+ Shifts larger than operand size get zeros. */
+ tcg_gen_ext8u_tl(s->A0, cpu_regs[s->vex_v]);
+ tcg_gen_shr_tl(s->T0, s->T0, s->A0);
+
+ bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+ zero = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound,
+ s->T0, zero);
+ tcg_temp_free(zero);
+
+ /* Extract the LEN into a mask. Lengths larger than
+ operand size get all ones. */
+ tcg_gen_extract_tl(s->A0, cpu_regs[s->vex_v], 8, 8);
+ tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound,
+ s->A0, bound);
+ tcg_temp_free(bound);
+ tcg_gen_movi_tl(s->T1, 1);
+ tcg_gen_shl_tl(s->T1, s->T1, s->A0);
+ tcg_gen_subi_tl(s->T1, s->T1, 1);
+ tcg_gen_and_tl(s->T0, s->T0, s->T1);
+
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ gen_op_update1_cc(s);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ }
+ break;
+
+ case 0x0f5: /* bzhi Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]);
+ {
+ TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+ /* Note that since we're using BMILG (in order to get O
+ cleared) we need to store the inverse into C. */
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
+ s->T1, bound);
+ tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1,
+ bound, bound, s->T1);
+ tcg_temp_free(bound);
+ }
+ tcg_gen_movi_tl(s->A0, -1);
+ tcg_gen_shl_tl(s->A0, s->A0, s->T1);
+ tcg_gen_andc_tl(s->T0, s->T0, s->A0);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ gen_op_update1_cc(s);
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ case 0x3f6: /* mulx By, Gy, rdx, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
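+ /* mulx: EDX * Ey with the low half written to By (vex.vvvv) and
+ the high half to Gy; unlike mul, the flags are untouched. */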
+ switch (ot) {
+ default:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EDX]);
+ tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
+ s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp3_i32);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mulu2_i64(s->T0, s->T1,
+ s->T0, cpu_regs[R_EDX]);
+ tcg_gen_mov_i64(cpu_regs[s->vex_v], s->T0);
+ tcg_gen_mov_i64(cpu_regs[reg], s->T1);
+ break;
+#endif
+ }
+ break;
+
+ case 0x3f5: /* pdep Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Note that by zero-extending the source operand, we
+ automatically handle zero-extending the result. */
+ if (ot == MO_64) {
+ tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]);
+ } else {
+ tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]);
+ }
+ gen_helper_pdep(cpu_regs[reg], s->T1, s->T0);
+ break;
+
+ case 0x2f5: /* pext Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Note that by zero-extending the source operand, we
+ automatically handle zero-extending the result. */
+ if (ot == MO_64) {
+ tcg_gen_mov_tl(s->T1, cpu_regs[s->vex_v]);
+ } else {
+ tcg_gen_ext32u_tl(s->T1, cpu_regs[s->vex_v]);
+ }
+ gen_helper_pext(cpu_regs[reg], s->T1, s->T0);
+ break;
+
+ case 0x1f6: /* adcx Gy, Ey */
+ case 0x2f6: /* adox Gy, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
+ goto illegal_op;
+ } else {
+ TCGv carry_in, carry_out, zero;
+ int end_op;
+
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ /* Re-use the carry-out from a previous round. */
+ carry_in = NULL;
+ carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
+ switch (s->cc_op) {
+ case CC_OP_ADCX:
+ if (b == 0x1f6) {
+ carry_in = cpu_cc_dst;
+ end_op = CC_OP_ADCX;
+ } else {
+ end_op = CC_OP_ADCOX;
+ }
+ break;
+ case CC_OP_ADOX:
+ if (b == 0x1f6) {
+ end_op = CC_OP_ADCOX;
+ } else {
+ carry_in = cpu_cc_src2;
+ end_op = CC_OP_ADOX;
+ }
+ break;
+ case CC_OP_ADCOX:
+ end_op = CC_OP_ADCOX;
+ carry_in = carry_out;
+ break;
+ default:
+ end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
+ break;
+ }
+ /* If we can't reuse carry-out, get it out of EFLAGS. */
+ if (!carry_in) {
+ if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
+ gen_compute_eflags(s);
+ }
+ carry_in = s->tmp0;
+ tcg_gen_extract_tl(carry_in, cpu_cc_src,
+ ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
+ }
+
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_32:
+ /* If we know TL is 64-bit, and we want a 32-bit
+ result, just do everything in 64-bit arithmetic. */
+ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
+ tcg_gen_ext32u_i64(s->T0, s->T0);
+ tcg_gen_add_i64(s->T0, s->T0, cpu_regs[reg]);
+ tcg_gen_add_i64(s->T0, s->T0, carry_in);
+ tcg_gen_ext32u_i64(cpu_regs[reg], s->T0);
+ tcg_gen_shri_i64(carry_out, s->T0, 32);
+ break;
+#endif
+ default:
+ /* Otherwise compute the carry-out in two steps. */
+ zero = tcg_const_tl(0);
+ tcg_gen_add2_tl(s->T0, carry_out,
+ s->T0, zero,
+ carry_in, zero);
+ tcg_gen_add2_tl(cpu_regs[reg], carry_out,
+ cpu_regs[reg], carry_out,
+ s->T0, zero);
+ tcg_temp_free(zero);
+ break;
+ }
+ set_cc_op(s, end_op);
+ }
+ break;
+
+ case 0x1f7: /* shlx Gy, Ey, By */
+ case 0x2f7: /* sarx Gy, Ey, By */
+ case 0x3f7: /* shrx Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ if (ot == MO_64) {
+ tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 63);
+ } else {
+ tcg_gen_andi_tl(s->T1, cpu_regs[s->vex_v], 31);
+ }
+ if (b == 0x1f7) {
+ tcg_gen_shl_tl(s->T0, s->T0, s->T1);
+ } else if (b == 0x2f7) {
+ if (ot != MO_64) {
+ tcg_gen_ext32s_tl(s->T0, s->T0);
+ }
+ tcg_gen_sar_tl(s->T0, s->T0, s->T1);
+ } else {
+ if (ot != MO_64) {
+ tcg_gen_ext32u_tl(s->T0, s->T0);
+ }
+ tcg_gen_shr_tl(s->T0, s->T0, s->T1);
+ }
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+
+ case 0x0f3:
+ case 0x1f3:
+ case 0x2f3:
+ case 0x3f3: /* Group 17 */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ tcg_gen_mov_tl(cpu_cc_src, s->T0);
+ switch (reg & 7) {
+ case 1: /* blsr By,Ey */
+ tcg_gen_subi_tl(s->T1, s->T0, 1);
+ tcg_gen_and_tl(s->T0, s->T0, s->T1);
+ break;
+ case 2: /* blsmsk By,Ey */
+ tcg_gen_subi_tl(s->T1, s->T0, 1);
+ tcg_gen_xor_tl(s->T0, s->T0, s->T1);
+ break;
+ case 3: /* blsi By, Ey */
+ tcg_gen_neg_tl(s->T1, s->T0);
+ tcg_gen_and_tl(s->T0, s->T0, s->T1);
+ break;
+ default:
+ goto unknown_op;
+ }
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ gen_op_mov_reg_v(s, ot, s->vex_v, s->T0);
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x03a:
+ case 0x13a:
+ b = modrm;
+ modrm = x86_ldub_code(env, s);
+ rm = modrm & 7;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+
+ sse_fn_eppi = sse_op_table7[b].op[b1];
+ if (!sse_fn_eppi) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) {
+ goto illegal_op;
+ }
+
+ s->rip_offset = 1;
+
+ if (sse_fn_eppi == SSE_SPECIAL) {
+ ot = mo_64_32(s->dflag);
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ }
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ val = x86_ldub_code(env, s);
+ switch (b) {
+ case 0x14: /* pextrb */
+ tcg_gen_ld8u_tl(s->T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ tcg_gen_qemu_st_tl(s->T0, s->A0,
+ s->mem_index, MO_UB);
+ }
+ break;
+ case 0x15: /* pextrw */
+ tcg_gen_ld16u_tl(s->T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_W(val & 7)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ tcg_gen_qemu_st_tl(s->T0, s->A0,
+ s->mem_index, MO_LEUW);
+ }
+ break;
+ case 0x16:
+ if (ot == MO_32) { /* pextrd */
+ tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ if (mod == 3) {
+ tcg_gen_extu_i32_tl(cpu_regs[rm], s->tmp2_i32);
+ } else {
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ } else { /* pextrq */
+#ifdef TARGET_X86_64
+ tcg_gen_ld_i64(s->tmp1_i64, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(val & 1)));
+ if (mod == 3) {
+ tcg_gen_mov_i64(cpu_regs[rm], s->tmp1_i64);
+ } else {
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ }
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ case 0x17: /* extractps */
+ tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ tcg_gen_qemu_st_tl(s->T0, s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ break;
+ case 0x20: /* pinsrb */
+ if (mod == 3) {
+ gen_op_mov_v_reg(s, MO_32, s->T0, rm);
+ } else {
+ tcg_gen_qemu_ld_tl(s->T0, s->A0,
+ s->mem_index, MO_UB);
+ }
+ tcg_gen_st8_tl(s->T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
+ break;
+ case 0x21: /* insertps */
+ if (mod == 3) {
+ tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]
+ .ZMM_L((val >> 6) & 3)));
+ } else {
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]
+ .ZMM_L((val >> 4) & 3)));
+ if ((val >> 0) & 1) {
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(0)));
+ }
+ if ((val >> 1) & 1) {
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(1)));
+ }
+ if ((val >> 2) & 1) {
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(2)));
+ }
+ if ((val >> 3) & 1) {
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(3)));
+ }
+ break;
+ case 0x22:
+ if (ot == MO_32) { /* pinsrd */
+ if (mod == 3) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[rm]);
+ } else {
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ } else { /* pinsrq */
+#ifdef TARGET_X86_64
+ if (mod == 3) {
+ gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm);
+ } else {
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ }
+ tcg_gen_st_i64(s->tmp1_i64, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(val & 1)));
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ }
+ return;
+ }
+
+ if (b1) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+ } else {
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, op2_offset);
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ } else {
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, op2_offset);
+ }
+ }
+ val = x86_ldub_code(env, s);
+
+ if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
+ set_cc_op(s, CC_OP_EFLAGS);
+
+ if (s->dflag == MO_64) {
+ /* The helper must use entire 64-bit gp registers */
+ val |= 1 << 8;
+ }
+ }
+
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ sse_fn_eppi(cpu_env, s->ptr0, s->ptr1, tcg_const_i32(val));
+ break;
+
+ case 0x33a:
+ /* Various integer extensions at 0f 3a f[0-f]. */
+ b = modrm | (b1 << 8);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ switch (b) {
+ case 0x3f0: /* rorx Gy,Ey, Ib */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ b = x86_ldub_code(env, s);
+ if (ot == MO_64) {
+ tcg_gen_rotri_tl(s->T0, s->T0, b & 63);
+ } else {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31);
+ tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ }
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ default:
+ unknown_op:
+ gen_unknown_opcode(env, s);
+ return;
+ }
+ } else {
+ /* generic MMX or SSE operation */
+ switch(b) {
+ case 0x70: /* pshufx insn */
+ case 0xc6: /* shufps/shufpd */
+ case 0xc2: /* compare insns */
+ s->rip_offset = 1;
+ break;
+ default:
+ break;
+ }
+ if (is_xmm) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod != 3) {
+ int sz = 4;
+
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+
+ switch (b) {
+ case 0x50 ... 0x5a:
+ case 0x5c ... 0x5f:
+ case 0xc2:
+ /* Most sse scalar operations. */
+ if (b1 == 2) {
+ sz = 2;
+ } else if (b1 == 3) {
+ sz = 3;
+ }
+ break;
+
+ case 0x2e: /* ucomis[sd] */
+ case 0x2f: /* comis[sd] */
+ if (b1 == 0) {
+ sz = 2;
+ } else {
+ sz = 3;
+ }
+ break;
+ }
+
+ switch (sz) {
+ case 2:
+ /* 32 bit access */
+ gen_op_ld_v(s, MO_32, s->T0, s->A0);
+ tcg_gen_st32_tl(s->T0, cpu_env,
+ offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ break;
+ case 3:
+ /* 64 bit access */
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
+ break;
+ default:
+ /* 128 bit access */
+ gen_ldo_env_A0(s, op2_offset);
+ break;
+ }
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ }
+ switch(b) {
+ case 0x0f: /* 3DNow! data insns */
+ val = x86_ldub_code(env, s);
+ sse_fn_epp = sse_op_table5[val];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+ goto illegal_op;
+ }
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0x70: /* pshufx insn */
+ case 0xc6: /* shufps/shufpd */
+ val = x86_ldub_code(env, s);
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ /* XXX: introduce a new table? */
+ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
+ sse_fn_ppi(s->ptr0, s->ptr1, tcg_const_i32(val));
+ break;
+ case 0xc2:
+ /* compare insns */
+ val = x86_ldub_code(env, s);
+ if (val >= 8) {
+ goto unknown_op;
+ }
+ sse_fn_epp = sse_op_table4[val][b1];
+
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, s->ptr0, s->ptr1);
+ break;
+ case 0xf7:
+ /* maskmov: we must prepare A0 */
+ if (mod != 3) {
+ goto illegal_op;
+ }
+ tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]);
+ gen_extu(s->aflag, s->A0);
+ gen_add_A0_ds_seg(s);
+
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ /* XXX: introduce a new table? */
+ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
+ sse_fn_eppt(cpu_env, s->ptr0, s->ptr1, s->A0);
+ break;
+ default:
+ tcg_gen_addi_ptr(s->ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, s->ptr0, s->ptr1);
+ break;
+ }
+ if (b == 0x2e || b == 0x2f) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ }
+}
+
+/* convert one instruction. s->base.is_jmp is set if the translation must
+ be stopped. Return the next pc value */
+static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
+{
+ CPUX86State *env = cpu->env_ptr;
+ int b, prefixes;
+ int shift;
+ MemOp ot, aflag, dflag;
+ int modrm, reg, rm, mod, op, opreg, val;
+ target_ulong next_eip, tval;
+ target_ulong pc_start = s->base.pc_next;
+
+ s->pc_start = s->pc = pc_start;
+ s->override = -1;
+#ifdef TARGET_X86_64
+ s->rex_w = false;
+ s->rex_r = 0;
+ s->rex_x = 0;
+ s->rex_b = 0;
+#endif
+ s->rip_offset = 0; /* for relative ip address */
+ s->vex_l = 0;
+ s->vex_v = 0;
+ if (sigsetjmp(s->jmpbuf, 0) != 0) {
+ gen_exception_gpf(s);
+ return s->pc;
+ }
+
+ prefixes = 0;
+
+ next_byte:
+ b = x86_ldub_code(env, s);
+ /* Collect prefixes. */
+ switch (b) {
+ case 0xf3:
+ prefixes |= PREFIX_REPZ;
+ goto next_byte;
+ case 0xf2:
+ prefixes |= PREFIX_REPNZ;
+ goto next_byte;
+ case 0xf0:
+ prefixes |= PREFIX_LOCK;
+ goto next_byte;
+ case 0x2e:
+ s->override = R_CS;
+ goto next_byte;
+ case 0x36:
+ s->override = R_SS;
+ goto next_byte;
+ case 0x3e:
+ s->override = R_DS;
+ goto next_byte;
+ case 0x26:
+ s->override = R_ES;
+ goto next_byte;
+ case 0x64:
+ s->override = R_FS;
+ goto next_byte;
+ case 0x65:
+ s->override = R_GS;
+ goto next_byte;
+ case 0x66:
+ prefixes |= PREFIX_DATA;
+ goto next_byte;
+ case 0x67:
+ prefixes |= PREFIX_ADR;
+ goto next_byte;
+#ifdef TARGET_X86_64
+ case 0x40 ... 0x4f:
+ if (CODE64(s)) {
+ /* REX prefix */
+ prefixes |= PREFIX_REX;
+ s->rex_w = (b >> 3) & 1;
+ s->rex_r = (b & 0x4) << 1;
+ s->rex_x = (b & 0x2) << 2;
+ s->rex_b = (b & 0x1) << 3;
+ goto next_byte;
+ }
+ break;
+#endif
+ case 0xc5: /* 2-byte VEX */
+ case 0xc4: /* 3-byte VEX */
+ /* VEX prefixes are only recognized in 32-bit and 64-bit code
+ outside vm86 mode; otherwise 0xc4/0xc5 decode as LES or LDS. */
+ if (CODE32(s) && !VM86(s)) {
+ static const int pp_prefix[4] = {
+ 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
+ };
+ int vex3, vex2 = x86_ldub_code(env, s);
+
+ if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
+ /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
+ otherwise the instruction is LES or LDS. */
+ s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
+ break;
+ }
+
+ /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
+ | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
+ goto illegal_op;
+ }
+#ifdef TARGET_X86_64
+ s->rex_r = (~vex2 >> 4) & 8;
+#endif
+ if (b == 0xc5) {
+ /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
+ vex3 = vex2;
+ b = x86_ldub_code(env, s) | 0x100;
+ } else {
+ /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
+ vex3 = x86_ldub_code(env, s);
+#ifdef TARGET_X86_64
+ s->rex_x = (~vex2 >> 3) & 8;
+ s->rex_b = (~vex2 >> 2) & 8;
+ s->rex_w = (vex3 >> 7) & 1;
+#endif
+ switch (vex2 & 0x1f) {
+ case 0x01: /* Implied 0f leading opcode bytes. */
+ b = x86_ldub_code(env, s) | 0x100;
+ break;
+ case 0x02: /* Implied 0f 38 leading opcode bytes. */
+ b = 0x138;
+ break;
+ case 0x03: /* Implied 0f 3a leading opcode bytes. */
+ b = 0x13a;
+ break;
+ default: /* Reserved for future use. */
+ goto unknown_op;
+ }
+ }
+ s->vex_v = (~vex3 >> 3) & 0xf;
+ s->vex_l = (vex3 >> 2) & 1;
+ prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
+ }
+ break;
+ }
+
+ /* Post-process prefixes. */
+ if (CODE64(s)) {
+ /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
+ data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
+ over 0x66 if both are present. */
+ dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
+ /* In 64-bit mode, 0x67 selects 32-bit addressing. */
+ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
+ } else {
+ /* In 16/32-bit mode, 0x66 selects the opposite data size. */
+ if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
+ dflag = MO_32;
+ } else {
+ dflag = MO_16;
+ }
+ /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
+ if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
+ aflag = MO_32;
+ } else {
+ aflag = MO_16;
+ }
+ }
+
+ s->prefix = prefixes;
+ s->aflag = aflag;
+ s->dflag = dflag;
+
+ /* now check op code */
+ reswitch:
+ switch(b) {
+ case 0x0f:
+ /**************************/
+ /* extended op code */
+ b = x86_ldub_code(env, s) | 0x100;
+ goto reswitch;
+
+ /**************************/
+ /* arith & logic */
+ case 0x00 ... 0x05:
+ case 0x08 ... 0x0d:
+ case 0x10 ... 0x15:
+ case 0x18 ... 0x1d:
+ case 0x20 ... 0x25:
+ case 0x28 ... 0x2d:
+ case 0x30 ... 0x35:
+ case 0x38 ... 0x3d:
+ {
+ int op, f, val;
+ op = (b >> 3) & 7;
+ f = (b >> 1) & 3;
+
+ ot = mo_b_d(b, dflag);
+
+ switch(f) {
+ case 0: /* OP Ev, Gv */
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else if (op == OP_XORL && rm == reg) {
+ xor_zero:
+ /* xor reg, reg optimisation */
+ set_cc_op(s, CC_OP_CLR);
+ tcg_gen_movi_tl(s->T0, 0);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ } else {
+ opreg = rm;
+ }
+ gen_op_mov_v_reg(s, ot, s->T1, reg);
+ gen_op(s, op, ot, opreg);
+ break;
+ case 1: /* OP Gv, Ev */
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ } else if (op == OP_XORL && rm == reg) {
+ goto xor_zero;
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T1, rm);
+ }
+ gen_op(s, op, ot, reg);
+ break;
+ case 2: /* OP A, Iv */
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(s->T1, val);
+ gen_op(s, op, ot, OR_EAX);
+ break;
+ }
+ }
+ break;
+
+ case 0x82:
+ if (CODE64(s)) {
+ goto illegal_op;
+ }
+ /* fall through */
+ case 0x80: /* GRP1 */
+ case 0x81:
+ case 0x83:
+ {
+ int val;
+
+ ot = mo_b_d(b, dflag);
+
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ if (b == 0x83) {
+ s->rip_offset = 1;
+ } else {
+ s->rip_offset = insn_const_size(ot);
+ }
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm;
+ }
+
+ switch(b) {
+ default:
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ val = insn_get(env, s, ot);
+ break;
+ case 0x83:
+ val = (int8_t)insn_get(env, s, MO_8);
+ break;
+ }
+ tcg_gen_movi_tl(s->T1, val);
+ gen_op(s, op, ot, opreg);
+ }
+ break;
+
+ /**************************/
+ /* inc, dec, and other misc arith */
+ case 0x40 ... 0x47: /* inc Gv */
+ ot = dflag;
+ gen_inc(s, ot, OR_EAX + (b & 7), 1);
+ break;
+ case 0x48 ... 0x4f: /* dec Gv */
+ ot = dflag;
+ gen_inc(s, ot, OR_EAX + (b & 7), -1);
+ break;
+ case 0xf6: /* GRP3 */
+ case 0xf7:
+ ot = mo_b_d(b, dflag);
+
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+ if (mod != 3) {
+ if (op == 0) {
+ s->rip_offset = insn_const_size(ot);
+ }
+ gen_lea_modrm(env, s, modrm);
+ /* For those below that handle locked memory, don't load here. */
+ if (!(s->prefix & PREFIX_LOCK)
+ || op != 2) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ }
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ }
+
+ switch(op) {
+ case 0: /* test */
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(s->T1, val);
+ gen_op_testl_T0_T1_cc(s);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+ case 2: /* not */
+ if (s->prefix & PREFIX_LOCK) {
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ tcg_gen_movi_tl(s->T0, ~0);
+ tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
+ s->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_not_tl(s->T0, s->T0);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ }
+ }
+ break;
+ case 3: /* neg */
+ if (s->prefix & PREFIX_LOCK) {
+ TCGLabel *label1;
+ TCGv a0, t0, t1, t2;
+
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ a0 = tcg_temp_local_new();
+ t0 = tcg_temp_local_new();
+ label1 = gen_new_label();
+
+ tcg_gen_mov_tl(a0, s->A0);
+ tcg_gen_mov_tl(t0, s->T0);
+
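+ /* Compare-and-swap loop: if another CPU changed the value between
+ the load and the cmpxchg, reload and retry. */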
+ gen_set_label(label1);
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ tcg_gen_mov_tl(t2, t0);
+ tcg_gen_neg_tl(t1, t0);
+ tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
+ s->mem_index, ot | MO_LE);
+ tcg_temp_free(t1);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
+
+ tcg_temp_free(t2);
+ tcg_temp_free(a0);
+ tcg_gen_mov_tl(s->T0, t0);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_neg_tl(s->T0, s->T0);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ }
+ }
+ gen_op_update_neg_cc(s);
+ set_cc_op(s, CC_OP_SUBB + ot);
+ break;
+ case 4: /* mul */
+ switch(ot) {
+ case MO_8:
+ gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
+ tcg_gen_ext8u_tl(s->T0, s->T0);
+ tcg_gen_ext8u_tl(s->T1, s->T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(s->T0, s->T0, s->T1);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
+ set_cc_op(s, CC_OP_MULB);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
+ tcg_gen_ext16u_tl(s->T0, s->T0);
+ tcg_gen_ext16u_tl(s->T1, s->T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(s->T0, s->T0, s->T1);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_shri_tl(s->T0, s->T0, 16);
+ gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
+ tcg_gen_mov_tl(cpu_cc_src, s->T0);
+ set_cc_op(s, CC_OP_MULW);
+ break;
+ default:
+ case MO_32:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
+ tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
+ s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULL);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+ s->T0, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULQ);
+ break;
+#endif
+ }
+ break;
+ case 5: /* imul */
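+ /* Signed multiply by rAX. CF and OF are set iff the
+ product does not fit in the low half: cpu_cc_src is
+ the result minus the sign-extended low half, which is
+ non-zero exactly on overflow. */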
+ switch(ot) {
+ case MO_8:
+ gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
+ tcg_gen_ext8s_tl(s->T0, s->T0);
+ tcg_gen_ext8s_tl(s->T1, s->T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(s->T0, s->T0, s->T1);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_ext8s_tl(s->tmp0, s->T0);
+ tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
+ set_cc_op(s, CC_OP_MULB);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
+ tcg_gen_ext16s_tl(s->T0, s->T0);
+ tcg_gen_ext16s_tl(s->T1, s->T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(s->T0, s->T0, s->T1);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_ext16s_tl(s->tmp0, s->T0);
+ tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
+ tcg_gen_shri_tl(s->T0, s->T0, 16);
+ gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
+ set_cc_op(s, CC_OP_MULW);
+ break;
+ default:
+ case MO_32:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
+ tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
+ s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
+ tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
+ set_cc_op(s, CC_OP_MULL);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+ s->T0, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULQ);
+ break;
+#endif
+ }
+ break;
+ case 6: /* div */
+ switch(ot) {
+ case MO_8:
+ gen_helper_divb_AL(cpu_env, s->T0);
+ break;
+ case MO_16:
+ gen_helper_divw_AX(cpu_env, s->T0);
+ break;
+ default:
+ case MO_32:
+ gen_helper_divl_EAX(cpu_env, s->T0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_divq_EAX(cpu_env, s->T0);
+ break;
+#endif
+ }
+ break;
+ case 7: /* idiv */
+ switch(ot) {
+ case MO_8:
+ gen_helper_idivb_AL(cpu_env, s->T0);
+ break;
+ case MO_16:
+ gen_helper_idivw_AX(cpu_env, s->T0);
+ break;
+ default:
+ case MO_32:
+ gen_helper_idivl_EAX(cpu_env, s->T0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_idivq_EAX(cpu_env, s->T0);
+ break;
+#endif
+ }
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0xfe: /* GRP4 */
+ case 0xff: /* GRP5 */
+ ot = mo_b_d(b, dflag);
+
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+ if (op >= 2 && b == 0xfe) {
+ goto unknown_op;
+ }
+ if (CODE64(s)) {
+ if (op == 2 || op == 4) {
+ /* operand size for jumps is 64 bit */
+ ot = MO_64;
+ } else if (op == 3 || op == 5) {
+ ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
+ } else if (op == 6) {
+ /* default push size is 64 bit */
+ ot = mo_pushpop(s, dflag);
+ }
+ }
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ if (op >= 2 && op != 3 && op != 5)
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ }
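+ /* The memory load above is skipped for /0, /1, /3 and /5:
+ gen_inc loads its own operand, and lcall/ljmp fetch
+ offset and selector themselves. */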
+
+ switch(op) {
+ case 0: /* inc Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, 1);
+ break;
+ case 1: /* dec Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, -1);
+ break;
+ case 2: /* call Ev */
+ /* XXX: optimize if memory (no 'and' is necessary) */
+ if (dflag == MO_16) {
+ tcg_gen_ext16u_tl(s->T0, s->T0);
+ }
+ next_eip = s->pc - s->cs_base;
+ tcg_gen_movi_tl(s->T1, next_eip);
+ gen_push_v(s, s->T1);
+ gen_op_jmp_v(s->T0);
+ gen_bnd_jmp(s);
+ gen_jr(s, s->T0);
+ break;
+ case 3: /* lcall Ev */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ gen_add_A0_im(s, 1 << ot);
+ gen_op_ld_v(s, MO_16, s->T0, s->A0);
+ do_lcall:
+ if (PE(s) && !VM86(s)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
+ tcg_const_i32(dflag - 1),
+ tcg_const_tl(s->pc - s->cs_base));
+ } else {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->T1,
+ tcg_const_i32(dflag - 1),
+ tcg_const_i32(s->pc - s->cs_base));
+ }
+ tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, s->tmp4);
+ break;
+ case 4: /* jmp Ev */
+ if (dflag == MO_16) {
+ tcg_gen_ext16u_tl(s->T0, s->T0);
+ }
+ gen_op_jmp_v(s->T0);
+ gen_bnd_jmp(s);
+ gen_jr(s, s->T0);
+ break;
+ case 5: /* ljmp Ev */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ gen_add_A0_im(s, 1 << ot);
+ gen_op_ld_v(s, MO_16, s->T0, s->A0);
+ do_ljmp:
+ if (PE(s) && !VM86(s)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
+ tcg_const_tl(s->pc - s->cs_base));
+ } else {
+ gen_op_movl_seg_T0_vm(s, R_CS);
+ gen_op_jmp_v(s->T1);
+ }
+ tcg_gen_ld_tl(s->tmp4, cpu_env, offsetof(CPUX86State, eip));
+ gen_jr(s, s->tmp4);
+ break;
+ case 6: /* push Ev */
+ gen_push_v(s, s->T0);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x84: /* test Ev, Gv */
+ case 0x85:
+ ot = mo_b_d(b, dflag);
+
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_v_reg(s, ot, s->T1, reg);
+ gen_op_testl_T0_T1_cc(s);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0xa8: /* test eAX, Iv */
+ case 0xa9:
+ ot = mo_b_d(b, dflag);
+ val = insn_get(env, s, ot);
+
+ gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
+ tcg_gen_movi_tl(s->T1, val);
+ gen_op_testl_T0_T1_cc(s);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0x98: /* CWDE/CBW */
+ switch (dflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
+ tcg_gen_ext32s_tl(s->T0, s->T0);
+ gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
+ break;
+#endif
+ case MO_32:
+ gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
+ tcg_gen_ext16s_tl(s->T0, s->T0);
+ gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
+ tcg_gen_ext8s_tl(s->T0, s->T0);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0x99: /* CDQ/CWD */
+ switch (dflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
+ tcg_gen_sari_tl(s->T0, s->T0, 63);
+ gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
+ break;
+#endif
+ case MO_32:
+ gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
+ tcg_gen_ext32s_tl(s->T0, s->T0);
+ tcg_gen_sari_tl(s->T0, s->T0, 31);
+ gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
+ tcg_gen_ext16s_tl(s->T0, s->T0);
+ tcg_gen_sari_tl(s->T0, s->T0, 15);
+ gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0x1af: /* imul Gv, Ev */
+ case 0x69: /* imul Gv, Ev, I */
+ case 0x6b:
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ if (b == 0x69)
+ s->rip_offset = insn_const_size(ot);
+ else if (b == 0x6b)
+ s->rip_offset = 1;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ if (b == 0x69) {
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(s->T1, val);
+ } else if (b == 0x6b) {
+ val = (int8_t)insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(s->T1, val);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T1, reg);
+ }
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+ tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
+ break;
+#endif
+ case MO_32:
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
+ tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
+ s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
+ tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+ tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
+ break;
+ default:
+ tcg_gen_ext16s_tl(s->T0, s->T0);
+ tcg_gen_ext16s_tl(s->T1, s->T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(s->T0, s->T0, s->T1);
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ tcg_gen_ext16s_tl(s->tmp0, s->T0);
+ tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ }
+ set_cc_op(s, CC_OP_MULB + ot);
+ break;
+ case 0x1c0:
+ case 0x1c1: /* xadd Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(s, ot, s->T1, rm);
+ tcg_gen_add_tl(s->T0, s->T0, s->T1);
+ gen_op_mov_reg_v(s, ot, reg, s->T1);
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (s->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
+ s->mem_index, ot | MO_LE);
+ tcg_gen_add_tl(s->T0, s->T0, s->T1);
+ } else {
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ tcg_gen_add_tl(s->T0, s->T0, s->T1);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ }
+ gen_op_mov_reg_v(s, ot, reg, s->T1);
+ }
+ gen_op_update2_cc(s);
+ set_cc_op(s, CC_OP_ADDB + ot);
+ break;
+ case 0x1b0:
+ case 0x1b1: /* cmpxchg Ev, Gv */
+ {
+ TCGv oldv, newv, cmpv;
+
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ oldv = tcg_temp_new();
+ newv = tcg_temp_new();
+ cmpv = tcg_temp_new();
+ gen_op_mov_v_reg(s, ot, newv, reg);
+ tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
+
+ if (s->prefix & PREFIX_LOCK) {
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
+ s->mem_index, ot | MO_LE);
+ gen_op_mov_reg_v(s, ot, R_EAX, oldv);
+ } else {
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(s, ot, oldv, rm);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, oldv, s->A0);
+ rm = 0; /* avoid warning */
+ }
+ gen_extu(ot, oldv);
+ gen_extu(ot, cmpv);
+ /* store value = (old == cmp ? new : old); */
+ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
+ if (mod == 3) {
+ gen_op_mov_reg_v(s, ot, R_EAX, oldv);
+ gen_op_mov_reg_v(s, ot, rm, newv);
+ } else {
+ /* Perform an unconditional store cycle like a physical CPU;
+ it must happen before changing the accumulator, to ensure
+ idempotency if the store faults and the instruction
+ is restarted. */
+ gen_op_st_v(s, ot, newv, s->A0);
+ gen_op_mov_reg_v(s, ot, R_EAX, oldv);
+ }
+ }
+ tcg_gen_mov_tl(cpu_cc_src, oldv);
+ tcg_gen_mov_tl(s->cc_srcT, cmpv);
+ tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
+ set_cc_op(s, CC_OP_SUBB + ot);
+ tcg_temp_free(oldv);
+ tcg_temp_free(newv);
+ tcg_temp_free(cmpv);
+ }
+ break;
+ case 0x1c7: /* cmpxchg8b */
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ switch ((modrm >> 3) & 7) {
+ case 1: /* CMPXCHG8, CMPXCHG16 */
+ if (mod == 3) {
+ goto illegal_op;
+ }
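+ /* Under parallel TCG a LOCKed cmpxchg8b/16b must use the
+ atomic helper; otherwise the unlocked variant cannot
+ race with another vCPU anyway. */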
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ if ((s->prefix & PREFIX_LOCK) &&
+ (tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_helper_cmpxchg16b(cpu_env, s->A0);
+ } else {
+ gen_helper_cmpxchg16b_unlocked(cpu_env, s->A0);
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ }
+#endif
+ if (!(s->cpuid_features & CPUID_CX8)) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ if ((s->prefix & PREFIX_LOCK) &&
+ (tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_helper_cmpxchg8b(cpu_env, s->A0);
+ } else {
+ gen_helper_cmpxchg8b_unlocked(cpu_env, s->A0);
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+
+ case 7: /* RDSEED */
+ case 6: /* RDRAND */
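+ /* RDSEED is handled identically to RDRAND here. */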
+ if (mod != 3 ||
+ (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
+ !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
+ goto illegal_op;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdrand(s->T0, cpu_env);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_reg_v(s, dflag, rm, s->T0);
+ set_cc_op(s, CC_OP_EFLAGS);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+
+ default:
+ goto illegal_op;
+ }
+ break;
+
+ /**************************/
+ /* push/pop */
+ case 0x50 ... 0x57: /* push */
+ gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
+ gen_push_v(s, s->T0);
+ break;
+ case 0x58 ... 0x5f: /* pop */
+ ot = gen_pop_T0(s);
+ /* NOTE: order is important for pop %sp */
+ gen_pop_update(s, ot);
+ gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
+ break;
+ case 0x60: /* pusha */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_pusha(s);
+ break;
+ case 0x61: /* popa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_popa(s);
+ break;
+ case 0x68: /* push Iv */
+ case 0x6a:
+ ot = mo_pushpop(s, dflag);
+ if (b == 0x68)
+ val = insn_get(env, s, ot);
+ else
+ val = (int8_t)insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(s->T0, val);
+ gen_push_v(s, s->T0);
+ break;
+ case 0x8f: /* pop Ev */
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ ot = gen_pop_T0(s);
+ if (mod == 3) {
+ /* NOTE: order is important for pop %sp */
+ gen_pop_update(s, ot);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ } else {
+ /* NOTE: order is important too for MMU exceptions */
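+ /* popl_esp_hack makes the effective-address computation
+ account for ESP already having been popped when it is
+ used as the base register. */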
+ s->popl_esp_hack = 1 << ot;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ s->popl_esp_hack = 0;
+ gen_pop_update(s, ot);
+ }
+ break;
+ case 0xc8: /* enter */
+ {
+ int level;
+ val = x86_lduw_code(env, s);
+ level = x86_ldub_code(env, s);
+ gen_enter(s, val, level);
+ }
+ break;
+ case 0xc9: /* leave */
+ gen_leave(s);
+ break;
+ case 0x06: /* push es */
+ case 0x0e: /* push cs */
+ case 0x16: /* push ss */
+ case 0x1e: /* push ds */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_op_movl_T0_seg(s, b >> 3);
+ gen_push_v(s, s->T0);
+ break;
+ case 0x1a0: /* push fs */
+ case 0x1a8: /* push gs */
+ gen_op_movl_T0_seg(s, (b >> 3) & 7);
+ gen_push_v(s, s->T0);
+ break;
+ case 0x07: /* pop es */
+ case 0x17: /* pop ss */
+ case 0x1f: /* pop ds */
+ if (CODE64(s))
+ goto illegal_op;
+ reg = b >> 3;
+ ot = gen_pop_T0(s);
+ gen_movl_seg_T0(s, reg);
+ gen_pop_update(s, ot);
+ /* Note that gen_movl_seg_T0 always sets is_jmp when reg == R_SS. */
+ if (s->base.is_jmp) {
+ gen_jmp_im(s, s->pc - s->cs_base);
+ if (reg == R_SS) {
+ s->flags &= ~HF_TF_MASK;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
+ }
+ break;
+ case 0x1a1: /* pop fs */
+ case 0x1a9: /* pop gs */
+ ot = gen_pop_T0(s);
+ gen_movl_seg_T0(s, (b >> 3) & 7);
+ gen_pop_update(s, ot);
+ if (s->base.is_jmp) {
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /**************************/
+ /* mov */
+ case 0x88:
+ case 0x89: /* mov Gv, Ev */
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ /* generate a generic store */
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+ break;
+ case 0xc6:
+ case 0xc7: /* mov Ev, Iv */
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ if (mod != 3) {
+ s->rip_offset = insn_const_size(ot);
+ gen_lea_modrm(env, s, modrm);
+ }
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(s->T0, val);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
+ }
+ break;
+ case 0x8a:
+ case 0x8b: /* mov Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ case 0x8e: /* mov seg, Gv */
+ modrm = x86_ldub_code(env, s);
+ reg = (modrm >> 3) & 7;
+ if (reg >= 6 || reg == R_CS)
+ goto illegal_op;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_movl_seg_T0(s, reg);
+ /* Note that gen_movl_seg_T0 always sets is_jmp when reg == R_SS. */
+ if (s->base.is_jmp) {
+ gen_jmp_im(s, s->pc - s->cs_base);
+ if (reg == R_SS) {
+ s->flags &= ~HF_TF_MASK;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
+ }
+ break;
+ case 0x8c: /* mov Gv, seg */
+ modrm = x86_ldub_code(env, s);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (reg >= 6)
+ goto illegal_op;
+ gen_op_movl_T0_seg(s, reg);
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+
+ case 0x1b6: /* movzbS Gv, Eb */
+ case 0x1b7: /* movzwS Gv, Eb */
+ case 0x1be: /* movsbS Gv, Eb */
+ case 0x1bf: /* movswS Gv, Eb */
+ {
+ MemOp d_ot;
+ MemOp s_ot;
+
+ /* d_ot is the size of destination */
+ d_ot = dflag;
+ /* ot is the size of source */
+ ot = (b & 1) + MO_8;
+ /* s_ot is the sign+size of source */
+ s_ot = b & 8 ? MO_SIGN | ot : ot;
+
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+
+ if (mod == 3) {
+ if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
+ tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ switch (s_ot) {
+ case MO_UB:
+ tcg_gen_ext8u_tl(s->T0, s->T0);
+ break;
+ case MO_SB:
+ tcg_gen_ext8s_tl(s->T0, s->T0);
+ break;
+ case MO_UW:
+ tcg_gen_ext16u_tl(s->T0, s->T0);
+ break;
+ default:
+ case MO_SW:
+ tcg_gen_ext16s_tl(s->T0, s->T0);
+ break;
+ }
+ }
+ gen_op_mov_reg_v(s, d_ot, reg, s->T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, s_ot, s->T0, s->A0);
+ gen_op_mov_reg_v(s, d_ot, reg, s->T0);
+ }
+ }
+ break;
+
+ case 0x8d: /* lea */
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ {
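+ /* LEA yields the bare effective address: passing -1 for
+ both segment arguments skips the segment base. */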
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(s, a);
+ gen_lea_v_seg(s, s->aflag, ea, -1, -1);
+ gen_op_mov_reg_v(s, dflag, reg, s->A0);
+ }
+ break;
+
+ case 0xa0: /* mov EAX, Ov */
+ case 0xa1:
+ case 0xa2: /* mov Ov, EAX */
+ case 0xa3:
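+ /* Direct moffs forms: the offset is an address-size
+ immediate (a full 8 bytes in 64-bit mode); bit 1 of
+ the opcode selects load vs. store. */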
+ {
+ target_ulong offset_addr;
+
+ ot = mo_b_d(b, dflag);
+ switch (s->aflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ offset_addr = x86_ldq_code(env, s);
+ break;
+#endif
+ default:
+ offset_addr = insn_get(env, s, s->aflag);
+ break;
+ }
+ tcg_gen_movi_tl(s->A0, offset_addr);
+ gen_add_A0_ds_seg(s);
+ if ((b & 2) == 0) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ }
+ }
+ break;
+ case 0xd7: /* xlat */
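+ /* AL = [rBX + zero-extended AL], DS segment by default */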
+ tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
+ tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
+ tcg_gen_add_tl(s->A0, s->A0, s->T0);
+ gen_extu(s->aflag, s->A0);
+ gen_add_A0_ds_seg(s);
+ gen_op_ld_v(s, MO_8, s->T0, s->A0);
+ gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
+ break;
+ case 0xb0 ... 0xb7: /* mov R, Ib */
+ val = insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(s->T0, val);
+ gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
+ break;
+ case 0xb8 ... 0xbf: /* mov R, Iv */
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ uint64_t tmp;
+ /* 64 bit case */
+ tmp = x86_ldq_code(env, s);
+ reg = (b & 7) | REX_B(s);
+ tcg_gen_movi_tl(s->T0, tmp);
+ gen_op_mov_reg_v(s, MO_64, reg, s->T0);
+ } else
+#endif
+ {
+ ot = dflag;
+ val = insn_get(env, s, ot);
+ reg = (b & 7) | REX_B(s);
+ tcg_gen_movi_tl(s->T0, val);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ }
+ break;
+
+ case 0x91 ... 0x97: /* xchg R, EAX */
+ do_xchg_reg_eax:
+ ot = dflag;
+ reg = (b & 7) | REX_B(s);
+ rm = R_EAX;
+ goto do_xchg_reg;
+ case 0x86:
+ case 0x87: /* xchg Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ do_xchg_reg:
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ gen_op_mov_v_reg(s, ot, s->T1, rm);
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ gen_op_mov_reg_v(s, ot, reg, s->T1);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ /* for xchg, lock is implicit */
+ tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
+ s->mem_index, ot | MO_LE);
+ gen_op_mov_reg_v(s, ot, reg, s->T1);
+ }
+ break;
+ case 0xc4: /* les Gv */
+ /* In CODE64 this is VEX3; see above. */
+ op = R_ES;
+ goto do_lxx;
+ case 0xc5: /* lds Gv */
+ /* In CODE64 this is VEX2; see above. */
+ op = R_DS;
+ goto do_lxx;
+ case 0x1b2: /* lss Gv */
+ op = R_SS;
+ goto do_lxx;
+ case 0x1b4: /* lfs Gv */
+ op = R_FS;
+ goto do_lxx;
+ case 0x1b5: /* lgs Gv */
+ op = R_GS;
+ do_lxx:
+ ot = dflag != MO_16 ? MO_32 : MO_16;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, s->T1, s->A0);
+ gen_add_A0_im(s, 1 << ot);
+ /* load the segment first to handle exceptions properly */
+ gen_op_ld_v(s, MO_16, s->T0, s->A0);
+ gen_movl_seg_T0(s, op);
+ /* then put the data */
+ gen_op_mov_reg_v(s, ot, reg, s->T1);
+ if (s->base.is_jmp) {
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /************************/
+ /* shifts */
+ case 0xc0:
+ case 0xc1:
+ /* shift Ev,Ib */
+ shift = 2;
+ grp2:
+ {
+ ot = mo_b_d(b, dflag);
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ if (shift == 2) {
+ s->rip_offset = 1;
+ }
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = (modrm & 7) | REX_B(s);
+ }
+
+ /* count source: shift == 0 -> CL, 1 -> constant 1, 2 -> immediate byte */
+ if (shift == 0) {
+ gen_shift(s, op, ot, opreg, OR_ECX);
+ } else {
+ if (shift == 2) {
+ shift = x86_ldub_code(env, s);
+ }
+ gen_shifti(s, op, ot, opreg, shift);
+ }
+ }
+ break;
+ case 0xd0:
+ case 0xd1:
+ /* shift Ev,1 */
+ shift = 1;
+ goto grp2;
+ case 0xd2:
+ case 0xd3:
+ /* shift Ev,cl */
+ shift = 0;
+ goto grp2;
+
+ case 0x1a4: /* shld imm */
+ op = 0;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1a5: /* shld cl */
+ op = 0;
+ shift = 0;
+ goto do_shiftd;
+ case 0x1ac: /* shrd imm */
+ op = 1;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1ad: /* shrd cl */
+ op = 1;
+ shift = 0;
+ do_shiftd:
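+ /* op: 0 = shld, 1 = shrd; shift: 1 = immediate count,
+ 0 = count in CL */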
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm;
+ }
+ gen_op_mov_v_reg(s, ot, s->T1, reg);
+
+ if (shift) {
+ TCGv imm = tcg_const_tl(x86_ldub_code(env, s));
+ gen_shiftd_rm_T1(s, ot, opreg, op, imm);
+ tcg_temp_free(imm);
+ } else {
+ gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
+ }
+ break;
+
+ /************************/
+ /* floats */
+ case 0xd8 ... 0xdf:
+ {
+ bool update_fip = true;
+
+ if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
+ /* if CR0.EM or CR0.TS is set, generate an FPU exception */
+ /* XXX: what to do if illegal op? */
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = ((b & 7) << 3) | ((modrm >> 3) & 7);
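+ /* op is a 6-bit index: the low three opcode bits of
+ 0xd8..0xdf shifted left by 3, ORed with modrm.reg;
+ all the FPU cases below switch on this value. */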
+ if (mod != 3) {
+ /* memory op */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(s, a);
+ TCGv last_addr = tcg_temp_new();
+ bool update_fdp = true;
+
+ tcg_gen_mov_tl(last_addr, ea);
+ gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+
+ switch (op) {
+ case 0x00 ... 0x07: /* fxxxs */
+ case 0x10 ... 0x17: /* fixxxl */
+ case 0x20 ... 0x27: /* fxxxl */
+ case 0x30 ... 0x37: /* fixxx */
+ {
+ int op1;
+ op1 = op & 7;
+
+ switch (op >> 4) {
+ case 0:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
+ break;
+ case 1:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
+ break;
+ case 2:
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
+ break;
+ case 3:
+ default:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LESW);
+ gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
+ break;
+ }
+
+ gen_helper_fp_arith_ST0_FT0(op1);
+ if (op1 == 3) {
+ /* fcomp needs pop */
+ gen_helper_fpop(cpu_env);
+ }
+ }
+ break;
+ case 0x08: /* flds */
+ case 0x0a: /* fsts */
+ case 0x0b: /* fstps */
+ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
+ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
+ case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
+ switch (op & 7) {
+ case 0:
+ switch (op >> 4) {
+ case 0:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
+ break;
+ case 1:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
+ break;
+ case 2:
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
+ break;
+ case 3:
+ default:
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LESW);
+ gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
+ break;
+ }
+ break;
+ case 1:
+ /* XXX: the corresponding CPUID bit must be tested! */
+ switch (op >> 4) {
+ case 1:
+ gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 2:
+ gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ break;
+ case 3:
+ default:
+ gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUW);
+ break;
+ }
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ switch (op >> 4) {
+ case 0:
+ gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 1:
+ gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 2:
+ gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ break;
+ case 3:
+ default:
+ gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUW);
+ break;
+ }
+ if ((op & 7) == 3) {
+ gen_helper_fpop(cpu_env);
+ }
+ break;
+ }
+ break;
+ case 0x0c: /* fldenv mem */
+ gen_helper_fldenv(cpu_env, s->A0,
+ tcg_const_i32(dflag - 1));
+ update_fip = update_fdp = false;
+ break;
+ case 0x0d: /* fldcw mem */
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUW);
+ gen_helper_fldcw(cpu_env, s->tmp2_i32);
+ update_fip = update_fdp = false;
+ break;
+ case 0x0e: /* fnstenv mem */
+ gen_helper_fstenv(cpu_env, s->A0,
+ tcg_const_i32(dflag - 1));
+ update_fip = update_fdp = false;
+ break;
+ case 0x0f: /* fnstcw mem */
+ gen_helper_fnstcw(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUW);
+ update_fip = update_fdp = false;
+ break;
+ case 0x1d: /* fldt mem */
+ gen_helper_fldt_ST0(cpu_env, s->A0);
+ break;
+ case 0x1f: /* fstpt mem */
+ gen_helper_fstt_ST0(cpu_env, s->A0);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x2c: /* frstor mem */
+ gen_helper_frstor(cpu_env, s->A0,
+ tcg_const_i32(dflag - 1));
+ update_fip = update_fdp = false;
+ break;
+ case 0x2e: /* fnsave mem */
+ gen_helper_fsave(cpu_env, s->A0,
+ tcg_const_i32(dflag - 1));
+ update_fip = update_fdp = false;
+ break;
+ case 0x2f: /* fnstsw mem */
+ gen_helper_fnstsw(s->tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
+ s->mem_index, MO_LEUW);
+ update_fip = update_fdp = false;
+ break;
+ case 0x3c: /* fbld */
+ gen_helper_fbld_ST0(cpu_env, s->A0);
+ break;
+ case 0x3e: /* fbstp */
+ gen_helper_fbst_ST0(cpu_env, s->A0);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x3d: /* fildll */
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
+ break;
+ case 0x3f: /* fistpll */
+ gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+
+ if (update_fdp) {
+ int last_seg = s->override >= 0 ? s->override : a.def_seg;
+
+ tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State,
+ segs[last_seg].selector));
+ tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State, fpds));
+ tcg_gen_st_tl(last_addr, cpu_env,
+ offsetof(CPUX86State, fpdp));
+ }
+ tcg_temp_free(last_addr);
+ } else {
+ /* register float ops */
+ opreg = rm;
+
+ switch (op) {
+ case 0x08: /* fld sti */
+ gen_helper_fpush(cpu_env);
+ gen_helper_fmov_ST0_STN(cpu_env,
+ tcg_const_i32((opreg + 1) & 7));
+ break;
+ case 0x09: /* fxchg sti */
+ case 0x29: /* fxchg4 sti, undocumented op */
+ case 0x39: /* fxchg7 sti, undocumented op */
+ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x0a: /* grp d9/2 */
+ switch (rm) {
+ case 0: /* fnop */
+ /* check exceptions (FreeBSD FPU probe) */
+ gen_helper_fwait(cpu_env);
+ update_fip = false;
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x0c: /* grp d9/4 */
+ switch (rm) {
+ case 0: /* fchs */
+ gen_helper_fchs_ST0(cpu_env);
+ break;
+ case 1: /* fabs */
+ gen_helper_fabs_ST0(cpu_env);
+ break;
+ case 4: /* ftst */
+ gen_helper_fldz_FT0(cpu_env);
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 5: /* fxam */
+ gen_helper_fxam_ST0(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x0d: /* grp d9/5 */
+ {
+ switch (rm) {
+ case 0:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fld1_ST0(cpu_env);
+ break;
+ case 1:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldl2t_ST0(cpu_env);
+ break;
+ case 2:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldl2e_ST0(cpu_env);
+ break;
+ case 3:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldpi_ST0(cpu_env);
+ break;
+ case 4:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldlg2_ST0(cpu_env);
+ break;
+ case 5:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldln2_ST0(cpu_env);
+ break;
+ case 6:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldz_ST0(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ }
+ break;
+ case 0x0e: /* grp d9/6 */
+ switch (rm) {
+ case 0: /* f2xm1 */
+ gen_helper_f2xm1(cpu_env);
+ break;
+ case 1: /* fyl2x */
+ gen_helper_fyl2x(cpu_env);
+ break;
+ case 2: /* fptan */
+ gen_helper_fptan(cpu_env);
+ break;
+ case 3: /* fpatan */
+ gen_helper_fpatan(cpu_env);
+ break;
+ case 4: /* fxtract */
+ gen_helper_fxtract(cpu_env);
+ break;
+ case 5: /* fprem1 */
+ gen_helper_fprem1(cpu_env);
+ break;
+ case 6: /* fdecstp */
+ gen_helper_fdecstp(cpu_env);
+ break;
+ default:
+ case 7: /* fincstp */
+ gen_helper_fincstp(cpu_env);
+ break;
+ }
+ break;
+ case 0x0f: /* grp d9/7 */
+ switch (rm) {
+ case 0: /* fprem */
+ gen_helper_fprem(cpu_env);
+ break;
+ case 1: /* fyl2xp1 */
+ gen_helper_fyl2xp1(cpu_env);
+ break;
+ case 2: /* fsqrt */
+ gen_helper_fsqrt(cpu_env);
+ break;
+ case 3: /* fsincos */
+ gen_helper_fsincos(cpu_env);
+ break;
+ case 5: /* fscale */
+ gen_helper_fscale(cpu_env);
+ break;
+ case 4: /* frndint */
+ gen_helper_frndint(cpu_env);
+ break;
+ case 6: /* fsin */
+ gen_helper_fsin(cpu_env);
+ break;
+ default:
+ case 7: /* fcos */
+ gen_helper_fcos(cpu_env);
+ break;
+ }
+ break;
+ case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
+ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
+ case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
+ {
+ int op1;
+
+ op1 = op & 7;
+ if (op >= 0x20) {
+ gen_helper_fp_arith_STN_ST0(op1, opreg);
+ if (op >= 0x30) {
+ gen_helper_fpop(cpu_env);
+ }
+ } else {
+ gen_helper_fmov_FT0_STN(cpu_env,
+ tcg_const_i32(opreg));
+ gen_helper_fp_arith_ST0_FT0(op1);
+ }
+ }
+ break;
+ case 0x02: /* fcom */
+ case 0x22: /* fcom2, undocumented op */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 0x03: /* fcomp */
+ case 0x23: /* fcomp3, undocumented op */
+ case 0x32: /* fcomp5, undocumented op */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x15: /* da/5 */
+ switch (rm) {
+ case 1: /* fucompp */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x1c:
+ switch (rm) {
+ case 0: /* feni (287 only, just do nop here) */
+ break;
+ case 1: /* fdisi (287 only, just do nop here) */
+ break;
+ case 2: /* fclex */
+ gen_helper_fclex(cpu_env);
+ update_fip = false;
+ break;
+ case 3: /* fninit */
+ gen_helper_fninit(cpu_env);
+ update_fip = false;
+ break;
+ case 4: /* fsetpm (287 only, just do nop here) */
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x1d: /* fucomi */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x1e: /* fcomi */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x28: /* ffree sti */
+ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x2a: /* fst sti */
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x2b: /* fstp sti */
+ case 0x0b: /* fstp1 sti, undocumented op */
+ case 0x3a: /* fstp8 sti, undocumented op */
+ case 0x3b: /* fstp9 sti, undocumented op */
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x2c: /* fucom st(i) */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ break;
+ case 0x2d: /* fucomp st(i) */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x33: /* de/3 */
+ switch (rm) {
+ case 1: /* fcompp */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x38: /* ffreep sti, undocumented op */
+ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x3c: /* df/4 */
+ switch (rm) {
+ case 0:
+ gen_helper_fnstsw(s->tmp2_i32, cpu_env);
+ tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+ gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x3d: /* fucomip */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x3e: /* fcomip */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x10 ... 0x13: /* fcmovxx */
+ case 0x18 ... 0x1b:
+ {
+ int op1;
+ TCGLabel *l1;
+ static const uint8_t fcmov_cc[8] = {
+ (JCC_B << 1),
+ (JCC_Z << 1),
+ (JCC_BE << 1),
+ (JCC_P << 1),
+ };
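+ /* The generated branch skips the fmov, so the condition
+ is used inverted: 0xda forms (op bit 3 clear) move
+ when cc holds, 0xdb forms when it does not. */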
+
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
+ l1 = gen_new_label();
+ gen_jcc1_noeob(s, op1, l1);
+ gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_set_label(l1);
+ }
+ break;
+ default:
+ goto unknown_op;
+ }
+ }
+
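+ /* Most x87 insns record their CS:EIP in the FPU
+ instruction pointer registers; the control ops above
+ cleared update_fip and skip this. */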
+ if (update_fip) {
+ tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State, segs[R_CS].selector));
+ tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
+ offsetof(CPUX86State, fpcs));
+ tcg_gen_st_tl(tcg_constant_tl(pc_start - s->cs_base),
+ cpu_env, offsetof(CPUX86State, fpip));
+ }
+ }
+ break;
+ /************************/
+ /* string ops */
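+ /* With a REP prefix the gen_repz_* helpers emit their own
+ loop and jump out of the TB; the plain forms fall
+ through to the normal instruction end. */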
+
+ case 0xa4: /* movsS */
+ case 0xa5:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_movs(s, ot);
+ }
+ break;
+
+ case 0xaa: /* stosS */
+ case 0xab:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_stos(s, ot);
+ }
+ break;
+ case 0xac: /* lodsS */
+ case 0xad:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_lods(s, ot);
+ }
+ break;
+ case 0xae: /* scasS */
+ case 0xaf:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_scas(s, ot);
+ }
+ break;
+
+ case 0xa6: /* cmpsS */
+ case 0xa7:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_cmps(s, ot);
+ }
+ break;
+ case 0x6c: /* insS */
+ case 0x6d:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32,
+ SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ /* jump generated by gen_repz_ins */
+ } else {
+ gen_ins(s, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ }
+ break;
+ case 0x6e: /* outsS */
+ case 0x6f:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ /* jump generated by gen_repz_outs */
+ } else {
+ gen_outs(s, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ }
+ break;
+
+ /************************/
+ /* port I/O */
+
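+ /* Common in/out pattern: gen_check_io validates I/O
+ permissions and SVM intercepts; under icount the access
+ is bracketed by gen_io_start and a TB-ending jump. */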
+ case 0xe4:
+ case 0xe5:
+ ot = mo_b_d32(b, dflag);
+ val = x86_ldub_code(env, s);
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_in_func(ot, s->T1, s->tmp2_i32);
+ gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xe6:
+ case 0xe7:
+ ot = mo_b_d32(b, dflag);
+ val = x86_ldub_code(env, s);
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
+ gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xec:
+ case 0xed:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_in_func(ot, s->T1, s->tmp2_i32);
+ gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xee:
+ case 0xef:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
+ break;
+ }
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
+ tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
+ gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
+ gen_bpt_io(s, s->tmp2_i32, ot);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+
+ /************************/
+ /* control */
+ case 0xc2: /* ret im */
+ val = x86_ldsw_code(env, s);
+ ot = gen_pop_T0(s);
+ gen_stack_update(s, val + (1 << ot));
+ /* Note that gen_pop_T0 uses a zero-extending load. */
+ gen_op_jmp_v(s->T0);
+ gen_bnd_jmp(s);
+ gen_jr(s, s->T0);
+ break;
+ case 0xc3: /* ret */
+ ot = gen_pop_T0(s);
+ gen_pop_update(s, ot);
+ /* Note that gen_pop_T0 uses a zero-extending load. */
+ gen_op_jmp_v(s->T0);
+ gen_bnd_jmp(s);
+ gen_jr(s, s->T0);
+ break;
+ case 0xca: /* lret im */
+ val = x86_ldsw_code(env, s);
+ do_lret:
+ if (PE(s) && !VM86(s)) {
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
+ tcg_const_i32(val));
+ } else {
+ gen_stack_A0(s);
+ /* pop offset */
+ gen_op_ld_v(s, dflag, s->T0, s->A0);
+ /* NOTE: keeping EIP updated is not a problem in case of
+ exception */
+ gen_op_jmp_v(s->T0);
+ /* pop selector */
+ gen_add_A0_im(s, 1 << dflag);
+ gen_op_ld_v(s, dflag, s->T0, s->A0);
+ gen_op_movl_seg_T0_vm(s, R_CS);
+ /* add stack offset */
+ gen_stack_update(s, val + (2 << dflag));
+ }
+ gen_eob(s);
+ break;
+ case 0xcb: /* lret */
+ val = 0;
+ goto do_lret;
+ case 0xcf: /* iret */
+ gen_svm_check_intercept(s, SVM_EXIT_IRET);
+ if (!PE(s) || VM86(s)) {
+ /* real mode or vm86 mode */
+ if (!check_vm86_iopl(s)) {
+ break;
+ }
+ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+ } else {
+ gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
+ tcg_const_i32(s->pc - s->cs_base));
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ gen_eob(s);
+ break;
+ case 0xe8: /* call im */
+ {
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ } else if (!CODE64(s)) {
+ tval &= 0xffffffff;
+ }
+ tcg_gen_movi_tl(s->T0, next_eip);
+ gen_push_v(s, s->T0);
+ gen_bnd_jmp(s);
+ gen_jmp(s, tval);
+ }
+ break;
+ case 0x9a: /* lcall im */
+ {
+ unsigned int selector, offset;
+
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, MO_16);
+
+ tcg_gen_movi_tl(s->T0, selector);
+ tcg_gen_movi_tl(s->T1, offset);
+ }
+ goto do_lcall;
+ case 0xe9: /* jmp im */
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ tval += s->pc - s->cs_base;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ } else if (!CODE64(s)) {
+ tval &= 0xffffffff;
+ }
+ gen_bnd_jmp(s);
+ gen_jmp(s, tval);
+ break;
+ case 0xea: /* ljmp im */
+ {
+ unsigned int selector, offset;
+
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, MO_16);
+
+ tcg_gen_movi_tl(s->T0, selector);
+ tcg_gen_movi_tl(s->T1, offset);
+ }
+ goto do_ljmp;
+ case 0xeb: /* jmp Jb */
+ tval = (int8_t)insn_get(env, s, MO_8);
+ tval += s->pc - s->cs_base;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
+ gen_jmp(s, tval);
+ break;
+ case 0x70 ... 0x7f: /* jcc Jb */
+ tval = (int8_t)insn_get(env, s, MO_8);
+ goto do_jcc;
+ case 0x180 ... 0x18f: /* jcc Jv */
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ do_jcc:
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
+ gen_bnd_jmp(s);
+ gen_jcc(s, b, tval, next_eip);
+ break;
+
+ case 0x190 ... 0x19f: /* setcc Gv */
+ modrm = x86_ldub_code(env, s);
+ gen_setcc1(s, b, s->T0);
+ gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
+ break;
+ case 0x140 ... 0x14f: /* cmov Gv, Ev */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ gen_cmovcc1(env, s, ot, b, modrm, reg);
+ break;
+
+ /************************/
+ /* flags */
+ case 0x9c: /* pushf */
+ gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
+ if (check_vm86_iopl(s)) {
+ gen_update_cc_op(s);
+ gen_helper_read_eflags(s->T0, cpu_env);
+ gen_push_v(s, s->T0);
+ }
+ break;
+ case 0x9d: /* popf */
+ gen_svm_check_intercept(s, SVM_EXIT_POPF);
+ if (check_vm86_iopl(s)) {
+ ot = gen_pop_T0(s);
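+ /* Which EFLAGS bits may be written depends on privilege:
+ CPL 0 may change IF and IOPL, CPL <= IOPL only IF,
+ otherwise neither. */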
+ if (CPL(s) == 0) {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK |
+ IF_MASK |
+ IOPL_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK |
+ IF_MASK | IOPL_MASK)
+ & 0xffff));
+ }
+ } else {
+ if (CPL(s) <= IOPL(s)) {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK |
+ AC_MASK |
+ ID_MASK |
+ NT_MASK |
+ IF_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK |
+ AC_MASK |
+ ID_MASK |
+ NT_MASK |
+ IF_MASK)
+ & 0xffff));
+ }
+ } else {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, s->T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK)
+ & 0xffff));
+ }
+ }
+ }
+ gen_pop_update(s, ot);
+ set_cc_op(s, CC_OP_EFLAGS);
+ /* abort translation because TF/AC flag may change */
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ case 0x9e: /* sahf */
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+ goto illegal_op;
+ gen_op_mov_v_reg(s, MO_8, s->T0, R_AH);
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
+ tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
+ break;
+ case 0x9f: /* lahf */
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+ goto illegal_op;
+ gen_compute_eflags(s);
+ /* Note: gen_compute_eflags() only gives the condition codes */
+ tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
+ gen_op_mov_reg_v(s, MO_8, R_AH, s->T0);
+ break;
+ case 0xf5: /* cmc */
+ gen_compute_eflags(s);
+ tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+ break;
+ case 0xf8: /* clc */
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
+ break;
+ case 0xf9: /* stc */
+ gen_compute_eflags(s);
+ tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+ break;
+ case 0xfc: /* cld */
+ tcg_gen_movi_i32(s->tmp2_i32, 1);
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ break;
+ case 0xfd: /* std */
+ tcg_gen_movi_i32(s->tmp2_i32, -1);
+ tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ break;
+
+ /************************/
+ /* bit operations */
+ case 0x1ba: /* bt/bts/btr/btc Gv, im */
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ op = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ s->rip_offset = 1;
+ gen_lea_modrm(env, s, modrm);
+ if (!(s->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ }
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ }
+ /* load shift */
+ val = x86_ldub_code(env, s);
+ tcg_gen_movi_tl(s->T1, val);
+ if (op < 4)
+ goto unknown_op;
+ op -= 4;
+ goto bt_op;
+ case 0x1a3: /* bt Gv, Ev */
+ op = 0;
+ goto do_btx;
+ case 0x1ab: /* bts */
+ op = 1;
+ goto do_btx;
+ case 0x1b3: /* btr */
+ op = 2;
+ goto do_btx;
+ case 0x1bb: /* btc */
+ op = 3;
+ do_btx:
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(s, MO_32, s->T1, reg);
+ if (mod != 3) {
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ /* The bit offset taken from a register may exceed the
+ operand size; add (offset >> (3 + ot)) operand-sized
+ units to the address as a signed displacement. */
+ gen_exts(ot, s->T1);
+ tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
+ tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
+ tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a), s->tmp0);
+ gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
+ if (!(s->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ }
+ } else {
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ }
+ bt_op:
+ tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
+ tcg_gen_movi_tl(s->tmp0, 1);
+ tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
+ if (s->prefix & PREFIX_LOCK) {
+ switch (op) {
+ case 0: /* bt */
+ /* Needs no atomic ops; we suppressed the normal
+ memory load for LOCK above so do it now. */
+ gen_op_ld_v(s, ot, s->T0, s->A0);
+ break;
+ case 1: /* bts */
+ tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ case 2: /* btr */
+ tcg_gen_not_tl(s->tmp0, s->tmp0);
+ tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ default:
+ case 3: /* btc */
+ tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ }
+ tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
+ } else {
+ tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
+ switch (op) {
+ case 0: /* bt */
+ /* Data already loaded; nothing to do. */
+ break;
+ case 1: /* bts */
+ tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
+ break;
+ case 2: /* btr */
+ tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
+ break;
+ default:
+ case 3: /* btc */
+ tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
+ break;
+ }
+ if (op != 0) {
+ if (mod != 3) {
+ gen_op_st_v(s, ot, s->T0, s->A0);
+ } else {
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ }
+ }
+ }
+
+ /* Delay all CC updates until after the store above. Note that
+ C is the result of the test, Z is unchanged, and the others
+ are all undefined. */
+ switch (s->cc_op) {
+ case CC_OP_MULB ... CC_OP_MULQ:
+ case CC_OP_ADDB ... CC_OP_ADDQ:
+ case CC_OP_ADCB ... CC_OP_ADCQ:
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ case CC_OP_SBBB ... CC_OP_SBBQ:
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
+ case CC_OP_INCB ... CC_OP_INCQ:
+ case CC_OP_DECB ... CC_OP_DECQ:
+ case CC_OP_SHLB ... CC_OP_SHLQ:
+ case CC_OP_SARB ... CC_OP_SARQ:
+ case CC_OP_BMILGB ... CC_OP_BMILGQ:
+ /* Z was going to be computed from the non-zero status of CC_DST.
+ We can get that same Z value (and the new C value) by leaving
+ CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
+ same width. */
+ tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
+ set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
+ break;
+ default:
+ /* Otherwise, generate EFLAGS and replace the C bit. */
+ gen_compute_eflags(s);
+ tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
+ ctz32(CC_C), 1);
+ break;
+ }
+ break;
+ case 0x1bc: /* bsf / tzcnt */
+ case 0x1bd: /* bsr / lzcnt */
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_extu(ot, s->T0);
+
+ /* Note that lzcnt and tzcnt are in different extensions. */
+ if ((prefixes & PREFIX_REPZ)
+ && (b & 1
+ ? s->cpuid_ext3_features & CPUID_EXT3_ABM
+ : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
+ int size = 8 << ot;
+ /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
+ tcg_gen_mov_tl(cpu_cc_src, s->T0);
+ if (b & 1) {
+ /* For lzcnt, reduce the target_ulong result by the
+ number of zeros that we expect to find at the top. */
+ tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
+ tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
+ } else {
+ /* For tzcnt, a zero input must return the operand size. */
+ tcg_gen_ctzi_tl(s->T0, s->T0, size);
+ }
+ /* For lzcnt/tzcnt, the Z bit is defined in terms of the result. */
+ gen_op_update1_cc(s);
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ } else {
+ /* For bsr/bsf, only the Z bit is defined, and it reflects
+ the input rather than the result. */
+ tcg_gen_mov_tl(cpu_cc_dst, s->T0);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+
+ /* ??? The manual says that the output is undefined when the
+ input is zero, but real hardware leaves it unchanged, and
+ real programs appear to depend on that. Accomplish this
+ by passing the output as the value to return upon zero. */
+ if (b & 1) {
+ /* For bsr, return the bit index of the first 1 bit,
+ not the count of leading zeros. */
+ tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
+ tcg_gen_clz_tl(s->T0, s->T0, s->T1);
+ tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
+ } else {
+ tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
+ }
+ }
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+ break;
+ /************************/
+ /* bcd */
+ case 0x27: /* daa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_daa(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x2f: /* das */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_das(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x37: /* aaa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_aaa(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x3f: /* aas */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_aas(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0xd4: /* aam */
+ if (CODE64(s))
+ goto illegal_op;
+ val = x86_ldub_code(env, s);
+ if (val == 0) {
+ gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
+ } else {
+ gen_helper_aam(cpu_env, tcg_const_i32(val));
+ set_cc_op(s, CC_OP_LOGICB);
+ }
+ break;
+ case 0xd5: /* aad */
+ if (CODE64(s))
+ goto illegal_op;
+ val = x86_ldub_code(env, s);
+ gen_helper_aad(cpu_env, tcg_const_i32(val));
+ set_cc_op(s, CC_OP_LOGICB);
+ break;
+ /************************/
+ /* misc */
+ case 0x90: /* nop */
+ /* XXX: correct lock test for all insns */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
+ if (REX_B(s)) {
+ goto do_xchg_reg_eax;
+ }
+ if (prefixes & PREFIX_REPZ) {
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
+ s->base.is_jmp = DISAS_NORETURN;
+ }
+ break;
+ case 0x9b: /* fwait */
+ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
+ (HF_MP_MASK | HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ } else {
+ gen_helper_fwait(cpu_env);
+ }
+ break;
+ case 0xcc: /* int3 */
+ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+ break;
+ case 0xcd: /* int N */
+ val = x86_ldub_code(env, s);
+ if (check_vm86_iopl(s)) {
+ gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
+ }
+ break;
+ case 0xce: /* into */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
+ break;
+#ifdef WANT_ICEBP
+ case 0xf1: /* icebp (undocumented, exits to external debugger) */
+ gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
+ gen_debug(s);
+ break;
+#endif
+ case 0xfa: /* cli */
+ if (check_iopl(s)) {
+ gen_helper_cli(cpu_env);
+ }
+ break;
+ case 0xfb: /* sti */
+ if (check_iopl(s)) {
+ gen_helper_sti(cpu_env);
+ /* interrupts are enabled only after the insn following sti */
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob_inhibit_irq(s, true);
+ }
+ break;
+ case 0x62: /* bound */
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ modrm = x86_ldub_code(env, s);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_op_mov_v_reg(s, ot, s->T0, reg);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ if (ot == MO_16) {
+ gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
+ } else {
+ gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
+ }
+ break;
+ case 0x1c8 ... 0x1cf: /* bswap reg */
+ reg = (b & 7) | REX_B(s);
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
+ break;
+ }
+#endif
+ tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
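+ /* e.g. 0x11223344 becomes 0x44332211; with a 32-bit operand in
+ 64-bit mode the result is zero-extended, which is what the
+ TCG_BSWAP_OZ flag requests. */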
+ break;
+ case 0xd6: /* salc */
+ if (CODE64(s))
+ goto illegal_op;
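+ /* salc sets AL to 0xff when CF is set and 0x00 otherwise: the
+ carry is computed below as 0/1, and the negation turns that
+ into 0/-1, i.e. 0x00/0xff in the low byte. */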
+ gen_compute_eflags_c(s, s->T0);
+ tcg_gen_neg_tl(s->T0, s->T0);
+ gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
+ break;
+ case 0xe0: /* loopnz */
+ case 0xe1: /* loopz */
+ case 0xe2: /* loop */
+ case 0xe3: /* jecxz */
+ {
+ TCGLabel *l1, *l2, *l3;
+
+ tval = (int8_t)insn_get(env, s, MO_8);
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
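+ /* The four encodings, as a C-like sketch (ecx width per aflag):
+ * loopnz: --ecx; taken = ecx != 0 && !ZF;
+ * loopz:  --ecx; taken = ecx != 0 &&  ZF;
+ * loop:   --ecx; taken = ecx != 0;
+ * jecxz:          taken = ecx == 0;   (no decrement)
+ */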
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ l3 = gen_new_label();
+ gen_update_cc_op(s);
+ b &= 3;
+ switch(b) {
+ case 0: /* loopnz */
+ case 1: /* loopz */
+ gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
+ gen_op_jz_ecx(s, s->aflag, l3);
+ gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
+ break;
+ case 2: /* loop */
+ gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
+ gen_op_jnz_ecx(s, s->aflag, l1);
+ break;
+ default:
+ case 3: /* jcxz */
+ gen_op_jz_ecx(s, s->aflag, l1);
+ break;
+ }
+
+ gen_set_label(l3);
+ gen_jmp_im(s, next_eip);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ gen_jmp_im(s, tval);
+ gen_set_label(l2);
+ gen_eob(s);
+ }
+ break;
+ case 0x130: /* wrmsr */
+ case 0x132: /* rdmsr */
+ if (check_cpl0(s)) {
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ if (b & 2) {
+ gen_helper_rdmsr(cpu_env);
+ } else {
+ gen_helper_wrmsr(cpu_env);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ }
+ break;
+ case 0x131: /* rdtsc */
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtsc(cpu_env);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0x133: /* rdpmc */
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_rdpmc(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
+ break;
+ case 0x134: /* sysenter */
+ /* For Intel, SYSENTER is valid in 64-bit mode */
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ goto illegal_op;
+ if (!PE(s)) {
+ gen_exception_gpf(s);
+ } else {
+ gen_helper_sysenter(cpu_env);
+ gen_eob(s);
+ }
+ break;
+ case 0x135: /* sysexit */
+ /* For Intel, SYSEXIT is valid in 64-bit mode */
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ goto illegal_op;
+ if (!PE(s)) {
+ gen_exception_gpf(s);
+ } else {
+ gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
+ gen_eob(s);
+ }
+ break;
+#ifdef TARGET_X86_64
+ case 0x105: /* syscall */
+ /* XXX: is it usable in real mode? */
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
+ /* TF handling for the syscall insn is different. The TF bit is checked
+ after the syscall insn completes, so #DB is not generated
+ after entering CPL0 if TF is set in FMASK. */
+ gen_eob_worker(s, false, true);
+ break;
+ case 0x107: /* sysret */
+ if (!PE(s)) {
+ gen_exception_gpf(s);
+ } else {
+ gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
+ /* condition codes are modified only in long mode */
+ if (LMA(s)) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ /* TF handling for the sysret insn is different. The TF bit is
+ checked after the sysret insn completes. This allows #DB to be
+ generated "as if" the syscall insn in userspace has just
+ completed. */
+ gen_eob_worker(s, false, true);
+ }
+ break;
+#endif
+ case 0x1a2: /* cpuid */
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_cpuid(cpu_env);
+ break;
+ case 0xf4: /* hlt */
+ if (check_cpl0(s)) {
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
+ s->base.is_jmp = DISAS_NORETURN;
+ }
+ break;
+ case 0x100:
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* sldt */
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, ldt.selector));
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 2: /* lldt */
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_lldt(cpu_env, s->tmp2_i32);
+ }
+ break;
+ case 1: /* str */
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
+ tcg_gen_ld32u_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, tr.selector));
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 3: /* ltr */
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_helper_ltr(cpu_env, s->tmp2_i32);
+ }
+ break;
+ case 4: /* verr */
+ case 5: /* verw */
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_update_cc_op(s);
+ if (op == 4) {
+ gen_helper_verr(cpu_env, s->T0);
+ } else {
+ gen_helper_verw(cpu_env, s->T0);
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x101:
+ modrm = x86_ldub_code(env, s);
+ switch (modrm) {
+ CASE_MODRM_MEM_OP(0): /* sgdt */
+ gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(s->T0,
+ cpu_env, offsetof(CPUX86State, gdt.limit));
+ gen_op_st_v(s, MO_16, s->T0, s->A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
+ break;
+
+ case 0xc8: /* monitor */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
+ gen_extu(s->aflag, s->A0);
+ gen_add_A0_ds_seg(s);
+ gen_helper_monitor(cpu_env, s->A0);
+ break;
+
+ case 0xc9: /* mwait */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
+ s->base.is_jmp = DISAS_NORETURN;
+ break;
+
+ case 0xca: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || CPL(s) != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xcb: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || CPL(s) != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(1): /* sidt */
+ gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
+ gen_op_st_v(s, MO_16, s->T0, s->A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
+ break;
+
+ case 0xd0: /* xgetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
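+ /* xgetbv returns the 64-bit XCR selected by ECX in EDX:EAX;
+ ECX = 0 selects XCR0, the enabled-state-component bitmap. */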
+ break;
+
+ case 0xd1: /* xsetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
+ /* End TB because translation flags may change. */
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xd8: /* VMRUN */
+ if (!SVME(s) || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ tcg_const_i32(s->pc - pc_start));
+ tcg_gen_exit_tb(NULL, 0);
+ s->base.is_jmp = DISAS_NORETURN;
+ break;
+
+ case 0xd9: /* VMMCALL */
+ if (!SVME(s)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_vmmcall(cpu_env);
+ break;
+
+ case 0xda: /* VMLOAD */
+ if (!SVME(s) || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdb: /* VMSAVE */
+ if (!SVME(s) || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdc: /* STGI */
+ if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_helper_stgi(cpu_env);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xdd: /* CLGI */
+ if (!SVME(s) || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ gen_helper_clgi(cpu_env);
+ break;
+
+ case 0xde: /* SKINIT */
+ if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !PE(s)) {
+ goto illegal_op;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
+ /* If not intercepted, not implemented -- raise #UD. */
+ goto illegal_op;
+
+ case 0xdf: /* INVLPGA */
+ if (!SVME(s) || !PE(s)) {
+ goto illegal_op;
+ }
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
+ if (s->aflag == MO_64) {
+ tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
+ } else {
+ tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
+ }
+ gen_helper_flush_page(cpu_env, s->A0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(2): /* lgdt */
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, s->T1, s->A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
+ }
+ tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* lidt */
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, s->T1, s->A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
+ }
+ tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
+ break;
+
+ CASE_MODRM_OP(4): /* smsw */
+ gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
+ tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
+ /*
+ * In 32-bit mode, the higher 16 bits of the destination
+ * register are undefined. In practice CR0[31:0] is stored
+ * just like in 64-bit mode.
+ */
+ mod = (modrm >> 6) & 3;
+ ot = (mod != 3 ? MO_16 : s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 0xee: /* rdpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
+ break;
+ case 0xef: /* wrpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
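+ /* rdpkru/wrpkru move the 32-bit PKRU value through EDX:EAX with
+ EDX zero; the architecture requires ECX = 0 for both. */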
+ break;
+
+ CASE_MODRM_OP(6): /* lmsw */
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ /*
+ * Only the 4 lower bits of CR0 are modified.
+ * PE cannot be set to zero if already set to one.
+ */
+ tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
+ tcg_gen_andi_tl(s->T0, s->T0, 0xf);
+ tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
+ tcg_gen_or_tl(s->T0, s->T0, s->T1);
+ gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* invlpg */
+ if (!check_cpl0(s)) {
+ break;
+ }
+ gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_flush_page(cpu_env, s->A0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xf8: /* swapgs */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ if (check_cpl0(s)) {
+ tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ tcg_gen_st_tl(s->T0, cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ }
+ break;
+ }
+#endif
+ goto illegal_op;
+
+ case 0xf9: /* rdtscp */
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(s, pc_start - s->cs_base);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtscp(cpu_env);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x108: /* invd */
+ case 0x109: /* wbinvd */
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
+ /* nothing to do */
+ }
+ break;
+ case 0x63: /* arpl or movslS (x86_64) */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ int d_ot;
+ /* d_ot is the size of the destination */
+ d_ot = dflag;
+
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+
+ if (mod == 3) {
+ gen_op_mov_v_reg(s, MO_32, s->T0, rm);
+ /* sign extend */
+ if (d_ot == MO_64) {
+ tcg_gen_ext32s_tl(s->T0, s->T0);
+ }
+ gen_op_mov_reg_v(s, d_ot, reg, s->T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
+ gen_op_mov_reg_v(s, d_ot, reg, s->T0);
+ }
+ } else
+#endif
+ {
+ TCGLabel *label1;
+ TCGv t0, t1, t2, a0;
+
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+ t2 = tcg_temp_local_new();
+ ot = MO_16;
+ modrm = x86_ldub_code(env, s);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, t0, s->A0);
+ a0 = tcg_temp_local_new();
+ tcg_gen_mov_tl(a0, s->A0);
+ } else {
+ gen_op_mov_v_reg(s, ot, t0, rm);
+ a0 = NULL;
+ }
+ gen_op_mov_v_reg(s, ot, t1, reg);
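+ /* What the branchy code below computes, as a plain C sketch:
+ *
+ *   uint16_t arpl(uint16_t dst, uint16_t src, bool *zf)
+ *   {
+ *       if ((dst & 3) < (src & 3)) {          // RPL(dst) < RPL(src)
+ *           *zf = true;
+ *           return (dst & ~3) | (src & 3);    // raise RPL to src's
+ *       }
+ *       *zf = false;
+ *       return dst;
+ *   }
+ */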
+ tcg_gen_andi_tl(s->tmp0, t0, 3);
+ tcg_gen_andi_tl(t1, t1, 3);
+ tcg_gen_movi_tl(t2, 0);
+ label1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_movi_tl(t2, CC_Z);
+ gen_set_label(label1);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, t0, a0);
+ tcg_temp_free(a0);
+ } else {
+ gen_op_mov_reg_v(s, ot, rm, t0);
+ }
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case 0x102: /* lar */
+ case 0x103: /* lsl */
+ {
+ TCGLabel *label1;
+ TCGv t0;
+ if (!PE(s) || VM86(s))
+ goto illegal_op;
+ ot = dflag != MO_16 ? MO_32 : MO_16;
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ t0 = tcg_temp_local_new();
+ gen_update_cc_op(s);
+ if (b == 0x102) {
+ gen_helper_lar(t0, cpu_env, s->T0);
+ } else {
+ gen_helper_lsl(t0, cpu_env, s->T0);
+ }
+ tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
+ label1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
+ gen_op_mov_reg_v(s, ot, reg, t0);
+ gen_set_label(label1);
+ set_cc_op(s, CC_OP_EFLAGS);
+ tcg_temp_free(t0);
+ }
+ break;
+ case 0x118:
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* prefetchnta */
+ case 1: /* prefetcht0 */
+ case 2: /* prefetcht1 */
+ case 3: /* prefetcht2 */
+ if (mod == 3)
+ goto illegal_op;
+ gen_nop_modrm(env, s, modrm);
+ /* nothing more to do */
+ break;
+ default: /* nop (multi byte) */
+ gen_nop_modrm(env, s, modrm);
+ break;
+ }
+ break;
+ case 0x11a:
+ modrm = x86_ldub_code(env, s);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ if (prefixes & PREFIX_REPZ) {
+ /* bndcl */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcu */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ TCGv_i64 notu = tcg_temp_new_i64();
+ tcg_gen_not_i64(notu, cpu_bndu[reg]);
+ gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+ tcg_temp_free_i64(notu);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- from reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
+ tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(s->A0, s->A0, 8);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(s->A0, s->A0, 4);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ } else if (mod != 3) {
+ /* bndldx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(s->A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(s->T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
+ tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
+ } else {
+ gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
+ tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
+ }
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x11b:
+ modrm = x86_ldub_code(env, s);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ if (mod != 3 && (prefixes & PREFIX_REPZ)) {
+ /* bndmk */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (a.base >= 0) {
+ tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
+ }
+ } else if (a.base == -1) {
+ /* no base register: the lower bound is 0 */
+ tcg_gen_movi_i64(cpu_bndl[reg], 0);
+ } else {
+ /* rip-relative generates #ud */
+ goto illegal_op;
+ }
+ tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a));
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(s->A0, s->A0);
+ }
+ tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ break;
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcn */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- to reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
+ tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(s->A0, s->A0, 8);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(s->A0, s->A0, 4);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
+ s->mem_index, MO_LEUL);
+ }
+ }
+ } else if (mod != 3) {
+ /* bndstx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(s->A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(s->T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndstx64(cpu_env, s->A0, s->T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ } else {
+ gen_helper_bndstx32(cpu_env, s->A0, s->T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ }
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
+ modrm = x86_ldub_code(env, s);
+ gen_nop_modrm(env, s, modrm);
+ break;
+
+ case 0x120: /* mov reg, crN */
+ case 0x122: /* mov crN, reg */
+ if (!check_cpl0(s)) {
+ break;
+ }
+ modrm = x86_ldub_code(env, s);
+ /*
+ * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of Intel 386 and 486
+ * processors all show that the mod bits are assumed to be 1's,
+ * regardless of actual values.
+ */
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ switch (reg) {
+ case 0:
+ if ((prefixes & PREFIX_LOCK) &&
+ (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
+ reg = 8;
+ }
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 8:
+ break;
+ default:
+ goto unknown_op;
+ }
+ ot = (CODE64(s) ? MO_64 : MO_32);
+
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ if (b & 2) {
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
+ gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ }
+ break;
+
+ case 0x121: /* mov reg, drN */
+ case 0x123: /* mov drN, reg */
+ if (check_cpl0(s)) {
+ modrm = x86_ldub_code(env, s);
+ /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of
+ * Intel 386 and 486 processors all show that the mod bits
+ * are assumed to be 1's, regardless of actual values.
+ */
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ if (CODE64(s))
+ ot = MO_64;
+ else
+ ot = MO_32;
+ if (reg >= 8) {
+ goto illegal_op;
+ }
+ if (b & 2) {
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
+ gen_op_mov_v_reg(s, ot, s->T0, rm);
+ tcg_gen_movi_i32(s->tmp2_i32, reg);
+ gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
+ tcg_gen_movi_i32(s->tmp2_i32, reg);
+ gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
+ gen_op_mov_reg_v(s, ot, rm, s->T0);
+ }
+ }
+ break;
+ case 0x106: /* clts */
+ if (check_cpl0(s)) {
+ gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
+ gen_helper_clts(cpu_env);
+ /* abort block because static cpu state changed */
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
+ case 0x1c3: /* MOVNTI reg, mem */
+ if (!(s->cpuid_features & CPUID_SSE2))
+ goto illegal_op;
+ ot = mo_64_32(dflag);
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+ /* generate a generic store */
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+ break;
+ case 0x1ae:
+ modrm = x86_ldub_code(env, s);
+ switch (modrm) {
+ CASE_MODRM_MEM_OP(0): /* fxsave */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_fxsave(cpu_env, s->A0);
+ break;
+
+ CASE_MODRM_MEM_OP(1): /* fxrstor */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_fxrstor(cpu_env, s->A0);
+ break;
+
+ CASE_MODRM_MEM_OP(2): /* ldmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
+ gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* stmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_helper_update_mxcsr(cpu_env);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_op_st_v(s, MO_32, s->T0, s->A0);
+ break;
+
+ CASE_MODRM_MEM_OP(4): /* xsave */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
+ break;
+
+ CASE_MODRM_MEM_OP(5): /* xrstor */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
+ /* XRSTOR is how MPX is enabled, which changes how
+ we translate. Thus we need to end the TB. */
+ gen_update_cc_op(s);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clwb */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
+ goto illegal_op;
+ }
+ gen_nop_modrm(env, s, modrm);
+ } else {
+ /* xsaveopt */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
+ || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
+ }
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clflushopt */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
+ goto illegal_op;
+ }
+ } else {
+ /* clflush */
+ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
+ || !(s->cpuid_features & CPUID_CLFLUSH)) {
+ goto illegal_op;
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+
+ case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
+ case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
+ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
+ case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
+ if (CODE64(s)
+ && (prefixes & PREFIX_REPZ)
+ && !(prefixes & PREFIX_LOCK)
+ && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
+ TCGv base, treg, src, dst;
+
+ /* Preserve hflags bits by testing CR4 at runtime. */
+ tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
+ gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);
+
+ base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
+ treg = cpu_regs[(modrm & 7) | REX_B(s)];
+
+ if (modrm & 0x10) {
+ /* wr*base */
+ dst = base, src = treg;
+ } else {
+ /* rd*base */
+ dst = treg, src = base;
+ }
+
+ if (s->dflag == MO_32) {
+ tcg_gen_ext32u_tl(dst, src);
+ } else {
+ tcg_gen_mov_tl(dst, src);
+ }
+ break;
+ }
+ goto unknown_op;
+
+ case 0xf8: /* sfence / pcommit */
+ if (prefixes & PREFIX_DATA) {
+ /* pcommit */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ break;
+ }
+ /* fallthru */
+ case 0xf9 ... 0xff: /* sfence */
+ if (!(s->cpuid_features & CPUID_SSE)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+ break;
+ case 0xe8 ... 0xef: /* lfence */
+ if (!(s->cpuid_features & CPUID_SSE)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
+ break;
+ case 0xf0 ... 0xf7: /* mfence */
+ if (!(s->cpuid_features & CPUID_SSE2)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
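+ /* The three fences map directly onto TCG barrier kinds:
+ sfence -> TCG_MO_ST_ST, lfence -> TCG_MO_LD_LD,
+ mfence -> TCG_MO_ALL (full barrier). */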
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x10d: /* 3DNow! prefetch(w) */
+ modrm = x86_ldub_code(env, s);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x1aa: /* rsm */
+ gen_svm_check_intercept(s, SVM_EXIT_RSM);
+ if (!(s->flags & HF_SMM_MASK))
+ goto illegal_op;
+#ifdef CONFIG_USER_ONLY
+ /* we should not be in SMM mode */
+ g_assert_not_reached();
+#else
+ gen_update_cc_op(s);
+ gen_jmp_im(s, s->pc - s->cs_base);
+ gen_helper_rsm(cpu_env);
+#endif /* CONFIG_USER_ONLY */
+ gen_eob(s);
+ break;
+ case 0x1b8: /* SSE4.2 popcnt */
+ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
+ PREFIX_REPZ)
+ goto illegal_op;
+ if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
+ goto illegal_op;
+
+ modrm = x86_ldub_code(env, s);
+ reg = ((modrm >> 3) & 7) | REX_R(s);
+
+ if (s->prefix & PREFIX_DATA) {
+ ot = MO_16;
+ } else {
+ ot = mo_64_32(dflag);
+ }
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_extu(ot, s->T0);
+ tcg_gen_mov_tl(cpu_cc_src, s->T0);
+ tcg_gen_ctpop_tl(s->T0, s->T0);
+ gen_op_mov_reg_v(s, ot, reg, s->T0);
+
+ set_cc_op(s, CC_OP_POPCNT);
+ break;
+ case 0x10e ... 0x10f:
+ /* 3DNow! instructions, ignore prefixes */
+ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
+ /* fall through */
+ case 0x110 ... 0x117:
+ case 0x128 ... 0x12f:
+ case 0x138 ... 0x13a:
+ case 0x150 ... 0x179:
+ case 0x17c ... 0x17f:
+ case 0x1c2:
+ case 0x1c4 ... 0x1c6:
+ case 0x1d0 ... 0x1fe:
+ gen_sse(env, s, b, pc_start);
+ break;
+ default:
+ goto unknown_op;
+ }
+ return s->pc;
+ illegal_op:
+ gen_illegal_opcode(s);
+ return s->pc;
+ unknown_op:
+ gen_unknown_opcode(env, s);
+ return s->pc;
+}
+
+void tcg_x86_init(void)
+{
+ static const char reg_names[CPU_NB_REGS][4] = {
+#ifdef TARGET_X86_64
+ [R_EAX] = "rax",
+ [R_EBX] = "rbx",
+ [R_ECX] = "rcx",
+ [R_EDX] = "rdx",
+ [R_ESI] = "rsi",
+ [R_EDI] = "rdi",
+ [R_EBP] = "rbp",
+ [R_ESP] = "rsp",
+ [8] = "r8",
+ [9] = "r9",
+ [10] = "r10",
+ [11] = "r11",
+ [12] = "r12",
+ [13] = "r13",
+ [14] = "r14",
+ [15] = "r15",
+#else
+ [R_EAX] = "eax",
+ [R_EBX] = "ebx",
+ [R_ECX] = "ecx",
+ [R_EDX] = "edx",
+ [R_ESI] = "esi",
+ [R_EDI] = "edi",
+ [R_EBP] = "ebp",
+ [R_ESP] = "esp",
+#endif
+ };
+ static const char seg_base_names[6][8] = {
+ [R_CS] = "cs_base",
+ [R_DS] = "ds_base",
+ [R_ES] = "es_base",
+ [R_FS] = "fs_base",
+ [R_GS] = "gs_base",
+ [R_SS] = "ss_base",
+ };
+ static const char bnd_regl_names[4][8] = {
+ "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
+ };
+ static const char bnd_regu_names[4][8] = {
+ "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
+ };
+ int i;
+
+ cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUX86State, cc_op), "cc_op");
+ cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
+ "cc_dst");
+ cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
+ "cc_src");
+ cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
+ "cc_src2");
+
+ for (i = 0; i < CPU_NB_REGS; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, regs[i]),
+ reg_names[i]);
+ }
+
+ for (i = 0; i < 6; ++i) {
+ cpu_seg_base[i]
+ = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, segs[i].base),
+ seg_base_names[i]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ cpu_bndl[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].lb),
+ bnd_regl_names[i]);
+ cpu_bndu[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].ub),
+ bnd_regu_names[i]);
+ }
+}
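+
+/*
+ * Each global registered above is permanently backed by its CPUX86State
+ * field, so a hypothetical op such as
+ *
+ *     tcg_gen_addi_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], 1);
+ *
+ * reads and writes env->regs[R_EAX] directly when the generated code
+ * executes.
+ */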
+
+static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUX86State *env = cpu->env_ptr;
+ uint32_t flags = dc->base.tb->flags;
+ uint32_t cflags = tb_cflags(dc->base.tb);
+ int cpl = (flags >> HF_CPL_SHIFT) & 3;
+ int iopl = (flags >> IOPL_SHIFT) & 3;
+
+ dc->cs_base = dc->base.tb->cs_base;
+ dc->flags = flags;
+#ifndef CONFIG_USER_ONLY
+ dc->cpl = cpl;
+ dc->iopl = iopl;
+#endif
+
+ /* We make some simplifying assumptions; validate they're correct. */
+ g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
+ g_assert(CPL(dc) == cpl);
+ g_assert(IOPL(dc) == iopl);
+ g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
+ g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
+ g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
+ g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
+ g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
+ g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
+ g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
+ g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
+
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->cc_op_dirty = false;
+ dc->popl_esp_hack = 0;
+ /* select memory access functions */
+ dc->mem_index = 0;
+#ifdef CONFIG_SOFTMMU
+ dc->mem_index = cpu_mmu_index(env, false);
+#endif
+ dc->cpuid_features = env->features[FEAT_1_EDX];
+ dc->cpuid_ext_features = env->features[FEAT_1_ECX];
+ dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
+ dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
+ dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+ dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
+ dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
+ (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
+ /*
+ * If jmp_opt, we want to handle each string instruction individually.
+ * For icount also disable repz optimization so that each iteration
+ * is accounted separately.
+ */
+ dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
+
+ dc->T0 = tcg_temp_new();
+ dc->T1 = tcg_temp_new();
+ dc->A0 = tcg_temp_new();
+
+ dc->tmp0 = tcg_temp_new();
+ dc->tmp1_i64 = tcg_temp_new_i64();
+ dc->tmp2_i32 = tcg_temp_new_i32();
+ dc->tmp3_i32 = tcg_temp_new_i32();
+ dc->tmp4 = tcg_temp_new();
+ dc->ptr0 = tcg_temp_new_ptr();
+ dc->ptr1 = tcg_temp_new_ptr();
+ dc->cc_srcT = tcg_temp_local_new();
+}
+
+static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
+}
+
+static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ target_ulong pc_next;
+
+#ifdef TARGET_VSYSCALL_PAGE
+ /*
+ * Detect entry into the vsyscall page and invoke the syscall.
+ */
+ if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
+ gen_exception(dc, EXCP_VSYSCALL, dc->base.pc_next);
+ dc->base.pc_next = dc->pc + 1;
+ return;
+ }
+#endif
+
+ pc_next = disas_insn(dc, cpu);
+
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
+ /* In single-step mode we generate only one instruction and
+ then raise an exception. */
+ /* If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+ the flag and abort the translation to give the IRQs a
+ chance to happen. */
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
+ && ((pc_next & TARGET_PAGE_MASK)
+ != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
+ & TARGET_PAGE_MASK)
+ || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
+ /* Do not cross a page boundary in icount mode, since doing
+ so can raise an exception. Stop only when the boundary
+ would be crossed by the first instruction in the block;
+ if the current instruction has already crossed it, that
+ is fine, because an exception has not stopped this code.
+ */
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+
+ dc->base.pc_next = pc_next;
+}
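+
+/*
+ * A worked instance of the icount page check above, assuming 4KiB pages
+ * and TARGET_MAX_INSN_SIZE == 16: with pc_next == 0xfff8, the last byte
+ * an instruction could occupy is 0x10007, which is on the next page, so
+ * the block ends before an instruction can fault halfway through it.
+ */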
+
+static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (dc->base.is_jmp == DISAS_TOO_MANY) {
+ gen_jmp_im(dc, dc->base.pc_next - dc->cs_base);
+ gen_eob(dc);
+ }
+}
+
+static void i386_tr_disas_log(const DisasContextBase *dcbase,
+ CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
+}
+
+static const TranslatorOps i386_tr_ops = {
+ .init_disas_context = i386_tr_init_disas_context,
+ .tb_start = i386_tr_tb_start,
+ .insn_start = i386_tr_insn_start,
+ .translate_insn = i386_tr_translate_insn,
+ .tb_stop = i386_tr_tb_stop,
+ .disas_log = i386_tr_disas_log,
+};
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+
+ translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ int cc_op = data[1];
+ env->eip = data[0] - tb->cs_base;
+ if (cc_op != CC_OP_DYNAMIC) {
+ env->cc_op = cc_op;
+ }
+}
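+
+/*
+ * The data[] layout decoded above mirrors i386_tr_insn_start(), which
+ * emits tcg_gen_insn_start(pc_next, cc_op): data[0] is the instruction
+ * pointer and data[1] the condition-code state at that point.
+ */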
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
new file mode 100644
index 000000000..cd507e2a1
--- /dev/null
+++ b/target/i386/tcg/user/excp_helper.c
@@ -0,0 +1,50 @@
+/*
+ * x86 exception helpers - user-mode specific code
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/helper-tcg.h"
+
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /*
+ * The error_code that hw reports as part of the exception frame
+ * is copied to linux sigcontext.err. The exception_index is
+ * copied to linux sigcontext.trapno. Short of inventing a new
+ * place to store the trapno, we cannot let our caller raise the
+ * signal and set exception_index to EXCP_INTERRUPT.
+ */
+ env->cr[2] = addr;
+ env->error_code = ((access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT)
+ | (maperr ? 0 : PG_ERROR_P_MASK)
+ | PG_ERROR_U_MASK;
+ cs->exception_index = EXCP0E_PAGE;
+
+ /* Disable do_interrupt_user. */
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+
+ cpu_loop_exit_restore(cs, ra);
+}
diff --git a/target/i386/tcg/user/meson.build b/target/i386/tcg/user/meson.build
new file mode 100644
index 000000000..1df6bc434
--- /dev/null
+++ b/target/i386/tcg/user/meson.build
@@ -0,0 +1,4 @@
+i386_user_ss.add(when: ['CONFIG_TCG', 'CONFIG_USER_ONLY'], if_true: files(
+ 'excp_helper.c',
+ 'seg_helper.c',
+))
diff --git a/target/i386/tcg/user/seg_helper.c b/target/i386/tcg/user/seg_helper.c
new file mode 100644
index 000000000..67481b0aa
--- /dev/null
+++ b/target/i386/tcg/user/seg_helper.c
@@ -0,0 +1,109 @@
+/*
+ * x86 segmentation related helpers (user-mode code):
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "tcg/helper-tcg.h"
+#include "tcg/seg_helper.h"
+
+#ifdef TARGET_X86_64
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_SYSCALL;
+ env->exception_is_int = 0;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit(cs);
+}
+#endif /* TARGET_X86_64 */
+
+/*
+ * Fake user-mode interrupt. is_int is TRUE if coming from the int
+ * instruction. next_eip is the env->eip value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE or if intno
+ * is EXCP_SYSCALL.
+ */
+static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip)
+{
+ if (is_int) {
+ SegmentCache *dt;
+ target_ulong ptr;
+ int dpl, cpl, shift;
+ uint32_t e2;
+
+ dt = &env->idt;
+ if (env->hflags & HF_LMA_MASK) {
+ shift = 4;
+ } else {
+ shift = 3;
+ }
+ ptr = dt->base + (intno << shift);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+ }
+ }
+
+ /* Since we only emulate user space, we cannot do more than
+ exit the emulation with a suitable exception and error
+ code, so just update EIP for INT 0x80 and EXCP_SYSCALL. */
+ if (is_int || intno == EXCP_SYSCALL) {
+ env->eip = next_eip;
+ }
+}
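+
+/*
+ * Descriptor lookup example: in long mode IDT entries are 16 bytes wide
+ * (shift == 4), otherwise 8 bytes (shift == 3), so "int 0x80" on a
+ * 32-bit guest reads its DPL from dt->base + 0x80 * 8 + 4.
+ */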
+
+void x86_cpu_do_interrupt(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /* In user-mode emulation we simulate a fake exception,
+ which is handled outside the CPU execution
+ loop */
+ do_interrupt_user(env, cs->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip);
+ /* successfully delivered */
+ env->old_exception = -1;
+}
+
+void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
+{
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+ selector &= 0xffff;
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
+ } else {
+ helper_load_seg(env, seg_reg, selector);
+ }
+}
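+
+/*
+ * In real or vm86 mode the base is simply selector << 4: loading DS
+ * with 0x1234 yields base 0x12340 and the fixed 64KiB limit set up
+ * above.
+ */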
diff --git a/target/i386/trace-events b/target/i386/trace-events
new file mode 100644
index 000000000..2cd8726ee
--- /dev/null
+++ b/target/i386/trace-events
@@ -0,0 +1,13 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# sev.c
+kvm_sev_init(void) ""
+kvm_memcrypt_register_region(void *addr, size_t len) "addr %p len 0x%zx"
+kvm_memcrypt_unregister_region(void *addr, size_t len) "addr %p len 0x%zx"
+kvm_sev_change_state(const char *old, const char *new) "%s -> %s"
+kvm_sev_launch_start(int policy, void *session, void *pdh) "policy 0x%x session %p pdh %p"
+kvm_sev_launch_update_data(void *addr, uint64_t len) "addr %p len 0x%" PRIx64
+kvm_sev_launch_measurement(const char *value) "data %s"
+kvm_sev_launch_finish(void) ""
+kvm_sev_launch_secret(uint64_t hpa, uint64_t hva, uint64_t secret, int len) "hpa 0x%" PRIx64 " hva 0x%" PRIx64 " data 0x%" PRIx64 " len %d"
+kvm_sev_attestation_report(const char *mnonce, const char *data) "mnonce %s data %s"
diff --git a/target/i386/trace.h b/target/i386/trace.h
new file mode 100644
index 000000000..781e8ec55
--- /dev/null
+++ b/target/i386/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_i386.h"
diff --git a/target/i386/whpx/meson.build b/target/i386/whpx/meson.build
new file mode 100644
index 000000000..95fc31eb8
--- /dev/null
+++ b/target/i386/whpx/meson.build
@@ -0,0 +1,5 @@
+i386_softmmu_ss.add(when: 'CONFIG_WHPX', if_true: files(
+ 'whpx-all.c',
+ 'whpx-apic.c',
+ 'whpx-accel-ops.c',
+))
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
new file mode 100644
index 000000000..6bc47c530
--- /dev/null
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -0,0 +1,111 @@
+/*
+ * QEMU Windows Hypervisor Platform accelerator (WHPX)
+ *
+ * Copyright Microsoft Corp. 2017
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/kvm_int.h"
+#include "qemu/main-loop.h"
+#include "sysemu/cpus.h"
+#include "qemu/guest-random.h"
+
+#include "sysemu/whpx.h"
+#include "whpx-internal.h"
+#include "whpx-accel-ops.h"
+
+static void *whpx_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+ cpu->thread_id = qemu_get_thread_id();
+ current_cpu = cpu;
+
+ r = whpx_init_vcpu(cpu);
+ if (r < 0) {
+ fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
+ exit(1);
+ }
+
+ /* signal CPU creation */
+ cpu_thread_signal_created(cpu);
+ qemu_guest_random_seed_thread_part2(cpu->random_seed);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = whpx_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait_iothread(cpu->halt_cond);
+ }
+ qemu_wait_io_event_common(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ whpx_destroy_vcpu(cpu);
+ cpu_thread_signal_destroyed(cpu);
+ qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static void whpx_start_vcpu_thread(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, whpx_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+#ifdef _WIN32
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
+}
+
+static void whpx_kick_vcpu_thread(CPUState *cpu)
+{
+ if (!qemu_cpu_is_self(cpu)) {
+ whpx_vcpu_kick(cpu);
+ }
+}
+
+static void whpx_accel_ops_class_init(ObjectClass *oc, void *data)
+{
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
+
+ ops->create_vcpu_thread = whpx_start_vcpu_thread;
+ ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
+
+ ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
+ ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
+ ops->synchronize_state = whpx_cpu_synchronize_state;
+ ops->synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm;
+}
+
+static const TypeInfo whpx_accel_ops_type = {
+ .name = ACCEL_OPS_NAME("whpx"),
+
+ .parent = TYPE_ACCEL_OPS,
+ .class_init = whpx_accel_ops_class_init,
+ .abstract = true,
+};
+
+static void whpx_accel_ops_register_types(void)
+{
+ type_register_static(&whpx_accel_ops_type);
+}
+type_init(whpx_accel_ops_register_types);
diff --git a/target/i386/whpx/whpx-accel-ops.h b/target/i386/whpx/whpx-accel-ops.h
new file mode 100644
index 000000000..2dee6d61e
--- /dev/null
+++ b/target/i386/whpx/whpx-accel-ops.h
@@ -0,0 +1,32 @@
+/*
+ * Accelerator CPUS Interface
+ *
+ * Copyright 2020 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef WHPX_CPUS_H
+#define WHPX_CPUS_H
+
+#include "sysemu/cpus.h"
+
+int whpx_init_vcpu(CPUState *cpu);
+int whpx_vcpu_exec(CPUState *cpu);
+void whpx_destroy_vcpu(CPUState *cpu);
+void whpx_vcpu_kick(CPUState *cpu);
+
+void whpx_cpu_synchronize_state(CPUState *cpu);
+void whpx_cpu_synchronize_post_reset(CPUState *cpu);
+void whpx_cpu_synchronize_post_init(CPUState *cpu);
+void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu);
+
+/* state subset only touched by the VCPU itself during runtime */
+#define WHPX_SET_RUNTIME_STATE 1
+/* state subset modified during VCPU reset */
+#define WHPX_SET_RESET_STATE 2
+/* full state set, modified during initialization or on vmload */
+#define WHPX_SET_FULL_STATE 3
+
+#endif /* WHPX_CPUS_H */
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
new file mode 100644
index 000000000..ef896da0a
--- /dev/null
+++ b/target/i386/whpx/whpx-all.c
@@ -0,0 +1,1939 @@
+/*
+ * QEMU Windows Hypervisor Platform accelerator (WHPX)
+ *
+ * Copyright Microsoft Corp. 2017
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "exec/ioport.h"
+#include "qemu-common.h"
+#include "qemu/accel.h"
+#include "sysemu/whpx.h"
+#include "sysemu/cpus.h"
+#include "sysemu/runstate.h"
+#include "qemu/main-loop.h"
+#include "hw/boards.h"
+#include "hw/i386/ioapic.h"
+#include "hw/i386/apic_internal.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi/qapi-types-common.h"
+#include "qapi/qapi-visit-common.h"
+#include "migration/blocker.h"
+#include <winerror.h>
+
+#include "whpx-internal.h"
+#include "whpx-accel-ops.h"
+
+#include <WinHvPlatform.h>
+#include <WinHvEmulation.h>
+
+#define HYPERV_APIC_BUS_FREQUENCY (200000000ULL)
+
+static const WHV_REGISTER_NAME whpx_register_names[] = {
+
+ /* X64 General purpose registers */
+ WHvX64RegisterRax,
+ WHvX64RegisterRcx,
+ WHvX64RegisterRdx,
+ WHvX64RegisterRbx,
+ WHvX64RegisterRsp,
+ WHvX64RegisterRbp,
+ WHvX64RegisterRsi,
+ WHvX64RegisterRdi,
+ WHvX64RegisterR8,
+ WHvX64RegisterR9,
+ WHvX64RegisterR10,
+ WHvX64RegisterR11,
+ WHvX64RegisterR12,
+ WHvX64RegisterR13,
+ WHvX64RegisterR14,
+ WHvX64RegisterR15,
+ WHvX64RegisterRip,
+ WHvX64RegisterRflags,
+
+ /* X64 Segment registers */
+ WHvX64RegisterEs,
+ WHvX64RegisterCs,
+ WHvX64RegisterSs,
+ WHvX64RegisterDs,
+ WHvX64RegisterFs,
+ WHvX64RegisterGs,
+ WHvX64RegisterLdtr,
+ WHvX64RegisterTr,
+
+ /* X64 Table registers */
+ WHvX64RegisterIdtr,
+ WHvX64RegisterGdtr,
+
+ /* X64 Control Registers */
+ WHvX64RegisterCr0,
+ WHvX64RegisterCr2,
+ WHvX64RegisterCr3,
+ WHvX64RegisterCr4,
+ WHvX64RegisterCr8,
+
+ /* X64 Debug Registers */
+ /*
+ * WHvX64RegisterDr0,
+ * WHvX64RegisterDr1,
+ * WHvX64RegisterDr2,
+ * WHvX64RegisterDr3,
+ * WHvX64RegisterDr6,
+ * WHvX64RegisterDr7,
+ */
+
+ /* X64 Floating Point and Vector Registers */
+ WHvX64RegisterXmm0,
+ WHvX64RegisterXmm1,
+ WHvX64RegisterXmm2,
+ WHvX64RegisterXmm3,
+ WHvX64RegisterXmm4,
+ WHvX64RegisterXmm5,
+ WHvX64RegisterXmm6,
+ WHvX64RegisterXmm7,
+ WHvX64RegisterXmm8,
+ WHvX64RegisterXmm9,
+ WHvX64RegisterXmm10,
+ WHvX64RegisterXmm11,
+ WHvX64RegisterXmm12,
+ WHvX64RegisterXmm13,
+ WHvX64RegisterXmm14,
+ WHvX64RegisterXmm15,
+ WHvX64RegisterFpMmx0,
+ WHvX64RegisterFpMmx1,
+ WHvX64RegisterFpMmx2,
+ WHvX64RegisterFpMmx3,
+ WHvX64RegisterFpMmx4,
+ WHvX64RegisterFpMmx5,
+ WHvX64RegisterFpMmx6,
+ WHvX64RegisterFpMmx7,
+ WHvX64RegisterFpControlStatus,
+ WHvX64RegisterXmmControlStatus,
+
+ /* X64 MSRs */
+ WHvX64RegisterEfer,
+#ifdef TARGET_X86_64
+ WHvX64RegisterKernelGsBase,
+#endif
+ WHvX64RegisterApicBase,
+ /* WHvX64RegisterPat, */
+ WHvX64RegisterSysenterCs,
+ WHvX64RegisterSysenterEip,
+ WHvX64RegisterSysenterEsp,
+ WHvX64RegisterStar,
+#ifdef TARGET_X86_64
+ WHvX64RegisterLstar,
+ WHvX64RegisterCstar,
+ WHvX64RegisterSfmask,
+#endif
+
+ /* Interrupt / Event Registers */
+ /*
+ * WHvRegisterPendingInterruption,
+ * WHvRegisterInterruptState,
+ * WHvRegisterPendingEvent0,
+ * WHvRegisterPendingEvent1
+ * WHvX64RegisterDeliverabilityNotifications,
+ */
+};
+
+struct whpx_register_set {
+ WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
+};
+
+struct whpx_vcpu {
+ WHV_EMULATOR_HANDLE emulator;
+ bool window_registered;
+ bool interruptable;
+ bool ready_for_pic_interrupt;
+ uint64_t tpr;
+ uint64_t apic_base;
+ bool interruption_pending;
+
+ /* Must be the last field as it may have a tail */
+ WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
+};
+
+static bool whpx_allowed;
+static bool whp_dispatch_initialized;
+static HMODULE hWinHvPlatform, hWinHvEmulation;
+static uint32_t max_vcpu_index;
+struct whpx_state whpx_global;
+struct WHPDispatch whp_dispatch;
+
+
+/*
+ * VP support
+ */
+
+static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
+{
+ return (struct whpx_vcpu *)cpu->hax_vcpu;
+}
+
+static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
+ int r86)
+{
+ WHV_X64_SEGMENT_REGISTER hs;
+ unsigned flags = qs->flags;
+
+ hs.Base = qs->base;
+ hs.Limit = qs->limit;
+ hs.Selector = qs->selector;
+
+ if (v86) {
+ hs.Attributes = 0;
+ hs.SegmentType = 3;
+ hs.Present = 1;
+ hs.DescriptorPrivilegeLevel = 3;
+ hs.NonSystemSegment = 1;
+
+ } else {
+ hs.Attributes = (flags >> DESC_TYPE_SHIFT);
+
+ if (r86) {
+ /* hs.Base &= 0xfffff; */
+ }
+ }
+
+ return hs;
+}
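+
+/*
+ * QEMU keeps the descriptor attribute bits shifted up by DESC_TYPE_SHIFT
+ * inside SegmentCache.flags, so the conversions above and below just
+ * shift that field into or out of the 16-bit Attributes word WHPX uses.
+ */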
+
+static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
+{
+ SegmentCache qs;
+
+ qs.base = hs->Base;
+ qs.limit = hs->Limit;
+ qs.selector = hs->Selector;
+
+ qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
+
+ return qs;
+}
+
+static int whpx_set_tsc(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
+ WHV_REGISTER_VALUE tsc_val;
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+
+ /*
+ * Suspend the partition prior to setting the TSC to reduce the variance
+ * in TSC across vCPUs. When the first vCPU runs post suspend, the
+ * partition is automatically resumed.
+ */
+ if (whp_dispatch.WHvSuspendPartitionTime) {
+
+ /*
+ * Failing to suspend the partition while setting the TSC is not a
+ * fatal error; it just increases the likelihood of TSC variance
+ * between vCPUs, and some guest OSes handle that just fine.
+ */
+ hr = whp_dispatch.WHvSuspendPartitionTime(whpx->partition);
+ if (FAILED(hr)) {
+ warn_report("WHPX: Failed to suspend partition, hr=%08lx", hr);
+ }
+ }
+
+ tsc_val.Reg64 = env->tsc;
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set TSC, hr=%08lx", hr);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void whpx_set_registers(CPUState *cpu, int level)
+{
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct whpx_register_set vcxt;
+ HRESULT hr;
+ int idx;
+ int idx_next;
+ int i;
+ int v86, r86;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ /*
+     * The following MSRs have side effects on the guest or are too heavy to
+     * set at runtime. Limit them to full state updates.
+ */
+ if (level >= WHPX_SET_RESET_STATE) {
+ whpx_set_tsc(cpu);
+ }
+
+ memset(&vcxt, 0, sizeof(struct whpx_register_set));
+
+ v86 = (env->eflags & VM_MASK);
+ r86 = !(env->cr[0] & CR0_PE_MASK);
+
+ vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+ vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
+
+ idx = 0;
+
+    /* Indexes of the first 16 registers match the HV and QEMU definitions */
+ idx_next = 16;
+ for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
+ vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];
+ }
+ idx = idx_next;
+
+ /* Same goes for RIP and RFLAGS */
+ assert(whpx_register_names[idx] == WHvX64RegisterRip);
+ vcxt.values[idx++].Reg64 = env->eip;
+
+ assert(whpx_register_names[idx] == WHvX64RegisterRflags);
+ vcxt.values[idx++].Reg64 = env->eflags;
+
+    /* Translate the 6+4 segment registers; the HV and QEMU orders match. */
+ assert(idx == WHvX64RegisterEs);
+ for (i = 0; i < 6; i += 1, idx += 1) {
+ vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
+ }
+
+ assert(idx == WHvX64RegisterLdtr);
+ vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
+
+ assert(idx == WHvX64RegisterTr);
+ vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
+
+ assert(idx == WHvX64RegisterIdtr);
+ vcxt.values[idx].Table.Base = env->idt.base;
+ vcxt.values[idx].Table.Limit = env->idt.limit;
+ idx += 1;
+
+ assert(idx == WHvX64RegisterGdtr);
+ vcxt.values[idx].Table.Base = env->gdt.base;
+ vcxt.values[idx].Table.Limit = env->gdt.limit;
+ idx += 1;
+
+ /* CR0, 2, 3, 4, 8 */
+ assert(whpx_register_names[idx] == WHvX64RegisterCr0);
+ vcxt.values[idx++].Reg64 = env->cr[0];
+ assert(whpx_register_names[idx] == WHvX64RegisterCr2);
+ vcxt.values[idx++].Reg64 = env->cr[2];
+ assert(whpx_register_names[idx] == WHvX64RegisterCr3);
+ vcxt.values[idx++].Reg64 = env->cr[3];
+ assert(whpx_register_names[idx] == WHvX64RegisterCr4);
+ vcxt.values[idx++].Reg64 = env->cr[4];
+ assert(whpx_register_names[idx] == WHvX64RegisterCr8);
+ vcxt.values[idx++].Reg64 = vcpu->tpr;
+
+ /* 8 Debug Registers - Skipped */
+
+ /* 16 XMM registers */
+ assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
+ idx_next = idx + 16;
+ for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
+ vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
+ vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
+ }
+ idx = idx_next;
+
+ /* 8 FP registers */
+ assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
+ for (i = 0; i < 8; i += 1, idx += 1) {
+ vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
+ /* vcxt.values[idx].Fp.AsUINT128.High64 =
+ env->fpregs[i].mmx.MMX_Q(1);
+ */
+ }
+
+ /* FP control status register */
+ assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
+ vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
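+    /* Bits 11-13 of the x87 status word hold the top of stack (fpstt). */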
+ vcxt.values[idx].FpControlStatus.FpStatus =
+ (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ vcxt.values[idx].FpControlStatus.FpTag = 0;
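+    /* fptags[] is 1 for empty; the abridged tag is 1 for valid, so invert. */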
+ for (i = 0; i < 8; ++i) {
+ vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
+ }
+ vcxt.values[idx].FpControlStatus.Reserved = 0;
+ vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
+ vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
+ idx += 1;
+
+ /* XMM control status register */
+ assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
+ vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
+ vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
+ vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
+ idx += 1;
+
+ /* MSRs */
+ assert(whpx_register_names[idx] == WHvX64RegisterEfer);
+ vcxt.values[idx++].Reg64 = env->efer;
+#ifdef TARGET_X86_64
+ assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
+ vcxt.values[idx++].Reg64 = env->kernelgsbase;
+#endif
+
+ assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
+ vcxt.values[idx++].Reg64 = vcpu->apic_base;
+
+ /* WHvX64RegisterPat - Skipped */
+
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
+ vcxt.values[idx++].Reg64 = env->sysenter_cs;
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
+ vcxt.values[idx++].Reg64 = env->sysenter_eip;
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
+ vcxt.values[idx++].Reg64 = env->sysenter_esp;
+ assert(whpx_register_names[idx] == WHvX64RegisterStar);
+ vcxt.values[idx++].Reg64 = env->star;
+#ifdef TARGET_X86_64
+ assert(whpx_register_names[idx] == WHvX64RegisterLstar);
+ vcxt.values[idx++].Reg64 = env->lstar;
+ assert(whpx_register_names[idx] == WHvX64RegisterCstar);
+ vcxt.values[idx++].Reg64 = env->cstar;
+ assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
+ vcxt.values[idx++].Reg64 = env->fmask;
+#endif
+
+ /* Interrupt / Event Registers - Skipped */
+
+ assert(idx == RTL_NUMBER_OF(whpx_register_names));
+
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ whpx_register_names,
+ RTL_NUMBER_OF(whpx_register_names),
+ &vcxt.values[0]);
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
+ hr);
+ }
+
+ return;
+}
+
+static int whpx_get_tsc(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
+ WHV_REGISTER_VALUE tsc_val;
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+
+ hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to get TSC, hr=%08lx", hr);
+ return -1;
+ }
+
+ env->tsc = tsc_val.Reg64;
+ return 0;
+}
+
+static void whpx_get_registers(CPUState *cpu)
+{
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct whpx_register_set vcxt;
+ uint64_t tpr, apic_base;
+ HRESULT hr;
+ int idx;
+ int idx_next;
+ int i;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ if (!env->tsc_valid) {
+ whpx_get_tsc(cpu);
+ env->tsc_valid = !runstate_is_running();
+ }
+
+ hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ whpx_register_names,
+ RTL_NUMBER_OF(whpx_register_names),
+ &vcxt.values[0]);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
+ hr);
+ }
+
+ idx = 0;
+
+    /* Indexes of the first 16 registers match the HV and QEMU definitions */
+ idx_next = 16;
+ for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
+ env->regs[idx] = vcxt.values[idx].Reg64;
+ }
+ idx = idx_next;
+
+ /* Same goes for RIP and RFLAGS */
+ assert(whpx_register_names[idx] == WHvX64RegisterRip);
+ env->eip = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterRflags);
+ env->eflags = vcxt.values[idx++].Reg64;
+
+    /* Translate the 6+4 segment registers; the HV and QEMU orders match. */
+ assert(idx == WHvX64RegisterEs);
+ for (i = 0; i < 6; i += 1, idx += 1) {
+ env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
+ }
+
+ assert(idx == WHvX64RegisterLdtr);
+ env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
+ assert(idx == WHvX64RegisterTr);
+ env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
+ assert(idx == WHvX64RegisterIdtr);
+ env->idt.base = vcxt.values[idx].Table.Base;
+ env->idt.limit = vcxt.values[idx].Table.Limit;
+ idx += 1;
+ assert(idx == WHvX64RegisterGdtr);
+ env->gdt.base = vcxt.values[idx].Table.Base;
+ env->gdt.limit = vcxt.values[idx].Table.Limit;
+ idx += 1;
+
+ /* CR0, 2, 3, 4, 8 */
+ assert(whpx_register_names[idx] == WHvX64RegisterCr0);
+ env->cr[0] = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterCr2);
+ env->cr[2] = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterCr3);
+ env->cr[3] = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterCr4);
+ env->cr[4] = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterCr8);
+ tpr = vcxt.values[idx++].Reg64;
+ if (tpr != vcpu->tpr) {
+ vcpu->tpr = tpr;
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+ }
+
+ /* 8 Debug Registers - Skipped */
+
+ /* 16 XMM registers */
+ assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
+ idx_next = idx + 16;
+ for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
+ env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
+ env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
+ }
+ idx = idx_next;
+
+ /* 8 FP registers */
+ assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
+ for (i = 0; i < 8; i += 1, idx += 1) {
+ env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
+ /* env->fpregs[i].mmx.MMX_Q(1) =
+ vcxt.values[idx].Fp.AsUINT128.High64;
+ */
+ }
+
+ /* FP control status register */
+ assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
+ env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
+ env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
+ env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
+ }
+ env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
+ env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
+ idx += 1;
+
+ /* XMM control status register */
+ assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
+ env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
+ idx += 1;
+
+ /* MSRs */
+ assert(whpx_register_names[idx] == WHvX64RegisterEfer);
+ env->efer = vcxt.values[idx++].Reg64;
+#ifdef TARGET_X86_64
+ assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
+ env->kernelgsbase = vcxt.values[idx++].Reg64;
+#endif
+
+ assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
+ apic_base = vcxt.values[idx++].Reg64;
+ if (apic_base != vcpu->apic_base) {
+ vcpu->apic_base = apic_base;
+ cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
+ }
+
+ /* WHvX64RegisterPat - Skipped */
+
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
+ env->sysenter_cs = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
+ env->sysenter_eip = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
+ env->sysenter_esp = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterStar);
+ env->star = vcxt.values[idx++].Reg64;
+#ifdef TARGET_X86_64
+ assert(whpx_register_names[idx] == WHvX64RegisterLstar);
+ env->lstar = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterCstar);
+ env->cstar = vcxt.values[idx++].Reg64;
+ assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
+ env->fmask = vcxt.values[idx++].Reg64;
+#endif
+
+ /* Interrupt / Event Registers - Skipped */
+
+ assert(idx == RTL_NUMBER_OF(whpx_register_names));
+
+ if (whpx_apic_in_platform()) {
+ whpx_apic_get(x86_cpu->apic_state);
+ }
+
+ return;
+}
+
+static HRESULT CALLBACK whpx_emu_ioport_callback(
+ void *ctx,
+ WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
+{
+ MemTxAttrs attrs = { 0 };
+ address_space_rw(&address_space_io, IoAccess->Port, attrs,
+ &IoAccess->Data, IoAccess->AccessSize,
+ IoAccess->Direction);
+ return S_OK;
+}
+
+static HRESULT CALLBACK whpx_emu_mmio_callback(
+ void *ctx,
+ WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
+{
+ cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
+ ma->Direction);
+ return S_OK;
+}
+
+static HRESULT CALLBACK whpx_emu_getreg_callback(
+ void *ctx,
+ const WHV_REGISTER_NAME *RegisterNames,
+ UINT32 RegisterCount,
+ WHV_REGISTER_VALUE *RegisterValues)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ CPUState *cpu = (CPUState *)ctx;
+
+ hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ RegisterNames, RegisterCount,
+ RegisterValues);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to get virtual processor registers,"
+ " hr=%08lx", hr);
+ }
+
+ return hr;
+}
+
+static HRESULT CALLBACK whpx_emu_setreg_callback(
+ void *ctx,
+ const WHV_REGISTER_NAME *RegisterNames,
+ UINT32 RegisterCount,
+ const WHV_REGISTER_VALUE *RegisterValues)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ CPUState *cpu = (CPUState *)ctx;
+
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ RegisterNames, RegisterCount,
+ RegisterValues);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set virtual processor registers,"
+ " hr=%08lx", hr);
+ }
+
+ /*
+     * The emulator just successfully wrote the register state. Clear the
+     * dirty flag so the state is not written a second time when the VP
+     * resumes.
+ */
+ cpu->vcpu_dirty = false;
+
+ return hr;
+}
+
+static HRESULT CALLBACK whpx_emu_translate_callback(
+ void *ctx,
+ WHV_GUEST_VIRTUAL_ADDRESS Gva,
+ WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
+ WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
+ WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ CPUState *cpu = (CPUState *)ctx;
+ WHV_TRANSLATE_GVA_RESULT res;
+
+ hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
+ Gva, TranslateFlags, &res, Gpa);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
+ } else {
+ *TranslationResult = res.ResultCode;
+ }
+
+ return hr;
+}
+
+static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
+ .Size = sizeof(WHV_EMULATOR_CALLBACKS),
+ .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
+ .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
+ .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
+ .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
+ .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
+};
+
+static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
+{
+ HRESULT hr;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ WHV_EMULATOR_STATUS emu_status;
+
+ hr = whp_dispatch.WHvEmulatorTryMmioEmulation(
+ vcpu->emulator, cpu,
+ &vcpu->exit_ctx.VpContext, ctx,
+ &emu_status);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
+ return -1;
+ }
+
+ if (!emu_status.EmulationSuccessful) {
+ error_report("WHPX: Failed to emulate MMIO access with"
+ " EmulatorReturnStatus: %u", emu_status.AsUINT32);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int whpx_handle_portio(CPUState *cpu,
+ WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
+{
+ HRESULT hr;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ WHV_EMULATOR_STATUS emu_status;
+
+ hr = whp_dispatch.WHvEmulatorTryIoEmulation(
+ vcpu->emulator, cpu,
+ &vcpu->exit_ctx.VpContext, ctx,
+ &emu_status);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
+ return -1;
+ }
+
+ if (!emu_status.EmulationSuccessful) {
+ error_report("WHPX: Failed to emulate PortIO access with"
+ " EmulatorReturnStatus: %u", emu_status.AsUINT32);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int whpx_handle_halt(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ int ret = 0;
+
+ qemu_mutex_lock_iothread();
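+    /*
+     * Latch the halt only if no unmasked hard interrupt and no NMI is
+     * pending; otherwise keep running so the event can be delivered.
+     */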
+ if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) &&
+ !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->exception_index = EXCP_HLT;
+ cpu->halted = true;
+ ret = 1;
+ }
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static void whpx_vcpu_pre_run(CPUState *cpu)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int irq;
+ uint8_t tpr;
+ WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
+ UINT32 reg_count = 0;
+ WHV_REGISTER_VALUE reg_values[3];
+ WHV_REGISTER_NAME reg_names[3];
+
+ memset(&new_int, 0, sizeof(new_int));
+ memset(reg_values, 0, sizeof(reg_values));
+
+ qemu_mutex_lock_iothread();
+
+ /* Inject NMI */
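+    /* An NMI uses fixed vector 2; a pending SMI request is simply cleared. */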
+ if (!vcpu->interruption_pending &&
+ cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ vcpu->interruptable = false;
+ new_int.InterruptionType = WHvX64PendingNmi;
+ new_int.InterruptionPending = 1;
+ new_int.InterruptionVector = 2;
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ }
+ }
+
+ /*
+ * Force the VCPU out of its inner loop to process any INIT requests or
+ * commit pending TPR access.
+ */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ cpu->exit_request = 1;
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu->exit_request = 1;
+ }
+ }
+
+ /* Get pending hard interruption or replay one that was overwritten */
+ if (!whpx_apic_in_platform()) {
+ if (!vcpu->interruption_pending &&
+ vcpu->interruptable && (env->eflags & IF_MASK)) {
+ assert(!new_int.InterruptionPending);
+ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ new_int.InterruptionType = WHvX64PendingInterrupt;
+ new_int.InterruptionPending = 1;
+ new_int.InterruptionVector = irq;
+ }
+ }
+ }
+
+ /* Setup interrupt state if new one was prepared */
+ if (new_int.InterruptionPending) {
+ reg_values[reg_count].PendingInterruption = new_int;
+ reg_names[reg_count] = WHvRegisterPendingInterruption;
+ reg_count += 1;
+ }
+ } else if (vcpu->ready_for_pic_interrupt &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ reg_names[reg_count] = WHvRegisterPendingEvent;
+ reg_values[reg_count].ExtIntEvent = (WHV_X64_PENDING_EXT_INT_EVENT)
+ {
+ .EventPending = 1,
+ .EventType = WHvX64PendingEventExtInt,
+ .Vector = irq,
+ };
+ reg_count += 1;
+ }
+ }
+
+    /* Sync the TPR to CR8 if it was modified during the intercept */
+ tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+ if (tpr != vcpu->tpr) {
+ vcpu->tpr = tpr;
+ reg_values[reg_count].Reg64 = tpr;
+ cpu->exit_request = 1;
+ reg_names[reg_count] = WHvX64RegisterCr8;
+ reg_count += 1;
+ }
+
+ /* Update the state of the interrupt delivery notification */
+ if (!vcpu->window_registered &&
+ cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ reg_values[reg_count].DeliverabilityNotifications =
+ (WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) {
+ .InterruptNotification = 1
+ };
+ vcpu->window_registered = 1;
+ reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
+ reg_count += 1;
+ }
+
+ qemu_mutex_unlock_iothread();
+ vcpu->ready_for_pic_interrupt = false;
+
+ if (reg_count) {
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ reg_names, reg_count, reg_values);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set interrupt state registers,"
+ " hr=%08lx", hr);
+ }
+ }
+
+ return;
+}
+
+static void whpx_vcpu_post_run(CPUState *cpu)
+{
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+
+ env->eflags = vcpu->exit_ctx.VpContext.Rflags;
+
+ uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
+ if (vcpu->tpr != tpr) {
+ vcpu->tpr = tpr;
+ qemu_mutex_lock_iothread();
+ cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
+ qemu_mutex_unlock_iothread();
+ }
+
+ vcpu->interruption_pending =
+ vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;
+
+ vcpu->interruptable =
+ !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
+
+ return;
+}
+
+static void whpx_vcpu_process_async_events(CPUState *cpu)
+{
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+
+ if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ whpx_cpu_synchronize_state(cpu);
+ do_cpu_init(x86_cpu);
+ vcpu->interruptable = true;
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
+
+ if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->halted = false;
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ whpx_cpu_synchronize_state(cpu);
+ do_cpu_sipi(x86_cpu);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ whpx_cpu_synchronize_state(cpu);
+ apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+
+ return;
+}
+
+static int whpx_vcpu_run(CPUState *cpu)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+    int ret = 0;
+
+ whpx_vcpu_process_async_events(cpu);
+ if (cpu->halted && !whpx_apic_in_platform()) {
+ cpu->exception_index = EXCP_HLT;
+ qatomic_set(&cpu->exit_request, false);
+ return 0;
+ }
+
+ qemu_mutex_unlock_iothread();
+ cpu_exec_start(cpu);
+
+ do {
+ if (cpu->vcpu_dirty) {
+ whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE);
+ cpu->vcpu_dirty = false;
+ }
+
+ whpx_vcpu_pre_run(cpu);
+
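+        /*
+         * If an exit was already requested, kick the vCPU so the upcoming
+         * WHvRunVirtualProcessor call returns immediately as canceled.
+         */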
+ if (qatomic_read(&cpu->exit_request)) {
+ whpx_vcpu_kick(cpu);
+ }
+
+ hr = whp_dispatch.WHvRunVirtualProcessor(
+ whpx->partition, cpu->cpu_index,
+ &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to exec a virtual processor,"
+ " hr=%08lx", hr);
+ ret = -1;
+ break;
+ }
+
+ whpx_vcpu_post_run(cpu);
+
+ switch (vcpu->exit_ctx.ExitReason) {
+ case WHvRunVpExitReasonMemoryAccess:
+ ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
+ break;
+
+ case WHvRunVpExitReasonX64IoPortAccess:
+ ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
+ break;
+
+ case WHvRunVpExitReasonX64InterruptWindow:
+ vcpu->ready_for_pic_interrupt = 1;
+ vcpu->window_registered = 0;
+ ret = 0;
+ break;
+
+ case WHvRunVpExitReasonX64ApicEoi:
+ assert(whpx_apic_in_platform());
+ ioapic_eoi_broadcast(vcpu->exit_ctx.ApicEoi.InterruptVector);
+ break;
+
+ case WHvRunVpExitReasonX64Halt:
+ ret = whpx_handle_halt(cpu);
+ break;
+
+ case WHvRunVpExitReasonX64ApicInitSipiTrap: {
+ WHV_INTERRUPT_CONTROL ipi = {0};
+ uint64_t icr = vcpu->exit_ctx.ApicInitSipi.ApicIcr;
+ uint32_t delivery_mode =
+ (icr & APIC_ICR_DELIV_MOD) >> APIC_ICR_DELIV_MOD_SHIFT;
+ int dest_shorthand =
+ (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
+ bool broadcast = false;
+ bool include_self = false;
+ uint32_t i;
+
+ /* We only registered for INIT and SIPI exits. */
+ if ((delivery_mode != APIC_DM_INIT) &&
+ (delivery_mode != APIC_DM_SIPI)) {
+ error_report(
+                "WHPX: Unexpected APIC exit that is not an INIT or SIPI");
+ break;
+ }
+
+ if (delivery_mode == APIC_DM_INIT) {
+ ipi.Type = WHvX64InterruptTypeInit;
+ } else {
+ ipi.Type = WHvX64InterruptTypeSipi;
+ }
+
+ ipi.DestinationMode =
+ ((icr & APIC_ICR_DEST_MOD) >> APIC_ICR_DEST_MOD_SHIFT) ?
+ WHvX64InterruptDestinationModeLogical :
+ WHvX64InterruptDestinationModePhysical;
+
+ ipi.TriggerMode =
+ ((icr & APIC_ICR_TRIGGER_MOD) >> APIC_ICR_TRIGGER_MOD_SHIFT) ?
+ WHvX64InterruptTriggerModeLevel :
+ WHvX64InterruptTriggerModeEdge;
+
+ ipi.Vector = icr & APIC_VECTOR_MASK;
+ switch (dest_shorthand) {
+ /* no shorthand. Bits 56-63 contain the destination. */
+ case 0:
+ ipi.Destination = (icr >> 56) & APIC_VECTOR_MASK;
+ hr = whp_dispatch.WHvRequestInterrupt(whpx->partition,
+ &ipi, sizeof(ipi));
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to request interrupt hr=%08lx",
+ hr);
+ }
+
+ break;
+
+ /* self */
+ case 1:
+ include_self = true;
+ break;
+
+ /* broadcast, including self */
+ case 2:
+ broadcast = true;
+ include_self = true;
+ break;
+
+ /* broadcast, excluding self */
+ case 3:
+ broadcast = true;
+ break;
+ }
+
+ if (!broadcast && !include_self) {
+ break;
+ }
+
+ for (i = 0; i <= max_vcpu_index; i++) {
+ if (i == cpu->cpu_index && !include_self) {
+ continue;
+ }
+
+ /*
+             * Assume that APIC IDs are identity-mapped, since the
+ * WHvX64RegisterApicId & WHvX64RegisterInitialApicId registers
+ * are not handled yet and the hypervisor doesn't allow the
+ * guest to modify the APIC ID.
+ */
+ ipi.Destination = i;
+ hr = whp_dispatch.WHvRequestInterrupt(whpx->partition,
+ &ipi, sizeof(ipi));
+ if (FAILED(hr)) {
+ error_report(
+ "WHPX: Failed to request SIPI for %d, hr=%08lx",
+ i, hr);
+ }
+ }
+
+ break;
+ }
+
+ case WHvRunVpExitReasonCanceled:
+ cpu->exception_index = EXCP_INTERRUPT;
+ ret = 1;
+ break;
+
+ case WHvRunVpExitReasonX64MsrAccess: {
+ WHV_REGISTER_VALUE reg_values[3] = {0};
+ WHV_REGISTER_NAME reg_names[3];
+ UINT32 reg_count;
+
+ reg_names[0] = WHvX64RegisterRip;
+ reg_names[1] = WHvX64RegisterRax;
+ reg_names[2] = WHvX64RegisterRdx;
+
+ reg_values[0].Reg64 =
+ vcpu->exit_ctx.VpContext.Rip +
+ vcpu->exit_ctx.VpContext.InstructionLength;
+
+ /*
+ * For all unsupported MSR access we:
+ * ignore writes
+ * return 0 on read.
+ */
+ reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?
+ 1 : 3;
+
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition,
+ cpu->cpu_index,
+ reg_names, reg_count,
+ reg_values);
+
+ if (FAILED(hr)) {
+                error_report("WHPX: Failed to set MsrAccess state"
+                             " registers, hr=%08lx", hr);
+ }
+ ret = 0;
+ break;
+ }
+ case WHvRunVpExitReasonX64Cpuid: {
+ WHV_REGISTER_VALUE reg_values[5];
+ WHV_REGISTER_NAME reg_names[5];
+ UINT32 reg_count = 5;
+ UINT64 cpuid_fn, rip = 0, rax = 0, rcx = 0, rdx = 0, rbx = 0;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ memset(reg_values, 0, sizeof(reg_values));
+
+ rip = vcpu->exit_ctx.VpContext.Rip +
+ vcpu->exit_ctx.VpContext.InstructionLength;
+ cpuid_fn = vcpu->exit_ctx.CpuidAccess.Rax;
+
+ /*
+ * Ideally, these should be supplied to the hypervisor during VCPU
+ * initialization and it should be able to satisfy this request.
+ * But, currently, WHPX doesn't support setting CPUID values in the
+ * hypervisor once the partition has been setup, which is too late
+ * since VCPUs are realized later. For now, use the values from
+ * QEMU to satisfy these requests, until WHPX adds support for
+ * being able to set these values in the hypervisor at runtime.
+ */
+ cpu_x86_cpuid(env, cpuid_fn, 0, (UINT32 *)&rax, (UINT32 *)&rbx,
+ (UINT32 *)&rcx, (UINT32 *)&rdx);
+ switch (cpuid_fn) {
+ case 0x40000000:
+ /* Expose the vmware cpu frequency cpuid leaf */
+ rax = 0x40000010;
+ rbx = rcx = rdx = 0;
+ break;
+
+ case 0x40000010:
+ rax = env->tsc_khz;
+ rbx = env->apic_bus_freq / 1000; /* Hz to KHz */
+ rcx = rdx = 0;
+ break;
+
+ case 0x80000001:
+ /* Remove any support of OSVW */
+ rcx &= ~CPUID_EXT3_OSVW;
+ break;
+ }
+
+ reg_names[0] = WHvX64RegisterRip;
+ reg_names[1] = WHvX64RegisterRax;
+ reg_names[2] = WHvX64RegisterRcx;
+ reg_names[3] = WHvX64RegisterRdx;
+ reg_names[4] = WHvX64RegisterRbx;
+
+ reg_values[0].Reg64 = rip;
+ reg_values[1].Reg64 = rax;
+ reg_values[2].Reg64 = rcx;
+ reg_values[3].Reg64 = rdx;
+ reg_values[4].Reg64 = rbx;
+
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx->partition, cpu->cpu_index,
+ reg_names,
+ reg_count,
+ reg_values);
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set CpuidAccess state registers,"
+ " hr=%08lx", hr);
+ }
+ ret = 0;
+ break;
+ }
+ case WHvRunVpExitReasonNone:
+ case WHvRunVpExitReasonUnrecoverableException:
+ case WHvRunVpExitReasonInvalidVpRegisterValue:
+ case WHvRunVpExitReasonUnsupportedFeature:
+ case WHvRunVpExitReasonException:
+ default:
+ error_report("WHPX: Unexpected VP exit code %d",
+ vcpu->exit_ctx.ExitReason);
+ whpx_get_registers(cpu);
+ qemu_mutex_lock_iothread();
+ qemu_system_guest_panicked(cpu_get_crash_info(cpu));
+ qemu_mutex_unlock_iothread();
+ break;
+ }
+
+ } while (!ret);
+
+ cpu_exec_end(cpu);
+ qemu_mutex_lock_iothread();
+ current_cpu = cpu;
+
+ qatomic_set(&cpu->exit_request, false);
+
+ return ret < 0;
+}
+
+static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ if (!cpu->vcpu_dirty) {
+ whpx_get_registers(cpu);
+ cpu->vcpu_dirty = true;
+ }
+}
+
+static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ whpx_set_registers(cpu, WHPX_SET_RESET_STATE);
+ cpu->vcpu_dirty = false;
+}
+
+static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ whpx_set_registers(cpu, WHPX_SET_FULL_STATE);
+ cpu->vcpu_dirty = false;
+}
+
+static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ cpu->vcpu_dirty = true;
+}
+
+/*
+ * CPU support.
+ */
+
+void whpx_cpu_synchronize_state(CPUState *cpu)
+{
+ if (!cpu->vcpu_dirty) {
+ run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ }
+}
+
+void whpx_cpu_synchronize_post_reset(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+void whpx_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
+/*
+ * Vcpu support.
+ */
+
+static Error *whpx_migration_blocker;
+
+static void whpx_cpu_update_state(void *opaque, bool running, RunState state)
+{
+ CPUX86State *env = opaque;
+
+ if (running) {
+ env->tsc_valid = false;
+ }
+}
+
+int whpx_init_vcpu(CPUState *cpu)
+{
+ HRESULT hr;
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = NULL;
+ Error *local_error = NULL;
+ struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ UINT64 freq = 0;
+ int ret;
+
+    /*
+     * Add migration blockers for all unsupported features of the
+     * Windows Hypervisor Platform.
+     */
+ if (whpx_migration_blocker == NULL) {
+ error_setg(&whpx_migration_blocker,
+                   "State blocked due to non-migratable CPUID feature support, "
+                   "dirty memory tracking support, and XSAVE/XRSTOR support");
+
+ if (migrate_add_blocker(whpx_migration_blocker, &local_error) < 0) {
+ error_report_err(local_error);
+ error_free(whpx_migration_blocker);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ vcpu = g_malloc0(sizeof(struct whpx_vcpu));
+
+ if (!vcpu) {
+        error_report("WHPX: Failed to allocate VCPU context.");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvEmulatorCreateEmulator(
+ &whpx_emu_callbacks,
+ &vcpu->emulator);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to setup instruction completion support,"
+ " hr=%08lx", hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvCreateVirtualProcessor(
+ whpx->partition, cpu->cpu_index, 0);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to create a virtual processor,"
+ " hr=%08lx", hr);
+ whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+     * The vCPU's TSC frequency is either specified by the user or, when not
+     * set, taken from Hyper-V. In the latter case, we query the value from
+     * Hyper-V and record it in env->tsc_khz, so that the vCPU's TSC
+     * frequency can be migrated later via this field.
+ */
+ if (!env->tsc_khz) {
+ hr = whp_dispatch.WHvGetCapability(
+ WHvCapabilityCodeProcessorClockFrequency, &freq, sizeof(freq),
+ NULL);
+ if (hr != WHV_E_UNKNOWN_CAPABILITY) {
+ if (FAILED(hr)) {
+ printf("WHPX: Failed to query tsc frequency, hr=0x%08lx\n", hr);
+ } else {
+ env->tsc_khz = freq / 1000; /* Hz to KHz */
+ }
+ }
+ }
+
+ env->apic_bus_freq = HYPERV_APIC_BUS_FREQUENCY;
+ hr = whp_dispatch.WHvGetCapability(
+ WHvCapabilityCodeInterruptClockFrequency, &freq, sizeof(freq), NULL);
+ if (hr != WHV_E_UNKNOWN_CAPABILITY) {
+ if (FAILED(hr)) {
+ printf("WHPX: Failed to query apic bus frequency hr=0x%08lx\n", hr);
+ } else {
+ env->apic_bus_freq = freq;
+ }
+ }
+
+ /*
+ * If the vmware cpuid frequency leaf option is set, and we have a valid
+     * tsc value, trap the corresponding CPUID leaves.
+ */
+ if (x86_cpu->vmware_cpuid_freq && env->tsc_khz) {
+ UINT32 cpuidExitList[] = {1, 0x80000001, 0x40000000, 0x40000010};
+
+ hr = whp_dispatch.WHvSetPartitionProperty(
+ whpx->partition,
+ WHvPartitionPropertyCodeCpuidExitList,
+ cpuidExitList,
+ RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
+ hr);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
+ vcpu->interruptable = true;
+ cpu->vcpu_dirty = true;
+ cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
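+    /* Track the highest vCPU index so INIT/SIPI can reach every vCPU. */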
+ max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);
+ qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
+
+ return 0;
+
+error:
+ g_free(vcpu);
+
+ return ret;
+}
+
+int whpx_vcpu_exec(CPUState *cpu)
+{
+ int ret;
+ int fatal;
+
+ for (;;) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
+ ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ break;
+ }
+
+ fatal = whpx_vcpu_run(cpu);
+
+ if (fatal) {
+ error_report("WHPX: Failed to exec a virtual processor");
+ abort();
+ }
+ }
+
+ return ret;
+}
+
+void whpx_destroy_vcpu(CPUState *cpu)
+{
+ struct whpx_state *whpx = &whpx_global;
+ struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
+
+ whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
+ whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
+ g_free(cpu->hax_vcpu);
+ return;
+}
+
+void whpx_vcpu_kick(CPUState *cpu)
+{
+ struct whpx_state *whpx = &whpx_global;
+ whp_dispatch.WHvCancelRunVirtualProcessor(
+ whpx->partition, cpu->cpu_index, 0);
+}
+
+/*
+ * Memory support.
+ */
+
+static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
+ void *host_va, int add, int rom,
+ const char *name)
+{
+ struct whpx_state *whpx = &whpx_global;
+ HRESULT hr;
+
+ /*
+ if (add) {
+ printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
+ (void*)start_pa, (void*)size, host_va,
+ (rom ? "ROM" : "RAM"), name);
+ } else {
+ printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
+ (void*)start_pa, (void*)size, host_va, name);
+ }
+ */
+
+ if (add) {
+ hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
+ host_va,
+ start_pa,
+ size,
+ (WHvMapGpaRangeFlagRead |
+ WHvMapGpaRangeFlagExecute |
+ (rom ? 0 : WHvMapGpaRangeFlagWrite)));
+ } else {
+ hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,
+ start_pa,
+ size);
+ }
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
+ " Host:%p, hr=%08lx",
+ (add ? "MAP" : "UNMAP"), name,
+ (void *)(uintptr_t)start_pa, (void *)size, host_va, hr);
+ }
+}
+
+static void whpx_process_section(MemoryRegionSection *section, int add)
+{
+ MemoryRegion *mr = section->mr;
+ hwaddr start_pa = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ unsigned int delta;
+ uint64_t host_va;
+
+ if (!memory_region_is_ram(mr)) {
+ return;
+ }
+
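+    /*
+     * Round the start up to a host-page boundary and trim the size to whole
+     * host pages; the GPA mapping works at host-page granularity.
+     */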
+ delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
+ delta &= ~qemu_real_host_page_mask;
+ if (delta > size) {
+ return;
+ }
+ start_pa += delta;
+ size -= delta;
+ size &= qemu_real_host_page_mask;
+ if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ return;
+ }
+
+ host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
+ + section->offset_within_region + delta;
+
+ whpx_update_mapping(start_pa, size, (void *)(uintptr_t)host_va, add,
+ memory_region_is_rom(mr), mr->name);
+}
+
+static void whpx_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ memory_region_ref(section->mr);
+ whpx_process_section(section, 1);
+}
+
+static void whpx_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ whpx_process_section(section, 0);
+ memory_region_unref(section->mr);
+}
+
+static void whpx_transaction_begin(MemoryListener *listener)
+{
+}
+
+static void whpx_transaction_commit(MemoryListener *listener)
+{
+}
+
+static void whpx_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+
+ if (!memory_region_is_ram(mr)) {
+ return;
+ }
+
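+    /*
+     * WHPX does not provide dirty-page tracking, so conservatively mark the
+     * whole region dirty on every sync.
+     */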
+ memory_region_set_dirty(mr, 0, int128_get64(section->size));
+}
+
+static MemoryListener whpx_memory_listener = {
+ .name = "whpx",
+ .begin = whpx_transaction_begin,
+ .commit = whpx_transaction_commit,
+ .region_add = whpx_region_add,
+ .region_del = whpx_region_del,
+ .log_sync = whpx_log_sync,
+ .priority = 10,
+};
+
+static void whpx_memory_init(void)
+{
+ memory_listener_register(&whpx_memory_listener, &address_space_memory);
+}
+
+/*
+ * Load the functions from the given library, using the given handle. If a
+ * handle is provided, it is used; otherwise, the library is opened and the
+ * handle is updated on return with the newly opened one.
+ */
+static bool load_whp_dispatch_fns(HMODULE *handle,
+ WHPFunctionList function_list)
+{
+ HMODULE hLib = *handle;
+
+ #define WINHV_PLATFORM_DLL "WinHvPlatform.dll"
+ #define WINHV_EMULATION_DLL "WinHvEmulation.dll"
+ #define WHP_LOAD_FIELD_OPTIONAL(return_type, function_name, signature) \
+ whp_dispatch.function_name = \
+ (function_name ## _t)GetProcAddress(hLib, #function_name); \
+
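+    /* Unlike the optional loader above, a missing export is a hard error. */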
+ #define WHP_LOAD_FIELD(return_type, function_name, signature) \
+ whp_dispatch.function_name = \
+ (function_name ## _t)GetProcAddress(hLib, #function_name); \
+ if (!whp_dispatch.function_name) { \
+ error_report("Could not load function %s", #function_name); \
+ goto error; \
+ } \
+
+ #define WHP_LOAD_LIB(lib_name, handle_lib) \
+ if (!handle_lib) { \
+ handle_lib = LoadLibrary(lib_name); \
+ if (!handle_lib) { \
+ error_report("Could not load library %s.", lib_name); \
+ goto error; \
+ } \
+ } \
+
+ switch (function_list) {
+ case WINHV_PLATFORM_FNS_DEFAULT:
+ WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
+ LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)
+ break;
+
+ case WINHV_EMULATION_FNS_DEFAULT:
+ WHP_LOAD_LIB(WINHV_EMULATION_DLL, hLib)
+ LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)
+ break;
+
+ case WINHV_PLATFORM_FNS_SUPPLEMENTAL:
+ WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
+ LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_LOAD_FIELD_OPTIONAL)
+ break;
+ }
+
+ *handle = hLib;
+ return true;
+
+error:
+ if (hLib) {
+ FreeLibrary(hLib);
+ }
+
+ return false;
+}
+
+static void whpx_set_kernel_irqchip(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ struct whpx_state *whpx = &whpx_global;
+ OnOffSplit mode;
+
+ if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
+ return;
+ }
+
+ switch (mode) {
+ case ON_OFF_SPLIT_ON:
+ whpx->kernel_irqchip_allowed = true;
+ whpx->kernel_irqchip_required = true;
+ break;
+
+ case ON_OFF_SPLIT_OFF:
+ whpx->kernel_irqchip_allowed = false;
+ whpx->kernel_irqchip_required = false;
+ break;
+
+ case ON_OFF_SPLIT_SPLIT:
+ error_setg(errp, "WHPX: split irqchip currently not supported");
+ error_append_hint(errp,
+ "Try without kernel-irqchip or with kernel-irqchip=on|off");
+ break;
+
+ default:
+ /*
+ * The value was checked in visit_type_OnOffSplit() above. If
+ * we get here, then something is wrong in QEMU.
+ */
+ abort();
+ }
+}
+
+/*
+ * Partition support
+ */
+
+static int whpx_accel_init(MachineState *ms)
+{
+ struct whpx_state *whpx;
+ int ret;
+ HRESULT hr;
+ WHV_CAPABILITY whpx_cap;
+ UINT32 whpx_cap_size;
+ WHV_PARTITION_PROPERTY prop;
+ UINT32 cpuidExitList[] = {1, 0x80000001};
+ WHV_CAPABILITY_FEATURES features = {0};
+
+ whpx = &whpx_global;
+
+ if (!init_whp_dispatch()) {
+ ret = -ENOSYS;
+ goto error;
+ }
+
+ whpx->mem_quota = ms->ram_size;
+
+ hr = whp_dispatch.WHvGetCapability(
+ WHvCapabilityCodeHypervisorPresent, &whpx_cap,
+ sizeof(whpx_cap), &whpx_cap_size);
+ if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
+ error_report("WHPX: No accelerator found, hr=%08lx", hr);
+ ret = -ENOSPC;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvGetCapability(
+ WHvCapabilityCodeFeatures, &features, sizeof(features), NULL);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to query capabilities, hr=%08lx", hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to create partition, hr=%08lx", hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
+ prop.ProcessorCount = ms->smp.cpus;
+ hr = whp_dispatch.WHvSetPartitionProperty(
+ whpx->partition,
+ WHvPartitionPropertyCodeProcessorCount,
+ &prop,
+ sizeof(WHV_PARTITION_PROPERTY));
+
+ if (FAILED(hr)) {
+        error_report("WHPX: Failed to set partition processor count to %u,"
+                     " hr=%08lx", ms->smp.cpus, hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+     * Error out if WHP doesn't support APIC emulation and the user requires
+     * it.
+ */
+ if (whpx->kernel_irqchip_required && (!features.LocalApicEmulation ||
+ !whp_dispatch.WHvSetVirtualProcessorInterruptControllerState2)) {
+ error_report("WHPX: kernel irqchip requested, but unavailable. "
+ "Try without kernel-irqchip or with kernel-irqchip=off");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (whpx->kernel_irqchip_allowed && features.LocalApicEmulation &&
+ whp_dispatch.WHvSetVirtualProcessorInterruptControllerState2) {
+ WHV_X64_LOCAL_APIC_EMULATION_MODE mode =
+ WHvX64LocalApicEmulationModeXApic;
+ printf("WHPX: setting APIC emulation mode in the hypervisor\n");
+ hr = whp_dispatch.WHvSetPartitionProperty(
+ whpx->partition,
+ WHvPartitionPropertyCodeLocalApicEmulationMode,
+ &mode,
+ sizeof(mode));
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to enable kernel irqchip hr=%08lx", hr);
+ if (whpx->kernel_irqchip_required) {
+ error_report("WHPX: kernel irqchip requested, but unavailable");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ whpx->apic_in_platform = true;
+ }
+ }
+
+ /* Register for MSR and CPUID exits */
+ memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
+ prop.ExtendedVmExits.X64MsrExit = 1;
+ prop.ExtendedVmExits.X64CpuidExit = 1;
+ if (whpx_apic_in_platform()) {
+ prop.ExtendedVmExits.X64ApicInitSipiExitTrap = 1;
+ }
+
+ hr = whp_dispatch.WHvSetPartitionProperty(
+ whpx->partition,
+ WHvPartitionPropertyCodeExtendedVmExits,
+ &prop,
+ sizeof(WHV_PARTITION_PROPERTY));
+ if (FAILED(hr)) {
+        error_report("WHPX: Failed to enable MSR & CPUID exits, hr=%08lx", hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvSetPartitionProperty(
+ whpx->partition,
+ WHvPartitionPropertyCodeCpuidExitList,
+ cpuidExitList,
+ RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
+ hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hr = whp_dispatch.WHvSetupPartition(whpx->partition);
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ whpx_memory_init();
+
+ printf("Windows Hypervisor Platform accelerator is operational\n");
+ return 0;
+
+error:
+
+    if (whpx->partition != NULL) {
+ whp_dispatch.WHvDeletePartition(whpx->partition);
+ whpx->partition = NULL;
+ }
+
+ return ret;
+}
+
+int whpx_enabled(void)
+{
+ return whpx_allowed;
+}
+
+bool whpx_apic_in_platform(void)
+{
+ return whpx_global.apic_in_platform;
+}
+
+static void whpx_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "WHPX";
+ ac->init_machine = whpx_accel_init;
+ ac->allowed = &whpx_allowed;
+
+ object_class_property_add(oc, "kernel-irqchip", "on|off|split",
+ NULL, whpx_set_kernel_irqchip,
+ NULL, NULL);
+ object_class_property_set_description(oc, "kernel-irqchip",
+ "Configure WHPX in-kernel irqchip");
+}
+
+static void whpx_accel_instance_init(Object *obj)
+{
+ struct whpx_state *whpx = &whpx_global;
+
+ memset(whpx, 0, sizeof(struct whpx_state));
+ /* Turn on kernel-irqchip, by default */
+ whpx->kernel_irqchip_allowed = true;
+}
+
+static const TypeInfo whpx_accel_type = {
+ .name = ACCEL_CLASS_NAME("whpx"),
+ .parent = TYPE_ACCEL,
+ .instance_init = whpx_accel_instance_init,
+ .class_init = whpx_accel_class_init,
+};
+
+static void whpx_type_init(void)
+{
+ type_register_static(&whpx_accel_type);
+}
+
+bool init_whp_dispatch(void)
+{
+ if (whp_dispatch_initialized) {
+ return true;
+ }
+
+ if (!load_whp_dispatch_fns(&hWinHvPlatform, WINHV_PLATFORM_FNS_DEFAULT)) {
+ goto error;
+ }
+
+ if (!load_whp_dispatch_fns(&hWinHvEmulation, WINHV_EMULATION_FNS_DEFAULT)) {
+ goto error;
+ }
+
+ assert(load_whp_dispatch_fns(&hWinHvPlatform,
+ WINHV_PLATFORM_FNS_SUPPLEMENTAL));
+ whp_dispatch_initialized = true;
+
+ return true;
+error:
+ if (hWinHvPlatform) {
+ FreeLibrary(hWinHvPlatform);
+ }
+
+ if (hWinHvEmulation) {
+ FreeLibrary(hWinHvEmulation);
+ }
+
+ return false;
+}
+
+type_init(whpx_type_init);
diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c
new file mode 100644
index 000000000..bba36f3ec
--- /dev/null
+++ b/target/i386/whpx/whpx-apic.c
@@ -0,0 +1,281 @@
+/*
+ * WHPX platform APIC support
+ *
+ * Copyright (c) 2011 Siemens AG
+ *
+ * Authors:
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ * John Starks <jostarks@microsoft.com>
+ *
+ * This work is licensed under the terms of the GNU GPL version 2.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/pci/msi.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/whpx.h"
+#include "whpx-internal.h"
+
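+/* Mirrors the xAPIC register page: one 32-bit register per 16 bytes, 4 KiB. */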
+struct whpx_lapic_state {
+ struct {
+ uint32_t data;
+ uint32_t padding[3];
+ } fields[256];
+};
+
+static void whpx_put_apic_state(APICCommonState *s,
+ struct whpx_lapic_state *kapic)
+{
+ int i;
+
+ memset(kapic, 0, sizeof(*kapic));
+ kapic->fields[0x2].data = s->id << 24;
+ kapic->fields[0x3].data = s->version | ((APIC_LVT_NB - 1) << 16);
+ kapic->fields[0x8].data = s->tpr;
+ kapic->fields[0xd].data = s->log_dest << 24;
+ kapic->fields[0xe].data = s->dest_mode << 28 | 0x0fffffff;
+ kapic->fields[0xf].data = s->spurious_vec;
+ for (i = 0; i < 8; i++) {
+ kapic->fields[0x10 + i].data = s->isr[i];
+ kapic->fields[0x18 + i].data = s->tmr[i];
+ kapic->fields[0x20 + i].data = s->irr[i];
+ }
+
+ kapic->fields[0x28].data = s->esr;
+ kapic->fields[0x30].data = s->icr[0];
+ kapic->fields[0x31].data = s->icr[1];
+ for (i = 0; i < APIC_LVT_NB; i++) {
+ kapic->fields[0x32 + i].data = s->lvt[i];
+ }
+
+ kapic->fields[0x38].data = s->initial_count;
+ kapic->fields[0x3e].data = s->divide_conf;
+}
+
+static void whpx_get_apic_state(APICCommonState *s,
+ struct whpx_lapic_state *kapic)
+{
+ int i, v;
+
+ s->id = kapic->fields[0x2].data >> 24;
+ s->tpr = kapic->fields[0x8].data;
+ s->arb_id = kapic->fields[0x9].data;
+ s->log_dest = kapic->fields[0xd].data >> 24;
+ s->dest_mode = kapic->fields[0xe].data >> 28;
+ s->spurious_vec = kapic->fields[0xf].data;
+ for (i = 0; i < 8; i++) {
+ s->isr[i] = kapic->fields[0x10 + i].data;
+ s->tmr[i] = kapic->fields[0x18 + i].data;
+ s->irr[i] = kapic->fields[0x20 + i].data;
+ }
+
+ s->esr = kapic->fields[0x28].data;
+ s->icr[0] = kapic->fields[0x30].data;
+ s->icr[1] = kapic->fields[0x31].data;
+ for (i = 0; i < APIC_LVT_NB; i++) {
+ s->lvt[i] = kapic->fields[0x32 + i].data;
+ }
+
+ s->initial_count = kapic->fields[0x38].data;
+ s->divide_conf = kapic->fields[0x3e].data;
+
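+    /* The timer divisor is encoded in bits 0-1 and bit 3 of divide_conf. */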
+ v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4);
+ s->count_shift = (v + 1) & 7;
+
+ s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ apic_next_timer(s, s->initial_count_load_time);
+}
+
+static void whpx_apic_set_base(APICCommonState *s, uint64_t val)
+{
+ s->apicbase = val;
+}
+
+static void whpx_put_apic_base(CPUState *cpu, uint64_t val)
+{
+ HRESULT hr;
+ WHV_REGISTER_VALUE reg_value = {.Reg64 = val};
+ WHV_REGISTER_NAME reg_name = WHvX64RegisterApicBase;
+
+ hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
+ whpx_global.partition,
+ cpu->cpu_index,
+ &reg_name, 1,
+ &reg_value);
+
+ if (FAILED(hr)) {
+ error_report("WHPX: Failed to set MSR APIC base, hr=%08lx", hr);
+ }
+}
+
+static void whpx_apic_set_tpr(APICCommonState *s, uint8_t val)
+{
+ s->tpr = val;
+}
+
+static uint8_t whpx_apic_get_tpr(APICCommonState *s)
+{
+ return s->tpr;
+}
+
+static void whpx_apic_vapic_base_update(APICCommonState *s)
+{
+ /* not implemented yet */
+}
+
+static void whpx_apic_put(CPUState *cs, run_on_cpu_data data)
+{
+ APICCommonState *s = data.host_ptr;
+ struct whpx_lapic_state kapic;
+ HRESULT hr;
+
+ whpx_put_apic_base(CPU(s->cpu), s->apicbase);
+ whpx_put_apic_state(s, &kapic);
+
+ hr = whp_dispatch.WHvSetVirtualProcessorInterruptControllerState2(
+ whpx_global.partition,
+ cs->cpu_index,
+ &kapic,
+ sizeof(kapic));
+ if (FAILED(hr)) {
+ fprintf(stderr,
+ "WHvSetVirtualProcessorInterruptControllerState failed: %08lx\n",
+ hr);
+
+ abort();
+ }
+}
+
+void whpx_apic_get(DeviceState *dev)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+ CPUState *cpu = CPU(s->cpu);
+ struct whpx_lapic_state kapic;
+
+ HRESULT hr = whp_dispatch.WHvGetVirtualProcessorInterruptControllerState2(
+ whpx_global.partition,
+ cpu->cpu_index,
+ &kapic,
+ sizeof(kapic),
+ NULL);
+ if (FAILED(hr)) {
+ fprintf(stderr,
+                "WHvGetVirtualProcessorInterruptControllerState failed: %08lx\n",
+ hr);
+
+ abort();
+ }
+
+ whpx_get_apic_state(s, &kapic);
+}
+
+static void whpx_apic_post_load(APICCommonState *s)
+{
+ run_on_cpu(CPU(s->cpu), whpx_apic_put, RUN_ON_CPU_HOST_PTR(s));
+}
+
+static void whpx_apic_external_nmi(APICCommonState *s)
+{
+}
+
+static void whpx_send_msi(MSIMessage *msg)
+{
+ uint64_t addr = msg->address;
+ uint32_t data = msg->data;
+ uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+ uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+ uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+ uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
+
+ WHV_INTERRUPT_CONTROL interrupt = {
+ /* Values correspond to delivery modes */
+ .Type = delivery,
+ .DestinationMode = dest_mode ?
+ WHvX64InterruptDestinationModeLogical :
+ WHvX64InterruptDestinationModePhysical,
+
+ .TriggerMode = trigger_mode ?
+ WHvX64InterruptTriggerModeLevel : WHvX64InterruptTriggerModeEdge,
+ .Reserved = 0,
+ .Vector = vector,
+ .Destination = dest,
+ };
+ HRESULT hr = whp_dispatch.WHvRequestInterrupt(whpx_global.partition,
+ &interrupt, sizeof(interrupt));
+ if (FAILED(hr)) {
+ fprintf(stderr, "whpx: injection failed, MSI (%llx, %x) delivery: %d, "
+ "dest_mode: %d, trigger mode: %d, vector: %d, lost (%08lx)\n",
+ addr, data, delivery, dest_mode, trigger_mode, vector, hr);
+ }
+}
+
+static uint64_t whpx_apic_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ return ~(uint64_t)0;
+}
+
+static void whpx_apic_mem_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ MSIMessage msg = { .address = addr, .data = data };
+ whpx_send_msi(&msg);
+}
+
+static const MemoryRegionOps whpx_apic_io_ops = {
+ .read = whpx_apic_mem_read,
+ .write = whpx_apic_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void whpx_apic_reset(APICCommonState *s)
+{
+ /* Not used by WHPX. */
+ s->wait_for_sipi = 0;
+
+ run_on_cpu(CPU(s->cpu), whpx_apic_put, RUN_ON_CPU_HOST_PTR(s));
+}
+
+static void whpx_apic_realize(DeviceState *dev, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(dev);
+
+ memory_region_init_io(&s->io_memory, OBJECT(s), &whpx_apic_io_ops, s,
+ "whpx-apic-msi", APIC_SPACE_SIZE);
+
+ msi_nonbroken = true;
+}
+
+static void whpx_apic_class_init(ObjectClass *klass, void *data)
+{
+ APICCommonClass *k = APIC_COMMON_CLASS(klass);
+
+ k->realize = whpx_apic_realize;
+ k->reset = whpx_apic_reset;
+ k->set_base = whpx_apic_set_base;
+ k->set_tpr = whpx_apic_set_tpr;
+ k->get_tpr = whpx_apic_get_tpr;
+ k->post_load = whpx_apic_post_load;
+ k->vapic_base_update = whpx_apic_vapic_base_update;
+ k->external_nmi = whpx_apic_external_nmi;
+ k->send_msi = whpx_send_msi;
+}
+
+static const TypeInfo whpx_apic_info = {
+ .name = "whpx-apic",
+ .parent = TYPE_APIC_COMMON,
+ .instance_size = sizeof(APICCommonState),
+ .class_init = whpx_apic_class_init,
+};
+
+static void whpx_apic_register_types(void)
+{
+ type_register_static(&whpx_apic_info);
+}
+
+type_init(whpx_apic_register_types)
diff --git a/target/i386/whpx/whpx-internal.h b/target/i386/whpx/whpx-internal.h
new file mode 100644
index 000000000..908ababf6
--- /dev/null
+++ b/target/i386/whpx/whpx-internal.h
@@ -0,0 +1,86 @@
+#ifndef WHP_INTERNAL_H
+#define WHP_INTERNAL_H
+
+#include <windows.h>
+#include <WinHvPlatform.h>
+#include <WinHvEmulation.h>
+
+struct whpx_state {
+ uint64_t mem_quota;
+ WHV_PARTITION_HANDLE partition;
+ bool kernel_irqchip_allowed;
+ bool kernel_irqchip_required;
+ bool apic_in_platform;
+};
+
+extern struct whpx_state whpx_global;
+void whpx_apic_get(DeviceState *s);
+
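+/* HRESULT returned when the platform does not know a queried capability. */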
+#define WHV_E_UNKNOWN_CAPABILITY 0x80370300L
+
+#define LIST_WINHVPLATFORM_FUNCTIONS(X) \
+ X(HRESULT, WHvGetCapability, (WHV_CAPABILITY_CODE CapabilityCode, VOID* CapabilityBuffer, UINT32 CapabilityBufferSizeInBytes, UINT32* WrittenSizeInBytes)) \
+ X(HRESULT, WHvCreatePartition, (WHV_PARTITION_HANDLE* Partition)) \
+ X(HRESULT, WHvSetupPartition, (WHV_PARTITION_HANDLE Partition)) \
+ X(HRESULT, WHvDeletePartition, (WHV_PARTITION_HANDLE Partition)) \
+ X(HRESULT, WHvGetPartitionProperty, (WHV_PARTITION_HANDLE Partition, WHV_PARTITION_PROPERTY_CODE PropertyCode, VOID* PropertyBuffer, UINT32 PropertyBufferSizeInBytes, UINT32* WrittenSizeInBytes)) \
+ X(HRESULT, WHvSetPartitionProperty, (WHV_PARTITION_HANDLE Partition, WHV_PARTITION_PROPERTY_CODE PropertyCode, const VOID* PropertyBuffer, UINT32 PropertyBufferSizeInBytes)) \
+ X(HRESULT, WHvMapGpaRange, (WHV_PARTITION_HANDLE Partition, VOID* SourceAddress, WHV_GUEST_PHYSICAL_ADDRESS GuestAddress, UINT64 SizeInBytes, WHV_MAP_GPA_RANGE_FLAGS Flags)) \
+ X(HRESULT, WHvUnmapGpaRange, (WHV_PARTITION_HANDLE Partition, WHV_GUEST_PHYSICAL_ADDRESS GuestAddress, UINT64 SizeInBytes)) \
+ X(HRESULT, WHvTranslateGva, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, WHV_GUEST_VIRTUAL_ADDRESS Gva, WHV_TRANSLATE_GVA_FLAGS TranslateFlags, WHV_TRANSLATE_GVA_RESULT* TranslationResult, WHV_GUEST_PHYSICAL_ADDRESS* Gpa)) \
+ X(HRESULT, WHvCreateVirtualProcessor, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, UINT32 Flags)) \
+ X(HRESULT, WHvDeleteVirtualProcessor, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex)) \
+ X(HRESULT, WHvRunVirtualProcessor, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, VOID* ExitContext, UINT32 ExitContextSizeInBytes)) \
+ X(HRESULT, WHvCancelRunVirtualProcessor, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, UINT32 Flags)) \
+ X(HRESULT, WHvGetVirtualProcessorRegisters, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, const WHV_REGISTER_NAME* RegisterNames, UINT32 RegisterCount, WHV_REGISTER_VALUE* RegisterValues)) \
+ X(HRESULT, WHvSetVirtualProcessorRegisters, (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, const WHV_REGISTER_NAME* RegisterNames, UINT32 RegisterCount, const WHV_REGISTER_VALUE* RegisterValues)) \
+
+/*
+ * These are supplemental functions that may not be present
+ * on all versions and are not critical for basic functionality.
+ */
+#define LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(X) \
+ X(HRESULT, WHvSuspendPartitionTime, (WHV_PARTITION_HANDLE Partition)) \
+ X(HRESULT, WHvRequestInterrupt, (WHV_PARTITION_HANDLE Partition, \
+ WHV_INTERRUPT_CONTROL* Interrupt, UINT32 InterruptControlSize)) \
+ X(HRESULT, WHvGetVirtualProcessorInterruptControllerState2, \
+ (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, PVOID State, \
+ UINT32 StateSize, UINT32* WrittenSize)) \
+ X(HRESULT, WHvSetVirtualProcessorInterruptControllerState2, \
+ (WHV_PARTITION_HANDLE Partition, UINT32 VpIndex, PVOID State, \
+ UINT32 StateSize)) \
+
+#define LIST_WINHVEMULATION_FUNCTIONS(X) \
+ X(HRESULT, WHvEmulatorCreateEmulator, (const WHV_EMULATOR_CALLBACKS* Callbacks, WHV_EMULATOR_HANDLE* Emulator)) \
+ X(HRESULT, WHvEmulatorDestroyEmulator, (WHV_EMULATOR_HANDLE Emulator)) \
+ X(HRESULT, WHvEmulatorTryIoEmulation, (WHV_EMULATOR_HANDLE Emulator, VOID* Context, const WHV_VP_EXIT_CONTEXT* VpContext, const WHV_X64_IO_PORT_ACCESS_CONTEXT* IoInstructionContext, WHV_EMULATOR_STATUS* EmulatorReturnStatus)) \
+ X(HRESULT, WHvEmulatorTryMmioEmulation, (WHV_EMULATOR_HANDLE Emulator, VOID* Context, const WHV_VP_EXIT_CONTEXT* VpContext, const WHV_MEMORY_ACCESS_CONTEXT* MmioInstructionContext, WHV_EMULATOR_STATUS* EmulatorReturnStatus)) \
+
+#define WHP_DEFINE_TYPE(return_type, function_name, signature) \
+ typedef return_type (WINAPI *function_name ## _t) signature;
+
+#define WHP_DECLARE_MEMBER(return_type, function_name, signature) \
+ function_name ## _t function_name;
+
+/* Define the function typedefs */
+LIST_WINHVPLATFORM_FUNCTIONS(WHP_DEFINE_TYPE)
+LIST_WINHVEMULATION_FUNCTIONS(WHP_DEFINE_TYPE)
+LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_DEFINE_TYPE)
+
+struct WHPDispatch {
+ LIST_WINHVPLATFORM_FUNCTIONS(WHP_DECLARE_MEMBER)
+ LIST_WINHVEMULATION_FUNCTIONS(WHP_DECLARE_MEMBER)
+ LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_DECLARE_MEMBER)
+};
+
+extern struct WHPDispatch whp_dispatch;
+
+bool init_whp_dispatch(void);
+
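+/*
+ * Illustrative expansion of the X-macro machinery above: for
+ * WHvGetCapability, WHP_DEFINE_TYPE and WHP_DECLARE_MEMBER produce
+ *
+ *     typedef HRESULT (WINAPI *WHvGetCapability_t)(...);
+ *     WHvGetCapability_t WHvGetCapability;    (member of WHPDispatch)
+ *
+ * so, once init_whp_dispatch() has resolved the entry points, callers
+ * invoke the dynamically loaded API as (variable names hypothetical):
+ *
+ *     hr = whp_dispatch.WHvGetCapability(code, &cap, sizeof(cap), &written);
+ */
+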
+typedef enum WHPFunctionList {
+ WINHV_PLATFORM_FNS_DEFAULT,
+ WINHV_EMULATION_FNS_DEFAULT,
+ WINHV_PLATFORM_FNS_SUPPLEMENTAL
+} WHPFunctionList;
+
+#endif /* WHP_INTERNAL_H */
diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c
new file mode 100644
index 000000000..ac61a9634
--- /dev/null
+++ b/target/i386/xsave_helper.c
@@ -0,0 +1,251 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+
+void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
+{
+ CPUX86State *env = &cpu->env;
+ const ExtSaveArea *e, *f;
+ int i;
+
+ X86LegacyXSaveArea *legacy;
+ X86XSaveHeader *header;
+ uint16_t cwd, swd, twd;
+
+ memset(buf, 0, buflen);
+
+ e = &x86_ext_save_areas[XSTATE_FP_BIT];
+
+ legacy = buf + e->offset;
+ header = buf + e->offset + sizeof(*legacy);
+
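+    /*
+     * The legacy area of the XSAVE image stores the abridged tag word:
+     * one bit per x87 register, 1 = valid.  QEMU's fptags[] uses
+     * 1 = empty, hence the inversion below.
+     */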
+ twd = 0;
+ swd = env->fpus & ~(7 << 11);
+ swd |= (env->fpstt & 7) << 11;
+ cwd = env->fpuc;
+ for (i = 0; i < 8; ++i) {
+ twd |= (!env->fptags[i]) << i;
+ }
+ legacy->fcw = cwd;
+ legacy->fsw = swd;
+ legacy->ftw = twd;
+ legacy->fpop = env->fpop;
+ legacy->fpip = env->fpip;
+ legacy->fpdp = env->fpdp;
+ memcpy(&legacy->fpregs, env->fpregs,
+ sizeof(env->fpregs));
+ legacy->mxcsr = env->mxcsr;
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *xmm = legacy->xmm_regs[i];
+
+ stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
+ }
+
+ header->xstate_bv = env->xstate_bv;
+
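+    /*
+     * Each optional XSAVE component below is copied only when the CPU
+     * model actually provides it, i.e. when its x86_ext_save_areas[]
+     * entry reports a nonzero size and offset.
+     */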
+ e = &x86_ext_save_areas[XSTATE_YMM_BIT];
+ if (e->size && e->offset) {
+ XSaveAVX *avx;
+
+ avx = buf + e->offset;
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *ymmh = avx->ymmh[i];
+
+ stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
+ }
+ }
+
+ e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
+ if (e->size && e->offset) {
+ XSaveBNDREG *bndreg;
+ XSaveBNDCSR *bndcsr;
+
+ f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ bndreg = buf + e->offset;
+ bndcsr = buf + f->offset;
+
+ memcpy(&bndreg->bnd_regs, env->bnd_regs,
+ sizeof(env->bnd_regs));
+ bndcsr->bndcsr = env->bndcs_regs;
+ }
+
+ e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
+ if (e->size && e->offset) {
+ XSaveOpmask *opmask;
+ XSaveZMM_Hi256 *zmm_hi256;
+#ifdef TARGET_X86_64
+ XSaveHi16_ZMM *hi16_zmm;
+#endif
+
+ f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ opmask = buf + e->offset;
+ zmm_hi256 = buf + f->offset;
+
+ memcpy(&opmask->opmask_regs, env->opmask_regs,
+ sizeof(env->opmask_regs));
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
+
+ stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
+ stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
+ stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
+ }
+
+#ifdef TARGET_X86_64
+ f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ hi16_zmm = buf + f->offset;
+
+ memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
+ 16 * sizeof(env->xmm_regs[16]));
+#endif
+ }
+
+#ifdef TARGET_X86_64
+ e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
+ if (e->size && e->offset) {
+ XSavePKRU *pkru = buf + e->offset;
+
+ memcpy(pkru, &env->pkru, sizeof(env->pkru));
+ }
+#endif
+}
+
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
+{
+ CPUX86State *env = &cpu->env;
+ const ExtSaveArea *e, *f, *g;
+ int i;
+
+ const X86LegacyXSaveArea *legacy;
+ const X86XSaveHeader *header;
+ uint16_t cwd, swd, twd;
+
+ e = &x86_ext_save_areas[XSTATE_FP_BIT];
+
+ legacy = buf + e->offset;
+ header = buf + e->offset + sizeof(*legacy);
+
+ cwd = legacy->fcw;
+ swd = legacy->fsw;
+ twd = legacy->ftw;
+ env->fpop = legacy->fpop;
+ env->fpstt = (swd >> 11) & 7;
+ env->fpus = swd;
+ env->fpuc = cwd;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((twd >> i) & 1);
+ }
+ env->fpip = legacy->fpip;
+ env->fpdp = legacy->fpdp;
+ env->mxcsr = legacy->mxcsr;
+ memcpy(env->fpregs, &legacy->fpregs,
+ sizeof(env->fpregs));
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ const uint8_t *xmm = legacy->xmm_regs[i];
+
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
+ }
+
+ env->xstate_bv = header->xstate_bv;
+
+ e = &x86_ext_save_areas[XSTATE_YMM_BIT];
+ if (e->size && e->offset) {
+ const XSaveAVX *avx;
+
+ avx = buf + e->offset;
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ const uint8_t *ymmh = avx->ymmh[i];
+
+ env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
+ }
+ }
+
+ e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
+ if (e->size && e->offset) {
+ const XSaveBNDREG *bndreg;
+ const XSaveBNDCSR *bndcsr;
+
+ f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ bndreg = buf + e->offset;
+ bndcsr = buf + f->offset;
+
+ memcpy(env->bnd_regs, &bndreg->bnd_regs,
+ sizeof(env->bnd_regs));
+ env->bndcs_regs = bndcsr->bndcsr;
+ }
+
+ e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
+ if (e->size && e->offset) {
+ const XSaveOpmask *opmask;
+ const XSaveZMM_Hi256 *zmm_hi256;
+#ifdef TARGET_X86_64
+ const XSaveHi16_ZMM *hi16_zmm;
+#endif
+
+ f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
+ assert(f->size);
+ assert(f->offset);
+
+ g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
+ assert(g->size);
+ assert(g->offset);
+
+ opmask = buf + e->offset;
+ zmm_hi256 = buf + f->offset;
+#ifdef TARGET_X86_64
+ hi16_zmm = buf + g->offset;
+#endif
+
+ memcpy(env->opmask_regs, &opmask->opmask_regs,
+ sizeof(env->opmask_regs));
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
+
+ env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
+ }
+
+#ifdef TARGET_X86_64
+ memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
+ 16 * sizeof(env->xmm_regs[16]));
+#endif
+ }
+
+#ifdef TARGET_X86_64
+ e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
+ if (e->size && e->offset) {
+ const XSavePKRU *pkru;
+
+ pkru = buf + e->offset;
+ memcpy(&env->pkru, pkru, sizeof(env->pkru));
+ }
+#endif
+}
diff --git a/target/m68k/Kconfig b/target/m68k/Kconfig
new file mode 100644
index 000000000..23debad51
--- /dev/null
+++ b/target/m68k/Kconfig
@@ -0,0 +1,2 @@
+config M68K
+ bool
diff --git a/target/m68k/cpu-param.h b/target/m68k/cpu-param.h
new file mode 100644
index 000000000..06556dfbf
--- /dev/null
+++ b/target/m68k/cpu-param.h
@@ -0,0 +1,22 @@
+/*
+ * m68k cpu parameters for qemu.
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef M68K_CPU_PARAM_H
+#define M68K_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+/*
+ * Coldfire Linux uses 8k pages
+ * and m68k linux uses 4k pages
+ * use the smallest one
+ */
+#define TARGET_PAGE_BITS 12
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define NB_MMU_MODES 2
+
+#endif
diff --git a/target/m68k/cpu-qom.h b/target/m68k/cpu-qom.h
new file mode 100644
index 000000000..1ceb160ec
--- /dev/null
+++ b/target/m68k/cpu-qom.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU Motorola 68k CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_M68K_CPU_QOM_H
+#define QEMU_M68K_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_M68K_CPU "m68k-cpu"
+
+OBJECT_DECLARE_TYPE(M68kCPU, M68kCPUClass,
+ M68K_CPU)
+
+/*
+ * M68kCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Motorola 68k CPU model.
+ */
+struct M68kCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
new file mode 100644
index 000000000..c7aeb7da9
--- /dev/null
+++ b/target/m68k/cpu.c
@@ -0,0 +1,605 @@
+/*
+ * QEMU Motorola 68k CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "migration/vmstate.h"
+#include "fpu/softfloat.h"
+
+static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool m68k_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+static void m68k_set_feature(CPUM68KState *env, int feature)
+{
+ env->features |= (1u << feature);
+}
+
+static void m68k_unset_feature(CPUM68KState *env, int feature)
+{
+    env->features &= ~(1u << feature);
+}
+
+static void m68k_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ M68kCPU *cpu = M68K_CPU(s);
+ M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu);
+ CPUM68KState *env = &cpu->env;
+ floatx80 nan = floatx80_default_nan(NULL);
+ int i;
+
+ mcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUM68KState, end_reset_fields));
+#ifdef CONFIG_SOFTMMU
+ cpu_m68k_set_sr(env, SR_S | SR_I);
+#else
+ cpu_m68k_set_sr(env, 0);
+#endif
+ for (i = 0; i < 8; i++) {
+ env->fregs[i].d = nan;
+ }
+ cpu_m68k_set_fpcr(env, 0);
+ env->fpsr = 0;
+
+ /* TODO: We should set PC from the interrupt vector. */
+ env->pc = 0;
+}
+
+static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ M68kCPU *cpu = M68K_CPU(s);
+ CPUM68KState *env = &cpu->env;
+ info->print_insn = print_insn_m68k;
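+    /*
+     * M68K_FEATURE_M68000 is set for every classic 680x0 core (it is
+     * the base feature set), so these all disassemble with the 68040
+     * mach; ColdFire cores fall through to the disassembler default.
+     */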
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ info->mach = bfd_mach_m68040;
+ }
+}
+
+/* CPU models */
+
+static ObjectClass *m68k_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = g_strdup_printf(M68K_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL ||
+ object_class_is_abstract(oc))) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void m5206_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+}
+
+/* Base feature set, including insns., for the m68k family */
+static void m68000_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_M68000);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_MOVEP);
+}
+
+/*
+ * Adds BKPT, MOVE-from-SR (now a privileged instruction), MOVEC, MOVES, RTD
+ */
+static void m68010_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68000_cpu_initfn(obj);
+ m68k_set_feature(env, M68K_FEATURE_M68010);
+ m68k_set_feature(env, M68K_FEATURE_RTD);
+ m68k_set_feature(env, M68K_FEATURE_BKPT);
+ m68k_set_feature(env, M68K_FEATURE_MOVEC);
+}
+
+/*
+ * Adds BFCHG, BFCLR, BFEXTS, BFEXTU, BFFFO, BFINS, BFSET, BFTST, CAS, CAS2,
+ * CHK2, CMP2, DIVSL, DIVUL, EXTB, PACK, TRAPcc, UNPK.
+ *
+ * 68020/30 only:
+ * CALLM, cpBcc, cpDBcc, cpGEN, cpRESTORE, cpSAVE, cpScc, cpTRAPcc
+ */
+static void m68020_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68010_cpu_initfn(obj);
+ m68k_unset_feature(env, M68K_FEATURE_M68010);
+ m68k_set_feature(env, M68K_FEATURE_M68020);
+ m68k_set_feature(env, M68K_FEATURE_QUAD_MULDIV);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_BCCL);
+ m68k_set_feature(env, M68K_FEATURE_BITFIELD);
+ m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+ m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV);
+ m68k_set_feature(env, M68K_FEATURE_FPU);
+ m68k_set_feature(env, M68K_FEATURE_CAS);
+ m68k_set_feature(env, M68K_FEATURE_CHK2);
+ m68k_set_feature(env, M68K_FEATURE_MSP);
+ m68k_set_feature(env, M68K_FEATURE_UNALIGNED_DATA);
+}
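+
+/*
+ * Each 680x0 initfn builds on its predecessor's feature set and then
+ * swaps the family bit, so e.g. an m68020 object ends up with
+ * M68K_FEATURE_M68020 set but M68K_FEATURE_M68010 cleared.
+ */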
+
+/*
+ * Adds: PFLUSH (*5)
+ * 68030 Only: PFLUSHA (*5), PLOAD (*5), PMOVE
+ * 68030/40 Only: PTEST
+ *
+ * NOTES:
+ * 5. Not valid on MC68EC030
+ */
+static void m68030_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68020_cpu_initfn(obj);
+ m68k_unset_feature(env, M68K_FEATURE_M68020);
+ m68k_set_feature(env, M68K_FEATURE_M68030);
+}
+
+/*
+ * Adds: CINV, CPUSH
+ * Adds all with Note *2: FABS, FSABS, FDABS, FADD, FSADD, FDADD, FBcc, FCMP,
+ * FDBcc, FDIV, FSDIV, FDDIV, FMOVE, FSMOVE, FDMOVE,
+ * FMOVEM, FMUL, FSMUL, FDMUL, FNEG, FSNEG, FDNEG, FNOP,
+ * FRESTORE, FSAVE, FScc, FSQRT, FSSQRT, FDSQRT, FSUB,
+ * FSSUB, FDSUB, FTRAPcc, FTST
+ *
+ * Adds with Notes *2, and *3: FACOS, FASIN, FATAN, FATANH, FCOS, FCOSH, FETOX,
+ * FETOXM, FGETEXP, FGETMAN, FINT, FINTRZ, FLOG10,
+ * FLOG2, FLOGN, FLOGNP1, FMOD, FMOVECR, FREM,
+ * FSCALE, FSGLDIV, FSGLMUL, FSIN, FSINCOS, FSINH,
+ * FTAN, FTANH, FTENTOX, FTWOTOX
+ * NOTES:
+ * 2. Not applicable to the MC68EC040, MC68LC040, MC68EC060, and MC68LC060.
+ * 3. These are software-supported instructions on the MC68040 and MC68060.
+ */
+static void m68040_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68030_cpu_initfn(obj);
+ m68k_unset_feature(env, M68K_FEATURE_M68030);
+ m68k_set_feature(env, M68K_FEATURE_M68040);
+}
+
+/*
+ * Adds: PLPA
+ * Adds all with Note *2: CAS, CAS2, MULS, MULU, CHK2, CMP2, DIVS, DIVU
+ * All Fxxxx instructions are as per m68040 with exception to; FMOVEM NOTE3
+ *
+ * Does NOT implement MOVEP
+ *
+ * NOTES:
+ * 2. Not applicable to the MC68EC040, MC68LC040, MC68EC060, and MC68LC060.
+ * 3. These are software-supported instructions on the MC68040 and MC68060.
+ */
+static void m68060_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68040_cpu_initfn(obj);
+ m68k_unset_feature(env, M68K_FEATURE_M68040);
+ m68k_set_feature(env, M68K_FEATURE_M68060);
+ m68k_unset_feature(env, M68K_FEATURE_MOVEP);
+
+ /* Implemented as a software feature */
+ m68k_unset_feature(env, M68K_FEATURE_QUAD_MULDIV);
+}
+
+static void m5208_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+}
+
+static void cfv4e_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_B);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_FPU);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+}
+
+static void any_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_B);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_FPU);
+ /*
+     * MAC and EMAC are mutually exclusive, so pick EMAC.
+ * It's mostly backwards compatible.
+ */
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+}
+
+static void m68k_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ M68kCPU *cpu = M68K_CPU(dev);
+ M68kCPUClass *mcc = M68K_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ register_m68k_insns(&cpu->env);
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ m68k_cpu_init_gdb(cpu);
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void m68k_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+}
+
+#if defined(CONFIG_SOFTMMU)
+static bool fpu_needed(void *opaque)
+{
+ M68kCPU *s = opaque;
+
+ return m68k_feature(&s->env, M68K_FEATURE_CF_FPU) ||
+ m68k_feature(&s->env, M68K_FEATURE_FPU);
+}
+
+typedef struct m68k_FPReg_tmp {
+ FPReg *parent;
+ uint64_t tmp_mant;
+ uint16_t tmp_exp;
+} m68k_FPReg_tmp;
+
+static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ *pmant = temp.l.lower;
+ *pexp = temp.l.upper;
+}
+
+static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.upper = upper;
+ temp.l.lower = mant;
+ return temp.d;
+}
+
+static int freg_pre_save(void *opaque)
+{
+ m68k_FPReg_tmp *tmp = opaque;
+
+ cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);
+
+ return 0;
+}
+
+static int freg_post_load(void *opaque, int version)
+{
+ m68k_FPReg_tmp *tmp = opaque;
+
+ tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_freg_tmp = {
+ .name = "freg_tmp",
+ .post_load = freg_post_load,
+ .pre_save = freg_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tmp_mant, m68k_FPReg_tmp),
+ VMSTATE_UINT16(tmp_exp, m68k_FPReg_tmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_freg = {
+ .name = "freg",
+ .fields = (VMStateField[]) {
+ VMSTATE_WITH_TMP(FPReg, m68k_FPReg_tmp, vmstate_freg_tmp),
+ VMSTATE_END_OF_LIST()
+ }
+};
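+
+/*
+ * VMSTATE_WITH_TMP wraps each FPReg in an m68k_FPReg_tmp during
+ * save/load: freg_pre_save() splits the 80-bit value into the
+ * mantissa/exponent pair that actually goes on the wire, and
+ * freg_post_load() reassembles it, keeping the migration stream
+ * independent of the host representation of floatx80.
+ */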
+
+static int fpu_post_load(void *opaque, int version)
+{
+ M68kCPU *s = opaque;
+
+ cpu_m68k_restore_fp_status(&s->env);
+
+ return 0;
+}
+
+const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .post_load = fpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.fpcr, M68kCPU),
+ VMSTATE_UINT32(env.fpsr, M68kCPU),
+ VMSTATE_STRUCT_ARRAY(env.fregs, M68kCPU, 8, 0, vmstate_freg, FPReg),
+ VMSTATE_STRUCT(env.fp_result, M68kCPU, 0, vmstate_freg, FPReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool cf_spregs_needed(void *opaque)
+{
+ M68kCPU *s = opaque;
+
+ return m68k_feature(&s->env, M68K_FEATURE_CF_ISA_A);
+}
+
+const VMStateDescription vmstate_cf_spregs = {
+ .name = "cpu/cf_spregs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cf_spregs_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.macc, M68kCPU, 4),
+ VMSTATE_UINT32(env.macsr, M68kCPU),
+ VMSTATE_UINT32(env.mac_mask, M68kCPU),
+ VMSTATE_UINT32(env.rambar0, M68kCPU),
+ VMSTATE_UINT32(env.mbar, M68kCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool cpu_68040_mmu_needed(void *opaque)
+{
+ M68kCPU *s = opaque;
+
+ return m68k_feature(&s->env, M68K_FEATURE_M68040);
+}
+
+const VMStateDescription vmstate_68040_mmu = {
+ .name = "cpu/68040_mmu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_68040_mmu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.mmu.ar, M68kCPU),
+ VMSTATE_UINT32(env.mmu.ssw, M68kCPU),
+ VMSTATE_UINT16(env.mmu.tcr, M68kCPU),
+ VMSTATE_UINT32(env.mmu.urp, M68kCPU),
+ VMSTATE_UINT32(env.mmu.srp, M68kCPU),
+ VMSTATE_BOOL(env.mmu.fault, M68kCPU),
+ VMSTATE_UINT32_ARRAY(env.mmu.ttr, M68kCPU, 4),
+ VMSTATE_UINT32(env.mmu.mmusr, M68kCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool cpu_68040_spregs_needed(void *opaque)
+{
+ M68kCPU *s = opaque;
+
+ return m68k_feature(&s->env, M68K_FEATURE_M68040);
+}
+
+const VMStateDescription vmstate_68040_spregs = {
+ .name = "cpu/68040_spregs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = cpu_68040_spregs_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.vbr, M68kCPU),
+ VMSTATE_UINT32(env.cacr, M68kCPU),
+ VMSTATE_UINT32(env.sfc, M68kCPU),
+ VMSTATE_UINT32(env.dfc, M68kCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_m68k_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.dregs, M68kCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.aregs, M68kCPU, 8),
+ VMSTATE_UINT32(env.pc, M68kCPU),
+ VMSTATE_UINT32(env.sr, M68kCPU),
+ VMSTATE_INT32(env.current_sp, M68kCPU),
+ VMSTATE_UINT32_ARRAY(env.sp, M68kCPU, 3),
+ VMSTATE_UINT32(env.cc_op, M68kCPU),
+ VMSTATE_UINT32(env.cc_x, M68kCPU),
+ VMSTATE_UINT32(env.cc_n, M68kCPU),
+ VMSTATE_UINT32(env.cc_v, M68kCPU),
+ VMSTATE_UINT32(env.cc_c, M68kCPU),
+ VMSTATE_UINT32(env.cc_z, M68kCPU),
+ VMSTATE_INT32(env.pending_vector, M68kCPU),
+ VMSTATE_INT32(env.pending_level, M68kCPU),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+        &vmstate_fpu,
+ &vmstate_cf_spregs,
+ &vmstate_68040_mmu,
+ &vmstate_68040_spregs,
+ NULL
+ },
+};
+#endif
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps m68k_sysemu_ops = {
+ .get_phys_page_debug = m68k_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps m68k_tcg_ops = {
+ .initialize = m68k_tcg_init,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = m68k_cpu_tlb_fill,
+ .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
+ .do_interrupt = m68k_cpu_do_interrupt,
+ .do_transaction_failed = m68k_cpu_transaction_failed,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void m68k_cpu_class_init(ObjectClass *c, void *data)
+{
+ M68kCPUClass *mcc = M68K_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_parent_realize(dc, m68k_cpu_realizefn,
+ &mcc->parent_realize);
+ device_class_set_parent_reset(dc, m68k_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = m68k_cpu_class_by_name;
+ cc->has_work = m68k_cpu_has_work;
+ cc->dump_state = m68k_cpu_dump_state;
+ cc->set_pc = m68k_cpu_set_pc;
+ cc->gdb_read_register = m68k_cpu_gdb_read_register;
+ cc->gdb_write_register = m68k_cpu_gdb_write_register;
+#if defined(CONFIG_SOFTMMU)
+ dc->vmsd = &vmstate_m68k_cpu;
+ cc->sysemu_ops = &m68k_sysemu_ops;
+#endif
+ cc->disas_set_info = m68k_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 18;
+ cc->tcg_ops = &m68k_tcg_ops;
+}
+
+static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data)
+{
+ CPUClass *cc = CPU_CLASS(c);
+
+ cc->gdb_core_xml_file = "cf-core.xml";
+}
+
+#define DEFINE_M68K_CPU_TYPE_CF(model) \
+ { \
+ .name = M68K_CPU_TYPE_NAME(#model), \
+ .instance_init = model##_cpu_initfn, \
+ .parent = TYPE_M68K_CPU, \
+ .class_init = m68k_cpu_class_init_cf_core \
+ }
+
+static void m68k_cpu_class_init_m68k_core(ObjectClass *c, void *data)
+{
+ CPUClass *cc = CPU_CLASS(c);
+
+ cc->gdb_core_xml_file = "m68k-core.xml";
+}
+
+#define DEFINE_M68K_CPU_TYPE_M68K(model) \
+ { \
+ .name = M68K_CPU_TYPE_NAME(#model), \
+ .instance_init = model##_cpu_initfn, \
+ .parent = TYPE_M68K_CPU, \
+ .class_init = m68k_cpu_class_init_m68k_core \
+ }
+
+static const TypeInfo m68k_cpus_type_infos[] = {
+ { /* base class should be registered first */
+ .name = TYPE_M68K_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(M68kCPU),
+ .instance_init = m68k_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(M68kCPUClass),
+ .class_init = m68k_cpu_class_init,
+ },
+ DEFINE_M68K_CPU_TYPE_M68K(m68000),
+ DEFINE_M68K_CPU_TYPE_M68K(m68010),
+ DEFINE_M68K_CPU_TYPE_M68K(m68020),
+ DEFINE_M68K_CPU_TYPE_M68K(m68030),
+ DEFINE_M68K_CPU_TYPE_M68K(m68040),
+ DEFINE_M68K_CPU_TYPE_M68K(m68060),
+ DEFINE_M68K_CPU_TYPE_CF(m5206),
+ DEFINE_M68K_CPU_TYPE_CF(m5208),
+ DEFINE_M68K_CPU_TYPE_CF(cfv4e),
+ DEFINE_M68K_CPU_TYPE_CF(any),
+};
+
+DEFINE_TYPES(m68k_cpus_type_infos)
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
new file mode 100644
index 000000000..a3423729e
--- /dev/null
+++ b/target/m68k/cpu.h
@@ -0,0 +1,611 @@
+/*
+ * m68k virtual CPU header
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef M68K_CPU_H
+#define M68K_CPU_H
+
+#include "exec/cpu-defs.h"
+#include "cpu-qom.h"
+
+#define OS_BYTE 0
+#define OS_WORD 1
+#define OS_LONG 2
+#define OS_SINGLE 3
+#define OS_DOUBLE 4
+#define OS_EXTENDED 5
+#define OS_PACKED 6
+#define OS_UNSIZED 7
+
+#define EXCP_ACCESS 2 /* Access (MMU) error. */
+#define EXCP_ADDRESS 3 /* Address error. */
+#define EXCP_ILLEGAL 4 /* Illegal instruction. */
+#define EXCP_DIV0 5 /* Divide by zero */
+#define EXCP_CHK 6 /* CHK, CHK2 Instructions */
+#define EXCP_TRAPCC 7 /* FTRAPcc, TRAPcc, TRAPV Instructions */
+#define EXCP_PRIVILEGE 8 /* Privilege violation. */
+#define EXCP_TRACE 9
+#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */
+#define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */
+#define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */
+#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */
+#define EXCP_FORMAT 14 /* RTE format error. */
+#define EXCP_UNINITIALIZED 15
+#define EXCP_SPURIOUS 24 /* Spurious interrupt */
+#define EXCP_INT_LEVEL_1 25 /* Level 1 Interrupt autovector */
+#define EXCP_INT_LEVEL_7 31 /* Level 7 Interrupt autovector */
+#define EXCP_TRAP0 32 /* User trap #0. */
+#define EXCP_TRAP15 47 /* User trap #15. */
+#define EXCP_FP_BSUN 48 /* Branch Set on Unordered */
+#define EXCP_FP_INEX 49 /* Inexact result */
+#define EXCP_FP_DZ 50 /* Divide by Zero */
+#define EXCP_FP_UNFL 51 /* Underflow */
+#define EXCP_FP_OPERR 52 /* Operand Error */
+#define EXCP_FP_OVFL 53 /* Overflow */
+#define EXCP_FP_SNAN 54 /* Signaling Not-A-Number */
+#define EXCP_FP_UNIMP 55 /* Unimplemented Data type */
+#define EXCP_MMU_CONF 56 /* MMU Configuration Error */
+#define EXCP_MMU_ILLEGAL 57 /* MMU Illegal Operation Error */
+#define EXCP_MMU_ACCESS 58 /* MMU Access Level Violation Error */
+
+#define EXCP_RTE 0x100
+#define EXCP_HALT_INSN 0x101
+
+#define M68K_DTTR0 0
+#define M68K_DTTR1 1
+#define M68K_ITTR0 2
+#define M68K_ITTR1 3
+
+#define M68K_MAX_TTR 2
+#define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index]
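+/*
+ * TTR() expands against a 'ttr' array at the use site (e.g.
+ * env->mmu.ttr): code fetches (ACCESS_CODE) select the ITTR pair at
+ * indexes 2/3, data accesses the DTTR pair at 0/1, matching the
+ * M68K_[DI]TTR* values above.
+ */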
+
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+typedef CPU_LDoubleU FPReg;
+
+typedef struct CPUM68KState {
+ uint32_t dregs[8];
+ uint32_t aregs[8];
+ uint32_t pc;
+ uint32_t sr;
+
+ /*
+ * The 68020/30/40 support two supervisor stacks, ISP and MSP.
+ * The 68000/10, Coldfire, and CPU32 only have USP/SSP.
+ *
+     * The current stack pointer is stored in aregs[7]; the inactive
+     * ones are kept here: USP, SSP, and, on the 68020/30/40, the
+     * additional ISP.
+ */
+ int current_sp;
+ uint32_t sp[3];
+
+ /* Condition flags. */
+ uint32_t cc_op;
+ uint32_t cc_x; /* always 0/1 */
+ uint32_t cc_n; /* in bit 31 (i.e. negative) */
+ uint32_t cc_v; /* in bit 31, unused, or computed from cc_n and cc_v */
+ uint32_t cc_c; /* either 0/1, unused, or computed from cc_n and cc_v */
+ uint32_t cc_z; /* == 0 or unused */
+
+ FPReg fregs[8];
+ FPReg fp_result;
+ uint32_t fpcr;
+ uint32_t fpsr;
+ float_status fp_status;
+
+ uint64_t mactmp;
+ /*
+ * EMAC Hardware deals with 48-bit values composed of one 32-bit and
+ * two 8-bit parts. We store a single 64-bit value and
+ * rearrange/extend this when changing modes.
+ */
+ uint64_t macc[4];
+ uint32_t macsr;
+ uint32_t mac_mask;
+
+ /* MMU status. */
+ struct {
+ uint32_t ar;
+ uint32_t ssw;
+ /* 68040 */
+ uint16_t tcr;
+ uint32_t urp;
+ uint32_t srp;
+ bool fault;
+ uint32_t ttr[4];
+ uint32_t mmusr;
+ } mmu;
+
+ /* Control registers. */
+ uint32_t vbr;
+ uint32_t mbar;
+ uint32_t rambar0;
+ uint32_t cacr;
+ uint32_t sfc;
+ uint32_t dfc;
+
+ int pending_vector;
+ int pending_level;
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved across CPU reset. */
+ uint32_t features;
+} CPUM68KState;
+
+/*
+ * M68kCPU:
+ * @env: #CPUM68KState
+ *
+ * A Motorola 68k CPU.
+ */
+struct M68kCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUM68KState env;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+void m68k_cpu_do_interrupt(CPUState *cpu);
+bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
+void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void m68k_tcg_init(void);
+void m68k_cpu_init_gdb(M68kCPU *cpu);
+uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
+void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
+void cpu_m68k_set_sr(CPUM68KState *env, uint32_t);
+void cpu_m68k_restore_fp_status(CPUM68KState *env);
+void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val);
+
+
+/*
+ * Instead of computing the condition codes after each m68k instruction,
+ * QEMU stores the result and operands of the last flag-setting operation
+ * (in cc_n, cc_v and cc_x) together with the type of operation (cc_op).
+ * When the condition codes are needed they are recomputed from this
+ * information; they are not computed at all if they are only needed
+ * for conditional branches.
+ */
+typedef enum {
+ /* Translator only -- use env->cc_op. */
+ CC_OP_DYNAMIC,
+
+ /* Each flag bit computed into cc_[xcnvz]. */
+ CC_OP_FLAGS,
+
+ /* X in cc_x, C = X, N in cc_n, Z in cc_n, V via cc_n/cc_v. */
+ CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL,
+ CC_OP_SUBB, CC_OP_SUBW, CC_OP_SUBL,
+
+ /* X in cc_x, {N,Z,C,V} via cc_n/cc_v. */
+ CC_OP_CMPB, CC_OP_CMPW, CC_OP_CMPL,
+
+ /* X in cc_x, C = 0, V = 0, N in cc_n, Z in cc_n. */
+ CC_OP_LOGIC,
+
+ CC_OP_NB
+} CCOp;
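+
+/*
+ * Example (illustrative): an ADD.L sets cc_op = CC_OP_ADDL and records
+ * enough state in cc_n/cc_v/cc_x for N, Z, C and V to be recomputed on
+ * demand; a later conditional branch tests just the bit it needs, and
+ * the flags are only materialised in CC_OP_FLAGS form when an insn
+ * such as MOVE from CCR wants all of them at once.
+ */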
+
+#define CCF_C 0x01
+#define CCF_V 0x02
+#define CCF_Z 0x04
+#define CCF_N 0x08
+#define CCF_X 0x10
+
+#define SR_I_SHIFT 8
+#define SR_I 0x0700
+#define SR_M 0x1000
+#define SR_S 0x2000
+#define SR_T_SHIFT 14
+#define SR_T 0xc000
+
+#define M68K_SR_TRACE(sr) ((sr & SR_T) >> SR_T_SHIFT)
+#define M68K_SR_TRACE_ANY_INS 0x2
+
+#define M68K_SSP 0
+#define M68K_USP 1
+#define M68K_ISP 2
+
+/* bits for 68040 special status word */
+#define M68K_CP_040 0x8000
+#define M68K_CU_040 0x4000
+#define M68K_CT_040 0x2000
+#define M68K_CM_040 0x1000
+#define M68K_MA_040 0x0800
+#define M68K_ATC_040 0x0400
+#define M68K_LK_040 0x0200
+#define M68K_RW_040 0x0100
+#define M68K_SIZ_040 0x0060
+#define M68K_TT_040 0x0018
+#define M68K_TM_040 0x0007
+
+#define M68K_TM_040_DATA 0x0001
+#define M68K_TM_040_CODE 0x0002
+#define M68K_TM_040_SUPER 0x0004
+
+/* bits for 68040 write back status word */
+#define M68K_WBV_040 0x80
+#define M68K_WBSIZ_040 0x60
+#define M68K_WBBYT_040 0x20
+#define M68K_WBWRD_040 0x40
+#define M68K_WBLNG_040 0x00
+#define M68K_WBTT_040 0x18
+#define M68K_WBTM_040 0x07
+
+/* bus access size codes */
+#define M68K_BA_SIZE_MASK 0x60
+#define M68K_BA_SIZE_BYTE 0x20
+#define M68K_BA_SIZE_WORD 0x40
+#define M68K_BA_SIZE_LONG 0x00
+#define M68K_BA_SIZE_LINE 0x60
+
+/* bus access transfer type codes */
+#define M68K_BA_TT_MOVE16 0x08
+
+/* bits for 68040 MMU status register (mmusr) */
+#define M68K_MMU_B_040 0x0800
+#define M68K_MMU_G_040 0x0400
+#define M68K_MMU_U1_040 0x0200
+#define M68K_MMU_U0_040 0x0100
+#define M68K_MMU_S_040 0x0080
+#define M68K_MMU_CM_040 0x0060
+#define M68K_MMU_M_040 0x0010
+#define M68K_MMU_WP_040 0x0004
+#define M68K_MMU_T_040 0x0002
+#define M68K_MMU_R_040 0x0001
+
+#define M68K_MMU_SR_MASK_040 (M68K_MMU_G_040 | M68K_MMU_U1_040 | \
+ M68K_MMU_U0_040 | M68K_MMU_S_040 | \
+ M68K_MMU_CM_040 | M68K_MMU_M_040 | \
+ M68K_MMU_WP_040)
+
+/* bits for 68040 MMU Translation Control Register */
+#define M68K_TCR_ENABLED 0x8000
+#define M68K_TCR_PAGE_8K 0x4000
+
+/* bits for 68040 MMU Table Descriptor / Page Descriptor / TTR */
+#define M68K_DESC_WRITEPROT 0x00000004
+#define M68K_DESC_USED 0x00000008
+#define M68K_DESC_MODIFIED 0x00000010
+#define M68K_DESC_CACHEMODE 0x00000060
+#define M68K_DESC_CM_WRTHRU 0x00000000
+#define M68K_DESC_CM_COPYBK 0x00000020
+#define M68K_DESC_CM_SERIAL 0x00000040
+#define M68K_DESC_CM_NCACHE 0x00000060
+#define M68K_DESC_SUPERONLY 0x00000080
+#define M68K_DESC_USERATTR 0x00000300
+#define M68K_DESC_USERATTR_SHIFT 8
+#define M68K_DESC_GLOBAL 0x00000400
+#define M68K_DESC_URESERVED 0x00000800
+
+#define M68K_ROOT_POINTER_ENTRIES 128
+#define M68K_4K_PAGE_MASK (~0xff)
+#define M68K_POINTER_BASE(entry) (entry & ~0x1ff)
+#define M68K_ROOT_INDEX(addr) (((addr) >> 23) & 0x1fc)
+#define M68K_POINTER_INDEX(addr) (((addr) >> 16) & 0x1fc)
+#define M68K_4K_PAGE_BASE(entry) ((entry) & M68K_4K_PAGE_MASK)
+#define M68K_4K_PAGE_INDEX(addr) (((addr) >> 10) & 0xfc)
+#define M68K_8K_PAGE_MASK (~0x7f)
+#define M68K_8K_PAGE_BASE(entry) ((entry) & M68K_8K_PAGE_MASK)
+#define M68K_8K_PAGE_INDEX(addr) (((addr) >> 11) & 0x7c)
+#define M68K_UDT_VALID(entry) (entry & 2)
+#define M68K_PDT_VALID(entry) (entry & 3)
+#define M68K_PDT_INDIRECT(entry) ((entry & 3) == 2)
+#define M68K_INDIRECT_POINTER(addr) (addr & ~3)
+#define M68K_TTS_POINTER_SHIFT 18
+#define M68K_TTS_ROOT_SHIFT 25
+
+/* bits for 68040 MMU Transparent Translation Registers */
+#define M68K_TTR_ADDR_BASE 0xff000000
+#define M68K_TTR_ADDR_MASK 0x00ff0000
+#define M68K_TTR_ADDR_MASK_SHIFT 8
+#define M68K_TTR_ENABLED 0x00008000
+#define M68K_TTR_SFIELD 0x00006000
+#define M68K_TTR_SFIELD_USER 0x0000
+#define M68K_TTR_SFIELD_SUPER 0x2000
+
+/* m68k Control Registers */
+
+/* ColdFire */
+/* Memory Management Control Registers */
+#define M68K_CR_ASID 0x003
+#define M68K_CR_ACR0 0x004
+#define M68K_CR_ACR1 0x005
+#define M68K_CR_ACR2 0x006
+#define M68K_CR_ACR3 0x007
+#define M68K_CR_MMUBAR 0x008
+
+/* Processor Miscellaneous Registers */
+#define M68K_CR_PC 0x80F
+
+/* Local Memory and Module Control Registers */
+#define M68K_CR_ROMBAR0 0xC00
+#define M68K_CR_ROMBAR1 0xC01
+#define M68K_CR_RAMBAR0 0xC04
+#define M68K_CR_RAMBAR1 0xC05
+#define M68K_CR_MPCR 0xC0C
+#define M68K_CR_EDRAMBAR 0xC0D
+#define M68K_CR_SECMBAR 0xC0E
+#define M68K_CR_MBAR 0xC0F
+
+/* Local Memory Address Permutation Control Registers */
+#define M68K_CR_PCR1U0 0xD02
+#define M68K_CR_PCR1L0 0xD03
+#define M68K_CR_PCR2U0 0xD04
+#define M68K_CR_PCR2L0 0xD05
+#define M68K_CR_PCR3U0 0xD06
+#define M68K_CR_PCR3L0 0xD07
+#define M68K_CR_PCR1U1 0xD0A
+#define M68K_CR_PCR1L1 0xD0B
+#define M68K_CR_PCR2U1 0xD0C
+#define M68K_CR_PCR2L1 0xD0D
+#define M68K_CR_PCR3U1 0xD0E
+#define M68K_CR_PCR3L1 0xD0F
+
+/* MC680x0 */
+/* MC680[1234]0/CPU32 */
+#define M68K_CR_SFC 0x000
+#define M68K_CR_DFC 0x001
+#define M68K_CR_USP 0x800
+#define M68K_CR_VBR 0x801 /* + Coldfire */
+
+/* MC680[234]0 */
+#define M68K_CR_CACR 0x002 /* + Coldfire */
+#define M68K_CR_CAAR 0x802 /* MC68020 and MC68030 only */
+#define M68K_CR_MSP 0x803
+#define M68K_CR_ISP 0x804
+
+/* MC68040/MC68LC040 */
+#define M68K_CR_TC 0x003
+#define M68K_CR_ITT0 0x004
+#define M68K_CR_ITT1 0x005
+#define M68K_CR_DTT0 0x006
+#define M68K_CR_DTT1 0x007
+#define M68K_CR_MMUSR 0x805
+#define M68K_CR_URP 0x806
+#define M68K_CR_SRP 0x807
+
+/* MC68EC040 */
+#define M68K_CR_IACR0 0x004
+#define M68K_CR_IACR1 0x005
+#define M68K_CR_DACR0 0x006
+#define M68K_CR_DACR1 0x007
+
+/* MC68060 */
+#define M68K_CR_BUSCR 0x008
+#define M68K_CR_PCR 0x808
+
+#define M68K_FPIAR_SHIFT 0
+#define M68K_FPIAR (1 << M68K_FPIAR_SHIFT)
+#define M68K_FPSR_SHIFT 1
+#define M68K_FPSR (1 << M68K_FPSR_SHIFT)
+#define M68K_FPCR_SHIFT 2
+#define M68K_FPCR (1 << M68K_FPCR_SHIFT)
+
+/* Floating-Point Status Register */
+
+/* Condition Code */
+#define FPSR_CC_MASK 0x0f000000
+#define FPSR_CC_A 0x01000000 /* Not-A-Number */
+#define FPSR_CC_I 0x02000000 /* Infinity */
+#define FPSR_CC_Z 0x04000000 /* Zero */
+#define FPSR_CC_N 0x08000000 /* Negative */
+
+/* Quotient */
+
+#define FPSR_QT_MASK 0x00ff0000
+#define FPSR_QT_SHIFT 16
+
+/* Floating-Point Control Register */
+/* Rounding mode */
+#define FPCR_RND_MASK 0x0030
+#define FPCR_RND_N 0x0000
+#define FPCR_RND_Z 0x0010
+#define FPCR_RND_M 0x0020
+#define FPCR_RND_P 0x0030
+
+/* Rounding precision */
+#define FPCR_PREC_MASK 0x00c0
+#define FPCR_PREC_X 0x0000
+#define FPCR_PREC_S 0x0040
+#define FPCR_PREC_D 0x0080
+#define FPCR_PREC_U 0x00c0
+
+#define FPCR_EXCP_MASK 0xff00
+
+/* CACR fields are implementation defined, but some bits are common. */
+#define M68K_CACR_EUSP 0x10
+
+#define MACSR_PAV0 0x100
+#define MACSR_OMC 0x080
+#define MACSR_SU 0x040
+#define MACSR_FI 0x020
+#define MACSR_RT 0x010
+#define MACSR_N 0x008
+#define MACSR_Z 0x004
+#define MACSR_V 0x002
+#define MACSR_EV 0x001
+
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector);
+void m68k_switch_sp(CPUM68KState *env);
+
+void do_m68k_semihosting(CPUM68KState *env, int nr);
+
+/*
+ * The 68000 family is defined in six main CPU classes, the 680[012346]0.
+ * Generally each successive CPU adds enhanced data/stack/instructions.
+ * However, some features are common to only one or a few classes.
+ * The features cover those subsets of instructions.
+ *
+ * CPU32/32+ are basically 68010 compatible, with some 68020-class
+ * instructions and some additional CPU32 instructions; the differences
+ * are mostly in supervisor state.
+ *
+ * The ColdFire core ISA is a RISC-style reduction of the 68000-series CPU.
+ * There are 4 ColdFire core ISA revisions: A, A+, B and C.
+ * Each feature covers the subset of instructions common to the
+ * ISA revisions mentioned.
+ */
+
+enum m68k_features {
+ /* Base m68k instruction set */
+ M68K_FEATURE_M68000,
+ M68K_FEATURE_M68010,
+ M68K_FEATURE_M68020,
+ M68K_FEATURE_M68030,
+ M68K_FEATURE_M68040,
+ M68K_FEATURE_M68060,
+ /* Base Coldfire set Rev A. */
+ M68K_FEATURE_CF_ISA_A,
+ /* (ISA B or C). */
+ M68K_FEATURE_CF_ISA_B,
+ /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */
+ M68K_FEATURE_CF_ISA_APLUSC,
+ /* BRA with Long branch. (680[2346]0, ISA A+ or B). */
+ M68K_FEATURE_BRAL,
+ M68K_FEATURE_CF_FPU,
+ M68K_FEATURE_CF_MAC,
+ M68K_FEATURE_CF_EMAC,
+ /* Revision B EMAC (dual accumulate). */
+ M68K_FEATURE_CF_EMAC_B,
+ /* User Stack Pointer. (680[012346]0, ISA A+, B or C). */
+ M68K_FEATURE_USP,
+ /* Master Stack Pointer. (680[234]0) */
+ M68K_FEATURE_MSP,
+ /* 68020+ full extension word. */
+ M68K_FEATURE_EXT_FULL,
+ /* word sized address index registers. */
+ M68K_FEATURE_WORD_INDEX,
+ /* scaled address index registers. */
+ M68K_FEATURE_SCALED_INDEX,
+ /* 32 bit mul/div. (680[2346]0, and CPU32) */
+ M68K_FEATURE_LONG_MULDIV,
+ /* 64 bit mul/div. (680[2346]0, and CPU32) */
+ M68K_FEATURE_QUAD_MULDIV,
+ /* Bcc with Long branches. (680[2346]0, and CPU32) */
+ M68K_FEATURE_BCCL,
+ /* BFxxx Bit field insns. (680[2346]0) */
+ M68K_FEATURE_BITFIELD,
+ /* fpu insn. (680[46]0) */
+ M68K_FEATURE_FPU,
+ /* CAS/CAS2[WL] insns. (680[2346]0) */
+ M68K_FEATURE_CAS,
+ /* BKPT insn. (680[12346]0, and CPU32) */
+ M68K_FEATURE_BKPT,
+ /* RTD insn. (680[12346]0, and CPU32) */
+ M68K_FEATURE_RTD,
+ /* CHK2 insn. (680[2346]0, and CPU32) */
+ M68K_FEATURE_CHK2,
+ /* MOVEP insn. (680[01234]0, and CPU32) */
+ M68K_FEATURE_MOVEP,
+ /* MOVEC insn. (from 68010) */
+ M68K_FEATURE_MOVEC,
+ /* Unaligned data accesses (680[2346]0) */
+ M68K_FEATURE_UNALIGNED_DATA,
+};
+
+static inline int m68k_feature(CPUM68KState *env, int feature)
+{
+ return (env->features & (1u << feature)) != 0;
+}
+
+void m68k_cpu_list(void);
+
+void register_m68k_insns(CPUM68KState *env);
+
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_SUPER = 0x01,
+ /* 1 bit to indicate direction */
+ ACCESS_STORE = 0x02,
+ /* 1 bit to indicate debug access */
+ ACCESS_DEBUG = 0x04,
+ /* PTEST instruction */
+ ACCESS_PTEST = 0x08,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_DATA = 0x20, /* Data load/store access */
+};
+
+#define M68K_CPU_TYPE_SUFFIX "-" TYPE_M68K_CPU
+#define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_M68K_CPU
+
+#define cpu_list m68k_cpu_list
+
+/* MMU modes definitions */
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index(CPUM68KState *env, bool ifetch)
+{
+    return (env->sr & SR_S) == 0 ? MMU_USER_IDX : MMU_KERNEL_IDX;
+}
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+
+typedef CPUM68KState CPUArchState;
+typedef M68kCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/* TB flags */
+#define TB_FLAGS_MACSR 0x0f
+#define TB_FLAGS_MSR_S_BIT 13
+#define TB_FLAGS_MSR_S (1 << TB_FLAGS_MSR_S_BIT)
+#define TB_FLAGS_SFC_S_BIT 14
+#define TB_FLAGS_SFC_S (1 << TB_FLAGS_SFC_S_BIT)
+#define TB_FLAGS_DFC_S_BIT 15
+#define TB_FLAGS_DFC_S (1 << TB_FLAGS_DFC_S_BIT)
+#define TB_FLAGS_TRACE 16
+#define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)
+
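+/*
+ * SFC and DFC are 3-bit function-code registers; bit 2 set denotes a
+ * supervisor function code, and the shifts below move exactly that bit
+ * into TB_FLAGS_SFC_S / TB_FLAGS_DFC_S.
+ */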
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
+ if (env->sr & SR_S) {
+ *flags |= TB_FLAGS_MSR_S;
+ *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
+ *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
+ }
+ if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) {
+ *flags |= TB_FLAGS_TRACE;
+ }
+}
+
+void dump_mmu(CPUM68KState *env);
+
+#endif
diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
new file mode 100644
index 000000000..fdc4937e2
--- /dev/null
+++ b/target/m68k/fpu_helper.c
@@ -0,0 +1,666 @@
+/*
+ * m68k FPU helpers
+ *
+ * Copyright (c) 2006-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "softfloat.h"
+
+/*
+ * Values at undefined offsets may differ between FPU models;
+ * on the 68040 they read as 0.0 (floatx80_zero).
+ */
+
+static const floatx80 fpu_rom[128] = {
+ [0x00] = make_floatx80_init(0x4000, 0xc90fdaa22168c235ULL), /* Pi */
+ [0x0b] = make_floatx80_init(0x3ffd, 0x9a209a84fbcff798ULL), /* Log10(2) */
+ [0x0c] = make_floatx80_init(0x4000, 0xadf85458a2bb4a9aULL), /* e */
+ [0x0d] = make_floatx80_init(0x3fff, 0xb8aa3b295c17f0bcULL), /* Log2(e) */
+ [0x0e] = make_floatx80_init(0x3ffd, 0xde5bd8a937287195ULL), /* Log10(e) */
+ [0x0f] = make_floatx80_init(0x0000, 0x0000000000000000ULL), /* Zero */
+ [0x30] = make_floatx80_init(0x3ffe, 0xb17217f7d1cf79acULL), /* ln(2) */
+ [0x31] = make_floatx80_init(0x4000, 0x935d8dddaaa8ac17ULL), /* ln(10) */
+ [0x32] = make_floatx80_init(0x3fff, 0x8000000000000000ULL), /* 10^0 */
+ [0x33] = make_floatx80_init(0x4002, 0xa000000000000000ULL), /* 10^1 */
+ [0x34] = make_floatx80_init(0x4005, 0xc800000000000000ULL), /* 10^2 */
+ [0x35] = make_floatx80_init(0x400c, 0x9c40000000000000ULL), /* 10^4 */
+ [0x36] = make_floatx80_init(0x4019, 0xbebc200000000000ULL), /* 10^8 */
+ [0x37] = make_floatx80_init(0x4034, 0x8e1bc9bf04000000ULL), /* 10^16 */
+ [0x38] = make_floatx80_init(0x4069, 0x9dc5ada82b70b59eULL), /* 10^32 */
+ [0x39] = make_floatx80_init(0x40d3, 0xc2781f49ffcfa6d5ULL), /* 10^64 */
+ [0x3a] = make_floatx80_init(0x41a8, 0x93ba47c980e98ce0ULL), /* 10^128 */
+ [0x3b] = make_floatx80_init(0x4351, 0xaa7eebfb9df9de8eULL), /* 10^256 */
+ [0x3c] = make_floatx80_init(0x46a3, 0xe319a0aea60e91c7ULL), /* 10^512 */
+ [0x3d] = make_floatx80_init(0x4d48, 0xc976758681750c17ULL), /* 10^1024 */
+ [0x3e] = make_floatx80_init(0x5a92, 0x9e8b3b5dc53d5de5ULL), /* 10^2048 */
+ [0x3f] = make_floatx80_init(0x7525, 0xc46052028a20979bULL), /* 10^4096 */
+};
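+
+/*
+ * FMOVECR reaches this table via HELPER(fconst) below; e.g. offset
+ * 0x00 loads pi and offset 0x0f loads 0.0.
+ */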
+
+int32_t HELPER(reds32)(CPUM68KState *env, FPReg *val)
+{
+ return floatx80_to_int32(val->d, &env->fp_status);
+}
+
+float32 HELPER(redf32)(CPUM68KState *env, FPReg *val)
+{
+ return floatx80_to_float32(val->d, &env->fp_status);
+}
+
+void HELPER(exts32)(CPUM68KState *env, FPReg *res, int32_t val)
+{
+ res->d = int32_to_floatx80(val, &env->fp_status);
+}
+
+void HELPER(extf32)(CPUM68KState *env, FPReg *res, float32 val)
+{
+ res->d = float32_to_floatx80(val, &env->fp_status);
+}
+
+void HELPER(extf64)(CPUM68KState *env, FPReg *res, float64 val)
+{
+ res->d = float64_to_floatx80(val, &env->fp_status);
+}
+
+float64 HELPER(redf64)(CPUM68KState *env, FPReg *val)
+{
+ return floatx80_to_float64(val->d, &env->fp_status);
+}
+
+void HELPER(firound)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_round_to_int(val->d, &env->fp_status);
+}
+
+static void m68k_restore_precision_mode(CPUM68KState *env)
+{
+ switch (env->fpcr & FPCR_PREC_MASK) {
+ case FPCR_PREC_X: /* extended */
+ set_floatx80_rounding_precision(floatx80_precision_x, &env->fp_status);
+ break;
+ case FPCR_PREC_S: /* single */
+ set_floatx80_rounding_precision(floatx80_precision_s, &env->fp_status);
+ break;
+ case FPCR_PREC_D: /* double */
+ set_floatx80_rounding_precision(floatx80_precision_d, &env->fp_status);
+ break;
+ case FPCR_PREC_U: /* undefined */
+ default:
+ break;
+ }
+}
+
+static void cf_restore_precision_mode(CPUM68KState *env)
+{
+ if (env->fpcr & FPCR_PREC_S) { /* single */
+ set_floatx80_rounding_precision(floatx80_precision_s, &env->fp_status);
+ } else { /* double */
+ set_floatx80_rounding_precision(floatx80_precision_d, &env->fp_status);
+ }
+}
+
+static void restore_rounding_mode(CPUM68KState *env)
+{
+ switch (env->fpcr & FPCR_RND_MASK) {
+ case FPCR_RND_N: /* round to nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ break;
+ case FPCR_RND_Z: /* round to zero */
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ break;
+ case FPCR_RND_M: /* round toward minus infinity */
+ set_float_rounding_mode(float_round_down, &env->fp_status);
+ break;
+ case FPCR_RND_P: /* round toward positive infinity */
+ set_float_rounding_mode(float_round_up, &env->fp_status);
+ break;
+ }
+}
+
+void cpu_m68k_restore_fp_status(CPUM68KState *env)
+{
+ if (m68k_feature(env, M68K_FEATURE_CF_FPU)) {
+ cf_restore_precision_mode(env);
+ } else {
+ m68k_restore_precision_mode(env);
+ }
+ restore_rounding_mode(env);
+}
+
+void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val)
+{
+ env->fpcr = val & 0xffff;
+ cpu_m68k_restore_fp_status(env);
+}
+
+void HELPER(fitrunc)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ FloatRoundMode rounding_mode = get_float_rounding_mode(&env->fp_status);
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ res->d = floatx80_round_to_int(val->d, &env->fp_status);
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+}
+
+void HELPER(set_fpcr)(CPUM68KState *env, uint32_t val)
+{
+ cpu_m68k_set_fpcr(env, val);
+}
+
+#define PREC_BEGIN(prec) \
+ do { \
+ FloatX80RoundPrec old = \
+ get_floatx80_rounding_precision(&env->fp_status); \
+ set_floatx80_rounding_precision(prec, &env->fp_status) \
+
+#define PREC_END() \
+ set_floatx80_rounding_precision(old, &env->fp_status); \
+ } while (0)
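+
+/*
+ * PREC_BEGIN()/PREC_END() bracket a single do/while(0) block, e.g.:
+ *
+ *     PREC_BEGIN(floatx80_precision_s);
+ *     res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+ *     PREC_END();
+ *
+ * saving the current floatx80 rounding precision, running the
+ * operation at the requested one, and restoring the old precision.
+ */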
+
+void HELPER(fsround)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_round(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdround)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_round(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_sqrt(val->d, &env->fp_status);
+}
+
+void HELPER(fssqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_sqrt(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdsqrt)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_sqrt(val->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fabs)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+}
+
+void HELPER(fsabs)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdabs)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_round(floatx80_abs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fneg)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+}
+
+void HELPER(fsneg)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdneg)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_round(floatx80_chs(val->d), &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+}
+
+void HELPER(fsadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdadd)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_add(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
+}
+
+void HELPER(fssub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdsub)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_sub(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
+}
+
+void HELPER(fsmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_mul(val0->d, val1->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsglmul)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ FloatRoundMode rounding_mode = get_float_rounding_mode(&env->fp_status);
+ floatx80 a, b;
+
+ PREC_BEGIN(floatx80_precision_s);
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ a = floatx80_round(val0->d, &env->fp_status);
+ b = floatx80_round(val1->d, &env->fp_status);
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ res->d = floatx80_mul(a, b, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
+}
+
+void HELPER(fsdiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_s);
+ res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fddiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ PREC_BEGIN(floatx80_precision_d);
+ res->d = floatx80_div(val1->d, val0->d, &env->fp_status);
+ PREC_END();
+}
+
+void HELPER(fsgldiv)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ FloatRoundMode rounding_mode = get_float_rounding_mode(&env->fp_status);
+ floatx80 a, b;
+
+ PREC_BEGIN(floatx80_precision_s);
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ a = floatx80_round(val1->d, &env->fp_status);
+ b = floatx80_round(val0->d, &env->fp_status);
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ res->d = floatx80_div(a, b, &env->fp_status);
+ PREC_END();
+}
+
+static int float_comp_to_cc(int float_compare)
+{
+ switch (float_compare) {
+ case float_relation_equal:
+ return FPSR_CC_Z;
+ case float_relation_less:
+ return FPSR_CC_N;
+ case float_relation_unordered:
+ return FPSR_CC_A;
+ case float_relation_greater:
+ return 0;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(fcmp)(CPUM68KState *env, FPReg *val0, FPReg *val1)
+{
+ int float_compare;
+
+ float_compare = floatx80_compare(val1->d, val0->d, &env->fp_status);
+ env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | float_comp_to_cc(float_compare);
+}
+
+void HELPER(ftst)(CPUM68KState *env, FPReg *val)
+{
+ uint32_t cc = 0;
+
+ if (floatx80_is_neg(val->d)) {
+ cc |= FPSR_CC_N;
+ }
+
+ if (floatx80_is_any_nan(val->d)) {
+ cc |= FPSR_CC_A;
+ } else if (floatx80_is_infinity(val->d)) {
+ cc |= FPSR_CC_I;
+ } else if (floatx80_is_zero(val->d)) {
+ cc |= FPSR_CC_Z;
+ }
+ env->fpsr = (env->fpsr & ~FPSR_CC_MASK) | cc;
+}
+
+void HELPER(fconst)(CPUM68KState *env, FPReg *val, uint32_t offset)
+{
+ val->d = fpu_rom[offset];
+}
+
+typedef int (*float_access)(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra);
+
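+/*
+ * FMOVEM list encoding (a sketch): both helpers below consume the
+ * 8-bit mask MSB-first, but the register order is reversed between
+ * modes, so in fmovem_predec() the mask MSB selects FP7 and the walk
+ * is FP7..FP0, while in fmovem_postinc() the MSB selects FP0 and the
+ * walk is FP0..FP7.  fmovem_predec() also skips the address decrement
+ * after the last transfer, returning the address of the last (lowest
+ * numbered) register transferred.
+ */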
+static uint32_t fmovem_predec(CPUM68KState *env, uint32_t addr, uint32_t mask,
+ float_access access_fn)
+{
+ uintptr_t ra = GETPC();
+ int i, size;
+
+ for (i = 7; i >= 0; i--, mask <<= 1) {
+ if (mask & 0x80) {
+ size = access_fn(env, addr, &env->fregs[i], ra);
+ if ((mask & 0xff) != 0x80) {
+ addr -= size;
+ }
+ }
+ }
+
+ return addr;
+}
+
+static uint32_t fmovem_postinc(CPUM68KState *env, uint32_t addr, uint32_t mask,
+ float_access access_fn)
+{
+ uintptr_t ra = GETPC();
+ int i, size;
+
+ for (i = 0; i < 8; i++, mask <<= 1) {
+ if (mask & 0x80) {
+ size = access_fn(env, addr, &env->fregs[i], ra);
+ addr += size;
+ }
+ }
+
+ return addr;
+}
+
+static int cpu_ld_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ uint32_t high;
+ uint64_t low;
+
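+ /*
+ * The 96-bit in-memory extended format holds the sign and 15-bit
+ * exponent in the upper half of the first longword, 16 unused bits,
+ * and then the 64-bit mantissa.
+ */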
+ high = cpu_ldl_data_ra(env, addr, ra);
+ low = cpu_ldq_data_ra(env, addr + 4, ra);
+
+ fp->l.upper = high >> 16;
+ fp->l.lower = low;
+
+ return 12;
+}
+
+static int cpu_st_floatx80_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ cpu_stl_data_ra(env, addr, fp->l.upper << 16, ra);
+ cpu_stq_data_ra(env, addr + 4, fp->l.lower, ra);
+
+ return 12;
+}
+
+static int cpu_ld_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ uint64_t val;
+
+ val = cpu_ldq_data_ra(env, addr, ra);
+ fp->d = float64_to_floatx80(*(float64 *)&val, &env->fp_status);
+
+ return 8;
+}
+
+static int cpu_st_float64_ra(CPUM68KState *env, uint32_t addr, FPReg *fp,
+ uintptr_t ra)
+{
+ float64 val;
+
+ val = floatx80_to_float64(fp->d, &env->fp_status);
+ cpu_stq_data_ra(env, addr, *(uint64_t *)&val, ra);
+
+ return 8;
+}
+
+uint32_t HELPER(fmovemx_st_predec)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_predec(env, addr, mask, cpu_st_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemx_st_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_st_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemx_ld_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_ld_floatx80_ra);
+}
+
+uint32_t HELPER(fmovemd_st_predec)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_predec(env, addr, mask, cpu_st_float64_ra);
+}
+
+uint32_t HELPER(fmovemd_st_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_st_float64_ra);
+}
+
+uint32_t HELPER(fmovemd_ld_postinc)(CPUM68KState *env, uint32_t addr,
+ uint32_t mask)
+{
+ return fmovem_postinc(env, addr, mask, cpu_ld_float64_ra);
+}
+
+static void make_quotient(CPUM68KState *env, floatx80 val)
+{
+ int32_t quotient;
+ int sign;
+
+ if (floatx80_is_any_nan(val)) {
+ return;
+ }
+
+ quotient = floatx80_to_int32(val, &env->fp_status);
+ sign = quotient < 0;
+ if (sign) {
+ quotient = -quotient;
+ }
+
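+ /*
+ * The FPSR quotient byte holds the quotient's sign in bit 7 and the
+ * low seven bits of its magnitude.
+ */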
+ quotient = (sign << 7) | (quotient & 0x7f);
+ env->fpsr = (env->fpsr & ~FPSR_QT_MASK) | (quotient << FPSR_QT_SHIFT);
+}
+
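+/*
+ * FMOD and FREM differ only in how the quotient is rounded: FMOD
+ * truncates toward zero, FREM rounds to nearest even, per the 68881.
+ */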
+void HELPER(fmod)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_mod(val1->d, val0->d, &env->fp_status);
+
+ make_quotient(env, res->d);
+}
+
+void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_rem(val1->d, val0->d, &env->fp_status);
+
+ make_quotient(env, res->d);
+}
+
+void HELPER(fgetexp)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_getexp(val->d, &env->fp_status);
+}
+
+void HELPER(fgetman)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_getman(val->d, &env->fp_status);
+}
+
+void HELPER(fscale)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1)
+{
+ res->d = floatx80_scale(val1->d, val0->d, &env->fp_status);
+}
+
+void HELPER(flognp1)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_lognp1(val->d, &env->fp_status);
+}
+
+void HELPER(flogn)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_logn(val->d, &env->fp_status);
+}
+
+void HELPER(flog10)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_log10(val->d, &env->fp_status);
+}
+
+void HELPER(flog2)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_log2(val->d, &env->fp_status);
+}
+
+void HELPER(fetox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_etox(val->d, &env->fp_status);
+}
+
+void HELPER(ftwotox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_twotox(val->d, &env->fp_status);
+}
+
+void HELPER(ftentox)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_tentox(val->d, &env->fp_status);
+}
+
+void HELPER(ftan)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_tan(val->d, &env->fp_status);
+}
+
+void HELPER(fsin)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_sin(val->d, &env->fp_status);
+}
+
+void HELPER(fcos)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_cos(val->d, &env->fp_status);
+}
+
+void HELPER(fsincos)(CPUM68KState *env, FPReg *res0, FPReg *res1, FPReg *val)
+{
+ floatx80 a = val->d;
+ /*
+ * If res0 and res1 specify the same floating-point data register,
+ * the sine result is stored in the register, and the cosine
+ * result is discarded.
+ */
+ res1->d = floatx80_cos(a, &env->fp_status);
+ res0->d = floatx80_sin(a, &env->fp_status);
+}
+
+void HELPER(fatan)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_atan(val->d, &env->fp_status);
+}
+
+void HELPER(fasin)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_asin(val->d, &env->fp_status);
+}
+
+void HELPER(facos)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_acos(val->d, &env->fp_status);
+}
+
+void HELPER(fatanh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_atanh(val->d, &env->fp_status);
+}
+
+void HELPER(fetoxm1)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_etoxm1(val->d, &env->fp_status);
+}
+
+void HELPER(ftanh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_tanh(val->d, &env->fp_status);
+}
+
+void HELPER(fsinh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_sinh(val->d, &env->fp_status);
+}
+
+void HELPER(fcosh)(CPUM68KState *env, FPReg *res, FPReg *val)
+{
+ res->d = floatx80_cosh(val->d, &env->fp_status);
+}
diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c
new file mode 100644
index 000000000..eb2d030e1
--- /dev/null
+++ b/target/m68k/gdbstub.c
@@ -0,0 +1,78 @@
+/*
+ * m68k gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int m68k_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (n < 8) {
+ /* D0-D7 */
+ return gdb_get_reg32(mem_buf, env->dregs[n]);
+ } else if (n < 16) {
+ /* A0-A7 */
+ return gdb_get_reg32(mem_buf, env->aregs[n - 8]);
+ } else {
+ switch (n) {
+ case 16:
+ /* SR is SR proper plus the CCR; the CCR is stored as individual 1-bit flags, so assemble it with the helper */
+ return gdb_get_reg32(mem_buf, env->sr | cpu_m68k_get_ccr(env));
+ case 17:
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+ }
+ /*
+ * FP registers not included here because they vary between
+ * ColdFire and m68k. Use XML bits for these.
+ */
+ return 0;
+}
+
+int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ uint32_t tmp;
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 8) {
+ /* D0-D7 */
+ env->dregs[n] = tmp;
+ } else if (n < 16) {
+ /* A0-A7 */
+ env->aregs[n - 8] = tmp;
+ } else {
+ switch (n) {
+ case 16:
+ cpu_m68k_set_sr(env, tmp);
+ break;
+ case 17:
+ env->pc = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 4;
+}
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
new file mode 100644
index 000000000..5728e4858
--- /dev/null
+++ b/target/m68k/helper.c
@@ -0,0 +1,1536 @@
+/*
+ * m68k op helpers
+ *
+ * Copyright (c) 2006-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "qemu/qemu-print.h"
+
+#define SIGNBIT (1u << 31)
+
+/* Sort alphabetically, except for "any". */
+static gint m68k_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_M68K_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_M68K_CPU) == 0) {
+ return -1;
+ } else {
+ return strcasecmp(name_a, name_b);
+ }
+}
+
+static void m68k_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *c = data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(c);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_M68K_CPU));
+ qemu_printf("%s\n", name);
+ g_free(name);
+}
+
+void m68k_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_M68K_CPU, false);
+ list = g_slist_sort(list, m68k_cpu_list_compare);
+ g_slist_foreach(list, m68k_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
+{
+ if (n < 8) {
+ float_status s;
+ return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s));
+ }
+ switch (n) {
+ case 8: /* fpcontrol */
+ return gdb_get_reg32(mem_buf, env->fpcr);
+ case 9: /* fpstatus */
+ return gdb_get_reg32(mem_buf, env->fpsr);
+ case 10: /* fpiar, not implemented */
+ return gdb_get_reg32(mem_buf, 0);
+ }
+ return 0;
+}
+
+static int cf_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 8) {
+ float_status s;
+ env->fregs[n].d = float64_to_floatx80(ldq_p(mem_buf), &s);
+ return 8;
+ }
+ switch (n) {
+ case 8: /* fpcontrol */
+ cpu_m68k_set_fpcr(env, ldl_p(mem_buf));
+ return 4;
+ case 9: /* fpstatus */
+ env->fpsr = ldl_p(mem_buf);
+ return 4;
+ case 10: /* fpiar, not implemented */
+ return 4;
+ }
+ return 0;
+}
+
+static int m68k_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n)
+{
+ if (n < 8) {
+ int len = gdb_get_reg16(mem_buf, env->fregs[n].l.upper);
+ len += gdb_get_reg16(mem_buf, 0);
+ len += gdb_get_reg64(mem_buf, env->fregs[n].l.lower);
+ return len;
+ }
+ switch (n) {
+ case 8: /* fpcontrol */
+ return gdb_get_reg32(mem_buf, env->fpcr);
+ case 9: /* fpstatus */
+ return gdb_get_reg32(mem_buf, env->fpsr);
+ case 10: /* fpiar, not implemented */
+ return gdb_get_reg32(mem_buf, 0);
+ }
+ return 0;
+}
+
+static int m68k_fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 8) {
+ env->fregs[n].l.upper = lduw_be_p(mem_buf);
+ env->fregs[n].l.lower = ldq_be_p(mem_buf + 4);
+ return 12;
+ }
+ switch (n) {
+ case 8: /* fpcontrol */
+ cpu_m68k_set_fpcr(env, ldl_p(mem_buf));
+ return 4;
+ case 9: /* fpstatus */
+ env->fpsr = ldl_p(mem_buf);
+ return 4;
+ case 10: /* fpiar, not implemented */
+ return 4;
+ }
+ return 0;
+}
+
+void m68k_cpu_init_gdb(M68kCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ if (m68k_feature(env, M68K_FEATURE_CF_FPU)) {
+ gdb_register_coprocessor(cs, cf_fpu_gdb_get_reg, cf_fpu_gdb_set_reg,
+ 11, "cf-fp.xml", 18);
+ } else if (m68k_feature(env, M68K_FEATURE_FPU)) {
+ gdb_register_coprocessor(cs, m68k_fpu_gdb_get_reg,
+ m68k_fpu_gdb_set_reg, 11, "m68k-fp.xml", 18);
+ }
+ /* TODO: Add [E]MAC registers. */
+}
+
+void HELPER(cf_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+ switch (reg) {
+ case M68K_CR_CACR:
+ env->cacr = val;
+ m68k_switch_sp(env);
+ break;
+ case M68K_CR_ACR0:
+ case M68K_CR_ACR1:
+ case M68K_CR_ACR2:
+ case M68K_CR_ACR3:
+ /* TODO: Implement Access Control Registers. */
+ break;
+ case M68K_CR_VBR:
+ env->vbr = val;
+ break;
+ /* TODO: Implement control registers. */
+ default:
+ cpu_abort(env_cpu(env),
+ "Unimplemented control register write 0x%x = 0x%x\n",
+ reg, val);
+ }
+}
+
+static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = tt;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+ switch (reg) {
+ /* MC680[12346]0 */
+ case M68K_CR_SFC:
+ env->sfc = val & 7;
+ return;
+ /* MC680[12346]0 */
+ case M68K_CR_DFC:
+ env->dfc = val & 7;
+ return;
+ /* MC680[12346]0 */
+ case M68K_CR_VBR:
+ env->vbr = val;
+ return;
+ /* MC680[2346]0 */
+ case M68K_CR_CACR:
+ if (m68k_feature(env, M68K_FEATURE_M68020)) {
+ env->cacr = val & 0x0000000f;
+ } else if (m68k_feature(env, M68K_FEATURE_M68030)) {
+ env->cacr = val & 0x00003f1f;
+ } else if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->cacr = val & 0x80008000;
+ } else if (m68k_feature(env, M68K_FEATURE_M68060)) {
+ env->cacr = val & 0xf8e0e000;
+ } else {
+ break;
+ }
+ m68k_switch_sp(env);
+ return;
+ /* MC680[46]0 */
+ case M68K_CR_TC:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ env->mmu.tcr = val;
+ return;
+ }
+ break;
+ /* MC68040 */
+ case M68K_CR_MMUSR:
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.mmusr = val;
+ return;
+ }
+ break;
+ /* MC680[46]0 */
+ case M68K_CR_SRP:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ env->mmu.srp = val;
+ return;
+ }
+ break;
+ /* MC680[46]0 */
+ case M68K_CR_URP:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ env->mmu.urp = val;
+ return;
+ }
+ break;
+ /* MC680[12346]0 */
+ case M68K_CR_USP:
+ env->sp[M68K_USP] = val;
+ return;
+ /* MC680[234]0 */
+ case M68K_CR_MSP:
+ if (m68k_feature(env, M68K_FEATURE_M68020)
+ || m68k_feature(env, M68K_FEATURE_M68030)
+ || m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->sp[M68K_SSP] = val;
+ return;
+ }
+ break;
+ /* MC680[234]0 */
+ case M68K_CR_ISP:
+ if (m68k_feature(env, M68K_FEATURE_M68020)
+ || m68k_feature(env, M68K_FEATURE_M68030)
+ || m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->sp[M68K_ISP] = val;
+ return;
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_ITT0: /* MC68EC040 only: M68K_CR_IACR0 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.ttr[M68K_ITTR0] = val;
+ return;
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_ITT1: /* MC68EC040 only: M68K_CR_IACR1 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.ttr[M68K_ITTR1] = val;
+ return;
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_DTT0: /* MC68EC040 only: M68K_CR_DACR0 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.ttr[M68K_DTTR0] = val;
+ return;
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_DTT1: /* MC68EC040 only: M68K_CR_DACR1 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.ttr[M68K_DTTR1] = val;
+ return;
+ }
+ break;
+ /* Unimplemented Registers */
+ case M68K_CR_CAAR:
+ case M68K_CR_PCR:
+ case M68K_CR_BUSCR:
+ cpu_abort(env_cpu(env),
+ "Unimplemented control register write 0x%x = 0x%x\n",
+ reg, val);
+ }
+
+ /* Invalid control registers will generate an exception. */
+ raise_exception_ra(env, EXCP_ILLEGAL, 0);
+ return;
+}
+
+uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg)
+{
+ switch (reg) {
+ /* MC680[12346]0 */
+ case M68K_CR_SFC:
+ return env->sfc;
+ /* MC680[12346]0 */
+ case M68K_CR_DFC:
+ return env->dfc;
+ /* MC680[12346]0 */
+ case M68K_CR_VBR:
+ return env->vbr;
+ /* MC680[2346]0 */
+ case M68K_CR_CACR:
+ if (m68k_feature(env, M68K_FEATURE_M68020)
+ || m68k_feature(env, M68K_FEATURE_M68030)
+ || m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ return env->cacr;
+ }
+ break;
+ /* MC680[46]0 */
+ case M68K_CR_TC:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ return env->mmu.tcr;
+ }
+ break;
+ /* MC68040 */
+ case M68K_CR_MMUSR:
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->mmu.mmusr;
+ }
+ break;
+ /* MC680[46]0 */
+ case M68K_CR_SRP:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ return env->mmu.srp;
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_URP:
+ if (m68k_feature(env, M68K_FEATURE_M68040)
+ || m68k_feature(env, M68K_FEATURE_M68060)) {
+ return env->mmu.urp;
+ }
+ break;
+ /* MC680[46]0 */
+ case M68K_CR_USP:
+ return env->sp[M68K_USP];
+ /* MC680[234]0 */
+ case M68K_CR_MSP:
+ if (m68k_feature(env, M68K_FEATURE_M68020)
+ || m68k_feature(env, M68K_FEATURE_M68030)
+ || m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->sp[M68K_SSP];
+ }
+ break;
+ /* MC680[234]0 */
+ case M68K_CR_ISP:
+ if (m68k_feature(env, M68K_FEATURE_M68020)
+ || m68k_feature(env, M68K_FEATURE_M68030)
+ || m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->sp[M68K_ISP];
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_ITT0: /* MC68EC040 only: M68K_CR_IACR0 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->mmu.ttr[M68K_ITTR0];
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_ITT1: /* MC68EC040 only: M68K_CR_IACR1 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->mmu.ttr[M68K_ITTR1];
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_DTT0: /* MC68EC040 only: M68K_CR_DACR0 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->mmu.ttr[M68K_DTTR0];
+ }
+ break;
+ /* MC68040/MC68LC040 */
+ case M68K_CR_DTT1: /* MC68EC040 only: M68K_CR_DACR1 */
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ return env->mmu.ttr[M68K_DTTR1];
+ }
+ break;
+ /* Unimplemented Registers */
+ case M68K_CR_CAAR:
+ case M68K_CR_PCR:
+ case M68K_CR_BUSCR:
+ cpu_abort(env_cpu(env), "Unimplemented control register read 0x%x\n",
+ reg);
+ }
+
+ /* Invalid control registers will generate an exception. */
+ raise_exception_ra(env, EXCP_ILLEGAL, 0);
+
+ return 0;
+}
+
+void HELPER(set_macsr)(CPUM68KState *env, uint32_t val)
+{
+ uint32_t acc;
+ int8_t exthigh;
+ uint8_t extlow;
+ uint64_t regval;
+ int i;
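+ /*
+ * The accumulator packing differs between fractional (FI), signed (SU)
+ * and unsigned modes; when those mode bits change, split each
+ * accumulator into its 32-bit value and extension bytes and repack it.
+ */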
+ if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) {
+ for (i = 0; i < 4; i++) {
+ regval = env->macc[i];
+ exthigh = regval >> 40;
+ if (env->macsr & MACSR_FI) {
+ acc = regval >> 8;
+ extlow = regval;
+ } else {
+ acc = regval;
+ extlow = regval >> 32;
+ }
+ if (env->macsr & MACSR_FI) {
+ regval = (((uint64_t)acc) << 8) | extlow;
+ regval |= ((int64_t)exthigh) << 40;
+ } else if (env->macsr & MACSR_SU) {
+ regval = acc | (((int64_t)extlow) << 32);
+ regval |= ((int64_t)exthigh) << 40;
+ } else {
+ regval = acc | (((uint64_t)extlow) << 32);
+ regval |= ((uint64_t)(uint8_t)exthigh) << 40;
+ }
+ env->macc[i] = regval;
+ }
+ }
+ env->macsr = val;
+}
+
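+/*
+ * A7 is banked: the USP, SSP and (where implemented) ISP live in
+ * env->sp[], and the active one is swapped into aregs[7] whenever the
+ * privilege level or the relevant control bits change.
+ */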
+void m68k_switch_sp(CPUM68KState *env)
+{
+ int new_sp;
+
+ env->sp[env->current_sp] = env->aregs[7];
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ if (env->sr & SR_S) {
+ /* If the SR Master-Mode bit is unimplemented, the ISP is not available */
+ if (!m68k_feature(env, M68K_FEATURE_MSP) || env->sr & SR_M) {
+ new_sp = M68K_SSP;
+ } else {
+ new_sp = M68K_ISP;
+ }
+ } else {
+ new_sp = M68K_USP;
+ }
+ } else {
+ new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP)
+ ? M68K_SSP : M68K_USP;
+ }
+ env->aregs[7] = env->sp[new_sp];
+ env->current_sp = new_sp;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* MMU: 68040 only */
+
+static void print_address_zone(uint32_t logical, uint32_t physical,
+ uint32_t size, int attr)
+{
+ qemu_printf("%08x - %08x -> %08x - %08x %c ",
+ logical, logical + size - 1,
+ physical, physical + size - 1,
+ attr & 4 ? 'W' : '-');
+ size >>= 10;
+ if (size < 1024) {
+ qemu_printf("(%d KiB)\n", size);
+ } else {
+ size >>= 10;
+ if (size < 1024) {
+ qemu_printf("(%d MiB)\n", size);
+ } else {
+ size >>= 10;
+ qemu_printf("(%d GiB)\n", size);
+ }
+ }
+}
+
+static void dump_address_map(CPUM68KState *env, uint32_t root_pointer)
+{
+ int i, j, k;
+ int tic_size, tic_shift;
+ uint32_t tib_mask;
+ uint32_t tia, tib, tic;
+ uint32_t logical = 0xffffffff, physical = 0xffffffff;
+ uint32_t first_logical = 0xffffffff, first_physical = 0xffffffff;
+ uint32_t last_logical, last_physical;
+ int32_t size;
+ int last_attr = -1, attr = -1;
+ CPUState *cs = env_cpu(env);
+ MemTxResult txres;
+
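+ /*
+ * Walk the 68040's three-level translation tree (root, pointer and
+ * page tables), coalescing contiguous mappings with identical write
+ * protection into printable zones.
+ */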
+ if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
+ /* 8k page */
+ tic_size = 32;
+ tic_shift = 13;
+ tib_mask = M68K_8K_PAGE_MASK;
+ } else {
+ /* 4k page */
+ tic_size = 64;
+ tic_shift = 12;
+ tib_mask = M68K_4K_PAGE_MASK;
+ }
+ for (i = 0; i < M68K_ROOT_POINTER_ENTRIES; i++) {
+ tia = address_space_ldl(cs->as, M68K_POINTER_BASE(root_pointer) + i * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_UDT_VALID(tia)) {
+ continue;
+ }
+ for (j = 0; j < M68K_ROOT_POINTER_ENTRIES; j++) {
+ tib = address_space_ldl(cs->as, M68K_POINTER_BASE(tia) + j * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_UDT_VALID(tib)) {
+ continue;
+ }
+ for (k = 0; k < tic_size; k++) {
+ tic = address_space_ldl(cs->as, (tib & tib_mask) + k * 4,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK || !M68K_PDT_VALID(tic)) {
+ continue;
+ }
+ if (M68K_PDT_INDIRECT(tic)) {
+ tic = address_space_ldl(cs->as, M68K_INDIRECT_POINTER(tic),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ continue;
+ }
+ }
+
+ last_logical = logical;
+ logical = (i << M68K_TTS_ROOT_SHIFT) |
+ (j << M68K_TTS_POINTER_SHIFT) |
+ (k << tic_shift);
+
+ last_physical = physical;
+ physical = tic & ~((1 << tic_shift) - 1);
+
+ last_attr = attr;
+ attr = tic & ((1 << tic_shift) - 1);
+
+ if ((logical != (last_logical + (1 << tic_shift))) ||
+ (physical != (last_physical + (1 << tic_shift))) ||
+ (attr & 4) != (last_attr & 4)) {
+
+ if (first_logical != 0xffffffff) {
+ size = last_logical + (1 << tic_shift) -
+ first_logical;
+ print_address_zone(first_logical,
+ first_physical, size, last_attr);
+ }
+ first_logical = logical;
+ first_physical = physical;
+ }
+ }
+ }
+ }
+ if (first_logical != logical || (attr & 4) != (last_attr & 4)) {
+ size = logical + (1 << tic_shift) - first_logical;
+ print_address_zone(first_logical, first_physical, size, last_attr);
+ }
+}
+
+#define DUMP_CACHEFLAGS(a) \
+ switch (a & M68K_DESC_CACHEMODE) { \
+ case M68K_DESC_CM_WRTHRU: /* cachable, write-through */ \
+ qemu_printf("T"); \
+ break; \
+ case M68K_DESC_CM_COPYBK: /* cachable, copyback */ \
+ qemu_printf("C"); \
+ break; \
+ case M68K_DESC_CM_SERIAL: /* noncachable, serialized */ \
+ qemu_printf("S"); \
+ break; \
+ case M68K_DESC_CM_NCACHE: /* noncachable */ \
+ qemu_printf("N"); \
+ break; \
+ }
+
+static void dump_ttr(uint32_t ttr)
+{
+ if ((ttr & M68K_TTR_ENABLED) == 0) {
+ qemu_printf("disabled\n");
+ return;
+ }
+ qemu_printf("Base: 0x%08x Mask: 0x%08x Control: ",
+ ttr & M68K_TTR_ADDR_BASE,
+ (ttr & M68K_TTR_ADDR_MASK) << M68K_TTR_ADDR_MASK_SHIFT);
+ switch (ttr & M68K_TTR_SFIELD) {
+ case M68K_TTR_SFIELD_USER:
+ qemu_printf("U");
+ break;
+ case M68K_TTR_SFIELD_SUPER:
+ qemu_printf("S");
+ break;
+ default:
+ qemu_printf("*");
+ break;
+ }
+ DUMP_CACHEFLAGS(ttr);
+ if (ttr & M68K_DESC_WRITEPROT) {
+ qemu_printf("R");
+ } else {
+ qemu_printf("W");
+ }
+ qemu_printf(" U: %d\n", (ttr & M68K_DESC_USERATTR) >>
+ M68K_DESC_USERATTR_SHIFT);
+}
+
+void dump_mmu(CPUM68KState *env)
+{
+ if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
+ qemu_printf("Translation disabled\n");
+ return;
+ }
+ qemu_printf("Page Size: ");
+ if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
+ qemu_printf("8kB\n");
+ } else {
+ qemu_printf("4kB\n");
+ }
+
+ qemu_printf("MMUSR: ");
+ if (env->mmu.mmusr & M68K_MMU_B_040) {
+ qemu_printf("BUS ERROR\n");
+ } else {
+ qemu_printf("Phy=%08x Flags: ", env->mmu.mmusr & 0xfffff000);
+ /* flags found on the page descriptor */
+ if (env->mmu.mmusr & M68K_MMU_G_040) {
+ qemu_printf("G"); /* Global */
+ } else {
+ qemu_printf(".");
+ }
+ if (env->mmu.mmusr & M68K_MMU_S_040) {
+ qemu_printf("S"); /* Supervisor */
+ } else {
+ qemu_printf(".");
+ }
+ if (env->mmu.mmusr & M68K_MMU_M_040) {
+ qemu_printf("M"); /* Modified */
+ } else {
+ qemu_printf(".");
+ }
+ if (env->mmu.mmusr & M68K_MMU_WP_040) {
+ qemu_printf("W"); /* Write protect */
+ } else {
+ qemu_printf(".");
+ }
+ if (env->mmu.mmusr & M68K_MMU_T_040) {
+ qemu_printf("T"); /* Transparent */
+ } else {
+ qemu_printf(".");
+ }
+ if (env->mmu.mmusr & M68K_MMU_R_040) {
+ qemu_printf("R"); /* Resident */
+ } else {
+ qemu_printf(".");
+ }
+ qemu_printf(" Cache: ");
+ DUMP_CACHEFLAGS(env->mmu.mmusr);
+ qemu_printf(" U: %d\n", (env->mmu.mmusr >> 8) & 3);
+ qemu_printf("\n");
+ }
+
+ qemu_printf("ITTR0: ");
+ dump_ttr(env->mmu.ttr[M68K_ITTR0]);
+ qemu_printf("ITTR1: ");
+ dump_ttr(env->mmu.ttr[M68K_ITTR1]);
+ qemu_printf("DTTR0: ");
+ dump_ttr(env->mmu.ttr[M68K_DTTR0]);
+ qemu_printf("DTTR1: ");
+ dump_ttr(env->mmu.ttr[M68K_DTTR1]);
+
+ qemu_printf("SRP: 0x%08x\n", env->mmu.srp);
+ dump_address_map(env, env->mmu.srp);
+
+ qemu_printf("URP: 0x%08x\n", env->mmu.urp);
+ dump_address_map(env, env->mmu.urp);
+}
+
+static int check_TTR(uint32_t ttr, int *prot, target_ulong addr,
+ int access_type)
+{
+ uint32_t base, mask;
+
+ /* check if transparent translation is enabled */
+ if ((ttr & M68K_TTR_ENABLED) == 0) {
+ return 0;
+ }
+
+ /* check mode access */
+ switch (ttr & M68K_TTR_SFIELD) {
+ case M68K_TTR_SFIELD_USER:
+ /* match only if user */
+ if ((access_type & ACCESS_SUPER) != 0) {
+ return 0;
+ }
+ break;
+ case M68K_TTR_SFIELD_SUPER:
+ /* match only if supervisor */
+ if ((access_type & ACCESS_SUPER) == 0) {
+ return 0;
+ }
+ break;
+ default:
+ /* all other values disable mode matching (FC2) */
+ break;
+ }
+
+ /* check address matching */
+
+ base = ttr & M68K_TTR_ADDR_BASE;
+ mask = (ttr & M68K_TTR_ADDR_MASK) ^ M68K_TTR_ADDR_MASK;
+ mask <<= M68K_TTR_ADDR_MASK_SHIFT;
+
+ if ((addr & mask) != (base & mask)) {
+ return 0;
+ }
+
+ *prot = PAGE_READ | PAGE_EXEC;
+ if ((ttr & M68K_DESC_WRITEPROT) == 0) {
+ *prot |= PAGE_WRITE;
+ }
+
+ return 1;
+}
+
+static int get_physical_address(CPUM68KState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ int access_type, target_ulong *page_size)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t entry;
+ uint32_t next;
+ target_ulong page_mask;
+ bool debug = access_type & ACCESS_DEBUG;
+ int page_bits;
+ int i;
+ MemTxResult txres;
+
+ /* Transparent Translation (physical = logical) */
+ for (i = 0; i < M68K_MAX_TTR; i++) {
+ if (check_TTR(env->mmu.TTR(access_type, i),
+ prot, address, access_type)) {
+ if (access_type & ACCESS_PTEST) {
+ /* Transparent Translation Register bit */
+ env->mmu.mmusr = M68K_MMU_T_040 | M68K_MMU_R_040;
+ }
+ *physical = address;
+ *page_size = TARGET_PAGE_SIZE;
+ return 0;
+ }
+ }
+
+ /* Page Table Root Pointer */
+ *prot = PAGE_READ | PAGE_WRITE;
+ if (access_type & ACCESS_CODE) {
+ *prot |= PAGE_EXEC;
+ }
+ if (access_type & ACCESS_SUPER) {
+ next = env->mmu.srp;
+ } else {
+ next = env->mmu.urp;
+ }
+
+ /* Root Index */
+ entry = M68K_POINTER_BASE(next) | M68K_ROOT_INDEX(address);
+
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ if (!M68K_UDT_VALID(next)) {
+ return -1;
+ }
+ if (!(next & M68K_DESC_USED) && !debug) {
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ if (next & M68K_DESC_WRITEPROT) {
+ if (access_type & ACCESS_PTEST) {
+ env->mmu.mmusr |= M68K_MMU_WP_040;
+ }
+ *prot &= ~PAGE_WRITE;
+ if (access_type & ACCESS_STORE) {
+ return -1;
+ }
+ }
+
+ /* Pointer Index */
+ entry = M68K_POINTER_BASE(next) | M68K_POINTER_INDEX(address);
+
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ if (!M68K_UDT_VALID(next)) {
+ return -1;
+ }
+ if (!(next & M68K_DESC_USED) && !debug) {
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ if (next & M68K_DESC_WRITEPROT) {
+ if (access_type & ACCESS_PTEST) {
+ env->mmu.mmusr |= M68K_MMU_WP_040;
+ }
+ *prot &= ~PAGE_WRITE;
+ if (access_type & ACCESS_STORE) {
+ return -1;
+ }
+ }
+
+ /* Page Index */
+ if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
+ entry = M68K_8K_PAGE_BASE(next) | M68K_8K_PAGE_INDEX(address);
+ } else {
+ entry = M68K_4K_PAGE_BASE(next) | M68K_4K_PAGE_INDEX(address);
+ }
+
+ next = address_space_ldl(cs->as, entry, MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+
+ if (!M68K_PDT_VALID(next)) {
+ return -1;
+ }
+ if (M68K_PDT_INDIRECT(next)) {
+ next = address_space_ldl(cs->as, M68K_INDIRECT_POINTER(next),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ if (access_type & ACCESS_STORE) {
+ if (next & M68K_DESC_WRITEPROT) {
+ if (!(next & M68K_DESC_USED) && !debug) {
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ } else if ((next & (M68K_DESC_MODIFIED | M68K_DESC_USED)) !=
+ (M68K_DESC_MODIFIED | M68K_DESC_USED) && !debug) {
+ address_space_stl(cs->as, entry,
+ next | (M68K_DESC_MODIFIED | M68K_DESC_USED),
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ } else {
+ if (!(next & M68K_DESC_USED) && !debug) {
+ address_space_stl(cs->as, entry, next | M68K_DESC_USED,
+ MEMTXATTRS_UNSPECIFIED, &txres);
+ if (txres != MEMTX_OK) {
+ goto txfail;
+ }
+ }
+ }
+
+ if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
+ page_bits = 13;
+ } else {
+ page_bits = 12;
+ }
+ *page_size = 1 << page_bits;
+ page_mask = ~(*page_size - 1);
+ *physical = (next & page_mask) + (address & (*page_size - 1));
+
+ if (access_type & ACCESS_PTEST) {
+ env->mmu.mmusr |= next & M68K_MMU_SR_MASK_040;
+ env->mmu.mmusr |= *physical & 0xfffff000;
+ env->mmu.mmusr |= M68K_MMU_R_040;
+ }
+
+ if (next & M68K_DESC_WRITEPROT) {
+ *prot &= ~PAGE_WRITE;
+ if (access_type & ACCESS_STORE) {
+ return -1;
+ }
+ }
+ if (next & M68K_DESC_SUPERONLY) {
+ if ((access_type & ACCESS_SUPER) == 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+
+txfail:
+ /*
+ * A page table load/store failed. TODO: we should really raise a
+ * suitable guest fault here if this is not a debug access.
+ * For now just return that the translation failed.
+ */
+ return -1;
+}
+
+hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ hwaddr phys_addr;
+ int prot;
+ int access_type;
+ target_ulong page_size;
+
+ if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
+ /* MMU disabled */
+ return addr;
+ }
+
+ access_type = ACCESS_DATA | ACCESS_DEBUG;
+ if (env->sr & SR_S) {
+ access_type |= ACCESS_SUPER;
+ }
+
+ if (get_physical_address(env, &phys_addr, &prot,
+ addr, access_type, &page_size) != 0) {
+ return -1;
+ }
+
+ return phys_addr;
+}
+
+/*
+ * Notify CPU of a pending interrupt. Prioritization and vectoring should
+ * be handled by the interrupt controller. Real hardware only requests
+ * the vector when the interrupt is acknowledged by the CPU. For
+ * simplicity we calculate it when the interrupt is signalled.
+ */
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ env->pending_level = level;
+ env->pending_vector = vector;
+ if (level) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType qemu_access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ hwaddr physical;
+ int prot;
+ int access_type;
+ int ret;
+ target_ulong page_size;
+
+ if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
+ /* MMU disabled */
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ address & TARGET_PAGE_MASK,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ if (qemu_access_type == MMU_INST_FETCH) {
+ access_type = ACCESS_CODE;
+ } else {
+ access_type = ACCESS_DATA;
+ if (qemu_access_type == MMU_DATA_STORE) {
+ access_type |= ACCESS_STORE;
+ }
+ }
+ if (mmu_idx != MMU_USER_IDX) {
+ access_type |= ACCESS_SUPER;
+ }
+
+ ret = get_physical_address(&cpu->env, &physical, &prot,
+ address, access_type, &page_size);
+ if (likely(ret == 0)) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot, mmu_idx, page_size);
+ return true;
+ }
+
+ if (probe) {
+ return false;
+ }
+
+ /* page fault */
+ env->mmu.ssw = M68K_ATC_040;
+ switch (size) {
+ case 1:
+ env->mmu.ssw |= M68K_BA_SIZE_BYTE;
+ break;
+ case 2:
+ env->mmu.ssw |= M68K_BA_SIZE_WORD;
+ break;
+ case 4:
+ env->mmu.ssw |= M68K_BA_SIZE_LONG;
+ break;
+ }
+ if (access_type & ACCESS_SUPER) {
+ env->mmu.ssw |= M68K_TM_040_SUPER;
+ }
+ if (access_type & ACCESS_CODE) {
+ env->mmu.ssw |= M68K_TM_040_CODE;
+ } else {
+ env->mmu.ssw |= M68K_TM_040_DATA;
+ }
+ if (!(access_type & ACCESS_STORE)) {
+ env->mmu.ssw |= M68K_RW_040;
+ }
+
+ cs->exception_index = EXCP_ACCESS;
+ env->mmu.ar = address;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+uint32_t HELPER(bitrev)(uint32_t x)
+{
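+ /*
+ * Swap adjacent bits, bit pairs and nibbles, then reverse the byte
+ * order; together these reverse all 32 bits.
+ */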
+ x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau);
+ x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu);
+ x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u);
+ return bswap32(x);
+}
+
+uint32_t HELPER(ff1)(uint32_t x)
+{
+ int n;
+ for (n = 32; x; n--)
+ x >>= 1;
+ return n;
+}
+
+uint32_t HELPER(sats)(uint32_t val, uint32_t v)
+{
+ /* On overflow, saturate: the result takes the sign opposite to the original value. */
+ if ((int32_t)v < 0) {
+ val = (((int32_t)val) >> 31) ^ SIGNBIT;
+ }
+ return val;
+}
+
+void cpu_m68k_set_sr(CPUM68KState *env, uint32_t sr)
+{
+ env->sr = sr & 0xffe0;
+ cpu_m68k_set_ccr(env, sr);
+ m68k_switch_sp(env);
+}
+
+void HELPER(set_sr)(CPUM68KState *env, uint32_t val)
+{
+ cpu_m68k_set_sr(env, val);
+}
+
+/* MAC unit. */
+/*
+ * FIXME: The MAC unit implementation is a bit of a mess. Some helpers
+ * take values, others take register numbers and manipulate the contents
+ * in-place.
+ */
+void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src)
+{
+ uint32_t mask;
+ env->macc[dest] = env->macc[src];
+ mask = MACSR_PAV0 << dest;
+ if (env->macsr & (MACSR_PAV0 << src))
+ env->macsr |= mask;
+ else
+ env->macsr &= ~mask;
+}
+
+uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ int64_t product;
+ int64_t res;
+
+ product = (uint64_t)op1 * op2;
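+ /*
+ * Sign-extend the low 40 bits of the product; if that changes the
+ * value, the product overflowed the accumulator range.
+ */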
+ res = (product << 24) >> 24;
+ if (res != product) {
+ env->macsr |= MACSR_V;
+ if (env->macsr & MACSR_OMC) {
+ /* Make sure the accumulate operation overflows. */
+ if (product < 0)
+ res = ~(1ll << 50);
+ else
+ res = 1ll << 50;
+ }
+ }
+ return res;
+}
+
+uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ uint64_t product;
+
+ product = (uint64_t)op1 * op2;
+ if (product & (0xffffffull << 40)) {
+ env->macsr |= MACSR_V;
+ if (env->macsr & MACSR_OMC) {
+ /* Make sure the accumulate operation overflows. */
+ product = 1ll << 50;
+ } else {
+ product &= ((1ull << 40) - 1);
+ }
+ }
+ return product;
+}
+
+uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ uint64_t product;
+ uint32_t remainder;
+
+ product = (uint64_t)op1 * op2;
+ if (env->macsr & MACSR_RT) {
+ remainder = product & 0xffffff;
+ product >>= 24;
+ if (remainder > 0x800000)
+ product++;
+ else if (remainder == 0x800000)
+ product += (product & 1);
+ } else {
+ product >>= 24;
+ }
+ return product;
+}
+
+void HELPER(macsats)(CPUM68KState *env, uint32_t acc)
+{
+ int64_t tmp;
+ int64_t result;
+ tmp = env->macc[acc];
+ result = ((tmp << 16) >> 16);
+ if (result != tmp) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ /*
+ * The result is saturated to 32 bits, despite overflow occurring
+ * at 48 bits. Seems weird, but that's what the hardware docs
+ * say.
+ */
+ result = (result >> 63) ^ 0x7fffffff;
+ }
+ }
+ env->macc[acc] = result;
+}
+
+void HELPER(macsatu)(CPUM68KState *env, uint32_t acc)
+{
+ uint64_t val;
+
+ val = env->macc[acc];
+ if (val & (0xffffull << 48)) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ if (val > (1ull << 53))
+ val = 0;
+ else
+ val = (1ull << 48) - 1;
+ } else {
+ val &= ((1ull << 48) - 1);
+ }
+ }
+ env->macc[acc] = val;
+}
+
+void HELPER(macsatf)(CPUM68KState *env, uint32_t acc)
+{
+ int64_t sum;
+ int64_t result;
+
+ sum = env->macc[acc];
+ result = (sum << 16) >> 16;
+ if (result != sum) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ result = (result >> 63) ^ 0x7fffffffffffll;
+ }
+ }
+ env->macc[acc] = result;
+}
+
+void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc)
+{
+ uint64_t val;
+ val = env->macc[acc];
+ if (val == 0) {
+ env->macsr |= MACSR_Z;
+ } else if (val & (1ull << 47)) {
+ env->macsr |= MACSR_N;
+ }
+ if (env->macsr & (MACSR_PAV0 << acc)) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_FI) {
+ val = ((int64_t)val) >> 40;
+ if (val != 0 && val != -1)
+ env->macsr |= MACSR_EV;
+ } else if (env->macsr & MACSR_SU) {
+ val = ((int64_t)val) >> 32;
+ if (val != 0 && val != -1)
+ env->macsr |= MACSR_EV;
+ } else {
+ if ((val >> 32) != 0)
+ env->macsr |= MACSR_EV;
+ }
+}
+
+#define EXTSIGN(val, index) ( \
+ (index == 0) ? (int8_t)(val) : ((index == 1) ? (int16_t)(val) : (val)) \
+)
+
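+/*
+ * Reconstruct the CCR from the lazy condition-code state: for ADD/SUB
+ * the result is kept in n and the second operand in v (carry in x);
+ * for CMP the two operands are kept in n and v. The remaining flags
+ * are rederived here.
+ */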
+#define COMPUTE_CCR(op, x, n, z, v, c) do { \
+ switch (op) { \
+ case CC_OP_FLAGS: \
+ /* Everything in place. */ \
+ break; \
+ case CC_OP_ADDB: \
+ case CC_OP_ADDW: \
+ case CC_OP_ADDL: \
+ res = n; \
+ src2 = v; \
+ src1 = EXTSIGN(res - src2, op - CC_OP_ADDB); \
+ c = x; \
+ z = n; \
+ v = (res ^ src1) & ~(src1 ^ src2); \
+ break; \
+ case CC_OP_SUBB: \
+ case CC_OP_SUBW: \
+ case CC_OP_SUBL: \
+ res = n; \
+ src2 = v; \
+ src1 = EXTSIGN(res + src2, op - CC_OP_SUBB); \
+ c = x; \
+ z = n; \
+ v = (res ^ src1) & (src1 ^ src2); \
+ break; \
+ case CC_OP_CMPB: \
+ case CC_OP_CMPW: \
+ case CC_OP_CMPL: \
+ src1 = n; \
+ src2 = v; \
+ res = EXTSIGN(src1 - src2, op - CC_OP_CMPB); \
+ n = res; \
+ z = res; \
+ c = src1 < src2; \
+ v = (res ^ src1) & (src1 ^ src2); \
+ break; \
+ case CC_OP_LOGIC: \
+ c = v = 0; \
+ z = n; \
+ break; \
+ default: \
+ cpu_abort(env_cpu(env), "Bad CC_OP %d", op); \
+ } \
+} while (0)
+
+uint32_t cpu_m68k_get_ccr(CPUM68KState *env)
+{
+ uint32_t x, c, n, z, v;
+ uint32_t res, src1, src2;
+
+ x = env->cc_x;
+ n = env->cc_n;
+ z = env->cc_z;
+ v = env->cc_v;
+ c = env->cc_c;
+
+ COMPUTE_CCR(env->cc_op, x, n, z, v, c);
+
+ n = n >> 31;
+ z = (z == 0);
+ v = v >> 31;
+
+ return x * CCF_X + n * CCF_N + z * CCF_Z + v * CCF_V + c * CCF_C;
+}
+
+uint32_t HELPER(get_ccr)(CPUM68KState *env)
+{
+ return cpu_m68k_get_ccr(env);
+}
+
+void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t ccr)
+{
+ env->cc_x = (ccr & CCF_X ? 1 : 0);
+ env->cc_n = (ccr & CCF_N ? -1 : 0);
+ env->cc_z = (ccr & CCF_Z ? 0 : 1);
+ env->cc_v = (ccr & CCF_V ? -1 : 0);
+ env->cc_c = (ccr & CCF_C ? 1 : 0);
+ env->cc_op = CC_OP_FLAGS;
+}
+
+void HELPER(set_ccr)(CPUM68KState *env, uint32_t ccr)
+{
+ cpu_m68k_set_ccr(env, ccr);
+}
+
+void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op)
+{
+ uint32_t res, src1, src2;
+
+ COMPUTE_CCR(cc_op, env->cc_x, env->cc_n, env->cc_z, env->cc_v, env->cc_c);
+ env->cc_op = CC_OP_FLAGS;
+}
+
+uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val)
+{
+ int rem;
+ uint32_t result;
+
+ if (env->macsr & MACSR_SU) {
+ /* 16-bit rounding. */
+ rem = val & 0xffffff;
+ val = (val >> 24) & 0xffffu;
+ if (rem > 0x800000)
+ val++;
+ else if (rem == 0x800000)
+ val += (val & 1);
+ } else if (env->macsr & MACSR_RT) {
+ /* 32-bit rounding. */
+ rem = val & 0xff;
+ val >>= 8;
+ if (rem > 0x80)
+ val++;
+ else if (rem == 0x80)
+ val += (val & 1);
+ } else {
+ /* No rounding. */
+ val >>= 8;
+ }
+ if (env->macsr & MACSR_OMC) {
+ /* Saturate. */
+ if (env->macsr & MACSR_SU) {
+ if (val != (uint16_t) val) {
+ result = ((val >> 63) ^ 0x7fff) & 0xffff;
+ } else {
+ result = val & 0xffff;
+ }
+ } else {
+ if (val != (uint32_t)val) {
+ result = ((uint32_t)(val >> 63) & 0x7fffffff);
+ } else {
+ result = (uint32_t)val;
+ }
+ }
+ } else {
+ /* No saturation. */
+ if (env->macsr & MACSR_SU) {
+ result = val & 0xffff;
+ } else {
+ result = (uint32_t)val;
+ }
+ }
+ return result;
+}
+
+uint32_t HELPER(get_macs)(uint64_t val)
+{
+ if (val == (int32_t)val) {
+ return (int32_t)val;
+ } else {
+ return (val >> 61) ^ ~SIGNBIT;
+ }
+}
+
+uint32_t HELPER(get_macu)(uint64_t val)
+{
+ if ((val >> 32) == 0) {
+ return (uint32_t)val;
+ } else {
+ return 0xffffffffu;
+ }
+}
+
+uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc)
+{
+ uint32_t val;
+ val = env->macc[acc] & 0x00ff;
+ val |= (env->macc[acc] >> 32) & 0xff00;
+ val |= (env->macc[acc + 1] << 16) & 0x00ff0000;
+ val |= (env->macc[acc + 1] >> 16) & 0xff000000;
+ return val;
+}
+
+uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc)
+{
+ uint32_t val;
+ val = (env->macc[acc] >> 32) & 0xffff;
+ val |= (env->macc[acc + 1] >> 16) & 0xffff0000;
+ return val;
+}
+
+void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ int64_t res;
+ int32_t tmp;
+ res = env->macc[acc] & 0xffffffff00ull;
+ tmp = (int16_t)(val & 0xff00);
+ res |= ((int64_t)tmp) << 32;
+ res |= val & 0xff;
+ env->macc[acc] = res;
+ res = env->macc[acc + 1] & 0xffffffff00ull;
+ tmp = (val & 0xff000000);
+ res |= ((int64_t)tmp) << 16;
+ res |= (val >> 16) & 0xff;
+ env->macc[acc + 1] = res;
+}
+
+void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ int64_t res;
+ int32_t tmp;
+ res = (uint32_t)env->macc[acc];
+ tmp = (int16_t)val;
+ res |= ((int64_t)tmp) << 32;
+ env->macc[acc] = res;
+ res = (uint32_t)env->macc[acc + 1];
+ tmp = val & 0xffff0000;
+ res |= (int64_t)tmp << 16;
+ env->macc[acc + 1] = res;
+}
+
+void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ uint64_t res;
+ res = (uint32_t)env->macc[acc];
+ res |= ((uint64_t)(val & 0xffff)) << 32;
+ env->macc[acc] = res;
+ res = (uint32_t)env->macc[acc + 1];
+ res |= (uint64_t)(val & 0xffff0000) << 16;
+ env->macc[acc + 1] = res;
+}
+
+#if defined(CONFIG_SOFTMMU)
+void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
+{
+ hwaddr physical;
+ int access_type;
+ int prot;
+ int ret;
+ target_ulong page_size;
+
+ access_type = ACCESS_PTEST;
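+ /*
+ * DFC holds the 3-bit function code: bit 2 selects supervisor access,
+ * and a low field of 2 selects program (code) space.
+ */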
+ if (env->dfc & 4) {
+ access_type |= ACCESS_SUPER;
+ }
+ if ((env->dfc & 3) == 2) {
+ access_type |= ACCESS_CODE;
+ }
+ if (!is_read) {
+ access_type |= ACCESS_STORE;
+ }
+
+ env->mmu.mmusr = 0;
+ env->mmu.ssw = 0;
+ ret = get_physical_address(env, &physical, &prot, addr,
+ access_type, &page_size);
+ if (ret == 0) {
+ tlb_set_page(env_cpu(env), addr & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK,
+ prot, access_type & ACCESS_SUPER ?
+ MMU_KERNEL_IDX : MMU_USER_IDX, page_size);
+ }
+}
+
+void HELPER(pflush)(CPUM68KState *env, uint32_t addr, uint32_t opmode)
+{
+ CPUState *cs = env_cpu(env);
+
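+ /*
+ * QEMU's TLB does not track global entries, so the "all except global"
+ * variants flush everything.
+ */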
+ switch (opmode) {
+ case 0: /* Flush page entry if not global */
+ case 1: /* Flush page entry */
+ tlb_flush_page(cs, addr);
+ break;
+ case 2: /* Flush all except global entries */
+ tlb_flush(cs);
+ break;
+ case 3: /* Flush all entries */
+ tlb_flush(cs);
+ break;
+ }
+}
+
+void HELPER(reset)(CPUM68KState *env)
+{
+ /* FIXME: reset all except CPU */
+}
+#endif
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
new file mode 100644
index 000000000..9842eeaa9
--- /dev/null
+++ b/target/m68k/helper.h
@@ -0,0 +1,130 @@
+DEF_HELPER_1(bitrev, i32, i32)
+DEF_HELPER_1(ff1, i32, i32)
+DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_3(divuw, void, env, int, i32)
+DEF_HELPER_3(divsw, void, env, int, s32)
+DEF_HELPER_4(divul, void, env, int, int, i32)
+DEF_HELPER_4(divsl, void, env, int, int, s32)
+DEF_HELPER_4(divull, void, env, int, int, i32)
+DEF_HELPER_4(divsll, void, env, int, int, s32)
+DEF_HELPER_2(set_sr, void, env, i32)
+DEF_HELPER_3(cf_movec_to, void, env, i32, i32)
+DEF_HELPER_3(m68k_movec_to, void, env, i32, i32)
+DEF_HELPER_2(m68k_movec_from, i32, env, i32)
+DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
+DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
+DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32)
+
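+/* FPReg helper arguments are passed by pointer. */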
+#define dh_alias_fp ptr
+#define dh_ctype_fp FPReg *
+
+DEF_HELPER_3(exts32, void, env, fp, s32)
+DEF_HELPER_3(extf32, void, env, fp, f32)
+DEF_HELPER_3(extf64, void, env, fp, f64)
+DEF_HELPER_2(redf32, f32, env, fp)
+DEF_HELPER_2(redf64, f64, env, fp)
+DEF_HELPER_2(reds32, s32, env, fp)
+
+DEF_HELPER_3(fsround, void, env, fp, fp)
+DEF_HELPER_3(fdround, void, env, fp, fp)
+DEF_HELPER_3(firound, void, env, fp, fp)
+DEF_HELPER_3(fitrunc, void, env, fp, fp)
+DEF_HELPER_3(fsqrt, void, env, fp, fp)
+DEF_HELPER_3(fssqrt, void, env, fp, fp)
+DEF_HELPER_3(fdsqrt, void, env, fp, fp)
+DEF_HELPER_3(fabs, void, env, fp, fp)
+DEF_HELPER_3(fsabs, void, env, fp, fp)
+DEF_HELPER_3(fdabs, void, env, fp, fp)
+DEF_HELPER_3(fneg, void, env, fp, fp)
+DEF_HELPER_3(fsneg, void, env, fp, fp)
+DEF_HELPER_3(fdneg, void, env, fp, fp)
+DEF_HELPER_4(fadd, void, env, fp, fp, fp)
+DEF_HELPER_4(fsadd, void, env, fp, fp, fp)
+DEF_HELPER_4(fdadd, void, env, fp, fp, fp)
+DEF_HELPER_4(fsub, void, env, fp, fp, fp)
+DEF_HELPER_4(fssub, void, env, fp, fp, fp)
+DEF_HELPER_4(fdsub, void, env, fp, fp, fp)
+DEF_HELPER_4(fmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fsmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fdmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fsglmul, void, env, fp, fp, fp)
+DEF_HELPER_4(fdiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fsdiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fddiv, void, env, fp, fp, fp)
+DEF_HELPER_4(fsgldiv, void, env, fp, fp, fp)
+DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_RWG, void, env, fp, fp)
+DEF_HELPER_FLAGS_2(set_fpcr, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_2(ftst, TCG_CALL_NO_RWG, void, env, fp)
+DEF_HELPER_3(fconst, void, env, fp, i32)
+DEF_HELPER_3(fmovemx_st_predec, i32, env, i32, i32)
+DEF_HELPER_3(fmovemx_st_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemx_ld_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_st_predec, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_st_postinc, i32, env, i32, i32)
+DEF_HELPER_3(fmovemd_ld_postinc, i32, env, i32, i32)
+DEF_HELPER_4(fmod, void, env, fp, fp, fp)
+DEF_HELPER_4(frem, void, env, fp, fp, fp)
+DEF_HELPER_3(fgetexp, void, env, fp, fp)
+DEF_HELPER_3(fgetman, void, env, fp, fp)
+DEF_HELPER_4(fscale, void, env, fp, fp, fp)
+DEF_HELPER_3(flognp1, void, env, fp, fp)
+DEF_HELPER_3(flogn, void, env, fp, fp)
+DEF_HELPER_3(flog10, void, env, fp, fp)
+DEF_HELPER_3(flog2, void, env, fp, fp)
+DEF_HELPER_3(fetox, void, env, fp, fp)
+DEF_HELPER_3(ftwotox, void, env, fp, fp)
+DEF_HELPER_3(ftentox, void, env, fp, fp)
+DEF_HELPER_3(ftan, void, env, fp, fp)
+DEF_HELPER_3(fsin, void, env, fp, fp)
+DEF_HELPER_3(fcos, void, env, fp, fp)
+DEF_HELPER_4(fsincos, void, env, fp, fp, fp)
+DEF_HELPER_3(fatan, void, env, fp, fp)
+DEF_HELPER_3(fasin, void, env, fp, fp)
+DEF_HELPER_3(facos, void, env, fp, fp)
+DEF_HELPER_3(fatanh, void, env, fp, fp)
+DEF_HELPER_3(fetoxm1, void, env, fp, fp)
+DEF_HELPER_3(ftanh, void, env, fp, fp)
+DEF_HELPER_3(fsinh, void, env, fp, fp)
+DEF_HELPER_3(fcosh, void, env, fp, fp)
+
+DEF_HELPER_3(mac_move, void, env, i32, i32)
+DEF_HELPER_3(macmulf, i64, env, i32, i32)
+DEF_HELPER_3(macmuls, i64, env, i32, i32)
+DEF_HELPER_3(macmulu, i64, env, i32, i32)
+DEF_HELPER_2(macsats, void, env, i32)
+DEF_HELPER_2(macsatu, void, env, i32)
+DEF_HELPER_2(macsatf, void, env, i32)
+DEF_HELPER_2(mac_set_flags, void, env, i32)
+DEF_HELPER_2(set_macsr, void, env, i32)
+DEF_HELPER_2(get_macf, i32, env, i64)
+DEF_HELPER_1(get_macs, i32, i64)
+DEF_HELPER_1(get_macu, i32, i64)
+DEF_HELPER_2(get_mac_extf, i32, env, i32)
+DEF_HELPER_2(get_mac_exti, i32, env, i32)
+DEF_HELPER_3(set_mac_extf, void, env, i32, i32)
+DEF_HELPER_3(set_mac_exts, void, env, i32, i32)
+DEF_HELPER_3(set_mac_extu, void, env, i32, i32)
+
+DEF_HELPER_2(flush_flags, void, env, i32)
+DEF_HELPER_2(set_ccr, void, env, i32)
+DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env)
+DEF_HELPER_2(raise_exception, void, env, i32)
+
+DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(bfexts_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfextu_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
+DEF_HELPER_FLAGS_5(bfins_mem, TCG_CALL_NO_WG, i32, env, i32, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
+
+DEF_HELPER_3(chk, void, env, s32, s32)
+DEF_HELPER_4(chk2, void, env, s32, s32, s32)
+
+#if defined(CONFIG_SOFTMMU)
+DEF_HELPER_3(ptest, void, env, i32, i32)
+DEF_HELPER_3(pflush, void, env, i32, i32)
+DEF_HELPER_FLAGS_1(reset, TCG_CALL_NO_RWG, void, env)
+#endif
diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c
new file mode 100644
index 000000000..44ec7e461
--- /dev/null
+++ b/target/m68k/m68k-semi.c
@@ -0,0 +1,469 @@
+/*
+ * m68k/ColdFire Semihosting syscall interface
+ *
+ * Copyright (c) 2005-2007 CodeSourcery.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#if defined(CONFIG_USER_ONLY)
+#include "qemu.h"
+#define SEMIHOSTING_HEAP_SIZE (128 * 1024 * 1024)
+#else
+#include "exec/softmmu-semi.h"
+#include "hw/boards.h"
+#endif
+#include "qemu/log.h"
+
+#define HOSTED_EXIT 0
+#define HOSTED_INIT_SIM 1
+#define HOSTED_OPEN 2
+#define HOSTED_CLOSE 3
+#define HOSTED_READ 4
+#define HOSTED_WRITE 5
+#define HOSTED_LSEEK 6
+#define HOSTED_RENAME 7
+#define HOSTED_UNLINK 8
+#define HOSTED_STAT 9
+#define HOSTED_FSTAT 10
+#define HOSTED_GETTIMEOFDAY 11
+#define HOSTED_ISATTY 12
+#define HOSTED_SYSTEM 13
+
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
+struct m68k_gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+#define GDB_O_RDONLY 0x0
+#define GDB_O_WRONLY 0x1
+#define GDB_O_RDWR 0x2
+#define GDB_O_APPEND 0x8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
+static int translate_openflags(int flags)
+{
+ int hf;
+
+ if (flags & GDB_O_WRONLY)
+ hf = O_WRONLY;
+ else if (flags & GDB_O_RDWR)
+ hf = O_RDWR;
+ else
+ hf = O_RDONLY;
+
+ if (flags & GDB_O_APPEND) hf |= O_APPEND;
+ if (flags & GDB_O_CREAT) hf |= O_CREAT;
+ if (flags & GDB_O_TRUNC) hf |= O_TRUNC;
+ if (flags & GDB_O_EXCL) hf |= O_EXCL;
+
+ return hf;
+}
+
+static void translate_stat(CPUM68KState *env, target_ulong addr, struct stat *s)
+{
+ struct m68k_gdb_stat *p;
+
+ p = lock_user(VERIFY_WRITE, addr, sizeof(struct m68k_gdb_stat), 0);
+ if (!p) {
+ /* FIXME - should this return an error code? */
+ return;
+ }
+ p->gdb_st_dev = cpu_to_be32(s->st_dev);
+ p->gdb_st_ino = cpu_to_be32(s->st_ino);
+ p->gdb_st_mode = cpu_to_be32(s->st_mode);
+ p->gdb_st_nlink = cpu_to_be32(s->st_nlink);
+ p->gdb_st_uid = cpu_to_be32(s->st_uid);
+ p->gdb_st_gid = cpu_to_be32(s->st_gid);
+ p->gdb_st_rdev = cpu_to_be32(s->st_rdev);
+ p->gdb_st_size = cpu_to_be64(s->st_size);
+#ifdef _WIN32
+ /* Windows stat is missing some fields. */
+ p->gdb_st_blksize = 0;
+ p->gdb_st_blocks = 0;
+#else
+ p->gdb_st_blksize = cpu_to_be64(s->st_blksize);
+ p->gdb_st_blocks = cpu_to_be64(s->st_blocks);
+#endif
+ p->gdb_st_atime = cpu_to_be32(s->st_atime);
+ p->gdb_st_mtime = cpu_to_be32(s->st_mtime);
+ p->gdb_st_ctime = cpu_to_be32(s->st_ctime);
+ unlock_user(p, addr, sizeof(struct m68k_gdb_stat));
+}
+
+static void m68k_semi_return_u32(CPUM68KState *env, uint32_t ret, uint32_t err)
+{
+ target_ulong args = env->dregs[1];
+ if (put_user_u32(ret, args) ||
+ put_user_u32(err, args + 4)) {
+ /*
+ * The m68k semihosting ABI does not provide any way to report this
+ * error to the guest, so the best we can do is log it in qemu.
+ * It is always a guest error not to pass us a valid argument block.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
+static void m68k_semi_return_u64(CPUM68KState *env, uint64_t ret, uint32_t err)
+{
+ target_ulong args = env->dregs[1];
+ if (put_user_u32(ret >> 32, args) ||
+ put_user_u32(ret, args + 4) ||
+ put_user_u32(err, args + 8)) {
+ /* No way to report this via m68k semihosting ABI; just log it */
+ qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
+static int m68k_semi_is_fseek;
+
+static void m68k_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (m68k_semi_is_fseek) {
+ /*
+ * FIXME: We've already lost the high bits of the fseek
+ * return value.
+ */
+ m68k_semi_return_u64(env, ret, err);
+ m68k_semi_is_fseek = 0;
+ } else {
+ m68k_semi_return_u32(env, ret, err);
+ }
+}
+
+/*
+ * Read the input value from the argument block; fail the semihosting
+ * call if the memory read fails.
+ */
+#define GET_ARG(n) do { \
+ if (get_user_ual(arg ## n, args + (n) * 4)) { \
+ result = -1; \
+ errno = EFAULT; \
+ goto failed; \
+ } \
+} while (0)
+
+void do_m68k_semihosting(CPUM68KState *env, int nr)
+{
+ uint32_t args;
+ target_ulong arg0, arg1, arg2, arg3;
+ void *p;
+ void *q;
+ uint32_t len;
+ uint32_t result;
+
+ args = env->dregs[1];
+ switch (nr) {
+ case HOSTED_EXIT:
+ gdb_exit(env->dregs[0]);
+ exit(env->dregs[0]);
+ case HOSTED_OPEN:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "open,%s,%x,%x", arg0, (int)arg1,
+ arg2, arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = open(p, translate_openflags(arg2), arg3);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_CLOSE:
+ {
+ /* Ignore attempts to close stdin/out/err. */
+ GET_ARG(0);
+ int fd = arg0;
+ if (fd > 2) {
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "close,%x", arg0);
+ return;
+ } else {
+ result = close(fd);
+ }
+ } else {
+ result = 0;
+ }
+ break;
+ }
+ case HOSTED_READ:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "read,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_WRITE, arg1, len, 0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = read(arg0, p, len);
+ unlock_user(p, arg1, len);
+ }
+ }
+ break;
+ case HOSTED_WRITE:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "write,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_READ, arg1, len, 1);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = write(arg0, p, len);
+ unlock_user(p, arg1, 0); /* the buffer was locked at arg1 */
+ }
+ }
+ break;
+ case HOSTED_LSEEK:
+ {
+ uint64_t off;
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
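+ /* arg1:arg2 carry the 64-bit seek offset, high word first. */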
+ off = (uint32_t)arg2 | ((uint64_t)arg1 << 32);
+ if (use_gdb_syscalls()) {
+ m68k_semi_is_fseek = 1;
+ gdb_do_syscall(m68k_semi_cb, "fseek,%x,%lx,%x",
+ arg0, off, arg3);
+ } else {
+ off = lseek(arg0, off, arg3);
+ m68k_semi_return_u64(env, off, errno);
+ }
+ return;
+ }
+ case HOSTED_RENAME:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "rename,%s,%s",
+ arg0, (int)arg1, arg2, (int)arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ q = lock_user_string(arg2);
+ if (!p || !q) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = rename(p, q);
+ }
+ unlock_user(p, arg0, 0);
+ unlock_user(q, arg2, 0);
+ }
+ break;
+ case HOSTED_UNLINK:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "unlink,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = unlink(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_STAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "stat,%s,%x",
+ arg0, (int)arg1, arg2);
+ return;
+ } else {
+ struct stat s;
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = stat(p, &s);
+ unlock_user(p, arg0, 0);
+ }
+ if (result == 0) {
+ translate_stat(env, arg2, &s);
+ }
+ }
+ break;
+ case HOSTED_FSTAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "fstat,%x,%x",
+ arg0, arg1);
+ return;
+ } else {
+ struct stat s;
+ result = fstat(arg0, &s);
+ if (result == 0) {
+ translate_stat(env, arg1, &s);
+ }
+ }
+ break;
+ case HOSTED_GETTIMEOFDAY:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "gettimeofday,%x,%x",
+ arg0, arg1);
+ return;
+ } else {
+ qemu_timeval tv;
+ struct gdb_timeval *p;
+ result = qemu_gettimeofday(&tv);
+ if (result == 0) { /* qemu_gettimeofday() returns 0 on success */
+ if (!(p = lock_user(VERIFY_WRITE,
+ arg0, sizeof(struct gdb_timeval), 0))) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
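+ /* gdb's struct gdb_timeval has a 32-bit tv_sec and a 64-bit tv_usec. */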
+ p->tv_sec = cpu_to_be32(tv.tv_sec);
+ p->tv_usec = cpu_to_be64(tv.tv_usec);
+ unlock_user(p, arg0, sizeof(struct gdb_timeval));
+ }
+ }
+ }
+ break;
+ case HOSTED_ISATTY:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "isatty,%x", arg0);
+ return;
+ } else {
+ result = isatty(arg0);
+ }
+ break;
+ case HOSTED_SYSTEM:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "system,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = system(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_INIT_SIM:
+#if defined(CONFIG_USER_ONLY)
+ {
+ CPUState *cs = env_cpu(env);
+ TaskState *ts = cs->opaque;
+ /* Allocate the heap using sbrk. */
+ if (!ts->heap_limit) {
+ abi_ulong ret;
+ uint32_t size;
+ uint32_t base;
+
+ base = do_brk(0);
+ size = SEMIHOSTING_HEAP_SIZE;
+ /* Try a big heap, and reduce the size if that fails. */
+ for (;;) {
+ ret = do_brk(base + size);
+ if (ret >= (base + size)) {
+ break;
+ }
+ size >>= 1;
+ }
+ ts->heap_limit = base + size;
+ }
+ /*
+ * This call may happen before we have writable memory, so return
+ * values directly in registers.
+ */
+ env->dregs[1] = ts->heap_limit;
+ env->aregs[7] = ts->stack_base;
+ }
+#else
+ /*
+ * FIXME: This is wrong for boards where RAM does not start at
+ * address zero.
+ */
+ env->dregs[1] = current_machine->ram_size;
+ env->aregs[7] = current_machine->ram_size;
+#endif
+ return;
+ default:
+ cpu_abort(env_cpu(env), "Unsupported semihosting syscall %d\n", nr);
+ result = 0;
+ }
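+ /* Reached on GET_ARG failure and by falling out of the switch. */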
+failed:
+ m68k_semi_return_u32(env, result, errno);
+}
diff --git a/target/m68k/meson.build b/target/m68k/meson.build
new file mode 100644
index 000000000..05cd9fbd1
--- /dev/null
+++ b/target/m68k/meson.build
@@ -0,0 +1,17 @@
+m68k_ss = ss.source_set()
+m68k_ss.add(files(
+ 'cpu.c',
+ 'fpu_helper.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'm68k-semi.c',
+ 'op_helper.c',
+ 'softfloat.c',
+ 'translate.c',
+))
+
+m68k_softmmu_ss = ss.source_set()
+m68k_softmmu_ss.add(files('monitor.c'))
+
+target_arch += {'m68k': m68k_ss}
+target_softmmu_arch += {'m68k': m68k_softmmu_ss}
diff --git a/target/m68k/monitor.c b/target/m68k/monitor.c
new file mode 100644
index 000000000..2bdf6acae
--- /dev/null
+++ b/target/m68k/monitor.c
@@ -0,0 +1,62 @@
+/*
+ * QEMU monitor for m68k
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/hmp-target.h"
+#include "monitor/monitor.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env(mon);
+
+ if (!env1) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+
+ dump_mmu(env1);
+}
+
+static const MonitorDef monitor_defs[] = {
+ { "d0", offsetof(CPUM68KState, dregs[0]) },
+ { "d1", offsetof(CPUM68KState, dregs[1]) },
+ { "d2", offsetof(CPUM68KState, dregs[2]) },
+ { "d3", offsetof(CPUM68KState, dregs[3]) },
+ { "d4", offsetof(CPUM68KState, dregs[4]) },
+ { "d5", offsetof(CPUM68KState, dregs[5]) },
+ { "d6", offsetof(CPUM68KState, dregs[6]) },
+ { "d7", offsetof(CPUM68KState, dregs[7]) },
+ { "a0", offsetof(CPUM68KState, aregs[0]) },
+ { "a1", offsetof(CPUM68KState, aregs[1]) },
+ { "a2", offsetof(CPUM68KState, aregs[2]) },
+ { "a3", offsetof(CPUM68KState, aregs[3]) },
+ { "a4", offsetof(CPUM68KState, aregs[4]) },
+ { "a5", offsetof(CPUM68KState, aregs[5]) },
+ { "a6", offsetof(CPUM68KState, aregs[6]) },
+ { "a7", offsetof(CPUM68KState, aregs[7]) },
+ { "pc", offsetof(CPUM68KState, pc) },
+ { "sr", offsetof(CPUM68KState, sr) },
+ { "ssp", offsetof(CPUM68KState, sp[0]) },
+ { "usp", offsetof(CPUM68KState, sp[1]) },
+ { "isp", offsetof(CPUM68KState, sp[2]) },
+ { "sfc", offsetof(CPUM68KState, sfc) },
+ { "dfc", offsetof(CPUM68KState, dfc) },
+ { "urp", offsetof(CPUM68KState, mmu.urp) },
+ { "srp", offsetof(CPUM68KState, mmu.srp) },
+ { "dttr0", offsetof(CPUM68KState, mmu.ttr[M68K_DTTR0]) },
+ { "dttr1", offsetof(CPUM68KState, mmu.ttr[M68K_DTTR1]) },
+ { "ittr0", offsetof(CPUM68KState, mmu.ttr[M68K_ITTR0]) },
+ { "ittr1", offsetof(CPUM68KState, mmu.ttr[M68K_ITTR1]) },
+ { "mmusr", offsetof(CPUM68KState, mmu.mmusr) },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
new file mode 100644
index 000000000..cfbc987ba
--- /dev/null
+++ b/target/m68k/op_helper.c
@@ -0,0 +1,1110 @@
+/*
+ * M68K helper routines
+ *
+ * Copyright (c) 2007 CodeSourcery
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "semihosting/semihost.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+static void cf_rte(CPUM68KState *env)
+{
+ uint32_t sp;
+ uint32_t fmt;
+
+ sp = env->aregs[7];
+ fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+ env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
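+ /* The frame's format field holds the low two bits of the original SP. */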
+ sp |= (fmt >> 28) & 3;
+ env->aregs[7] = sp + 8;
+
+ cpu_m68k_set_sr(env, fmt);
+}
+
+static void m68k_rte(CPUM68KState *env)
+{
+ uint32_t sp;
+ uint16_t fmt;
+ uint16_t sr;
+
+ sp = env->aregs[7];
+throwaway:
+ sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+ sp += 2;
+ env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+ sp += 4;
+ if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
+ /* all except 68000 */
+ fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
+ sp += 2;
+ switch (fmt >> 12) {
+ case 0:
+ break;
+ case 1:
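+ /* Format 1: throwaway frame; restore SR and pop the frame beneath it. */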
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr);
+ goto throwaway;
+ case 2:
+ case 3:
+ sp += 4;
+ break;
+ case 4:
+ sp += 8;
+ break;
+ case 7:
+ sp += 52;
+ break;
+ }
+ }
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr);
+}
+
+static const char *m68k_exception_name(int index)
+{
+ switch (index) {
+ case EXCP_ACCESS:
+ return "Access Fault";
+ case EXCP_ADDRESS:
+ return "Address Error";
+ case EXCP_ILLEGAL:
+ return "Illegal Instruction";
+ case EXCP_DIV0:
+ return "Divide by Zero";
+ case EXCP_CHK:
+ return "CHK/CHK2";
+ case EXCP_TRAPCC:
+ return "FTRAPcc, TRAPcc, TRAPV";
+ case EXCP_PRIVILEGE:
+ return "Privilege Violation";
+ case EXCP_TRACE:
+ return "Trace";
+ case EXCP_LINEA:
+ return "A-Line";
+ case EXCP_LINEF:
+ return "F-Line";
+ case EXCP_DEBEGBP: /* 68020/030 only */
+ return "Copro Protocol Violation";
+ case EXCP_FORMAT:
+ return "Format Error";
+ case EXCP_UNINITIALIZED:
+ return "Uninitialized Interrupt";
+ case EXCP_SPURIOUS:
+ return "Spurious Interrupt";
+ case EXCP_INT_LEVEL_1:
+ return "Level 1 Interrupt";
+ case EXCP_INT_LEVEL_1 + 1:
+ return "Level 2 Interrupt";
+ case EXCP_INT_LEVEL_1 + 2:
+ return "Level 3 Interrupt";
+ case EXCP_INT_LEVEL_1 + 3:
+ return "Level 4 Interrupt";
+ case EXCP_INT_LEVEL_1 + 4:
+ return "Level 5 Interrupt";
+ case EXCP_INT_LEVEL_1 + 5:
+ return "Level 6 Interrupt";
+ case EXCP_INT_LEVEL_1 + 6:
+ return "Level 7 Interrupt";
+ case EXCP_TRAP0:
+ return "TRAP #0";
+ case EXCP_TRAP0 + 1:
+ return "TRAP #1";
+ case EXCP_TRAP0 + 2:
+ return "TRAP #2";
+ case EXCP_TRAP0 + 3:
+ return "TRAP #3";
+ case EXCP_TRAP0 + 4:
+ return "TRAP #4";
+ case EXCP_TRAP0 + 5:
+ return "TRAP #5";
+ case EXCP_TRAP0 + 6:
+ return "TRAP #6";
+ case EXCP_TRAP0 + 7:
+ return "TRAP #7";
+ case EXCP_TRAP0 + 8:
+ return "TRAP #8";
+ case EXCP_TRAP0 + 9:
+ return "TRAP #9";
+ case EXCP_TRAP0 + 10:
+ return "TRAP #10";
+ case EXCP_TRAP0 + 11:
+ return "TRAP #11";
+ case EXCP_TRAP0 + 12:
+ return "TRAP #12";
+ case EXCP_TRAP0 + 13:
+ return "TRAP #13";
+ case EXCP_TRAP0 + 14:
+ return "TRAP #14";
+ case EXCP_TRAP0 + 15:
+ return "TRAP #15";
+ case EXCP_FP_BSUN:
+ return "FP Branch/Set on unordered condition";
+ case EXCP_FP_INEX:
+ return "FP Inexact Result";
+ case EXCP_FP_DZ:
+ return "FP Divide by Zero";
+ case EXCP_FP_UNFL:
+ return "FP Underflow";
+ case EXCP_FP_OPERR:
+ return "FP Operand Error";
+ case EXCP_FP_OVFL:
+ return "FP Overflow";
+ case EXCP_FP_SNAN:
+ return "FP Signaling NAN";
+ case EXCP_FP_UNIMP:
+ return "FP Unimplemented Data Type";
+ case EXCP_MMU_CONF: /* 68030/68851 only */
+ return "MMU Configuration Error";
+ case EXCP_MMU_ILLEGAL: /* 68851 only */
+ return "MMU Illegal Operation";
+ case EXCP_MMU_ACCESS: /* 68851 only */
+ return "MMU Access Level Violation";
+ case 64 ... 255:
+ return "User Defined Vector";
+ }
+ return "Unassigned";
+}
+
+static void cf_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t sp;
+ uint32_t sr;
+ uint32_t fmt;
+ uint32_t retaddr;
+ uint32_t vector;
+
+ fmt = 0;
+ retaddr = env->pc;
+
+ if (!is_hw) {
+ switch (cs->exception_index) {
+ case EXCP_RTE:
+ /* Return from an exception. */
+ cf_rte(env);
+ return;
+ case EXCP_HALT_INSN:
+ if (semihosting_enabled()
+ && (env->sr & SR_S) != 0
+ && (env->pc & 3) == 0
+ && cpu_lduw_code(env, env->pc - 4) == 0x4e71
+ && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
+ env->pc += 4;
+ do_m68k_semihosting(env, env->dregs[0]);
+ return;
+ }
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+ return;
+ }
+ if (cs->exception_index >= EXCP_TRAP0
+ && cs->exception_index <= EXCP_TRAP15) {
+ /* Move the PC after the trap instruction. */
+ retaddr += 2;
+ }
+ }
+
+ vector = cs->exception_index << 2;
+
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
+ ++count, m68k_exception_name(cs->exception_index),
+ vector, env->pc, env->aregs[7], sr);
+ }
+
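+ /*
+ * Assemble the ColdFire format/vector word: format 4 in the top
+ * nibble (the saved SP's low bits are merged in below), the vector
+ * offset at bit 16, and the saved SR in the low 16 bits.
+ */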
+ fmt |= 0x40000000;
+ fmt |= vector << 16;
+ fmt |= sr;
+
+ env->sr |= SR_S;
+ if (is_hw) {
+ env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
+ env->sr &= ~SR_M;
+ }
+ m68k_switch_sp(env);
+ sp = env->aregs[7];
+ fmt |= (sp & 3) << 28;
+
+ /* ??? This could cause MMU faults. */
+ sp &= ~3;
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
+ env->aregs[7] = sp;
+ /* Jump to vector. */
+ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
+}
+
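+/*
+ * Push an exception stack frame of the given format: format 4 saves
+ * the PC and a fault address, formats 2 and 3 save one address, and
+ * every frame except a 68000 one gains a format/vector word; all
+ * formats end by pushing the return address and the SR.
+ */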
+static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
+ uint16_t format, uint16_t sr,
+ uint32_t addr, uint32_t retaddr)
+{
+ if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
+ /* all except 68000 */
+ CPUState *cs = env_cpu(env);
+ switch (format) {
+ case 4:
+ *sp -= 4;
+ cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
+ *sp -= 4;
+ cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
+ break;
+ case 3:
+ case 2:
+ *sp -= 4;
+ cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
+ break;
+ }
+ *sp -= 2;
+ cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
+ MMU_KERNEL_IDX, 0);
+ }
+ *sp -= 4;
+ cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
+ *sp -= 2;
+ cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
+}
+
+static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t sp;
+ uint32_t retaddr;
+ uint32_t vector;
+ uint16_t sr, oldsr;
+
+ retaddr = env->pc;
+
+ if (!is_hw) {
+ switch (cs->exception_index) {
+ case EXCP_RTE:
+ /* Return from an exception. */
+ m68k_rte(env);
+ return;
+ case EXCP_TRAP0 ... EXCP_TRAP15:
+ /* Move the PC after the trap instruction. */
+ retaddr += 2;
+ break;
+ }
+ }
+
+ vector = cs->exception_index << 2;
+
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
+ ++count, m68k_exception_name(cs->exception_index),
+ vector, env->pc, env->aregs[7], sr);
+ }
+
+ /*
+ * MC68040UM/AD, chapter 9.3.10
+ */
+
+ /* "the processor first make an internal copy" */
+ oldsr = sr;
+ /* "set the mode to supervisor" */
+ sr |= SR_S;
+ /* "suppress tracing" */
+ sr &= ~SR_T;
+ /* "sets the processor interrupt mask" */
+ if (is_hw) {
+ sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
+ }
+ cpu_m68k_set_sr(env, sr);
+ sp = env->aregs[7];
+
+ if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
+ sp &= ~1;
+ }
+
+ if (cs->exception_index == EXCP_ACCESS) {
+ if (env->mmu.fault) {
+ cpu_abort(cs, "DOUBLE MMU FAULT\n");
+ }
+ env->mmu.fault = true;
+ /* push data 3 */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* push data 2 */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* push data 1 */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 1 / push data 0 */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 1 address */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 2 data */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 2 address */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 3 data */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 3 address */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
+ /* fault address */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
+ /* write back 1 status */
+ sp -= 2;
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 2 status */
+ sp -= 2;
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* write back 3 status */
+ sp -= 2;
+ cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
+ /* special status word */
+ sp -= 2;
+ cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
+ /* effective address */
+ sp -= 4;
+ cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
+
+ do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
+ env->mmu.fault = false;
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ qemu_log(" "
+ "ssw: %08x ea: %08x sfc: %d dfc: %d\n",
+ env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc);
+ }
+ } else if (cs->exception_index == EXCP_ADDRESS) {
+ do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
+ } else if (cs->exception_index == EXCP_ILLEGAL ||
+ cs->exception_index == EXCP_DIV0 ||
+ cs->exception_index == EXCP_CHK ||
+ cs->exception_index == EXCP_TRAPCC ||
+ cs->exception_index == EXCP_TRACE) {
+ /* FIXME: addr is not only env->pc */
+ do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr);
+ } else if (is_hw && oldsr & SR_M &&
+ cs->exception_index >= EXCP_SPURIOUS &&
+ cs->exception_index <= EXCP_INT_LEVEL_7) {
+ do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
+ oldsr = sr;
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr &= ~SR_M);
+ sp = env->aregs[7] & ~1;
+ do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
+ } else {
+ do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
+ }
+
+ env->aregs[7] = sp;
+ /* Jump to vector. */
+ env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
+}
+
+static void do_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ m68k_interrupt_all(env, is_hw);
+ return;
+ }
+ cf_interrupt_all(env, is_hw);
+}
+
+void m68k_cpu_do_interrupt(CPUState *cs)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ do_interrupt_all(env, 0);
+}
+
+static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
+{
+ do_interrupt_all(env, 1);
+}
+
+void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ cpu_restore_state(cs, retaddr, true);
+
+ if (m68k_feature(env, M68K_FEATURE_M68040)) {
+ env->mmu.mmusr = 0;
+
+ /*
+ * According to the MC68040 user's manual, the ATC bit of the SSW is
+ * used to distinguish between ATC faults and physical bus errors.
+ * In the case of a bus error, e.g. during a NuBus read from an empty
+ * slot, this bit should not be set.
+ */
+ if (response != MEMTX_DECODE_ERROR) {
+ env->mmu.ssw |= M68K_ATC_040;
+ }
+
+ /* FIXME: manage MMU table access error */
+ env->mmu.ssw &= ~M68K_TM_040;
+ if (env->sr & SR_S) { /* SUPERVISOR */
+ env->mmu.ssw |= M68K_TM_040_SUPER;
+ }
+ if (access_type == MMU_INST_FETCH) { /* instruction or data */
+ env->mmu.ssw |= M68K_TM_040_CODE;
+ } else {
+ env->mmu.ssw |= M68K_TM_040_DATA;
+ }
+ env->mmu.ssw &= ~M68K_BA_SIZE_MASK;
+ switch (size) {
+ case 1:
+ env->mmu.ssw |= M68K_BA_SIZE_BYTE;
+ break;
+ case 2:
+ env->mmu.ssw |= M68K_BA_SIZE_WORD;
+ break;
+ case 4:
+ env->mmu.ssw |= M68K_BA_SIZE_LONG;
+ break;
+ }
+
+ if (access_type != MMU_DATA_STORE) {
+ env->mmu.ssw |= M68K_RW_040;
+ }
+
+ env->mmu.ar = addr;
+
+ cs->exception_index = EXCP_ACCESS;
+ cpu_loop_exit(cs);
+ }
+}
+
+bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
+ /*
+ * Real hardware gets the interrupt vector via an IACK cycle
+ * at this point. Current emulated hardware doesn't rely on
+ * this, so we provide/save the vector when the interrupt is
+ * first signalled.
+ */
+ cs->exception_index = env->pending_vector;
+ do_interrupt_m68k_hardirq(env);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = tt;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+static void raise_exception(CPUM68KState *env, int tt)
+{
+ raise_exception_ra(env, tt, 0);
+}
+
+void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
+{
+ raise_exception(env, tt);
+}
+
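+/*
+ * In QEMU's m68k flag scheme, Z is set when cc_z reads as zero and N
+ * is the sign of cc_n, so storing the 16-bit quotient into both tracks
+ * Z and N together; deposit32() then packs remainder:quotient into the
+ * destination register.
+ */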
+void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
+{
+ uint32_t num = env->dregs[destr];
+ uint32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot > 0xffff) {
+ env->cc_v = -1;
+ /*
+ * the real 68040 keeps N and clears Z on overflow,
+ * whereas documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->dregs[destr] = deposit32(quot, 16, 16, rem);
+ env->cc_z = (int16_t)quot;
+ env->cc_n = (int16_t)quot;
+ env->cc_v = 0;
+}
+
+void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
+{
+ int32_t num = env->dregs[destr];
+ uint32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot != (int16_t)quot) {
+ env->cc_v = -1;
+ /* nothing else is modified */
+ /*
+ * the real 68040 keeps N and clears Z on overflow,
+ * whereas documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->dregs[destr] = deposit32(quot, 16, 16, rem);
+ env->cc_z = (int16_t)quot;
+ env->cc_n = (int16_t)quot;
+ env->cc_v = 0;
+}
+
+void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
+{
+ uint32_t num = env->dregs[numr];
+ uint32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0;
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
+
+ if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
+ if (numr == regr) {
+ env->dregs[numr] = quot;
+ } else {
+ env->dregs[regr] = rem;
+ }
+ } else {
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+ }
+}
+
+void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
+{
+ int32_t num = env->dregs[numr];
+ int32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0;
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
+
+ if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
+ if (numr == regr) {
+ env->dregs[numr] = quot;
+ } else {
+ env->dregs[regr] = rem;
+ }
+ } else {
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+ }
+}
+
+void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
+{
+ uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
+ uint64_t quot;
+ uint32_t rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot > 0xffffffffULL) {
+ env->cc_v = -1;
+ /*
+ * the real 68040 keeps N and clears Z on overflow,
+ * whereas documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
+
+ /*
+ * If Dq and Dr are the same, the quotient is returned;
+ * therefore we set Dq last.
+ */
+
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+}
+
+void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
+{
+ int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
+ int64_t quot;
+ int32_t rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot != (int32_t)quot) {
+ env->cc_v = -1;
+ /*
+ * the real 68040 keeps N and clears Z on overflow,
+ * whereas documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
+
+ /*
+ * If Dq and Dr are the same, the quotient is returned;
+ * therefore we set Dq last.
+ */
+
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+}
+
+/* We're executing in a serial context -- no need to be atomic. */
+void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+{
+ uint32_t Dc1 = extract32(regs, 9, 3);
+ uint32_t Dc2 = extract32(regs, 6, 3);
+ uint32_t Du1 = extract32(regs, 3, 3);
+ uint32_t Du2 = extract32(regs, 0, 3);
+ int16_t c1 = env->dregs[Dc1];
+ int16_t c2 = env->dregs[Dc2];
+ int16_t u1 = env->dregs[Du1];
+ int16_t u2 = env->dregs[Du2];
+ int16_t l1, l2;
+ uintptr_t ra = GETPC();
+
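+ /*
+ * CAS2.W: if both memory words match the compare registers, store
+ * the update values; either way the loaded values end up in Dc1/Dc2,
+ * and the flags reflect the first compare that differed.
+ */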
+ l1 = cpu_lduw_data_ra(env, a1, ra);
+ l2 = cpu_lduw_data_ra(env, a2, ra);
+ if (l1 == c1 && l2 == c2) {
+ cpu_stw_data_ra(env, a1, u1, ra);
+ cpu_stw_data_ra(env, a2, u2, ra);
+ }
+
+ if (c1 != l1) {
+ env->cc_n = l1;
+ env->cc_v = c1;
+ } else {
+ env->cc_n = l2;
+ env->cc_v = c2;
+ }
+ env->cc_op = CC_OP_CMPW;
+ env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
+ env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
+}
+
+static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
+ bool parallel)
+{
+ uint32_t Dc1 = extract32(regs, 9, 3);
+ uint32_t Dc2 = extract32(regs, 6, 3);
+ uint32_t Du1 = extract32(regs, 3, 3);
+ uint32_t Du2 = extract32(regs, 0, 3);
+ uint32_t c1 = env->dregs[Dc1];
+ uint32_t c2 = env->dregs[Dc2];
+ uint32_t u1 = env->dregs[Du1];
+ uint32_t u2 = env->dregs[Du2];
+ uint32_t l1, l2;
+ uintptr_t ra = GETPC();
+#if defined(CONFIG_ATOMIC64)
+ int mmu_idx = cpu_mmu_index(env, 0);
+ MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
+#endif
+
+ if (parallel) {
+ /* We're executing in a parallel context -- must be atomic. */
+#ifdef CONFIG_ATOMIC64
+ uint64_t c, u, l;
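+ /*
+ * When the two longwords are adjacent and 8-byte aligned, fold the
+ * pair into one 64-bit compare-and-swap; otherwise ask the main
+ * loop to replay the insn serially.
+ */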
+ if ((a1 & 7) == 0 && a2 == a1 + 4) {
+ c = deposit64(c2, 32, 32, c1);
+ u = deposit64(u2, 32, 32, u1);
+ l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
+ l1 = l >> 32;
+ l2 = l;
+ } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
+ c = deposit64(c1, 32, 32, c2);
+ u = deposit64(u1, 32, 32, u2);
+ l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
+ l2 = l >> 32;
+ l1 = l;
+ } else
+#endif
+ {
+ /* Tell the main loop we need to serialize this insn. */
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ } else {
+ /* We're executing in a serial context -- no need to be atomic. */
+ l1 = cpu_ldl_data_ra(env, a1, ra);
+ l2 = cpu_ldl_data_ra(env, a2, ra);
+ if (l1 == c1 && l2 == c2) {
+ cpu_stl_data_ra(env, a1, u1, ra);
+ cpu_stl_data_ra(env, a2, u2, ra);
+ }
+ }
+
+ if (c1 != l1) {
+ env->cc_n = l1;
+ env->cc_v = c1;
+ } else {
+ env->cc_n = l2;
+ env->cc_v = c2;
+ }
+ env->cc_op = CC_OP_CMPL;
+ env->dregs[Dc1] = l1;
+ env->dregs[Dc2] = l2;
+}
+
+void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+{
+ do_cas2l(env, regs, a1, a2, false);
+}
+
+void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
+ uint32_t a2)
+{
+ do_cas2l(env, regs, a1, a2, true);
+}
+
+struct bf_data {
+ uint32_t addr;
+ uint32_t bofs;
+ uint32_t blen;
+ uint32_t len;
+};
+
+static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
+{
+ int bofs, blen;
+
+ /* Bound length; map 0 to 32. */
+ len = ((len - 1) & 31) + 1;
+
+ /* Note that ofs is signed. */
+ addr += ofs / 8;
+ bofs = ofs % 8;
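+ /*
+ * C division truncates toward zero, so a negative ofs can leave a
+ * negative remainder; borrow one byte to normalize.
+ */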
+ if (bofs < 0) {
+ bofs += 8;
+ addr -= 1;
+ }
+
+ /*
+ * Compute the number of bytes required (minus one) to
+ * satisfy the bitfield.
+ */
+ blen = (bofs + len - 1) / 8;
+
+ /*
+ * Canonicalize the bit offset for data loaded into a 64-bit big-endian
+ * word. For the cases where BLEN is not a power of 2, adjust ADDR so
+ * that we can use the next power of two sized load without crossing a
+ * page boundary, unless the field itself crosses the boundary.
+ */
+ switch (blen) {
+ case 0:
+ bofs += 56;
+ break;
+ case 1:
+ bofs += 48;
+ break;
+ case 2:
+ if (addr & 1) {
+ bofs += 8;
+ addr -= 1;
+ }
+ /* fallthru */
+ case 3:
+ bofs += 32;
+ break;
+ case 4:
+ if (addr & 3) {
+ bofs += 8 * (addr & 3);
+ addr &= -4;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return (struct bf_data){
+ .addr = addr,
+ .bofs = bofs,
+ .blen = blen,
+ .len = len,
+ };
+}
+
+static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
+ uintptr_t ra)
+{
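+ /* blen is bytes-minus-one from bf_prep: 0 byte, 1 word, 2-3 long, 4 quad. */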
+ switch (blen) {
+ case 0:
+ return cpu_ldub_data_ra(env, addr, ra);
+ case 1:
+ return cpu_lduw_data_ra(env, addr, ra);
+ case 2:
+ case 3:
+ return cpu_ldl_data_ra(env, addr, ra);
+ case 4:
+ return cpu_ldq_data_ra(env, addr, ra);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
+ uint64_t data, uintptr_t ra)
+{
+ switch (blen) {
+ case 0:
+ cpu_stb_data_ra(env, addr, data, ra);
+ break;
+ case 1:
+ cpu_stw_data_ra(env, addr, data, ra);
+ break;
+ case 2:
+ case 3:
+ cpu_stl_data_ra(env, addr, data, ra);
+ break;
+ case 4:
+ cpu_stq_data_ra(env, addr, data, ra);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+
+ return (int64_t)(data << d.bofs) >> (64 - d.len);
+}
+
+uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+
+ /*
+ * Put CC_N at the top of the high word; put the zero-extended value
+ * at the bottom of the low word.
+ */
+ data <<= d.bofs;
+ data >>= 64 - d.len;
+ data |= data << (64 - d.len);
+
+ return data;
+}
+
+uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
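+ /* d.len one-bits, left-aligned, shifted down to bit offset d.bofs. */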
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);
+
+ bf_store(env, d.addr, d.blen, data, ra);
+
+ /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */
+ return val << (32 - d.len);
+}
+
+uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data ^ mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data & ~mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data | mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
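+/*
+ * BFFFO: return ofs plus the bit number (counting from the MSB) of the
+ * first set bit in the field, or ofs + len when the field is all zeros.
+ */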
+uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
+{
+ return (n ? clz32(n) : len) + ofs;
+}
+
+uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+ uint64_t n = (data & mask) << d.bofs;
+ uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);
+
+ /*
+ * Return FFO in the low word and N in the high word.
+ * Note that because of MASK and the shift, the low word
+ * is already zero.
+ */
+ return n | ffo;
+}
+
+void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
+{
+ /*
+ * From the specs:
+ * X: Not affected, C,V,Z: Undefined,
+ * N: Set if val < 0; cleared if val > ub, undefined otherwise
+ * We implement here values found from a real MC68040:
+ * X,V,Z: Not affected
+ * N: Set if val < 0; cleared if val >= 0
+ * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise
+ * if 0 > ub: set if val > ub and val < 0, cleared otherwise
+ */
+ env->cc_n = val;
+ env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0;
+
+ if (val < 0 || val > ub) {
+ CPUState *cs = env_cpu(env);
+
+ /* Recover PC and CC_OP for the beginning of the insn. */
+ cpu_restore_state(cs, GETPC(), true);
+
+ /* flags have been modified by gen_flush_flags() */
+ env->cc_op = CC_OP_FLAGS;
+ /* Adjust PC to end of the insn. */
+ env->pc += 2;
+
+ cs->exception_index = EXCP_CHK;
+ cpu_loop_exit(cs);
+ }
+}
+
+void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
+{
+ /*
+ * From the specs:
+ * X: Not affected, N,V: Undefined,
+ * Z: Set if val is equal to lb or ub
+ * C: Set if val < lb or val > ub, cleared otherwise
+ * We implement here values found from a real MC68040:
+ * X,N,V: Not affected
+ * Z: Set if val is equal to lb or ub
+ * C: if lb <= ub: set if val < lb or val > ub, cleared otherwise
+ * if lb > ub: set if val > ub and val < lb, cleared otherwise
+ */
+ env->cc_z = val != lb && val != ub;
+ env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;
+
+ if (env->cc_c) {
+ CPUState *cs = env_cpu(env);
+
+ /* Recover PC and CC_OP for the beginning of the insn. */
+ cpu_restore_state(cs, GETPC(), true);
+
+ /* flags have been modified by gen_flush_flags() */
+ env->cc_op = CC_OP_FLAGS;
+ /* Adjust PC to end of the insn. */
+ env->pc += 4;
+
+ cs->exception_index = EXCP_CHK;
+ cpu_loop_exit(cs);
+ }
+}
diff --git a/target/m68k/qregs.def b/target/m68k/qregs.def
new file mode 100644
index 000000000..1aadc622d
--- /dev/null
+++ b/target/m68k/qregs.def
@@ -0,0 +1,10 @@
+DEFO32(PC, pc)
+DEFO32(SR, sr)
+DEFO32(CC_OP, cc_op)
+DEFO32(CC_X, cc_x)
+DEFO32(CC_C, cc_c)
+DEFO32(CC_N, cc_n)
+DEFO32(CC_V, cc_v)
+DEFO32(CC_Z, cc_z)
+DEFO32(MACSR, macsr)
+DEFO32(MAC_MASK, mac_mask)
diff --git a/target/m68k/softfloat.c b/target/m68k/softfloat.c
new file mode 100644
index 000000000..02dcc03d1
--- /dev/null
+++ b/target/m68k/softfloat.c
@@ -0,0 +1,2835 @@
+/*
+ * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator,
+ * derived from NetBSD M68040 FPSP functions,
+ * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic
+ * Package. Those parts of the code (and some later contributions) are
+ * provided under that license, as detailed below.
+ * It has subsequently been modified by contributors to the QEMU Project,
+ * so some portions are provided under:
+ * the SoftFloat-2a license
+ * the BSD license
+ * GPL-v2-or-later
+ *
+ * Any future contributions to this file will be taken to be licensed under
+ * the Softfloat-2a license unless specifically indicated otherwise.
+ */
+
+/*
+ * Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "softfloat.h"
+#include "fpu/softfloat-macros.h"
+#include "softfloat_fpsp_tables.h"
+
+#define pi_exp 0x4000
+#define piby2_exp 0x3FFF
+#define pi_sig UINT64_C(0xc90fdaa22168c235)
+
+static floatx80 propagateFloatx80NaNOneArg(floatx80 a, float_status *status)
+{
+ if (floatx80_is_signaling_nan(a, status)) {
+ float_raise(float_flag_invalid, status);
+ a = floatx80_silence_nan(a, status);
+ }
+
+ if (status->default_nan_mode) {
+ return floatx80_default_nan(status);
+ }
+
+ return a;
+}
+
+/*
+ * Returns the mantissa of the extended double-precision floating-point
+ * value `a'.
+ */
+
+floatx80 floatx80_getman(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a , status);
+ }
+ float_raise(float_flag_invalid , status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp == 0) {
+ if (aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+ normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
+ }
+
+ return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign,
+ 0x3FFF, aSig, 0, status);
+}
+
+/*
+ * Returns the exponent of the extended double-precision floating-point
+ * value `a' as an extended double-precision value.
+ */
+
+floatx80 floatx80_getexp(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a , status);
+ }
+ float_raise(float_flag_invalid , status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp == 0) {
+ if (aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+ normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
+ }
+
+ return int32_to_floatx80(aExp - 0x3FFF, status);
+}
+
+/*
+ * Scales the extended double-precision floating-point value in operand `a'
+ * by the value `b'. The function truncates the value in the second operand
+ * 'b' to an integral value and adds that value to the exponent of the
+ * operand 'a'. The operation is performed according to the IEC/IEEE
+ * Standard for Binary Floating-Point Arithmetic.
+ */
+
+floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status)
+{
+ bool aSign, bSign;
+ int32_t aExp, bExp, shiftCount;
+ uint64_t aSig, bSig;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+ bSig = extractFloatx80Frac(b);
+ bExp = extractFloatx80Exp(b);
+ bSign = extractFloatx80Sign(b);
+
+ if (bExp == 0x7FFF) {
+ if ((uint64_t) (bSig << 1) ||
+ ((aExp == 0x7FFF) && (uint64_t) (aSig << 1))) {
+ return propagateFloatx80NaN(a, b, status);
+ }
+ float_raise(float_flag_invalid , status);
+ return floatx80_default_nan(status);
+ }
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaN(a, b, status);
+ }
+ return packFloatx80(aSign, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ if (aExp == 0) {
+ if (aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+ if (bExp < 0x3FFF) {
+ return a;
+ }
+ normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
+ }
+
+ if (bExp < 0x3FFF) {
+ return a;
+ }
+
+ if (0x400F < bExp) {
+ aExp = bSign ? -0x6001 : 0xE000;
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ aSign, aExp, aSig, 0, status);
+ }
+
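+ /*
+ * 0x403E is the biased exponent at which the 64-bit significand
+ * reads directly as an integer, so this shift truncates |b|.
+ */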
+ shiftCount = 0x403E - bExp;
+ bSig >>= shiftCount;
+ aExp = bSign ? (aExp - bSig) : (aExp + bSig);
+
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ aSign, aExp, aSig, 0, status);
+}
+
+floatx80 floatx80_move(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t)(aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ return a;
+ }
+ if (aExp == 0) {
+ if (aSig == 0) {
+ return a;
+ }
+ normalizeRoundAndPackFloatx80(status->floatx80_rounding_precision,
+ aSign, aExp, aSig, 0, status);
+ }
+ return roundAndPackFloatx80(status->floatx80_rounding_precision, aSign,
+ aExp, aSig, 0, status);
+}
+
+/*
+ * Algorithms for transcendental functions supported by MC68881 and MC68882
+ * mathematical coprocessors. The functions are derived from FPSP library.
+ */
+
+#define one_exp 0x3FFF
+#define one_sig UINT64_C(0x8000000000000000)
+
+/*
+ * Function for compactifying extended double-precision floating point values.
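+ * For example, 1.0 (aExp 0x3FFF, aSig 0x8000000000000000) compacts to
+ * 0x3FFF8000.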
+ */
+
+static int32_t floatx80_make_compact(int32_t aExp, uint64_t aSig)
+{
+ return (aExp << 16) | (aSig >> 48);
+}
+
+/*
+ * Log base e of x plus 1
+ */
+
+floatx80 floatx80_lognp1(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig, fSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, j, k;
+ floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign) {
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+ return packFloatx80(0, floatx80_infinity.high, floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ if (aSign && aExp >= one_exp) {
+ if (aExp == one_exp && aSig == one_sig) {
+ float_raise(float_flag_divbyzero, status);
+ return packFloatx80(aSign, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp < 0x3f99 || (aExp == 0x3f99 && aSig == one_sig)) {
+ /* <= min threshold */
+ float_raise(float_flag_inexact, status);
+ return floatx80_move(a, status);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ fp0 = a; /* Z */
+ fp1 = a;
+
+ fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* X = (1+Z) */
+
+ aExp = extractFloatx80Exp(fp0);
+ aSig = extractFloatx80Frac(fp0);
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x3FFE8000 || compact > 0x3FFFC000) {
+ /* |X| < 1/2 or |X| > 3/2 */
+ k = aExp - 0x3FFF;
+ fp1 = int32_to_floatx80(k, status);
+
+ fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000);
+ j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */
+
+ f = packFloatx80(0, 0x3FFF, fSig); /* F */
+ fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */
+
+ fp0 = floatx80_sub(fp0, f, status); /* Y-F */
+
+ lp1cont1:
+ /* LP1CONT1 */
+ fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */
+ logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC));
+ klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */
+ fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */
+
+ fp3 = fp2;
+ fp1 = fp2;
+
+ fp1 = floatx80_mul(fp1, float64_to_floatx80(
+ make_float64(0x3FC2499AB5E4040B), status),
+ status); /* V*A6 */
+ fp2 = floatx80_mul(fp2, float64_to_floatx80(
+ make_float64(0xBFC555B5848CB7DB), status),
+ status); /* V*A5 */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FC99999987D8730), status),
+ status); /* A4+V*A6 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0xBFCFFFFFFF6F7E97), status),
+ status); /* A3+V*A5 */
+ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */
+ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FD55555555555A4), status),
+ status); /* A2+V*(A4+V*A6) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0xBFE0000000000008), status),
+ status); /* A1+V*(A3+V*A5) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */
+ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */
+ fp1 = floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */
+ fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */
+
+ fp1 = floatx80_add(fp1, log_tbl[j + 1],
+ status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS LOG(F) + LOG(1+U) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, klog2, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else if (compact < 0x3FFEF07D || compact > 0x3FFF8841) {
+ /* X < 15/16 or X > 17/16 */
+ /* LP1CARE */
+ fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000);
+ f = packFloatx80(0, 0x3FFF, fSig); /* F */
+ j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */
+
+ if (compact >= 0x3FFF8000) { /* 1+Z >= 1 */
+ /* KISZERO */
+ fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x3F800000),
+ status), f, status); /* 1-F */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (1-F)+Z */
+ fp1 = packFloatx80(0, 0, 0); /* K = 0 */
+ } else {
+ /* KISNEG */
+ fp0 = floatx80_sub(float32_to_floatx80(make_float32(0x40000000),
+ status), f, status); /* 2-F */
+ fp1 = floatx80_add(fp1, fp1, status); /* 2Z */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS Y-F = (2-F)+2Z */
+ fp1 = packFloatx80(1, one_exp, one_sig); /* K = -1 */
+ }
+ goto lp1cont1;
+ } else {
+ /* LP1ONE16 */
+ fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2Z */
+ fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* FP0 IS 1+X */
+
+ /* LP1CONT2 */
+ fp1 = floatx80_div(fp1, fp0, status); /* U */
+ saveu = fp1;
+ fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */
+
+ fp3 = float64_to_floatx80(make_float64(0x3F175496ADD7DAD6),
+ status); /* B5 */
+ fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0),
+ status); /* B4 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0x3F624924928BCCFF), status),
+ status); /* B3+W*B5 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3F899999999995EC), status),
+ status); /* B2+W*B4 */
+ fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FB5555555555555), status),
+ status); /* B1+W*(B3+W*B5) */
+
+ fp0 = floatx80_mul(fp0, saveu, status); /* FP0 IS U*V */
+ fp1 = floatx80_add(fp1, fp2,
+ status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */
+ fp0 = floatx80_mul(fp0, fp1,
+ status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, saveu, status);
+
+ /*if (!floatx80_is_zero(a)) { */
+ float_raise(float_flag_inexact, status);
+ /*} */
+
+ return a;
+ }
+}
+
+/*
+ * Log base e
+ */
+
+floatx80 floatx80_logn(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig, fSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, j, k, adjk;
+ floatx80 fp0, fp1, fp2, fp3, f, logof2, klog2, saveu;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign == 0) {
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ }
+
+ adjk = 0;
+
+ if (aExp == 0) {
+ if (aSig == 0) { /* zero */
+ float_raise(float_flag_divbyzero, status);
+ return packFloatx80(1, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ if ((aSig & one_sig) == 0) { /* denormal */
+ normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
+ adjk = -100;
+ aExp += 100;
+ a = packFloatx80(aSign, aExp, aSig);
+ }
+ }
+
+ if (aSign) {
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x3FFEF07D || compact > 0x3FFF8841) {
+ /* |X| < 15/16 or |X| > 17/16 */
+ k = aExp - 0x3FFF;
+ k += adjk;
+ fp1 = int32_to_floatx80(k, status);
+
+ fSig = (aSig & UINT64_C(0xFE00000000000000)) | UINT64_C(0x0100000000000000);
+ j = (fSig >> 56) & 0x7E; /* DISPLACEMENT FOR 1/F */
+
+ f = packFloatx80(0, 0x3FFF, fSig); /* F */
+ fp0 = packFloatx80(0, 0x3FFF, aSig); /* Y */
+
+ fp0 = floatx80_sub(fp0, f, status); /* Y-F */
+
+ /* LP1CONT1 */
+ fp0 = floatx80_mul(fp0, log_tbl[j], status); /* FP0 IS U = (Y-F)/F */
+ logof2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC));
+ klog2 = floatx80_mul(fp1, logof2, status); /* FP1 IS K*LOG2 */
+ fp2 = floatx80_mul(fp0, fp0, status); /* FP2 IS V=U*U */
+
+ fp3 = fp2;
+ fp1 = fp2;
+
+ fp1 = floatx80_mul(fp1, float64_to_floatx80(
+ make_float64(0x3FC2499AB5E4040B), status),
+ status); /* V*A6 */
+ fp2 = floatx80_mul(fp2, float64_to_floatx80(
+ make_float64(0xBFC555B5848CB7DB), status),
+ status); /* V*A5 */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FC99999987D8730), status),
+ status); /* A4+V*A6 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0xBFCFFFFFFF6F7E97), status),
+ status); /* A3+V*A5 */
+ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A4+V*A6) */
+ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A3+V*A5) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FD55555555555A4), status),
+ status); /* A2+V*(A4+V*A6) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0xBFE0000000000008), status),
+ status); /* A1+V*(A3+V*A5) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* V*(A2+V*(A4+V*A6)) */
+ fp2 = floatx80_mul(fp2, fp3, status); /* V*(A1+V*(A3+V*A5)) */
+ fp1 = floatx80_mul(fp1, fp0, status); /* U*V*(A2+V*(A4+V*A6)) */
+ fp0 = floatx80_add(fp0, fp2, status); /* U+V*(A1+V*(A3+V*A5)) */
+
+ fp1 = floatx80_add(fp1, log_tbl[j + 1],
+ status); /* LOG(F)+U*V*(A2+V*(A4+V*A6)) */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 IS LOG(F) + LOG(1+U) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, klog2, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else { /* |X-1| < 1/16 */
+ fp0 = a;
+ fp1 = a;
+ fp1 = floatx80_sub(fp1, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* FP1 IS X-1 */
+ fp0 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* FP0 IS X+1 */
+ fp1 = floatx80_add(fp1, fp1, status); /* FP1 IS 2(X-1) */
+
+ /* LP1CONT2 */
+ fp1 = floatx80_div(fp1, fp0, status); /* U */
+ saveu = fp1;
+ fp0 = floatx80_mul(fp1, fp1, status); /* FP0 IS V = U*U */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS W = V*V */
+
+ fp3 = float64_to_floatx80(make_float64(0x3F175496ADD7DAD6),
+ status); /* B5 */
+ fp2 = float64_to_floatx80(make_float64(0x3F3C71C2FE80C7E0),
+ status); /* B4 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* W*B5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* W*B4 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0x3F624924928BCCFF), status),
+ status); /* B3+W*B5 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3F899999999995EC), status),
+ status); /* B2+W*B4 */
+ fp1 = floatx80_mul(fp1, fp3, status); /* W*(B3+W*B5) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* V*(B2+W*B4) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3FB5555555555555), status),
+ status); /* B1+W*(B3+W*B5) */
+
+ fp0 = floatx80_mul(fp0, saveu, status); /* FP0 IS U*V */
+ fp1 = floatx80_add(fp1, fp2, status); /* B1+W*(B3+W*B5) + V*(B2+W*B4) */
+ fp0 = floatx80_mul(fp0, fp1,
+ status); /* U*V*([B1+W*(B3+W*B5)] + [V*(B2+W*B4)]) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, saveu, status);
+
+ /*if (!floatx80_is_zero(a)) { */
+ float_raise(float_flag_inexact, status);
+ /*} */
+
+ return a;
+ }
+}
+
+/*
+ * Log base 10
+ */
+
+floatx80 floatx80_log10(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ floatx80 fp0, fp1;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign == 0) {
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ float_raise(float_flag_divbyzero, status);
+ return packFloatx80(1, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aSign) {
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ fp0 = floatx80_logn(a, status);
+ fp1 = packFloatx80(0, 0x3FFD, UINT64_C(0xDE5BD8A937287195)); /* INV_L10 */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L10 */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
+
+/*
+ * Log base 2
+ */
+
+floatx80 floatx80_log2(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ floatx80 fp0, fp1;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign == 0) {
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ }
+
+ if (aExp == 0) {
+ if (aSig == 0) {
+ float_raise(float_flag_divbyzero, status);
+ return packFloatx80(1, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+ normalizeFloatx80Subnormal(aSig, &aExp, &aSig);
+ }
+
+ if (aSign) {
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ if (aSig == one_sig) { /* X is 2^k */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = int32_to_floatx80(aExp - 0x3FFF, status);
+ } else {
+ fp0 = floatx80_logn(a, status);
+ fp1 = packFloatx80(0, 0x3FFF, UINT64_C(0xB8AA3B295C17F0BC)); /* INV_L2 */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, fp1, status); /* LOGN(X)*INV_L2 */
+ }
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
+
+/*
+ * e to x
+ */
+
+floatx80 floatx80_etox(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, n, j, k, m, m1;
+ floatx80 fp0, fp1, fp2, fp3, l2, scale, adjscale;
+ bool adjflag;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign) {
+ return packFloatx80(0, 0, 0);
+ }
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(0, one_exp, one_sig);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ adjflag = 0;
+
+ if (aExp >= 0x3FBE) { /* |X| >= 2^(-65) */
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x400CB167) { /* |X| < 16380 log2 */
+ fp0 = a;
+ fp1 = a;
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0x42B8AA3B), status),
+ status); /* 64/log2 * X */
+ adjflag = 0;
+ n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */
+ fp0 = int32_to_floatx80(n, status);
+
+ j = n & 0x3F; /* J = N mod 64 */
+ m = n / 64; /* NOTE: this is really arithmetic right shift by 6 */
+ if (n < 0 && j) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ m--;
+ }
+ m += 0x3FFF; /* biased exponent of 2^(M) */
+
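+ /*
+ * Annotation: with N = round(64*X/log2), J = N mod 64 and
+ * M = (N - J)/64, X = (M + J/64)*log2 + R with |R| <= log2/128, so
+ * EXP(X) = 2^M * 2^(J/64) * exp(R).  The A1..A5 polynomial below
+ * evaluates exp(R) - 1, and 2^(J/64) comes from exp_tbl[j] with
+ * exp_tbl2[j] supplying extra low-order bits.
+ * Shift example: for n = -65, n/64 is -1 in C (truncation toward
+ * zero), j = (-65) & 0x3F = 63, so m is decremented to -2, matching
+ * -65 >> 6 = floor(-65/64) = -2.
+ */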
+ expcont1:
+ fp2 = fp0; /* N */
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0xBC317218), status),
+ status); /* N * L1, L1 = lead(-log2/64) */
+ l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6));
+ fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */
+ fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */
+ fp0 = floatx80_add(fp0, fp2, status); /* R */
+
+ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp2 = float32_to_floatx80(make_float32(0x3AB60B70),
+ status); /* A5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A5 */
+ fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3C088895),
+ status), fp1,
+ status); /* fp3 is S*A4 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(make_float64(
+ 0x3FA5555555554431), status),
+ status); /* fp2 is A3+S*A5 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(make_float64(
+ 0x3FC5555555554018), status),
+ status); /* fp3 is A2+S*A4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* fp3 is S*(A2+S*A4) */
+ fp2 = floatx80_add(fp2, float32_to_floatx80(
+ make_float32(0x3F000000), status),
+ status); /* fp2 is A1+S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp0, status); /* fp3 IS R*S*(A2+S*A4) */
+ fp2 = floatx80_mul(fp2, fp1,
+ status); /* fp2 IS S*(A1+S*(A3+S*A5)) */
+ fp0 = floatx80_add(fp0, fp3, status); /* fp0 IS R+R*S*(A2+S*A4) */
+ fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */
+
+ fp1 = exp_tbl[j];
+ fp0 = floatx80_mul(fp0, fp1, status); /* 2^(J/64)*(Exp(R)-1) */
+ fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j], status),
+ status); /* accurate 2^(J/64) */
+ fp0 = floatx80_add(fp0, fp1,
+ status); /* 2^(J/64) + 2^(J/64)*(Exp(R)-1) */
+
+ scale = packFloatx80(0, m, one_sig);
+ if (adjflag) {
+ adjscale = packFloatx80(0, m1, one_sig);
+ fp0 = floatx80_mul(fp0, adjscale, status);
+ }
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, scale, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else { /* |X| >= 16380 log2 */
+ if (compact > 0x400CB27C) { /* |X| >= 16480 log2 */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+ if (aSign) {
+ a = roundAndPackFloatx80(
+ status->floatx80_rounding_precision,
+ 0, -0x1000, aSig, 0, status);
+ } else {
+ a = roundAndPackFloatx80(
+ status->floatx80_rounding_precision,
+ 0, 0x8000, aSig, 0, status);
+ }
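+ /*
+ * Annotation: the out-of-range biased exponents (-0x1000 and 0x8000)
+ * make roundAndPackFloatx80 deliver the proper underflow or overflow
+ * result and exception flags for the caller's rounding mode and
+ * precision.
+ */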
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ fp0 = a;
+ fp1 = a;
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0x42B8AA3B), status),
+ status); /* 64/log2 * X */
+ adjflag = 1;
+ n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */
+ fp0 = int32_to_floatx80(n, status);
+
+ j = n & 0x3F; /* J = N mod 64 */
+ /* NOTE: this is really arithmetic right shift by 6 */
+ k = n / 64;
+ if (n < 0 && j) {
+ /* arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ k--;
+ }
+ /* NOTE: this is really arithmetic right shift by 1 */
+ m1 = k / 2;
+ if (k < 0 && (k & 1)) {
+ /* arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ m1--;
+ }
+ m = k - m1;
+ m1 += 0x3FFF; /* biased exponent of 2^(M1) */
+ m += 0x3FFF; /* biased exponent of 2^(M) */
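+ /*
+ * Annotation: in this range 2^K (K = N div 64) can overflow a single
+ * floatx80 exponent, so it is split as 2^M * 2^M1 with M1 = K div 2
+ * and M = K - M1; adjflag makes the final scaling apply both
+ * factors.
+ */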
+
+ goto expcont1;
+ }
+ }
+ } else { /* |X| < 2^(-65) */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(a, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* 1 + X */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * 2 to x
+ */
+
+floatx80 floatx80_twotox(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, n, j, l, m, m1;
+ floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign) {
+ return packFloatx80(0, 0, 0);
+ }
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(0, one_exp, one_sig);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ fp0 = a;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x3FB98000 || compact > 0x400D80C0) {
+ /* |X| > 16480 or |X| < 2^(-70) */
+ if (compact > 0x3FFF8000) { /* |X| > 16480 */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ if (aSign) {
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ 0, -0x1000, aSig, 0, status);
+ } else {
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ 0, 0x8000, aSig, 0, status);
+ }
+ } else { /* |X| < 2^(-70) */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(
+ make_float32(0x3F800000), status),
+ status); /* 1 + X */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else { /* 2^(-70) <= |X| <= 16480 */
+ fp1 = fp0; /* X */
+ fp1 = floatx80_mul(fp1, float32_to_floatx80(
+ make_float32(0x42800000), status),
+ status); /* X * 64 */
+ n = floatx80_to_int32(fp1, status);
+ fp1 = int32_to_floatx80(n, status);
+ j = n & 0x3F;
+ l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */
+ if (n < 0 && j) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ l--;
+ }
+ m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */
+ if (l < 0 && (l & 1)) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ m--;
+ }
+ m1 = l - m;
+ m1 += 0x3FFF; /* ADJFACT IS 2^(M') */
+
+ adjfact = packFloatx80(0, m1, one_sig);
+ fact1 = exp2_tbl[j];
+ fact1.high += m;
+ fact2.high = exp2_tbl2[j] >> 16;
+ fact2.high += m;
+ fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF);
+ fact2.low <<= 48;
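+ /*
+ * Annotation: fact1 is 2^(J/64) from exp2_tbl scaled by 2^M (M is
+ * added to its biased exponent); fact2 rebuilds the table's
+ * low-order correction from exp2_tbl2[j], whose upper 16 bits hold
+ * the sign/exponent field and lower 16 bits the top of the
+ * significand, scaled by 2^M the same way.
+ */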
+
+ fp1 = floatx80_mul(fp1, float32_to_floatx80(
+ make_float32(0x3C800000), status),
+ status); /* (1/64)*N */
+ fp0 = floatx80_sub(fp0, fp1, status); /* X - (1/64)*INT(64 X) */
+ fp2 = packFloatx80(0, 0x3FFE, UINT64_C(0xB17217F7D1CF79AC)); /* LOG2 */
+ fp0 = floatx80_mul(fp0, fp2, status); /* R */
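+ /*
+ * Annotation: with N = round(64*X), 2^X = 2^M1 * 2^M * 2^(J/64)
+ * * exp(R) where R = (X - N/64) * ln(2); the LOG2 constant above is
+ * ln(2) ~ 0.6931471806 in extended precision.
+ */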
+
+ /* EXPR */
+ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2),
+ status); /* A5 */
+ fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C),
+ status); /* A4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FA5555555554CC1), status),
+ status); /* A3+S*A5 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0x3FC5555555554A54), status),
+ status); /* A2+S*A4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FE0000000000000), status),
+ status); /* A1+S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */
+
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */
+ fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */
+ fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */
+
+ fp0 = floatx80_mul(fp0, fact1, status);
+ fp0 = floatx80_add(fp0, fact2, status);
+ fp0 = floatx80_add(fp0, fact1, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, adjfact, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * 10 to x
+ */
+
+floatx80 floatx80_tentox(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, n, j, l, m, m1;
+ floatx80 fp0, fp1, fp2, fp3, adjfact, fact1, fact2;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign) {
+ return packFloatx80(0, 0, 0);
+ }
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(0, one_exp, one_sig);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ fp0 = a;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x3FB98000 || compact > 0x400B9B07) {
+ /* |X| > 16480 LOG2/LOG10 or |X| < 2^(-70) */
+ if (compact > 0x3FFF8000) { /* |X| > 16480 LOG2/LOG10 */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ if (aSign) {
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ 0, -0x1000, aSig, 0, status);
+ } else {
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ 0, 0x8000, aSig, 0, status);
+ }
+ } else { /* |X| < 2^(-70) */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(
+ make_float32(0x3F800000), status),
+ status); /* 1 + X */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else { /* 2^(-70) <= |X| <= 16480 LOG2/LOG10 */
+ fp1 = fp0; /* X */
+ fp1 = floatx80_mul(fp1, float64_to_floatx80(
+ make_float64(0x406A934F0979A371),
+ status), status); /* X*64*LOG10/LOG2 */
+ n = floatx80_to_int32(fp1, status); /* N=INT(X*64*LOG10/LOG2) */
+ fp1 = int32_to_floatx80(n, status);
+
+ j = n & 0x3F;
+ l = n / 64; /* NOTE: this is really arithmetic right shift by 6 */
+ if (n < 0 && j) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ l--;
+ }
+ m = l / 2; /* NOTE: this is really arithmetic right shift by 1 */
+ if (l < 0 && (l & 1)) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ m--;
+ }
+ m1 = l - m;
+ m1 += 0x3FFF; /* ADJFACT IS 2^(M') */
+
+ adjfact = packFloatx80(0, m1, one_sig);
+ fact1 = exp2_tbl[j];
+ fact1.high += m;
+ fact2.high = exp2_tbl2[j] >> 16;
+ fact2.high += m;
+ fact2.low = (uint64_t)(exp2_tbl2[j] & 0xFFFF);
+ fact2.low <<= 48;
+
+ fp2 = fp1; /* N */
+ fp1 = floatx80_mul(fp1, float64_to_floatx80(
+ make_float64(0x3F734413509F8000), status),
+ status); /* N*(LOG2/64LOG10)_LEAD */
+ fp3 = packFloatx80(1, 0x3FCD, UINT64_C(0xC0219DC1DA994FD2));
+ fp2 = floatx80_mul(fp2, fp3, status); /* N*(LOG2/64LOG10)_TRAIL */
+ fp0 = floatx80_sub(fp0, fp1, status); /* X - N L_LEAD */
+ fp0 = floatx80_sub(fp0, fp2, status); /* X - N L_TRAIL */
+ fp2 = packFloatx80(0, 0x4000, UINT64_C(0x935D8DDDAAA8AC17)); /* LOG10 */
+ fp0 = floatx80_mul(fp0, fp2, status); /* R */
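+ /*
+ * Annotation: with N = round(64*X*log2(10)), 10^X = 2^M1 * 2^M *
+ * 2^(J/64) * exp(R) where R = (X - N*log10(2)/64) * ln(10); L_LEAD
+ * and L_TRAIL split log10(2)/64 into two pieces to keep the
+ * subtraction accurate, and the LOG10 constant above is ln(10).
+ */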
+
+ /* EXPR */
+ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp2 = float64_to_floatx80(make_float64(0x3F56C16D6F7BD0B2),
+ status); /* A5 */
+ fp3 = float64_to_floatx80(make_float64(0x3F811112302C712C),
+ status); /* A4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*A5 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S*A4 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FA5555555554CC1), status),
+ status); /* A3+S*A5 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0x3FC5555555554A54), status),
+ status); /* A2+S*A4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S*(A2+S*A4) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FE0000000000000), status),
+ status); /* A1+S*(A3+S*A5) */
+ fp3 = floatx80_mul(fp3, fp0, status); /* R*S*(A2+S*A4) */
+
+ fp2 = floatx80_mul(fp2, fp1, status); /* S*(A1+S*(A3+S*A5)) */
+ fp0 = floatx80_add(fp0, fp3, status); /* R+R*S*(A2+S*A4) */
+ fp0 = floatx80_add(fp0, fp2, status); /* EXP(R) - 1 */
+
+ fp0 = floatx80_mul(fp0, fact1, status);
+ fp0 = floatx80_add(fp0, fact2, status);
+ fp0 = floatx80_add(fp0, fact1, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, adjfact, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * Tangent
+ */
+
+floatx80 floatx80_tan(floatx80 a, float_status *status)
+{
+ bool aSign, xSign;
+ int32_t aExp, xExp;
+ uint64_t aSig, xSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, l, n, j;
+ floatx80 fp0, fp1, fp2, fp3, fp4, fp5, invtwopi, twopi1, twopi2;
+ float32 twoto63;
+ bool endflag;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ fp0 = a;
+
+ if (compact < 0x3FD78000 || compact > 0x4004BC7E) {
+ /* |X| < 2^(-40) or |X| > 15 PI */
+ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */
+ /* REDUCEX */
+ fp1 = packFloatx80(0, 0, 0);
+ if (compact == 0x7FFEFFFF) {
+ twopi1 = packFloatx80(aSign ^ 1, 0x7FFE,
+ UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(aSign ^ 1, 0x7FDC,
+ UINT64_C(0x85A308D300000000));
+ fp0 = floatx80_add(fp0, twopi1, status);
+ fp1 = fp0;
+ fp0 = floatx80_add(fp0, twopi2, status);
+ fp1 = floatx80_sub(fp1, fp0, status);
+ fp1 = floatx80_add(fp1, twopi2, status);
+ }
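+ /*
+ * Annotation: despite the "twopi" names, the constants below encode
+ * pi/2: invtwopi is (2/pi)*2^(-l) and twopi1+twopi2 split
+ * (pi/2)*2^l into two 32-bit pieces.  Each pass removes
+ * N*(pi/2)*2^l, N = round(X*(2/pi)*2^(-l)), from the head (fp0) /
+ * tail (fp1) pair, with l capped so the products stay accurate, and
+ * loops until the remainder's exponent drops to 28 or less.  Adding
+ * then subtracting sign(X)*2^63 rounds fp2 to an integer.  The same
+ * reduction appears in floatx80_sin and floatx80_cos below.
+ */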
+ loop:
+ xSign = extractFloatx80Sign(fp0);
+ xExp = extractFloatx80Exp(fp0);
+ xExp -= 0x3FFF;
+ if (xExp <= 28) {
+ l = 0;
+ endflag = true;
+ } else {
+ l = xExp - 27;
+ endflag = false;
+ }
+ invtwopi = packFloatx80(0, 0x3FFE - l,
+ UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */
+ twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000));
+
+ /* SIGN(INARG)*2^63 IN SGL */
+ twoto63 = packFloat32(xSign, 0xBE, 0);
+
+ fp2 = floatx80_mul(fp0, invtwopi, status);
+ fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status),
+ status); /* THE FRACT PART OF FP2 IS ROUNDED */
+ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status),
+ status); /* FP2 is N */
+ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */
+ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */
+ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */
+ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */
+ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */
+ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */
+ fp3 = fp0; /* FP3 is A */
+ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */
+
+ if (endflag) {
+ n = floatx80_to_int32(fp2, status);
+ goto tancont;
+ }
+ fp3 = floatx80_sub(fp3, fp0, status); /* A-R */
+ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */
+ goto loop;
+ } else {
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_move(a, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else {
+ fp1 = floatx80_mul(fp0, float64_to_floatx80(
+ make_float64(0x3FE45F306DC9C883), status),
+ status); /* X*2/PI */
+
+ n = floatx80_to_int32(fp1, status);
+ j = 32 + n;
+
+ fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */
+ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status),
+ status); /* FP0 IS R = (X-Y1)-Y2 */
+
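+ /*
+ * Annotation: r = X - n*(pi/2); pi_tbl[32 + n] evidently holds the
+ * high part of n*(pi/2) and pi_tbl2[32 + n] the low part.  tan(r)
+ * is approximated by the odd rational function r*P(s)/Q(s) with
+ * s = r*r; for odd n, tan(X) = -1/tan(r), so the NODD branch
+ * returns Q(s) / -(r*P(s)) instead.
+ */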
+ tancont:
+ if (n & 1) {
+ /* NODD */
+ fp1 = fp0; /* R */
+ fp0 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688),
+ status); /* Q4 */
+ fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04),
+ status); /* P3 */
+ fp3 = floatx80_mul(fp3, fp0, status); /* SQ4 */
+ fp2 = floatx80_mul(fp2, fp0, status); /* SP3 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBF346F59B39BA65F), status),
+ status); /* Q3+SQ4 */
+ fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00));
+ fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */
+ fp3 = floatx80_mul(fp3, fp0, status); /* S(Q3+SQ4) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* S(P2+SP3) */
+ fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1));
+ fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */
+ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA));
+ fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */
+ fp3 = floatx80_mul(fp3, fp0, status); /* S(Q2+S(Q3+SQ4)) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* S(P1+S(P2+SP3)) */
+ fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE));
+ fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* RS(P1+S(P2+SP3)) */
+ fp0 = floatx80_mul(fp0, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */
+ fp1 = floatx80_add(fp1, fp2, status); /* R+RS(P1+S(P2+SP3)) */
+ fp0 = floatx80_add(fp0, float32_to_floatx80(
+ make_float32(0x3F800000), status),
+ status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */
+
+ xSign = extractFloatx80Sign(fp1);
+ xExp = extractFloatx80Exp(fp1);
+ xSig = extractFloatx80Frac(fp1);
+ xSign ^= 1;
+ fp1 = packFloatx80(xSign, xExp, xSig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_div(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp3 = float64_to_floatx80(make_float64(0x3EA0B759F50F8688),
+ status); /* Q4 */
+ fp2 = float64_to_floatx80(make_float64(0xBEF2BAA5A8924F04),
+ status); /* P3 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* SQ4 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* SP3 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBF346F59B39BA65F), status),
+ status); /* Q3+SQ4 */
+ fp4 = packFloatx80(0, 0x3FF6, UINT64_C(0xE073D3FC199C4A00));
+ fp2 = floatx80_add(fp2, fp4, status); /* P2+SP3 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S(Q3+SQ4) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S(P2+SP3) */
+ fp4 = packFloatx80(0, 0x3FF9, UINT64_C(0xD23CD68415D95FA1));
+ fp3 = floatx80_add(fp3, fp4, status); /* Q2+S(Q3+SQ4) */
+ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0x8895A6C5FB423BCA));
+ fp2 = floatx80_add(fp2, fp4, status); /* P1+S(P2+SP3) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* S(Q2+S(Q3+SQ4)) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* S(P1+S(P2+SP3)) */
+ fp4 = packFloatx80(1, 0x3FFD, UINT64_C(0xEEF57E0DA84BC8CE));
+ fp3 = floatx80_add(fp3, fp4, status); /* Q1+S(Q2+S(Q3+SQ4)) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* RS(P1+S(P2+SP3)) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* S(Q1+S(Q2+S(Q3+SQ4))) */
+ fp0 = floatx80_add(fp0, fp2, status); /* R+RS(P1+S(P2+SP3)) */
+ fp1 = floatx80_add(fp1, float32_to_floatx80(
+ make_float32(0x3F800000), status),
+ status); /* 1+S(Q1+S(Q2+S(Q3+SQ4))) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_div(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+}
+
+/*
+ * Sine
+ */
+
+floatx80 floatx80_sin(floatx80 a, float_status *status)
+{
+ bool aSign, xSign;
+ int32_t aExp, xExp;
+ uint64_t aSig, xSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, l, n, j;
+ floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2;
+ float32 posneg1, twoto63;
+ bool endflag;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ fp0 = a;
+
+ if (compact < 0x3FD78000 || compact > 0x4004BC7E) {
+ /* |X| < 2^(-40) or |X| > 15 PI */
+ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */
+ /* REDUCEX */
+ fp1 = packFloatx80(0, 0, 0);
+ if (compact == 0x7FFEFFFF) {
+ twopi1 = packFloatx80(aSign ^ 1, 0x7FFE,
+ UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(aSign ^ 1, 0x7FDC,
+ UINT64_C(0x85A308D300000000));
+ fp0 = floatx80_add(fp0, twopi1, status);
+ fp1 = fp0;
+ fp0 = floatx80_add(fp0, twopi2, status);
+ fp1 = floatx80_sub(fp1, fp0, status);
+ fp1 = floatx80_add(fp1, twopi2, status);
+ }
+ loop:
+ xSign = extractFloatx80Sign(fp0);
+ xExp = extractFloatx80Exp(fp0);
+ xExp -= 0x3FFF;
+ if (xExp <= 28) {
+ l = 0;
+ endflag = true;
+ } else {
+ l = xExp - 27;
+ endflag = false;
+ }
+ invtwopi = packFloatx80(0, 0x3FFE - l,
+ UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */
+ twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000));
+
+ /* SIGN(INARG)*2^63 IN SGL */
+ twoto63 = packFloat32(xSign, 0xBE, 0);
+
+ fp2 = floatx80_mul(fp0, invtwopi, status);
+ fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status),
+ status); /* THE FRACT PART OF FP2 IS ROUNDED */
+ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status),
+ status); /* FP2 is N */
+ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */
+ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */
+ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */
+ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */
+ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */
+ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */
+ fp3 = fp0; /* FP3 is A */
+ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */
+
+ if (endflag) {
+ n = floatx80_to_int32(fp2, status);
+ goto sincont;
+ }
+ fp3 = floatx80_sub(fp3, fp0, status); /* A-R */
+ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */
+ goto loop;
+ } else {
+ /* SINSM */
+ fp0 = float32_to_floatx80(make_float32(0x3F800000),
+ status); /* 1 */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ /* SINTINY */
+ a = floatx80_move(a, status);
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else {
+ fp1 = floatx80_mul(fp0, float64_to_floatx80(
+ make_float64(0x3FE45F306DC9C883), status),
+ status); /* X*2/PI */
+
+ n = floatx80_to_int32(fp1, status);
+ j = 32 + n;
+
+ fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */
+ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status),
+ status); /* FP0 IS R = (X-Y1)-Y2 */
+
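+ /*
+ * Annotation: with r = X - n*(pi/2), sin(X) reduces to +/-sin(r)
+ * for even n (SINPOLY) and +/-cos(r) for odd n (COSPOLY); bit 1 of
+ * n supplies the sign, folded into r or into posneg1.
+ */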
+ sincont:
+ if (n & 1) {
+ /* COSPOLY */
+ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */
+ fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3),
+ status); /* B8 */
+ fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19),
+ status); /* B7 */
+
+ xSign = extractFloatx80Sign(fp0); /* X IS S */
+ xExp = extractFloatx80Exp(fp0);
+ xSig = extractFloatx80Frac(fp0);
+
+ if ((n >> 1) & 1) {
+ xSign ^= 1;
+ posneg1 = make_float32(0xBF800000); /* -1 */
+ } else {
+ xSign ^= 0;
+ posneg1 = make_float32(0x3F800000); /* 1 */
+ } /* X IS NOW R'= SGN*R */
+
+ fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3E21EED90612C972), status),
+ status); /* B6+TB8 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBE927E4FB79D9FCF), status),
+ status); /* B5+TB7 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3EFA01A01A01D423), status),
+ status); /* B4+T(B6+TB8) */
+ fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438));
+ fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */
+ fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E));
+ fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */
+ fp1 = floatx80_add(fp1, float32_to_floatx80(
+ make_float32(0xBF000000), status),
+ status); /* B1+T(B3+T(B5+TB7)) */
+ fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */
+ fp0 = floatx80_add(fp0, fp1, status); /* [B1+T(B3+T(B5+TB7))]+
+ * [S(B2+T(B4+T(B6+TB8)))]
+ */
+
+ x = packFloatx80(xSign, xExp, xSig);
+ fp0 = floatx80_mul(fp0, x, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ /* SINPOLY */
+ xSign = extractFloatx80Sign(fp0); /* X IS R */
+ xExp = extractFloatx80Exp(fp0);
+ xSig = extractFloatx80Frac(fp0);
+
+ xSign ^= (n >> 1) & 1; /* X IS NOW R'= SGN*R */
+
+ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */
+ fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5),
+ status); /* A7 */
+ fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1),
+ status); /* A6 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBE5AE6452A118AE4), status),
+ status); /* A5+T*A7 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3EC71DE3A5341531), status),
+ status); /* A4+T*A6 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBF2A01A01A018B59), status),
+ status); /* A3+T(A5+TA7) */
+ fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF));
+ fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */
+ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99));
+ fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */
+ fp1 = floatx80_add(fp1, fp2,
+ status); /* [A1+T(A3+T(A5+TA7))]+
+ * [S(A2+T(A4+TA6))]
+ */
+
+ x = packFloatx80(xSign, xExp, xSig);
+ fp0 = floatx80_mul(fp0, x, status); /* R'*S */
+ fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, x, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+}
+
+/*
+ * Cosine
+ */
+
+floatx80 floatx80_cos(floatx80 a, float_status *status)
+{
+ bool aSign, xSign;
+ int32_t aExp, xExp;
+ uint64_t aSig, xSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, l, n, j;
+ floatx80 fp0, fp1, fp2, fp3, fp4, fp5, x, invtwopi, twopi1, twopi2;
+ float32 posneg1, twoto63;
+ bool endflag;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(0, one_exp, one_sig);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ fp0 = a;
+
+ if (compact < 0x3FD78000 || compact > 0x4004BC7E) {
+ /* |X| < 2^(-40) or |X| > 15 PI */
+ if (compact > 0x3FFF8000) { /* |X| >= 15 PI */
+ /* REDUCEX */
+ fp1 = packFloatx80(0, 0, 0);
+ if (compact == 0x7FFEFFFF) {
+ twopi1 = packFloatx80(aSign ^ 1, 0x7FFE,
+ UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(aSign ^ 1, 0x7FDC,
+ UINT64_C(0x85A308D300000000));
+ fp0 = floatx80_add(fp0, twopi1, status);
+ fp1 = fp0;
+ fp0 = floatx80_add(fp0, twopi2, status);
+ fp1 = floatx80_sub(fp1, fp0, status);
+ fp1 = floatx80_add(fp1, twopi2, status);
+ }
+ loop:
+ xSign = extractFloatx80Sign(fp0);
+ xExp = extractFloatx80Exp(fp0);
+ xExp -= 0x3FFF;
+ if (xExp <= 28) {
+ l = 0;
+ endflag = true;
+ } else {
+ l = xExp - 27;
+ endflag = false;
+ }
+ invtwopi = packFloatx80(0, 0x3FFE - l,
+ UINT64_C(0xA2F9836E4E44152A)); /* INVTWOPI */
+ twopi1 = packFloatx80(0, 0x3FFF + l, UINT64_C(0xC90FDAA200000000));
+ twopi2 = packFloatx80(0, 0x3FDD + l, UINT64_C(0x85A308D300000000));
+
+ /* SIGN(INARG)*2^63 IN SGL */
+ twoto63 = packFloat32(xSign, 0xBE, 0);
+
+ fp2 = floatx80_mul(fp0, invtwopi, status);
+ fp2 = floatx80_add(fp2, float32_to_floatx80(twoto63, status),
+ status); /* THE FRACT PART OF FP2 IS ROUNDED */
+ fp2 = floatx80_sub(fp2, float32_to_floatx80(twoto63, status),
+ status); /* FP2 is N */
+ fp4 = floatx80_mul(twopi1, fp2, status); /* W = N*P1 */
+ fp5 = floatx80_mul(twopi2, fp2, status); /* w = N*P2 */
+ fp3 = floatx80_add(fp4, fp5, status); /* FP3 is P */
+ fp4 = floatx80_sub(fp4, fp3, status); /* W-P */
+ fp0 = floatx80_sub(fp0, fp3, status); /* FP0 is A := R - P */
+ fp4 = floatx80_add(fp4, fp5, status); /* FP4 is p = (W-P)+w */
+ fp3 = fp0; /* FP3 is A */
+ fp1 = floatx80_sub(fp1, fp4, status); /* FP1 is a := r - p */
+ fp0 = floatx80_add(fp0, fp1, status); /* FP0 is R := A+a */
+
+ if (endflag) {
+ n = floatx80_to_int32(fp2, status);
+ goto sincont;
+ }
+ fp3 = floatx80_sub(fp3, fp0, status); /* A-R */
+ fp1 = floatx80_add(fp1, fp3, status); /* FP1 is r := (A-R)+a */
+ goto loop;
+ } else {
+ /* SINSM */
+ fp0 = float32_to_floatx80(make_float32(0x3F800000), status); /* 1 */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ /* COSTINY */
+ a = floatx80_sub(fp0, float32_to_floatx80(
+ make_float32(0x00800000), status),
+ status);
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else {
+ fp1 = floatx80_mul(fp0, float64_to_floatx80(
+ make_float64(0x3FE45F306DC9C883), status),
+ status); /* X*2/PI */
+
+ n = floatx80_to_int32(fp1, status);
+ j = 32 + n;
+
+ fp0 = floatx80_sub(fp0, pi_tbl[j], status); /* X-Y1 */
+ fp0 = floatx80_sub(fp0, float32_to_floatx80(pi_tbl2[j], status),
+ status); /* FP0 IS R = (X-Y1)-Y2 */
+
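+ /*
+ * Annotation: cos(X) = sin(X + pi/2), so this reuses the sine
+ * polynomials with the quadrant count shifted by one: the parity of
+ * n + 1 picks the polynomial and ((n + 1) >> 1) & 1 the sign.
+ */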
+ sincont:
+ if ((n + 1) & 1) {
+ /* COSPOLY */
+ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */
+ fp2 = float64_to_floatx80(make_float64(0x3D2AC4D0D6011EE3),
+ status); /* B8 */
+ fp3 = float64_to_floatx80(make_float64(0xBDA9396F9F45AC19),
+ status); /* B7 */
+
+ xSign = extractFloatx80Sign(fp0); /* X IS S */
+ xExp = extractFloatx80Exp(fp0);
+ xSig = extractFloatx80Frac(fp0);
+
+ if (((n + 1) >> 1) & 1) {
+ xSign ^= 1;
+ posneg1 = make_float32(0xBF800000); /* -1 */
+ } else {
+ xSign ^= 0;
+ posneg1 = make_float32(0x3F800000); /* 1 */
+ } /* X IS NOW R'= SGN*R */
+
+ fp2 = floatx80_mul(fp2, fp1, status); /* TB8 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* TB7 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3E21EED90612C972), status),
+ status); /* B6+TB8 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBE927E4FB79D9FCF), status),
+ status); /* B5+TB7 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(B6+TB8) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T(B5+TB7) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3EFA01A01A01D423), status),
+ status); /* B4+T(B6+TB8) */
+ fp4 = packFloatx80(1, 0x3FF5, UINT64_C(0xB60B60B60B61D438));
+ fp3 = floatx80_add(fp3, fp4, status); /* B3+T(B5+TB7) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(B4+T(B6+TB8)) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* T(B3+T(B5+TB7)) */
+ fp4 = packFloatx80(0, 0x3FFA, UINT64_C(0xAAAAAAAAAAAAAB5E));
+ fp2 = floatx80_add(fp2, fp4, status); /* B2+T(B4+T(B6+TB8)) */
+ fp1 = floatx80_add(fp1, float32_to_floatx80(
+ make_float32(0xBF000000), status),
+ status); /* B1+T(B3+T(B5+TB7)) */
+ fp0 = floatx80_mul(fp0, fp2, status); /* S(B2+T(B4+T(B6+TB8))) */
+ fp0 = floatx80_add(fp0, fp1, status);
+ /* [B1+T(B3+T(B5+TB7))]+[S(B2+T(B4+T(B6+TB8)))] */
+
+ x = packFloatx80(xSign, xExp, xSig);
+ fp0 = floatx80_mul(fp0, x, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(posneg1, status), status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ /* SINPOLY */
+ xSign = extractFloatx80Sign(fp0); /* X IS R */
+ xExp = extractFloatx80Exp(fp0);
+ xSig = extractFloatx80Frac(fp0);
+
+ xSign ^= ((n + 1) >> 1) & 1; /* X IS NOW R'= SGN*R */
+
+ fp0 = floatx80_mul(fp0, fp0, status); /* FP0 IS S */
+ fp1 = floatx80_mul(fp0, fp0, status); /* FP1 IS T */
+ fp3 = float64_to_floatx80(make_float64(0xBD6AAA77CCC994F5),
+ status); /* A7 */
+ fp2 = float64_to_floatx80(make_float64(0x3DE612097AAE8DA1),
+ status); /* A6 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T*A7 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T*A6 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBE5AE6452A118AE4), status),
+ status); /* A5+T*A7 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3EC71DE3A5341531), status),
+ status); /* A4+T*A6 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* T(A5+TA7) */
+ fp2 = floatx80_mul(fp2, fp1, status); /* T(A4+TA6) */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBF2A01A01A018B59), status),
+ status); /* A3+T(A5+TA7) */
+ fp4 = packFloatx80(0, 0x3FF8, UINT64_C(0x88888888888859AF));
+ fp2 = floatx80_add(fp2, fp4, status); /* A2+T(A4+TA6) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* T(A3+T(A5+TA7)) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* S(A2+T(A4+TA6)) */
+ fp4 = packFloatx80(1, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAA99));
+ fp1 = floatx80_add(fp1, fp4, status); /* A1+T(A3+T(A5+TA7)) */
+ fp1 = floatx80_add(fp1, fp2, status);
+ /* [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))] */
+
+ x = packFloatx80(xSign, xExp, xSig);
+ fp0 = floatx80_mul(fp0, x, status); /* R'*S */
+ fp0 = floatx80_mul(fp0, fp1, status); /* SIN(R')-R' */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, x, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+}
+
+/*
+ * Arc tangent
+ */
+
+floatx80 floatx80_atan(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, tbl_index;
+ floatx80 fp0, fp1, fp2, fp3, xsave;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ a = packFloatx80(aSign, piby2_exp, pi_sig);
+ float_raise(float_flag_inexact, status);
+ return floatx80_move(a, status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ if (compact < 0x3FFB8000 || compact > 0x4002FFFF) {
+ /* |X| >= 16 or |X| < 1/16 */
+ if (compact > 0x3FFF8000) { /* |X| >= 16 */
+ if (compact > 0x40638000) { /* |X| > 2^(100) */
+ fp0 = packFloatx80(aSign, piby2_exp, pi_sig);
+ fp1 = packFloatx80(aSign, 0x0001, one_sig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_sub(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ fp0 = a;
+ fp1 = packFloatx80(1, one_exp, one_sig); /* -1 */
+ fp1 = floatx80_div(fp1, fp0, status); /* X' = -1/X */
+ xsave = fp1;
+ fp0 = floatx80_mul(fp1, fp1, status); /* Y = X'*X' */
+ fp1 = floatx80_mul(fp0, fp0, status); /* Z = Y*Y */
+ fp3 = float64_to_floatx80(make_float64(0xBFB70BF398539E6A),
+ status); /* C5 */
+ fp2 = float64_to_floatx80(make_float64(0x3FBC7187962D1D7D),
+ status); /* C4 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* Z*C5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* Z*C4 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBFC24924827107B8), status),
+ status); /* C3+Z*C5 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FC999999996263E), status),
+ status); /* C2+Z*C4 */
+ fp1 = floatx80_mul(fp1, fp3, status); /* Z*(C3+Z*C5) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* Y*(C2+Z*C4) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0xBFD5555555555536), status),
+ status); /* C1+Z*(C3+Z*C5) */
+ fp0 = floatx80_mul(fp0, xsave, status); /* X'*Y */
+ /* [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)] */
+ fp1 = floatx80_add(fp1, fp2, status);
+ /* X'*Y*([C1+Z*(C3+Z*C5)]+[Y*(C2+Z*C4)]) */
+ fp0 = floatx80_mul(fp0, fp1, status);
+ fp0 = floatx80_add(fp0, xsave, status);
+ fp1 = packFloatx80(aSign, piby2_exp, pi_sig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else { /* |X| < 1/16 */
+ if (compact < 0x3FD78000) { /* |X| < 2^(-40) */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_move(a, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ fp0 = a;
+ xsave = a;
+ fp0 = floatx80_mul(fp0, fp0, status); /* Y = X*X */
+ fp1 = floatx80_mul(fp0, fp0, status); /* Z = Y*Y */
+ fp2 = float64_to_floatx80(make_float64(0x3FB344447F876989),
+ status); /* B6 */
+ fp3 = float64_to_floatx80(make_float64(0xBFB744EE7FAF45DB),
+ status); /* B5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* Z*B6 */
+ fp3 = floatx80_mul(fp3, fp1, status); /* Z*B5 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FBC71C646940220), status),
+ status); /* B4+Z*B6 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0xBFC24924921872F9),
+ status), status); /* B3+Z*B5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* Z*(B4+Z*B6) */
+ fp1 = floatx80_mul(fp1, fp3, status); /* Z*(B3+Z*B5) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FC9999999998FA9), status),
+ status); /* B2+Z*(B4+Z*B6) */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0xBFD5555555555555), status),
+ status); /* B1+Z*(B3+Z*B5) */
+ fp2 = floatx80_mul(fp2, fp0, status); /* Y*(B2+Z*(B4+Z*B6)) */
+ fp0 = floatx80_mul(fp0, xsave, status); /* X*Y */
+ /* [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))] */
+ fp1 = floatx80_add(fp1, fp2, status);
+ /* X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]) */
+ fp0 = floatx80_mul(fp0, fp1, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, xsave, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+ } else {
+ aSig &= UINT64_C(0xF800000000000000);
+ aSig |= UINT64_C(0x0400000000000000);
+ xsave = packFloatx80(aSign, aExp, aSig); /* F */
+ fp0 = a;
+ fp1 = a; /* X */
+ fp2 = packFloatx80(0, one_exp, one_sig); /* 1 */
+ fp1 = floatx80_mul(fp1, xsave, status); /* X*F */
+ fp0 = floatx80_sub(fp0, xsave, status); /* X-F */
+ fp1 = floatx80_add(fp1, fp2, status); /* 1 + X*F */
+ fp0 = floatx80_div(fp0, fp1, status); /* U = (X-F)/(1+X*F) */
+
+ tbl_index = compact;
+
+ tbl_index &= 0x7FFF0000;
+ tbl_index -= 0x3FFB0000;
+ tbl_index >>= 1;
+ tbl_index += compact & 0x00007800;
+ tbl_index >>= 11;
+
+ fp3 = atan_tbl[tbl_index];
+
+ fp3.high |= aSign ? 0x8000 : 0; /* ATAN(F) */
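+ /*
+ * Annotation: mid-range atan uses
+ * ATAN(X) = ATAN(F) + ATAN(U), U = (X-F)/(1+X*F): the two masks
+ * above truncate the significand to its leading bits plus a midpoint
+ * bit to form F, atan_tbl is indexed by exponent and top fraction
+ * bits, and ATAN(U) is evaluated by the short polynomial below.
+ */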
+
+ fp1 = floatx80_mul(fp0, fp0, status); /* V = U*U */
+ fp2 = float64_to_floatx80(make_float64(0xBFF6687E314987D8),
+ status); /* A3 */
+ fp2 = floatx80_add(fp2, fp1, status); /* A3+V */
+ fp2 = floatx80_mul(fp2, fp1, status); /* V*(A3+V) */
+ fp1 = floatx80_mul(fp1, fp0, status); /* U*V */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x4002AC6934A26DB3), status),
+ status); /* A2+V*(A3+V) */
+ fp1 = floatx80_mul(fp1, float64_to_floatx80(
+ make_float64(0xBFC2476F4E1DA28E), status),
+ status); /* A1+U*V */
+ fp1 = floatx80_mul(fp1, fp2, status); /* A1*U*V*(A2+V*(A3+V)) */
+ fp0 = floatx80_add(fp0, fp1, status); /* ATAN(U) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, fp3, status); /* ATAN(X) */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * Arc sine
+ */
+
+floatx80 floatx80_asin(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1, fp2, one;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact >= 0x3FFF8000) { /* |X| >= 1 */
+ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
+ float_raise(float_flag_inexact, status);
+ a = packFloatx80(aSign, piby2_exp, pi_sig);
+ return floatx80_move(a, status);
+ } else { /* |X| > 1 */
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+
+ } /* |X| < 1 */
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ one = packFloatx80(0, one_exp, one_sig);
+ fp0 = a;
+
+ fp1 = floatx80_sub(one, fp0, status); /* 1 - X */
+ fp2 = floatx80_add(one, fp0, status); /* 1 + X */
+ fp1 = floatx80_mul(fp2, fp1, status); /* (1+X)*(1-X) */
+ fp1 = floatx80_sqrt(fp1, status); /* SQRT((1+X)*(1-X)) */
+ fp0 = floatx80_div(fp0, fp1, status); /* X/SQRT((1+X)*(1-X)) */
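+ /*
+ * Annotation: this is ASIN(X) = ATAN(X/SQRT(1 - X*X)), with 1 - X*X
+ * factored as (1+X)*(1-X), presumably to keep the square root
+ * accurate when |X| is close to 1.
+ */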
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_atan(fp0, status); /* ATAN(X/SQRT((1+X)*(1-X))) */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
+
+/*
+ * Arc cosine
+ */
+
+floatx80 floatx80_acos(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1, one;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aExp == 0 && aSig == 0) {
+ float_raise(float_flag_inexact, status);
+ return roundAndPackFloatx80(status->floatx80_rounding_precision, 0,
+ piby2_exp, pi_sig, 0, status);
+ }
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact >= 0x3FFF8000) { /* |X| >= 1 */
+ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
+ if (aSign) { /* X == -1 */
+ a = packFloatx80(0, pi_exp, pi_sig);
+ float_raise(float_flag_inexact, status);
+ return floatx80_move(a, status);
+ } else { /* X == +1 */
+ return packFloatx80(0, 0, 0);
+ }
+ } else { /* |X| > 1 */
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+ } /* |X| < 1 */
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ one = packFloatx80(0, one_exp, one_sig);
+ fp0 = a;
+
+ fp1 = floatx80_add(one, fp0, status); /* 1 + X */
+ fp0 = floatx80_sub(one, fp0, status); /* 1 - X */
+ fp0 = floatx80_div(fp0, fp1, status); /* (1-X)/(1+X) */
+ fp0 = floatx80_sqrt(fp0, status); /* SQRT((1-X)/(1+X)) */
+ fp0 = floatx80_atan(fp0, status); /* ATAN(SQRT((1-X)/(1+X))) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, fp0, status); /* 2 * ATAN(SQRT((1-X)/(1+X))) */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
+
+/*
+ * Hyperbolic arc tangent
+ */
+
+floatx80 floatx80_atanh(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1, fp2, one;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF && (uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact >= 0x3FFF8000) { /* |X| >= 1 */
+ if (aExp == one_exp && aSig == one_sig) { /* |X| == 1 */
+ float_raise(float_flag_divbyzero, status);
+ return packFloatx80(aSign, floatx80_infinity.high,
+ floatx80_infinity.low);
+ } else { /* |X| > 1 */
+ float_raise(float_flag_invalid, status);
+ return floatx80_default_nan(status);
+ }
+ } /* |X| < 1 */
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ one = packFloatx80(0, one_exp, one_sig);
+ fp2 = packFloatx80(aSign, 0x3FFE, one_sig); /* SIGN(X) * (1/2) */
+ fp0 = packFloatx80(0, aExp, aSig); /* Y = |X| */
+ fp1 = packFloatx80(1, aExp, aSig); /* -Y */
+ fp0 = floatx80_add(fp0, fp0, status); /* 2Y */
+ fp1 = floatx80_add(fp1, one, status); /* 1-Y */
+ fp0 = floatx80_div(fp0, fp1, status); /* Z = 2Y/(1-Y) */
+ fp0 = floatx80_lognp1(fp0, status); /* LOG1P(Z) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, fp2,
+ status); /* ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z) */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
+
+/*
+ * e to x minus 1
+ */
+
+floatx80 floatx80_etoxm1(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact, n, j, m, m1;
+ floatx80 fp0, fp1, fp2, fp3, l2, sc, onebysc;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ if (aSign) {
+ return packFloatx80(aSign, one_exp, one_sig);
+ }
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ if (aExp >= 0x3FFD) { /* |X| >= 1/4 */
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact <= 0x4004C215) { /* |X| <= 70 log2 */
+ fp0 = a;
+ fp1 = a;
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0x42B8AA3B), status),
+ status); /* 64/log2 * X */
+ n = floatx80_to_int32(fp0, status); /* int(64/log2*X) */
+ fp0 = int32_to_floatx80(n, status);
+
+ j = n & 0x3F; /* J = N mod 64 */
+ m = n / 64; /* NOTE: this is really arithmetic right shift by 6 */
+ if (n < 0 && j) {
+ /*
+ * arithmetic right shift is division that
+ * rounds towards minus infinity
+ */
+ m--;
+ }
+ m1 = -m;
+ /*m += 0x3FFF; // biased exponent of 2^(M) */
+ /*m1 += 0x3FFF; // biased exponent of -2^(-M) */
+
+ fp2 = fp0; /* N */
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0xBC317218), status),
+ status); /* N * L1, L1 = lead(-log2/64) */
+ l2 = packFloatx80(0, 0x3FDC, UINT64_C(0x82E308654361C4C6));
+ fp2 = floatx80_mul(fp2, l2, status); /* N * L2, L1+L2 = -log2/64 */
+ fp0 = floatx80_add(fp0, fp1, status); /* X + N*L1 */
+ fp0 = floatx80_add(fp0, fp2, status); /* R */
+
+ fp1 = floatx80_mul(fp0, fp0, status); /* S = R*R */
+ fp2 = float32_to_floatx80(make_float32(0x3950097B),
+ status); /* A6 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 is S*A6 */
+ fp3 = floatx80_mul(float32_to_floatx80(make_float32(0x3AB60B6A),
+ status), fp1, status); /* fp3 is S*A5 */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3F81111111174385), status),
+ status); /* fp2 IS A4+S*A6 */
+ fp3 = floatx80_add(fp3, float64_to_floatx80(
+ make_float64(0x3FA5555555554F5A), status),
+ status); /* fp3 is A3+S*A5 */
+ fp2 = floatx80_mul(fp2, fp1, status); /* fp2 IS S*(A4+S*A6) */
+ fp3 = floatx80_mul(fp3, fp1, status); /* fp3 IS S*(A3+S*A5) */
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FC5555555555555), status),
+ status); /* fp2 IS A2+S*(A4+S*A6) */
+ fp3 = floatx80_add(fp3, float32_to_floatx80(
+ make_float32(0x3F000000), status),
+ status); /* fp3 IS A1+S*(A3+S*A5) */
+ fp2 = floatx80_mul(fp2, fp1,
+ status); /* fp2 IS S*(A2+S*(A4+S*A6)) */
+ fp1 = floatx80_mul(fp1, fp3,
+ status); /* fp1 IS S*(A1+S*(A3+S*A5)) */
+ fp2 = floatx80_mul(fp2, fp0,
+ status); /* fp2 IS R*S*(A2+S*(A4+S*A6)) */
+ fp0 = floatx80_add(fp0, fp1,
+ status); /* fp0 IS R+S*(A1+S*(A3+S*A5)) */
+ fp0 = floatx80_add(fp0, fp2, status); /* fp0 IS EXP(R) - 1 */
+
+ fp0 = floatx80_mul(fp0, exp_tbl[j],
+ status); /* 2^(J/64)*(Exp(R)-1) */
+
+ if (m >= 64) {
+ fp1 = float32_to_floatx80(exp_tbl2[j], status);
+ onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */
+ fp1 = floatx80_add(fp1, onebysc, status);
+ fp0 = floatx80_add(fp0, fp1, status);
+ fp0 = floatx80_add(fp0, exp_tbl[j], status);
+ } else if (m < -3) {
+ fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j],
+ status), status);
+ fp0 = floatx80_add(fp0, exp_tbl[j], status);
+ onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */
+ fp0 = floatx80_add(fp0, onebysc, status);
+ } else { /* -3 <= m <= 63 */
+ fp1 = exp_tbl[j];
+ fp0 = floatx80_add(fp0, float32_to_floatx80(exp_tbl2[j],
+ status), status);
+ onebysc = packFloatx80(1, m1 + 0x3FFF, one_sig); /* -2^(-M) */
+ fp1 = floatx80_add(fp1, onebysc, status);
+ fp0 = floatx80_add(fp0, fp1, status);
+ }
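+ /*
+ * Annotation: EXP(X) - 1 = 2^M * (2^(J/64)*exp(R) - 2^(-M)), hence
+ * onebysc = -2^(-M); the three branches above order the additions
+ * by magnitude (M >= 64, M < -3, in between), apparently to limit
+ * cancellation between the table terms and -2^(-M).
+ */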
+
+ sc = packFloatx80(0, m + 0x3FFF, one_sig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, sc, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else { /* |X| > 70 log2 */
+ if (aSign) {
+ fp0 = float32_to_floatx80(make_float32(0xBF800000),
+ status); /* -1 */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(
+ make_float32(0x00800000), status),
+ status); /* -1 + 2^(-126) */
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ return floatx80_etox(a, status);
+ }
+ }
+ } else { /* |X| < 1/4 */
+ if (aExp >= 0x3FBE) {
+ fp0 = a;
+ fp0 = floatx80_mul(fp0, fp0, status); /* S = X*X */
+ fp1 = float32_to_floatx80(make_float32(0x2F30CAA8),
+ status); /* B12 */
+ fp1 = floatx80_mul(fp1, fp0, status); /* S * B12 */
+ fp2 = float32_to_floatx80(make_float32(0x310F8290),
+ status); /* B11 */
+ fp1 = floatx80_add(fp1, float32_to_floatx80(
+ make_float32(0x32D73220), status),
+ status); /* B10 */
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, fp0, status);
+ fp2 = floatx80_add(fp2, float32_to_floatx80(
+ make_float32(0x3493F281), status),
+ status); /* B9 */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3EC71DE3A5774682), status),
+ status); /* B8 */
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, fp0, status);
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3EFA01A019D7CB68), status),
+ status); /* B7 */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3F2A01A01A019DF3), status),
+ status); /* B6 */
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, fp0, status);
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3F56C16C16C170E2), status),
+ status); /* B5 */
+ fp1 = floatx80_add(fp1, float64_to_floatx80(
+ make_float64(0x3F81111111111111), status),
+ status); /* B4 */
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, fp0, status);
+ fp2 = floatx80_add(fp2, float64_to_floatx80(
+ make_float64(0x3FA5555555555555), status),
+ status); /* B3 */
+ fp3 = packFloatx80(0, 0x3FFC, UINT64_C(0xAAAAAAAAAAAAAAAB));
+ fp1 = floatx80_add(fp1, fp3, status); /* B2 */
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, fp0, status);
+
+ fp2 = floatx80_mul(fp2, fp0, status);
+ fp1 = floatx80_mul(fp1, a, status);
+
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(
+ make_float32(0x3F000000), status),
+ status); /* S*B1 */
+ fp1 = floatx80_add(fp1, fp2, status); /* Q */
+ fp0 = floatx80_add(fp0, fp1, status); /* S*B1+Q */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, a, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else { /* |X| < 2^(-65) */
+ sc = packFloatx80(1, 1, one_sig);
+ fp0 = a;
+
+ if (aExp < 0x0033) { /* |X| < 2^(-16382) */
+ fp0 = floatx80_mul(fp0, float64_to_floatx80(
+ make_float64(0x48B0000000000000), status),
+ status);
+ fp0 = floatx80_add(fp0, sc, status);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, float64_to_floatx80(
+ make_float64(0x3730000000000000), status),
+ status);
+ } else {
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, sc, status);
+ }
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+}
+
+/*
+ * Hyperbolic tangent
+ */
+
+floatx80 floatx80_tanh(floatx80 a, float_status *status)
+{
+ bool aSign, vSign;
+ int32_t aExp, vExp;
+ uint64_t aSig, vSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1;
+ uint32_t sign;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ return packFloatx80(aSign, one_exp, one_sig);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact < 0x3FD78000 || compact > 0x3FFFDDCE) {
+ /* TANHBORS */
+ if (compact < 0x3FFF8000) {
+ /* TANHSM */
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_move(a, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ if (compact > 0x40048AA1) {
+ /* TANHHUGE */
+ sign = 0x3F800000;
+ sign |= aSign ? 0x80000000 : 0x00000000;
+ fp0 = float32_to_floatx80(make_float32(sign), status);
+ sign &= 0x80000000;
+ sign ^= 0x80800000; /* -SIGN(X)*EPS */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, float32_to_floatx80(make_float32(sign),
+ status), status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ } else {
+ fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */
+ fp0 = floatx80_etox(fp0, status); /* FP0 IS EXP(Y) */
+ fp0 = floatx80_add(fp0, float32_to_floatx80(
+ make_float32(0x3F800000),
+ status), status); /* EXP(Y)+1 */
+ sign = aSign ? 0x80000000 : 0x00000000;
+ fp1 = floatx80_div(float32_to_floatx80(make_float32(
+ sign ^ 0xC0000000), status), fp0,
+ status); /* -SIGN(X)*2 / [EXP(Y)+1] */
+ fp0 = float32_to_floatx80(make_float32(sign | 0x3F800000),
+ status); /* SIGN */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp1, fp0, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+ } else { /* 2**(-40) < |X| < (5/2)LOG2 */
+ fp0 = packFloatx80(0, aExp + 1, aSig); /* Y = 2|X| */
+ fp0 = floatx80_etoxm1(fp0, status); /* FP0 IS Z = EXPM1(Y) */
+ fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x40000000),
+ status),
+ status); /* Z+2 */
+
+ vSign = extractFloatx80Sign(fp1);
+ vExp = extractFloatx80Exp(fp1);
+ vSig = extractFloatx80Frac(fp1);
+
+ fp1 = packFloatx80(vSign ^ aSign, vExp, vSig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_div(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * Hyperbolic sine
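+ *
+ * In the main argument range this computes
+ * sign(X) * (Z + Z/(1+Z)) / 2 with Z = expm1(|X|),
+ * which equals (exp(X) - exp(-X)) / 2.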
+ */
+
+floatx80 floatx80_sinh(floatx80 a, float_status *status)
+{
+ bool aSign;
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1, fp2;
+ float32 fact;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+ aSign = extractFloatx80Sign(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ return packFloatx80(aSign, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(aSign, 0, 0);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact > 0x400CB167) {
+ /* SINHBIG */
+ if (compact > 0x400CB2B3) {
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ return roundAndPackFloatx80(status->floatx80_rounding_precision,
+ aSign, 0x8000, aSig, 0, status);
+ } else {
+ fp0 = floatx80_abs(a); /* Y = |X| */
+ fp0 = floatx80_sub(fp0, float64_to_floatx80(
+ make_float64(0x40C62D38D3D64634), status),
+ status); /* (|X|-16381LOG2_LEAD) */
+ fp0 = floatx80_sub(fp0, float64_to_floatx80(
+ make_float64(0x3D6F90AEB1E75CC7), status),
+ status); /* |X| - 16381 LOG2, ACCURATE */
+ fp0 = floatx80_etox(fp0, status);
+ fp2 = packFloatx80(aSign, 0x7FFB, one_sig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, fp2, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ } else { /* |X| < 16380 LOG2 */
+ fp0 = floatx80_abs(a); /* Y = |X| */
+ fp0 = floatx80_etoxm1(fp0, status); /* FP0 IS Z = EXPM1(Y) */
+ fp1 = floatx80_add(fp0, float32_to_floatx80(make_float32(0x3F800000),
+ status), status); /* 1+Z */
+ fp2 = fp0;
+ fp0 = floatx80_div(fp0, fp1, status); /* Z/(1+Z) */
+ fp0 = floatx80_add(fp0, fp2, status);
+
+ fact = packFloat32(aSign, 0x7E, 0);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, float32_to_floatx80(fact, status), status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+}
+
+/*
+ * Hyperbolic cosine
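+ *
+ * In the main argument range this computes
+ * exp(|X|)/2 + 1/(2*exp(|X|)), i.e. (exp(X) + exp(-X)) / 2.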
+ */
+
+floatx80 floatx80_cosh(floatx80 a, float_status *status)
+{
+ int32_t aExp;
+ uint64_t aSig;
+
+ FloatRoundMode user_rnd_mode;
+ FloatX80RoundPrec user_rnd_prec;
+
+ int32_t compact;
+ floatx80 fp0, fp1;
+
+ aSig = extractFloatx80Frac(a);
+ aExp = extractFloatx80Exp(a);
+
+ if (aExp == 0x7FFF) {
+ if ((uint64_t) (aSig << 1)) {
+ return propagateFloatx80NaNOneArg(a, status);
+ }
+ return packFloatx80(0, floatx80_infinity.high,
+ floatx80_infinity.low);
+ }
+
+ if (aExp == 0 && aSig == 0) {
+ return packFloatx80(0, one_exp, one_sig);
+ }
+
+ user_rnd_mode = status->float_rounding_mode;
+ user_rnd_prec = status->floatx80_rounding_precision;
+ status->float_rounding_mode = float_round_nearest_even;
+ status->floatx80_rounding_precision = floatx80_precision_x;
+
+ compact = floatx80_make_compact(aExp, aSig);
+
+ if (compact > 0x400CB167) {
+ if (compact > 0x400CB2B3) {
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+ return roundAndPackFloatx80(status->floatx80_rounding_precision, 0,
+ 0x8000, one_sig, 0, status);
+ } else {
+ fp0 = packFloatx80(0, aExp, aSig);
+ fp0 = floatx80_sub(fp0, float64_to_floatx80(
+ make_float64(0x40C62D38D3D64634), status),
+ status);
+ fp0 = floatx80_sub(fp0, float64_to_floatx80(
+ make_float64(0x3D6F90AEB1E75CC7), status),
+ status);
+ fp0 = floatx80_etox(fp0, status);
+ fp1 = packFloatx80(0, 0x7FFB, one_sig);
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_mul(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+ }
+ }
+
+ fp0 = packFloatx80(0, aExp, aSig); /* |X| */
+ fp0 = floatx80_etox(fp0, status); /* EXP(|X|) */
+ fp0 = floatx80_mul(fp0, float32_to_floatx80(make_float32(0x3F000000),
+ status), status); /* (1/2)*EXP(|X|) */
+ fp1 = float32_to_floatx80(make_float32(0x3E800000), status); /* 1/4 */
+ fp1 = floatx80_div(fp1, fp0, status); /* 1/(2*EXP(|X|)) */
+
+ status->float_rounding_mode = user_rnd_mode;
+ status->floatx80_rounding_precision = user_rnd_prec;
+
+ a = floatx80_add(fp0, fp1, status);
+
+ float_raise(float_flag_inexact, status);
+
+ return a;
+}
diff --git a/target/m68k/softfloat.h b/target/m68k/softfloat.h
new file mode 100644
index 000000000..4bb956713
--- /dev/null
+++ b/target/m68k/softfloat.h
@@ -0,0 +1,48 @@
+/*
+ * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator,
+ * derived from NetBSD M68040 FPSP functions,
+ * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic
+ * Package. Those parts of the code (and some later contributions) are
+ * provided under that license, as detailed below.
+ * It has subsequently been modified by contributors to the QEMU Project,
+ * so some portions are provided under:
+ * the SoftFloat-2a license
+ * the BSD license
+ * GPL-v2-or-later
+ *
+ * Any future contributions to this file will be taken to be licensed under
+ * the Softfloat-2a license unless specifically indicated otherwise.
+ */
+
+/*
+ * Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef TARGET_M68K_SOFTFLOAT_H
+#define TARGET_M68K_SOFTFLOAT_H
+#include "fpu/softfloat.h"
+
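+/*
+ * Transcendental and auxiliary operations on 80-bit extended-precision
+ * values, ported from the m68k FPSP via the Previous emulator. Each
+ * routine honours and updates *status like the core softfloat functions.
+ */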
+floatx80 floatx80_getman(floatx80 a, float_status *status);
+floatx80 floatx80_getexp(floatx80 a, float_status *status);
+floatx80 floatx80_scale(floatx80 a, floatx80 b, float_status *status);
+floatx80 floatx80_move(floatx80 a, float_status *status);
+floatx80 floatx80_lognp1(floatx80 a, float_status *status);
+floatx80 floatx80_logn(floatx80 a, float_status *status);
+floatx80 floatx80_log10(floatx80 a, float_status *status);
+floatx80 floatx80_log2(floatx80 a, float_status *status);
+floatx80 floatx80_etox(floatx80 a, float_status *status);
+floatx80 floatx80_twotox(floatx80 a, float_status *status);
+floatx80 floatx80_tentox(floatx80 a, float_status *status);
+floatx80 floatx80_tan(floatx80 a, float_status *status);
+floatx80 floatx80_sin(floatx80 a, float_status *status);
+floatx80 floatx80_cos(floatx80 a, float_status *status);
+floatx80 floatx80_atan(floatx80 a, float_status *status);
+floatx80 floatx80_asin(floatx80 a, float_status *status);
+floatx80 floatx80_acos(floatx80 a, float_status *status);
+floatx80 floatx80_atanh(floatx80 a, float_status *status);
+floatx80 floatx80_etoxm1(floatx80 a, float_status *status);
+floatx80 floatx80_tanh(floatx80 a, float_status *status);
+floatx80 floatx80_sinh(floatx80 a, float_status *status);
+floatx80 floatx80_cosh(floatx80 a, float_status *status);
+#endif
diff --git a/target/m68k/softfloat_fpsp_tables.h b/target/m68k/softfloat_fpsp_tables.h
new file mode 100644
index 000000000..2ccd9e8bc
--- /dev/null
+++ b/target/m68k/softfloat_fpsp_tables.h
@@ -0,0 +1,642 @@
+/*
+ * Ported from a work by Andreas Grabher for Previous, NeXT Computer Emulator,
+ * derived from NetBSD M68040 FPSP functions,
+ * derived from release 2a of the SoftFloat IEC/IEEE Floating-point Arithmetic
+ * Package. Those parts of the code (and some later contributions) are
+ * provided under that license, as detailed below.
+ * It has subsequently been modified by contributors to the QEMU Project,
+ * so some portions are provided under:
+ * the SoftFloat-2a license
+ * the BSD license
+ * GPL-v2-or-later
+ *
+ * Any future contributions to this file will be taken to be licensed under
+ * the Softfloat-2a license unless specifically indicated otherwise.
+ */
+
+/*
+ * Portions of this work are licensed under the terms of the GNU GPL,
+ * version 2 or later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H
+#define TARGET_M68K_SOFTFLOAT_FPSP_TABLES_H
+
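+/*
+ * Lookup tables for the FPSP-derived transcendental routines: logarithm
+ * and exponential coefficients, multiples of pi/2 for trigonometric
+ * argument reduction, and atan values. The companion "_tbl2" arrays
+ * carry extra-precision correction terms for the matching "_tbl" entries.
+ */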
+static const floatx80 log_tbl[128] = {
+ make_floatx80_init(0x3FFE, 0xFE03F80FE03F80FE),
+ make_floatx80_init(0x3FF7, 0xFF015358833C47E2),
+ make_floatx80_init(0x3FFE, 0xFA232CF252138AC0),
+ make_floatx80_init(0x3FF9, 0xBDC8D83EAD88D549),
+ make_floatx80_init(0x3FFE, 0xF6603D980F6603DA),
+ make_floatx80_init(0x3FFA, 0x9CF43DCFF5EAFD48),
+ make_floatx80_init(0x3FFE, 0xF2B9D6480F2B9D65),
+ make_floatx80_init(0x3FFA, 0xDA16EB88CB8DF614),
+ make_floatx80_init(0x3FFE, 0xEF2EB71FC4345238),
+ make_floatx80_init(0x3FFB, 0x8B29B7751BD70743),
+ make_floatx80_init(0x3FFE, 0xEBBDB2A5C1619C8C),
+ make_floatx80_init(0x3FFB, 0xA8D839F830C1FB49),
+ make_floatx80_init(0x3FFE, 0xE865AC7B7603A197),
+ make_floatx80_init(0x3FFB, 0xC61A2EB18CD907AD),
+ make_floatx80_init(0x3FFE, 0xE525982AF70C880E),
+ make_floatx80_init(0x3FFB, 0xE2F2A47ADE3A18AF),
+ make_floatx80_init(0x3FFE, 0xE1FC780E1FC780E2),
+ make_floatx80_init(0x3FFB, 0xFF64898EDF55D551),
+ make_floatx80_init(0x3FFE, 0xDEE95C4CA037BA57),
+ make_floatx80_init(0x3FFC, 0x8DB956A97B3D0148),
+ make_floatx80_init(0x3FFE, 0xDBEB61EED19C5958),
+ make_floatx80_init(0x3FFC, 0x9B8FE100F47BA1DE),
+ make_floatx80_init(0x3FFE, 0xD901B2036406C80E),
+ make_floatx80_init(0x3FFC, 0xA9372F1D0DA1BD17),
+ make_floatx80_init(0x3FFE, 0xD62B80D62B80D62C),
+ make_floatx80_init(0x3FFC, 0xB6B07F38CE90E46B),
+ make_floatx80_init(0x3FFE, 0xD3680D3680D3680D),
+ make_floatx80_init(0x3FFC, 0xC3FD032906488481),
+ make_floatx80_init(0x3FFE, 0xD0B69FCBD2580D0B),
+ make_floatx80_init(0x3FFC, 0xD11DE0FF15AB18CA),
+ make_floatx80_init(0x3FFE, 0xCE168A7725080CE1),
+ make_floatx80_init(0x3FFC, 0xDE1433A16C66B150),
+ make_floatx80_init(0x3FFE, 0xCB8727C065C393E0),
+ make_floatx80_init(0x3FFC, 0xEAE10B5A7DDC8ADD),
+ make_floatx80_init(0x3FFE, 0xC907DA4E871146AD),
+ make_floatx80_init(0x3FFC, 0xF7856E5EE2C9B291),
+ make_floatx80_init(0x3FFE, 0xC6980C6980C6980C),
+ make_floatx80_init(0x3FFD, 0x82012CA5A68206D7),
+ make_floatx80_init(0x3FFE, 0xC4372F855D824CA6),
+ make_floatx80_init(0x3FFD, 0x882C5FCD7256A8C5),
+ make_floatx80_init(0x3FFE, 0xC1E4BBD595F6E947),
+ make_floatx80_init(0x3FFD, 0x8E44C60B4CCFD7DE),
+ make_floatx80_init(0x3FFE, 0xBFA02FE80BFA02FF),
+ make_floatx80_init(0x3FFD, 0x944AD09EF4351AF6),
+ make_floatx80_init(0x3FFE, 0xBD69104707661AA3),
+ make_floatx80_init(0x3FFD, 0x9A3EECD4C3EAA6B2),
+ make_floatx80_init(0x3FFE, 0xBB3EE721A54D880C),
+ make_floatx80_init(0x3FFD, 0xA0218434353F1DE8),
+ make_floatx80_init(0x3FFE, 0xB92143FA36F5E02E),
+ make_floatx80_init(0x3FFD, 0xA5F2FCABBBC506DA),
+ make_floatx80_init(0x3FFE, 0xB70FBB5A19BE3659),
+ make_floatx80_init(0x3FFD, 0xABB3B8BA2AD362A5),
+ make_floatx80_init(0x3FFE, 0xB509E68A9B94821F),
+ make_floatx80_init(0x3FFD, 0xB1641795CE3CA97B),
+ make_floatx80_init(0x3FFE, 0xB30F63528917C80B),
+ make_floatx80_init(0x3FFD, 0xB70475515D0F1C61),
+ make_floatx80_init(0x3FFE, 0xB11FD3B80B11FD3C),
+ make_floatx80_init(0x3FFD, 0xBC952AFEEA3D13E1),
+ make_floatx80_init(0x3FFE, 0xAF3ADDC680AF3ADE),
+ make_floatx80_init(0x3FFD, 0xC2168ED0F458BA4A),
+ make_floatx80_init(0x3FFE, 0xAD602B580AD602B6),
+ make_floatx80_init(0x3FFD, 0xC788F439B3163BF1),
+ make_floatx80_init(0x3FFE, 0xAB8F69E28359CD11),
+ make_floatx80_init(0x3FFD, 0xCCECAC08BF04565D),
+ make_floatx80_init(0x3FFE, 0xA9C84A47A07F5638),
+ make_floatx80_init(0x3FFD, 0xD24204872DD85160),
+ make_floatx80_init(0x3FFE, 0xA80A80A80A80A80B),
+ make_floatx80_init(0x3FFD, 0xD78949923BC3588A),
+ make_floatx80_init(0x3FFE, 0xA655C4392D7B73A8),
+ make_floatx80_init(0x3FFD, 0xDCC2C4B49887DACC),
+ make_floatx80_init(0x3FFE, 0xA4A9CF1D96833751),
+ make_floatx80_init(0x3FFD, 0xE1EEBD3E6D6A6B9E),
+ make_floatx80_init(0x3FFE, 0xA3065E3FAE7CD0E0),
+ make_floatx80_init(0x3FFD, 0xE70D785C2F9F5BDC),
+ make_floatx80_init(0x3FFE, 0xA16B312EA8FC377D),
+ make_floatx80_init(0x3FFD, 0xEC1F392C5179F283),
+ make_floatx80_init(0x3FFE, 0x9FD809FD809FD80A),
+ make_floatx80_init(0x3FFD, 0xF12440D3E36130E6),
+ make_floatx80_init(0x3FFE, 0x9E4CAD23DD5F3A20),
+ make_floatx80_init(0x3FFD, 0xF61CCE92346600BB),
+ make_floatx80_init(0x3FFE, 0x9CC8E160C3FB19B9),
+ make_floatx80_init(0x3FFD, 0xFB091FD38145630A),
+ make_floatx80_init(0x3FFE, 0x9B4C6F9EF03A3CAA),
+ make_floatx80_init(0x3FFD, 0xFFE97042BFA4C2AD),
+ make_floatx80_init(0x3FFE, 0x99D722DABDE58F06),
+ make_floatx80_init(0x3FFE, 0x825EFCED49369330),
+ make_floatx80_init(0x3FFE, 0x9868C809868C8098),
+ make_floatx80_init(0x3FFE, 0x84C37A7AB9A905C9),
+ make_floatx80_init(0x3FFE, 0x97012E025C04B809),
+ make_floatx80_init(0x3FFE, 0x87224C2E8E645FB7),
+ make_floatx80_init(0x3FFE, 0x95A02568095A0257),
+ make_floatx80_init(0x3FFE, 0x897B8CAC9F7DE298),
+ make_floatx80_init(0x3FFE, 0x9445809445809446),
+ make_floatx80_init(0x3FFE, 0x8BCF55DEC4CD05FE),
+ make_floatx80_init(0x3FFE, 0x92F113840497889C),
+ make_floatx80_init(0x3FFE, 0x8E1DC0FB89E125E5),
+ make_floatx80_init(0x3FFE, 0x91A2B3C4D5E6F809),
+ make_floatx80_init(0x3FFE, 0x9066E68C955B6C9B),
+ make_floatx80_init(0x3FFE, 0x905A38633E06C43B),
+ make_floatx80_init(0x3FFE, 0x92AADE74C7BE59E0),
+ make_floatx80_init(0x3FFE, 0x8F1779D9FDC3A219),
+ make_floatx80_init(0x3FFE, 0x94E9BFF615845643),
+ make_floatx80_init(0x3FFE, 0x8DDA520237694809),
+ make_floatx80_init(0x3FFE, 0x9723A1B720134203),
+ make_floatx80_init(0x3FFE, 0x8CA29C046514E023),
+ make_floatx80_init(0x3FFE, 0x995899C890EB8990),
+ make_floatx80_init(0x3FFE, 0x8B70344A139BC75A),
+ make_floatx80_init(0x3FFE, 0x9B88BDAA3A3DAE2F),
+ make_floatx80_init(0x3FFE, 0x8A42F8705669DB46),
+ make_floatx80_init(0x3FFE, 0x9DB4224FFFE1157C),
+ make_floatx80_init(0x3FFE, 0x891AC73AE9819B50),
+ make_floatx80_init(0x3FFE, 0x9FDADC268B7A12DA),
+ make_floatx80_init(0x3FFE, 0x87F78087F78087F8),
+ make_floatx80_init(0x3FFE, 0xA1FCFF17CE733BD4),
+ make_floatx80_init(0x3FFE, 0x86D905447A34ACC6),
+ make_floatx80_init(0x3FFE, 0xA41A9E8F5446FB9F),
+ make_floatx80_init(0x3FFE, 0x85BF37612CEE3C9B),
+ make_floatx80_init(0x3FFE, 0xA633CD7E6771CD8B),
+ make_floatx80_init(0x3FFE, 0x84A9F9C8084A9F9D),
+ make_floatx80_init(0x3FFE, 0xA8489E600B435A5E),
+ make_floatx80_init(0x3FFE, 0x839930523FBE3368),
+ make_floatx80_init(0x3FFE, 0xAA59233CCCA4BD49),
+ make_floatx80_init(0x3FFE, 0x828CBFBEB9A020A3),
+ make_floatx80_init(0x3FFE, 0xAC656DAE6BCC4985),
+ make_floatx80_init(0x3FFE, 0x81848DA8FAF0D277),
+ make_floatx80_init(0x3FFE, 0xAE6D8EE360BB2468),
+ make_floatx80_init(0x3FFE, 0x8080808080808081),
+ make_floatx80_init(0x3FFE, 0xB07197A23C46C654)
+};
+
+static const floatx80 exp_tbl[64] = {
+ make_floatx80_init(0x3FFF, 0x8000000000000000),
+ make_floatx80_init(0x3FFF, 0x8164D1F3BC030774),
+ make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D8),
+ make_floatx80_init(0x3FFF, 0x843A28C3ACDE4048),
+ make_floatx80_init(0x3FFF, 0x85AAC367CC487B14),
+ make_floatx80_init(0x3FFF, 0x871F61969E8D1010),
+ make_floatx80_init(0x3FFF, 0x88980E8092DA8528),
+ make_floatx80_init(0x3FFF, 0x8A14D575496EFD9C),
+ make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E8),
+ make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E4),
+ make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0),
+ make_floatx80_init(0x3FFF, 0x9031DC431466B1DC),
+ make_floatx80_init(0x3FFF, 0x91C3D373AB11C338),
+ make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C),
+ make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70960),
+ make_floatx80_init(0x3FFF, 0x96942D3720185A00),
+ make_floatx80_init(0x3FFF, 0x9837F0518DB8A970),
+ make_floatx80_init(0x3FFF, 0x99E0459320B7FA64),
+ make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5538),
+ make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB750),
+ make_floatx80_init(0x3FFF, 0x9EF5326091A111AC),
+ make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC4),
+ make_floatx80_init(0x3FFF, 0xA27043030C496818),
+ make_floatx80_init(0x3FFF, 0xA43515AE09E680A0),
+ make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EC),
+ make_floatx80_init(0x3FFF, 0xA7CD93B4E9653568),
+ make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8),
+ make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED338),
+ make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC8),
+ make_floatx80_init(0x3FFF, 0xAF3B78AD690A4374),
+ make_floatx80_init(0x3FFF, 0xB123F581D2AC2590),
+ make_floatx80_init(0x3FFF, 0xB311C412A9112488),
+ make_floatx80_init(0x3FFF, 0xB504F333F9DE6484),
+ make_floatx80_init(0x3FFF, 0xB6FD91E328D17790),
+ make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE8),
+ make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FC),
+ make_floatx80_init(0x3FFF, 0xBD08A39F580C36C0),
+ make_floatx80_init(0x3FFF, 0xBF1799B67A731084),
+ make_floatx80_init(0x3FFF, 0xC12C4CCA66709458),
+ make_floatx80_init(0x3FFF, 0xC346CCDA24976408),
+ make_floatx80_init(0x3FFF, 0xC5672A115506DADC),
+ make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15C),
+ make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A4),
+ make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5C),
+ make_floatx80_init(0x3FFF, 0xCE248C151F8480E4),
+ make_floatx80_init(0x3FFF, 0xD06333DAEF2B2594),
+ make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45C),
+ make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA20),
+ make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4),
+ make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B4),
+ make_floatx80_init(0x3FFF, 0xDBFBB797DAF23754),
+ make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124),
+ make_floatx80_init(0x3FFF, 0xE0CCDEEC2A94E110),
+ make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A50),
+ make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8),
+ make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68),
+ make_floatx80_init(0x3FFF, 0xEAC0C6E7DD243930),
+ make_floatx80_init(0x3FFF, 0xED4F301ED9942B84),
+ make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CC),
+ make_floatx80_init(0x3FFF, 0xF281773C59FFB138),
+ make_floatx80_init(0x3FFF, 0xF5257D152486CC2C),
+ make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB8),
+ make_floatx80_init(0x3FFF, 0xFA83B2DB722A033C),
+ make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C174)
+};
+
+static const float32 exp_tbl2[64] = {
+ const_float32(0x00000000),
+ const_float32(0x9F841A9B),
+ const_float32(0x9FC1D5B9),
+ const_float32(0xA0728369),
+ const_float32(0x1FC5C95C),
+ const_float32(0x1EE85C9F),
+ const_float32(0x9FA20729),
+ const_float32(0xA07BF9AF),
+ const_float32(0xA0020DCF),
+ const_float32(0x205A63DA),
+ const_float32(0x1EB70051),
+ const_float32(0x1F6EB029),
+ const_float32(0xA0781494),
+ const_float32(0x9EB319B0),
+ const_float32(0x2017457D),
+ const_float32(0x1F11D537),
+ const_float32(0x9FB952DD),
+ const_float32(0x1FE43087),
+ const_float32(0x1FA2A818),
+ const_float32(0x1FDE494D),
+ const_float32(0x20504890),
+ const_float32(0xA073691C),
+ const_float32(0x1F9B7A05),
+ const_float32(0xA0797126),
+ const_float32(0xA071A140),
+ const_float32(0x204F62DA),
+ const_float32(0x1F283C4A),
+ const_float32(0x9F9A7FDC),
+ const_float32(0xA05B3FAC),
+ const_float32(0x1FDF2610),
+ const_float32(0x9F705F90),
+ const_float32(0x201F678A),
+ const_float32(0x1F32FB13),
+ const_float32(0x20038B30),
+ const_float32(0x200DC3CC),
+ const_float32(0x9F8B2AE6),
+ const_float32(0xA02BBF70),
+ const_float32(0xA00BF518),
+ const_float32(0xA041DD41),
+ const_float32(0x9FDF137B),
+ const_float32(0x201F1568),
+ const_float32(0x1FC13A2E),
+ const_float32(0xA03F8F03),
+ const_float32(0x1FF4907D),
+ const_float32(0x9E6E53E4),
+ const_float32(0x1FD6D45C),
+ const_float32(0xA076EDB9),
+ const_float32(0x9FA6DE21),
+ const_float32(0x1EE69A2F),
+ const_float32(0x207F439F),
+ const_float32(0x201EC207),
+ const_float32(0x9E8BE175),
+ const_float32(0x20032C4B),
+ const_float32(0x2004DFF5),
+ const_float32(0x1E72F47A),
+ const_float32(0x1F722F22),
+ const_float32(0xA017E945),
+ const_float32(0x1F401A5B),
+ const_float32(0x9FB9A9E3),
+ const_float32(0x20744C05),
+ const_float32(0x1F773A19),
+ const_float32(0x1FFE90D5),
+ const_float32(0xA041ED22),
+ const_float32(0x1F853F3A),
+};
+
+static const floatx80 exp2_tbl[64] = {
+ make_floatx80_init(0x3FFF, 0x8000000000000000),
+ make_floatx80_init(0x3FFF, 0x8164D1F3BC030773),
+ make_floatx80_init(0x3FFF, 0x82CD8698AC2BA1D7),
+ make_floatx80_init(0x3FFF, 0x843A28C3ACDE4046),
+ make_floatx80_init(0x3FFF, 0x85AAC367CC487B15),
+ make_floatx80_init(0x3FFF, 0x871F61969E8D1010),
+ make_floatx80_init(0x3FFF, 0x88980E8092DA8527),
+ make_floatx80_init(0x3FFF, 0x8A14D575496EFD9A),
+ make_floatx80_init(0x3FFF, 0x8B95C1E3EA8BD6E7),
+ make_floatx80_init(0x3FFF, 0x8D1ADF5B7E5BA9E6),
+ make_floatx80_init(0x3FFF, 0x8EA4398B45CD53C0),
+ make_floatx80_init(0x3FFF, 0x9031DC431466B1DC),
+ make_floatx80_init(0x3FFF, 0x91C3D373AB11C336),
+ make_floatx80_init(0x3FFF, 0x935A2B2F13E6E92C),
+ make_floatx80_init(0x3FFF, 0x94F4EFA8FEF70961),
+ make_floatx80_init(0x3FFF, 0x96942D3720185A00),
+ make_floatx80_init(0x3FFF, 0x9837F0518DB8A96F),
+ make_floatx80_init(0x3FFF, 0x99E0459320B7FA65),
+ make_floatx80_init(0x3FFF, 0x9B8D39B9D54E5539),
+ make_floatx80_init(0x3FFF, 0x9D3ED9A72CFFB751),
+ make_floatx80_init(0x3FFF, 0x9EF5326091A111AE),
+ make_floatx80_init(0x3FFF, 0xA0B0510FB9714FC2),
+ make_floatx80_init(0x3FFF, 0xA27043030C496819),
+ make_floatx80_init(0x3FFF, 0xA43515AE09E6809E),
+ make_floatx80_init(0x3FFF, 0xA5FED6A9B15138EA),
+ make_floatx80_init(0x3FFF, 0xA7CD93B4E965356A),
+ make_floatx80_init(0x3FFF, 0xA9A15AB4EA7C0EF8),
+ make_floatx80_init(0x3FFF, 0xAB7A39B5A93ED337),
+ make_floatx80_init(0x3FFF, 0xAD583EEA42A14AC6),
+ make_floatx80_init(0x3FFF, 0xAF3B78AD690A4375),
+ make_floatx80_init(0x3FFF, 0xB123F581D2AC2590),
+ make_floatx80_init(0x3FFF, 0xB311C412A9112489),
+ make_floatx80_init(0x3FFF, 0xB504F333F9DE6484),
+ make_floatx80_init(0x3FFF, 0xB6FD91E328D17791),
+ make_floatx80_init(0x3FFF, 0xB8FBAF4762FB9EE9),
+ make_floatx80_init(0x3FFF, 0xBAFF5AB2133E45FB),
+ make_floatx80_init(0x3FFF, 0xBD08A39F580C36BF),
+ make_floatx80_init(0x3FFF, 0xBF1799B67A731083),
+ make_floatx80_init(0x3FFF, 0xC12C4CCA66709456),
+ make_floatx80_init(0x3FFF, 0xC346CCDA24976407),
+ make_floatx80_init(0x3FFF, 0xC5672A115506DADD),
+ make_floatx80_init(0x3FFF, 0xC78D74C8ABB9B15D),
+ make_floatx80_init(0x3FFF, 0xC9B9BD866E2F27A3),
+ make_floatx80_init(0x3FFF, 0xCBEC14FEF2727C5D),
+ make_floatx80_init(0x3FFF, 0xCE248C151F8480E4),
+ make_floatx80_init(0x3FFF, 0xD06333DAEF2B2595),
+ make_floatx80_init(0x3FFF, 0xD2A81D91F12AE45A),
+ make_floatx80_init(0x3FFF, 0xD4F35AABCFEDFA1F),
+ make_floatx80_init(0x3FFF, 0xD744FCCAD69D6AF4),
+ make_floatx80_init(0x3FFF, 0xD99D15C278AFD7B6),
+ make_floatx80_init(0x3FFF, 0xDBFBB797DAF23755),
+ make_floatx80_init(0x3FFF, 0xDE60F4825E0E9124),
+ make_floatx80_init(0x3FFF, 0xE0CCDEEC2A94E111),
+ make_floatx80_init(0x3FFF, 0xE33F8972BE8A5A51),
+ make_floatx80_init(0x3FFF, 0xE5B906E77C8348A8),
+ make_floatx80_init(0x3FFF, 0xE8396A503C4BDC68),
+ make_floatx80_init(0x3FFF, 0xEAC0C6E7DD24392F),
+ make_floatx80_init(0x3FFF, 0xED4F301ED9942B84),
+ make_floatx80_init(0x3FFF, 0xEFE4B99BDCDAF5CB),
+ make_floatx80_init(0x3FFF, 0xF281773C59FFB13A),
+ make_floatx80_init(0x3FFF, 0xF5257D152486CC2C),
+ make_floatx80_init(0x3FFF, 0xF7D0DF730AD13BB9),
+ make_floatx80_init(0x3FFF, 0xFA83B2DB722A033A),
+ make_floatx80_init(0x3FFF, 0xFD3E0C0CF486C175)
+};
+
+static const uint32_t exp2_tbl2[64] = {
+ 0x3F738000, 0x3FBEF7CA, 0x3FBDF8A9, 0x3FBCD7C9,
+ 0xBFBDE8DA, 0x3FBDE85C, 0x3FBEBBF1, 0x3FBB80CA,
+ 0xBFBA8373, 0xBFBE9670, 0x3FBDB700, 0x3FBEEEB0,
+ 0x3FBBFD6D, 0xBFBDB319, 0x3FBDBA2B, 0x3FBE91D5,
+ 0x3FBE8D5A, 0xBFBCDE7B, 0xBFBEBAAF, 0xBFBD86DA,
+ 0xBFBEBEDD, 0x3FBCC96E, 0xBFBEC90B, 0x3FBBD1DB,
+ 0x3FBCE5EB, 0xBFBEC274, 0x3FBEA83C, 0x3FBECB00,
+ 0x3FBE9301, 0xBFBD8367, 0xBFBEF05F, 0x3FBDFB3C,
+ 0x3FBEB2FB, 0x3FBAE2CB, 0x3FBCDC3C, 0x3FBEE9AA,
+ 0xBFBEAEFD, 0xBFBCBF51, 0x3FBEF88A, 0x3FBD83B2,
+ 0x3FBDF8AB, 0xBFBDFB17, 0xBFBEFE3C, 0xBFBBB6F8,
+ 0xBFBCEE53, 0xBFBDA4AE, 0x3FBC9124, 0x3FBEB243,
+ 0x3FBDE69A, 0xBFB8BC61, 0x3FBDF610, 0xBFBD8BE1,
+ 0x3FBACB12, 0x3FBB9BFE, 0x3FBCF2F4, 0x3FBEF22F,
+ 0xBFBDBF4A, 0x3FBEC01A, 0x3FBE8CAC, 0xBFBCBB3F,
+ 0x3FBEF73A, 0xBFB8B795, 0x3FBEF84B, 0xBFBEF581
+};
+
+static const floatx80 pi_tbl[65] = {
+ make_floatx80_init(0xC004, 0xC90FDAA22168C235),
+ make_floatx80_init(0xC004, 0xC2C75BCD105D7C23),
+ make_floatx80_init(0xC004, 0xBC7EDCF7FF523611),
+ make_floatx80_init(0xC004, 0xB6365E22EE46F000),
+ make_floatx80_init(0xC004, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0xC004, 0xA9A56078CC3063DD),
+ make_floatx80_init(0xC004, 0xA35CE1A3BB251DCB),
+ make_floatx80_init(0xC004, 0x9D1462CEAA19D7B9),
+ make_floatx80_init(0xC004, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0xC004, 0x9083652488034B96),
+ make_floatx80_init(0xC004, 0x8A3AE64F76F80584),
+ make_floatx80_init(0xC004, 0x83F2677A65ECBF73),
+ make_floatx80_init(0xC003, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0xC003, 0xEEC2D3A087AC669F),
+ make_floatx80_init(0xC003, 0xE231D5F66595DA7B),
+ make_floatx80_init(0xC003, 0xD5A0D84C437F4E58),
+ make_floatx80_init(0xC003, 0xC90FDAA22168C235),
+ make_floatx80_init(0xC003, 0xBC7EDCF7FF523611),
+ make_floatx80_init(0xC003, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0xC003, 0xA35CE1A3BB251DCB),
+ make_floatx80_init(0xC003, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0xC003, 0x8A3AE64F76F80584),
+ make_floatx80_init(0xC002, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0xC002, 0xE231D5F66595DA7B),
+ make_floatx80_init(0xC002, 0xC90FDAA22168C235),
+ make_floatx80_init(0xC002, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0xC002, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0xC001, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0xC001, 0xC90FDAA22168C235),
+ make_floatx80_init(0xC001, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0xC000, 0xC90FDAA22168C235),
+ make_floatx80_init(0xBFFF, 0xC90FDAA22168C235),
+ make_floatx80_init(0x0000, 0x0000000000000000),
+ make_floatx80_init(0x3FFF, 0xC90FDAA22168C235),
+ make_floatx80_init(0x4000, 0xC90FDAA22168C235),
+ make_floatx80_init(0x4001, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0x4001, 0xC90FDAA22168C235),
+ make_floatx80_init(0x4001, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0x4002, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0x4002, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0x4002, 0xC90FDAA22168C235),
+ make_floatx80_init(0x4002, 0xE231D5F66595DA7B),
+ make_floatx80_init(0x4002, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0x4003, 0x8A3AE64F76F80584),
+ make_floatx80_init(0x4003, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0x4003, 0xA35CE1A3BB251DCB),
+ make_floatx80_init(0x4003, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0x4003, 0xBC7EDCF7FF523611),
+ make_floatx80_init(0x4003, 0xC90FDAA22168C235),
+ make_floatx80_init(0x4003, 0xD5A0D84C437F4E58),
+ make_floatx80_init(0x4003, 0xE231D5F66595DA7B),
+ make_floatx80_init(0x4003, 0xEEC2D3A087AC669F),
+ make_floatx80_init(0x4003, 0xFB53D14AA9C2F2C2),
+ make_floatx80_init(0x4004, 0x83F2677A65ECBF73),
+ make_floatx80_init(0x4004, 0x8A3AE64F76F80584),
+ make_floatx80_init(0x4004, 0x9083652488034B96),
+ make_floatx80_init(0x4004, 0x96CBE3F9990E91A8),
+ make_floatx80_init(0x4004, 0x9D1462CEAA19D7B9),
+ make_floatx80_init(0x4004, 0xA35CE1A3BB251DCB),
+ make_floatx80_init(0x4004, 0xA9A56078CC3063DD),
+ make_floatx80_init(0x4004, 0xAFEDDF4DDD3BA9EE),
+ make_floatx80_init(0x4004, 0xB6365E22EE46F000),
+ make_floatx80_init(0x4004, 0xBC7EDCF7FF523611),
+ make_floatx80_init(0x4004, 0xC2C75BCD105D7C23),
+ make_floatx80_init(0x4004, 0xC90FDAA22168C235)
+};
+
+static const float32 pi_tbl2[65] = {
+ const_float32(0x21800000),
+ const_float32(0xA0D00000),
+ const_float32(0xA1E80000),
+ const_float32(0x21480000),
+ const_float32(0xA1200000),
+ const_float32(0x21FC0000),
+ const_float32(0x21100000),
+ const_float32(0xA1580000),
+ const_float32(0x21E00000),
+ const_float32(0x20B00000),
+ const_float32(0xA1880000),
+ const_float32(0x21C40000),
+ const_float32(0x20000000),
+ const_float32(0x21380000),
+ const_float32(0xA1300000),
+ const_float32(0x9FC00000),
+ const_float32(0x21000000),
+ const_float32(0xA1680000),
+ const_float32(0xA0A00000),
+ const_float32(0x20900000),
+ const_float32(0x21600000),
+ const_float32(0xA1080000),
+ const_float32(0x1F800000),
+ const_float32(0xA0B00000),
+ const_float32(0x20800000),
+ const_float32(0xA0200000),
+ const_float32(0x20E00000),
+ const_float32(0x1F000000),
+ const_float32(0x20000000),
+ const_float32(0x20600000),
+ const_float32(0x1F800000),
+ const_float32(0x1F000000),
+ const_float32(0x00000000),
+ const_float32(0x9F000000),
+ const_float32(0x9F800000),
+ const_float32(0xA0600000),
+ const_float32(0xA0000000),
+ const_float32(0x9F000000),
+ const_float32(0xA0E00000),
+ const_float32(0x20200000),
+ const_float32(0xA0800000),
+ const_float32(0x20B00000),
+ const_float32(0x9F800000),
+ const_float32(0x21080000),
+ const_float32(0xA1600000),
+ const_float32(0xA0900000),
+ const_float32(0x20A00000),
+ const_float32(0x21680000),
+ const_float32(0xA1000000),
+ const_float32(0x1FC00000),
+ const_float32(0x21300000),
+ const_float32(0xA1380000),
+ const_float32(0xA0000000),
+ const_float32(0xA1C40000),
+ const_float32(0x21880000),
+ const_float32(0xA0B00000),
+ const_float32(0xA1E00000),
+ const_float32(0x21580000),
+ const_float32(0xA1100000),
+ const_float32(0xA1FC0000),
+ const_float32(0x21200000),
+ const_float32(0xA1480000),
+ const_float32(0x21E80000),
+ const_float32(0x20D00000),
+ const_float32(0xA1800000),
+};
+
+static const floatx80 atan_tbl[128] = {
+ make_floatx80_init(0x3FFB, 0x83D152C5060B7A51),
+ make_floatx80_init(0x3FFB, 0x8BC8544565498B8B),
+ make_floatx80_init(0x3FFB, 0x93BE406017626B0D),
+ make_floatx80_init(0x3FFB, 0x9BB3078D35AEC202),
+ make_floatx80_init(0x3FFB, 0xA3A69A525DDCE7DE),
+ make_floatx80_init(0x3FFB, 0xAB98E94362765619),
+ make_floatx80_init(0x3FFB, 0xB389E502F9C59862),
+ make_floatx80_init(0x3FFB, 0xBB797E436B09E6FB),
+ make_floatx80_init(0x3FFB, 0xC367A5C739E5F446),
+ make_floatx80_init(0x3FFB, 0xCB544C61CFF7D5C6),
+ make_floatx80_init(0x3FFB, 0xD33F62F82488533E),
+ make_floatx80_init(0x3FFB, 0xDB28DA8162404C77),
+ make_floatx80_init(0x3FFB, 0xE310A4078AD34F18),
+ make_floatx80_init(0x3FFB, 0xEAF6B0A8188EE1EB),
+ make_floatx80_init(0x3FFB, 0xF2DAF1949DBE79D5),
+ make_floatx80_init(0x3FFB, 0xFABD581361D47E3E),
+ make_floatx80_init(0x3FFC, 0x8346AC210959ECC4),
+ make_floatx80_init(0x3FFC, 0x8B232A08304282D8),
+ make_floatx80_init(0x3FFC, 0x92FB70B8D29AE2F9),
+ make_floatx80_init(0x3FFC, 0x9ACF476F5CCD1CB4),
+ make_floatx80_init(0x3FFC, 0xA29E76304954F23F),
+ make_floatx80_init(0x3FFC, 0xAA68C5D08AB85230),
+ make_floatx80_init(0x3FFC, 0xB22DFFFD9D539F83),
+ make_floatx80_init(0x3FFC, 0xB9EDEF453E900EA5),
+ make_floatx80_init(0x3FFC, 0xC1A85F1CC75E3EA5),
+ make_floatx80_init(0x3FFC, 0xC95D1BE828138DE6),
+ make_floatx80_init(0x3FFC, 0xD10BF300840D2DE4),
+ make_floatx80_init(0x3FFC, 0xD8B4B2BA6BC05E7A),
+ make_floatx80_init(0x3FFC, 0xE0572A6BB42335F6),
+ make_floatx80_init(0x3FFC, 0xE7F32A70EA9CAA8F),
+ make_floatx80_init(0x3FFC, 0xEF88843264ECEFAA),
+ make_floatx80_init(0x3FFC, 0xF7170A28ECC06666),
+ make_floatx80_init(0x3FFD, 0x812FD288332DAD32),
+ make_floatx80_init(0x3FFD, 0x88A8D1B1218E4D64),
+ make_floatx80_init(0x3FFD, 0x9012AB3F23E4AEE8),
+ make_floatx80_init(0x3FFD, 0x976CC3D411E7F1B9),
+ make_floatx80_init(0x3FFD, 0x9EB689493889A227),
+ make_floatx80_init(0x3FFD, 0xA5EF72C34487361B),
+ make_floatx80_init(0x3FFD, 0xAD1700BAF07A7227),
+ make_floatx80_init(0x3FFD, 0xB42CBCFAFD37EFB7),
+ make_floatx80_init(0x3FFD, 0xBB303A940BA80F89),
+ make_floatx80_init(0x3FFD, 0xC22115C6FCAEBBAF),
+ make_floatx80_init(0x3FFD, 0xC8FEF3E686331221),
+ make_floatx80_init(0x3FFD, 0xCFC98330B4000C70),
+ make_floatx80_init(0x3FFD, 0xD6807AA1102C5BF9),
+ make_floatx80_init(0x3FFD, 0xDD2399BC31252AA3),
+ make_floatx80_init(0x3FFD, 0xE3B2A8556B8FC517),
+ make_floatx80_init(0x3FFD, 0xEA2D764F64315989),
+ make_floatx80_init(0x3FFD, 0xF3BF5BF8BAD1A21D),
+ make_floatx80_init(0x3FFE, 0x801CE39E0D205C9A),
+ make_floatx80_init(0x3FFE, 0x8630A2DADA1ED066),
+ make_floatx80_init(0x3FFE, 0x8C1AD445F3E09B8C),
+ make_floatx80_init(0x3FFE, 0x91DB8F1664F350E2),
+ make_floatx80_init(0x3FFE, 0x97731420365E538C),
+ make_floatx80_init(0x3FFE, 0x9CE1C8E6A0B8CDBA),
+ make_floatx80_init(0x3FFE, 0xA22832DBCADAAE09),
+ make_floatx80_init(0x3FFE, 0xA746F2DDB7602294),
+ make_floatx80_init(0x3FFE, 0xAC3EC0FB997DD6A2),
+ make_floatx80_init(0x3FFE, 0xB110688AEBDC6F6A),
+ make_floatx80_init(0x3FFE, 0xB5BCC49059ECC4B0),
+ make_floatx80_init(0x3FFE, 0xBA44BC7DD470782F),
+ make_floatx80_init(0x3FFE, 0xBEA94144FD049AAC),
+ make_floatx80_init(0x3FFE, 0xC2EB4ABB661628B6),
+ make_floatx80_init(0x3FFE, 0xC70BD54CE602EE14),
+ make_floatx80_init(0x3FFE, 0xCD000549ADEC7159),
+ make_floatx80_init(0x3FFE, 0xD48457D2D8EA4EA3),
+ make_floatx80_init(0x3FFE, 0xDB948DA712DECE3B),
+ make_floatx80_init(0x3FFE, 0xE23855F969E8096A),
+ make_floatx80_init(0x3FFE, 0xE8771129C4353259),
+ make_floatx80_init(0x3FFE, 0xEE57C16E0D379C0D),
+ make_floatx80_init(0x3FFE, 0xF3E10211A87C3779),
+ make_floatx80_init(0x3FFE, 0xF919039D758B8D41),
+ make_floatx80_init(0x3FFE, 0xFE058B8F64935FB3),
+ make_floatx80_init(0x3FFF, 0x8155FB497B685D04),
+ make_floatx80_init(0x3FFF, 0x83889E3549D108E1),
+ make_floatx80_init(0x3FFF, 0x859CFA76511D724B),
+ make_floatx80_init(0x3FFF, 0x87952ECFFF8131E7),
+ make_floatx80_init(0x3FFF, 0x89732FD19557641B),
+ make_floatx80_init(0x3FFF, 0x8B38CAD101932A35),
+ make_floatx80_init(0x3FFF, 0x8CE7A8D8301EE6B5),
+ make_floatx80_init(0x3FFF, 0x8F46A39E2EAE5281),
+ make_floatx80_init(0x3FFF, 0x922DA7D791888487),
+ make_floatx80_init(0x3FFF, 0x94D19FCBDEDF5241),
+ make_floatx80_init(0x3FFF, 0x973AB94419D2A08B),
+ make_floatx80_init(0x3FFF, 0x996FF00E08E10B96),
+ make_floatx80_init(0x3FFF, 0x9B773F9512321DA7),
+ make_floatx80_init(0x3FFF, 0x9D55CC320F935624),
+ make_floatx80_init(0x3FFF, 0x9F100575006CC571),
+ make_floatx80_init(0x3FFF, 0xA0A9C290D97CC06C),
+ make_floatx80_init(0x3FFF, 0xA22659EBEBC0630A),
+ make_floatx80_init(0x3FFF, 0xA388B4AFF6EF0EC9),
+ make_floatx80_init(0x3FFF, 0xA4D35F1061D292C4),
+ make_floatx80_init(0x3FFF, 0xA60895DCFBE3187E),
+ make_floatx80_init(0x3FFF, 0xA72A51DC7367BEAC),
+ make_floatx80_init(0x3FFF, 0xA83A51530956168F),
+ make_floatx80_init(0x3FFF, 0xA93A20077539546E),
+ make_floatx80_init(0x3FFF, 0xAA9E7245023B2605),
+ make_floatx80_init(0x3FFF, 0xAC4C84BA6FE4D58F),
+ make_floatx80_init(0x3FFF, 0xADCE4A4A606B9712),
+ make_floatx80_init(0x3FFF, 0xAF2A2DCD8D263C9C),
+ make_floatx80_init(0x3FFF, 0xB0656F81F22265C7),
+ make_floatx80_init(0x3FFF, 0xB18465150F71496A),
+ make_floatx80_init(0x3FFF, 0xB28AAA156F9ADA35),
+ make_floatx80_init(0x3FFF, 0xB37B44FF3766B895),
+ make_floatx80_init(0x3FFF, 0xB458C3DCE9630433),
+ make_floatx80_init(0x3FFF, 0xB525529D562246BD),
+ make_floatx80_init(0x3FFF, 0xB5E2CCA95F9D88CC),
+ make_floatx80_init(0x3FFF, 0xB692CADA7ACA1ADA),
+ make_floatx80_init(0x3FFF, 0xB736AEA7A6925838),
+ make_floatx80_init(0x3FFF, 0xB7CFAB287E9F7B36),
+ make_floatx80_init(0x3FFF, 0xB85ECC66CB219835),
+ make_floatx80_init(0x3FFF, 0xB8E4FD5A20A593DA),
+ make_floatx80_init(0x3FFF, 0xB99F41F64AFF9BB5),
+ make_floatx80_init(0x3FFF, 0xBA7F1E17842BBE7B),
+ make_floatx80_init(0x3FFF, 0xBB4712857637E17D),
+ make_floatx80_init(0x3FFF, 0xBBFABE8A4788DF6F),
+ make_floatx80_init(0x3FFF, 0xBC9D0FAD2B689D79),
+ make_floatx80_init(0x3FFF, 0xBD306A39471ECD86),
+ make_floatx80_init(0x3FFF, 0xBDB6C731856AF18A),
+ make_floatx80_init(0x3FFF, 0xBE31CAC502E80D70),
+ make_floatx80_init(0x3FFF, 0xBEA2D55CE33194E2),
+ make_floatx80_init(0x3FFF, 0xBF0B10B7C03128F0),
+ make_floatx80_init(0x3FFF, 0xBF6B7A18DACB778D),
+ make_floatx80_init(0x3FFF, 0xBFC4EA4663FA18F6),
+ make_floatx80_init(0x3FFF, 0xC0181BDE8B89A454),
+ make_floatx80_init(0x3FFF, 0xC065B066CFBF6439),
+ make_floatx80_init(0x3FFF, 0xC0AE345F56340AE6),
+ make_floatx80_init(0x3FFF, 0xC0F222919CB9E6A7)
+};
+#endif
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
new file mode 100644
index 000000000..af43c8eab
--- /dev/null
+++ b/target/m68k/translate.c
@@ -0,0 +1,6373 @@
+/*
+ * m68k translation
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/qemu-print.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/log.h"
+#include "fpu/softfloat.h"
+
+
+//#define DEBUG_DISPATCH 1
+
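+/* Expand qregs.def into file-scope TCG globals (QREG_PC, QREG_CC_*, ...). */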
+#define DEFO32(name, offset) static TCGv QREG_##name;
+#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+
+static TCGv_i32 cpu_halted;
+static TCGv_i32 cpu_exception_index;
+
+static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
+static TCGv cpu_dregs[8];
+static TCGv cpu_aregs[8];
+static TCGv_i64 cpu_macc[4];
+
+#define REG(insn, pos) (((insn) >> (pos)) & 7)
+#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
+#define AREG(insn, pos) get_areg(s, REG(insn, pos))
+#define MACREG(acc) cpu_macc[acc]
+#define QREG_SP get_areg(s, 7)
+
+static TCGv NULL_QREG;
+#define IS_NULL_QREG(t) (t == NULL_QREG)
+/* Used to distinguish stores from bad addressing modes. */
+static TCGv store_dummy;
+
+#include "exec/gen-icount.h"
+
+void m68k_tcg_init(void)
+{
+ char *p;
+ int i;
+
+#define DEFO32(name, offset) \
+ QREG_##name = tcg_global_mem_new_i32(cpu_env, \
+ offsetof(CPUM68KState, offset), #name);
+#define DEFO64(name, offset) \
+ QREG_##name = tcg_global_mem_new_i64(cpu_env, \
+ offsetof(CPUM68KState, offset), #name);
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+
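+ /*
+ * halted and exception_index live in the parent CPUState; they are
+ * reached via a negative offset from env within M68kCPU.
+ */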
+ cpu_halted = tcg_global_mem_new_i32(cpu_env,
+ -offsetof(M68kCPU, env) +
+ offsetof(CPUState, halted), "HALTED");
+ cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
+ -offsetof(M68kCPU, env) +
+ offsetof(CPUState, exception_index),
+ "EXCEPTION");
+
+ p = cpu_reg_names;
+ for (i = 0; i < 8; i++) {
+ sprintf(p, "D%d", i);
+ cpu_dregs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUM68KState, dregs[i]), p);
+ p += 3;
+ sprintf(p, "A%d", i);
+ cpu_aregs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUM68KState, aregs[i]), p);
+ p += 3;
+ }
+ for (i = 0; i < 4; i++) {
+ sprintf(p, "ACC%d", i);
+ cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUM68KState, macc[i]), p);
+ p += 5;
+ }
+
+ NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
+ store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
+}
+
+/* internal defines */
+typedef struct DisasContext {
+ DisasContextBase base;
+ CPUM68KState *env;
+ target_ulong pc;
+ CCOp cc_op; /* Current CC operation */
+ int cc_op_synced;
+ TCGv_i64 mactmp;
+ int done_mac;
+ int writeback_mask;
+ TCGv writeback[8];
+#define MAX_TO_RELEASE 8
+ int release_count;
+ TCGv release[MAX_TO_RELEASE];
+ bool ss_active;
+} DisasContext;
+
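+/*
+ * Temporaries handed to mark_to_release() are freed in bulk by
+ * do_release() once the current instruction has been translated.
+ */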
+static void init_release_array(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ memset(s->release, 0, sizeof(s->release));
+#endif
+ s->release_count = 0;
+}
+
+static void do_release(DisasContext *s)
+{
+ int i;
+ for (i = 0; i < s->release_count; i++) {
+ tcg_temp_free(s->release[i]);
+ }
+ init_release_array(s);
+}
+
+static TCGv mark_to_release(DisasContext *s, TCGv tmp)
+{
+ g_assert(s->release_count < MAX_TO_RELEASE);
+ return s->release[s->release_count++] = tmp;
+}
+
+static TCGv get_areg(DisasContext *s, unsigned regno)
+{
+ if (s->writeback_mask & (1 << regno)) {
+ return s->writeback[regno];
+ } else {
+ return cpu_aregs[regno];
+ }
+}
+
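+/*
+ * Address register updates are deferred: delay_set_areg() records the
+ * new value and do_writebacks() commits it, so an exception raised
+ * partway through an instruction leaves the register unmodified.
+ */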
+static void delay_set_areg(DisasContext *s, unsigned regno,
+ TCGv val, bool give_temp)
+{
+ if (s->writeback_mask & (1 << regno)) {
+ if (give_temp) {
+ tcg_temp_free(s->writeback[regno]);
+ s->writeback[regno] = val;
+ } else {
+ tcg_gen_mov_i32(s->writeback[regno], val);
+ }
+ } else {
+ s->writeback_mask |= 1 << regno;
+ if (give_temp) {
+ s->writeback[regno] = val;
+ } else {
+ TCGv tmp = tcg_temp_new();
+ s->writeback[regno] = tmp;
+ tcg_gen_mov_i32(tmp, val);
+ }
+ }
+}
+
+static void do_writebacks(DisasContext *s)
+{
+ unsigned mask = s->writeback_mask;
+ if (mask) {
+ s->writeback_mask = 0;
+ do {
+ unsigned regno = ctz32(mask);
+ tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
+ tcg_temp_free(s->writeback[regno]);
+ mask &= mask - 1;
+ } while (mask);
+ }
+}
+
+/* is_jmp field values */
+#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
+#define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) (!(s->base.tb->flags & TB_FLAGS_MSR_S))
+#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
+ MMU_KERNEL_IDX : MMU_USER_IDX)
+#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
+ MMU_KERNEL_IDX : MMU_USER_IDX)
+#endif
+
+typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
+
+#ifdef DEBUG_DISPATCH
+#define DISAS_INSN(name) \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn); \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn) \
+ { \
+ qemu_log("Dispatch " #name "\n"); \
+ real_disas_##name(env, s, insn); \
+ } \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
+#else
+#define DISAS_INSN(name) \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
+#endif
+
+static const uint8_t cc_op_live[CC_OP_NB] = {
+ [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
+ [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
+ [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_LOGIC] = CCF_X | CCF_N
+};
+
+static void set_cc_op(DisasContext *s, CCOp op)
+{
+ CCOp old_op = s->cc_op;
+ int dead;
+
+ if (old_op == op) {
+ return;
+ }
+ s->cc_op = op;
+ s->cc_op_synced = 0;
+
+ /*
+ * Discard CC computation that will no longer be used.
+ * Note that X and N are never dead.
+ */
+ dead = cc_op_live[old_op] & ~cc_op_live[op];
+ if (dead & CCF_C) {
+ tcg_gen_discard_i32(QREG_CC_C);
+ }
+ if (dead & CCF_Z) {
+ tcg_gen_discard_i32(QREG_CC_Z);
+ }
+ if (dead & CCF_V) {
+ tcg_gen_discard_i32(QREG_CC_V);
+ }
+}
+
+/* Update the CPU env CC_OP state. */
+static void update_cc_op(DisasContext *s)
+{
+ if (!s->cc_op_synced) {
+ s->cc_op_synced = 1;
+ tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
+ }
+}
+
+/* Generate a jump to an immediate address. */
+static void gen_jmp_im(DisasContext *s, uint32_t dest)
+{
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, dest);
+ s->base.is_jmp = DISAS_JUMP;
+}
+
+/* Generate a jump to the address in qreg DEST. */
+static void gen_jmp(DisasContext *s, TCGv dest)
+{
+ update_cc_op(s);
+ tcg_gen_mov_i32(QREG_PC, dest);
+ s->base.is_jmp = DISAS_JUMP;
+}
+
+static void gen_raise_exception(int nr)
+{
+ TCGv_i32 tmp;
+
+ tmp = tcg_const_i32(nr);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_exception(DisasContext *s, uint32_t dest, int nr)
+{
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, dest);
+
+ gen_raise_exception(nr);
+
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+static inline void gen_addr_fault(DisasContext *s)
+{
+ gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
+}
+
+/*
+ * Generate a load from the specified address. Narrow values are
+ * sign- or zero-extended to full register width according to SIGN.
+ */
+static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
+ int sign, int index)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new_i32();
+ switch (opsize) {
+ case OS_BYTE:
+ if (sign) {
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ } else {
+ tcg_gen_qemu_ld8u(tmp, addr, index);
+ }
+ break;
+ case OS_WORD:
+ if (sign) {
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ } else {
+ tcg_gen_qemu_ld16u(tmp, addr, index);
+ }
+ break;
+ case OS_LONG:
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return tmp;
+}
+
+/* Generate a store. */
+static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
+ int index)
+{
+ switch (opsize) {
+ case OS_BYTE:
+ tcg_gen_qemu_st8(val, addr, index);
+ break;
+ case OS_WORD:
+ tcg_gen_qemu_st16(val, addr, index);
+ break;
+ case OS_LONG:
+ tcg_gen_qemu_st32(val, addr, index);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+typedef enum {
+ EA_STORE,
+ EA_LOADU,
+ EA_LOADS
+} ea_what;
+
+/*
+ * Generate an unsigned load for EA_LOADU, a signed load for EA_LOADS,
+ * or a store of VAL for EA_STORE.
+ */
+static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
+ ea_what what, int index)
+{
+ if (what == EA_STORE) {
+ gen_store(s, opsize, addr, val, index);
+ return store_dummy;
+ } else {
+ return mark_to_release(s, gen_load(s, opsize, addr,
+ what == EA_LOADS, index));
+ }
+}
+
+/* Read a 16-bit immediate constant */
+static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
+{
+ uint16_t im;
+ im = translator_lduw(env, &s->base, s->pc);
+ s->pc += 2;
+ return im;
+}
+
+/* Read an 8-bit immediate constant */
+static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
+{
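+ /* An 8-bit immediate occupies the low byte of a 16-bit extension word. */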
+ return read_im16(env, s);
+}
+
+/* Read a 32-bit immediate constant. */
+static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
+{
+ uint32_t im;
+ im = read_im16(env, s) << 16;
+ im |= 0xffff & read_im16(env, s);
+ return im;
+}
+
+/* Read a 64-bit immediate constant. */
+static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
+{
+ uint64_t im;
+ im = (uint64_t)read_im32(env, s) << 32;
+ im |= (uint64_t)read_im32(env, s);
+ return im;
+}
+
+/* Calculate an address index. */
+static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
+{
+ TCGv add;
+ int scale;
+
+ add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
+ if ((ext & 0x800) == 0) {
+ tcg_gen_ext16s_i32(tmp, add);
+ add = tmp;
+ }
+ scale = (ext >> 9) & 3;
+ if (scale != 0) {
+ tcg_gen_shli_i32(tmp, add, scale);
+ add = tmp;
+ }
+ return add;
+}
+
+/*
+ * Handle a base + index + displacement effective address.
+ * A NULL_QREG base means pc-relative.
+ */
+static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
+{
+ uint32_t offset;
+ uint16_t ext;
+ TCGv add;
+ TCGv tmp;
+ uint32_t bd, od;
+
+ offset = s->pc;
+ ext = read_im16(env, s);
+
+ if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX)) {
+ return NULL_QREG;
+ }
+
+ if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
+ !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
+ ext &= ~(3 << 9);
+ }
+
+ if (ext & 0x100) {
+ /* full extension word format */
+ if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
+ return NULL_QREG;
+ }
+
+ if ((ext & 0x30) > 0x10) {
+ /* base displacement */
+ if ((ext & 0x30) == 0x20) {
+ bd = (int16_t)read_im16(env, s);
+ } else {
+ bd = read_im32(env, s);
+ }
+ } else {
+ bd = 0;
+ }
+ tmp = mark_to_release(s, tcg_temp_new());
+ if ((ext & 0x44) == 0) {
+ /* pre-index */
+ add = gen_addr_index(s, ext, tmp);
+ } else {
+ add = NULL_QREG;
+ }
+ if ((ext & 0x80) == 0) {
+ /* base not suppressed */
+ if (IS_NULL_QREG(base)) {
+ base = mark_to_release(s, tcg_const_i32(offset + bd));
+ bd = 0;
+ }
+ if (!IS_NULL_QREG(add)) {
+ tcg_gen_add_i32(tmp, add, base);
+ add = tmp;
+ } else {
+ add = base;
+ }
+ }
+ if (!IS_NULL_QREG(add)) {
+ if (bd != 0) {
+ tcg_gen_addi_i32(tmp, add, bd);
+ add = tmp;
+ }
+ } else {
+ add = mark_to_release(s, tcg_const_i32(bd));
+ }
+ if ((ext & 3) != 0) {
+ /* memory indirect */
+ base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
+ if ((ext & 0x44) == 4) {
+ add = gen_addr_index(s, ext, tmp);
+ tcg_gen_add_i32(tmp, add, base);
+ add = tmp;
+ } else {
+ add = base;
+ }
+ if ((ext & 3) > 1) {
+ /* outer displacement */
+ if ((ext & 3) == 2) {
+ od = (int16_t)read_im16(env, s);
+ } else {
+ od = read_im32(env, s);
+ }
+ } else {
+ od = 0;
+ }
+ if (od != 0) {
+ tcg_gen_addi_i32(tmp, add, od);
+ add = tmp;
+ }
+ }
+ } else {
+ /* brief extension word format */
+ tmp = mark_to_release(s, tcg_temp_new());
+ add = gen_addr_index(s, ext, tmp);
+ if (!IS_NULL_QREG(base)) {
+ tcg_gen_add_i32(tmp, add, base);
+ if ((int8_t)ext) {
+ tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
+ }
+ } else {
+ tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
+ }
+ add = tmp;
+ }
+ return add;
+}
+
+/* Sign or zero extend a value. */
+
+static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
+{
+ switch (opsize) {
+ case OS_BYTE:
+ if (sign) {
+ tcg_gen_ext8s_i32(res, val);
+ } else {
+ tcg_gen_ext8u_i32(res, val);
+ }
+ break;
+ case OS_WORD:
+ if (sign) {
+ tcg_gen_ext16s_i32(res, val);
+ } else {
+ tcg_gen_ext16u_i32(res, val);
+ }
+ break;
+ case OS_LONG:
+ tcg_gen_mov_i32(res, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Evaluate all the CC flags. */
+
+static void gen_flush_flags(DisasContext *s)
+{
+ TCGv t0, t1;
+
+ switch (s->cc_op) {
+ case CC_OP_FLAGS:
+ return;
+
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ /* Compute signed overflow for addition. */
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
+ gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
+ tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
+ tcg_temp_free(t1);
+ break;
+
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ /* Compute signed overflow for subtraction. */
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
+ gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
+ tcg_gen_xor_i32(t1, QREG_CC_N, t0);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
+ tcg_temp_free(t1);
+ break;
+
+ case CC_OP_CMPB:
+ case CC_OP_CMPW:
+ case CC_OP_CMPL:
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
+ tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
+ gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
+ /* Compute signed overflow for subtraction. */
+ t0 = tcg_temp_new();
+ tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
+ break;
+
+ case CC_OP_LOGIC:
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ break;
+
+ case CC_OP_DYNAMIC:
+ gen_helper_flush_flags(cpu_env, QREG_CC_OP);
+ s->cc_op_synced = 1;
+ break;
+
+ default:
+ t0 = tcg_const_i32(s->cc_op);
+ gen_helper_flush_flags(cpu_env, t0);
+ tcg_temp_free(t0);
+ s->cc_op_synced = 1;
+ break;
+ }
+
+ /* Note that the flush_flags helper also stores CC_OP_FLAGS into env->cc_op. */
+ s->cc_op = CC_OP_FLAGS;
+}
+
+static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
+{
+ TCGv tmp;
+
+ if (opsize == OS_LONG) {
+ tmp = val;
+ } else {
+ tmp = mark_to_release(s, tcg_temp_new());
+ gen_ext(tmp, val, opsize, sign);
+ }
+
+ return tmp;
+}
+
+static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
+{
+ gen_ext(QREG_CC_N, val, opsize, 1);
+ set_cc_op(s, CC_OP_LOGIC);
+}
+
+static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
+{
+ tcg_gen_mov_i32(QREG_CC_N, dest);
+ tcg_gen_mov_i32(QREG_CC_V, src);
+ set_cc_op(s, CC_OP_CMPB + opsize);
+}
+
+static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
+{
+ gen_ext(QREG_CC_N, dest, opsize, 1);
+ tcg_gen_mov_i32(QREG_CC_V, src);
+}
+
+static inline int opsize_bytes(int opsize)
+{
+ switch (opsize) {
+ case OS_BYTE: return 1;
+ case OS_WORD: return 2;
+ case OS_LONG: return 4;
+ case OS_SINGLE: return 4;
+ case OS_DOUBLE: return 8;
+ case OS_EXTENDED: return 12;
+ case OS_PACKED: return 12;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int insn_opsize(int insn)
+{
+ switch ((insn >> 6) & 3) {
+ case 0: return OS_BYTE;
+ case 1: return OS_WORD;
+ case 2: return OS_LONG;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int ext_opsize(int ext, int pos)
+{
+ switch ((ext >> pos) & 7) {
+ case 0: return OS_LONG;
+ case 1: return OS_SINGLE;
+ case 2: return OS_EXTENDED;
+ case 3: return OS_PACKED;
+ case 4: return OS_WORD;
+ case 5: return OS_DOUBLE;
+ case 6: return OS_BYTE;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * Assign a value to a register. If the width is less than the register
+ * width, only the low part of the register is set.
+ */
+static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
+{
+ TCGv tmp;
+ switch (opsize) {
+ case OS_BYTE:
+ tcg_gen_andi_i32(reg, reg, 0xffffff00);
+ tmp = tcg_temp_new();
+ tcg_gen_ext8u_i32(tmp, val);
+ tcg_gen_or_i32(reg, reg, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_WORD:
+ tcg_gen_andi_i32(reg, reg, 0xffff0000);
+ tmp = tcg_temp_new();
+ tcg_gen_ext16u_i32(tmp, val);
+ tcg_gen_or_i32(reg, reg, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_LONG:
+ case OS_SINGLE:
+ tcg_gen_mov_i32(reg, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * Generate code for an "effective address". Does not adjust the base
+ * register for autoincrement addressing modes.
+ */
+static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
+ int mode, int reg0, int opsize)
+{
+ TCGv reg;
+ TCGv tmp;
+ uint16_t ext;
+ uint32_t offset;
+
+ switch (mode) {
+ case 0: /* Data register direct. */
+ case 1: /* Address register direct. */
+ return NULL_QREG;
+ case 3: /* Indirect postincrement. */
+ if (opsize == OS_UNSIZED) {
+ return NULL_QREG;
+ }
+ /* fallthru */
+ case 2: /* Indirect register */
+ return get_areg(s, reg0);
+ case 4: /* Indirect predecrement. */
+ if (opsize == OS_UNSIZED) {
+ return NULL_QREG;
+ }
+ reg = get_areg(s, reg0);
+ tmp = mark_to_release(s, tcg_temp_new());
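+ /*
+ * A byte access with predecrement on the stack pointer still moves
+ * SP by 2 so that it stays word-aligned.
+ */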
+ if (reg0 == 7 && opsize == OS_BYTE &&
+ m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_subi_i32(tmp, reg, 2);
+ } else {
+ tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+ }
+ return tmp;
+ case 5: /* Indirect displacement. */
+ reg = get_areg(s, reg0);
+ tmp = mark_to_release(s, tcg_temp_new());
+ ext = read_im16(env, s);
+ tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
+ return tmp;
+ case 6: /* Indirect index + displacement. */
+ reg = get_areg(s, reg0);
+ return gen_lea_indexed(env, s, reg);
+ case 7: /* Other */
+ switch (reg0) {
+ case 0: /* Absolute short. */
+ offset = (int16_t)read_im16(env, s);
+ return mark_to_release(s, tcg_const_i32(offset));
+ case 1: /* Absolute long. */
+ offset = read_im32(env, s);
+ return mark_to_release(s, tcg_const_i32(offset));
+ case 2: /* pc displacement */
+ offset = s->pc;
+ offset += (int16_t)read_im16(env, s);
+ return mark_to_release(s, tcg_const_i32(offset));
+ case 3: /* pc index+displacement. */
+ return gen_lea_indexed(env, s, NULL_QREG);
+ case 4: /* Immediate. */
+ default:
+ return NULL_QREG;
+ }
+ }
+ /* Should never happen. */
+ return NULL_QREG;
+}
+
+static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize)
+{
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ return gen_lea_mode(env, s, mode, reg0, opsize);
+}
+
+/*
+ * Generate code to load/store a value from/into an EA. If WHAT is
+ * EA_STORE this is a write; otherwise it is a read (EA_LOADS sign
+ * extends, EA_LOADU zero extends).
+ * ADDRP is non-null for readwrite operands.
+ */
+static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
+ int opsize, TCGv val, TCGv *addrp, ea_what what,
+ int index)
+{
+ TCGv reg, tmp, result;
+ int32_t offset;
+
+ switch (mode) {
+ case 0: /* Data register direct. */
+ reg = cpu_dregs[reg0];
+ if (what == EA_STORE) {
+ gen_partset_reg(opsize, reg, val);
+ return store_dummy;
+ } else {
+ return gen_extend(s, reg, opsize, what == EA_LOADS);
+ }
+ case 1: /* Address register direct. */
+ reg = get_areg(s, reg0);
+ if (what == EA_STORE) {
+ tcg_gen_mov_i32(reg, val);
+ return store_dummy;
+ } else {
+ return gen_extend(s, reg, opsize, what == EA_LOADS);
+ }
+ case 2: /* Indirect register */
+ reg = get_areg(s, reg0);
+ return gen_ldst(s, opsize, reg, val, what, index);
+ case 3: /* Indirect postincrement. */
+ reg = get_areg(s, reg0);
+ result = gen_ldst(s, opsize, reg, val, what, index);
+ if (what == EA_STORE || !addrp) {
+ TCGv tmp = tcg_temp_new();
+ if (reg0 == 7 && opsize == OS_BYTE &&
+ m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_addi_i32(tmp, reg, 2);
+ } else {
+ tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
+ }
+ delay_set_areg(s, reg0, tmp, true);
+ }
+ return result;
+ case 4: /* Indirect predecrement. */
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ return tmp;
+ }
+ if (addrp) {
+ *addrp = tmp;
+ }
+ }
+ result = gen_ldst(s, opsize, tmp, val, what, index);
+ if (what == EA_STORE || !addrp) {
+ delay_set_areg(s, reg0, tmp, false);
+ }
+ return result;
+ case 5: /* Indirect displacement. */
+ case 6: /* Indirect index + displacement. */
+ do_indirect:
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ return tmp;
+ }
+ if (addrp) {
+ *addrp = tmp;
+ }
+ }
+ return gen_ldst(s, opsize, tmp, val, what, index);
+ case 7: /* Other */
+ switch (reg0) {
+ case 0: /* Absolute short. */
+ case 1: /* Absolute long. */
+ case 2: /* pc displacement */
+ case 3: /* pc index+displacement. */
+ goto do_indirect;
+ case 4: /* Immediate. */
+ /* Sign extend values for consistency. */
+ switch (opsize) {
+ case OS_BYTE:
+ if (what == EA_LOADS) {
+ offset = (int8_t)read_im8(env, s);
+ } else {
+ offset = read_im8(env, s);
+ }
+ break;
+ case OS_WORD:
+ if (what == EA_LOADS) {
+ offset = (int16_t)read_im16(env, s);
+ } else {
+ offset = read_im16(env, s);
+ }
+ break;
+ case OS_LONG:
+ offset = read_im32(env, s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return mark_to_release(s, tcg_const_i32(offset));
+ default:
+ return NULL_QREG;
+ }
+ }
+ /* Should never happen. */
+ return NULL_QREG;
+}
+
+static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
+{
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
+}
+
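+/* Return a pointer temporary addressing FP register FREG in CPUM68KState. */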
+static TCGv_ptr gen_fp_ptr(int freg)
+{
+ TCGv_ptr fp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
+ return fp;
+}
+
+static TCGv_ptr gen_fp_result_ptr(void)
+{
+ TCGv_ptr fp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
+ return fp;
+}
+
+static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
+{
+ TCGv t32;
+ TCGv_i64 t64;
+
+ t32 = tcg_temp_new();
+ tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
+ tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
+ tcg_temp_free(t32);
+
+ t64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
+ tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
+ tcg_temp_free_i64(t64);
+}
+
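+/* Load a value of size OPSIZE from ADDR into the FP register at FP. */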
+static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
+ int index)
+{
+ TCGv tmp;
+ TCGv_i64 t64;
+
+ t64 = tcg_temp_new_i64();
+ tmp = tcg_temp_new();
+ switch (opsize) {
+ case OS_BYTE:
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ gen_helper_exts32(cpu_env, fp, tmp);
+ break;
+ case OS_WORD:
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ gen_helper_exts32(cpu_env, fp, tmp);
+ break;
+ case OS_LONG:
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ gen_helper_exts32(cpu_env, fp, tmp);
+ break;
+ case OS_SINGLE:
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ gen_helper_extf32(cpu_env, fp, tmp);
+ break;
+ case OS_DOUBLE:
+ tcg_gen_qemu_ld64(t64, addr, index);
+ gen_helper_extf64(cpu_env, fp, t64);
+ break;
+ case OS_EXTENDED:
+ if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ }
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
+ tcg_gen_addi_i32(tmp, addr, 4);
+ tcg_gen_qemu_ld64(t64, tmp, index);
+ tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
+ break;
+ case OS_PACKED:
+ /*
+ * unimplemented data type on 68040/ColdFire
+ * FIXME if needed for another FPU
+ */
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free(tmp);
+ tcg_temp_free_i64(t64);
+}
+
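+/* Store the FP register at FP to ADDR, converting to size OPSIZE. */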
+static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
+ int index)
+{
+ TCGv tmp;
+ TCGv_i64 t64;
+
+ t64 = tcg_temp_new_i64();
+ tmp = tcg_temp_new();
+ switch (opsize) {
+ case OS_BYTE:
+ gen_helper_reds32(tmp, cpu_env, fp);
+ tcg_gen_qemu_st8(tmp, addr, index);
+ break;
+ case OS_WORD:
+ gen_helper_reds32(tmp, cpu_env, fp);
+ tcg_gen_qemu_st16(tmp, addr, index);
+ break;
+ case OS_LONG:
+ gen_helper_reds32(tmp, cpu_env, fp);
+ tcg_gen_qemu_st32(tmp, addr, index);
+ break;
+ case OS_SINGLE:
+ gen_helper_redf32(tmp, cpu_env, fp);
+ tcg_gen_qemu_st32(tmp, addr, index);
+ break;
+ case OS_DOUBLE:
+ gen_helper_redf64(t64, cpu_env, fp);
+ tcg_gen_qemu_st64(t64, addr, index);
+ break;
+ case OS_EXTENDED:
+ if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ }
+ tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
+ tcg_gen_shli_i32(tmp, tmp, 16);
+ tcg_gen_qemu_st32(tmp, addr, index);
+ tcg_gen_addi_i32(tmp, addr, 4);
+ tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
+ tcg_gen_qemu_st64(t64, tmp, index);
+ break;
+ case OS_PACKED:
+ /*
+ * unimplemented data type on 68040/ColdFire
+ * FIXME if needed for another FPU
+ */
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free(tmp);
+ tcg_temp_free_i64(t64);
+}
+
+static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
+ TCGv_ptr fp, ea_what what, int index)
+{
+ if (what == EA_STORE) {
+ gen_store_fp(s, opsize, addr, fp, index);
+ } else {
+ gen_load_fp(s, opsize, addr, fp, index);
+ }
+}
+
+static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
+ int reg0, int opsize, TCGv_ptr fp, ea_what what,
+ int index)
+{
+ TCGv reg, addr, tmp;
+ TCGv_i64 t64;
+
+ switch (mode) {
+ case 0: /* Data register direct. */
+ reg = cpu_dregs[reg0];
+ if (what == EA_STORE) {
+ switch (opsize) {
+ case OS_BYTE:
+ case OS_WORD:
+ case OS_LONG:
+ gen_helper_reds32(reg, cpu_env, fp);
+ break;
+ case OS_SINGLE:
+ gen_helper_redf32(reg, cpu_env, fp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ tmp = tcg_temp_new();
+ switch (opsize) {
+ case OS_BYTE:
+ tcg_gen_ext8s_i32(tmp, reg);
+ gen_helper_exts32(cpu_env, fp, tmp);
+ break;
+ case OS_WORD:
+ tcg_gen_ext16s_i32(tmp, reg);
+ gen_helper_exts32(cpu_env, fp, tmp);
+ break;
+ case OS_LONG:
+ gen_helper_exts32(cpu_env, fp, reg);
+ break;
+ case OS_SINGLE:
+ gen_helper_extf32(cpu_env, fp, reg);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free(tmp);
+ }
+ return 0;
+ case 1: /* Address register direct. */
+ return -1;
+ case 2: /* Indirect register */
+ addr = get_areg(s, reg0);
+ gen_ldst_fp(s, opsize, addr, fp, what, index);
+ return 0;
+ case 3: /* Indirect postincrement. */
+ addr = cpu_aregs[reg0];
+ gen_ldst_fp(s, opsize, addr, fp, what, index);
+ tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
+ return 0;
+ case 4: /* Indirect predecrement. */
+ addr = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(addr)) {
+ return -1;
+ }
+ gen_ldst_fp(s, opsize, addr, fp, what, index);
+ tcg_gen_mov_i32(cpu_aregs[reg0], addr);
+ return 0;
+ case 5: /* Indirect displacement. */
+ case 6: /* Indirect index + displacement. */
+ do_indirect:
+ addr = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(addr)) {
+ return -1;
+ }
+ gen_ldst_fp(s, opsize, addr, fp, what, index);
+ return 0;
+ case 7: /* Other */
+ switch (reg0) {
+ case 0: /* Absolute short. */
+ case 1: /* Absolute long. */
+ case 2: /* pc displacement */
+ case 3: /* pc index+displacement. */
+ goto do_indirect;
+ case 4: /* Immediate. */
+ if (what == EA_STORE) {
+ return -1;
+ }
+ switch (opsize) {
+ case OS_BYTE:
+ tmp = tcg_const_i32((int8_t)read_im8(env, s));
+ gen_helper_exts32(cpu_env, fp, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_WORD:
+ tmp = tcg_const_i32((int16_t)read_im16(env, s));
+ gen_helper_exts32(cpu_env, fp, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_LONG:
+ tmp = tcg_const_i32(read_im32(env, s));
+ gen_helper_exts32(cpu_env, fp, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_SINGLE:
+ tmp = tcg_const_i32(read_im32(env, s));
+ gen_helper_extf32(cpu_env, fp, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case OS_DOUBLE:
+ t64 = tcg_const_i64(read_im64(env, s));
+ gen_helper_extf64(cpu_env, fp, t64);
+ tcg_temp_free_i64(t64);
+ break;
+ case OS_EXTENDED:
+ if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ }
+ tmp = tcg_const_i32(read_im32(env, s) >> 16);
+ tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
+ tcg_temp_free(tmp);
+ t64 = tcg_const_i64(read_im64(env, s));
+ tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
+ tcg_temp_free_i64(t64);
+ break;
+ case OS_PACKED:
+ /*
+ * unimplemented data type on 68040/ColdFire
+ * FIXME if needed for another FPU
+ */
+ gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ }
+ return -1;
+}
+
+static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize, TCGv_ptr fp, ea_what what, int index)
+{
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
+}
+
+typedef struct {
+ TCGCond tcond;
+ bool g1;
+ bool g2;
+ TCGv v1;
+ TCGv v2;
+} DisasCompare;
+
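+/*
+ * Map a 4-bit m68k condition code (as used by Bcc/Scc/DBcc) onto a
+ * TCG comparison, using the current CC_OP to avoid a full flag flush
+ * where possible. Conditions come in complementary pairs differing
+ * only in bit 0; each case computes the odd member of the pair, and
+ * "done" inverts the comparison for the even member.
+ */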
+static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
+{
+ TCGv tmp, tmp2;
+ TCGCond tcond;
+ CCOp op = s->cc_op;
+
+ /* The CC_OP_CMP form can handle most normal comparisons directly. */
+ if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
+ c->g1 = c->g2 = 1;
+ c->v1 = QREG_CC_N;
+ c->v2 = QREG_CC_V;
+ switch (cond) {
+ case 2: /* HI */
+ case 3: /* LS */
+ tcond = TCG_COND_LEU;
+ goto done;
+ case 4: /* CC */
+ case 5: /* CS */
+ tcond = TCG_COND_LTU;
+ goto done;
+ case 6: /* NE */
+ case 7: /* EQ */
+ tcond = TCG_COND_EQ;
+ goto done;
+ case 10: /* PL */
+ case 11: /* MI */
+ c->g1 = c->g2 = 0;
+ c->v2 = tcg_const_i32(0);
+ c->v1 = tmp = tcg_temp_new();
+ tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
+ gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
+ /* fallthru */
+ case 12: /* GE */
+ case 13: /* LT */
+ tcond = TCG_COND_LT;
+ goto done;
+ case 14: /* GT */
+ case 15: /* LE */
+ tcond = TCG_COND_LE;
+ goto done;
+ }
+ }
+
+ c->g1 = 1;
+ c->g2 = 0;
+ c->v2 = tcg_const_i32(0);
+
+ switch (cond) {
+ case 0: /* T */
+ case 1: /* F */
+ c->v1 = c->v2;
+ tcond = TCG_COND_NEVER;
+ goto done;
+ case 14: /* GT (!(Z || (N ^ V))) */
+ case 15: /* LE (Z || (N ^ V)) */
+ /*
+ * Logic operations clear V, which simplifies LE to (Z || N),
+ * and since Z and N are co-located, this becomes a normal
+ * comparison vs N.
+ */
+ if (op == CC_OP_LOGIC) {
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LE;
+ goto done;
+ }
+ break;
+ case 12: /* GE (!(N ^ V)) */
+ case 13: /* LT (N ^ V) */
+ /* Logic operations clear V, which simplifies this to N. */
+ if (op != CC_OP_LOGIC) {
+ break;
+ }
+ /* fallthru */
+ case 10: /* PL (!N) */
+ case 11: /* MI (N) */
+ /* Several cases represent N normally. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
+ op == CC_OP_LOGIC) {
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LT;
+ goto done;
+ }
+ break;
+ case 6: /* NE (!Z) */
+ case 7: /* EQ (Z) */
+ /* Some cases fold Z into N. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
+ op == CC_OP_LOGIC) {
+ tcond = TCG_COND_EQ;
+ c->v1 = QREG_CC_N;
+ goto done;
+ }
+ break;
+ case 4: /* CC (!C) */
+ case 5: /* CS (C) */
+ /* Some cases fold C into X. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
+ tcond = TCG_COND_NE;
+ c->v1 = QREG_CC_X;
+ goto done;
+ }
+ /* fallthru */
+ case 8: /* VC (!V) */
+ case 9: /* VS (V) */
+ /* Logic operations clear V and C. */
+ if (op == CC_OP_LOGIC) {
+ tcond = TCG_COND_NEVER;
+ c->v1 = c->v2;
+ goto done;
+ }
+ break;
+ }
+
+ /* Otherwise, flush flag state to CC_OP_FLAGS. */
+ gen_flush_flags(s);
+
+ switch (cond) {
+ case 0: /* T */
+ case 1: /* F */
+ default:
+ /* Invalid, or handled above. */
+ abort();
+ case 2: /* HI (!C && !Z) -> !(C || Z) */
+ case 3: /* LS (C || Z) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
+ tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
+ tcond = TCG_COND_NE;
+ break;
+ case 4: /* CC (!C) */
+ case 5: /* CS (C) */
+ c->v1 = QREG_CC_C;
+ tcond = TCG_COND_NE;
+ break;
+ case 6: /* NE (!Z) */
+ case 7: /* EQ (Z) */
+ c->v1 = QREG_CC_Z;
+ tcond = TCG_COND_EQ;
+ break;
+ case 8: /* VC (!V) */
+ case 9: /* VS (V) */
+ c->v1 = QREG_CC_V;
+ tcond = TCG_COND_LT;
+ break;
+ case 10: /* PL (!N) */
+ case 11: /* MI (N) */
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LT;
+ break;
+ case 12: /* GE (!(N ^ V)) */
+ case 13: /* LT (N ^ V) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
+ tcond = TCG_COND_LT;
+ break;
+ case 14: /* GT (!(Z || (N ^ V))) */
+ case 15: /* LE (Z || (N ^ V)) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
+ tcg_gen_neg_i32(tmp, tmp);
+ tmp2 = tcg_temp_new();
+ tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free(tmp2);
+ tcond = TCG_COND_LT;
+ break;
+ }
+
+ done:
+ if ((cond & 1) == 0) {
+ tcond = tcg_invert_cond(tcond);
+ }
+ c->tcond = tcond;
+}
+
+static void free_cond(DisasCompare *c)
+{
+ if (!c->g1) {
+ tcg_temp_free(c->v1);
+ }
+ if (!c->g2) {
+ tcg_temp_free(c->v2);
+ }
+}
+
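+/* Emit a branch to L1 if condition code COND holds. */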
+static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
+{
+ DisasCompare c;
+
+ gen_cc_cond(&c, s, cond);
+ update_cc_op(s);
+ tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
+ free_cond(&c);
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+static void gen_exit_tb(DisasContext *s)
+{
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, s->pc);
+ s->base.is_jmp = DISAS_EXIT;
+}
+
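+/*
+ * Wrappers around gen_ea(): SRC_EA loads from, and DEST_EA stores to,
+ * the effective address encoded in INSN, returning from the current
+ * DISAS_INSN with an address fault if the addressing mode is invalid.
+ */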
+#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
+ result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
+ op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
+ if (IS_NULL_QREG(result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
+ } while (0)
+
+#define DEST_EA(env, insn, opsize, val, addrp) do { \
+ TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
+ EA_STORE, IS_USER(s)); \
+ if (IS_NULL_QREG(ea_result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
+ } while (0)
+
+/* Generate a jump to an immediate address. */
+static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
+{
+ if (unlikely(s->ss_active)) {
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, dest);
+ gen_raise_exception(EXCP_TRACE);
+ } else if (translator_use_goto_tb(&s->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(QREG_PC, dest);
+ tcg_gen_exit_tb(s->base.tb, n);
+ } else {
+ gen_jmp_im(s, dest);
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ s->base.is_jmp = DISAS_NORETURN;
+}
+
+DISAS_INSN(scc)
+{
+ DisasCompare c;
+ int cond;
+ TCGv tmp;
+
+ cond = (insn >> 8) & 0xf;
+ gen_cc_cond(&c, s, cond);
+
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
+ free_cond(&c);
+
+ tcg_gen_neg_i32(tmp, tmp);
+ DEST_EA(env, insn, OS_BYTE, tmp, NULL);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(dbcc)
+{
+ TCGLabel *l1;
+ TCGv reg;
+ TCGv tmp;
+ int16_t offset;
+ uint32_t base;
+
+ reg = DREG(insn, 0);
+ base = s->pc;
+ offset = (int16_t)read_im16(env, s);
+ l1 = gen_new_label();
+ gen_jmpcc(s, (insn >> 8) & 0xf, l1);
+
+ tmp = tcg_temp_new();
+ tcg_gen_ext16s_i32(tmp, reg);
+ tcg_gen_addi_i32(tmp, tmp, -1);
+ gen_partset_reg(OS_WORD, reg, tmp);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
+ gen_jmp_tb(s, 1, base + offset);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 0, s->pc);
+}
+
+DISAS_INSN(undef_mac)
+{
+ gen_exception(s, s->base.pc_next, EXCP_LINEA);
+}
+
+DISAS_INSN(undef_fpu)
+{
+ gen_exception(s, s->base.pc_next, EXCP_LINEF);
+}
+
+DISAS_INSN(undef)
+{
+ /*
+ * ??? This covers both instructions that are as yet unimplemented
+ * for the 680x0 series, and those that are implemented but
+ * actually illegal for CPU32 or pre-68020.
+ */
+ qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x\n",
+ insn, s->base.pc_next);
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+}
+
+DISAS_INSN(mulw)
+{
+ TCGv reg;
+ TCGv tmp;
+ TCGv src;
+ int sign;
+
+ sign = (insn & 0x100) != 0;
+ reg = DREG(insn, 9);
+ tmp = tcg_temp_new();
+ if (sign) {
+ tcg_gen_ext16s_i32(tmp, reg);
+ } else {
+ tcg_gen_ext16u_i32(tmp, reg);
+ }
+ SRC_EA(env, src, OS_WORD, sign, NULL);
+ tcg_gen_mul_i32(tmp, tmp, src);
+ tcg_gen_mov_i32(reg, tmp);
+ gen_logic_cc(s, tmp, OS_LONG);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(divw)
+{
+ int sign;
+ TCGv src;
+ TCGv destr;
+
+ /* divX.w <EA>,Dn 32/16 -> 16r:16q */
+
+ sign = (insn & 0x100) != 0;
+
+ /* dest.l / src.w */
+
+ SRC_EA(env, src, OS_WORD, sign, NULL);
+ destr = tcg_const_i32(REG(insn, 9));
+ if (sign) {
+ gen_helper_divsw(cpu_env, destr, src);
+ } else {
+ gen_helper_divuw(cpu_env, destr, src);
+ }
+ tcg_temp_free(destr);
+
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(divl)
+{
+ TCGv num, reg, den;
+ int sign;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+
+ sign = (ext & 0x0800) != 0;
+
+ if (ext & 0x400) {
+ if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+
+ /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */
+
+ SRC_EA(env, den, OS_LONG, 0, NULL);
+ num = tcg_const_i32(REG(ext, 12));
+ reg = tcg_const_i32(REG(ext, 0));
+ if (sign) {
+ gen_helper_divsll(cpu_env, num, reg, den);
+ } else {
+ gen_helper_divull(cpu_env, num, reg, den);
+ }
+ tcg_temp_free(reg);
+ tcg_temp_free(num);
+ set_cc_op(s, CC_OP_FLAGS);
+ return;
+ }
+
+ /* divX.l <EA>, Dq 32/32 -> 32q */
+ /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */
+
+ SRC_EA(env, den, OS_LONG, 0, NULL);
+ num = tcg_const_i32(REG(ext, 12));
+ reg = tcg_const_i32(REG(ext, 0));
+ if (sign) {
+ gen_helper_divsl(cpu_env, num, reg, den);
+ } else {
+ gen_helper_divul(cpu_env, num, reg, den);
+ }
+ tcg_temp_free(reg);
+ tcg_temp_free(num);
+
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void bcd_add(TCGv dest, TCGv src)
+{
+ TCGv t0, t1;
+
+ /*
+ * dest10 = dest10 + src10 + X
+ *
+ * t1 = src
+ * t2 = t1 + 0x066
+ * t3 = t2 + dest + X
+ * t4 = t2 ^ dest
+ * t5 = t3 ^ t4
+ * t6 = ~t5 & 0x110
+ * t7 = (t6 >> 2) | (t6 >> 3)
+ * return t3 - t7
+ */
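+
+ /*
+ * Illustrative example (not from the original sources):
+ * dest = 0x19, src = 0x01, X = 0:
+ * t0 = 0x066 + 0x01 = 0x067
+ * t1 = 0x067 + 0x19 + 0 = 0x080
+ * t0 = (0x067 ^ 0x19) ^ 0x080 = 0x0fe (the carry bits)
+ * t0 = ~(0x0fe >> 3) & 0x22 = 0x020 (digits with no carry)
+ * result = 0x080 - 3 * 0x020 = 0x020, i.e. BCD 19 + 01 = 20
+ */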
+
+ /*
+ * t1 = (src + 0x066) + dest + X
+ * = result, possibly with an excess 0x6 in some digits
+ */
+
+ t0 = tcg_const_i32(0x066);
+ tcg_gen_add_i32(t0, t0, src);
+
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t1, t0, dest);
+ tcg_gen_add_i32(t1, t1, QREG_CC_X);
+
+ /* we will remove the excess 0x6 from digits that did not carry */
+
+ /*
+ * t0 = (src + 0x0066) ^ dest
+ * = t1 without carries
+ */
+
+ tcg_gen_xor_i32(t0, t0, dest);
+
+ /*
+ * extract the carries
+ * t0 = t0 ^ t1
+ * = only the carries
+ */
+
+ tcg_gen_xor_i32(t0, t0, t1);
+
+ /*
+ * generate a 0x2 marker for each digit that did not carry,
+ * then multiply by 3 to turn each marker into the 0x6 to remove
+ */
+
+ tcg_gen_shri_i32(t0, t0, 3);
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 0x22);
+ tcg_gen_add_i32(dest, t0, t0);
+ tcg_gen_add_i32(dest, dest, t0);
+ tcg_temp_free(t0);
+
+ /*
+ * remove the exceeding 0x6
+ * for digits that have not generated a carry
+ */
+
+ tcg_gen_sub_i32(dest, t1, dest);
+ tcg_temp_free(t1);
+}
+
+static void bcd_sub(TCGv dest, TCGv src)
+{
+ TCGv t0, t1, t2;
+
+ /*
+ * dest10 = dest10 - src10 - X
+ * = bcd_add(dest + 1 - X, 0x199 - src)
+ */
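+
+ /*
+ * Illustrative example (not from the original sources):
+ * dest = 0x20, src = 0x01, X = 0:
+ * t0 = 0x1ff - 0x01 = 0x1fe
+ * t1 = 0x1fe + 0x20 + 1 - 0 = 0x21f
+ * t0 = 0x21f ^ (0x1fe ^ 0x20) = 0x3c1 (the carry bits)
+ * t2 = ~(0x3c1 >> 3) & 0x22 = 0x002 (digits with no carry)
+ * result = 0x21f - 3 * 0x002 = 0x219 -> low byte 0x19, no borrow
+ * (bit 8 clear), i.e. BCD 20 - 01 = 19
+ */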
+
+ /* t0 = 0x066 + (0x199 - src) */
+
+ t0 = tcg_temp_new();
+ tcg_gen_subfi_i32(t0, 0x1ff, src);
+
+ /* t1 = t0 + dest + 1 - X */
+
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t1, t0, dest);
+ tcg_gen_addi_i32(t1, t1, 1);
+ tcg_gen_sub_i32(t1, t1, QREG_CC_X);
+
+ /* t2 = t0 ^ dest */
+
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, t0, dest);
+
+ /* t0 = t1 ^ t2 */
+
+ tcg_gen_xor_i32(t0, t1, t2);
+
+ /*
+ * t2 = ~t0 & 0x110
+ * t0 = (t2 >> 2) | (t2 >> 3)
+ *
+ * rewritten to fit in 8-bit operands as:
+ *
+ * t2 = ~(t0 >> 3) & 0x22
+ * t0 = t2 + t2
+ * t0 = t0 + t2
+ */
+
+ tcg_gen_shri_i32(t2, t0, 3);
+ tcg_gen_not_i32(t2, t2);
+ tcg_gen_andi_i32(t2, t2, 0x22);
+ tcg_gen_add_i32(t0, t2, t2);
+ tcg_gen_add_i32(t0, t0, t2);
+ tcg_temp_free(t2);
+
+ /* return t1 - t0 */
+
+ tcg_gen_sub_i32(dest, t1, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
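+/*
+ * Update CC after a BCD operation: C and X come from bit 8 of the
+ * 9-bit result, and Z is sticky (it can only be cleared, never set),
+ * as required for multi-byte BCD arithmetic.
+ */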
+static void bcd_flags(TCGv val)
+{
+ tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
+
+ tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);
+
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+}
+
+DISAS_INSN(abcd_reg)
+{
+ TCGv src;
+ TCGv dest;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+ dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
+ bcd_add(dest, src);
+ gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(abcd_mem)
+{
+ TCGv src, dest, addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ /* Indirect pre-decrement load (mode 4) */
+
+ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+ NULL_QREG, NULL, EA_LOADU, IS_USER(s));
+ dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+ NULL_QREG, &addr, EA_LOADU, IS_USER(s));
+
+ bcd_add(dest, src);
+
+ gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
+ EA_STORE, IS_USER(s));
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(sbcd_reg)
+{
+ TCGv src, dest;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+ dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
+
+ bcd_sub(dest, src);
+
+ gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(sbcd_mem)
+{
+ TCGv src, dest, addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ /* Indirect pre-decrement load (mode 4) */
+
+ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+ NULL_QREG, NULL, EA_LOADU, IS_USER(s));
+ dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+ NULL_QREG, &addr, EA_LOADU, IS_USER(s));
+
+ bcd_sub(dest, src);
+
+ gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
+ EA_STORE, IS_USER(s));
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(nbcd)
+{
+ TCGv src, dest;
+ TCGv addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ SRC_EA(env, src, OS_BYTE, 0, &addr);
+
+ dest = tcg_const_i32(0);
+ bcd_sub(dest, src);
+
+ DEST_EA(env, insn, OS_BYTE, dest, &addr);
+
+ bcd_flags(dest);
+
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(addsub)
+{
+ TCGv reg;
+ TCGv dest;
+ TCGv src;
+ TCGv tmp;
+ TCGv addr;
+ int add;
+ int opsize;
+
+ add = (insn & 0x4000) != 0;
+ opsize = insn_opsize(insn);
+ reg = gen_extend(s, DREG(insn, 9), opsize, 1);
+ dest = tcg_temp_new();
+ if (insn & 0x100) {
+ SRC_EA(env, tmp, opsize, 1, &addr);
+ src = reg;
+ } else {
+ tmp = reg;
+ SRC_EA(env, src, opsize, 1, NULL);
+ }
+ if (add) {
+ tcg_gen_add_i32(dest, tmp, src);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ } else {
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
+ tcg_gen_sub_i32(dest, tmp, src);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ }
+ gen_update_cc_add(dest, src, opsize);
+ if (insn & 0x100) {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ gen_partset_reg(opsize, DREG(insn, 9), dest);
+ }
+ tcg_temp_free(dest);
+}
+
+/* Reverse the order of the bits in REG. */
+DISAS_INSN(bitrev)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_helper_bitrev(reg, reg);
+}
+
+DISAS_INSN(bitop_reg)
+{
+ int opsize;
+ int op;
+ TCGv src1;
+ TCGv src2;
+ TCGv tmp;
+ TCGv addr;
+ TCGv dest;
+
+ if ((insn & 0x38) != 0) {
+ opsize = OS_BYTE;
+ } else {
+ opsize = OS_LONG;
+ }
+ op = (insn >> 6) & 3;
+ SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
+
+ gen_flush_flags(s);
+ src2 = tcg_temp_new();
+ if (opsize == OS_BYTE) {
+ tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
+ } else {
+ tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
+ }
+
+ tmp = tcg_const_i32(1);
+ tcg_gen_shl_i32(tmp, tmp, src2);
+ tcg_temp_free(src2);
+
+ tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
+
+ dest = tcg_temp_new();
+ switch (op) {
+ case 1: /* bchg */
+ tcg_gen_xor_i32(dest, src1, tmp);
+ break;
+ case 2: /* bclr */
+ tcg_gen_andc_i32(dest, src1, tmp);
+ break;
+ case 3: /* bset */
+ tcg_gen_or_i32(dest, src1, tmp);
+ break;
+ default: /* btst */
+ break;
+ }
+ tcg_temp_free(tmp);
+ if (op) {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ }
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(sats)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_flush_flags(s);
+ gen_helper_sats(reg, reg, QREG_CC_V);
+ gen_logic_cc(s, reg, OS_LONG);
+}
+
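+/* Push VAL onto the stack and update the stack pointer. */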
+static void gen_push(DisasContext *s, TCGv val)
+{
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ tcg_gen_subi_i32(tmp, QREG_SP, 4);
+ gen_store(s, OS_LONG, tmp, val, IS_USER(s));
+ tcg_gen_mov_i32(QREG_SP, tmp);
+ tcg_temp_free(tmp);
+}
+
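+/* Map a MOVEM-style register number (0-15) to D0-D7 / A0-A7. */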
+static TCGv mreg(int reg)
+{
+ if (reg < 8) {
+ /* Dx */
+ return cpu_dregs[reg];
+ }
+ /* Ax */
+ return cpu_aregs[reg & 7];
+}
+
+DISAS_INSN(movem)
+{
+ TCGv addr, incr, tmp, r[16];
+ int is_load = (insn & 0x0400) != 0;
+ int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
+ uint16_t mask = read_im16(env, s);
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ int i;
+
+ tmp = cpu_aregs[reg0];
+
+ switch (mode) {
+ case 0: /* data register direct */
+ case 1: /* addr register direct */
+ do_addr_fault:
+ gen_addr_fault(s);
+ return;
+
+ case 2: /* indirect */
+ break;
+
+ case 3: /* indirect post-increment */
+ if (!is_load) {
+ /* post-increment is not allowed */
+ goto do_addr_fault;
+ }
+ break;
+
+ case 4: /* indirect pre-decrement */
+ if (is_load) {
+ /* pre-decrement is not allowed */
+ goto do_addr_fault;
+ }
+ /*
+ * We want a bare copy of the address reg, without the
+ * pre-decrement adjustment that gen_lea would apply.
+ */
+ break;
+
+ default:
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ goto do_addr_fault;
+ }
+ break;
+ }
+
+ addr = tcg_temp_new();
+ tcg_gen_mov_i32(addr, tmp);
+ incr = tcg_const_i32(opsize_bytes(opsize));
+
+ if (is_load) {
+ /* memory to register */
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
+ tcg_gen_add_i32(addr, addr, incr);
+ }
+ }
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ tcg_gen_mov_i32(mreg(i), r[i]);
+ tcg_temp_free(r[i]);
+ }
+ }
+ if (mode == 3) {
+ /* post-increment: movem (An)+,X */
+ tcg_gen_mov_i32(cpu_aregs[reg0], addr);
+ }
+ } else {
+ /* register to memory */
+ if (mode == 4) {
+ /* pre-decrement: movem X,-(An) */
+ for (i = 15; i >= 0; i--) {
+ if ((mask << i) & 0x8000) {
+ tcg_gen_sub_i32(addr, addr, incr);
+ if (reg0 + 8 == i &&
+ m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
+ /*
+ * M68020+: if the addressing register is the
+ * register moved to memory, the value written
+ * is the initial value decremented by the size of
+ * the operation, regardless of how many actual
+ * stores have been performed until this point.
+ * M68000/M68010: the value is the initial value.
+ */
+ tmp = tcg_temp_new();
+ tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
+ gen_store(s, opsize, addr, tmp, IS_USER(s));
+ tcg_temp_free(tmp);
+ } else {
+ gen_store(s, opsize, addr, mreg(i), IS_USER(s));
+ }
+ }
+ }
+ tcg_gen_mov_i32(cpu_aregs[reg0], addr);
+ } else {
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ gen_store(s, opsize, addr, mreg(i), IS_USER(s));
+ tcg_gen_add_i32(addr, addr, incr);
+ }
+ }
+ }
+ }
+
+ tcg_temp_free(incr);
+ tcg_temp_free(addr);
+}
+
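+/*
+ * MOVEP: transfer a word or long between a data register and memory,
+ * using every other byte (addr, addr + 2, ...), most significant
+ * byte first.
+ */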
+DISAS_INSN(movep)
+{
+ uint8_t i;
+ int16_t displ;
+ TCGv reg;
+ TCGv addr;
+ TCGv abuf;
+ TCGv dbuf;
+
+ displ = read_im16(env, s);
+
+ addr = AREG(insn, 0);
+ reg = DREG(insn, 9);
+
+ abuf = tcg_temp_new();
+ tcg_gen_addi_i32(abuf, addr, displ);
+ dbuf = tcg_temp_new();
+
+ if (insn & 0x40) {
+ i = 4;
+ } else {
+ i = 2;
+ }
+
+ if (insn & 0x80) {
+ for ( ; i > 0 ; i--) {
+ tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
+ tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
+ if (i > 1) {
+ tcg_gen_addi_i32(abuf, abuf, 2);
+ }
+ }
+ } else {
+ for ( ; i > 0 ; i--) {
+ tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
+ tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
+ if (i > 1) {
+ tcg_gen_addi_i32(abuf, abuf, 2);
+ }
+ }
+ }
+ tcg_temp_free(abuf);
+ tcg_temp_free(dbuf);
+}
+
+DISAS_INSN(bitop_im)
+{
+ int opsize;
+ int op;
+ TCGv src1;
+ uint32_t mask;
+ int bitnum;
+ TCGv tmp;
+ TCGv addr;
+
+ if ((insn & 0x38) != 0) {
+ opsize = OS_BYTE;
+ } else {
+ opsize = OS_LONG;
+ }
+ op = (insn >> 6) & 3;
+
+ bitnum = read_im16(env, s);
+ if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (bitnum & 0xfe00) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ } else {
+ if (bitnum & 0xff00) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ }
+
+ SRC_EA(env, src1, opsize, 0, op ? &addr : NULL);
+
+ gen_flush_flags(s);
+ if (opsize == OS_BYTE) {
+ bitnum &= 7;
+ } else {
+ bitnum &= 31;
+ }
+ mask = 1 << bitnum;
+
+ tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
+
+ if (op) {
+ tmp = tcg_temp_new();
+ switch (op) {
+ case 1: /* bchg */
+ tcg_gen_xori_i32(tmp, src1, mask);
+ break;
+ case 2: /* bclr */
+ tcg_gen_andi_i32(tmp, src1, ~mask);
+ break;
+ case 3: /* bset */
+ tcg_gen_ori_i32(tmp, src1, mask);
+ break;
+ default: /* btst */
+ break;
+ }
+ DEST_EA(env, insn, opsize, tmp, &addr);
+ tcg_temp_free(tmp);
+ }
+}
+
+static TCGv gen_get_ccr(DisasContext *s)
+{
+ TCGv dest;
+
+ update_cc_op(s);
+ dest = tcg_temp_new();
+ gen_helper_get_ccr(dest, cpu_env);
+ return dest;
+}
+
+static TCGv gen_get_sr(DisasContext *s)
+{
+ TCGv ccr;
+ TCGv sr;
+
+ ccr = gen_get_ccr(s);
+ sr = tcg_temp_new();
+ tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
+ tcg_gen_or_i32(sr, sr, ccr);
+ tcg_temp_free(ccr);
+ return sr;
+}
+
+static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
+{
+ if (ccr_only) {
+ tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
+ tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
+ tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
+ } else {
+ TCGv sr = tcg_const_i32(val);
+ gen_helper_set_sr(cpu_env, sr);
+ tcg_temp_free(sr);
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
+{
+ if (ccr_only) {
+ gen_helper_set_ccr(cpu_env, val);
+ } else {
+ gen_helper_set_sr(cpu_env, val);
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ bool ccr_only)
+{
+ if ((insn & 0x3f) == 0x3c) {
+ uint16_t val;
+ val = read_im16(env, s);
+ gen_set_sr_im(s, val, ccr_only);
+ } else {
+ TCGv src;
+ SRC_EA(env, src, OS_WORD, 0, NULL);
+ gen_set_sr(s, src, ccr_only);
+ }
+}
+
+DISAS_INSN(arith_im)
+{
+ int op;
+ TCGv im;
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+ bool with_SR = ((insn & 0x3f) == 0x3c);
+
+ op = (insn >> 9) & 7;
+ opsize = insn_opsize(insn);
+ switch (opsize) {
+ case OS_BYTE:
+ im = tcg_const_i32((int8_t)read_im8(env, s));
+ break;
+ case OS_WORD:
+ im = tcg_const_i32((int16_t)read_im16(env, s));
+ break;
+ case OS_LONG:
+ im = tcg_const_i32(read_im32(env, s));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (with_SR) {
+ /* SR/CCR can only be used with andi/eori/ori */
+ if (op == 2 || op == 3 || op == 6) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ switch (opsize) {
+ case OS_BYTE:
+ src1 = gen_get_ccr(s);
+ break;
+ case OS_WORD:
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ src1 = gen_get_sr(s);
+ break;
+ default:
+ /* OS_LONG; others already g_assert_not_reached. */
+ disas_undef(env, s, insn);
+ return;
+ }
+ } else {
+ SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
+ }
+ dest = tcg_temp_new();
+ switch (op) {
+ case 0: /* ori */
+ tcg_gen_or_i32(dest, src1, im);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
+ break;
+ case 1: /* andi */
+ tcg_gen_and_i32(dest, src1, im);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
+ break;
+ case 2: /* subi */
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
+ tcg_gen_sub_i32(dest, src1, im);
+ gen_update_cc_add(dest, im, opsize);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ break;
+ case 3: /* addi */
+ tcg_gen_add_i32(dest, src1, im);
+ gen_update_cc_add(dest, im, opsize);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ break;
+ case 5: /* eori */
+ tcg_gen_xor_i32(dest, src1, im);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
+ break;
+ case 6: /* cmpi */
+ gen_update_cc_cmp(s, src1, im, opsize);
+ break;
+ default:
+ abort();
+ }
+ tcg_temp_free(im);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(cas)
+{
+ int opsize;
+ TCGv addr;
+ uint16_t ext;
+ TCGv load;
+ TCGv cmp;
+ MemOp opc;
+
+ switch ((insn >> 9) & 3) {
+ case 1:
+ opsize = OS_BYTE;
+ opc = MO_SB;
+ break;
+ case 2:
+ opsize = OS_WORD;
+ opc = MO_TESW;
+ break;
+ case 3:
+ opsize = OS_LONG;
+ opc = MO_TESL;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ext = read_im16(env, s);
+
+ /* cas Dc,Du,<EA> */
+
+ addr = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ cmp = gen_extend(s, DREG(ext, 0), opsize, 1);
+
+ /*
+ * if <EA> == Dc then
+ * <EA> = Du
+ * Dc = <EA> (because <EA> == Dc)
+ * else
+ * Dc = <EA>
+ */
+
+ load = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
+ IS_USER(s), opc);
+ /* Update the flags before Dc is overwritten with the loaded value. */
+ gen_update_cc_cmp(s, load, cmp, opsize);
+ gen_partset_reg(opsize, DREG(ext, 0), load);
+
+ tcg_temp_free(load);
+
+ switch (extract32(insn, 3, 3)) {
+ case 3: /* Indirect postincrement. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
+ break;
+ case 4: /* Indirect predecrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ break;
+ }
+}
+
+DISAS_INSN(cas2w)
+{
+ uint16_t ext1, ext2;
+ TCGv addr1, addr2;
+ TCGv regs;
+
+ /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+ ext1 = read_im16(env, s);
+
+ if (ext1 & 0x8000) {
+ /* Address Register */
+ addr1 = AREG(ext1, 12);
+ } else {
+ /* Data Register */
+ addr1 = DREG(ext1, 12);
+ }
+
+ ext2 = read_im16(env, s);
+ if (ext2 & 0x8000) {
+ /* Address Register */
+ addr2 = AREG(ext2, 12);
+ } else {
+ /* Data Register */
+ addr2 = DREG(ext2, 12);
+ }
+
+ /*
+ * if (R1) == Dc1 && (R2) == Dc2 then
+ * (R1) = Du1
+ * (R2) = Du2
+ * else
+ * Dc1 = (R1)
+ * Dc2 = (R2)
+ */
+
+ regs = tcg_const_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_exit_atomic(cpu_env);
+ } else {
+ gen_helper_cas2w(cpu_env, regs, addr1, addr2);
+ }
+ tcg_temp_free(regs);
+
+ /* Note that the cas2w helper also assigns to env->cc_op. */
+ s->cc_op = CC_OP_CMPW;
+ s->cc_op_synced = 1;
+}
+
+DISAS_INSN(cas2l)
+{
+ uint16_t ext1, ext2;
+ TCGv addr1, addr2, regs;
+
+ /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+ ext1 = read_im16(env, s);
+
+ if (ext1 & 0x8000) {
+ /* Address Register */
+ addr1 = AREG(ext1, 12);
+ } else {
+ /* Data Register */
+ addr1 = DREG(ext1, 12);
+ }
+
+ ext2 = read_im16(env, s);
+ if (ext2 & 0x8000) {
+ /* Address Register */
+ addr2 = AREG(ext2, 12);
+ } else {
+ /* Data Register */
+ addr2 = DREG(ext2, 12);
+ }
+
+ /*
+ * if (R1) == Dc1 && (R2) == Dc2 then
+ * (R1) = Du1
+ * (R2) = Du2
+ * else
+ * Dc1 = (R1)
+ * Dc2 = (R2)
+ */
+
+ regs = tcg_const_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
+ } else {
+ gen_helper_cas2l(cpu_env, regs, addr1, addr2);
+ }
+ tcg_temp_free(regs);
+
+ /* Note that the cas2l helpers also assign to env->cc_op. */
+ s->cc_op = CC_OP_CMPL;
+ s->cc_op_synced = 1;
+}
+
+DISAS_INSN(byterev)
+{
+ TCGv reg;
+
+ reg = DREG(insn, 0);
+ tcg_gen_bswap32_i32(reg, reg);
+}
+
+DISAS_INSN(move)
+{
+ TCGv src;
+ TCGv dest;
+ int op;
+ int opsize;
+
+ switch (insn >> 12) {
+ case 1: /* move.b */
+ opsize = OS_BYTE;
+ break;
+ case 2: /* move.l */
+ opsize = OS_LONG;
+ break;
+ case 3: /* move.w */
+ opsize = OS_WORD;
+ break;
+ default:
+ abort();
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ op = (insn >> 6) & 7;
+ if (op == 1) {
+ /* movea */
+ /* The value will already have been sign extended. */
+ dest = AREG(insn, 9);
+ tcg_gen_mov_i32(dest, src);
+ } else {
+ /* normal move */
+ uint16_t dest_ea;
+ dest_ea = ((insn >> 9) & 7) | (op << 3);
+ DEST_EA(env, dest_ea, opsize, src, NULL);
+ /* This will be correct because loads sign extend. */
+ gen_logic_cc(s, src, opsize);
+ }
+}
+
+DISAS_INSN(negx)
+{
+ TCGv z;
+ TCGv src;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src, opsize, 1, &addr);
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /*
+ * Perform subtract with borrow.
+ * (X, N) = -(src + X);
+ */
+
+ z = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
+ tcg_temp_free(z);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+
+ tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
+
+ /*
+ * Compute signed-overflow for negation. The normal formula for
+ * subtraction is (res ^ src) & (src ^ dest), but with dest==0
+ * this simplifies to res & src.
+ */
+
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+
+ DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
+}
+
+DISAS_INSN(lea)
+{
+ TCGv reg;
+ TCGv tmp;
+
+ reg = AREG(insn, 9);
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ tcg_gen_mov_i32(reg, tmp);
+}
+
+DISAS_INSN(clr)
+{
+ int opsize;
+ TCGv zero;
+
+ zero = tcg_const_i32(0);
+
+ opsize = insn_opsize(insn);
+ DEST_EA(env, insn, opsize, zero, NULL);
+ gen_logic_cc(s, zero, opsize);
+ tcg_temp_free(zero);
+}
+
+DISAS_INSN(move_from_ccr)
+{
+ TCGv ccr;
+
+ ccr = gen_get_ccr(s);
+ DEST_EA(env, insn, OS_WORD, ccr, NULL);
+}
+
+DISAS_INSN(neg)
+{
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src1, opsize, 1, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_neg_i32(dest, src1);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ gen_update_cc_add(dest, src1, opsize);
+ tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(move_to_ccr)
+{
+ gen_move_to_sr(env, s, insn, true);
+}
+
+DISAS_INSN(not)
+{
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src1, opsize, 1, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_not_i32(dest, src1);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+}
+
+DISAS_INSN(swap)
+{
+ TCGv src1;
+ TCGv src2;
+ TCGv reg;
+
+ src1 = tcg_temp_new();
+ src2 = tcg_temp_new();
+ reg = DREG(insn, 0);
+ tcg_gen_shli_i32(src1, reg, 16);
+ tcg_gen_shri_i32(src2, reg, 16);
+ tcg_gen_or_i32(reg, src1, src2);
+ tcg_temp_free(src2);
+ tcg_temp_free(src1);
+ gen_logic_cc(s, reg, OS_LONG);
+}
+
+DISAS_INSN(bkpt)
+{
+ gen_exception(s, s->base.pc_next, EXCP_DEBUG);
+}
+
+DISAS_INSN(pea)
+{
+ TCGv tmp;
+
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ gen_push(s, tmp);
+}
+
+DISAS_INSN(ext)
+{
+ int op;
+ TCGv reg;
+ TCGv tmp;
+
+ reg = DREG(insn, 0);
+ op = (insn >> 6) & 7;
+ tmp = tcg_temp_new();
+ if (op == 3) {
+ tcg_gen_ext16s_i32(tmp, reg);
+ } else {
+ tcg_gen_ext8s_i32(tmp, reg);
+ }
+ if (op == 2) {
+ gen_partset_reg(OS_WORD, reg, tmp);
+ } else {
+ tcg_gen_mov_i32(reg, tmp);
+ }
+ gen_logic_cc(s, tmp, OS_LONG);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(tst)
+{
+ int opsize;
+ TCGv tmp;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, tmp, opsize, 1, NULL);
+ gen_logic_cc(s, tmp, opsize);
+}
+
+DISAS_INSN(pulse)
+{
+ /* Implemented as a NOP. */
+}
+
+DISAS_INSN(illegal)
+{
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+}
+
+/* ??? This should be atomic. */
+DISAS_INSN(tas)
+{
+ TCGv dest;
+ TCGv src1;
+ TCGv addr;
+
+ dest = tcg_temp_new();
+ SRC_EA(env, src1, OS_BYTE, 1, &addr);
+ gen_logic_cc(s, src1, OS_BYTE);
+ tcg_gen_ori_i32(dest, src1, 0x80);
+ DEST_EA(env, insn, OS_BYTE, dest, &addr);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(mull)
+{
+ uint16_t ext;
+ TCGv src1;
+ int sign;
+
+ ext = read_im16(env, s);
+
+ sign = ext & 0x800;
+
+ if (ext & 0x400) {
+ if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+
+ SRC_EA(env, src1, OS_LONG, 0, NULL);
+
+ if (sign) {
+ tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
+ } else {
+ tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
+ }
+ /* if Dl == Dh, 68040 returns low word */
+ tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
+ tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
+
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+
+ set_cc_op(s, CC_OP_FLAGS);
+ return;
+ }
+ SRC_EA(env, src1, OS_LONG, 0, NULL);
+ if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+ if (sign) {
+ tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
+ /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
+ tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
+ } else {
+ tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
+ /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
+ }
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
+
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+
+ set_cc_op(s, CC_OP_FLAGS);
+ } else {
+ /*
+ * The upper 32 bits of the product are discarded, so
+ * muls.l and mulu.l are functionally equivalent.
+ */
+ tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
+ gen_logic_cc(s, DREG(ext, 12), OS_LONG);
+ }
+}
+
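+/*
+ * Common code for LINK and LINKL: push An, load An with the updated
+ * stack pointer (the new frame pointer), then add the displacement
+ * to SP. When An is A7 the intermediate move is skipped, since SP
+ * is overwritten immediately afterwards.
+ */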
+static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
+{
+ TCGv reg;
+ TCGv tmp;
+
+ reg = AREG(insn, 0);
+ tmp = tcg_temp_new();
+ tcg_gen_subi_i32(tmp, QREG_SP, 4);
+ gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
+ if ((insn & 7) != 7) {
+ tcg_gen_mov_i32(reg, tmp);
+ }
+ tcg_gen_addi_i32(QREG_SP, tmp, offset);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(link)
+{
+ int16_t offset;
+
+ offset = read_im16(env, s);
+ gen_link(s, insn, offset);
+}
+
+DISAS_INSN(linkl)
+{
+ int32_t offset;
+
+ offset = read_im32(env, s);
+ gen_link(s, insn, offset);
+}
+
+DISAS_INSN(unlk)
+{
+ TCGv src;
+ TCGv reg;
+ TCGv tmp;
+
+ src = tcg_temp_new();
+ reg = AREG(insn, 0);
+ tcg_gen_mov_i32(src, reg);
+ tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
+ tcg_gen_mov_i32(reg, tmp);
+ tcg_gen_addi_i32(QREG_SP, src, 4);
+ tcg_temp_free(src);
+ tcg_temp_free(tmp);
+}
+
+#if defined(CONFIG_SOFTMMU)
+DISAS_INSN(reset)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ gen_helper_reset(cpu_env);
+}
+#endif
+
+DISAS_INSN(nop)
+{
+}
+
+DISAS_INSN(rtd)
+{
+ TCGv tmp;
+ int16_t offset = read_im16(env, s);
+
+ tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
+ tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(rtr)
+{
+ TCGv tmp;
+ TCGv ccr;
+ TCGv sp;
+
+ sp = tcg_temp_new();
+ ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
+ tcg_gen_addi_i32(sp, QREG_SP, 2);
+ tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
+ tcg_gen_addi_i32(QREG_SP, sp, 4);
+ tcg_temp_free(sp);
+
+ gen_set_sr(s, ccr, true);
+ tcg_temp_free(ccr);
+
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(rts)
+{
+ TCGv tmp;
+
+ tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
+ tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(jump)
+{
+ TCGv tmp;
+
+ /*
+ * Load the target address first to ensure correct exception
+ * behavior.
+ */
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ if ((insn & 0x40) == 0) {
+ /* jsr */
+ gen_push(s, tcg_const_i32(s->pc));
+ }
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(addsubq)
+{
+ TCGv src;
+ TCGv dest;
+ TCGv val;
+ int imm;
+ TCGv addr;
+ int opsize;
+
+ if ((insn & 0x38) == 0x08) {
+ /* Operation on address register is always long. */
+ opsize = OS_LONG;
+ } else {
+ opsize = insn_opsize(insn);
+ }
+ SRC_EA(env, src, opsize, 1, &addr);
+ imm = (insn >> 9) & 7;
+ if (imm == 0) {
+ imm = 8;
+ }
+ val = tcg_const_i32(imm);
+ dest = tcg_temp_new();
+ tcg_gen_mov_i32(dest, src);
+ if ((insn & 0x38) == 0x08) {
+ /*
+ * Don't update condition codes if the destination is an
+ * address register.
+ */
+ if (insn & 0x0100) {
+ tcg_gen_sub_i32(dest, dest, val);
+ } else {
+ tcg_gen_add_i32(dest, dest, val);
+ }
+ } else {
+ if (insn & 0x0100) {
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
+ tcg_gen_sub_i32(dest, dest, val);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ } else {
+ tcg_gen_add_i32(dest, dest, val);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ }
+ gen_update_cc_add(dest, val, opsize);
+ }
+ tcg_temp_free(val);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
+}
+
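+/* TPF (ColdFire trapf): a no-op that consumes 0, 1 or 2 extension words. */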
+DISAS_INSN(tpf)
+{
+ switch (insn & 7) {
+ case 2: /* One extension word. */
+ s->pc += 2;
+ break;
+ case 3: /* Two extension words. */
+ s->pc += 4;
+ break;
+ case 4: /* No extension words. */
+ break;
+ default:
+ disas_undef(env, s, insn);
+ }
+}
+
+DISAS_INSN(branch)
+{
+ int32_t offset;
+ uint32_t base;
+ int op;
+
+ base = s->pc;
+ op = (insn >> 8) & 0xf;
+ offset = (int8_t)insn;
+ if (offset == 0) {
+ offset = (int16_t)read_im16(env, s);
+ } else if (offset == -1) {
+ offset = read_im32(env, s);
+ }
+ if (op == 1) {
+ /* bsr */
+ gen_push(s, tcg_const_i32(s->pc));
+ }
+ if (op > 1) {
+ /* Bcc */
+ TCGLabel *l1 = gen_new_label();
+ gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
+ gen_jmp_tb(s, 1, base + offset);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 0, s->pc);
+ } else {
+ /* Unconditional branch. */
+ update_cc_op(s);
+ gen_jmp_tb(s, 0, base + offset);
+ }
+}
+
+DISAS_INSN(moveq)
+{
+ tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
+ gen_logic_cc(s, DREG(insn, 9), OS_LONG);
+}
+
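+/* mvs/mvz: ColdFire move to a data register with sign/zero extension. */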
+DISAS_INSN(mvzs)
+{
+ int opsize;
+ TCGv src;
+ TCGv reg;
+
+ if (insn & 0x40) {
+ opsize = OS_WORD;
+ } else {
+ opsize = OS_BYTE;
+ }
+ SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
+ reg = DREG(insn, 9);
+ tcg_gen_mov_i32(reg, src);
+ gen_logic_cc(s, src, opsize);
+}
+
+DISAS_INSN(or)
+{
+ TCGv reg;
+ TCGv dest;
+ TCGv src;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ reg = gen_extend(s, DREG(insn, 9), opsize, 0);
+ dest = tcg_temp_new();
+ if (insn & 0x100) {
+ SRC_EA(env, src, opsize, 0, &addr);
+ tcg_gen_or_i32(dest, src, reg);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ SRC_EA(env, src, opsize, 0, NULL);
+ tcg_gen_or_i32(dest, src, reg);
+ gen_partset_reg(opsize, DREG(insn, 9), dest);
+ }
+ gen_logic_cc(s, dest, opsize);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(suba)
+{
+ TCGv src;
+ TCGv reg;
+
+ SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
+ reg = AREG(insn, 9);
+ tcg_gen_sub_i32(reg, reg, src);
+}
+
+static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
+{
+ TCGv tmp;
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /*
+ * Perform subtract with borrow.
+ * (X, N) = dest - (src + X);
+ */
+
+ tmp = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
+
+ /* Compute signed-overflow for subtract. */
+
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
+ tcg_gen_xor_i32(tmp, dest, src);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
+ tcg_temp_free(tmp);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+}
+
+DISAS_INSN(subx_reg)
+{
+ TCGv dest;
+ TCGv src;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ src = gen_extend(s, DREG(insn, 0), opsize, 1);
+ dest = gen_extend(s, DREG(insn, 9), opsize, 1);
+
+ gen_subx(s, src, dest, opsize);
+
+ gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
+}
+
+DISAS_INSN(subx_mem)
+{
+ TCGv src;
+ TCGv addr_src;
+ TCGv dest;
+ TCGv addr_dest;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ addr_src = AREG(insn, 0);
+ tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
+ src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
+
+ addr_dest = AREG(insn, 9);
+ tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
+ dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
+
+ gen_subx(s, src, dest, opsize);
+
+ gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
+
+ tcg_temp_free(dest);
+ tcg_temp_free(src);
+}
+
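+/* mov3q: ColdFire move of a 3-bit immediate (1-7, 0 encodes -1) to <EA>. */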
+DISAS_INSN(mov3q)
+{
+ TCGv src;
+ int val;
+
+ val = (insn >> 9) & 7;
+ if (val == 0) {
+ val = -1;
+ }
+ src = tcg_const_i32(val);
+ gen_logic_cc(s, src, OS_LONG);
+ DEST_EA(env, insn, OS_LONG, src, NULL);
+ tcg_temp_free(src);
+}
+
+DISAS_INSN(cmp)
+{
+ TCGv src;
+ TCGv reg;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = gen_extend(s, DREG(insn, 9), opsize, 1);
+ gen_update_cc_cmp(s, reg, src, opsize);
+}
+
+DISAS_INSN(cmpa)
+{
+ int opsize;
+ TCGv src;
+ TCGv reg;
+
+ if (insn & 0x100) {
+ opsize = OS_LONG;
+ } else {
+ opsize = OS_WORD;
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = AREG(insn, 9);
+ gen_update_cc_cmp(s, reg, src, OS_LONG);
+}
+
+DISAS_INSN(cmpm)
+{
+ int opsize = insn_opsize(insn);
+ TCGv src, dst;
+
+ /* Post-increment load (mode 3) from Ay. */
+ src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
+ NULL_QREG, NULL, EA_LOADS, IS_USER(s));
+ /* Post-increment load (mode 3) from Ax. */
+ dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
+ NULL_QREG, NULL, EA_LOADS, IS_USER(s));
+
+ gen_update_cc_cmp(s, dst, src, opsize);
+}
+
+DISAS_INSN(eor)
+{
+ TCGv src;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ SRC_EA(env, src, opsize, 0, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_xor_i32(dest, src, DREG(insn, 9));
+ gen_logic_cc(s, dest, opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
+}
+
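+/* Exchange the contents of two registers via a temporary. */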
+static void do_exg(TCGv reg1, TCGv reg2)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_mov_i32(temp, reg1);
+ tcg_gen_mov_i32(reg1, reg2);
+ tcg_gen_mov_i32(reg2, temp);
+ tcg_temp_free(temp);
+}
+
+DISAS_INSN(exg_dd)
+{
+ /* exchange Dx and Dy */
+ do_exg(DREG(insn, 9), DREG(insn, 0));
+}
+
+DISAS_INSN(exg_aa)
+{
+ /* exchange Ax and Ay */
+ do_exg(AREG(insn, 9), AREG(insn, 0));
+}
+
+DISAS_INSN(exg_da)
+{
+ /* exchange Dx and Ay */
+ do_exg(DREG(insn, 9), AREG(insn, 0));
+}
+
+DISAS_INSN(and)
+{
+ TCGv src;
+ TCGv reg;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ dest = tcg_temp_new();
+
+ opsize = insn_opsize(insn);
+ reg = DREG(insn, 9);
+ if (insn & 0x100) {
+ SRC_EA(env, src, opsize, 0, &addr);
+ tcg_gen_and_i32(dest, src, reg);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ SRC_EA(env, src, opsize, 0, NULL);
+ tcg_gen_and_i32(dest, src, reg);
+ gen_partset_reg(opsize, reg, dest);
+ }
+ gen_logic_cc(s, dest, opsize);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(adda)
+{
+ TCGv src;
+ TCGv reg;
+
+ SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
+ reg = AREG(insn, 9);
+ tcg_gen_add_i32(reg, reg, src);
+}
+
+static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
+{
+ TCGv tmp;
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /*
+ * Perform addition with carry.
+ * (X, N) = src + dest + X;
+ */
+
+ tmp = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+
+ /* Compute signed-overflow for addition. */
+
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
+ tcg_gen_xor_i32(tmp, dest, src);
+ tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
+ tcg_temp_free(tmp);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+}
+
+DISAS_INSN(addx_reg)
+{
+ TCGv dest;
+ TCGv src;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ dest = gen_extend(s, DREG(insn, 9), opsize, 1);
+ src = gen_extend(s, DREG(insn, 0), opsize, 1);
+
+ gen_addx(s, src, dest, opsize);
+
+ gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
+}
+
+DISAS_INSN(addx_mem)
+{
+ TCGv src;
+ TCGv addr_src;
+ TCGv dest;
+ TCGv addr_dest;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ addr_src = AREG(insn, 0);
+ tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
+ src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
+
+ addr_dest = AREG(insn, 9);
+ tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
+ dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
+
+ gen_addx(s, src, dest, opsize);
+
+ gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
+
+ tcg_temp_free(dest);
+ tcg_temp_free(src);
+}
+
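+/*
+ * Shifts with an immediate count encoded in bits 11:9 of the insn,
+ * where a count of 0 means 8; bit 3 selects logical vs arithmetic
+ * and bit 8 selects left vs right.
+ */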
+static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
+{
+ int count = (insn >> 9) & 7;
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ int bits = opsize_bytes(opsize) * 8;
+ TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
+
+ if (count == 0) {
+ count = 8;
+ }
+
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
+ tcg_gen_shli_i32(QREG_CC_N, reg, count);
+
+ /*
+ * Note that ColdFire always clears V (done above),
+ * while M68000 sets if the most significant bit is changed at
+ * any time during the shift operation.
+ */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ /* if shift count >= bits, V is (reg != 0) */
+ if (count >= bits) {
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
+ tcg_gen_sari_i32(t0, reg, bits - count - 1);
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ }
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ }
+ } else {
+ tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
+ if (logical) {
+ tcg_gen_shri_i32(QREG_CC_N, reg, count);
+ } else {
+ tcg_gen_sari_i32(QREG_CC_N, reg, count);
+ }
+ }
+
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+
+ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
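+/*
+ * Illustrative C model of the M68000 ASL overflow test in shift_im()
+ * above, for 1 <= count < bits on a sign-extended reg (sketch only):
+ *
+ *     int32_t sign = reg >> (bits - 1);          // sign bit, replicated
+ *     int32_t top = reg >> (bits - count - 1);   // sign + shifted-out bits
+ *     v = -(sign != top);                        // any sign change sets V
+ */
+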
+static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
+{
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ int bits = opsize_bytes(opsize) * 8;
+ TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
+ TCGv s32;
+ TCGv_i64 t64, s64;
+
+ t64 = tcg_temp_new_i64();
+ s64 = tcg_temp_new_i64();
+ s32 = tcg_temp_new();
+
+ /*
+ * Note that m68k truncates the shift count modulo 64, not 32.
+ * In addition, a 64-bit shift makes it easy to find "the last
+ * bit shifted out", for the carry flag.
+ */
+ tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
+ tcg_gen_extu_i32_i64(s64, s32);
+ tcg_gen_extu_i32_i64(t64, reg);
+
+ /* Optimistically set V=0. Also used as a zero source below. */
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shl_i64(t64, t64, s64);
+
+ if (opsize == OS_LONG) {
+ tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
+ /* Note that C=0 if shift count is 0, and we get that for free. */
+ } else {
+ TCGv zero = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
+ tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ s32, zero, zero, QREG_CC_C);
+ tcg_temp_free(zero);
+ }
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+
+ /* X = C, but only if the shift count was non-zero. */
+ tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
+ QREG_CC_C, QREG_CC_X);
+
+ /*
+ * M68000 sets V if the most significant bit is changed at
+ * any time during the shift operation. Do this via creating
+ * an extension of the sign bit, comparing, and discarding
+ * the bits below the sign bit. I.e.
+ * int64_t s = (intN_t)reg;
+ * int64_t t = (int64_t)(intN_t)reg << count;
+ * V = ((s ^ t) & (-1 << (bits - 1))) != 0
+ */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ TCGv_i64 tt = tcg_const_i64(32);
+ /* if shift is greater than 32, use 32 */
+ tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
+ tcg_temp_free_i64(tt);
+ /* Sign extend the input to 64 bits; re-do the shift. */
+ tcg_gen_ext_i32_i64(t64, reg);
+ tcg_gen_shl_i64(s64, t64, s64);
+ /* Clear all bits that are unchanged. */
+ tcg_gen_xor_i64(t64, t64, s64);
+ /* Ignore the bits below the sign bit. */
+ tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
+ /* If any bits remain set, we have overflow. */
+ tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
+ tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ }
+ } else {
+ tcg_gen_shli_i64(t64, t64, 32);
+ if (logical) {
+ tcg_gen_shr_i64(t64, t64, s64);
+ } else {
+ tcg_gen_sar_i64(t64, t64, s64);
+ }
+ tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
+
+ /* Note that C=0 if shift count is 0, and we get that for free. */
+ tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
+
+ /* X = C, but only if the shift count was non-zero. */
+ tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
+ QREG_CC_C, QREG_CC_X);
+ }
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+
+ tcg_temp_free(s32);
+ tcg_temp_free_i64(s64);
+ tcg_temp_free_i64(t64);
+
+ /* Write back the result. */
+ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(shift8_im)
+{
+ shift_im(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_im)
+{
+ shift_im(s, insn, OS_WORD);
+}
+
+DISAS_INSN(shift_im)
+{
+ shift_im(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift8_reg)
+{
+ shift_reg(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_reg)
+{
+ shift_reg(s, insn, OS_WORD);
+}
+
+DISAS_INSN(shift_reg)
+{
+ shift_reg(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift_mem)
+{
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ TCGv src;
+ TCGv addr;
+
+ SRC_EA(env, src, OS_WORD, !logical, &addr);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shri_i32(QREG_CC_C, src, 15);
+ tcg_gen_shli_i32(QREG_CC_N, src, 1);
+
+ /*
+ * Note that ColdFire always clears V,
+ * while M68000 sets V if the most significant bit is changed at
+ * any time during the shift operation.
+ */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ src = gen_extend(s, src, OS_WORD, 1);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
+ }
+ } else {
+ tcg_gen_mov_i32(QREG_CC_C, src);
+ if (logical) {
+ tcg_gen_shri_i32(QREG_CC_N, src, 1);
+ } else {
+ tcg_gen_sari_i32(QREG_CC_N, src, 1);
+ }
+ }
+
+ gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+
+ DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void rotate(TCGv reg, TCGv shift, int left, int size)
+{
+ switch (size) {
+ case 8:
+ /* Replicate the 8-bit input so that a 32-bit rotate works. */
+ tcg_gen_ext8u_i32(reg, reg);
+ tcg_gen_muli_i32(reg, reg, 0x01010101);
+ goto do_long;
+ case 16:
+ /* Replicate the 16-bit input so that a 32-bit rotate works. */
+ tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
+ goto do_long;
+ do_long:
+ default:
+ if (left) {
+ tcg_gen_rotl_i32(reg, reg, shift);
+ } else {
+ tcg_gen_rotr_i32(reg, reg, shift);
+ }
+ }
+
+ /* compute flags */
+
+ switch (size) {
+ case 8:
+ tcg_gen_ext8s_i32(reg, reg);
+ break;
+ case 16:
+ tcg_gen_ext16s_i32(reg, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* QREG_CC_X is not affected */
+
+ tcg_gen_mov_i32(QREG_CC_N, reg);
+ tcg_gen_mov_i32(QREG_CC_Z, reg);
+
+ if (left) {
+ tcg_gen_andi_i32(QREG_CC_C, reg, 1);
+ } else {
+ tcg_gen_shri_i32(QREG_CC_C, reg, 31);
+ }
+
+ tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
+}
+
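+/*
+ * Illustrative example of the replication trick in rotate() above:
+ * multiplying the byte by 0x01010101 makes an 8-bit rotation reachable
+ * as a 32-bit one. For reg = 0xAB,
+ *
+ *     rotl32(0xABABABAB, 3) == 0x5D5D5D5D
+ *
+ * and the low byte 0x5D equals rotl8(0xAB, 3). The 16-bit case
+ * duplicates the halfword with a deposit for the same reason.
+ */
+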
+static void rotate_x_flags(TCGv reg, TCGv X, int size)
+{
+ switch (size) {
+ case 8:
+ tcg_gen_ext8s_i32(reg, reg);
+ break;
+ case 16:
+ tcg_gen_ext16s_i32(reg, reg);
+ break;
+ default:
+ break;
+ }
+ tcg_gen_mov_i32(QREG_CC_N, reg);
+ tcg_gen_mov_i32(QREG_CC_Z, reg);
+ tcg_gen_mov_i32(QREG_CC_X, X);
+ tcg_gen_mov_i32(QREG_CC_C, X);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+}
+
+/* Result of rotate_x() is valid if 0 <= shift <= size */
+static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
+{
+ TCGv X, shl, shr, shx, sz, zero;
+
+ sz = tcg_const_i32(size);
+
+ shr = tcg_temp_new();
+ shl = tcg_temp_new();
+ shx = tcg_temp_new();
+ if (left) {
+ tcg_gen_mov_i32(shl, shift); /* shl = shift */
+ tcg_gen_movi_i32(shr, size + 1);
+ tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
+ tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
+ /* shx = shx < 0 ? size : shx; */
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
+ tcg_temp_free(zero);
+ } else {
+ tcg_gen_mov_i32(shr, shift); /* shr = shift */
+ tcg_gen_movi_i32(shl, size + 1);
+ tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
+ tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
+ }
+ tcg_temp_free_i32(sz);
+
+ /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
+
+ tcg_gen_shl_i32(shl, reg, shl);
+ tcg_gen_shr_i32(shr, reg, shr);
+ tcg_gen_or_i32(reg, shl, shr);
+ tcg_temp_free(shl);
+ tcg_temp_free(shr);
+ tcg_gen_shl_i32(shx, QREG_CC_X, shx);
+ tcg_gen_or_i32(reg, reg, shx);
+ tcg_temp_free(shx);
+
+ /* X = (reg >> size) & 1 */
+
+ X = tcg_temp_new();
+ tcg_gen_extract_i32(X, reg, size, 1);
+
+ return X;
+}
+
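+/*
+ * Illustrative C model of the decomposition in rotate_x() above: ROXL
+ * treats [X:reg] as one (size + 1)-bit value (sketch, assuming
+ * 1 <= shift <= size and size < 32):
+ *
+ *     uint64_t w = ((uint64_t)x << size) | (reg & ((1u << size) - 1));
+ *     w = (w << shift) | (w >> (size + 1 - shift));
+ *     reg = w & ((1u << size) - 1);
+ *     x = (w >> size) & 1;
+ */
+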
+/* Result of rotate32_x() is valid if 0 <= shift < 33 */
+static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
+{
+ TCGv_i64 t0, shift64;
+ TCGv X, lo, hi, zero;
+
+ shift64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(shift64, shift);
+
+ t0 = tcg_temp_new_i64();
+
+ X = tcg_temp_new();
+ lo = tcg_temp_new();
+ hi = tcg_temp_new();
+
+ if (left) {
+ /* create [reg:X:..] */
+
+ tcg_gen_shli_i32(lo, QREG_CC_X, 31);
+ tcg_gen_concat_i32_i64(t0, lo, reg);
+
+ /* rotate */
+
+ tcg_gen_rotl_i64(t0, t0, shift64);
+ tcg_temp_free_i64(shift64);
+
+ /* result is [reg:..:reg:X] */
+
+ tcg_gen_extr_i64_i32(lo, hi, t0);
+ tcg_gen_andi_i32(X, lo, 1);
+
+ tcg_gen_shri_i32(lo, lo, 1);
+ } else {
+ /* create [..:X:reg] */
+
+ tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
+
+ tcg_gen_rotr_i64(t0, t0, shift64);
+ tcg_temp_free_i64(shift64);
+
+ /* result is value: [X:reg:..:reg] */
+
+ tcg_gen_extr_i64_i32(lo, hi, t0);
+
+ /* extract X */
+
+ tcg_gen_shri_i32(X, hi, 31);
+
+ /* extract result */
+
+ tcg_gen_shli_i32(hi, hi, 1);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_gen_or_i32(lo, lo, hi);
+ tcg_temp_free(hi);
+
+ /* if shift == 0, register and X are not affected */
+
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
+ tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
+ tcg_temp_free(zero);
+ tcg_temp_free(lo);
+
+ return X;
+}
+
+DISAS_INSN(rotate_im)
+{
+ TCGv shift;
+ int tmp;
+ int left = (insn & 0x100);
+
+ tmp = (insn >> 9) & 7;
+ if (tmp == 0) {
+ tmp = 8;
+ }
+
+ shift = tcg_const_i32(tmp);
+ if (insn & 8) {
+ rotate(DREG(insn, 0), shift, left, 32);
+ } else {
+ TCGv X = rotate32_x(DREG(insn, 0), shift, left);
+ rotate_x_flags(DREG(insn, 0), X, 32);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate8_im)
+{
+ int left = (insn & 0x100);
+ TCGv reg;
+ TCGv shift;
+ int tmp;
+
+ reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+
+ tmp = (insn >> 9) & 7;
+ if (tmp == 0) {
+ tmp = 8;
+ }
+
+ shift = tcg_const_i32(tmp);
+ if (insn & 8) {
+ rotate(reg, shift, left, 8);
+ } else {
+ TCGv X = rotate_x(reg, shift, left, 8);
+ rotate_x_flags(reg, X, 8);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+ gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate16_im)
+{
+ int left = (insn & 0x100);
+ TCGv reg;
+ TCGv shift;
+ int tmp;
+
+ reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
+ tmp = (insn >> 9) & 7;
+ if (tmp == 0) {
+ tmp = 8;
+ }
+
+ shift = tcg_const_i32(tmp);
+ if (insn & 8) {
+ rotate(reg, shift, left, 16);
+ } else {
+ TCGv X = rotate_x(reg, shift, left, 16);
+ rotate_x_flags(reg, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+ gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
+
+ reg = DREG(insn, 0);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 31);
+ rotate(reg, t1, left, 32);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 33 */
+ tcg_gen_movi_i32(t1, 33);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate32_x(DREG(insn, 0), t1, left);
+ rotate_x_flags(DREG(insn, 0), X, 32);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate8_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
+
+ reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 7);
+ rotate(reg, t1, left, 8);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 9 */
+ tcg_gen_movi_i32(t1, 9);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate_x(reg, t1, left, 8);
+ rotate_x_flags(reg, X, 8);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate16_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
+
+ reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 15);
+ rotate(reg, t1, left, 16);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 17 */
+ tcg_gen_movi_i32(t1, 17);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate_x(reg, t1, left, 16);
+ rotate_x_flags(reg, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate_mem)
+{
+ TCGv src;
+ TCGv addr;
+ TCGv shift;
+ int left = (insn & 0x100);
+
+ SRC_EA(env, src, OS_WORD, 0, &addr);
+
+ shift = tcg_const_i32(1);
+ if (insn & 0x0200) {
+ rotate(src, shift, left, 16);
+ } else {
+ TCGv X = rotate_x(src, shift, left, 16);
+ rotate_x_flags(src, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+ DEST_EA(env, insn, OS_WORD, src, &addr);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(bfext_reg)
+{
+ int ext = read_im16(env, s);
+ int is_sign = insn & 0x200;
+ TCGv src = DREG(insn, 0);
+ TCGv dst = DREG(ext, 12);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ int pos = 32 - ofs - len; /* little bit-endian */
+ TCGv tmp = tcg_temp_new();
+ TCGv shift;
+
+ /*
+ * In general, we're going to rotate the field so that it's at the
+ * top of the word and then right-shift by the complement of the
+ * width to extend the field.
+ */
+ if (ext & 0x20) {
+ /* Variable width. */
+ if (ext & 0x800) {
+ /* Variable offset. */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(tmp, src, tmp);
+ } else {
+ tcg_gen_rotli_i32(tmp, src, ofs);
+ }
+
+ shift = tcg_temp_new();
+ tcg_gen_neg_i32(shift, DREG(ext, 0));
+ tcg_gen_andi_i32(shift, shift, 31);
+ tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
+ if (is_sign) {
+ tcg_gen_mov_i32(dst, QREG_CC_N);
+ } else {
+ tcg_gen_shr_i32(dst, tmp, shift);
+ }
+ tcg_temp_free(shift);
+ } else {
+ /* Immediate width. */
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(tmp, src, tmp);
+ src = tmp;
+ pos = 32 - len;
+ } else {
+ /*
+ * Immediate offset. If the field doesn't wrap around the
+ * end of the word, rely on (s)extract completely.
+ */
+ if (pos < 0) {
+ tcg_gen_rotli_i32(tmp, src, ofs);
+ src = tmp;
+ pos = 32 - len;
+ }
+ }
+
+ tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
+ if (is_sign) {
+ tcg_gen_mov_i32(dst, QREG_CC_N);
+ } else {
+ tcg_gen_extract_i32(dst, src, pos, len);
+ }
+ }
+
+ tcg_temp_free(tmp);
+ set_cc_op(s, CC_OP_LOGIC);
+}
+
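+/*
+ * Illustrative C model of the rotate-then-shift strategy in
+ * bfext_reg() above (immediate offset/width, 1 <= len <= 32;
+ * rotl32() stands for a plain 32-bit left rotate):
+ *
+ *     uint32_t rot = rotl32(src, ofs);      // field now at bits 31..32-len
+ *     int32_t  se  = (int32_t)rot >> (32 - len);  // BFEXTS result
+ *     uint32_t ze  = rot >> (32 - len);           // BFEXTU result
+ *
+ * The non-wrapping immediate case skips the rotate and uses
+ * (s)extract directly.
+ */
+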
+DISAS_INSN(bfext_mem)
+{
+ int ext = read_im16(env, s);
+ int is_sign = insn & 0x200;
+ TCGv dest = DREG(ext, 12);
+ TCGv addr, len, ofs;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ if (is_sign) {
+ gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
+ tcg_gen_mov_i32(QREG_CC_N, dest);
+ } else {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
+ tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
+}
+
+DISAS_INSN(bfop_reg)
+{
+ int ext = read_im16(env, s);
+ TCGv src = DREG(insn, 0);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ TCGv mask, tofs, tlen;
+
+ tofs = NULL;
+ tlen = NULL;
+ if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
+ tofs = tcg_temp_new();
+ tlen = tcg_temp_new();
+ }
+
+ if ((ext & 0x820) == 0) {
+ /* Immediate width and offset. */
+ uint32_t maski = 0x7fffffffu >> (len - 1);
+ if (ofs + len <= 32) {
+ tcg_gen_shli_i32(QREG_CC_N, src, ofs);
+ } else {
+ tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+ }
+ tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
+ mask = tcg_const_i32(ror32(maski, ofs));
+ if (tofs) {
+ tcg_gen_movi_i32(tofs, ofs);
+ tcg_gen_movi_i32(tlen, len);
+ }
+ } else {
+ TCGv tmp = tcg_temp_new();
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
+ tcg_gen_andi_i32(tmp, tmp, 31);
+ mask = tcg_const_i32(0x7fffffffu);
+ tcg_gen_shr_i32(mask, mask, tmp);
+ if (tlen) {
+ tcg_gen_addi_i32(tlen, tmp, 1);
+ }
+ } else {
+ /* Immediate width */
+ mask = tcg_const_i32(0x7fffffffu >> (len - 1));
+ if (tlen) {
+ tcg_gen_movi_i32(tlen, len);
+ }
+ }
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
+ tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+ tcg_gen_rotr_i32(mask, mask, tmp);
+ if (tofs) {
+ tcg_gen_mov_i32(tofs, tmp);
+ }
+ } else {
+ /* Immediate offset (and variable width) */
+ tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+ tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+ tcg_gen_rotri_i32(mask, mask, ofs);
+ if (tofs) {
+ tcg_gen_movi_i32(tofs, ofs);
+ }
+ }
+ tcg_temp_free(tmp);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ switch (insn & 0x0f00) {
+ case 0x0a00: /* bfchg */
+ tcg_gen_eqv_i32(src, src, mask);
+ break;
+ case 0x0c00: /* bfclr */
+ tcg_gen_and_i32(src, src, mask);
+ break;
+ case 0x0d00: /* bfffo */
+ gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
+ tcg_temp_free(tlen);
+ tcg_temp_free(tofs);
+ break;
+ case 0x0e00: /* bfset */
+ tcg_gen_orc_i32(src, src, mask);
+ break;
+ case 0x0800: /* bftst */
+ /* flags already set; no other work to do. */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free(mask);
+}
+
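+/*
+ * Illustrative summary of the mask algebra in bfop_reg() above: for a
+ * field of 'len' bits at big-endian offset 'ofs',
+ *
+ *     maski = 0x7fffffffu >> (len - 1);  // zeros over a top-aligned field
+ *     mask  = ror32(maski, ofs);         // zeros over the field in place
+ *
+ * so BFCLR is "src & mask", BFSET is "src | ~mask" and BFCHG is
+ * "src ^ ~mask", hence the and/orc/eqv ops in the final switch.
+ */
+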
+DISAS_INSN(bfop_mem)
+{
+ int ext = read_im16(env, s);
+ TCGv addr, len, ofs;
+ TCGv_i64 t64;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ switch (insn & 0x0f00) {
+ case 0x0a00: /* bfchg */
+ gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0c00: /* bfclr */
+ gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0d00: /* bfffo */
+ t64 = tcg_temp_new_i64();
+ gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
+ tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
+ tcg_temp_free_i64(t64);
+ break;
+ case 0x0e00: /* bfset */
+ gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0800: /* bftst */
+ gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
+}
+
+DISAS_INSN(bfins_reg)
+{
+ int ext = read_im16(env, s);
+ TCGv dst = DREG(insn, 0);
+ TCGv src = DREG(ext, 12);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ int pos = 32 - ofs - len; /* little bit-endian */
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_neg_i32(tmp, DREG(ext, 0));
+ tcg_gen_andi_i32(tmp, tmp, 31);
+ tcg_gen_shl_i32(QREG_CC_N, src, tmp);
+ } else {
+ /* Immediate width */
+ tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ /* Immediate width and offset */
+ if ((ext & 0x820) == 0) {
+ /* Check for suitability for deposit. */
+ if (pos >= 0) {
+ tcg_gen_deposit_i32(dst, dst, src, pos, len);
+ } else {
+ uint32_t maski = -2U << (len - 1);
+ uint32_t roti = (ofs + len) & 31;
+ tcg_gen_andi_i32(tmp, src, ~maski);
+ tcg_gen_rotri_i32(tmp, tmp, roti);
+ tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
+ tcg_gen_or_i32(dst, dst, tmp);
+ }
+ } else {
+ TCGv mask = tcg_temp_new();
+ TCGv rot = tcg_temp_new();
+
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
+ tcg_gen_andi_i32(rot, rot, 31);
+ tcg_gen_movi_i32(mask, -2);
+ tcg_gen_shl_i32(mask, mask, rot);
+ tcg_gen_mov_i32(rot, DREG(ext, 0));
+ tcg_gen_andc_i32(tmp, src, mask);
+ } else {
+ /* Immediate width (variable offset) */
+ uint32_t maski = -2U << (len - 1);
+ tcg_gen_andi_i32(tmp, src, ~maski);
+ tcg_gen_movi_i32(mask, maski);
+ tcg_gen_movi_i32(rot, len & 31);
+ }
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_add_i32(rot, rot, DREG(ext, 6));
+ } else {
+ /* Immediate offset (variable width) */
+ tcg_gen_addi_i32(rot, rot, ofs);
+ }
+ tcg_gen_andi_i32(rot, rot, 31);
+ tcg_gen_rotr_i32(mask, mask, rot);
+ tcg_gen_rotr_i32(tmp, tmp, rot);
+ tcg_gen_and_i32(dst, dst, mask);
+ tcg_gen_or_i32(dst, dst, tmp);
+
+ tcg_temp_free(rot);
+ tcg_temp_free(mask);
+ }
+ tcg_temp_free(tmp);
+}
+
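+/*
+ * Illustrative: in the simple case of bfins_reg() above (immediate,
+ * non-wrapping field) the insert is an ordinary deposit; as a C sketch,
+ * assuming len < 32:
+ *
+ *     uint32_t m = ((1u << len) - 1) << pos;
+ *     dst = (dst & ~m) | ((src << pos) & m);
+ *
+ * The rotated-mask path generalizes this to wrapped and variable
+ * offset/width fields.
+ */
+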
+DISAS_INSN(bfins_mem)
+{
+ int ext = read_im16(env, s);
+ TCGv src = DREG(ext, 12);
+ TCGv addr, len, ofs;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
+}
+
+DISAS_INSN(ff1)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_logic_cc(s, reg, OS_LONG);
+ gen_helper_ff1(reg, reg);
+}
+
+DISAS_INSN(chk)
+{
+ TCGv src, reg;
+ int opsize;
+
+ switch ((insn >> 7) & 3) {
+ case 3:
+ opsize = OS_WORD;
+ break;
+ case 2:
+ if (m68k_feature(env, M68K_FEATURE_CHK2)) {
+ opsize = OS_LONG;
+ break;
+ }
+ /* fallthru */
+ default:
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = gen_extend(s, DREG(insn, 9), opsize, 1);
+
+ gen_flush_flags(s);
+ gen_helper_chk(cpu_env, reg, src);
+}
+
+DISAS_INSN(chk2)
+{
+ uint16_t ext;
+ TCGv addr1, addr2, bound1, bound2, reg;
+ int opsize;
+
+ switch ((insn >> 9) & 3) {
+ case 0:
+ opsize = OS_BYTE;
+ break;
+ case 1:
+ opsize = OS_WORD;
+ break;
+ case 2:
+ opsize = OS_LONG;
+ break;
+ default:
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+
+ ext = read_im16(env, s);
+ if ((ext & 0x0800) == 0) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+
+ addr1 = gen_lea(env, s, insn, OS_UNSIZED);
+ addr2 = tcg_temp_new();
+ tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
+
+ bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
+ tcg_temp_free(addr1);
+ bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
+ tcg_temp_free(addr2);
+
+ reg = tcg_temp_new();
+ if (ext & 0x8000) {
+ tcg_gen_mov_i32(reg, AREG(ext, 12));
+ } else {
+ gen_ext(reg, DREG(ext, 12), opsize, 1);
+ }
+
+ gen_flush_flags(s);
+ gen_helper_chk2(cpu_env, reg, bound1, bound2);
+ tcg_temp_free(reg);
+ tcg_temp_free(bound1);
+ tcg_temp_free(bound2);
+}
+
+static void m68k_copy_line(TCGv dst, TCGv src, int index)
+{
+ TCGv addr;
+ TCGv_i64 t0, t1;
+
+ addr = tcg_temp_new();
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i32(addr, src, ~15);
+ tcg_gen_qemu_ld64(t0, addr, index);
+ tcg_gen_addi_i32(addr, addr, 8);
+ tcg_gen_qemu_ld64(t1, addr, index);
+
+ tcg_gen_andi_i32(addr, dst, ~15);
+ tcg_gen_qemu_st64(t0, addr, index);
+ tcg_gen_addi_i32(addr, addr, 8);
+ tcg_gen_qemu_st64(t1, addr, index);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(addr);
+}
+
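+/*
+ * Behaviourally (pseudo-code, with 'ram' standing in for guest memory)
+ * this is an aligned 16-byte line copy,
+ *
+ *     memcpy(ram + (dst & ~15), ram + (src & ~15), 16);
+ *
+ * done as two 8-byte loads followed by two 8-byte stores, so both
+ * reads take any fault before the first write is performed.
+ */
+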
+DISAS_INSN(move16_reg)
+{
+ int index = IS_USER(s);
+ TCGv tmp;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+ if ((ext & (1 << 15)) == 0) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ }
+
+ m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
+
+ /* Ax can be Ay, so save Ay before incrementing Ax */
+ tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, AREG(ext, 12));
+ tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
+ tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(move16_mem)
+{
+ int index = IS_USER(s);
+ TCGv reg, addr;
+
+ reg = AREG(insn, 0);
+ addr = tcg_const_i32(read_im32(env, s));
+
+ if ((insn >> 3) & 1) {
+ /* MOVE16 (xxx).L, (Ay) */
+ m68k_copy_line(reg, addr, index);
+ } else {
+ /* MOVE16 (Ay), (xxx).L */
+ m68k_copy_line(addr, reg, index);
+ }
+
+ tcg_temp_free(addr);
+
+ if (((insn >> 3) & 2) == 0) {
+ /* (Ay)+ */
+ tcg_gen_addi_i32(reg, reg, 16);
+ }
+}
+
+DISAS_INSN(strldsr)
+{
+ uint16_t ext;
+ uint32_t addr;
+
+ addr = s->pc - 2;
+ ext = read_im16(env, s);
+ if (ext != 0x46FC) {
+ gen_exception(s, addr, EXCP_ILLEGAL);
+ return;
+ }
+ ext = read_im16(env, s);
+ if (IS_USER(s) || (ext & SR_S) == 0) {
+ gen_exception(s, addr, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_push(s, gen_get_sr(s));
+ gen_set_sr_im(s, ext, 0);
+}
+
+DISAS_INSN(move_from_sr)
+{
+ TCGv sr;
+
+ if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ sr = gen_get_sr(s);
+ DEST_EA(env, insn, OS_WORD, sr, NULL);
+}
+
+#if defined(CONFIG_SOFTMMU)
+DISAS_INSN(moves)
+{
+ int opsize;
+ uint16_t ext;
+ TCGv reg;
+ TCGv addr;
+ int extend;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ opsize = insn_opsize(insn);
+
+ if (ext & 0x8000) {
+ /* address register */
+ reg = AREG(ext, 12);
+ extend = 1;
+ } else {
+ /* data register */
+ reg = DREG(ext, 12);
+ extend = 0;
+ }
+
+ addr = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x0800) {
+ /* from reg to ea */
+ gen_store(s, opsize, addr, reg, DFC_INDEX(s));
+ } else {
+ /* from ea to reg */
+ TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
+ if (extend) {
+ gen_ext(reg, tmp, opsize, 1);
+ } else {
+ gen_partset_reg(opsize, reg, tmp);
+ }
+ tcg_temp_free(tmp);
+ }
+ switch (extract32(insn, 3, 3)) {
+ case 3: /* Indirect postincrement. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr,
+ REG(insn, 0) == 7 && opsize == OS_BYTE
+ ? 2
+ : opsize_bytes(opsize));
+ break;
+ case 4: /* Indirect predecrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ break;
+ }
+}
+
+DISAS_INSN(move_to_sr)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_move_to_sr(env, s, insn, false);
+ gen_exit_tb(s);
+}
+
+DISAS_INSN(move_from_usp)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
+ offsetof(CPUM68KState, sp[M68K_USP]));
+}
+
+DISAS_INSN(move_to_usp)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ tcg_gen_st_i32(AREG(insn, 0), cpu_env,
+ offsetof(CPUM68KState, sp[M68K_USP]));
+}
+
+DISAS_INSN(halt)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ gen_exception(s, s->pc, EXCP_HALT_INSN);
+}
+
+DISAS_INSN(stop)
+{
+ uint16_t ext;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ gen_set_sr_im(s, ext, 0);
+ tcg_gen_movi_i32(cpu_halted, 1);
+ gen_exception(s, s->pc, EXCP_HLT);
+}
+
+DISAS_INSN(rte)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_exception(s, s->base.pc_next, EXCP_RTE);
+}
+
+DISAS_INSN(cf_movec)
+{
+ uint16_t ext;
+ TCGv reg;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ if (ext & 0x8000) {
+ reg = AREG(ext, 12);
+ } else {
+ reg = DREG(ext, 12);
+ }
+ gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_exit_tb(s);
+}
+
+DISAS_INSN(m68k_movec)
+{
+ uint16_t ext;
+ TCGv reg;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ if (ext & 0x8000) {
+ reg = AREG(ext, 12);
+ } else {
+ reg = DREG(ext, 12);
+ }
+ if (insn & 1) {
+ gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ } else {
+ gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
+ }
+ gen_exit_tb(s);
+}
+
+DISAS_INSN(intouch)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ /* ICache fetch. Implement as no-op. */
+}
+
+DISAS_INSN(cpushl)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Cache push/invalidate. Implement as no-op. */
+}
+
+DISAS_INSN(cpush)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Cache push/invalidate. Implement as no-op. */
+}
+
+DISAS_INSN(cinv)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Invalidate cache line. Implement as no-op. */
+}
+
+#if defined(CONFIG_SOFTMMU)
+DISAS_INSN(pflush)
+{
+ TCGv opmode;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ opmode = tcg_const_i32((insn >> 3) & 3);
+ gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
+ tcg_temp_free(opmode);
+}
+
+DISAS_INSN(ptest)
+{
+ TCGv is_read;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ is_read = tcg_const_i32((insn >> 5) & 1);
+ gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
+ tcg_temp_free(is_read);
+}
+#endif
+
+DISAS_INSN(wddata)
+{
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+}
+
+DISAS_INSN(wdebug)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ /* TODO: Implement wdebug. */
+ cpu_abort(env_cpu(env), "WDEBUG not implemented");
+}
+#endif
+
+DISAS_INSN(trap)
+{
+ gen_exception(s, s->base.pc_next, EXCP_TRAP0 + (insn & 0xf));
+}
+
+static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
+{
+ switch (reg) {
+ case M68K_FPIAR:
+ tcg_gen_movi_i32(res, 0);
+ break;
+ case M68K_FPSR:
+ tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
+ break;
+ case M68K_FPCR:
+ tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
+ break;
+ }
+}
+
+static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
+{
+ switch (reg) {
+ case M68K_FPIAR:
+ break;
+ case M68K_FPSR:
+ tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
+ break;
+ case M68K_FPCR:
+ gen_helper_set_fpcr(cpu_env, val);
+ break;
+ }
+}
+
+static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
+{
+ int index = IS_USER(s);
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ gen_load_fcr(s, tmp, reg);
+ tcg_gen_qemu_st32(tmp, addr, index);
+ tcg_temp_free(tmp);
+}
+
+static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
+{
+ int index = IS_USER(s);
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ gen_store_fcr(s, tmp, reg);
+ tcg_temp_free(tmp);
+}
+
+static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
+ uint32_t insn, uint32_t ext)
+{
+ int mask = (ext >> 10) & 7;
+ int is_write = (ext >> 13) & 1;
+ int mode = extract32(insn, 3, 3);
+ int i;
+ TCGv addr, tmp;
+
+ switch (mode) {
+ case 0: /* Dn */
+ if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+ if (is_write) {
+ gen_load_fcr(s, DREG(insn, 0), mask);
+ } else {
+ gen_store_fcr(s, DREG(insn, 0), mask);
+ }
+ return;
+ case 1: /* An, only with FPIAR */
+ if (mask != M68K_FPIAR) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+ if (is_write) {
+ gen_load_fcr(s, AREG(insn, 0), mask);
+ } else {
+ gen_store_fcr(s, AREG(insn, 0), mask);
+ }
+ return;
+ case 7: /* Immediate */
+ if (REG(insn, 0) == 4) {
+ if (is_write ||
+ (mask != M68K_FPIAR && mask != M68K_FPSR &&
+ mask != M68K_FPCR)) {
+ gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
+ return;
+ }
+ tmp = tcg_const_i32(read_im32(env, s));
+ gen_store_fcr(s, tmp, mask);
+ tcg_temp_free(tmp);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ addr = tcg_temp_new();
+ tcg_gen_mov_i32(addr, tmp);
+
+ /*
+ * mask:
+ *
+ * 0b100 Floating-Point Control Register
+ * 0b010 Floating-Point Status Register
+ * 0b001 Floating-Point Instruction Address Register
+ *
+ */
+
+ if (is_write && mode == 4) {
+ for (i = 2; i >= 0; i--, mask >>= 1) {
+ if (mask & 1) {
+ gen_qemu_store_fcr(s, addr, 1 << i);
+ if (mask != 1) {
+ tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
+ }
+ }
+ }
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ } else {
+ for (i = 0; i < 3; i++, mask >>= 1) {
+ if (mask & 1) {
+ if (is_write) {
+ gen_qemu_store_fcr(s, addr, 1 << i);
+ } else {
+ gen_qemu_load_fcr(s, addr, 1 << i);
+ }
+ if (mask != 1 || mode == 3) {
+ tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
+ }
+ }
+ }
+ if (mode == 3) {
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ }
+ }
+ tcg_temp_free_i32(addr);
+}
+
+static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
+ uint32_t insn, uint32_t ext)
+{
+ int opsize;
+ TCGv addr, tmp;
+ int mode = (ext >> 11) & 0x3;
+ int is_load = ((ext & 0x2000) == 0);
+
+ if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
+ opsize = OS_EXTENDED;
+ } else {
+ opsize = OS_DOUBLE; /* FIXME */
+ }
+
+ addr = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ tmp = tcg_temp_new();
+ if (mode & 0x1) {
+ /* Dynamic register list */
+ tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
+ } else {
+ /* Static register list */
+ tcg_gen_movi_i32(tmp, ext & 0xff);
+ }
+
+ if (!is_load && (mode & 2) == 0) {
+ /*
+ * predecrement addressing mode,
+ * only available when storing registers to memory
+ */
+ if (opsize == OS_EXTENDED) {
+ gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
+ }
+ } else {
+ /* postincrement addressing mode */
+ if (opsize == OS_EXTENDED) {
+ if (is_load) {
+ gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
+ }
+ } else {
+ if (is_load) {
+ gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
+ } else {
+ gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
+ }
+ }
+ }
+ if ((insn & 070) == 030 || (insn & 070) == 040) {
+ tcg_gen_mov_i32(AREG(insn, 0), tmp);
+ }
+ tcg_temp_free(tmp);
+}
+
+/*
+ * ??? FP exceptions are not implemented. Most exceptions are deferred until
+ * immediately before the next FP instruction is executed.
+ */
+DISAS_INSN(fpu)
+{
+ uint16_t ext;
+ int opmode;
+ int opsize;
+ TCGv_ptr cpu_src, cpu_dest;
+
+ ext = read_im16(env, s);
+ opmode = ext & 0x7f;
+ switch ((ext >> 13) & 7) {
+ case 0:
+ break;
+ case 1:
+ goto undef;
+ case 2:
+ if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
+ /* fmovecr */
+ TCGv rom_offset = tcg_const_i32(opmode);
+ cpu_dest = gen_fp_ptr(REG(ext, 7));
+ gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
+ tcg_temp_free_ptr(cpu_dest);
+ tcg_temp_free(rom_offset);
+ return;
+ }
+ break;
+ case 3: /* fmove out */
+ cpu_src = gen_fp_ptr(REG(ext, 7));
+ opsize = ext_opsize(ext, 10);
+ if (gen_ea_fp(env, s, insn, opsize, cpu_src,
+ EA_STORE, IS_USER(s)) == -1) {
+ gen_addr_fault(s);
+ }
+ gen_helper_ftst(cpu_env, cpu_src);
+ tcg_temp_free_ptr(cpu_src);
+ return;
+ case 4: /* fmove to control register. */
+ case 5: /* fmove from control register. */
+ gen_op_fmove_fcr(env, s, insn, ext);
+ return;
+ case 6: /* fmovem */
+ case 7:
+ if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
+ goto undef;
+ }
+ gen_op_fmovem(env, s, insn, ext);
+ return;
+ }
+ if (ext & (1 << 14)) {
+ /* Source effective address. */
+ opsize = ext_opsize(ext, 10);
+ cpu_src = gen_fp_result_ptr();
+ if (gen_ea_fp(env, s, insn, opsize, cpu_src,
+ EA_LOADS, IS_USER(s)) == -1) {
+ gen_addr_fault(s);
+ return;
+ }
+ } else {
+ /* Source register. */
+ opsize = OS_EXTENDED;
+ cpu_src = gen_fp_ptr(REG(ext, 10));
+ }
+ cpu_dest = gen_fp_ptr(REG(ext, 7));
+ switch (opmode) {
+ case 0: /* fmove */
+ gen_fp_move(cpu_dest, cpu_src);
+ break;
+ case 0x40: /* fsmove */
+ gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x44: /* fdmove */
+ gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 1: /* fint */
+ gen_helper_firound(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 2: /* fsinh */
+ gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 3: /* fintrz */
+ gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 4: /* fsqrt */
+ gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x41: /* fssqrt */
+ gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x45: /* fdsqrt */
+ gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x06: /* flognp1 */
+ gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x08: /* fetoxm1 */
+ gen_helper_fetoxm1(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x09: /* ftanh */
+ gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x0a: /* fatan */
+ gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x0c: /* fasin */
+ gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x0d: /* fatanh */
+ gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x0e: /* fsin */
+ gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x0f: /* ftan */
+ gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x10: /* fetox */
+ gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x11: /* ftwotox */
+ gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x12: /* ftentox */
+ gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x14: /* flogn */
+ gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x15: /* flog10 */
+ gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x16: /* flog2 */
+ gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x18: /* fabs */
+ gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x58: /* fsabs */
+ gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5c: /* fdabs */
+ gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x19: /* fcosh */
+ gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x1a: /* fneg */
+ gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5a: /* fsneg */
+ gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x5e: /* fdneg */
+ gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x1c: /* facos */
+ gen_helper_facos(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x1d: /* fcos */
+ gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x1e: /* fgetexp */
+ gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x1f: /* fgetman */
+ gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
+ break;
+ case 0x20: /* fdiv */
+ gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x60: /* fsdiv */
+ gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x64: /* fddiv */
+ gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x21: /* fmod */
+ gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x22: /* fadd */
+ gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x62: /* fsadd */
+ gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x66: /* fdadd */
+ gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x23: /* fmul */
+ gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x63: /* fsmul */
+ gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x67: /* fdmul */
+ gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x24: /* fsgldiv */
+ gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x25: /* frem */
+ gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x26: /* fscale */
+ gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x27: /* fsglmul */
+ gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x28: /* fsub */
+ gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x68: /* fssub */
+ gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x6c: /* fdsub */
+ gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
+ break;
+ case 0x30: case 0x31: case 0x32:
+ case 0x33: case 0x34: case 0x35:
+ case 0x36: case 0x37: {
+ TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
+ gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
+ tcg_temp_free_ptr(cpu_dest2);
+ }
+ break;
+ case 0x38: /* fcmp */
+ gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
+ return;
+ case 0x3a: /* ftst */
+ gen_helper_ftst(cpu_env, cpu_src);
+ return;
+ default:
+ goto undef;
+ }
+ tcg_temp_free_ptr(cpu_src);
+ gen_helper_ftst(cpu_env, cpu_dest);
+ tcg_temp_free_ptr(cpu_dest);
+ return;
+undef:
+ /* FIXME: Is this right for offset addressing modes? */
+ s->pc -= 2;
+ disas_undef_fpu(env, s, insn);
+}
+
+static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
+{
+ TCGv fpsr;
+
+ c->g1 = 1;
+ c->v2 = tcg_const_i32(0);
+ c->g2 = 0;
+ /* TODO: Raise BSUN exception. */
+ fpsr = tcg_temp_new();
+ gen_load_fcr(s, fpsr, M68K_FPSR);
+ switch (cond) {
+ case 0: /* False */
+ case 16: /* Signaling False */
+ c->v1 = c->v2;
+ c->tcond = TCG_COND_NEVER;
+ break;
+ case 1: /* EQual Z */
+ case 17: /* Signaling EQual Z */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 2: /* Ordered Greater Than !(A || Z || N) */
+ case 18: /* Greater Than !(A || Z || N) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr,
+ FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
+ c->tcond = TCG_COND_EQ;
+ break;
+ case 3: /* Ordered Greater than or Equal Z || !(A || N) */
+ case 19: /* Greater than or Equal Z || !(A || N) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
+ tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
+ tcg_gen_or_i32(c->v1, c->v1, fpsr);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 4: /* Ordered Less Than !(!N || A || Z); */
+ case 20: /* Less Than !(!N || A || Z); */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
+ break;
+ case 5: /* Ordered Less than or Equal Z || (N && !A) */
+ case 21: /* Less than or Equal Z || (N && !A) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
+ tcg_gen_andc_i32(c->v1, fpsr, c->v1);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 6: /* Ordered Greater or Less than !(A || Z) */
+ case 22: /* Greater or Less than !(A || Z) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
+ break;
+ case 7: /* Ordered !A */
+ case 23: /* Greater, Less or Equal !A */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ c->tcond = TCG_COND_EQ;
+ break;
+ case 8: /* Unordered A */
+ case 24: /* Not Greater, Less or Equal A */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 9: /* Unordered or Equal A || Z */
+ case 25: /* Not Greater or Less than A || Z */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 10: /* Unordered or Greater Than A || !(N || Z) */
+ case 26: /* Not Less or Equal A || !(N || Z) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
+ tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
+ tcg_gen_or_i32(c->v1, c->v1, fpsr);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 11: /* Unordered or Greater or Equal A || Z || !N */
+ case 27: /* Not Less Than A || Z || !N */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
+ tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 12: /* Unordered or Less Than A || (N && !Z) */
+ case 28: /* Not Greater than or Equal A || (N && !Z) */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
+ tcg_gen_andc_i32(c->v1, fpsr, c->v1);
+ tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 13: /* Unordered or Less or Equal A || Z || N */
+ case 29: /* Not Greater Than A || Z || N */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
+ c->tcond = TCG_COND_NE;
+ break;
+ case 14: /* Not Equal !Z */
+ case 30: /* Signaling Not Equal !Z */
+ c->v1 = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
+ c->tcond = TCG_COND_EQ;
+ break;
+ case 15: /* True */
+ case 31: /* Signaling True */
+ c->v1 = c->v2;
+ c->tcond = TCG_COND_ALWAYS;
+ break;
+ }
+ tcg_temp_free(fpsr);
+}
+
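+/*
+ * Illustrative reading of gen_fcc_cond() above: with the FPSR condition
+ * bits N, Z and A (NaN), each predicate reduces to one branch-free test
+ * of a masked FPSR against zero, e.g. OGT (cases 2/18):
+ *
+ *     take = (fpsr & (FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N)) == 0;
+ */
+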
+static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
+{
+ DisasCompare c;
+
+ gen_fcc_cond(&c, s, cond);
+ update_cc_op(s);
+ tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
+ free_cond(&c);
+}
+
+DISAS_INSN(fbcc)
+{
+ uint32_t offset;
+ uint32_t base;
+ TCGLabel *l1;
+
+ base = s->pc;
+ offset = (int16_t)read_im16(env, s);
+ if (insn & (1 << 6)) {
+ offset = (offset << 16) | read_im16(env, s);
+ }
+
+ l1 = gen_new_label();
+ update_cc_op(s);
+ gen_fjmpcc(s, insn & 0x3f, l1);
+ gen_jmp_tb(s, 0, s->pc);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 1, base + offset);
+}
+
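+/*
+ * Illustrative: the FBcc displacement is relative to the word following
+ * the opcode (disp_hi/disp_lo name the one or two immediate words;
+ * sketch only):
+ *
+ *     target = base + (int16_t)disp_hi;                     // word form
+ *     target = base + (int32_t)((disp_hi << 16) | disp_lo); // long form
+ *
+ * where 'base' is latched before the displacement words are read.
+ */
+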
+DISAS_INSN(fscc)
+{
+ DisasCompare c;
+ int cond;
+ TCGv tmp;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+ cond = ext & 0x3f;
+ gen_fcc_cond(&c, s, cond);
+
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
+ free_cond(&c);
+
+ tcg_gen_neg_i32(tmp, tmp);
+ DEST_EA(env, insn, OS_BYTE, tmp, NULL);
+ tcg_temp_free(tmp);
+}
+
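+/*
+ * Illustrative: FScc materializes the predicate as 0x00 or 0xff, i.e.
+ *
+ *     res = cond ? -1 : 0;  // setcond yields 0/1, the neg turns 1 into -1
+ *
+ * before the byte-sized store to the destination.
+ */
+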
+#if defined(CONFIG_SOFTMMU)
+DISAS_INSN(frestore)
+{
+ TCGv addr;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+ if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
+ SRC_EA(env, addr, OS_LONG, 0, NULL);
+ /* FIXME: check the state frame */
+ } else {
+ disas_undef(env, s, insn);
+ }
+}
+
+DISAS_INSN(fsave)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
+ return;
+ }
+
+ if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
+ /* always write IDLE */
+ TCGv idle = tcg_const_i32(0x41000000);
+ DEST_EA(env, insn, OS_LONG, idle, NULL);
+ tcg_temp_free(idle);
+ } else {
+ disas_undef(env, s, insn);
+ }
+}
+#endif
+
+static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
+{
+ TCGv tmp = tcg_temp_new();
+ if (s->env->macsr & MACSR_FI) {
+ if (upper)
+ tcg_gen_andi_i32(tmp, val, 0xffff0000);
+ else
+ tcg_gen_shli_i32(tmp, val, 16);
+ } else if (s->env->macsr & MACSR_SU) {
+ if (upper)
+ tcg_gen_sari_i32(tmp, val, 16);
+ else
+ tcg_gen_ext16s_i32(tmp, val);
+ } else {
+ if (upper)
+ tcg_gen_shri_i32(tmp, val, 16);
+ else
+ tcg_gen_ext16u_i32(tmp, val);
+ }
+ return tmp;
+}
+
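+/*
+ * Illustrative summary of gen_mac_extract_word() above: fractional mode
+ * (FI) keeps the chosen halfword aligned to the high 16 bits,
+ *
+ *     tmp = upper ? (val & 0xffff0000) : (val << 16);
+ *
+ * while the signed (SU) and unsigned modes extract it into the low 16
+ * bits with sign- or zero-extension respectively.
+ */
+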
+static void gen_mac_clear_flags(void)
+{
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
+ ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
+}
+
+DISAS_INSN(mac)
+{
+ TCGv rx;
+ TCGv ry;
+ uint16_t ext;
+ int acc;
+ TCGv tmp;
+ TCGv addr;
+ TCGv loadval;
+ int dual;
+ TCGv saved_flags;
+
+ if (!s->done_mac) {
+ s->mactmp = tcg_temp_new_i64();
+ s->done_mac = 1;
+ }
+
+ ext = read_im16(env, s);
+
+ acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
+ dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
+ if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ if (insn & 0x30) {
+ /* MAC with load. */
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ addr = tcg_temp_new();
+ tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
+ /*
+ * Load the value now to ensure correct exception behavior.
+ * Perform writeback after reading the MAC inputs.
+ */
+ loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
+
+ acc ^= 1;
+ rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
+ ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
+ } else {
+ loadval = addr = NULL_QREG;
+ rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+ ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ }
+
+ gen_mac_clear_flags();
+#if 0
+ l1 = -1;
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
+ /* Skip the multiply if we know we will ignore it. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+
+ if ((ext & 0x0800) == 0) {
+ /* Word. */
+ rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
+ ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
+ }
+ if (s->env->macsr & MACSR_FI) {
+ gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
+ } else {
+ if (s->env->macsr & MACSR_SU)
+ gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
+ else
+ gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
+ switch ((ext >> 9) & 3) {
+ case 1:
+ tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
+ break;
+ case 3:
+ tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
+ break;
+ }
+ }
+
+ if (dual) {
+ /* Save the overflow flag from the multiply. */
+ saved_flags = tcg_temp_new();
+ tcg_gen_mov_i32(saved_flags, QREG_MACSR);
+ } else {
+ saved_flags = NULL_QREG;
+ }
+
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
+ /* Skip the accumulate if the value is already saturated. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+
+ if (insn & 0x100)
+ tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ else
+ tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
+
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ else
+ gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if (l1 != -1)
+ gen_set_label(l1);
+#endif
+
+ if (dual) {
+ /* Dual accumulate variant. */
+ acc = (ext >> 2) & 3;
+ /* Restore the overflow flag from the multiplier. */
+ tcg_gen_mov_i32(QREG_MACSR, saved_flags);
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0) {
+ /* Skip the accumulate if the value is already saturated. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+ if (ext & 2)
+ tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ else
+ tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ else
+ gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if (l1 != -1)
+ gen_set_label(l1);
+#endif
+ }
+ gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
+
+ if (insn & 0x30) {
+ TCGv rw;
+ rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+ tcg_gen_mov_i32(rw, loadval);
+ /*
+ * FIXME: Should address writeback happen with the masked or
+ * unmasked value?
+ */
+ switch ((insn >> 3) & 7) {
+ case 3: /* Post-increment. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
+ break;
+ case 4: /* Pre-decrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ }
+ tcg_temp_free(loadval);
+ }
+}
+
+DISAS_INSN(from_mac)
+{
+ TCGv rx;
+ TCGv_i64 acc;
+ int accnum;
+
+ rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ accnum = (insn >> 9) & 3;
+ acc = MACREG(accnum);
+ if (s->env->macsr & MACSR_FI) {
+ gen_helper_get_macf(rx, cpu_env, acc);
+ } else if ((s->env->macsr & MACSR_OMC) == 0) {
+ tcg_gen_extrl_i64_i32(rx, acc);
+ } else if (s->env->macsr & MACSR_SU) {
+ gen_helper_get_macs(rx, acc);
+ } else {
+ gen_helper_get_macu(rx, acc);
+ }
+ if (insn & 0x40) {
+ tcg_gen_movi_i64(acc, 0);
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
+ }
+}
+
+DISAS_INSN(move_mac)
+{
+ /* FIXME: This can be done without a helper. */
+ int src;
+ TCGv dest;
+ src = insn & 3;
+ dest = tcg_const_i32((insn >> 9) & 3);
+ gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
+ gen_mac_clear_flags();
+ gen_helper_mac_set_flags(cpu_env, dest);
+}
+
+DISAS_INSN(from_macsr)
+{
+ TCGv reg;
+
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ tcg_gen_mov_i32(reg, QREG_MACSR);
+}
+
+DISAS_INSN(from_mask)
+{
+ TCGv reg;
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ tcg_gen_mov_i32(reg, QREG_MAC_MASK);
+}
+
+DISAS_INSN(from_mext)
+{
+ TCGv reg;
+ TCGv acc;
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_get_mac_extf(reg, cpu_env, acc);
+ else
+ gen_helper_get_mac_exti(reg, cpu_env, acc);
+}
+
+DISAS_INSN(macsr_to_ccr)
+{
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
+ gen_helper_set_sr(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(to_mac)
+{
+ TCGv_i64 acc;
+ TCGv val;
+ int accnum;
+ accnum = (insn >> 9) & 3;
+ acc = MACREG(accnum);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ if (s->env->macsr & MACSR_FI) {
+ tcg_gen_ext_i32_i64(acc, val);
+ tcg_gen_shli_i64(acc, acc, 8);
+ } else if (s->env->macsr & MACSR_SU) {
+ tcg_gen_ext_i32_i64(acc, val);
+ } else {
+ tcg_gen_extu_i32_i64(acc, val);
+ }
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
+ gen_mac_clear_flags();
+ gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
+}
+
+DISAS_INSN(to_macsr)
+{
+ TCGv val;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ gen_helper_set_macsr(cpu_env, val);
+ gen_exit_tb(s);
+}
+
+DISAS_INSN(to_mask)
+{
+ TCGv val;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
+}
+
+DISAS_INSN(to_mext)
+{
+ TCGv val;
+ TCGv acc;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_set_mac_extf(cpu_env, val, acc);
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_set_mac_exts(cpu_env, val, acc);
+ else
+ gen_helper_set_mac_extu(cpu_env, val, acc);
+}
+
+static disas_proc opcode_table[65536];
+
+static void
+register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
+{
+ int i;
+ int from;
+ int to;
+
+ /* Sanity check. All set bits must be included in the mask. */
+ if (opcode & ~mask) {
+ fprintf(stderr,
+ "qemu internal error: bogus opcode definition %04x/%04x\n",
+ opcode, mask);
+ abort();
+ }
+ /*
+ * This could probably be cleverer. For now just optimize the case where
+ * the top bits are known.
+ */
+ /* Find the first zero bit in the mask. */
+ i = 0x8000;
+ while ((i & mask) != 0)
+ i >>= 1;
+ /* Iterate over all combinations of this and lower bits. */
+ if (i == 0)
+ i = 1;
+ else
+ i <<= 1;
+ from = opcode & ~(i - 1);
+ to = from + i;
+ for (i = from; i < to; i++) {
+ if ((i & mask) == opcode)
+ opcode_table[i] = proc;
+ }
+}
+
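+/*
+ * Example (illustrative): register_opcode(p, 0x5080, 0xf0c0) finds its
+ * first zero mask bit at 0x0800, so 'from'/'to' become 0x5000/0x6000
+ * and p is installed at every i in that range with
+ * (i & 0xf0c0) == 0x5080.
+ */
+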
+/*
+ * Register m68k opcode handlers. Order is important.
+ * Later insns override earlier ones.
+ */
+void register_m68k_insns (CPUM68KState *env)
+{
+ /*
+ * Build the opcode table only once to avoid
+ * multithreading issues.
+ */
+ if (opcode_table[0] != NULL) {
+ return;
+ }
+
+ /*
+ * Use BASE() for instructions available
+ * on both CF_ISA_A and M68000.
+ */
+#define BASE(name, opcode, mask) \
+ register_opcode(disas_##name, 0x##opcode, 0x##mask)
+#define INSN(name, opcode, mask, feature) do { \
+ if (m68k_feature(env, M68K_FEATURE_##feature)) \
+ BASE(name, opcode, mask); \
+ } while(0)
+ BASE(undef, 0000, 0000);
+ INSN(arith_im, 0080, fff8, CF_ISA_A);
+ INSN(arith_im, 0000, ff00, M68000);
+ INSN(chk2, 00c0, f9c0, CHK2);
+ INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
+ BASE(bitop_reg, 0100, f1c0);
+ BASE(bitop_reg, 0140, f1c0);
+ BASE(bitop_reg, 0180, f1c0);
+ BASE(bitop_reg, 01c0, f1c0);
+ INSN(movep, 0108, f138, MOVEP);
+ INSN(arith_im, 0280, fff8, CF_ISA_A);
+ INSN(arith_im, 0200, ff00, M68000);
+ INSN(undef, 02c0, ffc0, M68000);
+ INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
+ INSN(arith_im, 0480, fff8, CF_ISA_A);
+ INSN(arith_im, 0400, ff00, M68000);
+ INSN(undef, 04c0, ffc0, M68000);
+ INSN(arith_im, 0600, ff00, M68000);
+ INSN(undef, 06c0, ffc0, M68000);
+ INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
+ INSN(arith_im, 0680, fff8, CF_ISA_A);
+ INSN(arith_im, 0c00, ff38, CF_ISA_A);
+ INSN(arith_im, 0c00, ff00, M68000);
+ BASE(bitop_im, 0800, ffc0);
+ BASE(bitop_im, 0840, ffc0);
+ BASE(bitop_im, 0880, ffc0);
+ BASE(bitop_im, 08c0, ffc0);
+ INSN(arith_im, 0a80, fff8, CF_ISA_A);
+ INSN(arith_im, 0a00, ff00, M68000);
+#if defined(CONFIG_SOFTMMU)
+ INSN(moves, 0e00, ff00, M68000);
+#endif
+ INSN(cas, 0ac0, ffc0, CAS);
+ INSN(cas, 0cc0, ffc0, CAS);
+ INSN(cas, 0ec0, ffc0, CAS);
+ INSN(cas2w, 0cfc, ffff, CAS);
+ INSN(cas2l, 0efc, ffff, CAS);
+ BASE(move, 1000, f000);
+ BASE(move, 2000, f000);
+ BASE(move, 3000, f000);
+ INSN(chk, 4000, f040, M68000);
+ INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
+ INSN(negx, 4080, fff8, CF_ISA_A);
+ INSN(negx, 4000, ff00, M68000);
+ INSN(undef, 40c0, ffc0, M68000);
+ INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
+ INSN(move_from_sr, 40c0, ffc0, M68000);
+ BASE(lea, 41c0, f1c0);
+ BASE(clr, 4200, ff00);
+ BASE(undef, 42c0, ffc0);
+ INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
+ INSN(move_from_ccr, 42c0, ffc0, M68000);
+ INSN(neg, 4480, fff8, CF_ISA_A);
+ INSN(neg, 4400, ff00, M68000);
+ INSN(undef, 44c0, ffc0, M68000);
+ BASE(move_to_ccr, 44c0, ffc0);
+ INSN(not, 4680, fff8, CF_ISA_A);
+ INSN(not, 4600, ff00, M68000);
+#if defined(CONFIG_SOFTMMU)
+ BASE(move_to_sr, 46c0, ffc0);
+#endif
+ INSN(nbcd, 4800, ffc0, M68000);
+ INSN(linkl, 4808, fff8, M68000);
+ BASE(pea, 4840, ffc0);
+ BASE(swap, 4840, fff8);
+ INSN(bkpt, 4848, fff8, BKPT);
+ INSN(movem, 48d0, fbf8, CF_ISA_A);
+ INSN(movem, 48e8, fbf8, CF_ISA_A);
+ INSN(movem, 4880, fb80, M68000);
+ BASE(ext, 4880, fff8);
+ BASE(ext, 48c0, fff8);
+ BASE(ext, 49c0, fff8);
+ BASE(tst, 4a00, ff00);
+ INSN(tas, 4ac0, ffc0, CF_ISA_B);
+ INSN(tas, 4ac0, ffc0, M68000);
+#if defined(CONFIG_SOFTMMU)
+ INSN(halt, 4ac8, ffff, CF_ISA_A);
+#endif
+ INSN(pulse, 4acc, ffff, CF_ISA_A);
+ BASE(illegal, 4afc, ffff);
+ INSN(mull, 4c00, ffc0, CF_ISA_A);
+ INSN(mull, 4c00, ffc0, LONG_MULDIV);
+ INSN(divl, 4c40, ffc0, CF_ISA_A);
+ INSN(divl, 4c40, ffc0, LONG_MULDIV);
+ INSN(sats, 4c80, fff8, CF_ISA_B);
+ BASE(trap, 4e40, fff0);
+ BASE(link, 4e50, fff8);
+ BASE(unlk, 4e58, fff8);
+#if defined(CONFIG_SOFTMMU)
+ INSN(move_to_usp, 4e60, fff8, USP);
+ INSN(move_from_usp, 4e68, fff8, USP);
+ INSN(reset, 4e70, ffff, M68000);
+ BASE(stop, 4e72, ffff);
+ BASE(rte, 4e73, ffff);
+ INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
+ INSN(m68k_movec, 4e7a, fffe, MOVEC);
+#endif
+ BASE(nop, 4e71, ffff);
+ INSN(rtd, 4e74, ffff, RTD);
+ BASE(rts, 4e75, ffff);
+ INSN(rtr, 4e77, ffff, M68000);
+ BASE(jump, 4e80, ffc0);
+ BASE(jump, 4ec0, ffc0);
+ INSN(addsubq, 5000, f080, M68000);
+ BASE(addsubq, 5080, f0c0);
+ INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
+ INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
+ INSN(dbcc, 50c8, f0f8, M68000);
+ INSN(tpf, 51f8, fff8, CF_ISA_A);
+
+ /* Branch instructions. */
+ BASE(branch, 6000, f000);
+ /* Disable long branch instructions, then add back the ones we want. */
+ BASE(undef, 60ff, f0ff); /* All long branches. */
+ INSN(branch, 60ff, f0ff, CF_ISA_B);
+ INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
+ INSN(branch, 60ff, ffff, BRAL);
+ INSN(branch, 60ff, f0ff, BCCL);
+
+ BASE(moveq, 7000, f100);
+ INSN(mvzs, 7100, f100, CF_ISA_B);
+ BASE(or, 8000, f000);
+ BASE(divw, 80c0, f0c0);
+ INSN(sbcd_reg, 8100, f1f8, M68000);
+ INSN(sbcd_mem, 8108, f1f8, M68000);
+ BASE(addsub, 9000, f000);
+ INSN(undef, 90c0, f0c0, CF_ISA_A);
+ INSN(subx_reg, 9180, f1f8, CF_ISA_A);
+ INSN(subx_reg, 9100, f138, M68000);
+ INSN(subx_mem, 9108, f138, M68000);
+ INSN(suba, 91c0, f1c0, CF_ISA_A);
+ INSN(suba, 90c0, f0c0, M68000);
+
+ BASE(undef_mac, a000, f000);
+ INSN(mac, a000, f100, CF_EMAC);
+ INSN(from_mac, a180, f9b0, CF_EMAC);
+ INSN(move_mac, a110, f9fc, CF_EMAC);
+ INSN(from_macsr,a980, f9f0, CF_EMAC);
+ INSN(from_mask, ad80, fff0, CF_EMAC);
+ INSN(from_mext, ab80, fbf0, CF_EMAC);
+ INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
+ INSN(to_mac, a100, f9c0, CF_EMAC);
+ INSN(to_macsr, a900, ffc0, CF_EMAC);
+ INSN(to_mext, ab00, fbc0, CF_EMAC);
+ INSN(to_mask, ad00, ffc0, CF_EMAC);
+
+ INSN(mov3q, a140, f1c0, CF_ISA_B);
+ INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
+ INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
+ INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
+ INSN(cmp, b080, f1c0, CF_ISA_A);
+ INSN(cmpa, b1c0, f1c0, CF_ISA_A);
+ INSN(cmp, b000, f100, M68000);
+ INSN(eor, b100, f100, M68000);
+ INSN(cmpm, b108, f138, M68000);
+ INSN(cmpa, b0c0, f0c0, M68000);
+ INSN(eor, b180, f1c0, CF_ISA_A);
+ BASE(and, c000, f000);
+ INSN(exg_dd, c140, f1f8, M68000);
+ INSN(exg_aa, c148, f1f8, M68000);
+ INSN(exg_da, c188, f1f8, M68000);
+ BASE(mulw, c0c0, f0c0);
+ INSN(abcd_reg, c100, f1f8, M68000);
+ INSN(abcd_mem, c108, f1f8, M68000);
+ BASE(addsub, d000, f000);
+ INSN(undef, d0c0, f0c0, CF_ISA_A);
+ INSN(addx_reg, d180, f1f8, CF_ISA_A);
+ INSN(addx_reg, d100, f138, M68000);
+ INSN(addx_mem, d108, f138, M68000);
+ INSN(adda, d1c0, f1c0, CF_ISA_A);
+ INSN(adda, d0c0, f0c0, M68000);
+ INSN(shift_im, e080, f0f0, CF_ISA_A);
+ INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
+ INSN(shift8_im, e000, f0f0, M68000);
+ INSN(shift16_im, e040, f0f0, M68000);
+ INSN(shift_im, e080, f0f0, M68000);
+ INSN(shift8_reg, e020, f0f0, M68000);
+ INSN(shift16_reg, e060, f0f0, M68000);
+ INSN(shift_reg, e0a0, f0f0, M68000);
+ INSN(shift_mem, e0c0, fcc0, M68000);
+ INSN(rotate_im, e090, f0f0, M68000);
+ INSN(rotate8_im, e010, f0f0, M68000);
+ INSN(rotate16_im, e050, f0f0, M68000);
+ INSN(rotate_reg, e0b0, f0f0, M68000);
+ INSN(rotate8_reg, e030, f0f0, M68000);
+ INSN(rotate16_reg, e070, f0f0, M68000);
+ INSN(rotate_mem, e4c0, fcc0, M68000);
+ INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
+ INSN(bfext_reg, e9c0, fdf8, BITFIELD);
+ INSN(bfins_mem, efc0, ffc0, BITFIELD);
+ INSN(bfins_reg, efc0, fff8, BITFIELD);
+ INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
+ INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
+ INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
+ INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
+ INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
+ INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
+ INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
+ INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
+ INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
+ INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
+ BASE(undef_fpu, f000, f000);
+ INSN(fpu, f200, ffc0, CF_FPU);
+ INSN(fbcc, f280, ffc0, CF_FPU);
+ INSN(fpu, f200, ffc0, FPU);
+ INSN(fscc, f240, ffc0, FPU);
+ INSN(fbcc, f280, ff80, FPU);
+#if defined(CONFIG_SOFTMMU)
+ INSN(frestore, f340, ffc0, CF_FPU);
+ INSN(fsave, f300, ffc0, CF_FPU);
+ INSN(frestore, f340, ffc0, FPU);
+ INSN(fsave, f300, ffc0, FPU);
+ INSN(intouch, f340, ffc0, CF_ISA_A);
+ INSN(cpushl, f428, ff38, CF_ISA_A);
+ INSN(cpush, f420, ff20, M68040);
+ INSN(cinv, f400, ff20, M68040);
+ INSN(pflush, f500, ffe0, M68040);
+ INSN(ptest, f548, ffd8, M68040);
+ INSN(wddata, fb00, ff00, CF_ISA_A);
+ INSN(wdebug, fbc0, ffc0, CF_ISA_A);
+#endif
+ INSN(move16_mem, f600, ffe0, M68040);
+ INSN(move16_reg, f620, fff8, M68040);
+#undef INSN
+}
+
+static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUM68KState *env = cpu->env_ptr;
+
+ dc->env = env;
+ dc->pc = dc->base.pc_first;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->cc_op_synced = 1;
+ dc->done_mac = 0;
+ dc->writeback_mask = 0;
+ init_release_array(dc);
+
+ dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
+ /* If architectural single-step is active, limit each TB to one insn. */
+ if (dc->ss_active) {
+ dc->base.max_insns = 1;
+ }
+}
+
+static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+}
+
+static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
+}
+
+static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUM68KState *env = cpu->env_ptr;
+ uint16_t insn = read_im16(env, dc);
+
+ opcode_table[insn](env, dc, insn);
+ do_writebacks(dc);
+ do_release(dc);
+
+ dc->base.pc_next = dc->pc;
+
+ if (dc->base.is_jmp == DISAS_NEXT) {
+ /*
+ * Stop translation when the next insn might touch a new page.
+ * This ensures that prefetch aborts at the right place.
+ *
+ * We cannot determine the size of the next insn without
+ * completely decoding it. However, the maximum insn size
+ * is 32 bytes, so end if we do not have that much remaining.
+ * This may produce several small TBs at the end of each page,
+ * but they will all be linked with goto_tb.
+ *
+ * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
+ * smaller than MC68020's.
+ */
+ target_ulong start_page_offset
+ = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
+
+ if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
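
A worked instance of the cutoff, assuming 4 KiB pages: with pc_first = 0x1000, an insn ending at dc->pc = 0x1fe8 gives start_page_offset = 0xfe8 = 4072 >= 4096 - 32 = 4064, so the TB ends in DISAS_TOO_MANY even though a short insn might still have fit on the page.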
+
+static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ switch (dc->base.is_jmp) {
+ case DISAS_NORETURN:
+ break;
+ case DISAS_TOO_MANY:
+ update_cc_op(dc);
+ if (dc->ss_active) {
+ tcg_gen_movi_i32(QREG_PC, dc->pc);
+ gen_raise_exception(EXCP_TRACE);
+ } else {
+ gen_jmp_tb(dc, 0, dc->pc);
+ }
+ break;
+ case DISAS_JUMP:
+ /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
+ if (dc->ss_active) {
+ gen_raise_exception(EXCP_TRACE);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ break;
+ case DISAS_EXIT:
+ /*
+ * We updated CC_OP and PC in gen_exit_tb, but also modified
+ * other state that may require returning to the main loop.
+ */
+ if (dc->ss_active) {
+ gen_raise_exception(EXCP_TRACE);
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void m68k_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps m68k_tr_ops = {
+ .init_disas_context = m68k_tr_init_disas_context,
+ .tb_start = m68k_tr_tb_start,
+ .insn_start = m68k_tr_insn_start,
+ .translate_insn = m68k_tr_translate_insn,
+ .tb_stop = m68k_tr_tb_stop,
+ .disas_log = m68k_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+ translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
+}
+
+static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
+{
+ floatx80 a = { .high = high, .low = low };
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.f64 = floatx80_to_float64(a, &env->fp_status);
+ return u.d;
+}
+
+void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ int i;
+ uint16_t sr;
+ for (i = 0; i < 8; i++) {
+ qemu_fprintf(f, "D%d = %08x A%d = %08x "
+ "F%d = %04x %016"PRIx64" (%12g)\n",
+ i, env->dregs[i], i, env->aregs[i],
+ i, env->fregs[i].l.upper, env->fregs[i].l.lower,
+ floatx80_to_double(env, env->fregs[i].l.upper,
+ env->fregs[i].l.lower));
+ }
+ qemu_fprintf(f, "PC = %08x ", env->pc);
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
+ sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
+ (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
+ (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
+ (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
+ (sr & CCF_C) ? 'C' : '-');
+ qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
+ (env->fpsr & FPSR_CC_A) ? 'A' : '-',
+ (env->fpsr & FPSR_CC_I) ? 'I' : '-',
+ (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
+ (env->fpsr & FPSR_CC_N) ? 'N' : '-');
+ qemu_fprintf(f, "\n "
+ "FPCR = %04x ", env->fpcr);
+ switch (env->fpcr & FPCR_PREC_MASK) {
+ case FPCR_PREC_X:
+ qemu_fprintf(f, "X ");
+ break;
+ case FPCR_PREC_S:
+ qemu_fprintf(f, "S ");
+ break;
+ case FPCR_PREC_D:
+ qemu_fprintf(f, "D ");
+ break;
+ }
+ switch (env->fpcr & FPCR_RND_MASK) {
+ case FPCR_RND_N:
+ qemu_fprintf(f, "RN ");
+ break;
+ case FPCR_RND_Z:
+ qemu_fprintf(f, "RZ ");
+ break;
+ case FPCR_RND_M:
+ qemu_fprintf(f, "RM ");
+ break;
+ case FPCR_RND_P:
+ qemu_fprintf(f, "RP ");
+ break;
+ }
+ qemu_fprintf(f, "\n");
+#ifdef CONFIG_SOFTMMU
+ qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
+ env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
+ env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
+ env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
+ qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
+ qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
+ qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
+ env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
+ qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
+ env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
+ env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
+ qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
+ env->mmu.mmusr, env->mmu.ar);
+#endif
+}
+
+void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ int cc_op = data[1];
+ env->pc = data[0];
+ if (cc_op != CC_OP_DYNAMIC) {
+ env->cc_op = cc_op;
+ }
+}
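
The two data words unwind exactly what m68k_tr_insn_start recorded; a sketch of the correspondence:

/*
 * data[0] <- tcg_gen_insn_start arg 0: pc of the faulting insn
 * data[1] <- tcg_gen_insn_start arg 1: cc_op at that point
 *
 * CC_OP_DYNAMIC is skipped because env->cc_op already holds the
 * live value in that case, and restoring would clobber it.
 */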
diff --git a/target/meson.build b/target/meson.build
new file mode 100644
index 000000000..2f6940255
--- /dev/null
+++ b/target/meson.build
@@ -0,0 +1,20 @@
+subdir('alpha')
+subdir('arm')
+subdir('avr')
+subdir('cris')
+subdir('hexagon')
+subdir('hppa')
+subdir('i386')
+subdir('m68k')
+subdir('microblaze')
+subdir('mips')
+subdir('nios2')
+subdir('openrisc')
+subdir('ppc')
+subdir('riscv')
+subdir('rx')
+subdir('s390x')
+subdir('sh4')
+subdir('sparc')
+subdir('tricore')
+subdir('xtensa')
diff --git a/target/microblaze/Kconfig b/target/microblaze/Kconfig
new file mode 100644
index 000000000..a5410d921
--- /dev/null
+++ b/target/microblaze/Kconfig
@@ -0,0 +1,2 @@
+config MICROBLAZE
+ bool
diff --git a/target/microblaze/cpu-param.h b/target/microblaze/cpu-param.h
new file mode 100644
index 000000000..4d8297fa9
--- /dev/null
+++ b/target/microblaze/cpu-param.h
@@ -0,0 +1,33 @@
+/*
+ * MicroBlaze CPU parameters for QEMU.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef MICROBLAZE_CPU_PARAM_H
+#define MICROBLAZE_CPU_PARAM_H 1
+
+/*
+ * While system mode can address up to 64 bits of address space,
+ * this is done via the lea/sea instructions, which are system-only
+ * (as they also bypass the mmu).
+ *
+ * We can improve the user-only experience by only exposing 32 bits
+ * of address space.
+ */
+#ifdef CONFIG_USER_ONLY
+#define TARGET_LONG_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#endif
+
+/* FIXME: MB uses variable pages down to 1K but Linux only uses 4K. */
+#define TARGET_PAGE_BITS 12
+#define NB_MMU_MODES 3
+
+#endif
diff --git a/target/microblaze/cpu-qom.h b/target/microblaze/cpu-qom.h
new file mode 100644
index 000000000..e520eefb1
--- /dev/null
+++ b/target/microblaze/cpu-qom.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU MicroBlaze CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_MICROBLAZE_CPU_QOM_H
+#define QEMU_MICROBLAZE_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_MICROBLAZE_CPU "microblaze-cpu"
+
+OBJECT_DECLARE_TYPE(MicroBlazeCPU, MicroBlazeCPUClass,
+ MICROBLAZE_CPU)
+
+/**
+ * MicroBlazeCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A MicroBlaze CPU model.
+ */
+struct MicroBlazeCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#endif
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
new file mode 100644
index 000000000..b9c888b87
--- /dev/null
+++ b/target/microblaze/cpu.c
@@ -0,0 +1,421 @@
+/*
+ * QEMU MicroBlaze CPU
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2009 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu/module.h"
+#include "hw/qdev-properties.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat-helpers.h"
+
+static const struct {
+ const char *name;
+ uint8_t version_id;
+} mb_cpu_lookup[] = {
+ /* These key values are as per the MBV field in PVR0 */
+ {"5.00.a", 0x01},
+ {"5.00.b", 0x02},
+ {"5.00.c", 0x03},
+ {"6.00.a", 0x04},
+ {"6.00.b", 0x06},
+ {"7.00.a", 0x05},
+ {"7.00.b", 0x07},
+ {"7.10.a", 0x08},
+ {"7.10.b", 0x09},
+ {"7.10.c", 0x0a},
+ {"7.10.d", 0x0b},
+ {"7.20.a", 0x0c},
+ {"7.20.b", 0x0d},
+ {"7.20.c", 0x0e},
+ {"7.20.d", 0x0f},
+ {"7.30.a", 0x10},
+ {"7.30.b", 0x11},
+ {"8.00.a", 0x12},
+ {"8.00.b", 0x13},
+ {"8.10.a", 0x14},
+ {"8.20.a", 0x15},
+ {"8.20.b", 0x16},
+ {"8.30.a", 0x17},
+ {"8.40.a", 0x18},
+ {"8.40.b", 0x19},
+ {"8.50.a", 0x1A},
+ {"9.0", 0x1B},
+ {"9.1", 0x1D},
+ {"9.2", 0x1F},
+ {"9.3", 0x20},
+ {"9.4", 0x21},
+ {"9.5", 0x22},
+ {"9.6", 0x23},
+ {"10.0", 0x24},
+ {NULL, 0},
+};
+
+/* If no specific version gets selected, default to the following. */
+#define DEFAULT_CPU_VERSION "10.0"
+
+static void mb_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+ cpu->env.pc = value;
+ /* Ensure D_FLAG and IMM_FLAG are clear for the new PC */
+ cpu->env.iflags = 0;
+}
+
+static void mb_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+ cpu->env.pc = tb->pc;
+ cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
+}
+
+static bool mb_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void mb_cpu_ns_axi_dp(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ bool en = cpu->cfg.use_non_secure & USE_NON_SECURE_M_AXI_DP_MASK;
+
+ cpu->ns_axi_dp = level & en;
+}
+
+static void mb_cpu_ns_axi_ip(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ bool en = cpu->cfg.use_non_secure & USE_NON_SECURE_M_AXI_IP_MASK;
+
+ cpu->ns_axi_ip = level & en;
+}
+
+static void mb_cpu_ns_axi_dc(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ bool en = cpu->cfg.use_non_secure & USE_NON_SECURE_M_AXI_DC_MASK;
+
+ cpu->ns_axi_dc = level & en;
+}
+
+static void mb_cpu_ns_axi_ic(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ bool en = cpu->cfg.use_non_secure & USE_NON_SECURE_M_AXI_IC_MASK;
+
+ cpu->ns_axi_ic = level & en;
+}
+
+static void microblaze_cpu_set_irq(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int type = irq ? CPU_INTERRUPT_NMI : CPU_INTERRUPT_HARD;
+
+ if (level) {
+ cpu_interrupt(cs, type);
+ } else {
+ cpu_reset_interrupt(cs, type);
+ }
+}
+#endif
+
+static void mb_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(s);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(cpu);
+ CPUMBState *env = &cpu->env;
+
+ mcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUMBState, end_reset_fields));
+ env->res_addr = RES_ADDR_NONE;
+
+ /* Disable stack protector. */
+ env->shr = ~0;
+
+ env->pc = cpu->cfg.base_vectors;
+
+#if defined(CONFIG_USER_ONLY)
+ /* Start in user mode with interrupts enabled. */
+ mb_cpu_write_msr(env, MSR_EE | MSR_IE | MSR_VM | MSR_UM);
+#else
+ mb_cpu_write_msr(env, 0);
+ mmu_init(&env->mmu);
+#endif
+}
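
The memset bounded by offsetof(..., end_reset_fields) clears only the fields declared before the zero-size marker struct; a toy sketch of the pattern (hypothetical struct; the empty struct member is a GNU C extension that QEMU builds with):

#include <string.h>
#include <stddef.h>

struct toy_state {
    int zeroed_a;               /* everything above the marker is cleared */
    int zeroed_b;
    struct {} end_reset_fields; /* zero-size marker */
    int preserved;              /* fields after the marker survive reset */
};

static void toy_reset(struct toy_state *s)
{
    memset(s, 0, offsetof(struct toy_state, end_reset_fields));
}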
+
+static void mb_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_arch_microblaze;
+ info->print_insn = print_insn_microblaze;
+}
+
+static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(dev);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ uint8_t version_code = 0;
+ const char *version;
+ int i = 0;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (cpu->cfg.addr_size < 32 || cpu->cfg.addr_size > 64) {
+ error_setg(errp, "addr-size %d is out of range (32 - 64)",
+ cpu->cfg.addr_size);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION;
+ for (i = 0; mb_cpu_lookup[i].name && version; i++) {
+ if (strcmp(mb_cpu_lookup[i].name, version) == 0) {
+ version_code = mb_cpu_lookup[i].version_id;
+ break;
+ }
+ }
+
+ if (!version_code) {
+ qemu_log("Invalid MicroBlaze version number: %s\n", cpu->cfg.version);
+ }
+
+ cpu->cfg.pvr_regs[0] =
+ (PVR0_USE_EXC_MASK |
+ PVR0_USE_ICACHE_MASK |
+ PVR0_USE_DCACHE_MASK |
+ (cpu->cfg.stackprot ? PVR0_SPROT_MASK : 0) |
+ (cpu->cfg.use_fpu ? PVR0_USE_FPU_MASK : 0) |
+ (cpu->cfg.use_hw_mul ? PVR0_USE_HW_MUL_MASK : 0) |
+ (cpu->cfg.use_barrel ? PVR0_USE_BARREL_MASK : 0) |
+ (cpu->cfg.use_div ? PVR0_USE_DIV_MASK : 0) |
+ (cpu->cfg.use_mmu ? PVR0_USE_MMU_MASK : 0) |
+ (cpu->cfg.endi ? PVR0_ENDI_MASK : 0) |
+ (version_code << PVR0_VERSION_SHIFT) |
+ (cpu->cfg.pvr == C_PVR_FULL ? PVR0_PVR_FULL_MASK : 0) |
+ cpu->cfg.pvr_user1);
+
+ cpu->cfg.pvr_regs[1] = cpu->cfg.pvr_user2;
+
+ cpu->cfg.pvr_regs[2] =
+ (PVR2_D_OPB_MASK |
+ PVR2_D_LMB_MASK |
+ PVR2_I_OPB_MASK |
+ PVR2_I_LMB_MASK |
+ PVR2_FPU_EXC_MASK |
+ (cpu->cfg.use_fpu ? PVR2_USE_FPU_MASK : 0) |
+ (cpu->cfg.use_fpu > 1 ? PVR2_USE_FPU2_MASK : 0) |
+ (cpu->cfg.use_hw_mul ? PVR2_USE_HW_MUL_MASK : 0) |
+ (cpu->cfg.use_hw_mul > 1 ? PVR2_USE_MUL64_MASK : 0) |
+ (cpu->cfg.use_barrel ? PVR2_USE_BARREL_MASK : 0) |
+ (cpu->cfg.use_div ? PVR2_USE_DIV_MASK : 0) |
+ (cpu->cfg.use_msr_instr ? PVR2_USE_MSR_INSTR : 0) |
+ (cpu->cfg.use_pcmp_instr ? PVR2_USE_PCMP_INSTR : 0) |
+ (cpu->cfg.dopb_bus_exception ? PVR2_DOPB_BUS_EXC_MASK : 0) |
+ (cpu->cfg.iopb_bus_exception ? PVR2_IOPB_BUS_EXC_MASK : 0) |
+ (cpu->cfg.div_zero_exception ? PVR2_DIV_ZERO_EXC_MASK : 0) |
+ (cpu->cfg.illegal_opcode_exception ? PVR2_ILL_OPCODE_EXC_MASK : 0) |
+ (cpu->cfg.unaligned_exceptions ? PVR2_UNALIGNED_EXC_MASK : 0) |
+ (cpu->cfg.opcode_0_illegal ? PVR2_OPCODE_0x0_ILL_MASK : 0));
+
+ cpu->cfg.pvr_regs[5] |=
+ cpu->cfg.dcache_writeback ? PVR5_DCACHE_WRITEBACK_MASK : 0;
+
+ cpu->cfg.pvr_regs[10] =
+ (0x0c000000 | /* Default to the Spartan-3A DSP family. */
+ (cpu->cfg.addr_size - 32) << PVR10_ASIZE_SHIFT);
+
+ cpu->cfg.pvr_regs[11] = ((cpu->cfg.use_mmu ? PVR11_USE_MMU : 0) |
+ 16 << 17);
+
+ cpu->cfg.mmu = 3;
+ cpu->cfg.mmu_tlb_access = 3;
+ cpu->cfg.mmu_zones = 16;
+ cpu->cfg.addr_mask = MAKE_64BIT_MASK(0, cpu->cfg.addr_size);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void mb_cpu_initfn(Object *obj)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
+ CPUMBState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+
+#ifndef CONFIG_USER_ONLY
+ /* Inbound IRQ and FIR lines */
+ qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
+ qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dp, "ns_axi_dp", 1);
+ qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ip, "ns_axi_ip", 1);
+ qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dc, "ns_axi_dc", 1);
+ qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ic, "ns_axi_ic", 1);
+#endif
+}
+
+static Property mb_properties[] = {
+ DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0),
+ DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot,
+ false),
+ /*
+ * This is the C_ADDR_SIZE synth-time configuration option of the
+ * MicroBlaze cores. Supported values range between 32 and 64.
+ *
+ * When set to > 32, the 32-bit MicroBlaze core can emit load/stores
+ * with extended addressing.
+ */
+ DEFINE_PROP_UINT8("addr-size", MicroBlazeCPU, cfg.addr_size, 32),
+ /*
+ * If use-fpu > 0, the FPU is enabled.
+ * If use-fpu = 2, floating point conversion and square root
+ * instructions are also enabled.
+ */
+ DEFINE_PROP_UINT8("use-fpu", MicroBlazeCPU, cfg.use_fpu, 2),
+ /*
+ * If use-hw-mul > 0, the multiplier is enabled.
+ * If use-hw-mul = 2, the 64-bit multiplier is also enabled.
+ */
+ DEFINE_PROP_UINT8("use-hw-mul", MicroBlazeCPU, cfg.use_hw_mul, 2),
+ DEFINE_PROP_BOOL("use-barrel", MicroBlazeCPU, cfg.use_barrel, true),
+ DEFINE_PROP_BOOL("use-div", MicroBlazeCPU, cfg.use_div, true),
+ DEFINE_PROP_BOOL("use-msr-instr", MicroBlazeCPU, cfg.use_msr_instr, true),
+ DEFINE_PROP_BOOL("use-pcmp-instr", MicroBlazeCPU, cfg.use_pcmp_instr, true),
+ DEFINE_PROP_BOOL("use-mmu", MicroBlazeCPU, cfg.use_mmu, true),
+ /*
+ * use-non-secure enables/disables the use of the non_secure[3:0] signals.
+ * It is a bitfield where 1 = non-secure for the following bits and their
+ * corresponding interfaces:
+ * 0x1 - M_AXI_DP
+ * 0x2 - M_AXI_IP
+ * 0x4 - M_AXI_DC
+ * 0x8 - M_AXI_IC
+ * (a property-setting sketch follows this list)
+ */
+ DEFINE_PROP_UINT8("use-non-secure", MicroBlazeCPU, cfg.use_non_secure, 0),
+ DEFINE_PROP_BOOL("dcache-writeback", MicroBlazeCPU, cfg.dcache_writeback,
+ false),
+ DEFINE_PROP_BOOL("endianness", MicroBlazeCPU, cfg.endi, false),
+ /* Enables bus exceptions on failed data accesses (load/stores). */
+ DEFINE_PROP_BOOL("dopb-bus-exception", MicroBlazeCPU,
+ cfg.dopb_bus_exception, false),
+ /* Enables bus exceptions on failed instruction fetches. */
+ DEFINE_PROP_BOOL("iopb-bus-exception", MicroBlazeCPU,
+ cfg.iopb_bus_exception, false),
+ DEFINE_PROP_BOOL("ill-opcode-exception", MicroBlazeCPU,
+ cfg.illegal_opcode_exception, false),
+ DEFINE_PROP_BOOL("div-zero-exception", MicroBlazeCPU,
+ cfg.div_zero_exception, false),
+ DEFINE_PROP_BOOL("unaligned-exceptions", MicroBlazeCPU,
+ cfg.unaligned_exceptions, false),
+ DEFINE_PROP_BOOL("opcode-0x0-illegal", MicroBlazeCPU,
+ cfg.opcode_0_illegal, false),
+ DEFINE_PROP_STRING("version", MicroBlazeCPU, cfg.version),
+ DEFINE_PROP_UINT8("pvr", MicroBlazeCPU, cfg.pvr, C_PVR_FULL),
+ DEFINE_PROP_UINT8("pvr-user1", MicroBlazeCPU, cfg.pvr_user1, 0),
+ DEFINE_PROP_UINT32("pvr-user2", MicroBlazeCPU, cfg.pvr_user2, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
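
These qdev properties are set before the device is realized; a sketch of hypothetical board code marking the M_AXI_DP and M_AXI_DC interfaces non-secure (0x1 | 0x4):

DeviceState *cpu = qdev_new(TYPE_MICROBLAZE_CPU);

object_property_set_uint(OBJECT(cpu), "use-non-secure", 0x5, &error_fatal);
qdev_realize_and_unref(cpu, NULL, &error_fatal);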
+
+static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
+{
+ return object_class_by_name(TYPE_MICROBLAZE_CPU);
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps mb_sysemu_ops = {
+ .get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps mb_tcg_ops = {
+ .initialize = mb_tcg_init,
+ .synchronize_from_tb = mb_cpu_synchronize_from_tb,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = mb_cpu_tlb_fill,
+ .cpu_exec_interrupt = mb_cpu_exec_interrupt,
+ .do_interrupt = mb_cpu_do_interrupt,
+ .do_transaction_failed = mb_cpu_transaction_failed,
+ .do_unaligned_access = mb_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void mb_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, mb_cpu_realizefn,
+ &mcc->parent_realize);
+ device_class_set_parent_reset(dc, mb_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = mb_cpu_class_by_name;
+ cc->has_work = mb_cpu_has_work;
+
+ cc->dump_state = mb_cpu_dump_state;
+ cc->set_pc = mb_cpu_set_pc;
+ cc->gdb_read_register = mb_cpu_gdb_read_register;
+ cc->gdb_write_register = mb_cpu_gdb_write_register;
+
+#ifndef CONFIG_USER_ONLY
+ dc->vmsd = &vmstate_mb_cpu;
+ cc->sysemu_ops = &mb_sysemu_ops;
+#endif
+ device_class_set_props(dc, mb_properties);
+ cc->gdb_num_core_regs = 32 + 27;
+
+ cc->disas_set_info = mb_disas_set_info;
+ cc->tcg_ops = &mb_tcg_ops;
+}
+
+static const TypeInfo mb_cpu_type_info = {
+ .name = TYPE_MICROBLAZE_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(MicroBlazeCPU),
+ .instance_init = mb_cpu_initfn,
+ .class_size = sizeof(MicroBlazeCPUClass),
+ .class_init = mb_cpu_class_init,
+};
+
+static void mb_cpu_register_types(void)
+{
+ type_register_static(&mb_cpu_type_info);
+}
+
+type_init(mb_cpu_register_types)
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
new file mode 100644
index 000000000..e9cd0b88d
--- /dev/null
+++ b/target/microblaze/cpu.h
@@ -0,0 +1,443 @@
+/*
+ * MicroBlaze virtual CPU header
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MICROBLAZE_CPU_H
+#define MICROBLAZE_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat-types.h"
+
+typedef struct CPUMBState CPUMBState;
+#if !defined(CONFIG_USER_ONLY)
+#include "mmu.h"
+#endif
+
+#define EXCP_MMU 1
+#define EXCP_IRQ 2
+#define EXCP_SYSCALL 3 /* user-only */
+#define EXCP_HW_BREAK 4
+#define EXCP_HW_EXCP 5
+
+/* MicroBlaze-specific interrupt pending bits. */
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+/* Meanings of the MBCPU object's two inbound GPIO lines */
+#define MB_CPU_IRQ 0
+#define MB_CPU_FIR 1
+
+/* Register aliases. R0 - R15 */
+#define R_SP 1
+#define SR_PC 0
+#define SR_MSR 1
+#define SR_EAR 3
+#define SR_ESR 5
+#define SR_FSR 7
+#define SR_BTR 0xb
+#define SR_EDR 0xd
+
+/* MSR flags. */
+#define MSR_BE (1<<0) /* 0x001 */
+#define MSR_IE (1<<1) /* 0x002 */
+#define MSR_C (1<<2) /* 0x004 */
+#define MSR_BIP (1<<3) /* 0x008 */
+#define MSR_FSL (1<<4) /* 0x010 */
+#define MSR_ICE (1<<5) /* 0x020 */
+#define MSR_DZ (1<<6) /* 0x040 */
+#define MSR_DCE (1<<7) /* 0x080 */
+#define MSR_EE (1<<8) /* 0x100 */
+#define MSR_EIP (1<<9) /* 0x200 */
+#define MSR_PVR (1<<10) /* 0x400 */
+#define MSR_CC (1<<31)
+
+/* Machine State Register (MSR) Fields */
+#define MSR_UM (1<<11) /* User Mode */
+#define MSR_UMS (1<<12) /* User Mode Save */
+#define MSR_VM (1<<13) /* Virtual Mode */
+#define MSR_VMS (1<<14) /* Virtual Mode Save */
+
+#define MSR_KERNEL MSR_EE|MSR_VM
+//#define MSR_USER MSR_KERNEL|MSR_UM|MSR_IE
+#define MSR_KERNEL_VMS MSR_EE|MSR_VMS
+//#define MSR_USER_VMS MSR_KERNEL_VMS|MSR_UMS|MSR_IE
+
+/* Exception State Register (ESR) Fields */
+#define ESR_DIZ (1<<11) /* Zone Protection */
+#define ESR_W (1<<11) /* Unaligned word access */
+#define ESR_S (1<<10) /* Store instruction */
+
+#define ESR_ESS_FSL_OFFSET 5
+
+#define ESR_ESS_MASK (0x7f << 5)
+
+#define ESR_EC_FSL 0
+#define ESR_EC_UNALIGNED_DATA 1
+#define ESR_EC_ILLEGAL_OP 2
+#define ESR_EC_INSN_BUS 3
+#define ESR_EC_DATA_BUS 4
+#define ESR_EC_DIVZERO 5
+#define ESR_EC_FPU 6
+#define ESR_EC_PRIVINSN 7
+#define ESR_EC_STACKPROT 7 /* Same as PRIVINSN. */
+#define ESR_EC_DATA_STORAGE 8
+#define ESR_EC_INSN_STORAGE 9
+#define ESR_EC_DATA_TLB 10
+#define ESR_EC_INSN_TLB 11
+#define ESR_EC_MASK 31
+
+/* Floating Point Status Register (FSR) Bits */
+#define FSR_IO (1<<4) /* Invalid operation */
+#define FSR_DZ (1<<3) /* Divide-by-zero */
+#define FSR_OF (1<<2) /* Overflow */
+#define FSR_UF (1<<1) /* Underflow */
+#define FSR_DO (1<<0) /* Denormalized operand error */
+
+/* Version reg. */
+/* Basic PVR mask */
+#define PVR0_PVR_FULL_MASK 0x80000000
+#define PVR0_USE_BARREL_MASK 0x40000000
+#define PVR0_USE_DIV_MASK 0x20000000
+#define PVR0_USE_HW_MUL_MASK 0x10000000
+#define PVR0_USE_FPU_MASK 0x08000000
+#define PVR0_USE_EXC_MASK 0x04000000
+#define PVR0_USE_ICACHE_MASK 0x02000000
+#define PVR0_USE_DCACHE_MASK 0x01000000
+#define PVR0_USE_MMU_MASK 0x00800000
+#define PVR0_USE_BTC 0x00400000
+#define PVR0_ENDI_MASK 0x00200000
+#define PVR0_FAULT 0x00100000
+#define PVR0_VERSION_MASK 0x0000FF00
+#define PVR0_USER1_MASK 0x000000FF
+#define PVR0_SPROT_MASK 0x00000001
+
+#define PVR0_VERSION_SHIFT 8
+
+/* User 2 PVR mask */
+#define PVR1_USER2_MASK 0xFFFFFFFF
+
+/* Configuration PVR masks */
+#define PVR2_D_OPB_MASK 0x80000000
+#define PVR2_D_LMB_MASK 0x40000000
+#define PVR2_I_OPB_MASK 0x20000000
+#define PVR2_I_LMB_MASK 0x10000000
+#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
+#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
+#define PVR2_D_PLB_MASK 0x02000000 /* new */
+#define PVR2_I_PLB_MASK 0x01000000 /* new */
+#define PVR2_INTERCONNECT 0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
+#define PVR2_USE_FSL_EXC 0x00040000 /* new */
+#define PVR2_USE_MSR_INSTR 0x00020000
+#define PVR2_USE_PCMP_INSTR 0x00010000
+#define PVR2_AREA_OPTIMISED 0x00008000
+#define PVR2_USE_BARREL_MASK 0x00004000
+#define PVR2_USE_DIV_MASK 0x00002000
+#define PVR2_USE_HW_MUL_MASK 0x00001000
+#define PVR2_USE_FPU_MASK 0x00000800
+#define PVR2_USE_MUL64_MASK 0x00000400
+#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
+#define PVR2_USE_IPLBEXC 0x00000100
+#define PVR2_USE_DPLBEXC 0x00000080
+#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
+#define PVR2_UNALIGNED_EXC_MASK 0x00000020
+#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010
+#define PVR2_IOPB_BUS_EXC_MASK 0x00000008
+#define PVR2_DOPB_BUS_EXC_MASK 0x00000004
+#define PVR2_DIV_ZERO_EXC_MASK 0x00000002
+#define PVR2_FPU_EXC_MASK 0x00000001
+
+/* Debug and exception PVR masks */
+#define PVR3_DEBUG_ENABLED_MASK 0x80000000
+#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000
+#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000
+#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000
+#define PVR3_FSL_LINKS_MASK 0x00000380
+
+/* ICache config PVR masks */
+#define PVR4_USE_ICACHE_MASK 0x80000000
+#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
+#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
+#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
+#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000
+
+/* DCache config PVR masks */
+#define PVR5_USE_DCACHE_MASK 0x80000000
+#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
+#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
+#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
+#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR5_DCACHE_WRITEBACK_MASK 0x00004000
+
+/* ICache base address PVR mask */
+#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* ICache high address PVR mask */
+#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* DCache base address PVR mask */
+#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* DCache high address PVR mask */
+#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* Target family PVR mask */
+#define PVR10_TARGET_FAMILY_MASK 0xFF000000
+#define PVR10_ASIZE_SHIFT 18
+
+/* MMU description */
+#define PVR11_USE_MMU 0xC0000000
+#define PVR11_MMU_ITLB_SIZE 0x38000000
+#define PVR11_MMU_DTLB_SIZE 0x07000000
+#define PVR11_MMU_TLB_ACCESS 0x00C00000
+#define PVR11_MMU_ZONES 0x003E0000
+/* MSR Reset value PVR mask */
+#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF
+
+#define C_PVR_NONE 0
+#define C_PVR_BASIC 1
+#define C_PVR_FULL 2
+
+/* CPU flags. */
+
+/* Condition codes. */
+#define CC_GE 5
+#define CC_GT 4
+#define CC_LE 3
+#define CC_LT 2
+#define CC_NE 1
+#define CC_EQ 0
+
+#define STREAM_EXCEPTION (1 << 0)
+#define STREAM_ATOMIC (1 << 1)
+#define STREAM_TEST (1 << 2)
+#define STREAM_CONTROL (1 << 3)
+#define STREAM_NONBLOCK (1 << 4)
+
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+/* use-non-secure property masks */
+#define USE_NON_SECURE_M_AXI_DP_MASK 0x1
+#define USE_NON_SECURE_M_AXI_IP_MASK 0x2
+#define USE_NON_SECURE_M_AXI_DC_MASK 0x4
+#define USE_NON_SECURE_M_AXI_IC_MASK 0x8
+
+struct CPUMBState {
+ uint32_t bvalue; /* TCG temporary, only valid during a TB */
+ uint32_t btarget; /* Full resolved branch destination */
+
+ uint32_t imm;
+ uint32_t regs[32];
+ uint32_t pc;
+ uint32_t msr; /* All bits of MSR except MSR[C] and MSR[CC] */
+ uint32_t msr_c; /* MSR[C], in low bit; other bits must be 0 */
+ target_ulong ear;
+ uint32_t esr;
+ uint32_t fsr;
+ uint32_t btr;
+ uint32_t edr;
+ float_status fp_status;
+ /* Stack protectors. Yes, it's a hw feature. */
+ uint32_t slr, shr;
+
+ /* lwx/swx reserved address */
+#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
+ target_ulong res_addr;
+ uint32_t res_val;
+
+ /* Internal flags. */
+#define IMM_FLAG (1 << 0)
+#define BIMM_FLAG (1 << 1)
+#define ESR_ESS_FLAG (1 << 2) /* indicates ESR_ESS_MASK is present */
+/* MSR_EE (1 << 8) -- these 3 are not in iflags but tb_flags */
+/* MSR_UM (1 << 11) */
+/* MSR_VM (1 << 13) */
+/* ESR_ESS_MASK [11:5] -- unwind into iflags for unaligned excp */
+#define D_FLAG (1 << 12) /* Bit in ESR. */
+#define DRTI_FLAG (1 << 16)
+#define DRTE_FLAG (1 << 17)
+#define DRTB_FLAG (1 << 18)
+
+/* TB dependent CPUMBState. */
+#define IFLAGS_TB_MASK (D_FLAG | BIMM_FLAG | IMM_FLAG | \
+ DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)
+#define MSR_TB_MASK (MSR_UM | MSR_VM | MSR_EE)
+
+ uint32_t iflags;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Unified MMU. */
+ MicroBlazeMMU mmu;
+#endif
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* These fields are preserved on reset. */
+};
+
+/*
+ * Microblaze Configuration Settings
+ *
+ * Note that the structure is sorted by type and size to minimize holes.
+ */
+typedef struct {
+ char *version;
+
+ uint64_t addr_mask;
+
+ uint32_t base_vectors;
+ uint32_t pvr_user2;
+ uint32_t pvr_regs[13];
+
+ uint8_t addr_size;
+ uint8_t use_fpu;
+ uint8_t use_hw_mul;
+ uint8_t pvr_user1;
+ uint8_t pvr;
+ uint8_t mmu;
+ uint8_t mmu_tlb_access;
+ uint8_t mmu_zones;
+
+ bool stackprot;
+ bool use_barrel;
+ bool use_div;
+ bool use_msr_instr;
+ bool use_pcmp_instr;
+ bool use_mmu;
+ uint8_t use_non_secure;
+ bool dcache_writeback;
+ bool endi;
+ bool dopb_bus_exception;
+ bool iopb_bus_exception;
+ bool illegal_opcode_exception;
+ bool opcode_0_illegal;
+ bool div_zero_exception;
+ bool unaligned_exceptions;
+} MicroBlazeCPUConfig;
+
+/**
+ * MicroBlazeCPU:
+ * @env: #CPUMBState
+ *
+ * A MicroBlaze CPU.
+ */
+struct MicroBlazeCPU {
+ /*< private >*/
+ CPUState parent_obj;
+
+ /*< public >*/
+ bool ns_axi_dp;
+ bool ns_axi_ip;
+ bool ns_axi_dc;
+ bool ns_axi_ic;
+
+ CPUNegativeOffsetState neg;
+ CPUMBState env;
+ MicroBlazeCPUConfig cfg;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+void mb_cpu_do_interrupt(CPUState *cs);
+bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
+#endif /* !CONFIG_USER_ONLY */
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;
+void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+static inline uint32_t mb_cpu_read_msr(const CPUMBState *env)
+{
+ /* Replicate MSR[C] to MSR[CC]. */
+ return env->msr | (env->msr_c * (MSR_C | MSR_CC));
+}
+
+static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
+{
+ env->msr_c = (val >> 2) & 1;
+ /*
+ * Clear both MSR[C] and MSR[CC] from the saved copy.
+ * MSR_PVR is not writable and is always clear.
+ */
+ env->msr = val & ~(MSR_C | MSR_CC | MSR_PVR);
+}
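
A minimal round-trip check of the pair above (a sketch, not part of the patch, assuming <assert.h>):

CPUMBState env = { 0 };

mb_cpu_write_msr(&env, MSR_EE | MSR_C);      /* 0x104: carry is split out */
assert(env.msr == MSR_EE && env.msr_c == 1);
/* Reading replicates the carry into both MSR[C] (bit 2) and MSR[CC] (bit 31). */
assert(mb_cpu_read_msr(&env) == (MSR_CC | MSR_EE | MSR_C));  /* 0x80000104 */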
+
+void mb_tcg_init(void);
+
+#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
+
+/* MMU mode definitions */
+#define MMU_NOMMU_IDX 0
+#define MMU_KERNEL_IDX 1
+#define MMU_USER_IDX 2
+/* See NB_MMU_MODES in cpu-param.h. */
+
+typedef CPUMBState CPUArchState;
+typedef MicroBlazeCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/* Ensure there is no overlap between the two masks. */
+QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);
+
+static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
+ *cs_base = (*flags & IMM_FLAG ? env->imm : 0);
+}
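
Carrying the pending immediate in cs_base keeps the TB lookup from conflating two blocks at the same pc; a sketch:

/*
 * (pc = 0x100, cs_base = 0,      flags without IMM_FLAG)  -- no prefix
 * (pc = 0x100, cs_base = 0x1234, flags | IMM_FLAG)        -- after "imm 0x1234"
 * The two hash to different TBs and are translated separately.
 */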
+
+#if !defined(CONFIG_USER_ONLY)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+#endif
+
+static inline int cpu_mmu_index(CPUMBState *env, bool ifetch)
+{
+ MicroBlazeCPU *cpu = env_archcpu(env);
+
+ /* Are we in nommu mode? */
+ if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) {
+ return MMU_NOMMU_IDX;
+ }
+
+ if (env->msr & MSR_UM) {
+ return MMU_USER_IDX;
+ }
+ return MMU_KERNEL_IDX;
+}
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_mb_cpu;
+#endif
+
+#endif
diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c
new file mode 100644
index 000000000..2e6e07005
--- /dev/null
+++ b/target/microblaze/gdbstub.c
@@ -0,0 +1,146 @@
+/*
+ * MicroBlaze gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+/*
+ * GDB expects SREGs in the following order:
+ * PC, MSR, EAR, ESR, FSR, BTR, EDR, PID, ZPR, TLBX, TLBSX, TLBLO, TLBHI.
+ *
+ * PID, ZPR, TLBX, TLBSX, TLBLO, and TLBHI aren't modeled, so we don't
+ * map them to anything and return a value of 0 instead.
+ */
+
+enum {
+ GDB_PC = 32 + 0,
+ GDB_MSR = 32 + 1,
+ GDB_EAR = 32 + 2,
+ GDB_ESR = 32 + 3,
+ GDB_FSR = 32 + 4,
+ GDB_BTR = 32 + 5,
+ GDB_PVR0 = 32 + 6,
+ GDB_PVR11 = 32 + 17,
+ GDB_EDR = 32 + 18,
+ GDB_SLR = 32 + 25,
+ GDB_SHR = 32 + 26,
+};
+
+int mb_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t val;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ switch (n) {
+ case 1 ... 31:
+ val = env->regs[n];
+ break;
+ case GDB_PC:
+ val = env->pc;
+ break;
+ case GDB_MSR:
+ val = mb_cpu_read_msr(env);
+ break;
+ case GDB_EAR:
+ val = env->ear;
+ break;
+ case GDB_ESR:
+ val = env->esr;
+ break;
+ case GDB_FSR:
+ val = env->fsr;
+ break;
+ case GDB_BTR:
+ val = env->btr;
+ break;
+ case GDB_PVR0 ... GDB_PVR11:
+ /* PVR12 is intentionally skipped */
+ val = cpu->cfg.pvr_regs[n - GDB_PVR0];
+ break;
+ case GDB_EDR:
+ val = env->edr;
+ break;
+ case GDB_SLR:
+ val = env->slr;
+ break;
+ case GDB_SHR:
+ val = env->shr;
+ break;
+ default:
+ /* Other SRegs aren't modeled, so report a value of 0 */
+ val = 0;
+ break;
+ }
+ return gdb_get_reg32(mem_buf, val);
+}
+
+int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ switch (n) {
+ case 1 ... 31:
+ env->regs[n] = tmp;
+ break;
+ case GDB_PC:
+ env->pc = tmp;
+ break;
+ case GDB_MSR:
+ mb_cpu_write_msr(env, tmp);
+ break;
+ case GDB_EAR:
+ env->ear = tmp;
+ break;
+ case GDB_ESR:
+ env->esr = tmp;
+ break;
+ case GDB_FSR:
+ env->fsr = tmp;
+ break;
+ case GDB_BTR:
+ env->btr = tmp;
+ break;
+ case GDB_EDR:
+ env->edr = tmp;
+ break;
+ case GDB_SLR:
+ env->slr = tmp;
+ break;
+ case GDB_SHR:
+ env->shr = tmp;
+ break;
+ }
+ return 4;
+}
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
new file mode 100644
index 000000000..a607fe68e
--- /dev/null
+++ b/target/microblaze/helper.c
@@ -0,0 +1,298 @@
+/*
+ * MicroBlaze helper routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/log.h"
+
+#ifndef CONFIG_USER_ONLY
+static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
+ MMUAccessType access_type)
+{
+ if (access_type == MMU_INST_FETCH) {
+ return !cpu->ns_axi_ip;
+ } else {
+ return !cpu->ns_axi_dp;
+ }
+}
+
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ MicroBlazeMMULookup lu;
+ unsigned int hit;
+ int prot;
+ MemTxAttrs attrs = {};
+
+ attrs.secure = mb_cpu_access_is_secure(cpu, access_type);
+
+ if (mmu_idx == MMU_NOMMU_IDX) {
+ /* MMU disabled or not available. */
+ address &= TARGET_PAGE_MASK;
+ prot = PAGE_BITS;
+ tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
+ if (likely(hit)) {
+ uint32_t vaddr = address & TARGET_PAGE_MASK;
+ uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
+
+ qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
+ mmu_idx, vaddr, paddr, lu.prot);
+ tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* TLB miss. */
+ if (probe) {
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
+ mmu_idx, address);
+
+ env->ear = address;
+ switch (lu.err) {
+ case ERR_PROT:
+ env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
+ env->esr |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ case ERR_MISS:
+ env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
+ env->esr |= (access_type == MMU_DATA_STORE) << 10;
+ break;
+ default:
+ abort();
+ }
+
+ if (cs->exception_index == EXCP_MMU) {
+ cpu_abort(cs, "recursive faults\n");
+ }
+
+ /* TLB miss. */
+ cs->exception_index = EXCP_MMU;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void mb_cpu_do_interrupt(CPUState *cs)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t t, msr = mb_cpu_read_msr(env);
+ bool set_esr;
+
+ /* IMM flag cannot propagate across a branch and into the dslot. */
+ assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
+ /* BIMM flag cannot be set without D_FLAG. */
+ assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
+ /* RTI flags are private to translate. */
+ assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
+
+ switch (cs->exception_index) {
+ case EXCP_HW_EXCP:
+ if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Exception raised on system without exceptions!\n");
+ return;
+ }
+
+ qemu_log_mask(CPU_LOG_INT,
+ "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
+ env->pc, msr, env->iflags);
+
+ /* Exception breaks branch + dslot sequence? */
+ set_esr = true;
+ env->esr &= ~D_FLAG;
+ if (env->iflags & D_FLAG) {
+ env->esr |= D_FLAG;
+ env->btr = env->btarget;
+ }
+
+ /* Exception in progress. */
+ msr |= MSR_EIP;
+ env->regs[17] = env->pc + 4;
+ env->pc = cpu->cfg.base_vectors + 0x20;
+ break;
+
+ case EXCP_MMU:
+ qemu_log_mask(CPU_LOG_INT,
+ "INT: MMU at pc=%08x msr=%08x "
+ "ear=%" PRIx64 " iflags=%x\n",
+ env->pc, msr, env->ear, env->iflags);
+
+ /* Exception breaks branch + dslot sequence? */
+ set_esr = true;
+ env->esr &= ~D_FLAG;
+ if (env->iflags & D_FLAG) {
+ env->esr |= D_FLAG;
+ env->btr = env->btarget;
+ /* Reexecute the branch. */
+ env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
+ } else if (env->iflags & IMM_FLAG) {
+ /* Reexecute the imm. */
+ env->regs[17] = env->pc - 4;
+ } else {
+ env->regs[17] = env->pc;
+ }
+
+ /* Exception in progress. */
+ msr |= MSR_EIP;
+ env->pc = cpu->cfg.base_vectors + 0x20;
+ break;
+
+ case EXCP_IRQ:
+ assert(!(msr & (MSR_EIP | MSR_BIP)));
+ assert(msr & MSR_IE);
+ assert(!(env->iflags & (D_FLAG | IMM_FLAG)));
+
+ qemu_log_mask(CPU_LOG_INT,
+ "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
+ env->pc, msr, env->iflags);
+ set_esr = false;
+
+ /* Disable interrupts. */
+ msr &= ~MSR_IE;
+ env->regs[14] = env->pc;
+ env->pc = cpu->cfg.base_vectors + 0x10;
+ break;
+
+ case EXCP_HW_BREAK:
+ assert(!(env->iflags & (D_FLAG | IMM_FLAG)));
+
+ qemu_log_mask(CPU_LOG_INT,
+ "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
+ env->pc, msr, env->iflags);
+ set_esr = false;
+
+ /* Break in progress. */
+ msr |= MSR_BIP;
+ env->regs[16] = env->pc;
+ env->pc = cpu->cfg.base_vectors + 0x18;
+ break;
+
+ default:
+ cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
+ /* not reached */
+ }
+
+ /* Save previous mode, disable mmu, disable user-mode. */
+ t = (msr & (MSR_VM | MSR_UM)) << 1;
+ msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
+ msr |= t;
+ mb_cpu_write_msr(env, msr);
+
+ env->res_addr = RES_ADDR_NONE;
+ env->iflags = 0;
+
+ if (!set_esr) {
+ qemu_log_mask(CPU_LOG_INT,
+ " to pc=%08x msr=%08x\n", env->pc, msr);
+ } else if (env->esr & D_FLAG) {
+ qemu_log_mask(CPU_LOG_INT,
+ " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
+ env->pc, msr, env->esr, env->btr);
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ " to pc=%08x msr=%08x esr=%04x\n",
+ env->pc, msr, env->esr);
+ }
+}
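
The single shift in the mode save works because each Save bit sits one position above its live bit (UM at bit 11, UMS at 12; VM at 13, VMS at 14); a worked instance:

/*
 * From virtual user mode: msr & (MSR_VM | MSR_UM) = 0x2800,
 * t = 0x2800 << 1 = 0x5000 = MSR_VMS | MSR_UMS.
 * After clearing all four bits and OR-ing t back in, the CPU runs
 * privileged with the MMU off, remembering it came from virtual
 * user mode so the rtid/rted return instructions can restore it.
 */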
+
+hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ target_ulong vaddr, paddr = 0;
+ MicroBlazeMMULookup lu;
+ int mmu_idx = cpu_mmu_index(env, false);
+ unsigned int hit;
+
+ /* Caller doesn't initialize */
+ *attrs = (MemTxAttrs) {};
+ attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);
+
+ if (mmu_idx != MMU_NOMMU_IDX) {
+ hit = mmu_translate(cpu, &lu, addr, 0, 0);
+ if (hit) {
+ vaddr = addr & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+ } else {
+ paddr = 0; /* ??? */
+ }
+ } else {
+ paddr = addr & TARGET_PAGE_MASK;
+ }
+
+ return paddr;
+}
+
+bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->msr & MSR_IE)
+ && !(env->msr & (MSR_EIP | MSR_BIP))
+ && !(env->iflags & (D_FLAG | IMM_FLAG))) {
+ cs->exception_index = EXCP_IRQ;
+ mb_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ uint32_t esr, iflags;
+
+ /* Recover the pc and iflags from the corresponding insn_start. */
+ cpu_restore_state(cs, retaddr, true);
+ iflags = cpu->env.iflags;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
+ (target_ulong)addr, cpu->env.pc, iflags);
+
+ esr = ESR_EC_UNALIGNED_DATA;
+ if (likely(iflags & ESR_ESS_FLAG)) {
+ esr |= iflags & ESR_ESS_MASK;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+ }
+
+ cpu->env.ear = addr;
+ cpu->env.esr = esr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit(cs);
+}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
new file mode 100644
index 000000000..f740835fc
--- /dev/null
+++ b/target/microblaze/helper.h
@@ -0,0 +1,31 @@
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32)
+
+DEF_HELPER_FLAGS_3(divs, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(frsub, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_2(flt, TCG_CALL_NO_WG, i32, env, i32)
+DEF_HELPER_FLAGS_2(fint, TCG_CALL_NO_WG, i32, env, i32)
+DEF_HELPER_FLAGS_2(fsqrt, TCG_CALL_NO_WG, i32, env, i32)
+
+DEF_HELPER_FLAGS_3(fcmp_un, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_lt, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_eq, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_le, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_gt, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
+#endif
+
+DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
+
+DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
+DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
diff --git a/target/microblaze/insns.decode b/target/microblaze/insns.decode
new file mode 100644
index 000000000..fb0f0e683
--- /dev/null
+++ b/target/microblaze/insns.decode
@@ -0,0 +1,256 @@
+#
+# MicroBlaze instruction decode definitions.
+#
+# Copyright (c) 2020 Richard Henderson <rth@twiddle.net>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+&typea0 rd ra
+&typea rd ra rb
+&typea_br rd rb
+&typea_bc ra rb
+&typeb rd ra imm
+&typeb_br rd imm
+&typeb_bc ra imm
+&type_msr rd imm
+
+# Include any IMM prefix in the value reported.
+%extimm 0:s16 !function=typeb_imm
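+# Illustrative example: "imm 0x1234" followed by "addi rd, ra, 0x5678"
+# makes %extimm report 0x12345678; without a pending IMM prefix the
+# 16-bit field is simply sign-extended.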
+
+@typea ...... rd:5 ra:5 rb:5 ... .... .... &typea
+@typeb ...... rd:5 ra:5 ................ &typeb imm=%extimm
+
+# Officially typea, but with rb==0; the rb field is not used.
+@typea0 ...... rd:5 ra:5 ................ &typea0
+
+# Officially typea, but with ra as opcode.
+@typea_br ...... rd:5 ..... rb:5 ........... &typea_br
+
+# Officially typea, but with rd as opcode.
+@typea_bc ...... ..... ra:5 rb:5 ........... &typea_bc
+
+# Officially typeb, but any immediate extension is unused.
+@typeb_bs ...... rd:5 ra:5 ..... ...... imm:5 &typeb
+
+# Officially typeb, but with ra as opcode.
+@typeb_br ...... rd:5 ..... ................ &typeb_br imm=%extimm
+
+# Officially typeb, but with rd as opcode.
+@typeb_bc ...... ..... ra:5 ................ &typeb_bc imm=%extimm
+
+# For convenience, extract the two imm_w/imm_s fields, then pack
+# them back together as "imm". Doing this makes it easiest to
+# match the required zero at bit 5.
+%ieimm 6:5 0:5
+@typeb_ie ...... rd:5 ra:5 ..... ..... . ..... &typeb imm=%ieimm
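+# Illustrative example: a bsefi with imm_w=8 and imm_s=4 packs into
+# imm=0x104, from which the translator re-extracts width and shift to
+# pull an 8-bit field starting at bit 4.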
+
+@type_msr ...... rd:5 ...... imm:15 &type_msr
+
+###
+
+{
+ zero 000000 00000 00000 00000 000 0000 0000
+ add 000000 ..... ..... ..... 000 0000 0000 @typea
+}
+addc 000010 ..... ..... ..... 000 0000 0000 @typea
+addk 000100 ..... ..... ..... 000 0000 0000 @typea
+addkc 000110 ..... ..... ..... 000 0000 0000 @typea
+
+addi 001000 ..... ..... ................ @typeb
+addic 001010 ..... ..... ................ @typeb
+addik 001100 ..... ..... ................ @typeb
+addikc 001110 ..... ..... ................ @typeb
+
+and 100001 ..... ..... ..... 000 0000 0000 @typea
+andi 101001 ..... ..... ................ @typeb
+
+andn 100011 ..... ..... ..... 000 0000 0000 @typea
+andni 101011 ..... ..... ................ @typeb
+
+beq 100111 00000 ..... ..... 000 0000 0000 @typea_bc
+bge 100111 00101 ..... ..... 000 0000 0000 @typea_bc
+bgt 100111 00100 ..... ..... 000 0000 0000 @typea_bc
+ble 100111 00011 ..... ..... 000 0000 0000 @typea_bc
+blt 100111 00010 ..... ..... 000 0000 0000 @typea_bc
+bne 100111 00001 ..... ..... 000 0000 0000 @typea_bc
+
+beqd 100111 10000 ..... ..... 000 0000 0000 @typea_bc
+bged 100111 10101 ..... ..... 000 0000 0000 @typea_bc
+bgtd 100111 10100 ..... ..... 000 0000 0000 @typea_bc
+bled 100111 10011 ..... ..... 000 0000 0000 @typea_bc
+bltd 100111 10010 ..... ..... 000 0000 0000 @typea_bc
+bned 100111 10001 ..... ..... 000 0000 0000 @typea_bc
+
+beqi 101111 00000 ..... ................ @typeb_bc
+bgei 101111 00101 ..... ................ @typeb_bc
+bgti 101111 00100 ..... ................ @typeb_bc
+blei 101111 00011 ..... ................ @typeb_bc
+blti 101111 00010 ..... ................ @typeb_bc
+bnei 101111 00001 ..... ................ @typeb_bc
+
+beqid 101111 10000 ..... ................ @typeb_bc
+bgeid 101111 10101 ..... ................ @typeb_bc
+bgtid 101111 10100 ..... ................ @typeb_bc
+bleid 101111 10011 ..... ................ @typeb_bc
+bltid 101111 10010 ..... ................ @typeb_bc
+bneid 101111 10001 ..... ................ @typeb_bc
+
+br 100110 ..... 00000 ..... 000 0000 0000 @typea_br
+bra 100110 ..... 01000 ..... 000 0000 0000 @typea_br
+brd 100110 ..... 10000 ..... 000 0000 0000 @typea_br
+brad 100110 ..... 11000 ..... 000 0000 0000 @typea_br
+brld 100110 ..... 10100 ..... 000 0000 0000 @typea_br
+brald 100110 ..... 11100 ..... 000 0000 0000 @typea_br
+
+bri 101110 ..... 00000 ................ @typeb_br
+brai 101110 ..... 01000 ................ @typeb_br
+brid 101110 ..... 10000 ................ @typeb_br
+braid 101110 ..... 11000 ................ @typeb_br
+brlid 101110 ..... 10100 ................ @typeb_br
+bralid 101110 ..... 11100 ................ @typeb_br
+
+brk 100110 ..... 01100 ..... 000 0000 0000 @typea_br
+brki 101110 ..... 01100 ................ @typeb_br
+
+bsrl 010001 ..... ..... ..... 000 0000 0000 @typea
+bsra 010001 ..... ..... ..... 010 0000 0000 @typea
+bsll 010001 ..... ..... ..... 100 0000 0000 @typea
+
+bsrli 011001 ..... ..... 00000 000000 ..... @typeb_bs
+bsrai 011001 ..... ..... 00000 010000 ..... @typeb_bs
+bslli 011001 ..... ..... 00000 100000 ..... @typeb_bs
+
+bsefi 011001 ..... ..... 01000 .....0 ..... @typeb_ie
+bsifi 011001 ..... ..... 10000 .....0 ..... @typeb_ie
+
+clz 100100 ..... ..... 00000 000 1110 0000 @typea0
+
+cmp 000101 ..... ..... ..... 000 0000 0001 @typea
+cmpu 000101 ..... ..... ..... 000 0000 0011 @typea
+
+fadd 010110 ..... ..... ..... 0000 000 0000 @typea
+frsub 010110 ..... ..... ..... 0001 000 0000 @typea
+fmul 010110 ..... ..... ..... 0010 000 0000 @typea
+fdiv 010110 ..... ..... ..... 0011 000 0000 @typea
+fcmp_un 010110 ..... ..... ..... 0100 000 0000 @typea
+fcmp_lt 010110 ..... ..... ..... 0100 001 0000 @typea
+fcmp_eq 010110 ..... ..... ..... 0100 010 0000 @typea
+fcmp_le 010110 ..... ..... ..... 0100 011 0000 @typea
+fcmp_gt 010110 ..... ..... ..... 0100 100 0000 @typea
+fcmp_ne 010110 ..... ..... ..... 0100 101 0000 @typea
+fcmp_ge 010110 ..... ..... ..... 0100 110 0000 @typea
+
+# Note that flt and fint, unlike fsqrt, are documented as having the RB
+# operand which is unused. So allow the field to be non-zero but discard
+# the value and treat as 2-operand insns.
+flt 010110 ..... ..... ----- 0101 000 0000 @typea0
+fint 010110 ..... ..... ----- 0110 000 0000 @typea0
+fsqrt 010110 ..... ..... 00000 0111 000 0000 @typea0
+
+get 011011 rd:5 00000 0 ctrl:5 000000 imm:4
+getd 010011 rd:5 00000 rb:5 0 ctrl:5 00000
+
+idiv 010010 ..... ..... ..... 000 0000 0000 @typea
+idivu 010010 ..... ..... ..... 000 0000 0010 @typea
+
+imm 101100 00000 00000 imm:16
+
+lbu 110000 ..... ..... ..... 0000 000 0000 @typea
+lbur 110000 ..... ..... ..... 0100 000 0000 @typea
+lbuea 110000 ..... ..... ..... 0001 000 0000 @typea
+lbui 111000 ..... ..... ................ @typeb
+
+lhu 110001 ..... ..... ..... 0000 000 0000 @typea
+lhur 110001 ..... ..... ..... 0100 000 0000 @typea
+lhuea 110001 ..... ..... ..... 0001 000 0000 @typea
+lhui 111001 ..... ..... ................ @typeb
+
+lw 110010 ..... ..... ..... 0000 000 0000 @typea
+lwr 110010 ..... ..... ..... 0100 000 0000 @typea
+lwea 110010 ..... ..... ..... 0001 000 0000 @typea
+lwx 110010 ..... ..... ..... 1000 000 0000 @typea
+lwi 111010 ..... ..... ................ @typeb
+
+mbar 101110 imm:5 00010 0000 0000 0000 0100
+
+mfs 100101 rd:5 0 e:1 000 10 rs:14
+mts 100101 0 e:1 000 ra:5 11 rs:14
+
+msrclr 100101 ..... 100010 ............... @type_msr
+msrset 100101 ..... 100000 ............... @type_msr
+
+mul 010000 ..... ..... ..... 000 0000 0000 @typea
+mulh 010000 ..... ..... ..... 000 0000 0001 @typea
+mulhu 010000 ..... ..... ..... 000 0000 0011 @typea
+mulhsu 010000 ..... ..... ..... 000 0000 0010 @typea
+muli 011000 ..... ..... ................ @typeb
+
+or 100000 ..... ..... ..... 000 0000 0000 @typea
+ori 101000 ..... ..... ................ @typeb
+
+pcmpbf 100000 ..... ..... ..... 100 0000 0000 @typea
+pcmpeq 100010 ..... ..... ..... 100 0000 0000 @typea
+pcmpne 100011 ..... ..... ..... 100 0000 0000 @typea
+
+put 011011 00000 ra:5 1 ctrl:5 000000 imm:4
+putd 010011 00000 ra:5 rb:5 1 ctrl:5 00000
+
+rsub 000001 ..... ..... ..... 000 0000 0000 @typea
+rsubc 000011 ..... ..... ..... 000 0000 0000 @typea
+rsubk 000101 ..... ..... ..... 000 0000 0000 @typea
+rsubkc 000111 ..... ..... ..... 000 0000 0000 @typea
+
+rsubi 001001 ..... ..... ................ @typeb
+rsubic 001011 ..... ..... ................ @typeb
+rsubik 001101 ..... ..... ................ @typeb
+rsubikc 001111 ..... ..... ................ @typeb
+
+rtbd 101101 10010 ..... ................ @typeb_bc
+rtid 101101 10001 ..... ................ @typeb_bc
+rted 101101 10100 ..... ................ @typeb_bc
+rtsd 101101 10000 ..... ................ @typeb_bc
+
+sb 110100 ..... ..... ..... 0000 000 0000 @typea
+sbr 110100 ..... ..... ..... 0100 000 0000 @typea
+sbea 110100 ..... ..... ..... 0001 000 0000 @typea
+sbi 111100 ..... ..... ................ @typeb
+
+sh 110101 ..... ..... ..... 0000 000 0000 @typea
+shr 110101 ..... ..... ..... 0100 000 0000 @typea
+shea 110101 ..... ..... ..... 0001 000 0000 @typea
+shi 111101 ..... ..... ................ @typeb
+
+sw 110110 ..... ..... ..... 0000 000 0000 @typea
+swr 110110 ..... ..... ..... 0100 000 0000 @typea
+swea 110110 ..... ..... ..... 0001 000 0000 @typea
+swx 110110 ..... ..... ..... 1000 000 0000 @typea
+swi 111110 ..... ..... ................ @typeb
+
+sext8 100100 ..... ..... 00000 000 0110 0000 @typea0
+sext16 100100 ..... ..... 00000 000 0110 0001 @typea0
+
+sra 100100 ..... ..... 00000 000 0000 0001 @typea0
+src 100100 ..... ..... 00000 000 0010 0001 @typea0
+srl 100100 ..... ..... 00000 000 0100 0001 @typea0
+
+swapb 100100 ..... ..... 00000 001 1110 0000 @typea0
+swaph 100100 ..... ..... 00000 001 1110 0010 @typea0
+
+# Cache operations have no effect in qemu: discard the arguments.
+wdic 100100 00000 ----- ----- -00 -11- 01-0 # wdc
+wdic 100100 00000 ----- ----- 000 0110 1000 # wic
+
+xor 100010 ..... ..... ..... 000 0000 0000 @typea
+xori 101010 ..... ..... ................ @typeb
diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c
new file mode 100644
index 000000000..d24def399
--- /dev/null
+++ b/target/microblaze/machine.c
@@ -0,0 +1,106 @@
+/*
+ * Microblaze VMState for qemu.
+ *
+ * Copyright (c) 2020 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+
+static VMStateField vmstate_mmu_fields[] = {
+ VMSTATE_UINT64_2DARRAY(rams, MicroBlazeMMU, 2, TLB_ENTRIES),
+ VMSTATE_UINT8_ARRAY(tids, MicroBlazeMMU, TLB_ENTRIES),
+ VMSTATE_UINT32_ARRAY(regs, MicroBlazeMMU, 3),
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_mmu = {
+ .name = "mmu",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = vmstate_mmu_fields,
+};
+
+static int get_msr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUMBState *env = container_of(opaque, CPUMBState, msr);
+
+ mb_cpu_write_msr(env, qemu_get_be32(f));
+ return 0;
+}
+
+static int put_msr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUMBState *env = container_of(opaque, CPUMBState, msr);
+
+ qemu_put_be32(f, mb_cpu_read_msr(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_msr = {
+ .name = "msr",
+ .get = get_msr,
+ .put = put_msr,
+};
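+/*
+ * MSR is migrated through the accessors above (rather than as a plain
+ * field) so that mb_cpu_write_msr() can apply the side effects of an
+ * MSR update to the incoming value.
+ */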
+
+static VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINT32_ARRAY(regs, CPUMBState, 32),
+
+ VMSTATE_UINT32(pc, CPUMBState),
+ VMSTATE_SINGLE(msr, CPUMBState, 0, vmstate_msr, uint32_t),
+ VMSTATE_UINT32(esr, CPUMBState),
+ VMSTATE_UINT32(fsr, CPUMBState),
+ VMSTATE_UINT32(btr, CPUMBState),
+ VMSTATE_UINT32(edr, CPUMBState),
+ VMSTATE_UINT32(slr, CPUMBState),
+ VMSTATE_UINT32(shr, CPUMBState),
+ VMSTATE_UINT64(ear, CPUMBState),
+
+ VMSTATE_UINT32(btarget, CPUMBState),
+ VMSTATE_UINT32(imm, CPUMBState),
+ VMSTATE_UINT32(iflags, CPUMBState),
+
+ VMSTATE_UINT32(res_val, CPUMBState),
+ VMSTATE_UINTTL(res_addr, CPUMBState),
+
+ VMSTATE_STRUCT(mmu, CPUMBState, 0, vmstate_mmu, MicroBlazeMMU),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = vmstate_env_fields,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_mb_cpu = {
+ .name = "cpu",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = vmstate_cpu_fields,
+};
diff --git a/target/microblaze/meson.build b/target/microblaze/meson.build
new file mode 100644
index 000000000..05ee0ec16
--- /dev/null
+++ b/target/microblaze/meson.build
@@ -0,0 +1,20 @@
+gen = decodetree.process('insns.decode')
+
+microblaze_ss = ss.source_set()
+microblaze_ss.add(gen)
+microblaze_ss.add(files(
+ 'cpu.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'op_helper.c',
+ 'translate.c',
+))
+
+microblaze_softmmu_ss = ss.source_set()
+microblaze_softmmu_ss.add(files(
+ 'mmu.c',
+ 'machine.c',
+))
+
+target_arch += {'microblaze': microblaze_ss}
+target_softmmu_arch += {'microblaze': microblaze_softmmu_ss}
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
new file mode 100644
index 000000000..cc40f275e
--- /dev/null
+++ b/target/microblaze/mmu.c
@@ -0,0 +1,327 @@
+/*
+ * Microblaze MMU emulation for qemu.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+
+static unsigned int tlb_decode_size(unsigned int f)
+{
+ static const unsigned int sizes[] = {
+ 1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
+ 1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
+ };
+ assert(f < ARRAY_SIZE(sizes));
+ return sizes[f];
+}
+
+static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
+{
+ CPUState *cs = env_cpu(env);
+ MicroBlazeMMU *mmu = &env->mmu;
+ unsigned int tlb_size;
+ uint32_t tlb_tag, end, t;
+
+ t = mmu->rams[RAM_TAG][idx];
+ if (!(t & TLB_VALID)) {
+ return;
+ }
+
+ tlb_tag = t & TLB_EPN_MASK;
+ tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
+ end = tlb_tag + tlb_size;
+
+ while (tlb_tag < end) {
+ tlb_flush_page(cs, tlb_tag);
+ tlb_tag += TARGET_PAGE_SIZE;
+ }
+}
+
+static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
+{
+ MicroBlazeMMU *mmu = &env->mmu;
+ unsigned int i;
+ uint32_t t;
+
+ if (newpid & ~0xff) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
+ /* Lookup and decode. */
+ t = mmu->rams[RAM_TAG][i];
+ if (t & TLB_VALID) {
+ if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i])) {
+ mmu_flush_idx(env, i);
+ }
+ }
+ }
+}
+
+/* rw - 0 = read, 1 = write, 2 = fetch (matching MMUAccessType). */
+unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
+ target_ulong vaddr, MMUAccessType rw, int mmu_idx)
+{
+ MicroBlazeMMU *mmu = &cpu->env.mmu;
+ unsigned int i, hit = 0;
+ unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
+ uint64_t tlb_tag, tlb_rpn, mask;
+ uint32_t tlb_size, t0;
+
+ lu->err = ERR_MISS;
+ for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
+ uint64_t t, d;
+
+ /* Lookup and decode. */
+ t = mmu->rams[RAM_TAG][i];
+ if (t & TLB_VALID) {
+ tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
+ if (tlb_size < TARGET_PAGE_SIZE) {
+ qemu_log_mask(LOG_UNIMP, "%d-byte pages not supported\n", tlb_size);
+ abort();
+ }
+
+ mask = ~((uint64_t)tlb_size - 1);
+ tlb_tag = t & TLB_EPN_MASK;
+ if ((vaddr & mask) != (tlb_tag & mask)) {
+ continue;
+ }
+ if (mmu->tids[i]
+ && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
+ continue;
+ }
+
+ /* Bring in the data part. */
+ d = mmu->rams[RAM_DATA][i];
+ tlb_ex = d & TLB_EX;
+ tlb_wr = d & TLB_WR;
+
+ /* Now let's see if there is a zone that overrides the protbits. */
+ tlb_zsel = (d >> 4) & 0xf;
+ t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
+ t0 &= 0x3;
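+ /* Illustrative: zsel 0 selects ZPR bits [31:30], zsel 15 bits [1:0]. */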
+
+ if (tlb_zsel > cpu->cfg.mmu_zones) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "tlb zone select out of range! %d\n", tlb_zsel);
+ t0 = 1; /* Ignore. */
+ }
+
+ if (cpu->cfg.mmu == 1) {
+ t0 = 1; /* Zones are disabled. */
+ }
+
+ switch (t0) {
+ case 0:
+ if (mmu_idx == MMU_USER_IDX) {
+ continue;
+ }
+ break;
+ case 2:
+ if (mmu_idx != MMU_USER_IDX) {
+ tlb_ex = 1;
+ tlb_wr = 1;
+ }
+ break;
+ case 3:
+ tlb_ex = 1;
+ tlb_wr = 1;
+ break;
+ default:
+ break;
+ }
+
+ lu->err = ERR_PROT;
+ lu->prot = PAGE_READ;
+ if (tlb_wr) {
+ lu->prot |= PAGE_WRITE;
+ } else if (rw == 1) {
+ goto done;
+ }
+ if (tlb_ex) {
+ lu->prot |= PAGE_EXEC;
+ } else if (rw == 2) {
+ goto done;
+ }
+
+ tlb_rpn = d & TLB_RPN_MASK;
+
+ lu->vaddr = tlb_tag;
+ lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
+ lu->size = tlb_size;
+ lu->err = ERR_HIT;
+ lu->idx = i;
+ hit = 1;
+ goto done;
+ }
+ }
+done:
+ qemu_log_mask(CPU_LOG_MMU,
+ "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ vaddr, rw, tlb_wr, tlb_ex, hit);
+ return hit;
+}
+
+/* Writes/reads to the MMU's special regs end up here. */
+uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
+{
+ MicroBlazeCPU *cpu = env_archcpu(env);
+ unsigned int i;
+ uint32_t r = 0;
+
+ if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
+ return 0;
+ }
+ if (ext && rn != MMU_R_TLBLO) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
+ return 0;
+ }
+
+ switch (rn) {
+ /* Reads to HI/LO trig reads from the mmu rams. */
+ case MMU_R_TLBLO:
+ case MMU_R_TLBHI:
+ if (!(cpu->cfg.mmu_tlb_access & 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
+ return 0;
+ }
+
+ i = env->mmu.regs[MMU_R_TLBX] & 0xff;
+ r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
+ if (rn == MMU_R_TLBHI) {
+ env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
+ }
+ break;
+ case MMU_R_PID:
+ case MMU_R_ZPR:
+ if (!(cpu->cfg.mmu_tlb_access & 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
+ return 0;
+ }
+ r = env->mmu.regs[rn];
+ break;
+ case MMU_R_TLBX:
+ r = env->mmu.regs[rn];
+ break;
+ case MMU_R_TLBSX:
+ qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
+ break;
+ }
+ qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
+ return r;
+}
+
+void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
+{
+ MicroBlazeCPU *cpu = env_archcpu(env);
+ uint64_t tmp64;
+ unsigned int i;
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s rn=%d=%x old=%x\n", __func__, rn, v,
+ rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);
+
+ if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
+ return;
+ }
+ if (ext && rn != MMU_R_TLBLO) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
+ return;
+ }
+
+ switch (rn) {
+ /* Writes to HI/LO trig writes to the mmu rams. */
+ case MMU_R_TLBLO:
+ case MMU_R_TLBHI:
+ i = env->mmu.regs[MMU_R_TLBX] & 0xff;
+ if (rn == MMU_R_TLBHI) {
+ if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "invalidating index %x at pc=%x\n",
+ i, env->pc);
+ }
+ env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
+ mmu_flush_idx(env, i);
+ }
+ tmp64 = env->mmu.rams[rn & 1][i];
+ env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
+ break;
+ case MMU_R_ZPR:
+ if (cpu->cfg.mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ /*
+ * Changes to the zone protection reg flush the QEMU TLB.
+ * Fortunately, these are very uncommon.
+ */
+ if (v != env->mmu.regs[rn]) {
+ tlb_flush(env_cpu(env));
+ }
+ env->mmu.regs[rn] = v;
+ break;
+ case MMU_R_PID:
+ if (cpu->cfg.mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ if (v != env->mmu.regs[rn]) {
+ mmu_change_pid(env, v);
+ env->mmu.regs[rn] = v;
+ }
+ break;
+ case MMU_R_TLBX:
+ /* Bit 31 is read-only. */
+ env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
+ break;
+ case MMU_R_TLBSX:
+ {
+ MicroBlazeMMULookup lu;
+ int hit;
+
+ if (cpu->cfg.mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
+ 0, cpu_mmu_index(env, false));
+ if (hit) {
+ env->mmu.regs[MMU_R_TLBX] = lu.idx;
+ } else {
+ env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
+ }
+ break;
+ }
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
+ break;
+ }
+}
+
+void mmu_init(MicroBlazeMMU *mmu)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
+ mmu->regs[i] = 0;
+ }
+}
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
new file mode 100644
index 000000000..b6b4b9ad6
--- /dev/null
+++ b/target/microblaze/mmu.h
@@ -0,0 +1,92 @@
+/*
+ * Microblaze MMU emulation for qemu.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_MICROBLAZE_MMU_H
+#define TARGET_MICROBLAZE_MMU_H
+
+#define MMU_R_PID 0
+#define MMU_R_ZPR 1
+#define MMU_R_TLBX 2
+#define MMU_R_TLBLO 3
+#define MMU_R_TLBHI 4
+#define MMU_R_TLBSX 5
+
+#define RAM_DATA 1
+#define RAM_TAG 0
+
+/* Tag portion */
+#define TLB_EPN_MASK MAKE_64BIT_MASK(10, 64 - 10)
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
+
+/* Data portion */
+#define TLB_RPN_MASK MAKE_64BIT_MASK(10, 64 - 10)
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
+/* TLBX */
+#define R_TBLX_MISS_SHIFT 31
+#define R_TBLX_MISS_MASK (1U << R_TBLX_MISS_SHIFT)
+
+#define TLB_ENTRIES 64
+
+typedef struct {
+ /* Data and tag brams. */
+ uint64_t rams[2][TLB_ENTRIES];
+ /* We keep a separate ram for the tids to avoid the 48 bit tag width. */
+ uint8_t tids[TLB_ENTRIES];
+ /* Control flops. */
+ uint32_t regs[3];
+} MicroBlazeMMU;
+
+typedef struct {
+ uint32_t paddr;
+ uint32_t vaddr;
+ unsigned int size;
+ unsigned int idx;
+ int prot;
+ enum {
+ ERR_PROT, ERR_MISS, ERR_HIT
+ } err;
+} MicroBlazeMMULookup;
+
+unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
+ target_ulong vaddr, MMUAccessType rw, int mmu_idx);
+uint32_t mmu_read(CPUMBState *env, bool ea, uint32_t rn);
+void mmu_write(CPUMBState *env, bool ea, uint32_t rn, uint32_t v);
+void mmu_init(MicroBlazeMMU *mmu);
+
+#endif
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
new file mode 100644
index 000000000..58d633584
--- /dev/null
+++ b/target/microblaze/op_helper.c
@@ -0,0 +1,430 @@
+/*
+ * Microblaze helper routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>.
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "fpu/softfloat.h"
+
+void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
+{
+ int test = ctrl & STREAM_TEST;
+ int atomic = ctrl & STREAM_ATOMIC;
+ int control = ctrl & STREAM_CONTROL;
+ int nonblock = ctrl & STREAM_NONBLOCK;
+ int exception = ctrl & STREAM_EXCEPTION;
+
+ qemu_log_mask(LOG_UNIMP, "Unhandled stream put to stream-id=%d data=%x %s%s%s%s%s\n",
+ id, data,
+ test ? "t" : "",
+ nonblock ? "n" : "",
+ exception ? "e" : "",
+ control ? "c" : "",
+ atomic ? "a" : "");
+}
+
+uint32_t helper_get(uint32_t id, uint32_t ctrl)
+{
+ int test = ctrl & STREAM_TEST;
+ int atomic = ctrl & STREAM_ATOMIC;
+ int control = ctrl & STREAM_CONTROL;
+ int nonblock = ctrl & STREAM_NONBLOCK;
+ int exception = ctrl & STREAM_EXCEPTION;
+
+ qemu_log_mask(LOG_UNIMP, "Unhandled stream get from stream-id=%d %s%s%s%s%s\n",
+ id,
+ test ? "t" : "",
+ nonblock ? "n" : "",
+ exception ? "e" : "",
+ control ? "c" : "",
+ atomic ? "a" : "");
+ return 0xdead0000 | id;
+}
+
+void helper_raise_exception(CPUMBState *env, uint32_t index)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
+
+static bool check_divz(CPUMBState *env, uint32_t a, uint32_t b, uintptr_t ra)
+{
+ if (unlikely(b == 0)) {
+ env->msr |= MSR_DZ;
+
+ if ((env->msr & MSR_EE) &&
+ env_archcpu(env)->cfg.div_zero_exception) {
+ CPUState *cs = env_cpu(env);
+
+ env->esr = ESR_EC_DIVZERO;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, ra);
+ }
+ return false;
+ }
+ return true;
+}
+
+uint32_t helper_divs(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ if (!check_divz(env, a, b, GETPC())) {
+ return 0;
+ }
+ return (int32_t)a / (int32_t)b;
+}
+
+uint32_t helper_divu(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ if (!check_divz(env, a, b, GETPC())) {
+ return 0;
+ }
+ return a / b;
+}
+
+/* Raise an FPU exception. */
+static void raise_fpu_exception(CPUMBState *env, uintptr_t ra)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->esr = ESR_EC_FPU;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, ra);
+}
+
+static void update_fpu_flags(CPUMBState *env, int flags, uintptr_t ra)
+{
+ int raise = 0;
+
+ if (flags & float_flag_invalid) {
+ env->fsr |= FSR_IO;
+ raise = 1;
+ }
+ if (flags & float_flag_divbyzero) {
+ env->fsr |= FSR_DZ;
+ raise = 1;
+ }
+ if (flags & float_flag_overflow) {
+ env->fsr |= FSR_OF;
+ raise = 1;
+ }
+ if (flags & float_flag_underflow) {
+ env->fsr |= FSR_UF;
+ raise = 1;
+ }
+ if (raise
+ && (env_archcpu(env)->cfg.pvr_regs[2] & PVR2_FPU_EXC_MASK)
+ && (env->msr & MSR_EE)) {
+ raise_fpu_exception(env, ra);
+ }
+}
+
+uint32_t helper_fadd(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_add(fa.f, fb.f, &env->fp_status);
+
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+ return fd.l;
+}
+
+uint32_t helper_frsub(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_sub(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+ return fd.l;
+}
+
+uint32_t helper_fmul(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_mul(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+
+ return fd.l;
+}
+
+uint32_t helper_fdiv(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_div(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+
+ return fd.l;
+}
+
+uint32_t helper_fcmp_un(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ uint32_t r = 0;
+
+ fa.l = a;
+ fb.l = b;
+
+ if (float32_is_signaling_nan(fa.f, &env->fp_status) ||
+ float32_is_signaling_nan(fb.f, &env->fp_status)) {
+ update_fpu_flags(env, float_flag_invalid, GETPC());
+ r = 1;
+ }
+
+ if (float32_is_quiet_nan(fa.f, &env->fp_status) ||
+ float32_is_quiet_nan(fb.f, &env->fp_status)) {
+ r = 1;
+ }
+
+ return r;
+}
+
+uint32_t helper_fcmp_lt(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int r;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ r = float32_lt(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+
+ return r;
+}
+
+uint32_t helper_fcmp_eq(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags;
+ int r;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ r = float32_eq_quiet(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+
+ return r;
+}
+
+uint32_t helper_fcmp_le(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags;
+ int r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = float32_le(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+
+ return r;
+}
+
+uint32_t helper_fcmp_gt(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = float32_lt(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+ return r;
+}
+
+uint32_t helper_fcmp_ne(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = !float32_eq_quiet(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+
+ return r;
+}
+
+uint32_t helper_fcmp_ge(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = !float32_lt(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid, GETPC());
+
+ return r;
+}
+
+uint32_t helper_flt(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fd, fa;
+
+ fa.l = a;
+ fd.f = int32_to_float32(fa.l, &env->fp_status);
+ return fd.l;
+}
+
+uint32_t helper_fint(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fa;
+ uint32_t r;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ r = float32_to_int32(fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+
+ return r;
+}
+
+uint32_t helper_fsqrt(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fd, fa;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fd.l = float32_sqrt(fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags, GETPC());
+
+ return fd.l;
+}
+
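+/*
+ * Illustrative example: helper_pcmpbf(0x11223344, 0xaa22bbcc) returns 2
+ * because the first matching byte, scanning from the most significant
+ * end, is byte 1 (0x22); with no matching byte the result is 0.
+ */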
+uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
+{
+ unsigned int i;
+ uint32_t mask = 0xff000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((a & mask) == (b & mask)) {
+ return i + 1;
+ }
+ mask >>= 8;
+ }
+ return 0;
+}
+
+void helper_stackprot(CPUMBState *env, target_ulong addr)
+{
+ if (addr < env->slr || addr > env->shr) {
+ CPUState *cs = env_cpu(env);
+
+ qemu_log_mask(CPU_LOG_INT, "Stack protector violation at "
+ TARGET_FMT_lx " %x %x\n",
+ addr, env->slr, env->shr);
+
+ env->ear = addr;
+ env->esr = ESR_EC_STACKPROT;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, GETPC());
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Writes/reads to the MMU's special regs end up here. */
+uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
+{
+ return mmu_read(env, ext, rn);
+}
+
+void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
+{
+ mmu_write(env, ext, rn, v);
+}
+
+void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+
+ qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
+ " physaddr 0x" TARGET_FMT_plx " size %d access type %s\n",
+ addr, physaddr, size,
+ access_type == MMU_INST_FETCH ? "INST_FETCH" :
+ (access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
+
+ if (!(env->msr & MSR_EE)) {
+ return;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ if (!cpu->cfg.iopb_bus_exception) {
+ return;
+ }
+ env->esr = ESR_EC_INSN_BUS;
+ } else {
+ if (!cpu->cfg.dopb_bus_exception) {
+ return;
+ }
+ env->esr = ESR_EC_DATA_BUS;
+ }
+
+ env->ear = addr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
new file mode 100644
index 000000000..2561b904b
--- /dev/null
+++ b/target/microblaze/translate.c
@@ -0,0 +1,1953 @@
+/*
+ * Xilinx MicroBlaze emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias.
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-gen.h"
+#include "exec/translator.h"
+#include "qemu/qemu-print.h"
+
+#include "exec/log.h"
+
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+/* is_jmp field values */
+#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
+#define DISAS_EXIT DISAS_TARGET_1 /* all cpu state modified dynamically */
+
+/* cpu state besides pc was modified dynamically; update pc to next */
+#define DISAS_EXIT_NEXT DISAS_TARGET_2
+/* cpu state besides pc was modified dynamically; update pc to btarget */
+#define DISAS_EXIT_JUMP DISAS_TARGET_3
+
+static TCGv_i32 cpu_R[32];
+static TCGv_i32 cpu_pc;
+static TCGv_i32 cpu_msr;
+static TCGv_i32 cpu_msr_c;
+static TCGv_i32 cpu_imm;
+static TCGv_i32 cpu_bvalue;
+static TCGv_i32 cpu_btarget;
+static TCGv_i32 cpu_iflags;
+static TCGv cpu_res_addr;
+static TCGv_i32 cpu_res_val;
+
+#include "exec/gen-icount.h"
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ DisasContextBase base;
+ const MicroBlazeCPUConfig *cfg;
+
+ /* TCG op of the current insn_start. */
+ TCGOp *insn_start;
+
+ TCGv_i32 r0;
+ bool r0_set;
+
+ /* Decoder. */
+ uint32_t ext_imm;
+ unsigned int tb_flags;
+ unsigned int tb_flags_to_set;
+ int mem_index;
+
+ /* Condition under which to jump, including NEVER and ALWAYS. */
+ TCGCond jmp_cond;
+
+ /* Immediate branch-taken destination, or -1 for indirect. */
+ uint32_t jmp_dest;
+} DisasContext;
+
+static int typeb_imm(DisasContext *dc, int x)
+{
+ if (dc->tb_flags & IMM_FLAG) {
+ return deposit32(dc->ext_imm, 0, 16, x);
+ }
+ return x;
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+static void t_sync_flags(DisasContext *dc)
+{
+ /* Synch the tb dependent flags between translator and runtime. */
+ if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
+ tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
+ }
+}
+
+static void gen_raise_exception(DisasContext *dc, uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
+{
+ t_sync_flags(dc);
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
+ gen_raise_exception(dc, index);
+}
+
+static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
+{
+ TCGv_i32 tmp = tcg_const_i32(esr_ec);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
+ tcg_temp_free_i32(tmp);
+
+ gen_raise_exception_sync(dc, EXCP_HW_EXCP);
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&dc->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb(dc->base.tb, n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+/*
+ * Returns true if the insn is an illegal operation.
+ * If exceptions are enabled, an exception is raised.
+ */
+static bool trap_illegal(DisasContext *dc, bool cond)
+{
+ if (cond && (dc->tb_flags & MSR_EE)
+ && dc->cfg->illegal_opcode_exception) {
+ gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
+ }
+ return cond;
+}
+
+/*
+ * Returns true if the insn is illegal in userspace.
+ * If exceptions are enabled, an exception is raised.
+ */
+static bool trap_userspace(DisasContext *dc, bool cond)
+{
+ bool cond_user = cond && dc->mem_index == MMU_USER_IDX;
+
+ if (cond_user && (dc->tb_flags & MSR_EE)) {
+ gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
+ }
+ return cond_user;
+}
+
+/*
+ * Return true, and log an error, if the current insn is
+ * within a delay slot.
+ */
+static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
+{
+ if (dc->tb_flags & D_FLAG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid insn in delay slot: %s at %08x\n",
+ insn_type, (uint32_t)dc->base.pc_next);
+ return true;
+ }
+ return false;
+}
+
+static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
+{
+ if (likely(reg != 0)) {
+ return cpu_R[reg];
+ }
+ if (!dc->r0_set) {
+ if (dc->r0 == NULL) {
+ dc->r0 = tcg_temp_new_i32();
+ }
+ tcg_gen_movi_i32(dc->r0, 0);
+ dc->r0_set = true;
+ }
+ return dc->r0;
+}
+
+static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
+{
+ if (likely(reg != 0)) {
+ return cpu_R[reg];
+ }
+ if (dc->r0 == NULL) {
+ dc->r0 = tcg_temp_new_i32();
+ }
+ return dc->r0;
+}
+
+static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
+ void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 rd, ra, rb;
+
+ if (arg->rd == 0 && !side_effects) {
+ return true;
+ }
+
+ rd = reg_for_write(dc, arg->rd);
+ ra = reg_for_read(dc, arg->ra);
+ rb = reg_for_read(dc, arg->rb);
+ fn(rd, ra, rb);
+ return true;
+}
+
+static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
+ void (*fn)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 rd, ra;
+
+ if (arg->rd == 0 && !side_effects) {
+ return true;
+ }
+
+ rd = reg_for_write(dc, arg->rd);
+ ra = reg_for_read(dc, arg->ra);
+ fn(rd, ra);
+ return true;
+}
+
+static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
+ void (*fni)(TCGv_i32, TCGv_i32, int32_t))
+{
+ TCGv_i32 rd, ra;
+
+ if (arg->rd == 0 && !side_effects) {
+ return true;
+ }
+
+ rd = reg_for_write(dc, arg->rd);
+ ra = reg_for_read(dc, arg->ra);
+ fni(rd, ra, arg->imm);
+ return true;
+}
+
+static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
+ void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 rd, ra, imm;
+
+ if (arg->rd == 0 && !side_effects) {
+ return true;
+ }
+
+ rd = reg_for_write(dc, arg->rd);
+ ra = reg_for_read(dc, arg->ra);
+ imm = tcg_const_i32(arg->imm);
+
+ fn(rd, ra, imm);
+
+ tcg_temp_free_i32(imm);
+ return true;
+}
+
+#define DO_TYPEA(NAME, SE, FN) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
+ { return do_typea(dc, a, SE, FN); }
+
+#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
+ { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }
+
+#define DO_TYPEA0(NAME, SE, FN) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
+ { return do_typea0(dc, a, SE, FN); }
+
+#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
+ { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }
+
+#define DO_TYPEBI(NAME, SE, FNI) \
+ static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
+ { return do_typeb_imm(dc, a, SE, FNI); }
+
+#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
+ static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
+ { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }
+
+#define DO_TYPEBV(NAME, SE, FN) \
+ static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
+ { return do_typeb_val(dc, a, SE, FN); }
+
+#define ENV_WRAPPER2(NAME, HELPER) \
+ static void NAME(TCGv_i32 out, TCGv_i32 ina) \
+ { HELPER(out, cpu_env, ina); }
+
+#define ENV_WRAPPER3(NAME, HELPER) \
+ static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
+ { HELPER(out, cpu_env, ina, inb); }
+
+/* No input carry, but output carry. */
+static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
+
+ tcg_temp_free_i32(zero);
+}
+
+/* Input and output carry. */
+static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
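+ /*
+ * Two add2 steps: the first computes tmp = ina + C and captures the
+ * carry; the second adds inb and accumulates any further carry, so
+ * cpu_msr_c ends up set if either step overflowed.
+ */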
+ tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
+ tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(zero);
+}
+
+/* Input carry, but no output carry. */
+static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ tcg_gen_add_i32(out, ina, inb);
+ tcg_gen_add_i32(out, out, cpu_msr_c);
+}
+
+DO_TYPEA(add, true, gen_add)
+DO_TYPEA(addc, true, gen_addc)
+DO_TYPEA(addk, false, tcg_gen_add_i32)
+DO_TYPEA(addkc, true, gen_addkc)
+
+DO_TYPEBV(addi, true, gen_add)
+DO_TYPEBV(addic, true, gen_addc)
+DO_TYPEBI(addik, false, tcg_gen_addi_i32)
+DO_TYPEBV(addikc, true, gen_addkc)
+
+static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
+{
+ tcg_gen_andi_i32(out, ina, ~imm);
+}
+
+DO_TYPEA(and, false, tcg_gen_and_i32)
+DO_TYPEBI(andi, false, tcg_gen_andi_i32)
+DO_TYPEA(andn, false, tcg_gen_andc_i32)
+DO_TYPEBI(andni, false, gen_andni)
+
+static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, inb, 31);
+ tcg_gen_sar_i32(out, ina, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, inb, 31);
+ tcg_gen_shr_i32(out, ina, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, inb, 31);
+ tcg_gen_shl_i32(out, ina, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
+{
+ /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
+ int imm_w = extract32(imm, 5, 5);
+ int imm_s = extract32(imm, 0, 5);
+
+ if (imm_w + imm_s > 32 || imm_w == 0) {
+ /* These inputs have an undefined behavior. */
+ qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
+ imm_w, imm_s);
+ } else {
+ tcg_gen_extract_i32(out, ina, imm_s, imm_w);
+ }
+}
+
+static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
+{
+ /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
+ int imm_w = extract32(imm, 5, 5);
+ int imm_s = extract32(imm, 0, 5);
+ int width = imm_w - imm_s + 1;
+
+ if (imm_w < imm_s) {
+ /* These inputs have an undefined behavior. */
+ qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
+ imm_w, imm_s);
+ } else {
+ tcg_gen_deposit_i32(out, out, ina, imm_s, width);
+ }
+}
+
+DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
+DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
+DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)
+
+DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
+DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
+DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)
+
+DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
+DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
+
+static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
+{
+ tcg_gen_clzi_i32(out, ina, 32);
+}
+
+DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)
+
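+/*
+ * cmp/cmpu compute rb - ra, then overwrite bit 31 of the result with
+ * the signed/unsigned "rb < ra" flag, so the sign bit reflects the
+ * comparison even when the subtraction overflows.
+ */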
+static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 lt = tcg_temp_new_i32();
+
+ tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
+ tcg_gen_sub_i32(out, inb, ina);
+ tcg_gen_deposit_i32(out, out, lt, 31, 1);
+ tcg_temp_free_i32(lt);
+}
+
+static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 lt = tcg_temp_new_i32();
+
+ tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
+ tcg_gen_sub_i32(out, inb, ina);
+ tcg_gen_deposit_i32(out, out, lt, 31, 1);
+ tcg_temp_free_i32(lt);
+}
+
+DO_TYPEA(cmp, false, gen_cmp)
+DO_TYPEA(cmpu, false, gen_cmpu)
+
+ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
+ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
+ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
+ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
+ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
+ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
+ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
+ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
+ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
+ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
+ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)
+
+DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
+DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
+DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
+DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
+DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
+DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
+DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
+DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
+DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
+DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
+DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)
+
+ENV_WRAPPER2(gen_flt, gen_helper_flt)
+ENV_WRAPPER2(gen_fint, gen_helper_fint)
+ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)
+
+DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
+DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
+DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
+
+/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
+static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ gen_helper_divs(out, cpu_env, inb, ina);
+}
+
+static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ gen_helper_divu(out, cpu_env, inb, ina);
+}
+
+DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
+DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
+
+static bool trans_imm(DisasContext *dc, arg_imm *arg)
+{
+ if (invalid_delay_slot(dc, "imm")) {
+ return true;
+ }
+ dc->ext_imm = arg->imm << 16;
+ tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
+ dc->tb_flags_to_set = IMM_FLAG;
+ return true;
+}
+
+static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_muls2_i32(tmp, out, ina, inb);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mulu2_i32(tmp, out, ina, inb);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mulsu2_i32(tmp, out, ina, inb);
+ tcg_temp_free_i32(tmp);
+}
+
+DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
+DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
+DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
+DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
+DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)
+
+DO_TYPEA(or, false, tcg_gen_or_i32)
+DO_TYPEBI(ori, false, tcg_gen_ori_i32)
+
+static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
+}
+
+static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
+}
+
+DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
+DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
+DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
+
+/* No input carry, but output carry. */
+static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
+ tcg_gen_sub_i32(out, inb, ina);
+}
+
+/* Input and output carry. */
+static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_not_i32(tmp, ina);
+ tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
+ tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(tmp);
+}
+
+/* No input or output carry. */
+static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ tcg_gen_sub_i32(out, inb, ina);
+}
+
+/* Input carry, no output carry. */
+static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
+{
+ TCGv_i32 nota = tcg_temp_new_i32();
+
+ tcg_gen_not_i32(nota, ina);
+ tcg_gen_add_i32(out, inb, nota);
+ tcg_gen_add_i32(out, out, cpu_msr_c);
+
+ tcg_temp_free_i32(nota);
+}
+
+DO_TYPEA(rsub, true, gen_rsub)
+DO_TYPEA(rsubc, true, gen_rsubc)
+DO_TYPEA(rsubk, false, gen_rsubk)
+DO_TYPEA(rsubkc, true, gen_rsubkc)
+
+DO_TYPEBV(rsubi, true, gen_rsub)
+DO_TYPEBV(rsubic, true, gen_rsubc)
+DO_TYPEBV(rsubik, false, gen_rsubk)
+DO_TYPEBV(rsubikc, true, gen_rsubkc)
+
+DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
+DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
+
+static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
+{
+ tcg_gen_andi_i32(cpu_msr_c, ina, 1);
+ tcg_gen_sari_i32(out, ina, 1);
+}
+
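+/*
+ * src is a shift right through carry: the old MSR[C] becomes the new
+ * MSB (via the extract2 of the tmp:ina pair) while the old LSB is
+ * captured as the new MSR[C].
+ */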
+static void gen_src(TCGv_i32 out, TCGv_i32 ina)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, cpu_msr_c);
+ tcg_gen_andi_i32(cpu_msr_c, ina, 1);
+ tcg_gen_extract2_i32(out, ina, tmp, 1);
+
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
+{
+ tcg_gen_andi_i32(cpu_msr_c, ina, 1);
+ tcg_gen_shri_i32(out, ina, 1);
+}
+
+DO_TYPEA0(sra, false, gen_sra)
+DO_TYPEA0(src, false, gen_src)
+DO_TYPEA0(srl, false, gen_srl)
+
+static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
+{
+ tcg_gen_rotri_i32(out, ina, 16);
+}
+
+DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
+DO_TYPEA0(swaph, false, gen_swaph)
+
+static bool trans_wdic(DisasContext *dc, arg_wdic *a)
+{
+ /* Cache operations are nops: only check for supervisor mode. */
+ trap_userspace(dc, true);
+ return true;
+}
+
+DO_TYPEA(xor, false, tcg_gen_xor_i32)
+DO_TYPEBI(xori, false, tcg_gen_xori_i32)
+
+static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
+{
+ TCGv ret = tcg_temp_new();
+
+ /* If either of the regs is r0, set ret to the value of the other reg. */
+ if (ra && rb) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
+ tcg_gen_extu_i32_tl(ret, tmp);
+ tcg_temp_free_i32(tmp);
+ } else if (ra) {
+ tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+ } else if (rb) {
+ tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ } else {
+ tcg_gen_movi_tl(ret, 0);
+ }
+
+ if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
+ gen_helper_stackprot(cpu_env, ret);
+ }
+ return ret;
+}
+
+static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
+{
+ TCGv ret = tcg_temp_new();
+
+ /* If ra is r0, set ret to just the immediate. */
+ if (ra) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
+ tcg_gen_extu_i32_tl(ret, tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ tcg_gen_movi_tl(ret, (uint32_t)imm);
+ }
+
+ if (ra == 1 && dc->cfg->stackprot) {
+ gen_helper_stackprot(cpu_env, ret);
+ }
+ return ret;
+}
+
+#ifndef CONFIG_USER_ONLY
+static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+{
+ int addr_size = dc->cfg->addr_size;
+ TCGv ret = tcg_temp_new();
+
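+ /*
+ * For the extended-address accesses, ra supplies the high 32 bits of
+ * the effective address and rb the low 32 bits.
+ */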
+ if (addr_size == 32 || ra == 0) {
+ if (rb) {
+ tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ } else {
+ tcg_gen_movi_tl(ret, 0);
+ }
+ } else {
+ if (rb) {
+ tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
+ } else {
+ tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+ tcg_gen_shli_tl(ret, ret, 32);
+ }
+ if (addr_size < 64) {
+ /* Mask off out of range bits. */
+ tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
+ }
+ }
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+static void record_unaligned_ess(DisasContext *dc, int rd,
+ MemOp size, bool store)
+{
+ uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
+
+ iflags |= ESR_ESS_FLAG;
+ iflags |= rd << 5;
+ iflags |= store * ESR_S;
+ iflags |= (size == MO_32) * ESR_W;
+
+ tcg_set_insn_start_param(dc->insn_start, 1, iflags);
+}
+#endif
+
+static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+ int mem_index, bool rev)
+{
+ MemOp size = mop & MO_SIZE;
+
+ /*
+ * When doing reverse accesses we need to do two things.
+ *
+ * 1. Reverse the address wrt endianness.
+ * 2. Byteswap the data lanes on the way back into the CPU core.
+ */
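+ /*
+ * Illustrative: a 16-bit reverse access at address A is performed at
+ * A ^ 2 with the opposite byte order; a byte access simply targets
+ * A ^ 3 with no data swap.
+ */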
+ if (rev) {
+ if (size > MO_8) {
+ mop ^= MO_BSWAP;
+ }
+ if (size < MO_32) {
+ tcg_gen_xori_tl(addr, addr, 3 - size);
+ }
+ }
+
+ /*
+ * For system mode, enforce alignment if the cpu configuration
+ * requires it. For user-mode, the Linux kernel will have fixed up
+ * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+ */
+#ifndef CONFIG_USER_ONLY
+ if (size > MO_8 &&
+ (dc->tb_flags & MSR_EE) &&
+ dc->cfg->unaligned_exceptions) {
+ record_unaligned_ess(dc, rd, size, false);
+ mop |= MO_ALIGN;
+ }
+#endif
+
+ tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
+
+ tcg_temp_free(addr);
+ return true;
+}
+
+static bool trans_lbu(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_lbur(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
+}
+
+static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_lhu(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_lhur(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+}
+
+static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_lw(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_lwr(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+}
+
+static bool trans_lwea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_lwx(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+
+ /* lwx does not throw unaligned access errors, so force alignment */
+ tcg_gen_andi_tl(addr, addr, ~3);
+
+ tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
+ tcg_gen_mov_tl(cpu_res_addr, addr);
+ tcg_temp_free(addr);
+
+ if (arg->rd) {
+ tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
+ }
+
+ /* No support for AXI exclusive so always clear C */
+ tcg_gen_movi_i32(cpu_msr_c, 0);
+ return true;
+}
+
+static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
+ int mem_index, bool rev)
+{
+ MemOp size = mop & MO_SIZE;
+
+ /*
+ * When doing reverse accesses we need to do two things.
+ *
+ * 1. Reverse the address wrt endianness.
+ * 2. Byteswap the data lanes on the way back into the CPU core.
+ */
+ if (rev) {
+ if (size > MO_8) {
+ mop ^= MO_BSWAP;
+ }
+ if (size < MO_32) {
+ tcg_gen_xori_tl(addr, addr, 3 - size);
+ }
+ }
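+    /* Same address and data-lane reversal as in do_load above. */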
+
+ /*
+ * For system mode, enforce alignment if the cpu configuration
+ * requires it. For user-mode, the Linux kernel will have fixed up
+ * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+ */
+#ifndef CONFIG_USER_ONLY
+ if (size > MO_8 &&
+ (dc->tb_flags & MSR_EE) &&
+ dc->cfg->unaligned_exceptions) {
+ record_unaligned_ess(dc, rd, size, true);
+ mop |= MO_ALIGN;
+ }
+#endif
+
+ tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
+
+ tcg_temp_free(addr);
+ return true;
+}
+
+static bool trans_sb(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_sbr(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
+}
+
+static bool trans_sbea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
+}
+
+static bool trans_sh(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_shr(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
+}
+
+static bool trans_shea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_shi(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
+}
+
+static bool trans_sw(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_swr(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
+}
+
+static bool trans_swea(DisasContext *dc, arg_typea *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
+#endif
+}
+
+static bool trans_swi(DisasContext *dc, arg_typeb *arg)
+{
+ TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
+ return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
+}
+
+static bool trans_swx(DisasContext *dc, arg_typea *arg)
+{
+ TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
+ TCGLabel *swx_done = gen_new_label();
+ TCGLabel *swx_fail = gen_new_label();
+ TCGv_i32 tval;
+
+ /* swx does not throw unaligned access errors, so force alignment */
+ tcg_gen_andi_tl(addr, addr, ~3);
+
+ /*
+ * Compare the address vs the one we used during lwx.
+ * On mismatch, the operation fails. On match, addr dies at the
+ * branch, but we know we can use the equal version in the global.
+ * In either case, addr is no longer needed.
+ */
+ tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
+ tcg_temp_free(addr);
+
+ /*
+ * Compare the value loaded during lwx with current contents of
+ * the reserved location.
+ */
+ tval = tcg_temp_new_i32();
+
+ tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
+ reg_for_write(dc, arg->rd),
+ dc->mem_index, MO_TEUL);
+
+ tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
+ tcg_temp_free_i32(tval);
+
+ /* Success */
+ tcg_gen_movi_i32(cpu_msr_c, 0);
+ tcg_gen_br(swx_done);
+
+ /* Failure */
+ gen_set_label(swx_fail);
+ tcg_gen_movi_i32(cpu_msr_c, 1);
+
+ gen_set_label(swx_done);
+
+ /*
+ * Prevent the saved address from working again without another ldx.
+ * Akin to the pseudocode setting reservation = 0.
+ */
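+    /* (-1 can never match, since lwx/swx addresses are word-aligned.) */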
+ tcg_gen_movi_tl(cpu_res_addr, -1);
+ return true;
+}
+
+static void setup_dslot(DisasContext *dc, bool type_b)
+{
+ dc->tb_flags_to_set |= D_FLAG;
+ if (type_b && (dc->tb_flags & IMM_FLAG)) {
+ dc->tb_flags_to_set |= BIMM_FLAG;
+ }
+}
+
+static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
+ bool delay, bool abs, int link)
+{
+ uint32_t add_pc;
+
+ if (invalid_delay_slot(dc, "branch")) {
+ return true;
+ }
+ if (delay) {
+ setup_dslot(dc, dest_rb < 0);
+ }
+
+ if (link) {
+ tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
+ }
+
+ /* Store the branch taken destination into btarget. */
+ add_pc = abs ? 0 : dc->base.pc_next;
+ if (dest_rb > 0) {
+ dc->jmp_dest = -1;
+ tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
+ } else {
+ dc->jmp_dest = add_pc + dest_imm;
+ tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
+ }
+ dc->jmp_cond = TCG_COND_ALWAYS;
+ return true;
+}
+
+#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
+ { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
+ static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
+ { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }
+
+DO_BR(br, bri, false, false, false)
+DO_BR(bra, brai, false, true, false)
+DO_BR(brd, brid, true, false, false)
+DO_BR(brad, braid, true, true, false)
+DO_BR(brld, brlid, true, false, true)
+DO_BR(brald, bralid, true, true, true)
+
+static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
+ TCGCond cond, int ra, bool delay)
+{
+ TCGv_i32 zero, next;
+
+ if (invalid_delay_slot(dc, "bcc")) {
+ return true;
+ }
+ if (delay) {
+ setup_dslot(dc, dest_rb < 0);
+ }
+
+ dc->jmp_cond = cond;
+
+ /* Cache the condition register in cpu_bvalue across any delay slot. */
+ tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));
+
+ /* Store the branch taken destination into btarget. */
+ if (dest_rb > 0) {
+ dc->jmp_dest = -1;
+ tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
+ } else {
+ dc->jmp_dest = dc->base.pc_next + dest_imm;
+ tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
+ }
+
+ /* Compute the final destination into btarget. */
+ zero = tcg_const_i32(0);
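+    /* The not-taken path resumes after the branch and any delay slot. */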
+ next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
+ tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
+ reg_for_read(dc, ra), zero,
+ cpu_btarget, next);
+ tcg_temp_free_i32(zero);
+ tcg_temp_free_i32(next);
+
+ return true;
+}
+
+#define DO_BCC(NAME, COND) \
+ static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
+ { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
+ static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
+ { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
+ static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
+ { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
+ static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
+ { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }
+
+DO_BCC(beq, TCG_COND_EQ)
+DO_BCC(bge, TCG_COND_GE)
+DO_BCC(bgt, TCG_COND_GT)
+DO_BCC(ble, TCG_COND_LE)
+DO_BCC(blt, TCG_COND_LT)
+DO_BCC(bne, TCG_COND_NE)
+
+static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+ if (invalid_delay_slot(dc, "brk")) {
+ return true;
+ }
+
+ tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
+ if (arg->rd) {
+ tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
+ }
+ tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
+ tcg_gen_movi_tl(cpu_res_addr, -1);
+
+ dc->base.is_jmp = DISAS_EXIT;
+ return true;
+}
+
+static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
+{
+ uint32_t imm = arg->imm;
+
+ if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
+ return true;
+ }
+ if (invalid_delay_slot(dc, "brki")) {
+ return true;
+ }
+
+ tcg_gen_movi_i32(cpu_pc, imm);
+ if (arg->rd) {
+ tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
+ }
+ tcg_gen_movi_tl(cpu_res_addr, -1);
+
+#ifdef CONFIG_USER_ONLY
+ switch (imm) {
+ case 0x8: /* syscall trap */
+ gen_raise_exception_sync(dc, EXCP_SYSCALL);
+ break;
+ case 0x18: /* debug trap */
+ gen_raise_exception_sync(dc, EXCP_DEBUG);
+ break;
+ default: /* eliminated with trap_userspace check */
+ g_assert_not_reached();
+ }
+#else
+ uint32_t msr_to_set = 0;
+
+ if (imm != 0x18) {
+ msr_to_set |= MSR_BIP;
+ }
+ if (imm == 0x8 || imm == 0x18) {
+ /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
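+        /* MSR_UMS/MSR_VMS sit one bit above MSR_UM/MSR_VM, hence << 1. */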
+ msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
+ tcg_gen_andi_i32(cpu_msr, cpu_msr,
+ ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
+ }
+ tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
+ dc->base.is_jmp = DISAS_EXIT;
+#endif
+
+ return true;
+}
+
+static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
+{
+ int mbar_imm = arg->imm;
+
+ /* Note that mbar is a specialized branch instruction. */
+ if (invalid_delay_slot(dc, "mbar")) {
+ return true;
+ }
+
+ /* Data access memory barrier. */
+ if ((mbar_imm & 2) == 0) {
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+ }
+
+ /* Sleep. */
+ if (mbar_imm & 16) {
+ TCGv_i32 tmp_1;
+
+ if (trap_userspace(dc, true)) {
+ /* Sleep is a privileged instruction. */
+ return true;
+ }
+
+ t_sync_flags(dc);
+
+ tmp_1 = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp_1, cpu_env,
+ -offsetof(MicroBlazeCPU, env)
+ +offsetof(CPUState, halted));
+ tcg_temp_free_i32(tmp_1);
+
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);
+
+ gen_raise_exception(dc, EXCP_HLT);
+ }
+
+ /*
+ * If !(mbar_imm & 1), this is an instruction access memory barrier
+ * and we need to end the TB so that we recognize self-modified
+ * code immediately.
+ *
+ * However, there are some data mbars that need the TB break
+ * (and return to main loop) to recognize interrupts right away.
+ * E.g. recognizing a change to an interrupt controller register.
+ *
+ * Therefore, choose to end the TB always.
+ */
+ dc->base.is_jmp = DISAS_EXIT_NEXT;
+ return true;
+}
+
+static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
+{
+ if (trap_userspace(dc, to_set)) {
+ return true;
+ }
+ if (invalid_delay_slot(dc, "rts")) {
+ return true;
+ }
+
+ dc->tb_flags_to_set |= to_set;
+ setup_dslot(dc, true);
+
+ dc->jmp_cond = TCG_COND_ALWAYS;
+ dc->jmp_dest = -1;
+ tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
+ return true;
+}
+
+#define DO_RTS(NAME, IFLAG) \
+ static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
+ { return do_rts(dc, arg, IFLAG); }
+
+DO_RTS(rtbd, DRTB_FLAG)
+DO_RTS(rtid, DRTI_FLAG)
+DO_RTS(rted, DRTE_FLAG)
+DO_RTS(rtsd, 0)
+
+static bool trans_zero(DisasContext *dc, arg_zero *arg)
+{
+ /* If opcode_0_illegal, trap. */
+ if (dc->cfg->opcode_0_illegal) {
+ trap_illegal(dc, true);
+ return true;
+ }
+ /*
+ * Otherwise, this is "add r0, r0, r0".
+ * Continue to trans_add so that MSR[C] gets cleared.
+ */
+ return false;
+}
+
+static void msr_read(DisasContext *dc, TCGv_i32 d)
+{
+ TCGv_i32 t;
+
+ /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
+ t = tcg_temp_new_i32();
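+    /* cpu_msr_c is 0 or 1; the multiply deposits it at MSR_C and MSR_CC. */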
+ tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
+ tcg_gen_or_i32(d, cpu_msr, t);
+ tcg_temp_free_i32(t);
+}
+
+static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
+{
+ uint32_t imm = arg->imm;
+
+ if (trap_userspace(dc, imm != MSR_C)) {
+ return true;
+ }
+
+ if (arg->rd) {
+ msr_read(dc, cpu_R[arg->rd]);
+ }
+
+ /*
+ * Handle the carry bit separately.
+ * This is the only bit that userspace can modify.
+ */
+ if (imm & MSR_C) {
+ tcg_gen_movi_i32(cpu_msr_c, set);
+ }
+
+ /*
+ * MSR_C and MSR_CC set above.
+ * MSR_PVR is not writable, and is always clear.
+ */
+ imm &= ~(MSR_C | MSR_CC | MSR_PVR);
+
+ if (imm != 0) {
+ if (set) {
+ tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
+ } else {
+ tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
+ }
+ dc->base.is_jmp = DISAS_EXIT_NEXT;
+ }
+ return true;
+}
+
+static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
+{
+ return do_msrclrset(dc, arg, false);
+}
+
+static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
+{
+ return do_msrclrset(dc, arg, true);
+}
+
+static bool trans_mts(DisasContext *dc, arg_mts *arg)
+{
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ if (arg->e && arg->rs != 0x1003) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid extended mts reg 0x%x\n", arg->rs);
+ return true;
+ }
+
+ TCGv_i32 src = reg_for_read(dc, arg->ra);
+ switch (arg->rs) {
+ case SR_MSR:
+ /* Install MSR_C. */
+ tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
+ /*
+ * Clear MSR_C and MSR_CC;
+ * MSR_PVR is not writable, and is always clear.
+ */
+ tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
+ break;
+ case SR_FSR:
+ tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
+ break;
+ case 0x800:
+ tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
+ break;
+ case 0x802:
+ tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
+ break;
+
+ case 0x1000: /* PID */
+ case 0x1001: /* ZPR */
+ case 0x1002: /* TLBX */
+ case 0x1003: /* TLBLO */
+ case 0x1004: /* TLBHI */
+ case 0x1005: /* TLBSX */
+ {
+ TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
+ TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);
+
+ gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
+ tcg_temp_free_i32(tmp_reg);
+ tcg_temp_free_i32(tmp_ext);
+ }
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
+ return true;
+ }
+ dc->base.is_jmp = DISAS_EXIT_NEXT;
+ return true;
+#endif
+}
+
+static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
+{
+ TCGv_i32 dest = reg_for_write(dc, arg->rd);
+
+ if (arg->e) {
+ switch (arg->rs) {
+ case SR_EAR:
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
+ tcg_gen_extrh_i64_i32(dest, t64);
+ tcg_temp_free_i64(t64);
+ }
+ return true;
+#ifndef CONFIG_USER_ONLY
+ case 0x1003: /* TLBLO */
+ /* Handled below. */
+ break;
+#endif
+ case 0x2006 ... 0x2009:
+ /* High bits of PVR6-9 not implemented. */
+ tcg_gen_movi_i32(dest, 0);
+ return true;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid extended mfs reg 0x%x\n", arg->rs);
+ return true;
+ }
+ }
+
+ switch (arg->rs) {
+ case SR_PC:
+ tcg_gen_movi_i32(dest, dc->base.pc_next);
+ break;
+ case SR_MSR:
+ msr_read(dc, dest);
+ break;
+ case SR_EAR:
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
+ tcg_gen_extrl_i64_i32(dest, t64);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+ case SR_ESR:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
+ break;
+ case SR_FSR:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
+ break;
+ case SR_BTR:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
+ break;
+ case SR_EDR:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
+ break;
+ case 0x800:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
+ break;
+ case 0x802:
+ tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
+ break;
+
+#ifndef CONFIG_USER_ONLY
+ case 0x1000: /* PID */
+ case 0x1001: /* ZPR */
+ case 0x1002: /* TLBX */
+ case 0x1003: /* TLBLO */
+ case 0x1004: /* TLBHI */
+ case 0x1005: /* TLBSX */
+ {
+ TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
+ TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);
+
+ gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
+ tcg_temp_free_i32(tmp_reg);
+ tcg_temp_free_i32(tmp_ext);
+ }
+ break;
+#endif
+
+ case 0x2000 ... 0x200c:
+ tcg_gen_ld_i32(dest, cpu_env,
+ offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
+ - offsetof(MicroBlazeCPU, env));
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
+ break;
+ }
+ return true;
+}
+
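+/*
+ * The return-from-{interrupt,break,exception} insns restore MSR[UM,VM]
+ * from the saved copies MSR[UMS,VMS], held one bit to the left.  In
+ * addition, rtid sets MSR[IE], rtbd clears MSR[BIP], and rted sets
+ * MSR[EE] while clearing MSR[EIP].
+ */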
+static void do_rti(DisasContext *dc)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(tmp, cpu_msr, 1);
+ tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
+ tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
+ tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
+
+ tcg_temp_free_i32(tmp);
+}
+
+static void do_rtb(DisasContext *dc)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(tmp, cpu_msr, 1);
+ tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
+ tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
+ tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
+
+ tcg_temp_free_i32(tmp);
+}
+
+static void do_rte(DisasContext *dc)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(tmp, cpu_msr, 1);
+ tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
+ tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
+ tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
+ tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
+
+ tcg_temp_free_i32(tmp);
+}
+
+/* Insns connected to FSL or AXI stream attached devices. */
+static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
+{
+ TCGv_i32 t_id, t_ctrl;
+
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+
+ t_id = tcg_temp_new_i32();
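+    /* The stream id is 4 bits: taken from rb for the dynamic forms. */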
+ if (rb) {
+ tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
+ } else {
+ tcg_gen_movi_i32(t_id, imm);
+ }
+
+ t_ctrl = tcg_const_i32(ctrl);
+ gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
+ tcg_temp_free_i32(t_id);
+ tcg_temp_free_i32(t_ctrl);
+ return true;
+}
+
+static bool trans_get(DisasContext *dc, arg_get *arg)
+{
+ return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
+}
+
+static bool trans_getd(DisasContext *dc, arg_getd *arg)
+{
+ return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
+}
+
+static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
+{
+ TCGv_i32 t_id, t_ctrl;
+
+ if (trap_userspace(dc, true)) {
+ return true;
+ }
+
+ t_id = tcg_temp_new_i32();
+ if (rb) {
+ tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
+ } else {
+ tcg_gen_movi_i32(t_id, imm);
+ }
+
+ t_ctrl = tcg_const_i32(ctrl);
+ gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
+ tcg_temp_free_i32(t_id);
+ tcg_temp_free_i32(t_ctrl);
+ return true;
+}
+
+static bool trans_put(DisasContext *dc, arg_put *arg)
+{
+ return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
+}
+
+static bool trans_putd(DisasContext *dc, arg_putd *arg)
+{
+ return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
+}
+
+static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcb, DisasContext, base);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ int bound;
+
+ dc->cfg = &cpu->cfg;
+ dc->tb_flags = dc->base.tb->flags;
+ dc->ext_imm = dc->base.tb->cs_base;
+ dc->r0 = NULL;
+ dc->r0_set = false;
+ dc->mem_index = cpu_mmu_index(&cpu->env, false);
+ dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
+ dc->jmp_dest = -1;
+
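+    /* Bound the TB by the number of insns remaining on this guest page. */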
+ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
+}
+
+static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
+{
+}
+
+static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcb, DisasContext, base);
+
+ tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
+ dc->insn_start = tcg_last_op();
+}
+
+static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcb, DisasContext, base);
+ CPUMBState *env = cs->env_ptr;
+ uint32_t ir;
+
+ /* TODO: This should raise an exception, not terminate qemu. */
+ if (dc->base.pc_next & 3) {
+ cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
+ (uint32_t)dc->base.pc_next);
+ }
+
+ dc->tb_flags_to_set = 0;
+
+ ir = cpu_ldl_code(env, dc->base.pc_next);
+ if (!decode(dc, ir)) {
+ trap_illegal(dc, true);
+ }
+
+ if (dc->r0) {
+ tcg_temp_free_i32(dc->r0);
+ dc->r0 = NULL;
+ dc->r0_set = false;
+ }
+
+ /* Discard the imm global when its contents cannot be used. */
+ if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
+ tcg_gen_discard_i32(cpu_imm);
+ }
+
+ dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
+ dc->tb_flags |= dc->tb_flags_to_set;
+ dc->base.pc_next += 4;
+
+ if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
+ /*
+ * Finish any return-from branch.
+ */
+ uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
+ if (unlikely(rt_ibe != 0)) {
+ dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
+ if (rt_ibe & DRTI_FLAG) {
+ do_rti(dc);
+ } else if (rt_ibe & DRTB_FLAG) {
+ do_rtb(dc);
+ } else {
+ do_rte(dc);
+ }
+ }
+
+ /* Complete the branch, ending the TB. */
+ switch (dc->base.is_jmp) {
+ case DISAS_NORETURN:
+ /*
+ * E.g. illegal insn in a delay slot. We've already exited
+ * and will handle D_FLAG in mb_cpu_do_interrupt.
+ */
+ break;
+ case DISAS_NEXT:
+ /*
+             * Normal insn in a delay slot.
+ * However, the return-from-exception type insns should
+ * return to the main loop, as they have adjusted MSR.
+ */
+ dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
+ break;
+ case DISAS_EXIT_NEXT:
+ /*
+ * E.g. mts insn in a delay slot. Continue with btarget,
+ * but still return to the main loop.
+ */
+ dc->base.is_jmp = DISAS_EXIT_JUMP;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcb, DisasContext, base);
+
+ if (dc->base.is_jmp == DISAS_NORETURN) {
+ /* We have already exited the TB. */
+ return;
+ }
+
+ t_sync_flags(dc);
+
+ switch (dc->base.is_jmp) {
+ case DISAS_TOO_MANY:
+ gen_goto_tb(dc, 0, dc->base.pc_next);
+ return;
+
+ case DISAS_EXIT:
+ break;
+ case DISAS_EXIT_NEXT:
+ tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
+ break;
+ case DISAS_EXIT_JUMP:
+ tcg_gen_mov_i32(cpu_pc, cpu_btarget);
+ tcg_gen_discard_i32(cpu_btarget);
+ break;
+
+ case DISAS_JUMP:
+ if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
+ /* Direct jump. */
+ tcg_gen_discard_i32(cpu_btarget);
+
+ if (dc->jmp_cond != TCG_COND_ALWAYS) {
+ /* Conditional direct jump. */
+ TCGLabel *taken = gen_new_label();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ /*
+ * Copy bvalue to a temp now, so we can discard bvalue.
+ * This can avoid writing bvalue to memory when the
+ * delay slot cannot raise an exception.
+ */
+ tcg_gen_mov_i32(tmp, cpu_bvalue);
+ tcg_gen_discard_i32(cpu_bvalue);
+
+ tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
+ gen_goto_tb(dc, 1, dc->base.pc_next);
+ gen_set_label(taken);
+ }
+ gen_goto_tb(dc, 0, dc->jmp_dest);
+ return;
+ }
+
+ /* Indirect jump (or direct jump w/ goto_tb disabled) */
+ tcg_gen_mov_i32(cpu_pc, cpu_btarget);
+ tcg_gen_discard_i32(cpu_btarget);
+ tcg_gen_lookup_and_goto_ptr();
+ return;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Finish DISAS_EXIT_* */
+ if (unlikely(cs->singlestep_enabled)) {
+ gen_raise_exception(dc, EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+}
+
+static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
+ log_target_disas(cs, dcb->pc_first, dcb->tb->size);
+}
+
+static const TranslatorOps mb_tr_ops = {
+ .init_disas_context = mb_tr_init_disas_context,
+ .tb_start = mb_tr_tb_start,
+ .insn_start = mb_tr_insn_start,
+ .translate_insn = mb_tr_translate_insn,
+ .tb_stop = mb_tr_tb_stop,
+ .disas_log = mb_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+ translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
+}
+
+void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t iflags;
+ int i;
+
+ qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
+ env->pc, env->msr,
+ (env->msr & MSR_UM) ? "user" : "kernel",
+ (env->msr & MSR_UMS) ? "user" : "kernel",
+ (bool)(env->msr & MSR_EIP),
+ (bool)(env->msr & MSR_IE));
+
+ iflags = env->iflags;
+ qemu_fprintf(f, "iflags: 0x%08x", iflags);
+ if (iflags & IMM_FLAG) {
+ qemu_fprintf(f, " IMM(0x%08x)", env->imm);
+ }
+ if (iflags & BIMM_FLAG) {
+ qemu_fprintf(f, " BIMM");
+ }
+ if (iflags & D_FLAG) {
+ qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
+ }
+ if (iflags & DRTI_FLAG) {
+ qemu_fprintf(f, " DRTI");
+ }
+ if (iflags & DRTE_FLAG) {
+ qemu_fprintf(f, " DRTE");
+ }
+ if (iflags & DRTB_FLAG) {
+ qemu_fprintf(f, " DRTB");
+ }
+ if (iflags & ESR_ESS_FLAG) {
+ qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
+ }
+
+ qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
+ "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
+ env->esr, env->fsr, env->btr, env->edr,
+ env->ear, env->slr, env->shr);
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "r%2.2d=%08x%c",
+ i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
+ }
+ qemu_fprintf(f, "\n");
+}
+
+void mb_tcg_init(void)
+{
+#define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
+#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
+
+ static const struct {
+ TCGv_i32 *var; int ofs; char name[8];
+ } i32s[] = {
+ /*
+ * Note that r0 is handled specially in reg_for_read
+ * and reg_for_write. Nothing should touch cpu_R[0].
+ * Leave that element NULL, which will assert quickly
+ * inside the tcg generator functions.
+ */
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
+ R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
+ R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
+
+ SP(pc),
+ SP(msr),
+ SP(msr_c),
+ SP(imm),
+ SP(iflags),
+ SP(bvalue),
+ SP(btarget),
+ SP(res_val),
+ };
+
+#undef R
+#undef SP
+
+ for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
+ *i32s[i].var =
+ tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
+ }
+
+ cpu_res_addr =
+ tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
+}
+
+void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+ env->iflags = data[1];
+}
diff --git a/target/mips/Kconfig b/target/mips/Kconfig
new file mode 100644
index 000000000..6adf14535
--- /dev/null
+++ b/target/mips/Kconfig
@@ -0,0 +1,6 @@
+config MIPS
+ bool
+
+config MIPS64
+ bool
+ select MIPS
diff --git a/target/mips/TODO b/target/mips/TODO
new file mode 100644
index 000000000..1d782d802
--- /dev/null
+++ b/target/mips/TODO
@@ -0,0 +1,51 @@
+Unsolved issues/bugs in the mips/mipsel backend
+-----------------------------------------------
+
+General
+-------
+- Unimplemented ASEs:
+ - MDMX
+ - SmartMIPS
+ - microMIPS DSP r1 & r2 encodings
+- MT ASE only partially implemented and not functional
+- Shadow register support only partially implemented,
+ lacks set switching on interrupt/exception.
+- 34K ITC not implemented.
+- A general lack of documentation, especially for technical internals.
+ Existing documentation is x86-centric.
+- Reverse endianness bit not implemented
+- The TLB emulation is very inefficient:
+  QEMU's softmmu implements an x86-style MMU, with separate entries
+ for read/write/execute, a TLB index which is just a modulo of the
+ virtual address, and a set of TLBs for each user/kernel/supervisor
+ MMU mode.
+ MIPS has a single entry for read/write/execute and only one MMU mode.
+ But it is fully associative with randomized entry indices, and uses
+ up to 256 ASID tags as additional matching criterion (which roughly
+ equates to 256 MMU modes). It also has a global flag which causes
+ entries to match regardless of ASID.
+  To cope with these differences, QEMU currently flushes the TLB at
+  each ASID change (see the sketch below). Using the MMU modes to
+  implement ASIDs hinges on implementing the global bit efficiently.
+- save/restore of the CPU state is not implemented (see machine.c).
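+
+  A minimal sketch of the current flush-on-ASID-change strategy (the
+  helper name below is illustrative, not the actual code):
+
+    static void entryhi_asid_write(CPUMIPSState *env, target_ulong val)
+    {
+        /* Flush every cached softmmu mapping when the ASID changes. */
+        if ((val & 0xff) != (env->CP0_EntryHi & 0xff)) {
+            tlb_flush(env_cpu(env));
+        }
+        env->CP0_EntryHi = val;
+    }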
+
+MIPS64
+------
+- Userland emulation (both n32 and n64) not functional.
+
+"Generic" 4Kc system emulation
+------------------------------
+- Doesn't correspond to any real hardware. Should be removed some day;
+  U-Boot is the last remaining user.
+
+PICA 61 system emulation
+------------------------
+- No framebuffer support yet.
+
+MALTA system emulation
+----------------------
+- We fake firmware support instead of doing the real thing
+- Real firmware (YAMON) falls over when trying to init RAM, presumably
+ due to lacking system controller emulation.
+- Bonito system controller not implemented
+- MSC1 system controller not implemented
diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc
new file mode 100644
index 000000000..582f94007
--- /dev/null
+++ b/target/mips/cpu-defs.c.inc
@@ -0,0 +1,977 @@
+/*
+ * MIPS emulation for qemu: CPU initialisation routines.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2007 Herve Poussineau
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* CPU / CPU family specific config register values. */
+
+/* Have config1, uncached coherency */
+#define MIPS_CONFIG0 \
+ ((1U << CP0C0_M) | (0x2 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+ no performance counters, watch registers present,
+ no code compression, EJTAG present, no FPU */
+#define MIPS_CONFIG1 \
+((1U << CP0C1_M) | \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
+ (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2 \
+((1U << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+ no external interrupt controller, no vectored interrupts,
+ no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3 \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+#define MIPS_CONFIG4 \
+((0 << CP0C4_M))
+
+#define MIPS_CONFIG5 \
+((0 << CP0C5_M))
+
+/*****************************************************************************/
+/* MIPS CPU definitions */
+const mips_def_t mips_defs[] =
+{
+ {
+ .name = "4Kc",
+ .CP0_PRid = 0x00018000,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R1,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4Km",
+ .CP0_PRid = 0x00018300,
+ /* Config1 implemented, fixed mapping MMU,
+ no virtual icache, uncached coherency. */
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R1 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "4KEcR1",
+ .CP0_PRid = 0x00018400,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R1,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4KEmR1",
+ .CP0_PRid = 0x00018500,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R1 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "4KEc",
+ .CP0_PRid = 0x00019000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4KEm",
+ .CP0_PRid = 0x00019100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "24Kc",
+ .CP0_PRid = 0x00019300,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* No DSP implemented. */
+ .CP0_Status_rw_bitmask = 0x1278FF1F,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "24KEc",
+ .CP0_PRid = 0x00019600,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSPP) | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* we have a DSP, but no FPU */
+ .CP0_Status_rw_bitmask = 0x1378FF1F,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "24Kf",
+ .CP0_PRid = 0x00019300,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* No DSP implemented. */
+ .CP0_Status_rw_bitmask = 0x3678FF1F,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "34Kf",
+ .CP0_PRid = 0x00019500,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) |
+ (1 << CP0C3_DSPP),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3778FF1F,
+ .CP0_TCStatus_rw_bitmask = (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) |
+ (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) |
+ (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) |
+ (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) |
+ (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) |
+ (0xff << CP0TCSt_TASID),
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .CP0_SRSCtl = (0xf << CP0SRSCtl_HSS),
+ .CP0_SRSConf0_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) |
+ (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1),
+ .CP0_SRSConf1_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf1 = (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) |
+ (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4),
+ .CP0_SRSConf2_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf2 = (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) |
+ (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7),
+ .CP0_SRSConf3_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf3 = (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) |
+ (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10),
+ .CP0_SRSConf4_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf4 = (0x3fe << CP0SRSC4_SRS15) |
+ (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13),
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "74Kf",
+ .CP0_PRid = 0x00019700,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) |
+ (1 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3778FF1F,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSP_R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "M14K",
+ .CP0_PRid = 0x00019b00,
+ /* Config1 implemented, fixed mapping MMU,
+ no virtual icache, uncached coherency. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_KU) | (0x2 << CP0C0_K23) |
+ (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1,
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "M14Kc",
+ /* This is the TLB-based MMU core. */
+ .CP0_PRid = 0x00019c00,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* FIXME:
+ * Config3: VZ, CTXTC, CDMM, TL
+ * Config4: MMUExtDef
+ * Config5: MRP
+         */
+ .name = "P5600",
+ .CP0_PRid = 0x0001A800,
+ .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (0x3F << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_FP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) |
+ (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_SC) |
+ (1 << CP0C3_PW) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) |
+ (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) |
+ (0x1c << CP0C4_KScrExist),
+ .CP0_Config4_rw_bitmask = 0,
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_EVA) | (1 << CP0C5_MVH) |
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
+ (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_UFR),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3C68FF1F,
+ .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) |
+ (1 << CP0PG_ELPA) | (1 << CP0PG_IEC),
+ .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_UFRP) | (1 << FCR0_HAS2008) |
+ (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 40,
+ .insn_flags = CPU_MIPS32R5,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU supporting MIPS32 Release 6 ISA.
+ FIXME: Support IEEE 754-2008 FP.
+ Eventually this should be replaced by a real CPU model. */
+ .name = "mips32r6-generic",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) |
+ (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1U << CP0C3_M),
+ .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) |
+ (3 << CP0C4_IE) | (1U << CP0C4_M),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) |
+ (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3058FF1F,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = 0,
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "I7200",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (0x2 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = (1U << CP0C1_M) | (15 << CP0C1_MMU) | (2 << CP0C1_IS) |
+ (4 << CP0C1_IL) | (3 << CP0C1_IA) | (2 << CP0C1_DS) |
+ (4 << CP0C1_DL) | (3 << CP0C1_DA) | (1 << CP0C1_PC) |
+ (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_CMGCR) |
+ (1 << CP0C3_BI) | (1 << CP0C3_SC) | (3 << CP0C3_MMAR) |
+ (1 << CP0C3_ISA_ON_EXC) | (1 << CP0C3_ISA) |
+ (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) |
+ (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) |
+ (1 << CP0C3_CTXTC) | (1 << CP0C3_VInt) |
+ (1 << CP0C3_CDMM) | (1 << CP0C3_MT) | (1 << CP0C3_TL),
+ .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) |
+ (2 << CP0C4_IE) | (1U << CP0C4_M),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_MVH) | (1 << CP0C5_LLB),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) |
+ (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3158FF1F,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = 0,
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x02 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R6 | ISA_NANOMIPS32 |
+ ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | ASE_MT,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+#if defined(TARGET_MIPS64)
+ {
+ .name = "R4000",
+ .CP0_PRid = 0x00000400,
+ /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */
+ .CP0_Config0 = (2 << CP0C0_Impl) | (1 << CP0C0_IC) | (1 << CP0C0_DC) |
+ (2 << CP0C0_K0),
+ /* Note: Config1 is only used internally, the R4000 has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3678FFFF,
+        /* The R4000 has a full 64-bit FPU but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0x0183FFFF,
+ .SEGBITS = 40,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS3,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "VR5432",
+ .CP0_PRid = 0x00005400,
+ /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */
+ .CP0_Config0 = (2 << CP0C0_Impl) | (1 << CP0C0_IC) | (1 << CP0C0_DC) |
+ (2 << CP0C0_K0),
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .CP0_LLAddr_rw_bitmask = 0xFFFFFFFFL,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3678FFFF,
+        /* The VR5432 has a full 64-bit FPU but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS4 | INSN_VR54XX,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5Kc",
+ .CP0_PRid = 0x00018100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R1,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5Kf",
+ .CP0_PRid = 0x00018100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36F8FFFF,
+ /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x81 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R1,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "20Kc",
+ /* We emulate a later version of the 20Kc, earlier ones had a broken
+ WAIT instruction. */
+ .CP0_PRid = 0x000182a0,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 1,
+ .CP0_Status_rw_bitmask = 0x36FBFFFF,
+ /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x82 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R1 | ASE_MIPS3D,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU providing MIPS64 Release 2 features.
+ FIXME: Eventually this should be replaced by a real CPU model. */
+ .name = "MIPS64R2-generic",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36FBFFFF,
+ .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5KEc",
+ .CP0_PRid = 0x00018900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5KEf",
+ .CP0_PRid = 0x00018900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36F8FFFF,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x89 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "I6400",
+ .CP0_PRid = 0x1A900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) |
+ (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
+ (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x30D8FFFF,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+ .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .MSAIR = 0x03 << MSAIR_ProcID,
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_MIPS64R6,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "I6500",
+ .CP0_PRid = 0x1B000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) |
+ (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
+ (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 64,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x30D8FFFF,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+ .CP0_EBaseWG_rw_bitmask = (1 << CP0EBase_WG),
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .MSAIR = 0x03 << MSAIR_ProcID,
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_MIPS64R6,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-2E",
+ .CP0_PRid = 0x6302,
+        /* 64KB I-cache and D-cache, 4-way, with 32-byte cache lines. */
+ .CP0_Config0 = (3 << CP0C0_Impl) | (4 << CP0C0_IC) | (4 << CP0C0_DC) |
+ (1 << CP0C0_IB) | (1 << CP0C0_DB) | (0x2 << CP0C0_K0),
+        /* Note: Config1 is only used internally;
+           the Loongson-2E has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x35D0FFFF,
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 40,
+ .insn_flags = CPU_MIPS3 | INSN_LOONGSON2E,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-2F",
+ .CP0_PRid = 0x6303,
+        /* 64KB I-cache and D-cache, 4-way, with 32-byte cache lines. */
+ .CP0_Config0 = (3 << CP0C0_Impl) | (4 << CP0C0_IC) | (4 << CP0C0_DC) |
+ (1 << CP0C0_IB) | (1 << CP0C0_DB) | (0x2 << CP0C0_K0),
+        /* Note: Config1 is only used internally;
+           the Loongson-2F has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 40,
+ .insn_flags = CPU_MIPS3 | INSN_LOONGSON2F | ASE_LMMI,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-3A1000", /* Loongson-3A R1, GS464-based */
+ .CP0_PRid = 0x6305,
+        /* 64KB I-cache and D-cache, 4-way, with 32-byte cache lines. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (3 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (3 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2 | (7 << CP0C2_SS) | (4 << CP0C2_SL) |
+ (3 << CP0C2_SA),
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x74D8FFFF,
+ .CP0_PageGrain = (1 << CP0PG_ELPA),
+ .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV) | (0x1 << FCR0_F64) |
+ (0x1 << FCR0_PS) | (0x1 << FCR0_L) | (0x1 << FCR0_W) |
+ (0x1 << FCR0_D) | (0x1 << FCR0_S),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A |
+ ASE_LMMI | ASE_LEXT,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-3A4000", /* Loongson-3A R4, GS464V-based */
+ .CP0_PRid = 0x14C000,
+        /* 64KB I-cache and D-cache, 4-way, with 64-byte cache lines. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2 | (5 << CP0C2_SS) | (5 << CP0C2_SL) |
+ (15 << CP0C2_SA),
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) |
+ (1 << CP0C4_AE) | (0x1c << CP0C4_KScrExist),
+ .CP0_Config4_rw_bitmask = 0,
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_CRCP) | (1 << CP0C5_NFExists),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
+ (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_SBRI),
+ .CP0_Config6 = (1 << CP0C6_VCLRU) | (1 << CP0C6_DCLRU) |
+ (1 << CP0C6_SFBEN) | (1 << CP0C6_VLTINT) |
+ (1 << CP0C6_INSTPREF) | (1 << CP0C6_DATAPREF),
+ .CP0_Config6_rw_bitmask = (1 << CP0C6_BPPASS) | (0x3f << CP0C6_KPOS) |
+ (1 << CP0C6_KE) | (1 << CP0C6_VTLBONLY) |
+ (1 << CP0C6_LASX) | (1 << CP0C6_SSEN) |
+ (1 << CP0C6_DISDRTIME) | (1 << CP0C6_PIXNUEN) |
+ (1 << CP0C6_SCRAND) | (1 << CP0C6_LLEXCEN) |
+ (1 << CP0C6_DISVC) | (1 << CP0C6_VCLRU) |
+ (1 << CP0C6_DCLRU) | (1 << CP0C6_PIXUEN) |
+ (1 << CP0C6_DISBLKLYEN) | (1 << CP0C6_UMEMUALEN) |
+ (1 << CP0C6_SFBEN) | (1 << CP0C6_FLTINT) |
+ (1 << CP0C6_VLTINT) | (1 << CP0C6_DISBTB) |
+ (3 << CP0C6_STPREFCTL) | (1 << CP0C6_INSTPREF) |
+ (1 << CP0C6_DATAPREF),
+ .CP0_Config7 = 0,
+ .CP0_Config7_rw_bitmask = (1 << CP0C7_NAPCGEN) | (1 << CP0C7_UNIMUEN) |
+ (1 << CP0C7_VFPUCGEN),
+ .CP0_LLAddr_rw_bitmask = 1,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x7DDBFFFF,
+ .CP0_PageGrain = (1 << CP0PG_ELPA),
+ .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) |
+ (1 << CP0PG_ELPA) | (1 << CP0PG_IEC),
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV) | (0x1 << FCR0_F64) |
+ (0x1 << FCR0_PS) | (0x1 << FCR0_L) | (0x1 << FCR0_W) |
+ (0x1 << FCR0_D) | (0x1 << FCR0_S),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .MSAIR = (0x01 << MSAIR_ProcID) | (0x40 << MSAIR_Rev),
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A |
+ ASE_LMMI | ASE_LEXT,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU providing MIPS64 DSP R2 ASE features.
+ FIXME: Eventually this should be replaced by a real CPU model. */
+ .name = "mips64dspr2",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) |
+ (1 << CP0C3_DSPP) | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x37FBFFFF,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSP_R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+
+#endif
+};
+const int mips_defs_number = ARRAY_SIZE(mips_defs);
+
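+/* Called via the cpu_list() hook, e.g. to print "-cpu help" listings. */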
+void mips_cpu_list(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mips_defs); i++) {
+ qemu_printf("MIPS '%s'\n", mips_defs[i].name);
+ }
+}
+
+static void fpu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+    int i;
+
+    for (i = 0; i < MIPS_FPU_MAX; i++) {
+        env->fpus[i].fcr0 = def->CP1_fcr0;
+    }
+
+    memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu));
+}
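+
+/*
+ * fcr0 advertises the FPU's capabilities; a minimal sketch of probing
+ * one of them (hypothetical helper, using the FCR0_F64 bit from cpu.h):
+ */
+static inline bool mips_fpu_has_f64(const CPUMIPSState *env)
+{
+    return env->active_fpu.fcr0 & (1 << FCR0_F64);
+}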
+
+static void mvp_init(CPUMIPSState *env)
+{
+ env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext));
+
+ if (!ase_mt_available(env)) {
+ return;
+ }
+
+    /*
+     * MVPConf1 implemented, TLB shareable, no gating storage support,
+     * programmable cache partitioning implemented, number of allocatable
+     * and shareable TLB entries, MVP has allocatable TCs, 2 VPEs
+     * implemented, 5 TCs implemented.
+     */
+ env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) |
+ (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) |
+// TODO: actually do 2 VPEs.
+// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) |
+// (0x04 << CP0MVPC0_PTC);
+ (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) |
+ (0x00 << CP0MVPC0_PTC);
+#if !defined(CONFIG_USER_ONLY)
+ /* Usermode has no TLB support */
+ env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE);
+#endif
+
+    /*
+     * Allocatable CP1 units have media extensions and FP support,
+     * no UDI implemented, no CP2 implemented, 1 CP1 implemented.
+     */
+ env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) |
+ (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) |
+ (0x1 << CP0MVPC1_PCP1);
+}
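+
+/*
+ * A matching sketch for reading a field back out of MVPConf0, e.g. the
+ * number of allocatable TCs (hypothetical helper; CP0MVPC0_PTC is the
+ * 8-bit field's LSB position as defined in cpu.h):
+ */
+static inline int mvp_num_tcs(const CPUMIPSState *env)
+{
+    return ((env->mvp->CP0_MVPConf0 >> CP0MVPC0_PTC) & 0xff) + 1;
+}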
diff --git a/target/mips/cpu-param.h b/target/mips/cpu-param.h
new file mode 100644
index 000000000..9c4a6ea45
--- /dev/null
+++ b/target/mips/cpu-param.h
@@ -0,0 +1,34 @@
+/*
+ * MIPS cpu parameters for qemu.
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef MIPS_CPU_PARAM_H
+#define MIPS_CPU_PARAM_H 1
+
+#ifdef TARGET_MIPS64
+# define TARGET_LONG_BITS 64
+#else
+# define TARGET_LONG_BITS 32
+#endif
+#ifdef TARGET_MIPS64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 40
+# ifdef CONFIG_USER_ONLY
+# define TARGET_VIRT_ADDR_SPACE_BITS 31
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# endif
+#endif
+#ifdef CONFIG_USER_ONLY
+#define TARGET_PAGE_BITS 12
+#else
+#define TARGET_PAGE_BITS_VARY
+#define TARGET_PAGE_BITS_MIN 12
+#endif
+#define NB_MMU_MODES 4
+
+#endif
diff --git a/target/mips/cpu-qom.h b/target/mips/cpu-qom.h
new file mode 100644
index 000000000..dda0c911f
--- /dev/null
+++ b/target/mips/cpu-qom.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_MIPS_CPU_QOM_H
+#define QEMU_MIPS_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#ifdef TARGET_MIPS64
+#define TYPE_MIPS_CPU "mips64-cpu"
+#else
+#define TYPE_MIPS_CPU "mips-cpu"
+#endif
+
+OBJECT_DECLARE_TYPE(MIPSCPU, MIPSCPUClass,
+ MIPS_CPU)
+
+/**
+ * MIPSCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A MIPS CPU model.
+ */
+struct MIPSCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+ const struct mips_def_t *cpu_def;
+
+ /* Used for the jazz board to modify mips_cpu_do_transaction_failed. */
+ bool no_data_aborts;
+};
+
+
+#endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
new file mode 100644
index 000000000..4aae23934
--- /dev/null
+++ b/target/mips/cpu.c
@@ -0,0 +1,679 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/qemu-print.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "internal.h"
+#include "kvm_mips.h"
+#include "qemu/module.h"
+#include "sysemu/kvm.h"
+#include "sysemu/qtest.h"
+#include "exec/exec-all.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-clock.h"
+#include "semihosting/semihost.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "fpu_helper.h"
+
+const char regnames[32][3] = {
+ "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra",
+};
+
+static void fpu_dump_fpr(fpr_t *fpr, FILE *f, bool is_fpu64)
+{
+ if (is_fpu64) {
+        qemu_fprintf(f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu:%13g\n",
+ fpr->w[FP_ENDIAN_IDX], fpr->d,
+ (double)fpr->fd,
+ (double)fpr->fs[FP_ENDIAN_IDX],
+ (double)fpr->fs[!FP_ENDIAN_IDX]);
+ } else {
+ fpr_t tmp;
+
+ tmp.w[FP_ENDIAN_IDX] = fpr->w[FP_ENDIAN_IDX];
+ tmp.w[!FP_ENDIAN_IDX] = (fpr + 1)->w[FP_ENDIAN_IDX];
+ qemu_fprintf(f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu:%13g\n",
+ tmp.w[FP_ENDIAN_IDX], tmp.d,
+ (double)tmp.fd,
+ (double)tmp.fs[FP_ENDIAN_IDX],
+ (double)tmp.fs[!FP_ENDIAN_IDX]);
+ }
+}
+
+static void fpu_dump_state(CPUMIPSState *env, FILE *f, int flags)
+{
+ int i;
+ bool is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64);
+
+ qemu_fprintf(f,
+ "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n",
+ env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64,
+ get_float_exception_flags(&env->active_fpu.fp_status));
+ for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
+ qemu_fprintf(f, "%3s: ", fregnames[i]);
+ fpu_dump_fpr(&env->active_fpu.fpr[i], f, is_fpu64);
+ }
+}
+
+static void mips_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "pc=0x" TARGET_FMT_lx " HI=0x" TARGET_FMT_lx
+ " LO=0x" TARGET_FMT_lx " ds %04x "
+ TARGET_FMT_lx " " TARGET_FMT_ld "\n",
+ env->active_tc.PC, env->active_tc.HI[0], env->active_tc.LO[0],
+ env->hflags, env->btarget, env->bcond);
+ for (i = 0; i < 32; i++) {
+ if ((i & 3) == 0) {
+ qemu_fprintf(f, "GPR%02d:", i);
+ }
+ qemu_fprintf(f, " %s " TARGET_FMT_lx,
+ regnames[i], env->active_tc.gpr[i]);
+ if ((i & 3) == 3) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+
+ qemu_fprintf(f, "CP0 Status 0x%08x Cause 0x%08x EPC 0x"
+ TARGET_FMT_lx "\n",
+ env->CP0_Status, env->CP0_Cause, env->CP0_EPC);
+ qemu_fprintf(f, " Config0 0x%08x Config1 0x%08x LLAddr 0x%016"
+ PRIx64 "\n",
+ env->CP0_Config0, env->CP0_Config1, env->CP0_LLAddr);
+ qemu_fprintf(f, " Config2 0x%08x Config3 0x%08x\n",
+ env->CP0_Config2, env->CP0_Config3);
+ qemu_fprintf(f, " Config4 0x%08x Config5 0x%08x\n",
+ env->CP0_Config4, env->CP0_Config5);
+ if ((flags & CPU_DUMP_FPU) && (env->hflags & MIPS_HFLAG_FPU)) {
+ fpu_dump_state(env, f, flags);
+ }
+}
+
+void cpu_set_exception_base(int vp_index, target_ulong address)
+{
+ MIPSCPU *vp = MIPS_CPU(qemu_get_cpu(vp_index));
+ vp->env.exception_base = address;
+}
+
+static void mips_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+
+ mips_env_set_pc(&cpu->env, value);
+}
+
+static bool mips_cpu_has_work(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool has_work = false;
+
+    /*
+     * Prior to MIPS Release 6 it is implementation dependent whether
+     * non-enabled interrupts wake up the CPU; however, most
+     * implementations only check for interrupts that can be taken.
+     */
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ if (cpu_mips_hw_interrupts_enabled(env) ||
+ (env->insn_flags & ISA_MIPS_R6)) {
+ has_work = true;
+ }
+ }
+
+ /* MIPS-MT has the ability to halt the CPU. */
+ if (ase_mt_available(env)) {
+        /*
+         * The QEMU model will issue a _WAKE request whenever the CPUs
+         * should be woken up.
+         */
+ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+ has_work = true;
+ }
+
+ if (!mips_vpe_active(env)) {
+ has_work = false;
+ }
+ }
+ /* MIPS Release 6 has the ability to halt the CPU. */
+ if (env->CP0_Config5 & (1 << CP0C5_VP)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+ has_work = true;
+ }
+ if (!mips_vp_active(env)) {
+ has_work = false;
+ }
+ }
+ return has_work;
+}
+
+#include "cpu-defs.c.inc"
+
+static void mips_cpu_reset(DeviceState *dev)
+{
+ CPUState *cs = CPU(dev);
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
+ CPUMIPSState *env = &cpu->env;
+
+ mcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUMIPSState, end_reset_fields));
+
+ /* Reset registers to their default values */
+ env->CP0_PRid = env->cpu_model->CP0_PRid;
+ env->CP0_Config0 = env->cpu_model->CP0_Config0;
+#ifdef TARGET_WORDS_BIGENDIAN
+ env->CP0_Config0 |= (1 << CP0C0_BE);
+#endif
+ env->CP0_Config1 = env->cpu_model->CP0_Config1;
+ env->CP0_Config2 = env->cpu_model->CP0_Config2;
+ env->CP0_Config3 = env->cpu_model->CP0_Config3;
+ env->CP0_Config4 = env->cpu_model->CP0_Config4;
+ env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask;
+ env->CP0_Config5 = env->cpu_model->CP0_Config5;
+ env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask;
+ env->CP0_Config6 = env->cpu_model->CP0_Config6;
+ env->CP0_Config6_rw_bitmask = env->cpu_model->CP0_Config6_rw_bitmask;
+ env->CP0_Config7 = env->cpu_model->CP0_Config7;
+ env->CP0_Config7_rw_bitmask = env->cpu_model->CP0_Config7_rw_bitmask;
+ env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask
+ << env->cpu_model->CP0_LLAddr_shift;
+ env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift;
+ env->SYNCI_Step = env->cpu_model->SYNCI_Step;
+ env->CCRes = env->cpu_model->CCRes;
+ env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask;
+ env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask;
+ env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl;
+ env->current_tc = 0;
+ env->SEGBITS = env->cpu_model->SEGBITS;
+ env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1);
+#if defined(TARGET_MIPS64)
+ if (env->cpu_model->insn_flags & ISA_MIPS3) {
+ env->SEGMask |= 3ULL << 62;
+ }
+#endif
+ env->PABITS = env->cpu_model->PABITS;
+ env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask;
+ env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0;
+ env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask;
+ env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1;
+ env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask;
+ env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2;
+ env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask;
+ env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
+ env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
+ env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
+ env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
+ env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
+ env->CP0_EBaseWG_rw_bitmask = env->cpu_model->CP0_EBaseWG_rw_bitmask;
+ env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
+ env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
+ env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
+ env->msair = env->cpu_model->MSAIR;
+ env->insn_flags = env->cpu_model->insn_flags;
+
+#if defined(CONFIG_USER_ONLY)
+ env->CP0_Status = (MIPS_HFLAG_UM << CP0St_KSU);
+# ifdef TARGET_MIPS64
+ /* Enable 64-bit register mode. */
+ env->CP0_Status |= (1 << CP0St_PX);
+# endif
+# ifdef TARGET_ABI_MIPSN64
+ /* Enable 64-bit address mode. */
+ env->CP0_Status |= (1 << CP0St_UX);
+# endif
+ /*
+ * Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR
+ * hardware registers.
+ */
+ env->CP0_HWREna |= 0x0000000F;
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ env->CP0_Status |= (1 << CP0St_CU1);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_DSPP)) {
+ env->CP0_Status |= (1 << CP0St_MX);
+ }
+# if defined(TARGET_MIPS64)
+ /* For MIPS64, init FR bit to 1 if FPU unit is there and bit is writable. */
+ if ((env->CP0_Config1 & (1 << CP0C1_FP)) &&
+ (env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ }
+# endif
+#else /* !CONFIG_USER_ONLY */
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ /*
+ * If the exception was raised from a delay slot,
+ * come back to the jump.
+ */
+ env->CP0_ErrorEPC = (env->active_tc.PC
+ - (env->hflags & MIPS_HFLAG_B16 ? 2 : 4));
+ } else {
+ env->CP0_ErrorEPC = env->active_tc.PC;
+ }
+ env->active_tc.PC = env->exception_base;
+ env->CP0_Random = env->tlb->nb_tlb - 1;
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+ env->CP0_Wired = 0;
+ env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId;
+ env->CP0_EBase = (cs->cpu_index & 0x3FF);
+ if (mips_um_ksegs_enabled()) {
+ env->CP0_EBase |= 0x40000000;
+ } else {
+ env->CP0_EBase |= (int32_t)0x80000000;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) {
+ env->CP0_CMGCRBase = 0x1fbf8000 >> 4;
+ }
+ env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
+ 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
+ env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL);
+ /*
+ * Vectored interrupts not implemented, timer on int 7,
+ * no performance counters.
+ */
+ env->CP0_IntCtl = 0xe0000000;
+ {
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ env->CP0_WatchLo[i] = 0;
+ env->CP0_WatchHi[i] = 0x80000000;
+ }
+ env->CP0_WatchLo[7] = 0;
+ env->CP0_WatchHi[7] = 0;
+ }
+ /* Count register increments in debug mode, EJTAG version 1 */
+ env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER);
+
+ cpu_mips_store_count(env, 1);
+
+ if (ase_mt_available(env)) {
+ int i;
+
+ /* Only TC0 on VPE 0 starts as active. */
+ for (i = 0; i < ARRAY_SIZE(env->tcs); i++) {
+ env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE;
+ env->tcs[i].CP0_TCHalt = 1;
+ }
+ env->active_tc.CP0_TCHalt = 1;
+ cs->halted = 1;
+
+ if (cs->cpu_index == 0) {
+ /* VPE0 starts up enabled. */
+ env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+
+ /* TC0 starts up unhalted. */
+ cs->halted = 0;
+ env->active_tc.CP0_TCHalt = 0;
+ env->tcs[0].CP0_TCHalt = 0;
+ /* With thread 0 active. */
+ env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A);
+ env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A);
+ }
+ }
+
+ /*
+ * Configure default legacy segmentation control. We use this regardless of
+ * whether segmentation control is presented to the guest.
+ */
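+    /*
+     * Each SegCtl register packs two segment configurations: the lower
+     * numbered segment of each pair in bits 15..0 and the higher one in
+     * bits 31..16, hence the "<< 16" on every second assignment below.
+     */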
+ /* KSeg3 (seg0 0xE0000000..0xFFFFFFFF) */
+ env->CP0_SegCtl0 = (CP0SC_AM_MK << CP0SC_AM);
+ /* KSeg2 (seg1 0xC0000000..0xDFFFFFFF) */
+    env->CP0_SegCtl0 |= (CP0SC_AM_MSK << CP0SC_AM) << 16;
+    /* KSeg1 (seg2 0xA0000000..0xBFFFFFFF) */
+ env->CP0_SegCtl1 = (0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
+ (2 << CP0SC_C);
+ /* KSeg0 (seg3 0x80000000..0x9FFFFFFF) */
+ env->CP0_SegCtl1 |= ((0 << CP0SC_PA) | (CP0SC_AM_UK << CP0SC_AM) |
+ (3 << CP0SC_C)) << 16;
+ /* USeg (seg4 0x40000000..0x7FFFFFFF) */
+ env->CP0_SegCtl2 = (2 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
+ (1 << CP0SC_EU) | (2 << CP0SC_C);
+ /* USeg (seg5 0x00000000..0x3FFFFFFF) */
+ env->CP0_SegCtl2 |= ((0 << CP0SC_PA) | (CP0SC_AM_MUSK << CP0SC_AM) |
+ (1 << CP0SC_EU) | (2 << CP0SC_C)) << 16;
+ /* XKPhys (note, SegCtl2.XR = 0, so XAM won't be used) */
+ env->CP0_SegCtl1 |= (CP0SC_AM_UK << CP0SC1_XAM);
+#endif /* !CONFIG_USER_ONLY */
+ if ((env->insn_flags & ISA_MIPS_R6) &&
+ (env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */
+ env->CP0_Status |= (1 << CP0St_FR);
+ }
+
+ if (env->insn_flags & ISA_MIPS_R6) {
+ /* PTW = 1 */
+ env->CP0_PWSize = 0x40;
+ /* GDI = 12 */
+ /* UDI = 12 */
+ /* MDI = 12 */
+ /* PRI = 12 */
+ /* PTEI = 2 */
+ env->CP0_PWField = 0x0C30C302;
+ } else {
+ /* GDI = 0 */
+ /* UDI = 0 */
+ /* MDI = 0 */
+ /* PRI = 0 */
+ /* PTEI = 2 */
+ env->CP0_PWField = 0x02;
+ }
+
+    if ((env->CP0_Config3 & (0x3 << CP0C3_ISA)) == (0x3 << CP0C3_ISA)) {
+ /* microMIPS on reset when Config3.ISA is 3 */
+ env->hflags |= MIPS_HFLAG_M16;
+ }
+
+ msa_reset(env);
+
+ compute_hflags(env);
+ restore_fp_status(env);
+ restore_pamask(env);
+ cs->exception_index = EXCP_NONE;
+
+ if (semihosting_get_argc()) {
+ /* UHI interface can be used to obtain argc and argv */
+ env->active_tc.gpr[4] = -1;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_mips_reset_vcpu(cpu);
+ }
+#endif
+}
+
+static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ MIPSCPU *cpu = MIPS_CPU(s);
+ CPUMIPSState *env = &cpu->env;
+
+ if (!(env->insn_flags & ISA_NANOMIPS32)) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ info->print_insn = print_insn_big_mips;
+#else
+ info->print_insn = print_insn_little_mips;
+#endif
+ } else {
+#if defined(CONFIG_NANOMIPS_DIS)
+ info->print_insn = print_insn_nanomips;
+#endif
+ }
+}
+
+/*
+ * Since commit 6af0bf9c7c3 this model assumes a CPU clocked at 200MHz.
+ */
+#define CPU_FREQ_HZ_DEFAULT 200000000
+#define CP0_COUNT_RATE_DEFAULT 2
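+/*
+ * With these defaults, clock_ticks_to_ns() below yields a CP0 Count
+ * period of 2 * (10^9 / 200000000) ns = 10 ns, i.e. Count advances
+ * at 100 MHz, half the CPU clock.
+ */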
+
+static void mips_cp0_period_set(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+    env->cp0_count_ns = clock_ticks_to_ns(cpu->clock, cpu->cp0_count_rate);
+ assert(env->cp0_count_ns);
+}
+
+static void mips_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ MIPSCPU *cpu = MIPS_CPU(dev);
+ CPUMIPSState *env = &cpu->env;
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ if (!clock_get(cpu->clock)) {
+#ifndef CONFIG_USER_ONLY
+ if (!qtest_enabled()) {
+ g_autofree char *cpu_freq_str = freq_to_str(CPU_FREQ_HZ_DEFAULT);
+
+ warn_report("CPU input clock is not connected to any output clock, "
+ "using default frequency of %s.", cpu_freq_str);
+ }
+#endif
+ /* Initialize the frequency in case the clock remains unconnected. */
+ clock_set_hz(cpu->clock, CPU_FREQ_HZ_DEFAULT);
+ }
+ mips_cp0_period_set(cpu);
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ env->exception_base = (int32_t)0xBFC00000;
+
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+ mmu_init(env, env->cpu_model);
+#endif
+ fpu_init(env, env->cpu_model);
+ mvp_init(env);
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void mips_cpu_initfn(Object *obj)
+{
+ MIPSCPU *cpu = MIPS_CPU(obj);
+ CPUMIPSState *env = &cpu->env;
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+ cpu->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, cpu, 0);
+ env->cpu_model = mcc->cpu_def;
+}
+
+static char *mips_cpu_type_name(const char *cpu_model)
+{
+ return g_strdup_printf(MIPS_CPU_TYPE_NAME("%s"), cpu_model);
+}
+
+static ObjectClass *mips_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = mips_cpu_type_name(cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static Property mips_cpu_properties[] = {
+ /* CP0 timer running at half the clock of the CPU */
+ DEFINE_PROP_UINT32("cp0-count-rate", MIPSCPU, cp0_count_rate,
+ CP0_COUNT_RATE_DEFAULT),
+ DEFINE_PROP_END_OF_LIST()
+};
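+
+/*
+ * Being a qdev property, the count rate can be overridden per machine
+ * before realize; a minimal sketch (assuming an unrealized cpu object):
+ *
+ *     object_property_set_uint(OBJECT(cpu), "cp0-count-rate", 8,
+ *                              &error_abort);
+ */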
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps mips_sysemu_ops = {
+ .get_phys_page_debug = mips_cpu_get_phys_page_debug,
+ .legacy_vmsd = &vmstate_mips_cpu,
+};
+#endif
+
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+/*
+ * NB: kept const; board-specific behaviour (e.g. the jazz board's
+ * transaction-failed handling) is selected through MIPSCPUClass flags
+ * such as no_data_aborts rather than by patching this table.
+ */
+static const struct TCGCPUOps mips_tcg_ops = {
+ .initialize = mips_tcg_init,
+ .synchronize_from_tb = mips_cpu_synchronize_from_tb,
+
+#if !defined(CONFIG_USER_ONLY)
+ .tlb_fill = mips_cpu_tlb_fill,
+ .cpu_exec_interrupt = mips_cpu_exec_interrupt,
+ .do_interrupt = mips_cpu_do_interrupt,
+ .do_transaction_failed = mips_cpu_do_transaction_failed,
+ .do_unaligned_access = mips_cpu_do_unaligned_access,
+ .io_recompile_replay_branch = mips_io_recompile_replay_branch,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void mips_cpu_class_init(ObjectClass *c, void *data)
+{
+ MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_parent_realize(dc, mips_cpu_realizefn,
+ &mcc->parent_realize);
+ device_class_set_parent_reset(dc, mips_cpu_reset, &mcc->parent_reset);
+ device_class_set_props(dc, mips_cpu_properties);
+
+ cc->class_by_name = mips_cpu_class_by_name;
+ cc->has_work = mips_cpu_has_work;
+ cc->dump_state = mips_cpu_dump_state;
+ cc->set_pc = mips_cpu_set_pc;
+ cc->gdb_read_register = mips_cpu_gdb_read_register;
+ cc->gdb_write_register = mips_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &mips_sysemu_ops;
+#endif
+ cc->disas_set_info = mips_cpu_disas_set_info;
+ cc->gdb_num_core_regs = 73;
+ cc->gdb_stop_before_watchpoint = true;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &mips_tcg_ops;
+#endif /* CONFIG_TCG */
+}
+
+static const TypeInfo mips_cpu_type_info = {
+ .name = TYPE_MIPS_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(MIPSCPU),
+ .instance_init = mips_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(MIPSCPUClass),
+ .class_init = mips_cpu_class_init,
+};
+
+static void mips_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ MIPSCPUClass *mcc = MIPS_CPU_CLASS(oc);
+ mcc->cpu_def = data;
+}
+
+static void mips_register_cpudef_type(const struct mips_def_t *def)
+{
+ char *typename = mips_cpu_type_name(def->name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_MIPS_CPU,
+ .class_init = mips_cpu_cpudef_class_init,
+ .class_data = (void *)def,
+ };
+
+ type_register(&ti);
+ g_free(typename);
+}
+
+static void mips_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&mips_cpu_type_info);
+ for (i = 0; i < mips_defs_number; i++) {
+ mips_register_cpudef_type(&mips_defs[i]);
+ }
+}
+
+type_init(mips_cpu_register_types)
+
+static void mips_cpu_add_definition(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfo *info;
+ const char *typename;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_MIPS_CPU));
+ info->q_typename = g_strdup(typename);
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_MIPS_CPU, false);
+ g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
+ g_slist_free(list);
+
+ return cpu_list;
+}
+
+/* Could be used by generic CPU object */
+MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk)
+{
+ DeviceState *cpu;
+
+ cpu = DEVICE(object_new(cpu_type));
+ qdev_connect_clock_in(cpu, "clk-in", cpu_refclk);
+ qdev_realize(cpu, NULL, &error_abort);
+
+ return MIPS_CPU(cpu);
+}
+
+bool cpu_supports_isa(const CPUMIPSState *env, uint64_t isa_mask)
+{
+ return (env->cpu_model->insn_flags & isa_mask) != 0;
+}
+
+bool cpu_type_supports_isa(const char *cpu_type, uint64_t isa)
+{
+ const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type));
+ return (mcc->cpu_def->insn_flags & isa) != 0;
+}
+
+bool cpu_type_supports_cps_smp(const char *cpu_type)
+{
+ const MIPSCPUClass *mcc = MIPS_CPU_CLASS(object_class_by_name(cpu_type));
+ return (mcc->cpu_def->CP0_Config3 & (1 << CP0C3_CMGCR)) != 0;
+}
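+
+/*
+ * A minimal usage sketch for a board model (the clock name and CPU model
+ * here are illustrative):
+ *
+ *     Clock *refclk = clock_new(OBJECT(machine), "cpu-refclk");
+ *     clock_set_hz(refclk, 200 * 1000 * 1000);
+ *     MIPSCPU *cpu = mips_cpu_create_with_clock(MIPS_CPU_TYPE_NAME("24Kf"),
+ *                                               refclk);
+ */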
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
new file mode 100644
index 000000000..56b1cbd09
--- /dev/null
+++ b/target/mips/cpu.h
@@ -0,0 +1,1350 @@
+#ifndef MIPS_CPU_H
+#define MIPS_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat-types.h"
+#include "hw/clock.h"
+#include "mips-defs.h"
+
+#define TCG_GUEST_DEFAULT_MO (0)
+
+typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
+
+/* MSA Context */
+#define MSA_WRLEN (128)
+
+typedef union wr_t wr_t;
+union wr_t {
+ int8_t b[MSA_WRLEN / 8];
+ int16_t h[MSA_WRLEN / 16];
+ int32_t w[MSA_WRLEN / 32];
+ int64_t d[MSA_WRLEN / 64];
+};
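+/*
+ * One 128-bit MSA register, viewable as 16 byte, 8 halfword, 4 word or
+ * 2 doubleword lanes of the union above.
+ */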
+
+typedef union fpr_t fpr_t;
+union fpr_t {
+ float64 fd; /* ieee double precision */
+ float32 fs[2];/* ieee single precision */
+ uint64_t d; /* binary double fixed-point */
+ uint32_t w[2]; /* binary single fixed-point */
+    /* FPU/MSA register mapping is not tested on big-endian hosts. */
+ wr_t wr; /* vector data */
+};
+/*
+ *define FP_ENDIAN_IDX to access the same location
+ * in the fpr_t union regardless of the host endianness
+ */
+#if defined(HOST_WORDS_BIGENDIAN)
+# define FP_ENDIAN_IDX 1
+#else
+# define FP_ENDIAN_IDX 0
+#endif
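+/*
+ * For example, the low 32 bits of a 64-bit FPR are always
+ * fpr->w[FP_ENDIAN_IDX]: w[0] on a little-endian host, w[1] on a
+ * big-endian one, so a sketch of a 32-bit FGR read is simply
+ *
+ *     uint32_t lo = env->active_fpu.fpr[fs].w[FP_ENDIAN_IDX];
+ */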
+
+typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
+struct CPUMIPSFPUContext {
+ /* Floating point registers */
+ fpr_t fpr[32];
+ float_status fp_status;
+ /* fpu implementation/revision register (fir) */
+ uint32_t fcr0;
+#define FCR0_FREP 29
+#define FCR0_UFRP 28
+#define FCR0_HAS2008 23
+#define FCR0_F64 22
+#define FCR0_L 21
+#define FCR0_W 20
+#define FCR0_3D 19
+#define FCR0_PS 18
+#define FCR0_D 17
+#define FCR0_S 16
+#define FCR0_PRID 8
+#define FCR0_REV 0
+ /* fcsr */
+ uint32_t fcr31_rw_bitmask;
+ uint32_t fcr31;
+#define FCR31_FS 24
+#define FCR31_ABS2008 19
+#define FCR31_NAN2008 18
+#define SET_FP_COND(num, env) do { ((env).fcr31) |= \
+ ((num) ? (1 << ((num) + 24)) : \
+ (1 << 23)); \
+ } while (0)
+#define CLEAR_FP_COND(num, env) do { ((env).fcr31) &= \
+ ~((num) ? (1 << ((num) + 24)) : \
+ (1 << 23)); \
+ } while (0)
+#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | \
+ (((env).fcr31 >> 23) & 0x1))
+#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f)
+#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
+#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f)
+#define SET_FP_CAUSE(reg, v) do { (reg) = ((reg) & ~(0x3f << 12)) | \
+ ((v & 0x3f) << 12); \
+ } while (0)
+#define SET_FP_ENABLE(reg, v) do { (reg) = ((reg) & ~(0x1f << 7)) | \
+ ((v & 0x1f) << 7); \
+ } while (0)
+#define SET_FP_FLAGS(reg, v) do { (reg) = ((reg) & ~(0x1f << 2)) | \
+ ((v & 0x1f) << 2); \
+ } while (0)
+#define UPDATE_FP_FLAGS(reg, v) do { (reg) |= ((v & 0x1f) << 2); } while (0)
+#define FP_INEXACT 1
+#define FP_UNDERFLOW 2
+#define FP_OVERFLOW 4
+#define FP_DIV0 8
+#define FP_INVALID 16
+#define FP_UNIMPLEMENTED 32
+};
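+
+/*
+ * The eight FP condition codes are split across fcr31: cc0 at bit 23,
+ * cc1..cc7 at bits 25..31, which is why GET_FP_COND() stitches them back
+ * together. As a worked example, after SET_FP_COND(2, env->active_fpu),
+ * fcr31 bit 26 is set and GET_FP_COND(env->active_fpu) reads back 0x04
+ * (assuming all other condition codes are clear).
+ */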
+
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
+struct CPUMIPSMVPContext {
+ int32_t CP0_MVPControl;
+#define CP0MVPCo_CPA 3
+#define CP0MVPCo_STLB 2
+#define CP0MVPCo_VPC 1
+#define CP0MVPCo_EVP 0
+ int32_t CP0_MVPConf0;
+#define CP0MVPC0_M 31
+#define CP0MVPC0_TLBS 29
+#define CP0MVPC0_GS 28
+#define CP0MVPC0_PCP 27
+#define CP0MVPC0_PTLBE 16
+#define CP0MVPC0_TCA 15
+#define CP0MVPC0_PVPE 10
+#define CP0MVPC0_PTC 0
+ int32_t CP0_MVPConf1;
+#define CP0MVPC1_CIM 31
+#define CP0MVPC1_CIF 30
+#define CP0MVPC1_PCX 20
+#define CP0MVPC1_PCP2 10
+#define CP0MVPC1_PCP1 0
+};
+
+typedef struct mips_def_t mips_def_t;
+
+#define MIPS_SHADOW_SET_MAX 16
+#define MIPS_TC_MAX 5
+#define MIPS_FPU_MAX 1
+#define MIPS_DSP_ACC 4
+#define MIPS_KSCRATCH_NUM 6
+#define MIPS_MAAR_MAX 16 /* Must be an even number. */
+
+
+/*
+ * Summary of CP0 registers
+ * ========================
+ *
+ *
+ * Register 0 Register 1 Register 2 Register 3
+ * ---------- ---------- ---------- ----------
+ *
+ * 0 Index Random EntryLo0 EntryLo1
+ * 1 MVPControl VPEControl TCStatus GlobalNumber
+ * 2 MVPConf0 VPEConf0 TCBind
+ * 3 MVPConf1 VPEConf1 TCRestart
+ * 4 VPControl YQMask TCHalt
+ * 5 VPESchedule TCContext
+ * 6 VPEScheFBack TCSchedule
+ * 7 VPEOpt TCScheFBack TCOpt
+ *
+ *
+ * Register 4 Register 5 Register 6 Register 7
+ * ---------- ---------- ---------- ----------
+ *
+ * 0 Context PageMask Wired HWREna
+ * 1 ContextConfig PageGrain SRSConf0
+ * 2 UserLocal SegCtl0 SRSConf1
+ * 3 XContextConfig SegCtl1 SRSConf2
+ * 4 DebugContextID SegCtl2 SRSConf3
+ * 5 MemoryMapID PWBase SRSConf4
+ * 6 PWField PWCtl
+ * 7 PWSize
+ *
+ *
+ * Register 8 Register 9 Register 10 Register 11
+ * ---------- ---------- ----------- -----------
+ *
+ * 0 BadVAddr Count EntryHi Compare
+ * 1 BadInstr
+ * 2 BadInstrP
+ * 3 BadInstrX
+ * 4 GuestCtl1 GuestCtl0Ext
+ * 5 GuestCtl2
+ * 6 SAARI GuestCtl3
+ * 7 SAAR
+ *
+ *
+ * Register 12 Register 13 Register 14 Register 15
+ * ----------- ----------- ----------- -----------
+ *
+ * 0 Status Cause EPC PRId
+ * 1 IntCtl EBase
+ * 2 SRSCtl NestedEPC CDMMBase
+ * 3 SRSMap CMGCRBase
+ * 4 View_IPL View_RIPL BEVVA
+ * 5 SRSMap2 NestedExc
+ * 6 GuestCtl0
+ * 7 GTOffset
+ *
+ *
+ * Register 16 Register 17 Register 18 Register 19
+ * ----------- ----------- ----------- -----------
+ *
+ * 0 Config LLAddr WatchLo0 WatchHi
+ * 1 Config1 MAAR WatchLo1 WatchHi
+ * 2 Config2 MAARI WatchLo2 WatchHi
+ * 3 Config3 WatchLo3 WatchHi
+ * 4 Config4 WatchLo4 WatchHi
+ * 5 Config5 WatchLo5 WatchHi
+ * 6 Config6 WatchLo6 WatchHi
+ * 7 Config7 WatchLo7 WatchHi
+ *
+ *
+ * Register 20 Register 21 Register 22 Register 23
+ * ----------- ----------- ----------- -----------
+ *
+ * 0 XContext Debug
+ * 1 TraceControl
+ * 2 TraceControl2
+ * 3 UserTraceData1
+ * 4 TraceIBPC
+ * 5 TraceDBPC
+ * 6 Debug2
+ * 7
+ *
+ *
+ * Register 24 Register 25 Register 26 Register 27
+ * ----------- ----------- ----------- -----------
+ *
+ * 0 DEPC PerfCnt ErrCtl CacheErr
+ * 1 PerfCnt
+ * 2 TraceControl3 PerfCnt
+ * 3 UserTraceData2 PerfCnt
+ * 4 PerfCnt
+ * 5 PerfCnt
+ * 6 PerfCnt
+ * 7 PerfCnt
+ *
+ *
+ * Register 28 Register 29 Register 30 Register 31
+ * ----------- ----------- ----------- -----------
+ *
+ * 0 DataLo DataHi ErrorEPC DESAVE
+ * 1 TagLo TagHi
+ * 2 DataLo1 DataHi1 KScratch<n>
+ * 3 TagLo1 TagHi1 KScratch<n>
+ * 4 DataLo2 DataHi2 KScratch<n>
+ * 5 TagLo2 TagHi2 KScratch<n>
+ * 6 DataLo3 DataHi3 KScratch<n>
+ * 7 TagLo3 TagHi3 KScratch<n>
+ *
+ */
+#define CP0_REGISTER_00 0
+#define CP0_REGISTER_01 1
+#define CP0_REGISTER_02 2
+#define CP0_REGISTER_03 3
+#define CP0_REGISTER_04 4
+#define CP0_REGISTER_05 5
+#define CP0_REGISTER_06 6
+#define CP0_REGISTER_07 7
+#define CP0_REGISTER_08 8
+#define CP0_REGISTER_09 9
+#define CP0_REGISTER_10 10
+#define CP0_REGISTER_11 11
+#define CP0_REGISTER_12 12
+#define CP0_REGISTER_13 13
+#define CP0_REGISTER_14 14
+#define CP0_REGISTER_15 15
+#define CP0_REGISTER_16 16
+#define CP0_REGISTER_17 17
+#define CP0_REGISTER_18 18
+#define CP0_REGISTER_19 19
+#define CP0_REGISTER_20 20
+#define CP0_REGISTER_21 21
+#define CP0_REGISTER_22 22
+#define CP0_REGISTER_23 23
+#define CP0_REGISTER_24 24
+#define CP0_REGISTER_25 25
+#define CP0_REGISTER_26 26
+#define CP0_REGISTER_27 27
+#define CP0_REGISTER_28 28
+#define CP0_REGISTER_29 29
+#define CP0_REGISTER_30 30
+#define CP0_REGISTER_31 31
+
+
+/* CP0 Register 00 */
+#define CP0_REG00__INDEX 0
+#define CP0_REG00__MVPCONTROL 1
+#define CP0_REG00__MVPCONF0 2
+#define CP0_REG00__MVPCONF1 3
+#define CP0_REG00__VPCONTROL 4
+/* CP0 Register 01 */
+#define CP0_REG01__RANDOM 0
+#define CP0_REG01__VPECONTROL 1
+#define CP0_REG01__VPECONF0 2
+#define CP0_REG01__VPECONF1 3
+#define CP0_REG01__YQMASK 4
+#define CP0_REG01__VPESCHEDULE 5
+#define CP0_REG01__VPESCHEFBACK 6
+#define CP0_REG01__VPEOPT 7
+/* CP0 Register 02 */
+#define CP0_REG02__ENTRYLO0 0
+#define CP0_REG02__TCSTATUS 1
+#define CP0_REG02__TCBIND 2
+#define CP0_REG02__TCRESTART 3
+#define CP0_REG02__TCHALT 4
+#define CP0_REG02__TCCONTEXT 5
+#define CP0_REG02__TCSCHEDULE 6
+#define CP0_REG02__TCSCHEFBACK 7
+/* CP0 Register 03 */
+#define CP0_REG03__ENTRYLO1 0
+#define CP0_REG03__GLOBALNUM 1
+#define CP0_REG03__TCOPT 7
+/* CP0 Register 04 */
+#define CP0_REG04__CONTEXT 0
+#define CP0_REG04__CONTEXTCONFIG 1
+#define CP0_REG04__USERLOCAL 2
+#define CP0_REG04__XCONTEXTCONFIG 3
+#define CP0_REG04__DBGCONTEXTID 4
+#define CP0_REG04__MMID 5
+/* CP0 Register 05 */
+#define CP0_REG05__PAGEMASK 0
+#define CP0_REG05__PAGEGRAIN 1
+#define CP0_REG05__SEGCTL0 2
+#define CP0_REG05__SEGCTL1 3
+#define CP0_REG05__SEGCTL2 4
+#define CP0_REG05__PWBASE 5
+#define CP0_REG05__PWFIELD 6
+#define CP0_REG05__PWSIZE 7
+/* CP0 Register 06 */
+#define CP0_REG06__WIRED 0
+#define CP0_REG06__SRSCONF0 1
+#define CP0_REG06__SRSCONF1 2
+#define CP0_REG06__SRSCONF2 3
+#define CP0_REG06__SRSCONF3 4
+#define CP0_REG06__SRSCONF4 5
+#define CP0_REG06__PWCTL 6
+/* CP0 Register 07 */
+#define CP0_REG07__HWRENA 0
+/* CP0 Register 08 */
+#define CP0_REG08__BADVADDR 0
+#define CP0_REG08__BADINSTR 1
+#define CP0_REG08__BADINSTRP 2
+#define CP0_REG08__BADINSTRX 3
+/* CP0 Register 09 */
+#define CP0_REG09__COUNT 0
+#define CP0_REG09__SAARI 6
+#define CP0_REG09__SAAR 7
+/* CP0 Register 10 */
+#define CP0_REG10__ENTRYHI 0
+#define CP0_REG10__GUESTCTL1 4
+#define CP0_REG10__GUESTCTL2 5
+#define CP0_REG10__GUESTCTL3 6
+/* CP0 Register 11 */
+#define CP0_REG11__COMPARE 0
+#define CP0_REG11__GUESTCTL0EXT 4
+/* CP0 Register 12 */
+#define CP0_REG12__STATUS 0
+#define CP0_REG12__INTCTL 1
+#define CP0_REG12__SRSCTL 2
+#define CP0_REG12__SRSMAP 3
+#define CP0_REG12__VIEW_IPL 4
+#define CP0_REG12__SRSMAP2 5
+#define CP0_REG12__GUESTCTL0 6
+#define CP0_REG12__GTOFFSET 7
+/* CP0 Register 13 */
+#define CP0_REG13__CAUSE 0
+#define CP0_REG13__VIEW_RIPL 4
+#define CP0_REG13__NESTEDEXC 5
+/* CP0 Register 14 */
+#define CP0_REG14__EPC 0
+#define CP0_REG14__NESTEDEPC 2
+/* CP0 Register 15 */
+#define CP0_REG15__PRID 0
+#define CP0_REG15__EBASE 1
+#define CP0_REG15__CDMMBASE 2
+#define CP0_REG15__CMGCRBASE 3
+#define CP0_REG15__BEVVA 4
+/* CP0 Register 16 */
+#define CP0_REG16__CONFIG 0
+#define CP0_REG16__CONFIG1 1
+#define CP0_REG16__CONFIG2 2
+#define CP0_REG16__CONFIG3 3
+#define CP0_REG16__CONFIG4 4
+#define CP0_REG16__CONFIG5 5
+#define CP0_REG16__CONFIG6 6
+#define CP0_REG16__CONFIG7 7
+/* CP0 Register 17 */
+#define CP0_REG17__LLADDR 0
+#define CP0_REG17__MAAR 1
+#define CP0_REG17__MAARI 2
+/* CP0 Register 18 */
+#define CP0_REG18__WATCHLO0 0
+#define CP0_REG18__WATCHLO1 1
+#define CP0_REG18__WATCHLO2 2
+#define CP0_REG18__WATCHLO3 3
+#define CP0_REG18__WATCHLO4 4
+#define CP0_REG18__WATCHLO5 5
+#define CP0_REG18__WATCHLO6 6
+#define CP0_REG18__WATCHLO7 7
+/* CP0 Register 19 */
+#define CP0_REG19__WATCHHI0 0
+#define CP0_REG19__WATCHHI1 1
+#define CP0_REG19__WATCHHI2 2
+#define CP0_REG19__WATCHHI3 3
+#define CP0_REG19__WATCHHI4 4
+#define CP0_REG19__WATCHHI5 5
+#define CP0_REG19__WATCHHI6 6
+#define CP0_REG19__WATCHHI7 7
+/* CP0 Register 20 */
+#define CP0_REG20__XCONTEXT 0
+/* CP0 Register 21 */
+/* CP0 Register 22 */
+/* CP0 Register 23 */
+#define CP0_REG23__DEBUG 0
+#define CP0_REG23__TRACECONTROL 1
+#define CP0_REG23__TRACECONTROL2 2
+#define CP0_REG23__USERTRACEDATA1 3
+#define CP0_REG23__TRACEIBPC 4
+#define CP0_REG23__TRACEDBPC 5
+#define CP0_REG23__DEBUG2 6
+/* CP0 Register 24 */
+#define CP0_REG24__DEPC 0
+/* CP0 Register 25 */
+#define CP0_REG25__PERFCTL0 0
+#define CP0_REG25__PERFCNT0 1
+#define CP0_REG25__PERFCTL1 2
+#define CP0_REG25__PERFCNT1 3
+#define CP0_REG25__PERFCTL2 4
+#define CP0_REG25__PERFCNT2 5
+#define CP0_REG25__PERFCTL3 6
+#define CP0_REG25__PERFCNT3 7
+/* CP0 Register 26 */
+#define CP0_REG26__ERRCTL 0
+/* CP0 Register 27 */
+#define CP0_REG27__CACHERR 0
+/* CP0 Register 28 */
+#define CP0_REG28__TAGLO 0
+#define CP0_REG28__DATALO 1
+#define CP0_REG28__TAGLO1 2
+#define CP0_REG28__DATALO1 3
+#define CP0_REG28__TAGLO2 4
+#define CP0_REG28__DATALO2 5
+#define CP0_REG28__TAGLO3 6
+#define CP0_REG28__DATALO3 7
+/* CP0 Register 29 */
+#define CP0_REG29__TAGHI 0
+#define CP0_REG29__DATAHI 1
+#define CP0_REG29__TAGHI1 2
+#define CP0_REG29__DATAHI1 3
+#define CP0_REG29__TAGHI2 4
+#define CP0_REG29__DATAHI2 5
+#define CP0_REG29__TAGHI3 6
+#define CP0_REG29__DATAHI3 7
+/* CP0 Register 30 */
+#define CP0_REG30__ERROREPC 0
+/* CP0 Register 31 */
+#define CP0_REG31__DESAVE 0
+#define CP0_REG31__KSCRATCH1 2
+#define CP0_REG31__KSCRATCH2 3
+#define CP0_REG31__KSCRATCH3 4
+#define CP0_REG31__KSCRATCH4 5
+#define CP0_REG31__KSCRATCH5 6
+#define CP0_REG31__KSCRATCH6 7
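+
+/*
+ * CP0 registers are addressed as a (register, sel) pair; for example
+ * "mfc0 rt, $12, 1" selects (CP0_REGISTER_12, CP0_REG12__INTCTL), i.e.
+ * IntCtl. A sketch of the dispatch shape a translator can build on:
+ *
+ *     switch (reg) {
+ *     case CP0_REGISTER_12:
+ *         switch (sel) {
+ *         case CP0_REG12__STATUS: ... break;
+ *         case CP0_REG12__INTCTL: ... break;
+ *         }
+ *         break;
+ *     }
+ */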
+
+
+typedef struct TCState TCState;
+struct TCState {
+ target_ulong gpr[32];
+#if defined(TARGET_MIPS64)
+ /*
+     * For CPUs using 128-bit GPR registers, we put the lower halves in gpr[]
+ * and the upper halves in gpr_hi[].
+ */
+ uint64_t gpr_hi[32];
+#endif /* TARGET_MIPS64 */
+ target_ulong PC;
+ target_ulong HI[MIPS_DSP_ACC];
+ target_ulong LO[MIPS_DSP_ACC];
+ target_ulong ACX[MIPS_DSP_ACC];
+ target_ulong DSPControl;
+ int32_t CP0_TCStatus;
+#define CP0TCSt_TCU3 31
+#define CP0TCSt_TCU2 30
+#define CP0TCSt_TCU1 29
+#define CP0TCSt_TCU0 28
+#define CP0TCSt_TMX 27
+#define CP0TCSt_RNST 23
+#define CP0TCSt_TDS 21
+#define CP0TCSt_DT 20
+#define CP0TCSt_DA 15
+#define CP0TCSt_A 13
+#define CP0TCSt_TKSU 11
+#define CP0TCSt_IXMT 10
+#define CP0TCSt_TASID 0
+ int32_t CP0_TCBind;
+#define CP0TCBd_CurTC 21
+#define CP0TCBd_TBE 17
+#define CP0TCBd_CurVPE 0
+ target_ulong CP0_TCHalt;
+ target_ulong CP0_TCContext;
+ target_ulong CP0_TCSchedule;
+ target_ulong CP0_TCScheFBack;
+ int32_t CP0_Debug_tcstatus;
+ target_ulong CP0_UserLocal;
+
+ int32_t msacsr;
+
+#define MSACSR_FS 24
+#define MSACSR_FS_MASK (1 << MSACSR_FS)
+#define MSACSR_NX 18
+#define MSACSR_NX_MASK (1 << MSACSR_NX)
+#define MSACSR_CEF 2
+#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
+#define MSACSR_RM 0
+#define MSACSR_RM_MASK (0x3 << MSACSR_RM)
+#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
+ MSACSR_FS_MASK)
+
+ float_status msa_fp_status;
+
+#define NUMBER_OF_MXU_REGISTERS 16
+ target_ulong mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1];
+ target_ulong mxu_cr;
+#define MXU_CR_LC 31
+#define MXU_CR_RC 30
+#define MXU_CR_BIAS 2
+#define MXU_CR_RD_EN 1
+#define MXU_CR_MXU_EN 0
+
+};
+
+struct MIPSITUState;
+typedef struct CPUMIPSState CPUMIPSState;
+struct CPUMIPSState {
+ TCState active_tc;
+ CPUMIPSFPUContext active_fpu;
+
+ uint32_t current_tc;
+ uint32_t current_fpu;
+
+ uint32_t SEGBITS;
+ uint32_t PABITS;
+#if defined(TARGET_MIPS64)
+# define PABITS_BASE 36
+#else
+# define PABITS_BASE 32
+#endif
+ target_ulong SEGMask;
+ uint64_t PAMask;
+#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
+
+ int32_t msair;
+#define MSAIR_ProcID 8
+#define MSAIR_Rev 0
+
+/*
+ * CP0 Register 0
+ */
+ int32_t CP0_Index;
+ /* CP0_MVP* are per MVP registers. */
+ int32_t CP0_VPControl;
+#define CP0VPCtl_DIS 0
+/*
+ * CP0 Register 1
+ */
+ int32_t CP0_Random;
+ int32_t CP0_VPEControl;
+#define CP0VPECo_YSI 21
+#define CP0VPECo_GSI 20
+#define CP0VPECo_EXCPT 16
+#define CP0VPECo_TE 15
+#define CP0VPECo_TargTC 0
+ int32_t CP0_VPEConf0;
+#define CP0VPEC0_M 31
+#define CP0VPEC0_XTC 21
+#define CP0VPEC0_TCS 19
+#define CP0VPEC0_SCS 18
+#define CP0VPEC0_DSC 17
+#define CP0VPEC0_ICS 16
+#define CP0VPEC0_MVP 1
+#define CP0VPEC0_VPA 0
+ int32_t CP0_VPEConf1;
+#define CP0VPEC1_NCX 20
+#define CP0VPEC1_NCP2 10
+#define CP0VPEC1_NCP1 0
+ target_ulong CP0_YQMask;
+ target_ulong CP0_VPESchedule;
+ target_ulong CP0_VPEScheFBack;
+ int32_t CP0_VPEOpt;
+#define CP0VPEOpt_IWX7 15
+#define CP0VPEOpt_IWX6 14
+#define CP0VPEOpt_IWX5 13
+#define CP0VPEOpt_IWX4 12
+#define CP0VPEOpt_IWX3 11
+#define CP0VPEOpt_IWX2 10
+#define CP0VPEOpt_IWX1 9
+#define CP0VPEOpt_IWX0 8
+#define CP0VPEOpt_DWX7 7
+#define CP0VPEOpt_DWX6 6
+#define CP0VPEOpt_DWX5 5
+#define CP0VPEOpt_DWX4 4
+#define CP0VPEOpt_DWX3 3
+#define CP0VPEOpt_DWX2 2
+#define CP0VPEOpt_DWX1 1
+#define CP0VPEOpt_DWX0 0
+/*
+ * CP0 Register 2
+ */
+ uint64_t CP0_EntryLo0;
+/*
+ * CP0 Register 3
+ */
+ uint64_t CP0_EntryLo1;
+#if defined(TARGET_MIPS64)
+# define CP0EnLo_RI 63
+# define CP0EnLo_XI 62
+#else
+# define CP0EnLo_RI 31
+# define CP0EnLo_XI 30
+#endif
+ int32_t CP0_GlobalNumber;
+#define CP0GN_VPId 0
+/*
+ * CP0 Register 4
+ */
+ target_ulong CP0_Context;
+ int32_t CP0_MemoryMapID;
+/*
+ * CP0 Register 5
+ */
+ int32_t CP0_PageMask;
+#define CP0PM_MASK 13
+ int32_t CP0_PageGrain_rw_bitmask;
+ int32_t CP0_PageGrain;
+#define CP0PG_RIE 31
+#define CP0PG_XIE 30
+#define CP0PG_ELPA 29
+#define CP0PG_IEC 27
+ target_ulong CP0_SegCtl0;
+ target_ulong CP0_SegCtl1;
+ target_ulong CP0_SegCtl2;
+#define CP0SC_PA 9
+#define CP0SC_PA_MASK (0x7FULL << CP0SC_PA)
+#define CP0SC_PA_1GMASK (0x7EULL << CP0SC_PA)
+#define CP0SC_AM 4
+#define CP0SC_AM_MASK (0x7ULL << CP0SC_AM)
+#define CP0SC_AM_UK 0ULL
+#define CP0SC_AM_MK 1ULL
+#define CP0SC_AM_MSK 2ULL
+#define CP0SC_AM_MUSK 3ULL
+#define CP0SC_AM_MUSUK 4ULL
+#define CP0SC_AM_USK 5ULL
+#define CP0SC_AM_UUSK 7ULL
+#define CP0SC_EU 3
+#define CP0SC_EU_MASK (1ULL << CP0SC_EU)
+#define CP0SC_C 0
+#define CP0SC_C_MASK (0x7ULL << CP0SC_C)
+#define CP0SC_MASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
+ CP0SC_PA_MASK)
+#define CP0SC_1GMASK (CP0SC_C_MASK | CP0SC_EU_MASK | CP0SC_AM_MASK | \
+ CP0SC_PA_1GMASK)
+#define CP0SC0_MASK (CP0SC_MASK | (CP0SC_MASK << 16))
+#define CP0SC1_XAM 59
+#define CP0SC1_XAM_MASK (0x7ULL << CP0SC1_XAM)
+#define CP0SC1_MASK (CP0SC_MASK | (CP0SC_MASK << 16) | CP0SC1_XAM_MASK)
+#define CP0SC2_XR 56
+#define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR)
+#define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK)
+ target_ulong CP0_PWBase;
+ target_ulong CP0_PWField;
+#if defined(TARGET_MIPS64)
+#define CP0PF_BDI 32 /* 37..32 */
+#define CP0PF_GDI 24 /* 29..24 */
+#define CP0PF_UDI 18 /* 23..18 */
+#define CP0PF_MDI 12 /* 17..12 */
+#define CP0PF_PTI 6 /* 11..6 */
+#define CP0PF_PTEI 0 /* 5..0 */
+#else
+#define CP0PF_GDW 24 /* 29..24 */
+#define CP0PF_UDW 18 /* 23..18 */
+#define CP0PF_MDW 12 /* 17..12 */
+#define CP0PF_PTW 6 /* 11..6 */
+#define CP0PF_PTEW 0 /* 5..0 */
+#endif
+ target_ulong CP0_PWSize;
+#if defined(TARGET_MIPS64)
+#define CP0PS_BDW 32 /* 37..32 */
+#endif
+#define CP0PS_PS 30
+#define CP0PS_GDW 24 /* 29..24 */
+#define CP0PS_UDW 18 /* 23..18 */
+#define CP0PS_MDW 12 /* 17..12 */
+#define CP0PS_PTW 6 /* 11..6 */
+#define CP0PS_PTEW 0 /* 5..0 */
+/*
+ * CP0 Register 6
+ */
+ int32_t CP0_Wired;
+ int32_t CP0_PWCtl;
+#define CP0PC_PWEN 31
+#if defined(TARGET_MIPS64)
+#define CP0PC_PWDIREXT 30
+#define CP0PC_XK 28
+#define CP0PC_XS 27
+#define CP0PC_XU 26
+#endif
+#define CP0PC_DPH 7
+#define CP0PC_HUGEPG 6
+#define CP0PC_PSN 0 /* 5..0 */
+ int32_t CP0_SRSConf0_rw_bitmask;
+ int32_t CP0_SRSConf0;
+#define CP0SRSC0_M 31
+#define CP0SRSC0_SRS3 20
+#define CP0SRSC0_SRS2 10
+#define CP0SRSC0_SRS1 0
+ int32_t CP0_SRSConf1_rw_bitmask;
+ int32_t CP0_SRSConf1;
+#define CP0SRSC1_M 31
+#define CP0SRSC1_SRS6 20
+#define CP0SRSC1_SRS5 10
+#define CP0SRSC1_SRS4 0
+ int32_t CP0_SRSConf2_rw_bitmask;
+ int32_t CP0_SRSConf2;
+#define CP0SRSC2_M 31
+#define CP0SRSC2_SRS9 20
+#define CP0SRSC2_SRS8 10
+#define CP0SRSC2_SRS7 0
+ int32_t CP0_SRSConf3_rw_bitmask;
+ int32_t CP0_SRSConf3;
+#define CP0SRSC3_M 31
+#define CP0SRSC3_SRS12 20
+#define CP0SRSC3_SRS11 10
+#define CP0SRSC3_SRS10 0
+ int32_t CP0_SRSConf4_rw_bitmask;
+ int32_t CP0_SRSConf4;
+#define CP0SRSC4_SRS15 20
+#define CP0SRSC4_SRS14 10
+#define CP0SRSC4_SRS13 0
+/*
+ * CP0 Register 7
+ */
+ int32_t CP0_HWREna;
+/*
+ * CP0 Register 8
+ */
+ target_ulong CP0_BadVAddr;
+ uint32_t CP0_BadInstr;
+ uint32_t CP0_BadInstrP;
+ uint32_t CP0_BadInstrX;
+/*
+ * CP0 Register 9
+ */
+ int32_t CP0_Count;
+ uint32_t CP0_SAARI;
+#define CP0SAARI_TARGET 0 /* 5..0 */
+ uint64_t CP0_SAAR[2];
+#define CP0SAAR_BASE 12 /* 43..12 */
+#define CP0SAAR_SIZE 1 /* 5..1 */
+#define CP0SAAR_EN 0
+/*
+ * CP0 Register 10
+ */
+ target_ulong CP0_EntryHi;
+#define CP0EnHi_EHINV 10
+ target_ulong CP0_EntryHi_ASID_mask;
+/*
+ * CP0 Register 11
+ */
+ int32_t CP0_Compare;
+/*
+ * CP0 Register 12
+ */
+ int32_t CP0_Status;
+#define CP0St_CU3 31
+#define CP0St_CU2 30
+#define CP0St_CU1 29
+#define CP0St_CU0 28
+#define CP0St_RP 27
+#define CP0St_FR 26
+#define CP0St_RE 25
+#define CP0St_MX 24
+#define CP0St_PX 23
+#define CP0St_BEV 22
+#define CP0St_TS 21
+#define CP0St_SR 20
+#define CP0St_NMI 19
+#define CP0St_IM 8
+#define CP0St_KX 7
+#define CP0St_SX 6
+#define CP0St_UX 5
+#define CP0St_KSU 3
+#define CP0St_ERL 2
+#define CP0St_EXL 1
+#define CP0St_IE 0
+ int32_t CP0_IntCtl;
+#define CP0IntCtl_IPTI 29
+#define CP0IntCtl_IPPCI 26
+#define CP0IntCtl_VS 5
+ int32_t CP0_SRSCtl;
+#define CP0SRSCtl_HSS 26
+#define CP0SRSCtl_EICSS 18
+#define CP0SRSCtl_ESS 12
+#define CP0SRSCtl_PSS 6
+#define CP0SRSCtl_CSS 0
+ int32_t CP0_SRSMap;
+#define CP0SRSMap_SSV7 28
+#define CP0SRSMap_SSV6 24
+#define CP0SRSMap_SSV5 20
+#define CP0SRSMap_SSV4 16
+#define CP0SRSMap_SSV3 12
+#define CP0SRSMap_SSV2 8
+#define CP0SRSMap_SSV1 4
+#define CP0SRSMap_SSV0 0
+/*
+ * CP0 Register 13
+ */
+ int32_t CP0_Cause;
+#define CP0Ca_BD 31
+#define CP0Ca_TI 30
+#define CP0Ca_CE 28
+#define CP0Ca_DC 27
+#define CP0Ca_PCI 26
+#define CP0Ca_IV 23
+#define CP0Ca_WP 22
+#define CP0Ca_IP 8
+#define CP0Ca_IP_mask 0x0000FF00
+#define CP0Ca_EC 2
+/*
+ * CP0 Register 14
+ */
+ target_ulong CP0_EPC;
+/*
+ * CP0 Register 15
+ */
+ int32_t CP0_PRid;
+ target_ulong CP0_EBase;
+ target_ulong CP0_EBaseWG_rw_bitmask;
+#define CP0EBase_WG 11
+ target_ulong CP0_CMGCRBase;
+/*
+ * CP0 Register 16 (after Release 1)
+ */
+ int32_t CP0_Config0;
+#define CP0C0_M 31
+#define CP0C0_K23 28 /* 30..28 */
+#define CP0C0_KU 25 /* 27..25 */
+#define CP0C0_MDU 20
+#define CP0C0_MM 18
+#define CP0C0_BM 16
+#define CP0C0_Impl 16 /* 24..16 */
+#define CP0C0_BE 15
+#define CP0C0_AT 13 /* 14..13 */
+#define CP0C0_AR 10 /* 12..10 */
+#define CP0C0_MT 7 /* 9..7 */
+#define CP0C0_VI 3
+#define CP0C0_K0 0 /* 2..0 */
+#define CP0C0_AR_LENGTH 3
+/*
+ * CP0 Register 16 (before Release 1)
+ */
+#define CP0C0_Impl 16 /* 24..16 */
+#define CP0C0_IC 9 /* 11..9 */
+#define CP0C0_DC 6 /* 8..6 */
+#define CP0C0_IB 5
+#define CP0C0_DB 4
+ int32_t CP0_Config1;
+#define CP0C1_M 31
+#define CP0C1_MMU 25 /* 30..25 */
+#define CP0C1_IS 22 /* 24..22 */
+#define CP0C1_IL 19 /* 21..19 */
+#define CP0C1_IA 16 /* 18..16 */
+#define CP0C1_DS 13 /* 15..13 */
+#define CP0C1_DL 10 /* 12..10 */
+#define CP0C1_DA 7 /* 9..7 */
+#define CP0C1_C2 6
+#define CP0C1_MD 5
+#define CP0C1_PC 4
+#define CP0C1_WR 3
+#define CP0C1_CA 2
+#define CP0C1_EP 1
+#define CP0C1_FP 0
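+/*
+ * IS/IL/IA (and DS/DL/DA) encode cache geometry: sets per way = 64 << IS,
+ * line size = 2 << IL bytes, associativity = IA + 1. E.g. the I6400
+ * model's (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) describes
+ * a 256-set, 64-byte-line, 4-way cache: 256 * 64 * 4 = 64 KiB.
+ */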
+ int32_t CP0_Config2;
+#define CP0C2_M 31
+#define CP0C2_TU 28 /* 30..28 */
+#define CP0C2_TS 24 /* 27..24 */
+#define CP0C2_TL 20 /* 23..20 */
+#define CP0C2_TA 16 /* 19..16 */
+#define CP0C2_SU 12 /* 15..12 */
+#define CP0C2_SS 8 /* 11..8 */
+#define CP0C2_SL 4 /* 7..4 */
+#define CP0C2_SA 0 /* 3..0 */
+ int32_t CP0_Config3;
+#define CP0C3_M 31
+#define CP0C3_BPG 30
+#define CP0C3_CMGCR 29
+#define CP0C3_MSAP 28
+#define CP0C3_BP 27
+#define CP0C3_BI 26
+#define CP0C3_SC 25
+#define CP0C3_PW 24
+#define CP0C3_VZ 23
+#define CP0C3_IPLV 21 /* 22..21 */
+#define CP0C3_MMAR 18 /* 20..18 */
+#define CP0C3_MCU 17
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ISA 14 /* 15..14 */
+#define CP0C3_ULRI 13
+#define CP0C3_RXI 12
+#define CP0C3_DSP2P 11
+#define CP0C3_DSPP 10
+#define CP0C3_CTXTC 9
+#define CP0C3_ITL 8
+#define CP0C3_LPA 7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP 4
+#define CP0C3_CDMM 3
+#define CP0C3_MT 2
+#define CP0C3_SM 1
+#define CP0C3_TL 0
+ int32_t CP0_Config4;
+ int32_t CP0_Config4_rw_bitmask;
+#define CP0C4_M 31
+#define CP0C4_IE 29 /* 30..29 */
+#define CP0C4_AE 28
+#define CP0C4_VTLBSizeExt 24 /* 27..24 */
+#define CP0C4_KScrExist 16
+#define CP0C4_MMUExtDef 14
+#define CP0C4_FTLBPageSize 8 /* 12..8 */
+/* bit layout if MMUExtDef=1 */
+#define CP0C4_MMUSizeExt 0 /* 7..0 */
+/* bit layout if MMUExtDef=2 */
+#define CP0C4_FTLBWays 4 /* 7..4 */
+#define CP0C4_FTLBSets 0 /* 3..0 */
+ int32_t CP0_Config5;
+ int32_t CP0_Config5_rw_bitmask;
+#define CP0C5_M 31
+#define CP0C5_K 30
+#define CP0C5_CV 29
+#define CP0C5_EVA 28
+#define CP0C5_MSAEn 27
+#define CP0C5_PMJ 23 /* 25..23 */
+#define CP0C5_WR2 22
+#define CP0C5_NMS 21
+#define CP0C5_ULS 20
+#define CP0C5_XPA 19
+#define CP0C5_CRCP 18
+#define CP0C5_MI 17
+#define CP0C5_GI 15 /* 16..15 */
+#define CP0C5_CA2 14
+#define CP0C5_XNP 13
+#define CP0C5_DEC 11
+#define CP0C5_L2C 10
+#define CP0C5_UFE 9
+#define CP0C5_FRE 8
+#define CP0C5_VP 7
+#define CP0C5_SBRI 6
+#define CP0C5_MVH 5
+#define CP0C5_LLB 4
+#define CP0C5_MRP 3
+#define CP0C5_UFR 2
+#define CP0C5_NFExists 0
+ int32_t CP0_Config6;
+ int32_t CP0_Config6_rw_bitmask;
+#define CP0C6_BPPASS 31
+#define CP0C6_KPOS 24
+#define CP0C6_KE 23
+#define CP0C6_VTLBONLY 22
+#define CP0C6_LASX 21
+#define CP0C6_SSEN 20
+#define CP0C6_DISDRTIME 19
+#define CP0C6_PIXNUEN 18
+#define CP0C6_SCRAND 17
+#define CP0C6_LLEXCEN 16
+#define CP0C6_DISVC 15
+#define CP0C6_VCLRU 14
+#define CP0C6_DCLRU 13
+#define CP0C6_PIXUEN 12
+#define CP0C6_DISBLKLYEN 11
+#define CP0C6_UMEMUALEN 10
+#define CP0C6_SFBEN 8
+#define CP0C6_FLTINT 7
+#define CP0C6_VLTINT 6
+#define CP0C6_DISBTB 5
+#define CP0C6_STPREFCTL 2
+#define CP0C6_INSTPREF 1
+#define CP0C6_DATAPREF 0
+ int32_t CP0_Config7;
+ int64_t CP0_Config7_rw_bitmask;
+#define CP0C7_NAPCGEN 2
+#define CP0C7_UNIMUEN 1
+#define CP0C7_VFPUCGEN 0
+ uint64_t CP0_LLAddr;
+ uint64_t CP0_MAAR[MIPS_MAAR_MAX];
+ int32_t CP0_MAARI;
+ /* XXX: Maybe make LLAddr per-TC? */
+/*
+ * CP0 Register 17
+ */
+ target_ulong lladdr; /* LL virtual address compared against SC */
+ target_ulong llval;
+ uint64_t llval_wp;
+ uint32_t llnewval_wp;
+ uint64_t CP0_LLAddr_rw_bitmask;
+ int CP0_LLAddr_shift;
+/*
+ * CP0 Register 18
+ */
+ target_ulong CP0_WatchLo[8];
+/*
+ * CP0 Register 19
+ */
+ uint64_t CP0_WatchHi[8];
+#define CP0WH_ASID 16
+/*
+ * CP0 Register 20
+ */
+ target_ulong CP0_XContext;
+ int32_t CP0_Framemask;
+/*
+ * CP0 Register 23
+ */
+ int32_t CP0_Debug;
+#define CP0DB_DBD 31
+#define CP0DB_DM 30
+#define CP0DB_LSNM 28
+#define CP0DB_Doze 27
+#define CP0DB_Halt 26
+#define CP0DB_CNT 25
+#define CP0DB_IBEP 24
+#define CP0DB_DBEP 21
+#define CP0DB_IEXI 20
+#define CP0DB_VER 15
+#define CP0DB_DEC 10
+#define CP0DB_SSt 8
+#define CP0DB_DINT 5
+#define CP0DB_DIB 4
+#define CP0DB_DDBS 3
+#define CP0DB_DDBL 2
+#define CP0DB_DBp 1
+#define CP0DB_DSS 0
+/*
+ * CP0 Register 24
+ */
+ target_ulong CP0_DEPC;
+/*
+ * CP0 Register 25
+ */
+ int32_t CP0_Performance0;
+/*
+ * CP0 Register 26
+ */
+ int32_t CP0_ErrCtl;
+#define CP0EC_WST 29
+#define CP0EC_SPR 28
+#define CP0EC_ITC 26
+/*
+ * CP0 Register 28
+ */
+ uint64_t CP0_TagLo;
+ int32_t CP0_DataLo;
+/*
+ * CP0 Register 29
+ */
+ int32_t CP0_TagHi;
+ int32_t CP0_DataHi;
+/*
+ * CP0 Register 30
+ */
+ target_ulong CP0_ErrorEPC;
+/*
+ * CP0 Register 31
+ */
+ int32_t CP0_DESAVE;
+ target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
+
+ /* We waste some space so we can handle shadow registers like TCs. */
+ TCState tcs[MIPS_SHADOW_SET_MAX];
+ CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
+ /* QEMU */
+ int error_code;
+#define EXCP_TLB_NOMATCH 0x1
+#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
+ uint32_t hflags; /* CPU State */
+ /* TMASK defines different execution modes */
+#define MIPS_HFLAG_TMASK 0x1F5807FF
+#define MIPS_HFLAG_MODE 0x00007 /* execution modes */
+ /*
+ * The KSU flags must be the lowest bits in hflags. The flag order
+ * must be the same as defined for CP0 Status. This allows to use
+ * the bits as the value of mmu_idx.
+ */
+#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */
+#define MIPS_HFLAG_UM 0x00002 /* user mode flag */
+#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */
+#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */
+#define MIPS_HFLAG_DM 0x00004 /* Debug mode */
+#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */
+#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */
+#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */
+#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */
+ /*
+ * True if the MIPS IV COP1X instructions can be used. This also
+ * controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
+ * and RSQRT.D.
+ */
+#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */
+#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */
+#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
+#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */
+#define MIPS_HFLAG_M16_SHIFT 10
+ /*
+ * If translation is interrupted between the branch instruction and
+ * the delay slot, record what type of branch it is so that we can
+ * resume translation properly. It might be possible to reduce
+ * this from three bits to two.
+ */
+#define MIPS_HFLAG_BMASK_BASE 0x803800
+#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */
+#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */
+#define MIPS_HFLAG_BL 0x01800 /* Likely branch */
+#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
+ /* Extra flags about the current pending branch. */
+#define MIPS_HFLAG_BMASK_EXT 0x7C000
+#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */
+#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */
+#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */
+#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */
+#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */
+#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
+ /* MIPS DSP resources access. */
+#define MIPS_HFLAG_DSP 0x080000 /* Enable access to DSP resources. */
+#define MIPS_HFLAG_DSP_R2 0x100000 /* Enable access to DSP R2 resources. */
+#define MIPS_HFLAG_DSP_R3 0x20000000 /* Enable access to DSP R3 resources. */
+ /* Extra flag about HWREna register. */
+#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
+#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */
+#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */
+#define MIPS_HFLAG_MSA 0x1000000
+#define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */
+#define MIPS_HFLAG_ELPA 0x4000000
+#define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr. operates on ITC tag */
+#define MIPS_HFLAG_ERL 0x10000000 /* error level flag */
+ target_ulong btarget; /* Jump / branch target */
+ target_ulong bcond; /* Branch condition (if needed) */
+
+ int SYNCI_Step; /* Address step size for SYNCI */
+ int CCRes; /* Cycle count resolution/divisor */
+ uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
+ uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
+ uint64_t insn_flags; /* Supported instruction set */
+ int saarp;
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved across CPU reset. */
+ CPUMIPSMVPContext *mvp;
+#if !defined(CONFIG_USER_ONLY)
+ CPUMIPSTLBContext *tlb;
+ void *irq[8];
+ struct MIPSITUState *itu;
+ MemoryRegion *itc_tag; /* ITC Configuration Tags */
+#endif
+
+ const mips_def_t *cpu_model;
+ QEMUTimer *timer; /* Internal timer */
+ target_ulong exception_base; /* ExceptionBase input to the core */
+ uint64_t cp0_count_ns; /* CP0_Count clock period (in nanoseconds) */
+};
+
+/**
+ * MIPSCPU:
+ * @env: #CPUMIPSState
+ * @clock: the input clock of this CPU (it may be connected to an
+ *         output clock of another device).
+ * @cp0_count_rate: rate at which the coprocessor 0 counter increments
+ *
+ * A MIPS CPU.
+ */
+struct MIPSCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ Clock *clock;
+ CPUNegativeOffsetState neg;
+ CPUMIPSState env;
+ /*
+ * The Count register acts as a timer, incrementing at a constant rate,
+ * whether or not an instruction is executed, retired, or any forward
+ * progress is made through the pipeline. The rate at which the counter
+ * increments is implementation dependent, and is a function of the
+ * pipeline clock of the processor, not the issue width of the processor.
+ */
+ unsigned cp0_count_rate;
+};
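+
+/*
+ * Worked example (arbitrary values, for illustration only): with a
+ * 200 MHz input clock and cp0_count_rate == 2, CP0_Count ticks at
+ * 100 MHz, so the per-tick period stored in cp0_count_ns is:
+ *
+ *   cp0_count_ns = 1000000000 / (200000000 / 2);   => 10 ns per tick
+ */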
+
+void mips_cpu_list(void);
+
+#define cpu_list mips_cpu_list
+
+extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
+extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
+
+/*
+ * MMU modes definitions. We carefully match the indices with our
+ * hflags layout.
+ */
+#define MMU_USER_IDX 2
+
+static inline int hflags_mmu_index(uint32_t hflags)
+{
+ if (hflags & MIPS_HFLAG_ERL) {
+ return 3; /* ERL */
+ } else {
+ return hflags & MIPS_HFLAG_KSU;
+ }
+}
+
+static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch)
+{
+ return hflags_mmu_index(env->hflags);
+}
+
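+/*
+ * Illustration of the hflags/mmu_idx correspondence described above:
+ *
+ *   hflags_mmu_index(MIPS_HFLAG_UM)                  == 2 (user)
+ *   hflags_mmu_index(MIPS_HFLAG_SM)                  == 1 (supervisor)
+ *   hflags_mmu_index(MIPS_HFLAG_KM)                  == 0 (kernel)
+ *   hflags_mmu_index(MIPS_HFLAG_KM | MIPS_HFLAG_ERL) == 3 (ERL)
+ */
+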
+typedef CPUMIPSState CPUArchState;
+typedef MIPSCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/* Exceptions */
+enum {
+ EXCP_NONE = -1,
+ EXCP_RESET = 0,
+ EXCP_SRESET,
+ EXCP_DSS,
+ EXCP_DINT,
+ EXCP_DDBL,
+ EXCP_DDBS,
+ EXCP_NMI,
+ EXCP_MCHECK,
+ EXCP_EXT_INTERRUPT, /* 8 */
+ EXCP_DFWATCH,
+ EXCP_DIB,
+ EXCP_IWATCH,
+ EXCP_AdEL,
+ EXCP_AdES,
+ EXCP_TLBF,
+ EXCP_IBE,
+ EXCP_DBp, /* 16 */
+ EXCP_SYSCALL,
+ EXCP_BREAK,
+ EXCP_CpU,
+ EXCP_RI,
+ EXCP_OVERFLOW,
+ EXCP_TRAP,
+ EXCP_FPE,
+ EXCP_DWATCH, /* 24 */
+ EXCP_LTLBL,
+ EXCP_TLBL,
+ EXCP_TLBS,
+ EXCP_DBE,
+ EXCP_THREAD,
+ EXCP_MDMX,
+ EXCP_C2E,
+ EXCP_CACHE, /* 32 */
+ EXCP_DSPDIS,
+ EXCP_MSADIS,
+ EXCP_MSAFPE,
+ EXCP_TLBXI,
+ EXCP_TLBRI,
+
+ EXCP_LAST = EXCP_TLBRI,
+};
+
+/*
+ * This is an internally generated WAKE request line.
+ * It is driven by the CPU itself. Raised when the MT
+ * block wants to wake a VPE from an inactive state and
+ * cleared when VPE goes from active to inactive.
+ */
+#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
+
+#define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU
+#define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_MIPS_CPU
+
+bool cpu_type_supports_cps_smp(const char *cpu_type);
+bool cpu_supports_isa(const CPUMIPSState *env, uint64_t isa_mask);
+bool cpu_type_supports_isa(const char *cpu_type, uint64_t isa);
+
+/* Check presence of MSA implementation */
+static inline bool ase_msa_available(CPUMIPSState *env)
+{
+ return env->CP0_Config3 & (1 << CP0C3_MSAP);
+}
+
+/* Check presence of multi-threading ASE implementation */
+static inline bool ase_mt_available(CPUMIPSState *env)
+{
+ return env->CP0_Config3 & (1 << CP0C3_MT);
+}
+
+static inline bool cpu_type_is_64bit(const char *cpu_type)
+{
+ return cpu_type_supports_isa(cpu_type, CPU_MIPS64);
+}
+
+void cpu_set_exception_base(int vp_index, target_ulong address);
+
+/* addr.c */
+uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr);
+uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr);
+
+uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr);
+uint64_t cpu_mips_kseg1_to_phys(void *opaque, uint64_t addr);
+uint64_t cpu_mips_phys_to_kseg1(void *opaque, uint64_t addr);
+bool mips_um_ksegs_enabled(void);
+void mips_um_ksegs_enable(void);
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* mips_int.c */
+void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
+
+/* mips_itu.c */
+void itc_reconfigure(struct MIPSITUState *tag);
+
+#endif /* !CONFIG_USER_ONLY */
+
+/* helper.c */
+target_ulong exception_resume_pc(CPUMIPSState *env);
+
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->active_tc.PC;
+ *cs_base = 0;
+ *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
+ MIPS_HFLAG_HWRENA_ULR);
+}
+
+/**
+ * mips_cpu_create_with_clock:
+ * @cpu_type: a MIPS CPU type name.
+ * @cpu_refclk: the input clock of the new CPU (typically an output
+ *              clock of another device)
+ *
+ * Instantiates a MIPS CPU, sets its input clock to @cpu_refclk,
+ * then realizes the CPU.
+ *
+ * Returns: A #MIPSCPU or %NULL if an error occurred.
+ */
+MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk);
+
+#endif /* MIPS_CPU_H */
diff --git a/target/mips/fpu.c b/target/mips/fpu.c
new file mode 100644
index 000000000..c7c487c1f
--- /dev/null
+++ b/target/mips/fpu.c
@@ -0,0 +1,25 @@
+/*
+ * Helpers for emulation of FPU-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#include "qemu/osdep.h"
+#include "fpu/softfloat-helpers.h"
+#include "fpu_helper.h"
+
+/* convert MIPS rounding mode in FCR31 to IEEE library */
+const FloatRoundMode ieee_rm[4] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down
+};
+
+const char fregnames[32][4] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+};
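+
+/*
+ * Usage sketch: the two low bits of FCR31 hold the rounding-mode
+ * field, so e.g. an RM field of 2 ("round towards +infinity" in the
+ * MIPS encoding) selects:
+ *
+ *   FloatRoundMode rm = ieee_rm[fcr31 & 3];   => float_round_up
+ */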
diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h
new file mode 100644
index 000000000..ad1116e8c
--- /dev/null
+++ b/target/mips/fpu_helper.h
@@ -0,0 +1,65 @@
+/*
+ * Helpers for emulation of FPU-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#include "fpu/softfloat-helpers.h"
+#include "cpu.h"
+
+extern const FloatRoundMode ieee_rm[4];
+
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
+static inline void restore_rounding_mode(CPUMIPSState *env)
+{
+ set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPUMIPSState *env)
+{
+ set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_snan_bit_mode(CPUMIPSState *env)
+{
+ bool nan2008 = env->active_fpu.fcr31 & (1 << FCR31_NAN2008);
+
+ /*
+ * With nan2008, SNaNs are silenced in the usual way.
+     * Before that, SNaNs are not silenced; default NaNs are produced.
+ */
+ set_snan_bit_is_one(!nan2008, &env->active_fpu.fp_status);
+ set_default_nan_mode(!nan2008, &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPUMIPSState *env)
+{
+ restore_rounding_mode(env);
+ restore_flush_mode(env);
+ restore_snan_bit_mode(env);
+}
+
+/* MSA */
+
+enum CPUMIPSMSADataFormat {
+ DF_BYTE = 0,
+ DF_HALF,
+ DF_WORD,
+ DF_DOUBLE
+};
+
+static inline void restore_msa_fp_status(CPUMIPSState *env)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
+ bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;
+
+ set_float_rounding_mode(ieee_rm[rounding_mode], status);
+ set_flush_to_zero(flush_to_zero, status);
+ set_flush_inputs_to_zero(flush_to_zero, status);
+}
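+
+/*
+ * Decode sketch, assuming the usual MSACSR layout (RM field in bits
+ * 1..0, FS in bit 24, per the MSA spec): msacsr == 0x01000001 selects
+ * rounding mode 1 (round towards zero) with flush-to-zero enabled:
+ *
+ *   (msacsr & MSACSR_RM_MASK) >> MSACSR_RM;  => 1 (round towards zero)
+ *   (msacsr & MSACSR_FS_MASK) != 0;          => true (flush to zero)
+ */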
diff --git a/target/mips/gdbstub.c b/target/mips/gdbstub.c
new file mode 100644
index 000000000..f1c2a2cf6
--- /dev/null
+++ b/target/mips/gdbstub.c
@@ -0,0 +1,150 @@
+/*
+ * MIPS gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/gdbstub.h"
+#include "fpu_helper.h"
+
+int mips_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_regl(mem_buf, env->active_tc.gpr[n]);
+ }
+ if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) {
+ switch (n) {
+ case 70:
+ return gdb_get_regl(mem_buf, (int32_t)env->active_fpu.fcr31);
+ case 71:
+ return gdb_get_regl(mem_buf, (int32_t)env->active_fpu.fcr0);
+ default:
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ return gdb_get_regl(mem_buf,
+ env->active_fpu.fpr[n - 38].d);
+ } else {
+ return gdb_get_regl(mem_buf,
+ env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
+ }
+ }
+ }
+ switch (n) {
+ case 32:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_Status);
+ case 33:
+ return gdb_get_regl(mem_buf, env->active_tc.LO[0]);
+ case 34:
+ return gdb_get_regl(mem_buf, env->active_tc.HI[0]);
+ case 35:
+ return gdb_get_regl(mem_buf, env->CP0_BadVAddr);
+ case 36:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_Cause);
+ case 37:
+ return gdb_get_regl(mem_buf, env->active_tc.PC |
+ !!(env->hflags & MIPS_HFLAG_M16));
+ case 72:
+ return gdb_get_regl(mem_buf, 0); /* fp */
+ case 89:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_PRid);
+ default:
+ if (n > 89) {
+ return 0;
+ }
+        /* 16 embedded regs (73..88): not modelled, read as zero. */
+ return gdb_get_regl(mem_buf, 0);
+ }
+
+ return 0;
+}
+
+int mips_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ target_ulong tmp;
+
+ tmp = ldtul_p(mem_buf);
+
+ if (n < 32) {
+ env->active_tc.gpr[n] = tmp;
+ return sizeof(target_ulong);
+ }
+ if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) {
+ switch (n) {
+ case 70:
+ env->active_fpu.fcr31 = (tmp & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+ restore_fp_status(env);
+ break;
+ case 71:
+ /* FIR is read-only. Ignore writes. */
+ break;
+ default:
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ env->active_fpu.fpr[n - 38].d = tmp;
+ } else {
+ env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
+ }
+ break;
+ }
+ return sizeof(target_ulong);
+ }
+ switch (n) {
+ case 32:
+#ifndef CONFIG_USER_ONLY
+ cpu_mips_store_status(env, tmp);
+#endif
+ break;
+ case 33:
+ env->active_tc.LO[0] = tmp;
+ break;
+ case 34:
+ env->active_tc.HI[0] = tmp;
+ break;
+ case 35:
+ env->CP0_BadVAddr = tmp;
+ break;
+ case 36:
+#ifndef CONFIG_USER_ONLY
+ cpu_mips_store_cause(env, tmp);
+#endif
+ break;
+ case 37:
+ env->active_tc.PC = tmp & ~(target_ulong)1;
+ if (tmp & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ } else {
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ }
+ break;
+ case 72: /* fp, ignored */
+ break;
+ default:
+ if (n > 89) {
+ return 0;
+ }
+ /* Other registers are readonly. Ignore writes. */
+ break;
+ }
+
+ return sizeof(target_ulong);
+}
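+
+/*
+ * For reference, both accessors above use GDB's MIPS register
+ * numbering: 0..31 GPRs, 32 Status, 33 LO, 34 HI, 35 BadVAddr,
+ * 36 Cause, 37 PC (with the ISA mode bit folded into bit 0),
+ * 38..69 FPRs, 70 FCSR, 71 FIR, 72 "fp", 73..88 embedded registers
+ * (reads return zero, writes are ignored), 89 PRid.
+ */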
diff --git a/target/mips/helper.h b/target/mips/helper.h
new file mode 100644
index 000000000..de32d82e9
--- /dev/null
+++ b/target/mips/helper.h
@@ -0,0 +1,599 @@
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+DEF_HELPER_1(raise_exception_debug, noreturn, env)
+
+#ifdef TARGET_MIPS64
+DEF_HELPER_4(sdl, void, env, tl, tl, int)
+DEF_HELPER_4(sdr, void, env, tl, tl, int)
+#endif
+DEF_HELPER_4(swl, void, env, tl, tl, int)
+DEF_HELPER_4(swr, void, env, tl, tl, int)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(ll, tl, env, tl, int)
+#ifdef TARGET_MIPS64
+DEF_HELPER_3(lld, tl, env, tl, int)
+#endif
+#endif
+
+DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+#ifdef TARGET_MIPS64
+DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+
+DEF_HELPER_FLAGS_4(rotx, TCG_CALL_NO_RWG_SE, tl, tl, i32, i32, i32)
+
+/* microMIPS functions */
+DEF_HELPER_4(lwm, void, env, tl, tl, i32)
+DEF_HELPER_4(swm, void, env, tl, tl, i32)
+#ifdef TARGET_MIPS64
+DEF_HELPER_4(ldm, void, env, tl, tl, i32)
+DEF_HELPER_4(sdm, void, env, tl, tl, i32)
+#endif
+
+DEF_HELPER_2(fork, void, tl, tl)
+DEF_HELPER_2(yield, tl, env, tl)
+
+/* CP1 functions */
+DEF_HELPER_2(cfc1, tl, env, i32)
+DEF_HELPER_4(ctc1, void, env, tl, i32, i32)
+
+DEF_HELPER_2(float_cvtd_s, i64, env, i32)
+DEF_HELPER_2(float_cvtd_w, i64, env, i32)
+DEF_HELPER_2(float_cvtd_l, i64, env, i64)
+DEF_HELPER_2(float_cvtps_pw, i64, env, i64)
+DEF_HELPER_2(float_cvtpw_ps, i64, env, i64)
+DEF_HELPER_2(float_cvts_d, i32, env, i64)
+DEF_HELPER_2(float_cvts_w, i32, env, i32)
+DEF_HELPER_2(float_cvts_l, i32, env, i64)
+DEF_HELPER_2(float_cvts_pl, i32, env, i32)
+DEF_HELPER_2(float_cvts_pu, i32, env, i32)
+
+DEF_HELPER_3(float_addr_ps, i64, env, i64, i64)
+DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64)
+FOP_PROTO(maddf)
+FOP_PROTO(msubf)
+#undef FOP_PROTO
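+/*
+ * For reference, the FOP_PROTO block above expands to declarations
+ * such as:
+ *
+ *   DEF_HELPER_4(float_maddf_s, i32, env, i32, i32, i32)
+ *   DEF_HELPER_4(float_maddf_d, i64, env, i64, i64, i64)
+ */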
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64)
+FOP_PROTO(max)
+FOP_PROTO(maxa)
+FOP_PROTO(min)
+FOP_PROTO(mina)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \
+DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64)
+FOP_PROTO(cvt)
+FOP_PROTO(round)
+FOP_PROTO(trunc)
+FOP_PROTO(ceil)
+FOP_PROTO(floor)
+FOP_PROTO(cvt_2008)
+FOP_PROTO(round_2008)
+FOP_PROTO(trunc_2008)
+FOP_PROTO(ceil_2008)
+FOP_PROTO(floor_2008)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64)
+FOP_PROTO(sqrt)
+FOP_PROTO(rsqrt)
+FOP_PROTO(recip)
+FOP_PROTO(rint)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_1(float_ ## op ## _s, i32, i32) \
+DEF_HELPER_1(float_ ## op ## _d, i64, i64) \
+DEF_HELPER_1(float_ ## op ## _ps, i64, i64)
+FOP_PROTO(abs)
+FOP_PROTO(chs)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _ps, i64, env, i64)
+FOP_PROTO(recip1)
+FOP_PROTO(rsqrt1)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) \
+DEF_HELPER_3(float_ ## op ## _ps, i64, env, i64, i64)
+FOP_PROTO(add)
+FOP_PROTO(sub)
+FOP_PROTO(mul)
+FOP_PROTO(div)
+FOP_PROTO(recip2)
+FOP_PROTO(rsqrt2)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) \
+DEF_HELPER_4(float_ ## op ## _ps, i64, env, i64, i64, i64)
+FOP_PROTO(madd)
+FOP_PROTO(msub)
+FOP_PROTO(nmadd)
+FOP_PROTO(nmsub)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(cmp_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmp_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmpabs_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmp_ps_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_ps_ ## op, void, env, i64, i64, int)
+FOP_PROTO(f)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(olt)
+FOP_PROTO(ult)
+FOP_PROTO(ole)
+FOP_PROTO(ule)
+FOP_PROTO(sf)
+FOP_PROTO(ngle)
+FOP_PROTO(seq)
+FOP_PROTO(ngl)
+FOP_PROTO(lt)
+FOP_PROTO(nge)
+FOP_PROTO(le)
+FOP_PROTO(ngt)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(r6_cmp_d_ ## op, i64, env, i64, i64) \
+DEF_HELPER_3(r6_cmp_s_ ## op, i32, env, i32, i32)
+FOP_PROTO(af)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(lt)
+FOP_PROTO(ult)
+FOP_PROTO(le)
+FOP_PROTO(ule)
+FOP_PROTO(saf)
+FOP_PROTO(sun)
+FOP_PROTO(seq)
+FOP_PROTO(sueq)
+FOP_PROTO(slt)
+FOP_PROTO(sult)
+FOP_PROTO(sle)
+FOP_PROTO(sule)
+FOP_PROTO(or)
+FOP_PROTO(une)
+FOP_PROTO(ne)
+FOP_PROTO(sor)
+FOP_PROTO(sune)
+FOP_PROTO(sne)
+#undef FOP_PROTO
+
+DEF_HELPER_1(rdhwr_cpunum, tl, env)
+DEF_HELPER_1(rdhwr_synci_step, tl, env)
+DEF_HELPER_1(rdhwr_cc, tl, env)
+DEF_HELPER_1(rdhwr_ccres, tl, env)
+DEF_HELPER_1(rdhwr_performance, tl, env)
+DEF_HELPER_1(rdhwr_xnp, tl, env)
+DEF_HELPER_2(pmon, void, env, int)
+DEF_HELPER_1(wait, void, env)
+
+/* Loongson multimedia functions. */
+DEF_HELPER_FLAGS_2(paddsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddush, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddsb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddusb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(psubsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubush, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubsb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubusb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pshufh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packsswh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packsshb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packushb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(punpcklhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpcklbh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhbh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpcklwd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhwd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pavgh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pavgb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaxsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pminsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaxub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pminub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pcmpeqw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgtw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpeqh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgth, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpeqb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgtb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(psllw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psllh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrlw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrlh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psraw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrah, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pmullh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmulhh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmulhuh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaddhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pasubub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(biadd, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pmovmskb, TCG_CALL_NO_RWG_SE, i64, i64)
+
+/*** MIPS DSP ***/
+/* DSP Arithmetic Sub-class insns */
+DEF_HELPER_FLAGS_3(addq_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addq_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addq_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addq_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addu_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(adduh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(adduh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(addu_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(addqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addu_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(adduh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(adduh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(addu_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subq_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subq_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subq_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subq_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subu_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subuh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subuh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(subu_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subu_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subuh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subuh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(subu_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addsc, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addwc, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(modsub, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(raddu_w_qb, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(raddu_l_ob, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(absq_s_qb, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_ph, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_w, 0, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(absq_s_ob, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_qh, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_pw, 0, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(precr_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_ph_w, TCG_CALL_NO_RWG_SE,
+ tl, i32, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_r_ph_w, TCG_CALL_NO_RWG_SE,
+ tl, i32, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_ph_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precrq_rs_ph_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(precr_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, i32)
+DEF_HELPER_FLAGS_3(precr_sra_r_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, i32)
+DEF_HELPER_FLAGS_2(precrq_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precrq_rs_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(precrq_pw_l, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_3(precrqu_s_qb_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(precrqu_s_ob_qh,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, env)
+
+DEF_HELPER_FLAGS_1(preceq_pw_qhl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_1(precequ_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(precequ_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_1(preceu_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(preceu_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+
+/* DSP GPR-Based Shift Sub-class insns */
+DEF_HELPER_FLAGS_3(shll_qb, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_ob, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shll_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shll_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(shrl_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shrl_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shrl_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(shra_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shra_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(shra_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shra_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+
+/* DSP Multiply Sub-class insns */
+DEF_HELPER_FLAGS_3(muleu_s_ph_qbl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleu_s_ph_qbr, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(muleu_s_qh_obl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleu_s_qh_obr, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(mulq_rs_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(mulq_rs_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(muleq_s_w_phl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleq_s_w_phr, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(muleq_s_pw_qhl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleq_s_pw_qhr, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_4(dpau_h_qbl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpau_h_qbr, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpau_h_obl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dpau_h_obr, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsu_h_qbl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsu_h_qbr, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsu_h_obl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dpsu_h_obr, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpa_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpa_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpax_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpaq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpaq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpaqx_s_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpaqx_sa_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dps_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dps_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsx_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsqx_s_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsqx_sa_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(mulsaq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(mulsaq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpaq_sa_l_w, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpaq_sa_l_pw, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsq_sa_l_w, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsq_sa_l_pw, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(mulsaq_s_l_pw, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(maq_s_w_phl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_s_w_phr, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_phl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_phr, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_3(mul_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mul_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_s_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_rs_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_4(mulsa_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(maq_s_w_qhll, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhlr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhrl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhrr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhll, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhlr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhrl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhrr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_l_pwl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_l_pwr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmadd, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmaddu, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmsub, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env)
+#endif
+
+/* DSP Bit/Manipulation Sub-class insns */
+DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl)
+#endif
+
+/* DSP Compare-Pick Sub-class insns */
+DEF_HELPER_FLAGS_3(cmpu_eq_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_lt_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_le_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_2(cmpgu_eq_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_lt_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_le_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(cmp_eq_ph, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_ph, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_ph, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(cmpu_eq_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_lt_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_le_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_eq_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_lt_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_le_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(cmpgu_eq_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_lt_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_le_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(cmp_eq_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_eq_pw, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_pw, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_pw, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(pick_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(pick_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(packrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(packrl_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+
+/* DSP Accumulator and DSPControl Access Sub-class insns */
+DEF_HELPER_FLAGS_3(extr_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extr_r_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extr_rs_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextr_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_r_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_rs_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_l, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_r_l, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_rs_l, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(extr_s_h, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextr_s_h, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(extp, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extpdp, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextp, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextpdp, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shilo, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dshilo, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(mthlip, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dmthlip, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env)
+
+#ifndef CONFIG_USER_ONLY
+#include "tcg/sysemu_helper.h.inc"
+#endif /* !CONFIG_USER_ONLY */
+
+#include "tcg/msa_helper.h.inc"
+
+/* Vendor extensions */
+#include "tcg/vr54xx_helper.h.inc"
diff --git a/target/mips/internal.h b/target/mips/internal.h
new file mode 100644
index 000000000..daddb05fd
--- /dev/null
+++ b/target/mips/internal.h
@@ -0,0 +1,400 @@
+/*
+ * MIPS internal definitions and helpers
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef MIPS_INTERNAL_H
+#define MIPS_INTERNAL_H
+
+#include "exec/memattrs.h"
+#ifdef CONFIG_TCG
+#include "tcg/tcg-internal.h"
+#endif
+
+/*
+ * MMU types, the first four entries have the same layout as the
+ * CP0C0_MT field.
+ */
+enum mips_mmu_types {
+ MMU_TYPE_NONE = 0,
+ MMU_TYPE_R4000 = 1, /* Standard TLB */
+ MMU_TYPE_BAT = 2, /* Block Address Translation */
+ MMU_TYPE_FMT = 3, /* Fixed Mapping */
+ MMU_TYPE_DVF = 4, /* Dual VTLB and FTLB */
+ MMU_TYPE_R3000,
+ MMU_TYPE_R6000,
+ MMU_TYPE_R8000
+};
+
+struct mips_def_t {
+ const char *name;
+ int32_t CP0_PRid;
+ int32_t CP0_Config0;
+ int32_t CP0_Config1;
+ int32_t CP0_Config2;
+ int32_t CP0_Config3;
+ int32_t CP0_Config4;
+ int32_t CP0_Config4_rw_bitmask;
+ int32_t CP0_Config5;
+ int32_t CP0_Config5_rw_bitmask;
+ int32_t CP0_Config6;
+ int32_t CP0_Config6_rw_bitmask;
+ int32_t CP0_Config7;
+ int32_t CP0_Config7_rw_bitmask;
+ target_ulong CP0_LLAddr_rw_bitmask;
+ int CP0_LLAddr_shift;
+ int32_t SYNCI_Step;
+ int32_t CCRes;
+ int32_t CP0_Status_rw_bitmask;
+ int32_t CP0_TCStatus_rw_bitmask;
+ int32_t CP0_SRSCtl;
+ int32_t CP1_fcr0;
+ int32_t CP1_fcr31_rw_bitmask;
+ int32_t CP1_fcr31;
+ int32_t MSAIR;
+ int32_t SEGBITS;
+ int32_t PABITS;
+ int32_t CP0_SRSConf0_rw_bitmask;
+ int32_t CP0_SRSConf0;
+ int32_t CP0_SRSConf1_rw_bitmask;
+ int32_t CP0_SRSConf1;
+ int32_t CP0_SRSConf2_rw_bitmask;
+ int32_t CP0_SRSConf2;
+ int32_t CP0_SRSConf3_rw_bitmask;
+ int32_t CP0_SRSConf3;
+ int32_t CP0_SRSConf4_rw_bitmask;
+ int32_t CP0_SRSConf4;
+ int32_t CP0_PageGrain_rw_bitmask;
+ int32_t CP0_PageGrain;
+ target_ulong CP0_EBaseWG_rw_bitmask;
+ uint64_t insn_flags;
+ enum mips_mmu_types mmu_type;
+ int32_t SAARP;
+};
+
+extern const char regnames[32][3];
+extern const char fregnames[32][4];
+
+extern const struct mips_def_t mips_defs[];
+extern const int mips_defs_number;
+
+int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+#define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL)
+#define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL)
+#define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL)
+#define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL)
+#define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL)
+
+#define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL)
+#define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL)
+
+#if !defined(CONFIG_USER_ONLY)
+
+enum {
+ TLBRET_XI = -6,
+ TLBRET_RI = -5,
+ TLBRET_DIRTY = -4,
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+int get_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx);
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+typedef struct r4k_tlb_t r4k_tlb_t;
+struct r4k_tlb_t {
+ target_ulong VPN;
+ uint32_t PageMask;
+ uint16_t ASID;
+ uint32_t MMID;
+ unsigned int G:1;
+ unsigned int C0:3;
+ unsigned int C1:3;
+ unsigned int V0:1;
+ unsigned int V1:1;
+ unsigned int D0:1;
+ unsigned int D1:1;
+ unsigned int XI0:1;
+ unsigned int XI1:1;
+ unsigned int RI0:1;
+ unsigned int RI1:1;
+ unsigned int EHINV:1;
+ uint64_t PFN[2];
+};
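+
+/*
+ * Match sketch (illustrative only; "current_asid" is a placeholder
+ * name): an entry hits when the page-masked VPN equals the
+ * page-masked virtual address and the entry is either global or
+ * tagged with the current ASID:
+ *
+ *   mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ *   hit = ((address & ~mask) == (tlb->VPN & ~mask)) &&
+ *         (tlb->G || tlb->ASID == current_asid);
+ */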
+
+struct CPUMIPSTLBContext {
+ uint32_t nb_tlb;
+ uint32_t tlb_in_use;
+ int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type);
+ void (*helper_tlbwi)(struct CPUMIPSState *env);
+ void (*helper_tlbwr)(struct CPUMIPSState *env);
+ void (*helper_tlbp)(struct CPUMIPSState *env);
+ void (*helper_tlbr)(struct CPUMIPSState *env);
+ void (*helper_tlbinv)(struct CPUMIPSState *env);
+ void (*helper_tlbinvf)(struct CPUMIPSState *env);
+ union {
+ struct {
+ r4k_tlb_t tlb[MIPS_TLB_MAX];
+ } r4k;
+ } mmu;
+};
+
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
+
+extern const VMStateDescription vmstate_mips_cpu;
+
+#endif /* !CONFIG_USER_ONLY */
+
+static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
+{
+ return (env->CP0_Status & (1 << CP0St_IE)) &&
+ !(env->CP0_Status & (1 << CP0St_EXL)) &&
+ !(env->CP0_Status & (1 << CP0St_ERL)) &&
+ !(env->hflags & MIPS_HFLAG_DM) &&
+ /*
+ * Note that the TCStatus IXMT field is initialized to zero,
+ * and only MT capable cores can set it to one. So we don't
+ * need to check for MT capabilities here.
+ */
+ !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
+}
+
+/* Check whether there is a pending interrupt that is not masked out */
+static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
+{
+ int32_t pending;
+ int32_t status;
+ bool r;
+
+ pending = env->CP0_Cause & CP0Ca_IP_mask;
+ status = env->CP0_Status & CP0Ca_IP_mask;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+ /*
+ * A MIPS configured with a vectorizing external interrupt controller
+ * will feed a vector into the Cause pending lines. The core treats
+ * the status lines as a vector level, not as individual masks.
+ */
+ r = pending > status;
+ } else {
+ /*
+ * A MIPS configured with compatibility or VInt (Vectored Interrupts)
+ * treats the pending lines as individual interrupt lines, the status
+ * lines are individual masks.
+ */
+ r = (pending & status) != 0;
+ }
+ return r;
+}
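+
+/*
+ * Worked example of the two cases above: take pending == 0x8000
+ * (Cause.IP7 set) and status == 0x0400 (only Status.IM2 set). In
+ * VEIC mode 0x8000 > 0x0400, so an interrupt is delivered; in
+ * compatibility/VInt mode 0x8000 & 0x0400 == 0, so it is not.
+ */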
+
+void msa_reset(CPUMIPSState *env);
+
+/* cp0_timer.c */
+uint32_t cpu_mips_get_count(CPUMIPSState *env);
+void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
+void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
+void cpu_mips_start_count(CPUMIPSState *env);
+void cpu_mips_stop_count(CPUMIPSState *env);
+
+static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value)
+{
+ env->active_tc.PC = value & ~(target_ulong)1;
+ if (value & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ } else {
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ }
+}
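+
+/*
+ * Example: mips_env_set_pc(env, 0x80001001) sets PC to 0x80001000
+ * and turns on MIPS_HFLAG_M16, since bit 0 of the value encodes the
+ * compressed (MIPS16/microMIPS) ISA mode rather than an address bit.
+ */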
+
+static inline void restore_pamask(CPUMIPSState *env)
+{
+ if (env->hflags & MIPS_HFLAG_ELPA) {
+ env->PAMask = (1ULL << env->PABITS) - 1;
+ } else {
+ env->PAMask = PAMASK_BASE;
+ }
+}
+
+static inline int mips_vpe_active(CPUMIPSState *env)
+{
+ int active = 1;
+
+ /* Check that the VPE is enabled. */
+ if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
+ active = 0;
+ }
+ /* Check that the VPE is activated. */
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
+ active = 0;
+ }
+
+ /*
+ * Now verify that there are active thread contexts in the VPE.
+ *
+ * This assumes the CPU model will internally reschedule threads
+ * if the active one goes to sleep. If there are no threads available
+ * the active one will be in a sleeping state, and we can turn off
+ * the entire VPE.
+ */
+ if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
+ /* TC is not activated. */
+ active = 0;
+ }
+ if (env->active_tc.CP0_TCHalt & 1) {
+ /* TC is in halt state. */
+ active = 0;
+ }
+
+ return active;
+}
+
+static inline int mips_vp_active(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+
+ /* Check if the VP disabled other VPs (which means the VP is enabled) */
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ return 1;
+ }
+
+ /* Check if the virtual processor is disabled due to a DVP */
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) &&
+ ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static inline void compute_hflags(CPUMIPSState *env)
+{
+ env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
+ MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
+ MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
+ MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
+ MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ env->hflags |= MIPS_HFLAG_ERL;
+ }
+ if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
+ !(env->CP0_Status & (1 << CP0St_ERL)) &&
+ !(env->hflags & MIPS_HFLAG_DM)) {
+ env->hflags |= (env->CP0_Status >> CP0St_KSU) &
+ MIPS_HFLAG_KSU;
+ }
+#if defined(TARGET_MIPS64)
+ if ((env->insn_flags & ISA_MIPS3) &&
+ (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
+ (env->CP0_Status & (1 << CP0St_PX)) ||
+ (env->CP0_Status & (1 << CP0St_UX)))) {
+ env->hflags |= MIPS_HFLAG_64;
+ }
+
+ if (!(env->insn_flags & ISA_MIPS3)) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
+ !(env->CP0_Status & (1 << CP0St_UX))) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ } else if (env->insn_flags & ISA_MIPS_R6) {
+ /* Address wrapping for Supervisor and Kernel is specified in R6 */
+ if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
+ !(env->CP0_Status & (1 << CP0St_SX))) ||
+ (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
+ !(env->CP0_Status & (1 << CP0St_KX)))) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ }
+ }
+#endif
+ if (((env->CP0_Status & (1 << CP0St_CU0)) &&
+ !(env->insn_flags & ISA_MIPS_R6)) ||
+ !(env->hflags & MIPS_HFLAG_KSU)) {
+ env->hflags |= MIPS_HFLAG_CP0;
+ }
+ if (env->CP0_Status & (1 << CP0St_CU1)) {
+ env->hflags |= MIPS_HFLAG_FPU;
+ }
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ env->hflags |= MIPS_HFLAG_F64;
+ }
+ if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
+ (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
+ env->hflags |= MIPS_HFLAG_SBRI;
+ }
+ if (env->insn_flags & ASE_DSP_R3) {
+ /*
+ * Our cpu supports DSP R3 ASE, so enable
+ * access to DSP R3 resources.
+ */
+ if (env->CP0_Status & (1 << CP0St_MX)) {
+ env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
+ MIPS_HFLAG_DSP_R3;
+ }
+ } else if (env->insn_flags & ASE_DSP_R2) {
+ /*
+ * Our cpu supports DSP R2 ASE, so enable
+ * access to DSP R2 resources.
+ */
+ if (env->CP0_Status & (1 << CP0St_MX)) {
+ env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
+ }
+
+ } else if (env->insn_flags & ASE_DSP) {
+ /*
+ * Our cpu supports DSP ASE, so enable
+ * access to DSP resources.
+ */
+ if (env->CP0_Status & (1 << CP0St_MX)) {
+ env->hflags |= MIPS_HFLAG_DSP;
+ }
+
+ }
+ if (env->insn_flags & ISA_MIPS_R2) {
+ if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ } else if (env->insn_flags & ISA_MIPS_R1) {
+ if (env->hflags & MIPS_HFLAG_64) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ } else if (env->insn_flags & ISA_MIPS4) {
+ /*
+ * All supported MIPS IV CPUs use the XX (CU3) to enable
+ * and disable the MIPS IV extensions to the MIPS III ISA.
+ * Some other MIPS IV CPUs ignore the bit, so the check here
+ * would be too restrictive for them.
+ */
+ if (env->CP0_Status & (1U << CP0St_CU3)) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ }
+ if (ase_msa_available(env)) {
+ if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
+ env->hflags |= MIPS_HFLAG_MSA;
+ }
+ }
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+ env->hflags |= MIPS_HFLAG_FRE;
+ }
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
+ if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
+ env->hflags |= MIPS_HFLAG_ELPA;
+ }
+ }
+}
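+
+/*
+ * Example: a CPU in user mode (KSU == UM, EXL/ERL clear, DM clear)
+ * with Status.CU1 set ends up with MIPS_HFLAG_UM | MIPS_HFLAG_FPU in
+ * hflags, so FP instructions translate without a coprocessor-unusable
+ * exception, while MIPS_HFLAG_CP0 stays clear.
+ */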
+
+#endif
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
new file mode 100644
index 000000000..086debd9f
--- /dev/null
+++ b/target/mips/kvm.c
@@ -0,0 +1,1297 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
+#include "sysemu/runstate.h"
+#include "kvm_mips.h"
+#include "hw/boards.h"
+#include "fpu_helper.h"
+
+#define DEBUG_KVM 0
+
+#define DPRINTF(fmt, ...) \
+ do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
+
+static int kvm_mips_fpu_cap;
+static int kvm_mips_msa_cap;
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static void kvm_mips_update_state(void *opaque, bool running, RunState state);
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+ return cs->cpu_index;
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+    /* MIPS has 128 signals, i.e. a 16-byte (128 / 8) sigmask */
+ kvm_set_sigmask_len(s, 16);
+
+ kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
+ kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
+
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ return 0;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int ret = 0;
+
+ qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
+
+ if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
+ if (ret < 0) {
+ /* mark unsupported so it gets disabled on reset */
+ kvm_mips_fpu_cap = 0;
+ ret = 0;
+ }
+ }
+
+ if (kvm_mips_msa_cap && ase_msa_available(env)) {
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
+ if (ret < 0) {
+ /* mark unsupported so it gets disabled on reset */
+ kvm_mips_msa_cap = 0;
+ ret = 0;
+ }
+ }
+
+ DPRINTF("%s\n", __func__);
+ return ret;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
+void kvm_mips_reset_vcpu(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
+ warn_report("KVM does not support FPU, disabling");
+ env->CP0_Config1 &= ~(1 << CP0C1_FP);
+ }
+ if (!kvm_mips_msa_cap && ase_msa_available(env)) {
+ warn_report("KVM does not support MSA, disabling");
+ env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
+ }
+
+ DPRINTF("%s\n", __func__);
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
+static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ int r;
+ struct kvm_mips_interrupt intr;
+
+ qemu_mutex_lock_iothread();
+
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_mips_io_interrupts_pending(cpu)) {
+ intr.cpu = -1;
+ intr.irq = 2;
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+ if (r < 0) {
+ error_report("%s: cpu %d: failed to inject IRQ %x",
+ __func__, cs->cpu_index, intr.irq);
+ }
+ }
+
+ qemu_mutex_unlock_iothread();
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ int ret;
+
+ DPRINTF("%s\n", __func__);
+ switch (run->exit_reason) {
+ default:
+ error_report("%s: unknown exit reason %d",
+ __func__, run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ DPRINTF("%s\n", __func__);
+ return true;
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_mips_interrupt intr;
+
+ assert(kvm_enabled());
+
+ intr.cpu = -1;
+
+ if (level) {
+ intr.irq = irq;
+ } else {
+ intr.irq = -irq;
+ }
+
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+ return 0;
+}
+
+int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
+{
+ CPUState *cs = current_cpu;
+ CPUState *dest_cs = CPU(cpu);
+ struct kvm_mips_interrupt intr;
+
+ assert(kvm_enabled());
+
+ intr.cpu = dest_cs->cpu_index;
+
+ if (level) {
+ intr.irq = irq;
+ } else {
+ intr.irq = -irq;
+ }
+
+ DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
+
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+ return 0;
+}
+
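+/* CP0 register IDs pack (register, select) as (8 * reg + sel), ORed with size flags */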
+#define MIPS_CP0_32(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_RANDOM MIPS_CP0_32(1, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
+#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
+#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
+#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
+#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
+#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
+#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
+
+static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
+ uint32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
+ target_ulong *addr)
+{
+ uint64_t val64 = *addr;
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)&val64
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
+ int64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
+ uint64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
+ uint32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
+ target_ulong *addr)
+{
+ int ret;
+ uint64_t val64 = 0;
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)&val64
+ };
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+ if (ret >= 0) {
+ *addr = val64;
+ }
+ return ret;
+}
+
+static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
+ int64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
+ uint64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+#define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
+#define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
+ (1U << CP0C1_FP))
+#define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
+#define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
+ (1U << CP0C3_MSAP))
+#define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
+#define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
+ (1U << CP0C5_UFE) | \
+ (1U << CP0C5_FRE) | \
+ (1U << CP0C5_UFR))
+#define KVM_REG_MIPS_CP0_CONFIG6_MASK ((1U << CP0C6_BPPASS) | \
+ (0x3fU << CP0C6_KPOS) | \
+ (1U << CP0C6_KE) | \
+ (1U << CP0C6_VTLBONLY) | \
+ (1U << CP0C6_LASX) | \
+ (1U << CP0C6_SSEN) | \
+ (1U << CP0C6_DISDRTIME) | \
+ (1U << CP0C6_PIXNUEN) | \
+ (1U << CP0C6_SCRAND) | \
+ (1U << CP0C6_LLEXCEN) | \
+ (1U << CP0C6_DISVC) | \
+ (1U << CP0C6_VCLRU) | \
+ (1U << CP0C6_DCLRU) | \
+ (1U << CP0C6_PIXUEN) | \
+ (1U << CP0C6_DISBLKLYEN) | \
+ (1U << CP0C6_UMEMUALEN) | \
+ (1U << CP0C6_SFBEN) | \
+ (1U << CP0C6_FLTINT) | \
+ (1U << CP0C6_VLTINT) | \
+ (1U << CP0C6_DISBTB) | \
+ (3U << CP0C6_STPREFCTL) | \
+ (1U << CP0C6_INSTPREF) | \
+ (1U << CP0C6_DATAPREF))
+
+static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr, int32_t mask)
+{
+ int err;
+ int32_t tmp, change;
+
+ err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
+ if (err < 0) {
+ return err;
+ }
+
+ /* only change bits in mask */
+ change = (*addr ^ tmp) & mask;
+ if (!change) {
+ return 0;
+ }
+
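+    /* flip exactly the differing bits within mask so tmp takes *addr's values */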
+ tmp = tmp ^ change;
+ return kvm_mips_put_one_reg(cs, reg_id, &tmp);
+}
+
+/*
+ * We freeze the KVM timer when either the VM clock is stopped or the state is
+ * saved (the state is dirty).
+ */
+
+/*
+ * Save the state of the KVM timer when VM clock is stopped or state is synced
+ * to QEMU.
+ */
+static int kvm_mips_save_count(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ uint64_t count_ctl;
+ int err, ret = 0;
+
+ /* freeze KVM timer */
+ err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
+ ret = err;
+ } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
+ count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ /* read CP0_Cause */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* read CP0_Count */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+/*
+ * Restore the state of the KVM timer when VM clock is restarted or state is
+ * synced to KVM.
+ */
+static int kvm_mips_restore_count(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ uint64_t count_ctl;
+ int err_dc, err, ret = 0;
+
+ /* check the timer is frozen */
+ err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err_dc < 0) {
+ DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
+ ret = err_dc;
+ } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
+ /* freeze timer (sets COUNT_RESUME for us) */
+ count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ /* load CP0_Cause */
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* load CP0_Count */
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* resume KVM timer */
+ if (err_dc >= 0) {
+ count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Handle the VM clock being started or stopped
+ */
+static void kvm_mips_update_state(void *opaque, bool running, RunState state)
+{
+ CPUState *cs = opaque;
+ int ret;
+ uint64_t count_resume;
+
+ /*
+ * If state is already dirty (synced to QEMU) then the KVM timer state is
+ * already saved and can be restored when it is synced back to KVM.
+ */
+ if (!running) {
+ if (!cs->vcpu_dirty) {
+ ret = kvm_mips_save_count(cs);
+ if (ret < 0) {
+ warn_report("Failed saving count");
+ }
+ }
+ } else {
+ /* Set clock restore time to now */
+ count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
+ &count_resume);
+ if (ret < 0) {
+ warn_report("Failed setting COUNT_RESUME");
+ return;
+ }
+
+ if (!cs->vcpu_dirty) {
+ ret = kvm_mips_restore_count(cs);
+ if (ret < 0) {
+ warn_report("Failed restoring count");
+ }
+ }
+ }
+}
+
+static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+ unsigned int i;
+
+ /* Only put FPU state if we're emulating a CPU with an FPU */
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ /* FPU Control Registers */
+ if (level == KVM_PUT_FULL_STATE) {
+ err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
+ &env->active_fpu.fcr0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+ err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
+ &env->active_fpu.fcr31);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /*
+ * FPU register state is a subset of MSA vector state, so don't put FPU
+ * registers if we're emulating a CPU with MSA.
+ */
+ if (!ase_msa_available(env)) {
+ /* Floating point registers */
+ for (i = 0; i < 32; ++i) {
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
+ &env->active_fpu.fpr[i].d);
+ } else {
+                err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
+ &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
+ }
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+ }
+
+ /* Only put MSA state if we're emulating a CPU with MSA */
+ if (ase_msa_available(env)) {
+ /* MSA Control Registers */
+ if (level == KVM_PUT_FULL_STATE) {
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
+ &env->msair);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
+ &env->active_tc.msacsr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* Vector registers (includes FP registers) */
+ for (i = 0; i < 32; ++i) {
+ /* Big endian MSA not supported by QEMU yet anyway */
+ err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
+ env->active_fpu.fpr[i].wr.d);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int kvm_mips_get_fpu_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+ unsigned int i;
+
+ /* Only get FPU state if we're emulating a CPU with an FPU */
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ /* FPU Control Registers */
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
+ &env->active_fpu.fcr0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
+ &env->active_fpu.fcr31);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
+ ret = err;
+ } else {
+ restore_fp_status(env);
+ }
+
+ /*
+     * FPU register state is a subset of MSA vector state, so don't fetch FPU
+ * registers if we're emulating a CPU with MSA.
+ */
+ if (!ase_msa_available(env)) {
+ /* Floating point registers */
+ for (i = 0; i < 32; ++i) {
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
+ &env->active_fpu.fpr[i].d);
+ } else {
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
+ &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
+ }
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+ }
+
+ /* Only get MSA state if we're emulating a CPU with MSA */
+ if (ase_msa_available(env)) {
+ /* MSA Control Registers */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
+ &env->msair);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
+ &env->active_tc.msacsr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
+ ret = err;
+ } else {
+ restore_msa_fp_status(env);
+ }
+
+ /* Vector registers (includes FP registers) */
+ for (i = 0; i < 32; ++i) {
+ /* Big endian MSA not supported by QEMU yet anyway */
+ err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
+ env->active_fpu.fpr[i].wr.d);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+
+ (void)level;
+
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_RANDOM (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
+ &env->CP0_Context);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
+ &env->active_tc.CP0_UserLocal);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
+ &env->CP0_PageMask);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
+ &env->CP0_PageGrain);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PAGEGRAIN (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
+ &env->CP0_PWBase);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PWBASE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
+ &env->CP0_PWField);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PWField (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
+ &env->CP0_PWSize);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PWSIZE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PWCTL (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
+ &env->CP0_BadVAddr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* If VM clock stopped then state will be restored when it is restarted */
+ if (runstate_is_running()) {
+ err = kvm_mips_restore_count(cs);
+ if (err < 0) {
+ ret = err;
+ }
+ }
+
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
+ &env->CP0_EntryHi);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
+ &env->CP0_Compare);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_EBASE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
+ &env->CP0_Config0,
+ KVM_REG_MIPS_CP0_CONFIG_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
+ &env->CP0_Config1,
+ KVM_REG_MIPS_CP0_CONFIG1_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
+ &env->CP0_Config2,
+ KVM_REG_MIPS_CP0_CONFIG2_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
+ &env->CP0_Config3,
+ KVM_REG_MIPS_CP0_CONFIG3_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
+ &env->CP0_Config4,
+ KVM_REG_MIPS_CP0_CONFIG4_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
+ &env->CP0_Config5,
+ KVM_REG_MIPS_CP0_CONFIG5_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6,
+ &env->CP0_Config6,
+ KVM_REG_MIPS_CP0_CONFIG6_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG6 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
+ &env->CP0_XContext);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_XCONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
+ &env->CP0_ErrorEPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
+ &env->CP0_KScratch[0]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
+ &env->CP0_KScratch[1]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
+ &env->CP0_KScratch[2]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
+ &env->CP0_KScratch[3]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
+ &env->CP0_KScratch[4]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
+ &env->CP0_KScratch[5]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_KSCRATCH6 (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int kvm_mips_get_cp0_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_RANDOM, &env->CP0_Random);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_RANDOM (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
+ &env->CP0_Context);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
+ &env->active_tc.CP0_UserLocal);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
+ &env->CP0_PageMask);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEGRAIN,
+ &env->CP0_PageGrain);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PAGEGRAIN (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWBASE,
+ &env->CP0_PWBase);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PWBASE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWFIELD,
+ &env->CP0_PWField);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PWFIELD (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_PWSIZE,
+ &env->CP0_PWSize);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PWSIZE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PWCTL, &env->CP0_PWCtl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PWCtl (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
+ &env->CP0_BadVAddr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
+ &env->CP0_EntryHi);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
+ &env->CP0_Compare);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* If VM clock stopped then state was already saved when it was stopped */
+ if (runstate_is_running()) {
+ err = kvm_mips_save_count(cs);
+ if (err < 0) {
+ ret = err;
+ }
+ }
+
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EBASE, &env->CP0_EBase);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_EBASE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG6, &env->CP0_Config6);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG6 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_XCONTEXT,
+ &env->CP0_XContext);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_XCONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
+ &env->CP0_ErrorEPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH1,
+ &env->CP0_KScratch[0]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH2,
+ &env->CP0_KScratch[1]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH3,
+ &env->CP0_KScratch[2]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH4,
+ &env->CP0_KScratch[3]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH5,
+ &env->CP0_KScratch[4]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_KSCRATCH6,
+ &env->CP0_KScratch[5]);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_KSCRATCH6 (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret;
+ int i;
+
+ /* Set the registers based on QEMU's view of things */
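+    /* KVM's GPR file is 64-bit; sign-extend 32-bit target values via target_long */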
+ for (i = 0; i < 32; i++) {
+ regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
+ }
+
+ regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
+ regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
+ regs.pc = (int64_t)(target_long)env->active_tc.PC;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_mips_put_cp0_registers(cs, level);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_mips_put_fpu_registers(cs, level);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int ret = 0;
+ struct kvm_regs regs;
+ int i;
+
+    /* Get the current register set as KVM sees it */
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ env->active_tc.gpr[i] = regs.gpr[i];
+ }
+
+ env->active_tc.HI[0] = regs.hi;
+ env->active_tc.LO[0] = regs.lo;
+ env->active_tc.PC = regs.pc;
+
+ kvm_mips_get_cp0_registers(cs);
+ kvm_mips_get_fpu_registers(cs);
+
+ return ret;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ abort();
+}
+
+int mips_kvm_type(MachineState *machine, const char *vm_type)
+{
+#if defined(KVM_CAP_MIPS_VZ) || defined(KVM_CAP_MIPS_TE)
+ int r;
+ KVMState *s = KVM_STATE(machine->accelerator);
+#endif
+
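+    /* Prefer full hardware virtualization (VZ) over trap-and-emulate (TE) */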
+#if defined(KVM_CAP_MIPS_VZ)
+ r = kvm_check_extension(s, KVM_CAP_MIPS_VZ);
+ if (r > 0) {
+ return KVM_VM_MIPS_VZ;
+ }
+#endif
+
+#if defined(KVM_CAP_MIPS_TE)
+ r = kvm_check_extension(s, KVM_CAP_MIPS_TE);
+ if (r > 0) {
+ return KVM_VM_MIPS_TE;
+ }
+#endif
+
+ return -1;
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+ return true;
+}
diff --git a/target/mips/kvm_mips.h b/target/mips/kvm_mips.h
new file mode 100644
index 000000000..171d53dbe
--- /dev/null
+++ b/target/mips/kvm_mips.h
@@ -0,0 +1,37 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef KVM_MIPS_H
+#define KVM_MIPS_H
+
+#include "cpu.h"
+
+/**
+ * kvm_mips_reset_vcpu:
+ * @cpu: MIPSCPU
+ *
+ * Called at reset time to set kernel registers to their initial values.
+ */
+void kvm_mips_reset_vcpu(MIPSCPU *cpu);
+
+int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level);
+int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level);
+
+#ifdef CONFIG_KVM
+int mips_kvm_type(MachineState *machine, const char *vm_type);
+#else
+static inline int mips_kvm_type(MachineState *machine, const char *vm_type)
+{
+ return 0;
+}
+#endif
+
+#endif /* KVM_MIPS_H */
diff --git a/target/mips/meson.build b/target/mips/meson.build
new file mode 100644
index 000000000..2407a05d4
--- /dev/null
+++ b/target/mips/meson.build
@@ -0,0 +1,23 @@
+mips_user_ss = ss.source_set()
+mips_softmmu_ss = ss.source_set()
+mips_ss = ss.source_set()
+mips_ss.add(files(
+ 'cpu.c',
+ 'fpu.c',
+ 'gdbstub.c',
+ 'msa.c',
+))
+
+if have_system
+ subdir('sysemu')
+endif
+
+if 'CONFIG_TCG' in config_all
+ subdir('tcg')
+endif
+
+mips_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
+
+target_arch += {'mips': mips_ss}
+target_softmmu_arch += {'mips': mips_softmmu_ss}
+target_user_arch += {'mips': mips_user_ss}
diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h
new file mode 100644
index 000000000..0a12d982a
--- /dev/null
+++ b/target/mips/mips-defs.h
@@ -0,0 +1,94 @@
+#ifndef QEMU_MIPS_DEFS_H
+#define QEMU_MIPS_DEFS_H
+
+/* Real pages are variable size... */
+#define MIPS_TLB_MAX 128
+
+/*
+ * bit definitions for insn_flags (ISAs/ASEs flags)
+ * ------------------------------------------------
+ */
+/*
+ * bits 0-23: MIPS base instruction sets
+ */
+#define ISA_MIPS1 0x0000000000000001ULL
+#define ISA_MIPS2 0x0000000000000002ULL
+#define ISA_MIPS3 0x0000000000000004ULL /* 64-bit */
+#define ISA_MIPS4 0x0000000000000008ULL
+#define ISA_MIPS5 0x0000000000000010ULL
+#define ISA_MIPS_R1 0x0000000000000020ULL
+#define ISA_MIPS_R2 0x0000000000000040ULL
+#define ISA_MIPS_R3 0x0000000000000080ULL
+#define ISA_MIPS_R5 0x0000000000000100ULL
+#define ISA_MIPS_R6 0x0000000000000200ULL
+#define ISA_NANOMIPS32 0x0000000000008000ULL
+/*
+ * bits 24-39: MIPS ASEs
+ */
+#define ASE_MIPS16 0x0000000001000000ULL
+#define ASE_MIPS3D 0x0000000002000000ULL
+#define ASE_MDMX 0x0000000004000000ULL
+#define ASE_DSP 0x0000000008000000ULL
+#define ASE_DSP_R2 0x0000000010000000ULL
+#define ASE_DSP_R3 0x0000000020000000ULL
+#define ASE_MT 0x0000000040000000ULL
+#define ASE_SMARTMIPS 0x0000000080000000ULL
+#define ASE_MICROMIPS 0x0000000100000000ULL
+/*
+ * bits 40-51: vendor-specific base instruction sets
+ */
+#define INSN_VR54XX 0x0000010000000000ULL
+#define INSN_R5900 0x0000020000000000ULL
+#define INSN_LOONGSON2E 0x0000040000000000ULL
+#define INSN_LOONGSON2F 0x0000080000000000ULL
+#define INSN_LOONGSON3A 0x0000100000000000ULL
+/*
+ * bits 52-63: vendor-specific ASEs
+ */
+/* MultiMedia Instructions defined by R5900 */
+#define ASE_MMI 0x0010000000000000ULL
+/* MIPS eXtension/enhanced Unit defined by Ingenic */
+#define ASE_MXU 0x0020000000000000ULL
+/* Loongson MultiMedia Instructions */
+#define ASE_LMMI 0x0040000000000000ULL
+/* Loongson EXTensions */
+#define ASE_LEXT 0x0080000000000000ULL
+
+/* MIPS CPU defines. */
+#define CPU_MIPS1 (ISA_MIPS1)
+#define CPU_MIPS2 (CPU_MIPS1 | ISA_MIPS2)
+#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3)
+#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4)
+#define CPU_MIPS5 (CPU_MIPS4 | ISA_MIPS5)
+
+#define CPU_MIPS64 (ISA_MIPS3)
+
+/* MIPS Technologies "Release 1" */
+#define CPU_MIPS32R1 (CPU_MIPS2 | ISA_MIPS_R1)
+#define CPU_MIPS64R1 (CPU_MIPS5 | CPU_MIPS32R1)
+
+/* MIPS Technologies "Release 2" */
+#define CPU_MIPS32R2 (CPU_MIPS32R1 | ISA_MIPS_R2)
+#define CPU_MIPS64R2 (CPU_MIPS64R1 | CPU_MIPS32R2)
+
+/* MIPS Technologies "Release 3" */
+#define CPU_MIPS32R3 (CPU_MIPS32R2 | ISA_MIPS_R3)
+#define CPU_MIPS64R3 (CPU_MIPS64R2 | CPU_MIPS32R3)
+
+/* MIPS Technologies "Release 5" */
+#define CPU_MIPS32R5 (CPU_MIPS32R3 | ISA_MIPS_R5)
+#define CPU_MIPS64R5 (CPU_MIPS64R3 | CPU_MIPS32R5)
+
+/* MIPS Technologies "Release 6" */
+#define CPU_MIPS32R6 (CPU_MIPS32R5 | ISA_MIPS_R6)
+#define CPU_MIPS64R6 (CPU_MIPS64R5 | CPU_MIPS32R6)
+
+/*
+ * Strictly follow the architecture standard:
+ * - Disallow "special" instruction handling for PMON/SPIM.
+ * Note that we still maintain Count/Compare to match the host clock.
+ *
+ * #define MIPS_STRICT_STANDARD 1
+ */
+
+#endif /* QEMU_MIPS_DEFS_H */
diff --git a/target/mips/msa.c b/target/mips/msa.c
new file mode 100644
index 000000000..61f1a9a59
--- /dev/null
+++ b/target/mips/msa.c
@@ -0,0 +1,60 @@
+/*
+ * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "fpu/softfloat.h"
+#include "fpu_helper.h"
+
+void msa_reset(CPUMIPSState *env)
+{
+ if (!ase_msa_available(env)) {
+ return;
+ }
+
+#ifdef CONFIG_USER_ONLY
+ /* MSA access enabled */
+ env->CP0_Config5 |= 1 << CP0C5_MSAEn;
+ env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR);
+#endif
+
+ /*
+ * MSA CSR:
+ * - non-signaling floating point exception mode off (NX bit is 0)
+ * - Cause, Enables, and Flags are all 0
+ * - round to nearest / ties to even (RM bits are 0)
+ */
+ env->active_tc.msacsr = 0;
+
+ restore_msa_fp_status(env);
+
+    /* tininess detected after rounding */
+ set_float_detect_tininess(float_tininess_after_rounding,
+ &env->active_tc.msa_fp_status);
+
+ /* clear float_status exception flags */
+ set_float_exception_flags(0, &env->active_tc.msa_fp_status);
+
+ /* clear float_status nan mode */
+ set_default_nan_mode(0, &env->active_tc.msa_fp_status);
+
+    /* set proper signaling NaN bit meaning ("1" means "quiet") */
+ set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
+}
diff --git a/target/mips/sysemu/addr.c b/target/mips/sysemu/addr.c
new file mode 100644
index 000000000..86f1c129c
--- /dev/null
+++ b/target/mips/sysemu/addr.c
@@ -0,0 +1,61 @@
+/*
+ * QEMU MIPS address translation support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+static int mips_um_ksegs;
+
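+/*
+ * KSEG0/KSEG1 are fixed 512 MiB unmapped windows onto low physical memory;
+ * masking with 0x1fffffff strips the segment bits.
+ */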
+uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr)
+{
+ return addr & 0x1fffffffll;
+}
+
+uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr)
+{
+ return addr | ~0x7fffffffll;
+}
+
+uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr)
+{
+ return addr | 0x40000000ll;
+}
+
+uint64_t cpu_mips_kseg1_to_phys(void *opaque, uint64_t addr)
+{
+ return addr & 0x1fffffffll;
+}
+
+uint64_t cpu_mips_phys_to_kseg1(void *opaque, uint64_t addr)
+{
+ return (addr & 0x1fffffffll) | 0xffffffffa0000000ll;
+}
+
+bool mips_um_ksegs_enabled(void)
+{
+ return mips_um_ksegs;
+}
+
+void mips_um_ksegs_enable(void)
+{
+ mips_um_ksegs = 1;
+}
diff --git a/target/mips/sysemu/cp0.c b/target/mips/sysemu/cp0.c
new file mode 100644
index 000000000..bae37f515
--- /dev/null
+++ b/target/mips/sysemu/cp0.c
@@ -0,0 +1,123 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+
+/* Called for updates to CP0_Status. */
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
+{
+ int32_t tcstatus, *tcst;
+ uint32_t v = cpu->CP0_Status;
+ uint32_t cu, mx, asid, ksu;
+ uint32_t mask = ((1 << CP0TCSt_TCU3)
+ | (1 << CP0TCSt_TCU2)
+ | (1 << CP0TCSt_TCU1)
+ | (1 << CP0TCSt_TCU0)
+ | (1 << CP0TCSt_TMX)
+ | (3 << CP0TCSt_TKSU)
+ | (0xff << CP0TCSt_TASID));
+
+ cu = (v >> CP0St_CU0) & 0xf;
+ mx = (v >> CP0St_MX) & 0x1;
+ ksu = (v >> CP0St_KSU) & 0x3;
+ asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+
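+    /* repack CU, MX, KSU and the ASID into the per-TC TCStatus layout */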
+ tcstatus = cu << CP0TCSt_TCU0;
+ tcstatus |= mx << CP0TCSt_TMX;
+ tcstatus |= ksu << CP0TCSt_TKSU;
+ tcstatus |= asid;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~mask;
+ *tcst |= tcstatus;
+ compute_hflags(cpu);
+}
+
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
+{
+ uint32_t mask = env->CP0_Status_rw_bitmask;
+ target_ulong old = env->CP0_Status;
+
+ if (env->insn_flags & ISA_MIPS_R6) {
+ bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
+#if defined(TARGET_MIPS64)
+ uint32_t ksux = (1 << CP0St_KX) & val;
+ ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
+ ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
+ val = (val & ~(7 << CP0St_UX)) | ksux;
+#endif
+ if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
+ mask &= ~(3 << CP0St_KSU);
+ }
+ mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
+ }
+
+ env->CP0_Status = (old & ~mask) | (val & mask);
+#if defined(TARGET_MIPS64)
+ if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
+ /* Access to at least one of the 64-bit segments has been disabled */
+ tlb_flush(env_cpu(env));
+ }
+#endif
+ if (ase_mt_available(env)) {
+ sync_c0_status(env, env, env->current_tc);
+ } else {
+ compute_hflags(env);
+ }
+}
+
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
+{
+ uint32_t mask = 0x00C00300;
+ uint32_t old = env->CP0_Cause;
+ int i;
+
+ if (env->insn_flags & ISA_MIPS_R2) {
+ mask |= 1 << CP0Ca_DC;
+ }
+ if (env->insn_flags & ISA_MIPS_R6) {
+ mask &= ~((1 << CP0Ca_WP) & val);
+ }
+
+ env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
+
+ if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ cpu_mips_stop_count(env);
+ } else {
+ cpu_mips_start_count(env);
+ }
+ }
+
+ /* Set/reset software interrupts */
+ for (i = 0 ; i < 2 ; i++) {
+ if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
+ cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
+ }
+ }
+}
diff --git a/target/mips/sysemu/cp0_timer.c b/target/mips/sysemu/cp0_timer.c
new file mode 100644
index 000000000..70de95d33
--- /dev/null
+++ b/target/mips/sysemu/cp0_timer.c
@@ -0,0 +1,145 @@
+/*
+ * QEMU MIPS timer support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/mips/cpudevs.h"
+#include "qemu/timer.h"
+#include "sysemu/kvm.h"
+#include "internal.h"
+
+/* MIPS R4K timer */
+static void cpu_mips_timer_update(CPUMIPSState *env)
+{
+ uint64_t now_ns, next_ns;
+ uint32_t wait;
+
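+    /*
+     * Count advances with virtual time; the 32-bit subtraction wraps modulo
+     * 2^32, giving the number of ticks left until Count equals Compare.
+     */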
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ wait = env->CP0_Compare - env->CP0_Count -
+ (uint32_t)(now_ns / env->cp0_count_ns);
+ next_ns = now_ns + (uint64_t)wait * env->cp0_count_ns;
+ timer_mod(env->timer, next_ns);
+}
+
+/* Expire the timer. */
+static void cpu_mips_timer_expire(CPUMIPSState *env)
+{
+ cpu_mips_timer_update(env);
+ if (env->insn_flags & ISA_MIPS_R2) {
+ env->CP0_Cause |= 1 << CP0Ca_TI;
+ }
+ qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
+}
+
+uint32_t cpu_mips_get_count(CPUMIPSState *env)
+{
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ return env->CP0_Count;
+ } else {
+ uint64_t now_ns;
+
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ if (timer_pending(env->timer)
+ && timer_expired(env->timer, now_ns)) {
+ /* The timer has already expired. */
+ cpu_mips_timer_expire(env);
+ }
+
+ return env->CP0_Count + (uint32_t)(now_ns / env->cp0_count_ns);
+ }
+}
+
+void cpu_mips_store_count(CPUMIPSState *env, uint32_t count)
+{
+ /*
+ * This gets called from cpu_state_reset(), potentially before timer init.
+     * So env->timer may be NULL (as it also is when KVM is enabled), in which
+     * case treat the timer as disabled.
+ */
+ if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer) {
+ env->CP0_Count = count;
+ } else {
+ /* Store new count register */
+ env->CP0_Count = count -
+ (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
+ env->cp0_count_ns);
+        /* Update the timer */
+ cpu_mips_timer_update(env);
+ }
+}
+
+void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value)
+{
+ env->CP0_Compare = value;
+ if (!(env->CP0_Cause & (1 << CP0Ca_DC))) {
+ cpu_mips_timer_update(env);
+ }
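+    /* From R2 on, writing Compare acknowledges (clears) the Cause.TI timer interrupt */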
+ if (env->insn_flags & ISA_MIPS_R2) {
+ env->CP0_Cause &= ~(1 << CP0Ca_TI);
+ }
+ qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
+}
+
+void cpu_mips_start_count(CPUMIPSState *env)
+{
+ cpu_mips_store_count(env, env->CP0_Count);
+}
+
+void cpu_mips_stop_count(CPUMIPSState *env)
+{
+ /* Store the current value */
+ env->CP0_Count += (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
+ env->cp0_count_ns);
+}
+
+static void mips_timer_cb(void *opaque)
+{
+ CPUMIPSState *env;
+
+ env = opaque;
+
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ return;
+ }
+
+ /*
+ * ??? This callback should occur when the counter is exactly equal to
+ * the comparator value. Offset the count by one to avoid immediately
+ * retriggering the callback before any virtual time has passed.
+ */
+ env->CP0_Count++;
+ cpu_mips_timer_expire(env);
+ env->CP0_Count--;
+}
+
+void cpu_mips_clock_init(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ /*
+     * If we're in KVM mode, don't create the periodic timer; that is handled
+     * in the kernel.
+ */
+ if (!kvm_enabled()) {
+ env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &mips_timer_cb, env);
+ }
+}
diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c
new file mode 100644
index 000000000..80d37f9c2
--- /dev/null
+++ b/target/mips/sysemu/machine.c
@@ -0,0 +1,333 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "migration/cpu.h"
+#include "fpu_helper.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ MIPSCPU *cpu = opaque;
+ CPUMIPSState *env = &cpu->env;
+
+ restore_fp_status(env);
+ restore_msa_fp_status(env);
+ compute_hflags(env);
+ restore_pamask(env);
+
+ return 0;
+}
+
+/* FPU state */
+
+static int get_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Restore entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN / 64; i++) {
+ qemu_get_sbe64s(f, &v->wr.d[i]);
+ }
+ return 0;
+}
+
+static int put_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Save entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN / 64; i++) {
+ qemu_put_sbe64s(f, &v->wr.d[i]);
+ }
+
+ return 0;
+}
+
+const VMStateInfo vmstate_info_fpr = {
+ .name = "fpr",
+ .get = get_fpr,
+ .put = put_fpr,
+};
+
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
+
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+static VMStateField vmstate_fpu_fields[] = {
+ VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32),
+ VMSTATE_UINT32(fcr0, CPUMIPSFPUContext),
+ VMSTATE_UINT32(fcr31, CPUMIPSFPUContext),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+const VMStateDescription vmstate_inactive_fpu = {
+ .name = "cpu/inactive_fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+/* TC state */
+
+static VMStateField vmstate_tc_fields[] = {
+ VMSTATE_UINTTL_ARRAY(gpr, TCState, 32),
+#if defined(TARGET_MIPS64)
+ VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32),
+#endif /* TARGET_MIPS64 */
+ VMSTATE_UINTTL(PC, TCState),
+ VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL(DSPControl, TCState),
+ VMSTATE_INT32(CP0_TCStatus, TCState),
+ VMSTATE_INT32(CP0_TCBind, TCState),
+ VMSTATE_UINTTL(CP0_TCHalt, TCState),
+ VMSTATE_UINTTL(CP0_TCContext, TCState),
+ VMSTATE_UINTTL(CP0_TCSchedule, TCState),
+ VMSTATE_UINTTL(CP0_TCScheFBack, TCState),
+ VMSTATE_INT32(CP0_Debug_tcstatus, TCState),
+ VMSTATE_UINTTL(CP0_UserLocal, TCState),
+ VMSTATE_INT32(msacsr, TCState),
+ VMSTATE_UINTTL_ARRAY(mxu_gpr, TCState, NUMBER_OF_MXU_REGISTERS - 1),
+ VMSTATE_UINTTL(mxu_cr, TCState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_tc = {
+ .name = "cpu/tc",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = vmstate_tc_fields
+};
+
+const VMStateDescription vmstate_inactive_tc = {
+ .name = "cpu/inactive_tc",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = vmstate_tc_fields
+};
+
+/* MVP state */
+
+const VMStateDescription vmstate_mvp = {
+ .name = "cpu/mvp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* TLB state */
+
+static int get_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ r4k_tlb_t *v = pv;
+ uint16_t flags;
+
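+    /*
+     * flags layout: [15]=EHINV [14]=RI1 [13]=RI0 [12]=XI1 [11]=XI0 [10]=G
+     *               [8:7]=C0 [5:4]=C1 [3]=V0 [2]=V1 [1]=D0 [0]=D1
+     */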
+ qemu_get_betls(f, &v->VPN);
+ qemu_get_be32s(f, &v->PageMask);
+ qemu_get_be16s(f, &v->ASID);
+ qemu_get_be16s(f, &flags);
+ v->G = (flags >> 10) & 1;
+ v->C0 = (flags >> 7) & 3;
+ v->C1 = (flags >> 4) & 3;
+ v->V0 = (flags >> 3) & 1;
+ v->V1 = (flags >> 2) & 1;
+ v->D0 = (flags >> 1) & 1;
+ v->D1 = (flags >> 0) & 1;
+ v->EHINV = (flags >> 15) & 1;
+ v->RI1 = (flags >> 14) & 1;
+ v->RI0 = (flags >> 13) & 1;
+ v->XI1 = (flags >> 12) & 1;
+ v->XI0 = (flags >> 11) & 1;
+ qemu_get_be64s(f, &v->PFN[0]);
+ qemu_get_be64s(f, &v->PFN[1]);
+
+ return 0;
+}
+
+static int put_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ r4k_tlb_t *v = pv;
+
+ uint16_t asid = v->ASID;
+ uint16_t flags = ((v->EHINV << 15) |
+ (v->RI1 << 14) |
+ (v->RI0 << 13) |
+ (v->XI1 << 12) |
+ (v->XI0 << 11) |
+ (v->G << 10) |
+ (v->C0 << 7) |
+ (v->C1 << 4) |
+ (v->V0 << 3) |
+ (v->V1 << 2) |
+ (v->D0 << 1) |
+ (v->D1 << 0));
+
+ qemu_put_betls(f, &v->VPN);
+ qemu_put_be32s(f, &v->PageMask);
+ qemu_put_be16s(f, &asid);
+ qemu_put_be16s(f, &flags);
+ qemu_put_be64s(f, &v->PFN[0]);
+ qemu_put_be64s(f, &v->PFN[1]);
+
+ return 0;
+}
+
+const VMStateInfo vmstate_info_tlb = {
+ .name = "tlb_entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t)
+
+#define VMSTATE_TLB_ARRAY(_f, _s, _n) \
+ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
+
+const VMStateDescription vmstate_tlb = {
+ .name = "cpu/tlb",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext),
+ VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext),
+ VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* MIPS CPU state */
+
+const VMStateDescription vmstate_mips_cpu = {
+ .name = "cpu",
+ .version_id = 21,
+ .minimum_version_id = 21,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ /* Active TC */
+ VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState),
+
+ /* Active FPU */
+ VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu,
+ CPUMIPSFPUContext),
+
+ /* MVP */
+ VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp,
+ CPUMIPSMVPContext),
+
+ /* TLB */
+ VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb,
+ CPUMIPSTLBContext),
+
+ /* CPU metastate */
+ VMSTATE_UINT32(env.current_tc, MIPSCPU),
+ VMSTATE_UINT32(env.current_fpu, MIPSCPU),
+ VMSTATE_INT32(env.error_code, MIPSCPU),
+ VMSTATE_UINTTL(env.btarget, MIPSCPU),
+ VMSTATE_UINTTL(env.bcond, MIPSCPU),
+
+ /* Remaining CP0 registers */
+ VMSTATE_INT32(env.CP0_Index, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPControl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Random, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_GlobalNumber, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
+ VMSTATE_INT32(env.CP0_MemoryMapID, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU),
+ VMSTATE_INT32(env.CP0_HWREna, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrX, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Count, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_SAARI, MIPSCPU),
+ VMSTATE_UINT64_ARRAY(env.CP0_SAAR, MIPSCPU, 2),
+ VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Compare, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Status, MIPSCPU),
+ VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Cause, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PRid, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EBase, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_CMGCRBase, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config4, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config5, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config6, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config7, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_LLAddr, MIPSCPU),
+ VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX),
+ VMSTATE_INT32(env.CP0_MAARI, MIPSCPU),
+ VMSTATE_UINTTL(env.lladdr, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.CP0_WatchHi, MIPSCPU, 8),
+ VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Framemask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Debug, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Performance0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_ErrCtl, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_TagHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataHi, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM),
+
+ /* Inactive TC */
+ VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1,
+ vmstate_inactive_tc, TCState),
+ VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1,
+ vmstate_inactive_fpu, CPUMIPSFPUContext),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
diff --git a/target/mips/sysemu/meson.build b/target/mips/sysemu/meson.build
new file mode 100644
index 000000000..cefc22758
--- /dev/null
+++ b/target/mips/sysemu/meson.build
@@ -0,0 +1,7 @@
+mips_softmmu_ss.add(files(
+ 'addr.c',
+ 'cp0.c',
+ 'cp0_timer.c',
+ 'machine.c',
+ 'physaddr.c',
+))
diff --git a/target/mips/sysemu/physaddr.c b/target/mips/sysemu/physaddr.c
new file mode 100644
index 000000000..1918633aa
--- /dev/null
+++ b/target/mips/sysemu/physaddr.c
@@ -0,0 +1,257 @@
+/*
+ * MIPS TLB (Translation lookaside buffer) helpers.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "../internal.h"
+
+static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
+{
+ /*
+ * Interpret access control mode and mmu_idx.
+ *           AdE?     TLB?
+ *      AM  K S U E  K S U E
+ * UK    0  0 1 1 0  0 - - 0
+ * MK    1  0 1 1 0  1 - - !eu
+ * MSK   2  0 0 1 0  1 1 - !eu
+ * MUSK  3  0 0 0 0  1 1 1 !eu
+ * MUSUK 4  0 0 0 0  0 1 1 0
+ * USK   5  0 0 1 0  0 0 - 0
+ * -     6  - - - -  - - - -
+ * UUSK  7  0 0 0 0  0 0 0 0
+ */
+ int32_t adetlb_mask;
+
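+ /*
+ * adetlb_mask keeps one bit per AM value, AM=0 at bit 31, so
+ * (adetlb_mask << am) < 0 tests the current AM's bit via the sign
+ * bit. The AdE bits occupy the top byte; adetlb_mask <<= 8 then
+ * moves the TLB-mapped bits into the same position for check_tlb.
+ */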
+ switch (mmu_idx) {
+ case 3: /* ERL */
+ /* If EU is set, always unmapped */
+ if (eu) {
+ return 0;
+ }
+ /* fall through */
+ case MIPS_HFLAG_KM:
+ /* Never AdE, TLB mapped if AM={1,2,3} */
+ adetlb_mask = 0x70000000;
+ goto check_tlb;
+
+ case MIPS_HFLAG_SM:
+ /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
+ adetlb_mask = 0xc0380000;
+ goto check_ade;
+
+ case MIPS_HFLAG_UM:
+ /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
+ adetlb_mask = 0xe4180000;
+ /* fall through */
+ check_ade:
+ /* Does this AM cause AdE in the current execution mode? */
+ if ((adetlb_mask << am) < 0) {
+ return TLBRET_BADADDR;
+ }
+ adetlb_mask <<= 8;
+ /* fall through */
+ check_tlb:
+ /* Is this AM TLB-mapped in the current execution mode? */
+ return ((adetlb_mask << am) < 0);
+ default:
+ assert(0);
+ return TLBRET_BADADDR;
+ }
+}
+
+static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx,
+ unsigned int am, bool eu,
+ target_ulong segmask,
+ hwaddr physical_base)
+{
+ int mapped = is_seg_am_mapped(am, eu, mmu_idx);
+
+ if (mapped < 0) {
+ /* is_seg_am_mapped can report TLBRET_BADADDR */
+ return mapped;
+ } else if (mapped) {
+ /* The segment is TLB mapped */
+ return env->tlb->map_address(env, physical, prot, real_address,
+ access_type);
+ } else {
+ /* The segment is unmapped */
+ *physical = physical_base | (real_address & segmask);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+ }
+}
+
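+/*
+ * Decode one 16-bit half of a CP0 SegCtl register (callers pass either
+ * the low half or the register shifted right by 16) into its AM, EU and
+ * PA fields and resolve the address through that segment.
+ */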
+static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx,
+ uint16_t segctl, target_ulong segmask)
+{
+ unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
+ bool eu = (segctl >> CP0SC_EU) & 1;
+ hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
+
+ return get_seg_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx, am, eu, segmask,
+ pa & ~(hwaddr)segmask);
+}
+
+int get_physical_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ /* User mode can only access useg/xuseg */
+#if defined(TARGET_MIPS64)
+ int user_mode = mmu_idx == MIPS_HFLAG_UM;
+ int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
+ int kernel_mode = !user_mode && !supervisor_mode;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+#endif
+ int ret = TLBRET_MATCH;
+ /* effective address (modified for KVM T&E kernel segments) */
+ target_ulong address = real_address;
+
+ if (mips_um_ksegs_enabled()) {
+ /* KVM T&E adds guest kernel segments in useg */
+ if (real_address >= KVM_KSEG0_BASE) {
+ if (real_address < KVM_KSEG2_BASE) {
+ /* kseg0 */
+ address += KSEG0_BASE - KVM_KSEG0_BASE;
+ } else if (real_address <= USEG_LIMIT) {
+ /* kseg2/3 */
+ address += KSEG2_BASE - KVM_KSEG2_BASE;
+ }
+ }
+ }
+
+ if (address <= USEG_LIMIT) {
+ /* useg */
+ uint16_t segctl;
+
+ if (address >= 0x40000000UL) {
+ segctl = env->CP0_SegCtl2;
+ } else {
+ segctl = env->CP0_SegCtl2 >> 16;
+ }
+ ret = get_segctl_physical_address(env, physical, prot,
+ real_address, access_type,
+ mmu_idx, segctl, 0x3FFFFFFF);
+#if defined(TARGET_MIPS64)
+ } else if (address < 0x4000000000000000ULL) {
+ /* xuseg */
+ if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0x8000000000000000ULL) {
+ /* xsseg */
+ if ((supervisor_mode || kernel_mode) &&
+ SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xC000000000000000ULL) {
+ /* xkphys */
+ if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
+ /* KX/SX/UX bit to check for each xkphys EVA access mode */
+ static const uint8_t am_ksux[8] = {
+ [CP0SC_AM_UK] = (1u << CP0St_KX),
+ [CP0SC_AM_MK] = (1u << CP0St_KX),
+ [CP0SC_AM_MSK] = (1u << CP0St_SX),
+ [CP0SC_AM_MUSK] = (1u << CP0St_UX),
+ [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
+ [CP0SC_AM_USK] = (1u << CP0St_SX),
+ [6] = (1u << CP0St_KX),
+ [CP0SC_AM_UUSK] = (1u << CP0St_UX),
+ };
+ unsigned int am = CP0SC_AM_UK;
+ unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;
+
+ if (xr & (1 << ((address >> 59) & 0x7))) {
+ am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
+ }
+ /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
+ if (env->CP0_Status & am_ksux[am]) {
+ ret = get_seg_physical_address(env, physical, prot,
+ real_address, access_type,
+ mmu_idx, am, false, env->PAMask,
+ 0);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xFFFFFFFF80000000ULL) {
+ /* xkseg */
+ if (kernel_mode && KX &&
+ address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot,
+ real_address, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+#endif
+ } else if (address < KSEG1_BASE) {
+ /* kseg0 */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
+ } else if (address < KSEG2_BASE) {
+ /* kseg1 */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl1, 0x1FFFFFFF);
+ } else if (address < KSEG3_BASE) {
+ /* sseg (kseg2) */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
+ } else {
+ /*
+ * kseg3
+ * XXX: debug segment is not emulated
+ */
+ ret = get_segctl_physical_address(env, physical, prot, real_address,
+ access_type, mmu_idx,
+ env->CP0_SegCtl0, 0x1FFFFFFF);
+ }
+ return ret;
+}
+
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ hwaddr phys_addr;
+ int prot;
+
+ if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false)) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
diff --git a/target/mips/tcg/dsp_helper.c b/target/mips/tcg/dsp_helper.c
new file mode 100644
index 000000000..09b6e5fb1
--- /dev/null
+++ b/target/mips/tcg/dsp_helper.c
@@ -0,0 +1,3771 @@
+/*
+ * MIPS ASE DSP Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2012 Jia Liu <proljc@gmail.com>
+ * Dongxue Zhang <elta.era@gmail.com>
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/bitops.h"
+
+/*
+ * As the byte ordering doesn't matter, i.e. all columns are treated
+ * identically, these unions can be used directly.
+ */
+typedef union {
+ uint8_t ub[4];
+ int8_t sb[4];
+ uint16_t uh[2];
+ int16_t sh[2];
+ uint32_t uw[1];
+ int32_t sw[1];
+} DSP32Value;
+
+typedef union {
+ uint8_t ub[8];
+ int8_t sb[8];
+ uint16_t uh[4];
+ int16_t sh[4];
+ uint32_t uw[2];
+ int32_t sw[2];
+ uint64_t ul[1];
+ int64_t sl[1];
+} DSP64Value;
+
+/*** MIPS DSP internal functions begin ***/
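+/*
+ * The OVERFLOW macros use the sign-bit identity, with d as the sign-bit
+ * mask: a + b overflows iff a and b share a sign that differs from the
+ * sum c's; a - b overflows iff a and b differ in sign and c's sign
+ * differs from a's.
+ */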
+#define MIPSDSP_ABS(x) (((x) >= 0) ? (x) : -(x))
+#define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~((a) ^ (b)) & ((a) ^ (c)) & (d))
+#define MIPSDSP_OVERFLOW_SUB(a, b, c, d) (((a) ^ (b)) & ((a) ^ (c)) & (d))
+
+static inline void set_DSPControl_overflow_flag(uint32_t flag, int position,
+ CPUMIPSState *env)
+{
+ env->active_tc.DSPControl |= (target_ulong)flag << position;
+}
+
+static inline void set_DSPControl_carryflag(bool flag, CPUMIPSState *env)
+{
+ env->active_tc.DSPControl &= ~(1 << 13);
+ env->active_tc.DSPControl |= flag << 13;
+}
+
+static inline uint32_t get_DSPControl_carryflag(CPUMIPSState *env)
+{
+ return (env->active_tc.DSPControl >> 13) & 0x01;
+}
+
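+/* Replace the 'len'-bit field at DSPControl bits [24 + len - 1 : 24]. */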
+static inline void set_DSPControl_24(uint32_t flag, int len, CPUMIPSState *env)
+{
+ uint32_t filter;
+
+ filter = ((0x01 << len) - 1) << 24;
+ filter = ~filter;
+
+ env->active_tc.DSPControl &= filter;
+ env->active_tc.DSPControl |= (target_ulong)flag << 24;
+}
+
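+/* The DSPControl.pos field is 6 bits wide on MIPS32, 7 bits on MIPS64. */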
+static inline void set_DSPControl_pos(uint32_t pos, CPUMIPSState *env)
+{
+ target_ulong dspc;
+
+ dspc = env->active_tc.DSPControl;
+#ifndef TARGET_MIPS64
+ dspc = dspc & 0xFFFFFFC0;
+ dspc |= (pos & 0x3F);
+#else
+ dspc = dspc & 0xFFFFFF80;
+ dspc |= (pos & 0x7F);
+#endif
+ env->active_tc.DSPControl = dspc;
+}
+
+static inline uint32_t get_DSPControl_pos(CPUMIPSState *env)
+{
+ target_ulong dspc;
+ uint32_t pos;
+
+ dspc = env->active_tc.DSPControl;
+
+#ifndef TARGET_MIPS64
+ pos = dspc & 0x3F;
+#else
+ pos = dspc & 0x7F;
+#endif
+
+ return pos;
+}
+
+static inline void set_DSPControl_efi(uint32_t flag, CPUMIPSState *env)
+{
+ env->active_tc.DSPControl &= 0xFFFFBFFF;
+ env->active_tc.DSPControl |= (target_ulong)flag << 14;
+}
+
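+/*
+ * Saturating absolute value: |INT_MIN| is not representable, so it
+ * saturates to INT_MAX and raises the DSPControl overflow flag (bit 20).
+ */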
+#define DO_MIPS_SAT_ABS(size) \
+static inline int##size##_t mipsdsp_sat_abs##size(int##size##_t a, \
+ CPUMIPSState *env) \
+{ \
+ if (a == INT##size##_MIN) { \
+ set_DSPControl_overflow_flag(1, 20, env); \
+ return INT##size##_MAX; \
+ } else { \
+ return MIPSDSP_ABS(a); \
+ } \
+}
+DO_MIPS_SAT_ABS(8)
+DO_MIPS_SAT_ABS(16)
+DO_MIPS_SAT_ABS(32)
+#undef DO_MIPS_SAT_ABS
+
+/* get sum value */
+static inline int16_t mipsdsp_add_i16(int16_t a, int16_t b, CPUMIPSState *env)
+{
+ int16_t tempI;
+
+ tempI = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x8000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempI;
+}
+
+static inline int16_t mipsdsp_sat_add_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int16_t tempS;
+
+ tempS = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempS, 0x8000)) {
+ if (a > 0) {
+ tempS = 0x7FFF;
+ } else {
+ tempS = 0x8000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempS;
+}
+
+static inline int32_t mipsdsp_sat_add_i32(int32_t a, int32_t b,
+ CPUMIPSState *env)
+{
+ int32_t tempI;
+
+ tempI = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x80000000)) {
+ if (a > 0) {
+ tempI = 0x7FFFFFFF;
+ } else {
+ tempI = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempI;
+}
+
+static inline uint8_t mipsdsp_add_u8(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+
+ if (temp & 0x0100) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFF;
+}
+
+static inline uint16_t mipsdsp_add_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t temp;
+
+ temp = (uint32_t)a + (uint32_t)b;
+
+ if (temp & 0x00010000) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFFFF;
+}
+
+static inline uint8_t mipsdsp_sat_add_u8(uint8_t a, uint8_t b,
+ CPUMIPSState *env)
+{
+ uint8_t result;
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+ result = temp & 0xFF;
+
+ if (0x0100 & temp) {
+ result = 0xFF;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return result;
+}
+
+static inline uint16_t mipsdsp_sat_add_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint16_t result;
+ uint32_t temp;
+
+ temp = (uint32_t)a + (uint32_t)b;
+ result = temp & 0xFFFF;
+
+ if (0x00010000 & temp) {
+ result = 0xFFFF;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return result;
+}
+
+static inline int32_t mipsdsp_sat32_acc_q31(int32_t acc, int32_t a,
+ CPUMIPSState *env)
+{
+ int64_t temp;
+ int32_t temp32, temp31, result;
+ int64_t temp_sum;
+
+#ifndef TARGET_MIPS64
+ temp = ((uint64_t)env->active_tc.HI[acc] << 32) |
+ (uint64_t)env->active_tc.LO[acc];
+#else
+ temp = (uint64_t)env->active_tc.LO[acc];
+#endif
+
+ temp_sum = (int64_t)a + temp;
+
+ temp32 = (temp_sum >> 32) & 0x01;
+ temp31 = (temp_sum >> 31) & 0x01;
+ result = temp_sum & 0xFFFFFFFF;
+
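+ /*
+ * In a valid Q31 accumulator bit 32 merely sign-extends bit 31; if
+ * the two differ after the add, the result overflowed and saturates.
+ */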
+ if (temp32 != temp31) {
+ if (temp32 == 0) {
+ result = 0x7FFFFFFF;
+ } else {
+ result = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 16 + acc, env);
+ }
+
+ return result;
+}
+
+#ifdef TARGET_MIPS64
+/* a[0] is LO, a[1] is HI. */
+static inline void mipsdsp_sat64_acc_add_q63(int64_t *ret,
+ int32_t ac,
+ int64_t *a,
+ CPUMIPSState *env)
+{
+ bool temp64;
+
+ ret[0] = env->active_tc.LO[ac] + a[0];
+ ret[1] = env->active_tc.HI[ac] + a[1];
+
+ if (((uint64_t)ret[0] < (uint64_t)env->active_tc.LO[ac]) &&
+ ((uint64_t)ret[0] < (uint64_t)a[0])) {
+ ret[1] += 1;
+ }
+ temp64 = ret[1] & 1;
+ if (temp64 != ((ret[0] >> 63) & 0x01)) {
+ if (temp64) {
+ ret[0] = (0x01ull << 63);
+ ret[1] = ~0ull;
+ } else {
+ ret[0] = (0x01ull << 63) - 1;
+ ret[1] = 0x00;
+ }
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ }
+}
+
+static inline void mipsdsp_sat64_acc_sub_q63(int64_t *ret,
+ int32_t ac,
+ int64_t *a,
+ CPUMIPSState *env)
+{
+ bool temp64;
+
+ ret[0] = env->active_tc.LO[ac] - a[0];
+ ret[1] = env->active_tc.HI[ac] - a[1];
+
+ if ((uint64_t)ret[0] > (uint64_t)env->active_tc.LO[ac]) {
+ ret[1] -= 1;
+ }
+ temp64 = ret[1] & 1;
+ if (temp64 != ((ret[0] >> 63) & 0x01)) {
+ if (temp64) {
+ ret[0] = (0x01ull << 63);
+ ret[1] = ~0ull;
+ } else {
+ ret[0] = (0x01ull << 63) - 1;
+ ret[1] = 0x00;
+ }
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ }
+}
+#endif
+
+static inline int32_t mipsdsp_mul_i16_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = (int32_t)a * (int32_t)b;
+
+ if ((temp > (int)0x7FFF) || (temp < (int)0xFFFF8000)) {
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+ temp &= 0x0000FFFF;
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_mul_u16_u16(int32_t a, int32_t b)
+{
+ return a * b;
+}
+
+#ifdef TARGET_MIPS64
+static inline int32_t mipsdsp_mul_i32_i32(int32_t a, int32_t b)
+{
+ return a * b;
+}
+#endif
+
+static inline int32_t mipsdsp_sat16_mul_i16_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = (int32_t)a * (int32_t)b;
+
+ if (temp > (int)0x7FFF) {
+ temp = 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else if (temp < (int)0xffff8000) {
+ temp = 0xFFFF8000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+ temp &= 0x0000FFFF;
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_mul_q15_q15_overflowflag21(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFFFFFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ }
+
+ return temp;
+}
+
+/* right shift */
+static inline uint8_t mipsdsp_rshift_u8(uint8_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline uint16_t mipsdsp_rshift_u16(uint16_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline int8_t mipsdsp_rashift8(int8_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline int16_t mipsdsp_rashift16(int16_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+#ifdef TARGET_MIPS64
+static inline int32_t mipsdsp_rashift32(int32_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+#endif
+
+static inline int16_t mipsdsp_rshift1_add_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a + (int32_t)b;
+
+ return (temp >> 1) & 0xFFFF;
+}
+
+/* round right shift */
+static inline int16_t mipsdsp_rrshift1_add_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a + (int32_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFF;
+}
+
+static inline int32_t mipsdsp_rshift1_add_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a + (int64_t)b;
+
+ return (temp >> 1) & 0xFFFFFFFF;
+}
+
+static inline int32_t mipsdsp_rrshift1_add_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a + (int64_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFF;
+}
+
+static inline uint8_t mipsdsp_rshift1_add_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_rrshift1_add_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b + 1;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint8_t mipsdsp_rshift1_sub_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_rrshift1_sub_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b + 1;
+
+ return (temp >> 1) & 0x00FF;
+}
+#endif
+
+/* 128 bits long. p[0] is LO, p[1] is HI. */
+static inline void mipsdsp_rndrashift_short_acc(int64_t *p,
+ int32_t ac,
+ int32_t shift,
+ CPUMIPSState *env)
+{
+ int64_t acc;
+
+ acc = ((int64_t)env->active_tc.HI[ac] << 32) |
+ ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF);
+ p[0] = (shift == 0) ? (acc << 1) : (acc >> (shift - 1));
+ p[1] = (acc >> 63) & 0x01;
+}
+
+#ifdef TARGET_MIPS64
+/* 128 bits long. p[0] is LO, p[1] is HI */
+static inline void mipsdsp_rashift_acc(uint64_t *p,
+ uint32_t ac,
+ uint32_t shift,
+ CPUMIPSState *env)
+{
+ uint64_t tempB, tempA;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+ shift = shift & 0x1F;
+
+ if (shift == 0) {
+ p[1] = tempB;
+ p[0] = tempA;
+ } else {
+ p[0] = (tempB << (64 - shift)) | (tempA >> shift);
+ p[1] = (int64_t)tempB >> shift;
+ }
+}
+
+/* 128 bits long. p[0] is LO, p[1] is HI, p[2] is the sign of HI. */
+static inline void mipsdsp_rndrashift_acc(uint64_t *p,
+ uint32_t ac,
+ uint32_t shift,
+ CPUMIPSState *env)
+{
+ int64_t tempB, tempA;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+ shift = shift & 0x3F;
+
+ if (shift == 0) {
+ p[2] = tempB >> 63;
+ p[1] = (tempB << 1) | (tempA >> 63);
+ p[0] = tempA << 1;
+ } else {
+ p[0] = (tempB << (65 - shift)) | (tempA >> (shift - 1));
+ p[1] = (int64_t)tempB >> (shift - 1);
+ if (tempB >= 0) {
+ p[2] = 0x0;
+ } else {
+ p[2] = ~0ull;
+ }
+ }
+}
+#endif
+
+static inline int32_t mipsdsp_mul_q15_q15(int32_t ac, uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFFFFFF;
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ }
+
+ return temp;
+}
+
+static inline int64_t mipsdsp_mul_q31_q31(int32_t ac, uint32_t a, uint32_t b,
+ CPUMIPSState *env)
+{
+ uint64_t temp;
+
+ if ((a == 0x80000000) && (b == 0x80000000)) {
+ temp = (0x01ull << 63) - 1;
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ } else {
+ temp = ((int64_t)(int32_t)a * (int32_t)b) << 1;
+ }
+
+ return temp;
+}
+
+static inline uint16_t mipsdsp_mul_u8_u8(uint8_t a, uint8_t b)
+{
+ return (uint16_t)a * (uint16_t)b;
+}
+
+static inline uint16_t mipsdsp_mul_u8_u16(uint8_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t tempI;
+
+ tempI = (uint32_t)a * (uint32_t)b;
+ if (tempI > 0x0000FFFF) {
+ tempI = 0x0000FFFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+
+ return tempI & 0x0000FFFF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint64_t mipsdsp_mul_u32_u32(uint32_t a, uint32_t b)
+{
+ return (uint64_t)a * (uint64_t)b;
+}
+#endif
+
+static inline int16_t mipsdsp_rndq15_mul_q15_q15(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFF0000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ temp = temp + 0x00008000;
+ }
+
+ return (temp & 0xFFFF0000) >> 16;
+}
+
+static inline int32_t mipsdsp_sat16_mul_q15_q15(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFF0000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = (int16_t)a * (int16_t)b;
+ temp = temp << 1;
+ }
+
+ return (temp >> 16) & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_trunc16_sat16_round(int32_t a,
+ CPUMIPSState *env)
+{
+ uint16_t temp;
+
+ /*
+ * The value 0x00008000 will be added to the input Q31 value, and the code
+ * needs to check if the addition causes an overflow. Since a positive value
+ * is added, overflow can happen in one direction only.
+ */
+ if (a > 0x7FFF7FFF) {
+ temp = 0x7FFF;
+ set_DSPControl_overflow_flag(1, 22, env);
+ } else {
+ temp = ((a + 0x8000) >> 16) & 0xFFFF;
+ }
+
+ return temp;
+}
+
+static inline uint8_t mipsdsp_sat8_reduce_precision(uint16_t a,
+ CPUMIPSState *env)
+{
+ uint16_t mag;
+ uint32_t sign;
+
+ sign = (a >> 15) & 0x01;
+ mag = a & 0x7FFF;
+
+ if (sign == 0) {
+ if (mag > 0x7F80) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return 0xFF;
+ } else {
+ return (mag >> 7) & 0xFFFF;
+ }
+ } else {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return 0x00;
+ }
+}
+
+static inline uint8_t mipsdsp_lshift8(uint8_t a, uint8_t s, CPUMIPSState *env)
+{
+ uint8_t discard;
+
+ if (s != 0) {
+ discard = a >> (8 - s);
+
+ if (discard != 0x00) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ }
+ return a << s;
+}
+
+static inline uint16_t mipsdsp_lshift16(uint16_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint16_t discard;
+
+ if (s != 0) {
+ discard = (int16_t)a >> (15 - s);
+
+ if ((discard != 0x0000) && (discard != 0xFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ }
+ return a << s;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint32_t mipsdsp_lshift32(uint32_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint32_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ discard = (int32_t)a >> (31 - (s - 1));
+
+ if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ return a << s;
+ }
+}
+#endif
+
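+/*
+ * For the saturating left shifts below, 'discard' gathers the bits that
+ * fall off the top together with the sign bit; unless they are all zeros
+ * or all ones, the shift overflowed and the result saturates.
+ */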
+static inline uint16_t mipsdsp_sat16_lshift(uint16_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint8_t sign;
+ uint16_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ sign = (a >> 15) & 0x01;
+ if (sign != 0) {
+ discard = (((0x01 << (16 - s)) - 1) << s) |
+ ((a >> (14 - (s - 1))) & ((0x01 << s) - 1));
+ } else {
+ discard = a >> (14 - (s - 1));
+ }
+
+ if ((discard != 0x0000) && (discard != 0xFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return (sign == 0) ? 0x7FFF : 0x8000;
+ } else {
+ return a << s;
+ }
+ }
+}
+
+static inline uint32_t mipsdsp_sat32_lshift(uint32_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint8_t sign;
+ uint32_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ sign = (a >> 31) & 0x01;
+ if (sign != 0) {
+ discard = (((0x01 << (32 - s)) - 1) << s) |
+ ((a >> (30 - (s - 1))) & ((0x01 << s) - 1));
+ } else {
+ discard = a >> (30 - (s - 1));
+ }
+
+ if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return (sign == 0) ? 0x7FFFFFFF : 0x80000000;
+ } else {
+ return a << s;
+ }
+ }
+}
+
+static inline uint8_t mipsdsp_rnd8_rashift(uint8_t a, uint8_t s)
+{
+ uint32_t temp;
+
+ if (s == 0) {
+ temp = (uint32_t)a << 1;
+ } else {
+ temp = (int32_t)(int8_t)a >> (s - 1);
+ }
+
+ return (temp + 1) >> 1;
+}
+
+static inline uint16_t mipsdsp_rnd16_rashift(uint16_t a, uint8_t s)
+{
+ uint32_t temp;
+
+ if (s == 0) {
+ temp = (uint32_t)a << 1;
+ } else {
+ temp = (int32_t)(int16_t)a >> (s - 1);
+ }
+
+ return (temp + 1) >> 1;
+}
+
+static inline uint32_t mipsdsp_rnd32_rashift(uint32_t a, uint8_t s)
+{
+ int64_t temp;
+
+ if (s == 0) {
+ temp = (uint64_t)a << 1;
+ } else {
+ temp = (int64_t)(int32_t)a >> (s - 1);
+ }
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
+
+static inline uint16_t mipsdsp_sub_i16(int16_t a, int16_t b, CPUMIPSState *env)
+{
+ int16_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline uint16_t mipsdsp_sat16_sub(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int16_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) {
+ if (a >= 0) {
+ temp = 0x7FFF;
+ } else {
+ temp = 0x8000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline uint32_t mipsdsp_sat32_sub(int32_t a, int32_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) {
+ if (a >= 0) {
+ temp = 0x7FFFFFFF;
+ } else {
+ temp = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFFFFFFFFull;
+}
+
+static inline uint16_t mipsdsp_rshift1_sub_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a - (int32_t)b;
+
+ return (temp >> 1) & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_rrshift1_sub_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a - (int32_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0x0000FFFF;
+}
+
+static inline uint32_t mipsdsp_rshift1_sub_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a - (int64_t)b;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
+
+static inline uint32_t mipsdsp_rrshift1_sub_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a - (int64_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
+
+static inline uint16_t mipsdsp_sub_u16_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint8_t temp16;
+ uint32_t temp;
+
+ temp = (uint32_t)a - (uint32_t)b;
+ temp16 = (temp >> 16) & 0x01;
+ if (temp16 == 1) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+ return temp & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_satu16_sub_u16_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint8_t temp16;
+ uint32_t temp;
+
+ temp = (uint32_t)a - (uint32_t)b;
+ temp16 = (temp >> 16) & 0x01;
+
+ if (temp16 == 1) {
+ temp = 0x0000;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x0000FFFF;
+}
+
+static inline uint8_t mipsdsp_sub_u8(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint8_t temp8;
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+ temp8 = (temp >> 8) & 0x01;
+ if (temp8 == 1) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_satu8_sub(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint8_t temp8;
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+ temp8 = (temp >> 8) & 0x01;
+ if (temp8 == 1) {
+ temp = 0x00;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x00FF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint32_t mipsdsp_sub32(int32_t a, int32_t b, CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_add_i32(int32_t a, int32_t b, CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, temp, 0x80000000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+#endif
+
+static inline int32_t mipsdsp_cmp_eq(int32_t a, int32_t b)
+{
+ return a == b;
+}
+
+static inline int32_t mipsdsp_cmp_le(int32_t a, int32_t b)
+{
+ return a <= b;
+}
+
+static inline int32_t mipsdsp_cmp_lt(int32_t a, int32_t b)
+{
+ return a < b;
+}
+
+static inline int32_t mipsdsp_cmpu_eq(uint32_t a, uint32_t b)
+{
+ return a == b;
+}
+
+static inline int32_t mipsdsp_cmpu_le(uint32_t a, uint32_t b)
+{
+ return a <= b;
+}
+
+static inline int32_t mipsdsp_cmpu_lt(uint32_t a, uint32_t b)
+{
+ return a < b;
+}
+/*** MIPS DSP internal functions end ***/
+
+#define MIPSDSP_LHI 0xFFFFFFFF00000000ull
+#define MIPSDSP_LLO 0x00000000FFFFFFFFull
+#define MIPSDSP_HI 0xFFFF0000
+#define MIPSDSP_LO 0x0000FFFF
+#define MIPSDSP_Q3 0xFF000000
+#define MIPSDSP_Q2 0x00FF0000
+#define MIPSDSP_Q1 0x0000FF00
+#define MIPSDSP_Q0 0x000000FF
+
+#define MIPSDSP_SPLIT32_8(num, a, b, c, d) \
+ do { \
+ a = ((num) >> 24) & MIPSDSP_Q0; \
+ b = ((num) >> 16) & MIPSDSP_Q0; \
+ c = ((num) >> 8) & MIPSDSP_Q0; \
+ d = (num) & MIPSDSP_Q0; \
+ } while (0)
+
+#define MIPSDSP_SPLIT32_16(num, a, b) \
+ do { \
+ a = ((num) >> 16) & MIPSDSP_LO; \
+ b = (num) & MIPSDSP_LO; \
+ } while (0)
+
+#define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \
+ (((uint32_t)(a) << 24) | \
+ ((uint32_t)(b) << 16) | \
+ ((uint32_t)(c) << 8) | \
+ ((uint32_t)(d) & 0xFF)))
+#define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \
+ (((uint32_t)(a) << 16) | \
+ ((uint32_t)(b) & 0xFFFF)))
+
+#ifdef TARGET_MIPS64
+#define MIPSDSP_SPLIT64_16(num, a, b, c, d) \
+ do { \
+ a = ((num) >> 48) & MIPSDSP_LO; \
+ b = ((num) >> 32) & MIPSDSP_LO; \
+ c = ((num) >> 16) & MIPSDSP_LO; \
+ d = (num) & MIPSDSP_LO; \
+ } while (0)
+
+#define MIPSDSP_SPLIT64_32(num, a, b) \
+ do { \
+ a = ((num) >> 32) & MIPSDSP_LLO; \
+ b = (num) & MIPSDSP_LLO; \
+ } while (0)
+
+#define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)(a) << 48) | \
+ ((uint64_t)(b) << 32) | \
+ ((uint64_t)(c) << 16) | \
+ (uint64_t)(d))
+#define MIPSDSP_RETURN64_32(a, b) (((uint64_t)(a) << 32) | (uint64_t)(b))
+#endif
+
+/** DSP Arithmetic Sub-class insns **/
+#define MIPSDSP32_UNOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \
+{ \
+ DSP32Value dt; \
+ unsigned int i; \
+ \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \
+ dt.element[i] = mipsdsp_##func(dt.element[i], env); \
+ } \
+ \
+ return (target_long)dt.sw[0]; \
+}
+MIPSDSP32_UNOP_ENV(absq_s_ph, sat_abs16, sh)
+MIPSDSP32_UNOP_ENV(absq_s_qb, sat_abs8, sb)
+MIPSDSP32_UNOP_ENV(absq_s_w, sat_abs32, sw)
+#undef MIPSDSP32_UNOP_ENV
+
+#if defined(TARGET_MIPS64)
+#define MIPSDSP64_UNOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \
+{ \
+ DSP64Value dt; \
+ unsigned int i; \
+ \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \
+ dt.element[i] = mipsdsp_##func(dt.element[i], env); \
+ } \
+ \
+ return dt.sl[0]; \
+}
+MIPSDSP64_UNOP_ENV(absq_s_ob, sat_abs8, sb)
+MIPSDSP64_UNOP_ENV(absq_s_qh, sat_abs16, sh)
+MIPSDSP64_UNOP_ENV(absq_s_pw, sat_abs32, sw)
+#undef MIPSDSP64_UNOP_ENV
+#endif
+
+#define MIPSDSP32_BINOP(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ DSP32Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sw[0] = rs; \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \
+ } \
+ \
+ return (target_long)ds.sw[0]; \
+}
+MIPSDSP32_BINOP(addqh_ph, rshift1_add_q16, sh);
+MIPSDSP32_BINOP(addqh_r_ph, rrshift1_add_q16, sh);
+MIPSDSP32_BINOP(addqh_r_w, rrshift1_add_q32, sw);
+MIPSDSP32_BINOP(addqh_w, rshift1_add_q32, sw);
+MIPSDSP32_BINOP(adduh_qb, rshift1_add_u8, ub);
+MIPSDSP32_BINOP(adduh_r_qb, rrshift1_add_u8, ub);
+MIPSDSP32_BINOP(subqh_ph, rshift1_sub_q16, sh);
+MIPSDSP32_BINOP(subqh_r_ph, rrshift1_sub_q16, sh);
+MIPSDSP32_BINOP(subqh_r_w, rrshift1_sub_q32, sw);
+MIPSDSP32_BINOP(subqh_w, rshift1_sub_q32, sw);
+#undef MIPSDSP32_BINOP
+
+#define MIPSDSP32_BINOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ DSP32Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sw[0] = rs; \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \
+ } \
+ \
+ return (target_long)ds.sw[0]; \
+}
+MIPSDSP32_BINOP_ENV(addq_ph, add_i16, sh)
+MIPSDSP32_BINOP_ENV(addq_s_ph, sat_add_i16, sh)
+MIPSDSP32_BINOP_ENV(addq_s_w, sat_add_i32, sw);
+MIPSDSP32_BINOP_ENV(addu_ph, add_u16, sh)
+MIPSDSP32_BINOP_ENV(addu_qb, add_u8, ub);
+MIPSDSP32_BINOP_ENV(addu_s_ph, sat_add_u16, sh)
+MIPSDSP32_BINOP_ENV(addu_s_qb, sat_add_u8, ub);
+MIPSDSP32_BINOP_ENV(subq_ph, sub_i16, sh);
+MIPSDSP32_BINOP_ENV(subq_s_ph, sat16_sub, sh);
+MIPSDSP32_BINOP_ENV(subq_s_w, sat32_sub, sw);
+MIPSDSP32_BINOP_ENV(subu_ph, sub_u16_u16, sh);
+MIPSDSP32_BINOP_ENV(subu_qb, sub_u8, ub);
+MIPSDSP32_BINOP_ENV(subu_s_ph, satu16_sub_u16_u16, sh);
+MIPSDSP32_BINOP_ENV(subu_s_qb, satu8_sub, ub);
+#undef MIPSDSP32_BINOP_ENV
+
+#ifdef TARGET_MIPS64
+#define MIPSDSP64_BINOP(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ DSP64Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sl[0] = rs; \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \
+ } \
+ \
+ return ds.sl[0]; \
+}
+MIPSDSP64_BINOP(adduh_ob, rshift1_add_u8, ub);
+MIPSDSP64_BINOP(adduh_r_ob, rrshift1_add_u8, ub);
+MIPSDSP64_BINOP(subuh_ob, rshift1_sub_u8, ub);
+MIPSDSP64_BINOP(subuh_r_ob, rrshift1_sub_u8, ub);
+#undef MIPSDSP64_BINOP
+
+#define MIPSDSP64_BINOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ DSP64Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sl[0] = rs; \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \
+ } \
+ \
+ return ds.sl[0]; \
+}
+MIPSDSP64_BINOP_ENV(addq_pw, add_i32, sw);
+MIPSDSP64_BINOP_ENV(addq_qh, add_i16, sh);
+MIPSDSP64_BINOP_ENV(addq_s_pw, sat_add_i32, sw);
+MIPSDSP64_BINOP_ENV(addq_s_qh, sat_add_i16, sh);
+MIPSDSP64_BINOP_ENV(addu_ob, add_u8, uh);
+MIPSDSP64_BINOP_ENV(addu_qh, add_u16, uh);
+MIPSDSP64_BINOP_ENV(addu_s_ob, sat_add_u8, uh);
+MIPSDSP64_BINOP_ENV(addu_s_qh, sat_add_u16, uh);
+MIPSDSP64_BINOP_ENV(subq_pw, sub32, sw);
+MIPSDSP64_BINOP_ENV(subq_qh, sub_i16, sh);
+MIPSDSP64_BINOP_ENV(subq_s_pw, sat32_sub, sw);
+MIPSDSP64_BINOP_ENV(subq_s_qh, sat16_sub, sh);
+MIPSDSP64_BINOP_ENV(subu_ob, sub_u8, uh);
+MIPSDSP64_BINOP_ENV(subu_qh, sub_u16_u16, uh);
+MIPSDSP64_BINOP_ENV(subu_s_ob, satu8_sub, uh);
+MIPSDSP64_BINOP_ENV(subu_s_qh, satu16_sub_u16_u16, uh);
+#undef MIPSDSP64_BINOP_ENV
+
+#endif
+
+#define SUBUH_QB(name, var) \
+target_ulong helper_##name##_qb(target_ulong rs, target_ulong rt) \
+{ \
+ uint8_t rs3, rs2, rs1, rs0; \
+ uint8_t rt3, rt2, rt1, rt0; \
+ uint8_t tempD, tempC, tempB, tempA; \
+ \
+ MIPSDSP_SPLIT32_8(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ tempD = ((uint16_t)rs3 - (uint16_t)rt3 + var) >> 1; \
+ tempC = ((uint16_t)rs2 - (uint16_t)rt2 + var) >> 1; \
+ tempB = ((uint16_t)rs1 - (uint16_t)rt1 + var) >> 1; \
+ tempA = ((uint16_t)rs0 - (uint16_t)rt0 + var) >> 1; \
+ \
+ return ((uint32_t)tempD << 24) | ((uint32_t)tempC << 16) | \
+ ((uint32_t)tempB << 8) | ((uint32_t)tempA); \
+}
+
+SUBUH_QB(subuh, 0);
+SUBUH_QB(subuh_r, 1);
+
+#undef SUBUH_QB
+
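+/*
+ * ADDSC adds two words and latches the carry-out in DSPControl bit 13;
+ * ADDWC below adds that carry back in and flags signed overflow in
+ * DSPControl bit 20 instead.
+ */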
+target_ulong helper_addsc(target_ulong rs, target_ulong rt, CPUMIPSState *env)
+{
+ uint64_t temp, tempRs, tempRt;
+ bool flag;
+
+ tempRs = (uint64_t)rs & MIPSDSP_LLO;
+ tempRt = (uint64_t)rt & MIPSDSP_LLO;
+
+ temp = tempRs + tempRt;
+ flag = (temp & 0x0100000000ull) >> 32;
+ set_DSPControl_carryflag(flag, env);
+
+ return (target_long)(int32_t)(temp & MIPSDSP_LLO);
+}
+
+target_ulong helper_addwc(target_ulong rs, target_ulong rt, CPUMIPSState *env)
+{
+ uint32_t rd;
+ int32_t temp32, temp31;
+ int64_t tempL;
+
+ tempL = (int64_t)(int32_t)rs + (int64_t)(int32_t)rt +
+ get_DSPControl_carryflag(env);
+ temp31 = (tempL >> 31) & 0x01;
+ temp32 = (tempL >> 32) & 0x01;
+
+ if (temp31 != temp32) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ rd = tempL & MIPSDSP_LLO;
+
+ return (target_long)(int32_t)rd;
+}
+
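+/*
+ * MODSUB decrements a circular-buffer index: rt packs the decrement in
+ * bits 7..0 and the wrap-around index in bits 23..8; when rs reaches 0
+ * the index reloads from lastindex instead of going negative.
+ */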
+target_ulong helper_modsub(target_ulong rs, target_ulong rt)
+{
+ int32_t decr;
+ uint16_t lastindex;
+ target_ulong rd;
+
+ decr = rt & MIPSDSP_Q0;
+ lastindex = (rt >> 8) & MIPSDSP_LO;
+
+ if ((rs & MIPSDSP_LLO) == 0x00000000) {
+ rd = (target_ulong)lastindex;
+ } else {
+ rd = rs - decr;
+ }
+
+ return rd;
+}
+
+target_ulong helper_raddu_w_qb(target_ulong rs)
+{
+ target_ulong ret = 0;
+ DSP32Value ds;
+ unsigned int i;
+
+ ds.uw[0] = rs;
+ for (i = 0; i < 4; i++) {
+ ret += ds.ub[i];
+ }
+ return ret;
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_raddu_l_ob(target_ulong rs)
+{
+ target_ulong ret = 0;
+ DSP64Value ds;
+ unsigned int i;
+
+ ds.ul[0] = rs;
+ for (i = 0; i < 8; i++) {
+ ret += ds.ub[i];
+ }
+ return ret;
+}
+#endif
+
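+/*
+ * Pack one byte from each halfword of rs and rt into a word: PRECR
+ * takes the low bytes (shifts 16/0), PRECRQ the high bytes (24/8).
+ */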
+#define PRECR_QB_PH(name, a, b)\
+target_ulong helper_##name##_qb_ph(target_ulong rs, target_ulong rt) \
+{ \
+ uint8_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rs >> a) & MIPSDSP_Q0; \
+ tempC = (rs >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); \
+}
+
+PRECR_QB_PH(precr, 16, 0);
+PRECR_QB_PH(precrq, 24, 8);
+
+#undef PRECR_QB_PH
+
+target_ulong helper_precr_sra_ph_w(uint32_t sa, target_ulong rs,
+ target_ulong rt)
+{
+ uint16_t tempB, tempA;
+
+ tempB = ((int32_t)rt >> sa) & MIPSDSP_LO;
+ tempA = ((int32_t)rs >> sa) & MIPSDSP_LO;
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+target_ulong helper_precr_sra_r_ph_w(uint32_t sa,
+ target_ulong rs, target_ulong rt)
+{
+ uint64_t tempB, tempA;
+
+ /* If sa == 0, (sa - 1) = -1 would be an invalid shift amount, hence the else branch. */
+ if (sa == 0) {
+ tempB = (rt & MIPSDSP_LO) << 1;
+ tempA = (rs & MIPSDSP_LO) << 1;
+ } else {
+ tempB = ((int32_t)rt >> (sa - 1)) + 1;
+ tempA = ((int32_t)rs >> (sa - 1)) + 1;
+ }
+ rt = (((tempB >> 1) & MIPSDSP_LO) << 16) | ((tempA >> 1) & MIPSDSP_LO);
+
+ return (target_long)(int32_t)rt;
+}
+
+target_ulong helper_precrq_ph_w(target_ulong rs, target_ulong rt)
+{
+ uint16_t tempB, tempA;
+
+ tempB = (rs & MIPSDSP_HI) >> 16;
+ tempA = (rt & MIPSDSP_HI) >> 16;
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+target_ulong helper_precrq_rs_ph_w(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint16_t tempB, tempA;
+
+ tempB = mipsdsp_trunc16_sat16_round(rs, env);
+ tempA = mipsdsp_trunc16_sat16_round(rt, env);
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_precr_ob_qh(target_ulong rs, target_ulong rt)
+{
+ uint8_t rs6, rs4, rs2, rs0;
+ uint8_t rt6, rt4, rt2, rt0;
+ uint64_t temp;
+
+ rs6 = (rs >> 48) & MIPSDSP_Q0;
+ rs4 = (rs >> 32) & MIPSDSP_Q0;
+ rs2 = (rs >> 16) & MIPSDSP_Q0;
+ rs0 = rs & MIPSDSP_Q0;
+ rt6 = (rt >> 48) & MIPSDSP_Q0;
+ rt4 = (rt >> 32) & MIPSDSP_Q0;
+ rt2 = (rt >> 16) & MIPSDSP_Q0;
+ rt0 = rt & MIPSDSP_Q0;
+
+ temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) |
+ ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) |
+ ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) |
+ ((uint64_t)rt2 << 8) | (uint64_t)rt0;
+
+ return temp;
+}
+
+/*
+ * In case sa == 0, use rt2, rt0, rs2, rs0.
+ * In case sa != 0, use rt3, rt1, rs3, rs1.
+ */
+#define PRECR_QH_PW(name, var) \
+target_ulong helper_precr_##name##_qh_pw(target_ulong rs, \
+ target_ulong rt, \
+ uint32_t sa) \
+{ \
+ uint16_t rs3, rs2, rs1, rs0; \
+ uint16_t rt3, rt2, rt1, rt0; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ if (sa == 0) { \
+ tempD = rt2 << var; \
+ tempC = rt0 << var; \
+ tempB = rs2 << var; \
+ tempA = rs0 << var; \
+ } else { \
+ tempD = (((int16_t)rt3 >> sa) + var) >> var; \
+ tempC = (((int16_t)rt1 >> sa) + var) >> var; \
+ tempB = (((int16_t)rs3 >> sa) + var) >> var; \
+ tempA = (((int16_t)rs1 >> sa) + var) >> var; \
+ } \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECR_QH_PW(sra, 0);
+PRECR_QH_PW(sra_r, 1);
+
+#undef PRECR_QH_PW
+
+target_ulong helper_precrq_ob_qh(target_ulong rs, target_ulong rt)
+{
+ uint8_t rs6, rs4, rs2, rs0;
+ uint8_t rt6, rt4, rt2, rt0;
+ uint64_t temp;
+
+ rs6 = (rs >> 56) & MIPSDSP_Q0;
+ rs4 = (rs >> 40) & MIPSDSP_Q0;
+ rs2 = (rs >> 24) & MIPSDSP_Q0;
+ rs0 = (rs >> 8) & MIPSDSP_Q0;
+ rt6 = (rt >> 56) & MIPSDSP_Q0;
+ rt4 = (rt >> 40) & MIPSDSP_Q0;
+ rt2 = (rt >> 24) & MIPSDSP_Q0;
+ rt0 = (rt >> 8) & MIPSDSP_Q0;
+
+ temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) |
+ ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) |
+ ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) |
+ ((uint64_t)rt2 << 8) | (uint64_t)rt0;
+
+ return temp;
+}
+
+target_ulong helper_precrq_qh_pw(target_ulong rs, target_ulong rt)
+{
+ uint16_t tempD, tempC, tempB, tempA;
+
+ tempD = (rs >> 48) & MIPSDSP_LO;
+ tempC = (rs >> 16) & MIPSDSP_LO;
+ tempB = (rt >> 48) & MIPSDSP_LO;
+ tempA = (rt >> 16) & MIPSDSP_LO;
+
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA);
+}
+
+target_ulong helper_precrq_rs_qh_pw(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint32_t rs2, rs0;
+ uint32_t rt2, rt0;
+ uint16_t tempD, tempC, tempB, tempA;
+
+ rs2 = (rs >> 32) & MIPSDSP_LLO;
+ rs0 = rs & MIPSDSP_LLO;
+ rt2 = (rt >> 32) & MIPSDSP_LLO;
+ rt0 = rt & MIPSDSP_LLO;
+
+ tempD = mipsdsp_trunc16_sat16_round(rs2, env);
+ tempC = mipsdsp_trunc16_sat16_round(rs0, env);
+ tempB = mipsdsp_trunc16_sat16_round(rt2, env);
+ tempA = mipsdsp_trunc16_sat16_round(rt0, env);
+
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA);
+}
+
+target_ulong helper_precrq_pw_l(target_ulong rs, target_ulong rt)
+{
+ uint32_t tempB, tempA;
+
+ tempB = (rs >> 32) & MIPSDSP_LLO;
+ tempA = (rt >> 32) & MIPSDSP_LLO;
+
+ return MIPSDSP_RETURN64_32(tempB, tempA);
+}
+#endif
+
+target_ulong helper_precrqu_s_qb_ph(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint8_t tempD, tempC, tempB, tempA;
+ uint16_t rsh, rsl, rth, rtl;
+
+ rsh = (rs & MIPSDSP_HI) >> 16;
+ rsl = rs & MIPSDSP_LO;
+ rth = (rt & MIPSDSP_HI) >> 16;
+ rtl = rt & MIPSDSP_LO;
+
+ tempD = mipsdsp_sat8_reduce_precision(rsh, env);
+ tempC = mipsdsp_sat8_reduce_precision(rsl, env);
+ tempB = mipsdsp_sat8_reduce_precision(rth, env);
+ tempA = mipsdsp_sat8_reduce_precision(rtl, env);
+
+ return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_precrqu_s_ob_qh(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ int i;
+ uint16_t rs3, rs2, rs1, rs0;
+ uint16_t rt3, rt2, rt1, rt0;
+ uint8_t temp[8];
+ uint64_t result;
+
+ result = 0;
+
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0);
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0);
+
+ temp[7] = mipsdsp_sat8_reduce_precision(rs3, env);
+ temp[6] = mipsdsp_sat8_reduce_precision(rs2, env);
+ temp[5] = mipsdsp_sat8_reduce_precision(rs1, env);
+ temp[4] = mipsdsp_sat8_reduce_precision(rs0, env);
+ temp[3] = mipsdsp_sat8_reduce_precision(rt3, env);
+ temp[2] = mipsdsp_sat8_reduce_precision(rt2, env);
+ temp[1] = mipsdsp_sat8_reduce_precision(rt1, env);
+ temp[0] = mipsdsp_sat8_reduce_precision(rt0, env);
+
+ for (i = 0; i < 8; i++) {
+ result |= (uint64_t)temp[i] << (8 * i);
+ }
+
+ return result;
+}
+
+#define PRECEQ_PW(name, a, b) \
+target_ulong helper_preceq_pw_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ uint32_t tempBI, tempAI; \
+ \
+ tempB = (rt >> a) & MIPSDSP_LO; \
+ tempA = (rt >> b) & MIPSDSP_LO; \
+ \
+ tempBI = (uint32_t)tempB << 16; \
+ tempAI = (uint32_t)tempA << 16; \
+ \
+ return MIPSDSP_RETURN64_32(tempBI, tempAI); \
+}
+
+PRECEQ_PW(qhl, 48, 32);
+PRECEQ_PW(qhr, 16, 0);
+PRECEQ_PW(qhla, 48, 16);
+PRECEQ_PW(qhra, 32, 0);
+
+#undef PRECEQ_PW
+
+#endif
+
+#define PRECEQU_PH(name, a, b) \
+target_ulong helper_precequ_ph_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ tempB = tempB << 7; \
+ tempA = tempA << 7; \
+ \
+ return MIPSDSP_RETURN32_16(tempB, tempA); \
+}
+
+PRECEQU_PH(qbl, 24, 16);
+PRECEQU_PH(qbr, 8, 0);
+PRECEQU_PH(qbla, 24, 8);
+PRECEQU_PH(qbra, 16, 0);
+
+#undef PRECEQU_PH
+
+#if defined(TARGET_MIPS64)
+#define PRECEQU_QH(name, a, b, c, d) \
+target_ulong helper_precequ_qh_##name(target_ulong rt) \
+{ \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rt >> a) & MIPSDSP_Q0; \
+ tempC = (rt >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> c) & MIPSDSP_Q0; \
+ tempA = (rt >> d) & MIPSDSP_Q0; \
+ \
+ tempD = tempD << 7; \
+ tempC = tempC << 7; \
+ tempB = tempB << 7; \
+ tempA = tempA << 7; \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECEQU_QH(obl, 56, 48, 40, 32);
+PRECEQU_QH(obr, 24, 16, 8, 0);
+PRECEQU_QH(obla, 56, 40, 24, 8);
+PRECEQU_QH(obra, 48, 32, 16, 0);
+
+#undef PRECEQU_QH
+
+#endif
+
+#define PRECEU_PH(name, a, b) \
+target_ulong helper_preceu_ph_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN32_16(tempB, tempA); \
+}
+
+PRECEU_PH(qbl, 24, 16);
+PRECEU_PH(qbr, 8, 0);
+PRECEU_PH(qbla, 24, 8);
+PRECEU_PH(qbra, 16, 0);
+
+#undef PRECEU_PH
+
+#if defined(TARGET_MIPS64)
+#define PRECEU_QH(name, a, b, c, d) \
+target_ulong helper_preceu_qh_##name(target_ulong rt) \
+{ \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rt >> a) & MIPSDSP_Q0; \
+ tempC = (rt >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> c) & MIPSDSP_Q0; \
+ tempA = (rt >> d) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECEU_QH(obl, 56, 48, 40, 32);
+PRECEU_QH(obr, 24, 16, 8, 0);
+PRECEU_QH(obla, 56, 40, 24, 8);
+PRECEU_QH(obra, 48, 32, 16, 0);
+
+#undef PRECEU_QH
+
+#endif
+
+/** DSP GPR-Based Shift Sub-class insns **/
+#define SHIFT_QB(name, func) \
+target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt) \
+{ \
+ uint8_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x07; \
+ \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa); \
+ rt2 = mipsdsp_##func(rt2, sa); \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \
+}
+
+#define SHIFT_QB_ENV(name, func) \
+target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt,\
+ CPUMIPSState *env) \
+{ \
+ uint8_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x07; \
+ \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa, env); \
+ rt2 = mipsdsp_##func(rt2, sa, env); \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \
+}
+
+SHIFT_QB_ENV(shll, lshift8);
+SHIFT_QB(shrl, rshift_u8);
+
+SHIFT_QB(shra, rashift8);
+SHIFT_QB(shra_r, rnd8_rashift);
+
+#undef SHIFT_QB
+#undef SHIFT_QB_ENV
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_OB(name, func) \
+target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa) \
+{ \
+ int i; \
+ uint8_t rt_t[8]; \
+ uint64_t temp; \
+ \
+ sa = sa & 0x07; \
+ temp = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t[i] = mipsdsp_##func(rt_t[i], sa); \
+ temp |= (uint64_t)rt_t[i] << (8 * i); \
+ } \
+ \
+ return temp; \
+}
+
+#define SHIFT_OB_ENV(name, func) \
+target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ int i; \
+ uint8_t rt_t[8]; \
+ uint64_t temp; \
+ \
+ sa = sa & 0x07; \
+ temp = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t[i] = mipsdsp_##func(rt_t[i], sa, env); \
+ temp |= (uint64_t)rt_t[i] << (8 * i); \
+ } \
+ \
+ return temp; \
+}
+
+SHIFT_OB_ENV(shll, lshift8);
+SHIFT_OB(shrl, rshift_u8);
+
+SHIFT_OB(shra, rashift8);
+SHIFT_OB(shra_r, rnd8_rashift);
+
+#undef SHIFT_OB
+#undef SHIFT_OB_ENV
+
+#endif
+
+#define SHIFT_PH(name, func) \
+target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rth, rtl; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ rth = mipsdsp_##func(rth, sa, env); \
+ rtl = mipsdsp_##func(rtl, sa, env); \
+ \
+ return MIPSDSP_RETURN32_16(rth, rtl); \
+}
+
+SHIFT_PH(shll, lshift16);
+SHIFT_PH(shll_s, sat16_lshift);
+
+#undef SHIFT_PH
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_QH(name, func) \
+target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa) \
+{ \
+ uint16_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa); \
+ rt2 = mipsdsp_##func(rt2, sa); \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \
+}
+
+#define SHIFT_QH_ENV(name, func) \
+target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa, env); \
+ rt2 = mipsdsp_##func(rt2, sa, env); \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \
+}
+
+SHIFT_QH_ENV(shll, lshift16);
+SHIFT_QH_ENV(shll_s, sat16_lshift);
+
+SHIFT_QH(shrl, rshift_u16);
+SHIFT_QH(shra, rashift16);
+SHIFT_QH(shra_r, rnd16_rashift);
+
+#undef SHIFT_QH
+#undef SHIFT_QH_ENV
+
+#endif
+
+#define SHIFT_W(name, func) \
+target_ulong helper_##name##_w(target_ulong sa, target_ulong rt) \
+{ \
+ uint32_t temp; \
+ \
+ sa = sa & 0x1F; \
+ temp = mipsdsp_##func(rt, sa); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+#define SHIFT_W_ENV(name, func) \
+target_ulong helper_##name##_w(target_ulong sa, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t temp; \
+ \
+ sa = sa & 0x1F; \
+ temp = mipsdsp_##func(rt, sa, env); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+SHIFT_W_ENV(shll_s, sat32_lshift);
+SHIFT_W(shra_r, rnd32_rashift);
+
+#undef SHIFT_W
+#undef SHIFT_W_ENV
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_PW(name, func) \
+target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa) \
+{ \
+ uint32_t rt1, rt0; \
+ \
+ sa = sa & 0x1F; \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN64_32(rt1, rt0); \
+}
+
+#define SHIFT_PW_ENV(name, func) \
+target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t rt1, rt0; \
+ \
+ sa = sa & 0x1F; \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN64_32(rt1, rt0); \
+}
+
+SHIFT_PW_ENV(shll, lshift32);
+SHIFT_PW_ENV(shll_s, sat32_lshift);
+
+SHIFT_PW(shra, rashift32);
+SHIFT_PW(shra_r, rnd32_rashift);
+
+#undef SHIFT_PW
+#undef SHIFT_PW_ENV
+
+#endif
+
+#define SHIFT_PH(name, func) \
+target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt) \
+{ \
+ uint16_t rth, rtl; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ rth = mipsdsp_##func(rth, sa); \
+ rtl = mipsdsp_##func(rtl, sa); \
+ \
+ return MIPSDSP_RETURN32_16(rth, rtl); \
+}
+
+SHIFT_PH(shrl, rshift_u16);
+SHIFT_PH(shra, rashift16);
+SHIFT_PH(shra_r, rnd16_rashift);
+
+#undef SHIFT_PH
+
+/** DSP Multiply Sub-class insns **/
+/*
+ * The return value is made up of two 16-bit values.
+ * FIXME give the macro a better name.
+ */
+#define MUL_RETURN32_16_PH(name, func, \
+ rsmov1, rsmov2, rsfilter, \
+ rtmov1, rtmov2, rtfilter) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rsB, rsA, rtB, rtA; \
+ \
+ rsB = (rs >> rsmov1) & rsfilter; \
+ rsA = (rs >> rsmov2) & rsfilter; \
+ rtB = (rt >> rtmov1) & rtfilter; \
+ rtA = (rt >> rtmov2) & rtfilter; \
+ \
+ rsB = mipsdsp_##func(rsB, rtB, env); \
+ rsA = mipsdsp_##func(rsA, rtA, env); \
+ \
+ return MIPSDSP_RETURN32_16(rsB, rsA); \
+}
+
+MUL_RETURN32_16_PH(muleu_s_ph_qbl, mul_u8_u16, \
+ 24, 16, MIPSDSP_Q0, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(muleu_s_ph_qbr, mul_u8_u16, \
+ 8, 0, MIPSDSP_Q0, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mulq_rs_ph, rndq15_mul_q15_q15, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mul_ph, mul_i16_i16, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mul_s_ph, sat16_mul_i16_i16, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mulq_s_ph, sat16_mul_q15_q15, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+
+#undef MUL_RETURN32_16_PH
+
+#define MUL_RETURN32_32_ph(name, func, movbits) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t temp; \
+ \
+ rsh = (rs >> movbits) & MIPSDSP_LO; \
+ rth = (rt >> movbits) & MIPSDSP_LO; \
+ temp = mipsdsp_##func(rsh, rth, env); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+MUL_RETURN32_32_ph(muleq_s_w_phl, mul_q15_q15_overflowflag21, 16);
+MUL_RETURN32_32_ph(muleq_s_w_phr, mul_q15_q15_overflowflag21, 0);
+
+#undef MUL_RETURN32_32_ph
+
+#define MUL_VOID_PH(name, use_ac_env) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rsl, rth, rtl; \
+ int32_t tempB, tempA; \
+ int64_t acc, dotp; \
+ \
+ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ if (use_ac_env == 1) { \
+ tempB = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsl, rtl, env); \
+ } else { \
+ tempB = mipsdsp_mul_u16_u16(rsh, rth); \
+ tempA = mipsdsp_mul_u16_u16(rsl, rtl); \
+ } \
+ \
+ dotp = (int64_t)tempB - (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ dotp = dotp + acc; \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((dotp & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(dotp & MIPSDSP_LLO); \
+}
+
+MUL_VOID_PH(mulsaq_s_w_ph, 1);
+MUL_VOID_PH(mulsa_w_ph, 0);
+
+#undef MUL_VOID_PH
+
+#if defined(TARGET_MIPS64)
+#define MUL_RETURN64_16_QH(name, func, \
+ rsmov1, rsmov2, rsmov3, rsmov4, rsfilter, \
+ rtmov1, rtmov2, rtmov3, rtmov4, rtfilter) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rs3, rs2, rs1, rs0; \
+ uint16_t rt3, rt2, rt1, rt0; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ rs3 = (rs >> rsmov1) & rsfilter; \
+ rs2 = (rs >> rsmov2) & rsfilter; \
+ rs1 = (rs >> rsmov3) & rsfilter; \
+ rs0 = (rs >> rsmov4) & rsfilter; \
+ rt3 = (rt >> rtmov1) & rtfilter; \
+ rt2 = (rt >> rtmov2) & rtfilter; \
+ rt1 = (rt >> rtmov3) & rtfilter; \
+ rt0 = (rt >> rtmov4) & rtfilter; \
+ \
+ tempD = mipsdsp_##func(rs3, rt3, env); \
+ tempC = mipsdsp_##func(rs2, rt2, env); \
+ tempB = mipsdsp_##func(rs1, rt1, env); \
+ tempA = mipsdsp_##func(rs0, rt0, env); \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+MUL_RETURN64_16_QH(muleu_s_qh_obl, mul_u8_u16, \
+ 56, 48, 40, 32, MIPSDSP_Q0, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+MUL_RETURN64_16_QH(muleu_s_qh_obr, mul_u8_u16, \
+ 24, 16, 8, 0, MIPSDSP_Q0, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+MUL_RETURN64_16_QH(mulq_rs_qh, rndq15_mul_q15_q15, \
+ 48, 32, 16, 0, MIPSDSP_LO, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+
+#undef MUL_RETURN64_16_QH
+
+#define MUL_RETURN64_32_QH(name, \
+ rsmov1, rsmov2, \
+ rtmov1, rtmov2) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rsB, rsA; \
+ uint16_t rtB, rtA; \
+ uint32_t tempB, tempA; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = mipsdsp_mul_q15_q15(5, rsB, rtB, env); \
+ tempA = mipsdsp_mul_q15_q15(5, rsA, rtA, env); \
+ \
+ return ((uint64_t)tempB << 32) | (uint64_t)tempA; \
+}
+
+MUL_RETURN64_32_QH(muleq_s_pw_qhl, 48, 32, 48, 32);
+MUL_RETURN64_32_QH(muleq_s_pw_qhr, 16, 0, 16, 0);
+
+#undef MUL_RETURN64_32_QH
+
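+/*
+ * On MIPS64 the HI:LO pair forms a 128-bit accumulator, kept below as
+ * acc[1]:acc[0].  The idiom "temp_sum < acc[0]" after an unsigned
+ * 64-bit addition detects a carry out of the low half, which is then
+ * propagated into the high half by hand.
+ */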
+void helper_mulsaq_s_w_qh(target_ulong rs, target_ulong rt, uint32_t ac,
+ CPUMIPSState *env)
+{
+ int16_t rs3, rs2, rs1, rs0;
+ int16_t rt3, rt2, rt1, rt0;
+ int32_t tempD, tempC, tempB, tempA;
+ int64_t acc[2];
+ int64_t temp[2];
+ int64_t temp_sum;
+
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0);
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0);
+
+ tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env);
+ tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env);
+ tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env);
+ tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env);
+
+ temp[0] = ((int32_t)tempD - (int32_t)tempC) +
+ ((int32_t)tempB - (int32_t)tempA);
+ temp[0] = (int64_t)(temp[0] << 30) >> 30;
+ if (((temp[0] >> 33) & 0x01) == 0) {
+ temp[1] = 0x00;
+ } else {
+ temp[1] = ~0ull;
+ }
+
+ acc[0] = env->active_tc.LO[ac];
+ acc[1] = env->active_tc.HI[ac];
+
+ temp_sum = acc[0] + temp[0];
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) &&
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) {
+ acc[1] += 1;
+ }
+ acc[0] = temp_sum;
+ acc[1] += temp[1];
+
+ env->active_tc.HI[ac] = acc[1];
+ env->active_tc.LO[ac] = acc[0];
+}
+#endif
+
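+/*
+ * DP_QB generates the byte dot-product helpers: two unsigned 8x8
+ * products from the selected byte lanes are summed, and the sum is
+ * added to (is_add) or subtracted from the accumulator.
+ */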
+#define DP_QB(name, func, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint8_t rs3, rs2; \
+ uint8_t rt3, rt2; \
+ uint16_t tempB, tempA; \
+ uint64_t tempC, dotp; \
+ \
+ rs3 = (rs >> rsmov1) & MIPSDSP_Q0; \
+ rs2 = (rs >> rsmov2) & MIPSDSP_Q0; \
+ rt3 = (rt >> rtmov1) & MIPSDSP_Q0; \
+ rt2 = (rt >> rtmov2) & MIPSDSP_Q0; \
+ tempB = mipsdsp_##func(rs3, rt3); \
+ tempA = mipsdsp_##func(rs2, rt2); \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ if (is_add) { \
+ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \
+ + dotp; \
+ } else { \
+ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \
+ - dotp; \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempC & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(tempC & MIPSDSP_LLO); \
+}
+
+DP_QB(dpau_h_qbl, mul_u8_u8, 1, 24, 16, 24, 16);
+DP_QB(dpau_h_qbr, mul_u8_u8, 1, 8, 0, 8, 0);
+DP_QB(dpsu_h_qbl, mul_u8_u8, 0, 24, 16, 24, 16);
+DP_QB(dpsu_h_qbr, mul_u8_u8, 0, 8, 0, 8, 0);
+
+#undef DP_QB
+
+#if defined(TARGET_MIPS64)
+#define DP_OB(name, add_sub, \
+ rsmov1, rsmov2, rsmov3, rsmov4, \
+ rtmov1, rtmov2, rtmov3, rtmov4) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ uint8_t rsD, rsC, rsB, rsA; \
+ uint8_t rtD, rtC, rtB, rtA; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ uint64_t temp[2]; \
+ uint64_t acc[2]; \
+ uint64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rsD = (rs >> rsmov1) & MIPSDSP_Q0; \
+ rsC = (rs >> rsmov2) & MIPSDSP_Q0; \
+ rsB = (rs >> rsmov3) & MIPSDSP_Q0; \
+ rsA = (rs >> rsmov4) & MIPSDSP_Q0; \
+ rtD = (rt >> rtmov1) & MIPSDSP_Q0; \
+ rtC = (rt >> rtmov2) & MIPSDSP_Q0; \
+ rtB = (rt >> rtmov3) & MIPSDSP_Q0; \
+ rtA = (rt >> rtmov4) & MIPSDSP_Q0; \
+ \
+ tempD = mipsdsp_mul_u8_u8(rsD, rtD); \
+ tempC = mipsdsp_mul_u8_u8(rsC, rtC); \
+ tempB = mipsdsp_mul_u8_u8(rsB, rtB); \
+ tempA = mipsdsp_mul_u8_u8(rsA, rtA); \
+ \
+ temp[0] = (uint64_t)tempD + (uint64_t)tempC + \
+ (uint64_t)tempB + (uint64_t)tempA; \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ if (add_sub) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] -= 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DP_OB(dpau_h_obl, 1, 56, 48, 40, 32, 56, 48, 40, 32);
+DP_OB(dpau_h_obr, 1, 24, 16, 8, 0, 24, 16, 8, 0);
+DP_OB(dpsu_h_obl, 0, 56, 48, 40, 32, 56, 48, 40, 32);
+DP_OB(dpsu_h_obr, 0, 24, 16, 8, 0, 24, 16, 8, 0);
+
+#undef DP_OB
+#endif
+
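+/*
+ * DP_NOFUNC_PH generates the dpa/dps family that uses plain signed
+ * 16x16 multiplies (no saturating mipsdsp_* primitive, hence
+ * "NOFUNC").  The "x" variants cross the lanes by swapping the rt
+ * shift amounts.
+ */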
+#define DP_NOFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsB, rsA, rtB, rtA; \
+ int32_t tempA, tempB; \
+ int64_t acc; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = (int32_t)rsB * (int32_t)rtB; \
+ tempA = (int32_t)rsA * (int32_t)rtA; \
+ \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ \
+ if (is_add) { \
+ acc = acc + ((int64_t)tempB + (int64_t)tempA); \
+ } else { \
+ acc = acc - ((int64_t)tempB + (int64_t)tempA); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t)((acc & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(acc & MIPSDSP_LLO); \
+}
+
+DP_NOFUNC_PH(dpa_w_ph, 1, 16, 0, 16, 0);
+DP_NOFUNC_PH(dpax_w_ph, 1, 16, 0, 0, 16);
+DP_NOFUNC_PH(dps_w_ph, 0, 16, 0, 16, 0);
+DP_NOFUNC_PH(dpsx_w_ph, 0, 16, 0, 0, 16);
+#undef DP_NOFUNC_PH
+
+#define DP_HASFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsB, rsA, rtB, rtA; \
+ int32_t tempB, tempA; \
+ int64_t acc, dotp; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = mipsdsp_mul_q15_q15(ac, rsB, rtB, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsA, rtA, env); \
+ \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ \
+ if (is_add) { \
+ acc = acc + dotp; \
+ } else { \
+ acc = acc - dotp; \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((acc & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (acc & MIPSDSP_LLO); \
+}
+
+DP_HASFUNC_PH(dpaq_s_w_ph, 1, 16, 0, 16, 0);
+DP_HASFUNC_PH(dpaqx_s_w_ph, 1, 16, 0, 0, 16);
+DP_HASFUNC_PH(dpsq_s_w_ph, 0, 16, 0, 16, 0);
+DP_HASFUNC_PH(dpsqx_s_w_ph, 0, 16, 0, 0, 16);
+
+#undef DP_HASFUNC_PH
+
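+/*
+ * DP_128OPERATION_PH generates the saturating cross dot-product
+ * helpers: tempC63 is the sign bit of the 64-bit result and
+ * tempC62_31 holds the 32 bits below it.  If those bits are not a
+ * pure sign extension, the result does not fit in 32 bits, so it is
+ * clamped to INT32_MAX/INT32_MIN and the overflow flag is raised.
+ */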
+#define DP_128OPERATION_PH(name, is_add) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rsl, rth, rtl; \
+ int32_t tempB, tempA, tempC62_31, tempC63; \
+ int64_t acc, dotp, tempC; \
+ \
+ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ tempB = mipsdsp_mul_q15_q15(ac, rsh, rtl, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsl, rth, env); \
+ \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ if (is_add) { \
+ tempC = acc + dotp; \
+ } else { \
+ tempC = acc - dotp; \
+ } \
+ tempC63 = (tempC >> 63) & 0x01; \
+ tempC62_31 = (tempC >> 31) & 0xFFFFFFFF; \
+ \
+ if ((tempC63 == 0) && (tempC62_31 != 0x00000000)) { \
+ tempC = 0x7FFFFFFF; \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ if ((tempC63 == 1) && (tempC62_31 != 0xFFFFFFFF)) { \
+ tempC = (int64_t)(int32_t)0x80000000; \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempC & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (tempC & MIPSDSP_LLO); \
+}
+
+DP_128OPERATION_PH(dpaqx_sa_w_ph, 1);
+DP_128OPERATION_PH(dpsqx_sa_w_ph, 0);
+
+#undef DP_128OPERATION_PH
+
+#if defined(TARGET_MIPS64)
+#define DP_QH(name, is_add, use_ac_env) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs3, rs2, rs1, rs0; \
+ int32_t rt3, rt2, rt1, rt0; \
+ int32_t tempD, tempC, tempB, tempA; \
+ int64_t acc[2]; \
+ int64_t temp[2]; \
+ int64_t temp_sum; \
+ \
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ if (use_ac_env) { \
+ tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); \
+ tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); \
+ tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); \
+ } else { \
+ tempD = mipsdsp_mul_u16_u16(rs3, rt3); \
+ tempC = mipsdsp_mul_u16_u16(rs2, rt2); \
+ tempB = mipsdsp_mul_u16_u16(rs1, rt1); \
+ tempA = mipsdsp_mul_u16_u16(rs0, rt0); \
+ } \
+ \
+ temp[0] = (int64_t)tempD + (int64_t)tempC + \
+ (int64_t)tempB + (int64_t)tempA; \
+ \
+ if (temp[0] >= 0) { \
+ temp[1] = 0; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[1] = env->active_tc.HI[ac]; \
+ acc[0] = env->active_tc.LO[ac]; \
+ \
+ if (is_add) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] = acc[1] + 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] = acc[1] - 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DP_QH(dpa_w_qh, 1, 0);
+DP_QH(dpaq_s_w_qh, 1, 1);
+DP_QH(dps_w_qh, 0, 0);
+DP_QH(dpsq_s_w_qh, 0, 1);
+
+#undef DP_QH
+
+#endif
+
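+/*
+ * DP_L_W generates dpaq_sa_l_w/dpsq_sa_l_w: a Q31 fractional multiply
+ * whose result is added to or subtracted from the 64-bit accumulator
+ * with saturation; on overflow the accumulator is clamped to
+ * INT64_MAX/INT64_MIN and DSPControl overflow bit (16 + ac) is set.
+ */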
+#define DP_L_W(name, is_add) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int32_t temp63; \
+ int64_t dotp, acc; \
+ uint64_t temp; \
+ bool overflow; \
+ \
+ dotp = mipsdsp_mul_q31_q31(ac, rs, rt, env); \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ if (is_add) { \
+ temp = acc + dotp; \
+ overflow = MIPSDSP_OVERFLOW_ADD((uint64_t)acc, (uint64_t)dotp, \
+ temp, (0x01ull << 63)); \
+ } else { \
+ temp = acc - dotp; \
+ overflow = MIPSDSP_OVERFLOW_SUB((uint64_t)acc, (uint64_t)dotp, \
+ temp, (0x01ull << 63)); \
+ } \
+ \
+ if (overflow) { \
+ temp63 = (temp >> 63) & 0x01; \
+ if (temp63 == 1) { \
+ temp = (0x01ull << 63) - 1; \
+ } else { \
+ temp = 0x01ull << 63; \
+ } \
+ \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((temp & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (temp & MIPSDSP_LLO); \
+}
+
+DP_L_W(dpaq_sa_l_w, 1);
+DP_L_W(dpsq_sa_l_w, 0);
+
+#undef DP_L_W
+
+#if defined(TARGET_MIPS64)
+#define DP_L_PW(name, func) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs1, rs0; \
+ int32_t rt1, rt0; \
+ int64_t tempB[2], tempA[2]; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); \
+ tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); \
+ \
+ if (tempB[0] >= 0) { \
+ tempB[1] = 0x00; \
+ } else { \
+ tempB[1] = ~0ull; \
+ } \
+ \
+ if (tempA[0] >= 0) { \
+ tempA[1] = 0x00; \
+ } else { \
+ tempA[1] = ~0ull; \
+ } \
+ \
+ temp_sum = tempB[0] + tempA[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)tempB[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)tempA[0])) { \
+ temp[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] += tempB[1] + tempA[1]; \
+ \
+ mipsdsp_##func(acc, ac, temp, env); \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+DP_L_PW(dpaq_sa_l_pw, sat64_acc_add_q63);
+DP_L_PW(dpsq_sa_l_pw, sat64_acc_sub_q63);
+
+#undef DP_L_PW
+
+void helper_mulsaq_s_l_pw(target_ulong rs, target_ulong rt, uint32_t ac,
+ CPUMIPSState *env)
+{
+ int32_t rs1, rs0;
+ int32_t rt1, rt0;
+ int64_t tempB[2], tempA[2];
+ int64_t temp[2];
+ int64_t acc[2];
+ int64_t temp_sum;
+
+ rs1 = (rs >> 32) & MIPSDSP_LLO;
+ rs0 = rs & MIPSDSP_LLO;
+ rt1 = (rt >> 32) & MIPSDSP_LLO;
+ rt0 = rt & MIPSDSP_LLO;
+
+ tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env);
+ tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env);
+
+ if (tempB[0] >= 0) {
+ tempB[1] = 0x00;
+ } else {
+ tempB[1] = ~0ull;
+ }
+
+ if (tempA[0] >= 0) {
+ tempA[1] = 0x00;
+ } else {
+ tempA[1] = ~0ull;
+ }
+
+ acc[0] = env->active_tc.LO[ac];
+ acc[1] = env->active_tc.HI[ac];
+
+ temp_sum = tempB[0] - tempA[0];
+ if ((uint64_t)temp_sum > (uint64_t)tempB[0]) {
+ tempB[1] -= 1;
+ }
+ temp[0] = temp_sum;
+ temp[1] = tempB[1] - tempA[1];
+
+ if ((temp[1] & 0x01) == 0) {
+ temp[1] = 0x00;
+ } else {
+ temp[1] = ~0ull;
+ }
+
+ temp_sum = acc[0] + temp[0];
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) &&
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) {
+ acc[1] += 1;
+ }
+ acc[0] = temp_sum;
+ acc[1] += temp[1];
+
+ env->active_tc.HI[ac] = acc[1];
+ env->active_tc.LO[ac] = acc[0];
+}
+#endif
+
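+/*
+ * MAQ_S_W generates the maq_s multiply-accumulate helpers: one Q15
+ * lane of rs and rt is multiplied fractionally and the product is
+ * added to the accumulator without further saturation (saturation
+ * happens only inside the fractional multiply itself).
+ */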
+#define MAQ_S_W(name, mov) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t tempA; \
+ int64_t tempL, acc; \
+ \
+ rsh = (rs >> mov) & MIPSDSP_LO; \
+ rth = (rt >> mov) & MIPSDSP_LO; \
+ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ tempL = (int64_t)tempA + acc; \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempL & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (tempL & MIPSDSP_LLO); \
+}
+
+MAQ_S_W(maq_s_w_phl, 16);
+MAQ_S_W(maq_s_w_phr, 0);
+
+#undef MAQ_S_W
+
+#define MAQ_SA_W(name, mov) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t tempA; \
+ \
+ rsh = (rs >> mov) & MIPSDSP_LO; \
+ rth = (rt >> mov) & MIPSDSP_LO; \
+ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ tempA = mipsdsp_sat32_acc_q31(ac, tempA, env); \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t)(((int64_t)tempA & \
+ MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)((int64_t)tempA & \
+ MIPSDSP_LLO); \
+}
+
+MAQ_SA_W(maq_sa_w_phl, 16);
+MAQ_SA_W(maq_sa_w_phr, 0);
+
+#undef MAQ_SA_W
+
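+/*
+ * MULQ_W generates the Q31 fractional multiplies: the single overflow
+ * case 0x80000000 * 0x80000000 saturates to INT32_MAX, otherwise the
+ * product is doubled and, for the _rs variant, rounded by adding
+ * 0x80000000 (addvar) before the high word is taken.
+ */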
+#define MULQ_W(name, addvar) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs_t, rt_t; \
+ int32_t tempI; \
+ int64_t tempL; \
+ \
+ rs_t = rs & MIPSDSP_LLO; \
+ rt_t = rt & MIPSDSP_LLO; \
+ \
+ if ((rs_t == 0x80000000) && (rt_t == 0x80000000)) { \
+ tempL = 0x7FFFFFFF00000000ull; \
+ set_DSPControl_overflow_flag(1, 21, env); \
+ } else { \
+ tempL = ((int64_t)rs_t * (int64_t)rt_t) << 1; \
+ tempL += addvar; \
+ } \
+ tempI = (tempL & MIPSDSP_LHI) >> 32; \
+ \
+ return (target_long)(int32_t)tempI; \
+}
+
+MULQ_W(mulq_s_w, 0);
+MULQ_W(mulq_rs_w, 0x80000000ull);
+
+#undef MULQ_W
+
+#if defined(TARGET_MIPS64)
+
+#define MAQ_S_W_QH(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rs_t, rt_t; \
+ int32_t temp_mul; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LO; \
+ rt_t = (rt >> mov) & MIPSDSP_LO; \
+ temp_mul = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \
+ \
+ temp[0] = (int64_t)temp_mul; \
+ if (temp[0] >= 0) { \
+ temp[1] = 0x00; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ acc[0] = temp_sum; \
+ acc[1] += temp[1]; \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_S_W_QH(maq_s_w_qhll, 48);
+MAQ_S_W_QH(maq_s_w_qhlr, 32);
+MAQ_S_W_QH(maq_s_w_qhrl, 16);
+MAQ_S_W_QH(maq_s_w_qhrr, 0);
+
+#undef MAQ_S_W_QH
+
+#define MAQ_SA_W(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rs_t, rt_t; \
+ int32_t temp; \
+ int64_t acc[2]; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LO; \
+ rt_t = (rt >> mov) & MIPSDSP_LO; \
+ temp = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \
+ temp = mipsdsp_sat32_acc_q31(ac, temp, env); \
+ \
+ acc[0] = (int64_t)(int32_t)temp; \
+ if (acc[0] >= 0) { \
+ acc[1] = 0x00; \
+ } else { \
+ acc[1] = ~0ull; \
+ } \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_SA_W(maq_sa_w_qhll, 48);
+MAQ_SA_W(maq_sa_w_qhlr, 32);
+MAQ_SA_W(maq_sa_w_qhrl, 16);
+MAQ_SA_W(maq_sa_w_qhrr, 0);
+
+#undef MAQ_SA_W
+
+#define MAQ_S_L_PW(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs_t, rt_t; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LLO; \
+ rt_t = (rt >> mov) & MIPSDSP_LLO; \
+ \
+ temp[0] = mipsdsp_mul_q31_q31(ac, rs_t, rt_t, env); \
+ if (temp[0] >= 0) { \
+ temp[1] = 0x00; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ acc[0] = temp_sum; \
+ acc[1] += temp[1]; \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_S_L_PW(maq_s_l_pwl, 32);
+MAQ_S_L_PW(maq_s_l_pwr, 0);
+
+#undef MAQ_S_L_PW
+
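+/*
+ * DM_OPERATE generates dmadd/dmaddu/dmsub/dmsubu: both 32-bit halves
+ * of rs and rt are multiplied, the two products are summed as a
+ * 128-bit value (sign-extended when sigext is set) and then added to
+ * or subtracted from the 128-bit accumulator.
+ */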
+#define DM_OPERATE(name, func, is_add, sigext) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs1, rs0; \
+ int32_t rt1, rt0; \
+ int64_t tempBL[2], tempAL[2]; \
+ int64_t acc[2]; \
+ int64_t temp[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0x00; \
+ temp[1] = 0x00; \
+ \
+ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ if (sigext) { \
+ tempBL[0] = (int64_t)mipsdsp_##func(rs1, rt1); \
+ tempAL[0] = (int64_t)mipsdsp_##func(rs0, rt0); \
+ \
+ if (tempBL[0] >= 0) { \
+ tempBL[1] = 0x0; \
+ } else { \
+ tempBL[1] = ~0ull; \
+ } \
+ \
+ if (tempAL[0] >= 0) { \
+ tempAL[1] = 0x0; \
+ } else { \
+ tempAL[1] = ~0ull; \
+ } \
+ } else { \
+ tempBL[0] = mipsdsp_##func(rs1, rt1); \
+ tempAL[0] = mipsdsp_##func(rs0, rt0); \
+ tempBL[1] = 0; \
+ tempAL[1] = 0; \
+ } \
+ \
+ acc[1] = env->active_tc.HI[ac]; \
+ acc[0] = env->active_tc.LO[ac]; \
+ \
+ temp_sum = tempBL[0] + tempAL[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)tempBL[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)tempAL[0])) { \
+ temp[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] += tempBL[1] + tempAL[1]; \
+ \
+ if (is_add) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] -= 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DM_OPERATE(dmadd, mul_i32_i32, 1, 1);
+DM_OPERATE(dmaddu, mul_u32_u32, 1, 0);
+DM_OPERATE(dmsub, mul_i32_i32, 0, 1);
+DM_OPERATE(dmsubu, mul_u32_u32, 0, 0);
+#undef DM_OPERATE
+#endif
+
+/** DSP Bit/Manipulation Sub-class insns **/
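+/* bitrev reverses the low 16 bits of rt; the upper bits are cleared. */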
+target_ulong helper_bitrev(target_ulong rt)
+{
+ int32_t temp;
+ uint32_t rd;
+ int i;
+
+ temp = rt & MIPSDSP_LO;
+ rd = 0;
+ for (i = 0; i < 16; i++) {
+ rd = (rd << 1) | (temp & 1);
+ temp = temp >> 1;
+ }
+
+ return (target_ulong)rd;
+}
+
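+/*
+ * insv/dinsv insert the low "size" bits of rs into rt at bit position
+ * "pos"; both fields come from DSPControl (size at bits 12:7, pos in
+ * the low bits).  Out-of-range fields leave rt unchanged.
+ */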
+#define BIT_INSV(name, posfilter, ret_type) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong rs, \
+ target_ulong rt) \
+{ \
+ uint32_t pos, size, msb, lsb; \
+ uint32_t const sizefilter = 0x3F; \
+ target_ulong temp; \
+ target_ulong dspc; \
+ \
+ dspc = env->active_tc.DSPControl; \
+ \
+ pos = dspc & posfilter; \
+ size = (dspc >> 7) & sizefilter; \
+ \
+ msb = pos + size - 1; \
+ lsb = pos; \
+ \
+ if (lsb > msb || (msb > TARGET_LONG_BITS)) { \
+ return rt; \
+ } \
+ \
+ temp = deposit64(rt, pos, size, rs); \
+ \
+ return (target_long)(ret_type)temp; \
+}
+
+BIT_INSV(insv, 0x1F, int32_t);
+#ifdef TARGET_MIPS64
+BIT_INSV(dinsv, 0x7F, target_long);
+#endif
+
+#undef BIT_INSV
+
+
+/** DSP Compare-Pick Sub-class insns **/
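+/*
+ * CMP_HAS_RET returns the per-lane comparison results in the low bits
+ * of the destination register; CMP_NO_RET instead latches them into
+ * the DSPControl ccond field (bits 24 and up).
+ */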
+#define CMP_HAS_RET(name, func, split_num, filter, bit_size) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ uint32_t rs_t, rt_t; \
+ uint8_t cc; \
+ uint32_t temp = 0; \
+ int i; \
+ \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ cc = mipsdsp_##func(rs_t, rt_t); \
+ temp |= cc << i; \
+ } \
+ \
+ return (target_ulong)temp; \
+}
+
+CMP_HAS_RET(cmpgu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8);
+
+#ifdef TARGET_MIPS64
+CMP_HAS_RET(cmpgu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8);
+#endif
+
+#undef CMP_HAS_RET
+
+
+#define CMP_NO_RET(name, func, split_num, filter, bit_size) \
+void helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int##bit_size##_t rs_t, rt_t; \
+ int##bit_size##_t flag = 0; \
+ int##bit_size##_t cc; \
+ int i; \
+ \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ \
+ cc = mipsdsp_##func((int32_t)rs_t, (int32_t)rt_t); \
+ flag |= cc << i; \
+ } \
+ \
+ set_DSPControl_24(flag, split_num, env); \
+}
+
+CMP_NO_RET(cmpu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8);
+
+CMP_NO_RET(cmp_eq_ph, cmp_eq, 2, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_lt_ph, cmp_lt, 2, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_le_ph, cmp_le, 2, MIPSDSP_LO, 16);
+
+#ifdef TARGET_MIPS64
+CMP_NO_RET(cmpu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8);
+
+CMP_NO_RET(cmp_eq_qh, cmp_eq, 4, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_lt_qh, cmp_lt, 4, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_le_qh, cmp_le, 4, MIPSDSP_LO, 16);
+
+CMP_NO_RET(cmp_eq_pw, cmp_eq, 2, MIPSDSP_LLO, 32);
+CMP_NO_RET(cmp_lt_pw, cmp_lt, 2, MIPSDSP_LLO, 32);
+CMP_NO_RET(cmp_le_pw, cmp_le, 2, MIPSDSP_LLO, 32);
+#endif
+#undef CMP_NO_RET
+
+#if defined(TARGET_MIPS64)
+
+#define CMPGDU_OB(name) \
+target_ulong helper_cmpgdu_##name##_ob(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int i; \
+ uint8_t rs_t, rt_t; \
+ uint32_t cond; \
+ \
+ cond = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rs_t = (rs >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ \
+ if (mipsdsp_cmpu_##name(rs_t, rt_t)) { \
+ cond |= 0x01 << i; \
+ } \
+ } \
+ \
+ set_DSPControl_24(cond, 8, env); \
+ \
+ return (uint64_t)cond; \
+}
+
+CMPGDU_OB(eq)
+CMPGDU_OB(lt)
+CMPGDU_OB(le)
+#undef CMPGDU_OB
+#endif
+
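+/*
+ * PICK_INSN generates the pick helpers: for each lane, the condition
+ * code bit DSPControl[24 + i], as set by the cmp instructions above,
+ * chooses between the rs lane (bit set) and the rt lane (bit clear).
+ */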
+#define PICK_INSN(name, split_num, filter, bit_size, ret32bit) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t rs_t, rt_t; \
+ uint32_t cc; \
+ target_ulong dsp; \
+ int i; \
+ target_ulong result = 0; \
+ \
+ dsp = env->active_tc.DSPControl; \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ cc = (dsp >> (24 + i)) & 0x01; \
+ cc = cc == 1 ? rs_t : rt_t; \
+ \
+ result |= (target_ulong)cc << (bit_size * i); \
+ } \
+ \
+ if (ret32bit) { \
+ result = (target_long)(int32_t)(result & MIPSDSP_LLO); \
+ } \
+ \
+ return result; \
+}
+
+PICK_INSN(pick_qb, 4, MIPSDSP_Q0, 8, 1);
+PICK_INSN(pick_ph, 2, MIPSDSP_LO, 16, 1);
+
+#ifdef TARGET_MIPS64
+PICK_INSN(pick_ob, 8, MIPSDSP_Q0, 8, 0);
+PICK_INSN(pick_qh, 4, MIPSDSP_LO, 16, 0);
+PICK_INSN(pick_pw, 2, MIPSDSP_LLO, 32, 0);
+#endif
+#undef PICK_INSN
+
+target_ulong helper_packrl_ph(target_ulong rs, target_ulong rt)
+{
+ uint32_t rsl, rth;
+
+ rsl = rs & MIPSDSP_LO;
+ rth = (rt & MIPSDSP_HI) >> 16;
+
+ return (target_long)(int32_t)((rsl << 16) | rth);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_packrl_pw(target_ulong rs, target_ulong rt)
+{
+ uint32_t rs0, rt1;
+
+ rs0 = rs & MIPSDSP_LLO;
+ rt1 = (rt >> 32) & MIPSDSP_LLO;
+
+ return ((uint64_t)rs0 << 32) | (uint64_t)rt1;
+}
+#endif
+
+/** DSP Accumulator and DSPControl Access Sub-class insns **/
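+/*
+ * The extr helpers shift the 64-bit accumulator right by "shift"
+ * while keeping one extra low-order rounding bit: adding 1 and
+ * shifting right once more rounds to nearest.  The two large
+ * conditionals raise the overflow flag (DSPControl bit 23) when
+ * either the truncated or the rounded value does not fit in 32 bits.
+ */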
+target_ulong helper_extr_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int32_t tempI;
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ tempI = (tempDL[0] >> 1) & MIPSDSP_LLO;
+
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)tempI;
+}
+
+target_ulong helper_extr_r_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)(int32_t)(tempDL[0] >> 1);
+}
+
+target_ulong helper_extr_rs_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int32_t tempI, temp64;
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+ tempI = tempDL[0] >> 1;
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ temp64 = tempDL[1] & 0x01;
+ if (temp64 == 0) {
+ tempI = 0x7FFFFFFF;
+ } else {
+ tempI = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)tempI;
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextr_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+
+ shift = shift & 0x3F;
+
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_r_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_rs_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ if (temp128 == 0) {
+ temp[0] = 0x0FFFFFFFF;
+ } else {
+ temp[0] = 0x0100000000ULL;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ target_ulong ret;
+
+ shift = shift & 0x3F;
+
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ ret = (temp[1] << 63) | (temp[0] >> 1);
+
+ return ret;
+}
+
+target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+ target_ulong ret;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ ret = (temp[1] << 63) | (temp[0] >> 1);
+
+ return ret;
+}
+
+target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+ target_ulong ret;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ if (temp128 == 0) {
+ temp[1] &= ~0x00ull - 1;
+ temp[0] |= ~0x00ull - 1;
+ } else {
+ temp[1] |= 0x01;
+ temp[0] &= 0x01;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ ret = (temp[1] << 63) | (temp[0] >> 1);
+
+ return ret;
+}
+#endif
+
+target_ulong helper_extr_s_h(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t temp, acc;
+
+ shift = shift & 0x1F;
+
+ acc = ((int64_t)env->active_tc.HI[ac] << 32) |
+ ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF);
+
+ temp = acc >> shift;
+
+ if (temp > (int64_t)0x7FFF) {
+ temp = 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 23, env);
+ } else if (temp < (int64_t)0xFFFFFFFFFFFF8000ULL) {
+ temp = 0xFFFF8000;
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)(int32_t)(temp & 0xFFFFFFFF);
+}
+
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextr_s_h(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t temp[2];
+ uint32_t temp127;
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rashift_acc((uint64_t *)temp, ac, shift, env);
+
+ temp127 = (temp[1] >> 63) & 0x01;
+
+ if ((temp127 == 0) && (temp[1] > 0 || temp[0] > 32767)) {
+ temp[0] &= 0xFFFF0000;
+ temp[0] |= 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 23, env);
+ } else if ((temp127 == 1) &&
+ (temp[1] < 0xFFFFFFFFFFFFFFFFll
+             || temp[0] < 0xFFFFFFFFFFFF8000ll)) {
+ temp[0] &= 0xFFFF0000;
+ temp[0] |= 0x00008000;
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int16_t)(temp[0] & MIPSDSP_LO);
+}
+
+#endif
+
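+/*
+ * extp/extpdp extract (size + 1) bits from the accumulator, ending at
+ * the DSPControl pos field.  On success EFI is cleared (and extpdp
+ * also moves pos down past the extracted field); if fewer than
+ * size + 1 bits are available, EFI is set instead.
+ */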
+target_ulong helper_extp(target_ulong ac, target_ulong size, CPUMIPSState *env)
+{
+ int32_t start_pos;
+ int sub;
+ uint32_t temp;
+ uint64_t acc;
+
+ size = size & 0x1F;
+
+ temp = 0;
+ start_pos = get_DSPControl_pos(env);
+ sub = start_pos - (size + 1);
+ if (sub >= -1) {
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+ temp = (acc >> (start_pos - size)) & (~0U >> (31 - size));
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_extpdp(target_ulong ac, target_ulong size,
+ CPUMIPSState *env)
+{
+ int32_t start_pos;
+ int sub;
+ uint32_t temp;
+ uint64_t acc;
+
+ size = size & 0x1F;
+ temp = 0;
+ start_pos = get_DSPControl_pos(env);
+ sub = start_pos - (size + 1);
+ if (sub >= -1) {
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+ temp = extract64(acc, start_pos - size, size + 1);
+
+ set_DSPControl_pos(sub, env);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return (target_ulong)temp;
+}
+
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextp(target_ulong ac, target_ulong size, CPUMIPSState *env)
+{
+ int start_pos;
+ int len;
+ int sub;
+ uint64_t tempB, tempA;
+ uint64_t temp;
+
+ temp = 0;
+
+ size = size & 0x3F;
+ start_pos = get_DSPControl_pos(env);
+ len = start_pos - size;
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ sub = start_pos - (size + 1);
+
+ if (sub >= -1) {
+ temp = (tempB << (64 - len)) | (tempA >> len);
+ temp = temp & ((1ULL << (size + 1)) - 1);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return temp;
+}
+
+target_ulong helper_dextpdp(target_ulong ac, target_ulong size,
+ CPUMIPSState *env)
+{
+ int start_pos;
+ int len;
+ int sub;
+ uint64_t tempB, tempA;
+ uint64_t temp;
+
+ temp = 0;
+ size = size & 0x3F;
+ start_pos = get_DSPControl_pos(env);
+ len = start_pos - size;
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ sub = start_pos - (size + 1);
+
+ if (sub >= -1) {
+ temp = (tempB << (64 - len)) | (tempA >> len);
+ temp = temp & ((1ULL << (size + 1)) - 1);
+ set_DSPControl_pos(sub, env);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return temp;
+}
+
+#endif
+
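+/*
+ * shilo shifts the accumulator by a signed 6-bit amount taken from
+ * rs: positive values shift right, negative values shift left.
+ */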
+void helper_shilo(target_ulong ac, target_ulong rs, CPUMIPSState *env)
+{
+ int8_t rs5_0;
+ uint64_t temp, acc;
+
+ rs5_0 = rs & 0x3F;
+ rs5_0 = (int8_t)(rs5_0 << 2) >> 2;
+
+ if (unlikely(rs5_0 == 0)) {
+ return;
+ }
+
+ acc = (((uint64_t)env->active_tc.HI[ac] << 32) & MIPSDSP_LHI) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+
+ if (rs5_0 > 0) {
+ temp = acc >> rs5_0;
+ } else {
+ temp = acc << -rs5_0;
+ }
+
+ env->active_tc.HI[ac] = (target_ulong)(int32_t)((temp & MIPSDSP_LHI) >> 32);
+ env->active_tc.LO[ac] = (target_ulong)(int32_t)(temp & MIPSDSP_LLO);
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dshilo(target_ulong shift, target_ulong ac, CPUMIPSState *env)
+{
+ int8_t shift_t;
+ uint64_t tempB, tempA;
+
+ shift_t = (int8_t)(shift << 1) >> 1;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ if (shift_t != 0) {
+ if (shift_t >= 0) {
+ tempA = (tempB << (64 - shift_t)) | (tempA >> shift_t);
+ tempB = tempB >> shift_t;
+ } else {
+ shift_t = -shift_t;
+ tempB = (tempB << shift_t) | (tempA >> (64 - shift_t));
+ tempA = tempA << shift_t;
+ }
+ }
+
+ env->active_tc.HI[ac] = tempB;
+ env->active_tc.LO[ac] = tempA;
+}
+
+#endif
+void helper_mthlip(target_ulong ac, target_ulong rs, CPUMIPSState *env)
+{
+ int32_t tempA, tempB, pos;
+
+ tempA = rs;
+ tempB = env->active_tc.LO[ac];
+ env->active_tc.HI[ac] = (target_long)tempB;
+ env->active_tc.LO[ac] = (target_long)tempA;
+ pos = get_DSPControl_pos(env);
+
+ if (pos > 32) {
+ return;
+ } else {
+ set_DSPControl_pos(pos + 32, env);
+ }
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmthlip(target_ulong rs, target_ulong ac, CPUMIPSState *env)
+{
+ uint8_t ac_t;
+ uint8_t pos;
+ uint64_t tempB, tempA;
+
+ ac_t = ac & 0x3;
+
+ tempA = rs;
+ tempB = env->active_tc.LO[ac_t];
+
+ env->active_tc.HI[ac_t] = tempB;
+ env->active_tc.LO[ac_t] = tempA;
+
+ pos = get_DSPControl_pos(env);
+
+ if (pos <= 64) {
+ pos = pos + 64;
+ set_DSPControl_pos(pos, env);
+ }
+}
+#endif
+
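+/*
+ * Each bit of mask_num selects one DSPControl field: 0 = pos,
+ * 1 = scount, 2 = carry (bit 13), 3 = ouflag (bits 23:16),
+ * 4 = ccond, 5 = EFI (bit 14).  Selected fields are overwritten
+ * from rs; the others are preserved.
+ */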
+void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env)
+{
+ uint8_t mask[6];
+ uint8_t i;
+ uint32_t newbits, overwrite;
+ target_ulong dsp;
+
+ newbits = 0x00;
+ overwrite = 0xFFFFFFFF;
+ dsp = env->active_tc.DSPControl;
+
+ for (i = 0; i < 6; i++) {
+ mask[i] = (mask_num >> i) & 0x01;
+ }
+
+ if (mask[0] == 1) {
+#if defined(TARGET_MIPS64)
+ overwrite &= 0xFFFFFF80;
+ newbits &= 0xFFFFFF80;
+ newbits |= 0x0000007F & rs;
+#else
+ overwrite &= 0xFFFFFFC0;
+ newbits &= 0xFFFFFFC0;
+ newbits |= 0x0000003F & rs;
+#endif
+ }
+
+ if (mask[1] == 1) {
+ overwrite &= 0xFFFFE07F;
+ newbits &= 0xFFFFE07F;
+ newbits |= 0x00001F80 & rs;
+ }
+
+ if (mask[2] == 1) {
+ overwrite &= 0xFFFFDFFF;
+ newbits &= 0xFFFFDFFF;
+ newbits |= 0x00002000 & rs;
+ }
+
+ if (mask[3] == 1) {
+ overwrite &= 0xFF00FFFF;
+ newbits &= 0xFF00FFFF;
+ newbits |= 0x00FF0000 & rs;
+ }
+
+ if (mask[4] == 1) {
+ overwrite &= 0x00FFFFFF;
+ newbits &= 0x00FFFFFF;
+#if defined(TARGET_MIPS64)
+ newbits |= 0xFF000000 & rs;
+#else
+ newbits |= 0x0F000000 & rs;
+#endif
+ }
+
+ if (mask[5] == 1) {
+ overwrite &= 0xFFFFBFFF;
+ newbits &= 0xFFFFBFFF;
+ newbits |= 0x00004000 & rs;
+ }
+
+ dsp = dsp & overwrite;
+ dsp = dsp | newbits;
+ env->active_tc.DSPControl = dsp;
+}
+
+void helper_wrdsp(target_ulong rs, target_ulong mask_num, CPUMIPSState *env)
+{
+ cpu_wrdsp(rs, mask_num, env);
+}
+
+uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env)
+{
+ uint8_t mask[6];
+ uint32_t ruler, i;
+ target_ulong temp;
+ target_ulong dsp;
+
+ ruler = 0x01;
+ for (i = 0; i < 6; i++) {
+        mask[i] = (mask_num & ruler) >> i;
+ ruler = ruler << 1;
+ }
+
+ temp = 0x00;
+ dsp = env->active_tc.DSPControl;
+
+ if (mask[0] == 1) {
+#if defined(TARGET_MIPS64)
+ temp |= dsp & 0x7F;
+#else
+ temp |= dsp & 0x3F;
+#endif
+ }
+
+ if (mask[1] == 1) {
+ temp |= dsp & 0x1F80;
+ }
+
+ if (mask[2] == 1) {
+ temp |= dsp & 0x2000;
+ }
+
+ if (mask[3] == 1) {
+ temp |= dsp & 0x00FF0000;
+ }
+
+ if (mask[4] == 1) {
+#if defined(TARGET_MIPS64)
+ temp |= dsp & 0xFF000000;
+#else
+ temp |= dsp & 0x0F000000;
+#endif
+ }
+
+ if (mask[5] == 1) {
+ temp |= dsp & 0x4000;
+ }
+
+ return temp;
+}
+
+target_ulong helper_rddsp(target_ulong mask_num, CPUMIPSState *env)
+{
+ return cpu_rddsp(mask_num, env);
+}
+
+
+#undef MIPSDSP_LHI
+#undef MIPSDSP_LLO
+#undef MIPSDSP_HI
+#undef MIPSDSP_LO
+#undef MIPSDSP_Q3
+#undef MIPSDSP_Q2
+#undef MIPSDSP_Q1
+#undef MIPSDSP_Q0
+
+#undef MIPSDSP_SPLIT32_8
+#undef MIPSDSP_SPLIT32_16
+
+#undef MIPSDSP_RETURN32_8
+#undef MIPSDSP_RETURN32_16
+
+#ifdef TARGET_MIPS64
+#undef MIPSDSP_SPLIT64_16
+#undef MIPSDSP_SPLIT64_32
+#undef MIPSDSP_RETURN64_16
+#undef MIPSDSP_RETURN64_32
+#endif
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
new file mode 100644
index 000000000..7b3026b10
--- /dev/null
+++ b/target/mips/tcg/exception.c
@@ -0,0 +1,149 @@
+/*
+ * MIPS Exceptions processing helpers for QEMU.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+
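+/*
+ * Compute the PC at which execution should resume after an exception:
+ * the current ISA mode (MIPS16/microMIPS) is folded into bit 0, and a
+ * fault in a delay slot resumes at the branch itself.
+ */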
+target_ulong exception_resume_pc(CPUMIPSState *env)
+{
+ target_ulong bad_pc;
+ target_ulong isa_mode;
+
+ isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
+ bad_pc = env->active_tc.PC | isa_mode;
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ /*
+ * If the exception was raised from a delay slot, come back to
+ * the jump.
+ */
+ bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
+ }
+
+ return bad_pc;
+}
+
+void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code)
+{
+ do_raise_exception_err(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, GETPC());
+}
+
+void helper_raise_exception_debug(CPUMIPSState *env)
+{
+ do_raise_exception(env, EXCP_DEBUG, 0);
+}
+
+static void raise_exception(CPUMIPSState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, 0);
+}
+
+void helper_wait(CPUMIPSState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+    /*
+     * This is the last instruction in the block; PC was already
+     * updated, so there is no need to recover PC and icount.
+     */
+ raise_exception(env, EXCP_HLT);
+}
+
+void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ env->active_tc.PC = tb->pc;
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
+}
+
+static const char * const excp_names[EXCP_LAST + 1] = {
+ [EXCP_RESET] = "reset",
+ [EXCP_SRESET] = "soft reset",
+ [EXCP_DSS] = "debug single step",
+ [EXCP_DINT] = "debug interrupt",
+ [EXCP_NMI] = "non-maskable interrupt",
+ [EXCP_MCHECK] = "machine check",
+ [EXCP_EXT_INTERRUPT] = "interrupt",
+ [EXCP_DFWATCH] = "deferred watchpoint",
+ [EXCP_DIB] = "debug instruction breakpoint",
+ [EXCP_IWATCH] = "instruction fetch watchpoint",
+ [EXCP_AdEL] = "address error load",
+ [EXCP_AdES] = "address error store",
+ [EXCP_TLBF] = "TLB refill",
+ [EXCP_IBE] = "instruction bus error",
+ [EXCP_DBp] = "debug breakpoint",
+ [EXCP_SYSCALL] = "syscall",
+ [EXCP_BREAK] = "break",
+ [EXCP_CpU] = "coprocessor unusable",
+ [EXCP_RI] = "reserved instruction",
+ [EXCP_OVERFLOW] = "arithmetic overflow",
+ [EXCP_TRAP] = "trap",
+ [EXCP_FPE] = "floating point",
+ [EXCP_DDBS] = "debug data break store",
+ [EXCP_DWATCH] = "data watchpoint",
+ [EXCP_LTLBL] = "TLB modify",
+ [EXCP_TLBL] = "TLB load",
+ [EXCP_TLBS] = "TLB store",
+ [EXCP_DBE] = "data bus error",
+ [EXCP_DDBL] = "debug data break load",
+ [EXCP_THREAD] = "thread",
+ [EXCP_MDMX] = "MDMX",
+ [EXCP_C2E] = "precise coprocessor 2",
+ [EXCP_CACHE] = "cache error",
+ [EXCP_TLBXI] = "TLB execute-inhibit",
+ [EXCP_TLBRI] = "TLB read-inhibit",
+ [EXCP_MSADIS] = "MSA disabled",
+ [EXCP_MSAFPE] = "MSA floating point",
+};
+
+const char *mips_exception_name(int32_t exception)
+{
+ if (exception < 0 || exception > EXCP_LAST) {
+ return "unknown";
+ }
+ return excp_names[exception];
+}
+
+void do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code, uintptr_t pc)
+{
+ CPUState *cs = env_cpu(env);
+
+ qemu_log_mask(CPU_LOG_INT, "%s: %d (%s) %d\n",
+ __func__, exception, mips_exception_name(exception),
+ error_code);
+ cs->exception_index = exception;
+ env->error_code = error_code;
+
+ cpu_loop_exit_restore(cs, pc);
+}
diff --git a/target/mips/tcg/fpu_helper.c b/target/mips/tcg/fpu_helper.c
new file mode 100644
index 000000000..8ce56ed7c
--- /dev/null
+++ b/target/mips/tcg/fpu_helper.c
@@ -0,0 +1,2254 @@
+/*
+ * Helpers for emulation of FPU-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ * Copyright (C) 2020 Wave Computing, Inc.
+ * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "fpu/softfloat.h"
+#include "fpu_helper.h"
+
+
+/* Complex FPU operations which may need stack space. */
+
+#define FLOAT_TWO32 make_float32(1 << 30)
+#define FLOAT_TWO64 make_float64(1ULL << 62)
+
+#define FP_TO_INT32_OVERFLOW 0x7fffffff
+#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
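+
+/*
+ * Default results substituted by the conversion helpers below
+ * whenever softfloat signals an invalid operation or overflow.
+ */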
+
+target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
+{
+ target_ulong arg1 = 0;
+
+ switch (reg) {
+ case 0:
+ arg1 = (int32_t)env->active_fpu.fcr0;
+ break;
+ case 1:
+ /* UFR Support - Read Status FR */
+ if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ arg1 = (int32_t)
+ ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ }
+ break;
+ case 5:
+ /* FRE Support - read Config5.FRE bit */
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1;
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ }
+ break;
+ case 25:
+ arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) |
+ ((env->active_fpu.fcr31 >> 23) & 0x1);
+ break;
+ case 26:
+ arg1 = env->active_fpu.fcr31 & 0x0003f07c;
+ break;
+ case 28:
+ arg1 = (env->active_fpu.fcr31 & 0x00000f83) |
+ ((env->active_fpu.fcr31 >> 22) & 0x4);
+ break;
+ default:
+ arg1 = (int32_t)env->active_fpu.fcr31;
+ break;
+ }
+
+ return arg1;
+}
+
+void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
+{
+ switch (fs) {
+ case 1:
+ /* UFR Alias - Reset Status FR */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ env->CP0_Status &= ~(1 << CP0St_FR);
+ compute_hflags(env);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ break;
+ case 4:
+ /* UNFR Alias - Set Status FR */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ compute_hflags(env);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ break;
+ case 5:
+ /* FRE Support - clear Config5.FRE bit */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+ compute_hflags(env);
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ break;
+ case 6:
+ /* FRE Support - set Config5.FRE bit */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ env->CP0_Config5 |= (1 << CP0C5_FRE);
+ compute_hflags(env);
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ break;
+ case 25:
+ if ((env->insn_flags & ISA_MIPS_R6) || (arg1 & 0xffffff00)) {
+ return;
+ }
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) |
+ ((arg1 & 0xfe) << 24) |
+ ((arg1 & 0x1) << 23);
+ break;
+ case 26:
+ if (arg1 & 0x007c0000) {
+ return;
+ }
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) |
+ (arg1 & 0x0003f07c);
+ break;
+ case 28:
+ if (arg1 & 0x007c0000) {
+ return;
+ }
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) |
+ (arg1 & 0x00000f83) |
+ ((arg1 & 0x4) << 22);
+ break;
+ case 31:
+ env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+ break;
+ default:
+ if (env->insn_flags & ISA_MIPS_R6) {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ return;
+ }
+ restore_fp_status(env);
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+ if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) &
+ GET_FP_CAUSE(env->active_fpu.fcr31)) {
+ do_raise_exception(env, EXCP_FPE, GETPC());
+ }
+}
+
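+/* Map softfloat exception flags onto the MIPS FCSR cause/flag bits. */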
+static inline int ieee_to_mips_xcpt(int ieee_xcpt)
+{
+ int mips_xcpt = 0;
+
+ if (ieee_xcpt & float_flag_invalid) {
+ mips_xcpt |= FP_INVALID;
+ }
+ if (ieee_xcpt & float_flag_overflow) {
+ mips_xcpt |= FP_OVERFLOW;
+ }
+ if (ieee_xcpt & float_flag_underflow) {
+ mips_xcpt |= FP_UNDERFLOW;
+ }
+ if (ieee_xcpt & float_flag_divbyzero) {
+ mips_xcpt |= FP_DIV0;
+ }
+ if (ieee_xcpt & float_flag_inexact) {
+ mips_xcpt |= FP_INEXACT;
+ }
+
+ return mips_xcpt;
+}
+
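+/*
+ * Fold any pending softfloat exceptions into FCR31: the cause field is
+ * always updated; if a corresponding enable bit is set an FPE is
+ * raised, otherwise the sticky flags field accumulates the exception.
+ */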
+static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
+{
+ int ieee_exception_flags = get_float_exception_flags(
+ &env->active_fpu.fp_status);
+ int mips_exception_flags = 0;
+
+ if (ieee_exception_flags) {
+ mips_exception_flags = ieee_to_mips_xcpt(ieee_exception_flags);
+ }
+
+ SET_FP_CAUSE(env->active_fpu.fcr31, mips_exception_flags);
+
+ if (mips_exception_flags) {
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+
+ if (GET_FP_ENABLE(env->active_fpu.fcr31) & mips_exception_flags) {
+ do_raise_exception(env, EXCP_FPE, pc);
+ } else {
+ UPDATE_FP_FLAGS(env->active_fpu.fcr31, mips_exception_flags);
+ }
+ }
+}
+
+/*
+ * Float support.
+ * Single precision routines have an "s" suffix, double precision a
+ * "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
+ * paired single lower "pl", paired single upper "pu".
+ */
+
+/* unary operations, modifying fp status */
+uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt0;
+}
+
+uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
+{
+ fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
+{
+ uint32_t fst2;
+ uint32_t fsth2;
+
+    fst2 = int32_to_float32(dt0 & 0xFFFFFFFF, &env->active_fpu.fp_status);
+ fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+ uint32_t wth2;
+ int excp, excph;
+
+    wt2 = float32_to_int32(fdt0 & 0xFFFFFFFF, &env->active_fpu.fp_status);
+ excp = get_float_exception_flags(&env->active_fpu.fp_status);
+ if (excp & (float_flag_overflow | float_flag_invalid)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+ wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
+ excph = get_float_exception_flags(&env->active_fpu.fp_status);
+ if (excph & (float_flag_overflow | float_flag_invalid)) {
+ wth2 = FP_TO_INT32_OVERFLOW;
+ }
+
+ set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+
+ return ((uint64_t)wth2 << 32) | wt2;
+}
+
+uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fst2;
+
+ fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
+{
+ uint32_t fst2;
+
+ fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
+{
+ uint32_t fst2;
+
+ fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
+{
+ uint32_t wt2;
+
+ wt2 = wt0;
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
+{
+ uint32_t wt2;
+
+ wt2 = wth0;
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0,
+ &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
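+/*
+ * IEEE 754-2008 (NAN2008 mode) conversions: where the pre-2008 helpers
+ * above return FP_TO_INT32_OVERFLOW / FP_TO_INT64_OVERFLOW for any
+ * invalid operation, these variants return 0 when the source is a NaN;
+ * e.g. CVT.L.D of a quiet NaN yields 0 rather than INT64_MAX.
+ */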
+uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+/* unary operations, not modifying fp status */
+
+uint64_t helper_float_abs_d(uint64_t fdt0)
+{
+ return float64_abs(fdt0);
+}
+
+uint32_t helper_float_abs_s(uint32_t fst0)
+{
+ return float32_abs(fst0);
+}
+
+uint64_t helper_float_abs_ps(uint64_t fdt0)
+{
+ uint32_t wt0;
+ uint32_t wth0;
+
+ wt0 = float32_abs(fdt0 & 0XFFFFFFFF);
+ wth0 = float32_abs(fdt0 >> 32);
+ return ((uint64_t)wth0 << 32) | wt0;
+}
+
+uint64_t helper_float_chs_d(uint64_t fdt0)
+{
+ return float64_chs(fdt0);
+}
+
+uint32_t helper_float_chs_s(uint32_t fst0)
+{
+ return float32_chs(fst0);
+}
+
+uint64_t helper_float_chs_ps(uint64_t fdt0)
+{
+ uint32_t wt0;
+ uint32_t wth0;
+
+ wt0 = float32_chs(fdt0 & 0XFFFFFFFF);
+ wth0 = float32_chs(fdt0 >> 32);
+ return ((uint64_t)wth0 << 32) | wt0;
+}
+
+/* MIPS-specific unary operations */
+uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fstl2;
+ uint32_t fsth2;
+
+ fstl2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF,
+ &env->active_fpu.fp_status);
+ fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
+uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fstl2;
+ uint32_t fsth2;
+
+ fstl2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
+ fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
+ fstl2 = float32_div(float32_one, fstl2, &env->active_fpu.fp_status);
+ fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
+uint64_t helper_float_rint_d(CPUMIPSState *env, uint64_t fs)
+{
+ uint64_t fdret;
+
+ fdret = float64_round_to_int(fs, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint32_t helper_float_rint_s(CPUMIPSState *env, uint32_t fs)
+{
+ uint32_t fdret;
+
+ fdret = float32_round_to_int(fs, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+#define FLOAT_CLASS_SIGNALING_NAN 0x001
+#define FLOAT_CLASS_QUIET_NAN 0x002
+#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
+#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
+#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
+#define FLOAT_CLASS_NEGATIVE_ZERO 0x020
+#define FLOAT_CLASS_POSITIVE_INFINITY 0x040
+#define FLOAT_CLASS_POSITIVE_NORMAL 0x080
+#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
+#define FLOAT_CLASS_POSITIVE_ZERO 0x200
+
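+/*
+ * CLASS.fmt yields exactly one of the ten mask bits above; e.g.
+ * 0x3f800000 (+1.0f) classifies as FLOAT_CLASS_POSITIVE_NORMAL and
+ * 0x80000000 (-0.0f) as FLOAT_CLASS_NEGATIVE_ZERO.
+ */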
+uint64_t float_class_d(uint64_t arg, float_status *status)
+{
+ if (float64_is_signaling_nan(arg, status)) {
+ return FLOAT_CLASS_SIGNALING_NAN;
+ } else if (float64_is_quiet_nan(arg, status)) {
+ return FLOAT_CLASS_QUIET_NAN;
+ } else if (float64_is_neg(arg)) {
+ if (float64_is_infinity(arg)) {
+ return FLOAT_CLASS_NEGATIVE_INFINITY;
+ } else if (float64_is_zero(arg)) {
+ return FLOAT_CLASS_NEGATIVE_ZERO;
+ } else if (float64_is_zero_or_denormal(arg)) {
+ return FLOAT_CLASS_NEGATIVE_SUBNORMAL;
+ } else {
+ return FLOAT_CLASS_NEGATIVE_NORMAL;
+ }
+ } else {
+ if (float64_is_infinity(arg)) {
+ return FLOAT_CLASS_POSITIVE_INFINITY;
+ } else if (float64_is_zero(arg)) {
+ return FLOAT_CLASS_POSITIVE_ZERO;
+ } else if (float64_is_zero_or_denormal(arg)) {
+ return FLOAT_CLASS_POSITIVE_SUBNORMAL;
+ } else {
+ return FLOAT_CLASS_POSITIVE_NORMAL;
+ }
+ }
+}
+
+uint64_t helper_float_class_d(CPUMIPSState *env, uint64_t arg)
+{
+ return float_class_d(arg, &env->active_fpu.fp_status);
+}
+
+uint32_t float_class_s(uint32_t arg, float_status *status)
+{
+ if (float32_is_signaling_nan(arg, status)) {
+ return FLOAT_CLASS_SIGNALING_NAN;
+ } else if (float32_is_quiet_nan(arg, status)) {
+ return FLOAT_CLASS_QUIET_NAN;
+ } else if (float32_is_neg(arg)) {
+ if (float32_is_infinity(arg)) {
+ return FLOAT_CLASS_NEGATIVE_INFINITY;
+ } else if (float32_is_zero(arg)) {
+ return FLOAT_CLASS_NEGATIVE_ZERO;
+ } else if (float32_is_zero_or_denormal(arg)) {
+ return FLOAT_CLASS_NEGATIVE_SUBNORMAL;
+ } else {
+ return FLOAT_CLASS_NEGATIVE_NORMAL;
+ }
+ } else {
+ if (float32_is_infinity(arg)) {
+ return FLOAT_CLASS_POSITIVE_INFINITY;
+ } else if (float32_is_zero(arg)) {
+ return FLOAT_CLASS_POSITIVE_ZERO;
+ } else if (float32_is_zero_or_denormal(arg)) {
+ return FLOAT_CLASS_POSITIVE_SUBNORMAL;
+ } else {
+ return FLOAT_CLASS_POSITIVE_NORMAL;
+ }
+ }
+}
+
+uint32_t helper_float_class_s(CPUMIPSState *env, uint32_t arg)
+{
+ return float_class_s(arg, &env->active_fpu.fp_status);
+}
+
+/* binary operations */
+
+uint64_t helper_float_add_d(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint64_t dt2;
+
+ dt2 = float64_add(fdt0, fdt1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_add_s(CPUMIPSState *env,
+ uint32_t fst0, uint32_t fst1)
+{
+ uint32_t wt2;
+
+ wt2 = float32_add(fst0, fst1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_add_ps(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t wtl2;
+ uint32_t wth2;
+
+ wtl2 = float32_add(fstl0, fstl1, &env->active_fpu.fp_status);
+ wth2 = float32_add(fsth0, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)wth2 << 32) | wtl2;
+}
+
+uint64_t helper_float_sub_d(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint64_t dt2;
+
+ dt2 = float64_sub(fdt0, fdt1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_sub_s(CPUMIPSState *env,
+ uint32_t fst0, uint32_t fst1)
+{
+ uint32_t wt2;
+
+ wt2 = float32_sub(fst0, fst1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_sub_ps(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t wtl2;
+ uint32_t wth2;
+
+ wtl2 = float32_sub(fstl0, fstl1, &env->active_fpu.fp_status);
+ wth2 = float32_sub(fsth0, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)wth2 << 32) | wtl2;
+}
+
+uint64_t helper_float_mul_d(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint64_t dt2;
+
+ dt2 = float64_mul(fdt0, fdt1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_mul_s(CPUMIPSState *env,
+ uint32_t fst0, uint32_t fst1)
+{
+ uint32_t wt2;
+
+ wt2 = float32_mul(fst0, fst1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_mul_ps(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t wtl2;
+ uint32_t wth2;
+
+ wtl2 = float32_mul(fstl0, fstl1, &env->active_fpu.fp_status);
+ wth2 = float32_mul(fsth0, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)wth2 << 32) | wtl2;
+}
+
+uint64_t helper_float_div_d(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint64_t dt2;
+
+ dt2 = float64_div(fdt0, fdt1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_div_s(CPUMIPSState *env,
+ uint32_t fst0, uint32_t fst1)
+{
+ uint32_t wt2;
+
+ wt2 = float32_div(fst0, fst1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_div_ps(CPUMIPSState *env,
+ uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t wtl2;
+ uint32_t wth2;
+
+ wtl2 = float32_div(fstl0, fstl1, &env->active_fpu.fp_status);
+ wth2 = float32_div(fsth0, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)wth2 << 32) | wtl2;
+}
+
+
+/* MIPS-specific binary operations */
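+/*
+ * These implement the MIPS-3D Newton-Raphson step operations: RECIP2
+ * computes -(fs * ft - 1) = 1 - fs * ft, so with a seed x ~= 1/a from
+ * RECIP1 one refinement is x + x * RECIP2(a, x) = x * (2 - a * x);
+ * RSQRT2 computes -(fs * ft - 1) / 2 for the analogous 1/sqrt(a) step.
+ */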
+uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
+ fdt2 = float64_chs(float64_sub(fdt2, float64_one,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
+{
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_sub(fst2, float32_one,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl2 = float32_mul(fstl0, fstl2, &env->active_fpu.fp_status);
+ fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
+ fstl2 = float32_chs(float32_sub(fstl2, float32_one,
+ &env->active_fpu.fp_status));
+ fsth2 = float32_chs(float32_sub(fsth2, float32_one,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
+uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
+ fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
+ fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
+{
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl2 = float32_mul(fstl0, fstl2, &env->active_fpu.fp_status);
+ fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
+ fstl2 = float32_sub(fstl2, float32_one, &env->active_fpu.fp_status);
+ fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
+ fstl2 = float32_chs(float32_div(fstl2, FLOAT_TWO32,
+ &env->active_fpu.fp_status));
+ fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32,
+ &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
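+/*
+ * ADDR.PS and MULR.PS are "reduction" operations: each half of the
+ * result combines the two singles of a single input operand, i.e.
+ * lo = fs.lo + fs.hi and hi = ft.lo + ft.hi (likewise with * for MULR).
+ */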
+uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2;
+ uint32_t fsth2;
+
+ fstl2 = float32_add(fstl0, fsth0, &env->active_fpu.fp_status);
+ fsth2 = float32_add(fstl1, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
+uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2;
+ uint32_t fsth2;
+
+ fstl2 = float32_mul(fstl0, fsth0, &env->active_fpu.fp_status);
+ fsth2 = float32_mul(fstl1, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fstl2;
+}
+
+
+uint32_t helper_float_max_s(CPUMIPSState *env, uint32_t fs, uint32_t ft)
+{
+ uint32_t fdret;
+
+ fdret = float32_maxnum(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_max_d(CPUMIPSState *env, uint64_t fs, uint64_t ft)
+{
+ uint64_t fdret;
+
+ fdret = float64_maxnum(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint32_t helper_float_maxa_s(CPUMIPSState *env, uint32_t fs, uint32_t ft)
+{
+ uint32_t fdret;
+
+ fdret = float32_maxnummag(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_maxa_d(CPUMIPSState *env, uint64_t fs, uint64_t ft)
+{
+ uint64_t fdret;
+
+ fdret = float64_maxnummag(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint32_t helper_float_min_s(CPUMIPSState *env, uint32_t fs, uint32_t ft)
+{
+ uint32_t fdret;
+
+ fdret = float32_minnum(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_min_d(CPUMIPSState *env, uint64_t fs, uint64_t ft)
+{
+ uint64_t fdret;
+
+ fdret = float64_minnum(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint32_t helper_float_mina_s(CPUMIPSState *env, uint32_t fs, uint32_t ft)
+{
+ uint32_t fdret;
+
+ fdret = float32_minnummag(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_mina_d(CPUMIPSState *env, uint64_t fs, uint64_t ft)
+{
+ uint64_t fdret;
+
+ fdret = float64_minnummag(fs, ft, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+
+/* ternary operations */
+
+uint64_t helper_float_madd_d(CPUMIPSState *env, uint64_t fst0,
+ uint64_t fst1, uint64_t fst2)
+{
+ fst0 = float64_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float64_add(fst0, fst2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint32_t helper_float_madd_s(CPUMIPSState *env, uint32_t fst0,
+ uint32_t fst1, uint32_t fst2)
+{
+ fst0 = float32_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float32_add(fst0, fst2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_madd_ps(CPUMIPSState *env, uint64_t fdt0,
+ uint64_t fdt1, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl0 = float32_mul(fstl0, fstl1, &env->active_fpu.fp_status);
+ fstl0 = float32_add(fstl0, fstl2, &env->active_fpu.fp_status);
+ fsth0 = float32_mul(fsth0, fsth1, &env->active_fpu.fp_status);
+ fsth0 = float32_add(fsth0, fsth2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth0 << 32) | fstl0;
+}
+
+uint64_t helper_float_msub_d(CPUMIPSState *env, uint64_t fst0,
+ uint64_t fst1, uint64_t fst2)
+{
+ fst0 = float64_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float64_sub(fst0, fst2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint32_t helper_float_msub_s(CPUMIPSState *env, uint32_t fst0,
+ uint32_t fst1, uint32_t fst2)
+{
+ fst0 = float32_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float32_sub(fst0, fst2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_msub_ps(CPUMIPSState *env, uint64_t fdt0,
+ uint64_t fdt1, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl0 = float32_mul(fstl0, fstl1, &env->active_fpu.fp_status);
+ fstl0 = float32_sub(fstl0, fstl2, &env->active_fpu.fp_status);
+ fsth0 = float32_mul(fsth0, fsth1, &env->active_fpu.fp_status);
+ fsth0 = float32_sub(fsth0, fsth2, &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth0 << 32) | fstl0;
+}
+
+uint64_t helper_float_nmadd_d(CPUMIPSState *env, uint64_t fst0,
+ uint64_t fst1, uint64_t fst2)
+{
+ fst0 = float64_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float64_add(fst0, fst2, &env->active_fpu.fp_status);
+ fst0 = float64_chs(fst0);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint32_t helper_float_nmadd_s(CPUMIPSState *env, uint32_t fst0,
+ uint32_t fst1, uint32_t fst2)
+{
+ fst0 = float32_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float32_add(fst0, fst2, &env->active_fpu.fp_status);
+ fst0 = float32_chs(fst0);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_nmadd_ps(CPUMIPSState *env, uint64_t fdt0,
+ uint64_t fdt1, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl0 = float32_mul(fstl0, fstl1, &env->active_fpu.fp_status);
+ fstl0 = float32_add(fstl0, fstl2, &env->active_fpu.fp_status);
+ fstl0 = float32_chs(fstl0);
+ fsth0 = float32_mul(fsth0, fsth1, &env->active_fpu.fp_status);
+ fsth0 = float32_add(fsth0, fsth2, &env->active_fpu.fp_status);
+ fsth0 = float32_chs(fsth0);
+
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth0 << 32) | fstl0;
+}
+
+uint64_t helper_float_nmsub_d(CPUMIPSState *env, uint64_t fst0,
+ uint64_t fst1, uint64_t fst2)
+{
+ fst0 = float64_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float64_sub(fst0, fst2, &env->active_fpu.fp_status);
+ fst0 = float64_chs(fst0);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint32_t helper_float_nmsub_s(CPUMIPSState *env, uint32_t fst0,
+ uint32_t fst1, uint32_t fst2)
+{
+ fst0 = float32_mul(fst0, fst1, &env->active_fpu.fp_status);
+ fst0 = float32_sub(fst0, fst2, &env->active_fpu.fp_status);
+ fst0 = float32_chs(fst0);
+
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_nmsub_ps(CPUMIPSState *env, uint64_t fdt0,
+ uint64_t fdt1, uint64_t fdt2)
+{
+ uint32_t fstl0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fstl1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fstl2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fstl0 = float32_mul(fstl0, fstl1, &env->active_fpu.fp_status);
+ fstl0 = float32_sub(fstl0, fstl2, &env->active_fpu.fp_status);
+ fstl0 = float32_chs(fstl0);
+ fsth0 = float32_mul(fsth0, fsth1, &env->active_fpu.fp_status);
+ fsth0 = float32_sub(fsth0, fsth2, &env->active_fpu.fp_status);
+ fsth0 = float32_chs(fsth0);
+
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth0 << 32) | fstl0;
+}
+
+
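+/*
+ * R6 fused multiply-add: unlike the legacy MADD/MSUB helpers above,
+ * which round after the multiply and again after the add, MADDF.fmt
+ * computes fd + fs * ft with a single rounding; MSUBF.fmt computes
+ * fd - fs * ft by negating the product within the fused operation.
+ */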
+uint32_t helper_float_maddf_s(CPUMIPSState *env, uint32_t fs,
+ uint32_t ft, uint32_t fd)
+{
+ uint32_t fdret;
+
+ fdret = float32_muladd(fs, ft, fd, 0,
+ &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_maddf_d(CPUMIPSState *env, uint64_t fs,
+ uint64_t ft, uint64_t fd)
+{
+ uint64_t fdret;
+
+ fdret = float64_muladd(fs, ft, fd, 0,
+ &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint32_t helper_float_msubf_s(CPUMIPSState *env, uint32_t fs,
+ uint32_t ft, uint32_t fd)
+{
+ uint32_t fdret;
+
+ fdret = float32_muladd(fs, ft, fd, float_muladd_negate_product,
+ &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+uint64_t helper_float_msubf_d(CPUMIPSState *env, uint64_t fs,
+ uint64_t ft, uint64_t fd)
+{
+ uint64_t fdret;
+
+ fdret = float64_muladd(fs, ft, fd, float_muladd_negate_product,
+ &env->active_fpu.fp_status);
+
+ update_fcr31(env, GETPC());
+ return fdret;
+}
+
+
+/* compare operations */
+#define FOP_COND_D(op, cond) \
+void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ int c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+} \
+void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ int c; \
+ fdt0 = float64_abs(fdt0); \
+ fdt1 = float64_abs(fdt1); \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+}
+
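+/*
+ * Each C.cond.fmt helper evaluates "cond" and records the result in
+ * condition-code bit "cc" of the FCSR; e.g. after helper_cmp_d_lt(env,
+ * a, b, 2) a branch testing cc 2 (BC1T) is taken iff a < b (a NaN
+ * operand raises Invalid and leaves the condition false).
+ */
+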
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float64_unordered_quiet() is still called for its side effects.
+ */
+FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status))
+FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_eq_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float64_unordered() is still called for its side effects.
+ */
+FOP_COND_D(sf, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_D(ngle, float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status))
+FOP_COND_D(seq, float64_eq(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ngl, float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_eq(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(lt, float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(nge, float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(le, float64_le(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+FOP_COND_D(ngt, float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le(fdt0, fdt1,
+ &env->active_fpu.fp_status))
+
+#define FOP_COND_S(op, cond) \
+void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
+{ \
+ int c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+} \
+void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
+{ \
+ int c; \
+ fst0 = float32_abs(fst0); \
+ fst1 = float32_abs(fst1); \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+}
+
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered_quiet() is still called for its side effects.
+ */
+FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_S(un, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status))
+FOP_COND_S(eq, float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(olt, float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ole, float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered() is still called for its side effects.
+ */
+FOP_COND_S(sf, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_S(ngle, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status))
+FOP_COND_S(seq, float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ngl, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(lt, float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(nge, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(le, float32_le(fst0, fst1,
+ &env->active_fpu.fp_status))
+FOP_COND_S(ngt, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1,
+ &env->active_fpu.fp_status))
+
+#define FOP_COND_PS(op, condl, condh) \
+void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ uint32_t fst0, fsth0, fst1, fsth1; \
+ int ch, cl; \
+ fst0 = fdt0 & 0XFFFFFFFF; \
+ fsth0 = fdt0 >> 32; \
+ fst1 = fdt1 & 0XFFFFFFFF; \
+ fsth1 = fdt1 >> 32; \
+ cl = condl; \
+ ch = condh; \
+ update_fcr31(env, GETPC()); \
+ if (cl) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+ if (ch) \
+ SET_FP_COND(cc + 1, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc + 1, env->active_fpu); \
+} \
+void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ uint32_t fst0, fsth0, fst1, fsth1; \
+ int ch, cl; \
+ fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
+ fsth0 = float32_abs(fdt0 >> 32); \
+ fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
+ fsth1 = float32_abs(fdt1 >> 32); \
+ cl = condl; \
+ ch = condh; \
+ update_fcr31(env, GETPC()); \
+ if (cl) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+ if (ch) \
+ SET_FP_COND(cc + 1, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc + 1, env->active_fpu); \
+}
+
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered_quiet() is still called for its side effects.
+ */
+FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status), 0),
+ (float32_unordered_quiet(fsth1, fsth0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_eq_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_eq_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_lt_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ole, float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_le_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_le_quiet(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered() is still called for its side effects.
+ */
+FOP_COND_PS(sf, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status), 0),
+ (float32_unordered(fsth1, fsth0,
+ &env->active_fpu.fp_status), 0))
+FOP_COND_PS(ngle, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(seq, float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_eq(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ngl, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_eq(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(lt, float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_lt(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(nge, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(le, float32_le(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_le(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+FOP_COND_PS(ngt, float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1,
+ &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0,
+ &env->active_fpu.fp_status)
+ || float32_le(fsth0, fsth1,
+ &env->active_fpu.fp_status))
+
+/* R6 compare operations */
+#define FOP_CONDN_D(op, cond) \
+uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1) \
+{ \
+ uint64_t c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) { \
+ return -1; \
+ } else { \
+ return 0; \
+ } \
+}
+
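+/*
+ * R6 CMP.cond.fmt produces a bitmask rather than setting an FCSR
+ * condition code: all ones (-1) if the condition holds, all zeroes
+ * otherwise, so the result can feed SEL.fmt or BC1NEZ directly.
+ */
+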
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float64_unordered_quiet() is still called for its side effects.
+ */
+FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_eq_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float64_unordered() is still called for its side effects.
+ */
+FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_eq(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sle, (float64_le(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sor, (float64_le(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_le(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0,
+ &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1,
+ &env->active_fpu.fp_status)))
+
+#define FOP_CONDN_S(op, cond) \
+uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1) \
+{ \
+ uint64_t c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) { \
+ return -1; \
+ } else { \
+ return 0; \
+ } \
+}
+
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered_quiet() is still called for its side effects.
+ */
+FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false (0), but
+ * float32_unordered() is still called for its side effects.
+ */
+FOP_CONDN_S(saf, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(sun, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(seq, (float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_eq(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(slt, (float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sult, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sle, (float32_le(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sule, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sor, (float32_le(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sune, (float32_unordered(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status)))
+FOP_CONDN_S(sne, (float32_lt(fst1, fst0,
+ &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1,
+ &env->active_fpu.fp_status)))
diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c
new file mode 100644
index 000000000..d0bd0267b
--- /dev/null
+++ b/target/mips/tcg/ldst_helper.c
@@ -0,0 +1,306 @@
+/*
+ * MIPS emulation load/store helpers for QEMU.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/memop.h"
+#include "internal.h"
+
+#ifndef CONFIG_USER_ONLY
+
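+/*
+ * Load-linked: record the translated link address in CP0_LLAddr, the
+ * virtual address in lladdr and the loaded value in llval, so that a
+ * later SC can check whether the linked location was written. A
+ * misaligned address raises AdEL (without updating CP0_BadVAddr while
+ * in debug mode).
+ */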
+#define HELPER_LD_ATOMIC(name, insn, almask, do_cast) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
+{ \
+ if (arg & almask) { \
+ if (!(env->hflags & MIPS_HFLAG_DM)) { \
+ env->CP0_BadVAddr = arg; \
+ } \
+ do_raise_exception(env, EXCP_AdEL, GETPC()); \
+ } \
+ env->CP0_LLAddr = cpu_mips_translate_address(env, arg, MMU_DATA_LOAD, \
+ GETPC()); \
+ env->lladdr = arg; \
+ env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \
+ return env->llval; \
+}
+HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
+#ifdef TARGET_MIPS64
+HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
+#endif
+#undef HELPER_LD_ATOMIC
+
+#endif /* !CONFIG_USER_ONLY */
+
+static inline bool cpu_is_bigendian(CPUMIPSState *env)
+{
+ return extract32(env->CP0_Config0, CP0C0_BE, 1);
+}
+
+static inline target_ulong get_lmask(CPUMIPSState *env,
+ target_ulong value, unsigned bits)
+{
+ unsigned mask = (bits / BITS_PER_BYTE) - 1;
+
+ value &= mask;
+
+ if (!cpu_is_bigendian(env)) {
+ value ^= mask;
+ }
+
+ return value;
+}
+
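+/*
+ * The lmask computed above is the byte offset within the word (or
+ * doubleword), flipped on little-endian targets so the tests in the
+ * helpers below can be written once. E.g. for SWL on a big-endian
+ * target with (arg2 & 3) == 1, lmask == 1 and the three most
+ * significant bytes of arg1 are stored at arg2, arg2 + 1 and arg2 + 2.
+ */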
+void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ target_ulong lmask = get_lmask(env, arg2, 32);
+ int dir = cpu_is_bigendian(env) ? 1 : -1;
+
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
+
+ if (lmask <= 2) {
+ cpu_stb_mmuidx_ra(env, arg2 + 1 * dir, (uint8_t)(arg1 >> 16),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 1) {
+ cpu_stb_mmuidx_ra(env, arg2 + 2 * dir, (uint8_t)(arg1 >> 8),
+ mem_idx, GETPC());
+ }
+
+ if (lmask == 0) {
+ cpu_stb_mmuidx_ra(env, arg2 + 3 * dir, (uint8_t)arg1,
+ mem_idx, GETPC());
+ }
+}
+
+void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ target_ulong lmask = get_lmask(env, arg2, 32);
+ int dir = cpu_is_bigendian(env) ? 1 : -1;
+
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
+
+ if (lmask >= 1) {
+ cpu_stb_mmuidx_ra(env, arg2 - 1 * dir, (uint8_t)(arg1 >> 8),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 2) {
+ cpu_stb_mmuidx_ra(env, arg2 - 2 * dir, (uint8_t)(arg1 >> 16),
+ mem_idx, GETPC());
+ }
+
+ if (lmask == 3) {
+ cpu_stb_mmuidx_ra(env, arg2 - 3 * dir, (uint8_t)(arg1 >> 24),
+ mem_idx, GETPC());
+ }
+}
+
+#if defined(TARGET_MIPS64)
+/*
+ * "Left"/"right" partial doubleword stores. We must perform the memory
+ * accesses inline, or fault handling won't work.
+ */
+
+void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ target_ulong lmask = get_lmask(env, arg2, 64);
+ int dir = cpu_is_bigendian(env) ? 1 : -1;
+
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
+
+ if (lmask <= 6) {
+ cpu_stb_mmuidx_ra(env, arg2 + 1 * dir, (uint8_t)(arg1 >> 48),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 5) {
+ cpu_stb_mmuidx_ra(env, arg2 + 2 * dir, (uint8_t)(arg1 >> 40),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 4) {
+ cpu_stb_mmuidx_ra(env, arg2 + 3 * dir, (uint8_t)(arg1 >> 32),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 3) {
+ cpu_stb_mmuidx_ra(env, arg2 + 4 * dir, (uint8_t)(arg1 >> 24),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 2) {
+ cpu_stb_mmuidx_ra(env, arg2 + 5 * dir, (uint8_t)(arg1 >> 16),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 1) {
+ cpu_stb_mmuidx_ra(env, arg2 + 6 * dir, (uint8_t)(arg1 >> 8),
+ mem_idx, GETPC());
+ }
+
+ if (lmask <= 0) {
+ cpu_stb_mmuidx_ra(env, arg2 + 7 * dir, (uint8_t)arg1,
+ mem_idx, GETPC());
+ }
+}
+
+void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ target_ulong lmask = get_lmask(env, arg2, 64);
+ int dir = cpu_is_bigendian(env) ? 1 : -1;
+
+ cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
+
+ if (lmask >= 1) {
+ cpu_stb_mmuidx_ra(env, arg2 - 1 * dir, (uint8_t)(arg1 >> 8),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 2) {
+ cpu_stb_mmuidx_ra(env, arg2 - 2 * dir, (uint8_t)(arg1 >> 16),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 3) {
+ cpu_stb_mmuidx_ra(env, arg2 - 3 * dir, (uint8_t)(arg1 >> 24),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 4) {
+ cpu_stb_mmuidx_ra(env, arg2 - 4 * dir, (uint8_t)(arg1 >> 32),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 5) {
+ cpu_stb_mmuidx_ra(env, arg2 - 5 * dir, (uint8_t)(arg1 >> 40),
+ mem_idx, GETPC());
+ }
+
+ if (lmask >= 6) {
+ cpu_stb_mmuidx_ra(env, arg2 - 6 * dir, (uint8_t)(arg1 >> 48),
+ mem_idx, GETPC());
+ }
+
+ if (lmask == 7) {
+ cpu_stb_mmuidx_ra(env, arg2 - 7 * dir, (uint8_t)(arg1 >> 56),
+ mem_idx, GETPC());
+ }
+}
+#endif /* TARGET_MIPS64 */
+
+static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
+
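+/*
+ * microMIPS LWM/SWM (and LDM/SDM) encode the register list as a count:
+ * bits [3:0] select the first N entries of multiple_regs (s0..s7, fp)
+ * and bit 4 additionally transfers ra; e.g. reglist == 0x13 moves
+ * s0, s1, s2 and ra.
+ */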
+void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ env->active_tc.gpr[multiple_regs[i]] =
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
+ addr += 4;
+ }
+ }
+
+ if (do_r31) {
+ env->active_tc.gpr[31] =
+ (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
+ }
+}
+
+void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+            cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
+ mem_idx, GETPC());
+ addr += 4;
+ }
+ }
+
+ if (do_r31) {
+ cpu_stl_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
+ }
+}
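+
+/*
+ * Encoding note for the LWM/SWM (and LDM/SDM) helpers: bits 3..0 of
+ * reglist give the number of consecutive registers taken from
+ * multiple_regs[] above (s0..s7, fp), and bit 4 additionally selects
+ * ra (GPR 31). For example, reglist == 0x13 transfers s0, s1, s2 and ra.
+ */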
+
+#if defined(TARGET_MIPS64)
+void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ env->active_tc.gpr[multiple_regs[i]] =
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
+ addr += 8;
+ }
+ }
+
+ if (do_r31) {
+ env->active_tc.gpr[31] =
+ cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
+ }
+}
+
+void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
+ mem_idx, GETPC());
+ addr += 8;
+ }
+ }
+
+ if (do_r31) {
+ cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
+ }
+}
+
+#endif /* TARGET_MIPS64 */
diff --git a/target/mips/tcg/lmmi_helper.c b/target/mips/tcg/lmmi_helper.c
new file mode 100644
index 000000000..abeb7736a
--- /dev/null
+++ b/target/mips/tcg/lmmi_helper.c
@@ -0,0 +1,747 @@
+/*
+ * Loongson Multimedia Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/*
+ * If the byte ordering doesn't matter, i.e. all columns are treated
+ * identically, then this union can be used directly. If byte ordering
+ * does matter, the element indices must be corrected for the host's
+ * byte order (see BYTE_ORDER_XOR below).
+ */
+typedef union {
+ uint8_t ub[8];
+ int8_t sb[8];
+ uint16_t uh[4];
+ int16_t sh[4];
+ uint32_t uw[2];
+ int32_t sw[2];
+ uint64_t d;
+} LMIValue;
+
+/* Some byte ordering issues can be mitigated by XORing in the following. */
+#ifdef HOST_WORDS_BIGENDIAN
+# define BYTE_ORDER_XOR(N) N
+#else
+# define BYTE_ORDER_XOR(N) 0
+#endif
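+
+/*
+ * For example, vd.uh[i ^ BYTE_ORDER_XOR(3)] addresses logical halfword i
+ * of a 64-bit value regardless of host endianness, since on a big-endian
+ * host uh[0] holds the most-significant halfword.
+ */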
+
+#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
+#define SATUB(x) (x < 0 ? 0 : x > 0xff ? 0xff : x)
+
+#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
+#define SATUH(x) (x < 0 ? 0 : x > 0xffff ? 0xffff : x)
+
+#define SATSW(x) \
+ (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
+#define SATUW(x) (x < 0 ? 0 : x > 0xffffffffull ? 0xffffffffull : x)
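+
+/*
+ * For example, SATSB(200) clamps to 0x7f, SATSB(-200) to -0x80, and
+ * SATUB(-1) to 0.
+ */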
+
+uint64_t helper_paddsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] + vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] + vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] + vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] + vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] += vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] += vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] += vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] - vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] - vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] - vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] -= vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] -= vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] -= vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pshufh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs;
+ unsigned i;
+
+ vs.d = fs;
+ vd.d = 0;
+ for (i = 0; i < 4; i++, ft >>= 2) {
+ vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host];
+ }
+ return vd.d;
+}
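+
+/*
+ * In helper_pshufh above, each 2-bit field of ft selects the source
+ * halfword for one destination element; e.g. ft == 0x1b (fields 3, 2,
+ * 1, 0) reverses the four halfwords of fs.
+ */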
+
+uint64_t helper_packsswh(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ int64_t tmp;
+
+ tmp = (int32_t)(fs >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 0;
+
+ tmp = (int32_t)(fs >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 16;
+
+ tmp = (int32_t)(ft >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 32;
+
+ tmp = (int32_t)(ft >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 48;
+
+ return fd;
+}
+
+uint64_t helper_packsshb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_packushb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft)
+{
+ return (fs & 0xffffffff) | (ft << 32);
+}
+
+uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft)
+{
+ return (fs >> 32) | (ft & ~0xffffffffull);
+}
+
+uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[0 ^ host];
+ vd.uh[1 ^ host] = vt.uh[0 ^ host];
+ vd.uh[2 ^ host] = vs.uh[1 ^ host];
+ vd.uh[3 ^ host] = vt.uh[1 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[2 ^ host];
+ vd.uh[1 ^ host] = vt.uh[2 ^ host];
+ vd.uh[2 ^ host] = vs.uh[3 ^ host];
+ vd.uh[3 ^ host] = vt.uh[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[0 ^ host];
+ vd.ub[1 ^ host] = vt.ub[0 ^ host];
+ vd.ub[2 ^ host] = vs.ub[1 ^ host];
+ vd.ub[3 ^ host] = vt.ub[1 ^ host];
+ vd.ub[4 ^ host] = vs.ub[2 ^ host];
+ vd.ub[5 ^ host] = vt.ub[2 ^ host];
+ vd.ub[6 ^ host] = vs.ub[3 ^ host];
+ vd.ub[7 ^ host] = vt.ub[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[4 ^ host];
+ vd.ub[1 ^ host] = vt.ub[4 ^ host];
+ vd.ub[2 ^ host] = vs.ub[5 ^ host];
+ vd.ub[3 ^ host] = vt.ub[5 ^ host];
+ vd.ub[4 ^ host] = vs.ub[6 ^ host];
+ vd.ub[5 ^ host] = vt.ub[6 ^ host];
+ vd.ub[6 ^ host] = vs.ub[7 ^ host];
+ vd.ub[7 ^ host] = vt.ub[7 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
+ }
+ return vs.d;
+}
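+
+/* Both averages above round half up, e.g. pavgb of 1 and 2 yields 2. */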
+
+uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] == vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] > vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] == vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] > vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] == vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] > vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psraw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ ft = 31;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.sw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrah(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ ft = 15;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmullh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] *= vt.sh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int32_t r = vs.sh[i] * vt.sh[i];
+ vs.sh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ uint32_t r = vs.uh[i] * vt.uh[i];
+ vs.uh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vs, vt;
+ uint32_t p0, p1;
+
+ vs.d = fs;
+ vt.d = ft;
+ p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host];
+ p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host];
+ p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host];
+ p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host];
+
+ return ((uint64_t)p1 << 32) | p0;
+}
+
+uint64_t helper_pasubub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = (r < 0 ? -r : r);
+ }
+ return vs.d;
+}
+
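+/* BIADD sums the eight unsigned bytes, e.g. 0x0101010101010101 -> 8. */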
+uint64_t helper_biadd(uint64_t fs)
+{
+ unsigned i, fd;
+
+ for (i = fd = 0; i < 8; ++i) {
+ fd += (fs >> (i * 8)) & 0xff;
+ }
+ return fd & 0xffff;
+}
+
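+/*
+ * PMOVMSKB gathers the sign bit of each byte; e.g.
+ * 0x8000000000800080 -> 0x85 (bytes 0, 2 and 7 have bit 7 set).
+ */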
+uint64_t helper_pmovmskb(uint64_t fs)
+{
+ unsigned fd = 0;
+
+ fd |= ((fs >> 7) & 1) << 0;
+ fd |= ((fs >> 15) & 1) << 1;
+ fd |= ((fs >> 23) & 1) << 2;
+ fd |= ((fs >> 31) & 1) << 3;
+ fd |= ((fs >> 39) & 1) << 4;
+ fd |= ((fs >> 47) & 1) << 5;
+ fd |= ((fs >> 55) & 1) << 6;
+ fd |= ((fs >> 63) & 1) << 7;
+
+ return fd & 0xff;
+}
diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build
new file mode 100644
index 000000000..98003779a
--- /dev/null
+++ b/target/mips/tcg/meson.build
@@ -0,0 +1,33 @@
+gen = [
+ decodetree.process('rel6.decode', extra_args: ['--decode=decode_isa_rel6']),
+ decodetree.process('msa.decode', extra_args: '--decode=decode_ase_msa'),
+ decodetree.process('tx79.decode', extra_args: '--static-decode=decode_tx79'),
+ decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'),
+]
+
+mips_ss.add(gen)
+mips_ss.add(files(
+ 'dsp_helper.c',
+ 'exception.c',
+ 'fpu_helper.c',
+ 'ldst_helper.c',
+ 'lmmi_helper.c',
+ 'msa_helper.c',
+ 'msa_translate.c',
+ 'op_helper.c',
+ 'rel6_translate.c',
+ 'translate.c',
+ 'translate_addr_const.c',
+ 'txx9_translate.c',
+ 'vr54xx_helper.c',
+ 'vr54xx_translate.c',
+))
+mips_ss.add(when: 'TARGET_MIPS64', if_true: files(
+ 'tx79_translate.c',
+), if_false: files(
+ 'mxu_translate.c',
+))
+
+if have_system
+ subdir('sysemu')
+endif
diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc
new file mode 100644
index 000000000..0da4c802a
--- /dev/null
+++ b/target/mips/tcg/micromips_translate.c.inc
@@ -0,0 +1,3231 @@
+/*
+ * microMIPS translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+/*
+ * microMIPS32/microMIPS64 major opcodes
+ *
+ * 1. MIPS Architecture for Programmers Volume II-B:
+ * The microMIPS32 Instruction Set (Revision 3.05)
+ *
+ * Table 6.2 microMIPS32 Encoding of Major Opcode Field
+ *
+ * 2. MIPS Architecture For Programmers Volume II-A:
+ * The MIPS64 Instruction Set (Revision 3.51)
+ */
+
+enum {
+ POOL32A = 0x00,
+ POOL16A = 0x01,
+ LBU16 = 0x02,
+ MOVE16 = 0x03,
+ ADDI32 = 0x04,
+ R6_LUI = 0x04,
+ AUI = 0x04,
+ LBU32 = 0x05,
+ SB32 = 0x06,
+ LB32 = 0x07,
+
+ POOL32B = 0x08,
+ POOL16B = 0x09,
+ LHU16 = 0x0a,
+ ANDI16 = 0x0b,
+ ADDIU32 = 0x0c,
+ LHU32 = 0x0d,
+ SH32 = 0x0e,
+ LH32 = 0x0f,
+
+ POOL32I = 0x10,
+ POOL16C = 0x11,
+ LWSP16 = 0x12,
+ POOL16D = 0x13,
+ ORI32 = 0x14,
+ POOL32F = 0x15,
+ POOL32S = 0x16, /* MIPS64 */
+ DADDIU32 = 0x17, /* MIPS64 */
+
+ POOL32C = 0x18,
+ LWGP16 = 0x19,
+ LW16 = 0x1a,
+ POOL16E = 0x1b,
+ XORI32 = 0x1c,
+ JALS32 = 0x1d,
+ BOVC = 0x1d,
+ BEQC = 0x1d,
+ BEQZALC = 0x1d,
+ ADDIUPC = 0x1e,
+ PCREL = 0x1e,
+ BNVC = 0x1f,
+ BNEC = 0x1f,
+ BNEZALC = 0x1f,
+
+ R6_BEQZC = 0x20,
+ JIC = 0x20,
+ POOL16F = 0x21,
+ SB16 = 0x22,
+ BEQZ16 = 0x23,
+ BEQZC16 = 0x23,
+ SLTI32 = 0x24,
+ BEQ32 = 0x25,
+ BC = 0x25,
+ SWC132 = 0x26,
+ LWC132 = 0x27,
+
+ /* 0x29 is reserved */
+ RES_29 = 0x29,
+ R6_BNEZC = 0x28,
+ JIALC = 0x28,
+ SH16 = 0x2a,
+ BNEZ16 = 0x2b,
+ BNEZC16 = 0x2b,
+ SLTIU32 = 0x2c,
+ BNE32 = 0x2d,
+ BALC = 0x2d,
+ SDC132 = 0x2e,
+ LDC132 = 0x2f,
+
+ /* 0x31 is reserved */
+ RES_31 = 0x31,
+ BLEZALC = 0x30,
+ BGEZALC = 0x30,
+ BGEUC = 0x30,
+ SWSP16 = 0x32,
+ B16 = 0x33,
+ BC16 = 0x33,
+ ANDI32 = 0x34,
+ J32 = 0x35,
+ BGTZC = 0x35,
+ BLTZC = 0x35,
+ BLTC = 0x35,
+ SD32 = 0x36, /* MIPS64 */
+ LD32 = 0x37, /* MIPS64 */
+
+ /* 0x39 is reserved */
+ RES_39 = 0x39,
+ BGTZALC = 0x38,
+ BLTZALC = 0x38,
+ BLTUC = 0x38,
+ SW16 = 0x3a,
+ LI16 = 0x3b,
+ JALX32 = 0x3c,
+ JAL32 = 0x3d,
+ BLEZC = 0x3d,
+ BGEZC = 0x3d,
+ BGEC = 0x3d,
+ SW32 = 0x3e,
+ LW32 = 0x3f
+};
+
+/* PCREL instructions perform PC-relative address calculation (bits 20..16) */
+enum {
+ ADDIUPC_00 = 0x00,
+ ADDIUPC_01 = 0x01,
+ ADDIUPC_02 = 0x02,
+ ADDIUPC_03 = 0x03,
+ ADDIUPC_04 = 0x04,
+ ADDIUPC_05 = 0x05,
+ ADDIUPC_06 = 0x06,
+ ADDIUPC_07 = 0x07,
+ AUIPC = 0x1e,
+ ALUIPC = 0x1f,
+ LWPC_08 = 0x08,
+ LWPC_09 = 0x09,
+ LWPC_0A = 0x0A,
+ LWPC_0B = 0x0B,
+ LWPC_0C = 0x0C,
+ LWPC_0D = 0x0D,
+ LWPC_0E = 0x0E,
+ LWPC_0F = 0x0F,
+};
+
+/* POOL32A encoding of minor opcode field */
+
+enum {
+ /*
+ * These opcodes are distinguished only by bits 9..6; those bits are
+ * what are recorded below.
+ */
+ SLL32 = 0x0,
+ SRL32 = 0x1,
+ SRA = 0x2,
+ ROTR = 0x3,
+ SELEQZ = 0x5,
+ SELNEZ = 0x6,
+ R6_RDHWR = 0x7,
+
+ SLLV = 0x0,
+ SRLV = 0x1,
+ SRAV = 0x2,
+ ROTRV = 0x3,
+ ADD = 0x4,
+ ADDU32 = 0x5,
+ SUB = 0x6,
+ SUBU32 = 0x7,
+ MUL = 0x8,
+ AND = 0x9,
+ OR32 = 0xa,
+ NOR = 0xb,
+ XOR32 = 0xc,
+ SLT = 0xd,
+ SLTU = 0xe,
+
+ MOVN = 0x0,
+ R6_MUL = 0x0,
+ MOVZ = 0x1,
+ MUH = 0x1,
+ MULU = 0x2,
+ MUHU = 0x3,
+ LWXS = 0x4,
+ R6_DIV = 0x4,
+ MOD = 0x5,
+ R6_DIVU = 0x6,
+ MODU = 0x7,
+
+ /* The following can be distinguished by their lower 6 bits. */
+ BREAK32 = 0x07,
+ INS = 0x0c,
+ LSA = 0x0f,
+ ALIGN = 0x1f,
+ EXT = 0x2c,
+ POOL32AXF = 0x3c,
+ SIGRIE = 0x3f
+};
+
+/* POOL32AXF encoding of minor opcode field extension */
+
+/*
+ * 1. MIPS Architecture for Programmers Volume II-B:
+ * The microMIPS32 Instruction Set (Revision 3.05)
+ *
+ * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field
+ *
+ * 2. MIPS Architecture for Programmers VolumeIV-e:
+ * The MIPS DSP Application-Specific Extension
+ * to the microMIPS32 Architecture (Revision 2.34)
+ *
+ * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field
+ */
+
+enum {
+ /* bits 11..6 */
+ TEQ = 0x00,
+ TGE = 0x08,
+ TGEU = 0x10,
+ TLT = 0x20,
+ TLTU = 0x28,
+ TNE = 0x30,
+
+ MFC0 = 0x03,
+ MTC0 = 0x0b,
+
+ /* begin of microMIPS32 DSP */
+
+ /* bits 13..12 for 0x01 */
+ MFHI_ACC = 0x0,
+ MFLO_ACC = 0x1,
+ MTHI_ACC = 0x2,
+ MTLO_ACC = 0x3,
+
+ /* bits 13..12 for 0x2a */
+ MADD_ACC = 0x0,
+ MADDU_ACC = 0x1,
+ MSUB_ACC = 0x2,
+ MSUBU_ACC = 0x3,
+
+ /* bits 13..12 for 0x32 */
+ MULT_ACC = 0x0,
+ MULTU_ACC = 0x1,
+
+ /* end of microMIPS32 DSP */
+
+ /* bits 15..12 for 0x2c */
+ BITSWAP = 0x0,
+ SEB = 0x2,
+ SEH = 0x3,
+ CLO = 0x4,
+ CLZ = 0x5,
+ RDHWR = 0x6,
+ WSBH = 0x7,
+ MULT = 0x8,
+ MULTU = 0x9,
+ DIV = 0xa,
+ DIVU = 0xb,
+ MADD = 0xc,
+ MADDU = 0xd,
+ MSUB = 0xe,
+ MSUBU = 0xf,
+
+ /* bits 15..12 for 0x34 */
+ MFC2 = 0x4,
+ MTC2 = 0x5,
+ MFHC2 = 0x8,
+ MTHC2 = 0x9,
+ CFC2 = 0xc,
+ CTC2 = 0xd,
+
+ /* bits 15..12 for 0x3c */
+ JALR = 0x0,
+ JR = 0x0, /* alias */
+ JALRC = 0x0,
+ JRC = 0x0,
+ JALR_HB = 0x1,
+ JALRC_HB = 0x1,
+ JALRS = 0x4,
+ JALRS_HB = 0x5,
+
+ /* bits 15..12 for 0x05 */
+ RDPGPR = 0xe,
+ WRPGPR = 0xf,
+
+ /* bits 15..12 for 0x0d */
+ TLBP = 0x0,
+ TLBR = 0x1,
+ TLBWI = 0x2,
+ TLBWR = 0x3,
+ TLBINV = 0x4,
+ TLBINVF = 0x5,
+ WAIT = 0x9,
+ IRET = 0xd,
+ DERET = 0xe,
+ ERET = 0xf,
+
+ /* bits 15..12 for 0x15 */
+ DMT = 0x0,
+ DVPE = 0x1,
+ EMT = 0x2,
+ EVPE = 0x3,
+
+ /* bits 15..12 for 0x1d */
+ DI = 0x4,
+ EI = 0x5,
+
+ /* bits 15..12 for 0x2d */
+ SYNC = 0x6,
+ SYSCALL = 0x8,
+ SDBBP = 0xd,
+
+ /* bits 15..12 for 0x35 */
+ MFHI32 = 0x0,
+ MFLO32 = 0x1,
+ MTHI32 = 0x2,
+ MTLO32 = 0x3,
+};
+
+/* POOL32B encoding of minor opcode field (bits 15..12) */
+
+enum {
+ LWC2 = 0x0,
+ LWP = 0x1,
+ LDP = 0x4,
+ LWM32 = 0x5,
+ CACHE = 0x6,
+ LDM = 0x7,
+ SWC2 = 0x8,
+ SWP = 0x9,
+ SDP = 0xc,
+ SWM32 = 0xd,
+ SDM = 0xf
+};
+
+/* POOL32C encoding of minor opcode field (bits 15..12) */
+
+enum {
+ LWL = 0x0,
+ SWL = 0x8,
+ LWR = 0x1,
+ SWR = 0x9,
+ PREF = 0x2,
+ ST_EVA = 0xa,
+ LL = 0x3,
+ SC = 0xb,
+ LDL = 0x4,
+ SDL = 0xc,
+ LDR = 0x5,
+ SDR = 0xd,
+ LD_EVA = 0x6,
+ LWU = 0xe,
+ LLD = 0x7,
+ SCD = 0xf
+};
+
+/* POOL32C LD-EVA encoding of minor opcode field (bits 11..9) */
+
+enum {
+ LBUE = 0x0,
+ LHUE = 0x1,
+ LWLE = 0x2,
+ LWRE = 0x3,
+ LBE = 0x4,
+ LHE = 0x5,
+ LLE = 0x6,
+ LWE = 0x7,
+};
+
+/* POOL32C ST-EVA encoding of minor opcode field (bits 11..9) */
+
+enum {
+ SWLE = 0x0,
+ SWRE = 0x1,
+ PREFE = 0x2,
+ CACHEE = 0x3,
+ SBE = 0x4,
+ SHE = 0x5,
+ SCE = 0x6,
+ SWE = 0x7,
+};
+
+/* POOL32F encoding of minor opcode field (bits 5..0) */
+
+enum {
+ /* These are the bit 7..6 values */
+ ADD_FMT = 0x0,
+
+ SUB_FMT = 0x1,
+
+ MUL_FMT = 0x2,
+
+ DIV_FMT = 0x3,
+
+ /* These are the bit 8..6 values */
+ MOVN_FMT = 0x0,
+ RSQRT2_FMT = 0x0,
+ MOVF_FMT = 0x0,
+ RINT_FMT = 0x0,
+ SELNEZ_FMT = 0x0,
+
+ MOVZ_FMT = 0x1,
+ LWXC1 = 0x1,
+ MOVT_FMT = 0x1,
+ CLASS_FMT = 0x1,
+ SELEQZ_FMT = 0x1,
+
+ PLL_PS = 0x2,
+ SWXC1 = 0x2,
+ SEL_FMT = 0x2,
+
+ PLU_PS = 0x3,
+ LDXC1 = 0x3,
+
+ MOVN_FMT_04 = 0x4,
+ PUL_PS = 0x4,
+ SDXC1 = 0x4,
+ RECIP2_FMT = 0x4,
+
+ MOVZ_FMT_05 = 0x05,
+ PUU_PS = 0x5,
+ LUXC1 = 0x5,
+
+ CVT_PS_S = 0x6,
+ SUXC1 = 0x6,
+ ADDR_PS = 0x6,
+ PREFX = 0x6,
+ MADDF_FMT = 0x6,
+
+ MULR_PS = 0x7,
+ MSUBF_FMT = 0x7,
+
+ MADD_S = 0x01,
+ MADD_D = 0x09,
+ MADD_PS = 0x11,
+ ALNV_PS = 0x19,
+ MSUB_S = 0x21,
+ MSUB_D = 0x29,
+ MSUB_PS = 0x31,
+
+ NMADD_S = 0x02,
+ NMADD_D = 0x0a,
+ NMADD_PS = 0x12,
+ NMSUB_S = 0x22,
+ NMSUB_D = 0x2a,
+ NMSUB_PS = 0x32,
+
+ MIN_FMT = 0x3,
+ MAX_FMT = 0xb,
+ MINA_FMT = 0x23,
+ MAXA_FMT = 0x2b,
+ POOL32FXF = 0x3b,
+
+ CABS_COND_FMT = 0x1c, /* MIPS3D */
+ C_COND_FMT = 0x3c,
+
+ CMP_CONDN_S = 0x5,
+ CMP_CONDN_D = 0x15
+};
+
+/* POOL32Fxf encoding of minor opcode extension field */
+
+enum {
+ CVT_L = 0x04,
+ RSQRT_FMT = 0x08,
+ FLOOR_L = 0x0c,
+ CVT_PW_PS = 0x1c,
+ CVT_W = 0x24,
+ SQRT_FMT = 0x28,
+ FLOOR_W = 0x2c,
+ CVT_PS_PW = 0x3c,
+ CFC1 = 0x40,
+ RECIP_FMT = 0x48,
+ CEIL_L = 0x4c,
+ CTC1 = 0x60,
+ CEIL_W = 0x6c,
+ MFC1 = 0x80,
+ CVT_S_PL = 0x84,
+ TRUNC_L = 0x8c,
+ MTC1 = 0xa0,
+ CVT_S_PU = 0xa4,
+ TRUNC_W = 0xac,
+ MFHC1 = 0xc0,
+ ROUND_L = 0xcc,
+ MTHC1 = 0xe0,
+ ROUND_W = 0xec,
+
+ MOV_FMT = 0x01,
+ MOVF = 0x05,
+ ABS_FMT = 0x0d,
+ RSQRT1_FMT = 0x1d,
+ MOVT = 0x25,
+ NEG_FMT = 0x2d,
+ CVT_D = 0x4d,
+ RECIP1_FMT = 0x5d,
+ CVT_S = 0x6d
+};
+
+/* POOL32I encoding of minor opcode field (bits 25..21) */
+
+enum {
+ BLTZ = 0x00,
+ BLTZAL = 0x01,
+ BGEZ = 0x02,
+ BGEZAL = 0x03,
+ BLEZ = 0x04,
+ BNEZC = 0x05,
+ BGTZ = 0x06,
+ BEQZC = 0x07,
+ TLTI = 0x08,
+ BC1EQZC = 0x08,
+ TGEI = 0x09,
+ BC1NEZC = 0x09,
+ TLTIU = 0x0a,
+ BC2EQZC = 0x0a,
+ TGEIU = 0x0b,
+ BC2NEZC = 0x0b,
+ TNEI = 0x0c,
+ R6_SYNCI = 0x0c,
+ LUI = 0x0d,
+ TEQI = 0x0e,
+ SYNCI = 0x10,
+ BLTZALS = 0x11,
+ BGEZALS = 0x13,
+ BC2F = 0x14,
+ BC2T = 0x15,
+ /* These overlap and are distinguished by bit 16 of the instruction */
+ BC1F = 0x1c,
+ BC1T = 0x1d,
+ BC1ANY2F = 0x1c,
+ BC1ANY2T = 0x1d,
+ BC1ANY4F = 0x1e,
+ BC1ANY4T = 0x1f
+};
+
+/* POOL16A encoding of minor opcode field */
+
+enum {
+ ADDU16 = 0x0,
+ SUBU16 = 0x1
+};
+
+/* POOL16B encoding of minor opcode field */
+
+enum {
+ SLL16 = 0x0,
+ SRL16 = 0x1
+};
+
+/* POOL16C encoding of minor opcode field */
+
+enum {
+ NOT16 = 0x00,
+ XOR16 = 0x04,
+ AND16 = 0x08,
+ OR16 = 0x0c,
+ LWM16 = 0x10,
+ SWM16 = 0x14,
+ JR16 = 0x18,
+ JRC16 = 0x1a,
+ JALR16 = 0x1c,
+ JALR16S = 0x1e,
+ MFHI16 = 0x20,
+ MFLO16 = 0x24,
+ BREAK16 = 0x28,
+ SDBBP16 = 0x2c,
+ JRADDIUSP = 0x30
+};
+
+/* R6 POOL16C encoding of minor opcode field (bits 0..5) */
+
+enum {
+ R6_NOT16 = 0x00,
+ R6_AND16 = 0x01,
+ R6_LWM16 = 0x02,
+ R6_JRC16 = 0x03,
+ MOVEP = 0x04,
+ MOVEP_05 = 0x05,
+ MOVEP_06 = 0x06,
+ MOVEP_07 = 0x07,
+ R6_XOR16 = 0x08,
+ R6_OR16 = 0x09,
+ R6_SWM16 = 0x0a,
+ JALRC16 = 0x0b,
+ MOVEP_0C = 0x0c,
+ MOVEP_0D = 0x0d,
+ MOVEP_0E = 0x0e,
+ MOVEP_0F = 0x0f,
+ JRCADDIUSP = 0x13,
+ R6_BREAK16 = 0x1b,
+ R6_SDBBP16 = 0x3b
+};
+
+/* POOL16D encoding of minor opcode field */
+
+enum {
+ ADDIUS5 = 0x0,
+ ADDIUSP = 0x1
+};
+
+/* POOL16E encoding of minor opcode field */
+
+enum {
+ ADDIUR2 = 0x0,
+ ADDIUR1SP = 0x1
+};
+
+static int mmreg(int r)
+{
+ static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+/* Used for 16-bit store instructions. */
+static int mmreg2(int r)
+{
+ static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
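+
+/*
+ * mmreg() maps the 3-bit field to {s0, s1, v0, v1, a0, a1, a2, a3};
+ * mmreg2() replaces s0 with the zero register, presumably so 16-bit
+ * stores can encode a zero source operand.
+ */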
+
+#define uMIPS_RD(op) ((op >> 7) & 0x7)
+#define uMIPS_RS(op) ((op >> 4) & 0x7)
+#define uMIPS_RS2(op) uMIPS_RS(op)
+#define uMIPS_RS1(op) ((op >> 1) & 0x7)
+#define uMIPS_RD5(op) ((op >> 5) & 0x1f)
+#define uMIPS_RS5(op) (op & 0x1f)
+
+/* Signed immediate */
+#define SIMM(op, start, width) \
+ ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) \
+ << (32 - width)) \
+ >> (32 - width))
+/* Zero-extended immediate */
+#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width)))
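+
+/*
+ * For example, SIMM(op, 1, 4) extracts bits 4..1 and sign-extends them,
+ * so an encoded field of 0x8 decodes to -8, while ZIMM(op, 1, 4) decodes
+ * the same field to 8.
+ */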
+
+static void gen_addiur1sp(DisasContext *ctx)
+{
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2);
+}
+
+static void gen_addiur2(DisasContext *ctx)
+{
+ static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 };
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]);
+}
+
+static void gen_addiusp(DisasContext *ctx)
+{
+ int encoded = ZIMM(ctx->opcode, 1, 9);
+ int decoded;
+
+ if (encoded <= 1) {
+ decoded = 256 + encoded;
+ } else if (encoded <= 255) {
+ decoded = encoded;
+ } else if (encoded <= 509) {
+ decoded = encoded - 512;
+ } else {
+ decoded = encoded - 768;
+ }
+
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2);
+}
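+
+/*
+ * The 9-bit field above folds four immediate ranges together: encoded
+ * 0..1 decodes to 256..257, 2..255 to itself, 256..509 to -256..-3, and
+ * 510..511 to -258..-257; the result is scaled by 4 before being added
+ * to the stack pointer.
+ */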
+
+static void gen_addius5(DisasContext *ctx)
+{
+ int imm = SIMM(ctx->opcode, 1, 4);
+ int rd = (ctx->opcode >> 5) & 0x1f;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm);
+}
+
+static void gen_andi16(DisasContext *ctx)
+{
+ static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16,
+ 31, 32, 63, 64, 255, 32768, 65535 };
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+ int encoded = ZIMM(ctx->opcode, 0, 4);
+
+ gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]);
+}
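+
+/*
+ * ANDI16 thus covers sixteen common masks: e.g. an encoded 0 applies
+ * mask 128 and an encoded 0xd applies mask 255.
+ */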
+
+static void gen_ldst_multiple(DisasContext *ctx, uint32_t opc, int reglist,
+ int base, int16_t offset)
+{
+ TCGv t0, t1;
+ TCGv_i32 t2;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ t1 = tcg_const_tl(reglist);
+ t2 = tcg_const_i32(ctx->mem_idx);
+
+ save_cpu_state(ctx, 1);
+ switch (opc) {
+ case LWM32:
+ gen_helper_lwm(cpu_env, t0, t1, t2);
+ break;
+ case SWM32:
+ gen_helper_swm(cpu_env, t0, t1, t2);
+ break;
+#ifdef TARGET_MIPS64
+ case LDM:
+ gen_helper_ldm(cpu_env, t0, t1, t2);
+ break;
+ case SDM:
+ gen_helper_sdm(cpu_env, t0, t1, t2);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(t2);
+}
+
+static void gen_pool16c_insn(DisasContext *ctx)
+{
+ int rd = mmreg((ctx->opcode >> 3) & 0x7);
+ int rs = mmreg(ctx->opcode & 0x7);
+
+ switch (((ctx->opcode) >> 4) & 0x3f) {
+ case NOT16 + 0:
+ case NOT16 + 1:
+ case NOT16 + 2:
+ case NOT16 + 3:
+ gen_logic(ctx, OPC_NOR, rd, rs, 0);
+ break;
+ case XOR16 + 0:
+ case XOR16 + 1:
+ case XOR16 + 2:
+ case XOR16 + 3:
+ gen_logic(ctx, OPC_XOR, rd, rd, rs);
+ break;
+ case AND16 + 0:
+ case AND16 + 1:
+ case AND16 + 2:
+ case AND16 + 3:
+ gen_logic(ctx, OPC_AND, rd, rd, rs);
+ break;
+ case OR16 + 0:
+ case OR16 + 1:
+ case OR16 + 2:
+ case OR16 + 3:
+ gen_logic(ctx, OPC_OR, rd, rd, rs);
+ break;
+ case LWM16 + 0:
+ case LWM16 + 1:
+ case LWM16 + 2:
+ case LWM16 + 3:
+ {
+ static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 };
+ int offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3],
+ 29, offset << 2);
+ }
+ break;
+ case SWM16 + 0:
+ case SWM16 + 1:
+ case SWM16 + 2:
+ case SWM16 + 3:
+ {
+ static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 };
+ int offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3],
+ 29, offset << 2);
+ }
+ break;
+ case JR16 + 0:
+ case JR16 + 1:
+ {
+ int reg = ctx->opcode & 0x1f;
+
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4);
+ }
+ break;
+ case JRC16 + 0:
+ case JRC16 + 1:
+ {
+ int reg = ctx->opcode & 0x1f;
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0);
+ /*
+ * Let normal delay slot handling in our caller take us
+ * to the branch target.
+ */
+ }
+ break;
+ case JALR16 + 0:
+ case JALR16 + 1:
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case JALR16S + 0:
+ case JALR16S + 1:
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case MFHI16 + 0:
+ case MFHI16 + 1:
+ gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode));
+ break;
+ case MFLO16 + 0:
+ case MFLO16 + 1:
+ gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode));
+ break;
+ case BREAK16:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case SDBBP16:
+ if (is_uhi(extract32(ctx->opcode, 0, 4))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /*
+ * XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS_R1);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+ case JRADDIUSP + 0:
+ case JRADDIUSP + 1:
+ {
+ int imm = ZIMM(ctx->opcode, 0, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0);
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
+ /*
+ * Let normal delay slot handling in our caller take us
+ * to the branch target.
+ */
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static inline void gen_movep(DisasContext *ctx, int enc_dest, int enc_rt,
+ int enc_rs)
+{
+ int rd, re;
+ static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 };
+ static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 };
+ static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 };
+
+ rd = rd_enc[enc_dest];
+ re = re_enc[enc_dest];
+ gen_load_gpr(cpu_gpr[rd], rs_rt_enc[enc_rs]);
+ gen_load_gpr(cpu_gpr[re], rs_rt_enc[enc_rt]);
+}
+
+static void gen_pool16c_r6_insn(DisasContext *ctx)
+{
+ int rt = mmreg((ctx->opcode >> 7) & 0x7);
+ int rs = mmreg((ctx->opcode >> 4) & 0x7);
+
+ switch (ctx->opcode & 0xf) {
+ case R6_NOT16:
+ gen_logic(ctx, OPC_NOR, rt, rs, 0);
+ break;
+ case R6_AND16:
+ gen_logic(ctx, OPC_AND, rt, rt, rs);
+ break;
+ case R6_LWM16:
+ {
+ int lwm_converted = 0x11 + extract32(ctx->opcode, 8, 2);
+ int offset = extract32(ctx->opcode, 4, 4);
+ gen_ldst_multiple(ctx, LWM32, lwm_converted, 29, offset << 2);
+ }
+ break;
+ case R6_JRC16: /* JRCADDIUSP */
+ if ((ctx->opcode >> 4) & 1) {
+ /* JRCADDIUSP */
+ int imm = extract32(ctx->opcode, 5, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0);
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
+ } else {
+ /* JRC16 */
+ rs = extract32(ctx->opcode, 5, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, rs, 0, 0, 0);
+ }
+ break;
+ case MOVEP:
+ case MOVEP_05:
+ case MOVEP_06:
+ case MOVEP_07:
+ case MOVEP_0C:
+ case MOVEP_0D:
+ case MOVEP_0E:
+ case MOVEP_0F:
+ {
+ int enc_dest = uMIPS_RD(ctx->opcode);
+ int enc_rt = uMIPS_RS2(ctx->opcode);
+ int enc_rs = (ctx->opcode & 3) | ((ctx->opcode >> 1) & 4);
+ gen_movep(ctx, enc_dest, enc_rt, enc_rs);
+ }
+ break;
+ case R6_XOR16:
+ gen_logic(ctx, OPC_XOR, rt, rt, rs);
+ break;
+ case R6_OR16:
+ gen_logic(ctx, OPC_OR, rt, rt, rs);
+ break;
+ case R6_SWM16:
+ {
+ int swm_converted = 0x11 + extract32(ctx->opcode, 8, 2);
+ int offset = extract32(ctx->opcode, 4, 4);
+ gen_ldst_multiple(ctx, SWM32, swm_converted, 29, offset << 2);
+ }
+ break;
+ case JALRC16: /* BREAK16, SDBBP16 */
+ switch (ctx->opcode & 0x3f) {
+ case JALRC16:
+ case JALRC16 + 0x20:
+ /* JALRC16 */
+ gen_compute_branch(ctx, OPC_JALR, 2, (ctx->opcode >> 5) & 0x1f,
+ 31, 0, 0);
+ break;
+ case R6_BREAK16:
+ /* BREAK16 */
+ generate_exception(ctx, EXCP_BREAK);
+ break;
+ case R6_SDBBP16:
+ /* SDBBP16 */
+ if (is_uhi(extract32(ctx->opcode, 6, 4))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ generate_exception(ctx, EXCP_RI);
+ } else {
+ generate_exception(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd,
+ int base, int16_t offset)
+{
+ TCGv t0, t1;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ switch (opc) {
+ case LWP:
+ if (rd == base) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd);
+ tcg_gen_movi_tl(t1, 4);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd + 1);
+ break;
+ case SWP:
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_movi_tl(t1, 4);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ gen_load_gpr(t1, rd + 1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ break;
+#ifdef TARGET_MIPS64
+ case LDP:
+ if (rd == base) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t1, rd);
+ tcg_gen_movi_tl(t1, 8);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t1, rd + 1);
+ break;
+ case SDP:
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_movi_tl(t1, 8);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ gen_load_gpr(t1, rd + 1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
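+
+/*
+ * Note the reserved cases above: the pair instructions cannot use r31,
+ * and a load pair whose first destination equals the base register is
+ * rejected as well, presumably because a restarted instruction would
+ * otherwise see a clobbered base.
+ */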
+
+static void gen_pool32axf(CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
+{
+ int extension = (ctx->opcode >> 6) & 0x3f;
+ int minor = (ctx->opcode >> 12) & 0xf;
+ uint32_t mips32_op;
+
+ switch (extension) {
+ case TEQ:
+ mips32_op = OPC_TEQ;
+ goto do_trap;
+ case TGE:
+ mips32_op = OPC_TGE;
+ goto do_trap;
+ case TGEU:
+ mips32_op = OPC_TGEU;
+ goto do_trap;
+ case TLT:
+ mips32_op = OPC_TLT;
+ goto do_trap;
+ case TLTU:
+ mips32_op = OPC_TLTU;
+ goto do_trap;
+ case TNE:
+ mips32_op = OPC_TNE;
+ do_trap:
+ gen_trap(ctx, mips32_op, rs, rt, -1);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case MFC0:
+ case MFC0 + 32:
+ check_cp0_enabled(ctx);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ gen_mfc0(ctx, cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7);
+ break;
+ case MTC0:
+ case MTC0 + 32:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7);
+ tcg_temp_free(t0);
+ }
+ break;
+#endif
+ case 0x2a:
+ switch (minor & 3) {
+ case MADD_ACC:
+ gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MADDU_ACC:
+ gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MSUB_ACC:
+ gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MSUBU_ACC:
+ gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x32:
+ switch (minor & 3) {
+ case MULT_ACC:
+ gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MULTU_ACC:
+ gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x2c:
+ switch (minor) {
+ case BITSWAP:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_bitswap(ctx, OPC_BITSWAP, rs, rt);
+ break;
+ case SEB:
+ gen_bshfl(ctx, OPC_SEB, rs, rt);
+ break;
+ case SEH:
+ gen_bshfl(ctx, OPC_SEH, rs, rt);
+ break;
+ case CLO:
+ mips32_op = OPC_CLO;
+ goto do_cl;
+ case CLZ:
+ mips32_op = OPC_CLZ;
+ do_cl:
+ check_insn(ctx, ISA_MIPS_R1);
+ gen_cl(ctx, mips32_op, rt, rs);
+ break;
+ case RDHWR:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_rdhwr(ctx, rt, rs, 0);
+ break;
+ case WSBH:
+ gen_bshfl(ctx, OPC_WSBH, rs, rt);
+ break;
+ case MULT:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MULT;
+ goto do_mul;
+ case MULTU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MULTU;
+ goto do_mul;
+ case DIV:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_DIV;
+ goto do_div;
+ case DIVU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_DIVU;
+ goto do_div;
+ do_div:
+ check_insn(ctx, ISA_MIPS_R1);
+ gen_muldiv(ctx, mips32_op, 0, rs, rt);
+ break;
+ case MADD:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MADD;
+ goto do_mul;
+ case MADDU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MADDU;
+ goto do_mul;
+ case MSUB:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MSUB;
+ goto do_mul;
+ case MSUBU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MSUBU;
+ do_mul:
+ check_insn(ctx, ISA_MIPS_R1);
+ gen_muldiv(ctx, mips32_op, 0, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x34:
+ switch (minor) {
+ case MFC2:
+ case MTC2:
+ case MFHC2:
+ case MTHC2:
+ case CFC2:
+ case CTC2:
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x3c:
+ switch (minor) {
+ case JALR: /* JALRC */
+ case JALR_HB: /* JALRC_HB */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* JALRC, JALRC_HB */
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 0);
+ } else {
+ /* JALR, JALR_HB */
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ case JALRS:
+ case JALRS_HB:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x05:
+ switch (minor) {
+ case RDPGPR:
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_load_srsgpr(rs, rt);
+ break;
+ case WRPGPR:
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_store_srsgpr(rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0x0d:
+ switch (minor) {
+ case TLBP:
+ mips32_op = OPC_TLBP;
+ goto do_cp0;
+ case TLBR:
+ mips32_op = OPC_TLBR;
+ goto do_cp0;
+ case TLBWI:
+ mips32_op = OPC_TLBWI;
+ goto do_cp0;
+ case TLBWR:
+ mips32_op = OPC_TLBWR;
+ goto do_cp0;
+ case TLBINV:
+ mips32_op = OPC_TLBINV;
+ goto do_cp0;
+ case TLBINVF:
+ mips32_op = OPC_TLBINVF;
+ goto do_cp0;
+ case WAIT:
+ mips32_op = OPC_WAIT;
+ goto do_cp0;
+ case DERET:
+ mips32_op = OPC_DERET;
+ goto do_cp0;
+ case ERET:
+ mips32_op = OPC_ERET;
+ do_cp0:
+ gen_cp0(env, ctx, mips32_op, rt, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x1d:
+ switch (minor) {
+ case DI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_di(t0, cpu_env);
+ gen_store_gpr(t0, rs);
+ /*
+ * Stop translation as we may have switched the execution
+ * mode.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ tcg_temp_free(t0);
+ }
+ break;
+ case EI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_ei(t0, cpu_env);
+ gen_store_gpr(t0, rs);
+ /*
+ * DISAS_STOP isn't sufficient, we need to ensure we break out
+ * of translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ tcg_temp_free(t0);
+ }
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+#endif
+ case 0x2d:
+ switch (minor) {
+ case SYNC:
+ gen_sync(extract32(ctx->opcode, 16, 5));
+ break;
+ case SYSCALL:
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ break;
+ case SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 16, 10))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ check_insn(ctx, ISA_MIPS_R1);
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ gen_reserved_instruction(ctx);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x01:
+ switch (minor & 3) {
+ case MFHI_ACC:
+ gen_HILO(ctx, OPC_MFHI, minor >> 2, rs);
+ break;
+ case MFLO_ACC:
+ gen_HILO(ctx, OPC_MFLO, minor >> 2, rs);
+ break;
+ case MTHI_ACC:
+ gen_HILO(ctx, OPC_MTHI, minor >> 2, rs);
+ break;
+ case MTLO_ACC:
+ gen_HILO(ctx, OPC_MTLO, minor >> 2, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x35:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ switch (minor) {
+ case MFHI32:
+ gen_HILO(ctx, OPC_MFHI, 0, rs);
+ break;
+ case MFLO32:
+ gen_HILO(ctx, OPC_MFLO, 0, rs);
+ break;
+ case MTHI32:
+ gen_HILO(ctx, OPC_MTHI, 0, rs);
+ break;
+ case MTLO32:
+ gen_HILO(ctx, OPC_MTLO, 0, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ default:
+ pool32axf_invalid:
+ MIPS_INVAL("pool32axf");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void gen_pool32fxf(DisasContext *ctx, int rt, int rs)
+{
+ int extension = (ctx->opcode >> 6) & 0x3ff;
+ uint32_t mips32_op;
+
+#define FLOAT_1BIT_FMT(opc, fmt) ((fmt << 8) | opc)
+#define FLOAT_2BIT_FMT(opc, fmt) ((fmt << 7) | opc)
+#define COND_FLOAT_MOV(opc, cond) ((cond << 7) | opc)
+
+ switch (extension) {
+ case FLOAT_1BIT_FMT(CFC1, 0):
+ mips32_op = OPC_CFC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(CTC1, 0):
+ mips32_op = OPC_CTC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MFC1, 0):
+ mips32_op = OPC_MFC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MTC1, 0):
+ mips32_op = OPC_MTC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MFHC1, 0):
+ mips32_op = OPC_MFHC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MTHC1, 0):
+ mips32_op = OPC_MTHC1;
+ do_cp1:
+ gen_cp1(ctx, mips32_op, rt, rs);
+ break;
+
+ /* Reciprocal square root */
+ case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S):
+ mips32_op = OPC_RSQRT_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D):
+ mips32_op = OPC_RSQRT_D;
+ goto do_unaryfp;
+
+ /* Square root */
+ case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S):
+ mips32_op = OPC_SQRT_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D):
+ mips32_op = OPC_SQRT_D;
+ goto do_unaryfp;
+
+ /* Reciprocal */
+ case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S):
+ mips32_op = OPC_RECIP_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D):
+ mips32_op = OPC_RECIP_D;
+ goto do_unaryfp;
+
+ /* Floor */
+ case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S):
+ mips32_op = OPC_FLOOR_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D):
+ mips32_op = OPC_FLOOR_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S):
+ mips32_op = OPC_FLOOR_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D):
+ mips32_op = OPC_FLOOR_W_D;
+ goto do_unaryfp;
+
+ /* Ceiling */
+ case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S):
+ mips32_op = OPC_CEIL_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D):
+ mips32_op = OPC_CEIL_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S):
+ mips32_op = OPC_CEIL_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D):
+ mips32_op = OPC_CEIL_W_D;
+ goto do_unaryfp;
+
+ /* Truncation */
+ case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S):
+ mips32_op = OPC_TRUNC_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D):
+ mips32_op = OPC_TRUNC_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S):
+ mips32_op = OPC_TRUNC_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D):
+ mips32_op = OPC_TRUNC_W_D;
+ goto do_unaryfp;
+
+ /* Round */
+ case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S):
+ mips32_op = OPC_ROUND_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D):
+ mips32_op = OPC_ROUND_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S):
+ mips32_op = OPC_ROUND_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D):
+ mips32_op = OPC_ROUND_W_D;
+ goto do_unaryfp;
+
+ /* Integer to floating-point conversion */
+ case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S):
+ mips32_op = OPC_CVT_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D):
+ mips32_op = OPC_CVT_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S):
+ mips32_op = OPC_CVT_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D):
+ mips32_op = OPC_CVT_W_D;
+ goto do_unaryfp;
+
+ /* Paired-foo conversions */
+ case FLOAT_1BIT_FMT(CVT_S_PL, 0):
+ mips32_op = OPC_CVT_S_PL;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_S_PU, 0):
+ mips32_op = OPC_CVT_S_PU;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_PW_PS, 0):
+ mips32_op = OPC_CVT_PW_PS;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_PS_PW, 0):
+ mips32_op = OPC_CVT_PS_PW;
+ goto do_unaryfp;
+
+ /* Floating-point moves */
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S):
+ mips32_op = OPC_MOV_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D):
+ mips32_op = OPC_MOV_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_MOV_PS;
+ goto do_unaryfp;
+
+ /* Absolute value */
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S):
+ mips32_op = OPC_ABS_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D):
+ mips32_op = OPC_ABS_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_ABS_PS;
+ goto do_unaryfp;
+
+ /* Negation */
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S):
+ mips32_op = OPC_NEG_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D):
+ mips32_op = OPC_NEG_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_NEG_PS;
+ goto do_unaryfp;
+
+ /* Reciprocal square root step */
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S):
+ mips32_op = OPC_RSQRT1_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D):
+ mips32_op = OPC_RSQRT1_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_RSQRT1_PS;
+ goto do_unaryfp;
+
+ /* Reciprocal step */
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S):
+ mips32_op = OPC_RECIP1_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D):
+ mips32_op = OPC_RECIP1_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_RECIP1_PS;
+ goto do_unaryfp;
+
+ /* Conversions from double */
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S):
+ mips32_op = OPC_CVT_D_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W):
+ mips32_op = OPC_CVT_D_W;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L):
+ mips32_op = OPC_CVT_D_L;
+ goto do_unaryfp;
+
+ /* Conversions from single */
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D):
+ mips32_op = OPC_CVT_S_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W):
+ mips32_op = OPC_CVT_S_W;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L):
+ mips32_op = OPC_CVT_S_L;
+ do_unaryfp:
+ gen_farith(ctx, mips32_op, -1, rs, rt, 0);
+ break;
+
+ /* Conditional moves on floating-point codes */
+ case COND_FLOAT_MOV(MOVT, 0):
+ case COND_FLOAT_MOV(MOVT, 1):
+ case COND_FLOAT_MOV(MOVT, 2):
+ case COND_FLOAT_MOV(MOVT, 3):
+ case COND_FLOAT_MOV(MOVT, 4):
+ case COND_FLOAT_MOV(MOVT, 5):
+ case COND_FLOAT_MOV(MOVT, 6):
+ case COND_FLOAT_MOV(MOVT, 7):
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1);
+ break;
+ case COND_FLOAT_MOV(MOVF, 0):
+ case COND_FLOAT_MOV(MOVF, 1):
+ case COND_FLOAT_MOV(MOVF, 2):
+ case COND_FLOAT_MOV(MOVF, 3):
+ case COND_FLOAT_MOV(MOVF, 4):
+ case COND_FLOAT_MOV(MOVF, 5):
+ case COND_FLOAT_MOV(MOVF, 6):
+ case COND_FLOAT_MOV(MOVF, 7):
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0);
+ break;
+ default:
+ MIPS_INVAL("pool32fxf");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ int32_t offset;
+ uint16_t insn;
+ int rt, rs, rd, rr;
+ int16_t imm;
+ uint32_t op, minor, minor2, mips32_op;
+ uint32_t cond, fmt, cc;
+
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
+ ctx->opcode = (ctx->opcode << 16) | insn;
+
+ rt = (ctx->opcode >> 21) & 0x1f;
+ rs = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ rr = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t) ctx->opcode;
+
+ op = (ctx->opcode >> 26) & 0x3f;
+ switch (op) {
+ case POOL32A:
+ minor = ctx->opcode & 0x3f;
+ switch (minor) {
+ case 0x00:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ case SLL32:
+ mips32_op = OPC_SLL;
+ goto do_shifti;
+ case SRA:
+ mips32_op = OPC_SRA;
+ goto do_shifti;
+ case SRL32:
+ mips32_op = OPC_SRL;
+ goto do_shifti;
+ case ROTR:
+ mips32_op = OPC_ROTR;
+ do_shifti:
+ gen_shift_imm(ctx, mips32_op, rt, rs, rd);
+ break;
+ case SELEQZ:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_cond_move(ctx, OPC_SELEQZ, rd, rs, rt);
+ break;
+ case SELNEZ:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_cond_move(ctx, OPC_SELNEZ, rd, rs, rt);
+ break;
+ case R6_RDHWR:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3));
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case 0x10:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ /* Arithmetic */
+ case ADD:
+ mips32_op = OPC_ADD;
+ goto do_arith;
+ case ADDU32:
+ mips32_op = OPC_ADDU;
+ goto do_arith;
+ case SUB:
+ mips32_op = OPC_SUB;
+ goto do_arith;
+ case SUBU32:
+ mips32_op = OPC_SUBU;
+ goto do_arith;
+ case MUL:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MUL;
+ do_arith:
+ gen_arith(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Shifts */
+ case SLLV:
+ mips32_op = OPC_SLLV;
+ goto do_shift;
+ case SRLV:
+ mips32_op = OPC_SRLV;
+ goto do_shift;
+ case SRAV:
+ mips32_op = OPC_SRAV;
+ goto do_shift;
+ case ROTRV:
+ mips32_op = OPC_ROTRV;
+ do_shift:
+ gen_shift(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Logical operations */
+ case AND:
+ mips32_op = OPC_AND;
+ goto do_logic;
+ case OR32:
+ mips32_op = OPC_OR;
+ goto do_logic;
+ case NOR:
+ mips32_op = OPC_NOR;
+ goto do_logic;
+ case XOR32:
+ mips32_op = OPC_XOR;
+ do_logic:
+ gen_logic(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Set less than */
+ case SLT:
+ mips32_op = OPC_SLT;
+ goto do_slt;
+ case SLTU:
+ mips32_op = OPC_SLTU;
+ do_slt:
+ gen_slt(ctx, mips32_op, rd, rs, rt);
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case 0x18:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ /* Conditional moves */
+ case MOVN: /* MUL */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* MUL */
+ gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt);
+ } else {
+ /* MOVN */
+ gen_cond_move(ctx, OPC_MOVN, rd, rs, rt);
+ }
+ break;
+ case MOVZ: /* MUH */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* MUH */
+ gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt);
+ } else {
+ /* MOVZ */
+ gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt);
+ }
+ break;
+ case MULU:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt);
+ break;
+ case MUHU:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt);
+ break;
+ case LWXS: /* DIV */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* DIV */
+ gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt);
+ } else {
+ /* LWXS */
+ gen_ldxs(ctx, rs, rt, rd);
+ }
+ break;
+ case MOD:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt);
+ break;
+ case R6_DIVU:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt);
+ break;
+ case MODU:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt);
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case INS:
+ gen_bitops(ctx, OPC_INS, rt, rs, rr, rd);
+ return;
+ case LSA:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2));
+ break;
+ case ALIGN:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_align(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 9, 2));
+ break;
+ case EXT:
+ gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd);
+ return;
+ case POOL32AXF:
+ gen_pool32axf(env, ctx, rt, rs);
+ break;
+ case BREAK32:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case SIGRIE:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_reserved_instruction(ctx);
+ break;
+ default:
+ pool32a_invalid:
+ MIPS_INVAL("pool32a");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case POOL32B:
+ minor = (ctx->opcode >> 12) & 0xf;
+ switch (minor) {
+ case CACHE:
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ break;
+ case LWC2:
+ case SWC2:
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+#ifdef TARGET_MIPS64
+ case LDP:
+ case SDP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+#endif
+ /* fall through */
+ case LWP:
+ case SWP:
+ gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
+ break;
+#ifdef TARGET_MIPS64
+ case LDM:
+ case SDM:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+#endif
+ /* fall through */
+ case LWM32:
+ case SWM32:
+ gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
+ break;
+ default:
+ MIPS_INVAL("pool32b");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case POOL32F:
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ minor = ctx->opcode & 0x3f;
+ check_cp1_enabled(ctx);
+ switch (minor) {
+ case ALNV_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_ALNV_PS;
+ goto do_madd;
+ case MADD_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MADD_S;
+ goto do_madd;
+ case MADD_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MADD_D;
+ goto do_madd;
+ case MADD_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MADD_PS;
+ goto do_madd;
+ case MSUB_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MSUB_S;
+ goto do_madd;
+ case MSUB_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MSUB_D;
+ goto do_madd;
+ case MSUB_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_MSUB_PS;
+ goto do_madd;
+ case NMADD_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMADD_S;
+ goto do_madd;
+ case NMADD_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMADD_D;
+ goto do_madd;
+ case NMADD_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMADD_PS;
+ goto do_madd;
+ case NMSUB_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMSUB_S;
+ goto do_madd;
+ case NMSUB_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMSUB_D;
+ goto do_madd;
+ case NMSUB_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_NMSUB_PS;
+ do_madd:
+ gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt);
+ break;
+ case CABS_COND_FMT:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ cond = (ctx->opcode >> 6) & 0xf;
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 10) & 0x3;
+ switch (fmt) {
+ case 0x0:
+ gen_cmpabs_s(ctx, cond, rt, rs, cc);
+ break;
+ case 0x1:
+ gen_cmpabs_d(ctx, cond, rt, rs, cc);
+ break;
+ case 0x2:
+ gen_cmpabs_ps(ctx, cond, rt, rs, cc);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case C_COND_FMT:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ cond = (ctx->opcode >> 6) & 0xf;
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 10) & 0x3;
+ switch (fmt) {
+ case 0x0:
+ gen_cmp_s(ctx, cond, rt, rs, cc);
+ break;
+ case 0x1:
+ gen_cmp_d(ctx, cond, rt, rs, cc);
+ break;
+ case 0x2:
+ gen_cmp_ps(ctx, cond, rt, rs, cc);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case CMP_CONDN_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_cmp_s(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd);
+ break;
+ case CMP_CONDN_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_r6_cmp_d(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd);
+ break;
+ case POOL32FXF:
+ gen_pool32fxf(ctx, rt, rs);
+ break;
+ case 0x00:
+            /* Paired-single element ops: P{LL,LU,UL,UU}.PS, CVT.PS.S */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case PLL_PS:
+ mips32_op = OPC_PLL_PS;
+ goto do_ps;
+ case PLU_PS:
+ mips32_op = OPC_PLU_PS;
+ goto do_ps;
+ case PUL_PS:
+ mips32_op = OPC_PUL_PS;
+ goto do_ps;
+ case PUU_PS:
+ mips32_op = OPC_PUU_PS;
+ goto do_ps;
+ case CVT_PS_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_CVT_PS_S;
+ do_ps:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MIN_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x08:
+ /* [LS][WDU]XC1 */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case LWXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LWXC1;
+ goto do_ldst_cp1;
+ case SWXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SWXC1;
+ goto do_ldst_cp1;
+ case LDXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LDXC1;
+ goto do_ldst_cp1;
+ case SDXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SDXC1;
+ goto do_ldst_cp1;
+ case LUXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LUXC1;
+ goto do_ldst_cp1;
+ case SUXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SUXC1;
+ do_ldst_cp1:
+ gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MAX_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x18:
+ /* 3D insns */
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ fmt = (ctx->opcode >> 9) & 0x3;
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case RSQRT2_FMT:
+ switch (fmt) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_RSQRT2_S;
+ goto do_3d;
+ case FMT_SDPS_D:
+ mips32_op = OPC_RSQRT2_D;
+ goto do_3d;
+ case FMT_SDPS_PS:
+ mips32_op = OPC_RSQRT2_PS;
+ goto do_3d;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case RECIP2_FMT:
+ switch (fmt) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_RECIP2_S;
+ goto do_3d;
+ case FMT_SDPS_D:
+ mips32_op = OPC_RECIP2_D;
+ goto do_3d;
+ case FMT_SDPS_PS:
+ mips32_op = OPC_RECIP2_PS;
+ goto do_3d;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case ADDR_PS:
+ mips32_op = OPC_ADDR_PS;
+ goto do_3d;
+ case MULR_PS:
+ mips32_op = OPC_MULR_PS;
+ do_3d:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x20:
+            /* MOV[FT].fmt, PREFX, RINT.fmt, CLASS.fmt */
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 9) & 0x3;
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case MOVF_FMT: /* RINT_FMT */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* RINT_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVF_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_movcf_s(ctx, rs, rt, cc, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_movcf_d(ctx, rs, rt, cc, 0);
+ break;
+ case FMT_SDPS_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, rs, rt, cc, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ }
+ break;
+ case MOVT_FMT: /* CLASS_FMT */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* CLASS_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVT_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_movcf_s(ctx, rs, rt, cc, 1);
+ break;
+ case FMT_SDPS_D:
+ gen_movcf_d(ctx, rs, rt, cc, 1);
+ break;
+ case FMT_SDPS_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, rs, rt, cc, 1);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ }
+ break;
+ case PREFX:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
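+/*
+ * Helper shared by the three-operand FP cases below: it selects the
+ * S/D/PS variant of the given opcode from the fmt field and jumps to
+ * the common do_fpop emitter.
+ */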
+#define FINSN_3ARG_SDPS(prfx) \
+ switch ((ctx->opcode >> 8) & 0x3) { \
+ case FMT_SDPS_S: \
+ mips32_op = OPC_##prfx##_S; \
+ goto do_fpop; \
+ case FMT_SDPS_D: \
+ mips32_op = OPC_##prfx##_D; \
+ goto do_fpop; \
+ case FMT_SDPS_PS: \
+ check_ps(ctx); \
+ mips32_op = OPC_##prfx##_PS; \
+ goto do_fpop; \
+ default: \
+ goto pool32f_invalid; \
+ }
+ case MINA_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MAXA_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x30:
+ /* regular FP ops */
+ switch ((ctx->opcode >> 6) & 0x3) {
+ case ADD_FMT:
+ FINSN_3ARG_SDPS(ADD);
+ break;
+ case SUB_FMT:
+ FINSN_3ARG_SDPS(SUB);
+ break;
+ case MUL_FMT:
+ FINSN_3ARG_SDPS(MUL);
+ break;
+            case DIV_FMT:
+                fmt = (ctx->opcode >> 8) & 0x3;
+                if (fmt == FMT_SDPS_D) {
+                    mips32_op = OPC_DIV_D;
+                } else if (fmt == FMT_SDPS_S) {
+                    mips32_op = OPC_DIV_S;
+                } else {
+                    goto pool32f_invalid;
+                }
+                goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x38:
+ /* cmovs */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case MOVN_FMT: /* SELEQZ_FMT */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* SELEQZ_FMT */
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVN_FMT */
+ FINSN_3ARG_SDPS(MOVN);
+ }
+ break;
+ case MOVN_FMT_04:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ FINSN_3ARG_SDPS(MOVN);
+ break;
+ case MOVZ_FMT: /* SELNEZ_FMT */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* SELNEZ_FMT */
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVZ_FMT */
+ FINSN_3ARG_SDPS(MOVZ);
+ }
+ break;
+ case MOVZ_FMT_05:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ FINSN_3ARG_SDPS(MOVZ);
+ break;
+ case SEL_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MADDF_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_MADDF_S;
+ goto do_fpop;
+ case FMT_SDPS_D:
+ mips32_op = OPC_MADDF_D;
+ goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MSUBF_FMT:
+ check_insn(ctx, ISA_MIPS_R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_MSUBF_S;
+ goto do_fpop;
+ case FMT_SDPS_D:
+ mips32_op = OPC_MSUBF_D;
+ goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ do_fpop:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ pool32f_invalid:
+ MIPS_INVAL("pool32f");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ case POOL32I:
+ minor = (ctx->opcode >> 21) & 0x1f;
+ switch (minor) {
+ case BLTZ:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BLTZAL:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BLTZALS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BGEZ:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BGEZAL:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BGEZALS:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BLEZ:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BGTZ:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4);
+ break;
+
+ /* Traps */
+ case TLTI: /* BC1EQZC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* BC1EQZC */
+ check_cp1_enabled(ctx);
+ gen_compute_branch1_r6(ctx, OPC_BC1EQZ, rs, imm << 1, 0);
+ } else {
+ /* TLTI */
+ mips32_op = OPC_TLTI;
+ goto do_trapi;
+ }
+ break;
+ case TGEI: /* BC1NEZC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* BC1NEZC */
+ check_cp1_enabled(ctx);
+ gen_compute_branch1_r6(ctx, OPC_BC1NEZ, rs, imm << 1, 0);
+ } else {
+ /* TGEI */
+ mips32_op = OPC_TGEI;
+ goto do_trapi;
+ }
+ break;
+ case TLTIU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_TLTIU;
+ goto do_trapi;
+ case TGEIU:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_TGEIU;
+ goto do_trapi;
+ case TNEI: /* SYNCI */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* SYNCI */
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ } else {
+ /* TNEI */
+ mips32_op = OPC_TNEI;
+ goto do_trapi;
+ }
+ break;
+ case TEQI:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_TEQI;
+ do_trapi:
+ gen_trap(ctx, mips32_op, rs, -1, imm);
+ break;
+
+ case BNEZC:
+ case BEQZC:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ,
+ 4, rs, 0, imm << 1, 0);
+ /*
+ * Compact branches don't have a delay slot, so just let
+ * the normal delay slot handling take us to the branch
+ * target.
+ */
+ break;
+ case LUI:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_logic_imm(ctx, OPC_LUI, rs, 0, imm);
+ break;
+ case SYNCI:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case BC2F:
+ case BC2T:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+ case BC1F:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F;
+ goto do_cp1branch;
+ case BC1T:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T;
+ goto do_cp1branch;
+ case BC1ANY4F:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_BC1FANY4;
+ goto do_cp1mips3d;
+ case BC1ANY4T:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_BC1TANY4;
+ do_cp1mips3d:
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ /* Fall through */
+ do_cp1branch:
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ gen_compute_branch1(ctx, mips32_op,
+ (ctx->opcode >> 18) & 0x7, imm << 1);
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ default:
+ MIPS_INVAL("pool32i");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case POOL32C:
+ minor = (ctx->opcode >> 12) & 0xf;
+ offset = sextract32(ctx->opcode, 0,
+ (ctx->insn_flags & ISA_MIPS_R6) ? 9 : 12);
+ switch (minor) {
+ case LWL:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LWL;
+ goto do_ld_lr;
+ case SWL:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SWL;
+ goto do_st_lr;
+ case LWR:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LWR;
+ goto do_ld_lr;
+ case SWR:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SWR;
+ goto do_st_lr;
+#if defined(TARGET_MIPS64)
+ case LDL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LDL;
+ goto do_ld_lr;
+ case SDL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SDL;
+ goto do_st_lr;
+ case LDR:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LDR;
+ goto do_ld_lr;
+ case SDR:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SDR;
+ goto do_st_lr;
+ case LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LWU;
+ goto do_ld_lr;
+ case LLD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LLD;
+ goto do_ld_lr;
+#endif
+ case LL:
+ mips32_op = OPC_LL;
+ goto do_ld_lr;
+ do_ld_lr:
+ gen_ld(ctx, mips32_op, rt, rs, offset);
+ break;
+ do_st_lr:
+ gen_st(ctx, mips32_op, rt, rs, offset);
+ break;
+ case SC:
+ gen_st_cond(ctx, rt, rs, offset, MO_TESL, false);
+ break;
+#if defined(TARGET_MIPS64)
+ case SCD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false);
+ break;
+#endif
+ case LD_EVA:
+ if (!ctx->eva) {
+ MIPS_INVAL("pool32c ld-eva");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ check_cp0_enabled(ctx);
+
+ minor2 = (ctx->opcode >> 9) & 0x7;
+ offset = sextract32(ctx->opcode, 0, 9);
+ switch (minor2) {
+ case LBUE:
+ mips32_op = OPC_LBUE;
+ goto do_ld_lr;
+ case LHUE:
+ mips32_op = OPC_LHUE;
+ goto do_ld_lr;
+ case LWLE:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LWLE;
+ goto do_ld_lr;
+ case LWRE:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_LWRE;
+ goto do_ld_lr;
+ case LBE:
+ mips32_op = OPC_LBE;
+ goto do_ld_lr;
+ case LHE:
+ mips32_op = OPC_LHE;
+ goto do_ld_lr;
+ case LLE:
+ mips32_op = OPC_LLE;
+ goto do_ld_lr;
+ case LWE:
+ mips32_op = OPC_LWE;
+ goto do_ld_lr;
+            }
+ break;
+ case ST_EVA:
+ if (!ctx->eva) {
+ MIPS_INVAL("pool32c st-eva");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ check_cp0_enabled(ctx);
+
+ minor2 = (ctx->opcode >> 9) & 0x7;
+ offset = sextract32(ctx->opcode, 0, 9);
+ switch (minor2) {
+ case SWLE:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SWLE;
+ goto do_st_lr;
+ case SWRE:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ mips32_op = OPC_SWRE;
+ goto do_st_lr;
+ case PREFE:
+ /* Treat as no-op */
+ if ((ctx->insn_flags & ISA_MIPS_R6) && (rt >= 24)) {
+ /* hint codes 24-31 are reserved and signal RI */
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
+ case CACHEE:
+                /* Treat as a no-op unless it targets the ITC cache */
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, offset);
+ }
+ break;
+ case SBE:
+ mips32_op = OPC_SBE;
+ goto do_st_lr;
+ case SHE:
+ mips32_op = OPC_SHE;
+ goto do_st_lr;
+ case SCE:
+ gen_st_cond(ctx, rt, rs, offset, MO_TESL, true);
+ break;
+ case SWE:
+ mips32_op = OPC_SWE;
+ goto do_st_lr;
+            }
+ break;
+ case PREF:
+ /* Treat as no-op */
+ if ((ctx->insn_flags & ISA_MIPS_R6) && (rt >= 24)) {
+ /* hint codes 24-31 are reserved and signal RI */
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
+ default:
+ MIPS_INVAL("pool32c");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case ADDI32: /* AUI, LUI */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* AUI, LUI */
+ gen_logic_imm(ctx, OPC_LUI, rt, rs, imm);
+ } else {
+ /* ADDI32 */
+ mips32_op = OPC_ADDI;
+ goto do_addi;
+ }
+ break;
+ case ADDIU32:
+ mips32_op = OPC_ADDIU;
+ do_addi:
+ gen_arith_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+
+ /* Logical operations */
+ case ORI32:
+ mips32_op = OPC_ORI;
+ goto do_logici;
+ case XORI32:
+ mips32_op = OPC_XORI;
+ goto do_logici;
+ case ANDI32:
+ mips32_op = OPC_ANDI;
+ do_logici:
+ gen_logic_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+
+ /* Set less than immediate */
+ case SLTI32:
+ mips32_op = OPC_SLTI;
+ goto do_slti;
+ case SLTIU32:
+ mips32_op = OPC_SLTIU;
+ do_slti:
+ gen_slt_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+ case JALX32:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case JALS32: /* BOVC, BEQC, BEQZALC */
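+        /*
+         * R6 reuses this major opcode and disambiguates on the
+         * register fields: BOVC when rs >= rt, BEQZALC when
+         * rs == 0 < rt, BEQC otherwise.
+         */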
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rs >= rt) {
+ /* BOVC */
+ mips32_op = OPC_BOVC;
+ } else if (rs < rt && rs == 0) {
+ /* BEQZALC */
+ mips32_op = OPC_BEQZALC;
+ } else {
+ /* BEQC */
+ mips32_op = OPC_BEQC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* JALS32 */
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1;
+ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ case BEQ32: /* BC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* BC */
+ gen_compute_compact_branch(ctx, OPC_BC, 0, 0,
+ sextract32(ctx->opcode << 1, 0, 27));
+ } else {
+ /* BEQ32 */
+ gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4);
+ }
+ break;
+ case BNE32: /* BALC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* BALC */
+ gen_compute_compact_branch(ctx, OPC_BALC, 0, 0,
+ sextract32(ctx->opcode << 1, 0, 27));
+ } else {
+ /* BNE32 */
+ gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4);
+ }
+ break;
+ case J32: /* BGTZC, BLTZC, BLTC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rs == 0 && rt != 0) {
+ /* BGTZC */
+ mips32_op = OPC_BGTZC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BLTZC */
+ mips32_op = OPC_BLTZC;
+ } else {
+ /* BLTC */
+ mips32_op = OPC_BLTC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* J32 */
+ gen_compute_branch(ctx, OPC_J, 4, rt, rs,
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
+ }
+ break;
+ case JAL32: /* BLEZC, BGEZC, BGEC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rs == 0 && rt != 0) {
+ /* BLEZC */
+ mips32_op = OPC_BLEZC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BGEZC */
+ mips32_op = OPC_BGEZC;
+ } else {
+ /* BGEC */
+ mips32_op = OPC_BGEC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* JAL32 */
+ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs,
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ /* Floating point (COP1) */
+ case LWC132:
+ mips32_op = OPC_LWC1;
+ goto do_cop1;
+ case LDC132:
+ mips32_op = OPC_LDC1;
+ goto do_cop1;
+ case SWC132:
+ mips32_op = OPC_SWC1;
+ goto do_cop1;
+ case SDC132:
+ mips32_op = OPC_SDC1;
+ do_cop1:
+ gen_cop1_ldst(ctx, mips32_op, rt, rs, imm);
+ break;
+ case ADDIUPC: /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */
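+            /*
+             * ADDIUPC and LWPC are relative to the word-aligned PC
+             * (low two bits cleared); AUIPC and ALUIPC use the raw PC.
+             */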
+ switch ((ctx->opcode >> 16) & 0x1f) {
+ case ADDIUPC_00:
+ case ADDIUPC_01:
+ case ADDIUPC_02:
+ case ADDIUPC_03:
+ case ADDIUPC_04:
+ case ADDIUPC_05:
+ case ADDIUPC_06:
+ case ADDIUPC_07:
+ gen_pcrel(ctx, OPC_ADDIUPC, ctx->base.pc_next & ~0x3, rt);
+ break;
+ case AUIPC:
+ gen_pcrel(ctx, OPC_AUIPC, ctx->base.pc_next, rt);
+ break;
+ case ALUIPC:
+ gen_pcrel(ctx, OPC_ALUIPC, ctx->base.pc_next, rt);
+ break;
+ case LWPC_08:
+ case LWPC_09:
+ case LWPC_0A:
+ case LWPC_0B:
+ case LWPC_0C:
+ case LWPC_0D:
+ case LWPC_0E:
+ case LWPC_0F:
+ gen_pcrel(ctx, R6_OPC_LWPC, ctx->base.pc_next & ~0x3, rt);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ } else {
+ /* ADDIUPC */
+ int reg = mmreg(ZIMM(ctx->opcode, 23, 3));
+ offset = SIMM(ctx->opcode, 0, 23) << 2;
+
+ gen_addiupc(ctx, reg, offset, 0, 0);
+ }
+ break;
+ case BNVC: /* BNEC, BNEZALC */
+ check_insn(ctx, ISA_MIPS_R6);
+ if (rs >= rt) {
+ /* BNVC */
+ mips32_op = OPC_BNVC;
+ } else if (rs < rt && rs == 0) {
+ /* BNEZALC */
+ mips32_op = OPC_BNEZALC;
+ } else {
+ /* BNEC */
+ mips32_op = OPC_BNEC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ case R6_BNEZC: /* JIALC */
+ check_insn(ctx, ISA_MIPS_R6);
+ if (rt != 0) {
+ /* BNEZC */
+ gen_compute_compact_branch(ctx, OPC_BNEZC, rt, 0,
+ sextract32(ctx->opcode << 1, 0, 22));
+ } else {
+ /* JIALC */
+ gen_compute_compact_branch(ctx, OPC_JIALC, 0, rs, imm);
+ }
+ break;
+ case R6_BEQZC: /* JIC */
+ check_insn(ctx, ISA_MIPS_R6);
+ if (rt != 0) {
+ /* BEQZC */
+ gen_compute_compact_branch(ctx, OPC_BEQZC, rt, 0,
+ sextract32(ctx->opcode << 1, 0, 22));
+ } else {
+ /* JIC */
+ gen_compute_compact_branch(ctx, OPC_JIC, 0, rs, imm);
+ }
+ break;
+ case BLEZALC: /* BGEZALC, BGEUC */
+ check_insn(ctx, ISA_MIPS_R6);
+ if (rs == 0 && rt != 0) {
+ /* BLEZALC */
+ mips32_op = OPC_BLEZALC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BGEZALC */
+ mips32_op = OPC_BGEZALC;
+ } else {
+ /* BGEUC */
+ mips32_op = OPC_BGEUC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ case BGTZALC: /* BLTZALC, BLTUC */
+ check_insn(ctx, ISA_MIPS_R6);
+ if (rs == 0 && rt != 0) {
+ /* BGTZALC */
+ mips32_op = OPC_BGTZALC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BLTZALC */
+ mips32_op = OPC_BLTZALC;
+ } else {
+ /* BLTUC */
+ mips32_op = OPC_BLTUC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ /* Loads and stores */
+ case LB32:
+ mips32_op = OPC_LB;
+ goto do_ld;
+ case LBU32:
+ mips32_op = OPC_LBU;
+ goto do_ld;
+ case LH32:
+ mips32_op = OPC_LH;
+ goto do_ld;
+ case LHU32:
+ mips32_op = OPC_LHU;
+ goto do_ld;
+ case LW32:
+ mips32_op = OPC_LW;
+ goto do_ld;
+#ifdef TARGET_MIPS64
+ case LD32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LD;
+ goto do_ld;
+ case SD32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_SD;
+ goto do_st;
+#endif
+ case SB32:
+ mips32_op = OPC_SB;
+ goto do_st;
+ case SH32:
+ mips32_op = OPC_SH;
+ goto do_st;
+ case SW32:
+ mips32_op = OPC_SW;
+ goto do_st;
+ do_ld:
+ gen_ld(ctx, mips32_op, rt, rs, imm);
+ break;
+ do_st:
+ gen_st(ctx, mips32_op, rt, rs, imm);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static int decode_isa_micromips(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t op;
+
+ /* make sure instructions are on a halfword boundary */
+ if (ctx->base.pc_next & 0x1) {
+ env->CP0_BadVAddr = ctx->base.pc_next;
+ generate_exception_end(ctx, EXCP_AdEL);
+ return 2;
+ }
+
+ op = (ctx->opcode >> 10) & 0x3f;
+ /* Enforce properly-sized instructions in a delay slot */
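+    /*
+     * The low three bits of the major opcode separate 32-bit pools
+     * (0, 4-7) from 16-bit encodings (1-3): a delay slot flagged
+     * BDS16 rejects 32-bit instructions and one flagged BDS32
+     * rejects 16-bit ones.
+     */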
+ if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) {
+ switch (op & 0x7) { /* MSB-3..MSB-5 */
+ case 0:
+ /* POOL32A, POOL32B, POOL32I, POOL32C */
+ case 4:
+ /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */
+ case 5:
+ /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */
+ case 6:
+ /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */
+ case 7:
+ /* LB32, LH32, LWC132, LDC132, LW32 */
+ if (ctx->hflags & MIPS_HFLAG_BDS16) {
+ gen_reserved_instruction(ctx);
+ return 2;
+ }
+ break;
+ case 1:
+ /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */
+ case 2:
+ /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */
+ case 3:
+ /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */
+ if (ctx->hflags & MIPS_HFLAG_BDS32) {
+ gen_reserved_instruction(ctx);
+ return 2;
+ }
+ break;
+ }
+ }
+
+ switch (op) {
+ case POOL16A:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs1 = mmreg(uMIPS_RS1(ctx->opcode));
+ int rs2 = mmreg(uMIPS_RS2(ctx->opcode));
+ uint32_t opc = 0;
+
+ switch (ctx->opcode & 0x1) {
+ case ADDU16:
+ opc = OPC_ADDU;
+ break;
+ case SUBU16:
+ opc = OPC_SUBU;
+ break;
+ }
+            if (ctx->insn_flags & ISA_MIPS_R6) {
+                /*
+                 * In Release 6, the destination and first source
+                 * registers swap positions in the instruction encoding.
+                 */
+                gen_arith(ctx, opc, rs1, rd, rs2);
+ } else {
+ gen_arith(ctx, opc, rd, rs1, rs2);
+ }
+ }
+ break;
+ case POOL16B:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+ int amount = (ctx->opcode >> 1) & 0x7;
+ uint32_t opc = 0;
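+            /* A shift-amount field of 0 encodes a shift by 8. */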
+ amount = amount == 0 ? 8 : amount;
+
+ switch (ctx->opcode & 0x1) {
+ case SLL16:
+ opc = OPC_SLL;
+ break;
+ case SRL16:
+ opc = OPC_SRL;
+ break;
+ }
+
+ gen_shift_imm(ctx, opc, rd, rs, amount);
+ }
+ break;
+ case POOL16C:
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ gen_pool16c_r6_insn(ctx);
+ } else {
+ gen_pool16c_insn(ctx);
+ }
+ break;
+ case LWGP16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = 28; /* GP */
+ int16_t offset = SIMM(ctx->opcode, 0, 7) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case POOL16F:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ if (ctx->opcode & 1) {
+ gen_reserved_instruction(ctx);
+ } else {
+ /* MOVEP */
+ int enc_dest = uMIPS_RD(ctx->opcode);
+ int enc_rt = uMIPS_RS2(ctx->opcode);
+ int enc_rs = uMIPS_RS1(ctx->opcode);
+ gen_movep(ctx, enc_dest, enc_rt, enc_rs);
+ }
+ break;
+ case LBU16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4);
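+            /* An encoded offset of 0xf denotes -1. */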
+ offset = (offset == 0xf ? -1 : offset);
+
+ gen_ld(ctx, OPC_LBU, rd, rb, offset);
+ }
+ break;
+ case LHU16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1;
+
+ gen_ld(ctx, OPC_LHU, rd, rb, offset);
+ }
+ break;
+ case LWSP16:
+ {
+ int rd = (ctx->opcode >> 5) & 0x1f;
+ int rb = 29; /* SP */
+ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case LW16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case SB16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_st(ctx, OPC_SB, rd, rb, offset);
+ }
+ break;
+ case SH16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1;
+
+ gen_st(ctx, OPC_SH, rd, rb, offset);
+ }
+ break;
+ case SWSP16:
+ {
+ int rd = (ctx->opcode >> 5) & 0x1f;
+ int rb = 29; /* SP */
+ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2;
+
+ gen_st(ctx, OPC_SW, rd, rb, offset);
+ }
+ break;
+ case SW16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2;
+
+ gen_st(ctx, OPC_SW, rd, rb, offset);
+ }
+ break;
+ case MOVE16:
+ {
+ int rd = uMIPS_RD5(ctx->opcode);
+ int rs = uMIPS_RS5(ctx->opcode);
+
+ gen_arith(ctx, OPC_ADDU, rd, rs, 0);
+ }
+ break;
+ case ANDI16:
+ gen_andi16(ctx);
+ break;
+ case POOL16D:
+ switch (ctx->opcode & 0x1) {
+ case ADDIUS5:
+ gen_addius5(ctx);
+ break;
+ case ADDIUSP:
+ gen_addiusp(ctx);
+ break;
+ }
+ break;
+ case POOL16E:
+ switch (ctx->opcode & 0x1) {
+ case ADDIUR2:
+ gen_addiur2(ctx);
+ break;
+ case ADDIUR1SP:
+ gen_addiur1sp(ctx);
+ break;
+ }
+ break;
+ case B16: /* BC16 */
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0,
+ sextract32(ctx->opcode, 0, 10) << 1,
+ (ctx->insn_flags & ISA_MIPS_R6) ? 0 : 4);
+ break;
+ case BNEZ16: /* BNEZC16 */
+ case BEQZ16: /* BEQZC16 */
+ gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2,
+ mmreg(uMIPS_RD(ctx->opcode)),
+ 0, sextract32(ctx->opcode, 0, 7) << 1,
+ (ctx->insn_flags & ISA_MIPS_R6) ? 0 : 4);
+
+ break;
+ case LI16:
+ {
+ int reg = mmreg(uMIPS_RD(ctx->opcode));
+ int imm = ZIMM(ctx->opcode, 0, 7);
+
+ imm = (imm == 0x7f ? -1 : imm);
+ tcg_gen_movi_tl(cpu_gpr[reg], imm);
+ }
+ break;
+ case RES_29:
+ case RES_31:
+ case RES_39:
+ gen_reserved_instruction(ctx);
+ break;
+ default:
+ decode_micromips32_opc(env, ctx);
+ return 4;
+ }
+
+ return 2;
+}
diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc
new file mode 100644
index 000000000..84d816603
--- /dev/null
+++ b/target/mips/tcg/mips16e_translate.c.inc
@@ -0,0 +1,1123 @@
+/*
+ * MIPS16 extension (Code Compaction) ASE translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+/* MIPS16 major opcodes */
+enum {
+ M16_OPC_ADDIUSP = 0x00,
+ M16_OPC_ADDIUPC = 0x01,
+ M16_OPC_B = 0x02,
+ M16_OPC_JAL = 0x03,
+ M16_OPC_BEQZ = 0x04,
+ M16_OPC_BNEQZ = 0x05,
+ M16_OPC_SHIFT = 0x06,
+ M16_OPC_LD = 0x07,
+ M16_OPC_RRIA = 0x08,
+ M16_OPC_ADDIU8 = 0x09,
+ M16_OPC_SLTI = 0x0a,
+ M16_OPC_SLTIU = 0x0b,
+ M16_OPC_I8 = 0x0c,
+ M16_OPC_LI = 0x0d,
+ M16_OPC_CMPI = 0x0e,
+ M16_OPC_SD = 0x0f,
+ M16_OPC_LB = 0x10,
+ M16_OPC_LH = 0x11,
+ M16_OPC_LWSP = 0x12,
+ M16_OPC_LW = 0x13,
+ M16_OPC_LBU = 0x14,
+ M16_OPC_LHU = 0x15,
+ M16_OPC_LWPC = 0x16,
+ M16_OPC_LWU = 0x17,
+ M16_OPC_SB = 0x18,
+ M16_OPC_SH = 0x19,
+ M16_OPC_SWSP = 0x1a,
+ M16_OPC_SW = 0x1b,
+ M16_OPC_RRR = 0x1c,
+ M16_OPC_RR = 0x1d,
+ M16_OPC_EXTEND = 0x1e,
+ M16_OPC_I64 = 0x1f
+};
+
+/* I8 funct field */
+enum {
+ I8_BTEQZ = 0x0,
+ I8_BTNEZ = 0x1,
+ I8_SWRASP = 0x2,
+ I8_ADJSP = 0x3,
+ I8_SVRS = 0x4,
+ I8_MOV32R = 0x5,
+ I8_MOVR32 = 0x7
+};
+
+/* RRR f field */
+enum {
+ RRR_DADDU = 0x0,
+ RRR_ADDU = 0x1,
+ RRR_DSUBU = 0x2,
+ RRR_SUBU = 0x3
+};
+
+/* RR funct field */
+enum {
+ RR_JR = 0x00,
+ RR_SDBBP = 0x01,
+ RR_SLT = 0x02,
+ RR_SLTU = 0x03,
+ RR_SLLV = 0x04,
+ RR_BREAK = 0x05,
+ RR_SRLV = 0x06,
+ RR_SRAV = 0x07,
+ RR_DSRL = 0x08,
+ RR_CMP = 0x0a,
+ RR_NEG = 0x0b,
+ RR_AND = 0x0c,
+ RR_OR = 0x0d,
+ RR_XOR = 0x0e,
+ RR_NOT = 0x0f,
+ RR_MFHI = 0x10,
+ RR_CNVT = 0x11,
+ RR_MFLO = 0x12,
+ RR_DSRA = 0x13,
+ RR_DSLLV = 0x14,
+ RR_DSRLV = 0x16,
+ RR_DSRAV = 0x17,
+ RR_MULT = 0x18,
+ RR_MULTU = 0x19,
+ RR_DIV = 0x1a,
+ RR_DIVU = 0x1b,
+ RR_DMULT = 0x1c,
+ RR_DMULTU = 0x1d,
+ RR_DDIV = 0x1e,
+ RR_DDIVU = 0x1f
+};
+
+/* I64 funct field */
+enum {
+ I64_LDSP = 0x0,
+ I64_SDSP = 0x1,
+ I64_SDRASP = 0x2,
+ I64_DADJSP = 0x3,
+ I64_LDPC = 0x4,
+ I64_DADDIU5 = 0x5,
+ I64_DADDIUPC = 0x6,
+ I64_DADDIUSP = 0x7
+};
+
+/* RR ry field for CNVT */
+enum {
+ RR_RY_CNVT_ZEB = 0x0,
+ RR_RY_CNVT_ZEH = 0x1,
+ RR_RY_CNVT_ZEW = 0x2,
+ RR_RY_CNVT_SEB = 0x4,
+ RR_RY_CNVT_SEH = 0x5,
+ RR_RY_CNVT_SEW = 0x6,
+};
+
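+/*
+ * MIPS16 register fields are three bits wide: values 0 and 1 map to
+ * $16 and $17 ($s0/$s1), values 2-7 map to $2-$7 ($v0/$v1, $a0-$a3).
+ */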
+static int xlat(int r)
+{
+    static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+static void gen_mips16_save(DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ int args, astatic;
+
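+    /*
+     * The aregs field encodes two counts: how many of $4-$7 carry
+     * incoming arguments to be stored to the caller's frame (args,
+     * first switch below) and how many are static registers saved
+     * within this frame (astatic, second switch).
+     */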
+ switch (aregs) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 11:
+ args = 0;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ args = 1;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ args = 2;
+ break;
+ case 12:
+ case 13:
+ args = 3;
+ break;
+ case 14:
+ args = 4;
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ switch (args) {
+ case 4:
+ gen_base_offset_addr(ctx, t0, 29, 12);
+ gen_load_gpr(t1, 7);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 3:
+ gen_base_offset_addr(ctx, t0, 29, 8);
+ gen_load_gpr(t1, 6);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 2:
+ gen_base_offset_addr(ctx, t0, 29, 4);
+ gen_load_gpr(t1, 5);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 1:
+ gen_base_offset_addr(ctx, t0, 29, 0);
+ gen_load_gpr(t1, 4);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ }
+
+ gen_load_gpr(t0, 29);
+
+#define DECR_AND_STORE(reg) do { \
+ tcg_gen_movi_tl(t2, -4); \
+ gen_op_addr_add(ctx, t0, t0, t2); \
+ gen_load_gpr(t1, reg); \
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); \
+ } while (0)
+
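+    /*
+     * Each DECR_AND_STORE pushes one register: t0 is pre-decremented
+     * by 4 and the register value stored there, so the frame grows
+     * downwards from the incoming stack pointer.
+     */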
+ if (do_ra) {
+ DECR_AND_STORE(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_STORE(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_STORE(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_STORE(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_STORE(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_STORE(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_STORE(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_STORE(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_STORE(17);
+ }
+ if (do_s0) {
+ DECR_AND_STORE(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_STORE(7);
+ if (astatic > 1) {
+ DECR_AND_STORE(6);
+ if (astatic > 2) {
+ DECR_AND_STORE(5);
+ if (astatic > 3) {
+ DECR_AND_STORE(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_STORE
+
+ tcg_gen_movi_tl(t2, -framesize);
+ gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void gen_mips16_restore(DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ int astatic;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t2, framesize);
+ gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);
+
+#define DECR_AND_LOAD(reg) do { \
+ tcg_gen_movi_tl(t2, -4); \
+ gen_op_addr_add(ctx, t0, t0, t2); \
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); \
+ gen_store_gpr(t1, reg); \
+ } while (0)
+
+ if (do_ra) {
+ DECR_AND_LOAD(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_LOAD(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_LOAD(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_LOAD(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_LOAD(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_LOAD(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_LOAD(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_LOAD(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_LOAD(17);
+ }
+ if (do_s0) {
+ DECR_AND_LOAD(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_LOAD(7);
+ if (astatic > 1) {
+ DECR_AND_LOAD(6);
+ if (astatic > 2) {
+ DECR_AND_LOAD(5);
+ if (astatic > 3) {
+ DECR_AND_LOAD(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_LOAD
+
+ tcg_gen_movi_tl(t2, framesize);
+ gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+#if defined(TARGET_MIPS64)
+static void decode_i64_mips16(DisasContext *ctx,
+ int ry, int funct, int16_t offset,
+ int extended)
+{
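+    /*
+     * Non-extended encodings carry scaled immediates (shifted left by
+     * 2 or 3 below, per instruction); the EXTEND prefix form supplies
+     * a full immediate that is used as-is.
+     */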
+ switch (funct) {
+ case I64_LDSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ld(ctx, OPC_LD, ry, 29, offset);
+ break;
+ case I64_SDSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_st(ctx, OPC_SD, ry, 29, offset);
+ break;
+ case I64_SDRASP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : (ctx->opcode & 0xff) << 3;
+ gen_st(ctx, OPC_SD, 31, 29, offset);
+ break;
+ case I64_DADJSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
+ gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset);
+ break;
+ case I64_LDPC:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ gen_reserved_instruction(ctx);
+ } else {
+ offset = extended ? offset : offset << 3;
+ gen_ld(ctx, OPC_LDPC, ry, 0, offset);
+ }
+ break;
+ case I64_DADDIU5:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
+ gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset);
+ break;
+ case I64_DADDIUPC:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_addiupc(ctx, ry, offset, 1, extended);
+ break;
+ case I64_DADDIUSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset);
+ break;
+ }
+}
+#endif
+
+static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
+ int op, rx, ry, funct, sa;
+ int16_t imm, offset;
+
+ ctx->opcode = (ctx->opcode << 16) | extend;
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 22) & 0x1f;
+ funct = (ctx->opcode >> 8) & 0x7;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
+ | ((ctx->opcode >> 21) & 0x3f) << 5
+ | (ctx->opcode & 0x1f));
+
+ /*
+ * The extended opcodes cleverly reuse the opcodes from their 16-bit
+ * counterparts.
+ */
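+    /*
+     * For example, ADDIUSP keeps opcode 0x00 in both forms: the 16-bit
+     * encoding scales an 8-bit immediate by 4, while the extended form
+     * supplies the full signed immediate assembled above.
+     */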
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, imm, 0, 1);
+ break;
+ case M16_OPC_B:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
+#else
+ gen_reserved_instruction(ctx);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ imm = ctx->opcode & 0xf;
+ imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
+ imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
+ imm = (int16_t) (imm << 1) >> 1;
+ if ((ctx->opcode >> 4) & 0x1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ gen_reserved_instruction(ctx);
+#endif
+ } else {
+ gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
+ break;
+ case M16_OPC_SLTI:
+ gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
+ break;
+ case M16_OPC_SLTIU:
+ gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
+ break;
+ case M16_OPC_I8:
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
+ break;
+ case I8_SWRASP:
+ gen_st(ctx, OPC_SW, 31, 29, imm);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
+ break;
+ case I8_SVRS:
+ check_insn(ctx, ISA_MIPS_R1);
+ {
+ int xsregs = (ctx->opcode >> 24) & 0x7;
+ int aregs = (ctx->opcode >> 16) & 0xf;
+ int do_ra = (ctx->opcode >> 6) & 0x1;
+ int do_s0 = (ctx->opcode >> 5) & 0x1;
+ int do_s1 = (ctx->opcode >> 4) & 0x1;
+ int framesize = (((ctx->opcode >> 20) & 0xf) << 4
+ | (ctx->opcode & 0xf)) << 3;
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ } else {
+ gen_mips16_restore(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ }
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case M16_OPC_LI:
+ tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
+ break;
+ case M16_OPC_CMPI:
+ tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, OPC_SD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ld(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ld(ctx, OPC_LH, ry, rx, offset);
+ break;
+ case M16_OPC_LWSP:
+ gen_ld(ctx, OPC_LW, rx, 29, offset);
+ break;
+ case M16_OPC_LW:
+ gen_ld(ctx, OPC_LW, ry, rx, offset);
+ break;
+ case M16_OPC_LBU:
+ gen_ld(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ld(ctx, OPC_LHU, ry, rx, offset);
+ break;
+ case M16_OPC_LWPC:
+ gen_ld(ctx, OPC_LWPC, rx, 0, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LWU, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_st(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_st(ctx, OPC_SH, ry, rx, offset);
+ break;
+ case M16_OPC_SWSP:
+ gen_st(ctx, OPC_SW, rx, 29, offset);
+ break;
+ case M16_OPC_SW:
+ gen_st(ctx, OPC_SW, ry, rx, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ decode_i64_mips16(ctx, ry, funct, offset, 1);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ return 4;
+}
+
+static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rx, ry;
+ int sa;
+ int op, cnvt_op, op1, offset;
+ int funct;
+ int n_bytes;
+
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 2) & 0x7;
+ sa = sa == 0 ? 8 : sa;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ cnvt_op = (ctx->opcode >> 5) & 0x7;
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ op1 = offset = ctx->opcode & 0x1f;
+
+ n_bytes = 2;
+
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ {
+ int16_t imm = ((uint8_t) ctx->opcode) << 2;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
+ }
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
+ break;
+ case M16_OPC_B:
+ offset = (ctx->opcode & 0x7ff) << 1;
+ offset = (int16_t)(offset << 4) >> 4;
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_JAL:
+ offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
+ offset = (((ctx->opcode & 0x1f) << 21)
+ | ((ctx->opcode >> 5) & 0x1f) << 16
+ | offset) << 2;
+ op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
+ gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
+ n_bytes = 4;
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
+#else
+ gen_reserved_instruction(ctx);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ {
+ int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;
+
+ if ((ctx->opcode >> 4) & 1) {
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ gen_reserved_instruction(ctx);
+#endif
+ } else {
+ gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ {
+ int16_t imm = (int8_t) ctx->opcode;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTIU:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_I8:
+ {
+ int reg32;
+
+ funct = (ctx->opcode >> 8) & 0x7;
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ break;
+ case I8_SWRASP:
+ gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29,
+ ((int8_t)ctx->opcode) << 3);
+ break;
+ case I8_SVRS:
+ check_insn(ctx, ISA_MIPS_R1);
+ {
+ int do_ra = ctx->opcode & (1 << 6);
+ int do_s0 = ctx->opcode & (1 << 5);
+ int do_s1 = ctx->opcode & (1 << 4);
+ int framesize = ctx->opcode & 0xf;
+
+ if (framesize == 0) {
+ framesize = 128;
+ } else {
+ framesize = framesize << 3;
+ }
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ } else {
+ gen_mips16_restore(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ }
+ }
+ break;
+ case I8_MOV32R:
+ {
+ int rz = xlat(ctx->opcode & 0x7);
+
+ reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
+ ((ctx->opcode >> 5) & 0x7);
+ gen_arith(ctx, OPC_ADDU, reg32, rz, 0);
+ }
+ break;
+ case I8_MOVR32:
+ reg32 = ctx->opcode & 0x1f;
+ gen_arith(ctx, OPC_ADDU, ry, reg32, 0);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case M16_OPC_LI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm);
+ }
+ break;
+ case M16_OPC_CMPI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_logic_imm(ctx, OPC_XORI, 24, rx, imm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, OPC_SD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ld(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ld(ctx, OPC_LH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWSP:
+ gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_LW:
+ gen_ld(ctx, OPC_LW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_LBU:
+ gen_ld(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ld(ctx, OPC_LHU, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWPC:
+ gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LWU, ry, rx, offset << 2);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_st(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_st(ctx, OPC_SH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_SWSP:
+ gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_SW:
+ gen_st(ctx, OPC_SW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_RRR:
+ {
+ int rz = xlat((ctx->opcode >> 2) & 0x7);
+ int mips32_op;
+
+ switch (ctx->opcode & 0x3) {
+ case RRR_ADDU:
+ mips32_op = OPC_ADDU;
+ break;
+ case RRR_SUBU:
+ mips32_op = OPC_SUBU;
+ break;
+#if defined(TARGET_MIPS64)
+ case RRR_DADDU:
+ mips32_op = OPC_DADDU;
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ break;
+ case RRR_DSUBU:
+ mips32_op = OPC_DSUBU;
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ goto done;
+ }
+
+ gen_arith(ctx, mips32_op, rz, rx, ry);
+ done:
+ ;
+ }
+ break;
+ case M16_OPC_RR:
+ switch (op1) {
+ case RR_JR:
+ {
+ int nd = (ctx->opcode >> 7) & 0x1;
+ int link = (ctx->opcode >> 6) & 0x1;
+ int ra = (ctx->opcode >> 5) & 0x1;
+
+ if (nd) {
+ check_insn(ctx, ISA_MIPS_R1);
+ }
+
+ if (link) {
+ op = OPC_JALR;
+ } else {
+ op = OPC_JR;
+ }
+
+ gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
+ (nd ? 0 : 2));
+ }
+ break;
+ case RR_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 5, 6))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /*
+ * XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS_R1);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+ case RR_SLT:
+ gen_slt(ctx, OPC_SLT, 24, rx, ry);
+ break;
+ case RR_SLTU:
+ gen_slt(ctx, OPC_SLTU, 24, rx, ry);
+ break;
+ case RR_BREAK:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case RR_SLLV:
+ gen_shift(ctx, OPC_SLLV, ry, rx, ry);
+ break;
+ case RR_SRLV:
+ gen_shift(ctx, OPC_SRLV, ry, rx, ry);
+ break;
+ case RR_SRAV:
+ gen_shift(ctx, OPC_SRAV, ry, rx, ry);
+ break;
+#if defined(TARGET_MIPS64)
+ case RR_DSRL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa);
+ break;
+#endif
+ case RR_CMP:
+ gen_logic(ctx, OPC_XOR, 24, rx, ry);
+ break;
+ case RR_NEG:
+ gen_arith(ctx, OPC_SUBU, rx, 0, ry);
+ break;
+ case RR_AND:
+ gen_logic(ctx, OPC_AND, rx, rx, ry);
+ break;
+ case RR_OR:
+ gen_logic(ctx, OPC_OR, rx, rx, ry);
+ break;
+ case RR_XOR:
+ gen_logic(ctx, OPC_XOR, rx, rx, ry);
+ break;
+ case RR_NOT:
+ gen_logic(ctx, OPC_NOR, rx, ry, 0);
+ break;
+ case RR_MFHI:
+ gen_HILO(ctx, OPC_MFHI, 0, rx);
+ break;
+ case RR_CNVT:
+ check_insn(ctx, ISA_MIPS_R1);
+ switch (cnvt_op) {
+ case RR_RY_CNVT_ZEB:
+ tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_ZEH:
+ tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#if defined(TARGET_MIPS64)
+ case RR_RY_CNVT_ZEW:
+ check_insn(ctx, ISA_MIPS_R1);
+ check_mips_64(ctx);
+ tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEW:
+ check_insn(ctx, ISA_MIPS_R1);
+ check_mips_64(ctx);
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case RR_MFLO:
+ gen_HILO(ctx, OPC_MFLO, 0, rx);
+ break;
+#if defined(TARGET_MIPS64)
+ case RR_DSRA:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa);
+ break;
+ case RR_DSLLV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSLLV, ry, rx, ry);
+ break;
+ case RR_DSRLV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSRLV, ry, rx, ry);
+ break;
+ case RR_DSRAV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSRAV, ry, rx, ry);
+ break;
+#endif
+ case RR_MULT:
+ gen_muldiv(ctx, OPC_MULT, 0, rx, ry);
+ break;
+ case RR_MULTU:
+ gen_muldiv(ctx, OPC_MULTU, 0, rx, ry);
+ break;
+ case RR_DIV:
+ gen_muldiv(ctx, OPC_DIV, 0, rx, ry);
+ break;
+ case RR_DIVU:
+ gen_muldiv(ctx, OPC_DIVU, 0, rx, ry);
+ break;
+#if defined(TARGET_MIPS64)
+ case RR_DMULT:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULT, 0, rx, ry);
+ break;
+ case RR_DMULTU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry);
+ break;
+ case RR_DDIV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIV, 0, rx, ry);
+ break;
+ case RR_DDIVU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case M16_OPC_EXTEND:
+ decode_extended_mips16_opc(env, ctx);
+ n_bytes = 4;
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ funct = (ctx->opcode >> 8) & 0x7;
+ decode_i64_mips16(ctx, ry, funct, offset, 0);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ return n_bytes;
+}
diff --git a/target/mips/tcg/msa.decode b/target/mips/tcg/msa.decode
new file mode 100644
index 000000000..957528919
--- /dev/null
+++ b/target/mips/tcg/msa.decode
@@ -0,0 +1,258 @@
+# MIPS SIMD Architecture Module instruction set
+#
+# Copyright (C) 2020 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference:
+# MIPS Architecture for Programmers Volume IV-j
+# - The MIPS32 SIMD Architecture Module, Revision 1.12
+# (Document Number: MD00866-2B-MSA32-AFP-01.12)
+# - The MIPS64 SIMD Architecture Module, Revision 1.12
+# (Document Number: MD00868-1D-MSA64-AFP-01.12)
+
+&r rs rt rd sa
+
+&msa_r df wd ws wt
+&msa_bz df wt sa
+&msa_ldi df wd sa
+&msa_i df wd ws sa
+&msa_bit df wd ws m
+&msa_elm_df df wd ws n
+&msa_elm wd ws
+
+%elm_df 16:6 !function=elm_df
+%elm_n 16:6 !function=elm_n
+%bit_df 16:7 !function=bit_df
+%bit_m 16:7 !function=bit_m
+%2r_df_w 16:1 !function=plus_2
+%3r_df_h 21:1 !function=plus_1
+%3r_df_w 21:1 !function=plus_2
+
+@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r
+@ldst ...... sa:s10 ws:5 wd:5 .... df:2 &msa_i
+@bz_v ...... ... .. wt:5 sa:16 &msa_bz df=3
+@bz ...... ... df:2 wt:5 sa:16 &msa_bz
+@elm_df ...... .... ...... ws:5 wd:5 ...... &msa_elm_df df=%elm_df n=%elm_n
+@elm ...... .......... ws:5 wd:5 ...... &msa_elm
+@vec ...... ..... wt:5 ws:5 wd:5 ...... &msa_r df=0
+@2r ...... ........ df:2 ws:5 wd:5 ...... &msa_r wt=0
+@2rf ...... ......... . ws:5 wd:5 ...... &msa_r wt=0 df=%2r_df_w
+@3r ...... ... df:2 wt:5 ws:5 wd:5 ...... &msa_r
+@3rf_h ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_h
+@3rf_w ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_w
+@u5 ...... ... df:2 sa:5 ws:5 wd:5 ...... &msa_i
+@s5 ...... ... df:2 sa:s5 ws:5 wd:5 ...... &msa_i
+@i8_df ...... df:2 sa:s8 ws:5 wd:5 ...... &msa_i
+@i8 ...... .. sa:s8 ws:5 wd:5 ...... &msa_i df=0
+@ldi ...... ... df:2 sa:s10 wd:5 ...... &msa_ldi
+@bit ...... ... ....... ws:5 wd:5 ...... &msa_bit df=%bit_df m=%bit_m
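+
+# In the formats above, named fields are written as name:len (an 's'
+# prefix marks sign extension) and '.' marks bits that each pattern
+# below fixes itself or that a %field definition extracts. The df
+# field selects the element width: 0=byte, 1=halfword, 2=word,
+# 3=doubleword.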
+
+LSA 000000 ..... ..... ..... 000 .. 000101 @lsa
+DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa
+
+BZ_V 010001 01011 ..... ................ @bz_v
+BNZ_V 010001 01111 ..... ................ @bz_v
+BZ 010001 110 .. ..... ................ @bz
+BNZ 010001 111 .. ..... ................ @bz
+
+ANDI 011110 00 ........ ..... ..... 000000 @i8
+ORI 011110 01 ........ ..... ..... 000000 @i8
+NORI 011110 10 ........ ..... ..... 000000 @i8
+XORI 011110 11 ........ ..... ..... 000000 @i8
+BMNZI 011110 00 ........ ..... ..... 000001 @i8
+BMZI 011110 01 ........ ..... ..... 000001 @i8
+BSELI 011110 10 ........ ..... ..... 000001 @i8
+SHF 011110 .. ........ ..... ..... 000010 @i8_df
+
+ADDVI 011110 000 .. ..... ..... ..... 000110 @u5
+SUBVI 011110 001 .. ..... ..... ..... 000110 @u5
+MAXI_S 011110 010 .. ..... ..... ..... 000110 @s5
+MAXI_U 011110 011 .. ..... ..... ..... 000110 @u5
+MINI_S 011110 100 .. ..... ..... ..... 000110 @s5
+MINI_U 011110 101 .. ..... ..... ..... 000110 @u5
+
+CEQI 011110 000 .. ..... ..... ..... 000111 @s5
+CLTI_S 011110 010 .. ..... ..... ..... 000111 @s5
+CLTI_U 011110 011 .. ..... ..... ..... 000111 @u5
+CLEI_S 011110 100 .. ..... ..... ..... 000111 @s5
+CLEI_U 011110 101 .. ..... ..... ..... 000111 @u5
+
+LDI 011110 110 .. .......... ..... 000111 @ldi
+
+SLLI 011110 000 ....... ..... ..... 001001 @bit
+SRAI 011110 001 ....... ..... ..... 001001 @bit
+SRLI 011110 010 ....... ..... ..... 001001 @bit
+BCLRI 011110 011 ....... ..... ..... 001001 @bit
+BSETI 011110 100 ....... ..... ..... 001001 @bit
+BNEGI 011110 101 ....... ..... ..... 001001 @bit
+BINSLI 011110 110 ....... ..... ..... 001001 @bit
+BINSRI 011110 111 ....... ..... ..... 001001 @bit
+
+SAT_S 011110 000 ....... ..... ..... 001010 @bit
+SAT_U 011110 001 ....... ..... ..... 001010 @bit
+SRARI 011110 010 ....... ..... ..... 001010 @bit
+SRLRI 011110 011 ....... ..... ..... 001010 @bit
+
+SLL 011110 000.. ..... ..... ..... 001101 @3r
+SRA 011110 001.. ..... ..... ..... 001101 @3r
+SRL 011110 010.. ..... ..... ..... 001101 @3r
+BCLR 011110 011.. ..... ..... ..... 001101 @3r
+BSET 011110 100.. ..... ..... ..... 001101 @3r
+BNEG 011110 101.. ..... ..... ..... 001101 @3r
+BINSL 011110 110.. ..... ..... ..... 001101 @3r
+BINSR 011110 111.. ..... ..... ..... 001101 @3r
+
+ADDV 011110 000.. ..... ..... ..... 001110 @3r
+SUBV 011110 001.. ..... ..... ..... 001110 @3r
+MAX_S 011110 010.. ..... ..... ..... 001110 @3r
+MAX_U 011110 011.. ..... ..... ..... 001110 @3r
+MIN_S 011110 100.. ..... ..... ..... 001110 @3r
+MIN_U 011110 101.. ..... ..... ..... 001110 @3r
+MAX_A 011110 110.. ..... ..... ..... 001110 @3r
+MIN_A 011110 111.. ..... ..... ..... 001110 @3r
+
+CEQ 011110 000.. ..... ..... ..... 001111 @3r
+CLT_S 011110 010.. ..... ..... ..... 001111 @3r
+CLT_U 011110 011.. ..... ..... ..... 001111 @3r
+CLE_S 011110 100.. ..... ..... ..... 001111 @3r
+CLE_U 011110 101.. ..... ..... ..... 001111 @3r
+
+ADD_A 011110 000.. ..... ..... ..... 010000 @3r
+ADDS_A 011110 001.. ..... ..... ..... 010000 @3r
+ADDS_S 011110 010.. ..... ..... ..... 010000 @3r
+ADDS_U 011110 011.. ..... ..... ..... 010000 @3r
+AVE_S 011110 100.. ..... ..... ..... 010000 @3r
+AVE_U 011110 101.. ..... ..... ..... 010000 @3r
+AVER_S 011110 110.. ..... ..... ..... 010000 @3r
+AVER_U 011110 111.. ..... ..... ..... 010000 @3r
+
+SUBS_S 011110 000.. ..... ..... ..... 010001 @3r
+SUBS_U 011110 001.. ..... ..... ..... 010001 @3r
+SUBSUS_U 011110 010.. ..... ..... ..... 010001 @3r
+SUBSUU_S 011110 011.. ..... ..... ..... 010001 @3r
+ASUB_S 011110 100.. ..... ..... ..... 010001 @3r
+ASUB_U 011110 101.. ..... ..... ..... 010001 @3r
+
+MULV 011110 000.. ..... ..... ..... 010010 @3r
+MADDV 011110 001.. ..... ..... ..... 010010 @3r
+MSUBV 011110 010.. ..... ..... ..... 010010 @3r
+DIV_S 011110 100.. ..... ..... ..... 010010 @3r
+DIV_U 011110 101.. ..... ..... ..... 010010 @3r
+MOD_S 011110 110.. ..... ..... ..... 010010 @3r
+MOD_U 011110 111.. ..... ..... ..... 010010 @3r
+
+DOTP_S 011110 000.. ..... ..... ..... 010011 @3r
+DOTP_U 011110 001.. ..... ..... ..... 010011 @3r
+DPADD_S 011110 010.. ..... ..... ..... 010011 @3r
+DPADD_U 011110 011.. ..... ..... ..... 010011 @3r
+DPSUB_S 011110 100.. ..... ..... ..... 010011 @3r
+DPSUB_U 011110 101.. ..... ..... ..... 010011 @3r
+
+SLD 011110 000 .. ..... ..... ..... 010100 @3r
+SPLAT 011110 001 .. ..... ..... ..... 010100 @3r
+PCKEV 011110 010 .. ..... ..... ..... 010100 @3r
+PCKOD 011110 011 .. ..... ..... ..... 010100 @3r
+ILVL 011110 100 .. ..... ..... ..... 010100 @3r
+ILVR 011110 101 .. ..... ..... ..... 010100 @3r
+ILVEV 011110 110 .. ..... ..... ..... 010100 @3r
+ILVOD 011110 111 .. ..... ..... ..... 010100 @3r
+
+VSHF 011110 000 .. ..... ..... ..... 010101 @3r
+SRAR 011110 001 .. ..... ..... ..... 010101 @3r
+SRLR 011110 010 .. ..... ..... ..... 010101 @3r
+HADD_S 011110 100.. ..... ..... ..... 010101 @3r
+HADD_U 011110 101.. ..... ..... ..... 010101 @3r
+HSUB_S 011110 110.. ..... ..... ..... 010101 @3r
+HSUB_U 011110 111.. ..... ..... ..... 010101 @3r
+
+{
+ CTCMSA 011110 0000111110 ..... ..... 011001 @elm
+ SLDI 011110 0000 ...... ..... ..... 011001 @elm_df
+}
+{
+ CFCMSA 011110 0001111110 ..... ..... 011001 @elm
+ SPLATI 011110 0001 ...... ..... ..... 011001 @elm_df
+}
+{
+ MOVE_V 011110 0010111110 ..... ..... 011001 @elm
+ COPY_S 011110 0010 ...... ..... ..... 011001 @elm_df
+}
+COPY_U 011110 0011 ...... ..... ..... 011001 @elm_df
+INSERT 011110 0100 ...... ..... ..... 011001 @elm_df
+INSVE 011110 0101 ...... ..... ..... 011001 @elm_df
+
+FCAF 011110 0000 . ..... ..... ..... 011010 @3rf_w
+FCUN 011110 0001 . ..... ..... ..... 011010 @3rf_w
+FCEQ 011110 0010 . ..... ..... ..... 011010 @3rf_w
+FCUEQ 011110 0011 . ..... ..... ..... 011010 @3rf_w
+FCLT 011110 0100 . ..... ..... ..... 011010 @3rf_w
+FCULT 011110 0101 . ..... ..... ..... 011010 @3rf_w
+FCLE 011110 0110 . ..... ..... ..... 011010 @3rf_w
+FCULE 011110 0111 . ..... ..... ..... 011010 @3rf_w
+FSAF 011110 1000 . ..... ..... ..... 011010 @3rf_w
+FSUN 011110 1001 . ..... ..... ..... 011010 @3rf_w
+FSEQ 011110 1010 . ..... ..... ..... 011010 @3rf_w
+FSUEQ 011110 1011 . ..... ..... ..... 011010 @3rf_w
+FSLT 011110 1100 . ..... ..... ..... 011010 @3rf_w
+FSULT 011110 1101 . ..... ..... ..... 011010 @3rf_w
+FSLE 011110 1110 . ..... ..... ..... 011010 @3rf_w
+FSULE 011110 1111 . ..... ..... ..... 011010 @3rf_w
+
+FADD 011110 0000 . ..... ..... ..... 011011 @3rf_w
+FSUB 011110 0001 . ..... ..... ..... 011011 @3rf_w
+FMUL 011110 0010 . ..... ..... ..... 011011 @3rf_w
+FDIV 011110 0011 . ..... ..... ..... 011011 @3rf_w
+FMADD 011110 0100 . ..... ..... ..... 011011 @3rf_w
+FMSUB 011110 0101 . ..... ..... ..... 011011 @3rf_w
+FEXP2 011110 0111 . ..... ..... ..... 011011 @3rf_w
+FEXDO 011110 1000 . ..... ..... ..... 011011 @3rf_w
+FTQ 011110 1010 . ..... ..... ..... 011011 @3rf_w
+FMIN 011110 1100 . ..... ..... ..... 011011 @3rf_w
+FMIN_A 011110 1101 . ..... ..... ..... 011011 @3rf_w
+FMAX 011110 1110 . ..... ..... ..... 011011 @3rf_w
+FMAX_A 011110 1111 . ..... ..... ..... 011011 @3rf_w
+
+FCOR 011110 0001 . ..... ..... ..... 011100 @3rf_w
+FCUNE 011110 0010 . ..... ..... ..... 011100 @3rf_w
+FCNE 011110 0011 . ..... ..... ..... 011100 @3rf_w
+MUL_Q 011110 0100 . ..... ..... ..... 011100 @3rf_h
+MADD_Q 011110 0101 . ..... ..... ..... 011100 @3rf_h
+MSUB_Q 011110 0110 . ..... ..... ..... 011100 @3rf_h
+FSOR 011110 1001 . ..... ..... ..... 011100 @3rf_w
+FSUNE 011110 1010 . ..... ..... ..... 011100 @3rf_w
+FSNE 011110 1011 . ..... ..... ..... 011100 @3rf_w
+MULR_Q 011110 1100 . ..... ..... ..... 011100 @3rf_h
+MADDR_Q 011110 1101 . ..... ..... ..... 011100 @3rf_h
+MSUBR_Q 011110 1110 . ..... ..... ..... 011100 @3rf_h
+
+AND_V 011110 00000 ..... ..... ..... 011110 @vec
+OR_V 011110 00001 ..... ..... ..... 011110 @vec
+NOR_V 011110 00010 ..... ..... ..... 011110 @vec
+XOR_V 011110 00011 ..... ..... ..... 011110 @vec
+BMNZ_V 011110 00100 ..... ..... ..... 011110 @vec
+BMZ_V 011110 00101 ..... ..... ..... 011110 @vec
+BSEL_V 011110 00110 ..... ..... ..... 011110 @vec
+FILL 011110 11000000 .. ..... ..... 011110 @2r
+PCNT 011110 11000001 .. ..... ..... 011110 @2r
+NLOC 011110 11000010 .. ..... ..... 011110 @2r
+NLZC 011110 11000011 .. ..... ..... 011110 @2r
+FCLASS 011110 110010000 . ..... ..... 011110 @2rf
+FTRUNC_S 011110 110010001 . ..... ..... 011110 @2rf
+FTRUNC_U 011110 110010010 . ..... ..... 011110 @2rf
+FSQRT 011110 110010011 . ..... ..... 011110 @2rf
+FRSQRT 011110 110010100 . ..... ..... 011110 @2rf
+FRCP 011110 110010101 . ..... ..... 011110 @2rf
+FRINT 011110 110010110 . ..... ..... 011110 @2rf
+FLOG2 011110 110010111 . ..... ..... 011110 @2rf
+FEXUPL 011110 110011000 . ..... ..... 011110 @2rf
+FEXUPR 011110 110011001 . ..... ..... 011110 @2rf
+FFQL 011110 110011010 . ..... ..... 011110 @2rf
+FFQR 011110 110011011 . ..... ..... 011110 @2rf
+FTINT_S 011110 110011100 . ..... ..... 011110 @2rf
+FTINT_U 011110 110011101 . ..... ..... 011110 @2rf
+FFINT_S 011110 110011110 . ..... ..... 011110 @2rf
+FFINT_U 011110 110011111 . ..... ..... 011110 @2rf
+
+LD 011110 .......... ..... ..... 1000 .. @ldst
+ST 011110 .......... ..... ..... 1001 .. @ldst
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
new file mode 100644
index 000000000..5667b1f0a
--- /dev/null
+++ b/target/mips/tcg/msa_helper.c
@@ -0,0 +1,8388 @@
+/*
+ * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "tcg/tcg.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/memop.h"
+#include "fpu/softfloat.h"
+#include "fpu_helper.h"
+
+/* Data format min and max values */
+#define DF_BITS(df) (1 << ((df) + 3))
+
+#define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
+#define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1)
+
+#define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1)))
+#define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1)))
+
+#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
+#define M_MAX_UINT(m) (uint64_t)(-1ULL >> (64 - (m)))
+
+#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
+#define SIGNED(x, df) \
+ ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))
+
+/* Element-by-element access macros */
+#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
+
+
+
+/*
+ * Bit Count
+ * ---------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | NLOC.B | Vector Leading Ones Count (byte) |
+ * | NLOC.H | Vector Leading Ones Count (halfword) |
+ * | NLOC.W | Vector Leading Ones Count (word) |
+ * | NLOC.D | Vector Leading Ones Count (doubleword) |
+ * | NLZC.B | Vector Leading Zeros Count (byte) |
+ * | NLZC.H | Vector Leading Zeros Count (halfword) |
+ * | NLZC.W | Vector Leading Zeros Count (word) |
+ * | NLZC.D | Vector Leading Zeros Count (doubleword) |
+ * | PCNT.B | Vector Population Count (byte) |
+ * | PCNT.H | Vector Population Count (halfword) |
+ * | PCNT.W | Vector Population Count (word) |
+ * | PCNT.D | Vector Population Count (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
+{
+ uint64_t x, y;
+ int n, c;
+
+ x = UNSIGNED(arg, df);
+ n = DF_BITS(df);
+ c = DF_BITS(df) / 2;
+
+ do {
+ y = x >> c;
+ if (y != 0) {
+ n = n - c;
+ x = y;
+ }
+ c = c >> 1;
+ } while (c != 0);
+
+ return n - x;
+}
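+
+/*
+ * For illustration: with df == DF_BYTE and arg == 0x10, the loop runs
+ * with n = 8, c = 4:
+ *   c = 4: y = 0x10 >> 4 = 0x1, so n = 4, x = 0x1
+ *   c = 2: y = 0x1 >> 2 = 0, x unchanged
+ *   c = 1: y = 0x1 >> 1 = 0, x unchanged
+ * and n - x = 4 - 1 = 3, the number of leading zeros in 0b00010000.
+ */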
+
+static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
+{
+ return msa_nlzc_df(df, UNSIGNED((~arg), df));
+}
+
+void helper_msa_nloc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->b[0] = msa_nloc_df(DF_BYTE, pws->b[0]);
+ pwd->b[1] = msa_nloc_df(DF_BYTE, pws->b[1]);
+ pwd->b[2] = msa_nloc_df(DF_BYTE, pws->b[2]);
+ pwd->b[3] = msa_nloc_df(DF_BYTE, pws->b[3]);
+ pwd->b[4] = msa_nloc_df(DF_BYTE, pws->b[4]);
+ pwd->b[5] = msa_nloc_df(DF_BYTE, pws->b[5]);
+ pwd->b[6] = msa_nloc_df(DF_BYTE, pws->b[6]);
+ pwd->b[7] = msa_nloc_df(DF_BYTE, pws->b[7]);
+ pwd->b[8] = msa_nloc_df(DF_BYTE, pws->b[8]);
+ pwd->b[9] = msa_nloc_df(DF_BYTE, pws->b[9]);
+ pwd->b[10] = msa_nloc_df(DF_BYTE, pws->b[10]);
+ pwd->b[11] = msa_nloc_df(DF_BYTE, pws->b[11]);
+ pwd->b[12] = msa_nloc_df(DF_BYTE, pws->b[12]);
+ pwd->b[13] = msa_nloc_df(DF_BYTE, pws->b[13]);
+ pwd->b[14] = msa_nloc_df(DF_BYTE, pws->b[14]);
+ pwd->b[15] = msa_nloc_df(DF_BYTE, pws->b[15]);
+}
+
+void helper_msa_nloc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->h[0] = msa_nloc_df(DF_HALF, pws->h[0]);
+ pwd->h[1] = msa_nloc_df(DF_HALF, pws->h[1]);
+ pwd->h[2] = msa_nloc_df(DF_HALF, pws->h[2]);
+ pwd->h[3] = msa_nloc_df(DF_HALF, pws->h[3]);
+ pwd->h[4] = msa_nloc_df(DF_HALF, pws->h[4]);
+ pwd->h[5] = msa_nloc_df(DF_HALF, pws->h[5]);
+ pwd->h[6] = msa_nloc_df(DF_HALF, pws->h[6]);
+ pwd->h[7] = msa_nloc_df(DF_HALF, pws->h[7]);
+}
+
+void helper_msa_nloc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->w[0] = msa_nloc_df(DF_WORD, pws->w[0]);
+ pwd->w[1] = msa_nloc_df(DF_WORD, pws->w[1]);
+ pwd->w[2] = msa_nloc_df(DF_WORD, pws->w[2]);
+ pwd->w[3] = msa_nloc_df(DF_WORD, pws->w[3]);
+}
+
+void helper_msa_nloc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->d[0] = msa_nloc_df(DF_DOUBLE, pws->d[0]);
+ pwd->d[1] = msa_nloc_df(DF_DOUBLE, pws->d[1]);
+}
+
+void helper_msa_nlzc_b(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->b[0] = msa_nlzc_df(DF_BYTE, pws->b[0]);
+ pwd->b[1] = msa_nlzc_df(DF_BYTE, pws->b[1]);
+ pwd->b[2] = msa_nlzc_df(DF_BYTE, pws->b[2]);
+ pwd->b[3] = msa_nlzc_df(DF_BYTE, pws->b[3]);
+ pwd->b[4] = msa_nlzc_df(DF_BYTE, pws->b[4]);
+ pwd->b[5] = msa_nlzc_df(DF_BYTE, pws->b[5]);
+ pwd->b[6] = msa_nlzc_df(DF_BYTE, pws->b[6]);
+ pwd->b[7] = msa_nlzc_df(DF_BYTE, pws->b[7]);
+ pwd->b[8] = msa_nlzc_df(DF_BYTE, pws->b[8]);
+ pwd->b[9] = msa_nlzc_df(DF_BYTE, pws->b[9]);
+ pwd->b[10] = msa_nlzc_df(DF_BYTE, pws->b[10]);
+ pwd->b[11] = msa_nlzc_df(DF_BYTE, pws->b[11]);
+ pwd->b[12] = msa_nlzc_df(DF_BYTE, pws->b[12]);
+ pwd->b[13] = msa_nlzc_df(DF_BYTE, pws->b[13]);
+ pwd->b[14] = msa_nlzc_df(DF_BYTE, pws->b[14]);
+ pwd->b[15] = msa_nlzc_df(DF_BYTE, pws->b[15]);
+}
+
+void helper_msa_nlzc_h(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->h[0] = msa_nlzc_df(DF_HALF, pws->h[0]);
+ pwd->h[1] = msa_nlzc_df(DF_HALF, pws->h[1]);
+ pwd->h[2] = msa_nlzc_df(DF_HALF, pws->h[2]);
+ pwd->h[3] = msa_nlzc_df(DF_HALF, pws->h[3]);
+ pwd->h[4] = msa_nlzc_df(DF_HALF, pws->h[4]);
+ pwd->h[5] = msa_nlzc_df(DF_HALF, pws->h[5]);
+ pwd->h[6] = msa_nlzc_df(DF_HALF, pws->h[6]);
+ pwd->h[7] = msa_nlzc_df(DF_HALF, pws->h[7]);
+}
+
+void helper_msa_nlzc_w(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->w[0] = msa_nlzc_df(DF_WORD, pws->w[0]);
+ pwd->w[1] = msa_nlzc_df(DF_WORD, pws->w[1]);
+ pwd->w[2] = msa_nlzc_df(DF_WORD, pws->w[2]);
+ pwd->w[3] = msa_nlzc_df(DF_WORD, pws->w[3]);
+}
+
+void helper_msa_nlzc_d(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->d[0] = msa_nlzc_df(DF_DOUBLE, pws->d[0]);
+ pwd->d[1] = msa_nlzc_df(DF_DOUBLE, pws->d[1]);
+}
+
+static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
+{
+ uint64_t x;
+
+ x = UNSIGNED(arg, df);
+
+ x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
+ x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
+ x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
+ x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL);
+ x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
+ x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));
+
+ return x;
+}
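+
+/*
+ * This is the classic parallel ("SWAR") population count: each step
+ * adds adjacent bit fields of twice the previous width, so six steps
+ * reduce 64 one-bit counters to a single 64-bit sum. The UNSIGNED()
+ * mask applied first keeps narrower data formats from counting
+ * sign-extension bits.
+ */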
+
+void helper_msa_pcnt_b(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->b[0] = msa_pcnt_df(DF_BYTE, pws->b[0]);
+ pwd->b[1] = msa_pcnt_df(DF_BYTE, pws->b[1]);
+ pwd->b[2] = msa_pcnt_df(DF_BYTE, pws->b[2]);
+ pwd->b[3] = msa_pcnt_df(DF_BYTE, pws->b[3]);
+ pwd->b[4] = msa_pcnt_df(DF_BYTE, pws->b[4]);
+ pwd->b[5] = msa_pcnt_df(DF_BYTE, pws->b[5]);
+ pwd->b[6] = msa_pcnt_df(DF_BYTE, pws->b[6]);
+ pwd->b[7] = msa_pcnt_df(DF_BYTE, pws->b[7]);
+ pwd->b[8] = msa_pcnt_df(DF_BYTE, pws->b[8]);
+ pwd->b[9] = msa_pcnt_df(DF_BYTE, pws->b[9]);
+ pwd->b[10] = msa_pcnt_df(DF_BYTE, pws->b[10]);
+ pwd->b[11] = msa_pcnt_df(DF_BYTE, pws->b[11]);
+ pwd->b[12] = msa_pcnt_df(DF_BYTE, pws->b[12]);
+ pwd->b[13] = msa_pcnt_df(DF_BYTE, pws->b[13]);
+ pwd->b[14] = msa_pcnt_df(DF_BYTE, pws->b[14]);
+ pwd->b[15] = msa_pcnt_df(DF_BYTE, pws->b[15]);
+}
+
+void helper_msa_pcnt_h(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->h[0] = msa_pcnt_df(DF_HALF, pws->h[0]);
+ pwd->h[1] = msa_pcnt_df(DF_HALF, pws->h[1]);
+ pwd->h[2] = msa_pcnt_df(DF_HALF, pws->h[2]);
+ pwd->h[3] = msa_pcnt_df(DF_HALF, pws->h[3]);
+ pwd->h[4] = msa_pcnt_df(DF_HALF, pws->h[4]);
+ pwd->h[5] = msa_pcnt_df(DF_HALF, pws->h[5]);
+ pwd->h[6] = msa_pcnt_df(DF_HALF, pws->h[6]);
+ pwd->h[7] = msa_pcnt_df(DF_HALF, pws->h[7]);
+}
+
+void helper_msa_pcnt_w(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->w[0] = msa_pcnt_df(DF_WORD, pws->w[0]);
+ pwd->w[1] = msa_pcnt_df(DF_WORD, pws->w[1]);
+ pwd->w[2] = msa_pcnt_df(DF_WORD, pws->w[2]);
+ pwd->w[3] = msa_pcnt_df(DF_WORD, pws->w[3]);
+}
+
+void helper_msa_pcnt_d(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ pwd->d[0] = msa_pcnt_df(DF_DOUBLE, pws->d[0]);
+ pwd->d[1] = msa_pcnt_df(DF_DOUBLE, pws->d[1]);
+}
+
+
+/*
+ * Bit Move
+ * --------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | BINSL.B | Vector Bit Insert Left (byte) |
+ * | BINSL.H | Vector Bit Insert Left (halfword) |
+ * | BINSL.W | Vector Bit Insert Left (word) |
+ * | BINSL.D | Vector Bit Insert Left (doubleword) |
+ * | BINSR.B | Vector Bit Insert Right (byte) |
+ * | BINSR.H | Vector Bit Insert Right (halfword) |
+ * | BINSR.W | Vector Bit Insert Right (word) |
+ * | BINSR.D | Vector Bit Insert Right (doubleword) |
+ * | BMNZ.V | Vector Bit Move If Not Zero |
+ * | BMZ.V | Vector Bit Move If Zero |
+ * | BSEL.V | Vector Bit Select |
+ * +---------------+----------------------------------------------------------+
+ */
+
+/* Data format bit position and unsigned values */
+#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))
+
+static inline int64_t msa_binsl_df(uint32_t df,
+ int64_t dest, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_dest = UNSIGNED(dest, df);
+ int32_t sh_d = BIT_POSITION(arg2, df) + 1;
+ int32_t sh_a = DF_BITS(df) - sh_d;
+ if (sh_d == DF_BITS(df)) {
+ return u_arg1;
+ } else {
+ return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
+ UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
+ }
+}
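+
+/*
+ * For illustration: with df == DF_BYTE, dest == 0xff, arg1 == 0x12 and
+ * arg2 == 3, sh_d is 4, so the four leftmost bits of arg1 (0x10)
+ * replace the four leftmost bits of dest, giving 0x1f. The
+ * sh_d == DF_BITS(df) special case matters for doubleword elements,
+ * where the else branch would shift a 64-bit value by 64, which is
+ * undefined in C.
+ */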
+
+void helper_msa_binsl_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_binsl_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_binsl_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_binsl_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_binsl_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_binsl_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_binsl_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_binsl_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_binsl_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_binsl_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_binsl_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_binsl_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_binsl_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_binsl_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_binsl_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_binsl_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_binsl_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_binsl_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_binsl_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_binsl_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_binsl_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_binsl_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_binsl_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_binsl_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_binsl_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_binsl_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_binsl_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_binsl_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_binsl_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_binsl_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_binsl_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_binsl_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_binsl_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_binsl_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_binsr_df(uint32_t df,
+ int64_t dest, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_dest = UNSIGNED(dest, df);
+ int32_t sh_d = BIT_POSITION(arg2, df) + 1;
+ int32_t sh_a = DF_BITS(df) - sh_d;
+ if (sh_d == DF_BITS(df)) {
+ return u_arg1;
+ } else {
+ return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
+ UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
+ }
+}
+
+void helper_msa_binsr_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_binsr_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_binsr_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_binsr_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_binsr_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_binsr_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_binsr_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_binsr_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_binsr_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_binsr_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_binsr_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_binsr_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_binsr_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_binsr_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_binsr_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_binsr_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_binsr_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_binsr_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_binsr_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_binsr_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_binsr_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_binsr_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_binsr_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_binsr_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_binsr_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_binsr_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_binsr_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_binsr_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_binsr_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_binsr_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_binsr_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_binsr_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_binsr_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_binsr_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+void helper_msa_bmnz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = UNSIGNED( \
+ ((pwd->d[0] & (~pwt->d[0])) | (pws->d[0] & pwt->d[0])), DF_DOUBLE);
+ pwd->d[1] = UNSIGNED( \
+ ((pwd->d[1] & (~pwt->d[1])) | (pws->d[1] & pwt->d[1])), DF_DOUBLE);
+}
+
+void helper_msa_bmz_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = UNSIGNED( \
+ ((pwd->d[0] & pwt->d[0]) | (pws->d[0] & (~pwt->d[0]))), DF_DOUBLE);
+ pwd->d[1] = UNSIGNED( \
+ ((pwd->d[1] & pwt->d[1]) | (pws->d[1] & (~pwt->d[1]))), DF_DOUBLE);
+}
+
+void helper_msa_bsel_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = UNSIGNED( \
+ (pws->d[0] & (~pwd->d[0])) | (pwt->d[0] & pwd->d[0]), DF_DOUBLE);
+ pwd->d[1] = UNSIGNED( \
+ (pws->d[1] & (~pwd->d[1])) | (pwt->d[1] & pwd->d[1]), DF_DOUBLE);
+}
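+
+/*
+ * The three helpers above are pure 128-bit bitwise selects, so they
+ * operate on the two 64-bit halves regardless of data format:
+ *   BMNZ.V: wd = (wd & ~wt) | (ws & wt)   (take ws where wt bit is 1)
+ *   BMZ.V:  wd = (wd & wt) | (ws & ~wt)   (take ws where wt bit is 0)
+ *   BSEL.V: wd = (ws & ~wd) | (wt & wd)   (wd itself is the selector)
+ */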
+
+
+/*
+ * Bit Set
+ * -------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | BCLR.B | Vector Bit Clear (byte) |
+ * | BCLR.H | Vector Bit Clear (halfword) |
+ * | BCLR.W | Vector Bit Clear (word) |
+ * | BCLR.D | Vector Bit Clear (doubleword) |
+ * | BNEG.B | Vector Bit Negate (byte) |
+ * | BNEG.H | Vector Bit Negate (halfword) |
+ * | BNEG.W | Vector Bit Negate (word) |
+ * | BNEG.D | Vector Bit Negate (doubleword) |
+ * | BSET.B | Vector Bit Set (byte) |
+ * | BSET.H | Vector Bit Set (halfword) |
+ * | BSET.W | Vector Bit Set (word) |
+ * | BSET.D | Vector Bit Set (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
+}
+
+void helper_msa_bclr_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_bclr_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_bclr_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_bclr_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_bclr_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_bclr_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_bclr_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_bclr_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_bclr_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_bclr_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_bclr_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_bclr_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_bclr_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_bclr_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_bclr_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_bclr_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_bclr_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_bclr_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_bclr_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_bclr_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_bclr_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_bclr_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_bclr_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_bclr_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_bclr_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_bclr_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_bclr_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_bclr_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_bclr_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_bclr_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_bclr_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_bclr_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_bclr_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_bclr_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
+}
+
+void helper_msa_bneg_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_bneg_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_bneg_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_bneg_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_bneg_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_bneg_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_bneg_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_bneg_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_bneg_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_bneg_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_bneg_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_bneg_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_bneg_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_bneg_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_bneg_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_bneg_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_bneg_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_bneg_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_bneg_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_bneg_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_bneg_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_bneg_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_bneg_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_bneg_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_bneg_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_bneg_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_bneg_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_bneg_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_bneg_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_bneg_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_bneg_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_bneg_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_bneg_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_bneg_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
+ int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 | (1LL << b_arg2), df);
+}
+
+void helper_msa_bset_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_bset_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_bset_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_bset_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_bset_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_bset_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_bset_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_bset_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_bset_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_bset_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_bset_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_bset_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_bset_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_bset_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_bset_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_bset_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_bset_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_bset_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_bset_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_bset_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_bset_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_bset_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_bset_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_bset_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_bset_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_bset_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_bset_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_bset_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_bset_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_bset_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_bset_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_bset_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_bset_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_bset_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Fixed Multiply
+ * --------------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | MADD_Q.H | Vector Fixed-Point Multiply and Add (halfword) |
+ * | MADD_Q.W | Vector Fixed-Point Multiply and Add (word) |
+ * | MADDR_Q.H | Vector Fixed-Point Multiply and Add Rounded (halfword) |
+ * | MADDR_Q.W | Vector Fixed-Point Multiply and Add Rounded (word) |
+ * | MSUB_Q.H | Vector Fixed-Point Multiply and Subtr. (halfword) |
+ * | MSUB_Q.W | Vector Fixed-Point Multiply and Subtr. (word) |
+ * | MSUBR_Q.H | Vector Fixed-Point Multiply and Subtr. Rounded (halfword)|
+ * | MSUBR_Q.W | Vector Fixed-Point Multiply and Subtr. Rounded (word) |
+ * | MUL_Q.H | Vector Fixed-Point Multiply (halfword) |
+ * | MUL_Q.W | Vector Fixed-Point Multiply (word) |
+ * | MULR_Q.H | Vector Fixed-Point Multiply Rounded (halfword) |
+ * | MULR_Q.W | Vector Fixed-Point Multiply Rounded (word) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+/* TODO: insert Fixed Multiply group helpers here */
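+
+/*
+ * A minimal sketch, assuming Q15/Q31 fixed-point semantics, of the
+ * shape such a helper could take: the product of two Q-format values
+ * is shifted right by DF_BITS(df) - 1, and the one overflowing case,
+ * (-1.0) * (-1.0), saturates to the maximum representable value. The
+ * name msa_mul_q_df is assumed here; this is illustrative only, not
+ * the finished helper the TODO above refers to.
+ */
+#if 0
+static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+    int64_t q_min = DF_MIN_INT(df);
+    int64_t q_max = DF_MAX_INT(df);
+
+    if (arg1 == q_min && arg2 == q_min) {
+        /* (-1.0) * (-1.0) is the only product that overflows: saturate */
+        return q_max;
+    }
+    /* arithmetic shift keeps the Q15/Q31 binary point in place */
+    return (arg1 * arg2) >> (DF_BITS(df) - 1);
+}
+#endif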
+
+
+/*
+ * Float Max Min
+ * -------------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | FMAX_A.W | Vector Floating-Point Maximum (Absolute) (word) |
+ * | FMAX_A.D | Vector Floating-Point Maximum (Absolute) (doubleword) |
+ * | FMAX.W | Vector Floating-Point Maximum (word) |
+ * | FMAX.D | Vector Floating-Point Maximum (doubleword) |
+ * | FMIN_A.W | Vector Floating-Point Minimum (Absolute) (word) |
+ * | FMIN_A.D | Vector Floating-Point Minimum (Absolute) (doubleword) |
+ * | FMIN.W | Vector Floating-Point Minimum (word) |
+ * | FMIN.D | Vector Floating-Point Minimum (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+/* TODO: insert Float Max Min group helpers here */
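+
+/*
+ * A minimal sketch of the shape an FMAX.W helper could take, assuming
+ * softfloat's float32_maxnum() (IEEE 754-2008 maxNum, which returns
+ * the non-NaN operand when exactly one input is a quiet NaN) and the
+ * fp_status field used by the other MIPS float helpers; it omits the
+ * MSACSR exception handling a real helper would need. Illustrative
+ * only, not the finished helper the TODO above refers to.
+ */
+#if 0
+void helper_msa_fmax_w(CPUMIPSState *env,
+                       uint32_t wd, uint32_t ws, uint32_t wt)
+{
+    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+    float_status *status = &env->active_fpu.fp_status;
+
+    pwd->w[0] = float32_maxnum(pws->w[0], pwt->w[0], status);
+    pwd->w[1] = float32_maxnum(pws->w[1], pwt->w[1], status);
+    pwd->w[2] = float32_maxnum(pws->w[2], pwt->w[2], status);
+    pwd->w[3] = float32_maxnum(pws->w[3], pwt->w[3], status);
+}
+#endif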
+
+
+/*
+ * Int Add
+ * -------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | ADD_A.B | Vector Add Absolute Values (byte) |
+ * | ADD_A.H | Vector Add Absolute Values (halfword) |
+ * | ADD_A.W | Vector Add Absolute Values (word) |
+ * | ADD_A.D | Vector Add Absolute Values (doubleword) |
+ * | ADDS_A.B | Vector Signed Saturated Add (of Absolute) (byte) |
+ * | ADDS_A.H | Vector Signed Saturated Add (of Absolute) (halfword) |
+ * | ADDS_A.W | Vector Signed Saturated Add (of Absolute) (word) |
+ * | ADDS_A.D | Vector Signed Saturated Add (of Absolute) (doubleword) |
+ * | ADDS_S.B | Vector Signed Saturated Add (of Signed) (byte) |
+ * | ADDS_S.H | Vector Signed Saturated Add (of Signed) (halfword) |
+ * | ADDS_S.W | Vector Signed Saturated Add (of Signed) (word) |
+ * | ADDS_S.D | Vector Signed Saturated Add (of Signed) (doubleword) |
+ * | ADDS_U.B | Vector Unsigned Saturated Add (of Unsigned) (byte) |
+ * | ADDS_U.H | Vector Unsigned Saturated Add (of Unsigned) (halfword) |
+ * | ADDS_U.W | Vector Unsigned Saturated Add (of Unsigned) (word) |
+ * | ADDS_U.D | Vector Unsigned Saturated Add (of Unsigned) (doubleword) |
+ * | ADDV.B | Vector Add (byte) |
+ * | ADDV.H | Vector Add (halfword) |
+ * | ADDV.W | Vector Add (word) |
+ * | ADDV.D | Vector Add (doubleword) |
+ * | HADD_S.H | Vector Signed Horizontal Add (halfword) |
+ * | HADD_S.W | Vector Signed Horizontal Add (word) |
+ * | HADD_S.D | Vector Signed Horizontal Add (doubleword) |
+ * | HADD_U.H | Vector Unsigned Horizontal Add (halfword) |
+ * | HADD_U.W | Vector Unsigned Horizontal Add (word) |
+ * | HADD_U.D | Vector Unsigned Horizontal Add (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 + abs_arg2;
+}
+
+void helper_msa_add_a_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_add_a_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_add_a_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_add_a_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_add_a_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_add_a_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_add_a_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_add_a_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_add_a_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_add_a_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_add_a_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_add_a_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_add_a_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_add_a_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_add_a_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_add_a_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_add_a_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_add_a_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_add_a_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_add_a_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_add_a_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_add_a_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_add_a_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_add_a_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_add_a_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_add_a_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_add_a_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_add_a_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_add_a_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_add_a_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_add_a_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_add_a_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_add_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_add_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t max_int = (uint64_t)DF_MAX_INT(df);
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ if (abs_arg1 > max_int || abs_arg2 > max_int) {
+ return (int64_t)max_int;
+ } else {
+ return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;
+ }
+}
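+
+/*
+ * Both operands are clamped to max_int before the sum is formed: in
+ * two's complement |DF_MIN_INT(df)| exceeds DF_MAX_INT(df), so an
+ * absolute value can already be out of range before any addition
+ * takes place (e.g. for bytes, |-128| = 128 > 127).
+ */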
+
+void helper_msa_adds_a_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_adds_a_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_adds_a_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_adds_a_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_adds_a_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_adds_a_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_adds_a_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_adds_a_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_adds_a_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_adds_a_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_adds_a_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_adds_a_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_adds_a_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_adds_a_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_adds_a_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_adds_a_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_adds_a_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_adds_a_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_adds_a_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_adds_a_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_adds_a_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_adds_a_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_adds_a_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_adds_a_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_adds_a_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_adds_a_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_adds_a_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_adds_a_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_adds_a_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_adds_a_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_adds_a_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_adds_a_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_adds_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_adds_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (arg1 < 0) {
+ return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
+ } else {
+ return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
+ }
+}
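+
+/*
+ * The comparisons are arranged so that they cannot overflow
+ * themselves: min_int - arg1 and max_int - arg1 are evaluated instead
+ * of arg1 + arg2. For example, adds_s(100, 100) on bytes yields 127
+ * because 100 < 127 - 100 is false.
+ */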
+
+void helper_msa_adds_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_adds_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_adds_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_adds_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_adds_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_adds_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_adds_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_adds_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_adds_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_adds_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_adds_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_adds_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_adds_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_adds_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_adds_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_adds_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_adds_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_adds_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_adds_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_adds_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_adds_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_adds_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_adds_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_adds_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_adds_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_adds_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_adds_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_adds_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_adds_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_adds_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_adds_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_adds_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_adds_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_adds_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t max_uint = DF_MAX_UINT(df);
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
+}
+
+void helper_msa_adds_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_adds_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_adds_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_adds_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_adds_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_adds_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_adds_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_adds_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_adds_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_adds_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_adds_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_adds_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_adds_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_adds_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_adds_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_adds_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_adds_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_adds_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_adds_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_adds_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_adds_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_adds_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_adds_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_adds_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_adds_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_adds_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_adds_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_adds_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_adds_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_adds_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_adds_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_adds_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_adds_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_adds_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 + arg2;
+}
+
+void helper_msa_addv_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_addv_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_addv_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_addv_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_addv_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_addv_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_addv_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_addv_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_addv_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_addv_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_addv_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_addv_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_addv_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_addv_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_addv_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_addv_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_addv_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_addv_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_addv_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_addv_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_addv_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_addv_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_addv_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_addv_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_addv_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_addv_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_addv_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_addv_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_addv_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_addv_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_addv_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_addv_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_addv_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_addv_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+#define SIGNED_EVEN(a, df) \
+ ((((int64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2))
+
+#define UNSIGNED_EVEN(a, df) \
+ ((((uint64_t)(a)) << (64 - DF_BITS(df) / 2)) >> (64 - DF_BITS(df) / 2))
+
+#define SIGNED_ODD(a, df) \
+ ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2))
+
+#define UNSIGNED_ODD(a, df) \
+ ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df) / 2))
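+
+/*
+ * These macros extract the even (low) or odd (high) half of an
+ * element and sign- or zero-extend it to 64 bits: the left shift
+ * parks the chosen half at the top of the 64-bit word and the right
+ * shift brings it back down with the desired extension.
+ */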
+
+
+static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
+}
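+
+/*
+ * Horizontal add pairs the odd (high) half-width element of ws with
+ * the even (low) half-width element of wt in the same position,
+ * producing a sign-extended full-width sum per element.
+ */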
+
+void helper_msa_hadd_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_hadd_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_hadd_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_hadd_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_hadd_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_hadd_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_hadd_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_hadd_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_hadd_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_hadd_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_hadd_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_hadd_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_hadd_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_hadd_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_hadd_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_hadd_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_hadd_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
+}
+
+void helper_msa_hadd_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_hadd_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_hadd_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_hadd_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_hadd_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_hadd_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_hadd_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_hadd_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_hadd_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_hadd_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_hadd_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_hadd_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_hadd_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_hadd_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_hadd_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_hadd_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_hadd_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Average
+ * -----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | AVE_S.B | Vector Signed Average (byte) |
+ * | AVE_S.H | Vector Signed Average (halfword) |
+ * | AVE_S.W | Vector Signed Average (word) |
+ * | AVE_S.D | Vector Signed Average (doubleword) |
+ * | AVE_U.B | Vector Unsigned Average (byte) |
+ * | AVE_U.H | Vector Unsigned Average (halfword) |
+ * | AVE_U.W | Vector Unsigned Average (word) |
+ * | AVE_U.D | Vector Unsigned Average (doubleword) |
+ * | AVER_S.B | Vector Signed Average Rounded (byte) |
+ * | AVER_S.H | Vector Signed Average Rounded (halfword) |
+ * | AVER_S.W | Vector Signed Average Rounded (word) |
+ * | AVER_S.D | Vector Signed Average Rounded (doubleword) |
+ * | AVER_U.B | Vector Unsigned Average Rounded (byte) |
+ * | AVER_U.H | Vector Unsigned Average Rounded (halfword) |
+ * | AVER_U.W | Vector Unsigned Average Rounded (word) |
+ * | AVER_U.D | Vector Unsigned Average Rounded (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
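+/*
+ * Computing (arg1 + arg2) >> 1 directly could overflow the 64-bit
+ * intermediate for DF_DOUBLE operands, so each operand is halved
+ * first and the bit that both shifts dropped (arg1 & arg2 & 1) is
+ * added back, giving floor((arg1 + arg2) / 2) without overflow.
+ * E.g. arg1 = 5, arg2 = 7: 2 + 3 + 1 = 6.
+ */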
+static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed shift */
+ return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
+}
+
+void helper_msa_ave_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_ave_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_ave_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_ave_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_ave_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_ave_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_ave_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_ave_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_ave_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_ave_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_ave_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_ave_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_ave_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_ave_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_ave_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_ave_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_ave_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_ave_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_ave_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_ave_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_ave_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_ave_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_ave_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_ave_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_ave_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_ave_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_ave_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_ave_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_ave_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_ave_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_ave_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_ave_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_ave_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_ave_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned shift */
+ return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
+}
+
+void helper_msa_ave_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_ave_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_ave_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_ave_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_ave_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_ave_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_ave_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_ave_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_ave_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_ave_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_ave_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_ave_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_ave_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_ave_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_ave_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_ave_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_ave_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_ave_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_ave_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_ave_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_ave_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_ave_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_ave_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_ave_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_ave_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_ave_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_ave_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_ave_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_ave_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_ave_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_ave_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_ave_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_ave_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_ave_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
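+/*
+ * Rounded average: adding ((arg1 | arg2) & 1) instead of
+ * (arg1 & arg2 & 1) contributes one whenever either operand is odd,
+ * i.e. floor((arg1 + arg2 + 1) / 2), again without risking overflow
+ * in the 64-bit intermediate. E.g. arg1 = 5, arg2 = 6: 2 + 3 + 1 = 6.
+ * The unsigned variant below applies the same identity to the
+ * zero-extended operands.
+ */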
+static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed shift */
+ return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
+}
+
+void helper_msa_aver_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_aver_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_aver_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_aver_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_aver_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_aver_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_aver_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_aver_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_aver_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_aver_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_aver_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_aver_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_aver_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_aver_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_aver_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_aver_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_aver_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_aver_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_aver_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_aver_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_aver_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_aver_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_aver_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_aver_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_aver_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_aver_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_aver_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_aver_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_aver_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_aver_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_aver_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_aver_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_aver_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_aver_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned shift */
+ return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
+}
+
+void helper_msa_aver_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_aver_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_aver_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_aver_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_aver_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_aver_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_aver_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_aver_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_aver_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_aver_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_aver_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_aver_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_aver_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_aver_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_aver_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_aver_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_aver_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_aver_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_aver_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_aver_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_aver_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_aver_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_aver_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_aver_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_aver_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_aver_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_aver_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_aver_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_aver_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_aver_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_aver_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_aver_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_aver_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_aver_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Compare
+ * -----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | CEQ.B | Vector Compare Equal (byte) |
+ * | CEQ.H | Vector Compare Equal (halfword) |
+ * | CEQ.W | Vector Compare Equal (word) |
+ * | CEQ.D | Vector Compare Equal (doubleword) |
+ * | CLE_S.B | Vector Compare Signed Less Than or Equal (byte) |
+ * | CLE_S.H | Vector Compare Signed Less Than or Equal (halfword) |
+ * | CLE_S.W | Vector Compare Signed Less Than or Equal (word) |
+ * | CLE_S.D | Vector Compare Signed Less Than or Equal (doubleword) |
+ * | CLE_U.B | Vector Compare Unsigned Less Than or Equal (byte) |
+ * | CLE_U.H | Vector Compare Unsigned Less Than or Equal (halfword) |
+ * | CLE_U.W | Vector Compare Unsigned Less Than or Equal (word) |
+ * | CLE_U.D | Vector Compare Unsigned Less Than or Equal (doubleword) |
+ * | CLT_S.B | Vector Compare Signed Less Than (byte) |
+ * | CLT_S.H | Vector Compare Signed Less Than (halfword) |
+ * | CLT_S.W | Vector Compare Signed Less Than (word) |
+ * | CLT_S.D | Vector Compare Signed Less Than (doubleword) |
+ * | CLT_U.B | Vector Compare Unsigned Less Than (byte) |
+ * | CLT_U.H | Vector Compare Unsigned Less Than (halfword) |
+ * | CLT_U.W | Vector Compare Unsigned Less Than (word) |
+ * | CLT_U.D | Vector Compare Unsigned Less Than (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
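+/*
+ * All the integer compares return an element-wide mask: all ones (-1)
+ * when the relation holds and all zeros otherwise, so the result can
+ * be used directly as a bitwise select operand.
+ */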
+static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+static inline int8_t msa_ceq_b(int8_t arg1, int8_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+void helper_msa_ceq_b(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_ceq_b(pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_ceq_b(pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_ceq_b(pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_ceq_b(pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_ceq_b(pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_ceq_b(pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_ceq_b(pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_ceq_b(pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_ceq_b(pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_ceq_b(pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_ceq_b(pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_ceq_b(pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_ceq_b(pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_ceq_b(pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_ceq_b(pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_ceq_b(pws->b[15], pwt->b[15]);
+}
+
+static inline int16_t msa_ceq_h(int16_t arg1, int16_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+void helper_msa_ceq_h(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_ceq_h(pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_ceq_h(pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_ceq_h(pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_ceq_h(pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_ceq_h(pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_ceq_h(pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_ceq_h(pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_ceq_h(pws->h[7], pwt->h[7]);
+}
+
+static inline int32_t msa_ceq_w(int32_t arg1, int32_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+void helper_msa_ceq_w(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_ceq_w(pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_ceq_w(pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_ceq_w(pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_ceq_w(pws->w[3], pwt->w[3]);
+}
+
+static inline int64_t msa_ceq_d(int64_t arg1, int64_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+void helper_msa_ceq_d(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_ceq_d(pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_ceq_d(pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 <= arg2 ? -1 : 0;
+}
+
+void helper_msa_cle_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_cle_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_cle_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_cle_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_cle_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_cle_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_cle_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_cle_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_cle_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_cle_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_cle_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_cle_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_cle_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_cle_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_cle_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_cle_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_cle_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_cle_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_cle_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_cle_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_cle_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_cle_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_cle_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_cle_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_cle_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_cle_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_cle_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_cle_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_cle_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_cle_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_cle_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_cle_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_cle_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_cle_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 <= u_arg2 ? -1 : 0;
+}
+
+void helper_msa_cle_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_cle_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_cle_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_cle_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_cle_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_cle_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_cle_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_cle_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_cle_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_cle_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_cle_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_cle_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_cle_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_cle_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_cle_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_cle_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_cle_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_cle_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_cle_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_cle_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_cle_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_cle_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_cle_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_cle_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_cle_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_cle_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_cle_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_cle_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_cle_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_cle_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_cle_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_cle_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_cle_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_cle_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+static inline int8_t msa_clt_s_b(int8_t arg1, int8_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+void helper_msa_clt_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_clt_s_b(pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_clt_s_b(pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_clt_s_b(pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_clt_s_b(pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_clt_s_b(pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_clt_s_b(pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_clt_s_b(pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_clt_s_b(pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_clt_s_b(pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_clt_s_b(pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_clt_s_b(pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_clt_s_b(pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_clt_s_b(pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_clt_s_b(pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_clt_s_b(pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_clt_s_b(pws->b[15], pwt->b[15]);
+}
+
+static inline int16_t msa_clt_s_h(int16_t arg1, int16_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+void helper_msa_clt_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_clt_s_h(pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_clt_s_h(pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_clt_s_h(pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_clt_s_h(pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_clt_s_h(pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_clt_s_h(pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_clt_s_h(pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_clt_s_h(pws->h[7], pwt->h[7]);
+}
+
+static inline int32_t msa_clt_s_w(int32_t arg1, int32_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+void helper_msa_clt_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_clt_s_w(pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_clt_s_w(pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_clt_s_w(pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_clt_s_w(pws->w[3], pwt->w[3]);
+}
+
+static inline int64_t msa_clt_s_d(int64_t arg1, int64_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+void helper_msa_clt_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_clt_s_d(pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_clt_s_d(pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 < u_arg2 ? -1 : 0;
+}
+
+void helper_msa_clt_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_clt_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_clt_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_clt_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_clt_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_clt_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_clt_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_clt_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_clt_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_clt_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_clt_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_clt_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_clt_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_clt_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_clt_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_clt_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_clt_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_clt_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_clt_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_clt_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_clt_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_clt_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_clt_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_clt_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_clt_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_clt_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_clt_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_clt_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_clt_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_clt_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_clt_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_clt_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_clt_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_clt_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Divide
+ * ----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | DIV_S.B | Vector Signed Divide (byte) |
+ * | DIV_S.H | Vector Signed Divide (halfword) |
+ * | DIV_S.W | Vector Signed Divide (word) |
+ * | DIV_S.D | Vector Signed Divide (doubleword) |
+ * | DIV_U.B | Vector Unsigned Divide (byte) |
+ * | DIV_U.H | Vector Unsigned Divide (halfword) |
+ * | DIV_U.W | Vector Unsigned Divide (word) |
+ * | DIV_U.D | Vector Unsigned Divide (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
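+/*
+ * Two special cases are handled explicitly: DF_MIN_INT / -1 is the
+ * only signed overflow and is clamped to DF_MIN_INT, and division by
+ * zero returns -1 for a non-negative dividend and 1 otherwise rather
+ * than trapping.
+ */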
+static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
+ return DF_MIN_INT(df);
+ }
+ return arg2 ? arg1 / arg2
+ : arg1 >= 0 ? -1 : 1;
+}
+
+void helper_msa_div_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_div_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_div_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_div_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_div_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_div_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_div_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_div_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_div_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_div_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_div_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_div_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_div_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_div_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_div_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_div_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_div_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_div_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_div_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_div_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_div_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_div_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_div_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_div_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_div_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_div_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_div_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_div_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_div_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_div_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_div_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_div_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_div_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_div_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
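+/* Unsigned divide by zero likewise avoids a trap and returns all ones. */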
+static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg2 ? u_arg1 / u_arg2 : -1;
+}
+
+void helper_msa_div_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_div_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_div_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_div_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_div_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_div_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_div_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_div_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_div_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_div_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_div_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_div_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_div_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_div_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_div_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_div_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_div_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_div_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_div_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_div_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_div_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_div_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_div_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_div_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_div_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_div_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_div_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_div_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_div_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_div_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_div_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_div_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_div_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_div_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Dot Product
+ * ---------------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | DOTP_S.H | Vector Signed Dot Product (halfword) |
+ * | DOTP_S.W | Vector Signed Dot Product (word) |
+ * | DOTP_S.D | Vector Signed Dot Product (doubleword) |
+ * | DOTP_U.H | Vector Unsigned Dot Product (halfword) |
+ * | DOTP_U.W | Vector Unsigned Dot Product (word) |
+ * | DOTP_U.D | Vector Unsigned Dot Product (doubleword) |
+ * | DPADD_S.H | Vector Signed Dot Product and Add (halfword) |
+ * | DPADD_S.W | Vector Signed Dot Product and Add (word) |
+ * | DPADD_S.D | Vector Signed Dot Product and Add (doubleword) |
+ * | DPADD_U.H | Vector Unsigned Dot Product and Add (halfword) |
+ * | DPADD_U.W | Vector Unsigned Dot Product and Add (word) |
+ * | DPADD_U.D | Vector Unsigned Dot Product and Add (doubleword) |
+ * | DPSUB_S.H | Vector Signed Dot Product and Subtract (halfword) |
+ * | DPSUB_S.W | Vector Signed Dot Product and Subtract (word) |
+ * | DPSUB_S.D | Vector Signed Dot Product and Subtract (doubleword) |
+ * | DPSUB_U.H | Vector Unsigned Dot Product and Subtract (halfword) |
+ * | DPSUB_U.W | Vector Unsigned Dot Product and Subtract (word) |
+ * | DPSUB_U.D | Vector Unsigned Dot Product and Subtract (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+#define SIGNED_EXTRACT(e, o, a, df) \
+ do { \
+ e = SIGNED_EVEN(a, df); \
+ o = SIGNED_ODD(a, df); \
+ } while (0)
+
+#define UNSIGNED_EXTRACT(e, o, a, df) \
+ do { \
+ e = UNSIGNED_EVEN(a, df); \
+ o = UNSIGNED_ODD(a, df); \
+ } while (0)
+
+
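+/*
+ * Each destination element is the dot product of the two half-width
+ * element pairs it spans: even * even + odd * odd. E.g. for
+ * DOTP_S.H, ws.h[0] = 0x0203 and wt.h[0] = 0x0405 give
+ * wd.h[0] = 3 * 5 + 2 * 4 = 23.
+ */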
+static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+void helper_msa_dotp_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dotp_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dotp_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dotp_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dotp_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dotp_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dotp_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dotp_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dotp_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dotp_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dotp_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dotp_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dotp_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dotp_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dotp_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dotp_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dotp_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+void helper_msa_dotp_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dotp_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dotp_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dotp_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dotp_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dotp_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dotp_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dotp_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dotp_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dotp_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dotp_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dotp_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dotp_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dotp_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dotp_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dotp_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dotp_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
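+/*
+ * DPADD and DPSUB reuse the dot product above but accumulate into the
+ * current destination element: dest + dotp(ws, wt) and
+ * dest - dotp(ws, wt) respectively.
+ */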
+static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+void helper_msa_dpadd_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dpadd_s_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dpadd_s_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dpadd_s_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dpadd_s_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dpadd_s_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dpadd_s_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dpadd_s_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dpadd_s_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dpadd_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dpadd_s_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dpadd_s_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dpadd_s_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dpadd_s_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dpadd_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dpadd_s_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dpadd_s_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+void helper_msa_dpadd_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dpadd_u_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dpadd_u_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dpadd_u_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dpadd_u_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dpadd_u_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dpadd_u_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dpadd_u_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dpadd_u_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dpadd_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dpadd_u_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dpadd_u_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dpadd_u_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dpadd_u_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dpadd_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dpadd_u_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dpadd_u_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
+}
+
+void helper_msa_dpsub_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dpsub_s_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dpsub_s_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dpsub_s_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dpsub_s_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dpsub_s_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dpsub_s_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dpsub_s_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dpsub_s_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dpsub_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dpsub_s_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dpsub_s_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dpsub_s_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dpsub_s_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dpsub_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dpsub_s_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dpsub_s_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
+}
+
+void helper_msa_dpsub_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_dpsub_u_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_dpsub_u_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_dpsub_u_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_dpsub_u_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_dpsub_u_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_dpsub_u_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_dpsub_u_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_dpsub_u_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_dpsub_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_dpsub_u_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_dpsub_u_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_dpsub_u_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_dpsub_u_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_dpsub_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_dpsub_u_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_dpsub_u_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Max Min
+ * -----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | MAX_A.B | Vector Maximum Based on Absolute Value (byte) |
+ * | MAX_A.H | Vector Maximum Based on Absolute Value (halfword) |
+ * | MAX_A.W | Vector Maximum Based on Absolute Value (word) |
+ * | MAX_A.D | Vector Maximum Based on Absolute Value (doubleword) |
+ * | MAX_S.B | Vector Signed Maximum (byte) |
+ * | MAX_S.H | Vector Signed Maximum (halfword) |
+ * | MAX_S.W | Vector Signed Maximum (word) |
+ * | MAX_S.D | Vector Signed Maximum (doubleword) |
+ * | MAX_U.B | Vector Unsigned Maximum (byte) |
+ * | MAX_U.H | Vector Unsigned Maximum (halfword) |
+ * | MAX_U.W | Vector Unsigned Maximum (word) |
+ * | MAX_U.D | Vector Unsigned Maximum (doubleword) |
+ * | MIN_A.B | Vector Minimum Based on Absolute Value (byte) |
+ * | MIN_A.H | Vector Minimum Based on Absolute Value (halfword) |
+ * | MIN_A.W | Vector Minimum Based on Absolute Value (word) |
+ * | MIN_A.D | Vector Minimum Based on Absolute Value (doubleword) |
+ * | MIN_S.B | Vector Signed Minimum (byte) |
+ * | MIN_S.H | Vector Signed Minimum (halfword) |
+ * | MIN_S.W | Vector Signed Minimum (word) |
+ * | MIN_S.D | Vector Signed Minimum (doubleword) |
+ * | MIN_U.B | Vector Unsigned Minimum (byte) |
+ * | MIN_U.H | Vector Unsigned Minimum (halfword) |
+ * | MIN_U.W | Vector Unsigned Minimum (word) |
+ * | MIN_U.D | Vector Unsigned Minimum (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
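+/*
+ * MAX_A/MIN_A compare absolute values but return the original signed
+ * operand. Negating INT64_MIN overflows in signed arithmetic; in
+ * practice the two's-complement wrap-around yields the bit pattern
+ * 2^63, which still compares correctly once held in the uint64_t, so
+ * the DF_DOUBLE corner case appears to order as intended.
+ */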
+static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 > abs_arg2 ? arg1 : arg2;
+}
+
+void helper_msa_max_a_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_max_a_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_max_a_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_max_a_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_max_a_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_max_a_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_max_a_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_max_a_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_max_a_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_max_a_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_max_a_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_max_a_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_max_a_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_max_a_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_max_a_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_max_a_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_max_a_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_max_a_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_max_a_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_max_a_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_max_a_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_max_a_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_max_a_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_max_a_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_max_a_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_max_a_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_max_a_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_max_a_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_max_a_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_max_a_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_max_a_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_max_a_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_max_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_max_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+void helper_msa_max_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_max_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_max_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_max_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_max_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_max_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_max_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_max_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_max_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_max_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_max_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_max_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_max_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_max_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_max_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_max_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_max_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_max_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_max_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_max_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_max_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_max_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_max_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_max_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_max_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_max_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_max_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_max_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_max_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_max_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_max_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_max_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_max_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_max_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
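+/*
+ * The unsigned compare is done on the zero-extended (masked) values,
+ * but the original sign-extended operand is returned; only the low
+ * DF_BITS(df) bits are stored, so the destination element is the same
+ * either way.
+ */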
+static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 > u_arg2 ? arg1 : arg2;
+}
+
+void helper_msa_max_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_max_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_max_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_max_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_max_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_max_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_max_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_max_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_max_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_max_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_max_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_max_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_max_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_max_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_max_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_max_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_max_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_max_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_max_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_max_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_max_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_max_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_max_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_max_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_max_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_max_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_max_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_max_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_max_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_max_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_max_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_max_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_max_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_max_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 < abs_arg2 ? arg1 : arg2;
+}
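+/*
+ * Note on msa_min_a_df() above: magnitudes are formed at int64_t
+ * width, so e.g. |-128| is 128 for DF_BYTE without overflowing the
+ * element type; when the magnitudes are equal, arg2 is returned.
+ */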
+
+void helper_msa_min_a_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_min_a_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_min_a_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_min_a_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_min_a_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_min_a_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_min_a_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_min_a_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_min_a_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_min_a_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_min_a_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_min_a_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_min_a_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_min_a_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_min_a_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_min_a_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_min_a_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_min_a_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_min_a_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_min_a_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_min_a_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_min_a_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_min_a_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_min_a_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_min_a_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_min_a_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_min_a_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_min_a_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_min_a_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_min_a_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_min_a_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_min_a_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_min_a_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_min_a_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 < arg2 ? arg1 : arg2;
+}
+
+void helper_msa_min_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_min_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_min_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_min_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_min_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_min_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_min_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_min_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_min_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_min_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_min_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_min_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_min_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_min_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_min_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_min_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_min_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_min_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_min_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_min_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_min_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_min_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_min_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_min_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_min_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_min_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_min_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_min_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_min_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_min_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_min_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_min_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_min_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_min_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 < u_arg2 ? arg1 : arg2;
+}
+
+void helper_msa_min_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_min_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_min_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_min_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_min_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_min_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_min_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_min_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_min_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_min_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_min_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_min_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_min_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_min_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_min_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_min_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_min_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_min_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_min_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_min_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_min_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_min_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_min_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_min_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_min_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_min_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_min_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_min_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_min_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_min_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_min_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_min_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_min_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_min_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Modulo
+ * ----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | MOD_S.B | Vector Signed Modulo (byte) |
+ * | MOD_S.H | Vector Signed Modulo (halfword) |
+ * | MOD_S.W | Vector Signed Modulo (word) |
+ * | MOD_S.D | Vector Signed Modulo (doubleword) |
+ * | MOD_U.B | Vector Unsigned Modulo (byte) |
+ * | MOD_U.H | Vector Unsigned Modulo (halfword) |
+ * | MOD_U.W | Vector Unsigned Modulo (word) |
+ * | MOD_U.D | Vector Unsigned Modulo (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
+ return 0;
+ }
+ return arg2 ? arg1 % arg2 : arg1;
+}
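+/*
+ * Note on msa_mod_s_df() above: INT64_MIN % -1 is undefined behaviour
+ * in C even though the mathematical remainder is 0, hence the explicit
+ * DF_MIN_INT check; a zero divisor simply yields arg1.
+ */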
+
+void helper_msa_mod_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_mod_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_mod_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_mod_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_mod_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_mod_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_mod_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_mod_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_mod_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_mod_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_mod_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_mod_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_mod_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_mod_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_mod_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_mod_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_mod_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_mod_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_mod_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_mod_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_mod_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_mod_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_mod_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_mod_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_mod_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_mod_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_mod_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_mod_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_mod_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_mod_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_mod_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_mod_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_mod_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_mod_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg2 ? u_arg1 % u_arg2 : u_arg1;
+}
+
+void helper_msa_mod_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_mod_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_mod_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_mod_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_mod_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_mod_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_mod_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_mod_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_mod_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_mod_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_mod_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_mod_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_mod_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_mod_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_mod_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_mod_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_mod_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_mod_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_mod_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_mod_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_mod_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_mod_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_mod_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_mod_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_mod_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_mod_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_mod_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_mod_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_mod_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_mod_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_mod_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_mod_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_mod_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_mod_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Multiply
+ * ------------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | MADDV.B | Vector Multiply and Add (byte) |
+ * | MADDV.H | Vector Multiply and Add (halfword) |
+ * | MADDV.W | Vector Multiply and Add (word) |
+ * | MADDV.D | Vector Multiply and Add (doubleword) |
+ * | MSUBV.B | Vector Multiply and Subtract (byte) |
+ * | MSUBV.H | Vector Multiply and Subtract (halfword) |
+ * | MSUBV.W | Vector Multiply and Subtract (word) |
+ * | MSUBV.D | Vector Multiply and Subtract (doubleword) |
+ * | MULV.B | Vector Multiply (byte) |
+ * | MULV.H | Vector Multiply (halfword) |
+ * | MULV.W | Vector Multiply (word) |
+ * | MULV.D | Vector Multiply (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ return dest + arg1 * arg2;
+}
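+/*
+ * Note on msa_maddv_df() above: the multiply-accumulate is evaluated
+ * at int64_t width and truncated to the data format on write-back,
+ * e.g. a DF_BYTE accumulation reaching 130 stores as 0x82 (-126).
+ */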
+
+void helper_msa_maddv_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_maddv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_maddv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_maddv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_maddv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_maddv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_maddv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_maddv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_maddv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_maddv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_maddv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_maddv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_maddv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_maddv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_maddv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_maddv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_maddv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_maddv_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_maddv_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_maddv_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_maddv_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_maddv_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_maddv_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_maddv_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_maddv_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_maddv_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_maddv_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_maddv_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_maddv_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_maddv_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_maddv_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_maddv_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_maddv_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_maddv_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ return dest - arg1 * arg2;
+}
+
+void helper_msa_msubv_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_msubv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_msubv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_msubv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_msubv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_msubv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_msubv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_msubv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_msubv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_msubv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_msubv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_msubv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_msubv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_msubv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_msubv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_msubv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_msubv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_msubv_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_msubv_df(DF_HALF, pwd->h[0], pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_msubv_df(DF_HALF, pwd->h[1], pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_msubv_df(DF_HALF, pwd->h[2], pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_msubv_df(DF_HALF, pwd->h[3], pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_msubv_df(DF_HALF, pwd->h[4], pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_msubv_df(DF_HALF, pwd->h[5], pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_msubv_df(DF_HALF, pwd->h[6], pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_msubv_df(DF_HALF, pwd->h[7], pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_msubv_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_msubv_df(DF_WORD, pwd->w[0], pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_msubv_df(DF_WORD, pwd->w[1], pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_msubv_df(DF_WORD, pwd->w[2], pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_msubv_df(DF_WORD, pwd->w[3], pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_msubv_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_msubv_df(DF_DOUBLE, pwd->d[0], pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_msubv_df(DF_DOUBLE, pwd->d[1], pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 * arg2;
+}
+
+void helper_msa_mulv_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_mulv_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_mulv_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_mulv_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_mulv_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_mulv_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_mulv_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_mulv_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_mulv_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_mulv_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_mulv_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_mulv_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_mulv_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_mulv_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_mulv_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_mulv_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_mulv_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_mulv_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_mulv_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_mulv_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_mulv_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_mulv_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_mulv_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_mulv_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_mulv_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_mulv_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_mulv_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_mulv_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_mulv_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_mulv_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_mulv_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_mulv_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_mulv_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_mulv_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Int Subtract
+ * ------------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | ASUB_S.B | Vector Absolute Values of Signed Subtract (byte) |
+ * | ASUB_S.H | Vector Absolute Values of Signed Subtract (halfword) |
+ * | ASUB_S.W | Vector Absolute Values of Signed Subtract (word) |
+ * | ASUB_S.D | Vector Absolute Values of Signed Subtract (doubleword) |
+ * | ASUB_U.B | Vector Absolute Values of Unsigned Subtract (byte) |
+ * | ASUB_U.H | Vector Absolute Values of Unsigned Subtract (halfword) |
+ * | ASUB_U.W | Vector Absolute Values of Unsigned Subtract (word) |
+ * | ASUB_U.D | Vector Absolute Values of Unsigned Subtract (doubleword) |
+ * | HSUB_S.H | Vector Signed Horizontal Subtract (halfword) |
+ * | HSUB_S.W | Vector Signed Horizontal Subtract (word) |
+ * | HSUB_S.D | Vector Signed Horizontal Subtract (doubleword) |
+ * | HSUB_U.H | Vector Unsigned Horizontal Subtract (halfword) |
+ * | HSUB_U.W | Vector Unsigned Horizontal Subtract (word) |
+ * | HSUB_U.D | Vector Unsigned Horizontal Subtract (doubleword) |
+ * | SUBS_S.B | Vector Signed Saturated Subtract (of Signed) (byte) |
+ * | SUBS_S.H | Vector Signed Saturated Subtract (of Signed) (halfword) |
+ * | SUBS_S.W | Vector Signed Saturated Subtract (of Signed) (word) |
+ * | SUBS_S.D | Vector Signed Saturated Subtract (of Signed) (doubleword)|
+ * | SUBS_U.B | Vector Unsigned Saturated Subtract (of Uns.) (byte) |
+ * | SUBS_U.H | Vector Unsigned Saturated Subtract (of Uns.) (halfword) |
+ * | SUBS_U.W | Vector Unsigned Saturated Subtract (of Uns.) (word) |
+ * | SUBS_U.D | Vector Unsigned Saturated Subtract (of Uns.) (doubleword)|
+ * | SUBSUS_U.B | Vector Uns. Sat. Subtract (of S. from Uns.) (byte) |
+ * | SUBSUS_U.H | Vector Uns. Sat. Subtract (of S. from Uns.) (halfword) |
+ * | SUBSUS_U.W | Vector Uns. Sat. Subtract (of S. from Uns.) (word) |
+ * | SUBSUS_U.D | Vector Uns. Sat. Subtract (of S. from Uns.) (doubleword) |
+ * | SUBSUU_S.B | Vector Signed Saturated Subtract (of Uns.) (byte) |
+ * | SUBSUU_S.H | Vector Signed Saturated Subtract (of Uns.) (halfword) |
+ * | SUBSUU_S.W | Vector Signed Saturated Subtract (of Uns.) (word) |
+ * | SUBSUU_S.D | Vector Signed Saturated Subtract (of Uns.) (doubleword) |
+ * | SUBV.B | Vector Subtract (byte) |
+ * | SUBV.H | Vector Subtract (halfword) |
+ * | SUBV.W | Vector Subtract (word) |
+ * | SUBV.D | Vector Subtract (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed compare */
+ return (arg1 < arg2) ?
+ (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
+}
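+/*
+ * Note on msa_asub_s_df() above: the absolute difference is returned
+ * as an unsigned magnitude, so it may exceed the signed element range:
+ * for DF_BYTE, |127 - (-128)| is 255 and the full 0xff lane pattern is
+ * stored.
+ */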
+
+void helper_msa_asub_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_asub_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_asub_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_asub_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_asub_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_asub_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_asub_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_asub_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_asub_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_asub_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_asub_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_asub_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_asub_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_asub_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_asub_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_asub_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_asub_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_asub_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_asub_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_asub_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_asub_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_asub_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_asub_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_asub_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_asub_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_asub_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_asub_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_asub_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_asub_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_asub_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_asub_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_asub_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_asub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_asub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned compare */
+ return (u_arg1 < u_arg2) ?
+ (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
+}
+
+void helper_msa_asub_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_asub_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_asub_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_asub_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_asub_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_asub_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_asub_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_asub_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_asub_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_asub_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_asub_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_asub_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_asub_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_asub_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_asub_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_asub_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_asub_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_asub_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_asub_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_asub_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_asub_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_asub_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_asub_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_asub_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_asub_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_asub_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_asub_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_asub_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_asub_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_asub_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_asub_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_asub_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_asub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_asub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
+}
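+/*
+ * Note on msa_hsub_s_df() above: each destination element combines
+ * half-width pieces of its sources; SIGNED_ODD() selects the odd
+ * (upper) half of the ws element and SIGNED_EVEN() the even (lower)
+ * half of the wt element, so e.g. HSUB_S.H subtracts one byte of wt
+ * from one byte of ws per halfword.
+ */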
+
+void helper_msa_hsub_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_hsub_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_hsub_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_hsub_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_hsub_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_hsub_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_hsub_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_hsub_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_hsub_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_hsub_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_hsub_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_hsub_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_hsub_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_hsub_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_hsub_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_hsub_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_hsub_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
+}
+
+void helper_msa_hsub_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_hsub_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_hsub_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_hsub_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_hsub_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_hsub_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_hsub_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_hsub_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_hsub_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_hsub_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_hsub_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_hsub_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_hsub_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_hsub_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_hsub_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_hsub_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_hsub_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (arg2 > 0) {
+ return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
+ } else {
+ return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
+ }
+}
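+/*
+ * Note on msa_subs_s_df() above: the comparisons are arranged so that
+ * arg1 - arg2 is only computed when it cannot overflow. E.g. for
+ * DF_BYTE, 100 - (-100) would be 200; since 100 >= max_int + arg2
+ * (127 - 100 = 27), the result saturates to 127 instead.
+ */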
+
+void helper_msa_subs_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_subs_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_subs_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_subs_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_subs_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_subs_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_subs_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_subs_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_subs_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_subs_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_subs_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_subs_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_subs_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_subs_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_subs_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_subs_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_subs_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_subs_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_subs_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_subs_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_subs_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_subs_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_subs_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_subs_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_subs_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_subs_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_subs_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_subs_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_subs_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_subs_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_subs_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_subs_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_subs_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_subs_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
+}
+
+void helper_msa_subs_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_subs_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_subs_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_subs_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_subs_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_subs_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_subs_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_subs_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_subs_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_subs_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_subs_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_subs_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_subs_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_subs_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_subs_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_subs_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_subs_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_subs_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_subs_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_subs_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_subs_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_subs_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_subs_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_subs_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_subs_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_subs_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_subs_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_subs_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_subs_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_subs_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_subs_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_subs_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_subs_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_subs_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t max_uint = DF_MAX_UINT(df);
+ if (arg2 >= 0) {
+ uint64_t u_arg2 = (uint64_t)arg2;
+ return (u_arg1 > u_arg2) ?
+ (int64_t)(u_arg1 - u_arg2) :
+ 0;
+ } else {
+ uint64_t u_arg2 = (uint64_t)(-arg2);
+ return (u_arg1 < max_uint - u_arg2) ?
+ (int64_t)(u_arg1 + u_arg2) :
+ (int64_t)max_uint;
+ }
+}
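+/*
+ * Note on msa_subsus_u_df() above: unsigned arg1 minus signed arg2.
+ * A negative arg2 adds its magnitude, clamping at DF_MAX_UINT: for
+ * DF_BYTE, 250 - (-10) clamps to 255, while 5 - 10 clamps to 0.
+ */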
+
+void helper_msa_subsus_u_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_subsus_u_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_subsus_u_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_subsus_u_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_subsus_u_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_subsus_u_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_subsus_u_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_subsus_u_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_subsus_u_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_subsus_u_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_subsus_u_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_subsus_u_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_subsus_u_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_subsus_u_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_subsus_u_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_subsus_u_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_subsus_u_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_subsus_u_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_subsus_u_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_subsus_u_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_subsus_u_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_subsus_u_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_subsus_u_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_subsus_u_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_subsus_u_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_subsus_u_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_subsus_u_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_subsus_u_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_subsus_u_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_subsus_u_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_subsus_u_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_subsus_u_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_subsus_u_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_subsus_u_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (u_arg1 > u_arg2) {
+ return u_arg1 - u_arg2 < (uint64_t)max_int ?
+ (int64_t)(u_arg1 - u_arg2) :
+ max_int;
+ } else {
+ return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
+ (int64_t)(u_arg1 - u_arg2) :
+ min_int;
+ }
+}
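+/*
+ * Note on msa_subsuu_s_df() above: unsigned operands, signed saturated
+ * result. For DF_BYTE, 200 - 10 = 190 clamps to 127 and
+ * 10 - 200 = -190 clamps to -128.
+ */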
+
+void helper_msa_subsuu_s_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_subsuu_s_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_subsuu_s_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_subsuu_s_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_subsuu_s_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_subsuu_s_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_subsuu_s_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_subsuu_s_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_subsuu_s_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_subsuu_s_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_subsuu_s_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_subsuu_s_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_subsuu_s_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_subsuu_s_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_subsuu_s_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_subsuu_s_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_subsuu_s_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_subsuu_s_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_subsuu_s_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_subsuu_s_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_subsuu_s_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_subsuu_s_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_subsuu_s_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_subsuu_s_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_subsuu_s_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_subsuu_s_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_subsuu_s_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_subsuu_s_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_subsuu_s_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_subsuu_s_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_subsuu_s_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_subsuu_s_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_subsuu_s_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_subsuu_s_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 - arg2;
+}
+
+void helper_msa_subv_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_subv_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_subv_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_subv_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_subv_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_subv_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_subv_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_subv_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_subv_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_subv_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_subv_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_subv_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_subv_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_subv_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_subv_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_subv_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_subv_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_subv_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_subv_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_subv_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_subv_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_subv_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_subv_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_subv_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_subv_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_subv_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_subv_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_subv_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_subv_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_subv_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_subv_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_subv_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_subv_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_subv_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+/*
+ * Interleave
+ * ----------
+ *
+ * +---------------+----------------------------------------------------------+
+ * | ILVEV.B | Vector Interleave Even (byte) |
+ * | ILVEV.H | Vector Interleave Even (halfword) |
+ * | ILVEV.W | Vector Interleave Even (word) |
+ * | ILVEV.D | Vector Interleave Even (doubleword) |
+ * | ILVOD.B | Vector Interleave Odd (byte) |
+ * | ILVOD.H | Vector Interleave Odd (halfword) |
+ * | ILVOD.W | Vector Interleave Odd (word) |
+ * | ILVOD.D | Vector Interleave Odd (doubleword) |
+ * | ILVL.B | Vector Interleave Left (byte) |
+ * | ILVL.H | Vector Interleave Left (halfword) |
+ * | ILVL.W | Vector Interleave Left (word) |
+ * | ILVL.D | Vector Interleave Left (doubleword) |
+ * | ILVR.B | Vector Interleave Right (byte) |
+ * | ILVR.H | Vector Interleave Right (halfword) |
+ * | ILVR.W | Vector Interleave Right (word) |
+ * | ILVR.D | Vector Interleave Right (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+void helper_msa_ilvev_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[8] = pws->b[9];
+ pwd->b[9] = pwt->b[9];
+ pwd->b[10] = pws->b[11];
+ pwd->b[11] = pwt->b[11];
+ pwd->b[12] = pws->b[13];
+ pwd->b[13] = pwt->b[13];
+ pwd->b[14] = pws->b[15];
+ pwd->b[15] = pwt->b[15];
+ pwd->b[0] = pws->b[1];
+ pwd->b[1] = pwt->b[1];
+ pwd->b[2] = pws->b[3];
+ pwd->b[3] = pwt->b[3];
+ pwd->b[4] = pws->b[5];
+ pwd->b[5] = pwt->b[5];
+ pwd->b[6] = pws->b[7];
+ pwd->b[7] = pwt->b[7];
+#else
+ pwd->b[15] = pws->b[14];
+ pwd->b[14] = pwt->b[14];
+ pwd->b[13] = pws->b[12];
+ pwd->b[12] = pwt->b[12];
+ pwd->b[11] = pws->b[10];
+ pwd->b[10] = pwt->b[10];
+ pwd->b[9] = pws->b[8];
+ pwd->b[8] = pwt->b[8];
+ pwd->b[7] = pws->b[6];
+ pwd->b[6] = pwt->b[6];
+ pwd->b[5] = pws->b[4];
+ pwd->b[4] = pwt->b[4];
+ pwd->b[3] = pws->b[2];
+ pwd->b[2] = pwt->b[2];
+ pwd->b[1] = pws->b[0];
+ pwd->b[0] = pwt->b[0];
+#endif
+}
+
+void helper_msa_ilvev_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[4] = pws->h[5];
+ pwd->h[5] = pwt->h[5];
+ pwd->h[6] = pws->h[7];
+ pwd->h[7] = pwt->h[7];
+ pwd->h[0] = pws->h[1];
+ pwd->h[1] = pwt->h[1];
+ pwd->h[2] = pws->h[3];
+ pwd->h[3] = pwt->h[3];
+#else
+ pwd->h[7] = pws->h[6];
+ pwd->h[6] = pwt->h[6];
+ pwd->h[5] = pws->h[4];
+ pwd->h[4] = pwt->h[4];
+ pwd->h[3] = pws->h[2];
+ pwd->h[2] = pwt->h[2];
+ pwd->h[1] = pws->h[0];
+ pwd->h[0] = pwt->h[0];
+#endif
+}
+
+void helper_msa_ilvev_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[2] = pws->w[3];
+ pwd->w[3] = pwt->w[3];
+ pwd->w[0] = pws->w[1];
+ pwd->w[1] = pwt->w[1];
+#else
+ pwd->w[3] = pws->w[2];
+ pwd->w[2] = pwt->w[2];
+ pwd->w[1] = pws->w[0];
+ pwd->w[0] = pwt->w[0];
+#endif
+}
+
+void helper_msa_ilvev_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[1] = pws->d[0];
+ pwd->d[0] = pwt->d[0];
+}
+
+
+void helper_msa_ilvod_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[7] = pwt->b[6];
+ pwd->b[6] = pws->b[6];
+ pwd->b[5] = pwt->b[4];
+ pwd->b[4] = pws->b[4];
+ pwd->b[3] = pwt->b[2];
+ pwd->b[2] = pws->b[2];
+ pwd->b[1] = pwt->b[0];
+ pwd->b[0] = pws->b[0];
+ pwd->b[15] = pwt->b[14];
+ pwd->b[14] = pws->b[14];
+ pwd->b[13] = pwt->b[12];
+ pwd->b[12] = pws->b[12];
+ pwd->b[11] = pwt->b[10];
+ pwd->b[10] = pws->b[10];
+ pwd->b[9] = pwt->b[8];
+ pwd->b[8] = pws->b[8];
+#else
+ pwd->b[0] = pwt->b[1];
+ pwd->b[1] = pws->b[1];
+ pwd->b[2] = pwt->b[3];
+ pwd->b[3] = pws->b[3];
+ pwd->b[4] = pwt->b[5];
+ pwd->b[5] = pws->b[5];
+ pwd->b[6] = pwt->b[7];
+ pwd->b[7] = pws->b[7];
+ pwd->b[8] = pwt->b[9];
+ pwd->b[9] = pws->b[9];
+ pwd->b[10] = pwt->b[11];
+ pwd->b[11] = pws->b[11];
+ pwd->b[12] = pwt->b[13];
+ pwd->b[13] = pws->b[13];
+ pwd->b[14] = pwt->b[15];
+ pwd->b[15] = pws->b[15];
+#endif
+}
+
+void helper_msa_ilvod_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[3] = pwt->h[2];
+ pwd->h[2] = pws->h[2];
+ pwd->h[1] = pwt->h[0];
+ pwd->h[0] = pws->h[0];
+ pwd->h[7] = pwt->h[6];
+ pwd->h[6] = pws->h[6];
+ pwd->h[5] = pwt->h[4];
+ pwd->h[4] = pws->h[4];
+#else
+ pwd->h[0] = pwt->h[1];
+ pwd->h[1] = pws->h[1];
+ pwd->h[2] = pwt->h[3];
+ pwd->h[3] = pws->h[3];
+ pwd->h[4] = pwt->h[5];
+ pwd->h[5] = pws->h[5];
+ pwd->h[6] = pwt->h[7];
+ pwd->h[7] = pws->h[7];
+#endif
+}
+
+void helper_msa_ilvod_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[1] = pwt->w[0];
+ pwd->w[0] = pws->w[0];
+ pwd->w[3] = pwt->w[2];
+ pwd->w[2] = pws->w[2];
+#else
+ pwd->w[0] = pwt->w[1];
+ pwd->w[1] = pws->w[1];
+ pwd->w[2] = pwt->w[3];
+ pwd->w[3] = pws->w[3];
+#endif
+}
+
+void helper_msa_ilvod_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pwt->d[1];
+ pwd->d[1] = pws->d[1];
+}
+
+
+void helper_msa_ilvl_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[7] = pwt->b[15];
+ pwd->b[6] = pws->b[15];
+ pwd->b[5] = pwt->b[14];
+ pwd->b[4] = pws->b[14];
+ pwd->b[3] = pwt->b[13];
+ pwd->b[2] = pws->b[13];
+ pwd->b[1] = pwt->b[12];
+ pwd->b[0] = pws->b[12];
+ pwd->b[15] = pwt->b[11];
+ pwd->b[14] = pws->b[11];
+ pwd->b[13] = pwt->b[10];
+ pwd->b[12] = pws->b[10];
+ pwd->b[11] = pwt->b[9];
+ pwd->b[10] = pws->b[9];
+ pwd->b[9] = pwt->b[8];
+ pwd->b[8] = pws->b[8];
+#else
+ pwd->b[0] = pwt->b[8];
+ pwd->b[1] = pws->b[8];
+ pwd->b[2] = pwt->b[9];
+ pwd->b[3] = pws->b[9];
+ pwd->b[4] = pwt->b[10];
+ pwd->b[5] = pws->b[10];
+ pwd->b[6] = pwt->b[11];
+ pwd->b[7] = pws->b[11];
+ pwd->b[8] = pwt->b[12];
+ pwd->b[9] = pws->b[12];
+ pwd->b[10] = pwt->b[13];
+ pwd->b[11] = pws->b[13];
+ pwd->b[12] = pwt->b[14];
+ pwd->b[13] = pws->b[14];
+ pwd->b[14] = pwt->b[15];
+ pwd->b[15] = pws->b[15];
+#endif
+}
+
+void helper_msa_ilvl_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[3] = pwt->h[7];
+ pwd->h[2] = pws->h[7];
+ pwd->h[1] = pwt->h[6];
+ pwd->h[0] = pws->h[6];
+ pwd->h[7] = pwt->h[5];
+ pwd->h[6] = pws->h[5];
+ pwd->h[5] = pwt->h[4];
+ pwd->h[4] = pws->h[4];
+#else
+ pwd->h[0] = pwt->h[4];
+ pwd->h[1] = pws->h[4];
+ pwd->h[2] = pwt->h[5];
+ pwd->h[3] = pws->h[5];
+ pwd->h[4] = pwt->h[6];
+ pwd->h[5] = pws->h[6];
+ pwd->h[6] = pwt->h[7];
+ pwd->h[7] = pws->h[7];
+#endif
+}
+
+void helper_msa_ilvl_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[1] = pwt->w[3];
+ pwd->w[0] = pws->w[3];
+ pwd->w[3] = pwt->w[2];
+ pwd->w[2] = pws->w[2];
+#else
+ pwd->w[0] = pwt->w[2];
+ pwd->w[1] = pws->w[2];
+ pwd->w[2] = pwt->w[3];
+ pwd->w[3] = pws->w[3];
+#endif
+}
+
+void helper_msa_ilvl_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pwt->d[1];
+ pwd->d[1] = pws->d[1];
+}
+
+
+void helper_msa_ilvr_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[8] = pws->b[0];
+ pwd->b[9] = pwt->b[0];
+ pwd->b[10] = pws->b[1];
+ pwd->b[11] = pwt->b[1];
+ pwd->b[12] = pws->b[2];
+ pwd->b[13] = pwt->b[2];
+ pwd->b[14] = pws->b[3];
+ pwd->b[15] = pwt->b[3];
+ pwd->b[0] = pws->b[4];
+ pwd->b[1] = pwt->b[4];
+ pwd->b[2] = pws->b[5];
+ pwd->b[3] = pwt->b[5];
+ pwd->b[4] = pws->b[6];
+ pwd->b[5] = pwt->b[6];
+ pwd->b[6] = pws->b[7];
+ pwd->b[7] = pwt->b[7];
+#else
+ pwd->b[15] = pws->b[7];
+ pwd->b[14] = pwt->b[7];
+ pwd->b[13] = pws->b[6];
+ pwd->b[12] = pwt->b[6];
+ pwd->b[11] = pws->b[5];
+ pwd->b[10] = pwt->b[5];
+ pwd->b[9] = pws->b[4];
+ pwd->b[8] = pwt->b[4];
+ pwd->b[7] = pws->b[3];
+ pwd->b[6] = pwt->b[3];
+ pwd->b[5] = pws->b[2];
+ pwd->b[4] = pwt->b[2];
+ pwd->b[3] = pws->b[1];
+ pwd->b[2] = pwt->b[1];
+ pwd->b[1] = pws->b[0];
+ pwd->b[0] = pwt->b[0];
+#endif
+}
+
+void helper_msa_ilvr_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[4] = pws->h[0];
+ pwd->h[5] = pwt->h[0];
+ pwd->h[6] = pws->h[1];
+ pwd->h[7] = pwt->h[1];
+ pwd->h[0] = pws->h[2];
+ pwd->h[1] = pwt->h[2];
+ pwd->h[2] = pws->h[3];
+ pwd->h[3] = pwt->h[3];
+#else
+ pwd->h[7] = pws->h[3];
+ pwd->h[6] = pwt->h[3];
+ pwd->h[5] = pws->h[2];
+ pwd->h[4] = pwt->h[2];
+ pwd->h[3] = pws->h[1];
+ pwd->h[2] = pwt->h[1];
+ pwd->h[1] = pws->h[0];
+ pwd->h[0] = pwt->h[0];
+#endif
+}
+
+void helper_msa_ilvr_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[2] = pws->w[0];
+ pwd->w[3] = pwt->w[0];
+ pwd->w[0] = pws->w[1];
+ pwd->w[1] = pwt->w[1];
+#else
+ pwd->w[3] = pws->w[1];
+ pwd->w[2] = pwt->w[1];
+ pwd->w[1] = pws->w[0];
+ pwd->w[0] = pwt->w[0];
+#endif
+}
+
+void helper_msa_ilvr_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[1] = pws->d[0];
+ pwd->d[0] = pwt->d[0];
+}
+
+
+/*
+ * Logic
+ * -----
+ *
+ * +---------------+----------------------------------------------------------+
+ * | AND.V | Vector Logical And |
+ * | NOR.V | Vector Logical Negated Or |
+ * | OR.V | Vector Logical Or |
+ * | XOR.V | Vector Logical Exclusive Or |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+void helper_msa_and_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pws->d[0] & pwt->d[0];
+ pwd->d[1] = pws->d[1] & pwt->d[1];
+}
+
+void helper_msa_nor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = ~(pws->d[0] | pwt->d[0]);
+ pwd->d[1] = ~(pws->d[1] | pwt->d[1]);
+}
+
+void helper_msa_or_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pws->d[0] | pwt->d[0];
+ pwd->d[1] = pws->d[1] | pwt->d[1];
+}
+
+void helper_msa_xor_v(CPUMIPSState *env, uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pws->d[0] ^ pwt->d[0];
+ pwd->d[1] = pws->d[1] ^ pwt->d[1];
+}
+
+
+/*
+ * Move
+ * ----
+ *
+ * +---------------+----------------------------------------------------------+
+ * | MOVE.V | Vector Move |
+ * +---------------+----------------------------------------------------------+
+ */
+
+static inline void msa_move_v(wr_t *pwd, wr_t *pws)
+{
+ pwd->d[0] = pws->d[0];
+ pwd->d[1] = pws->d[1];
+}
+
+void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_move_v(pwd, pws);
+}
+
+
+/*
+ * Pack
+ * ----
+ *
+ * +---------------+----------------------------------------------------------+
+ * | PCKEV.B | Vector Pack Even (byte) |
+ * | PCKEV.H | Vector Pack Even (halfword) |
+ * | PCKEV.W | Vector Pack Even (word) |
+ * | PCKEV.D | Vector Pack Even (doubleword) |
+ * | PCKOD.B | Vector Pack Odd (byte) |
+ * | PCKOD.H | Vector Pack Odd (halfword) |
+ * | PCKOD.W | Vector Pack Odd (word) |
+ * | PCKOD.D | Vector Pack Odd (doubleword) |
+ * | VSHF.B | Vector Data Preserving Shuffle (byte) |
+ * | VSHF.H | Vector Data Preserving Shuffle (halfword) |
+ * | VSHF.W | Vector Data Preserving Shuffle (word) |
+ * | VSHF.D | Vector Data Preserving Shuffle (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+void helper_msa_pckev_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[8] = pws->b[9];
+ pwd->b[10] = pws->b[13];
+ pwd->b[12] = pws->b[1];
+ pwd->b[14] = pws->b[5];
+ pwd->b[0] = pwt->b[9];
+ pwd->b[2] = pwt->b[13];
+ pwd->b[4] = pwt->b[1];
+ pwd->b[6] = pwt->b[5];
+ pwd->b[9] = pws->b[11];
+ pwd->b[13] = pws->b[3];
+ pwd->b[1] = pwt->b[11];
+ pwd->b[5] = pwt->b[3];
+ pwd->b[11] = pws->b[15];
+ pwd->b[3] = pwt->b[15];
+ pwd->b[15] = pws->b[7];
+ pwd->b[7] = pwt->b[7];
+#else
+ pwd->b[15] = pws->b[14];
+ pwd->b[13] = pws->b[10];
+ pwd->b[11] = pws->b[6];
+ pwd->b[9] = pws->b[2];
+ pwd->b[7] = pwt->b[14];
+ pwd->b[5] = pwt->b[10];
+ pwd->b[3] = pwt->b[6];
+ pwd->b[1] = pwt->b[2];
+ pwd->b[14] = pws->b[12];
+ pwd->b[10] = pws->b[4];
+ pwd->b[6] = pwt->b[12];
+ pwd->b[2] = pwt->b[4];
+ pwd->b[12] = pws->b[8];
+ pwd->b[4] = pwt->b[8];
+ pwd->b[8] = pws->b[0];
+ pwd->b[0] = pwt->b[0];
+#endif
+}
+
+void helper_msa_pckev_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[4] = pws->h[5];
+ pwd->h[6] = pws->h[1];
+ pwd->h[0] = pwt->h[5];
+ pwd->h[2] = pwt->h[1];
+ pwd->h[5] = pws->h[7];
+ pwd->h[1] = pwt->h[7];
+ pwd->h[7] = pws->h[3];
+ pwd->h[3] = pwt->h[3];
+#else
+ pwd->h[7] = pws->h[6];
+ pwd->h[5] = pws->h[2];
+ pwd->h[3] = pwt->h[6];
+ pwd->h[1] = pwt->h[2];
+ pwd->h[6] = pws->h[4];
+ pwd->h[2] = pwt->h[4];
+ pwd->h[4] = pws->h[0];
+ pwd->h[0] = pwt->h[0];
+#endif
+}
+
+void helper_msa_pckev_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[2] = pws->w[3];
+ pwd->w[0] = pwt->w[3];
+ pwd->w[3] = pws->w[1];
+ pwd->w[1] = pwt->w[1];
+#else
+ pwd->w[3] = pws->w[2];
+ pwd->w[1] = pwt->w[2];
+ pwd->w[2] = pws->w[0];
+ pwd->w[0] = pwt->w[0];
+#endif
+}
+
+void helper_msa_pckev_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[1] = pws->d[0];
+ pwd->d[0] = pwt->d[0];
+}
+
+
+void helper_msa_pckod_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->b[7] = pwt->b[6];
+ pwd->b[5] = pwt->b[2];
+ pwd->b[3] = pwt->b[14];
+ pwd->b[1] = pwt->b[10];
+ pwd->b[15] = pws->b[6];
+ pwd->b[13] = pws->b[2];
+ pwd->b[11] = pws->b[14];
+ pwd->b[9] = pws->b[10];
+ pwd->b[6] = pwt->b[4];
+ pwd->b[2] = pwt->b[12];
+ pwd->b[14] = pws->b[4];
+ pwd->b[10] = pws->b[12];
+ pwd->b[4] = pwt->b[0];
+ pwd->b[12] = pws->b[0];
+ pwd->b[0] = pwt->b[8];
+ pwd->b[8] = pws->b[8];
+#else
+ pwd->b[0] = pwt->b[1];
+ pwd->b[2] = pwt->b[5];
+ pwd->b[4] = pwt->b[9];
+ pwd->b[6] = pwt->b[13];
+ pwd->b[8] = pws->b[1];
+ pwd->b[10] = pws->b[5];
+ pwd->b[12] = pws->b[9];
+ pwd->b[14] = pws->b[13];
+ pwd->b[1] = pwt->b[3];
+ pwd->b[5] = pwt->b[11];
+ pwd->b[9] = pws->b[3];
+ pwd->b[13] = pws->b[11];
+ pwd->b[3] = pwt->b[7];
+ pwd->b[11] = pws->b[7];
+ pwd->b[7] = pwt->b[15];
+ pwd->b[15] = pws->b[15];
+#endif
+}
+
+void helper_msa_pckod_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->h[3] = pwt->h[2];
+ pwd->h[1] = pwt->h[6];
+ pwd->h[7] = pws->h[2];
+ pwd->h[5] = pws->h[6];
+ pwd->h[2] = pwt->h[0];
+ pwd->h[6] = pws->h[0];
+ pwd->h[0] = pwt->h[4];
+ pwd->h[4] = pws->h[4];
+#else
+ pwd->h[0] = pwt->h[1];
+ pwd->h[2] = pwt->h[5];
+ pwd->h[4] = pws->h[1];
+ pwd->h[6] = pws->h[5];
+ pwd->h[1] = pwt->h[3];
+ pwd->h[5] = pws->h[3];
+ pwd->h[3] = pwt->h[7];
+ pwd->h[7] = pws->h[7];
+#endif
+}
+
+void helper_msa_pckod_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ pwd->w[1] = pwt->w[0];
+ pwd->w[3] = pws->w[0];
+ pwd->w[0] = pwt->w[2];
+ pwd->w[2] = pws->w[2];
+#else
+ pwd->w[0] = pwt->w[1];
+ pwd->w[2] = pws->w[1];
+ pwd->w[1] = pwt->w[3];
+ pwd->w[3] = pws->w[3];
+#endif
+}
+
+void helper_msa_pckod_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = pwt->d[1];
+ pwd->d[1] = pws->d[1];
+}
+
+
+/*
+ * Shift
+ * -----
+ *
+ * +---------------+----------------------------------------------------------+
+ * | SLL.B | Vector Shift Left (byte) |
+ * | SLL.H | Vector Shift Left (halfword) |
+ * | SLL.W | Vector Shift Left (word) |
+ * | SLL.D | Vector Shift Left (doubleword) |
+ * | SRA.B | Vector Shift Right Arithmetic (byte) |
+ * | SRA.H | Vector Shift Right Arithmetic (halfword) |
+ * | SRA.W | Vector Shift Right Arithmetic (word) |
+ * | SRA.D | Vector Shift Right Arithmetic (doubleword) |
+ * | SRAR.B | Vector Shift Right Arithmetic Rounded (byte) |
+ * | SRAR.H | Vector Shift Right Arithmetic Rounded (halfword) |
+ * | SRAR.W | Vector Shift Right Arithmetic Rounded (word) |
+ * | SRAR.D | Vector Shift Right Arithmetic Rounded (doubleword) |
+ * | SRL.B | Vector Shift Right Logical (byte) |
+ * | SRL.H | Vector Shift Right Logical (halfword) |
+ * | SRL.W | Vector Shift Right Logical (word) |
+ * | SRL.D | Vector Shift Right Logical (doubleword) |
+ * | SRLR.B | Vector Shift Right Logical Rounded (byte) |
+ * | SRLR.H | Vector Shift Right Logical Rounded (halfword) |
+ * | SRLR.W | Vector Shift Right Logical Rounded (word) |
+ * | SRLR.D | Vector Shift Right Logical Rounded (doubleword) |
+ * +---------------+----------------------------------------------------------+
+ */
+
+
+static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return arg1 << b_arg2;
+}
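+
+/*
+ * Hedged note with a hypothetical self-check: BIT_POSITION(), used
+ * above, reduces the shift operand modulo DF_BITS(df), so a
+ * byte-element shift count of 9 behaves as a shift by 1:
+ */
+static inline void msa_sll_shiftcount_sketch(void)
+{
+    /* 9 % 8 == 1, so 0x01 << 1 == 0x02 for DF_BYTE elements */
+    assert(msa_sll_df(DF_BYTE, 0x01, 9) == 0x02);
+}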
+
+void helper_msa_sll_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_sll_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_sll_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_sll_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_sll_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_sll_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_sll_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_sll_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_sll_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_sll_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_sll_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_sll_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_sll_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_sll_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_sll_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_sll_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_sll_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_sll_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_sll_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_sll_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_sll_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_sll_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_sll_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_sll_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_sll_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_sll_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_sll_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_sll_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_sll_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_sll_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_sll_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_sll_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_sll_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_sll_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return arg1 >> b_arg2;
+}
+
+void helper_msa_sra_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_sra_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_sra_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_sra_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_sra_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_sra_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_sra_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_sra_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_sra_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_sra_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_sra_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_sra_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_sra_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_sra_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_sra_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_sra_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_sra_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_sra_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_sra_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_sra_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_sra_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_sra_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_sra_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_sra_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_sra_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_sra_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_sra_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_sra_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_sra_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_sra_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_sra_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_sra_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_sra_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_sra_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ if (b_arg2 == 0) {
+ return arg1;
+ } else {
+ int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
+ return (arg1 >> b_arg2) + r_bit;
+ }
+}
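+
+/*
+ * Worked example, illustration only: SRAR adds back the last bit
+ * shifted out, rounding the discarded fraction to nearest with ties
+ * toward +infinity.
+ */
+static inline void msa_srar_rounding_sketch(void)
+{
+    /* 7 >> 2 == 1; bit 1 of 7 is set, so the rounded result is 2 */
+    assert(msa_srar_df(DF_BYTE, 7, 2) == 2);
+}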
+
+void helper_msa_srar_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_srar_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_srar_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_srar_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_srar_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_srar_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_srar_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_srar_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_srar_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_srar_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_srar_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_srar_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_srar_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_srar_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_srar_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_srar_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_srar_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_srar_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_srar_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_srar_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_srar_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_srar_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_srar_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_srar_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_srar_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_srar_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_srar_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_srar_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_srar_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_srar_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_srar_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_srar_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_srar_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_srar_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return u_arg1 >> b_arg2;
+}
+
+void helper_msa_srl_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_srl_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_srl_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_srl_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_srl_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_srl_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_srl_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_srl_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_srl_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_srl_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_srl_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_srl_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_srl_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_srl_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_srl_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_srl_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_srl_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_srl_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_srl_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_srl_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_srl_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_srl_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_srl_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_srl_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_srl_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_srl_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_srl_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_srl_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_srl_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_srl_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_srl_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_srl_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_srl_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_srl_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ if (b_arg2 == 0) {
+ return u_arg1;
+ } else {
+ uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
+ return (u_arg1 >> b_arg2) + r_bit;
+ }
+}
+
+void helper_msa_srlr_b(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->b[0] = msa_srlr_df(DF_BYTE, pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_srlr_df(DF_BYTE, pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_srlr_df(DF_BYTE, pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_srlr_df(DF_BYTE, pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_srlr_df(DF_BYTE, pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_srlr_df(DF_BYTE, pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_srlr_df(DF_BYTE, pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_srlr_df(DF_BYTE, pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_srlr_df(DF_BYTE, pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_srlr_df(DF_BYTE, pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_srlr_df(DF_BYTE, pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_srlr_df(DF_BYTE, pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_srlr_df(DF_BYTE, pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_srlr_df(DF_BYTE, pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_srlr_df(DF_BYTE, pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_srlr_df(DF_BYTE, pws->b[15], pwt->b[15]);
+}
+
+void helper_msa_srlr_h(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->h[0] = msa_srlr_df(DF_HALF, pws->h[0], pwt->h[0]);
+ pwd->h[1] = msa_srlr_df(DF_HALF, pws->h[1], pwt->h[1]);
+ pwd->h[2] = msa_srlr_df(DF_HALF, pws->h[2], pwt->h[2]);
+ pwd->h[3] = msa_srlr_df(DF_HALF, pws->h[3], pwt->h[3]);
+ pwd->h[4] = msa_srlr_df(DF_HALF, pws->h[4], pwt->h[4]);
+ pwd->h[5] = msa_srlr_df(DF_HALF, pws->h[5], pwt->h[5]);
+ pwd->h[6] = msa_srlr_df(DF_HALF, pws->h[6], pwt->h[6]);
+ pwd->h[7] = msa_srlr_df(DF_HALF, pws->h[7], pwt->h[7]);
+}
+
+void helper_msa_srlr_w(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->w[0] = msa_srlr_df(DF_WORD, pws->w[0], pwt->w[0]);
+ pwd->w[1] = msa_srlr_df(DF_WORD, pws->w[1], pwt->w[1]);
+ pwd->w[2] = msa_srlr_df(DF_WORD, pws->w[2], pwt->w[2]);
+ pwd->w[3] = msa_srlr_df(DF_WORD, pws->w[3], pwt->w[3]);
+}
+
+void helper_msa_srlr_d(CPUMIPSState *env,
+ uint32_t wd, uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ pwd->d[0] = msa_srlr_df(DF_DOUBLE, pws->d[0], pwt->d[0]);
+ pwd->d[1] = msa_srlr_df(DF_DOUBLE, pws->d[1], pwt->d[1]);
+}
+
+
+#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
+void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
+ uint32_t i8) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ DEST = OPERATION; \
+ } \
+}
+
+MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
+MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
+MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
+MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)
+
+#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
+ UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
+MSA_FN_IMM8(bmnzi_b, pwd->b[i],
+ BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
+ UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
+MSA_FN_IMM8(bmzi_b, pwd->b[i],
+ BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#define BIT_SELECT(dest, arg1, arg2, df) \
+ UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
+MSA_FN_IMM8(bseli_b, pwd->b[i],
+ BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#undef BIT_SELECT
+#undef BIT_MOVE_IF_ZERO
+#undef BIT_MOVE_IF_NOT_ZERO
+#undef MSA_FN_IMM8
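+
+/*
+ * Worked example, illustration only, of the bit-select rule the
+ * BMNZI/BMZI/BSELI macros above expand to (re-expressed with plain
+ * operators, since the macros are #undef'd at this point):
+ */
+static inline void msa_bmnzi_sketch(void)
+{
+    uint8_t dest = 0xf0, src = 0x0f, i8 = 0x33;
+
+    /* keep dest where i8 bits are 0, take src where they are 1 */
+    assert(((dest & (uint8_t)~i8) | (src & i8)) == 0xc3);
+}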
+
+#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
+
+void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t imm)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwx->b[i] = pws->b[SHF_POS(i, imm)];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwx->h[i] = pws->h[SHF_POS(i, imm)];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwx->w[i] = pws->w[SHF_POS(i, imm)];
+ }
+ break;
+ default:
+ assert(0);
+ }
+ msa_move_v(pwd, pwx);
+}
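+
+/*
+ * Worked example of SHF_POS(), for illustration: the 8-bit immediate
+ * packs four 2-bit source indices that are applied within every
+ * aligned group of four elements.  With imm == 0x1b (0b00011011) the
+ * fields decode to 3, 2, 1, 0, so SHF reverses each group of four:
+ * SHF_POS(0, 0x1b) == 3, SHF_POS(1, 0x1b) == 2 and
+ * SHF_POS(5, 0x1b) == 6 (group base 4 plus field 2).
+ */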
+
+#define MSA_BINOP_IMM_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, int32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_IMM_DF(addvi, addv)
+MSA_BINOP_IMM_DF(subvi, subv)
+MSA_BINOP_IMM_DF(ceqi, ceq)
+MSA_BINOP_IMM_DF(clei_s, cle_s)
+MSA_BINOP_IMM_DF(clei_u, cle_u)
+MSA_BINOP_IMM_DF(clti_s, clt_s)
+MSA_BINOP_IMM_DF(clti_u, clt_u)
+MSA_BINOP_IMM_DF(maxi_s, max_s)
+MSA_BINOP_IMM_DF(maxi_u, max_u)
+MSA_BINOP_IMM_DF(mini_s, min_s)
+MSA_BINOP_IMM_DF(mini_u, min_u)
+#undef MSA_BINOP_IMM_DF
+
+void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ int32_t s10)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = (int8_t)s10;
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = (int16_t)s10;
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = (int32_t)s10;
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = (int64_t)s10;
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
+{
+ return arg < M_MIN_INT(m + 1) ? M_MIN_INT(m + 1) :
+ arg > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) :
+ arg;
+}
+
+static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
+{
+ uint64_t u_arg = UNSIGNED(arg, df);
+ return u_arg < M_MAX_UINT(m + 1) ? u_arg :
+ M_MAX_UINT(m + 1);
+}
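+
+/*
+ * Illustration only: with m == 3, SAT_S clamps each element to the
+ * signed 4-bit range [-8, 7] and SAT_U to the unsigned range [0, 15].
+ * A minimal, hypothetical self-check:
+ */
+static inline void msa_sat_sketch(void)
+{
+    assert(msa_sat_s_df(DF_BYTE, 100, 3) == 7);
+    assert(msa_sat_s_df(DF_BYTE, -100, 3) == -8);
+    assert(msa_sat_u_df(DF_BYTE, 100, 3) == 15);
+}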
+
+#define MSA_BINOP_IMMU_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_IMMU_DF(slli, sll)
+MSA_BINOP_IMMU_DF(srai, sra)
+MSA_BINOP_IMMU_DF(srli, srl)
+MSA_BINOP_IMMU_DF(bclri, bclr)
+MSA_BINOP_IMMU_DF(bseti, bset)
+MSA_BINOP_IMMU_DF(bnegi, bneg)
+MSA_BINOP_IMMU_DF(sat_s, sat_s)
+MSA_BINOP_IMMU_DF(sat_u, sat_u)
+MSA_BINOP_IMMU_DF(srari, srar)
+MSA_BINOP_IMMU_DF(srlri, srlr)
+#undef MSA_BINOP_IMMU_DF
+
+#define MSA_TEROP_IMMU_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, uint32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
+ u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
+ u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
+ u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
+ u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_TEROP_IMMU_DF(binsli, binsl)
+MSA_TEROP_IMMU_DF(binsri, binsr)
+#undef MSA_TEROP_IMMU_DF
+
+#define CONCATENATE_AND_SLIDE(s, k) \
+ do { \
+ for (i = 0; i < s; i++) { \
+ v[i] = pws->b[s * k + i]; \
+ v[i + s] = pwd->b[s * k + i]; \
+ } \
+ for (i = 0; i < s; i++) { \
+ pwd->b[s * k + i] = v[i + n]; \
+ } \
+ } while (0)
+
+static inline void msa_sld_df(uint32_t df, wr_t *pwd,
+ wr_t *pws, target_ulong rt)
+{
+ uint32_t n = rt % DF_ELEMENTS(df);
+ uint8_t v[64];
+ uint32_t i, k;
+
+ switch (df) {
+ case DF_BYTE:
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
+ break;
+ case DF_HALF:
+ for (k = 0; k < 2; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
+ }
+ break;
+ case DF_WORD:
+ for (k = 0; k < 4; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
+ }
+ break;
+ case DF_DOUBLE:
+ for (k = 0; k < 8; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
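+
+/*
+ * Hedged reading of the slide above, for DF_BYTE: v holds ws in
+ * v[0..15] and the old wd in v[16..31], and the destination takes
+ * v[n..n+15].  With n == 3, for example, wd->b[0] becomes ws->b[3]
+ * and wd->b[13] becomes the old wd->b[0].  For the wider formats the
+ * same concatenate-and-slide, with n = rt % DF_ELEMENTS(df), is
+ * applied independently to each group of DF_ELEMENTS(df) bytes.
+ */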
+
+static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t q_max = DF_MAX_INT(df);
+
+ if (arg1 == q_min && arg2 == q_min) {
+ return q_max;
+ }
+ return (arg1 * arg2) >> (DF_BITS(df) - 1);
+}
+
+static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ if (arg1 == q_min && arg2 == q_min) {
+ return q_max;
+ }
+ return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
+}
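+
+/*
+ * Worked Q-format example, illustration only: halfword elements are
+ * Q15 fixed point, so 0x4000 represents 0.5 and MUL_Q computes
+ * (0x4000 * 0x4000) >> 15 == 0x2000, i.e. 0.25.  The q_min * q_min
+ * special case exists because (-1.0) * (-1.0) == +1.0 is not
+ * representable and must saturate to q_max.
+ */
+static inline void msa_mul_q_sketch(void)
+{
+    assert(msa_mul_q_df(DF_HALF, 0x4000, 0x4000) == 0x2000);
+    assert(msa_mul_q_df(DF_HALF, INT16_MIN, INT16_MIN) == INT16_MAX);
+}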
+
+#define MSA_BINOP_DF(func) \
+void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ pwd->b[0] = msa_ ## func ## _df(df, pws->b[0], pwt->b[0]); \
+ pwd->b[1] = msa_ ## func ## _df(df, pws->b[1], pwt->b[1]); \
+ pwd->b[2] = msa_ ## func ## _df(df, pws->b[2], pwt->b[2]); \
+ pwd->b[3] = msa_ ## func ## _df(df, pws->b[3], pwt->b[3]); \
+ pwd->b[4] = msa_ ## func ## _df(df, pws->b[4], pwt->b[4]); \
+ pwd->b[5] = msa_ ## func ## _df(df, pws->b[5], pwt->b[5]); \
+ pwd->b[6] = msa_ ## func ## _df(df, pws->b[6], pwt->b[6]); \
+ pwd->b[7] = msa_ ## func ## _df(df, pws->b[7], pwt->b[7]); \
+ pwd->b[8] = msa_ ## func ## _df(df, pws->b[8], pwt->b[8]); \
+ pwd->b[9] = msa_ ## func ## _df(df, pws->b[9], pwt->b[9]); \
+ pwd->b[10] = msa_ ## func ## _df(df, pws->b[10], pwt->b[10]); \
+ pwd->b[11] = msa_ ## func ## _df(df, pws->b[11], pwt->b[11]); \
+ pwd->b[12] = msa_ ## func ## _df(df, pws->b[12], pwt->b[12]); \
+ pwd->b[13] = msa_ ## func ## _df(df, pws->b[13], pwt->b[13]); \
+ pwd->b[14] = msa_ ## func ## _df(df, pws->b[14], pwt->b[14]); \
+ pwd->b[15] = msa_ ## func ## _df(df, pws->b[15], pwt->b[15]); \
+ break; \
+ case DF_HALF: \
+ pwd->h[0] = msa_ ## func ## _df(df, pws->h[0], pwt->h[0]); \
+ pwd->h[1] = msa_ ## func ## _df(df, pws->h[1], pwt->h[1]); \
+ pwd->h[2] = msa_ ## func ## _df(df, pws->h[2], pwt->h[2]); \
+ pwd->h[3] = msa_ ## func ## _df(df, pws->h[3], pwt->h[3]); \
+ pwd->h[4] = msa_ ## func ## _df(df, pws->h[4], pwt->h[4]); \
+ pwd->h[5] = msa_ ## func ## _df(df, pws->h[5], pwt->h[5]); \
+ pwd->h[6] = msa_ ## func ## _df(df, pws->h[6], pwt->h[6]); \
+ pwd->h[7] = msa_ ## func ## _df(df, pws->h[7], pwt->h[7]); \
+ break; \
+ case DF_WORD: \
+ pwd->w[0] = msa_ ## func ## _df(df, pws->w[0], pwt->w[0]); \
+ pwd->w[1] = msa_ ## func ## _df(df, pws->w[1], pwt->w[1]); \
+ pwd->w[2] = msa_ ## func ## _df(df, pws->w[2], pwt->w[2]); \
+ pwd->w[3] = msa_ ## func ## _df(df, pws->w[3], pwt->w[3]); \
+ break; \
+ case DF_DOUBLE: \
+ pwd->d[0] = msa_ ## func ## _df(df, pws->d[0], pwt->d[0]); \
+ pwd->d[1] = msa_ ## func ## _df(df, pws->d[1], pwt->d[1]); \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_DF(mul_q)
+MSA_BINOP_DF(mulr_q)
+#undef MSA_BINOP_DF
+
+void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t rt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
+}
+
+static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
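+
+/*
+ * Hedged reading of the four fixed-point helpers above: the left
+ * shift widens dest back into the product's Q-format, the product
+ * (plus a half-unit r_bit in the rounding variants) is accumulated,
+ * and the result is narrowed and clamped to [q_min, q_max].
+ */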
+
+#define MSA_TEROP_DF(func) \
+void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ pwd->b[0] = msa_ ## func ## _df(df, pwd->b[0], pws->b[0], \
+ pwt->b[0]); \
+ pwd->b[1] = msa_ ## func ## _df(df, pwd->b[1], pws->b[1], \
+ pwt->b[1]); \
+ pwd->b[2] = msa_ ## func ## _df(df, pwd->b[2], pws->b[2], \
+ pwt->b[2]); \
+ pwd->b[3] = msa_ ## func ## _df(df, pwd->b[3], pws->b[3], \
+ pwt->b[3]); \
+ pwd->b[4] = msa_ ## func ## _df(df, pwd->b[4], pws->b[4], \
+ pwt->b[4]); \
+ pwd->b[5] = msa_ ## func ## _df(df, pwd->b[5], pws->b[5], \
+ pwt->b[5]); \
+ pwd->b[6] = msa_ ## func ## _df(df, pwd->b[6], pws->b[6], \
+ pwt->b[6]); \
+ pwd->b[7] = msa_ ## func ## _df(df, pwd->b[7], pws->b[7], \
+ pwt->b[7]); \
+ pwd->b[8] = msa_ ## func ## _df(df, pwd->b[8], pws->b[8], \
+ pwt->b[8]); \
+ pwd->b[9] = msa_ ## func ## _df(df, pwd->b[9], pws->b[9], \
+ pwt->b[9]); \
+ pwd->b[10] = msa_ ## func ## _df(df, pwd->b[10], pws->b[10], \
+ pwt->b[10]); \
+ pwd->b[11] = msa_ ## func ## _df(df, pwd->b[11], pws->b[11], \
+ pwt->b[11]); \
+ pwd->b[12] = msa_ ## func ## _df(df, pwd->b[12], pws->b[12], \
+ pwt->b[12]); \
+ pwd->b[13] = msa_ ## func ## _df(df, pwd->b[13], pws->b[13], \
+ pwt->b[13]); \
+ pwd->b[14] = msa_ ## func ## _df(df, pwd->b[14], pws->b[14], \
+ pwt->b[14]); \
+ pwd->b[15] = msa_ ## func ## _df(df, pwd->b[15], pws->b[15], \
+ pwt->b[15]); \
+ break; \
+ case DF_HALF: \
+ pwd->h[0] = msa_ ## func ## _df(df, pwd->h[0], pws->h[0], pwt->h[0]); \
+ pwd->h[1] = msa_ ## func ## _df(df, pwd->h[1], pws->h[1], pwt->h[1]); \
+ pwd->h[2] = msa_ ## func ## _df(df, pwd->h[2], pws->h[2], pwt->h[2]); \
+ pwd->h[3] = msa_ ## func ## _df(df, pwd->h[3], pws->h[3], pwt->h[3]); \
+ pwd->h[4] = msa_ ## func ## _df(df, pwd->h[4], pws->h[4], pwt->h[4]); \
+ pwd->h[5] = msa_ ## func ## _df(df, pwd->h[5], pws->h[5], pwt->h[5]); \
+ pwd->h[6] = msa_ ## func ## _df(df, pwd->h[6], pws->h[6], pwt->h[6]); \
+ pwd->h[7] = msa_ ## func ## _df(df, pwd->h[7], pws->h[7], pwt->h[7]); \
+ break; \
+ case DF_WORD: \
+ pwd->w[0] = msa_ ## func ## _df(df, pwd->w[0], pws->w[0], pwt->w[0]); \
+ pwd->w[1] = msa_ ## func ## _df(df, pwd->w[1], pws->w[1], pwt->w[1]); \
+ pwd->w[2] = msa_ ## func ## _df(df, pwd->w[2], pws->w[2], pwt->w[2]); \
+ pwd->w[3] = msa_ ## func ## _df(df, pwd->w[3], pws->w[3], pwt->w[3]); \
+ break; \
+ case DF_DOUBLE: \
+ pwd->d[0] = msa_ ## func ## _df(df, pwd->d[0], pws->d[0], pwt->d[0]); \
+ pwd->d[1] = msa_ ## func ## _df(df, pwd->d[1], pws->d[1], pwt->d[1]); \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_TEROP_DF(binsl)
+MSA_TEROP_DF(binsr)
+MSA_TEROP_DF(madd_q)
+MSA_TEROP_DF(msub_q)
+MSA_TEROP_DF(maddr_q)
+MSA_TEROP_DF(msubr_q)
+#undef MSA_TEROP_DF
+
+static inline void msa_splat_df(uint32_t df, wr_t *pwd,
+ wr_t *pws, target_ulong rt)
+{
+ uint32_t n = rt % DF_ELEMENTS(df);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = pws->b[n];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = pws->h[n];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = pws->w[n];
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = pws->d[n];
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t rt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
+}
+
+#define MSA_DO_B MSA_DO(b)
+#define MSA_DO_H MSA_DO(h)
+#define MSA_DO_W MSA_DO(w)
+#define MSA_DO_D MSA_DO(d)
+
+#define MSA_LOOP_B MSA_LOOP(B)
+#define MSA_LOOP_H MSA_LOOP(H)
+#define MSA_LOOP_W MSA_LOOP(W)
+#define MSA_LOOP_D MSA_LOOP(D)
+
+#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
+#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
+#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
+#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)
+
+#define MSA_LOOP(DF) \
+ do { \
+ for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
+ MSA_DO_ ## DF; \
+ } \
+ } while (0)
+
+#define MSA_FN_DF(FUNC) \
+void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ wr_t wx, *pwx = &wx; \
+ uint32_t i; \
+ switch (df) { \
+ case DF_BYTE: \
+ MSA_LOOP_B; \
+ break; \
+ case DF_HALF: \
+ MSA_LOOP_H; \
+ break; \
+ case DF_WORD: \
+ MSA_LOOP_W; \
+ break; \
+ case DF_DOUBLE: \
+ MSA_LOOP_D; \
+ break; \
+ default: \
+ assert(0); \
+ } \
+ msa_move_v(pwd, pwx); \
+}
+
+#define MSA_LOOP_COND(DF) \
+ (DF_ELEMENTS(DF) / 2)
+
+#define Rb(pwr, i) (pwr->b[i])
+#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE) / 2])
+#define Rh(pwr, i) (pwr->h[i])
+#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF) / 2])
+#define Rw(pwr, i) (pwr->w[i])
+#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD) / 2])
+#define Rd(pwr, i) (pwr->d[i])
+#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE) / 2])
+
+#undef MSA_LOOP_COND
+
+#define MSA_LOOP_COND(DF) \
+ (DF_ELEMENTS(DF))
+
+#define MSA_DO(DF) \
+ do { \
+ uint32_t n = DF_ELEMENTS(df); \
+ uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
+ pwx->DF[i] = \
+ (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
+ } while (0)
+MSA_FN_DF(vshf_df)
+#undef MSA_DO
+#undef MSA_LOOP_COND
+#undef MSA_FN_DF
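+
+/*
+ * Illustration of the VSHF selection rule expanded above: each
+ * destination element uses its own previous value as a control word.
+ * Bits 6..7 set force a zero result; otherwise the low six bits,
+ * taken modulo 2 * n, index the wt:ws concatenation, with 0..n-1
+ * selecting from wt and n..2n-1 from ws.  For DF_BYTE (n == 16), a
+ * control byte of 0x00 picks wt->b[0], 0x11 picks ws->b[1], and 0x80
+ * yields 0.
+ */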
+
+
+void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_sld_df(df, pwd, pws, n);
+}
+
+void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_splat_df(df, pwd, pws, n);
+}
+
+void helper_msa_copy_s_b(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 16;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 8) {
+ n = 8 - n - 1;
+ } else {
+ n = 24 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
+}
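+
+/*
+ * Hedged note on the HOST_WORDS_BIGENDIAN remapping above and in the
+ * helpers that follow: wr_t elements are stored in the little-endian
+ * layout of two 64-bit halves, so on a big-endian host each half is
+ * index-reversed.  Architectural byte n maps to 8 - n - 1 in the low
+ * half and 24 - n - 1 in the high half (n == 8 maps to 15), and the
+ * same pattern recurs scaled to halfword and word elements.
+ */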
+
+void helper_msa_copy_s_h(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 8;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 4) {
+ n = 4 - n - 1;
+ } else {
+ n = 12 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
+}
+
+void helper_msa_copy_s_w(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 4;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 2) {
+ n = 2 - n - 1;
+ } else {
+ n = 6 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
+}
+
+void helper_msa_copy_s_d(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 2;
+ env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
+}
+
+void helper_msa_copy_u_b(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 16;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 8) {
+ n = 8 - n - 1;
+ } else {
+ n = 24 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
+}
+
+void helper_msa_copy_u_h(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 8;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 4) {
+ n = 4 - n - 1;
+ } else {
+ n = 12 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
+}
+
+void helper_msa_copy_u_w(CPUMIPSState *env, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= 4;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 2) {
+ n = 2 - n - 1;
+ } else {
+ n = 6 - n - 1;
+ }
+#endif
+ env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
+}
+
+void helper_msa_insert_b(CPUMIPSState *env, uint32_t wd,
+ uint32_t rs_num, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ target_ulong rs = env->active_tc.gpr[rs_num];
+ n %= 16;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 8) {
+ n = 8 - n - 1;
+ } else {
+ n = 24 - n - 1;
+ }
+#endif
+ pwd->b[n] = (int8_t)rs;
+}
+
+void helper_msa_insert_h(CPUMIPSState *env, uint32_t wd,
+ uint32_t rs_num, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ target_ulong rs = env->active_tc.gpr[rs_num];
+ n %= 8;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 4) {
+ n = 4 - n - 1;
+ } else {
+ n = 12 - n - 1;
+ }
+#endif
+ pwd->h[n] = (int16_t)rs;
+}
+
+void helper_msa_insert_w(CPUMIPSState *env, uint32_t wd,
+ uint32_t rs_num, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ target_ulong rs = env->active_tc.gpr[rs_num];
+ n %= 4;
+#if defined(HOST_WORDS_BIGENDIAN)
+ if (n < 2) {
+ n = 2 - n - 1;
+ } else {
+ n = 6 - n - 1;
+ }
+#endif
+ pwd->w[n] = (int32_t)rs;
+}
+
+void helper_msa_insert_d(CPUMIPSState *env, uint32_t wd,
+ uint32_t rs_num, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ target_ulong rs = env->active_tc.gpr[rs_num];
+ n %= 2;
+ pwd->d[n] = (int64_t)rs;
+}
+
+void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ switch (df) {
+ case DF_BYTE:
+ pwd->b[n] = (int8_t)pws->b[0];
+ break;
+ case DF_HALF:
+ pwd->h[n] = (int16_t)pws->h[0];
+ break;
+ case DF_WORD:
+ pwd->w[n] = (int32_t)pws->w[0];
+ break;
+ case DF_DOUBLE:
+ pwd->d[n] = (int64_t)pws->d[0];
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
+{
+ switch (cd) {
+ case 0:
+ break;
+ case 1:
+ env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
+ restore_msa_fp_status(env);
+ /* check exception */
+ if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
+ & GET_FP_CAUSE(env->active_tc.msacsr)) {
+ do_raise_exception(env, EXCP_MSAFPE, GETPC());
+ }
+ break;
+ }
+}
+
+target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
+{
+ switch (cs) {
+ case 0:
+ return env->msair;
+ case 1:
+ return env->active_tc.msacsr & MSACSR_MASK;
+ }
+ return 0;
+}
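+
+/*
+ * Sketch of the control-register pair above, for orientation only:
+ * CTCMSA to register 1 installs a new MSACSR and traps immediately if
+ * the write enables an already-pending Cause bit (Unimplemented
+ * always traps); CFCMSA reads MSAIR (register 0) or the masked MSACSR
+ * (register 1), with every other register number reading as zero.
+ */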
+
+void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t rs)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+#define FLOAT_ONE32 make_float32(0x3f8 << 20)
+#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
+
+#define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220)
+ /* 0x7c20 */
+#define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020)
+ /* 0x7f800020 */
+#define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL)
+ /* 0x7ff0000000000020 */
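+
+/*
+ * Hedged note: assuming the usual default-NaN encodings, each
+ * FLOAT_SNAN macro XORs away the quiet bit (the top mantissa bit) and
+ * sets a low payload bit, producing the signaling-NaN patterns quoted
+ * in the comments above while keeping the mantissa nonzero.
+ */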
+
+static inline void clear_msacsr_cause(CPUMIPSState *env)
+{
+ SET_FP_CAUSE(env->active_tc.msacsr, 0);
+}
+
+static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr)
+{
+ if ((GET_FP_CAUSE(env->active_tc.msacsr) &
+ (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
+ UPDATE_FP_FLAGS(env->active_tc.msacsr,
+ GET_FP_CAUSE(env->active_tc.msacsr));
+ } else {
+ do_raise_exception(env, EXCP_MSAFPE, retaddr);
+ }
+}
+
+/* Flush-to-zero use cases for update_msacsr() */
+#define CLEAR_FS_UNDERFLOW 1
+#define CLEAR_IS_INEXACT 2
+#define RECIPROCAL_INEXACT 4
+
+
+static inline int ieee_to_mips_xcpt_msa(int ieee_xcpt)
+{
+ int mips_xcpt = 0;
+
+ if (ieee_xcpt & float_flag_invalid) {
+ mips_xcpt |= FP_INVALID;
+ }
+ if (ieee_xcpt & float_flag_overflow) {
+ mips_xcpt |= FP_OVERFLOW;
+ }
+ if (ieee_xcpt & float_flag_underflow) {
+ mips_xcpt |= FP_UNDERFLOW;
+ }
+ if (ieee_xcpt & float_flag_divbyzero) {
+ mips_xcpt |= FP_DIV0;
+ }
+ if (ieee_xcpt & float_flag_inexact) {
+ mips_xcpt |= FP_INEXACT;
+ }
+
+ return mips_xcpt;
+}
+
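+/*
+ * Fold the pending softfloat exception flags into MIPS cause bits,
+ * apply the flush-to-zero and action-specific adjustments, and
+ * accumulate the result into the MSACSR Cause field. Returns the
+ * MIPS exception flags raised by this operation.
+ */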
+static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
+{
+ int ieee_exception_flags;
+ int mips_exception_flags = 0;
+ int cause;
+ int enable;
+
+ ieee_exception_flags = get_float_exception_flags(
+ &env->active_tc.msa_fp_status);
+
+ /* QEMU softfloat does not signal all underflow cases */
+ if (denormal) {
+ ieee_exception_flags |= float_flag_underflow;
+ }
+ if (ieee_exception_flags) {
+ mips_exception_flags = ieee_to_mips_xcpt_msa(ieee_exception_flags);
+ }
+ enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
+
+ /* Set Inexact (I) when flushing inputs to zero */
+ if ((ieee_exception_flags & float_flag_input_denormal) &&
+ (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
+ if (action & CLEAR_IS_INEXACT) {
+ mips_exception_flags &= ~FP_INEXACT;
+ } else {
+ mips_exception_flags |= FP_INEXACT;
+ }
+ }
+
+ /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
+ if ((ieee_exception_flags & float_flag_output_denormal) &&
+ (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
+ mips_exception_flags |= FP_INEXACT;
+ if (action & CLEAR_FS_UNDERFLOW) {
+ mips_exception_flags &= ~FP_UNDERFLOW;
+ } else {
+ mips_exception_flags |= FP_UNDERFLOW;
+ }
+ }
+
+ /* Set Inexact (I) when Overflow (O) is not enabled */
+ if ((mips_exception_flags & FP_OVERFLOW) != 0 &&
+ (enable & FP_OVERFLOW) == 0) {
+ mips_exception_flags |= FP_INEXACT;
+ }
+
+ /* Clear Exact Underflow when Underflow (U) is not enabled */
+ if ((mips_exception_flags & FP_UNDERFLOW) != 0 &&
+ (enable & FP_UNDERFLOW) == 0 &&
+ (mips_exception_flags & FP_INEXACT) == 0) {
+ mips_exception_flags &= ~FP_UNDERFLOW;
+ }
+
+ /*
+ * Reciprocal operations set only Inexact when valid and not
+ * divide by zero
+ */
+ if ((action & RECIPROCAL_INEXACT) &&
+ (mips_exception_flags & (FP_INVALID | FP_DIV0)) == 0) {
+ mips_exception_flags = FP_INEXACT;
+ }
+
+ cause = mips_exception_flags & enable; /* all current enabled exceptions */
+
+ if (cause == 0) {
+ /*
+ * No enabled exception, update the MSACSR Cause
+ * with all current exceptions
+ */
+ SET_FP_CAUSE(env->active_tc.msacsr,
+ (GET_FP_CAUSE(env->active_tc.msacsr) | mips_exception_flags));
+ } else {
+ /* Current exceptions are enabled */
+ if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
+ /*
+ * Exception(s) will trap, update MSACSR Cause
+ * with all enabled exceptions
+ */
+ SET_FP_CAUSE(env->active_tc.msacsr,
+ (GET_FP_CAUSE(env->active_tc.msacsr) | mips_exception_flags));
+ }
+ }
+
+ return mips_exception_flags;
+}
+
+static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
+{
+ int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
+ return c & enable;
+}
+
+static inline float16 float16_from_float32(int32_t a, bool ieee,
+ float_status *status)
+{
+ float16 f_val;
+
+ f_val = float32_to_float16((float32)a, ieee, status);
+
+ return a < 0 ? (f_val | (1 << 15)) : f_val;
+}
+
+static inline float32 float32_from_float64(int64_t a, float_status *status)
+{
+ float32 f_val;
+
+ f_val = float64_to_float32((float64)a, status);
+
+ return a < 0 ? (f_val | (1U << 31)) : f_val;
+}
+
+static inline float32 float32_from_float16(int16_t a, bool ieee,
+ float_status *status)
+{
+ float32 f_val;
+
+ f_val = float16_to_float32((float16)a, ieee, status);
+
+ return a < 0 ? (f_val | (1U << 31)) : f_val;
+}
+
+static inline float64 float64_from_float32(int32_t a, float_status *status)
+{
+ float64 f_val;
+
+ f_val = float32_to_float64((float32)a, status);
+
+ return a < 0 ? (f_val | (1ULL << 63)) : f_val;
+}
+
+static inline float32 float32_from_q16(int16_t a, float_status *status)
+{
+ float32 f_val;
+
+ /* conversion as integer and scaling */
+ f_val = int32_to_float32(a, status);
+ f_val = float32_scalbn(f_val, -15, status);
+
+ return f_val;
+}
+
+static inline float64 float64_from_q32(int32_t a, float_status *status)
+{
+ float64 f_val;
+
+ /* conversion as integer and scaling */
+ f_val = int32_to_float64(a, status);
+ f_val = float64_scalbn(f_val, -31, status);
+
+ return f_val;
+}
+
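+/* Convert a float32 to Q15 fixed point: scale by 2^15, then saturate to [-0x8000, 0x7fff]. */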
+static inline int16_t float32_to_q16(float32 a, float_status *status)
+{
+ int32_t q_val;
+ int32_t q_min = 0xffff8000;
+ int32_t q_max = 0x00007fff;
+
+ int ieee_ex;
+
+ if (float32_is_any_nan(a)) {
+ float_raise(float_flag_invalid, status);
+ return 0;
+ }
+
+ /* scaling */
+ a = float32_scalbn(a, 15, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow), status);
+
+ if (ieee_ex & float_flag_overflow) {
+ float_raise(float_flag_inexact, status);
+ return (int32_t)a < 0 ? q_min : q_max;
+ }
+
+ /* conversion to int */
+ q_val = float32_to_int32(a, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow), status);
+
+ if (ieee_ex & float_flag_invalid) {
+ set_float_exception_flags(ieee_ex & (~float_flag_invalid), status);
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)a < 0 ? q_min : q_max;
+ }
+
+ if (q_val < q_min) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int16_t)q_min;
+ }
+
+ if (q_max < q_val) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int16_t)q_max;
+ }
+
+ return (int16_t)q_val;
+}
+
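+/* Convert a float64 to Q31 fixed point: scale by 2^31, then saturate to [-0x80000000, 0x7fffffff]. */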
+static inline int32_t float64_to_q32(float64 a, float_status *status)
+{
+ int64_t q_val;
+ int64_t q_min = 0xffffffff80000000LL;
+ int64_t q_max = 0x000000007fffffffLL;
+
+ int ieee_ex;
+
+ if (float64_is_any_nan(a)) {
+ float_raise(float_flag_invalid, status);
+ return 0;
+ }
+
+ /* scaling */
+ a = float64_scalbn(a, 31, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow), status);
+
+ if (ieee_ex & float_flag_overflow) {
+ float_raise(float_flag_inexact, status);
+ return (int64_t)a < 0 ? q_min : q_max;
+ }
+
+ /* conversion to integer */
+ q_val = float64_to_int64(a, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow), status);
+
+ if (ieee_ex & float_flag_invalid) {
+ set_float_exception_flags(ieee_ex & (~float_flag_invalid), status);
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int64_t)a < 0 ? q_min : q_max;
+ }
+
+ if (q_val < q_min) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)q_min;
+ }
+
+ if (q_max < q_val) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)q_max;
+ }
+
+ return (int32_t)q_val;
+}
+
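+/*
+ * Evaluate a floating-point comparison: all-ones on true, zero on
+ * false. If an enabled exception was raised, encode its cause bits
+ * in the low six bits of the signalling NaN pattern instead.
+ */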
+#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ int64_t cond; \
+ set_float_exception_flags(0, status); \
+ if (!QUIET) { \
+ cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ } else { \
+ cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
+ } \
+ DEST = cond ? M_MAX_UINT(BITS) : 0; \
+ c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
+ if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
+ DEST = 0; \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } while (0)
+
+static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
+ quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
+ quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_af(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_un(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_eq(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ueq(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_lt(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ult(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_le(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ule(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_af(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_un(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_eq(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ueq(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_lt(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ult(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_le(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ule(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_or(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_une(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ne(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_or(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_une(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ne(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
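+/* Stub predicates so that IS_DENORMAL(x, 16) below is always false for half precision. */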
+#define float16_is_zero(ARG) 0
+#define float16_is_zero_or_denormal(ARG) 0
+
+#define IS_DENORMAL(ARG, BITS) \
+ (!float ## BITS ## _is_zero(ARG) \
+ && float ## BITS ## _is_zero_or_denormal(ARG))
+
+#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
+ pws->w[i], pwt->w[i], 0, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
+ pws->d[i], pwt->d[i], 0, 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
+ pws->w[i], pwt->w[i],
+ float_muladd_negate_product, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
+ pws->d[i], pwt->d[i],
+ float_muladd_negate_product, 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
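+ /* Clamp the scale operand; anything beyond these bounds already overflows or underflows, and clamping keeps scalbn's int argument in range. */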
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
+ pwt->w[i] > 0x200 ? 0x200 :
+ pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
+ 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
+ pwt->d[i] > 0x1000 ? 0x1000 :
+ pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
+ 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /*
+ * Half precision floats come in two formats: standard
+ * IEEE and "ARM" format. The latter gains extra exponent
+ * range by omitting the NaN/Inf encodings.
+ */
+ bool ieee = true;
+
+ MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
+ MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
+ MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
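+/* Unary op whose destination element (XBITS) is narrower than its source (BITS). */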
+#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
+ MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
+ MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
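+/* True when ARG1 is a number and ARG2 is a quiet NaN. */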
+#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \
+ !float ## BITS ## _is_any_nan(ARG1) \
+ && float ## BITS ## _is_quiet_nan(ARG2, STATUS)
+
+#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ c = update_msacsr(env, 0, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
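+/*
+ * Min/max by magnitude: F decides on the absolute values, G is the
+ * opposite op. A quiet-NaN operand is first replaced by the numeric
+ * one, then X receives whichever candidate the magnitude compare
+ * selects.
+ */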
+#define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \
+ do { \
+ uint## BITS ##_t S = _S, T = _T; \
+ uint## BITS ##_t as, at, xs, xt, xd; \
+ if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \
+ T = S; \
+ } else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \
+ S = T; \
+ } \
+ as = float## BITS ##_abs(S); \
+ at = float## BITS ##_abs(T); \
+ MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \
+ MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \
+ MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \
+ X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \
+ } while (0)
+
+void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ clear_msacsr_cause(env);
+
+ if (df == DF_WORD) {
+
+ if (NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pws->w[0], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[0], min, pwt->w[0], pwt->w[0], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[0], min, pws->w[0], pwt->w[0], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pws->w[1], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[1], min, pwt->w[1], pwt->w[1], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[1], min, pws->w[1], pwt->w[1], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pws->w[2], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[2], min, pwt->w[2], pwt->w[2], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[2], min, pws->w[2], pwt->w[2], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pws->w[3], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[3], min, pwt->w[3], pwt->w[3], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[3], min, pws->w[3], pwt->w[3], 32);
+ }
+
+ } else if (df == DF_DOUBLE) {
+
+ if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pws->d[0], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[0], min, pwt->d[0], pwt->d[0], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[0], min, pws->d[0], pwt->d[0], 64);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pws->d[1], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[1], min, pwt->d[1], pwt->d[1], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[1], min, pws->d[1], pwt->d[1], 64);
+ }
+
+ } else {
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ clear_msacsr_cause(env);
+
+ if (df == DF_WORD) {
+ FMAXMIN_A(min, max, pwx->w[0], pws->w[0], pwt->w[0], 32, status);
+ FMAXMIN_A(min, max, pwx->w[1], pws->w[1], pwt->w[1], 32, status);
+ FMAXMIN_A(min, max, pwx->w[2], pws->w[2], pwt->w[2], 32, status);
+ FMAXMIN_A(min, max, pwx->w[3], pws->w[3], pwt->w[3], 32, status);
+ } else if (df == DF_DOUBLE) {
+ FMAXMIN_A(min, max, pwx->d[0], pws->d[0], pwt->d[0], 64, status);
+ FMAXMIN_A(min, max, pwx->d[1], pws->d[1], pwt->d[1], 64, status);
+ } else {
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ clear_msacsr_cause(env);
+
+ if (df == DF_WORD) {
+
+ if (NUMBER_QNAN_PAIR(pws->w[0], pwt->w[0], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pws->w[0], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[0], pws->w[0], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[0], max, pwt->w[0], pwt->w[0], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[0], max, pws->w[0], pwt->w[0], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[1], pwt->w[1], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pws->w[1], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[1], pws->w[1], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[1], max, pwt->w[1], pwt->w[1], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[1], max, pws->w[1], pwt->w[1], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[2], pwt->w[2], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pws->w[2], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[2], pws->w[2], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[2], max, pwt->w[2], pwt->w[2], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[2], max, pws->w[2], pwt->w[2], 32);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->w[3], pwt->w[3], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pws->w[3], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[3], pws->w[3], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[3], max, pwt->w[3], pwt->w[3], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[3], max, pws->w[3], pwt->w[3], 32);
+ }
+
+ } else if (df == DF_DOUBLE) {
+
+ if (NUMBER_QNAN_PAIR(pws->d[0], pwt->d[0], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pws->d[0], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[0], pws->d[0], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[0], max, pwt->d[0], pwt->d[0], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[0], max, pws->d[0], pwt->d[0], 64);
+ }
+
+ if (NUMBER_QNAN_PAIR(pws->d[1], pwt->d[1], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pws->d[1], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[1], pws->d[1], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[1], max, pwt->d[1], pwt->d[1], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[1], max, pws->d[1], pwt->d[1], 64);
+ }
+
+ } else {
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+
+ clear_msacsr_cause(env);
+
+ if (df == DF_WORD) {
+ FMAXMIN_A(max, min, pwx->w[0], pws->w[0], pwt->w[0], 32, status);
+ FMAXMIN_A(max, min, pwx->w[1], pws->w[1], pwt->w[1], 32, status);
+ FMAXMIN_A(max, min, pwx->w[2], pws->w[2], pwt->w[2], 32, status);
+ FMAXMIN_A(max, min, pwx->w[3], pws->w[3], pwt->w[3], 32, status);
+ } else if (df == DF_DOUBLE) {
+ FMAXMIN_A(max, min, pwx->d[0], pws->d[0], pwt->d[0], 64, status);
+ FMAXMIN_A(max, min, pwx->d[1], pws->d[1], pwt->d[1], 64, status);
+ } else {
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
+ uint32_t wd, uint32_t ws)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ if (df == DF_WORD) {
+ pwd->w[0] = float_class_s(pws->w[0], status);
+ pwd->w[1] = float_class_s(pws->w[1], status);
+ pwd->w[2] = float_class_s(pws->w[2], status);
+ pwd->w[3] = float_class_s(pws->w[3], status);
+ } else if (df == DF_DOUBLE) {
+ pwd->d[0] = float_class_d(pws->d[0], status);
+ pwd->d[1] = float_class_d(pws->d[1], status);
+ } else {
+ assert(0);
+ }
+}
+
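+/* Like MSA_FLOAT_UNOP, but a NaN input yields 0 unless an enabled exception traps; used by the float-to-int conversions. */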
+#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } else if (float ## BITS ## _is_any_nan(ARG)) { \
+ DEST = 0; \
+ } \
+ } while (0)
+
+void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
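+/* Reciprocal via division; flags are forced to Inexact unless the operand is infinite or the result is a quiet NaN (see RECIPROCAL_INEXACT). */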
+#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \
+ c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \
+ float ## BITS ## _is_quiet_nan(DEST, status) ? \
+ 0 : RECIPROCAL_INEXACT, \
+ IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
+ &env->active_tc.msa_fp_status), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
+ &env->active_tc.msa_fp_status), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
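+/*
+ * FLOG2: compute log2 with round-down mode, round the result to an
+ * integral value, then restore the rounding mode from MSACSR and
+ * discard the intermediate Inexact flag.
+ */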
+#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ set_float_rounding_mode(float_round_down, status); \
+ DEST = float ## BITS ## _ ## log2(ARG, status); \
+ DEST = float ## BITS ## _ ## round_to_int(DEST, status); \
+ set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \
+ MSACSR_RM_MASK) >> MSACSR_RM], \
+ status); \
+ \
+ set_float_exception_flags(get_float_exception_flags(status) & \
+ (~float_flag_inexact), \
+ status); \
+ \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /*
+ * Half precision floats come in two formats: standard
+ * IEEE and "ARM" format. The latter gains extra exponent
+ * range by omitting the NaN/Inf encodings.
+ */
+ bool ieee = true;
+
+ MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /*
+ * Half precision floats come in two formats: standard
+ * IEEE and "ARM" format. The latter gains extra exponent
+ * range by omitting the NaN/Inf encodings.
+ */
+ bool ieee = true;
+
+ MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define float32_from_int32 int32_to_float32
+#define float32_from_uint32 uint32_to_float32
+
+#define float64_from_int64 int64_to_float64
+#define float64_from_uint64 uint64_to_float64
+
+void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+/* Width in bits of one element of data format df */
+#define DF_BITS(df) (1 << ((df) + 3))
+
+/* Number of elements of data format df in a 128-bit MSA register */
+#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
+
+#if !defined(CONFIG_USER_ONLY)
+#define MEMOP_IDX(DF) \
+ MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
+ cpu_mmu_index(env, false));
+#else
+#define MEMOP_IDX(DF)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
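+/* Swap each of the four halfwords within a 64-bit value. */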
+static inline uint64_t bswap16x4(uint64_t x)
+{
+ uint64_t m = 0x00ff00ff00ff00ffull;
+ return ((x & m) << 8) | ((x >> 8) & m);
+}
+
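+/* Swap the two words within a 64-bit value. */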
+static inline uint64_t bswap32x2(uint64_t x)
+{
+ return ror64(bswap64(x), 32);
+}
+#endif
+
+void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ /* Load 8 bytes at a time. Vector element ordering makes this LE. */
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+ pwd->d[0] = d0;
+ pwd->d[1] = d1;
+}
+
+void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ /*
+ * Load 8 bytes at a time. Use little-endian load, then for
+ * big-endian target, we must then swap the four halfwords.
+ */
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+ d0 = bswap16x4(d0);
+ d1 = bswap16x4(d1);
+#endif
+ pwd->d[0] = d0;
+ pwd->d[1] = d1;
+}
+
+void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ /*
+ * Load 8 bytes at a time. Use little-endian load, then for
+ * big-endian target, we must then bswap the two words.
+ */
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+ d0 = bswap32x2(d0);
+ d1 = bswap32x2(d1);
+#endif
+ pwd->d[0] = d0;
+ pwd->d[1] = d1;
+}
+
+void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ d0 = cpu_ldq_data_ra(env, addr + 0, ra);
+ d1 = cpu_ldq_data_ra(env, addr + 8, ra);
+ pwd->d[0] = d0;
+ pwd->d[1] = d1;
+}
+
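+/* True if a 16-byte (MSA_WRLEN / 8) access at x spans two pages. */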
+#define MSA_PAGESPAN(x) \
+ ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)
+
+static inline void ensure_writable_pages(CPUMIPSState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ /* FIXME: Probe the actual accesses (pass and use a size) */
+ if (unlikely(MSA_PAGESPAN(addr))) {
+ /* first page */
+ probe_write(env, addr, 0, mmu_idx, retaddr);
+ /* second page */
+ addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ probe_write(env, addr, 0, mmu_idx, retaddr);
+ }
+}
+
+void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+
+ ensure_writable_pages(env, addr, mmu_idx, ra);
+
+ /* Store 8 bytes at a time. Vector element ordering makes this LE. */
+ cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
+ cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
+}
+
+void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ ensure_writable_pages(env, addr, mmu_idx, ra);
+
+ /* Store 8 bytes at a time. See helper_msa_ld_h. */
+ d0 = pwd->d[0];
+ d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+ d0 = bswap16x4(d0);
+ d1 = bswap16x4(d1);
+#endif
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
+}
+
+void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ uint64_t d0, d1;
+
+ ensure_writable_pages(env, addr, mmu_idx, ra);
+
+ /* Store 8 bytes at a time. See helper_msa_ld_w. */
+ d0 = pwd->d[0];
+ d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+ d0 = bswap32x2(d0);
+ d1 = bswap32x2(d1);
+#endif
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
+}
+
+void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
+ target_ulong addr)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+
+ ensure_writable_pages(env, addr, mmu_idx, ra);
+
+ cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
+ cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
+}
diff --git a/target/mips/tcg/msa_helper.h.inc b/target/mips/tcg/msa_helper.h.inc
new file mode 100644
index 000000000..4963d1553
--- /dev/null
+++ b/target/mips/tcg/msa_helper.h.inc
@@ -0,0 +1,443 @@
+/*
+ * MIPS SIMD Architecture Module (MSA) helpers for QEMU.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+DEF_HELPER_3(msa_nloc_b, void, env, i32, i32)
+DEF_HELPER_3(msa_nloc_h, void, env, i32, i32)
+DEF_HELPER_3(msa_nloc_w, void, env, i32, i32)
+DEF_HELPER_3(msa_nloc_d, void, env, i32, i32)
+
+DEF_HELPER_3(msa_nlzc_b, void, env, i32, i32)
+DEF_HELPER_3(msa_nlzc_h, void, env, i32, i32)
+DEF_HELPER_3(msa_nlzc_w, void, env, i32, i32)
+DEF_HELPER_3(msa_nlzc_d, void, env, i32, i32)
+
+DEF_HELPER_3(msa_pcnt_b, void, env, i32, i32)
+DEF_HELPER_3(msa_pcnt_h, void, env, i32, i32)
+DEF_HELPER_3(msa_pcnt_w, void, env, i32, i32)
+DEF_HELPER_3(msa_pcnt_d, void, env, i32, i32)
+
+DEF_HELPER_4(msa_binsl_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsl_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsl_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsl_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_binsr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_binsr_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_bmnz_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_bclr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bclr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bclr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bclr_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_bneg_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bneg_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bneg_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bneg_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_bset_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bset_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bset_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bset_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_add_a_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_add_a_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_add_a_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_add_a_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_adds_a_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_a_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_a_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_a_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_adds_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_adds_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_adds_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_addv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_addv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_addv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_addv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_hadd_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hadd_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hadd_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_hadd_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hadd_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hadd_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_ave_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_ave_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ave_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_aver_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_aver_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_aver_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_ceq_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ceq_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ceq_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ceq_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_cle_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_cle_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_cle_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_clt_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_clt_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_clt_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_div_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_div_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_div_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_max_a_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_a_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_a_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_a_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_max_u_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_a_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_a_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_a_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_a_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_min_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_mod_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_mod_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mod_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_maddv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_maddv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_maddv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_maddv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_msubv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_msubv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_msubv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_msubv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_mulv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mulv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mulv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_mulv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_asub_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_asub_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_asub_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_hsub_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hsub_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hsub_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_hsub_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hsub_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_hsub_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_subs_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_subs_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subs_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_subsus_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsus_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsus_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsus_u_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_subsuu_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsuu_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsuu_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subsuu_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_subv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_subv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_ilvev_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvev_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvev_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvev_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvod_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvod_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvod_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvod_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvl_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvl_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvl_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvl_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ilvr_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_pckev_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckev_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckev_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckev_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckod_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckod_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckod_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pckod_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_sll_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sll_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sll_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sll_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_sra_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sra_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sra_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_sra_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_srar_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srar_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srar_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srar_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_srl_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srl_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srl_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srl_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_srlr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srlr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srlr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_srlr_d, void, env, i32, i32, i32)
+
+DEF_HELPER_3(msa_move_v, void, env, i32, i32)
+
+DEF_HELPER_4(msa_andi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_xori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmnzi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmzi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bseli_b, void, env, i32, i32, i32)
+DEF_HELPER_5(msa_shf_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_addvi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_subvi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_maxi_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_maxi_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_mini_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_mini_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_ceqi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clti_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clti_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clei_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clei_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_4(msa_ldi_df, void, env, i32, i32, s32)
+
+DEF_HELPER_5(msa_slli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srai_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bclri_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bseti_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bnegi_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsri_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sat_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sat_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srari_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srlri_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_binsl_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsr_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_4(msa_dotp_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dotp_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dotp_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dotp_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dotp_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dotp_u_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpadd_u_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_dpsub_u_d, void, env, i32, i32, i32)
+DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_sldi_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_splati_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_insve_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_3(msa_ctcmsa, void, env, tl, i32)
+DEF_HELPER_2(msa_cfcmsa, tl, env, i32)
+
+DEF_HELPER_5(msa_fcaf_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcun_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fceq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcueq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fclt_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcult_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcle_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcule_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsaf_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsun_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fseq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsueq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fslt_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsult_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsle_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsule_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fadd_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsub_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmul_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fdiv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmadd_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmsub_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fexp2_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fexdo_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ftq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmin_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmin_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmax_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmax_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcor_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcune_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcne_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mul_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_madd_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_msub_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsor_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsune_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsne_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mulr_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_maddr_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_msubr_q_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_4(msa_fill_df, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_copy_s_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_s_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_s_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_s_d, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_u_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_u_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_copy_u_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_insert_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_insert_h, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_insert_w, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_insert_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_fclass_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftrunc_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftrunc_u_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fsqrt_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frsqrt_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frcp_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frint_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_flog2_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fexupl_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fexupr_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffql_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffqr_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftint_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftint_u_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffint_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffint_u_df, void, env, i32, i32, i32)
+
+#define MSALDST_PROTO(type) \
+DEF_HELPER_3(msa_ld_ ## type, void, env, i32, tl) \
+DEF_HELPER_3(msa_st_ ## type, void, env, i32, tl)
+MSALDST_PROTO(b)
+MSALDST_PROTO(h)
+MSALDST_PROTO(w)
+MSALDST_PROTO(d)
+#undef MSALDST_PROTO
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
new file mode 100644
index 000000000..7576b3ed8
--- /dev/null
+++ b/target/mips/tcg/msa_translate.c
@@ -0,0 +1,799 @@
+/*
+ * MIPS SIMD Architecture (MSA) translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ * Copyright (c) 2020 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+#include "fpu_helper.h"
+#include "internal.h"
+
+static int elm_n(DisasContext *ctx, int x);
+static int elm_df(DisasContext *ctx, int x);
+static int bit_m(DisasContext *ctx, int x);
+static int bit_df(DisasContext *ctx, int x);
+
+static inline int plus_1(DisasContext *s, int x)
+{
+ return x + 1;
+}
+
+static inline int plus_2(DisasContext *s, int x)
+{
+ return x + 2;
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-msa.c.inc"
+
+static const char msaregnames[][7] = {
+ "w0.d0", "w0.d1", "w1.d0", "w1.d1",
+ "w2.d0", "w2.d1", "w3.d0", "w3.d1",
+ "w4.d0", "w4.d1", "w5.d0", "w5.d1",
+ "w6.d0", "w6.d1", "w7.d0", "w7.d1",
+ "w8.d0", "w8.d1", "w9.d0", "w9.d1",
+ "w10.d0", "w10.d1", "w11.d0", "w11.d1",
+ "w12.d0", "w12.d1", "w13.d0", "w13.d1",
+ "w14.d0", "w14.d1", "w15.d0", "w15.d1",
+ "w16.d0", "w16.d1", "w17.d0", "w17.d1",
+ "w18.d0", "w18.d1", "w19.d0", "w19.d1",
+ "w20.d0", "w20.d1", "w21.d0", "w21.d1",
+ "w22.d0", "w22.d1", "w23.d0", "w23.d1",
+ "w24.d0", "w24.d1", "w25.d0", "w25.d1",
+ "w26.d0", "w26.d1", "w27.d0", "w27.d1",
+ "w28.d0", "w28.d1", "w29.d0", "w29.d1",
+ "w30.d0", "w30.d1", "w31.d0", "w31.d1",
+};
+
+/* Encoding of Operation Field (must be indexed by CPUMIPSMSADataFormat) */
+struct dfe {
+ int start;
+ int length;
+ uint32_t mask;
+};
+
+/*
+ * Extract immediate from df/{m,n} format (used by ELM & BIT instructions).
+ * Returns the immediate value, or -1 if the format does not match.
+ */
+static int df_extract_val(DisasContext *ctx, int x, const struct dfe *s)
+{
+ for (unsigned i = 0; i < 4; i++) {
+ if (extract32(x, s[i].start, s[i].length) == s[i].mask) {
+ return extract32(x, 0, s[i].start);
+ }
+ }
+ return -1;
+}
+
+/*
+ * Extract DataField from df/{m,n} format (used by ELM & BIT instructions).
+ * Returns the DataField, or -1 if the format does not match.
+ */
+static int df_extract_df(DisasContext *ctx, int x, const struct dfe *s)
+{
+ for (unsigned i = 0; i < 4; i++) {
+ if (extract32(x, s[i].start, s[i].length) == s[i].mask) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static const struct dfe df_elm[] = {
+ /* Table 3.26 ELM Instruction Format */
+ [DF_BYTE] = {4, 2, 0b00},
+ [DF_HALF] = {3, 3, 0b100},
+ [DF_WORD] = {2, 4, 0b1100},
+ [DF_DOUBLE] = {1, 5, 0b11100}
+};
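+
+/*
+ * Worked example: for a df/n field of 0b100101, the DF_BYTE row does not
+ * match (bits [5:4] are 0b10, not 0b00), but the DF_HALF row does
+ * (bits [5:3] are 0b100), so elm_df() yields DF_HALF and elm_n() yields
+ * the low three bits, i.e. n = 5.
+ */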
+
+static int elm_n(DisasContext *ctx, int x)
+{
+ return df_extract_val(ctx, x, df_elm);
+}
+
+static int elm_df(DisasContext *ctx, int x)
+{
+ return df_extract_df(ctx, x, df_elm);
+}
+
+static const struct dfe df_bit[] = {
+ /* Table 3.28 BIT Instruction Format */
+ [DF_BYTE] = {3, 4, 0b1110},
+ [DF_HALF] = {4, 3, 0b110},
+ [DF_WORD] = {5, 2, 0b10},
+ [DF_DOUBLE] = {6, 1, 0b0}
+};
+
+static int bit_m(DisasContext *ctx, int x)
+{
+ return df_extract_val(ctx, x, df_bit);
+}
+
+static int bit_df(DisasContext *ctx, int x)
+{
+ return df_extract_df(ctx, x, df_bit);
+}
+
+static TCGv_i64 msa_wr_d[64];
+
+void msa_translate_init(void)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ int off;
+
+ /*
+ * The MSA vector registers are mapped on the
+ * scalar floating-point unit (FPU) registers.
+ */
+ off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
+ msa_wr_d[i * 2] = fpu_f64[i];
+
+ off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]);
+ msa_wr_d[i * 2 + 1] =
+ tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]);
+ }
+}
+
+/*
+ * Check if MSA is enabled.
+ * This function is always called with MSA available.
+ * If MSA is disabled, raise an exception.
+ */
+static inline bool check_msa_enabled(DisasContext *ctx)
+{
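+ /*
+ * MSA requires the FPU to be in 64-bit register mode; using MSA while
+ * the FPU is enabled in 32-bit mode is a reserved-instruction error.
+ */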
+ if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) &&
+ !(ctx->hflags & MIPS_HFLAG_F64))) {
+ gen_reserved_instruction(ctx);
+ return false;
+ }
+
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) {
+ generate_exception_end(ctx, EXCP_MSADIS);
+ return false;
+ }
+ return true;
+}
+
+typedef void gen_helper_piv(TCGv_ptr, TCGv_i32, TCGv);
+typedef void gen_helper_pii(TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void gen_helper_piii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void gen_helper_piiii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+
+#define TRANS_DF_x(TYPE, NAME, trans_func, gen_func) \
+ static gen_helper_p##TYPE * const NAME##_tab[4] = { \
+ gen_func##_b, gen_func##_h, gen_func##_w, gen_func##_d \
+ }; \
+ TRANS(NAME, trans_func, NAME##_tab[a->df])
+
+#define TRANS_DF_iv(NAME, trans_func, gen_func) \
+ TRANS_DF_x(iv, NAME, trans_func, gen_func)
+
+#define TRANS_DF_ii(NAME, trans_func, gen_func) \
+ TRANS_DF_x(ii, NAME, trans_func, gen_func)
+
+#define TRANS_DF_iii(NAME, trans_func, gen_func) \
+ TRANS_DF_x(iii, NAME, trans_func, gen_func)
+
+#define TRANS_DF_iii_b(NAME, trans_func, gen_func) \
+ static gen_helper_piii * const NAME##_tab[4] = { \
+ NULL, gen_func##_h, gen_func##_w, gen_func##_d \
+ }; \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { \
+ return trans_func(ctx, a, NAME##_tab[a->df]); \
+ }
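+
+/*
+ * For instance, TRANS_DF_iii(SLL, trans_msa_3r, gen_helper_msa_sll) below
+ * expands to a per-data-format table { gen_helper_msa_sll_b, ..._h, ..._w,
+ * ..._d } plus a trans_SLL() that dispatches via trans_msa_3r() with
+ * SLL_tab[a->df].
+ */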
+
+static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt,
+ TCGCond cond)
+{
+ /* generates tcg ops to check if any element is 0 */
+ /* Note this function only works with MSA_WRLEN = 128 */
+ uint64_t eval_zero_or_big = dup_const(df, 1);
+ uint64_t eval_big = eval_zero_or_big << ((8 << df) - 1);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
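+ /*
+ * The subi/andc/andi sequence below is the classic "haszero" bit trick
+ * applied to each 64-bit half: for every element v, (v - 1) & ~v keeps
+ * the element's top bit set iff v was zero (e.g. a zero byte yields
+ * 0xff & 0xff, whose bit 7 is set), and eval_big masks to those top bits.
+ */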
+ tcg_gen_subi_i64(t0, msa_wr_d[wt << 1], eval_zero_or_big);
+ tcg_gen_andc_i64(t0, t0, msa_wr_d[wt << 1]);
+ tcg_gen_andi_i64(t0, t0, eval_big);
+ tcg_gen_subi_i64(t1, msa_wr_d[(wt << 1) + 1], eval_zero_or_big);
+ tcg_gen_andc_i64(t1, t1, msa_wr_d[(wt << 1) + 1]);
+ tcg_gen_andi_i64(t1, t1, eval_big);
+ tcg_gen_or_i64(t0, t0, t1);
+ /* if all bits are zero then all elements are not zero */
+ /* if some bit is non-zero then some element is zero */
+ tcg_gen_setcondi_i64(cond, t0, t0, 0);
+ tcg_gen_trunc_i64_tl(tresult, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
+{
+ TCGv_i64 t0;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ gen_reserved_instruction(ctx);
+ return true;
+ }
+ t0 = tcg_temp_new_i64();
+ tcg_gen_or_i64(t0, msa_wr_d[wt << 1], msa_wr_d[(wt << 1) + 1]);
+ tcg_gen_setcondi_i64(cond, t0, t0, 0);
+ tcg_gen_trunc_i64_tl(bcond, t0);
+ tcg_temp_free_i64(t0);
+
+ ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
+
+ ctx->hflags |= MIPS_HFLAG_BC;
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+
+ return true;
+}
+
+static bool trans_BZ_V(DisasContext *ctx, arg_msa_bz *a)
+{
+ return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_EQ);
+}
+
+static bool trans_BNZ_V(DisasContext *ctx, arg_msa_bz *a)
+{
+ return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_NE);
+}
+
+static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int sa, bool if_not)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ gen_reserved_instruction(ctx);
+ return true;
+ }
+
+ gen_check_zero_element(bcond, df, wt, if_not ? TCG_COND_EQ : TCG_COND_NE);
+
+ ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
+ ctx->hflags |= MIPS_HFLAG_BC;
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+
+ return true;
+}
+
+static bool trans_BZ(DisasContext *ctx, arg_msa_bz *a)
+{
+ return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, false);
+}
+
+static bool trans_BNZ(DisasContext *ctx, arg_msa_bz *a)
+{
+ return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, true);
+}
+
+static bool trans_msa_i8(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piii *gen_msa_i8)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_i8(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
+}
+
+TRANS(ANDI, trans_msa_i8, gen_helper_msa_andi_b);
+TRANS(ORI, trans_msa_i8, gen_helper_msa_ori_b);
+TRANS(NORI, trans_msa_i8, gen_helper_msa_nori_b);
+TRANS(XORI, trans_msa_i8, gen_helper_msa_xori_b);
+TRANS(BMNZI, trans_msa_i8, gen_helper_msa_bmnzi_b);
+TRANS(BMZI, trans_msa_i8, gen_helper_msa_bmzi_b);
+TRANS(BSELI, trans_msa_i8, gen_helper_msa_bseli_b);
+
+static bool trans_SHF(DisasContext *ctx, arg_msa_i *a)
+{
+ if (a->df == DF_DOUBLE) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_helper_msa_shf_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
+}
+
+static bool trans_msa_i5(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piiii *gen_msa_i5)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_i5(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
+}
+
+TRANS(ADDVI, trans_msa_i5, gen_helper_msa_addvi_df);
+TRANS(SUBVI, trans_msa_i5, gen_helper_msa_subvi_df);
+TRANS(MAXI_S, trans_msa_i5, gen_helper_msa_maxi_s_df);
+TRANS(MAXI_U, trans_msa_i5, gen_helper_msa_maxi_u_df);
+TRANS(MINI_S, trans_msa_i5, gen_helper_msa_mini_s_df);
+TRANS(MINI_U, trans_msa_i5, gen_helper_msa_mini_u_df);
+TRANS(CLTI_S, trans_msa_i5, gen_helper_msa_clti_s_df);
+TRANS(CLTI_U, trans_msa_i5, gen_helper_msa_clti_u_df);
+TRANS(CLEI_S, trans_msa_i5, gen_helper_msa_clei_s_df);
+TRANS(CLEI_U, trans_msa_i5, gen_helper_msa_clei_u_df);
+TRANS(CEQI, trans_msa_i5, gen_helper_msa_ceqi_df);
+
+static bool trans_LDI(DisasContext *ctx, arg_msa_ldi *a)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_helper_msa_ldi_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->sa));
+
+ return true;
+}
+
+static bool trans_msa_bit(DisasContext *ctx, arg_msa_bit *a,
+ gen_helper_piiii *gen_msa_bit)
+{
+ if (a->df < 0) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_bit(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->m));
+
+ return true;
+}
+
+TRANS(SLLI, trans_msa_bit, gen_helper_msa_slli_df);
+TRANS(SRAI, trans_msa_bit, gen_helper_msa_srai_df);
+TRANS(SRLI, trans_msa_bit, gen_helper_msa_srli_df);
+TRANS(BCLRI, trans_msa_bit, gen_helper_msa_bclri_df);
+TRANS(BSETI, trans_msa_bit, gen_helper_msa_bseti_df);
+TRANS(BNEGI, trans_msa_bit, gen_helper_msa_bnegi_df);
+TRANS(BINSLI, trans_msa_bit, gen_helper_msa_binsli_df);
+TRANS(BINSRI, trans_msa_bit, gen_helper_msa_binsri_df);
+TRANS(SAT_S, trans_msa_bit, gen_helper_msa_sat_s_df);
+TRANS(SAT_U, trans_msa_bit, gen_helper_msa_sat_u_df);
+TRANS(SRARI, trans_msa_bit, gen_helper_msa_srari_df);
+TRANS(SRLRI, trans_msa_bit, gen_helper_msa_srlri_df);
+
+static bool trans_msa_3rf(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piiii *gen_msa_3rf)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_3rf(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->wt));
+
+ return true;
+}
+
+static bool trans_msa_3r(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piii *gen_msa_3r)
+{
+ if (!gen_msa_3r) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_3r(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->wt));
+
+ return true;
+}
+
+TRANS(AND_V, trans_msa_3r, gen_helper_msa_and_v);
+TRANS(OR_V, trans_msa_3r, gen_helper_msa_or_v);
+TRANS(NOR_V, trans_msa_3r, gen_helper_msa_nor_v);
+TRANS(XOR_V, trans_msa_3r, gen_helper_msa_xor_v);
+TRANS(BMNZ_V, trans_msa_3r, gen_helper_msa_bmnz_v);
+TRANS(BMZ_V, trans_msa_3r, gen_helper_msa_bmz_v);
+TRANS(BSEL_V, trans_msa_3r, gen_helper_msa_bsel_v);
+
+TRANS_DF_iii(SLL, trans_msa_3r, gen_helper_msa_sll);
+TRANS_DF_iii(SRA, trans_msa_3r, gen_helper_msa_sra);
+TRANS_DF_iii(SRL, trans_msa_3r, gen_helper_msa_srl);
+TRANS_DF_iii(BCLR, trans_msa_3r, gen_helper_msa_bclr);
+TRANS_DF_iii(BSET, trans_msa_3r, gen_helper_msa_bset);
+TRANS_DF_iii(BNEG, trans_msa_3r, gen_helper_msa_bneg);
+TRANS_DF_iii(BINSL, trans_msa_3r, gen_helper_msa_binsl);
+TRANS_DF_iii(BINSR, trans_msa_3r, gen_helper_msa_binsr);
+
+TRANS_DF_iii(ADDV, trans_msa_3r, gen_helper_msa_addv);
+TRANS_DF_iii(SUBV, trans_msa_3r, gen_helper_msa_subv);
+TRANS_DF_iii(MAX_S, trans_msa_3r, gen_helper_msa_max_s);
+TRANS_DF_iii(MAX_U, trans_msa_3r, gen_helper_msa_max_u);
+TRANS_DF_iii(MIN_S, trans_msa_3r, gen_helper_msa_min_s);
+TRANS_DF_iii(MIN_U, trans_msa_3r, gen_helper_msa_min_u);
+TRANS_DF_iii(MAX_A, trans_msa_3r, gen_helper_msa_max_a);
+TRANS_DF_iii(MIN_A, trans_msa_3r, gen_helper_msa_min_a);
+
+TRANS_DF_iii(CEQ, trans_msa_3r, gen_helper_msa_ceq);
+TRANS_DF_iii(CLT_S, trans_msa_3r, gen_helper_msa_clt_s);
+TRANS_DF_iii(CLT_U, trans_msa_3r, gen_helper_msa_clt_u);
+TRANS_DF_iii(CLE_S, trans_msa_3r, gen_helper_msa_cle_s);
+TRANS_DF_iii(CLE_U, trans_msa_3r, gen_helper_msa_cle_u);
+
+TRANS_DF_iii(ADD_A, trans_msa_3r, gen_helper_msa_add_a);
+TRANS_DF_iii(ADDS_A, trans_msa_3r, gen_helper_msa_adds_a);
+TRANS_DF_iii(ADDS_S, trans_msa_3r, gen_helper_msa_adds_s);
+TRANS_DF_iii(ADDS_U, trans_msa_3r, gen_helper_msa_adds_u);
+TRANS_DF_iii(AVE_S, trans_msa_3r, gen_helper_msa_ave_s);
+TRANS_DF_iii(AVE_U, trans_msa_3r, gen_helper_msa_ave_u);
+TRANS_DF_iii(AVER_S, trans_msa_3r, gen_helper_msa_aver_s);
+TRANS_DF_iii(AVER_U, trans_msa_3r, gen_helper_msa_aver_u);
+
+TRANS_DF_iii(SUBS_S, trans_msa_3r, gen_helper_msa_subs_s);
+TRANS_DF_iii(SUBS_U, trans_msa_3r, gen_helper_msa_subs_u);
+TRANS_DF_iii(SUBSUS_U, trans_msa_3r, gen_helper_msa_subsus_u);
+TRANS_DF_iii(SUBSUU_S, trans_msa_3r, gen_helper_msa_subsuu_s);
+TRANS_DF_iii(ASUB_S, trans_msa_3r, gen_helper_msa_asub_s);
+TRANS_DF_iii(ASUB_U, trans_msa_3r, gen_helper_msa_asub_u);
+
+TRANS_DF_iii(MULV, trans_msa_3r, gen_helper_msa_mulv);
+TRANS_DF_iii(MADDV, trans_msa_3r, gen_helper_msa_maddv);
+TRANS_DF_iii(MSUBV, trans_msa_3r, gen_helper_msa_msubv);
+TRANS_DF_iii(DIV_S, trans_msa_3r, gen_helper_msa_div_s);
+TRANS_DF_iii(DIV_U, trans_msa_3r, gen_helper_msa_div_u);
+TRANS_DF_iii(MOD_S, trans_msa_3r, gen_helper_msa_mod_s);
+TRANS_DF_iii(MOD_U, trans_msa_3r, gen_helper_msa_mod_u);
+
+TRANS_DF_iii_b(DOTP_S, trans_msa_3r, gen_helper_msa_dotp_s);
+TRANS_DF_iii_b(DOTP_U, trans_msa_3r, gen_helper_msa_dotp_u);
+TRANS_DF_iii_b(DPADD_S, trans_msa_3r, gen_helper_msa_dpadd_s);
+TRANS_DF_iii_b(DPADD_U, trans_msa_3r, gen_helper_msa_dpadd_u);
+TRANS_DF_iii_b(DPSUB_S, trans_msa_3r, gen_helper_msa_dpsub_s);
+TRANS_DF_iii_b(DPSUB_U, trans_msa_3r, gen_helper_msa_dpsub_u);
+
+TRANS(SLD, trans_msa_3rf, gen_helper_msa_sld_df);
+TRANS(SPLAT, trans_msa_3rf, gen_helper_msa_splat_df);
+TRANS_DF_iii(PCKEV, trans_msa_3r, gen_helper_msa_pckev);
+TRANS_DF_iii(PCKOD, trans_msa_3r, gen_helper_msa_pckod);
+TRANS_DF_iii(ILVL, trans_msa_3r, gen_helper_msa_ilvl);
+TRANS_DF_iii(ILVR, trans_msa_3r, gen_helper_msa_ilvr);
+TRANS_DF_iii(ILVEV, trans_msa_3r, gen_helper_msa_ilvev);
+TRANS_DF_iii(ILVOD, trans_msa_3r, gen_helper_msa_ilvod);
+
+TRANS(VSHF, trans_msa_3rf, gen_helper_msa_vshf_df);
+TRANS_DF_iii(SRAR, trans_msa_3r, gen_helper_msa_srar);
+TRANS_DF_iii(SRLR, trans_msa_3r, gen_helper_msa_srlr);
+TRANS_DF_iii_b(HADD_S, trans_msa_3r, gen_helper_msa_hadd_s);
+TRANS_DF_iii_b(HADD_U, trans_msa_3r, gen_helper_msa_hadd_u);
+TRANS_DF_iii_b(HSUB_S, trans_msa_3r, gen_helper_msa_hsub_s);
+TRANS_DF_iii_b(HSUB_U, trans_msa_3r, gen_helper_msa_hsub_u);
+
+static bool trans_MOVE_V(DisasContext *ctx, arg_msa_elm *a)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_helper_msa_move_v(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
+}
+
+static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a)
+{
+ TCGv telm;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ telm = tcg_temp_new();
+
+ gen_load_gpr(telm, a->ws);
+ gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
+
+ tcg_temp_free(telm);
+
+ return true;
+}
+
+static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a)
+{
+ TCGv telm;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ telm = tcg_temp_new();
+
+ gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
+ gen_store_gpr(telm, a->wd);
+
+ tcg_temp_free(telm);
+
+ return true;
+}
+
+static bool trans_msa_elm(DisasContext *ctx, arg_msa_elm_df *a,
+ gen_helper_piiii *gen_msa_elm_df)
+{
+ if (a->df < 0) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_elm_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->n));
+
+ return true;
+}
+
+TRANS(SLDI, trans_msa_elm, gen_helper_msa_sldi_df);
+TRANS(SPLATI, trans_msa_elm, gen_helper_msa_splati_df);
+TRANS(INSVE, trans_msa_elm, gen_helper_msa_insve_df);
+
+static bool trans_msa_elm_fn(DisasContext *ctx, arg_msa_elm_df *a,
+ gen_helper_piii * const gen_msa_elm[4])
+{
+ if (a->df < 0 || !gen_msa_elm[a->df]) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ if (a->wd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+
+ gen_msa_elm[a->df](cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->n));
+
+ return true;
+}
+
+#if defined(TARGET_MIPS64)
+#define NULL_IF_MIPS32(function) function
+#else
+#define NULL_IF_MIPS32(function) NULL
+#endif
+
+static bool trans_COPY_U(DisasContext *ctx, arg_msa_elm_df *a)
+{
+ static gen_helper_piii * const gen_msa_copy_u[4] = {
+ gen_helper_msa_copy_u_b, gen_helper_msa_copy_u_h,
+ NULL_IF_MIPS32(gen_helper_msa_copy_u_w), NULL
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_copy_u);
+}
+
+static bool trans_COPY_S(DisasContext *ctx, arg_msa_elm_df *a)
+{
+ static gen_helper_piii * const gen_msa_copy_s[4] = {
+ gen_helper_msa_copy_s_b, gen_helper_msa_copy_s_h,
+ gen_helper_msa_copy_s_w, NULL_IF_MIPS32(gen_helper_msa_copy_s_d)
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_copy_s);
+}
+
+static bool trans_INSERT(DisasContext *ctx, arg_msa_elm_df *a)
+{
+ static gen_helper_piii * const gen_msa_insert[4] = {
+ gen_helper_msa_insert_b, gen_helper_msa_insert_h,
+ gen_helper_msa_insert_w, NULL_IF_MIPS32(gen_helper_msa_insert_d)
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_insert);
+}
+
+TRANS(FCAF, trans_msa_3rf, gen_helper_msa_fcaf_df);
+TRANS(FCUN, trans_msa_3rf, gen_helper_msa_fcun_df);
+TRANS(FCEQ, trans_msa_3rf, gen_helper_msa_fceq_df);
+TRANS(FCUEQ, trans_msa_3rf, gen_helper_msa_fcueq_df);
+TRANS(FCLT, trans_msa_3rf, gen_helper_msa_fclt_df);
+TRANS(FCULT, trans_msa_3rf, gen_helper_msa_fcult_df);
+TRANS(FCLE, trans_msa_3rf, gen_helper_msa_fcle_df);
+TRANS(FCULE, trans_msa_3rf, gen_helper_msa_fcule_df);
+TRANS(FSAF, trans_msa_3rf, gen_helper_msa_fsaf_df);
+TRANS(FSUN, trans_msa_3rf, gen_helper_msa_fsun_df);
+TRANS(FSEQ, trans_msa_3rf, gen_helper_msa_fseq_df);
+TRANS(FSUEQ, trans_msa_3rf, gen_helper_msa_fsueq_df);
+TRANS(FSLT, trans_msa_3rf, gen_helper_msa_fslt_df);
+TRANS(FSULT, trans_msa_3rf, gen_helper_msa_fsult_df);
+TRANS(FSLE, trans_msa_3rf, gen_helper_msa_fsle_df);
+TRANS(FSULE, trans_msa_3rf, gen_helper_msa_fsule_df);
+
+TRANS(FADD, trans_msa_3rf, gen_helper_msa_fadd_df);
+TRANS(FSUB, trans_msa_3rf, gen_helper_msa_fsub_df);
+TRANS(FMUL, trans_msa_3rf, gen_helper_msa_fmul_df);
+TRANS(FDIV, trans_msa_3rf, gen_helper_msa_fdiv_df);
+TRANS(FMADD, trans_msa_3rf, gen_helper_msa_fmadd_df);
+TRANS(FMSUB, trans_msa_3rf, gen_helper_msa_fmsub_df);
+TRANS(FEXP2, trans_msa_3rf, gen_helper_msa_fexp2_df);
+TRANS(FEXDO, trans_msa_3rf, gen_helper_msa_fexdo_df);
+TRANS(FTQ, trans_msa_3rf, gen_helper_msa_ftq_df);
+TRANS(FMIN, trans_msa_3rf, gen_helper_msa_fmin_df);
+TRANS(FMIN_A, trans_msa_3rf, gen_helper_msa_fmin_a_df);
+TRANS(FMAX, trans_msa_3rf, gen_helper_msa_fmax_df);
+TRANS(FMAX_A, trans_msa_3rf, gen_helper_msa_fmax_a_df);
+
+TRANS(FCOR, trans_msa_3rf, gen_helper_msa_fcor_df);
+TRANS(FCUNE, trans_msa_3rf, gen_helper_msa_fcune_df);
+TRANS(FCNE, trans_msa_3rf, gen_helper_msa_fcne_df);
+TRANS(MUL_Q, trans_msa_3rf, gen_helper_msa_mul_q_df);
+TRANS(MADD_Q, trans_msa_3rf, gen_helper_msa_madd_q_df);
+TRANS(MSUB_Q, trans_msa_3rf, gen_helper_msa_msub_q_df);
+TRANS(FSOR, trans_msa_3rf, gen_helper_msa_fsor_df);
+TRANS(FSUNE, trans_msa_3rf, gen_helper_msa_fsune_df);
+TRANS(FSNE, trans_msa_3rf, gen_helper_msa_fsne_df);
+TRANS(MULR_Q, trans_msa_3rf, gen_helper_msa_mulr_q_df);
+TRANS(MADDR_Q, trans_msa_3rf, gen_helper_msa_maddr_q_df);
+TRANS(MSUBR_Q, trans_msa_3rf, gen_helper_msa_msubr_q_df);
+
+static bool trans_msa_2r(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_pii *gen_msa_2r)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_2r(cpu_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws));
+
+ return true;
+}
+
+TRANS_DF_ii(PCNT, trans_msa_2r, gen_helper_msa_pcnt);
+TRANS_DF_ii(NLOC, trans_msa_2r, gen_helper_msa_nloc);
+TRANS_DF_ii(NLZC, trans_msa_2r, gen_helper_msa_nlzc);
+
+static bool trans_FILL(DisasContext *ctx, arg_msa_r *a)
+{
+ if (TARGET_LONG_BITS != 64 && a->df == DF_DOUBLE) {
+ /* Double format valid only for MIPS64 */
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_helper_msa_fill_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
+}
+
+static bool trans_msa_2rf(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piii *gen_msa_2rf)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_2rf(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
+}
+
+TRANS(FCLASS, trans_msa_2rf, gen_helper_msa_fclass_df);
+TRANS(FTRUNC_S, trans_msa_2rf, gen_helper_msa_ftrunc_s_df);
+TRANS(FTRUNC_U, trans_msa_2rf, gen_helper_msa_ftrunc_u_df);
+TRANS(FSQRT, trans_msa_2rf, gen_helper_msa_fsqrt_df);
+TRANS(FRSQRT, trans_msa_2rf, gen_helper_msa_frsqrt_df);
+TRANS(FRCP, trans_msa_2rf, gen_helper_msa_frcp_df);
+TRANS(FRINT, trans_msa_2rf, gen_helper_msa_frint_df);
+TRANS(FLOG2, trans_msa_2rf, gen_helper_msa_flog2_df);
+TRANS(FEXUPL, trans_msa_2rf, gen_helper_msa_fexupl_df);
+TRANS(FEXUPR, trans_msa_2rf, gen_helper_msa_fexupr_df);
+TRANS(FFQL, trans_msa_2rf, gen_helper_msa_ffql_df);
+TRANS(FFQR, trans_msa_2rf, gen_helper_msa_ffqr_df);
+TRANS(FTINT_S, trans_msa_2rf, gen_helper_msa_ftint_s_df);
+TRANS(FTINT_U, trans_msa_2rf, gen_helper_msa_ftint_u_df);
+TRANS(FFINT_S, trans_msa_2rf, gen_helper_msa_ffint_s_df);
+TRANS(FFINT_U, trans_msa_2rf, gen_helper_msa_ffint_u_df);
+
+static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piv *gen_msa_ldst)
+{
+ TCGv taddr;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ taddr = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
+ gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
+
+ tcg_temp_free(taddr);
+
+ return true;
+}
+
+TRANS_DF_iv(LD, trans_msa_ldst, gen_helper_msa_ld);
+TRANS_DF_iv(ST, trans_msa_ldst, gen_helper_msa_st);
+
+static bool trans_LSA(DisasContext *ctx, arg_r *a)
+{
+ return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa);
+}
+
+static bool trans_DLSA(DisasContext *ctx, arg_r *a)
+{
+ if (TARGET_LONG_BITS != 64) {
+ return false;
+ }
+ return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa);
+}
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
new file mode 100644
index 000000000..f52244e1b
--- /dev/null
+++ b/target/mips/tcg/mxu_translate.c
@@ -0,0 +1,1605 @@
+/*
+ * Ingenic XBurst Media eXtension Unit (MXU) translation routines.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Datasheet:
+ *
+ * "XBurst® Instruction Set Architecture MIPS eXtension/enhanced Unit
+ * Programming Manual", Ingenic Semiconductor Co, Ltd., revision June 2, 2017
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+
+/*
+ *
+ * AN OVERVIEW OF MXU EXTENSION INSTRUCTION SET
+ * ============================================
+ *
+ *
+ * MXU (full name: MIPS eXtension/enhanced Unit) is a SIMD extension of the
+ * MIPS32 instruction set. It is designed to fit the needs of signal, graphical
+ * and video processing applications. The MXU instruction set is used in the
+ * XBurst family of microprocessors by Ingenic.
+ *
+ * The MXU unit contains 17 registers called X0-X16. X0 is always zero, and
+ * X16 is the control register.
+ *
+ *
+ * The notation used in MXU assembler mnemonics
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Register operands:
+ *
+ * XRa, XRb, XRc, XRd - MXU registers
+ * Rb, Rc, Rd, Rs, Rt - general purpose MIPS registers
+ *
+ * Non-register operands:
+ *
+ * aptn1 - 1-bit accumulate add/subtract pattern
+ * aptn2 - 2-bit accumulate add/subtract pattern
+ * eptn2 - 2-bit execute add/subtract pattern
+ * optn2 - 2-bit operand pattern
+ * optn3 - 3-bit operand pattern
+ * sft4 - 4-bit shift amount
+ * strd2 - 2-bit stride amount
+ *
+ * Prefixes:
+ *
+ * Level of parallelism: Operand size:
+ * S - single operation at a time 32 - word
+ * D - two operations in parallel 16 - half word
+ * Q - four operations in parallel 8 - byte
+ *
+ * Operations:
+ *
+ * ADD - Add or subtract
+ * ADDC - Add with carry-in
+ * ACC - Accumulate
+ * ASUM - Sum together then accumulate (add or subtract)
+ * ASUMC - Sum together then accumulate (add or subtract) with carry-in
+ * AVG - Average between 2 operands
+ * ABD - Absolute difference
+ * ALN - Align data
+ * AND - Logical bitwise 'and' operation
+ * CPS - Copy sign
+ * EXTR - Extract bits
+ * I2M - Move from GPR register to MXU register
+ * LDD - Load data from memory to XRF
+ * LDI - Load data from memory to XRF (and increase the address base)
+ * LUI - Load unsigned immediate
+ * MUL - Multiply
+ * MULU - Unsigned multiply
+ * MADD - 64-bit operand add 32x32 product
+ * MSUB - 64-bit operand subtract 32x32 product
+ * MAC - Multiply and accumulate (add or subtract)
+ * MAD - Multiply and add or subtract
+ * MAX - Maximum between 2 operands
+ * MIN - Minimum between 2 operands
+ * M2I - Move from MXU register to GPR register
+ * MOVZ - Move if zero
+ * MOVN - Move if non-zero
+ * NOR - Logical bitwise 'nor' operation
+ * OR - Logical bitwise 'or' operation
+ * STD - Store data from XRF to memory
+ * SDI - Store data from XRF to memory (and increase the address base)
+ * SLT - Set on less than comparison
+ * SAD - Sum of absolute differences
+ * SLL - Logical shift left
+ * SLR - Logical shift right
+ * SAR - Arithmetic shift right
+ * SAT - Saturation
+ * SFL - Shuffle
+ * SCOP - Calculate x’s scope (-1, means x<0; 0, means x==0; 1, means x>0)
+ * XOR - Logical bitwise 'exclusive or' operation
+ *
+ * Suffixes:
+ *
+ * E - Expand results
+ * F - Fixed point multiplication
+ * L - Low part result
+ * R - Doing rounding
+ * V - Variable instead of immediate
+ * W - Combine above L and V
+ *
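+ * As a worked example of this notation, D16MACF decomposes into
+ * D (two operations in parallel), 16 (half-word operands),
+ * MAC (multiply and accumulate) and F (fixed point multiplication).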
+ *
+ * The list of MXU instructions grouped by functionality
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Load/Store instructions Multiplication instructions
+ * ----------------------- ---------------------------
+ *
+ * S32LDD XRa, Rb, s12 S32MADD XRa, XRd, Rs, Rt
+ * S32STD XRa, Rb, s12 S32MADDU XRa, XRd, Rs, Rt
+ * S32LDDV XRa, Rb, rc, strd2 S32MSUB XRa, XRd, Rs, Rt
+ * S32STDV XRa, Rb, rc, strd2 S32MSUBU XRa, XRd, Rs, Rt
+ * S32LDI XRa, Rb, s12 S32MUL XRa, XRd, Rs, Rt
+ * S32SDI XRa, Rb, s12 S32MULU XRa, XRd, Rs, Rt
+ * S32LDIV XRa, Rb, rc, strd2 D16MUL XRa, XRb, XRc, XRd, optn2
+ * S32SDIV XRa, Rb, rc, strd2 D16MULE XRa, XRb, XRc, optn2
+ * S32LDDR XRa, Rb, s12 D16MULF XRa, XRb, XRc, optn2
+ * S32STDR XRa, Rb, s12 D16MAC XRa, XRb, XRc, XRd, aptn2, optn2
+ * S32LDDVR XRa, Rb, rc, strd2 D16MACE XRa, XRb, XRc, XRd, aptn2, optn2
+ * S32STDVR XRa, Rb, rc, strd2 D16MACF XRa, XRb, XRc, XRd, aptn2, optn2
+ * S32LDIR XRa, Rb, s12 D16MADL XRa, XRb, XRc, XRd, aptn2, optn2
+ * S32SDIR XRa, Rb, s12 S16MAD XRa, XRb, XRc, XRd, aptn1, optn2
+ * S32LDIVR XRa, Rb, rc, strd2 Q8MUL XRa, XRb, XRc, XRd
+ * S32SDIVR XRa, Rb, rc, strd2 Q8MULSU XRa, XRb, XRc, XRd
+ * S16LDD XRa, Rb, s10, eptn2 Q8MAC XRa, XRb, XRc, XRd, aptn2
+ * S16STD XRa, Rb, s10, eptn2 Q8MACSU XRa, XRb, XRc, XRd, aptn2
+ * S16LDI XRa, Rb, s10, eptn2 Q8MADL XRa, XRb, XRc, XRd, aptn2
+ * S16SDI XRa, Rb, s10, eptn2
+ * S8LDD XRa, Rb, s8, eptn3
+ * S8STD XRa, Rb, s8, eptn3 Addition and subtraction instructions
+ * S8LDI XRa, Rb, s8, eptn3 -------------------------------------
+ * S8SDI XRa, Rb, s8, eptn3
+ * LXW Rd, Rs, Rt, strd2 D32ADD XRa, XRb, XRc, XRd, eptn2
+ * LXH Rd, Rs, Rt, strd2 D32ADDC XRa, XRb, XRc, XRd
+ * LXHU Rd, Rs, Rt, strd2 D32ACC XRa, XRb, XRc, XRd, eptn2
+ * LXB Rd, Rs, Rt, strd2 D32ACCM XRa, XRb, XRc, XRd, eptn2
+ * LXBU Rd, Rs, Rt, strd2 D32ASUM XRa, XRb, XRc, XRd, eptn2
+ * S32CPS XRa, XRb, XRc
+ * Q16ADD XRa, XRb, XRc, XRd, eptn2, optn2
+ * Comparison instructions Q16ACC XRa, XRb, XRc, XRd, eptn2
+ * ----------------------- Q16ACCM XRa, XRb, XRc, XRd, eptn2
+ * D16ASUM XRa, XRb, XRc, XRd, eptn2
+ * S32MAX XRa, XRb, XRc D16CPS XRa, XRb,
+ * S32MIN XRa, XRb, XRc D16AVG XRa, XRb, XRc
+ * S32SLT XRa, XRb, XRc D16AVGR XRa, XRb, XRc
+ * S32MOVZ XRa, XRb, XRc Q8ADD XRa, XRb, XRc, eptn2
+ * S32MOVN XRa, XRb, XRc Q8ADDE XRa, XRb, XRc, XRd, eptn2
+ * D16MAX XRa, XRb, XRc Q8ACCE XRa, XRb, XRc, XRd, eptn2
+ * D16MIN XRa, XRb, XRc Q8ABD XRa, XRb, XRc
+ * D16SLT XRa, XRb, XRc Q8SAD XRa, XRb, XRc, XRd
+ * D16MOVZ XRa, XRb, XRc Q8AVG XRa, XRb, XRc
+ * D16MOVN XRa, XRb, XRc Q8AVGR XRa, XRb, XRc
+ * Q8MAX XRa, XRb, XRc D8SUM XRa, XRb, XRc, XRd
+ * Q8MIN XRa, XRb, XRc D8SUMC XRa, XRb, XRc, XRd
+ * Q8SLT XRa, XRb, XRc
+ * Q8SLTU XRa, XRb, XRc
+ * Q8MOVZ XRa, XRb, XRc Shift instructions
+ * Q8MOVN XRa, XRb, XRc ------------------
+ *
+ * D32SLL XRa, XRb, XRc, XRd, sft4
+ * Bitwise instructions D32SLR XRa, XRb, XRc, XRd, sft4
+ * -------------------- D32SAR XRa, XRb, XRc, XRd, sft4
+ * D32SARL XRa, XRb, XRc, sft4
+ * S32NOR XRa, XRb, XRc D32SLLV XRa, XRb, Rb
+ * S32AND XRa, XRb, XRc D32SLRV XRa, XRb, Rb
+ * S32XOR XRa, XRb, XRc D32SARV XRa, XRb, Rb
+ * S32OR XRa, XRb, XRc D32SARW XRa, XRb, XRc, Rb
+ * Q16SLL XRa, XRb, XRc, XRd, sft4
+ * Q16SLR XRa, XRb, XRc, XRd, sft4
+ * Miscellaneous instructions Q16SAR XRa, XRb, XRc, XRd, sft4
+ * ------------------------- Q16SLLV XRa, XRb, Rb
+ * Q16SLRV XRa, XRb, Rb
+ * S32SFL XRa, XRb, XRc, XRd, optn2 Q16SARV XRa, XRb, Rb
+ * S32ALN XRa, XRb, XRc, Rb
+ * S32ALNI XRa, XRb, XRc, s3
+ * S32LUI XRa, s8, optn3 Move instructions
+ * S32EXTR XRa, XRb, Rb, bits5 -----------------
+ * S32EXTRV XRa, XRb, Rs, Rt
+ * Q16SCOP XRa, XRb, XRc, XRd S32M2I XRa, Rb
+ * Q16SAT XRa, XRb, XRc S32I2M XRa, Rb
+ *
+ *
+ * The opcode organization of MXU instructions
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The bits 31..26 of all MXU instructions are equal to 0x1C (also referred
+ * to as opcode SPECIAL2 in the base MIPS ISA). The organization and meaning
+ * of other bits up to the instruction level is as follows:
+ *
+ * bits
+ * 05..00
+ *
+ * ┌─ 000000 ─ OPC_MXU_S32MADD
+ * ├─ 000001 ─ OPC_MXU_S32MADDU
+ * ├─ 000010 ─ <not assigned> (non-MXU OPC_MUL)
+ * │
+ * │ 20..18
+ * ├─ 000011 ─ OPC_MXU__POOL00 ─┬─ 000 ─ OPC_MXU_S32MAX
+ * │ ├─ 001 ─ OPC_MXU_S32MIN
+ * │ ├─ 010 ─ OPC_MXU_D16MAX
+ * │ ├─ 011 ─ OPC_MXU_D16MIN
+ * │ ├─ 100 ─ OPC_MXU_Q8MAX
+ * │ ├─ 101 ─ OPC_MXU_Q8MIN
+ * │ ├─ 110 ─ OPC_MXU_Q8SLT
+ * │ └─ 111 ─ OPC_MXU_Q8SLTU
+ * ├─ 000100 ─ OPC_MXU_S32MSUB
+ * ├─ 000101 ─ OPC_MXU_S32MSUBU 20..18
+ * ├─ 000110 ─ OPC_MXU__POOL01 ─┬─ 000 ─ OPC_MXU_S32SLT
+ * │ ├─ 001 ─ OPC_MXU_D16SLT
+ * │ ├─ 010 ─ OPC_MXU_D16AVG
+ * │ ├─ 011 ─ OPC_MXU_D16AVGR
+ * │ ├─ 100 ─ OPC_MXU_Q8AVG
+ * │ ├─ 101 ─ OPC_MXU_Q8AVGR
+ * │ └─ 111 ─ OPC_MXU_Q8ADD
+ * │
+ * │ 20..18
+ * ├─ 000111 ─ OPC_MXU__POOL02 ─┬─ 000 ─ OPC_MXU_S32CPS
+ * │ ├─ 010 ─ OPC_MXU_D16CPS
+ * │ ├─ 100 ─ OPC_MXU_Q8ABD
+ * │ └─ 110 ─ OPC_MXU_Q16SAT
+ * ├─ 001000 ─ OPC_MXU_D16MUL
+ * │ 25..24
+ * ├─ 001001 ─ OPC_MXU__POOL03 ─┬─ 00 ─ OPC_MXU_D16MULF
+ * │ └─ 01 ─ OPC_MXU_D16MULE
+ * ├─ 001010 ─ OPC_MXU_D16MAC
+ * ├─ 001011 ─ OPC_MXU_D16MACF
+ * ├─ 001100 ─ OPC_MXU_D16MADL
+ * ├─ 001101 ─ OPC_MXU_S16MAD
+ * ├─ 001110 ─ OPC_MXU_Q16ADD
+ * ├─ 001111 ─ OPC_MXU_D16MACE 23
+ * │ ┌─ 0 ─ OPC_MXU_S32LDD
+ * ├─ 010000 ─ OPC_MXU__POOL04 ─┴─ 1 ─ OPC_MXU_S32LDDR
+ * │
+ * │ 23
+ * ├─ 010001 ─ OPC_MXU__POOL05 ─┬─ 0 ─ OPC_MXU_S32STD
+ * │ └─ 1 ─ OPC_MXU_S32STDR
+ * │
+ * │ 13..10
+ * ├─ 010010 ─ OPC_MXU__POOL06 ─┬─ 0000 ─ OPC_MXU_S32LDDV
+ * │ └─ 0001 ─ OPC_MXU_S32LDDVR
+ * │
+ * │ 13..10
+ * ├─ 010011 ─ OPC_MXU__POOL07 ─┬─ 0000 ─ OPC_MXU_S32STDV
+ * │ └─ 0001 ─ OPC_MXU_S32STDVR
+ * │
+ * │ 23
+ * ├─ 010100 ─ OPC_MXU__POOL08 ─┬─ 0 ─ OPC_MXU_S32LDI
+ * │ └─ 1 ─ OPC_MXU_S32LDIR
+ * │
+ * │ 23
+ * ├─ 010101 ─ OPC_MXU__POOL09 ─┬─ 0 ─ OPC_MXU_S32SDI
+ * │ └─ 1 ─ OPC_MXU_S32SDIR
+ * │
+ * │ 13..10
+ * ├─ 010110 ─ OPC_MXU__POOL10 ─┬─ 0000 ─ OPC_MXU_S32LDIV
+ * │ └─ 0001 ─ OPC_MXU_S32LDIVR
+ * │
+ * │ 13..10
+ * ├─ 010111 ─ OPC_MXU__POOL11 ─┬─ 0000 ─ OPC_MXU_S32SDIV
+ * │ └─ 0001 ─ OPC_MXU_S32SDIVR
+ * ├─ 011000 ─ OPC_MXU_D32ADD
+ * │ 23..22
+ * MXU ├─ 011001 ─ OPC_MXU__POOL12 ─┬─ 00 ─ OPC_MXU_D32ACC
+ * opcodes ─┤ ├─ 01 ─ OPC_MXU_D32ACCM
+ * │ └─ 10 ─ OPC_MXU_D32ASUM
+ * ├─ 011010 ─ <not assigned>
+ * │ 23..22
+ * ├─ 011011 ─ OPC_MXU__POOL13 ─┬─ 00 ─ OPC_MXU_Q16ACC
+ * │ ├─ 01 ─ OPC_MXU_Q16ACCM
+ * │ └─ 10 ─ OPC_MXU_D16ASUM
+ * │
+ * │ 23..22
+ * ├─ 011100 ─ OPC_MXU__POOL14 ─┬─ 00 ─ OPC_MXU_Q8ADDE
+ * │ ├─ 01 ─ OPC_MXU_D8SUM
+ * ├─ 011101 ─ OPC_MXU_Q8ACCE └─ 10 ─ OPC_MXU_D8SUMC
+ * ├─ 011110 ─ <not assigned>
+ * ├─ 011111 ─ <not assigned>
+ * ├─ 100000 ─ <not assigned> (overlaps with CLZ)
+ * ├─ 100001 ─ <not assigned> (overlaps with CLO)
+ * ├─ 100010 ─ OPC_MXU_S8LDD
+ * ├─ 100011 ─ OPC_MXU_S8STD 15..14
+ * ├─ 100100 ─ OPC_MXU_S8LDI ┌─ 00 ─ OPC_MXU_S32MUL
+ * ├─ 100101 ─ OPC_MXU_S8SDI ├─ 01 ─ OPC_MXU_S32MULU
+ * │ ├─ 10 ─ OPC_MXU_S32EXTR
+ * ├─ 100110 ─ OPC_MXU__POOL15 ─┴─ 11 ─ OPC_MXU_S32EXTRV
+ * │
+ * │ 20..18
+ * ├─ 100111 ─ OPC_MXU__POOL16 ─┬─ 000 ─ OPC_MXU_D32SARW
+ * │ ├─ 001 ─ OPC_MXU_S32ALN
+ * │ ├─ 010 ─ OPC_MXU_S32ALNI
+ * │ ├─ 011 ─ OPC_MXU_S32LUI
+ * │ ├─ 100 ─ OPC_MXU_S32NOR
+ * │ ├─ 101 ─ OPC_MXU_S32AND
+ * │ ├─ 110 ─ OPC_MXU_S32OR
+ * │ └─ 111 ─ OPC_MXU_S32XOR
+ * │
+ * │ 7..5
+ * ├─ 101000 ─ OPC_MXU__POOL17 ─┬─ 000 ─ OPC_MXU_LXB
+ * │ ├─ 001 ─ OPC_MXU_LXH
+ * ├─ 101001 ─ <not assigned> ├─ 011 ─ OPC_MXU_LXW
+ * ├─ 101010 ─ OPC_MXU_S16LDD ├─ 100 ─ OPC_MXU_LXBU
+ * ├─ 101011 ─ OPC_MXU_S16STD └─ 101 ─ OPC_MXU_LXHU
+ * ├─ 101100 ─ OPC_MXU_S16LDI
+ * ├─ 101101 ─ OPC_MXU_S16SDI
+ * ├─ 101110 ─ OPC_MXU_S32M2I
+ * ├─ 101111 ─ OPC_MXU_S32I2M
+ * ├─ 110000 ─ OPC_MXU_D32SLL
+ * ├─ 110001 ─ OPC_MXU_D32SLR 20..18
+ * ├─ 110010 ─ OPC_MXU_D32SARL ┌─ 000 ─ OPC_MXU_D32SLLV
+ * ├─ 110011 ─ OPC_MXU_D32SAR ├─ 001 ─ OPC_MXU_D32SLRV
+ * ├─ 110100 ─ OPC_MXU_Q16SLL ├─ 010 ─ OPC_MXU_D32SARV
+ * ├─ 110101 ─ OPC_MXU_Q16SLR ├─ 011 ─ OPC_MXU_Q16SLLV
+ * │ ├─ 100 ─ OPC_MXU_Q16SLRV
+ * ├─ 110110 ─ OPC_MXU__POOL18 ─┴─ 101 ─ OPC_MXU_Q16SARV
+ * │
+ * ├─ 110111 ─ OPC_MXU_Q16SAR
+ * │ 23..22
+ * ├─ 111000 ─ OPC_MXU__POOL19 ─┬─ 00 ─ OPC_MXU_Q8MUL
+ * │ └─ 01 ─ OPC_MXU_Q8MULSU
+ * │
+ * │ 20..18
+ * ├─ 111001 ─ OPC_MXU__POOL20 ─┬─ 000 ─ OPC_MXU_Q8MOVZ
+ * │ ├─ 001 ─ OPC_MXU_Q8MOVN
+ * │ ├─ 010 ─ OPC_MXU_D16MOVZ
+ * │ ├─ 011 ─ OPC_MXU_D16MOVN
+ * │ ├─ 100 ─ OPC_MXU_S32MOVZ
+ * │ └─ 101 ─ OPC_MXU_S32MOVN
+ * │
+ * │ 23..22
+ * ├─ 111010 ─ OPC_MXU__POOL21 ─┬─ 00 ─ OPC_MXU_Q8MAC
+ * │ └─ 10 ─ OPC_MXU_Q8MACSU
+ * ├─ 111011 ─ OPC_MXU_Q16SCOP
+ * ├─ 111100 ─ OPC_MXU_Q8MADL
+ * ├─ 111101 ─ OPC_MXU_S32SFL
+ * ├─ 111110 ─ OPC_MXU_Q8SAD
+ * └─ 111111 ─ <not assigned> (overlaps with SDBBP)
+ *
+ *
+ * Compiled after:
+ *
+ * "XBurst® Instruction Set Architecture MIPS eXtension/enhanced Unit
+ * Programming Manual", Ingenic Semiconductor Co, Ltd., revision June 2, 2017
+ */
+
+enum {
+ OPC_MXU__POOL00 = 0x03,
+ OPC_MXU_D16MUL = 0x08,
+ OPC_MXU_D16MAC = 0x0A,
+ OPC_MXU__POOL04 = 0x10,
+ OPC_MXU_S8LDD = 0x22,
+ OPC_MXU__POOL16 = 0x27,
+ OPC_MXU_S32M2I = 0x2E,
+ OPC_MXU_S32I2M = 0x2F,
+ OPC_MXU__POOL19 = 0x38,
+};
+
+
+/*
+ * MXU pool 00
+ */
+enum {
+ OPC_MXU_S32MAX = 0x00,
+ OPC_MXU_S32MIN = 0x01,
+ OPC_MXU_D16MAX = 0x02,
+ OPC_MXU_D16MIN = 0x03,
+ OPC_MXU_Q8MAX = 0x04,
+ OPC_MXU_Q8MIN = 0x05,
+};
+
+/*
+ * MXU pool 04
+ */
+enum {
+ OPC_MXU_S32LDD = 0x00,
+ OPC_MXU_S32LDDR = 0x01,
+};
+
+/*
+ * MXU pool 16
+ */
+enum {
+ OPC_MXU_S32ALNI = 0x02,
+ OPC_MXU_S32NOR = 0x04,
+ OPC_MXU_S32AND = 0x05,
+ OPC_MXU_S32OR = 0x06,
+ OPC_MXU_S32XOR = 0x07,
+};
+
+/*
+ * MXU pool 19
+ */
+enum {
+ OPC_MXU_Q8MUL = 0x00,
+ OPC_MXU_Q8MULSU = 0x01,
+};
+
+/* MXU accumulate add/subtract 1-bit pattern 'aptn1' */
+#define MXU_APTN1_A 0
+#define MXU_APTN1_S 1
+
+/* MXU accumulate add/subtract 2-bit pattern 'aptn2' */
+#define MXU_APTN2_AA 0
+#define MXU_APTN2_AS 1
+#define MXU_APTN2_SA 2
+#define MXU_APTN2_SS 3
+
+/* MXU execute add/subtract 2-bit pattern 'eptn2' */
+#define MXU_EPTN2_AA 0
+#define MXU_EPTN2_AS 1
+#define MXU_EPTN2_SA 2
+#define MXU_EPTN2_SS 3
+
+/* MXU operand getting pattern 'optn2' */
+#define MXU_OPTN2_PTN0 0
+#define MXU_OPTN2_PTN1 1
+#define MXU_OPTN2_PTN2 2
+#define MXU_OPTN2_PTN3 3
+/* alternative naming scheme for 'optn2' */
+#define MXU_OPTN2_WW 0
+#define MXU_OPTN2_LW 1
+#define MXU_OPTN2_HW 2
+#define MXU_OPTN2_XW 3
+
+/* MXU operand getting pattern 'optn3' */
+#define MXU_OPTN3_PTN0 0
+#define MXU_OPTN3_PTN1 1
+#define MXU_OPTN3_PTN2 2
+#define MXU_OPTN3_PTN3 3
+#define MXU_OPTN3_PTN4 4
+#define MXU_OPTN3_PTN5 5
+#define MXU_OPTN3_PTN6 6
+#define MXU_OPTN3_PTN7 7
+
+/* MXU registers */
+static TCGv mxu_gpr[NUMBER_OF_MXU_REGISTERS - 1];
+static TCGv mxu_CR;
+
+static const char mxuregnames[][5] = {
+ "XR1", "XR2", "XR3", "XR4", "XR5", "XR6", "XR7", "XR8",
+ "XR9", "XR10", "XR11", "XR12", "XR13", "XR14", "XR15", "XCR",
+};
+
+void mxu_translate_init(void)
+{
+ for (unsigned i = 0; i < NUMBER_OF_MXU_REGISTERS - 1; i++) {
+ mxu_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.mxu_gpr[i]),
+ mxuregnames[i]);
+ }
+
+ mxu_CR = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.mxu_cr),
+ mxuregnames[NUMBER_OF_MXU_REGISTERS - 1]);
+}
+
+/* MXU General purpose registers moves. */
+static inline void gen_load_mxu_gpr(TCGv t, unsigned int reg)
+{
+ if (reg == 0) {
+ tcg_gen_movi_tl(t, 0);
+ } else if (reg <= 15) {
+ tcg_gen_mov_tl(t, mxu_gpr[reg - 1]);
+ }
+}
+
+static inline void gen_store_mxu_gpr(TCGv t, unsigned int reg)
+{
+ if (reg > 0 && reg <= 15) {
+ tcg_gen_mov_tl(mxu_gpr[reg - 1], t);
+ }
+}
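+
+/*
+ * In effect XR0 reads as a hardwired zero and ignores writes, so for
+ * reg in 0..15 the two helpers above behave like the following sketch
+ * ('xr' standing in for the mxu_gpr[] globals):
+ *
+ *     uint32_t xr_read(unsigned reg)
+ *     {
+ *         return reg ? xr[reg - 1] : 0;
+ *     }
+ *
+ *     void xr_write(unsigned reg, uint32_t val)
+ *     {
+ *         if (reg) {
+ *             xr[reg - 1] = val;
+ *         }
+ *     }
+ */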
+
+/* MXU control register moves. */
+static inline void gen_load_mxu_cr(TCGv t)
+{
+ tcg_gen_mov_tl(t, mxu_CR);
+}
+
+static inline void gen_store_mxu_cr(TCGv t)
+{
+ /* TODO: Add handling of RW rules for MXU_CR. */
+ tcg_gen_mov_tl(mxu_CR, t);
+}
+
+/*
+ * S32I2M XRa, rb - Register move from GRF to XRF
+ */
+static void gen_mxu_s32i2m(DisasContext *ctx)
+{
+ TCGv t0;
+ uint32_t XRa, Rb;
+
+ t0 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 5);
+ Rb = extract32(ctx->opcode, 16, 5);
+
+ gen_load_gpr(t0, Rb);
+ if (XRa <= 15) {
+ gen_store_mxu_gpr(t0, XRa);
+ } else if (XRa == 16) {
+ gen_store_mxu_cr(t0);
+ }
+
+ tcg_temp_free(t0);
+}
+
+/*
+ * S32M2I XRa, rb - Register move from XRF to GRF
+ */
+static void gen_mxu_s32m2i(DisasContext *ctx)
+{
+ TCGv t0;
+ uint32_t XRa, Rb;
+
+ t0 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 5);
+ Rb = extract32(ctx->opcode, 16, 5);
+
+ if (XRa <= 15) {
+ gen_load_mxu_gpr(t0, XRa);
+ } else if (XRa == 16) {
+ gen_load_mxu_cr(t0);
+ }
+
+ gen_store_gpr(t0, Rb);
+
+ tcg_temp_free(t0);
+}
+
+/*
+ * S8LDD XRa, Rb, s8, optn3 - Load a byte from memory to XRF
+ */
+static void gen_mxu_s8ldd(DisasContext *ctx)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, s8, optn3;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s8 = extract32(ctx->opcode, 10, 8);
+ optn3 = extract32(ctx->opcode, 18, 3);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+ tcg_gen_addi_tl(t0, t0, (int8_t)s8);
+
+ switch (optn3) {
+ /* XRa[7:0] = tmp8 */
+ case MXU_OPTN3_PTN0:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 0, 8);
+ break;
+ /* XRa[15:8] = tmp8 */
+ case MXU_OPTN3_PTN1:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 8, 8);
+ break;
+ /* XRa[23:16] = tmp8 */
+ case MXU_OPTN3_PTN2:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 16, 8);
+ break;
+ /* XRa[31:24] = tmp8 */
+ case MXU_OPTN3_PTN3:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ gen_load_mxu_gpr(t0, XRa);
+ tcg_gen_deposit_tl(t0, t0, t1, 24, 8);
+ break;
+ /* XRa = {8'b0, tmp8, 8'b0, tmp8} */
+ case MXU_OPTN3_PTN4:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_deposit_tl(t0, t1, t1, 16, 16);
+ break;
+ /* XRa = {tmp8, 8'b0, tmp8, 8'b0} */
+ case MXU_OPTN3_PTN5:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_shli_tl(t1, t1, 8);
+ tcg_gen_deposit_tl(t0, t1, t1, 16, 16);
+ break;
+ /* XRa = {{8{sign of tmp8}}, tmp8, {8{sign of tmp8}}, tmp8} */
+ case MXU_OPTN3_PTN6:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_SB);
+ tcg_gen_mov_tl(t0, t1);
+ tcg_gen_andi_tl(t0, t0, 0xFF00FFFF);
+ tcg_gen_shli_tl(t1, t1, 16);
+ tcg_gen_or_tl(t0, t0, t1);
+ break;
+ /* XRa = {tmp8, tmp8, tmp8, tmp8} */
+ case MXU_OPTN3_PTN7:
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_deposit_tl(t1, t1, t1, 8, 8);
+ tcg_gen_deposit_tl(t0, t1, t1, 16, 16);
+ break;
+ }
+
+ gen_store_mxu_gpr(t0, XRa);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
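+
+/*
+ * Worked example for the optn3 patterns above, assuming the loaded byte
+ * is 0xAB and XRa previously held 0x11223344 (values chosen arbitrarily):
+ *
+ *     PTN0 -> 0x112233AB      PTN4 -> 0x00AB00AB
+ *     PTN1 -> 0x1122AB44      PTN5 -> 0xAB00AB00
+ *     PTN2 -> 0x11AB3344      PTN6 -> 0xFFABFFAB  (0xAB sign-extends)
+ *     PTN3 -> 0xAB223344      PTN7 -> 0xABABABAB
+ */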
+
+/*
+ * D16MUL XRa, XRb, XRc, XRd, optn2 - Signed 16 bit pattern multiplication
+ */
+static void gen_mxu_d16mul(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3;
+ uint32_t XRa, XRb, XRc, XRd, optn2;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ optn2 = extract32(ctx->opcode, 22, 2);
+
+ gen_load_mxu_gpr(t1, XRb);
+ tcg_gen_sextract_tl(t0, t1, 0, 16);
+ tcg_gen_sextract_tl(t1, t1, 16, 16);
+ gen_load_mxu_gpr(t3, XRc);
+ tcg_gen_sextract_tl(t2, t3, 0, 16);
+ tcg_gen_sextract_tl(t3, t3, 16, 16);
+
+ switch (optn2) {
+ case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ }
+ gen_store_mxu_gpr(t3, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+}
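+
+/*
+ * Worked example for the optn2 selection above, with arbitrary values
+ * XRb = 0x00020003 (H = 2, L = 3) and XRc = 0x00040005 (H = 4, L = 5):
+ *
+ *     WW -> XRa = 2 * 4 = 8,   XRd = 3 * 5 = 15
+ *     LW -> XRa = 3 * 4 = 12,  XRd = 3 * 5 = 15
+ *     HW -> XRa = 2 * 4 = 8,   XRd = 2 * 5 = 10
+ *     XW -> XRa = 3 * 4 = 12,  XRd = 2 * 5 = 10
+ */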
+
+/*
+ * D16MAC XRa, XRb, XRc, XRd, aptn2, optn2 - Signed 16 bit pattern multiply
+ * and accumulate
+ */
+static void gen_mxu_d16mac(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3;
+ uint32_t XRa, XRb, XRc, XRd, optn2, aptn2;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ optn2 = extract32(ctx->opcode, 22, 2);
+ aptn2 = extract32(ctx->opcode, 24, 2);
+
+ gen_load_mxu_gpr(t1, XRb);
+ tcg_gen_sextract_tl(t0, t1, 0, 16);
+ tcg_gen_sextract_tl(t1, t1, 16, 16);
+
+ gen_load_mxu_gpr(t3, XRc);
+ tcg_gen_sextract_tl(t2, t3, 0, 16);
+ tcg_gen_sextract_tl(t3, t3, 16, 16);
+
+ switch (optn2) {
+ case MXU_OPTN2_WW: /* XRB.H*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_LW: /* XRB.L*XRC.H == lop, XRB.L*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t0, t2);
+ break;
+ case MXU_OPTN2_HW: /* XRB.H*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t1, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ case MXU_OPTN2_XW: /* XRB.L*XRC.H == lop, XRB.H*XRC.L == rop */
+ tcg_gen_mul_tl(t3, t0, t3);
+ tcg_gen_mul_tl(t2, t1, t2);
+ break;
+ }
+ gen_load_mxu_gpr(t0, XRa);
+ gen_load_mxu_gpr(t1, XRd);
+
+ switch (aptn2) {
+ case MXU_APTN2_AA:
+ tcg_gen_add_tl(t3, t0, t3);
+ tcg_gen_add_tl(t2, t1, t2);
+ break;
+ case MXU_APTN2_AS:
+ tcg_gen_add_tl(t3, t0, t3);
+ tcg_gen_sub_tl(t2, t1, t2);
+ break;
+ case MXU_APTN2_SA:
+ tcg_gen_sub_tl(t3, t0, t3);
+ tcg_gen_add_tl(t2, t1, t2);
+ break;
+ case MXU_APTN2_SS:
+ tcg_gen_sub_tl(t3, t0, t3);
+ tcg_gen_sub_tl(t2, t1, t2);
+ break;
+ }
+ gen_store_mxu_gpr(t3, XRa);
+ gen_store_mxu_gpr(t2, XRd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+}
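+
+/*
+ * D16MAC computes the same lop/rop products as D16MUL; aptn2 then only
+ * selects the accumulate direction for each half of the result (sketch):
+ *
+ *     AA: XRa += lop; XRd += rop       AS: XRa += lop; XRd -= rop
+ *     SA: XRa -= lop; XRd += rop       SS: XRa -= lop; XRd -= rop
+ */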
+
+/*
+ * Q8MUL XRa, XRb, XRc, XRd - Parallel unsigned 8 bit pattern multiply
+ * Q8MULSU XRa, XRb, XRc, XRd - Parallel signed 8 bit pattern multiply
+ */
+static void gen_mxu_q8mul_q8mulsu(DisasContext *ctx)
+{
+ TCGv t0, t1, t2, t3, t4, t5, t6, t7;
+ uint32_t XRa, XRb, XRc, XRd, sel;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ t3 = tcg_temp_new();
+ t4 = tcg_temp_new();
+ t5 = tcg_temp_new();
+ t6 = tcg_temp_new();
+ t7 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRd = extract32(ctx->opcode, 18, 4);
+ sel = extract32(ctx->opcode, 22, 2);
+
+ gen_load_mxu_gpr(t3, XRb);
+ gen_load_mxu_gpr(t7, XRc);
+
+ if (sel == 0x2) {
+ /* Q8MULSU */
+ tcg_gen_ext8s_tl(t0, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8s_tl(t1, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8s_tl(t2, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8s_tl(t3, t3);
+ } else {
+ /* Q8MUL */
+ tcg_gen_ext8u_tl(t0, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8u_tl(t1, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8u_tl(t2, t3);
+ tcg_gen_shri_tl(t3, t3, 8);
+ tcg_gen_ext8u_tl(t3, t3);
+ }
+
+ tcg_gen_ext8u_tl(t4, t7);
+ tcg_gen_shri_tl(t7, t7, 8);
+ tcg_gen_ext8u_tl(t5, t7);
+ tcg_gen_shri_tl(t7, t7, 8);
+ tcg_gen_ext8u_tl(t6, t7);
+ tcg_gen_shri_tl(t7, t7, 8);
+ tcg_gen_ext8u_tl(t7, t7);
+
+ tcg_gen_mul_tl(t0, t0, t4);
+ tcg_gen_mul_tl(t1, t1, t5);
+ tcg_gen_mul_tl(t2, t2, t6);
+ tcg_gen_mul_tl(t3, t3, t7);
+
+ tcg_gen_andi_tl(t0, t0, 0xFFFF);
+ tcg_gen_andi_tl(t1, t1, 0xFFFF);
+ tcg_gen_andi_tl(t2, t2, 0xFFFF);
+ tcg_gen_andi_tl(t3, t3, 0xFFFF);
+
+ tcg_gen_shli_tl(t1, t1, 16);
+ tcg_gen_shli_tl(t3, t3, 16);
+
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_or_tl(t1, t2, t3);
+
+ gen_store_mxu_gpr(t0, XRd);
+ gen_store_mxu_gpr(t1, XRa);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ tcg_temp_free(t4);
+ tcg_temp_free(t5);
+ tcg_temp_free(t6);
+ tcg_temp_free(t7);
+}
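+
+/*
+ * A plain-C model of the byte-wise multiply above (sketch only; the
+ * helper name is illustrative). Each 8-bit lane product is truncated
+ * to 16 bits; the two low lanes land in XRd, the two high lanes in XRa:
+ *
+ *     static void q8mul(uint32_t b, uint32_t c, bool su,
+ *                       uint32_t *xra, uint32_t *xrd)
+ *     {
+ *         uint16_t p[4];
+ *         for (int i = 0; i < 4; i++) {
+ *             int32_t lhs = su ? (int8_t)(b >> 8 * i)
+ *                              : (uint8_t)(b >> 8 * i);
+ *             p[i] = lhs * (uint8_t)(c >> 8 * i);
+ *         }
+ *         *xrd = p[0] | (uint32_t)p[1] << 16;
+ *         *xra = p[2] | (uint32_t)p[3] << 16;
+ *     }
+ */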
+
+/*
+ * S32LDD XRa, Rb, S12 - Load a word from memory to XRF
+ * S32LDDR XRa, Rb, S12 - Load a word from memory to XRF, reversed byte seq.
+ */
+static void gen_mxu_s32ldd_s32lddr(DisasContext *ctx)
+{
+ TCGv t0, t1;
+ uint32_t XRa, Rb, s12, sel;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ XRa = extract32(ctx->opcode, 6, 4);
+ s12 = extract32(ctx->opcode, 10, 10);
+ sel = extract32(ctx->opcode, 20, 1);
+ Rb = extract32(ctx->opcode, 21, 5);
+
+ gen_load_gpr(t0, Rb);
+
+ tcg_gen_movi_tl(t1, s12);
+ tcg_gen_shli_tl(t1, t1, 2);
+ if (s12 & 0x200) {
+ tcg_gen_ori_tl(t1, t1, 0xFFFFF000);
+ }
+ tcg_gen_add_tl(t1, t0, t1);
+ tcg_gen_qemu_ld_tl(t1, t1, ctx->mem_idx, MO_TESL ^ (sel * MO_BSWAP));
+
+ gen_store_mxu_gpr(t1, XRa);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
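+
+/*
+ * The 10-bit s12 field is scaled by 4 and sign-extended through bit 9,
+ * giving a byte offset in the range -2048..+2044. For example (sketch),
+ * s12 = 0x200 yields (0x200 << 2) | 0xFFFFF000 = 0xFFFFF800, i.e. -2048,
+ * while s12 = 0x1FF yields 0x1FF << 2 = 0x7FC = +2044.
+ */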
+
+
+/*
+ * MXU instruction category: logic
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * S32NOR S32AND S32OR S32XOR
+ */
+
+/*
+ * S32NOR XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'nor' operation
+ * applied to the content of XRb and XRc.
+ */
+static void gen_mxu_S32NOR(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to all 1s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0xFFFFFFFF);
+ } else if (unlikely(XRb == 0)) {
+ /* XRb zero register -> just set destination to the negation of XRc */
+ tcg_gen_not_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ } else if (unlikely(XRc == 0)) {
+        /* XRc zero register -> just set destination to the negation of XRb */
+ tcg_gen_not_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to the negation of XRb */
+ tcg_gen_not_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ tcg_gen_nor_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], mxu_gpr[XRc - 1]);
+ }
+}
+
+/*
+ * S32AND XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'and' operation
+ * applied to the content of XRb and XRc.
+ */
+static void gen_mxu_S32AND(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) || (XRc == 0))) {
+ /* one of operands zero register -> just set destination to all 0s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to one of them */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ tcg_gen_and_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], mxu_gpr[XRc - 1]);
+ }
+}
+
+/*
+ * S32OR XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'or' operation
+ * applied to the content of XRb and XRc.
+ */
+static void gen_mxu_S32OR(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to all 0s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == 0)) {
+ /* XRb zero register -> just set destination to the content of XRc */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ } else if (unlikely(XRc == 0)) {
+ /* XRc zero register -> just set destination to the content of XRb */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to one of them */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], mxu_gpr[XRc - 1]);
+ }
+}
+
+/*
+ * S32XOR XRa, XRb, XRc
+ * Update XRa with the result of logical bitwise 'xor' operation
+ * applied to the content of XRb and XRc.
+ */
+static void gen_mxu_S32XOR(DisasContext *ctx)
+{
+ uint32_t pad, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to all 0s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == 0)) {
+ /* XRb zero register -> just set destination to the content of XRc */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ } else if (unlikely(XRc == 0)) {
+ /* XRc zero register -> just set destination to the content of XRb */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to all 0s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else {
+ /* the most general case */
+ tcg_gen_xor_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], mxu_gpr[XRc - 1]);
+ }
+}
+
+
+/*
+ * MXU instruction category max/min
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * S32MAX D16MAX Q8MAX
+ * S32MIN D16MIN Q8MIN
+ */
+
+/*
+ * S32MAX XRa, XRb, XRc
+ * Update XRa with the maximum of signed 32-bit integers contained
+ * in XRb and XRc.
+ *
+ * S32MIN XRa, XRb, XRc
+ * Update XRa with the minimum of signed 32-bit integers contained
+ * in XRb and XRc.
+ */
+static void gen_mxu_S32MAX_S32MIN(DisasContext *ctx)
+{
+ uint32_t pad, opc, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ opc = extract32(ctx->opcode, 18, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely((XRb == 0) || (XRc == 0))) {
+ /* exactly one operand is zero register - find which one is not...*/
+ uint32_t XRx = XRb ? XRb : XRc;
+ /* ...and do max/min operation with one operand 0 */
+        TCGv_i32 t0 = tcg_const_i32(0);
+
+        if (opc == OPC_MXU_S32MAX) {
+            tcg_gen_smax_i32(mxu_gpr[XRa - 1], mxu_gpr[XRx - 1], t0);
+        } else {
+            tcg_gen_smin_i32(mxu_gpr[XRa - 1], mxu_gpr[XRx - 1], t0);
+        }
+        tcg_temp_free(t0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to one of them */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ if (opc == OPC_MXU_S32MAX) {
+ tcg_gen_smax_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1],
+ mxu_gpr[XRc - 1]);
+ } else {
+ tcg_gen_smin_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1],
+ mxu_gpr[XRc - 1]);
+ }
+ }
+}
+
+/*
+ * D16MAX
+ * Update XRa with the 16-bit-wise maximums of signed integers
+ * contained in XRb and XRc.
+ *
+ * D16MIN
+ * Update XRa with the 16-bit-wise minimums of signed integers
+ * contained in XRb and XRc.
+ */
+static void gen_mxu_D16MAX_D16MIN(DisasContext *ctx)
+{
+ uint32_t pad, opc, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ opc = extract32(ctx->opcode, 18, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely((XRb == 0) || (XRc == 0))) {
+ /* exactly one operand is zero register - find which one is not...*/
+ uint32_t XRx = XRb ? XRb : XRc;
+ /* ...and do half-word-wise max/min with one operand 0 */
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(0);
+
+ /* the left half-word first */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFFFF0000);
+ if (opc == OPC_MXU_D16MAX) {
+ tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ } else {
+ tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+
+ /* the right half-word */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0x0000FFFF);
+        /* move the half-word to the leftmost position */
+ tcg_gen_shli_i32(t0, t0, 16);
+ /* t0 will be max/min of t0 and t1 */
+ if (opc == OPC_MXU_D16MAX) {
+ tcg_gen_smax_i32(t0, t0, t1);
+ } else {
+ tcg_gen_smin_i32(t0, t0, t1);
+ }
+        /* return the resulting half-word to its original position */
+ tcg_gen_shri_i32(t0, t0, 16);
+ /* finally update the destination */
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to one of them */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ /* the left half-word first */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0xFFFF0000);
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFF0000);
+ if (opc == OPC_MXU_D16MAX) {
+ tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ } else {
+ tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+
+ /* the right half-word */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x0000FFFF);
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0x0000FFFF);
+ /* move half-words to the leftmost position */
+ tcg_gen_shli_i32(t0, t0, 16);
+ tcg_gen_shli_i32(t1, t1, 16);
+ /* t0 will be max/min of t0 and t1 */
+ if (opc == OPC_MXU_D16MAX) {
+ tcg_gen_smax_i32(t0, t0, t1);
+ } else {
+ tcg_gen_smin_i32(t0, t0, t1);
+ }
+        /* return the resulting half-word to its original position */
+ tcg_gen_shri_i32(t0, t0, 16);
+ /* finally update the destination */
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+}
+
+/*
+ * Q8MAX
+ * Update XRa with the 8-bit-wise maximums of signed integers
+ * contained in XRb and XRc.
+ *
+ * Q8MIN
+ * Update XRa with the 8-bit-wise minimums of signed integers
+ * contained in XRb and XRc.
+ */
+static void gen_mxu_Q8MAX_Q8MIN(DisasContext *ctx)
+{
+ uint32_t pad, opc, XRc, XRb, XRa;
+
+ pad = extract32(ctx->opcode, 21, 5);
+ opc = extract32(ctx->opcode, 18, 3);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to zero */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely((XRb == 0) || (XRc == 0))) {
+        /* exactly one operand is zero register - find which one is not...*/
+ uint32_t XRx = XRb ? XRb : XRc;
+ /* ...and do byte-wise max/min with one operand 0 */
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(0);
+ int32_t i;
+
+ /* the leftmost byte (byte 3) first */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFF000000);
+ if (opc == OPC_MXU_Q8MAX) {
+ tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ } else {
+ tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+
+ /* bytes 2, 1, 0 */
+ for (i = 2; i >= 0; i--) {
+ /* extract the byte */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRx - 1], 0xFF << (8 * i));
+ /* move the byte to the leftmost position */
+ tcg_gen_shli_i32(t0, t0, 8 * (3 - i));
+ /* t0 will be max/min of t0 and t1 */
+ if (opc == OPC_MXU_Q8MAX) {
+ tcg_gen_smax_i32(t0, t0, t1);
+ } else {
+ tcg_gen_smin_i32(t0, t0, t1);
+ }
+ /* return resulting byte to its original position */
+ tcg_gen_shri_i32(t0, t0, 8 * (3 - i));
+ /* finally update the destination */
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just set destination to one of them */
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ } else {
+ /* the most general case */
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+ int32_t i;
+
+        /* the leftmost bytes (byte 3 of each operand) first */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0xFF000000);
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF000000);
+ if (opc == OPC_MXU_Q8MAX) {
+ tcg_gen_smax_i32(mxu_gpr[XRa - 1], t0, t1);
+ } else {
+ tcg_gen_smin_i32(mxu_gpr[XRa - 1], t0, t1);
+ }
+
+ /* bytes 2, 1, 0 */
+ for (i = 2; i >= 0; i--) {
+ /* extract corresponding bytes */
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0xFF << (8 * i));
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF << (8 * i));
+ /* move the bytes to the leftmost position */
+ tcg_gen_shli_i32(t0, t0, 8 * (3 - i));
+ tcg_gen_shli_i32(t1, t1, 8 * (3 - i));
+ /* t0 will be max/min of t0 and t1 */
+ if (opc == OPC_MXU_Q8MAX) {
+ tcg_gen_smax_i32(t0, t0, t1);
+ } else {
+ tcg_gen_smin_i32(t0, t0, t1);
+ }
+ /* return resulting byte to its original position */
+ tcg_gen_shri_i32(t0, t0, 8 * (3 - i));
+ /* finally update the destination */
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], mxu_gpr[XRa - 1], t0);
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+}
+
+
+/*
+ * MXU instruction category: align
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * S32ALN S32ALNI
+ */
+
+/*
+ * S32ALNI XRc, XRb, XRa, optn3
+ * Arrange bytes from XRb and XRc according to one of five sets of
+ * rules determined by optn3, and place the result in XRa.
+ */
+static void gen_mxu_S32ALNI(DisasContext *ctx)
+{
+ uint32_t optn3, pad, XRc, XRb, XRa;
+
+ optn3 = extract32(ctx->opcode, 23, 3);
+ pad = extract32(ctx->opcode, 21, 2);
+ XRc = extract32(ctx->opcode, 14, 4);
+ XRb = extract32(ctx->opcode, 10, 4);
+ XRa = extract32(ctx->opcode, 6, 4);
+
+ if (unlikely(pad != 0)) {
+ /* opcode padding incorrect -> do nothing */
+ } else if (unlikely(XRa == 0)) {
+ /* destination is zero register -> do nothing */
+ } else if (unlikely((XRb == 0) && (XRc == 0))) {
+ /* both operands zero registers -> just set destination to all 0s */
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ } else if (unlikely(XRb == 0)) {
+        /* XRb zero register -> just appropriately shift XRc into XRa */
+ switch (optn3) {
+ case MXU_OPTN3_PTN0:
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ break;
+ case MXU_OPTN3_PTN1:
+ case MXU_OPTN3_PTN2:
+ case MXU_OPTN3_PTN3:
+ tcg_gen_shri_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1],
+ 8 * (4 - optn3));
+ break;
+ case MXU_OPTN3_PTN4:
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ break;
+ }
+ } else if (unlikely(XRc == 0)) {
+        /* XRc zero register -> just appropriately shift XRb into XRa */
+ switch (optn3) {
+ case MXU_OPTN3_PTN0:
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ break;
+ case MXU_OPTN3_PTN1:
+ case MXU_OPTN3_PTN2:
+ case MXU_OPTN3_PTN3:
+ tcg_gen_shri_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], 8 * optn3);
+ break;
+ case MXU_OPTN3_PTN4:
+ tcg_gen_movi_i32(mxu_gpr[XRa - 1], 0);
+ break;
+ }
+ } else if (unlikely(XRb == XRc)) {
+ /* both operands same -> just rotation or moving from any of them */
+ switch (optn3) {
+ case MXU_OPTN3_PTN0:
+ case MXU_OPTN3_PTN4:
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ break;
+ case MXU_OPTN3_PTN1:
+ case MXU_OPTN3_PTN2:
+ case MXU_OPTN3_PTN3:
+ tcg_gen_rotli_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1], 8 * optn3);
+ break;
+ }
+ } else {
+ /* the most general case */
+ switch (optn3) {
+ case MXU_OPTN3_PTN0:
+ {
+ /* */
+ /* XRb XRc */
+ /* +---------------+ */
+ /* | A B C D | E F G H */
+ /* +-------+-------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRb - 1]);
+ }
+ break;
+ case MXU_OPTN3_PTN1:
+ {
+ /* */
+ /* XRb XRc */
+ /* +-------------------+ */
+ /* A | B C D E | F G H */
+ /* +---------+---------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x00FFFFFF);
+ tcg_gen_shli_i32(t0, t0, 8);
+
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFF000000);
+ tcg_gen_shri_i32(t1, t1, 24);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ break;
+ case MXU_OPTN3_PTN2:
+ {
+ /* */
+ /* XRb XRc */
+ /* +-------------------+ */
+ /* A B | C D E F | G H */
+ /* +---------+---------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x0000FFFF);
+ tcg_gen_shli_i32(t0, t0, 16);
+
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFF0000);
+ tcg_gen_shri_i32(t1, t1, 16);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ break;
+ case MXU_OPTN3_PTN3:
+ {
+ /* */
+ /* XRb XRc */
+ /* +-------------------+ */
+ /* A B C | D E F G | H */
+ /* +---------+---------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ TCGv_i32 t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, mxu_gpr[XRb - 1], 0x000000FF);
+ tcg_gen_shli_i32(t0, t0, 24);
+
+ tcg_gen_andi_i32(t1, mxu_gpr[XRc - 1], 0xFFFFFF00);
+ tcg_gen_shri_i32(t1, t1, 8);
+
+ tcg_gen_or_i32(mxu_gpr[XRa - 1], t0, t1);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ break;
+ case MXU_OPTN3_PTN4:
+ {
+ /* */
+ /* XRb XRc */
+ /* +---------------+ */
+ /* A B C D | E F G H | */
+ /* +-------+-------+ */
+ /* | */
+ /* XRa */
+ /* */
+
+ tcg_gen_mov_i32(mxu_gpr[XRa - 1], mxu_gpr[XRc - 1]);
+ }
+ break;
+ }
+ }
+}
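+
+/*
+ * Worked example for the general case above (arbitrary values): with
+ * XRb = 0x11223344 ("A B C D") and XRc = 0x55667788 ("E F G H"),
+ *
+ *     PTN0 -> 0x11223344    PTN1 -> 0x22334455    PTN2 -> 0x33445566
+ *     PTN3 -> 0x44556677    PTN4 -> 0x55667788
+ *
+ * i.e. optn3 selects how many leading bytes of XRb are dropped before
+ * the four-byte window is taken from the concatenation XRb:XRc.
+ */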
+
+
+/*
+ * Decoding engine for MXU
+ * =======================
+ */
+
+static void decode_opc_mxu__pool00(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_S32MAX:
+ case OPC_MXU_S32MIN:
+ gen_mxu_S32MAX_S32MIN(ctx);
+ break;
+ case OPC_MXU_D16MAX:
+ case OPC_MXU_D16MIN:
+ gen_mxu_D16MAX_D16MIN(ctx);
+ break;
+ case OPC_MXU_Q8MAX:
+ case OPC_MXU_Q8MIN:
+ gen_mxu_Q8MAX_Q8MIN(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool04(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 20, 1);
+
+ switch (opcode) {
+ case OPC_MXU_S32LDD:
+ case OPC_MXU_S32LDDR:
+ gen_mxu_s32ldd_s32lddr(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool16(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 18, 3);
+
+ switch (opcode) {
+ case OPC_MXU_S32ALNI:
+ gen_mxu_S32ALNI(ctx);
+ break;
+ case OPC_MXU_S32NOR:
+ gen_mxu_S32NOR(ctx);
+ break;
+ case OPC_MXU_S32AND:
+ gen_mxu_S32AND(ctx);
+ break;
+ case OPC_MXU_S32OR:
+ gen_mxu_S32OR(ctx);
+ break;
+ case OPC_MXU_S32XOR:
+ gen_mxu_S32XOR(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_mxu__pool19(DisasContext *ctx)
+{
+ uint32_t opcode = extract32(ctx->opcode, 22, 2);
+
+ switch (opcode) {
+ case OPC_MXU_Q8MUL:
+ case OPC_MXU_Q8MULSU:
+ gen_mxu_q8mul_q8mulsu(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+bool decode_ase_mxu(DisasContext *ctx, uint32_t insn)
+{
+ uint32_t opcode = extract32(insn, 0, 6);
+
+ if (opcode == OPC_MXU_S32M2I) {
+ gen_mxu_s32m2i(ctx);
+ return true;
+ }
+
+ if (opcode == OPC_MXU_S32I2M) {
+ gen_mxu_s32i2m(ctx);
+ return true;
+ }
+
+ {
+ TCGv t_mxu_cr = tcg_temp_new();
+ TCGLabel *l_exit = gen_new_label();
+
+ gen_load_mxu_cr(t_mxu_cr);
+ tcg_gen_andi_tl(t_mxu_cr, t_mxu_cr, MXU_CR_MXU_EN);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t_mxu_cr, MXU_CR_MXU_EN, l_exit);
+
+ switch (opcode) {
+ case OPC_MXU__POOL00:
+ decode_opc_mxu__pool00(ctx);
+ break;
+ case OPC_MXU_D16MUL:
+ gen_mxu_d16mul(ctx);
+ break;
+ case OPC_MXU_D16MAC:
+ gen_mxu_d16mac(ctx);
+ break;
+ case OPC_MXU__POOL04:
+ decode_opc_mxu__pool04(ctx);
+ break;
+ case OPC_MXU_S8LDD:
+ gen_mxu_s8ldd(ctx);
+ break;
+ case OPC_MXU__POOL16:
+ decode_opc_mxu__pool16(ctx);
+ break;
+ case OPC_MXU__POOL19:
+ decode_opc_mxu__pool19(ctx);
+ break;
+ default:
+ MIPS_INVAL("decode_opc_mxu");
+ gen_reserved_instruction(ctx);
+ }
+
+ gen_set_label(l_exit);
+ tcg_temp_free(t_mxu_cr);
+ }
+
+ return true;
+}
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
new file mode 100644
index 000000000..2c022a49f
--- /dev/null
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -0,0 +1,4928 @@
+/*
+ * MIPS emulation for QEMU - nanoMIPS translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+/* MAJOR, P16, and P32 pools opcodes */
+enum {
+ NM_P_ADDIU = 0x00,
+ NM_ADDIUPC = 0x01,
+ NM_MOVE_BALC = 0x02,
+ NM_P16_MV = 0x04,
+ NM_LW16 = 0x05,
+ NM_BC16 = 0x06,
+ NM_P16_SR = 0x07,
+
+ NM_POOL32A = 0x08,
+ NM_P_BAL = 0x0a,
+ NM_P16_SHIFT = 0x0c,
+ NM_LWSP16 = 0x0d,
+ NM_BALC16 = 0x0e,
+ NM_P16_4X4 = 0x0f,
+
+ NM_P_GP_W = 0x10,
+ NM_P_GP_BH = 0x11,
+ NM_P_J = 0x12,
+ NM_P16C = 0x14,
+ NM_LWGP16 = 0x15,
+ NM_P16_LB = 0x17,
+
+ NM_P48I = 0x18,
+ NM_P16_A1 = 0x1c,
+ NM_LW4X4 = 0x1d,
+ NM_P16_LH = 0x1f,
+
+ NM_P_U12 = 0x20,
+ NM_P_LS_U12 = 0x21,
+ NM_P_BR1 = 0x22,
+ NM_P16_A2 = 0x24,
+ NM_SW16 = 0x25,
+ NM_BEQZC16 = 0x26,
+
+ NM_POOL32F = 0x28,
+ NM_P_LS_S9 = 0x29,
+ NM_P_BR2 = 0x2a,
+
+ NM_P16_ADDU = 0x2c,
+ NM_SWSP16 = 0x2d,
+ NM_BNEZC16 = 0x2e,
+ NM_MOVEP = 0x2f,
+
+ NM_POOL32S = 0x30,
+ NM_P_BRI = 0x32,
+ NM_LI16 = 0x34,
+ NM_SWGP16 = 0x35,
+ NM_P16_BR = 0x36,
+
+ NM_P_LUI = 0x38,
+ NM_ANDI16 = 0x3c,
+ NM_SW4X4 = 0x3d,
+ NM_MOVEPREV = 0x3f,
+};
+
+/* POOL32A instruction pool */
+enum {
+ NM_POOL32A0 = 0x00,
+ NM_SPECIAL2 = 0x01,
+ NM_COP2_1 = 0x02,
+ NM_UDI = 0x03,
+ NM_POOL32A5 = 0x05,
+ NM_POOL32A7 = 0x07,
+};
+
+/* P.GP.W instruction pool */
+enum {
+ NM_ADDIUGP_W = 0x00,
+ NM_LWGP = 0x02,
+ NM_SWGP = 0x03,
+};
+
+/* P48I instruction pool */
+enum {
+ NM_LI48 = 0x00,
+ NM_ADDIU48 = 0x01,
+ NM_ADDIUGP48 = 0x02,
+ NM_ADDIUPC48 = 0x03,
+ NM_LWPC48 = 0x0b,
+ NM_SWPC48 = 0x0f,
+};
+
+/* P.U12 instruction pool */
+enum {
+ NM_ORI = 0x00,
+ NM_XORI = 0x01,
+ NM_ANDI = 0x02,
+ NM_P_SR = 0x03,
+ NM_SLTI = 0x04,
+ NM_SLTIU = 0x05,
+ NM_SEQI = 0x06,
+ NM_ADDIUNEG = 0x08,
+ NM_P_SHIFT = 0x0c,
+ NM_P_ROTX = 0x0d,
+ NM_P_INS = 0x0e,
+ NM_P_EXT = 0x0f,
+};
+
+/* POOL32F instruction pool */
+enum {
+ NM_POOL32F_0 = 0x00,
+ NM_POOL32F_3 = 0x03,
+ NM_POOL32F_5 = 0x05,
+};
+
+/* POOL32S instruction pool */
+enum {
+ NM_POOL32S_0 = 0x00,
+ NM_POOL32S_4 = 0x04,
+};
+
+/* P.LUI instruction pool */
+enum {
+ NM_LUI = 0x00,
+ NM_ALUIPC = 0x01,
+};
+
+/* P.GP.BH instruction pool */
+enum {
+ NM_LBGP = 0x00,
+ NM_SBGP = 0x01,
+ NM_LBUGP = 0x02,
+ NM_ADDIUGP_B = 0x03,
+ NM_P_GP_LH = 0x04,
+ NM_P_GP_SH = 0x05,
+ NM_P_GP_CP1 = 0x06,
+};
+
+/* P.LS.U12 instruction pool */
+enum {
+ NM_LB = 0x00,
+ NM_SB = 0x01,
+ NM_LBU = 0x02,
+ NM_P_PREFU12 = 0x03,
+ NM_LH = 0x04,
+ NM_SH = 0x05,
+ NM_LHU = 0x06,
+ NM_LWU = 0x07,
+ NM_LW = 0x08,
+ NM_SW = 0x09,
+ NM_LWC1 = 0x0a,
+ NM_SWC1 = 0x0b,
+ NM_LDC1 = 0x0e,
+ NM_SDC1 = 0x0f,
+};
+
+/* P.LS.S9 instruction pool */
+enum {
+ NM_P_LS_S0 = 0x00,
+ NM_P_LS_S1 = 0x01,
+ NM_P_LS_E0 = 0x02,
+ NM_P_LS_WM = 0x04,
+ NM_P_LS_UAWM = 0x05,
+};
+
+/* P.BAL instruction pool */
+enum {
+ NM_BC = 0x00,
+ NM_BALC = 0x01,
+};
+
+/* P.J instruction pool */
+enum {
+ NM_JALRC = 0x00,
+ NM_JALRC_HB = 0x01,
+ NM_P_BALRSC = 0x08,
+};
+
+/* P.BR1 instruction pool */
+enum {
+ NM_BEQC = 0x00,
+ NM_P_BR3A = 0x01,
+ NM_BGEC = 0x02,
+ NM_BGEUC = 0x03,
+};
+
+/* P.BR2 instruction pool */
+enum {
+ NM_BNEC = 0x00,
+ NM_BLTC = 0x02,
+ NM_BLTUC = 0x03,
+};
+
+/* P.BRI instruction pool */
+enum {
+ NM_BEQIC = 0x00,
+ NM_BBEQZC = 0x01,
+ NM_BGEIC = 0x02,
+ NM_BGEIUC = 0x03,
+ NM_BNEIC = 0x04,
+ NM_BBNEZC = 0x05,
+ NM_BLTIC = 0x06,
+ NM_BLTIUC = 0x07,
+};
+
+/* P16.SHIFT instruction pool */
+enum {
+ NM_SLL16 = 0x00,
+ NM_SRL16 = 0x01,
+};
+
+/* POOL16C instruction pool */
+enum {
+ NM_POOL16C_0 = 0x00,
+ NM_LWXS16 = 0x01,
+};
+
+/* P16.A1 instruction pool */
+enum {
+ NM_ADDIUR1SP = 0x01,
+};
+
+/* P16.A2 instruction pool */
+enum {
+ NM_ADDIUR2 = 0x00,
+ NM_P_ADDIURS5 = 0x01,
+};
+
+/* P16.ADDU instruction pool */
+enum {
+ NM_ADDU16 = 0x00,
+ NM_SUBU16 = 0x01,
+};
+
+/* P16.SR instruction pool */
+enum {
+ NM_SAVE16 = 0x00,
+ NM_RESTORE_JRC16 = 0x01,
+};
+
+/* P16.4X4 instruction pool */
+enum {
+ NM_ADDU4X4 = 0x00,
+ NM_MUL4X4 = 0x01,
+};
+
+/* P16.LB instruction pool */
+enum {
+ NM_LB16 = 0x00,
+ NM_SB16 = 0x01,
+ NM_LBU16 = 0x02,
+};
+
+/* P16.LH instruction pool */
+enum {
+ NM_LH16 = 0x00,
+ NM_SH16 = 0x01,
+ NM_LHU16 = 0x02,
+};
+
+/* P.RI instruction pool */
+enum {
+ NM_SIGRIE = 0x00,
+ NM_P_SYSCALL = 0x01,
+ NM_BREAK = 0x02,
+ NM_SDBBP = 0x03,
+};
+
+/* POOL32A0 instruction pool */
+enum {
+ NM_P_TRAP = 0x00,
+ NM_SEB = 0x01,
+ NM_SLLV = 0x02,
+ NM_MUL = 0x03,
+ NM_MFC0 = 0x06,
+ NM_MFHC0 = 0x07,
+ NM_SEH = 0x09,
+ NM_SRLV = 0x0a,
+ NM_MUH = 0x0b,
+ NM_MTC0 = 0x0e,
+ NM_MTHC0 = 0x0f,
+ NM_SRAV = 0x12,
+ NM_MULU = 0x13,
+ NM_ROTRV = 0x1a,
+ NM_MUHU = 0x1b,
+ NM_ADD = 0x22,
+ NM_DIV = 0x23,
+ NM_ADDU = 0x2a,
+ NM_MOD = 0x2b,
+ NM_SUB = 0x32,
+ NM_DIVU = 0x33,
+ NM_RDHWR = 0x38,
+ NM_SUBU = 0x3a,
+ NM_MODU = 0x3b,
+ NM_P_CMOVE = 0x42,
+ NM_FORK = 0x45,
+ NM_MFTR = 0x46,
+ NM_MFHTR = 0x47,
+ NM_AND = 0x4a,
+ NM_YIELD = 0x4d,
+ NM_MTTR = 0x4e,
+ NM_MTHTR = 0x4f,
+ NM_OR = 0x52,
+ NM_D_E_MT_VPE = 0x56,
+ NM_NOR = 0x5a,
+ NM_XOR = 0x62,
+ NM_SLT = 0x6a,
+ NM_P_SLTU = 0x72,
+ NM_SOV = 0x7a,
+};
+
+/* CRC32 instruction pool */
+enum {
+ NM_CRC32B = 0x00,
+ NM_CRC32H = 0x01,
+ NM_CRC32W = 0x02,
+ NM_CRC32CB = 0x04,
+ NM_CRC32CH = 0x05,
+ NM_CRC32CW = 0x06,
+};
+
+/* POOL32A5 instruction pool */
+enum {
+ NM_CMP_EQ_PH = 0x00,
+ NM_CMP_LT_PH = 0x08,
+ NM_CMP_LE_PH = 0x10,
+ NM_CMPGU_EQ_QB = 0x18,
+ NM_CMPGU_LT_QB = 0x20,
+ NM_CMPGU_LE_QB = 0x28,
+ NM_CMPGDU_EQ_QB = 0x30,
+ NM_CMPGDU_LT_QB = 0x38,
+ NM_CMPGDU_LE_QB = 0x40,
+ NM_CMPU_EQ_QB = 0x48,
+ NM_CMPU_LT_QB = 0x50,
+ NM_CMPU_LE_QB = 0x58,
+ NM_ADDQ_S_W = 0x60,
+ NM_SUBQ_S_W = 0x68,
+ NM_ADDSC = 0x70,
+ NM_ADDWC = 0x78,
+
+ NM_ADDQ_S_PH = 0x01,
+ NM_ADDQH_R_PH = 0x09,
+ NM_ADDQH_R_W = 0x11,
+ NM_ADDU_S_QB = 0x19,
+ NM_ADDU_S_PH = 0x21,
+ NM_ADDUH_R_QB = 0x29,
+ NM_SHRAV_R_PH = 0x31,
+ NM_SHRAV_R_QB = 0x39,
+ NM_SUBQ_S_PH = 0x41,
+ NM_SUBQH_R_PH = 0x49,
+ NM_SUBQH_R_W = 0x51,
+ NM_SUBU_S_QB = 0x59,
+ NM_SUBU_S_PH = 0x61,
+ NM_SUBUH_R_QB = 0x69,
+ NM_SHLLV_S_PH = 0x71,
+ NM_PRECR_SRA_R_PH_W = 0x79,
+
+ NM_MULEU_S_PH_QBL = 0x12,
+ NM_MULEU_S_PH_QBR = 0x1a,
+ NM_MULQ_RS_PH = 0x22,
+ NM_MULQ_S_PH = 0x2a,
+ NM_MULQ_RS_W = 0x32,
+ NM_MULQ_S_W = 0x3a,
+ NM_APPEND = 0x42,
+ NM_MODSUB = 0x52,
+ NM_SHRAV_R_W = 0x5a,
+ NM_SHRLV_PH = 0x62,
+ NM_SHRLV_QB = 0x6a,
+ NM_SHLLV_QB = 0x72,
+ NM_SHLLV_S_W = 0x7a,
+
+ NM_SHILO = 0x03,
+
+ NM_MULEQ_S_W_PHL = 0x04,
+ NM_MULEQ_S_W_PHR = 0x0c,
+
+ NM_MUL_S_PH = 0x05,
+ NM_PRECR_QB_PH = 0x0d,
+ NM_PRECRQ_QB_PH = 0x15,
+ NM_PRECRQ_PH_W = 0x1d,
+ NM_PRECRQ_RS_PH_W = 0x25,
+ NM_PRECRQU_S_QB_PH = 0x2d,
+ NM_PACKRL_PH = 0x35,
+ NM_PICK_QB = 0x3d,
+ NM_PICK_PH = 0x45,
+
+ NM_SHRA_R_W = 0x5e,
+ NM_SHRA_R_PH = 0x66,
+ NM_SHLL_S_PH = 0x76,
+ NM_SHLL_S_W = 0x7e,
+
+ NM_REPL_PH = 0x07
+};
+
+/* POOL32A7 instruction pool */
+enum {
+ NM_P_LSX = 0x00,
+ NM_LSA = 0x01,
+ NM_EXTW = 0x03,
+ NM_POOL32AXF = 0x07,
+};
+
+/* P.SR instruction pool */
+enum {
+ NM_PP_SR = 0x00,
+ NM_P_SR_F = 0x01,
+};
+
+/* P.SHIFT instruction pool */
+enum {
+ NM_P_SLL = 0x00,
+ NM_SRL = 0x02,
+ NM_SRA = 0x04,
+ NM_ROTR = 0x06,
+};
+
+/* P.ROTX instruction pool */
+enum {
+ NM_ROTX = 0x00,
+};
+
+/* P.INS instruction pool */
+enum {
+ NM_INS = 0x00,
+};
+
+/* P.EXT instruction pool */
+enum {
+ NM_EXT = 0x00,
+};
+
+/* POOL32F_0 (fmt) instruction pool */
+enum {
+ NM_RINT_S = 0x04,
+ NM_RINT_D = 0x44,
+ NM_ADD_S = 0x06,
+ NM_SELEQZ_S = 0x07,
+ NM_SELEQZ_D = 0x47,
+ NM_CLASS_S = 0x0c,
+ NM_CLASS_D = 0x4c,
+ NM_SUB_S = 0x0e,
+ NM_SELNEZ_S = 0x0f,
+ NM_SELNEZ_D = 0x4f,
+ NM_MUL_S = 0x16,
+ NM_SEL_S = 0x17,
+ NM_SEL_D = 0x57,
+ NM_DIV_S = 0x1e,
+ NM_ADD_D = 0x26,
+ NM_SUB_D = 0x2e,
+ NM_MUL_D = 0x36,
+ NM_MADDF_S = 0x37,
+ NM_MADDF_D = 0x77,
+ NM_DIV_D = 0x3e,
+ NM_MSUBF_S = 0x3f,
+ NM_MSUBF_D = 0x7f,
+};
+
+/* POOL32F_3 instruction pool */
+enum {
+ NM_MIN_FMT = 0x00,
+ NM_MAX_FMT = 0x01,
+ NM_MINA_FMT = 0x04,
+ NM_MAXA_FMT = 0x05,
+ NM_POOL32FXF = 0x07,
+};
+
+/* POOL32F_5 instruction pool */
+enum {
+ NM_CMP_CONDN_S = 0x00,
+ NM_CMP_CONDN_D = 0x02,
+};
+
+/* P.GP.LH instruction pool */
+enum {
+ NM_LHGP = 0x00,
+ NM_LHUGP = 0x01,
+};
+
+/* P.GP.SH instruction pool */
+enum {
+ NM_SHGP = 0x00,
+};
+
+/* P.GP.CP1 instruction pool */
+enum {
+ NM_LWC1GP = 0x00,
+ NM_SWC1GP = 0x01,
+ NM_LDC1GP = 0x02,
+ NM_SDC1GP = 0x03,
+};
+
+/* P.LS.S0 instruction pool */
+enum {
+ NM_LBS9 = 0x00,
+ NM_LHS9 = 0x04,
+ NM_LWS9 = 0x08,
+ NM_LDS9 = 0x0c,
+
+ NM_SBS9 = 0x01,
+ NM_SHS9 = 0x05,
+ NM_SWS9 = 0x09,
+ NM_SDS9 = 0x0d,
+
+ NM_LBUS9 = 0x02,
+ NM_LHUS9 = 0x06,
+ NM_LWC1S9 = 0x0a,
+ NM_LDC1S9 = 0x0e,
+
+ NM_P_PREFS9 = 0x03,
+ NM_LWUS9 = 0x07,
+ NM_SWC1S9 = 0x0b,
+ NM_SDC1S9 = 0x0f,
+};
+
+/* P.LS.S1 instruction pool */
+enum {
+ NM_ASET_ACLR = 0x02,
+ NM_UALH = 0x04,
+ NM_UASH = 0x05,
+ NM_CACHE = 0x07,
+ NM_P_LL = 0x0a,
+ NM_P_SC = 0x0b,
+};
+
+/* P.LS.E0 instruction pool */
+enum {
+ NM_LBE = 0x00,
+ NM_SBE = 0x01,
+ NM_LBUE = 0x02,
+ NM_P_PREFE = 0x03,
+ NM_LHE = 0x04,
+ NM_SHE = 0x05,
+ NM_LHUE = 0x06,
+ NM_CACHEE = 0x07,
+ NM_LWE = 0x08,
+ NM_SWE = 0x09,
+ NM_P_LLE = 0x0a,
+ NM_P_SCE = 0x0b,
+};
+
+/* P.PREFE instruction pool */
+enum {
+ NM_SYNCIE = 0x00,
+ NM_PREFE = 0x01,
+};
+
+/* P.LLE instruction pool */
+enum {
+ NM_LLE = 0x00,
+ NM_LLWPE = 0x01,
+};
+
+/* P.SCE instruction pool */
+enum {
+ NM_SCE = 0x00,
+ NM_SCWPE = 0x01,
+};
+
+/* P.LS.WM instruction pool */
+enum {
+ NM_LWM = 0x00,
+ NM_SWM = 0x01,
+};
+
+/* P.LS.UAWM instruction pool */
+enum {
+ NM_UALWM = 0x00,
+ NM_UASWM = 0x01,
+};
+
+/* P.BR3A instruction pool */
+enum {
+ NM_BC1EQZC = 0x00,
+ NM_BC1NEZC = 0x01,
+ NM_BC2EQZC = 0x02,
+ NM_BC2NEZC = 0x03,
+ NM_BPOSGE32C = 0x04,
+};
+
+/* P16.RI instruction pool */
+enum {
+ NM_P16_SYSCALL = 0x01,
+ NM_BREAK16 = 0x02,
+ NM_SDBBP16 = 0x03,
+};
+
+/* POOL16C_0 instruction pool */
+enum {
+ NM_POOL16C_00 = 0x00,
+};
+
+/* P16.JRC instruction pool */
+enum {
+ NM_JRC = 0x00,
+ NM_JALRC16 = 0x01,
+};
+
+/* P.SYSCALL instruction pool */
+enum {
+ NM_SYSCALL = 0x00,
+ NM_HYPCALL = 0x01,
+};
+
+/* P.TRAP instruction pool */
+enum {
+ NM_TEQ = 0x00,
+ NM_TNE = 0x01,
+};
+
+/* P.CMOVE instruction pool */
+enum {
+ NM_MOVZ = 0x00,
+ NM_MOVN = 0x01,
+};
+
+/* POOL32Axf instruction pool */
+enum {
+ NM_POOL32AXF_1 = 0x01,
+ NM_POOL32AXF_2 = 0x02,
+ NM_POOL32AXF_4 = 0x04,
+ NM_POOL32AXF_5 = 0x05,
+ NM_POOL32AXF_7 = 0x07,
+};
+
+/* POOL32Axf_1 instruction pool */
+enum {
+ NM_POOL32AXF_1_0 = 0x00,
+ NM_POOL32AXF_1_1 = 0x01,
+ NM_POOL32AXF_1_3 = 0x03,
+ NM_POOL32AXF_1_4 = 0x04,
+ NM_POOL32AXF_1_5 = 0x05,
+ NM_POOL32AXF_1_7 = 0x07,
+};
+
+/* POOL32Axf_2 instruction pool */
+enum {
+ NM_POOL32AXF_2_0_7 = 0x00,
+ NM_POOL32AXF_2_8_15 = 0x01,
+ NM_POOL32AXF_2_16_23 = 0x02,
+ NM_POOL32AXF_2_24_31 = 0x03,
+};
+
+/* POOL32Axf_7 instruction pool */
+enum {
+ NM_SHRA_R_QB = 0x0,
+ NM_SHRL_PH = 0x1,
+ NM_REPL_QB = 0x2,
+};
+
+/* POOL32Axf_1_0 instruction pool */
+enum {
+ NM_MFHI = 0x0,
+ NM_MFLO = 0x1,
+ NM_MTHI = 0x2,
+ NM_MTLO = 0x3,
+};
+
+/* POOL32Axf_1_1 instruction pool */
+enum {
+ NM_MTHLIP = 0x0,
+ NM_SHILOV = 0x1,
+};
+
+/* POOL32Axf_1_3 instruction pool */
+enum {
+ NM_RDDSP = 0x0,
+ NM_WRDSP = 0x1,
+ NM_EXTP = 0x2,
+ NM_EXTPDP = 0x3,
+};
+
+/* POOL32Axf_1_4 instruction pool */
+enum {
+ NM_SHLL_QB = 0x0,
+ NM_SHRL_QB = 0x1,
+};
+
+/* POOL32Axf_1_5 instruction pool */
+enum {
+ NM_MAQ_S_W_PHR = 0x0,
+ NM_MAQ_S_W_PHL = 0x1,
+ NM_MAQ_SA_W_PHR = 0x2,
+ NM_MAQ_SA_W_PHL = 0x3,
+};
+
+/* POOL32Axf_1_7 instruction pool */
+enum {
+ NM_EXTR_W = 0x0,
+ NM_EXTR_R_W = 0x1,
+ NM_EXTR_RS_W = 0x2,
+ NM_EXTR_S_H = 0x3,
+};
+
+/* POOL32Axf_2_0_7 instruction pool */
+enum {
+ NM_DPA_W_PH = 0x0,
+ NM_DPAQ_S_W_PH = 0x1,
+ NM_DPS_W_PH = 0x2,
+ NM_DPSQ_S_W_PH = 0x3,
+ NM_BALIGN = 0x4,
+ NM_MADD = 0x5,
+ NM_MULT = 0x6,
+ NM_EXTRV_W = 0x7,
+};
+
+/* POOL32Axf_2_8_15 instruction pool */
+enum {
+ NM_DPAX_W_PH = 0x0,
+ NM_DPAQ_SA_L_W = 0x1,
+ NM_DPSX_W_PH = 0x2,
+ NM_DPSQ_SA_L_W = 0x3,
+ NM_MADDU = 0x5,
+ NM_MULTU = 0x6,
+ NM_EXTRV_R_W = 0x7,
+};
+
+/* POOL32Axf_2_16_23 instruction pool */
+enum {
+ NM_DPAU_H_QBL = 0x0,
+ NM_DPAQX_S_W_PH = 0x1,
+ NM_DPSU_H_QBL = 0x2,
+ NM_DPSQX_S_W_PH = 0x3,
+ NM_EXTPV = 0x4,
+ NM_MSUB = 0x5,
+ NM_MULSA_W_PH = 0x6,
+ NM_EXTRV_RS_W = 0x7,
+};
+
+/* POOL32Axf_2_24_31 instruction pool */
+enum {
+ NM_DPAU_H_QBR = 0x0,
+ NM_DPAQX_SA_W_PH = 0x1,
+ NM_DPSU_H_QBR = 0x2,
+ NM_DPSQX_SA_W_PH = 0x3,
+ NM_EXTPDPV = 0x4,
+ NM_MSUBU = 0x5,
+ NM_MULSAQ_S_W_PH = 0x6,
+ NM_EXTRV_S_H = 0x7,
+};
+
+/* POOL32Axf_{4, 5} instruction pool */
+enum {
+ NM_CLO = 0x25,
+ NM_CLZ = 0x2d,
+
+ NM_TLBP = 0x01,
+ NM_TLBR = 0x09,
+ NM_TLBWI = 0x11,
+ NM_TLBWR = 0x19,
+ NM_TLBINV = 0x03,
+ NM_TLBINVF = 0x0b,
+ NM_DI = 0x23,
+ NM_EI = 0x2b,
+ NM_RDPGPR = 0x70,
+ NM_WRPGPR = 0x78,
+ NM_WAIT = 0x61,
+ NM_DERET = 0x71,
+ NM_ERETX = 0x79,
+
+ /* nanoMIPS DSP instructions */
+ NM_ABSQ_S_QB = 0x00,
+ NM_ABSQ_S_PH = 0x08,
+ NM_ABSQ_S_W = 0x10,
+ NM_PRECEQ_W_PHL = 0x28,
+ NM_PRECEQ_W_PHR = 0x30,
+ NM_PRECEQU_PH_QBL = 0x38,
+ NM_PRECEQU_PH_QBR = 0x48,
+ NM_PRECEU_PH_QBL = 0x58,
+ NM_PRECEU_PH_QBR = 0x68,
+ NM_PRECEQU_PH_QBLA = 0x39,
+ NM_PRECEQU_PH_QBRA = 0x49,
+ NM_PRECEU_PH_QBLA = 0x59,
+ NM_PRECEU_PH_QBRA = 0x69,
+ NM_REPLV_PH = 0x01,
+ NM_REPLV_QB = 0x09,
+ NM_BITREV = 0x18,
+ NM_INSV = 0x20,
+ NM_RADDU_W_QB = 0x78,
+
+ NM_BITSWAP = 0x05,
+ NM_WSBH = 0x3d,
+};
+
+/* PP.SR instruction pool */
+enum {
+ NM_SAVE = 0x00,
+ NM_RESTORE = 0x02,
+ NM_RESTORE_JRC = 0x03,
+};
+
+/* P.SR.F instruction pool */
+enum {
+ NM_SAVEF = 0x00,
+ NM_RESTOREF = 0x01,
+};
+
+/* P16.SYSCALL instruction pool */
+enum {
+ NM_SYSCALL16 = 0x00,
+ NM_HYPCALL16 = 0x01,
+};
+
+/* POOL16C_00 instruction pool */
+enum {
+ NM_NOT16 = 0x00,
+ NM_XOR16 = 0x01,
+ NM_AND16 = 0x02,
+ NM_OR16 = 0x03,
+};
+
+/* PP.LSX and PP.LSXS instruction pool */
+enum {
+ NM_LBX = 0x00,
+ NM_LHX = 0x04,
+ NM_LWX = 0x08,
+ NM_LDX = 0x0c,
+
+ NM_SBX = 0x01,
+ NM_SHX = 0x05,
+ NM_SWX = 0x09,
+ NM_SDX = 0x0d,
+
+ NM_LBUX = 0x02,
+ NM_LHUX = 0x06,
+ NM_LWC1X = 0x0a,
+ NM_LDC1X = 0x0e,
+
+ NM_LWUX = 0x07,
+ NM_SWC1X = 0x0b,
+ NM_SDC1X = 0x0f,
+
+ NM_LHXS = 0x04,
+ NM_LWXS = 0x08,
+ NM_LDXS = 0x0c,
+
+ NM_SHXS = 0x05,
+ NM_SWXS = 0x09,
+ NM_SDXS = 0x0d,
+
+ NM_LHUXS = 0x06,
+ NM_LWC1XS = 0x0a,
+ NM_LDC1XS = 0x0e,
+
+ NM_LWUXS = 0x07,
+ NM_SWC1XS = 0x0b,
+ NM_SDC1XS = 0x0f,
+};
+
+/* ERETx instruction pool */
+enum {
+ NM_ERET = 0x00,
+ NM_ERETNC = 0x01,
+};
+
+/* POOL32FxF_{0, 1} instruction pool */
+enum {
+ NM_CFC1 = 0x40,
+ NM_CTC1 = 0x60,
+ NM_MFC1 = 0x80,
+ NM_MTC1 = 0xa0,
+ NM_MFHC1 = 0xc0,
+ NM_MTHC1 = 0xe0,
+
+ NM_CVT_S_PL = 0x84,
+ NM_CVT_S_PU = 0xa4,
+
+ NM_CVT_L_S = 0x004,
+ NM_CVT_L_D = 0x104,
+ NM_CVT_W_S = 0x024,
+ NM_CVT_W_D = 0x124,
+
+ NM_RSQRT_S = 0x008,
+ NM_RSQRT_D = 0x108,
+
+ NM_SQRT_S = 0x028,
+ NM_SQRT_D = 0x128,
+
+ NM_RECIP_S = 0x048,
+ NM_RECIP_D = 0x148,
+
+ NM_FLOOR_L_S = 0x00c,
+ NM_FLOOR_L_D = 0x10c,
+
+ NM_FLOOR_W_S = 0x02c,
+ NM_FLOOR_W_D = 0x12c,
+
+ NM_CEIL_L_S = 0x04c,
+ NM_CEIL_L_D = 0x14c,
+ NM_CEIL_W_S = 0x06c,
+ NM_CEIL_W_D = 0x16c,
+ NM_TRUNC_L_S = 0x08c,
+ NM_TRUNC_L_D = 0x18c,
+ NM_TRUNC_W_S = 0x0ac,
+ NM_TRUNC_W_D = 0x1ac,
+ NM_ROUND_L_S = 0x0cc,
+ NM_ROUND_L_D = 0x1cc,
+ NM_ROUND_W_S = 0x0ec,
+ NM_ROUND_W_D = 0x1ec,
+
+ NM_MOV_S = 0x01,
+ NM_MOV_D = 0x81,
+ NM_ABS_S = 0x0d,
+ NM_ABS_D = 0x8d,
+ NM_NEG_S = 0x2d,
+ NM_NEG_D = 0xad,
+ NM_CVT_D_S = 0x04d,
+ NM_CVT_D_W = 0x0cd,
+ NM_CVT_D_L = 0x14d,
+ NM_CVT_S_D = 0x06d,
+ NM_CVT_S_W = 0x0ed,
+ NM_CVT_S_L = 0x16d,
+};
+
+/* P.LL instruction pool */
+enum {
+ NM_LL = 0x00,
+ NM_LLWP = 0x01,
+};
+
+/* P.SC instruction pool */
+enum {
+ NM_SC = 0x00,
+ NM_SCWP = 0x01,
+};
+
+/* P.DVP instruction pool */
+enum {
+ NM_DVP = 0x00,
+ NM_EVP = 0x01,
+};
+
+
+/*
+ *
+ * nanoMIPS decoding engine
+ *
+ */
+
+
+/* extraction utilities */
+
+#define NANOMIPS_EXTRACT_RT3(op) ((op >> 7) & 0x7)
+#define NANOMIPS_EXTRACT_RS3(op) ((op >> 4) & 0x7)
+#define NANOMIPS_EXTRACT_RD3(op) ((op >> 1) & 0x7)
+#define NANOMIPS_EXTRACT_RD5(op) ((op >> 5) & 0x1f)
+#define NANOMIPS_EXTRACT_RS5(op) (op & 0x1f)
+
+/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3'). */
+static inline int decode_gpr_gpr3(int r)
+{
+ static const int map[] = { 16, 17, 18, 19, 4, 5, 6, 7 };
+
+ return map[r & 0x7];
+}
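+
+/*
+ * For example, decode_gpr_gpr3(0) yields $16 and decode_gpr_gpr3(4)
+ * yields $4: encodings 0..3 name $16..$19 and 4..7 name $4..$7, the
+ * eight registers reachable from a 3-bit field in the 16-bit formats.
+ */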
+
+/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr3.src.store'). */
+static inline int decode_gpr_gpr3_src_store(int r)
+{
+ static const int map[] = { 0, 17, 18, 19, 4, 5, 6, 7 };
+
+ return map[r & 0x7];
+}
+
+/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4'). */
+static inline int decode_gpr_gpr4(int r)
+{
+ static const int map[] = { 8, 9, 10, 11, 4, 5, 6, 7,
+ 16, 17, 18, 19, 20, 21, 22, 23 };
+
+ return map[r & 0xf];
+}
+
+/* Implement nanoMIPS pseudocode decode_gpr(encoded_gpr, 'gpr4.zero'). */
+static inline int decode_gpr_gpr4_zero(int r)
+{
+ static const int map[] = { 8, 9, 10, 0, 4, 5, 6, 7,
+ 16, 17, 18, 19, 20, 21, 22, 23 };
+
+ return map[r & 0xf];
+}
+
+static void gen_ext(DisasContext *ctx, int wordsz, int rd, int rs, int rt,
+ int shift)
+{
+ gen_align_bits(ctx, wordsz, rd, rs, rt, wordsz - shift);
+}
+
+static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
+ uint32_t reg1, uint32_t reg2)
+{
+ TCGv taddr = tcg_temp_new();
+ TCGv_i64 tval = tcg_temp_new_i64();
+ TCGv tmp1 = tcg_temp_new();
+ TCGv tmp2 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, taddr, base, offset);
+ tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
+ } else {
+ tcg_gen_extr_i64_tl(tmp1, tmp2, tval);
+ }
+ gen_store_gpr(tmp1, reg1);
+ tcg_temp_free(tmp1);
+ gen_store_gpr(tmp2, reg2);
+ tcg_temp_free(tmp2);
+ tcg_gen_st_i64(tval, cpu_env, offsetof(CPUMIPSState, llval_wp));
+ tcg_temp_free_i64(tval);
+ tcg_gen_st_tl(taddr, cpu_env, offsetof(CPUMIPSState, lladdr));
+ tcg_temp_free(taddr);
+}
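+
+/*
+ * LLWP loads one 64-bit value and splits it across two GPRs; endianness
+ * decides which register receives which word. Roughly (sketch):
+ *
+ *     uint64_t v = load64(addr);
+ *     reg1 = big_endian ? (uint32_t)(v >> 32) : (uint32_t)v;
+ *     reg2 = big_endian ? (uint32_t)v         : (uint32_t)(v >> 32);
+ *
+ * The full 64-bit value and the address are latched in llval_wp/lladdr
+ * for the matching SCWP to compare against.
+ */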
+
+static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset,
+ uint32_t reg1, uint32_t reg2, bool eva)
+{
+ TCGv taddr = tcg_temp_local_new();
+ TCGv lladdr = tcg_temp_local_new();
+ TCGv_i64 tval = tcg_temp_new_i64();
+ TCGv_i64 llval = tcg_temp_new_i64();
+ TCGv_i64 val = tcg_temp_new_i64();
+ TCGv tmp1 = tcg_temp_new();
+ TCGv tmp2 = tcg_temp_new();
+ TCGLabel *lab_fail = gen_new_label();
+ TCGLabel *lab_done = gen_new_label();
+
+ gen_base_offset_addr(ctx, taddr, base, offset);
+
+ tcg_gen_ld_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
+ tcg_gen_brcond_tl(TCG_COND_NE, taddr, lladdr, lab_fail);
+
+ gen_load_gpr(tmp1, reg1);
+ gen_load_gpr(tmp2, reg2);
+
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_concat_tl_i64(tval, tmp2, tmp1);
+ } else {
+ tcg_gen_concat_tl_i64(tval, tmp1, tmp2);
+ }
+
+ tcg_gen_ld_i64(llval, cpu_env, offsetof(CPUMIPSState, llval_wp));
+ tcg_gen_atomic_cmpxchg_i64(val, taddr, llval, tval,
+ eva ? MIPS_HFLAG_UM : ctx->mem_idx, MO_64);
+ if (reg1 != 0) {
+ tcg_gen_movi_tl(cpu_gpr[reg1], 1);
+ }
+ tcg_gen_brcond_i64(TCG_COND_EQ, val, llval, lab_done);
+
+ gen_set_label(lab_fail);
+
+ if (reg1 != 0) {
+ tcg_gen_movi_tl(cpu_gpr[reg1], 0);
+ }
+ gen_set_label(lab_done);
+ tcg_gen_movi_tl(lladdr, -1);
+ tcg_gen_st_tl(lladdr, cpu_env, offsetof(CPUMIPSState, lladdr));
+}
+
+static void gen_adjust_sp(DisasContext *ctx, int u)
+{
+ gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], u);
+}
+
+static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count,
+ uint8_t gp, uint16_t u)
+{
+ int counter = 0;
+ TCGv va = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
+
+ while (counter != count) {
+ bool use_gp = gp && (counter == count - 1);
+ int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
+ int this_offset = -((counter + 1) << 2);
+ gen_base_offset_addr(ctx, va, 29, this_offset);
+ gen_load_gpr(t0, this_rt);
+ tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx,
+ (MO_TEUL | ctx->default_tcg_memop_mask));
+ counter++;
+ }
+
+ /* adjust stack pointer */
+ gen_adjust_sp(ctx, -u);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(va);
+}
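+
+/*
+ * Worked example (arbitrary operands): SAVE with rt = 30, count = 2,
+ * gp = 0, u = 16 computes this_rt as (rt & 0x10) | ((rt + i) & 0x1f),
+ * i.e. $30 then $31, stores them at sp - 4 and sp - 8, and finally
+ * drops the stack pointer by 16.
+ */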
+
+static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count,
+ uint8_t gp, uint16_t u)
+{
+ int counter = 0;
+ TCGv va = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
+
+ while (counter != count) {
+ bool use_gp = gp && (counter == count - 1);
+ int this_rt = use_gp ? 28 : (rt & 0x10) | ((rt + counter) & 0x1f);
+ int this_offset = u - ((counter + 1) << 2);
+ gen_base_offset_addr(ctx, va, 29, this_offset);
+ tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, this_rt);
+ counter++;
+ }
+
+ /* adjust stack pointer */
+ gen_adjust_sp(ctx, u);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(va);
+}
+
+static void gen_compute_branch_nm(DisasContext *ctx, uint32_t opc,
+ int insn_bytes,
+ int rs, int rt, int32_t offset)
+{
+ target_ulong btgt = -1;
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /* Load needed operands */
+ switch (opc) {
+ case OPC_BEQ:
+ case OPC_BNE:
+ /* Compare two registers */
+ if (rs != rt) {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ }
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_BGEZAL:
+ /* Compare to zero */
+ if (rs != 0) {
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ }
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_BPOSGE32:
+ tcg_gen_andi_tl(t0, cpu_dspctrl, 0x3F);
+ bcond_compute = 1;
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_JR:
+ case OPC_JALR:
+ /* Jump to register */
+ if (offset != 0 && offset != 16) {
+ /*
+ * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
+ * others are reserved.
+ */
+ MIPS_INVAL("jump hint");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ gen_load_gpr(btarget, rs);
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ if (bcond_compute == 0) {
+ /* No condition to be computed */
+ switch (opc) {
+ case OPC_BEQ: /* rx == rx */
+ /* Always take */
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BGEZAL: /* 0 >= 0 */
+ /* Always take and link */
+ tcg_gen_movi_tl(cpu_gpr[31],
+ ctx->base.pc_next + insn_bytes);
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BNE: /* rx != rx */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 8);
+ /* Skip the instruction in the delay slot */
+ ctx->base.pc_next += 4;
+ goto out;
+ case OPC_JR:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_JALR:
+ if (rt > 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt],
+ ctx->base.pc_next + insn_bytes);
+ }
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ } else {
+ switch (opc) {
+ case OPC_BEQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BNE:
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BGEZAL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ tcg_gen_movi_tl(cpu_gpr[31],
+ ctx->base.pc_next + insn_bytes);
+ goto not_likely;
+ case OPC_BPOSGE32:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 32);
+ not_likely:
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("conditional branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ }
+
+ ctx->btarget = btgt;
+
+ out:
+ if (insn_bytes == 2) {
+ ctx->hflags |= MIPS_HFLAG_B16;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_pool16c_nanomips_insn(DisasContext *ctx)
+{
+ int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
+
+ switch (extract32(ctx->opcode, 2, 2)) {
+ case NM_NOT16:
+ gen_logic(ctx, OPC_NOR, rt, rs, 0);
+ break;
+ case NM_AND16:
+ gen_logic(ctx, OPC_AND, rt, rt, rs);
+ break;
+ case NM_XOR16:
+ gen_logic(ctx, OPC_XOR, rt, rt, rs);
+ break;
+ case NM_OR16:
+ gen_logic(ctx, OPC_OR, rt, rt, rs);
+ break;
+ }
+}
+
+static void gen_pool32a0_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rt = extract32(ctx->opcode, 21, 5);
+ int rs = extract32(ctx->opcode, 16, 5);
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ switch (extract32(ctx->opcode, 3, 7)) {
+ case NM_P_TRAP:
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case NM_TEQ:
+ check_nms(ctx);
+ gen_trap(ctx, OPC_TEQ, rs, rt, -1);
+ break;
+ case NM_TNE:
+ check_nms(ctx);
+ gen_trap(ctx, OPC_TNE, rs, rt, -1);
+ break;
+ }
+ break;
+ case NM_RDHWR:
+ check_nms(ctx);
+ gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3));
+ break;
+ case NM_SEB:
+ check_nms(ctx);
+ gen_bshfl(ctx, OPC_SEB, rs, rt);
+ break;
+ case NM_SEH:
+ gen_bshfl(ctx, OPC_SEH, rs, rt);
+ break;
+ case NM_SLLV:
+ gen_shift(ctx, OPC_SLLV, rd, rt, rs);
+ break;
+ case NM_SRLV:
+ gen_shift(ctx, OPC_SRLV, rd, rt, rs);
+ break;
+ case NM_SRAV:
+ gen_shift(ctx, OPC_SRAV, rd, rt, rs);
+ break;
+ case NM_ROTRV:
+ gen_shift(ctx, OPC_ROTRV, rd, rt, rs);
+ break;
+ case NM_ADD:
+ gen_arith(ctx, OPC_ADD, rd, rs, rt);
+ break;
+ case NM_ADDU:
+ gen_arith(ctx, OPC_ADDU, rd, rs, rt);
+ break;
+ case NM_SUB:
+ check_nms(ctx);
+ gen_arith(ctx, OPC_SUB, rd, rs, rt);
+ break;
+ case NM_SUBU:
+ gen_arith(ctx, OPC_SUBU, rd, rs, rt);
+ break;
+ case NM_P_CMOVE:
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case NM_MOVZ:
+ gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt);
+ break;
+ case NM_MOVN:
+ gen_cond_move(ctx, OPC_MOVN, rd, rs, rt);
+ break;
+ }
+ break;
+ case NM_AND:
+ gen_logic(ctx, OPC_AND, rd, rs, rt);
+ break;
+ case NM_OR:
+ gen_logic(ctx, OPC_OR, rd, rs, rt);
+ break;
+ case NM_NOR:
+ gen_logic(ctx, OPC_NOR, rd, rs, rt);
+ break;
+ case NM_XOR:
+ gen_logic(ctx, OPC_XOR, rd, rs, rt);
+ break;
+ case NM_SLT:
+ gen_slt(ctx, OPC_SLT, rd, rs, rt);
+ break;
+ case NM_P_SLTU:
+ if (rd == 0) {
+ /* P_DVP */
+#ifndef CONFIG_USER_ONLY
+ TCGv t0 = tcg_temp_new();
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case NM_DVP:
+ if (ctx->vp) {
+ check_cp0_enabled(ctx);
+ gen_helper_dvp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case NM_EVP:
+ if (ctx->vp) {
+ check_cp0_enabled(ctx);
+ gen_helper_evp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ }
+ tcg_temp_free(t0);
+#endif
+ } else {
+ gen_slt(ctx, OPC_SLTU, rd, rs, rt);
+ }
+ break;
+ case NM_SOV:
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
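+            /*
+             * Signed-overflow test on the 32-bit sum: t1 marks sign
+             * bits where the operands differ, t2 marks where the sum
+             * differs from an operand; andc keeps only the case where
+             * the operands agree but the sum does not.
+             */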
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+
+ /* operands of same sign, result different sign */
+ tcg_gen_setcondi_tl(TCG_COND_LT, t0, t1, 0);
+ gen_store_gpr(t0, rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case NM_MUL:
+ gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt);
+ break;
+ case NM_MUH:
+ gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt);
+ break;
+ case NM_MULU:
+ gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt);
+ break;
+ case NM_MUHU:
+ gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt);
+ break;
+ case NM_DIV:
+ gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt);
+ break;
+ case NM_MOD:
+ gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt);
+ break;
+ case NM_DIVU:
+ gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt);
+ break;
+ case NM_MODU:
+ gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case NM_MFC0:
+ check_cp0_enabled(ctx);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ gen_mfc0(ctx, cpu_gpr[rt], rs, extract32(ctx->opcode, 11, 3));
+ break;
+ case NM_MTC0:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_mtc0(ctx, t0, rs, extract32(ctx->opcode, 11, 3));
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_D_E_MT_VPE:
+ {
+ uint8_t sc = extract32(ctx->opcode, 10, 1);
+ TCGv t0 = tcg_temp_new();
+
+ switch (sc) {
+ case 0:
+ if (rs == 1) {
+ /* DMT */
+ check_cp0_mt(ctx);
+ gen_helper_dmt(t0);
+ gen_store_gpr(t0, rt);
+ } else if (rs == 0) {
+ /* DVPE */
+ check_cp0_mt(ctx);
+ gen_helper_dvpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case 1:
+ if (rs == 1) {
+ /* EMT */
+ check_cp0_mt(ctx);
+ gen_helper_emt(t0);
+ gen_store_gpr(t0, rt);
+ } else if (rs == 0) {
+ /* EVPE */
+ check_cp0_mt(ctx);
+ gen_helper_evpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ }
+
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_FORK:
+ check_mt(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ gen_helper_fork(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ case NM_MFTR:
+ case NM_MFHTR:
+ check_cp0_enabled(ctx);
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mftr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1),
+ extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1));
+ break;
+ case NM_MTTR:
+ case NM_MTHTR:
+ check_cp0_enabled(ctx);
+ gen_mttr(env, ctx, rs, rt, extract32(ctx->opcode, 10, 1),
+ extract32(ctx->opcode, 11, 5), extract32(ctx->opcode, 3, 1));
+ break;
+ case NM_YIELD:
+ check_mt(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_helper_yield(t0, cpu_env, t0);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+/* MIPS DSP ASE instructions */
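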
+static void gen_pool32axf_1_5_nanomips_insn(DisasContext *ctx, uint32_t opc,
+ int ret, int v1, int v2)
+{
+ TCGv_i32 t0;
+ TCGv v0_t;
+ TCGv v1_t;
+
+ t0 = tcg_temp_new_i32();
+
+ v0_t = tcg_temp_new();
+ v1_t = tcg_temp_new();
+
+ tcg_gen_movi_i32(t0, v2 >> 3);
+
+ gen_load_gpr(v0_t, ret);
+ gen_load_gpr(v1_t, v1);
+
+ switch (opc) {
+ case NM_MAQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phr(t0, v1_t, v0_t, cpu_env);
+ break;
+ case NM_MAQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phl(t0, v1_t, v0_t, cpu_env);
+ break;
+ case NM_MAQ_SA_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phr(t0, v1_t, v0_t, cpu_env);
+ break;
+ case NM_MAQ_SA_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phl(t0, v1_t, v0_t, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free_i32(t0);
+
+ tcg_temp_free(v0_t);
+ tcg_temp_free(v1_t);
+}
+
+
+static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc,
+ int ret, int v1, int v2)
+{
+ int16_t imm;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv v0_t = tcg_temp_new();
+
+ gen_load_gpr(v0_t, v1);
+
+ switch (opc) {
+ case NM_POOL32AXF_1_0:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 12, 2)) {
+ case NM_MFHI:
+ gen_HILO(ctx, OPC_MFHI, v2 >> 3, ret);
+ break;
+ case NM_MFLO:
+ gen_HILO(ctx, OPC_MFLO, v2 >> 3, ret);
+ break;
+ case NM_MTHI:
+ gen_HILO(ctx, OPC_MTHI, v2 >> 3, v1);
+ break;
+ case NM_MTLO:
+ gen_HILO(ctx, OPC_MTLO, v2 >> 3, v1);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_1_1:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 12, 2)) {
+ case NM_MTHLIP:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_mthlip(t0, v0_t, cpu_env);
+ break;
+ case NM_SHILOV:
+ tcg_gen_movi_tl(t0, v2 >> 3);
+ gen_helper_shilo(t0, v0_t, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_1_3:
+ check_dsp(ctx);
+ imm = extract32(ctx->opcode, 14, 7);
+ switch (extract32(ctx->opcode, 12, 2)) {
+ case NM_RDDSP:
+ tcg_gen_movi_tl(t0, imm);
+ gen_helper_rddsp(t0, t0, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_WRDSP:
+ gen_load_gpr(t0, ret);
+ tcg_gen_movi_tl(t1, imm);
+ gen_helper_wrdsp(t0, t1, cpu_env);
+ break;
+ case NM_EXTP:
+ tcg_gen_movi_tl(t0, v2 >> 3);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extp(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_EXTPDP:
+ tcg_gen_movi_tl(t0, v2 >> 3);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extpdp(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_1_4:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, v2 >> 2);
+ switch (extract32(ctx->opcode, 12, 1)) {
+ case NM_SHLL_QB:
+ gen_helper_shll_qb(t0, t0, v0_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_SHRL_QB:
+ gen_helper_shrl_qb(t0, t0, v0_t);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_1_5:
+ opc = extract32(ctx->opcode, 12, 2);
+ gen_pool32axf_1_5_nanomips_insn(ctx, opc, ret, v1, v2);
+ break;
+ case NM_POOL32AXF_1_7:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, v2 >> 3);
+ tcg_gen_movi_tl(t1, v1);
+ switch (extract32(ctx->opcode, 12, 2)) {
+ case NM_EXTR_W:
+ gen_helper_extr_w(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_EXTR_R_W:
+ gen_helper_extr_r_w(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_EXTR_RS_W:
+ gen_helper_extr_rs_w(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_EXTR_S_H:
+ gen_helper_extr_s_h(t0, t0, t1, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(v0_t);
+}
+
+static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc,
+ TCGv v0, TCGv v1, int rd)
+{
+ TCGv_i32 t0;
+
+ t0 = tcg_temp_new_i32();
+
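+    /* rd >> 3 is the DSP accumulator index expected by the helpers */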
+ tcg_gen_movi_i32(t0, rd >> 3);
+
+ switch (opc) {
+ case NM_POOL32AXF_2_0_7:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpa_w_ph(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpaq_s_w_ph(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPS_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dps_w_ph(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPSQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpsq_s_w_ph(t0, v1, v0, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_8_15:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAX_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpax_w_ph(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPAQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpaq_sa_l_w(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPSX_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsx_w_ph(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPSQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpsq_sa_l_w(t0, v0, v1, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_16_23:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbl(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPAQX_S_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpaqx_s_w_ph(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPSU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbl(t0, v0, v1, cpu_env);
+ break;
+ case NM_DPSQX_S_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsqx_s_w_ph(t0, v0, v1, cpu_env);
+ break;
+ case NM_MULSA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_mulsa_w_ph(t0, v0, v1, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_24_31:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbr(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPAQX_SA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpaqx_sa_w_ph(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPSU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbr(t0, v1, v0, cpu_env);
+ break;
+ case NM_DPSQX_SA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsqx_sa_w_ph(t0, v1, v0, cpu_env);
+ break;
+ case NM_MULSAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_w_ph(t0, v1, v0, cpu_env);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int rd)
+{
+ int ret = rt;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv v0_t = tcg_temp_new();
+ TCGv v1_t = tcg_temp_new();
+
+ gen_load_gpr(v0_t, rt);
+ gen_load_gpr(v1_t, rs);
+
+ switch (opc) {
+ case NM_POOL32AXF_2_0_7:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPA_W_PH:
+ case NM_DPAQ_S_W_PH:
+ case NM_DPS_W_PH:
+ case NM_DPSQ_S_W_PH:
+ gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
+ break;
+ case NM_BALIGN:
+ check_dsp_r2(ctx);
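+            /*
+             * BALIGN: rt = (rt << 8 * bp) | (rs >> 8 * (4 - bp)) with
+             * bp = rd & 3; for bp values 0 and 2 rt is only
+             * sign-extended.
+             */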
+ if (rt != 0) {
+ gen_load_gpr(t0, rs);
+ rd &= 3;
+ if (rd != 0 && rd != 2) {
+ tcg_gen_shli_tl(cpu_gpr[ret], cpu_gpr[ret], 8 * rd);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(t0, t0, 8 * (4 - rd));
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ }
+ break;
+ case NM_MADD:
+ check_dsp(ctx);
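+            /*
+             * MADD: sign-extend rs and rt to 64 bits, multiply, and
+             * add the product to the {HI, LO} pair of accumulator
+             * 'acc'.
+             */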
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case NM_MULT:
+ check_dsp(ctx);
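+            /*
+             * MULT: 32x32 signed multiply; muls2 yields the full
+             * 64-bit product as separate low (LO) and high (HI)
+             * halves.
+             */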
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ if (acc || ctx->insn_flags & ISA_MIPS_R6) {
+ check_dsp_r2(ctx);
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case NM_EXTRV_W:
+ check_dsp(ctx);
+ gen_load_gpr(v1_t, rs);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extr_w(t0, t0, v1_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_8_15:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAX_W_PH:
+ case NM_DPAQ_SA_L_W:
+ case NM_DPSX_W_PH:
+ case NM_DPSQ_SA_L_W:
+ gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
+ break;
+ case NM_MADDU:
+ check_dsp(ctx);
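+            /* MADDU: as MADD above, but with zero-extended operands */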
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case NM_MULTU:
+ check_dsp(ctx);
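+            /* MULTU: unsigned counterpart of MULT above */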
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ if (acc || ctx->insn_flags & ISA_MIPS_R6) {
+ check_dsp_r2(ctx);
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case NM_EXTRV_R_W:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extr_r_w(t0, t0, v1_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_16_23:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAU_H_QBL:
+ case NM_DPAQX_S_W_PH:
+ case NM_DPSU_H_QBL:
+ case NM_DPSQX_S_W_PH:
+ case NM_MULSA_W_PH:
+ gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
+ break;
+ case NM_EXTPV:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extp(t0, t0, v1_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_MSUB:
+ check_dsp(ctx);
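+            /*
+             * MSUB: the 64-bit product is subtracted from the
+             * accumulator, i.e. {HI, LO} -= rs * rt.
+             */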
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case NM_EXTRV_RS_W:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extr_rs_w(t0, t0, v1_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_2_24_31:
+ switch (extract32(ctx->opcode, 9, 3)) {
+ case NM_DPAU_H_QBR:
+ case NM_DPAQX_SA_W_PH:
+ case NM_DPSU_H_QBR:
+ case NM_DPSQX_SA_W_PH:
+ case NM_MULSAQ_S_W_PH:
+ gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd);
+ break;
+ case NM_EXTPDPV:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extpdp(t0, t0, v1_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ case NM_MSUBU:
+ check_dsp(ctx);
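+            /* MSUBU: as MSUB above, but with zero-extended operands */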
+ {
+ int acc = extract32(ctx->opcode, 14, 2);
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case NM_EXTRV_S_H:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 3);
+ gen_helper_extr_s_h(t0, t0, v0_t, cpu_env);
+ gen_store_gpr(t0, ret);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+
+ tcg_temp_free(v0_t);
+ tcg_temp_free(v1_t);
+}
+
+static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc,
+ int rt, int rs)
+{
+ int ret = rt;
+ TCGv t0 = tcg_temp_new();
+ TCGv v0_t = tcg_temp_new();
+
+ gen_load_gpr(v0_t, rs);
+
+ switch (opc) {
+ case NM_ABSQ_S_QB:
+ check_dsp_r2(ctx);
+ gen_helper_absq_s_qb(v0_t, v0_t, cpu_env);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_ABSQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_absq_s_ph(v0_t, v0_t, cpu_env);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_ABSQ_S_W:
+ check_dsp(ctx);
+ gen_helper_absq_s_w(v0_t, v0_t, cpu_env);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQ_W_PHL:
+ check_dsp(ctx);
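+        /* PRECEQ.W.PHL: keep the left (upper) halfword, clear the right */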
+ tcg_gen_andi_tl(v0_t, v0_t, 0xFFFF0000);
+ tcg_gen_ext32s_tl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQ_W_PHR:
+ check_dsp(ctx);
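+        /* PRECEQ.W.PHR: move the right (lower) halfword to the upper half */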
+ tcg_gen_andi_tl(v0_t, v0_t, 0x0000FFFF);
+ tcg_gen_shli_tl(v0_t, v0_t, 16);
+ tcg_gen_ext32s_tl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbr(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbla(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEQU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbra(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbr(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbla(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_PRECEU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbra(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_REPLV_PH:
+ check_dsp(ctx);
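+        /* REPLV.PH: replicate the low halfword of rs into both halves */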
+ tcg_gen_ext16u_tl(v0_t, v0_t);
+ tcg_gen_shli_tl(t0, v0_t, 16);
+ tcg_gen_or_tl(v0_t, v0_t, t0);
+ tcg_gen_ext32s_tl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_REPLV_QB:
+ check_dsp(ctx);
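+        /* REPLV.QB: replicate the low byte of rs into all four bytes */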
+ tcg_gen_ext8u_tl(v0_t, v0_t);
+ tcg_gen_shli_tl(t0, v0_t, 8);
+ tcg_gen_or_tl(v0_t, v0_t, t0);
+ tcg_gen_shli_tl(t0, v0_t, 16);
+ tcg_gen_or_tl(v0_t, v0_t, t0);
+ tcg_gen_ext32s_tl(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_BITREV:
+ check_dsp(ctx);
+ gen_helper_bitrev(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_INSV:
+ check_dsp(ctx);
+ {
+ TCGv tv0 = tcg_temp_new();
+
+ gen_load_gpr(tv0, rt);
+ gen_helper_insv(v0_t, cpu_env, v0_t, tv0);
+ gen_store_gpr(v0_t, ret);
+ tcg_temp_free(tv0);
+ }
+ break;
+ case NM_RADDU_W_QB:
+ check_dsp(ctx);
+ gen_helper_raddu_w_qb(v0_t, v0_t);
+ gen_store_gpr(v0_t, ret);
+ break;
+ case NM_BITSWAP:
+ gen_bitswap(ctx, OPC_BITSWAP, ret, rs);
+ break;
+ case NM_CLO:
+ check_nms(ctx);
+ gen_cl(ctx, OPC_CLO, ret, rs);
+ break;
+ case NM_CLZ:
+ check_nms(ctx);
+ gen_cl(ctx, OPC_CLZ, ret, rs);
+ break;
+ case NM_WSBH:
+ gen_bshfl(ctx, OPC_WSBH, ret, rs);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free(v0_t);
+ tcg_temp_free(t0);
+}
+
+static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int rd)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv rs_t = tcg_temp_new();
+
+ gen_load_gpr(rs_t, rs);
+
+ switch (opc) {
+ case NM_SHRA_R_QB:
+ check_dsp_r2(ctx);
+ tcg_gen_movi_tl(t0, rd >> 2);
+ switch (extract32(ctx->opcode, 12, 1)) {
+ case 0:
+ /* NM_SHRA_QB */
+ gen_helper_shra_qb(t0, t0, rs_t);
+ gen_store_gpr(t0, rt);
+ break;
+ case 1:
+ /* NM_SHRA_R_QB */
+ gen_helper_shra_r_qb(t0, t0, rs_t);
+ gen_store_gpr(t0, rt);
+ break;
+ }
+ break;
+ case NM_SHRL_PH:
+ check_dsp_r2(ctx);
+ tcg_gen_movi_tl(t0, rd >> 1);
+ gen_helper_shrl_ph(t0, t0, rs_t);
+ gen_store_gpr(t0, rt);
+ break;
+ case NM_REPL_QB:
+ check_dsp(ctx);
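+        /* REPL.QB: broadcast the 8-bit immediate into all four byte lanes */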
+ {
+ int16_t imm;
+ target_long result;
+ imm = extract32(ctx->opcode, 13, 8);
+ result = (uint32_t)imm << 24 |
+ (uint32_t)imm << 16 |
+ (uint32_t)imm << 8 |
+ (uint32_t)imm;
+ result = (int32_t)result;
+ tcg_gen_movi_tl(t0, result);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(rs_t);
+}
+
+
+static void gen_pool32axf_nanomips_insn(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rt = extract32(ctx->opcode, 21, 5);
+ int rs = extract32(ctx->opcode, 16, 5);
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ switch (extract32(ctx->opcode, 6, 3)) {
+ case NM_POOL32AXF_1:
+ {
+ int32_t op1 = extract32(ctx->opcode, 9, 3);
+ gen_pool32axf_1_nanomips_insn(ctx, op1, rt, rs, rd);
+ }
+ break;
+ case NM_POOL32AXF_2:
+ {
+ int32_t op1 = extract32(ctx->opcode, 12, 2);
+ gen_pool32axf_2_nanomips_insn(ctx, op1, rt, rs, rd);
+ }
+ break;
+ case NM_POOL32AXF_4:
+ {
+ int32_t op1 = extract32(ctx->opcode, 9, 7);
+ gen_pool32axf_4_nanomips_insn(ctx, op1, rt, rs);
+ }
+ break;
+ case NM_POOL32AXF_5:
+ switch (extract32(ctx->opcode, 9, 7)) {
+#ifndef CONFIG_USER_ONLY
+ case NM_TLBP:
+ gen_cp0(env, ctx, OPC_TLBP, 0, 0);
+ break;
+ case NM_TLBR:
+ gen_cp0(env, ctx, OPC_TLBR, 0, 0);
+ break;
+ case NM_TLBWI:
+ gen_cp0(env, ctx, OPC_TLBWI, 0, 0);
+ break;
+ case NM_TLBWR:
+ gen_cp0(env, ctx, OPC_TLBWR, 0, 0);
+ break;
+ case NM_TLBINV:
+ gen_cp0(env, ctx, OPC_TLBINV, 0, 0);
+ break;
+ case NM_TLBINVF:
+ gen_cp0(env, ctx, OPC_TLBINVF, 0, 0);
+ break;
+ case NM_DI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_di(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_EI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_ei(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_RDPGPR:
+ check_cp0_enabled(ctx);
+ gen_load_srsgpr(rs, rt);
+ break;
+ case NM_WRPGPR:
+ check_cp0_enabled(ctx);
+ gen_store_srsgpr(rs, rt);
+ break;
+ case NM_WAIT:
+ gen_cp0(env, ctx, OPC_WAIT, 0, 0);
+ break;
+ case NM_DERET:
+ gen_cp0(env, ctx, OPC_DERET, 0, 0);
+ break;
+ case NM_ERETX:
+ gen_cp0(env, ctx, OPC_ERET, 0, 0);
+ break;
+#endif
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32AXF_7:
+ {
+ int32_t op1 = extract32(ctx->opcode, 9, 3);
+ gen_pool32axf_7_nanomips_insn(ctx, op1, rt, rs, rd);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+/* Immediate Value Compact Branches */
+static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc,
+ int rt, int32_t imm, int32_t offset)
+{
+ TCGCond cond = TCG_COND_ALWAYS;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ tcg_gen_movi_tl(t1, imm);
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+
+ /* Load needed operands and calculate btarget */
+ switch (opc) {
+ case NM_BEQIC:
+ if (rt == 0 && imm == 0) {
+ /* Unconditional branch */
+ } else if (rt == 0 && imm != 0) {
+ /* Treat as NOP */
+ goto out;
+ } else {
+ cond = TCG_COND_EQ;
+ }
+ break;
+ case NM_BBEQZC:
+ case NM_BBNEZC:
+ check_nms(ctx);
+ if (imm >= 32 && !(ctx->hflags & MIPS_HFLAG_64)) {
+ gen_reserved_instruction(ctx);
+ goto out;
+ } else if (rt == 0 && opc == NM_BBEQZC) {
+ /* Unconditional branch */
+ } else if (rt == 0 && opc == NM_BBNEZC) {
+ /* Treat as NOP */
+ goto out;
+ } else {
+ tcg_gen_shri_tl(t0, t0, imm);
+ tcg_gen_andi_tl(t0, t0, 1);
+ tcg_gen_movi_tl(t1, 0);
+ if (opc == NM_BBEQZC) {
+ cond = TCG_COND_EQ;
+ } else {
+ cond = TCG_COND_NE;
+ }
+ }
+ break;
+ case NM_BNEIC:
+ if (rt == 0 && imm == 0) {
+ /* Treat as NOP */
+ goto out;
+ } else if (rt == 0 && imm != 0) {
+ /* Unconditional branch */
+ } else {
+ cond = TCG_COND_NE;
+ }
+ break;
+ case NM_BGEIC:
+ if (rt == 0 && imm == 0) {
+ /* Unconditional branch */
+ } else {
+ cond = TCG_COND_GE;
+ }
+ break;
+ case NM_BLTIC:
+ cond = TCG_COND_LT;
+ break;
+ case NM_BGEIUC:
+ if (rt == 0 && imm == 0) {
+ /* Unconditional branch */
+ } else {
+ cond = TCG_COND_GEU;
+ }
+ break;
+ case NM_BLTIUC:
+ cond = TCG_COND_LTU;
+ break;
+ default:
+ MIPS_INVAL("Immediate Value Compact branch");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* branch completion */
+ clear_branch_hflags(ctx);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ if (cond == TCG_COND_ALWAYS) {
+        /* Unconditional compact branch */
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ } else {
+ /* Conditional compact branch */
+ TCGLabel *fs = gen_new_label();
+
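+        /* branch to 'fs' (the not-taken path) on the inverted condition */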
+ tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs);
+
+ gen_goto_tb(ctx, 1, ctx->btarget);
+ gen_set_label(fs);
+
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
+ }
+
+out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* P.BALRSC type nanoMIPS R6 branches: BALRSC and BRSC */
+static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs,
+ int rt)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /* load rs */
+ gen_load_gpr(t0, rs);
+
+ /* link */
+ if (rt != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], ctx->base.pc_next + 4);
+ }
+
+    /* calculate btarget: PC + 4 + (rs << 1) */
+ tcg_gen_shli_tl(t0, t0, 1);
+ tcg_gen_movi_tl(t1, ctx->base.pc_next + 4);
+ gen_op_addr_add(ctx, btarget, t1, t0);
+
+ /* branch completion */
+ clear_branch_hflags(ctx);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ /* unconditional branch to register */
+ tcg_gen_mov_tl(cpu_PC, btarget);
+ tcg_gen_lookup_and_goto_ptr();
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* nanoMIPS Branches */
+static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int32_t offset)
+{
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /* Load needed operands and calculate btarget */
+ switch (opc) {
+ /* compact branch */
+ case OPC_BGEC:
+ case OPC_BLTC:
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BGEUC:
+ case OPC_BLTUC:
+ if (rs == 0 || rs == rt) {
+ /* OPC_BLEZALC, OPC_BGEZALC */
+ /* OPC_BGTZALC, OPC_BLTZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4);
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BC:
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BEQZC:
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ TCGv tbase = tcg_temp_new();
+ TCGv toffset = tcg_temp_new();
+
+ gen_load_gpr(tbase, rt);
+ tcg_gen_movi_tl(toffset, offset);
+ gen_op_addr_add(ctx, btarget, tbase, toffset);
+ tcg_temp_free(tbase);
+ tcg_temp_free(toffset);
+ }
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ if (bcond_compute == 0) {
+        /* Unconditional compact branch */
+ switch (opc) {
+ case OPC_BC:
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ } else {
+ /* Conditional compact branch */
+ TCGLabel *fs = gen_new_label();
+
+ switch (opc) {
+ case OPC_BGEUC:
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
+ }
+ break;
+ case OPC_BLTUC:
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
+ }
+ break;
+ case OPC_BGEC:
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
+ }
+ break;
+ case OPC_BLTC:
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
+ }
+ break;
+ case OPC_BEQZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
+ break;
+ default:
+ MIPS_INVAL("Compact conditional branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* branch completion */
+ clear_branch_hflags(ctx);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+        /* Generate the branch here as compact branches have no delay slot */
+ gen_goto_tb(ctx, 1, ctx->btarget);
+ gen_set_label(fs);
+
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + 4);
+ }
+
+out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+
+/* nanoMIPS CP1 Branches */
+static void gen_compute_branch_cp1_nm(DisasContext *ctx, uint32_t op,
+ int32_t ft, int32_t offset)
+{
+ target_ulong btarget;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, t0, ft);
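+    /* R6 FP compares set bit 0 of the result FPR; branch on that bit */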
+ tcg_gen_andi_i64(t0, t0, 1);
+
+ btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+
+ switch (op) {
+ case NM_BC1EQZC:
+ tcg_gen_xori_i64(t0, t0, 1);
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case NM_BC1NEZC:
+ /* t0 already set */
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("cp1 cond branch");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ tcg_gen_trunc_i64_tl(bcond, t0);
+
+ ctx->btarget = btarget;
+
+out:
+ tcg_temp_free_i64(t0);
+}
+
+
+static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if ((extract32(ctx->opcode, 6, 1)) == 1) {
+        /* PP.LSXS instructions scale the index by the access size */
+ switch (extract32(ctx->opcode, 7, 4)) {
+ case NM_SHXS:
+ check_nms(ctx);
+ /* fall through */
+ case NM_LHXS:
+ case NM_LHUXS:
+ tcg_gen_shli_tl(t0, t0, 1);
+ break;
+ case NM_SWXS:
+ check_nms(ctx);
+ /* fall through */
+ case NM_LWXS:
+ case NM_LWC1XS:
+ case NM_SWC1XS:
+ tcg_gen_shli_tl(t0, t0, 2);
+ break;
+ case NM_LDC1XS:
+ case NM_SDC1XS:
+ tcg_gen_shli_tl(t0, t0, 3);
+ break;
+ }
+ }
+ gen_op_addr_add(ctx, t0, t0, t1);
+
+ switch (extract32(ctx->opcode, 7, 4)) {
+ case NM_LBX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ MO_SB);
+ gen_store_gpr(t0, rd);
+ break;
+ case NM_LHX:
+ /*case NM_LHXS:*/
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ MO_TESW);
+ gen_store_gpr(t0, rd);
+ break;
+ case NM_LWX:
+ /*case NM_LWXS:*/
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ MO_TESL);
+ gen_store_gpr(t0, rd);
+ break;
+ case NM_LBUX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ MO_UB);
+ gen_store_gpr(t0, rd);
+ break;
+ case NM_LHUX:
+ /*case NM_LHUXS:*/
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx,
+ MO_TEUW);
+ gen_store_gpr(t0, rd);
+ break;
+ case NM_SBX:
+ check_nms(ctx);
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ MO_8);
+ break;
+ case NM_SHX:
+ /*case NM_SHXS:*/
+ check_nms(ctx);
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ MO_TEUW);
+ break;
+ case NM_SWX:
+ /*case NM_SWXS:*/
+ check_nms(ctx);
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx,
+ MO_TEUL);
+ break;
+ case NM_LWC1X:
+ /*case NM_LWC1XS:*/
+ case NM_LDC1X:
+ /*case NM_LDC1XS:*/
+ case NM_SWC1X:
+ /*case NM_SWC1XS:*/
+ case NM_SDC1X:
+ /*case NM_SDC1XS:*/
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ switch (extract32(ctx->opcode, 7, 4)) {
+ case NM_LWC1X:
+ /*case NM_LWC1XS:*/
+ gen_flt_ldst(ctx, OPC_LWC1, rd, t0);
+ break;
+ case NM_LDC1X:
+ /*case NM_LDC1XS:*/
+ gen_flt_ldst(ctx, OPC_LDC1, rd, t0);
+ break;
+ case NM_SWC1X:
+ /*case NM_SWC1XS:*/
+ gen_flt_ldst(ctx, OPC_SWC1, rd, t0);
+ break;
+ case NM_SDC1X:
+ /*case NM_SDC1XS:*/
+ gen_flt_ldst(ctx, OPC_SDC1, rd, t0);
+ break;
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_pool32f_nanomips_insn(DisasContext *ctx)
+{
+ int rt, rs, rd;
+
+ rt = extract32(ctx->opcode, 21, 5);
+ rs = extract32(ctx->opcode, 16, 5);
+ rd = extract32(ctx->opcode, 11, 5);
+
+ if (!(ctx->CP0_Config1 & (1 << CP0C1_FP))) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+ check_cp1_enabled(ctx);
+ switch (extract32(ctx->opcode, 0, 3)) {
+ case NM_POOL32F_0:
+ switch (extract32(ctx->opcode, 3, 7)) {
+ case NM_RINT_S:
+ gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0);
+ break;
+ case NM_RINT_D:
+ gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0);
+ break;
+ case NM_CLASS_S:
+ gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0);
+ break;
+ case NM_CLASS_D:
+ gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0);
+ break;
+ case NM_ADD_S:
+ gen_farith(ctx, OPC_ADD_S, rt, rs, rd, 0);
+ break;
+ case NM_ADD_D:
+ gen_farith(ctx, OPC_ADD_D, rt, rs, rd, 0);
+ break;
+ case NM_SUB_S:
+ gen_farith(ctx, OPC_SUB_S, rt, rs, rd, 0);
+ break;
+ case NM_SUB_D:
+ gen_farith(ctx, OPC_SUB_D, rt, rs, rd, 0);
+ break;
+ case NM_MUL_S:
+ gen_farith(ctx, OPC_MUL_S, rt, rs, rd, 0);
+ break;
+ case NM_MUL_D:
+ gen_farith(ctx, OPC_MUL_D, rt, rs, rd, 0);
+ break;
+ case NM_DIV_S:
+ gen_farith(ctx, OPC_DIV_S, rt, rs, rd, 0);
+ break;
+ case NM_DIV_D:
+ gen_farith(ctx, OPC_DIV_D, rt, rs, rd, 0);
+ break;
+ case NM_SELEQZ_S:
+ gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs);
+ break;
+ case NM_SELEQZ_D:
+ gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs);
+ break;
+ case NM_SELNEZ_S:
+ gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs);
+ break;
+ case NM_SELNEZ_D:
+ gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs);
+ break;
+ case NM_SEL_S:
+ gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs);
+ break;
+ case NM_SEL_D:
+ gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs);
+ break;
+ case NM_MADDF_S:
+ gen_farith(ctx, OPC_MADDF_S, rt, rs, rd, 0);
+ break;
+ case NM_MADDF_D:
+ gen_farith(ctx, OPC_MADDF_D, rt, rs, rd, 0);
+ break;
+ case NM_MSUBF_S:
+ gen_farith(ctx, OPC_MSUBF_S, rt, rs, rd, 0);
+ break;
+ case NM_MSUBF_D:
+ gen_farith(ctx, OPC_MSUBF_D, rt, rs, rd, 0);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32F_3:
+ switch (extract32(ctx->opcode, 3, 3)) {
+ case NM_MIN_FMT:
+ switch (extract32(ctx->opcode, 9, 1)) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0);
+ break;
+ }
+ break;
+ case NM_MAX_FMT:
+ switch (extract32(ctx->opcode, 9, 1)) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0);
+ break;
+ }
+ break;
+ case NM_MINA_FMT:
+ switch (extract32(ctx->opcode, 9, 1)) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0);
+ break;
+ }
+ break;
+ case NM_MAXA_FMT:
+ switch (extract32(ctx->opcode, 9, 1)) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0);
+ break;
+ }
+ break;
+ case NM_POOL32FXF:
+ switch (extract32(ctx->opcode, 6, 8)) {
+ case NM_CFC1:
+ gen_cp1(ctx, OPC_CFC1, rt, rs);
+ break;
+ case NM_CTC1:
+ gen_cp1(ctx, OPC_CTC1, rt, rs);
+ break;
+ case NM_MFC1:
+ gen_cp1(ctx, OPC_MFC1, rt, rs);
+ break;
+ case NM_MTC1:
+ gen_cp1(ctx, OPC_MTC1, rt, rs);
+ break;
+ case NM_MFHC1:
+ gen_cp1(ctx, OPC_MFHC1, rt, rs);
+ break;
+ case NM_MTHC1:
+ gen_cp1(ctx, OPC_MTHC1, rt, rs);
+ break;
+ case NM_CVT_S_PL:
+ gen_farith(ctx, OPC_CVT_S_PL, -1, rs, rt, 0);
+ break;
+ case NM_CVT_S_PU:
+ gen_farith(ctx, OPC_CVT_S_PU, -1, rs, rt, 0);
+ break;
+ default:
+ switch (extract32(ctx->opcode, 6, 9)) {
+ case NM_CVT_L_S:
+ gen_farith(ctx, OPC_CVT_L_S, -1, rs, rt, 0);
+ break;
+ case NM_CVT_L_D:
+ gen_farith(ctx, OPC_CVT_L_D, -1, rs, rt, 0);
+ break;
+ case NM_CVT_W_S:
+ gen_farith(ctx, OPC_CVT_W_S, -1, rs, rt, 0);
+ break;
+ case NM_CVT_W_D:
+ gen_farith(ctx, OPC_CVT_W_D, -1, rs, rt, 0);
+ break;
+ case NM_RSQRT_S:
+ gen_farith(ctx, OPC_RSQRT_S, -1, rs, rt, 0);
+ break;
+ case NM_RSQRT_D:
+ gen_farith(ctx, OPC_RSQRT_D, -1, rs, rt, 0);
+ break;
+ case NM_SQRT_S:
+ gen_farith(ctx, OPC_SQRT_S, -1, rs, rt, 0);
+ break;
+ case NM_SQRT_D:
+ gen_farith(ctx, OPC_SQRT_D, -1, rs, rt, 0);
+ break;
+ case NM_RECIP_S:
+ gen_farith(ctx, OPC_RECIP_S, -1, rs, rt, 0);
+ break;
+ case NM_RECIP_D:
+ gen_farith(ctx, OPC_RECIP_D, -1, rs, rt, 0);
+ break;
+ case NM_FLOOR_L_S:
+ gen_farith(ctx, OPC_FLOOR_L_S, -1, rs, rt, 0);
+ break;
+ case NM_FLOOR_L_D:
+ gen_farith(ctx, OPC_FLOOR_L_D, -1, rs, rt, 0);
+ break;
+ case NM_FLOOR_W_S:
+ gen_farith(ctx, OPC_FLOOR_W_S, -1, rs, rt, 0);
+ break;
+ case NM_FLOOR_W_D:
+ gen_farith(ctx, OPC_FLOOR_W_D, -1, rs, rt, 0);
+ break;
+ case NM_CEIL_L_S:
+ gen_farith(ctx, OPC_CEIL_L_S, -1, rs, rt, 0);
+ break;
+ case NM_CEIL_L_D:
+ gen_farith(ctx, OPC_CEIL_L_D, -1, rs, rt, 0);
+ break;
+ case NM_CEIL_W_S:
+ gen_farith(ctx, OPC_CEIL_W_S, -1, rs, rt, 0);
+ break;
+ case NM_CEIL_W_D:
+ gen_farith(ctx, OPC_CEIL_W_D, -1, rs, rt, 0);
+ break;
+ case NM_TRUNC_L_S:
+ gen_farith(ctx, OPC_TRUNC_L_S, -1, rs, rt, 0);
+ break;
+ case NM_TRUNC_L_D:
+ gen_farith(ctx, OPC_TRUNC_L_D, -1, rs, rt, 0);
+ break;
+ case NM_TRUNC_W_S:
+ gen_farith(ctx, OPC_TRUNC_W_S, -1, rs, rt, 0);
+ break;
+ case NM_TRUNC_W_D:
+ gen_farith(ctx, OPC_TRUNC_W_D, -1, rs, rt, 0);
+ break;
+ case NM_ROUND_L_S:
+ gen_farith(ctx, OPC_ROUND_L_S, -1, rs, rt, 0);
+ break;
+ case NM_ROUND_L_D:
+ gen_farith(ctx, OPC_ROUND_L_D, -1, rs, rt, 0);
+ break;
+ case NM_ROUND_W_S:
+ gen_farith(ctx, OPC_ROUND_W_S, -1, rs, rt, 0);
+ break;
+ case NM_ROUND_W_D:
+ gen_farith(ctx, OPC_ROUND_W_D, -1, rs, rt, 0);
+ break;
+ case NM_MOV_S:
+ gen_farith(ctx, OPC_MOV_S, -1, rs, rt, 0);
+ break;
+ case NM_MOV_D:
+ gen_farith(ctx, OPC_MOV_D, -1, rs, rt, 0);
+ break;
+ case NM_ABS_S:
+ gen_farith(ctx, OPC_ABS_S, -1, rs, rt, 0);
+ break;
+ case NM_ABS_D:
+ gen_farith(ctx, OPC_ABS_D, -1, rs, rt, 0);
+ break;
+ case NM_NEG_S:
+ gen_farith(ctx, OPC_NEG_S, -1, rs, rt, 0);
+ break;
+ case NM_NEG_D:
+ gen_farith(ctx, OPC_NEG_D, -1, rs, rt, 0);
+ break;
+ case NM_CVT_D_S:
+ gen_farith(ctx, OPC_CVT_D_S, -1, rs, rt, 0);
+ break;
+ case NM_CVT_D_W:
+ gen_farith(ctx, OPC_CVT_D_W, -1, rs, rt, 0);
+ break;
+ case NM_CVT_D_L:
+ gen_farith(ctx, OPC_CVT_D_L, -1, rs, rt, 0);
+ break;
+ case NM_CVT_S_D:
+ gen_farith(ctx, OPC_CVT_S_D, -1, rs, rt, 0);
+ break;
+ case NM_CVT_S_W:
+ gen_farith(ctx, OPC_CVT_S_W, -1, rs, rt, 0);
+ break;
+ case NM_CVT_S_L:
+ gen_farith(ctx, OPC_CVT_S_L, -1, rs, rt, 0);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ case NM_POOL32F_5:
+ switch (extract32(ctx->opcode, 3, 3)) {
+ case NM_CMP_CONDN_S:
+ gen_r6_cmp_s(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd);
+ break;
+ case NM_CMP_CONDN_D:
+ gen_r6_cmp_d(ctx, extract32(ctx->opcode, 6, 5), rt, rs, rd);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc,
+ int rd, int rs, int rt)
+{
+ int ret = rd;
+ TCGv t0 = tcg_temp_new();
+ TCGv v1_t = tcg_temp_new();
+ TCGv v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, rs);
+ gen_load_gpr(v2_t, rt);
+
+ switch (opc) {
+ case NM_CMP_EQ_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMP_LT_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMP_LE_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMPU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMPU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMPU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
+ break;
+ case NM_CMPGU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_eq_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_CMPGU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_lt_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_CMPGU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_le_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_CMPGDU_EQ_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_eq_qb(v1_t, v1_t, v2_t);
+ tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_CMPGDU_LT_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_lt_qb(v1_t, v1_t, v2_t);
+ tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_CMPGDU_LE_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_le_qb(v1_t, v1_t, v2_t);
+ tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PACKRL_PH:
+ check_dsp(ctx);
+ gen_helper_packrl_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PICK_QB:
+ check_dsp(ctx);
+ gen_helper_pick_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PICK_PH:
+ check_dsp(ctx);
+ gen_helper_pick_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_ADDQ_S_W:
+ check_dsp(ctx);
+ gen_helper_addq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SUBQ_S_W:
+ check_dsp(ctx);
+ gen_helper_subq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_ADDSC:
+ check_dsp(ctx);
+ gen_helper_addsc(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_ADDWC:
+ check_dsp(ctx);
+ gen_helper_addwc(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_ADDQ_S_PH:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDQ_PH */
+ gen_helper_addq_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDQ_S_PH */
+ gen_helper_addq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_ADDQH_R_PH:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDQH_PH */
+ gen_helper_addqh_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDQH_R_PH */
+ gen_helper_addqh_r_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_ADDQH_R_W:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDQH_W */
+ gen_helper_addqh_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDQH_R_W */
+ gen_helper_addqh_r_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_ADDU_S_QB:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDU_QB */
+ gen_helper_addu_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDU_S_QB */
+ gen_helper_addu_s_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_ADDU_S_PH:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDU_PH */
+ gen_helper_addu_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDU_S_PH */
+ gen_helper_addu_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_ADDUH_R_QB:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* ADDUH_QB */
+ gen_helper_adduh_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* ADDUH_R_QB */
+ gen_helper_adduh_r_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SHRAV_R_PH:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SHRAV_PH */
+ gen_helper_shra_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SHRAV_R_PH */
+ gen_helper_shra_r_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SHRAV_R_QB:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SHRAV_QB */
+ gen_helper_shra_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SHRAV_R_QB */
+ gen_helper_shra_r_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBQ_S_PH:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBQ_PH */
+ gen_helper_subq_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBQ_S_PH */
+ gen_helper_subq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBQH_R_PH:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBQH_PH */
+ gen_helper_subqh_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBQH_R_PH */
+ gen_helper_subqh_r_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBQH_R_W:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBQH_W */
+ gen_helper_subqh_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBQH_R_W */
+ gen_helper_subqh_r_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBU_S_QB:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBU_QB */
+ gen_helper_subu_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBU_S_QB */
+ gen_helper_subu_s_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBU_S_PH:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBU_PH */
+ gen_helper_subu_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBU_S_PH */
+ gen_helper_subu_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SUBUH_R_QB:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SUBUH_QB */
+ gen_helper_subuh_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SUBUH_R_QB */
+ gen_helper_subuh_r_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_SHLLV_S_PH:
+ check_dsp(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SHLLV_PH */
+ gen_helper_shll_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* SHLLV_S_PH */
+ gen_helper_shll_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_PRECR_SRA_R_PH_W:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* PRECR_SRA_PH_W */
+ {
+ TCGv_i32 sa_t = tcg_const_i32(rd);
+ gen_helper_precr_sra_ph_w(v1_t, sa_t, v1_t,
+ cpu_gpr[rt]);
+ gen_store_gpr(v1_t, rt);
+ tcg_temp_free_i32(sa_t);
+ }
+ break;
+ case 1:
+ /* PRECR_SRA_R_PH_W */
+ {
+ TCGv_i32 sa_t = tcg_const_i32(rd);
+ gen_helper_precr_sra_r_ph_w(v1_t, sa_t, v1_t,
+ cpu_gpr[rt]);
+ gen_store_gpr(v1_t, rt);
+ tcg_temp_free_i32(sa_t);
+ }
+ break;
+ }
+ break;
+ case NM_MULEU_S_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbl(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULEU_S_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbr(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULQ_RS_PH:
+ check_dsp(ctx);
+ gen_helper_mulq_rs_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULQ_S_PH:
+ check_dsp_r2(ctx);
+ gen_helper_mulq_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULQ_RS_W:
+ check_dsp_r2(ctx);
+ gen_helper_mulq_rs_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULQ_S_W:
+ check_dsp_r2(ctx);
+ gen_helper_mulq_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_APPEND:
+ check_dsp_r2(ctx);
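+        /*
+         * APPEND: shift rt left by rd bits and fill the vacated low
+         * bits from rs (a deposit of rt into rs at bit position rd).
+         */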
+ gen_load_gpr(t0, rs);
+ if (rd != 0) {
+ tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], rd, 32 - rd);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ case NM_MODSUB:
+ check_dsp(ctx);
+ gen_helper_modsub(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHRAV_R_W:
+ check_dsp(ctx);
+ gen_helper_shra_r_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHRLV_PH:
+ check_dsp_r2(ctx);
+ gen_helper_shrl_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHRLV_QB:
+ check_dsp(ctx);
+ gen_helper_shrl_qb(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHLLV_QB:
+ check_dsp(ctx);
+ gen_helper_shll_qb(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHLLV_S_W:
+ check_dsp(ctx);
+ gen_helper_shll_s_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHILO:
+ check_dsp(ctx);
+ {
+ TCGv tv0 = tcg_temp_new();
+ TCGv tv1 = tcg_temp_new();
+ int16_t imm = extract32(ctx->opcode, 16, 7);
+
+ tcg_gen_movi_tl(tv0, rd >> 3);
+ tcg_gen_movi_tl(tv1, imm);
+ gen_helper_shilo(tv0, tv1, cpu_env);
+ tcg_temp_free(tv1);
+ tcg_temp_free(tv0);
+ }
+ break;
+ case NM_MULEQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phl(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MULEQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phr(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_MUL_S_PH:
+ check_dsp_r2(ctx);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* MUL_PH */
+ gen_helper_mul_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case 1:
+ /* MUL_S_PH */
+ gen_helper_mul_s_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ }
+ break;
+ case NM_PRECR_QB_PH:
+ check_dsp_r2(ctx);
+ gen_helper_precr_qb_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PRECRQ_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrq_qb_ph(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PRECRQ_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_ph_w(v1_t, v1_t, v2_t);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PRECRQ_RS_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_rs_ph_w(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_PRECRQU_S_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrqu_s_qb_ph(v1_t, v1_t, v2_t, cpu_env);
+ gen_store_gpr(v1_t, ret);
+ break;
+ case NM_SHRA_R_W:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd);
+ gen_helper_shra_r_w(v1_t, t0, v1_t);
+ gen_store_gpr(v1_t, rt);
+ break;
+ case NM_SHRA_R_PH:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 1);
+ switch (extract32(ctx->opcode, 10, 1)) {
+ case 0:
+ /* SHRA_PH */
+ gen_helper_shra_ph(v1_t, t0, v1_t);
+ gen_store_gpr(v1_t, rt);
+ break;
+ case 1:
+ /* SHRA_R_PH */
+ gen_helper_shra_r_ph(v1_t, t0, v1_t);
+ gen_store_gpr(v1_t, rt);
+ break;
+ }
+ break;
+ case NM_SHLL_S_PH:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd >> 1);
+ switch (extract32(ctx->opcode, 10, 2)) {
+ case 0:
+ /* SHLL_PH */
+ gen_helper_shll_ph(v1_t, t0, v1_t, cpu_env);
+ gen_store_gpr(v1_t, rt);
+ break;
+ case 2:
+ /* SHLL_S_PH */
+ gen_helper_shll_s_ph(v1_t, t0, v1_t, cpu_env);
+ gen_store_gpr(v1_t, rt);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_SHLL_S_W:
+ check_dsp(ctx);
+ tcg_gen_movi_tl(t0, rd);
+ gen_helper_shll_s_w(v1_t, t0, v1_t, cpu_env);
+ gen_store_gpr(v1_t, rt);
+ break;
+ case NM_REPL_PH:
+ check_dsp(ctx);
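+        /*
+         * REPL.PH: the shift pair re-sign-extends the immediate from
+         * bit 9 before it is broadcast into both halfwords.
+         */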
+ {
+ int16_t imm;
+ imm = sextract32(ctx->opcode, 11, 11);
+ imm = (int16_t)(imm << 6) >> 6;
+ if (rt != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], dup_const(MO_16, imm));
+ }
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ tcg_temp_free(v2_t);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(t0);
+}
+
+static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint16_t insn;
+ uint32_t op;
+ int rt, rs, rd;
+ int offset;
+ int imm;
+
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
+ ctx->opcode = (ctx->opcode << 16) | insn;
+
+ rt = extract32(ctx->opcode, 21, 5);
+ rs = extract32(ctx->opcode, 16, 5);
+ rd = extract32(ctx->opcode, 11, 5);
+
+ op = extract32(ctx->opcode, 26, 6);
+ switch (op) {
+ case NM_P_ADDIU:
+ if (rt == 0) {
+ /* P.RI */
+ switch (extract32(ctx->opcode, 19, 2)) {
+ case NM_SIGRIE:
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ case NM_P_SYSCALL:
+ if ((extract32(ctx->opcode, 18, 1)) == NM_SYSCALL) {
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case NM_BREAK:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case NM_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 0, 19))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ gen_reserved_instruction(ctx);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ }
+ } else {
+ /* NM_ADDIU */
+ imm = extract32(ctx->opcode, 0, 16);
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], imm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], imm);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ }
+ break;
+ case NM_ADDIUPC:
+ if (rt != 0) {
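+            /*
+             * The offset's sign bit is stored in instruction bit 0;
+             * the reassembled offset is always halfword-aligned.
+             */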
+ offset = sextract32(ctx->opcode, 0, 1) << 21 |
+ extract32(ctx->opcode, 1, 20) << 1;
+ target_long addr = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ tcg_gen_movi_tl(cpu_gpr[rt], addr);
+ }
+ break;
+ case NM_POOL32A:
+ switch (ctx->opcode & 0x07) {
+ case NM_POOL32A0:
+ gen_pool32a0_nanomips_insn(env, ctx);
+ break;
+ case NM_POOL32A5:
+ {
+ int32_t op1 = extract32(ctx->opcode, 3, 7);
+ gen_pool32a5_nanomips_insn(ctx, op1, rd, rs, rt);
+ }
+ break;
+ case NM_POOL32A7:
+ switch (extract32(ctx->opcode, 3, 3)) {
+ case NM_P_LSX:
+ gen_p_lsx(ctx, rd, rs, rt);
+ break;
+ case NM_LSA:
+ /*
+ * In nanoMIPS, the shift field directly encodes the shift
+ * amount, meaning that the supported shift values are in
+ * the range 0 to 3 (instead of 1 to 4 in MIPSR6).
+ */
+ gen_lsa(ctx, rd, rt, rs, extract32(ctx->opcode, 9, 2) - 1);
+ break;
+ case NM_EXTW:
+ gen_ext(ctx, 32, rd, rs, rt, extract32(ctx->opcode, 6, 5));
+ break;
+ case NM_POOL32AXF:
+ gen_pool32axf_nanomips_insn(env, ctx);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_GP_W:
+ switch (ctx->opcode & 0x03) {
+ case NM_ADDIUGP_W:
+ if (rt != 0) {
+ offset = extract32(ctx->opcode, 0, 21);
+ gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], offset);
+ }
+ break;
+ case NM_LWGP:
+ gen_ld(ctx, OPC_LW, rt, 28, extract32(ctx->opcode, 2, 19) << 2);
+ break;
+ case NM_SWGP:
+ gen_st(ctx, OPC_SW, rt, 28, extract32(ctx->opcode, 2, 19) << 2);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P48I:
+ {
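+            /*
+             * 48-bit formats: the third halfword supplies the upper
+             * 16 bits of the immediate, hence the "return 6" below.
+             */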
+ insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4);
+ target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16;
+ switch (extract32(ctx->opcode, 16, 5)) {
+ case NM_LI48:
+ check_nms(ctx);
+ if (rt != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], addr_off);
+ }
+ break;
+ case NM_ADDIU48:
+ check_nms(ctx);
+ if (rt != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rt], addr_off);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ }
+ break;
+ case NM_ADDIUGP48:
+ check_nms(ctx);
+ if (rt != 0) {
+ gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], addr_off);
+ }
+ break;
+ case NM_ADDIUPC48:
+ check_nms(ctx);
+ if (rt != 0) {
+ target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
+ addr_off);
+
+ tcg_gen_movi_tl(cpu_gpr[rt], addr);
+ }
+ break;
+ case NM_LWPC48:
+ check_nms(ctx);
+ if (rt != 0) {
+ TCGv t0;
+ t0 = tcg_temp_new();
+
+ target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
+ addr_off);
+
+ tcg_gen_movi_tl(t0, addr);
+ tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, MO_TESL);
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_SWPC48:
+ check_nms(ctx);
+ {
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ target_long addr = addr_add(ctx, ctx->base.pc_next + 6,
+ addr_off);
+
+ tcg_gen_movi_tl(t0, addr);
+ gen_load_gpr(t1, rt);
+
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ return 6;
+ }
+ case NM_P_U12:
+ switch (extract32(ctx->opcode, 12, 4)) {
+ case NM_ORI:
+ gen_logic_imm(ctx, OPC_ORI, rt, rs, extract32(ctx->opcode, 0, 12));
+ break;
+ case NM_XORI:
+ gen_logic_imm(ctx, OPC_XORI, rt, rs, extract32(ctx->opcode, 0, 12));
+ break;
+ case NM_ANDI:
+ gen_logic_imm(ctx, OPC_ANDI, rt, rs, extract32(ctx->opcode, 0, 12));
+ break;
+ case NM_P_SR:
+ switch (extract32(ctx->opcode, 20, 1)) {
+ case NM_PP_SR:
+ switch (ctx->opcode & 3) {
+ case NM_SAVE:
+ gen_save(ctx, rt, extract32(ctx->opcode, 16, 4),
+ extract32(ctx->opcode, 2, 1),
+ extract32(ctx->opcode, 3, 9) << 3);
+ break;
+ case NM_RESTORE:
+ case NM_RESTORE_JRC:
+ gen_restore(ctx, rt, extract32(ctx->opcode, 16, 4),
+ extract32(ctx->opcode, 2, 1),
+ extract32(ctx->opcode, 3, 9) << 3);
+ if ((ctx->opcode & 3) == NM_RESTORE_JRC) {
+ gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_SR_F:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_SLTI:
+ gen_slt_imm(ctx, OPC_SLTI, rt, rs, extract32(ctx->opcode, 0, 12));
+ break;
+ case NM_SLTIU:
+ gen_slt_imm(ctx, OPC_SLTIU, rt, rs, extract32(ctx->opcode, 0, 12));
+ break;
+ case NM_SEQI:
+ {
+ TCGv t0 = tcg_temp_new();
+
+ imm = extract32(ctx->opcode, 0, 12);
+ gen_load_gpr(t0, rs);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, imm);
+ gen_store_gpr(t0, rt);
+
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_ADDIUNEG:
+ imm = (int16_t) extract32(ctx->opcode, 0, 12);
+ gen_arith_imm(ctx, OPC_ADDIU, rt, rs, -imm);
+ break;
+ case NM_P_SHIFT:
+ {
+ int shift = extract32(ctx->opcode, 0, 5);
+ switch (extract32(ctx->opcode, 5, 4)) {
+ case NM_P_SLL:
+ if (rt == 0 && shift == 0) {
+ /* NOP */
+ } else if (rt == 0 && shift == 3) {
+ /* EHB - treat as NOP */
+ } else if (rt == 0 && shift == 5) {
+ /* PAUSE - treat as NOP */
+ } else if (rt == 0 && shift == 6) {
+ /* SYNC */
+ gen_sync(extract32(ctx->opcode, 16, 5));
+ } else {
+ /* SLL */
+ gen_shift_imm(ctx, OPC_SLL, rt, rs,
+ extract32(ctx->opcode, 0, 5));
+ }
+ break;
+ case NM_SRL:
+ gen_shift_imm(ctx, OPC_SRL, rt, rs,
+ extract32(ctx->opcode, 0, 5));
+ break;
+ case NM_SRA:
+ gen_shift_imm(ctx, OPC_SRA, rt, rs,
+ extract32(ctx->opcode, 0, 5));
+ break;
+ case NM_ROTR:
+ gen_shift_imm(ctx, OPC_ROTR, rt, rs,
+ extract32(ctx->opcode, 0, 5));
+ break;
+ }
+ }
+ break;
+ case NM_P_ROTX:
+ check_nms(ctx);
+ if (rt != 0) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 shift = tcg_const_i32(extract32(ctx->opcode, 0, 5));
+ TCGv_i32 shiftx = tcg_const_i32(extract32(ctx->opcode, 7, 4)
+ << 1);
+ TCGv_i32 stripe = tcg_const_i32(extract32(ctx->opcode, 6, 1));
+
+ gen_load_gpr(t0, rs);
+ gen_helper_rotx(cpu_gpr[rt], t0, shift, shiftx, stripe);
+ tcg_temp_free(t0);
+
+ tcg_temp_free_i32(shift);
+ tcg_temp_free_i32(shiftx);
+ tcg_temp_free_i32(stripe);
+ }
+ break;
+ case NM_P_INS:
+ switch (((ctx->opcode >> 10) & 2) |
+ (extract32(ctx->opcode, 5, 1))) {
+ case NM_INS:
+ check_nms(ctx);
+ gen_bitops(ctx, OPC_INS, rt, rs, extract32(ctx->opcode, 0, 5),
+ extract32(ctx->opcode, 6, 5));
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_EXT:
+ switch (((ctx->opcode >> 10) & 2) |
+ (extract32(ctx->opcode, 5, 1))) {
+ case NM_EXT:
+ check_nms(ctx);
+ gen_bitops(ctx, OPC_EXT, rt, rs, extract32(ctx->opcode, 0, 5),
+ extract32(ctx->opcode, 6, 5));
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_POOL32F:
+ gen_pool32f_nanomips_insn(ctx);
+ break;
+ case NM_POOL32S:
+ break;
+ case NM_P_LUI:
+ switch (extract32(ctx->opcode, 1, 1)) {
+ case NM_LUI:
+ if (rt != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt],
+ sextract32(ctx->opcode, 0, 1) << 31 |
+ extract32(ctx->opcode, 2, 10) << 21 |
+ extract32(ctx->opcode, 12, 9) << 12);
+ }
+ break;
+ case NM_ALUIPC:
+ if (rt != 0) {
+ offset = sextract32(ctx->opcode, 0, 1) << 31 |
+ extract32(ctx->opcode, 2, 10) << 21 |
+ extract32(ctx->opcode, 12, 9) << 12;
+ target_long addr;
+ addr = ~0xFFF & addr_add(ctx, ctx->base.pc_next + 4, offset);
+ tcg_gen_movi_tl(cpu_gpr[rt], addr);
+ }
+ break;
+ }
+ break;
+ case NM_P_GP_BH:
+ {
+ uint32_t u = extract32(ctx->opcode, 0, 18);
+
+ switch (extract32(ctx->opcode, 18, 3)) {
+ case NM_LBGP:
+ gen_ld(ctx, OPC_LB, rt, 28, u);
+ break;
+ case NM_SBGP:
+ gen_st(ctx, OPC_SB, rt, 28, u);
+ break;
+ case NM_LBUGP:
+ gen_ld(ctx, OPC_LBU, rt, 28, u);
+ break;
+ case NM_ADDIUGP_B:
+ if (rt != 0) {
+ gen_op_addr_addi(ctx, cpu_gpr[rt], cpu_gpr[28], u);
+ }
+ break;
+ case NM_P_GP_LH:
+ u &= ~1;
+ switch (ctx->opcode & 1) {
+ case NM_LHGP:
+ gen_ld(ctx, OPC_LH, rt, 28, u);
+ break;
+ case NM_LHUGP:
+ gen_ld(ctx, OPC_LHU, rt, 28, u);
+ break;
+ }
+ break;
+ case NM_P_GP_SH:
+ u &= ~1;
+ switch (ctx->opcode & 1) {
+ case NM_SHGP:
+ gen_st(ctx, OPC_SH, rt, 28, u);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_GP_CP1:
+ u &= ~0x3;
+ switch (ctx->opcode & 0x3) {
+ case NM_LWC1GP:
+ gen_cop1_ldst(ctx, OPC_LWC1, rt, 28, u);
+ break;
+ case NM_LDC1GP:
+ gen_cop1_ldst(ctx, OPC_LDC1, rt, 28, u);
+ break;
+ case NM_SWC1GP:
+ gen_cop1_ldst(ctx, OPC_SWC1, rt, 28, u);
+ break;
+ case NM_SDC1GP:
+ gen_cop1_ldst(ctx, OPC_SDC1, rt, 28, u);
+ break;
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case NM_P_LS_U12:
+ {
+ uint32_t u = extract32(ctx->opcode, 0, 12);
+
+ switch (extract32(ctx->opcode, 12, 4)) {
+ case NM_P_PREFU12:
+ if (rt == 31) {
+ /* SYNCI */
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ } else {
+ /* PREF */
+ /* Treat as NOP. */
+ }
+ break;
+ case NM_LB:
+ gen_ld(ctx, OPC_LB, rt, rs, u);
+ break;
+ case NM_LH:
+ gen_ld(ctx, OPC_LH, rt, rs, u);
+ break;
+ case NM_LW:
+ gen_ld(ctx, OPC_LW, rt, rs, u);
+ break;
+ case NM_LBU:
+ gen_ld(ctx, OPC_LBU, rt, rs, u);
+ break;
+ case NM_LHU:
+ gen_ld(ctx, OPC_LHU, rt, rs, u);
+ break;
+ case NM_SB:
+ gen_st(ctx, OPC_SB, rt, rs, u);
+ break;
+ case NM_SH:
+ gen_st(ctx, OPC_SH, rt, rs, u);
+ break;
+ case NM_SW:
+ gen_st(ctx, OPC_SW, rt, rs, u);
+ break;
+ case NM_LWC1:
+ gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, u);
+ break;
+ case NM_LDC1:
+ gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, u);
+ break;
+ case NM_SWC1:
+ gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, u);
+ break;
+ case NM_SDC1:
+ gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, u);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case NM_P_LS_S9:
+ {
+ int32_t s = (sextract32(ctx->opcode, 15, 1) << 8) |
+ extract32(ctx->opcode, 0, 8);
+
+ switch (extract32(ctx->opcode, 8, 3)) {
+ case NM_P_LS_S0:
+ switch (extract32(ctx->opcode, 11, 4)) {
+ case NM_LBS9:
+ gen_ld(ctx, OPC_LB, rt, rs, s);
+ break;
+ case NM_LHS9:
+ gen_ld(ctx, OPC_LH, rt, rs, s);
+ break;
+ case NM_LWS9:
+ gen_ld(ctx, OPC_LW, rt, rs, s);
+ break;
+ case NM_LBUS9:
+ gen_ld(ctx, OPC_LBU, rt, rs, s);
+ break;
+ case NM_LHUS9:
+ gen_ld(ctx, OPC_LHU, rt, rs, s);
+ break;
+ case NM_SBS9:
+ gen_st(ctx, OPC_SB, rt, rs, s);
+ break;
+ case NM_SHS9:
+ gen_st(ctx, OPC_SH, rt, rs, s);
+ break;
+ case NM_SWS9:
+ gen_st(ctx, OPC_SW, rt, rs, s);
+ break;
+ case NM_LWC1S9:
+ gen_cop1_ldst(ctx, OPC_LWC1, rt, rs, s);
+ break;
+ case NM_LDC1S9:
+ gen_cop1_ldst(ctx, OPC_LDC1, rt, rs, s);
+ break;
+ case NM_SWC1S9:
+ gen_cop1_ldst(ctx, OPC_SWC1, rt, rs, s);
+ break;
+ case NM_SDC1S9:
+ gen_cop1_ldst(ctx, OPC_SDC1, rt, rs, s);
+ break;
+ case NM_P_PREFS9:
+ if (rt == 31) {
+ /* SYNCI */
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ } else {
+ /* PREF */
+ /* Treat as NOP. */
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_LS_S1:
+ switch (extract32(ctx->opcode, 11, 4)) {
+ case NM_UALH:
+ case NM_UASH:
+ check_nms(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, rs, s);
+
+ switch (extract32(ctx->opcode, 11, 4)) {
+ case NM_UALH:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
+ MO_UNALN);
+ gen_store_gpr(t0, rt);
+ break;
+ case NM_UASH:
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
+ MO_UNALN);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ case NM_P_LL:
+ switch (ctx->opcode & 0x03) {
+ case NM_LL:
+ gen_ld(ctx, OPC_LL, rt, rs, s);
+ break;
+ case NM_LLWP:
+ check_xnp(ctx);
+ gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5));
+ break;
+ }
+ break;
+ case NM_P_SC:
+ switch (ctx->opcode & 0x03) {
+ case NM_SC:
+ gen_st_cond(ctx, rt, rs, s, MO_TESL, false);
+ break;
+ case NM_SCWP:
+ check_xnp(ctx);
+ gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5),
+ false);
+ break;
+ }
+ break;
+ case NM_CACHE:
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, s);
+ }
+ break;
+ }
+ break;
+ case NM_P_LS_E0:
+ switch (extract32(ctx->opcode, 11, 4)) {
+ case NM_LBE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LBE, rt, rs, s);
+ break;
+ case NM_SBE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_st(ctx, OPC_SBE, rt, rs, s);
+ break;
+ case NM_LBUE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LBUE, rt, rs, s);
+ break;
+ case NM_P_PREFE:
+ if (rt == 31) {
+ /* case NM_SYNCIE */
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ } else {
+ /* case NM_PREFE */
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ /* Treat as NOP. */
+ }
+ break;
+ case NM_LHE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LHE, rt, rs, s);
+ break;
+ case NM_SHE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_st(ctx, OPC_SHE, rt, rs, s);
+ break;
+ case NM_LHUE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LHUE, rt, rs, s);
+ break;
+ case NM_CACHEE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ check_nms_dl_il_sl_tl_l2c(ctx);
+ gen_cache_operation(ctx, rt, rs, s);
+ break;
+ case NM_LWE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LWE, rt, rs, s);
+ break;
+ case NM_SWE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_st(ctx, OPC_SWE, rt, rs, s);
+ break;
+ case NM_P_LLE:
+ switch (extract32(ctx->opcode, 2, 2)) {
+ case NM_LLE:
+ check_xnp(ctx);
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, OPC_LLE, rt, rs, s);
+ break;
+ case NM_LLWPE:
+ check_xnp(ctx);
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_llwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5));
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_SCE:
+ switch (extract32(ctx->opcode, 2, 2)) {
+ case NM_SCE:
+ check_xnp(ctx);
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_st_cond(ctx, rt, rs, s, MO_TESL, true);
+ break;
+ case NM_SCWPE:
+ check_xnp(ctx);
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ gen_scwp(ctx, rs, 0, rt, extract32(ctx->opcode, 3, 5),
+ true);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ }
+ break;
+ case NM_P_LS_WM:
+ case NM_P_LS_UAWM:
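+ /*
+ * LWM/SWM: transfer a block of up to 8 consecutive words (a count
+ * field of 0 encodes 8); the UAWM variants permit unaligned accesses
+ * (descriptive note).
+ */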
+ check_nms(ctx);
+ {
+ int count = extract32(ctx->opcode, 12, 3);
+ int counter = 0;
+
+ offset = sextract32(ctx->opcode, 15, 1) << 8 |
+ extract32(ctx->opcode, 0, 8);
+ TCGv va = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ MemOp memop = (extract32(ctx->opcode, 8, 3)) ==
+ NM_P_LS_UAWM ? MO_UNALN : 0;
+
+ count = (count == 0) ? 8 : count;
+ while (counter != count) {
+ int this_rt = ((rt + counter) & 0x1f) | (rt & 0x10);
+ int this_offset = offset + (counter << 2);
+
+ gen_base_offset_addr(ctx, va, rs, this_offset);
+
+ switch (extract32(ctx->opcode, 11, 1)) {
+ case NM_LWM:
+ tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx,
+ memop | MO_TESL);
+ gen_store_gpr(t1, this_rt);
+ if ((this_rt == rs) &&
+ (counter != (count - 1))) {
+ /* UNPREDICTABLE */
+ }
+ break;
+ case NM_SWM:
+ this_rt = (rt == 0) ? 0 : this_rt;
+ gen_load_gpr(t1, this_rt);
+ tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx,
+ memop | MO_TEUL);
+ break;
+ }
+ counter++;
+ }
+ tcg_temp_free(va);
+ tcg_temp_free(t1);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case NM_MOVE_BALC:
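+ /*
+ * MOVE.BALC: copy a source GPR into $a0 or $a1 and then branch and
+ * link unconditionally, combining a register move with BALC in a
+ * single instruction (descriptive note).
+ */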
+ check_nms(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ int32_t s = sextract32(ctx->opcode, 0, 1) << 21 |
+ extract32(ctx->opcode, 1, 20) << 1;
+ rd = (extract32(ctx->opcode, 24, 1)) == 0 ? 4 : 5;
+ rt = decode_gpr_gpr4_zero(extract32(ctx->opcode, 25, 1) << 3 |
+ extract32(ctx->opcode, 21, 3));
+ gen_load_gpr(t0, rt);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
+ tcg_temp_free(t0);
+ }
+ break;
+ case NM_P_BAL:
+ {
+ int32_t s = sextract32(ctx->opcode, 0, 1) << 25 |
+ extract32(ctx->opcode, 1, 24) << 1;
+
+ if ((extract32(ctx->opcode, 25, 1)) == 0) {
+ /* BC */
+ gen_compute_branch_nm(ctx, OPC_BEQ, 4, 0, 0, s);
+ } else {
+ /* BALC */
+ gen_compute_branch_nm(ctx, OPC_BGEZAL, 4, 0, 0, s);
+ }
+ }
+ break;
+ case NM_P_J:
+ switch (extract32(ctx->opcode, 12, 4)) {
+ case NM_JALRC:
+ case NM_JALRC_HB:
+ gen_compute_branch_nm(ctx, OPC_JALR, 4, rs, rt, 0);
+ break;
+ case NM_P_BALRSC:
+ gen_compute_nanomips_pbalrsc_branch(ctx, rs, rt);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P_BR1:
+ {
+ int32_t s = sextract32(ctx->opcode, 0, 1) << 14 |
+ extract32(ctx->opcode, 1, 13) << 1;
+ switch (extract32(ctx->opcode, 14, 2)) {
+ case NM_BEQC:
+ check_nms(ctx);
+ gen_compute_branch_nm(ctx, OPC_BEQ, 4, rs, rt, s);
+ break;
+ case NM_P_BR3A:
+ s = sextract32(ctx->opcode, 0, 1) << 14 |
+ extract32(ctx->opcode, 1, 13) << 1;
+ check_cp1_enabled(ctx);
+ switch (extract32(ctx->opcode, 16, 5)) {
+ case NM_BC1EQZC:
+ gen_compute_branch_cp1_nm(ctx, OPC_BC1EQZ, rt, s);
+ break;
+ case NM_BC1NEZC:
+ gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s);
+ break;
+ case NM_BPOSGE32C:
+ check_dsp_r3(ctx);
+ {
+ int32_t imm = extract32(ctx->opcode, 1, 13) |
+ extract32(ctx->opcode, 0, 1) << 13;
+
+ gen_compute_branch_nm(ctx, OPC_BPOSGE32, 4, -1, -2,
+ imm << 1);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_BGEC:
+ if (rs == rt) {
+ gen_compute_compact_branch_nm(ctx, OPC_BC, rs, rt, s);
+ } else {
+ gen_compute_compact_branch_nm(ctx, OPC_BGEC, rs, rt, s);
+ }
+ break;
+ case NM_BGEUC:
+ if (rs == rt || rt == 0) {
+ gen_compute_compact_branch_nm(ctx, OPC_BC, 0, 0, s);
+ } else if (rs == 0) {
+ gen_compute_compact_branch_nm(ctx, OPC_BEQZC, rt, 0, s);
+ } else {
+ gen_compute_compact_branch_nm(ctx, OPC_BGEUC, rs, rt, s);
+ }
+ break;
+ }
+ }
+ break;
+ case NM_P_BR2:
+ {
+ int32_t s = sextract32(ctx->opcode, 0, 1) << 14 |
+ extract32(ctx->opcode, 1, 13) << 1;
+ switch (extract32(ctx->opcode, 14, 2)) {
+ case NM_BNEC:
+ check_nms(ctx);
+ gen_compute_branch_nm(ctx, OPC_BNE, 4, rs, rt, s);
+ break;
+ case NM_BLTC:
+ if (rs != 0 && rt != 0 && rs == rt) {
+ /* NOP */
+ ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+ } else {
+ gen_compute_compact_branch_nm(ctx, OPC_BLTC, rs, rt, s);
+ }
+ break;
+ case NM_BLTUC:
+ if (rs == 0 || rs == rt) {
+ /* NOP */
+ ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+ } else {
+ gen_compute_compact_branch_nm(ctx, OPC_BLTUC, rs, rt, s);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case NM_P_BRI:
+ {
+ int32_t s = sextract32(ctx->opcode, 0, 1) << 11 |
+ extract32(ctx->opcode, 1, 10) << 1;
+ uint32_t u = extract32(ctx->opcode, 11, 7);
+
+ gen_compute_imm_branch(ctx, extract32(ctx->opcode, 18, 3),
+ rt, u, s);
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ return 4;
+}
+
+static int decode_isa_nanomips(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t op;
+ int rt = decode_gpr_gpr3(NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ int rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
+ int rd = decode_gpr_gpr3(NANOMIPS_EXTRACT_RD3(ctx->opcode));
+ int offset;
+ int imm;
+
+ /* make sure instructions are on a halfword boundary */
+ if (ctx->base.pc_next & 0x1) {
+ TCGv tmp = tcg_const_tl(ctx->base.pc_next);
+ tcg_gen_st_tl(tmp, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_temp_free(tmp);
+ generate_exception_end(ctx, EXCP_AdEL);
+ return 2;
+ }
+
+ op = extract32(ctx->opcode, 10, 6);
+ switch (op) {
+ case NM_P16_MV:
+ rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
+ if (rt != 0) {
+ /* MOVE */
+ rs = NANOMIPS_EXTRACT_RS5(ctx->opcode);
+ gen_arith(ctx, OPC_ADDU, rt, rs, 0);
+ } else {
+ /* P16.RI */
+ switch (extract32(ctx->opcode, 3, 2)) {
+ case NM_P16_SYSCALL:
+ if (extract32(ctx->opcode, 2, 1) == 0) {
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case NM_BREAK16:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case NM_SDBBP16:
+ if (is_uhi(extract32(ctx->opcode, 0, 3))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ gen_reserved_instruction(ctx);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
+ break;
+ case NM_P16_SHIFT:
+ {
+ int shift = extract32(ctx->opcode, 0, 3);
+ uint32_t opc = 0;
+ shift = (shift == 0) ? 8 : shift;
+
+ switch (extract32(ctx->opcode, 3, 1)) {
+ case NM_SLL16:
+ opc = OPC_SLL;
+ break;
+ case NM_SRL16:
+ opc = OPC_SRL;
+ break;
+ }
+ gen_shift_imm(ctx, opc, rt, rs, shift);
+ }
+ break;
+ case NM_P16C:
+ switch (ctx->opcode & 1) {
+ case NM_POOL16C_0:
+ gen_pool16c_nanomips_insn(ctx);
+ break;
+ case NM_LWXS16:
+ gen_ldxs(ctx, rt, rs, rd);
+ break;
+ }
+ break;
+ case NM_P16_A1:
+ switch (extract32(ctx->opcode, 6, 1)) {
+ case NM_ADDIUR1SP:
+ imm = extract32(ctx->opcode, 0, 6) << 2;
+ gen_arith_imm(ctx, OPC_ADDIU, rt, 29, imm);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P16_A2:
+ switch (extract32(ctx->opcode, 3, 1)) {
+ case NM_ADDIUR2:
+ imm = extract32(ctx->opcode, 0, 3) << 2;
+ gen_arith_imm(ctx, OPC_ADDIU, rt, rs, imm);
+ break;
+ case NM_P_ADDIURS5:
+ rt = extract32(ctx->opcode, 5, 5);
+ if (rt != 0) {
+ /* imm = sign_extend(s[3] . s[2:0], from_nbits = 4) */
+ imm = (sextract32(ctx->opcode, 4, 1) << 3) |
+ (extract32(ctx->opcode, 0, 3));
+ gen_arith_imm(ctx, OPC_ADDIU, rt, rt, imm);
+ }
+ break;
+ }
+ break;
+ case NM_P16_ADDU:
+ switch (ctx->opcode & 0x1) {
+ case NM_ADDU16:
+ gen_arith(ctx, OPC_ADDU, rd, rs, rt);
+ break;
+ case NM_SUBU16:
+ gen_arith(ctx, OPC_SUBU, rd, rs, rt);
+ break;
+ }
+ break;
+ case NM_P16_4X4:
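+ /*
+ * P16.4X4: 4-bit register numbers assembled from non-contiguous
+ * opcode bits and mapped through decode_gpr_gpr4() (descriptive note).
+ */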
+ rt = (extract32(ctx->opcode, 9, 1) << 3) |
+ extract32(ctx->opcode, 5, 3);
+ rs = (extract32(ctx->opcode, 4, 1) << 3) |
+ extract32(ctx->opcode, 0, 3);
+ rt = decode_gpr_gpr4(rt);
+ rs = decode_gpr_gpr4(rs);
+ switch ((extract32(ctx->opcode, 7, 2) & 0x2) |
+ (extract32(ctx->opcode, 3, 1))) {
+ case NM_ADDU4X4:
+ check_nms(ctx);
+ gen_arith(ctx, OPC_ADDU, rt, rs, rt);
+ break;
+ case NM_MUL4X4:
+ check_nms(ctx);
+ gen_r6_muldiv(ctx, R6_OPC_MUL, rt, rs, rt);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_LI16:
+ {
+ int imm = extract32(ctx->opcode, 0, 7);
+ imm = (imm == 0x7f ? -1 : imm);
+ if (rt != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], imm);
+ }
+ }
+ break;
+ case NM_ANDI16:
+ {
+ uint32_t u = extract32(ctx->opcode, 0, 4);
+ u = (u == 12) ? 0xff :
+ (u == 13) ? 0xffff : u;
+ gen_logic_imm(ctx, OPC_ANDI, rt, rs, u);
+ }
+ break;
+ case NM_P16_LB:
+ offset = extract32(ctx->opcode, 0, 2);
+ switch (extract32(ctx->opcode, 2, 2)) {
+ case NM_LB16:
+ gen_ld(ctx, OPC_LB, rt, rs, offset);
+ break;
+ case NM_SB16:
+ rt = decode_gpr_gpr3_src_store(
+ NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ gen_st(ctx, OPC_SB, rt, rs, offset);
+ break;
+ case NM_LBU16:
+ gen_ld(ctx, OPC_LBU, rt, rs, offset);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_P16_LH:
+ offset = extract32(ctx->opcode, 1, 2) << 1;
+ switch ((extract32(ctx->opcode, 3, 1) << 1) | (ctx->opcode & 1)) {
+ case NM_LH16:
+ gen_ld(ctx, OPC_LH, rt, rs, offset);
+ break;
+ case NM_SH16:
+ rt = decode_gpr_gpr3_src_store(
+ NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ gen_st(ctx, OPC_SH, rt, rs, offset);
+ break;
+ case NM_LHU16:
+ gen_ld(ctx, OPC_LHU, rt, rs, offset);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case NM_LW16:
+ offset = extract32(ctx->opcode, 0, 4) << 2;
+ gen_ld(ctx, OPC_LW, rt, rs, offset);
+ break;
+ case NM_LWSP16:
+ rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
+ offset = extract32(ctx->opcode, 0, 5) << 2;
+ gen_ld(ctx, OPC_LW, rt, 29, offset);
+ break;
+ case NM_LW4X4:
+ check_nms(ctx);
+ rt = (extract32(ctx->opcode, 9, 1) << 3) |
+ extract32(ctx->opcode, 5, 3);
+ rs = (extract32(ctx->opcode, 4, 1) << 3) |
+ extract32(ctx->opcode, 0, 3);
+ offset = (extract32(ctx->opcode, 3, 1) << 3) |
+ (extract32(ctx->opcode, 8, 1) << 2);
+ rt = decode_gpr_gpr4(rt);
+ rs = decode_gpr_gpr4(rs);
+ gen_ld(ctx, OPC_LW, rt, rs, offset);
+ break;
+ case NM_SW4X4:
+ check_nms(ctx);
+ rt = (extract32(ctx->opcode, 9, 1) << 3) |
+ extract32(ctx->opcode, 5, 3);
+ rs = (extract32(ctx->opcode, 4, 1) << 3) |
+ extract32(ctx->opcode, 0, 3);
+ offset = (extract32(ctx->opcode, 3, 1) << 3) |
+ (extract32(ctx->opcode, 8, 1) << 2);
+ rt = decode_gpr_gpr4_zero(rt);
+ rs = decode_gpr_gpr4(rs);
+ gen_st(ctx, OPC_SW, rt, rs, offset);
+ break;
+ case NM_LWGP16:
+ offset = extract32(ctx->opcode, 0, 7) << 2;
+ gen_ld(ctx, OPC_LW, rt, 28, offset);
+ break;
+ case NM_SWSP16:
+ rt = NANOMIPS_EXTRACT_RD5(ctx->opcode);
+ offset = extract32(ctx->opcode, 0, 5) << 2;
+ gen_st(ctx, OPC_SW, rt, 29, offset);
+ break;
+ case NM_SW16:
+ rt = decode_gpr_gpr3_src_store(
+ NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ rs = decode_gpr_gpr3(NANOMIPS_EXTRACT_RS3(ctx->opcode));
+ offset = extract32(ctx->opcode, 0, 4) << 2;
+ gen_st(ctx, OPC_SW, rt, rs, offset);
+ break;
+ case NM_SWGP16:
+ rt = decode_gpr_gpr3_src_store(
+ NANOMIPS_EXTRACT_RT3(ctx->opcode));
+ offset = extract32(ctx->opcode, 0, 7) << 2;
+ gen_st(ctx, OPC_SW, rt, 28, offset);
+ break;
+ case NM_BC16:
+ gen_compute_branch_nm(ctx, OPC_BEQ, 2, 0, 0,
+ (sextract32(ctx->opcode, 0, 1) << 10) |
+ (extract32(ctx->opcode, 1, 9) << 1));
+ break;
+ case NM_BALC16:
+ gen_compute_branch_nm(ctx, OPC_BGEZAL, 2, 0, 0,
+ (sextract32(ctx->opcode, 0, 1) << 10) |
+ (extract32(ctx->opcode, 1, 9) << 1));
+ break;
+ case NM_BEQZC16:
+ gen_compute_branch_nm(ctx, OPC_BEQ, 2, rt, 0,
+ (sextract32(ctx->opcode, 0, 1) << 7) |
+ (extract32(ctx->opcode, 1, 6) << 1));
+ break;
+ case NM_BNEZC16:
+ gen_compute_branch_nm(ctx, OPC_BNE, 2, rt, 0,
+ (sextract32(ctx->opcode, 0, 1) << 7) |
+ (extract32(ctx->opcode, 1, 6) << 1));
+ break;
+ case NM_P16_BR:
+ switch (ctx->opcode & 0xf) {
+ case 0:
+ /* P16.JRC */
+ switch (extract32(ctx->opcode, 4, 1)) {
+ case NM_JRC:
+ gen_compute_branch_nm(ctx, OPC_JR, 2,
+ extract32(ctx->opcode, 5, 5), 0, 0);
+ break;
+ case NM_JALRC16:
+ gen_compute_branch_nm(ctx, OPC_JALR, 2,
+ extract32(ctx->opcode, 5, 5), 31, 0);
+ break;
+ }
+ break;
+ default:
+ {
+ /* P16.BRI */
+ uint32_t opc = extract32(ctx->opcode, 4, 3) <
+ extract32(ctx->opcode, 7, 3) ? OPC_BEQ : OPC_BNE;
+ gen_compute_branch_nm(ctx, opc, 2, rs, rt,
+ extract32(ctx->opcode, 0, 4) << 1);
+ }
+ break;
+ }
+ break;
+ case NM_P16_SR:
+ {
+ int count = extract32(ctx->opcode, 0, 4);
+ int u = extract32(ctx->opcode, 4, 4) << 4;
+
+ rt = 30 + extract32(ctx->opcode, 9, 1);
+ switch (extract32(ctx->opcode, 8, 1)) {
+ case NM_SAVE16:
+ gen_save(ctx, rt, count, 0, u);
+ break;
+ case NM_RESTORE_JRC16:
+ gen_restore(ctx, rt, count, 0, u);
+ gen_compute_branch_nm(ctx, OPC_JR, 2, 31, 0, 0);
+ break;
+ }
+ }
+ break;
+ case NM_MOVEP:
+ case NM_MOVEPREV:
+ check_nms(ctx);
+ {
+ static const int gpr2reg1[] = {4, 5, 6, 7};
+ static const int gpr2reg2[] = {5, 6, 7, 8};
+ int re;
+ int rd2 = extract32(ctx->opcode, 3, 1) << 1 |
+ extract32(ctx->opcode, 8, 1);
+ int r1 = gpr2reg1[rd2];
+ int r2 = gpr2reg2[rd2];
+ int r3 = extract32(ctx->opcode, 4, 1) << 3 |
+ extract32(ctx->opcode, 0, 3);
+ int r4 = extract32(ctx->opcode, 9, 1) << 3 |
+ extract32(ctx->opcode, 5, 3);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ if (op == NM_MOVEP) {
+ rd = r1;
+ re = r2;
+ rs = decode_gpr_gpr4_zero(r3);
+ rt = decode_gpr_gpr4_zero(r4);
+ } else {
+ rd = decode_gpr_gpr4(r3);
+ re = decode_gpr_gpr4(r4);
+ rs = r1;
+ rt = r2;
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ tcg_gen_mov_tl(cpu_gpr[re], t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ default:
+ return decode_nanomips_32_48_opc(env, ctx);
+ }
+
+ return 2;
+}
diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c
new file mode 100644
index 000000000..ef3dafcbb
--- /dev/null
+++ b/target/mips/tcg/op_helper.c
@@ -0,0 +1,303 @@
+/*
+ * MIPS emulation helpers for qemu.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/memop.h"
+#include "fpu_helper.h"
+
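+/*
+ * Reverse the bit order within each byte of the operand: three swap
+ * stages exchange adjacent bits, 2-bit pairs and nibbles, so e.g. a
+ * byte 0x01 becomes 0x80 (descriptive note).
+ */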
+static inline target_ulong bitswap(target_ulong v)
+{
+ v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
+ ((v & (target_ulong)0x5555555555555555ULL) << 1);
+ v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
+ ((v & (target_ulong)0x3333333333333333ULL) << 2);
+ v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
+ ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
+ return v;
+}
+
+#ifdef TARGET_MIPS64
+target_ulong helper_dbitswap(target_ulong rt)
+{
+ return bitswap(rt);
+}
+#endif
+
+target_ulong helper_bitswap(target_ulong rt)
+{
+ return (int32_t)bitswap(rt);
+}
+
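+/*
+ * ROTX: the five conditional stages below form a staged shift network
+ * over the 32-bit value replicated into 64 bits. Each stage moves bits
+ * right by 16, 8, 4, 2 and 1 positions, choosing between 'shift' and
+ * 'shiftx' per bit group, with 'stripe' optionally inverting the
+ * selection (descriptive note on the structure of the loops below).
+ */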
+target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
+ uint32_t stripe)
+{
+ int i;
+ uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
+ uint64_t tmp1 = tmp0;
+ for (i = 0; i <= 46; i++) {
+ int s;
+ if (i & 0x8) {
+ s = shift;
+ } else {
+ s = shiftx;
+ }
+
+ if (stripe != 0 && !(i & 0x4)) {
+ s = ~s;
+ }
+ if (s & 0x10) {
+ if (tmp0 & (1LL << (i + 16))) {
+ tmp1 |= 1LL << i;
+ } else {
+ tmp1 &= ~(1LL << i);
+ }
+ }
+ }
+
+ uint64_t tmp2 = tmp1;
+ for (i = 0; i <= 38; i++) {
+ int s;
+ if (i & 0x4) {
+ s = shift;
+ } else {
+ s = shiftx;
+ }
+
+ if (s & 0x8) {
+ if (tmp1 & (1LL << (i + 8))) {
+ tmp2 |= 1LL << i;
+ } else {
+ tmp2 &= ~(1LL << i);
+ }
+ }
+ }
+
+ uint64_t tmp3 = tmp2;
+ for (i = 0; i <= 34; i++) {
+ int s;
+ if (i & 0x2) {
+ s = shift;
+ } else {
+ s = shiftx;
+ }
+ if (s & 0x4) {
+ if (tmp2 & (1LL << (i + 4))) {
+ tmp3 |= 1LL << i;
+ } else {
+ tmp3 &= ~(1LL << i);
+ }
+ }
+ }
+
+ uint64_t tmp4 = tmp3;
+ for (i = 0; i <= 32; i++) {
+ int s;
+ if (i & 0x1) {
+ s = shift;
+ } else {
+ s = shiftx;
+ }
+ if (s & 0x2) {
+ if (tmp3 & (1LL << (i + 2))) {
+ tmp4 |= 1LL << i;
+ } else {
+ tmp4 &= ~(1LL << i);
+ }
+ }
+ }
+
+ uint64_t tmp5 = tmp4;
+ for (i = 0; i <= 31; i++) {
+ int s;
+ s = shift;
+ if (s & 0x1) {
+ if (tmp4 & (1LL << (i + 1))) {
+ tmp5 |= 1LL << i;
+ } else {
+ tmp5 &= ~(1LL << i);
+ }
+ }
+ }
+
+ return (int64_t)(int32_t)(uint32_t)tmp5;
+}
+
+void helper_fork(target_ulong arg1, target_ulong arg2)
+{
+ /*
+ * arg1 = rt, arg2 = rs
+ * TODO: store to TC register
+ */
+}
+
+target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
+{
+ target_long arg1 = arg;
+
+ if (arg1 < 0) {
+ /* No scheduling policy implemented. */
+ if (arg1 != -2) {
+ if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
+ env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ }
+ }
+ } else if (arg1 == 0) {
+ if (0) {
+ /* TODO: TC underflow */
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ } else {
+ /* TODO: Deallocate TC */
+ }
+ } else if (arg1 > 0) {
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ }
+ return env->CP0_YQMask;
+}
+
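+/*
+ * RDHWR helpers: user-mode access to a hardware register is permitted
+ * only when the corresponding bit is set in CP0 HWREna (or when CP0 is
+ * usable); otherwise a Reserved Instruction exception is raised.
+ */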
+static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
+ return;
+ }
+ do_raise_exception(env, EXCP_RI, pc);
+}
+
+target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
+{
+ check_hwrena(env, 0, GETPC());
+ return env->CP0_EBase & 0x3ff;
+}
+
+target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
+{
+ check_hwrena(env, 1, GETPC());
+ return env->SYNCI_Step;
+}
+
+target_ulong helper_rdhwr_cc(CPUMIPSState *env)
+{
+ check_hwrena(env, 2, GETPC());
+#ifdef CONFIG_USER_ONLY
+ return env->CP0_Count;
+#else
+ return (int32_t)cpu_mips_get_count(env);
+#endif
+}
+
+target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
+{
+ check_hwrena(env, 3, GETPC());
+ return env->CCRes;
+}
+
+target_ulong helper_rdhwr_performance(CPUMIPSState *env)
+{
+ check_hwrena(env, 4, GETPC());
+ return env->CP0_Performance0;
+}
+
+target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
+{
+ check_hwrena(env, 5, GETPC());
+ return (env->CP0_Config5 >> CP0C5_XNP) & 1;
+}
+
+void helper_pmon(CPUMIPSState *env, int function)
+{
+ function /= 2;
+ switch (function) {
+ case 2: /* TODO: char inbyte(int waitflag); */
+ if (env->active_tc.gpr[4] == 0) {
+ env->active_tc.gpr[2] = -1;
+ }
+ /* Fall through */
+ case 11: /* TODO: char inbyte (void); */
+ env->active_tc.gpr[2] = -1;
+ break;
+ case 3:
+ case 12:
+ printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
+ break;
+ case 17:
+ break;
+ case 158:
+ {
+ unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
+ printf("%s", fmt);
+ }
+ break;
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
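+/*
+ * Address error on an unaligned access: record the faulting address in
+ * CP0 BadVAddr (unless in Debug Mode) and raise AdES for stores or AdEL
+ * otherwise, flagging instruction fetches in the error code.
+ */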
+void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int error_code = 0;
+ int excp;
+
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = addr;
+ }
+
+ if (access_type == MMU_DATA_STORE) {
+ excp = EXCP_AdES;
+ } else {
+ excp = EXCP_AdEL;
+ if (access_type == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+ }
+
+ do_raise_exception_err(env, excp, error_code, retaddr);
+}
+
+void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
+ CPUMIPSState *env = &cpu->env;
+
+ if (access_type == MMU_INST_FETCH) {
+ do_raise_exception(env, EXCP_IBE, retaddr);
+ } else if (!mcc->no_data_aborts) {
+ do_raise_exception(env, EXCP_DBE, retaddr);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/mips/tcg/rel6.decode b/target/mips/tcg/rel6.decode
new file mode 100644
index 000000000..d6989cf56
--- /dev/null
+++ b/target/mips/tcg/rel6.decode
@@ -0,0 +1,49 @@
+# MIPS32 Release 6 instruction set
+#
+# Copyright (C) 2020 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference:
+#
+# MIPS Architecture for Programmers Volume II-A
+# The MIPS32 Instruction Set Reference Manual, Revision 6.06
+# (Document Number: MD00086-2B-MIPS32BIS-AFP-06.06)
+#
+# MIPS Architecture for Programmers Volume II-A
+# The MIPS64 Instruction Set Reference Manual, Revision 6.06
+# (Document Number: MD00087-2B-MIPS64BIS-AFP-6.06)
+
+&r rs rt rd sa
+
+@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r
+
+LSA 000000 ..... ..... ..... 000 .. 000101 @lsa
+DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa
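+
+# Note on the pattern lines below: a '.' bit is captured by a field of
+# the referenced format, while a '-' bit is ignored by the decoder.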
+
+REMOVED 010011 ----- ----- ----- ----- ------ # COP1X (COP3)
+
+REMOVED 011100 ----- ----- ----- ----- ------ # SPECIAL2
+
+REMOVED 011010 ----- ----- ---------------- # LDL
+REMOVED 011011 ----- ----- ---------------- # LDR
+
+REMOVED 011111 ----- ----- ---------- 011001 # LWLE
+REMOVED 011111 ----- ----- ---------- 011010 # LWRE
+REMOVED 011111 ----- ----- ---------- 100001 # SWLE
+REMOVED 011111 ----- ----- ---------- 100010 # SWRE
+
+REMOVED 100010 ----- ----- ---------------- # LWL
+REMOVED 100110 ----- ----- ---------------- # LWR
+REMOVED 101010 ----- ----- ---------------- # SWL
+REMOVED 101100 ----- ----- ---------------- # SDL
+REMOVED 101101 ----- ----- ---------------- # SDR
+REMOVED 101110 ----- ----- ---------------- # SWR
+
+REMOVED 101111 ----- ----- ---------------- # CACHE
+
+REMOVED 110000 ----- ----- ---------------- # LL
+REMOVED 110011 ----- ----- ---------------- # PREF
+REMOVED 110100 ----- ----- ---------------- # LLD
+REMOVED 111000 ----- ----- ---------------- # SC
+REMOVED 111100 ----- ----- ---------------- # SCD
diff --git a/target/mips/tcg/rel6_translate.c b/target/mips/tcg/rel6_translate.c
new file mode 100644
index 000000000..d63185125
--- /dev/null
+++ b/target/mips/tcg/rel6_translate.c
@@ -0,0 +1,37 @@
+/*
+ * MIPS emulation for QEMU - Release 6 translation routines
+ *
+ * Copyright (c) 2020 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * This code is licensed under the LGPL v2.1 or later.
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+
+/* Include the auto-generated decoders. */
+#include "decode-rel6.c.inc"
+
+bool trans_REMOVED(DisasContext *ctx, arg_REMOVED *a)
+{
+ gen_reserved_instruction(ctx);
+
+ return true;
+}
+
+static bool trans_LSA(DisasContext *ctx, arg_r *a)
+{
+ return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa);
+}
+
+static bool trans_DLSA(DisasContext *ctx, arg_r *a)
+{
+ if (TARGET_LONG_BITS != 64) {
+ return false;
+ }
+ return gen_dlsa(ctx, a->rd, a->rt, a->rs, a->sa);
+}
diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c
new file mode 100644
index 000000000..aae2af6ec
--- /dev/null
+++ b/target/mips/tcg/sysemu/cp0_helper.c
@@ -0,0 +1,1706 @@
+/*
+ * Helpers for emulation of CP0-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ * Copyright (C) 2020 Wave Computing, Inc.
+ * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+
+
+/* SMP helpers. */
+static bool mips_vpe_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ /*
+ * If the VPE is halted but otherwise active, it means it's waiting for
+ * an interrupt.\
+ */
+ return cpu->halted && mips_vpe_active(env);
+}
+
+static bool mips_vp_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ return cpu->halted && mips_vp_active(env);
+}
+
+static inline void mips_vpe_wake(MIPSCPU *c)
+{
+ /*
+ * Don't set ->halted = 0 directly, let it be done via cpu_has_work
+ * because there might be other conditions that state that c should
+ * be sleeping.
+ */
+ qemu_mutex_lock_iothread();
+ cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
+ qemu_mutex_unlock_iothread();
+}
+
+static inline void mips_vpe_sleep(MIPSCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * The VPE was shut off, really go to bed.
+ * Reset any old _WAKE requests.
+ */
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+}
+
+static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
+ mips_vpe_wake(cpu);
+ }
+}
+
+static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (!mips_vpe_active(c)) {
+ mips_vpe_sleep(cpu);
+ }
+}
+
+/**
+ * mips_cpu_map_tc:
+ * @env: CPU from which mapping is performed.
+ * @tc: Should point to an int with the value of the global TC index.
+ *
+ * This function will transform @tc into a local index within the
+ * returned #CPUMIPSState.
+ */
+
+/*
+ * FIXME: This code assumes that all VPEs have the same number of TCs,
+ * which depends on runtime setup. Can probably be fixed by
+ * walking the list of CPUMIPSStates.
+ */
+static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
+{
+ MIPSCPU *cpu;
+ CPUState *cs;
+ CPUState *other_cs;
+ int vpe_idx;
+ int tc_idx = *tc;
+
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
+ /* Not allowed to address other CPUs. */
+ *tc = env->current_tc;
+ return env;
+ }
+
+ cs = env_cpu(env);
+ vpe_idx = tc_idx / cs->nr_threads;
+ *tc = tc_idx % cs->nr_threads;
+ other_cs = qemu_get_cpu(vpe_idx);
+ if (other_cs == NULL) {
+ return env;
+ }
+ cpu = MIPS_CPU(other_cs);
+ return &cpu->env;
+}
+
+/*
+ * The per VPE CP0_Status register shares some fields with the per TC
+ * CP0_TCStatus registers. These fields are wired to the same registers,
+ * so changes to either of them should be reflected on both registers.
+ *
+ * Also, EntryHi shares the bottom 8-bit ASID with TCStatus.
+ *
+ * These helpers synchronize the registers for a given CPU.
+ */
+
+/*
+ * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
+ * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
+ * int tc);
+ */
+
+/* Called for updates to CP0_TCStatus. */
+static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
+ target_ulong v)
+{
+ uint32_t status;
+ uint32_t tcu, tmx, tasid, tksu;
+ uint32_t mask = ((1U << CP0St_CU3)
+ | (1 << CP0St_CU2)
+ | (1 << CP0St_CU1)
+ | (1 << CP0St_CU0)
+ | (1 << CP0St_MX)
+ | (3 << CP0St_KSU));
+
+ tcu = (v >> CP0TCSt_TCU0) & 0xf;
+ tmx = (v >> CP0TCSt_TMX) & 0x1;
+ tasid = v & cpu->CP0_EntryHi_ASID_mask;
+ tksu = (v >> CP0TCSt_TKSU) & 0x3;
+
+ status = tcu << CP0St_CU0;
+ status |= tmx << CP0St_MX;
+ status |= tksu << CP0St_KSU;
+
+ cpu->CP0_Status &= ~mask;
+ cpu->CP0_Status |= status;
+
+ /* Sync the TASID with EntryHi. */
+ cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
+ cpu->CP0_EntryHi |= tasid;
+
+ compute_hflags(cpu);
+}
+
+/* Called for updates to CP0_EntryHi. */
+static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
+{
+ int32_t *tcst;
+ uint32_t asid, v = cpu->CP0_EntryHi;
+
+ asid = v & cpu->CP0_EntryHi_ASID_mask;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
+ *tcst |= asid;
+}
+
+/* XXX: do not use a global */
+uint32_t cpu_mips_get_random(CPUMIPSState *env)
+{
+ static uint32_t seed = 1;
+ static uint32_t prev_idx;
+ uint32_t idx;
+ uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;
+
+ if (nb_rand_tlb == 1) {
+ return env->tlb->nb_tlb - 1;
+ }
+
+ /* Don't return the same value twice in a row, so get another one. */
+ do {
+ /*
+ * Use the simple Linear Congruential Generator algorithm
+ * from the ISO/IEC 9899 standard.
+ */
+ seed = 1103515245 * seed + 12345;
+ idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
+ } while (idx == prev_idx);
+ prev_idx = idx;
+ return idx;
+}
+
+/* CP0 helpers */
+target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPControl;
+}
+
+target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf0;
+}
+
+target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf1;
+}
+
+target_ulong helper_mfc0_random(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_random(env);
+}
+
+target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCStatus;
+}
+
+target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCStatus;
+ } else {
+ return other->tcs[other_tc].CP0_TCStatus;
+ }
+}
+
+target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCBind;
+}
+
+target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCBind;
+ } else {
+ return other->tcs[other_tc].CP0_TCBind;
+ }
+}
+
+target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.PC;
+ } else {
+ return other->tcs[other_tc].PC;
+ }
+}
+
+target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCHalt;
+ } else {
+ return other->tcs[other_tc].CP0_TCHalt;
+ }
+}
+
+target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCContext;
+ } else {
+ return other->tcs[other_tc].CP0_TCContext;
+ }
+}
+
+target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCSchedule;
+ } else {
+ return other->tcs[other_tc].CP0_TCSchedule;
+ }
+}
+
+target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCScheFBack;
+ } else {
+ return other->tcs[other_tc].CP0_TCScheFBack;
+ }
+}
+
+target_ulong helper_mfc0_count(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_count(env);
+}
+
+target_ulong helper_mfc0_saar(CPUMIPSState *env)
+{
+ if ((env->CP0_SAARI & 0x3f) < 2) {
+ return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f];
+ }
+ return 0;
+}
+
+target_ulong helper_mfhc0_saar(CPUMIPSState *env)
+{
+ if ((env->CP0_SAARI & 0x3f) < 2) {
+ return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32;
+ }
+ return 0;
+}
+
+target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EntryHi;
+}
+
+target_ulong helper_mftc0_cause(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Cause;
+}
+
+target_ulong helper_mftc0_status(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Status;
+}
+
+target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
+{
+ return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
+}
+
+target_ulong helper_mfc0_maar(CPUMIPSState *env)
+{
+ return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_mfhc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI] >> 32;
+}
+
+target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t)env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t) env->CP0_WatchHi[sel];
+}
+
+target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel] >> 32;
+}
+
+target_ulong helper_mfc0_debug(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Debug;
+ if (env->hflags & MIPS_HFLAG_DM) {
+ t0 |= 1 << CP0DB_DM;
+ }
+
+ return t0;
+}
+
+target_ulong helper_mftc0_debug(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ int32_t tcstatus;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ tcstatus = other->active_tc.CP0_Debug_tcstatus;
+ } else {
+ tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
+ }
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
+{
+ return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
+}
+
+target_ulong helper_dmfc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel];
+}
+
+target_ulong helper_dmfc0_saar(CPUMIPSState *env)
+{
+ if ((env->CP0_SAARI & 0x3f) < 2) {
+ return env->CP0_SAAR[env->CP0_SAARI & 0x3f];
+ }
+ return 0;
+}
+#endif /* TARGET_MIPS64 */
+
+void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t index_p = env->CP0_Index & 0x80000000;
+ uint32_t tlb_index = arg1 & 0x7fffffff;
+ if (tlb_index < env->tlb->nb_tlb) {
+ if (env->insn_flags & ISA_MIPS_R6) {
+ index_p |= arg1 & 0x80000000;
+ }
+ env->CP0_Index = index_p | tlb_index;
+ }
+}
+
+void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
+ (1 << CP0MVPCo_EVP);
+ }
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0MVPCo_STLB);
+ }
+ newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
+
+ env->mvp->CP0_MVPControl = newval;
+}
+
+void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /*
+ * Yield scheduler intercept not implemented.
+ * Gating storage scheduler intercept not implemented.
+ */
+
+ /* TODO: Enable/disable TCs. */
+
+ env->CP0_VPEControl = newval;
+}
+
+void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable TCs. */
+
+ other->CP0_VPEControl = newval;
+}
+
+target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ /* FIXME: Mask away bits that return zero on read. */
+ return other->CP0_VPEControl;
+}
+
+target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_VPEConf0;
+}
+
+void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
+ mask |= (0xff << CP0VPEC0_XTC);
+ }
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ }
+ newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+
+ env->CP0_VPEConf0 = newval;
+}
+
+void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+ other->CP0_VPEConf0 = newval;
+}
+
+void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
+ (0xff << CP0VPEC1_NCP1);
+ }
+ newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
+
+ /* UDI not implemented. */
+ /* CP2 not implemented. */
+
+ /* TODO: Handle FPU (CP1) binding. */
+
+ env->CP0_VPEConf1 = newval;
+}
+
+void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
+{
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_YQMask = 0x00000000;
+}
+
+void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_VPEOpt = arg1 & 0x0000ffff;
+}
+
+#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
+
+void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
+
+void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = env->CP0_TCStatus_rw_bitmask;
+ uint32_t newval;
+
+ newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
+
+ env->active_tc.CP0_TCStatus = newval;
+ sync_c0_tcstatus(env, env->current_tc, newval);
+}
+
+void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCStatus = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCStatus = arg1;
+ }
+ sync_c0_tcstatus(other, other_tc, arg1);
+}
+
+void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ env->active_tc.CP0_TCBind = newval;
+}
+
+void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ if (other_tc == other->current_tc) {
+ newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ other->active_tc.CP0_TCBind = newval;
+ } else {
+ newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
+ other->tcs[other_tc].CP0_TCBind = newval;
+ }
+}
+
+void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.PC = arg1;
+ env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ env->CP0_LLAddr = 0;
+ env->lladdr = 0;
+ /* MIPS16 not implemented. */
+}
+
+void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.PC = arg1;
+ other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ } else {
+ other->tcs[other_tc].PC = arg1;
+ other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ }
+}
+
+void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ MIPSCPU *cpu = env_archcpu(env);
+
+ env->active_tc.CP0_TCHalt = arg1 & 0x1;
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+ if (env->active_tc.CP0_TCHalt & 1) {
+ mips_tc_sleep(cpu, env->current_tc);
+ } else {
+ mips_tc_wake(cpu, env->current_tc);
+ }
+}
+
+void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ MIPSCPU *other_cpu = env_archcpu(other);
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCHalt = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCHalt = arg1;
+ }
+
+ if (arg1 & 1) {
+ mips_tc_sleep(other_cpu, other_tc);
+ } else {
+ mips_tc_wake(other_cpu, other_tc);
+ }
+}
+
+void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCContext = arg1;
+}
+
+void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCContext = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCContext = arg1;
+ }
+}
+
+void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCSchedule = arg1;
+}
+
+void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCSchedule = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCSchedule = arg1;
+ }
+}
+
+void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCScheFBack = arg1;
+}
+
+void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCScheFBack = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCScheFBack = arg1;
+ }
+}
+
+void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
+}
+
+void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t old;
+ old = env->CP0_MemoryMapID;
+ env->CP0_MemoryMapID = (int32_t) arg1;
+ /* If the MemoryMapID changes, flush qemu's TLB. */
+ if (old != env->CP0_MemoryMapID) {
+ cpu_mips_tlb_flush(env);
+ }
+}
+
+void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
+{
+ uint32_t mask;
+ int maskbits;
+
+ /* MASKX is ignored because 1KB pages are not supported */
+ mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
+ maskbits = cto32(mask);
+
+ /* Ensure there are no set bits above the first zero */
+ if ((mask >> maskbits) != 0) {
+ goto invalid;
+ }
+ /* VTLB entries smaller than the target page size are not supported */
+ if ((maskbits + TARGET_PAGE_BITS_MIN) < TARGET_PAGE_BITS) {
+ goto invalid;
+ }
+ /* Write through the out-parameter so callers see the validated mask. */
+ *pagemask = mask << CP0PM_MASK;
+
+ return;
+
+invalid:
+ /* When invalid, set to default target page size. */
+ mask = (~TARGET_PAGE_MASK >> TARGET_PAGE_BITS_MIN);
+ *pagemask = mask << CP0PM_MASK;
+}
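+
+/*
+ * Worked example for the check above: cto32() counts trailing one bits,
+ * so MASK = 0b0011 gives maskbits = 2 and (mask >> 2) == 0, a valid
+ * setting (16 KiB pages when TARGET_PAGE_BITS is 12). A non-contiguous
+ * value such as 0b0101 gives maskbits = 1 and (mask >> 1) != 0, so it
+ * falls through to the default page size instead.
+ */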
+
+void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
+{
+ update_pagemask(env, arg1, &env->CP0_PageMask);
+}
+
+void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
+{
+ /* SmartMIPS not implemented */
+ /* 1k pages not implemented */
+ env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
+ (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
+ compute_hflags(env);
+ restore_pamask(env);
+}
+
+void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ uint64_t mask = 0x3F3FFFFFFFULL;
+ uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
+ uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
+
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_BDI);
+ }
+ if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_GDI);
+ }
+ if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_UDI);
+ }
+ if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_MDI);
+ }
+ if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_PTI);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptei >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptei == 0 || new_ptei == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
+ (old_ptei << CP0PF_PTEI);
+ }
+#else
+ uint32_t mask = 0x3FFFFFFF;
+ uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
+
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_GDW);
+ }
+ if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_UDW);
+ }
+ if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_MDW);
+ }
+ if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_PTW);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptew >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptew == 0 || new_ptew == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
+ (old_ptew << CP0PF_PTEW);
+ }
+#endif
+}
+
+void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
+#else
+ env->CP0_PWSize = arg1 & 0x3FFFFFFF;
+#endif
+}
+
+void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ISA_MIPS_R6) {
+ if (arg1 < env->tlb->nb_tlb) {
+ env->CP0_Wired = arg1;
+ }
+ } else {
+ env->CP0_Wired = arg1 % env->tlb->nb_tlb;
+ }
+}
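+
+/*
+ * Release 6 makes an out-of-range write to Wired ineffective, whereas
+ * earlier revisions wrap the value modulo the TLB size; both behaviours
+ * are reflected above.
+ */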
+
+void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ /* PWEn = 0. Hardware page table walking is not implemented. */
+ env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
+#else
+ env->CP0_PWCtl = (arg1 & 0x800000FF);
+#endif
+}
+
+void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
+}
+
+void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
+}
+
+void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
+}
+
+void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
+}
+
+void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
+}
+
+void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0x0000000F;
+
+ if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
+ (env->insn_flags & ISA_MIPS_R6)) {
+ mask |= (1 << 4);
+ }
+ if (env->insn_flags & ISA_MIPS_R6) {
+ mask |= (1 << 5);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
+ mask |= (1 << 29);
+
+ if (arg1 & (1 << 29)) {
+ env->hflags |= MIPS_HFLAG_HWRENA_ULR;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
+ }
+ }
+
+ env->CP0_HWREna = arg1 & mask;
+}
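+
+/*
+ * The mask built above tracks the RDHWR registers this configuration can
+ * expose to user mode: bits 0-3 cover CPUNum, SYNCI_Step, CC and CCRes,
+ * bits 4 (PerfCnt) and 5 (XNP) are Release 6 additions, and bit 29 is
+ * ULR (UserLocal), which also toggles the hflag consulted on the RDHWR
+ * fast path.
+ */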
+
+void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_count(env, arg1);
+}
+
+void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t target = arg1 & 0x3f;
+ if (target <= 1) {
+ env->CP0_SAARI = target;
+ }
+}
+
+void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t target = env->CP0_SAARI & 0x3f;
+ if (target < 2) {
+ env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
+ switch (target) {
+ case 0:
+ if (env->itu) {
+ itc_reconfigure(env->itu);
+ }
+ break;
+ }
+ }
+}
+
+void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t target = env->CP0_SAARI & 0x3f;
+ if (target < 2) {
+ env->CP0_SAAR[target] =
+ (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
+ (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
+ switch (target) {
+ case 0:
+ if (env->itu) {
+ itc_reconfigure(env->itu);
+ }
+ break;
+ }
+ }
+}
+
+void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong old, val, mask;
+ mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
+ if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
+ mask |= 1 << CP0EnHi_EHINV;
+ }
+
+ /* 1k pages not implemented */
+#if defined(TARGET_MIPS64)
+ if (env->insn_flags & ISA_MIPS_R6) {
+ int entryhi_r = extract64(arg1, 62, 2);
+ int config0_at = extract32(env->CP0_Config0, 13, 2);
+ bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
+ if ((entryhi_r == 2) ||
+ (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
+ /* skip EntryHi.R field if new value is reserved */
+ mask &= ~(0x3ull << 62);
+ }
+ }
+ mask &= env->SEGMask;
+#endif
+ old = env->CP0_EntryHi;
+ val = (arg1 & mask) | (old & ~mask);
+ env->CP0_EntryHi = val;
+ if (ase_mt_available(env)) {
+ sync_c0_entryhi(env, env->current_tc);
+ }
+ /* If the ASID changes, flush qemu's TLB. */
+ if ((old & env->CP0_EntryHi_ASID_mask) !=
+ (val & env->CP0_EntryHi_ASID_mask)) {
+ tlb_flush(env_cpu(env));
+ }
+}
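+
+/*
+ * The flush above is needed because QEMU's softmmu TLB is not tagged
+ * with the guest ASID: translations cached for the old address space
+ * would otherwise stay valid after EntryHi.ASID changes.
+ */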
+
+void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_EntryHi = arg1;
+ sync_c0_entryhi(other, other_tc);
+}
+
+void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_compare(env, arg1);
+}
+
+void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t val, old;
+
+ old = env->CP0_Status;
+ cpu_mips_store_status(env, arg1);
+ val = env->CP0_Status;
+
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
+ old, old & env->CP0_Cause & CP0Ca_IP_mask,
+ val, val & env->CP0_Cause & CP0Ca_IP_mask,
+ env->CP0_Cause);
+ switch (cpu_mmu_index(env, false)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
+ sync_c0_status(env, other, other_tc);
+}
+
+void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
+}
+
+void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
+ env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
+}
+
+void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_cause(env, arg1);
+}
+
+void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ cpu_mips_store_cause(other, arg1);
+}
+
+target_ulong helper_mftc0_epc(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EPC;
+}
+
+target_ulong helper_mftc0_ebase(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EBase;
+}
+
+void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ switch (idx) {
+ case 0: return other->CP0_Config0;
+ case 1: return other->CP0_Config1;
+ case 2: return other->CP0_Config2;
+ case 3: return other->CP0_Config3;
+ /* 4 and 5 are reserved. */
+ case 6: return other->CP0_Config6;
+ case 7: return other->CP0_Config7;
+ default:
+ break;
+ }
+ return 0;
+}
+
+void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
+}
+
+void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
+{
+ /* tertiary/secondary caches not implemented */
+ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
+}
+
+void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
+ (arg1 & (1 << CP0C3_ISA_ON_EXC));
+ }
+}
+
+void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
+ (arg1 & env->CP0_Config4_rw_bitmask);
+}
+
+void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
+ (arg1 & env->CP0_Config5_rw_bitmask);
+ env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
+ 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
+ compute_hflags(env);
+}
+
+void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
+{
+ target_long mask = env->CP0_LLAddr_rw_bitmask;
+ arg1 = arg1 << env->CP0_LLAddr_shift;
+ env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
+}
+
+#define MTC0_MAAR_MASK(env) \
+ ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
+
+void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
+}
+
+void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] =
+ (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
+ (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
+{
+ int index = arg1 & 0x3f;
+ if (index == 0x3f) {
+ /*
+ * Software may write all ones to INDEX to determine the
+ * maximum value supported.
+ */
+ env->CP0_MAARI = MIPS_MAAR_MAX - 1;
+ } else if (index < MIPS_MAAR_MAX) {
+ env->CP0_MAARI = index;
+ }
+ /*
+ * Apart from the all-ones case, if the value written is not
+ * supported, INDEX keeps its previous value.
+ */
+}
+
+void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ /*
+ * Watch exceptions for instructions, data loads, data stores
+ * not implemented.
+ */
+ env->CP0_WatchLo[sel] = (arg1 & ~0x7);
+}
+
+void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
+ if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
+ mask |= 0xFFFFFFFF00000000ULL; /* MMID */
+ }
+ env->CP0_WatchHi[sel] = arg1 & mask;
+ env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
+}
+
+void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
+ (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
+ env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
+}
+
+void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Framemask = arg1; /* XXX */
+}
+
+void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
+ if (arg1 & (1 << CP0DB_DM)) {
+ env->hflags |= MIPS_HFLAG_DM;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_DM;
+ }
+}
+
+void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_Debug_tcstatus = val;
+ } else {
+ other->tcs[other_tc].CP0_Debug_tcstatus = val;
+ }
+ other->CP0_Debug = (other->CP0_Debug &
+ ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Performance0 = arg1 & 0x000007ff;
+}
+
+void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t wst = arg1 & (1 << CP0EC_WST);
+ int32_t spr = arg1 & (1 << CP0EC_SPR);
+ int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
+
+ env->CP0_ErrCtl = wst | spr | itc;
+
+ if (itc && !wst && !spr) {
+ env->hflags |= MIPS_HFLAG_ITC_CACHE;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
+ }
+}
+
+void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
+ /*
+ * If CACHE instruction is configured for ITC tags then make all
+ * CP0.TagLo bits writable. The actual write to ITC Configuration
+ * Tag will take care of the read-only bits.
+ */
+ env->CP0_TagLo = arg1;
+ } else {
+ env->CP0_TagLo = arg1 & 0xFFFFFCF6;
+ }
+}
+
+void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataLo = arg1; /* XXX */
+}
+
+void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_TagHi = arg1; /* XXX */
+}
+
+void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataHi = arg1; /* XXX */
+}
+
+/* MIPS MT functions */
+target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.gpr[sel];
+ } else {
+ return other->tcs[other_tc].gpr[sel];
+ }
+}
+
+target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.LO[sel];
+ } else {
+ return other->tcs[other_tc].LO[sel];
+ }
+}
+
+target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.HI[sel];
+ } else {
+ return other->tcs[other_tc].HI[sel];
+ }
+}
+
+target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.ACX[sel];
+ } else {
+ return other->tcs[other_tc].ACX[sel];
+ }
+}
+
+target_ulong helper_mftdsp(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.DSPControl;
+ } else {
+ return other->tcs[other_tc].DSPControl;
+ }
+}
+
+void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.gpr[sel] = arg1;
+ } else {
+ other->tcs[other_tc].gpr[sel] = arg1;
+ }
+}
+
+void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.LO[sel] = arg1;
+ } else {
+ other->tcs[other_tc].LO[sel] = arg1;
+ }
+}
+
+void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.HI[sel] = arg1;
+ } else {
+ other->tcs[other_tc].HI[sel] = arg1;
+ }
+}
+
+void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.ACX[sel] = arg1;
+ } else {
+ other->tcs[other_tc].ACX[sel] = arg1;
+ }
+}
+
+void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.DSPControl = arg1;
+ } else {
+ other->tcs[other_tc].DSPControl = arg1;
+ }
+}
+
+/* MIPS MT thread and VPE control */
+target_ulong helper_dmt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_emt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_dvpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPEs except the one executing the dvpe. */
+ if (&other_cpu->env != env) {
+ other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ return prev;
+}
+
+target_ulong helper_evpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+
+ if (&other_cpu->env != env
+ /* If the VPE is WFI, don't disturb its sleep. */
+ && !mips_vpe_is_wfi(other_cpu)) {
+ /* Enable the VPE. */
+ other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ mips_vpe_wake(other_cpu); /* And wake it up. */
+ }
+ }
+ return prev;
+}
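+
+/*
+ * Both dvpe and evpe return the previous MVPControl value, so guest
+ * software can save the EVP bit across a critical section and only
+ * re-enable VPEs if they were enabled before, the usual DVPE/EVPE
+ * save-restore idiom.
+ */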
+
+/* R6 Multi-threading */
+target_ulong helper_dvp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPs except the one executing the dvp. */
+ if (&other_cpu->env != env) {
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
+
+target_ulong helper_evp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
+ /*
+ * If the VP is WFI, don't disturb its sleep.
+ * Otherwise, wake it up.
+ */
+ mips_vpe_wake(other_cpu);
+ }
+ }
+ env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
diff --git a/target/mips/tcg/sysemu/meson.build b/target/mips/tcg/sysemu/meson.build
new file mode 100644
index 000000000..4da2c577b
--- /dev/null
+++ b/target/mips/tcg/sysemu/meson.build
@@ -0,0 +1,6 @@
+mips_softmmu_ss.add(files(
+ 'cp0_helper.c',
+ 'mips-semi.c',
+ 'special_helper.c',
+ 'tlb_helper.c',
+))
diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c
new file mode 100644
index 000000000..b4a383ae9
--- /dev/null
+++ b/target/mips/tcg/sysemu/mips-semi.c
@@ -0,0 +1,367 @@
+/*
+ * Unified Hosting Interface syscalls.
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "exec/softmmu-semi.h"
+#include "semihosting/semihost.h"
+#include "semihosting/console.h"
+
+typedef enum UHIOp {
+ UHI_exit = 1,
+ UHI_open = 2,
+ UHI_close = 3,
+ UHI_read = 4,
+ UHI_write = 5,
+ UHI_lseek = 6,
+ UHI_unlink = 7,
+ UHI_fstat = 8,
+ UHI_argc = 9,
+ UHI_argnlen = 10,
+ UHI_argn = 11,
+ UHI_plog = 13,
+ UHI_assert = 14,
+ UHI_pread = 19,
+ UHI_pwrite = 20,
+ UHI_link = 22
+} UHIOp;
+
+typedef struct UHIStat {
+ int16_t uhi_st_dev;
+ uint16_t uhi_st_ino;
+ uint32_t uhi_st_mode;
+ uint16_t uhi_st_nlink;
+ uint16_t uhi_st_uid;
+ uint16_t uhi_st_gid;
+ int16_t uhi_st_rdev;
+ uint64_t uhi_st_size;
+ uint64_t uhi_st_atime;
+ uint64_t uhi_st_spare1;
+ uint64_t uhi_st_mtime;
+ uint64_t uhi_st_spare2;
+ uint64_t uhi_st_ctime;
+ uint64_t uhi_st_spare3;
+ uint64_t uhi_st_blksize;
+ uint64_t uhi_st_blocks;
+ uint64_t uhi_st_spare4[2];
+} UHIStat;
+
+enum UHIOpenFlags {
+ UHIOpen_RDONLY = 0x0,
+ UHIOpen_WRONLY = 0x1,
+ UHIOpen_RDWR = 0x2,
+ UHIOpen_APPEND = 0x8,
+ UHIOpen_CREAT = 0x200,
+ UHIOpen_TRUNC = 0x400,
+ UHIOpen_EXCL = 0x800
+};
+
+static int errno_mips(int host_errno)
+{
+ /* Errno values taken from asm-mips/errno.h */
+ switch (host_errno) {
+ case 0: return 0;
+ case ENAMETOOLONG: return 78;
+#ifdef EOVERFLOW
+ case EOVERFLOW: return 79;
+#endif
+#ifdef ELOOP
+ case ELOOP: return 90;
+#endif
+ default: return EINVAL;
+ }
+}
+
+static int copy_stat_to_target(CPUMIPSState *env, const struct stat *src,
+ target_ulong vaddr)
+{
+ hwaddr len = sizeof(struct UHIStat);
+ UHIStat *dst = lock_user(VERIFY_WRITE, vaddr, len, 0);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ dst->uhi_st_dev = tswap16(src->st_dev);
+ dst->uhi_st_ino = tswap16(src->st_ino);
+ dst->uhi_st_mode = tswap32(src->st_mode);
+ dst->uhi_st_nlink = tswap16(src->st_nlink);
+ dst->uhi_st_uid = tswap16(src->st_uid);
+ dst->uhi_st_gid = tswap16(src->st_gid);
+ dst->uhi_st_rdev = tswap16(src->st_rdev);
+ dst->uhi_st_size = tswap64(src->st_size);
+ dst->uhi_st_atime = tswap64(src->st_atime);
+ dst->uhi_st_mtime = tswap64(src->st_mtime);
+ dst->uhi_st_ctime = tswap64(src->st_ctime);
+#ifdef _WIN32
+ dst->uhi_st_blksize = 0;
+ dst->uhi_st_blocks = 0;
+#else
+ dst->uhi_st_blksize = tswap64(src->st_blksize);
+ dst->uhi_st_blocks = tswap64(src->st_blocks);
+#endif
+ unlock_user(dst, vaddr, len);
+ return 0;
+}
+
+static int get_open_flags(target_ulong target_flags)
+{
+ int open_flags = 0;
+
+ if (target_flags & UHIOpen_RDWR) {
+ open_flags |= O_RDWR;
+ } else if (target_flags & UHIOpen_WRONLY) {
+ open_flags |= O_WRONLY;
+ } else {
+ open_flags |= O_RDONLY;
+ }
+
+ open_flags |= (target_flags & UHIOpen_APPEND) ? O_APPEND : 0;
+ open_flags |= (target_flags & UHIOpen_CREAT) ? O_CREAT : 0;
+ open_flags |= (target_flags & UHIOpen_TRUNC) ? O_TRUNC : 0;
+ open_flags |= (target_flags & UHIOpen_EXCL) ? O_EXCL : 0;
+
+ return open_flags;
+}
+
+static int write_to_file(CPUMIPSState *env, target_ulong fd, target_ulong vaddr,
+ target_ulong len, target_ulong offset)
+{
+ int num_of_bytes;
+ void *dst = lock_user(VERIFY_READ, vaddr, len, 1);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (offset) {
+#ifdef _WIN32
+ num_of_bytes = 0;
+#else
+ num_of_bytes = pwrite(fd, dst, len, offset);
+#endif
+ } else {
+ num_of_bytes = write(fd, dst, len);
+ }
+
+ unlock_user(dst, vaddr, 0);
+ return num_of_bytes;
+}
+
+static int read_from_file(CPUMIPSState *env, target_ulong fd,
+ target_ulong vaddr, target_ulong len,
+ target_ulong offset)
+{
+ int num_of_bytes;
+ void *dst = lock_user(VERIFY_WRITE, vaddr, len, 0);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (offset) {
+#ifdef _WIN32
+ num_of_bytes = 0;
+#else
+ num_of_bytes = pread(fd, dst, len, offset);
+#endif
+ } else {
+ num_of_bytes = read(fd, dst, len);
+ }
+
+ unlock_user(dst, vaddr, len);
+ return num_of_bytes;
+}
+
+static int copy_argn_to_target(CPUMIPSState *env, int arg_num,
+ target_ulong vaddr)
+{
+ int strsize = strlen(semihosting_get_arg(arg_num)) + 1;
+ char *dst = lock_user(VERIFY_WRITE, vaddr, strsize, 0);
+ if (!dst) {
+ return -1;
+ }
+
+ strcpy(dst, semihosting_get_arg(arg_num));
+
+ unlock_user(dst, vaddr, strsize);
+ return 0;
+}
+
+#define GET_TARGET_STRING(p, addr) \
+ do { \
+ p = lock_user_string(addr); \
+ if (!p) { \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ return; \
+ } \
+ } while (0)
+
+#define GET_TARGET_STRINGS_2(p, addr, p2, addr2) \
+ do { \
+ p = lock_user_string(addr); \
+ if (!p) { \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ return; \
+ } \
+ p2 = lock_user_string(addr2); \
+ if (!p2) { \
+ unlock_user(p, addr, 0); \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ return; \
+ } \
+ } while (0)
+
+#define FREE_TARGET_STRING(p, gpr) \
+ do { \
+ unlock_user(p, gpr, 0); \
+ } while (0)
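+
+/*
+ * UHI register convention, as used below: the operation code arrives in
+ * $25 (gpr[25]), up to four arguments in $4..$7 (gpr[4]..gpr[7]), the
+ * result is returned in $2 (gpr[2]) and, on failure, a MIPS errno value
+ * in $3 (gpr[3]).
+ */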
+
+void helper_do_semihosting(CPUMIPSState *env)
+{
+ target_ulong *gpr = env->active_tc.gpr;
+ const UHIOp op = gpr[25];
+ char *p, *p2;
+
+ switch (op) {
+ case UHI_exit:
+ qemu_log("UHI(%d): exit(%d)\n", op, (int)gpr[4]);
+ exit(gpr[4]);
+ case UHI_open:
+ GET_TARGET_STRING(p, gpr[4]);
+ if (!strcmp("/dev/stdin", p)) {
+ gpr[2] = 0;
+ } else if (!strcmp("/dev/stdout", p)) {
+ gpr[2] = 1;
+ } else if (!strcmp("/dev/stderr", p)) {
+ gpr[2] = 2;
+ } else {
+ gpr[2] = open(p, get_open_flags(gpr[5]), gpr[6]);
+ gpr[3] = errno_mips(errno);
+ }
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_close:
+ if (gpr[4] < 3) {
+ /* ignore closing stdin/stdout/stderr */
+ gpr[2] = 0;
+ return;
+ }
+ gpr[2] = close(gpr[4]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_read:
+ gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], 0);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_write:
+ gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], 0);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_lseek:
+ gpr[2] = lseek(gpr[4], gpr[5], gpr[6]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_unlink:
+ GET_TARGET_STRING(p, gpr[4]);
+ gpr[2] = remove(p);
+ gpr[3] = errno_mips(errno);
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_fstat:
+ {
+ struct stat sbuf;
+ memset(&sbuf, 0, sizeof(sbuf));
+ gpr[2] = fstat(gpr[4], &sbuf);
+ gpr[3] = errno_mips(errno);
+ if (gpr[2]) {
+ return;
+ }
+ gpr[2] = copy_stat_to_target(env, &sbuf, gpr[5]);
+ gpr[3] = errno_mips(errno);
+ }
+ break;
+ case UHI_argc:
+ gpr[2] = semihosting_get_argc();
+ break;
+ case UHI_argnlen:
+ if (gpr[4] >= semihosting_get_argc()) {
+ gpr[2] = -1;
+ return;
+ }
+ gpr[2] = strlen(semihosting_get_arg(gpr[4]));
+ break;
+ case UHI_argn:
+ if (gpr[4] >= semihosting_get_argc()) {
+ gpr[2] = -1;
+ return;
+ }
+ gpr[2] = copy_argn_to_target(env, gpr[4], gpr[5]);
+ break;
+ case UHI_plog:
+ GET_TARGET_STRING(p, gpr[4]);
+ p2 = strstr(p, "%d");
+ if (p2) {
+ int char_num = p2 - p;
+ GString *s = g_string_new_len(p, char_num);
+ g_string_append_printf(s, "%d%s", (int)gpr[5], p2 + 2);
+ gpr[2] = qemu_semihosting_log_out(s->str, s->len);
+ g_string_free(s, true);
+ } else {
+ gpr[2] = qemu_semihosting_log_out(p, strlen(p));
+ }
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_assert:
+ GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]);
+ printf("assertion '");
+ printf("\"%s\"", p);
+ printf("': file \"%s\", line %d\n", p2, (int)gpr[6]);
+ FREE_TARGET_STRING(p2, gpr[5]);
+ FREE_TARGET_STRING(p, gpr[4]);
+ abort();
+ break;
+ case UHI_pread:
+ gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_pwrite:
+ gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
+ gpr[3] = errno_mips(errno);
+ break;
+#ifndef _WIN32
+ case UHI_link:
+ GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]);
+ gpr[2] = link(p, p2);
+ gpr[3] = errno_mips(errno);
+ FREE_TARGET_STRING(p2, gpr[5]);
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+#endif
+ default:
+ fprintf(stderr, "Unknown UHI operation %d\n", op);
+ abort();
+ }
+ return;
+}
diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c
new file mode 100644
index 000000000..2a2afb49e
--- /dev/null
+++ b/target/mips/tcg/sysemu/special_helper.c
@@ -0,0 +1,173 @@
+/*
+ * QEMU MIPS emulation: Special opcode helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "internal.h"
+
+/* Specials */
+target_ulong helper_di(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 & ~(1 << CP0St_IE);
+ return t0;
+}
+
+target_ulong helper_ei(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 | (1 << CP0St_IE);
+ return t0;
+}
+
+static void debug_pre_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ qemu_log("\n");
+ }
+}
+
+static void debug_post_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ switch (cpu_mmu_index(env, false)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if ((env->hflags & MIPS_HFLAG_BMASK) != 0
+ && env->active_tc.PC != tb->pc) {
+ env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ return true;
+ }
+ return false;
+}
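+
+/*
+ * When an I/O access forces a replay of the current TB and the faulting
+ * instruction sits in a branch delay slot, the PC is wound back over the
+ * slot (2 bytes in 16-bit ISA mode, 4 otherwise) so that the branch
+ * itself is re-executed on resume.
+ */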
+
+static inline void exception_return(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ mips_env_set_pc(env, env->CP0_ErrorEPC);
+ env->CP0_Status &= ~(1 << CP0St_ERL);
+ } else {
+ mips_env_set_pc(env, env->CP0_EPC);
+ env->CP0_Status &= ~(1 << CP0St_EXL);
+ }
+ compute_hflags(env);
+ debug_post_eret(env);
+}
+
+void helper_eret(CPUMIPSState *env)
+{
+ exception_return(env);
+ env->CP0_LLAddr = 1;
+ env->lladdr = 1;
+}
+
+void helper_eretnc(CPUMIPSState *env)
+{
+ exception_return(env);
+}
+
+void helper_deret(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+
+ env->hflags &= ~MIPS_HFLAG_DM;
+ compute_hflags(env);
+
+ mips_env_set_pc(env, env->CP0_DEPC);
+
+ debug_post_eret(env);
+}
+
+void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
+{
+ static const char *const type_name[] = {
+ "Primary Instruction",
+ "Primary Data or Unified Primary",
+ "Tertiary",
+ "Secondary"
+ };
+ uint32_t cache_type = extract32(op, 0, 2);
+ uint32_t cache_operation = extract32(op, 2, 3);
+ target_ulong index = addr & 0x1fffffff;
+
+ switch (cache_operation) {
+ case 0b010: /* Index Store Tag */
+ memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b001: /* Index Load Tag */
+ memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b000: /* Index Invalidate */
+ case 0b100: /* Hit Invalidate */
+ case 0b110: /* Hit Writeback */
+ /* no-op */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
+ cache_operation, type_name[cache_type]);
+ break;
+ }
+}
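+
+/*
+ * Only the ITC tag path is modelled above: Index Load/Store Tag forward
+ * CP0.TagLo to and from the ITC tag memory region, the invalidate and
+ * writeback operations are no-ops since QEMU does not model the caches
+ * themselves, and everything else is logged as unimplemented.
+ */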
diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c
new file mode 100644
index 000000000..73254d192
--- /dev/null
+++ b/target/mips/tcg/sysemu/tlb_helper.c
@@ -0,0 +1,1423 @@
+/*
+ * MIPS TLB (Translation lookaside buffer) helpers.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "hw/mips/cpudevs.h"
+#include "exec/helper-proto.h"
+
+/* TLB management */
+static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
+{
+ /* Discard entries from env->tlb[first] onwards. */
+ while (env->tlb->tlb_in_use > first) {
+ r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
+ }
+}
+
+static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
+{
+#if defined(TARGET_MIPS64)
+ return extract64(entrylo, 6, 54);
+#else
+ return extract64(entrylo, 6, 24) | /* PFN */
+ (extract64(entrylo, 32, 32) << 24); /* PFNX */
+#endif
+}
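+
+/*
+ * On 32-bit targets the physical frame number is split between the PFN
+ * field (EntryLo bits 6..29) and the PFNX extension (bits 32..63 of the
+ * 64-bit register view); the helper above stitches the two fields back
+ * into one contiguous frame number.
+ */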
+
+static void r4k_fill_tlb(CPUMIPSState *env, int idx)
+{
+ r4k_tlb_t *tlb;
+ uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
+
+ /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
+ tlb->EHINV = 1;
+ return;
+ }
+ tlb->EHINV = 0;
+ tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ tlb->VPN &= env->SEGMask;
+#endif
+ tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ tlb->MMID = env->CP0_MemoryMapID;
+ tlb->PageMask = env->CP0_PageMask;
+ tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
+ tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
+ tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
+ tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
+ tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
+ tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
+ tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
+ tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+ tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
+}
+
+static void r4k_helper_tlbinv(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (!tlb->G && tlb_mmid == MMID) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbinvf(CPUMIPSState *env)
+{
+ int idx;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbwi(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ VPN &= env->SEGMask;
+#endif
+ EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
+ G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ V0 = (env->CP0_EntryLo0 & 2) != 0;
+ D0 = (env->CP0_EntryLo0 & 4) != 0;
+ XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ V1 = (env->CP0_EntryLo1 & 2) != 0;
+ D1 = (env->CP0_EntryLo1 & 4) != 0;
+ XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /*
+ * Discard cached TLB entries, unless tlbwi is just upgrading access
+ * permissions on the current entry.
+ */
+ if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
+ (!tlb->EHINV && EHINV) ||
+ (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
+ (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
+ (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
+ (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+ }
+
+ r4k_invalidate_tlb(env, idx, 0);
+ r4k_fill_tlb(env, idx);
+}
+
+static void r4k_helper_tlbwr(CPUMIPSState *env)
+{
+ int r = cpu_mips_get_random(env);
+
+ r4k_invalidate_tlb(env, r, 1);
+ r4k_fill_tlb(env, r);
+}
+
+static void r4k_helper_tlbp(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ r4k_tlb_t *tlb;
+ target_ulong mask;
+ target_ulong tag;
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ env->CP0_Index = i;
+ break;
+ }
+ }
+ if (i == env->tlb->nb_tlb) {
+ /* No match; discard any shadow entries that do match. */
+ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
+ r4k_mips_tlb_flush_extra(env, i);
+ break;
+ }
+ }
+
+ env->CP0_Index |= 0x80000000;
+ }
+}
+
+static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
+{
+#if defined(TARGET_MIPS64)
+ return tlb_pfn << 6;
+#else
+ return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
+ (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
+#endif
+}
+
+static void r4k_helper_tlbr(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* If this will change the current ASID/MMID, flush qemu's TLB. */
+ if (MMID != tlb_mmid) {
+ cpu_mips_tlb_flush(env);
+ }
+
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+
+ if (tlb->EHINV) {
+ env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
+ env->CP0_PageMask = 0;
+ env->CP0_EntryLo0 = 0;
+ env->CP0_EntryLo1 = 0;
+ } else {
+ env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
+ env->CP0_MemoryMapID = tlb->MMID;
+ env->CP0_PageMask = tlb->PageMask;
+ env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
+ ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
+ env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
+ ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
+ }
+}
+
+void helper_tlbwi(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwi(env);
+}
+
+void helper_tlbwr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwr(env);
+}
+
+void helper_tlbp(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbp(env);
+}
+
+void helper_tlbr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbr(env);
+}
+
+void helper_tlbinv(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinv(env);
+}
+
+void helper_tlbinvf(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinvf(env);
+}
+
+static void global_invalidate_tlb(CPUMIPSState *env,
+ uint32_t invMsgVPN2,
+ uint8_t invMsgR,
+ uint32_t invMsgMMid,
+ bool invAll,
+ bool invVAMMid,
+ bool invMMid,
+ bool invVA)
+{
+ int idx;
+ r4k_tlb_t *tlb;
+ bool VAMatch;
+ bool MMidMatch;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VAMatch =
+ (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
+#ifdef TARGET_MIPS64
+ &&
+ (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
+#endif
+ );
+ MMidMatch = tlb->MMID == invMsgMMid;
+ if ((invAll && (idx > env->CP0_Wired)) ||
+ (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
+ (VAMatch && invVA) ||
+ (MMidMatch && !(tlb->G) && invMMid)) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
+{
+ bool invAll = type == 0;
+ bool invVA = type == 1;
+ bool invMMid = type == 2;
+ bool invVAMMid = type == 3;
+ uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
+ uint8_t invMsgR = 0;
+ uint32_t invMsgMMid = env->CP0_MemoryMapID;
+ CPUState *other_cs = first_cpu;
+
+#ifdef TARGET_MIPS64
+ invMsgR = extract64(arg, 62, 2);
+#endif
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
+ invAll, invVAMMid, invMMid, invVA);
+ }
+}
+
+/* no MMU emulation */
+static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* fixed mapping MMU emulation */
+static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
+{
+ if (address <= (int32_t)0x7FFFFFFFUL) {
+ if (!(env->CP0_Status & (1 << CP0St_ERL))) {
+ *physical = address + 0x40000000UL;
+ } else {
+ *physical = address;
+ }
+ } else if (address <= (int32_t)0xBFFFFFFFUL) {
+ *physical = address & 0x1FFFFFFF;
+ } else {
+ *physical = address;
+ }
+
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* MIPS32/MIPS64 R4000-style MMU emulation */
+static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ for (i = 0; i < env->tlb->tlb_in_use; i++) {
+ r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ target_ulong tag = address & ~mask;
+ target_ulong VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+
+ /* Check ASID/MMID, virtual page number & size */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ int n = !!(address & mask & ~(mask >> 1));
+ /* Check access rights */
+ if (!(n ? tlb->V1 : tlb->V0)) {
+ return TLBRET_INVALID;
+ }
+ if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+ return TLBRET_XI;
+ }
+ if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+ return TLBRET_RI;
+ }
+ if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
+ *physical = tlb->PFN[n] | (address & (mask >> 1));
+ *prot = PAGE_READ;
+ if (n ? tlb->D1 : tlb->D0) {
+ *prot |= PAGE_WRITE;
+ }
+ if (!(n ? tlb->XI1 : tlb->XI0)) {
+ *prot |= PAGE_EXEC;
+ }
+ return TLBRET_MATCH;
+ }
+ return TLBRET_DIRTY;
+ }
+ }
+ return TLBRET_NOMATCH;
+}
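+
+/*
+ * Even/odd page selection above: mask & ~(mask >> 1) isolates the lowest
+ * VPN bit not covered by the page mask. With 4 KiB pages (mask 0x1fff)
+ * that is bit 12, so n = 0 picks the even page (PFN[0]) and n = 1 the
+ * odd page (PFN[1]) of the entry's page pair.
+ */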
+
+static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &no_mmu_map_address;
+}
+
+static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &fixed_mmu_map_address;
+}
+
+static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
+ env->tlb->map_address = &r4k_map_address;
+ env->tlb->helper_tlbwi = r4k_helper_tlbwi;
+ env->tlb->helper_tlbwr = r4k_helper_tlbwr;
+ env->tlb->helper_tlbp = r4k_helper_tlbp;
+ env->tlb->helper_tlbr = r4k_helper_tlbr;
+ env->tlb->helper_tlbinv = r4k_helper_tlbinv;
+ env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
+}
+
+void mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
+
+ switch (def->mmu_type) {
+ case MMU_TYPE_NONE:
+ no_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R4000:
+ r4k_mmu_init(env, def);
+ break;
+ case MMU_TYPE_FMT:
+ fixed_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R3000:
+ case MMU_TYPE_R6000:
+ case MMU_TYPE_R8000:
+ default:
+ cpu_abort(env_cpu(env), "MMU type not supported\n");
+ }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env)
+{
+ /* Flush qemu's TLB and discard all shadowed entries. */
+ tlb_flush(env_cpu(env));
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+
+static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, int tlb_error)
+{
+ CPUState *cs = env_cpu(env);
+ int exception = 0, error_code = 0;
+
+ if (access_type == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+
+ switch (tlb_error) {
+ default:
+ case TLBRET_BADADDR:
+ /* Reference to kernel address from user mode or supervisor mode */
+ /* Reference to supervisor address from user mode */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_AdES;
+ } else {
+ exception = EXCP_AdEL;
+ }
+ break;
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ error_code |= EXCP_TLB_NOMATCH;
+ break;
+ case TLBRET_INVALID:
+ /* TLB match with no valid bit */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_DIRTY:
+ /* TLB match but 'D' bit is cleared */
+ exception = EXCP_LTLBL;
+ break;
+ case TLBRET_XI:
+ /* Execute-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBXI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_RI:
+ /* Read-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBRI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ }
+ /* Raise exception */
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = address;
+ }
+ env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
+ ((address >> 9) & 0x007ffff0);
+ env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
+ (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
+ (address & (TARGET_PAGE_MASK << 1));
+#if defined(TARGET_MIPS64)
+ env->CP0_EntryHi &= env->SEGMask;
+ env->CP0_XContext =
+ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
+ (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */
+ (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */
+#endif
+ cs->exception_index = exception;
+ env->error_code = error_code;
+}
+
+#if !defined(TARGET_MIPS64)
+
+/*
+ * Perform hardware page table walk
+ *
+ * Memory accesses are performed using the KERNEL privilege level.
+ * Synchronous exceptions detected on memory accesses cause a silent exit
+ * from page table walking, resulting in a TLB or XTLB Refill exception.
+ *
+ * Implementations are not required to support page table walk memory
+ * accesses from mapped memory regions. When an unsupported access is
+ * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
+ * exception.
+ *
+ * Note that if an exception is caused by AddressTranslation or LoadMemory
+ * functions, the exception is not taken, a silent exit is taken,
+ * resulting in a TLB or XTLB Refill exception.
+ */
+
+static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
+ uint64_t *pte)
+{
+ if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
+ return false;
+ }
+ if (entry_size == 64) {
+ *pte = cpu_ldq_code(env, vaddr);
+ } else {
+ *pte = cpu_ldl_code(env, vaddr);
+ }
+ return true;
+}
+
+static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
+ int entry_size, int ptei)
+{
+ uint64_t result = entry;
+ uint64_t rixi;
+ if (ptei > entry_size) {
+ ptei -= 32;
+ }
+ result >>= (ptei - 2);
+ rixi = result & 3;
+ result >>= 2;
+ result |= rixi << CP0EnLo_XI;
+ return result;
+}
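+
+/*
+ * After the shift by (ptei - 2), the two low bits of the result hold the
+ * RI/XI pair; the helper above strips them from the frame number and
+ * re-inserts them at the EntryLo XI/RI positions so the returned value
+ * matches the layout r4k_fill_tlb() expects.
+ */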
+
+static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
+ int directory_index, bool *huge_page, bool *hgpg_directory_hit,
+ uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
+{
+ int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
+ int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
+ int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
+ int directory_shift = (ptew > 1) ? -1 :
+ (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
+ int leaf_shift = (ptew > 1) ? -1 :
+ (ptew == 1) ? native_shift + 1 : native_shift;
+ uint32_t direntry_size = 1 << (directory_shift + 3);
+ uint32_t leafentry_size = 1 << (leaf_shift + 3);
+ uint64_t entry;
+ uint64_t paddr;
+ int prot;
+ uint64_t lsb = 0;
+ uint64_t w = 0;
+
+ if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false)) !=
+ TLBRET_MATCH) {
+ /* wrong base address */
+ return 0;
+ }
+ if (!get_pte(env, *vaddr, direntry_size, &entry)) {
+ return 0;
+ }
+
+ if ((entry & (1 << psn)) && hugepg) {
+ *huge_page = true;
+ *hgpg_directory_hit = true;
+ entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
+ w = directory_index - 1;
+ if (directory_index & 0x1) {
+ /* Generate adjacent page from same PTE for odd TLB page */
+ lsb = BIT_ULL(w) >> 6;
+ *pw_entrylo0 = entry & ~lsb; /* even page */
+ *pw_entrylo1 = entry | lsb; /* odd page */
+ } else if (dph) {
+ int oddpagebit = 1 << leaf_shift;
+ uint64_t vaddr2 = *vaddr ^ oddpagebit;
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo1 = entry;
+ } else {
+ *pw_entrylo0 = entry;
+ }
+ if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false)) !=
+ TLBRET_MATCH) {
+ return 0;
+ }
+ if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
+ return 0;
+ }
+ entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo0 = entry;
+ } else {
+ *pw_entrylo1 = entry;
+ }
+ } else {
+ return 0;
+ }
+ return 1;
+ } else {
+ *vaddr = entry;
+ return 2;
+ }
+}
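+
+/*
+ * walk_directory() return values: 0 aborts the walk (bad address or an
+ * unsupported entry), 1 means a huge-page leaf was found and the EntryLo
+ * pair is already populated (the caller jumps straight to refill), and
+ * 2 means the entry points at the next directory level, returned via
+ * *vaddr.
+ */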
+
+static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
+ int mmu_idx)
+{
+ int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
+ int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
+ int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
+ int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
+ int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
+
+ /* Initial values */
+ bool huge_page = false;
+ bool hgpg_bdhit = false;
+ bool hgpg_gdhit = false;
+ bool hgpg_udhit = false;
+ bool hgpg_mdhit = false;
+
+ int32_t pw_pagemask = 0;
+ target_ulong pw_entryhi = 0;
+ uint64_t pw_entrylo0 = 0;
+ uint64_t pw_entrylo1 = 0;
+
+ /* Native pointer size */
+ /* For 32-bit architectures, this bit is fixed to 0. */
+ int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
+
+ /* Indices from PWField */
+ int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
+ int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
+ int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
+ int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+
+ /* Indices computed from faulting address */
+ int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
+ int uindex = (address >> pf_udw) & ((1 << udw) - 1);
+ int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
+ int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
+
+ /* Other HTW configs */
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+
+ /* HTW Shift values (depend on entry size) */
+ int directory_shift = (ptew > 1) ? -1 :
+ (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
+ int leaf_shift = (ptew > 1) ? -1 :
+ (ptew == 1) ? native_shift + 1 : native_shift;
+
+ /* Offsets into tables */
+ int goffset = gindex << directory_shift;
+ int uoffset = uindex << directory_shift;
+ int moffset = mindex << directory_shift;
+ int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
+ int ptoffset1 = ptoffset0 | (1 << (leaf_shift));
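+ /*
+  * Leaf PTEs are fetched as an even/odd pair: ptoffset0 addresses the
+  * even entry of the pair, ptoffset1 the odd one.
+  */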
+
+ uint32_t leafentry_size = 1 << (leaf_shift + 3);
+
+ /* Starting address - Page Table Base */
+ uint64_t vaddr = env->CP0_PWBase;
+
+ uint64_t dir_entry;
+ uint64_t paddr;
+ int prot;
+ int m;
+
+ if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
+ /* walker is unimplemented */
+ return false;
+ }
+ if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
+ /* walker is disabled */
+ return false;
+ }
+ if (!(gdw > 0 || udw > 0 || mdw > 0)) {
+ /* no structure to walk */
+ return false;
+ }
+ if ((directory_shift == -1) || (leaf_shift == -1)) {
+ return false;
+ }
+
+ /* Global Directory */
+ if (gdw > 0) {
+ vaddr |= goffset;
+ switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
+ &pw_entrylo0, &pw_entrylo1))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Upper directory */
+ if (udw > 0) {
+ vaddr |= uoffset;
+ switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
+ &pw_entrylo0, &pw_entrylo1))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Middle directory */
+ if (mdw > 0) {
+ vaddr |= moffset;
+ switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
+ &pw_entrylo0, &pw_entrylo1))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Leaf Level Page Table - First half of PTE pair */
+ vaddr |= ptoffset0;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false)) !=
+ TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
+ pw_entrylo0 = dir_entry;
+
+ /* Leaf Level Page Table - Second half of PTE pair */
+ vaddr |= ptoffset1;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false)) !=
+ TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
+ pw_entrylo1 = dir_entry;
+
+refill:
+
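+ /*
+  * Derive the page mask from the PT index width by default; a huge
+  * page hit at a directory level widens it to that level's span below.
+  */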
+ m = (1 << pf_ptw) - 1;
+
+ if (huge_page) {
+ switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
+ hgpg_mdhit)
+ {
+ case 4:
+ m = (1 << pf_gdw) - 1;
+ if (pf_gdw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 2:
+ m = (1 << pf_udw) - 1;
+ if (pf_udw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 1:
+ m = (1 << pf_mdw) - 1;
+ if (pf_mdw & 1) {
+ m >>= 1;
+ }
+ break;
+ }
+ }
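+ /* Convert the byte span into PageMask register units. */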
+ pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
+ update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
+ pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
+ {
+ target_ulong tmp_entryhi = env->CP0_EntryHi;
+ int32_t tmp_pagemask = env->CP0_PageMask;
+ uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
+ uint64_t tmp_entrylo1 = env->CP0_EntryLo1;
+
+ env->CP0_EntryHi = pw_entryhi;
+ env->CP0_PageMask = pw_pagemask;
+ env->CP0_EntryLo0 = pw_entrylo0;
+ env->CP0_EntryLo1 = pw_entrylo1;
+
+ /*
+ * The hardware page walker inserts a page into the TLB in a manner
+ * identical to a TLBWR instruction as executed by the software refill
+ * handler.
+ */
+ r4k_helper_tlbwr(env);
+
+ env->CP0_EntryHi = tmp_entryhi;
+ env->CP0_PageMask = tmp_pagemask;
+ env->CP0_EntryLo0 = tmp_entrylo0;
+ env->CP0_EntryLo1 = tmp_entrylo1;
+ }
+ return true;
+}
+#endif
+
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ hwaddr physical;
+ int prot;
+ int ret = TLBRET_BADADDR;
+
+ /* data access */
+ /* XXX: determine the correct access type by using cpu_restore_state() */
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ switch (ret) {
+ case TLBRET_MATCH:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
+ " prot %d\n", __func__, address, physical, prot);
+ break;
+ default:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
+ ret);
+ break;
+ }
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+#if !defined(TARGET_MIPS64)
+ if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
+ /*
+ * Memory reads during hardware page table walking are performed
+ * as if they were kernel-mode load instructions.
+ */
+ int mode = (env->hflags & MIPS_HFLAG_KSU);
+ bool ret_walker;
+ env->hflags &= ~MIPS_HFLAG_KSU;
+ ret_walker = page_table_walk_refill(env, address, mmu_idx);
+ env->hflags |= mode;
+ if (ret_walker) {
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ }
+ }
+#endif
+ if (probe) {
+ return false;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
+}
+
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ hwaddr physical;
+ int prot;
+ int ret = 0;
+ CPUState *cs = env_cpu(env);
+
+ /* data access */
+ ret = get_physical_address(env, &physical, &prot, address, access_type,
+ cpu_mmu_index(env, false));
+ if (ret == TLBRET_MATCH) {
+ return physical;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+static void set_hflags_for_handler(CPUMIPSState *env)
+{
+ /* Exception handlers are entered in 32-bit mode. */
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ /* ...except that microMIPS lets you choose. */
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->hflags |= (!!(env->CP0_Config3 &
+ (1 << CP0C3_ISA_ON_EXC))
+ << MIPS_HFLAG_M16_SHIFT);
+ }
+}
+
+static inline void set_badinstr_registers(CPUMIPSState *env)
+{
+ if (env->insn_flags & ISA_NANOMIPS32) {
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
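+ /*
+  * Bit 28 of the assembled word distinguishes 16-bit encodings
+  * from longer ones; when it is clear, fetch the second halfword
+  * to complete the 32-bit instruction.
+  */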
+ if ((instr & 0x10000000) == 0) {
+ instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
+ }
+ env->CP0_BadInstr = instr;
+
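+ /*
+  * Major opcode 0x18 marks a 48-bit encoding; the extra halfword
+  * is reported in BadInstrX.
+  */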
+ if ((instr & 0xFC000000) == 0x60000000) {
+ instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
+ env->CP0_BadInstrX = instr;
+ }
+ }
+ return;
+ }
+
+ if (env->hflags & MIPS_HFLAG_M16) {
+ /* TODO: add BadInstr support for microMIPS */
+ return;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
+ }
+ if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
+ (env->hflags & MIPS_HFLAG_BMASK)) {
+ env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
+ }
+}
+
+void mips_cpu_do_interrupt(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool update_badinstr = false;
+ target_ulong offset;
+ int cause = -1;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
+ " %s exception\n",
+ __func__, env->active_tc.PC, env->CP0_EPC,
+ mips_exception_name(cs->exception_index));
+ }
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
+ offset = 0x180;
+ switch (cs->exception_index) {
+ case EXCP_DSS:
+ env->CP0_Debug |= 1 << CP0DB_DSS;
+ /*
+ * Debug single step cannot be raised inside a delay slot and
+ * resume will always occur on the next instruction
+ * (but we assume the pc has always been updated during
+ * code translation).
+ */
+ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
+ goto enter_debug_mode;
+ case EXCP_DINT:
+ env->CP0_Debug |= 1 << CP0DB_DINT;
+ goto set_DEPC;
+ case EXCP_DIB:
+ env->CP0_Debug |= 1 << CP0DB_DIB;
+ goto set_DEPC;
+ case EXCP_DBp:
+ env->CP0_Debug |= 1 << CP0DB_DBp;
+ /* Set up DExcCode - SDBBP instruction */
+ env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
+ (9 << CP0DB_DEC);
+ goto set_DEPC;
+ case EXCP_DDBS:
+ env->CP0_Debug |= 1 << CP0DB_DDBS;
+ goto set_DEPC;
+ case EXCP_DDBL:
+ env->CP0_Debug |= 1 << CP0DB_DDBL;
+ set_DEPC:
+ env->CP0_DEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ enter_debug_mode:
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ /* EJTAG probe trap enable is not implemented... */
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base + 0x480;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_RESET:
+ cpu_reset(CPU(cpu));
+ break;
+ case EXCP_SRESET:
+ env->CP0_Status |= (1 << CP0St_SR);
+ memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
+ goto set_error_EPC;
+ case EXCP_NMI:
+ env->CP0_Status |= (1 << CP0St_NMI);
+ set_error_EPC:
+ env->CP0_ErrorEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_EXT_INTERRUPT:
+ cause = 0;
+ if (env->CP0_Cause & (1 << CP0Ca_IV)) {
+ uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
+
+ if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
+ offset = 0x200;
+ } else {
+ uint32_t vector = 0;
+ uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+ /*
+ * For VEIC mode, the external interrupt controller feeds
+ * the vector through the CP0Cause IP lines.
+ */
+ vector = pending;
+ } else {
+ /*
+ * Vectored Interrupts
+ * Mask with Status.IM7-IM0 to get enabled interrupts.
+ */
+ pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
+ /* Find the highest-priority interrupt. */
+ while (pending >>= 1) {
+ vector++;
+ }
+ }
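+ /* IntCtl.VS encodes the vector spacing in 32-byte units. */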
+ offset = 0x200 + (vector * (spacing << 5));
+ }
+ }
+ goto set_EPC;
+ case EXCP_LTLBL:
+ cause = 1;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_TLBL:
+ cause = 2;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_TLBS:
+ cause = 3;
+ update_badinstr = 1;
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_AdEL:
+ cause = 4;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_AdES:
+ cause = 5;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_IBE:
+ cause = 6;
+ goto set_EPC;
+ case EXCP_DBE:
+ cause = 7;
+ goto set_EPC;
+ case EXCP_SYSCALL:
+ cause = 8;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_BREAK:
+ cause = 9;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_RI:
+ cause = 10;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_CpU:
+ cause = 11;
+ update_badinstr = 1;
+ env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
+ (env->error_code << CP0Ca_CE);
+ goto set_EPC;
+ case EXCP_OVERFLOW:
+ cause = 12;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TRAP:
+ cause = 13;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MSAFPE:
+ cause = 14;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_FPE:
+ cause = 15;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_C2E:
+ cause = 18;
+ goto set_EPC;
+ case EXCP_TLBRI:
+ cause = 19;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TLBXI:
+ cause = 20;
+ goto set_EPC;
+ case EXCP_MSADIS:
+ cause = 21;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MDMX:
+ cause = 22;
+ goto set_EPC;
+ case EXCP_DWATCH:
+ cause = 23;
+ /* XXX: TODO: manage deferred watch exceptions */
+ goto set_EPC;
+ case EXCP_MCHECK:
+ cause = 24;
+ goto set_EPC;
+ case EXCP_THREAD:
+ cause = 25;
+ goto set_EPC;
+ case EXCP_DSPDIS:
+ cause = 26;
+ goto set_EPC;
+ case EXCP_CACHE:
+ cause = 30;
+ offset = 0x100;
+ set_EPC:
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_EPC = exception_resume_pc(env);
+ if (update_badinstr) {
+ set_badinstr_registers(env);
+ }
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ env->CP0_Cause |= (1U << CP0Ca_BD);
+ } else {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->CP0_Status |= (1 << CP0St_EXL);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ }
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ if (env->CP0_Status & (1 << CP0St_BEV)) {
+ env->active_tc.PC = env->exception_base + 0x200;
+ } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
+ env->CP0_Config5 & (1 << CP0C5_CV))) {
+ /* Force KSeg1 for cache errors */
+ env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
+ } else {
+ env->active_tc.PC = env->CP0_EBase & ~0xfff;
+ }
+
+ env->active_tc.PC += offset;
+ set_hflags_for_handler(env);
+ env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
+ (cause << CP0Ca_EC);
+ break;
+ default:
+ abort();
+ }
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
+ " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
+ __func__, env->active_tc.PC, env->CP0_EPC, cause,
+ env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
+ env->CP0_DEPC);
+ }
+ cs->exception_index = EXCP_NONE;
+}
+
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
+{
+ CPUState *cs = env_cpu(env);
+ r4k_tlb_t *tlb;
+ target_ulong addr;
+ target_ulong end;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ target_ulong mask;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /*
+ * The qemu TLB is flushed when the ASID/MMID changes, so no need to
+ * flush these entries again.
+ */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (tlb->G == 0 && tlb_mmid != MMID) {
+ return;
+ }
+
+ if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
+ /*
+ * For tlbwr, we can shadow the discarded entry into
+ * a new (fake) TLB entry, as long as the guest cannot
+ * tell that it's there.
+ */
+ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
+ env->tlb->tlb_in_use++;
+ return;
+ }
+
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ if (tlb->V0) {
+ addr = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | (mask >> 1);
+ while (addr < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+ if (tlb->V1) {
+ addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | mask;
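+ /*
+  * Comparing against addr - 1 keeps the loop bounded even when end
+  * is the very top of the address space and addr wraps to zero.
+  */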
+ while (addr - 1 < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+}
diff --git a/target/mips/tcg/sysemu_helper.h.inc b/target/mips/tcg/sysemu_helper.h.inc
new file mode 100644
index 000000000..4353a966f
--- /dev/null
+++ b/target/mips/tcg/sysemu_helper.h.inc
@@ -0,0 +1,185 @@
+/*
+ * QEMU MIPS sysemu helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+DEF_HELPER_1(do_semihosting, void, env)
+
+/* CP0 helpers */
+DEF_HELPER_1(mfc0_mvpcontrol, tl, env)
+DEF_HELPER_1(mfc0_mvpconf0, tl, env)
+DEF_HELPER_1(mfc0_mvpconf1, tl, env)
+DEF_HELPER_1(mftc0_vpecontrol, tl, env)
+DEF_HELPER_1(mftc0_vpeconf0, tl, env)
+DEF_HELPER_1(mfc0_random, tl, env)
+DEF_HELPER_1(mfc0_tcstatus, tl, env)
+DEF_HELPER_1(mftc0_tcstatus, tl, env)
+DEF_HELPER_1(mfc0_tcbind, tl, env)
+DEF_HELPER_1(mftc0_tcbind, tl, env)
+DEF_HELPER_1(mfc0_tcrestart, tl, env)
+DEF_HELPER_1(mftc0_tcrestart, tl, env)
+DEF_HELPER_1(mfc0_tchalt, tl, env)
+DEF_HELPER_1(mftc0_tchalt, tl, env)
+DEF_HELPER_1(mfc0_tccontext, tl, env)
+DEF_HELPER_1(mftc0_tccontext, tl, env)
+DEF_HELPER_1(mfc0_tcschedule, tl, env)
+DEF_HELPER_1(mftc0_tcschedule, tl, env)
+DEF_HELPER_1(mfc0_tcschefback, tl, env)
+DEF_HELPER_1(mftc0_tcschefback, tl, env)
+DEF_HELPER_1(mfc0_count, tl, env)
+DEF_HELPER_1(mfc0_saar, tl, env)
+DEF_HELPER_1(mfhc0_saar, tl, env)
+DEF_HELPER_1(mftc0_entryhi, tl, env)
+DEF_HELPER_1(mftc0_status, tl, env)
+DEF_HELPER_1(mftc0_cause, tl, env)
+DEF_HELPER_1(mftc0_epc, tl, env)
+DEF_HELPER_1(mftc0_ebase, tl, env)
+DEF_HELPER_2(mftc0_configx, tl, env, tl)
+DEF_HELPER_1(mfc0_lladdr, tl, env)
+DEF_HELPER_1(mfc0_maar, tl, env)
+DEF_HELPER_1(mfhc0_maar, tl, env)
+DEF_HELPER_2(mfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(mfc0_watchhi, tl, env, i32)
+DEF_HELPER_2(mfhc0_watchhi, tl, env, i32)
+DEF_HELPER_1(mfc0_debug, tl, env)
+DEF_HELPER_1(mftc0_debug, tl, env)
+#ifdef TARGET_MIPS64
+DEF_HELPER_1(dmfc0_tcrestart, tl, env)
+DEF_HELPER_1(dmfc0_tchalt, tl, env)
+DEF_HELPER_1(dmfc0_tccontext, tl, env)
+DEF_HELPER_1(dmfc0_tcschedule, tl, env)
+DEF_HELPER_1(dmfc0_tcschefback, tl, env)
+DEF_HELPER_1(dmfc0_lladdr, tl, env)
+DEF_HELPER_1(dmfc0_maar, tl, env)
+DEF_HELPER_2(dmfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(dmfc0_watchhi, tl, env, i32)
+DEF_HELPER_1(dmfc0_saar, tl, env)
+#endif /* TARGET_MIPS64 */
+
+DEF_HELPER_2(mtc0_index, void, env, tl)
+DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mttc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mttc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf1, void, env, tl)
+DEF_HELPER_2(mtc0_yqmask, void, env, tl)
+DEF_HELPER_2(mtc0_vpeopt, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo0, void, env, tl)
+DEF_HELPER_2(mtc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mttc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mtc0_tcbind, void, env, tl)
+DEF_HELPER_2(mttc0_tcbind, void, env, tl)
+DEF_HELPER_2(mtc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mttc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mtc0_tchalt, void, env, tl)
+DEF_HELPER_2(mttc0_tchalt, void, env, tl)
+DEF_HELPER_2(mtc0_tccontext, void, env, tl)
+DEF_HELPER_2(mttc0_tccontext, void, env, tl)
+DEF_HELPER_2(mtc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mttc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mtc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mttc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
+DEF_HELPER_2(mtc0_context, void, env, tl)
+DEF_HELPER_2(mtc0_memorymapid, void, env, tl)
+DEF_HELPER_2(mtc0_pagemask, void, env, tl)
+DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
+DEF_HELPER_2(mtc0_segctl0, void, env, tl)
+DEF_HELPER_2(mtc0_segctl1, void, env, tl)
+DEF_HELPER_2(mtc0_segctl2, void, env, tl)
+DEF_HELPER_2(mtc0_pwfield, void, env, tl)
+DEF_HELPER_2(mtc0_pwsize, void, env, tl)
+DEF_HELPER_2(mtc0_wired, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf2, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf3, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf4, void, env, tl)
+DEF_HELPER_2(mtc0_hwrena, void, env, tl)
+DEF_HELPER_2(mtc0_pwctl, void, env, tl)
+DEF_HELPER_2(mtc0_count, void, env, tl)
+DEF_HELPER_2(mtc0_saari, void, env, tl)
+DEF_HELPER_2(mtc0_saar, void, env, tl)
+DEF_HELPER_2(mthc0_saar, void, env, tl)
+DEF_HELPER_2(mtc0_entryhi, void, env, tl)
+DEF_HELPER_2(mttc0_entryhi, void, env, tl)
+DEF_HELPER_2(mtc0_compare, void, env, tl)
+DEF_HELPER_2(mtc0_status, void, env, tl)
+DEF_HELPER_2(mttc0_status, void, env, tl)
+DEF_HELPER_2(mtc0_intctl, void, env, tl)
+DEF_HELPER_2(mtc0_srsctl, void, env, tl)
+DEF_HELPER_2(mtc0_cause, void, env, tl)
+DEF_HELPER_2(mttc0_cause, void, env, tl)
+DEF_HELPER_2(mtc0_ebase, void, env, tl)
+DEF_HELPER_2(mttc0_ebase, void, env, tl)
+DEF_HELPER_2(mtc0_config0, void, env, tl)
+DEF_HELPER_2(mtc0_config2, void, env, tl)
+DEF_HELPER_2(mtc0_config3, void, env, tl)
+DEF_HELPER_2(mtc0_config4, void, env, tl)
+DEF_HELPER_2(mtc0_config5, void, env, tl)
+DEF_HELPER_2(mtc0_lladdr, void, env, tl)
+DEF_HELPER_2(mtc0_maar, void, env, tl)
+DEF_HELPER_2(mthc0_maar, void, env, tl)
+DEF_HELPER_2(mtc0_maari, void, env, tl)
+DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32)
+DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32)
+DEF_HELPER_3(mthc0_watchhi, void, env, tl, i32)
+DEF_HELPER_2(mtc0_xcontext, void, env, tl)
+DEF_HELPER_2(mtc0_framemask, void, env, tl)
+DEF_HELPER_2(mtc0_debug, void, env, tl)
+DEF_HELPER_2(mttc0_debug, void, env, tl)
+DEF_HELPER_2(mtc0_performance0, void, env, tl)
+DEF_HELPER_2(mtc0_errctl, void, env, tl)
+DEF_HELPER_2(mtc0_taglo, void, env, tl)
+DEF_HELPER_2(mtc0_datalo, void, env, tl)
+DEF_HELPER_2(mtc0_taghi, void, env, tl)
+DEF_HELPER_2(mtc0_datahi, void, env, tl)
+
+#if defined(TARGET_MIPS64)
+DEF_HELPER_2(dmtc0_entrylo0, void, env, i64)
+DEF_HELPER_2(dmtc0_entrylo1, void, env, i64)
+#endif
+
+/* MIPS MT functions */
+DEF_HELPER_2(mftgpr, tl, env, i32)
+DEF_HELPER_2(mftlo, tl, env, i32)
+DEF_HELPER_2(mfthi, tl, env, i32)
+DEF_HELPER_2(mftacx, tl, env, i32)
+DEF_HELPER_1(mftdsp, tl, env)
+DEF_HELPER_3(mttgpr, void, env, tl, i32)
+DEF_HELPER_3(mttlo, void, env, tl, i32)
+DEF_HELPER_3(mtthi, void, env, tl, i32)
+DEF_HELPER_3(mttacx, void, env, tl, i32)
+DEF_HELPER_2(mttdsp, void, env, tl)
+DEF_HELPER_0(dmt, tl)
+DEF_HELPER_0(emt, tl)
+DEF_HELPER_1(dvpe, tl, env)
+DEF_HELPER_1(evpe, tl, env)
+
+/* R6 Multi-threading */
+DEF_HELPER_1(dvp, tl, env)
+DEF_HELPER_1(evp, tl, env)
+
+/* TLB */
+DEF_HELPER_1(tlbwi, void, env)
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbp, void, env)
+DEF_HELPER_1(tlbr, void, env)
+DEF_HELPER_1(tlbinv, void, env)
+DEF_HELPER_1(tlbinvf, void, env)
+DEF_HELPER_3(ginvt, void, env, tl, i32)
+
+/* Special */
+DEF_HELPER_1(di, tl, env)
+DEF_HELPER_1(ei, tl, env)
+DEF_HELPER_1(eret, void, env)
+DEF_HELPER_1(eretnc, void, env)
+DEF_HELPER_1(deret, void, env)
+DEF_HELPER_3(cache, void, env, tl, i32)
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
new file mode 100644
index 000000000..466768aec
--- /dev/null
+++ b/target/mips/tcg/tcg-internal.h
@@ -0,0 +1,66 @@
+/*
+ * MIPS internal definitions and helpers (TCG accelerator)
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef MIPS_TCG_INTERNAL_H
+#define MIPS_TCG_INTERNAL_H
+
+#include "tcg/tcg.h"
+#include "exec/memattrs.h"
+#include "hw/core/cpu.h"
+#include "cpu.h"
+
+void mips_tcg_init(void);
+
+void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
+void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+
+const char *mips_exception_name(int32_t exception);
+
+void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code, uintptr_t pc);
+
+static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
+ uint32_t exception,
+ uintptr_t pc)
+{
+ do_raise_exception_err(env, exception, 0, pc);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void mips_cpu_do_interrupt(CPUState *cpu);
+bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+void mmu_init(CPUMIPSState *env, const mips_def_t *def);
+
+void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);
+
+void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
+uint32_t cpu_mips_get_random(CPUMIPSState *env);
+
+bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb);
+
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, uintptr_t retaddr);
+void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+void cpu_mips_tlb_flush(CPUMIPSState *env);
+
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+#endif /* !CONFIG_USER_ONLY */
+
+#endif
diff --git a/target/mips/tcg/trace-events b/target/mips/tcg/trace-events
new file mode 100644
index 000000000..0c55e0bba
--- /dev/null
+++ b/target/mips/tcg/trace-events
@@ -0,0 +1,5 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# translate.c
+mips_translate_c0(const char *instr, const char *rn, int reg, int sel) "%s %s (reg %d sel %d)"
+mips_translate_tr(const char *instr, int rt, int u, int sel, int h) "%s (reg %d u %d sel %d h %d)"
diff --git a/target/mips/tcg/trace.h b/target/mips/tcg/trace.h
new file mode 100644
index 000000000..b8c6c4568
--- /dev/null
+++ b/target/mips/tcg/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_mips_tcg.h"
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
new file mode 100644
index 000000000..47db35d7d
--- /dev/null
+++ b/target/mips/tcg/translate.c
@@ -0,0 +1,16220 @@
+/*
+ * MIPS emulation for QEMU - main translation routines
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ * Copyright (c) 2020 Philippe Mathieu-Daudé
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "tcg/tcg-op.h"
+#include "exec/translator.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "semihosting/semihost.h"
+
+#include "trace.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+#include "qemu/qemu-print.h"
+#include "fpu_helper.h"
+#include "translate.h"
+
+/*
+ * Many sysemu-only helpers are not reachable for user-only.
+ * Define stub generators here, so that we need not either sprinkle
+ * ifdefs through the translator, nor provide the helper function.
+ */
+#define STUB_HELPER(NAME, ...) \
+ static inline void gen_helper_##NAME(__VA_ARGS__) \
+ { g_assert_not_reached(); }
+
+#ifdef CONFIG_USER_ONLY
+STUB_HELPER(cache, TCGv_env env, TCGv val, TCGv_i32 reg)
+#endif
+
+enum {
+ /* indirect opcode tables */
+ OPC_SPECIAL = (0x00 << 26),
+ OPC_REGIMM = (0x01 << 26),
+ OPC_CP0 = (0x10 << 26),
+ OPC_CP2 = (0x12 << 26),
+ OPC_CP3 = (0x13 << 26),
+ OPC_SPECIAL2 = (0x1C << 26),
+ OPC_SPECIAL3 = (0x1F << 26),
+ /* arithmetic with immediate */
+ OPC_ADDI = (0x08 << 26),
+ OPC_ADDIU = (0x09 << 26),
+ OPC_SLTI = (0x0A << 26),
+ OPC_SLTIU = (0x0B << 26),
+ /* logic with immediate */
+ OPC_ANDI = (0x0C << 26),
+ OPC_ORI = (0x0D << 26),
+ OPC_XORI = (0x0E << 26),
+ OPC_LUI = (0x0F << 26),
+ /* arithmetic with immediate */
+ OPC_DADDI = (0x18 << 26),
+ OPC_DADDIU = (0x19 << 26),
+ /* Jump and branches */
+ OPC_J = (0x02 << 26),
+ OPC_JAL = (0x03 << 26),
+ OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */
+ OPC_BEQL = (0x14 << 26),
+ OPC_BNE = (0x05 << 26),
+ OPC_BNEL = (0x15 << 26),
+ OPC_BLEZ = (0x06 << 26),
+ OPC_BLEZL = (0x16 << 26),
+ OPC_BGTZ = (0x07 << 26),
+ OPC_BGTZL = (0x17 << 26),
+ OPC_JALX = (0x1D << 26),
+ OPC_DAUI = (0x1D << 26),
+ /* Load and stores */
+ OPC_LDL = (0x1A << 26),
+ OPC_LDR = (0x1B << 26),
+ OPC_LB = (0x20 << 26),
+ OPC_LH = (0x21 << 26),
+ OPC_LWL = (0x22 << 26),
+ OPC_LW = (0x23 << 26),
+ OPC_LWPC = OPC_LW | 0x5,
+ OPC_LBU = (0x24 << 26),
+ OPC_LHU = (0x25 << 26),
+ OPC_LWR = (0x26 << 26),
+ OPC_LWU = (0x27 << 26),
+ OPC_SB = (0x28 << 26),
+ OPC_SH = (0x29 << 26),
+ OPC_SWL = (0x2A << 26),
+ OPC_SW = (0x2B << 26),
+ OPC_SDL = (0x2C << 26),
+ OPC_SDR = (0x2D << 26),
+ OPC_SWR = (0x2E << 26),
+ OPC_LL = (0x30 << 26),
+ OPC_LLD = (0x34 << 26),
+ OPC_LD = (0x37 << 26),
+ OPC_LDPC = OPC_LD | 0x5,
+ OPC_SC = (0x38 << 26),
+ OPC_SCD = (0x3C << 26),
+ OPC_SD = (0x3F << 26),
+ /* Floating point load/store */
+ OPC_LWC1 = (0x31 << 26),
+ OPC_LWC2 = (0x32 << 26),
+ OPC_LDC1 = (0x35 << 26),
+ OPC_LDC2 = (0x36 << 26),
+ OPC_SWC1 = (0x39 << 26),
+ OPC_SWC2 = (0x3A << 26),
+ OPC_SDC1 = (0x3D << 26),
+ OPC_SDC2 = (0x3E << 26),
+ /* Compact Branches */
+ OPC_BLEZALC = (0x06 << 26),
+ OPC_BGEZALC = (0x06 << 26),
+ OPC_BGEUC = (0x06 << 26),
+ OPC_BGTZALC = (0x07 << 26),
+ OPC_BLTZALC = (0x07 << 26),
+ OPC_BLTUC = (0x07 << 26),
+ OPC_BOVC = (0x08 << 26),
+ OPC_BEQZALC = (0x08 << 26),
+ OPC_BEQC = (0x08 << 26),
+ OPC_BLEZC = (0x16 << 26),
+ OPC_BGEZC = (0x16 << 26),
+ OPC_BGEC = (0x16 << 26),
+ OPC_BGTZC = (0x17 << 26),
+ OPC_BLTZC = (0x17 << 26),
+ OPC_BLTC = (0x17 << 26),
+ OPC_BNVC = (0x18 << 26),
+ OPC_BNEZALC = (0x18 << 26),
+ OPC_BNEC = (0x18 << 26),
+ OPC_BC = (0x32 << 26),
+ OPC_BEQZC = (0x36 << 26),
+ OPC_JIC = (0x36 << 26),
+ OPC_BALC = (0x3A << 26),
+ OPC_BNEZC = (0x3E << 26),
+ OPC_JIALC = (0x3E << 26),
+ /* MDMX ASE specific */
+ OPC_MDMX = (0x1E << 26),
+ /* Cache and prefetch */
+ OPC_CACHE = (0x2F << 26),
+ OPC_PREF = (0x33 << 26),
+ /* PC-relative address computation / loads */
+ OPC_PCREL = (0x3B << 26),
+};
+
+/* PC-relative address computation / loads */
+#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19)))
+#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16)))
+enum {
+ /* Instructions determined by bits 19 and 20 */
+ OPC_ADDIUPC = OPC_PCREL | (0 << 19),
+ R6_OPC_LWPC = OPC_PCREL | (1 << 19),
+ OPC_LWUPC = OPC_PCREL | (2 << 19),
+
+ /* Instructions determined by bits 16 ... 20 */
+ OPC_AUIPC = OPC_PCREL | (0x1e << 16),
+ OPC_ALUIPC = OPC_PCREL | (0x1f << 16),
+
+ /* Other */
+ R6_OPC_LDPC = OPC_PCREL | (6 << 18),
+};
+
+/* MIPS special opcodes */
+#define MASK_SPECIAL(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ /* Shifts */
+ OPC_SLL = 0x00 | OPC_SPECIAL,
+ /* NOP is SLL r0, r0, 0 */
+ /* SSNOP is SLL r0, r0, 1 */
+ /* EHB is SLL r0, r0, 3 */
+ OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */
+ OPC_ROTR = OPC_SRL | (1 << 21),
+ OPC_SRA = 0x03 | OPC_SPECIAL,
+ OPC_SLLV = 0x04 | OPC_SPECIAL,
+ OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */
+ OPC_ROTRV = OPC_SRLV | (1 << 6),
+ OPC_SRAV = 0x07 | OPC_SPECIAL,
+ OPC_DSLLV = 0x14 | OPC_SPECIAL,
+ OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */
+ OPC_DROTRV = OPC_DSRLV | (1 << 6),
+ OPC_DSRAV = 0x17 | OPC_SPECIAL,
+ OPC_DSLL = 0x38 | OPC_SPECIAL,
+ OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */
+ OPC_DROTR = OPC_DSRL | (1 << 21),
+ OPC_DSRA = 0x3B | OPC_SPECIAL,
+ OPC_DSLL32 = 0x3C | OPC_SPECIAL,
+ OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */
+ OPC_DROTR32 = OPC_DSRL32 | (1 << 21),
+ OPC_DSRA32 = 0x3F | OPC_SPECIAL,
+ /* Multiplication / division */
+ OPC_MULT = 0x18 | OPC_SPECIAL,
+ OPC_MULTU = 0x19 | OPC_SPECIAL,
+ OPC_DIV = 0x1A | OPC_SPECIAL,
+ OPC_DIVU = 0x1B | OPC_SPECIAL,
+ OPC_DMULT = 0x1C | OPC_SPECIAL,
+ OPC_DMULTU = 0x1D | OPC_SPECIAL,
+ OPC_DDIV = 0x1E | OPC_SPECIAL,
+ OPC_DDIVU = 0x1F | OPC_SPECIAL,
+
+ /* 2 registers arithmetic / logic */
+ OPC_ADD = 0x20 | OPC_SPECIAL,
+ OPC_ADDU = 0x21 | OPC_SPECIAL,
+ OPC_SUB = 0x22 | OPC_SPECIAL,
+ OPC_SUBU = 0x23 | OPC_SPECIAL,
+ OPC_AND = 0x24 | OPC_SPECIAL,
+ OPC_OR = 0x25 | OPC_SPECIAL,
+ OPC_XOR = 0x26 | OPC_SPECIAL,
+ OPC_NOR = 0x27 | OPC_SPECIAL,
+ OPC_SLT = 0x2A | OPC_SPECIAL,
+ OPC_SLTU = 0x2B | OPC_SPECIAL,
+ OPC_DADD = 0x2C | OPC_SPECIAL,
+ OPC_DADDU = 0x2D | OPC_SPECIAL,
+ OPC_DSUB = 0x2E | OPC_SPECIAL,
+ OPC_DSUBU = 0x2F | OPC_SPECIAL,
+ /* Jumps */
+ OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
+ OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
+ /* Traps */
+ OPC_TGE = 0x30 | OPC_SPECIAL,
+ OPC_TGEU = 0x31 | OPC_SPECIAL,
+ OPC_TLT = 0x32 | OPC_SPECIAL,
+ OPC_TLTU = 0x33 | OPC_SPECIAL,
+ OPC_TEQ = 0x34 | OPC_SPECIAL,
+ OPC_TNE = 0x36 | OPC_SPECIAL,
+ /* HI / LO registers load & stores */
+ OPC_MFHI = 0x10 | OPC_SPECIAL,
+ OPC_MTHI = 0x11 | OPC_SPECIAL,
+ OPC_MFLO = 0x12 | OPC_SPECIAL,
+ OPC_MTLO = 0x13 | OPC_SPECIAL,
+ /* Conditional moves */
+ OPC_MOVZ = 0x0A | OPC_SPECIAL,
+ OPC_MOVN = 0x0B | OPC_SPECIAL,
+
+ OPC_SELEQZ = 0x35 | OPC_SPECIAL,
+ OPC_SELNEZ = 0x37 | OPC_SPECIAL,
+
+ OPC_MOVCI = 0x01 | OPC_SPECIAL,
+
+ /* Special */
+ OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */
+ OPC_SYSCALL = 0x0C | OPC_SPECIAL,
+ OPC_BREAK = 0x0D | OPC_SPECIAL,
+ OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */
+ OPC_SYNC = 0x0F | OPC_SPECIAL,
+
+ OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL,
+ OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL,
+ OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL,
+ OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL,
+};
+
+/*
+ * R6 Multiply and Divide instructions have the same opcode
+ * and function field as legacy OPC_MULT[U]/OPC_DIV[U]
+ */
+#define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff)))
+
+enum {
+ R6_OPC_MUL = OPC_MULT | (2 << 6),
+ R6_OPC_MUH = OPC_MULT | (3 << 6),
+ R6_OPC_MULU = OPC_MULTU | (2 << 6),
+ R6_OPC_MUHU = OPC_MULTU | (3 << 6),
+ R6_OPC_DIV = OPC_DIV | (2 << 6),
+ R6_OPC_MOD = OPC_DIV | (3 << 6),
+ R6_OPC_DIVU = OPC_DIVU | (2 << 6),
+ R6_OPC_MODU = OPC_DIVU | (3 << 6),
+
+ R6_OPC_DMUL = OPC_DMULT | (2 << 6),
+ R6_OPC_DMUH = OPC_DMULT | (3 << 6),
+ R6_OPC_DMULU = OPC_DMULTU | (2 << 6),
+ R6_OPC_DMUHU = OPC_DMULTU | (3 << 6),
+ R6_OPC_DDIV = OPC_DDIV | (2 << 6),
+ R6_OPC_DMOD = OPC_DDIV | (3 << 6),
+ R6_OPC_DDIVU = OPC_DDIVU | (2 << 6),
+ R6_OPC_DMODU = OPC_DDIVU | (3 << 6),
+
+ R6_OPC_CLZ = 0x10 | OPC_SPECIAL,
+ R6_OPC_CLO = 0x11 | OPC_SPECIAL,
+ R6_OPC_DCLZ = 0x12 | OPC_SPECIAL,
+ R6_OPC_DCLO = 0x13 | OPC_SPECIAL,
+ R6_OPC_SDBBP = 0x0e | OPC_SPECIAL,
+};
+
+/* REGIMM (rt field) opcodes */
+#define MASK_REGIMM(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 16)))
+
+enum {
+ OPC_BLTZ = (0x00 << 16) | OPC_REGIMM,
+ OPC_BLTZL = (0x02 << 16) | OPC_REGIMM,
+ OPC_BGEZ = (0x01 << 16) | OPC_REGIMM,
+ OPC_BGEZL = (0x03 << 16) | OPC_REGIMM,
+ OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM,
+ OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM,
+ OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM,
+ OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM,
+ OPC_TGEI = (0x08 << 16) | OPC_REGIMM,
+ OPC_TGEIU = (0x09 << 16) | OPC_REGIMM,
+ OPC_TLTI = (0x0A << 16) | OPC_REGIMM,
+ OPC_TLTIU = (0x0B << 16) | OPC_REGIMM,
+ OPC_TEQI = (0x0C << 16) | OPC_REGIMM,
+ OPC_TNEI = (0x0E << 16) | OPC_REGIMM,
+ OPC_SIGRIE = (0x17 << 16) | OPC_REGIMM,
+ OPC_SYNCI = (0x1F << 16) | OPC_REGIMM,
+
+ OPC_DAHI = (0x06 << 16) | OPC_REGIMM,
+ OPC_DATI = (0x1e << 16) | OPC_REGIMM,
+};
+
+/* Special2 opcodes */
+#define MASK_SPECIAL2(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ /* Multiply & xxx operations */
+ OPC_MADD = 0x00 | OPC_SPECIAL2,
+ OPC_MADDU = 0x01 | OPC_SPECIAL2,
+ OPC_MUL = 0x02 | OPC_SPECIAL2,
+ OPC_MSUB = 0x04 | OPC_SPECIAL2,
+ OPC_MSUBU = 0x05 | OPC_SPECIAL2,
+ /* Loongson 2F */
+ OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2,
+ OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2,
+ OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2,
+ OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2,
+ OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2,
+ OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2,
+ OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2,
+ OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2,
+ OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2,
+ OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2,
+ OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2,
+ OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2,
+ /* Misc */
+ OPC_CLZ = 0x20 | OPC_SPECIAL2,
+ OPC_CLO = 0x21 | OPC_SPECIAL2,
+ OPC_DCLZ = 0x24 | OPC_SPECIAL2,
+ OPC_DCLO = 0x25 | OPC_SPECIAL2,
+ /* Special */
+ OPC_SDBBP = 0x3F | OPC_SPECIAL2,
+};
+
+/* Special3 opcodes */
+#define MASK_SPECIAL3(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ OPC_EXT = 0x00 | OPC_SPECIAL3,
+ OPC_DEXTM = 0x01 | OPC_SPECIAL3,
+ OPC_DEXTU = 0x02 | OPC_SPECIAL3,
+ OPC_DEXT = 0x03 | OPC_SPECIAL3,
+ OPC_INS = 0x04 | OPC_SPECIAL3,
+ OPC_DINSM = 0x05 | OPC_SPECIAL3,
+ OPC_DINSU = 0x06 | OPC_SPECIAL3,
+ OPC_DINS = 0x07 | OPC_SPECIAL3,
+ OPC_FORK = 0x08 | OPC_SPECIAL3,
+ OPC_YIELD = 0x09 | OPC_SPECIAL3,
+ OPC_BSHFL = 0x20 | OPC_SPECIAL3,
+ OPC_DBSHFL = 0x24 | OPC_SPECIAL3,
+ OPC_RDHWR = 0x3B | OPC_SPECIAL3,
+ OPC_GINV = 0x3D | OPC_SPECIAL3,
+
+ /* Loongson 2E */
+ OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3,
+ OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3,
+ OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3,
+ OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3,
+ OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3,
+ OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3,
+ OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3,
+ OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3,
+ OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3,
+ OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3,
+ OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3,
+ OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3,
+
+ /* MIPS DSP Load */
+ OPC_LX_DSP = 0x0A | OPC_SPECIAL3,
+ /* MIPS DSP Arithmetic */
+ OPC_ADDU_QB_DSP = 0x10 | OPC_SPECIAL3,
+ OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3,
+ OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3,
+ OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3,
+ /* OPC_ADDUH_QB_DSP is the same as OPC_MULT_G_2E. */
+ /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3,
+ OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3,
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3,
+ OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3,
+ /* MIPS DSP Multiply Sub-class insns */
+ /* OPC_MUL_PH_DSP is the same as OPC_ADDUH_QB_DSP. */
+ /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3,
+ OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_INSV_DSP = 0x0C | OPC_SPECIAL3,
+ OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3,
+ /* MIPS DSP Append Sub-class */
+ OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3,
+ OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3,
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3,
+ OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3,
+
+ /* EVA */
+ OPC_LWLE = 0x19 | OPC_SPECIAL3,
+ OPC_LWRE = 0x1A | OPC_SPECIAL3,
+ OPC_CACHEE = 0x1B | OPC_SPECIAL3,
+ OPC_SBE = 0x1C | OPC_SPECIAL3,
+ OPC_SHE = 0x1D | OPC_SPECIAL3,
+ OPC_SCE = 0x1E | OPC_SPECIAL3,
+ OPC_SWE = 0x1F | OPC_SPECIAL3,
+ OPC_SWLE = 0x21 | OPC_SPECIAL3,
+ OPC_SWRE = 0x22 | OPC_SPECIAL3,
+ OPC_PREFE = 0x23 | OPC_SPECIAL3,
+ OPC_LBUE = 0x28 | OPC_SPECIAL3,
+ OPC_LHUE = 0x29 | OPC_SPECIAL3,
+ OPC_LBE = 0x2C | OPC_SPECIAL3,
+ OPC_LHE = 0x2D | OPC_SPECIAL3,
+ OPC_LLE = 0x2E | OPC_SPECIAL3,
+ OPC_LWE = 0x2F | OPC_SPECIAL3,
+
+ /* R6 */
+ R6_OPC_PREF = 0x35 | OPC_SPECIAL3,
+ R6_OPC_CACHE = 0x25 | OPC_SPECIAL3,
+ R6_OPC_LL = 0x36 | OPC_SPECIAL3,
+ R6_OPC_SC = 0x26 | OPC_SPECIAL3,
+ R6_OPC_LLD = 0x37 | OPC_SPECIAL3,
+ R6_OPC_SCD = 0x27 | OPC_SPECIAL3,
+};
+
+/* Loongson EXT load/store quad word opcodes */
+#define MASK_LOONGSON_GSLSQ(op) (MASK_OP_MAJOR(op) | (op & 0x8020))
+enum {
+ OPC_GSLQ = 0x0020 | OPC_LWC2,
+ OPC_GSLQC1 = 0x8020 | OPC_LWC2,
+ OPC_GSSHFL = OPC_LWC2,
+ OPC_GSSQ = 0x0020 | OPC_SWC2,
+ OPC_GSSQC1 = 0x8020 | OPC_SWC2,
+ OPC_GSSHFS = OPC_SWC2,
+};
+
+/* Loongson EXT shifted load/store opcodes */
+#define MASK_LOONGSON_GSSHFLS(op) (MASK_OP_MAJOR(op) | (op & 0xc03f))
+enum {
+ OPC_GSLWLC1 = 0x4 | OPC_GSSHFL,
+ OPC_GSLWRC1 = 0x5 | OPC_GSSHFL,
+ OPC_GSLDLC1 = 0x6 | OPC_GSSHFL,
+ OPC_GSLDRC1 = 0x7 | OPC_GSSHFL,
+ OPC_GSSWLC1 = 0x4 | OPC_GSSHFS,
+ OPC_GSSWRC1 = 0x5 | OPC_GSSHFS,
+ OPC_GSSDLC1 = 0x6 | OPC_GSSHFS,
+ OPC_GSSDRC1 = 0x7 | OPC_GSSHFS,
+};
+
+/* Loongson EXT LDC2/SDC2 opcodes */
+#define MASK_LOONGSON_LSDC2(op) (MASK_OP_MAJOR(op) | (op & 0x7))
+
+enum {
+ OPC_GSLBX = 0x0 | OPC_LDC2,
+ OPC_GSLHX = 0x1 | OPC_LDC2,
+ OPC_GSLWX = 0x2 | OPC_LDC2,
+ OPC_GSLDX = 0x3 | OPC_LDC2,
+ OPC_GSLWXC1 = 0x6 | OPC_LDC2,
+ OPC_GSLDXC1 = 0x7 | OPC_LDC2,
+ OPC_GSSBX = 0x0 | OPC_SDC2,
+ OPC_GSSHX = 0x1 | OPC_SDC2,
+ OPC_GSSWX = 0x2 | OPC_SDC2,
+ OPC_GSSDX = 0x3 | OPC_SDC2,
+ OPC_GSSWXC1 = 0x6 | OPC_SDC2,
+ OPC_GSSDXC1 = 0x7 | OPC_SDC2,
+};
+
+/* BSHFL opcodes */
+#define MASK_BSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+
+enum {
+ OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
+ OPC_SEB = (0x10 << 6) | OPC_BSHFL,
+ OPC_SEH = (0x18 << 6) | OPC_BSHFL,
+ OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp (010.00 to 010.11) */
+ OPC_ALIGN_1 = (0x09 << 6) | OPC_BSHFL,
+ OPC_ALIGN_2 = (0x0A << 6) | OPC_BSHFL,
+ OPC_ALIGN_3 = (0x0B << 6) | OPC_BSHFL,
+ OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */
+};
+
+/* DBSHFL opcodes */
+#define MASK_DBSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+
+enum {
+ OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
+ OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
+ OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp (01.000 to 01.111) */
+ OPC_DALIGN_1 = (0x09 << 6) | OPC_DBSHFL,
+ OPC_DALIGN_2 = (0x0A << 6) | OPC_DBSHFL,
+ OPC_DALIGN_3 = (0x0B << 6) | OPC_DBSHFL,
+ OPC_DALIGN_4 = (0x0C << 6) | OPC_DBSHFL,
+ OPC_DALIGN_5 = (0x0D << 6) | OPC_DBSHFL,
+ OPC_DALIGN_6 = (0x0E << 6) | OPC_DBSHFL,
+ OPC_DALIGN_7 = (0x0F << 6) | OPC_DBSHFL,
+ OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */
+};
+
+/* MIPS DSP REGIMM opcodes */
+enum {
+ OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM,
+ OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM,
+};
+
+#define MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+/* MIPS DSP Load */
+enum {
+ OPC_LBUX = (0x06 << 6) | OPC_LX_DSP,
+ OPC_LHX = (0x04 << 6) | OPC_LX_DSP,
+ OPC_LWX = (0x00 << 6) | OPC_LX_DSP,
+ OPC_LDX = (0x08 << 6) | OPC_LX_DSP,
+};
+
+#define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_S_QB = (0x04 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP,
+ OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP,
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP,
+};
+
+#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E
+#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_R_W = (0x12 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP,
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP,
+};
+
+#define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBLA = (0x06 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP,
+};
+
+#define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP,
+ /* DSP Compare-Pick Sub-class */
+ OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP,
+};
+
+#define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP,
+};
+
+#define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP,
+};
+
+#define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_INSV = (0x00 << 6) | OPC_INSV_DSP,
+};
+
+#define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Append Sub-class */
+ OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP,
+ OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP,
+ OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP,
+};
+
+#define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_R_W = (0x04 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP,
+ OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP,
+ OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP,
+ OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP,
+ OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP,
+ OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP,
+};
+
+#define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP,
+};
+
+#define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP,
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP,
+};
+
+#define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Compare-Pick Sub-class */
+ OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP,
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP,
+};
+
+#define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Append Sub-class */
+ OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP,
+ OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP,
+ OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP,
+ OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP,
+};
+
+#define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP,
+ OPC_DSHILO = (0x1A << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP,
+};
+
+#define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP,
+};
+
+#define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSQ_S_W_QH = (0x05 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP,
+};
+
+#define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP,
+};
+
+/* Coprocessor 0 (rs field) */
+#define MASK_CP0(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+enum {
+ OPC_MFC0 = (0x00 << 21) | OPC_CP0,
+ OPC_DMFC0 = (0x01 << 21) | OPC_CP0,
+ OPC_MFHC0 = (0x02 << 21) | OPC_CP0,
+ OPC_MTC0 = (0x04 << 21) | OPC_CP0,
+ OPC_DMTC0 = (0x05 << 21) | OPC_CP0,
+ OPC_MTHC0 = (0x06 << 21) | OPC_CP0,
+ OPC_MFTR = (0x08 << 21) | OPC_CP0,
+ OPC_RDPGPR = (0x0A << 21) | OPC_CP0,
+ OPC_MFMC0 = (0x0B << 21) | OPC_CP0,
+ OPC_MTTR = (0x0C << 21) | OPC_CP0,
+ OPC_WRPGPR = (0x0E << 21) | OPC_CP0,
+ OPC_C0 = (0x10 << 21) | OPC_CP0,
+ OPC_C0_1 = (0x11 << 21) | OPC_CP0,
+ OPC_C0_2 = (0x12 << 21) | OPC_CP0,
+ OPC_C0_3 = (0x13 << 21) | OPC_CP0,
+ OPC_C0_4 = (0x14 << 21) | OPC_CP0,
+ OPC_C0_5 = (0x15 << 21) | OPC_CP0,
+ OPC_C0_6 = (0x16 << 21) | OPC_CP0,
+ OPC_C0_7 = (0x17 << 21) | OPC_CP0,
+ OPC_C0_8 = (0x18 << 21) | OPC_CP0,
+ OPC_C0_9 = (0x19 << 21) | OPC_CP0,
+ OPC_C0_A = (0x1A << 21) | OPC_CP0,
+ OPC_C0_B = (0x1B << 21) | OPC_CP0,
+ OPC_C0_C = (0x1C << 21) | OPC_CP0,
+ OPC_C0_D = (0x1D << 21) | OPC_CP0,
+ OPC_C0_E = (0x1E << 21) | OPC_CP0,
+ OPC_C0_F = (0x1F << 21) | OPC_CP0,
+};
+
+/* MFMC0 opcodes */
+#define MASK_MFMC0(op) (MASK_CP0(op) | (op & 0xFFFF))
+
+enum {
+ OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
+ OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
+ OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0,
+ OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
+ OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
+ OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
+ OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0,
+ OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0,
+};
+
+/* Coprocessor 0 (with rs == C0) */
+#define MASK_C0(op) (MASK_CP0(op) | (op & 0x3F))
+
+enum {
+ OPC_TLBR = 0x01 | OPC_C0,
+ OPC_TLBWI = 0x02 | OPC_C0,
+ OPC_TLBINV = 0x03 | OPC_C0,
+ OPC_TLBINVF = 0x04 | OPC_C0,
+ OPC_TLBWR = 0x06 | OPC_C0,
+ OPC_TLBP = 0x08 | OPC_C0,
+ OPC_RFE = 0x10 | OPC_C0,
+ OPC_ERET = 0x18 | OPC_C0,
+ OPC_DERET = 0x1F | OPC_C0,
+ OPC_WAIT = 0x20 | OPC_C0,
+};
+
+#define MASK_CP2(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+enum {
+ OPC_MFC2 = (0x00 << 21) | OPC_CP2,
+ OPC_DMFC2 = (0x01 << 21) | OPC_CP2,
+ OPC_CFC2 = (0x02 << 21) | OPC_CP2,
+ OPC_MFHC2 = (0x03 << 21) | OPC_CP2,
+ OPC_MTC2 = (0x04 << 21) | OPC_CP2,
+ OPC_DMTC2 = (0x05 << 21) | OPC_CP2,
+ OPC_CTC2 = (0x06 << 21) | OPC_CP2,
+ OPC_MTHC2 = (0x07 << 21) | OPC_CP2,
+ OPC_BC2 = (0x08 << 21) | OPC_CP2,
+ OPC_BC2EQZ = (0x09 << 21) | OPC_CP2,
+ OPC_BC2NEZ = (0x0D << 21) | OPC_CP2,
+};
+
+#define MASK_LMMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F))
+
+enum {
+ OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDH = (26 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2,
+
+ OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBW = (27 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2,
+
+ OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2,
+ OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2,
+ OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2,
+ OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2,
+ OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2,
+
+ OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2,
+
+ OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2,
+ OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2,
+ OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2,
+ OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2,
+ OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2,
+ OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2,
+
+ OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTH = (27 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2,
+
+ OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2,
+ OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2,
+
+ OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2,
+ OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2,
+ OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2,
+
+ OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2,
+ OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2,
+ OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2,
+ OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2,
+ OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2,
+ OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2,
+
+ OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2,
+ OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2,
+ OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2,
+ OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2,
+ OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2,
+ OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2,
+
+ OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2,
+ OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2,
+ OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2,
+ OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2,
+ OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2,
+ OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2,
+
+ OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2,
+ OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2,
+ OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2,
+ OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2,
+ OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2,
+ OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2,
+};
+
+#define MASK_CP3(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ OPC_LWXC1 = 0x00 | OPC_CP3,
+ OPC_LDXC1 = 0x01 | OPC_CP3,
+ OPC_LUXC1 = 0x05 | OPC_CP3,
+ OPC_SWXC1 = 0x08 | OPC_CP3,
+ OPC_SDXC1 = 0x09 | OPC_CP3,
+ OPC_SUXC1 = 0x0D | OPC_CP3,
+ OPC_PREFX = 0x0F | OPC_CP3,
+ OPC_ALNV_PS = 0x1E | OPC_CP3,
+ OPC_MADD_S = 0x20 | OPC_CP3,
+ OPC_MADD_D = 0x21 | OPC_CP3,
+ OPC_MADD_PS = 0x26 | OPC_CP3,
+ OPC_MSUB_S = 0x28 | OPC_CP3,
+ OPC_MSUB_D = 0x29 | OPC_CP3,
+ OPC_MSUB_PS = 0x2E | OPC_CP3,
+ OPC_NMADD_S = 0x30 | OPC_CP3,
+ OPC_NMADD_D = 0x31 | OPC_CP3,
+ OPC_NMADD_PS = 0x36 | OPC_CP3,
+ OPC_NMSUB_S = 0x38 | OPC_CP3,
+ OPC_NMSUB_D = 0x39 | OPC_CP3,
+ OPC_NMSUB_PS = 0x3E | OPC_CP3,
+};
+
+/*
+ * MMI (MultiMedia Instruction) encodings
+ * ======================================
+ *
+ * MMI instructions encoding table keys:
+ *
+ * * This code is reserved for future use. An attempt to execute it
+ * causes a Reserved Instruction exception.
+ * % This code indicates an instruction class. The instruction word
+ * must be further decoded by examining additional tables that show
+ * the values for other instruction fields.
+ * # This code is reserved for the unsupported instructions DMULT,
+ * DMULTU, DDIV, DDIVU, LL, LLD, SC, SCD, LWC2 and SWC2. An attempt
+ * to execute it causes a Reserved Instruction exception.
+ *
+ * MMI instructions encoded by opcode field (MMI, LQ, SQ):
+ *
+ * 31 26 0
+ * +--------+----------------------------------------+
+ * | opcode | |
+ * +--------+----------------------------------------+
+ *
+ * opcode bits 28..26
+ * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * 31..29 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111
+ * -------+-------+-------+-------+-------+-------+-------+-------+-------
+ * 0 000 |SPECIAL| REGIMM| J | JAL | BEQ | BNE | BLEZ | BGTZ
+ * 1 001 | ADDI | ADDIU | SLTI | SLTIU | ANDI | ORI | XORI | LUI
+ * 2 010 | COP0 | COP1 | * | * | BEQL | BNEL | BLEZL | BGTZL
+ * 3 011 | DADDI | DADDIU| LDL | LDR | MMI% | * | LQ | SQ
+ * 4 100 | LB | LH | LWL | LW | LBU | LHU | LWR | LWU
+ * 5 101 | SB | SH | SWL | SW | SDL | SDR | SWR | CACHE
+ * 6 110 | # | LWC1 | # | PREF | # | LDC1 | # | LD
+ * 7 111 | # | SWC1 | # | * | # | SDC1 | # | SD
+ */
+
+enum {
+ MMI_OPC_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */
+ MMI_OPC_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */
+};
+
+/*
+ * MMI instructions with opcode field = MMI:
+ *
+ * 31 26 5 0
+ * +--------+-------------------------------+--------+
+ * | MMI | |function|
+ * +--------+-------------------------------+--------+
+ *
+ * function bits 2..0
+ * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * 5..3 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111
+ * -------+-------+-------+-------+-------+-------+-------+-------+-------
+ * 0 000 | MADD | MADDU | * | * | PLZCW | * | * | *
+ * 1 001 | MMI0% | MMI2% | * | * | * | * | * | *
+ * 2 010 | MFHI1 | MTHI1 | MFLO1 | MTLO1 | * | * | * | *
+ * 3 011 | MULT1 | MULTU1| DIV1 | DIVU1 | * | * | * | *
+ * 4 100 | MADD1 | MADDU1| * | * | * | * | * | *
+ * 5 101 | MMI1% | MMI3% | * | * | * | * | * | *
+ * 6 110 | PMFHL | PMTHL | * | * | PSLLH | * | PSRLH | PSRAH
+ * 7 111 | * | * | * | * | PSLLW | * | PSRLW | PSRAW
+ */
+
+#define MASK_MMI(op) (MASK_OP_MAJOR(op) | ((op) & 0x3F))
+enum {
+ MMI_OPC_MADD = 0x00 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADD */
+ MMI_OPC_MADDU = 0x01 | MMI_OPC_CLASS_MMI, /* Same as OPC_MADDU */
+ MMI_OPC_MULT1 = 0x18 | MMI_OPC_CLASS_MMI, /* Same minor as OPC_MULT */
+ MMI_OPC_MULTU1 = 0x19 | MMI_OPC_CLASS_MMI, /* Same min. as OPC_MULTU */
+ MMI_OPC_DIV1 = 0x1A | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIV */
+ MMI_OPC_DIVU1 = 0x1B | MMI_OPC_CLASS_MMI, /* Same minor as OPC_DIVU */
+ MMI_OPC_MADD1 = 0x20 | MMI_OPC_CLASS_MMI,
+ MMI_OPC_MADDU1 = 0x21 | MMI_OPC_CLASS_MMI,
+};
+
+/* global register indices */
+TCGv cpu_gpr[32], cpu_PC;
+/*
+ * For CPUs using 128-bit GPR registers, we put the lower halves in cpu_gpr[]
+ * and the upper halves in cpu_gpr_hi[].
+ */
+TCGv_i64 cpu_gpr_hi[32];
+TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
+static TCGv cpu_dspctrl, btarget;
+TCGv bcond;
+static TCGv cpu_lladdr, cpu_llval;
+static TCGv_i32 hflags;
+TCGv_i32 fpu_fcr0, fpu_fcr31;
+TCGv_i64 fpu_f64[32];
+
+#include "exec/gen-icount.h"
+
+#define DISAS_STOP DISAS_TARGET_0
+#define DISAS_EXIT DISAS_TARGET_1
+
+static const char regnames_HI[][4] = {
+ "HI0", "HI1", "HI2", "HI3",
+};
+
+static const char regnames_LO[][4] = {
+ "LO0", "LO1", "LO2", "LO3",
+};
+
+/* General-purpose register moves. */
+void gen_load_gpr(TCGv t, int reg)
+{
+ if (reg == 0) {
+ tcg_gen_movi_tl(t, 0);
+ } else {
+ tcg_gen_mov_tl(t, cpu_gpr[reg]);
+ }
+}
+
+void gen_store_gpr(TCGv t, int reg)
+{
+ if (reg != 0) {
+ tcg_gen_mov_tl(cpu_gpr[reg], t);
+ }
+}
+
+#if defined(TARGET_MIPS64)
+void gen_load_gpr_hi(TCGv_i64 t, int reg)
+{
+ if (reg == 0) {
+ tcg_gen_movi_i64(t, 0);
+ } else {
+ tcg_gen_mov_i64(t, cpu_gpr_hi[reg]);
+ }
+}
+
+void gen_store_gpr_hi(TCGv_i64 t, int reg)
+{
+ if (reg != 0) {
+ tcg_gen_mov_i64(cpu_gpr_hi[reg], t);
+ }
+}
+#endif /* TARGET_MIPS64 */
+
+/* Moves to/from shadow registers. */
+static inline void gen_load_srsgpr(int from, int to)
+{
+ TCGv t0 = tcg_temp_new();
+
+ if (from == 0) {
+ tcg_gen_movi_tl(t0, 0);
+ } else {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_ptr addr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
+ tcg_gen_add_ptr(addr, cpu_env, addr);
+
+ tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
+ tcg_temp_free_ptr(addr);
+ tcg_temp_free_i32(t2);
+ }
+ gen_store_gpr(t0, to);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_store_srsgpr(int from, int to)
+{
+ if (to != 0) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_ptr addr = tcg_temp_new_ptr();
+
+ gen_load_gpr(t0, from);
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
+ tcg_gen_add_ptr(addr, cpu_env, addr);
+
+ tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
+ tcg_temp_free_ptr(addr);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free(t0);
+ }
+}
+
+/* CPU state save/restore and exception generation. */
+static inline void gen_save_pc(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_PC, pc);
+}
+
+static inline void save_cpu_state(DisasContext *ctx, int do_save_pc)
+{
+ LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
+ if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) {
+ gen_save_pc(ctx->base.pc_next);
+ ctx->saved_pc = ctx->base.pc_next;
+ }
+ if (ctx->hflags != ctx->saved_hflags) {
+ tcg_gen_movi_i32(hflags, ctx->hflags);
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ tcg_gen_movi_tl(btarget, ctx->btarget);
+ break;
+ }
+ }
+}
+
+static inline void restore_cpu_state(CPUMIPSState *env, DisasContext *ctx)
+{
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ ctx->btarget = env->btarget;
+ break;
+ }
+}
+
+void generate_exception_err(DisasContext *ctx, int excp, int err)
+{
+ save_cpu_state(ctx, 1);
+ gen_helper_raise_exception_err(cpu_env, tcg_constant_i32(excp),
+ tcg_constant_i32(err));
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+void generate_exception(DisasContext *ctx, int excp)
+{
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+}
+
+void generate_exception_end(DisasContext *ctx, int excp)
+{
+ generate_exception_err(ctx, excp, 0);
+}
+
+void gen_reserved_instruction(DisasContext *ctx)
+{
+ generate_exception_end(ctx, EXCP_RI);
+}
+
+/* Floating point register moves. */
+void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_FRE) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
+}
+
+void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ TCGv_i64 t64;
+ if (ctx->hflags & MIPS_HFLAG_FRE) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
+ tcg_temp_free_i64(t64);
+}
+
+static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_extrh_i64_i32(t, fpu_f64[reg]);
+ } else {
+ gen_load_fpr32(ctx, t, reg | 1);
+ }
+}
+
+static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
+ tcg_temp_free_i64(t64);
+ } else {
+ gen_store_fpr32(ctx, t, reg | 1);
+ }
+}
+
+void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_mov_i64(t, fpu_f64[reg]);
+ } else {
+ tcg_gen_concat32_i64(t, fpu_f64[reg & ~1], fpu_f64[reg | 1]);
+ }
+}
+
+void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_mov_i64(fpu_f64[reg], t);
+ } else {
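+        /*
+         * FR=0: the 64-bit value is split across the low halves of an
+         * even/odd pair of FPRs.
+         */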
+ TCGv_i64 t0;
+ tcg_gen_deposit_i64(fpu_f64[reg & ~1], fpu_f64[reg & ~1], t, 0, 32);
+ t0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t0, t, 32);
+ tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32);
+ tcg_temp_free_i64(t0);
+ }
+}
+
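+/*
+ * FCSR condition codes: CC0 is bit 23 and CC1..CC7 are bits 25..31
+ * (bit 24 is the FS bit), hence the "24 + cc" below.
+ */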
+int get_fp_bit(int cc)
+{
+ if (cc) {
+ return 24 + cc;
+ } else {
+ return 23;
+ }
+}
+
+/* Address computation */
+void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
+{
+ tcg_gen_add_tl(ret, arg0, arg1);
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ tcg_gen_ext32s_i64(ret, ret);
+ }
+#endif
+}
+
+static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base,
+ target_long ofs)
+{
+ tcg_gen_addi_tl(ret, base, ofs);
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ tcg_gen_ext32s_i64(ret, ret);
+ }
+#endif
+}
+
+/* Address computation (translation time) */
+static target_long addr_add(DisasContext *ctx, target_long base,
+ target_long offset)
+{
+ target_long sum = base + offset;
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ sum = (int32_t)sum;
+ }
+#endif
+ return sum;
+}
+
+/* Sign-extract the low 32 bits to a target_long. */
+void gen_move_low32(TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+ tcg_gen_ext32s_i64(ret, arg);
+#else
+ tcg_gen_extrl_i64_i32(ret, arg);
+#endif
+}
+
+/* Sign-extract the high 32 bits to a target_long. */
+void gen_move_high32(TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+ tcg_gen_sari_i64(ret, arg, 32);
+#else
+ tcg_gen_extrh_i64_i32(ret, arg);
+#endif
+}
+
+bool check_cp0_enabled(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) {
+ generate_exception_end(ctx, EXCP_CpU);
+ return false;
+ }
+ return true;
+}
+
+void check_cp1_enabled(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU))) {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+}
+
+/*
+ * Verify that the processor is running with COP1X instructions enabled.
+ * This is associated with the nabla symbol in the MIPS32 and MIPS64
+ * opcode tables.
+ */
+void check_cop1x(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * Verify that the processor is running with 64-bit floating-point
+ * operations enabled.
+ */
+void check_cp1_64bitmode(DisasContext *ctx)
+{
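+    /*
+     * Both F64 and COP1X must be set; if either is missing, its bit
+     * survives the complement and the AND below is non-zero.
+     */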
+ if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * Verify that a floating-point register is valid; an operation is not defined
+ * if bit 0 of any register specification is set and the FR bit in the
+ * Status register equals zero, since the register numbers specify an
+ * even-odd pair of adjacent coprocessor general registers. When the FR bit
+ * in the Status register equals one, both even and odd register numbers
+ * are valid. This limitation exists only for 64 bit wide (d,l,ps) registers.
+ *
+ * Multiple 64 bit wide registers can be checked by calling
+ * gen_op_cp1_registers(freg1 | freg2 | ... | fregN);
+ */
+void check_cp1_registers(DisasContext *ctx, int regs)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * Verify that the processor is running with DSP instructions enabled.
+ * This is enabled by CP0 Status register MX(24) bit.
+ */
+static inline void check_dsp(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) {
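+        /*
+         * DSP present but disabled raises the DSP Disabled exception;
+         * a CPU that lacks the ASE entirely raises Reserved Instruction.
+         */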
+ if (ctx->insn_flags & ASE_DSP) {
+ generate_exception_end(ctx, EXCP_DSPDIS);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ }
+}
+
+static inline void check_dsp_r2(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R2))) {
+ if (ctx->insn_flags & ASE_DSP) {
+ generate_exception_end(ctx, EXCP_DSPDIS);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ }
+}
+
+static inline void check_dsp_r3(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R3))) {
+ if (ctx->insn_flags & ASE_DSP) {
+ generate_exception_end(ctx, EXCP_DSPDIS);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * CPU does not support the instruction set corresponding to flags.
+ */
+void check_insn(DisasContext *ctx, uint64_t flags)
+{
+ if (unlikely(!(ctx->insn_flags & flags))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * CPU has the corresponding flag set, which indicates that the
+ * instruction has been removed.
+ */
+static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags)
+{
+ if (unlikely(ctx->insn_flags & flags)) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * The Linux kernel traps certain reserved instruction exceptions to
+ * emulate the corresponding instructions. In user mode QEMU takes the
+ * role of the kernel, so those traps are emulated by simply accepting
+ * the instructions.
+ *
+ * A reserved instruction exception is generated for flagged CPUs if
+ * QEMU runs in system mode.
+ */
+static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags)
+{
+#ifndef CONFIG_USER_ONLY
+ check_insn_opc_removed(ctx, flags);
+#endif
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * CPU does not support the 64-bit paired-single (PS) floating point
+ * data type.
+ */
+static inline void check_ps(DisasContext *ctx)
+{
+ if (unlikely(!ctx->ps)) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ check_cp1_64bitmode(ctx);
+}
+
+/*
+ * This code generates a "reserved instruction" exception if cpu is not
+ * 64-bit or 64-bit instructions are not enabled.
+ */
+void check_mips_64(DisasContext *ctx)
+{
+ if (unlikely((TARGET_LONG_BITS != 64) || !(ctx->hflags & MIPS_HFLAG_64))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline void check_mvh(DisasContext *ctx)
+{
+ if (unlikely(!ctx->mvh)) {
+ generate_exception(ctx, EXCP_RI);
+ }
+}
+#endif
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config5 XNP bit is set.
+ */
+static inline void check_xnp(DisasContext *ctx)
+{
+ if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_XNP))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config3 PW bit is NOT set.
+ */
+static inline void check_pw(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_PW)))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+#endif
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config3 MT bit is NOT set.
+ */
+static inline void check_mt(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * This code generates a "coprocessor unusable" exception if CP0 is not
+ * available, and, if that is not the case, generates a "reserved instruction"
+ * exception if the Config3 MT bit is NOT set. This is needed for
+ * availability control of some of the MT ASE instructions.
+ */
+static inline void check_cp0_mt(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0))) {
+ generate_exception_end(ctx, EXCP_CpU);
+ } else {
+ if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_MT)))) {
+ gen_reserved_instruction(ctx);
+ }
+ }
+}
+#endif
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config5 NMS bit is set.
+ */
+static inline void check_nms(DisasContext *ctx)
+{
+ if (unlikely(ctx->CP0_Config5 & (1 << CP0C5_NMS))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config5 NMS bit is set, and Config1 DL, Config1 IL, Config2 SL,
+ * Config2 TL, and Config5 L2C are unset.
+ */
+static inline void check_nms_dl_il_sl_tl_l2c(DisasContext *ctx)
+{
+ if (unlikely((ctx->CP0_Config5 & (1 << CP0C5_NMS)) &&
+ !(ctx->CP0_Config1 & (1 << CP0C1_DL)) &&
+ !(ctx->CP0_Config1 & (1 << CP0C1_IL)) &&
+ !(ctx->CP0_Config2 & (1 << CP0C2_SL)) &&
+ !(ctx->CP0_Config2 & (1 << CP0C2_TL)) &&
+ !(ctx->CP0_Config5 & (1 << CP0C5_L2C)))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * This code generates a "reserved instruction" exception if the
+ * Config5 EVA bit is NOT set.
+ */
+static inline void check_eva(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->CP0_Config5 & (1 << CP0C5_EVA)))) {
+ gen_reserved_instruction(ctx);
+ }
+}
+
+/*
+ * Define small wrappers for gen_load_fpr* so that we have a uniform
+ * calling interface for 32 and 64-bit FPRs. No sense in changing
+ * all callers for gen_load_fpr32 when we need the CTX parameter for
+ * this one use.
+ */
+#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y)
+#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y)
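+/*
+ * n is the 4-bit cond field of C.cond.fmt; in order, the sixteen
+ * conditions are f, un, eq, ueq, olt, ult, ole, ule, sf, ngle, seq,
+ * ngl, lt, nge, le, ngt.
+ */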
+#define FOP_CONDS(type, abs, fmt, ifmt, bits) \
+static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \
+ int ft, int fs, int cc) \
+{ \
+ TCGv_i##bits fp0 = tcg_temp_new_i##bits(); \
+ TCGv_i##bits fp1 = tcg_temp_new_i##bits(); \
+ switch (ifmt) { \
+ case FMT_PS: \
+ check_ps(ctx); \
+ break; \
+ case FMT_D: \
+ if (abs) { \
+ check_cop1x(ctx); \
+ } \
+ check_cp1_registers(ctx, fs | ft); \
+ break; \
+ case FMT_S: \
+ if (abs) { \
+ check_cop1x(ctx); \
+ } \
+ break; \
+ } \
+ gen_ldcmp_fpr##bits(ctx, fp0, fs); \
+ gen_ldcmp_fpr##bits(ctx, fp1, ft); \
+ switch (n) { \
+ case 0: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); \
+ break; \
+ case 1: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); \
+ break; \
+ case 2: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); \
+ break; \
+ case 3: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); \
+ break; \
+ case 4: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); \
+ break; \
+ case 5: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); \
+ break; \
+ case 6: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); \
+ break; \
+ case 7: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); \
+ break; \
+ case 8: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); \
+ break; \
+ case 9: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); \
+ break; \
+ case 10: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); \
+ break; \
+ case 11: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); \
+ break; \
+ case 12: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); \
+ break; \
+ case 13: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); \
+ break; \
+ case 14: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); \
+ break; \
+ case 15: \
+ gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ tcg_temp_free_i##bits(fp0); \
+ tcg_temp_free_i##bits(fp1); \
+}
+
+FOP_CONDS(, 0, d, FMT_D, 64)
+FOP_CONDS(abs, 1, d, FMT_D, 64)
+FOP_CONDS(, 0, s, FMT_S, 32)
+FOP_CONDS(abs, 1, s, FMT_S, 32)
+FOP_CONDS(, 0, ps, FMT_PS, 64)
+FOP_CONDS(abs, 1, ps, FMT_PS, 64)
+#undef FOP_CONDS
+
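+/*
+ * R6 CMP.cond.fmt comparisons write the comparison result into fd via
+ * STORE (all ones on true, per the R6 CMP.cond.fmt definition) instead
+ * of setting an FCSR condition bit; cases 8..15 and 25..27 are the
+ * signaling variants.
+ */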
+#define FOP_CONDNS(fmt, ifmt, bits, STORE) \
+static inline void gen_r6_cmp_ ## fmt(DisasContext *ctx, int n, \
+ int ft, int fs, int fd) \
+{ \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ if (ifmt == FMT_D) { \
+ check_cp1_registers(ctx, fs | ft | fd); \
+ } \
+ gen_ldcmp_fpr ## bits(ctx, fp0, fs); \
+ gen_ldcmp_fpr ## bits(ctx, fp1, ft); \
+ switch (n) { \
+ case 0: \
+ gen_helper_r6_cmp_ ## fmt ## _af(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 1: \
+ gen_helper_r6_cmp_ ## fmt ## _un(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 2: \
+ gen_helper_r6_cmp_ ## fmt ## _eq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 3: \
+ gen_helper_r6_cmp_ ## fmt ## _ueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 4: \
+ gen_helper_r6_cmp_ ## fmt ## _lt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 5: \
+ gen_helper_r6_cmp_ ## fmt ## _ult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 6: \
+ gen_helper_r6_cmp_ ## fmt ## _le(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 7: \
+ gen_helper_r6_cmp_ ## fmt ## _ule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 8: \
+ gen_helper_r6_cmp_ ## fmt ## _saf(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 9: \
+ gen_helper_r6_cmp_ ## fmt ## _sun(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 10: \
+ gen_helper_r6_cmp_ ## fmt ## _seq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 11: \
+ gen_helper_r6_cmp_ ## fmt ## _sueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 12: \
+ gen_helper_r6_cmp_ ## fmt ## _slt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 13: \
+ gen_helper_r6_cmp_ ## fmt ## _sult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 14: \
+ gen_helper_r6_cmp_ ## fmt ## _sle(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 15: \
+ gen_helper_r6_cmp_ ## fmt ## _sule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 17: \
+ gen_helper_r6_cmp_ ## fmt ## _or(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 18: \
+ gen_helper_r6_cmp_ ## fmt ## _une(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 19: \
+ gen_helper_r6_cmp_ ## fmt ## _ne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 25: \
+ gen_helper_r6_cmp_ ## fmt ## _sor(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 26: \
+ gen_helper_r6_cmp_ ## fmt ## _sune(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 27: \
+ gen_helper_r6_cmp_ ## fmt ## _sne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ STORE; \
+ tcg_temp_free_i ## bits(fp0); \
+ tcg_temp_free_i ## bits(fp1); \
+}
+
+FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
+FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
+#undef FOP_CONDNS
+#undef gen_ldcmp_fpr32
+#undef gen_ldcmp_fpr64
+
+/* load/store instructions. */
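+/*
+ * LL records the load address and the loaded value in env->lladdr and
+ * env->llval so that a later SC can be checked against them (see
+ * gen_st_cond below).
+ */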
+#ifdef CONFIG_USER_ONLY
+#define OP_LD_ATOMIC(insn, fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
+ DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_mov_tl(t0, arg1); \
+ tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
+ tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
+ tcg_temp_free(t0); \
+}
+#else
+#define OP_LD_ATOMIC(insn, fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
+ DisasContext *ctx) \
+{ \
+ gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \
+}
+#endif
+OP_LD_ATOMIC(ll, ld32s);
+#if defined(TARGET_MIPS64)
+OP_LD_ATOMIC(lld, ld64);
+#endif
+#undef OP_LD_ATOMIC
+
+void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, int offset)
+{
+ if (base == 0) {
+ tcg_gen_movi_tl(addr, offset);
+ } else if (offset == 0) {
+ gen_load_gpr(addr, base);
+ } else {
+ tcg_gen_movi_tl(addr, offset);
+ gen_op_addr_add(ctx, addr, cpu_gpr[base], addr);
+ }
+}
+
+static target_ulong pc_relative_pc(DisasContext *ctx)
+{
+ target_ulong pc = ctx->base.pc_next;
+
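+    /*
+     * In a branch delay slot, pc_next includes the delay-slot insn, so
+     * rewind by its size (2 bytes for a 16-bit delay slot, 4 otherwise)
+     * before using the PC as a base.
+     */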
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4;
+
+ pc -= branch_bytes;
+ }
+
+ pc &= ~(target_ulong)3;
+ return pc;
+}
+
+/* Load */
+static void gen_ld(DisasContext *ctx, uint32_t opc,
+ int rt, int base, int offset)
+{
+ TCGv t0, t1, t2;
+ int mem_idx = ctx->mem_idx;
+
+ if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F |
+ INSN_LOONGSON3A)) {
+ /*
+         * Loongson CPUs use a load to the zero register as a prefetch
+         * hint; we emulate it as a NOP. On other CPUs we must perform
+         * the actual memory access.
+ */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_LWU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LD:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LLD:
+ case R6_OPC_LLD:
+ op_ld_lld(t0, t0, mem_idx, ctx);
+ gen_store_gpr(t0, rt);
+ break;
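+    /*
+     * LDL/LDR (and LWL/LWR below) emulate an unaligned load by aligning
+     * the address down, loading the whole doubleword, shifting the
+     * wanted bytes into place and merging them with the preserved bytes
+     * of rt under a computed mask.
+     */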
+ case OPC_LDL:
+ t1 = tcg_temp_new();
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 7);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LDR:
+ t1 = tcg_temp_new();
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 7);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 63);
+ t2 = tcg_const_tl(0xfffffffffffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LDPC:
+ t1 = tcg_const_tl(pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ gen_store_gpr(t0, rt);
+ break;
+#endif
+ case OPC_LWPC:
+ t1 = tcg_const_tl(pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LWE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LW:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LHE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LH:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LHUE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LHU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LBE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LB:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_SB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LBUE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LBU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_UB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LWLE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LWL:
+ t1 = tcg_temp_new();
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 3);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LWRE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LWR:
+ t1 = tcg_temp_new();
+ /*
+ * Do a byte access to possibly trigger a page
+ * fault with the unaligned address.
+ */
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 3);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 31);
+ t2 = tcg_const_tl(0xfffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LLE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_LL:
+ case R6_OPC_LL:
+ op_ld_ll(t0, t0, mem_idx, ctx);
+ gen_store_gpr(t0, rt);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Store */
+static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
+ int base, int offset)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int mem_idx = ctx->mem_idx;
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_SD:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SDL:
+ gen_helper_0e2i(sdl, t1, t0, mem_idx);
+ break;
+ case OPC_SDR:
+ gen_helper_0e2i(sdr, t1, t0, mem_idx);
+ break;
+#endif
+ case OPC_SWE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_SW:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SHE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_SH:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SBE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_SB:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
+ break;
+ case OPC_SWLE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_SWL:
+ gen_helper_0e2i(swl, t1, t0, mem_idx);
+ break;
+ case OPC_SWRE:
+ mem_idx = MIPS_HFLAG_UM;
+ /* fall through */
+ case OPC_SWR:
+ gen_helper_0e2i(swr, t1, t0, mem_idx);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Store conditional */
+static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
+ MemOp tcg_mo, bool eva)
+{
+ TCGv addr, t0, val;
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
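+    /*
+     * SC is emulated with a compare-and-swap: it can only succeed when
+     * the address matches the preceding LL's lladdr and memory still
+     * holds the value (llval) that the LL loaded.
+     */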
+ t0 = tcg_temp_new();
+ addr = tcg_temp_new();
+ /* compare the address against that of the preceding LL */
+ gen_base_offset_addr(ctx, addr, base, offset);
+ tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
+ tcg_temp_free(addr);
+ tcg_gen_movi_tl(t0, 0);
+ gen_store_gpr(t0, rt);
+ tcg_gen_br(done);
+
+ gen_set_label(l1);
+ /* generate cmpxchg */
+ val = tcg_temp_new();
+ gen_load_gpr(val, rt);
+ tcg_gen_atomic_cmpxchg_tl(t0, cpu_lladdr, cpu_llval, val,
+ eva ? MIPS_HFLAG_UM : ctx->mem_idx, tcg_mo);
+ tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(val);
+
+ gen_set_label(done);
+ tcg_temp_free(t0);
+}
+
+/* Floating-point load and store */
+static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
+ TCGv t0)
+{
+ /*
+ * Don't do NOP if destination is zero: we must perform the actual
+ * memory access.
+ */
+ switch (opc) {
+ case OPC_LWC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr32(ctx, fp0, ft);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SWC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, ft);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LDC1:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr64(ctx, fp0, ft);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SDC1:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, ft);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ default:
+ MIPS_INVAL("flt_ldst");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt,
+ int rs, int16_t imm)
+{
+ TCGv t0 = tcg_temp_new();
+
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ switch (op) {
+ case OPC_LDC1:
+ case OPC_SDC1:
+ check_insn(ctx, ISA_MIPS2);
+            /* fall through */
+ default:
+ gen_base_offset_addr(ctx, t0, rs, imm);
+ gen_flt_ldst(ctx, op, rt, t0);
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ tcg_temp_free(t0);
+}
+
+/* Arithmetic with immediate operand */
+static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+
+ if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) {
+ /*
+ * If no destination, treat it as a NOP.
+ * For addi, we must generate the overflow exception when needed.
+ */
+ return;
+ }
+ switch (opc) {
+ case OPC_ADDI:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ tcg_gen_addi_tl(t0, t1, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
+
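+            /*
+             * (rs ^ ~imm) is negative when rs and imm have the same
+             * sign; ANDed with (result ^ imm) it is negative exactly
+             * when signed overflow occurred.
+             */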
+ tcg_gen_xori_tl(t1, t1, ~uimm);
+ tcg_gen_xori_tl(t2, t0, uimm);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_ADDIU:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DADDI:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ tcg_gen_addi_tl(t0, t1, uimm);
+
+ tcg_gen_xori_tl(t1, t1, ~uimm);
+ tcg_gen_xori_tl(t2, t0, uimm);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DADDIU:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+#endif
+ }
+}
+
+/* Logic with immediate operand */
+static void gen_logic_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+ uimm = (uint16_t)imm;
+ switch (opc) {
+ case OPC_ANDI:
+ if (likely(rs != 0)) {
+ tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], 0);
+ }
+ break;
+ case OPC_ORI:
+ if (rs != 0) {
+ tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ case OPC_XORI:
+ if (likely(rs != 0)) {
+ tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ case OPC_LUI:
+ if (rs != 0 && (ctx->insn_flags & ISA_MIPS_R6)) {
+ /* OPC_AUI */
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], imm << 16);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Set on less than with immediate operand */
+static void gen_slt_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_SLTI:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_SLTIU:
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Shifts with immediate operand */
+static void gen_shift_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = ((uint16_t)imm) & 0x1f;
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_SLL:
+ tcg_gen_shli_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ break;
+ case OPC_SRA:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_SRL:
+ if (uimm != 0) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_ROTR:
+ if (uimm != 0) {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_rotri_i32(t1, t1, uimm);
+ tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
+ tcg_temp_free_i32(t1);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSLL:
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DSRA:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DSRL:
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DROTR:
+ if (uimm != 0) {
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_DSLL32:
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DSRA32:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DSRL32:
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DROTR32:
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+/* Arithmetic */
+static void gen_arith(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
+ && opc != OPC_DADD && opc != OPC_DSUB) {
+ /*
+ * If no destination, treat it as a NOP.
+ * For add & sub, we must generate the overflow exception when needed.
+ */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_ADD:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_ADDU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_SUB:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /*
+ * operands of different sign, first operand and the result
+ * of different sign
+ */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_SUBU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DADD:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DADDU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_DSUB:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /*
+ * Operands of different sign, first operand and result different
+ * sign.
+ */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DSUBU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+#endif
+ case OPC_MUL:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_mul_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Conditional move */
+static void gen_cond_move(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1, t2;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ t1 = tcg_const_tl(0);
+ t2 = tcg_temp_new();
+ gen_load_gpr(t2, rs);
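+    /*
+     * MOVN/MOVZ leave rd unchanged when the condition fails, whereas
+     * the R6 SELNEZ/SELEQZ write zero instead.
+     */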
+ switch (opc) {
+ case OPC_MOVN:
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, cpu_gpr[rd]);
+ break;
+ case OPC_MOVZ:
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, cpu_gpr[rd]);
+ break;
+ case OPC_SELNEZ:
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, t1);
+ break;
+ case OPC_SELEQZ:
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1);
+ break;
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+/* Logic */
+static void gen_logic(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_AND:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_NOR:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
+ }
+ break;
+ case OPC_OR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_XOR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Set on less than */
+static void gen_slt(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_SLT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_SLTU:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Shifts */
+static void gen_shift(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+        /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_SLLV:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shl_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_SRAV:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_SRLV:
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_ROTRV:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_andi_i32(t2, t2, 0x1f);
+ tcg_gen_rotr_i32(t2, t3, t2);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSLLV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DSRAV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DSRLV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DROTRV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
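+
+/*
+ * Illustrative note (not part of this patch): on a 64-bit target the
+ * GPRs hold 32-bit values sign-extended, so OPC_SRLV must zero-extend
+ * before shifting while OPC_SRAV can shift the extended value directly:
+ *
+ *     uint64_t v = 0xffffffff80000000ull;   // 0x80000000, sign-extended
+ *     (uint32_t)(v >> 1);   // 0xc0000000: an extension bit leaks in
+ *     (uint32_t)v >> 1;     // 0x40000000: the architectural SRL result
+ */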
+
+/* Arithmetic on HI/LO registers */
+static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
+{
+ if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ if (acc != 0) {
+ check_dsp(ctx);
+ }
+
+ switch (opc) {
+ case OPC_MFHI:
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_HI[acc]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_gpr[reg], cpu_HI[acc]);
+ }
+ break;
+ case OPC_MFLO:
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_LO[acc]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_gpr[reg], cpu_LO[acc]);
+ }
+ break;
+ case OPC_MTHI:
+ if (reg != 0) {
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_gpr[reg]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_HI[acc], cpu_gpr[reg]);
+ }
+ } else {
+ tcg_gen_movi_tl(cpu_HI[acc], 0);
+ }
+ break;
+ case OPC_MTLO:
+ if (reg != 0) {
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_gpr[reg]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_LO[acc], cpu_gpr[reg]);
+ }
+ } else {
+ tcg_gen_movi_tl(cpu_LO[acc], 0);
+ }
+ break;
+ }
+}
+
+static inline void gen_r6_ld(target_long addr, int reg, int memidx,
+ MemOp memop)
+{
+ TCGv t0 = tcg_const_tl(addr);
+ tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ gen_store_gpr(t0, reg);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
+ int rs)
+{
+ target_long offset;
+ target_long addr;
+
+ switch (MASK_OPC_PCREL_TOP2BITS(opc)) {
+ case OPC_ADDIUPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case R6_OPC_LWPC:
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_LWUPC:
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL);
+ break;
+#endif
+ default:
+ switch (MASK_OPC_PCREL_TOP5BITS(opc)) {
+ case OPC_AUIPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode, 0, 16) << 16;
+ addr = addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case OPC_ALUIPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode, 0, 16) << 16;
+ addr = ~0xFFFF & addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */
+ case R6_OPC_LDPC + (1 << 16):
+ case R6_OPC_LDPC + (2 << 16):
+ case R6_OPC_LDPC + (3 << 16):
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 3, 0, 21);
+ addr = addr_add(ctx, (pc & ~0x7), offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ);
+ break;
+#endif
+ default:
+ MIPS_INVAL("OPC_PCREL");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ }
+}
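+
+/*
+ * Worked example (illustrative, not part of this patch) of the ADDIUPC
+ * computation above: the 19-bit immediate is scaled by 4 and
+ * sign-extended to 21 bits, giving a +/-1 MiB PC-relative range:
+ *
+ *     offset = sextract32(opcode << 2, 0, 21);   // imm19 * 4, signed
+ *     addr   = pc + offset;
+ */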
+
+static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case R6_OPC_DIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MOD:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MODU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MUL:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mul_i32(t2, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MUH:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MULU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mul_i32(t2, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MUHU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_DDIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMOD:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DDIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMODU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMUL:
+ tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
+ break;
+ case R6_OPC_DMUH:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMULU:
+ tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
+ break;
+ case R6_OPC_DMUHU:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+#endif
+ default:
+ MIPS_INVAL("r6 mul/div");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
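+
+/*
+ * Illustrative model (not part of this patch) of the branchless guard
+ * built by the setcond/movcond sequences above, shown for R6_OPC_DIV
+ * with 32-bit operands: the divisor is forced to 1 in the two cases
+ * that would trap on the host (the architectural result is UNPREDICTABLE
+ * for them anyway):
+ *
+ *     if (den == 0 || (num == INT_MIN && den == -1)) {
+ *         den = 1;
+ *     }
+ *     rd = (int32_t)(num / den);
+ */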
+
+#if defined(TARGET_MIPS64)
+static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case MMI_OPC_DIV1:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_LO[1], t0, t1);
+ tcg_gen_rem_tl(cpu_HI[1], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
+ tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case MMI_OPC_DIVU1:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_LO[1], t0, t1);
+ tcg_gen_remu_tl(cpu_HI[1], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]);
+ tcg_gen_ext32s_tl(cpu_HI[1], cpu_HI[1]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ default:
+ MIPS_INVAL("div1 TX79");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+#endif
+
+static void gen_muldiv(DisasContext *ctx, uint32_t opc,
+ int acc, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (acc != 0) {
+ check_dsp(ctx);
+ }
+
+ switch (opc) {
+ case OPC_DIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_remu_tl(cpu_HI[acc], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_MULT:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_MULTU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DDIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DDIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
+ tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DMULT:
+ tcg_gen_muls2_i64(cpu_LO[acc], cpu_HI[acc], t0, t1);
+ break;
+ case OPC_DMULTU:
+ tcg_gen_mulu2_i64(cpu_LO[acc], cpu_HI[acc], t0, t1);
+ break;
+#endif
+ case OPC_MADD:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MADDU:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MSUB:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MSUBU:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ default:
+ MIPS_INVAL("mul/div");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
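+
+/*
+ * Illustrative model (not part of this patch) of the OPC_MADD sequence
+ * above: {HI,LO} is treated as a single 64-bit accumulator, the signed
+ * 32x32->64 product is added, then the halves are split back out:
+ *
+ *     int64_t acc = ((int64_t)hi << 32) | (uint32_t)lo;
+ *     acc += (int64_t)(int32_t)rs * (int32_t)rt;
+ *     lo = (int32_t)acc;
+ *     hi = (int32_t)(acc >> 32);
+ */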
+
+/*
+ * These MULT[U] and MADD[U] instructions, as implemented in, for example,
+ * the Toshiba/Sony R5900 and the Toshiba TX19, TX39 and TX79 core
+ * architectures, are special three-operand variants with the syntax
+ *
+ * MULT[U][1] rd, rs, rt
+ *
+ * such that
+ *
+ * (rd, LO, HI) <- rs * rt
+ *
+ * and
+ *
+ * MADD[U][1] rd, rs, rt
+ *
+ * such that
+ *
+ * (rd, LO, HI) <- (LO, HI) + rs * rt
+ *
+ * where the low-order 32 bits of the result are placed into both the
+ * GPR rd and the special register LO. The high-order 32 bits of the
+ * result are placed into the special register HI.
+ *
+ * If the GPR rd is omitted in assembly language, it is taken to be 0,
+ * which is the zero register that always reads as 0.
+ */
+static void gen_mul_txx9(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int acc = 0;
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case MMI_OPC_MULT1:
+ acc = 1;
+ /* Fall through */
+ case OPC_MULT:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ if (rd) {
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ }
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case MMI_OPC_MULTU1:
+ acc = 1;
+ /* Fall through */
+ case OPC_MULTU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ if (rd) {
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ }
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case MMI_OPC_MADD1:
+ acc = 1;
+ /* Fall through */
+ case MMI_OPC_MADD:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ if (rd) {
+ gen_move_low32(cpu_gpr[rd], t2);
+ }
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case MMI_OPC_MADDU1:
+ acc = 1;
+ /* Fall through */
+ case MMI_OPC_MADDU:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ if (rd) {
+ gen_move_low32(cpu_gpr[rd], t2);
+ }
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ default:
+ MIPS_INVAL("mul/madd TXx9");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_cl(DisasContext *ctx, uint32_t opc,
+ int rd, int rs)
+{
+ TCGv t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = cpu_gpr[rd];
+ gen_load_gpr(t0, rs);
+
+ switch (opc) {
+ case OPC_CLO:
+ case R6_OPC_CLO:
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case R6_OPC_DCLO:
+#endif
+ tcg_gen_not_tl(t0, t0);
+ break;
+ }
+
+ switch (opc) {
+ case OPC_CLO:
+ case R6_OPC_CLO:
+ case OPC_CLZ:
+ case R6_OPC_CLZ:
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_clzi_tl(t0, t0, TARGET_LONG_BITS);
+ tcg_gen_subi_tl(t0, t0, TARGET_LONG_BITS - 32);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case R6_OPC_DCLO:
+ case OPC_DCLZ:
+ case R6_OPC_DCLZ:
+ tcg_gen_clzi_i64(t0, t0, 64);
+ break;
+#endif
+ }
+}
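+
+/*
+ * Illustrative note (not part of this patch): CLO is reduced to CLZ of
+ * the complement, since clo(x) == clz(~x). For the 32-bit variants on a
+ * 64-bit target the input is zero-extended first, so clzi over-counts by
+ * TARGET_LONG_BITS - 32 leading zeros; the subi removes that bias:
+ *
+ *     clz32(x) == clz64((uint32_t)x) - 32
+ */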
+
+/* Godson integer instructions */
+static void gen_loongson_integer(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_MULT_G_2E:
+ case OPC_MULT_G_2F:
+ case OPC_MULTU_G_2E:
+ case OPC_MULTU_G_2F:
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT_G_2E:
+ case OPC_DMULT_G_2F:
+ case OPC_DMULTU_G_2E:
+ case OPC_DMULTU_G_2F:
+#endif
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ break;
+ default:
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+ break;
+ }
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case OPC_MULT_G_2E:
+ case OPC_MULT_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ break;
+ case OPC_MULTU_G_2E:
+ case OPC_MULTU_G_2F:
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ break;
+ case OPC_DIV_G_2E:
+ case OPC_DIV_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DIVU_G_2E:
+ case OPC_DIVU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l2);
+ }
+ break;
+ case OPC_MOD_G_2E:
+ case OPC_MOD_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_MODU_G_2E:
+ case OPC_MODU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l2);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT_G_2E:
+ case OPC_DMULT_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_DMULTU_G_2E:
+ case OPC_DMULTU_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_DDIV_G_2E:
+ case OPC_DDIV_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DDIVU_G_2E:
+ case OPC_DDIVU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l2);
+ }
+ break;
+ case OPC_DMOD_G_2E:
+ case OPC_DMOD_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DMODU_G_2E:
+ case OPC_DMODU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l2);
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
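+
+/*
+ * Illustrative model (not part of this patch) of the DIV_G branches
+ * above: the Loongson variants pin the corner cases to fixed results
+ * instead of guarding the divisor, hence the explicit labels:
+ *
+ *     if (den == 0)                          rd = 0;
+ *     else if (num == INT_MIN && den == -1)  rd = num;
+ *     else                                   rd = num / den;
+ */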
+
+/* Loongson multimedia instructions */
+static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt)
+{
+ uint32_t opc, shift_max;
+ TCGv_i64 t0, t1;
+ TCGCond cond;
+
+ opc = MASK_LMMI(ctx->opcode);
+ switch (opc) {
+ case OPC_ADD_CP2:
+ case OPC_SUB_CP2:
+ case OPC_DADD_CP2:
+ case OPC_DSUB_CP2:
+ t0 = tcg_temp_local_new_i64();
+ t1 = tcg_temp_local_new_i64();
+ break;
+ default:
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ break;
+ }
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, t0, rs);
+ gen_load_fpr64(ctx, t1, rt);
+
+ switch (opc) {
+ case OPC_PADDSH:
+ gen_helper_paddsh(t0, t0, t1);
+ break;
+ case OPC_PADDUSH:
+ gen_helper_paddush(t0, t0, t1);
+ break;
+ case OPC_PADDH:
+ gen_helper_paddh(t0, t0, t1);
+ break;
+ case OPC_PADDW:
+ gen_helper_paddw(t0, t0, t1);
+ break;
+ case OPC_PADDSB:
+ gen_helper_paddsb(t0, t0, t1);
+ break;
+ case OPC_PADDUSB:
+ gen_helper_paddusb(t0, t0, t1);
+ break;
+ case OPC_PADDB:
+ gen_helper_paddb(t0, t0, t1);
+ break;
+
+ case OPC_PSUBSH:
+ gen_helper_psubsh(t0, t0, t1);
+ break;
+ case OPC_PSUBUSH:
+ gen_helper_psubush(t0, t0, t1);
+ break;
+ case OPC_PSUBH:
+ gen_helper_psubh(t0, t0, t1);
+ break;
+ case OPC_PSUBW:
+ gen_helper_psubw(t0, t0, t1);
+ break;
+ case OPC_PSUBSB:
+ gen_helper_psubsb(t0, t0, t1);
+ break;
+ case OPC_PSUBUSB:
+ gen_helper_psubusb(t0, t0, t1);
+ break;
+ case OPC_PSUBB:
+ gen_helper_psubb(t0, t0, t1);
+ break;
+
+ case OPC_PSHUFH:
+ gen_helper_pshufh(t0, t0, t1);
+ break;
+ case OPC_PACKSSWH:
+ gen_helper_packsswh(t0, t0, t1);
+ break;
+ case OPC_PACKSSHB:
+ gen_helper_packsshb(t0, t0, t1);
+ break;
+ case OPC_PACKUSHB:
+ gen_helper_packushb(t0, t0, t1);
+ break;
+
+ case OPC_PUNPCKLHW:
+ gen_helper_punpcklhw(t0, t0, t1);
+ break;
+ case OPC_PUNPCKHHW:
+ gen_helper_punpckhhw(t0, t0, t1);
+ break;
+ case OPC_PUNPCKLBH:
+ gen_helper_punpcklbh(t0, t0, t1);
+ break;
+ case OPC_PUNPCKHBH:
+ gen_helper_punpckhbh(t0, t0, t1);
+ break;
+ case OPC_PUNPCKLWD:
+ gen_helper_punpcklwd(t0, t0, t1);
+ break;
+ case OPC_PUNPCKHWD:
+ gen_helper_punpckhwd(t0, t0, t1);
+ break;
+
+ case OPC_PAVGH:
+ gen_helper_pavgh(t0, t0, t1);
+ break;
+ case OPC_PAVGB:
+ gen_helper_pavgb(t0, t0, t1);
+ break;
+ case OPC_PMAXSH:
+ gen_helper_pmaxsh(t0, t0, t1);
+ break;
+ case OPC_PMINSH:
+ gen_helper_pminsh(t0, t0, t1);
+ break;
+ case OPC_PMAXUB:
+ gen_helper_pmaxub(t0, t0, t1);
+ break;
+ case OPC_PMINUB:
+ gen_helper_pminub(t0, t0, t1);
+ break;
+
+ case OPC_PCMPEQW:
+ gen_helper_pcmpeqw(t0, t0, t1);
+ break;
+ case OPC_PCMPGTW:
+ gen_helper_pcmpgtw(t0, t0, t1);
+ break;
+ case OPC_PCMPEQH:
+ gen_helper_pcmpeqh(t0, t0, t1);
+ break;
+ case OPC_PCMPGTH:
+ gen_helper_pcmpgth(t0, t0, t1);
+ break;
+ case OPC_PCMPEQB:
+ gen_helper_pcmpeqb(t0, t0, t1);
+ break;
+ case OPC_PCMPGTB:
+ gen_helper_pcmpgtb(t0, t0, t1);
+ break;
+
+ case OPC_PSLLW:
+ gen_helper_psllw(t0, t0, t1);
+ break;
+ case OPC_PSLLH:
+ gen_helper_psllh(t0, t0, t1);
+ break;
+ case OPC_PSRLW:
+ gen_helper_psrlw(t0, t0, t1);
+ break;
+ case OPC_PSRLH:
+ gen_helper_psrlh(t0, t0, t1);
+ break;
+ case OPC_PSRAW:
+ gen_helper_psraw(t0, t0, t1);
+ break;
+ case OPC_PSRAH:
+ gen_helper_psrah(t0, t0, t1);
+ break;
+
+ case OPC_PMULLH:
+ gen_helper_pmullh(t0, t0, t1);
+ break;
+ case OPC_PMULHH:
+ gen_helper_pmulhh(t0, t0, t1);
+ break;
+ case OPC_PMULHUH:
+ gen_helper_pmulhuh(t0, t0, t1);
+ break;
+ case OPC_PMADDHW:
+ gen_helper_pmaddhw(t0, t0, t1);
+ break;
+
+ case OPC_PASUBUB:
+ gen_helper_pasubub(t0, t0, t1);
+ break;
+ case OPC_BIADD:
+ gen_helper_biadd(t0, t0);
+ break;
+ case OPC_PMOVMSKB:
+ gen_helper_pmovmskb(t0, t0);
+ break;
+
+ case OPC_PADDD:
+ tcg_gen_add_i64(t0, t0, t1);
+ break;
+ case OPC_PSUBD:
+ tcg_gen_sub_i64(t0, t0, t1);
+ break;
+ case OPC_XOR_CP2:
+ tcg_gen_xor_i64(t0, t0, t1);
+ break;
+ case OPC_NOR_CP2:
+ tcg_gen_nor_i64(t0, t0, t1);
+ break;
+ case OPC_AND_CP2:
+ tcg_gen_and_i64(t0, t0, t1);
+ break;
+ case OPC_OR_CP2:
+ tcg_gen_or_i64(t0, t0, t1);
+ break;
+
+ case OPC_PANDN:
+ tcg_gen_andc_i64(t0, t1, t0);
+ break;
+
+ case OPC_PINSRH_0:
+ tcg_gen_deposit_i64(t0, t0, t1, 0, 16);
+ break;
+ case OPC_PINSRH_1:
+ tcg_gen_deposit_i64(t0, t0, t1, 16, 16);
+ break;
+ case OPC_PINSRH_2:
+ tcg_gen_deposit_i64(t0, t0, t1, 32, 16);
+ break;
+ case OPC_PINSRH_3:
+ tcg_gen_deposit_i64(t0, t0, t1, 48, 16);
+ break;
+
+ case OPC_PEXTRH:
+ tcg_gen_andi_i64(t1, t1, 3);
+ tcg_gen_shli_i64(t1, t1, 4);
+ tcg_gen_shr_i64(t0, t0, t1);
+ tcg_gen_ext16u_i64(t0, t0);
+ break;
+
+ case OPC_ADDU_CP2:
+ tcg_gen_add_i64(t0, t0, t1);
+ tcg_gen_ext32s_i64(t0, t0);
+ break;
+ case OPC_SUBU_CP2:
+ tcg_gen_sub_i64(t0, t0, t1);
+ tcg_gen_ext32s_i64(t0, t0);
+ break;
+
+    case OPC_SLL_CP2:
+    case OPC_SRL_CP2:
+    case OPC_SRA_CP2:
+        shift_max = 32;
+        goto do_shift;
+    case OPC_DSLL_CP2:
+    case OPC_DSRL_CP2:
+    case OPC_DSRA_CP2:
+        shift_max = 64;
+        goto do_shift;
+ do_shift:
+ /* Make sure shift count isn't TCG undefined behaviour. */
+ tcg_gen_andi_i64(t1, t1, shift_max - 1);
+
+ switch (opc) {
+ case OPC_SLL_CP2:
+ case OPC_DSLL_CP2:
+ tcg_gen_shl_i64(t0, t0, t1);
+ break;
+ case OPC_SRA_CP2:
+ case OPC_DSRA_CP2:
+ /*
+ * Since SRA is UndefinedResult without sign-extended inputs,
+ * we can treat SRA and DSRA the same.
+ */
+ tcg_gen_sar_i64(t0, t0, t1);
+ break;
+ case OPC_SRL_CP2:
+ /* We want to shift in zeros for SRL; zero-extend first. */
+ tcg_gen_ext32u_i64(t0, t0);
+ /* FALLTHRU */
+ case OPC_DSRL_CP2:
+ tcg_gen_shr_i64(t0, t0, t1);
+ break;
+ }
+
+ if (shift_max == 32) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+
+ /* Shifts larger than MAX produce zero. */
+ tcg_gen_setcondi_i64(TCG_COND_LTU, t1, t1, shift_max);
+ tcg_gen_neg_i64(t1, t1);
+ tcg_gen_and_i64(t0, t0, t1);
+ break;
+
+ case OPC_ADD_CP2:
+ case OPC_DADD_CP2:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGLabel *lab = gen_new_label();
+
+ tcg_gen_mov_i64(t2, t0);
+ tcg_gen_add_i64(t0, t1, t2);
+ if (opc == OPC_ADD_CP2) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+ tcg_gen_xor_i64(t1, t1, t2);
+ tcg_gen_xor_i64(t2, t2, t0);
+ tcg_gen_andc_i64(t1, t2, t1);
+ tcg_temp_free_i64(t2);
+ tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(lab);
+ break;
+ }
+
+ case OPC_SUB_CP2:
+ case OPC_DSUB_CP2:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGLabel *lab = gen_new_label();
+
+ tcg_gen_mov_i64(t2, t0);
+ tcg_gen_sub_i64(t0, t1, t2);
+ if (opc == OPC_SUB_CP2) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+ tcg_gen_xor_i64(t1, t1, t2);
+ tcg_gen_xor_i64(t2, t2, t0);
+ tcg_gen_and_i64(t1, t1, t2);
+ tcg_temp_free_i64(t2);
+ tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(lab);
+ break;
+ }
+
+ case OPC_PMULUW:
+ tcg_gen_ext32u_i64(t0, t0);
+ tcg_gen_ext32u_i64(t1, t1);
+ tcg_gen_mul_i64(t0, t0, t1);
+ break;
+
+ case OPC_SEQU_CP2:
+ case OPC_SEQ_CP2:
+ cond = TCG_COND_EQ;
+ goto do_cc_cond;
+ case OPC_SLTU_CP2:
+ cond = TCG_COND_LTU;
+ goto do_cc_cond;
+ case OPC_SLT_CP2:
+ cond = TCG_COND_LT;
+ goto do_cc_cond;
+ case OPC_SLEU_CP2:
+ cond = TCG_COND_LEU;
+ goto do_cc_cond;
+ case OPC_SLE_CP2:
+ cond = TCG_COND_LE;
+ do_cc_cond:
+ {
+ int cc = (ctx->opcode >> 8) & 0x7;
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ TCGv_i32 t32 = tcg_temp_new_i32();
+
+ tcg_gen_setcond_i64(cond, t64, t0, t1);
+ tcg_gen_extrl_i64_i32(t32, t64);
+ tcg_gen_deposit_i32(fpu_fcr31, fpu_fcr31, t32,
+ get_fp_bit(cc), 1);
+
+ tcg_temp_free_i32(t32);
+ tcg_temp_free_i64(t64);
+ }
+ goto no_rd;
+ default:
+ MIPS_INVAL("loongson_cp2");
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ gen_store_fpr64(ctx, t0, rd);
+
+no_rd:
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
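+
+/*
+ * Illustrative sketches (not part of this patch) of two tricks used
+ * above:
+ *
+ * 1. The shift cases turn a comparison into an all-ones/all-zeros mask
+ *    so out-of-range counts produce zero without a branch:
+ *
+ *        uint64_t mask = -(uint64_t)(count < shift_max);   // 0 or ~0
+ *        result &= mask;
+ *
+ * 2. OPC_ADD_CP2 detects signed overflow branch-free: it occurs iff the
+ *    operands agree in sign and the sum's sign differs from theirs:
+ *
+ *        bool ovf = (~(a ^ b) & (a ^ (a + b))) < 0;
+ */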
+
+static void gen_loongson_lswc2(DisasContext *ctx, int rt,
+ int rs, int rd)
+{
+ TCGv t0, t1, t2;
+ TCGv_i32 fp0;
+#if defined(TARGET_MIPS64)
+ int lsq_rt1 = ctx->opcode & 0x1f;
+ int lsq_offset = sextract32(ctx->opcode, 6, 9) << 4;
+#endif
+ int shf_offset = sextract32(ctx->opcode, 6, 8);
+
+ t0 = tcg_temp_new();
+
+ switch (MASK_LOONGSON_GSLSQ(ctx->opcode)) {
+#if defined(TARGET_MIPS64)
+ case OPC_GSLQ:
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t1, rt);
+ gen_store_gpr(t0, lsq_rt1);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSLQC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr64(ctx, t1, rt);
+ gen_store_fpr64(ctx, t0, lsq_rt1);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSQ:
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset);
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
+ gen_load_gpr(t1, lsq_rt1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSQC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset);
+ gen_load_fpr64(ctx, t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
+ gen_load_fpr64(ctx, t1, lsq_rt1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+#endif
+ case OPC_GSSHFL:
+ switch (MASK_LOONGSON_GSSHFLS(ctx->opcode)) {
+ case OPC_GSLWLC1:
+ check_cp1_enabled(ctx);
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ t1 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 3);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t1, fp0);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+#if defined(TARGET_MIPS64)
+ tcg_gen_extrl_i64_i32(fp0, t0);
+#else
+ tcg_gen_ext32s_tl(fp0, t0);
+#endif
+ gen_store_fpr32(ctx, fp0, rt);
+ tcg_temp_free_i32(fp0);
+ break;
+ case OPC_GSLWRC1:
+ check_cp1_enabled(ctx);
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ t1 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 3);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 31);
+ t2 = tcg_const_tl(0xfffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t1, fp0);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+#if defined(TARGET_MIPS64)
+ tcg_gen_extrl_i64_i32(fp0, t0);
+#else
+ tcg_gen_ext32s_tl(fp0, t0);
+#endif
+ gen_store_fpr32(ctx, fp0, rt);
+ tcg_temp_free_i32(fp0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSLDLC1:
+ check_cp1_enabled(ctx);
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ t1 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+ if (!cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 7);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_fpr64(ctx, t1, rt);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_fpr64(ctx, t0, rt);
+ break;
+ case OPC_GSLDRC1:
+ check_cp1_enabled(ctx);
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ t1 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+ if (cpu_is_bigendian(ctx)) {
+ tcg_gen_xori_tl(t1, t1, 7);
+ }
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 63);
+ t2 = tcg_const_tl(0xfffffffffffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_fpr64(ctx, t1, rt);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_fpr64(ctx, t0, rt);
+ break;
+#endif
+ default:
+ MIPS_INVAL("loongson_gsshfl");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_GSSHFS:
+ switch (MASK_LOONGSON_GSSHFLS(ctx->opcode)) {
+ case OPC_GSSWLC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t1, fp0);
+ gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSWRC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t1, fp0);
+ gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free(t1);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSSDLC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ gen_load_fpr64(ctx, t1, rt);
+ gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSDRC1:
+ check_cp1_enabled(ctx);
+ t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, rs, shf_offset);
+ gen_load_fpr64(ctx, t1, rt);
+ gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
+ tcg_temp_free(t1);
+ break;
+#endif
+ default:
+ MIPS_INVAL("loongson_gsshfs");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ default:
+ MIPS_INVAL("loongson_gslsq");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ tcg_temp_free(t0);
+}
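+
+/*
+ * Illustrative model (not part of this patch) of the GSLWLC1 merge
+ * above, i.e. the classic LWL-style left-part load, with n = addr & 3:
+ *
+ *     sh   = 8 * (big_endian ? n : 3 - n);
+ *     mask = 0xffffffffu << sh;
+ *     reg  = (aligned_word << sh) | (reg & ~mask);
+ *
+ * The initial MO_UB load appears to serve only as an access probe for
+ * the unaligned address; its value is immediately overwritten.
+ */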
+
+/* Loongson EXT LDC2/SDC2 */
+static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
+ int rs, int rd)
+{
+ int offset = sextract32(ctx->opcode, 3, 8);
+ uint32_t opc = MASK_LOONGSON_LSDC2(ctx->opcode);
+ TCGv t0, t1;
+ TCGv_i32 fp0;
+
+ /* Pre-conditions */
+ switch (opc) {
+ case OPC_GSLBX:
+ case OPC_GSLHX:
+ case OPC_GSLWX:
+ case OPC_GSLDX:
+ /* prefetch, implement as NOP */
+ if (rt == 0) {
+ return;
+ }
+ break;
+ case OPC_GSSBX:
+ case OPC_GSSHX:
+ case OPC_GSSWX:
+ case OPC_GSSDX:
+ break;
+ case OPC_GSLWXC1:
+#if defined(TARGET_MIPS64)
+ case OPC_GSLDXC1:
+#endif
+ check_cp1_enabled(ctx);
+ /* prefetch, implement as NOP */
+ if (rt == 0) {
+ return;
+ }
+ break;
+ case OPC_GSSWXC1:
+#if defined(TARGET_MIPS64)
+ case OPC_GSSDXC1:
+#endif
+ check_cp1_enabled(ctx);
+ break;
+ default:
+ MIPS_INVAL("loongson_lsdc2");
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+    gen_base_offset_addr(ctx, t0, rs, offset);
+    if (rd) {
+        gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
+    }
+
+ switch (opc) {
+ case OPC_GSLBX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_GSLHX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_GSLWX:
+ gen_base_offset_addr(ctx, t0, rs, offset);
+ if (rd) {
+ gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
+ }
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSLDX:
+ gen_base_offset_addr(ctx, t0, rs, offset);
+ if (rd) {
+ gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
+ }
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+#endif
+ case OPC_GSLWXC1:
+ gen_base_offset_addr(ctx, t0, rs, offset);
+ if (rd) {
+ gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
+ }
+ fp0 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr32(ctx, fp0, rt);
+ tcg_temp_free_i32(fp0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSLDXC1:
+ gen_base_offset_addr(ctx, t0, rs, offset);
+ if (rd) {
+ gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
+ }
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr64(ctx, t0, rt);
+ break;
+#endif
+ case OPC_GSSBX:
+ t1 = tcg_temp_new();
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_SB);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSHX:
+ t1 = tcg_temp_new();
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+ case OPC_GSSWX:
+ t1 = tcg_temp_new();
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSSDX:
+ t1 = tcg_temp_new();
+ gen_load_gpr(t1, rt);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+#endif
+ case OPC_GSSWXC1:
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i32(fp0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_GSSDXC1:
+ t1 = tcg_temp_new();
+ gen_load_fpr64(ctx, t1, rt);
+ tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free(t1);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ tcg_temp_free(t0);
+}
+
+/* Traps */
+static void gen_trap(DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int16_t imm)
+{
+ int cond;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ cond = 0;
+ /* Load needed operands */
+ switch (opc) {
+ case OPC_TEQ:
+ case OPC_TGE:
+ case OPC_TGEU:
+ case OPC_TLT:
+ case OPC_TLTU:
+ case OPC_TNE:
+ /* Compare two registers */
+ if (rs != rt) {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ cond = 1;
+ }
+ break;
+ case OPC_TEQI:
+ case OPC_TGEI:
+ case OPC_TGEIU:
+ case OPC_TLTI:
+ case OPC_TLTIU:
+ case OPC_TNEI:
+ /* Compare register to immediate */
+ if (rs != 0 || imm != 0) {
+ gen_load_gpr(t0, rs);
+ tcg_gen_movi_tl(t1, (int32_t)imm);
+ cond = 1;
+ }
+ break;
+ }
+ if (cond == 0) {
+ switch (opc) {
+ case OPC_TEQ: /* rs == rs */
+ case OPC_TEQI: /* r0 == 0 */
+ case OPC_TGE: /* rs >= rs */
+ case OPC_TGEI: /* r0 >= 0 */
+ case OPC_TGEU: /* rs >= rs unsigned */
+ case OPC_TGEIU: /* r0 >= 0 unsigned */
+ /* Always trap */
+ generate_exception_end(ctx, EXCP_TRAP);
+ break;
+ case OPC_TLT: /* rs < rs */
+ case OPC_TLTI: /* r0 < 0 */
+ case OPC_TLTU: /* rs < rs unsigned */
+ case OPC_TLTIU: /* r0 < 0 unsigned */
+ case OPC_TNE: /* rs != rs */
+ case OPC_TNEI: /* r0 != 0 */
+ /* Never trap: treat as NOP. */
+ break;
+ }
+ } else {
+ TCGLabel *l1 = gen_new_label();
+
+ switch (opc) {
+ case OPC_TEQ:
+ case OPC_TEQI:
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, t1, l1);
+ break;
+ case OPC_TGE:
+ case OPC_TGEI:
+ tcg_gen_brcond_tl(TCG_COND_LT, t0, t1, l1);
+ break;
+ case OPC_TGEU:
+ case OPC_TGEIU:
+ tcg_gen_brcond_tl(TCG_COND_LTU, t0, t1, l1);
+ break;
+ case OPC_TLT:
+ case OPC_TLTI:
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ break;
+ case OPC_TLTU:
+ case OPC_TLTIU:
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ break;
+ case OPC_TNE:
+ case OPC_TNEI:
+ tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1);
+ break;
+ }
+ generate_exception(ctx, EXCP_TRAP);
+ gen_set_label(l1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&ctx->base, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_save_pc(dest);
+ tcg_gen_exit_tb(ctx->base.tb, n);
+ } else {
+ gen_save_pc(dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
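+
+/*
+ * Illustrative note (not part of this patch): translator_use_goto_tb()
+ * broadly allows direct block chaining when the destination stays on the
+ * same guest page as the current translation block; otherwise the new PC
+ * is stored and control falls back to the TB hash lookup.
+ */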
+
+/* Branches (before delay slot) */
+static void gen_compute_branch(DisasContext *ctx, uint32_t opc,
+ int insn_bytes,
+ int rs, int rt, int32_t offset,
+ int delayslot_size)
+{
+ target_ulong btgt = -1;
+ int blink = 0;
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x"
+ TARGET_FMT_lx "\n", ctx->base.pc_next);
+#endif
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* Load needed operands */
+ switch (opc) {
+ case OPC_BEQ:
+ case OPC_BEQL:
+ case OPC_BNE:
+ case OPC_BNEL:
+ /* Compare two registers */
+ if (rs != rt) {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ }
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_BGEZ:
+ case OPC_BGEZAL:
+ case OPC_BGEZALL:
+ case OPC_BGEZL:
+ case OPC_BGTZ:
+ case OPC_BGTZL:
+ case OPC_BLEZ:
+ case OPC_BLEZL:
+ case OPC_BLTZ:
+ case OPC_BLTZAL:
+ case OPC_BLTZALL:
+ case OPC_BLTZL:
+ /* Compare to zero */
+ if (rs != 0) {
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ }
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_BPOSGE32:
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+ tcg_gen_andi_tl(t0, cpu_dspctrl, 0x7F);
+#else
+ tcg_gen_andi_tl(t0, cpu_dspctrl, 0x3F);
+#endif
+ bcond_compute = 1;
+ btgt = ctx->base.pc_next + insn_bytes + offset;
+ break;
+ case OPC_J:
+ case OPC_JAL:
+ case OPC_JALX:
+ /* Jump to immediate */
+ btgt = ((ctx->base.pc_next + insn_bytes) & (int32_t)0xF0000000) |
+ (uint32_t)offset;
+ break;
+ case OPC_JR:
+ case OPC_JALR:
+ /* Jump to register */
+ if (offset != 0 && offset != 16) {
+ /*
+ * Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
+ * others are reserved.
+ */
+ MIPS_INVAL("jump hint");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ gen_load_gpr(btarget, rs);
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ if (bcond_compute == 0) {
+ /* No condition to be computed */
+ switch (opc) {
+ case OPC_BEQ: /* rx == rx */
+ case OPC_BEQL: /* rx == rx likely */
+ case OPC_BGEZ: /* 0 >= 0 */
+ case OPC_BGEZL: /* 0 >= 0 likely */
+ case OPC_BLEZ: /* 0 <= 0 */
+ case OPC_BLEZL: /* 0 <= 0 likely */
+ /* Always take */
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BGEZAL: /* 0 >= 0 */
+ case OPC_BGEZALL: /* 0 >= 0 likely */
+ /* Always take and link */
+ blink = 31;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BNE: /* rx != rx */
+ case OPC_BGTZ: /* 0 > 0 */
+ case OPC_BLTZ: /* 0 < 0 */
+ /* Treat as NOP. */
+ goto out;
+ case OPC_BLTZAL: /* 0 < 0 */
+ /*
+ * Handle as an unconditional branch to get correct delay
+ * slot checking.
+ */
+ blink = 31;
+ btgt = ctx->base.pc_next + insn_bytes + delayslot_size;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BLTZALL: /* 0 < 0 likely */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 8);
+ /* Skip the instruction in the delay slot */
+ ctx->base.pc_next += 4;
+ goto out;
+ case OPC_BNEL: /* rx != rx likely */
+ case OPC_BGTZL: /* 0 > 0 likely */
+ case OPC_BLTZL: /* 0 < 0 likely */
+ /* Skip the instruction in the delay slot */
+ ctx->base.pc_next += 4;
+ goto out;
+ case OPC_J:
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_JALX:
+ ctx->hflags |= MIPS_HFLAG_BX;
+ /* Fallthrough */
+ case OPC_JAL:
+ blink = 31;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_JR:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_JALR:
+ blink = rt;
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ } else {
+ switch (opc) {
+ case OPC_BEQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BEQL:
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
+ goto likely;
+ case OPC_BNE:
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BNEL:
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
+ goto likely;
+ case OPC_BGEZ:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BGEZL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ goto likely;
+ case OPC_BGEZAL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ blink = 31;
+ goto not_likely;
+ case OPC_BGEZALL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ blink = 31;
+ goto likely;
+ case OPC_BGTZ:
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BGTZL:
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
+ goto likely;
+ case OPC_BLEZ:
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BLEZL:
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
+ goto likely;
+ case OPC_BLTZ:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BLTZL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ goto likely;
+ case OPC_BPOSGE32:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 32);
+ goto not_likely;
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 64);
+ goto not_likely;
+#endif
+ case OPC_BLTZAL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ blink = 31;
+ not_likely:
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case OPC_BLTZALL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ blink = 31;
+ likely:
+ ctx->hflags |= MIPS_HFLAG_BL;
+ break;
+ default:
+ MIPS_INVAL("conditional branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ }
+
+ ctx->btarget = btgt;
+
+ switch (delayslot_size) {
+ case 2:
+ ctx->hflags |= MIPS_HFLAG_BDS16;
+ break;
+ case 4:
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ break;
+ }
+
+ if (blink > 0) {
+ int post_delay = insn_bytes + delayslot_size;
+ int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16);
+
+ tcg_gen_movi_tl(cpu_gpr[blink],
+ ctx->base.pc_next + post_delay + lowbit);
+ }
+
+ out:
+ if (insn_bytes == 2) {
+ ctx->hflags |= MIPS_HFLAG_B16;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
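+
+/*
+ * Illustrative note (not part of this patch): for the "branch likely"
+ * encodings the delay-slot instruction is annulled when the branch is
+ * not taken. The statically never-taken cases above model this by simply
+ * advancing the translation past the slot:
+ *
+ *     pc_next += 4;   // consume the annulled delay slot as well
+ */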
+
+
+/* special3 bitfield operations */
+static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt,
+ int rs, int lsb, int msb)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t1, rs);
+ switch (opc) {
+ case OPC_EXT:
+ if (lsb + msb > 31) {
+ goto fail;
+ }
+ if (msb != 31) {
+ tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
+ } else {
+ /*
+ * The two checks together imply that lsb == 0,
+ * so this is a simple sign-extension.
+ */
+ tcg_gen_ext32s_tl(t0, t1);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DEXTU:
+ lsb += 32;
+ goto do_dext;
+ case OPC_DEXTM:
+ msb += 32;
+ goto do_dext;
+ case OPC_DEXT:
+ do_dext:
+ if (lsb + msb > 63) {
+ goto fail;
+ }
+ tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
+ break;
+#endif
+ case OPC_INS:
+ if (lsb > msb) {
+ goto fail;
+ }
+ gen_load_gpr(t0, rt);
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
+ tcg_gen_ext32s_tl(t0, t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DINSU:
+ lsb += 32;
+ /* FALLTHRU */
+ case OPC_DINSM:
+ msb += 32;
+ /* FALLTHRU */
+ case OPC_DINS:
+ if (lsb > msb) {
+ goto fail;
+ }
+ gen_load_gpr(t0, rt);
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
+ break;
+#endif
+ default:
+fail:
+ MIPS_INVAL("bitops");
+ gen_reserved_instruction(ctx);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return;
+ }
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
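+
+/*
+ * Illustrative C model (not part of this patch) of the 32-bit bitfield
+ * ops above, valid for msb < 31 (msb == 31 is handled separately as a
+ * plain sign-extension):
+ *
+ *     rt = (rs >> lsb) & ((1u << (msb + 1)) - 1);            // EXT
+ *
+ *     mask = (1u << (msb - lsb + 1)) - 1;                    // INS
+ *     rt   = (rt & ~(mask << lsb)) | ((rs & mask) << lsb);
+ */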
+
+static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
+{
+ TCGv t0;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ switch (op2) {
+ case OPC_WSBH:
+ {
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_const_tl(0x00FF00FF);
+
+ tcg_gen_shri_tl(t1, t0, 8);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_shli_tl(t0, t0, 8);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ }
+ break;
+ case OPC_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSBH:
+ {
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL);
+
+ tcg_gen_shri_tl(t1, t0, 8);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_shli_tl(t0, t0, 8);
+ tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_DSHD:
+ {
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL);
+
+ tcg_gen_shri_tl(t1, t0, 16);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_shri_tl(t1, t0, 32);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ }
+ break;
+#endif
+ default:
+ MIPS_INVAL("bsfhl");
+ gen_reserved_instruction(ctx);
+ tcg_temp_free(t0);
+ return;
+ }
+ tcg_temp_free(t0);
+}
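+
+/*
+ * Illustrative model (not part of this patch) of the OPC_WSBH sequence
+ * above: one mask and two shifts swap the bytes within each halfword:
+ *
+ *     x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
+ */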
+
+static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs,
+ int rt, int bits)
+{
+ TCGv t0;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ if (bits == 0 || bits == wordsz) {
+ if (bits == 0) {
+ gen_load_gpr(t0, rt);
+ } else {
+ gen_load_gpr(t0, rs);
+ }
+ switch (wordsz) {
+ case 32:
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case 64:
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
+ } else {
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ switch (wordsz) {
+ case 32:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t2, t1, t0);
+ tcg_gen_shri_i64(t2, t2, 32 - bits);
+ gen_move_low32(cpu_gpr[rd], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case 64:
+ tcg_gen_shli_tl(t0, t0, bits);
+ tcg_gen_shri_tl(t1, t1, 64 - bits);
+ tcg_gen_or_tl(cpu_gpr[rd], t1, t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t1);
+ }
+
+ tcg_temp_free(t0);
+}
+
+void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp)
+{
+ gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8);
+}
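+
+/*
+ * Illustrative model (not part of this patch) of the 32-bit ALIGN path
+ * in gen_align_bits: the pair is viewed as the 64-bit value {rt:rs} and
+ * a 32-bit window is taken 8 * bp bits up:
+ *
+ *     result = (int32_t)((rt << (8 * bp)) | ((uint32_t)rs >> (32 - 8 * bp)));
+ *
+ * (For 0 < bp < 4; bp == 0 and bp == 4 degenerate to rt and rs and are
+ * handled separately above.)
+ */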
+
+static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt)
+{
+ TCGv t0;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ switch (opc) {
+ case OPC_BITSWAP:
+ gen_helper_bitswap(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DBITSWAP:
+ gen_helper_dbitswap(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+#ifndef CONFIG_USER_ONLY
+/* CP0 (MMU and control) */
+static inline void gen_mthc0_entrylo(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t0, arg);
+ tcg_gen_ld_i64(t1, cpu_env, off);
+#if defined(TARGET_MIPS64)
+ tcg_gen_deposit_i64(t1, t1, t0, 30, 32);
+#else
+ tcg_gen_concat32_i64(t1, t1, t0);
+#endif
+ tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mthc0_store64(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t0, arg);
+ tcg_gen_ld_i64(t1, cpu_env, off);
+ tcg_gen_concat32_i64(t1, t1, t0);
+ tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t0, cpu_env, off);
+#if defined(TARGET_MIPS64)
+ tcg_gen_shri_i64(t0, t0, 30);
+#else
+ tcg_gen_shri_i64(t0, t0, 32);
+#endif
+ gen_move_low32(arg, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t0, cpu_env, off);
+ tcg_gen_shri_i64(t0, t0, 32 + shift);
+ gen_move_low32(arg, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfc0_load32(TCGv arg, target_ulong off)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(t0, cpu_env, off);
+ tcg_gen_ext_i32_tl(arg, t0);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_mfc0_load64(TCGv arg, target_ulong off)
+{
+ tcg_gen_ld_tl(arg, cpu_env, off);
+ tcg_gen_ext32s_tl(arg, arg);
+}
+
+static inline void gen_mtc0_store32(TCGv arg, target_ulong off)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg);
+ tcg_gen_st_i32(t0, cpu_env, off);
+ tcg_temp_free_i32(t0);
+}
+
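+/* Bail out to the common "unimplemented register" path if a required feature is absent. */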
+#define CP0_CHECK(c) \
+ do { \
+ if (!(c)) { \
+ goto cp0_unimplemented; \
+ } \
+ } while (0)
+
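+/* MFHC0: move the upper word of a 64-bit CP0 register to a GPR. */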
+static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+
+ switch (reg) {
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+ gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
+ register_name = "EntryLo0";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+ gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
+ register_name = "EntryLo1";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mfhc0_saar(arg, cpu_env);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ gen_mfhc0_load64(arg, offsetof(CPUMIPSState, CP0_LLAddr),
+ ctx->CP0_LLAddr_shift);
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mfhc0_maar(arg, cpu_env);
+ register_name = "MAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ /* upper 32 bits are only available when Config5.MI != 0 */
+ CP0_CHECK(ctx->mi);
+ gen_mfhc0_load64(arg, offsetof(CPUMIPSState, CP0_WatchHi[sel]), 0);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ gen_mfhc0_load64(arg, offsetof(CPUMIPSState, CP0_TagLo), 0);
+ register_name = "TagLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("mfhc0", register_name, reg, sel);
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "mfhc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+ tcg_gen_movi_tl(arg, 0);
+}
+
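+/* MTHC0: move a GPR to the upper word of a 64-bit CP0 register. */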
+static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+ uint64_t mask = ctx->PAMask >> 36;
+
+ switch (reg) {
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
+ register_name = "EntryLo0";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
+ register_name = "EntryLo1";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mthc0_saar(cpu_env, arg);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ /*
+ * LLAddr is read-only (the only exception is bit 0 if LLB is
+ * supported); the CP0_LLAddr_rw_bitmask does not seem to be
+ * relevant for modern MIPS cores supporting MTHC0, so we treat
+ * MTHC0 to LLAddr as a NOP.
+ */
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mthc0_maar(cpu_env, arg);
+ register_name = "MAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ /* upper 32 bits are only available when Config5.MI != 0 */
+ CP0_CHECK(ctx->mi);
+ gen_helper_0e1i(mthc0_watchhi, arg, sel);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_store64(arg, offsetof(CPUMIPSState, CP0_TagLo));
+ register_name = "TagLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("mthc0", register_name, reg, sel);
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "mthc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+}
+
+static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg)
+{
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ tcg_gen_movi_tl(arg, 0);
+ } else {
+ tcg_gen_movi_tl(arg, ~0);
+ }
+}
+
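+/*
+ * MFC0: read the sign-extended 32-bit view of a CP0 register.
+ * Reads of unimplemented registers return 0 on R6 and ~0 before R6
+ * (see gen_mfc0_unimplemented above).
+ */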
+static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+
+ if (sel != 0) {
+ check_insn(ctx, ISA_MIPS_R1);
+ }
+
+ switch (reg) {
+ case CP0_REGISTER_00:
+ switch (sel) {
+ case CP0_REG00__INDEX:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
+ register_name = "Index";
+ break;
+ case CP0_REG00__MVPCONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ register_name = "MVPControl";
+ break;
+ case CP0_REG00__MVPCONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ register_name = "MVPConf0";
+ break;
+ case CP0_REG00__MVPCONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ register_name = "MVPConf1";
+ break;
+ case CP0_REG00__VPCONTROL:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ register_name = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_01:
+ switch (sel) {
+ case CP0_REG01__RANDOM:
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ gen_helper_mfc0_random(arg, cpu_env);
+ register_name = "Random";
+ break;
+ case CP0_REG01__VPECONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
+ register_name = "VPEControl";
+ break;
+ case CP0_REG01__VPECONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
+ register_name = "VPEConf0";
+ break;
+ case CP0_REG01__VPECONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
+ register_name = "VPEConf1";
+ break;
+ case CP0_REG01__YQMASK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask));
+ register_name = "YQMask";
+ break;
+ case CP0_REG01__VPESCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule));
+ register_name = "VPESchedule";
+ break;
+ case CP0_REG01__VPESCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ register_name = "VPEScheFBack";
+ break;
+ case CP0_REG01__VPEOPT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
+ register_name = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env,
+ offsetof(CPUMIPSState, CP0_EntryLo0));
+#if defined(TARGET_MIPS64)
+ if (ctx->rxi) {
+ /* Move RI/XI fields to bits 31:30 */
+ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI);
+ tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2);
+ }
+#endif
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ register_name = "EntryLo0";
+ break;
+ case CP0_REG02__TCSTATUS:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
+ register_name = "TCStatus";
+ break;
+ case CP0_REG02__TCBIND:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
+ register_name = "TCBind";
+ break;
+ case CP0_REG02__TCRESTART:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcrestart(arg, cpu_env);
+ register_name = "TCRestart";
+ break;
+ case CP0_REG02__TCHALT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tchalt(arg, cpu_env);
+ register_name = "TCHalt";
+ break;
+ case CP0_REG02__TCCONTEXT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tccontext(arg, cpu_env);
+ register_name = "TCContext";
+ break;
+ case CP0_REG02__TCSCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcschedule(arg, cpu_env);
+ register_name = "TCSchedule";
+ break;
+ case CP0_REG02__TCSCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcschefback(arg, cpu_env);
+ register_name = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env,
+ offsetof(CPUMIPSState, CP0_EntryLo1));
+#if defined(TARGET_MIPS64)
+ if (ctx->rxi) {
+ /* Move RI/XI fields to bits 31:30 */
+ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI);
+ tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2);
+ }
+#endif
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ register_name = "EntryLo1";
+ break;
+ case CP0_REG03__GLOBALNUM:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ register_name = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_04:
+ switch (sel) {
+ case CP0_REG04__CONTEXT:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "Context";
+ break;
+ case CP0_REG04__CONTEXTCONFIG:
+ /* SmartMIPS ASE */
+ /* gen_helper_mfc0_contextconfig(arg); */
+ register_name = "ContextConfig";
+ goto cp0_unimplemented;
+ case CP0_REG04__USERLOCAL:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "UserLocal";
+ break;
+ case CP0_REG04__MMID:
+ CP0_CHECK(ctx->mi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MemoryMapID));
+ register_name = "MMID";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_05:
+ switch (sel) {
+ case CP0_REG05__PAGEMASK:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
+ register_name = "PageMask";
+ break;
+ case CP0_REG05__PAGEGRAIN:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
+ register_name = "PageGrain";
+ break;
+ case CP0_REG05__SEGCTL0:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "SegCtl0";
+ break;
+ case CP0_REG05__SEGCTL1:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "SegCtl1";
+ break;
+ case CP0_REG05__SEGCTL2:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "SegCtl2";
+ break;
+ case CP0_REG05__PWBASE:
+ check_pw(ctx);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWBase));
+ register_name = "PWBase";
+ break;
+ case CP0_REG05__PWFIELD:
+ check_pw(ctx);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWField));
+ register_name = "PWField";
+ break;
+ case CP0_REG05__PWSIZE:
+ check_pw(ctx);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWSize));
+ register_name = "PWSize";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_06:
+ switch (sel) {
+ case CP0_REG06__WIRED:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
+ register_name = "Wired";
+ break;
+ case CP0_REG06__SRSCONF0:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
+ register_name = "SRSConf0";
+ break;
+ case CP0_REG06__SRSCONF1:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
+ register_name = "SRSConf1";
+ break;
+ case CP0_REG06__SRSCONF2:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
+ register_name = "SRSConf2";
+ break;
+ case CP0_REG06__SRSCONF3:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
+ register_name = "SRSConf3";
+ break;
+ case CP0_REG06__SRSCONF4:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
+ register_name = "SRSConf4";
+ break;
+ case CP0_REG06__PWCTL:
+ check_pw(ctx);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWCtl));
+ register_name = "PWCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_07:
+ switch (sel) {
+ case CP0_REG07__HWRENA:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
+ register_name = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_08:
+ switch (sel) {
+ case CP0_REG08__BADVADDR:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "BadVAddr";
+ break;
+ case CP0_REG08__BADINSTR:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ register_name = "BadInstr";
+ break;
+ case CP0_REG08__BADINSTRP:
+ CP0_CHECK(ctx->bp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ register_name = "BadInstrP";
+ break;
+ case CP0_REG08__BADINSTRX:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrX));
+ tcg_gen_andi_tl(arg, arg, ~0xffff);
+ register_name = "BadInstrX";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__COUNT:
+ /* Mark as an IO operation because we read the time. */
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_mfc0_count(arg, cpu_env);
+ /*
+ * Break the TB to be able to take timer interrupts immediately
+ * after reading count. DISAS_STOP isn't sufficient, we need to
+ * ensure we break completely out of translated code.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Count";
+ break;
+ case CP0_REG09__SAARI:
+ CP0_CHECK(ctx->saar);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SAARI));
+ register_name = "SAARI";
+ break;
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mfc0_saar(arg, cpu_env);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_10:
+ switch (sel) {
+ case CP0_REG10__ENTRYHI:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_11:
+ switch (sel) {
+ case CP0_REG11__COMPARE:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
+ register_name = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_12:
+ switch (sel) {
+ case CP0_REG12__STATUS:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
+ register_name = "Status";
+ break;
+ case CP0_REG12__INTCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
+ register_name = "IntCtl";
+ break;
+ case CP0_REG12__SRSCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
+ register_name = "SRSCtl";
+ break;
+ case CP0_REG12__SRSMAP:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ register_name = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_13:
+ switch (sel) {
+ case CP0_REG13__CAUSE:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
+ register_name = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_14:
+ switch (sel) {
+ case CP0_REG14__EPC:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_15:
+ switch (sel) {
+ case CP0_REG15__PRID:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
+ register_name = "PRid";
+ break;
+ case CP0_REG15__EBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "EBase";
+ break;
+ case CP0_REG15__CMGCRBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ CP0_CHECK(ctx->cmgcr);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "CMGCRBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_16:
+ switch (sel) {
+ case CP0_REG16__CONFIG:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
+ register_name = "Config";
+ break;
+ case CP0_REG16__CONFIG1:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
+ register_name = "Config1";
+ break;
+ case CP0_REG16__CONFIG2:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
+ register_name = "Config2";
+ break;
+ case CP0_REG16__CONFIG3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
+ register_name = "Config3";
+ break;
+ case CP0_REG16__CONFIG4:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4));
+ register_name = "Config4";
+ break;
+ case CP0_REG16__CONFIG5:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5));
+ register_name = "Config5";
+ break;
+ /* 6,7 are implementation dependent */
+ case CP0_REG16__CONFIG6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
+ register_name = "Config6";
+ break;
+ case CP0_REG16__CONFIG7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
+ register_name = "Config7";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ gen_helper_mfc0_lladdr(arg, cpu_env);
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mfc0_maar(arg, cpu_env);
+ register_name = "MAAR";
+ break;
+ case CP0_REG17__MAARI:
+ CP0_CHECK(ctx->mrp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MAARI));
+ register_name = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_18:
+ switch (sel) {
+ case CP0_REG18__WATCHLO0:
+ case CP0_REG18__WATCHLO1:
+ case CP0_REG18__WATCHLO2:
+ case CP0_REG18__WATCHLO3:
+ case CP0_REG18__WATCHLO4:
+ case CP0_REG18__WATCHLO5:
+ case CP0_REG18__WATCHLO6:
+ case CP0_REG18__WATCHLO7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_1e0i(mfc0_watchlo, arg, sel);
+ register_name = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_1e0i(mfc0_watchhi, arg, sel);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_20:
+ switch (sel) {
+ case CP0_REG20__XCONTEXT:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "XContext";
+ break;
+#endif
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
+ register_name = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_22:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ register_name = "'Diagnostic"; /* implementation dependent */
+ break;
+ case CP0_REGISTER_23:
+ switch (sel) {
+ case CP0_REG23__DEBUG:
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ register_name = "Debug";
+ break;
+ case CP0_REG23__TRACECONTROL:
+ /* PDtrace support */
+ /* gen_helper_mfc0_tracecontrol(arg); */
+ register_name = "TraceControl";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACECONTROL2:
+ /* PDtrace support */
+ /* gen_helper_mfc0_tracecontrol2(arg); */
+ register_name = "TraceControl2";
+ goto cp0_unimplemented;
+ case CP0_REG23__USERTRACEDATA1:
+ /* PDtrace support */
+ /* gen_helper_mfc0_usertracedata1(arg); */
+ register_name = "UserTraceData1";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEIBPC:
+ /* PDtrace support */
+ /* gen_helper_mfc0_traceibpc(arg); */
+ register_name = "TraceIBPC";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEDBPC:
+ /* PDtrace support */
+ /* gen_helper_mfc0_tracedbpc(arg); */
+ register_name = "TraceDBPC";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_24:
+ switch (sel) {
+ case CP0_REG24__DEPC:
+ /* EJTAG support */
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_25:
+ switch (sel) {
+ case CP0_REG25__PERFCTL0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
+ register_name = "Performance0";
+ break;
+ case CP0_REG25__PERFCNT0:
+ /* gen_helper_mfc0_performance1(arg); */
+ register_name = "Performance1";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL1:
+ /* gen_helper_mfc0_performance2(arg); */
+ register_name = "Performance2";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT1:
+ /* gen_helper_mfc0_performance3(arg); */
+ register_name = "Performance3";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL2:
+ /* gen_helper_mfc0_performance4(arg); */
+ register_name = "Performance4";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT2:
+ /* gen_helper_mfc0_performance5(arg); */
+ register_name = "Performance5";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL3:
+ /* gen_helper_mfc0_performance6(arg); */
+ register_name = "Performance6";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT3:
+ /* gen_helper_mfc0_performance7(arg); */
+ register_name = "Performance7";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_26:
+ switch (sel) {
+ case CP0_REG26__ERRCTL:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_ErrCtl));
+ register_name = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_27:
+ switch (sel) {
+ case CP0_REG27__CACHERR:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ register_name = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo));
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ register_name = "TagLo";
+ break;
+ case CP0_REG28__DATALO:
+ case CP0_REG28__DATALO1:
+ case CP0_REG28__DATALO2:
+ case CP0_REG28__DATALO3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
+ register_name = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_29:
+ switch (sel) {
+ case CP0_REG29__TAGHI:
+ case CP0_REG29__TAGHI1:
+ case CP0_REG29__TAGHI2:
+ case CP0_REG29__TAGHI3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
+ register_name = "TagHi";
+ break;
+ case CP0_REG29__DATAHI:
+ case CP0_REG29__DATAHI1:
+ case CP0_REG29__DATAHI2:
+ case CP0_REG29__DATAHI3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
+ register_name = "DataHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_30:
+ switch (sel) {
+ case CP0_REG30__ERROREPC:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_31:
+ switch (sel) {
+ case CP0_REG31__DESAVE:
+ /* EJTAG support */
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ register_name = "DESAVE";
+ break;
+ case CP0_REG31__KSCRATCH1:
+ case CP0_REG31__KSCRATCH2:
+ case CP0_REG31__KSCRATCH3:
+ case CP0_REG31__KSCRATCH4:
+ case CP0_REG31__KSCRATCH5:
+ case CP0_REG31__KSCRATCH6:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
+ tcg_gen_ext32s_tl(arg, arg);
+ register_name = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("mfc0", register_name, reg, sel);
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "mfc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+ gen_mfc0_unimplemented(ctx, arg);
+}
+
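+/*
+ * MTC0: write the 32-bit view of a CP0 register. Writes that may
+ * change hflags or make an interrupt pending terminate the TB so
+ * that the new state is observed immediately.
+ */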
+static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+
+ if (sel != 0) {
+ check_insn(ctx, ISA_MIPS_R1);
+ }
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ switch (reg) {
+ case CP0_REGISTER_00:
+ switch (sel) {
+ case CP0_REG00__INDEX:
+ gen_helper_mtc0_index(cpu_env, arg);
+ register_name = "Index";
+ break;
+ case CP0_REG00__MVPCONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ register_name = "MVPControl";
+ break;
+ case CP0_REG00__MVPCONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ register_name = "MVPConf0";
+ break;
+ case CP0_REG00__MVPCONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ register_name = "MVPConf1";
+ break;
+ case CP0_REG00__VPCONTROL:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ register_name = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_01:
+ switch (sel) {
+ case CP0_REG01__RANDOM:
+ /* ignored */
+ register_name = "Random";
+ break;
+ case CP0_REG01__VPECONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ register_name = "VPEControl";
+ break;
+ case CP0_REG01__VPECONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ register_name = "VPEConf0";
+ break;
+ case CP0_REG01__VPECONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ register_name = "VPEConf1";
+ break;
+ case CP0_REG01__YQMASK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
+ register_name = "YQMask";
+ break;
+ case CP0_REG01__VPESCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPESchedule));
+ register_name = "VPESchedule";
+ break;
+ case CP0_REG01__VPESCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ register_name = "VPEScheFBack";
+ break;
+ case CP0_REG01__VPEOPT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
+ register_name = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ gen_helper_mtc0_entrylo0(cpu_env, arg);
+ register_name = "EntryLo0";
+ break;
+ case CP0_REG02__TCSTATUS:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
+ register_name = "TCStatus";
+ break;
+ case CP0_REG02__TCBIND:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
+ register_name = "TCBind";
+ break;
+ case CP0_REG02__TCRESTART:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
+ register_name = "TCRestart";
+ break;
+ case CP0_REG02__TCHALT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
+ register_name = "TCHalt";
+ break;
+ case CP0_REG02__TCCONTEXT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
+ register_name = "TCContext";
+ break;
+ case CP0_REG02__TCSCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
+ register_name = "TCSchedule";
+ break;
+ case CP0_REG02__TCSCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
+ register_name = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ gen_helper_mtc0_entrylo1(cpu_env, arg);
+ register_name = "EntryLo1";
+ break;
+ case CP0_REG03__GLOBALNUM:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ register_name = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_04:
+ switch (sel) {
+ case CP0_REG04__CONTEXT:
+ gen_helper_mtc0_context(cpu_env, arg);
+ register_name = "Context";
+ break;
+ case CP0_REG04__CONTEXTCONFIG:
+ /* SmartMIPS ASE */
+ /* gen_helper_mtc0_contextconfig(arg); */
+ register_name = "ContextConfig";
+ goto cp0_unimplemented;
+ case CP0_REG04__USERLOCAL:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ register_name = "UserLocal";
+ break;
+ case CP0_REG04__MMID:
+ CP0_CHECK(ctx->mi);
+ gen_helper_mtc0_memorymapid(cpu_env, arg);
+ register_name = "MMID";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_05:
+ switch (sel) {
+ case CP0_REG05__PAGEMASK:
+ gen_helper_mtc0_pagemask(cpu_env, arg);
+ register_name = "PageMask";
+ break;
+ case CP0_REG05__PAGEGRAIN:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
+ register_name = "PageGrain";
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG05__SEGCTL0:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl0(cpu_env, arg);
+ register_name = "SegCtl0";
+ break;
+ case CP0_REG05__SEGCTL1:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl1(cpu_env, arg);
+ register_name = "SegCtl1";
+ break;
+ case CP0_REG05__SEGCTL2:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl2(cpu_env, arg);
+ register_name = "SegCtl2";
+ break;
+ case CP0_REG05__PWBASE:
+ check_pw(ctx);
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_PWBase));
+ register_name = "PWBase";
+ break;
+ case CP0_REG05__PWFIELD:
+ check_pw(ctx);
+ gen_helper_mtc0_pwfield(cpu_env, arg);
+ register_name = "PWField";
+ break;
+ case CP0_REG05__PWSIZE:
+ check_pw(ctx);
+ gen_helper_mtc0_pwsize(cpu_env, arg);
+ register_name = "PWSize";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_06:
+ switch (sel) {
+ case CP0_REG06__WIRED:
+ gen_helper_mtc0_wired(cpu_env, arg);
+ register_name = "Wired";
+ break;
+ case CP0_REG06__SRSCONF0:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
+ register_name = "SRSConf0";
+ break;
+ case CP0_REG06__SRSCONF1:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
+ register_name = "SRSConf1";
+ break;
+ case CP0_REG06__SRSCONF2:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
+ register_name = "SRSConf2";
+ break;
+ case CP0_REG06__SRSCONF3:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
+ register_name = "SRSConf3";
+ break;
+ case CP0_REG06__SRSCONF4:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
+ register_name = "SRSConf4";
+ break;
+ case CP0_REG06__PWCTL:
+ check_pw(ctx);
+ gen_helper_mtc0_pwctl(cpu_env, arg);
+ register_name = "PWCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_07:
+ switch (sel) {
+ case CP0_REG07__HWRENA:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_08:
+ switch (sel) {
+ case CP0_REG08__BADVADDR:
+ /* ignored */
+ register_name = "BadVAddr";
+ break;
+ case CP0_REG08__BADINSTR:
+ /* ignored */
+ register_name = "BadInstr";
+ break;
+ case CP0_REG08__BADINSTRP:
+ /* ignored */
+ register_name = "BadInstrP";
+ break;
+ case CP0_REG08__BADINSTRX:
+ /* ignored */
+ register_name = "BadInstrX";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__COUNT:
+ gen_helper_mtc0_count(cpu_env, arg);
+ register_name = "Count";
+ break;
+ case CP0_REG09__SAARI:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mtc0_saari(cpu_env, arg);
+ register_name = "SAARI";
+ break;
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mtc0_saar(cpu_env, arg);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_10:
+ switch (sel) {
+ case CP0_REG10__ENTRYHI:
+ gen_helper_mtc0_entryhi(cpu_env, arg);
+ register_name = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_11:
+ switch (sel) {
+ case CP0_REG11__COMPARE:
+ gen_helper_mtc0_compare(cpu_env, arg);
+ register_name = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_12:
+ switch (sel) {
+ case CP0_REG12__STATUS:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(cpu_env, arg);
+ /* DISAS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Status";
+ break;
+ case CP0_REG12__INTCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_intctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "IntCtl";
+ break;
+ case CP0_REG12__SRSCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "SRSCtl";
+ break;
+ case CP0_REG12__SRSMAP:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_13:
+ switch (sel) {
+ case CP0_REG13__CAUSE:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_cause(cpu_env, arg);
+ /*
+ * Stop translation as we may have triggered an interrupt.
+ * DISAS_STOP isn't sufficient, we need to ensure we break out of
+ * translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_14:
+ switch (sel) {
+ case CP0_REG14__EPC:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ register_name = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_15:
+ switch (sel) {
+ case CP0_REG15__PRID:
+ /* ignored */
+ register_name = "PRid";
+ break;
+ case CP0_REG15__EBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_ebase(cpu_env, arg);
+ register_name = "EBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_16:
+ switch (sel) {
+ case CP0_REG16__CONFIG:
+ gen_helper_mtc0_config0(cpu_env, arg);
+ register_name = "Config";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG1:
+ /* ignored, read only */
+ register_name = "Config1";
+ break;
+ case CP0_REG16__CONFIG2:
+ gen_helper_mtc0_config2(cpu_env, arg);
+ register_name = "Config2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG3:
+ gen_helper_mtc0_config3(cpu_env, arg);
+ register_name = "Config3";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG4:
+ gen_helper_mtc0_config4(cpu_env, arg);
+ register_name = "Config4";
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG5:
+ gen_helper_mtc0_config5(cpu_env, arg);
+ register_name = "Config5";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ /* 6,7 are implementation dependent */
+ case CP0_REG16__CONFIG6:
+ /* ignored */
+ register_name = "Config6";
+ break;
+ case CP0_REG16__CONFIG7:
+ /* ignored */
+ register_name = "Config7";
+ break;
+ default:
+ register_name = "Invalid config selector";
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ gen_helper_mtc0_lladdr(cpu_env, arg);
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maar(cpu_env, arg);
+ register_name = "MAAR";
+ break;
+ case CP0_REG17__MAARI:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maari(cpu_env, arg);
+ register_name = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_18:
+ switch (sel) {
+ case CP0_REG18__WATCHLO0:
+ case CP0_REG18__WATCHLO1:
+ case CP0_REG18__WATCHLO2:
+ case CP0_REG18__WATCHLO3:
+ case CP0_REG18__WATCHLO4:
+ case CP0_REG18__WATCHLO5:
+ case CP0_REG18__WATCHLO6:
+ case CP0_REG18__WATCHLO7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
+ register_name = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_20:
+ switch (sel) {
+ case CP0_REG20__XCONTEXT:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
+ register_name = "XContext";
+ break;
+#endif
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_framemask(cpu_env, arg);
+ register_name = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_22:
+ /* ignored */
+ register_name = "Diagnostic"; /* implementation dependent */
+ break;
+ case CP0_REGISTER_23:
+ switch (sel) {
+ case CP0_REG23__DEBUG:
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ /* DISAS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Debug";
+ break;
+ case CP0_REG23__TRACECONTROL:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracecontrol(cpu_env, arg); */
+ register_name = "TraceControl";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACECONTROL2:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracecontrol2(cpu_env, arg); */
+ register_name = "TraceControl2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ goto cp0_unimplemented;
+ case CP0_REG23__USERTRACEDATA1:
+ /* PDtrace support */
+ /* gen_helper_mtc0_usertracedata1(cpu_env, arg); */
+ register_name = "UserTraceData1";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEIBPC:
+ /* PDtrace support */
+ /* gen_helper_mtc0_traceibpc(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceIBPC";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEDBPC:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracedbpc(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceDBPC";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_24:
+ switch (sel) {
+ case CP0_REG24__DEPC:
+ /* EJTAG support */
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ register_name = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_25:
+ switch (sel) {
+ case CP0_REG25__PERFCTL0:
+ gen_helper_mtc0_performance0(cpu_env, arg);
+ register_name = "Performance0";
+ break;
+ case CP0_REG25__PERFCNT0:
+ /* gen_helper_mtc0_performance1(arg); */
+ register_name = "Performance1";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL1:
+ /* gen_helper_mtc0_performance2(arg); */
+ register_name = "Performance2";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT1:
+ /* gen_helper_mtc0_performance3(arg); */
+ register_name = "Performance3";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL2:
+ /* gen_helper_mtc0_performance4(arg); */
+ register_name = "Performance4";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT2:
+ /* gen_helper_mtc0_performance5(arg); */
+ register_name = "Performance5";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL3:
+ /* gen_helper_mtc0_performance6(arg); */
+ register_name = "Performance6";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT3:
+ /* gen_helper_mtc0_performance7(arg); */
+ register_name = "Performance7";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_26:
+ switch (sel) {
+ case CP0_REG26__ERRCTL:
+ gen_helper_mtc0_errctl(cpu_env, arg);
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_27:
+ switch (sel) {
+ case CP0_REG27__CACHERR:
+ /* ignored */
+ register_name = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ gen_helper_mtc0_taglo(cpu_env, arg);
+ register_name = "TagLo";
+ break;
+ case CP0_REG28__DATALO:
+ case CP0_REG28__DATALO1:
+ case CP0_REG28__DATALO2:
+ case CP0_REG28__DATALO3:
+ gen_helper_mtc0_datalo(cpu_env, arg);
+ register_name = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_29:
+ switch (sel) {
+ case CP0_REG29__TAGHI:
+ case CP0_REG29__TAGHI1:
+ case CP0_REG29__TAGHI2:
+ case CP0_REG29__TAGHI3:
+ gen_helper_mtc0_taghi(cpu_env, arg);
+ register_name = "TagHi";
+ break;
+ case CP0_REG29__DATAHI:
+ case CP0_REG29__DATAHI1:
+ case CP0_REG29__DATAHI2:
+ case CP0_REG29__DATAHI3:
+ gen_helper_mtc0_datahi(cpu_env, arg);
+ register_name = "DataHi";
+ break;
+ default:
+ register_name = "invalid sel";
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_30:
+ switch (sel) {
+ case CP0_REG30__ERROREPC:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ register_name = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_31:
+ switch (sel) {
+ case CP0_REG31__DESAVE:
+ /* EJTAG support */
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ register_name = "DESAVE";
+ break;
+ case CP0_REG31__KSCRATCH1:
+ case CP0_REG31__KSCRATCH2:
+ case CP0_REG31__KSCRATCH3:
+ case CP0_REG31__KSCRATCH4:
+ case CP0_REG31__KSCRATCH5:
+ case CP0_REG31__KSCRATCH6:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
+ register_name = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("mtc0", register_name, reg, sel);
+
+ /* For simplicity assume that all writes can cause interrupts. */
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ /*
+ * DISAS_STOP isn't sufficient, we need to ensure we break out of
+ * translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ }
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "mtc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+}
+
+#if defined(TARGET_MIPS64)
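+/* DMFC0: full 64-bit read of a CP0 register (no sign extension). */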
+static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+
+ if (sel != 0) {
+ check_insn(ctx, ISA_MIPS_R1);
+ }
+
+ switch (reg) {
+ case CP0_REGISTER_00:
+ switch (sel) {
+ case CP0_REG00__INDEX:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
+ register_name = "Index";
+ break;
+ case CP0_REG00__MVPCONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ register_name = "MVPControl";
+ break;
+ case CP0_REG00__MVPCONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ register_name = "MVPConf0";
+ break;
+ case CP0_REG00__MVPCONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ register_name = "MVPConf1";
+ break;
+ case CP0_REG00__VPCONTROL:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ register_name = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_01:
+ switch (sel) {
+ case CP0_REG01__RANDOM:
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ gen_helper_mfc0_random(arg, cpu_env);
+ register_name = "Random";
+ break;
+ case CP0_REG01__VPECONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
+ register_name = "VPEControl";
+ break;
+ case CP0_REG01__VPECONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
+ register_name = "VPEConf0";
+ break;
+ case CP0_REG01__VPECONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
+ register_name = "VPEConf1";
+ break;
+ case CP0_REG01__YQMASK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_YQMask));
+ register_name = "YQMask";
+ break;
+ case CP0_REG01__VPESCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPESchedule));
+ register_name = "VPESchedule";
+ break;
+ case CP0_REG01__VPESCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ register_name = "VPEScheFBack";
+ break;
+ case CP0_REG01__VPEOPT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
+ register_name = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_EntryLo0));
+ register_name = "EntryLo0";
+ break;
+ case CP0_REG02__TCSTATUS:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
+ register_name = "TCStatus";
+ break;
+ case CP0_REG02__TCBIND:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
+ register_name = "TCBind";
+ break;
+ case CP0_REG02__TCRESTART:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcrestart(arg, cpu_env);
+ register_name = "TCRestart";
+ break;
+ case CP0_REG02__TCHALT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tchalt(arg, cpu_env);
+ register_name = "TCHalt";
+ break;
+ case CP0_REG02__TCCONTEXT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tccontext(arg, cpu_env);
+ register_name = "TCContext";
+ break;
+ case CP0_REG02__TCSCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcschedule(arg, cpu_env);
+ register_name = "TCSchedule";
+ break;
+ case CP0_REG02__TCSCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcschefback(arg, cpu_env);
+ register_name = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
+ register_name = "EntryLo1";
+ break;
+ case CP0_REG03__GLOBALNUM:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ register_name = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_04:
+ switch (sel) {
+ case CP0_REG04__CONTEXT:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ register_name = "Context";
+ break;
+ case CP0_REG04__CONTEXTCONFIG:
+ /* SmartMIPS ASE */
+ /* gen_helper_dmfc0_contextconfig(arg); */
+ register_name = "ContextConfig";
+ goto cp0_unimplemented;
+ case CP0_REG04__USERLOCAL:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ register_name = "UserLocal";
+ break;
+ case CP0_REG04__MMID:
+ CP0_CHECK(ctx->mi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MemoryMapID));
+ register_name = "MMID";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_05:
+ switch (sel) {
+ case CP0_REG05__PAGEMASK:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
+ register_name = "PageMask";
+ break;
+ case CP0_REG05__PAGEGRAIN:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
+ register_name = "PageGrain";
+ break;
+ case CP0_REG05__SEGCTL0:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl0));
+ register_name = "SegCtl0";
+ break;
+ case CP0_REG05__SEGCTL1:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl1));
+ register_name = "SegCtl1";
+ break;
+ case CP0_REG05__SEGCTL2:
+ CP0_CHECK(ctx->sc);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2));
+ register_name = "SegCtl2";
+ break;
+ case CP0_REG05__PWBASE:
+ check_pw(ctx);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase));
+ register_name = "PWBase";
+ break;
+ case CP0_REG05__PWFIELD:
+ check_pw(ctx);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWField));
+ register_name = "PWField";
+ break;
+ case CP0_REG05__PWSIZE:
+ check_pw(ctx);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWSize));
+ register_name = "PWSize";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_06:
+ switch (sel) {
+ case CP0_REG06__WIRED:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
+ register_name = "Wired";
+ break;
+ case CP0_REG06__SRSCONF0:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
+ register_name = "SRSConf0";
+ break;
+ case CP0_REG06__SRSCONF1:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
+ register_name = "SRSConf1";
+ break;
+ case CP0_REG06__SRSCONF2:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
+ register_name = "SRSConf2";
+ break;
+ case CP0_REG06__SRSCONF3:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
+ register_name = "SRSConf3";
+ break;
+ case CP0_REG06__SRSCONF4:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
+ register_name = "SRSConf4";
+ break;
+ case CP0_REG06__PWCTL:
+ check_pw(ctx);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWCtl));
+ register_name = "PWCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_07:
+ switch (sel) {
+ case CP0_REG07__HWRENA:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
+ register_name = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_08:
+ switch (sel) {
+ case CP0_REG08__BADVADDR:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ register_name = "BadVAddr";
+ break;
+ case CP0_REG08__BADINSTR:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ register_name = "BadInstr";
+ break;
+ case CP0_REG08__BADINSTRP:
+ CP0_CHECK(ctx->bp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ register_name = "BadInstrP";
+ break;
+ case CP0_REG08__BADINSTRX:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrX));
+ tcg_gen_andi_tl(arg, arg, ~0xffff);
+ register_name = "BadInstrX";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__COUNT:
+ /* Mark as an IO operation because we read the time. */
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_mfc0_count(arg, cpu_env);
+ /*
+ * Break the TB to be able to take timer interrupts immediately
+ * after reading count. DISAS_STOP isn't sufficient, we need to
+ * ensure we break completely out of translated code.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Count";
+ break;
+ case CP0_REG09__SAARI:
+ CP0_CHECK(ctx->saar);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SAARI));
+ register_name = "SAARI";
+ break;
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_dmfc0_saar(arg, cpu_env);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_10:
+ switch (sel) {
+ case CP0_REG10__ENTRYHI:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ register_name = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_11:
+ switch (sel) {
+ case CP0_REG11__COMPARE:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
+ register_name = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_12:
+ switch (sel) {
+ case CP0_REG12__STATUS:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
+ register_name = "Status";
+ break;
+ case CP0_REG12__INTCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
+ register_name = "IntCtl";
+ break;
+ case CP0_REG12__SRSCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
+ register_name = "SRSCtl";
+ break;
+ case CP0_REG12__SRSMAP:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ register_name = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_13:
+ switch (sel) {
+ case CP0_REG13__CAUSE:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
+ register_name = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_14:
+ switch (sel) {
+ case CP0_REG14__EPC:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ register_name = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_15:
+ switch (sel) {
+ case CP0_REG15__PRID:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
+ register_name = "PRid";
+ break;
+ case CP0_REG15__EBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EBase));
+ register_name = "EBase";
+ break;
+ case CP0_REG15__CMGCRBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ CP0_CHECK(ctx->cmgcr);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ register_name = "CMGCRBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_16:
+ switch (sel) {
+ case CP0_REG16__CONFIG:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
+ register_name = "Config";
+ break;
+ case CP0_REG16__CONFIG1:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
+ register_name = "Config1";
+ break;
+ case CP0_REG16__CONFIG2:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
+ register_name = "Config2";
+ break;
+ case CP0_REG16__CONFIG3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
+ register_name = "Config3";
+ break;
+ case CP0_REG16__CONFIG4:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4));
+ register_name = "Config4";
+ break;
+ case CP0_REG16__CONFIG5:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5));
+ register_name = "Config5";
+ break;
+ /* 6,7 are implementation dependent */
+ case CP0_REG16__CONFIG6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
+ register_name = "Config6";
+ break;
+ case CP0_REG16__CONFIG7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
+ register_name = "Config7";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ gen_helper_dmfc0_lladdr(arg, cpu_env);
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_dmfc0_maar(arg, cpu_env);
+ register_name = "MAAR";
+ break;
+ case CP0_REG17__MAARI:
+ CP0_CHECK(ctx->mrp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MAARI));
+ register_name = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_18:
+ switch (sel) {
+ case CP0_REG18__WATCHLO0:
+ case CP0_REG18__WATCHLO1:
+ case CP0_REG18__WATCHLO2:
+ case CP0_REG18__WATCHLO3:
+ case CP0_REG18__WATCHLO4:
+ case CP0_REG18__WATCHLO5:
+ case CP0_REG18__WATCHLO6:
+ case CP0_REG18__WATCHLO7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_1e0i(dmfc0_watchlo, arg, sel);
+ register_name = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_1e0i(dmfc0_watchhi, arg, sel);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_20:
+ switch (sel) {
+ case CP0_REG20__XCONTEXT:
+ check_insn(ctx, ISA_MIPS3);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ register_name = "XContext";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
+ register_name = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_22:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+        register_name = "Diagnostic"; /* implementation dependent */
+ break;
+ case CP0_REGISTER_23:
+ switch (sel) {
+ case CP0_REG23__DEBUG:
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ register_name = "Debug";
+ break;
+ case CP0_REG23__TRACECONTROL:
+ /* PDtrace support */
+ /* gen_helper_dmfc0_tracecontrol(arg, cpu_env); */
+ register_name = "TraceControl";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACECONTROL2:
+ /* PDtrace support */
+ /* gen_helper_dmfc0_tracecontrol2(arg, cpu_env); */
+ register_name = "TraceControl2";
+ goto cp0_unimplemented;
+ case CP0_REG23__USERTRACEDATA1:
+ /* PDtrace support */
+ /* gen_helper_dmfc0_usertracedata1(arg, cpu_env);*/
+ register_name = "UserTraceData1";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEIBPC:
+ /* PDtrace support */
+ /* gen_helper_dmfc0_traceibpc(arg, cpu_env); */
+ register_name = "TraceIBPC";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEDBPC:
+ /* PDtrace support */
+ /* gen_helper_dmfc0_tracedbpc(arg, cpu_env); */
+ register_name = "TraceDBPC";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_24:
+ switch (sel) {
+ case CP0_REG24__DEPC:
+ /* EJTAG support */
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ register_name = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_25:
+ switch (sel) {
+ case CP0_REG25__PERFCTL0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
+ register_name = "Performance0";
+ break;
+ case CP0_REG25__PERFCNT0:
+ /* gen_helper_dmfc0_performance1(arg); */
+ register_name = "Performance1";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL1:
+ /* gen_helper_dmfc0_performance2(arg); */
+ register_name = "Performance2";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT1:
+ /* gen_helper_dmfc0_performance3(arg); */
+ register_name = "Performance3";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL2:
+ /* gen_helper_dmfc0_performance4(arg); */
+ register_name = "Performance4";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT2:
+ /* gen_helper_dmfc0_performance5(arg); */
+ register_name = "Performance5";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL3:
+ /* gen_helper_dmfc0_performance6(arg); */
+ register_name = "Performance6";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT3:
+ /* gen_helper_dmfc0_performance7(arg); */
+ register_name = "Performance7";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_26:
+ switch (sel) {
+ case CP0_REG26__ERRCTL:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_ErrCtl));
+ register_name = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_27:
+ switch (sel) {
+ /* ignored */
+ case CP0_REG27__CACHERR:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ register_name = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagLo));
+ register_name = "TagLo";
+ break;
+ case CP0_REG28__DATALO:
+ case CP0_REG28__DATALO1:
+ case CP0_REG28__DATALO2:
+ case CP0_REG28__DATALO3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
+ register_name = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_29:
+ switch (sel) {
+ case CP0_REG29__TAGHI:
+ case CP0_REG29__TAGHI1:
+ case CP0_REG29__TAGHI2:
+ case CP0_REG29__TAGHI3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
+ register_name = "TagHi";
+ break;
+ case CP0_REG29__DATAHI:
+ case CP0_REG29__DATAHI1:
+ case CP0_REG29__DATAHI2:
+ case CP0_REG29__DATAHI3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
+ register_name = "DataHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_30:
+ switch (sel) {
+ case CP0_REG30__ERROREPC:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ register_name = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_31:
+ switch (sel) {
+ case CP0_REG31__DESAVE:
+ /* EJTAG support */
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ register_name = "DESAVE";
+ break;
+ case CP0_REG31__KSCRATCH1:
+ case CP0_REG31__KSCRATCH2:
+ case CP0_REG31__KSCRATCH3:
+ case CP0_REG31__KSCRATCH4:
+ case CP0_REG31__KSCRATCH5:
+ case CP0_REG31__KSCRATCH6:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
+ register_name = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("dmfc0", register_name, reg, sel);
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "dmfc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+ gen_mfc0_unimplemented(ctx, arg);
+}
+
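+/*
+ * DMTC0: doubleword move to a CP0 register, dispatched on (reg, sel).
+ * Writes may arm timer interrupts, so under icount the helper calls are
+ * bracketed by gen_io_start() and the TB is exited afterwards.
+ */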
+static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *register_name = "invalid";
+
+ if (sel != 0) {
+ check_insn(ctx, ISA_MIPS_R1);
+ }
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ switch (reg) {
+ case CP0_REGISTER_00:
+ switch (sel) {
+ case CP0_REG00__INDEX:
+ gen_helper_mtc0_index(cpu_env, arg);
+ register_name = "Index";
+ break;
+ case CP0_REG00__MVPCONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ register_name = "MVPControl";
+ break;
+ case CP0_REG00__MVPCONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ register_name = "MVPConf0";
+ break;
+ case CP0_REG00__MVPCONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ register_name = "MVPConf1";
+ break;
+ case CP0_REG00__VPCONTROL:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ register_name = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_01:
+ switch (sel) {
+ case CP0_REG01__RANDOM:
+ /* ignored */
+ register_name = "Random";
+ break;
+ case CP0_REG01__VPECONTROL:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ register_name = "VPEControl";
+ break;
+ case CP0_REG01__VPECONF0:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ register_name = "VPEConf0";
+ break;
+ case CP0_REG01__VPECONF1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ register_name = "VPEConf1";
+ break;
+ case CP0_REG01__YQMASK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
+ register_name = "YQMask";
+ break;
+ case CP0_REG01__VPESCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPESchedule));
+ register_name = "VPESchedule";
+ break;
+ case CP0_REG01__VPESCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ register_name = "VPEScheFBack";
+ break;
+ case CP0_REG01__VPEOPT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
+ register_name = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_02:
+ switch (sel) {
+ case CP0_REG02__ENTRYLO0:
+ gen_helper_dmtc0_entrylo0(cpu_env, arg);
+ register_name = "EntryLo0";
+ break;
+ case CP0_REG02__TCSTATUS:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
+ register_name = "TCStatus";
+ break;
+ case CP0_REG02__TCBIND:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
+ register_name = "TCBind";
+ break;
+ case CP0_REG02__TCRESTART:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
+ register_name = "TCRestart";
+ break;
+ case CP0_REG02__TCHALT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
+ register_name = "TCHalt";
+ break;
+ case CP0_REG02__TCCONTEXT:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
+ register_name = "TCContext";
+ break;
+ case CP0_REG02__TCSCHEDULE:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
+ register_name = "TCSchedule";
+ break;
+ case CP0_REG02__TCSCHEFBACK:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
+ register_name = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_03:
+ switch (sel) {
+ case CP0_REG03__ENTRYLO1:
+ gen_helper_dmtc0_entrylo1(cpu_env, arg);
+ register_name = "EntryLo1";
+ break;
+ case CP0_REG03__GLOBALNUM:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ register_name = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_04:
+ switch (sel) {
+ case CP0_REG04__CONTEXT:
+ gen_helper_mtc0_context(cpu_env, arg);
+ register_name = "Context";
+ break;
+ case CP0_REG04__CONTEXTCONFIG:
+ /* SmartMIPS ASE */
+ /* gen_helper_dmtc0_contextconfig(arg); */
+ register_name = "ContextConfig";
+ goto cp0_unimplemented;
+ case CP0_REG04__USERLOCAL:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ register_name = "UserLocal";
+ break;
+ case CP0_REG04__MMID:
+ CP0_CHECK(ctx->mi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MemoryMapID));
+ register_name = "MMID";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_05:
+ switch (sel) {
+ case CP0_REG05__PAGEMASK:
+ gen_helper_mtc0_pagemask(cpu_env, arg);
+ register_name = "PageMask";
+ break;
+ case CP0_REG05__PAGEGRAIN:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
+ register_name = "PageGrain";
+ break;
+ case CP0_REG05__SEGCTL0:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl0(cpu_env, arg);
+ register_name = "SegCtl0";
+ break;
+ case CP0_REG05__SEGCTL1:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl1(cpu_env, arg);
+ register_name = "SegCtl1";
+ break;
+ case CP0_REG05__SEGCTL2:
+ CP0_CHECK(ctx->sc);
+ gen_helper_mtc0_segctl2(cpu_env, arg);
+ register_name = "SegCtl2";
+ break;
+ case CP0_REG05__PWBASE:
+ check_pw(ctx);
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase));
+ register_name = "PWBase";
+ break;
+ case CP0_REG05__PWFIELD:
+ check_pw(ctx);
+ gen_helper_mtc0_pwfield(cpu_env, arg);
+ register_name = "PWField";
+ break;
+ case CP0_REG05__PWSIZE:
+ check_pw(ctx);
+ gen_helper_mtc0_pwsize(cpu_env, arg);
+ register_name = "PWSize";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_06:
+ switch (sel) {
+ case CP0_REG06__WIRED:
+ gen_helper_mtc0_wired(cpu_env, arg);
+ register_name = "Wired";
+ break;
+ case CP0_REG06__SRSCONF0:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
+ register_name = "SRSConf0";
+ break;
+ case CP0_REG06__SRSCONF1:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
+ register_name = "SRSConf1";
+ break;
+ case CP0_REG06__SRSCONF2:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
+ register_name = "SRSConf2";
+ break;
+ case CP0_REG06__SRSCONF3:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
+ register_name = "SRSConf3";
+ break;
+ case CP0_REG06__SRSCONF4:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
+ register_name = "SRSConf4";
+ break;
+ case CP0_REG06__PWCTL:
+ check_pw(ctx);
+ gen_helper_mtc0_pwctl(cpu_env, arg);
+ register_name = "PWCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_07:
+ switch (sel) {
+ case CP0_REG07__HWRENA:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_08:
+ switch (sel) {
+ case CP0_REG08__BADVADDR:
+ /* ignored */
+ register_name = "BadVAddr";
+ break;
+ case CP0_REG08__BADINSTR:
+ /* ignored */
+ register_name = "BadInstr";
+ break;
+ case CP0_REG08__BADINSTRP:
+ /* ignored */
+ register_name = "BadInstrP";
+ break;
+ case CP0_REG08__BADINSTRX:
+ /* ignored */
+ register_name = "BadInstrX";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_09:
+ switch (sel) {
+ case CP0_REG09__COUNT:
+ gen_helper_mtc0_count(cpu_env, arg);
+ register_name = "Count";
+ break;
+ case CP0_REG09__SAARI:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mtc0_saari(cpu_env, arg);
+ register_name = "SAARI";
+ break;
+ case CP0_REG09__SAAR:
+ CP0_CHECK(ctx->saar);
+ gen_helper_mtc0_saar(cpu_env, arg);
+ register_name = "SAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REGISTER_10:
+ switch (sel) {
+ case CP0_REG10__ENTRYHI:
+ gen_helper_mtc0_entryhi(cpu_env, arg);
+ register_name = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_11:
+ switch (sel) {
+ case CP0_REG11__COMPARE:
+ gen_helper_mtc0_compare(cpu_env, arg);
+ register_name = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REGISTER_12:
+ switch (sel) {
+ case CP0_REG12__STATUS:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(cpu_env, arg);
+ /* DISAS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Status";
+ break;
+ case CP0_REG12__INTCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_intctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "IntCtl";
+ break;
+ case CP0_REG12__SRSCTL:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "SRSCtl";
+ break;
+ case CP0_REG12__SRSMAP:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_13:
+ switch (sel) {
+ case CP0_REG13__CAUSE:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_cause(cpu_env, arg);
+ /*
+ * Stop translation as we may have triggered an interrupt.
+ * DISAS_STOP isn't sufficient, we need to ensure we break out of
+ * translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_14:
+ switch (sel) {
+ case CP0_REG14__EPC:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ register_name = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_15:
+ switch (sel) {
+ case CP0_REG15__PRID:
+ /* ignored */
+ register_name = "PRid";
+ break;
+ case CP0_REG15__EBASE:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_helper_mtc0_ebase(cpu_env, arg);
+ register_name = "EBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_16:
+ switch (sel) {
+ case CP0_REG16__CONFIG:
+ gen_helper_mtc0_config0(cpu_env, arg);
+ register_name = "Config";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG1:
+ /* ignored, read only */
+ register_name = "Config1";
+ break;
+ case CP0_REG16__CONFIG2:
+ gen_helper_mtc0_config2(cpu_env, arg);
+ register_name = "Config2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG3:
+ gen_helper_mtc0_config3(cpu_env, arg);
+ register_name = "Config3";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case CP0_REG16__CONFIG4:
+ /* currently ignored */
+ register_name = "Config4";
+ break;
+ case CP0_REG16__CONFIG5:
+ gen_helper_mtc0_config5(cpu_env, arg);
+ register_name = "Config5";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ register_name = "Invalid config selector";
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_17:
+ switch (sel) {
+ case CP0_REG17__LLADDR:
+ gen_helper_mtc0_lladdr(cpu_env, arg);
+ register_name = "LLAddr";
+ break;
+ case CP0_REG17__MAAR:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maar(cpu_env, arg);
+ register_name = "MAAR";
+ break;
+ case CP0_REG17__MAARI:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maari(cpu_env, arg);
+ register_name = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_18:
+ switch (sel) {
+ case CP0_REG18__WATCHLO0:
+ case CP0_REG18__WATCHLO1:
+ case CP0_REG18__WATCHLO2:
+ case CP0_REG18__WATCHLO3:
+ case CP0_REG18__WATCHLO4:
+ case CP0_REG18__WATCHLO5:
+ case CP0_REG18__WATCHLO6:
+ case CP0_REG18__WATCHLO7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
+ register_name = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_19:
+ switch (sel) {
+ case CP0_REG19__WATCHHI0:
+ case CP0_REG19__WATCHHI1:
+ case CP0_REG19__WATCHHI2:
+ case CP0_REG19__WATCHHI3:
+ case CP0_REG19__WATCHHI4:
+ case CP0_REG19__WATCHHI5:
+ case CP0_REG19__WATCHHI6:
+ case CP0_REG19__WATCHHI7:
+ CP0_CHECK(ctx->CP0_Config1 & (1 << CP0C1_WR));
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
+ register_name = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_20:
+ switch (sel) {
+ case CP0_REG20__XCONTEXT:
+ check_insn(ctx, ISA_MIPS3);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
+ register_name = "XContext";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS_R6));
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_framemask(cpu_env, arg);
+ register_name = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_22:
+ /* ignored */
+ register_name = "Diagnostic"; /* implementation dependent */
+ break;
+ case CP0_REGISTER_23:
+ switch (sel) {
+ case CP0_REG23__DEBUG:
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ /* DISAS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ register_name = "Debug";
+ break;
+ case CP0_REG23__TRACECONTROL:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracecontrol(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceControl";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACECONTROL2:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracecontrol2(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceControl2";
+ goto cp0_unimplemented;
+ case CP0_REG23__USERTRACEDATA1:
+ /* PDtrace support */
+ /* gen_helper_mtc0_usertracedata1(cpu_env, arg);*/
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "UserTraceData1";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEIBPC:
+ /* PDtrace support */
+ /* gen_helper_mtc0_traceibpc(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceIBPC";
+ goto cp0_unimplemented;
+ case CP0_REG23__TRACEDBPC:
+ /* PDtrace support */
+ /* gen_helper_mtc0_tracedbpc(cpu_env, arg); */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "TraceDBPC";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_24:
+ switch (sel) {
+ case CP0_REG24__DEPC:
+ /* EJTAG support */
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ register_name = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_25:
+ switch (sel) {
+ case CP0_REG25__PERFCTL0:
+ gen_helper_mtc0_performance0(cpu_env, arg);
+ register_name = "Performance0";
+ break;
+ case CP0_REG25__PERFCNT0:
+ /* gen_helper_mtc0_performance1(cpu_env, arg); */
+ register_name = "Performance1";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL1:
+ /* gen_helper_mtc0_performance2(cpu_env, arg); */
+ register_name = "Performance2";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT1:
+ /* gen_helper_mtc0_performance3(cpu_env, arg); */
+ register_name = "Performance3";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL2:
+ /* gen_helper_mtc0_performance4(cpu_env, arg); */
+ register_name = "Performance4";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT2:
+ /* gen_helper_mtc0_performance5(cpu_env, arg); */
+ register_name = "Performance5";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCTL3:
+ /* gen_helper_mtc0_performance6(cpu_env, arg); */
+ register_name = "Performance6";
+ goto cp0_unimplemented;
+ case CP0_REG25__PERFCNT3:
+ /* gen_helper_mtc0_performance7(cpu_env, arg); */
+ register_name = "Performance7";
+ goto cp0_unimplemented;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_26:
+ switch (sel) {
+ case CP0_REG26__ERRCTL:
+ gen_helper_mtc0_errctl(cpu_env, arg);
+ ctx->base.is_jmp = DISAS_STOP;
+ register_name = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_27:
+ switch (sel) {
+ case CP0_REG27__CACHERR:
+ /* ignored */
+ register_name = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_28:
+ switch (sel) {
+ case CP0_REG28__TAGLO:
+ case CP0_REG28__TAGLO1:
+ case CP0_REG28__TAGLO2:
+ case CP0_REG28__TAGLO3:
+ gen_helper_mtc0_taglo(cpu_env, arg);
+ register_name = "TagLo";
+ break;
+ case CP0_REG28__DATALO:
+ case CP0_REG28__DATALO1:
+ case CP0_REG28__DATALO2:
+ case CP0_REG28__DATALO3:
+ gen_helper_mtc0_datalo(cpu_env, arg);
+ register_name = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_29:
+ switch (sel) {
+ case CP0_REG29__TAGHI:
+ case CP0_REG29__TAGHI1:
+ case CP0_REG29__TAGHI2:
+ case CP0_REG29__TAGHI3:
+ gen_helper_mtc0_taghi(cpu_env, arg);
+ register_name = "TagHi";
+ break;
+ case CP0_REG29__DATAHI:
+ case CP0_REG29__DATAHI1:
+ case CP0_REG29__DATAHI2:
+ case CP0_REG29__DATAHI3:
+ gen_helper_mtc0_datahi(cpu_env, arg);
+ register_name = "DataHi";
+ break;
+ default:
+ register_name = "invalid sel";
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_30:
+ switch (sel) {
+ case CP0_REG30__ERROREPC:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ register_name = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case CP0_REGISTER_31:
+ switch (sel) {
+ case CP0_REG31__DESAVE:
+ /* EJTAG support */
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ register_name = "DESAVE";
+ break;
+ case CP0_REG31__KSCRATCH1:
+ case CP0_REG31__KSCRATCH2:
+ case CP0_REG31__KSCRATCH3:
+ case CP0_REG31__KSCRATCH4:
+ case CP0_REG31__KSCRATCH5:
+ case CP0_REG31__KSCRATCH6:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel - 2]));
+ register_name = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ trace_mips_translate_c0("dmtc0", register_name, reg, sel);
+
+ /* For simplicity assume that all writes can cause interrupts. */
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ /*
+ * DISAS_STOP isn't sufficient, we need to ensure we break out of
+ * translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ }
+ return;
+
+cp0_unimplemented:
+ qemu_log_mask(LOG_UNIMP, "dmtc0 %s (reg %d sel %d)\n",
+ register_name, reg, sel);
+}
+#endif /* TARGET_MIPS64 */
+
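+/*
+ * MFTR (MT ASE): read a register of the TC selected by
+ * VPEControl.TargTC.  u == 0 selects the CP0 file; otherwise sel picks
+ * the GPRs, the LO/HI/ACX accumulators and DSP control, or the FPU,
+ * with h choosing the high 32-bit half of an FPR.
+ */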
+static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
+ int u, int sel, int h)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new();
+
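+    /*
+     * Accessing a TC bound to another VPE requires MVP privilege, and
+     * TargTC must reference an implemented TC (at most MVPConf0.PTC);
+     * otherwise the read yields -1.
+     */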
+ if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
+ ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
+ (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) {
+ tcg_gen_movi_tl(t0, -1);
+ } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) >
+ (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) {
+ tcg_gen_movi_tl(t0, -1);
+ } else if (u == 0) {
+ switch (rt) {
+ case 1:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_vpecontrol(t0, cpu_env);
+ break;
+ case 2:
+ gen_helper_mftc0_vpeconf0(t0, cpu_env);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_tcstatus(t0, cpu_env);
+ break;
+ case 2:
+ gen_helper_mftc0_tcbind(t0, cpu_env);
+ break;
+ case 3:
+ gen_helper_mftc0_tcrestart(t0, cpu_env);
+ break;
+ case 4:
+ gen_helper_mftc0_tchalt(t0, cpu_env);
+ break;
+ case 5:
+ gen_helper_mftc0_tccontext(t0, cpu_env);
+ break;
+ case 6:
+ gen_helper_mftc0_tcschedule(t0, cpu_env);
+ break;
+ case 7:
+ gen_helper_mftc0_tcschefback(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_entryhi(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_status(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_cause(t0, cpu_env);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_epc(t0, cpu_env);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_ebase(t0, cpu_env);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_debug(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ }
+ } else {
+ switch (sel) {
+ /* GPR registers. */
+ case 0:
+ gen_helper_1e0i(mftgpr, t0, rt);
+ break;
+ /* Auxiliary CPU registers */
+ case 1:
+ switch (rt) {
+ case 0:
+ gen_helper_1e0i(mftlo, t0, 0);
+ break;
+ case 1:
+ gen_helper_1e0i(mfthi, t0, 0);
+ break;
+ case 2:
+ gen_helper_1e0i(mftacx, t0, 0);
+ break;
+ case 4:
+ gen_helper_1e0i(mftlo, t0, 1);
+ break;
+ case 5:
+ gen_helper_1e0i(mfthi, t0, 1);
+ break;
+ case 6:
+ gen_helper_1e0i(mftacx, t0, 1);
+ break;
+ case 8:
+ gen_helper_1e0i(mftlo, t0, 2);
+ break;
+ case 9:
+ gen_helper_1e0i(mfthi, t0, 2);
+ break;
+ case 10:
+ gen_helper_1e0i(mftacx, t0, 2);
+ break;
+ case 12:
+ gen_helper_1e0i(mftlo, t0, 3);
+ break;
+ case 13:
+ gen_helper_1e0i(mfthi, t0, 3);
+ break;
+ case 14:
+ gen_helper_1e0i(mftacx, t0, 3);
+ break;
+ case 16:
+ gen_helper_mftdsp(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ }
+ break;
+ /* Floating point (COP1). */
+ case 2:
+ /* XXX: For now we support only a single FPU context. */
+ if (h == 0) {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ } else {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case 3:
+ /* XXX: For now we support only a single FPU context. */
+ gen_helper_1e0i(cfc1, t0, rt);
+ break;
+ /* COP2: Not implemented. */
+ case 4:
+ case 5:
+ /* fall through */
+ default:
+ goto die;
+ }
+ }
+ trace_mips_translate_tr("mftr", rt, u, sel, h);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ return;
+
+die:
+ tcg_temp_free(t0);
+ LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h);
+ gen_reserved_instruction(ctx);
+}
+
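+/*
+ * MTTR (MT ASE): write-side counterpart of gen_mftr().  The same
+ * cross-VPE access checks apply, but a disallowed access degenerates
+ * to a NOP instead of producing -1.
+ */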
+static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
+ int u, int sel, int h)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new();
+
+ gen_load_gpr(t0, rt);
+ if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
+ ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
+ (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) {
+ /* NOP */
+ ;
+ } else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) >
+ (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) {
+ /* NOP */
+ ;
+ } else if (u == 0) {
+ switch (rd) {
+ case 1:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_vpecontrol(cpu_env, t0);
+ break;
+ case 2:
+ gen_helper_mttc0_vpeconf0(cpu_env, t0);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_tcstatus(cpu_env, t0);
+ break;
+ case 2:
+ gen_helper_mttc0_tcbind(cpu_env, t0);
+ break;
+ case 3:
+ gen_helper_mttc0_tcrestart(cpu_env, t0);
+ break;
+ case 4:
+ gen_helper_mttc0_tchalt(cpu_env, t0);
+ break;
+ case 5:
+ gen_helper_mttc0_tccontext(cpu_env, t0);
+ break;
+ case 6:
+ gen_helper_mttc0_tcschedule(cpu_env, t0);
+ break;
+ case 7:
+ gen_helper_mttc0_tcschefback(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_entryhi(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_status(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_cause(cpu_env, t0);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_ebase(cpu_env, t0);
+ break;
+ default:
+                goto die;
+ }
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_debug(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ }
+ } else {
+ switch (sel) {
+ /* GPR registers. */
+ case 0:
+ gen_helper_0e1i(mttgpr, t0, rd);
+ break;
+ /* Auxiliary CPU registers */
+ case 1:
+ switch (rd) {
+ case 0:
+ gen_helper_0e1i(mttlo, t0, 0);
+ break;
+ case 1:
+ gen_helper_0e1i(mtthi, t0, 0);
+ break;
+ case 2:
+ gen_helper_0e1i(mttacx, t0, 0);
+ break;
+ case 4:
+ gen_helper_0e1i(mttlo, t0, 1);
+ break;
+ case 5:
+ gen_helper_0e1i(mtthi, t0, 1);
+ break;
+ case 6:
+ gen_helper_0e1i(mttacx, t0, 1);
+ break;
+ case 8:
+ gen_helper_0e1i(mttlo, t0, 2);
+ break;
+ case 9:
+ gen_helper_0e1i(mtthi, t0, 2);
+ break;
+ case 10:
+ gen_helper_0e1i(mttacx, t0, 2);
+ break;
+ case 12:
+ gen_helper_0e1i(mttlo, t0, 3);
+ break;
+ case 13:
+ gen_helper_0e1i(mtthi, t0, 3);
+ break;
+ case 14:
+ gen_helper_0e1i(mttacx, t0, 3);
+ break;
+ case 16:
+ gen_helper_mttdsp(cpu_env, t0);
+ break;
+ default:
+ goto die;
+ }
+ break;
+ /* Floating point (COP1). */
+ case 2:
+ /* XXX: For now we support only a single FPU context. */
+ if (h == 0) {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, rd);
+ tcg_temp_free_i32(fp0);
+ } else {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32h(ctx, fp0, rd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case 3:
+ /* XXX: For now we support only a single FPU context. */
+ gen_helper_0e2i(ctc1, t0, tcg_constant_i32(rd), rt);
+ /* Stop translation as we may have changed hflags */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ /* COP2: Not implemented. */
+ case 4:
+ case 5:
+ /* fall through */
+ default:
+ goto die;
+ }
+ }
+ trace_mips_translate_tr("mttr", rd, u, sel, h);
+ tcg_temp_free(t0);
+ return;
+
+die:
+ tcg_temp_free(t0);
+ LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h);
+ gen_reserved_instruction(ctx);
+}
+
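+/*
+ * Decode the privileged CP0 group: register moves (MFC0/MTC0 and their
+ * 64-bit, high-half and MT variants), TLB maintenance ops, and
+ * ERET/DERET/WAIT.
+ */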
+static void gen_cp0(CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
+ int rt, int rd)
+{
+ const char *opn = "ldst";
+
+ check_cp0_enabled(ctx);
+ switch (opc) {
+ case OPC_MFC0:
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mfc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "mfc0";
+ break;
+ case OPC_MTC0:
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "mtc0";
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC0:
+ check_insn(ctx, ISA_MIPS3);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_dmfc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "dmfc0";
+ break;
+ case OPC_DMTC0:
+ check_insn(ctx, ISA_MIPS3);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "dmtc0";
+ break;
+#endif
+ case OPC_MFHC0:
+ check_mvh(ctx);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mfhc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "mfhc0";
+ break;
+ case OPC_MTHC0:
+ check_mvh(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "mthc0";
+ break;
+ case OPC_MFTR:
+ check_cp0_enabled(ctx);
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1,
+ ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
+ opn = "mftr";
+ break;
+ case OPC_MTTR:
+ check_cp0_enabled(ctx);
+ gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1,
+ ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
+ opn = "mttr";
+ break;
+ case OPC_TLBWI:
+ opn = "tlbwi";
+ if (!env->tlb->helper_tlbwi) {
+ goto die;
+ }
+ gen_helper_tlbwi(cpu_env);
+ break;
+ case OPC_TLBINV:
+ opn = "tlbinv";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinv) {
+ goto die;
+ }
+ gen_helper_tlbinv(cpu_env);
+ } /* treat as nop if TLBINV not supported */
+ break;
+ case OPC_TLBINVF:
+ opn = "tlbinvf";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinvf) {
+ goto die;
+ }
+ gen_helper_tlbinvf(cpu_env);
+ } /* treat as nop if TLBINV not supported */
+ break;
+ case OPC_TLBWR:
+ opn = "tlbwr";
+ if (!env->tlb->helper_tlbwr) {
+ goto die;
+ }
+ gen_helper_tlbwr(cpu_env);
+ break;
+ case OPC_TLBP:
+ opn = "tlbp";
+ if (!env->tlb->helper_tlbp) {
+ goto die;
+ }
+ gen_helper_tlbp(cpu_env);
+ break;
+ case OPC_TLBR:
+ opn = "tlbr";
+ if (!env->tlb->helper_tlbr) {
+ goto die;
+ }
+ gen_helper_tlbr(cpu_env);
+ break;
+ case OPC_ERET: /* OPC_ERETNC */
+ if ((ctx->insn_flags & ISA_MIPS_R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ } else {
+ int bit_shift = (ctx->hflags & MIPS_HFLAG_M16) ? 16 : 6;
+ if (ctx->opcode & (1 << bit_shift)) {
+ /* OPC_ERETNC */
+ opn = "eretnc";
+ check_insn(ctx, ISA_MIPS_R5);
+ gen_helper_eretnc(cpu_env);
+ } else {
+ /* OPC_ERET */
+ opn = "eret";
+ check_insn(ctx, ISA_MIPS2);
+ gen_helper_eret(cpu_env);
+ }
+ ctx->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case OPC_DERET:
+ opn = "deret";
+ check_insn(ctx, ISA_MIPS_R1);
+ if ((ctx->insn_flags & ISA_MIPS_R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ }
+ if (!(ctx->hflags & MIPS_HFLAG_DM)) {
+ MIPS_INVAL(opn);
+ gen_reserved_instruction(ctx);
+ } else {
+ gen_helper_deret(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case OPC_WAIT:
+ opn = "wait";
+ check_insn(ctx, ISA_MIPS3 | ISA_MIPS_R1);
+ if ((ctx->insn_flags & ISA_MIPS_R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ }
+ /* If we get an exception, we want to restart at next instruction */
+ ctx->base.pc_next += 4;
+ save_cpu_state(ctx, 1);
+ ctx->base.pc_next -= 4;
+ gen_helper_wait(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+ default:
+ die:
+ MIPS_INVAL(opn);
+ gen_reserved_instruction(ctx);
+ return;
+ }
+ (void)opn; /* avoid a compiler warning */
+}
+#endif /* !CONFIG_USER_ONLY */
+
+/* CP1 Branches (before delay slot) */
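+/*
+ * The branch-likely forms (BC1FL/BC1TL) set MIPS_HFLAG_BL so the delay
+ * slot is annulled when the branch is not taken; the ordinary forms
+ * set MIPS_HFLAG_BC.
+ */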
+static void gen_compute_branch1(DisasContext *ctx, uint32_t op,
+ int32_t cc, int32_t offset)
+{
+ target_ulong btarget;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ if ((ctx->insn_flags & ISA_MIPS_R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ if (cc != 0) {
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1);
+ }
+
+ btarget = ctx->base.pc_next + 4 + offset;
+
+ switch (op) {
+ case OPC_BC1F:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto not_likely;
+ case OPC_BC1FL:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto likely;
+ case OPC_BC1T:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto not_likely;
+ case OPC_BC1TL:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ likely:
+ ctx->hflags |= MIPS_HFLAG_BL;
+ break;
+ case OPC_BC1FANY2:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
+ tcg_gen_nand_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1TANY2:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1FANY4:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
+ tcg_gen_and_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 2));
+ tcg_gen_and_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
+ tcg_gen_nand_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1TANY4:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 2));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc + 3));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ not_likely:
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("cp1 cond branch");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+ ctx->btarget = btarget;
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ out:
+ tcg_temp_free_i32(t0);
+}
+
+/* R6 CP1 Branches */
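+/*
+ * R6 dropped the FCC-based conditions: BC1EQZ/BC1NEZ test bit 0 of
+ * FPR ft, loaded via gen_load_fpr64() below.
+ */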
+static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op,
+ int32_t ft, int32_t offset,
+ int delayslot_size)
+{
+ target_ulong btarget;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->base.pc_next);
+#endif
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ gen_load_fpr64(ctx, t0, ft);
+ tcg_gen_andi_i64(t0, t0, 1);
+
+ btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+
+ switch (op) {
+ case OPC_BC1EQZ:
+ tcg_gen_xori_i64(t0, t0, 1);
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case OPC_BC1NEZ:
+ /* t0 already set */
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("cp1 cond branch");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ tcg_gen_trunc_i64_tl(bcond, t0);
+
+ ctx->btarget = btarget;
+
+ switch (delayslot_size) {
+ case 2:
+ ctx->hflags |= MIPS_HFLAG_BDS16;
+ break;
+ case 4:
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ break;
+ }
+
+out:
+ tcg_temp_free_i64(t0);
+}
+
+/* Coprocessor 1 (FPU) */
+
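+/*
+ * CP1 arithmetic opcodes are identified by the 5-bit format field
+ * (instruction bits 25..21) together with the 6-bit function field
+ * (bits 5..0); FOP() packs both so one switch can dispatch on them.
+ */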
+#define FOP(func, fmt) (((fmt) << 21) | (func))
+
+enum fopcode {
+ OPC_ADD_S = FOP(0, FMT_S),
+ OPC_SUB_S = FOP(1, FMT_S),
+ OPC_MUL_S = FOP(2, FMT_S),
+ OPC_DIV_S = FOP(3, FMT_S),
+ OPC_SQRT_S = FOP(4, FMT_S),
+ OPC_ABS_S = FOP(5, FMT_S),
+ OPC_MOV_S = FOP(6, FMT_S),
+ OPC_NEG_S = FOP(7, FMT_S),
+ OPC_ROUND_L_S = FOP(8, FMT_S),
+ OPC_TRUNC_L_S = FOP(9, FMT_S),
+ OPC_CEIL_L_S = FOP(10, FMT_S),
+ OPC_FLOOR_L_S = FOP(11, FMT_S),
+ OPC_ROUND_W_S = FOP(12, FMT_S),
+ OPC_TRUNC_W_S = FOP(13, FMT_S),
+ OPC_CEIL_W_S = FOP(14, FMT_S),
+ OPC_FLOOR_W_S = FOP(15, FMT_S),
+ OPC_SEL_S = FOP(16, FMT_S),
+ OPC_MOVCF_S = FOP(17, FMT_S),
+ OPC_MOVZ_S = FOP(18, FMT_S),
+ OPC_MOVN_S = FOP(19, FMT_S),
+ OPC_SELEQZ_S = FOP(20, FMT_S),
+ OPC_RECIP_S = FOP(21, FMT_S),
+ OPC_RSQRT_S = FOP(22, FMT_S),
+ OPC_SELNEZ_S = FOP(23, FMT_S),
+ OPC_MADDF_S = FOP(24, FMT_S),
+ OPC_MSUBF_S = FOP(25, FMT_S),
+ OPC_RINT_S = FOP(26, FMT_S),
+ OPC_CLASS_S = FOP(27, FMT_S),
+ OPC_MIN_S = FOP(28, FMT_S),
+ OPC_RECIP2_S = FOP(28, FMT_S),
+ OPC_MINA_S = FOP(29, FMT_S),
+ OPC_RECIP1_S = FOP(29, FMT_S),
+ OPC_MAX_S = FOP(30, FMT_S),
+ OPC_RSQRT1_S = FOP(30, FMT_S),
+ OPC_MAXA_S = FOP(31, FMT_S),
+ OPC_RSQRT2_S = FOP(31, FMT_S),
+ OPC_CVT_D_S = FOP(33, FMT_S),
+ OPC_CVT_W_S = FOP(36, FMT_S),
+ OPC_CVT_L_S = FOP(37, FMT_S),
+ OPC_CVT_PS_S = FOP(38, FMT_S),
+ OPC_CMP_F_S = FOP(48, FMT_S),
+ OPC_CMP_UN_S = FOP(49, FMT_S),
+ OPC_CMP_EQ_S = FOP(50, FMT_S),
+ OPC_CMP_UEQ_S = FOP(51, FMT_S),
+ OPC_CMP_OLT_S = FOP(52, FMT_S),
+ OPC_CMP_ULT_S = FOP(53, FMT_S),
+ OPC_CMP_OLE_S = FOP(54, FMT_S),
+ OPC_CMP_ULE_S = FOP(55, FMT_S),
+ OPC_CMP_SF_S = FOP(56, FMT_S),
+ OPC_CMP_NGLE_S = FOP(57, FMT_S),
+ OPC_CMP_SEQ_S = FOP(58, FMT_S),
+ OPC_CMP_NGL_S = FOP(59, FMT_S),
+ OPC_CMP_LT_S = FOP(60, FMT_S),
+ OPC_CMP_NGE_S = FOP(61, FMT_S),
+ OPC_CMP_LE_S = FOP(62, FMT_S),
+ OPC_CMP_NGT_S = FOP(63, FMT_S),
+
+ OPC_ADD_D = FOP(0, FMT_D),
+ OPC_SUB_D = FOP(1, FMT_D),
+ OPC_MUL_D = FOP(2, FMT_D),
+ OPC_DIV_D = FOP(3, FMT_D),
+ OPC_SQRT_D = FOP(4, FMT_D),
+ OPC_ABS_D = FOP(5, FMT_D),
+ OPC_MOV_D = FOP(6, FMT_D),
+ OPC_NEG_D = FOP(7, FMT_D),
+ OPC_ROUND_L_D = FOP(8, FMT_D),
+ OPC_TRUNC_L_D = FOP(9, FMT_D),
+ OPC_CEIL_L_D = FOP(10, FMT_D),
+ OPC_FLOOR_L_D = FOP(11, FMT_D),
+ OPC_ROUND_W_D = FOP(12, FMT_D),
+ OPC_TRUNC_W_D = FOP(13, FMT_D),
+ OPC_CEIL_W_D = FOP(14, FMT_D),
+ OPC_FLOOR_W_D = FOP(15, FMT_D),
+ OPC_SEL_D = FOP(16, FMT_D),
+ OPC_MOVCF_D = FOP(17, FMT_D),
+ OPC_MOVZ_D = FOP(18, FMT_D),
+ OPC_MOVN_D = FOP(19, FMT_D),
+ OPC_SELEQZ_D = FOP(20, FMT_D),
+ OPC_RECIP_D = FOP(21, FMT_D),
+ OPC_RSQRT_D = FOP(22, FMT_D),
+ OPC_SELNEZ_D = FOP(23, FMT_D),
+ OPC_MADDF_D = FOP(24, FMT_D),
+ OPC_MSUBF_D = FOP(25, FMT_D),
+ OPC_RINT_D = FOP(26, FMT_D),
+ OPC_CLASS_D = FOP(27, FMT_D),
+ OPC_MIN_D = FOP(28, FMT_D),
+ OPC_RECIP2_D = FOP(28, FMT_D),
+ OPC_MINA_D = FOP(29, FMT_D),
+ OPC_RECIP1_D = FOP(29, FMT_D),
+ OPC_MAX_D = FOP(30, FMT_D),
+ OPC_RSQRT1_D = FOP(30, FMT_D),
+ OPC_MAXA_D = FOP(31, FMT_D),
+ OPC_RSQRT2_D = FOP(31, FMT_D),
+ OPC_CVT_S_D = FOP(32, FMT_D),
+ OPC_CVT_W_D = FOP(36, FMT_D),
+ OPC_CVT_L_D = FOP(37, FMT_D),
+ OPC_CMP_F_D = FOP(48, FMT_D),
+ OPC_CMP_UN_D = FOP(49, FMT_D),
+ OPC_CMP_EQ_D = FOP(50, FMT_D),
+ OPC_CMP_UEQ_D = FOP(51, FMT_D),
+ OPC_CMP_OLT_D = FOP(52, FMT_D),
+ OPC_CMP_ULT_D = FOP(53, FMT_D),
+ OPC_CMP_OLE_D = FOP(54, FMT_D),
+ OPC_CMP_ULE_D = FOP(55, FMT_D),
+ OPC_CMP_SF_D = FOP(56, FMT_D),
+ OPC_CMP_NGLE_D = FOP(57, FMT_D),
+ OPC_CMP_SEQ_D = FOP(58, FMT_D),
+ OPC_CMP_NGL_D = FOP(59, FMT_D),
+ OPC_CMP_LT_D = FOP(60, FMT_D),
+ OPC_CMP_NGE_D = FOP(61, FMT_D),
+ OPC_CMP_LE_D = FOP(62, FMT_D),
+ OPC_CMP_NGT_D = FOP(63, FMT_D),
+
+ OPC_CVT_S_W = FOP(32, FMT_W),
+ OPC_CVT_D_W = FOP(33, FMT_W),
+ OPC_CVT_S_L = FOP(32, FMT_L),
+ OPC_CVT_D_L = FOP(33, FMT_L),
+ OPC_CVT_PS_PW = FOP(38, FMT_W),
+
+ OPC_ADD_PS = FOP(0, FMT_PS),
+ OPC_SUB_PS = FOP(1, FMT_PS),
+ OPC_MUL_PS = FOP(2, FMT_PS),
+ OPC_DIV_PS = FOP(3, FMT_PS),
+ OPC_ABS_PS = FOP(5, FMT_PS),
+ OPC_MOV_PS = FOP(6, FMT_PS),
+ OPC_NEG_PS = FOP(7, FMT_PS),
+ OPC_MOVCF_PS = FOP(17, FMT_PS),
+ OPC_MOVZ_PS = FOP(18, FMT_PS),
+ OPC_MOVN_PS = FOP(19, FMT_PS),
+ OPC_ADDR_PS = FOP(24, FMT_PS),
+ OPC_MULR_PS = FOP(26, FMT_PS),
+ OPC_RECIP2_PS = FOP(28, FMT_PS),
+ OPC_RECIP1_PS = FOP(29, FMT_PS),
+ OPC_RSQRT1_PS = FOP(30, FMT_PS),
+ OPC_RSQRT2_PS = FOP(31, FMT_PS),
+
+ OPC_CVT_S_PU = FOP(32, FMT_PS),
+ OPC_CVT_PW_PS = FOP(36, FMT_PS),
+ OPC_CVT_S_PL = FOP(40, FMT_PS),
+ OPC_PLL_PS = FOP(44, FMT_PS),
+ OPC_PLU_PS = FOP(45, FMT_PS),
+ OPC_PUL_PS = FOP(46, FMT_PS),
+ OPC_PUU_PS = FOP(47, FMT_PS),
+ OPC_CMP_F_PS = FOP(48, FMT_PS),
+ OPC_CMP_UN_PS = FOP(49, FMT_PS),
+ OPC_CMP_EQ_PS = FOP(50, FMT_PS),
+ OPC_CMP_UEQ_PS = FOP(51, FMT_PS),
+ OPC_CMP_OLT_PS = FOP(52, FMT_PS),
+ OPC_CMP_ULT_PS = FOP(53, FMT_PS),
+ OPC_CMP_OLE_PS = FOP(54, FMT_PS),
+ OPC_CMP_ULE_PS = FOP(55, FMT_PS),
+ OPC_CMP_SF_PS = FOP(56, FMT_PS),
+ OPC_CMP_NGLE_PS = FOP(57, FMT_PS),
+ OPC_CMP_SEQ_PS = FOP(58, FMT_PS),
+ OPC_CMP_NGL_PS = FOP(59, FMT_PS),
+ OPC_CMP_LT_PS = FOP(60, FMT_PS),
+ OPC_CMP_NGE_PS = FOP(61, FMT_PS),
+ OPC_CMP_LE_PS = FOP(62, FMT_PS),
+ OPC_CMP_NGT_PS = FOP(63, FMT_PS),
+};
+
+enum r6_f_cmp_op {
+ R6_OPC_CMP_AF_S = FOP(0, FMT_W),
+ R6_OPC_CMP_UN_S = FOP(1, FMT_W),
+ R6_OPC_CMP_EQ_S = FOP(2, FMT_W),
+ R6_OPC_CMP_UEQ_S = FOP(3, FMT_W),
+ R6_OPC_CMP_LT_S = FOP(4, FMT_W),
+ R6_OPC_CMP_ULT_S = FOP(5, FMT_W),
+ R6_OPC_CMP_LE_S = FOP(6, FMT_W),
+ R6_OPC_CMP_ULE_S = FOP(7, FMT_W),
+ R6_OPC_CMP_SAF_S = FOP(8, FMT_W),
+ R6_OPC_CMP_SUN_S = FOP(9, FMT_W),
+ R6_OPC_CMP_SEQ_S = FOP(10, FMT_W),
+ R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W),
+ R6_OPC_CMP_SLT_S = FOP(12, FMT_W),
+ R6_OPC_CMP_SULT_S = FOP(13, FMT_W),
+ R6_OPC_CMP_SLE_S = FOP(14, FMT_W),
+ R6_OPC_CMP_SULE_S = FOP(15, FMT_W),
+ R6_OPC_CMP_OR_S = FOP(17, FMT_W),
+ R6_OPC_CMP_UNE_S = FOP(18, FMT_W),
+ R6_OPC_CMP_NE_S = FOP(19, FMT_W),
+ R6_OPC_CMP_SOR_S = FOP(25, FMT_W),
+ R6_OPC_CMP_SUNE_S = FOP(26, FMT_W),
+ R6_OPC_CMP_SNE_S = FOP(27, FMT_W),
+
+ R6_OPC_CMP_AF_D = FOP(0, FMT_L),
+ R6_OPC_CMP_UN_D = FOP(1, FMT_L),
+ R6_OPC_CMP_EQ_D = FOP(2, FMT_L),
+ R6_OPC_CMP_UEQ_D = FOP(3, FMT_L),
+ R6_OPC_CMP_LT_D = FOP(4, FMT_L),
+ R6_OPC_CMP_ULT_D = FOP(5, FMT_L),
+ R6_OPC_CMP_LE_D = FOP(6, FMT_L),
+ R6_OPC_CMP_ULE_D = FOP(7, FMT_L),
+ R6_OPC_CMP_SAF_D = FOP(8, FMT_L),
+ R6_OPC_CMP_SUN_D = FOP(9, FMT_L),
+ R6_OPC_CMP_SEQ_D = FOP(10, FMT_L),
+ R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L),
+ R6_OPC_CMP_SLT_D = FOP(12, FMT_L),
+ R6_OPC_CMP_SULT_D = FOP(13, FMT_L),
+ R6_OPC_CMP_SLE_D = FOP(14, FMT_L),
+ R6_OPC_CMP_SULE_D = FOP(15, FMT_L),
+ R6_OPC_CMP_OR_D = FOP(17, FMT_L),
+ R6_OPC_CMP_UNE_D = FOP(18, FMT_L),
+ R6_OPC_CMP_NE_D = FOP(19, FMT_L),
+ R6_OPC_CMP_SOR_D = FOP(25, FMT_L),
+ R6_OPC_CMP_SUNE_D = FOP(26, FMT_L),
+ R6_OPC_CMP_SNE_D = FOP(27, FMT_L),
+};
+
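+/* Moves between GPRs and CP1 registers, including the control regs. */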
+static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs)
+{
+ TCGv t0 = tcg_temp_new();
+
+ switch (opc) {
+ case OPC_MFC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_MTC1:
+ gen_load_gpr(t0, rt);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, fs);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CFC1:
+ gen_helper_1e0i(cfc1, t0, fs);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_CTC1:
+ gen_load_gpr(t0, rt);
+ save_cpu_state(ctx, 0);
+ gen_helper_0e2i(ctc1, t0, tcg_constant_i32(fs), rt);
+ /* Stop translation as we may have changed hflags */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC1:
+ gen_load_fpr64(ctx, t0, fs);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DMTC1:
+ gen_load_gpr(t0, rt);
+ gen_store_fpr64(ctx, t0, fs);
+ break;
+#endif
+ case OPC_MFHC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_MTHC1:
+ gen_load_gpr(t0, rt);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32h(ctx, fp0, fs);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ default:
+ MIPS_INVAL("cp1 move");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ out:
+ tcg_temp_free(t0);
+}
+
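+/*
+ * MOVF/MOVT: conditionally copy GPR rs into rd based on FP condition
+ * code cc; tf selects whether the move happens on true or on false.
+ */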
+static void gen_movci(DisasContext *ctx, int rd, int rs, int cc, int tf)
+{
+ TCGLabel *l1;
+ TCGCond cond;
+ TCGv_i32 t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ if (tf) {
+ cond = TCG_COND_EQ;
+ } else {
+ cond = TCG_COND_NE;
+ }
+
+ l1 = gen_new_label();
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ gen_load_gpr(cpu_gpr[rd], rs);
+ gen_set_label(l1);
+}
+
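+/* MOVCF.fmt: conditional FPR-to-FPR move on condition code cc. */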
+static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc,
+ int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+
+ if (tf) {
+ cond = TCG_COND_EQ;
+ } else {
+ cond = TCG_COND_NE;
+ }
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(ctx, t0, fs);
+ gen_store_fpr32(ctx, t0, fd);
+ gen_set_label(l1);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_movcf_d(DisasContext *ctx, int fs, int fd, int cc,
+ int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 fp0;
+ TCGLabel *l1 = gen_new_label();
+
+ if (tf) {
+ cond = TCG_COND_EQ;
+ } else {
+ cond = TCG_COND_NE;
+ }
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+}
+
+static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd,
+ int cc, int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ if (tf) {
+ cond = TCG_COND_EQ;
+ } else {
+ cond = TCG_COND_NE;
+ }
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(ctx, t0, fs);
+ gen_store_fpr32(ctx, t0, fd);
+ gen_set_label(l1);
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc + 1));
+ tcg_gen_brcondi_i32(cond, t0, 0, l2);
+ gen_load_fpr32h(ctx, t0, fs);
+ gen_store_fpr32h(ctx, t0, fd);
+ tcg_temp_free_i32(t0);
+ gen_set_label(l2);
+}
+
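+/*
+ * R6 FP selects: SEL.fmt chooses ft or fs on bit 0 of fd, while
+ * SELEQZ.fmt/SELNEZ.fmt yield fs or zero depending on bit 0 of ft.
+ */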
+static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fd);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_S:
+ tcg_gen_andi_i32(fp0, fp0, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_s");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(t1);
+}
+
+static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i64 t1 = tcg_const_i64(0);
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fd);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_D:
+ tcg_gen_andi_i64(fp0, fp0, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_d");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(t1);
+}
+
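+/*
+ * Main dispatch for the CP1 arithmetic group.  The abs2008 and nan2008
+ * flags select between legacy MIPS and IEEE 754-2008 behaviour for
+ * ABS/NEG and the float-to-integer conversions.
+ */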
+static void gen_farith(DisasContext *ctx, enum fopcode op1,
+ int ft, int fs, int fd, int cc)
+{
+ uint32_t func = ctx->opcode & 0x3f;
+ switch (op1) {
+ case OPC_ADD_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SUB_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MUL_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_DIV_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SQRT_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_ABS_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i32(fp0, fp0, 0x7fffffffUL);
+ } else {
+ gen_helper_float_abs_s(fp0, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MOV_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_NEG_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i32(fp0, fp0, 1UL << 31);
+ } else {
+ gen_helper_float_chs_s(fp0, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_ROUND_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_round_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_TRUNC_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CEIL_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_FLOOR_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_ROUND_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_TRUNC_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CEIL_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_FLOOR_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SEL_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELEQZ_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELNEZ_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_MOVCF_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i32 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i32 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_RECIP_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_recip_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_RSQRT_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MADDF_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fd);
+ gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MSUBF_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fd);
+ gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_RINT_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rint_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CLASS_S:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_class_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MIN_S: /* OPC_RECIP2_S */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MIN_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RECIP2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MINA_S: /* OPC_RECIP1_S */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MINA_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RECIP1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_recip1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MAX_S: /* OPC_RSQRT1_S */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MAX_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RSQRT1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MAXA_S: /* OPC_RSQRT2_S */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MAXA_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RSQRT2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_CVT_D_S:
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_PS_S:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+ TCGv_i32 fp32_0 = tcg_temp_new_i32();
+ TCGv_i32 fp32_1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp32_0, fs);
+ gen_load_fpr32(ctx, fp32_1, ft);
+ tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0);
+ tcg_temp_free_i32(fp32_1);
+ tcg_temp_free_i32(fp32_0);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CMP_F_S:
+ case OPC_CMP_UN_S:
+ case OPC_CMP_EQ_S:
+ case OPC_CMP_UEQ_S:
+ case OPC_CMP_OLT_S:
+ case OPC_CMP_ULT_S:
+ case OPC_CMP_OLE_S:
+ case OPC_CMP_ULE_S:
+ case OPC_CMP_SF_S:
+ case OPC_CMP_NGLE_S:
+ case OPC_CMP_SEQ_S:
+ case OPC_CMP_NGL_S:
+ case OPC_CMP_LT_S:
+ case OPC_CMP_NGE_S:
+ case OPC_CMP_LE_S:
+ case OPC_CMP_NGT_S:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_s(ctx, func - 48, ft, fs, cc);
+ } else {
+ gen_cmp_s(ctx, func - 48, ft, fs, cc);
+ }
+ break;
+ case OPC_ADD_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUB_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MUL_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_DIV_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SQRT_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ABS_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i64(fp0, fp0, 0x7fffffffffffffffULL);
+ } else {
+ gen_helper_float_abs_d(fp0, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOV_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_NEG_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i64(fp0, fp0, 1ULL << 63);
+ } else {
+ gen_helper_float_chs_d(fp0, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ROUND_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_TRUNC_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CEIL_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_FLOOR_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ROUND_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_round_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_TRUNC_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CEIL_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_FLOOR_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_SEL_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELEQZ_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELNEZ_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_MOVCF_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_RECIP_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MADDF_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MSUBF_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RINT_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rint_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CLASS_D:
+ check_insn(ctx, ISA_MIPS_R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_class_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MIN_D: /* OPC_RECIP2_D */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MIN_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RECIP2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MINA_D: /* OPC_RECIP1_D */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MINA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RECIP1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MAX_D: /* OPC_RSQRT1_D */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MAX_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RSQRT1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MAXA_D: /* OPC_RSQRT2_D */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_MAXA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RSQRT2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_CMP_F_D:
+ case OPC_CMP_UN_D:
+ case OPC_CMP_EQ_D:
+ case OPC_CMP_UEQ_D:
+ case OPC_CMP_OLT_D:
+ case OPC_CMP_ULT_D:
+ case OPC_CMP_OLE_D:
+ case OPC_CMP_ULE_D:
+ case OPC_CMP_SF_D:
+ case OPC_CMP_NGLE_D:
+ case OPC_CMP_SEQ_D:
+ case OPC_CMP_NGL_D:
+ case OPC_CMP_LT_D:
+ case OPC_CMP_NGE_D:
+ case OPC_CMP_LE_D:
+ case OPC_CMP_NGT_D:
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_d(ctx, func - 48, ft, fs, cc);
+ } else {
+ gen_cmp_d(ctx, func - 48, ft, fs, cc);
+ }
+ break;
+ case OPC_CVT_S_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ gen_helper_float_cvts_d(fp32, cpu_env, fp64);
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_W:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_cvts_w(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_D_W:
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_S_L:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ gen_helper_float_cvts_l(fp32, cpu_env, fp64);
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_D_L:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_PS_PW:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MUL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ABS_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_abs_ps(fp0, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOV_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_NEG_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_chs_ps(fp0, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOVCF_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_PS:
+ check_ps(ctx);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_PS:
+ check_ps(ctx);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_ADDR_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, ft);
+ gen_load_fpr64(ctx, fp1, fs);
+ gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MULR_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, ft);
+ gen_load_fpr64(ctx, fp1, fs);
+ gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RECIP2_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RECIP1_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT1_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT2_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_PU:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_PW_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_PL:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_PLL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_store_fpr32h(ctx, fp0, fd);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PLU_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32h(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PUL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PUU_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_load_fpr32h(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_CMP_F_PS:
+ case OPC_CMP_UN_PS:
+ case OPC_CMP_EQ_PS:
+ case OPC_CMP_UEQ_PS:
+ case OPC_CMP_OLT_PS:
+ case OPC_CMP_ULT_PS:
+ case OPC_CMP_OLE_PS:
+ case OPC_CMP_ULE_PS:
+ case OPC_CMP_SF_PS:
+ case OPC_CMP_NGLE_PS:
+ case OPC_CMP_SEQ_PS:
+ case OPC_CMP_NGL_PS:
+ case OPC_CMP_LT_PS:
+ case OPC_CMP_NGE_PS:
+ case OPC_CMP_LE_PS:
+ case OPC_CMP_NGT_PS:
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_ps(ctx, func - 48, ft, fs, cc);
+ } else {
+ gen_cmp_ps(ctx, func - 48, ft, fs, cc);
+ }
+ break;
+ default:
+ MIPS_INVAL("farith");
+ gen_reserved_instruction(ctx);
+ return;
+ }
+}
+
+/* Coprocessor 3 (FPU indexed load/store and fused multiply-add) */
+static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
+ int fd, int fs, int base, int index)
+{
+ TCGv t0 = tcg_temp_new();
+
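+ /*
+ * Effective address is GPR[base] + GPR[index]; register 0 on either
+ * side short-circuits the add.
+ */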
+ if (base == 0) {
+ gen_load_gpr(t0, index);
+ } else if (index == 0) {
+ gen_load_gpr(t0, base);
+ } else {
+ gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]);
+ }
+ /*
+ * Don't do NOP if destination is zero: we must perform the actual
+ * memory access.
+ */
+ switch (opc) {
+ case OPC_LWXC1:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_LUXC1:
+ check_cp1_64bitmode(ctx);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SWXC1:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUXC1:
+ check_cp1_64bitmode(ctx);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
+ int fd, int fr, int fs, int ft)
+{
+ switch (opc) {
+ case OPC_ALNV_PS:
+ check_ps(ctx);
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv_i32 fp = tcg_temp_new_i32();
+ TCGv_i32 fph = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ gen_load_gpr(t0, fr);
+ tcg_gen_andi_tl(t0, t0, 0x7);
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ gen_load_fpr32(ctx, fp, fs);
+ gen_load_fpr32h(ctx, fph, fs);
+ gen_store_fpr32(ctx, fp, fd);
+ gen_store_fpr32h(ctx, fph, fd);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
+ tcg_temp_free(t0);
+ if (cpu_is_bigendian(ctx)) {
+ gen_load_fpr32(ctx, fp, fs);
+ gen_load_fpr32h(ctx, fph, ft);
+ gen_store_fpr32h(ctx, fp, fd);
+ gen_store_fpr32(ctx, fph, fd);
+ } else {
+ gen_load_fpr32h(ctx, fph, fs);
+ gen_load_fpr32(ctx, fp, ft);
+ gen_store_fpr32(ctx, fph, fd);
+ gen_store_fpr32h(ctx, fp, fd);
+ }
+ gen_set_label(l2);
+ tcg_temp_free_i32(fp);
+ tcg_temp_free_i32(fph);
+ }
+ break;
+ case OPC_MADD_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_MADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MSUB_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_MSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MSUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMADD_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_NMADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMSUB_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_NMSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMSUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ default:
+ MIPS_INVAL("flt3_arith");
+ gen_reserved_instruction(ctx);
+ return;
+ }
+}
+
+void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
+{
+ TCGv t0;
+
+#if !defined(CONFIG_USER_ONLY)
+ /*
+ * The Linux kernel will emulate rdhwr if it's not supported natively.
+ * Therefore only check the ISA in system mode.
+ */
+ check_insn(ctx, ISA_MIPS_R2);
+#endif
+ t0 = tcg_temp_new();
+
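+ /*
+ * RDHWR register numbers: 0 = CPUNum, 1 = SYNCI_Step, 2 = CC (cycle
+ * counter), 3 = CCRes, 4 = PerfCnt (R6), 5 = XNP (R6), 29 = UserLocal.
+ */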
+ switch (rd) {
+ case 0:
+ gen_helper_rdhwr_cpunum(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 1:
+ gen_helper_rdhwr_synci_step(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 2:
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdhwr_cc(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /*
+ * Break the TB to be able to take timer interrupts immediately
+ * after reading count. DISAS_STOP isn't sufficient; we need to
+ * break completely out of translated code.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+ case 3:
+ gen_helper_rdhwr_ccres(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS_R6);
+ if (sel != 0) {
+ /*
+ * Performance-counter registers other than control
+ * register 0 are not implemented.
+ */
+ generate_exception(ctx, EXCP_RI);
+ }
+ gen_helper_rdhwr_performance(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_helper_rdhwr_xnp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 29:
+#if defined(CONFIG_USER_ONLY)
+ tcg_gen_ld_tl(t0, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ gen_store_gpr(t0, rt);
+ break;
+#else
+ if ((ctx->hflags & MIPS_HFLAG_CP0) ||
+ (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
+ tcg_gen_ld_tl(t0, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ gen_store_gpr(t0, rt);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("rdhwr");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+static inline void clear_branch_hflags(DisasContext *ctx)
+{
+ ctx->hflags &= ~MIPS_HFLAG_BMASK;
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ save_cpu_state(ctx, 0);
+ } else {
+ /*
+ * It is not safe to save ctx->hflags here, as hflags may be changed
+ * at run time by the instruction in the delay / forbidden slot.
+ */
+ tcg_gen_andi_i32(hflags, hflags, ~MIPS_HFLAG_BMASK);
+ }
+}
+
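+/*
+ * Complete a branch whose delay slot (if any) has just been translated,
+ * dispatching on the branch type recorded in the hflags BMASK bits.
+ */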
+static void gen_branch(DisasContext *ctx, int insn_bytes)
+{
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
+ /* Branch completion */
+ clear_branch_hflags(ctx);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ /* FIXME: Need to clear can_do_io. */
+ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_FBNSLOT:
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + insn_bytes);
+ break;
+ case MIPS_HFLAG_B:
+ /* unconditional branch */
+ if (proc_hflags & MIPS_HFLAG_BX) {
+ tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16);
+ }
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ break;
+ case MIPS_HFLAG_BL:
+ /* branch-likely taken case */
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ break;
+ case MIPS_HFLAG_BC:
+ /* Conditional branch */
+ {
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ }
+ break;
+ case MIPS_HFLAG_BR:
+ /* unconditional branch to register */
+ if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(t0, btarget, 0x1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
+ tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
+ tcg_gen_or_i32(hflags, hflags, t1);
+ tcg_temp_free_i32(t1);
+
+ tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
+ } else {
+ tcg_gen_mov_tl(cpu_PC, btarget);
+ }
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ default:
+ LOG_DISAS("unknown branch 0x%x\n", proc_hflags);
+ gen_reserved_instruction(ctx);
+ }
+ }
+}
+
+/* Compact Branches */
+static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int32_t offset)
+{
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int m16_lowbit = (ctx->hflags & MIPS_HFLAG_M16) != 0;
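+ /* Bit 0 of a link address records the current ISA mode (MIPS16 / microMIPS). */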
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->base.pc_next);
+#endif
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* Load needed operands and calculate btarget */
+ switch (opc) {
+ /* compact branch */
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ if (rs <= rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+ }
+ break;
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 || rs == rt) {
+ /* OPC_BLEZALC, OPC_BGEZALC */
+ /* OPC_BGTZALC, OPC_BLTZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BC:
+ case OPC_BALC:
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ break;
+ case OPC_BEQZC:
+ case OPC_BNEZC:
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset);
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ TCGv tbase = tcg_temp_new();
+ TCGv toffset = tcg_constant_tl(offset);
+
+ gen_load_gpr(tbase, rt);
+ gen_op_addr_add(ctx, btarget, tbase, toffset);
+ tcg_temp_free(tbase);
+ }
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ if (bcond_compute == 0) {
+ /* Unconditional compact branch */
+ switch (opc) {
+ case OPC_JIALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+ /* Fallthrough */
+ case OPC_JIC:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_BALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->base.pc_next + 4 + m16_lowbit);
+ /* Fallthrough */
+ case OPC_BC:
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* Generate the branch here, as compact branches have no delay slot */
+ gen_branch(ctx, 4);
+ } else {
+ /* Conditional compact branch */
+ TCGLabel *fs = gen_new_label();
+ save_cpu_state(ctx, 0);
+
+ switch (opc) {
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
+ }
+ break;
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
+ }
+ break;
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (rs >= rt) {
+ /* OPC_BOVC, OPC_BNVC */
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv input_overflow = tcg_temp_new();
+
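+ /*
+ * Signed 32-bit overflow test for rs + rt: flag inputs that are
+ * not valid sign-extended 32-bit values, then detect overflow of
+ * the 32-bit sum via the usual sign-xor trick.
+ */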
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext32s_tl(t2, t0);
+ tcg_gen_setcond_tl(TCG_COND_NE, input_overflow, t2, t0);
+ tcg_gen_ext32s_tl(t3, t1);
+ tcg_gen_setcond_tl(TCG_COND_NE, t4, t3, t1);
+ tcg_gen_or_tl(input_overflow, input_overflow, t4);
+
+ tcg_gen_add_tl(t4, t2, t3);
+ tcg_gen_ext32s_tl(t4, t4);
+ tcg_gen_xor_tl(t2, t2, t3);
+ tcg_gen_xor_tl(t3, t4, t3);
+ tcg_gen_andc_tl(t2, t3, t2);
+ tcg_gen_setcondi_tl(TCG_COND_LT, t4, t2, 0);
+ tcg_gen_or_tl(t4, t4, input_overflow);
+ if (opc == OPC_BOVC) {
+ /* OPC_BOVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t4, 0, fs);
+ } else {
+ /* OPC_BNVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
+ }
+ tcg_temp_free(input_overflow);
+ tcg_temp_free(t4);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ } else if (rs < rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ if (opc == OPC_BEQZALC) {
+ /* OPC_BEQZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t1, 0, fs);
+ } else {
+ /* OPC_BNEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t1, 0, fs);
+ }
+ } else {
+ /* OPC_BEQC, OPC_BNEC */
+ if (opc == OPC_BEQC) {
+ /* OPC_BEQC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_EQ), t0, t1, fs);
+ } else {
+ /* OPC_BNEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_NE), t0, t1, fs);
+ }
+ }
+ break;
+ case OPC_BEQZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
+ break;
+ case OPC_BNEZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t0, 0, fs);
+ break;
+ default:
+ MIPS_INVAL("Compact conditional branch/jump");
+ gen_reserved_instruction(ctx);
+ goto out;
+ }
+
+ /* Generate the branch here, as compact branches have no delay slot */
+ gen_goto_tb(ctx, 1, ctx->btarget);
+ gen_set_label(fs);
+
+ ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+ }
+
+out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+void gen_addiupc(DisasContext *ctx, int rx, int imm,
+ int is_64_bit, int extended)
+{
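+ /* rx = PC-relative base + imm, sign-extended to 32 bits unless is_64_bit. */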
+ TCGv t0;
+
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ gen_reserved_instruction(ctx);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
+ tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+ if (!is_64_bit) {
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ }
+
+ tcg_temp_free(t0);
+}
+
+static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
+ int16_t offset)
+{
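+ /* Resolve base + offset, then defer the CACHE operation to the helper. */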
+ TCGv_i32 t0 = tcg_const_i32(op);
+ TCGv t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t1, base, offset);
+ gen_helper_cache(cpu_env, t1, t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(t0);
+}
+
+static inline bool is_uhi(int sdbbp_code)
+{
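+ /* UHI semihosting is requested with SDBBP code 1, and only in system mode. */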
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
+ return semihosting_enabled() && sdbbp_code == 1;
+#endif
+}
+
+#ifdef CONFIG_USER_ONLY
+/* The above should dead-code away any calls to this. */
+static inline void gen_helper_do_semihosting(void *env)
+{
+ g_assert_not_reached();
+}
+#endif
+
+void gen_ldxs(DisasContext *ctx, int base, int index, int rd)
+{
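+ /* Load a sign-extended word from GPR[base] + (GPR[index] << 2). */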
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, base);
+
+ if (index != 0) {
+ gen_load_gpr(t1, index);
+ tcg_gen_shli_tl(t1, t1, 2);
+ gen_op_addr_add(ctx, t0, t1, t0);
+ }
+
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_sync(int stype)
+{
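+ /*
+ * Map the SYNC stype field onto TCG memory-barrier flags; unrecognised
+ * stypes conservatively emit a full barrier.
+ */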
+ TCGBar tcg_mo = TCG_BAR_SC;
+
+ switch (stype) {
+ case 0x4: /* SYNC_WMB */
+ tcg_mo |= TCG_MO_ST_ST;
+ break;
+ case 0x10: /* SYNC_MB */
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ case 0x11: /* SYNC_ACQUIRE */
+ tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST;
+ break;
+ case 0x12: /* SYNC_RELEASE */
+ tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST;
+ break;
+ case 0x13: /* SYNC_RMB */
+ tcg_mo |= TCG_MO_LD_LD;
+ break;
+ default:
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ }
+
+ tcg_gen_mb(tcg_mo);
+}
+
+/* ISA extensions (ASEs) */
+
+/* MIPS16 extension to MIPS32 */
+#include "mips16e_translate.c.inc"
+
+/* microMIPS extension to MIPS32/MIPS64 */
+
+/*
+ * Values for microMIPS fmt field. Variable-width, depending on which
+ * formats the instruction supports.
+ */
+enum {
+ FMT_SD_S = 0,
+ FMT_SD_D = 1,
+
+ FMT_SDPS_S = 0,
+ FMT_SDPS_D = 1,
+ FMT_SDPS_PS = 2,
+
+ FMT_SWL_S = 0,
+ FMT_SWL_W = 1,
+ FMT_SWL_L = 2,
+
+ FMT_DWL_D = 0,
+ FMT_DWL_W = 1,
+ FMT_DWL_L = 2
+};
+
+#include "micromips_translate.c.inc"
+
+#include "nanomips_translate.c.inc"
+
+/* MIPSDSP functions. */
+static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc,
+ int rd, int base, int offset)
+{
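+ /*
+ * Indexed load: effective address is GPR[base] + GPR[offset];
+ * either register may be $zero, which avoids the add.
+ */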
+ TCGv t0;
+
+ check_dsp(ctx);
+ t0 = tcg_temp_new();
+
+ if (base == 0) {
+ gen_load_gpr(t0, offset);
+ } else if (offset == 0) {
+ gen_load_gpr(t0, base);
+ } else {
+ gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[offset]);
+ }
+
+ switch (opc) {
+ case OPC_LBUX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_LHX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_LWX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t0, rd);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_LDX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t0, rd);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2)
+{
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if (ret == 0) {
+ /* The destination is $zero: treat as NOP. */
+ return;
+ }
+
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
+ switch (op1) {
+ /* OPC_MULT_G_2E is equal to OPC_ADDUH_QB_DSP */
+ case OPC_MULT_G_2E:
+ check_dsp_r2(ctx);
+ switch (op2) {
+ case OPC_ADDUH_QB:
+ gen_helper_adduh_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDUH_R_QB:
+ gen_helper_adduh_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_PH:
+ gen_helper_addqh_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_R_PH:
+ gen_helper_addqh_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_W:
+ gen_helper_addqh_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_R_W:
+ gen_helper_addqh_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_QB:
+ gen_helper_subuh_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_R_QB:
+ gen_helper_subuh_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_PH:
+ gen_helper_subqh_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_R_PH:
+ gen_helper_subqh_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_W:
+ gen_helper_subqh_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_R_W:
+ gen_helper_subqh_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+ case OPC_ABSQ_S_PH_DSP:
+ switch (op2) {
+ case OPC_ABSQ_S_QB:
+ check_dsp_r2(ctx);
+ gen_helper_absq_s_qb(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_absq_s_ph(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_W:
+ check_dsp(ctx);
+ gen_helper_absq_s_w(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_PRECEQ_W_PHL:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0xFFFF0000);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_PRECEQ_W_PHR:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0x0000FFFF);
+ tcg_gen_shli_tl(cpu_gpr[ret], cpu_gpr[ret], 16);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_PRECEQU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbra(cpu_gpr[ret], v2_t);
+ break;
+ }
+ break;
+ case OPC_ADDU_QB_DSP:
+ switch (op2) {
+ case OPC_ADDQ_PH:
+ check_dsp(ctx);
+ gen_helper_addq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_addq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_W:
+ check_dsp(ctx);
+ gen_helper_addq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_QB:
+ check_dsp(ctx);
+ gen_helper_addu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_QB:
+ check_dsp(ctx);
+ gen_helper_addu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_PH:
+ check_dsp_r2(ctx);
+ gen_helper_addu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_PH:
+ check_dsp_r2(ctx);
+ gen_helper_addu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_PH:
+ check_dsp(ctx);
+ gen_helper_subq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_subq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_W:
+ check_dsp(ctx);
+ gen_helper_subq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_QB:
+ check_dsp(ctx);
+ gen_helper_subu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_QB:
+ check_dsp(ctx);
+ gen_helper_subu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_PH:
+ check_dsp_r2(ctx);
+ gen_helper_subu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_PH:
+ check_dsp_r2(ctx);
+ gen_helper_subu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDSC:
+ check_dsp(ctx);
+ gen_helper_addsc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDWC:
+ check_dsp(ctx);
+ gen_helper_addwc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MODSUB:
+ check_dsp(ctx);
+ gen_helper_modsub(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_RADDU_W_QB:
+ check_dsp(ctx);
+ gen_helper_raddu_w_qb(cpu_gpr[ret], v1_t);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_QB_DSP:
+ switch (op2) {
+ case OPC_PRECR_QB_PH:
+ check_dsp_r2(ctx);
+ gen_helper_precr_qb_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrq_qb_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECR_SRA_PH_W:
+ check_dsp_r2(ctx);
+ {
+ TCGv_i32 sa_t = tcg_const_i32(v2);
+ gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t,
+ cpu_gpr[ret]);
+ tcg_temp_free_i32(sa_t);
+ break;
+ }
+ case OPC_PRECR_SRA_R_PH_W:
+ check_dsp_r2(ctx);
+ {
+ TCGv_i32 sa_t = tcg_const_i32(v2);
+ gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t,
+ cpu_gpr[ret]);
+ tcg_temp_free_i32(sa_t);
+ break;
+ }
+ case OPC_PRECRQ_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_ph_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_RS_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_rs_ph_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PRECRQU_S_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrqu_s_qb_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ABSQ_S_QH_DSP:
+ switch (op2) {
+ case OPC_PRECEQ_L_PWL:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull);
+ break;
+ case OPC_PRECEQ_L_PWR:
+ check_dsp(ctx);
+ tcg_gen_shli_tl(cpu_gpr[ret], v2_t, 32);
+ break;
+ case OPC_PRECEQ_PW_QHL:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHR:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHLA:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHRA:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBLA:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBRA:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBLA:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBRA:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_ABSQ_S_OB:
+ check_dsp_r2(ctx);
+ gen_helper_absq_s_ob(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_absq_s_pw(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_absq_s_qh(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ }
+ break;
+ case OPC_ADDU_OB_DSP:
+ switch (op2) {
+ case OPC_RADDU_L_OB:
+ check_dsp(ctx);
+ gen_helper_raddu_l_ob(cpu_gpr[ret], v1_t);
+ break;
+ case OPC_SUBQ_PW:
+ check_dsp(ctx);
+ gen_helper_subq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_subq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_QH:
+ check_dsp(ctx);
+ gen_helper_subq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_subq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_OB:
+ check_dsp(ctx);
+ gen_helper_subu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_OB:
+ check_dsp(ctx);
+ gen_helper_subu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_QH:
+ check_dsp_r2(ctx);
+ gen_helper_subu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_QH:
+ check_dsp_r2(ctx);
+ gen_helper_subu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBUH_OB:
+ check_dsp_r2(ctx);
+ gen_helper_subuh_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_R_OB:
+ check_dsp_r2(ctx);
+ gen_helper_subuh_r_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQ_PW:
+ check_dsp(ctx);
+ gen_helper_addq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_addq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_QH:
+ check_dsp(ctx);
+ gen_helper_addq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_addq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_OB:
+ check_dsp(ctx);
+ gen_helper_addu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_OB:
+ check_dsp(ctx);
+ gen_helper_addu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_QH:
+ check_dsp_r2(ctx);
+ gen_helper_addu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_QH:
+ check_dsp_r2(ctx);
+ gen_helper_addu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDUH_OB:
+ check_dsp_r2(ctx);
+ gen_helper_adduh_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDUH_R_OB:
+ check_dsp_r2(ctx);
+ gen_helper_adduh_r_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_OB_DSP:
+ switch (op2) {
+ case OPC_PRECR_OB_QH:
+ check_dsp_r2(ctx);
+ gen_helper_precr_ob_qh(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECR_SRA_QH_PW:
+ check_dsp_r2(ctx);
+ {
+ TCGv_i32 ret_t = tcg_const_i32(ret);
+ gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t);
+ tcg_temp_free_i32(ret_t);
+ break;
+ }
+ case OPC_PRECR_SRA_R_QH_PW:
+ check_dsp_r2(ctx);
+ {
+ TCGv_i32 sa_v = tcg_const_i32(ret);
+ gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v);
+ tcg_temp_free_i32(sa_v);
+ break;
+ }
+ case OPC_PRECRQ_OB_QH:
+ check_dsp(ctx);
+ gen_helper_precrq_ob_qh(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_PW_L:
+ check_dsp(ctx);
+ gen_helper_precrq_pw_l(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_QH_PW:
+ check_dsp(ctx);
+ gen_helper_precrq_qh_pw(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_RS_QH_PW:
+ check_dsp(ctx);
+ gen_helper_precrq_rs_qh_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PRECRQU_S_OB_QH:
+ check_dsp(ctx);
+ gen_helper_precrqu_s_ob_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc,
+ int ret, int v1, int v2)
+{
+ uint32_t op2;
+ TCGv t0;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if (ret == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, v1);
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
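+    /*
+     * t0 holds the immediate shift amount (the raw v1 field) used by
+     * the SHLL/SHRL/SHRA forms; v1_t holds the register operand used
+     * by the ...V (variable-shift) forms.
+     */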
+ switch (opc) {
+ case OPC_SHLL_QB_DSP:
+ {
+ op2 = MASK_SHLL_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_SHLL_QB:
+ check_dsp(ctx);
+ gen_helper_shll_qb(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_QB:
+ check_dsp(ctx);
+ gen_helper_shll_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_PH:
+ check_dsp(ctx);
+ gen_helper_shll_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_PH:
+ check_dsp(ctx);
+ gen_helper_shll_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_S_PH:
+ check_dsp(ctx);
+ gen_helper_shll_s_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_S_PH:
+ check_dsp(ctx);
+ gen_helper_shll_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_S_W:
+ check_dsp(ctx);
+ gen_helper_shll_s_w(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_S_W:
+ check_dsp(ctx);
+ gen_helper_shll_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHRL_QB:
+ check_dsp(ctx);
+ gen_helper_shrl_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRLV_QB:
+ check_dsp(ctx);
+ gen_helper_shrl_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRL_PH:
+ check_dsp_r2(ctx);
+ gen_helper_shrl_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRLV_PH:
+ check_dsp_r2(ctx);
+ gen_helper_shrl_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_QB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRA_R_QB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_r_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_QB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRAV_R_QB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_PH:
+ check_dsp(ctx);
+ gen_helper_shra_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRA_R_PH:
+ check_dsp(ctx);
+ gen_helper_shra_r_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_PH:
+ check_dsp(ctx);
+ gen_helper_shra_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRAV_R_PH:
+ check_dsp(ctx);
+ gen_helper_shra_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_R_W:
+ check_dsp(ctx);
+ gen_helper_shra_r_w(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_R_W:
+ check_dsp(ctx);
+ gen_helper_shra_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK SHLL.QB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ }
+#ifdef TARGET_MIPS64
+ case OPC_SHLL_OB_DSP:
+ op2 = MASK_SHLL_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_SHLL_PW:
+ check_dsp(ctx);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_PW:
+ check_dsp(ctx);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_S_PW:
+ check_dsp(ctx);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_S_PW:
+ check_dsp(ctx);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_OB:
+ check_dsp(ctx);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_OB:
+ check_dsp(ctx);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_QH:
+ check_dsp(ctx);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_QH:
+ check_dsp(ctx);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_S_QH:
+ check_dsp(ctx);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_S_QH:
+ check_dsp(ctx);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHRA_OB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_OB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_OB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_OB:
+ check_dsp_r2(ctx);
+ gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_PW:
+ check_dsp(ctx);
+ gen_helper_shra_pw(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_PW:
+ check_dsp(ctx);
+ gen_helper_shra_pw(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_PW:
+ check_dsp(ctx);
+ gen_helper_shra_r_pw(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_PW:
+ check_dsp(ctx);
+ gen_helper_shra_r_pw(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_QH:
+ check_dsp(ctx);
+ gen_helper_shra_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_QH:
+ check_dsp(ctx);
+ gen_helper_shra_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_QH:
+ check_dsp(ctx);
+ gen_helper_shra_r_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_QH:
+ check_dsp(ctx);
+ gen_helper_shra_r_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRL_OB:
+ check_dsp(ctx);
+ gen_helper_shrl_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRLV_OB:
+ check_dsp(ctx);
+ gen_helper_shrl_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRL_QH:
+ check_dsp_r2(ctx);
+ gen_helper_shrl_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRLV_QH:
+ check_dsp_r2(ctx);
+ gen_helper_shrl_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK SHLL.OB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv_i32 t0;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new_i32();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ tcg_gen_movi_i32(t0, ret);
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
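+    /*
+     * For the dot-product/accumulate helpers below, t0 carries the
+     * accumulator index (the decoded 'ret' field) rather than a GPR
+     * value.
+     */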
+ switch (op1) {
+ /*
+ * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
+ * the same mask and op1.
+ */
+ case OPC_MULT_G_2E:
+ check_dsp_r2(ctx);
+ switch (op2) {
+ case OPC_MUL_PH:
+ gen_helper_mul_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MUL_S_PH:
+ gen_helper_mul_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_S_W:
+ gen_helper_mulq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_W:
+ gen_helper_mulq_rs_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+ case OPC_DPA_W_PH_DSP:
+ switch (op2) {
+ case OPC_DPAU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAX_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpax_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQX_S_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpaqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQX_SA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpaqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPS_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dps_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSX_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsx_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpsq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQX_S_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQX_SA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_dpsqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULSAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpaq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpsq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULSA_W_PH:
+ check_dsp_r2(ctx);
+ gen_helper_mulsa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DPAQ_W_QH_DSP:
+ {
+ int ac = ret & 0x03;
+ tcg_gen_movi_i32(t0, ac);
+
+ switch (op2) {
+ case OPC_DMADD:
+ check_dsp(ctx);
+ gen_helper_dmadd(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMADDU:
+ check_dsp(ctx);
+ gen_helper_dmaddu(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMSUB:
+ check_dsp(ctx);
+ gen_helper_dmsub(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMSUBU:
+ check_dsp(ctx);
+ gen_helper_dmsubu(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPA_W_QH:
+ check_dsp_r2(ctx);
+ gen_helper_dpa_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_dpaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAQ_SA_L_PW:
+ check_dsp(ctx);
+ gen_helper_dpaq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAU_H_OBL:
+ check_dsp(ctx);
+ gen_helper_dpau_h_obl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAU_H_OBR:
+ check_dsp(ctx);
+ gen_helper_dpau_h_obr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPS_W_QH:
+ check_dsp_r2(ctx);
+ gen_helper_dps_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_dpsq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSQ_SA_L_PW:
+ check_dsp(ctx);
+ gen_helper_dpsq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSU_H_OBL:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_obl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSU_H_OBR:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_obr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_L_PWL:
+ check_dsp(ctx);
+ gen_helper_maq_s_l_pwl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_L_PWR:
+ check_dsp(ctx);
+ gen_helper_maq_s_l_pwr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHLL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhll(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHLL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhll(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHLR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHLR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHRL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHRL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHRR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHRR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MULSAQ_S_L_PW:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MULSAQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ }
+ }
+ break;
+#endif
+ case OPC_ADDU_QB_DSP:
+ switch (op2) {
+ case OPC_MULEU_S_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_PH:
+ check_dsp(ctx);
+ gen_helper_mulq_rs_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_S_PH:
+ check_dsp_r2(ctx);
+ gen_helper_mulq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ADDU_OB_DSP:
+ switch (op2) {
+ case OPC_MULEQ_S_PW_QHL:
+ check_dsp(ctx);
+ gen_helper_muleq_s_pw_qhl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_PW_QHR:
+ check_dsp(ctx);
+ gen_helper_muleq_s_pw_qhr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_muleu_s_qh_obl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_muleu_s_qh_obr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_QH:
+ check_dsp(ctx);
+ gen_helper_mulq_rs_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int val)
+{
+ int16_t imm;
+ TCGv t0;
+ TCGv val_t;
+
+ if (ret == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ val_t = tcg_temp_new();
+ gen_load_gpr(val_t, val);
+
+ switch (op1) {
+ case OPC_ABSQ_S_PH_DSP:
+ switch (op2) {
+ case OPC_BITREV:
+ check_dsp(ctx);
+ gen_helper_bitrev(cpu_gpr[ret], val_t);
+ break;
+ case OPC_REPL_QB:
+ check_dsp(ctx);
+ {
+ target_long result;
+ imm = (ctx->opcode >> 16) & 0xFF;
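+                /*
+                 * Replicate the 8-bit immediate into all four bytes;
+                 * e.g. imm == 0xAB gives 0xABABABAB, which is then
+                 * sign-extended to target_long.
+                 */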
+ result = (uint32_t)imm << 24 |
+ (uint32_t)imm << 16 |
+ (uint32_t)imm << 8 |
+ (uint32_t)imm;
+ result = (int32_t)result;
+ tcg_gen_movi_tl(cpu_gpr[ret], result);
+ }
+ break;
+ case OPC_REPLV_QB:
+ check_dsp(ctx);
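+            /*
+             * Replicate the low byte of the source: double it to a
+             * halfword, then to a full word, via two shift/OR steps.
+             */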
+ tcg_gen_ext8u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 8);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_REPL_PH:
+ check_dsp(ctx);
+ {
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
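+                /*
+                 * The shift pair above sign-extends the 10-bit
+                 * immediate, which is then replicated into both
+                 * halfwords.
+                 */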
+                tcg_gen_movi_tl(cpu_gpr[ret],
+                                (target_long)((int32_t)imm << 16 |
+                                              (uint16_t)imm));
+ }
+ break;
+ case OPC_REPLV_PH:
+ check_dsp(ctx);
+ tcg_gen_ext16u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ABSQ_S_QH_DSP:
+ switch (op2) {
+ case OPC_REPL_OB:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0xFF;
+ temp = ((uint64_t)imm << 8) | (uint64_t)imm;
+ temp = (temp << 16) | temp;
+ temp = (temp << 32) | temp;
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPL_PW:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
+                temp = ((target_long)imm << 32) |
+                       ((target_long)imm & 0xFFFFFFFF);
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPL_QH:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
+
+ temp = ((uint64_t)(uint16_t)imm << 48) |
+ ((uint64_t)(uint16_t)imm << 32) |
+ ((uint64_t)(uint16_t)imm << 16) |
+ (uint64_t)(uint16_t)imm;
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPLV_OB:
+ check_dsp(ctx);
+ tcg_gen_ext8u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 8);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ case OPC_REPLV_PW:
+ check_dsp(ctx);
+ tcg_gen_ext32u_i64(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ case OPC_REPLV_QH:
+ check_dsp(ctx);
+ tcg_gen_ext16u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ }
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(val_t);
+}
+
+static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx,
+ uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv t1;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t1 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
+ switch (op1) {
+ case OPC_CMPU_EQ_QB_DSP:
+ switch (op2) {
+ case OPC_CMPU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_eq_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_lt_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_le_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
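+        /*
+         * The DSP R2 CMPGDU.* forms reuse the CMPGU helpers, then
+         * mirror the four result bits into DSPControl[27:24] by hand.
+         */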
+ case OPC_CMPGDU_EQ_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_eq_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMPGDU_LT_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_lt_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMPGDU_LE_QB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgu_le_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMP_EQ_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_QB:
+ check_dsp(ctx);
+ gen_helper_pick_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_PH:
+ check_dsp(ctx);
+ gen_helper_pick_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PACKRL_PH:
+ check_dsp(ctx);
+ gen_helper_packrl_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_CMPU_EQ_OB_DSP:
+ switch (op2) {
+ case OPC_CMP_EQ_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_le_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_EQ_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_le_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_EQ_OB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_LT_OB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_LE_OB:
+ check_dsp_r2(ctx);
+ gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGU_EQ_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_eq_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LT_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_lt_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LE_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_le_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPU_EQ_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_eq_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LT_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_lt_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LE_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_le_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PACKRL_PW:
+ check_dsp(ctx);
+ gen_helper_packrl_pw(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PICK_OB:
+ check_dsp(ctx);
+ gen_helper_pick_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_PW:
+ check_dsp(ctx);
+ gen_helper_pick_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_QH:
+ check_dsp(ctx);
+ gen_helper_pick_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx,
+ uint32_t op1, int rt, int rs, int sa)
+{
+ TCGv t0;
+
+ check_dsp_r2(ctx);
+
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+
+ switch (op1) {
+ case OPC_APPEND_DSP:
+ switch (MASK_APPEND(ctx->opcode)) {
+ case OPC_APPEND:
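+            /* rt = (rt << sa) | (rs & ((1 << sa) - 1)), via one deposit. */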
+ if (sa != 0) {
+ tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], sa, 32 - sa);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ case OPC_PREPEND:
+ if (sa != 0) {
+ tcg_gen_ext32u_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], sa);
+ tcg_gen_shli_tl(t0, t0, 32 - sa);
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ case OPC_BALIGN:
+ sa &= 3;
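+            /*
+             * Generates rt = (rt << 8 * sa) | (rs >> 8 * (4 - sa)),
+             * i.e. a byte-granular funnel shift of the rt:rs pair.
+             */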
+ if (sa != 0 && sa != 2) {
+ tcg_gen_shli_tl(cpu_gpr[rt], cpu_gpr[rt], 8 * sa);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(t0, t0, 8 * (4 - sa));
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK APPEND");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DAPPEND_DSP:
+ switch (MASK_DAPPEND(ctx->opcode)) {
+ case OPC_DAPPEND:
+ if (sa != 0) {
+ tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], sa, 64 - sa);
+ }
+ break;
+ case OPC_PREPENDD:
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], 0x20 | sa);
+ tcg_gen_shli_tl(t0, t0, 64 - (0x20 | sa));
+            tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ break;
+ case OPC_PREPENDW:
+ if (sa != 0) {
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], sa);
+ tcg_gen_shli_tl(t0, t0, 64 - sa);
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_DBALIGN:
+ sa &= 7;
+ if (sa != 0 && sa != 2 && sa != 4) {
+ tcg_gen_shli_tl(cpu_gpr[rt], cpu_gpr[rt], 8 * sa);
+ tcg_gen_shri_tl(t0, t0, 8 * (8 - sa));
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DAPPEND");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv t0;
+ TCGv t1;
+ TCGv v1_t;
+ int16_t imm;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+
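+    /*
+     * In the immediate forms below, the decoded v1/v2 fields are moved
+     * into temporaries as constants; the ...V (variable) forms use the
+     * v1 register value (v1_t) instead.
+     */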
+ switch (op1) {
+ case OPC_EXTR_W_DSP:
+ check_dsp(ctx);
+ switch (op2) {
+ case OPC_EXTR_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTRV_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTPDP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTPDPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_SHILO:
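+            /*
+             * 'ret' names the accumulator here; the 6-bit shift amount
+             * is taken straight from the opcode.
+             */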
+ imm = (ctx->opcode >> 20) & 0x3F;
+ tcg_gen_movi_tl(t0, ret);
+ tcg_gen_movi_tl(t1, imm);
+ gen_helper_shilo(t0, t1, cpu_env);
+ break;
+ case OPC_SHILOV:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_shilo(t0, v1_t, cpu_env);
+ break;
+ case OPC_MTHLIP:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_mthlip(t0, v1_t, cpu_env);
+ break;
+ case OPC_WRDSP:
+ imm = (ctx->opcode >> 11) & 0x3FF;
+ tcg_gen_movi_tl(t0, imm);
+ gen_helper_wrdsp(v1_t, t0, cpu_env);
+ break;
+ case OPC_RDDSP:
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ tcg_gen_movi_tl(t0, imm);
+ gen_helper_rddsp(cpu_gpr[ret], t0, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DEXTR_W_DSP:
+ check_dsp(ctx);
+ switch (op2) {
+ case OPC_DMTHLIP:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_dmthlip(v1_t, t0, cpu_env);
+ break;
+ case OPC_DSHILO:
+ {
+ int shift = (ctx->opcode >> 19) & 0x7F;
+ int ac = (ctx->opcode >> 11) & 0x03;
+ tcg_gen_movi_tl(t0, shift);
+ tcg_gen_movi_tl(t1, ac);
+ gen_helper_dshilo(t0, t1, cpu_env);
+ break;
+ }
+ case OPC_DSHILOV:
+ {
+ int ac = (ctx->opcode >> 11) & 0x03;
+ tcg_gen_movi_tl(t0, ac);
+ gen_helper_dshilo(v1_t, t0, cpu_env);
+ break;
+ }
+ case OPC_DEXTP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTPDP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTPDPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTR_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_R_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_RS_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTRV_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_R_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_RS_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(v1_t);
+}
+
+/* End MIPSDSP functions. */
+
+static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_MULT:
+ case OPC_MULTU:
+ case OPC_DIV:
+ case OPC_DIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_MUL:
+ case R6_OPC_MUH:
+ case R6_OPC_MULU:
+ case R6_OPC_MUHU:
+ case R6_OPC_DIV:
+ case R6_OPC_MOD:
+ case R6_OPC_DIVU:
+ case R6_OPC_MODU:
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_SELEQZ:
+ case OPC_SELNEZ:
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case R6_OPC_CLO:
+ case R6_OPC_CLZ:
+ if (rt == 0 && sa == 1) {
+            /*
+             * The major opcode and function field are shared with
+             * pre-R6 MFHI/MTHI; we additionally need to check the
+             * other fields.
+             */
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case R6_OPC_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 6, 20))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ gen_reserved_instruction(ctx);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_DCLO:
+ case R6_OPC_DCLZ:
+ if (rt == 0 && sa == 1) {
+            /*
+             * The major opcode and function field are shared with
+             * pre-R6 MFHI/MTHI; we additionally need to check the
+             * other fields.
+             */
+ check_mips_64(ctx);
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case OPC_DMULT:
+ case OPC_DMULTU:
+ case OPC_DDIV:
+ case OPC_DDIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_DMUL:
+ case R6_OPC_DMUH:
+ case R6_OPC_DMULU:
+ case R6_OPC_DMUHU:
+ case R6_OPC_DDIV:
+ case R6_OPC_DMOD:
+ case R6_OPC_DDIVU:
+ case R6_OPC_DMODU:
+ check_mips_64(ctx);
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special_r6");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_special_tx79(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs = extract32(ctx->opcode, 21, 5);
+ int rt = extract32(ctx->opcode, 16, 5);
+ int rd = extract32(ctx->opcode, 11, 5);
+ uint32_t op1 = MASK_SPECIAL(ctx->opcode);
+
+ switch (op1) {
+ case OPC_MOVN: /* Conditional move */
+ case OPC_MOVZ:
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_MFHI: /* Move from HI/LO */
+ case OPC_MFLO:
+ gen_HILO(ctx, op1, 0, rd);
+ break;
+ case OPC_MTHI:
+ case OPC_MTLO: /* Move to HI/LO */
+ gen_HILO(ctx, op1, 0, rs);
+ break;
+ case OPC_MULT:
+ case OPC_MULTU:
+ gen_mul_txx9(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DIV:
+ case OPC_DIVU:
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT:
+ case OPC_DMULTU:
+ case OPC_DDIV:
+ case OPC_DDIVU:
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#endif
+ case OPC_JR:
+ gen_compute_branch(ctx, op1, 4, rs, 0, 0, 4);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("special_tx79");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd;
+ uint32_t op1;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_MOVN: /* Conditional move */
+ case OPC_MOVZ:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1 |
+ INSN_LOONGSON2E | INSN_LOONGSON2F);
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_MFHI: /* Move from HI/LO */
+ case OPC_MFLO:
+ gen_HILO(ctx, op1, rs & 3, rd);
+ break;
+ case OPC_MTHI:
+ case OPC_MTLO: /* Move to HI/LO */
+ gen_HILO(ctx, op1, rd & 3, rs);
+ break;
+ case OPC_MOVCI:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1);
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7,
+ (ctx->opcode >> 16) & 1);
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ case OPC_MULT:
+ case OPC_MULTU:
+ gen_muldiv(ctx, op1, rd & 3, rs, rt);
+ break;
+ case OPC_DIV:
+ case OPC_DIVU:
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT:
+ case OPC_DMULTU:
+ case OPC_DDIV:
+ case OPC_DDIVU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#endif
+ case OPC_JR:
+ gen_compute_branch(ctx, op1, 4, rs, 0, 0, 4);
+ break;
+ case OPC_SPIM:
+#ifdef MIPS_STRICT_STANDARD
+ MIPS_INVAL("SPIM");
+ gen_reserved_instruction(ctx);
+#else
+ /* Implemented as RI exception for now. */
+ MIPS_INVAL("spim (unofficial)");
+ gen_reserved_instruction(ctx);
+#endif
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("special_legacy");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_SLL: /* Shift with immediate */
+ if (sa == 5 && rd == 0 &&
+ rs == 0 && rt == 0) { /* PAUSE */
+ if ((ctx->insn_flags & ISA_MIPS_R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ }
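+        /*
+         * Otherwise PAUSE is just a hint encoded on top of SLL; it
+         * falls through to the ordinary shift-immediate path, where
+         * rd == 0 makes it a NOP.
+         */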
+ /* Fallthrough */
+ case OPC_SRA:
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ case OPC_SRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* rotr is decoded as srl on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS_R2) {
+ op1 = OPC_ROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_ADD:
+ case OPC_ADDU:
+ case OPC_SUB:
+ case OPC_SUBU:
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_SLLV: /* Shifts */
+ case OPC_SRAV:
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_SRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* rotrv is decoded as srlv on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS_R2) {
+ op1 = OPC_ROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_SLT: /* Set on less than */
+ case OPC_SLTU:
+ gen_slt(ctx, op1, rd, rs, rt);
+ break;
+    case OPC_AND: /* Logic */
+ case OPC_OR:
+ case OPC_NOR:
+ case OPC_XOR:
+ gen_logic(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_JALR:
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
+ break;
+ case OPC_TGE: /* Traps */
+ case OPC_TGEU:
+ case OPC_TLT:
+ case OPC_TLTU:
+ case OPC_TEQ:
+ case OPC_TNE:
+ check_insn(ctx, ISA_MIPS2);
+ gen_trap(ctx, op1, rs, rt, -1);
+ break;
+ case OPC_PMON:
+ /* Pmon entry point, also R4010 selsl */
+#ifdef MIPS_STRICT_STANDARD
+ MIPS_INVAL("PMON / selsl");
+ gen_reserved_instruction(ctx);
+#else
+ gen_helper_pmon(cpu_env, tcg_constant_i32(sa));
+#endif
+ break;
+ case OPC_SYSCALL:
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ break;
+ case OPC_BREAK:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case OPC_SYNC:
+ check_insn(ctx, ISA_MIPS2);
+ gen_sync(extract32(ctx->opcode, 6, 5));
+ break;
+
+#if defined(TARGET_MIPS64)
+ /* MIPS64 specific opcodes */
+ case OPC_DSLL:
+ case OPC_DSRA:
+ case OPC_DSLL32:
+ case OPC_DSRA32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ case OPC_DSRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr is decoded as dsrl on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS_R2) {
+ op1 = OPC_DROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_DSRL32:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr32 is decoded as dsrl32 on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS_R2) {
+ op1 = OPC_DROTR32;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_DADD:
+ case OPC_DADDU:
+ case OPC_DSUB:
+ case OPC_DSUBU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DSLLV:
+ case OPC_DSRAV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DSRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* drotrv is decoded as dsrlv on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS_R2) {
+ op1 = OPC_DROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#endif
+ default:
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ decode_opc_special_r6(env, ctx);
+ } else if (ctx->insn_flags & INSN_R5900) {
+ decode_opc_special_tx79(env, ctx);
+ } else {
+ decode_opc_special_legacy(env, ctx);
+ }
+ }
+}
+
+static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd;
+ uint32_t op1;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+
+ op1 = MASK_SPECIAL2(ctx->opcode);
+ switch (op1) {
+ case OPC_MADD: /* Multiply and add/sub */
+ case OPC_MADDU:
+ case OPC_MSUB:
+ case OPC_MSUBU:
+ check_insn(ctx, ISA_MIPS_R1);
+ gen_muldiv(ctx, op1, rd & 3, rs, rt);
+ break;
+ case OPC_MUL:
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DIV_G_2F:
+ case OPC_DIVU_G_2F:
+ case OPC_MULT_G_2F:
+ case OPC_MULTU_G_2F:
+ case OPC_MOD_G_2F:
+ case OPC_MODU_G_2F:
+ check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_CLO:
+ case OPC_CLZ:
+ check_insn(ctx, ISA_MIPS_R1);
+ gen_cl(ctx, op1, rd, rs);
+ break;
+ case OPC_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 6, 20))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /*
+ * XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS_R1);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case OPC_DCLZ:
+ check_insn(ctx, ISA_MIPS_R1);
+ check_mips_64(ctx);
+ gen_cl(ctx, op1, rd, rs);
+ break;
+ case OPC_DMULT_G_2F:
+ case OPC_DMULTU_G_2F:
+ case OPC_DDIV_G_2F:
+ case OPC_DDIVU_G_2F:
+ case OPC_DMOD_G_2F:
+ case OPC_DMODU_G_2F:
+ check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special2_legacy");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+ int16_t imm;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t)ctx->opcode >> 7;
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case R6_OPC_PREF:
+ if (rt >= 24) {
+ /* hint codes 24-31 are reserved and signal RI */
+ gen_reserved_instruction(ctx);
+ }
+ /* Treat as NOP. */
+ break;
+ case R6_OPC_CACHE:
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ break;
+ case R6_OPC_SC:
+ gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+ break;
+ case R6_OPC_LL:
+ gen_ld(ctx, op1, rt, rs, imm);
+ break;
+ case OPC_BSHFL:
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ op2 = MASK_BSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_ALIGN:
+ case OPC_ALIGN_1:
+ case OPC_ALIGN_2:
+ case OPC_ALIGN_3:
+ gen_align(ctx, 32, rd, rs, rt, sa & 3);
+ break;
+ case OPC_BITSWAP:
+ gen_bitswap(ctx, op2, rd, rt);
+ break;
+ }
+ }
+ break;
+#ifndef CONFIG_USER_ONLY
+ case OPC_GINV:
+ if (unlikely(ctx->gi <= 1)) {
+ gen_reserved_instruction(ctx);
+ }
+ check_cp0_enabled(ctx);
+ switch ((ctx->opcode >> 6) & 3) {
+ case 0: /* GINVI */
+ /* Treat as NOP. */
+ break;
+ case 2: /* GINVT */
+ gen_helper_0e1i(ginvt, cpu_gpr[rs], extract32(ctx->opcode, 8, 2));
+ break;
+ default:
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#endif
+#if defined(TARGET_MIPS64)
+ case R6_OPC_SCD:
+ gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ break;
+ case R6_OPC_LLD:
+ gen_ld(ctx, op1, rt, rs, imm);
+ break;
+ case OPC_DBSHFL:
+ check_mips_64(ctx);
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ op2 = MASK_DBSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_DALIGN:
+ case OPC_DALIGN_1:
+ case OPC_DALIGN_2:
+ case OPC_DALIGN_3:
+ case OPC_DALIGN_4:
+ case OPC_DALIGN_5:
+ case OPC_DALIGN_6:
+ case OPC_DALIGN_7:
+ gen_align(ctx, 64, rd, rs, rt, sa & 7);
+ break;
+ case OPC_DBITSWAP:
+ gen_bitswap(ctx, op2, rd, rt);
+ break;
+ }
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special3_r6");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case OPC_DIV_G_2E:
+ case OPC_DIVU_G_2E:
+ case OPC_MOD_G_2E:
+ case OPC_MODU_G_2E:
+ case OPC_MULT_G_2E:
+ case OPC_MULTU_G_2E:
+ /*
+ * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
+ * the same mask and op1.
+ */
+ if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) {
+ op2 = MASK_ADDUH_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_ADDUH_QB:
+ case OPC_ADDUH_R_QB:
+ case OPC_ADDQH_PH:
+ case OPC_ADDQH_R_PH:
+ case OPC_ADDQH_W:
+ case OPC_ADDQH_R_W:
+ case OPC_SUBUH_QB:
+ case OPC_SUBUH_R_QB:
+ case OPC_SUBQH_PH:
+ case OPC_SUBQH_R_PH:
+ case OPC_SUBQH_W:
+ case OPC_SUBQH_R_W:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MUL_PH:
+ case OPC_MUL_S_PH:
+ case OPC_MULQ_S_W:
+ case OPC_MULQ_RS_W:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default:
+ MIPS_INVAL("MASK ADDUH.QB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ } else if (ctx->insn_flags & INSN_LOONGSON2E) {
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ break;
+ case OPC_LX_DSP:
+ op2 = MASK_LX(ctx->opcode);
+ switch (op2) {
+#if defined(TARGET_MIPS64)
+ case OPC_LDX:
+#endif
+ case OPC_LBUX:
+ case OPC_LHX:
+ case OPC_LWX:
+ gen_mipsdsp_ld(ctx, op2, rd, rs, rt);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK LX");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_ABSQ_S_PH_DSP:
+ op2 = MASK_ABSQ_S_PH(ctx->opcode);
+ switch (op2) {
+ case OPC_ABSQ_S_QB:
+ case OPC_ABSQ_S_PH:
+ case OPC_ABSQ_S_W:
+ case OPC_PRECEQ_W_PHL:
+ case OPC_PRECEQ_W_PHR:
+ case OPC_PRECEQU_PH_QBL:
+ case OPC_PRECEQU_PH_QBR:
+ case OPC_PRECEQU_PH_QBLA:
+ case OPC_PRECEQU_PH_QBRA:
+ case OPC_PRECEU_PH_QBL:
+ case OPC_PRECEU_PH_QBR:
+ case OPC_PRECEU_PH_QBLA:
+ case OPC_PRECEU_PH_QBRA:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_BITREV:
+ case OPC_REPL_QB:
+ case OPC_REPLV_QB:
+ case OPC_REPL_PH:
+ case OPC_REPLV_PH:
+ gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt);
+ break;
+ default:
+ MIPS_INVAL("MASK ABSQ_S.PH");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_ADDU_QB_DSP:
+ op2 = MASK_ADDU_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_ADDQ_PH:
+ case OPC_ADDQ_S_PH:
+ case OPC_ADDQ_S_W:
+ case OPC_ADDU_QB:
+ case OPC_ADDU_S_QB:
+ case OPC_ADDU_PH:
+ case OPC_ADDU_S_PH:
+ case OPC_SUBQ_PH:
+ case OPC_SUBQ_S_PH:
+ case OPC_SUBQ_S_W:
+ case OPC_SUBU_QB:
+ case OPC_SUBU_S_QB:
+ case OPC_SUBU_PH:
+ case OPC_SUBU_S_PH:
+ case OPC_ADDSC:
+ case OPC_ADDWC:
+ case OPC_MODSUB:
+ case OPC_RADDU_W_QB:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MULEU_S_PH_QBL:
+ case OPC_MULEU_S_PH_QBR:
+ case OPC_MULQ_RS_PH:
+ case OPC_MULEQ_S_W_PHL:
+ case OPC_MULEQ_S_W_PHR:
+ case OPC_MULQ_S_PH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ADDU.QB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_QB_DSP:
+ op2 = MASK_CMPU_EQ_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECR_SRA_PH_W:
+ case OPC_PRECR_SRA_R_PH_W:
+ gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd);
+ break;
+ case OPC_PRECR_QB_PH:
+ case OPC_PRECRQ_QB_PH:
+ case OPC_PRECRQ_PH_W:
+ case OPC_PRECRQ_RS_PH_W:
+ case OPC_PRECRQU_S_QB_PH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_CMPU_EQ_QB:
+ case OPC_CMPU_LT_QB:
+ case OPC_CMPU_LE_QB:
+ case OPC_CMP_EQ_PH:
+ case OPC_CMP_LT_PH:
+ case OPC_CMP_LE_PH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_CMPGU_EQ_QB:
+ case OPC_CMPGU_LT_QB:
+ case OPC_CMPGU_LE_QB:
+ case OPC_CMPGDU_EQ_QB:
+ case OPC_CMPGDU_LT_QB:
+ case OPC_CMPGDU_LE_QB:
+ case OPC_PICK_QB:
+ case OPC_PICK_PH:
+ case OPC_PACKRL_PH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK CMPU.EQ.QB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_SHLL_QB_DSP:
+ gen_mipsdsp_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DPA_W_PH_DSP:
+ op2 = MASK_DPA_W_PH(ctx->opcode);
+ switch (op2) {
+ case OPC_DPAU_H_QBL:
+ case OPC_DPAU_H_QBR:
+ case OPC_DPSU_H_QBL:
+ case OPC_DPSU_H_QBR:
+ case OPC_DPA_W_PH:
+ case OPC_DPAX_W_PH:
+ case OPC_DPAQ_S_W_PH:
+ case OPC_DPAQX_S_W_PH:
+ case OPC_DPAQX_SA_W_PH:
+ case OPC_DPS_W_PH:
+ case OPC_DPSX_W_PH:
+ case OPC_DPSQ_S_W_PH:
+ case OPC_DPSQX_S_W_PH:
+ case OPC_DPSQX_SA_W_PH:
+ case OPC_MULSAQ_S_W_PH:
+ case OPC_DPAQ_SA_L_W:
+ case OPC_DPSQ_SA_L_W:
+ case OPC_MAQ_S_W_PHL:
+ case OPC_MAQ_S_W_PHR:
+ case OPC_MAQ_SA_W_PHL:
+ case OPC_MAQ_SA_W_PHR:
+ case OPC_MULSA_W_PH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DPAW.PH");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_INSV_DSP:
+ op2 = MASK_INSV(ctx->opcode);
+ switch (op2) {
+ case OPC_INSV:
+ check_dsp(ctx);
+ {
+ TCGv t0, t1;
+
+ if (rt == 0) {
+ break;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+
+ gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ break;
+ }
+ default: /* Invalid */
+ MIPS_INVAL("MASK INSV");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_APPEND_DSP:
+ gen_mipsdsp_append(env, ctx, op1, rt, rs, rd);
+ break;
+ case OPC_EXTR_W_DSP:
+ op2 = MASK_EXTR_W(ctx->opcode);
+ switch (op2) {
+ case OPC_EXTR_W:
+ case OPC_EXTR_R_W:
+ case OPC_EXTR_RS_W:
+ case OPC_EXTR_S_H:
+ case OPC_EXTRV_S_H:
+ case OPC_EXTRV_W:
+ case OPC_EXTRV_R_W:
+ case OPC_EXTRV_RS_W:
+ case OPC_EXTP:
+ case OPC_EXTPV:
+ case OPC_EXTPDP:
+ case OPC_EXTPDPV:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1);
+ break;
+ case OPC_RDDSP:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ case OPC_SHILO:
+ case OPC_SHILOV:
+ case OPC_MTHLIP:
+ case OPC_WRDSP:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK EXTR.W");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DDIV_G_2E:
+ case OPC_DDIVU_G_2E:
+ case OPC_DMULT_G_2E:
+ case OPC_DMULTU_G_2E:
+ case OPC_DMOD_G_2E:
+ case OPC_DMODU_G_2E:
+ check_insn(ctx, INSN_LOONGSON2E);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_ABSQ_S_QH_DSP:
+ op2 = MASK_ABSQ_S_QH(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECEQ_L_PWL:
+ case OPC_PRECEQ_L_PWR:
+ case OPC_PRECEQ_PW_QHL:
+ case OPC_PRECEQ_PW_QHR:
+ case OPC_PRECEQ_PW_QHLA:
+ case OPC_PRECEQ_PW_QHRA:
+ case OPC_PRECEQU_QH_OBL:
+ case OPC_PRECEQU_QH_OBR:
+ case OPC_PRECEQU_QH_OBLA:
+ case OPC_PRECEQU_QH_OBRA:
+ case OPC_PRECEU_QH_OBL:
+ case OPC_PRECEU_QH_OBR:
+ case OPC_PRECEU_QH_OBLA:
+ case OPC_PRECEU_QH_OBRA:
+ case OPC_ABSQ_S_OB:
+ case OPC_ABSQ_S_PW:
+ case OPC_ABSQ_S_QH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_REPL_OB:
+ case OPC_REPL_PW:
+ case OPC_REPL_QH:
+ case OPC_REPLV_OB:
+ case OPC_REPLV_PW:
+ case OPC_REPLV_QH:
+ gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ABSQ_S.QH");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_ADDU_OB_DSP:
+ op2 = MASK_ADDU_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_RADDU_L_OB:
+ case OPC_SUBQ_PW:
+ case OPC_SUBQ_S_PW:
+ case OPC_SUBQ_QH:
+ case OPC_SUBQ_S_QH:
+ case OPC_SUBU_OB:
+ case OPC_SUBU_S_OB:
+ case OPC_SUBU_QH:
+ case OPC_SUBU_S_QH:
+ case OPC_SUBUH_OB:
+ case OPC_SUBUH_R_OB:
+ case OPC_ADDQ_PW:
+ case OPC_ADDQ_S_PW:
+ case OPC_ADDQ_QH:
+ case OPC_ADDQ_S_QH:
+ case OPC_ADDU_OB:
+ case OPC_ADDU_S_OB:
+ case OPC_ADDU_QH:
+ case OPC_ADDU_S_QH:
+ case OPC_ADDUH_OB:
+ case OPC_ADDUH_R_OB:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MULEQ_S_PW_QHL:
+ case OPC_MULEQ_S_PW_QHR:
+ case OPC_MULEU_S_QH_OBL:
+ case OPC_MULEU_S_QH_OBR:
+ case OPC_MULQ_RS_QH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ADDU.OB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_OB_DSP:
+ op2 = MASK_CMPU_EQ_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECR_SRA_QH_PW:
+ case OPC_PRECR_SRA_R_QH_PW:
+ /* Return value is rt. */
+ gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd);
+ break;
+ case OPC_PRECR_OB_QH:
+ case OPC_PRECRQ_OB_QH:
+ case OPC_PRECRQ_PW_L:
+ case OPC_PRECRQ_QH_PW:
+ case OPC_PRECRQ_RS_QH_PW:
+ case OPC_PRECRQU_S_OB_QH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_CMPU_EQ_OB:
+ case OPC_CMPU_LT_OB:
+ case OPC_CMPU_LE_OB:
+ case OPC_CMP_EQ_QH:
+ case OPC_CMP_LT_QH:
+ case OPC_CMP_LE_QH:
+ case OPC_CMP_EQ_PW:
+ case OPC_CMP_LT_PW:
+ case OPC_CMP_LE_PW:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_CMPGDU_EQ_OB:
+ case OPC_CMPGDU_LT_OB:
+ case OPC_CMPGDU_LE_OB:
+ case OPC_CMPGU_EQ_OB:
+ case OPC_CMPGU_LT_OB:
+ case OPC_CMPGU_LE_OB:
+ case OPC_PACKRL_PW:
+ case OPC_PICK_OB:
+ case OPC_PICK_PW:
+ case OPC_PICK_QH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+            MIPS_INVAL("MASK CMPU.EQ.OB");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_DAPPEND_DSP:
+ gen_mipsdsp_append(env, ctx, op1, rt, rs, rd);
+ break;
+ case OPC_DEXTR_W_DSP:
+ op2 = MASK_DEXTR_W(ctx->opcode);
+ switch (op2) {
+ case OPC_DEXTP:
+ case OPC_DEXTPDP:
+ case OPC_DEXTPDPV:
+ case OPC_DEXTPV:
+ case OPC_DEXTR_L:
+ case OPC_DEXTR_R_L:
+ case OPC_DEXTR_RS_L:
+ case OPC_DEXTR_W:
+ case OPC_DEXTR_R_W:
+ case OPC_DEXTR_RS_W:
+ case OPC_DEXTR_S_H:
+ case OPC_DEXTRV_L:
+ case OPC_DEXTRV_R_L:
+ case OPC_DEXTRV_RS_L:
+ case OPC_DEXTRV_S_H:
+ case OPC_DEXTRV_W:
+ case OPC_DEXTRV_R_W:
+ case OPC_DEXTRV_RS_W:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1);
+ break;
+ case OPC_DMTHLIP:
+ case OPC_DSHILO:
+ case OPC_DSHILOV:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+            MIPS_INVAL("MASK DEXTR.W");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_DPAQ_W_QH_DSP:
+ op2 = MASK_DPAQ_W_QH(ctx->opcode);
+ switch (op2) {
+ case OPC_DPAU_H_OBL:
+ case OPC_DPAU_H_OBR:
+ case OPC_DPSU_H_OBL:
+ case OPC_DPSU_H_OBR:
+ case OPC_DPA_W_QH:
+ case OPC_DPAQ_S_W_QH:
+ case OPC_DPS_W_QH:
+ case OPC_DPSQ_S_W_QH:
+ case OPC_MULSAQ_S_W_QH:
+ case OPC_DPAQ_SA_L_PW:
+ case OPC_DPSQ_SA_L_PW:
+ case OPC_MULSAQ_S_L_PW:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_MAQ_S_W_QHLL:
+ case OPC_MAQ_S_W_QHLR:
+ case OPC_MAQ_S_W_QHRL:
+ case OPC_MAQ_S_W_QHRR:
+ case OPC_MAQ_SA_W_QHLL:
+ case OPC_MAQ_SA_W_QHLR:
+ case OPC_MAQ_SA_W_QHRL:
+ case OPC_MAQ_SA_W_QHRR:
+ case OPC_MAQ_S_L_PWL:
+ case OPC_MAQ_S_L_PWR:
+ case OPC_DMADD:
+ case OPC_DMADDU:
+ case OPC_DMSUB:
+ case OPC_DMSUBU:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DPAQ.W.QH");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_DINSV_DSP:
+ op2 = MASK_INSV(ctx->opcode);
+ switch (op2) {
+ case OPC_DINSV:
+ {
+ TCGv t0, t1;
+
+ check_dsp(ctx);
+
+ if (rt == 0) {
+ break;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+
+ gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ break;
+ }
+ default: /* Invalid */
+ MIPS_INVAL("MASK DINSV");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_SHLL_OB_DSP:
+ gen_mipsdsp_shift(ctx, op1, rd, rs, rt);
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special3_legacy");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+
+#if defined(TARGET_MIPS64)
+
+static void decode_mmi(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_MMI(ctx->opcode);
+ int rs = extract32(ctx->opcode, 21, 5);
+ int rt = extract32(ctx->opcode, 16, 5);
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ switch (opc) {
+ case MMI_OPC_MULT1:
+ case MMI_OPC_MULTU1:
+ case MMI_OPC_MADD:
+ case MMI_OPC_MADDU:
+ case MMI_OPC_MADD1:
+ case MMI_OPC_MADDU1:
+ gen_mul_txx9(ctx, opc, rd, rs, rt);
+ break;
+ case MMI_OPC_DIV1:
+ case MMI_OPC_DIVU1:
+ gen_div1_tx79(ctx, opc, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+}
+
+static void gen_mmi_sq(DisasContext *ctx, int base, int rt, int offset)
+{
+ gen_reserved_instruction(ctx); /* TODO: MMI_OPC_SQ */
+}
+
+/*
+ * The TX79-specific instruction Store Quadword
+ *
+ * +--------+-------+-------+------------------------+
+ * | 011111 | base | rt | offset | SQ
+ * +--------+-------+-------+------------------------+
+ * 6 5 5 16
+ *
+ * has the same opcode as the Read Hardware Register instruction
+ *
+ * +--------+-------+-------+-------+-------+--------+
+ * | 011111 | 00000 | rt | rd | 00000 | 111011 | RDHWR
+ * +--------+-------+-------+-------+-------+--------+
+ * 6 5 5 5 5 6
+ *
+ * that is required, trapped and emulated by the Linux kernel. However, all
+ * RDHWR encodings yield address error exceptions on the TX79 since the SQ
+ * offset is odd. Therefore all valid SQ instructions can execute normally.
+ * In user mode, QEMU must verify the upper and lower 11 bits to distinguish
+ * between SQ and RDHWR, as the Linux kernel does.
+ */
+static void decode_mmi_sq(CPUMIPSState *env, DisasContext *ctx)
+{
+ int base = extract32(ctx->opcode, 21, 5);
+ int rt = extract32(ctx->opcode, 16, 5);
+ int offset = extract32(ctx->opcode, 0, 16);
+
+#ifdef CONFIG_USER_ONLY
+ uint32_t op1 = MASK_SPECIAL3(ctx->opcode);
+ uint32_t op2 = extract32(ctx->opcode, 6, 5);
+
+ if (base == 0 && op2 == 0 && op1 == OPC_RDHWR) {
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ gen_rdhwr(ctx, rt, rd, 0);
+ return;
+ }
+#endif
+
+ gen_mmi_sq(ctx, base, rt, offset);
+}
+
+#endif
+
+static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+ int16_t imm;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+ imm = sextract32(ctx->opcode, 7, 9);
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+
+ /*
+ * EVA loads and stores overlap Loongson 2E instructions decoded by
+ * decode_opc_special3_legacy(), so be careful to allow their decoding when
+ * EVA is absent.
+ */
+ if (ctx->eva) {
+ switch (op1) {
+ case OPC_LWLE:
+ case OPC_LWRE:
+ case OPC_LBUE:
+ case OPC_LHUE:
+ case OPC_LBE:
+ case OPC_LHE:
+ case OPC_LLE:
+ case OPC_LWE:
+ check_cp0_enabled(ctx);
+ gen_ld(ctx, op1, rt, rs, imm);
+ return;
+ case OPC_SWLE:
+ case OPC_SWRE:
+ case OPC_SBE:
+ case OPC_SHE:
+ case OPC_SWE:
+ check_cp0_enabled(ctx);
+ gen_st(ctx, op1, rt, rs, imm);
+ return;
+ case OPC_SCE:
+ check_cp0_enabled(ctx);
+ gen_st_cond(ctx, rt, rs, imm, MO_TESL, true);
+ return;
+ case OPC_CACHEE:
+ check_eva(ctx);
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
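+            /* Treat as NOP. */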
+ return;
+ case OPC_PREFE:
+ check_cp0_enabled(ctx);
+ /* Treat as NOP. */
+ return;
+ }
+ }
+
+ switch (op1) {
+ case OPC_EXT:
+ case OPC_INS:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_bitops(ctx, op1, rt, rs, sa, rd);
+ break;
+ case OPC_BSHFL:
+ op2 = MASK_BSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_ALIGN:
+ case OPC_ALIGN_1:
+ case OPC_ALIGN_2:
+ case OPC_ALIGN_3:
+ case OPC_BITSWAP:
+ check_insn(ctx, ISA_MIPS_R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DEXTM:
+ case OPC_DEXTU:
+ case OPC_DEXT:
+ case OPC_DINSM:
+ case OPC_DINSU:
+ case OPC_DINS:
+ check_insn(ctx, ISA_MIPS_R2);
+ check_mips_64(ctx);
+ gen_bitops(ctx, op1, rt, rs, sa, rd);
+ break;
+ case OPC_DBSHFL:
+ op2 = MASK_DBSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_DALIGN:
+ case OPC_DALIGN_1:
+ case OPC_DALIGN_2:
+ case OPC_DALIGN_3:
+ case OPC_DALIGN_4:
+ case OPC_DALIGN_5:
+ case OPC_DALIGN_6:
+ case OPC_DALIGN_7:
+ case OPC_DBITSWAP:
+ check_insn(ctx, ISA_MIPS_R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS_R2);
+ check_mips_64(ctx);
+ op2 = MASK_DBSHFL(ctx->opcode);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
+ break;
+#endif
+ case OPC_RDHWR:
+ gen_rdhwr(ctx, rt, rd, extract32(ctx->opcode, 6, 3));
+ break;
+ case OPC_FORK:
+ check_mt(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ gen_helper_fork(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_YIELD:
+ check_mt(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_helper_yield(t0, cpu_env, t0);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ default:
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ decode_opc_special3_r6(env, ctx);
+ } else {
+ decode_opc_special3_legacy(env, ctx);
+ }
+ }
+}
+
+static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int32_t offset;
+ int rs, rt, rd, sa;
+ uint32_t op, op1;
+ int16_t imm;
+
+ op = MASK_OP_MAJOR(ctx->opcode);
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t)ctx->opcode;
+ switch (op) {
+ case OPC_SPECIAL:
+ decode_opc_special(env, ctx);
+ break;
+ case OPC_SPECIAL2:
+#if defined(TARGET_MIPS64)
+ if ((ctx->insn_flags & INSN_R5900) && (ctx->insn_flags & ASE_MMI)) {
+ decode_mmi(env, ctx);
+ break;
+ }
+#endif
+ if (TARGET_LONG_BITS == 32 && (ctx->insn_flags & ASE_MXU)) {
+ if (MASK_SPECIAL2(ctx->opcode) == OPC_MUL) {
+ gen_arith(ctx, OPC_MUL, rd, rs, rt);
+ } else {
+ decode_ase_mxu(ctx, ctx->opcode);
+ }
+ break;
+ }
+ decode_opc_special2_legacy(env, ctx);
+ break;
+ case OPC_SPECIAL3:
+#if defined(TARGET_MIPS64)
+ if (ctx->insn_flags & INSN_R5900) {
+ decode_mmi_sq(env, ctx); /* MMI_OPC_SQ */
+ } else {
+ decode_opc_special3(env, ctx);
+ }
+#else
+ decode_opc_special3(env, ctx);
+#endif
+ break;
+ case OPC_REGIMM:
+ op1 = MASK_REGIMM(ctx->opcode);
+ switch (op1) {
+ case OPC_BLTZL: /* REGIMM branches */
+ case OPC_BGEZL:
+ case OPC_BLTZALL:
+ case OPC_BGEZALL:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ /* Fallthrough */
+ case OPC_BLTZ:
+ case OPC_BGEZ:
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ break;
+ case OPC_BLTZAL:
+ case OPC_BGEZAL:
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rs == 0) {
+ /* OPC_NAL, OPC_BAL */
+ gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4);
+ } else {
+ gen_reserved_instruction(ctx);
+ }
+ } else {
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ }
+ break;
+ case OPC_TGEI: /* REGIMM traps */
+ case OPC_TGEIU:
+ case OPC_TLTI:
+ case OPC_TLTIU:
+ case OPC_TEQI:
+ case OPC_TNEI:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_trap(ctx, op1, rs, -1, imm);
+ break;
+ case OPC_SIGRIE:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_reserved_instruction(ctx);
+ break;
+ case OPC_SYNCI:
+ check_insn(ctx, ISA_MIPS_R2);
+ /*
+ * Break the TB to be able to sync copied instructions
+ * immediately.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case OPC_BPOSGE32: /* MIPS DSP branch */
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+#endif
+ check_dsp(ctx);
+ gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DAHI:
+ check_insn(ctx, ISA_MIPS_R6);
+ check_mips_64(ctx);
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 32);
+ }
+ break;
+ case OPC_DATI:
+ check_insn(ctx, ISA_MIPS_R6);
+ check_mips_64(ctx);
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 48);
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("regimm");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_CP0:
+ check_cp0_enabled(ctx);
+ op1 = MASK_CP0(ctx->opcode);
+ switch (op1) {
+ case OPC_MFC0:
+ case OPC_MTC0:
+ case OPC_MFTR:
+ case OPC_MTTR:
+ case OPC_MFHC0:
+ case OPC_MTHC0:
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC0:
+ case OPC_DMTC0:
+#endif
+#ifndef CONFIG_USER_ONLY
+ gen_cp0(env, ctx, op1, rt, rd);
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_C0:
+ case OPC_C0_1:
+ case OPC_C0_2:
+ case OPC_C0_3:
+ case OPC_C0_4:
+ case OPC_C0_5:
+ case OPC_C0_6:
+ case OPC_C0_7:
+ case OPC_C0_8:
+ case OPC_C0_9:
+ case OPC_C0_A:
+ case OPC_C0_B:
+ case OPC_C0_C:
+ case OPC_C0_D:
+ case OPC_C0_E:
+ case OPC_C0_F:
+#ifndef CONFIG_USER_ONLY
+ gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd);
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_MFMC0:
+#ifndef CONFIG_USER_ONLY
+ {
+ uint32_t op2;
+ TCGv t0 = tcg_temp_new();
+
+ op2 = MASK_MFMC0(ctx->opcode);
+ switch (op2) {
+ case OPC_DMT:
+ check_cp0_mt(ctx);
+ gen_helper_dmt(t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_EMT:
+ check_cp0_mt(ctx);
+ gen_helper_emt(t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DVPE:
+ check_cp0_mt(ctx);
+ gen_helper_dvpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_EVPE:
+ check_cp0_mt(ctx);
+ gen_helper_evpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DVP:
+ check_insn(ctx, ISA_MIPS_R6);
+ if (ctx->vp) {
+ gen_helper_dvp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case OPC_EVP:
+ check_insn(ctx, ISA_MIPS_R6);
+ if (ctx->vp) {
+ gen_helper_evp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case OPC_DI:
+ check_insn(ctx, ISA_MIPS_R2);
+ save_cpu_state(ctx, 1);
+ gen_helper_di(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /*
+ * Stop translation as we may have switched
+ * the execution mode.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ break;
+ case OPC_EI:
+ check_insn(ctx, ISA_MIPS_R2);
+ save_cpu_state(ctx, 1);
+ gen_helper_ei(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /*
+ * DISAS_STOP isn't sufficient, we need to ensure we break
+ * out of translated code to check for pending interrupts.
+ */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("mfmc0");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ tcg_temp_free(t0);
+ }
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_RDPGPR:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_load_srsgpr(rt, rd);
+ break;
+ case OPC_WRPGPR:
+ check_insn(ctx, ISA_MIPS_R2);
+ gen_store_srsgpr(rt, rd);
+ break;
+ default:
+ MIPS_INVAL("cp0");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_ADDI */
+ /* Arithmetic with immediate opcode */
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
+ case OPC_ADDIU:
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SLTI: /* Set on less than with immediate opcode */
+ case OPC_SLTIU:
+ gen_slt_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_ANDI: /* Arithmetic with immediate opcode */
+ case OPC_LUI: /* OPC_AUI */
+ case OPC_ORI:
+ case OPC_XORI:
+ gen_logic_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_J: /* Jump */
+ case OPC_JAL:
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
+ break;
+ /* Branch */
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rt == 0) {
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BLEZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rt == 0) {
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BGTZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */
+ if (rt == 0) {
+ /* OPC_BLEZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS_R6);
+ /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */
+ if (rt == 0) {
+ /* OPC_BGTZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS_R6);
+ /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BEQL:
+ case OPC_BNEL:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ /* Fallthrough */
+ case OPC_BEQ:
+ case OPC_BNE:
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ break;
+ case OPC_LL: /* Load and stores */
+ check_insn(ctx, ISA_MIPS2);
+ if (ctx->insn_flags & INSN_R5900) {
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ }
+ /* Fallthrough */
+ case OPC_LWL:
+ case OPC_LWR:
+ case OPC_LB:
+ case OPC_LH:
+ case OPC_LW:
+ case OPC_LWPC:
+ case OPC_LBU:
+ case OPC_LHU:
+ gen_ld(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SWL:
+ case OPC_SWR:
+ case OPC_SB:
+ case OPC_SH:
+ case OPC_SW:
+ gen_st(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SC:
+ check_insn(ctx, ISA_MIPS2);
+ if (ctx->insn_flags & INSN_R5900) {
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ }
+ gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+ break;
+ case OPC_CACHE:
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS3 | ISA_MIPS_R1);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ /* Treat as NOP. */
+ break;
+ case OPC_PREF:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R1 | INSN_R5900);
+ /* Treat as NOP. */
+ break;
+
+ /* Floating point (COP1). */
+ case OPC_LWC1:
+ case OPC_LDC1:
+ case OPC_SWC1:
+ case OPC_SDC1:
+ gen_cop1_ldst(ctx, op, rt, rs, imm);
+ break;
+
+ case OPC_CP1:
+ op1 = MASK_CP1(ctx->opcode);
+
+ switch (op1) {
+ case OPC_MFHC1:
+ case OPC_MTHC1:
+ check_cp1_enabled(ctx);
+ check_insn(ctx, ISA_MIPS_R2);
+ /* fall through */
+ case OPC_MFC1:
+ case OPC_CFC1:
+ case OPC_MTC1:
+ case OPC_CTC1:
+ check_cp1_enabled(ctx);
+ gen_cp1(ctx, op1, rt, rd);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC1:
+ case OPC_DMTC1:
+ check_cp1_enabled(ctx);
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_cp1(ctx, op1, rt, rd);
+ break;
+#endif
+ case OPC_BC1EQZ: /* OPC_BC1ANY2 */
+ check_cp1_enabled(ctx);
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_BC1EQZ */
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2, 4);
+ } else {
+ /* OPC_BC1ANY2 */
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
+ (rt >> 2) & 0x7, imm << 2);
+ }
+ break;
+ case OPC_BC1NEZ:
+ check_cp1_enabled(ctx);
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2, 4);
+ break;
+ case OPC_BC1ANY4:
+ check_cp1_enabled(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ /* fall through */
+ case OPC_BC1:
+ check_cp1_enabled(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS_R6);
+ gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
+ (rt >> 2) & 0x7, imm << 2);
+ break;
+ case OPC_PS_FMT:
+ check_ps(ctx);
+ /* fall through */
+ case OPC_S_FMT:
+ case OPC_D_FMT:
+ check_cp1_enabled(ctx);
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ break;
+ case OPC_W_FMT:
+ case OPC_L_FMT:
+ {
+ int r6_op = ctx->opcode & FOP(0x3f, 0x1f);
+ check_cp1_enabled(ctx);
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ switch (r6_op) {
+ case R6_OPC_CMP_AF_S:
+ case R6_OPC_CMP_UN_S:
+ case R6_OPC_CMP_EQ_S:
+ case R6_OPC_CMP_UEQ_S:
+ case R6_OPC_CMP_LT_S:
+ case R6_OPC_CMP_ULT_S:
+ case R6_OPC_CMP_LE_S:
+ case R6_OPC_CMP_ULE_S:
+ case R6_OPC_CMP_SAF_S:
+ case R6_OPC_CMP_SUN_S:
+ case R6_OPC_CMP_SEQ_S:
+ case R6_OPC_CMP_SEUQ_S:
+ case R6_OPC_CMP_SLT_S:
+ case R6_OPC_CMP_SULT_S:
+ case R6_OPC_CMP_SLE_S:
+ case R6_OPC_CMP_SULE_S:
+ case R6_OPC_CMP_OR_S:
+ case R6_OPC_CMP_UNE_S:
+ case R6_OPC_CMP_NE_S:
+ case R6_OPC_CMP_SOR_S:
+ case R6_OPC_CMP_SUNE_S:
+ case R6_OPC_CMP_SNE_S:
+ gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ case R6_OPC_CMP_AF_D:
+ case R6_OPC_CMP_UN_D:
+ case R6_OPC_CMP_EQ_D:
+ case R6_OPC_CMP_UEQ_D:
+ case R6_OPC_CMP_LT_D:
+ case R6_OPC_CMP_ULT_D:
+ case R6_OPC_CMP_LE_D:
+ case R6_OPC_CMP_ULE_D:
+ case R6_OPC_CMP_SAF_D:
+ case R6_OPC_CMP_SUN_D:
+ case R6_OPC_CMP_SEQ_D:
+ case R6_OPC_CMP_SEUQ_D:
+ case R6_OPC_CMP_SLT_D:
+ case R6_OPC_CMP_SULT_D:
+ case R6_OPC_CMP_SLE_D:
+ case R6_OPC_CMP_SULE_D:
+ case R6_OPC_CMP_OR_D:
+ case R6_OPC_CMP_UNE_D:
+ case R6_OPC_CMP_NE_D:
+ case R6_OPC_CMP_SOR_D:
+ case R6_OPC_CMP_SUNE_D:
+ case R6_OPC_CMP_SNE_D:
+ gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ default:
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f),
+ rt, rd, sa, (imm >> 8) & 0x7);
+ break;
+ }
+ } else {
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ }
+ break;
+ }
+ default:
+ MIPS_INVAL("cp1");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ break;
+
+ /* Compact branches [R6] and COP2 [non-R6] */
+ case OPC_BC: /* OPC_LWC2 */
+ case OPC_BALC: /* OPC_SWC2 */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_BC, OPC_BALC */
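+            /* The 26-bit offset field is scaled by 4 and sign-extended. */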
+ gen_compute_compact_branch(ctx, op, 0, 0,
+ sextract32(ctx->opcode << 2, 0, 28));
+ } else if (ctx->insn_flags & ASE_LEXT) {
+ gen_loongson_lswc2(ctx, rt, rs, rd);
+ } else {
+ /* OPC_LWC2, OPC_SWC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
+ break;
+ case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */
+ case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
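+                /* The 21-bit offset field is scaled by 4 and sign-extended. */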
+ gen_compute_compact_branch(ctx, op, rs, 0,
+ sextract32(ctx->opcode << 2, 0, 23));
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ gen_compute_compact_branch(ctx, op, 0, rt, imm);
+ }
+ } else if (ctx->insn_flags & ASE_LEXT) {
+ gen_loongson_lsdc2(ctx, rt, rs, rd);
+ } else {
+            /* OPC_LDC2, OPC_SDC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
+ break;
+ case OPC_CP2:
+ check_insn(ctx, ASE_LMMI);
+ /* Note that these instructions use different fields. */
+ gen_loongson_multimedia(ctx, sa, rd, rt);
+ break;
+
+ case OPC_CP3:
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ op1 = MASK_CP3(ctx->opcode);
+ switch (op1) {
+ case OPC_LUXC1:
+ case OPC_SUXC1:
+ check_insn(ctx, ISA_MIPS5 | ISA_MIPS_R2);
+ /* Fallthrough */
+ case OPC_LWXC1:
+ case OPC_LDXC1:
+ case OPC_SWXC1:
+ case OPC_SDXC1:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R2);
+ gen_flt3_ldst(ctx, op1, sa, rd, rs, rt);
+ break;
+ case OPC_PREFX:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R2);
+ /* Treat as NOP. */
+ break;
+ case OPC_ALNV_PS:
+ check_insn(ctx, ISA_MIPS5 | ISA_MIPS_R2);
+ /* Fallthrough */
+ case OPC_MADD_S:
+ case OPC_MADD_D:
+ case OPC_MADD_PS:
+ case OPC_MSUB_S:
+ case OPC_MSUB_D:
+ case OPC_MSUB_PS:
+ case OPC_NMADD_S:
+ case OPC_NMADD_D:
+ case OPC_NMADD_PS:
+ case OPC_NMSUB_S:
+ case OPC_NMSUB_D:
+ case OPC_NMSUB_PS:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS_R2);
+ gen_flt3_arith(ctx, op1, sa, rs, rd, rt);
+ break;
+ default:
+ MIPS_INVAL("cp3");
+ gen_reserved_instruction(ctx);
+ break;
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+
+#if defined(TARGET_MIPS64)
+ /* MIPS64 opcodes */
+ case OPC_LLD:
+ if (ctx->insn_flags & INSN_R5900) {
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ }
+ /* fall through */
+ case OPC_LDL:
+ case OPC_LDR:
+ case OPC_LWU:
+ case OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SDL:
+ case OPC_SDR:
+ case OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SCD:
+ check_insn(ctx, ISA_MIPS3);
+ if (ctx->insn_flags & INSN_R5900) {
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ }
+ check_mips_64(ctx);
+ gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ break;
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_DADDI */
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
+ case OPC_DADDIU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ break;
+#else
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ MIPS_INVAL("major opcode");
+ gen_reserved_instruction(ctx);
+ }
+ break;
+#endif
+ case OPC_DAUI: /* OPC_JALX */
+ if (ctx->insn_flags & ISA_MIPS_R6) {
+#if defined(TARGET_MIPS64)
+ /* OPC_DAUI */
+ check_mips_64(ctx);
+ if (rs == 0) {
+ generate_exception(ctx, EXCP_RI);
+ } else if (rt != 0) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16);
+ tcg_temp_free(t0);
+ }
+#else
+            MIPS_INVAL("major opcode");
+            gen_reserved_instruction(ctx);
+#endif
+ } else {
+ /* OPC_JALX */
+ check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS);
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
+ }
+ break;
+ case OPC_MDMX:
+ /* MDMX: Not implemented. */
+ break;
+ case OPC_PCREL:
+ check_insn(ctx, ISA_MIPS_R6);
+ gen_pcrel(ctx, ctx->opcode, ctx->base.pc_next, rs);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("major opcode");
+ return false;
+ }
+ return true;
+}
+
+static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+    /* Make sure the instruction is on a word boundary. */
+ if (ctx->base.pc_next & 0x3) {
+ env->CP0_BadVAddr = ctx->base.pc_next;
+ generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL);
+ return;
+ }
+
+    /*
+     * Handle the branch-likely not-taken case: the delay slot is
+     * annulled when the branch is not taken, so skip straight past it.
+     */
+ if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) {
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
+ tcg_gen_movi_i32(hflags, ctx->hflags & ~MIPS_HFLAG_BMASK);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + 4);
+ gen_set_label(l1);
+ }
+
+ /* Transition to the auto-generated decoder. */
+
+ /* Vendor specific extensions */
+ if (cpu_supports_isa(env, INSN_R5900) && decode_ext_txx9(ctx, ctx->opcode)) {
+ return;
+ }
+ if (cpu_supports_isa(env, INSN_VR54XX) && decode_ext_vr54xx(ctx, ctx->opcode)) {
+ return;
+ }
+
+ /* ISA extensions */
+ if (ase_msa_available(env) && decode_ase_msa(ctx, ctx->opcode)) {
+ return;
+ }
+
+ /* ISA (from latest to oldest) */
+ if (cpu_supports_isa(env, ISA_MIPS_R6) && decode_isa_rel6(ctx, ctx->opcode)) {
+ return;
+ }
+
+ if (decode_opc_legacy(env, ctx)) {
+ return;
+ }
+
+ gen_reserved_instruction(ctx);
+}
+
+static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUMIPSState *env = cs->env_ptr;
+
+ ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ ctx->saved_pc = -1;
+ ctx->insn_flags = env->insn_flags;
+ ctx->CP0_Config0 = env->CP0_Config0;
+ ctx->CP0_Config1 = env->CP0_Config1;
+ ctx->CP0_Config2 = env->CP0_Config2;
+ ctx->CP0_Config3 = env->CP0_Config3;
+ ctx->CP0_Config5 = env->CP0_Config5;
+ ctx->btarget = 0;
+ ctx->kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
+ ctx->rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
+ ctx->ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
+ ctx->bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
+ ctx->bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
+ ctx->PAMask = env->PAMask;
+ ctx->mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
+ ctx->eva = (env->CP0_Config5 >> CP0C5_EVA) & 1;
+ ctx->sc = (env->CP0_Config3 >> CP0C3_SC) & 1;
+ ctx->CP0_LLAddr_shift = env->CP0_LLAddr_shift;
+ ctx->cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
+ /* Restore delay slot state from the tb context. */
+ ctx->hflags = (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */
+ ctx->ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
+ ctx->ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
+ (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
+ ctx->vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
+ ctx->mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
+ ctx->nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
+ ctx->abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
+ ctx->mi = (env->CP0_Config5 >> CP0C5_MI) & 1;
+ ctx->gi = (env->CP0_Config5 >> CP0C5_GI) & 3;
+ restore_cpu_state(env, ctx);
+#ifdef CONFIG_USER_ONLY
+ ctx->mem_idx = MIPS_HFLAG_UM;
+#else
+ ctx->mem_idx = hflags_mmu_index(ctx->hflags);
+#endif
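+    /*
+     * R6 and Loongson-3A allow unaligned ordinary loads and stores;
+     * all other CPUs require natural alignment.
+     */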
+ ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 |
+ INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN;
+
+ /*
+ * Execute a branch and its delay slot as a single instruction.
+ * This is what GDB expects and is consistent with what the
+ * hardware does (e.g. if a delay slot instruction faults, the
+ * reported PC is the PC of the branch).
+ */
+ if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ ctx->base.max_insns = 2;
+ }
+
+ LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
+ ctx->hflags);
+}
+
+static void mips_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+}
+
+static void mips_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next, ctx->hflags & MIPS_HFLAG_BMASK,
+ ctx->btarget);
+}
+
+static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ CPUMIPSState *env = cs->env_ptr;
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ int insn_bytes;
+ int is_slot;
+
+ is_slot = ctx->hflags & MIPS_HFLAG_BMASK;
+ if (ctx->insn_flags & ISA_NANOMIPS32) {
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
+ insn_bytes = decode_isa_nanomips(env, ctx);
+ } else if (!(ctx->hflags & MIPS_HFLAG_M16)) {
+ ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
+ insn_bytes = 4;
+ decode_opc(env, ctx);
+ } else if (ctx->insn_flags & ASE_MICROMIPS) {
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
+ insn_bytes = decode_isa_micromips(env, ctx);
+ } else if (ctx->insn_flags & ASE_MIPS16) {
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
+ insn_bytes = decode_ase_mips16e(env, ctx);
+ } else {
+ gen_reserved_instruction(ctx);
+ g_assert(ctx->base.is_jmp == DISAS_NORETURN);
+ return;
+ }
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ if (!(ctx->hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
+ MIPS_HFLAG_FBNSLOT))) {
+ /*
+ * Force to generate branch as there is neither delay nor
+ * forbidden slot.
+ */
+ is_slot = 1;
+ }
+ if ((ctx->hflags & MIPS_HFLAG_M16) &&
+ (ctx->hflags & MIPS_HFLAG_FBNSLOT)) {
+ /*
+ * Force to generate branch as microMIPS R6 doesn't restrict
+ * branches in the forbidden slot.
+ */
+ is_slot = 1;
+ }
+ }
+ if (is_slot) {
+ gen_branch(ctx, insn_bytes);
+ }
+ ctx->base.pc_next += insn_bytes;
+
+ if (ctx->base.is_jmp != DISAS_NEXT) {
+ return;
+ }
+
+ /*
+ * End the TB on (most) page crossings.
+ * See mips_tr_init_disas_context about single-stepping a branch
+ * together with its delay slot.
+ */
+ if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
+ && !ctx->base.singlestep_enabled) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_STOP:
+ gen_save_pc(ctx->base.pc_next);
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ save_cpu_state(ctx, 0);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void mips_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps mips_tr_ops = {
+ .init_disas_context = mips_tr_init_disas_context,
+ .tb_start = mips_tr_tb_start,
+ .insn_start = mips_tr_insn_start,
+ .translate_insn = mips_tr_translate_insn,
+ .tb_stop = mips_tr_tb_stop,
+ .disas_log = mips_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void mips_tcg_init(void)
+{
+ int i;
+
+ cpu_gpr[0] = NULL;
+    for (i = 1; i < 32; i++) {
+        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+                                        offsetof(CPUMIPSState,
+                                                 active_tc.gpr[i]),
+                                        regnames[i]);
+    }
+#if defined(TARGET_MIPS64)
+ cpu_gpr_hi[0] = NULL;
+
+ for (unsigned i = 1; i < 32; i++) {
+ g_autofree char *rname = g_strdup_printf("%s[hi]", regnames[i]);
+
+ cpu_gpr_hi[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUMIPSState,
+ active_tc.gpr_hi[i]),
+ rname);
+ }
+#endif /* TARGET_MIPS64 */
+ for (i = 0; i < 32; i++) {
+ int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
+
+ fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
+ }
+ msa_translate_init();
+ cpu_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.PC), "PC");
+ for (i = 0; i < MIPS_DSP_ACC; i++) {
+ cpu_HI[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.HI[i]),
+ regnames_HI[i]);
+ cpu_LO[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.LO[i]),
+ regnames_LO[i]);
+ }
+ cpu_dspctrl = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState,
+ active_tc.DSPControl),
+ "DSPControl");
+ bcond = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, bcond), "bcond");
+ btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, btarget), "btarget");
+ hflags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, hflags), "hflags");
+
+ fpu_fcr0 = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, active_fpu.fcr0),
+ "fcr0");
+ fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, active_fpu.fcr31),
+ "fcr31");
+ cpu_lladdr = tcg_global_mem_new(cpu_env, offsetof(CPUMIPSState, lladdr),
+ "lladdr");
+ cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPUMIPSState, llval),
+ "llval");
+
+ if (TARGET_LONG_BITS == 32) {
+ mxu_translate_init();
+ }
+}
+
+void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
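+    /*
+     * data[] holds the values recorded by mips_tr_insn_start():
+     * PC, branch-state hflags and btarget.
+     */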
+ env->active_tc.PC = data[0];
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->hflags |= data[1];
+ switch (env->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ env->btarget = data[2];
+ break;
+ }
+}
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
new file mode 100644
index 000000000..611149365
--- /dev/null
+++ b/target/mips/tcg/translate.h
@@ -0,0 +1,232 @@
+/*
+ * MIPS translation routines.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#ifndef TARGET_MIPS_TRANSLATE_H
+#define TARGET_MIPS_TRANSLATE_H
+
+#include "exec/translator.h"
+
+#define MIPS_DEBUG_DISAS 0
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ target_ulong saved_pc;
+ target_ulong page_start;
+ uint32_t opcode;
+ uint64_t insn_flags;
+ int32_t CP0_Config0;
+ int32_t CP0_Config1;
+ int32_t CP0_Config2;
+ int32_t CP0_Config3;
+ int32_t CP0_Config5;
+ /* Routine used to access memory */
+ int mem_idx;
+ MemOp default_tcg_memop_mask;
+ uint32_t hflags, saved_hflags;
+ target_ulong btarget;
+ bool ulri;
+ int kscrexist;
+ bool rxi;
+ int ie;
+ bool bi;
+ bool bp;
+ uint64_t PAMask;
+ bool mvh;
+ bool eva;
+ bool sc;
+ int CP0_LLAddr_shift;
+ bool ps;
+ bool vp;
+ bool cmgcr;
+ bool mrp;
+ bool nan2008;
+ bool abs2008;
+ bool saar;
+ bool mi;
+ int gi;
+} DisasContext;
+
+/* MIPS major opcodes */
+#define MASK_OP_MAJOR(op) (op & (0x3F << 26))
+
+#define OPC_CP1 (0x11 << 26)
+
+/* Coprocessor 1 (rs field) */
+#define MASK_CP1(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+/* Values for the fmt field in FP instructions */
+enum {
+ /* 0 - 15 are reserved */
+ FMT_S = 16, /* single fp */
+ FMT_D = 17, /* double fp */
+ FMT_E = 18, /* extended fp */
+ FMT_Q = 19, /* quad fp */
+ FMT_W = 20, /* 32-bit fixed */
+ FMT_L = 21, /* 64-bit fixed */
+ FMT_PS = 22, /* paired single fp */
+ /* 23 - 31 are reserved */
+};
+
+enum {
+ OPC_MFC1 = (0x00 << 21) | OPC_CP1,
+ OPC_DMFC1 = (0x01 << 21) | OPC_CP1,
+ OPC_CFC1 = (0x02 << 21) | OPC_CP1,
+ OPC_MFHC1 = (0x03 << 21) | OPC_CP1,
+ OPC_MTC1 = (0x04 << 21) | OPC_CP1,
+ OPC_DMTC1 = (0x05 << 21) | OPC_CP1,
+ OPC_CTC1 = (0x06 << 21) | OPC_CP1,
+ OPC_MTHC1 = (0x07 << 21) | OPC_CP1,
+ OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */
+ OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1,
+ OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1,
+ OPC_S_FMT = (FMT_S << 21) | OPC_CP1,
+ OPC_D_FMT = (FMT_D << 21) | OPC_CP1,
+ OPC_E_FMT = (FMT_E << 21) | OPC_CP1,
+ OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1,
+ OPC_W_FMT = (FMT_W << 21) | OPC_CP1,
+ OPC_L_FMT = (FMT_L << 21) | OPC_CP1,
+ OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1,
+ OPC_BC1EQZ = (0x09 << 21) | OPC_CP1,
+ OPC_BC1NEZ = (0x0D << 21) | OPC_CP1,
+};
+
+#define MASK_CP1_FUNC(op) (MASK_CP1(op) | (op & 0x3F))
+#define MASK_BC1(op) (MASK_CP1(op) | (op & (0x3 << 16)))
+
+enum {
+ OPC_BC1F = (0x00 << 16) | OPC_BC1,
+ OPC_BC1T = (0x01 << 16) | OPC_BC1,
+ OPC_BC1FL = (0x02 << 16) | OPC_BC1,
+ OPC_BC1TL = (0x03 << 16) | OPC_BC1,
+};
+
+enum {
+ OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2,
+ OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2,
+};
+
+enum {
+ OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4,
+ OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4,
+};
+
+#define gen_helper_0e1i(name, arg1, arg2) do { \
+ gen_helper_##name(cpu_env, arg1, tcg_constant_i32(arg2)); \
+ } while (0)
+
+#define gen_helper_1e0i(name, ret, arg1) do { \
+ gen_helper_##name(ret, cpu_env, tcg_constant_i32(arg1)); \
+ } while (0)
+
+#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \
+ gen_helper_##name(cpu_env, arg1, arg2, tcg_constant_i32(arg3));\
+ } while (0)
+
+void generate_exception(DisasContext *ctx, int excp);
+void generate_exception_err(DisasContext *ctx, int excp, int err);
+void generate_exception_end(DisasContext *ctx, int excp);
+void gen_reserved_instruction(DisasContext *ctx);
+
+void check_insn(DisasContext *ctx, uint64_t flags);
+void check_mips_64(DisasContext *ctx);
+/**
+ * check_cp0_enabled:
+ * Return %true if CP0 is enabled, otherwise return %false
+ * and emit a 'coprocessor unusable' exception.
+ */
+bool check_cp0_enabled(DisasContext *ctx);
+void check_cp1_enabled(DisasContext *ctx);
+void check_cp1_64bitmode(DisasContext *ctx);
+void check_cp1_registers(DisasContext *ctx, int regs);
+void check_cop1x(DisasContext *ctx);
+
+void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, int offset);
+void gen_move_low32(TCGv ret, TCGv_i64 arg);
+void gen_move_high32(TCGv ret, TCGv_i64 arg);
+void gen_load_gpr(TCGv t, int reg);
+void gen_store_gpr(TCGv t, int reg);
+#if defined(TARGET_MIPS64)
+void gen_load_gpr_hi(TCGv_i64 t, int reg);
+void gen_store_gpr_hi(TCGv_i64 t, int reg);
+#endif /* TARGET_MIPS64 */
+void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg);
+void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg);
+void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg);
+void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg);
+int get_fp_bit(int cc);
+
+void gen_ldxs(DisasContext *ctx, int base, int index, int rd);
+void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, int bp);
+void gen_addiupc(DisasContext *ctx, int rx, int imm,
+ int is_64_bit, int extended);
+
+/*
+ * Address Computation and Large Constant Instructions
+ */
+void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1);
+bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
+bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
+
+void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel);
+
+extern TCGv cpu_gpr[32], cpu_PC;
+#if defined(TARGET_MIPS64)
+extern TCGv_i64 cpu_gpr_hi[32];
+#endif
+extern TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
+extern TCGv_i32 fpu_fcr0, fpu_fcr31;
+extern TCGv_i64 fpu_f64[32];
+extern TCGv bcond;
+
+#define LOG_DISAS(...) \
+ do { \
+ if (MIPS_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define MIPS_INVAL(op) \
+ do { \
+ if (MIPS_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, \
+ TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \
+ ctx->base.pc_next, ctx->opcode, op, \
+ ctx->opcode >> 26, ctx->opcode & 0x3F, \
+ ((ctx->opcode >> 16) & 0x1F)); \
+ } \
+ } while (0)
+
+/* MSA */
+void msa_translate_init(void);
+
+/* MXU */
+void mxu_translate_init(void);
+bool decode_ase_mxu(DisasContext *ctx, uint32_t insn);
+
+/* decodetree generated */
+bool decode_isa_rel6(DisasContext *ctx, uint32_t insn);
+bool decode_ase_msa(DisasContext *ctx, uint32_t insn);
+bool decode_ext_txx9(DisasContext *ctx, uint32_t insn);
+#if defined(TARGET_MIPS64)
+bool decode_ext_tx79(DisasContext *ctx, uint32_t insn);
+#endif
+bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn);
+
+/*
+ * Helpers for implementing sets of trans_* functions.
+ * Defer the implementation of NAME to FUNC, with optional extra arguments.
+ */
+#define TRANS(NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { return FUNC(ctx, a, __VA_ARGS__); }
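+/*
+ * For example (illustrative only, hypothetical names): given a decodetree
+ * pattern FOO producing arg_FOO, writing
+ *
+ *     TRANS(FOO, gen_foo_common, true)
+ *
+ * defines trans_FOO(), which simply forwards to gen_foo_common(ctx, a, true).
+ */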
+
+static inline bool cpu_is_bigendian(DisasContext *ctx)
+{
+ return extract32(ctx->CP0_Config0, CP0C0_BE, 1);
+}
+
+#endif
diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c
new file mode 100644
index 000000000..96f483418
--- /dev/null
+++ b/target/mips/tcg/translate_addr_const.c
@@ -0,0 +1,61 @@
+/*
+ * Address Computation and Large Constant Instructions
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ * Copyright (c) 2020 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "translate.h"
+
+bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa)
+{
+ TCGv t0;
+ TCGv t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, sa + 1);
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+
+ return true;
+}
+
+bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa)
+{
+ TCGv t0;
+ TCGv t1;
+
+ check_mips_64(ctx);
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return true;
+ }
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, sa + 1);
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+
+ return true;
+}
diff --git a/target/mips/tcg/tx79.decode b/target/mips/tcg/tx79.decode
new file mode 100644
index 000000000..57d87a207
--- /dev/null
+++ b/target/mips/tcg/tx79.decode
@@ -0,0 +1,73 @@
+# Toshiba C790's instruction set
+#
+# Copyright (C) 2021 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference: Toshiba C790 manual, Appendix B, "C790-Specific Instruction Set Details"
+
+###########################################################################
+# Named attribute sets. These are used to make nice(er) names
+# when creating helpers common to those for the individual
+# instruction patterns.
+
+&r rs rt rd sa
+
+&i base rt offset
+
+###########################################################################
+# Named instruction formats. These are generally used to
+# reduce the amount of duplication between instruction patterns.
+
+@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &r sa=0
+@rt_rd ...... ..... rt:5 rd:5 ..... ...... &r sa=0 rs=0
+@rs ...... rs:5 ..... .......... ...... &r sa=0 rt=0 rd=0
+@rd ...... .......... rd:5 ..... ...... &r sa=0 rs=0 rt=0
+
+@ldst ...... base:5 rt:5 offset:16 &i
+
+###########################################################################
+
+MFHI1 011100 0000000000 ..... 00000 010000 @rd
+MTHI1 011100 ..... 0000000000 00000 010001 @rs
+MFLO1 011100 0000000000 ..... 00000 010010 @rd
+MTLO1 011100 ..... 0000000000 00000 010011 @rs
+
+# MMI0
+
+PSUBW 011100 ..... ..... ..... 00001 001000 @rs_rt_rd
+PCGTW 011100 ..... ..... ..... 00010 001000 @rs_rt_rd
+PSUBH 011100 ..... ..... ..... 00101 001000 @rs_rt_rd
+PCGTH 011100 ..... ..... ..... 00110 001000 @rs_rt_rd
+PSUBB 011100 ..... ..... ..... 01001 001000 @rs_rt_rd
+PCGTB 011100 ..... ..... ..... 01010 001000 @rs_rt_rd
+PEXTLW 011100 ..... ..... ..... 10010 001000 @rs_rt_rd
+PPACW 011100 ..... ..... ..... 10011 001000 @rs_rt_rd
+PEXTLH 011100 ..... ..... ..... 10110 001000 @rs_rt_rd
+PEXTLB 011100 ..... ..... ..... 11010 001000 @rs_rt_rd
+
+# MMI1
+
+PCEQW 011100 ..... ..... ..... 00010 101000 @rs_rt_rd
+PCEQH 011100 ..... ..... ..... 00110 101000 @rs_rt_rd
+PCEQB 011100 ..... ..... ..... 01010 101000 @rs_rt_rd
+PEXTUW 011100 ..... ..... ..... 10010 101000 @rs_rt_rd
+
+# MMI2
+
+PCPYLD 011100 ..... ..... ..... 01110 001001 @rs_rt_rd
+PAND 011100 ..... ..... ..... 10010 001001 @rs_rt_rd
+PXOR 011100 ..... ..... ..... 10011 001001 @rs_rt_rd
+PROT3W 011100 00000 ..... ..... 11111 001001 @rt_rd
+
+# MMI3
+
+PCPYUD 011100 ..... ..... ..... 01110 101001 @rs_rt_rd
+POR 011100 ..... ..... ..... 10010 101001 @rs_rt_rd
+PNOR 011100 ..... ..... ..... 10011 101001 @rs_rt_rd
+PCPYH 011100 00000 ..... ..... 11011 101001 @rt_rd
+
+# SPECIAL
+
+LQ 011110 ..... ..... ................ @ldst
+SQ 011111 ..... ..... ................ @ldst
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
new file mode 100644
index 000000000..6d51fe17c
--- /dev/null
+++ b/target/mips/tcg/tx79_translate.c
@@ -0,0 +1,685 @@
+/*
+ * Toshiba TX79-specific instructions translation routines
+ *
+ * Copyright (c) 2018 Fredrik Noring
+ * Copyright (c) 2021 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+
+/* Include the auto-generated decoder. */
+#include "decode-tx79.c.inc"
+
+/*
+ * Overview of the TX79-specific instruction set
+ * =============================================
+ *
+ * The R5900 and the C790 have 128-bit wide GPRs, where the upper 64 bits
+ * are only used by the specific quadword (128-bit) LQ/SQ load/store
+ * instructions and certain multimedia instructions (MMIs). These MMIs
+ * configure the 128-bit data path as two 64-bit, four 32-bit, eight 16-bit
+ * or sixteen 8-bit paths.
+ *
+ * Reference:
+ *
+ * The Toshiba TX System RISC TX79 Core Architecture manual,
+ * https://wiki.qemu.org/File:C790.pdf
+ */
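+
+/*
+ * For example, a halfword-wide MMI treats one 128-bit GPR as eight
+ * 16-bit lanes:
+ *
+ *   127    112             16        0
+ *   +--------+-----+--------+--------+
+ *   | lane 7 | ... | lane 1 | lane 0 |
+ *   +--------+-----+--------+--------+
+ *
+ * with lanes 0-3 held in the regular 64-bit GPR and lanes 4-7 in the
+ * corresponding gpr_hi half.
+ */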
+
+bool decode_ext_tx79(DisasContext *ctx, uint32_t insn)
+{
+ if (TARGET_LONG_BITS == 64 && decode_tx79(ctx, insn)) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Three-Operand Multiply and Multiply-Add (4 instructions)
+ * --------------------------------------------------------
+ * MADD [rd,] rs, rt Multiply/Add
+ * MADDU [rd,] rs, rt Multiply/Add Unsigned
+ * MULT [rd,] rs, rt Multiply (3-operand)
+ * MULTU [rd,] rs, rt Multiply Unsigned (3-operand)
+ */
+
+/*
+ * Multiply Instructions for Pipeline 1 (10 instructions)
+ * ------------------------------------------------------
+ * MULT1 [rd,] rs, rt Multiply Pipeline 1
+ * MULTU1 [rd,] rs, rt Multiply Unsigned Pipeline 1
+ * DIV1 rs, rt Divide Pipeline 1
+ * DIVU1 rs, rt Divide Unsigned Pipeline 1
+ * MADD1 [rd,] rs, rt Multiply-Add Pipeline 1
+ * MADDU1 [rd,] rs, rt Multiply-Add Unsigned Pipeline 1
+ * MFHI1 rd Move From HI1 Register
+ * MFLO1 rd Move From LO1 Register
+ * MTHI1 rs Move To HI1 Register
+ * MTLO1 rs Move To LO1 Register
+ */
+
+static bool trans_MFHI1(DisasContext *ctx, arg_r *a)
+{
+ gen_store_gpr(cpu_HI[1], a->rd);
+
+ return true;
+}
+
+static bool trans_MFLO1(DisasContext *ctx, arg_r *a)
+{
+ gen_store_gpr(cpu_LO[1], a->rd);
+
+ return true;
+}
+
+static bool trans_MTHI1(DisasContext *ctx, arg_r *a)
+{
+ gen_load_gpr(cpu_HI[1], a->rs);
+
+ return true;
+}
+
+static bool trans_MTLO1(DisasContext *ctx, arg_r *a)
+{
+ gen_load_gpr(cpu_LO[1], a->rs);
+
+ return true;
+}
+
+/*
+ * Arithmetic (19 instructions)
+ * ----------------------------
+ * PADDB rd, rs, rt Parallel Add Byte
+ * PSUBB rd, rs, rt Parallel Subtract Byte
+ * PADDH rd, rs, rt Parallel Add Halfword
+ * PSUBH rd, rs, rt Parallel Subtract Halfword
+ * PADDW rd, rs, rt Parallel Add Word
+ * PSUBW rd, rs, rt Parallel Subtract Word
+ * PADSBH rd, rs, rt Parallel Add/Subtract Halfword
+ * PADDSB rd, rs, rt Parallel Add with Signed Saturation Byte
+ * PSUBSB rd, rs, rt Parallel Subtract with Signed Saturation Byte
+ * PADDSH rd, rs, rt Parallel Add with Signed Saturation Halfword
+ * PSUBSH rd, rs, rt Parallel Subtract with Signed Saturation Halfword
+ * PADDSW rd, rs, rt Parallel Add with Signed Saturation Word
+ * PSUBSW rd, rs, rt Parallel Subtract with Signed Saturation Word
+ * PADDUB rd, rs, rt Parallel Add with Unsigned Saturation Byte
+ * PSUBUB rd, rs, rt Parallel Subtract with Unsigned Saturation Byte
+ * PADDUH rd, rs, rt Parallel Add with Unsigned Saturation Halfword
+ * PSUBUH rd, rs, rt Parallel Subtract with Unsigned Saturation Halfword
+ * PADDUW rd, rs, rt Parallel Add with Unsigned Saturation Word
+ * PSUBUW rd, rs, rt Parallel Subtract with Unsigned Saturation Word
+ */
+
+static bool trans_parallel_arith(DisasContext *ctx, arg_r *a,
+ void (*gen_logic_i64)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 ax, bx;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ ax = tcg_temp_new_i64();
+ bx = tcg_temp_new_i64();
+
+ /* Lower half */
+ gen_load_gpr(ax, a->rs);
+ gen_load_gpr(bx, a->rt);
+ gen_logic_i64(cpu_gpr[a->rd], ax, bx);
+
+ /* Upper half */
+ gen_load_gpr_hi(ax, a->rs);
+ gen_load_gpr_hi(bx, a->rt);
+ gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx);
+
+ tcg_temp_free(bx);
+ tcg_temp_free(ax);
+
+ return true;
+}
+
+/* Parallel Subtract Byte */
+static bool trans_PSUBB(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub8_i64);
+}
+
+/* Parallel Subtract Halfword */
+static bool trans_PSUBH(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub16_i64);
+}
+
+/* Parallel Subtract Word */
+static bool trans_PSUBW(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub32_i64);
+}
+
+/*
+ * Min/Max (4 instructions)
+ * ------------------------
+ * PMAXH rd, rs, rt Parallel Maximum Halfword
+ * PMINH rd, rs, rt Parallel Minimum Halfword
+ * PMAXW rd, rs, rt Parallel Maximum Word
+ * PMINW rd, rs, rt Parallel Minimum Word
+ */
+
+/*
+ * Absolute (2 instructions)
+ * -------------------------
+ * PABSH rd, rt Parallel Absolute Halfword
+ * PABSW rd, rt Parallel Absolute Word
+ */
+
+/*
+ * Logical (4 instructions)
+ * ------------------------
+ * PAND rd, rs, rt Parallel AND
+ * POR rd, rs, rt Parallel OR
+ * PXOR rd, rs, rt Parallel XOR
+ * PNOR rd, rs, rt Parallel NOR
+ */
+
+/* Parallel And */
+static bool trans_PAND(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_and_i64);
+}
+
+/* Parallel Or */
+static bool trans_POR(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_or_i64);
+}
+
+/* Parallel Exclusive Or */
+static bool trans_PXOR(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_xor_i64);
+}
+
+/* Parallel Not Or */
+static bool trans_PNOR(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_arith(ctx, a, tcg_gen_nor_i64);
+}
+
+/*
+ * Shift (9 instructions)
+ * ----------------------
+ * PSLLH rd, rt, sa Parallel Shift Left Logical Halfword
+ * PSRLH rd, rt, sa Parallel Shift Right Logical Halfword
+ * PSRAH rd, rt, sa Parallel Shift Right Arithmetic Halfword
+ * PSLLW rd, rt, sa Parallel Shift Left Logical Word
+ * PSRLW rd, rt, sa Parallel Shift Right Logical Word
+ * PSRAW rd, rt, sa Parallel Shift Right Arithmetic Word
+ * PSLLVW rd, rt, rs Parallel Shift Left Logical Variable Word
+ * PSRLVW rd, rt, rs Parallel Shift Right Logical Variable Word
+ * PSRAVW rd, rt, rs Parallel Shift Right Arithmetic Variable Word
+ */
+
+/*
+ * Compare (6 instructions)
+ * ------------------------
+ * PCGTB rd, rs, rt Parallel Compare for Greater Than Byte
+ * PCEQB rd, rs, rt Parallel Compare for Equal Byte
+ * PCGTH rd, rs, rt Parallel Compare for Greater Than Halfword
+ * PCEQH rd, rs, rt Parallel Compare for Equal Halfword
+ * PCGTW rd, rs, rt Parallel Compare for Greater Than Word
+ * PCEQW rd, rs, rt Parallel Compare for Equal Word
+ */
+
+static bool trans_parallel_compare(DisasContext *ctx, arg_r *a,
+ TCGCond cond, unsigned wlen)
+{
+ TCGv_i64 c0, c1, ax, bx, t0, t1, t2;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ c0 = tcg_const_tl(0);
+ c1 = tcg_const_tl(0xffffffff);
+ ax = tcg_temp_new_i64();
+ bx = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+
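+    /*
+     * Each loop below compares one sign-extended wlen-bit lane at a time
+     * and deposits an all-ones or all-zero mask into the matching lane
+     * of the destination register.
+     */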
+ /* Lower half */
+ gen_load_gpr(ax, a->rs);
+ gen_load_gpr(bx, a->rt);
+ for (int i = 0; i < (64 / wlen); i++) {
+ tcg_gen_sextract_i64(t0, ax, wlen * i, wlen);
+ tcg_gen_sextract_i64(t1, bx, wlen * i, wlen);
+ tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0);
+ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], t2, wlen * i, wlen);
+ }
+ /* Upper half */
+ gen_load_gpr_hi(ax, a->rs);
+ gen_load_gpr_hi(bx, a->rt);
+ for (int i = 0; i < (64 / wlen); i++) {
+ tcg_gen_sextract_i64(t0, ax, wlen * i, wlen);
+ tcg_gen_sextract_i64(t1, bx, wlen * i, wlen);
+ tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0);
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen);
+ }
+
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(bx);
+ tcg_temp_free(ax);
+ tcg_temp_free(c1);
+ tcg_temp_free(c0);
+
+ return true;
+}
+
+/* Parallel Compare for Greater Than Byte */
+static bool trans_PCGTB(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_GE, 8);
+}
+
+/* Parallel Compare for Equal Byte */
+static bool trans_PCEQB(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 8);
+}
+
+/* Parallel Compare for Greater Than Halfword */
+static bool trans_PCGTH(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_GE, 16);
+}
+
+/* Parallel Compare for Equal Halfword */
+static bool trans_PCEQH(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 16);
+}
+
+/* Parallel Compare for Greater Than Word */
+static bool trans_PCGTW(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_GE, 32);
+}
+
+/* Parallel Compare for Equal Word */
+static bool trans_PCEQW(DisasContext *ctx, arg_r *a)
+{
+ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 32);
+}
+
+/*
+ * LZC (1 instruction)
+ * -------------------
+ * PLZCW rd, rs Parallel Leading Zero or One Count Word
+ */
+
+/*
+ * Quadword Load and Store (2 instructions)
+ * ----------------------------------------
+ * LQ rt, offset(base) Load Quadword
+ * SQ rt, offset(base) Store Quadword
+ */
+
+static bool trans_LQ(DisasContext *ctx, arg_i *a)
+{
+ TCGv_i64 t0;
+ TCGv addr;
+
+ if (a->rt == 0) {
+ /* nop */
+ return true;
+ }
+
+ t0 = tcg_temp_new_i64();
+ addr = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, addr, a->base, a->offset);
+ /*
+ * Clear least-significant four bits of the effective
+ * address, effectively creating an aligned address.
+ */
+ tcg_gen_andi_tl(addr, addr, ~0xf);
+
+ /* Lower half */
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t0, a->rt);
+
+ /* Upper half */
+ tcg_gen_addi_i64(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr_hi(t0, a->rt);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(addr);
+
+ return true;
+}
+
+static bool trans_SQ(DisasContext *ctx, arg_i *a)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv addr = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, addr, a->base, a->offset);
+ /*
+ * Clear least-significant four bits of the effective
+ * address, effectively creating an aligned address.
+ */
+ tcg_gen_andi_tl(addr, addr, ~0xf);
+
+ /* Lower half */
+ gen_load_gpr(t0, a->rt);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+
+ /* Upper half */
+ tcg_gen_addi_i64(addr, addr, 8);
+ gen_load_gpr_hi(t0, a->rt);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+
+ tcg_temp_free(addr);
+ tcg_temp_free(t0);
+
+ return true;
+}
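
The ~0xf masking in LQ/SQ above implements the architectural rule that the low four bits of the effective address are ignored. A quick illustration with a made-up address:

    /* 0x80001017 & ~0xf == 0x80001010, so the two 64-bit halves are
     * accessed at 0x80001010 and 0x80001018 regardless of the low bits. */
    uint64_t ea      = 0x80001017ULL;
    uint64_t lo_addr = ea & ~0xfULL;   /* 0x80001010: lower 64 bits */
    uint64_t hi_addr = lo_addr + 8;    /* 0x80001018: upper 64 bits */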
+
+/*
+ * Multiply and Divide (19 instructions)
+ * -------------------------------------
+ * PMULTW rd, rs, rt Parallel Multiply Word
+ * PMULTUW rd, rs, rt Parallel Multiply Unsigned Word
+ * PDIVW rs, rt Parallel Divide Word
+ * PDIVUW rs, rt Parallel Divide Unsigned Word
+ * PMADDW rd, rs, rt Parallel Multiply-Add Word
+ * PMADDUW rd, rs, rt Parallel Multiply-Add Unsigned Word
+ * PMSUBW rd, rs, rt Parallel Multiply-Subtract Word
+ * PMULTH rd, rs, rt Parallel Multiply Halfword
+ * PMADDH rd, rs, rt Parallel Multiply-Add Halfword
+ * PMSUBH rd, rs, rt Parallel Multiply-Subtract Halfword
+ * PHMADH rd, rs, rt Parallel Horizontal Multiply-Add Halfword
+ * PHMSBH rd, rs, rt Parallel Horizontal Multiply-Subtract Halfword
+ * PDIVBW rs, rt Parallel Divide Broadcast Word
+ * PMFHI rd Parallel Move From HI Register
+ * PMFLO rd Parallel Move From LO Register
+ * PMTHI rs Parallel Move To HI Register
+ * PMTLO rs Parallel Move To LO Register
+ * PMFHL rd Parallel Move From HI/LO Register
+ * PMTHL rs Parallel Move To HI/LO Register
+ */
+
+/*
+ * Pack/Extend (11 instructions)
+ * -----------------------------
+ * PPAC5 rd, rt Parallel Pack to 5 bits
+ * PPACB rd, rs, rt Parallel Pack to Byte
+ * PPACH rd, rs, rt Parallel Pack to Halfword
+ * PPACW rd, rs, rt Parallel Pack to Word
+ * PEXT5 rd, rt Parallel Extend Upper from 5 bits
+ * PEXTUB rd, rs, rt Parallel Extend Upper from Byte
+ * PEXTLB rd, rs, rt Parallel Extend Lower from Byte
+ * PEXTUH rd, rs, rt Parallel Extend Upper from Halfword
+ * PEXTLH rd, rs, rt Parallel Extend Lower from Halfword
+ * PEXTUW rd, rs, rt Parallel Extend Upper from Word
+ * PEXTLW rd, rs, rt Parallel Extend Lower from Word
+ */
+
+/* Parallel Pack to Word */
+static bool trans_PPACW(DisasContext *ctx, arg_r *a)
+{
+ TCGv_i64 a0, b0, t0;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ a0 = tcg_temp_new_i64();
+ b0 = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+
+ gen_load_gpr(a0, a->rs);
+ gen_load_gpr(b0, a->rt);
+
+ gen_load_gpr_hi(t0, a->rt); /* b1 */
+ tcg_gen_deposit_i64(cpu_gpr[a->rd], b0, t0, 32, 32);
+
+ gen_load_gpr_hi(t0, a->rs); /* a1 */
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(b0);
+ tcg_temp_free(a0);
+
+ return true;
+}
+
+static void gen_pextw(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_deposit_i64(dl, b, a, 32, 32);
+ tcg_gen_shri_i64(b, b, 32);
+ tcg_gen_deposit_i64(dh, a, b, 0, 32);
+}
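
A scalar model of gen_pextw(), for illustration only: it interleaves the 32-bit words of its two operands. One caveat the model hides is that the TCG version clobbers its b operand in place:

    #include <stdint.h>

    /* dl = { a[31:0], b[31:0] };  dh = { a[63:32], b[63:32] } */
    static void pextw_model(uint64_t *dl, uint64_t *dh, uint64_t a, uint64_t b)
    {
        *dl = ((a & 0xffffffffull) << 32) | (b & 0xffffffffull);
        *dh = (a & 0xffffffff00000000ull) | (b >> 32);
    }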
+
+static bool trans_PEXTLx(DisasContext *ctx, arg_r *a, unsigned wlen)
+{
+ TCGv_i64 ax, bx;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ ax = tcg_temp_new_i64();
+ bx = tcg_temp_new_i64();
+
+ gen_load_gpr(ax, a->rs);
+ gen_load_gpr(bx, a->rt);
+
+ /* Lower half */
+ for (int i = 0; i < 64 / (2 * wlen); i++) {
+ tcg_gen_deposit_i64(cpu_gpr[a->rd],
+ cpu_gpr[a->rd], bx, 2 * wlen * i, wlen);
+ tcg_gen_deposit_i64(cpu_gpr[a->rd],
+ cpu_gpr[a->rd], ax, 2 * wlen * i + wlen, wlen);
+ tcg_gen_shri_i64(bx, bx, wlen);
+ tcg_gen_shri_i64(ax, ax, wlen);
+ }
+ /* Upper half */
+ for (int i = 0; i < 64 / (2 * wlen); i++) {
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd],
+ cpu_gpr_hi[a->rd], bx, 2 * wlen * i, wlen);
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd],
+ cpu_gpr_hi[a->rd], ax, 2 * wlen * i + wlen, wlen);
+ tcg_gen_shri_i64(bx, bx, wlen);
+ tcg_gen_shri_i64(ax, ax, wlen);
+ }
+
+ tcg_temp_free(bx);
+ tcg_temp_free(ax);
+
+ return true;
+}
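
The deposit/shift loop above consumes the lower doubleword of each source, one element pair per iteration. A hypothetical scalar model for the halfword case (wlen = 16), illustration only; the byte case is analogous with eight pairs:

    #include <stdint.h>

    /* Scalar model of PEXTLH: interleave halfwords of rs/rt low 64 bits. */
    static void pextlh_model(uint64_t *dlo, uint64_t *dhi,
                             uint64_t rs, uint64_t rt)
    {
        uint64_t d[2] = { 0, 0 };
        for (int j = 0; j < 4; j++) {          /* four halfword pairs */
            uint64_t a = (rs >> (16 * j)) & 0xffff;
            uint64_t b = (rt >> (16 * j)) & 0xffff;
            /* rt element in the even lane, rs element in the odd lane */
            d[j / 2] |= (b << (32 * (j & 1))) | (a << (32 * (j & 1) + 16));
        }
        *dlo = d[0];
        *dhi = d[1];
    }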
+
+/* Parallel Extend Lower from Byte */
+static bool trans_PEXTLB(DisasContext *ctx, arg_r *a)
+{
+ return trans_PEXTLx(ctx, a, 8);
+}
+
+/* Parallel Extend Lower from Halfword */
+static bool trans_PEXTLH(DisasContext *ctx, arg_r *a)
+{
+ return trans_PEXTLx(ctx, a, 16);
+}
+
+/* Parallel Extend Lower from Word */
+static bool trans_PEXTLW(DisasContext *ctx, arg_r *a)
+{
+ TCGv_i64 ax, bx;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ ax = tcg_temp_new_i64();
+ bx = tcg_temp_new_i64();
+
+ gen_load_gpr(ax, a->rs);
+ gen_load_gpr(bx, a->rt);
+ gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
+
+ tcg_temp_free(bx);
+ tcg_temp_free(ax);
+
+ return true;
+}
+
+/* Parallel Extend Upper from Word */
+static bool trans_PEXTUW(DisasContext *ctx, arg_r *a)
+{
+ TCGv_i64 ax, bx;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ ax = tcg_temp_new_i64();
+ bx = tcg_temp_new_i64();
+
+ gen_load_gpr_hi(ax, a->rs);
+ gen_load_gpr_hi(bx, a->rt);
+ gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx);
+
+ tcg_temp_free(bx);
+ tcg_temp_free(ax);
+
+ return true;
+}
+
+/*
+ * Others (16 instructions)
+ * ------------------------
+ * PCPYH rd, rt Parallel Copy Halfword
+ * PCPYLD rd, rs, rt Parallel Copy Lower Doubleword
+ * PCPYUD rd, rs, rt Parallel Copy Upper Doubleword
+ * PREVH rd, rt Parallel Reverse Halfword
+ * PINTH rd, rs, rt Parallel Interleave Halfword
+ * PINTEH rd, rs, rt Parallel Interleave Even Halfword
+ * PEXEH rd, rt Parallel Exchange Even Halfword
+ * PEXCH rd, rt Parallel Exchange Center Halfword
+ * PEXEW rd, rt Parallel Exchange Even Word
+ * PEXCW rd, rt Parallel Exchange Center Word
+ * QFSRV rd, rs, rt Quadword Funnel Shift Right Variable
+ * MFSA rd Move from Shift Amount Register
+ * MTSA rs Move to Shift Amount Register
+ * MTSAB rs, immediate Move Byte Count to Shift Amount Register
+ * MTSAH rs, immediate Move Halfword Count to Shift Amount Register
+ * PROT3W rd, rt Parallel Rotate 3 Words
+ */
+
+/* Parallel Copy Halfword */
+static bool trans_PCPYH(DisasContext *s, arg_r *a)
+{
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ if (a->rt == 0) {
+ tcg_gen_movi_i64(cpu_gpr[a->rd], 0);
+ tcg_gen_movi_i64(cpu_gpr_hi[a->rd], 0);
+ return true;
+ }
+
+ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], cpu_gpr[a->rt], 16, 16);
+ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], cpu_gpr[a->rd], 32, 32);
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rt], cpu_gpr_hi[a->rt], 16, 16);
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], 32, 32);
+
+ return true;
+}
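
The two deposits per half replicate halfword 0 of each input doubleword across all four halfword lanes. An equivalent illustration-only multiplication trick, with rt_lo a made-up input value:

    /* rt_lo = 0x1111222233334444 -> rd_lo = 0x4444444444444444 */
    uint64_t rd_lo = (rt_lo & 0xffffULL) * 0x0001000100010001ULL;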
+
+/* Parallel Copy Lower Doubleword */
+static bool trans_PCPYLD(DisasContext *s, arg_r *a)
+{
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ if (a->rs == 0) {
+ tcg_gen_movi_i64(cpu_gpr_hi[a->rd], 0);
+ } else {
+ tcg_gen_mov_i64(cpu_gpr_hi[a->rd], cpu_gpr[a->rs]);
+ }
+
+ if (a->rt == 0) {
+ tcg_gen_movi_i64(cpu_gpr[a->rd], 0);
+ } else if (a->rd != a->rt) {
+ tcg_gen_mov_i64(cpu_gpr[a->rd], cpu_gpr[a->rt]);
+ }
+
+ return true;
+}
+
+/* Parallel Copy Upper Doubleword */
+static bool trans_PCPYUD(DisasContext *s, arg_r *a)
+{
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+
+ gen_load_gpr_hi(cpu_gpr[a->rd], a->rs);
+
+ if (a->rt == 0) {
+ tcg_gen_movi_i64(cpu_gpr_hi[a->rd], 0);
+ } else if (a->rd != a->rt) {
+ tcg_gen_mov_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rt]);
+ }
+
+ return true;
+}
+
+/* Parallel Rotate 3 Words Left */
+static bool trans_PROT3W(DisasContext *ctx, arg_r *a)
+{
+ TCGv_i64 ax;
+
+ if (a->rd == 0) {
+ /* nop */
+ return true;
+ }
+ if (a->rt == 0) {
+ tcg_gen_movi_i64(cpu_gpr[a->rd], 0);
+ tcg_gen_movi_i64(cpu_gpr_hi[a->rd], 0);
+ return true;
+ }
+
+ ax = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(ax, cpu_gpr_hi[a->rt]);
+ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], ax, cpu_gpr[a->rt], 0, 32);
+
+ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32);
+ tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32);
+
+ tcg_temp_free(ax);
+
+ return true;
+}
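
Labelling the four 32-bit words of the 128-bit register w3..w0 (most to least significant), the deposit/rotate sequence above amounts to the word permutation below; w3 is preserved and the lower three words rotate. Illustration only:

    #include <stdint.h>

    /* Scalar model: rd = { w3, w0, w2, w1 } for rt = { w3, w2, w1, w0 } */
    static void prot3w_model(uint64_t *dlo, uint64_t *dhi,
                             uint64_t rt_lo, uint64_t rt_hi)
    {
        uint32_t w0 = (uint32_t)rt_lo, w1 = (uint32_t)(rt_lo >> 32);
        uint32_t w2 = (uint32_t)rt_hi, w3 = (uint32_t)(rt_hi >> 32);

        *dhi = ((uint64_t)w3 << 32) | w0;
        *dlo = ((uint64_t)w2 << 32) | w1;
    }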
diff --git a/target/mips/tcg/txx9_translate.c b/target/mips/tcg/txx9_translate.c
new file mode 100644
index 000000000..8a2c0b766
--- /dev/null
+++ b/target/mips/tcg/txx9_translate.c
@@ -0,0 +1,20 @@
+/*
+ * Toshiba TXx9 instructions translation routines
+ *
+ * Copyright (c) 2021 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "translate.h"
+
+bool decode_ext_txx9(DisasContext *ctx, uint32_t insn)
+{
+#if defined(TARGET_MIPS64)
+ if (decode_ext_tx79(ctx, insn)) {
+ return true;
+ }
+#endif
+ return false;
+}
diff --git a/target/mips/tcg/vr54xx.decode b/target/mips/tcg/vr54xx.decode
new file mode 100644
index 000000000..4fc708d80
--- /dev/null
+++ b/target/mips/tcg/vr54xx.decode
@@ -0,0 +1,27 @@
+# MIPS VR5432 instruction set extensions
+#
+# Copyright (C) 2021 Philippe Mathieu-Daudé
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Reference: VR5432 Microprocessor User’s Manual
+# (Document Number U13751EU5V0UM00)
+
+&r rs rt rd
+
+@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &r
+
+MULS 000000 ..... ..... ..... 00011011000 @rs_rt_rd
+MULSU 000000 ..... ..... ..... 00011011001 @rs_rt_rd
+MACC 000000 ..... ..... ..... 00101011000 @rs_rt_rd
+MACCU 000000 ..... ..... ..... 00101011001 @rs_rt_rd
+MSAC 000000 ..... ..... ..... 00111011000 @rs_rt_rd
+MSACU 000000 ..... ..... ..... 00111011001 @rs_rt_rd
+MULHI 000000 ..... ..... ..... 01001011000 @rs_rt_rd
+MULHIU 000000 ..... ..... ..... 01001011001 @rs_rt_rd
+MULSHI 000000 ..... ..... ..... 01011011000 @rs_rt_rd
+MULSHIU 000000 ..... ..... ..... 01011011001 @rs_rt_rd
+MACCHI 000000 ..... ..... ..... 01101011000 @rs_rt_rd
+MACCHIU 000000 ..... ..... ..... 01101011001 @rs_rt_rd
+MSACHI 000000 ..... ..... ..... 01111011000 @rs_rt_rd
+MSACHIU 000000 ..... ..... ..... 01111011001 @rs_rt_rd
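
Each pattern line fixes the opcode and function bits, while the @rs_rt_rd format names the register fields. The auto-generated decoder (decode-vr54xx.c.inc) does the equivalent of the following field extraction; shown for illustration, using QEMU's extract32() from qemu/bitops.h:

    /* Fields extracted by the @rs_rt_rd format for a matched insn word. */
    a->rs = extract32(insn, 21, 5);
    a->rt = extract32(insn, 16, 5);
    a->rd = extract32(insn, 11, 5);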
diff --git a/target/mips/tcg/vr54xx_helper.c b/target/mips/tcg/vr54xx_helper.c
new file mode 100644
index 000000000..2255bd111
--- /dev/null
+++ b/target/mips/tcg/vr54xx_helper.c
@@ -0,0 +1,142 @@
+/*
+ * MIPS VR5432 emulation helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* 64-bit arithmetic for 32-bit hosts */
+static inline uint64_t get_HILO(CPUMIPSState *env)
+{
+ return ((uint64_t)(env->active_tc.HI[0]) << 32) |
+ (uint32_t)env->active_tc.LO[0];
+}
+
+static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
+{
+ env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
+ return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
+}
+
+static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
+{
+ target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
+ env->active_tc.HI[0] = (int32_t)(HILO >> 32);
+ return tmp;
+}
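
An illustration of the packing convention with a made-up product value: both setters split the 64-bit result into HI and LO, sign-extending each 32-bit half; they differ only in which half they return.

    uint64_t hilo = 0x0000000100000002ULL;
    int32_t lo = (int32_t)hilo;          /* 2 -> LO[0]; returned by set_HI_LOT0() */
    int32_t hi = (int32_t)(hilo >> 32);  /* 1 -> HI[0]; returned by set_HIT0_LO() */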
+
+/* Multiplication variants of the vr54xx. */
+target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2));
+}
+
+target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) + (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
diff --git a/target/mips/tcg/vr54xx_helper.h.inc b/target/mips/tcg/vr54xx_helper.h.inc
new file mode 100644
index 000000000..50b1f5b81
--- /dev/null
+++ b/target/mips/tcg/vr54xx_helper.h.inc
@@ -0,0 +1,24 @@
+/*
+ * MIPS NEC Vr54xx instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+DEF_HELPER_3(muls, tl, env, tl, tl)
+DEF_HELPER_3(mulsu, tl, env, tl, tl)
+DEF_HELPER_3(macc, tl, env, tl, tl)
+DEF_HELPER_3(maccu, tl, env, tl, tl)
+DEF_HELPER_3(msac, tl, env, tl, tl)
+DEF_HELPER_3(msacu, tl, env, tl, tl)
+DEF_HELPER_3(mulhi, tl, env, tl, tl)
+DEF_HELPER_3(mulhiu, tl, env, tl, tl)
+DEF_HELPER_3(mulshi, tl, env, tl, tl)
+DEF_HELPER_3(mulshiu, tl, env, tl, tl)
+DEF_HELPER_3(macchi, tl, env, tl, tl)
+DEF_HELPER_3(macchiu, tl, env, tl, tl)
+DEF_HELPER_3(msachi, tl, env, tl, tl)
+DEF_HELPER_3(msachiu, tl, env, tl, tl)
diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c
new file mode 100644
index 000000000..3e2c98f2c
--- /dev/null
+++ b/target/mips/tcg/vr54xx_translate.c
@@ -0,0 +1,72 @@
+/*
+ * VR5432 extensions translation routines
+ *
+ * Reference: VR5432 Microprocessor User’s Manual
+ * (Document Number U13751EU5V0UM00)
+ *
+ * Copyright (c) 2021 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "tcg/tcg-op.h"
+#include "exec/helper-gen.h"
+#include "translate.h"
+#include "internal.h"
+
+/* Include the auto-generated decoder. */
+#include "decode-vr54xx.c.inc"
+
+/*
+ * Integer Multiply-Accumulate Instructions
+ *
+ * MACC Multiply, accumulate, and move LO
+ * MACCHI Multiply, accumulate, and move HI
+ * MACCHIU Unsigned multiply, accumulate, and move HI
+ * MACCU Unsigned multiply, accumulate, and move LO
+ * MSAC Multiply, negate, accumulate, and move LO
+ * MSACHI Multiply, negate, accumulate, and move HI
+ * MSACHIU Unsigned multiply, negate, accumulate, and move HI
+ * MSACU Unsigned multiply, negate, accumulate, and move LO
+ * MULHI Multiply and move HI
+ * MULHIU Unsigned multiply and move HI
+ * MULS Multiply, negate, and move LO
+ * MULSHI Multiply, negate, and move HI
+ * MULSHIU Unsigned multiply, negate, and move HI
+ * MULSU Unsigned multiply, negate, and move LO
+ */
+
+static bool trans_mult_acc(DisasContext *ctx, arg_r *a,
+ void (*gen_helper_mult_acc)(TCGv, TCGv_ptr, TCGv, TCGv))
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rs);
+ gen_load_gpr(t1, a->rt);
+
+ gen_helper_mult_acc(t0, cpu_env, t0, t1);
+
+ gen_store_gpr(t0, a->rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+
+ /* The instruction was decoded and code was emitted; report success. */
+ return true;
+}
+
+TRANS(MACC, trans_mult_acc, gen_helper_macc);
+TRANS(MACCHI, trans_mult_acc, gen_helper_macchi);
+TRANS(MACCHIU, trans_mult_acc, gen_helper_macchiu);
+TRANS(MACCU, trans_mult_acc, gen_helper_maccu);
+TRANS(MSAC, trans_mult_acc, gen_helper_msac);
+TRANS(MSACHI, trans_mult_acc, gen_helper_msachi);
+TRANS(MSACHIU, trans_mult_acc, gen_helper_msachiu);
+TRANS(MSACU, trans_mult_acc, gen_helper_msacu);
+TRANS(MULHI, trans_mult_acc, gen_helper_mulhi);
+TRANS(MULHIU, trans_mult_acc, gen_helper_mulhiu);
+TRANS(MULS, trans_mult_acc, gen_helper_muls);
+TRANS(MULSHI, trans_mult_acc, gen_helper_mulshi);
+TRANS(MULSHIU, trans_mult_acc, gen_helper_mulshiu);
+TRANS(MULSU, trans_mult_acc, gen_helper_mulsu);
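
Assuming the TRANS() convenience macro from target/mips/tcg/translate.h, each line above should expand to roughly the following (a sketch, not verified against the macro definition):

    static bool trans_MACC(DisasContext *ctx, arg_MACC *a)
    {
        return trans_mult_acc(ctx, a, gen_helper_macc);
    }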
diff --git a/target/nios2/Kconfig b/target/nios2/Kconfig
new file mode 100644
index 000000000..1529ab895
--- /dev/null
+++ b/target/nios2/Kconfig
@@ -0,0 +1,2 @@
+config NIOS2
+ bool
diff --git a/target/nios2/cpu-param.h b/target/nios2/cpu-param.h
new file mode 100644
index 000000000..38bedbfd6
--- /dev/null
+++ b/target/nios2/cpu-param.h
@@ -0,0 +1,21 @@
+/*
+ * Altera Nios II cpu parameters for qemu.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ * SPDX-License-Identifier: LGPL-2.1+
+ */
+
+#ifndef NIOS2_CPU_PARAM_H
+#define NIOS2_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#ifdef CONFIG_USER_ONLY
+# define TARGET_VIRT_ADDR_SPACE_BITS 31
+#else
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define NB_MMU_MODES 2
+
+#endif
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
new file mode 100644
index 000000000..4cade61e9
--- /dev/null
+++ b/target/nios2/cpu.c
@@ -0,0 +1,269 @@
+/*
+ * QEMU Nios II CPU
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/log.h"
+#include "exec/gdbstub.h"
+#include "hw/qdev-properties.h"
+
+static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ env->regs[R_PC] = value;
+}
+
+static bool nios2_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+static void nios2_cpu_reset(DeviceState *dev)
+{
+ CPUState *cs = CPU(dev);
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu);
+ CPUNios2State *env = &cpu->env;
+
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", cs->cpu_index);
+ log_cpu_state(cs, 0);
+ }
+
+ ncc->parent_reset(dev);
+
+ memset(env->regs, 0, sizeof(uint32_t) * NUM_CORE_REGS);
+ env->regs[R_PC] = cpu->reset_addr;
+
+#if defined(CONFIG_USER_ONLY)
+ /* Start in user mode with interrupts enabled. */
+ env->regs[CR_STATUS] = CR_STATUS_U | CR_STATUS_PIE;
+#else
+ env->regs[CR_STATUS] = 0;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static void nios2_cpu_set_irq(void *opaque, int irq, int level)
+{
+ Nios2CPU *cpu = opaque;
+ CPUNios2State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ env->regs[CR_IPENDING] = deposit32(env->regs[CR_IPENDING], irq, 1, !!level);
+
+ env->irq_pending = env->regs[CR_IPENDING] & env->regs[CR_IENABLE];
+
+ if (env->irq_pending && (env->regs[CR_STATUS] & CR_STATUS_PIE)) {
+ env->irq_pending = 0;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else if (!env->irq_pending) {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+#endif
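
deposit32(value, start, length, fieldval) from qemu/bitops.h rewrites one bit-field of a word; here it toggles a single ipending bit per IRQ line. Illustration with made-up values:

    uint32_t ipending = 0x00000005;           /* IRQs 0 and 2 pending    */
    ipending = deposit32(ipending, 3, 1, 1);  /* raise IRQ 3 -> 0x0000000d */
    ipending = deposit32(ipending, 0, 1, 0);  /* clear IRQ 0 -> 0x0000000c */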
+
+static void nios2_cpu_initfn(Object *obj)
+{
+ Nios2CPU *cpu = NIOS2_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+
+#if !defined(CONFIG_USER_ONLY)
+ mmu_init(&cpu->env);
+
+ /*
+ * These interrupt lines model the IIC (internal interrupt
+ * controller). QEMU does not currently support the EIC
+ * (external interrupt controller) -- if we did it would be
+ * a separate device in hw/intc with a custom interface to
+ * the CPU, and boards using it would not wire up these IRQ lines.
+ */
+ qdev_init_gpio_in_named(DEVICE(cpu), nios2_cpu_set_irq, "IRQ", 32);
+#endif
+}
+
+static ObjectClass *nios2_cpu_class_by_name(const char *cpu_model)
+{
+ return object_class_by_name(TYPE_NIOS2_CPU);
+}
+
+static void nios2_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ ncc->parent_realize(dev, errp);
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->regs[CR_STATUS] & CR_STATUS_PIE)) {
+ cs->exception_index = EXCP_IRQ;
+ nios2_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ /* NOTE: NiosII R2 is not supported yet. */
+ info->mach = bfd_arch_nios2;
+ info->print_insn = print_insn_nios2;
+}
+
+static int nios2_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ if (n < 32) { /* GP regs */
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ } else if (n == 32) { /* PC */
+ return gdb_get_reg32(mem_buf, env->regs[R_PC]);
+ } else if (n < 49) { /* Status regs */
+ return gdb_get_reg32(mem_buf, env->regs[n - 1]);
+ }
+
+ /* Invalid regs */
+ return 0;
+}
+
+static int nios2_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ if (n < 32) { /* GP regs */
+ env->regs[n] = ldl_p(mem_buf);
+ } else if (n == 32) { /* PC */
+ env->regs[R_PC] = ldl_p(mem_buf);
+ } else if (n < 49) { /* Status regs */
+ env->regs[n - 1] = ldl_p(mem_buf);
+ }
+
+ return 4;
+}
+
+static Property nios2_properties[] = {
+ DEFINE_PROP_BOOL("mmu_present", Nios2CPU, mmu_present, true),
+ /* ALTR,pid-num-bits */
+ DEFINE_PROP_UINT32("mmu_pid_num_bits", Nios2CPU, pid_num_bits, 8),
+ /* ALTR,tlb-num-ways */
+ DEFINE_PROP_UINT32("mmu_tlb_num_ways", Nios2CPU, tlb_num_ways, 16),
+ /* ALTR,tlb-num-entries */
+ DEFINE_PROP_UINT32("mmu_pid_num_entries", Nios2CPU, tlb_num_entries, 256),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps nios2_sysemu_ops = {
+ .get_phys_page_debug = nios2_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps nios2_tcg_ops = {
+ .initialize = nios2_tcg_init,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = nios2_cpu_record_sigsegv,
+#else
+ .tlb_fill = nios2_cpu_tlb_fill,
+ .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
+ .do_interrupt = nios2_cpu_do_interrupt,
+ .do_unaligned_access = nios2_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void nios2_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ Nios2CPUClass *ncc = NIOS2_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, nios2_cpu_realizefn,
+ &ncc->parent_realize);
+ device_class_set_props(dc, nios2_properties);
+ device_class_set_parent_reset(dc, nios2_cpu_reset, &ncc->parent_reset);
+
+ cc->class_by_name = nios2_cpu_class_by_name;
+ cc->has_work = nios2_cpu_has_work;
+ cc->dump_state = nios2_cpu_dump_state;
+ cc->set_pc = nios2_cpu_set_pc;
+ cc->disas_set_info = nios2_cpu_disas_set_info;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &nios2_sysemu_ops;
+#endif
+ cc->gdb_read_register = nios2_cpu_gdb_read_register;
+ cc->gdb_write_register = nios2_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 49;
+ cc->tcg_ops = &nios2_tcg_ops;
+}
+
+static const TypeInfo nios2_cpu_type_info = {
+ .name = TYPE_NIOS2_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(Nios2CPU),
+ .instance_init = nios2_cpu_initfn,
+ .class_size = sizeof(Nios2CPUClass),
+ .class_init = nios2_cpu_class_init,
+};
+
+static void nios2_cpu_register_types(void)
+{
+ type_register_static(&nios2_cpu_type_info);
+}
+
+type_init(nios2_cpu_register_types)
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
new file mode 100644
index 000000000..1a69ed7a4
--- /dev/null
+++ b/target/nios2/cpu.h
@@ -0,0 +1,249 @@
+/*
+ * Altera Nios II virtual CPU header
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef NIOS2_CPU_H
+#define NIOS2_CPU_H
+
+#include "exec/cpu-defs.h"
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+typedef struct CPUNios2State CPUNios2State;
+#if !defined(CONFIG_USER_ONLY)
+#include "mmu.h"
+#endif
+
+#define TYPE_NIOS2_CPU "nios2-cpu"
+
+OBJECT_DECLARE_TYPE(Nios2CPU, Nios2CPUClass,
+ NIOS2_CPU)
+
+/**
+ * Nios2CPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Nios2 CPU model.
+ */
+struct Nios2CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+#define TARGET_HAS_ICE 1
+
+/* Configuration options for Nios II */
+#define RESET_ADDRESS 0x00000000
+#define EXCEPTION_ADDRESS 0x00000004
+#define FAST_TLB_MISS_ADDRESS 0x00000008
+
+
+/* GP regs + CR regs + PC */
+#define NUM_CORE_REGS (32 + 32 + 1)
+
+/* General purpose register aliases */
+#define R_ZERO 0
+#define R_AT 1
+#define R_RET0 2
+#define R_RET1 3
+#define R_ARG0 4
+#define R_ARG1 5
+#define R_ARG2 6
+#define R_ARG3 7
+#define R_ET 24
+#define R_BT 25
+#define R_GP 26
+#define R_SP 27
+#define R_FP 28
+#define R_EA 29
+#define R_BA 30
+#define R_RA 31
+
+/* Control register aliases */
+#define CR_BASE 32
+#define CR_STATUS (CR_BASE + 0)
+#define CR_STATUS_PIE (1 << 0)
+#define CR_STATUS_U (1 << 1)
+#define CR_STATUS_EH (1 << 2)
+#define CR_STATUS_IH (1 << 3)
+#define CR_STATUS_IL (63 << 4)
+#define CR_STATUS_CRS (63 << 10)
+#define CR_STATUS_PRS (63 << 16)
+#define CR_STATUS_NMI (1 << 22)
+#define CR_STATUS_RSIE (1 << 23)
+#define CR_ESTATUS (CR_BASE + 1)
+#define CR_BSTATUS (CR_BASE + 2)
+#define CR_IENABLE (CR_BASE + 3)
+#define CR_IPENDING (CR_BASE + 4)
+#define CR_CPUID (CR_BASE + 5)
+#define CR_CTL6 (CR_BASE + 6)
+#define CR_EXCEPTION (CR_BASE + 7)
+#define CR_PTEADDR (CR_BASE + 8)
+#define CR_PTEADDR_PTBASE_SHIFT 22
+#define CR_PTEADDR_PTBASE_MASK (0x3FF << CR_PTEADDR_PTBASE_SHIFT)
+#define CR_PTEADDR_VPN_SHIFT 2
+#define CR_PTEADDR_VPN_MASK (0xFFFFF << CR_PTEADDR_VPN_SHIFT)
+#define CR_TLBACC (CR_BASE + 9)
+#define CR_TLBACC_IGN_SHIFT 25
+#define CR_TLBACC_IGN_MASK (0x7F << CR_TLBACC_IGN_SHIFT)
+#define CR_TLBACC_C (1 << 24)
+#define CR_TLBACC_R (1 << 23)
+#define CR_TLBACC_W (1 << 22)
+#define CR_TLBACC_X (1 << 21)
+#define CR_TLBACC_G (1 << 20)
+#define CR_TLBACC_PFN_MASK 0x000FFFFF
+#define CR_TLBMISC (CR_BASE + 10)
+#define CR_TLBMISC_WAY_SHIFT 20
+#define CR_TLBMISC_WAY_MASK (0xF << CR_TLBMISC_WAY_SHIFT)
+#define CR_TLBMISC_RD (1 << 19)
+#define CR_TLBMISC_WR (1 << 18)
+#define CR_TLBMISC_PID_SHIFT 4
+#define CR_TLBMISC_PID_MASK (0x3FFF << CR_TLBMISC_PID_SHIFT)
+#define CR_TLBMISC_DBL (1 << 3)
+#define CR_TLBMISC_BAD (1 << 2)
+#define CR_TLBMISC_PERM (1 << 1)
+#define CR_TLBMISC_D (1 << 0)
+#define CR_ENCINJ (CR_BASE + 11)
+#define CR_BADADDR (CR_BASE + 12)
+#define CR_CONFIG (CR_BASE + 13)
+#define CR_MPUBASE (CR_BASE + 14)
+#define CR_MPUACC (CR_BASE + 15)
+
+/* Other registers */
+#define R_PC 64
+
+/* Exceptions */
+#define EXCP_BREAK 0x1000
+#define EXCP_RESET 0
+#define EXCP_PRESET 1
+#define EXCP_IRQ 2
+#define EXCP_TRAP 3
+#define EXCP_UNIMPL 4
+#define EXCP_ILLEGAL 5
+#define EXCP_UNALIGN 6
+#define EXCP_UNALIGND 7
+#define EXCP_DIV 8
+#define EXCP_SUPERA 9
+#define EXCP_SUPERI 10
+#define EXCP_SUPERD 11
+#define EXCP_TLBD 12
+#define EXCP_TLBX 13
+#define EXCP_TLBR 14
+#define EXCP_TLBW 15
+#define EXCP_MPUI 16
+#define EXCP_MPUD 17
+
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+struct CPUNios2State {
+ uint32_t regs[NUM_CORE_REGS];
+
+#if !defined(CONFIG_USER_ONLY)
+ Nios2MMU mmu;
+
+ uint32_t irq_pending;
+#endif
+};
+
+/**
+ * Nios2CPU:
+ * @env: #CPUNios2State
+ *
+ * A Nios2 CPU.
+ */
+struct Nios2CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUNios2State env;
+
+ bool mmu_present;
+ uint32_t pid_num_bits;
+ uint32_t tlb_num_ways;
+ uint32_t tlb_num_entries;
+
+ /* Addresses that are hard-coded in the FPGA build settings */
+ uint32_t reset_addr;
+ uint32_t exception_addr;
+ uint32_t fast_tlb_miss_addr;
+};
+
+
+void nios2_tcg_init(void);
+void nios2_cpu_do_interrupt(CPUState *cs);
+void dump_mmu(CPUNios2State *env);
+void nios2_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+
+void do_nios2_semihosting(CPUNios2State *env);
+
+#define CPU_RESOLVING_TYPE TYPE_NIOS2_CPU
+
+#define cpu_gen_code cpu_nios2_gen_code
+
+#define CPU_SAVE_VERSION 1
+
+/* MMU modes definitions */
+#define MMU_SUPERVISOR_IDX 0
+#define MMU_USER_IDX 1
+
+static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
+{
+ return (env->regs[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX :
+ MMU_SUPERVISOR_IDX;
+}
+
+#ifdef CONFIG_USER_ONLY
+void nios2_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+#endif
+
+static inline int cpu_interrupts_enabled(CPUNios2State *env)
+{
+ return env->regs[CR_STATUS] & CR_STATUS_PIE;
+}
+
+typedef CPUNios2State CPUArchState;
+typedef Nios2CPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->regs[R_PC];
+ *cs_base = 0;
+ *flags = (env->regs[CR_STATUS] & (CR_STATUS_EH | CR_STATUS_U));
+}
+
+#endif /* NIOS2_CPU_H */
diff --git a/target/nios2/helper.c b/target/nios2/helper.c
new file mode 100644
index 000000000..e5c98650e
--- /dev/null
+++ b/target/nios2/helper.c
@@ -0,0 +1,315 @@
+/*
+ * Altera Nios II helper routines.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "exec/helper-proto.h"
+#include "semihosting/semihost.h"
+
+#if defined(CONFIG_USER_ONLY)
+
+void nios2_cpu_do_interrupt(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ cs->exception_index = -1;
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+}
+
+void nios2_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
+{
+ /* FIXME: Disentangle kuser page from linux-user sigsegv handling. */
+ cs->exception_index = 0xaa;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+void nios2_cpu_do_interrupt(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ switch (cs->exception_index) {
+ case EXCP_IRQ:
+ assert(env->regs[CR_STATUS] & CR_STATUS_PIE);
+
+ qemu_log_mask(CPU_LOG_INT, "interrupt at pc=%x\n", env->regs[R_PC]);
+
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_IH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_TLBD:
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ qemu_log_mask(CPU_LOG_INT, "TLB MISS (fast) at pc=%x\n",
+ env->regs[R_PC]);
+
+ /* Fast TLB miss */
+ /*
+ * Variation from the spec: Table 3-35 of the cpu reference shows
+ * estatus not being changed for a TLB miss, but this appears to
+ * be incorrect.
+ */
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_DBL;
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_WR;
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->fast_tlb_miss_addr;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "TLB MISS (double) at pc=%x\n",
+ env->regs[R_PC]);
+
+ /* Double TLB miss */
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_DBL;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ }
+ break;
+
+ case EXCP_TLBR:
+ case EXCP_TLBW:
+ case EXCP_TLBX:
+ qemu_log_mask(CPU_LOG_INT, "TLB PERM at pc=%x\n", env->regs[R_PC]);
+
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_WR;
+ }
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_SUPERA:
+ case EXCP_SUPERI:
+ case EXCP_SUPERD:
+ qemu_log_mask(CPU_LOG_INT, "SUPERVISOR exception at pc=%x\n",
+ env->regs[R_PC]);
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_ILLEGAL:
+ case EXCP_TRAP:
+ qemu_log_mask(CPU_LOG_INT, "TRAP exception at pc=%x\n",
+ env->regs[R_PC]);
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_BREAK:
+ qemu_log_mask(CPU_LOG_INT, "BREAK exception at pc=%x\n",
+ env->regs[R_PC]);
+ /* The semihosting instruction is "break 1". */
+ if (semihosting_enabled() &&
+ cpu_ldl_code(env, env->regs[R_PC]) == 0x003da07a) {
+ qemu_log_mask(CPU_LOG_INT, "Entering semihosting\n");
+ env->regs[R_PC] += 4;
+ do_nios2_semihosting(env);
+ break;
+ }
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_BSTATUS] = env->regs[CR_STATUS];
+ env->regs[R_BA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ default:
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
+ break;
+ }
+}
+
+hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ target_ulong vaddr, paddr = 0;
+ Nios2MMULookup lu;
+ unsigned int hit;
+
+ if (cpu->mmu_present && (addr < 0xC0000000)) {
+ hit = mmu_translate(env, &lu, addr, 0, 0);
+ if (hit) {
+ vaddr = addr & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+ } else {
+ paddr = -1;
+ qemu_log("cpu_get_phys_page debug MISS: %#" PRIx64 "\n", addr);
+ }
+ } else {
+ paddr = addr & TARGET_PAGE_MASK;
+ }
+
+ return paddr;
+}
+
+void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ env->regs[CR_BADADDR] = addr;
+ env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
+ helper_raise_exception(env, EXCP_UNALIGN);
+}
+
+bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ unsigned int excp = EXCP_TLBD;
+ target_ulong vaddr, paddr;
+ Nios2MMULookup lu;
+ unsigned int hit;
+
+ if (!cpu->mmu_present) {
+ /* No MMU */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ if (MMU_SUPERVISOR_IDX == mmu_idx) {
+ if (address >= 0xC0000000) {
+ /* Kernel physical page - TLB bypassed */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ } else {
+ if (address >= 0x80000000) {
+ /* Illegal access from user mode */
+ if (probe) {
+ return false;
+ }
+ cs->exception_index = EXCP_SUPERA;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+ }
+
+ /* Virtual page. */
+ hit = mmu_translate(env, &lu, address, access_type, mmu_idx);
+ if (hit) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+
+ if (((access_type == MMU_DATA_LOAD) && (lu.prot & PAGE_READ)) ||
+ ((access_type == MMU_DATA_STORE) && (lu.prot & PAGE_WRITE)) ||
+ ((access_type == MMU_INST_FETCH) && (lu.prot & PAGE_EXEC))) {
+ tlb_set_page(cs, vaddr, paddr, lu.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+
+ /* Permission violation */
+ excp = (access_type == MMU_DATA_LOAD ? EXCP_TLBR :
+ access_type == MMU_DATA_STORE ? EXCP_TLBW : EXCP_TLBX);
+ }
+
+ if (probe) {
+ return false;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
+ } else {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
+ }
+ env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
+ env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
+ env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
+
+ cs->exception_index = excp;
+ env->regs[CR_BADADDR] = address;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
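
The (address >> 10) & CR_PTEADDR_VPN_MASK expression above places the 20-bit VPN (address >> 12) into the pteaddr VPN field at bit 2 with a single shift. A worked example with an illustrative address:

    uint32_t address = 0x12345678;
    uint32_t vpn   = address >> 12;                          /* 0x12345          */
    uint32_t field = (address >> 10) & CR_PTEADDR_VPN_MASK;  /* == 0x12345 << 2  */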
diff --git a/target/nios2/helper.h b/target/nios2/helper.h
new file mode 100644
index 000000000..6c8f0b5b3
--- /dev/null
+++ b/target/nios2/helper.h
@@ -0,0 +1,27 @@
+/*
+ * Altera Nios II helper routines header.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(mmu_read_debug, void, env, i32)
+DEF_HELPER_3(mmu_write, void, env, i32, i32)
+DEF_HELPER_1(check_interrupts, void, env)
+#endif
diff --git a/target/nios2/meson.build b/target/nios2/meson.build
new file mode 100644
index 000000000..e643917db
--- /dev/null
+++ b/target/nios2/meson.build
@@ -0,0 +1,15 @@
+nios2_ss = ss.source_set()
+nios2_ss.add(files(
+ 'cpu.c',
+ 'helper.c',
+ 'mmu.c',
+ 'nios2-semi.c',
+ 'op_helper.c',
+ 'translate.c',
+))
+
+nios2_softmmu_ss = ss.source_set()
+nios2_softmmu_ss.add(files('monitor.c'))
+
+target_arch += {'nios2': nios2_ss}
+target_softmmu_arch += {'nios2': nios2_softmmu_ss}
diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c
new file mode 100644
index 000000000..2545c0676
--- /dev/null
+++ b/target/nios2/mmu.c
@@ -0,0 +1,281 @@
+/*
+ * Altera Nios II MMU emulation for qemu.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "mmu.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Define this to enable MMU debug messages */
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define MMU_LOG(x) x
+#else
+#define MMU_LOG(x)
+#endif
+
+void mmu_read_debug(CPUNios2State *env, uint32_t rn)
+{
+ switch (rn) {
+ case CR_TLBACC:
+ MMU_LOG(qemu_log("TLBACC READ %08X\n", env->regs[rn]));
+ break;
+
+ case CR_TLBMISC:
+ MMU_LOG(qemu_log("TLBMISC READ %08X\n", env->regs[rn]));
+ break;
+
+ case CR_PTEADDR:
+ MMU_LOG(qemu_log("PTEADDR READ %08X\n", env->regs[rn]));
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* rw - 0 = read, 1 = write, 2 = fetch. */
+unsigned int mmu_translate(CPUNios2State *env,
+ Nios2MMULookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx)
+{
+ Nios2CPU *cpu = env_archcpu(env);
+ int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4;
+ int vpn = vaddr >> 12;
+
+ MMU_LOG(qemu_log("mmu_translate vaddr %08X, pid %08X, vpn %08X\n",
+ vaddr, pid, vpn));
+
+ int way;
+ for (way = 0; way < cpu->tlb_num_ways; way++) {
+
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+
+ MMU_LOG(qemu_log("TLB[%d] TAG %08X, VPN %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ entry->tag, (entry->tag >> 12)));
+
+ if (((entry->tag >> 12) != vpn) ||
+ (((entry->tag & (1 << 11)) == 0) &&
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) != pid))) {
+ continue;
+ }
+ lu->vaddr = vaddr & TARGET_PAGE_MASK;
+ lu->paddr = (entry->data & CR_TLBACC_PFN_MASK) << TARGET_PAGE_BITS;
+ lu->prot = ((entry->data & CR_TLBACC_R) ? PAGE_READ : 0) |
+ ((entry->data & CR_TLBACC_W) ? PAGE_WRITE : 0) |
+ ((entry->data & CR_TLBACC_X) ? PAGE_EXEC : 0);
+
+ MMU_LOG(qemu_log("HIT TLB[%d] %08X %08X %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ lu->vaddr, lu->paddr, lu->prot));
+ return 1;
+ }
+ return 0;
+}
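
The per-way match condition above is easier to read inverted. An illustration-only model of the tag check, as implied by the bit tests (tag[31:12] = VPN, tag[11] = global, low pid_num_bits = PID):

    #include <stdint.h>
    #include <stdbool.h>

    /* Model of the tag match performed for each way above. */
    static bool tlb_tag_match(uint32_t tag, uint32_t vpn, uint32_t pid,
                              uint32_t pid_bits)
    {
        if ((tag >> 12) != vpn) {
            return false;                       /* wrong page */
        }
        if (tag & (1u << 11)) {
            return true;                        /* global: PID ignored */
        }
        return (tag & ((1u << pid_bits) - 1)) == pid;
    }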
+
+static void mmu_flush_pid(CPUNios2State *env, uint32_t pid)
+{
+ CPUState *cs = env_cpu(env);
+ Nios2CPU *cpu = env_archcpu(env);
+ int idx;
+ MMU_LOG(qemu_log("TLB Flush PID %d\n", pid));
+
+ for (idx = 0; idx < cpu->tlb_num_entries; idx++) {
+ Nios2TLBEntry *entry = &env->mmu.tlb[idx];
+
+ MMU_LOG(qemu_log("TLB[%d] => %08X %08X\n",
+ idx, entry->tag, entry->data));
+
+ if ((entry->tag & (1 << 10)) && (!(entry->tag & (1 << 11))) &&
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) == pid)) {
+ uint32_t vaddr = entry->tag & TARGET_PAGE_MASK;
+
+ MMU_LOG(qemu_log("TLB Flush Page %08X\n", vaddr));
+
+ tlb_flush_page(cs, vaddr);
+ }
+ }
+}
+
+void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v)
+{
+ CPUState *cs = env_cpu(env);
+ Nios2CPU *cpu = env_archcpu(env);
+
+ MMU_LOG(qemu_log("mmu_write %08X = %08X\n", rn, v));
+
+ switch (rn) {
+ case CR_TLBACC:
+ MMU_LOG(qemu_log("TLBACC: IG %02X, FLAGS %c%c%c%c%c, PFN %05X\n",
+ v >> CR_TLBACC_IGN_SHIFT,
+ (v & CR_TLBACC_C) ? 'C' : '.',
+ (v & CR_TLBACC_R) ? 'R' : '.',
+ (v & CR_TLBACC_W) ? 'W' : '.',
+ (v & CR_TLBACC_X) ? 'X' : '.',
+ (v & CR_TLBACC_G) ? 'G' : '.',
+ v & CR_TLBACC_PFN_MASK));
+
+ /* if tlbmisc.WE == 1 then trigger a TLB write on writes to TLBACC */
+ if (env->regs[CR_TLBMISC] & CR_TLBMISC_WR) {
+ int way = (env->regs[CR_TLBMISC] >> CR_TLBMISC_WAY_SHIFT);
+ int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2;
+ int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4;
+ int g = (v & CR_TLBACC_G) ? 1 : 0;
+ int valid = ((vpn & CR_TLBACC_PFN_MASK) < 0xC0000) ? 1 : 0;
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+ uint32_t newTag = (vpn << 12) | (g << 11) | (valid << 10) | pid;
+ uint32_t newData = v & (CR_TLBACC_C | CR_TLBACC_R | CR_TLBACC_W |
+ CR_TLBACC_X | CR_TLBACC_PFN_MASK);
+
+ if ((entry->tag != newTag) || (entry->data != newData)) {
+ if (entry->tag & (1 << 10)) {
+ /* Flush existing entry */
+ MMU_LOG(qemu_log("TLB Flush Page (OLD) %08X\n",
+ entry->tag & TARGET_PAGE_MASK));
+ tlb_flush_page(cs, entry->tag & TARGET_PAGE_MASK);
+ }
+ entry->tag = newTag;
+ entry->data = newData;
+ MMU_LOG(qemu_log("TLB[%d] = %08X %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ entry->tag, entry->data));
+ }
+ /* Auto-increment tlbmisc.WAY */
+ env->regs[CR_TLBMISC] =
+ (env->regs[CR_TLBMISC] & ~CR_TLBMISC_WAY_MASK) |
+ (((way + 1) & (cpu->tlb_num_ways - 1)) <<
+ CR_TLBMISC_WAY_SHIFT);
+ }
+
+ /* Writes to TLBACC don't change the read-back value */
+ env->mmu.tlbacc_wr = v;
+ break;
+
+ case CR_TLBMISC:
+ MMU_LOG(qemu_log("TLBMISC: WAY %X, FLAGS %c%c%c%c%c%c, PID %04X\n",
+ v >> CR_TLBMISC_WAY_SHIFT,
+ (v & CR_TLBMISC_RD) ? 'R' : '.',
+ (v & CR_TLBMISC_WR) ? 'W' : '.',
+ (v & CR_TLBMISC_DBL) ? '2' : '.',
+ (v & CR_TLBMISC_BAD) ? 'B' : '.',
+ (v & CR_TLBMISC_PERM) ? 'P' : '.',
+ (v & CR_TLBMISC_D) ? 'D' : '.',
+ (v & CR_TLBMISC_PID_MASK) >> 4));
+
+ if ((v & CR_TLBMISC_PID_MASK) !=
+ (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK)) {
+ mmu_flush_pid(env, (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >>
+ CR_TLBMISC_PID_SHIFT);
+ }
+ /* if tlbmisc.RD == 1 then trigger a TLB read on writes to TLBMISC */
+ if (v & CR_TLBMISC_RD) {
+ int way = (v >> CR_TLBMISC_WAY_SHIFT);
+ int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2;
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+
+ env->regs[CR_TLBACC] &= CR_TLBACC_IGN_MASK;
+ env->regs[CR_TLBACC] |= entry->data;
+ env->regs[CR_TLBACC] |= (entry->tag & (1 << 11)) ? CR_TLBACC_G : 0;
+ env->regs[CR_TLBMISC] =
+ (v & ~CR_TLBMISC_PID_MASK) |
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) <<
+ CR_TLBMISC_PID_SHIFT);
+ env->regs[CR_PTEADDR] &= ~CR_PTEADDR_VPN_MASK;
+ env->regs[CR_PTEADDR] |= (entry->tag >> 12) << CR_PTEADDR_VPN_SHIFT;
+ MMU_LOG(qemu_log("TLB READ way %d, vpn %05X, tag %08X, data %08X, "
+ "tlbacc %08X, tlbmisc %08X, pteaddr %08X\n",
+ way, vpn, entry->tag, entry->data,
+ env->regs[CR_TLBACC], env->regs[CR_TLBMISC],
+ env->regs[CR_PTEADDR]));
+ } else {
+ env->regs[CR_TLBMISC] = v;
+ }
+
+ env->mmu.tlbmisc_wr = v;
+ break;
+
+ case CR_PTEADDR:
+ MMU_LOG(qemu_log("PTEADDR: PTBASE %03X, VPN %05X\n",
+ v >> CR_PTEADDR_PTBASE_SHIFT,
+ (v & CR_PTEADDR_VPN_MASK) >> CR_PTEADDR_VPN_SHIFT));
+
+ /* Writes to PTEADDR don't change the read-back VPN value */
+ env->regs[CR_PTEADDR] = (v & ~CR_PTEADDR_VPN_MASK) |
+ (env->regs[CR_PTEADDR] & CR_PTEADDR_VPN_MASK);
+ env->mmu.pteaddr_wr = v;
+ break;
+
+ default:
+ break;
+ }
+}
+
+void mmu_init(CPUNios2State *env)
+{
+ Nios2CPU *cpu = env_archcpu(env);
+ Nios2MMU *mmu = &env->mmu;
+
+ MMU_LOG(qemu_log("mmu_init\n"));
+
+ mmu->tlb_entry_mask = (cpu->tlb_num_entries / cpu->tlb_num_ways) - 1;
+ mmu->tlb = g_new0(Nios2TLBEntry, cpu->tlb_num_entries);
+}
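
With the property defaults declared in nios2_properties[] (256 entries, 16 ways), this works out to 256 / 16 = 16 sets per way:

    uint32_t entries = 256, ways = 16;   /* nios2_properties[] defaults  */
    int mask = (entries / ways) - 1;     /* tlb_entry_mask = 0xF         */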
+
+void dump_mmu(CPUNios2State *env)
+{
+ Nios2CPU *cpu = env_archcpu(env);
+ int i;
+
+ qemu_printf("MMU: ways %d, entries %d, pid bits %d\n",
+ cpu->tlb_num_ways, cpu->tlb_num_entries,
+ cpu->pid_num_bits);
+
+ for (i = 0; i < cpu->tlb_num_entries; i++) {
+ Nios2TLBEntry *entry = &env->mmu.tlb[i];
+ qemu_printf("TLB[%d] = %08X %08X %c VPN %05X "
+ "PID %02X %c PFN %05X %c%c%c%c\n",
+ i, entry->tag, entry->data,
+ (entry->tag & (1 << 10)) ? 'V' : '-',
+ entry->tag >> 12,
+ entry->tag & ((1 << cpu->pid_num_bits) - 1),
+ (entry->tag & (1 << 11)) ? 'G' : '-',
+ entry->data & CR_TLBACC_PFN_MASK,
+ (entry->data & CR_TLBACC_C) ? 'C' : '-',
+ (entry->data & CR_TLBACC_R) ? 'R' : '-',
+ (entry->data & CR_TLBACC_W) ? 'W' : '-',
+ (entry->data & CR_TLBACC_X) ? 'X' : '-');
+ }
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/nios2/mmu.h b/target/nios2/mmu.h
new file mode 100644
index 000000000..4f46fbb82
--- /dev/null
+++ b/target/nios2/mmu.h
@@ -0,0 +1,51 @@
+/*
+ * Altera Nios II MMU emulation for qemu.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#ifndef NIOS2_MMU_H
+#define NIOS2_MMU_H
+
+typedef struct Nios2TLBEntry {
+ target_ulong tag;
+ target_ulong data;
+} Nios2TLBEntry;
+
+typedef struct Nios2MMU {
+ int tlb_entry_mask;
+ uint32_t pteaddr_wr;
+ uint32_t tlbacc_wr;
+ uint32_t tlbmisc_wr;
+ Nios2TLBEntry *tlb;
+} Nios2MMU;
+
+typedef struct Nios2MMULookup {
+ target_ulong vaddr;
+ target_ulong paddr;
+ int prot;
+} Nios2MMULookup;
+
+void mmu_flip_um(CPUNios2State *env, unsigned int um);
+unsigned int mmu_translate(CPUNios2State *env,
+ Nios2MMULookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx);
+void mmu_read_debug(CPUNios2State *env, uint32_t rn);
+void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v);
+void mmu_init(CPUNios2State *env);
+
+#endif /* NIOS2_MMU_H */
diff --git a/target/nios2/monitor.c b/target/nios2/monitor.c
new file mode 100644
index 000000000..0152dec3f
--- /dev/null
+++ b/target/nios2/monitor.c
@@ -0,0 +1,35 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env(mon);
+
+ dump_mmu(env1);
+}
diff --git a/target/nios2/nios2-semi.c b/target/nios2/nios2-semi.c
new file mode 100644
index 000000000..fe5598bae
--- /dev/null
+++ b/target/nios2/nios2-semi.c
@@ -0,0 +1,454 @@
+/*
+ * Nios II Semihosting syscall interface.
+ * This code is derived from m68k-semi.c.
+ * The semihosting protocol implemented here is described in the
+ * libgloss sources:
+ * https://sourceware.org/git/gitweb.cgi?p=newlib-cygwin.git;a=blob;f=libgloss/nios2/nios2-semi.txt;hb=HEAD
+ *
+ * Copyright (c) 2017-2019 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#if defined(CONFIG_USER_ONLY)
+#include "qemu.h"
+#else
+#include "qemu-common.h"
+#include "exec/softmmu-semi.h"
+#endif
+#include "qemu/log.h"
+
+#define HOSTED_EXIT 0
+#define HOSTED_INIT_SIM 1
+#define HOSTED_OPEN 2
+#define HOSTED_CLOSE 3
+#define HOSTED_READ 4
+#define HOSTED_WRITE 5
+#define HOSTED_LSEEK 6
+#define HOSTED_RENAME 7
+#define HOSTED_UNLINK 8
+#define HOSTED_STAT 9
+#define HOSTED_FSTAT 10
+#define HOSTED_GETTIMEOFDAY 11
+#define HOSTED_ISATTY 12
+#define HOSTED_SYSTEM 13
+
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
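+/*
+ * Stat buffer layout expected by the guest library; every field is
+ * stored big-endian, hence the cpu_to_be32/64 conversions in
+ * translate_stat() below.
+ */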
+struct nios2_gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+#define GDB_O_RDONLY 0x0
+#define GDB_O_WRONLY 0x1
+#define GDB_O_RDWR 0x2
+#define GDB_O_APPEND 0x8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
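+/*
+ * The GDB_O_* values above follow the gdb/libgloss convention and need
+ * not match the host's O_* constants, so translate the flags bit by bit
+ * before handing them to the host open().
+ */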
+static int translate_openflags(int flags)
+{
+ int hf;
+
+ if (flags & GDB_O_WRONLY) {
+ hf = O_WRONLY;
+ } else if (flags & GDB_O_RDWR) {
+ hf = O_RDWR;
+ } else {
+ hf = O_RDONLY;
+ }
+
+ if (flags & GDB_O_APPEND) {
+ hf |= O_APPEND;
+ }
+ if (flags & GDB_O_CREAT) {
+ hf |= O_CREAT;
+ }
+ if (flags & GDB_O_TRUNC) {
+ hf |= O_TRUNC;
+ }
+ if (flags & GDB_O_EXCL) {
+ hf |= O_EXCL;
+ }
+
+ return hf;
+}
+
+static bool translate_stat(CPUNios2State *env, target_ulong addr,
+ struct stat *s)
+{
+ struct nios2_gdb_stat *p;
+
+ p = lock_user(VERIFY_WRITE, addr, sizeof(struct nios2_gdb_stat), 0);
+
+ if (!p) {
+ return false;
+ }
+ p->gdb_st_dev = cpu_to_be32(s->st_dev);
+ p->gdb_st_ino = cpu_to_be32(s->st_ino);
+ p->gdb_st_mode = cpu_to_be32(s->st_mode);
+ p->gdb_st_nlink = cpu_to_be32(s->st_nlink);
+ p->gdb_st_uid = cpu_to_be32(s->st_uid);
+ p->gdb_st_gid = cpu_to_be32(s->st_gid);
+ p->gdb_st_rdev = cpu_to_be32(s->st_rdev);
+ p->gdb_st_size = cpu_to_be64(s->st_size);
+#ifdef _WIN32
+ /* Windows stat is missing some fields. */
+ p->gdb_st_blksize = 0;
+ p->gdb_st_blocks = 0;
+#else
+ p->gdb_st_blksize = cpu_to_be64(s->st_blksize);
+ p->gdb_st_blocks = cpu_to_be64(s->st_blocks);
+#endif
+ p->gdb_st_atime = cpu_to_be32(s->st_atime);
+ p->gdb_st_mtime = cpu_to_be32(s->st_mtime);
+ p->gdb_st_ctime = cpu_to_be32(s->st_ctime);
+ unlock_user(p, addr, sizeof(struct nios2_gdb_stat));
+ return true;
+}
+
+static void nios2_semi_return_u32(CPUNios2State *env, uint32_t ret,
+ uint32_t err)
+{
+ target_ulong args = env->regs[R_ARG1];
+ if (put_user_u32(ret, args) ||
+ put_user_u32(err, args + 4)) {
+ /*
+ * The nios2 semihosting ABI does not provide any way to report this
+ * error to the guest, so the best we can do is log it in qemu.
+ * It is always a guest error not to pass us a valid argument block.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
+static void nios2_semi_return_u64(CPUNios2State *env, uint64_t ret,
+ uint32_t err)
+{
+ target_ulong args = env->regs[R_ARG1];
+ if (put_user_u32(ret >> 32, args) ||
+ put_user_u32(ret, args + 4) ||
+ put_user_u32(err, args + 8)) {
+ /* No way to report this via nios2 semihosting ABI; just log it */
+ qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
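+/*
+ * lseek is the only hosted call with a 64-bit result, so the gdb
+ * syscall completion callback must know which return path to take.
+ */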
+static int nios2_semi_is_lseek;
+
+static void nios2_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (nios2_semi_is_lseek) {
+ /*
+ * FIXME: We've already lost the high bits of the lseek
+ * return value.
+ */
+ nios2_semi_return_u64(env, ret, err);
+ nios2_semi_is_lseek = 0;
+ } else {
+ nios2_semi_return_u32(env, ret, err);
+ }
+}
+
+/*
+ * Read the input value from the argument block; fail the semihosting
+ * call if the memory read fails.
+ */
+#define GET_ARG(n) do { \
+ if (get_user_ual(arg ## n, args + (n) * 4)) { \
+ result = -1; \
+ errno = EFAULT; \
+ goto failed; \
+ } \
+} while (0)
+
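+/*
+ * Main dispatcher. The syscall number arrives in R_ARG0 and a pointer
+ * to the argument/result block in R_ARG1, per the libgloss protocol
+ * document referenced at the top of this file.
+ */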
+void do_nios2_semihosting(CPUNios2State *env)
+{
+ int nr;
+ uint32_t args;
+ target_ulong arg0, arg1, arg2, arg3;
+ void *p;
+ void *q;
+ uint32_t len;
+ uint32_t result;
+
+ nr = env->regs[R_ARG0];
+ args = env->regs[R_ARG1];
+ switch (nr) {
+ case HOSTED_EXIT:
+ gdb_exit(env->regs[R_ARG0]);
+ exit(env->regs[R_ARG0]);
+ case HOSTED_OPEN:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "open,%s,%x,%x", arg0, (int)arg1,
+ arg2, arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = open(p, translate_openflags(arg2), arg3);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_CLOSE:
+ {
+ /* Ignore attempts to close stdin/out/err. */
+ GET_ARG(0);
+ int fd = arg0;
+ if (fd > 2) {
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "close,%x", arg0);
+ return;
+ } else {
+ result = close(fd);
+ }
+ } else {
+ result = 0;
+ }
+ break;
+ }
+ case HOSTED_READ:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "read,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_WRITE, arg1, len, 0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = read(arg0, p, len);
+ unlock_user(p, arg1, len);
+ }
+ }
+ break;
+ case HOSTED_WRITE:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "write,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_READ, arg1, len, 1);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = write(arg0, p, len);
+                unlock_user(p, arg1, 0);
+ }
+ }
+ break;
+ case HOSTED_LSEEK:
+ {
+ uint64_t off;
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ off = (uint32_t)arg2 | ((uint64_t)arg1 << 32);
+ if (use_gdb_syscalls()) {
+ nios2_semi_is_lseek = 1;
+ gdb_do_syscall(nios2_semi_cb, "lseek,%x,%lx,%x",
+ arg0, off, arg3);
+ } else {
+ off = lseek(arg0, off, arg3);
+ nios2_semi_return_u64(env, off, errno);
+ }
+ return;
+ }
+ case HOSTED_RENAME:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "rename,%s,%s",
+ arg0, (int)arg1, arg2, (int)arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ q = lock_user_string(arg2);
+ if (!p || !q) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = rename(p, q);
+ }
+ unlock_user(p, arg0, 0);
+ unlock_user(q, arg2, 0);
+ }
+ break;
+ case HOSTED_UNLINK:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "unlink,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = unlink(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_STAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "stat,%s,%x",
+ arg0, (int)arg1, arg2);
+ return;
+ } else {
+ struct stat s;
+ p = lock_user_string(arg0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = stat(p, &s);
+ unlock_user(p, arg0, 0);
+ }
+ if (result == 0 && !translate_stat(env, arg2, &s)) {
+ result = -1;
+ errno = EFAULT;
+ }
+ }
+ break;
+ case HOSTED_FSTAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "fstat,%x,%x",
+ arg0, arg1);
+ return;
+ } else {
+ struct stat s;
+ result = fstat(arg0, &s);
+ if (result == 0 && !translate_stat(env, arg1, &s)) {
+ result = -1;
+ errno = EFAULT;
+ }
+ }
+ break;
+ case HOSTED_GETTIMEOFDAY:
+ /* Only the tv parameter is used. tz is assumed NULL. */
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "gettimeofday,%x,%x",
+ arg0, 0);
+ return;
+ } else {
+ qemu_timeval tv;
+ struct gdb_timeval *p;
+ result = qemu_gettimeofday(&tv);
+            if (result == 0) {
+ p = lock_user(VERIFY_WRITE, arg0, sizeof(struct gdb_timeval),
+ 0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ p->tv_sec = cpu_to_be32(tv.tv_sec);
+ p->tv_usec = cpu_to_be64(tv.tv_usec);
+ unlock_user(p, arg0, sizeof(struct gdb_timeval));
+ }
+ }
+ }
+ break;
+ case HOSTED_ISATTY:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "isatty,%x", arg0);
+ return;
+ } else {
+ result = isatty(arg0);
+ }
+ break;
+ case HOSTED_SYSTEM:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(nios2_semi_cb, "system,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ result = -1;
+ errno = EFAULT;
+ } else {
+ result = system(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "nios2-semihosting: unsupported "
+ "semihosting syscall %d\n", nr);
+ result = 0;
+ }
+failed:
+ nios2_semi_return_u32(env, result, errno);
+}
diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c
new file mode 100644
index 000000000..a59003855
--- /dev/null
+++ b/target/nios2/op_helper.c
@@ -0,0 +1,61 @@
+/*
+ * Altera Nios II helper routines.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "qemu/main-loop.h"
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_mmu_read_debug(CPUNios2State *env, uint32_t rn)
+{
+ mmu_read_debug(env, rn);
+}
+
+void helper_mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v)
+{
+ mmu_write(env, rn, v);
+}
+
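+/* Deliver a pending external interrupt once PIE allows it. */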
+static void nios2_check_interrupts(CPUNios2State *env)
+{
+ if (env->irq_pending &&
+ (env->regs[CR_STATUS] & CR_STATUS_PIE)) {
+ env->irq_pending = 0;
+ cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HARD);
+ }
+}
+
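+/*
+ * cpu_interrupt() must be called with the iothread lock held when
+ * invoked from TCG-generated code, hence this locking wrapper.
+ */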
+void helper_check_interrupts(CPUNios2State *env)
+{
+ qemu_mutex_lock_iothread();
+ nios2_check_interrupts(env);
+ qemu_mutex_unlock_iothread();
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_raise_exception(CPUNios2State *env, uint32_t index)
+{
+ CPUState *cs = env_cpu(env);
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
new file mode 100644
index 000000000..08d7ac539
--- /dev/null
+++ b/target/nios2/translate.c
@@ -0,0 +1,901 @@
+/*
+ * Altera Nios II emulation for qemu: main translation routines.
+ *
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * (Portions of this file were originally taken from nios2sim-ng.)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+#include "qemu/qemu-print.h"
+#include "exec/gen-icount.h"
+
+/* is_jmp field values */
+#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
+#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
+
+#define INSTRUCTION_FLG(func, flags) { (func), (flags) }
+#define INSTRUCTION(func) \
+ INSTRUCTION_FLG(func, 0)
+#define INSTRUCTION_NOP() \
+ INSTRUCTION_FLG(nop, 0)
+#define INSTRUCTION_UNIMPLEMENTED() \
+ INSTRUCTION_FLG(gen_excp, EXCP_UNIMPL)
+#define INSTRUCTION_ILLEGAL() \
+ INSTRUCTION_FLG(gen_excp, EXCP_ILLEGAL)
+
+/* Special R-Type instruction opcode */
+#define INSN_R_TYPE 0x3A
+
+/* I-Type instruction parsing */
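+/* Encoding: a[31:27] b[26:22] imm16[21:6] op[5:0] */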
+#define I_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ union { \
+ uint16_t u; \
+ int16_t s; \
+ } imm16; \
+ uint8_t b; \
+ uint8_t a; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm16.u = extract32((code), 6, 16), \
+ .b = extract32((code), 22, 5), \
+ .a = extract32((code), 27, 5), \
+ }
+
+/* R-Type instruction parsing */
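+/* Encoding: a[31:27] b[26:22] c[21:17] opx[16:11] imm5[10:6] op[5:0] */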
+#define R_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ uint8_t imm5; \
+ uint8_t opx; \
+ uint8_t c; \
+ uint8_t b; \
+ uint8_t a; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm5 = extract32((code), 6, 5), \
+ .opx = extract32((code), 11, 6), \
+ .c = extract32((code), 17, 5), \
+ .b = extract32((code), 22, 5), \
+ .a = extract32((code), 27, 5), \
+ }
+
+/* J-Type instruction parsing */
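+/* Encoding: imm26[31:6] op[5:0] */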
+#define J_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ uint32_t imm26; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm26 = extract32((code), 6, 26), \
+ }
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ TCGv_i32 zero;
+ target_ulong pc;
+ int mem_idx;
+} DisasContext;
+
+static TCGv cpu_R[NUM_CORE_REGS];
+
+typedef struct Nios2Instruction {
+ void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags);
+ uint32_t flags;
+} Nios2Instruction;
+
+static uint8_t get_opcode(uint32_t code)
+{
+ I_TYPE(instr, code);
+ return instr.op;
+}
+
+static uint8_t get_opxcode(uint32_t code)
+{
+ R_TYPE(instr, code);
+ return instr.opx;
+}
+
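+/*
+ * Reads of r0 share a lazily allocated zero temp; it is created on
+ * first use within an instruction and freed again after the instruction
+ * has been translated (see nios2_tr_translate_insn).
+ */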
+static TCGv load_zero(DisasContext *dc)
+{
+ if (!dc->zero) {
+ dc->zero = tcg_const_i32(0);
+ }
+ return dc->zero;
+}
+
+static TCGv load_gpr(DisasContext *dc, uint8_t reg)
+{
+ if (likely(reg != R_ZERO)) {
+ return cpu_R[reg];
+ } else {
+ return load_zero(dc);
+ }
+}
+
+static void t_gen_helper_raise_exception(DisasContext *dc,
+ uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
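+/*
+ * Emit a jump to 'dest', chaining translation blocks directly when the
+ * translator allows it and falling back to a full TB lookup otherwise.
+ */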
+static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
+{
+ const TranslationBlock *tb = dc->base.tb;
+
+ if (translator_use_goto_tb(&dc->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_R[R_PC], dest);
+ tcg_gen_exit_tb(tb, n);
+ } else {
+ tcg_gen_movi_tl(cpu_R[R_PC], dest);
+ tcg_gen_exit_tb(NULL, 0);
+ }
+}
+
+static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ t_gen_helper_raise_exception(dc, flags);
+}
+
+static void gen_check_supervisor(DisasContext *dc)
+{
+ if (dc->base.tb->flags & CR_STATUS_U) {
+ /* CPU in user mode, privileged instruction called, stop. */
+ t_gen_helper_raise_exception(dc, EXCP_SUPERI);
+ }
+}
+
+/*
+ * Used as a placeholder for all instructions which do not have
+ * an effect on the simulator (e.g. flush, sync)
+ */
+static void nop(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ /* Nothing to do here */
+}
+
+/*
+ * J-Type instructions
+ */
+static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ J_TYPE(instr, code);
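+    /* Destination: top nibble of the current PC, OR'ed with imm26 * 4. */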
+ gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void call(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
+ jmpi(dc, code, flags);
+}
+
+/*
+ * I-Type instructions
+ */
+/* Load instructions */
+static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ TCGv addr = tcg_temp_new();
+ TCGv data;
+
+    /*
+     * WARNING: Loads into R_ZERO are ignored, but we must still generate
+     *          the memory access itself to emulate the CPU precisely: a
+     *          load from a protected page into R_ZERO still faults on
+     *          the Nios2 CPU.
+     */
+ if (likely(instr.b != R_ZERO)) {
+ data = cpu_R[instr.b];
+ } else {
+ data = tcg_temp_new();
+ }
+
+ tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+ tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);
+
+ if (unlikely(instr.b == R_ZERO)) {
+ tcg_temp_free(data);
+ }
+
+ tcg_temp_free(addr);
+}
+
+/* Store instructions */
+static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+ TCGv val = load_gpr(dc, instr.b);
+
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s);
+ tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags);
+ tcg_temp_free(addr);
+}
+
+/* Branch instructions */
+static void br(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ gen_goto_tb(dc, 0, dc->base.pc_next + (instr.imm16.s & -4));
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcond_tl(flags, cpu_R[instr.a], cpu_R[instr.b], l1);
+ gen_goto_tb(dc, 0, dc->base.pc_next);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, dc->base.pc_next + (instr.imm16.s & -4));
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Comparison instructions */
+#define gen_i_cmpxx(fname, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ I_TYPE(instr, (code)); \
+ tcg_gen_setcondi_tl(flags, cpu_R[instr.b], cpu_R[instr.a], (op3)); \
+}
+
+gen_i_cmpxx(gen_cmpxxsi, instr.imm16.s)
+gen_i_cmpxx(gen_cmpxxui, instr.imm16.u)
+
+/* Math/logic instructions */
+#define gen_i_math_logic(fname, insn, resimm, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ I_TYPE(instr, (code)); \
+ if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \
+ return; \
+ } else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \
+ tcg_gen_movi_tl(cpu_R[instr.b], (resimm) ? (op3) : 0); \
+ } else { \
+ tcg_gen_##insn##_tl(cpu_R[instr.b], cpu_R[instr.a], (op3)); \
+ } \
+}
+
+gen_i_math_logic(addi, addi, 1, instr.imm16.s)
+gen_i_math_logic(muli, muli, 0, instr.imm16.s)
+
+gen_i_math_logic(andi, andi, 0, instr.imm16.u)
+gen_i_math_logic(ori, ori, 1, instr.imm16.u)
+gen_i_math_logic(xori, xori, 1, instr.imm16.u)
+
+gen_i_math_logic(andhi, andi, 0, instr.imm16.u << 16)
+gen_i_math_logic(orhi, ori, 1, instr.imm16.u << 16)
+gen_i_math_logic(xorhi, xori, 1, instr.imm16.u << 16)
+
+/* Prototype only, defined below */
+static void handle_r_type_instr(DisasContext *dc, uint32_t code,
+ uint32_t flags);
+
+static const Nios2Instruction i_type_instructions[] = {
+ INSTRUCTION(call), /* call */
+ INSTRUCTION(jmpi), /* jmpi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbu */
+ INSTRUCTION(addi), /* addi */
+ INSTRUCTION_FLG(gen_stx, MO_UB), /* stb */
+ INSTRUCTION(br), /* br */
+ INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldb */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_GE), /* cmpgei */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhu */
+ INSTRUCTION(andi), /* andi */
+ INSTRUCTION_FLG(gen_stx, MO_UW), /* sth */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_GE), /* bge */
+ INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldh */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_LT), /* cmplti */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_NOP(), /* initda */
+ INSTRUCTION(ori), /* ori */
+ INSTRUCTION_FLG(gen_stx, MO_UL), /* stw */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_LT), /* blt */
+ INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldw */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_NE), /* cmpnei */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_NOP(), /* flushda */
+ INSTRUCTION(xori), /* xori */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_NE), /* bne */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_EQ), /* cmpeqi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbuio */
+ INSTRUCTION(muli), /* muli */
+ INSTRUCTION_FLG(gen_stx, MO_UB), /* stbio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_EQ), /* beq */
+ INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldbio */
+ INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_GEU), /* cmpgeui */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhuio */
+ INSTRUCTION(andhi), /* andhi */
+ INSTRUCTION_FLG(gen_stx, MO_UW), /* sthio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_GEU), /* bgeu */
+ INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldhio */
+ INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_LTU), /* cmpltui */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_UNIMPLEMENTED(), /* custom */
+ INSTRUCTION_NOP(), /* initd */
+ INSTRUCTION(orhi), /* orhi */
+    INSTRUCTION_FLG(gen_stx, MO_UL),                  /* stwio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_LTU), /* bltu */
+ INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldwio */
+ INSTRUCTION_UNIMPLEMENTED(), /* rdprs */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(handle_r_type_instr, 0), /* R-Type */
+ INSTRUCTION_NOP(), /* flushd */
+ INSTRUCTION(xorhi), /* xorhi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+};
+
+/*
+ * R-Type instructions
+ */
+/*
+ * status <- estatus
+ * PC <- ea
+ */
+static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(cpu_R[CR_STATUS], cpu_R[CR_ESTATUS]);
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_EA]);
+
+ dc->base.is_jmp = DISAS_JUMP;
+}
+
+/* PC <- ra */
+static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_RA]);
+
+ dc->base.is_jmp = DISAS_JUMP;
+}
+
+/* PC <- ba */
+static void bret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(cpu_R[R_PC], cpu_R[R_BA]);
+
+ dc->base.is_jmp = DISAS_JUMP;
+}
+
+/* PC <- rA */
+static void jmp(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
+
+ dc->base.is_jmp = DISAS_JUMP;
+}
+
+/* rC <- PC + 4 */
+static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_movi_tl(cpu_R[instr.c], dc->base.pc_next);
+ }
+}
+
+/*
+ * ra <- PC + 4
+ * PC <- rA
+ */
+static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ tcg_gen_mov_tl(cpu_R[R_PC], load_gpr(dc, instr.a));
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->base.pc_next);
+
+ dc->base.is_jmp = DISAS_JUMP;
+}
+
+/* rC <- ctlN */
+static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ gen_check_supervisor(dc);
+
+ switch (instr.imm5 + CR_BASE) {
+ case CR_PTEADDR:
+ case CR_TLBACC:
+ case CR_TLBMISC:
+ {
+#if !defined(CONFIG_USER_ONLY)
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
+#ifdef DEBUG_MMU
+ TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
+            gen_helper_mmu_read_debug(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+#endif
+ }
+#endif
+ break;
+ }
+
+ default:
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_mov_tl(cpu_R[instr.c], cpu_R[instr.imm5 + CR_BASE]);
+ }
+ break;
+ }
+}
+
+/* ctlN <- rA */
+static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ gen_check_supervisor(dc);
+
+ switch (instr.imm5 + CR_BASE) {
+ case CR_PTEADDR:
+ case CR_TLBACC:
+ case CR_TLBMISC:
+ {
+#if !defined(CONFIG_USER_ONLY)
+ TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
+ gen_helper_mmu_write(cpu_env, tmp, load_gpr(dc, instr.a));
+ tcg_temp_free_i32(tmp);
+#endif
+ break;
+ }
+
+ default:
+ tcg_gen_mov_tl(cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
+ break;
+ }
+
+ /* If interrupts were enabled using WRCTL, trigger them. */
+#if !defined(CONFIG_USER_ONLY)
+ if ((instr.imm5 + CR_BASE) == CR_STATUS) {
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_check_interrupts(cpu_env);
+ dc->base.is_jmp = DISAS_UPDATE;
+ }
+#endif
+}
+
+/* Comparison instructions */
+static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_setcond_tl(flags, cpu_R[instr.c], cpu_R[instr.a],
+ cpu_R[instr.b]);
+ }
+}
+
+/* Math/logic instructions */
+#define gen_r_math_logic(fname, insn, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), (op3)); \
+ } \
+}
+
+gen_r_math_logic(add, add_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(sub, sub_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(mul, mul_tl, load_gpr(dc, instr.b))
+
+gen_r_math_logic(and, and_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(or, or_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(xor, xor_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(nor, nor_tl, load_gpr(dc, instr.b))
+
+gen_r_math_logic(srai, sari_tl, instr.imm5)
+gen_r_math_logic(srli, shri_tl, instr.imm5)
+gen_r_math_logic(slli, shli_tl, instr.imm5)
+gen_r_math_logic(roli, rotli_tl, instr.imm5)
+
+#define gen_r_mul(fname, insn) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_##insn(t0, cpu_R[instr.c], \
+ load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
+ tcg_temp_free(t0); \
+ } \
+}
+
+gen_r_mul(mulxss, muls2_tl)
+gen_r_mul(mulxuu, mulu2_tl)
+gen_r_mul(mulxsu, mulsu2_tl)
+
+#define gen_r_shift_s(fname, insn) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \
+ tcg_gen_##insn(cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
+ tcg_temp_free(t0); \
+ } \
+}
+
+gen_r_shift_s(sra, sar_tl)
+gen_r_shift_s(srl, shr_tl)
+gen_r_shift_s(sll, shl_tl)
+gen_r_shift_s(rol, rotl_tl)
+gen_r_shift_s(ror, rotr_tl)
+
+static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, (code));
+
+ /* Stores into R_ZERO are ignored */
+ if (unlikely(instr.c == R_ZERO)) {
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
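+    /*
+     * Host integer division traps on divide-by-zero and on the
+     * overflowing INT_MIN / -1 case; detect both and force the divisor
+     * to 1 so that QEMU itself cannot fault here.
+     */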
+ tcg_gen_ext32s_tl(t0, load_gpr(dc, instr.a));
+ tcg_gen_ext32s_tl(t1, load_gpr(dc, instr.b));
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_R[instr.c], t0, t1);
+ tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
+
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, (code));
+
+ /* Stores into R_ZERO are ignored */
+ if (unlikely(instr.c == R_ZERO)) {
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+
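+    /* As in divs(): replace a zero divisor with 1 to avoid a host trap. */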
+ tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a));
+ tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b));
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_R[instr.c], t0, t1);
+ tcg_gen_ext32s_tl(cpu_R[instr.c], cpu_R[instr.c]);
+
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static const Nios2Instruction r_type_instructions[] = {
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(eret), /* eret */
+ INSTRUCTION(roli), /* roli */
+ INSTRUCTION(rol), /* rol */
+ INSTRUCTION_NOP(), /* flushp */
+ INSTRUCTION(ret), /* ret */
+ INSTRUCTION(nor), /* nor */
+ INSTRUCTION(mulxuu), /* mulxuu */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GE), /* cmpge */
+ INSTRUCTION(bret), /* bret */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(ror), /* ror */
+ INSTRUCTION_NOP(), /* flushi */
+ INSTRUCTION(jmp), /* jmp */
+ INSTRUCTION(and), /* and */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LT), /* cmplt */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(slli), /* slli */
+ INSTRUCTION(sll), /* sll */
+ INSTRUCTION_UNIMPLEMENTED(), /* wrprs */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(or), /* or */
+ INSTRUCTION(mulxsu), /* mulxsu */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_NE), /* cmpne */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(srli), /* srli */
+ INSTRUCTION(srl), /* srl */
+ INSTRUCTION(nextpc), /* nextpc */
+ INSTRUCTION(callr), /* callr */
+ INSTRUCTION(xor), /* xor */
+ INSTRUCTION(mulxss), /* mulxss */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_EQ), /* cmpeq */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(divu), /* divu */
+ INSTRUCTION(divs), /* div */
+ INSTRUCTION(rdctl), /* rdctl */
+ INSTRUCTION(mul), /* mul */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GEU), /* cmpgeu */
+ INSTRUCTION_NOP(), /* initi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */
+ INSTRUCTION(wrctl), /* wrctl */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */
+ INSTRUCTION(add), /* add */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_excp, EXCP_BREAK), /* break */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(nop), /* nop */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(sub), /* sub */
+ INSTRUCTION(srai), /* srai */
+ INSTRUCTION(sra), /* sra */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+};
+
+static void handle_r_type_instr(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ uint8_t opx;
+ const Nios2Instruction *instr;
+
+ opx = get_opxcode(code);
+ if (unlikely(opx >= ARRAY_SIZE(r_type_instructions))) {
+ goto illegal_op;
+ }
+
+ instr = &r_type_instructions[opx];
+ instr->handler(dc, code, instr->flags);
+
+ return;
+
+illegal_op:
+ t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+}
+
+static const char * const regnames[] = {
+ "zero", "at", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23",
+ "et", "bt", "gp", "sp",
+ "fp", "ea", "ba", "ra",
+ "status", "estatus", "bstatus", "ienable",
+ "ipending", "cpuid", "reserved0", "exception",
+ "pteaddr", "tlbacc", "tlbmisc", "reserved1",
+ "badaddr", "config", "mpubase", "mpuacc",
+ "reserved2", "reserved3", "reserved4", "reserved5",
+ "reserved6", "reserved7", "reserved8", "reserved9",
+ "reserved10", "reserved11", "reserved12", "reserved13",
+ "reserved14", "reserved15", "reserved16", "reserved17",
+ "rpc"
+};
+
+/* generate intermediate code for basic block 'tb'. */
+static void nios2_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUNios2State *env = cs->env_ptr;
+ int page_insns;
+
+ dc->mem_idx = cpu_mmu_index(env, false);
+
+ /* Bound the number of insns to execute to those left on the page. */
+ page_insns = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+ dc->base.max_insns = MIN(page_insns, dc->base.max_insns);
+}
+
+static void nios2_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
+
+static void nios2_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ tcg_gen_insn_start(dcbase->pc_next);
+}
+
+static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUNios2State *env = cs->env_ptr;
+ const Nios2Instruction *instr;
+ uint32_t code, pc;
+ uint8_t op;
+
+ pc = dc->base.pc_next;
+ dc->pc = pc;
+ dc->base.pc_next = pc + 4;
+
+ /* Decode an instruction */
+
+#if defined(CONFIG_USER_ONLY)
+    /* FIXME: Is this needed? */
+ if (pc >= 0x1000 && pc < 0x2000) {
+ t_gen_helper_raise_exception(dc, 0xaa);
+ return;
+ }
+#endif
+
+ code = cpu_ldl_code(env, pc);
+ op = get_opcode(code);
+
+ if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) {
+ t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+ return;
+ }
+
+ dc->zero = NULL;
+
+ instr = &i_type_instructions[op];
+ instr->handler(dc, code, instr->flags);
+
+ if (dc->zero) {
+ tcg_temp_free(dc->zero);
+ }
+}
+
+static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ /* Indicate where the next block should start */
+ switch (dc->base.is_jmp) {
+ case DISAS_TOO_MANY:
+ case DISAS_UPDATE:
+ /* Save the current PC back into the CPU register */
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->base.pc_next);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+
+ case DISAS_JUMP:
+ /* The jump will already have updated the PC register */
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+
+ case DISAS_NORETURN:
+ /* nothing more to generate */
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void nios2_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps nios2_tr_ops = {
+ .init_disas_context = nios2_tr_init_disas_context,
+ .tb_start = nios2_tr_tb_start,
+ .insn_start = nios2_tr_insn_start,
+ .translate_insn = nios2_tr_translate_insn,
+ .tb_stop = nios2_tr_tb_stop,
+ .disas_log = nios2_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+ translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ int i;
+
+ if (!env) {
+ return;
+ }
+
+ qemu_fprintf(f, "IN: PC=%x %s\n",
+ env->regs[R_PC], lookup_symbol(env->regs[R_PC]));
+
+ for (i = 0; i < NUM_CORE_REGS; i++) {
+ qemu_fprintf(f, "%9s=%8.8x ", regnames[i], env->regs[i]);
+ if ((i + 1) % 4 == 0) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+#if !defined(CONFIG_USER_ONLY)
+ qemu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n",
+ env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK,
+ (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4,
+ env->mmu.tlbacc_wr);
+#endif
+ qemu_fprintf(f, "\n\n");
+}
+
+void nios2_tcg_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CORE_REGS; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUNios2State, regs[i]),
+ regnames[i]);
+ }
+}
+
+void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->regs[R_PC] = data[0];
+}
diff --git a/target/openrisc/Kconfig b/target/openrisc/Kconfig
new file mode 100644
index 000000000..e0da4ac1d
--- /dev/null
+++ b/target/openrisc/Kconfig
@@ -0,0 +1,2 @@
+config OPENRISC
+ bool
diff --git a/target/openrisc/cpu-param.h b/target/openrisc/cpu-param.h
new file mode 100644
index 000000000..06ee64d17
--- /dev/null
+++ b/target/openrisc/cpu-param.h
@@ -0,0 +1,17 @@
+/*
+ * OpenRISC cpu parameters for qemu.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef OPENRISC_CPU_PARAM_H
+#define OPENRISC_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 13
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define NB_MMU_MODES 3
+
+#endif
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
new file mode 100644
index 000000000..dfbafc523
--- /dev/null
+++ b/target/openrisc/cpu.c
@@ -0,0 +1,285 @@
+/*
+ * QEMU OpenRISC CPU
+ *
+ * Copyright (c) 2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+
+static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+
+ cpu->env.pc = value;
+ cpu->env.dflag = 0;
+}
+
+static bool openrisc_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_TIMER);
+}
+
+static void openrisc_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->print_insn = print_insn_or1k;
+}
+
+static void openrisc_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ OpenRISCCPU *cpu = OPENRISC_CPU(s);
+ OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu);
+
+ occ->parent_reset(dev);
+
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, end_reset_fields));
+
+ cpu->env.pc = 0x100;
+ cpu->env.sr = SR_FO | SR_SM;
+ cpu->env.lock_addr = -1;
+ s->exception_index = -1;
+ cpu_set_fpcsr(&cpu->env, 0);
+
+#ifndef CONFIG_USER_ONLY
+ cpu->env.picmr = 0x00000000;
+ cpu->env.picsr = 0x00000000;
+
+ cpu->env.ttmr = 0x00000000;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
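+/*
+ * Level-triggered PIC input: mirror the line state into PICSR, then
+ * assert or deassert the hard interrupt from the unmasked pending set.
+ */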
+static void openrisc_cpu_set_irq(void *opaque, int irq, int level)
+{
+ OpenRISCCPU *cpu = (OpenRISCCPU *)opaque;
+ CPUState *cs = CPU(cpu);
+ uint32_t irq_bit;
+
+ if (irq > 31 || irq < 0) {
+ return;
+ }
+
+ irq_bit = 1U << irq;
+
+ if (level) {
+ cpu->env.picsr |= irq_bit;
+ } else {
+ cpu->env.picsr &= ~irq_bit;
+ }
+
+ if (cpu->env.picsr & cpu->env.picmr) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ cpu->env.picsr = 0;
+ }
+}
+#endif
+
+static void openrisc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ occ->parent_realize(dev, errp);
+}
+
+static void openrisc_cpu_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+
+#ifndef CONFIG_USER_ONLY
+ qdev_init_gpio_in_named(DEVICE(cpu), openrisc_cpu_set_irq, "IRQ", NR_IRQS);
+#endif
+}
+
+/* CPU models */
+
+static ObjectClass *openrisc_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = g_strdup_printf(OPENRISC_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU) ||
+ object_class_is_abstract(oc))) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void or1200_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ cpu->env.vr = 0x13000008;
+ cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP | UPR_PMP;
+ cpu->env.cpucfgr = CPUCFGR_NSGF | CPUCFGR_OB32S | CPUCFGR_OF32S |
+ CPUCFGR_EVBARP;
+
+ /* 1Way, TLB_SIZE entries. */
+ cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2))
+ | (DMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
+ cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2))
+ | (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
+}
+
+static void openrisc_any_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ cpu->env.vr = 0x13000040; /* Obsolete VER + UVRP for new SPRs */
+ cpu->env.vr2 = 0; /* No version specific id */
+ cpu->env.avr = 0x01030000; /* Architecture v1.3 */
+
+ cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP | UPR_PMP;
+ cpu->env.cpucfgr = CPUCFGR_NSGF | CPUCFGR_OB32S | CPUCFGR_OF32S |
+ CPUCFGR_AVRP | CPUCFGR_EVBARP | CPUCFGR_OF64A32S;
+
+ /* 1Way, TLB_SIZE entries. */
+ cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2))
+ | (DMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
+ cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2))
+ | (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps openrisc_sysemu_ops = {
+ .get_phys_page_debug = openrisc_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps openrisc_tcg_ops = {
+ .initialize = openrisc_translate_init,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = openrisc_cpu_tlb_fill,
+ .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
+ .do_interrupt = openrisc_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(occ);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_parent_realize(dc, openrisc_cpu_realizefn,
+ &occ->parent_realize);
+ device_class_set_parent_reset(dc, openrisc_cpu_reset, &occ->parent_reset);
+
+ cc->class_by_name = openrisc_cpu_class_by_name;
+ cc->has_work = openrisc_cpu_has_work;
+ cc->dump_state = openrisc_cpu_dump_state;
+ cc->set_pc = openrisc_cpu_set_pc;
+ cc->gdb_read_register = openrisc_cpu_gdb_read_register;
+ cc->gdb_write_register = openrisc_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ dc->vmsd = &vmstate_openrisc_cpu;
+ cc->sysemu_ops = &openrisc_sysemu_ops;
+#endif
+ cc->gdb_num_core_regs = 32 + 3;
+ cc->disas_set_info = openrisc_disas_set_info;
+ cc->tcg_ops = &openrisc_tcg_ops;
+}
+
+/* Sort alphabetically by type name, except for "any". */
+static gint openrisc_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_OPENRISC_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_OPENRISC_CPU) == 0) {
+ return -1;
+ } else {
+ return strcmp(name_a, name_b);
+ }
+}
+
+static void openrisc_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_OPENRISC_CPU));
+ qemu_printf(" %s\n", name);
+ g_free(name);
+}
+
+void cpu_openrisc_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_OPENRISC_CPU, false);
+ list = g_slist_sort(list, openrisc_cpu_list_compare);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, openrisc_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+#define DEFINE_OPENRISC_CPU_TYPE(cpu_model, initfn) \
+ { \
+ .parent = TYPE_OPENRISC_CPU, \
+ .instance_init = initfn, \
+ .name = OPENRISC_CPU_TYPE_NAME(cpu_model), \
+ }
+
+static const TypeInfo openrisc_cpus_type_infos[] = {
+ { /* base class should be registered first */
+ .name = TYPE_OPENRISC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(OpenRISCCPU),
+ .instance_init = openrisc_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(OpenRISCCPUClass),
+ .class_init = openrisc_cpu_class_init,
+ },
+ DEFINE_OPENRISC_CPU_TYPE("or1200", or1200_initfn),
+ DEFINE_OPENRISC_CPU_TYPE("any", openrisc_any_initfn),
+};
+
+DEFINE_TYPES(openrisc_cpus_type_infos)
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
new file mode 100644
index 000000000..ee069b080
--- /dev/null
+++ b/target/openrisc/cpu.h
@@ -0,0 +1,416 @@
+/*
+ * OpenRISC virtual CPU header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENRISC_CPU_H
+#define OPENRISC_CPU_H
+
+#include "exec/cpu-defs.h"
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+/* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */
+struct OpenRISCCPU;
+
+#define TYPE_OPENRISC_CPU "or1k-cpu"
+
+OBJECT_DECLARE_TYPE(OpenRISCCPU, OpenRISCCPUClass,
+ OPENRISC_CPU)
+
+/**
+ * OpenRISCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An OpenRISC CPU model.
+ */
+struct OpenRISCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+enum {
+ MMU_NOMMU_IDX = 0,
+ MMU_SUPERVISOR_IDX = 1,
+ MMU_USER_IDX = 2,
+};
+
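+/* FPCSR bit fields used below: cause [17:12], enable mask [11:7], flags [6:2]. */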
+#define SET_FP_CAUSE(reg, v) do {\
+ (reg) = ((reg) & ~(0x3f << 12)) | \
+ ((v & 0x3f) << 12);\
+ } while (0)
+#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
+#define UPDATE_FP_FLAGS(reg, v) do {\
+ (reg) |= ((v & 0x1f) << 2);\
+ } while (0)
+
+/* Interrupt */
+#define NR_IRQS 32
+
+/* Unit presence register */
+enum {
+ UPR_UP = (1 << 0),
+ UPR_DCP = (1 << 1),
+ UPR_ICP = (1 << 2),
+ UPR_DMP = (1 << 3),
+ UPR_IMP = (1 << 4),
+ UPR_MP = (1 << 5),
+ UPR_DUP = (1 << 6),
+ UPR_PCUR = (1 << 7),
+ UPR_PMP = (1 << 8),
+ UPR_PICP = (1 << 9),
+ UPR_TTP = (1 << 10),
+ UPR_CUP = (255 << 24),
+};
+
+/* CPU configure register */
+enum {
+ CPUCFGR_NSGF = (15 << 0),
+ CPUCFGR_CGF = (1 << 4),
+ CPUCFGR_OB32S = (1 << 5),
+ CPUCFGR_OB64S = (1 << 6),
+ CPUCFGR_OF32S = (1 << 7),
+ CPUCFGR_OF64S = (1 << 8),
+ CPUCFGR_OV64S = (1 << 9),
+ CPUCFGR_ND = (1 << 10),
+ CPUCFGR_AVRP = (1 << 11),
+ CPUCFGR_EVBARP = (1 << 12),
+ CPUCFGR_ISRP = (1 << 13),
+ CPUCFGR_AECSRP = (1 << 14),
+ CPUCFGR_OF64A32S = (1 << 15),
+};
+
+/* DMMU configure register */
+enum {
+ DMMUCFGR_NTW = (3 << 0),
+ DMMUCFGR_NTS = (7 << 2),
+ DMMUCFGR_NAE = (7 << 5),
+ DMMUCFGR_CRI = (1 << 8),
+ DMMUCFGR_PRI = (1 << 9),
+ DMMUCFGR_TEIRI = (1 << 10),
+ DMMUCFGR_HTR = (1 << 11),
+};
+
+/* IMMU configure register */
+enum {
+ IMMUCFGR_NTW = (3 << 0),
+ IMMUCFGR_NTS = (7 << 2),
+ IMMUCFGR_NAE = (7 << 5),
+ IMMUCFGR_CRI = (1 << 8),
+ IMMUCFGR_PRI = (1 << 9),
+ IMMUCFGR_TEIRI = (1 << 10),
+ IMMUCFGR_HTR = (1 << 11),
+};
+
+/* Power management register */
+enum {
+ PMR_SDF = (15 << 0),
+ PMR_DME = (1 << 4),
+ PMR_SME = (1 << 5),
+ PMR_DCGE = (1 << 6),
+ PMR_SUME = (1 << 7),
+};
+
+/* Floating point control status register */
+enum {
+ FPCSR_FPEE = 1,
+ FPCSR_RM = (3 << 1),
+ FPCSR_OVF = (1 << 3),
+ FPCSR_UNF = (1 << 4),
+ FPCSR_SNF = (1 << 5),
+ FPCSR_QNF = (1 << 6),
+ FPCSR_ZF = (1 << 7),
+ FPCSR_IXF = (1 << 8),
+ FPCSR_IVF = (1 << 9),
+ FPCSR_INF = (1 << 10),
+ FPCSR_DZF = (1 << 11),
+};
+
+/* Exceptions indices */
+enum {
+ EXCP_RESET = 0x1,
+ EXCP_BUSERR = 0x2,
+ EXCP_DPF = 0x3,
+ EXCP_IPF = 0x4,
+ EXCP_TICK = 0x5,
+ EXCP_ALIGN = 0x6,
+ EXCP_ILLEGAL = 0x7,
+ EXCP_INT = 0x8,
+ EXCP_DTLBMISS = 0x9,
+ EXCP_ITLBMISS = 0xa,
+ EXCP_RANGE = 0xb,
+ EXCP_SYSCALL = 0xc,
+ EXCP_FPE = 0xd,
+ EXCP_TRAP = 0xe,
+ EXCP_NR,
+};
+
+/* Supervisor register */
+enum {
+ SR_SM = (1 << 0),
+ SR_TEE = (1 << 1),
+ SR_IEE = (1 << 2),
+ SR_DCE = (1 << 3),
+ SR_ICE = (1 << 4),
+ SR_DME = (1 << 5),
+ SR_IME = (1 << 6),
+ SR_LEE = (1 << 7),
+ SR_CE = (1 << 8),
+ SR_F = (1 << 9),
+ SR_CY = (1 << 10),
+ SR_OV = (1 << 11),
+ SR_OVE = (1 << 12),
+ SR_DSX = (1 << 13),
+ SR_EPH = (1 << 14),
+ SR_FO = (1 << 15),
+ SR_SUMRA = (1 << 16),
+ SR_SCE = (1 << 17),
+};
+
+/* Tick Timer Mode Register */
+enum {
+ TTMR_TP = (0xfffffff),
+ TTMR_IP = (1 << 28),
+ TTMR_IE = (1 << 29),
+ TTMR_M = (3 << 30),
+};
+
+/* Timer Mode */
+enum {
+ TIMER_NONE = (0 << 30),
+ TIMER_INTR = (1 << 30),
+ TIMER_SHOT = (2 << 30),
+ TIMER_CONT = (3 << 30),
+};
+
+/* TLB size */
+enum {
+ TLB_SIZE = 128,
+ TLB_MASK = TLB_SIZE - 1,
+};
+
+/* TLB prot */
+enum {
+ URE = (1 << 6),
+ UWE = (1 << 7),
+ SRE = (1 << 8),
+ SWE = (1 << 9),
+
+ SXE = (1 << 6),
+ UXE = (1 << 7),
+};
+
+typedef struct OpenRISCTLBEntry {
+ uint32_t mr;
+ uint32_t tr;
+} OpenRISCTLBEntry;
+
+#ifndef CONFIG_USER_ONLY
+typedef struct CPUOpenRISCTLBContext {
+ OpenRISCTLBEntry itlb[TLB_SIZE];
+ OpenRISCTLBEntry dtlb[TLB_SIZE];
+
+ int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot,
+ target_ulong address, int rw);
+ int (*cpu_openrisc_map_address_data)(struct OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot,
+ target_ulong address, int rw);
+} CPUOpenRISCTLBContext;
+#endif
+
+typedef struct CPUOpenRISCState {
+ target_ulong shadow_gpr[16][32]; /* Shadow registers */
+
+ target_ulong pc; /* Program counter */
+ target_ulong ppc; /* Prev PC */
+ target_ulong jmp_pc; /* Jump PC */
+
+ uint64_t mac; /* Multiply registers MACHI:MACLO */
+
+ target_ulong epcr; /* Exception PC register */
+ target_ulong eear; /* Exception EA register */
+
+ target_ulong sr_f; /* the SR_F bit, values 0, 1. */
+ target_ulong sr_cy; /* the SR_CY bit, values 0, 1. */
+ target_long sr_ov; /* the SR_OV bit (in the sign bit only) */
+ uint32_t sr; /* Supervisor register, without SR_{F,CY,OV} */
+ uint32_t esr; /* Exception supervisor register */
+ uint32_t evbar; /* Exception vector base address register */
+ uint32_t pmr; /* Power Management Register */
+    uint32_t fpcsr;           /* Floating point control status register */
+ float_status fp_status;
+
+ target_ulong lock_addr;
+ target_ulong lock_value;
+
+ uint32_t dflag; /* In delay slot (boolean) */
+
+#ifndef CONFIG_USER_ONLY
+ CPUOpenRISCTLBContext tlb;
+#endif
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved across CPU reset. */
+ uint32_t vr; /* Version register */
+ uint32_t vr2; /* Version register 2 */
+ uint32_t avr; /* Architecture version register */
+ uint32_t upr; /* Unit presence register */
+ uint32_t cpucfgr; /* CPU configure register */
+ uint32_t dmmucfgr; /* DMMU configure register */
+ uint32_t immucfgr; /* IMMU configure register */
+
+#ifndef CONFIG_USER_ONLY
+ QEMUTimer *timer;
+ uint32_t ttmr; /* Timer tick mode register */
+ int is_counting;
+
+ uint32_t picmr; /* Interrupt mask register */
+    uint32_t picsr;         /* Interrupt control register */
+#endif
+} CPUOpenRISCState;
+
+/**
+ * OpenRISCCPU:
+ * @env: #CPUOpenRISCState
+ *
+ * An OpenRISC CPU.
+ */
+struct OpenRISCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUOpenRISCState env;
+};
+
+
+void cpu_openrisc_list(void);
+void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void openrisc_translate_init(void);
+int print_insn_or1k(bfd_vma addr, disassemble_info *info);
+
+#define cpu_list cpu_openrisc_list
+
+#ifndef CONFIG_USER_ONLY
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+extern const VMStateDescription vmstate_openrisc_cpu;
+
+void openrisc_cpu_do_interrupt(CPUState *cpu);
+bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+/* hw/openrisc_pic.c */
+void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
+
+/* hw/openrisc_timer.c */
+void cpu_openrisc_clock_init(OpenRISCCPU *cpu);
+uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu);
+void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val);
+void cpu_openrisc_count_update(OpenRISCCPU *cpu);
+void cpu_openrisc_timer_update(OpenRISCCPU *cpu);
+void cpu_openrisc_count_start(OpenRISCCPU *cpu);
+void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
+#endif
+
+#define OPENRISC_CPU_TYPE_SUFFIX "-" TYPE_OPENRISC_CPU
+#define OPENRISC_CPU_TYPE_NAME(model) model OPENRISC_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU
+
+typedef CPUOpenRISCState CPUArchState;
+typedef OpenRISCCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+#define TB_FLAGS_SM SR_SM
+#define TB_FLAGS_DME SR_DME
+#define TB_FLAGS_IME SR_IME
+#define TB_FLAGS_OVE SR_OVE
+#define TB_FLAGS_DFLAG 2 /* reuse SR_TEE */
+#define TB_FLAGS_R0_0 4 /* reuse SR_IEE */
+
+static inline uint32_t cpu_get_gpr(const CPUOpenRISCState *env, int i)
+{
+ return env->shadow_gpr[0][i];
+}
+
+static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val)
+{
+ env->shadow_gpr[0][i] = val;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
+ target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = (env->dflag ? TB_FLAGS_DFLAG : 0)
+ | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
+ | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE));
+}
+
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
+{
+ int ret = MMU_NOMMU_IDX; /* mmu is disabled */
+
+ if (env->sr & (ifetch ? SR_IME : SR_DME)) {
+ /* The mmu is enabled; test supervisor state. */
+ ret = env->sr & SR_SM ? MMU_SUPERVISOR_IDX : MMU_USER_IDX;
+ }
+
+ return ret;
+}
+
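+/*
+ * SR_F, SR_CY and SR_OV are kept in dedicated fields (see the comments
+ * in CPUOpenRISCState) so TCG code can update them cheaply; fold them
+ * back in when the architectural SR is read, and split them out again
+ * on write.
+ */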
+static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env)
+{
+ return (env->sr
+ + env->sr_f * SR_F
+ + env->sr_cy * SR_CY
+ + (env->sr_ov < 0) * SR_OV);
+}
+
+static inline void cpu_set_sr(CPUOpenRISCState *env, uint32_t val)
+{
+ env->sr_f = (val & SR_F) != 0;
+ env->sr_cy = (val & SR_CY) != 0;
+ env->sr_ov = (val & SR_OV ? -1 : 0);
+ env->sr = (val & ~(SR_F | SR_CY | SR_OV)) | SR_FO;
+}
+
+void cpu_set_fpcsr(CPUOpenRISCState *env, uint32_t val);
+
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_INT_0
+
+#endif /* OPENRISC_CPU_H */
diff --git a/target/openrisc/disas.c b/target/openrisc/disas.c
new file mode 100644
index 000000000..dc025bd64
--- /dev/null
+++ b/target/openrisc/disas.c
@@ -0,0 +1,249 @@
+/*
+ * OpenRISC disassembler
+ *
+ * Copyright (c) 2018 Richard Henderson <rth@twiddle.net>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/dis-asm.h"
+#include "qemu/bitops.h"
+#include "cpu.h"
+
+typedef disassemble_info DisasContext;
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+#define output(mnemonic, format, ...) \
+ (info->fprintf_func(info->stream, "%-9s " format, \
+ mnemonic, ##__VA_ARGS__))
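+/*
+ * The mnemonic is left-justified in a nine-column field, so e.g.
+ * INSN(add, ...) below renders as "l.add     r3, r4, r5".
+ */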
+
+int print_insn_or1k(bfd_vma addr, disassemble_info *info)
+{
+ bfd_byte buffer[4];
+ uint32_t insn;
+ int status;
+
+ status = info->read_memory_func(addr, buffer, 4, info);
+ if (status != 0) {
+ info->memory_error_func(status, addr, info);
+ return -1;
+ }
+ insn = bfd_getb32(buffer);
+
+ if (!decode(info, insn)) {
+ output(".long", "%#08x", insn);
+ }
+ return 4;
+}
+
+#define INSN(opcode, format, ...) \
+static bool trans_l_##opcode(disassemble_info *info, arg_l_##opcode *a) \
+{ \
+ output("l." #opcode, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+INSN(add, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(addc, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(sub, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(and, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(or, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(xor, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(sll, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(srl, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(sra, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(ror, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(exths, "r%d, r%d", a->d, a->a)
+INSN(extbs, "r%d, r%d", a->d, a->a)
+INSN(exthz, "r%d, r%d", a->d, a->a)
+INSN(extbz, "r%d, r%d", a->d, a->a)
+INSN(cmov, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(ff1, "r%d, r%d", a->d, a->a)
+INSN(fl1, "r%d, r%d", a->d, a->a)
+INSN(mul, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(mulu, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(div, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(divu, "r%d, r%d, r%d", a->d, a->a, a->b)
+INSN(muld, "r%d, r%d", a->a, a->b)
+INSN(muldu, "r%d, r%d", a->a, a->b)
+INSN(j, "%d", a->n)
+INSN(jal, "%d", a->n)
+INSN(bf, "%d", a->n)
+INSN(bnf, "%d", a->n)
+INSN(jr, "r%d", a->b)
+INSN(jalr, "r%d", a->b)
+INSN(lwa, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lwz, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lws, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lbz, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lbs, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lhz, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(lhs, "r%d, %d(r%d)", a->d, a->i, a->a)
+INSN(swa, "%d(r%d), r%d", a->i, a->a, a->b)
+INSN(sw, "%d(r%d), r%d", a->i, a->a, a->b)
+INSN(sb, "%d(r%d), r%d", a->i, a->a, a->b)
+INSN(sh, "%d(r%d), r%d", a->i, a->a, a->b)
+INSN(nop, "")
+INSN(adrp, "r%d, %d", a->d, a->i)
+INSN(addi, "r%d, r%d, %d", a->d, a->a, a->i)
+INSN(addic, "r%d, r%d, %d", a->d, a->a, a->i)
+INSN(muli, "r%d, r%d, %d", a->d, a->a, a->i)
+INSN(maci, "r%d, %d", a->a, a->i)
+INSN(andi, "r%d, r%d, %d", a->d, a->a, a->k)
+INSN(ori, "r%d, r%d, %d", a->d, a->a, a->k)
+INSN(xori, "r%d, r%d, %d", a->d, a->a, a->i)
+INSN(mfspr, "r%d, r%d, %d", a->d, a->a, a->k)
+INSN(mtspr, "r%d, r%d, %d", a->a, a->b, a->k)
+INSN(mac, "r%d, r%d", a->a, a->b)
+INSN(msb, "r%d, r%d", a->a, a->b)
+INSN(macu, "r%d, r%d", a->a, a->b)
+INSN(msbu, "r%d, r%d", a->a, a->b)
+INSN(slli, "r%d, r%d, %d", a->d, a->a, a->l)
+INSN(srli, "r%d, r%d, %d", a->d, a->a, a->l)
+INSN(srai, "r%d, r%d, %d", a->d, a->a, a->l)
+INSN(rori, "r%d, r%d, %d", a->d, a->a, a->l)
+INSN(movhi, "r%d, %d", a->d, a->k)
+INSN(macrc, "r%d", a->d)
+INSN(sfeq, "r%d, r%d", a->a, a->b)
+INSN(sfne, "r%d, r%d", a->a, a->b)
+INSN(sfgtu, "r%d, r%d", a->a, a->b)
+INSN(sfgeu, "r%d, r%d", a->a, a->b)
+INSN(sfltu, "r%d, r%d", a->a, a->b)
+INSN(sfleu, "r%d, r%d", a->a, a->b)
+INSN(sfgts, "r%d, r%d", a->a, a->b)
+INSN(sfges, "r%d, r%d", a->a, a->b)
+INSN(sflts, "r%d, r%d", a->a, a->b)
+INSN(sfles, "r%d, r%d", a->a, a->b)
+INSN(sfeqi, "r%d, %d", a->a, a->i)
+INSN(sfnei, "r%d, %d", a->a, a->i)
+INSN(sfgtui, "r%d, %d", a->a, a->i)
+INSN(sfgeui, "r%d, %d", a->a, a->i)
+INSN(sfltui, "r%d, %d", a->a, a->i)
+INSN(sfleui, "r%d, %d", a->a, a->i)
+INSN(sfgtsi, "r%d, %d", a->a, a->i)
+INSN(sfgesi, "r%d, %d", a->a, a->i)
+INSN(sfltsi, "r%d, %d", a->a, a->i)
+INSN(sflesi, "r%d, %d", a->a, a->i)
+INSN(sys, "%d", a->k)
+INSN(trap, "%d", a->k)
+INSN(msync, "")
+INSN(psync, "")
+INSN(csync, "")
+INSN(rfe, "")
+
+#define FP_INSN(opcode, suffix, format, ...) \
+static bool trans_lf_##opcode##_##suffix(disassemble_info *info, \
+ arg_lf_##opcode##_##suffix *a) \
+{ \
+ output("lf." #opcode "." #suffix, format, ##__VA_ARGS__); \
+ return true; \
+}
+
+FP_INSN(add, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(sub, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(mul, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(div, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(rem, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(itof, s, "r%d, r%d", a->d, a->a)
+FP_INSN(ftoi, s, "r%d, r%d", a->d, a->a)
+FP_INSN(madd, s, "r%d, r%d, r%d", a->d, a->a, a->b)
+FP_INSN(sfeq, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfne, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfgt, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfge, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sflt, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfle, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfun, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfueq, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfuge, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfugt, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfule, s, "r%d, r%d", a->a, a->b)
+FP_INSN(sfult, s, "r%d, r%d", a->a, a->b)
+
+FP_INSN(add, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sub, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(mul, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(div, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(rem, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(madd, d, "r%d,r%d, r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+
+FP_INSN(itof, d, "r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1)
+FP_INSN(ftoi, d, "r%d,r%d, r%d,r%d",
+ a->d, a->d + a->dp + 1,
+ a->a, a->a + a->ap + 1)
+
+FP_INSN(stod, d, "r%d,r%d, r%d",
+ a->d, a->d + a->dp + 1, a->a)
+FP_INSN(dtos, d, "r%d, r%d,r%d",
+ a->d, a->a, a->a + a->ap + 1)
+
+FP_INSN(sfeq, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfne, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfgt, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfge, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sflt, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfle, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfun, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfueq, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfuge, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfugt, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfule, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
+FP_INSN(sfult, d, "r%d,r%d, r%d,r%d",
+ a->a, a->a + a->ap + 1,
+ a->b, a->b + a->bp + 1)
diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c
new file mode 100644
index 000000000..28c1fce52
--- /dev/null
+++ b/target/openrisc/exception.c
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC exception.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exception.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp)
+{
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
diff --git a/target/openrisc/exception.h b/target/openrisc/exception.h
new file mode 100644
index 000000000..333bf8463
--- /dev/null
+++ b/target/openrisc/exception.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC exception header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_OPENRISC_EXCEPTION_H
+#define TARGET_OPENRISC_EXCEPTION_H
+
+#include "cpu.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp);
+
+#endif /* TARGET_OPENRISC_EXCEPTION_H */
diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c
new file mode 100644
index 000000000..d02a1cf0a
--- /dev/null
+++ b/target/openrisc/exception_helper.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC exception helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+
+void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp)
+{
+ OpenRISCCPU *cpu = env_archcpu(env);
+
+ raise_exception(cpu, excp);
+}
+
+static void QEMU_NORETURN do_range(CPUOpenRISCState *env, uintptr_t pc)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_RANGE;
+ cpu_loop_exit_restore(cs, pc);
+}
+
+void HELPER(ove_cy)(CPUOpenRISCState *env)
+{
+ if (env->sr_cy) {
+ do_range(env, GETPC());
+ }
+}
+
+void HELPER(ove_ov)(CPUOpenRISCState *env)
+{
+ if (env->sr_ov < 0) {
+ do_range(env, GETPC());
+ }
+}
+
+void HELPER(ove_cyov)(CPUOpenRISCState *env)
+{
+ if (env->sr_cy || env->sr_ov < 0) {
+ do_range(env, GETPC());
+ }
+}
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
new file mode 100644
index 000000000..f9e34fa2c
--- /dev/null
+++ b/target/openrisc/fpu_helper.c
@@ -0,0 +1,171 @@
+/*
+ * OpenRISC float helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+#include "fpu/softfloat.h"
+
+static int ieee_ex_to_openrisc(int fexcp)
+{
+ int ret = 0;
+ if (fexcp & float_flag_invalid) {
+ ret |= FPCSR_IVF;
+ }
+ if (fexcp & float_flag_overflow) {
+ ret |= FPCSR_OVF;
+ }
+ if (fexcp & float_flag_underflow) {
+ ret |= FPCSR_UNF;
+ }
+ if (fexcp & float_flag_divbyzero) {
+ ret |= FPCSR_DZF;
+ }
+ if (fexcp & float_flag_inexact) {
+ ret |= FPCSR_IXF;
+ }
+ return ret;
+}
+
+void HELPER(update_fpcsr)(CPUOpenRISCState *env)
+{
+ int tmp = get_float_exception_flags(&env->fp_status);
+
+ if (tmp) {
+ set_float_exception_flags(0, &env->fp_status);
+ tmp = ieee_ex_to_openrisc(tmp);
+ if (tmp) {
+ env->fpcsr |= tmp;
+ if (env->fpcsr & FPCSR_FPEE) {
+ helper_exception(env, EXCP_FPE);
+ }
+ }
+ }
+}
+
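+/*
+ * FPCSR bits [2:1] hold the rounding mode; the rm_to_sf[] table below
+ * maps them to the softfloat equivalents (00 nearest-even, 01 toward
+ * zero, 10 up, 11 down, matching the OR1K FPCSR RM encoding).
+ */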
+void cpu_set_fpcsr(CPUOpenRISCState *env, uint32_t val)
+{
+ static const int rm_to_sf[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down
+ };
+
+ env->fpcsr = val & 0xfff;
+ set_float_rounding_mode(rm_to_sf[extract32(val, 1, 2)], &env->fp_status);
+}
+
+uint64_t HELPER(itofd)(CPUOpenRISCState *env, uint64_t val)
+{
+ return int64_to_float64(val, &env->fp_status);
+}
+
+uint32_t HELPER(itofs)(CPUOpenRISCState *env, uint32_t val)
+{
+ return int32_to_float32(val, &env->fp_status);
+}
+
+uint64_t HELPER(ftoid)(CPUOpenRISCState *env, uint64_t val)
+{
+ return float64_to_int64_round_to_zero(val, &env->fp_status);
+}
+
+uint32_t HELPER(ftois)(CPUOpenRISCState *env, uint32_t val)
+{
+ return float32_to_int32_round_to_zero(val, &env->fp_status);
+}
+
+uint64_t HELPER(stod)(CPUOpenRISCState *env, uint32_t val)
+{
+ return float32_to_float64(val, &env->fp_status);
+}
+
+uint32_t HELPER(dtos)(CPUOpenRISCState *env, uint64_t val)
+{
+ return float64_to_float32(val, &env->fp_status);
+}
+
+#define FLOAT_CALC(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ return float64_ ## name(fdt0, fdt1, &env->fp_status); } \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ return float32_ ## name(fdt0, fdt1, &env->fp_status); }
+
+FLOAT_CALC(add)
+FLOAT_CALC(sub)
+FLOAT_CALC(mul)
+FLOAT_CALC(div)
+FLOAT_CALC(rem)
+#undef FLOAT_CALC
+
+
+uint64_t helper_float_madd_d(CPUOpenRISCState *env, uint64_t a,
+ uint64_t b, uint64_t c)
+{
+ /* Note that or1ksim doesn't use a fused operation. */
+ b = float64_mul(b, c, &env->fp_status);
+ return float64_add(a, b, &env->fp_status);
+}
+
+uint32_t helper_float_madd_s(CPUOpenRISCState *env, uint32_t a,
+ uint32_t b, uint32_t c)
+{
+ /* Note that or1ksim doesn't use a fused operation. */
+ b = float32_mul(b, c, &env->fp_status);
+ return float32_add(a, b, &env->fp_status);
+}
+
+
+#define FLOAT_CMP(name, impl) \
+target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ return float64_ ## impl(fdt0, fdt1, &env->fp_status); } \
+target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ return float32_ ## impl(fdt0, fdt1, &env->fp_status); }
+
+FLOAT_CMP(le, le)
+FLOAT_CMP(lt, lt)
+FLOAT_CMP(eq, eq_quiet)
+FLOAT_CMP(un, unordered_quiet)
+#undef FLOAT_CMP
+
+#define FLOAT_UCMP(name, expr) \
+target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ FloatRelation r = float64_compare_quiet(fdt0, fdt1, &env->fp_status); \
+ return expr; \
+} \
+target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ FloatRelation r = float32_compare_quiet(fdt0, fdt1, &env->fp_status); \
+ return expr; \
+}
+
+FLOAT_UCMP(ueq, r == float_relation_equal || r == float_relation_unordered)
+FLOAT_UCMP(ult, r == float_relation_less || r == float_relation_unordered)
+FLOAT_UCMP(ule, r != float_relation_greater)
+#undef FLOAT_UCMP
diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c
new file mode 100644
index 000000000..095bf76c1
--- /dev/null
+++ b/target/openrisc/gdbstub.c
@@ -0,0 +1,88 @@
+/*
+ * OpenRISC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
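+/*
+ * GDB register map used by both accessors below: registers 0-31 are
+ * the GPRs, 32 is PPC (previous PC), 33 is NPC (the architectural
+ * PC), and 34 is SR.
+ */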
+int openrisc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_reg32(mem_buf, cpu_get_gpr(env, n));
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ return gdb_get_reg32(mem_buf, env->ppc);
+
+ case 33: /* NPC (equals PC) */
+ return gdb_get_reg32(mem_buf, env->pc);
+
+ case 34: /* SR */
+ return gdb_get_reg32(mem_buf, cpu_get_sr(env));
+
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 32) {
+ cpu_set_gpr(env, n, tmp);
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ env->ppc = tmp;
+ break;
+
+ case 33: /* NPC (equals PC) */
+ /* If setting PC to something different,
+ also clear delayed branch status. */
+ if (env->pc != tmp) {
+ env->pc = tmp;
+ env->dflag = 0;
+ }
+ break;
+
+ case 34: /* SR */
+ cpu_set_sr(env, tmp);
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 4;
+}
diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h
new file mode 100644
index 000000000..d847814a2
--- /dev/null
+++ b/target/openrisc/helper.h
@@ -0,0 +1,66 @@
+/*
+ * OpenRISC helper defines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
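+/*
+ * The TCG_CALL_NO_WG / TCG_CALL_NO_RWG flags declare that a helper
+ * does not write (or neither reads nor writes) TCG globals, which
+ * lets TCG keep more values live in registers across the call.
+ */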
+/* exception */
+DEF_HELPER_FLAGS_2(exception, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_1(ove_cy, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_1(ove_ov, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_1(ove_cyov, TCG_CALL_NO_WG, void, env)
+
+/* float */
+DEF_HELPER_FLAGS_1(update_fpcsr, TCG_CALL_NO_WG, void, env)
+
+DEF_HELPER_FLAGS_2(itofd, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(itofs, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(ftoid, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(ftois, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(stod, TCG_CALL_NO_RWG, i64, env, i32)
+DEF_HELPER_FLAGS_2(dtos, TCG_CALL_NO_RWG, i32, env, i64)
+
+DEF_HELPER_FLAGS_4(float_madd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(float_madd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+
+#define FOP_CALC(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+FOP_CALC(add)
+FOP_CALC(sub)
+FOP_CALC(mul)
+FOP_CALC(div)
+FOP_CALC(rem)
+#undef FOP_CALC
+
+#define FOP_CMP(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, tl, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+FOP_CMP(eq)
+FOP_CMP(lt)
+FOP_CMP(le)
+FOP_CMP(un)
+FOP_CMP(ueq)
+FOP_CMP(ule)
+FOP_CMP(ult)
+#undef FOP_CMP
+
+/* interrupt */
+DEF_HELPER_FLAGS_1(rfe, 0, void, env)
+
+/* sys */
+DEF_HELPER_FLAGS_3(mtspr, 0, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl)
diff --git a/target/openrisc/insns.decode b/target/openrisc/insns.decode
new file mode 100644
index 000000000..0d6f7c29f
--- /dev/null
+++ b/target/openrisc/insns.decode
@@ -0,0 +1,234 @@
+#
+# OpenRISC instruction decode definitions.
+#
+# Copyright (c) 2018 Richard Henderson <rth@twiddle.net>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
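+# Field syntax reminder: "d:5" extracts a 5-bit unsigned field (s26 is
+# a signed one), "-" marks a don't-care bit, "%name" is a computed
+# field, "&name" declares an argument set, and "@name" a reusable
+# format.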
+&dab d a b
+&da d a
+&ab a b
+&dal d a l
+&ai a i
+&dab_pair d a b dp ap bp
+&ab_pair a b ap bp
+&da_pair d a dp ap
+
+####
+# System Instructions
+####
+
+l_sys 001000 0000000000 k:16
+l_trap 001000 0100000000 k:16
+l_msync 001000 1000000000 00000000 00000000
+l_psync 001000 1010000000 00000000 00000000
+l_csync 001000 1100000000 00000000 00000000
+
+l_rfe 001001 ----- ----- -------- --------
+
+####
+# Branch Instructions
+####
+
+l_j 000000 n:s26
+l_jal 000001 n:s26
+l_bnf 000011 n:s26
+l_bf 000100 n:s26
+
+l_jr 010001 ---------- b:5 -----------
+l_jalr 010010 ---------- b:5 -----------
+
+####
+# Memory Instructions
+####
+
+&load d a i
+@load ...... d:5 a:5 i:s16 &load
+
+%store_i 21:s5 0:11
+&store a b i
+@store ...... ..... a:5 b:5 ........... &store i=%store_i
+
+l_lwa 011011 ..... ..... ........ ........ @load
+l_lwz 100001 ..... ..... ........ ........ @load
+l_lws 100010 ..... ..... ........ ........ @load
+l_lbz 100011 ..... ..... ........ ........ @load
+l_lbs 100100 ..... ..... ........ ........ @load
+l_lhz 100101 ..... ..... ........ ........ @load
+l_lhs 100110 ..... ..... ........ ........ @load
+
+l_swa 110011 ..... ..... ..... ........... @store
+l_sw 110101 ..... ..... ..... ........... @store
+l_sb 110110 ..... ..... ..... ........... @store
+l_sh 110111 ..... ..... ..... ........... @store
+
+####
+# Immediate Operand Instructions
+####
+
+%mtspr_k 21:5 0:11
+
+&rri d a i
+&rrk d a k
+@rri ...... d:5 a:5 i:s16 &rri
+@rrk ...... d:5 a:5 k:16 &rrk
+
+l_nop 000101 01--- ----- k:16
+
+l_addi 100111 ..... ..... ........ ........ @rri
+l_addic 101000 ..... ..... ........ ........ @rri
+l_andi 101001 ..... ..... ........ ........ @rrk
+l_ori 101010 ..... ..... ........ ........ @rrk
+l_xori 101011 ..... ..... ........ ........ @rri
+l_muli 101100 ..... ..... ........ ........ @rri
+
+l_mfspr 101101 ..... ..... ........ ........ @rrk
+l_mtspr 110000 ..... a:5 b:5 ........... k=%mtspr_k
+
+l_maci 010011 ----- a:5 i:s16
+
+l_movhi 000110 d:5 ----0 k:16
+l_macrc 000110 d:5 ----1 00000000 00000000
+
+l_adrp 000010 d:5 i:s21
+
+####
+# Arithmetic Instructions
+####
+
+l_exths 111000 d:5 a:5 ----- - 0000 -- 1100
+l_extbs 111000 d:5 a:5 ----- - 0001 -- 1100
+l_exthz 111000 d:5 a:5 ----- - 0010 -- 1100
+l_extbz 111000 d:5 a:5 ----- - 0011 -- 1100
+
+l_add 111000 d:5 a:5 b:5 - 00 ---- 0000
+l_addc 111000 d:5 a:5 b:5 - 00 ---- 0001
+l_sub 111000 d:5 a:5 b:5 - 00 ---- 0010
+l_and 111000 d:5 a:5 b:5 - 00 ---- 0011
+l_or 111000 d:5 a:5 b:5 - 00 ---- 0100
+l_xor 111000 d:5 a:5 b:5 - 00 ---- 0101
+l_cmov 111000 d:5 a:5 b:5 - 00 ---- 1110
+l_ff1 111000 d:5 a:5 ----- - 00 ---- 1111
+l_fl1 111000 d:5 a:5 ----- - 01 ---- 1111
+
+l_sll 111000 d:5 a:5 b:5 - 0000 -- 1000
+l_srl 111000 d:5 a:5 b:5 - 0001 -- 1000
+l_sra 111000 d:5 a:5 b:5 - 0010 -- 1000
+l_ror 111000 d:5 a:5 b:5 - 0011 -- 1000
+
+l_mul 111000 d:5 a:5 b:5 - 11 ---- 0110
+l_mulu 111000 d:5 a:5 b:5 - 11 ---- 1011
+l_div 111000 d:5 a:5 b:5 - 11 ---- 1001
+l_divu 111000 d:5 a:5 b:5 - 11 ---- 1010
+
+l_muld 111000 ----- a:5 b:5 - 11 ---- 0111
+l_muldu 111000 ----- a:5 b:5 - 11 ---- 1100
+
+l_mac 110001 ----- a:5 b:5 ------- 0001
+l_macu 110001 ----- a:5 b:5 ------- 0011
+l_msb 110001 ----- a:5 b:5 ------- 0010
+l_msbu 110001 ----- a:5 b:5 ------- 0100
+
+l_slli 101110 d:5 a:5 -------- 00 l:6
+l_srli 101110 d:5 a:5 -------- 01 l:6
+l_srai 101110 d:5 a:5 -------- 10 l:6
+l_rori 101110 d:5 a:5 -------- 11 l:6
+
+####
+# Compare Instructions
+####
+
+l_sfeq 111001 00000 a:5 b:5 -----------
+l_sfne 111001 00001 a:5 b:5 -----------
+l_sfgtu 111001 00010 a:5 b:5 -----------
+l_sfgeu 111001 00011 a:5 b:5 -----------
+l_sfltu 111001 00100 a:5 b:5 -----------
+l_sfleu 111001 00101 a:5 b:5 -----------
+l_sfgts 111001 01010 a:5 b:5 -----------
+l_sfges 111001 01011 a:5 b:5 -----------
+l_sflts 111001 01100 a:5 b:5 -----------
+l_sfles 111001 01101 a:5 b:5 -----------
+
+l_sfeqi 101111 00000 a:5 i:s16
+l_sfnei 101111 00001 a:5 i:s16
+l_sfgtui 101111 00010 a:5 i:s16
+l_sfgeui 101111 00011 a:5 i:s16
+l_sfltui 101111 00100 a:5 i:s16
+l_sfleui 101111 00101 a:5 i:s16
+l_sfgtsi 101111 01010 a:5 i:s16
+l_sfgesi 101111 01011 a:5 i:s16
+l_sfltsi 101111 01100 a:5 i:s16
+l_sflesi 101111 01101 a:5 i:s16
+
+####
+# FP Instructions
+####
+
+lf_add_s 110010 d:5 a:5 b:5 --- 00000000
+lf_sub_s 110010 d:5 a:5 b:5 --- 00000001
+lf_mul_s 110010 d:5 a:5 b:5 --- 00000010
+lf_div_s 110010 d:5 a:5 b:5 --- 00000011
+lf_rem_s 110010 d:5 a:5 b:5 --- 00000110
+lf_madd_s 110010 d:5 a:5 b:5 --- 00000111
+
+lf_itof_s 110010 d:5 a:5 00000 --- 00000100
+lf_ftoi_s 110010 d:5 a:5 00000 --- 00000101
+
+lf_sfeq_s 110010 ----- a:5 b:5 --- 00001000
+lf_sfne_s 110010 ----- a:5 b:5 --- 00001001
+lf_sfgt_s 110010 ----- a:5 b:5 --- 00001010
+lf_sfge_s 110010 ----- a:5 b:5 --- 00001011
+lf_sflt_s 110010 ----- a:5 b:5 --- 00001100
+lf_sfle_s 110010 ----- a:5 b:5 --- 00001101
+lf_sfueq_s 110010 ----- a:5 b:5 --- 00101000
+lf_sfuge_s 110010 ----- a:5 b:5 --- 00101011
+lf_sfugt_s 110010 ----- a:5 b:5 --- 00101010
+lf_sfule_s 110010 ----- a:5 b:5 --- 00101101
+lf_sfult_s 110010 ----- a:5 b:5 --- 00101100
+lf_sfun_s 110010 ----- a:5 b:5 --- 00101110
+
+####
+# DP Instructions
+####
+
+@dab_pair ...... d:5 a:5 b:5 dp:1 ap:1 bp:1 ........ &dab_pair
+@ab_pair ...... ..... a:5 b:5 . ap:1 bp:1 ........ &ab_pair
+@da_pair ...... d:5 a:5 ..... dp:1 ap:1 . ........ &da_pair
+
+lf_add_d 110010 ..... ..... ..... ... 00010000 @dab_pair
+lf_sub_d 110010 ..... ..... ..... ... 00010001 @dab_pair
+lf_mul_d 110010 ..... ..... ..... ... 00010010 @dab_pair
+lf_div_d 110010 ..... ..... ..... ... 00010011 @dab_pair
+lf_rem_d 110010 ..... ..... ..... ... 00010110 @dab_pair
+lf_madd_d 110010 ..... ..... ..... ... 00010111 @dab_pair
+
+lf_itof_d 110010 ..... ..... 00000 ..0 00010100 @da_pair
+lf_ftoi_d 110010 ..... ..... 00000 ..0 00010101 @da_pair
+
+lf_stod_d 110010 d:5 a:5 00000 dp:1 0 0 00110100
+lf_dtos_d 110010 d:5 a:5 00000 0 ap:1 0 00110101
+
+lf_sfeq_d 110010 00000 ..... ..... 0.. 00011000 @ab_pair
+lf_sfne_d 110010 00000 ..... ..... 0.. 00011001 @ab_pair
+lf_sfgt_d 110010 00000 ..... ..... 0.. 00011010 @ab_pair
+lf_sfge_d 110010 00000 ..... ..... 0.. 00011011 @ab_pair
+lf_sflt_d 110010 00000 ..... ..... 0.. 00011100 @ab_pair
+lf_sfle_d 110010 00000 ..... ..... 0.. 00011101 @ab_pair
+lf_sfueq_d 110010 00000 ..... ..... 0.. 00111000 @ab_pair
+lf_sfuge_d 110010 00000 ..... ..... 0.. 00111011 @ab_pair
+lf_sfugt_d 110010 00000 ..... ..... 0.. 00111010 @ab_pair
+lf_sfule_d 110010 00000 ..... ..... 0.. 00111101 @ab_pair
+lf_sfult_d 110010 00000 ..... ..... 0.. 00111100 @ab_pair
+lf_sfun_d 110010 00000 ..... ..... 0.. 00111110 @ab_pair
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
new file mode 100644
index 000000000..19223e3f2
--- /dev/null
+++ b/target/openrisc/interrupt.c
@@ -0,0 +1,120 @@
+/*
+ * OpenRISC interrupt.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/loader.h"
+#endif
+
+void openrisc_cpu_do_interrupt(CPUState *cs)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ int exception = cs->exception_index;
+
+ env->epcr = env->pc;
+ if (exception == EXCP_SYSCALL) {
+ env->epcr += 4;
+ }
+ /* When we have an illegal instruction, the error effective address
+ shall be set to the illegal instruction address. */
+ if (exception == EXCP_ILLEGAL) {
+ env->eear = env->pc;
+ }
+
+ /* During exceptions, esr is populated with the pre-exception sr. */
+ env->esr = cpu_get_sr(env);
+ /* In parallel, sr is updated to disable the mmu, interrupts and
+ timers, and to set the delay slot exception flag. */
+ env->sr &= ~SR_DME;
+ env->sr &= ~SR_IME;
+ env->sr |= SR_SM;
+ env->sr &= ~SR_IEE;
+ env->sr &= ~SR_TEE;
+ env->pmr &= ~PMR_DME;
+ env->pmr &= ~PMR_SME;
+ env->lock_addr = -1;
+
+ /* Set/clear dsx to indicate if we are in a delay slot exception. */
+ if (env->dflag) {
+ env->dflag = 0;
+ env->sr |= SR_DSX;
+ env->epcr -= 4;
+ } else {
+ env->sr &= ~SR_DSX;
+ }
+
+ if (exception > 0 && exception < EXCP_NR) {
+ static const char * const int_name[EXCP_NR] = {
+ [EXCP_RESET] = "RESET",
+ [EXCP_BUSERR] = "BUSERR (bus error)",
+ [EXCP_DPF] = "DPF (data protection fault)",
+ [EXCP_IPF] = "IPF (code protection fault)",
+ [EXCP_TICK] = "TICK (timer interrupt)",
+ [EXCP_ALIGN] = "ALIGN",
+ [EXCP_ILLEGAL] = "ILLEGAL",
+ [EXCP_INT] = "INT (device interrupt)",
+ [EXCP_DTLBMISS] = "DTLBMISS (data tlb miss)",
+ [EXCP_ITLBMISS] = "ITLBMISS (code tlb miss)",
+ [EXCP_RANGE] = "RANGE",
+ [EXCP_SYSCALL] = "SYSCALL",
+ [EXCP_FPE] = "FPE",
+ [EXCP_TRAP] = "TRAP",
+ };
+
+ qemu_log_mask(CPU_LOG_INT, "INT: %s\n", int_name[exception]);
+
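+ /* Per the OR1K layout, exception vectors are 256 bytes apart, so
+ the vector address is the exception number shifted left by 8:
+ e.g. EXCP_TICK (0x5) traps to 0x500. EVBAR, when present,
+ relocates the whole vector table, and SR_EPH moves it to the
+ high end of the address space. */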
+ hwaddr vect_pc = exception << 8;
+ if (env->cpucfgr & CPUCFGR_EVBARP) {
+ vect_pc |= env->evbar;
+ }
+ if (env->sr & SR_EPH) {
+ vect_pc |= 0xf0000000;
+ }
+ env->pc = vect_pc;
+ } else {
+ cpu_abort(cs, "Unhandled exception 0x%x\n", exception);
+ }
+
+ cs->exception_index = -1;
+}
+
+bool openrisc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ int idx = -1;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sr & SR_IEE)) {
+ idx = EXCP_INT;
+ }
+ if ((interrupt_request & CPU_INTERRUPT_TIMER) && (env->sr & SR_TEE)) {
+ idx = EXCP_TICK;
+ }
+ if (idx >= 0) {
+ cs->exception_index = idx;
+ openrisc_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c
new file mode 100644
index 000000000..ab4ea88b6
--- /dev/null
+++ b/target/openrisc/interrupt_helper.c
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC interrupt helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+void HELPER(rfe)(CPUOpenRISCState *env)
+{
+ env->pc = env->epcr;
+ env->lock_addr = -1;
+ cpu_set_sr(env, env->esr);
+}
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
new file mode 100644
index 000000000..6239725c4
--- /dev/null
+++ b/target/openrisc/machine.c
@@ -0,0 +1,144 @@
+/*
+ * OpenRISC Machine
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "migration/cpu.h"
+
+static const VMStateDescription vmstate_tlb_entry = {
+ .name = "tlb_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(mr, OpenRISCTLBEntry),
+ VMSTATE_UINTTL(tr, OpenRISCTLBEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cpu_tlb = {
+ .name = "cpu_tlb",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(itlb, CPUOpenRISCTLBContext, TLB_SIZE, 0,
+ vmstate_tlb_entry, OpenRISCTLBEntry),
+ VMSTATE_STRUCT_ARRAY(dtlb, CPUOpenRISCTLBContext, TLB_SIZE, 0,
+ vmstate_tlb_entry, OpenRISCTLBEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int get_sr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ CPUOpenRISCState *env = opaque;
+ cpu_set_sr(env, qemu_get_be32(f));
+ return 0;
+}
+
+static int put_sr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ CPUOpenRISCState *env = opaque;
+ qemu_put_be32(f, cpu_get_sr(env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_sr = {
+ .name = "sr",
+ .get = get_sr,
+ .put = put_sr,
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 6,
+ .minimum_version_id = 6,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32),
+ VMSTATE_UINTTL(pc, CPUOpenRISCState),
+ VMSTATE_UINTTL(ppc, CPUOpenRISCState),
+ VMSTATE_UINTTL(jmp_pc, CPUOpenRISCState),
+ VMSTATE_UINTTL(lock_addr, CPUOpenRISCState),
+ VMSTATE_UINTTL(lock_value, CPUOpenRISCState),
+ VMSTATE_UINTTL(epcr, CPUOpenRISCState),
+ VMSTATE_UINTTL(eear, CPUOpenRISCState),
+
+ /* Save the architecture value of the SR, not the internally
+ expanded version. Since this architecture value does not
+ exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "sr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_sr,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+
+ VMSTATE_UINT32(vr, CPUOpenRISCState),
+ VMSTATE_UINT32(upr, CPUOpenRISCState),
+ VMSTATE_UINT32(cpucfgr, CPUOpenRISCState),
+ VMSTATE_UINT32(dmmucfgr, CPUOpenRISCState),
+ VMSTATE_UINT32(immucfgr, CPUOpenRISCState),
+ VMSTATE_UINT32(evbar, CPUOpenRISCState),
+ VMSTATE_UINT32(pmr, CPUOpenRISCState),
+ VMSTATE_UINT32(esr, CPUOpenRISCState),
+ VMSTATE_UINT32(fpcsr, CPUOpenRISCState),
+ VMSTATE_UINT64(mac, CPUOpenRISCState),
+
+ VMSTATE_STRUCT(tlb, CPUOpenRISCState, 1,
+ vmstate_cpu_tlb, CPUOpenRISCTLBContext),
+
+ VMSTATE_TIMER_PTR(timer, CPUOpenRISCState),
+ VMSTATE_UINT32(ttmr, CPUOpenRISCState),
+
+ VMSTATE_UINT32(picmr, CPUOpenRISCState),
+ VMSTATE_UINT32(picsr, CPUOpenRISCState),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ OpenRISCCPU *cpu = opaque;
+ CPUOpenRISCState *env = &cpu->env;
+
+ /* Update env->fp_status to match env->fpcsr. */
+ cpu_set_fpcsr(env, env->fpcsr);
+ return 0;
+}
+
+const VMStateDescription vmstate_openrisc_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build
new file mode 100644
index 000000000..84322086e
--- /dev/null
+++ b/target/openrisc/meson.build
@@ -0,0 +1,25 @@
+gen = decodetree.process('insns.decode')
+
+openrisc_ss = ss.source_set()
+openrisc_ss.add(gen)
+openrisc_ss.add(files(
+ 'cpu.c',
+ 'disas.c',
+ 'exception.c',
+ 'exception_helper.c',
+ 'fpu_helper.c',
+ 'gdbstub.c',
+ 'interrupt_helper.c',
+ 'sys_helper.c',
+ 'translate.c',
+))
+
+openrisc_softmmu_ss = ss.source_set()
+openrisc_softmmu_ss.add(files(
+ 'interrupt.c',
+ 'machine.c',
+ 'mmu.c',
+))
+
+target_arch += {'openrisc': openrisc_ss}
+target_softmmu_arch += {'openrisc': openrisc_softmmu_ss}
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
new file mode 100644
index 000000000..e561ef245
--- /dev/null
+++ b/target/openrisc/mmu.c
@@ -0,0 +1,171 @@
+/*
+ * OpenRISC MMU.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "hw/loader.h"
+
+static inline void get_phys_nommu(hwaddr *phys_addr, int *prot,
+ target_ulong address)
+{
+ *phys_addr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+}
+
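+/*
+ * Look up ADDR in both the ITLB and DTLB at once.  Returns 0 on
+ * success (with *phys_addr and *prot filled in) or the EXCP_* value
+ * to raise: a protection fault if a valid entry denies the access,
+ * otherwise the appropriate TLB-miss exception.
+ */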
+static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
+ target_ulong addr, int need, bool super)
+{
+ int idx = (addr >> TARGET_PAGE_BITS) & TLB_MASK;
+ uint32_t imr = cpu->env.tlb.itlb[idx].mr;
+ uint32_t itr = cpu->env.tlb.itlb[idx].tr;
+ uint32_t dmr = cpu->env.tlb.dtlb[idx].mr;
+ uint32_t dtr = cpu->env.tlb.dtlb[idx].tr;
+ int right, match, valid;
+
+ /* If the ITLB and DTLB indexes map to the same page, we want to
+ load all permissions at once. If the destination pages do
+ not match, zap the one we don't need. */
+ if (unlikely((itr ^ dtr) & TARGET_PAGE_MASK)) {
+ if (need & PAGE_EXEC) {
+ dmr = dtr = 0;
+ } else {
+ imr = itr = 0;
+ }
+ }
+
+ /* Check if either of the entries matches the source address. */
+ match = (imr ^ addr) & TARGET_PAGE_MASK ? 0 : PAGE_EXEC;
+ match |= (dmr ^ addr) & TARGET_PAGE_MASK ? 0 : PAGE_READ | PAGE_WRITE;
+
+ /* Check if either of the entries is valid. */
+ valid = imr & 1 ? PAGE_EXEC : 0;
+ valid |= dmr & 1 ? PAGE_READ | PAGE_WRITE : 0;
+ valid &= match;
+
+ /* Collect the permissions from the entries. */
+ right = itr & (super ? SXE : UXE) ? PAGE_EXEC : 0;
+ right |= dtr & (super ? SRE : URE) ? PAGE_READ : 0;
+ right |= dtr & (super ? SWE : UWE) ? PAGE_WRITE : 0;
+ right &= valid;
+
+ /* Note that above we validated that itr and dtr match on the page.
+ So ORing them together changes nothing, and we don't have to
+ check which one we needed. We also want to store to these
+ variables even on failure, as that avoids compiler warnings. */
+ *phys_addr = ((itr | dtr) & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
+ *prot = right;
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "MMU lookup: need %d match %d valid %d right %d -> %s\n",
+ need, match, valid, right, (need & right) ? "OK" : "FAIL");
+
+ /* Check the collective permissions are present. */
+ if (likely(need & right)) {
+ return 0; /* success! */
+ }
+
+ /* Determine what kind of failure we have. */
+ if (need & valid) {
+ return need & PAGE_EXEC ? EXCP_IPF : EXCP_DPF;
+ } else {
+ return need & PAGE_EXEC ? EXCP_ITLBMISS : EXCP_DTLBMISS;
+ }
+}
+
+static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
+ int exception)
+{
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = exception;
+ cpu->env.eear = address;
+ cpu->env.lock_addr = -1;
+}
+
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ int excp = EXCP_DPF;
+ int prot;
+ hwaddr phys_addr;
+
+ if (mmu_idx == MMU_NOMMU_IDX) {
+ /* The mmu is disabled; lookups never fail. */
+ get_phys_nommu(&phys_addr, &prot, addr);
+ excp = 0;
+ } else {
+ bool super = mmu_idx == MMU_SUPERVISOR_IDX;
+ int need = (access_type == MMU_INST_FETCH ? PAGE_EXEC
+ : access_type == MMU_DATA_STORE ? PAGE_WRITE
+ : PAGE_READ);
+ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr, need, super);
+ }
+
+ if (likely(excp == 0)) {
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK,
+ phys_addr & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+
+ raise_mmu_exception(cpu, addr, excp);
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ int prot, excp, sr = cpu->env.sr;
+ hwaddr phys_addr;
+
+ switch (sr & (SR_DME | SR_IME)) {
+ case SR_DME | SR_IME:
+ /* The mmu is definitely enabled. */
+ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr,
+ PAGE_EXEC | PAGE_READ | PAGE_WRITE,
+ (sr & SR_SM) != 0);
+ return excp ? -1 : phys_addr;
+
+ default:
+ /* The mmu is partially enabled, and we don't really have
+ a "real" access type. Begin by trying the mmu, but if
+ that fails try again without. */
+ excp = get_phys_mmu(cpu, &phys_addr, &prot, addr,
+ PAGE_EXEC | PAGE_READ | PAGE_WRITE,
+ (sr & SR_SM) != 0);
+ if (!excp) {
+ return phys_addr;
+ }
+ /* fallthru */
+
+ case 0:
+ /* The mmu is definitely disabled; lookups never fail. */
+ get_phys_nommu(&phys_addr, &prot, addr);
+ return phys_addr;
+ }
+}
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
new file mode 100644
index 000000000..48674231e
--- /dev/null
+++ b/target/openrisc/sys_helper.c
@@ -0,0 +1,316 @@
+/*
+ * OpenRISC system instructions helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/boards.h"
+#endif
+
+#define TO_SPR(group, number) (((group) << 11) + (number))
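+
+/*
+ * SPR addresses are a 5-bit group plus an 11-bit index, so e.g.
+ * TO_SPR(0, 17) is the SR at address 0x11 and TO_SPR(9, 0) is the
+ * PICMR at 0x4800.
+ */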
+
+void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
+{
+#ifndef CONFIG_USER_ONLY
+ OpenRISCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ target_ulong mr;
+ int idx;
+#endif
+
+ switch (spr) {
+#ifndef CONFIG_USER_ONLY
+ case TO_SPR(0, 11): /* EVBAR */
+ env->evbar = rb;
+ break;
+
+ case TO_SPR(0, 16): /* NPC */
+ cpu_restore_state(cs, GETPC(), true);
+ /* ??? Mirror or1ksim in not trashing delayed branch state
+ when "jumping" to the current instruction. */
+ if (env->pc != rb) {
+ env->pc = rb;
+ env->dflag = 0;
+ cpu_loop_exit(cs);
+ }
+ break;
+
+ case TO_SPR(0, 17): /* SR */
+ cpu_set_sr(env, rb);
+ break;
+
+ case TO_SPR(0, 32): /* EPCR */
+ env->epcr = rb;
+ break;
+
+ case TO_SPR(0, 48): /* EEAR */
+ env->eear = rb;
+ break;
+
+ case TO_SPR(0, 64): /* ESR */
+ env->esr = rb;
+ break;
+
+ case TO_SPR(0, 1024) ... TO_SPR(0, 1024 + (16 * 32)): /* Shadow GPRs */
+ idx = (spr - 1024);
+ env->shadow_gpr[idx / 32][idx % 32] = rb;
+ break;
+
+ case TO_SPR(1, 512) ... TO_SPR(1, 512 + TLB_SIZE - 1): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ mr = env->tlb.dtlb[idx].mr;
+ if (mr & 1) {
+ tlb_flush_page(cs, mr & TARGET_PAGE_MASK);
+ }
+ if (rb & 1) {
+ tlb_flush_page(cs, rb & TARGET_PAGE_MASK);
+ }
+ env->tlb.dtlb[idx].mr = rb;
+ break;
+ case TO_SPR(1, 640) ... TO_SPR(1, 640 + TLB_SIZE - 1): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ env->tlb.dtlb[idx].tr = rb;
+ break;
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(2, 512) ... TO_SPR(2, 512 + TLB_SIZE - 1): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ mr = env->tlb.itlb[idx].mr;
+ if (mr & 1) {
+ tlb_flush_page(cs, mr & TARGET_PAGE_MASK);
+ }
+ if (rb & 1) {
+ tlb_flush_page(cs, rb & TARGET_PAGE_MASK);
+ }
+ env->tlb.itlb[idx].mr = rb;
+ break;
+ case TO_SPR(2, 640) ... TO_SPR(2, 640 + TLB_SIZE - 1): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ env->tlb.itlb[idx].tr = rb;
+ break;
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(5, 1): /* MACLO */
+ env->mac = deposit64(env->mac, 0, 32, rb);
+ break;
+ case TO_SPR(5, 2): /* MACHI */
+ env->mac = deposit64(env->mac, 32, 32, rb);
+ break;
+ case TO_SPR(8, 0): /* PMR */
+ env->pmr = rb;
+ if (env->pmr & PMR_DME || env->pmr & PMR_SME) {
+ cpu_restore_state(cs, GETPC(), true);
+ env->pc += 4;
+ cs->halted = 1;
+ raise_exception(cpu, EXCP_HALTED);
+ }
+ break;
+ case TO_SPR(9, 0): /* PICMR */
+ env->picmr = rb;
+ break;
+ case TO_SPR(9, 2): /* PICSR */
+ env->picsr &= ~rb;
+ break;
+ case TO_SPR(10, 0): /* TTMR */
+ {
+ if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) {
+ switch (rb & TTMR_M) {
+ case TIMER_NONE:
+ cpu_openrisc_count_stop(cpu);
+ break;
+ case TIMER_INTR:
+ case TIMER_SHOT:
+ case TIMER_CONT:
+ cpu_openrisc_count_start(cpu);
+ break;
+ default:
+ break;
+ }
+ }
+
+ int ip = env->ttmr & TTMR_IP;
+
+ if (rb & TTMR_IP) { /* Keep IP bit. */
+ env->ttmr = (rb & ~TTMR_IP) | ip;
+ } else { /* Clear IP bit. */
+ env->ttmr = rb & ~TTMR_IP;
+ cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+ }
+
+ cpu_openrisc_timer_update(cpu);
+ }
+ break;
+
+ case TO_SPR(10, 1): /* TTCR */
+ cpu_openrisc_count_set(cpu, rb);
+ cpu_openrisc_timer_update(cpu);
+ break;
+#endif
+
+ case TO_SPR(0, 20): /* FPCSR */
+ cpu_set_fpcsr(env, rb);
+ break;
+ }
+}
+
+target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
+ target_ulong spr)
+{
+#ifndef CONFIG_USER_ONLY
+ MachineState *ms = MACHINE(qdev_get_machine());
+ OpenRISCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+ int idx;
+#endif
+
+ switch (spr) {
+#ifndef CONFIG_USER_ONLY
+ case TO_SPR(0, 0): /* VR */
+ return env->vr;
+
+ case TO_SPR(0, 1): /* UPR */
+ return env->upr;
+
+ case TO_SPR(0, 2): /* CPUCFGR */
+ return env->cpucfgr;
+
+ case TO_SPR(0, 3): /* DMMUCFGR */
+ return env->dmmucfgr;
+
+ case TO_SPR(0, 4): /* IMMUCFGR */
+ return env->immucfgr;
+
+ case TO_SPR(0, 9): /* VR2 */
+ return env->vr2;
+
+ case TO_SPR(0, 10): /* AVR */
+ return env->avr;
+
+ case TO_SPR(0, 11): /* EVBAR */
+ return env->evbar;
+
+ case TO_SPR(0, 16): /* NPC (equals PC) */
+ cpu_restore_state(cs, GETPC(), false);
+ return env->pc;
+
+ case TO_SPR(0, 17): /* SR */
+ return cpu_get_sr(env);
+
+ case TO_SPR(0, 18): /* PPC */
+ cpu_restore_state(cs, GETPC(), false);
+ return env->ppc;
+
+ case TO_SPR(0, 32): /* EPCR */
+ return env->epcr;
+
+ case TO_SPR(0, 48): /* EEAR */
+ return env->eear;
+
+ case TO_SPR(0, 64): /* ESR */
+ return env->esr;
+
+ case TO_SPR(0, 128): /* COREID */
+ return cpu->parent_obj.cpu_index;
+
+ case TO_SPR(0, 129): /* NUMCORES */
+ return ms->smp.max_cpus;
+
+ case TO_SPR(0, 1024) ... TO_SPR(0, 1024 + (16 * 32)): /* Shadow GPRs */
+ idx = (spr - 1024);
+ return env->shadow_gpr[idx / 32][idx % 32];
+
+ case TO_SPR(1, 512) ... TO_SPR(1, 512 + TLB_SIZE - 1): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ return env->tlb.dtlb[idx].mr;
+
+ case TO_SPR(1, 640) ... TO_SPR(1, 640 + TLB_SIZE - 1): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ return env->tlb.dtlb[idx].tr;
+
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(2, 512) ... TO_SPR(2, 512 + TLB_SIZE - 1): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ return env->tlb.itlb[idx].mr;
+
+ case TO_SPR(2, 640) ... TO_SPR(2, 640 + TLB_SIZE - 1): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ return env->tlb.itlb[idx].tr;
+
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(5, 1): /* MACLO */
+ return (uint32_t)env->mac;
+ case TO_SPR(5, 2): /* MACHI */
+ return env->mac >> 32;
+
+ case TO_SPR(8, 0): /* PMR */
+ return env->pmr;
+
+ case TO_SPR(9, 0): /* PICMR */
+ return env->picmr;
+
+ case TO_SPR(9, 2): /* PICSR */
+ return env->picsr;
+
+ case TO_SPR(10, 0): /* TTMR */
+ return env->ttmr;
+
+ case TO_SPR(10, 1): /* TTCR */
+ cpu_openrisc_count_update(cpu);
+ return cpu_openrisc_count_get(cpu);
+#endif
+
+ case TO_SPR(0, 20): /* FPCSR */
+ return env->fpcsr;
+ }
+
+ /* The SPR was not handled above: return rd, which was passed in,
+ so the destination register is left unchanged. */
+ return rd;
+}
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
new file mode 100644
index 000000000..ca79e609d
--- /dev/null
+++ b/target/openrisc/translate.c
@@ -0,0 +1,1735 @@
+/*
+ * OpenRISC translation
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg/tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/bitops.h"
+#include "qemu/qemu-print.h"
+#include "exec/cpu_ldst.h"
+#include "exec/translator.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/gen-icount.h"
+
+#include "exec/log.h"
+
+/* is_jmp field values */
+#define DISAS_EXIT DISAS_TARGET_0 /* force exit to main loop */
+#define DISAS_JUMP DISAS_TARGET_1 /* exit via jmp_pc/jmp_pc_imm */
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ uint32_t mem_idx;
+ uint32_t tb_flags;
+ uint32_t delayed_branch;
+ uint32_t cpucfgr;
+ uint32_t avr;
+
+ /* If not -1, jmp_pc contains this value and so is a direct jump. */
+ target_ulong jmp_pc_imm;
+
+ /* The temporary corresponding to register 0 for this compilation. */
+ TCGv R0;
+ /* The constant zero. */
+ TCGv zero;
+} DisasContext;
+
+static inline bool is_user(DisasContext *dc)
+{
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ return !(dc->tb_flags & TB_FLAGS_SM);
+#endif
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+static TCGv cpu_sr;
+static TCGv cpu_regs[32];
+static TCGv cpu_pc;
+static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
+static TCGv cpu_ppc;
+static TCGv cpu_sr_f; /* bf/bnf, F flag taken */
+static TCGv cpu_sr_cy; /* carry (unsigned overflow) */
+static TCGv cpu_sr_ov; /* signed overflow */
+static TCGv cpu_lock_addr;
+static TCGv cpu_lock_value;
+static TCGv_i32 fpcsr;
+static TCGv_i64 cpu_mac; /* MACHI:MACLO */
+static TCGv_i32 cpu_dflag;
+
+void openrisc_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ };
+ int i;
+
+ cpu_sr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, sr), "sr");
+ cpu_dflag = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUOpenRISCState, dflag),
+ "dflag");
+ cpu_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, pc), "pc");
+ cpu_ppc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, ppc), "ppc");
+ jmp_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
+ cpu_sr_f = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, sr_f), "sr_f");
+ cpu_sr_cy = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, sr_cy), "sr_cy");
+ cpu_sr_ov = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, sr_ov), "sr_ov");
+ cpu_lock_addr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, lock_addr),
+ "lock_addr");
+ cpu_lock_value = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, lock_value),
+ "lock_value");
+ fpcsr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUOpenRISCState, fpcsr),
+ "fpcsr");
+ cpu_mac = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUOpenRISCState, mac),
+ "mac");
+ for (i = 0; i < 32; i++) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState,
+ shadow_gpr[0][i]),
+ regnames[i]);
+ }
+}
+
+static void gen_exception(DisasContext *dc, unsigned int excp)
+{
+ gen_helper_exception(cpu_env, tcg_constant_i32(excp));
+}
+
+static void gen_illegal_exception(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ gen_exception(dc, EXCP_ILLEGAL);
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static bool check_v1_3(DisasContext *dc)
+{
+ return dc->avr >= 0x01030000;
+}
+
+static bool check_of32s(DisasContext *dc)
+{
+ return dc->cpucfgr & CPUCFGR_OF32S;
+}
+
+static bool check_of64a32s(DisasContext *dc)
+{
+ return dc->cpucfgr & CPUCFGR_OF64A32S;
+}
+
+static TCGv cpu_R(DisasContext *dc, int reg)
+{
+ if (reg == 0) {
+ return dc->R0;
+ } else {
+ return cpu_regs[reg];
+ }
+}
+
+/*
+ * We're about to write to REG. On the off-chance that the user is
+ * writing to R0, reinstate the architectural register.
+ */
+static void check_r0_write(DisasContext *dc, int reg)
+{
+ if (unlikely(reg == 0)) {
+ dc->R0 = cpu_regs[0];
+ }
+}
+
+static void gen_ove_cy(DisasContext *dc)
+{
+ if (dc->tb_flags & SR_OVE) {
+ gen_helper_ove_cy(cpu_env);
+ }
+}
+
+static void gen_ove_ov(DisasContext *dc)
+{
+ if (dc->tb_flags & SR_OVE) {
+ gen_helper_ove_ov(cpu_env);
+ }
+}
+
+static void gen_ove_cyov(DisasContext *dc)
+{
+ if (dc->tb_flags & SR_OVE) {
+ gen_helper_ove_cyov(cpu_env);
+ }
+}
+
+static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv res = tcg_temp_new();
+
+ tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero);
+ tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_tl(t0, res, srcb);
+ tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_temp_free(t0);
+
+ tcg_gen_mov_tl(dest, res);
+ tcg_temp_free(res);
+
+ gen_ove_cyov(dc);
+}
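+
+/*
+ * A minimal sketch (plain C rather than TCG ops; the names are
+ * illustrative, not part of this patch) of the flag computation
+ * emitted above: the double-word add yields the unsigned carry, and
+ * signed overflow is "same-sign operands, different-sign result":
+ *
+ *     uint32_t res = a + b;
+ *     uint32_t cy  = res < a;                       // carry out
+ *     uint32_t ov  = (~(a ^ b) & (res ^ b)) >> 31;  // signed overflow
+ */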
+
+static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv res = tcg_temp_new();
+
+ tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
+ tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
+ tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
+ tcg_gen_xor_tl(t0, res, srcb);
+ tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
+ tcg_temp_free(t0);
+
+ tcg_gen_mov_tl(dest, res);
+ tcg_temp_free(res);
+
+ gen_ove_cyov(dc);
+}
+
+static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv res = tcg_temp_new();
+
+ tcg_gen_sub_tl(res, srca, srcb);
+ tcg_gen_xor_tl(cpu_sr_cy, srca, srcb);
+ tcg_gen_xor_tl(cpu_sr_ov, res, srcb);
+ tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy);
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb);
+
+ tcg_gen_mov_tl(dest, res);
+ tcg_temp_free(res);
+
+ gen_ove_cyov(dc);
+}
+
+static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
+ tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
+ gen_ove_ov(dc);
+}
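+
+/*
+ * An illustrative restatement (assuming a 32-bit target) of the
+ * overflow rule that the sari/setcond pair above implements: a signed
+ * multiply overflows exactly when the high half of the double-width
+ * product is not the sign extension of the low half, i.e.
+ *
+ *     int64_t prod = (int64_t)a * (int64_t)b;
+ *     bool ov = prod != (int64_t)(int32_t)prod;
+ */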
+
+static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0);
+
+ gen_ove_cy(dc);
+}
+
+static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
+    /* The result of divide-by-zero is undefined.
+       Suppress the host-side exception by dividing by 1. */
+ tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
+ tcg_gen_div_tl(dest, srca, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
+ gen_ove_ov(dc);
+}
+
+static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
+    /* The result of divide-by-zero is undefined.
+       Suppress the host-side exception by dividing by 1. */
+ tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
+ tcg_gen_divu_tl(dest, srca, t0);
+ tcg_temp_free(t0);
+
+ gen_ove_cy(dc);
+}
+
+static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t1, srca);
+ tcg_gen_ext_tl_i64(t2, srcb);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_tl(cpu_sr_ov, 0);
+ } else {
+ TCGv_i64 high = tcg_temp_new_i64();
+
+ tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
+ tcg_gen_sari_i64(t1, cpu_mac, 63);
+ tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
+ tcg_temp_free_i64(high);
+ tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
+ tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
+
+ gen_ove_ov(dc);
+ }
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(t1, srca);
+ tcg_gen_extu_tl_i64(t2, srcb);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_gen_mul_i64(cpu_mac, t1, t2);
+ tcg_gen_movi_tl(cpu_sr_cy, 0);
+ } else {
+ TCGv_i64 high = tcg_temp_new_i64();
+
+ tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
+ tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
+ tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
+ tcg_temp_free_i64(high);
+
+ gen_ove_cy(dc);
+ }
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t1, srca);
+ tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_mul_i64(t1, t1, t2);
+
+    /* Note that overflow is only computed during the addition stage. */
+ tcg_gen_xor_i64(t2, cpu_mac, t1);
+ tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
+ tcg_gen_xor_i64(t1, t1, cpu_mac);
+ tcg_gen_andc_i64(t1, t1, t2);
+ tcg_temp_free_i64(t2);
+
+#if TARGET_LONG_BITS == 32
+ tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
+#else
+ tcg_gen_mov_i64(cpu_sr_ov, t1);
+#endif
+ tcg_temp_free_i64(t1);
+
+ gen_ove_ov(dc);
+}
+
+static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(t1, srca);
+ tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_mul_i64(t1, t1, t2);
+ tcg_temp_free_i64(t2);
+
+    /* Note that overflow is only computed during the addition stage. */
+ tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
+ tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
+ tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
+ tcg_temp_free_i64(t1);
+
+ gen_ove_cy(dc);
+}
+
+static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t1, srca);
+ tcg_gen_ext_tl_i64(t2, srcb);
+ tcg_gen_mul_i64(t1, t1, t2);
+
+    /* Note that overflow is only computed during the subtraction stage. */
+ tcg_gen_xor_i64(t2, cpu_mac, t1);
+ tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
+ tcg_gen_xor_i64(t1, t1, cpu_mac);
+ tcg_gen_and_i64(t1, t1, t2);
+ tcg_temp_free_i64(t2);
+
+#if TARGET_LONG_BITS == 32
+ tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
+#else
+ tcg_gen_mov_i64(cpu_sr_ov, t1);
+#endif
+ tcg_temp_free_i64(t1);
+
+ gen_ove_ov(dc);
+}
+
+static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(t1, srca);
+ tcg_gen_extu_tl_i64(t2, srcb);
+ tcg_gen_mul_i64(t1, t1, t2);
+
+    /* Note that overflow is only computed during the subtraction stage. */
+ tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
+ tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
+ tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t1);
+
+ gen_ove_cy(dc);
+}
+
+static bool trans_l_add(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_addc(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sub(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_sub(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_and(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_or(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_xor(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sll(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_srl(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sra(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_ror(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_exths(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ return true;
+}
+
+static bool trans_l_extbs(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ return true;
+}
+
+static bool trans_l_exthz(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ return true;
+}
+
+static bool trans_l_extbz(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a));
+ return true;
+}
+
+static bool trans_l_cmov(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_ff1(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1);
+ tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1);
+ return true;
+}
+
+static bool trans_l_fl1(DisasContext *dc, arg_da *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS);
+ tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d));
+ return true;
+}
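+
+/*
+ * Plain-C equivalents of the l.ff1/l.fl1 mappings above (bit positions
+ * are 1-based; 0 means "no bit set").  Assuming GCC builtins, for
+ * illustration only:
+ *
+ *     int ff1(uint32_t x) { return x ? __builtin_ctz(x) + 1 : 0; }
+ *     int fl1(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }
+ *
+ * e.g. x = 0x90 (bits 4 and 7 set) gives ff1 = 5 and fl1 = 8, while
+ * x = 0 takes the ctzi(-1) + 1 and 32 - clzi(32) paths, both yielding 0.
+ */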
+
+static bool trans_l_mul(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_mulu(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_mulu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_div(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_div(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_divu(DisasContext *dc, arg_dab *a)
+{
+ check_r0_write(dc, a->d);
+ gen_divu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_muld(DisasContext *dc, arg_ab *a)
+{
+ gen_muld(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
+{
+ gen_muldu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_j(DisasContext *dc, arg_l_j *a)
+{
+ target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ dc->jmp_pc_imm = tmp_pc;
+ dc->delayed_branch = 2;
+ return true;
+}
+
+static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
+{
+ target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+ target_ulong ret_pc = dc->base.pc_next + 8;
+
+ tcg_gen_movi_tl(cpu_regs[9], ret_pc);
+ /* Optimize jal being used to load the PC for PIC. */
+ if (tmp_pc != ret_pc) {
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ dc->jmp_pc_imm = tmp_pc;
+ dc->delayed_branch = 2;
+ }
+ return true;
+}
+
+static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
+{
+ target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
+ TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8);
+ TCGv t_true = tcg_constant_tl(tmp_pc);
+
+ tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next);
+ dc->delayed_branch = 2;
+}
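+
+/*
+ * Conceptually, the movcond above records the branch decision without
+ * leaving the straight-line code (illustrative pseudo-C):
+ *
+ *     jmp_pc = cond(sr_f) ? pc + 4 * n   // branch target
+ *                         : pc + 8;      // insn after the delay slot
+ *
+ * delayed_branch = 2 then counts down so that the delay-slot insn is
+ * translated before the jump to jmp_pc is actually taken.
+ */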
+
+static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
+{
+ do_bf(dc, a, TCG_COND_NE);
+ return true;
+}
+
+static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
+{
+ do_bf(dc, a, TCG_COND_EQ);
+ return true;
+}
+
+static bool trans_l_jr(DisasContext *dc, arg_l_jr *a)
+{
+ tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
+ dc->delayed_branch = 2;
+ return true;
+}
+
+static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
+{
+ tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
+ tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
+ dc->delayed_branch = 2;
+ return true;
+}
+
+static bool trans_l_lwa(DisasContext *dc, arg_load *a)
+{
+ TCGv ea;
+
+ check_r0_write(dc, a->d);
+ ea = tcg_temp_new();
+ tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
+ tcg_gen_mov_tl(cpu_lock_addr, ea);
+ tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
+ tcg_temp_free(ea);
+ return true;
+}
+
+static void do_load(DisasContext *dc, arg_load *a, MemOp mop)
+{
+ TCGv ea;
+
+ check_r0_write(dc, a->d);
+ ea = tcg_temp_new();
+ tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop);
+ tcg_temp_free(ea);
+}
+
+static bool trans_l_lwz(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_TEUL);
+ return true;
+}
+
+static bool trans_l_lws(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_TESL);
+ return true;
+}
+
+static bool trans_l_lbz(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_UB);
+ return true;
+}
+
+static bool trans_l_lbs(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_SB);
+ return true;
+}
+
+static bool trans_l_lhz(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_TEUW);
+ return true;
+}
+
+static bool trans_l_lhs(DisasContext *dc, arg_load *a)
+{
+ do_load(dc, a, MO_TESW);
+ return true;
+}
+
+static bool trans_l_swa(DisasContext *dc, arg_store *a)
+{
+ TCGv ea, val;
+ TCGLabel *lab_fail, *lab_done;
+
+ ea = tcg_temp_new();
+ tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
+
+ lab_fail = gen_new_label();
+ lab_done = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
+ tcg_temp_free(ea);
+
+ val = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
+ cpu_R(dc, a->b), dc->mem_idx, MO_TEUL);
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
+ tcg_temp_free(val);
+
+ tcg_gen_br(lab_done);
+
+ gen_set_label(lab_fail);
+ tcg_gen_movi_tl(cpu_sr_f, 0);
+
+ gen_set_label(lab_done);
+ tcg_gen_movi_tl(cpu_lock_addr, -1);
+ return true;
+}
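+
+/*
+ * l.lwa/l.swa emulate load-linked/store-conditional with a compare-and-
+ * swap.  A sketch of the protocol implemented above (assumed semantics):
+ *
+ *     l.lwa:  lock_addr = ea; lock_value = MEM[ea]; rd = lock_value;
+ *     l.swa:  SR[F] = (ea == lock_addr) &&
+ *                     cmpxchg(&MEM[ea], lock_value, rb) == lock_value;
+ *             lock_addr = -1;            // reservation always consumed
+ *
+ * Unlike real hardware, the cmpxchg also succeeds if the word was
+ * modified and then restored (ABA), an accepted limitation of this
+ * style of emulation.
+ */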
+
+static void do_store(DisasContext *dc, arg_store *a, MemOp mop)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i);
+ tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop);
+ tcg_temp_free(t0);
+}
+
+static bool trans_l_sw(DisasContext *dc, arg_store *a)
+{
+ do_store(dc, a, MO_TEUL);
+ return true;
+}
+
+static bool trans_l_sb(DisasContext *dc, arg_store *a)
+{
+ do_store(dc, a, MO_UB);
+ return true;
+}
+
+static bool trans_l_sh(DisasContext *dc, arg_store *a)
+{
+ do_store(dc, a, MO_TEUW);
+ return true;
+}
+
+static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
+{
+ return true;
+}
+
+static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ tcg_gen_movi_i32(cpu_R(dc, a->d),
+ (dc->base.pc_next & TARGET_PAGE_MASK) +
+ ((target_long)a->i << TARGET_PAGE_BITS));
+ return true;
+}
+
+static bool trans_l_addi(DisasContext *dc, arg_rri *a)
+{
+ check_r0_write(dc, a->d);
+ gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ return true;
+}
+
+static bool trans_l_addic(DisasContext *dc, arg_rri *a)
+{
+ check_r0_write(dc, a->d);
+ gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ return true;
+}
+
+static bool trans_l_muli(DisasContext *dc, arg_rri *a)
+{
+ check_r0_write(dc, a->d);
+ gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ return true;
+}
+
+static bool trans_l_maci(DisasContext *dc, arg_l_maci *a)
+{
+ gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i));
+ return true;
+}
+
+static bool trans_l_andi(DisasContext *dc, arg_rrk *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ return true;
+}
+
+static bool trans_l_ori(DisasContext *dc, arg_rrk *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k);
+ return true;
+}
+
+static bool trans_l_xori(DisasContext *dc, arg_rri *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
+{
+ check_r0_write(dc, a->d);
+
+ if (is_user(dc)) {
+ gen_illegal_exception(dc);
+ } else {
+ TCGv spr = tcg_temp_new();
+
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ if (dc->delayed_branch) {
+ tcg_gen_mov_tl(cpu_pc, jmp_pc);
+ tcg_gen_discard_tl(jmp_pc);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ }
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+
+ tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr);
+ tcg_temp_free(spr);
+ }
+ return true;
+}
+
+static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
+{
+ if (is_user(dc)) {
+ gen_illegal_exception(dc);
+ } else {
+ TCGv spr;
+
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ /* For SR, we will need to exit the TB to recognize the new
+ * exception state. For NPC, in theory this counts as a branch
+ * (although the SPR only exists for use by an ICE). Save all
+ * of the cpu state first, allowing it to be overwritten.
+ */
+ if (dc->delayed_branch) {
+ tcg_gen_mov_tl(cpu_pc, jmp_pc);
+ tcg_gen_discard_tl(jmp_pc);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
+ }
+ dc->base.is_jmp = DISAS_EXIT;
+
+ spr = tcg_temp_new();
+ tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
+ gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b));
+ tcg_temp_free(spr);
+ }
+ return true;
+}
+
+static bool trans_l_mac(DisasContext *dc, arg_ab *a)
+{
+ gen_mac(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_msb(DisasContext *dc, arg_ab *a)
+{
+ gen_msb(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_macu(DisasContext *dc, arg_ab *a)
+{
+ gen_macu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
+{
+ gen_msbu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_slli(DisasContext *dc, arg_dal *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ a->l & (TARGET_LONG_BITS - 1));
+ return true;
+}
+
+static bool trans_l_srli(DisasContext *dc, arg_dal *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ a->l & (TARGET_LONG_BITS - 1));
+ return true;
+}
+
+static bool trans_l_srai(DisasContext *dc, arg_dal *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ a->l & (TARGET_LONG_BITS - 1));
+ return true;
+}
+
+static bool trans_l_rori(DisasContext *dc, arg_dal *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a),
+ a->l & (TARGET_LONG_BITS - 1));
+ return true;
+}
+
+static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16);
+ return true;
+}
+
+static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
+{
+ check_r0_write(dc, a->d);
+ tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
+ tcg_gen_movi_i64(cpu_mac, 0);
+ return true;
+}
+
+static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
+{
+ tcg_gen_setcond_tl(TCG_COND_LE,
+ cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ return true;
+}
+
+static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
+{
+ tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
+ return true;
+}
+
+static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ gen_exception(dc, EXCP_SYSCALL);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
+ gen_exception(dc, EXCP_TRAP);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
+{
+ tcg_gen_mb(TCG_MO_ALL);
+ return true;
+}
+
+static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
+{
+ return true;
+}
+
+static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
+{
+ return true;
+}
+
+static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
+{
+ if (is_user(dc)) {
+ gen_illegal_exception(dc);
+ } else {
+ gen_helper_rfe(cpu_env);
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ return true;
+}
+
+static bool do_fp2(DisasContext *dc, arg_da *a,
+ void (*fn)(TCGv, TCGv_env, TCGv))
+{
+ if (!check_of32s(dc)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+ fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a));
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool do_fp3(DisasContext *dc, arg_dab *a,
+ void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
+{
+ if (!check_of32s(dc)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+ fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool do_fpcmp(DisasContext *dc, arg_ab *a,
+ void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
+ bool inv, bool swap)
+{
+ if (!check_of32s(dc)) {
+ return false;
+ }
+ if (swap) {
+ fn(cpu_sr_f, cpu_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
+ } else {
+ fn(cpu_sr_f, cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
+ }
+ if (inv) {
+ tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ }
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
+{
+ return do_fp3(dc, a, gen_helper_float_add_s);
+}
+
+static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
+{
+ return do_fp3(dc, a, gen_helper_float_sub_s);
+}
+
+static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
+{
+ return do_fp3(dc, a, gen_helper_float_mul_s);
+}
+
+static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
+{
+ return do_fp3(dc, a, gen_helper_float_div_s);
+}
+
+static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a)
+{
+ return do_fp3(dc, a, gen_helper_float_rem_s);
+}
+
+static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
+{
+ return do_fp2(dc, a, gen_helper_itofs);
+}
+
+static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
+{
+ return do_fp2(dc, a, gen_helper_ftois);
+}
+
+static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
+{
+ if (!check_of32s(dc)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+ gen_helper_float_madd_s(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d),
+ cpu_R(dc, a->a), cpu_R(dc, a->b));
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
+}
+
+static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
+}
+
+static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
+}
+
+static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
+}
+
+static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
+}
+
+static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
+{
+ return do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
+}
+
+static bool trans_lf_sfueq_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_ueq_s, false, false);
+}
+
+static bool trans_lf_sfult_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_ult_s, false, false);
+}
+
+static bool trans_lf_sfugt_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_ult_s, false, true);
+}
+
+static bool trans_lf_sfule_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_ule_s, false, false);
+}
+
+static bool trans_lf_sfuge_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_ule_s, false, true);
+}
+
+static bool trans_lf_sfun_s(DisasContext *dc, arg_ab *a)
+{
+ if (!check_v1_3(dc)) {
+ return false;
+ }
+ return do_fpcmp(dc, a, gen_helper_float_un_s, false, false);
+}
+
+static bool check_pair(DisasContext *dc, int r, int p)
+{
+ return r + 1 + p < 32;
+}
+
+static void load_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
+{
+ tcg_gen_concat_i32_i64(t, cpu_R(dc, r + 1 + p), cpu_R(dc, r));
+}
+
+static void save_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
+{
+ tcg_gen_extr_i64_i32(cpu_R(dc, r + 1 + p), cpu_R(dc, r), t);
+}
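+
+/*
+ * Double-precision values live in register pairs.  The packing used by
+ * load_pair/save_pair above is, illustratively:
+ *
+ *     t = ((uint64_t)R[r] << 32) | R[r + 1 + p];     // load_pair
+ *     R[r] = t >> 32;  R[r + 1 + p] = (uint32_t)t;   // save_pair
+ *
+ * i.e. R[r] holds the high word and its partner R[r + 1 + p] the low
+ * word; check_pair() merely verifies that the partner index is < 32.
+ */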
+
+static bool do_dp3(DisasContext *dc, arg_dab_pair *a,
+ void (*fn)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 t0, t1;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->a, a->ap) ||
+ !check_pair(dc, a->b, a->bp) ||
+ !check_pair(dc, a->d, a->dp)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ load_pair(dc, t0, a->a, a->ap);
+ load_pair(dc, t1, a->b, a->bp);
+ fn(t0, cpu_env, t0, t1);
+ save_pair(dc, t0, a->d, a->dp);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool do_dp2(DisasContext *dc, arg_da_pair *a,
+ void (*fn)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+ TCGv_i64 t0;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->a, a->ap) ||
+ !check_pair(dc, a->d, a->dp)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ t0 = tcg_temp_new_i64();
+ load_pair(dc, t0, a->a, a->ap);
+ fn(t0, cpu_env, t0);
+ save_pair(dc, t0, a->d, a->dp);
+ tcg_temp_free_i64(t0);
+
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
+ void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
+ bool inv, bool swap)
+{
+ TCGv_i64 t0, t1;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->a, a->ap) ||
+ !check_pair(dc, a->b, a->bp)) {
+ return false;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ load_pair(dc, t0, a->a, a->ap);
+ load_pair(dc, t1, a->b, a->bp);
+ if (swap) {
+ fn(cpu_sr_f, cpu_env, t1, t0);
+ } else {
+ fn(cpu_sr_f, cpu_env, t0, t1);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ if (inv) {
+ tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
+ }
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_add_d(DisasContext *dc, arg_dab_pair *a)
+{
+ return do_dp3(dc, a, gen_helper_float_add_d);
+}
+
+static bool trans_lf_sub_d(DisasContext *dc, arg_dab_pair *a)
+{
+ return do_dp3(dc, a, gen_helper_float_sub_d);
+}
+
+static bool trans_lf_mul_d(DisasContext *dc, arg_dab_pair *a)
+{
+ return do_dp3(dc, a, gen_helper_float_mul_d);
+}
+
+static bool trans_lf_div_d(DisasContext *dc, arg_dab_pair *a)
+{
+ return do_dp3(dc, a, gen_helper_float_div_d);
+}
+
+static bool trans_lf_rem_d(DisasContext *dc, arg_dab_pair *a)
+{
+ return do_dp3(dc, a, gen_helper_float_rem_d);
+}
+
+static bool trans_lf_itof_d(DisasContext *dc, arg_da_pair *a)
+{
+ return do_dp2(dc, a, gen_helper_itofd);
+}
+
+static bool trans_lf_ftoi_d(DisasContext *dc, arg_da_pair *a)
+{
+ return do_dp2(dc, a, gen_helper_ftoid);
+}
+
+static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a)
+{
+ TCGv_i64 t0;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->d, a->dp)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ t0 = tcg_temp_new_i64();
+ gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a));
+ save_pair(dc, t0, a->d, a->dp);
+ tcg_temp_free_i64(t0);
+
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a)
+{
+ TCGv_i64 t0;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->a, a->ap)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ t0 = tcg_temp_new_i64();
+ load_pair(dc, t0, a->a, a->ap);
+ gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0);
+ tcg_temp_free_i64(t0);
+
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a)
+{
+ TCGv_i64 t0, t1, t2;
+
+ if (!check_of64a32s(dc) ||
+ !check_pair(dc, a->a, a->ap) ||
+ !check_pair(dc, a->b, a->bp) ||
+ !check_pair(dc, a->d, a->dp)) {
+ return false;
+ }
+ check_r0_write(dc, a->d);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ load_pair(dc, t0, a->d, a->dp);
+ load_pair(dc, t1, a->a, a->ap);
+ load_pair(dc, t2, a->b, a->bp);
+ gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2);
+ save_pair(dc, t0, a->d, a->dp);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+
+ gen_helper_update_fpcsr(cpu_env);
+ return true;
+}
+
+static bool trans_lf_sfeq_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_eq_d, false, false);
+}
+
+static bool trans_lf_sfne_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_eq_d, true, false);
+}
+
+static bool trans_lf_sfgt_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_lt_d, false, true);
+}
+
+static bool trans_lf_sfge_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_le_d, false, true);
+}
+
+static bool trans_lf_sflt_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_lt_d, false, false);
+}
+
+static bool trans_lf_sfle_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_le_d, false, false);
+}
+
+static bool trans_lf_sfueq_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_ueq_d, false, false);
+}
+
+static bool trans_lf_sfule_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_ule_d, false, false);
+}
+
+static bool trans_lf_sfuge_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_ule_d, false, true);
+}
+
+static bool trans_lf_sfult_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_ult_d, false, false);
+}
+
+static bool trans_lf_sfugt_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_ult_d, false, true);
+}
+
+static bool trans_lf_sfun_d(DisasContext *dc, arg_ab_pair *a)
+{
+ return do_dpcmp(dc, a, gen_helper_float_un_d, false, false);
+}
+
+static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcb, DisasContext, base);
+ CPUOpenRISCState *env = cs->env_ptr;
+ int bound;
+
+ dc->mem_idx = cpu_mmu_index(env, false);
+ dc->tb_flags = dc->base.tb->flags;
+ dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
+ dc->cpucfgr = env->cpucfgr;
+ dc->avr = env->avr;
+ dc->jmp_pc_imm = -1;
+
+ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
+}
+
+static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+ DisasContext *dc = container_of(db, DisasContext, base);
+
+    /* Allow the TCG optimizer to see that R0 == 0 when that is known
+       to be true, which is the common case. */
+ dc->zero = tcg_constant_tl(0);
+ if (dc->tb_flags & TB_FLAGS_R0_0) {
+ dc->R0 = dc->zero;
+ } else {
+ dc->R0 = cpu_regs[0];
+ }
+}
+
+static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
+ | (dc->base.num_insns > 1 ? 2 : 0));
+}
+
+static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);
+
+ if (!decode(dc, insn)) {
+ gen_illegal_exception(dc);
+ }
+ dc->base.pc_next += 4;
+
+ /* When exiting the delay slot normally, exit via jmp_pc.
+ * For DISAS_NORETURN, we have raised an exception and already exited.
+ * For DISAS_EXIT, we found l.rfe in a delay slot. There's nothing
+     * in the manual saying this is illegal, but surely it should be.
+ * At least or1ksim overrides pcnext and ignores the branch.
+ */
+ if (dc->delayed_branch
+ && --dc->delayed_branch == 0
+ && dc->base.is_jmp == DISAS_NEXT) {
+ dc->base.is_jmp = DISAS_JUMP;
+ }
+}
+
+static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ target_ulong jmp_dest;
+
+ /* If we have already exited the TB, nothing following has effect. */
+ if (dc->base.is_jmp == DISAS_NORETURN) {
+ return;
+ }
+
+ /* Adjust the delayed branch state for the next TB. */
+ if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
+ tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
+ }
+
+ /* For DISAS_TOO_MANY, jump to the next insn. */
+ jmp_dest = dc->base.pc_next;
+ tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);
+
+ switch (dc->base.is_jmp) {
+ case DISAS_JUMP:
+ jmp_dest = dc->jmp_pc_imm;
+ if (jmp_dest == -1) {
+ /* The jump destination is indirect/computed; use jmp_pc. */
+ tcg_gen_mov_tl(cpu_pc, jmp_pc);
+ tcg_gen_discard_tl(jmp_pc);
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ }
+ /* The jump destination is direct; use jmp_pc_imm.
+ However, we will have stored into jmp_pc as well;
+ we know now that it wasn't needed. */
+ tcg_gen_discard_tl(jmp_pc);
+ /* fallthru */
+
+ case DISAS_TOO_MANY:
+ if (translator_use_goto_tb(&dc->base, jmp_dest)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_exit_tb(dc->base.tb, 0);
+ break;
+ }
+ tcg_gen_movi_tl(cpu_pc, jmp_dest);
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *s = container_of(dcbase, DisasContext, base);
+
+ qemu_log("IN: %s\n", lookup_symbol(s->base.pc_first));
+ log_target_disas(cs, s->base.pc_first, s->base.tb->size);
+}
+
+static const TranslatorOps openrisc_tr_ops = {
+ .init_disas_context = openrisc_tr_init_disas_context,
+ .tb_start = openrisc_tr_tb_start,
+ .insn_start = openrisc_tr_insn_start,
+ .translate_insn = openrisc_tr_translate_insn,
+ .tb_stop = openrisc_tr_tb_stop,
+ .disas_log = openrisc_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "PC=%08x\n", env->pc);
+ for (i = 0; i < 32; ++i) {
+ qemu_fprintf(f, "R%02d=%08x%c", i, cpu_get_gpr(env, i),
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+}
+
+void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+ env->dflag = data[1] & 1;
+ if (data[1] & 2) {
+ env->ppc = env->pc - 4;
+ }
+}
diff --git a/target/ppc/Kconfig b/target/ppc/Kconfig
new file mode 100644
index 000000000..3ff152051
--- /dev/null
+++ b/target/ppc/Kconfig
@@ -0,0 +1,5 @@
+config PPC
+ bool
+
+config PPC64
+ bool
diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c
new file mode 100644
index 000000000..bb392f6d8
--- /dev/null
+++ b/target/ppc/arch_dump.c
@@ -0,0 +1,309 @@
+/*
+ * writing ELF notes for ppc{64,} arch
+ *
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * Authors:
+ * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+#include "sysemu/kvm.h"
+
+#ifdef TARGET_PPC64
+#define ELFCLASS ELFCLASS64
+#define cpu_to_dump_reg cpu_to_dump64
+typedef uint64_t reg_t;
+typedef Elf64_Nhdr Elf_Nhdr;
+#else
+#define ELFCLASS ELFCLASS32
+#define cpu_to_dump_reg cpu_to_dump32
+typedef uint32_t reg_t;
+typedef Elf32_Nhdr Elf_Nhdr;
+#endif /* TARGET_PPC64 */
+
+struct PPCUserRegStruct {
+ reg_t gpr[32];
+ reg_t nip;
+ reg_t msr;
+ reg_t orig_gpr3;
+ reg_t ctr;
+ reg_t link;
+ reg_t xer;
+ reg_t ccr;
+ reg_t softe;
+ reg_t trap;
+ reg_t dar;
+ reg_t dsisr;
+ reg_t result;
+} QEMU_PACKED;
+
+struct PPCElfPrstatus {
+ char pad1[112];
+ struct PPCUserRegStruct pr_reg;
+ char pad2[40];
+} QEMU_PACKED;
+
+
+struct PPCElfFpregset {
+ uint64_t fpr[32];
+ reg_t fpscr;
+} QEMU_PACKED;
+
+
+struct PPCElfVmxregset {
+ ppc_avr_t avr[32];
+ ppc_avr_t vscr;
+ union {
+ ppc_avr_t unused;
+ uint32_t value;
+ } vrsave;
+} QEMU_PACKED;
+
+struct PPCElfVsxregset {
+ uint64_t vsr[32];
+} QEMU_PACKED;
+
+struct PPCElfSperegset {
+ uint32_t evr[32];
+ uint64_t spe_acc;
+ uint32_t spe_fscr;
+} QEMU_PACKED;
+
+typedef struct noteStruct {
+ Elf_Nhdr hdr;
+ char name[5];
+ char pad3[3];
+ union {
+ struct PPCElfPrstatus prstatus;
+ struct PPCElfFpregset fpregset;
+ struct PPCElfVmxregset vmxregset;
+ struct PPCElfVsxregset vsxregset;
+ struct PPCElfSperegset speregset;
+ } contents;
+} QEMU_PACKED Note;
+
+typedef struct NoteFuncArg {
+ Note note;
+ DumpState *state;
+} NoteFuncArg;
+
+static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ reg_t cr;
+ struct PPCElfPrstatus *prstatus;
+ struct PPCUserRegStruct *reg;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PRSTATUS);
+
+ prstatus = &note->contents.prstatus;
+ memset(prstatus, 0, sizeof(*prstatus));
+ reg = &prstatus->pr_reg;
+
+ for (i = 0; i < 32; i++) {
+ reg->gpr[i] = cpu_to_dump_reg(s, cpu->env.gpr[i]);
+ }
+ reg->nip = cpu_to_dump_reg(s, cpu->env.nip);
+ reg->msr = cpu_to_dump_reg(s, cpu->env.msr);
+ reg->ctr = cpu_to_dump_reg(s, cpu->env.ctr);
+ reg->link = cpu_to_dump_reg(s, cpu->env.lr);
+ reg->xer = cpu_to_dump_reg(s, cpu_read_xer(&cpu->env));
+
+ cr = 0;
+ for (i = 0; i < 8; i++) {
+ cr |= (cpu->env.crf[i] & 15) << (4 * (7 - i));
+ }
+ reg->ccr = cpu_to_dump_reg(s, cr);
+}
+
+static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPCElfFpregset *fpregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PRFPREG);
+
+ fpregset = &note->contents.fpregset;
+ memset(fpregset, 0, sizeof(*fpregset));
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
+ fpregset->fpr[i] = cpu_to_dump64(s, *fpr);
+ }
+ fpregset->fpscr = cpu_to_dump_reg(s, cpu->env.fpscr);
+}
+
+static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPCElfVmxregset *vmxregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_VMX);
+ vmxregset = &note->contents.vmxregset;
+ memset(vmxregset, 0, sizeof(*vmxregset));
+
+ for (i = 0; i < 32; i++) {
+ bool needs_byteswap;
+ ppc_avr_t *avr = cpu_avr_ptr(&cpu->env, i);
+
+#ifdef HOST_WORDS_BIGENDIAN
+ needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB;
+#else
+ needs_byteswap = s->dump_info.d_endian == ELFDATA2MSB;
+#endif
+
+ if (needs_byteswap) {
+ vmxregset->avr[i].u64[0] = bswap64(avr->u64[1]);
+ vmxregset->avr[i].u64[1] = bswap64(avr->u64[0]);
+ } else {
+ vmxregset->avr[i].u64[0] = avr->u64[0];
+ vmxregset->avr[i].u64[1] = avr->u64[1];
+ }
+ }
+ vmxregset->vscr.u32[3] = cpu_to_dump32(s, ppc_get_vscr(&cpu->env));
+}
+
+static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPCElfVsxregset *vsxregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_VSX);
+ vsxregset = &note->contents.vsxregset;
+ memset(vsxregset, 0, sizeof(*vsxregset));
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
+ vsxregset->vsr[i] = cpu_to_dump64(s, *vsrl);
+ }
+}
+
+static void ppc_write_elf_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ struct PPCElfSperegset *speregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_SPE);
+ speregset = &note->contents.speregset;
+ memset(speregset, 0, sizeof(*speregset));
+
+ speregset->spe_acc = cpu_to_dump64(s, cpu->env.spe_acc);
+ speregset->spe_fscr = cpu_to_dump32(s, cpu->env.spe_fscr);
+}
+
+static const struct NoteFuncDescStruct {
+ int contents_size;
+ void (*note_contents_func)(NoteFuncArg *arg, PowerPCCPU *cpu);
+} note_func[] = {
+ {sizeof_field(Note, contents.prstatus), ppc_write_elf_prstatus},
+ {sizeof_field(Note, contents.fpregset), ppc_write_elf_fpregset},
+ {sizeof_field(Note, contents.vmxregset), ppc_write_elf_vmxregset},
+ {sizeof_field(Note, contents.vsxregset), ppc_write_elf_vsxregset},
+ {sizeof_field(Note, contents.speregset), ppc_write_elf_speregset},
+ { 0, NULL}
+};
+
+typedef struct NoteFuncDescStruct NoteFuncDesc;
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks)
+{
+ PowerPCCPU *cpu;
+
+ if (first_cpu == NULL) {
+ return -1;
+ }
+
+ cpu = POWERPC_CPU(first_cpu);
+
+ info->d_machine = PPC_ELF_MACHINE;
+ info->d_class = ELFCLASS;
+
+ if (ppc_interrupts_little_endian(cpu)) {
+ info->d_endian = ELFDATA2LSB;
+ } else {
+ info->d_endian = ELFDATA2MSB;
+ }
+    /* 64KB is the max page size for the pseries kernel */
+ if (strncmp(object_get_typename(qdev_get_machine()),
+ "pseries-", 8) == 0) {
+ info->page_size = (1U << 16);
+ }
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 8; /* "CORE" or "QEMU" rounded */
+ size_t elf_note_size = 0;
+ int note_head_size;
+ const NoteFuncDesc *nf;
+
+ note_head_size = sizeof(Elf_Nhdr);
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ elf_note_size = elf_note_size + note_head_size + name_size +
+ nf->contents_size;
+ }
+
+ return (elf_note_size) * nr_cpus;
+}
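+
+/*
+ * Worked example of the computation above (assuming the ELF64 layout,
+ * where sizeof(Elf64_Nhdr) == 12): a single prstatus note occupies
+ *
+ *     12 (header) + 8 (padded "CORE" name) + sizeof(contents.prstatus)
+ *
+ * bytes, and the returned total is the sum over all five note kinds,
+ * multiplied by the number of vCPUs.
+ */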
+
+static int ppc_write_all_elf_notes(const char *note_name,
+ WriteCoreDumpFunction f,
+ PowerPCCPU *cpu, int id,
+ void *opaque)
+{
+ NoteFuncArg arg = { .state = opaque };
+ int ret = -1;
+ int note_size;
+ const NoteFuncDesc *nf;
+
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ arg.note.hdr.n_namesz = cpu_to_dump32(opaque, sizeof(arg.note.name));
+ arg.note.hdr.n_descsz = cpu_to_dump32(opaque, nf->contents_size);
+ strncpy(arg.note.name, note_name, sizeof(arg.note.name));
+
+ (*nf->note_contents_func)(&arg, cpu);
+
+ note_size =
+ sizeof(arg.note) - sizeof(arg.note.contents) + nf->contents_size;
+ ret = f(&arg.note, note_size, opaque);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
+}
+
+int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
+}
diff --git a/target/ppc/compat.c b/target/ppc/compat.c
new file mode 100644
index 000000000..7949a24f5
--- /dev/null
+++ b/target/ppc/compat.c
@@ -0,0 +1,327 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright 2016, David Gibson, Red Hat Inc. <dgibson@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "sysemu/cpus.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "cpu-models.h"
+
+typedef struct {
+ const char *name;
+ uint32_t pvr;
+ uint64_t pcr;
+ uint64_t pcr_level;
+
+ /*
+ * Maximum allowed virtual threads per virtual core
+ *
+ * This is to stop older guests getting confused by seeing more
+ * threads than they think the cpu can support. Usually it's
+ * equal to the number of threads supported on bare metal
+ * hardware, but not always (see POWER9).
+ */
+ int max_vthreads;
+} CompatInfo;
+
+static const CompatInfo compat_table[] = {
+ /*
+ * Ordered from oldest to newest - the code relies on this
+ */
+ { /* POWER6, ISA2.05 */
+ .name = "power6",
+ .pvr = CPU_POWERPC_LOGICAL_2_05,
+ .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
+ PCR_COMPAT_2_06 | PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS,
+ .pcr_level = PCR_COMPAT_2_05,
+ .max_vthreads = 2,
+ },
+ { /* POWER7, ISA2.06 */
+ .name = "power7",
+ .pvr = CPU_POWERPC_LOGICAL_2_06,
+ .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
+ PCR_COMPAT_2_06 | PCR_TM_DIS,
+ .pcr_level = PCR_COMPAT_2_06,
+ .max_vthreads = 4,
+ },
+ {
+ .name = "power7+",
+ .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS,
+ .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
+ PCR_COMPAT_2_06 | PCR_TM_DIS,
+ .pcr_level = PCR_COMPAT_2_06,
+ .max_vthreads = 4,
+ },
+ { /* POWER8, ISA2.07 */
+ .name = "power8",
+ .pvr = CPU_POWERPC_LOGICAL_2_07,
+ .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07,
+ .pcr_level = PCR_COMPAT_2_07,
+ .max_vthreads = 8,
+ },
+ { /* POWER9, ISA3.00 */
+ .name = "power9",
+ .pvr = CPU_POWERPC_LOGICAL_3_00,
+ .pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00,
+ .pcr_level = PCR_COMPAT_3_00,
+ /*
+ * POWER9 hardware only supports 4 threads / core, but this
+ * limit is for guests. We need to support 8 vthreads/vcore
+         * on POWER9 for POWER8 compatibility guests, and it would be
+         * very confusing if half of the threads disappeared from the
+         * guest when it announces it is POWER9-aware at CAS time.
+ */
+ .max_vthreads = 8,
+ },
+ { /* POWER10, ISA3.10 */
+ .name = "power10",
+ .pvr = CPU_POWERPC_LOGICAL_3_10,
+ .pcr = PCR_COMPAT_3_10,
+ .pcr_level = PCR_COMPAT_3_10,
+ .max_vthreads = 8,
+ },
+};
+
+static const CompatInfo *compat_by_pvr(uint32_t pvr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
+ if (compat_table[i].pvr == pvr) {
+ return &compat_table[i];
+ }
+ }
+ return NULL;
+}
+
+static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr,
+ uint32_t min_compat_pvr, uint32_t max_compat_pvr)
+{
+ const CompatInfo *compat = compat_by_pvr(compat_pvr);
+ const CompatInfo *min = compat_by_pvr(min_compat_pvr);
+ const CompatInfo *max = compat_by_pvr(max_compat_pvr);
+
+ g_assert(!min_compat_pvr || min);
+ g_assert(!max_compat_pvr || max);
+
+ if (!compat) {
+ /* Not a recognized logical PVR */
+ return false;
+ }
+ if ((min && (compat < min)) || (max && (compat > max))) {
+ /* Outside specified range */
+ return false;
+ }
+ if (!(pcc->pcr_supported & compat->pcr_level)) {
+ /* Not supported by this CPU */
+ return false;
+ }
+ return true;
+}
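+
+/*
+ * Note that the range check above compares CompatInfo *pointers*; this
+ * is meaningful only because compat_table[] is ordered oldest to
+ * newest, so an entry's address gives a total order on compatibility
+ * levels.  Illustratively:
+ *
+ *     compat_by_pvr(CPU_POWERPC_LOGICAL_2_06)        ("power7")
+ *         < compat_by_pvr(CPU_POWERPC_LOGICAL_3_00)  ("power9")
+ */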
+
+bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr,
+ uint32_t min_compat_pvr, uint32_t max_compat_pvr)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+#if !defined(CONFIG_USER_ONLY)
+ g_assert(cpu->vhyp);
+#endif
+
+ return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr);
+}
+
+bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr,
+ uint32_t min_compat_pvr, uint32_t max_compat_pvr)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(object_class_by_name(cputype));
+ return pcc_compat(pcc, compat_pvr, min_compat_pvr, max_compat_pvr);
+}
+
+int ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp)
+{
+ const CompatInfo *compat = compat_by_pvr(compat_pvr);
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ uint64_t pcr;
+
+ if (!compat_pvr) {
+ pcr = 0;
+ } else if (!compat) {
+ error_setg(errp, "Unknown compatibility PVR 0x%08"PRIx32, compat_pvr);
+ return -EINVAL;
+ } else if (!ppc_check_compat(cpu, compat_pvr, 0, 0)) {
+ error_setg(errp, "Compatibility PVR 0x%08"PRIx32" not valid for CPU",
+ compat_pvr);
+ return -EINVAL;
+ } else {
+ pcr = compat->pcr;
+ }
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (kvm_enabled() && cpu->compat_pvr != compat_pvr) {
+ int ret = kvmppc_set_compat(cpu, compat_pvr);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Unable to set CPU compatibility mode in KVM");
+ return ret;
+ }
+ }
+
+ cpu->compat_pvr = compat_pvr;
+ env->spr[SPR_PCR] = pcr & pcc->pcr_mask;
+ return 0;
+}
+
+typedef struct {
+ uint32_t compat_pvr;
+ Error **errp;
+ int ret;
+} SetCompatState;
+
+static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ SetCompatState *s = arg.host_ptr;
+
+ s->ret = ppc_set_compat(cpu, s->compat_pvr, s->errp);
+}
+
+int ppc_set_compat_all(uint32_t compat_pvr, Error **errp)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ SetCompatState s = {
+ .compat_pvr = compat_pvr,
+ .errp = errp,
+ .ret = 0,
+ };
+
+ run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));
+
+ if (s.ret < 0) {
+ return s.ret;
+ }
+ }
+
+ return 0;
+}
+
+int ppc_compat_max_vthreads(PowerPCCPU *cpu)
+{
+ const CompatInfo *compat = compat_by_pvr(cpu->compat_pvr);
+ int n_threads = CPU(cpu)->nr_threads;
+
+ if (cpu->compat_pvr) {
+ g_assert(compat);
+ n_threads = MIN(n_threads, compat->max_vthreads);
+ }
+
+ return n_threads;
+}
+
+static void ppc_compat_prop_get(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint32_t compat_pvr = *((uint32_t *)opaque);
+ const char *value;
+
+ if (!compat_pvr) {
+ value = "";
+ } else {
+ const CompatInfo *compat = compat_by_pvr(compat_pvr);
+
+ g_assert(compat);
+
+ value = compat->name;
+ }
+
+ visit_type_str(v, name, (char **)&value, errp);
+}
+
+static void ppc_compat_prop_set(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ char *value;
+ uint32_t compat_pvr;
+
+ if (!visit_type_str(v, name, &value, errp)) {
+ return;
+ }
+
+ if (strcmp(value, "") == 0) {
+ compat_pvr = 0;
+ } else {
+ int i;
+ const CompatInfo *compat = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
+ if (strcmp(value, compat_table[i].name) == 0) {
+ compat = &compat_table[i];
+ break;
+ }
+ }
+
+ if (!compat) {
+ error_setg(errp, "Invalid compatibility mode \"%s\"", value);
+ goto out;
+ }
+ compat_pvr = compat->pvr;
+ }
+
+ *((uint32_t *)opaque) = compat_pvr;
+
+out:
+ g_free(value);
+}
+
+void ppc_compat_add_property(Object *obj, const char *name,
+ uint32_t *compat_pvr, const char *basedesc)
+{
+ gchar *namesv[ARRAY_SIZE(compat_table) + 1];
+ gchar *names, *desc;
+ int i;
+
+ object_property_add(obj, name, "string",
+ ppc_compat_prop_get, ppc_compat_prop_set, NULL,
+ compat_pvr);
+
+ for (i = 0; i < ARRAY_SIZE(compat_table); i++) {
+ /*
+ * Have to discard const here, because g_strjoinv() takes
+ * (gchar **), not (const gchar **) :(
+ */
+ namesv[i] = (gchar *)compat_table[i].name;
+ }
+ namesv[ARRAY_SIZE(compat_table)] = NULL;
+
+ names = g_strjoinv(", ", namesv);
+ desc = g_strdup_printf("%s. Valid values are %s.", basedesc, names);
+ object_property_set_description(obj, name, desc);
+
+ g_free(names);
+ g_free(desc);
+}
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
new file mode 100644
index 000000000..4baa11171
--- /dev/null
+++ b/target/ppc/cpu-models.c
@@ -0,0 +1,968 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/module.h"
+#include "cpu-models.h"
+
+#if defined(CONFIG_USER_ONLY)
+#define TODO_USER_ONLY 1
+#endif
+
+/***************************************************************************/
+/* PowerPC CPU definitions */
+#define POWERPC_DEF_PREFIX(pvr, svr, type) \
+ glue(glue(glue(glue(pvr, _), svr), _), type)
+#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \
+ static void \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \
+ (ObjectClass *oc, void *data) \
+ { \
+ DeviceClass *dc = DEVICE_CLASS(oc); \
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \
+ \
+ pcc->pvr = _pvr; \
+ pcc->svr = _svr; \
+ dc->desc = _desc; \
+ } \
+ \
+ static const TypeInfo \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_type_info) = { \
+ .name = POWERPC_CPU_TYPE_NAME(_name), \
+ .parent = stringify(_type) "-family-" TYPE_POWERPC_CPU, \
+ .class_init = \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init), \
+ }; \
+ \
+ static void \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_register_types)(void) \
+ { \
+ type_register_static( \
+ &glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_type_info)); \
+ } \
+ \
+ type_init( \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_register_types))
+
+#define POWERPC_DEF(_name, _pvr, _type, _desc) \
+ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type)
+
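+/*
+ * Each POWERPC_DEF()/POWERPC_DEF_SVR() line below expands, via the macro
+ * above, into a QOM type registration: a TypeInfo whose class_init stores
+ * the PVR/SVR in PowerPCCPUClass and the description in DeviceClass,
+ * parented to the corresponding "<type>-family" CPU type.
+ */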
+ /* Embedded PowerPC */
+ /* PowerPC 401 family */
+ POWERPC_DEF("401", CPU_POWERPC_401, 401,
+ "Generic PowerPC 401")
+ /* PowerPC 401 cores */
+ POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401,
+ "PowerPC 401A1")
+ POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2,
+ "PowerPC 401B2")
+ POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2,
+ "PowerPC 401C2")
+ POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2,
+ "PowerPC 401D2")
+ POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2,
+ "PowerPC 401E2")
+ POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2,
+ "PowerPC 401F2")
+ /* XXX: to be checked */
+ POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2,
+ "PowerPC 401G2")
+ /* PowerPC 401 microcontrollers */
+ POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480,
+ "IOP480 (401 microcontroller)")
+ POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401,
+ "IBM Processor for Network Resources")
+ /* PowerPC 403 family */
+ /* PowerPC 403 microcontrollers */
+ POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403,
+ "PowerPC 403 GA")
+ POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403,
+ "PowerPC 403 GB")
+ POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403,
+ "PowerPC 403 GC")
+ POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX,
+ "PowerPC 403 GCX")
+ /* PowerPC 405 family */
+ /* PowerPC 405 cores */
+ POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405,
+ "PowerPC 405 D2")
+ POWERPC_DEF("405d4", CPU_POWERPC_405D4, 405,
+ "PowerPC 405 D4")
+ /* PowerPC 405 microcontrollers */
+ POWERPC_DEF("405cra", CPU_POWERPC_405CRa, 405,
+ "PowerPC 405 CRa")
+ POWERPC_DEF("405crb", CPU_POWERPC_405CRb, 405,
+ "PowerPC 405 CRb")
+ POWERPC_DEF("405crc", CPU_POWERPC_405CRc, 405,
+ "PowerPC 405 CRc")
+ POWERPC_DEF("405ep", CPU_POWERPC_405EP, 405,
+ "PowerPC 405 EP")
+ POWERPC_DEF("405ez", CPU_POWERPC_405EZ, 405,
+ "PowerPC 405 EZ")
+ POWERPC_DEF("405gpa", CPU_POWERPC_405GPa, 405,
+ "PowerPC 405 GPa")
+ POWERPC_DEF("405gpb", CPU_POWERPC_405GPb, 405,
+ "PowerPC 405 GPb")
+ POWERPC_DEF("405gpc", CPU_POWERPC_405GPc, 405,
+ "PowerPC 405 GPc")
+ POWERPC_DEF("405gpd", CPU_POWERPC_405GPd, 405,
+ "PowerPC 405 GPd")
+ POWERPC_DEF("405gpr", CPU_POWERPC_405GPR, 405,
+ "PowerPC 405 GPR")
+ POWERPC_DEF("405lp", CPU_POWERPC_405LP, 405,
+ "PowerPC 405 LP")
+ POWERPC_DEF("npe405h", CPU_POWERPC_NPE405H, 405,
+ "Npe405 H")
+ POWERPC_DEF("npe405h2", CPU_POWERPC_NPE405H2, 405,
+ "Npe405 H2")
+ POWERPC_DEF("npe405l", CPU_POWERPC_NPE405L, 405,
+ "Npe405 L")
+ POWERPC_DEF("npe4gs3", CPU_POWERPC_NPE4GS3, 405,
+ "Npe4GS3")
+ /* PowerPC 401/403/405 based set-top-box microcontrollers */
+ POWERPC_DEF("stb03", CPU_POWERPC_STB03, 405,
+ "STB03xx")
+ POWERPC_DEF("stb04", CPU_POWERPC_STB04, 405,
+ "STB04xx")
+ POWERPC_DEF("stb25", CPU_POWERPC_STB25, 405,
+ "STB25xx")
+ /* Xilinx PowerPC 405 cores */
+ POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405,
+ NULL)
+ POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405,
+ NULL)
+ /* PowerPC 440 family */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440", CPU_POWERPC_440, 440GP,
+ "Generic PowerPC 440")
+#endif
+ /* PowerPC 440 cores */
+ POWERPC_DEF("440-xilinx", CPU_POWERPC_440_XILINX, 440x5,
+ "PowerPC 440 Xilinx 5")
+
+ POWERPC_DEF("440-xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU,
+                "PowerPC 440 Xilinx 5 with a double precision FPU")
+ /* PowerPC 440 microcontrollers */
+ POWERPC_DEF("440epa", CPU_POWERPC_440EPa, 440EP,
+ "PowerPC 440 EPa")
+ POWERPC_DEF("440epb", CPU_POWERPC_440EPb, 440EP,
+ "PowerPC 440 EPb")
+ POWERPC_DEF("440epx", CPU_POWERPC_440EPX, 440EP,
+ "PowerPC 440 EPX")
+ POWERPC_DEF("460exb", CPU_POWERPC_460EXb, 460EX,
+ "PowerPC 460 EXb")
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gpb", CPU_POWERPC_440GPb, 440GP,
+ "PowerPC 440 GPb")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gpc", CPU_POWERPC_440GPc, 440GP,
+ "PowerPC 440 GPc")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gra", CPU_POWERPC_440GRa, 440x5,
+ "PowerPC 440 GRa")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440grx", CPU_POWERPC_440GRX, 440x5,
+ "PowerPC 440 GRX")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gxa", CPU_POWERPC_440GXa, 440EP,
+ "PowerPC 440 GXa")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gxb", CPU_POWERPC_440GXb, 440EP,
+ "PowerPC 440 GXb")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gxc", CPU_POWERPC_440GXc, 440EP,
+ "PowerPC 440 GXc")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440gxf", CPU_POWERPC_440GXf, 440EP,
+ "PowerPC 440 GXf")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440sp", CPU_POWERPC_440SP, 440EP,
+ "PowerPC 440 SP")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440sp2", CPU_POWERPC_440SP2, 440EP,
+ "PowerPC 440 SP2")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440spe", CPU_POWERPC_440SPE, 440EP,
+ "PowerPC 440 SPE")
+#endif
+ /* Freescale embedded PowerPC cores */
+ /* MPC5xx family (aka RCPU) */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("mpc5xx", CPU_POWERPC_MPC5xx, MPC5xx,
+ "Generic MPC5xx core")
+#endif
+ /* MPC8xx family (aka PowerQUICC) */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("mpc8xx", CPU_POWERPC_MPC8xx, MPC8xx,
+ "Generic MPC8xx core")
+#endif
+ /* MPC82xx family (aka PowerQUICC-II) */
+ POWERPC_DEF("g2", CPU_POWERPC_G2, G2,
+ "PowerPC G2 core")
+ POWERPC_DEF("g2h4", CPU_POWERPC_G2H4, G2,
+ "PowerPC G2 H4 core")
+ POWERPC_DEF("g2gp", CPU_POWERPC_G2gp, G2,
+ "PowerPC G2 GP core")
+ POWERPC_DEF("g2ls", CPU_POWERPC_G2ls, G2,
+ "PowerPC G2 LS core")
+ POWERPC_DEF("g2hip3", CPU_POWERPC_G2_HIP3, G2,
+ "PowerPC G2 HiP3 core")
+ POWERPC_DEF("g2hip4", CPU_POWERPC_G2_HIP4, G2,
+ "PowerPC G2 HiP4 core")
+ POWERPC_DEF("mpc603", CPU_POWERPC_MPC603, 603E,
+ "PowerPC MPC603 core")
+ POWERPC_DEF("g2le", CPU_POWERPC_G2LE, G2LE,
+ "PowerPC G2le core (same as G2 plus little-endian mode support)")
+ POWERPC_DEF("g2legp", CPU_POWERPC_G2LEgp, G2LE,
+ "PowerPC G2LE GP core")
+ POWERPC_DEF("g2lels", CPU_POWERPC_G2LEls, G2LE,
+ "PowerPC G2LE LS core")
+ POWERPC_DEF("g2legp1", CPU_POWERPC_G2LEgp1, G2LE,
+ "PowerPC G2LE GP1 core")
+ POWERPC_DEF("g2legp3", CPU_POWERPC_G2LEgp3, G2LE,
+ "PowerPC G2LE GP3 core")
+ /* PowerPC G2 microcontrollers */
+ POWERPC_DEF_SVR("mpc5200_v10", "MPC5200 v1.0",
+ CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE)
+ POWERPC_DEF_SVR("mpc5200_v11", "MPC5200 v1.1",
+ CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE)
+ POWERPC_DEF_SVR("mpc5200_v12", "MPC5200 v1.2",
+ CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE)
+ POWERPC_DEF_SVR("mpc5200b_v20", "MPC5200B v2.0",
+ CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE)
+ POWERPC_DEF_SVR("mpc5200b_v21", "MPC5200B v2.1",
+ CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE)
+ /* e200 family */
+ POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200,
+ "PowerPC e200z5 core")
+ POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200,
+ "PowerPC e200z6 core")
+ /* e300 family */
+ POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300,
+ "PowerPC e300c1 core")
+ POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300,
+ "PowerPC e300c2 core")
+ POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300,
+ "PowerPC e300c3 core")
+ POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300,
+ "PowerPC e300c4 core")
+ /* PowerPC e300 microcontrollers */
+ POWERPC_DEF_SVR("mpc8343", "MPC8343",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300)
+ POWERPC_DEF_SVR("mpc8343a", "MPC8343A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300)
+ POWERPC_DEF_SVR("mpc8343e", "MPC8343E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300)
+ POWERPC_DEF_SVR("mpc8343ea", "MPC8343EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300)
+ POWERPC_DEF_SVR("mpc8347t", "MPC8347T",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300)
+ POWERPC_DEF_SVR("mpc8347p", "MPC8347P",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300)
+ POWERPC_DEF_SVR("mpc8347at", "MPC8347AT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300)
+ POWERPC_DEF_SVR("mpc8347ap", "MPC8347AP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300)
+ POWERPC_DEF_SVR("mpc8347et", "MPC8347ET",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300)
+ POWERPC_DEF_SVR("mpc8347ep", "MPC8343EP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300)
+ POWERPC_DEF_SVR("mpc8347eat", "MPC8347EAT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300)
+ POWERPC_DEF_SVR("mpc8347eap", "MPC8343EAP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300)
+ POWERPC_DEF_SVR("mpc8349", "MPC8349",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300)
+ POWERPC_DEF_SVR("mpc8349a", "MPC8349A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300)
+ POWERPC_DEF_SVR("mpc8349e", "MPC8349E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300)
+ POWERPC_DEF_SVR("mpc8349ea", "MPC8349EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300)
+ POWERPC_DEF_SVR("mpc8377", "MPC8377",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300)
+ POWERPC_DEF_SVR("mpc8377e", "MPC8377E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300)
+ POWERPC_DEF_SVR("mpc8378", "MPC8378",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300)
+ POWERPC_DEF_SVR("mpc8378e", "MPC8378E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300)
+ POWERPC_DEF_SVR("mpc8379", "MPC8379",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300)
+ POWERPC_DEF_SVR("mpc8379e", "MPC8379E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300)
+ /* e500 family */
+ POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core",
+ CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1);
+ POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core",
+ CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1);
+ POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core",
+ CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2);
+ POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core",
+ CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2);
+ POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core",
+ CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2);
+ POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core",
+ CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2);
+ POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core",
+ CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2);
+ POWERPC_DEF_SVR("e500mc", "e500mc",
+ CPU_POWERPC_e500mc, POWERPC_SVR_E500, e500mc)
+#ifdef TARGET_PPC64
+ POWERPC_DEF_SVR("e5500", "e5500",
+ CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500)
+ POWERPC_DEF_SVR("e6500", "e6500",
+ CPU_POWERPC_e6500, POWERPC_SVR_E500, e6500)
+#endif
+ /* PowerPC e500 microcontrollers */
+ POWERPC_DEF_SVR("mpc8533_v10", "MPC8533 v1.0",
+ CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8533_v11", "MPC8533 v1.1",
+ CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8533e_v10", "MPC8533E v1.0",
+ CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8533e_v11", "MPC8533E v1.1",
+ CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8540_v10", "MPC8540 v1.0",
+ CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1)
+ POWERPC_DEF_SVR("mpc8540_v20", "MPC8540 v2.0",
+ CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1)
+ POWERPC_DEF_SVR("mpc8540_v21", "MPC8540 v2.1",
+ CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1)
+ POWERPC_DEF_SVR("mpc8541_v10", "MPC8541 v1.0",
+ CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1)
+ POWERPC_DEF_SVR("mpc8541_v11", "MPC8541 v1.1",
+ CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1)
+ POWERPC_DEF_SVR("mpc8541e_v10", "MPC8541E v1.0",
+ CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1)
+ POWERPC_DEF_SVR("mpc8541e_v11", "MPC8541E v1.1",
+ CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1)
+ POWERPC_DEF_SVR("mpc8543_v10", "MPC8543 v1.0",
+ CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8543_v11", "MPC8543 v1.1",
+ CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8543_v20", "MPC8543 v2.0",
+ CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8543_v21", "MPC8543 v2.1",
+ CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8543e_v10", "MPC8543E v1.0",
+ CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8543e_v11", "MPC8543E v1.1",
+ CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8543e_v20", "MPC8543E v2.0",
+ CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8543e_v21", "MPC8543E v2.1",
+ CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8544_v10", "MPC8544 v1.0",
+ CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8544_v11", "MPC8544 v1.1",
+ CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8544e_v10", "MPC8544E v1.0",
+ CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8544e_v11", "MPC8544E v1.1",
+ CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8545_v20", "MPC8545 v2.0",
+ CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8545_v21", "MPC8545 v2.1",
+ CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8545e_v20", "MPC8545E v2.0",
+ CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8545e_v21", "MPC8545E v2.1",
+ CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8547e_v20", "MPC8547E v2.0",
+ CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8547e_v21", "MPC8547E v2.1",
+ CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8548_v10", "MPC8548 v1.0",
+ CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8548_v11", "MPC8548 v1.1",
+ CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8548_v20", "MPC8548 v2.0",
+ CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8548_v21", "MPC8548 v2.1",
+ CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8548e_v10", "MPC8548E v1.0",
+ CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8548e_v11", "MPC8548E v1.1",
+ CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8548e_v20", "MPC8548E v2.0",
+ CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1",
+ CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0",
+ CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1",
+ CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0",
+ CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1",
+ CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2)
+ POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0",
+ CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2)
+ POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0",
+ CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2)
+ POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1",
+ CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2)
+ POWERPC_DEF_SVR("mpc8567", "MPC8567",
+ CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2)
+ POWERPC_DEF_SVR("mpc8567e", "MPC8567E",
+ CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2)
+ POWERPC_DEF_SVR("mpc8568", "MPC8568",
+ CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2)
+ POWERPC_DEF_SVR("mpc8568e", "MPC8568E",
+ CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2)
+ POWERPC_DEF_SVR("mpc8572", "MPC8572",
+ CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2)
+ POWERPC_DEF_SVR("mpc8572e", "MPC8572E",
+ CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2)
+ /* e600 family */
+ POWERPC_DEF("e600", CPU_POWERPC_e600, e600,
+ "PowerPC e600 core")
+ /* PowerPC e600 microcontrollers */
+ POWERPC_DEF_SVR("mpc8610", "MPC8610",
+ CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600)
+ POWERPC_DEF_SVR("mpc8641", "MPC8641",
+ CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600)
+ POWERPC_DEF_SVR("mpc8641d", "MPC8641D",
+ CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600)
+ /* 32 bits "classic" PowerPC */
+ /* PowerPC 6xx family */
+ POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601,
+ "PowerPC 601v0")
+ POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601,
+ "PowerPC 601v1")
+ POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v,
+ "PowerPC 601v2")
+ POWERPC_DEF("602", CPU_POWERPC_602, 602,
+ "PowerPC 602")
+ POWERPC_DEF("603", CPU_POWERPC_603, 603,
+ "PowerPC 603")
+ POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E,
+ "PowerPC 603e v1.1")
+ POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E,
+ "PowerPC 603e v1.2")
+ POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E,
+ "PowerPC 603e v1.3")
+ POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E,
+ "PowerPC 603e v1.4")
+ POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E,
+ "PowerPC 603e v2.2")
+ POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E,
+ "PowerPC 603e v3")
+ POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E,
+ "PowerPC 603e v4")
+ POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E,
+ "PowerPC 603e v4.1")
+ POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E,
+ "PowerPC 603e (aka PID7)")
+ POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E,
+ "PowerPC 603e7t")
+ POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E,
+ "PowerPC 603e7v")
+ POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E,
+ "PowerPC 603e7v1")
+ POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E,
+ "PowerPC 603e7v2")
+ POWERPC_DEF("603p", CPU_POWERPC_603P, 603E,
+ "PowerPC 603p (aka PID7v)")
+ POWERPC_DEF("604", CPU_POWERPC_604, 604,
+ "PowerPC 604")
+ POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E,
+ "PowerPC 604e v1.0")
+ POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E,
+ "PowerPC 604e v2.2")
+ POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E,
+ "PowerPC 604e v2.4")
+ POWERPC_DEF("604r", CPU_POWERPC_604R, 604E,
+ "PowerPC 604r (aka PIDA)")
+ /* PowerPC 7xx family */
+ POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740,
+ "PowerPC 740 v1.0 (G3)")
+ POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750,
+ "PowerPC 750 v1.0 (G3)")
+ POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740,
+ "PowerPC 740 v2.0 (G3)")
+ POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750,
+ "PowerPC 750 v2.0 (G3)")
+ POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740,
+ "PowerPC 740 v2.1 (G3)")
+ POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750,
+ "PowerPC 750 v2.1 (G3)")
+ POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740,
+ "PowerPC 740 v2.2 (G3)")
+ POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750,
+ "PowerPC 750 v2.2 (G3)")
+ POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740,
+ "PowerPC 740 v3.0 (G3)")
+ POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750,
+ "PowerPC 750 v3.0 (G3)")
+ POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740,
+ "PowerPC 740 v3.1 (G3)")
+ POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750,
+ "PowerPC 750 v3.1 (G3)")
+ POWERPC_DEF("740e", CPU_POWERPC_740E, 740,
+ "PowerPC 740E (G3)")
+ POWERPC_DEF("750e", CPU_POWERPC_750E, 750,
+ "PowerPC 750E (G3)")
+ POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740,
+ "PowerPC 740P (G3)")
+ POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750,
+ "PowerPC 750P (G3)")
+ POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl,
+ "PowerPC 750CL v1.0")
+ POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl,
+ "PowerPC 750CL v2.0")
+ POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx,
+ "PowerPC 750CX v1.0 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx,
+                "PowerPC 750CX v2.0 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx,
+ "PowerPC 750CX v2.1 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx,
+ "PowerPC 750CX v2.2 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx,
+ "PowerPC 750CXe v2.1 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx,
+ "PowerPC 750CXe v2.2 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx,
+ "PowerPC 750CXe v2.3 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx,
+ "PowerPC 750CXe v2.4 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx,
+ "PowerPC 750CXe v2.4b (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx,
+ "PowerPC 750CXe v3.0 (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx,
+ "PowerPC 750CXe v3.1 (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx,
+ "PowerPC 750CXe v3.1b (G3 embedded)")
+ POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx,
+ "PowerPC 750CXr (G3 embedded)")
+ POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx,
+ "PowerPC 750FL (G3 embedded)")
+ POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx,
+ "PowerPC 750FX v1.0 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx,
+ "PowerPC 750FX v2.0 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx,
+ "PowerPC 750FX v2.1 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx,
+ "PowerPC 750FX v2.2 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx,
+ "PowerPC 750FX v2.3 (G3 embedded)")
+ POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx,
+ "PowerPC 750GL (G3 embedded)")
+ POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx,
+ "PowerPC 750GX v1.0 (G3 embedded)")
+ POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx,
+ "PowerPC 750GX v1.1 (G3 embedded)")
+ POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx,
+ "PowerPC 750GX v1.2 (G3 embedded)")
+ POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750,
+ "PowerPC 750L v2.0 (G3 embedded)")
+ POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750,
+ "PowerPC 750L v2.1 (G3 embedded)")
+ POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750,
+ "PowerPC 750L v2.2 (G3 embedded)")
+ POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750,
+ "PowerPC 750L v3.0 (G3 embedded)")
+ POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750,
+ "PowerPC 750L v3.2 (G3 embedded)")
+ POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745,
+ "PowerPC 745 v1.0")
+ POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755,
+ "PowerPC 755 v1.0")
+ POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745,
+ "PowerPC 745 v1.1")
+ POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755,
+ "PowerPC 755 v1.1")
+ POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745,
+ "PowerPC 745 v2.0")
+ POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755,
+ "PowerPC 755 v2.0")
+ POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745,
+ "PowerPC 745 v2.1")
+ POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755,
+ "PowerPC 755 v2.1")
+ POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745,
+ "PowerPC 745 v2.2")
+ POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755,
+ "PowerPC 755 v2.2")
+ POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745,
+ "PowerPC 745 v2.3")
+ POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755,
+ "PowerPC 755 v2.3")
+ POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745,
+ "PowerPC 745 v2.4")
+ POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755,
+ "PowerPC 755 v2.4")
+ POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745,
+ "PowerPC 745 v2.5")
+ POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755,
+ "PowerPC 755 v2.5")
+ POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745,
+ "PowerPC 745 v2.6")
+ POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755,
+ "PowerPC 755 v2.6")
+ POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745,
+ "PowerPC 745 v2.7")
+ POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755,
+ "PowerPC 755 v2.7")
+ POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745,
+ "PowerPC 745 v2.8")
+ POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755,
+ "PowerPC 755 v2.8")
+ /* PowerPC 74xx family */
+ POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400,
+ "PowerPC 7400 v1.0 (G4)")
+ POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400,
+ "PowerPC 7400 v1.1 (G4)")
+ POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400,
+ "PowerPC 7400 v2.0 (G4)")
+ POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400,
+ "PowerPC 7400 v2.1 (G4)")
+ POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400,
+ "PowerPC 7400 v2.2 (G4)")
+ POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400,
+ "PowerPC 7400 v2.6 (G4)")
+ POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400,
+ "PowerPC 7400 v2.7 (G4)")
+ POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400,
+ "PowerPC 7400 v2.8 (G4)")
+ POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400,
+ "PowerPC 7400 v2.9 (G4)")
+ POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410,
+ "PowerPC 7410 v1.0 (G4)")
+ POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410,
+ "PowerPC 7410 v1.1 (G4)")
+ POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410,
+ "PowerPC 7410 v1.2 (G4)")
+ POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410,
+ "PowerPC 7410 v1.3 (G4)")
+ POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410,
+ "PowerPC 7410 v1.4 (G4)")
+ POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400,
+ "PowerPC 7448 v1.0 (G4)")
+ POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400,
+ "PowerPC 7448 v1.1 (G4)")
+ POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400,
+ "PowerPC 7448 v2.0 (G4)")
+ POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400,
+ "PowerPC 7448 v2.1 (G4)")
+ POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450,
+ "PowerPC 7450 v1.0 (G4)")
+ POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450,
+ "PowerPC 7450 v1.1 (G4)")
+ POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450,
+ "PowerPC 7450 v1.2 (G4)")
+ POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450,
+ "PowerPC 7450 v2.0 (G4)")
+ POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450,
+ "PowerPC 7450 v2.1 (G4)")
+ POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440,
+ "PowerPC 7441 v2.1 (G4)")
+ POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440,
+ "PowerPC 7441 v2.3 (G4)")
+ POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450,
+ "PowerPC 7451 v2.3 (G4)")
+ POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440,
+ "PowerPC 7441 v2.10 (G4)")
+ POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450,
+ "PowerPC 7451 v2.10 (G4)")
+ POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445,
+ "PowerPC 7445 v1.0 (G4)")
+ POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455,
+ "PowerPC 7455 v1.0 (G4)")
+ POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445,
+ "PowerPC 7445 v2.1 (G4)")
+ POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455,
+ "PowerPC 7455 v2.1 (G4)")
+ POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445,
+ "PowerPC 7445 v3.2 (G4)")
+ POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455,
+ "PowerPC 7455 v3.2 (G4)")
+ POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445,
+ "PowerPC 7445 v3.3 (G4)")
+ POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455,
+ "PowerPC 7455 v3.3 (G4)")
+ POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445,
+ "PowerPC 7445 v3.4 (G4)")
+ POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455,
+ "PowerPC 7455 v3.4 (G4)")
+ POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445,
+ "PowerPC 7447 v1.0 (G4)")
+ POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455,
+ "PowerPC 7457 v1.0 (G4)")
+ POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445,
+ "PowerPC 7447 v1.1 (G4)")
+ POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455,
+ "PowerPC 7457 v1.1 (G4)")
+ POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455,
+ "PowerPC 7457 v1.2 (G4)")
+ POWERPC_DEF("7447a_v1.0", CPU_POWERPC_74x7A_v10, 7445,
+ "PowerPC 7447A v1.0 (G4)")
+ POWERPC_DEF("7457a_v1.0", CPU_POWERPC_74x7A_v10, 7455,
+ "PowerPC 7457A v1.0 (G4)")
+ POWERPC_DEF("7447a_v1.1", CPU_POWERPC_74x7A_v11, 7445,
+ "PowerPC 7447A v1.1 (G4)")
+ POWERPC_DEF("7457a_v1.1", CPU_POWERPC_74x7A_v11, 7455,
+ "PowerPC 7457A v1.1 (G4)")
+ POWERPC_DEF("7447a_v1.2", CPU_POWERPC_74x7A_v12, 7445,
+ "PowerPC 7447A v1.2 (G4)")
+ POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455,
+ "PowerPC 7457A v1.2 (G4)")
+    /* 64-bit PowerPC */
+#if defined(TARGET_PPC64)
+ POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
+ "PowerPC 970 v2.2")
+ POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
+ "PowerPC 970FX v1.0 (G5)")
+ POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970,
+ "PowerPC 970FX v2.0 (G5)")
+ POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970,
+ "PowerPC 970FX v2.1 (G5)")
+ POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970,
+ "PowerPC 970FX v3.0 (G5)")
+ POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970,
+ "PowerPC 970FX v3.1 (G5)")
+ POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970,
+ "PowerPC 970MP v1.0")
+ POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970,
+ "PowerPC 970MP v1.1")
+ POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P,
+ "POWER5+ v2.1")
+ POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
+ "POWER7 v2.3")
+ POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
+ "POWER7+ v2.1")
+ POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
+ "POWER8E v2.1")
+ POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8,
+ "POWER8 v2.0")
+ POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8,
+ "POWER8NVL v1.0")
+ POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9,
+ "POWER9 v1.0")
+ POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9,
+ "POWER9 v2.0")
+ POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10,
+ "POWER10 v1.0")
+ POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10,
+ "POWER10 v2.0")
+#endif /* defined (TARGET_PPC64) */
+
+/***************************************************************************/
+/* PowerPC CPU aliases */
+
+PowerPCCPUAlias ppc_cpu_aliases[] = {
+ { "403", "403gc" },
+ { "405", "405d4" },
+ { "405cr", "405crc" },
+ { "405gp", "405gpd" },
+ { "405gpe", "405crc" },
+ { "x2vp7", "x2vp4" },
+ { "x2vp50", "x2vp20" },
+
+ { "440ep", "440epb" },
+ { "460ex", "460exb" },
+#if defined(TODO_USER_ONLY)
+ { "440gp", "440gpc" },
+ { "440gr", "440gra" },
+ { "440gx", "440gxf" },
+
+ { "rcpu", "mpc5xx" },
+ /* MPC5xx microcontrollers */
+ { "mgt560", "mpc5xx" },
+ { "mpc509", "mpc5xx" },
+ { "mpc533", "mpc5xx" },
+ { "mpc534", "mpc5xx" },
+ { "mpc555", "mpc5xx" },
+ { "mpc556", "mpc5xx" },
+ { "mpc560", "mpc5xx" },
+ { "mpc561", "mpc5xx" },
+ { "mpc562", "mpc5xx" },
+ { "mpc563", "mpc5xx" },
+ { "mpc564", "mpc5xx" },
+ { "mpc565", "mpc5xx" },
+ { "mpc566", "mpc5xx" },
+
+ { "powerquicc", "mpc8xx" },
+ /* MPC8xx microcontrollers */
+ { "mgt823", "mpc8xx" },
+ { "mpc821", "mpc8xx" },
+ { "mpc823", "mpc8xx" },
+ { "mpc850", "mpc8xx" },
+ { "mpc852t", "mpc8xx" },
+ { "mpc855t", "mpc8xx" },
+ { "mpc857", "mpc8xx" },
+ { "mpc859", "mpc8xx" },
+ { "mpc860", "mpc8xx" },
+ { "mpc862", "mpc8xx" },
+ { "mpc866", "mpc8xx" },
+ { "mpc870", "mpc8xx" },
+ { "mpc875", "mpc8xx" },
+ { "mpc880", "mpc8xx" },
+ { "mpc885", "mpc8xx" },
+#endif
+
+ /* PowerPC MPC603 microcontrollers */
+ { "mpc8240", "603" },
+
+ { "mpc52xx", "mpc5200_v12" },
+ { "mpc5200", "mpc5200_v12" },
+ { "mpc5200b", "mpc5200b_v21" },
+
+ { "mpc82xx", "g2legp3" },
+ { "powerquicc-ii", "g2legp3" },
+ { "mpc8241", "g2hip4" },
+ { "mpc8245", "g2hip4" },
+ { "mpc8247", "g2legp3" },
+ { "mpc8248", "g2legp3" },
+ { "mpc8250", "g2hip4" },
+ { "mpc8250_hip3", "g2hip3" },
+ { "mpc8250_hip4", "g2hip4" },
+ { "mpc8255", "g2hip4" },
+ { "mpc8255_hip3", "g2hip3" },
+ { "mpc8255_hip4", "g2hip4" },
+ { "mpc8260", "g2hip4" },
+ { "mpc8260_hip3", "g2hip3" },
+ { "mpc8260_hip4", "g2hip4" },
+ { "mpc8264", "g2hip4" },
+ { "mpc8264_hip3", "g2hip3" },
+ { "mpc8264_hip4", "g2hip4" },
+ { "mpc8265", "g2hip4" },
+ { "mpc8265_hip3", "g2hip3" },
+ { "mpc8265_hip4", "g2hip4" },
+ { "mpc8266", "g2hip4" },
+ { "mpc8266_hip3", "g2hip3" },
+ { "mpc8266_hip4", "g2hip4" },
+ { "mpc8270", "g2legp3" },
+ { "mpc8271", "g2legp3" },
+ { "mpc8272", "g2legp3" },
+ { "mpc8275", "g2legp3" },
+ { "mpc8280", "g2legp3" },
+ { "e200", "e200z6" },
+ { "e300", "e300c3" },
+ { "mpc8347", "mpc8347t" },
+ { "mpc8347a", "mpc8347at" },
+ { "mpc8347e", "mpc8347et" },
+ { "mpc8347ea", "mpc8347eat" },
+ { "e500", "e500v2_v22" },
+ { "e500v1", "e500_v20" },
+ { "e500v2", "e500v2_v22" },
+ { "mpc8533", "mpc8533_v11" },
+ { "mpc8533e", "mpc8533e_v11" },
+ { "mpc8540", "mpc8540_v21" },
+ { "mpc8541", "mpc8541_v11" },
+ { "mpc8541e", "mpc8541e_v11" },
+ { "mpc8543", "mpc8543_v21" },
+ { "mpc8543e", "mpc8543e_v21" },
+ { "mpc8544", "mpc8544_v11" },
+ { "mpc8544e", "mpc8544e_v11" },
+ { "mpc8545", "mpc8545_v21" },
+ { "mpc8545e", "mpc8545e_v21" },
+ { "mpc8547e", "mpc8547e_v21" },
+ { "mpc8548", "mpc8548_v21" },
+ { "mpc8548e", "mpc8548e_v21" },
+ { "mpc8555", "mpc8555_v11" },
+ { "mpc8555e", "mpc8555e_v11" },
+ { "mpc8560", "mpc8560_v21" },
+ { "601", "601_v2" },
+ { "601v", "601_v2" },
+ { "vanilla", "603" },
+ { "603e", "603e_v4.1" },
+ { "stretch", "603e_v4.1" },
+ { "vaillant", "603e7v" },
+ { "603r", "603e7t" },
+ { "goldeneye", "603e7t" },
+ { "604e", "604e_v2.4" },
+ { "sirocco", "604e_v2.4" },
+ { "mach5", "604r" },
+ { "740", "740_v3.1" },
+ { "arthur", "740_v3.1" },
+ { "750", "750_v3.1" },
+ { "typhoon", "750_v3.1" },
+ { "g3", "750_v3.1" },
+ { "conan/doyle", "750p" },
+ { "750cl", "750cl_v2.0" },
+ { "750cx", "750cx_v2.2" },
+ { "750cxe", "750cxe_v3.1b" },
+ { "750fx", "750fx_v2.3" },
+ { "750gx", "750gx_v1.2" },
+ { "750l", "750l_v3.2" },
+ { "lonestar", "750l_v3.2" },
+ { "745", "745_v2.8" },
+ { "755", "755_v2.8" },
+ { "goldfinger", "755_v2.8" },
+ { "7400", "7400_v2.9" },
+ { "max", "7400_v2.9" },
+ { "g4", "7400_v2.9" },
+ { "7410", "7410_v1.4" },
+ { "nitro", "7410_v1.4" },
+ { "7448", "7448_v2.1" },
+ { "7450", "7450_v2.1" },
+ { "vger", "7450_v2.1" },
+ { "7441", "7441_v2.3" },
+ { "7451", "7451_v2.3" },
+ { "7445", "7445_v3.2" },
+ { "7455", "7455_v3.2" },
+ { "apollo6", "7455_v3.2" },
+ { "7447", "7447_v1.1" },
+ { "7457", "7457_v1.2" },
+ { "apollo7", "7457_v1.2" },
+ { "7447a", "7447a_v1.2" },
+ { "7457a", "7457a_v1.2" },
+ { "apollo7pm", "7457a_v1.0" },
+#if defined(TARGET_PPC64)
+ { "970", "970_v2.2" },
+ { "970fx", "970fx_v3.1" },
+ { "970mp", "970mp_v1.1" },
+ { "power5+", "power5+_v2.1" },
+ { "power5gs", "power5+_v2.1" },
+ { "power7", "power7_v2.3" },
+ { "power7+", "power7+_v2.1" },
+ { "power8e", "power8e_v2.1" },
+ { "power8", "power8_v2.0" },
+ { "power8nvl", "power8nvl_v1.0" },
+ { "power9", "power9_v2.0" },
+ { "power10", "power10_v2.0" },
+#endif
+
+ /* Generic PowerPCs */
+#if defined(TARGET_PPC64)
+ { "ppc64", "970fx_v3.1" },
+#endif
+ { "ppc32", "604" },
+ { "ppc", "604" },
+ { "default", "604" },
+ { NULL, NULL }
+};
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
new file mode 100644
index 000000000..095259275
--- /dev/null
+++ b/target/ppc/cpu-models.h
@@ -0,0 +1,505 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TARGET_PPC_CPU_MODELS_H
+#define TARGET_PPC_CPU_MODELS_H
+
+/**
+ * PowerPCCPUAlias:
+ * @alias: The alias name.
+ * @model: The CPU model @alias refers to, which resolves directly to a CPU type
+ *
+ * A mapping entry from CPU @alias to CPU @model.
+ */
+typedef struct PowerPCCPUAlias {
+ const char *alias;
+ const char *model;
+} PowerPCCPUAlias;
+
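+/*
+ * For example, the entry { "g3", "750_v3.1" } in cpu-models.c makes
+ * "-cpu g3" resolve to the "750_v3.1" CPU type.
+ */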
+extern PowerPCCPUAlias ppc_cpu_aliases[];
+
+/*****************************************************************************/
+/* PVR definitions for most known PowerPC */
+enum {
+ /* PowerPC 401 family */
+ /* Generic PowerPC 401 */
+#define CPU_POWERPC_401 CPU_POWERPC_401G2
+ /* PowerPC 401 cores */
+ CPU_POWERPC_401A1 = 0x00210000,
+ CPU_POWERPC_401B2 = 0x00220000,
+ CPU_POWERPC_401C2 = 0x00230000,
+ CPU_POWERPC_401D2 = 0x00240000,
+ CPU_POWERPC_401E2 = 0x00250000,
+ CPU_POWERPC_401F2 = 0x00260000,
+ CPU_POWERPC_401G2 = 0x00270000,
+    /* PowerPC 401 microcontrollers */
+#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2
+ /* IBM Processor for Network Resources */
+ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */
+ /* PowerPC 403 family */
+ /* PowerPC 403 microcontrollers */
+ CPU_POWERPC_403GA = 0x00200011,
+ CPU_POWERPC_403GB = 0x00200100,
+ CPU_POWERPC_403GC = 0x00200200,
+ CPU_POWERPC_403GCX = 0x00201400,
+ /* PowerPC 405 family */
+ /* PowerPC 405 cores */
+ CPU_POWERPC_405D2 = 0x20010000,
+ CPU_POWERPC_405D4 = 0x41810000,
+    /* PowerPC 405 microcontrollers */
+ /* XXX: missing 0x200108a0 */
+ CPU_POWERPC_405CRa = 0x40110041,
+ CPU_POWERPC_405CRb = 0x401100C5,
+ CPU_POWERPC_405CRc = 0x40110145,
+ CPU_POWERPC_405EP = 0x51210950,
+ CPU_POWERPC_405EZ = 0x41511460, /* 0x51210950 ? */
+ CPU_POWERPC_405GPa = 0x40110000,
+ CPU_POWERPC_405GPb = 0x40110040,
+ CPU_POWERPC_405GPc = 0x40110082,
+ CPU_POWERPC_405GPd = 0x401100C4,
+ CPU_POWERPC_405GPR = 0x50910951,
+ CPU_POWERPC_405LP = 0x41F10000,
+ /* IBM network processors */
+ CPU_POWERPC_NPE405H = 0x414100C0,
+ CPU_POWERPC_NPE405H2 = 0x41410140,
+ CPU_POWERPC_NPE405L = 0x416100C0,
+ CPU_POWERPC_NPE4GS3 = 0x40B10000,
+ /* IBM STBxxx (PowerPC 401/403/405 core based microcontrollers) */
+ CPU_POWERPC_STB03 = 0x40310000, /* 0x40130000 ? */
+ CPU_POWERPC_STB04 = 0x41810000,
+ CPU_POWERPC_STB25 = 0x51510950,
+ /* Xilinx cores */
+ CPU_POWERPC_X2VP4 = 0x20010820,
+ CPU_POWERPC_X2VP20 = 0x20010860,
+ /* PowerPC 440 family */
+ /* Generic PowerPC 440 */
+#define CPU_POWERPC_440 CPU_POWERPC_440GXf
+ /* PowerPC 440 cores */
+ CPU_POWERPC_440_XILINX = 0x7ff21910,
+    /* PowerPC 440 microcontrollers */
+ CPU_POWERPC_440EPa = 0x42221850,
+ CPU_POWERPC_440EPb = 0x422218D3,
+ CPU_POWERPC_440GPb = 0x40120440,
+ CPU_POWERPC_440GPc = 0x40120481,
+#define CPU_POWERPC_440GRa CPU_POWERPC_440EPb
+ CPU_POWERPC_440GRX = 0x200008D0,
+#define CPU_POWERPC_440EPX CPU_POWERPC_440GRX
+ CPU_POWERPC_440GXa = 0x51B21850,
+ CPU_POWERPC_440GXb = 0x51B21851,
+ CPU_POWERPC_440GXc = 0x51B21892,
+ CPU_POWERPC_440GXf = 0x51B21894,
+ CPU_POWERPC_440SP = 0x53221850,
+ CPU_POWERPC_440SP2 = 0x53221891,
+ CPU_POWERPC_440SPE = 0x53421890,
+ CPU_POWERPC_460EXb = 0x130218A4, /* called 460 but 440 core */
+ /* Freescale embedded PowerPC cores */
+ /* PowerPC MPC 5xx cores (aka RCPU) */
+ CPU_POWERPC_MPC5xx = 0x00020020,
+ /* PowerPC MPC 8xx cores (aka PowerQUICC) */
+ CPU_POWERPC_MPC8xx = 0x00500000,
+ /* G2 cores (aka PowerQUICC-II) */
+ CPU_POWERPC_G2 = 0x00810011,
+ CPU_POWERPC_G2H4 = 0x80811010,
+ CPU_POWERPC_G2gp = 0x80821010,
+ CPU_POWERPC_G2ls = 0x90810010,
+ CPU_POWERPC_MPC603 = 0x00810100,
+ CPU_POWERPC_G2_HIP3 = 0x00810101,
+ CPU_POWERPC_G2_HIP4 = 0x80811014,
+ /* G2_LE core (aka PowerQUICC-II) */
+ CPU_POWERPC_G2LE = 0x80820010,
+ CPU_POWERPC_G2LEgp = 0x80822010,
+ CPU_POWERPC_G2LEls = 0xA0822010,
+ CPU_POWERPC_G2LEgp1 = 0x80822011,
+ CPU_POWERPC_G2LEgp3 = 0x80822013,
+ /* MPC52xx microcontrollers */
+ /* XXX: MPC 5121 ? */
+#define CPU_POWERPC_MPC5200_v10 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v11 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v12 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B_v20 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B_v21 CPU_POWERPC_G2LEgp1
+ /* e200 family */
+ /* e200 cores */
+ CPU_POWERPC_e200z5 = 0x81000000,
+ CPU_POWERPC_e200z6 = 0x81120000,
+ /* e300 family */
+ /* e300 cores */
+ CPU_POWERPC_e300c1 = 0x00830010,
+ CPU_POWERPC_e300c2 = 0x00840010,
+ CPU_POWERPC_e300c3 = 0x00850010,
+ CPU_POWERPC_e300c4 = 0x00860010,
+ /* MPC83xx microcontrollers */
+#define CPU_POWERPC_MPC834x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC837x CPU_POWERPC_e300c4
+ /* e500 family */
+ /* e500 cores */
+#define CPU_POWERPC_e500 CPU_POWERPC_e500v2_v22
+ CPU_POWERPC_e500v1_v10 = 0x80200010,
+ CPU_POWERPC_e500v1_v20 = 0x80200020,
+ CPU_POWERPC_e500v2_v10 = 0x80210010,
+ CPU_POWERPC_e500v2_v11 = 0x80210011,
+ CPU_POWERPC_e500v2_v20 = 0x80210020,
+ CPU_POWERPC_e500v2_v21 = 0x80210021,
+ CPU_POWERPC_e500v2_v22 = 0x80210022,
+ CPU_POWERPC_e500v2_v30 = 0x80210030,
+ CPU_POWERPC_e500mc = 0x80230020,
+ CPU_POWERPC_e5500 = 0x80240020,
+ CPU_POWERPC_e6500 = 0x80400020,
+ /* MPC85xx microcontrollers */
+#define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8533E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8540_v10 CPU_POWERPC_e500v1_v10
+#define CPU_POWERPC_MPC8540_v20 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8540_v21 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8543_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8543E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8544E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8545_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8545E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8547E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8547E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8547E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8572 CPU_POWERPC_e500v2_v30
+#define CPU_POWERPC_MPC8572E CPU_POWERPC_e500v2_v30
+ /* e600 family */
+ /* e600 cores */
+ CPU_POWERPC_e600 = 0x80040010,
+ /* MPC86xx microcontrollers */
+#define CPU_POWERPC_MPC8610 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641D CPU_POWERPC_e600
+ /* PowerPC 6xx cores */
+ CPU_POWERPC_601_v0 = 0x00010001,
+ CPU_POWERPC_601_v1 = 0x00010001,
+ CPU_POWERPC_601_v2 = 0x00010002,
+ CPU_POWERPC_602 = 0x00050100,
+ CPU_POWERPC_603 = 0x00030100,
+ CPU_POWERPC_603E_v11 = 0x00060101,
+ CPU_POWERPC_603E_v12 = 0x00060102,
+ CPU_POWERPC_603E_v13 = 0x00060103,
+ CPU_POWERPC_603E_v14 = 0x00060104,
+ CPU_POWERPC_603E_v22 = 0x00060202,
+ CPU_POWERPC_603E_v3 = 0x00060300,
+ CPU_POWERPC_603E_v4 = 0x00060400,
+ CPU_POWERPC_603E_v41 = 0x00060401,
+ CPU_POWERPC_603E7t = 0x00071201,
+ CPU_POWERPC_603E7v = 0x00070100,
+ CPU_POWERPC_603E7v1 = 0x00070101,
+ CPU_POWERPC_603E7v2 = 0x00070201,
+ CPU_POWERPC_603E7 = 0x00070200,
+ CPU_POWERPC_603P = 0x00070000,
+ /* XXX: missing 0x00040303 (604) */
+ CPU_POWERPC_604 = 0x00040103,
+ /* XXX: missing 0x00091203 */
+ /* XXX: missing 0x00092110 */
+ /* XXX: missing 0x00092120 */
+ CPU_POWERPC_604E_v10 = 0x00090100,
+ CPU_POWERPC_604E_v22 = 0x00090202,
+ CPU_POWERPC_604E_v24 = 0x00090204,
+ /* XXX: missing 0x000a0100 */
+ /* XXX: missing 0x00093102 */
+ CPU_POWERPC_604R = 0x000a0101,
+ /* PowerPC 740/750 cores (aka G3) */
+ /* XXX: missing 0x00084202 */
+ CPU_POWERPC_7x0_v10 = 0x00080100,
+ CPU_POWERPC_7x0_v20 = 0x00080200,
+ CPU_POWERPC_7x0_v21 = 0x00080201,
+ CPU_POWERPC_7x0_v22 = 0x00080202,
+ CPU_POWERPC_7x0_v30 = 0x00080300,
+ CPU_POWERPC_7x0_v31 = 0x00080301,
+ CPU_POWERPC_740E = 0x00080100,
+ CPU_POWERPC_750E = 0x00080200,
+ CPU_POWERPC_7x0P = 0x10080000,
+ /* XXX: missing 0x00087010 (CL ?) */
+ CPU_POWERPC_750CL_v10 = 0x00087200,
+ CPU_POWERPC_750CL_v20 = 0x00087210, /* aka rev E */
+ CPU_POWERPC_750CX_v10 = 0x00082100,
+ CPU_POWERPC_750CX_v20 = 0x00082200,
+ CPU_POWERPC_750CX_v21 = 0x00082201,
+ CPU_POWERPC_750CX_v22 = 0x00082202,
+ CPU_POWERPC_750CXE_v21 = 0x00082211,
+ CPU_POWERPC_750CXE_v22 = 0x00082212,
+ CPU_POWERPC_750CXE_v23 = 0x00082213,
+ CPU_POWERPC_750CXE_v24 = 0x00082214,
+ CPU_POWERPC_750CXE_v24b = 0x00083214,
+ CPU_POWERPC_750CXE_v30 = 0x00082310,
+ CPU_POWERPC_750CXE_v31 = 0x00082311,
+ CPU_POWERPC_750CXE_v31b = 0x00083311,
+ CPU_POWERPC_750CXR = 0x00083410,
+ CPU_POWERPC_750FL = 0x70000203,
+ CPU_POWERPC_750FX_v10 = 0x70000100,
+ CPU_POWERPC_750FX_v20 = 0x70000200,
+ CPU_POWERPC_750FX_v21 = 0x70000201,
+ CPU_POWERPC_750FX_v22 = 0x70000202,
+ CPU_POWERPC_750FX_v23 = 0x70000203,
+ CPU_POWERPC_750GL = 0x70020102,
+ CPU_POWERPC_750GX_v10 = 0x70020100,
+ CPU_POWERPC_750GX_v11 = 0x70020101,
+ CPU_POWERPC_750GX_v12 = 0x70020102,
+ CPU_POWERPC_750L_v20 = 0x00088200,
+ CPU_POWERPC_750L_v21 = 0x00088201,
+ CPU_POWERPC_750L_v22 = 0x00088202,
+ CPU_POWERPC_750L_v30 = 0x00088300,
+ CPU_POWERPC_750L_v32 = 0x00088302,
+ /* PowerPC 745/755 cores */
+ CPU_POWERPC_7x5_v10 = 0x00083100,
+ CPU_POWERPC_7x5_v11 = 0x00083101,
+ CPU_POWERPC_7x5_v20 = 0x00083200,
+ CPU_POWERPC_7x5_v21 = 0x00083201,
+ CPU_POWERPC_7x5_v22 = 0x00083202, /* aka D */
+ CPU_POWERPC_7x5_v23 = 0x00083203, /* aka E */
+ CPU_POWERPC_7x5_v24 = 0x00083204,
+ CPU_POWERPC_7x5_v25 = 0x00083205,
+ CPU_POWERPC_7x5_v26 = 0x00083206,
+ CPU_POWERPC_7x5_v27 = 0x00083207,
+ CPU_POWERPC_7x5_v28 = 0x00083208,
+ /* PowerPC 74xx cores (aka G4) */
+ /* XXX: missing 0x000C1101 */
+ CPU_POWERPC_7400_v10 = 0x000C0100,
+ CPU_POWERPC_7400_v11 = 0x000C0101,
+ CPU_POWERPC_7400_v20 = 0x000C0200,
+ CPU_POWERPC_7400_v21 = 0x000C0201,
+ CPU_POWERPC_7400_v22 = 0x000C0202,
+ CPU_POWERPC_7400_v26 = 0x000C0206,
+ CPU_POWERPC_7400_v27 = 0x000C0207,
+ CPU_POWERPC_7400_v28 = 0x000C0208,
+ CPU_POWERPC_7400_v29 = 0x000C0209,
+ CPU_POWERPC_7410_v10 = 0x800C1100,
+ CPU_POWERPC_7410_v11 = 0x800C1101,
+ CPU_POWERPC_7410_v12 = 0x800C1102, /* aka C */
+ CPU_POWERPC_7410_v13 = 0x800C1103, /* aka D */
+ CPU_POWERPC_7410_v14 = 0x800C1104, /* aka E */
+ CPU_POWERPC_7448_v10 = 0x80040100,
+ CPU_POWERPC_7448_v11 = 0x80040101,
+ CPU_POWERPC_7448_v20 = 0x80040200,
+ CPU_POWERPC_7448_v21 = 0x80040201,
+ CPU_POWERPC_7450_v10 = 0x80000100,
+ CPU_POWERPC_7450_v11 = 0x80000101,
+ CPU_POWERPC_7450_v12 = 0x80000102,
+ CPU_POWERPC_7450_v20 = 0x80000200, /* aka A, B, C, D: 2.04 */
+ CPU_POWERPC_7450_v21 = 0x80000201, /* aka E */
+ CPU_POWERPC_74x1_v23 = 0x80000203, /* aka G: 2.3 */
+ /* XXX: this entry might be a bug in some documentation */
+ CPU_POWERPC_74x1_v210 = 0x80000210, /* aka G: 2.3 ? */
+ CPU_POWERPC_74x5_v10 = 0x80010100,
+ /* XXX: missing 0x80010200 */
+ CPU_POWERPC_74x5_v21 = 0x80010201, /* aka C: 2.1 */
+ CPU_POWERPC_74x5_v32 = 0x80010302,
+ CPU_POWERPC_74x5_v33 = 0x80010303, /* aka F: 3.3 */
+ CPU_POWERPC_74x5_v34 = 0x80010304, /* aka G: 3.4 */
+ CPU_POWERPC_74x7_v10 = 0x80020100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7_v11 = 0x80020101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7_v12 = 0x80020102, /* aka C: 1.2 */
+ CPU_POWERPC_74x7A_v10 = 0x80030100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7A_v11 = 0x80030101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7A_v12 = 0x80030102, /* aka C: 1.2 */
+    /* 64-bit PowerPC */
+#if defined(TARGET_PPC64)
+ CPU_POWERPC_620 = 0x00140000,
+ CPU_POWERPC_630 = 0x00400000,
+ CPU_POWERPC_631 = 0x00410104,
+ CPU_POWERPC_POWER4 = 0x00350000,
+ CPU_POWERPC_POWER4P = 0x00380000,
+ /* XXX: missing 0x003A0201 */
+ CPU_POWERPC_POWER5 = 0x003A0203,
+ CPU_POWERPC_POWER5P_v21 = 0x003B0201,
+ CPU_POWERPC_POWER6 = 0x003E0000,
+ CPU_POWERPC_POWER_SERVER_MASK = 0xFFFF0000,
+ CPU_POWERPC_POWER7_BASE = 0x003F0000,
+ CPU_POWERPC_POWER7_v23 = 0x003F0203,
+ CPU_POWERPC_POWER7P_BASE = 0x004A0000,
+ CPU_POWERPC_POWER7P_v21 = 0x004A0201,
+ CPU_POWERPC_POWER8E_BASE = 0x004B0000,
+ CPU_POWERPC_POWER8E_v21 = 0x004B0201,
+ CPU_POWERPC_POWER8_BASE = 0x004D0000,
+ CPU_POWERPC_POWER8_v20 = 0x004D0200,
+ CPU_POWERPC_POWER8NVL_BASE = 0x004C0000,
+ CPU_POWERPC_POWER8NVL_v10 = 0x004C0100,
+ CPU_POWERPC_POWER9_BASE = 0x004E0000,
+ CPU_POWERPC_POWER9_DD1 = 0x004E0100,
+ CPU_POWERPC_POWER9_DD20 = 0x004E1200,
+ CPU_POWERPC_POWER10_BASE = 0x00800000,
+ CPU_POWERPC_POWER10_DD1 = 0x00800100,
+ CPU_POWERPC_POWER10_DD20 = 0x00800200,
+ CPU_POWERPC_970_v22 = 0x00390202,
+ CPU_POWERPC_970FX_v10 = 0x00391100,
+ CPU_POWERPC_970FX_v20 = 0x003C0200,
+ CPU_POWERPC_970FX_v21 = 0x003C0201,
+ CPU_POWERPC_970FX_v30 = 0x003C0300,
+ CPU_POWERPC_970FX_v31 = 0x003C0301,
+ CPU_POWERPC_970MP_v10 = 0x00440100,
+ CPU_POWERPC_970MP_v11 = 0x00440101,
+#define CPU_POWERPC_CELL CPU_POWERPC_CELL_v32
+ CPU_POWERPC_CELL_v10 = 0x00700100,
+ CPU_POWERPC_CELL_v20 = 0x00700400,
+ CPU_POWERPC_CELL_v30 = 0x00700500,
+ CPU_POWERPC_CELL_v31 = 0x00700501,
+#define CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v31
+ CPU_POWERPC_RS64 = 0x00330000,
+ CPU_POWERPC_RS64II = 0x00340000,
+ CPU_POWERPC_RS64III = 0x00360000,
+ CPU_POWERPC_RS64IV = 0x00370000,
+#endif /* defined(TARGET_PPC64) */
+ /* Original POWER */
+ /*
+ * XXX: should be POWER (RIOS), RSC3308, RSC4608,
+ * POWER2 (RIOS2) & RSC2 (P2SC) here
+ */
+ /* PA Semi core */
+ CPU_POWERPC_PA6T = 0x00900000,
+};
+
+/* Logical PVR definitions for sPAPR */
+enum {
+ CPU_POWERPC_LOGICAL_2_04 = 0x0F000001,
+ CPU_POWERPC_LOGICAL_2_05 = 0x0F000002,
+ CPU_POWERPC_LOGICAL_2_06 = 0x0F000003,
+ CPU_POWERPC_LOGICAL_2_06_PLUS = 0x0F100003,
+ CPU_POWERPC_LOGICAL_2_07 = 0x0F000004,
+ CPU_POWERPC_LOGICAL_3_00 = 0x0F000005,
+ CPU_POWERPC_LOGICAL_3_10 = 0x0F000006,
+};
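+
+/*
+ * These logical PVRs identify Power ISA compatibility levels rather than
+ * concrete chips; they are the values compat_by_pvr() in compat.c matches
+ * and ppc_set_compat() stores into cpu->compat_pvr.
+ */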
+
+/* System version register (used on MPC 8xxx) */
+enum {
+ POWERPC_SVR_NONE = 0x00000000,
+ POWERPC_SVR_5200_v10 = 0x80110010,
+ POWERPC_SVR_5200_v11 = 0x80110011,
+ POWERPC_SVR_5200_v12 = 0x80110012,
+ POWERPC_SVR_5200B_v20 = 0x80110020,
+ POWERPC_SVR_5200B_v21 = 0x80110021,
+#define POWERPC_SVR_55xx POWERPC_SVR_5567
+ POWERPC_SVR_8343 = 0x80570010,
+ POWERPC_SVR_8343A = 0x80570030,
+ POWERPC_SVR_8343E = 0x80560010,
+ POWERPC_SVR_8343EA = 0x80560030,
+ POWERPC_SVR_8347P = 0x80550010, /* PBGA package */
+ POWERPC_SVR_8347T = 0x80530010, /* TBGA package */
+ POWERPC_SVR_8347AP = 0x80550030, /* PBGA package */
+ POWERPC_SVR_8347AT = 0x80530030, /* TBGA package */
+ POWERPC_SVR_8347EP = 0x80540010, /* PBGA package */
+ POWERPC_SVR_8347ET = 0x80520010, /* TBGA package */
+ POWERPC_SVR_8347EAP = 0x80540030, /* PBGA package */
+ POWERPC_SVR_8347EAT = 0x80520030, /* TBGA package */
+ POWERPC_SVR_8349 = 0x80510010,
+ POWERPC_SVR_8349A = 0x80510030,
+ POWERPC_SVR_8349E = 0x80500010,
+ POWERPC_SVR_8349EA = 0x80500030,
+#define POWERPC_SVR_E500 0x40000000
+ POWERPC_SVR_8377 = 0x80C70010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8377E = 0x80C60010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378 = 0x80C50010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378E = 0x80C40010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379 = 0x80C30010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379E = 0x80C00010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533_v10 = 0x80340010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533_v11 = 0x80340011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533E_v10 = 0x803C0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533E_v11 = 0x803C0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v10 = 0x80300010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v20 = 0x80300020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v21 = 0x80300021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541_v10 = 0x80720010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541_v11 = 0x80720011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541E_v10 = 0x807A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541E_v11 = 0x807A0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v10 = 0x80320010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v11 = 0x80320011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v20 = 0x80320020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v21 = 0x80320021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v10 = 0x803A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v11 = 0x803A0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v20 = 0x803A0020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v21 = 0x803A0021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544_v10 = 0x80340110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544_v11 = 0x80340111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544E_v10 = 0x803C0110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544E_v11 = 0x803C0111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545_v20 = 0x80310220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545_v21 = 0x80310221 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545E_v20 = 0x80390220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545E_v21 = 0x80390221 | POWERPC_SVR_E500,
+ POWERPC_SVR_8547E_v20 = 0x80390120 | POWERPC_SVR_E500,
+ POWERPC_SVR_8547E_v21 = 0x80390121 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v10 = 0x80310010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v11 = 0x80310011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v20 = 0x80310020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v21 = 0x80310021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v10 = 0x80390010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v11 = 0x80390011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v20 = 0x80390020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v21 = 0x80390021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555_v10 = 0x80710010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555_v11 = 0x80710011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555E_v10 = 0x80790010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555E_v11 = 0x80790011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v10 = 0x80700010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v20 = 0x80700020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v21 = 0x80700021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567 = 0x80750111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567E = 0x807D0111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568 = 0x80750011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568E = 0x807D0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572 = 0x80E00010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572E = 0x80E80010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8610 = 0x80A00011,
+ POWERPC_SVR_8641 = 0x80900021,
+ POWERPC_SVR_8641D = 0x80900121,
+};
+
+#endif
diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h
new file mode 100644
index 000000000..37b458d33
--- /dev/null
+++ b/target/ppc/cpu-param.h
@@ -0,0 +1,37 @@
+/*
+ * PowerPC cpu parameters for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef PPC_CPU_PARAM_H
+#define PPC_CPU_PARAM_H 1
+
+#ifdef TARGET_PPC64
+# define TARGET_LONG_BITS 64
+/*
+ * Note that the official physical address space is 62-M bits, where M
+ * is implementation dependent. I've not looked up M for the set of
+ * cpus we emulate at the system level.
+ */
+#define TARGET_PHYS_ADDR_SPACE_BITS 62
+/*
+ * Note that the PPC environment architecture talks about 80-bit virtual
+ * addresses, with segmentation. Obviously that's not all visible to a
+ * single process, which is all we're concerned with here.
+ */
+# ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+# endif
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 36
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define TARGET_PAGE_BITS 12
+#define NB_MMU_MODES 10
+
+#endif
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
new file mode 100644
index 000000000..5800fa324
--- /dev/null
+++ b/target/ppc/cpu-qom.h
@@ -0,0 +1,225 @@
+/*
+ * QEMU PowerPC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_PPC_CPU_QOM_H
+#define QEMU_PPC_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#ifdef TARGET_PPC64
+#define TYPE_POWERPC_CPU "powerpc64-cpu"
+#else
+#define TYPE_POWERPC_CPU "powerpc-cpu"
+#endif
+
+OBJECT_DECLARE_TYPE(PowerPCCPU, PowerPCCPUClass,
+ POWERPC_CPU)
+
+typedef struct CPUPPCState CPUPPCState;
+typedef struct ppc_tb_t ppc_tb_t;
+typedef struct ppc_dcr_t ppc_dcr_t;
+
+/*****************************************************************************/
+/* MMU model */
+typedef enum powerpc_mmu_t powerpc_mmu_t;
+enum powerpc_mmu_t {
+ POWERPC_MMU_UNKNOWN = 0x00000000,
+ /* Standard 32-bit PowerPC MMU */
+ POWERPC_MMU_32B = 0x00000001,
+ /* PowerPC 6xx MMU with software TLB */
+ POWERPC_MMU_SOFT_6xx = 0x00000002,
+ /* PowerPC 74xx MMU with software TLB */
+ POWERPC_MMU_SOFT_74xx = 0x00000003,
+ /* PowerPC 4xx MMU with software TLB */
+ POWERPC_MMU_SOFT_4xx = 0x00000004,
+ /* PowerPC 4xx MMU with software TLB and zone protection */
+ POWERPC_MMU_SOFT_4xx_Z = 0x00000005,
+ /* PowerPC MMU in real mode only */
+ POWERPC_MMU_REAL = 0x00000006,
+ /* Freescale MPC8xx MMU model */
+ POWERPC_MMU_MPC8xx = 0x00000007,
+ /* BookE MMU model */
+ POWERPC_MMU_BOOKE = 0x00000008,
+ /* BookE 2.06 MMU model */
+ POWERPC_MMU_BOOKE206 = 0x00000009,
+ /* PowerPC 601 MMU model (specific BATs format) */
+ POWERPC_MMU_601 = 0x0000000A,
+#define POWERPC_MMU_64 0x00010000
+ /* 64-bit PowerPC MMU */
+ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* Architecture 2.03 and later (has LPCR) */
+ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
+ /* Architecture 2.06 variant */
+ POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003,
+ /* Architecture 2.07 variant */
+ POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004,
+ /* Architecture 3.00 variant */
+ POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005,
+};
+
+static inline bool mmu_is_64bit(powerpc_mmu_t mmu_model)
+{
+ return mmu_model & POWERPC_MMU_64;
+}
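+
+/*
+ * Illustrative values (derived from the enum above): POWERPC_MMU_3_00
+ * has POWERPC_MMU_64 set, so mmu_is_64bit(POWERPC_MMU_3_00) is true,
+ * while mmu_is_64bit(POWERPC_MMU_BOOKE206) is false.
+ */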
+
+/*****************************************************************************/
+/* Exception model */
+typedef enum powerpc_excp_t powerpc_excp_t;
+enum powerpc_excp_t {
+ POWERPC_EXCP_UNKNOWN = 0,
+ /* Standard PowerPC exception model */
+ POWERPC_EXCP_STD,
+ /* PowerPC 40x exception model */
+ POWERPC_EXCP_40x,
+ /* PowerPC 601 exception model */
+ POWERPC_EXCP_601,
+ /* PowerPC 602 exception model */
+ POWERPC_EXCP_602,
+ /* PowerPC 603 exception model */
+ POWERPC_EXCP_603,
+ /* PowerPC 603e exception model */
+ POWERPC_EXCP_603E,
+ /* PowerPC G2 exception model */
+ POWERPC_EXCP_G2,
+ /* PowerPC 604 exception model */
+ POWERPC_EXCP_604,
+ /* PowerPC 7x0 exception model */
+ POWERPC_EXCP_7x0,
+ /* PowerPC 7x5 exception model */
+ POWERPC_EXCP_7x5,
+ /* PowerPC 74xx exception model */
+ POWERPC_EXCP_74xx,
+ /* BookE exception model */
+ POWERPC_EXCP_BOOKE,
+ /* PowerPC 970 exception model */
+ POWERPC_EXCP_970,
+ /* POWER7 exception model */
+ POWERPC_EXCP_POWER7,
+ /* POWER8 exception model */
+ POWERPC_EXCP_POWER8,
+ /* POWER9 exception model */
+ POWERPC_EXCP_POWER9,
+ /* POWER10 exception model */
+ POWERPC_EXCP_POWER10,
+};
+
+/*****************************************************************************/
+/* PM instructions */
+typedef enum {
+ PPC_PM_DOZE,
+ PPC_PM_NAP,
+ PPC_PM_SLEEP,
+ PPC_PM_RVWINKLE,
+ PPC_PM_STOP,
+} powerpc_pm_insn_t;
+
+/*****************************************************************************/
+/* Input pins model */
+typedef enum powerpc_input_t powerpc_input_t;
+enum powerpc_input_t {
+ PPC_FLAGS_INPUT_UNKNOWN = 0,
+ /* PowerPC 6xx bus */
+ PPC_FLAGS_INPUT_6xx,
+ /* BookE bus */
+ PPC_FLAGS_INPUT_BookE,
+ /* PowerPC 405 bus */
+ PPC_FLAGS_INPUT_405,
+ /* PowerPC 970 bus */
+ PPC_FLAGS_INPUT_970,
+ /* PowerPC POWER7 bus */
+ PPC_FLAGS_INPUT_POWER7,
+ /* PowerPC POWER9 bus */
+ PPC_FLAGS_INPUT_POWER9,
+ /* PowerPC 401 bus */
+ PPC_FLAGS_INPUT_401,
+ /* Freescale RCPU bus */
+ PPC_FLAGS_INPUT_RCPU,
+};
+
+typedef struct PPCHash64Options PPCHash64Options;
+
+/**
+ * PowerPCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A PowerPC CPU model.
+ */
+struct PowerPCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ DeviceReset parent_reset;
+ void (*parent_parse_features)(const char *type, char *str, Error **errp);
+
+ uint32_t pvr;
+ bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
+ uint64_t pcr_mask; /* Available bits in PCR register */
+ uint64_t pcr_supported; /* Bits for supported PowerISA versions */
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+ uint64_t msr_mask;
+ uint64_t lpcr_mask; /* Available bits in the LPCR */
+ uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ uint32_t l1_dcache_size, l1_icache_size;
+#ifndef CONFIG_USER_ONLY
+ unsigned int gdb_num_sprs;
+ const char *gdb_spr_xml;
+#endif
+ const PPCHash64Options *hash64_opts;
+ struct ppc_radix_page_info *radix_page_info;
+ uint32_t lrg_decr_bits;
+ int n_host_threads;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+};
+
+#ifndef CONFIG_USER_ONLY
+typedef struct PPCTimebase {
+ uint64_t guest_timebase;
+ int64_t time_of_the_day_ns;
+ bool runstate_paused;
+} PPCTimebase;
+
+extern const VMStateDescription vmstate_ppc_timebase;
+
+#define VMSTATE_PPC_TIMEBASE_V(_field, _state, _version) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .size = sizeof(PPCTimebase), \
+ .vmsd = &vmstate_ppc_timebase, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PPCTimebase), \
+}
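+
+/*
+ * Usage sketch (the state type and field name below are hypothetical):
+ * embed a PPCTimebase in a VMStateDescription field list as
+ * VMSTATE_PPC_TIMEBASE_V(tb, MyMachineState, 1),
+ */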
+
+void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
+ RunState state);
+#endif
+
+#endif
diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
new file mode 100644
index 000000000..f933d9f2b
--- /dev/null
+++ b/target/ppc/cpu.c
@@ -0,0 +1,126 @@
+/*
+ * PowerPC CPU routines for qemu.
+ *
+ * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-models.h"
+#include "cpu-qom.h"
+#include "exec/log.h"
+#include "fpu/softfloat-helpers.h"
+#include "mmu-hash64.h"
+#include "helper_regs.h"
+#include "sysemu/tcg.h"
+
+target_ulong cpu_read_xer(const CPUPPCState *env)
+{
+ if (is_isa300(env)) {
+ return env->xer | (env->so << XER_SO) |
+ (env->ov << XER_OV) | (env->ca << XER_CA) |
+ (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
+ }
+
+ return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
+ (env->ca << XER_CA);
+}
+
+void cpu_write_xer(CPUPPCState *env, target_ulong xer)
+{
+ env->so = (xer >> XER_SO) & 1;
+ env->ov = (xer >> XER_OV) & 1;
+ env->ca = (xer >> XER_CA) & 1;
+ /* Write all the flags here; cpu_read_xer() checks for ISA 3.00 on readback */
+ env->ov32 = (xer >> XER_OV32) & 1;
+ env->ca32 = (xer >> XER_CA32) & 1;
+ env->xer = xer & ~((1ul << XER_SO) |
+ (1ul << XER_OV) | (1ul << XER_CA) |
+ (1ul << XER_OV32) | (1ul << XER_CA32));
+}
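+
+/*
+ * Round-trip sketch (illustrative): the bits split out above are
+ * reassembled by cpu_read_xer(), e.g.
+ * cpu_write_xer(env, (target_ulong)1 << XER_SO);
+ * assert(cpu_read_xer(env) & ((target_ulong)1 << XER_SO));
+ */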
+
+void ppc_store_vscr(CPUPPCState *env, uint32_t vscr)
+{
+ env->vscr = vscr & ~(1u << VSCR_SAT);
+ /* Which bit we set is completely arbitrary, but clear the rest. */
+ env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
+ env->vscr_sat.u64[1] = 0;
+ set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
+}
+
+uint32_t ppc_get_vscr(CPUPPCState *env)
+{
+ uint32_t sat = (env->vscr_sat.u64[0] | env->vscr_sat.u64[1]) != 0;
+ return env->vscr | (sat << VSCR_SAT);
+}
+
+/* The GDB stub can read and write the MSR... */
+void ppc_store_msr(CPUPPCState *env, target_ulong value)
+{
+ hreg_store_msr(env, value, 0);
+}
+
+void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
+ /* The gtse bit affects hflags */
+ hreg_compute_hflags(env);
+}
+
+static inline void fpscr_set_rounding_mode(CPUPPCState *env)
+{
+ int rnd_type;
+
+ /* Set rounding mode */
+ switch (fpscr_rn) {
+ case 0:
+ /* Best approximation (round to nearest) */
+ rnd_type = float_round_nearest_even;
+ break;
+ case 1:
+ /* Smaller magnitude (round toward zero) */
+ rnd_type = float_round_to_zero;
+ break;
+ case 2:
+ /* Round toward +infinity */
+ rnd_type = float_round_up;
+ break;
+ default:
+ case 3:
+ /* Round toward -infinity */
+ rnd_type = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->fp_status);
+}
+
+void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
+{
+ val &= ~(FP_VX | FP_FEX);
+ if (val & FPSCR_IX) {
+ val |= FP_VX;
+ }
+ if ((val >> FPSCR_XX) & (val >> FPSCR_XE) & 0x1f) {
+ val |= FP_FEX;
+ }
+ env->fpscr = val;
+ if (tcg_enabled()) {
+ fpscr_set_rounding_mode(env);
+ }
+}
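+
+/*
+ * Worked example (illustrative): FP_OX and FP_OE land in the same
+ * position within the low five bits of (val >> FPSCR_XX) and
+ * (val >> FPSCR_XE), so storing both sets the enabled-exception summary:
+ * ppc_store_fpscr(env, FP_OX | FP_OE);
+ * assert(env->fpscr & FP_FEX);
+ */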
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
new file mode 100644
index 000000000..e946da5f3
--- /dev/null
+++ b/target/ppc/cpu.h
@@ -0,0 +1,2689 @@
+/*
+ * PowerPC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_CPU_H
+#define PPC_CPU_H
+
+#include "qemu/int128.h"
+#include "exec/cpu-defs.h"
+#include "cpu-qom.h"
+#include "qom/object.h"
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+#define TARGET_PAGE_BITS_64K 16
+#define TARGET_PAGE_BITS_16M 24
+
+#if defined(TARGET_PPC64)
+#define PPC_ELF_MACHINE EM_PPC64
+#else
+#define PPC_ELF_MACHINE EM_PPC
+#endif
+
+#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
+#define PPC_BIT32(bit) (0x80000000 >> (bit))
+#define PPC_BIT8(bit) (0x80 >> (bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
+ PPC_BIT32(bs))
+#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
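+
+/*
+ * Illustrative values (IBM bit numbering: bit 0 is the MSB):
+ * PPC_BIT(0) == 0x8000000000000000ULL
+ * PPC_BIT(63) == 0x0000000000000001ULL
+ * PPC_BITMASK(60, 63) == 0x000000000000000FULL
+ */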
+
+/*****************************************************************************/
+/* Exception vectors definitions */
+enum {
+ POWERPC_EXCP_NONE = -1,
+ /* The first 64 entries are used by the PowerPC embedded specification */
+ POWERPC_EXCP_CRITICAL = 0, /* Critical input */
+ POWERPC_EXCP_MCHECK = 1, /* Machine check exception */
+ POWERPC_EXCP_DSI = 2, /* Data storage exception */
+ POWERPC_EXCP_ISI = 3, /* Instruction storage exception */
+ POWERPC_EXCP_EXTERNAL = 4, /* External input */
+ POWERPC_EXCP_ALIGN = 5, /* Alignment exception */
+ POWERPC_EXCP_PROGRAM = 6, /* Program exception */
+ POWERPC_EXCP_FPU = 7, /* Floating-point unavailable exception */
+ POWERPC_EXCP_SYSCALL = 8, /* System call exception */
+ POWERPC_EXCP_APU = 9, /* Auxiliary processor unavailable */
+ POWERPC_EXCP_DECR = 10, /* Decrementer exception */
+ POWERPC_EXCP_FIT = 11, /* Fixed-interval timer interrupt */
+ POWERPC_EXCP_WDT = 12, /* Watchdog timer interrupt */
+ POWERPC_EXCP_DTLB = 13, /* Data TLB miss */
+ POWERPC_EXCP_ITLB = 14, /* Instruction TLB miss */
+ POWERPC_EXCP_DEBUG = 15, /* Debug interrupt */
+ /* Vectors 16 to 31 are reserved */
+ POWERPC_EXCP_SPEU = 32, /* SPE/embedded floating-point unavailable */
+ POWERPC_EXCP_EFPDI = 33, /* Embedded floating-point data interrupt */
+ POWERPC_EXCP_EFPRI = 34, /* Embedded floating-point round interrupt */
+ POWERPC_EXCP_EPERFM = 35, /* Embedded performance monitor interrupt */
+ POWERPC_EXCP_DOORI = 36, /* Embedded doorbell interrupt */
+ POWERPC_EXCP_DOORCI = 37, /* Embedded doorbell critical interrupt */
+ POWERPC_EXCP_GDOORI = 38, /* Embedded guest doorbell interrupt */
+ POWERPC_EXCP_GDOORCI = 39, /* Embedded guest doorbell critical interrupt */
+ POWERPC_EXCP_HYPPRIV = 41, /* Embedded hypervisor priv instruction */
+ /* Vectors 42 to 63 are reserved */
+ /* Exceptions defined in the PowerPC server specification */
+ POWERPC_EXCP_RESET = 64, /* System reset exception */
+ POWERPC_EXCP_DSEG = 65, /* Data segment exception */
+ POWERPC_EXCP_ISEG = 66, /* Instruction segment exception */
+ POWERPC_EXCP_HDECR = 67, /* Hypervisor decrementer exception */
+ POWERPC_EXCP_TRACE = 68, /* Trace exception */
+ POWERPC_EXCP_HDSI = 69, /* Hypervisor data storage exception */
+ POWERPC_EXCP_HISI = 70, /* Hypervisor instruction storage exception */
+ POWERPC_EXCP_HDSEG = 71, /* Hypervisor data segment exception */
+ POWERPC_EXCP_HISEG = 72, /* Hypervisor instruction segment exception */
+ POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */
+ /* 40x specific exceptions */
+ POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */
+ /* 601 specific exceptions */
+ POWERPC_EXCP_IO = 75, /* IO error exception */
+ POWERPC_EXCP_RUNM = 76, /* Run mode exception */
+ /* 602 specific exceptions */
+ POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */
+ /* 602/603 specific exceptions */
+ POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */
+ POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */
+ POWERPC_EXCP_DSTLB = 80, /* Data store TLB miss */
+ /* Exceptions available on most PowerPC */
+ POWERPC_EXCP_FPA = 81, /* Floating-point assist exception */
+ POWERPC_EXCP_DABR = 82, /* Data address breakpoint */
+ POWERPC_EXCP_IABR = 83, /* Instruction address breakpoint */
+ POWERPC_EXCP_SMI = 84, /* System management interrupt */
+ POWERPC_EXCP_PERFM = 85, /* Performance monitor interrupt */
+ /* 7xx/74xx specific exceptions */
+ POWERPC_EXCP_THERM = 86, /* Thermal interrupt */
+ /* 74xx specific exceptions */
+ POWERPC_EXCP_VPUA = 87, /* Vector assist exception */
+ /* 970FX specific exceptions */
+ POWERPC_EXCP_SOFTP = 88, /* Soft patch exception */
+ POWERPC_EXCP_MAINT = 89, /* Maintenance exception */
+ /* Freescale embedded cores specific exceptions */
+ POWERPC_EXCP_MEXTBR = 90, /* Maskable external breakpoint */
+ POWERPC_EXCP_NMEXTBR = 91, /* Non-maskable external breakpoint */
+ POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */
+ POWERPC_EXCP_DTLBE = 93, /* Data TLB error */
+ /* VSX Unavailable (Power ISA 2.06 and later) */
+ POWERPC_EXCP_VSXU = 94, /* VSX Unavailable */
+ POWERPC_EXCP_FU = 95, /* Facility Unavailable */
+ /* Additional ISA 2.06 and later server exceptions */
+ POWERPC_EXCP_HV_EMU = 96, /* HV emulation assistance */
+ POWERPC_EXCP_HV_MAINT = 97, /* HMI */
+ POWERPC_EXCP_HV_FU = 98, /* Hypervisor Facility unavailable */
+ /* Server doorbell variants */
+ POWERPC_EXCP_SDOOR = 99,
+ POWERPC_EXCP_SDOOR_HV = 100,
+ /* ISA 3.00 additions */
+ POWERPC_EXCP_HVIRT = 101,
+ POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */
+ /* EOL */
+ POWERPC_EXCP_NB = 103,
+ /* QEMU exceptions: special cases we want to stop translation */
+ POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
+};
+
+/* Exceptions error codes */
+enum {
+ /* Exception subtypes for POWERPC_EXCP_ALIGN */
+ POWERPC_EXCP_ALIGN_FP = 0x01, /* FP alignment exception */
+ POWERPC_EXCP_ALIGN_LST = 0x02, /* Unaligned mult/extern load/store */
+ POWERPC_EXCP_ALIGN_LE = 0x03, /* Multiple little-endian access */
+ POWERPC_EXCP_ALIGN_PROT = 0x04, /* Access crosses a protection boundary */
+ POWERPC_EXCP_ALIGN_BAT = 0x05, /* Access crosses a BAT/seg boundary */
+ POWERPC_EXCP_ALIGN_CACHE = 0x06, /* Impossible dcbz access */
+ POWERPC_EXCP_ALIGN_INSN = 0x07, /* Prefixed insn crossing a 64-byte boundary */
+ /* Exception subtypes for POWERPC_EXCP_PROGRAM */
+ /* FP exceptions */
+ POWERPC_EXCP_FP = 0x10,
+ POWERPC_EXCP_FP_OX = 0x01, /* FP overflow */
+ POWERPC_EXCP_FP_UX = 0x02, /* FP underflow */
+ POWERPC_EXCP_FP_ZX = 0x03, /* FP divide by zero */
+ POWERPC_EXCP_FP_XX = 0x04, /* FP inexact */
+ POWERPC_EXCP_FP_VXSNAN = 0x05, /* FP invalid SNaN op */
+ POWERPC_EXCP_FP_VXISI = 0x06, /* FP invalid infinite subtraction */
+ POWERPC_EXCP_FP_VXIDI = 0x07, /* FP invalid infinite divide */
+ POWERPC_EXCP_FP_VXZDZ = 0x08, /* FP invalid zero divide */
+ POWERPC_EXCP_FP_VXIMZ = 0x09, /* FP invalid infinite * zero */
+ POWERPC_EXCP_FP_VXVC = 0x0A, /* FP invalid compare */
+ POWERPC_EXCP_FP_VXSOFT = 0x0B, /* FP invalid operation */
+ POWERPC_EXCP_FP_VXSQRT = 0x0C, /* FP invalid square root */
+ POWERPC_EXCP_FP_VXCVI = 0x0D, /* FP invalid integer conversion */
+ /* Invalid instruction */
+ POWERPC_EXCP_INVAL = 0x20,
+ POWERPC_EXCP_INVAL_INVAL = 0x01, /* Invalid instruction */
+ POWERPC_EXCP_INVAL_LSWX = 0x02, /* Invalid lswx instruction */
+ POWERPC_EXCP_INVAL_SPR = 0x03, /* Invalid SPR access */
+ POWERPC_EXCP_INVAL_FP = 0x04, /* Unimplemented mandatory fp instr */
+ /* Privileged instruction */
+ POWERPC_EXCP_PRIV = 0x30,
+ POWERPC_EXCP_PRIV_OPC = 0x01, /* Privileged operation exception */
+ POWERPC_EXCP_PRIV_REG = 0x02, /* Privileged register exception */
+ /* Trap */
+ POWERPC_EXCP_TRAP = 0x40,
+};
+
+#define PPC_INPUT(env) ((env)->bus_model)
+
+/*****************************************************************************/
+typedef struct opc_handler_t opc_handler_t;
+
+/*****************************************************************************/
+/* Types used to describe some PowerPC registers etc. */
+typedef struct DisasContext DisasContext;
+typedef struct ppc_spr_t ppc_spr_t;
+typedef union ppc_tlb_t ppc_tlb_t;
+typedef struct ppc_hash_pte64 ppc_hash_pte64_t;
+
+/* SPR access micro-op generation callbacks */
+struct ppc_spr_t {
+ const char *name;
+ target_ulong default_value;
+#ifndef CONFIG_USER_ONLY
+ unsigned int gdb_id;
+#endif
+#ifdef CONFIG_TCG
+ void (*uea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*uea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+# ifndef CONFIG_USER_ONLY
+ void (*oea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*oea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+ void (*hea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*hea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+# endif
+#endif
+#ifdef CONFIG_KVM
+ /*
+ * We (ab)use the fact that all SPR ids for the ONE_REG interface
+ * carry the KVM_REG_PPC prefix, so an id of 0 can mean "don't
+ * sync this SPR".
+ */
+ uint64_t one_reg_id;
+#endif
+};
+
+/* VSX/Altivec registers (128 bits) */
+typedef union _ppc_vsr_t {
+ uint8_t u8[16];
+ uint16_t u16[8];
+ uint32_t u32[4];
+ uint64_t u64[2];
+ int8_t s8[16];
+ int16_t s16[8];
+ int32_t s32[4];
+ int64_t s64[2];
+ float32 f32[4];
+ float64 f64[2];
+ float128 f128;
+#ifdef CONFIG_INT128
+ __uint128_t u128;
+#endif
+ Int128 s128;
+} ppc_vsr_t;
+
+typedef ppc_vsr_t ppc_avr_t;
+typedef ppc_vsr_t ppc_fprp_t;
+
+#if !defined(CONFIG_USER_ONLY)
+/* Software TLB cache */
+typedef struct ppc6xx_tlb_t ppc6xx_tlb_t;
+struct ppc6xx_tlb_t {
+ target_ulong pte0;
+ target_ulong pte1;
+ target_ulong EPN;
+};
+
+typedef struct ppcemb_tlb_t ppcemb_tlb_t;
+struct ppcemb_tlb_t {
+ uint64_t RPN;
+ target_ulong EPN;
+ target_ulong PID;
+ target_ulong size;
+ uint32_t prot;
+ uint32_t attr; /* Storage attributes */
+};
+
+typedef struct ppcmas_tlb_t {
+ uint32_t mas8;
+ uint32_t mas1;
+ uint64_t mas2;
+ uint64_t mas7_3;
+} ppcmas_tlb_t;
+
+union ppc_tlb_t {
+ ppc6xx_tlb_t *tlb6;
+ ppcemb_tlb_t *tlbe;
+ ppcmas_tlb_t *tlbm;
+};
+
+/* possible TLB variants */
+#define TLB_NONE 0
+#define TLB_6XX 1
+#define TLB_EMB 2
+#define TLB_MAS 3
+#endif
+
+typedef struct PPCHash64SegmentPageSizes PPCHash64SegmentPageSizes;
+
+typedef struct ppc_slb_t ppc_slb_t;
+struct ppc_slb_t {
+ uint64_t esid;
+ uint64_t vsid;
+ const PPCHash64SegmentPageSizes *sps;
+};
+
+#define MAX_SLB_ENTRIES 64
+#define SEGMENT_SHIFT_256M 28
+#define SEGMENT_MASK_256M (~((1ULL << SEGMENT_SHIFT_256M) - 1))
+
+#define SEGMENT_SHIFT_1T 40
+#define SEGMENT_MASK_1T (~((1ULL << SEGMENT_SHIFT_1T) - 1))
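+
+/*
+ * Illustrative expansions: SEGMENT_MASK_256M == 0xFFFFFFFFF0000000ULL
+ * and SEGMENT_MASK_1T == 0xFFFFFF0000000000ULL.
+ */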
+
+typedef struct ppc_v3_pate_t {
+ uint64_t dw0;
+ uint64_t dw1;
+} ppc_v3_pate_t;
+
+/*****************************************************************************/
+/* Machine state register bits definition */
+#define MSR_SF 63 /* Sixty-four-bit mode hflags */
+#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
+#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
+#define MSR_HV 60 /* hypervisor state hflags */
+#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
+#define MSR_TS1 33
+#define MSR_TM 32 /* Transactional Memory Available (Book3s) */
+#define MSR_CM 31 /* Computation mode for BookE hflags */
+#define MSR_ICM 30 /* Interrupt computation mode for BookE */
+#define MSR_GS 28 /* guest state for BookE */
+#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
+#define MSR_VR 25 /* altivec available x hflags */
+#define MSR_SPE 25 /* SPE enable for BookE x hflags */
+#define MSR_AP 23 /* Access privilege state on 602 hflags */
+#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
+#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
+#define MSR_S 22 /* Secure state */
+#define MSR_KEY 19 /* key bit on 603e */
+#define MSR_POW 18 /* Power management */
+#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
+#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
+#define MSR_ILE 16 /* Interrupt little-endian mode */
+#define MSR_EE 15 /* External interrupt enable */
+#define MSR_PR 14 /* Problem state hflags */
+#define MSR_FP 13 /* Floating point available hflags */
+#define MSR_ME 12 /* Machine check interrupt enable */
+#define MSR_FE0 11 /* Floating point exception mode 0 */
+#define MSR_SE 10 /* Single-step trace enable x hflags */
+#define MSR_DWE 10 /* Debug wait enable on 405 x */
+#define MSR_UBLE 10 /* User BTB lock enable on e500 x */
+#define MSR_BE 9 /* Branch trace enable x hflags */
+#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */
+#define MSR_FE1 8 /* Floating point exception mode 1 */
+#define MSR_AL 7 /* AL bit on POWER */
+#define MSR_EP 6 /* Exception prefix on 601 */
+#define MSR_IR 5 /* Instruction relocate */
+#define MSR_DR 4 /* Data relocate */
+#define MSR_IS 5 /* Instruction address space (BookE) */
+#define MSR_DS 4 /* Data address space (BookE) */
+#define MSR_PE 3 /* Protection enable on 403 */
+#define MSR_PX 2 /* Protection exclusive on 403 x */
+#define MSR_PMM 2 /* Performance monitor mark on POWER x */
+#define MSR_RI 1 /* Recoverable interrupt 1 */
+#define MSR_LE 0 /* Little-endian mode 1 hflags */
+
+/* PMU bits */
+#define MMCR0_FC PPC_BIT(32) /* Freeze Counters */
+#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Occurred */
+#define MMCR0_PMAE PPC_BIT(37) /* Perf Monitor Alert Enable */
+#define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */
+#define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */
+#define MMCR0_PMCC0 PPC_BIT(44) /* PMC Control bit 0 */
+#define MMCR0_PMCC1 PPC_BIT(45) /* PMC Control bit 1 */
+/* MMCR0 userspace r/w mask */
+#define MMCR0_UREG_MASK (MMCR0_FC | MMCR0_PMAO | MMCR0_PMAE)
+/* MMCR2 userspace r/w mask */
+#define MMCR2_FC1P0 PPC_BIT(1) /* MMCR2 FCnP0 for PMC1 */
+#define MMCR2_FC2P0 PPC_BIT(10) /* MMCR2 FCnP0 for PMC2 */
+#define MMCR2_FC3P0 PPC_BIT(19) /* MMCR2 FCnP0 for PMC3 */
+#define MMCR2_FC4P0 PPC_BIT(28) /* MMCR2 FCnP0 for PMC4 */
+#define MMCR2_FC5P0 PPC_BIT(37) /* MMCR2 FCnP0 for PMC5 */
+#define MMCR2_FC6P0 PPC_BIT(46) /* MMCR2 FCnP0 for PMC6 */
+#define MMCR2_UREG_MASK (MMCR2_FC1P0 | MMCR2_FC2P0 | MMCR2_FC3P0 | \
+ MMCR2_FC4P0 | MMCR2_FC5P0 | MMCR2_FC6P0)
+
+/* LPCR bits */
+#define LPCR_VPM0 PPC_BIT(0)
+#define LPCR_VPM1 PPC_BIT(1)
+#define LPCR_ISL PPC_BIT(2)
+#define LPCR_KBV PPC_BIT(3)
+#define LPCR_DPFD_SHIFT (63 - 11)
+#define LPCR_DPFD (0x7ull << LPCR_DPFD_SHIFT)
+#define LPCR_VRMASD_SHIFT (63 - 16)
+#define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT)
+/* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */
+#define LPCR_PECE_U_SHIFT (63 - 19)
+#define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT)
+#define LPCR_HVEE PPC_BIT(17) /* Hypervisor Virt Exit Enable */
+#define LPCR_RMLS_SHIFT (63 - 37) /* RMLS (removed in ISA v3.0) */
+#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT)
+#define LPCR_HAIL PPC_BIT(37) /* ISA v3.1 HV AIL=3 equivalent */
+#define LPCR_ILE PPC_BIT(38)
+#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
+#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
+#define LPCR_UPRT PPC_BIT(41) /* Use Process Table */
+#define LPCR_EVIRT PPC_BIT(42) /* Enhanced Virtualisation */
+#define LPCR_HR PPC_BIT(43) /* Host Radix */
+#define LPCR_ONL PPC_BIT(45)
+#define LPCR_LD PPC_BIT(46) /* Large Decrementer */
+#define LPCR_P7_PECE0 PPC_BIT(49)
+#define LPCR_P7_PECE1 PPC_BIT(50)
+#define LPCR_P7_PECE2 PPC_BIT(51)
+#define LPCR_P8_PECE0 PPC_BIT(47)
+#define LPCR_P8_PECE1 PPC_BIT(48)
+#define LPCR_P8_PECE2 PPC_BIT(49)
+#define LPCR_P8_PECE3 PPC_BIT(50)
+#define LPCR_P8_PECE4 PPC_BIT(51)
+/* P9: Power-saving mode Exit Cause Enable (Lower Section) Mask */
+#define LPCR_PECE_L_SHIFT (63 - 51)
+#define LPCR_PECE_L_MASK (0x1full << LPCR_PECE_L_SHIFT)
+#define LPCR_PDEE PPC_BIT(47) /* Privileged Doorbell Exit EN */
+#define LPCR_HDEE PPC_BIT(48) /* Hyperv Doorbell Exit Enable */
+#define LPCR_EEE PPC_BIT(49) /* External Exit Enable */
+#define LPCR_DEE PPC_BIT(50) /* Decrementer Exit Enable */
+#define LPCR_OEE PPC_BIT(51) /* Other Exit Enable */
+#define LPCR_MER PPC_BIT(52)
+#define LPCR_GTSE PPC_BIT(53) /* Guest Translation Shootdown */
+#define LPCR_TC PPC_BIT(54)
+#define LPCR_HEIC PPC_BIT(59) /* HV Extern Interrupt Control */
+#define LPCR_LPES0 PPC_BIT(60)
+#define LPCR_LPES1 PPC_BIT(61)
+#define LPCR_RMI PPC_BIT(62)
+#define LPCR_HVICE PPC_BIT(62) /* HV Virtualisation Int Enable */
+#define LPCR_HDICE PPC_BIT(63)
+
+/* PSSCR bits */
+#define PSSCR_ESL PPC_BIT(42) /* Enable State Loss */
+#define PSSCR_EC PPC_BIT(43) /* Exit Criterion */
+
+/* HFSCR bits */
+#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */
+#define HFSCR_IC_MSGP 0xA
+
+#define msr_sf ((env->msr >> MSR_SF) & 1)
+#define msr_isf ((env->msr >> MSR_ISF) & 1)
+#if defined(TARGET_PPC64)
+#define msr_hv ((env->msr >> MSR_HV) & 1)
+#else
+#define msr_hv (0)
+#endif
+#define msr_cm ((env->msr >> MSR_CM) & 1)
+#define msr_icm ((env->msr >> MSR_ICM) & 1)
+#define msr_gs ((env->msr >> MSR_GS) & 1)
+#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
+#define msr_vr ((env->msr >> MSR_VR) & 1)
+#define msr_spe ((env->msr >> MSR_SPE) & 1)
+#define msr_ap ((env->msr >> MSR_AP) & 1)
+#define msr_vsx ((env->msr >> MSR_VSX) & 1)
+#define msr_sa ((env->msr >> MSR_SA) & 1)
+#define msr_key ((env->msr >> MSR_KEY) & 1)
+#define msr_pow ((env->msr >> MSR_POW) & 1)
+#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
+#define msr_ce ((env->msr >> MSR_CE) & 1)
+#define msr_ile ((env->msr >> MSR_ILE) & 1)
+#define msr_ee ((env->msr >> MSR_EE) & 1)
+#define msr_pr ((env->msr >> MSR_PR) & 1)
+#define msr_fp ((env->msr >> MSR_FP) & 1)
+#define msr_me ((env->msr >> MSR_ME) & 1)
+#define msr_fe0 ((env->msr >> MSR_FE0) & 1)
+#define msr_se ((env->msr >> MSR_SE) & 1)
+#define msr_dwe ((env->msr >> MSR_DWE) & 1)
+#define msr_uble ((env->msr >> MSR_UBLE) & 1)
+#define msr_be ((env->msr >> MSR_BE) & 1)
+#define msr_de ((env->msr >> MSR_DE) & 1)
+#define msr_fe1 ((env->msr >> MSR_FE1) & 1)
+#define msr_al ((env->msr >> MSR_AL) & 1)
+#define msr_ep ((env->msr >> MSR_EP) & 1)
+#define msr_ir ((env->msr >> MSR_IR) & 1)
+#define msr_dr ((env->msr >> MSR_DR) & 1)
+#define msr_is ((env->msr >> MSR_IS) & 1)
+#define msr_ds ((env->msr >> MSR_DS) & 1)
+#define msr_pe ((env->msr >> MSR_PE) & 1)
+#define msr_px ((env->msr >> MSR_PX) & 1)
+#define msr_pmm ((env->msr >> MSR_PMM) & 1)
+#define msr_ri ((env->msr >> MSR_RI) & 1)
+#define msr_le ((env->msr >> MSR_LE) & 1)
+#define msr_ts ((env->msr >> MSR_TS1) & 3)
+#define msr_tm ((env->msr >> MSR_TM) & 1)
+
+#define DBCR0_ICMP (1 << 27)
+#define DBCR0_BRT (1 << 26)
+#define DBSR_ICMP (1 << 27)
+#define DBSR_BRT (1 << 26)
+
+/* Hypervisor bit is more specific */
+#if defined(TARGET_PPC64)
+#define MSR_HVB (1ULL << MSR_HV)
+#else
+#define MSR_HVB (0ULL)
+#endif
+
+/* DSISR */
+#define DSISR_NOPTE 0x40000000
+/* Access not permitted by the storage protection (access authority) */
+#define DSISR_PROTFAULT 0x08000000
+#define DSISR_ISSTORE 0x02000000
+/* Not permitted by virtual page class key protection */
+#define DSISR_AMR 0x00200000
+/* Unsupported Radix Tree Configuration */
+#define DSISR_R_BADCONFIG 0x00080000
+#define DSISR_ATOMIC_RC 0x00040000
+/* Unable to translate address of (guest) pde or process/page table entry */
+#define DSISR_PRTABLE_FAULT 0x00020000
+
+/* SRR1 error code fields */
+
+#define SRR1_NOPTE DSISR_NOPTE
+/* Not permitted due to no-execute or guard bit set */
+#define SRR1_NOEXEC_GUARD 0x10000000
+#define SRR1_PROTFAULT DSISR_PROTFAULT
+#define SRR1_IAMR DSISR_AMR
+
+/* SRR1[42:45] wakeup fields for System Reset Interrupt */
+
+#define SRR1_WAKEMASK 0x003c0000 /* reason for wakeup */
+
+#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virt. Interrupt (P9) */
+#define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell */
+#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell */
+#define SRR1_WAKESCOM 0x00080000 /* SCOM not in power-saving mode */
+
+/* SRR1[46:47] power-saving exit mode */
+
+#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask */
+
+#define SRR1_WS_HVLOSS 0x00030000 /* HV resources not maintained */
+#define SRR1_WS_GPRLOSS 0x00020000 /* GPRs not maintained */
+#define SRR1_WS_NOLOSS 0x00010000 /* All resources maintained */
+
+/* Facility Status and Control (FSCR) bits */
+#define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */
+#define FSCR_TAR (63 - 55) /* Target Address Register */
+#define FSCR_SCV (63 - 51) /* System call vectored */
+/* Interrupt cause mask and position in FSCR. HFSCR has the same format */
+#define FSCR_IC_MASK (0xFFULL)
+#define FSCR_IC_POS (63 - 7)
+#define FSCR_IC_DSCR_SPR3 2
+#define FSCR_IC_PMU 3
+#define FSCR_IC_BHRB 4
+#define FSCR_IC_TM 5
+#define FSCR_IC_EBB 7
+#define FSCR_IC_TAR 8
+#define FSCR_IC_SCV 12
+
+/* Exception state register bits definition */
+#define ESR_PIL PPC_BIT(36) /* Illegal Instruction */
+#define ESR_PPR PPC_BIT(37) /* Privileged Instruction */
+#define ESR_PTR PPC_BIT(38) /* Trap */
+#define ESR_FP PPC_BIT(39) /* Floating-Point Operation */
+#define ESR_ST PPC_BIT(40) /* Store Operation */
+#define ESR_AP PPC_BIT(44) /* Auxiliary Processor Operation */
+#define ESR_PUO PPC_BIT(45) /* Unimplemented Operation */
+#define ESR_BO PPC_BIT(46) /* Byte Ordering */
+#define ESR_PIE PPC_BIT(47) /* Imprecise exception */
+#define ESR_DATA PPC_BIT(53) /* Data Access (Embedded page table) */
+#define ESR_TLBI PPC_BIT(54) /* TLB Ineligible (Embedded page table) */
+#define ESR_PT PPC_BIT(55) /* Page Table (Embedded page table) */
+#define ESR_SPV PPC_BIT(56) /* SPE/VMX operation */
+#define ESR_EPID PPC_BIT(57) /* External Process ID operation */
+#define ESR_VLEMI PPC_BIT(58) /* VLE operation */
+#define ESR_MIF PPC_BIT(62) /* Misaligned instruction (VLE) */
+
+/* Transaction EXception And Summary Register bits */
+#define TEXASR_FAILURE_PERSISTENT (63 - 7)
+#define TEXASR_DISALLOWED (63 - 8)
+#define TEXASR_NESTING_OVERFLOW (63 - 9)
+#define TEXASR_FOOTPRINT_OVERFLOW (63 - 10)
+#define TEXASR_SELF_INDUCED_CONFLICT (63 - 11)
+#define TEXASR_NON_TRANSACTIONAL_CONFLICT (63 - 12)
+#define TEXASR_TRANSACTION_CONFLICT (63 - 13)
+#define TEXASR_TRANSLATION_INVALIDATION_CONFLICT (63 - 14)
+#define TEXASR_IMPLEMENTATION_SPECIFIC (63 - 15)
+#define TEXASR_INSTRUCTION_FETCH_CONFLICT (63 - 16)
+#define TEXASR_ABORT (63 - 31)
+#define TEXASR_SUSPENDED (63 - 32)
+#define TEXASR_PRIVILEGE_HV (63 - 34)
+#define TEXASR_PRIVILEGE_PR (63 - 35)
+#define TEXASR_FAILURE_SUMMARY (63 - 36)
+#define TEXASR_TFIAR_EXACT (63 - 37)
+#define TEXASR_ROT (63 - 38)
+#define TEXASR_TRANSACTION_LEVEL (63 - 52) /* 12 bits */
+
+enum {
+ POWERPC_FLAG_NONE = 0x00000000,
+ /* Flag for the meaning of MSR bit 25 (VRE/SPE) */
+ POWERPC_FLAG_SPE = 0x00000001,
+ POWERPC_FLAG_VRE = 0x00000002,
+ /* Flag for the meaning of MSR bit 17 (TGPR/CE) */
+ POWERPC_FLAG_TGPR = 0x00000004,
+ POWERPC_FLAG_CE = 0x00000008,
+ /* Flag for the meaning of MSR bit 10 (SE/DWE/UBLE) */
+ POWERPC_FLAG_SE = 0x00000010,
+ POWERPC_FLAG_DWE = 0x00000020,
+ POWERPC_FLAG_UBLE = 0x00000040,
+ /* Flag for the meaning of MSR bit 9 (BE/DE) */
+ POWERPC_FLAG_BE = 0x00000080,
+ POWERPC_FLAG_DE = 0x00000100,
+ /* Flag for the meaning of MSR bit 2 (PX/PMM) */
+ POWERPC_FLAG_PX = 0x00000200,
+ POWERPC_FLAG_PMM = 0x00000400,
+ /* Flag for special features */
+ /* Decrementer clock: RTC clock (POWER, 601) or bus clock */
+ POWERPC_FLAG_RTC_CLK = 0x00010000,
+ POWERPC_FLAG_BUS_CLK = 0x00020000,
+ /* Has CFAR */
+ POWERPC_FLAG_CFAR = 0x00040000,
+ /* Has VSX */
+ POWERPC_FLAG_VSX = 0x00080000,
+ /* Has Transaction Memory (ISA 2.07) */
+ POWERPC_FLAG_TM = 0x00100000,
+ /* Has SCV (ISA 3.00) */
+ POWERPC_FLAG_SCV = 0x00200000,
+ /* Has HID0 for LE bit (601) */
+ POWERPC_FLAG_HID0_LE = 0x00400000,
+};
+
+/*
+ * Bits for env->hflags.
+ *
+ * Most of these bits overlap with corresponding bits in MSR,
+ * but some come from other sources. Those that do come from
+ * the MSR are validated in hreg_compute_hflags.
+ */
+enum {
+ HFLAGS_LE = 0, /* MSR_LE -- comes from elsewhere on 601 */
+ HFLAGS_HV = 1, /* computed from MSR_HV and other state */
+ HFLAGS_64 = 2, /* computed from MSR_CM and MSR_SF */
+ HFLAGS_GTSE = 3, /* computed from SPR_LPCR[GTSE] */
+ HFLAGS_DR = 4, /* MSR_DR */
+ HFLAGS_HR = 5, /* computed from SPR_LPCR[HR] */
+ HFLAGS_SPE = 6, /* from MSR_SPE if cpu has SPE; avoid overlap w/ MSR_VR */
+ HFLAGS_TM = 8, /* computed from MSR_TM */
+ HFLAGS_BE = 9, /* MSR_BE -- from elsewhere on embedded ppc */
+ HFLAGS_SE = 10, /* MSR_SE -- from elsewhere on embedded ppc */
+ HFLAGS_FP = 13, /* MSR_FP */
+ HFLAGS_PR = 14, /* MSR_PR */
+ HFLAGS_PMCC0 = 15, /* MMCR0 PMCC bit 0 */
+ HFLAGS_PMCC1 = 16, /* MMCR0 PMCC bit 1 */
+ HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */
+ HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */
+
+ HFLAGS_IMMU_IDX = 26, /* 26..28 -- the composite immu_idx */
+ HFLAGS_DMMU_IDX = 29, /* 29..31 -- the composite dmmu_idx */
+};
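+
+/*
+ * Sketch of the overlap noted above (illustrative): HFLAGS_FP and
+ * HFLAGS_PR reuse the bit positions of MSR_FP and MSR_PR, so those
+ * flags can be copied straight from the MSR, e.g.
+ * hflags |= env->msr & ((1 << MSR_FP) | (1 << MSR_PR));
+ */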
+
+/*****************************************************************************/
+/* Floating point status and control register */
+#define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */
+#define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */
+#define FPSCR_DRN0 32 /* Decimal Floating-Point rounding control */
+#define FPSCR_FX 31 /* Floating-point exception summary */
+#define FPSCR_FEX 30 /* Floating-point enabled exception summary */
+#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */
+#define FPSCR_OX 28 /* Floating-point overflow exception */
+#define FPSCR_UX 27 /* Floating-point underflow exception */
+#define FPSCR_ZX 26 /* Floating-point zero divide exception */
+#define FPSCR_XX 25 /* Floating-point inexact exception */
+#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */
+#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */
+#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */
+#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */
+#define FPSCR_FR 18 /* Floating-point fraction rounded */
+#define FPSCR_FI 17 /* Floating-point fraction inexact */
+#define FPSCR_C 16 /* Floating-point result class descriptor */
+#define FPSCR_FL 15 /* Floating-point less than or negative */
+#define FPSCR_FG 14 /* Floating-point greater than or negative */
+#define FPSCR_FE 13 /* Floating-point equal or zero */
+#define FPSCR_FU 12 /* Floating-point unordered or NaN */
+#define FPSCR_FPCC 12 /* Floating-point condition code */
+#define FPSCR_FPRF 12 /* Floating-point result flags */
+#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */
+#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */
+#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
+#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
+#define FPSCR_OE 6 /* Floating-point overflow exception enable */
+#define FPSCR_UE 5 /* Floating-point underflow exception enable */
+#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
+#define FPSCR_XE 3 /* Floating-point inexact exception enable */
+#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
+#define FPSCR_RN1 1
+#define FPSCR_RN0 0 /* Floating-point rounding control */
+#define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0)
+#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
+#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
+#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
+#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
+#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
+#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
+#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
+#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
+#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
+#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
+#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
+#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
+#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
+#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
+#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
+#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
+#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1)
+#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1)
+#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1)
+#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
+#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
+#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
+#define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3)
+/* Invalid operation exception summary */
+#define FPSCR_IX ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
+ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
+ (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \
+ (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \
+ (1 << FPSCR_VXCVI))
+/* exception summary */
+#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F)
+/* enabled exception summary */
+#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
+ 0x1F)
+
+#define FP_DRN2 (1ull << FPSCR_DRN2)
+#define FP_DRN1 (1ull << FPSCR_DRN1)
+#define FP_DRN0 (1ull << FPSCR_DRN0)
+#define FP_DRN (FP_DRN2 | FP_DRN1 | FP_DRN0)
+#define FP_FX (1ull << FPSCR_FX)
+#define FP_FEX (1ull << FPSCR_FEX)
+#define FP_VX (1ull << FPSCR_VX)
+#define FP_OX (1ull << FPSCR_OX)
+#define FP_UX (1ull << FPSCR_UX)
+#define FP_ZX (1ull << FPSCR_ZX)
+#define FP_XX (1ull << FPSCR_XX)
+#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
+#define FP_VXISI (1ull << FPSCR_VXISI)
+#define FP_VXIDI (1ull << FPSCR_VXIDI)
+#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
+#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
+#define FP_VXVC (1ull << FPSCR_VXVC)
+#define FP_FR (1ull << FPSCR_FR)
+#define FP_FI (1ull << FPSCR_FI)
+#define FP_C (1ull << FPSCR_C)
+#define FP_FL (1ull << FPSCR_FL)
+#define FP_FG (1ull << FPSCR_FG)
+#define FP_FE (1ull << FPSCR_FE)
+#define FP_FU (1ull << FPSCR_FU)
+#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_FPRF (FP_C | FP_FPCC)
+#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
+#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
+#define FP_VXCVI (1ull << FPSCR_VXCVI)
+#define FP_VE (1ull << FPSCR_VE)
+#define FP_OE (1ull << FPSCR_OE)
+#define FP_UE (1ull << FPSCR_UE)
+#define FP_ZE (1ull << FPSCR_ZE)
+#define FP_XE (1ull << FPSCR_XE)
+#define FP_NI (1ull << FPSCR_NI)
+#define FP_RN1 (1ull << FPSCR_RN1)
+#define FP_RN0 (1ull << FPSCR_RN0)
+#define FP_RN (FP_RN1 | FP_RN0)
+
+#define FP_ENABLES (FP_VE | FP_OE | FP_UE | FP_ZE | FP_XE)
+#define FP_STATUS (FP_FR | FP_FI | FP_FPRF)
+
+/* the exception bits which can be cleared by mcrfs - includes FX */
+#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
+ FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \
+ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
+ FP_VXSQRT | FP_VXCVI)
+
+/*****************************************************************************/
+/* Vector status and control register */
+#define VSCR_NJ 16 /* Vector non-Java mode */
+#define VSCR_SAT 0 /* Vector saturation */
+
+/*****************************************************************************/
+/* BookE e500 MMU registers */
+
+#define MAS0_NV_SHIFT 0
+#define MAS0_NV_MASK (0xfff << MAS0_NV_SHIFT)
+
+#define MAS0_WQ_SHIFT 12
+#define MAS0_WQ_MASK (3 << MAS0_WQ_SHIFT)
+/* Write TLB entry regardless of reservation */
+#define MAS0_WQ_ALWAYS (0 << MAS0_WQ_SHIFT)
+/* Write TLB entry only if a reservation exists */
+#define MAS0_WQ_COND (1 << MAS0_WQ_SHIFT)
+/* Clear TLB entry */
+#define MAS0_WQ_CLR_RSRV (2 << MAS0_WQ_SHIFT)
+
+#define MAS0_HES_SHIFT 14
+#define MAS0_HES (1 << MAS0_HES_SHIFT)
+
+#define MAS0_ESEL_SHIFT 16
+#define MAS0_ESEL_MASK (0xfff << MAS0_ESEL_SHIFT)
+
+#define MAS0_TLBSEL_SHIFT 28
+#define MAS0_TLBSEL_MASK (3 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB0 (0 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB1 (1 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB2 (2 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB3 (3 << MAS0_TLBSEL_SHIFT)
+
+#define MAS0_ATSEL_SHIFT 31
+#define MAS0_ATSEL (1 << MAS0_ATSEL_SHIFT)
+#define MAS0_ATSEL_TLB 0
+#define MAS0_ATSEL_LRAT MAS0_ATSEL
+
+#define MAS1_TSIZE_SHIFT 7
+#define MAS1_TSIZE_MASK (0x1f << MAS1_TSIZE_SHIFT)
+
+#define MAS1_TS_SHIFT 12
+#define MAS1_TS (1 << MAS1_TS_SHIFT)
+
+#define MAS1_IND_SHIFT 13
+#define MAS1_IND (1 << MAS1_IND_SHIFT)
+
+#define MAS1_TID_SHIFT 16
+#define MAS1_TID_MASK (0x3fff << MAS1_TID_SHIFT)
+
+#define MAS1_IPROT_SHIFT 30
+#define MAS1_IPROT (1 << MAS1_IPROT_SHIFT)
+
+#define MAS1_VALID_SHIFT 31
+#define MAS1_VALID 0x80000000
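+
+/*
+ * Decoding sketch (local variable names are illustrative):
+ * tid = (mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
+ * tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+ */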
+
+#define MAS2_EPN_SHIFT 12
+#define MAS2_EPN_MASK (~0ULL << MAS2_EPN_SHIFT)
+
+#define MAS2_ACM_SHIFT 6
+#define MAS2_ACM (1 << MAS2_ACM_SHIFT)
+
+#define MAS2_VLE_SHIFT 5
+#define MAS2_VLE (1 << MAS2_VLE_SHIFT)
+
+#define MAS2_W_SHIFT 4
+#define MAS2_W (1 << MAS2_W_SHIFT)
+
+#define MAS2_I_SHIFT 3
+#define MAS2_I (1 << MAS2_I_SHIFT)
+
+#define MAS2_M_SHIFT 2
+#define MAS2_M (1 << MAS2_M_SHIFT)
+
+#define MAS2_G_SHIFT 1
+#define MAS2_G (1 << MAS2_G_SHIFT)
+
+#define MAS2_E_SHIFT 0
+#define MAS2_E (1 << MAS2_E_SHIFT)
+
+#define MAS3_RPN_SHIFT 12
+#define MAS3_RPN_MASK (0xfffff << MAS3_RPN_SHIFT)
+
+#define MAS3_U0 0x00000200
+#define MAS3_U1 0x00000100
+#define MAS3_U2 0x00000080
+#define MAS3_U3 0x00000040
+#define MAS3_UX 0x00000020
+#define MAS3_SX 0x00000010
+#define MAS3_UW 0x00000008
+#define MAS3_SW 0x00000004
+#define MAS3_UR 0x00000002
+#define MAS3_SR 0x00000001
+#define MAS3_SPSIZE_SHIFT 1
+#define MAS3_SPSIZE_MASK (0x3e << MAS3_SPSIZE_SHIFT)
+
+#define MAS4_TLBSELD_SHIFT MAS0_TLBSEL_SHIFT
+#define MAS4_TLBSELD_MASK MAS0_TLBSEL_MASK
+#define MAS4_TIDSELD_MASK 0x00030000
+#define MAS4_TIDSELD_PID0 0x00000000
+#define MAS4_TIDSELD_PID1 0x00010000
+#define MAS4_TIDSELD_PID2 0x00020000
+#define MAS4_TIDSELD_PIDZ 0x00030000
+#define MAS4_INDD 0x00008000 /* Default IND */
+#define MAS4_TSIZED_SHIFT MAS1_TSIZE_SHIFT
+#define MAS4_TSIZED_MASK MAS1_TSIZE_MASK
+#define MAS4_ACMD 0x00000040
+#define MAS4_VLED 0x00000020
+#define MAS4_WD 0x00000010
+#define MAS4_ID 0x00000008
+#define MAS4_MD 0x00000004
+#define MAS4_GD 0x00000002
+#define MAS4_ED 0x00000001
+#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */
+#define MAS4_WIMGED_SHIFT 0
+
+#define MAS5_SGS 0x80000000
+#define MAS5_SLPID_MASK 0x00000fff
+
+#define MAS6_SPID0 0x3fff0000
+#define MAS6_SPID1 0x00007ffe
+#define MAS6_ISIZE(x) MAS1_TSIZE(x)
+#define MAS6_SAS 0x00000001
+#define MAS6_SPID MAS6_SPID0
+#define MAS6_SIND 0x00000002 /* Indirect page */
+#define MAS6_SIND_SHIFT 1
+#define MAS6_SPID_MASK 0x3fff0000
+#define MAS6_SPID_SHIFT 16
+#define MAS6_ISIZE_MASK 0x00000f80
+#define MAS6_ISIZE_SHIFT 7
+
+#define MAS7_RPN 0xffffffff
+
+#define MAS8_TGS 0x80000000
+#define MAS8_VF 0x40000000
+#define MAS8_TLBPID 0x00000fff
+
+/* Bit definitions for MMUCFG */
+#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
+#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
+#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */
+#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */
+#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */
+#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */
+#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */
+#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */
+#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */
+#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */
+#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
+#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
+#define TLBnCFG_HES 0x00002000 /* HW select supported */
+#define TLBnCFG_AVAIL 0x00004000 /* variable page size */
+#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */
+#define TLBnCFG_GTWE 0x00010000 /* Guest can write */
+#define TLBnCFG_IND 0x00020000 /* IND entries supported */
+#define TLBnCFG_PT 0x00040000 /* Can load from page table */
+#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */
+#define TLBnCFG_MINSIZE_SHIFT 20
+#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
+#define TLBnCFG_MAXSIZE_SHIFT 16
+#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+#define TLBnCFG_ASSOC_SHIFT 24
+
+/* TLBnPS encoding */
+#define TLBnPS_4K 0x00000004
+#define TLBnPS_8K 0x00000008
+#define TLBnPS_16K 0x00000010
+#define TLBnPS_32K 0x00000020
+#define TLBnPS_64K 0x00000040
+#define TLBnPS_128K 0x00000080
+#define TLBnPS_256K 0x00000100
+#define TLBnPS_512K 0x00000200
+#define TLBnPS_1M 0x00000400
+#define TLBnPS_2M 0x00000800
+#define TLBnPS_4M 0x00001000
+#define TLBnPS_8M 0x00002000
+#define TLBnPS_16M 0x00004000
+#define TLBnPS_32M 0x00008000
+#define TLBnPS_64M 0x00010000
+#define TLBnPS_128M 0x00020000
+#define TLBnPS_256M 0x00040000
+#define TLBnPS_512M 0x00080000
+#define TLBnPS_1G 0x00100000
+#define TLBnPS_2G 0x00200000
+#define TLBnPS_4G 0x00400000
+#define TLBnPS_8G 0x00800000
+#define TLBnPS_16G 0x01000000
+#define TLBnPS_32G 0x02000000
+#define TLBnPS_64G 0x04000000
+#define TLBnPS_128G 0x08000000
+#define TLBnPS_256G 0x10000000
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL 0
+#define TLBILX_T_TID 1
+#define TLBILX_T_FULLMATCH 3
+#define TLBILX_T_CLASS0 4
+#define TLBILX_T_CLASS1 5
+#define TLBILX_T_CLASS2 6
+#define TLBILX_T_CLASS3 7
+
+/* BookE 2.06 helper defines */
+
+#define BOOKE206_FLUSH_TLB0 (1 << 0)
+#define BOOKE206_FLUSH_TLB1 (1 << 1)
+#define BOOKE206_FLUSH_TLB2 (1 << 2)
+#define BOOKE206_FLUSH_TLB3 (1 << 3)
+
+/* number of possible TLBs */
+#define BOOKE206_MAX_TLBN 4
+
+#define EPID_EPID_SHIFT 0x0
+#define EPID_EPID 0xFF
+#define EPID_ELPID_SHIFT 0x10
+#define EPID_ELPID 0x3F0000
+#define EPID_EGS 0x20000000
+#define EPID_EGS_SHIFT 29
+#define EPID_EAS 0x40000000
+#define EPID_EAS_SHIFT 30
+#define EPID_EPR 0x80000000
+#define EPID_EPR_SHIFT 31
+/* We don't support EGS and ELPID */
+#define EPID_MASK (EPID_EPID | EPID_EAS | EPID_EPR)
+
+/*****************************************************************************/
+/* Server and Embedded Processor Control */
+
+#define DBELL_TYPE_SHIFT 27
+#define DBELL_TYPE_MASK (0x1f << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_DBELL (0x00 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_DBELL_CRIT (0x01 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL (0x02 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL_CRIT (0x03 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL_MC (0x04 << DBELL_TYPE_SHIFT)
+
+#define DBELL_TYPE_DBELL_SERVER (0x05 << DBELL_TYPE_SHIFT)
+
+#define DBELL_BRDCAST PPC_BIT(37)
+#define DBELL_LPIDTAG_SHIFT 14
+#define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT)
+#define DBELL_PIRTAG_MASK 0x3fff
+
+#define DBELL_PROCIDTAG_MASK PPC_BITMASK(44, 63)
+
+#define PPC_PAGE_SIZES_MAX_SZ 8
+
+struct ppc_radix_page_info {
+ uint32_t count;
+ uint32_t entries[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+/*****************************************************************************/
+/* The whole PowerPC CPU context */
+
+/*
+ * PowerPC needs eight modes for different hypervisor/supervisor/guest
+ * + real/paged mode combinations. The other two modes are for
+ * external PID load/store.
+ */
+#define PPC_TLB_EPID_LOAD 8
+#define PPC_TLB_EPID_STORE 9
+
+#define PPC_CPU_OPCODES_LEN 0x40
+#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
+
+struct CPUPPCState {
+ /* Most commonly used resources during translated code execution first */
+ target_ulong gpr[32]; /* general purpose registers */
+ target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */
+ target_ulong lr;
+ target_ulong ctr;
+ uint32_t crf[8]; /* condition register */
+#if defined(TARGET_PPC64)
+ target_ulong cfar;
+#endif
+ target_ulong xer; /* XER (with SO, OV, CA split out) */
+ target_ulong so;
+ target_ulong ov;
+ target_ulong ca;
+ target_ulong ov32;
+ target_ulong ca32;
+
+ target_ulong reserve_addr; /* Reservation address */
+ target_ulong reserve_val; /* Reservation value */
+ target_ulong reserve_val2;
+
+ /* These are used in supervisor mode only */
+ target_ulong msr; /* machine state register */
+ target_ulong tgpr[4]; /* temporary general purpose registers, */
+ /* used to speed up TLB assist handlers */
+
+ target_ulong nip; /* next instruction pointer */
+ uint64_t retxh; /* high part of 128-bit helper return */
+
+ /* when a memory exception occurs, the access type is stored here */
+ int access_type;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* MMU context, only relevant for full system emulation */
+#if defined(TARGET_PPC64)
+ ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
+#endif
+ target_ulong sr[32]; /* segment registers */
+ uint32_t nb_BATs; /* number of BATs */
+ target_ulong DBAT[2][8];
+ target_ulong IBAT[2][8];
+ /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */
+ int32_t nb_tlb; /* Total number of TLB entries */
+ int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
+ int nb_ways; /* Number of ways in the TLB set */
+ int last_way; /* Last way used to allocate a TLB entry (LRU) */
+ int id_tlbs; /* If 1, the MMU has separate TLBs for instructions and data */
+ int nb_pids; /* Number of available PID registers */
+ int tlb_type; /* Type of TLB we're dealing with */
+ ppc_tlb_t tlb; /* TLB is optional. Allocate it only if needed */
+ target_ulong pb[4]; /* 403 dedicated access protection registers */
+ bool tlb_dirty; /* Set when the TLB has been modified */
+ bool kvm_sw_tlb; /* true if the KVM software TLB API is active */
+ uint32_t tlb_need_flush; /* Delayed flush needed */
+#define TLB_NEED_LOCAL_FLUSH 0x1
+#define TLB_NEED_GLOBAL_FLUSH 0x2
+#endif
+
+ /* Other registers */
+ target_ulong spr[1024]; /* special purpose registers */
+ ppc_spr_t spr_cb[1024];
+ /* Vector status and control register, minus VSCR_SAT */
+ uint32_t vscr;
+ /* VSX registers (including FP and AVR) */
+ ppc_vsr_t vsr[64] QEMU_ALIGNED(16);
+ /* Non-zero if and only if VSCR_SAT should be set */
+ ppc_vsr_t vscr_sat QEMU_ALIGNED(16);
+ /* SPE registers */
+ uint64_t spe_acc;
+ uint32_t spe_fscr;
+ /* SPE and Altivec share status as they'll never be used simultaneously */
+ float_status vec_status;
+ float_status fp_status; /* Floating point execution context */
+ target_ulong fpscr; /* Floating point status and control register */
+
+ /* Internal devices resources */
+ ppc_tb_t *tb_env; /* Time base and decrementer */
+ ppc_dcr_t *dcr_env; /* Device control registers */
+
+ int dcache_line_size;
+ int icache_line_size;
+
+ /* These resources are used during exception processing */
+ /* CPU model definition */
+ target_ulong msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ int bfd_mach;
+ uint32_t flags;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+
+ int error_code;
+ uint32_t pending_interrupts;
+#if !defined(CONFIG_USER_ONLY)
+ /*
+ * This is the IRQ controller, which is implementation dependent and only
+ * relevant when emulating a complete machine. Note that this isn't used
+ * by recent Book3s compatible CPUs (POWER7 and newer).
+ */
+ uint32_t irq_input_state;
+ void **irq_inputs;
+
+ target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */
+ target_ulong excp_prefix;
+ target_ulong ivor_mask;
+ target_ulong ivpr_mask;
+ target_ulong hreset_vector;
+ hwaddr mpic_iack;
+ bool mpic_proxy; /* true if the external proxy facility mode is enabled */
+ bool has_hv_mode; /* set when the processor has an HV mode, thus HV priv */
+ /* instructions and SPRs are disallowed if MSR:HV is 0 */
+ /*
+ * On P7/P8/P9, set when in PM state so we need to handle resume in a
+ * special way (such as routing some resume causes to 0x100, i.e. sreset).
+ */
+ bool resume_as_sreset;
+#endif
+
+ /* These resources are used only in TCG */
+ uint32_t hflags;
+ target_ulong hflags_compat_nmsr; /* for migration compatibility */
+
+ /* Power management */
+ int (*check_pow)(CPUPPCState *env);
+
+#if !defined(CONFIG_USER_ONLY)
+ void *load_info; /* holds boot loading state */
+#endif
+
+ /* BookE timers */
+
+ /*
+ * Specifies bit locations of the Time Base used to signal a fixed timer
+ * exception on a transition from 0 to 1 (watchdog or fixed-interval timer)
+ *
+ * 0 selects the least significant bit, 63 selects the most significant bit
+ */
+ uint8_t fit_period[4];
+ uint8_t wdt_period[4];
+
+ /* Transactional memory state */
+ target_ulong tm_gpr[32];
+ ppc_avr_t tm_vsr[64];
+ uint64_t tm_cr;
+ uint64_t tm_lr;
+ uint64_t tm_ctr;
+ uint64_t tm_fpscr;
+ uint64_t tm_amr;
+ uint64_t tm_ppr;
+ uint64_t tm_vrsave;
+ uint32_t tm_vscr;
+ uint64_t tm_dscr;
+ uint64_t tm_tar;
+};
+
+#define SET_FIT_PERIOD(a_, b_, c_, d_) \
+do { \
+ env->fit_period[0] = (a_); \
+ env->fit_period[1] = (b_); \
+ env->fit_period[2] = (c_); \
+ env->fit_period[3] = (d_); \
+ } while (0)
+
+#define SET_WDT_PERIOD(a_, b_, c_, d_) \
+do { \
+ env->wdt_period[0] = (a_); \
+ env->wdt_period[1] = (b_); \
+ env->wdt_period[2] = (c_); \
+ env->wdt_period[3] = (d_); \
+ } while (0)
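+/*
+ * Illustrative use only (hypothetical values, not taken from a real model):
+ * a BookE core whose fixed-interval timer can fire on Time Base bits
+ * 8/12/16/20 would do
+ *
+ *     SET_FIT_PERIOD(8, 12, 16, 20);
+ *
+ * so that the four selectable TCR[FP] settings map to those bit positions.
+ */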
+
+typedef struct PPCVirtualHypervisor PPCVirtualHypervisor;
+typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass;
+
+/**
+ * PowerPCCPU:
+ * @env: #CPUPPCState
+ * @vcpu_id: vCPU identifier given to KVM
+ * @compat_pvr: Current logical PVR, zero if in "raw" mode
+ *
+ * A PowerPC CPU.
+ */
+struct PowerPCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUPPCState env;
+
+ int vcpu_id;
+ uint32_t compat_pvr;
+ PPCVirtualHypervisor *vhyp;
+ void *machine_data;
+ int32_t node_id; /* NUMA node this CPU belongs to */
+ PPCHash64Options *hash64_opts;
+
+ /* These resources are used only during code translation */
+ /* opcode handlers */
+ opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
+
+ /* Fields related to migration compatibility hacks */
+ bool pre_2_8_migration;
+ target_ulong mig_msr_mask;
+ uint64_t mig_insns_flags;
+ uint64_t mig_insns_flags2;
+ uint32_t mig_nb_BATs;
+ bool pre_2_10_migration;
+ bool pre_3_0_migration;
+ int32_t mig_slb_nr;
+};
+
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
+PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
+
+#ifndef CONFIG_USER_ONLY
+struct PPCVirtualHypervisorClass {
+ InterfaceClass parent;
+ void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
+ hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
+ const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
+ hwaddr ptex, int n);
+ void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
+ const ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n);
+ void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
+ void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
+ void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
+ target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
+ void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
+ void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
+};
+
+#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
+DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
+ PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
+#endif /* CONFIG_USER_ONLY */
+
+void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int ppc_cpu_gdb_read_register_apple(CPUState *cpu, GByteArray *buf, int reg);
+int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
+#ifndef CONFIG_USER_ONLY
+void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu);
+const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name);
+#endif
+int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+#ifndef CONFIG_USER_ONLY
+void ppc_cpu_do_interrupt(CPUState *cpu);
+bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void ppc_cpu_do_system_reset(CPUState *cs);
+void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
+extern const VMStateDescription vmstate_ppc_cpu;
+#endif
+
+/*****************************************************************************/
+void ppc_translate_init(void);
+
+#if !defined(CONFIG_USER_ONLY)
+void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
+#endif /* !defined(CONFIG_USER_ONLY) */
+void ppc_store_msr(CPUPPCState *env, target_ulong value);
+void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
+
+void ppc_cpu_list(void);
+
+/* Time-base and decrementer management */
+#ifndef NO_CPU_IO_DEFS
+uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
+uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
+void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_atbl(CPUPPCState *env);
+uint32_t cpu_ppc_load_atbu(CPUPPCState *env);
+void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_vtb(CPUPPCState *env);
+void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value);
+bool ppc_decr_clear_on_delivery(CPUPPCState *env);
+target_ulong cpu_ppc_load_decr(CPUPPCState *env);
+void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value);
+target_ulong cpu_ppc_load_hdecr(CPUPPCState *env);
+void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value);
+void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value);
+uint64_t cpu_ppc_load_purr(CPUPPCState *env);
+void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value);
+uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env);
+#if !defined(CONFIG_USER_ONLY)
+void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value);
+void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value);
+target_ulong load_40x_pit(CPUPPCState *env);
+void store_40x_pit(CPUPPCState *env, target_ulong val);
+void store_40x_dbcr0(CPUPPCState *env, uint32_t val);
+void store_40x_sler(CPUPPCState *env, uint32_t val);
+void store_booke_tcr(CPUPPCState *env, target_ulong val);
+void store_booke_tsr(CPUPPCState *env, target_ulong val);
+void ppc_tlb_invalidate_all(CPUPPCState *env);
+void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr);
+void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
+ hwaddr *raddrp, target_ulong address,
+ uint32_t pid);
+int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddrp,
+ target_ulong address, uint32_t pid, int ext,
+ int i);
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
+ ppcmas_tlb_t *tlb);
+#endif
+#endif
+
+void ppc_store_fpscr(CPUPPCState *env, target_ulong val);
+void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
+ const char *caller, uint32_t cause);
+
+static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
+{
+ uint64_t gprv;
+
+ gprv = env->gpr[gprn];
+ if (env->flags & POWERPC_FLAG_SPE) {
+ /*
+ * If the CPU implements the SPE extension, we have to get the
+ * high bits of the GPR from the gprh storage area
+ */
+ gprv &= 0xFFFFFFFFULL;
+ gprv |= (uint64_t)env->gprh[gprn] << 32;
+ }
+
+ return gprv;
+}
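+/*
+ * Example (assuming an SPE-capable core): with env->gpr[3] = 0x11223344 and
+ * env->gprh[3] = 0xAABBCCDD, ppc_dump_gpr(env, 3) returns
+ * 0xAABBCCDD11223344; without POWERPC_FLAG_SPE the gpr[] value is
+ * returned unchanged.
+ */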
+
+/* Device control registers */
+int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
+int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
+
+#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
+#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU
+
+#define cpu_list ppc_cpu_list
+
+/* MMU modes definitions */
+#define MMU_USER_IDX 0
+static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ return (env->hflags >> (ifetch ? HFLAGS_IMMU_IDX : HFLAGS_DMMU_IDX)) & 7;
+#endif
+}
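+/*
+ * In outline: hflags packs two 3-bit MMU index fields (see the HFLAGS_*
+ * definitions earlier in this header), so the data-side index, for
+ * instance, is simply (env->hflags >> HFLAGS_DMMU_IDX) & 7.
+ */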
+
+/* Compatibility modes */
+#if defined(TARGET_PPC64)
+bool ppc_check_compat(PowerPCCPU *cpu, uint32_t compat_pvr,
+ uint32_t min_compat_pvr, uint32_t max_compat_pvr);
+bool ppc_type_check_compat(const char *cputype, uint32_t compat_pvr,
+ uint32_t min_compat_pvr, uint32_t max_compat_pvr);
+
+int ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp);
+
+#if !defined(CONFIG_USER_ONLY)
+int ppc_set_compat_all(uint32_t compat_pvr, Error **errp);
+#endif
+int ppc_compat_max_vthreads(PowerPCCPU *cpu);
+void ppc_compat_add_property(Object *obj, const char *name,
+ uint32_t *compat_pvr, const char *basedesc);
+#endif /* defined(TARGET_PPC64) */
+
+typedef CPUPPCState CPUArchState;
+typedef PowerPCCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/*****************************************************************************/
+/* CRF definitions */
+#define CRF_LT_BIT 3
+#define CRF_GT_BIT 2
+#define CRF_EQ_BIT 1
+#define CRF_SO_BIT 0
+#define CRF_LT (1 << CRF_LT_BIT)
+#define CRF_GT (1 << CRF_GT_BIT)
+#define CRF_EQ (1 << CRF_EQ_BIT)
+#define CRF_SO (1 << CRF_SO_BIT)
+/* For SPE extensions */
+#define CRF_CH (1 << CRF_LT_BIT)
+#define CRF_CL (1 << CRF_GT_BIT)
+#define CRF_CH_OR_CL (1 << CRF_EQ_BIT)
+#define CRF_CH_AND_CL (1 << CRF_SO_BIT)
+
+/* XER definitions */
+#define XER_SO 31
+#define XER_OV 30
+#define XER_CA 29
+#define XER_OV32 19
+#define XER_CA32 18
+#define XER_CMP 8
+#define XER_BC 0
+#define xer_so (env->so)
+#define xer_ov (env->ov)
+#define xer_ca (env->ca)
+#define xer_ov32 (env->ov)
+#define xer_ca32 (env->ca)
+#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
+#define xer_bc ((env->xer >> XER_BC) & 0x7F)
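+/*
+ * Note that SO/OV/CA and their 32-bit variants live in dedicated env fields
+ * rather than in env->xer itself; cpu_read_xer()/cpu_write_xer() below
+ * reassemble and split the architectural register.
+ */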
+
+/* SPR definitions */
+#define SPR_MQ (0x000)
+#define SPR_XER (0x001)
+#define SPR_601_VRTCU (0x004)
+#define SPR_601_VRTCL (0x005)
+#define SPR_601_UDECR (0x006)
+#define SPR_LR (0x008)
+#define SPR_CTR (0x009)
+#define SPR_UAMR (0x00D)
+#define SPR_DSCR (0x011)
+#define SPR_DSISR (0x012)
+#define SPR_DAR (0x013) /* DAE for PowerPC 601 */
+#define SPR_601_RTCU (0x014)
+#define SPR_601_RTCL (0x015)
+#define SPR_DECR (0x016)
+#define SPR_SDR1 (0x019)
+#define SPR_SRR0 (0x01A)
+#define SPR_SRR1 (0x01B)
+#define SPR_CFAR (0x01C)
+#define SPR_AMR (0x01D)
+#define SPR_ACOP (0x01F)
+#define SPR_BOOKE_PID (0x030)
+#define SPR_BOOKS_PID (0x030)
+#define SPR_BOOKE_DECAR (0x036)
+#define SPR_BOOKE_CSRR0 (0x03A)
+#define SPR_BOOKE_CSRR1 (0x03B)
+#define SPR_BOOKE_DEAR (0x03D)
+#define SPR_IAMR (0x03D)
+#define SPR_BOOKE_ESR (0x03E)
+#define SPR_BOOKE_IVPR (0x03F)
+#define SPR_MPC_EIE (0x050)
+#define SPR_MPC_EID (0x051)
+#define SPR_MPC_NRI (0x052)
+#define SPR_TFHAR (0x080)
+#define SPR_TFIAR (0x081)
+#define SPR_TEXASR (0x082)
+#define SPR_TEXASRU (0x083)
+#define SPR_UCTRL (0x088)
+#define SPR_TIDR (0x090)
+#define SPR_MPC_CMPA (0x090)
+#define SPR_MPC_CMPB (0x091)
+#define SPR_MPC_CMPC (0x092)
+#define SPR_MPC_CMPD (0x093)
+#define SPR_MPC_ECR (0x094)
+#define SPR_MPC_DER (0x095)
+#define SPR_MPC_COUNTA (0x096)
+#define SPR_MPC_COUNTB (0x097)
+#define SPR_CTRL (0x098)
+#define SPR_MPC_CMPE (0x098)
+#define SPR_MPC_CMPF (0x099)
+#define SPR_FSCR (0x099)
+#define SPR_MPC_CMPG (0x09A)
+#define SPR_MPC_CMPH (0x09B)
+#define SPR_MPC_LCTRL1 (0x09C)
+#define SPR_MPC_LCTRL2 (0x09D)
+#define SPR_UAMOR (0x09D)
+#define SPR_MPC_ICTRL (0x09E)
+#define SPR_MPC_BAR (0x09F)
+#define SPR_PSPB (0x09F)
+#define SPR_DPDES (0x0B0)
+#define SPR_DAWR0 (0x0B4)
+#define SPR_RPR (0x0BA)
+#define SPR_CIABR (0x0BB)
+#define SPR_DAWRX0 (0x0BC)
+#define SPR_HFSCR (0x0BE)
+#define SPR_VRSAVE (0x100)
+#define SPR_USPRG0 (0x100)
+#define SPR_USPRG1 (0x101)
+#define SPR_USPRG2 (0x102)
+#define SPR_USPRG3 (0x103)
+#define SPR_USPRG4 (0x104)
+#define SPR_USPRG5 (0x105)
+#define SPR_USPRG6 (0x106)
+#define SPR_USPRG7 (0x107)
+#define SPR_VTBL (0x10C)
+#define SPR_VTBU (0x10D)
+#define SPR_SPRG0 (0x110)
+#define SPR_SPRG1 (0x111)
+#define SPR_SPRG2 (0x112)
+#define SPR_SPRG3 (0x113)
+#define SPR_SPRG4 (0x114)
+#define SPR_SCOMC (0x114)
+#define SPR_SPRG5 (0x115)
+#define SPR_SCOMD (0x115)
+#define SPR_SPRG6 (0x116)
+#define SPR_SPRG7 (0x117)
+#define SPR_ASR (0x118)
+#define SPR_EAR (0x11A)
+#define SPR_TBL (0x11C)
+#define SPR_TBU (0x11D)
+#define SPR_TBU40 (0x11E)
+#define SPR_SVR (0x11E)
+#define SPR_BOOKE_PIR (0x11E)
+#define SPR_PVR (0x11F)
+#define SPR_HSPRG0 (0x130)
+#define SPR_BOOKE_DBSR (0x130)
+#define SPR_HSPRG1 (0x131)
+#define SPR_HDSISR (0x132)
+#define SPR_HDAR (0x133)
+#define SPR_BOOKE_EPCR (0x133)
+#define SPR_SPURR (0x134)
+#define SPR_BOOKE_DBCR0 (0x134)
+#define SPR_IBCR (0x135)
+#define SPR_PURR (0x135)
+#define SPR_BOOKE_DBCR1 (0x135)
+#define SPR_DBCR (0x136)
+#define SPR_HDEC (0x136)
+#define SPR_BOOKE_DBCR2 (0x136)
+#define SPR_HIOR (0x137)
+#define SPR_MBAR (0x137)
+#define SPR_RMOR (0x138)
+#define SPR_BOOKE_IAC1 (0x138)
+#define SPR_HRMOR (0x139)
+#define SPR_BOOKE_IAC2 (0x139)
+#define SPR_HSRR0 (0x13A)
+#define SPR_BOOKE_IAC3 (0x13A)
+#define SPR_HSRR1 (0x13B)
+#define SPR_BOOKE_IAC4 (0x13B)
+#define SPR_BOOKE_DAC1 (0x13C)
+#define SPR_MMCRH (0x13C)
+#define SPR_DABR2 (0x13D)
+#define SPR_BOOKE_DAC2 (0x13D)
+#define SPR_TFMR (0x13D)
+#define SPR_BOOKE_DVC1 (0x13E)
+#define SPR_LPCR (0x13E)
+#define SPR_BOOKE_DVC2 (0x13F)
+#define SPR_LPIDR (0x13F)
+#define SPR_BOOKE_TSR (0x150)
+#define SPR_HMER (0x150)
+#define SPR_HMEER (0x151)
+#define SPR_PCR (0x152)
+#define SPR_BOOKE_LPIDR (0x152)
+#define SPR_BOOKE_TCR (0x154)
+#define SPR_BOOKE_TLB0PS (0x158)
+#define SPR_BOOKE_TLB1PS (0x159)
+#define SPR_BOOKE_TLB2PS (0x15A)
+#define SPR_BOOKE_TLB3PS (0x15B)
+#define SPR_AMOR (0x15D)
+#define SPR_BOOKE_MAS7_MAS3 (0x174)
+#define SPR_BOOKE_IVOR0 (0x190)
+#define SPR_BOOKE_IVOR1 (0x191)
+#define SPR_BOOKE_IVOR2 (0x192)
+#define SPR_BOOKE_IVOR3 (0x193)
+#define SPR_BOOKE_IVOR4 (0x194)
+#define SPR_BOOKE_IVOR5 (0x195)
+#define SPR_BOOKE_IVOR6 (0x196)
+#define SPR_BOOKE_IVOR7 (0x197)
+#define SPR_BOOKE_IVOR8 (0x198)
+#define SPR_BOOKE_IVOR9 (0x199)
+#define SPR_BOOKE_IVOR10 (0x19A)
+#define SPR_BOOKE_IVOR11 (0x19B)
+#define SPR_BOOKE_IVOR12 (0x19C)
+#define SPR_BOOKE_IVOR13 (0x19D)
+#define SPR_BOOKE_IVOR14 (0x19E)
+#define SPR_BOOKE_IVOR15 (0x19F)
+#define SPR_BOOKE_IVOR38 (0x1B0)
+#define SPR_BOOKE_IVOR39 (0x1B1)
+#define SPR_BOOKE_IVOR40 (0x1B2)
+#define SPR_BOOKE_IVOR41 (0x1B3)
+#define SPR_BOOKE_IVOR42 (0x1B4)
+#define SPR_BOOKE_GIVOR2 (0x1B8)
+#define SPR_BOOKE_GIVOR3 (0x1B9)
+#define SPR_BOOKE_GIVOR4 (0x1BA)
+#define SPR_BOOKE_GIVOR8 (0x1BB)
+#define SPR_BOOKE_GIVOR13 (0x1BC)
+#define SPR_BOOKE_GIVOR14 (0x1BD)
+#define SPR_TIR (0x1BE)
+#define SPR_PTCR (0x1D0)
+#define SPR_BOOKE_SPEFSCR (0x200)
+#define SPR_Exxx_BBEAR (0x201)
+#define SPR_Exxx_BBTAR (0x202)
+#define SPR_Exxx_L1CFG0 (0x203)
+#define SPR_Exxx_L1CFG1 (0x204)
+#define SPR_Exxx_NPIDR (0x205)
+#define SPR_ATBL (0x20E)
+#define SPR_ATBU (0x20F)
+#define SPR_IBAT0U (0x210)
+#define SPR_BOOKE_IVOR32 (0x210)
+#define SPR_RCPU_MI_GRA (0x210)
+#define SPR_IBAT0L (0x211)
+#define SPR_BOOKE_IVOR33 (0x211)
+#define SPR_IBAT1U (0x212)
+#define SPR_BOOKE_IVOR34 (0x212)
+#define SPR_IBAT1L (0x213)
+#define SPR_BOOKE_IVOR35 (0x213)
+#define SPR_IBAT2U (0x214)
+#define SPR_BOOKE_IVOR36 (0x214)
+#define SPR_IBAT2L (0x215)
+#define SPR_BOOKE_IVOR37 (0x215)
+#define SPR_IBAT3U (0x216)
+#define SPR_IBAT3L (0x217)
+#define SPR_DBAT0U (0x218)
+#define SPR_RCPU_L2U_GRA (0x218)
+#define SPR_DBAT0L (0x219)
+#define SPR_DBAT1U (0x21A)
+#define SPR_DBAT1L (0x21B)
+#define SPR_DBAT2U (0x21C)
+#define SPR_DBAT2L (0x21D)
+#define SPR_DBAT3U (0x21E)
+#define SPR_DBAT3L (0x21F)
+#define SPR_IBAT4U (0x230)
+#define SPR_RPCU_BBCMCR (0x230)
+#define SPR_MPC_IC_CST (0x230)
+#define SPR_Exxx_CTXCR (0x230)
+#define SPR_IBAT4L (0x231)
+#define SPR_MPC_IC_ADR (0x231)
+#define SPR_Exxx_DBCR3 (0x231)
+#define SPR_IBAT5U (0x232)
+#define SPR_MPC_IC_DAT (0x232)
+#define SPR_Exxx_DBCNT (0x232)
+#define SPR_IBAT5L (0x233)
+#define SPR_IBAT6U (0x234)
+#define SPR_IBAT6L (0x235)
+#define SPR_IBAT7U (0x236)
+#define SPR_IBAT7L (0x237)
+#define SPR_DBAT4U (0x238)
+#define SPR_RCPU_L2U_MCR (0x238)
+#define SPR_MPC_DC_CST (0x238)
+#define SPR_Exxx_ALTCTXCR (0x238)
+#define SPR_DBAT4L (0x239)
+#define SPR_MPC_DC_ADR (0x239)
+#define SPR_DBAT5U (0x23A)
+#define SPR_BOOKE_MCSRR0 (0x23A)
+#define SPR_MPC_DC_DAT (0x23A)
+#define SPR_DBAT5L (0x23B)
+#define SPR_BOOKE_MCSRR1 (0x23B)
+#define SPR_DBAT6U (0x23C)
+#define SPR_BOOKE_MCSR (0x23C)
+#define SPR_DBAT6L (0x23D)
+#define SPR_Exxx_MCAR (0x23D)
+#define SPR_DBAT7U (0x23E)
+#define SPR_BOOKE_DSRR0 (0x23E)
+#define SPR_DBAT7L (0x23F)
+#define SPR_BOOKE_DSRR1 (0x23F)
+#define SPR_BOOKE_SPRG8 (0x25C)
+#define SPR_BOOKE_SPRG9 (0x25D)
+#define SPR_BOOKE_MAS0 (0x270)
+#define SPR_BOOKE_MAS1 (0x271)
+#define SPR_BOOKE_MAS2 (0x272)
+#define SPR_BOOKE_MAS3 (0x273)
+#define SPR_BOOKE_MAS4 (0x274)
+#define SPR_BOOKE_MAS5 (0x275)
+#define SPR_BOOKE_MAS6 (0x276)
+#define SPR_BOOKE_PID1 (0x279)
+#define SPR_BOOKE_PID2 (0x27A)
+#define SPR_MPC_DPDR (0x280)
+#define SPR_MPC_IMMR (0x288)
+#define SPR_BOOKE_TLB0CFG (0x2B0)
+#define SPR_BOOKE_TLB1CFG (0x2B1)
+#define SPR_BOOKE_TLB2CFG (0x2B2)
+#define SPR_BOOKE_TLB3CFG (0x2B3)
+#define SPR_BOOKE_EPR (0x2BE)
+#define SPR_PERF0 (0x300)
+#define SPR_RCPU_MI_RBA0 (0x300)
+#define SPR_MPC_MI_CTR (0x300)
+#define SPR_POWER_USIER (0x300)
+#define SPR_PERF1 (0x301)
+#define SPR_RCPU_MI_RBA1 (0x301)
+#define SPR_POWER_UMMCR2 (0x301)
+#define SPR_PERF2 (0x302)
+#define SPR_RCPU_MI_RBA2 (0x302)
+#define SPR_MPC_MI_AP (0x302)
+#define SPR_POWER_UMMCRA (0x302)
+#define SPR_PERF3 (0x303)
+#define SPR_RCPU_MI_RBA3 (0x303)
+#define SPR_MPC_MI_EPN (0x303)
+#define SPR_POWER_UPMC1 (0x303)
+#define SPR_PERF4 (0x304)
+#define SPR_POWER_UPMC2 (0x304)
+#define SPR_PERF5 (0x305)
+#define SPR_MPC_MI_TWC (0x305)
+#define SPR_POWER_UPMC3 (0x305)
+#define SPR_PERF6 (0x306)
+#define SPR_MPC_MI_RPN (0x306)
+#define SPR_POWER_UPMC4 (0x306)
+#define SPR_PERF7 (0x307)
+#define SPR_POWER_UPMC5 (0x307)
+#define SPR_PERF8 (0x308)
+#define SPR_RCPU_L2U_RBA0 (0x308)
+#define SPR_MPC_MD_CTR (0x308)
+#define SPR_POWER_UPMC6 (0x308)
+#define SPR_PERF9 (0x309)
+#define SPR_RCPU_L2U_RBA1 (0x309)
+#define SPR_MPC_MD_CASID (0x309)
+#define SPR_970_UPMC7 (0x309)
+#define SPR_PERFA (0x30A)
+#define SPR_RCPU_L2U_RBA2 (0x30A)
+#define SPR_MPC_MD_AP (0x30A)
+#define SPR_970_UPMC8 (0x30A)
+#define SPR_PERFB (0x30B)
+#define SPR_RCPU_L2U_RBA3 (0x30B)
+#define SPR_MPC_MD_EPN (0x30B)
+#define SPR_POWER_UMMCR0 (0x30B)
+#define SPR_PERFC (0x30C)
+#define SPR_MPC_MD_TWB (0x30C)
+#define SPR_POWER_USIAR (0x30C)
+#define SPR_PERFD (0x30D)
+#define SPR_MPC_MD_TWC (0x30D)
+#define SPR_POWER_USDAR (0x30D)
+#define SPR_PERFE (0x30E)
+#define SPR_MPC_MD_RPN (0x30E)
+#define SPR_POWER_UMMCR1 (0x30E)
+#define SPR_PERFF (0x30F)
+#define SPR_MPC_MD_TW (0x30F)
+#define SPR_UPERF0 (0x310)
+#define SPR_POWER_SIER (0x310)
+#define SPR_UPERF1 (0x311)
+#define SPR_POWER_MMCR2 (0x311)
+#define SPR_UPERF2 (0x312)
+#define SPR_POWER_MMCRA (0x312)
+#define SPR_UPERF3 (0x313)
+#define SPR_POWER_PMC1 (0x313)
+#define SPR_UPERF4 (0x314)
+#define SPR_POWER_PMC2 (0x314)
+#define SPR_UPERF5 (0x315)
+#define SPR_POWER_PMC3 (0x315)
+#define SPR_UPERF6 (0x316)
+#define SPR_POWER_PMC4 (0x316)
+#define SPR_UPERF7 (0x317)
+#define SPR_POWER_PMC5 (0x317)
+#define SPR_UPERF8 (0x318)
+#define SPR_POWER_PMC6 (0x318)
+#define SPR_UPERF9 (0x319)
+#define SPR_970_PMC7 (0x319)
+#define SPR_UPERFA (0x31A)
+#define SPR_970_PMC8 (0x31A)
+#define SPR_UPERFB (0x31B)
+#define SPR_POWER_MMCR0 (0x31B)
+#define SPR_UPERFC (0x31C)
+#define SPR_POWER_SIAR (0x31C)
+#define SPR_UPERFD (0x31D)
+#define SPR_POWER_SDAR (0x31D)
+#define SPR_UPERFE (0x31E)
+#define SPR_POWER_MMCR1 (0x31E)
+#define SPR_UPERFF (0x31F)
+#define SPR_RCPU_MI_RA0 (0x320)
+#define SPR_MPC_MI_DBCAM (0x320)
+#define SPR_BESCRS (0x320)
+#define SPR_RCPU_MI_RA1 (0x321)
+#define SPR_MPC_MI_DBRAM0 (0x321)
+#define SPR_BESCRSU (0x321)
+#define SPR_RCPU_MI_RA2 (0x322)
+#define SPR_MPC_MI_DBRAM1 (0x322)
+#define SPR_BESCRR (0x322)
+#define SPR_RCPU_MI_RA3 (0x323)
+#define SPR_BESCRRU (0x323)
+#define SPR_EBBHR (0x324)
+#define SPR_EBBRR (0x325)
+#define SPR_BESCR (0x326)
+#define SPR_RCPU_L2U_RA0 (0x328)
+#define SPR_MPC_MD_DBCAM (0x328)
+#define SPR_RCPU_L2U_RA1 (0x329)
+#define SPR_MPC_MD_DBRAM0 (0x329)
+#define SPR_RCPU_L2U_RA2 (0x32A)
+#define SPR_MPC_MD_DBRAM1 (0x32A)
+#define SPR_RCPU_L2U_RA3 (0x32B)
+#define SPR_TAR (0x32F)
+#define SPR_ASDR (0x330)
+#define SPR_IC (0x350)
+#define SPR_VTB (0x351)
+#define SPR_MMCRC (0x353)
+#define SPR_PSSCR (0x357)
+#define SPR_440_INV0 (0x370)
+#define SPR_440_INV1 (0x371)
+#define SPR_440_INV2 (0x372)
+#define SPR_440_INV3 (0x373)
+#define SPR_440_ITV0 (0x374)
+#define SPR_440_ITV1 (0x375)
+#define SPR_440_ITV2 (0x376)
+#define SPR_440_ITV3 (0x377)
+#define SPR_440_CCR1 (0x378)
+#define SPR_TACR (0x378)
+#define SPR_TCSCR (0x379)
+#define SPR_CSIGR (0x37A)
+#define SPR_DCRIPR (0x37B)
+#define SPR_POWER_SPMC1 (0x37C)
+#define SPR_POWER_SPMC2 (0x37D)
+#define SPR_POWER_MMCRS (0x37E)
+#define SPR_WORT (0x37F)
+#define SPR_PPR (0x380)
+#define SPR_750_GQR0 (0x390)
+#define SPR_440_DNV0 (0x390)
+#define SPR_750_GQR1 (0x391)
+#define SPR_440_DNV1 (0x391)
+#define SPR_750_GQR2 (0x392)
+#define SPR_440_DNV2 (0x392)
+#define SPR_750_GQR3 (0x393)
+#define SPR_440_DNV3 (0x393)
+#define SPR_750_GQR4 (0x394)
+#define SPR_440_DTV0 (0x394)
+#define SPR_750_GQR5 (0x395)
+#define SPR_440_DTV1 (0x395)
+#define SPR_750_GQR6 (0x396)
+#define SPR_440_DTV2 (0x396)
+#define SPR_750_GQR7 (0x397)
+#define SPR_440_DTV3 (0x397)
+#define SPR_750_THRM4 (0x398)
+#define SPR_750CL_HID2 (0x398)
+#define SPR_440_DVLIM (0x398)
+#define SPR_750_WPAR (0x399)
+#define SPR_440_IVLIM (0x399)
+#define SPR_TSCR (0x399)
+#define SPR_750_DMAU (0x39A)
+#define SPR_750_DMAL (0x39B)
+#define SPR_440_RSTCFG (0x39B)
+#define SPR_BOOKE_DCDBTRL (0x39C)
+#define SPR_BOOKE_DCDBTRH (0x39D)
+#define SPR_BOOKE_ICDBTRL (0x39E)
+#define SPR_BOOKE_ICDBTRH (0x39F)
+#define SPR_74XX_UMMCR2 (0x3A0)
+#define SPR_7XX_UPMC5 (0x3A1)
+#define SPR_7XX_UPMC6 (0x3A2)
+#define SPR_UBAMR (0x3A7)
+#define SPR_7XX_UMMCR0 (0x3A8)
+#define SPR_7XX_UPMC1 (0x3A9)
+#define SPR_7XX_UPMC2 (0x3AA)
+#define SPR_7XX_USIAR (0x3AB)
+#define SPR_7XX_UMMCR1 (0x3AC)
+#define SPR_7XX_UPMC3 (0x3AD)
+#define SPR_7XX_UPMC4 (0x3AE)
+#define SPR_USDA (0x3AF)
+#define SPR_40x_ZPR (0x3B0)
+#define SPR_BOOKE_MAS7 (0x3B0)
+#define SPR_74XX_MMCR2 (0x3B0)
+#define SPR_7XX_PMC5 (0x3B1)
+#define SPR_40x_PID (0x3B1)
+#define SPR_7XX_PMC6 (0x3B2)
+#define SPR_440_MMUCR (0x3B2)
+#define SPR_4xx_CCR0 (0x3B3)
+#define SPR_BOOKE_EPLC (0x3B3)
+#define SPR_405_IAC3 (0x3B4)
+#define SPR_BOOKE_EPSC (0x3B4)
+#define SPR_405_IAC4 (0x3B5)
+#define SPR_405_DVC1 (0x3B6)
+#define SPR_405_DVC2 (0x3B7)
+#define SPR_BAMR (0x3B7)
+#define SPR_7XX_MMCR0 (0x3B8)
+#define SPR_7XX_PMC1 (0x3B9)
+#define SPR_40x_SGR (0x3B9)
+#define SPR_7XX_PMC2 (0x3BA)
+#define SPR_40x_DCWR (0x3BA)
+#define SPR_7XX_SIAR (0x3BB)
+#define SPR_405_SLER (0x3BB)
+#define SPR_7XX_MMCR1 (0x3BC)
+#define SPR_405_SU0R (0x3BC)
+#define SPR_401_SKR (0x3BC)
+#define SPR_7XX_PMC3 (0x3BD)
+#define SPR_405_DBCR1 (0x3BD)
+#define SPR_7XX_PMC4 (0x3BE)
+#define SPR_SDA (0x3BF)
+#define SPR_403_VTBL (0x3CC)
+#define SPR_403_VTBU (0x3CD)
+#define SPR_DMISS (0x3D0)
+#define SPR_DCMP (0x3D1)
+#define SPR_HASH1 (0x3D2)
+#define SPR_HASH2 (0x3D3)
+#define SPR_BOOKE_ICDBDR (0x3D3)
+#define SPR_TLBMISS (0x3D4)
+#define SPR_IMISS (0x3D4)
+#define SPR_40x_ESR (0x3D4)
+#define SPR_PTEHI (0x3D5)
+#define SPR_ICMP (0x3D5)
+#define SPR_40x_DEAR (0x3D5)
+#define SPR_PTELO (0x3D6)
+#define SPR_RPA (0x3D6)
+#define SPR_40x_EVPR (0x3D6)
+#define SPR_L3PM (0x3D7)
+#define SPR_403_CDBCR (0x3D7)
+#define SPR_L3ITCR0 (0x3D8)
+#define SPR_TCR (0x3D8)
+#define SPR_40x_TSR (0x3D8)
+#define SPR_IBR (0x3DA)
+#define SPR_40x_TCR (0x3DA)
+#define SPR_ESASRR (0x3DB)
+#define SPR_40x_PIT (0x3DB)
+#define SPR_403_TBL (0x3DC)
+#define SPR_403_TBU (0x3DD)
+#define SPR_SEBR (0x3DE)
+#define SPR_40x_SRR2 (0x3DE)
+#define SPR_SER (0x3DF)
+#define SPR_40x_SRR3 (0x3DF)
+#define SPR_L3OHCR (0x3E8)
+#define SPR_L3ITCR1 (0x3E9)
+#define SPR_L3ITCR2 (0x3EA)
+#define SPR_L3ITCR3 (0x3EB)
+#define SPR_HID0 (0x3F0)
+#define SPR_40x_DBSR (0x3F0)
+#define SPR_HID1 (0x3F1)
+#define SPR_IABR (0x3F2)
+#define SPR_40x_DBCR0 (0x3F2)
+#define SPR_601_HID2 (0x3F2)
+#define SPR_Exxx_L1CSR0 (0x3F2)
+#define SPR_ICTRL (0x3F3)
+#define SPR_HID2 (0x3F3)
+#define SPR_750CL_HID4 (0x3F3)
+#define SPR_Exxx_L1CSR1 (0x3F3)
+#define SPR_440_DBDR (0x3F3)
+#define SPR_LDSTDB (0x3F4)
+#define SPR_750_TDCL (0x3F4)
+#define SPR_40x_IAC1 (0x3F4)
+#define SPR_MMUCSR0 (0x3F4)
+#define SPR_970_HID4 (0x3F4)
+#define SPR_DABR (0x3F5)
+#define DABR_MASK (~(target_ulong)0x7)
+#define SPR_Exxx_BUCSR (0x3F5)
+#define SPR_40x_IAC2 (0x3F5)
+#define SPR_601_HID5 (0x3F5)
+#define SPR_40x_DAC1 (0x3F6)
+#define SPR_MSSCR0 (0x3F6)
+#define SPR_970_HID5 (0x3F6)
+#define SPR_MSSSR0 (0x3F7)
+#define SPR_MSSCR1 (0x3F7)
+#define SPR_DABRX (0x3F7)
+#define SPR_40x_DAC2 (0x3F7)
+#define SPR_MMUCFG (0x3F7)
+#define SPR_LDSTCR (0x3F8)
+#define SPR_L2PMCR (0x3F8)
+#define SPR_750FX_HID2 (0x3F8)
+#define SPR_Exxx_L1FINV0 (0x3F8)
+#define SPR_L2CR (0x3F9)
+#define SPR_Exxx_L2CSR0 (0x3F9)
+#define SPR_L3CR (0x3FA)
+#define SPR_750_TDCH (0x3FA)
+#define SPR_IABR2 (0x3FA)
+#define SPR_40x_DCCR (0x3FA)
+#define SPR_ICTC (0x3FB)
+#define SPR_40x_ICCR (0x3FB)
+#define SPR_THRM1 (0x3FC)
+#define SPR_403_PBL1 (0x3FC)
+#define SPR_SP (0x3FD)
+#define SPR_THRM2 (0x3FD)
+#define SPR_403_PBU1 (0x3FD)
+#define SPR_604_HID13 (0x3FD)
+#define SPR_LT (0x3FE)
+#define SPR_THRM3 (0x3FE)
+#define SPR_RCPU_FPECR (0x3FE)
+#define SPR_403_PBL2 (0x3FE)
+#define SPR_PIR (0x3FF)
+#define SPR_403_PBU2 (0x3FF)
+#define SPR_601_HID15 (0x3FF)
+#define SPR_604_HID15 (0x3FF)
+#define SPR_E500_SVR (0x3FF)
+
+/* Disable MAS Interrupt Updates for Hypervisor */
+#define EPCR_DMIUH (1 << 22)
+/* Disable Guest TLB Management Instructions */
+#define EPCR_DGTMI (1 << 23)
+/* Guest Interrupt Computation Mode */
+#define EPCR_GICM (1 << 24)
+/* Interrupt Computation Mode */
+#define EPCR_ICM (1 << 25)
+/* Disable Embedded Hypervisor Debug */
+#define EPCR_DUVD (1 << 26)
+/* Instruction Storage Interrupt Directed to Guest State */
+#define EPCR_ISIGS (1 << 27)
+/* Data Storage Interrupt Directed to Guest State */
+#define EPCR_DSIGS (1 << 28)
+/* Instruction TLB Error Interrupt Directed to Guest State */
+#define EPCR_ITLBGS (1 << 29)
+/* Data TLB Error Interrupt Directed to Guest State */
+#define EPCR_DTLBGS (1 << 30)
+/* External Input Interrupt Directed to Guest State */
+#define EPCR_EXTGS (1 << 31)
+
+#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
+#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+
+#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
+#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
+#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
+
+/* E500 L2CSR0 */
+#define E500_L2CSR0_L2FI (1 << 21) /* L2 cache flash invalidate */
+#define E500_L2CSR0_L2FL (1 << 11) /* L2 cache flush */
+#define E500_L2CSR0_L2LFC (1 << 10) /* L2 cache lock flash clear */
+
+/* HID0 bits */
+#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
+#define HID0_DOZE (1 << 23) /* pre-2.06 */
+#define HID0_NAP (1 << 22) /* pre-2.06 */
+#define HID0_HILE PPC_BIT(19) /* POWER8 */
+#define HID0_POWER9_HILE PPC_BIT(4)
+
+/*****************************************************************************/
+/* PowerPC Instructions types definitions */
+enum {
+ PPC_NONE = 0x0000000000000000ULL,
+ /* PowerPC base instructions set */
+ PPC_INSNS_BASE = 0x0000000000000001ULL,
+ /* integer operations instructions */
+#define PPC_INTEGER PPC_INSNS_BASE
+ /* flow control instructions */
+#define PPC_FLOW PPC_INSNS_BASE
+ /* virtual memory instructions */
+#define PPC_MEM PPC_INSNS_BASE
+ /* ld/st with reservation instructions */
+#define PPC_RES PPC_INSNS_BASE
+ /* spr/msr access instructions */
+#define PPC_MISC PPC_INSNS_BASE
+ /* Deprecated instruction sets */
+ /* Original POWER instruction set */
+ PPC_POWER = 0x0000000000000002ULL,
+ /* POWER2 instruction set extension */
+ PPC_POWER2 = 0x0000000000000004ULL,
+ /* Power RTC support */
+ PPC_POWER_RTC = 0x0000000000000008ULL,
+ /* Power-to-PowerPC bridge (601) */
+ PPC_POWER_BR = 0x0000000000000010ULL,
+ /* 64-bit PowerPC instruction set */
+ PPC_64B = 0x0000000000000020ULL,
+ /* New 64-bit extensions (PowerPC 2.0x) */
+ PPC_64BX = 0x0000000000000040ULL,
+ /* 64-bit hypervisor extensions */
+ PPC_64H = 0x0000000000000080ULL,
+ /* New wait instruction (PowerPC 2.0x) */
+ PPC_WAIT = 0x0000000000000100ULL,
+ /* Time base mftb instruction */
+ PPC_MFTB = 0x0000000000000200ULL,
+
+ /* Fixed-point unit extensions */
+ /* PowerPC 602 specific */
+ PPC_602_SPEC = 0x0000000000000400ULL,
+ /* isel instruction */
+ PPC_ISEL = 0x0000000000000800ULL,
+ /* popcntb instruction */
+ PPC_POPCNTB = 0x0000000000001000ULL,
+ /* string load / store */
+ PPC_STRING = 0x0000000000002000ULL,
+ /* real mode cache inhibited load / store */
+ PPC_CILDST = 0x0000000000004000ULL,
+
+ /* Floating-point unit extensions */
+ /* Optional floating point instructions */
+ PPC_FLOAT = 0x0000000000010000ULL,
+ /* New floating-point extensions (PowerPC 2.0x) */
+ PPC_FLOAT_EXT = 0x0000000000020000ULL,
+ PPC_FLOAT_FSQRT = 0x0000000000040000ULL,
+ PPC_FLOAT_FRES = 0x0000000000080000ULL,
+ PPC_FLOAT_FRSQRTE = 0x0000000000100000ULL,
+ PPC_FLOAT_FRSQRTES = 0x0000000000200000ULL,
+ PPC_FLOAT_FSEL = 0x0000000000400000ULL,
+ PPC_FLOAT_STFIWX = 0x0000000000800000ULL,
+
+ /* Vector/SIMD extensions */
+ /* Altivec support */
+ PPC_ALTIVEC = 0x0000000001000000ULL,
+ /* PowerPC 2.03 SPE extension */
+ PPC_SPE = 0x0000000002000000ULL,
+ /* PowerPC 2.03 SPE single-precision floating-point extension */
+ PPC_SPE_SINGLE = 0x0000000004000000ULL,
+ /* PowerPC 2.03 SPE double-precision floating-point extension */
+ PPC_SPE_DOUBLE = 0x0000000008000000ULL,
+
+ /* Optional memory control instructions */
+ PPC_MEM_TLBIA = 0x0000000010000000ULL,
+ PPC_MEM_TLBIE = 0x0000000020000000ULL,
+ PPC_MEM_TLBSYNC = 0x0000000040000000ULL,
+ /* sync instruction */
+ PPC_MEM_SYNC = 0x0000000080000000ULL,
+ /* eieio instruction */
+ PPC_MEM_EIEIO = 0x0000000100000000ULL,
+
+ /* Cache control instructions */
+ PPC_CACHE = 0x0000000200000000ULL,
+ /* icbi instruction */
+ PPC_CACHE_ICBI = 0x0000000400000000ULL,
+ /* dcbz instruction */
+ PPC_CACHE_DCBZ = 0x0000000800000000ULL,
+ /* dcba instruction */
+ PPC_CACHE_DCBA = 0x0000002000000000ULL,
+ /* Freescale cache locking instructions */
+ PPC_CACHE_LOCK = 0x0000004000000000ULL,
+
+ /* MMU related extensions */
+ /* external control instructions */
+ PPC_EXTERN = 0x0000010000000000ULL,
+ /* segment register access instructions */
+ PPC_SEGMENT = 0x0000020000000000ULL,
+ /* PowerPC 6xx TLB management instructions */
+ PPC_6xx_TLB = 0x0000040000000000ULL,
+ /* PowerPC 74xx TLB management instructions */
+ PPC_74xx_TLB = 0x0000080000000000ULL,
+ /* PowerPC 40x TLB management instructions */
+ PPC_40x_TLB = 0x0000100000000000ULL,
+ /* segment register access instructions for PowerPC 64 "bridge" */
+ PPC_SEGMENT_64B = 0x0000200000000000ULL,
+ /* SLB management */
+ PPC_SLBI = 0x0000400000000000ULL,
+
+ /* Embedded PowerPC dedicated instructions */
+ PPC_WRTEE = 0x0001000000000000ULL,
+ /* PowerPC 40x exception model */
+ PPC_40x_EXCP = 0x0002000000000000ULL,
+ /* PowerPC 405 MAC instructions */
+ PPC_405_MAC = 0x0004000000000000ULL,
+ /* PowerPC 440 specific instructions */
+ PPC_440_SPEC = 0x0008000000000000ULL,
+ /* BookE (embedded) PowerPC specification */
+ PPC_BOOKE = 0x0010000000000000ULL,
+ /* mfapidi instruction */
+ PPC_MFAPIDI = 0x0020000000000000ULL,
+ /* tlbiva instruction */
+ PPC_TLBIVA = 0x0040000000000000ULL,
+ /* tlbivax instruction */
+ PPC_TLBIVAX = 0x0080000000000000ULL,
+ /* PowerPC 4xx dedicated instructions */
+ PPC_4xx_COMMON = 0x0100000000000000ULL,
+ /* PowerPC 40x icbt instruction */
+ PPC_40x_ICBT = 0x0200000000000000ULL,
+ /* rfmci is not implemented on all BookE PowerPC cores */
+ PPC_RFMCI = 0x0400000000000000ULL,
+ /* rfdi instruction */
+ PPC_RFDI = 0x0800000000000000ULL,
+ /* DCR accesses */
+ PPC_DCR = 0x1000000000000000ULL,
+ /* DCR extended accesses */
+ PPC_DCRX = 0x2000000000000000ULL,
+ /* user-mode DCR access, implemented in PowerPC 460 */
+ PPC_DCRUX = 0x4000000000000000ULL,
+ /* popcntw and popcntd instructions */
+ PPC_POPCNTWD = 0x8000000000000000ULL,
+
+#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \
+ | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \
+ | PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \
+ | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \
+ | PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \
+ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \
+ | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \
+ | PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX \
+ | PPC_ALTIVEC | PPC_SPE | PPC_SPE_SINGLE \
+ | PPC_SPE_DOUBLE | PPC_MEM_TLBIA \
+ | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC \
+ | PPC_MEM_SYNC | PPC_MEM_EIEIO \
+ | PPC_CACHE | PPC_CACHE_ICBI \
+ | PPC_CACHE_DCBZ \
+ | PPC_CACHE_DCBA | PPC_CACHE_LOCK \
+ | PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \
+ | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \
+ | PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \
+ | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \
+ | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \
+ | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \
+ | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \
+ | PPC_POPCNTWD | PPC_CILDST)
+
+ /* extended type values */
+
+ /* BookE 2.06 PowerPC specification */
+ PPC2_BOOKE206 = 0x0000000000000001ULL,
+ /* VSX (extensions to Altivec / VMX) */
+ PPC2_VSX = 0x0000000000000002ULL,
+ /* Decimal Floating Point (DFP) */
+ PPC2_DFP = 0x0000000000000004ULL,
+ /* Embedded.Processor Control */
+ PPC2_PRCNTL = 0x0000000000000008ULL,
+ /* Byte-reversed, indexed, double-word load and store */
+ PPC2_DBRX = 0x0000000000000010ULL,
+ /* Book I 2.05 PowerPC specification */
+ PPC2_ISA205 = 0x0000000000000020ULL,
+ /* VSX additions in ISA 2.07 */
+ PPC2_VSX207 = 0x0000000000000040ULL,
+ /* ISA 2.06B bpermd */
+ PPC2_PERM_ISA206 = 0x0000000000000080ULL,
+ /* ISA 2.06B divide extended variants */
+ PPC2_DIVE_ISA206 = 0x0000000000000100ULL,
+ /* ISA 2.06B larx/stcx. instructions */
+ PPC2_ATOMIC_ISA206 = 0x0000000000000200ULL,
+ /* ISA 2.06B floating point integer conversion */
+ PPC2_FP_CVT_ISA206 = 0x0000000000000400ULL,
+ /* ISA 2.06B floating point test instructions */
+ PPC2_FP_TST_ISA206 = 0x0000000000000800ULL,
+ /* ISA 2.07 bctar instruction */
+ PPC2_BCTAR_ISA207 = 0x0000000000001000ULL,
+ /* ISA 2.07 load/store quadword */
+ PPC2_LSQ_ISA207 = 0x0000000000002000ULL,
+ /* ISA 2.07 Altivec */
+ PPC2_ALTIVEC_207 = 0x0000000000004000ULL,
+ /* PowerISA 2.07 Book3s specification */
+ PPC2_ISA207S = 0x0000000000008000ULL,
+ /* Double precision floating point conversion for signed integer 64 */
+ PPC2_FP_CVT_S64 = 0x0000000000010000ULL,
+ /* Transactional Memory (ISA 2.07, Book II) */
+ PPC2_TM = 0x0000000000020000ULL,
+ /* Server PM instructions (ISA 2.06, Book III) */
+ PPC2_PM_ISA206 = 0x0000000000040000ULL,
+ /* POWER ISA 3.0 */
+ PPC2_ISA300 = 0x0000000000080000ULL,
+ /* POWER ISA 3.1 */
+ PPC2_ISA310 = 0x0000000000100000ULL,
+
+#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
+ PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
+ PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
+ PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | \
+ PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
+ PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
+ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
+ PPC2_ISA300 | PPC2_ISA310)
+};
+
+/*****************************************************************************/
+/*
+ * Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+ ACCESS_RES = 0x40, /* load/store with reservation */
+ ACCESS_EXT = 0x50, /* external access */
+ ACCESS_CACHE = 0x60, /* Cache manipulation */
+};
+
+/*
+ * Hardware interrupt sources:
+ * all of these exceptions can be raised simultaneously.
+ */
+/* Input pins definitions */
+enum {
+ /* 6xx bus input pins */
+ PPC6xx_INPUT_HRESET = 0,
+ PPC6xx_INPUT_SRESET = 1,
+ PPC6xx_INPUT_CKSTP_IN = 2,
+ PPC6xx_INPUT_MCP = 3,
+ PPC6xx_INPUT_SMI = 4,
+ PPC6xx_INPUT_INT = 5,
+ PPC6xx_INPUT_TBEN = 6,
+ PPC6xx_INPUT_WAKEUP = 7,
+ PPC6xx_INPUT_NB,
+};
+
+enum {
+ /* Embedded PowerPC input pins */
+ PPCBookE_INPUT_HRESET = 0,
+ PPCBookE_INPUT_SRESET = 1,
+ PPCBookE_INPUT_CKSTP_IN = 2,
+ PPCBookE_INPUT_MCP = 3,
+ PPCBookE_INPUT_SMI = 4,
+ PPCBookE_INPUT_INT = 5,
+ PPCBookE_INPUT_CINT = 6,
+ PPCBookE_INPUT_NB,
+};
+
+enum {
+ /* PowerPC E500 input pins */
+ PPCE500_INPUT_RESET_CORE = 0,
+ PPCE500_INPUT_MCK = 1,
+ PPCE500_INPUT_CINT = 3,
+ PPCE500_INPUT_INT = 4,
+ PPCE500_INPUT_DEBUG = 6,
+ PPCE500_INPUT_NB,
+};
+
+enum {
+ /* PowerPC 40x input pins */
+ PPC40x_INPUT_RESET_CORE = 0,
+ PPC40x_INPUT_RESET_CHIP = 1,
+ PPC40x_INPUT_RESET_SYS = 2,
+ PPC40x_INPUT_CINT = 3,
+ PPC40x_INPUT_INT = 4,
+ PPC40x_INPUT_HALT = 5,
+ PPC40x_INPUT_DEBUG = 6,
+ PPC40x_INPUT_NB,
+};
+
+enum {
+ /* RCPU input pins */
+ PPCRCPU_INPUT_PORESET = 0,
+ PPCRCPU_INPUT_HRESET = 1,
+ PPCRCPU_INPUT_SRESET = 2,
+ PPCRCPU_INPUT_IRQ0 = 3,
+ PPCRCPU_INPUT_IRQ1 = 4,
+ PPCRCPU_INPUT_IRQ2 = 5,
+ PPCRCPU_INPUT_IRQ3 = 6,
+ PPCRCPU_INPUT_IRQ4 = 7,
+ PPCRCPU_INPUT_IRQ5 = 8,
+ PPCRCPU_INPUT_IRQ6 = 9,
+ PPCRCPU_INPUT_IRQ7 = 10,
+ PPCRCPU_INPUT_NB,
+};
+
+#if defined(TARGET_PPC64)
+enum {
+ /* PowerPC 970 input pins */
+ PPC970_INPUT_HRESET = 0,
+ PPC970_INPUT_SRESET = 1,
+ PPC970_INPUT_CKSTP = 2,
+ PPC970_INPUT_TBEN = 3,
+ PPC970_INPUT_MCP = 4,
+ PPC970_INPUT_INT = 5,
+ PPC970_INPUT_THINT = 6,
+ PPC970_INPUT_NB,
+};
+
+enum {
+ /* POWER7 input pins */
+ POWER7_INPUT_INT = 0,
+ /*
+ * POWER7 probably has other inputs, but we don't care about them
+ * for any existing machine. We can wire these up when we need
+ * them
+ */
+ POWER7_INPUT_NB,
+};
+
+enum {
+ /* POWER9 input pins */
+ POWER9_INPUT_INT = 0,
+ POWER9_INPUT_HINT = 1,
+ POWER9_INPUT_NB,
+};
+#endif
+
+/* Hardware exceptions definitions */
+enum {
+ /* External hardware exception sources */
+ PPC_INTERRUPT_RESET = 0, /* Reset exception */
+ PPC_INTERRUPT_WAKEUP, /* Wakeup exception */
+ PPC_INTERRUPT_MCK, /* Machine check exception */
+ PPC_INTERRUPT_EXT, /* External interrupt */
+ PPC_INTERRUPT_SMI, /* System management interrupt */
+ PPC_INTERRUPT_CEXT, /* Critical external interrupt */
+ PPC_INTERRUPT_DEBUG, /* External debug exception */
+ PPC_INTERRUPT_THERM, /* Thermal exception */
+ /* Internal hardware exception sources */
+ PPC_INTERRUPT_DECR, /* Decrementer exception */
+ PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */
+ PPC_INTERRUPT_PIT, /* Programmable interval timer interrupt */
+ PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */
+ PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */
+ PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
+ PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */
+ PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */
+ PPC_INTERRUPT_HMI, /* Hypervisor Maintenance interrupt */
+ PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
+ PPC_INTERRUPT_HVIRT, /* Hypervisor virtualization interrupt */
+};
+
+/* Processor Compatibility mask (PCR) */
+enum {
+ PCR_COMPAT_2_05 = PPC_BIT(62),
+ PCR_COMPAT_2_06 = PPC_BIT(61),
+ PCR_COMPAT_2_07 = PPC_BIT(60),
+ PCR_COMPAT_3_00 = PPC_BIT(59),
+ PCR_COMPAT_3_10 = PPC_BIT(58),
+ PCR_VEC_DIS = PPC_BIT(0), /* Vec. disable (bit NA since POWER8) */
+ PCR_VSX_DIS = PPC_BIT(1), /* VSX disable (bit NA since POWER8) */
+ PCR_TM_DIS = PPC_BIT(2), /* Trans. memory disable (POWER8) */
+};
+
+/* HMER/HMEER */
+enum {
+ HMER_MALFUNCTION_ALERT = PPC_BIT(0),
+ HMER_PROC_RECV_DONE = PPC_BIT(2),
+ HMER_PROC_RECV_ERROR_MASKED = PPC_BIT(3),
+ HMER_TFAC_ERROR = PPC_BIT(4),
+ HMER_TFMR_PARITY_ERROR = PPC_BIT(5),
+ HMER_XSCOM_FAIL = PPC_BIT(8),
+ HMER_XSCOM_DONE = PPC_BIT(9),
+ HMER_PROC_RECV_AGAIN = PPC_BIT(11),
+ HMER_WARN_RISE = PPC_BIT(14),
+ HMER_WARN_FALL = PPC_BIT(15),
+ HMER_SCOM_FIR_HMI = PPC_BIT(16),
+ HMER_TRIG_FIR_HMI = PPC_BIT(17),
+ HMER_HYP_RESOURCE_ERR = PPC_BIT(20),
+ HMER_XSCOM_STATUS_MASK = PPC_BITMASK(21, 23),
+};
+
+/*****************************************************************************/
+
+#define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300))
+target_ulong cpu_read_xer(const CPUPPCState *env);
+void cpu_write_xer(CPUPPCState *env, target_ulong xer);
+
+/*
+ * All 64-bit server processors compliant with arch 2.x, i.e. 970 and newer,
+ * have PPC_SEGMENT_64B.
+ */
+#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
+
+#ifdef CONFIG_DEBUG_TCG
+void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags);
+#else
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->nip;
+ *cs_base = 0;
+ *flags = env->hflags;
+}
+#endif
+
+void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception);
+void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr);
+void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr);
+
+#if !defined(CONFIG_USER_ONLY)
+static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
+{
+ uintptr_t tlbml = (uintptr_t)tlbm;
+ uintptr_t tlbl = (uintptr_t)env->tlb.tlbm;
+
+ return (tlbml - tlbl) / sizeof(env->tlb.tlbm[0]);
+}
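+/*
+ * In other words, the id is the entry's index in the flat tlbm[] array:
+ * for tlbm == &env->tlb.tlbm[5] this returns 5. Callers normally pass
+ * pointers obtained from booke206_get_tlbm() below.
+ */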
+
+static inline int booke206_tlb_size(CPUPPCState *env, int tlbn)
+{
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ int r = tlbncfg & TLBnCFG_N_ENTRY;
+ return r;
+}
+
+static inline int booke206_tlb_ways(CPUPPCState *env, int tlbn)
+{
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ int r = tlbncfg >> TLBnCFG_ASSOC_SHIFT;
+ return r;
+}
+
+static inline int booke206_tlbm_to_tlbn(CPUPPCState *env, ppcmas_tlb_t *tlbm)
+{
+ int id = booke206_tlbm_id(env, tlbm);
+ int end = 0;
+ int i;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ end += booke206_tlb_size(env, i);
+ if (id < end) {
+ return i;
+ }
+ }
+
+ cpu_abort(env_cpu(env), "Unknown TLBe: %d\n", id);
+ return 0;
+}
+
+static inline int booke206_tlbm_to_way(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+ int tlbn = booke206_tlbm_to_tlbn(env, tlb);
+ int tlbid = booke206_tlbm_id(env, tlb);
+ return tlbid & (booke206_tlb_ways(env, tlbn) - 1);
+}
+
+static inline ppcmas_tlb_t *booke206_get_tlbm(CPUPPCState *env, const int tlbn,
+ target_ulong ea, int way)
+{
+ int r;
+ uint32_t ways = booke206_tlb_ways(env, tlbn);
+ int ways_bits = ctz32(ways);
+ int tlb_bits = ctz32(booke206_tlb_size(env, tlbn));
+ int i;
+
+ way &= ways - 1;
+ ea >>= MAS2_EPN_SHIFT;
+ ea &= (1 << (tlb_bits - ways_bits)) - 1;
+ r = (ea << ways_bits) | way;
+
+ if (r >= booke206_tlb_size(env, tlbn)) {
+ return NULL;
+ }
+
+ /* bump up to tlbn index */
+ for (i = 0; i < tlbn; i++) {
+ r += booke206_tlb_size(env, i);
+ }
+
+ return &env->tlb.tlbm[r];
+}
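+/*
+ * Worked example (hypothetical geometry): for a 64-entry, 4-way TLB,
+ * ways_bits = 2 and tlb_bits = 6, so bits [3:0] of (ea >> MAS2_EPN_SHIFT)
+ * select the set and the in-TLB index is (set << 2) | way, which is then
+ * offset past the sizes of the lower-numbered TLBs in the flat array.
+ */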
+
+/* returns bitmap of supported page sizes for a given TLB */
+static inline uint32_t booke206_tlbnps(CPUPPCState *env, const int tlbn)
+{
+ uint32_t ret = 0;
+
+ if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+ /* MAV2 */
+ ret = env->spr[SPR_BOOKE_TLB0PS + tlbn];
+ } else {
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ uint32_t min = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+ uint32_t max = (tlbncfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+ int i;
+ for (i = min; i <= max; i++) {
+ ret |= (1 << (i << 1));
+ }
+ }
+
+ return ret;
+}
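+/*
+ * Worked example (hypothetical MAV 1.0 config): with TLBnCFG_MINSIZE = 1
+ * and TLBnCFG_MAXSIZE = 3, bits 2, 4 and 6 of the result are set, i.e. one
+ * bit at position 2 * TSIZE per supported power-of-four page size, matching
+ * the MAV 2.0 TLBnPS layout that the first branch returns directly.
+ */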
+
+static inline void booke206_fixed_size_tlbn(CPUPPCState *env, const int tlbn,
+ ppcmas_tlb_t *tlb)
+{
+ uint8_t i;
+ int32_t tsize = -1;
+
+ for (i = 0; i < 32; i++) {
+ if ((env->spr[SPR_BOOKE_TLB0PS + tlbn]) & (1ULL << i)) {
+ if (tsize == -1) {
+ tsize = i;
+ } else {
+ return;
+ }
+ }
+ }
+
+ /* TLBnPS unimplemented? Odd... */
+ assert(tsize != -1);
+ tlb->mas1 &= ~MAS1_TSIZE_MASK;
+ tlb->mas1 |= ((uint32_t)tsize) << MAS1_TSIZE_SHIFT;
+}
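+/*
+ * Example (assuming MAV 2.0 power-of-two sizes): a fixed 4KB-only TLB
+ * advertises TLBnPS = 0x4 (just bit 2 set), so every entry written to it
+ * has MAS1[TSIZE] forced to 2 here; TLBs with several size bits set are
+ * left untouched.
+ */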
+
+#endif
+
+static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
+{
+ if (env->mmu_model == POWERPC_MMU_BOOKE206) {
+ return msr & (1ULL << MSR_CM);
+ }
+
+ return msr & (1ULL << MSR_SF);
+}
+
+/**
+ * Check whether register rx is in the range between start and
+ * start + nregs (as needed by the LSWX and LSWI instructions)
+ */
+static inline bool lsw_reg_in_range(int start, int nregs, int rx)
+{
+ return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
+ (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
+}
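+/*
+ * Worked example: a load string with start = 30 and nregs = 4 wraps around
+ * and touches r30, r31, r0 and r1; the second clause accepts rx = 0 and
+ * rx = 1 because start + nregs - 32 = 2.
+ */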
+
+/* Accessors for FP, VMX and VSX registers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrB(i) u8[i]
+#define VsrSB(i) s8[i]
+#define VsrH(i) u16[i]
+#define VsrSH(i) s16[i]
+#define VsrW(i) u32[i]
+#define VsrSW(i) s32[i]
+#define VsrD(i) u64[i]
+#define VsrSD(i) s64[i]
+#else
+#define VsrB(i) u8[15 - (i)]
+#define VsrSB(i) s8[15 - (i)]
+#define VsrH(i) u16[7 - (i)]
+#define VsrSH(i) s16[7 - (i)]
+#define VsrW(i) u32[3 - (i)]
+#define VsrSW(i) s32[3 - (i)]
+#define VsrD(i) u64[1 - (i)]
+#define VsrSD(i) s64[1 - (i)]
+#endif
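+/*
+ * These macros hide host endianness: VsrD(0) is always the architecturally
+ * most-significant doubleword of the register. On a little-endian host,
+ * for example, vsr[n].VsrD(0) expands to vsr[n].u64[1].
+ */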
+
+static inline int vsr64_offset(int i, bool high)
+{
+ return offsetof(CPUPPCState, vsr[i].VsrD(high ? 0 : 1));
+}
+
+static inline int vsr_full_offset(int i)
+{
+ return offsetof(CPUPPCState, vsr[i].u64[0]);
+}
+
+static inline int fpr_offset(int i)
+{
+ return vsr64_offset(i, true);
+}
+
+static inline uint64_t *cpu_fpr_ptr(CPUPPCState *env, int i)
+{
+ return (uint64_t *)((uintptr_t)env + fpr_offset(i));
+}
+
+static inline uint64_t *cpu_vsrl_ptr(CPUPPCState *env, int i)
+{
+ return (uint64_t *)((uintptr_t)env + vsr64_offset(i, false));
+}
+
+static inline long avr64_offset(int i, bool high)
+{
+ return vsr64_offset(i + 32, high);
+}
+
+static inline int avr_full_offset(int i)
+{
+ return vsr_full_offset(i + 32);
+}
+
+static inline ppc_avr_t *cpu_avr_ptr(CPUPPCState *env, int i)
+{
+ return (ppc_avr_t *)((uintptr_t)env + avr_full_offset(i));
+}
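+/*
+ * Layout recap: FPR n is the most-significant doubleword of VSR n, and
+ * AVR n aliases the whole of VSR n + 32, which is why the accessors above
+ * are thin wrappers around vsr64_offset() and vsr_full_offset().
+ */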
+
+static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr)
+{
+ /* We can test whether the SPR is defined by checking for a valid name */
+ return cpu->env.spr_cb[spr].name != NULL;
+}
+
+static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ /*
+ * Only models that have an LPCR and know about LPCR_ILE can do little
+ * endian.
+ */
+ if (pcc->lpcr_mask & LPCR_ILE) {
+ return !!(cpu->env.spr[SPR_LPCR] & LPCR_ILE);
+ }
+
+ return false;
+}
+
+void dump_mmu(CPUPPCState *env);
+
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
+void ppc_store_vscr(CPUPPCState *env, uint32_t vscr);
+uint32_t ppc_get_vscr(CPUPPCState *env);
+#endif /* PPC_CPU_H */
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
new file mode 100644
index 000000000..6695985e9
--- /dev/null
+++ b/target/ppc/cpu_init.c
@@ -0,0 +1,9291 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/dis-asm.h"
+#include "exec/gdbstub.h"
+#include "kvm_ppc.h"
+#include "sysemu/cpus.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/tcg.h"
+#include "cpu-models.h"
+#include "mmu-hash32.h"
+#include "mmu-hash64.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qemu/qemu-print.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qnull.h"
+#include "qapi/visitor.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/ppc.h"
+#include "mmu-book3s-v3.h"
+#include "qemu/cutils.h"
+#include "disas/capstone.h"
+#include "fpu/softfloat.h"
+#include "qapi/qapi-commands-machine-target.h"
+
+#include "helper_regs.h"
+#include "internal.h"
+#include "spr_tcg.h"
+
+/* #define PPC_DEBUG_SPR */
+/* #define USE_APPLE_GDB */
+
+static inline void vscr_init(CPUPPCState *env, uint32_t val)
+{
+ /* Altivec always uses round-to-nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->vec_status);
+ ppc_store_vscr(env, val);
+}
+
+/**
+ * _spr_register
+ *
+ * Register an SPR with all the callbacks required for TCG,
+ * and the ID number for KVM.
+ *
+ * The reason for the conditional compilation is that the TCG functions
+ * may be compiled out, and the system KVM header may not be available
+ * for supplying the ID numbers. This is ugly, but the best we can do.
+ */
+
+#ifdef CONFIG_TCG
+# define USR_ARG(X) X,
+# ifdef CONFIG_USER_ONLY
+# define SYS_ARG(X)
+# else
+# define SYS_ARG(X) X,
+# endif
+#else
+# define USR_ARG(X)
+# define SYS_ARG(X)
+#endif
+#ifdef CONFIG_KVM
+# define KVM_ARG(X) X,
+#else
+# define KVM_ARG(X)
+#endif
+
+typedef void spr_callback(DisasContext *, int, int);
+
+static void _spr_register(CPUPPCState *env, int num, const char *name,
+ USR_ARG(spr_callback *uea_read)
+ USR_ARG(spr_callback *uea_write)
+ SYS_ARG(spr_callback *oea_read)
+ SYS_ARG(spr_callback *oea_write)
+ SYS_ARG(spr_callback *hea_read)
+ SYS_ARG(spr_callback *hea_write)
+ KVM_ARG(uint64_t one_reg_id)
+ target_ulong initial_value)
+{
+ ppc_spr_t *spr = &env->spr_cb[num];
+
+ /* No SPR should be registered twice. */
+ assert(spr->name == NULL);
+ assert(name != NULL);
+
+ spr->name = name;
+ spr->default_value = initial_value;
+ env->spr[num] = initial_value;
+
+#ifdef CONFIG_TCG
+ spr->uea_read = uea_read;
+ spr->uea_write = uea_write;
+# ifndef CONFIG_USER_ONLY
+ spr->oea_read = oea_read;
+ spr->oea_write = oea_write;
+ spr->hea_read = hea_read;
+ spr->hea_write = hea_write;
+# endif
+#endif
+#ifdef CONFIG_KVM
+ spr->one_reg_id = one_reg_id;
+#endif
+}
+
+/* spr_register_kvm_hv passes all required arguments. */
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, \
+ USR_ARG(uea_read) USR_ARG(uea_write) \
+ SYS_ARG(oea_read) SYS_ARG(oea_write) \
+ SYS_ARG(hea_read) SYS_ARG(hea_write) \
+ KVM_ARG(one_reg_id) initial_value)
+
+/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */
+#define spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, one_reg_id, ival) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
+ oea_write, oea_read, oea_write, one_reg_id, ival)
+
+/* spr_register_hv and spr_register are similar, except there is no kvm id. */
+#define spr_register_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, ival) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
+ oea_write, hea_read, hea_write, 0, ival)
+
+#define spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, ival) \
+ spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, 0, ival)
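+/*
+ * Minimal usage sketch (mirroring register_generic_sprs() below): a
+ * supervisor-only SPR with generic callbacks and no KVM one-reg ID is
+ * registered as
+ *
+ *     spr_register(env, SPR_SPRG0, "SPRG0",
+ *                  SPR_NOACCESS, SPR_NOACCESS,
+ *                  &spr_read_generic, &spr_write_generic,
+ *                  0x00000000);
+ */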
+
+/* Generic PowerPC SPRs */
+static void register_generic_sprs(CPUPPCState *env)
+{
+ /* Integer processing */
+ spr_register(env, SPR_XER, "XER",
+ &spr_read_xer, &spr_write_xer,
+ &spr_read_xer, &spr_write_xer,
+ 0x00000000);
+ /* Branch control */
+ spr_register(env, SPR_LR, "LR",
+ &spr_read_lr, &spr_write_lr,
+ &spr_read_lr, &spr_write_lr,
+ 0x00000000);
+ spr_register(env, SPR_CTR, "CTR",
+ &spr_read_ctr, &spr_write_ctr,
+ &spr_read_ctr, &spr_write_ctr,
+ 0x00000000);
+ /* Interrupt processing */
+ spr_register(env, SPR_SRR0, "SRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SRR1, "SRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_SPRG0, "SPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG1, "SPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG2, "SPRG2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG3, "SPRG3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all non-embedded PowerPC, including 601 */
+static void register_ne_601_sprs(CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register_kvm(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSISR, 0x00000000);
+ spr_register_kvm(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAR, 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+}
+
+/* Storage Description Register 1 */
+static void register_sdr1_sprs(CPUPPCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ if (env->has_hv_mode) {
+ /*
+ * SDR1 is a hypervisor resource on CPUs which have a
+ * hypervisor mode
+ */
+ spr_register_hv(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sdr1,
+ 0x00000000);
+ } else {
+ spr_register(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sdr1,
+ 0x00000000);
+ }
+#endif
+}
+
+/* BATs 0-3 */
+static void register_low_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0U, "DBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0L, "DBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1U, "DBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1L, "DBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2U, "DBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2L, "DBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3U, "DBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3L, "DBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* BATs 4-7 */
+static void register_high_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT4U, "IBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT4L, "IBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5U, "IBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5L, "IBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6U, "IBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6L, "IBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7U, "IBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7L, "IBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4U, "DBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4L, "DBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5U, "DBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5L, "DBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6U, "DBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6L, "DBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7U, "DBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7L, "DBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* Generic PowerPC time base */
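+/*
+ * Note: the time base is deliberately registered twice. SPR 268/269
+ * (VTBL/VTBU) are the read-only user-mode encodings, while SPR 284/285
+ * (TBL/TBU) carry the supervisor write port, hence the shared names.
+ */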
+static void register_tbl(CPUPPCState *env)
+{
+ spr_register(env, SPR_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, &spr_write_tbu,
+ 0x00000000);
+}
+
+/* Software table search registers */
+static void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs,
+ int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ env->tlb_type = TLB_6XX;
+ spr_register(env, SPR_DMISS, "DMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_DCMP, "DCMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH1, "HASH1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH2, "HASH2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_IMISS, "IMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ICMP, "ICMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_RPA, "RPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+/* SPR common to MPC755 and G2 */
+static void register_G2_755_sprs(CPUPPCState *env)
+{
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all 7xx PowerPC implementations */
+static void register_7xx_sprs(CPUPPCState *env)
+{
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTC, "ICTC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Performance monitors */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
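+ /*
+ * The U* registers below are read-only user-mode mirrors of their
+ * privileged counterparts: spr_read_ureg reads SPR (n + 0x10)
+ * (editor's note; see the spr_read_ureg implementation).
+ */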
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UMMCR0, "UMMCR0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC1, "UPMC1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC2, "UPMC2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC3, "UPMC3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC4, "UPMC4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+#ifdef TARGET_PPC64
+static void register_amr_sprs(CPUPPCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Virtual Page Class Key protection
+ *
+ * The AMR is accessible either via SPR 13 or SPR 29. SPR 13 is
+ * userspace accessible and SPR 29 is privileged, so we only need
+ * to set the KVM ONE_REG id on one of them; we use 29.
+ */
+ spr_register(env, SPR_UAMR, "UAMR",
+ &spr_read_generic, &spr_write_amr,
+ &spr_read_generic, &spr_write_amr,
+ 0);
+ spr_register_kvm_hv(env, SPR_AMR, "AMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_amr,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_AMR, 0);
+ spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_uamor,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_UAMOR, 0);
+ spr_register_hv(env, SPR_AMOR, "AMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+#endif /* !CONFIG_USER_ONLY */
+}
+
+static void register_iamr_sprs(CPUPPCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_iamr,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_IAMR, 0);
+#endif /* !CONFIG_USER_ONLY */
+}
+#endif /* TARGET_PPC64 */
+
+static void register_thrm_sprs(CPUPPCState *env)
+{
+ /* Thermal management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 604 implementation */
+static void register_604_sprs(CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ /* Performance counters */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 603 implementation */
+static void register_603_sprs(CPUPPCState *env)
+{
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC G2 implementation */
+static void register_G2_sprs(CPUPPCState *env)
+{
+ /* Memory base address */
+ /* MBAR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MBAR, "MBAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Exception processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 602 implementation */
+static void register_602_sprs(CPUPPCState *env)
+{
+ /* ESA registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SER, "SER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SEBR, "SEBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_ESASRR, "ESASRR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Floating point status */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SP, "SP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_LT, "LT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Watchdog timer */
+ /* XXX : not implemented */
+ spr_register(env, SPR_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Interrupt base */
+ spr_register(env, SPR_IBR, "IBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 601 implementation */
+static void register_601_sprs(CPUPPCState *env)
+{
+ /* Multiplication/division register */
+ /* MQ */
+ spr_register(env, SPR_MQ, "MQ",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* RTC registers */
+ spr_register(env, SPR_601_RTCU, "RTCU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcu,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCU, "RTCU",
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_601_RTCL, "RTCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcl,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCL, "RTCL",
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ 0x00000000);
+ /* Timer */
+#if 0 /* ? */
+ spr_register(env, SPR_601_UDECR, "UDECR",
+ &spr_read_decr, SPR_NOACCESS,
+ &spr_read_decr, SPR_NOACCESS,
+ 0x00000000);
+#endif
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ env->nb_BATs = 4;
+#endif
+}
+
+static void register_74xx_sprs(CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_74XX_MMCR2, "MMCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_74XX_UMMCR2, "UMMCR2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BAMR, "BAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSCR0, "MSSCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Altivec */
+ spr_register(env, SPR_VRSAVE, "VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+}
+
+static void register_l3_ctrl(CPUPPCState *env)
+{
+ /* L3CR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3CR, "L3CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR0, "L3ITCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3PM */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3PM, "L3PM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ env->tlb_type = TLB_6XX;
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTEHI, "PTEHI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTELO, "PTELO",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_TLBMISS, "TLBMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
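+/* USPRG3 is the read-only user-mode alias of SPRG3 (via spr_read_ureg). */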
+static void register_usprg3_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_USPRG3, "USPRG3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void register_usprgh_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+/* PowerPC BookE SPR */
+static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask)
+{
+ const char *ivor_names[64] = {
+ "IVOR0", "IVOR1", "IVOR2", "IVOR3",
+ "IVOR4", "IVOR5", "IVOR6", "IVOR7",
+ "IVOR8", "IVOR9", "IVOR10", "IVOR11",
+ "IVOR12", "IVOR13", "IVOR14", "IVOR15",
+ "IVOR16", "IVOR17", "IVOR18", "IVOR19",
+ "IVOR20", "IVOR21", "IVOR22", "IVOR23",
+ "IVOR24", "IVOR25", "IVOR26", "IVOR27",
+ "IVOR28", "IVOR29", "IVOR30", "IVOR31",
+ "IVOR32", "IVOR33", "IVOR34", "IVOR35",
+ "IVOR36", "IVOR37", "IVOR38", "IVOR39",
+ "IVOR40", "IVOR41", "IVOR42", "IVOR43",
+ "IVOR44", "IVOR45", "IVOR46", "IVOR47",
+ "IVOR48", "IVOR49", "IVOR50", "IVOR51",
+ "IVOR52", "IVOR53", "IVOR54", "IVOR55",
+ "IVOR56", "IVOR57", "IVOR58", "IVOR59",
+ "IVOR60", "IVOR61", "IVOR62", "IVOR63",
+ };
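+ /* Sentinel for IVOR numbers that have no architected SPR. */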
+#define SPR_BOOKE_IVORxx (-1)
+ int ivor_sprn[64] = {
+ SPR_BOOKE_IVOR0, SPR_BOOKE_IVOR1, SPR_BOOKE_IVOR2, SPR_BOOKE_IVOR3,
+ SPR_BOOKE_IVOR4, SPR_BOOKE_IVOR5, SPR_BOOKE_IVOR6, SPR_BOOKE_IVOR7,
+ SPR_BOOKE_IVOR8, SPR_BOOKE_IVOR9, SPR_BOOKE_IVOR10, SPR_BOOKE_IVOR11,
+ SPR_BOOKE_IVOR12, SPR_BOOKE_IVOR13, SPR_BOOKE_IVOR14, SPR_BOOKE_IVOR15,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVOR32, SPR_BOOKE_IVOR33, SPR_BOOKE_IVOR34, SPR_BOOKE_IVOR35,
+ SPR_BOOKE_IVOR36, SPR_BOOKE_IVOR37, SPR_BOOKE_IVOR38, SPR_BOOKE_IVOR39,
+ SPR_BOOKE_IVOR40, SPR_BOOKE_IVOR41, SPR_BOOKE_IVOR42, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ };
+ int i;
+
+ /* Interrupt processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR2, "DBCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_IVPR, "IVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ /* Exception vectors */
+ for (i = 0; i < 64; i++) {
+ if (ivor_mask & (1ULL << i)) {
+ if (ivor_sprn[i] == SPR_BOOKE_IVORxx) {
+ fprintf(stderr, "ERROR: IVOR %d SPR is not defined\n", i);
+ exit(1);
+ }
+ spr_register(env, ivor_sprn[i], ivor_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_vector,
+ 0x00000000);
+ }
+ }
+ spr_register(env, SPR_BOOKE_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DECAR, "DECAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_SPRG8, "SPRG8",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_SPRG9, "SPRG9",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static inline uint32_t register_tlbncfg(uint32_t assoc, uint32_t minsize,
+ uint32_t maxsize, uint32_t flags,
+ uint32_t nentries)
+{
+ return (assoc << TLBnCFG_ASSOC_SHIFT) |
+ (minsize << TLBnCFG_MINSIZE_SHIFT) |
+ (maxsize << TLBnCFG_MAXSIZE_SHIFT) |
+ flags | nentries;
+}
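+
+/*
+ * Example (editor's note): assuming the usual shift values of 24
+ * (ASSOC), 20 (MINSIZE) and 16 (MAXSIZE), register_tlbncfg(4, 1, 7, 0, 64)
+ * packs to 0x04170040: 4-way associative, TSIZE range 1..7, 64 entries.
+ */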
+#endif /* !CONFIG_USER_ONLY */
+
+/* BookE 2.06 storage control registers */
+static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
+ uint32_t *tlbncfg, uint32_t mmucfg)
+{
+#if !defined(CONFIG_USER_ONLY)
+ const char *mas_names[8] = {
+ "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7",
+ };
+ int mas_sprn[8] = {
+ SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3,
+ SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7,
+ };
+ int i;
+
+ /* TLB assist registers */
+ /* XXX : not implemented */
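+ /*
+ * MAS2 (index 2) holds a 64-bit effective address on 64-bit cores,
+ * hence the full-width write handler; the other MAS registers are
+ * truncated to 32 bits by spr_write_generic32.
+ */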
+ for (i = 0; i < 8; i++) {
+ if (mas_mask & (1 << i)) {
+ spr_register(env, mas_sprn[i], mas_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic,
+ (i == 2 && (env->insns_flags & PPC_64B))
+ ? &spr_write_generic : &spr_write_generic32,
+ 0x00000000);
+ }
+ }
+ if (env->nb_pids > 1) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID1, "PID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ }
+ if (env->nb_pids > 2) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID2, "PID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ }
+
+ spr_register(env, SPR_BOOKE_EPLC, "EPLC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_eplc,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_EPSC, "EPSC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_epsc,
+ 0x00000000);
+
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ mmucfg);
+ switch (env->nb_ways) {
+ case 4:
+ spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[3]);
+ /* Fallthru */
+ case 3:
+ spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[2]);
+ /* Fallthru */
+ case 2:
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[1]);
+ /* Fallthru */
+ case 1:
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[0]);
+ /* Fallthru */
+ case 0:
+ default:
+ break;
+ }
+#endif
+
+ register_usprgh_sprs(env);
+}
+
+/* SPR specific to PowerPC 440 implementation */
+static void register_440_sprs(CPUPPCState *env)
+{
+ /* Cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV0, "DNV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV1, "DNV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV2, "DNV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV3, "DNV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV0, "DTV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV1, "DTV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV2, "DTV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV3, "DTV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DVLIM, "DVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV0, "INV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV1, "INV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV2, "INV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV3, "INV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV0, "ITV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV1, "ITV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV2, "ITV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV3, "ITV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_IVLIM, "IVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DBDR, "DBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_440_RSTCFG, "RSTCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Storage control */
+ spr_register(env, SPR_440_MMUCR, "MMUCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR shared between PowerPC 40x implementations */
+static void register_40x_sprs(CPUPPCState *env)
+{
+ /* Cache */
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCCR, "DCCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_ICCR, "ICCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Exception */
+ spr_register(env, SPR_40x_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_EVPR, "EVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR2, "SRR2",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR3, "SRR3",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Timers */
+ spr_register(env, SPR_40x_PIT, "PIT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_40x_pit, &spr_write_40x_pit,
+ 0x00000000);
+ spr_register(env, SPR_40x_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_40x_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 405 implementation */
+static void register_405_sprs(CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00700000);
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_SU0R, "SU0R",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* SPRG */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ register_usprgh_sprs(env);
+}
+
+/* SPR shared between PowerPC 401 & 403 implementations */
+static void register_401_403_sprs(CPUPPCState *env)
+{
+ /* Time base */
+ spr_register(env, SPR_403_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBL, "TBL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_403_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBU, "TBU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu,
+ 0x00000000);
+ /* Debug */
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_403_CDBCR, "CDBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 401 implementation */
+static void register_401_sprs(CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_401x2_sprs(CPUPPCState *env)
+{
+ register_401_sprs(env);
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 403 implementation */
+static void register_403_sprs(CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_403_real_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_403_PBL1, "PBL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU1, "PBU1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBL2, "PBL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU2, "PBU2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+}
+
+static void register_403_mmu_sprs(CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC compression coprocessor extension */
+static void register_compress_sprs(CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_401_SKR, "SKR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_5xx_8xx_sprs(CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register_kvm(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSISR, 0x00000000);
+ spr_register_kvm(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAR, 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EIE, "EIE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EID, "EID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_NRI, "NRI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPA, "CMPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPB, "CMPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPC, "CMPC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPD, "CMPD",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_ECR, "ECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DER, "DER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTA, "COUNTA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTB, "COUNTB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPE, "CMPE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPF, "CMPF",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPG, "CMPG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPH, "CMPH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL1, "LCTRL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL2, "LCTRL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_BAR, "BAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DPDR, "DPDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IMMR, "IMMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_5xx_sprs(CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_FPECR, "FPECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_8xx_sprs(CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_CST, "IC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_ADR, "IC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_DAT, "IC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_CST, "DC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_ADR, "DC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_DAT, "DC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_CTR, "MI_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_AP, "MI_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_EPN, "MI_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_TWC, "MI_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_RPN, "MI_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CTR, "MD_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CASID, "MD_CASID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_AP, "MD_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_EPN, "MD_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWB, "MD_TWB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWC, "MD_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_RPN, "MD_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TW, "MD_TW",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/*
+ * AMR => SPR 29 (Power 2.04)
+ * CTRL => SPR 136 (Power 2.04)
+ * CTRL => SPR 152 (Power 2.04)
+ * SCOMC => SPR 276 (64 bits ?)
+ * SCOMD => SPR 277 (64 bits ?)
+ * TBU40 => SPR 286 (Power 2.04 hypv)
+ * HSPRG0 => SPR 304 (Power 2.04 hypv)
+ * HSPRG1 => SPR 305 (Power 2.04 hypv)
+ * HDSISR => SPR 306 (Power 2.04 hypv)
+ * HDAR => SPR 307 (Power 2.04 hypv)
+ * PURR => SPR 309 (Power 2.04 hypv)
+ * HDEC => SPR 310 (Power 2.04 hypv)
+ * HIOR => SPR 311 (hypv)
+ * RMOR => SPR 312 (970)
+ * HRMOR => SPR 313 (Power 2.04 hypv)
+ * HSRR0 => SPR 314 (Power 2.04 hypv)
+ * HSRR1 => SPR 315 (Power 2.04 hypv)
+ * LPIDR => SPR 317 (970)
+ * EPR => SPR 702 (Power 2.04 emb)
+ * perf => 768-783 (Power 2.04)
+ * perf => 784-799 (Power 2.04)
+ * PPR => SPR 896 (Power 2.04)
+ * DABRX => 1015 (Power 2.04 hypv)
+ * FPECR => SPR 1022 (?)
+ * ... and more (thermal management, performance counters, ...)
+ */
+
+/*****************************************************************************/
+/* Exception vector models */
+static void init_excp_4xx_real(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_4xx_softmmu(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_MPC5xx(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_MPC8xx(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_ITLBE] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_DTLBE] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_G2(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
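+/*
+ * On e200 (and the generic Book E model below) exceptions are taken
+ * through software-programmed IVPR/IVORn registers, so the table starts
+ * out zeroed; only the e200 reset vector is fixed, at the top of the
+ * address space.
+ */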
+static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000;
+ env->ivor_mask = 0x0000FFF7UL;
+ env->ivpr_mask = ivpr_mask;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_BookE(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_601(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_602(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* XXX: exception prefix has a special behavior on 602 */
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_603(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_604(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7x0(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_750cl(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_750cx(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+/* XXX: Check if this is correct */
+static void init_excp_7x5(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7400(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7450(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+#if defined(TARGET_PPC64)
+static void init_excp_970(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+
+static void init_excp_POWER7(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_HDSI] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_HISI] = 0x00000E20;
+ env->excp_vectors[POWERPC_EXCP_HV_EMU] = 0x00000E40;
+ env->excp_vectors[POWERPC_EXCP_HV_MAINT] = 0x00000E60;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+
+static void init_excp_POWER8(CPUPPCState *env)
+{
+ init_excp_POWER7(env);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_SDOOR] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60;
+ env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80;
+ env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80;
+#endif
+}
+
+static void init_excp_POWER9(CPUPPCState *env)
+{
+ init_excp_POWER8(env);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_HVIRT] = 0x00000EA0;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL_VECTORED] = 0x00017000;
+#endif
+}
+
+static void init_excp_POWER10(CPUPPCState *env)
+{
+ init_excp_POWER9(env);
+}
+
+#endif
+
+/*****************************************************************************/
+/* Power management enable checks */
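+/*
+ * These callbacks decide whether setting MSR[POW] may actually put the
+ * core into a power-saving state: check_pow_none() always refuses,
+ * check_pow_nocheck() always allows it, and the HID0 variants first
+ * require one of the power-management enable bits to be set.
+ */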
+static int check_pow_none(CPUPPCState *env)
+{
+ return 0;
+}
+
+static int check_pow_nocheck(CPUPPCState *env)
+{
+ return 1;
+}
+
+static int check_pow_hid0(CPUPPCState *env)
+{
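+    /* 0x00E00000 covers the DOZE, NAP and SLEEP enable bits of HID0. */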
+ if (env->spr[SPR_HID0] & 0x00E00000) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int check_pow_hid0_74xx(CPUPPCState *env)
+{
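+    /* 0x00600000 is NAP|SLEEP; the 74xx HID0 apparently has no DOZE bit. */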
+ if (env->spr[SPR_HID0] & 0x00600000) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/*****************************************************************************/
+/* PowerPC implementations definitions */
+
+#define POWERPC_FAMILY(_name) \
+ static void \
+ glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
+ \
+ static const TypeInfo \
+ glue(glue(ppc_, _name), _cpu_family_type_info) = { \
+ .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \
+ .parent = TYPE_POWERPC_CPU, \
+ .abstract = true, \
+ .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \
+ }; \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \
+ { \
+ type_register_static( \
+ &glue(glue(ppc_, _name), _cpu_family_type_info)); \
+ } \
+ \
+ type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_class_init)
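+/*
+ * An invocation such as POWERPC_FAMILY(401)(ObjectClass *oc, void *data)
+ * { ... } therefore registers an abstract "401-family" QOM type at
+ * startup and supplies the class_init body for that type in one go.
+ */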
+
+static void init_proc_401(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_401_sprs(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
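+    /*
+     * The four arguments seem to be the selectable fixed-interval and
+     * watchdog timer periods, expressed as time-base bit positions.
+     */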
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401";
+ pcc->init_proc = init_proc_401;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_WRTEE | PPC_DCR |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
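+    /* msr_mask lists the MSR bits this model actually implements. */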
+ pcc->msr_mask = (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_401x2(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_401x2_sprs(env);
+ register_compress_sprs(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401x2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401x2";
+ pcc->init_proc = init_proc_401x2;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_401x3(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_401_sprs(env);
+ register_401x2_sprs(env);
+ register_compress_sprs(env);
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401x3)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401x3";
+ pcc->init_proc = init_proc_401x3;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_IOP480(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_401x2_sprs(env);
+ register_compress_sprs(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(IOP480)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "IOP480";
+ pcc->init_proc = init_proc_IOP480;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_403(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_403_sprs(env);
+ register_403_real_sprs(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(403)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 403";
+ pcc->init_proc = init_proc_403;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_PE) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_403GCX(CPUPPCState *env)
+{
+ register_40x_sprs(env);
+ register_401_403_sprs(env);
+ register_403_sprs(env);
+ register_403_real_sprs(env);
+ register_403_mmu_sprs(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+    /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 403 GCX";
+ pcc->init_proc = init_proc_403GCX;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_PE) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_405(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_40x_sprs(env);
+ register_405_sprs(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+    /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 405";
+ pcc->init_proc = init_proc_405;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_405;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440EP(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
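+    /*
+     * The mask argument presumably selects which IVORs exist on this
+     * core; compare the per-version masks in init_proc_e500() below.
+     */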
+ register_BookE_sprs(env, 0x000000000000FFFFULL);
+ register_440_sprs(env);
+ register_usprgh_sprs(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440 EP";
+ pcc->init_proc = init_proc_440EP;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 460 EX";
+ pcc->init_proc = init_proc_440EP;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440GP(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_BookE_sprs(env, 0x000000000000FFFFULL);
+ register_440_sprs(env);
+ register_usprgh_sprs(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440 GP";
+ pcc->init_proc = init_proc_440GP;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440x4(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_BookE_sprs(env, 0x000000000000FFFFULL);
+ register_440_sprs(env);
+ register_usprgh_sprs(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440x4)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x4";
+ pcc->init_proc = init_proc_440x4;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440x5(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_BookE_sprs(env, 0x000000000000FFFFULL);
+ register_440_sprs(env);
+ register_usprgh_sprs(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ ppc40x_irq_init(env_archcpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x5";
+ pcc->init_proc = init_proc_440x5;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x5 with double precision FPU";
+ pcc->init_proc = init_proc_440x5;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FSQRT |
+ PPC_FLOAT_STFIWX |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_MPC5xx(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_5xx_8xx_sprs(env);
+ register_5xx_sprs(env);
+ init_excp_MPC5xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "Freescale 5xx cores (aka RCPU)";
+ pcc->init_proc = init_proc_MPC5xx;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_MEM_EIEIO | PPC_MEM_SYNC |
+ PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX |
+ PPC_MFTB;
+ pcc->msr_mask = (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
+ pcc->bfd_mach = bfd_mach_ppc_505;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_MPC8xx(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_5xx_8xx_sprs(env);
+ register_8xx_sprs(env);
+ init_excp_MPC8xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "Freescale 8xx cores (aka PowerQUICC)";
+ pcc->init_proc = init_proc_MPC8xx;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_MEM_EIEIO | PPC_MEM_SYNC |
+ PPC_CACHE_ICBI | PPC_MFTB;
+ pcc->msr_mask = (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_MPC8xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+/* Freescale 82xx cores (aka PowerQUICC-II) */
+
+static void init_proc_G2(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_G2_755_sprs(env);
+ register_G2_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(G2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC G2";
+ pcc->init_proc = init_proc_G2;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_G2LE(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_G2_755_sprs(env);
+ register_G2_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC G2LE";
+ pcc->init_proc = init_proc_G2LE;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e200(CPUPPCState *env)
+{
+ /* Time base */
+ register_tbl(env);
+ register_BookE_sprs(env, 0x000000070000FFFFULL);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+ /* Memory management */
+ register_BookE206_sprs(env, 0x0000005D, NULL, 0);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_CTXCR, "CTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCNT, "DBCNT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCR3, "DBCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_e200(env, 0xFFFF0000UL);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e200 core";
+ pcc->init_proc = init_proc_e200;
+ pcc->check_pow = check_pow_hid0;
+ /*
+ * XXX: unimplemented instructions:
+ * dcblc
+ * dcbtlst
+ * dcbtstls
+ * icblc
+ * icbtls
+ * tlbivax
+ * all SPE multiply-accumulate instructions
+ */
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX |
+ PPC_BOOKE;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e300(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_603_sprs(env);
+ /* Time base */
+ register_tbl(env);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(e300)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e300 core";
+ pcc->init_proc = init_proc_e300;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_603;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+enum fsl_e500_version {
+ fsl_e500v1,
+ fsl_e500v2,
+ fsl_e500mc,
+ fsl_e5500,
+ fsl_e6500,
+};
+
+static void init_proc_e500(CPUPPCState *env, int version)
+{
+ uint32_t tlbncfg[2];
+ uint64_t ivor_mask;
+ uint64_t ivpr_mask = 0xFFFF0000ULL;
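+    /*
+     * The values below follow the Book E L1CFG0/L1CFG1 layout (assumed
+     * here): the low bits hold the cache size in KB and bits 13:11 hold
+     * the number of ways minus one, so 0x3800 | 0x0020 encodes an 8-way,
+     * 32 KB cache.
+     */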
+    uint32_t l1cfg0 = 0x3800 /* 8 ways */
+                | 0x0020; /* 32 KB */
+    uint32_t l1cfg1 = 0x3800 /* 8 ways */
+                | 0x0020; /* 32 KB */
+ uint32_t mmucfg = 0;
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+#endif
+
+ /* Time base */
+ register_tbl(env);
+    /*
+     * XXX: the e500 doesn't implement IVOR7 and IVOR9, but doesn't
+     * complain when they are accessed, so the stricter mask is not used:
+     * register_BookE_sprs(env, 0x0000000F0000FD7FULL);
+     */
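+    /*
+     * The ivor_mask chosen below tells register_BookE_sprs() which IVOR
+     * SPRs to create; later cores implement more of them.
+     */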
+ switch (version) {
+ case fsl_e500v1:
+ case fsl_e500v2:
+ default:
+ ivor_mask = 0x0000000F0000FFFFULL;
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ ivor_mask = 0x000003FE0000FFFFULL;
+ break;
+ case fsl_e6500:
+ ivor_mask = 0x000003FF0000FFFFULL;
+ break;
+ }
+ register_BookE_sprs(env, ivor_mask);
+ register_usprg3_sprs(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ /* Memory management */
+ env->nb_pids = 3;
+ env->nb_ways = 2;
+ env->id_tlbs = 0;
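+    /*
+     * The register_tlbncfg() arguments read as (associativity, min page
+     * size, max page size, flags, number of entries), packed into the
+     * TLBnCFG SPR layout.
+     */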
+ switch (version) {
+ case fsl_e500v1:
+ tlbncfg[0] = register_tlbncfg(2, 1, 1, 0, 256);
+ tlbncfg[1] = register_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16);
+ break;
+ case fsl_e500v2:
+ tlbncfg[0] = register_tlbncfg(4, 1, 1, 0, 512);
+ tlbncfg[1] = register_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16);
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ tlbncfg[0] = register_tlbncfg(4, 1, 1, 0, 512);
+ tlbncfg[1] = register_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64);
+ break;
+ case fsl_e6500:
+ mmucfg = 0x6510B45;
+ env->nb_pids = 1;
+ tlbncfg[0] = 0x08052400;
+ tlbncfg[1] = 0x40028040;
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n",
+ env->spr[SPR_PVR]);
+ }
+#endif
+ /* Cache sizes */
+ switch (version) {
+ case fsl_e500v1:
+ case fsl_e500v2:
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ env->dcache_line_size = 64;
+ env->icache_line_size = 64;
+ l1cfg0 |= 0x1000000; /* 64 byte cache block size */
+ l1cfg1 |= 0x1000000; /* 64 byte cache block size */
+ break;
+ case fsl_e6500:
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ l1cfg0 |= 0x0F83820;
+ l1cfg1 |= 0x0B83820;
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n",
+ env->spr[SPR_PVR]);
+ }
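+    /*
+     * The 0x000000DF MAS mask below corresponds to MAS0-MAS4, MAS6 and
+     * MAS7 being present (no MAS5).
+     */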
+ register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBEAR, "BBEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBTAR, "BBTAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_MCAR, "MCAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_NPIDR, "NPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ l1cfg0);
+ spr_register(env, SPR_Exxx_L1CFG1, "L1CFG1",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ l1cfg1);
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_e500_l1csr0,
+ 0x00000000);
+ spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_e500_l1csr1,
+ 0x00000000);
+ if (version != fsl_e500v1 && version != fsl_e500v2) {
+ spr_register(env, SPR_Exxx_L2CSR0, "L2CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_e500_l2csr0,
+ 0x00000000);
+ }
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke206_mmucsr0,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_EPR, "EPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+    /* XXX: this would be better abstracted into Emb.xxx features */
+ if ((version == fsl_e5500) || (version == fsl_e6500)) {
+ spr_register(env, SPR_BOOKE_EPCR, "EPCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MAS7_MAS3, "MAS7_MAS3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_mas73, &spr_write_mas73,
+ 0x00000000);
+ ivpr_mask = (target_ulong)~0xFFFFULL;
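+        /* on these 64-bit cores the IVPR covers all but the low 16 bits */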
+ }
+
+ if (version == fsl_e6500) {
+ /* Thread identification */
+ spr_register(env, SPR_TIR, "TIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TLB0PS, "TLB0PS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000004);
+ spr_register(env, SPR_BOOKE_TLB1PS, "TLB1PS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x7FFFFFFC);
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 0;
+ env->tlb_type = TLB_MAS;
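+    /* total soft-TLB entries: the sum over all Book E 2.06 TLB arrays */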
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ env->nb_tlb += booke206_tlb_size(env, i);
+ }
+#endif
+
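+    /* the e500 reuses the e200 exception vector setup, with its own IVPR mask */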
+ init_excp_e200(env, ivpr_mask);
+ /* Allocate hardware IRQ controller */
+ ppce500_irq_init(env_archcpu(env));
+}
+
+static void init_proc_e500v1(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500v1);
+}
+
+POWERPC_FAMILY(e500v1)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500v1 core";
+ pcc->init_proc = init_proc_e500v1;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e500v2(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500v2);
+}
+
+POWERPC_FAMILY(e500v2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500v2 core";
+ pcc->init_proc = init_proc_e500v2;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e500mc(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500mc);
+}
+
+POWERPC_FAMILY(e500mc)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500mc core";
+ pcc->init_proc = init_proc_e500mc;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB |
+ PPC_WRTEE | PPC_RFDI | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_FLOAT | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL |
+ PPC_FLOAT_STFIWX | PPC_WAIT |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL;
+ pcc->msr_mask = (1ull << MSR_GS) |
+ (1ull << MSR_UCLE) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ /* FIXME: figure out the correct flag for e500mc */
+ pcc->bfd_mach = bfd_mach_ppc_e500;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+#ifdef TARGET_PPC64
+static void init_proc_e5500(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e5500);
+}
+
+POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e5500 core";
+ pcc->init_proc = init_proc_e5500;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB |
+ PPC_WRTEE | PPC_RFDI | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_FLOAT | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL |
+ PPC_FLOAT_STFIWX | PPC_WAIT |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC |
+ PPC_64B | PPC_POPCNTB | PPC_POPCNTWD;
+ pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 |
+ PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_CM) |
+ (1ull << MSR_GS) |
+ (1ull << MSR_UCLE) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ /* FIXME: figure out the correct flag for e5500 */
+ pcc->bfd_mach = bfd_mach_ppc_e500;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e6500(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e6500);
+}
+
+POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e6500 core";
+ pcc->init_proc = init_proc_e6500;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB |
+ PPC_WRTEE | PPC_RFDI | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_FLOAT | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL |
+ PPC_FLOAT_STFIWX | PPC_WAIT |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC |
+ PPC_64B | PPC_POPCNTB | PPC_POPCNTWD | PPC_ALTIVEC;
+ pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 |
+ PPC2_FP_CVT_S64 | PPC2_ATOMIC_ISA206;
+ pcc->msr_mask = (1ull << MSR_CM) |
+ (1ull << MSR_GS) |
+ (1ull << MSR_UCLE) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IS) |
+ (1ull << MSR_DS) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_VR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_e500;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_VRE;
+}
+
+#endif
+
+/* Non-embedded PowerPC */
+
+#define POWERPC_MSRR_601 (0x0000000000001040ULL)
+
+static void init_proc_601(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_601_sprs(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hid0_601,
+ 0x80010080);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ init_excp_601(env);
+    /*
+     * XXX: beware that the dcache line size is 64 bytes,
+     * but dcbz uses 32-byte "sectors".
+     * XXX: this breaks the clcs instruction!
+     */
+ env->dcache_line_size = 32;
+ env->icache_line_size = 64;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(601)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 601";
+ pcc->init_proc = init_proc_601;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
+ PPC_FLOAT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_601;
+ pcc->excp_model = POWERPC_EXCP_601;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_601;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE;
+}
+
+#define POWERPC_MSRR_601v (0x0000000000001040ULL)
+
+static void init_proc_601v(CPUPPCState *env)
+{
+ init_proc_601(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID15, "HID15",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+POWERPC_FAMILY(601v)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 601v";
+ pcc->init_proc = init_proc_601v;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
+ PPC_FLOAT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_601;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_601;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE;
+}
+
+static void init_proc_602(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_602_sprs(env);
+ /* Time base */
+ register_tbl(env);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_602(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(602)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 602";
+ pcc->init_proc = init_proc_602;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_602_SPEC;
+ pcc->msr_mask = (1ull << MSR_VSX) |
+ (1ull << MSR_SA) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+    /* XXX: the 602 MMU is quite specific; a special case should be added for it */
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_602;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_602;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_603(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_603_sprs(env);
+ /* Time base */
+ register_tbl(env);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 603";
+ pcc->init_proc = init_proc_603;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_603;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_603E(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_603_sprs(env);
+ /* Time base */
+ register_tbl(env);
+    /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 603e";
+ pcc->init_proc = init_proc_603E;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603E;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_604(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_604_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(604)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 604";
+ pcc->init_proc = init_proc_604;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_604;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_604;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_604E(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_604_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 604E";
+ pcc->init_proc = init_proc_604E;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_604;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_604;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_740(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(740)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 740";
+ pcc->init_proc = init_proc_740;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+    /*
+     * XXX: high BATs are also present, but they are known to be buggy on
+     * die revision 1.x
+     */
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(750)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750";
+ pcc->init_proc = init_proc_750;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750cl(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+    /* These registers are fake on the 750CL */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX: not implemented */
+ spr_register(env, SPR_750_TDCL, "TDCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_TDCH, "TDCH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* DMA */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_WPAR, "WPAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAL, "DMAL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAU, "DMAU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Quantization registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR0, "GQR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR1, "GQR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR2, "GQR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR3, "GQR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR4, "GQR4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR5, "GQR5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR6, "GQR6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR7, "GQR7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ /* PowerPC 750cl has 8 DBATs and 8 IBATs */
+ register_high_BATs(env);
+ init_excp_750cl(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750 CL";
+ pcc->init_proc = init_proc_750cl;
+ pcc->check_pow = check_pow_hid0;
+ /*
+ * XXX: not implemented:
+ * cache lock instructions:
+ * dcbz_l
+ * floating point paired instructions
+ * psq_lux
+ * psq_lx
+ * psq_stux
+ * psq_stx
+ * ps_abs
+ * ps_add
+ * ps_cmpo0
+ * ps_cmpo1
+ * ps_cmpu0
+ * ps_cmpu1
+ * ps_div
+ * ps_madd
+ * ps_madds0
+ * ps_madds1
+ * ps_merge00
+ * ps_merge01
+ * ps_merge10
+ * ps_merge11
+ * ps_mr
+ * ps_msub
+ * ps_mul
+ * ps_muls0
+ * ps_muls1
+ * ps_nabs
+ * ps_neg
+ * ps_nmadd
+ * ps_nmsub
+ * ps_res
+ * ps_rsqrte
+ * ps_sel
+ * ps_sub
+ * ps_sum0
+ * ps_sum1
+ */
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750cx(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* This register is not implemented but is present for compatibility */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ /* PowerPC 750cx has 8 DBATs and 8 IBATs */
+ register_high_BATs(env);
+ init_excp_750cx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750CX";
+ pcc->init_proc = init_proc_750cx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750fx(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+    /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ register_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750FX";
+ pcc->init_proc = init_proc_750fx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750gx(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+    /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ register_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750GX";
+ pcc->init_proc = init_proc_750gx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_745(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ register_G2_755_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(745)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 745";
+ pcc->init_proc = init_proc_745;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_755(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ register_G2_755_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* L2 cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 755";
+ pcc->init_proc = init_proc_755;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7400(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
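+    /* 0x00010000 corresponds to VSCR[NJ] (non-Java mode) being set at reset */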
+ vscr_init(env, 0x00010000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+    /* XXX: not implemented; this SPR also seems to be absent on some revisions. */
+ spr_register(env, SPR_MSSCR1, "MSSCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* Memory management */
+ register_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7400)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7400 (aka G4)";
+ pcc->init_proc = init_proc_7400;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7410(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Thermal management */
+ register_thrm_sprs(env);
+ /* L2PMCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTDB */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTDB, "LDSTDB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7410)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7410 (aka G4)";
+ pcc->init_proc = init_proc_7410;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7440(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7440 (aka G4)";
+ pcc->init_proc = init_proc_7440;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7450(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* Level 3 cache control */
+ register_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7450 (aka G4)";
+ pcc->init_proc = init_proc_7450;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7445(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7445 (aka G4)";
+ pcc->init_proc = init_proc_7445;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7455(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* Level 3 cache control */
+ register_l3_ctrl(env);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7455 (aka G4)";
+ pcc->init_proc = init_proc_7455;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7457(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* Level 3 cache control */
+ register_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7457 (aka G4)";
+ pcc->init_proc = init_proc_7457;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e600(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_sdr1_sprs(env);
+ register_7xx_sprs(env);
+ /* Time base */
+ register_tbl(env);
+ /* 74xx specific SPR */
+ register_74xx_sprs(env);
+ vscr_init(env, 0x00010000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ register_low_BATs(env);
+ register_high_BATs(env);
+ register_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(e600)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC e600";
+ pcc->init_proc = init_proc_e600;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->insns_flags2 = PPC_NONE;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+#if defined(TARGET_PPC64)
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC970_HID5_INIT 0x00000080
+#else
+#define POWERPC970_HID5_INIT 0x00000000
+#endif
+
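+/* The 970 may enter a power-saving state only when HID0 enables doze, nap or deep nap. */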
+static int check_pow_970(CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & (HID0_DEEPNAP | HID0_DOZE | HID0_NAP)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static void register_970_hid_sprs(CPUPPCState *env)
+{
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+}
+
+static void register_970_hior_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+}
+
+static void register_book3s_ctrl_sprs(CPUPPCState *env)
+{
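+ /* CTRL is write-only at its privileged address; UCTRL is the read-only user alias. */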
+ spr_register(env, SPR_CTRL, "SPR_CTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_UCTRL, "SPR_UCTRL",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void register_book3s_altivec_sprs(CPUPPCState *env)
+{
+ if (!(env->insns_flags & PPC_ALTIVEC)) {
+ return;
+ }
+
+ spr_register_kvm(env, SPR_VRSAVE, "VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_VRSAVE, 0x00000000);
+}
+
+static void register_book3s_dbg_sprs(CPUPPCState *env)
+{
+ /*
+ * TODO: different specs define different scopes for these,
+ * will have to address this:
+ * 970: super/write and super/read
+ * powerisa 2.03..2.04: hypv/write and super/read.
+ * powerisa 2.05 and newer: hypv/write and hypv/read.
+ */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ spr_register_kvm(env, SPR_DABRX, "DABRX",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABRX, 0x00000000);
+}
+
+static void register_book3s_207_dbg_sprs(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_DAWR0, "DAWR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWR, 0x00000000);
+ spr_register_kvm_hv(env, SPR_DAWRX0, "DAWRX0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWRX, 0x00000000);
+ spr_register_kvm_hv(env, SPR_CIABR, "CIABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CIABR, 0x00000000);
+}
+
+static void register_970_dbg_sprs(CPUPPCState *env)
+{
+ /* Breakpoints */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_book3s_pmu_sup_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR0, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCRA, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC2, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC3, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC4, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC5, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC6, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIAR, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SDAR, "SDAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SDAR, 0x00000000);
+}
+
+static void register_book3s_pmu_user_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_POWER_UMMCR0, "UMMCR0",
+ &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UMMCRA, "UMMCRA",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC1, "UPMC1",
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC2, "UPMC2",
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC3, "UPMC3",
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC4, "UPMC4",
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC5, "UPMC5",
+ &spr_read_PMC56_ureg, &spr_write_PMC56_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC6, "UPMC6",
+ &spr_read_PMC56_ureg, &spr_write_PMC56_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USDAR, "USDAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+}
+
+static void register_970_pmu_sup_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_970_PMC7, "PMC7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC7, 0x00000000);
+ spr_register_kvm(env, SPR_970_PMC8, "PMC8",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC8, 0x00000000);
+}
+
+static void register_970_pmu_user_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_970_UPMC7, "UPMC7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_970_UPMC8, "UPMC8",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+}
+
+static void register_power8_pmu_sup_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_POWER_MMCR2, "MMCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR2, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCRS, "MMCRS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCRS, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIER, "SIER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIER, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC1, "SPMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC2, "SPMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC2, 0x00000000);
+ spr_register_kvm(env, SPR_TACR, "TACR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TACR, 0x00000000);
+ spr_register_kvm(env, SPR_TCSCR, "TCSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TCSCR, 0x00000000);
+ spr_register_kvm(env, SPR_CSIGR, "CSIGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CSIGR, 0x00000000);
+}
+
+static void register_power8_pmu_user_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_POWER_UMMCR2, "UMMCR2",
+ &spr_read_MMCR2_ureg, &spr_write_MMCR2_ureg,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIER, "USIER",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_power5p_ear_sprs(CPUPPCState *env)
+{
+ /* External access control */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_power5p_tb_sprs(CPUPPCState *env)
+{
+ /* TBU40 (High 40 bits of the Timebase register) */
+ spr_register_hv(env, SPR_TBU40, "TBU40",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu40,
+ 0x00000000);
+}
+
+static void register_970_lpar_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /*
+ * PPC970: HID4 covers things controlled by the LPCR and
+ * RMOR on later CPUs, but with a different encoding. We only
+ * support the 970 in "Apple mode", which has all hypervisor
+ * facilities disabled by strapping, so we can basically just
+ * ignore it.
+ */
+ spr_register(env, SPR_970_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+static void register_power5p_lpar_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Logical partitioning */
+ spr_register_kvm_hv(env, SPR_LPCR, "LPCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_lpcr,
+ KVM_REG_PPC_LPCR, LPCR_LPES0 | LPCR_LPES1);
+ spr_register_hv(env, SPR_HDEC, "HDEC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hdecr, &spr_write_hdecr, 0);
+#endif
+}
+
+static void register_book3s_ids_sprs(CPUPPCState *env)
+{
+ /* FIXME: Will need to deal with thread vs core only SPRs */
+
+ /* Processor identification */
+ spr_register_hv(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, NULL,
+ 0x00000000);
+ spr_register_hv(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TSCR, "TSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMER, "HMER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hmer,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMEER, "HMEER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TFMR, "TFMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_LPIDR, "LPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_lpidr,
+ 0x00000000);
+ spr_register_hv(env, SPR_HFSCR, "HFSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRC, "MMCRC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRH, "MMCRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG0, "HSPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG1, "HSPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR0, "HSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR1, "HSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDAR, "HDAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDSISR, "HDSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HRMOR, "HRMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_rmor_sprs(CPUPPCState *env)
+{
+ spr_register_hv(env, SPR_RMOR, "RMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_power8_ids_sprs(CPUPPCState *env)
+{
+ /* Thread identification */
+ spr_register(env, SPR_TIR, "TIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void register_book3s_purr_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* PURR & SPURR: Hack - treat these as aliases for the TB for now */
+ spr_register_kvm_hv(env, SPR_PURR, "PURR",
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, &spr_write_purr,
+ KVM_REG_PPC_PURR, 0x00000000);
+ spr_register_kvm_hv(env, SPR_SPURR, "SPURR",
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, &spr_write_purr,
+ KVM_REG_PPC_SPURR, 0x00000000);
+#endif
+}
+
+static void register_power6_dbg_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_CFAR, "SPR_CFAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_cfar, &spr_write_cfar,
+ 0x00000000);
+#endif
+}
+
+static void register_power5p_common_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_PPR, "PPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PPR, 0x00000000);
+}
+
+static void register_power6_common_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_DSCR, "SPR_DSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSCR, 0x00000000);
+#endif
+ /*
+ * Register PCR to report POWERPC_EXCP_PRIV_REG instead of
+ * POWERPC_EXCP_INVAL_SPR in userspace. Permit hypervisor access.
+ */
+ spr_register_hv(env, SPR_PCR, "PCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pcr,
+ 0x00000000);
+}
+
+static void register_power8_tce_address_control_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_TAR, "TAR",
+ &spr_read_tar, &spr_write_tar,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TAR, 0x00000000);
+}
+
+static void register_power8_tm_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_TFHAR, "TFHAR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TFHAR, 0x00000000);
+ spr_register_kvm(env, SPR_TFIAR, "TFIAR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TFIAR, 0x00000000);
+ spr_register_kvm(env, SPR_TEXASR, "TEXASR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TEXASR, 0x00000000);
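+ /* TEXASRU aliases the upper 32 bits of TEXASR. */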
+ spr_register(env, SPR_TEXASRU, "TEXASRU",
+ &spr_read_tm_upper32, &spr_write_tm_upper32,
+ &spr_read_tm_upper32, &spr_write_tm_upper32,
+ 0x00000000);
+}
+
+static void register_power8_ebb_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_BESCRS, "BESCRS",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BESCRSU, "BESCRSU",
+ &spr_read_ebb_upper32, &spr_write_ebb_upper32,
+ &spr_read_prev_upper32, &spr_write_prev_upper32,
+ 0x00000000);
+ spr_register(env, SPR_BESCRR, "BESCRR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BESCRRU, "BESCRRU",
+ &spr_read_ebb_upper32, &spr_write_ebb_upper32,
+ &spr_read_prev_upper32, &spr_write_prev_upper32,
+ 0x00000000);
+ spr_register_kvm(env, SPR_EBBHR, "EBBHR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_EBBHR, 0x00000000);
+ spr_register_kvm(env, SPR_EBBRR, "EBBRR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_EBBRR, 0x00000000);
+ spr_register_kvm(env, SPR_BESCR, "BESCR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_BESCR, 0x00000000);
+}
+
+/* Virtual Time Base */
+static void register_vtb_sprs(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_VTB, "VTB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_vtb, SPR_NOACCESS,
+ &spr_read_vtb, &spr_write_vtb,
+ KVM_REG_PPC_VTB, 0x00000000);
+}
+
+static void register_power8_fscr_sprs(CPUPPCState *env)
+{
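+ /* With CONFIG_USER_ONLY there is no OS to set FSCR, so TAR is presumably enabled up front. */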
+#if defined(CONFIG_USER_ONLY)
+ target_ulong initval = 1ULL << FSCR_TAR;
+#else
+ target_ulong initval = 0;
+#endif
+ spr_register_kvm(env, SPR_FSCR, "FSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_FSCR, initval);
+}
+
+static void register_power8_pspb_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_PSPB, "PSPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ KVM_REG_PPC_PSPB, 0);
+}
+
+static void register_power8_dpdes_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Directed Privileged Door-bell Exception State, used for IPI */
+ spr_register_kvm_hv(env, SPR_DPDES, "DPDES",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dpdes, SPR_NOACCESS,
+ &spr_read_dpdes, &spr_write_dpdes,
+ KVM_REG_PPC_DPDES, 0x00000000);
+#endif
+}
+
+static void register_power8_ic_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_IC, "IC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+#endif
+}
+
+static void register_power8_book4_sprs(CPUPPCState *env)
+{
+ /* Add a number of P8 book4 registers */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_ACOP, "ACOP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_ACOP, 0);
+ spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pidr,
+ KVM_REG_PPC_PID, 0);
+ spr_register_kvm(env, SPR_WORT, "WORT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_WORT, 0);
+#endif
+}
+
+static void register_power7_book4_sprs(CPUPPCState *env)
+{
+ /* Add a number of P7 book4 registers */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_ACOP, "ACOP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_ACOP, 0);
+ spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PID, 0);
+#endif
+}
+
+static void register_power8_rpr_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_RPR, "RPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000103070F1F3F);
+#endif
+}
+
+static void register_power9_mmu_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Partition Table Control */
+ spr_register_kvm_hv(env, SPR_PTCR, "PTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_ptcr,
+ KVM_REG_PPC_PTCR, 0x00000000);
+ /* Address Segment Descriptor Register */
+ spr_register_hv(env, SPR_ASDR, "ASDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x0000000000000000);
+#endif
+}
+
+static void init_proc_book3s_common(CPUPPCState *env)
+{
+ register_ne_601_sprs(env);
+ register_tbl(env);
+ register_usprg3_sprs(env);
+ register_book3s_altivec_sprs(env);
+ register_book3s_pmu_sup_sprs(env);
+ register_book3s_pmu_user_sprs(env);
+ register_book3s_ctrl_sprs(env);
+ /*
+ * Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors.
+ */
+ vscr_init(env, 0x00010000);
+}
+
+static void init_proc_970(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_sdr1_sprs(env);
+ register_book3s_dbg_sprs(env);
+
+ /* 970 Specific Registers */
+ register_970_hid_sprs(env);
+ register_970_hior_sprs(env);
+ register_low_BATs(env);
+ register_970_pmu_sup_sprs(env);
+ register_970_pmu_user_sprs(env);
+ register_970_lpar_sprs(env);
+ register_970_dbg_sprs(env);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_970(env);
+ ppc970_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 970";
+ pcc->init_proc = init_proc_970;
+ pcc->check_pow = check_pow_970;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_64B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->hash64_opts = &ppc_hash64_opts_basic;
+#endif
+ pcc->excp_model = POWERPC_EXCP_970;
+ pcc->bus_model = PPC_FLAGS_INPUT_970;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
+}
+
+static void init_proc_power5plus(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_sdr1_sprs(env);
+ register_book3s_dbg_sprs(env);
+
+ /* POWER5+ Specific Registers */
+ register_970_hid_sprs(env);
+ register_970_hior_sprs(env);
+ register_low_BATs(env);
+ register_970_pmu_sup_sprs(env);
+ register_970_pmu_user_sprs(env);
+ register_power5p_common_sprs(env);
+ register_power5p_lpar_sprs(env);
+ register_power5p_ear_sprs(env);
+ register_power5p_tb_sprs(env);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_970(env);
+ ppc970_irq_init(env_archcpu(env));
+}
+
+POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER5";
+ dc->desc = "POWER5+";
+ pcc->init_proc = init_proc_power5plus;
+ pcc->check_pow = check_pow_970;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B |
+ PPC_SEGMENT_64B | PPC_SLBI;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI);
+ pcc->lpcr_mask = LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 |
+ LPCR_RMI | LPCR_HDICE;
+ pcc->mmu_model = POWERPC_MMU_2_03;
+#if defined(CONFIG_SOFTMMU)
+ pcc->hash64_opts = &ppc_hash64_opts_basic;
+ pcc->lrg_decr_bits = 32;
+#endif
+ pcc->excp_model = POWERPC_EXCP_970;
+ pcc->bus_model = PPC_FLAGS_INPUT_970;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
+}
+
+static void init_proc_POWER7(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_sdr1_sprs(env);
+ register_book3s_dbg_sprs(env);
+
+ /* POWER7 Specific Registers */
+ register_book3s_ids_sprs(env);
+ register_rmor_sprs(env);
+ register_amr_sprs(env);
+ register_book3s_purr_sprs(env);
+ register_power5p_common_sprs(env);
+ register_power5p_lpar_sprs(env);
+ register_power5p_ear_sprs(env);
+ register_power5p_tb_sprs(env);
+ register_power6_common_sprs(env);
+ register_power6_dbg_sprs(env);
+ register_power7_book4_sprs(env);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_POWER7(env);
+ ppcPOWER7_irq_init(env_archcpu(env));
+}
+
+static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr)
+{
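+ /* Accept both the POWER7 and POWER7+ PVR families. */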
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) {
+ return true;
+ }
+ return false;
+}
+
+static bool cpu_has_work_POWER7(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
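+ /* A halted core wakes only for exceptions enabled by the POWER7 LPCR PECE bits, or a system reset. */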
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER7";
+ dc->desc = "POWER7";
+ pcc->pvr_match = ppc_pvr_match_power7;
+ pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
+ pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER7;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER7;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 |
+ PPC2_PM_ISA206;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
+ LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
+ LPCR_MER | LPCR_TC |
+ LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2;
+ pcc->mmu_model = POWERPC_MMU_2_06;
+#if defined(CONFIG_SOFTMMU)
+ pcc->hash64_opts = &ppc_hash64_opts_POWER7;
+ pcc->lrg_decr_bits = 32;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER7;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+static void init_proc_POWER8(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_sdr1_sprs(env);
+ register_book3s_207_dbg_sprs(env);
+
+ /* POWER8 Specific Registers */
+ register_book3s_ids_sprs(env);
+ register_rmor_sprs(env);
+ register_amr_sprs(env);
+ register_iamr_sprs(env);
+ register_book3s_purr_sprs(env);
+ register_power5p_common_sprs(env);
+ register_power5p_lpar_sprs(env);
+ register_power5p_ear_sprs(env);
+ register_power5p_tb_sprs(env);
+ register_power6_common_sprs(env);
+ register_power6_dbg_sprs(env);
+ register_power8_tce_address_control_sprs(env);
+ register_power8_ids_sprs(env);
+ register_power8_ebb_sprs(env);
+ register_power8_fscr_sprs(env);
+ register_power8_pmu_sup_sprs(env);
+ register_power8_pmu_user_sprs(env);
+ register_power8_tm_sprs(env);
+ register_power8_pspb_sprs(env);
+ register_power8_dpdes_sprs(env);
+ register_vtb_sprs(env);
+ register_power8_ic_sprs(env);
+ register_power8_book4_sprs(env);
+ register_power8_rpr_sprs(env);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_POWER8(env);
+ ppcPOWER7_irq_init(env_archcpu(env));
+}
+
+static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) {
+ return true;
+ }
+ return false;
+}
+
+static bool cpu_has_work_POWER8(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
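+ /* As on POWER7, the LPCR P8_PECE0..4 bits select which exceptions can wake a halted core. */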
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER8";
+ dc->desc = "POWER8";
+ pcc->pvr_match = ppc_pvr_match_power8;
+ pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER8;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER8;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_PM_ISA206;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_HV) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_TS0) |
+ (1ull << MSR_TS1) |
+ (1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
+ LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
+ LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
+ LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+ LPCR_P8_PECE3 | LPCR_P8_PECE4;
+ pcc->mmu_model = POWERPC_MMU_2_07;
+#if defined(CONFIG_SOFTMMU)
+ pcc->hash64_opts = &ppc_hash64_opts_POWER7;
+ pcc->lrg_decr_bits = 32;
+ pcc->n_host_threads = 8;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER8;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+#ifdef CONFIG_SOFTMMU
+/*
+ * Radix page sizes and AP encodings for dt node ibm,processor-radix-AP-encodings
+ * Encoded as an array of 32-bit integers in the form:
+ * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
+ * x -> AP encoding
+ * y -> radix mode supported page size (encoded as a shift)
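+ * Example: 0xa0000010 -> AP 0x5 (0b101), shift 0x10 -> 64K pages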
+ */
+static struct ppc_radix_page_info POWER9_radix_page_info = {
+ .count = 4,
+ .entries = {
+ 0x0000000c, /* 4K - enc: 0x0 */
+ 0xa0000010, /* 64K - enc: 0x5 */
+ 0x20000015, /* 2M - enc: 0x1 */
+ 0x4000001e /* 1G - enc: 0x2 */
+ }
+};
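+
+/*
+ * Editor's note: a minimal decode sketch (not part of this patch) for the
+ * packed entries above, assuming the bit layout described in the comment;
+ * the helper name is hypothetical. E.g. 0xa0000010 -> AP 0x5, shift 16
+ * (64K pages).
+ */
+static inline void radix_ap_entry_decode(uint32_t entry,
+                                         uint32_t *ap, uint32_t *shift)
+{
+    *ap = entry >> 29;            /* top 3 bits: AP encoding */
+    *shift = entry & 0x1fffffff;  /* low 29 bits: page-size shift */
+}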
+#endif /* CONFIG_SOFTMMU */
+
+static void init_proc_POWER9(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_book3s_207_dbg_sprs(env);
+
+    /* Registers shared with POWER8 */
+ register_book3s_ids_sprs(env);
+ register_amr_sprs(env);
+ register_iamr_sprs(env);
+ register_book3s_purr_sprs(env);
+ register_power5p_common_sprs(env);
+ register_power5p_lpar_sprs(env);
+ register_power5p_ear_sprs(env);
+ register_power5p_tb_sprs(env);
+ register_power6_common_sprs(env);
+ register_power6_dbg_sprs(env);
+ register_power8_tce_address_control_sprs(env);
+ register_power8_ids_sprs(env);
+ register_power8_ebb_sprs(env);
+ register_power8_fscr_sprs(env);
+ register_power8_pmu_sup_sprs(env);
+ register_power8_pmu_user_sprs(env);
+ register_power8_tm_sprs(env);
+ register_power8_pspb_sprs(env);
+ register_power8_dpdes_sprs(env);
+ register_vtb_sprs(env);
+ register_power8_ic_sprs(env);
+ register_power8_book4_sprs(env);
+ register_power8_rpr_sprs(env);
+ register_power9_mmu_sprs(env);
+
+    /* POWER9 Specific Registers */
+ spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL,
+ spr_read_generic, spr_write_generic,
+ KVM_REG_PPC_TIDR, 0);
+
+ /* FIXME: Filter fields properly based on privilege level */
+ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
+ spr_read_generic, spr_write_generic,
+ KVM_REG_PPC_PSSCR, 0);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_POWER9(env);
+ ppcPOWER9_irq_init(env_archcpu(env));
+}
+
+static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) {
+ return true;
+ }
+ return false;
+}
+
+static bool cpu_has_work_POWER9(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
+ uint64_t psscr = env->spr[SPR_PSSCR];
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+
+ /* If EC is clear, just return true on any pending interrupt */
+ if (!(psscr & PSSCR_EC)) {
+ return true;
+ }
+ /* External Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_EEE)) {
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (heic == 0 || !msr_hv || msr_pr) {
+ return true;
+ }
+ }
+ /* Decrementer Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_DEE)) {
+ return true;
+ }
+ /* Machine Check or Hypervisor Maintenance Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK |
+ 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) {
+ return true;
+ }
+ /* Privileged Doorbell Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_PDEE)) {
+ return true;
+ }
+ /* Hypervisor Doorbell Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_HDEE)) {
+ return true;
+ }
+ /* Hypervisor virtualization exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) &&
+ (env->spr[SPR_LPCR] & LPCR_HVEE)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER9";
+ dc->desc = "POWER9";
+ pcc->pvr_match = ppc_pvr_match_power9;
+ pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
+ pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 |
+ PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER9;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER9;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_HV) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+ LPCR_DEE | LPCR_OEE))
+ | LPCR_MER | LPCR_GTSE | LPCR_TC |
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
+ pcc->mmu_model = POWERPC_MMU_3_00;
+#if defined(CONFIG_SOFTMMU)
+    /* Segment page sizes remain the same as POWER7 */
+ pcc->hash64_opts = &ppc_hash64_opts_POWER7;
+ pcc->radix_page_info = &POWER9_radix_page_info;
+ pcc->lrg_decr_bits = 56;
+ pcc->n_host_threads = 4;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER9;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+#ifdef CONFIG_SOFTMMU
+/*
+ * Radix page sizes and AP encodings for the device tree node
+ * ibm,processor-radix-AP-encodings, packed as an array of 32-bit integers
+ * in the form:
+ * 0bxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
+ * x -> AP encoding
+ * y -> radix mode supported page size (encoded as a shift)
+ */
+static struct ppc_radix_page_info POWER10_radix_page_info = {
+ .count = 4,
+ .entries = {
+ 0x0000000c, /* 4K - enc: 0x0 */
+ 0xa0000010, /* 64K - enc: 0x5 */
+ 0x20000015, /* 2M - enc: 0x1 */
+ 0x4000001e /* 1G - enc: 0x2 */
+ }
+};
+#endif /* CONFIG_SOFTMMU */
+
+static void init_proc_POWER10(CPUPPCState *env)
+{
+ /* Common Registers */
+ init_proc_book3s_common(env);
+ register_book3s_207_dbg_sprs(env);
+
+    /* Registers shared with POWER8 */
+ register_book3s_ids_sprs(env);
+ register_amr_sprs(env);
+ register_iamr_sprs(env);
+ register_book3s_purr_sprs(env);
+ register_power5p_common_sprs(env);
+ register_power5p_lpar_sprs(env);
+ register_power5p_ear_sprs(env);
+ register_power6_common_sprs(env);
+ register_power6_dbg_sprs(env);
+ register_power8_tce_address_control_sprs(env);
+ register_power8_ids_sprs(env);
+ register_power8_ebb_sprs(env);
+ register_power8_fscr_sprs(env);
+ register_power8_pmu_sup_sprs(env);
+ register_power8_pmu_user_sprs(env);
+ register_power8_tm_sprs(env);
+ register_power8_pspb_sprs(env);
+ register_vtb_sprs(env);
+ register_power8_ic_sprs(env);
+ register_power8_book4_sprs(env);
+ register_power8_rpr_sprs(env);
+ register_power9_mmu_sprs(env);
+
+ /* FIXME: Filter fields properly based on privilege level */
+ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
+ spr_read_generic, spr_write_generic,
+ KVM_REG_PPC_PSSCR, 0);
+
+ /* env variables */
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+
+ /* Allocate hardware IRQ controller */
+ init_excp_POWER10(env);
+ ppcPOWER9_irq_init(env_archcpu(env));
+}
+
+static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE) {
+ return true;
+ }
+ return false;
+}
+
+static bool cpu_has_work_POWER10(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
+ uint64_t psscr = env->spr[SPR_PSSCR];
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+
+ /* If EC is clear, just return true on any pending interrupt */
+ if (!(psscr & PSSCR_EC)) {
+ return true;
+ }
+ /* External Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_EEE)) {
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (heic == 0 || !msr_hv || msr_pr) {
+ return true;
+ }
+ }
+ /* Decrementer Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_DEE)) {
+ return true;
+ }
+ /* Machine Check or Hypervisor Maintenance Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK |
+ 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) {
+ return true;
+ }
+ /* Privileged Doorbell Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_PDEE)) {
+ return true;
+ }
+ /* Hypervisor Doorbell Exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_HDEE)) {
+ return true;
+ }
+ /* Hypervisor virtualization exception */
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) &&
+ (env->spr[SPR_LPCR] & LPCR_HVEE)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER10";
+ dc->desc = "POWER10";
+ pcc->pvr_match = ppc_pvr_match_power10;
+ pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 |
+ PCR_COMPAT_3_00;
+ pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 |
+ PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER10;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER10;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_HV) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+ LPCR_DEE | LPCR_OEE))
+ | LPCR_MER | LPCR_GTSE | LPCR_TC |
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
+ /* DD2 adds an extra HAIL bit */
+ pcc->lpcr_mask |= LPCR_HAIL;
+
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
+ pcc->mmu_model = POWERPC_MMU_3_00;
+#if defined(CONFIG_SOFTMMU)
+    /* Segment page sizes remain the same as POWER7 */
+ pcc->hash64_opts = &ppc_hash64_opts_POWER7;
+ pcc->radix_page_info = &POWER10_radix_page_info;
+ pcc->lrg_decr_bits = 56;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER10;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER9;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
+{
+ CPUPPCState *env = &cpu->env;
+
+ cpu->vhyp = vhyp;
+
+    /*
+     * When a virtual hypervisor is in use, we never allow the CPU to
+     * enter hypervisor mode itself
+     */
+ env->msr_mask &= ~MSR_HVB;
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+#endif /* defined(TARGET_PPC64) */
+
+/*****************************************************************************/
+/* Generic CPU instantiation routine */
+static void init_ppc_proc(PowerPCCPU *cpu)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+
+ env->irq_inputs = NULL;
+ /* Set all exception vectors to an invalid address */
+ for (i = 0; i < POWERPC_EXCP_NB; i++) {
+ env->excp_vectors[i] = (target_ulong)(-1ULL);
+ }
+ env->ivor_mask = 0x00000000;
+ env->ivpr_mask = 0x00000000;
+ /* Default MMU definitions */
+ env->nb_BATs = 0;
+ env->nb_tlb = 0;
+ env->nb_ways = 0;
+ env->tlb_type = TLB_NONE;
+#endif
+ /* Register SPR common to all PowerPC implementations */
+ register_generic_sprs(env);
+ spr_register(env, SPR_PVR, "PVR",
+ /* Linux permits userspace to read PVR */
+#if defined(CONFIG_LINUX_USER)
+ &spr_read_generic,
+#else
+ SPR_NOACCESS,
+#endif
+ SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->pvr);
+    /* Register SVR if it's defined to anything other than POWERPC_SVR_NONE */
+ if (pcc->svr != POWERPC_SVR_NONE) {
+ if (pcc->svr & POWERPC_SVR_E500) {
+ spr_register(env, SPR_E500_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr & ~POWERPC_SVR_E500);
+ } else {
+ spr_register(env, SPR_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr);
+ }
+ }
+ /* PowerPC implementation specific initialisations (SPRs, timers, ...) */
+ (*pcc->init_proc)(env);
+
+#if !defined(CONFIG_USER_ONLY)
+ ppc_gdb_gen_spr_xml(cpu);
+#endif
+
+ /* MSR bits & flags consistency checks */
+    if (env->msr_mask & (1 << 25)) { /* MSR_SPE / MSR_VR */
+ switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ case POWERPC_FLAG_SPE:
+ case POWERPC_FLAG_VRE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+    if (env->msr_mask & (1 << 17)) { /* MSR_TGPR / MSR_CE */
+ switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ case POWERPC_FLAG_TGPR:
+ case POWERPC_FLAG_CE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+    if (env->msr_mask & (1 << 10)) { /* MSR_SE / MSR_DWE / MSR_UBLE */
+ switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ case POWERPC_FLAG_SE:
+ case POWERPC_FLAG_DWE:
+ case POWERPC_FLAG_UBLE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+    if (env->msr_mask & (1 << 9)) { /* MSR_BE / MSR_DE */
+ switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ case POWERPC_FLAG_BE:
+ case POWERPC_FLAG_DE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+    if (env->msr_mask & (1 << 2)) { /* MSR_PX / MSR_PMM */
+ switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ case POWERPC_FLAG_PX:
+ case POWERPC_FLAG_PMM:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) {
+ fprintf(stderr, "PowerPC flags inconsistency\n"
+ "Should define the time-base and decrementer clock source\n");
+ exit(1);
+ }
+ /* Allocate TLBs buffer when needed */
+#if !defined(CONFIG_USER_ONLY)
+ if (env->nb_tlb != 0) {
+ int nb_tlb = env->nb_tlb;
+ if (env->id_tlbs != 0) {
+ nb_tlb *= 2;
+ }
+ switch (env->tlb_type) {
+ case TLB_6XX:
+ env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb);
+ break;
+ case TLB_EMB:
+ env->tlb.tlbe = g_new0(ppcemb_tlb_t, nb_tlb);
+ break;
+ case TLB_MAS:
+ env->tlb.tlbm = g_new0(ppcmas_tlb_t, nb_tlb);
+ break;
+ }
+ /* Pre-compute some useful values */
+ env->tlb_per_way = env->nb_tlb / env->nb_ways;
+ }
+ if (env->irq_inputs == NULL) {
+ warn_report("no internal IRQ controller registered."
+ " Attempt QEMU to crash very soon !");
+ }
+#endif
+ if (env->check_pow == NULL) {
+ warn_report("no power management check handler registered."
+ " Attempt QEMU to crash very soon !");
+ }
+}
+
+
+static void ppc_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ PowerPCCPU *cpu = POWERPC_CPU(dev);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (cpu->vcpu_id == UNASSIGNED_CPU_INDEX) {
+ cpu->vcpu_id = cs->cpu_index;
+ }
+
+ if (tcg_enabled()) {
+ if (ppc_fixup_cpu(cpu) != 0) {
+ error_setg(errp, "Unable to emulate selected CPU with TCG");
+ goto unrealize;
+ }
+ }
+
+ create_ppc_opcodes(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ goto unrealize;
+ }
+ init_ppc_proc(cpu);
+
+ ppc_gdb_init(cs, pcc);
+ qemu_init_vcpu(cs);
+
+ pcc->parent_realize(dev, errp);
+
+ return;
+
+unrealize:
+ cpu_exec_unrealizefn(cs);
+}
+
+static void ppc_cpu_unrealize(DeviceState *dev)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(dev);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ pcc->parent_unrealize(dev);
+
+ cpu_remove_sync(CPU(cpu));
+
+ destroy_ppc_opcodes(cpu);
+}
+
+static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ uint32_t pvr = *(uint32_t *)b;
+ PowerPCCPUClass *pcc = (PowerPCCPUClass *)a;
+
+ /* -cpu host does a PVR lookup during construction */
+ if (unlikely(strcmp(object_class_get_name(oc),
+ TYPE_HOST_POWERPC_CPU) == 0)) {
+ return -1;
+ }
+
+ return pcc->pvr == pvr ? 0 : -1;
+}
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr)
+{
+ GSList *list, *item;
+ PowerPCCPUClass *pcc = NULL;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ item = g_slist_find_custom(list, &pvr, ppc_cpu_compare_class_pvr);
+ if (item != NULL) {
+ pcc = POWERPC_CPU_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ return pcc;
+}
+
+static gint ppc_cpu_compare_class_pvr_mask(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ uint32_t pvr = *(uint32_t *)b;
+ PowerPCCPUClass *pcc = (PowerPCCPUClass *)a;
+
+ /* -cpu host does a PVR lookup during construction */
+ if (unlikely(strcmp(object_class_get_name(oc),
+ TYPE_HOST_POWERPC_CPU) == 0)) {
+ return -1;
+ }
+
+ if (pcc->pvr_match(pcc, pvr)) {
+ return 0;
+ }
+
+ return -1;
+}
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr)
+{
+ GSList *list, *item;
+ PowerPCCPUClass *pcc = NULL;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, true);
+ item = g_slist_find_custom(list, &pvr, ppc_cpu_compare_class_pvr_mask);
+ if (item != NULL) {
+ pcc = POWERPC_CPU_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ return pcc;
+}
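+
+/*
+ * Editor's note: unlike the exact-match lookup above, this variant asks
+ * each class's pvr_match() hook, so any PVR revision of a family can
+ * resolve to that family's class; KVM relies on this when typing the
+ * "host" CPU from the host PVR.
+ */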
+
+static const char *ppc_cpu_lookup_alias(const char *alias)
+{
+ int ai;
+
+ for (ai = 0; ppc_cpu_aliases[ai].alias != NULL; ai++) {
+ if (strcmp(ppc_cpu_aliases[ai].alias, alias) == 0) {
+ return ppc_cpu_aliases[ai].model;
+ }
+ }
+
+ return NULL;
+}
+
+static ObjectClass *ppc_cpu_class_by_name(const char *name)
+{
+ char *cpu_model, *typename;
+ ObjectClass *oc;
+ const char *p;
+ unsigned long pvr;
+
+    /*
+     * Look up by PVR if cpu_model is a valid 8-digit hex number
+     * (excluding the optional 0x prefix)
+     */
+ if (!qemu_strtoul(name, &p, 16, &pvr)) {
+ int len = p - name;
+ len = (len == 10) && (name[1] == 'x') ? len - 2 : len;
+ if ((len == 8) && (*p == '\0')) {
+ return OBJECT_CLASS(ppc_cpu_class_by_pvr(pvr));
+ }
+ }
+
+ cpu_model = g_ascii_strdown(name, -1);
+ p = ppc_cpu_lookup_alias(cpu_model);
+ if (p) {
+ g_free(cpu_model);
+ cpu_model = g_strdup(p);
+ }
+
+ typename = g_strdup_printf("%s" POWERPC_CPU_TYPE_SUFFIX, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ g_free(cpu_model);
+
+ return oc;
+}
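+
+/*
+ * Editor's note: illustrative lookups for the function above. An 8-digit
+ * hex string (a raw PVR, with or without the 0x prefix) resolves through
+ * ppc_cpu_class_by_pvr(); any other name is lower-cased, passed through
+ * the alias table (e.g. an alias such as "power9" mapping to a versioned
+ * model) and then matched against "<model>" POWERPC_CPU_TYPE_SUFFIX.
+ */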
+
+PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
+{
+ ObjectClass *oc = OBJECT_CLASS(pcc);
+
+ while (oc && !object_class_is_abstract(oc)) {
+ oc = object_class_get_parent(oc);
+ }
+ assert(oc);
+
+ return POWERPC_CPU_CLASS(oc);
+}
+
+/* Sort by PVR, ordering special case "host" last. */
+static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc_a = (ObjectClass *)a;
+ ObjectClass *oc_b = (ObjectClass *)b;
+ PowerPCCPUClass *pcc_a = POWERPC_CPU_CLASS(oc_a);
+ PowerPCCPUClass *pcc_b = POWERPC_CPU_CLASS(oc_b);
+ const char *name_a = object_class_get_name(oc_a);
+ const char *name_b = object_class_get_name(oc_b);
+
+ if (strcmp(name_a, TYPE_HOST_POWERPC_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, TYPE_HOST_POWERPC_CPU) == 0) {
+ return -1;
+ } else {
+ /* Avoid an integer overflow during subtraction */
+ if (pcc_a->pvr < pcc_b->pvr) {
+ return -1;
+ } else if (pcc_a->pvr > pcc_b->pvr) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ DeviceClass *family = DEVICE_CLASS(ppc_cpu_get_family_class(pcc));
+ const char *typename = object_class_get_name(oc);
+ char *name;
+ int i;
+
+ if (unlikely(strcmp(typename, TYPE_HOST_POWERPC_CPU) == 0)) {
+ return;
+ }
+
+ name = g_strndup(typename,
+ strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
+ qemu_printf("PowerPC %-16s PVR %08x\n", name, pcc->pvr);
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+ ObjectClass *alias_oc = ppc_cpu_class_by_name(alias->model);
+
+ if (alias_oc != oc) {
+ continue;
+ }
+ /*
+ * If running with KVM, we might update the family alias later, so
+ * avoid printing the wrong alias here and use "preferred" instead
+ */
+ if (strcmp(alias->alias, family->desc) == 0) {
+ qemu_printf("PowerPC %-16s (alias for preferred %s CPU)\n",
+ alias->alias, family->desc);
+ } else {
+ qemu_printf("PowerPC %-16s (alias for %s)\n",
+ alias->alias, name);
+ }
+ }
+ g_free(name);
+}
+
+void ppc_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ list = g_slist_sort(list, ppc_cpu_list_compare);
+ g_slist_foreach(list, ppc_cpu_list_entry, NULL);
+ g_slist_free(list);
+
+#ifdef CONFIG_KVM
+ qemu_printf("\n");
+ qemu_printf("PowerPC %s\n", "host");
+#endif
+}
+
+static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **first = user_data;
+ const char *typename;
+ CpuDefinitionInfo *info;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
+
+ QAPI_LIST_PREPEND(*first, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+ int i;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
+ g_slist_free(list);
+
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+ ObjectClass *oc;
+ CpuDefinitionInfo *info;
+
+ oc = ppc_cpu_class_by_name(alias->model);
+ if (oc == NULL) {
+ continue;
+ }
+
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strdup(alias->alias);
+ info->q_typename = g_strdup(object_class_get_name(oc));
+
+ QAPI_LIST_PREPEND(cpu_list, info);
+ }
+
+ return cpu_list;
+}
+
+static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ cpu->env.nip = value;
+}
+
+static bool ppc_cpu_has_work(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+}
+
+static void ppc_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ PowerPCCPU *cpu = POWERPC_CPU(s);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr;
+ int i;
+
+ pcc->parent_reset(dev);
+
+ msr = (target_ulong)0;
+ msr |= (target_ulong)MSR_HVB;
+ msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
+ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
+ msr |= (target_ulong)1 << MSR_EP;
+#if defined(DO_SINGLE_STEP) && 0
+ /* Single step trace mode */
+ msr |= (target_ulong)1 << MSR_SE;
+ msr |= (target_ulong)1 << MSR_BE;
+#endif
+#if defined(CONFIG_USER_ONLY)
+ msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
+ msr |= (target_ulong)1 << MSR_FE0; /* Allow floating point exceptions */
+ msr |= (target_ulong)1 << MSR_FE1;
+ msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
+ msr |= (target_ulong)1 << MSR_VSX; /* Allow VSX usage */
+ msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
+ msr |= (target_ulong)1 << MSR_PR;
+#if defined(TARGET_PPC64)
+ msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */
+#endif
+#if !defined(TARGET_WORDS_BIGENDIAN)
+ msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */
+ if (!((env->msr_mask >> MSR_LE) & 1)) {
+ fprintf(stderr, "Selected CPU does not support little-endian.\n");
+ exit(1);
+ }
+#endif
+#endif
+
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ msr |= (1ULL << MSR_SF);
+ }
+#endif
+
+ hreg_store_msr(env, msr, 1);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->nip = env->hreset_vector | env->excp_prefix;
+#if defined(CONFIG_TCG)
+ if (env->mmu_model != POWERPC_MMU_REAL) {
+ ppc_tlb_invalidate_all(env);
+ }
+#endif /* CONFIG_TCG */
+#endif
+
+ hreg_compute_hflags(env);
+ env->reserve_addr = (target_ulong)-1ULL;
+ /* Be sure no exception or interrupt is pending */
+ env->pending_interrupts = 0;
+ s->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ ppc_irq_reset(cpu);
+
+ /* tininess for underflow is detected before rounding */
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->fp_status);
+
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (!spr->name) {
+ continue;
+ }
+ env->spr[i] = spr->default_value;
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static bool ppc_cpu_is_big_endian(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_synchronize_state(cs);
+
+ return !msr_le;
+}
+
+#ifdef CONFIG_TCG
+static void ppc_cpu_exec_enter(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->cpu_exec_enter(cpu->vhyp, cpu);
+ }
+}
+
+static void ppc_cpu_exec_exit(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->cpu_exec_exit(cpu->vhyp, cpu);
+ }
+}
+#endif /* CONFIG_TCG */
+
+#endif /* !CONFIG_USER_ONLY */
+
+static void ppc_cpu_instance_init(Object *obj)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(obj);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+ cpu->vcpu_id = UNASSIGNED_CPU_INDEX;
+
+ env->msr_mask = pcc->msr_mask;
+ env->mmu_model = pcc->mmu_model;
+ env->excp_model = pcc->excp_model;
+ env->bus_model = pcc->bus_model;
+ env->insns_flags = pcc->insns_flags;
+ env->insns_flags2 = pcc->insns_flags2;
+ env->flags = pcc->flags;
+ env->bfd_mach = pcc->bfd_mach;
+ env->check_pow = pcc->check_pow;
+
+    /*
+     * Mark HV mode as supported if the CPU has an MSR_HV bit in the
+     * msr_mask. The mask can later be cleared by PAPR mode but the hv
+     * mode support will remain, thus enforcing that privileged
+     * instructions cannot be used by the guest in PAPR mode. For the 970
+     * we currently simply don't set HV in msr_mask, thus simulating an
+     * "Apple mode" 970. If we ever want to support 970 HV mode, we'll
+     * have to add a processor attribute of some sort.
+     */
+#if !defined(CONFIG_USER_ONLY)
+ env->has_hv_mode = !!(env->msr_mask & MSR_HVB);
+#endif
+
+ ppc_hash64_init(cpu);
+}
+
+static void ppc_cpu_instance_finalize(Object *obj)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(obj);
+
+ ppc_hash64_finalize(cpu);
+}
+
+static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ return pcc->pvr == pvr;
+}
+
+static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if ((env->hflags >> MSR_LE) & 1) {
+ info->endian = BFD_ENDIAN_LITTLE;
+ }
+ info->mach = env->bfd_mach;
+ if (!env->bfd_mach) {
+#ifdef TARGET_PPC64
+ info->mach = bfd_mach_ppc64;
+#else
+ info->mach = bfd_mach_ppc;
+#endif
+ }
+ info->disassembler_options = (char *)"any";
+ info->print_insn = print_insn_ppc;
+
+ info->cap_arch = CS_ARCH_PPC;
+#ifdef TARGET_PPC64
+ info->cap_mode = CS_MODE_64;
+#endif
+}
+
+static Property ppc_cpu_properties[] = {
+ DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
+ DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration,
+ false),
+ DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration,
+ false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps ppc_sysemu_ops = {
+ .get_phys_page_debug = ppc_cpu_get_phys_page_debug,
+ .write_elf32_note = ppc32_cpu_write_elf32_note,
+ .write_elf64_note = ppc64_cpu_write_elf64_note,
+ .virtio_is_big_endian = ppc_cpu_is_big_endian,
+ .legacy_vmsd = &vmstate_ppc_cpu,
+};
+#endif
+
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps ppc_tcg_ops = {
+ .initialize = ppc_translate_init,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = ppc_cpu_record_sigsegv,
+#else
+ .tlb_fill = ppc_cpu_tlb_fill,
+ .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
+ .do_interrupt = ppc_cpu_do_interrupt,
+ .cpu_exec_enter = ppc_cpu_exec_enter,
+ .cpu_exec_exit = ppc_cpu_exec_exit,
+ .do_unaligned_access = ppc_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void ppc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_parent_realize(dc, ppc_cpu_realize,
+ &pcc->parent_realize);
+ device_class_set_parent_unrealize(dc, ppc_cpu_unrealize,
+ &pcc->parent_unrealize);
+ pcc->pvr_match = ppc_pvr_match_default;
+ device_class_set_props(dc, ppc_cpu_properties);
+
+ device_class_set_parent_reset(dc, ppc_cpu_reset, &pcc->parent_reset);
+
+ cc->class_by_name = ppc_cpu_class_by_name;
+ cc->has_work = ppc_cpu_has_work;
+ cc->dump_state = ppc_cpu_dump_state;
+ cc->set_pc = ppc_cpu_set_pc;
+ cc->gdb_read_register = ppc_cpu_gdb_read_register;
+ cc->gdb_write_register = ppc_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &ppc_sysemu_ops;
+#endif
+
+ cc->gdb_num_core_regs = 71;
+#ifndef CONFIG_USER_ONLY
+ cc->gdb_get_dynamic_xml = ppc_gdb_get_dynamic_xml;
+#endif
+#ifdef USE_APPLE_GDB
+ cc->gdb_read_register = ppc_cpu_gdb_read_register_apple;
+ cc->gdb_write_register = ppc_cpu_gdb_write_register_apple;
+ cc->gdb_num_core_regs = 71 + 32;
+#endif
+
+ cc->gdb_arch_name = ppc_gdb_arch_name;
+#if defined(TARGET_PPC64)
+ cc->gdb_core_xml_file = "power64-core.xml";
+#else
+ cc->gdb_core_xml_file = "power-core.xml";
+#endif
+ cc->disas_set_info = ppc_disas_set_info;
+
+ dc->fw_name = "PowerPC,UNKNOWN";
+
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &ppc_tcg_ops;
+#endif /* CONFIG_TCG */
+}
+
+static const TypeInfo ppc_cpu_type_info = {
+ .name = TYPE_POWERPC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(PowerPCCPU),
+ .instance_align = __alignof__(PowerPCCPU),
+ .instance_init = ppc_cpu_instance_init,
+ .instance_finalize = ppc_cpu_instance_finalize,
+ .abstract = true,
+ .class_size = sizeof(PowerPCCPUClass),
+ .class_init = ppc_cpu_class_init,
+};
+
+#ifndef CONFIG_USER_ONLY
+static const TypeInfo ppc_vhyp_type_info = {
+ .name = TYPE_PPC_VIRTUAL_HYPERVISOR,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(PPCVirtualHypervisorClass),
+};
+#endif
+
+static void ppc_cpu_register_types(void)
+{
+ type_register_static(&ppc_cpu_type_info);
+#ifndef CONFIG_USER_ONLY
+ type_register_static(&ppc_vhyp_type_info);
+#endif
+}
+
+void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+#define RGPL 4
+#define RFPL 4
+
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR "
+ TARGET_FMT_lx " XER " TARGET_FMT_lx " CPU#%d\n",
+ env->nip, env->lr, env->ctr, cpu_read_xer(env),
+ cs->cpu_index);
+ qemu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
+ "%08x iidx %d didx %d\n",
+ env->msr, env->spr[SPR_HID0], env->hflags,
+ cpu_mmu_index(env, true), cpu_mmu_index(env, false));
+#if !defined(NO_TIMER_DUMP)
+ qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
+#if !defined(CONFIG_USER_ONLY)
+ " DECR " TARGET_FMT_lu
+#endif
+ "\n",
+ cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env)
+#if !defined(CONFIG_USER_ONLY)
+ , cpu_ppc_load_decr(env)
+#endif
+ );
+#endif
+ for (i = 0; i < 32; i++) {
+ if ((i & (RGPL - 1)) == 0) {
+ qemu_fprintf(f, "GPR%02d", i);
+ }
+ qemu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i));
+ if ((i & (RGPL - 1)) == (RGPL - 1)) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "CR ");
+    for (i = 0; i < 8; i++) {
+        qemu_fprintf(f, "%01x", env->crf[i]);
+    }
+ qemu_fprintf(f, " [");
+ for (i = 0; i < 8; i++) {
+ char a = '-';
+ if (env->crf[i] & 0x08) {
+ a = 'L';
+ } else if (env->crf[i] & 0x04) {
+ a = 'G';
+ } else if (env->crf[i] & 0x02) {
+ a = 'E';
+ }
+ qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
+ }
+ qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
+ env->reserve_addr);
+
+ if (flags & CPU_DUMP_FPU) {
+ for (i = 0; i < 32; i++) {
+ if ((i & (RFPL - 1)) == 0) {
+ qemu_fprintf(f, "FPR%02d", i);
+ }
+ qemu_fprintf(f, " %016" PRIx64, *cpu_fpr_ptr(env, i));
+ if ((i & (RFPL - 1)) == (RFPL - 1)) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ qemu_fprintf(f, "FPSCR " TARGET_FMT_lx "\n", env->fpscr);
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ qemu_fprintf(f, " SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx
+ " PVR " TARGET_FMT_lx " VRSAVE " TARGET_FMT_lx "\n",
+ env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ env->spr[SPR_PVR], env->spr[SPR_VRSAVE]);
+
+ qemu_fprintf(f, "SPRG0 " TARGET_FMT_lx " SPRG1 " TARGET_FMT_lx
+ " SPRG2 " TARGET_FMT_lx " SPRG3 " TARGET_FMT_lx "\n",
+ env->spr[SPR_SPRG0], env->spr[SPR_SPRG1],
+ env->spr[SPR_SPRG2], env->spr[SPR_SPRG3]);
+
+ qemu_fprintf(f, "SPRG4 " TARGET_FMT_lx " SPRG5 " TARGET_FMT_lx
+ " SPRG6 " TARGET_FMT_lx " SPRG7 " TARGET_FMT_lx "\n",
+ env->spr[SPR_SPRG4], env->spr[SPR_SPRG5],
+ env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]);
+
+#if defined(TARGET_PPC64)
+ if (env->excp_model == POWERPC_EXCP_POWER7 ||
+ env->excp_model == POWERPC_EXCP_POWER8 ||
+ env->excp_model == POWERPC_EXCP_POWER9 ||
+ env->excp_model == POWERPC_EXCP_POWER10) {
+ qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n",
+ env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+ }
+#endif
+ if (env->excp_model == POWERPC_EXCP_BOOKE) {
+ qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx
+ " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
+ env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+
+ qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx
+ " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_TCR], env->spr[SPR_BOOKE_TSR],
+ env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
+
+ qemu_fprintf(f, " PIR " TARGET_FMT_lx " DECAR " TARGET_FMT_lx
+ " IVPR " TARGET_FMT_lx " EPCR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_PIR], env->spr[SPR_BOOKE_DECAR],
+ env->spr[SPR_BOOKE_IVPR], env->spr[SPR_BOOKE_EPCR]);
+
+ qemu_fprintf(f, " MCSR " TARGET_FMT_lx " SPRG8 " TARGET_FMT_lx
+ " EPR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MCSR], env->spr[SPR_BOOKE_SPRG8],
+ env->spr[SPR_BOOKE_EPR]);
+
+ /* FSL-specific */
+ qemu_fprintf(f, " MCAR " TARGET_FMT_lx " PID1 " TARGET_FMT_lx
+ " PID2 " TARGET_FMT_lx " SVR " TARGET_FMT_lx "\n",
+ env->spr[SPR_Exxx_MCAR], env->spr[SPR_BOOKE_PID1],
+ env->spr[SPR_BOOKE_PID2], env->spr[SPR_E500_SVR]);
+
+ /*
+ * IVORs are left out as they are large and do not change often --
+ * they can be read with "p $ivor0", "p $ivor1", etc.
+ */
+ }
+
+#if defined(TARGET_PPC64)
+ if (env->flags & POWERPC_FLAG_CFAR) {
+ qemu_fprintf(f, " CFAR " TARGET_FMT_lx"\n", env->cfar);
+ }
+#endif
+
+ if (env->spr_cb[SPR_LPCR].name) {
+ qemu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
+ }
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_3_00:
+#endif
+ if (env->spr_cb[SPR_SDR1].name) { /* SDR1 Exists */
+ qemu_fprintf(f, " SDR1 " TARGET_FMT_lx " ", env->spr[SPR_SDR1]);
+ }
+ if (env->spr_cb[SPR_PTCR].name) { /* PTCR Exists */
+ qemu_fprintf(f, " PTCR " TARGET_FMT_lx " ", env->spr[SPR_PTCR]);
+ }
+ qemu_fprintf(f, " DAR " TARGET_FMT_lx " DSISR " TARGET_FMT_lx "\n",
+ env->spr[SPR_DAR], env->spr[SPR_DSISR]);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ qemu_fprintf(f, " MAS0 " TARGET_FMT_lx " MAS1 " TARGET_FMT_lx
+ " MAS2 " TARGET_FMT_lx " MAS3 " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MAS0], env->spr[SPR_BOOKE_MAS1],
+ env->spr[SPR_BOOKE_MAS2], env->spr[SPR_BOOKE_MAS3]);
+
+ qemu_fprintf(f, " MAS4 " TARGET_FMT_lx " MAS6 " TARGET_FMT_lx
+ " MAS7 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MAS4], env->spr[SPR_BOOKE_MAS6],
+ env->spr[SPR_BOOKE_MAS7], env->spr[SPR_BOOKE_PID]);
+
+ qemu_fprintf(f, "MMUCFG " TARGET_FMT_lx " TLB0CFG " TARGET_FMT_lx
+ " TLB1CFG " TARGET_FMT_lx "\n",
+ env->spr[SPR_MMUCFG], env->spr[SPR_BOOKE_TLB0CFG],
+ env->spr[SPR_BOOKE_TLB1CFG]);
+ break;
+ default:
+ break;
+ }
+#endif
+
+#undef RGPL
+#undef RFPL
+}
+type_init(ppc_cpu_register_types)
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
new file mode 100644
index 000000000..0d01ac3de
--- /dev/null
+++ b/target/ppc/dfp_helper.c
@@ -0,0 +1,1393 @@
+/*
+ * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+#define DECNUMDIGITS 34
+#include "libdecnumber/decContext.h"
+#include "libdecnumber/decNumber.h"
+#include "libdecnumber/dpd/decimal32.h"
+#include "libdecnumber/dpd/decimal64.h"
+#include "libdecnumber/dpd/decimal128.h"
+
+
+static void get_dfp64(ppc_vsr_t *dst, ppc_fprp_t *dfp)
+{
+ dst->VsrD(1) = dfp->VsrD(0);
+}
+
+static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp)
+{
+ dst->VsrD(0) = dfp[0].VsrD(0);
+ dst->VsrD(1) = dfp[1].VsrD(0);
+}
+
+static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src)
+{
+ dfp->VsrD(0) = src->VsrD(1);
+}
+
+static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src)
+{
+ dfp[0].VsrD(0) = src->VsrD(0);
+ dfp[1].VsrD(0) = src->VsrD(1);
+}
+
+static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src)
+{
+ *dst = *src;
+}
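+
+/*
+ * Editor's note: the accessors above reflect the storage layout of DFP
+ * operands: a 64-bit value lives in the FPR doubleword (VsrD(0)) of one
+ * register, while a 128-bit value spans the FPR doublewords of a register
+ * pair.
+ */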
+
+struct PPC_DFP {
+ CPUPPCState *env;
+ ppc_vsr_t vt, va, vb;
+ decNumber t, a, b;
+ decContext context;
+ uint8_t crbf;
+};
+
+static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr)
+{
+ enum rounding rnd;
+
+ switch ((fpscr & FP_DRN) >> FPSCR_DRN0) {
+ case 0:
+ rnd = DEC_ROUND_HALF_EVEN;
+ break;
+ case 1:
+ rnd = DEC_ROUND_DOWN;
+ break;
+ case 2:
+ rnd = DEC_ROUND_CEILING;
+ break;
+ case 3:
+ rnd = DEC_ROUND_FLOOR;
+ break;
+ case 4:
+ rnd = DEC_ROUND_HALF_UP;
+ break;
+ case 5:
+ rnd = DEC_ROUND_HALF_DOWN;
+ break;
+ case 6:
+ rnd = DEC_ROUND_UP;
+ break;
+ case 7:
+ rnd = DEC_ROUND_05UP;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ decContextSetRounding(context, rnd);
+}
+
+static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
+ struct PPC_DFP *dfp)
+{
+ enum rounding rnd;
+ if (r == 0) {
+ switch (rmc & 3) {
+ case 0:
+ rnd = DEC_ROUND_HALF_EVEN;
+ break;
+ case 1:
+ rnd = DEC_ROUND_DOWN;
+ break;
+ case 2:
+ rnd = DEC_ROUND_HALF_UP;
+ break;
+ case 3: /* use FPSCR rounding mode */
+ return;
+ default:
+            g_assert_not_reached();
+ }
+ } else { /* r == 1 */
+ switch (rmc & 3) {
+ case 0:
+ rnd = DEC_ROUND_CEILING;
+ break;
+ case 1:
+ rnd = DEC_ROUND_FLOOR;
+ break;
+ case 2:
+ rnd = DEC_ROUND_UP;
+ break;
+ case 3:
+ rnd = DEC_ROUND_HALF_DOWN;
+ break;
+ default:
+            g_assert_not_reached();
+ }
+ }
+ decContextSetRounding(&dfp->context, rnd);
+}
+
+static void dfp_prepare_decimal64(struct PPC_DFP *dfp, ppc_fprp_t *a,
+ ppc_fprp_t *b, CPUPPCState *env)
+{
+ decContextDefault(&dfp->context, DEC_INIT_DECIMAL64);
+ dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
+ dfp->env = env;
+
+ if (a) {
+ get_dfp64(&dfp->va, a);
+ decimal64ToNumber((decimal64 *)&dfp->va.VsrD(1), &dfp->a);
+ } else {
+ dfp->va.VsrD(1) = 0;
+ decNumberZero(&dfp->a);
+ }
+
+ if (b) {
+ get_dfp64(&dfp->vb, b);
+ decimal64ToNumber((decimal64 *)&dfp->vb.VsrD(1), &dfp->b);
+ } else {
+ dfp->vb.VsrD(1) = 0;
+ decNumberZero(&dfp->b);
+ }
+}
+
+static void dfp_prepare_decimal128(struct PPC_DFP *dfp, ppc_fprp_t *a,
+ ppc_fprp_t *b, CPUPPCState *env)
+{
+ decContextDefault(&dfp->context, DEC_INIT_DECIMAL128);
+ dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
+ dfp->env = env;
+
+ if (a) {
+ get_dfp128(&dfp->va, a);
+ decimal128ToNumber((decimal128 *)&dfp->va, &dfp->a);
+ } else {
+ dfp->va.VsrD(0) = dfp->va.VsrD(1) = 0;
+ decNumberZero(&dfp->a);
+ }
+
+ if (b) {
+ get_dfp128(&dfp->vb, b);
+ decimal128ToNumber((decimal128 *)&dfp->vb, &dfp->b);
+ } else {
+ dfp->vb.VsrD(0) = dfp->vb.VsrD(1) = 0;
+ decNumberZero(&dfp->b);
+ }
+}
+
+static void dfp_finalize_decimal64(struct PPC_DFP *dfp)
+{
+ decimal64FromNumber((decimal64 *)&dfp->vt.VsrD(1), &dfp->t, &dfp->context);
+}
+
+static void dfp_finalize_decimal128(struct PPC_DFP *dfp)
+{
+ decimal128FromNumber((decimal128 *)&dfp->vt, &dfp->t, &dfp->context);
+}
+
+static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
+ uint64_t enabled)
+{
+ dfp->env->fpscr |= (flag | FP_FX);
+ if (dfp->env->fpscr & enabled) {
+ dfp->env->fpscr |= FP_FEX;
+ }
+}
+
+static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
+ decContext *context)
+{
+ uint64_t fprf = 0;
+
+ /* construct FPRF */
+ switch (decNumberClass(&dfp->t, context)) {
+ case DEC_CLASS_SNAN:
+ fprf = 0x01;
+ break;
+ case DEC_CLASS_QNAN:
+ fprf = 0x11;
+ break;
+ case DEC_CLASS_NEG_INF:
+ fprf = 0x09;
+ break;
+ case DEC_CLASS_NEG_NORMAL:
+ fprf = 0x08;
+ break;
+ case DEC_CLASS_NEG_SUBNORMAL:
+ fprf = 0x18;
+ break;
+ case DEC_CLASS_NEG_ZERO:
+ fprf = 0x12;
+ break;
+ case DEC_CLASS_POS_ZERO:
+ fprf = 0x02;
+ break;
+ case DEC_CLASS_POS_SUBNORMAL:
+ fprf = 0x14;
+ break;
+ case DEC_CLASS_POS_NORMAL:
+ fprf = 0x04;
+ break;
+ case DEC_CLASS_POS_INF:
+ fprf = 0x05;
+ break;
+ default:
+        g_assert_not_reached();
+ }
+ dfp->env->fpscr &= ~FP_FPRF;
+ dfp->env->fpscr |= (fprf << FPSCR_FPRF);
+}
+
+static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context);
+}
+
+static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp)
+{
+ decContext shortContext;
+ decContextDefault(&shortContext, DEC_INIT_DECIMAL32);
+ dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext);
+}
+
+static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp)
+{
+ decContext longContext;
+ decContextDefault(&longContext, DEC_INIT_DECIMAL64);
+ dfp_set_FPRF_from_FRT_with_context(dfp, &longContext);
+}
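+
+/*
+ * Editor's note: the _short/_long variants above re-run the classification
+ * against the narrower decimal32/decimal64 contexts, so a value that is
+ * normal in the working format but subnormal in the target format still
+ * sets FPRF accordingly.
+ */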
+
+static void dfp_check_for_OX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Overflow) {
+ dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE);
+ }
+}
+
+static void dfp_check_for_UX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Underflow) {
+ dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE);
+ }
+}
+
+static void dfp_check_for_XX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Inexact) {
+ dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE);
+ }
+}
+
+static void dfp_check_for_ZX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Division_by_zero) {
+ dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE);
+ }
+}
+
+static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp)
+{
+ if (decNumberIsSNaN(&dfp->t)) {
+ dfp->t.bits &= ~DECSNAN;
+ dfp->t.bits |= DECNAN;
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
+ int same = decNumberClass(&dfp->a, &dfp->context) ==
+ decNumberClass(&dfp->b, &dfp->context);
+ if ((same && testForSameSign) || (!same && !testForSameSign)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE);
+ }
+ }
+ }
+}
+
+static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp)
+{
+ dfp_check_for_VXISI(dfp, 0);
+}
+
+static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp)
+{
+ dfp_check_for_VXISI(dfp, 1);
+}
+
+static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) ||
+ (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Division_undefined) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXIDI(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXVC(struct PPC_DFP *dfp)
+{
+ if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
+{
+ if ((dfp->context.status & DEC_Invalid_operation) &&
+ (!decNumberIsSNaN(&dfp->a)) &&
+ (!decNumberIsSNaN(&dfp->b))) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
+ }
+}
+
+static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
+{
+ if (decNumberIsNaN(&dfp->t)) {
+ dfp->crbf = 1;
+ } else if (decNumberIsZero(&dfp->t)) {
+ dfp->crbf = 2;
+ } else if (decNumberIsNegative(&dfp->t)) {
+ dfp->crbf = 8;
+ } else {
+ dfp->crbf = 4;
+ }
+}
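+
+/*
+ * Editor's note: the values above follow the usual CR field encoding:
+ * 0x8 = LT (negative), 0x4 = GT (positive), 0x2 = EQ (zero),
+ * 0x1 = SO (unordered/NaN).
+ */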
+
+static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
+{
+ dfp->env->fpscr &= ~FP_FPCC;
+ dfp->env->fpscr |= (dfp->crbf << FPSCR_FPCC);
+}
+
+static inline void dfp_makeQNaN(decNumber *dn)
+{
+ dn->bits &= ~DECSPECIAL;
+ dn->bits |= DECNAN;
+}
+
+static inline int dfp_get_digit(decNumber *dn, int n)
+{
+ assert(DECDPUN == 3);
+ int unit = n / DECDPUN;
+ int dig = n % DECDPUN;
+ switch (dig) {
+ case 0:
+ return dn->lsu[unit] % 10;
+ case 1:
+ return (dn->lsu[unit] / 10) % 10;
+ case 2:
+ return dn->lsu[unit] / 100;
+ }
+ g_assert_not_reached();
+}
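+
+/*
+ * Editor's note: a worked example for the accessor above. With DECDPUN == 3
+ * each lsu[] unit packs three decimal digits, so for lsu[n / 3] == 345:
+ * n % 3 == 0 yields 5, n % 3 == 1 yields 4, and n % 3 == 2 yields 3.
+ */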
+
+#define DFP_HELPER_TAB(op, dnop, postprocs, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
+ ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
+ dfp_finalize_decimal##size(&dfp); \
+ postprocs(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
+}
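+
+/*
+ * Editor's note: for reference, DFP_HELPER_TAB(DADD, decNumberAdd,
+ * ADD_PPs, 64) below expands to:
+ *
+ *   void helper_DADD(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a,
+ *                    ppc_fprp_t *b)
+ *   {
+ *       struct PPC_DFP dfp;
+ *       dfp_prepare_decimal64(&dfp, a, b, env);
+ *       decNumberAdd(&dfp.t, &dfp.a, &dfp.b, &dfp.context);
+ *       dfp_finalize_decimal64(&dfp);
+ *       ADD_PPs(&dfp);
+ *       set_dfp64(t, &dfp.vt);
+ *   }
+ */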
+
+static void ADD_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXISI_add(dfp);
+}
+
+DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64)
+DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128)
+
+static void SUB_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXISI_subtract(dfp);
+}
+
+DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64)
+DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128)
+
+static void MUL_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXIMZ(dfp);
+}
+
+DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64)
+DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128)
+
+static void DIV_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_ZX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXZDZ(dfp);
+ dfp_check_for_VXIDI(dfp);
+}
+
+DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64)
+DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128)
+
+#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
+uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
+ dfp_finalize_decimal##size(&dfp); \
+ postprocs(&dfp); \
+ return dfp.crbf; \
+}
+
+static void CMPU_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_CRBF_from_T(dfp);
+ dfp_set_FPCC_from_CRBF(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64)
+DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128)
+
+static void CMPO_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_CRBF_from_T(dfp);
+ dfp_set_FPCC_from_CRBF(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXVC(dfp);
+}
+
+DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64)
+DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128)
+
+#define DFP_HELPER_TSTDC(op, size) \
+uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
+{ \
+ struct PPC_DFP dfp; \
+ int match = 0; \
+ \
+    dfp_prepare_decimal##size(&dfp, a, NULL, env);                       \
+ \
+ match |= (dcm & 0x20) && decNumberIsZero(&dfp.a); \
+ match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \
+ match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context); \
+ match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a); \
+ match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a); \
+ match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a); \
+ \
+ if (decNumberIsNegative(&dfp.a)) { \
+ dfp.crbf = match ? 0xA : 0x8; \
+ } else { \
+ dfp.crbf = match ? 0x2 : 0x0; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTDC(DTSTDC, 64)
+DFP_HELPER_TSTDC(DTSTDCQ, 128)
+
+#define DFP_HELPER_TSTDG(op, size) \
+uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
+{ \
+ struct PPC_DFP dfp; \
+ int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero, \
+ is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero, \
+ match; \
+ \
+    dfp_prepare_decimal##size(&dfp, a, NULL, env);                       \
+ \
+ if ((size) == 64) { \
+ minexp = -398; \
+ maxexp = 369; \
+ nzero_digits = 16; \
+ nzero_idx = 5; \
+ } else if ((size) == 128) { \
+ minexp = -6176; \
+ maxexp = 6111; \
+ nzero_digits = 34; \
+ nzero_idx = 11; \
+ } \
+ \
+ is_negative = decNumberIsNegative(&dfp.a); \
+ is_zero = decNumberIsZero(&dfp.a); \
+ is_extreme_exp = (dfp.a.exponent == maxexp) || \
+ (dfp.a.exponent == minexp); \
+ is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context); \
+ is_normal = decNumberIsNormal(&dfp.a, &dfp.context); \
+ leftmost_is_nonzero = (dfp.a.digits == nzero_digits) && \
+ (dfp.a.lsu[nzero_idx] != 0); \
+ match = 0; \
+ \
+ match |= (dcm & 0x20) && is_zero && !is_extreme_exp; \
+ match |= (dcm & 0x10) && is_zero && is_extreme_exp; \
+ match |= (dcm & 0x08) && \
+ (is_subnormal || (is_normal && is_extreme_exp)); \
+ match |= (dcm & 0x04) && is_normal && !is_extreme_exp && \
+ !leftmost_is_nonzero; \
+ match |= (dcm & 0x02) && is_normal && !is_extreme_exp && \
+ leftmost_is_nonzero; \
+ match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a); \
+ \
+ if (is_negative) { \
+ dfp.crbf = match ? 0xA : 0x8; \
+ } else { \
+ dfp.crbf = match ? 0x2 : 0x0; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTDG(DTSTDG, 64)
+DFP_HELPER_TSTDG(DTSTDGQ, 128)
+
+#define DFP_HELPER_TSTEX(op, size) \
+uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ int expa, expb, a_is_special, b_is_special; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ \
+ expa = dfp.a.exponent; \
+ expb = dfp.b.exponent; \
+ a_is_special = decNumberIsSpecial(&dfp.a); \
+ b_is_special = decNumberIsSpecial(&dfp.b); \
+ \
+ if (a_is_special || b_is_special) { \
+ int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \
+ int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \
+ dfp.crbf = (atype ^ btype) ? 0x1 : 0x2; \
+ } else if (expa < expb) { \
+ dfp.crbf = 0x8; \
+ } else if (expa > expb) { \
+ dfp.crbf = 0x4; \
+ } else { \
+ dfp.crbf = 0x2; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTEX(DTSTEX, 64)
+DFP_HELPER_TSTEX(DTSTEXQ, 128)
+
+#define DFP_HELPER_TSTSF(op, size) \
+uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned k; \
+ ppc_vsr_t va; \
+ \
+    dfp_prepare_decimal##size(&dfp, NULL, b, env);                       \
+ \
+ get_dfp64(&va, a); \
+ k = va.VsrD(1) & 0x3F; \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ dfp.crbf = 1; \
+ } else if (k == 0) { \
+ dfp.crbf = 4; \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ /* Zero has no sig digits */ \
+ dfp.crbf = 4; \
+ } else { \
+ unsigned nsd = dfp.b.digits; \
+ if (k < nsd) { \
+ dfp.crbf = 8; \
+ } else if (k > nsd) { \
+ dfp.crbf = 4; \
+ } else { \
+ dfp.crbf = 2; \
+ } \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTSF(DTSTSF, 64)
+DFP_HELPER_TSTSF(DTSTSFQ, 128)
+
+#define DFP_HELPER_TSTSFI(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned uim; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ uim = a & 0x3F; \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ dfp.crbf = 1; \
+ } else if (uim == 0) { \
+ dfp.crbf = 4; \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ /* Zero has no sig digits */ \
+ dfp.crbf = 4; \
+ } else { \
+ unsigned nsd = dfp.b.digits; \
+ if (uim < nsd) { \
+ dfp.crbf = 8; \
+ } else if (uim > nsd) { \
+ dfp.crbf = 4; \
+ } else { \
+ dfp.crbf = 2; \
+ } \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTSFI(DTSTSFI, 64)
+DFP_HELPER_TSTSFI(DTSTSFIQ, 128)
+
+static void QUA_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXCVI(dfp);
+}
+
+static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp)
+{
+ dfp_set_round_mode_from_immediate(0, rmc, dfp);
+ decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context);
+ if (decNumberIsSNaN(&dfp->a)) {
+ dfp->t = dfp->a;
+ dfp_makeQNaN(&dfp->t);
+ } else if (decNumberIsSNaN(&dfp->b)) {
+ dfp->t = dfp->b;
+ dfp_makeQNaN(&dfp->t);
+ } else if (decNumberIsQNaN(&dfp->a)) {
+ dfp->t = dfp->a;
+ } else if (decNumberIsQNaN(&dfp->b)) {
+ dfp->t = dfp->b;
+ }
+}
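+
+/*
+ * dfp_quantize rounds b to the exponent of a: quantizing b = 1.2345
+ * against a = 1 * 10^-2 with a round-to-nearest rmc yields t = 1.23.
+ * NaN operands propagate to the target instead, with any SNaN first
+ * converted to the corresponding QNaN.
+ */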
+
+#define DFP_HELPER_QUAI(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
+ uint32_t te, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberFromUInt32(&dfp.a, 1); \
+ dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \
+ \
+ dfp_quantize(rmc, &dfp); \
+ dfp_finalize_decimal##size(&dfp); \
+ QUA_PPs(&dfp); \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_QUAI(DQUAI, 64)
+DFP_HELPER_QUAI(DQUAIQ, 128)
+
+#define DFP_HELPER_QUA(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
+ ppc_fprp_t *b, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ \
+ dfp_quantize(rmc, &dfp); \
+ dfp_finalize_decimal##size(&dfp); \
+ QUA_PPs(&dfp); \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_QUA(DQUA, 64)
+DFP_HELPER_QUA(DQUAQ, 128)
+
+static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
+ struct PPC_DFP *dfp)
+{
+ int msd_orig, msd_rslt;
+
+ if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) {
+ dfp->t = dfp->b;
+ if (decNumberIsSNaN(&dfp->b)) {
+ dfp_makeQNaN(&dfp->t);
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FPSCR_VE);
+ }
+ return;
+ }
+
+    /*
+     * Reround is equivalent to quantizing b with 1 * 10^n, where
+     * n = exp(b) + numDigits(b) - reference_significance.
+     */
+
+ decNumberFromUInt32(&dfp->a, 1);
+ dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig;
+
+ if (unlikely(dfp->a.exponent > xmax)) {
+ dfp->t.digits = 0;
+ dfp->t.bits &= ~DECNEG;
+ dfp_makeQNaN(&dfp->t);
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ return;
+ }
+
+ dfp_quantize(rmc, dfp);
+
+ msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits-1);
+ msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits-1);
+
+    /*
+     * If the quantization resulted in rounding up to the next magnitude,
+     * then we need to shift the significand and adjust the exponent.
+     */
+
+ if (unlikely((msd_orig == 9) && (msd_rslt == 1))) {
+
+ decNumber negone;
+
+ decNumberFromInt32(&negone, -1);
+ decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context);
+ dfp->t.exponent++;
+
+ if (unlikely(dfp->t.exponent > xmax)) {
+ dfp_makeQNaN(&dfp->t);
+ dfp->t.digits = 0;
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
+ /* Inhibit XX in this case */
+ decContextClearStatus(&dfp->context, DEC_Inexact);
+ }
+ }
+}
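+
+/*
+ * Worked example for the carry-out case above: rerounding b = 999 to a
+ * reference significance of 2 quantizes against 1 * 10^(0 + 3 - 2) and,
+ * under round-to-nearest, rounds up to 100 * 10^1.  The leading digit
+ * has gone from 9 to 1, so the significand is shifted right one place
+ * and the exponent bumped, giving 10 * 10^2 (i.e. 1.0E+3) with the
+ * requested two significant digits.
+ */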
+
+#define DFP_HELPER_RRND(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
+ ppc_fprp_t *b, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ ppc_vsr_t va; \
+ int32_t ref_sig; \
+ int32_t xmax = ((size) == 64) ? 369 : 6111; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ get_dfp64(&va, a); \
+ ref_sig = va.VsrD(1) & 0x3f; \
+ \
+ _dfp_reround(rmc, ref_sig, xmax, &dfp); \
+ dfp_finalize_decimal##size(&dfp); \
+ QUA_PPs(&dfp); \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_RRND(DRRND, 64)
+DFP_HELPER_RRND(DRRNDQ, 128)
+
+#define DFP_HELPER_RINT(op, postprocs, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
+ uint32_t r, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ dfp_set_round_mode_from_immediate(r, rmc, &dfp); \
+ decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \
+ dfp_finalize_decimal##size(&dfp); \
+ postprocs(&dfp); \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+static void RINTX_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64)
+DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128)
+
+static void RINTN_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64)
+DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128)
+
+void helper_DCTDP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ ppc_vsr_t vb;
+ uint32_t b_short;
+
+ get_dfp64(&vb, b);
+ b_short = (uint32_t)vb.VsrD(1);
+
+ dfp_prepare_decimal64(&dfp, 0, 0, env);
+ decimal32ToNumber((decimal32 *)&b_short, &dfp.t);
+ dfp_finalize_decimal64(&dfp);
+ set_dfp64(t, &dfp.vt);
+ dfp_set_FPRF_from_FRT(&dfp);
+}
+
+void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ ppc_vsr_t vb;
+ dfp_prepare_decimal128(&dfp, 0, 0, env);
+ get_dfp64(&vb, b);
+ decimal64ToNumber((decimal64 *)&vb.VsrD(1), &dfp.t);
+
+ dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
+ dfp_set_FPRF_from_FRT(&dfp);
+
+ dfp_finalize_decimal128(&dfp);
+ set_dfp128(t, &dfp.vt);
+}
+
+void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ uint32_t t_short = 0;
+ ppc_vsr_t vt;
+ dfp_prepare_decimal64(&dfp, 0, b, env);
+ decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context);
+ decimal32ToNumber((decimal32 *)&t_short, &dfp.t);
+
+ dfp_set_FPRF_from_FRT_short(&dfp);
+ dfp_check_for_OX(&dfp);
+ dfp_check_for_UX(&dfp);
+ dfp_check_for_XX(&dfp);
+
+ vt.VsrD(1) = (uint64_t)t_short;
+ set_dfp64(t, &vt);
+}
+
+void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ dfp_prepare_decimal128(&dfp, 0, b, env);
+ decimal64FromNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.b, &dfp.context);
+ decimal64ToNumber((decimal64 *)&dfp.vt.VsrD(1), &dfp.t);
+
+ dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
+ dfp_set_FPRF_from_FRT_long(&dfp);
+ dfp_check_for_OX(&dfp);
+ dfp_check_for_UX(&dfp);
+ dfp_check_for_XX(&dfp);
+
+ dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0;
+ dfp_finalize_decimal64(&dfp);
+ set_dfp128(t, &dfp.vt);
+}
+
+#define DFP_HELPER_CFFIX(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ ppc_vsr_t vb; \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ get_dfp64(&vb, b); \
+ decNumberFromInt64(&dfp.t, (int64_t)vb.VsrD(1)); \
+ dfp_finalize_decimal##size(&dfp); \
+ CFFIX_PPs(&dfp); \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+static void CFFIX_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+}
+
+DFP_HELPER_CFFIX(DCFFIX, 64)
+DFP_HELPER_CFFIX(DCFFIXQ, 128)
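+
+/*
+ * Example for the convert-from-fixed helpers above: DCFFIX with an FRB
+ * doubleword of 0xFFFFFFFFFFFFFF85 (-123) produces the decimal value
+ * -123.  Inexactness (XX) can only occur for the 64-bit form, when the
+ * integer needs more than decimal64's 16 significand digits.
+ */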
+
+void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b)
+{
+ struct PPC_DFP dfp;
+
+ dfp_prepare_decimal128(&dfp, NULL, NULL, env);
+ decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0));
+ dfp_finalize_decimal128(&dfp);
+ CFFIX_PPs(&dfp);
+
+ set_dfp128(t, &dfp.vt);
+}
+
+#define DFP_HELPER_CTFIX(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ uint64_t invalid_flags = FP_VX | FP_VXCVI; \
+ if (decNumberIsInfinite(&dfp.b)) { \
+ dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \
+ INT64_MAX; \
+ } else { /* NaN */ \
+ dfp.vt.VsrD(1) = INT64_MIN; \
+ if (decNumberIsSNaN(&dfp.b)) { \
+ invalid_flags |= FP_VXSNAN; \
+ } \
+ } \
+ dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ dfp.vt.VsrD(1) = 0; \
+ } else { \
+ decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); \
+ dfp.vt.VsrD(1) = decNumberIntegralToInt64(&dfp.b, &dfp.context); \
+ if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { \
+ dfp.vt.VsrD(1) = decNumberIsNegative(&dfp.b) ? INT64_MIN : \
+ INT64_MAX; \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); \
+ } else { \
+ dfp_check_for_XX(&dfp); \
+ } \
+ } \
+ \
+ set_dfp64(t, &dfp.vt); \
+}
+
+DFP_HELPER_CTFIX(DCTFIX, 64)
+DFP_HELPER_CTFIX(DCTFIXQ, 128)
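+
+/*
+ * The convert-to-fixed helpers above saturate: an out-of-range finite
+ * value or an infinity produces INT64_MIN or INT64_MAX according to its
+ * sign and sets VX/VXCVI; a NaN produces INT64_MIN, adding VXSNAN when
+ * it is signaling; in-range values are rounded to integral and may set
+ * only the inexact flag XX.
+ */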
+
+void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ dfp_prepare_decimal128(&dfp, 0, b, env);
+
+ if (unlikely(decNumberIsSpecial(&dfp.b))) {
+ uint64_t invalid_flags = FP_VX | FP_VXCVI;
+ if (decNumberIsInfinite(&dfp.b)) {
+ if (decNumberIsNegative(&dfp.b)) {
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ dfp.vt.VsrD(0) = INT64_MAX;
+ dfp.vt.VsrD(1) = UINT64_MAX;
+ }
+ } else { /* NaN */
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ if (decNumberIsSNaN(&dfp.b)) {
+ invalid_flags |= FP_VXSNAN;
+ }
+ }
+ dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE);
+ } else if (unlikely(decNumberIsZero(&dfp.b))) {
+ dfp.vt.VsrD(0) = 0;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context);
+ decNumberIntegralToInt128(&dfp.b, &dfp.context,
+ &dfp.vt.VsrD(1), &dfp.vt.VsrD(0));
+ if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) {
+ if (decNumberIsNegative(&dfp.b)) {
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ dfp.vt.VsrD(0) = INT64_MAX;
+ dfp.vt.VsrD(1) = UINT64_MAX;
+ }
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);
+ } else {
+ dfp_check_for_XX(&dfp);
+ }
+ }
+
+ set_dfp128_to_avr(t, &dfp.vt);
+}
+
+static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit,
+ unsigned n)
+{
+ t->VsrD(1) |= ((uint64_t)(digit & 0xF) << (n << 2));
+}
+
+static inline void dfp_set_bcd_digit_128(ppc_vsr_t *t, uint8_t digit,
+ unsigned n)
+{
+ t->VsrD((n & 0x10) ? 0 : 1) |=
+ ((uint64_t)(digit & 0xF) << ((n & 15) << 2));
+}
+
+static inline void dfp_set_sign_64(ppc_vsr_t *t, uint8_t sgn)
+{
+ t->VsrD(1) <<= 4;
+ t->VsrD(1) |= (sgn & 0xF);
+}
+
+static inline void dfp_set_sign_128(ppc_vsr_t *t, uint8_t sgn)
+{
+ t->VsrD(0) <<= 4;
+ t->VsrD(0) |= (t->VsrD(1) >> 60);
+ t->VsrD(1) <<= 4;
+ t->VsrD(1) |= (sgn & 0xF);
+}
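+
+/*
+ * The helpers above pack the BCD result least-significant digit first:
+ * digit n lands in nibble n, and appending a sign nibble first shifts
+ * the accumulated value left by four.  Encoding +123 with the preferred
+ * plus sign 0xC therefore produces 0x123C in VsrD(1).
+ */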
+
+#define DFP_HELPER_DEDPD(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
+ uint32_t sp) \
+{ \
+ struct PPC_DFP dfp; \
+ uint8_t digits[34]; \
+ int i, N; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberGetBCD(&dfp.b, digits); \
+ dfp.vt.VsrD(0) = dfp.vt.VsrD(1) = 0; \
+ N = dfp.b.digits; \
+ \
+ for (i = 0; (i < N) && (i < (size)/4); i++) { \
+ dfp_set_bcd_digit_##size(&dfp.vt, digits[N - i - 1], i); \
+ } \
+ \
+ if (sp & 2) { \
+ uint8_t sgn; \
+ \
+ if (decNumberIsNegative(&dfp.b)) { \
+ sgn = 0xD; \
+ } else { \
+ sgn = ((sp & 1) ? 0xF : 0xC); \
+ } \
+ dfp_set_sign_##size(&dfp.vt, sgn); \
+ } \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_DEDPD(DDEDPD, 64)
+DFP_HELPER_DEDPD(DDEDPDQ, 128)
+
+static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n)
+{
+ return t->VsrD(1) >> ((n << 2) & 63) & 15;
+}
+
+static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n)
+{
+ return t->VsrD((n & 0x10) ? 0 : 1) >> ((n << 2) & 63) & 15;
+}
+
+#define DFP_HELPER_ENBCD(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
+ uint32_t s) \
+{ \
+ struct PPC_DFP dfp; \
+ uint8_t digits[32]; \
+ int n = 0, offset = 0, sgn = 0, nonzero = 0; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberZero(&dfp.t); \
+ \
+ if (s) { \
+ uint8_t sgnNibble = dfp_get_bcd_digit_##size(&dfp.vb, offset++); \
+ switch (sgnNibble) { \
+ case 0xD: \
+ case 0xB: \
+ sgn = 1; \
+ break; \
+ case 0xC: \
+ case 0xF: \
+ case 0xA: \
+ case 0xE: \
+ sgn = 0; \
+ break; \
+ default: \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ return; \
+ } \
+ } \
+ \
+ while (offset < (size) / 4) { \
+ n++; \
+ digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \
+ offset++); \
+        if (digits[(size) / 4 - n] > 9) {                                   \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ return; \
+ } else { \
+ nonzero |= (digits[(size) / 4 - n] > 0); \
+ } \
+ } \
+ \
+ if (nonzero) { \
+ decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \
+ } \
+ \
+ if (s && sgn) { \
+ dfp.t.bits |= DECNEG; \
+ } \
+ dfp_finalize_decimal##size(&dfp); \
+ dfp_set_FPRF_from_FRT(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_ENBCD(DENBCD, 64)
+DFP_HELPER_ENBCD(DENBCDQ, 128)
+
+#define DFP_HELPER_XEX(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ ppc_vsr_t vt; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ if (decNumberIsInfinite(&dfp.b)) { \
+ vt.VsrD(1) = -1; \
+ } else if (decNumberIsSNaN(&dfp.b)) { \
+ vt.VsrD(1) = -3; \
+ } else if (decNumberIsQNaN(&dfp.b)) { \
+ vt.VsrD(1) = -2; \
+ } else { \
+ assert(0); \
+ } \
+ set_dfp64(t, &vt); \
+ } else { \
+ if ((size) == 64) { \
+ vt.VsrD(1) = dfp.b.exponent + 398; \
+ } else if ((size) == 128) { \
+ vt.VsrD(1) = dfp.b.exponent + 6176; \
+ } else { \
+ assert(0); \
+ } \
+ set_dfp64(t, &vt); \
+ } \
+}
+
+DFP_HELPER_XEX(DXEX, 64)
+DFP_HELPER_XEX(DXEXQ, 128)
+
+static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw)
+{
+ t->VsrD(1) &= 0x8003ffffffffffffULL;
+ t->VsrD(1) |= (raw << (63 - 13));
+}
+
+static void dfp_set_raw_exp_128(ppc_vsr_t *t, uint64_t raw)
+{
+ t->VsrD(0) &= 0x80003fffffffffffULL;
+ t->VsrD(0) |= (raw << (63 - 17));
+}
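+
+/*
+ * Per the masks and shifts above, the combination/exponent field being
+ * replaced is the 13 bits below the sign (bits 62:50 of VsrD(1)) for
+ * decimal64 and the 17 bits at 62:46 of VsrD(0) for decimal128; the
+ * sign bit and the trailing significand are left untouched.
+ */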
+
+#define DFP_HELPER_IEX(op, size) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
+ ppc_fprp_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ uint64_t raw_qnan, raw_snan, raw_inf, max_exp; \
+ ppc_vsr_t va; \
+ int bias; \
+ int64_t exp; \
+ \
+ get_dfp64(&va, a); \
+ exp = (int64_t)va.VsrD(1); \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if ((size) == 64) { \
+ max_exp = 767; \
+ raw_qnan = 0x1F00; \
+ raw_snan = 0x1F80; \
+ raw_inf = 0x1E00; \
+ bias = 398; \
+ } else if ((size) == 128) { \
+ max_exp = 12287; \
+ raw_qnan = 0x1f000; \
+ raw_snan = 0x1f800; \
+ raw_inf = 0x1e000; \
+ bias = 6176; \
+ } else { \
+ assert(0); \
+ } \
+ \
+ if (unlikely((exp < 0) || (exp > max_exp))) { \
+ dfp.vt.VsrD(0) = dfp.vb.VsrD(0); \
+ dfp.vt.VsrD(1) = dfp.vb.VsrD(1); \
+ if (exp == -1) { \
+ dfp_set_raw_exp_##size(&dfp.vt, raw_inf); \
+ } else if (exp == -3) { \
+ dfp_set_raw_exp_##size(&dfp.vt, raw_snan); \
+ } else { \
+ dfp_set_raw_exp_##size(&dfp.vt, raw_qnan); \
+ } \
+ } else { \
+ dfp.t = dfp.b; \
+ if (unlikely(decNumberIsSpecial(&dfp.t))) { \
+ dfp.t.bits &= ~DECSPECIAL; \
+ } \
+ dfp.t.exponent = exp - bias; \
+ dfp_finalize_decimal##size(&dfp); \
+ } \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_IEX(DIEX, 64)
+DFP_HELPER_IEX(DIEXQ, 128)
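+
+/*
+ * DIEX is the inverse of DXEX above and the special encodings
+ * round-trip: an FRA value of -1 re-inserts the infinity exponent
+ * pattern (raw 0x1E00 for decimal64), -3 the SNaN pattern, and any
+ * other out-of-range value (including -2) the QNaN pattern, while an
+ * in-range value simply becomes the new biased exponent (bias 398 or
+ * 6176).
+ */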
+
+static void dfp_clear_lmd_from_g5msb(uint64_t *t)
+{
+    /*
+     * The most significant 5 bits of the PowerPC DFP format combine bits
+     * from the left-most decimal digit (LMD) and the biased exponent.
+     * This routine clears the LMD bits while preserving the exponent
+     * bits.  See "Figure 80: Encoding of bits 0:4 of the G field for
+     * Finite Numbers" in the Power ISA for additional details.
+     */
+
+ uint64_t g5msb = (*t >> 58) & 0x1F;
+
+ if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */
+ *t &= ~(7ULL << 58);
+ } else {
+ switch (g5msb & 7) {
+ case 0:
+ case 1:
+ g5msb = 0;
+ break;
+ case 2:
+ case 3:
+ g5msb = 0x8;
+ break;
+ case 4:
+ case 5:
+ g5msb = 0x10;
+ break;
+ case 6:
+ g5msb = 0x1E;
+ break;
+ case 7:
+ g5msb = 0x1F;
+ break;
+ }
+
+ *t &= ~(0x1fULL << 58);
+ *t |= (g5msb << 58);
+ }
+}
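+
+/*
+ * Example for the mapping above: a G field starting 0b11010 encodes an
+ * LMD of 8 or 9 with exponent MSBs 0b01 (g5msb & 7 == 2), so it is
+ * rewritten to 0b01000: the same two exponent MSBs re-encoded in the
+ * small-LMD form, with the LMD cleared to zero.
+ */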
+
+#define DFP_HELPER_SHIFT(op, size, shift_left) \
+void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
+ uint32_t sh) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned max_digits = ((size) == 64) ? 16 : 34; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, 0, env); \
+ \
+ if (sh <= max_digits) { \
+ \
+ decNumber shd; \
+ unsigned special = dfp.a.bits & DECSPECIAL; \
+ \
+ if (shift_left) { \
+ decNumberFromUInt32(&shd, sh); \
+ } else { \
+ decNumberFromInt32(&shd, -((int32_t)sh)); \
+ } \
+ \
+ dfp.a.bits &= ~DECSPECIAL; \
+ decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context); \
+ \
+ dfp.t.bits |= special; \
+ if (special && (dfp.t.digits >= max_digits)) { \
+ dfp.t.digits = max_digits - 1; \
+ } \
+ \
+ dfp_finalize_decimal##size(&dfp); \
+ } else { \
+ if ((size) == 64) { \
+ dfp.vt.VsrD(1) = dfp.va.VsrD(1) & \
+ 0xFFFC000000000000ULL; \
+ dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(1)); \
+ } else { \
+ dfp.vt.VsrD(0) = dfp.va.VsrD(0) & \
+ 0xFFFFC00000000000ULL; \
+ dfp_clear_lmd_from_g5msb(&dfp.vt.VsrD(0)); \
+ dfp.vt.VsrD(1) = 0; \
+ } \
+ } \
+ \
+ set_dfp##size(t, &dfp.vt); \
+}
+
+DFP_HELPER_SHIFT(DSCLI, 64, 1)
+DFP_HELPER_SHIFT(DSCLIQ, 128, 1)
+DFP_HELPER_SHIFT(DSCRI, 64, 0)
+DFP_HELPER_SHIFT(DSCRIQ, 128, 0)
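+
+/*
+ * Example for the shift helpers: DSCLI with SH = 2 shifts the
+ * significand two digits toward the most-significant end (multiply by
+ * 100, truncating digits shifted past the format's precision), while
+ * DSCRI with SH = 2 discards the two low-order digits.  An SH beyond
+ * the format's digit count (16 or 34) leaves only FRA's sign and
+ * combination/exponent field, with a zero significand.
+ */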
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
new file mode 100644
index 000000000..17607adbe
--- /dev/null
+++ b/target/ppc/excp_helper.c
@@ -0,0 +1,1484 @@
+/*
+ * PowerPC exception emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "internal.h"
+#include "helper_regs.h"
+
+#include "trace.h"
+
+#ifdef CONFIG_TCG
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#endif
+
+/* #define DEBUG_SOFTWARE_TLB */
+
+/*****************************************************************************/
+/* Exception processing */
+#if !defined(CONFIG_USER_ONLY)
+
+static inline void dump_syscall(CPUPPCState *env)
+{
+ qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
+ " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
+ " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
+ " nip=" TARGET_FMT_lx "\n",
+ ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
+ ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
+ ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
+ ppc_dump_gpr(env, 8), env->nip);
+}
+
+static inline void dump_hcall(CPUPPCState *env)
+{
+ qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
+ " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
+ " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
+ " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
+ " nip=" TARGET_FMT_lx "\n",
+ ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
+ ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
+ ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
+ ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
+ ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
+ env->nip);
+}
+
+static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
+ target_ulong *msr)
+{
+ /* We no longer are in a PM state */
+ env->resume_as_sreset = false;
+
+    /* Always pretend to be returning from doze, as we don't lose state */
+ *msr |= SRR1_WS_NOLOSS;
+
+ /* Machine checks are sent normally */
+ if (excp == POWERPC_EXCP_MCHECK) {
+ return excp;
+ }
+ switch (excp) {
+ case POWERPC_EXCP_RESET:
+ *msr |= SRR1_WAKERESET;
+ break;
+ case POWERPC_EXCP_EXTERNAL:
+ *msr |= SRR1_WAKEEE;
+ break;
+ case POWERPC_EXCP_DECR:
+ *msr |= SRR1_WAKEDEC;
+ break;
+ case POWERPC_EXCP_SDOOR:
+ *msr |= SRR1_WAKEDBELL;
+ break;
+ case POWERPC_EXCP_SDOOR_HV:
+ *msr |= SRR1_WAKEHDBELL;
+ break;
+ case POWERPC_EXCP_HV_MAINT:
+ *msr |= SRR1_WAKEHMI;
+ break;
+ case POWERPC_EXCP_HVIRT:
+ *msr |= SRR1_WAKEHVI;
+ break;
+ default:
+ cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
+ excp);
+ }
+ return POWERPC_EXCP_RESET;
+}
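+
+/*
+ * For example, a decrementer wakeup from a power-saving state arrives
+ * here as POWERPC_EXCP_DECR: SRR1 is tagged with SRR1_WAKEDEC (on top
+ * of SRR1_WS_NOLOSS) so the OS can identify the wake reason, and the
+ * exception is rewritten to POWERPC_EXCP_RESET so that delivery goes
+ * through the 0x100 vector.
+ */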
+
+/*
+ * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
+ * taken with the MMU on, and which uses an alternate location (e.g., so the
+ * kernel/hv can map the vectors there with an effective address).
+ *
+ * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
+ * is delivered in this way. AIL requires the LPCR to be set to enable this
+ * mode, and then a number of conditions have to be true for AIL to apply.
+ *
+ * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
+ * they specifically want to be in real mode (e.g., the MCE might be signaling
+ * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
+ *
+ * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
+ * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
+ * radix mode (LPCR[HR]).
+ *
+ * POWER8, POWER9 with LPCR[HR]=0
+ * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
+ * +-----------+-------------+---------+-------------+-----+
+ * | a | 00/01/10 | x | x | 0 |
+ * | a | 11 | 0 | 1 | 0 |
+ * | a | 11 | 1 | 1 | a |
+ * | a | 11 | 0 | 0 | a |
+ * +-------------------------------------------------------+
+ *
+ * POWER9 with LPCR[HR]=1
+ * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
+ * +-----------+-------------+---------+-------------+-----+
+ * | a | 00/01/10 | x | x | 0 |
+ * | a | 11 | x | x | a |
+ * +-------------------------------------------------------+
+ *
+ * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
+ * the hypervisor in AIL mode if the guest is radix. This is good for
+ * performance but allows the guest to influence the AIL of hypervisor
+ * interrupts using its MSR, and also the hypervisor must disallow guest
+ * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
+ * use AIL for its MSR[HV] 0->1 interrupts.
+ *
+ * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
+ * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
+ * MSR[HV] 1->1).
+ *
+ * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
+ *
+ * POWER10 behaviour is
+ * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
+ * +-----------+------------+-------------+---------+-------------+-----+
+ * | a | h | 00/01/10 | 0 | 0 | 0 |
+ * | a | h | 11 | 0 | 0 | a |
+ * | a | h | x | 0 | 1 | h |
+ * | a | h | 00/01/10 | 1 | 1 | 0 |
+ * | a | h | 11 | 1 | 1 | h |
+ * +--------------------------------------------------------------------+
+ */
+static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
+ target_ulong msr,
+ target_ulong *new_msr,
+ target_ulong *vector)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = &cpu->env;
+ bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
+ bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
+ int ail = 0;
+
+ if (excp == POWERPC_EXCP_MCHECK ||
+ excp == POWERPC_EXCP_RESET ||
+ excp == POWERPC_EXCP_HV_MAINT) {
+ /* SRESET, MCE, HMI never apply AIL */
+ return;
+ }
+
+ if (excp_model == POWERPC_EXCP_POWER8 ||
+ excp_model == POWERPC_EXCP_POWER9) {
+ if (!mmu_all_on) {
+ /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
+ return;
+ }
+ if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
+ /*
+ * AIL does not work if there is a MSR[HV] 0->1 transition and the
+ * partition is in HPT mode. For radix guests, such interrupts are
+ * allowed to be delivered to the hypervisor in ail mode.
+ */
+ return;
+ }
+
+ ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
+ if (ail == 0) {
+ return;
+ }
+ if (ail == 1) {
+ /* AIL=1 is reserved, treat it like AIL=0 */
+ return;
+ }
+
+ } else if (excp_model == POWERPC_EXCP_POWER10) {
+ if (!mmu_all_on && !hv_escalation) {
+ /*
+ * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
+ * Guest->guest and HV->HV interrupts do require MMU on.
+ */
+ return;
+ }
+
+ if (*new_msr & MSR_HVB) {
+ if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
+ /* HV interrupts depend on LPCR[HAIL] */
+ return;
+ }
+ ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
+ } else {
+ ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
+ }
+ if (ail == 0) {
+ return;
+ }
+ if (ail == 1 || ail == 2) {
+ /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
+ return;
+ }
+ } else {
+ /* Other processors do not support AIL */
+ return;
+ }
+
+ /*
+ * AIL applies, so the new MSR gets IR and DR set, and an offset applied
+ * to the new IP.
+ */
+ *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
+
+ if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
+ if (ail == 2) {
+ *vector |= 0x0000000000018000ull;
+ } else if (ail == 3) {
+ *vector |= 0xc000000000004000ull;
+ }
+ } else {
+ /*
+ * scv AIL is a little different. AIL=2 does not change the address,
+ * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
+ */
+ if (ail == 3) {
+ *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
+ *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
+ }
+ }
+#endif
+}
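+
+/*
+ * Concrete effect of the offsets above: with LPCR[AIL] = 3 and the MMU
+ * on, an external interrupt that would vector to 0x500 is delivered at
+ * 0xc000000000004500 with MSR[IR] and MSR[DR] set, while for scv AIL=3
+ * rebases the 0x17000-range vectors into the 0xc000000000003000 range.
+ */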
+
+static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
+ target_ulong vector, target_ulong msr)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ /*
+ * We don't use hreg_store_msr here as already have treated any
+ * special case that could occur. Just store MSR and update hflags
+ *
+ * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
+ * will prevent setting of the HV bit which some exceptions might need
+ * to do.
+ */
+ env->msr = msr & env->msr_mask;
+ hreg_compute_hflags(env);
+ env->nip = vector;
+ /* Reset exception state */
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /*
+ * Any interrupt is context synchronizing, check if TCG TLB needs
+ * a delayed flush on ppc64
+ */
+ check_tlb_flush(env, false);
+}
+
+/*
+ * Note that this function should be greatly optimized when called
+ * with a constant excp, from ppc_hw_interrupt
+ */
+static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0, srr1, asrr0, asrr1, lev = -1;
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %08x (%02x)\n", env->nip, excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ if (excp_model == POWERPC_EXCP_BOOKE) {
+ msr = env->msr;
+ } else {
+ msr = env->msr & ~0x783f0000ULL;
+ }
+
+ /*
+ * new interrupt handler msr preserves existing HV and ME unless
+     * explicitly overridden
+ */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+
+ /* target registers */
+ srr0 = SPR_SRR0;
+ srr1 = SPR_SRR1;
+ asrr0 = -1;
+ asrr1 = -1;
+
+ /*
+ * check for special resume at 0x100 from doze/nap/sleep/winkle on
+ * P7/P8/P9
+ */
+ if (env->resume_as_sreset) {
+ excp = powerpc_reset_wakeup(cs, env, excp, &msr);
+ }
+
+ /*
+ * Hypervisor emulation assistance interrupt only exists on server
+     * arch 2.05 or later. We also don't want to generate it if
+ * we don't have HVB in msr_mask (PAPR mode).
+ */
+ if (excp == POWERPC_EXCP_HV_EMU
+#if defined(TARGET_PPC64)
+ && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
+#endif /* defined(TARGET_PPC64) */
+
+ ) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ switch (excp) {
+ case POWERPC_EXCP_NONE:
+ /* Should never happen */
+ return;
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_G2:
+ break;
+ default:
+ goto excp_invalid;
+ }
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /*
+ * Machine check exception is not enabled. Enter
+ * checkstop state.
+ */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cpu_interrupt_exittb(cs);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ /* XXX: should also have something loaded in DAR / DSISR */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ /* FIXME: choose one or the other based on CPU type */
+ srr0 = SPR_BOOKE_MCSRR0;
+ srr1 = SPR_BOOKE_MCSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ {
+ bool lpes0;
+
+ cs = CPU(cpu);
+
+ /*
+ * Exception targeting modifiers
+ *
+ * LPES0 is supported on POWER7/8/9
+ * LPES1 is not supported (old iSeries mode)
+ *
+ * On anything else, we behave as if LPES0 is 1
+ * (externals don't alter MSR:HV)
+ */
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_POWER7 ||
+ excp_model == POWERPC_EXCP_POWER8 ||
+ excp_model == POWERPC_EXCP_POWER9 ||
+ excp_model == POWERPC_EXCP_POWER10) {
+ lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ } else
+#endif /* defined(TARGET_PPC64) */
+ {
+ lpes0 = true;
+ }
+
+ if (!lpes0) {
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ }
+ if (env->mpic_proxy) {
+ /* IACK the IRQ on delivery */
+ env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
+ }
+ break;
+ }
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /*
+ * Get rS/rD and rA from faulting opcode.
+ * Note: We will only invoke ALIGN for atomic operations,
+ * so all instructions are X-form.
+ */
+ {
+ uint32_t insn = cpu_ldl_code(env, env->nip);
+ env->spr[SPR_DSISR] |= (insn & 0x03FF0000) >> 16;
+ }
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ trace_ppc_excp_fp_ignore();
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+
+ /*
+ * FP exceptions always have NIP pointing to the faulting
+ * instruction, so always use store_next and claim we are
+ * precise in the MSR.
+ */
+ msr |= 0x00100000;
+ env->spr[SPR_BOOKE_ESR] = ESR_FP;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PIL;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PPR;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PTR;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ lev = env->error_code;
+
+ if ((lev == 1) && cpu->vhyp) {
+ dump_hcall(env);
+ } else {
+ dump_syscall(env);
+ }
+
+ /*
+         * We need to correct the NIP, which in this case is supposed
+         * to point to the next instruction.
+ */
+ env->nip += 4;
+
+ /* "PAPR mode" built-in hypercall emulation */
+ if ((lev == 1) && cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hypercall(cpu->vhyp, cpu);
+ return;
+ }
+ if (lev == 1) {
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
+ lev = env->error_code;
+ dump_syscall(env);
+ env->nip += 4;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ /* FIT on 4xx */
+ trace_ppc_excp_print("FIT");
+ break;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ trace_ppc_excp_print("WDT");
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ break;
+ case POWERPC_EXCP_DEBUG: /* Debug interrupt */
+ if (env->flags & POWERPC_FLAG_DE) {
+ /* FIXME: choose one or the other based on CPU type */
+ srr0 = SPR_BOOKE_DSRR0;
+ srr1 = SPR_BOOKE_DSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ /* DBSR already modified by caller */
+ } else {
+ cpu_abort(cs, "Debug exception triggered on unsupported model\n");
+ }
+ break;
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Embedded floating point data exception "
+                  "is not implemented yet!\n");
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Embedded floating point round exception "
+                  "is not implemented yet!\n");
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs,
+                  "Performance counter exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
+ break;
+ case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ /* A power-saving exception sets ME, otherwise it is unchanged */
+ if (msr_pow) {
+ /* indicate that we resumed from power save mode */
+ msr |= 0x10000;
+ new_msr |= ((target_ulong)1 << MSR_ME);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ } else {
+ if (msr_pow) {
+ cpu_abort(cs, "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
+ }
+ break;
+ case POWERPC_EXCP_DSEG: /* Data segment exception */
+ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
+ msr |= env->error_code;
+ /* fall through */
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
+ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
+ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
+ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
+ case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
+ case POWERPC_EXCP_HV_EMU:
+ case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
+ case POWERPC_EXCP_FU: /* Facility unavailable exception */
+#ifdef TARGET_PPC64
+ env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
+#endif
+ break;
+ case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
+#ifdef TARGET_PPC64
+ env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+#endif
+ break;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
+ trace_ppc_excp_print("PIT");
+ break;
+ case POWERPC_EXCP_IO: /* IO error exception */
+ /* XXX: TODO */
+        cpu_abort(cs, "601 IO error exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_RUNM: /* Run mode exception */
+ /* XXX: TODO */
+        cpu_abort(cs, "601 run mode exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_EMUL: /* Emulation trap exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "602 emulation trap exception "
+                  "is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ /* Swap temporary saved registers with GPRs */
+ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
+ new_msr |= (target_ulong)1 << MSR_TGPR;
+ hreg_swap_gpr_tgpr(env);
+ }
+ /* fall through */
+ case POWERPC_EXCP_7x5:
+#if defined(DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_IMISS];
+ cmp = &env->spr[SPR_ICMP];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB) {
+ es = "DL";
+ } else {
+ es = "DS";
+ }
+ en = 'D';
+ miss = &env->spr[SPR_DMISS];
+ cmp = &env->spr[SPR_DCMP];
+ }
+ qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->spr[SPR_HASH1], env->spr[SPR_HASH2],
+ env->error_code);
+ }
+#endif
+ msr |= env->crf[0] << 28;
+ msr |= env->error_code; /* key, D/I, S/L bits */
+ /* Set way using a LRU mechanism */
+ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
+ break;
+ case POWERPC_EXCP_74xx:
+#if defined(DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB) {
+ es = "DL";
+ } else {
+ es = "DS";
+ }
+ en = 'D';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ }
+ qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->error_code);
+ }
+#endif
+ msr |= env->error_code; /* key bit */
+ break;
+ default:
+ cpu_abort(cs, "Invalid TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "Floating point assist exception "
+                  "is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_DABR: /* Data address breakpoint */
+ /* XXX: TODO */
+        cpu_abort(cs, "DABR exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ /* XXX: TODO */
+        cpu_abort(cs, "IABR exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ /* XXX: TODO */
+        cpu_abort(cs, "SMI exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Thermal management exception "
+                  "is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs,
+                  "Performance counter exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ /* XXX: TODO */
+        cpu_abort(cs, "VPU assist exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_SOFTP: /* Soft patch exception */
+ /* XXX: TODO */
+ cpu_abort(cs,
+                  "970 soft-patch exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ /* XXX: TODO */
+ cpu_abort(cs,
+                  "970 maintenance exception is not implemented yet!\n");
+ break;
+ case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
+ /* XXX: TODO */
+ cpu_abort(cs, "Maskable external exception "
+                  "is not implemented yet!\n");
+ break;
+    case POWERPC_EXCP_NMEXTBR:  /* Non-maskable external breakpoint */
+        /* XXX: TODO */
+        cpu_abort(cs, "Non-maskable external exception "
+                  "is not implemented yet!\n");
+ break;
+ default:
+ excp_invalid:
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ }
+
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ if (srr0 == SPR_HSRR0) {
+ cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+ "no HV support\n", excp);
+ }
+ }
+
+ /*
+ * Sort out endianness of interrupt, this differs depending on the
+ * CPU, the HV mode, etc...
+ */
+#ifdef TARGET_PPC64
+ if (excp_model == POWERPC_EXCP_POWER7) {
+ if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (excp_model == POWERPC_EXCP_POWER8) {
+ if (new_msr & MSR_HVB) {
+ if (env->spr[SPR_HID0] & HID0_HILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (excp_model == POWERPC_EXCP_POWER9 ||
+ excp_model == POWERPC_EXCP_POWER10) {
+ if (new_msr & MSR_HVB) {
+ if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+#else
+ if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+#endif
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+
+ vector |= env->excp_prefix;
+
+ /* If any alternate SRR register are defined, duplicate saved values */
+ if (asrr0 != -1) {
+ env->spr[asrr0] = env->nip;
+ }
+ if (asrr1 != -1) {
+ env->spr[asrr1] = msr;
+ }
+
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_BOOKE) {
+ if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
+ /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
+ new_msr |= (target_ulong)1 << MSR_CM;
+ } else {
+ vector = (uint32_t)vector;
+ }
+ } else {
+ if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
+ vector = (uint32_t)vector;
+ } else {
+ new_msr |= (target_ulong)1 << MSR_SF;
+ }
+ }
+#endif
+
+ if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
+ /* Save PC */
+ env->spr[srr0] = env->nip;
+
+ /* Save MSR */
+ env->spr[srr1] = msr;
+
+#if defined(TARGET_PPC64)
+ } else {
+ vector += lev * 0x20;
+
+ env->lr = env->nip;
+ env->ctr = msr;
+#endif
+ }
+
+ /* This can update new_msr and vector if AIL applies */
+ ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
+
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+void ppc_cpu_do_interrupt(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ powerpc_excp(cpu, env->excp_model, cs->exception_index);
+}
+
+static void ppc_hw_interrupt(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ bool async_deliver;
+
+ /* External reset */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ return;
+ }
+ /* Machine check exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
+ return;
+ }
+#if 0 /* TODO */
+ /* External debug exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
+ return;
+ }
+#endif
+
+ /*
+ * For interrupts that gate on MSR:EE, we need to do something a
+ * bit more subtle, as we need to let them through even when EE is
+     * clear while coming out of some power management states (in order
+ * for them to become a 0x100).
+ */
+ async_deliver = (msr_ee != 0) || env->resume_as_sreset;
+
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((async_deliver || msr_hv == 0) && hdice) {
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
+ return;
+ }
+ }
+
+ /* Hypervisor virtualization interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
+ if ((async_deliver || msr_hv == 0) && hvice) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
+ return;
+ }
+ }
+
+ /* External interrupt can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ /* HEIC blocks delivery to the hypervisor */
+ if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
+ (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
+ return;
+ }
+ }
+ if (msr_ce != 0) {
+ /* External critical interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
+ return;
+ }
+ }
+ if (async_deliver != 0) {
+ /* Watchdog timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
+ return;
+ }
+ /* Fixed interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
+ return;
+ }
+ /* Programmable interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
+ return;
+ }
+ /* Decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
+ if (ppc_decr_clear_on_delivery(env)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+ }
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ if (is_book3s_arch2x(env)) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
+ } else {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
+ }
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
+ return;
+ }
+ /* Thermal interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
+ return;
+ }
+ }
+
+ if (env->resume_as_sreset) {
+ /*
+         * This is a bug!  It means that has_work took us out of halt without
+         * anything to deliver while in a PM state that requires getting
+         * out via a 0x100.
+ *
+ * This means we will incorrectly execute past the power management
+ * instruction instead of triggering a reset.
+ *
+ * It generally means a discrepancy between the wakeup conditions in the
+ * processor has_work implementation and the logic in this function.
+ */
+ cpu_abort(env_cpu(env),
+                  "Wakeup from PM state but no interrupt delivered");
+ }
+}
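+
+/*
+ * The checks above implement a fixed delivery priority: system reset
+ * and machine check first, then (gated on MSR[EE] or a pending PM
+ * wakeup) the hypervisor decrementer, hypervisor virtualization and
+ * external interrupts, the critical input, and finally the embedded
+ * timer, doorbell, performance monitor and thermal sources in the
+ * order they are tested.
+ */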
+
+void ppc_cpu_do_system_reset(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+}
+
+void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr = 0;
+
+ /*
+     * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
+ * been set by KVM.
+ */
+ msr = (1ULL << MSR_ME);
+ msr |= env->msr & (1ULL << MSR_SF);
+ if (ppc_interrupts_little_endian(cpu)) {
+ msr |= (1ULL << MSR_LE);
+ }
+
+ powerpc_set_excp_state(cpu, vector, msr);
+}
+
+bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ ppc_hw_interrupt(env);
+ if (env->pending_interrupts == 0) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+void raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr)
+{
+ raise_exception_err_ra(env, exception, 0, raddr);
+}
+
+#ifdef CONFIG_TCG
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_TCG
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+ uint32_t excp = hreg_store_msr(env, val, 0);
+
+ if (excp != 0) {
+ CPUState *cs = env_cpu(env);
+ cpu_interrupt_exittb(cs);
+ raise_exception(env, excp);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void helper_scv(CPUPPCState *env, uint32_t lev)
+{
+ if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
+ raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
+ }
+}
+
+void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
+{
+ CPUState *cs;
+
+ cs = env_cpu(env);
+ cs->halted = 1;
+
+ /* Condition for waking up at 0x100 */
+ env->resume_as_sreset = (insn != PPC_PM_STOP) ||
+ (env->spr[SPR_PSSCR] & PSSCR_EC);
+}
+#endif /* defined(TARGET_PPC64) */
+#endif /* CONFIG_TCG */
+
+static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* MSR:POW cannot be set by any form of rfi */
+ msr &= ~(1ULL << MSR_POW);
+
+#if defined(TARGET_PPC64)
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ nip = (uint32_t)nip;
+ }
+#else
+ nip = (uint32_t)nip;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+ trace_ppc_excp_rfi(env->nip, env->msr);
+ /*
+ * No need to raise an exception here, as rfi is always the last
+ * insn of a TB
+ */
+ cpu_interrupt_exittb(cs);
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /* Context synchronizing: check if TCG TLB needs flush */
+ check_tlb_flush(env, false);
+}
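+
+/*
+ * The rfi variants below all funnel through do_rfi() and differ only in
+ * which save/restore pair supplies the new NIP and MSR: SRR0/1 for
+ * rfi/rfid, LR/CTR for rfscv, HSRR0/1 for hrfid, and the 40x or Book E
+ * critical/debug/machine-check pairs for the embedded forms.
+ */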
+
+#ifdef CONFIG_TCG
+void helper_rfi(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#if defined(TARGET_PPC64)
+void helper_rfid(CPUPPCState *env)
+{
+ /*
+     * The architecture defines a number of rules for which bits can
+     * change, but in practice we handle this in hreg_store_msr(),
+     * which will be called by do_rfi(), so there is no need to filter
+     * here.
+ */
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_rfscv(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+#endif /* CONFIG_TCG */
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+#ifdef CONFIG_TCG
+void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
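+
+/*
+ * The flags operand mirrors the TO field of tw: 0x10 traps on signed
+ * less-than, 0x08 on signed greater-than, 0x04 on equality, and 0x02
+ * and 0x01 on the unsigned comparisons.  For example, flags = 0x04
+ * with arg1 == arg2 raises a trap-type program interrupt.
+ */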
+
+#if defined(TARGET_PPC64)
+void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+#endif
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+#ifdef CONFIG_TCG
+void helper_rfsvc(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
+}
+
+/* Embedded.Processor Control */
+static int dbell2irq(target_ulong rb)
+{
+ int msg = rb & DBELL_TYPE_MASK;
+ int irq = -1;
+
+ switch (msg) {
+ case DBELL_TYPE_DBELL:
+ irq = PPC_INTERRUPT_DOORBELL;
+ break;
+ case DBELL_TYPE_DBELL_CRIT:
+ irq = PPC_INTERRUPT_CDOORBELL;
+ break;
+ case DBELL_TYPE_G_DBELL:
+ case DBELL_TYPE_G_DBELL_CRIT:
+ case DBELL_TYPE_G_DBELL_MC:
+ /* XXX implement */
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+void helper_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+
+ if (irq < 0) {
+ return;
+ }
+
+ env->pending_interrupts &= ~(1 << irq);
+}
+
+void helper_msgsnd(target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+ int pir = rb & DBELL_PIRTAG_MASK;
+ CPUState *cs;
+
+ if (irq < 0) {
+ return;
+ }
+
+ qemu_mutex_lock_iothread();
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
+ cenv->pending_interrupts |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+ qemu_mutex_unlock_iothread();
+}
+
+/* Server Processor Control */
+
+static bool dbell_type_server(target_ulong rb)
+{
+ /*
+ * A Directed Hypervisor Doorbell message is sent only if the
+ * message type is 5. All other types are reserved and the
+ * instruction is a no-op
+ */
+ return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
+}
+
+void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
+}
+
+static void book3s_msgsnd_common(int pir, int irq)
+{
+ CPUState *cs;
+
+ qemu_mutex_lock_iothread();
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ /* TODO: broadcast message to all threads of the same processor */
+ if (cenv->spr_cb[SPR_PIR].default_value == pir) {
+ cenv->pending_interrupts |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+ qemu_mutex_unlock_iothread();
+}
+
+void helper_book3s_msgsnd(target_ulong rb)
+{
+ int pir = rb & DBELL_PROCIDTAG_MASK;
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
+}
+
+#if defined(TARGET_PPC64)
+void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+}
+
+/*
+ * sends a message to other threads that are on the same
+ * multi-threaded processor
+ */
+void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
+{
+ int pir = env->spr_cb[SPR_PIR].default_value;
+
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ /* TODO: TCG supports only one thread */
+
+ book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
+}
+#endif /* TARGET_PPC64 */
+
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUPPCState *env = cs->env_ptr;
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ env->spr[SPR_40x_DEAR] = vaddr;
+ break;
+ case POWERPC_MMU_BOOKE:
+ case POWERPC_MMU_BOOKE206:
+ env->spr[SPR_BOOKE_DEAR] = vaddr;
+ break;
+ default:
+ env->spr[SPR_DAR] = vaddr;
+ break;
+ }
+
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = 0;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
new file mode 100644
index 000000000..c4896cecc
--- /dev/null
+++ b/target/ppc/fpu_helper.c
@@ -0,0 +1,3289 @@
+/*
+ * PowerPC floating point and SPE emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "internal.h"
+#include "fpu/softfloat.h"
+
+static inline float128 float128_snan_to_qnan(float128 x)
+{
+ float128 r;
+
+ r.high = x.high | 0x0000800000000000;
+ r.low = x.low;
+ return r;
+}
+
+#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
+#define float32_snan_to_qnan(x) ((x) | 0x00400000)
+#define float16_snan_to_qnan(x) ((x) | 0x0200)
+
+static inline bool fp_exceptions_enabled(CPUPPCState *env)
+{
+#ifdef CONFIG_USER_ONLY
+ return true;
+#else
+ return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
+#endif
+}
+
+/*****************************************************************************/
+/* Floating point operations helpers */
+
+/*
+ * This is the non-arithmetic conversion that happens e.g. on loads.
+ * In the Power ISA pseudocode, this is called DOUBLE.
+ */
+uint64_t helper_todouble(uint32_t arg)
+{
+ uint32_t abs_arg = arg & 0x7fffffff;
+ uint64_t ret;
+
+ if (likely(abs_arg >= 0x00800000)) {
+ if (unlikely(extract32(arg, 23, 8) == 0xff)) {
+ /* Inf or NAN. */
+ ret = (uint64_t)extract32(arg, 31, 1) << 63;
+ ret |= (uint64_t)0x7ff << 52;
+ ret |= (uint64_t)extract32(arg, 0, 23) << 29;
+ } else {
+ /* Normalized operand. */
+ ret = (uint64_t)extract32(arg, 30, 2) << 62;
+ ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
+ ret |= (uint64_t)extract32(arg, 0, 30) << 29;
+ }
+ } else {
+ /* Zero or Denormalized operand. */
+ ret = (uint64_t)extract32(arg, 31, 1) << 63;
+ if (unlikely(abs_arg != 0)) {
+ /*
+ * Denormalized operand.
+ * Shift fraction so that the msb is in the implicit bit position.
+ * Thus, shift is in the range [1:23].
+ */
+ int shift = clz32(abs_arg) - 8;
+ /*
+ * The first 3 terms compute the float64 exponent. We then bias
+ * this result by -1 so that we can swallow the implicit bit below.
+ */
+ int exp = -126 - shift + 1023 - 1;
+
+ ret |= (uint64_t)exp << 52;
+ ret += (uint64_t)abs_arg << (52 - 23 + shift);
+ }
+ }
+ return ret;
+}
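+
+/*
+ * Worked example (for illustration): 1.0f is 0x3f800000. It takes the
+ * normalized path above: the top two bits are copied, the cleared bit 30
+ * expands to three more exponent bits (7 << 59), and bits 0..29 shift up
+ * by 29 places, giving 0x3ff0000000000000, i.e. 1.0 as a float64.
+ */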
+
+/*
+ * This is the non-arithmetic conversion that happens e.g. on stores.
+ * In the Power ISA pseudocode, this is called SINGLE.
+ */
+uint32_t helper_tosingle(uint64_t arg)
+{
+ int exp = extract64(arg, 52, 11);
+ uint32_t ret;
+
+ if (likely(exp > 896)) {
+ /* No denormalization required (includes Inf, NaN). */
+ ret = extract64(arg, 62, 2) << 30;
+ ret |= extract64(arg, 29, 30);
+ } else {
+ /*
+ * Zero or Denormal result. If the exponent is in bounds for
+ * a single-precision denormal result, extract the proper
+ * bits. If the input is not zero, and the exponent is out of
+ * bounds, then the result is undefined; this underflows to
+ * zero.
+ */
+ ret = extract64(arg, 63, 1) << 31;
+ if (unlikely(exp >= 874)) {
+ /* Denormal result. */
+ ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
+ }
+ }
+ return ret;
+}
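+
+/*
+ * Worked example (for illustration): 0x3ff0000000000000 (1.0) has
+ * exp = 0x3ff = 1023 > 896, so the fast path above repacks the top two
+ * bits and bits 29..58 into 0x3f800000, i.e. 1.0f.
+ */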
+
+static inline int ppc_float32_get_unbiased_exp(float32 f)
+{
+ return ((f >> 23) & 0xFF) - 127;
+}
+
+static inline int ppc_float64_get_unbiased_exp(float64 f)
+{
+ return ((f >> 52) & 0x7FF) - 1023;
+}
+
+/* Classify a floating-point number. */
+enum {
+ is_normal = 1,
+ is_zero = 2,
+ is_denormal = 4,
+ is_inf = 8,
+ is_qnan = 16,
+ is_snan = 32,
+ is_neg = 64,
+};
+
+#define COMPUTE_CLASS(tp) \
+static int tp##_classify(tp arg) \
+{ \
+ int ret = tp##_is_neg(arg) * is_neg; \
+ if (unlikely(tp##_is_any_nan(arg))) { \
+ float_status dummy = { }; /* snan_bit_is_one = 0 */ \
+ ret |= (tp##_is_signaling_nan(arg, &dummy) \
+ ? is_snan : is_qnan); \
+ } else if (unlikely(tp##_is_infinity(arg))) { \
+ ret |= is_inf; \
+ } else if (tp##_is_zero(arg)) { \
+ ret |= is_zero; \
+ } else if (tp##_is_zero_or_denormal(arg)) { \
+ ret |= is_denormal; \
+ } else { \
+ ret |= is_normal; \
+ } \
+ return ret; \
+}
+
+COMPUTE_CLASS(float16)
+COMPUTE_CLASS(float32)
+COMPUTE_CLASS(float64)
+COMPUTE_CLASS(float128)
+
+static void set_fprf_from_class(CPUPPCState *env, int class)
+{
+ static const uint8_t fprf[6][2] = {
+ { 0x04, 0x08 }, /* normalized */
+ { 0x02, 0x12 }, /* zero */
+ { 0x14, 0x18 }, /* denormalized */
+ { 0x05, 0x09 }, /* infinity */
+ { 0x11, 0x11 }, /* qnan */
+ { 0x00, 0x00 }, /* snan -- flags are undefined */
+ };
+ bool isneg = class & is_neg;
+
+ env->fpscr &= ~FP_FPRF;
+ env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
+}
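+
+/*
+ * Example (for illustration): negative zero classifies as
+ * is_zero | is_neg = 0x42, so ctz32() selects row 1 and the sign selects
+ * column 1, yielding FPRF = 0x12, the encoding for -0.
+ */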
+
+#define COMPUTE_FPRF(tp) \
+void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
+{ \
+ set_fprf_from_class(env, tp##_classify(arg)); \
+}
+
+COMPUTE_FPRF(float16)
+COMPUTE_FPRF(float32)
+COMPUTE_FPRF(float64)
+COMPUTE_FPRF(float128)
+
+/* Floating-point invalid operations exception */
+static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
+{
+ /* Update the floating-point invalid operation summary */
+ env->fpscr |= FP_VX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_ve != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ if (fp_exceptions_enabled(env)) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | op, retaddr);
+ }
+ }
+}
+
+static void finish_invalid_op_arith(CPUPPCState *env, int op,
+ bool set_fpcc, uintptr_t retaddr)
+{
+ env->fpscr &= ~(FP_FR | FP_FI);
+ if (fpscr_ve == 0) {
+ if (set_fpcc) {
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= (FP_C | FP_FU);
+ }
+ }
+ finish_invalid_op_excp(env, op, retaddr);
+}
+
+/* Signalling NaN */
+static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXSNAN;
+ finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
+}
+
+/* Magnitude subtraction of infinities */
+static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXISI;
+ finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
+}
+
+/* Division of infinity by infinity */
+static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXIDI;
+ finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
+}
+
+/* Division of zero by zero */
+static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXZDZ;
+ finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
+}
+
+/* Multiplication of zero by infinity */
+static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXIMZ;
+ finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
+}
+
+/* Square root of a negative number */
+static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXSQRT;
+ finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
+}
+
+/* Ordered comparison of NaN */
+static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXVC;
+ if (set_fpcc) {
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= (FP_C | FP_FU);
+ }
+ /* Update the floating-point invalid operation summary */
+ env->fpscr |= FP_VX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ /* We must update the target FPR before raising the exception */
+ if (fpscr_ve != 0) {
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ /* Exception is deferred */
+ }
+}
+
+/* Invalid conversion */
+static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr)
+{
+ env->fpscr |= FP_VXCVI;
+ env->fpscr &= ~(FP_FR | FP_FI);
+ if (fpscr_ve == 0) {
+ if (set_fpcc) {
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= (FP_C | FP_FU);
+ }
+ }
+ finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
+}
+
+static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
+{
+ env->fpscr |= FP_ZX;
+ env->fpscr &= ~(FP_FR | FP_FI);
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_ze != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ if (fp_exceptions_enabled(env)) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
+ raddr);
+ }
+ }
+}
+
+static inline void float_overflow_excp(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->fpscr |= FP_OX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_oe != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
+ } else {
+ env->fpscr |= FP_XX;
+ env->fpscr |= FP_FI;
+ }
+}
+
+static inline void float_underflow_excp(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->fpscr |= FP_UX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_ue != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
+ }
+}
+
+static inline void float_inexact_excp(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->fpscr |= FP_FI;
+ env->fpscr |= FP_XX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_xe != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= FP_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
+ }
+}
+
+void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
+{
+ uint32_t mask = 1u << bit;
+ if (env->fpscr & mask) {
+ ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
+ }
+}
+
+void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
+{
+ uint32_t mask = 1u << bit;
+ if (!(env->fpscr & mask)) {
+ ppc_store_fpscr(env, env->fpscr | mask);
+ }
+}
+
+void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
+{
+ target_ulong mask = 0;
+ int i;
+
+ /* TODO: push this extension back to translation time */
+ for (i = 0; i < sizeof(target_ulong) * 2; i++) {
+ if (nibbles & (1 << i)) {
+ mask |= (target_ulong) 0xf << (4 * i);
+ }
+ }
+ val = (val & mask) | (env->fpscr & ~mask);
+ ppc_store_fpscr(env, val);
+}
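+
+/*
+ * For example (illustration only): nibbles = 0x3 selects the two
+ * low-order nibbles, so mask becomes 0xff and only FPSCR bits 7:0 are
+ * taken from val; all other bits keep their previous values.
+ */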
+
+static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (status & float_flag_overflow) {
+ float_overflow_excp(env);
+ } else if (status & float_flag_underflow) {
+ float_underflow_excp(env);
+ }
+ if (status & float_flag_inexact) {
+ float_inexact_excp(env);
+ } else {
+ env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
+ }
+
+ if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
+ (env->error_code & POWERPC_EXCP_FP)) {
+ /* Deferred floating-point exception after target FPR update */
+ if (fp_exceptions_enabled(env)) {
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, raddr);
+ }
+ }
+}
+
+void helper_float_check_status(CPUPPCState *env)
+{
+ do_float_check_status(env, GETPC());
+}
+
+void helper_reset_fpstatus(CPUPPCState *env)
+{
+ set_float_exception_flags(0, &env->fp_status);
+}
+
+static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
+ uintptr_t retaddr, int classes)
+{
+ if ((classes & ~is_neg) == is_inf) {
+ /* Magnitude subtraction of infinities */
+ float_invalid_op_vxisi(env, set_fpcc, retaddr);
+ } else if (classes & is_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+}
+
+/* fadd - fadd. */
+float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64_add(arg1, arg2, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status & float_flag_invalid)) {
+ float_invalid_op_addsub(env, 1, GETPC(),
+ float64_classify(arg1) |
+ float64_classify(arg2));
+ }
+
+ return ret;
+}
+
+/* fsub - fsub. */
+float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64_sub(arg1, arg2, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status & float_flag_invalid)) {
+ float_invalid_op_addsub(env, 1, GETPC(),
+ float64_classify(arg1) |
+ float64_classify(arg2));
+ }
+
+ return ret;
+}
+
+static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
+ uintptr_t retaddr, int classes)
+{
+ if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
+ /* Multiplication of zero by infinity */
+ float_invalid_op_vximz(env, set_fprc, retaddr);
+ } else if (classes & is_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+}
+
+/* fmul - fmul. */
+float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64_mul(arg1, arg2, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status & float_flag_invalid)) {
+ float_invalid_op_mul(env, 1, GETPC(),
+ float64_classify(arg1) |
+ float64_classify(arg2));
+ }
+
+ return ret;
+}
+
+static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
+ uintptr_t retaddr, int classes)
+{
+ classes &= ~is_neg;
+ if (classes == is_inf) {
+ /* Division of infinity by infinity */
+ float_invalid_op_vxidi(env, set_fprc, retaddr);
+ } else if (classes == is_zero) {
+ /* Division of zero by zero */
+ float_invalid_op_vxzdz(env, set_fprc, retaddr);
+ } else if (classes & is_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+}
+
+/* fdiv - fdiv. */
+float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64_div(arg1, arg2, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status)) {
+ if (status & float_flag_invalid) {
+ float_invalid_op_div(env, 1, GETPC(),
+ float64_classify(arg1) |
+ float64_classify(arg2));
+ }
+ if (status & float_flag_divbyzero) {
+ float_zero_divide_excp(env, GETPC());
+ }
+ }
+
+ return ret;
+}
+
+static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
+ uintptr_t retaddr, int class1)
+{
+ float_invalid_op_vxcvi(env, set_fprc, retaddr);
+ if (class1 & is_snan) {
+ float_invalid_op_vxsnan(env, retaddr);
+ }
+}
+
+#define FPU_FCTI(op, cvt, nanval) \
+uint64_t helper_##op(CPUPPCState *env, float64 arg) \
+{ \
+ uint64_t ret = float64_to_##cvt(arg, &env->fp_status); \
+ int status = get_float_exception_flags(&env->fp_status); \
+ \
+ if (unlikely(status)) { \
+ if (status & float_flag_invalid) { \
+ float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
+ ret = nanval; \
+ } \
+ do_float_check_status(env, GETPC()); \
+ } \
+ return ret; \
+}
+
+FPU_FCTI(fctiw, int32, 0x80000000U)
+FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
+FPU_FCTI(fctiwu, uint32, 0x00000000U)
+FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
+FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
+FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
+FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
+FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
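+
+/*
+ * Note (summarizing the macro above): when softfloat reports an invalid
+ * conversion, the result is replaced with nanval, so e.g. fctiw of a NaN
+ * input returns 0x80000000 while fctiwu returns 0.
+ */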
+
+#define FPU_FCFI(op, cvtr, is_single) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
+{ \
+ CPU_DoubleU farg; \
+ \
+ if (is_single) { \
+ float32 tmp = cvtr(arg, &env->fp_status); \
+ farg.d = float32_to_float64(tmp, &env->fp_status); \
+ } else { \
+ farg.d = cvtr(arg, &env->fp_status); \
+ } \
+ do_float_check_status(env, GETPC()); \
+ return farg.ll; \
+}
+
+FPU_FCFI(fcfid, int64_to_float64, 0)
+FPU_FCFI(fcfids, int64_to_float32, 1)
+FPU_FCFI(fcfidu, uint64_to_float64, 0)
+FPU_FCFI(fcfidus, uint64_to_float32, 1)
+
+static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
+ int rounding_mode)
+{
+ CPU_DoubleU farg;
+ FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN round */
+ float_invalid_op_vxsnan(env, GETPC());
+ farg.ll = arg | 0x0008000000000000ULL;
+ } else {
+ int inexact = get_float_exception_flags(&env->fp_status) &
+ float_flag_inexact;
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ farg.ll = float64_round_to_int(farg.d, &env->fp_status);
+ set_float_rounding_mode(old_rounding_mode, &env->fp_status);
+
+ /* fri* does not set FPSCR[XX] */
+ if (!inexact) {
+ env->fp_status.float_exception_flags &= ~float_flag_inexact;
+ }
+ }
+ do_float_check_status(env, GETPC());
+ return farg.ll;
+}
+
+uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_ties_away);
+}
+
+uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_to_zero);
+}
+
+uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_up);
+}
+
+uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_down);
+}
+
+#define FPU_MADDSUB_UPDATE(NAME, TP) \
+static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \
+ unsigned int madd_flags, uintptr_t retaddr) \
+{ \
+ if (TP##_is_signaling_nan(arg1, &env->fp_status) || \
+ TP##_is_signaling_nan(arg2, &env->fp_status) || \
+ TP##_is_signaling_nan(arg3, &env->fp_status)) { \
+ /* sNaN operation */ \
+ float_invalid_op_vxsnan(env, retaddr); \
+ } \
+ if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \
+ (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \
+ /* Multiplication of zero by infinity */ \
+ float_invalid_op_vximz(env, 1, retaddr); \
+ } \
+ if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \
+ TP##_is_infinity(arg3)) { \
+ uint8_t aSign, bSign, cSign; \
+ \
+ aSign = TP##_is_neg(arg1); \
+ bSign = TP##_is_neg(arg2); \
+ cSign = TP##_is_neg(arg3); \
+ if (madd_flags & float_muladd_negate_c) { \
+ cSign ^= 1; \
+ } \
+ if (aSign ^ bSign ^ cSign) { \
+ float_invalid_op_vxisi(env, 1, retaddr); \
+ } \
+ } \
+}
+FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
+FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
+
+#define FPU_FMADD(op, madd_flags) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
+ uint64_t arg2, uint64_t arg3) \
+{ \
+ uint32_t flags; \
+ float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
+ &env->fp_status); \
+ flags = get_float_exception_flags(&env->fp_status); \
+ if (flags) { \
+ if (flags & float_flag_invalid) { \
+ float64_maddsub_update_excp(env, arg1, arg2, arg3, \
+ madd_flags, GETPC()); \
+ } \
+ do_float_check_status(env, GETPC()); \
+ } \
+ return ret; \
+}
+
+#define MADD_FLGS 0
+#define MSUB_FLGS float_muladd_negate_c
+#define NMADD_FLGS float_muladd_negate_result
+#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
+
+FPU_FMADD(fmadd, MADD_FLGS)
+FPU_FMADD(fnmadd, NMADD_FLGS)
+FPU_FMADD(fmsub, MSUB_FLGS)
+FPU_FMADD(fnmsub, NMSUB_FLGS)
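+
+/*
+ * Illustrative mapping of the flag combinations: fmadd computes
+ * a * b + c, fmsub negates c (a * b - c), fnmadd negates the final
+ * result, and fnmsub combines both flags, i.e. -(a * b - c).
+ */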
+
+/* frsp - frsp. */
+uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* fsqrt - fsqrt. */
+float64 helper_fsqrt(CPUPPCState *env, float64 arg)
+{
+ float64 ret = float64_sqrt(arg, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status & float_flag_invalid)) {
+ if (unlikely(float64_is_any_nan(arg))) {
+ if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
+ /* sNaN square root */
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ } else {
+ /* Square root of a negative nonzero number */
+ float_invalid_op_vxsqrt(env, 1, GETPC());
+ }
+ }
+
+ return ret;
+}
+
+/* fre - fre. */
+float64 helper_fre(CPUPPCState *env, float64 arg)
+{
+ /* "Estimate" the reciprocal with actual division. */
+ float64 ret = float64_div(float64_one, arg, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status)) {
+ if (status & float_flag_invalid) {
+ if (float64_is_signaling_nan(arg, &env->fp_status)) {
+ /* sNaN reciprocal */
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ }
+ if (status & float_flag_divbyzero) {
+ float_zero_divide_excp(env, GETPC());
+ /* For FPSCR.ZE == 0, the result is 1/2. */
+ ret = float64_set_sign(float64_half, float64_is_neg(arg));
+ }
+ }
+
+ return ret;
+}
+
+/* fres - fres. */
+uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN reciprocal */
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* frsqrte - frsqrte. */
+float64 helper_frsqrte(CPUPPCState *env, float64 arg)
+{
+ /* "Estimate" the reciprocal with actual division. */
+ float64 rets = float64_sqrt(arg, &env->fp_status);
+ float64 retd = float64_div(float64_one, rets, &env->fp_status);
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(status)) {
+ if (status & float_flag_invalid) {
+ if (float64_is_signaling_nan(arg, &env->fp_status)) {
+ /* sNaN reciprocal */
+ float_invalid_op_vxsnan(env, GETPC());
+ } else {
+ /* Square root of a negative nonzero number */
+ float_invalid_op_vxsqrt(env, 1, GETPC());
+ }
+ }
+ if (status & float_flag_divbyzero) {
+ /* Reciprocal of (square root of) zero. */
+ float_zero_divide_excp(env, GETPC());
+ }
+ }
+
+ return retd;
+}
+
+/* fsel - fsel. */
+uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1;
+
+ farg1.ll = arg1;
+
+ if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
+ !float64_is_any_nan(farg1.d)) {
+ return arg2;
+ } else {
+ return arg3;
+ }
+}
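+
+/* E.g. fsel with arg1 = +0.0 picks arg2; any NaN in arg1 picks arg3. */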
+
+uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(fra) ||
+ float64_is_infinity(frb) ||
+ float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_a = ppc_float64_get_unbiased_exp(fra);
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(fra) ||
+ float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if ((e_b <= -1022) || (e_b >= 1021)) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(fra) &&
+ (((e_a - e_b) >= 1023) ||
+ ((e_a - e_b) <= -1021) ||
+ (e_a <= -970))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /*
+ * XB is not zero because of the above check and so must be
+ * denormalized.
+ */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
+
+uint32_t helper_ftsqrt(uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_zero(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_neg(frb))) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /*
+ * XB is not zero because of the above check and therefore must be
+ * denormalized.
+ */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
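+
+/*
+ * In both test helpers above, the returned CR field always has 0x8 set;
+ * fg (0x4) flags a zero, infinite or denormal operand and fe (0x2) flags
+ * inputs for which the estimate may not reach the required accuracy, so
+ * software can fall back to the full-precision sequence. (Summary for
+ * illustration.)
+ */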
+
+void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= ret << FPSCR_FPCC;
+ env->crf[crfD] = ret;
+ if (unlikely(ret == 0x01UL
+ && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
+ /* sNaN comparison */
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+}
+
+void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= ret << FPSCR_FPCC;
+ env->crf[crfD] = (uint32_t) ret;
+ if (unlikely(ret == 0x01UL)) {
+ float_invalid_op_vxvc(env, 1, GETPC());
+ if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)) {
+ /* sNaN comparison */
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ }
+}
+
+/* Single-precision floating-point conversions */
+static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_int32_round_to_zero(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
+}
+
+static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+ tmp = int64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
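+
+/*
+ * The *sf/*uf forms above treat the 32-bit operand as a fixed-point
+ * fraction, so they scale by 2^32: dividing by it when converting to
+ * float, multiplying by it when converting back.
+ */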
+
+#define HELPER_SPE_SINGLE_CONV(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
+ { \
+ return e##name(env, val); \
+ }
+/* efscfsi */
+HELPER_SPE_SINGLE_CONV(fscfsi);
+/* efscfui */
+HELPER_SPE_SINGLE_CONV(fscfui);
+/* efscfuf */
+HELPER_SPE_SINGLE_CONV(fscfuf);
+/* efscfsf */
+HELPER_SPE_SINGLE_CONV(fscfsf);
+/* efsctsi */
+HELPER_SPE_SINGLE_CONV(fsctsi);
+/* efsctui */
+HELPER_SPE_SINGLE_CONV(fsctui);
+/* efsctsiz */
+HELPER_SPE_SINGLE_CONV(fsctsiz);
+/* efsctuiz */
+HELPER_SPE_SINGLE_CONV(fsctuiz);
+/* efsctsf */
+HELPER_SPE_SINGLE_CONV(fsctsf);
+/* efsctuf */
+HELPER_SPE_SINGLE_CONV(fsctuf);
+
+#define HELPER_SPE_VECTOR_CONV(name) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
+ { \
+ return ((uint64_t)e##name(env, val >> 32) << 32) | \
+ (uint64_t)e##name(env, val); \
+ }
+/* evfscfsi */
+HELPER_SPE_VECTOR_CONV(fscfsi);
+/* evfscfui */
+HELPER_SPE_VECTOR_CONV(fscfui);
+/* evfscfuf */
+HELPER_SPE_VECTOR_CONV(fscfuf);
+/* evfscfsf */
+HELPER_SPE_VECTOR_CONV(fscfsf);
+/* evfsctsi */
+HELPER_SPE_VECTOR_CONV(fsctsi);
+/* evfsctui */
+HELPER_SPE_VECTOR_CONV(fsctui);
+/* evfsctsiz */
+HELPER_SPE_VECTOR_CONV(fsctsiz);
+/* evfsctuiz */
+HELPER_SPE_VECTOR_CONV(fsctuiz);
+/* evfsctsf */
+HELPER_SPE_VECTOR_CONV(fsctsf);
+/* evfsctuf */
+HELPER_SPE_VECTOR_CONV(fsctuf);
+
+/* Single-precision floating-point arithmetic */
+static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_add(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_div(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+#define HELPER_SPE_SINGLE_ARITH(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2); \
+ }
+/* efsadd */
+HELPER_SPE_SINGLE_ARITH(fsadd);
+/* efssub */
+HELPER_SPE_SINGLE_ARITH(fssub);
+/* efsmul */
+HELPER_SPE_SINGLE_ARITH(fsmul);
+/* efsdiv */
+HELPER_SPE_SINGLE_ARITH(fsdiv);
+
+#define HELPER_SPE_VECTOR_ARITH(name) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
+ { \
+ return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
+ (uint64_t)e##name(env, op1, op2); \
+ }
+/* evfsadd */
+HELPER_SPE_VECTOR_ARITH(fsadd);
+/* evfssub */
+HELPER_SPE_VECTOR_ARITH(fssub);
+/* evfsmul */
+HELPER_SPE_VECTOR_ARITH(fsmul);
+/* evfsdiv */
+HELPER_SPE_VECTOR_ARITH(fsdiv);
+
+/* Single-precision floating-point comparisons */
+static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
+}
+
+static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmplt(env, op1, op2);
+}
+
+static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmpgt(env, op1, op2);
+}
+
+static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmpeq(env, op1, op2);
+}
+
+#define HELPER_SINGLE_SPE_CMP(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2); \
+ }
+/* efststlt */
+HELPER_SINGLE_SPE_CMP(fststlt);
+/* efststgt */
+HELPER_SINGLE_SPE_CMP(fststgt);
+/* efststeq */
+HELPER_SINGLE_SPE_CMP(fststeq);
+/* efscmplt */
+HELPER_SINGLE_SPE_CMP(fscmplt);
+/* efscmpgt */
+HELPER_SINGLE_SPE_CMP(fscmpgt);
+/* efscmpeq */
+HELPER_SINGLE_SPE_CMP(fscmpeq);
+
+static inline uint32_t evcmp_merge(int t0, int t1)
+{
+ return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
+}
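+
+/*
+ * evcmp_merge() packs two per-element results into a CR field: bit 3 is
+ * the high element, bit 2 the low element, bit 1 their OR and bit 0
+ * their AND.
+ */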
+
+#define HELPER_VECTOR_SPE_CMP(name) \
+ uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
+ { \
+ return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
+ e##name(env, op1, op2)); \
+ }
+/* evfststlt */
+HELPER_VECTOR_SPE_CMP(fststlt);
+/* evfststgt */
+HELPER_VECTOR_SPE_CMP(fststgt);
+/* evfststeq */
+HELPER_VECTOR_SPE_CMP(fststeq);
+/* evfscmplt */
+HELPER_VECTOR_SPE_CMP(fscmplt);
+/* evfscmpgt */
+HELPER_VECTOR_SPE_CMP(fscmpgt);
+/* evfscmpeq */
+HELPER_VECTOR_SPE_CMP(fscmpeq);
+
+/* Double-precision floating-point conversion */
+uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+ /* NaNs are not handled the way IEEE 754 specifies */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u1;
+ CPU_FloatU u2;
+
+ u1.ll = val;
+ u2.f = float64_to_float32(u1.d, &env->vec_status);
+
+ return u2.l;
+}
+
+uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u2;
+ CPU_FloatU u1;
+
+ u1.l = val;
+ u2.d = float32_to_float64(u1.f, &env->vec_status);
+
+ return u2.ll;
+}
+
+/* Double precision fixed-point arithmetic */
+uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_add(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_div(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+/* Double precision floating point helpers */
+uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
+}
+
+uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstlt(env, op1, op2);
+}
+
+uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstgt(env, op1, op2);
+}
+
+uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtsteq(env, op1, op2);
+}
+
+#define float64_to_float64(x, env) x /* identity, so type-generic conversion macros expand cleanly */
+
+/*
+ * VSX_ADD_SUB - VSX floating point add/subtract
+ * name - instruction mnemonic
+ * op - operation (add or sub)
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ float_invalid_op_addsub(env, sfprf, GETPC(), \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
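+
+/*
+ * For illustration: VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+ * expands to helper_xsadddp(), one float64_add() on VsrD(0) that sets
+ * FPRF (sfprf = 1) and skips single-precision rounding (r2sp = 0).
+ */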
+
+void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = *xt;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ t.f128 = float128_add(xa->f128, xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, 1, GETPC(),
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+/*
+ * VSX_MUL - VSX floating point multiply
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ float_invalid_op_mul(env, sfprf, GETPC(), \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
+
+void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = *xt;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ float_invalid_op_mul(env, 1, GETPC(),
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
+ }
+ helper_compute_fprf_float128(env, t.f128);
+
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+/*
+ * VSX_DIV - VSX floating point divide
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ float_invalid_op_div(env, sfprf, GETPC(), \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
+ } \
+ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
+ float_zero_divide_excp(env, GETPC()); \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
+
+void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = *xt;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ t.f128 = float128_div(xa->f128, xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ float_invalid_op_div(env, 1, GETPC(),
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
+ }
+ if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+/*
+ * VSX_RE - VSX floating point reciprocal estimate
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
+VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
+VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
+VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
+
+/*
+ * VSX_SQRT - VSX floating point square root
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
+ float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
+VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
+VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
+VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
+
+/*
+ * VSX_RSQRTE - VSX floating point reciprocal square root estimate
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round intermediate result to single precision
+ */
+#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
+ t.fld = tp##_div(tp##_one, t.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
+ float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
+VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
+VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
+VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
+
+/*
+ * VSX_TDIV - VSX floating point test for divide
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * emin - minimum unbiased exponent
+ * emax - maximum unbiased exponent
+ * nbits - number of fraction bits
+ */
+#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ int i; \
+ int fe_flag = 0; \
+ int fg_flag = 0; \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_infinity(xa->fld) || \
+ tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
+ fe_flag = 1; \
+ fg_flag = 1; \
+ } else { \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
+ \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
+ fe_flag = 1; \
+ } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
+ fe_flag = 1; \
+ } else if (!tp##_is_zero(xa->fld) && \
+ (((e_a - e_b) >= emax) || \
+ ((e_a - e_b) <= (emin + 1)) || \
+ (e_a <= (emin + nbits)))) { \
+ fe_flag = 1; \
+ } \
+ \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
+ /* \
+ * XB is not zero because of the above check and so \
+ * must be denormalized. \
+ */ \
+ fg_flag = 1; \
+ } \
+ } \
+ } \
+ \
+ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
+}
+
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
+
+/*
+ * VSX_TSQRT - VSX floating point test for square root
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * emin - minimum unbiased exponent
+ * nbits - number of fraction bits
+ */
+#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
+{ \
+ int i; \
+ int fe_flag = 0; \
+ int fg_flag = 0; \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
+ fe_flag = 1; \
+ fg_flag = 1; \
+ } else { \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
+ \
+ if (unlikely(tp##_is_any_nan(xb->fld))) { \
+ fe_flag = 1; \
+ } else if (unlikely(tp##_is_zero(xb->fld))) { \
+ fe_flag = 1; \
+ } else if (unlikely(tp##_is_neg(xb->fld))) { \
+ fe_flag = 1; \
+ } else if (!tp##_is_zero(xb->fld) && \
+ (e_b <= (emin + nbits))) { \
+ fe_flag = 1; \
+ } \
+ \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
+ /* \
+ * XB is not zero because of the above check and \
+ * therefore must be denormalized. \
+ */ \
+ fg_flag = 1; \
+ } \
+ } \
+ } \
+ \
+ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
+}
+
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
+
+/*
+ * VSX_MADD - VSX floating point multiply/add variations
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * maddflgs - flags for the float*muladd routine that control the
+ * various forms (madd, msub, nmadd, nmsub)
+ * sfprf - set FPRF
+ * r2sp - round the intermediate double-precision result to single precision
+ */
+#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
+ /* \
+ * Avoid double rounding errors by rounding the intermediate \
+ * result to odd. \
+ */ \
+ set_float_rounding_mode(float_round_to_zero, &tstat); \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
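+ /* \
+ * Fold the inexact flag into the lsb of the truncated result \
+ * (round to odd), so the final rounding to single precision \
+ * cannot double-round. \
+ */ \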
+ t.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
+ } else { \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ } \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ tp##_maddsub_update_excp(env, xa->fld, b->fld, \
+ c->fld, maddflgs, GETPC()); \
+ } \
+ \
+ if (r2sp) { \
+ t.fld = helper_frsp(env, t.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
+VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
+VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
+VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
+VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
+VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
+VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
+VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
+
+VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
+
+VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
+
+/*
+ * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ * op - instruction mnemonic
+ * cmp - comparison operation
+ * exp - expected result of comparison
+ * svxvc - set VXVC bit
+ */
+#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
+ \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ if (fpscr_ve == 0 && svxvc) { \
+ vxvc_flag = true; \
+ } \
+ } else if (svxvc) { \
+ vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
+ } \
+ if (vxsnan_flag) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ if (vxvc_flag) { \
+ float_invalid_op_vxvc(env, 0, GETPC()); \
+ } \
+ vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
+ \
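+ /* \
+ * Note the swapped operands: "xa >= xb" is evaluated as "xb <= xa", \
+ * which lets xscmpgedp and xscmpgtdp reuse the softfloat le and lt \
+ * primitives. \
+ */ \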
+ if (!vex_flag) { \
+ if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
+ &env->fp_status) == exp) { \
+ t.VsrD(0) = -1; \
+ t.VsrD(1) = 0; \
+ } else { \
+ t.VsrD(0) = 0; \
+ t.VsrD(1) = 0; \
+ } \
+ } \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
+VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
+
+void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ int64_t exp_a, exp_b;
+ uint32_t cc;
+
+ exp_a = extract64(xa->VsrD(0), 52, 11);
+ exp_b = extract64(xb->VsrD(0), 52, 11);
+
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
+ float64_is_any_nan(xb->VsrD(0)))) {
+ cc = CRF_SO;
+ } else {
+ if (exp_a < exp_b) {
+ cc = CRF_LT;
+ } else if (exp_a > exp_b) {
+ cc = CRF_GT;
+ } else {
+ cc = CRF_EQ;
+ }
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= cc << FPSCR_FPCC;
+ env->crf[BF(opcode)] = cc;
+
+ do_float_check_status(env, GETPC());
+}
+
+void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ int64_t exp_a, exp_b;
+ uint32_t cc;
+
+ exp_a = extract64(xa->VsrD(0), 48, 15);
+ exp_b = extract64(xb->VsrD(0), 48, 15);
+
+ if (unlikely(float128_is_any_nan(xa->f128) ||
+ float128_is_any_nan(xb->f128))) {
+ cc = CRF_SO;
+ } else {
+ if (exp_a < exp_b) {
+ cc = CRF_LT;
+ } else if (exp_a > exp_b) {
+ cc = CRF_GT;
+ } else {
+ cc = CRF_EQ;
+ }
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= cc << FPSCR_FPCC;
+ env->crf[BF(opcode)] = cc;
+
+ do_float_check_status(env, GETPC());
+}
+
+static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
+ int crf_idx, bool ordered)
+{
+ uint32_t cc;
+ bool vxsnan_flag = false, vxvc_flag = false;
+
+ helper_reset_fpstatus(env);
+
+ switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
+ case float_relation_less:
+ cc = CRF_LT;
+ break;
+ case float_relation_equal:
+ cc = CRF_EQ;
+ break;
+ case float_relation_greater:
+ cc = CRF_GT;
+ break;
+ case float_relation_unordered:
+ cc = CRF_SO;
+
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
+ vxsnan_flag = true;
+ if (fpscr_ve == 0 && ordered) {
+ vxvc_flag = true;
+ }
+ } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
+ if (ordered) {
+ vxvc_flag = true;
+ }
+ }
+
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= cc << FPSCR_FPCC;
+ env->crf[crf_idx] = cc;
+
+ if (vxsnan_flag) {
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ if (vxvc_flag) {
+ float_invalid_op_vxvc(env, 0, GETPC());
+ }
+
+ do_float_check_status(env, GETPC());
+}
+
+void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
+ ppc_vsr_t *xb)
+{
+ do_scalar_cmp(env, xa, xb, BF(opcode), true);
+}
+
+void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
+ ppc_vsr_t *xb)
+{
+ do_scalar_cmp(env, xa, xb, BF(opcode), false);
+}
+
+static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
+ ppc_vsr_t *xb, int crf_idx, bool ordered)
+{
+ uint32_t cc;
+ bool vxsnan_flag = false, vxvc_flag = false;
+
+ helper_reset_fpstatus(env);
+
+ switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
+ case float_relation_less:
+ cc = CRF_LT;
+ break;
+ case float_relation_equal:
+ cc = CRF_EQ;
+ break;
+ case float_relation_greater:
+ cc = CRF_GT;
+ break;
+ case float_relation_unordered:
+ cc = CRF_SO;
+
+ if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
+ float128_is_signaling_nan(xb->f128, &env->fp_status)) {
+ vxsnan_flag = true;
+ if (fpscr_ve == 0 && ordered) {
+ vxvc_flag = true;
+ }
+ } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
+ float128_is_quiet_nan(xb->f128, &env->fp_status)) {
+ if (ordered) {
+ vxvc_flag = true;
+ }
+ }
+
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= cc << FPSCR_FPCC;
+ env->crf[crf_idx] = cc;
+
+ if (vxsnan_flag) {
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ if (vxvc_flag) {
+ float_invalid_op_vxvc(env, 0, GETPC());
+ }
+
+ do_float_check_status(env, GETPC());
+}
+
+void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
+ ppc_vsr_t *xb)
+{
+ do_scalar_cmpq(env, xa, xb, BF(opcode), true);
+}
+
+void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
+ ppc_vsr_t *xb)
+{
+ do_scalar_cmpq(env, xa, xb, BF(opcode), false);
+}
+
+/*
+ * VSX_MAX_MIN - VSX floating point maximum/minimum
+ * name - instruction mnemonic
+ * op - operation (max or min)
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ */
+#define VSX_MAX_MIN(name, op, nels, tp, fld) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
+ t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
+
+#define VSX_MAX_MINC(name, max) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ bool vxsnan_flag = false, vex_flag = false; \
+ \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
+ float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ t.VsrD(0) = xb->VsrD(0); \
+ } else if ((max && \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
+ (!max && \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
+ } else { \
+ t.VsrD(0) = xb->VsrD(0); \
+ } \
+ \
+ vex_flag = fpscr_ve & vxsnan_flag; \
+ if (vxsnan_flag) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ if (!vex_flag) { \
+ *xt = t; \
+ } \
+} \
+
+VSX_MAX_MINC(xsmaxcdp, 1);
+VSX_MAX_MINC(xsmincdp, 0);
+
+#define VSX_MAX_MINJ(name, max) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ bool vxsnan_flag = false, vex_flag = false; \
+ \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ t.VsrD(0) = xa->VsrD(0); \
+ } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ } \
+ t.VsrD(0) = xb->VsrD(0); \
+ } else if (float64_is_zero(xa->VsrD(0)) && \
+ float64_is_zero(xb->VsrD(0))) { \
+ if (max) { \
+ if (!float64_is_neg(xa->VsrD(0)) || \
+ !float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0ULL; \
+ } else { \
+ t.VsrD(0) = 0x8000000000000000ULL; \
+ } \
+ } else { \
+ if (float64_is_neg(xa->VsrD(0)) || \
+ float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0x8000000000000000ULL; \
+ } else { \
+ t.VsrD(0) = 0ULL; \
+ } \
+ } \
+ } else if ((max && \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
+ (!max && \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
+ } else { \
+ t.VsrD(0) = xb->VsrD(0); \
+ } \
+ \
+ vex_flag = fpscr_ve & vxsnan_flag; \
+ if (vxsnan_flag) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ if (!vex_flag) { \
+ *xt = t; \
+ } \
+} \
+
+VSX_MAX_MINJ(xsmaxjdp, 1);
+VSX_MAX_MINJ(xsminjdp, 0);
+
+/*
+ * VSX_CMP - VSX floating point compare
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * cmp - comparison operation
+ * svxvc - set VXVC bit
+ * exp - expected result of comparison
+ */
+#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
+uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ uint32_t crf6 = 0; \
+ int i; \
+ int all_true = 1; \
+ int all_false = 1; \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
+ if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
+ if (svxvc) { \
+ float_invalid_op_vxvc(env, 0, GETPC()); \
+ } \
+ t.fld = 0; \
+ all_true = 0; \
+ } else { \
+ if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
+ t.fld = -1; \
+ all_false = 0; \
+ } else { \
+ t.fld = 0; \
+ all_true = 0; \
+ } \
+ } \
+ } \
+ \
+ *xt = t; \
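+ /* \
+ * CR6 encoding for the record forms: 0x8 means all elements \
+ * compared true, 0x2 means all elements compared false. \
+ */ \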
+ crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
+ return crf6; \
+}
+
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
+VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
+VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
+
+/*
+ * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (float32 or float64)
+ * ttp - target type (float32 or float64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field (f32 or f64)
+ * sfprf - set FPRF
+ */
+#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
+ &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf_##ttp(env, t.tfld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
+VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
+
+/*
+ * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (float32 or float64)
+ * ttp - target type (float32, float64 or float128)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field (f32 or f64)
+ * sfprf - set FPRF
+ */
+#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
+ &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf_##ttp(env, t.tfld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
+
+/*
+ * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
+ * involving one half precision value
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type
+ * ttp - target type
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * sfprf - set FPRF
+ */
+#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = { }; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
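+ /* The second argument selects the IEEE half-precision format. */ \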
+ t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
+ &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf_##ttp(env, t.tfld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
+VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
+VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
+VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
+
+/*
+ * xscvqpdp does not use VSX_CVT_FP_TO_FP() because xscvqpdpo will be
+ * added to it later.
+ */
+void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ float_status tstat;
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+ if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
+ float_invalid_op_vxsnan(env, GETPC());
+ t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
+ }
+ helper_compute_fprf_float64(env, t.VsrD(0));
+
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
+{
+ uint64_t result, sign, exp, frac;
+
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
+ sign = extract64(xb, 63, 1);
+ exp = extract64(xb, 52, 11);
+ frac = extract64(xb, 0, 52) | 0x10000000000000ULL;
+
+ if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
+ /* DP denormal operand. */
+ /* Exponent override to DP min exp. */
+ exp = 1;
+ /* Implicit bit override to 0; the implicit bit set above is bit 52. */
+ frac = deposit64(frac, 52, 1, 0);
+ }
+
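+ /*
+ * 897 is the double-precision biased exponent of the smallest
+ * single-precision normal (1023 - 126); anything below it is tiny
+ * in single precision and must be denormalized.
+ */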
+ if (unlikely(exp < 897 && frac != 0)) {
+ /* SP tiny operand. */
+ if (897 - exp > 63) {
+ frac = 0;
+ } else {
+ /* Denormalize until exp = SP min exp. */
+ frac >>= (897 - exp);
+ }
+ /* Exponent override to SP min exp - 1. */
+ exp = 896;
+ }
+
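+ /*
+ * Repack as single precision: bit 10 plus the low 7 bits of the
+ * DP-biased exponent give the SP-biased exponent (exp - 896) for all
+ * in-range values, and the top 23 fraction bits are taken without
+ * rounding (this is the non-signalling conversion).
+ */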
+ result = sign << 31;
+ result |= extract64(exp, 10, 1) << 30;
+ result |= extract64(exp, 0, 7) << 23;
+ result |= extract64(frac, 29, 23);
+
+ /* Hardware replicates the result to both words of the doubleword result. */
+ return (result << 32) | result;
+}
+
+uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
+{
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
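+ /* The single-precision operand occupies the upper word of the doubleword. */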
+ return float32_to_float64(xb >> 32, &tstat);
+}
+
+/*
+ * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (float32 or float64)
+ * ttp - target type (int32, uint32, int64 or uint64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * rnan - resulting NaN
+ */
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ int all_flags = env->fp_status.float_exception_flags, flags; \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
+ env->fp_status.float_exception_flags = 0; \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
+ flags = env->fp_status.float_exception_flags; \
+ if (unlikely(flags & float_flag_invalid)) { \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
+ } \
+ all_flags |= flags; \
+ } \
+ \
+ *xt = t; \
+ env->fp_status.float_exception_flags = all_flags; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
+VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
+
+/*
+ * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
+ * op - instruction mnemonic
+ * stp - source type (float32, float64 or float128)
+ * ttp - target type (int32, uint32, int64 or uint64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * rnan - resulting NaN
+ */
+#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = { }; \
+ \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
+ if (env->fp_status.float_exception_flags & float_flag_invalid) { \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
+ 0x8000000000000000ULL)
+
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
+ 0xffffffff80000000ULL)
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
+VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
+
+/*
+ * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (int32, uint32, int64 or uint64)
+ * ttp - target type (float32 or float64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * sfprf - set FPRF
+ * r2sp - round the intermediate result to single precision
+ */
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ \
+ for (i = 0; i < nels; i++) { \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (r2sp) { \
+ t.tfld = helper_frsp(env, t.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.tfld); \
+ } \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
+
+/*
+ * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
+ * op - instruction mnemonic
+ * stp - source type (int32, uint32, int64 or uint64)
+ * ttp - target type (float32, float64 or float128)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ */
+#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
+VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
+
+/*
+ * For "use current rounding mode", define a value that will not be
+ * one of the existing rounding mode enums.
+ */
+#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
+ float_round_up + float_round_to_zero)
+
+/*
+ * VSX_ROUND - VSX floating point round
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * rmode - rounding mode
+ * sfprf - set FPRF
+ */
+#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i; \
+ FloatRoundMode curr_rounding_mode; \
+ \
+ if (rmode != FLOAT_ROUND_CURRENT) { \
+ curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
+ set_float_rounding_mode(rmode, &env->fp_status); \
+ } \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, \
+ &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ t.fld = tp##_snan_to_qnan(xb->fld); \
+ } else { \
+ t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf_float64(env, t.fld); \
+ } \
+ } \
+ \
+ /* \
+ * If this is not a "use current rounding mode" instruction, \
+ * then inhibit setting of the XX bit and restore rounding \
+ * mode from FPSCR \
+ */ \
+ if (rmode != FLOAT_ROUND_CURRENT) { \
+ set_float_rounding_mode(curr_rounding_mode, &env->fp_status); \
+ env->fp_status.float_exception_flags &= ~float_flag_inexact; \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, GETPC()); \
+}
+
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
+VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
+
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
+VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
+
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
+VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
+
+uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
+{
+ helper_reset_fpstatus(env);
+
+ uint64_t xt = helper_frsp(env, xb);
+
+ helper_compute_fprf_float64(env, xt);
+ do_float_check_status(env, GETPC());
+ return xt;
+}
+
+#define VSX_XXPERM(op, indexed) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *pcv) \
+{ \
+ ppc_vsr_t t = *xt; \
+ int i, idx; \
+ \
+ for (i = 0; i < 16; i++) { \
+ idx = pcv->VsrB(i) & 0x1F; \
+ if (indexed) { \
+ idx = 31 - idx; \
+ } \
+ t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
+ : xt->VsrB(idx - 16); \
+ } \
+ *xt = t; \
+}
+
+VSX_XXPERM(xxperm, 0)
+VSX_XXPERM(xxpermr, 1)
+
+void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ uint32_t exp, i, fraction;
+
+ for (i = 0; i < 4; i++) {
+ exp = (xb->VsrW(i) >> 23) & 0xFF;
+ fraction = xb->VsrW(i) & 0x7FFFFF;
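+ /* Insert the implicit bit only for normal numbers (exp not 0 or 255). */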
+ if (exp != 0 && exp != 255) {
+ t.VsrW(i) = fraction | 0x00800000;
+ } else {
+ t.VsrW(i) = fraction;
+ }
+ }
+ *xt = t;
+}
+
+/*
+ * VSX_TEST_DC - VSX floating point test data class
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * xbn - VSR register number
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * tfld - target vsr_t field (VsrD(*) or VsrW(*))
+ * fld_max - target field max
+ * scrf - set result in CR and FPCC
+ */
+#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \
+ ppc_vsr_t *xb = &env->vsr[xbn]; \
+ ppc_vsr_t t = { }; \
+ uint32_t i, sign, dcmx; \
+ uint32_t cc, match = 0; \
+ \
+ if (!scrf) { \
+ dcmx = DCMX_XV(opcode); \
+ } else { \
+ t = *xt; \
+ dcmx = DCMX(opcode); \
+ } \
+ \
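+ /* \
+ * DCMX bit assignments: 6 = NaN, 5/4 = +/-Infinity, \
+ * 3/2 = +/-Zero, 1/0 = +/-Denormal. \
+ */ \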
+ for (i = 0; i < nels; i++) { \
+ sign = tp##_is_neg(xb->fld); \
+ if (tp##_is_any_nan(xb->fld)) { \
+ match = extract32(dcmx, 6, 1); \
+ } else if (tp##_is_infinity(xb->fld)) { \
+ match = extract32(dcmx, 4 + !sign, 1); \
+ } else if (tp##_is_zero(xb->fld)) { \
+ match = extract32(dcmx, 2 + !sign, 1); \
+ } else if (tp##_is_zero_or_denormal(xb->fld)) { \
+ match = extract32(dcmx, 0 + !sign, 1); \
+ } \
+ \
+ if (scrf) { \
+ cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
+ env->fpscr &= ~FP_FPCC; \
+ env->fpscr |= cc << FPSCR_FPCC; \
+ env->crf[BF(opcode)] = cc; \
+ } else { \
+ t.tfld = match ? fld_max : 0; \
+ } \
+ match = 0; \
+ } \
+ if (!scrf) { \
+ *xt = t; \
+ } \
+}
+
+VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
+VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
+VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
+VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
+
+void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
+{
+ uint32_t dcmx, sign, exp;
+ uint32_t cc, match = 0, not_sp = 0;
+
+ dcmx = DCMX(opcode);
+ exp = (xb->VsrD(0) >> 52) & 0x7FF;
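+ /*
+ * exp is compared below against 0x381 (897), the DP-biased exponent
+ * of the smallest single-precision normal; smaller exponents classify
+ * as denormal in single precision.
+ */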
+
+ sign = float64_is_neg(xb->VsrD(0));
+ if (float64_is_any_nan(xb->VsrD(0))) {
+ match = extract32(dcmx, 6, 1);
+ } else if (float64_is_infinity(xb->VsrD(0))) {
+ match = extract32(dcmx, 4 + !sign, 1);
+ } else if (float64_is_zero(xb->VsrD(0))) {
+ match = extract32(dcmx, 2 + !sign, 1);
+ } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
+ (exp > 0 && exp < 0x381)) {
+ match = extract32(dcmx, 0 + !sign, 1);
+ }
+
+ not_sp = !float64_eq(xb->VsrD(0),
+ float32_to_float64(
+ float64_to_float32(xb->VsrD(0), &env->fp_status),
+ &env->fp_status), &env->fp_status);
+
+ cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
+ env->fpscr &= ~FP_FPCC;
+ env->fpscr |= cc << FPSCR_FPCC;
+ env->crf[BF(opcode)] = cc;
+}
+
+void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ uint8_t r = Rrm(opcode);
+ uint8_t ex = Rc(opcode);
+ uint8_t rmc = RMC(opcode);
+ uint8_t rmode = 0;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+
+ if (r == 0 && rmc == 0) {
+ rmode = float_round_ties_away;
+ } else if (r == 0 && rmc == 0x3) {
+ rmode = fpscr_rn;
+ } else if (r == 1) {
+ switch (rmc) {
+ case 0:
+ rmode = float_round_nearest_even;
+ break;
+ case 1:
+ rmode = float_round_to_zero;
+ break;
+ case 2:
+ rmode = float_round_up;
+ break;
+ case 3:
+ rmode = float_round_down;
+ break;
+ default:
+ abort();
+ }
+ }
+
+ tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+ set_float_rounding_mode(rmode, &tstat);
+ t.f128 = float128_round_to_int(xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ t.f128 = float128_snan_to_qnan(t.f128);
+ }
+ }
+
+ if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
+ env->fp_status.float_exception_flags &= ~float_flag_inexact;
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+ do_float_check_status(env, GETPC());
+ *xt = t;
+}
+
+void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ uint8_t r = Rrm(opcode);
+ uint8_t rmc = RMC(opcode);
+ uint8_t rmode = 0;
+ floatx80 round_res;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+
+ if (r == 0 && rmc == 0) {
+ rmode = float_round_ties_away;
+ } else if (r == 0 && rmc == 0x3) {
+ rmode = fpscr_rn;
+ } else if (r == 1) {
+ switch (rmc) {
+ case 0:
+ rmode = float_round_nearest_even;
+ break;
+ case 1:
+ rmode = float_round_to_zero;
+ break;
+ case 2:
+ rmode = float_round_up;
+ break;
+ case 3:
+ rmode = float_round_down;
+ break;
+ default:
+ abort();
+ }
+ }
+
+ tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+ set_float_rounding_mode(rmode, &tstat);
+ round_res = float128_to_floatx80(xb->f128, &tstat);
+ t.f128 = floatx80_to_float128(round_res, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ t.f128 = float128_snan_to_qnan(t.f128);
+ }
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ t.f128 = float128_sqrt(xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ t.f128 = float128_snan_to_qnan(xb->f128);
+ } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
+ t.f128 = xb->f128;
+ } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
+ float_invalid_op_vxsqrt(env, 1, GETPC());
+ t.f128 = float128_default_nan(&env->fp_status);
+ }
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
+
+void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = *xt;
+ float_status tstat;
+
+ helper_reset_fpstatus(env);
+
+ tstat = env->fp_status;
+ if (unlikely(Rc(opcode) != 0)) {
+ tstat.float_rounding_mode = float_round_to_odd;
+ }
+
+ set_float_exception_flags(0, &tstat);
+ t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags;
+
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, 1, GETPC(),
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
+ }
+
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
+ do_float_check_status(env, GETPC());
+}
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
new file mode 100644
index 000000000..105c2f7dd
--- /dev/null
+++ b/target/ppc/gdbstub.c
@@ -0,0 +1,628 @@
+/*
+ * PowerPC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "internal.h"
+
+static int ppc_gdb_register_len_apple(int n)
+{
+ switch (n) {
+ case 0 ... 31:
+ /* gprs */
+ return 8;
+ case 32 ... 63:
+ /* fprs */
+ return 8;
+ case 64 ... 95:
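+ /* vector registers */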
+ return 16;
+ case 64 + 32: /* nip */
+ case 65 + 32: /* msr */
+ case 67 + 32: /* lr */
+ case 68 + 32: /* ctr */
+ case 70 + 32: /* fpscr */
+ return 8;
+ case 66 + 32: /* cr */
+ case 69 + 32: /* xer */
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static int ppc_gdb_register_len(int n)
+{
+ switch (n) {
+ case 0 ... 31:
+ /* gprs */
+ return sizeof(target_ulong);
+ case 32 ... 63:
+ /* fprs */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 8;
+ case 66:
+ /* cr */
+ case 69:
+ /* xer */
+ return 4;
+ case 64:
+ /* nip */
+ case 65:
+ /* msr */
+ case 67:
+ /* lr */
+ case 68:
+ /* ctr */
+ return sizeof(target_ulong);
+ case 70:
+ /* fpscr */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return sizeof(target_ulong);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * We need to present the registers to gdb in the "current" memory
+ * ordering. For user-only mode we get this for free;
+ * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the
+ * binary, and cannot be changed. For system mode,
+ * TARGET_WORDS_BIGENDIAN is always set, and we must check the current
+ * mode of the chip to see if we're running in little-endian.
+ */
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
+{
+#ifndef CONFIG_USER_ONLY
+ if (!msr_le) {
+ /* do nothing */
+ } else if (len == 4) {
+ bswap32s((uint32_t *)mem_buf);
+ } else if (len == 8) {
+ bswap64s((uint64_t *)mem_buf);
+ } else if (len == 16) {
+ bswap128s((Int128 *)mem_buf);
+ } else {
+ g_assert_not_reached();
+ }
+#endif
+}
+
+/*
+ * Old gdb always expects FP registers. Newer (xml-aware) gdb only
+ * expects whatever the target description contains. Due to a
+ * historical mishap the FP registers appear in between core integer
+ * regs and PC, MSR, CR, and so forth. We hack round this by giving
+ * the FP regs zero size when talking to a newer gdb.
+ */
+
+int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ uint8_t *mem_buf;
+ int r = ppc_gdb_register_len(n);
+
+ if (!r) {
+ return r;
+ }
+
+ if (n < 32) {
+ /* gprs */
+ gdb_get_regl(buf, env->gpr[n]);
+ } else if (n < 64) {
+ /* fprs */
+ gdb_get_reg64(buf, *cpu_fpr_ptr(env, n - 32));
+ } else {
+ switch (n) {
+ case 64:
+ gdb_get_regl(buf, env->nip);
+ break;
+ case 65:
+ gdb_get_regl(buf, env->msr);
+ break;
+ case 66:
+ {
+ uint32_t cr = 0;
+ int i;
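+ /* Pack the eight CR fields, CR0 in the most significant nibble. */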
+ for (i = 0; i < 8; i++) {
+ cr |= env->crf[i] << (32 - ((i + 1) * 4));
+ }
+ gdb_get_reg32(buf, cr);
+ break;
+ }
+ case 67:
+ gdb_get_regl(buf, env->lr);
+ break;
+ case 68:
+ gdb_get_regl(buf, env->ctr);
+ break;
+ case 69:
+ gdb_get_reg32(buf, cpu_read_xer(env));
+ break;
+ case 70:
+ gdb_get_reg32(buf, env->fpscr);
+ break;
+ }
+ }
+ mem_buf = buf->data + buf->len - r;
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ return r;
+}
+
+int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ uint8_t *mem_buf;
+ int r = ppc_gdb_register_len_apple(n);
+
+ if (!r) {
+ return r;
+ }
+
+ if (n < 32) {
+ /* gprs */
+ gdb_get_reg64(buf, env->gpr[n]);
+ } else if (n < 64) {
+ /* fprs */
+ gdb_get_reg64(buf, *cpu_fpr_ptr(env, n - 32));
+ } else if (n < 96) {
+ /* Altivec */
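+ /* The VR contents are not reported; dummy values are returned. */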
+ gdb_get_reg64(buf, n - 64);
+ gdb_get_reg64(buf, 0);
+ } else {
+ switch (n) {
+ case 64 + 32:
+ gdb_get_reg64(buf, env->nip);
+ break;
+ case 65 + 32:
+ gdb_get_reg64(buf, env->msr);
+ break;
+ case 66 + 32:
+ {
+ uint32_t cr = 0;
+ int i;
+ for (i = 0; i < 8; i++) {
+ cr |= env->crf[i] << (32 - ((i + 1) * 4));
+ }
+ gdb_get_reg32(buf, cr);
+ break;
+ }
+ case 67 + 32:
+ gdb_get_reg64(buf, env->lr);
+ break;
+ case 68 + 32:
+ gdb_get_reg64(buf, env->ctr);
+ break;
+ case 69 + 32:
+ gdb_get_reg32(buf, cpu_read_xer(env));
+ break;
+ case 70 + 32:
+ gdb_get_reg64(buf, env->fpscr);
+ break;
+ }
+ }
+ mem_buf = buf->data + buf->len - r;
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ return r;
+}
+
+int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len(n);
+
+ if (!r) {
+ return r;
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ if (n < 32) {
+ /* gprs */
+ env->gpr[n] = ldtul_p(mem_buf);
+ } else if (n < 64) {
+ /* fprs */
+ *cpu_fpr_ptr(env, n - 32) = ldq_p(mem_buf);
+ } else {
+ switch (n) {
+ case 64:
+ env->nip = ldtul_p(mem_buf);
+ break;
+ case 65:
+ ppc_store_msr(env, ldtul_p(mem_buf));
+ break;
+ case 66:
+ {
+ uint32_t cr = ldl_p(mem_buf);
+ int i;
+ for (i = 0; i < 8; i++) {
+ env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
+ }
+ break;
+ }
+ case 67:
+ env->lr = ldtul_p(mem_buf);
+ break;
+ case 68:
+ env->ctr = ldtul_p(mem_buf);
+ break;
+ case 69:
+ cpu_write_xer(env, ldl_p(mem_buf));
+ break;
+ case 70:
+ /* fpscr */
+ ppc_store_fpscr(env, ldtul_p(mem_buf));
+ break;
+ }
+ }
+ return r;
+}
+
+int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len_apple(n);
+
+ if (!r) {
+ return r;
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ if (n < 32) {
+ /* gprs */
+ env->gpr[n] = ldq_p(mem_buf);
+ } else if (n < 64) {
+ /* fprs */
+ *cpu_fpr_ptr(env, n - 32) = ldq_p(mem_buf);
+ } else {
+ switch (n) {
+ case 64 + 32:
+ env->nip = ldq_p(mem_buf);
+ break;
+ case 65 + 32:
+ ppc_store_msr(env, ldq_p(mem_buf));
+ break;
+ case 66 + 32:
+ {
+ uint32_t cr = ldl_p(mem_buf);
+ int i;
+ for (i = 0; i < 8; i++) {
+ env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
+ }
+ break;
+ }
+ case 67 + 32:
+ env->lr = ldq_p(mem_buf);
+ break;
+ case 68 + 32:
+ env->ctr = ldq_p(mem_buf);
+ break;
+ case 69 + 32:
+ cpu_write_xer(env, ldl_p(mem_buf));
+ break;
+ case 70 + 32:
+ /* fpscr */
+ ppc_store_fpscr(env, ldq_p(mem_buf));
+ break;
+ }
+ }
+ return r;
+}
+
+#ifndef CONFIG_USER_ONLY
+void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ GString *xml;
+ char *spr_name;
+ unsigned int num_regs = 0;
+ int i;
+
+ if (pcc->gdb_spr_xml) {
+ return;
+ }
+
+ xml = g_string_new("<?xml version=\"1.0\"?>");
+ g_string_append(xml, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
+ g_string_append(xml, "<feature name=\"org.qemu.power.spr\">");
+
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (!spr->name) {
+ continue;
+ }
+
+ spr_name = g_ascii_strdown(spr->name, -1);
+ g_string_append_printf(xml, "<reg name=\"%s\"", spr_name);
+ g_free(spr_name);
+
+ g_string_append_printf(xml, " bitsize=\"%d\"", TARGET_LONG_BITS);
+ g_string_append(xml, " group=\"spr\"/>");
+
+ /*
+ * GDB identifies registers based on the order they are
+ * presented in the XML. These ids will not match QEMU's
+ * representation (which follows the PowerISA).
+ *
+ * Store the position of the current register description so
+ * we can make the correspondence later.
+ */
+ spr->gdb_id = num_regs;
+ num_regs++;
+ }
+
+ g_string_append(xml, "</feature>");
+
+ pcc->gdb_num_sprs = num_regs;
+ pcc->gdb_spr_xml = g_string_free(xml, false);
+}
+
+const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+
+ if (strcmp(xml_name, "power-spr.xml") == 0) {
+ return pcc->gdb_spr_xml;
+ }
+ return NULL;
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+static int gdb_find_spr_idx(CPUPPCState *env, int n)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (spr->name && spr->gdb_id == n) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n)
+{
+ int reg;
+ int len;
+
+ reg = gdb_find_spr_idx(env, n);
+ if (reg < 0) {
+ return 0;
+ }
+
+ len = TARGET_LONG_SIZE;
+ gdb_get_regl(buf, env->spr[reg]);
+ ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len);
+ return len;
+}
+
+static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ int reg;
+ int len;
+
+ reg = gdb_find_spr_idx(env, n);
+ if (reg < 0) {
+ return 0;
+ }
+
+ len = TARGET_LONG_SIZE;
+ ppc_maybe_bswap_register(env, mem_buf, len);
+ env->spr[reg] = ldn_p(mem_buf, len);
+
+ return len;
+}
+#endif
+
+static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n)
+{
+ uint8_t *mem_buf;
+ if (n < 32) {
+ gdb_get_reg64(buf, *cpu_fpr_ptr(env, n));
+ mem_buf = gdb_get_reg_ptr(buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ if (n == 32) {
+ gdb_get_reg32(buf, env->fpscr);
+ mem_buf = gdb_get_reg_ptr(buf, 4);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ *cpu_fpr_ptr(env, n) = ldq_p(mem_buf);
+ return 8;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ ppc_store_fpscr(env, ldl_p(mem_buf));
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
+{
+ uint8_t *mem_buf;
+
+ if (n < 32) {
+ ppc_avr_t *avr = cpu_avr_ptr(env, n);
+ gdb_get_reg128(buf, avr->VsrD(0), avr->VsrD(1));
+ mem_buf = gdb_get_reg_ptr(buf, 16);
+ ppc_maybe_bswap_register(env, mem_buf, 16);
+ return 16;
+ }
+ if (n == 32) {
+ gdb_get_reg32(buf, ppc_get_vscr(env));
+ mem_buf = gdb_get_reg_ptr(buf, 4);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ if (n == 33) {
+ gdb_get_reg32(buf, (uint32_t)env->spr[SPR_VRSAVE]);
+ mem_buf = gdb_get_reg_ptr(buf, 4);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_avr_t *avr = cpu_avr_ptr(env, n);
+ ppc_maybe_bswap_register(env, mem_buf, 16);
+ avr->VsrD(0) = ldq_p(mem_buf);
+ avr->VsrD(1) = ldq_p(mem_buf + 8);
+ return 16;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ ppc_store_vscr(env, ldl_p(mem_buf));
+ return 4;
+ }
+ if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n)
+{
+ if (n < 32) {
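+ /* Registers 0..31 are the upper 32 bits of the GPRs (SPE high halves). */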
+#if defined(TARGET_PPC64)
+ gdb_get_reg32(buf, env->gpr[n] >> 32);
+ ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4);
+#else
+ gdb_get_reg32(buf, env->gprh[n]);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ gdb_get_reg64(buf, env->spe_acc);
+ ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8);
+ return 8;
+ }
+ if (n == 33) {
+ gdb_get_reg32(buf, env->spe_fscr);
+ ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 4), 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#if defined(TARGET_PPC64)
+ target_ulong lo = (uint32_t)env->gpr[n];
+ target_ulong hi;
+
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+
+ hi = (target_ulong)ldl_p(mem_buf) << 32;
+ env->gpr[n] = lo | hi;
+#else
+ env->gprh[n] = ldl_p(mem_buf);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->spe_acc = ldq_p(mem_buf);
+ return 8;
+ }
+ if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ env->spe_fscr = ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n)
+{
+ if (n < 32) {
+ gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n));
+ ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8);
+ return 8;
+ }
+ return 0;
+}
+
+static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ *cpu_vsrl_ptr(env, n) = ldq_p(mem_buf);
+ return 8;
+ }
+ return 0;
+}
+
+gchar *ppc_gdb_arch_name(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ return g_strdup("powerpc:common64");
+#else
+ return g_strdup("powerpc:common");
+#endif
+}
+
+void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *pcc)
+{
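+ /*
+ * Register counts: 32 FPRs + FPSCR (33), 32 VRs + VSCR + VRSAVE (34),
+ * 32 SPE high halves + ACC + SPEFSCR (34), and the 32 VSX low
+ * doublewords.
+ */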
+ if (pcc->insns_flags & PPC_FLOAT) {
+ gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
+ 33, "power-fpu.xml", 0);
+ }
+ if (pcc->insns_flags & PPC_ALTIVEC) {
+ gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
+ 34, "power-altivec.xml", 0);
+ }
+ if (pcc->insns_flags & PPC_SPE) {
+ gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
+ 34, "power-spe.xml", 0);
+ }
+ if (pcc->insns_flags2 & PPC2_VSX) {
+ gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
+ 32, "power-vsx.xml", 0);
+ }
+#ifndef CONFIG_USER_ONLY
+ gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg,
+ pcc->gdb_num_sprs, "power-spr.xml", 0);
+#endif
+}
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
new file mode 100644
index 000000000..627811cef
--- /dev/null
+++ b/target/ppc/helper.h
@@ -0,0 +1,776 @@
+DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, noreturn, env, i32, i32)
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32)
+DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
+#endif
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(store_msr, void, env, tl)
+DEF_HELPER_1(rfi, void, env)
+DEF_HELPER_1(rfsvc, void, env)
+DEF_HELPER_1(40x_rfci, void, env)
+DEF_HELPER_1(rfci, void, env)
+DEF_HELPER_1(rfdi, void, env)
+DEF_HELPER_1(rfmci, void, env)
+#if defined(TARGET_PPC64)
+DEF_HELPER_2(scv, noreturn, env, i32)
+DEF_HELPER_2(pminsn, void, env, i32)
+DEF_HELPER_1(rfid, void, env)
+DEF_HELPER_1(rfscv, void, env)
+DEF_HELPER_1(hrfid, void, env)
+DEF_HELPER_2(store_lpcr, void, env, tl)
+DEF_HELPER_2(store_pcr, void, env, tl)
+#endif
+DEF_HELPER_1(check_tlb_flush_local, void, env)
+DEF_HELPER_1(check_tlb_flush_global, void, env)
+#endif
+
+DEF_HELPER_3(lmw, void, env, tl, i32)
+DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_4(lsw, void, env, tl, i32, i32)
+DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32)
+DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_FLAGS_3(dcbzep, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(icbiep, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
+
+#if defined(TARGET_PPC64)
+DEF_HELPER_4(divdeu, i64, env, i64, i64, i32)
+DEF_HELPER_4(divde, i64, env, i64, i64, i32)
+#endif
+DEF_HELPER_4(divweu, tl, env, tl, tl, i32)
+DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
+
+DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_3(sraw, tl, env, tl, tl)
+DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
+DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_3(srad, tl, env, tl, tl)
+DEF_HELPER_0(darn32, tl)
+DEF_HELPER_0(darn64, tl)
+#endif
+
+DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+DEF_HELPER_1(float_check_status, void, env)
+DEF_HELPER_1(reset_fpstatus, void, env)
+DEF_HELPER_2(compute_fprf_float64, void, env, i64)
+DEF_HELPER_3(store_fpscr, void, env, i64, i32)
+DEF_HELPER_2(fpscr_clrbit, void, env, i32)
+DEF_HELPER_2(fpscr_setbit, void, env, i32)
+DEF_HELPER_FLAGS_1(todouble, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_1(tosingle, TCG_CALL_NO_RWG_SE, i32, i64)
+
+DEF_HELPER_4(fcmpo, void, env, i64, i64, i32)
+DEF_HELPER_4(fcmpu, void, env, i64, i64, i32)
+
+DEF_HELPER_2(fctiw, i64, env, i64)
+DEF_HELPER_2(fctiwu, i64, env, i64)
+DEF_HELPER_2(fctiwz, i64, env, i64)
+DEF_HELPER_2(fctiwuz, i64, env, i64)
+DEF_HELPER_2(fcfid, i64, env, i64)
+DEF_HELPER_2(fcfidu, i64, env, i64)
+DEF_HELPER_2(fcfids, i64, env, i64)
+DEF_HELPER_2(fcfidus, i64, env, i64)
+DEF_HELPER_2(fctid, i64, env, i64)
+DEF_HELPER_2(fctidu, i64, env, i64)
+DEF_HELPER_2(fctidz, i64, env, i64)
+DEF_HELPER_2(fctiduz, i64, env, i64)
+DEF_HELPER_2(frsp, i64, env, i64)
+DEF_HELPER_2(frin, i64, env, i64)
+DEF_HELPER_2(friz, i64, env, i64)
+DEF_HELPER_2(frip, i64, env, i64)
+DEF_HELPER_2(frim, i64, env, i64)
+
+DEF_HELPER_3(fadd, f64, env, f64, f64)
+DEF_HELPER_3(fsub, f64, env, f64, f64)
+DEF_HELPER_3(fmul, f64, env, f64, f64)
+DEF_HELPER_3(fdiv, f64, env, f64, f64)
+DEF_HELPER_4(fmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fmsub, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64)
+DEF_HELPER_2(fsqrt, f64, env, f64)
+DEF_HELPER_2(fre, i64, env, i64)
+DEF_HELPER_2(fres, i64, env, i64)
+DEF_HELPER_2(frsqrte, i64, env, i64)
+DEF_HELPER_4(fsel, i64, env, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64)
+
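+/*
+ * Type glue for the vector helpers below: "avr" and "vsr" arguments
+ * are passed by pointer (ppc_avr_t * and ppc_vsr_t *).
+ */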
+#define dh_alias_avr ptr
+#define dh_ctype_avr ppc_avr_t *
+
+#define dh_alias_vsr ptr
+#define dh_ctype_vsr ppc_vsr_t *
+
+DEF_HELPER_3(vavgub, void, avr, avr, avr)
+DEF_HELPER_3(vavguh, void, avr, avr, avr)
+DEF_HELPER_3(vavguw, void, avr, avr, avr)
+DEF_HELPER_3(vabsdub, void, avr, avr, avr)
+DEF_HELPER_3(vabsduh, void, avr, avr, avr)
+DEF_HELPER_3(vabsduw, void, avr, avr, avr)
+DEF_HELPER_3(vavgsb, void, avr, avr, avr)
+DEF_HELPER_3(vavgsh, void, avr, avr, avr)
+DEF_HELPER_3(vavgsw, void, avr, avr, avr)
+DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_3(vmrglb, void, avr, avr, avr)
+DEF_HELPER_3(vmrglh, void, avr, avr, avr)
+DEF_HELPER_3(vmrglw, void, avr, avr, avr)
+DEF_HELPER_3(vmrghb, void, avr, avr, avr)
+DEF_HELPER_3(vmrghh, void, avr, avr, avr)
+DEF_HELPER_3(vmrghw, void, avr, avr, avr)
+DEF_HELPER_3(vmulesb, void, avr, avr, avr)
+DEF_HELPER_3(vmulesh, void, avr, avr, avr)
+DEF_HELPER_3(vmulesw, void, avr, avr, avr)
+DEF_HELPER_3(vmuleub, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuw, void, avr, avr, avr)
+DEF_HELPER_3(vmulosb, void, avr, avr, avr)
+DEF_HELPER_3(vmulosh, void, avr, avr, avr)
+DEF_HELPER_3(vmulosw, void, avr, avr, avr)
+DEF_HELPER_3(vmuloub, void, avr, avr, avr)
+DEF_HELPER_3(vmulouh, void, avr, avr, avr)
+DEF_HELPER_3(vmulouw, void, avr, avr, avr)
+DEF_HELPER_3(vmulhsw, void, avr, avr, avr)
+DEF_HELPER_3(vmulhuw, void, avr, avr, avr)
+DEF_HELPER_3(vmulhsd, void, avr, avr, avr)
+DEF_HELPER_3(vmulhud, void, avr, avr, avr)
+DEF_HELPER_3(vslo, void, avr, avr, avr)
+DEF_HELPER_3(vsro, void, avr, avr, avr)
+DEF_HELPER_3(vsrv, void, avr, avr, avr)
+DEF_HELPER_3(vslv, void, avr, avr, avr)
+DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
+DEF_HELPER_2(vprtybw, void, avr, avr)
+DEF_HELPER_2(vprtybd, void, avr, avr)
+DEF_HELPER_2(vprtybq, void, avr, avr)
+DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
+DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsubsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsubshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsubsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vaddubs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vadduhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
+DEF_HELPER_3(vadduqm, void, avr, avr, avr)
+DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr)
+DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr)
+DEF_HELPER_3(vaddcuq, void, avr, avr, avr)
+DEF_HELPER_3(vsubuqm, void, avr, avr, avr)
+DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr)
+DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr)
+DEF_HELPER_3(vsubcuq, void, avr, avr, avr)
+DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
+DEF_HELPER_3(vextractub, void, avr, avr, i32)
+DEF_HELPER_3(vextractuh, void, avr, avr, i32)
+DEF_HELPER_3(vextractuw, void, avr, avr, i32)
+DEF_HELPER_3(vextractd, void, avr, avr, i32)
+DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl)
+DEF_HELPER_2(vextsb2w, void, avr, avr)
+DEF_HELPER_2(vextsh2w, void, avr, avr)
+DEF_HELPER_2(vextsb2d, void, avr, avr)
+DEF_HELPER_2(vextsh2d, void, avr, avr)
+DEF_HELPER_2(vextsw2d, void, avr, avr)
+DEF_HELPER_2(vnegw, void, avr, avr)
+DEF_HELPER_2(vnegd, void, avr, avr)
+DEF_HELPER_2(vupkhpx, void, avr, avr)
+DEF_HELPER_2(vupklpx, void, avr, avr)
+DEF_HELPER_2(vupkhsb, void, avr, avr)
+DEF_HELPER_2(vupkhsh, void, avr, avr)
+DEF_HELPER_2(vupkhsw, void, avr, avr)
+DEF_HELPER_2(vupklsb, void, avr, avr)
+DEF_HELPER_2(vupklsh, void, avr, avr)
+DEF_HELPER_2(vupklsw, void, avr, avr)
+DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
+DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkswus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpksdss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpksdus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuhus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuwus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkudus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkudum, void, env, avr, avr, avr)
+DEF_HELPER_3(vpkpx, void, avr, avr, avr)
+DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env)
+DEF_HELPER_3(lvebx, void, env, avr, tl)
+DEF_HELPER_3(lvehx, void, env, avr, tl)
+DEF_HELPER_3(lvewx, void, env, avr, tl)
+DEF_HELPER_3(stvebx, void, env, avr, tl)
+DEF_HELPER_3(stvehx, void, env, avr, tl)
+DEF_HELPER_3(stvewx, void, env, avr, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_4(lxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(lxvll, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvll, void, env, tl, vsr, tl)
+#endif
+DEF_HELPER_4(vsumsws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4sbs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4shs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4ubs, void, env, avr, avr, avr)
+DEF_HELPER_4(vaddfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vminfp, void, env, avr, avr, avr)
+DEF_HELPER_3(vrefp, void, env, avr, avr)
+DEF_HELPER_3(vrsqrtefp, void, env, avr, avr)
+DEF_HELPER_3(vrlwmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldnm, void, avr, avr, avr)
+DEF_HELPER_3(vrlwnm, void, avr, avr, avr)
+DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr)
+DEF_HELPER_3(vexptefp, void, env, avr, avr)
+DEF_HELPER_3(vlogefp, void, env, avr, avr)
+DEF_HELPER_3(vrfim, void, env, avr, avr)
+DEF_HELPER_3(vrfin, void, env, avr, avr)
+DEF_HELPER_3(vrfip, void, env, avr, avr)
+DEF_HELPER_3(vrfiz, void, env, avr, avr)
+DEF_HELPER_4(vcfux, void, env, avr, avr, i32)
+DEF_HELPER_4(vcfsx, void, env, avr, avr, i32)
+DEF_HELPER_4(vctuxs, void, env, avr, avr, i32)
+DEF_HELPER_4(vctsxs, void, env, avr, avr, i32)
+
+DEF_HELPER_2(vclzb, void, avr, avr)
+DEF_HELPER_2(vclzh, void, avr, avr)
+DEF_HELPER_2(vctzb, void, avr, avr)
+DEF_HELPER_2(vctzh, void, avr, avr)
+DEF_HELPER_2(vctzw, void, avr, avr)
+DEF_HELPER_2(vctzd, void, avr, avr)
+DEF_HELPER_2(vpopcntb, void, avr, avr)
+DEF_HELPER_2(vpopcnth, void, avr, avr)
+DEF_HELPER_2(vpopcntw, void, avr, avr)
+DEF_HELPER_2(vpopcntd, void, avr, avr)
+DEF_HELPER_1(vclzlsbb, tl, avr)
+DEF_HELPER_1(vctzlsbb, tl, avr)
+DEF_HELPER_3(vbpermd, void, avr, avr, avr)
+DEF_HELPER_3(vbpermq, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumh, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumw, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumd, void, avr, avr, avr)
+DEF_HELPER_2(vextublx, tl, tl, avr)
+DEF_HELPER_2(vextuhlx, tl, tl, avr)
+DEF_HELPER_2(vextuwlx, tl, tl, avr)
+DEF_HELPER_2(vextubrx, tl, tl, avr)
+DEF_HELPER_2(vextuhrx, tl, tl, avr)
+DEF_HELPER_2(vextuwrx, tl, tl, avr)
+DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDDVLX, void, env, avr, avr, avr, tl)
+
+DEF_HELPER_2(vsbox, void, avr, avr)
+DEF_HELPER_3(vcipher, void, avr, avr, avr)
+DEF_HELPER_3(vcipherlast, void, avr, avr, avr)
+DEF_HELPER_3(vncipher, void, avr, avr, avr)
+DEF_HELPER_3(vncipherlast, void, avr, avr, avr)
+DEF_HELPER_3(vshasigmaw, void, avr, avr, i32)
+DEF_HELPER_3(vshasigmad, void, avr, avr, i32)
+DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr)
+
+DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32)
+DEF_HELPER_3(bcdcfn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdcfz, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctz, i32, avr, avr, i32)
+DEF_HELPER_3(bcdcfsq, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctsq, i32, avr, avr, i32)
+DEF_HELPER_4(bcdcpsgn, i32, avr, avr, avr, i32)
+DEF_HELPER_3(bcdsetsgn, i32, avr, avr, i32)
+DEF_HELPER_4(bcds, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdus, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32)
+
+DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xsredp, void, env, vsr, vsr)
+DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
+DEF_HELPER_2(xscvdpspn, i64, env, i64)
+DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvspdp, void, env, vsr, vsr)
+DEF_HELPER_2(xscvspdpn, i64, env, i64)
+DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xststdcsp, void, env, i32, vsr)
+DEF_HELPER_2(xststdcdp, void, env, i32)
+DEF_HELPER_2(xststdcqp, void, env, i32)
+DEF_HELPER_3(xsrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpiz, void, env, vsr, vsr)
+DEF_HELPER_4(xsrqpi, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr)
+DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr)
+
+DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xsresp, void, env, vsr, vsr)
+DEF_HELPER_2(xsrsp, i64, env, i64)
+DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
+
+DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvredp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr)
+
+DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvresp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr)
+DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr)
+DEF_HELPER_2(xvtstdcsp, void, env, i32)
+DEF_HELPER_2(xvtstdcdp, void, env, i32)
+DEF_HELPER_3(xvrspi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspiz, void, env, vsr, vsr)
+DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
+DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
+DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
+DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32)
+
+DEF_HELPER_2(efscfsi, i32, env, i32)
+DEF_HELPER_2(efscfui, i32, env, i32)
+DEF_HELPER_2(efscfuf, i32, env, i32)
+DEF_HELPER_2(efscfsf, i32, env, i32)
+DEF_HELPER_2(efsctsi, i32, env, i32)
+DEF_HELPER_2(efsctui, i32, env, i32)
+DEF_HELPER_2(efsctsiz, i32, env, i32)
+DEF_HELPER_2(efsctuiz, i32, env, i32)
+DEF_HELPER_2(efsctsf, i32, env, i32)
+DEF_HELPER_2(efsctuf, i32, env, i32)
+DEF_HELPER_2(evfscfsi, i64, env, i64)
+DEF_HELPER_2(evfscfui, i64, env, i64)
+DEF_HELPER_2(evfscfuf, i64, env, i64)
+DEF_HELPER_2(evfscfsf, i64, env, i64)
+DEF_HELPER_2(evfsctsi, i64, env, i64)
+DEF_HELPER_2(evfsctui, i64, env, i64)
+DEF_HELPER_2(evfsctsiz, i64, env, i64)
+DEF_HELPER_2(evfsctuiz, i64, env, i64)
+DEF_HELPER_2(evfsctsf, i64, env, i64)
+DEF_HELPER_2(evfsctuf, i64, env, i64)
+DEF_HELPER_3(efsadd, i32, env, i32, i32)
+DEF_HELPER_3(efssub, i32, env, i32, i32)
+DEF_HELPER_3(efsmul, i32, env, i32, i32)
+DEF_HELPER_3(efsdiv, i32, env, i32, i32)
+DEF_HELPER_3(evfsadd, i64, env, i64, i64)
+DEF_HELPER_3(evfssub, i64, env, i64, i64)
+DEF_HELPER_3(evfsmul, i64, env, i64, i64)
+DEF_HELPER_3(evfsdiv, i64, env, i64, i64)
+DEF_HELPER_3(efststlt, i32, env, i32, i32)
+DEF_HELPER_3(efststgt, i32, env, i32, i32)
+DEF_HELPER_3(efststeq, i32, env, i32, i32)
+DEF_HELPER_3(efscmplt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpgt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpeq, i32, env, i32, i32)
+DEF_HELPER_3(evfststlt, i32, env, i64, i64)
+DEF_HELPER_3(evfststgt, i32, env, i64, i64)
+DEF_HELPER_3(evfststeq, i32, env, i64, i64)
+DEF_HELPER_3(evfscmplt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpgt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpeq, i32, env, i64, i64)
+DEF_HELPER_2(efdcfsi, i64, env, i32)
+DEF_HELPER_2(efdcfsid, i64, env, i64)
+DEF_HELPER_2(efdcfui, i64, env, i32)
+DEF_HELPER_2(efdcfuid, i64, env, i64)
+DEF_HELPER_2(efdctsi, i32, env, i64)
+DEF_HELPER_2(efdctui, i32, env, i64)
+DEF_HELPER_2(efdctsiz, i32, env, i64)
+DEF_HELPER_2(efdctsidz, i64, env, i64)
+DEF_HELPER_2(efdctuiz, i32, env, i64)
+DEF_HELPER_2(efdctuidz, i64, env, i64)
+DEF_HELPER_2(efdcfsf, i64, env, i32)
+DEF_HELPER_2(efdcfuf, i64, env, i32)
+DEF_HELPER_2(efdctsf, i32, env, i64)
+DEF_HELPER_2(efdctuf, i32, env, i64)
+DEF_HELPER_2(efscfd, i32, env, i64)
+DEF_HELPER_2(efdcfs, i64, env, i32)
+DEF_HELPER_3(efdadd, i64, env, i64, i64)
+DEF_HELPER_3(efdsub, i64, env, i64, i64)
+DEF_HELPER_3(efdmul, i64, env, i64, i64)
+DEF_HELPER_3(efddiv, i64, env, i64, i64)
+DEF_HELPER_3(efdtstlt, i32, env, i64, i64)
+DEF_HELPER_3(efdtstgt, i32, env, i64, i64)
+DEF_HELPER_3(efdtsteq, i32, env, i64, i64)
+DEF_HELPER_3(efdcmplt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpgt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpeq, i32, env, i64, i64)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(4xx_tlbre_hi, tl, env, tl)
+DEF_HELPER_2(4xx_tlbre_lo, tl, env, tl)
+DEF_HELPER_3(4xx_tlbwe_hi, void, env, tl, tl)
+DEF_HELPER_3(4xx_tlbwe_lo, void, env, tl, tl)
+DEF_HELPER_2(4xx_tlbsx, tl, env, tl)
+DEF_HELPER_3(440_tlbre, tl, env, i32, tl)
+DEF_HELPER_4(440_tlbwe, void, env, i32, tl, tl)
+DEF_HELPER_2(440_tlbsx, tl, env, tl)
+DEF_HELPER_1(booke206_tlbre, void, env)
+DEF_HELPER_1(booke206_tlbwe, void, env)
+DEF_HELPER_2(booke206_tlbsx, void, env, tl)
+DEF_HELPER_2(booke206_tlbivax, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx0, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx1, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx3, void, env, tl)
+DEF_HELPER_2(booke206_tlbflush, void, env, tl)
+DEF_HELPER_3(booke_setpid, void, env, i32, tl)
+DEF_HELPER_2(booke_set_eplc, void, env, tl)
+DEF_HELPER_2(booke_set_epsc, void, env, tl)
+DEF_HELPER_2(6xx_tlbd, void, env, tl)
+DEF_HELPER_2(6xx_tlbi, void, env, tl)
+DEF_HELPER_2(74xx_tlbd, void, env, tl)
+DEF_HELPER_2(74xx_tlbi, void, env, tl)
+DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_2(load_slb_esid, tl, env, tl)
+DEF_HELPER_2(load_slb_vsid, tl, env, tl)
+DEF_HELPER_2(find_slb_vsid, tl, env, tl)
+DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
+DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
+
+DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_1(msgsnd, void, tl)
+DEF_HELPER_2(msgclr, void, env, tl)
+DEF_HELPER_1(book3s_msgsnd, void, tl)
+DEF_HELPER_2(book3s_msgclr, void, env, tl)
+#endif
+
+DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32)
+DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(rac, tl, env, tl)
+#endif
+DEF_HELPER_3(div, tl, env, tl, tl)
+DEF_HELPER_3(divo, tl, env, tl, tl)
+DEF_HELPER_3(divs, tl, env, tl, tl)
+DEF_HELPER_3(divso, tl, env, tl, tl)
+
+DEF_HELPER_2(load_dcr, tl, env, tl)
+DEF_HELPER_3(store_dcr, void, env, tl, tl)
+
+DEF_HELPER_2(load_dump_spr, void, env, i32)
+DEF_HELPER_2(store_dump_spr, void, env, i32)
+DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32)
+DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env)
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_purr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_ptcr, void, env, tl)
+DEF_HELPER_FLAGS_1(load_dpdes, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_dpdes, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(book3s_msgsndp, void, env, tl)
+DEF_HELPER_2(book3s_msgclrp, void, env, tl)
+#endif
+DEF_HELPER_2(store_sdr1, void, env, tl)
+DEF_HELPER_2(store_pidr, void, env, tl)
+DEF_HELPER_2(store_lpidr, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_vtb, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbu40, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_hid0_601, void, env, tl)
+DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
+DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
+DEF_HELPER_2(store_40x_sler, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tsr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_3(store_ibatl, void, env, i32, tl)
+DEF_HELPER_3(store_ibatu, void, env, i32, tl)
+DEF_HELPER_3(store_dbatl, void, env, i32, tl)
+DEF_HELPER_3(store_dbatu, void, env, i32, tl)
+DEF_HELPER_3(store_601_batl, void, env, i32, tl)
+DEF_HELPER_3(store_601_batu, void, env, i32, tl)
+#endif
+
+#define dh_alias_fprp ptr
+#define dh_ctype_fprp ppc_fprp_t *
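+/* As with avr/vsr above, 'fprp' arguments reach helpers as ppc_fprp_t *. */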
+
+DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_3(DCMPO, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPU, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTDC, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDG, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp)
+DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp)
+DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_3(DCTDP, void, env, fprp, fprp)
+DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DRSP, void, env, fprp, fprp)
+DEF_HELPER_3(DRDPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr)
+DEF_HELPER_3(DCTFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp)
+DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_3(DXEX, void, env, fprp, fprp)
+DEF_HELPER_3(DXEXQ, void, env, fprp, fprp)
+DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32)
+
+DEF_HELPER_1(tbegin, void, env)
+DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
+
+#ifdef TARGET_PPC64
+DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
+ void, env, tl, i64, i64, i32)
+DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
+ void, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
+DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
+#endif
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
new file mode 100644
index 000000000..99562edd5
--- /dev/null
+++ b/target/ppc/helper_regs.c
@@ -0,0 +1,301 @@
+/*
+ * PowerPC emulation special registers manipulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "helper_regs.h"
+
+/* Swap temporary saved registers with GPRs */
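+/* (On e.g. the 603, MSR[TGPR] shadows GPR0-3 for software TLB miss handlers.) */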
+void hreg_swap_gpr_tgpr(CPUPPCState *env)
+{
+ target_ulong tmp;
+
+ tmp = env->gpr[0];
+ env->gpr[0] = env->tgpr[0];
+ env->tgpr[0] = tmp;
+ tmp = env->gpr[1];
+ env->gpr[1] = env->tgpr[1];
+ env->tgpr[1] = tmp;
+ tmp = env->gpr[2];
+ env->gpr[2] = env->tgpr[2];
+ env->tgpr[2] = tmp;
+ tmp = env->gpr[3];
+ env->gpr[3] = env->tgpr[3];
+ env->tgpr[3] = tmp;
+}
+
+static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
+{
+ target_ulong msr = env->msr;
+ uint32_t ppc_flags = env->flags;
+ uint32_t hflags = 0;
+ uint32_t msr_mask;
+
+ /* Some bits come straight across from MSR. */
+ QEMU_BUILD_BUG_ON(MSR_LE != HFLAGS_LE);
+ QEMU_BUILD_BUG_ON(MSR_PR != HFLAGS_PR);
+ QEMU_BUILD_BUG_ON(MSR_DR != HFLAGS_DR);
+ QEMU_BUILD_BUG_ON(MSR_FP != HFLAGS_FP);
+ msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
+ (1 << MSR_DR) | (1 << MSR_FP));
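+ /*
+ * Example: MSR_LE and HFLAGS_LE denote the same bit position, so the
+ * "return hflags | (msr & msr_mask)" at the end of this function copies
+ * such bits across without shifting; the QEMU_BUILD_BUG_ONs above
+ * guarantee that the two layouts stay in sync.
+ */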
+
+ if (ppc_flags & POWERPC_FLAG_HID0_LE) {
+ /*
+ * Note that MSR_LE is not set in env->msr_mask for this cpu,
+ * and so will never be set in msr.
+ */
+ uint32_t le = extract32(env->spr[SPR_HID0], 3, 1);
+ hflags |= le << MSR_LE;
+ }
+
+ if (ppc_flags & POWERPC_FLAG_DE) {
+ target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
+ if (dbcr0 & DBCR0_ICMP) {
+ hflags |= 1 << HFLAGS_SE;
+ }
+ if (dbcr0 & DBCR0_BRT) {
+ hflags |= 1 << HFLAGS_BE;
+ }
+ } else {
+ if (ppc_flags & POWERPC_FLAG_BE) {
+ QEMU_BUILD_BUG_ON(MSR_BE != HFLAGS_BE);
+ msr_mask |= 1 << MSR_BE;
+ }
+ if (ppc_flags & POWERPC_FLAG_SE) {
+ QEMU_BUILD_BUG_ON(MSR_SE != HFLAGS_SE);
+ msr_mask |= 1 << MSR_SE;
+ }
+ }
+
+ if (msr_is_64bit(env, msr)) {
+ hflags |= 1 << HFLAGS_64;
+ }
+ if ((ppc_flags & POWERPC_FLAG_SPE) && (msr & (1 << MSR_SPE))) {
+ hflags |= 1 << HFLAGS_SPE;
+ }
+ if (ppc_flags & POWERPC_FLAG_VRE) {
+ QEMU_BUILD_BUG_ON(MSR_VR != HFLAGS_VR);
+ msr_mask |= 1 << MSR_VR;
+ }
+ if (ppc_flags & POWERPC_FLAG_VSX) {
+ QEMU_BUILD_BUG_ON(MSR_VSX != HFLAGS_VSX);
+ msr_mask |= 1 << MSR_VSX;
+ }
+ if ((ppc_flags & POWERPC_FLAG_TM) && (msr & (1ull << MSR_TM))) {
+ hflags |= 1 << HFLAGS_TM;
+ }
+ if (env->spr[SPR_LPCR] & LPCR_GTSE) {
+ hflags |= 1 << HFLAGS_GTSE;
+ }
+ if (env->spr[SPR_LPCR] & LPCR_HR) {
+ hflags |= 1 << HFLAGS_HR;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
+ hflags |= 1 << HFLAGS_PMCC0;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
+ hflags |= 1 << HFLAGS_PMCC1;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
+ hflags |= 1 << HFLAGS_HV;
+ }
+
+ /*
+ * This is our encoding for server processors. The architecture
+ * specifies that there is no such thing as userspace with
+ * translation off; however, it appears that MacOS does it and some
+ * 32-bit CPUs support it. Weird...
+ *
+ * 0 = Guest User space virtual mode
+ * 1 = Guest Kernel space virtual mode
+ * 2 = Guest User space real mode
+ * 3 = Guest Kernel space real mode
+ * 4 = HV User space virtual mode
+ * 5 = HV Kernel space virtual mode
+ * 6 = HV User space real mode
+ * 7 = HV Kernel space real mode
+ *
+ * For BookE, we need 8 MMU modes as follows:
+ *
+ * 0 = AS 0 HV User space
+ * 1 = AS 0 HV Kernel space
+ * 2 = AS 1 HV User space
+ * 3 = AS 1 HV Kernel space
+ * 4 = AS 0 Guest User space
+ * 5 = AS 0 Guest Kernel space
+ * 6 = AS 1 Guest User space
+ * 7 = AS 1 Guest Kernel space
+ */
+ unsigned immu_idx, dmmu_idx;
+ dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
+ if (env->mmu_model & POWERPC_MMU_BOOKE) {
+ dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
+ immu_idx = dmmu_idx;
+ immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
+ dmmu_idx |= msr & (1 << MSR_DS) ? 2 : 0;
+ } else {
+ dmmu_idx |= msr & (1ull << MSR_HV) ? 4 : 0;
+ immu_idx = dmmu_idx;
+ immu_idx |= msr & (1 << MSR_IR) ? 0 : 2;
+ dmmu_idx |= msr & (1 << MSR_DR) ? 0 : 2;
+ }
+ hflags |= immu_idx << HFLAGS_IMMU_IDX;
+ hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
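+ /*
+ * Worked example (BookE): MSR.GS=1, MSR.PR=0, MSR.IS=0, MSR.DS=1 gives
+ * dmmu_idx = 1 | 4 | 2 = 7 (AS 1 Guest Kernel space) and
+ * immu_idx = 1 | 4 = 5 (AS 0 Guest Kernel space), matching the table
+ * in the comment above.
+ */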
+#endif
+
+ return hflags | (msr & msr_mask);
+}
+
+void hreg_compute_hflags(CPUPPCState *env)
+{
+ env->hflags = hreg_compute_hflags_value(env);
+}
+
+#ifdef CONFIG_DEBUG_TCG
+void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ uint32_t hflags_current = env->hflags;
+ uint32_t hflags_rebuilt;
+
+ *pc = env->nip;
+ *cs_base = 0;
+ *flags = hflags_current;
+
+ hflags_rebuilt = hreg_compute_hflags_value(env);
+ if (unlikely(hflags_current != hflags_rebuilt)) {
+ cpu_abort(env_cpu(env),
+ "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
+ hflags_current, hflags_rebuilt);
+ }
+}
+#endif
+
+void cpu_interrupt_exittb(CPUState *cs)
+{
+ if (!kvm_enabled()) {
+ return;
+ }
+
+ if (!qemu_mutex_iothread_locked()) {
+ qemu_mutex_lock_iothread();
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+ qemu_mutex_unlock_iothread();
+ } else {
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+ }
+}
+
+int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
+{
+ int excp;
+#if !defined(CONFIG_USER_ONLY)
+ CPUState *cs = env_cpu(env);
+#endif
+
+ excp = 0;
+ value &= env->msr_mask;
+#if !defined(CONFIG_USER_ONLY)
+ /* Neither mtmsr nor guest state can alter HV */
+ if (!alter_hv || !(env->msr & MSR_HVB)) {
+ value &= ~MSR_HVB;
+ value |= env->msr & MSR_HVB;
+ }
+ if (((value >> MSR_IR) & 1) != msr_ir ||
+ ((value >> MSR_DR) & 1) != msr_dr) {
+ cpu_interrupt_exittb(cs);
+ }
+ if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
+ ((value >> MSR_GS) & 1) != msr_gs) {
+ cpu_interrupt_exittb(cs);
+ }
+ if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
+ ((value ^ env->msr) & (1 << MSR_TGPR)))) {
+ /* Swap temporary saved registers with GPRs */
+ hreg_swap_gpr_tgpr(env);
+ }
+ if (unlikely(((value >> MSR_EP) & 1) != msr_ep)) {
+ /* Change the exception prefix on PowerPC 601 */
+ env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+ }
+ /*
+ * If PR=1 then EE, IR and DR must be 1
+ *
+ * Note: We only enforce this on 64-bit server processors.
+ * It appears that:
+ * - 32-bit implementations support PR=1 with EE/DR/IR=0, and MacOS
+ * exploits it.
+ * - 64-bit embedded implementations do not need any operation to be
+ * performed when PR is set.
+ */
+ if (is_book3s_arch2x(env) && ((value >> MSR_PR) & 1)) {
+ value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
+ }
+#endif
+ env->msr = value;
+ hreg_compute_hflags(env);
+#if !defined(CONFIG_USER_ONLY)
+ if (unlikely(msr_pow == 1)) {
+ if (!env->pending_interrupts && (*env->check_pow)(env)) {
+ cs->halted = 1;
+ excp = EXCP_HALTED;
+ }
+ }
+#endif
+
+ return excp;
+}
+
+#ifdef CONFIG_SOFTMMU
+void store_40x_sler(CPUPPCState *env, uint32_t val)
+{
+ /* XXX: TO BE FIXED */
+ if (val != 0x00000000) {
+ cpu_abort(env_cpu(env),
+ "Little-endian regions are not supported by now\n");
+ }
+ env->spr[SPR_405_SLER] = val;
+}
+#endif /* CONFIG_SOFTMMU */
+
+#ifndef CONFIG_USER_ONLY
+void check_tlb_flush(CPUPPCState *env, bool global)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* Handle global flushes first */
+ if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+ env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
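+ /* A global flush covers this vCPU too, so the local flag is dropped as well. */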
+ env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ tlb_flush_all_cpus_synced(cs);
+ return;
+ }
+
+ /* Then handle local ones */
+ if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
+ env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ tlb_flush(cs);
+ }
+}
+#endif
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
new file mode 100644
index 000000000..42f26870b
--- /dev/null
+++ b/target/ppc/helper_regs.h
@@ -0,0 +1,34 @@
+/*
+ * PowerPC emulation special registers manipulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HELPER_REGS_H
+#define HELPER_REGS_H
+
+void hreg_swap_gpr_tgpr(CPUPPCState *env);
+void hreg_compute_hflags(CPUPPCState *env);
+void cpu_interrupt_exittb(CPUState *cs);
+int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv);
+
+#ifdef CONFIG_USER_ONLY
+static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
+#else
+void check_tlb_flush(CPUPPCState *env, bool global);
+#endif
+
+#endif /* HELPER_REGS_H */
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
new file mode 100644
index 000000000..e135b8aba
--- /dev/null
+++ b/target/ppc/insn32.decode
@@ -0,0 +1,429 @@
+#
+# Power ISA decode for 32-bit insns (opcode space 0)
+#
+# Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+&D rt ra si:int64_t
+@D ...... rt:5 ra:5 si:s16 &D
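+
+# Illustration: with @D, a pattern such as
+# LWZ 100000 ..... ..... ................ @D
+# (see "Fixed-Point Load Instructions" below) matches primary opcode
+# 0b100000 and extracts rt, ra and the signed 16-bit si displacement.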
+
+&D_bf bf l:bool ra imm
+@D_bfs ...... bf:3 - l:1 ra:5 imm:s16 &D_bf
+@D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf
+
+%dq_si 4:s12 !function=times_16
+%dq_rtp 22:4 !function=times_2
+@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si
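+# (As the helper names suggest, %dq_si scales the signed 12-bit field by 16,
+# recovering the quadword-aligned DQ displacement, and %dq_rtp doubles the
+# 4-bit field to form an even register-pair number.)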
+
+%dq_rt_tsx 3:1 21:5
+@DQ_TSX ...... ..... ra:5 ............ .... &D si=%dq_si rt=%dq_rt_tsx
+
+%rt_tsxp 21:1 22:4 !function=times_2
+@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp
+
+%ds_si 2:s14 !function=times_4
+@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si
+
+%ds_rtp 22:4 !function=times_2
+@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si
+
+&DX rt d
+%dx_d 6:s10 16:5 0:1
+@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
+
+&VA vrt vra vrb rc
+@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA
+
+&VN vrt vra vrb sh
+@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN
+
+&VX vrt vra vrb
+@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX
+
+&VX_uim4 vrt uim vrb
+@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4
+
+&X rt ra rb
+@X ...... rt:5 ra:5 rb:5 .......... . &X
+
+&X_rc rt ra rb rc:bool
+@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc
+
+%x_frtp 22:4 !function=times_2
+%x_frap 17:4 !function=times_2
+%x_frbp 12:4 !function=times_2
+@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp
+
+@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp
+
+&X_tb_rc rt rb rc:bool
+@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc
+
+@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp
+
+@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp
+
+@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp
+
+&X_bi rt bi
+@X_bi ...... rt:5 bi:5 ----- .......... - &X_bi
+
+&X_bf bf ra rb
+@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf
+
+@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp
+
+@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp
+
+&X_bf_uim bf uim rb
+@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim
+
+@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp
+
+&X_bfl bf l:bool ra rb
+@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl
+
+%x_xt 0:1 21:5
+&X_imm8 xt imm:uint8_t
+@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt
+
+&X_uim5 xt uim:uint8_t
+@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt
+
+&X_tb_sp_rc rt rb sp rc:bool
+@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc
+
+@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp
+
+&X_tb_s_rc rt rb s:bool rc:bool
+@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc
+
+@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp
+
+%x_rt_tsx 0:1 21:5
+@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx
+@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp
+
+&X_frtp_vrb frtp vrb
+@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp
+
+&X_vrt_frbp vrt frbp
+@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp
+
+&XX2 xt xb uim:uint8_t
+%xx2_xt 0:1 21:5
+%xx2_xb 1:1 11:5
+@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx2_xt xb=%xx2_xb
+
+&Z22_bf_fra bf fra dm
+@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra
+
+%z22_frap 17:4 !function=times_2
+@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap
+
+&Z22_ta_sh_rc rt ra sh rc:bool
+@Z22_ta_sh_rc ...... rt:5 ra:5 sh:6 ......... rc:1 &Z22_ta_sh_rc
+
+%z22_frtp 22:4 !function=times_2
+@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap
+
+&Z23_tab frt fra frb rmc rc:bool
+@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ rc:1 &Z23_tab
+
+%z23_frtp 22:4 !function=times_2
+%z23_frap 17:4 !function=times_2
+%z23_frbp 12:4 !function=times_2
+@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp
+
+@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp
+
+&Z23_tb frt frb r:bool rmc rc:bool
+@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb
+
+@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp
+
+&Z23_te_tb te frt frb rmc rc:bool
+@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb
+
+@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp
+
+### Fixed-Point Load Instructions
+
+LBZ 100010 ..... ..... ................ @D
+LBZU 100011 ..... ..... ................ @D
+LBZX 011111 ..... ..... ..... 0001010111 - @X
+LBZUX 011111 ..... ..... ..... 0001110111 - @X
+
+LHZ 101000 ..... ..... ................ @D
+LHZU 101001 ..... ..... ................ @D
+LHZX 011111 ..... ..... ..... 0100010111 - @X
+LHZUX 011111 ..... ..... ..... 0100110111 - @X
+
+LHA 101010 ..... ..... ................ @D
+LHAU 101011 ..... ..... ................ @D
+LHAX 011111 ..... ..... ..... 0101010111 - @X
+LHAXU 011111 ..... ..... ..... 0101110111 - @X
+
+LWZ 100000 ..... ..... ................ @D
+LWZU 100001 ..... ..... ................ @D
+LWZX 011111 ..... ..... ..... 0000010111 - @X
+LWZUX 011111 ..... ..... ..... 0000110111 - @X
+
+LWA 111010 ..... ..... ..............10 @DS
+LWAX 011111 ..... ..... ..... 0101010101 - @X
+LWAUX 011111 ..... ..... ..... 0101110101 - @X
+
+LD 111010 ..... ..... ..............00 @DS
+LDU 111010 ..... ..... ..............01 @DS
+LDX 011111 ..... ..... ..... 0000010101 - @X
+LDUX 011111 ..... ..... ..... 0000110101 - @X
+
+LQ 111000 ..... ..... ............ ---- @DQ_rtp
+
+### Fixed-Point Store Instructions
+
+STB 100110 ..... ..... ................ @D
+STBU 100111 ..... ..... ................ @D
+STBX 011111 ..... ..... ..... 0011010111 - @X
+STBUX 011111 ..... ..... ..... 0011110111 - @X
+
+STH 101100 ..... ..... ................ @D
+STHU 101101 ..... ..... ................ @D
+STHX 011111 ..... ..... ..... 0110010111 - @X
+STHUX 011111 ..... ..... ..... 0110110111 - @X
+
+STW 100100 ..... ..... ................ @D
+STWU 100101 ..... ..... ................ @D
+STWX 011111 ..... ..... ..... 0010010111 - @X
+STWUX 011111 ..... ..... ..... 0010110111 - @X
+
+STD 111110 ..... ..... ..............00 @DS
+STDU 111110 ..... ..... ..............01 @DS
+STDX 011111 ..... ..... ..... 0010010101 - @X
+STDUX 011111 ..... ..... ..... 0010110101 - @X
+
+STQ 111110 ..... ..... ..............10 @DS_rtp
+
+### Fixed-Point Compare Instructions
+
+CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl
+CMPL 011111 ... - . ..... ..... 0000100000 - @X_bfl
+CMPI 001011 ... - . ..... ................ @D_bfs
+CMPLI 001010 ... - . ..... ................ @D_bfu
+
+### Fixed-Point Arithmetic Instructions
+
+ADDI 001110 ..... ..... ................ @D
+ADDIS 001111 ..... ..... ................ @D
+
+ADDPCIS 010011 ..... ..... .......... 00010 . @DX
+
+## Fixed-Point Logical Instructions
+
+CFUGED 011111 ..... ..... ..... 0011011100 - @X
+CNTLZDM 011111 ..... ..... ..... 0000111011 - @X
+CNTTZDM 011111 ..... ..... ..... 1000111011 - @X
+PDEPD 011111 ..... ..... ..... 0010011100 - @X
+PEXTD 011111 ..... ..... ..... 0010111100 - @X
+
+### Floating-Point Load Instructions
+
+LFS 110000 ..... ..... ................ @D
+LFSU 110001 ..... ..... ................ @D
+LFSX 011111 ..... ..... ..... 1000010111 - @X
+LFSUX 011111 ..... ..... ..... 1000110111 - @X
+
+LFD 110010 ..... ..... ................ @D
+LFDU 110011 ..... ..... ................ @D
+LFDX 011111 ..... ..... ..... 1001010111 - @X
+LFDUX 011111 ..... ..... ..... 1001110111 - @X
+
+### Floating-Point Store Instructions
+
+STFS 110100 ..... ..... ................ @D
+STFSU 110101 ..... ..... ................ @D
+STFSX 011111 ..... ..... ..... 1010010111 - @X
+STFSUX 011111 ..... ..... ..... 1010110111 - @X
+
+STFD 110110 ..... ..... ................ @D
+STFDU 110111 ..... ..... ................ @D
+STFDX 011111 ..... ..... ..... 1011010111 - @X
+STFDUX 011111 ..... ..... ..... 1011110111 - @X
+
+### Move To/From System Register Instructions
+
+SETBC 011111 ..... ..... ----- 0110000000 - @X_bi
+SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi
+SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi
+SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi
+
+### Decimal Floating-Point Arithmetic Instructions
+
+DADD 111011 ..... ..... ..... 0000000010 . @X_rc
+DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc
+
+DSUB 111011 ..... ..... ..... 1000000010 . @X_rc
+DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc
+
+DMUL 111011 ..... ..... ..... 0000100010 . @X_rc
+DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc
+
+DDIV 111011 ..... ..... ..... 1000100010 . @X_rc
+DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc
+
+### Decimal Floating-Point Compare Instructions
+
+DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf
+DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp
+
+DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf
+DCMPOQ 111111 ... -- ..... ..... 0010000010 - @X_bf_ap_bp
+
+### Decimal Floating-Point Test Instructions
+
+DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra
+DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap
+
+DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra
+DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap
+
+DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf
+DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp
+
+DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf
+DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp
+
+DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim
+DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp
+
+### Decimal Floating-Point Quantum Adjustment Instructions
+
+DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb
+DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp
+
+DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab
+DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp
+
+DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab
+DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp
+
+DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb
+DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp
+
+DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb
+DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp
+
+### Decimal Floating-Point Conversion Instructions
+
+DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc
+DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc
+
+DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc
+DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc
+
+DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc
+DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc
+DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb
+
+DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc
+DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc
+DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp
+
+### Decimal Floating-Point Format Instructions
+
+DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc
+DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc
+
+DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc
+DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc
+
+DXEX 111011 ..... ----- ..... 0101100010 . @X_tb_rc
+DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc
+
+DIEX 111011 ..... ..... ..... 1101100010 . @X_rc
+DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc
+
+DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc
+DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
+
+DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
+DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
+
+## Vector Bit Manipulation Instructions
+
+VCFUGED 000100 ..... ..... ..... 10101001101 @VX
+VCLZDM 000100 ..... ..... ..... 11110000100 @VX
+VCTZDM 000100 ..... ..... ..... 11111000100 @VX
+VPDEPD 000100 ..... ..... ..... 10111001101 @VX
+VPEXTD 000100 ..... ..... ..... 10110001101 @VX
+
+## Vector Permute and Formatting Instructions
+
+VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA
+VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA
+VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA
+VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA
+VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA
+VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA
+VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA
+VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA
+
+VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4
+VINSERTH 000100 ..... - .... ..... 01101001101 @VX_uim4
+VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4
+VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4
+
+VINSBLX 000100 ..... ..... ..... 01000001111 @VX
+VINSBRX 000100 ..... ..... ..... 01100001111 @VX
+VINSHLX 000100 ..... ..... ..... 01001001111 @VX
+VINSHRX 000100 ..... ..... ..... 01101001111 @VX
+VINSWLX 000100 ..... ..... ..... 01010001111 @VX
+VINSWRX 000100 ..... ..... ..... 01110001111 @VX
+VINSDLX 000100 ..... ..... ..... 01011001111 @VX
+VINSDRX 000100 ..... ..... ..... 01111001111 @VX
+
+VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4
+VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4
+
+VINSBVLX 000100 ..... ..... ..... 00000001111 @VX
+VINSBVRX 000100 ..... ..... ..... 00100001111 @VX
+VINSHVLX 000100 ..... ..... ..... 00001001111 @VX
+VINSHVRX 000100 ..... ..... ..... 00101001111 @VX
+VINSWVLX 000100 ..... ..... ..... 00010001111 @VX
+VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
+
+VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
+VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
+
+# VSX Load/Store Instructions
+
+LXV 111101 ..... ..... ............ . 001 @DQ_TSX
+STXV 111101 ..... ..... ............ . 101 @DQ_TSX
+LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP
+STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP
+LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX
+STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
+LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
+STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
+
+## VSX Splat Instructions
+
+XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
+XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2
+
+## VSX Vector Load Special Value Instruction
+
+LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5
diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode
new file mode 100644
index 000000000..39e610913
--- /dev/null
+++ b/target/ppc/insn64.decode
@@ -0,0 +1,196 @@
+#
+# Power ISA decode for 64-bit prefixed insns (opcode space 0 and 1)
+#
+# Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+# Format MLS:D and 8LS:D
+&PLS_D rt ra si:int64_t r:bool
+%pls_si 32:s18 0:16
+@PLS_D ...... .. ... r:1 .. .................. \
+ ...... rt:5 ra:5 ................ \
+ &PLS_D si=%pls_si
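+# (%pls_si concatenates the signed 18-bit field at bit 32 of the prefix word
+# with the 16-bit field at bit 0 of the suffix, forming the 34-bit
+# displacement used by PADDI and the prefixed loads/stores below.)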
+@8LS_D_TSX ...... .. . .. r:1 .. .................. \
+ ..... rt:6 ra:5 ................ \
+ &PLS_D si=%pls_si
+
+%rt_tsxp 21:1 22:4 !function=times_2
+@8LS_D_TSXP ...... .. . .. r:1 .. .................. \
+ ...... ..... ra:5 ................ \
+ &PLS_D si=%pls_si rt=%rt_tsxp
+
+# Format 8RR:D
+%8rr_si 32:s16 0:16
+%8rr_xt 16:1 21:5
+&8RR_D_IX xt ix si
+@8RR_D_IX ...... .. .... .. .. ................ \
+ ...... ..... ... ix:1 . ................ \
+ &8RR_D_IX si=%8rr_si xt=%8rr_xt
+&8RR_D xt si:int32_t
+@8RR_D ...... .. .... .. .. ................ \
+ ...... ..... .... . ................ \
+ &8RR_D si=%8rr_si xt=%8rr_xt
+
+# Format XX4
+&XX4 xt xa xb xc
+%xx4_xt 0:1 21:5
+%xx4_xa 2:1 16:5
+%xx4_xb 1:1 11:5
+%xx4_xc 3:1 6:5
+@XX4 ........ ........ ........ ........ \
+ ...... ..... ..... ..... ..... .. .... \
+ &XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc
+
+### Fixed-Point Load Instructions
+
+PLBZ 000001 10 0--.-- .................. \
+ 100010 ..... ..... ................ @PLS_D
+PLHZ 000001 10 0--.-- .................. \
+ 101000 ..... ..... ................ @PLS_D
+PLHA 000001 10 0--.-- .................. \
+ 101010 ..... ..... ................ @PLS_D
+PLWZ 000001 10 0--.-- .................. \
+ 100000 ..... ..... ................ @PLS_D
+PLWA 000001 00 0--.-- .................. \
+ 101001 ..... ..... ................ @PLS_D
+PLD 000001 00 0--.-- .................. \
+ 111001 ..... ..... ................ @PLS_D
+PLQ 000001 00 0--.-- .................. \
+ 111000 ..... ..... ................ @PLS_D
+
+### Fixed-Point Store Instructions
+
+PSTW 000001 10 0--.-- .................. \
+ 100100 ..... ..... ................ @PLS_D
+PSTB 000001 10 0--.-- .................. \
+ 100110 ..... ..... ................ @PLS_D
+PSTH 000001 10 0--.-- .................. \
+ 101100 ..... ..... ................ @PLS_D
+
+PSTD 000001 00 0--.-- .................. \
+ 111101 ..... ..... ................ @PLS_D
+PSTQ 000001 00 0--.-- .................. \
+ 111100 ..... ..... ................ @PLS_D
+
+### Fixed-Point Arithmetic Instructions
+
+PADDI 000001 10 0--.-- .................. \
+ 001110 ..... ..... ................ @PLS_D
+
+### Floating-Point Load and Store Instructions
+
+PLFS 000001 10 0--.-- .................. \
+ 110000 ..... ..... ................ @PLS_D
+PLFD 000001 10 0--.-- .................. \
+ 110010 ..... ..... ................ @PLS_D
+PSTFS 000001 10 0--.-- .................. \
+ 110100 ..... ..... ................ @PLS_D
+PSTFD 000001 10 0--.-- .................. \
+ 110110 ..... ..... ................ @PLS_D
+
+### Prefixed No-operation Instruction
+
+@PNOP 000001 11 0000-- 000000000000000000 \
+ ................................
+
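+# In the overlap group below the bracketed INVALID patterns are tried first;
+# only a suffix that matches none of them decodes as the catch-all PNOP.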
+{
+ [
+ ## Invalid suffixes: Branch instruction
+ # bc[l][a]
+ INVALID ................................ \
+ 010000-------------------------- @PNOP
+ # b[l][a]
+ INVALID ................................ \
+ 010010-------------------------- @PNOP
+ # bclr[l]
+ INVALID ................................ \
+ 010011---------------0000010000- @PNOP
+ # bcctr[l]
+ INVALID ................................ \
+ 010011---------------1000010000- @PNOP
+ # bctar[l]
+ INVALID ................................ \
+ 010011---------------1000110000- @PNOP
+
+ ## Invalid suffixes: rfebb
+ INVALID ................................ \
+ 010011---------------0010010010- @PNOP
+
+ ## Invalid suffixes: context synchronizing other than isync
+ # sc
+ INVALID ................................ \
+ 010001------------------------1- @PNOP
+ # scv
+ INVALID ................................ \
+ 010001------------------------01 @PNOP
+ # rfscv
+ INVALID ................................ \
+ 010011---------------0001010010- @PNOP
+ # rfid
+ INVALID ................................ \
+ 010011---------------0000010010- @PNOP
+ # hrfid
+ INVALID ................................ \
+ 010011---------------0100010010- @PNOP
+ # urfid
+ INVALID ................................ \
+ 010011---------------0100110010- @PNOP
+ # stop
+ INVALID ................................ \
+ 010011---------------0101110010- @PNOP
+ # mtmsr w/ L=0
+ INVALID ................................ \
+ 011111---------0-----0010010010- @PNOP
+ # mtmsrd w/ L=0
+ INVALID ................................ \
+ 011111---------0-----0010110010- @PNOP
+
+ ## Invalid suffixes: Service Processor Attention
+ INVALID ................................ \
+ 000000----------------100000000- @PNOP
+ ]
+
+ ## Valid suffixes
+ PNOP ................................ \
+ -------------------------------- @PNOP
+}
+
+### VSX instructions
+
+PLXV 000001 00 0--.-- .................. \
+ 11001 ...... ..... ................ @8LS_D_TSX
+PSTXV 000001 00 0--.-- .................. \
+ 11011 ...... ..... ................ @8LS_D_TSX
+PLXVP 000001 00 0--.-- .................. \
+ 111010 ..... ..... ................ @8LS_D_TSXP
+PSTXVP 000001 00 0--.-- .................. \
+ 111110 ..... ..... ................ @8LS_D_TSXP
+
+XXSPLTIDP 000001 01 0000 -- -- ................ \
+ 100000 ..... 0010 . ................ @8RR_D
+XXSPLTIW 000001 01 0000 -- -- ................ \
+ 100000 ..... 0011 . ................ @8RR_D
+XXSPLTI32DX 000001 01 0000 -- -- ................ \
+ 100000 ..... 000 .. ................ @8RR_D_IX
+
+XXBLENDVD 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 11 .... @XX4
+XXBLENDVW 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 10 .... @XX4
+XXBLENDVH 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 01 .... @XX4
+XXBLENDVB 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 00 .... @XX4
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
new file mode 100644
index 000000000..9bc327bcb
--- /dev/null
+++ b/target/ppc/int_helper.c
@@ -0,0 +1,3160 @@
+/*
+ * PowerPC integer and vector emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "crypto/aes.h"
+#include "fpu/softfloat.h"
+#include "qapi/error.h"
+#include "qemu/guest-random.h"
+
+#include "helper_regs.h"
+/*****************************************************************************/
+/* Fixed point operations helpers */
+
+static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
+{
+ if (unlikely(ov)) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+}
+
+target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
+ uint32_t oe)
+{
+ uint64_t rt = 0;
+ int overflow = 0;
+
+ uint64_t dividend = (uint64_t)ra << 32;
+ uint64_t divisor = (uint32_t)rb;
+
+ if (unlikely(divisor == 0)) {
+ overflow = 1;
+ } else {
+ rt = dividend / divisor;
+ overflow = rt > UINT32_MAX;
+ }
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ helper_update_ov_legacy(env, overflow);
+ }
+
+ return (target_ulong)rt;
+}
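+
+/*
+ * Illustrative note: the "extended" divides above and below use
+ * (RA << 32) as the dividend.  E.g. divweu with RA = 1, RB = 4 yields
+ * (1 << 32) / 4 = 0x40000000; once (uint32_t)RA >= (uint32_t)RB the
+ * 32-bit quotient overflows and the result is undefined.
+ */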
+
+target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
+ uint32_t oe)
+{
+ int64_t rt = 0;
+ int overflow = 0;
+
+ int64_t dividend = (int64_t)ra << 32;
+ int64_t divisor = (int64_t)((int32_t)rb);
+
+ if (unlikely((divisor == 0) ||
+ ((divisor == -1ull) && (dividend == INT64_MIN)))) {
+ overflow = 1;
+ } else {
+ rt = dividend / divisor;
+ overflow = rt != (int32_t)rt;
+ }
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ helper_update_ov_legacy(env, overflow);
+ }
+
+ return (target_ulong)rt;
+}
+
+#if defined(TARGET_PPC64)
+
+uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
+{
+ uint64_t rt = 0;
+ int overflow = 0;
+
+ if (unlikely(rb == 0 || ra >= rb)) {
+ overflow = 1;
+ rt = 0; /* Undefined */
+ } else {
+ divu128(&rt, &ra, rb);
+ }
+
+ if (oe) {
+ helper_update_ov_legacy(env, overflow);
+ }
+
+ return rt;
+}
+
+uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
+{
+ uint64_t rt = 0;
+ int64_t ra = (int64_t)rau;
+ int64_t rb = (int64_t)rbu;
+ int overflow = 0;
+
+ if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+ overflow = 1;
+ rt = 0; /* Undefined */
+ } else {
+ divs128(&rt, &ra, rb);
+ }
+
+ if (oe) {
+ helper_update_ov_legacy(env, overflow);
+ }
+
+ return rt;
+}
+
+#endif
+
+
+#if defined(TARGET_PPC64)
+/* if x = 0xab, returns 0xabababababababab */
+#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
+
+/*
+ * Subtract 1 from each byte, AND with the bitwise inverse, and check
+ * whether the MSB is set in each byte.
+ * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
+ *      (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
+ */
+#define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80))
+
+/* When you XOR the pattern and there is a match, that byte will be zero */
+#define hasvalue(x, n) (haszero((x) ^ pattern(n)))
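+
+/*
+ * Illustrative check: with n = 0x42 and x = 0x0000004200000000ull,
+ * x ^ pattern(n) zeroes exactly the matching byte, haszero() flags it,
+ * and hasvalue() is therefore non-zero.
+ */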
+
+uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
+{
+ return hasvalue(rb, ra) ? CRF_GT : 0;
+}
+
+#undef pattern
+#undef haszero
+#undef hasvalue
+
+/*
+ * Return a random number.
+ */
+uint64_t helper_darn32(void)
+{
+ Error *err = NULL;
+ uint32_t ret;
+
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+ qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -1;
+ }
+
+ return ret;
+}
+
+uint64_t helper_darn64(void)
+{
+ Error *err = NULL;
+ uint64_t ret;
+
+ if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
+ qemu_log_mask(LOG_UNIMP, "darn: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -1;
+ }
+
+ return ret;
+}
+
+uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
+{
+ int i;
+ uint64_t ra = 0;
+
+ for (i = 0; i < 8; i++) {
+ int index = (rs >> (i * 8)) & 0xFF;
+ if (index < 64) {
+ if (rb & PPC_BIT(index)) {
+ ra |= 1 << i;
+ }
+ }
+ }
+ return ra;
+}
+
+#endif
+
+target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
+{
+ target_ulong mask = 0xff;
+ target_ulong ra = 0;
+ int i;
+
+ for (i = 0; i < sizeof(target_ulong); i++) {
+ if ((rs & mask) == (rb & mask)) {
+ ra |= mask;
+ }
+ mask <<= 8;
+ }
+ return ra;
+}
+
+/* shift right arithmetic helper */
+target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
+ target_ulong shift)
+{
+ int32_t ret;
+
+ if (likely(!(shift & 0x20))) {
+ if (likely((uint32_t)shift != 0)) {
+ shift &= 0x1f;
+ ret = (int32_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+ env->ca32 = env->ca = 0;
+ } else {
+ env->ca32 = env->ca = 1;
+ }
+ } else {
+ ret = (int32_t)value;
+ env->ca32 = env->ca = 0;
+ }
+ } else {
+ ret = (int32_t)value >> 31;
+ env->ca32 = env->ca = (ret != 0);
+ }
+ return (target_long)ret;
+}
+
+#if defined(TARGET_PPC64)
+target_ulong helper_srad(CPUPPCState *env, target_ulong value,
+ target_ulong shift)
+{
+ int64_t ret;
+
+ if (likely(!(shift & 0x40))) {
+ if (likely((uint64_t)shift != 0)) {
+ shift &= 0x3f;
+ ret = (int64_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
+ env->ca32 = env->ca = 0;
+ } else {
+ env->ca32 = env->ca = 1;
+ }
+ } else {
+ ret = (int64_t)value;
+ env->ca32 = env->ca = 0;
+ }
+ } else {
+ ret = (int64_t)value >> 63;
+ env->ca32 = env->ca = (ret != 0);
+ }
+ return ret;
+}
+#endif
+
+#if defined(TARGET_PPC64)
+target_ulong helper_popcntb(target_ulong val)
+{
+ /* Note that we don't fold past bytes */
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) &
+ 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) &
+ 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
+ 0x0f0f0f0f0f0f0f0fULL);
+ return val;
+}
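+
+/*
+ * Illustrative: popcntb keeps one population count per byte, e.g. an
+ * input byte of 0xff becomes 0x08 and 0x01 becomes 0x01, so
+ * popcntb(0xff01) == 0x0801.
+ */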
+
+target_ulong helper_popcntw(target_ulong val)
+{
+ /* Note that we don't fold past words. */
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) &
+ 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) &
+ 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
+ 0x0f0f0f0f0f0f0f0fULL);
+ val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
+ 0x00ff00ff00ff00ffULL);
+ val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
+ 0x0000ffff0000ffffULL);
+ return val;
+}
+#else
+target_ulong helper_popcntb(target_ulong val)
+{
+ /* Note that we don't fold past bytes */
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
+ return val;
+}
+#endif
+
+uint64_t helper_CFUGED(uint64_t src, uint64_t mask)
+{
+ /*
+ * Instead of processing the mask bit-by-bit from the most significant to
+ * the least significant bit, as described in PowerISA, we'll handle it in
+ * blocks of 'n' zeros/ones from LSB to MSB. To avoid the decision to use
+ * ctz or cto, we negate the mask at the end of the loop.
+ */
+ target_ulong m, left = 0, right = 0;
+ unsigned int n, i = 64;
+ bool bit = false; /* tracks if we are processing zeros or ones */
+
+ if (mask == 0 || mask == -1) {
+ return src;
+ }
+
+ /* Processes the mask in blocks, from LSB to MSB */
+ while (i) {
+ /* Find how many bits we should take */
+ n = ctz64(mask);
+ if (n > i) {
+ n = i;
+ }
+
+        /*
+         * Extract the 'n' trailing bits of src and put them in the
+         * leading 'n' bits of 'right' or 'left', pushing down the
+         * previously extracted values.
+         */
+ m = (1ll << n) - 1;
+ if (bit) {
+ right = ror64(right | (src & m), n);
+ } else {
+ left = ror64(left | (src & m), n);
+ }
+
+ /*
+ * Discards the processed bits from 'src' and 'mask'. Note that we are
+ * removing 'n' trailing zeros from 'mask', but the logical shift will
+ * add 'n' leading zeros back, so the population count of 'mask' is kept
+ * the same.
+ */
+ src >>= n;
+ mask >>= n;
+ i -= n;
+ bit = !bit;
+ mask = ~mask;
+ }
+
+    /*
+     * At the end, 'right' has been rotated right a total of ctpop(mask)
+     * bits.  Shifting it right another 64 - ctpop(mask) bits puts its
+     * bits back in place.
+     */
+ if (bit) {
+ n = ctpop64(mask);
+ } else {
+ n = 64 - ctpop64(mask);
+ }
+
+ return left | (right >> n);
+}
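+
+/*
+ * Illustrative: cfuged gathers the mask=1 bits of src into the low end
+ * of the result and the mask=0 bits into the high end; e.g. a mask of
+ * 0xffffffff00000000ull simply swaps the two 32-bit halves of src.
+ */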
+
+uint64_t helper_PDEPD(uint64_t src, uint64_t mask)
+{
+ int i, o;
+ uint64_t result = 0;
+
+ if (mask == -1) {
+ return src;
+ }
+
+ for (i = 0; mask != 0; i++) {
+ o = ctz64(mask);
+ mask &= mask - 1;
+ result |= ((src >> i) & 1) << o;
+ }
+
+ return result;
+}
+
+uint64_t helper_PEXTD(uint64_t src, uint64_t mask)
+{
+ int i, o;
+ uint64_t result = 0;
+
+ if (mask == -1) {
+ return src;
+ }
+
+ for (o = 0; mask != 0; o++) {
+ i = ctz64(mask);
+ mask &= mask - 1;
+ result |= ((src >> i) & 1) << o;
+ }
+
+ return result;
+}
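+
+/*
+ * Illustrative: with mask = 0x0f00, PDEPD scatters the low four bits of
+ * src into bits 8..11 of the result, while PEXTD gathers bits 8..11 of
+ * src into the low four bits; over the selected positions the two are
+ * inverses.
+ */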
+
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ return tmp / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->so = env->ov = 1;
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ tmp /= (int32_t)arg2;
+ if ((int32_t)tmp != tmp) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ return tmp;
+ }
+}
+
+target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->so = env->ov = 1;
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->ov = 0;
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+/*****************************************************************************/
+/* 602 specific instructions */
+/* mfrom is the most crazy instruction ever seen, imho ! */
+/* Real implementation uses a ROM table. Do the same */
+/*
+ * Extremely decomposed:
+ *     return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
+ */
+#if !defined(CONFIG_USER_ONLY)
+target_ulong helper_602_mfrom(target_ulong arg)
+{
+ if (likely(arg < 602)) {
+#include "mfrom_table.c.inc"
+ return mfrom_ROM_table[arg];
+ } else {
+ return 0;
+ }
+}
+#endif
+
+/*****************************************************************************/
+/* Altivec extension helpers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = 0; index < ARRAY_SIZE(r->element); index++)
+#else
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--)
+#endif
+
+/* Saturating arithmetic helpers. */
+#define SATCVT(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ \
+ if (x < (from_type)min) { \
+ r = min; \
+ *sat = 1; \
+ } else if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+#define SATCVTU(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ \
+ if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
+SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
+SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
+
+SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
+SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
+SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
+SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
+SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
+SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
+#undef SATCVT
+#undef SATCVTU
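+
+/*
+ * Illustrative: cvtsdsw(0x100000000ll, &sat) clamps to INT32_MAX and
+ * sets *sat; an in-range value such as -5 passes through unchanged.
+ */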
+
+void helper_mtvscr(CPUPPCState *env, uint32_t vscr)
+{
+ ppc_store_vscr(env, vscr);
+}
+
+uint32_t helper_mfvscr(CPUPPCState *env)
+{
+ return ppc_get_vscr(env);
+}
+
+static inline void set_vscr_sat(CPUPPCState *env)
+{
+ /* The choice of non-zero value is arbitrary. */
+ env->vscr_sat.u32[0] = 1;
+}
+
+void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = ~a->u32[i] < b->u32[i];
+ }
+}
+
+/* vprtybw */
+void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t res = b->u32[i] ^ (b->u32[i] >> 16);
+ res ^= res >> 8;
+ r->u32[i] = res & 1;
+ }
+}
+
+/* vprtybd */
+void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ uint64_t res = b->u64[i] ^ (b->u64[i] >> 32);
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->u64[i] = res & 1;
+ }
+}
+
+/* vprtybq */
+void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
+{
+ uint64_t res = b->u64[0] ^ b->u64[1];
+ res ^= res >> 32;
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->VsrD(1) = res & 1;
+ r->VsrD(0) = 0;
+}
+
+#define VARITHFP(suffix, func) \
+ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ r->f32[i] = func(a->f32[i], b->f32[i], &env->vec_status); \
+ } \
+ }
+VARITHFP(addfp, float32_add)
+VARITHFP(subfp, float32_sub)
+VARITHFP(minfp, float32_min)
+VARITHFP(maxfp, float32_max)
+#undef VARITHFP
+
+#define VARITHFPFMA(suffix, type) \
+ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b, ppc_avr_t *c) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ r->f32[i] = float32_muladd(a->f32[i], c->f32[i], b->f32[i], \
+ type, &env->vec_status); \
+ } \
+ }
+VARITHFPFMA(maddfp, 0);
+VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
+#undef VARITHFPFMA
+
+#define VARITHSAT_CASE(type, op, cvt, element) \
+ { \
+ type result = (type)a->element[i] op (type)b->element[i]; \
+ r->element[i] = cvt(result, &sat); \
+ }
+
+#define VARITHSAT_DO(name, op, optype, cvt, element) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *vscr_sat, \
+ ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
+ { \
+ int sat = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ VARITHSAT_CASE(optype, op, cvt, element); \
+ } \
+ if (sat) { \
+ vscr_sat->u32[0] = 1; \
+ } \
+ }
+#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
+#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
+VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
+VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
+VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
+VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
+VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
+VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
+#undef VARITHSAT_CASE
+#undef VARITHSAT_DO
+#undef VARITHSAT_SIGNED
+#undef VARITHSAT_UNSIGNED
+
+#define VAVG_DO(name, element, etype) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
+ r->element[i] = x >> 1; \
+ } \
+ }
+
+#define VAVG(type, signed_element, signed_type, unsigned_element, \
+ unsigned_type) \
+ VAVG_DO(avgs##type, signed_element, signed_type) \
+ VAVG_DO(avgu##type, unsigned_element, unsigned_type)
+VAVG(b, s8, int16_t, u8, uint16_t)
+VAVG(h, s16, int32_t, u16, uint32_t)
+VAVG(w, s32, int64_t, u32, uint64_t)
+#undef VAVG_DO
+#undef VAVG
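+
+/*
+ * Illustrative: the "+ 1" rounds the average up, so vavgub of 1 and 2
+ * yields (1 + 2 + 1) >> 1 == 2.
+ */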
+
+#define VABSDU_DO(name, element) \
+void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = (a->element[i] > b->element[i]) ? \
+ (a->element[i] - b->element[i]) : \
+ (b->element[i] - a->element[i]); \
+ } \
+}
+
+/*
+ * VABSDU - Vector absolute difference unsigned
+ *   type - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ *   element - element type to access from vector
+ */
+#define VABSDU(type, element) \
+ VABSDU_DO(absdu##type, element)
+VABSDU(b, u8)
+VABSDU(h, u16)
+VABSDU(w, u32)
+#undef VABSDU_DO
+#undef VABSDU
+
+#define VCF(suffix, cvt, element) \
+ void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ float32 t = cvt(b->element[i], &env->vec_status); \
+ r->f32[i] = float32_scalbn(t, -uim, &env->vec_status); \
+ } \
+ }
+VCF(ux, uint32_to_float32, u32)
+VCF(sx, int32_to_float32, s32)
+#undef VCF
+
+#define VCMP_DO(suffix, compare, element, record) \
+ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint64_t ones = (uint64_t)-1; \
+ uint64_t all = ones; \
+ uint64_t none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint64_t result = (a->element[i] compare b->element[i] ? \
+ ones : 0x0); \
+ switch (sizeof(a->element[0])) { \
+ case 8: \
+ r->u64[i] = result; \
+ break; \
+ case 4: \
+ r->u32[i] = result; \
+ break; \
+ case 2: \
+ r->u16[i] = result; \
+ break; \
+ case 1: \
+ r->u8[i] = result; \
+ break; \
+ } \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMP(suffix, compare, element) \
+ VCMP_DO(suffix, compare, element, 0) \
+ VCMP_DO(suffix##_dot, compare, element, 1)
+VCMP(equb, ==, u8)
+VCMP(equh, ==, u16)
+VCMP(equw, ==, u32)
+VCMP(equd, ==, u64)
+VCMP(gtub, >, u8)
+VCMP(gtuh, >, u16)
+VCMP(gtuw, >, u32)
+VCMP(gtud, >, u64)
+VCMP(gtsb, >, s8)
+VCMP(gtsh, >, s16)
+VCMP(gtsw, >, s32)
+VCMP(gtsd, >, s64)
+#undef VCMP_DO
+#undef VCMP
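+
+/*
+ * Illustrative: for the dot forms, CR6 gets bit 3 set when the compare
+ * is true for every element and bit 1 set when it is true for none,
+ * matching the (all != 0) and (none == 0) tests above.
+ */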
+
+#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
+void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ etype ones = (etype)-1; \
+ etype all = ones; \
+ etype result, none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ if (cmpzero) { \
+ result = ((a->element[i] == 0) \
+ || (b->element[i] == 0) \
+ || (a->element[i] != b->element[i]) ? \
+ ones : 0x0); \
+ } else { \
+ result = (a->element[i] != b->element[i]) ? ones : 0x0; \
+ } \
+ r->element[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+}
+
+/*
+ * VCMPNEZ - Vector compare not equal to zero
+ * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ * element - element type to access from vector
+ */
+#define VCMPNE(suffix, element, etype, cmpzero) \
+ VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
+ VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
+VCMPNE(zb, u8, uint8_t, 1)
+VCMPNE(zh, u16, uint16_t, 1)
+VCMPNE(zw, u32, uint32_t, 1)
+VCMPNE(b, u8, uint8_t, 0)
+VCMPNE(h, u16, uint16_t, 0)
+VCMPNE(w, u32, uint32_t, 0)
+#undef VCMPNE_DO
+#undef VCMPNE
+
+#define VCMPFP_DO(suffix, compare, order, record) \
+ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint32_t ones = (uint32_t)-1; \
+ uint32_t all = ones; \
+ uint32_t none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ uint32_t result; \
+ FloatRelation rel = \
+ float32_compare_quiet(a->f32[i], b->f32[i], \
+ &env->vec_status); \
+ if (rel == float_relation_unordered) { \
+ result = 0; \
+ } else if (rel compare order) { \
+ result = ones; \
+ } else { \
+ result = 0; \
+ } \
+ r->u32[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMPFP(suffix, compare, order) \
+ VCMPFP_DO(suffix, compare, order, 0) \
+ VCMPFP_DO(suffix##_dot, compare, order, 1)
+VCMPFP(eqfp, ==, float_relation_equal)
+VCMPFP(gefp, !=, float_relation_less)
+VCMPFP(gtfp, ==, float_relation_greater)
+#undef VCMPFP_DO
+#undef VCMPFP
+
+static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
+ ppc_avr_t *a, ppc_avr_t *b, int record)
+{
+ int i;
+ int all_in = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+ FloatRelation le_rel = float32_compare_quiet(a->f32[i], b->f32[i],
+ &env->vec_status);
+ if (le_rel == float_relation_unordered) {
+ r->u32[i] = 0xc0000000;
+ all_in = 1;
+ } else {
+ float32 bneg = float32_chs(b->f32[i]);
+ FloatRelation ge_rel = float32_compare_quiet(a->f32[i], bneg,
+ &env->vec_status);
+ int le = le_rel != float_relation_greater;
+ int ge = ge_rel != float_relation_less;
+
+ r->u32[i] = ((!le) << 31) | ((!ge) << 30);
+ all_in |= (!le | !ge);
+ }
+ }
+ if (record) {
+ env->crf[6] = (all_in == 0) << 1;
+ }
+}
+
+void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ vcmpbfp_internal(env, r, a, b, 0);
+}
+
+void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b)
+{
+ vcmpbfp_internal(env, r, a, b, 1);
+}
+
+#define VCT(suffix, satcvt, element) \
+ void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ int sat = 0; \
+ float_status s = env->vec_status; \
+ \
+ set_float_rounding_mode(float_round_to_zero, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ if (float32_is_any_nan(b->f32[i])) { \
+ r->element[i] = 0; \
+ } else { \
+ float64 t = float32_to_float64(b->f32[i], &s); \
+ int64_t j; \
+ \
+ t = float64_scalbn(t, uim, &s); \
+ j = float64_to_int64(t, &s); \
+ r->element[i] = satcvt(j, &sat); \
+ } \
+ } \
+ if (sat) { \
+ set_vscr_sat(env); \
+ } \
+ }
+VCT(uxs, cvtsduw, u32)
+VCT(sxs, cvtsdsw, s32)
+#undef VCT
+
+target_ulong helper_vclzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ if (r->VsrB(i) & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
+
+target_ulong helper_vctzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+ if (r->VsrB(i) & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
+
+void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+
+ r->s16[i] = cvtswsh(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+ r->s16[i] = cvtswsh(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ r->s16[i] = (int16_t) (prod + c->s16[i]);
+ }
+}
+
+#define VMRG_DO(name, element, access, ofs) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ ppc_avr_t result; \
+ int i, half = ARRAY_SIZE(r->element) / 2; \
+ \
+ for (i = 0; i < half; i++) { \
+ result.access(i * 2 + 0) = a->access(i + ofs); \
+ result.access(i * 2 + 1) = b->access(i + ofs); \
+ } \
+ *r = result; \
+ }
+
+#define VMRG(suffix, element, access) \
+ VMRG_DO(mrgl##suffix, element, access, half) \
+ VMRG_DO(mrgh##suffix, element, access, 0)
+VMRG(b, u8, VsrB)
+VMRG(h, u16, VsrH)
+VMRG(w, u32, VsrW)
+#undef VMRG_DO
+#undef VMRG
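+
+/*
+ * Illustrative: vmrghb interleaves the first eight bytes of each
+ * source in element order, a0 b0 a1 b1 ..., while vmrglb does the same
+ * with the last eight bytes.
+ */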
+
+void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
+ prod[i] = (int32_t)a->s8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
+ prod[4 * i + 2] + prod[4 * i + 3];
+ }
+}
+
+void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
+ }
+}
+
+void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = (int32_t)a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];
+
+ r->u32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint16_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ prod[i] = a->u8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
+ prod[4 * i + 2] + prod[4 * i + 3];
+ }
+}
+
+void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
+ }
+}
+
+void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];
+
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+#define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \
+ r->prod_access(i >> 1) = (cast)a->mul_access(i) * \
+ (cast)b->mul_access(i); \
+ } \
+ }
+
+#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->mul_element); i += 2) { \
+ r->prod_access(i >> 1) = (cast)a->mul_access(i + 1) * \
+ (cast)b->mul_access(i + 1); \
+ } \
+ }
+
+#define VMUL(suffix, mul_element, mul_access, prod_access, cast) \
+ VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \
+ VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast)
+VMUL(sb, s8, VsrSB, VsrSH, int16_t)
+VMUL(sh, s16, VsrSH, VsrSW, int32_t)
+VMUL(sw, s32, VsrSW, VsrSD, int64_t)
+VMUL(ub, u8, VsrB, VsrH, uint16_t)
+VMUL(uh, u16, VsrH, VsrW, uint32_t)
+VMUL(uw, u32, VsrW, VsrD, uint64_t)
+#undef VMUL_DO_EVN
+#undef VMUL_DO_ODD
+#undef VMUL
+
+void helper_vmulhsw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ r->s32[i] = (int32_t)(((int64_t)a->s32[i] * (int64_t)b->s32[i]) >> 32);
+ }
+}
+
+void helper_vmulhuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ r->u32[i] = (uint32_t)(((uint64_t)a->u32[i] *
+ (uint64_t)b->u32[i]) >> 32);
+ }
+}
+
+void helper_vmulhsd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ uint64_t discard;
+
+ muls64(&discard, &r->u64[0], a->s64[0], b->s64[0]);
+ muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]);
+}
+
+void helper_vmulhud(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ uint64_t discard;
+
+ mulu64(&discard, &r->u64[0], a->u64[0], b->u64[0]);
+ mulu64(&discard, &r->u64[1], a->u64[1], b->u64[1]);
+}
+
+void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int s = c->VsrB(i) & 0x1f;
+ int index = s & 0xf;
+
+ if (s & 0x10) {
+ result.VsrB(i) = b->VsrB(index);
+ } else {
+ result.VsrB(i) = a->VsrB(index);
+ }
+ }
+ *r = result;
+}
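+
+/*
+ * Illustrative: each steering byte of c selects one source byte, e.g.
+ * c->VsrB(i) == 0x13 picks b->VsrB(3): bit 4 selects b over a, the low
+ * nibble is the byte index.
+ */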
+
+void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int s = c->VsrB(i) & 0x1f;
+ int index = 15 - (s & 0xf);
+
+ if (s & 0x10) {
+ result.VsrB(i) = a->VsrB(index);
+ } else {
+ result.VsrB(i) = b->VsrB(index);
+ }
+ }
+ *r = result;
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
+#define VBPERMD_INDEX(i) (i)
+#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
+#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
+#else
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
+#define VBPERMD_INDEX(i) (1 - i)
+#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
+#define EXTRACT_BIT(avr, i, index) \
+ (extract64((avr)->u64[1 - i], 63 - index, 1))
+#endif
+
+void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+ VECTOR_FOR_INORDER_I(i, u64) {
+ for (j = 0; j < 8; j++) {
+ int index = VBPERMQ_INDEX(b, (i * 8) + j);
+ if (index < 64 && EXTRACT_BIT(a, i, index)) {
+ result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
+ }
+ }
+ }
+ *r = result;
+}
+
+void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ uint64_t perm = 0;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int index = VBPERMQ_INDEX(b, i);
+
+ if (index < 128) {
+ uint64_t mask = (1ull << (63 - (index & 0x3F)));
+ if (a->u64[VBPERMQ_DW(index)] & mask) {
+ perm |= (0x8000 >> i);
+ }
+ }
+ }
+
+ r->VsrD(0) = perm;
+ r->VsrD(1) = 0;
+}
+
+#undef VBPERMQ_INDEX
+#undef VBPERMQ_DW
+
+#define PMSUM(name, srcfld, trgfld, trgtyp) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i, j; \
+ trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
+ \
+ VECTOR_FOR_INORDER_I(i, srcfld) { \
+ prod[i] = 0; \
+ for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
+ if (a->srcfld[i] & (1ull << j)) { \
+ prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
+ } \
+ } \
+ } \
+ \
+ VECTOR_FOR_INORDER_I(i, trgfld) { \
+ r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
+ } \
+}
+
+PMSUM(vpmsumb, u8, u16, uint16_t)
+PMSUM(vpmsumh, u16, u32, uint32_t)
+PMSUM(vpmsumw, u32, u64, uint64_t)
+
+void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+
+#ifdef CONFIG_INT128
+ int i, j;
+ __uint128_t prod[2];
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ prod[i] = 0;
+ for (j = 0; j < 64; j++) {
+ if (a->u64[i] & (1ull << j)) {
+ prod[i] ^= (((__uint128_t)b->u64[i]) << j);
+ }
+ }
+ }
+
+ r->u128 = prod[0] ^ prod[1];
+
+#else
+ int i, j;
+ ppc_avr_t prod[2];
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
+ for (j = 0; j < 64; j++) {
+ if (a->u64[i] & (1ull << j)) {
+ ppc_avr_t bshift;
+ if (j == 0) {
+ bshift.VsrD(0) = 0;
+ bshift.VsrD(1) = b->u64[i];
+ } else {
+ bshift.VsrD(0) = b->u64[i] >> (64 - j);
+ bshift.VsrD(1) = b->u64[i] << j;
+ }
+ prod[i].VsrD(1) ^= bshift.VsrD(1);
+ prod[i].VsrD(0) ^= bshift.VsrD(0);
+ }
+ }
+ }
+
+ r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1);
+ r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0);
+#endif
+}
+
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define PKBIG 1
+#else
+#define PKBIG 0
+#endif
+void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result;
+#if defined(HOST_WORDS_BIGENDIAN)
+ const ppc_avr_t *x[2] = { a, b };
+#else
+ const ppc_avr_t *x[2] = { b, a };
+#endif
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ VECTOR_FOR_INORDER_I(j, u32) {
+ uint32_t e = x[i]->u32[j];
+
+ result.u16[4 * i + j] = (((e >> 9) & 0xfc00) |
+ ((e >> 6) & 0x3e0) |
+ ((e >> 3) & 0x1f));
+ }
+ }
+ *r = result;
+}
+
+#define VPK(suffix, from, to, cvt, dosat) \
+ void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ int sat = 0; \
+ ppc_avr_t result; \
+ ppc_avr_t *a0 = PKBIG ? a : b; \
+ ppc_avr_t *a1 = PKBIG ? b : a; \
+ \
+ VECTOR_FOR_INORDER_I(i, from) { \
+ result.to[i] = cvt(a0->from[i], &sat); \
+ result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\
+ } \
+ *r = result; \
+ if (dosat && sat) { \
+ set_vscr_sat(env); \
+ } \
+ }
+#define I(x, y) (x)
+VPK(shss, s16, s8, cvtshsb, 1)
+VPK(shus, s16, u8, cvtshub, 1)
+VPK(swss, s32, s16, cvtswsh, 1)
+VPK(swus, s32, u16, cvtswuh, 1)
+VPK(sdss, s64, s32, cvtsdsw, 1)
+VPK(sdus, s64, u32, cvtsduw, 1)
+VPK(uhus, u16, u8, cvtuhub, 1)
+VPK(uwus, u32, u16, cvtuwuh, 1)
+VPK(udus, u64, u32, cvtuduw, 1)
+VPK(uhum, u16, u8, I, 0)
+VPK(uwum, u32, u16, I, 0)
+VPK(udum, u64, u32, I, 0)
+#undef I
+#undef VPK
+#undef PKBIG
+
+void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+ r->f32[i] = float32_div(float32_one, b->f32[i], &env->vec_status);
+ }
+}
+
+#define VRFI(suffix, rounding) \
+ void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b) \
+ { \
+ int i; \
+ float_status s = env->vec_status; \
+ \
+ set_float_rounding_mode(rounding, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) { \
+ r->f32[i] = float32_round_to_int (b->f32[i], &s); \
+ } \
+ }
+VRFI(n, float_round_nearest_even)
+VRFI(m, float_round_down)
+VRFI(p, float_round_up)
+VRFI(z, float_round_to_zero)
+#undef VRFI
+
+void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+ float32 t = float32_sqrt(b->f32[i], &env->vec_status);
+
+ r->f32[i] = float32_div(float32_one, t, &env->vec_status);
+ }
+}
+
+#define VRLMI(name, size, element, insert) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint##size##_t src1 = a->element[i]; \
+ uint##size##_t src2 = b->element[i]; \
+ uint##size##_t src3 = r->element[i]; \
+ uint##size##_t begin, end, shift, mask, rot_val; \
+ \
+ shift = extract##size(src2, 0, 6); \
+ end = extract##size(src2, 8, 6); \
+ begin = extract##size(src2, 16, 6); \
+ rot_val = rol##size(src1, shift); \
+ mask = mask_u##size(begin, end); \
+ if (insert) { \
+ r->element[i] = (rot_val & mask) | (src3 & ~mask); \
+ } else { \
+ r->element[i] = (rot_val & mask); \
+ } \
+ } \
+}
+
+VRLMI(vrldmi, 64, u64, 1);
+VRLMI(vrlwmi, 32, u32, 1);
+VRLMI(vrldnm, 64, u64, 0);
+VRLMI(vrlwnm, 32, u32, 0);
+
+void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
+ r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
+}
+
+void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+ r->f32[i] = float32_exp2(b->f32[i], &env->vec_status);
+ }
+}
+
+void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f32); i++) {
+ r->f32[i] = float32_log2(b->f32[i], &env->vec_status);
+ }
+}
+
+#define VEXTU_X_DO(name, size, left) \
+target_ulong glue(helper_, name)(target_ulong a, ppc_avr_t *b) \
+{ \
+ int index = (a & 0xf) * 8; \
+ if (left) { \
+ index = 128 - index - size; \
+ } \
+ return int128_getlo(int128_rshift(b->s128, index)) & \
+ MAKE_64BIT_MASK(0, size); \
+}
+VEXTU_X_DO(vextublx, 8, 1)
+VEXTU_X_DO(vextuhlx, 16, 1)
+VEXTU_X_DO(vextuwlx, 32, 1)
+VEXTU_X_DO(vextubrx, 8, 0)
+VEXTU_X_DO(vextuhrx, 16, 0)
+VEXTU_X_DO(vextuwrx, 32, 0)
+#undef VEXTU_X_DO
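+
+/*
+ * Illustrative: vextublx with a = 0 extracts the leftmost byte of b
+ * (bit offset 128 - 0 * 8 - 8 = 120 from the least significant end).
+ */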
+
+void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes, size;
+
+ size = ARRAY_SIZE(r->u8);
+ for (i = 0; i < size; i++) {
+ shift = b->VsrB(i) & 0x7; /* extract shift value */
+ bytes = (a->VsrB(i) << 8) + /* extract adjacent bytes */
+ (((i + 1) < size) ? a->VsrB(i + 1) : 0);
+ r->VsrB(i) = (bytes << shift) >> 8; /* shift and store result */
+ }
+}
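+
+/*
+ * Illustrative: forming a 16-bit window from each byte and its right
+ * neighbour makes the shifted-in bits come from the neighbour, e.g.
+ * with a->VsrB(i) = 0x80, a->VsrB(i + 1) = 0xc0 and a shift of 1 the
+ * result byte is 0x01.
+ */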
+
+void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes;
+
+    /*
+     * Use reverse order, as the destination and source registers can be
+     * the same.  Since the register is modified in place (saving a
+     * temporary), reverse order guarantees that the computed result is
+     * not fed back.
+     */
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+ shift = b->VsrB(i) & 0x7; /* extract shift value */
+ bytes = ((i ? a->VsrB(i - 1) : 0) << 8) + a->VsrB(i);
+ /* extract adjacent bytes */
+ r->VsrB(i) = (bytes >> shift) & 0xFF; /* shift and store result */
+ }
+}
+
+void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
+{
+ int sh = shift & 0xf;
+ int i;
+ ppc_avr_t result;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int index = sh + i;
+ if (index > 0xf) {
+ result.VsrB(i) = b->VsrB(index - 0x10);
+ } else {
+ result.VsrB(i) = a->VsrB(index);
+ }
+ }
+ *r = result;
+}
+
+void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->VsrB(0xf) >> 3) & 0xf;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+ memset(&r->u8[16 - sh], 0, sh);
+#else
+ memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+ memset(&r->u8[0], 0, sh);
+#endif
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX])
+#else
+#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1)
+#endif
+
+#define VINSX(SUFFIX, TYPE) \
+void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \
+ uint64_t val, target_ulong index) \
+{ \
+ const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \
+ target_long idx = index; \
+ \
+ if (idx < 0 || idx > maxidx) { \
+ idx = idx < 0 ? sizeof(TYPE) - idx : idx; \
+ qemu_log_mask(LOG_GUEST_ERROR, \
+ "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \
+ ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \
+ } else { \
+ TYPE src = val; \
+ memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \
+ } \
+}
+VINSX(B, uint8_t)
+VINSX(H, uint16_t)
+VINSX(W, uint32_t)
+VINSX(D, uint64_t)
+#undef ELEM_ADDR
+#undef VINSX
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VEXTDVLX(NAME, SIZE) \
+void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ target_ulong index) \
+{ \
+ const target_long idx = index; \
+ ppc_avr_t tmp[2] = { *a, *b }; \
+ memset(t, 0, sizeof(*t)); \
+ if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
+ memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \
+ } else { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
+ TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
+ env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
+ } \
+}
+#else
+#define VEXTDVLX(NAME, SIZE) \
+void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ target_ulong index) \
+{ \
+ const target_long idx = index; \
+ ppc_avr_t tmp[2] = { *b, *a }; \
+ memset(t, 0, sizeof(*t)); \
+ if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
+ memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \
+ (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \
+ } else { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
+ TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
+ env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
+ } \
+}
+#endif
+VEXTDVLX(VEXTDUBVLX, 1)
+VEXTDVLX(VEXTDUHVLX, 2)
+VEXTDVLX(VEXTDUWVLX, 4)
+VEXTDVLX(VEXTDDVLX, 8)
+#undef VEXTDVLX
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ memmove(&r->u8[8 - es], &b->u8[index], es); \
+ memset(&r->u8[8], 0, 8); \
+ memset(&r->u8[0], 0, 8 - es); \
+ }
+#else
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ uint32_t s = (16 - index) - es; \
+ memmove(&r->u8[8], &b->u8[s], es); \
+ memset(&r->u8[0], 0, 8); \
+ memset(&r->u8[8 + es], 0, 8 - es); \
+ }
+#endif
+VEXTRACT(ub, u8)
+VEXTRACT(uh, u16)
+VEXTRACT(uw, u32)
+VEXTRACT(d, u64)
+#undef VEXTRACT
+
+void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
+{
+ ppc_vsr_t t = { };
+ size_t es = sizeof(uint32_t);
+ uint32_t ext_index;
+ int i;
+
+ ext_index = index;
+ for (i = 0; i < es; i++, ext_index++) {
+ t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
+ }
+
+ *xt = t;
+}
+
+void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
+{
+ ppc_vsr_t t = *xt;
+ size_t es = sizeof(uint32_t);
+ int ins_index, i = 0;
+
+ ins_index = index;
+ for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
+ t.VsrB(ins_index) = xb->VsrB(8 - es + i);
+ }
+
+ *xt = t;
+}
+
+#define XXBLEND(name, sz) \
+void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ ppc_avr_t *c, uint32_t desc) \
+{ \
+ for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \
+ t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? \
+ b->glue(u, sz)[i] : a->glue(u, sz)[i]; \
+ } \
+}
+XXBLEND(B, 8)
+XXBLEND(H, 16)
+XXBLEND(W, 32)
+XXBLEND(D, 64)
+#undef XXBLEND
+
+#define VEXT_SIGNED(name, element, cast) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = (cast)b->element[i]; \
+ } \
+}
+VEXT_SIGNED(vextsb2w, s32, int8_t)
+VEXT_SIGNED(vextsb2d, s64, int8_t)
+VEXT_SIGNED(vextsh2w, s32, int16_t)
+VEXT_SIGNED(vextsh2d, s64, int16_t)
+VEXT_SIGNED(vextsw2d, s64, int32_t)
+#undef VEXT_SIGNED
+
+#define VNEG(name, element) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = -b->element[i]; \
+ } \
+}
+VNEG(vnegw, s32)
+VNEG(vnegd, s64)
+#undef VNEG
+
+void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->VsrB(0xf) >> 3) & 0xf;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+ memset(&r->u8[0], 0, sh);
+#else
+ memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+ memset(&r->u8[16 - sh], 0, sh);
+#endif
+}
+
+void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = a->u32[i] >= b->u32[i];
+ }
+}
+
+void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int64_t t;
+ int i, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+ upper = ARRAY_SIZE(r->s32) - 1;
+ t = (int64_t)b->VsrSW(upper);
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ t += a->VsrSW(i);
+ result.VsrSW(i) = 0;
+ }
+ result.VsrSW(upper) = cvtsdsw(t, &sat);
+ *r = result;
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+ upper = 1;
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ int64_t t = (int64_t)b->VsrSW(upper + i * 2);
+
+ result.VsrD(i) = 0;
+ for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
+ t += a->VsrSW(2 * i + j);
+ }
+ result.VsrSW(upper + i * 2) = cvtsdsw(t, &sat);
+ }
+
+ *r = result;
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+
+ for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
+ t += a->s8[4 * i + j];
+ }
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+
+ t += a->s16[2 * i] + a->s16[2 * i + 1];
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t t = (uint64_t)b->u32[i];
+
+ for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
+ t += a->u8[4 * i + j];
+ }
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ set_vscr_sat(env);
+ }
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define UPKHI 1
+#define UPKLO 0
+#else
+#define UPKHI 0
+#define UPKLO 1
+#endif
+#define VUPKPX(suffix, hi) \
+ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
+ uint16_t e = b->u16[hi ? i : i + 4]; \
+ uint8_t a = (e >> 15) ? 0xff : 0; \
+ uint8_t r = (e >> 10) & 0x1f; \
+ uint8_t g = (e >> 5) & 0x1f; \
+ uint8_t b = e & 0x1f; \
+ \
+ result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
+ } \
+ *r = result; \
+ }
+VUPKPX(lpx, UPKLO)
+VUPKPX(hpx, UPKHI)
+#undef VUPKPX
+
+#define VUPK(suffix, unpacked, packee, hi) \
+ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ \
+ if (hi) { \
+ for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
+ result.unpacked[i] = b->packee[i]; \
+ } \
+ } else { \
+ for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
+ i++) { \
+ result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
+ } \
+ } \
+ *r = result; \
+ }
+VUPK(hsb, s16, s8, UPKHI)
+VUPK(hsh, s32, s16, UPKHI)
+VUPK(hsw, s64, s32, UPKHI)
+VUPK(lsb, s16, s8, UPKLO)
+VUPK(lsh, s32, s16, UPKLO)
+VUPK(lsw, s64, s32, UPKLO)
+#undef VUPK
+#undef UPKHI
+#undef UPKLO
+
+#define VGENERIC_DO(name, element) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = name(b->element[i]); \
+ } \
+ }
+
+#define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8)
+#define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16)
+
+VGENERIC_DO(clzb, u8)
+VGENERIC_DO(clzh, u16)
+
+#undef clzb
+#undef clzh
+
+#define ctzb(v) ((v) ? ctz32(v) : 8)
+#define ctzh(v) ((v) ? ctz32(v) : 16)
+#define ctzw(v) ctz32((v))
+#define ctzd(v) ctz64((v))
+
+VGENERIC_DO(ctzb, u8)
+VGENERIC_DO(ctzh, u16)
+VGENERIC_DO(ctzw, u32)
+VGENERIC_DO(ctzd, u64)
+
+#undef ctzb
+#undef ctzh
+#undef ctzw
+#undef ctzd
+
+#define popcntb(v) ctpop8(v)
+#define popcnth(v) ctpop16(v)
+#define popcntw(v) ctpop32(v)
+#define popcntd(v) ctpop64(v)
+
+VGENERIC_DO(popcntb, u8)
+VGENERIC_DO(popcnth, u16)
+VGENERIC_DO(popcntw, u32)
+VGENERIC_DO(popcntd, u64)
+
+#undef popcntb
+#undef popcnth
+#undef popcntw
+#undef popcntd
+
+#undef VGENERIC_DO
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define QW_ONE { .u64 = { 0, 1 } }
+#else
+#define QW_ONE { .u64 = { 1, 0 } }
+#endif
+
+#ifndef CONFIG_INT128
+
+static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
+{
+ t->u64[0] = ~a.u64[0];
+ t->u64[1] = ~a.u64[1];
+}
+
+static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
+{
+ if (a.VsrD(0) < b.VsrD(0)) {
+ return -1;
+ } else if (a.VsrD(0) > b.VsrD(0)) {
+ return 1;
+ } else if (a.VsrD(1) < b.VsrD(1)) {
+ return -1;
+ } else if (a.VsrD(1) > b.VsrD(1)) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
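+/*
+ * 128-bit addition from two 64-bit halves: the low-half carry-out test
+ * (~a.lo < b.lo) is equivalent to a.lo + b.lo wrapping, since
+ * a.lo + b.lo > UINT64_MAX iff b.lo > UINT64_MAX - a.lo == ~a.lo.
+ */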
+static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
+{
+ t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
+ t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
+ (~a.VsrD(1) < b.VsrD(1));
+}
+
+static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
+{
+ ppc_avr_t not_a;
+ t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
+ t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
+ (~a.VsrD(1) < b.VsrD(1));
+ avr_qw_not(&not_a, a);
+ return avr_qw_cmpu(not_a, b) < 0;
+}
+
+#endif
+
+void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + b->u128;
+#else
+ avr_qw_add(r, *a, *b);
+#endif
+}
+
+void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + b->u128 + (c->u128 & 1);
+#else
+
+ if (c->VsrD(1) & 1) {
+ ppc_avr_t tmp;
+
+ tmp.VsrD(0) = 0;
+ tmp.VsrD(1) = c->VsrD(1) & 1;
+ avr_qw_add(&tmp, *a, tmp);
+ avr_qw_add(r, tmp, *b);
+ } else {
+ avr_qw_add(r, *a, *b);
+ }
+#endif
+}
+
+void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = (~a->u128 < b->u128);
+#else
+ ppc_avr_t not_a;
+
+ avr_qw_not(&not_a, *a);
+
+ r->VsrD(0) = 0;
+ r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0);
+#endif
+}
+
+void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ int carry_out = (~a->u128 < b->u128);
+ if (!carry_out && (c->u128 & 1)) {
+ carry_out = ((a->u128 + b->u128 + 1) == 0) &&
+ ((a->u128 != 0) || (b->u128 != 0));
+ }
+ r->u128 = carry_out;
+#else
+
+ int carry_in = c->VsrD(1) & 1;
+ int carry_out = 0;
+ ppc_avr_t tmp;
+
+ carry_out = avr_qw_addc(&tmp, *a, *b);
+
+ if (!carry_out && carry_in) {
+ ppc_avr_t one = QW_ONE;
+ carry_out = avr_qw_addc(&tmp, tmp, one);
+ }
+ r->VsrD(0) = 0;
+ r->VsrD(1) = carry_out;
+#endif
+}
+
+void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 - b->u128;
+#else
+ ppc_avr_t tmp;
+ ppc_avr_t one = QW_ONE;
+
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ avr_qw_add(r, tmp, one);
+#endif
+}
+
+void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + ~b->u128 + (c->u128 & 1);
+#else
+ ppc_avr_t tmp, sum;
+
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&sum, *a, tmp);
+
+ tmp.VsrD(0) = 0;
+ tmp.VsrD(1) = c->VsrD(1) & 1;
+ avr_qw_add(r, sum, tmp);
+#endif
+}
+
+void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = (~a->u128 < ~b->u128) ||
+ (a->u128 + ~b->u128 == (__uint128_t)-1);
+#else
+ int carry = (avr_qw_cmpu(*a, *b) > 0);
+ if (!carry) {
+ ppc_avr_t tmp;
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull));
+ }
+ r->VsrD(0) = 0;
+ r->VsrD(1) = carry;
+#endif
+}
+
+void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 =
+ (~a->u128 < ~b->u128) ||
+ ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
+#else
+ int carry_in = c->VsrD(1) & 1;
+ int carry_out = (avr_qw_cmpu(*a, *b) > 0);
+ if (!carry_out && carry_in) {
+ ppc_avr_t tmp;
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull));
+ }
+
+ r->VsrD(0) = 0;
+ r->VsrD(1) = carry_out;
+#endif
+}
+
+#define BCD_PLUS_PREF_1 0xC
+#define BCD_PLUS_PREF_2 0xF
+#define BCD_PLUS_ALT_1 0xA
+#define BCD_NEG_PREF 0xD
+#define BCD_NEG_ALT 0xB
+#define BCD_PLUS_ALT_2 0xE
+#define NATIONAL_PLUS 0x2B
+#define NATIONAL_NEG 0x2D
+
+#define BCD_DIG_BYTE(n) (15 - ((n) / 2))
+
+static int bcd_get_sgn(ppc_avr_t *bcd)
+{
+ switch (bcd->VsrB(BCD_DIG_BYTE(0)) & 0xF) {
+ case BCD_PLUS_PREF_1:
+ case BCD_PLUS_PREF_2:
+ case BCD_PLUS_ALT_1:
+ case BCD_PLUS_ALT_2:
+ {
+ return 1;
+ }
+
+ case BCD_NEG_PREF:
+ case BCD_NEG_ALT:
+ {
+ return -1;
+ }
+
+ default:
+ {
+ return 0;
+ }
+ }
+}
+
+static int bcd_preferred_sgn(int sgn, int ps)
+{
+ if (sgn >= 0) {
+ return (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2;
+ } else {
+ return BCD_NEG_PREF;
+ }
+}
+
+static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid)
+{
+ uint8_t result;
+ if (n & 1) {
+ result = bcd->VsrB(BCD_DIG_BYTE(n)) >> 4;
+ } else {
+ result = bcd->VsrB(BCD_DIG_BYTE(n)) & 0xF;
+ }
+
+ if (unlikely(result > 9)) {
+ *invalid = true;
+ }
+ return result;
+}
+
+static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
+{
+ if (n & 1) {
+ bcd->VsrB(BCD_DIG_BYTE(n)) &= 0x0F;
+ bcd->VsrB(BCD_DIG_BYTE(n)) |= (digit << 4);
+ } else {
+ bcd->VsrB(BCD_DIG_BYTE(n)) &= 0xF0;
+ bcd->VsrB(BCD_DIG_BYTE(n)) |= digit;
+ }
+}
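+
+/*
+ * Illustrative layout: the sign nibble is the low nibble of byte 15
+ * (digit 0); digit 1 is the high nibble of byte 15, digit 2 the low
+ * nibble of byte 14, and so on up to digit 31 in the high nibble of
+ * byte 0.
+ */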
+
+static bool bcd_is_valid(ppc_avr_t *bcd)
+{
+ int i;
+ int invalid = 0;
+
+ if (bcd_get_sgn(bcd) == 0) {
+ return false;
+ }
+
+ for (i = 1; i < 32; i++) {
+ bcd_get_digit(bcd, i, &invalid);
+ if (unlikely(invalid)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static int bcd_cmp_zero(ppc_avr_t *bcd)
+{
+ if (bcd->VsrD(0) == 0 && (bcd->VsrD(1) >> 4) == 0) {
+ return CRF_EQ;
+ } else {
+ return (bcd_get_sgn(bcd) == 1) ? CRF_GT : CRF_LT;
+ }
+}
+
+static uint16_t get_national_digit(ppc_avr_t *reg, int n)
+{
+ return reg->VsrH(7 - n);
+}
+
+static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
+{
+ reg->VsrH(7 - n) = val;
+}
+
+static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ int invalid = 0;
+ for (i = 31; i > 0; i--) {
+ uint8_t dig_a = bcd_get_digit(a, i, &invalid);
+ uint8_t dig_b = bcd_get_digit(b, i, &invalid);
+ if (unlikely(invalid)) {
+ return 0; /* doesn't matter */
+ } else if (dig_a > dig_b) {
+ return 1;
+ } else if (dig_a < dig_b) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
+ int *overflow)
+{
+ int carry = 0;
+ int i;
+ int is_zero = 1;
+
+ for (i = 1; i <= 31; i++) {
+ uint8_t digit = bcd_get_digit(a, i, invalid) +
+ bcd_get_digit(b, i, invalid) + carry;
+ is_zero &= (digit == 0);
+ if (digit > 9) {
+ carry = 1;
+ digit -= 10;
+ } else {
+ carry = 0;
+ }
+
+ bcd_put_digit(t, digit, i);
+ }
+
+ *overflow = carry;
+ return is_zero;
+}
+
+static void bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
+ int *overflow)
+{
+ int carry = 0;
+ int i;
+
+ for (i = 1; i <= 31; i++) {
+ uint8_t digit = bcd_get_digit(a, i, invalid) -
+ bcd_get_digit(b, i, invalid) + carry;
+ if (digit & 0x80) {
+ carry = -1;
+ digit += 10;
+ } else {
+ carry = 0;
+ }
+
+ bcd_put_digit(t, digit, i);
+ }
+
+ *overflow = carry;
+}
+
+uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int sgna = bcd_get_sgn(a);
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgna == 0) || (sgnb == 0);
+ int overflow = 0;
+ int zero = 0;
+ uint32_t cr = 0;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+
+ if (!invalid) {
+ if (sgna == sgnb) {
+ result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps);
+ zero = bcd_add_mag(&result, a, b, &invalid, &overflow);
+ cr = (sgna > 0) ? CRF_GT : CRF_LT;
+ } else {
+ int magnitude = bcd_cmp_mag(a, b);
+ if (magnitude > 0) {
+ result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgna, ps);
+ bcd_sub_mag(&result, a, b, &invalid, &overflow);
+ cr = (sgna > 0) ? CRF_GT : CRF_LT;
+ } else if (magnitude < 0) {
+ result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(sgnb, ps);
+ bcd_sub_mag(&result, b, a, &invalid, &overflow);
+ cr = (sgnb > 0) ? CRF_GT : CRF_LT;
+ } else {
+ result.VsrB(BCD_DIG_BYTE(0)) = bcd_preferred_sgn(0, ps);
+ cr = CRF_EQ;
+ }
+ }
+ }
+
+ if (unlikely(invalid)) {
+ result.VsrD(0) = result.VsrD(1) = -1;
+ cr = CRF_SO;
+ } else if (overflow) {
+ cr |= CRF_SO;
+ } else if (zero) {
+ cr |= CRF_EQ;
+ }
+
+ *r = result;
+
+ return cr;
+}
+
+uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ ppc_avr_t bcopy = *b;
+ int sgnb = bcd_get_sgn(b);
+ if (sgnb < 0) {
+ bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0);
+ } else if (sgnb > 0) {
+ bcd_put_digit(&bcopy, BCD_NEG_PREF, 0);
+ }
+ /* else invalid ... defer to bcdadd code for proper handling */
+
+ return helper_bcdadd(r, a, &bcopy, ps);
+}
+
+uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint16_t national = 0;
+ uint16_t sgnb = get_national_digit(b, 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG);
+
+ for (i = 1; i < 8; i++) {
+ national = get_national_digit(b, i);
+ if (unlikely(national < 0x30 || national > 0x39)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, national & 0xf, i);
+ }
+
+ if (sgnb == NATIONAL_PLUS) {
+ bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = (b->VsrD(0) != 0) || ((b->VsrD(1) >> 32) != 0);
+
+ for (i = 1; i < 8; i++) {
+ set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+ }
+ set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0);
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int invalid = 0;
+ int zone_digit = 0;
+ int zone_lead = ps ? 0xF : 0x3;
+ int digit = 0;
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int sgnb = b->VsrB(BCD_DIG_BYTE(0)) >> 4;
+
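+    /*
+     * Zoned format: each byte holds one digit in its low nibble with a
+     * zone nibble above it (0x3 when ps is clear, 0xF when ps is set);
+     * the zone nibble of the rightmost byte encodes the sign.
+     */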
+ if (unlikely((sgnb < 0xA) && ps)) {
+ invalid = 1;
+ }
+
+ for (i = 0; i < 16; i++) {
+ zone_digit = i ? b->VsrB(BCD_DIG_BYTE(i * 2)) >> 4 : zone_lead;
+ digit = b->VsrB(BCD_DIG_BYTE(i * 2)) & 0xF;
+ if (unlikely(zone_digit != zone_lead || digit > 0x9)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, digit, i + 1);
+ }
+
+ if ((ps && (sgnb == 0xB || sgnb == 0xD)) ||
+ (!ps && (sgnb & 0x4))) {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint8_t digit = 0;
+ int sgnb = bcd_get_sgn(b);
+ int zone_lead = (ps) ? 0xF0 : 0x30;
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = ((b->VsrD(0) >> 4) != 0);
+
+ for (i = 0; i < 16; i++) {
+ digit = bcd_get_digit(b, i + 1, &invalid);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+
+ ret.VsrB(BCD_DIG_BYTE(i * 2)) = zone_lead + digit;
+ }
+
+ if (ps) {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1);
+ } else {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0x3 : 0x7, 1);
+ }
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+/**
+ * Compare 2 128-bit unsigned integers, passed in as unsigned 64-bit pairs
+ *
+ * Returns:
+ * > 0 if ahi|alo > bhi|blo,
+ * 0 if ahi|alo == bhi|blo,
+ * < 0 if ahi|alo < bhi|blo
+ */
+static inline int ucmp128(uint64_t alo, uint64_t ahi,
+ uint64_t blo, uint64_t bhi)
+{
+ return (ahi == bhi) ?
+ (alo > blo ? 1 : (alo == blo ? 0 : -1)) :
+ (ahi > bhi ? 1 : -1);
+}
+
+uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr;
+ uint64_t lo_value;
+ uint64_t hi_value;
+ uint64_t rem;
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ if (b->VsrSD(0) < 0) {
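+        /*
+         * Take the absolute value: a 128-bit two's-complement negation,
+         * carrying into the high doubleword only when the negated low
+         * doubleword is zero.
+         */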
+ lo_value = -b->VsrSD(1);
+ hi_value = ~b->VsrD(0) + !lo_value;
+ bcd_put_digit(&ret, 0xD, 0);
+
+ cr = CRF_LT;
+ } else {
+ lo_value = b->VsrD(1);
+ hi_value = b->VsrD(0);
+ bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0);
+
+ if (hi_value == 0 && lo_value == 0) {
+ cr = CRF_EQ;
+ } else {
+ cr = CRF_GT;
+ }
+ }
+
+ /*
+ * Check src limits: abs(src) <= 10^31 - 1
+ *
+ * 10^31 - 1 = 0x0000007e37be2022 c0914b267fffffff
+ */
+ if (ucmp128(lo_value, hi_value,
+ 0xc0914b267fffffffULL, 0x7e37be2022ULL) > 0) {
+ cr |= CRF_SO;
+
+ /*
+ * According to the ISA, if src wouldn't fit in the destination
+ * register, the result is undefined.
+ * In that case, we leave r unchanged.
+ */
+ } else {
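+        /*
+         * Split the 128-bit value by 10^15: the remainder supplies the
+         * low 15 BCD digits, and the quotient (at most 10^16 - 1 here,
+         * so it fits in 64 bits) supplies the remaining digits.
+         */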
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
+
+ for (i = 1; i < 16; rem /= 10, i++) {
+ bcd_put_digit(&ret, rem % 10, i);
+ }
+
+ for (; i < 32; lo_value /= 10, i++) {
+ bcd_put_digit(&ret, lo_value % 10, i);
+ }
+
+ *r = ret;
+ }
+
+ return cr;
+}
+
+uint32_t helper_bcdctsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ uint8_t i;
+ int cr;
+ uint64_t carry;
+ uint64_t unused;
+ uint64_t lo_value;
+ uint64_t hi_value = 0;
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgnb == 0);
+
+ lo_value = bcd_get_digit(b, 31, &invalid);
+ for (i = 30; i > 0; i--) {
+ mulu64(&lo_value, &carry, lo_value, 10ULL);
+ mulu64(&hi_value, &unused, hi_value, 10ULL);
+ lo_value += bcd_get_digit(b, i, &invalid);
+ hi_value += carry;
+
+ if (unlikely(invalid)) {
+ break;
+ }
+ }
+
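+    /*
+     * Negate in 128-bit two's complement: the high doubleword takes a
+     * carry-in only when the negated low doubleword is zero.
+     */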
+ if (sgnb == -1) {
+ r->VsrSD(1) = -lo_value;
+ r->VsrSD(0) = ~hi_value + !r->VsrSD(1);
+ } else {
+ r->VsrSD(1) = lo_value;
+ r->VsrSD(0) = hi_value;
+ }
+
+ cr = bcd_cmp_zero(b);
+
+ if (unlikely(invalid)) {
+ cr = CRF_SO;
+ }
+
+ return cr;
+}
+
+uint32_t helper_bcdcpsgn(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int invalid = 0;
+
+ if (bcd_get_sgn(a) == 0 || bcd_get_sgn(b) == 0) {
+ return CRF_SO;
+ }
+
+ *r = *a;
+ bcd_put_digit(r, b->VsrB(BCD_DIG_BYTE(0)) & 0xF, 0);
+
+ for (i = 1; i < 32; i++) {
+ bcd_get_digit(a, i, &invalid);
+ bcd_get_digit(b, i, &invalid);
+ if (unlikely(invalid)) {
+ return CRF_SO;
+ }
+ }
+
+ return bcd_cmp_zero(r);
+}
+
+uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int sgnb = bcd_get_sgn(b);
+
+ *r = *b;
+ bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0);
+
+ if (bcd_is_valid(b) == false) {
+ return CRF_SO;
+ }
+
+ return bcd_cmp_zero(r);
+}
+
+uint32_t helper_bcds(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int cr;
+ int i = a->VsrSB(7);
+ bool ox_flag = false;
+ int sgnb = bcd_get_sgn(b);
+ ppc_avr_t ret = *b;
+ ret.VsrD(1) &= ~0xf;
+
+ if (bcd_is_valid(b) == false) {
+ return CRF_SO;
+ }
+
+ if (unlikely(i > 31)) {
+ i = 31;
+ } else if (unlikely(i < -31)) {
+ i = -31;
+ }
+
+ if (i > 0) {
+ ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
+ } else {
+ urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
+ }
+ bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);
+
+ *r = ret;
+
+ cr = bcd_cmp_zero(r);
+ if (ox_flag) {
+ cr |= CRF_SO;
+ }
+
+ return cr;
+}
+
+uint32_t helper_bcdus(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int cr;
+ int i;
+ int invalid = 0;
+ bool ox_flag = false;
+ ppc_avr_t ret = *b;
+
+ for (i = 0; i < 32; i++) {
+ bcd_get_digit(b, i, &invalid);
+
+ if (unlikely(invalid)) {
+ return CRF_SO;
+ }
+ }
+
+ i = a->VsrSB(7);
+ if (i >= 32) {
+ ox_flag = true;
+ ret.VsrD(1) = ret.VsrD(0) = 0;
+ } else if (i <= -32) {
+ ret.VsrD(1) = ret.VsrD(0) = 0;
+ } else if (i > 0) {
+ ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
+ } else {
+ urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
+ }
+ *r = ret;
+
+ cr = bcd_cmp_zero(r);
+ if (ox_flag) {
+ cr |= CRF_SO;
+ }
+
+ return cr;
+}
+
+uint32_t helper_bcdsr(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int cr;
+ int unused = 0;
+ int invalid = 0;
+ bool ox_flag = false;
+ int sgnb = bcd_get_sgn(b);
+ ppc_avr_t ret = *b;
+ ret.VsrD(1) &= ~0xf;
+
+ int i = a->VsrSB(7);
+ ppc_avr_t bcd_one;
+
+ bcd_one.VsrD(0) = 0;
+ bcd_one.VsrD(1) = 0x10;
+
+ if (bcd_is_valid(b) == false) {
+ return CRF_SO;
+ }
+
+ if (unlikely(i > 31)) {
+ i = 31;
+ } else if (unlikely(i < -31)) {
+ i = -31;
+ }
+
+ if (i > 0) {
+ ulshift(&ret.VsrD(1), &ret.VsrD(0), i * 4, &ox_flag);
+ } else {
+ urshift(&ret.VsrD(1), &ret.VsrD(0), -i * 4);
+
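+        /*
+         * Round to nearest: the last digit shifted out now sits in the
+         * sign-nibble slot; if it is 5 or more, add one to the result.
+         */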
+ if (bcd_get_digit(&ret, 0, &invalid) >= 5) {
+ bcd_add_mag(&ret, &ret, &bcd_one, &invalid, &unused);
+ }
+ }
+ bcd_put_digit(&ret, bcd_preferred_sgn(sgnb, ps), 0);
+
+ cr = bcd_cmp_zero(&ret);
+ if (ox_flag) {
+ cr |= CRF_SO;
+ }
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdtrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ uint64_t mask;
+ uint32_t ox_flag = 0;
+ int i = a->VsrSH(3) + 1;
+ ppc_avr_t ret = *b;
+
+ if (bcd_is_valid(b) == false) {
+ return CRF_SO;
+ }
+
+ if (i > 16 && i < 32) {
+ mask = (uint64_t)-1 >> (128 - i * 4);
+ if (ret.VsrD(0) & ~mask) {
+ ox_flag = CRF_SO;
+ }
+
+ ret.VsrD(0) &= mask;
+ } else if (i >= 0 && i <= 16) {
+ mask = (uint64_t)-1 >> (64 - i * 4);
+ if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
+ ox_flag = CRF_SO;
+ }
+
+ ret.VsrD(1) &= mask;
+ ret.VsrD(0) = 0;
+ }
+ bcd_put_digit(&ret, bcd_preferred_sgn(bcd_get_sgn(b), ps), 0);
+ *r = ret;
+
+ return bcd_cmp_zero(&ret) | ox_flag;
+}
+
+uint32_t helper_bcdutrunc(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ uint64_t mask;
+ uint32_t ox_flag = 0;
+ int invalid = 0;
+ ppc_avr_t ret = *b;
+
+ for (i = 0; i < 32; i++) {
+ bcd_get_digit(b, i, &invalid);
+
+ if (unlikely(invalid)) {
+ return CRF_SO;
+ }
+ }
+
+ i = a->VsrSH(3);
+ if (i > 16 && i < 33) {
+ mask = (uint64_t)-1 >> (128 - i * 4);
+ if (ret.VsrD(0) & ~mask) {
+ ox_flag = CRF_SO;
+ }
+
+ ret.VsrD(0) &= mask;
+ } else if (i > 0 && i <= 16) {
+ mask = (uint64_t)-1 >> (64 - i * 4);
+ if (ret.VsrD(0) || (ret.VsrD(1) & ~mask)) {
+ ox_flag = CRF_SO;
+ }
+
+ ret.VsrD(1) &= mask;
+ ret.VsrD(0) = 0;
+ } else if (i == 0) {
+ if (ret.VsrD(0) || ret.VsrD(1)) {
+ ox_flag = CRF_SO;
+ }
+ ret.VsrD(0) = ret.VsrD(1) = 0;
+ }
+
+ *r = ret;
+ if (r->VsrD(0) == 0 && r->VsrD(1) == 0) {
+ return ox_flag | CRF_EQ;
+ }
+
+ return ox_flag | CRF_GT;
+}
+
+void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
+{
+ int i;
+ VECTOR_FOR_INORDER_I(i, u8) {
+ r->u8[i] = AES_sbox[a->u8[i]];
+ }
+}
+
+void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
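+    /*
+     * One AES encryption round via combined T-tables: each lookup folds
+     * SubBytes and MixColumns together, AES_shifts applies ShiftRows,
+     * and the XOR with b adds the round key.
+     */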
+ VECTOR_FOR_INORDER_I(i, u32) {
+ result.VsrW(i) = b->VsrW(i) ^
+ (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
+ AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
+ AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
+ AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
+ }
+ *r = result;
+}
+
+void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
+ }
+ *r = result;
+}
+
+void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+    /*
+     * This differs from what is written in ISA V2.07: the RTL is
+     * incorrect and will be fixed in V2.07B.
+     */
+ int i;
+ ppc_avr_t tmp;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->VsrW(i) =
+ AES_imc[tmp.VsrB(4 * i + 0)][0] ^
+ AES_imc[tmp.VsrB(4 * i + 1)][1] ^
+ AES_imc[tmp.VsrB(4 * i + 2)][2] ^
+ AES_imc[tmp.VsrB(4 * i + 3)][3];
+ }
+}
+
+void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
+ }
+ *r = result;
+}
+
+void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
+{
+ int st = (st_six & 0x10) != 0;
+ int six = st_six & 0xF;
+ int i;
+
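+    /*
+     * These are the four SHA-256 sigma functions: st selects between
+     * the lower-case (message schedule) and upper-case (round) pairs,
+     * and each bit of six picks sigma0 or sigma1 for that word.
+     */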
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ if (st == 0) {
+ if ((six & (0x8 >> i)) == 0) {
+ r->VsrW(i) = ror32(a->VsrW(i), 7) ^
+ ror32(a->VsrW(i), 18) ^
+ (a->VsrW(i) >> 3);
+ } else { /* six.bit[i] == 1 */
+ r->VsrW(i) = ror32(a->VsrW(i), 17) ^
+ ror32(a->VsrW(i), 19) ^
+ (a->VsrW(i) >> 10);
+ }
+ } else { /* st == 1 */
+ if ((six & (0x8 >> i)) == 0) {
+ r->VsrW(i) = ror32(a->VsrW(i), 2) ^
+ ror32(a->VsrW(i), 13) ^
+ ror32(a->VsrW(i), 22);
+ } else { /* six.bit[i] == 1 */
+ r->VsrW(i) = ror32(a->VsrW(i), 6) ^
+ ror32(a->VsrW(i), 11) ^
+ ror32(a->VsrW(i), 25);
+ }
+ }
+ }
+}
+
+void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
+{
+ int st = (st_six & 0x10) != 0;
+ int six = st_six & 0xF;
+ int i;
+
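+    /*
+     * Same scheme as vshasigmaw, but with the SHA-512 sigma functions
+     * operating on doublewords; the selector for element i is taken
+     * from bit 2*i of six, counting from the top.
+     */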
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ if (st == 0) {
+ if ((six & (0x8 >> (2 * i))) == 0) {
+ r->VsrD(i) = ror64(a->VsrD(i), 1) ^
+ ror64(a->VsrD(i), 8) ^
+ (a->VsrD(i) >> 7);
+ } else { /* six.bit[2*i] == 1 */
+ r->VsrD(i) = ror64(a->VsrD(i), 19) ^
+ ror64(a->VsrD(i), 61) ^
+ (a->VsrD(i) >> 6);
+ }
+ } else { /* st == 1 */
+ if ((six & (0x8 >> (2 * i))) == 0) {
+ r->VsrD(i) = ror64(a->VsrD(i), 28) ^
+ ror64(a->VsrD(i), 34) ^
+ ror64(a->VsrD(i), 39);
+ } else { /* six.bit[2*i] == 1 */
+ r->VsrD(i) = ror64(a->VsrD(i), 14) ^
+ ror64(a->VsrD(i), 18) ^
+ ror64(a->VsrD(i), 41);
+ }
+ }
+ }
+}
+
+void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int indexA = c->VsrB(i) >> 4;
+ int indexB = c->VsrB(i) & 0xF;
+
+ result.VsrB(i) = a->VsrB(indexA) ^ b->VsrB(indexB);
+ }
+ *r = result;
+}
+
+#undef VECTOR_FOR_INORDER_I
+
+/*****************************************************************************/
+/* SPE extension helpers */
+/* Use a table to make this quicker */
+static const uint8_t hbrev[16] = {
+ 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
+ 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
+};
+
+static inline uint8_t byte_reverse(uint8_t val)
+{
+ return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
+}
+
+static inline uint32_t word_reverse(uint32_t val)
+{
+ return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
+ (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
+}
+
+#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
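+/*
+ * brinc: bit-reversed increment, as used for FFT-style bit-reversed
+ * addressing: reverse the masked bits, add one, and reverse back.
+ */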
+target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
+{
+ uint32_t a, b, d, mask;
+
+ mask = UINT32_MAX >> (32 - MASKBITS);
+ a = arg1 & mask;
+ b = arg2 & mask;
+ d = word_reverse(1 + word_reverse(a | ~b));
+ return (arg1 & ~mask) | (d & b);
+}
+
+uint32_t helper_cntlsw32(uint32_t val)
+{
+ if (val & 0x80000000) {
+ return clz32(~val);
+ } else {
+ return clz32(val);
+ }
+}
+
+uint32_t helper_cntlzw32(uint32_t val)
+{
+ return clz32(val);
+}
+
+/* 440 specific */
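+/*
+ * dlmzb: scan the eight bytes of high:low, most-significant byte
+ * first, for a zero byte; the resulting byte count goes to XER and,
+ * when Rc is set, a completion code goes to CR0.
+ */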
+target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
+ target_ulong low, uint32_t update_Rc)
+{
+ target_ulong mask;
+ int i;
+
+ i = 1;
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((high & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x4;
+ }
+ goto done;
+ }
+ i++;
+ }
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((low & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x8;
+ }
+ goto done;
+ }
+ i++;
+ }
+ i = 8;
+ if (update_Rc) {
+ env->crf[0] = 0x2;
+ }
+ done:
+ env->xer = (env->xer & ~0x7F) | i;
+ if (update_Rc) {
+ env->crf[0] |= xer_so;
+ }
+ return i;
+}
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
new file mode 100644
index 000000000..6aa9484f3
--- /dev/null
+++ b/target/ppc/internal.h
@@ -0,0 +1,294 @@
+/*
+ * PowerPC internal definitions for qemu.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_INTERNAL_H
+#define PPC_INTERNAL_H
+
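+/*
+ * Generate mask helpers: build a mask of ones from bit 'start' to bit
+ * 'end' inclusive, in PowerPC big-endian bit numbering (bit 0 is the
+ * most significant); a wrapped range (start > end) yields the
+ * complement mask.
+ */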
+#define FUNC_MASK(name, ret_type, size, max_val) \
+static inline ret_type name(uint##size##_t start, \
+ uint##size##_t end) \
+{ \
+ ret_type ret, max_bit = size - 1; \
+ \
+ if (likely(start == 0)) { \
+ ret = max_val << (max_bit - end); \
+ } else if (likely(end == max_bit)) { \
+ ret = max_val >> start; \
+ } else { \
+ ret = (((uint##size##_t)(-1ULL)) >> (start)) ^ \
+ (((uint##size##_t)(-1ULL) >> (end)) >> 1); \
+ if (unlikely(start > end)) { \
+ return ~ret; \
+ } \
+ } \
+ \
+ return ret; \
+}
+
+#if defined(TARGET_PPC64)
+FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX);
+#else
+FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX);
+#endif
+FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX);
+FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX);
+
+/*****************************************************************************/
+/*** Instruction decoding ***/
+#define EXTRACT_HELPER(name, shift, nb) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return extract32(opcode, shift, nb); \
+}
+
+#define EXTRACT_SHELPER(name, shift, nb) \
+static inline int32_t name(uint32_t opcode) \
+{ \
+ return sextract32(opcode, shift, nb); \
+}
+
+#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return extract32(opcode, shift1, nb1) << nb2 | \
+ extract32(opcode, shift2, nb2); \
+}
+
+#define EXTRACT_HELPER_SPLIT_3(name, \
+ d0_bits, shift_op_d0, shift_d0, \
+ d1_bits, shift_op_d1, shift_d1, \
+ d2_bits, shift_op_d2, shift_d2) \
+static inline int16_t name(uint32_t opcode) \
+{ \
+ return \
+ (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \
+ (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \
+ (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2)); \
+}
+
+
+/* Opcode part 1 */
+EXTRACT_HELPER(opc1, 26, 6);
+/* Opcode part 2 */
+EXTRACT_HELPER(opc2, 1, 5);
+/* Opcode part 3 */
+EXTRACT_HELPER(opc3, 6, 5);
+/* Opcode part 4 */
+EXTRACT_HELPER(opc4, 16, 5);
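+    /*
+     * KVM transfers the SLB shadow address and size as a single
+     * 128-bit register, so the two fields must sit back to back.
+     */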
+/* Update Cr0 flags */
+EXTRACT_HELPER(Rc, 0, 1);
+/* Update Cr6 flags (Altivec) */
+EXTRACT_HELPER(Rc21, 10, 1);
+/* Destination */
+EXTRACT_HELPER(rD, 21, 5);
+/* Source */
+EXTRACT_HELPER(rS, 21, 5);
+/* First operand */
+EXTRACT_HELPER(rA, 16, 5);
+/* Second operand */
+EXTRACT_HELPER(rB, 11, 5);
+/* Third operand */
+EXTRACT_HELPER(rC, 6, 5);
+/*** Get CRn ***/
+EXTRACT_HELPER(crfD, 23, 3);
+EXTRACT_HELPER(BF, 23, 3);
+EXTRACT_HELPER(crfS, 18, 3);
+EXTRACT_HELPER(crbD, 21, 5);
+EXTRACT_HELPER(crbA, 16, 5);
+EXTRACT_HELPER(crbB, 11, 5);
+/* SPR / TBL */
+EXTRACT_HELPER(_SPR, 11, 10);
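+/*
+ * The SPR field encodes the register number with its two 5-bit halves
+ * swapped, so undo the swap when extracting it.
+ */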
+static inline uint32_t SPR(uint32_t opcode)
+{
+ uint32_t sprn = _SPR(opcode);
+
+ return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+}
+/*** Get constants ***/
+/* 16 bits signed immediate value */
+EXTRACT_SHELPER(SIMM, 0, 16);
+/* 16 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM, 0, 16);
+/* 5 bits signed immediate value */
+EXTRACT_SHELPER(SIMM5, 16, 5);
+/* 5 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM5, 16, 5);
+/* 4 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM4, 16, 4);
+/* Bit count */
+EXTRACT_HELPER(NB, 11, 5);
+/* Shift count */
+EXTRACT_HELPER(SH, 11, 5);
+/* lwat/ldat/stwat/stdat */
+EXTRACT_HELPER(FC, 11, 5);
+/* Vector shift count */
+EXTRACT_HELPER(VSH, 6, 4);
+/* Mask start */
+EXTRACT_HELPER(MB, 6, 5);
+/* Mask end */
+EXTRACT_HELPER(ME, 1, 5);
+/* Trap operand */
+EXTRACT_HELPER(TO, 21, 5);
+
+EXTRACT_HELPER(CRM, 12, 8);
+
+#ifndef CONFIG_USER_ONLY
+EXTRACT_HELPER(SR, 16, 4);
+#endif
+
+/* mtfsf/mtfsfi */
+EXTRACT_HELPER(FPBF, 23, 3);
+EXTRACT_HELPER(FPIMM, 12, 4);
+EXTRACT_HELPER(FPL, 25, 1);
+EXTRACT_HELPER(FPFLM, 17, 8);
+EXTRACT_HELPER(FPW, 16, 1);
+
+/* mffscrni */
+EXTRACT_HELPER(RM, 11, 2);
+
+/* addpcis */
+EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
+#if defined(TARGET_PPC64)
+/* darn */
+EXTRACT_HELPER(L, 16, 2);
+#endif
+
+/*** Jump target decoding ***/
+/* Immediate address */
+static inline target_ulong LI(uint32_t opcode)
+{
+ return (opcode >> 0) & 0x03FFFFFC;
+}
+
+static inline uint32_t BD(uint32_t opcode)
+{
+ return (opcode >> 0) & 0xFFFC;
+}
+
+EXTRACT_HELPER(BO, 21, 5);
+EXTRACT_HELPER(BI, 16, 5);
+/* Absolute/relative address */
+EXTRACT_HELPER(AA, 1, 1);
+/* Link */
+EXTRACT_HELPER(LK, 0, 1);
+
+/* DFP Z22-form */
+EXTRACT_HELPER(DCM, 10, 6)
+
+/* DFP Z23-form */
+EXTRACT_HELPER(RMC, 9, 2)
+EXTRACT_HELPER(Rrm, 16, 1)
+
+EXTRACT_HELPER_SPLIT(DQxT, 3, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
+EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
+EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
+EXTRACT_HELPER(DM, 8, 2);
+EXTRACT_HELPER(UIM, 16, 2);
+EXTRACT_HELPER(SHW, 8, 2);
+EXTRACT_HELPER(SP, 19, 2);
+EXTRACT_HELPER(IMM8, 11, 8);
+EXTRACT_HELPER(DCMX, 16, 7);
+EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6);
+
+void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
+void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
+void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
+
+/* translate.c */
+
+int ppc_fixup_cpu(PowerPCCPU *cpu);
+void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp);
+void destroy_ppc_opcodes(PowerPCCPU *cpu);
+
+/* gdbstub.c */
+void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *ppc);
+gchar *ppc_gdb_arch_name(CPUState *cs);
+
+/**
+ * prot_for_access_type:
+ * @access_type: Access type
+ *
+ * Return the protection bit required for the given access type.
+ */
+static inline int prot_for_access_type(MMUAccessType access_type)
+{
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ return PAGE_EXEC;
+ case MMU_DATA_LOAD:
+ return PAGE_READ;
+ case MMU_DATA_STORE:
+ return PAGE_WRITE;
+ }
+ g_assert_not_reached();
+}
+
+/* PowerPC MMU emulation */
+
+typedef struct mmu_ctx_t mmu_ctx_t;
+bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp,
+ int mmu_idx, bool guest_visible);
+int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr,
+ MMUAccessType access_type, int type,
+ int mmu_idx);
+/* Software driven TLB helpers */
+int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
+ int way, int is_code);
+/* Context used internally during MMU translations */
+struct mmu_ctx_t {
+ hwaddr raddr; /* Real address */
+ hwaddr eaddr; /* Effective address */
+ int prot; /* Protection bits */
+ hwaddr hash[2]; /* Pagetable hash values */
+ target_ulong ptem; /* Virtual segment ID | API */
+ int key; /* Access key */
+ int nx; /* Non-execute area */
+};
+
+/* Common routines used by software and hardware TLBs emulation */
+static inline int pte_is_valid(target_ulong pte0)
+{
+ return pte0 & 0x80000000 ? 1 : 0;
+}
+
+static inline void pte_invalidate(target_ulong *pte0)
+{
+ *pte0 &= ~0x80000000;
+}
+
+#define PTE_PTEM_MASK 0x7FFFFFBF
+#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+
+#ifdef CONFIG_USER_ONLY
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+#endif
+
+#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/kvm-stub.c b/target/ppc/kvm-stub.c
new file mode 100644
index 000000000..b98e1d404
--- /dev/null
+++ b/target/ppc/kvm-stub.c
@@ -0,0 +1,19 @@
+/*
+ * QEMU KVM PPC specific function stubs
+ *
+ * Copyright Freescale Inc. 2013
+ *
+ * Author: Alexander Graf <agraf@suse.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/ppc/openpic_kvm.h"
+
+int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
+{
+ return -EINVAL;
+}
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
new file mode 100644
index 000000000..dc93b9918
--- /dev/null
+++ b/target/ppc/kvm.c
@@ -0,0 +1,2961 @@
+/*
+ * PowerPC implementation of KVM hooks
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <dirent.h>
+#include <sys/ioctl.h>
+#include <sys/vfs.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "cpu.h"
+#include "cpu-models.h"
+#include "qemu/timer.h"
+#include "sysemu/hw_accel.h"
+#include "kvm_ppc.h"
+#include "sysemu/cpus.h"
+#include "sysemu/device_tree.h"
+#include "mmu-hash64.h"
+
+#include "hw/sysbus.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "migration/qemu-file-types.h"
+#include "sysemu/watchdog.h"
+#include "trace.h"
+#include "exec/gdbstub.h"
+#include "exec/memattrs.h"
+#include "exec/ram_addr.h"
+#include "sysemu/hostmem.h"
+#include "qemu/cutils.h"
+#include "qemu/main-loop.h"
+#include "qemu/mmap-alloc.h"
+#include "elf.h"
+#include "sysemu/kvm_int.h"
+
+#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
+
+#define DEBUG_RETURN_GUEST 0
+#define DEBUG_RETURN_GDB 1
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static int cap_interrupt_unset;
+static int cap_segstate;
+static int cap_booke_sregs;
+static int cap_ppc_smt;
+static int cap_ppc_smt_possible;
+static int cap_spapr_tce;
+static int cap_spapr_tce_64;
+static int cap_spapr_multitce;
+static int cap_spapr_vfio;
+static int cap_hior;
+static int cap_one_reg;
+static int cap_epr;
+static int cap_ppc_watchdog;
+static int cap_papr;
+static int cap_htab_fd;
+static int cap_fixup_hcalls;
+static int cap_htm; /* Hardware transactional memory support */
+static int cap_mmu_radix;
+static int cap_mmu_hash_v3;
+static int cap_xive;
+static int cap_resize_hpt;
+static int cap_ppc_pvr_compat;
+static int cap_ppc_safe_cache;
+static int cap_ppc_safe_bounds_check;
+static int cap_ppc_safe_indirect_branch;
+static int cap_ppc_count_cache_flush_assist;
+static int cap_ppc_nested_kvm_hv;
+static int cap_large_decr;
+static int cap_fwnmi;
+static int cap_rpt_invalidate;
+
+static uint32_t debug_inst_opcode;
+
+/*
+ * Check whether we are running with KVM-PR (instead of KVM-HV). This
+ * should only be used for fallback tests - generally we should use
+ * explicit capabilities for the features we want, rather than
+ * assuming what is/isn't available depending on the KVM variant.
+ */
+static bool kvmppc_is_pr(KVMState *ks)
+{
+ /* Assume KVM-PR if the GET_PVINFO capability is available */
+ return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
+}
+
+static int kvm_ppc_register_host_cpu_type(void);
+static void kvmppc_get_cpu_characteristics(KVMState *s);
+static int kvmppc_get_dec_bits(void);
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
+ cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
+ cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
+ cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
+ cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+ cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
+ cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
+ cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
+ cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
+ cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
+ cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
+ cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
+ /*
+ * Note: we don't set cap_papr here, because this capability is
+ * only activated after this by kvmppc_set_papr()
+ */
+ cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
+ cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
+ cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
+ cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
+ cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
+ cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
+ cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
+ cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
+ kvmppc_get_cpu_characteristics(s);
+ cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
+ cap_large_decr = kvmppc_get_dec_bits();
+ cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
+ /*
+     * Note: setting it to false because there is no such capability
+ * in KVM at this moment.
+ *
+ * TODO: call kvm_vm_check_extension() with the right capability
+ * after the kernel starts implementing it.
+ */
+ cap_ppc_pvr_compat = false;
+
+ if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
+ error_report("KVM: Host kernel doesn't have level irq capability");
+ exit(1);
+ }
+
+ cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
+ kvm_ppc_register_host_cpu_type();
+
+ return 0;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ return 0;
+}
+
+static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *cenv = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_sregs sregs;
+ int ret;
+
+ if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+ /*
+         * What we're really trying to say is "if we're on BookE, we
+         * use the native PVR for now". This is the only sane way to
+         * check it, though it may mislead users into thinking they
+         * can run BookE guests on BookS. Let's hope nobody dares
+         * enough :)
+ */
+ return 0;
+ } else {
+ if (!cap_segstate) {
+ fprintf(stderr, "kvm error: missing PVR setting capability\n");
+ return -ENOSYS;
+ }
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ if (ret) {
+ return ret;
+ }
+
+ sregs.pvr = cenv->spr[SPR_PVR];
+ return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+}
+
+/* Set up a shared TLB array with KVM */
+static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_book3e_206_tlb_params params = {};
+ struct kvm_config_tlb cfg = {};
+ unsigned int entries = 0;
+ int ret, i;
+
+ if (!kvm_enabled() ||
+ !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
+ return 0;
+ }
+
+ assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ params.tlb_sizes[i] = booke206_tlb_size(env, i);
+ params.tlb_ways[i] = booke206_tlb_ways(env, i);
+ entries += params.tlb_sizes[i];
+ }
+
+ assert(entries == env->nb_tlb);
+ assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
+
+ env->tlb_dirty = true;
+
+ cfg.array = (uintptr_t)env->tlb.tlbm;
+ cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
+ cfg.params = (uintptr_t)&params;
+ cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
+ if (ret < 0) {
+ fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ env->kvm_sw_tlb = true;
+ return 0;
+}
+
+
+#if defined(TARGET_PPC64)
+static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
+{
+ int ret;
+
+ assert(kvm_state != NULL);
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ error_setg(errp, "KVM doesn't expose the MMU features it supports");
+ error_append_hint(errp, "Consider switching to a newer KVM\n");
+ return;
+ }
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (ret == 0) {
+ return;
+ }
+
+ error_setg_errno(errp, -ret,
+ "KVM failed to provide the MMU features it supports");
+}
+
+struct ppc_radix_page_info *kvm_get_radix_page_info(void)
+{
+ KVMState *s = KVM_STATE(current_accel());
+ struct ppc_radix_page_info *radix_page_info;
+ struct kvm_ppc_rmmu_info rmmu_info;
+ int i;
+
+ if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
+ return NULL;
+ }
+ if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
+ return NULL;
+ }
+ radix_page_info = g_malloc0(sizeof(*radix_page_info));
+ radix_page_info->count = 0;
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ if (rmmu_info.ap_encodings[i]) {
+ radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
+ radix_page_info->count++;
+ }
+ }
+ return radix_page_info;
+}
+
+target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
+ bool radix, bool gtse,
+ uint64_t proc_tbl)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+ uint64_t flags = 0;
+ struct kvm_ppc_mmuv3_cfg cfg = {
+ .process_table = proc_tbl,
+ };
+
+ if (radix) {
+ flags |= KVM_PPC_MMUV3_RADIX;
+ }
+ if (gtse) {
+ flags |= KVM_PPC_MMUV3_GTSE;
+ }
+ cfg.flags = flags;
+ ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
+ switch (ret) {
+ case 0:
+ return H_SUCCESS;
+ case -EINVAL:
+ return H_PARAMETER;
+ case -ENODEV:
+ return H_NOT_AVAILABLE;
+ default:
+ return H_HARDWARE;
+ }
+}
+
+bool kvmppc_hpt_needs_host_contiguous_pages(void)
+{
+ static struct kvm_ppc_smmu_info smmu_info;
+
+ if (!kvm_enabled()) {
+ return false;
+ }
+
+ kvm_get_smmu_info(&smmu_info, &error_fatal);
+ return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
+}
+
+void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
+{
+ struct kvm_ppc_smmu_info smmu_info;
+ int iq, ik, jq, jk;
+ Error *local_err = NULL;
+
+ /* For now, we only have anything to check on hash64 MMUs */
+ if (!cpu->hash64_opts || !kvm_enabled()) {
+ return;
+ }
+
+ kvm_get_smmu_info(&smmu_info, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
+ && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
+ error_setg(errp,
+ "KVM does not support 1TiB segments which guest expects");
+ return;
+ }
+
+ if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
+ error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
+ smmu_info.slb_size, cpu->hash64_opts->slb_size);
+ return;
+ }
+
+ /*
+ * Verify that every pagesize supported by the cpu model is
+ * supported by KVM with the same encodings
+ */
+ for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
+ PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
+ struct kvm_ppc_one_seg_page_size *ksps;
+
+ for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
+ if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
+ break;
+ }
+ }
+ if (ik >= ARRAY_SIZE(smmu_info.sps)) {
+ error_setg(errp, "KVM doesn't support for base page shift %u",
+ qsps->page_shift);
+ return;
+ }
+
+ ksps = &smmu_info.sps[ik];
+ if (ksps->slb_enc != qsps->slb_enc) {
+ error_setg(errp,
+"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
+ ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
+ return;
+ }
+
+ for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
+ for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
+ if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
+ break;
+ }
+ }
+
+ if (jk >= ARRAY_SIZE(ksps->enc)) {
+ error_setg(errp, "KVM doesn't support page shift %u/%u",
+ qsps->enc[jq].page_shift, qsps->page_shift);
+ return;
+ }
+ if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
+ error_setg(errp,
+"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
+ ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
+ qsps->page_shift, qsps->enc[jq].pte_enc);
+ return;
+ }
+ }
+ }
+
+ if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
+ /*
+         * Most of the guest page sizes we can use depend on the host
+         * pages used to map guest RAM, which is handled in the
+         * platform code. Cache-inhibited large pages (64k), however,
+         * are used for I/O, so if they're mapped to the host at all
+         * it will be a normal mapping, not a special hugepage one
+         * used for RAM.
+ */
+ if (qemu_real_host_page_size < 0x10000) {
+ error_setg(errp,
+ "KVM can't supply 64kiB CI pages, which guest expects");
+ }
+ }
+}
+#endif /* defined (TARGET_PPC64) */
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return POWERPC_CPU(cpu)->vcpu_id;
+}
+
+/*
+ * e500 supports 2 h/w breakpoints and 2 watchpoints. book3s supports
+ * only 1 watchpoint, so an array size of 4 is sufficient for now.
+ */
+#define MAX_HW_BKPTS 4
+
+static struct HWBreakpoint {
+ target_ulong addr;
+ int type;
+} hw_debug_points[MAX_HW_BKPTS];
+
+static CPUWatchpoint hw_watchpoint;
+
+/* By default, no breakpoints or watchpoints are supported */
+static int max_hw_breakpoint;
+static int max_hw_watchpoint;
+static int nb_hw_breakpoint;
+static int nb_hw_watchpoint;
+
+static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
+{
+ if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+ max_hw_breakpoint = 2;
+ max_hw_watchpoint = 2;
+ }
+
+ if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
+ fprintf(stderr, "Error initializing h/w breakpoints\n");
+ return;
+ }
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+ int ret;
+
+ /* Synchronize sregs with kvm */
+ ret = kvm_arch_sync_sregs(cpu);
+ if (ret) {
+ if (ret == -EINVAL) {
+ error_report("Register sync failed... If you're using kvm-hv.ko,"
+ " only \"-cpu host\" is possible");
+ }
+ return ret;
+ }
+
+ switch (cenv->mmu_model) {
+ case POWERPC_MMU_BOOKE206:
+ /* This target supports access to KVM's guest TLB */
+ ret = kvm_booke206_tlb_init(cpu);
+ break;
+ case POWERPC_MMU_2_07:
+ if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
+ /*
+             * KVM-HV provides transactional memory on POWER8 even
+             * without the KVM_CAP_PPC_HTM extension, so enable it
+             * here instead, as long as it's available to userspace
+             * on the host.
+ */
+ if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
+ cap_htm = true;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
+ kvmppc_hw_debug_points_init(cenv);
+
+ return ret;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
+static void kvm_sw_tlb_put(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_dirty_tlb dirty_tlb;
+ unsigned char *bitmap;
+ int ret;
+
+ if (!env->kvm_sw_tlb) {
+ return;
+ }
+
+ bitmap = g_malloc((env->nb_tlb + 7) / 8);
+ memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
+
+ dirty_tlb.bitmap = (uintptr_t)bitmap;
+ dirty_tlb.num_dirty = env->nb_tlb;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
+ if (ret) {
+ fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
+ __func__, strerror(-ret));
+ }
+
+ g_free(bitmap);
+}
+
+static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ } val;
+ struct kvm_one_reg reg = {
+ .id = id,
+ .addr = (uintptr_t) &val,
+ };
+ int ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ trace_kvm_failed_spr_get(spr, strerror(errno));
+ } else {
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ env->spr[spr] = val.u32;
+ break;
+
+ case KVM_REG_SIZE_U64:
+ env->spr[spr] = val.u64;
+ break;
+
+ default:
+ /* Don't handle this size yet */
+ abort();
+ }
+ }
+}
+
+static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ } val;
+ struct kvm_one_reg reg = {
+ .id = id,
+ .addr = (uintptr_t) &val,
+ };
+ int ret;
+
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ val.u32 = env->spr[spr];
+ break;
+
+ case KVM_REG_SIZE_U64:
+ val.u64 = env->spr[spr];
+ break;
+
+ default:
+ /* Don't handle this size yet */
+ abort();
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret != 0) {
+ trace_kvm_failed_spr_set(spr, strerror(errno));
+ }
+}
+
+static int kvm_put_fp(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int i;
+ int ret;
+
+ if (env->insns_flags & PPC_FLOAT) {
+ uint64_t fpscr = env->fpscr;
+ bool vsx = !!(env->insns_flags2 & PPC2_VSX);
+
+ reg.id = KVM_REG_PPC_FPSCR;
+ reg.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_fpscr_set(strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ uint64_t vsr[2];
+ uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
+ uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
+
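+            /*
+             * KVM views the VSR as one 128-bit value, so order the two
+             * 64-bit halves according to host endianness.
+             */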
+#ifdef HOST_WORDS_BIGENDIAN
+ vsr[0] = float64_val(*fpr);
+ vsr[1] = *vsrl;
+#else
+ vsr[0] = *vsrl;
+ vsr[1] = float64_val(*fpr);
+#endif
+ reg.addr = (uintptr_t) &vsr;
+ reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
+ strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ if (env->insns_flags & PPC_ALTIVEC) {
+ reg.id = KVM_REG_PPC_VSCR;
+ reg.addr = (uintptr_t)&env->vscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vscr_set(strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ reg.id = KVM_REG_PPC_VR(i);
+ reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vr_set(i, strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_get_fp(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int i;
+ int ret;
+
+ if (env->insns_flags & PPC_FLOAT) {
+ uint64_t fpscr;
+ bool vsx = !!(env->insns_flags2 & PPC2_VSX);
+
+ reg.id = KVM_REG_PPC_FPSCR;
+ reg.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_fpscr_get(strerror(errno));
+ return ret;
+ } else {
+ env->fpscr = fpscr;
+ }
+
+ for (i = 0; i < 32; i++) {
+ uint64_t vsr[2];
+ uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
+ uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
+
+ reg.addr = (uintptr_t) &vsr;
+ reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
+ strerror(errno));
+ return ret;
+ } else {
+#ifdef HOST_WORDS_BIGENDIAN
+ *fpr = vsr[0];
+ if (vsx) {
+ *vsrl = vsr[1];
+ }
+#else
+ *fpr = vsr[1];
+ if (vsx) {
+ *vsrl = vsr[0];
+ }
+#endif
+ }
+ }
+ }
+
+ if (env->insns_flags & PPC_ALTIVEC) {
+ reg.id = KVM_REG_PPC_VSCR;
+ reg.addr = (uintptr_t)&env->vscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vscr_get(strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ reg.id = KVM_REG_PPC_VR(i);
+ reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vr_get(i, strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#if defined(TARGET_PPC64)
+static int kvm_get_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vpa_addr_get(strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&spapr_cpu->slb_shadow_size
+ == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_slb_get(strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&spapr_cpu->dtl_size
+ == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_dtl_get(strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int kvm_put_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+ struct kvm_one_reg reg;
+ int ret;
+
+ /*
+ * SLB shadow or DTL can't be registered unless a master VPA is
+ * registered. That means when restoring state, if a VPA *is*
+ * registered, we need to set that up first. If not, we need to
+     * deregister the others before deregistering the master VPA.
+ */
+ assert(spapr_cpu->vpa_addr
+ || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
+
+ if (spapr_cpu->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_vpa_addr_set(strerror(errno));
+ return ret;
+ }
+ }
+
+ assert((uintptr_t)&spapr_cpu->slb_shadow_size
+ == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_slb_set(strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&spapr_cpu->dtl_size
+ == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_dtl_set(strerror(errno));
+ return ret;
+ }
+
+ if (!spapr_cpu->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ trace_kvm_failed_null_vpa_addr_set(strerror(errno));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif /* TARGET_PPC64 */
+
+int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int i;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
+ } else {
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+ }
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ if (env->slb[i].esid & SLB_ESID_V) {
+ sregs.u.s.ppc64.slb[i].slbe |= i;
+ }
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+        /* Beware: the upper and lower BAT words are packed into one 64-bit value here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ regs.ctr = env->ctr;
+ regs.lr = env->lr;
+ regs.xer = cpu_read_xer(env);
+ regs.msr = env->msr;
+ regs.pc = env->nip;
+
+ regs.srr0 = env->spr[SPR_SRR0];
+ regs.srr1 = env->spr[SPR_SRR1];
+
+ regs.sprg0 = env->spr[SPR_SPRG0];
+ regs.sprg1 = env->spr[SPR_SPRG1];
+ regs.sprg2 = env->spr[SPR_SPRG2];
+ regs.sprg3 = env->spr[SPR_SPRG3];
+ regs.sprg4 = env->spr[SPR_SPRG4];
+ regs.sprg5 = env->spr[SPR_SPRG5];
+ regs.sprg6 = env->spr[SPR_SPRG6];
+ regs.sprg7 = env->spr[SPR_SPRG7];
+
+ regs.pid = env->spr[SPR_BOOKE_PID];
+
+ for (i = 0; i < 32; i++) {
+ regs.gpr[i] = env->gpr[i];
+ }
+
+ regs.cr = 0;
+ for (i = 0; i < 8; i++) {
+ regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_put_fp(cs);
+
+ if (env->tlb_dirty) {
+ kvm_sw_tlb_put(cpu);
+ env->tlb_dirty = false;
+ }
+
+ if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
+ ret = kvmppc_put_books_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
+ kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
+ }
+
+ if (cap_one_reg) {
+ int i;
+
+ /*
+ * We deliberately ignore errors here, for kernels which have
+ * the ONE_REG calls, but don't support the specific
+ * registers, there's a reasonable chance things will still
+ * work, at least until we try to migrate.
+ */
+ for (i = 0; i < 1024; i++) {
+ uint64_t id = env->spr_cb[i].one_reg_id;
+
+ if (id != 0) {
+ kvm_put_one_spr(cs, id, i);
+ }
+ }
+
+#ifdef TARGET_PPC64
+ if (msr_ts) {
+ for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+ }
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+ }
+
+ if (cap_papr) {
+ if (kvm_put_vpa(cs) < 0) {
+ trace_kvm_failed_put_vpa();
+ }
+ }
+
+ kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
+
+ if (level > KVM_PUT_RUNTIME_STATE) {
+ kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
+ }
+#endif /* TARGET_PPC64 */
+ }
+
+ return ret;
+}
+
+static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
+{
+ env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
+}
+
+static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_BASE) {
+ env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
+ env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
+ env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
+ env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
+ env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
+ env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
+ env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
+ env->spr[SPR_DECR] = sregs.u.e.dec;
+ env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
+ env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
+ env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
+ env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
+ env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
+ env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
+ env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
+ env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_64) {
+ env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
+ env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
+ env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+ kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
+ env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+ kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
+ env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+ kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
+ env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+ kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
+ env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+ kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
+ env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+ kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
+ env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+ kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
+ env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+ kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
+ env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+ kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
+ env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+ kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
+ env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+ kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
+ env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+ kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
+ env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+ kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
+ env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+ kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
+ env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+ kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
+ env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+ kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPE) {
+ env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+ kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
+ env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
+ env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PM) {
+ env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+ kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PC) {
+ env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
+ env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
+ }
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
+ env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
+ env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
+ env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
+ env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
+ env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
+ env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
+ env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
+ env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
+ env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_EXP) {
+ env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PD) {
+ env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
+ env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
+ }
+
+ if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
+ env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
+ env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
+
+ if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
+ env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
+ env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
+ }
+ }
+
+ return 0;
+}
+
+static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!cpu->vhyp) {
+ ppc_store_sdr1(env, sregs.u.s.sdr1);
+ }
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ /*
+ * The packed SLB array we get from KVM_GET_SREGS only contains
+ * information about valid entries. So we flush our internal copy
+ * to get rid of stale ones, then put all valid SLB entries back
+ * in.
+ */
+ memset(env->slb, 0, sizeof(env->slb));
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+ target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+ /*
+ * Only restore valid entries
+ */
+ if (rb & SLB_ESID_V) {
+ ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
+ }
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ env->sr[i] = sregs.u.s.ppc32.sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
+ env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
+ env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
+ env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
+ }
+
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_regs regs;
+ uint32_t cr;
+ int i, ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
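+    /*
+     * KVM returns the condition register as a single 32-bit word with
+     * CR field 0 in the most significant nibble; unpack it into the
+     * eight 4-bit env->crf[] fields, least significant nibble first.
+     */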
+ cr = regs.cr;
+ for (i = 7; i >= 0; i--) {
+ env->crf[i] = cr & 15;
+ cr >>= 4;
+ }
+
+ env->ctr = regs.ctr;
+ env->lr = regs.lr;
+ cpu_write_xer(env, regs.xer);
+ env->msr = regs.msr;
+ env->nip = regs.pc;
+
+ env->spr[SPR_SRR0] = regs.srr0;
+ env->spr[SPR_SRR1] = regs.srr1;
+
+ env->spr[SPR_SPRG0] = regs.sprg0;
+ env->spr[SPR_SPRG1] = regs.sprg1;
+ env->spr[SPR_SPRG2] = regs.sprg2;
+ env->spr[SPR_SPRG3] = regs.sprg3;
+ env->spr[SPR_SPRG4] = regs.sprg4;
+ env->spr[SPR_SPRG5] = regs.sprg5;
+ env->spr[SPR_SPRG6] = regs.sprg6;
+ env->spr[SPR_SPRG7] = regs.sprg7;
+
+ env->spr[SPR_BOOKE_PID] = regs.pid;
+
+ for (i = 0; i < 32; i++) {
+ env->gpr[i] = regs.gpr[i];
+ }
+
+ kvm_get_fp(cs);
+
+ if (cap_booke_sregs) {
+ ret = kvmppc_get_booke_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_segstate) {
+ ret = kvmppc_get_books_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_hior) {
+ kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
+ }
+
+ if (cap_one_reg) {
+ int i;
+
+        /*
+         * We deliberately ignore errors here: for kernels which have
+         * the ONE_REG calls but don't support the specific registers,
+         * there's a reasonable chance things will still work, at
+         * least until we try to migrate.
+         */
+ for (i = 0; i < 1024; i++) {
+ uint64_t id = env->spr_cb[i].one_reg_id;
+
+ if (id != 0) {
+ kvm_get_one_spr(cs, id, i);
+ }
+ }
+
+#ifdef TARGET_PPC64
+ if (msr_ts) {
+ for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+ }
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+ }
+
+ if (cap_papr) {
+ if (kvm_get_vpa(cs) < 0) {
+ trace_kvm_failed_get_vpa();
+ }
+ }
+
+ kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
+ kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
+#endif
+ }
+
+ return 0;
+}
+
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
+{
+ unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
+
+ if (irq != PPC_INTERRUPT_EXT) {
+ return 0;
+ }
+
+ if (!kvm_enabled() || !cap_interrupt_unset) {
+ return 0;
+ }
+
+ kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
+
+ return 0;
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ return;
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+static int kvmppc_handle_halt(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+
+ return 0;
+}
+
+/* map dcr access to existing qemu dcr emulation */
+static int kvmppc_handle_dcr_read(CPUPPCState *env,
+ uint32_t dcrn, uint32_t *data)
+{
+ if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
+ fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
+ }
+
+ return 0;
+}
+
+static int kvmppc_handle_dcr_write(CPUPPCState *env,
+ uint32_t dcrn, uint32_t data)
+{
+ if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
+ fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
+ }
+
+ return 0;
+}
+
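+/*
+ * Software breakpoints work by saving the original instruction and
+ * patching in the KVM-provided trap opcode (debug_inst_opcode);
+ * removal checks that the trap is still in place before restoring
+ * the saved instruction.
+ */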
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    /* The mixed-endian case is not handled */
+ uint32_t sc = debug_inst_opcode;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint32_t sc;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
+ sc != debug_inst_opcode ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int find_hw_breakpoint(target_ulong addr, int type)
+{
+ int n;
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ if (hw_debug_points[n].addr == addr &&
+ hw_debug_points[n].type == type) {
+ return n;
+ }
+ }
+
+ return -1;
+}
+
+static int find_hw_watchpoint(target_ulong addr, int *flag)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
+ if (n >= 0) {
+ *flag = BP_MEM_ACCESS;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
+ if (n >= 0) {
+ *flag = BP_MEM_WRITE;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
+ if (n >= 0) {
+ *flag = BP_MEM_READ;
+ return n;
+ }
+
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+ return -ENOBUFS;
+ }
+
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ if (nb_hw_breakpoint >= max_hw_breakpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_breakpoint++;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ if (nb_hw_watchpoint >= max_hw_watchpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_watchpoint++;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, type);
+ if (n < 0) {
+ return -ENOENT;
+ }
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ nb_hw_breakpoint--;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ nb_hw_watchpoint--;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+ hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = nb_hw_watchpoint = 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+ int n;
+
+ /* Software Breakpoint updates */
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+ assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
+
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ switch (hw_debug_points[n].type) {
+ case GDB_BREAKPOINT_HW:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
+ break;
+ case GDB_WATCHPOINT_READ:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
+ KVMPPC_DEBUG_WATCH_READ;
+ break;
+ default:
+ cpu_abort(cs, "Unsupported breakpoint type\n");
+ }
+ dbg->arch.bp[n].addr = hw_debug_points[n].addr;
+ }
+ }
+}
+
+static int kvm_handle_hw_breakpoint(CPUState *cs,
+ struct kvm_debug_exit_arch *arch_info)
+{
+ int handle = DEBUG_RETURN_GUEST;
+ int n;
+ int flag = 0;
+
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
+ n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
+ if (n >= 0) {
+ handle = DEBUG_RETURN_GDB;
+ }
+ } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
+ KVMPPC_DEBUG_WATCH_WRITE)) {
+ n = find_hw_watchpoint(arch_info->address, &flag);
+ if (n >= 0) {
+ handle = DEBUG_RETURN_GDB;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_debug_points[n].addr;
+ hw_watchpoint.flags = flag;
+ }
+ }
+ }
+ return handle;
+}
+
+static int kvm_handle_singlestep(void)
+{
+ return DEBUG_RETURN_GDB;
+}
+
+static int kvm_handle_sw_breakpoint(void)
+{
+ return DEBUG_RETURN_GDB;
+}
+
+static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+
+ if (cs->singlestep_enabled) {
+ return kvm_handle_singlestep();
+ }
+
+ if (arch_info->status) {
+ return kvm_handle_hw_breakpoint(cs, arch_info);
+ }
+
+ if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
+ return kvm_handle_sw_breakpoint();
+ }
+
+    /*
+     * QEMU cannot handle this debug exception itself, so inject a
+     * program exception into the guest (yes, a program exception,
+     * NOT a debug exception!).
+     * While QEMU is using the debug resources, debug exceptions must
+     * always be enabled. To achieve this we set MSR_DE and also set
+     * MSRP_DEP so the guest cannot change MSR_DE.
+     * When emulating the debug resources for the guest, we instead
+     * want the guest to control MSR_DE (enabling and disabling the
+     * debug interrupt as needed). Supporting both configurations at
+     * once is not possible, so debug resources cannot be shared
+     * between QEMU and the guest on the BookE architecture.
+     * In the current design QEMU has priority over the guest: while
+     * QEMU is using the debug resources, the guest cannot use them.
+     * For software breakpoints QEMU uses a privileged instruction,
+     * so this exit cannot be a guest-set debug exception; the only
+     * possibility is that the guest executed a privileged or illegal
+     * instruction, which is why we inject a program interrupt.
+     */
+ cpu_synchronize_state(cs);
+    /*
+     * env->nip is the PC, so advance it by 4 before calling
+     * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
+     */
+ env->nip += 4;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_INVAL;
+ ppc_cpu_do_interrupt(cs);
+
+ return DEBUG_RETURN_GUEST;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int ret;
+
+ qemu_mutex_lock_iothread();
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DCR:
+ if (run->dcr.is_write) {
+ trace_kvm_handle_dcr_write();
+ ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
+ } else {
+ trace_kvm_handle_dcr_read();
+ ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
+ }
+ break;
+ case KVM_EXIT_HLT:
+ trace_kvm_handle_halt();
+ ret = kvmppc_handle_halt(cpu);
+ break;
+#if defined(TARGET_PPC64)
+ case KVM_EXIT_PAPR_HCALL:
+ trace_kvm_handle_papr_hcall();
+ run->papr_hcall.ret = spapr_hypercall(cpu,
+ run->papr_hcall.nr,
+ run->papr_hcall.args);
+ ret = 0;
+ break;
+#endif
+ case KVM_EXIT_EPR:
+ trace_kvm_handle_epr();
+ run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
+ ret = 0;
+ break;
+ case KVM_EXIT_WATCHDOG:
+ trace_kvm_handle_watchdog_expiry();
+ watchdog_perform_action();
+ ret = 0;
+ break;
+
+ case KVM_EXIT_DEBUG:
+ trace_kvm_handle_debug_exception();
+ if (kvm_handle_debug(cpu, run)) {
+ ret = EXCP_DEBUG;
+ break;
+ }
+ /* re-enter, this exception was guest-internal */
+ ret = 0;
+ break;
+
+#if defined(TARGET_PPC64)
+ case KVM_EXIT_NMI:
+ trace_kvm_handle_nmi_exception();
+ ret = kvm_handle_nmi(cpu, run);
+ break;
+#endif
+
+ default:
+ fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ qemu_mutex_unlock_iothread();
+ return ret;
+}
+
+int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_OR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_CLEAR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_set_tcr(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ uint32_t tcr = env->spr[SPR_BOOKE_TCR];
+
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_TCR,
+ .addr = (uintptr_t) &tcr,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ if (!kvm_enabled()) {
+ return -1;
+ }
+
+ if (!cap_ppc_watchdog) {
+        printf("warning: KVM does not support watchdog\n");
+ return -1;
+ }
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+static int read_cpuinfo(const char *field, char *value, int len)
+{
+ FILE *f;
+ int ret = -1;
+ int field_len = strlen(field);
+ char line[512];
+
+ f = fopen("/proc/cpuinfo", "r");
+ if (!f) {
+ return -1;
+ }
+
+ do {
+ if (!fgets(line, sizeof(line), f)) {
+ break;
+ }
+ if (!strncmp(line, field, field_len)) {
+ pstrcpy(value, len, line);
+ ret = 0;
+ break;
+ }
+ } while (*line);
+
+ fclose(f);
+
+ return ret;
+}
+
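+/*
+ * Parse the timebase frequency out of /proc/cpuinfo, where the
+ * relevant line typically looks like
+ *
+ *     timebase        : 512000000
+ *
+ * i.e. the value after the colon is the frequency in Hz (512 MHz in
+ * this example). Falls back to NANOSECONDS_PER_SECOND if the line is
+ * missing or unparseable.
+ */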
+static uint32_t kvmppc_get_tbfreq_procfs(void)
+{
+ char line[512];
+ char *ns;
+ uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
+ uint32_t tbfreq_procfs;
+
+ if (read_cpuinfo("timebase", line, sizeof(line))) {
+ return tbfreq_fallback;
+ }
+
+ ns = strchr(line, ':');
+ if (!ns) {
+ return tbfreq_fallback;
+ }
+
+ tbfreq_procfs = atoi(++ns);
+
+    /* 0 is certainly not acceptable to the guest; return the fallback value */
+ return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
+}
+
+uint32_t kvmppc_get_tbfreq(void)
+{
+ static uint32_t cached_tbfreq;
+
+ if (!cached_tbfreq) {
+ cached_tbfreq = kvmppc_get_tbfreq_procfs();
+ }
+
+ return cached_tbfreq;
+}
+
+bool kvmppc_get_host_serial(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
+ NULL);
+}
+
+bool kvmppc_get_host_model(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
+}
+
+/* Try to find a device tree node for a CPU with clock-frequency property */
+static int kvmppc_find_cpu_dt(char *buf, int buf_len)
+{
+ struct dirent *dirp;
+ DIR *dp;
+
+ dp = opendir(PROC_DEVTREE_CPU);
+ if (!dp) {
+ printf("Can't open directory " PROC_DEVTREE_CPU "\n");
+ return -1;
+ }
+
+ buf[0] = '\0';
+ while ((dirp = readdir(dp)) != NULL) {
+ FILE *f;
+ snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
+ dirp->d_name);
+ f = fopen(buf, "r");
+ if (f) {
+ snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
+ fclose(f);
+ break;
+ }
+ buf[0] = '\0';
+ }
+ closedir(dp);
+ if (buf[0] == '\0') {
+ printf("Unknown host!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
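+/*
+ * Device tree properties are stored big-endian, so the 4- or 8-byte
+ * value read below must be byte-swapped on little-endian hosts
+ * before use; that is what the be32_to_cpu()/be64_to_cpu() calls do.
+ */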
+static uint64_t kvmppc_read_int_dt(const char *filename)
+{
+ union {
+ uint32_t v32;
+ uint64_t v64;
+ } u;
+ FILE *f;
+ int len;
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ return -1;
+ }
+
+ len = fread(&u, 1, sizeof(u), f);
+ fclose(f);
+ switch (len) {
+ case 4:
+ /* property is a 32-bit quantity */
+ return be32_to_cpu(u.v32);
+ case 8:
+ return be64_to_cpu(u.v64);
+ }
+
+ return 0;
+}
+
+/*
+ * Read a CPU node property from the host device tree that's a single
+ * integer (32-bit or 64-bit). Returns -1 if the CPU node or property
+ * can't be found or opened, and 0 if the format isn't understood.
+ */
+static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
+{
+ char buf[PATH_MAX], *tmp;
+ uint64_t val;
+
+ if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
+ return -1;
+ }
+
+ tmp = g_strdup_printf("%s/%s", buf, propname);
+ val = kvmppc_read_int_dt(tmp);
+ g_free(tmp);
+
+ return val;
+}
+
+uint64_t kvmppc_get_clockfreq(void)
+{
+ return kvmppc_read_int_cpu_dt("clock-frequency");
+}
+
+static int kvmppc_get_dec_bits(void)
+{
+ int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
+
+ if (nr_bits > 0) {
+ return nr_bits;
+ }
+ return 0;
+}
+
+static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int kvmppc_get_hasidle(CPUPPCState *env)
+{
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (!kvmppc_get_pvinfo(env, &pvinfo) &&
+ (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+{
+ uint32_t *hc = (uint32_t *)buf;
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (!kvmppc_get_pvinfo(env, &pvinfo)) {
+ memcpy(buf, pvinfo.hcall, buf_len);
+ return 0;
+ }
+
+    /*
+     * Fall back to a hypercall sequence that always fails, regardless
+     * of endianness:
+     *
+     * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
+     * li r3, -1
+     * b .+8 (becomes nop in wrong endian)
+     * bswap32(li r3, -1)
+     */
+
+ hc[0] = cpu_to_be32(0x08000048);
+ hc[1] = cpu_to_be32(0x3860ffff);
+ hc[2] = cpu_to_be32(0x48000008);
+ hc[3] = cpu_to_be32(bswap32(0x3860ffff));
+
+ return 1;
+}
+
+static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
+{
+ return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
+}
+
+void kvmppc_enable_logical_ci_hcalls(void)
+{
+    /*
+     * FIXME: it would be nice if we could detect the cases where
+     * we're using a device which requires the in-kernel
+     * implementation of these hcalls but the kernel lacks it, and
+     * produce a warning.
+     */
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
+}
+
+void kvmppc_enable_set_mode_hcall(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_SET_MODE);
+}
+
+void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
+}
+
+void kvmppc_enable_h_page_init(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
+}
+
+void kvmppc_enable_h_rpt_invalidate(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
+}
+
+void kvmppc_set_papr(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ if (!kvm_enabled()) {
+ return;
+ }
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
+ if (ret) {
+ error_report("This vCPU type or KVM version does not support PAPR");
+ exit(1);
+ }
+
+ /*
+ * Update the capability flag so we sync the right information
+ * with kvm
+ */
+ cap_papr = 1;
+}
+
+int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
+{
+ return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
+}
+
+void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
+ if (ret && mpic_proxy) {
+ error_report("This KVM version does not support EPR");
+ exit(1);
+ }
+}
+
+bool kvmppc_get_fwnmi(void)
+{
+ return cap_fwnmi;
+}
+
+int kvmppc_set_fwnmi(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
+}
+
+int kvmppc_smt_threads(void)
+{
+ return cap_ppc_smt ? cap_ppc_smt : 1;
+}
+
+int kvmppc_set_smt_threads(int smt)
+{
+ int ret;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
+ if (!ret) {
+ cap_ppc_smt = smt;
+ }
+ return ret;
+}
+
+void kvmppc_error_append_smt_possible_hint(Error *const *errp)
+{
+ int i;
+ GString *g;
+ char *s;
+
+ assert(kvm_enabled());
+ if (cap_ppc_smt_possible) {
+ g = g_string_new("Available VSMT modes:");
+ for (i = 63; i >= 0; i--) {
+ if ((1UL << i) & cap_ppc_smt_possible) {
+ g_string_append_printf(g, " %lu", (1UL << i));
+ }
+ }
+ s = g_string_free(g, false);
+ error_append_hint(errp, "%s.\n", s);
+ g_free(s);
+ } else {
+ error_append_hint(errp,
+ "This KVM seems to be too old to support VSMT.\n");
+ }
+}
+
+
+#ifdef TARGET_PPC64
+uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
+{
+ struct kvm_ppc_smmu_info info;
+ long rampagesize, best_page_shift;
+ int i;
+
+ /*
+ * Find the largest hardware supported page size that's less than
+ * or equal to the (logical) backing page size of guest RAM
+ */
+ kvm_get_smmu_info(&info, &error_fatal);
+ rampagesize = qemu_minrampagesize();
+ best_page_shift = 0;
+
+ for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
+ struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
+
+ if (!sps->page_shift) {
+ continue;
+ }
+
+ if ((sps->page_shift > best_page_shift)
+ && ((1UL << sps->page_shift) <= rampagesize)) {
+ best_page_shift = sps->page_shift;
+ }
+ }
+
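+    /*
+     * As a worked example: with 4 KiB pages (best_page_shift = 12)
+     * and a 16 MiB hash table (hash_shift = 24), the limit below is
+     * 1ULL << (12 + 24 - 7), i.e. 512 MiB.
+     */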
+ return 1ULL << (best_page_shift + hash_shift - 7);
+}
+#endif
+
+bool kvmppc_spapr_use_multitce(void)
+{
+ return cap_spapr_multitce;
+}
+
+int kvmppc_spapr_enable_inkernel_multitce(void)
+{
+ int ret;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
+ H_PUT_TCE_INDIRECT, 1);
+ if (!ret) {
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
+ H_STUFF_TCE, 1);
+ }
+
+ return ret;
+}
+
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
+ uint64_t bus_offset, uint32_t nb_table,
+ int *pfd, bool need_vfio)
+{
+ long len;
+ int fd;
+ void *table;
+
+ /*
+ * Must set fd to -1 so we don't try to munmap when called for
+ * destroying the table, which the upper layers -will- do
+ */
+ *pfd = -1;
+ if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
+ return NULL;
+ }
+
+ if (cap_spapr_tce_64) {
+ struct kvm_create_spapr_tce_64 args = {
+ .liobn = liobn,
+ .page_shift = page_shift,
+ .offset = bus_offset >> page_shift,
+ .size = nb_table,
+ .flags = 0
+ };
+ fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
+ if (fd < 0) {
+ fprintf(stderr,
+ "KVM: Failed to create TCE64 table for liobn 0x%x\n",
+ liobn);
+ return NULL;
+ }
+ } else if (cap_spapr_tce) {
+ uint64_t window_size = (uint64_t) nb_table << page_shift;
+ struct kvm_create_spapr_tce args = {
+ .liobn = liobn,
+ .window_size = window_size,
+ };
+ if ((window_size != args.window_size) || bus_offset) {
+ return NULL;
+ }
+ fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
+ if (fd < 0) {
+ fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
+ liobn);
+ return NULL;
+ }
+ } else {
+ return NULL;
+ }
+
+ len = nb_table * sizeof(uint64_t);
+ /* FIXME: round this up to page size */
+
+ table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (table == MAP_FAILED) {
+ fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
+ liobn);
+ close(fd);
+ return NULL;
+ }
+
+ *pfd = fd;
+ return table;
+}
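+
+/*
+ * Illustrative lifecycle for the table returned above, assuming a
+ * 4 KiB IOMMU page size and no VFIO requirement:
+ *
+ *     int fd;
+ *     void *tbl = kvmppc_create_spapr_tce(liobn, 12, 0, nb_table,
+ *                                         &fd, false);
+ *     if (tbl) {
+ *         ... TCE updates flow through the shared mapping ...
+ *         kvmppc_remove_spapr_tce(tbl, fd, nb_table);
+ *     }
+ */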
+
+int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
+{
+ long len;
+
+ if (fd < 0) {
+ return -1;
+ }
+
+ len = nb_table * sizeof(uint64_t);
+ if ((munmap(table, len) < 0) ||
+ (close(fd) < 0)) {
+        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
+ strerror(errno));
+ /* Leak the table */
+ }
+
+ return 0;
+}
+
+int kvmppc_reset_htab(int shift_hint)
+{
+ uint32_t shift = shift_hint;
+
+ if (!kvm_enabled()) {
+ /* Full emulation, tell caller to allocate htab itself */
+ return 0;
+ }
+ if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
+ int ret;
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
+ if (ret == -ENOTTY) {
+ /*
+ * At least some versions of PR KVM advertise the
+ * capability, but don't implement the ioctl(). Oops.
+ * Return 0 so that we allocate the htab in qemu, as is
+ * correct for PR.
+ */
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ return shift;
+ }
+
+ /*
+ * We have a kernel that predates the htab reset calls. For PR
+ * KVM, we need to allocate the htab ourselves, for an HV KVM of
+ * this era, it has allocated a 16MB fixed size hash table
+ * already.
+ */
+ if (kvmppc_is_pr(kvm_state)) {
+ /* PR - tell caller to allocate htab */
+ return 0;
+ } else {
+ /* HV - assume 16MB kernel allocated htab */
+ return 24;
+ }
+}
+
+static inline uint32_t mfpvr(void)
+{
+ uint32_t pvr;
+
+ asm ("mfpvr %0"
+ : "=r"(pvr));
+ return pvr;
+}
+
+static void alter_insns(uint64_t *word, uint64_t flags, bool on)
+{
+ if (on) {
+ *word |= flags;
+ } else {
+ *word &= ~flags;
+ }
+}
+
+static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
+ uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
+
+ /* Now fix up the class with information we can query from the host */
+ pcc->pvr = mfpvr();
+
+ alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
+ qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
+ alter_insns(&pcc->insns_flags2, PPC2_VSX,
+ qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
+ alter_insns(&pcc->insns_flags2, PPC2_DFP,
+ qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
+
+ if (dcache_size != -1) {
+ pcc->l1_dcache_size = dcache_size;
+ }
+
+ if (icache_size != -1) {
+ pcc->l1_icache_size = icache_size;
+ }
+
+#if defined(TARGET_PPC64)
+ pcc->radix_page_info = kvm_get_radix_page_info();
+
+ if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
+ /*
+ * POWER9 DD1 has some bugs which make it not really ISA 3.00
+ * compliant. More importantly, advertising ISA 3.00
+ * architected mode may prevent guests from activating
+ * necessary DD1 workarounds.
+ */
+ pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
+ | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
+ }
+#endif /* defined(TARGET_PPC64) */
+}
+
+bool kvmppc_has_cap_epr(void)
+{
+ return cap_epr;
+}
+
+bool kvmppc_has_cap_fixup_hcalls(void)
+{
+ return cap_fixup_hcalls;
+}
+
+bool kvmppc_has_cap_htm(void)
+{
+ return cap_htm;
+}
+
+bool kvmppc_has_cap_mmu_radix(void)
+{
+ return cap_mmu_radix;
+}
+
+bool kvmppc_has_cap_mmu_hash_v3(void)
+{
+ return cap_mmu_hash_v3;
+}
+
+static bool kvmppc_power8_host(void)
+{
+ bool ret = false;
+#ifdef TARGET_PPC64
+ {
+ uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
+ ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
+ (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
+ (base_pvr == CPU_POWERPC_POWER8_BASE);
+ }
+#endif /* TARGET_PPC64 */
+ return ret;
+}
+
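+/*
+ * The parse_cap_ppc_safe_* helpers below translate the
+ * KVM_PPC_GET_CPU_CHAR character/behaviour bits into sPAPR capability
+ * levels (SPAPR_CAP_BROKEN/WORKAROUND/FIXED and friends), which is
+ * the representation the spapr code consumes.
+ */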
+static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
+{
+ bool l1d_thread_priv_req = !kvmppc_power8_host();
+
+ if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
+ return 2;
+ } else if ((!l1d_thread_priv_req ||
+ c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
+ (c.character & c.character_mask
+ & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
+{
+ if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
+ return 2;
+ } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
+{
+ if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
+ (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
+ (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
+ return SPAPR_CAP_FIXED_NA;
+ } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
+ return SPAPR_CAP_WORKAROUND;
+ } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
+ return SPAPR_CAP_FIXED_CCD;
+ } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
+ return SPAPR_CAP_FIXED_IBS;
+ }
+
+ return 0;
+}
+
+static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
+{
+ if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
+ return 1;
+ }
+ return 0;
+}
+
+bool kvmppc_has_cap_xive(void)
+{
+ return cap_xive;
+}
+
+static void kvmppc_get_cpu_characteristics(KVMState *s)
+{
+ struct kvm_ppc_cpu_char c;
+ int ret;
+
+ /* Assume broken */
+ cap_ppc_safe_cache = 0;
+ cap_ppc_safe_bounds_check = 0;
+ cap_ppc_safe_indirect_branch = 0;
+
+ ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
+ if (!ret) {
+ return;
+ }
+ ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
+ if (ret < 0) {
+ return;
+ }
+
+ cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
+ cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
+ cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
+ cap_ppc_count_cache_flush_assist =
+ parse_cap_ppc_count_cache_flush_assist(c);
+}
+
+int kvmppc_get_cap_safe_cache(void)
+{
+ return cap_ppc_safe_cache;
+}
+
+int kvmppc_get_cap_safe_bounds_check(void)
+{
+ return cap_ppc_safe_bounds_check;
+}
+
+int kvmppc_get_cap_safe_indirect_branch(void)
+{
+ return cap_ppc_safe_indirect_branch;
+}
+
+int kvmppc_get_cap_count_cache_flush_assist(void)
+{
+ return cap_ppc_count_cache_flush_assist;
+}
+
+bool kvmppc_has_cap_nested_kvm_hv(void)
+{
+ return !!cap_ppc_nested_kvm_hv;
+}
+
+int kvmppc_set_cap_nested_kvm_hv(int enable)
+{
+ return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
+}
+
+bool kvmppc_has_cap_spapr_vfio(void)
+{
+ return cap_spapr_vfio;
+}
+
+int kvmppc_get_cap_large_decr(void)
+{
+ return cap_large_decr;
+}
+
+int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
+{
+ CPUState *cs = CPU(cpu);
+ uint64_t lpcr;
+
+ kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
+ /* Do we need to modify the LPCR? */
+ if (!!(lpcr & LPCR_LD) != !!enable) {
+ if (enable) {
+ lpcr |= LPCR_LD;
+ } else {
+ lpcr &= ~LPCR_LD;
+ }
+ kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
+
+ if (!!(lpcr & LPCR_LD) != !!enable) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int kvmppc_has_cap_rpt_invalidate(void)
+{
+ return cap_rpt_invalidate;
+}
+
+PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
+{
+ uint32_t host_pvr = mfpvr();
+ PowerPCCPUClass *pvr_pcc;
+
+ pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
+ if (pvr_pcc == NULL) {
+ pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
+ }
+
+ return pvr_pcc;
+}
+
+static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
+}
+
+static int kvm_ppc_register_host_cpu_type(void)
+{
+ TypeInfo type_info = {
+ .name = TYPE_HOST_POWERPC_CPU,
+ .class_init = kvmppc_host_cpu_class_init,
+ };
+ PowerPCCPUClass *pvr_pcc;
+ ObjectClass *oc;
+ DeviceClass *dc;
+ int i;
+
+ pvr_pcc = kvm_ppc_get_host_cpu_class();
+ if (pvr_pcc == NULL) {
+ return -1;
+ }
+ type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
+ type_register(&type_info);
+ /* override TCG default cpu type with 'host' cpu model */
+ object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
+ false, NULL);
+
+ oc = object_class_by_name(type_info.name);
+ g_assert(oc);
+
+ /*
+ * Update generic CPU family class alias (e.g. on a POWER8NVL host,
+ * we want "POWER8" to be a "family" alias that points to the current
+ * host CPU type, too)
+ */
+ dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
+ char *suffix;
+
+ ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
+ suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
+ if (suffix) {
+ *suffix = 0;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
+{
+ struct kvm_rtas_token_args args = {
+ .token = token,
+ };
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
+ return -ENOENT;
+ }
+
+ strncpy(args.name, function, sizeof(args.name) - 1);
+
+ return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
+}
+
+int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
+{
+ struct kvm_get_htab_fd s = {
+ .flags = write ? KVM_GET_HTAB_WRITE : 0,
+ .start_index = index,
+ };
+ int ret;
+
+ if (!cap_htab_fd) {
+ error_setg(errp, "KVM version doesn't support %s the HPT",
+ write ? "writing" : "reading");
+ return -ENOTSUP;
+ }
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
+ if (ret < 0) {
+ error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
+ write ? "writing" : "reading", write ? "to" : "from",
+ strerror(errno));
+ return -errno;
+ }
+
+ return ret;
+}
+
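+/*
+ * Each chunk read from the HTAB fd begins with a kvm_get_htab_header
+ * (index, n_valid, n_invalid) followed by n_valid 16-byte HPTEs; the
+ * loop below forwards that structure to the migration stream in
+ * big-endian form.
+ */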
+int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
+{
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ uint8_t buf[bufsize];
+ ssize_t rc;
+
+ do {
+ rc = read(fd, buf, bufsize);
+ if (rc < 0) {
+ fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
+ strerror(errno));
+ return rc;
+ } else if (rc) {
+ uint8_t *buffer = buf;
+ ssize_t n = rc;
+ while (n) {
+ struct kvm_get_htab_header *head =
+ (struct kvm_get_htab_header *) buffer;
+ size_t chunksize = sizeof(*head) +
+ HASH_PTE_SIZE_64 * head->n_valid;
+
+ qemu_put_be32(f, head->index);
+ qemu_put_be16(f, head->n_valid);
+ qemu_put_be16(f, head->n_invalid);
+ qemu_put_buffer(f, (void *)(head + 1),
+ HASH_PTE_SIZE_64 * head->n_valid);
+
+ buffer += chunksize;
+ n -= chunksize;
+ }
+ }
+ } while ((rc != 0)
+ && ((max_ns < 0) ||
+ ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
+
+ return (rc == 0) ? 1 : 0;
+}
+
+int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid, Error **errp)
+{
+ struct kvm_get_htab_header *buf;
+ size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
+ ssize_t rc;
+
+ buf = alloca(chunksize);
+ buf->index = index;
+ buf->n_valid = n_valid;
+ buf->n_invalid = n_invalid;
+
+ qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
+
+ rc = write(fd, buf, chunksize);
+ if (rc < 0) {
+ error_setg_errno(errp, errno, "Error writing the KVM hash table");
+ return -errno;
+ }
+ if (rc != chunksize) {
+ /* We should never get a short write on a single chunk */
+ error_setg(errp, "Short write while restoring the KVM hash table");
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
+{
+ return true;
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
+{
+ int fd, rc;
+ int i;
+
+ fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
+
+ i = 0;
+ while (i < n) {
+ struct kvm_get_htab_header *hdr;
+ int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
+ char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
+
+ rc = read(fd, buf, sizeof(buf));
+ if (rc < 0) {
+ hw_error("kvmppc_read_hptes: Unable to read HPTEs");
+ }
+
+ hdr = (struct kvm_get_htab_header *)buf;
+ while ((i < n) && ((char *)hdr < (buf + rc))) {
+ int invalid = hdr->n_invalid, valid = hdr->n_valid;
+
+ if (hdr->index != (ptex + i)) {
+ hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
+ " != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
+ }
+
+ if (n - i < valid) {
+ valid = n - i;
+ }
+ memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
+ i += valid;
+
+ if ((n - i) < invalid) {
+ invalid = n - i;
+ }
+ memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
+ i += invalid;
+
+ hdr = (struct kvm_get_htab_header *)
+ ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
+ }
+ }
+
+ close(fd);
+}
+
+void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
+{
+ int fd, rc;
+ struct {
+ struct kvm_get_htab_header hdr;
+ uint64_t pte0;
+ uint64_t pte1;
+ } buf;
+
+ fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
+
+ buf.hdr.n_valid = 1;
+ buf.hdr.n_invalid = 0;
+ buf.hdr.index = ptex;
+ buf.pte0 = cpu_to_be64(pte0);
+ buf.pte1 = cpu_to_be64(pte1);
+
+ rc = write(fd, &buf, sizeof(buf));
+ if (rc != sizeof(buf)) {
+ hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
+ }
+ close(fd);
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ return data & 0xffff;
+}
+
+#if defined(TARGET_PPC64)
+int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
+{
+ uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
+
+ return 0;
+}
+#endif
+
+int kvmppc_enable_hwrng(void)
+{
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
+ return -1;
+ }
+
+ return kvmppc_enable_hcall(kvm_state, H_RANDOM);
+}
+
+void kvmppc_check_papr_resize_hpt(Error **errp)
+{
+ if (!kvm_enabled()) {
+ return; /* No KVM, we're good */
+ }
+
+ if (cap_resize_hpt) {
+ return; /* Kernel has explicit support, we're good */
+ }
+
+ /* Otherwise fallback on looking for PR KVM */
+ if (kvmppc_is_pr(kvm_state)) {
+ return;
+ }
+
+ error_setg(errp,
+ "Hash page table resizing not available with this KVM version");
+}
+
+int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_ppc_resize_hpt rhpt = {
+ .flags = flags,
+ .shift = shift,
+ };
+
+ if (!cap_resize_hpt) {
+ return -ENOSYS;
+ }
+
+ return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
+}
+
+int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_ppc_resize_hpt rhpt = {
+ .flags = flags,
+ .shift = shift,
+ };
+
+ if (!cap_resize_hpt) {
+ return -ENOSYS;
+ }
+
+ return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
+}
+
+/*
+ * This is a helper function to detect a post-migration scenario in
+ * which a guest, running as KVM-HV, freezes in cpu_post_load because
+ * the guest kernel can't handle a PVR value other than the actual
+ * host PVR in KVM_SET_SREGS, even if pvr_match() returns true.
+ *
+ * If we don't have cap_ppc_pvr_compat and we're not running in PR
+ * (so, we're HV), return true. The workaround itself is done in
+ * cpu_post_load.
+ *
+ * The order here is important: we'll only check for KVM PR as a
+ * fallback if the guest kernel can't handle the situation itself.
+ * We want to avoid querying the running KVM type at the QEMU level
+ * as much as possible.
+ */
+bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (!kvm_enabled()) {
+ return false;
+ }
+
+ if (cap_ppc_pvr_compat) {
+ return false;
+ }
+
+ return !kvmppc_is_pr(cs->kvm_state);
+}
+
+void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
+ }
+}
+
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
+ }
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+ return true;
+}
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
new file mode 100644
index 000000000..ee9325bf9
--- /dev/null
+++ b/target/ppc/kvm_ppc.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2008 IBM Corporation.
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef KVM_PPC_H
+#define KVM_PPC_H
+
+#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")
+
+#ifdef CONFIG_KVM
+
+uint32_t kvmppc_get_tbfreq(void);
+uint64_t kvmppc_get_clockfreq(void);
+bool kvmppc_get_host_model(char **buf);
+bool kvmppc_get_host_serial(char **buf);
+int kvmppc_get_hasidle(CPUPPCState *env);
+int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
+void kvmppc_enable_logical_ci_hcalls(void);
+void kvmppc_enable_set_mode_hcall(void);
+void kvmppc_enable_clear_ref_mod_hcalls(void);
+void kvmppc_enable_h_page_init(void);
+void kvmppc_enable_h_rpt_invalidate(void);
+void kvmppc_set_papr(PowerPCCPU *cpu);
+int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
+void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
+bool kvmppc_get_fwnmi(void);
+int kvmppc_set_fwnmi(PowerPCCPU *cpu);
+int kvmppc_smt_threads(void);
+void kvmppc_error_append_smt_possible_hint(Error *const *errp);
+int kvmppc_set_smt_threads(int smt);
+int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
+int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
+int kvmppc_set_tcr(PowerPCCPU *cpu);
+int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
+target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
+ bool radix, bool gtse,
+ uint64_t proc_tbl);
+#ifndef CONFIG_USER_ONLY
+bool kvmppc_spapr_use_multitce(void);
+int kvmppc_spapr_enable_inkernel_multitce(void);
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
+ uint64_t bus_offset, uint32_t nb_table,
+ int *pfd, bool need_vfio);
+int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
+int kvmppc_reset_htab(int shift_hint);
+uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
+bool kvmppc_has_cap_spapr_vfio(void);
+#endif /* !CONFIG_USER_ONLY */
+bool kvmppc_has_cap_epr(void);
+int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
+int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
+int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
+int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid, Error **errp);
+void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
+void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
+bool kvmppc_has_cap_fixup_hcalls(void);
+bool kvmppc_has_cap_htm(void);
+bool kvmppc_has_cap_mmu_radix(void);
+bool kvmppc_has_cap_mmu_hash_v3(void);
+bool kvmppc_has_cap_xive(void);
+int kvmppc_get_cap_safe_cache(void);
+int kvmppc_get_cap_safe_bounds_check(void);
+int kvmppc_get_cap_safe_indirect_branch(void);
+int kvmppc_get_cap_count_cache_flush_assist(void);
+bool kvmppc_has_cap_nested_kvm_hv(void);
+int kvmppc_set_cap_nested_kvm_hv(int enable);
+int kvmppc_get_cap_large_decr(void);
+int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable);
+int kvmppc_has_cap_rpt_invalidate(void);
+int kvmppc_enable_hwrng(void);
+int kvmppc_put_books_sregs(PowerPCCPU *cpu);
+PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
+void kvmppc_check_papr_resize_hpt(Error **errp);
+int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift);
+int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift);
+bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);
+
+bool kvmppc_hpt_needs_host_contiguous_pages(void);
+void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
+void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);
+
+int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run);
+
+#else
+
+static inline uint32_t kvmppc_get_tbfreq(void)
+{
+ return 0;
+}
+
+static inline bool kvmppc_get_host_model(char **buf)
+{
+ return false;
+}
+
+static inline bool kvmppc_get_host_serial(char **buf)
+{
+ return false;
+}
+
+static inline uint64_t kvmppc_get_clockfreq(void)
+{
+ return 0;
+}
+
+static inline uint32_t kvmppc_get_vmx(void)
+{
+ return 0;
+}
+
+static inline uint32_t kvmppc_get_dfp(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_hasidle(CPUPPCState *env)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_hypercall(CPUPPCState *env,
+ uint8_t *buf, int buf_len)
+{
+ return -1;
+}
+
+static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
+{
+ return -1;
+}
+
+static inline void kvmppc_enable_logical_ci_hcalls(void)
+{
+}
+
+static inline void kvmppc_enable_set_mode_hcall(void)
+{
+}
+
+static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+}
+
+static inline void kvmppc_enable_h_page_init(void)
+{
+}
+
+static inline void kvmppc_enable_h_rpt_invalidate(void)
+{
+ g_assert_not_reached();
+}
+
+static inline void kvmppc_set_papr(PowerPCCPU *cpu)
+{
+}
+
+static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
+{
+}
+
+static inline bool kvmppc_get_fwnmi(void)
+{
+ return false;
+}
+
+static inline int kvmppc_set_fwnmi(PowerPCCPU *cpu)
+{
+ return -1;
+}
+
+static inline int kvmppc_smt_threads(void)
+{
+ return 1;
+}
+
+static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp)
+{
+ return;
+}
+
+static inline int kvmppc_set_smt_threads(int smt)
+{
+ return 0;
+}
+
+static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ return 0;
+}
+
+static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ return 0;
+}
+
+static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
+{
+ return 0;
+}
+
+static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
+{
+ return -1;
+}
+
+static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
+ bool radix, bool gtse,
+ uint64_t proc_tbl)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
+ unsigned int online)
+{
+ return;
+}
+
+static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline bool kvmppc_spapr_use_multitce(void)
+{
+ return false;
+}
+
+static inline int kvmppc_spapr_enable_inkernel_multitce(void)
+{
+ return -1;
+}
+
+static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
+ uint64_t bus_offset,
+ uint32_t nb_table,
+ int *pfd, bool need_vfio)
+{
+ return NULL;
+}
+
+static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
+ uint32_t nb_table)
+{
+ return -1;
+}
+
+static inline int kvmppc_reset_htab(int shift_hint)
+{
+ return 0;
+}
+
+static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
+{
+ g_assert_not_reached();
+}
+
+static inline bool kvmppc_hpt_needs_host_contiguous_pages(void)
+{
+ return false;
+}
+
+static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
+{
+}
+
+static inline bool kvmppc_has_cap_spapr_vfio(void)
+{
+ return false;
+}
+
+static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n)
+{
+ abort();
+}
+
+static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
+{
+ abort();
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+static inline bool kvmppc_has_cap_epr(void)
+{
+ return false;
+}
+
+static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
+ const char *function)
+{
+ return -1;
+}
+
+static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
+{
+ return -1;
+}
+
+static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
+ int64_t max_ns)
+{
+ abort();
+}
+
+static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid,
+ Error **errp)
+{
+ abort();
+}
+
+static inline bool kvmppc_has_cap_fixup_hcalls(void)
+{
+ abort();
+}
+
+static inline bool kvmppc_has_cap_htm(void)
+{
+ return false;
+}
+
+static inline bool kvmppc_has_cap_mmu_radix(void)
+{
+ return false;
+}
+
+static inline bool kvmppc_has_cap_mmu_hash_v3(void)
+{
+ return false;
+}
+
+static inline bool kvmppc_has_cap_xive(void)
+{
+ return false;
+}
+
+static inline int kvmppc_get_cap_safe_cache(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_cap_safe_bounds_check(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_cap_safe_indirect_branch(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_cap_count_cache_flush_assist(void)
+{
+ return 0;
+}
+
+static inline bool kvmppc_has_cap_nested_kvm_hv(void)
+{
+ return false;
+}
+
+static inline int kvmppc_set_cap_nested_kvm_hv(int enable)
+{
+ return -1;
+}
+
+static inline int kvmppc_get_cap_large_decr(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
+{
+ return -1;
+}
+
+static inline int kvmppc_has_cap_rpt_invalidate(void)
+{
+ return false;
+}
+
+static inline int kvmppc_enable_hwrng(void)
+{
+ return -1;
+}
+
+static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ abort();
+}
+
+static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
+{
+ return NULL;
+}
+
+static inline void kvmppc_check_papr_resize_hpt(Error **errp)
+{
+ return;
+}
+
+static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
+ target_ulong flags, int shift)
+{
+ return -ENOSYS;
+}
+
+static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
+ target_ulong flags, int shift)
+{
+ return -ENOSYS;
+}
+
+static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
+{
+ return false;
+}
+
+#endif
+
+#ifndef CONFIG_KVM
+
+#define kvmppc_eieio() do { } while (0)
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else /* CONFIG_KVM */
+
+#define kvmppc_eieio() \
+ do { \
+ if (kvm_enabled()) { \
+ asm volatile("eieio" : : : "memory"); \
+ } \
+ } while (0)
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+ asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+ asm volatile("icbi 0,%0" : : "r"(p));
+ }
+}
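+
+/*
+ * A caller that patches guest instruction memory would typically
+ * flush the patched range out of the data cache and then invalidate
+ * the stale instruction cache lines, for example:
+ *
+ *     kvmppc_dcbst_range(cpu, addr, len);
+ *     kvmppc_icbi_range(cpu, addr, len);
+ */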
+
+#endif /* CONFIG_KVM */
+
+#endif /* KVM_PPC_H */
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
new file mode 100644
index 000000000..93972df58
--- /dev/null
+++ b/target/ppc/machine.c
@@ -0,0 +1,859 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "helper_regs.h"
+#include "mmu-hash64.h"
+#include "migration/cpu.h"
+#include "qapi/error.h"
+#include "qemu/main-loop.h"
+#include "kvm_ppc.h"
+
+static void post_load_update_msr(CPUPPCState *env)
+{
+ target_ulong msr = env->msr;
+
+ /*
+ * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
+ * before restoring. Note that this recomputes hflags.
+ */
+ env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
+ ppc_store_msr(env, msr);
+}
+
+static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ unsigned int i, j;
+ target_ulong sdr1;
+ uint32_t fpscr, vscr;
+#if defined(TARGET_PPC64)
+ int32_t slb_nr;
+#endif
+ target_ulong xer;
+
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->gpr[i]);
+ }
+#if !defined(TARGET_PPC64)
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->gprh[i]);
+ }
+#endif
+ qemu_get_betls(f, &env->lr);
+ qemu_get_betls(f, &env->ctr);
+ for (i = 0; i < 8; i++) {
+ qemu_get_be32s(f, &env->crf[i]);
+ }
+ qemu_get_betls(f, &xer);
+ cpu_write_xer(env, xer);
+ qemu_get_betls(f, &env->reserve_addr);
+ qemu_get_betls(f, &env->msr);
+ for (i = 0; i < 4; i++) {
+ qemu_get_betls(f, &env->tgpr[i]);
+ }
+ for (i = 0; i < 32; i++) {
+ union {
+ float64 d;
+ uint64_t l;
+ } u;
+ u.l = qemu_get_be64(f);
+ *cpu_fpr_ptr(env, i) = u.d;
+ }
+ qemu_get_be32s(f, &fpscr);
+ env->fpscr = fpscr;
+ qemu_get_sbe32s(f, &env->access_type);
+#if defined(TARGET_PPC64)
+ qemu_get_betls(f, &env->spr[SPR_ASR]);
+ qemu_get_sbe32s(f, &slb_nr);
+#endif
+ qemu_get_betls(f, &sdr1);
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->sr[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
+ qemu_get_betls(f, &env->DBAT[i][j]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
+ qemu_get_betls(f, &env->IBAT[i][j]);
+ }
+ }
+ qemu_get_sbe32s(f, &env->nb_tlb);
+ qemu_get_sbe32s(f, &env->tlb_per_way);
+ qemu_get_sbe32s(f, &env->nb_ways);
+ qemu_get_sbe32s(f, &env->last_way);
+ qemu_get_sbe32s(f, &env->id_tlbs);
+ qemu_get_sbe32s(f, &env->nb_pids);
+ if (env->tlb.tlb6) {
+ /* XXX assumes 6xx */
+ for (i = 0; i < env->nb_tlb; i++) {
+ qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
+ qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
+ qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ qemu_get_betls(f, &env->pb[i]);
+ }
+ for (i = 0; i < 1024; i++) {
+ qemu_get_betls(f, &env->spr[i]);
+ }
+ if (!cpu->vhyp) {
+ ppc_store_sdr1(env, sdr1);
+ }
+ qemu_get_be32s(f, &vscr);
+ ppc_store_vscr(env, vscr);
+ qemu_get_be64s(f, &env->spe_acc);
+ qemu_get_be32s(f, &env->spe_fscr);
+ qemu_get_betls(f, &env->msr_mask);
+ qemu_get_be32s(f, &env->flags);
+ qemu_get_sbe32s(f, &env->error_code);
+ qemu_get_be32s(f, &env->pending_interrupts);
+ qemu_get_be32s(f, &env->irq_input_state);
+ for (i = 0; i < POWERPC_EXCP_NB; i++) {
+ qemu_get_betls(f, &env->excp_vectors[i]);
+ }
+ qemu_get_betls(f, &env->excp_prefix);
+ qemu_get_betls(f, &env->ivor_mask);
+ qemu_get_betls(f, &env->ivpr_mask);
+ qemu_get_betls(f, &env->hreset_vector);
+ qemu_get_betls(f, &env->nip);
+ qemu_get_sbetl(f); /* Discard unused hflags */
+ qemu_get_sbetl(f); /* Discard unused hflags_nmsr */
+ qemu_get_sbe32(f); /* Discard unused mmu_idx */
+ qemu_get_sbe32(f); /* Discard unused power_mode */
+
+ post_load_update_msr(env);
+
+ return 0;
+}
+
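+/*
+ * 128-bit Altivec registers don't fit the stock VMState scalar types,
+ * so they use custom get/put handlers that stream the two 64-bit
+ * halves as big-endian values.
+ */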
+static int get_avr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ ppc_avr_t *v = pv;
+
+ v->u64[0] = qemu_get_be64(f);
+ v->u64[1] = qemu_get_be64(f);
+
+ return 0;
+}
+
+static int put_avr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ppc_avr_t *v = pv;
+
+ qemu_put_be64(f, v->u64[0]);
+ qemu_put_be64(f, v->u64[1]);
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_avr = {
+ .name = "avr",
+ .get = get_avr,
+ .put = put_avr,
+};
+
+#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)
+
+#define VMSTATE_AVR_ARRAY(_f, _s, _n) \
+ VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
+
+static int get_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ ppc_vsr_t *v = pv;
+
+ v->VsrD(0) = qemu_get_be64(f);
+
+ return 0;
+}
+
+static int put_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ppc_vsr_t *v = pv;
+
+ qemu_put_be64(f, v->VsrD(0));
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_fpr = {
+ .name = "fpr",
+ .get = get_fpr,
+ .put = put_fpr,
+};
+
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)
+
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+static int get_vsr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ ppc_vsr_t *v = pv;
+
+ v->VsrD(1) = qemu_get_be64(f);
+
+ return 0;
+}
+
+static int put_vsr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ppc_vsr_t *v = pv;
+
+ qemu_put_be64(f, v->VsrD(1));
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_vsr = {
+ .name = "vsr",
+ .get = get_vsr,
+ .put = put_vsr,
+};
+
+#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)
+
+#define VMSTATE_VSR_ARRAY(_f, _s, _n) \
+ VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
+
+static bool cpu_pre_2_8_migration(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->pre_2_8_migration;
+}
+
+#if defined(TARGET_PPC64)
+static bool cpu_pre_3_0_migration(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->pre_3_0_migration;
+}
+#endif
+
+static int cpu_pre_save(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+ uint64_t insns_compat_mask =
+ PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
+ | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
+ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
+ | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
+ | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
+ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
+ | PPC_64B | PPC_64BX | PPC_ALTIVEC
+ | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
+ uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
+ | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
+ | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
+ | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
+ | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
+ | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
+
+ env->spr[SPR_LR] = env->lr;
+ env->spr[SPR_CTR] = env->ctr;
+ env->spr[SPR_XER] = cpu_read_xer(env);
+#if defined(TARGET_PPC64)
+ env->spr[SPR_CFAR] = env->cfar;
+#endif
+ env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
+
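+ /* Mirror the BAT registers into the SPR array, which is what actually gets migrated. */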
+ for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+ env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
+ env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
+ env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
+ env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
+ }
+ for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
+ env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
+ env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
+ env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
+ env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
+ }
+
+ /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
+ if (cpu->pre_2_8_migration) {
+ /*
+ * Mask out bits that got added to msr_mask since the versions
+ * which stupidly included it in the migration stream.
+ */
+ target_ulong metamask = 0
+#if defined(TARGET_PPC64)
+ | (1ULL << MSR_TS0)
+ | (1ULL << MSR_TS1)
+#endif
+ ;
+ cpu->mig_msr_mask = env->msr_mask & ~metamask;
+ cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
+ /*
+ * CPU models supported by old machines all have
+ * PPC_MEM_TLBIE, so we set it unconditionally to allow
+ * backward migration from a POWER9 host to a POWER8 host.
+ */
+ cpu->mig_insns_flags |= PPC_MEM_TLBIE;
+ cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
+ cpu->mig_nb_BATs = env->nb_BATs;
+ }
+ if (cpu->pre_3_0_migration) {
+ if (cpu->hash64_opts) {
+ cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
+ }
+ }
+
+ /* Retain migration compatibility with pre-6.0 QEMU for 601 machines. */
+ env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE
+ ? env->hflags & MSR_LE : 0);
+
+ return 0;
+}
+
+/*
+ * Determine if a given PVR is a "close enough" match to the CPU
+ * object. For TCG and KVM PR it would probably be sufficient to
+ * require an exact PVR match. However, for KVM HV the user is
+ * restricted to a PVR exactly matching the host CPU. The correct way
+ * to handle this is to put the guest into an architected
+ * compatibility mode. However, to allow a more forgiving transition
+ * and migration from before this was widely done, we allow migration
+ * between sufficiently similar PVRs, as determined by the CPU class's
+ * pvr_match() hook.
+ */
+static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ if (pvr == pcc->pvr) {
+ return true;
+ }
+ return pcc->pvr_match(pcc, pvr);
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ /*
+ * If we're operating in compat mode, we should be ok as long as
+ * the destination supports the same compatibility mode.
+ *
+ * Otherwise, however, we require that the destination has exactly
+ * the same CPU model as the source.
+ */
+
+#if defined(TARGET_PPC64)
+ if (cpu->compat_pvr) {
+ uint32_t compat_pvr = cpu->compat_pvr;
+ Error *local_err = NULL;
+ int ret;
+
+ cpu->compat_pvr = 0;
+ ret = ppc_set_compat(cpu, compat_pvr, &local_err);
+ if (ret < 0) {
+ error_report_err(local_err);
+ return ret;
+ }
+ } else
+#endif
+ {
+ if (!pvr_match(cpu, env->spr[SPR_PVR])) {
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * If we're running with KVM HV, there is a chance that the guest
+ * is running with KVM HV and its kernel does not have the
+ * capability of dealing with a PVR other than the exact host PVR
+ * in KVM_SET_SREGS. If that happens, the guest freezes after
+ * migration.
+ *
+ * The function kvmppc_pvr_workaround_required does this verification
+ * by first checking if the kernel has the cap, returning true
+ * immediately if that is the case. Otherwise, it checks if we're
+ * running in KVM PR. If the guest kernel does not have the cap and
+ * we're not running KVM-PR (so, it is running KVM-HV), we need to
+ * ensure that KVM_SET_SREGS will receive the PVR it expects as a
+ * workaround.
+ */
+ if (kvmppc_pvr_workaround_required(cpu)) {
+ env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
+ }
+
+ env->lr = env->spr[SPR_LR];
+ env->ctr = env->spr[SPR_CTR];
+ cpu_write_xer(env, env->spr[SPR_XER]);
+#if defined(TARGET_PPC64)
+ env->cfar = env->spr[SPR_CFAR];
+#endif
+ env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
+
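+ /* Rebuild the BAT arrays from the migrated SPR values. */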
+ for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+ env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
+ env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
+ env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
+ env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
+ }
+ for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
+ env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
+ env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
+ env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
+ env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
+ }
+
+ if (!cpu->vhyp) {
+ ppc_store_sdr1(env, env->spr[SPR_SDR1]);
+ }
+
+ post_load_update_msr(env);
+
+ return 0;
+}
+
+static bool fpu_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->env.insns_flags & PPC_FLOAT;
+}
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
+ VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool altivec_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->env.insns_flags & PPC_ALTIVEC;
+}
+
+static int get_vscr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ PowerPCCPU *cpu = opaque;
+ ppc_store_vscr(&cpu->env, qemu_get_be32(f));
+ return 0;
+}
+
+static int put_vscr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ PowerPCCPU *cpu = opaque;
+ qemu_put_be32(f, ppc_get_vscr(&cpu->env));
+ return 0;
+}
+
+static const VMStateInfo vmstate_vscr = {
+ .name = "cpu/altivec/vscr",
+ .get = get_vscr,
+ .put = put_vscr,
+};
+
+static const VMStateDescription vmstate_altivec = {
+ .name = "cpu/altivec",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = altivec_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
+ /*
+ * Save the architected value of the vscr, not the internally
+ * expanded version. Since this architected value does not
+ * exist in memory to be stored, this requires a bit of hoop
+ * jumping. We want OFFSET=0 so that we effectively pass the
+ * CPU to the helper functions.
+ */
+ {
+ .name = "vscr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_vscr,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool vsx_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->env.insns_flags2 & PPC2_VSX;
+}
+
+static const VMStateDescription vmstate_vsx = {
+ .name = "cpu/vsx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vsx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+#ifdef TARGET_PPC64
+/* Transactional memory state */
+static bool tm_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ return msr_ts;
+}
+
+static const VMStateDescription vmstate_tm = {
+ .name = "cpu/tm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = tm_needed,
+ .fields = (VMStateField []) {
+ VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
+ VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
+ VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
+ VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+#endif
+
+static bool sr_needed(void *opaque)
+{
+#ifdef TARGET_PPC64
+ PowerPCCPU *cpu = opaque;
+
+ return !mmu_is_64bit(cpu->env.mmu_model);
+#else
+ return true;
+#endif
+}
+
+static const VMStateDescription vmstate_sr = {
+ .name = "cpu/sr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = sr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+#ifdef TARGET_PPC64
+static int get_slbe(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ ppc_slb_t *v = pv;
+
+ v->esid = qemu_get_be64(f);
+ v->vsid = qemu_get_be64(f);
+
+ return 0;
+}
+
+static int put_slbe(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ppc_slb_t *v = pv;
+
+ qemu_put_be64(f, v->esid);
+ qemu_put_be64(f, v->vsid);
+ return 0;
+}
+
+static const VMStateInfo vmstate_info_slbe = {
+ .name = "slbe",
+ .get = get_slbe,
+ .put = put_slbe,
+};
+
+#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)
+
+#define VMSTATE_SLB_ARRAY(_f, _s, _n) \
+ VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
+
+static bool slb_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ /* We don't support any of the old segment table based 64-bit CPUs */
+ return mmu_is_64bit(cpu->env.mmu_model);
+}
+
+static int slb_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ /*
+ * We've pulled in the raw esid and vsid values from the migration
+ * stream, but we need to recompute the page size pointers.
+ */
+ for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
+ if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
+ /* Migration source had bad values in its SLB */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_slb = {
+ .name = "cpu/slb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = slb_needed,
+ .post_load = slb_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
+ VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif /* TARGET_PPC64 */
+
+static const VMStateDescription vmstate_tlb6xx_entry = {
+ .name = "cpu/tlb6xx_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
+ VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
+ VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlb6xx_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_6XX);
+}
+
+static const VMStateDescription vmstate_tlb6xx = {
+ .name = "cpu/tlb6xx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlb6xx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlb6xx_entry,
+ ppc6xx_tlb_t),
+ VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_tlbemb_entry = {
+ .name = "cpu/tlbemb_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(RPN, ppcemb_tlb_t),
+ VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
+ VMSTATE_UINTTL(PID, ppcemb_tlb_t),
+ VMSTATE_UINTTL(size, ppcemb_tlb_t),
+ VMSTATE_UINT32(prot, ppcemb_tlb_t),
+ VMSTATE_UINT32(attr, ppcemb_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlbemb_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_EMB);
+}
+
+static bool pbr403_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ uint32_t pvr = cpu->env.spr[SPR_PVR];
+
+ return (pvr & 0xffff0000) == 0x00200000;
+}
+
+static const VMStateDescription vmstate_pbr403 = {
+ .name = "cpu/pbr403",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pbr403_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_tlbemb = {
+ .name = "cpu/tlb6xx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlbemb_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlbemb_entry,
+ ppcemb_tlb_t),
+ /* 403 protection registers */
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_pbr403,
+ NULL
+ }
+};
+
+static const VMStateDescription vmstate_tlbmas_entry = {
+ .name = "cpu/tlbmas_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(mas8, ppcmas_tlb_t),
+ VMSTATE_UINT32(mas1, ppcmas_tlb_t),
+ VMSTATE_UINT64(mas2, ppcmas_tlb_t),
+ VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlbmas_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_MAS);
+}
+
+static const VMStateDescription vmstate_tlbmas = {
+ .name = "cpu/tlbmas",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlbmas_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlbmas_entry,
+ ppcmas_tlb_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool compat_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ assert(!(cpu->compat_pvr && !cpu->vhyp));
+ return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
+}
+
+static const VMStateDescription vmstate_compat = {
+ .name = "cpu/compat",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = compat_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(compat_pvr, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ppc_cpu = {
+ .name = "cpu",
+ .version_id = 5,
+ .minimum_version_id = 5,
+ .minimum_version_id_old = 4,
+ .load_state_old = cpu_load_old,
+ .pre_save = cpu_pre_save,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */
+
+ /* User mode architected state */
+ VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
+#if !defined(TARGET_PPC64)
+ VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
+#endif
+ VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
+ VMSTATE_UINTTL(env.nip, PowerPCCPU),
+
+ /* SPRs */
+ VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
+ VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
+
+ /* Reservation */
+ VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+
+ /* Supervisor mode architected state */
+ VMSTATE_UINTTL(env.msr, PowerPCCPU),
+
+ /* Backward compatible internal state */
+ VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),
+
+ /* Sanity checking */
+ VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
+ cpu_pre_2_8_migration),
+ VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fpu,
+ &vmstate_altivec,
+ &vmstate_vsx,
+ &vmstate_sr,
+#ifdef TARGET_PPC64
+ &vmstate_tm,
+ &vmstate_slb,
+#endif /* TARGET_PPC64 */
+ &vmstate_tlb6xx,
+ &vmstate_tlbemb,
+ &vmstate_tlbmas,
+ &vmstate_compat,
+ NULL
+ }
+};
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
new file mode 100644
index 000000000..39945d9ea
--- /dev/null
+++ b/target/ppc/mem_helper.c
@@ -0,0 +1,622 @@
+/*
+ * PowerPC memory access emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "exec/helper-proto.h"
+#include "helper_regs.h"
+#include "exec/cpu_ldst.h"
+#include "internal.h"
+#include "qemu/atomic128.h"
+
+/* #define DEBUG_OP */
+
+static inline bool needs_byteswap(const CPUPPCState *env)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return msr_le;
+#else
+ return !msr_le;
+#endif
+}
+
+/*****************************************************************************/
+/* Memory load and stores */
+
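+/* Add a displacement to an effective address, wrapping to 32 bits when the CPU is not in 64-bit mode. */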
+static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
+ target_long arg)
+{
+#if defined(TARGET_PPC64)
+ if (!msr_is_64bit(env, env->msr)) {
+ return (uint32_t)(addr + arg);
+ } else
+#endif
+ {
+ return addr + arg;
+ }
+}
+
+static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t raddr)
+{
+ void *host1, *host2;
+ uint32_t nb_pg1, nb_pg2;
+
+ nb_pg1 = -(addr | TARGET_PAGE_MASK);
+ if (likely(nb <= nb_pg1)) {
+ /* The entire operation is on a single page. */
+ return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
+ }
+
+ /* The operation spans two pages. */
+ nb_pg2 = nb - nb_pg1;
+ host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
+ addr = addr_add(env, addr, nb_pg1);
+ host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);
+
+ /* If the two host pages are contiguous, optimize. */
+ if (host2 == host1 + nb_pg1) {
+ return host1;
+ }
+ return NULL;
+}
+
+void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
+{
+ uintptr_t raddr = GETPC();
+ int mmu_idx = cpu_mmu_index(env, false);
+ void *host = probe_contiguous(env, addr, (32 - reg) * 4,
+ MMU_DATA_LOAD, mmu_idx, raddr);
+
+ if (likely(host)) {
+ /* Fast path -- the entire operation is in RAM at host. */
+ for (; reg < 32; reg++) {
+ env->gpr[reg] = (uint32_t)ldl_be_p(host);
+ host += 4;
+ }
+ } else {
+ /* Slow path -- at least some of the operation requires i/o. */
+ for (; reg < 32; reg++) {
+ env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
+ addr = addr_add(env, addr, 4);
+ }
+ }
+}
+
+void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
+{
+ uintptr_t raddr = GETPC();
+ int mmu_idx = cpu_mmu_index(env, false);
+ void *host = probe_contiguous(env, addr, (32 - reg) * 4,
+ MMU_DATA_STORE, mmu_idx, raddr);
+
+ if (likely(host)) {
+ /* Fast path -- the entire operation is in RAM at host. */
+ for (; reg < 32; reg++) {
+ stl_be_p(host, env->gpr[reg]);
+ host += 4;
+ }
+ } else {
+ /* Slow path -- at least some of the operation requires i/o. */
+ for (; reg < 32; reg++) {
+ cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
+ addr = addr_add(env, addr, 4);
+ }
+ }
+}
+
+static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ uint32_t reg, uintptr_t raddr)
+{
+ int mmu_idx;
+ void *host;
+ uint32_t val;
+
+ if (unlikely(nb == 0)) {
+ return;
+ }
+
+ mmu_idx = cpu_mmu_index(env, false);
+ host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);
+
+ if (likely(host)) {
+ /* Fast path -- the entire operation is in RAM at host. */
+ for (; nb > 3; nb -= 4) {
+ env->gpr[reg] = (uint32_t)ldl_be_p(host);
+ reg = (reg + 1) % 32;
+ host += 4;
+ }
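+ /* Pack the 1..3 trailing bytes into the most-significant end of the register. */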
+ switch (nb) {
+ default:
+ return;
+ case 1:
+ val = ldub_p(host) << 24;
+ break;
+ case 2:
+ val = lduw_be_p(host) << 16;
+ break;
+ case 3:
+ val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
+ break;
+ }
+ } else {
+ /* Slow path -- at least some of the operation requires i/o. */
+ for (; nb > 3; nb -= 4) {
+ env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
+ reg = (reg + 1) % 32;
+ addr = addr_add(env, addr, 4);
+ }
+ switch (nb) {
+ default:
+ return;
+ case 1:
+ val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
+ break;
+ case 2:
+ val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
+ break;
+ case 3:
+ val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
+ addr = addr_add(env, addr, 2);
+ val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
+ break;
+ }
+ }
+ env->gpr[reg] = val;
+}
+
+void helper_lsw(CPUPPCState *env, target_ulong addr,
+ uint32_t nb, uint32_t reg)
+{
+ do_lsw(env, addr, nb, reg, GETPC());
+}
+
+/*
+ * PPC32 specification says we must generate an exception if rA is in
+ * the range of registers to be loaded. On the other hand, IBM says
+ * this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
+ */
+void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
+ uint32_t ra, uint32_t rb)
+{
+ if (likely(xer_bc != 0)) {
+ int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
+ if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
+ lsw_reg_in_range(reg, num_used_regs, rb))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_LSWX, GETPC());
+ } else {
+ do_lsw(env, addr, xer_bc, reg, GETPC());
+ }
+ }
+}
+
+void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ uint32_t reg)
+{
+ uintptr_t raddr = GETPC();
+ int mmu_idx;
+ void *host;
+ uint32_t val;
+
+ if (unlikely(nb == 0)) {
+ return;
+ }
+
+ mmu_idx = cpu_mmu_index(env, false);
+ host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);
+
+ if (likely(host)) {
+ /* Fast path -- the entire operation is in RAM at host. */
+ for (; nb > 3; nb -= 4) {
+ stl_be_p(host, env->gpr[reg]);
+ reg = (reg + 1) % 32;
+ host += 4;
+ }
+ val = env->gpr[reg];
+ switch (nb) {
+ case 1:
+ stb_p(host, val >> 24);
+ break;
+ case 2:
+ stw_be_p(host, val >> 16);
+ break;
+ case 3:
+ stw_be_p(host, val >> 16);
+ stb_p(host + 2, val >> 8);
+ break;
+ }
+ } else {
+ for (; nb > 3; nb -= 4) {
+ cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
+ reg = (reg + 1) % 32;
+ addr = addr_add(env, addr, 4);
+ }
+ val = env->gpr[reg];
+ switch (nb) {
+ case 1:
+ cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
+ break;
+ case 2:
+ cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
+ break;
+ case 3:
+ cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
+ addr = addr_add(env, addr, 2);
+ cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
+ break;
+ }
+ }
+}
+
+static void dcbz_common(CPUPPCState *env, target_ulong addr,
+ uint32_t opcode, bool epid, uintptr_t retaddr)
+{
+ target_ulong mask, dcbz_size = env->dcache_line_size;
+ uint32_t i;
+ void *haddr;
+ int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false);
+
+#if defined(TARGET_PPC64)
+ /* Check for dcbz vs dcbzl on 970 */
+ if (env->excp_model == POWERPC_EXCP_970 &&
+ !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
+ dcbz_size = 32;
+ }
+#endif
+
+ /* Align address */
+ mask = ~(dcbz_size - 1);
+ addr &= mask;
+
+ /* Check reservation */
+ if ((env->reserve_addr & mask) == addr) {
+ env->reserve_addr = (target_ulong)-1ULL;
+ }
+
+ /* Try fast path translate */
+ haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
+ if (haddr) {
+ memset(haddr, 0, dcbz_size);
+ } else {
+ /* Slow path */
+ for (i = 0; i < dcbz_size; i += 8) {
+ cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
+ }
+ }
+}
+
+void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
+{
+ dcbz_common(env, addr, opcode, false, GETPC());
+}
+
+void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
+{
+ dcbz_common(env, addr, opcode, true, GETPC());
+}
+
+void helper_icbi(CPUPPCState *env, target_ulong addr)
+{
+ addr &= ~(env->dcache_line_size - 1);
+ /*
+ * Invalidate one cache line:
+ * the PowerPC specification says this is to be treated like a load
+ * (not a fetch) by the MMU. To make sure that is the case,
+ * do the load "by hand".
+ */
+ cpu_ldl_data_ra(env, addr, GETPC());
+}
+
+void helper_icbiep(CPUPPCState *env, target_ulong addr)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* See comments above */
+ addr &= ~(env->dcache_line_size - 1);
+ cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
+#endif
+}
+
+/* XXX: to be tested */
+target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
+ uint32_t ra, uint32_t rb)
+{
+ int i, c, d;
+
+ d = 24;
+ for (i = 0; i < xer_bc; i++) {
+ c = cpu_ldub_data_ra(env, addr, GETPC());
+ addr = addr_add(env, addr, 1);
+ /* ra (if not 0) and rb are never modified */
+ if (likely(reg != rb && (ra == 0 || reg != ra))) {
+ env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
+ }
+ if (unlikely(c == xer_cmp)) {
+ break;
+ }
+ if (likely(d != 0)) {
+ d -= 8;
+ } else {
+ d = 24;
+ reg++;
+ reg = reg & 0x1F;
+ }
+ }
+ return i;
+}
+
+#ifdef TARGET_PPC64
+uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint32_t opidx)
+{
+ Int128 ret;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_ATOMIC128);
+ ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
+ env->retxh = int128_gethi(ret);
+ return int128_getlo(ret);
+}
+
+uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint32_t opidx)
+{
+ Int128 ret;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_ATOMIC128);
+ ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
+ env->retxh = int128_gethi(ret);
+ return int128_getlo(ret);
+}
+
+void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t lo, uint64_t hi, uint32_t opidx)
+{
+ Int128 val;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_ATOMIC128);
+ val = int128_make128(lo, hi);
+ cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
+}
+
+void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t lo, uint64_t hi, uint32_t opidx)
+{
+ Int128 val;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_ATOMIC128);
+ val = int128_make128(lo, hi);
+ cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
+}
+
+uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_CMPXCHG128);
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+ return env->so + success * CRF_EQ_BIT;
+}
+
+uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
+ uint64_t new_lo, uint64_t new_hi,
+ uint32_t opidx)
+{
+ bool success = false;
+
+ /* We will have raised EXCP_ATOMIC from the translator. */
+ assert(HAVE_CMPXCHG128);
+
+ if (likely(addr == env->reserve_addr)) {
+ Int128 oldv, cmpv, newv;
+
+ cmpv = int128_make128(env->reserve_val2, env->reserve_val);
+ newv = int128_make128(new_lo, new_hi);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
+ opidx, GETPC());
+ success = int128_eq(oldv, cmpv);
+ }
+ env->reserve_addr = -1;
+ return env->so + success * CRF_EQ_BIT;
+}
+#endif
+
+/*****************************************************************************/
+/* Altivec extension helpers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define HI_IDX 0
+#define LO_IDX 1
+#else
+#define HI_IDX 1
+#define LO_IDX 0
+#endif
+
+/*
+ * We use msr_le to determine index ordering in a vector. However,
+ * byteswapping is not simply controlled by msr_le. We also need to
+ * take the endianness of the target into account; this matters for
+ * the little-endian PPC64 user-mode target.
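+ * For example, lvewx with (addr & 0xf) == 4 selects u32 element 1 in
+ * big-endian element order, but element 2 when MSR_LE is set.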
+ */
+
+#define LVE(name, access, swap, element) \
+ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
+ target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX * (n_elems - 1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+ if (msr_le) { \
+ index = n_elems - index - 1; \
+ } \
+ \
+ if (needs_byteswap(env)) { \
+ r->element[LO_IDX ? index : (adjust - index)] = \
+ swap(access(env, addr, GETPC())); \
+ } else { \
+ r->element[LO_IDX ? index : (adjust - index)] = \
+ access(env, addr, GETPC()); \
+ } \
+ }
+#define I(x) (x)
+LVE(lvebx, cpu_ldub_data_ra, I, u8)
+LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
+LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
+#undef I
+#undef LVE
+
+#define STVE(name, access, swap, element) \
+ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
+ target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX * (n_elems - 1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+ if (msr_le) { \
+ index = n_elems - index - 1; \
+ } \
+ \
+ if (needs_byteswap(env)) { \
+ access(env, addr, swap(r->element[LO_IDX ? index : \
+ (adjust - index)]), \
+ GETPC()); \
+ } else { \
+ access(env, addr, r->element[LO_IDX ? index : \
+ (adjust - index)], GETPC()); \
+ } \
+ }
+#define I(x) (x)
+STVE(stvebx, cpu_stb_data_ra, I, u8)
+STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
+STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
+#undef I
+#undef LVE
+
+#ifdef TARGET_PPC64
+#define GET_NB(rb) ((rb >> 56) & 0xFF)
+
+#define VSX_LXVL(name, lj) \
+void helper_##name(CPUPPCState *env, target_ulong addr, \
+ ppc_vsr_t *xt, target_ulong rb) \
+{ \
+ ppc_vsr_t t; \
+ uint64_t nb = GET_NB(rb); \
+ int i; \
+ \
+ t.s128 = int128_zero(); \
+ if (nb) { \
+ nb = (nb >= 16) ? 16 : nb; \
+ if (msr_le && !lj) { \
+ for (i = 16; i > 16 - nb; i--) { \
+ t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ addr = addr_add(env, addr, 1); \
+ } \
+ } else { \
+ for (i = 0; i < nb; i++) { \
+ t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ addr = addr_add(env, addr, 1); \
+ } \
+ } \
+ } \
+ *xt = t; \
+}
+
+VSX_LXVL(lxvl, 0)
+VSX_LXVL(lxvll, 1)
+#undef VSX_LXVL
+
+#define VSX_STXVL(name, lj) \
+void helper_##name(CPUPPCState *env, target_ulong addr, \
+ ppc_vsr_t *xt, target_ulong rb) \
+{ \
+ target_ulong nb = GET_NB(rb); \
+ int i; \
+ \
+ if (!nb) { \
+ return; \
+ } \
+ \
+ nb = (nb >= 16) ? 16 : nb; \
+ if (msr_le && !lj) { \
+ for (i = 16; i > 16 - nb; i--) { \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
+ addr = addr_add(env, addr, 1); \
+ } \
+ } else { \
+ for (i = 0; i < nb; i++) { \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC()); \
+ addr = addr_add(env, addr, 1); \
+ } \
+ } \
+}
+
+VSX_STXVL(stxvl, 0)
+VSX_STXVL(stxvll, 1)
+#undef VSX_STXVL
+#undef GET_NB
+#endif /* TARGET_PPC64 */
+
+#undef HI_IDX
+#undef LO_IDX
+
+void helper_tbegin(CPUPPCState *env)
+{
+ /*
+ * As a degenerate implementation, always fail tbegin. The reason
+ * given is "Nesting overflow". The "persistent" bit is set,
+ * providing a hint to the error handler to not retry. The TFIAR
+ * captures the address of the failure, which is this tbegin
+ * instruction. Instruction execution will continue with the next
+ * instruction in memory, which is precisely what we want.
+ */
+
+ env->spr[SPR_TEXASR] =
+ (1ULL << TEXASR_FAILURE_PERSISTENT) |
+ (1ULL << TEXASR_NESTING_OVERFLOW) |
+ (msr_hv << TEXASR_PRIVILEGE_HV) |
+ (msr_pr << TEXASR_PRIVILEGE_PR) |
+ (1ULL << TEXASR_FAILURE_SUMMARY) |
+ (1ULL << TEXASR_TFIAR_EXACT);
+ env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
+ env->spr[SPR_TFHAR] = env->nip + 4;
+ env->crf[0] = 0xB; /* 0b1011 = transaction failure */
+}
diff --git a/target/ppc/meson.build b/target/ppc/meson.build
new file mode 100644
index 000000000..b85f29570
--- /dev/null
+++ b/target/ppc/meson.build
@@ -0,0 +1,57 @@
+ppc_ss = ss.source_set()
+ppc_ss.add(files(
+ 'cpu-models.c',
+ 'cpu.c',
+ 'cpu_init.c',
+ 'excp_helper.c',
+ 'gdbstub.c',
+ 'helper_regs.c',
+))
+
+ppc_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'dfp_helper.c',
+ 'fpu_helper.c',
+ 'int_helper.c',
+ 'mem_helper.c',
+ 'misc_helper.c',
+ 'timebase_helper.c',
+ 'translate.c',
+))
+
+ppc_ss.add(libdecnumber)
+
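+# Generate the 32-bit and 64-bit instruction decoders from the decodetree specs.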
+gen = [
+ decodetree.process('insn32.decode',
+ extra_args: '--static-decode=decode_insn32'),
+ decodetree.process('insn64.decode',
+ extra_args: ['--static-decode=decode_insn64',
+ '--insnwidth=64']),
+]
+ppc_ss.add(gen)
+
+ppc_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
+ppc_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user_only_helper.c'))
+
+ppc_softmmu_ss = ss.source_set()
+ppc_softmmu_ss.add(files(
+ 'arch_dump.c',
+ 'machine.c',
+ 'mmu-hash32.c',
+ 'mmu_common.c',
+ 'monitor.c',
+))
+ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'mmu_helper.c',
+), if_false: files(
+ 'tcg-stub.c',
+))
+
+ppc_softmmu_ss.add(when: 'TARGET_PPC64', if_true: files(
+ 'compat.c',
+ 'mmu-book3s-v3.c',
+ 'mmu-hash64.c',
+ 'mmu-radix64.c',
+))
+
+target_arch += {'ppc': ppc_ss}
+target_softmmu_arch += {'ppc': ppc_softmmu_ss}
diff --git a/target/ppc/mfrom_table.c.inc b/target/ppc/mfrom_table.c.inc
new file mode 100644
index 000000000..1653b974a
--- /dev/null
+++ b/target/ppc/mfrom_table.c.inc
@@ -0,0 +1,78 @@
+static const uint8_t mfrom_ROM_table[602] = {
+ 77, 77, 76, 76, 75, 75, 74, 74,
+ 73, 73, 72, 72, 71, 71, 70, 70,
+ 69, 69, 68, 68, 68, 67, 67, 66,
+ 66, 65, 65, 64, 64, 64, 63, 63,
+ 62, 62, 61, 61, 61, 60, 60, 59,
+ 59, 58, 58, 58, 57, 57, 56, 56,
+ 56, 55, 55, 54, 54, 54, 53, 53,
+ 53, 52, 52, 51, 51, 51, 50, 50,
+ 50, 49, 49, 49, 48, 48, 47, 47,
+ 47, 46, 46, 46, 45, 45, 45, 44,
+ 44, 44, 43, 43, 43, 42, 42, 42,
+ 42, 41, 41, 41, 40, 40, 40, 39,
+ 39, 39, 39, 38, 38, 38, 37, 37,
+ 37, 37, 36, 36, 36, 35, 35, 35,
+ 35, 34, 34, 34, 34, 33, 33, 33,
+ 33, 32, 32, 32, 32, 31, 31, 31,
+ 31, 30, 30, 30, 30, 29, 29, 29,
+ 29, 28, 28, 28, 28, 28, 27, 27,
+ 27, 27, 26, 26, 26, 26, 26, 25,
+ 25, 25, 25, 25, 24, 24, 24, 24,
+ 24, 23, 23, 23, 23, 23, 23, 22,
+ 22, 22, 22, 22, 21, 21, 21, 21,
+ 21, 21, 20, 20, 20, 20, 20, 20,
+ 19, 19, 19, 19, 19, 19, 19, 18,
+ 18, 18, 18, 18, 18, 17, 17, 17,
+ 17, 17, 17, 17, 16, 16, 16, 16,
+ 16, 16, 16, 16, 15, 15, 15, 15,
+ 15, 15, 15, 15, 14, 14, 14, 14,
+ 14, 14, 14, 14, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0,
+};
diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c
new file mode 100644
index 000000000..f96c4268b
--- /dev/null
+++ b/target/ppc/mfrom_table_gen.c
@@ -0,0 +1,34 @@
+#define _GNU_SOURCE
+#include "qemu/osdep.h"
+#include <math.h>
+
+int main(void)
+{
+ double d;
+ uint8_t n;
+ int i;
+
+ printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
+ for (i = 0; i < 602; i++) {
+ /*
+ * Extremely decomposed:
+ * T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
+ */
+ d = -i;
+ d /= 256.0;
+ d = exp10(d);
+ d += 1.0;
+ d = log10(d);
+ d *= 256;
+ d += 0.5;
+ n = d;
+ printf("%3d, ", n);
+ if ((i & 7) == 7) {
+ printf("\n ");
+ }
+ }
+ printf("\n};\n");
+
+ return 0;
+}
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
new file mode 100644
index 000000000..c33f5f39b
--- /dev/null
+++ b/target/ppc/misc_helper.c
@@ -0,0 +1,320 @@
+/*
+ * Miscellaneous PowerPC emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "mmu-book3s-v3.h"
+
+#include "helper_regs.h"
+
+/*****************************************************************************/
+/* SPR accesses */
+void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+ qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+ qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+#ifdef TARGET_PPC64
+static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
+ const char *caller, uint32_t cause,
+ uintptr_t raddr)
+{
+ qemu_log_mask(CPU_LOG_INT, "HV Facility %d is unavailable (%s)\n",
+ bit, caller);
+
+ env->spr[SPR_HFSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
+
+ raise_exception_err_ra(env, POWERPC_EXCP_HV_FU, cause, raddr);
+}
+
+static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause,
+ uintptr_t raddr)
+{
+ qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);
+
+ env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
+ cause &= FSCR_IC_MASK;
+ env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;
+
+ raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
+}
+#endif
+
+void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
+ const char *caller, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+ if ((env->msr_mask & MSR_HVB) && !msr_hv &&
+ !(env->spr[SPR_HFSCR] & (1UL << bit))) {
+ raise_hv_fu_exception(env, bit, caller, cause, GETPC());
+ }
+#endif
+}
+
+void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+ if (env->spr[SPR_FSCR] & (1ULL << bit)) {
+ /* Facility is enabled, continue */
+ return;
+ }
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+ if (env->msr & (1ULL << bit)) {
+ /* Facility is enabled, continue */
+ return;
+ }
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void helper_store_sdr1(CPUPPCState *env, target_ulong val)
+{
+ if (env->spr[SPR_SDR1] != val) {
+ ppc_store_sdr1(env, val);
+ tlb_flush(env_cpu(env));
+ }
+}
+
+#if defined(TARGET_PPC64)
+void helper_store_ptcr(CPUPPCState *env, target_ulong val)
+{
+ if (env->spr[SPR_PTCR] != val) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
+ target_ulong patbsize = val & PTCR_PATS;
+
+ qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, val);
+
+ assert(!cpu->vhyp);
+ assert(env->mmu_model & POWERPC_MMU_3_00);
+
+ if (val & ~ptcr_mask) {
+ error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
+ val & ~ptcr_mask);
+ val &= ptcr_mask;
+ }
+
+ if (patbsize > 24) {
+ error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
+ " stored in PTCR", patbsize);
+ return;
+ }
+
+ env->spr[SPR_PTCR] = val;
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void helper_store_pcr(CPUPPCState *env, target_ulong value)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ env->spr[SPR_PCR] = value & pcc->pcr_mask;
+}
+
+/*
+ * DPDES register is shared. Each bit reflects the state of the
+ * doorbell interrupt of a thread of the same core.
+ */
+target_ulong helper_load_dpdes(CPUPPCState *env)
+{
+ target_ulong dpdes = 0;
+
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);
+
+ /* TODO: TCG supports only one thread */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
+ dpdes = 1;
+ }
+
+ return dpdes;
+}
+
+void helper_store_dpdes(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = CPU(cpu);
+
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);
+
+ /* TODO: TCG supports only one thread */
+ if (val & ~0x1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value "
+ TARGET_FMT_lx"\n", val);
+ return;
+ }
+
+ if (val & 0x1) {
+ env->pending_interrupts |= 1 << PPC_INTERRUPT_DOORBELL;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ }
+}
+#endif /* defined(TARGET_PPC64) */
+
+void helper_store_pidr(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_BOOKS_PID] = val;
+ tlb_flush(env_cpu(env));
+}
+
+void helper_store_lpidr(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_LPIDR] = val;
+
+ /*
+ * We need to flush the TLB on LPID changes, as we only tag HV vs.
+ * guest in the TCG TLB. The quadrants also mean that the HV will
+ * potentially access and cache entries for the current LPID as
+ * well.
+ */
+ tlb_flush(env_cpu(env));
+}
+
+void helper_store_hid0_601(CPUPPCState *env, target_ulong val)
+{
+ target_ulong hid0;
+
+ hid0 = env->spr[SPR_HID0];
+ env->spr[SPR_HID0] = (uint32_t)val;
+
+ if ((val ^ hid0) & 0x00000008) {
+ /* Change current endianness */
+ hreg_compute_hflags(env);
+ qemu_log("%s: set endianness to %c => %08x\n", __func__,
+ val & 0x8 ? 'l' : 'b', env->hflags);
+ }
+}
+
+void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
+{
+ if (likely(env->pb[num] != value)) {
+ env->pb[num] = value;
+ /* Should be optimized */
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
+{
+ /* Bits 26 & 27 affect single-stepping. */
+ hreg_compute_hflags(env);
+ /* Bits 28 & 29 affect reset or shutdown. */
+ store_40x_dbcr0(env, val);
+}
+
+void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
+{
+ store_40x_sler(env, val);
+}
+#endif
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+target_ulong helper_clcs(CPUPPCState *env, uint32_t arg)
+{
+ switch (arg) {
+ case 0x0CUL:
+ /* Instruction cache line size */
+ return env->icache_line_size;
+ case 0x0DUL:
+ /* Data cache line size */
+ return env->dcache_line_size;
+ case 0x0EUL:
+ /* Minimum cache line size */
+ return (env->icache_line_size < env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ case 0x0FUL:
+ /* Maximum cache line size */
+ return (env->icache_line_size > env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ default:
+ /* Undefined */
+ return 0;
+ }
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+
+/*
+ * This code is lifted from MacOnLinux. It is called whenever THRM1,
+ * THRM2 or THRM3 is read and fixes up the values in such a way that
+ * MacOS will not hang. These registers exist on some 75x and 74xx
+ * processors.
+ */
+void helper_fixup_thrm(CPUPPCState *env)
+{
+ target_ulong v, t;
+ int i;
+
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) (((x) & 0x7f) << 23)
+#define THRM1_TID (1 << 2)
+#define THRM1_TIE (1 << 1)
+#define THRM1_V (1 << 0)
+#define THRM3_E (1 << 0)
+
+ if (!(env->spr[SPR_THRM3] & THRM3_E)) {
+ return;
+ }
+
+ /* Note: Thermal interrupts are unimplemented */
+ for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
+ v = env->spr[i];
+ if (!(v & THRM1_V)) {
+ continue;
+ }
+ v |= THRM1_TIV;
+ v &= ~THRM1_TIN;
+ t = v & THRM1_THRES(127);
+ if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
+ v |= THRM1_TIN;
+ }
+ if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
+ v |= THRM1_TIN;
+ }
+ env->spr[i] = v;
+ }
+}
diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c
new file mode 100644
index 000000000..f4985bae7
--- /dev/null
+++ b/target/ppc/mmu-book3s-v3.c
@@ -0,0 +1,42 @@
+/*
+ * PowerPC ISAV3 BookS emulation generic mmu helpers for qemu.
+ *
+ * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmu-hash64.h"
+#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
+
+bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry)
+{
+ uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB;
+ uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS;
+
+ /* Calculate number of entries */
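+ /* The partition table is 2^(PATS + 12) bytes, so it holds 2^(PATS + 12 - 4) 16-byte entries. */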
+ pats = 1ull << (pats + 12 - 4);
+ if (pats <= lpid) {
+ return false;
+ }
+
+ /* Grab entry */
+ patb += 16 * lpid;
+ entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
+ entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
+ return true;
+}
diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h
new file mode 100644
index 000000000..d6d5ed8f8
--- /dev/null
+++ b/target/ppc/mmu-book3s-v3.h
@@ -0,0 +1,117 @@
+/*
+ * PowerPC ISAV3 BookS emulation generic mmu definitions for qemu.
+ *
+ * Copyright (c) 2017 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_MMU_BOOK3S_V3_H
+#define PPC_MMU_BOOK3S_V3_H
+
+#include "mmu-hash64.h"
+#include "mmu-books.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/*
+ * Partition table definitions
+ */
+#define PTCR_PATB 0x0FFFFFFFFFFFF000ULL /* Partition Table Base */
+#define PTCR_PATS 0x000000000000001FULL /* Partition Table Size */
+
+/* Partition Table Entry Fields */
+#define PATE0_HR 0x8000000000000000
+
+/*
+ * WARNING: This field doesn't actually exist in the final version of
+ * the architecture and is unused by hardware. However, qemu uses it
+ * as an indication of a radix guest in the pseudo-PATB entry that it
+ * maintains for SPAPR guests and in the migration stream, so we need
+ * to keep it around.
+ */
+#define PATE1_GR 0x8000000000000000
+
+/* Process Table Entry */
+struct prtb_entry {
+ uint64_t prtbe0, prtbe1;
+};
+
+#ifdef TARGET_PPC64
+
+static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
+{
+ return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT);
+}
+
+bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid,
+ ppc_v3_pate_t *entry);
+
+/*
+ * The LPCR:HR bit is a shortcut that avoids having to
+ * dig out the partition table in the fast path. This is
+ * also how the HW uses it.
+ */
+static inline bool ppc64_v3_radix(PowerPCCPU *cpu)
+{
+ return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR);
+}
+
+static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
+{
+ uint64_t base;
+
+ if (cpu->vhyp) {
+ return 0;
+ }
+ if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+ ppc_v3_pate_t pate;
+
+ if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+ return 0;
+ }
+ base = pate.dw0;
+ } else {
+ base = cpu->env.spr[SPR_SDR1];
+ }
+ return base & SDR_64_HTABORG;
+}
+
+static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
+{
+ uint64_t base;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ return vhc->hpt_mask(cpu->vhyp);
+ }
+ if (cpu->env.mmu_model == POWERPC_MMU_3_00) {
+ ppc_v3_pate_t pate;
+
+ if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+ return 0;
+ }
+ base = pate.dw0;
+ } else {
+ base = cpu->env.spr[SPR_SDR1];
+ }
+ return (1ULL << ((base & SDR_64_HTABSIZE) + 18 - 7)) - 1;
+}
+
+#endif /* TARGET_PPC64 */
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* PPC_MMU_BOOK3S_V3_H */
diff --git a/target/ppc/mmu-books.h b/target/ppc/mmu-books.h
new file mode 100644
index 000000000..0d1255186
--- /dev/null
+++ b/target/ppc/mmu-books.h
@@ -0,0 +1,30 @@
+/*
+ * PowerPC BookS emulation generic mmu definitions for qemu.
+ *
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_MMU_BOOKS_H
+#define PPC_MMU_BOOKS_H
+
+/*
+ * These correspond to the mmu_idx values computed in
+ * hreg_compute_hflags_value. See the tables therein.
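+ * As checked below: bit 0 clear = problem state (PR), bit 1 = real
+ * mode (translation off), bit 2 = hypervisor state (HV).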
+ */
+static inline bool mmuidx_pr(int idx) { return !(idx & 1); }
+static inline bool mmuidx_real(int idx) { return idx & 2; }
+static inline bool mmuidx_hv(int idx) { return idx & 4; }
+#endif /* PPC_MMU_BOOKS_H */
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
new file mode 100644
index 000000000..3957aab2d
--- /dev/null
+++ b/target/ppc/mmu-hash32.c
@@ -0,0 +1,571 @@
+/*
+ * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "internal.h"
+#include "mmu-hash32.h"
+#include "mmu-books.h"
+#include "exec/log.h"
+
+/* #define DEBUG_BATS */
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+struct mmu_ctx_hash32 {
+ hwaddr raddr; /* Real address */
+ int prot; /* Protection bits */
+ int key; /* Access key */
+};
+
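+/*
+ * Translate the PP bits and access key into PAGE_* protection flags:
+ * with key == 0, pp == 3 is read-only and everything else read/write;
+ * with key == 1, pp == 0 forbids access, pp == 1 or 3 is read-only and
+ * pp == 2 read/write. NX clear additionally grants PAGE_EXEC.
+ */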
+static int ppc_hash32_pp_prot(int key, int pp, int nx)
+{
+ int prot;
+
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+
+ case 0x3:
+ prot = PAGE_READ;
+ break;
+
+ default:
+ abort();
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ prot = 0;
+ break;
+
+ case 0x1:
+ case 0x3:
+ prot = PAGE_READ;
+ break;
+
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+
+ default:
+ abort();
+ }
+ }
+ if (nx == 0) {
+ prot |= PAGE_EXEC;
+ }
+
+ return prot;
+}
+
+static int ppc_hash32_pte_prot(int mmu_idx,
+ target_ulong sr, ppc_hash_pte32_t pte)
+{
+ unsigned pp, key;
+
+ key = !!(mmuidx_pr(mmu_idx) ? (sr & SR32_KP) : (sr & SR32_KS));
+ pp = pte.pte1 & HPTE32_R_PP;
+
+ return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
+}
+
+static target_ulong hash32_bat_size(int mmu_idx,
+ target_ulong batu, target_ulong batl)
+{
+ if ((mmuidx_pr(mmu_idx) && !(batu & BATU32_VP))
+ || (!mmuidx_pr(mmu_idx) && !(batu & BATU32_VS))) {
+ return 0;
+ }
+
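+ /* BL, shifted into position, widens the mask of EA bits compared against BEPI. */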
+ return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
+}
+
+static int hash32_bat_prot(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ int pp, prot;
+
+ prot = 0;
+ pp = batl & BATL32_PP;
+ if (pp != 0) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ return prot;
+}
+
+static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ if (!(batl & BATL32_601_V)) {
+ return 0;
+ }
+
+ return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
+}
+
+static int hash32_bat_601_prot(int mmu_idx,
+ target_ulong batu, target_ulong batl)
+{
+ int key, pp;
+
+ pp = batu & BATU32_601_PP;
+ if (mmuidx_pr(mmu_idx) == 0) {
+ key = !!(batu & BATU32_601_KS);
+ } else {
+ key = !!(batu & BATU32_601_KP);
+ }
+ return ppc_hash32_pp_prot(key, pp, 0);
+}
+
+static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
+ MMUAccessType access_type, int *prot,
+ int mmu_idx)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong *BATlt, *BATut;
+ bool ifetch = access_type == MMU_INST_FETCH;
+ int i;
+
+ LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', ea);
+ if (ifetch) {
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ } else {
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ }
+ for (i = 0; i < env->nb_BATs; i++) {
+ target_ulong batu = BATut[i];
+ target_ulong batl = BATlt[i];
+ target_ulong mask;
+
+ if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
+ mask = hash32_bat_601_size(cpu, batu, batl);
+ } else {
+ mask = hash32_bat_size(mmu_idx, batu, batl);
+ }
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', i, ea, batu, batl);
+
+ if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
+ hwaddr raddr = (batl & mask) | (ea & ~mask);
+
+ if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
+ *prot = hash32_bat_601_prot(mmu_idx, batu, batl);
+ } else {
+ *prot = hash32_bat_prot(cpu, batu, batl);
+ }
+
+ return raddr & TARGET_PAGE_MASK;
+ }
+ }
+
+ /* No hit */
+#if defined(DEBUG_BATS)
+ if (qemu_log_enabled()) {
+ target_ulong *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+
+ LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
+ for (i = 0; i < 4; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & BATU32_BEPIU;
+ BEPIl = *BATu & BATU32_BEPIL;
+ bl = (*BATu & 0x00001FFC) << 15;
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, ifetch ? 'I' : 'D', i, ea,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+ }
+#endif
+
+ return -1;
+}
+
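+/*
+ * Direct-store (T=1) segments bypass the hash table entirely.  They
+ * are a legacy of the original POWER I/O architecture and permit only
+ * a few access types, so most accesses raise an interrupt instead of
+ * translating.
+ */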
+static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
+ target_ulong eaddr,
+ MMUAccessType access_type,
+ hwaddr *raddr, int *prot, int mmu_idx,
+ bool guest_visible)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ int key = !!(mmuidx_pr(mmu_idx) ? (sr & SR32_KP) : (sr & SR32_KS));
+
+ qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
+
+ if ((sr & 0x1FF00000) >> 20 == 0x07f) {
+ /*
+ * Memory-forced I/O controller interface access
+ *
+ * If T=1 and BUID=x'07F', the 601 performs a memory access
+ * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+ */
+ *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return true;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ /* No code fetch is allowed in direct-store areas */
+ if (guest_visible) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ }
+ return false;
+ }
+
+ /*
+     * When called from ppc_cpu_get_phys_page_debug, env->access_type is
+     * not set.  Assume ACCESS_INT in that case.
+ */
+ switch (guest_visible ? env->access_type : ACCESS_INT) {
+ case ACCESS_INT:
+        /* Integer load/store: the only access type allowed here */
+ break;
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = POWERPC_EXCP_ALIGN_FP;
+ env->spr[SPR_DAR] = eaddr;
+ return false;
+ case ACCESS_RES:
+ /* lwarx, ldarx or srwcx. */
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x06000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04000000;
+ }
+ return false;
+ case ACCESS_CACHE:
+ /*
+ * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+ *
+         * These should behave as no-ops.  Since the instruction already
+         * does nothing here, that's quite easy :-)
+ */
+ *raddr = eaddr;
+ return true;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x06100000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04100000;
+ }
+ return false;
+ default:
+ cpu_abort(cs, "ERROR: insn should not need address translation\n");
+ }
+
+ *prot = key ? PAGE_READ | PAGE_WRITE : PAGE_READ;
+ if (*prot & prot_for_access_type(access_type)) {
+ *raddr = eaddr;
+ return true;
+ }
+
+ if (guest_visible) {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ return false;
+}
+
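+/*
+ * Convert a hash value into a byte offset into the hash table.  Each
+ * PTEG is 64 bytes (8 HPTEs of 8 bytes each), so e.g. an illustrative
+ * hash of 0x123 selects the group at (0x123 * 64) & hpt_mask.
+ */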
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
+{
+ target_ulong mask = ppc_hash32_hpt_mask(cpu);
+
+ return (hash * HASH_PTEG_SIZE_32) & mask;
+}
+
+static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
+ bool secondary, target_ulong ptem,
+ ppc_hash_pte32_t *pte)
+{
+ hwaddr pte_offset = pteg_off;
+ target_ulong pte0, pte1;
+ int i;
+
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+ /*
+ * pte0 contains the valid bit and must be read before pte1,
+ * otherwise we might see an old pte1 with a new valid bit and
+ * thus an inconsistent hpte value
+ */
+ smp_rmb();
+ pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
+
+ if ((pte0 & HPTE32_V_VALID)
+ && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
+ && HPTE32_V_COMPARE(pte0, ptem)) {
+ pte->pte0 = pte0;
+ pte->pte1 = pte1;
+ return pte_offset;
+ }
+
+ pte_offset += HASH_PTE_SIZE_32;
+ }
+
+ return -1;
+}
+
+static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+ hwaddr offset = pte_offset + 6;
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+ hwaddr offset = pte_offset + 7;
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
+static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
+ target_ulong sr, target_ulong eaddr,
+ ppc_hash_pte32_t *pte)
+{
+ hwaddr pteg_off, pte_offset;
+ hwaddr hash;
+ uint32_t vsid, pgidx, ptem;
+
+ vsid = sr & SR32_VSID;
+ pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
+ hash = vsid ^ pgidx;
+ ptem = (vsid << 7) | (pgidx >> 10);
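+    /*
+     * The primary hash is simply vsid ^ pgidx; the secondary lookup
+     * below uses its one's complement.  ptem packs the VSID and the
+     * abbreviated page index (API, the top 6 bits of pgidx) in HPTE
+     * word 0 layout for HPTE32_V_COMPARE().
+     */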
+
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
+ " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
+
+ /* Primary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=%" PRIx32 " ptem=%" PRIx32
+ " hash=" TARGET_FMT_plx "\n",
+ ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
+ vsid, ptem, hash);
+ pteg_off = get_pteg_offset32(cpu, hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
+ if (pte_offset == -1) {
+ /* Secondary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=%" PRIx32 " api=%" PRIx32
+ " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
+ ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
+ pteg_off = get_pteg_offset32(cpu, ~hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
+ }
+
+ return pte_offset;
+}
+
+static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
+ target_ulong eaddr)
+{
+ hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
+ hwaddr mask = ~TARGET_PAGE_MASK;
+
+ return (rpn & ~mask) | (eaddr & mask);
+}
+
+bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong sr;
+ hwaddr pte_offset;
+ ppc_hash_pte32_t pte;
+ int prot;
+ int need_prot;
+ hwaddr raddr;
+
+ /* There are no hash32 large pages. */
+ *psizep = TARGET_PAGE_BITS;
+
+ /* 1. Handle real mode accesses */
+ if (mmuidx_real(mmu_idx)) {
+ /* Translation is off */
+ *raddrp = eaddr;
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return true;
+ }
+
+ need_prot = prot_for_access_type(access_type);
+
+ /* 2. Check Block Address Translation entries (BATs) */
+ if (env->nb_BATs != 0) {
+ raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, protp, mmu_idx);
+ if (raddr != -1) {
+ if (need_prot & ~*protp) {
+ if (guest_visible) {
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ }
+ return false;
+ }
+ *raddrp = raddr;
+ return true;
+ }
+ }
+
+ /* 3. Look up the Segment Register */
+ sr = env->sr[eaddr >> 28];
+
+ /* 4. Handle direct store segments */
+ if (sr & SR32_T) {
+ return ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
+ raddrp, protp, mmu_idx, guest_visible);
+ }
+
+ /* 5. Check for segment level no-execute violation */
+ if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
+ if (guest_visible) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ }
+ return false;
+ }
+
+ /* 6. Locate the PTE in the hash table */
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
+ if (pte_offset == -1) {
+ if (guest_visible) {
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x40000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x42000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x40000000;
+ }
+ }
+ }
+ return false;
+ }
+ qemu_log_mask(CPU_LOG_MMU,
+ "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
+
+ /* 7. Check access permissions */
+
+ prot = ppc_hash32_pte_prot(mmu_idx, sr, pte);
+
+ if (need_prot & ~prot) {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ if (guest_visible) {
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ }
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+
+ /* 8. Update PTE referenced and changed bits if necessary */
+
+ if (!(pte.pte1 & HPTE32_R_R)) {
+ ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
+ }
+ if (!(pte.pte1 & HPTE32_R_C)) {
+ if (access_type == MMU_DATA_STORE) {
+ ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
+ } else {
+ /*
+ * Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit
+ */
+ prot &= ~PAGE_WRITE;
+ }
+ }
+
+ /* 9. Determine the real address from the PTE */
+
+ *raddrp = ppc_hash32_pte_raddr(sr, pte, eaddr);
+ *protp = prot;
+ return true;
+}
diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h
new file mode 100644
index 000000000..3892b693d
--- /dev/null
+++ b/target/ppc/mmu-hash32.h
@@ -0,0 +1,120 @@
+#ifndef MMU_HASH32_H
+#define MMU_HASH32_H
+
+#ifndef CONFIG_USER_ONLY
+
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
+bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible);
+
+/*
+ * Segment register definitions
+ */
+
+#define SR32_T 0x80000000
+#define SR32_KS 0x40000000
+#define SR32_KP 0x20000000
+#define SR32_NX 0x10000000
+#define SR32_VSID 0x00ffffff
+
+/*
+ * Block Address Translation (BAT) definitions
+ */
+
+#define BATU32_BEPIU 0xf0000000
+#define BATU32_BEPIL 0x0ffe0000
+#define BATU32_BEPI 0xfffe0000
+#define BATU32_BL 0x00001ffc
+#define BATU32_VS 0x00000002
+#define BATU32_VP 0x00000001
+
+
+#define BATL32_BRPN 0xfffe0000
+#define BATL32_WIMG 0x00000078
+#define BATL32_PP 0x00000003
+
+/* PowerPC 601 has slightly different BAT registers */
+
+#define BATU32_601_KS 0x00000008
+#define BATU32_601_KP 0x00000004
+#define BATU32_601_PP 0x00000003
+
+#define BATL32_601_V 0x00000040
+#define BATL32_601_BL 0x0000003f
+
+/*
+ * Hash page table definitions
+ */
+#define SDR_32_HTABORG 0xFFFF0000UL
+#define SDR_32_HTABMASK 0x000001FFUL
+
+#define HPTES_PER_GROUP 8
+#define HASH_PTE_SIZE_32 8
+#define HASH_PTEG_SIZE_32 (HASH_PTE_SIZE_32 * HPTES_PER_GROUP)
+
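+/*
+ * Each HPTE is a pair of 32-bit words: word 0 holds the valid bit,
+ * the VSID, the H (secondary hash) flag and the abbreviated page
+ * index (API); word 1 holds the RPN plus the R, C, WIMG and PP bits.
+ */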
+#define HPTE32_V_VALID 0x80000000
+#define HPTE32_V_VSID 0x7fffff80
+#define HPTE32_V_SECONDARY 0x00000040
+#define HPTE32_V_API 0x0000003f
+#define HPTE32_V_COMPARE(x, y) (!(((x) ^ (y)) & 0x7fffffbf))
+
+#define HPTE32_R_RPN 0xfffff000
+#define HPTE32_R_R 0x00000100
+#define HPTE32_R_C 0x00000080
+#define HPTE32_R_W 0x00000040
+#define HPTE32_R_I 0x00000020
+#define HPTE32_R_M 0x00000010
+#define HPTE32_R_G 0x00000008
+#define HPTE32_R_WIMG 0x00000078
+#define HPTE32_R_PP 0x00000003
+
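+/*
+ * SDR1 supplies the hash table geometry: HTABORG (the top 16 bits) is
+ * the physical base, and HTABMASK (the low 9 bits) extends the offset
+ * mask, allowing table sizes from 64 KiB up to 32 MiB.
+ */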
+static inline hwaddr ppc_hash32_hpt_base(PowerPCCPU *cpu)
+{
+ return cpu->env.spr[SPR_SDR1] & SDR_32_HTABORG;
+}
+
+static inline hwaddr ppc_hash32_hpt_mask(PowerPCCPU *cpu)
+{
+ return ((cpu->env.spr[SPR_SDR1] & SDR_32_HTABMASK) << 16) | 0xFFFF;
+}
+
+static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
+ hwaddr pte_offset)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+
+ return ldl_phys(CPU(cpu)->as, base + pte_offset);
+}
+
+static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
+ hwaddr pte_offset)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+
+ return ldl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2);
+}
+
+static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
+ hwaddr pte_offset, target_ulong pte0)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+
+ stl_phys(CPU(cpu)->as, base + pte_offset, pte0);
+}
+
+static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
+ hwaddr pte_offset, target_ulong pte1)
+{
+ target_ulong base = ppc_hash32_hpt_base(cpu);
+
+ stl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
+}
+
+typedef struct {
+ uint32_t pte0, pte1;
+} ppc_hash_pte32_t;
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* MMU_HASH32_H */
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
new file mode 100644
index 000000000..da9fe99ff
--- /dev/null
+++ b/target/ppc/mmu-hash64.c
@@ -0,0 +1,1178 @@
+/*
+ * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/error-report.h"
+#include "qemu/qemu-print.h"
+#include "sysemu/hw_accel.h"
+#include "kvm_ppc.h"
+#include "mmu-hash64.h"
+#include "exec/log.h"
+#include "hw/hw.h"
+#include "internal.h"
+#include "mmu-book3s-v3.h"
+#include "helper_regs.h"
+
+#ifdef CONFIG_TCG
+#include "exec/helper-proto.h"
+#endif
+
+/* #define DEBUG_SLB */
+
+#ifdef DEBUG_SLB
+# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_SLB(...) do { } while (0)
+#endif
+
+/*
+ * SLB handling
+ */
+
+static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
+{
+ CPUPPCState *env = &cpu->env;
+ uint64_t esid_256M, esid_1T;
+ int n;
+
+ LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
+
+ esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
+ esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
+
+ for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
+ ppc_slb_t *slb = &env->slb[n];
+
+ LOG_SLB("%s: slot %d %016" PRIx64 " %016"
+ PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
+ /*
+ * We check for 1T matches on all MMUs here - if the MMU
+ * doesn't have 1T segment support, we will have prevented 1T
+ * entries from being inserted in the slbmte code.
+ */
+ if (((slb->esid == esid_256M) &&
+ ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
+ || ((slb->esid == esid_1T) &&
+ ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
+ return slb;
+ }
+ }
+
+ return NULL;
+}
+
+void dump_slb(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+ uint64_t slbe, slbv;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ qemu_printf("SLB\tESID\t\t\tVSID\n");
+ for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
+ slbe = env->slb[i].esid;
+ slbv = env->slb[i].vsid;
+ if (slbe == 0 && slbv == 0) {
+ continue;
+ }
+ qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
+ i, slbe, slbv);
+ }
+}
+
+#ifdef CONFIG_TCG
+void helper_slbia(CPUPPCState *env, uint32_t ih)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ int starting_entry;
+ int n;
+
+ /*
+     * slbia must always flush the whole TLB (which is equivalent to the
+     * ERAT in the ppc architecture). Matching on SLB_ESID_V is not good
+     * enough, because slbmte can overwrite a valid SLB without flushing
+     * its lookaside information.
+ *
+     * It would be possible to keep the TLB in sync with the SLB by flushing
+ * when a valid entry is overwritten by slbmte, and therefore slbia would
+ * not have to flush unless it evicts a valid SLB entry. However it is
+ * expected that slbmte is more common than slbia, and slbia is usually
+ * going to evict valid SLB entries, so that tradeoff is unlikely to be a
+ * good one.
+ *
+ * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
+ * the same SLB entries (everything but entry 0), but differ in what
+ * "lookaside information" is invalidated. TCG can ignore this and flush
+ * everything.
+ *
+ * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
+ * invalidated.
+ */
+
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+
+ starting_entry = 1; /* default for IH=0,1,2,6 */
+
+ if (env->mmu_model == POWERPC_MMU_3_00) {
+ switch (ih) {
+ case 0x7:
+ /* invalidate no SLBs, but all lookaside information */
+ return;
+
+ case 0x3:
+ case 0x4:
+ /* also considers SLB entry 0 */
+ starting_entry = 0;
+ break;
+
+ case 0x5:
+ /* treat undefined values as ih==0, and warn */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "slbia undefined IH field %u.\n", ih);
+ break;
+
+ default:
+ /* 0,1,2,6 */
+ break;
+ }
+ }
+
+ for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
+ ppc_slb_t *slb = &env->slb[n];
+
+ if (!(slb->esid & SLB_ESID_V)) {
+ continue;
+ }
+ if (env->mmu_model == POWERPC_MMU_3_00) {
+ if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
+ /* preserves entries with a class value of 0 */
+ continue;
+ }
+ }
+
+ slb->esid &= ~SLB_ESID_V;
+ }
+}
+
+static void __helper_slbie(CPUPPCState *env, target_ulong addr,
+ target_ulong global)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_slb_t *slb;
+
+ slb = slb_lookup(cpu, addr);
+ if (!slb) {
+ return;
+ }
+
+ if (slb->esid & SLB_ESID_V) {
+ slb->esid &= ~SLB_ESID_V;
+
+ /*
+         * XXX: given that the segment size is 256 MB or 1 TB,
+ * and we still don't have a tlb_flush_mask(env, n, mask)
+ * in QEMU, we just invalidate all TLBs
+ */
+ env->tlb_need_flush |=
+ (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
+ }
+}
+
+void helper_slbie(CPUPPCState *env, target_ulong addr)
+{
+ __helper_slbie(env, addr, false);
+}
+
+void helper_slbieg(CPUPPCState *env, target_ulong addr)
+{
+ __helper_slbie(env, addr, true);
+}
+#endif
+
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb = &env->slb[slot];
+ const PPCHash64SegmentPageSizes *sps = NULL;
+ int i;
+
+ if (slot >= cpu->hash64_opts->slb_size) {
+ return -1; /* Bad slot number */
+ }
+ if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+ return -1; /* Reserved bits set */
+ }
+ if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+ return -1; /* Bad segment size */
+ }
+ if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
+ return -1; /* 1T segment on MMU that doesn't support it */
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
+ " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
+ slot, esid, vsid);
+ return -1;
+ }
+
+ slb->esid = esid;
+ slb->vsid = vsid;
+ slb->sps = sps;
+
+ LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
+ " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
+ slb->esid, slb->vsid);
+
+ return 0;
+}
+
+#ifdef CONFIG_TCG
+static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ int slot = rb & 0xfff;
+ ppc_slb_t *slb = &env->slb[slot];
+
+ if (slot >= cpu->hash64_opts->slb_size) {
+ return -1;
+ }
+
+ *rt = slb->esid;
+ return 0;
+}
+
+static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ int slot = rb & 0xfff;
+ ppc_slb_t *slb = &env->slb[slot];
+
+ if (slot >= cpu->hash64_opts->slb_size) {
+ return -1;
+ }
+
+ *rt = slb->vsid;
+ return 0;
+}
+
+static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb;
+
+ if (!msr_is_64bit(env, env->msr)) {
+ rb &= 0xffffffff;
+ }
+ slb = slb_lookup(cpu, rb);
+ if (slb == NULL) {
+ *rt = (target_ulong)-1ul;
+ } else {
+ *rt = slb->vsid;
+ }
+ return 0;
+}
+
+void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+}
+
+target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+
+target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+
+target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+#endif
+
+/* Check No-Execute or Guarded Storage */
+static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
+ ppc_hash_pte64_t pte)
+{
+ /* Exec permissions CANNOT take away read or write permissions */
+ return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
+ PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+}
+
+/* Check Basic Storage Protection */
+static int ppc_hash64_pte_prot(int mmu_idx,
+ ppc_slb_t *slb, ppc_hash_pte64_t pte)
+{
+ unsigned pp, key;
+ /*
+ * Some pp bit combinations have undefined behaviour, so default
+ * to no access in those cases
+ */
+ int prot = 0;
+
+ key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
+ : (slb->vsid & SLB_VSID_KS));
+ pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
+
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+
+ case 0x3:
+ case 0x6:
+ prot = PAGE_READ | PAGE_EXEC;
+ break;
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ case 0x6:
+ break;
+
+ case 0x1:
+ case 0x3:
+ prot = PAGE_READ | PAGE_EXEC;
+ break;
+
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ break;
+ }
+ }
+
+ return prot;
+}
+
+/* Check the instruction access permissions specified in the IAMR */
+static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
+{
+ CPUPPCState *env = &cpu->env;
+ int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
+
+ /*
+ * An instruction fetch is permitted if the IAMR bit is 0.
+ * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
+     * can only take away EXEC permission, not READ or WRITE permission.
+     * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
+     * EXEC permission is allowed.
+ */
+ return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+}
+
+static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
+{
+ CPUPPCState *env = &cpu->env;
+ int key, amrbits;
+ int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ /* Only recent MMUs implement Virtual Page Class Key Protection */
+ if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
+ return prot;
+ }
+
+ key = HPTE64_R_KEY(pte.pte1);
+ amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
+
+ /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
+ /* env->spr[SPR_AMR]); */
+
+ /*
+ * A store is permitted if the AMR bit is 0. Remove write
+ * protection if it is set.
+ */
+ if (amrbits & 0x2) {
+ prot &= ~PAGE_WRITE;
+ }
+ /*
+ * A load is permitted if the AMR bit is 0. Remove read
+ * protection if it is set.
+ */
+ if (amrbits & 0x1) {
+ prot &= ~PAGE_READ;
+ }
+
+ switch (env->mmu_model) {
+ /*
+     * MMU version 2.07 and later support the IAMR.
+     * ppc_hash64_iamr_prot() returns a mask without PAGE_EXEC when the
+     * IAMR denies instruction access, so ANDing it into prot clears the
+     * EXEC bit; otherwise prot is left unchanged.
+ */
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_3_00:
+ prot &= ppc_hash64_iamr_prot(cpu, key);
+ break;
+ default:
+ break;
+ }
+
+ return prot;
+}
+
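+/*
+ * Map 'n' consecutive HPTEs starting at index 'ptex' for reading.
+ * Under a virtual hypervisor the hash table is owned by the host side
+ * and access goes through the vhyp hooks; otherwise it is mapped
+ * straight out of guest physical memory.
+ */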
+const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
+ hwaddr ptex, int n)
+{
+ hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
+ hwaddr base;
+ hwaddr plen = n * HASH_PTE_SIZE_64;
+ const ppc_hash_pte64_t *hptes;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ return vhc->map_hptes(cpu->vhyp, ptex, n);
+ }
+ base = ppc_hash64_hpt_base(cpu);
+
+ if (!base) {
+ return NULL;
+ }
+
+ hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
+ MEMTXATTRS_UNSPECIFIED);
+ if (plen < (n * HASH_PTE_SIZE_64)) {
+ hw_error("%s: Unable to map all requested HPTEs\n", __func__);
+ }
+ return hptes;
+}
+
+void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n)
+{
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
+ return;
+ }
+
+ address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
+ false, n * HASH_PTE_SIZE_64);
+}
+
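+/*
+ * Recover the page shift encoded in a PTE: 4 kiB pages are flagged by
+ * a clear L bit, while large pages embed their size encoding in the
+ * low RPN bits of pte1, matched against the segment's advertised
+ * encodings.
+ */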
+static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ if (sps->page_shift != 12) {
+            /* 4kiB page in a non-4kiB segment */
+ return 0;
+ }
+ /* Normal 4kiB page */
+ return 12;
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64PageSize *ps = &sps->enc[i];
+ uint64_t mask;
+
+ if (!ps->page_shift) {
+ break;
+ }
+
+ if (ps->page_shift == 12) {
+ /* L bit is set so this can't be a 4kiB page */
+ continue;
+ }
+
+ mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+ if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+ return ps->page_shift;
+ }
+ }
+
+ return 0; /* Bad page size encoding */
+}
+
+static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
+{
+ /* Insert B into pte0 */
+ *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
+ ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
+ (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
+
+ /* Remove B from pte1 */
+ *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
+}
+
+
+static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
+ const PPCHash64SegmentPageSizes *sps,
+ target_ulong ptem,
+ ppc_hash_pte64_t *pte, unsigned *pshift)
+{
+ int i;
+ const ppc_hash_pte64_t *pteg;
+ target_ulong pte0, pte1;
+ target_ulong ptex;
+
+ ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
+ pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
+ if (!pteg) {
+ return -1;
+ }
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ pte0 = ppc_hash64_hpte0(cpu, pteg, i);
+ /*
+ * pte0 contains the valid bit and must be read before pte1,
+ * otherwise we might see an old pte1 with a new valid bit and
+ * thus an inconsistent hpte value
+ */
+ smp_rmb();
+ pte1 = ppc_hash64_hpte1(cpu, pteg, i);
+
+ /* Convert format if necessary */
+ if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
+ ppc64_v3_new_to_old_hpte(&pte0, &pte1);
+ }
+
+ /* This compares V, B, H (secondary) and the AVPN */
+ if (HPTE64_V_COMPARE(pte0, ptem)) {
+ *pshift = hpte_page_shift(sps, pte0, pte1);
+ /*
+ * If there is no match, ignore the PTE, it could simply
+ * be for a different segment size encoding and the
+ * architecture specifies we should not match. Linux will
+ * potentially leave behind PTEs for the wrong base page
+ * size when demoting segments.
+ */
+ if (*pshift == 0) {
+ continue;
+ }
+ /*
+ * We don't do anything with pshift yet as qemu TLB only
+ * deals with 4K pages anyway
+ */
+ pte->pte0 = pte0;
+ pte->pte1 = pte1;
+ ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
+ return ptex + i;
+ }
+ }
+ ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
+ /*
+ * We didn't find a valid entry.
+ */
+ return -1;
+}
+
+static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
+ ppc_slb_t *slb, target_ulong eaddr,
+ ppc_hash_pte64_t *pte, unsigned *pshift)
+{
+ CPUPPCState *env = &cpu->env;
+ hwaddr hash, ptex;
+ uint64_t vsid, epnmask, epn, ptem;
+ const PPCHash64SegmentPageSizes *sps = slb->sps;
+
+ /*
+ * The SLB store path should prevent any bad page size encodings
+ * getting in there, so:
+ */
+ assert(sps);
+
+ /* If ISL is set in LPCR we need to clamp the page size to 4K */
+ if (env->spr[SPR_LPCR] & LPCR_ISL) {
+ /* We assume that when using TCG, 4k is first entry of SPS */
+ sps = &cpu->hash64_opts->sps[0];
+ assert(sps->page_shift == 12);
+ }
+
+ epnmask = ~((1ULL << sps->page_shift) - 1);
+
+ if (slb->vsid & SLB_VSID_B) {
+ /* 1TB segment */
+ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
+ epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
+ hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
+ } else {
+ /* 256M segment */
+ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
+ epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
+ hash = vsid ^ (epn >> sps->page_shift);
+ }
+ ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
+ ptem |= HPTE64_V_VALID;
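+    /*
+     * ptem now carries B, the AVPN and the V bit, i.e. exactly the
+     * word 0 fields that HPTE64_V_COMPARE() checks in a candidate
+     * HPTE; the secondary search below additionally sets the H bit.
+     */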
+
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU,
+ "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
+
+ /* Primary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU,
+ "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
+ " hash=" TARGET_FMT_plx "\n",
+ ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
+ vsid, ptem, hash);
+ ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
+
+ if (ptex == -1) {
+ /* Secondary PTEG lookup */
+ ptem |= HPTE64_V_SECONDARY;
+ qemu_log_mask(CPU_LOG_MMU,
+ "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
+ " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
+ ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
+
+ ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
+ }
+
+ return ptex;
+}
+
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ return 12;
+ }
+
+ /*
+ * The encodings in env->sps need to be carefully chosen so that
+ * this gives an unambiguous result.
+ */
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
+ unsigned shift;
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ shift = hpte_page_shift(sps, pte0, pte1);
+ if (shift) {
+ return shift;
+ }
+ }
+
+ return 0;
+}
+
+static bool ppc_hash64_use_vrma(CPUPPCState *env)
+{
+ switch (env->mmu_model) {
+ case POWERPC_MMU_3_00:
+ /*
+ * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
+ * register no longer exist
+ */
+ return true;
+
+ default:
+ return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ }
+}
+
+static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t error_code)
+{
+ CPUPPCState *env = &POWERPC_CPU(cs)->env;
+ bool vpm;
+
+ if (!mmuidx_real(mmu_idx)) {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
+ } else {
+ vpm = ppc_hash64_use_vrma(env);
+ }
+ if (vpm && !mmuidx_hv(mmu_idx)) {
+ cs->exception_index = POWERPC_EXCP_HISI;
+ } else {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ }
+ env->error_code = error_code;
+}
+
+static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t dsisr)
+{
+ CPUPPCState *env = &POWERPC_CPU(cs)->env;
+ bool vpm;
+
+ if (!mmuidx_real(mmu_idx)) {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
+ } else {
+ vpm = ppc_hash64_use_vrma(env);
+ }
+ if (vpm && !mmuidx_hv(mmu_idx)) {
+ cs->exception_index = POWERPC_EXCP_HDSI;
+ env->spr[SPR_HDAR] = dar;
+ env->spr[SPR_HDSISR] = dsisr;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->spr[SPR_DAR] = dar;
+ env->spr[SPR_DSISR] = dsisr;
+ }
+ env->error_code = 0;
+}
+
+
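+/*
+ * Set the Referenced/Changed bits with single byte stores, mirroring
+ * hardware behaviour: R is bit 0x100 of dword 1, i.e. the low bit of
+ * the byte at DW1+6, while C (see ppc_hash64_set_c below) is bit 0x80,
+ * the top bit of the byte at DW1+7.
+ */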
+static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
+ return;
+ }
+ base = ppc_hash64_hpt_base(cpu);
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
+}
+
+static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
+{
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
+
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
+ return;
+ }
+ base = ppc_hash64_hpt_base(cpu);
+
+ /* The HW performs a non-atomic byte update */
+ stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
+}
+
+static target_ulong rmls_limit(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ /*
+ * In theory the meanings of RMLS values are implementation
+ * dependent. In practice, this seems to have been the set from
+ * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
+ *
+ * Unsupported values mean the OS has shot itself in the
+ * foot. Return a 0-sized RMA in this case, which we expect
+ * to trigger an immediate DSI or ISI
+ */
+ static const target_ulong rma_sizes[16] = {
+ [0] = 256 * GiB,
+ [1] = 16 * GiB,
+ [2] = 1 * GiB,
+ [3] = 64 * MiB,
+ [4] = 256 * MiB,
+ [7] = 128 * MiB,
+ [8] = 32 * MiB,
+ };
+ target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
+
+ return rma_sizes[rmls];
+}
+
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong lpcr = env->spr[SPR_LPCR];
+ uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+ int i;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+ slb->esid = SLB_ESID_V;
+ slb->vsid = vsid;
+ slb->sps = sps;
+ return 0;
+ }
+ }
+
+ error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
+ TARGET_FMT_lx, lpcr);
+
+ return -1;
+}
+
+bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t vrma_slbe;
+ ppc_slb_t *slb;
+ unsigned apshift;
+ hwaddr ptex;
+ ppc_hash_pte64_t pte;
+ int exec_prot, pp_prot, amr_prot, prot;
+ int need_prot;
+ hwaddr raddr;
+
+ /*
+ * Note on LPCR usage: 970 uses HID4, but our special variant of
+ * store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * Similarly we filter unimplemented bits when storing into LPCR
+ * depending on the MMU version. This code can thus just use the
+ * LPCR "as-is".
+ */
+
+ /* 1. Handle real mode accesses */
+ if (mmuidx_real(mmu_idx)) {
+ /*
+ * Translation is supposedly "off", but in real mode the top 4
+ * effective address bits are (mostly) ignored
+ */
+ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+ if (cpu->vhyp) {
+ /*
+ * In virtual hypervisor mode, there's nothing to do:
+ * EA == GPA == qemu guest address
+ */
+ } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if (!(eaddr >> 63)) {
+ raddr |= env->spr[SPR_HRMOR];
+ }
+ } else if (ppc_hash64_use_vrma(env)) {
+ /* Emulated VRMA mode */
+ slb = &vrma_slbe;
+ if (build_vrma_slbe(cpu, slb) != 0) {
+ /* Invalid VRMA setup, machine check */
+ if (guest_visible) {
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ }
+ return false;
+ }
+
+ goto skip_slb_search;
+ } else {
+ target_ulong limit = rmls_limit(cpu);
+
+ /* Emulated old-style RMO mode, bounds check against RMLS */
+ if (raddr >= limit) {
+ if (!guest_visible) {
+ return false;
+ }
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ ppc_hash64_set_isi(cs, mmu_idx, SRR1_PROTFAULT);
+ break;
+ case MMU_DATA_LOAD:
+ ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_PROTFAULT);
+ break;
+ case MMU_DATA_STORE:
+ ppc_hash64_set_dsi(cs, mmu_idx, eaddr,
+ DSISR_PROTFAULT | DSISR_ISSTORE);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
+ }
+
+ raddr |= env->spr[SPR_RMOR];
+ }
+
+ *raddrp = raddr;
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ *psizep = TARGET_PAGE_BITS;
+ return true;
+ }
+
+ /* 2. Translation is on, so look up the SLB */
+ slb = slb_lookup(cpu, eaddr);
+ if (!slb) {
+ /* No entry found, check if in-memory segment tables are in use */
+ if (ppc64_use_proc_tbl(cpu)) {
+ /* TODO - Unsupported */
+ error_report("Segment Table Support Unimplemented");
+ exit(1);
+ }
+ /* Segment still not found, generate the appropriate interrupt */
+ if (!guest_visible) {
+ return false;
+ }
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ cs->exception_index = POWERPC_EXCP_ISEG;
+ env->error_code = 0;
+ break;
+ case MMU_DATA_LOAD:
+ case MMU_DATA_STORE:
+ cs->exception_index = POWERPC_EXCP_DSEG;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
+ }
+
+ skip_slb_search:
+
+ /* 3. Check for segment level no-execute violation */
+ if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
+ if (guest_visible) {
+ ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOEXEC_GUARD);
+ }
+ return false;
+ }
+
+ /* 4. Locate the PTE in the hash table */
+ ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
+ if (ptex == -1) {
+ if (!guest_visible) {
+ return false;
+ }
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOPTE);
+ break;
+ case MMU_DATA_LOAD:
+ ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE);
+ break;
+ case MMU_DATA_STORE:
+ ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
+ }
+ qemu_log_mask(CPU_LOG_MMU,
+ "found PTE at index %08" HWADDR_PRIx "\n", ptex);
+
+ /* 5. Check access permissions */
+
+ exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
+ pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
+ amr_prot = ppc_hash64_amr_prot(cpu, pte);
+ prot = exec_prot & pp_prot & amr_prot;
+
+ need_prot = prot_for_access_type(access_type);
+ if (need_prot & ~prot) {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ if (!guest_visible) {
+ return false;
+ }
+ if (access_type == MMU_INST_FETCH) {
+ int srr1 = 0;
+ if (PAGE_EXEC & ~exec_prot) {
+ srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
+ } else if (PAGE_EXEC & ~pp_prot) {
+ srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
+ }
+ if (PAGE_EXEC & ~amr_prot) {
+ srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
+ }
+ ppc_hash64_set_isi(cs, mmu_idx, srr1);
+ } else {
+ int dsisr = 0;
+ if (need_prot & ~pp_prot) {
+ dsisr |= DSISR_PROTFAULT;
+ }
+ if (access_type == MMU_DATA_STORE) {
+ dsisr |= DSISR_ISSTORE;
+ }
+ if (need_prot & ~amr_prot) {
+ dsisr |= DSISR_AMR;
+ }
+ ppc_hash64_set_dsi(cs, mmu_idx, eaddr, dsisr);
+ }
+ return false;
+ }
+
+    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
+
+ /* 6. Update PTE referenced and changed bits if necessary */
+
+ if (!(pte.pte1 & HPTE64_R_R)) {
+ ppc_hash64_set_r(cpu, ptex, pte.pte1);
+ }
+ if (!(pte.pte1 & HPTE64_R_C)) {
+ if (access_type == MMU_DATA_STORE) {
+ ppc_hash64_set_c(cpu, ptex, pte.pte1);
+ } else {
+ /*
+ * Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit
+ */
+ prot &= ~PAGE_WRITE;
+ }
+ }
+
+ /* 7. Determine the real address from the PTE */
+
+ *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
+ *protp = prot;
+ *psizep = apshift;
+ return true;
+}
+
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
+ target_ulong pte0, target_ulong pte1)
+{
+ /*
+     * XXX: given that there are too many segments to
+ * invalidate, and we still don't have a tlb_flush_mask(env, n,
+ * mask) in QEMU, we just invalidate all TLBs
+ */
+ cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
+}
+
+#ifdef CONFIG_TCG
+void helper_store_lpcr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ ppc_store_lpcr(cpu, val);
+}
+#endif
+
+void ppc_hash64_init(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ if (!pcc->hash64_opts) {
+ assert(!mmu_is_64bit(env->mmu_model));
+ return;
+ }
+
+ cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
+}
+
+void ppc_hash64_finalize(PowerPCCPU *cpu)
+{
+ g_free(cpu->hash64_opts);
+}
+
+const PPCHash64Options ppc_hash64_opts_basic = {
+ .flags = 0,
+ .slb_size = 64,
+ .sps = {
+ { .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 } }
+ },
+ { .page_shift = 24, /* 16M */
+ .slb_enc = 0x100,
+ .enc = { { .page_shift = 24, .pte_enc = 0 } }
+ },
+ },
+};
+
+const PPCHash64Options ppc_hash64_opts_POWER7 = {
+ .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
+ .slb_size = 32,
+ .sps = {
+ {
+ .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 },
+ { .page_shift = 16, .pte_enc = 0x7 },
+ { .page_shift = 24, .pte_enc = 0x38 }, },
+ },
+ {
+ .page_shift = 16, /* 64K */
+ .slb_enc = SLB_VSID_64K,
+ .enc = { { .page_shift = 16, .pte_enc = 0x1 },
+ { .page_shift = 24, .pte_enc = 0x8 }, },
+ },
+ {
+ .page_shift = 24, /* 16M */
+ .slb_enc = SLB_VSID_16M,
+ .enc = { { .page_shift = 24, .pte_enc = 0 }, },
+ },
+ {
+ .page_shift = 34, /* 16G */
+ .slb_enc = SLB_VSID_16G,
+ .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
+ },
+ }
+};
+
+
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
new file mode 100644
index 000000000..1496955d3
--- /dev/null
+++ b/target/ppc/mmu-hash64.h
@@ -0,0 +1,175 @@
+#ifndef MMU_HASH64_H
+#define MMU_HASH64_H
+
+#ifndef CONFIG_USER_ONLY
+
+#ifdef TARGET_PPC64
+void dump_slb(PowerPCCPU *cpu);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid);
+bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible);
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1);
+void ppc_hash64_init(PowerPCCPU *cpu);
+void ppc_hash64_finalize(PowerPCCPU *cpu);
+#endif
+
+/*
+ * SLB definitions
+ */
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_ESID 0xFFFFFFFFF0000000ULL
+#define SLB_ESID_V 0x0000000008000000ULL /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT 12
+#define SLB_VSID_SHIFT_1T 24
+#define SLB_VSID_SSIZE_SHIFT 62
+#define SLB_VSID_B 0xc000000000000000ULL
+#define SLB_VSID_B_256M 0x0000000000000000ULL
+#define SLB_VSID_B_1T 0x4000000000000000ULL
+#define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL
+#define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
+#define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID)
+#define SLB_VSID_KS 0x0000000000000800ULL
+#define SLB_VSID_KP 0x0000000000000400ULL
+#define SLB_VSID_N 0x0000000000000200ULL /* no-execute */
+#define SLB_VSID_L 0x0000000000000100ULL
+#define SLB_VSID_C 0x0000000000000080ULL /* class */
+#define SLB_VSID_LP 0x0000000000000030ULL
+#define SLB_VSID_ATTR 0x0000000000000FFFULL
+#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
+#define SLB_VSID_4K 0x0000000000000000ULL
+#define SLB_VSID_64K 0x0000000000000110ULL
+#define SLB_VSID_16M 0x0000000000000100ULL
+#define SLB_VSID_16G 0x0000000000000120ULL
+
+/*
+ * Hash page table definitions
+ */
+
+#define SDR_64_HTABORG 0x0FFFFFFFFFFC0000ULL
+#define SDR_64_HTABSIZE 0x000000000000001FULL
+
+#define PATE0_HTABORG 0x0FFFFFFFFFFC0000ULL
+#define HPTES_PER_GROUP 8
+#define HASH_PTE_SIZE_64 16
+#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
+
+#define HPTE64_V_SSIZE SLB_VSID_B
+#define HPTE64_V_SSIZE_256M SLB_VSID_B_256M
+#define HPTE64_V_SSIZE_1T SLB_VSID_B_1T
+#define HPTE64_V_SSIZE_SHIFT 62
+#define HPTE64_V_AVPN_SHIFT 7
+#define HPTE64_V_AVPN 0x3fffffffffffff80ULL
+#define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
+#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
+#define HPTE64_V_BOLTED 0x0000000000000010ULL
+#define HPTE64_V_LARGE 0x0000000000000004ULL
+#define HPTE64_V_SECONDARY 0x0000000000000002ULL
+#define HPTE64_V_VALID 0x0000000000000001ULL
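+/*
+ * The HPTE64_V_COMPARE mask (0xffffffffffffff83) matches B, the AVPN,
+ * H and V, while ignoring the L, bolted and software-available bits.
+ */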
+
+#define HPTE64_R_PP0 0x8000000000000000ULL
+#define HPTE64_R_TS 0x4000000000000000ULL
+#define HPTE64_R_KEY_HI 0x3000000000000000ULL
+#define HPTE64_R_RPN_SHIFT 12
+#define HPTE64_R_RPN 0x0ffffffffffff000ULL
+#define HPTE64_R_FLAGS 0x00000000000003ffULL
+#define HPTE64_R_PP 0x0000000000000003ULL
+#define HPTE64_R_N 0x0000000000000004ULL
+#define HPTE64_R_G 0x0000000000000008ULL
+#define HPTE64_R_M 0x0000000000000010ULL
+#define HPTE64_R_I 0x0000000000000020ULL
+#define HPTE64_R_W 0x0000000000000040ULL
+#define HPTE64_R_WIMG 0x0000000000000078ULL
+#define HPTE64_R_C 0x0000000000000080ULL
+#define HPTE64_R_R 0x0000000000000100ULL
+#define HPTE64_R_KEY_LO 0x0000000000000e00ULL
+#define HPTE64_R_KEY(x) ((((x) & HPTE64_R_KEY_HI) >> 57) | \
+ (((x) & HPTE64_R_KEY_LO) >> 9))
+
+#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
+#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
+
+/* PTE offsets */
+#define HPTE64_DW1 (HASH_PTE_SIZE_64 / 2)
+#define HPTE64_DW1_R (HPTE64_DW1 + 6)
+#define HPTE64_DW1_C (HPTE64_DW1 + 7)
+
+/* Format changes for ARCH v3 */
+#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL
+#define HPTE64_R_3_0_SSIZE_SHIFT 58
+#define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT)
+
+struct ppc_hash_pte64 {
+ uint64_t pte0, pte1;
+};
+
+const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
+ hwaddr ptex, int n);
+void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n);
+
+static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu,
+ const ppc_hash_pte64_t *hptes, int i)
+{
+ return ldq_p(&(hptes[i].pte0));
+}
+
+static inline uint64_t ppc_hash64_hpte1(PowerPCCPU *cpu,
+ const ppc_hash_pte64_t *hptes, int i)
+{
+ return ldq_p(&(hptes[i].pte1));
+}
+
+/*
+ * MMU Options
+ */
+
+struct PPCHash64PageSize {
+ uint32_t page_shift; /* Page shift (or 0) */
+ uint32_t pte_enc; /* Encoding in the HPTE (>>12) */
+};
+typedef struct PPCHash64PageSize PPCHash64PageSize;
+
+struct PPCHash64SegmentPageSizes {
+ uint32_t page_shift; /* Base page shift of segment (or 0) */
+ uint32_t slb_enc; /* SLB encoding for BookS */
+ PPCHash64PageSize enc[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+struct PPCHash64Options {
+#define PPC_HASH64_1TSEG 0x00001
+#define PPC_HASH64_AMR 0x00002
+#define PPC_HASH64_CI_LARGEPAGE 0x00004
+ unsigned flags;
+ unsigned slb_size;
+ PPCHash64SegmentPageSizes sps[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+extern const PPCHash64Options ppc_hash64_opts_basic;
+extern const PPCHash64Options ppc_hash64_opts_POWER7;
+
+static inline bool ppc_hash64_has(PowerPCCPU *cpu, unsigned feature)
+{
+ return !!(cpu->hash64_opts->flags & feature);
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+#if defined(CONFIG_USER_ONLY) || !defined(TARGET_PPC64)
+static inline void ppc_hash64_init(PowerPCCPU *cpu)
+{
+}
+static inline void ppc_hash64_finalize(PowerPCCPU *cpu)
+{
+}
+#endif
+
+#endif /* MMU_HASH64_H */
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
new file mode 100644
index 000000000..5b0e62e67
--- /dev/null
+++ b/target/ppc/mmu-radix64.c
@@ -0,0 +1,590 @@
+/*
+ * PowerPC Radix MMU emulation helpers for QEMU.
+ *
+ * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "exec/log.h"
+#include "internal.h"
+#include "mmu-radix64.h"
+#include "mmu-book3s-v3.h"
+
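+/*
+ * The top two effective address bits select one of four quadrants,
+ * each with its own (LPID, PID) source: roughly, quadrant 0 is
+ * process/application space, quadrants 1 and 2 are hypervisor views
+ * of guest space, and quadrant 3 is the operating system (PID 0).
+ */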
+static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
+ vaddr eaddr,
+ uint64_t *lpid, uint64_t *pid)
+{
+ if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
+ switch (eaddr & R_EADDR_QUADRANT) {
+ case R_EADDR_QUADRANT0:
+ *lpid = 0;
+ *pid = env->spr[SPR_BOOKS_PID];
+ break;
+ case R_EADDR_QUADRANT1:
+ *lpid = env->spr[SPR_LPIDR];
+ *pid = env->spr[SPR_BOOKS_PID];
+ break;
+ case R_EADDR_QUADRANT2:
+ *lpid = env->spr[SPR_LPIDR];
+ *pid = 0;
+ break;
+ case R_EADDR_QUADRANT3:
+ *lpid = 0;
+ *pid = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else { /* !MSR[HV] -> Guest */
+ switch (eaddr & R_EADDR_QUADRANT) {
+ case R_EADDR_QUADRANT0: /* Guest application */
+ *lpid = env->spr[SPR_LPIDR];
+ *pid = env->spr[SPR_BOOKS_PID];
+ break;
+ case R_EADDR_QUADRANT1: /* Illegal */
+ case R_EADDR_QUADRANT2:
+ return false;
+ case R_EADDR_QUADRANT3: /* Guest OS */
+ *lpid = env->spr[SPR_LPIDR];
+ *pid = 0; /* pid set to 0 -> addresses guest operating system */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ return true;
+}
+
+static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
+ vaddr eaddr)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ /* Instruction Segment Interrupt */
+ cs->exception_index = POWERPC_EXCP_ISEG;
+ break;
+ case MMU_DATA_STORE:
+ case MMU_DATA_LOAD:
+ /* Data Segment Interrupt */
+ cs->exception_index = POWERPC_EXCP_DSEG;
+ env->spr[SPR_DAR] = eaddr;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ env->error_code = 0;
+}
+
+static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
+ vaddr eaddr, uint32_t cause)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ /* Instruction Storage Interrupt */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = cause;
+ break;
+ case MMU_DATA_STORE:
+ cause |= DSISR_ISSTORE;
+ /* fall through */
+ case MMU_DATA_LOAD:
+ /* Data Storage Interrupt */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->spr[SPR_DSISR] = cause;
+ env->spr[SPR_DAR] = eaddr;
+ env->error_code = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
+ vaddr eaddr, hwaddr g_raddr, uint32_t cause)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ /* H Instruction Storage Interrupt */
+ cs->exception_index = POWERPC_EXCP_HISI;
+ env->spr[SPR_ASDR] = g_raddr;
+ env->error_code = cause;
+ break;
+ case MMU_DATA_STORE:
+ cause |= DSISR_ISSTORE;
+ /* fall through */
+ case MMU_DATA_LOAD:
+ /* H Data Storage Interrupt */
+ cs->exception_index = POWERPC_EXCP_HDSI;
+ env->spr[SPR_HDSISR] = cause;
+ env->spr[SPR_HDAR] = eaddr;
+ env->spr[SPR_ASDR] = g_raddr;
+ env->error_code = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
+ uint64_t pte, int *fault_cause, int *prot,
+ int mmu_idx, bool partition_scoped)
+{
+ CPUPPCState *env = &cpu->env;
+ int need_prot;
+
+ /* Check Page Attributes (pte58:59) */
+ if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
+ /*
+ * Radix PTE entries with the non-idempotent I/O attribute are treated
+ * as guarded storage
+ */
+ *fault_cause |= SRR1_NOEXEC_GUARD;
+ return true;
+ }
+
+ /* Determine permissions allowed by Encoded Access Authority */
+ if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
+ *prot = 0;
+ } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
+ partition_scoped) {
+ *prot = ppc_radix64_get_prot_eaa(pte);
+ } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
+ *prot = ppc_radix64_get_prot_eaa(pte);
+ *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
+ }
+
+ /* Check if requested access type is allowed */
+ need_prot = prot_for_access_type(access_type);
+ if (need_prot & ~*prot) { /* Page Protected for that Access */
+ *fault_cause |= DSISR_PROTFAULT;
+ return true;
+ }
+
+ return false;
+}
+
+static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
+ uint64_t pte, hwaddr pte_addr, int *prot)
+{
+ CPUState *cs = CPU(cpu);
+ uint64_t npte;
+
+ npte = pte | R_PTE_R; /* Always set reference bit */
+
+ if (access_type == MMU_DATA_STORE) { /* Store/Write */
+ npte |= R_PTE_C; /* Set change bit */
+ } else {
+ /*
+ * Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit.
+ */
+ *prot &= ~PAGE_WRITE;
+ }
+
+ if (pte ^ npte) { /* If pte has changed then write it back */
+ stq_phys(cs->as, pte_addr, npte);
+ }
+}
+
+static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
+ uint64_t *pte_addr, uint64_t *nls,
+ int *psize, uint64_t *pte, int *fault_cause)
+{
+ uint64_t index, pde;
+
+ if (*nls < 5) { /* Directory maps less than 2**5 entries */
+ *fault_cause |= DSISR_R_BADCONFIG;
+ return 1;
+ }
+
+ /* Read page <directory/table> entry from guest address space */
+ pde = ldq_phys(as, *pte_addr);
+ if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
+ *fault_cause |= DSISR_NOPTE;
+ return 1;
+ }
+
+ *pte = pde;
+ *psize -= *nls;
+ if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
+ *nls = pde & R_PDE_NLS;
+ index = eaddr >> (*psize - *nls); /* Shift */
+ index &= ((1UL << *nls) - 1); /* Mask */
+ *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
+ }
+ return 0;
+}
+
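+/*
+ * Walk the radix tree from base_addr, consuming 'nls' effective
+ * address bits per level until a leaf PTE is found.  For instance, a
+ * typical 52-bit tree uses level sizes of 13/9/9/9 bits, leaving a
+ * 12-bit (4 kiB) page offset at the leaf; the exact geometry is
+ * software-defined.
+ */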
+static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
+ uint64_t base_addr, uint64_t nls,
+ hwaddr *raddr, int *psize, uint64_t *pte,
+ int *fault_cause, hwaddr *pte_addr)
+{
+    uint64_t index, pde, rpn, mask;
+
+ if (nls < 5) { /* Directory maps less than 2**5 entries */
+ *fault_cause |= DSISR_R_BADCONFIG;
+ return 1;
+ }
+
+ index = eaddr >> (*psize - nls); /* Shift */
+ index &= ((1UL << nls) - 1); /* Mask */
+ *pte_addr = base_addr + (index * sizeof(pde));
+ do {
+ int ret;
+
+ ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
+ fault_cause);
+ if (ret) {
+ return ret;
+ }
+ } while (!(pde & R_PTE_LEAF));
+
+ *pte = pde;
+ rpn = pde & R_PTE_RPN;
+ mask = (1UL << *psize) - 1;
+
+    /* OR the high RPN bits with the low EA bits to form the real address */
+ *raddr = (rpn & ~mask) | (eaddr & mask);
+ return 0;
+}
+
+static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if (!(pate->dw0 & PATE0_HR)) {
+ return false;
+ }
+ if (lpid == 0 && !msr_hv) {
+ return false;
+ }
+ if ((pate->dw0 & PATE1_R_PRTS) < 5) {
+ return false;
+ }
+ /* More checks ... */
+ return true;
+}
+
+static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
+ MMUAccessType access_type,
+ vaddr eaddr, hwaddr g_raddr,
+ ppc_v3_pate_t pate,
+ hwaddr *h_raddr, int *h_prot,
+ int *h_page_size, bool pde_addr,
+ int mmu_idx, bool guest_visible)
+{
+ int fault_cause = 0;
+ hwaddr pte_addr;
+ uint64_t pte;
+
+ *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
+ /* No valid pte or access denied due to protection */
+ if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
+ pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
+ &pte, &fault_cause, &pte_addr) ||
+ ppc_radix64_check_prot(cpu, access_type, pte,
+ &fault_cause, h_prot, mmu_idx, true)) {
+ if (pde_addr) { /* address being translated was that of a guest pde */
+ fault_cause |= DSISR_PRTABLE_FAULT;
+ }
+ if (guest_visible) {
+ ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
+ }
+ return 1;
+ }
+
+ if (guest_visible) {
+ ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
+ }
+
+ return 0;
+}
+
+static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
+ MMUAccessType access_type,
+ vaddr eaddr, uint64_t pid,
+ ppc_v3_pate_t pate, hwaddr *g_raddr,
+ int *g_prot, int *g_page_size,
+ int mmu_idx, bool guest_visible)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
+ int fault_cause = 0, h_page_size, h_prot;
+ hwaddr h_raddr, pte_addr;
+ int ret;
+
+ /* Index Process Table by PID to Find Corresponding Process Table Entry */
+ offset = pid * sizeof(struct prtb_entry);
+ size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
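+ /* PATE1_R_PRTS encodes the process-table size as 2^(PRTS + 12) bytes */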
+ if (offset >= size) {
+ /* offset exceeds size of the process table */
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
+ }
+ return 1;
+ }
+ prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
+
+ if (cpu->vhyp) {
+ prtbe0 = ldq_phys(cs->as, prtbe_addr);
+ } else {
+ /*
+ * Process table addresses are subject to partition-scoped
+ * translation
+ *
+ * On a Radix host, the partition-scoped page table for LPID=0
+ * is only used to translate the effective addresses of the
+ * process table entries.
+ */
+ ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
+ pate, &h_raddr, &h_prot,
+ &h_page_size, true,
+ /* mmu_idx is 5 because we're translating from hypervisor scope */
+ 5, guest_visible);
+ if (ret) {
+ return ret;
+ }
+ prtbe0 = ldq_phys(cs->as, h_raddr);
+ }
+
+ /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+ *g_page_size = PRTBE_R_GET_RTS(prtbe0);
+ base_addr = prtbe0 & PRTBE_R_RPDB;
+ nls = prtbe0 & PRTBE_R_RPDS;
+ if (msr_hv || cpu->vhyp) {
+ /*
+ * Can treat process table addresses as real addresses
+ */
+ ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
+ nls, g_raddr, g_page_size, &pte,
+ &fault_cause, &pte_addr);
+ if (ret) {
+ /* No valid PTE */
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
+ }
+ return ret;
+ }
+ } else {
+ uint64_t rpn, mask;
+
+ index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
+ index &= ((1UL << nls) - 1); /* Mask */
+ pte_addr = base_addr + (index * sizeof(pte));
+
+ /*
+ * Each process table address is subject to a partition-scoped
+ * translation
+ */
+ do {
+ ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
+ pate, &h_raddr, &h_prot,
+ &h_page_size, true,
+ /* mmu_idx is 5 because we're translating from hypervisor scope */
+ 5, guest_visible);
+ if (ret) {
+ return ret;
+ }
+
+ ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
+ &nls, g_page_size, &pte, &fault_cause);
+ if (ret) {
+ /* No valid pte */
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
+ }
+ return ret;
+ }
+ pte_addr = h_raddr;
+ } while (!(pte & R_PTE_LEAF));
+
+ rpn = pte & R_PTE_RPN;
+ mask = (1UL << *g_page_size) - 1;
+
+ /* OR the high bits of the RPN with the low EA bits to form the real addr */
+ *g_raddr = (rpn & ~mask) | (eaddr & mask);
+ }
+
+ if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
+ g_prot, mmu_idx, false)) {
+ /* Access denied due to protection */
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
+ }
+ return 1;
+ }
+
+ if (guest_visible) {
+ ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
+ }
+
+ return 0;
+}
+
+/*
+ * Radix tree translation is a 2 steps translation process:
+ *
+ * 1. Process-scoped translation: Guest Eff Addr -> Guest Real Addr
+ * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
+ *
+ * MSR[HV]
+ * +-------------+----------------+---------------+
+ * | | HV = 0 | HV = 1 |
+ * +-------------+----------------+---------------+
+ * | Relocation | Partition | No |
+ * | = Off | Scoped | Translation |
+ * Relocation +-------------+----------------+---------------+
+ * | Relocation | Partition & | Process |
+ * | = On | Process Scoped | Scoped |
+ * +-------------+----------------+---------------+
+ */
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
+{
+ CPUPPCState *env = &cpu->env;
+ uint64_t lpid, pid;
+ ppc_v3_pate_t pate;
+ int psize, prot;
+ hwaddr g_raddr;
+ bool relocation;
+
+ assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));
+
+ relocation = !mmuidx_real(mmu_idx);
+
+ /* HV or virtual hypervisor Real Mode Access */
+ if (!relocation && (mmuidx_hv(mmu_idx) || cpu->vhyp)) {
+ /* In real mode top 4 effective addr bits (mostly) ignored */
+ *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
+ if (!(eaddr >> 63)) {
+ *raddr |= env->spr[SPR_HRMOR];
+ }
+ }
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ *psizep = TARGET_PAGE_BITS;
+ return true;
+ }
+
+ /*
+ * Check UPRT (we avoid the check in real mode to deal with
+ * transitional states during kexec).
+ */
+ if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "LPCR:UPRT not set in radix mode ! LPCR="
+ TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
+ }
+
+ /* Virtual Mode Access - get the fully qualified address */
+ if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
+ if (guest_visible) {
+ ppc_radix64_raise_segi(cpu, access_type, eaddr);
+ }
+ return false;
+ }
+
+ /* Get Process Table */
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc;
+ vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->get_pate(cpu->vhyp, &pate);
+ } else {
+ if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
+ }
+ return false;
+ }
+ if (!validate_pate(cpu, lpid, &pate)) {
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+ }
+ return false;
+ }
+ }
+
+ *psizep = INT_MAX;
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ /*
+ * Perform process-scoped translation if relocation enabled.
+ *
+ * - Translates an effective address to a host real address in
+ * quadrants 0 and 3 when HV=1.
+ *
+ * - Translates an effective address to a guest real address.
+ */
+ if (relocation) {
+ int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
+ pate, &g_raddr, &prot,
+ &psize, mmu_idx, guest_visible);
+ if (ret) {
+ return false;
+ }
+ *psizep = MIN(*psizep, psize);
+ *protp &= prot;
+ } else {
+ g_raddr = eaddr & R_EADDR_MASK;
+ }
+
+ if (cpu->vhyp) {
+ *raddr = g_raddr;
+ } else {
+ /*
+ * Perform partition-scoped translation if !HV or HV access to
+ * quadrants 1 or 2. Translates a guest real address to a host
+ * real address.
+ */
+ if (lpid || !mmuidx_hv(mmu_idx)) {
+ int ret;
+
+ ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
+ g_raddr, pate, raddr,
+ &prot, &psize, false,
+ mmu_idx, guest_visible);
+ if (ret) {
+ return false;
+ }
+ *psizep = MIN(*psizep, psize);
+ *protp &= prot;
+ } else {
+ *raddr = g_raddr;
+ }
+ }
+
+ return true;
+}
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
new file mode 100644
index 000000000..b70357cf3
--- /dev/null
+++ b/target/ppc/mmu-radix64.h
@@ -0,0 +1,73 @@
+#ifndef MMU_RADIX64_H
+#define MMU_RADIX64_H
+
+#ifndef CONFIG_USER_ONLY
+
+/* Radix Quadrants */
+#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF
+#define R_EADDR_QUADRANT 0xC000000000000000
+#define R_EADDR_QUADRANT0 0x0000000000000000
+#define R_EADDR_QUADRANT1 0x4000000000000000
+#define R_EADDR_QUADRANT2 0x8000000000000000
+#define R_EADDR_QUADRANT3 0xC000000000000000
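+/*
+ * The top two EA bits select the quadrant; e.g. an EA of
+ * 0xC000000000001000 lies in quadrant 3, and EA & R_EADDR_MASK strips
+ * the quadrant bits before the radix tree walk.
+ */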
+
+/* Radix Partition Table Entry Fields */
+#define PATE1_R_PRTB 0x0FFFFFFFFFFFF000
+#define PATE1_R_PRTS 0x000000000000001F
+
+/* Radix Process Table Entry Fields */
+#define PRTBE_R_GET_RTS(rts) \
+ ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
+#define PRTBE_R_RPDB 0x0FFFFFFFFFFFFF00
+#define PRTBE_R_RPDS 0x000000000000001F
+
+/* Radix Page Directory/Table Entry Fields */
+#define R_PTE_VALID 0x8000000000000000
+#define R_PTE_LEAF 0x4000000000000000
+#define R_PTE_SW0 0x2000000000000000
+#define R_PTE_RPN 0x01FFFFFFFFFFF000
+#define R_PTE_SW1 0x0000000000000E00
+#define R_GET_SW(sw) (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
+#define R_PTE_R 0x0000000000000100
+#define R_PTE_C 0x0000000000000080
+#define R_PTE_ATT 0x0000000000000030
+#define R_PTE_ATT_NORMAL 0x0000000000000000
+#define R_PTE_ATT_SAO 0x0000000000000010
+#define R_PTE_ATT_NI_IO 0x0000000000000020
+#define R_PTE_ATT_TOLERANT_IO 0x0000000000000030
+#define R_PTE_EAA_PRIV 0x0000000000000008
+#define R_PTE_EAA_R 0x0000000000000004
+#define R_PTE_EAA_RW 0x0000000000000002
+#define R_PTE_EAA_X 0x0000000000000001
+#define R_PDE_NLB PRTBE_R_RPDB
+#define R_PDE_NLS PRTBE_R_RPDS
+
+#ifdef TARGET_PPC64
+
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible);
+
+static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
+{
+ return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
+ (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
+ (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
+}
+
+static inline int ppc_radix64_get_prot_amr(const PowerPCCPU *cpu)
+{
+ const CPUPPCState *env = &cpu->env;
+ int amr = env->spr[SPR_AMR] >> 62; /* We only care about key0 AMR63:62 */
+ int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */
+
+ return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
+ (amr & 0x1 ? 0 : PAGE_READ) |
+ (iamr & 0x1 ? 0 : PAGE_EXEC);
+}
+
+#endif /* TARGET_PPC64 */
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* MMU_RADIX64_H */
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
new file mode 100644
index 000000000..754509e55
--- /dev/null
+++ b/target/ppc/mmu_common.c
@@ -0,0 +1,1620 @@
+/*
+ * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "mmu-hash64.h"
+#include "mmu-hash32.h"
+#include "exec/exec-all.h"
+#include "exec/log.h"
+#include "helper_regs.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
+#include "internal.h"
+#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_BATS */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DUMP_PAGE_TABLES */
+/* #define FLUSH_ALL_TLBS */
+
+#ifdef DEBUG_MMU
+# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
+#else
+# define LOG_MMU_STATE(cpu) do { } while (0)
+#endif
+
+#ifdef DEBUG_SOFTWARE_TLB
+# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_SWTLB(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
+ assert(!cpu->env.has_hv_mode || !cpu->vhyp);
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
+ target_ulong htabsize = value & SDR_64_HTABSIZE;
+
+ if (value & ~sdr_mask) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
+ " set in SDR1", value & ~sdr_mask);
+ value &= sdr_mask;
+ }
+ if (htabsize > 28) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
+ " stored in SDR1", htabsize);
+ return;
+ }
+ }
+#endif /* defined(TARGET_PPC64) */
+ /* FIXME: Should check for valid HTABMASK values in 32-bit case */
+ env->spr[SPR_SDR1] = value;
+}
+
+/*****************************************************************************/
+/* PowerPC MMU emulation */
+
+static int pp_check(int key, int pp, int nx)
+{
+ int access;
+
+ /* Compute access rights */
+ access = 0;
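+ /*
+ * Hash MMU PP semantics as implemented below: with the key clear,
+ * PP 0-2 grant read/write and PP 3 read-only; with the key set,
+ * PP 0 grants nothing, PP 1 and 3 read-only, and PP 2 read/write.
+ */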
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ access |= PAGE_WRITE;
+ /* fall through */
+ case 0x3:
+ access |= PAGE_READ;
+ break;
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ access = 0;
+ break;
+ case 0x1:
+ case 0x3:
+ access = PAGE_READ;
+ break;
+ case 0x2:
+ access = PAGE_READ | PAGE_WRITE;
+ break;
+ }
+ }
+ if (nx == 0) {
+ access |= PAGE_EXEC;
+ }
+
+ return access;
+}
+
+static int check_prot(int prot, MMUAccessType access_type)
+{
+ return prot & prot_for_access_type(access_type) ? 0 : -2;
+}
+
+int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
+ int way, int is_code)
+{
+ int nr;
+
+ /* Select TLB num in a way from address */
+ nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
+ /* Select TLB way */
+ nr += env->tlb_per_way * way;
+ /* 6xx have separate TLBs for instructions and data */
+ if (is_code && env->id_tlbs == 1) {
+ nr += env->nb_tlb;
+ }
+
+ return nr;
+}
+
+static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
+ target_ulong pte1, int h,
+ MMUAccessType access_type)
+{
+ target_ulong ptem, mmask;
+ int access, ret, pteh, ptev, pp;
+
+ ret = -1;
+ /* Check validity and table match */
+ ptev = pte_is_valid(pte0);
+ pteh = (pte0 >> 6) & 1;
+ if (ptev && h == pteh) {
+ /* Check vsid & api */
+ ptem = pte0 & PTE_PTEM_MASK;
+ mmask = PTE_CHECK_MASK;
+ pp = pte1 & 0x00000003;
+ if (ptem == ctx->ptem) {
+ if (ctx->raddr != (hwaddr)-1ULL) {
+ /* all matches should have equal RPN, WIMG & PP */
+ if ((ctx->raddr & mmask) != (pte1 & mmask)) {
+ qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
+ return -3;
+ }
+ }
+ /* Compute access rights */
+ access = pp_check(ctx->key, pp, ctx->nx);
+ /* Keep the matching PTE information */
+ ctx->raddr = pte1;
+ ctx->prot = access;
+ ret = check_prot(ctx->prot, access_type);
+ if (ret == 0) {
+ /* Access granted */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+ } else {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
+ int ret, MMUAccessType access_type)
+{
+ int store = 0;
+
+ /* Update page flags */
+ if (!(*pte1p & 0x00000100)) {
+ /* Update accessed flag */
+ *pte1p |= 0x00000100;
+ store = 1;
+ }
+ if (!(*pte1p & 0x00000080)) {
+ if (access_type == MMU_DATA_STORE && ret == 0) {
+ /* Update changed flag */
+ *pte1p |= 0x00000080;
+ store = 1;
+ } else {
+ /* Force page fault for first write access */
+ ctx->prot &= ~PAGE_WRITE;
+ }
+ }
+
+ return store;
+}
+
+/* Software driven TLB helpers */
+
+static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, MMUAccessType access_type)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr, best, way;
+ int ret;
+
+ best = -1;
+ ret = -1; /* No TLB found */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
+ tlb = &env->tlb.tlb6[nr];
+ /* This test "emulates" the PTE index match for hardware TLBs */
+ if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
+ LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
+ "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+ continue;
+ }
+ LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
+ TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, eaddr, tlb->pte1,
+ access_type == MMU_DATA_STORE ? 'S' : 'L',
+ access_type == MMU_INST_FETCH ? 'I' : 'D');
+ switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
+ 0, access_type)) {
+ case -3:
+ /* TLB inconsistency */
+ return -1;
+ case -2:
+ /* Access violation */
+ ret = -2;
+ best = nr;
+ break;
+ case -1:
+ default:
+ /* No match */
+ break;
+ case 0:
+ /* access granted */
+ /*
+ * XXX: we should keep looping to check all TLBs for
+ * consistency, but we can speed the whole thing up since
+ * the result would be undefined anyway if the TLBs were
+ * inconsistent.
+ */
+ ret = 0;
+ best = nr;
+ goto done;
+ }
+ }
+ if (best != -1) {
+ done:
+ LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
+ ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ /* Update page flags */
+ pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
+ }
+
+ return ret;
+}
+
+/* Perform BAT hit & translation */
+static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
+ int *validp, int *protp, target_ulong *BATu,
+ target_ulong *BATl)
+{
+ target_ulong bl;
+ int pp, valid, prot;
+
+ bl = (*BATu & 0x00001FFC) << 15;
+ valid = 0;
+ prot = 0;
+ if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
+ ((msr_pr != 0) && (*BATu & 0x00000001))) {
+ valid = 1;
+ pp = *BATl & 0x00000003;
+ if (pp != 0) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ }
+ *blp = bl;
+ *validp = valid;
+ *protp = prot;
+}
+
+static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong virtual, MMUAccessType access_type)
+{
+ target_ulong *BATlt, *BATut, *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+ int i, valid, prot;
+ int ret = -1;
+ bool ifetch = access_type == MMU_INST_FETCH;
+
+ LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', virtual);
+ if (ifetch) {
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ } else {
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ }
+ for (i = 0; i < env->nb_BATs; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
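+ /*
+ * A BAT matches when EA bits 31:28 equal BEPIu and the EA 27:17 bits
+ * outside the block-length mask equal BEPIl; e.g. a 256 KiB BAT has
+ * bl = 0x00020000, so EA bit 17 is ignored in the comparison.
+ */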
+ if ((virtual & 0xF0000000) == BEPIu &&
+ ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
+ /* BAT matches */
+ if (valid != 0) {
+ /* Get physical address */
+ ctx->raddr = (*BATl & 0xF0000000) |
+ ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
+ (virtual & 0x0001F000);
+ /* Compute access rights */
+ ctx->prot = prot;
+ ret = check_prot(ctx->prot, access_type);
+ if (ret == 0) {
+ LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
+ i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
+ ctx->prot & PAGE_WRITE ? 'W' : '-');
+ }
+ break;
+ }
+ }
+ }
+ if (ret < 0) {
+#if defined(DEBUG_BATS)
+ if (qemu_log_enabled()) {
+ LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
+ for (i = 0; i < 4; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bl = (*BATu & 0x00001FFC) << 15;
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, ifetch ? 'I' : 'D', i, virtual,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+ }
+#endif
+ }
+ /* No hit */
+ return ret;
+}
+
+/* Perform segment based translation */
+static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, MMUAccessType access_type,
+ int type)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ hwaddr hash;
+ target_ulong vsid;
+ int ds, pr, target_page_bits;
+ int ret;
+ target_ulong sr, pgidx;
+
+ pr = msr_pr;
+ ctx->eaddr = eaddr;
+
+ sr = env->sr[eaddr >> 28];
+ ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
+ ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
+ ds = sr & 0x80000000 ? 1 : 0;
+ ctx->nx = sr & 0x10000000 ? 1 : 0;
+ vsid = sr & 0x00FFFFFF;
+ target_page_bits = TARGET_PAGE_BITS;
+ qemu_log_mask(CPU_LOG_MMU,
+ "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
+ " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
+ " ir=%d dr=%d pr=%d %d t=%d\n",
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
+ (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
+ pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
+ hash = vsid ^ pgidx;
+ ctx->ptem = (vsid << 7) | (pgidx >> 10);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
+ ctx->key, ds, ctx->nx, vsid);
+ ret = -1;
+ if (!ds) {
+ /* Check if instruction fetch is allowed, if needed */
+ if (type != ACCESS_CODE || ctx->nx == 0) {
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
+ " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
+ ctx->hash[0] = hash;
+ ctx->hash[1] = ~hash;
+
+ /* Initialize real address with an invalid value */
+ ctx->raddr = (hwaddr)-1ULL;
+ /* Software TLB search */
+ ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
+#if defined(DUMP_PAGE_TABLES)
+ if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+ CPUState *cs = env_cpu(env);
+ hwaddr curaddr;
+ uint32_t a0, a1, a2, a3;
+
+ qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+ "\n", ppc_hash32_hpt_base(cpu),
+ ppc_hash32_hpt_mask(cpu) + 0x80);
+ for (curaddr = ppc_hash32_hpt_base(cpu);
+ curaddr < (ppc_hash32_hpt_base(cpu)
+ + ppc_hash32_hpt_mask(cpu) + 0x80);
+ curaddr += 16) {
+ a0 = ldl_phys(cs->as, curaddr);
+ a1 = ldl_phys(cs->as, curaddr + 4);
+ a2 = ldl_phys(cs->as, curaddr + 8);
+ a3 = ldl_phys(cs->as, curaddr + 12);
+ if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
+ qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+ curaddr, a0, a1, a2, a3);
+ }
+ }
+ }
+#endif
+ } else {
+ qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
+ ret = -3;
+ }
+ } else {
+ target_ulong sr;
+
+ qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
+ /* Direct-store segment: absolutely *BUGGY* for now */
+
+ /*
+ * Direct-store implies a 32-bit MMU.
+ * Check the Segment Register's bus unit ID (BUID).
+ */
+ sr = env->sr[eaddr >> 28];
+ if ((sr & 0x1FF00000) >> 20 == 0x07f) {
+ /*
+ * Memory-forced I/O controller interface access
+ *
+ * If T=1 and BUID=x'07F', the 601 performs a memory
+ * access to SR[28-31] LA[4-31], bypassing all protection
+ * mechanisms.
+ */
+ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ switch (type) {
+ case ACCESS_INT:
+ /* Integer load/store : only access allowed */
+ break;
+ case ACCESS_CODE:
+ /* No code fetch is allowed in direct-store areas */
+ return -4;
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ return -4;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ return -4;
+ case ACCESS_CACHE:
+ /*
+ * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
+ *
+ * Should make the instruction a no-op. As it already does
+ * nothing, it's quite easy :-)
+ */
+ ctx->raddr = eaddr;
+ return 0;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ return -4;
+ default:
+ qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
+ "address translation\n");
+ return -4;
+ }
+ if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
+ (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
+ ctx->raddr = eaddr;
+ ret = 2;
+ } else {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
+/* Generic TLB check function for embedded PowerPC implementations */
+int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddrp,
+ target_ulong address, uint32_t pid, int ext,
+ int i)
+{
+ target_ulong mask;
+
+ /* Check valid flag */
+ if (!(tlb->prot & PAGE_VALID)) {
+ return -1;
+ }
+ mask = ~(tlb->size - 1);
+ LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
+ " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
+ mask, (uint32_t)tlb->PID, tlb->prot);
+ /* Check PID */
+ if (tlb->PID != 0 && tlb->PID != pid) {
+ return -1;
+ }
+ /* Check effective address */
+ if ((address & mask) != tlb->EPN) {
+ return -1;
+ }
+ *raddrp = (tlb->RPN & mask) | (address & ~mask);
+ if (ext) {
+ /* Extend the physical address to 36 bits */
+ *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
+ }
+
+ return 0;
+}
+
+static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address,
+ MMUAccessType access_type)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret, zsel, zpr, pr;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+ pr = msr_pr;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_40x_PID], 0, i) < 0) {
+ continue;
+ }
+ zsel = (tlb->attr >> 4) & 0xF;
+ zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
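+ /* Each of the 16 zones has a 2-bit ZPR field; zone 0 is in ZPR 31:30 */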
+ LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
+ __func__, i, zsel, zpr, access_type, tlb->attr);
+ /* Check execute enable bit */
+ switch (zpr) {
+ case 0x2:
+ if (pr != 0) {
+ goto check_perms;
+ }
+ /* fall through */
+ case 0x3:
+ /* All accesses granted */
+ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = 0;
+ break;
+ case 0x0:
+ if (pr != 0) {
+ /* Raise Zone protection fault. */
+ env->spr[SPR_40x_ESR] = 1 << 22;
+ ctx->prot = 0;
+ ret = -2;
+ break;
+ }
+ /* fall through */
+ case 0x1:
+ check_perms:
+ /* Check from TLB entry */
+ ctx->prot = tlb->prot;
+ ret = check_prot(ctx->prot, access_type);
+ if (ret == -2) {
+ env->spr[SPR_40x_ESR] = 0;
+ }
+ break;
+ }
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ return 0;
+ }
+ }
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+
+ return ret;
+}
+
+static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, int *prot, target_ulong address,
+ MMUAccessType access_type, int i)
+{
+ int prot2;
+
+ if (ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID],
+ !env->nb_pids, i) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID1] &&
+ ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID2] &&
+ ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
+ goto found_tlb;
+ }
+
+ LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ return -1;
+
+found_tlb:
+
+ if (msr_pr != 0) {
+ prot2 = tlb->prot & 0xF;
+ } else {
+ prot2 = (tlb->prot >> 4) & 0xF;
+ }
+
+ /* Check the address space */
+ if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if (prot2 & prot_for_access_type(access_type)) {
+ LOG_SWTLB("%s: good TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ return access_type == MMU_INST_FETCH ? -3 : -2;
+}
+
+static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address,
+ MMUAccessType access_type)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
+ access_type, i);
+ if (ret != -1) {
+ break;
+ }
+ }
+
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ } else {
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ }
+
+ return ret;
+}
+
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
+ ppcmas_tlb_t *tlb)
+{
+ int tlbm_size;
+
+ tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+
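+ /* Power-of-two KiB encoding: TSIZE 2 is 4 KiB, TSIZE 12 is 4 MiB */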
+ return 1024ULL << tlbm_size;
+}
+
+/* TLB check function for MAS based SoftTLBs */
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
+ hwaddr *raddrp, target_ulong address,
+ uint32_t pid)
+{
+ hwaddr mask;
+ uint32_t tlb_pid;
+
+ if (!msr_cm) {
+ /* In 32bit mode we can only address 32bit EAs */
+ address = (uint32_t)address;
+ }
+
+ /* Check valid flag */
+ if (!(tlb->mas1 & MAS1_VALID)) {
+ return -1;
+ }
+
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
+ PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
+ PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
+ tlb->mas7_3, tlb->mas8);
+
+ /* Check PID */
+ tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
+ if (tlb_pid != 0 && tlb_pid != pid) {
+ return -1;
+ }
+
+ /* Check effective address */
+ if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
+ return -1;
+ }
+
+ if (raddrp) {
+ *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
+ }
+
+ return 0;
+}
+
+static bool is_epid_mmu(int mmu_idx)
+{
+ return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
+}
+
+static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
+{
+ uint32_t esr = 0;
+ if (access_type == MMU_DATA_STORE) {
+ esr |= ESR_ST;
+ }
+ if (is_epid_mmu(mmu_idx)) {
+ esr |= ESR_EPID;
+ }
+ return esr;
+}
+
+/*
+ * Get the EPID register for the given mmu_idx. For a regular access,
+ * the effective AS and PR bits are taken from the current processor
+ * state instead.
+ *
+ * The effective AS and PR bits and the PID are returned through the
+ * out parameters. The PID is only valid for EPID accesses; otherwise
+ * the caller must determine the correct PID itself. Returns true if a
+ * valid EPID was returned.
+ */
+static bool mmubooke206_get_as(CPUPPCState *env,
+ int mmu_idx, uint32_t *epid_out,
+ bool *as_out, bool *pr_out)
+{
+ if (is_epid_mmu(mmu_idx)) {
+ uint32_t epidr;
+ if (mmu_idx == PPC_TLB_EPID_STORE) {
+ epidr = env->spr[SPR_BOOKE_EPSC];
+ } else {
+ epidr = env->spr[SPR_BOOKE_EPLC];
+ }
+ *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
+ *as_out = !!(epidr & EPID_EAS);
+ *pr_out = !!(epidr & EPID_EPR);
+ return true;
+ } else {
+ *as_out = msr_ds;
+ *pr_out = msr_pr;
+ return false;
+ }
+}
+
+/* Check if the tlb found by hashing really matches */
+static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
+ hwaddr *raddr, int *prot,
+ target_ulong address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ int prot2 = 0;
+ uint32_t epid;
+ bool as, pr;
+ bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
+
+ if (!use_epid) {
+ if (ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID]) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID1] &&
+ ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID1]) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID2] &&
+ ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID2]) >= 0) {
+ goto found_tlb;
+ }
+ } else {
+ if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
+ goto found_tlb;
+ }
+ }
+
+ LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ return -1;
+
+found_tlb:
+
+ if (pr) {
+ if (tlb->mas7_3 & MAS3_UR) {
+ prot2 |= PAGE_READ;
+ }
+ if (tlb->mas7_3 & MAS3_UW) {
+ prot2 |= PAGE_WRITE;
+ }
+ if (tlb->mas7_3 & MAS3_UX) {
+ prot2 |= PAGE_EXEC;
+ }
+ } else {
+ if (tlb->mas7_3 & MAS3_SR) {
+ prot2 |= PAGE_READ;
+ }
+ if (tlb->mas7_3 & MAS3_SW) {
+ prot2 |= PAGE_WRITE;
+ }
+ if (tlb->mas7_3 & MAS3_SX) {
+ prot2 |= PAGE_EXEC;
+ }
+ }
+
+ /* Check the address space and permissions */
+ if (access_type == MMU_INST_FETCH) {
+ /* There is no way to fetch code using epid load */
+ assert(!use_epid);
+ as = msr_ir;
+ }
+
+ if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if (prot2 & prot_for_access_type(access_type)) {
+ LOG_SWTLB("%s: good TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ return access_type == MMU_INST_FETCH ? -3 : -2;
+}
+
+static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address,
+ MMUAccessType access_type,
+ int mmu_idx)
+{
+ ppcmas_tlb_t *tlb;
+ hwaddr raddr;
+ int i, j, ret;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+ if (!tlb) {
+ continue;
+ }
+ ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
+ access_type, mmu_idx);
+ if (ret != -1) {
+ goto found_tlb;
+ }
+ }
+ }
+
+found_tlb:
+
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ } else {
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ }
+
+ return ret;
+}
+
+static const char *book3e_tsize_to_str[32] = {
+ "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
+ "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
+ "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
+ "1T", "2T"
+};
+
+static void mmubooke_dump_mmu(CPUPPCState *env)
+{
+ ppcemb_tlb_t *entry;
+ int i;
+
+ if (kvm_enabled() && !env->kvm_sw_tlb) {
+ qemu_printf("Cannot access KVM TLB\n");
+ return;
+ }
+
+ qemu_printf("\nTLB:\n");
+ qemu_printf("Effective Physical Size PID Prot "
+ "Attr\n");
+
+ entry = &env->tlb.tlbe[0];
+ for (i = 0; i < env->nb_tlb; i++, entry++) {
+ hwaddr ea, pa;
+ target_ulong mask;
+ uint64_t size = (uint64_t)entry->size;
+ char size_buf[20];
+
+ /* Check valid flag */
+ if (!(entry->prot & PAGE_VALID)) {
+ continue;
+ }
+
+ mask = ~(entry->size - 1);
+ ea = entry->EPN & mask;
+ pa = entry->RPN & mask;
+ /* Extend the physical address to 36 bits */
+ pa |= (hwaddr)(entry->RPN & 0xF) << 32;
+ if (size >= 1 * MiB) {
+ snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
+ } else {
+ snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
+ }
+ qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
+ (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
+ entry->prot, entry->attr);
+ }
+
+}
+
+static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
+ int tlbsize)
+{
+ ppcmas_tlb_t *entry;
+ int i;
+
+ qemu_printf("\nTLB%d:\n", tlbn);
+ qemu_printf("Effective Physical Size TID TS SRWX"
+ " URWX WIMGE U0123\n");
+
+ entry = &env->tlb.tlbm[offset];
+ for (i = 0; i < tlbsize; i++, entry++) {
+ hwaddr ea, pa, size;
+ int tsize;
+
+ if (!(entry->mas1 & MAS1_VALID)) {
+ continue;
+ }
+
+ tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+ size = 1024ULL << tsize;
+ ea = entry->mas2 & ~(size - 1);
+ pa = entry->mas7_3 & ~(size - 1);
+
+ qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
+ "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
+ (uint64_t)ea, (uint64_t)pa,
+ book3e_tsize_to_str[tsize],
+ (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
+ (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
+ entry->mas7_3 & MAS3_SR ? 'R' : '-',
+ entry->mas7_3 & MAS3_SW ? 'W' : '-',
+ entry->mas7_3 & MAS3_SX ? 'X' : '-',
+ entry->mas7_3 & MAS3_UR ? 'R' : '-',
+ entry->mas7_3 & MAS3_UW ? 'W' : '-',
+ entry->mas7_3 & MAS3_UX ? 'X' : '-',
+ entry->mas2 & MAS2_W ? 'W' : '-',
+ entry->mas2 & MAS2_I ? 'I' : '-',
+ entry->mas2 & MAS2_M ? 'M' : '-',
+ entry->mas2 & MAS2_G ? 'G' : '-',
+ entry->mas2 & MAS2_E ? 'E' : '-',
+ entry->mas7_3 & MAS3_U0 ? '0' : '-',
+ entry->mas7_3 & MAS3_U1 ? '1' : '-',
+ entry->mas7_3 & MAS3_U2 ? '2' : '-',
+ entry->mas7_3 & MAS3_U3 ? '3' : '-');
+ }
+}
+
+static void mmubooke206_dump_mmu(CPUPPCState *env)
+{
+ int offset = 0;
+ int i;
+
+ if (kvm_enabled() && !env->kvm_sw_tlb) {
+ qemu_printf("Cannot access KVM TLB\n");
+ return;
+ }
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int size = booke206_tlb_size(env, i);
+
+ if (size == 0) {
+ continue;
+ }
+
+ mmubooke206_dump_one_tlb(env, i, offset, size);
+ offset += size;
+ }
+}
+
+static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
+{
+ target_ulong *BATlt, *BATut, *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+ int i;
+
+ switch (type) {
+ case ACCESS_CODE:
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ break;
+ default:
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ break;
+ }
+
+ for (i = 0; i < env->nb_BATs; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bl = (*BATu & 0x00001FFC) << 15;
+ qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ type == ACCESS_CODE ? "code" : "data", i,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+}
+
+static void mmu6xx_dump_mmu(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc6xx_tlb_t *tlb;
+ target_ulong sr;
+ int type, way, entry, i;
+
+ qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
+ qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
+
+ qemu_printf("\nSegment registers:\n");
+ for (i = 0; i < 32; i++) {
+ sr = env->sr[i];
+ if (sr & 0x80000000) {
+ qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
+ "CNTLR_SPEC=0x%05x\n", i,
+ sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
+ sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
+ (uint32_t)(sr & 0xFFFFF));
+ } else {
+ qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
+ sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
+ sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
+ (uint32_t)(sr & 0x00FFFFFF));
+ }
+ }
+
+ qemu_printf("\nBATs:\n");
+ mmu6xx_dump_BATs(env, ACCESS_INT);
+ mmu6xx_dump_BATs(env, ACCESS_CODE);
+
+ if (env->id_tlbs != 1) {
+ qemu_printf("ERROR: 6xx MMU should have separated TLB"
+ " for code and data\n");
+ }
+
+ qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
+
+ for (type = 0; type < 2; type++) {
+ for (way = 0; way < env->nb_ways; way++) {
+ for (entry = env->nb_tlb * type + env->tlb_per_way * way;
+ entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
+ entry++) {
+
+ tlb = &env->tlb.tlb6[entry];
+ qemu_printf("%s TLB %02d/%02d way:%d %s ["
+ TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
+ type ? "code" : "data", entry % env->nb_tlb,
+ env->nb_tlb, way,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
+ }
+ }
+ }
+}
+
+void dump_mmu(CPUPPCState *env)
+{
+ switch (env->mmu_model) {
+ case POWERPC_MMU_BOOKE:
+ mmubooke_dump_mmu(env);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ mmubooke206_dump_mmu(env);
+ break;
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ mmu6xx_dump_mmu(env);
+ break;
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_07:
+ dump_slb(env_archcpu(env));
+ break;
+ case POWERPC_MMU_3_00:
+ if (ppc64_v3_radix(env_archcpu(env))) {
+ qemu_log_mask(LOG_UNIMP, "%s: dumping the radix MMU is not "
+ "implemented\n", __func__);
+ } else {
+ dump_slb(env_archcpu(env));
+ }
+ break;
+#endif
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
+ }
+}
+
+static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
+ MMUAccessType access_type)
+{
+ int in_plb, ret;
+
+ ctx->raddr = eaddr;
+ ctx->prot = PAGE_READ | PAGE_EXEC;
+ ret = 0;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_REAL:
+ case POWERPC_MMU_BOOKE:
+ ctx->prot |= PAGE_WRITE;
+ break;
+
+ case POWERPC_MMU_SOFT_4xx_Z:
+ if (unlikely(msr_pe != 0)) {
+ /*
+ * 403 family add some particular protections, using
+ * PBL/PBU registers for accesses with no translation.
+ */
+ in_plb =
+ /* Check PLB validity */
+ (env->pb[0] < env->pb[1] &&
+ /* and address in plb area */
+ eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
+ (env->pb[2] < env->pb[3] &&
+ eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
+ if (in_plb ^ msr_px) {
+ /* Access in protected area */
+ if (access_type == MMU_DATA_STORE) {
+ /* Access is not allowed */
+ ret = -2;
+ }
+ } else {
+ /* Read-write access is allowed */
+ ctx->prot |= PAGE_WRITE;
+ }
+ }
+ break;
+
+ default:
+ /* Caller's checks mean we should never get here for other models */
+ abort();
+ return -1;
+ }
+
+ return ret;
+}
+
+int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr,
+ MMUAccessType access_type, int type,
+ int mmu_idx)
+{
+ int ret = -1;
+ bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
+ || (type != ACCESS_CODE && msr_dr == 0);
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, access_type);
+ } else {
+ /* Try to find a BAT */
+ if (env->nb_BATs != 0) {
+ ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
+ }
+ if (ret < 0) {
+ /* We didn't match any BAT entry or don't have BATs */
+ ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
+ }
+ }
+ break;
+
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, access_type);
+ } else {
+ ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
+ }
+ break;
+ case POWERPC_MMU_BOOKE:
+ ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
+ mmu_idx);
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_REAL:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, access_type);
+ } else {
+ cpu_abort(env_cpu(env),
+ "PowerPC in real mode does not do any translation\n");
+ }
+ return ret;
+ default:
+ cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ uint32_t epid;
+ bool as, pr;
+ uint32_t missed_tid = 0;
+ bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
+
+ if (access_type == MMU_INST_FETCH) {
+ as = msr_ir;
+ }
+ env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
+ env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
+ env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
+ env->spr[SPR_BOOKE_MAS3] = 0;
+ env->spr[SPR_BOOKE_MAS6] = 0;
+ env->spr[SPR_BOOKE_MAS7] = 0;
+
+ /* AS */
+ if (as) {
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
+ env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
+ }
+
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
+ env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
+
+ if (!use_epid) {
+ switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
+ case MAS4_TIDSELD_PID0:
+ missed_tid = env->spr[SPR_BOOKE_PID];
+ break;
+ case MAS4_TIDSELD_PID1:
+ missed_tid = env->spr[SPR_BOOKE_PID1];
+ break;
+ case MAS4_TIDSELD_PID2:
+ missed_tid = env->spr[SPR_BOOKE_PID2];
+ break;
+ }
+ env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
+ } else {
+ missed_tid = epid;
+ env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
+ }
+ env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);
+
+ /* next victim logic */
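+ /* MAS0[ESEL] gets the current victim way, MAS0[NV] the following one */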
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
+ env->last_way++;
+ env->last_way &= booke206_tlb_ways(env, 0) - 1;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+}
+
+/* Perform address translation */
+/* TODO: Split this by mmu_model. */
+static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
+ MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp,
+ int mmu_idx, bool guest_visible)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ mmu_ctx_t ctx;
+ int type;
+ int ret;
+
+ if (access_type == MMU_INST_FETCH) {
+ /* code access */
+ type = ACCESS_CODE;
+ } else if (guest_visible) {
+ /* data access */
+ type = env->access_type;
+ } else {
+ type = ACCESS_INT;
+ }
+
+ ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
+ type, mmu_idx);
+ if (ret == 0) {
+ *raddrp = ctx.raddr;
+ *protp = ctx.prot;
+ *psizep = TARGET_PAGE_BITS;
+ return true;
+ }
+
+ if (guest_visible) {
+ LOG_MMU_STATE(cs);
+ if (type == ACCESS_CODE) {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ cs->exception_index = POWERPC_EXCP_IFTLB;
+ env->error_code = 1 << 18;
+ env->spr[SPR_IMISS] = eaddr;
+ env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
+ goto tlb_miss;
+ case POWERPC_MMU_SOFT_74xx:
+ cs->exception_index = POWERPC_EXCP_IFTLB;
+ goto tlb_miss_74xx;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ cs->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = eaddr;
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ break;
+ case POWERPC_MMU_BOOKE206:
+ booke206_update_mas_tlb_miss(env, eaddr, MMU_INST_FETCH, mmu_idx);
+ /* fall through */
+ case POWERPC_MMU_BOOKE:
+ cs->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = eaddr;
+ env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
+ break;
+ case POWERPC_MMU_MPC8xx:
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
+ case POWERPC_MMU_REAL:
+ cpu_abort(cs, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ default:
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ break;
+ case -3:
+ /* No execute protection violation */
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->spr[SPR_BOOKE_ESR] = 0x00000000;
+ }
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+ case -4:
+ /* Direct store exception */
+ /* No code fetch is allowed in direct-store areas */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ if (access_type == MMU_DATA_STORE) {
+ cs->exception_index = POWERPC_EXCP_DSTLB;
+ env->error_code = 1 << 16;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DLTLB;
+ env->error_code = 0;
+ }
+ env->spr[SPR_DMISS] = eaddr;
+ env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
+ tlb_miss:
+ env->error_code |= ctx.key << 19;
+ env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
+ get_pteg_offset32(cpu, ctx.hash[0]);
+ env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
+ get_pteg_offset32(cpu, ctx.hash[1]);
+ break;
+ case POWERPC_MMU_SOFT_74xx:
+ if (access_type == MMU_DATA_STORE) {
+ cs->exception_index = POWERPC_EXCP_DSTLB;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DLTLB;
+ }
+ tlb_miss_74xx:
+ /* Implement LRU algorithm */
+ env->error_code = ctx.key << 19;
+ env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) |
+ ((env->last_way + 1) & (env->nb_ways - 1));
+ env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ cs->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_40x_ESR] = 0x00800000;
+ } else {
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ }
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
+ case POWERPC_MMU_BOOKE206:
+ booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
+ /* fall through */
+ case POWERPC_MMU_BOOKE:
+ cs->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = eaddr;
+ env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(cs, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ default:
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ if (env->mmu_model == POWERPC_MMU_SOFT_4xx
+ || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
+ env->spr[SPR_40x_DEAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_40x_ESR] |= 0x00800000;
+ }
+ } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->spr[SPR_BOOKE_DEAR] = eaddr;
+ env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
+ } else {
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x0A000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ break;
+ case -4:
+ /* Direct store exception */
+ switch (type) {
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = POWERPC_EXCP_ALIGN_FP;
+ env->spr[SPR_DAR] = eaddr;
+ break;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x06000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04000000;
+ }
+ break;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (access_type == MMU_DATA_STORE) {
+ env->spr[SPR_DSISR] = 0x06100000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04100000;
+ }
+ break;
+ default:
+ printf("DSI: invalid exception (%d)\n", ret);
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code =
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
+ env->spr[SPR_DAR] = eaddr;
+ break;
+ }
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+/*****************************************************************************/
+
+bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp,
+ int mmu_idx, bool guest_visible)
+{
+ switch (cpu->env.mmu_model) {
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_3_00:
+ if (ppc64_v3_radix(cpu)) {
+ return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
+ psizep, protp, mmu_idx, guest_visible);
+ }
+ /* fall through */
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_07:
+ return ppc_hash64_xlate(cpu, eaddr, access_type,
+ raddrp, psizep, protp, mmu_idx, guest_visible);
+#endif
+
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
+ psizep, protp, mmu_idx, guest_visible);
+
+ default:
+ return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
+ psizep, protp, mmu_idx, guest_visible);
+ }
+}
+
+hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ hwaddr raddr;
+ int s, p;
+
+ /*
+ * Some MMUs have separate TLBs for code and data. If we only
+ * try an MMU_DATA_LOAD, we may not be able to read instructions
+ * mapped by code TLBs, so we also try a MMU_INST_FETCH.
+ */
+ if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
+ cpu_mmu_index(&cpu->env, false), false) ||
+ ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
+ cpu_mmu_index(&cpu->env, true), false)) {
+ return raddr & TARGET_PAGE_MASK;
+ }
+ return -1;
+}
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
new file mode 100644
index 000000000..e0c4950dd
--- /dev/null
+++ b/target/ppc/mmu_helper.c
@@ -0,0 +1,1382 @@
+/*
+ * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "mmu-hash64.h"
+#include "mmu-hash32.h"
+#include "exec/exec-all.h"
+#include "exec/log.h"
+#include "helper_regs.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
+#include "internal.h"
+#include "mmu-book3s-v3.h"
+#include "mmu-radix64.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+/* #define DEBUG_BATS */
+/* #define DEBUG_SOFTWARE_TLB */
+/* #define DUMP_PAGE_TABLES */
+/* #define FLUSH_ALL_TLBS */
+
+#ifdef DEBUG_SOFTWARE_TLB
+# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_SWTLB(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+/*****************************************************************************/
+/* PowerPC MMU emulation */
+
+/* Software driven TLB helpers */
+static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr, max;
+
+ /* LOG_SWTLB("Invalidate all TLBs\n"); */
+ /* Invalidate all defined software TLB */
+ max = env->nb_tlb;
+ if (env->id_tlbs == 1) {
+ max *= 2;
+ }
+ for (nr = 0; nr < max; nr++) {
+ tlb = &env->tlb.tlb6[nr];
+ pte_invalidate(&tlb->pte0);
+ }
+ tlb_flush(env_cpu(env));
+}
+
+static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
+ target_ulong eaddr,
+ int is_code, int match_epn)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ CPUState *cs = env_cpu(env);
+ ppc6xx_tlb_t *tlb;
+ int way, nr;
+
+ /* Invalidate ITLB + DTLB, all ways */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
+ tlb = &env->tlb.tlb6[nr];
+ if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
+ LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
+ env->nb_tlb, eaddr);
+ pte_invalidate(&tlb->pte0);
+ tlb_flush_page(cs, tlb->EPN);
+ }
+ }
+#else
+ /* XXX: the PowerPC specification says this is valid as well */
+ ppc6xx_tlb_invalidate_all(env);
+#endif
+}
+
+static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
+ target_ulong eaddr, int is_code)
+{
+ ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
+}
+
+static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
+ int is_code, target_ulong pte0, target_ulong pte1)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr;
+
+ nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
+ tlb = &env->tlb.tlb6[nr];
+ LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+ /* Invalidate any pending reference in QEMU for this virtual address */
+ ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
+ tlb->pte0 = pte0;
+ tlb->pte1 = pte1;
+ tlb->EPN = EPN;
+ /* Store last way for LRU mechanism */
+ env->last_way = way;
+}
+
+/* Generic TLB search function for PowerPC embedded implementations */
+static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
+ uint32_t pid)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret;
+
+ /* Default return value is no match */
+ ret = -1;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Helpers specific to PowerPC 40x implementations */
+static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
+{
+ ppcemb_tlb_t *tlb;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb_flush(env_cpu(env));
+}
+
+static void booke206_flush_tlb(CPUPPCState *env, int flags,
+ const int check_iprot)
+{
+ int tlb_size;
+ int i, j;
+ ppcmas_tlb_t *tlb = env->tlb.tlbm;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ if (flags & (1 << i)) {
+ tlb_size = booke206_tlb_size(env, i);
+ for (j = 0; j < tlb_size; j++) {
+ if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
+ tlb[j].mas1 &= ~MAS1_VALID;
+ }
+ }
+ }
+ tlb += booke206_tlb_size(env, i);
+ }
+
+ tlb_flush(env_cpu(env));
+}
+
+static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, MMUAccessType access_type,
+ int type)
+{
+ return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
+}
+
+/*****************************************************************************/
+/* BATs management */
+#if !defined(FLUSH_ALL_TLBS)
+static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
+ target_ulong mask)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong base, end, page;
+
+ base = BATu & ~0x0001FFFF;
+ end = base + mask + 0x00020000;
+ if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
+ /* Flushing 1024 4K pages is slower than a complete flush */
+ LOG_BATS("Flush all BATs\n");
+ tlb_flush(cs);
+ LOG_BATS("Flush done\n");
+ return;
+ }
+ LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
+ TARGET_FMT_lx ")\n", base, end, mask);
+ for (page = base; page != end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ LOG_BATS("Flush done\n");
+}
+#endif
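Editorial aside: a minimal standalone sketch of the range computation do_invalidate_BAT() performs above. The 0x0001FFFF and 0x00020000 constants are taken from the function itself; the sample BATu and mask values, and the 4 KiB TARGET_PAGE_BITS, are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12            /* assumed 4 KiB target pages */

    int main(void)
    {
        uint32_t batu = 0x900A1FFE;          /* hypothetical upper BAT value */
        uint32_t mask = 0x000E0000;          /* hypothetical BL-derived mask */
        uint32_t base = batu & ~0x0001FFFFu; /* strip low bits, keep BEPI */
        uint32_t end  = base + mask + 0x00020000u;
        uint32_t pages = (end - base) >> TARGET_PAGE_BITS;

        /* Same heuristic as above: > 1024 pages means a full TLB flush */
        printf("BAT covers %u pages: %s\n", pages,
               pages > 1024 ? "full flush" : "per-page flush");
        return 0;
    }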
+
+static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
+ target_ulong value)
+{
+ LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
+ nr, ul == 0 ? 'u' : 'l', value, env->nip);
+}
+
+void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+ mask = (value << 15) & 0x0FFE0000UL;
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#endif
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
+ (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ tlb_flush(env_cpu(env));
+#endif
+ }
+}
+
+void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ dump_store_bat(env, 'I', 1, nr, value);
+ env->IBAT[1][nr] = value;
+}
+
+void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+
+ dump_store_bat(env, 'D', 0, nr, value);
+ if (env->DBAT[0][nr] != value) {
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#endif
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->DBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
+ (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#else
+ tlb_flush(env_cpu(env));
+#endif
+ }
+}
+
+void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ dump_store_bat(env, 'D', 1, nr, value);
+ env->DBAT[1][nr] = value;
+}
+
+void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ if (env->IBAT[1][nr] & 0x40) {
+ /* Invalidate BAT only if it is valid */
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ /*
+ * When storing valid upper BAT, mask BEPI and BRPN and
+ * invalidate all TLBs covered by this BAT
+ */
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[0][nr] = env->IBAT[0][nr];
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval) {
+ tlb_flush(env_cpu(env));
+ }
+#endif
+ }
+}
+
+void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ target_ulong mask;
+#else
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 1, nr, value);
+ if (env->IBAT[1][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ if (value & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (value << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ env->IBAT[1][nr] = value;
+ env->DBAT[1][nr] = value;
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval) {
+ tlb_flush(env_cpu(env));
+ }
+#endif
+ }
+}
+
+/*****************************************************************************/
+/* TLB management */
+void ppc_tlb_invalidate_all(CPUPPCState *env)
+{
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ env->tlb_need_flush = 0;
+ tlb_flush(env_cpu(env));
+ } else
+#endif /* defined(TARGET_PPC64) */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ ppc4xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE:
+ tlb_flush(env_cpu(env));
+ break;
+ case POWERPC_MMU_BOOKE206:
+ booke206_flush_tlb(env, -1, 0);
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ env->tlb_need_flush = 0;
+ tlb_flush(env_cpu(env));
+ break;
+ default:
+ /* XXX: TODO */
+ cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
+ break;
+ }
+}
+
+void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ addr &= TARGET_PAGE_MASK;
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ /* tlbie invalidates TLBs for all segments */
+ /*
+ * XXX: given the fact that there are too many segments to invalidate,
+ * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
+ * we just invalidate all TLBs
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ } else
+#endif /* defined(TARGET_PPC64) */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_virt(env, addr, 0);
+ if (env->id_tlbs == 1) {
+ ppc6xx_tlb_invalidate_virt(env, addr, 1);
+ }
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ /*
+ * Actual CPUs invalidate entire congruence classes based on
+ * the geometry of their TLBs, and some OSes take that into
+ * account; we just mark the TLB to be flushed later (context
+ * synchronizing event or sync instruction on 32-bit).
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ break;
+ default:
+ /* Should never reach here with other MMU models */
+ assert(0);
+ }
+#else
+ ppc_tlb_invalidate_all(env);
+#endif
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+
+/* Segment registers load and store */
+target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
+{
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ /* XXX */
+ return 0;
+ }
+#endif
+ return env->sr[sr_num];
+}
+
+void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
+{
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
+ (int)srnum, value, env->sr[srnum]);
+#if defined(TARGET_PPC64)
+ if (mmu_is_64bit(env->mmu_model)) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ uint64_t esid, vsid;
+
+ /* ESID = srnum */
+ esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
+
+ /* VSID = VSID */
+ vsid = (value & 0xfffffff) << 12;
+ /* flags = flags */
+ vsid |= ((value >> 27) & 0xf) << 8;
+
+ ppc_store_slb(cpu, srnum, esid, vsid);
+ } else
+#endif
+ if (env->sr[srnum] != value) {
+ env->sr[srnum] = value;
+ /*
+ * Invalidating 256 MB of virtual memory in 4 kB pages takes far
+ * longer than flushing the whole TLB.
+ */
+#if !defined(FLUSH_ALL_TLBS) && 0
+ {
+ target_ulong page, end;
+ /* Invalidate 256 MB of virtual memory */
+ page = (16 << 20) * srnum;
+ end = page + (16 << 20);
+ for (; page != end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(env_cpu(env), page);
+ }
+ }
+#else
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+#endif
+ }
+}
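Editorial aside: a sketch of the mtsr-to-SLB packing done above when the MMU is 64-bit. SLB_ESID_V is assumed to match the value in QEMU's mmu-hash64.h, and the sample register values are invented.

    #include <stdint.h>
    #include <stdio.h>

    #define SLB_ESID_V 0x0000000008000000ULL  /* assumed, from mmu-hash64.h */

    int main(void)
    {
        uint32_t srnum = 0x3;        /* hypothetical segment register number */
        uint32_t value = 0x02345678; /* hypothetical SR contents */

        /* the 4-bit segment number becomes the ESID, marked valid */
        uint64_t esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
        /* the low 28 bits become the VSID; SR flag bits land in the flags */
        uint64_t vsid = ((uint64_t)(value & 0xfffffff) << 12)
                        | (((uint64_t)(value >> 27) & 0xf) << 8);

        printf("esid=0x%llx vsid=0x%llx\n",
               (unsigned long long)esid, (unsigned long long)vsid);
        return 0;
    }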
+
+/* TLB management */
+void helper_tlbia(CPUPPCState *env)
+{
+ ppc_tlb_invalidate_all(env);
+}
+
+void helper_tlbie(CPUPPCState *env, target_ulong addr)
+{
+ ppc_tlb_invalidate_one(env, addr);
+}
+
+void helper_tlbiva(CPUPPCState *env, target_ulong addr)
+{
+ /* tlbiva instruction only exists on BookE */
+ assert(env->mmu_model == POWERPC_MMU_BOOKE);
+ /* XXX: TODO */
+ cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
+}
+
+/* Software driven TLBs management */
+/* PowerPC 602/603 software TLB load instructions helpers */
+static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_RPA];
+ if (is_code) {
+ CMP = env->spr[SPR_ICMP];
+ EPN = env->spr[SPR_IMISS];
+ } else {
+ CMP = env->spr[SPR_DCMP];
+ EPN = env->spr[SPR_DMISS];
+ }
+ way = (env->spr[SPR_SRR1] >> 17) & 1;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
+{
+ do_6xx_tlb(env, EPN, 0);
+}
+
+void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
+{
+ do_6xx_tlb(env, EPN, 1);
+}
+
+/* PowerPC 74xx software TLB load instructions helpers */
+static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_PTELO];
+ CMP = env->spr[SPR_PTEHI];
+ EPN = env->spr[SPR_TLBMISS] & ~0x3;
+ way = env->spr[SPR_TLBMISS] & 0x3;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
+{
+ do_74xx_tlb(env, EPN, 0);
+}
+
+void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
+{
+ do_74xx_tlb(env, EPN, 1);
+}
+
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
+{
+ mmu_ctx_t ctx;
+ int nb_BATs;
+ target_ulong ret = 0;
+
+ /*
+ * We don't have to generate many instances of this instruction,
+ * as rac is supervisor only.
+ *
+ * XXX: FIX THIS: Pretend we have no BAT
+ */
+ nb_BATs = env->nb_BATs;
+ env->nb_BATs = 0;
+ if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
+ ret = ctx.raddr;
+ }
+ env->nb_BATs = nb_BATs;
+ return ret;
+}
+
+static inline target_ulong booke_tlb_to_page_size(int size)
+{
+ return 1024 << (2 * size);
+}
+
+static inline int booke_page_size_to_tlb(target_ulong page_size)
+{
+ int size;
+
+ switch (page_size) {
+ case 0x00000400UL:
+ size = 0x0;
+ break;
+ case 0x00001000UL:
+ size = 0x1;
+ break;
+ case 0x00004000UL:
+ size = 0x2;
+ break;
+ case 0x00010000UL:
+ size = 0x3;
+ break;
+ case 0x00040000UL:
+ size = 0x4;
+ break;
+ case 0x00100000UL:
+ size = 0x5;
+ break;
+ case 0x00400000UL:
+ size = 0x6;
+ break;
+ case 0x01000000UL:
+ size = 0x7;
+ break;
+ case 0x04000000UL:
+ size = 0x8;
+ break;
+ case 0x10000000UL:
+ size = 0x9;
+ break;
+ case 0x40000000UL:
+ size = 0xA;
+ break;
+#if defined(TARGET_PPC64)
+ case 0x000100000000ULL:
+ size = 0xB;
+ break;
+ case 0x000400000000ULL:
+ size = 0xC;
+ break;
+ case 0x001000000000ULL:
+ size = 0xD;
+ break;
+ case 0x004000000000ULL:
+ size = 0xE;
+ break;
+ case 0x010000000000ULL:
+ size = 0xF;
+ break;
+#endif
+ default:
+ size = -1;
+ break;
+ }
+
+ return size;
+}
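Editorial aside: a quick self-check of the size encoding the two helpers above implement, page_size = 1 KiB << (2 * size), so each size step quadruples the page.

    #include <assert.h>
    #include <stdint.h>

    static uint64_t tlb_to_page_size(int size)
    {
        return 1024ULL << (2 * size);   /* mirrors booke_tlb_to_page_size() */
    }

    int main(void)
    {
        assert(tlb_to_page_size(0) == 0x00000400); /* 1 KiB  */
        assert(tlb_to_page_size(1) == 0x00001000); /* 4 KiB  */
        assert(tlb_to_page_size(7) == 0x01000000); /* 16 MiB */
        return 0;
    }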
+
+/* Helpers for 4xx TLB management */
+#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
+
+#define PPC4XX_TLBHI_V 0x00000040
+#define PPC4XX_TLBHI_E 0x00000020
+#define PPC4XX_TLBHI_SIZE_MIN 0
+#define PPC4XX_TLBHI_SIZE_MAX 7
+#define PPC4XX_TLBHI_SIZE_DEFAULT 1
+#define PPC4XX_TLBHI_SIZE_SHIFT 7
+#define PPC4XX_TLBHI_SIZE_MASK 0x00000007
+
+#define PPC4XX_TLBLO_EX 0x00000200
+#define PPC4XX_TLBLO_WR 0x00000100
+#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
+#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
+
+target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ ret = tlb->EPN;
+ if (tlb->prot & PAGE_VALID) {
+ ret |= PPC4XX_TLBHI_V;
+ }
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
+ size = PPC4XX_TLBHI_SIZE_DEFAULT;
+ }
+ ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
+ env->spr[SPR_40x_PID] = tlb->PID;
+ return ret;
+}
+
+target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ ret = tlb->RPN;
+ if (tlb->prot & PAGE_EXEC) {
+ ret |= PPC4XX_TLBLO_EX;
+ }
+ if (tlb->prot & PAGE_WRITE) {
+ ret |= PPC4XX_TLBLO_WR;
+ }
+ return ret;
+}
+
+void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
+ target_ulong val)
+{
+ CPUState *cs = env_cpu(env);
+ ppcemb_tlb_t *tlb;
+ target_ulong page, end;
+
+ LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ /* Invalidate previous TLB (if it's valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ }
+ tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
+ & PPC4XX_TLBHI_SIZE_MASK);
+ /*
+ * We cannot handle TLB size < TARGET_PAGE_SIZE.
+ * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
+ */
+ if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
+ cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
+ "are not supported (%d)\n"
+ "Please implement TARGET_PAGE_BITS_VARY\n",
+ tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
+ }
+ tlb->EPN = val & ~(tlb->size - 1);
+ if (val & PPC4XX_TLBHI_V) {
+ tlb->prot |= PAGE_VALID;
+ if (val & PPC4XX_TLBHI_E) {
+ /* XXX: TO BE FIXED */
+ cpu_abort(cs,
+ "Little-endian TLB entries are not supported by now\n");
+ }
+ } else {
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb->PID = env->spr[SPR_40x_PID]; /* PID */
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ /* Invalidate new TLB (if valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ }
+}
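Editorial aside: the EPN stored above is aligned down to the entry's own page size rather than to TARGET_PAGE_SIZE; the following sketch (with invented values) makes that concrete.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t size = 0x4000;             /* hypothetical 16 KiB entry */
        uint64_t val  = 0x12347ACE;         /* hypothetical TLBHI value */
        uint64_t epn  = val & ~(size - 1);  /* align down to the TLB size */

        printf("EPN=0x%llx\n", (unsigned long long)epn); /* 0x12344000 */
        return 0;
    }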
+
+void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
+ target_ulong val)
+{
+ ppcemb_tlb_t *tlb;
+
+ LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
+ tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
+ tlb->prot = PAGE_READ;
+ if (val & PPC4XX_TLBLO_EX) {
+ tlb->prot |= PAGE_EXEC;
+ }
+ if (val & PPC4XX_TLBLO_WR) {
+ tlb->prot |= PAGE_WRITE;
+ }
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+}
+
+target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
+}
+
+/* PowerPC 440 TLB management */
+void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
+ target_ulong value)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong EPN, RPN, size;
+ int do_flush_tlbs;
+
+ LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
+ __func__, word, (int)entry, value);
+ do_flush_tlbs = 0;
+ entry &= 0x3F;
+ tlb = &env->tlb.tlbe[entry];
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ EPN = value & 0xFFFFFC00;
+ if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
+ do_flush_tlbs = 1;
+ }
+ tlb->EPN = EPN;
+ size = booke_tlb_to_page_size((value >> 4) & 0xF);
+ if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
+ do_flush_tlbs = 1;
+ }
+ tlb->size = size;
+ tlb->attr &= ~0x1;
+ tlb->attr |= (value >> 8) & 1;
+ if (value & 0x200) {
+ tlb->prot |= PAGE_VALID;
+ } else {
+ if (tlb->prot & PAGE_VALID) {
+ tlb->prot &= ~PAGE_VALID;
+ do_flush_tlbs = 1;
+ }
+ }
+ tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
+ if (do_flush_tlbs) {
+ tlb_flush(env_cpu(env));
+ }
+ break;
+ case 1:
+ RPN = value & 0xFFFFFC0F;
+ if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
+ tlb_flush(env_cpu(env));
+ }
+ tlb->RPN = RPN;
+ break;
+ case 2:
+ tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
+ tlb->prot = tlb->prot & PAGE_VALID;
+ if (value & 0x1) {
+ tlb->prot |= PAGE_READ << 4;
+ }
+ if (value & 0x2) {
+ tlb->prot |= PAGE_WRITE << 4;
+ }
+ if (value & 0x4) {
+ tlb->prot |= PAGE_EXEC << 4;
+ }
+ if (value & 0x8) {
+ tlb->prot |= PAGE_READ;
+ }
+ if (value & 0x10) {
+ tlb->prot |= PAGE_WRITE;
+ }
+ if (value & 0x20) {
+ tlb->prot |= PAGE_EXEC;
+ }
+ break;
+ }
+}
+
+target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
+ target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= 0x3F;
+ tlb = &env->tlb.tlbe[entry];
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ ret = tlb->EPN;
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < 0 || size > 0xF) {
+ size = 1;
+ }
+ ret |= size << 4;
+ if (tlb->attr & 0x1) {
+ ret |= 0x100;
+ }
+ if (tlb->prot & PAGE_VALID) {
+ ret |= 0x200;
+ }
+ env->spr[SPR_440_MMUCR] &= ~0x000000FF;
+ env->spr[SPR_440_MMUCR] |= tlb->PID;
+ break;
+ case 1:
+ ret = tlb->RPN;
+ break;
+ case 2:
+ ret = tlb->attr & ~0x1;
+ if (tlb->prot & (PAGE_READ << 4)) {
+ ret |= 0x1;
+ }
+ if (tlb->prot & (PAGE_WRITE << 4)) {
+ ret |= 0x2;
+ }
+ if (tlb->prot & (PAGE_EXEC << 4)) {
+ ret |= 0x4;
+ }
+ if (tlb->prot & PAGE_READ) {
+ ret |= 0x8;
+ }
+ if (tlb->prot & PAGE_WRITE) {
+ ret |= 0x10;
+ }
+ if (tlb->prot & PAGE_EXEC) {
+ ret |= 0x20;
+ }
+ break;
+ }
+ return ret;
+}
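Editorial aside: an illustrative decoder mirroring the word-2 handling of the two 440 helpers above; the six access-control bits map to prot flags with one privilege level in the low nibble and the other shifted left by 4. The PAGE_* values are assumed to match QEMU's 1/2/4 encoding.

    #include <stdint.h>
    #include <stdio.h>

    enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 }; /* assumed values */

    static int decode_440_prot(uint32_t word2)
    {
        int prot = 0;

        if (word2 & 0x01) prot |= PAGE_READ  << 4;
        if (word2 & 0x02) prot |= PAGE_WRITE << 4;
        if (word2 & 0x04) prot |= PAGE_EXEC  << 4;
        if (word2 & 0x08) prot |= PAGE_READ;
        if (word2 & 0x10) prot |= PAGE_WRITE;
        if (word2 & 0x20) prot |= PAGE_EXEC;
        return prot;
    }

    int main(void)
    {
        printf("prot=0x%x\n", decode_440_prot(0x3F)); /* all six bits set */
        return 0;
    }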
+
+target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
+}
+
+/* PowerPC BookE 2.06 TLB management */
+
+static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
+{
+ uint32_t tlbncfg = 0;
+ int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
+ int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
+ int tlb;
+
+ tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
+ tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
+
+ if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
+ cpu_abort(env_cpu(env), "we don't support HES yet\n");
+ }
+
+ return booke206_get_tlbm(env, tlb, ea, esel);
+}
+
+void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
+{
+ env->spr[pidn] = pid;
+ /* changing PIDs means we're in a different address space now */
+ tlb_flush(env_cpu(env));
+}
+
+void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
+ tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
+}
+void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
+ tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
+}
+
+static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+ if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
+ tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
+ } else {
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void helper_booke206_tlbwe(CPUPPCState *env)
+{
+ uint32_t tlbncfg, tlbn;
+ ppcmas_tlb_t *tlb;
+ uint32_t size_tlb, size_ps;
+ target_ulong mask;
+
+ switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
+ case MAS0_WQ_ALWAYS:
+ /* good to go, write that entry */
+ break;
+ case MAS0_WQ_COND:
+ /* XXX check if reserved */
+ if (0) {
+ return;
+ }
+ break;
+ case MAS0_WQ_CLR_RSRV:
+ /* XXX clear entry */
+ return;
+ default:
+ /* no idea what to do */
+ return;
+ }
+
+ if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
+ !msr_gs) {
+ /* XXX we don't support direct LRAT setting yet */
+ fprintf(stderr, "cpu: LRAT setting is not supported yet\n");
+ return;
+ }
+
+ tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
+ tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+
+ tlb = booke206_cur_tlb(env);
+
+ if (!tlb) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ }
+
+ /* check that we support the targeted size */
+ size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+ size_ps = booke206_tlbnps(env, tlbn);
+ if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
+ !(size_ps & (1 << size_tlb))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ }
+
+ if (msr_gs) {
+ cpu_abort(env_cpu(env), "missing HV implementation\n");
+ }
+
+ if (tlb->mas1 & MAS1_VALID) {
+ /*
+ * Invalidate the page in QEMU TLB if it was a valid entry.
+ *
+ * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
+ * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
+ * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
+ *
+ * "Note that when an L2 TLB entry is written, it may be displacing an
+ * already valid entry in the same L2 TLB location (a victim). If a
+ * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
+ * TLB entry is automatically invalidated."
+ */
+ flush_page(env, tlb);
+ }
+
+ tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
+ env->spr[SPR_BOOKE_MAS3];
+ tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
+
+ if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
+ /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
+ booke206_fixed_size_tlbn(env, tlbn, tlb);
+ } else {
+ if (!(tlbncfg & TLBnCFG_AVAIL)) {
+ /* force !AVAIL TLB entries to the correct page size */
+ tlb->mas1 &= ~MAS1_TSIZE_MASK;
+ /* XXX can be configured in MMUCSR0 */
+ tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
+ }
+ }
+
+ /* Make a mask from TLB size to discard invalid bits in EPN field */
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ /* Add a mask for page attributes */
+ mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
+
+ if (!msr_cm) {
+ /*
+ * Executing a tlbwe instruction in 32-bit mode will set bits
+ * 0:31 of the TLB EPN field to zero.
+ */
+ mask &= 0xffffffff;
+ }
+
+ tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
+
+ if (!(tlbncfg & TLBnCFG_IPROT)) {
+ /* no IPROT supported by TLB */
+ tlb->mas1 &= ~MAS1_IPROT;
+ }
+
+ flush_page(env, tlb);
+}
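Editorial aside: a small sketch, with invented constants, of the MAS2 mask built above: the page-size mask keeps only the EPN bits significant for the entry, then the attribute bits are allowed through.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t page_size = 0x100000;         /* hypothetical 1 MiB entry */
        uint64_t attrs = 0x7F;                 /* stand-in for ACM|VLE|WIMGE */
        uint64_t mask = ~(page_size - 1) | attrs;
        uint64_t mas2 = 0x0000000012345FFFULL; /* hypothetical MAS2 value */

        /* bits below the page boundary vanish, attribute bits survive */
        printf("mas2 & mask = 0x%llx\n", (unsigned long long)(mas2 & mask));
        return 0;
    }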
+
+static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+ int tlbn = booke206_tlbm_to_tlbn(env, tlb);
+ int way = booke206_tlbm_to_way(env, tlb);
+
+ env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
+ env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+
+ env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
+ env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
+ env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
+ env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
+}
+
+void helper_booke206_tlbre(CPUPPCState *env)
+{
+ ppcmas_tlb_t *tlb = NULL;
+
+ tlb = booke206_cur_tlb(env);
+ if (!tlb) {
+ env->spr[SPR_BOOKE_MAS1] = 0;
+ } else {
+ booke206_tlb_to_mas(env, tlb);
+ }
+}
+
+void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ ppcmas_tlb_t *tlb = NULL;
+ int i, j;
+ hwaddr raddr;
+ uint32_t spid, sas;
+
+ spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
+ sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+
+ if (!tlb) {
+ continue;
+ }
+
+ if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
+ continue;
+ }
+
+ if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+ continue;
+ }
+
+ booke206_tlb_to_mas(env, tlb);
+ return;
+ }
+ }
+
+ /* no entry found, fill with defaults */
+ env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
+ env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
+ env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
+ env->spr[SPR_BOOKE_MAS3] = 0;
+ env->spr[SPR_BOOKE_MAS7] = 0;
+
+ if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
+ }
+
+ env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
+ << MAS1_TID_SHIFT;
+
+ /* next victim logic */
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
+ env->last_way++;
+ env->last_way &= booke206_tlb_ways(env, 0) - 1;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+}
+
+static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
+ vaddr ea)
+{
+ int i;
+ int ways = booke206_tlb_ways(env, tlbn);
+ target_ulong mask;
+
+ for (i = 0; i < ways; i++) {
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
+ if (!tlb) {
+ continue;
+ }
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
+ !(tlb->mas1 & MAS1_IPROT)) {
+ tlb->mas1 &= ~MAS1_VALID;
+ }
+ }
+}
+
+void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
+{
+ CPUState *cs;
+
+ if (address & 0x4) {
+ /* flush all entries */
+ if (address & 0x8) {
+ /* flush all of TLB1 */
+ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
+ } else {
+ /* flush all of TLB0 */
+ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
+ }
+ return;
+ }
+
+ if (address & 0x8) {
+ /* flush TLB1 entries */
+ booke206_invalidate_ea_tlb(env, 1, address);
+ CPU_FOREACH(cs) {
+ tlb_flush(cs);
+ }
+ } else {
+ /* flush TLB0 entries */
+ booke206_invalidate_ea_tlb(env, 0, address);
+ CPU_FOREACH(cs) {
+ tlb_flush_page(cs, address & MAS2_EPN_MASK);
+ }
+ }
+}
+
+void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
+{
+ /* XXX missing LPID handling */
+ booke206_flush_tlb(env, -1, 1);
+}
+
+void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
+{
+ int i, j;
+ int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
+ ppcmas_tlb_t *tlb = env->tlb.tlbm;
+ int tlb_size;
+
+ /* XXX missing LPID handling */
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ tlb_size = booke206_tlb_size(env, i);
+ for (j = 0; j < tlb_size; j++) {
+ if (!(tlb[j].mas1 & MAS1_IPROT) &&
+ ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
+ tlb[j].mas1 &= ~MAS1_VALID;
+ }
+ }
+ tlb += booke206_tlb_size(env, i);
+ }
+ tlb_flush(env_cpu(env));
+}
+
+void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
+{
+ int i, j;
+ ppcmas_tlb_t *tlb;
+ int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
+ int pid = tid >> MAS6_SPID_SHIFT;
+ int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
+ int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
+ /* XXX check for unsupported isize and raise an invalid opcode exception if so */
+ int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
+ /* XXX implement MAV2 handling */
+ bool mav2 = false;
+
+ /* XXX missing LPID handling */
+ /* flush by pid and ea */
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+ if (!tlb) {
+ continue;
+ }
+ if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
+ (tlb->mas1 & MAS1_IPROT) ||
+ ((tlb->mas1 & MAS1_IND) != ind) ||
+ ((tlb->mas8 & MAS8_TGS) != sgs)) {
+ continue;
+ }
+ if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
+ /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
+ continue;
+ }
+ /* XXX e500mc doesn't match SAS, but other cores might */
+ tlb->mas1 &= ~MAS1_VALID;
+ }
+ }
+ tlb_flush(env_cpu(env));
+}
+
+void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
+{
+ int flags = 0;
+
+ if (type & 2) {
+ flags |= BOOKE206_FLUSH_TLB1;
+ }
+
+ if (type & 4) {
+ flags |= BOOKE206_FLUSH_TLB0;
+ }
+
+ booke206_flush_tlb(env, flags, 1);
+}
+
+void helper_check_tlb_flush_local(CPUPPCState *env)
+{
+ check_tlb_flush(env, false);
+}
+
+void helper_check_tlb_flush_global(CPUPPCState *env)
+{
+ check_tlb_flush(env, true);
+}
+
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ hwaddr raddr;
+ int page_size, prot;
+
+ if (ppc_xlate(cpu, eaddr, access_type, &raddr,
+ &page_size, &prot, mmu_idx, !probe)) {
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ prot, mmu_idx, 1UL << page_size);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+ raise_exception_err_ra(&cpu->env, cs->exception_index,
+ cpu->env.error_code, retaddr);
+}
diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c
new file mode 100644
index 000000000..0b805ef6e
--- /dev/null
+++ b/target/ppc/monitor.c
@@ -0,0 +1,165 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "qemu/ctype.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+
+static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ unsigned int u;
+ int i;
+
+ u = 0;
+ for (i = 0; i < 8; i++) {
+ u |= env->crf[i] << (32 - (4 * (i + 1)));
+ }
+
+ return u;
+}
+
+static target_long monitor_get_xer(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return cpu_read_xer(env);
+}
+
+static target_long monitor_get_decr(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return cpu_ppc_load_decr(env);
+}
+
+static target_long monitor_get_tbu(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return cpu_ppc_load_tbu(env);
+}
+
+static target_long monitor_get_tbl(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return cpu_ppc_load_tbl(env);
+}
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env(mon);
+
+ if (!env1) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+ dump_mmu(env1);
+}
+
+const MonitorDef monitor_defs[] = {
+ { "fpscr", offsetof(CPUPPCState, fpscr) },
+ /* Next instruction pointer */
+ { "nip|pc", offsetof(CPUPPCState, nip) },
+ { "lr", offsetof(CPUPPCState, lr) },
+ { "ctr", offsetof(CPUPPCState, ctr) },
+ { "decr", 0, &monitor_get_decr, },
+ { "ccr|cr", 0, &monitor_get_ccr, },
+ { "xer", 0, &monitor_get_xer },
+ /* Machine state register */
+ { "msr", offsetof(CPUPPCState, msr) },
+ { "tbu", 0, &monitor_get_tbu, },
+ { "tbl", 0, &monitor_get_tbl, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+static int ppc_cpu_get_reg_num(const char *numstr, int maxnum, int *pregnum)
+{
+ int regnum;
+ char *endptr = NULL;
+
+ if (!*numstr) {
+ return false;
+ }
+
+ regnum = strtoul(numstr, &endptr, 10);
+ if (*endptr || (regnum >= maxnum)) {
+ return false;
+ }
+ *pregnum = regnum;
+
+ return true;
+}
+
+int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
+{
+ int i, regnum;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ /* General purpose registers */
+ if ((qemu_tolower(name[0]) == 'r') &&
+ ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->gpr), &regnum)) {
+ *pval = env->gpr[regnum];
+ return 0;
+ }
+
+ /* Floating point registers */
+ if ((qemu_tolower(name[0]) == 'f') &&
+ ppc_cpu_get_reg_num(name + 1, 32, &regnum)) {
+ *pval = *cpu_fpr_ptr(env, regnum);
+ return 0;
+ }
+
+ /* Special purpose registers */
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); ++i) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (spr->name && (strcasecmp(name, spr->name) == 0)) {
+ *pval = env->spr[i];
+ return 0;
+ }
+ }
+
+ /* Segment registers */
+#if !defined(CONFIG_USER_ONLY)
+ if ((strncasecmp(name, "sr", 2) == 0) &&
+ ppc_cpu_get_reg_num(name + 2, ARRAY_SIZE(env->sr), &regnum)) {
+ *pval = env->sr[regnum];
+ return 0;
+ }
+#endif
+
+ return -EINVAL;
+}
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
new file mode 100644
index 000000000..739185123
--- /dev/null
+++ b/target/ppc/power8-pmu-regs.c.inc
@@ -0,0 +1,262 @@
+/*
+ * PMU register read/write functions for TCG IBM POWER chips
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+/*
+ * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the
+ * PMCs) has problem state read access.
+ *
+ * Read access is granted for all PMCC values but 0b01, where a
+ * Facility Unavailable Interrupt will occur.
+ */
+static bool spr_groupA_read_allowed(DisasContext *ctx)
+{
+ if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the
+ * PMCs) has problem state write access.
+ *
+ * Write access is granted for PMCC values 0b10 and 0b11. Userspace
+ * writing with PMCC 0b00 will generate a Hypervisor Emulation
+ * Assistance Interrupt. Userspace writing with PMCC 0b01 will
+ * generate a Facility Unavailable Interrupt.
+ */
+static bool spr_groupA_write_allowed(DisasContext *ctx)
+{
+ if (ctx->mmcr0_pmcc0) {
+ return true;
+ }
+
+ if (ctx->mmcr0_pmcc1) {
+ /* PMCC = 0b01 */
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
+ } else {
+ /* PMCC = 0b00 */
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+
+ return false;
+}
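Editorial aside: the two access rules above as a compact truth table, taking pmcc0 as the high PMCC bit — reads fault only for 0b01, writes are allowed only for 0b10 and 0b11.

    #include <stdbool.h>
    #include <stdio.h>

    static bool groupA_read_ok(bool pmcc0, bool pmcc1)
    {
        return !(!pmcc0 && pmcc1);   /* only PMCC=0b01 is refused */
    }

    static bool groupA_write_ok(bool pmcc0, bool pmcc1)
    {
        (void)pmcc1;
        return pmcc0;                /* PMCC=0b10 and 0b11 allowed */
    }

    int main(void)
    {
        for (int v = 0; v < 4; v++) {
            bool p0 = v & 2, p1 = v & 1;
            printf("PMCC=0b%d%d read=%d write=%d\n",
                   p0, p1, groupA_read_ok(p0, p1), groupA_write_ok(p0, p1));
        }
        return 0;
    }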
+
+/*
+ * Helper function to avoid code repetition between MMCR0 and
+ * MMCR2 problem state write functions.
+ *
+ * 'ret' must be freed with tcg_temp_free() by the caller.
+ */
+static TCGv masked_gprn_for_spr_write(int gprn, int sprn,
+ uint64_t spr_mask)
+{
+ TCGv ret = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
+
+ /* 'ret' starts with all mask bits cleared */
+ gen_load_spr(ret, sprn);
+ tcg_gen_andi_tl(ret, ret, ~(spr_mask));
+
+ /* Apply the mask into 'gprn' in a temp var */
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask);
+
+ /* Add the masked gprn bits into 'ret' */
+ tcg_gen_or_tl(ret, ret, t0);
+
+ tcg_temp_free(t0);
+
+ return ret;
+}
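Editorial aside: the same read-modify-write pattern in plain C for illustration; bits selected by the mask come from the guest GPR, everything else keeps its current SPR value.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t masked_write(uint64_t spr, uint64_t gpr, uint64_t mask)
    {
        return (spr & ~mask) | (gpr & mask);  /* same effect as the TCG ops */
    }

    int main(void)
    {
        /* hypothetical values: only the low byte is writable */
        printf("0x%llx\n", (unsigned long long)
               masked_write(0xAABBCCDDULL, 0x11223344ULL, 0xFFULL));
        return 0;
    }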
+
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv t0;
+
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ /*
+ * Filter out all bits but FC, PMAO, and PMAE, according
+ * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
+ * fourth paragraph.
+ */
+ gen_load_spr(t0, SPR_POWER_MMCR0);
+ tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK);
+ tcg_gen_mov_tl(cpu_gpr[gprn], t0);
+
+ tcg_temp_free(t0);
+}
+
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv masked_gprn;
+
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ /*
+ * Filter out all bits but FC, PMAO, and PMAE, according
+ * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
+ * fourth paragraph.
+ */
+ masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0,
+ MMCR0_UREG_MASK);
+ gen_store_spr(SPR_POWER_MMCR0, masked_gprn);
+
+ tcg_temp_free(masked_gprn);
+}
+
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv t0;
+
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ /*
+ * On read, filter out all bits that are not FCnP0 bits.
+ * When MMCR0[PMCC] is set to 0b10 or 0b11, providing
+ * problem state programs read/write access to MMCR2,
+ * only the FCnP0 bits can be accessed. All other bits are
+ * not changed when mtspr is executed in problem state, and
+ * all other bits return 0s when mfspr is executed in problem
+ * state, according to ISA v3.1, section 10.4.6 Monitor Mode
+ * Control Register 2, p. 1316, third paragraph.
+ */
+ gen_load_spr(t0, SPR_POWER_MMCR2);
+ tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK);
+ tcg_gen_mov_tl(cpu_gpr[gprn], t0);
+
+ tcg_temp_free(t0);
+}
+
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv masked_gprn;
+
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ /*
+ * Filter the bits that can be written using MMCR2_UREG_MASK,
+ * similar to what is done in spr_write_MMCR0_ureg().
+ */
+ masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2,
+ MMCR2_UREG_MASK);
+ gen_store_spr(SPR_POWER_MMCR2, masked_gprn);
+
+ tcg_temp_free(masked_gprn);
+}
+
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ /*
+ * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
+ * Monitor, and a read attempt results in a Facility Unavailable
+ * Interrupt.
+ */
+ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
+ return;
+ }
+
+ /* The remaining steps are similar to PMCs 1-4 userspace read */
+ spr_read_PMC14_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ spr_write_ureg(ctx, sprn, gprn);
+}
+
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ /*
+ * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
+ * Monitor, and a write attempt results in a Facility Unavailable
+ * Interrupt.
+ */
+ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_FU);
+ return;
+ }
+
+ /* The remaining steps are similar to PMCs 1-4 userspace write */
+ spr_write_PMC14_ureg(ctx, sprn, gprn);
+}
+#else
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
diff --git a/target/ppc/spr_tcg.h b/target/ppc/spr_tcg.h
new file mode 100644
index 000000000..520f1ef23
--- /dev/null
+++ b/target/ppc/spr_tcg.h
@@ -0,0 +1,144 @@
+/*
+ * PowerPC emulation for qemu: read/write callbacks for SPRs
+ *
+ * Copyright (C) 2021 Instituto de Pesquisas Eldorado
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef SPR_TCG_H
+#define SPR_TCG_H
+
+#define SPR_NOACCESS (&spr_noaccess)
+
+/* prototypes for readers and writers for SPRs */
+void spr_noaccess(DisasContext *ctx, int gprn, int sprn);
+void spr_read_generic(DisasContext *ctx, int gprn, int sprn);
+void spr_write_generic(DisasContext *ctx, int sprn, int gprn);
+void spr_read_xer(DisasContext *ctx, int gprn, int sprn);
+void spr_write_xer(DisasContext *ctx, int sprn, int gprn);
+void spr_read_lr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_lr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_ctr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_ctr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_tbl(DisasContext *ctx, int gprn, int sprn);
+void spr_read_tbu(DisasContext *ctx, int gprn, int sprn);
+void spr_read_atbl(DisasContext *ctx, int gprn, int sprn);
+void spr_read_atbu(DisasContext *ctx, int gprn, int sprn);
+void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn);
+void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn);
+void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn);
+
+#ifndef CONFIG_USER_ONLY
+void spr_write_generic32(DisasContext *ctx, int sprn, int gprn);
+void spr_write_clear(DisasContext *ctx, int sprn, int gprn);
+void spr_access_nop(DisasContext *ctx, int sprn, int gprn);
+void spr_read_decr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_decr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_tbl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_tbu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_atbl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_atbu(DisasContext *ctx, int sprn, int gprn);
+void spr_read_ibat(DisasContext *ctx, int gprn, int sprn);
+void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn);
+void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn);
+void spr_read_dbat(DisasContext *ctx, int gprn, int sprn);
+void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn);
+void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn);
+void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn);
+void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn);
+void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn);
+void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn);
+void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn);
+void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn);
+void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pir(DisasContext *ctx, int sprn, int gprn);
+void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn);
+void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn);
+void spr_read_thrm(DisasContext *ctx, int gprn, int sprn);
+void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn);
+void spr_write_eplc(DisasContext *ctx, int sprn, int gprn);
+void spr_write_epsc(DisasContext *ctx, int sprn, int gprn);
+void spr_write_mas73(DisasContext *ctx, int sprn, int gprn);
+void spr_read_mas73(DisasContext *ctx, int gprn, int sprn);
+#ifdef TARGET_PPC64
+void spr_read_cfar(DisasContext *ctx, int gprn, int sprn);
+void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_vtb(DisasContext *ctx, int gprn, int sprn);
+void spr_write_vtb(DisasContext *ctx, int sprn, int gprn);
+void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pidr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_hior(DisasContext *ctx, int gprn, int sprn);
+void spr_write_hior(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_pcr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn);
+void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn);
+void spr_write_amr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_uamor(DisasContext *ctx, int sprn, int gprn);
+void spr_write_iamr(DisasContext *ctx, int sprn, int gprn);
+#endif
+#endif
+
+#ifdef TARGET_PPC64
+void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn);
+void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn);
+void spr_read_tar(DisasContext *ctx, int gprn, int sprn);
+void spr_write_tar(DisasContext *ctx, int sprn, int gprn);
+void spr_read_tm(DisasContext *ctx, int gprn, int sprn);
+void spr_write_tm(DisasContext *ctx, int sprn, int gprn);
+void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn);
+void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn);
+void spr_read_ebb(DisasContext *ctx, int gprn, int sprn);
+void spr_write_ebb(DisasContext *ctx, int sprn, int gprn);
+void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn);
+void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn);
+void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
+void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
+#endif
+
+#endif
diff --git a/target/ppc/tcg-stub.c b/target/ppc/tcg-stub.c
new file mode 100644
index 000000000..aadcf59d2
--- /dev/null
+++ b/target/ppc/tcg-stub.c
@@ -0,0 +1,45 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (C) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "hw/ppc/spapr.h"
+
+void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
+{
+}
+
+void destroy_ppc_opcodes(PowerPCCPU *cpu)
+{
+}
+
+target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong shift)
+{
+ g_assert_not_reached();
+}
+
+target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
+ SpaprMachineState *spapr,
+ target_ulong flags,
+ target_ulong shift)
+{
+ g_assert_not_reached();
+}
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
new file mode 100644
index 000000000..8ff4080eb
--- /dev/null
+++ b/target/ppc/timebase_helper.c
@@ -0,0 +1,208 @@
+/*
+ * PowerPC emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+
+/*****************************************************************************/
+/* SPR accesses */
+
+target_ulong helper_load_tbl(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_tbl(env);
+}
+
+target_ulong helper_load_tbu(CPUPPCState *env)
+{
+ return cpu_ppc_load_tbu(env);
+}
+
+target_ulong helper_load_atbl(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_atbl(env);
+}
+
+target_ulong helper_load_atbu(CPUPPCState *env)
+{
+ return cpu_ppc_load_atbu(env);
+}
+
+target_ulong helper_load_vtb(CPUPPCState *env)
+{
+ return cpu_ppc_load_vtb(env);
+}
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+target_ulong helper_load_purr(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_purr(env);
+}
+
+void helper_store_purr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_purr(env, val);
+}
+#endif
+
+target_ulong helper_load_601_rtcl(CPUPPCState *env)
+{
+ return cpu_ppc601_load_rtcl(env);
+}
+
+target_ulong helper_load_601_rtcu(CPUPPCState *env)
+{
+ return cpu_ppc601_load_rtcu(env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_store_tbl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_tbl(env, val);
+}
+
+void helper_store_tbu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_tbu(env, val);
+}
+
+void helper_store_atbl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_atbl(env, val);
+}
+
+void helper_store_atbu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_atbu(env, val);
+}
+
+void helper_store_601_rtcl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc601_store_rtcl(env, val);
+}
+
+void helper_store_601_rtcu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc601_store_rtcu(env, val);
+}
+
+target_ulong helper_load_decr(CPUPPCState *env)
+{
+ return cpu_ppc_load_decr(env);
+}
+
+void helper_store_decr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_decr(env, val);
+}
+
+target_ulong helper_load_hdecr(CPUPPCState *env)
+{
+ return cpu_ppc_load_hdecr(env);
+}
+
+void helper_store_hdecr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_hdecr(env, val);
+}
+
+void helper_store_vtb(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_vtb(env, val);
+}
+
+void helper_store_tbu40(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_tbu40(env, val);
+}
+
+target_ulong helper_load_40x_pit(CPUPPCState *env)
+{
+ return load_40x_pit(env);
+}
+
+void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
+{
+ store_40x_pit(env, val);
+}
+
+void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
+{
+ store_booke_tcr(env, val);
+}
+
+void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
+{
+ store_booke_tsr(env, val);
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+
+/* XXX: to be improved to check access rights when in user mode */
+target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
+{
+ uint32_t val = 0;
+
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ } else {
+ int ret;
+
+ qemu_mutex_lock_iothread();
+ ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
+ qemu_mutex_unlock_iothread();
+ if (unlikely(ret != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
+ }
+ }
+ return val;
+}
+
+void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
+{
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ } else {
+ int ret;
+ qemu_mutex_lock_iothread();
+ ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
+ qemu_mutex_unlock_iothread();
+ if (unlikely(ret != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
+ }
+ }
+}
diff --git a/target/ppc/trace-events b/target/ppc/trace-events
new file mode 100644
index 000000000..53b107f56
--- /dev/null
+++ b/target/ppc/trace-events
@@ -0,0 +1,38 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# kvm.c
+kvm_failed_spr_set(int spr, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
+kvm_failed_spr_get(int spr, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
+kvm_failed_fpscr_set(const char *msg) "Unable to set FPSCR to KVM: %s"
+kvm_failed_fp_set(const char *fpname, int fpnum, const char *msg) "Unable to set %s%d to KVM: %s"
+kvm_failed_vscr_set(const char *msg) "Unable to set VSCR to KVM: %s"
+kvm_failed_vr_set(int vr, const char *msg) "Unable to set VR%d to KVM: %s"
+kvm_failed_fpscr_get(const char *msg) "Unable to get FPSCR from KVM: %s"
+kvm_failed_fp_get(const char *fpname, int fpnum, const char *msg) "Unable to get %s%d from KVM: %s"
+kvm_failed_vscr_get(const char *msg) "Unable to get VSCR from KVM: %s"
+kvm_failed_vr_get(int vr, const char *msg) "Unable to get VR%d from KVM: %s"
+kvm_failed_vpa_addr_get(const char *msg) "Unable to get VPA address from KVM: %s"
+kvm_failed_slb_get(const char *msg) "Unable to get SLB shadow state from KVM: %s"
+kvm_failed_dtl_get(const char *msg) "Unable to get dispatch trace log state from KVM: %s"
+kvm_failed_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_slb_set(const char *msg) "Unable to set SLB shadow state to KVM: %s"
+kvm_failed_dtl_set(const char *msg) "Unable to set dispatch trace log state to KVM: %s"
+kvm_failed_null_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s"
+kvm_failed_put_vpa(void) "Warning: Unable to set VPA information to KVM"
+kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM"
+kvm_handle_dcr_write(void) "handle dcr write"
+kvm_handle_dcr_read(void) "handle dcr read"
+kvm_handle_halt(void) "handle halt"
+kvm_handle_papr_hcall(void) "handle PAPR hypercall"
+kvm_handle_epr(void) "handle epr"
+kvm_handle_watchdog_expiry(void) "handle watchdog expiry"
+kvm_handle_debug_exception(void) "handle debug exception"
+kvm_handle_nmi_exception(void) "handle NMI exception"
+
+# excp_helper.c
+ppc_excp_rfi(uint64_t nip, uint64_t msr) "Return from exception at 0x%" PRIx64 " with flags 0x%016" PRIx64
+ppc_excp_dsi(uint64_t dsisr, uint64_t dar) "DSI exception: DSISR=0x%" PRIx64 " DAR=0x%" PRIx64
+ppc_excp_isi(uint64_t msr, uint64_t nip) "ISI exception: msr=0x%016" PRIx64 " nip=0x%" PRIx64
+ppc_excp_fp_ignore(void) "Ignore floating point exception"
+ppc_excp_inval(uint64_t nip) "Invalid instruction at 0x%" PRIx64
+ppc_excp_print(const char *excp) "%s exception"
diff --git a/target/ppc/trace.h b/target/ppc/trace.h
new file mode 100644
index 000000000..a9e896282
--- /dev/null
+++ b/target/ppc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_ppc.h"
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
new file mode 100644
index 000000000..9960df6e1
--- /dev/null
+++ b/target/ppc/translate.c
@@ -0,0 +1,8618 @@
+/*
+ * PowerPC emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "qemu/host-utils.h"
+#include "qemu/main-loop.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/translator.h"
+#include "exec/log.h"
+#include "qemu/atomic128.h"
+#include "spr_tcg.h"
+
+#include "qemu/qemu-print.h"
+#include "qapi/error.h"
+
+#define CPU_SINGLE_STEP 0x1
+#define CPU_BRANCH_STEP 0x2
+
+/* Include definitions for instruction classes and implementation flags */
+/* #define PPC_DEBUG_DISAS */
+
+#ifdef PPC_DEBUG_DISAS
+# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+/*****************************************************************************/
+/* Code translation helpers */
+
+/* global register indexes */
+static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
+ + 10 * 4 + 22 * 5 /* SPE GPRh */
+ + 8 * 5 /* CRF */];
+static TCGv cpu_gpr[32];
+static TCGv cpu_gprh[32];
+static TCGv_i32 cpu_crf[8];
+static TCGv cpu_nip;
+static TCGv cpu_msr;
+static TCGv cpu_ctr;
+static TCGv cpu_lr;
+#if defined(TARGET_PPC64)
+static TCGv cpu_cfar;
+#endif
+static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
+static TCGv cpu_reserve;
+static TCGv cpu_reserve_val;
+static TCGv cpu_fpscr;
+static TCGv_i32 cpu_access_type;
+
+#include "exec/gen-icount.h"
+
+void ppc_translate_init(void)
+{
+ int i;
+ char *p;
+ size_t cpu_reg_names_size;
+
+ p = cpu_reg_names;
+ cpu_reg_names_size = sizeof(cpu_reg_names);
+
+ for (i = 0; i < 8; i++) {
+ snprintf(p, cpu_reg_names_size, "crf%d", i);
+ cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUPPCState, crf[i]), p);
+ p += 5;
+ cpu_reg_names_size -= 5;
+ }
+
+ for (i = 0; i < 32; i++) {
+ snprintf(p, cpu_reg_names_size, "r%d", i);
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, gpr[i]), p);
+ p += (i < 10) ? 3 : 4;
+ cpu_reg_names_size -= (i < 10) ? 3 : 4;
+ snprintf(p, cpu_reg_names_size, "r%dH", i);
+ cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, gprh[i]), p);
+ p += (i < 10) ? 4 : 5;
+ cpu_reg_names_size -= (i < 10) ? 4 : 5;
+ }
+
+ cpu_nip = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, nip), "nip");
+
+ cpu_msr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, msr), "msr");
+
+ cpu_ctr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ctr), "ctr");
+
+ cpu_lr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, lr), "lr");
+
+#if defined(TARGET_PPC64)
+ cpu_cfar = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, cfar), "cfar");
+#endif
+
+ cpu_xer = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, xer), "xer");
+ cpu_so = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, so), "SO");
+ cpu_ov = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ov), "OV");
+ cpu_ca = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ca), "CA");
+ cpu_ov32 = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ov32), "OV32");
+ cpu_ca32 = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ca32), "CA32");
+
+ cpu_reserve = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, reserve_addr),
+ "reserve_addr");
+ cpu_reserve_val = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, reserve_val),
+ "reserve_val");
+
+ cpu_fpscr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, fpscr), "fpscr");
+
+ cpu_access_type = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUPPCState, access_type),
+ "access_type");
+}
+
+/* internal defines */
+struct DisasContext {
+ DisasContextBase base;
+ target_ulong cia; /* current instruction address */
+ uint32_t opcode;
+ /* MSR-derived state affecting translation */
+ bool pr, hv, dr, le_mode;
+ bool lazy_tlb_flush;
+ bool need_access_type;
+ int mem_idx;
+ int access_type;
+ /* Translation flags */
+ MemOp default_tcg_memop_mask;
+#if defined(TARGET_PPC64)
+ bool sf_mode;
+ bool has_cfar;
+#endif
+ bool fpu_enabled;
+ bool altivec_enabled;
+ bool vsx_enabled;
+ bool spe_enabled;
+ bool tm_enabled;
+ bool gtse;
+ bool hr;
+ bool mmcr0_pmcc0;
+ bool mmcr0_pmcc1;
+ ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
+ int singlestep_enabled;
+ uint32_t flags;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+};
+
+#define DISAS_EXIT DISAS_TARGET_0 /* exit to main loop, pc updated */
+#define DISAS_EXIT_UPDATE DISAS_TARGET_1 /* exit to main loop, pc stale */
+#define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */
+#define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */
+
+/* Return true iff byteswap is needed in a scalar memop */
+static inline bool need_byteswap(const DisasContext *ctx)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return ctx->le_mode;
+#else
+ return !ctx->le_mode;
+#endif
+}
+
+/* True when active word size < size of target_long. */
+#ifdef TARGET_PPC64
+# define NARROW_MODE(C) (!(C)->sf_mode)
+#else
+# define NARROW_MODE(C) 0
+#endif
+
+struct opc_handler_t {
+ /* invalid bits for instruction 1 (Rc(opcode) == 0) */
+ uint32_t inval1;
+ /* invalid bits for instruction 2 (Rc(opcode) == 1) */
+ uint32_t inval2;
+ /* instruction type */
+ uint64_t type;
+ /* extended instruction type */
+ uint64_t type2;
+ /* handler */
+ void (*handler)(DisasContext *ctx);
+};
+
+/* SPR load/store helpers */
+static inline void gen_load_spr(TCGv t, int reg)
+{
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+}
+
+static inline void gen_store_spr(int reg, TCGv t)
+{
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+}
+
+static inline void gen_set_access_type(DisasContext *ctx, int access_type)
+{
+ if (ctx->need_access_type && ctx->access_type != access_type) {
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+ ctx->access_type = access_type;
+ }
+}
+
+static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
+{
+ if (NARROW_MODE(ctx)) {
+ nip = (uint32_t)nip;
+ }
+ tcg_gen_movi_tl(cpu_nip, nip);
+}
+
+static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
+{
+ TCGv_i32 t0, t1;
+
+ /*
+ * These are all synchronous exceptions; we set the PC back to the
+ * faulting instruction
+ */
+ gen_update_nip(ctx, ctx->cia);
+ t0 = tcg_const_i32(excp);
+ t1 = tcg_const_i32(error);
+ gen_helper_raise_exception_err(cpu_env, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception(DisasContext *ctx, uint32_t excp)
+{
+ TCGv_i32 t0;
+
+ /*
+ * These are all synchronous exceptions; we set the PC back to the
+ * faulting instruction
+ */
+ gen_update_nip(ctx, ctx->cia);
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
+ target_ulong nip)
+{
+ TCGv_i32 t0;
+
+ gen_update_nip(ctx, nip);
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_icount_io_start(DisasContext *ctx)
+{
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ /*
+ * An I/O instruction must be last in the TB.
+ * Chain to the next TB, and let the code from gen_tb_start
+ * decide if we need to return to the main loop.
+ * Doing this first also allows this value to be overridden.
+ */
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+/*
+ * Tells the caller which exception to generate and prepares the SPR
+ * registers for that exception.
+ *
+ * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
+ * POWERPC_EXCP_DEBUG (on BookE).
+ */
+static uint32_t gen_prep_dbgex(DisasContext *ctx)
+{
+ if (ctx->flags & POWERPC_FLAG_DE) {
+ target_ulong dbsr = 0;
+ if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
+ dbsr = DBCR0_ICMP;
+ } else {
+ /* Must have been a branch */
+ dbsr = DBCR0_BRT;
+ }
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_BOOKE_DBSR);
+ tcg_gen_ori_tl(t0, t0, dbsr);
+ gen_store_spr(SPR_BOOKE_DBSR, t0);
+ tcg_temp_free(t0);
+ return POWERPC_EXCP_DEBUG;
+ } else {
+ return POWERPC_EXCP_TRACE;
+ }
+}
+
+static void gen_debug_exception(DisasContext *ctx)
+{
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
+{
+ /* Will be converted to program check if needed */
+ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
+}
+
+static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
+}
+
+static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
+{
+ /* Will be converted to program check if needed */
+ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
+}
+
+/*****************************************************************************/
+/* SPR READ/WRITE CALLBACKS */
+
+void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
+{
+#if 0
+ sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+ printf("ERROR: try to access SPR %d !\n", sprn);
+#endif
+}
+
+/* #define PPC_DUMP_SPR_ACCESSES */
+
+/*
+ * Generic callbacks:
+ * do nothing but store/retrieve spr value
+ */
+static void spr_load_dump_spr(int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_load_dump_spr(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+#endif
+}
+
+void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn);
+ spr_load_dump_spr(sprn);
+}
+
+static void spr_store_dump_spr(int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_store_dump_spr(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+#endif
+}
+
+void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_store_spr(sprn, cpu_gpr[gprn]);
+ spr_store_dump_spr(sprn);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
+{
+#ifdef TARGET_PPC64
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ spr_store_dump_spr(sprn);
+#else
+ spr_write_generic(ctx, sprn, gprn);
+#endif
+}
+
+void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_load_spr(t0, sprn);
+ tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
+ tcg_gen_and_tl(t0, t0, t1);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
+{
+}
+
+#endif
+
+/* SPR common to all PowerPC */
+/* XER */
+void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv dst = cpu_gpr[gprn];
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_mov_tl(dst, cpu_xer);
+ tcg_gen_shli_tl(t0, cpu_so, XER_SO);
+ tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
+ tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_or_tl(dst, dst, t2);
+ tcg_gen_or_tl(dst, dst, t0);
+ if (is_isa300(ctx)) {
+ tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
+ tcg_gen_or_tl(dst, dst, t0);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv src = cpu_gpr[gprn];
+ /* Write all flags; the isa300 check happens on the read side */
+ tcg_gen_andi_tl(cpu_xer, src,
+ ~((1u << XER_SO) |
+ (1u << XER_OV) | (1u << XER_OV32) |
+ (1u << XER_CA) | (1u << XER_CA32)));
+ tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
+ tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
+ tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
+ tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
+ tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
+}
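+
+/*
+ * Illustrative sketch only, not part of the translator: a plain-C view of
+ * the XER composition generated by spr_read_xer above. XER_SO/XER_OV/XER_CA
+ * are this file's bit positions; the helper name is hypothetical.
+ */
+static inline target_ulong xer_compose_sketch(target_ulong xer_rest,
+ target_ulong so, target_ulong ov,
+ target_ulong ca)
+{
+ /* SO, OV and CA live in separate fields and are OR'ed back in on read */
+ return xer_rest | (so << XER_SO) | (ov << XER_OV) | (ca << XER_CA);
+}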
+
+/* LR */
+void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
+}
+
+void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
+}
+
+/* CFAR */
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
+}
+
+void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
+
+/* CTR */
+void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
+}
+
+void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
+}
+
+/* User read access to SPR */
+/* USPRx */
+/* UMMCRx */
+/* UPMCx */
+/* USIA */
+/* UDECR */
+void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
+}
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
+}
+#endif
+
+/* SPR common to all non-embedded PowerPC */
+/* DECR */
+#if !defined(CONFIG_USER_ONLY)
+void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
+}
+#endif
+
+/* SPR common to all non-embedded PowerPC, except 601 */
+/* Time base */
+void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
+}
+
+#if defined(TARGET_PPC64)
+void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
+}
+
+/* HDECR */
+void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
+}
+
+#endif
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* IBAT0U...IBAT7U */
+/* IBAT0L...IBAT7L */
+void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
+}
+
+void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
+ gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* DBAT0U...DBAT7U */
+/* DBAT0L...DBAT7L */
+void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
+}
+
+void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
+}
+
+void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
+ gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
+ gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* SDR1 */
+void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
+}
+
+#if defined(TARGET_PPC64)
+/* 64-bit PowerPC specific SPRs */
+/* PIDR */
+void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
+}
+
+void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_temp_free(t0);
+}
+
+void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
+}
+
+/* DPDES */
+void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
+}
+#endif
+#endif
+
+/* PowerPC 601 specific registers */
+/* RTC */
+void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]);
+ /* Must stop the translation as endianness may have changed */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+}
+#endif
+
+/* Unified BATs */
+#if !defined(CONFIG_USER_ONLY)
+void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState,
+ IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+#endif
+
+/* PowerPC 40x specific registers */
+#if !defined(CONFIG_USER_ONLY)
+void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
+}
+
+void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_store_spr(sprn, cpu_gpr[gprn]);
+ gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
+ /* We must stop translation as we may have rebooted */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+}
+
+void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
+}
+#endif
+
+/* PowerPC 403 specific registers */
+/* PBL1 / PBU1 / PBL2 / PBU2 */
+#if !defined(CONFIG_USER_ONLY)
+void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
+}
+
+void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1);
+ gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
+ gen_store_spr(SPR_PIR, t0);
+ tcg_temp_free(t0);
+}
+#endif
+
+/* SPE specific registers */
+void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
+ tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_temp_free_i32(t0);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Callback used to write the exception vector base */
+void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
+{
+ int sprn_offs;
+
+ if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR0;
+ } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
+ } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
+ } else {
+ printf("Trying to write an unknown exception vector %d %03x\n",
+ sprn, sprn);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+#endif
+
+#ifdef TARGET_PPC64
+#ifndef CONFIG_USER_ONLY
+void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /*
+ * Note: the HV=1 PR=0 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ if (ctx->pr) {
+ gen_load_spr(t1, SPR_UAMOR);
+ } else {
+ gen_load_spr(t1, SPR_AMOR);
+ }
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_AMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_AMR, t0);
+ spr_store_dump_spr(SPR_AMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /*
+ * Note: the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_UAMOR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_UAMOR, t0);
+ spr_store_dump_spr(SPR_UAMOR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /*
+ * Note: the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_IAMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_IAMR, t0);
+ spr_store_dump_spr(SPR_IAMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
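+
+/*
+ * Illustrative sketch only: the masked-insert pattern shared by
+ * spr_write_amr, spr_write_uamor and spr_write_iamr above. Only the bits
+ * permitted by the mask SPR (AMOR or UAMOR) are replaced; the rest of the
+ * target SPR is preserved. The helper name is hypothetical.
+ */
+static inline uint64_t masked_insert_sketch(uint64_t old_val,
+ uint64_t new_val, uint64_t mask)
+{
+ /* clear the writable bits, then OR in the new values for those bits */
+ return (old_val & ~mask) | (new_val & mask);
+}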
+#endif
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_fixup_thrm(cpu_env);
+ gen_load_spr(cpu_gpr[gprn], sprn);
+ spr_load_dump_spr(sprn);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn],
+ ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
+}
+
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv val = tcg_temp_new();
+ tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
+ gen_store_spr(SPR_BOOKE_MAS3, val);
+ tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
+ gen_store_spr(SPR_BOOKE_MAS7, val);
+ tcg_temp_free(val);
+}
+
+void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv mas7 = tcg_temp_new();
+ TCGv mas3 = tcg_temp_new();
+ gen_load_spr(mas7, SPR_BOOKE_MAS7);
+ tcg_gen_shli_tl(mas7, mas7, 32);
+ gen_load_spr(mas3, SPR_BOOKE_MAS3);
+ tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
+ tcg_temp_free(mas3);
+ tcg_temp_free(mas7);
+}
+
+#endif
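+
+/*
+ * Illustrative sketch only: MAS7_3 is a 64-bit view of the MAS7 (upper
+ * half) and MAS3 (lower half) SPR pair, split and recombined by
+ * spr_write_mas73/spr_read_mas73 above. Hypothetical helper names.
+ */
+static inline uint64_t mas73_pack_sketch(uint32_t mas7, uint32_t mas3)
+{
+ return ((uint64_t)mas7 << 32) | mas3;
+}
+
+static inline void mas73_unpack_sketch(uint64_t mas73,
+ uint32_t *mas7, uint32_t *mas3)
+{
+ *mas7 = mas73 >> 32;
+ *mas3 = (uint32_t)mas73;
+}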
+
+#ifdef TARGET_PPC64
+static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
+ int bit, int sprn, int cause)
+{
+ TCGv_i32 t1 = tcg_const_i32(bit);
+ TCGv_i32 t2 = tcg_const_i32(sprn);
+ TCGv_i32 t3 = tcg_const_i32(cause);
+
+ gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
+
+static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
+ int bit, int sprn, int cause)
+{
+ TCGv_i32 t1 = tcg_const_i32(bit);
+ TCGv_i32 t2 = tcg_const_i32(sprn);
+ TCGv_i32 t3 = tcg_const_i32(cause);
+
+ gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
+
+void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv spr_up = tcg_temp_new();
+ TCGv spr = tcg_temp_new();
+
+ gen_load_spr(spr, sprn - 1);
+ tcg_gen_shri_tl(spr_up, spr, 32);
+ tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
+
+ tcg_temp_free(spr);
+ tcg_temp_free(spr_up);
+}
+
+void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv spr = tcg_temp_new();
+
+ gen_load_spr(spr, sprn - 1);
+ tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
+ gen_store_spr(sprn - 1, spr);
+
+ tcg_temp_free(spr);
+}
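+
+/*
+ * Illustrative sketch only: the bit manipulation behind
+ * spr_read_prev_upper32/spr_write_prev_upper32 above. A 32-bit SPR number
+ * aliases the upper half of the preceding 64-bit SPR (sprn - 1).
+ * Hypothetical helper name.
+ */
+static inline uint64_t deposit_upper32_sketch(uint64_t spr, uint32_t val)
+{
+ /* replace bits 63:32, keep bits 31:0 */
+ return (spr & 0xffffffffull) | ((uint64_t)val << 32);
+}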
+
+#if !defined(CONFIG_USER_ONLY)
+void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv hmer = tcg_temp_new();
+
+ gen_load_spr(hmer, sprn);
+ tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
+ gen_store_spr(sprn, hmer);
+ spr_store_dump_spr(sprn);
+ tcg_temp_free(hmer);
+}
+
+void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+}
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_read_prev_upper32(ctx, gprn, sprn);
+}
+
+void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_write_prev_upper32(ctx, sprn, gprn);
+}
+
+void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_read_prev_upper32(ctx, gprn, sprn);
+}
+
+void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_write_prev_upper32(ctx, sprn, gprn);
+}
+#endif
+
+#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
+
+#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
+GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
+
+#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
+
+#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
+GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
+
+#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \
+GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)
+
+#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
+GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
+
+typedef struct opcode_t {
+ unsigned char opc1, opc2, opc3, opc4;
+#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
+ unsigned char pad[4];
+#endif
+ opc_handler_t handler;
+ const char *oname;
+} opcode_t;
+
+/* Helpers for privilege checks */
+#define GEN_PRIV \
+ do { \
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
+ } while (0)
+
+#if defined(CONFIG_USER_ONLY)
+#define CHK_HV GEN_PRIV
+#define CHK_SV GEN_PRIV
+#define CHK_HVRM GEN_PRIV
+#else
+#define CHK_HV \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#define CHK_SV \
+ do { \
+ if (unlikely(ctx->pr)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#define CHK_HVRM \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#endif
+
+#define CHK_NONE
+
+/*****************************************************************************/
+/* PowerPC instructions table */
+
+#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl1, \
+ .inval2 = invl2, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+
+/* Invalid instruction */
+static void gen_invalid(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+static opc_handler_t invalid_handler = {
+ .inval1 = 0xFFFFFFFF,
+ .inval2 = 0xFFFFFFFF,
+ .type = PPC_NONE,
+ .type2 = PPC_NONE,
+ .handler = gen_invalid,
+};
+
+/*** Integer comparison ***/
+
+static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_movi_tl(t0, CRF_EQ);
+ tcg_gen_movi_tl(t1, CRF_LT);
+ tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
+ t0, arg0, arg1, t1, t0);
+ tcg_gen_movi_tl(t1, CRF_GT);
+ tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
+ t0, arg0, arg1, t1, t0);
+
+ tcg_gen_trunc_tl_i32(t, t0);
+ tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
+ tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(t);
+}
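+
+/*
+ * Illustrative sketch only: the CR field value produced by gen_op_cmp
+ * above, shown for the signed case (the unsigned case only changes the
+ * comparison). CRF_LT/CRF_GT/CRF_EQ are the one-hot CR bit values used in
+ * this file; so is the XER summary-overflow bit (0 or 1). Hypothetical name.
+ */
+static inline uint32_t crf_from_cmp_sketch(int64_t a, int64_t b, uint32_t so)
+{
+ uint32_t crf = (a < b) ? CRF_LT : (a > b) ? CRF_GT : CRF_EQ;
+ return crf | so; /* SO is OR'ed into the low bit of the field */
+}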
+
+static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_tl(arg1);
+ gen_op_cmp(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ if (s) {
+ tcg_gen_ext32s_tl(t0, arg0);
+ tcg_gen_ext32s_tl(t1, arg1);
+ } else {
+ tcg_gen_ext32u_tl(t0, arg0);
+ tcg_gen_ext32u_tl(t1, arg1);
+ }
+ gen_op_cmp(t0, t1, s, crf);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_tl(arg1);
+ gen_op_cmp32(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
+{
+ if (NARROW_MODE(ctx)) {
+ gen_op_cmpi32(reg, 0, 1, 0);
+ } else {
+ gen_op_cmpi(reg, 0, 1, 0);
+ }
+}
+
+/* cmprb - range comparison: isupper, isalpha, islower */
+static void gen_cmprb(DisasContext *ctx)
+{
+ TCGv_i32 src1 = tcg_temp_new_i32();
+ TCGv_i32 src2 = tcg_temp_new_i32();
+ TCGv_i32 src2lo = tcg_temp_new_i32();
+ TCGv_i32 src2hi = tcg_temp_new_i32();
+ TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];
+
+ tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);
+
+ tcg_gen_andi_i32(src1, src1, 0xFF);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(crf, src2lo, src2hi);
+
+ if (ctx->opcode & 0x00200000) {
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(src2lo, src2lo, src2hi);
+ tcg_gen_or_i32(crf, crf, src2lo);
+ }
+ tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
+ tcg_temp_free_i32(src1);
+ tcg_temp_free_i32(src2);
+ tcg_temp_free_i32(src2lo);
+ tcg_temp_free_i32(src2hi);
+}
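+
+/*
+ * Illustrative sketch only: the test generated by gen_cmprb above. The low
+ * byte of rA is checked against one (L=0) or two (L=1) inclusive byte
+ * ranges packed into rB; the result lands in the GT bit of the CR field.
+ * Hypothetical helper name.
+ */
+static inline bool cmprb_in_range_sketch(uint8_t src, uint32_t ranges, bool l)
+{
+ uint8_t lo0 = ranges & 0xff, hi0 = (ranges >> 8) & 0xff;
+ bool in = src >= lo0 && src <= hi0;
+ if (l) {
+ uint8_t lo1 = (ranges >> 16) & 0xff, hi1 = (ranges >> 24) & 0xff;
+ in = in || (src >= lo1 && src <= hi1);
+ }
+ return in;
+}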
+
+#if defined(TARGET_PPC64)
+/* cmpeqb */
+static void gen_cmpeqb(DisasContext *ctx)
+{
+ gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+}
+#endif
+
+/* isel (PowerPC 2.03 specification) */
+static void gen_isel(DisasContext *ctx)
+{
+ uint32_t bi = rC(ctx->opcode);
+ uint32_t mask = 0x08 >> (bi & 0x03);
+ TCGv t0 = tcg_temp_new();
+ TCGv zr;
+
+ tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
+ tcg_gen_andi_tl(t0, t0, mask);
+
+ zr = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
+ rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(zr);
+ tcg_temp_free(t0);
+}
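+
+/*
+ * Illustrative sketch only: isel semantics as generated above. rD receives
+ * rA (or zero when the rA field is 0) if CR bit BI is set, otherwise rB.
+ * Hypothetical helper name.
+ */
+static inline uint64_t isel_sketch(bool cr_bit, uint64_t a, uint64_t b)
+{
+ return cr_bit ? a : b;
+}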
+
+/* cmpb: PowerPC 2.05 specification */
+static void gen_cmpb(DisasContext *ctx)
+{
+ gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+}
+
+/*** Integer arithmetic ***/
+
+static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
+ TCGv arg1, TCGv arg2, int sub)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_xor_tl(cpu_ov, arg0, arg2);
+ tcg_gen_xor_tl(t0, arg1, arg2);
+ if (sub) {
+ tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
+ } else {
+ tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
+ }
+ tcg_temp_free(t0);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ov32, cpu_ov);
+ }
+ } else {
+ if (is_isa300(ctx)) {
+ tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
+ }
+ tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
+ }
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+}
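+
+/*
+ * Illustrative sketch only: the signed-overflow rule implemented by
+ * gen_op_arith_compute_ov above, shown for 64-bit addition. Overflow
+ * occurs iff the operands have the same sign and the result's sign
+ * differs. Hypothetical helper name.
+ */
+static inline int add_overflow_sketch(uint64_t a, uint64_t b, uint64_t res)
+{
+ /* sign changed (res ^ a) although the operand signs matched ~(a ^ b) */
+ return ((res ^ a) & ~(a ^ b)) >> 63;
+}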
+
+static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
+ TCGv res, TCGv arg0, TCGv arg1,
+ TCGv ca32, int sub)
+{
+ TCGv t0;
+
+ if (!is_isa300(ctx)) {
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ if (sub) {
+ tcg_gen_eqv_tl(t0, arg0, arg1);
+ } else {
+ tcg_gen_xor_tl(t0, arg0, arg1);
+ }
+ tcg_gen_xor_tl(t0, t0, res);
+ tcg_gen_extract_tl(ca32, t0, 32, 1);
+ tcg_temp_free(t0);
+}
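+
+/*
+ * Illustrative sketch only: how gen_op_arith_compute_ca32 above recovers
+ * the ISA 3.0 CA32 bit (carry out of bit 31) for addition. XOR of the
+ * operands gives the carry-less sum; XOR with the real result exposes the
+ * carries. Hypothetical helper name.
+ */
+static inline int carry_into_bit32_sketch(uint64_t a, uint64_t b, uint64_t res)
+{
+ return ((a ^ b ^ res) >> 32) & 1;
+}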
+
+/* Common add function */
+static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, TCGv ca, TCGv ca32,
+ bool add_ca, bool compute_ca,
+ bool compute_ov, bool compute_rc0)
+{
+ TCGv t0 = ret;
+
+ if (compute_ca || compute_ov) {
+ t0 = tcg_temp_new();
+ }
+
+ if (compute_ca) {
+ if (NARROW_MODE(ctx)) {
+ /*
+ * Caution: a non-obvious corner case of the spec is that
+ * we must produce the *entire* 64-bit addition, but
+ * produce the carry into bit 32.
+ */
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */
+ tcg_gen_add_tl(t0, arg1, arg2);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, t0, ca);
+ }
+ tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */
+ tcg_temp_free(t1);
+ tcg_gen_extract_tl(ca, ca, 32, 1);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(ca32, ca);
+ }
+ } else {
+ TCGv zero = tcg_const_tl(0);
+ if (add_ca) {
+ tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
+ tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
+ } else {
+ tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
+ }
+ gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
+ tcg_temp_free(zero);
+ }
+ } else {
+ tcg_gen_add_tl(t0, arg1, arg2);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, t0, ca);
+ }
+ }
+
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
+ }
+ if (unlikely(compute_rc0)) {
+ gen_set_Rc0(ctx, t0);
+ }
+
+ if (t0 != ret) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
+
+/* Add functions with two operands */
+#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ ca, glue(ca, 32), \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+}
+/* Add functions with one operand and one immediate */
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_tl(const_val); \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ ca, glue(ca, 32), \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+ tcg_temp_free(t0); \
+}
+
+/* add add. addo addo. */
+GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
+/* addc addc. addco addco. */
+GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
+/* adde adde. addeo addeo. */
+GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
+/* addme addme. addmeo addmeo. */
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
+/* addex */
+GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
+/* addze addze. addzeo addzeo. */
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
+/* addic addic. */
+static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
+{
+ TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
+ tcg_temp_free(c);
+}
+
+static void gen_addic(DisasContext *ctx)
+{
+ gen_op_addic(ctx, 0);
+}
+
+static void gen_addic_(DisasContext *ctx)
+{
+ gen_op_addic(ctx, 1);
+}
+
+static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ if (sign) {
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ } else {
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ }
+ if (compute_ov) {
+ tcg_gen_extu_i32_tl(cpu_ov, t2);
+ if (is_isa300(ctx)) {
+ tcg_gen_extu_i32_tl(cpu_ov32, t2);
+ }
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, ret);
+ }
+}
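+
+/*
+ * Illustrative sketch only: the boundary handling encoded by the movcond
+ * chain in gen_op_arith_divw above, shown for the signed case. Division by
+ * zero and INT32_MIN / -1 are the overflow cases; they set OV and force
+ * the divisor to 1 so the host division cannot trap. Hypothetical name.
+ */
+static inline int32_t divw_sketch(int32_t a, int32_t b, int *ov)
+{
+ *ov = (b == 0) || (a == INT32_MIN && b == -1);
+ if (*ov) {
+ return a; /* quotient undefined by the ISA; a / 1 is what we emit */
+ }
+ return a / b;
+}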
+/* Div functions */
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divwu divwu. divwuo divwuo. */
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
+/* divw divw. divwo divwo. */
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
+
+/* div[wd]eu[o][.] */
+#define GEN_DIVE(name, hlpr, compute_ov) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_const_i32(compute_ov); \
+ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
+ } \
+}
+
+GEN_DIVE(divweu, divweu, 0);
+GEN_DIVE(divweuo, divweu, 1);
+GEN_DIVE(divwe, divwe, 0);
+GEN_DIVE(divweo, divwe, 1);
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
+ if (sign) {
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i64(ret, t0, t1);
+ } else {
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i64(ret, t0, t1);
+ }
+ if (compute_ov) {
+ tcg_gen_mov_tl(cpu_ov, t2);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ov32, t2);
+ }
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, ret);
+ }
+}
+
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divdu divdu. divduo divduo. */
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
+/* divd divd. divdo divdo. */
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
+
+GEN_DIVE(divdeu, divdeu, 0);
+GEN_DIVE(divdeuo, divdeu, 1);
+GEN_DIVE(divde, divde, 0);
+GEN_DIVE(divdeo, divde, 1);
+#endif
+
+static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ if (sign) {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i32(t3, t0, t1);
+ tcg_gen_ext_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ } else {
+ TCGv_i32 t2 = tcg_const_i32(1);
+ TCGv_i32 t3 = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#define GEN_INT_ARITH_MODW(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODW(moduw, 0x08, 0);
+GEN_INT_ARITH_MODW(modsw, 0x18, 1);
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
+ if (sign) {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ } else {
+ TCGv_i64 t2 = tcg_const_i64(1);
+ TCGv_i64 t3 = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+#define GEN_INT_ARITH_MODD(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODD(modud, 0x08, 0);
+GEN_INT_ARITH_MODD(modsd, 0x18, 1);
+#endif
+
+/* mulhw mulhw. */
+static void gen_mulhw(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulhwu mulhwu. */
+static void gen_mulhwu(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mullw mullw. */
+static void gen_mullw(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+#endif
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mullwo mullwo. */
+static void gen_mullwo(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+#if defined(TARGET_PPC64)
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+#else
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+
+ tcg_gen_sari_i32(t0, t0, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_ov, t0);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ov32, cpu_ov);
+ }
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
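+/*
+ * The OV computation in gen_mullwo uses a standard identity: a signed
+ * 32x32->64 multiply fits in 32 bits iff the high word equals the
+ * sign-extension of the low word.  A sketch (illustrative only):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static bool ref_mullwo_ov(int32_t a, int32_t b)
+ *   {
+ *       int64_t p  = (int64_t)a * b;
+ *       int32_t lo = (int32_t)p;
+ *       int32_t hi = (int32_t)(p >> 32);
+ *       // lo >> 31 assumes an arithmetic shift, like TCG's sari
+ *       return hi != (lo >> 31);   // OV: product does not fit in 32 bits
+ *   }
+ */
+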
+/* mulli */
+static void gen_mulli(DisasContext *ctx)
+{
+ tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ SIMM(ctx->opcode));
+}
+
+#if defined(TARGET_PPC64)
+/* mulhd mulhd. */
+static void gen_mulhd(DisasContext *ctx)
+{
+ TCGv lo = tcg_temp_new();
+ tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(lo);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulhdu mulhdu. */
+static void gen_mulhdu(DisasContext *ctx)
+{
+ TCGv lo = tcg_temp_new();
+ tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(lo);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulld mulld. */
+static void gen_mulld(DisasContext *ctx)
+{
+ tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulldo mulldo. */
+static void gen_mulldo(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_gen_sari_i64(t0, t0, 63);
+ tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ov32, cpu_ov);
+ }
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+#endif
+
+/* Common subf function */
+static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, bool add_ca, bool compute_ca,
+ bool compute_ov, bool compute_rc0)
+{
+ TCGv t0 = ret;
+
+ if (compute_ca || compute_ov) {
+ t0 = tcg_temp_new();
+ }
+
+ if (compute_ca) {
+ /* dest = ~arg1 + arg2 [+ ca]. */
+ if (NARROW_MODE(ctx)) {
+ /*
+ * Caution: a non-obvious corner case of the spec is that
+ * we must perform the *entire* 64-bit addition, but
+ * produce the carry into bit 32.
+ */
+ TCGv inv1 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_not_tl(inv1, arg1);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, arg2, cpu_ca);
+ } else {
+ tcg_gen_addi_tl(t0, arg2, 1);
+ }
+ tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */
+ tcg_gen_add_tl(t0, t0, inv1);
+ tcg_temp_free(inv1);
+ tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */
+ tcg_temp_free(t1);
+ tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ca32, cpu_ca);
+ }
+ } else if (add_ca) {
+ TCGv zero, inv1 = tcg_temp_new();
+ tcg_gen_not_tl(inv1, arg1);
+ zero = tcg_const_tl(0);
+ tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
+ tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
+ gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
+ tcg_temp_free(zero);
+ tcg_temp_free(inv1);
+ } else {
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
+ }
+ } else if (add_ca) {
+ /*
+ * Since we're ignoring carry-out, we can simplify the
+ * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
+ */
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ tcg_gen_add_tl(t0, t0, cpu_ca);
+ tcg_gen_subi_tl(t0, t0, 1);
+ } else {
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ }
+
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
+ }
+ if (unlikely(compute_rc0)) {
+ gen_set_Rc0(ctx, t0);
+ }
+
+ if (t0 != ret) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
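+
+/*
+ * The narrow-mode carry computation above rests on two identities
+ * (sketch is illustrative only; ref_subf_ca32 is an invented name):
+ * ~a == -a - 1, which also explains the add_ca simplification above,
+ * and, for any x and y, ((x + y) ^ x ^ y) carries the carry *into*
+ * bit k at bit k.
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint64_t ref_subf_ca32(uint64_t a, uint64_t b, uint64_t ca)
+ *   {
+ *       uint64_t inv1 = ~a;                  // ca is 0 or 1 (1 for the non-CA form)
+ *       uint64_t sum  = inv1 + b + ca;       // the *entire* 64-bit addition
+ *       uint64_t carries = sum ^ inv1 ^ b;   // carry-in at each bit position
+ *       return (carries >> 32) & 1;          // carry into bit 32
+ *   }
+ */
+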
+/* Subtract functions with two operands */
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+}
+/* Subtract functions with one operand and one immediate */
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_tl(const_val); \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+ tcg_temp_free(t0); \
+}
+/* subf subf. subfo subfo. */
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+/* subfc subfc. subfco subfco. */
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+/* subfe subfe. subfeo subfeo. */
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+/* subfme subfme. subfmeo subfmeo. */
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+/* subfze subfze. subfzeo subfzeo. */
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+/* subfic */
+static void gen_subfic(DisasContext *ctx)
+{
+ TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ c, 0, 1, 0, 0);
+ tcg_temp_free(c);
+}
+
+/* neg neg. nego nego. */
+static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
+{
+ TCGv zero = tcg_const_tl(0);
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ zero, 0, 0, compute_ov, Rc(ctx->opcode));
+ tcg_temp_free(zero);
+}
+
+static void gen_neg(DisasContext *ctx)
+{
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+static void gen_nego(DisasContext *ctx)
+{
+ gen_op_arith_neg(ctx, 1);
+}
+
+/*** Integer logical ***/
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+/* and & and. */
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
+/* andc & andc. */
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
+
+/* andi. */
+static void gen_andi_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ UIMM(ctx->opcode));
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* andis. */
+static void gen_andis_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ UIMM(ctx->opcode) << 16);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* cntlzw */
+static void gen_cntlzw(DisasContext *ctx)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* cnttzw */
+static void gen_cnttzw(DisasContext *ctx)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_ctzi_i32(t, t, 32);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* eqv & eqv. */
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
+/* extsb & extsb. */
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
+/* extsh & extsh. */
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
+/* nand & nand. */
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
+/* nor & nor. */
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+static void gen_pause(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(0);
+ tcg_gen_st_i32(t0, cpu_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+
+ /* Stop translation; this gives other CPUs a chance to run */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
+
+/* or & or. */
+static void gen_or(DisasContext *ctx)
+{
+ int rs, ra, rb;
+
+ rs = rS(ctx->opcode);
+ ra = rA(ctx->opcode);
+ rb = rB(ctx->opcode);
+ /* Optimisation for the mr and mr. cases */
+ if (rs != ra || rs != rb) {
+ if (rs != rb) {
+ tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[ra]);
+ }
+ } else if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rs]);
+#if defined(TARGET_PPC64)
+ } else if (rs != 0) { /* 0 is nop */
+ int prio = 0;
+
+ switch (rs) {
+ case 1:
+ /* Set process priority to low */
+ prio = 2;
+ break;
+ case 6:
+ /* Set process priority to medium-low */
+ prio = 3;
+ break;
+ case 2:
+ /* Set process priority to normal */
+ prio = 4;
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case 31:
+ if (!ctx->pr) {
+ /* Set process priority to very low */
+ prio = 1;
+ }
+ break;
+ case 5:
+ if (!ctx->pr) {
+ /* Set process priority to medium-high */
+ prio = 5;
+ }
+ break;
+ case 3:
+ if (!ctx->pr) {
+ /* Set process priority to high */
+ prio = 6;
+ }
+ break;
+ case 7:
+ if (ctx->hv && !ctx->pr) {
+ /* Set process priority to very high */
+ prio = 7;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ if (prio) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_PPR);
+ tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
+ tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
+ gen_store_spr(SPR_PPR, t0);
+ tcg_temp_free(t0);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ /*
+ * Pause out of TCG, otherwise spin loops with smt_low eat too
+ * much CPU and the kernel hangs. This applies to all
+ * encodings other than no-op, e.g., miso(rs=26), yield(27),
+ * mdoio(29), mdoom(30), and all currently undefined.
+ */
+ gen_pause(ctx);
+#endif
+#endif
+ }
+}
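+
+/*
+ * The "or rx,rx,rx" special forms above encode a thread-priority hint
+ * in PPR's 3-bit priority field.  A sketch of the update (illustrative
+ * only; the field position follows the mask used above):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint64_t ref_set_ppr_prio(uint64_t ppr, unsigned prio)
+ *   {
+ *       ppr &= ~0x001C000000000000ULL;   // clear the priority field
+ *       ppr |= (uint64_t)prio << 50;     // 1 (very low) .. 7 (very high)
+ *       return ppr;
+ *   }
+ *
+ * So "or 1,1,1" requests low priority (prio 2) and "or 2,2,2" normal
+ * priority (prio 4), per the switch above.
+ */
+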
+/* orc & orc. */
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
+
+/* xor & xor. */
+static void gen_xor(DisasContext *ctx)
+{
+ /* Optimisation for "set to zero" case */
+ if (rS(ctx->opcode) != rB(ctx->opcode)) {
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* ori */
+static void gen_ori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* oris */
+static void gen_oris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ uimm << 16);
+}
+
+/* xori */
+static void gen_xori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* xoris */
+static void gen_xoris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ uimm << 16);
+}
+
+/* popcntb: PowerPC 2.03 specification */
+static void gen_popcntb(DisasContext *ctx)
+{
+ gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+static void gen_popcntw(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+#else
+ tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+#endif
+}
+
+#if defined(TARGET_PPC64)
+/* popcntd: PowerPC 2.06 specification */
+static void gen_popcntd(DisasContext *ctx)
+{
+ tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+#endif
+
+/* prtyw: PowerPC 2.05 specification */
+static void gen_prtyw(DisasContext *ctx)
+{
+ TCGv ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, rs, 16);
+ tcg_gen_xor_tl(ra, rs, t0);
+ tcg_gen_shri_tl(t0, ra, 8);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
+ tcg_temp_free(t0);
+}
+
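+/*
+ * prtyw folds the low bit of each byte of a word into bit 0 of that
+ * word: after xor-ing in the >>16 and >>8 shifts, bit 0 equals
+ * b0 ^ b8 ^ b16 ^ b24.  A single-word sketch (illustrative only):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ref_prty_word(uint32_t w)
+ *   {
+ *       w ^= w >> 16;
+ *       w ^= w >> 8;
+ *       return w & 1;   // parity of bits 0, 8, 16 and 24
+ *   }
+ */
+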
+#if defined(TARGET_PPC64)
+/* prtyd: PowerPC 2.05 specification */
+static void gen_prtyd(DisasContext *ctx)
+{
+ TCGv ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, rs, 32);
+ tcg_gen_xor_tl(ra, rs, t0);
+ tcg_gen_shri_tl(t0, ra, 16);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_shri_tl(t0, ra, 8);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_andi_tl(ra, ra, 1);
+ tcg_temp_free(t0);
+}
+#endif
+
+#if defined(TARGET_PPC64)
+/* bpermd */
+static void gen_bpermd(DisasContext *ctx)
+{
+ gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+#endif
+
+#if defined(TARGET_PPC64)
+/* extsw & extsw. */
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
+
+/* cntlzd */
+static void gen_cntlzd(DisasContext *ctx)
+{
+ tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* cnttzd */
+static void gen_cnttzd(DisasContext *ctx)
+{
+ tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* darn */
+static void gen_darn(DisasContext *ctx)
+{
+ int l = L(ctx->opcode);
+
+ if (l > 2) {
+ tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
+ } else {
+ gen_icount_io_start(ctx);
+ if (l == 0) {
+ gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ /* Return 64-bit random for both CRN and RRN */
+ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
+ }
+ }
+}
+#endif
+
+/*** Integer rotate ***/
+
+/* rlwimi & rlwimi. */
+static void gen_rlwimi(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+
+ if (sh == (31 - me) && mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
+ } else {
+ target_ulong mask;
+ bool mask_in_32b = true;
+ TCGv t1;
+
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+
+#if defined(TARGET_PPC64)
+ if (mask > 0xffffffffu) {
+ mask_in_32b = false;
+ }
+#endif
+ t1 = tcg_temp_new();
+ if (mask_in_32b) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ } else {
+#if defined(TARGET_PPC64)
+ tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
+ tcg_gen_rotli_i64(t1, t1, sh);
+#else
+ g_assert_not_reached();
+#endif
+ }
+
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rlwinm & rlwinm. */
+static void gen_rlwinm(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ int sh = SH(ctx->opcode);
+ int mb = MB(ctx->opcode);
+ int me = ME(ctx->opcode);
+ int len = me - mb + 1;
+ int rsh = (32 - sh) & 31;
+
+ if (sh != 0 && len > 0 && me == (31 - sh)) {
+ tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
+ } else if (me == 31 && rsh + len <= 32) {
+ tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
+ } else {
+ target_ulong mask;
+ bool mask_in_32b = true;
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+#if defined(TARGET_PPC64)
+ if (mask > 0xffffffffu) {
+ mask_in_32b = false;
+ }
+#endif
+ if (mask_in_32b) {
+ if (sh == 0) {
+ tcg_gen_andi_tl(t_ra, t_rs, mask);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_extu_i32_tl(t_ra, t0);
+ tcg_temp_free_i32(t0);
+ }
+ } else {
+#if defined(TARGET_PPC64)
+ tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
+ tcg_gen_rotli_i64(t_ra, t_ra, sh);
+ tcg_gen_andi_i64(t_ra, t_ra, mask);
+#else
+ g_assert_not_reached();
+#endif
+ }
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
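+/*
+ * All rlw* forms reduce to "rotate the low 32 bits, then AND with
+ * MASK(mb, me)"; the deposit/extract paths above are fast paths for
+ * masks that make this a plain field insert or extract.  A reference
+ * sketch of the base operation (illustrative only):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t rotl32(uint32_t x, unsigned sh)
+ *   {
+ *       return sh ? (x << sh) | (x >> (32 - sh)) : x;
+ *   }
+ *
+ *   static uint32_t ref_rlwinm(uint32_t rs, unsigned sh, uint32_t mask)
+ *   {
+ *       return rotl32(rs, sh) & mask;   // mask = MASK(mb, me)
+ *   }
+ */
+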
+/* rlwnm & rlwnm. */
+static void gen_rlwnm(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ target_ulong mask;
+ bool mask_in_32b = true;
+
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+
+#if defined(TARGET_PPC64)
+ if (mask > 0xffffffffu) {
+ mask_in_32b = false;
+ }
+#endif
+ if (mask_in_32b) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rb);
+ tcg_gen_trunc_tl_i32(t1, t_rs);
+ tcg_gen_andi_i32(t0, t0, 0x1f);
+ tcg_gen_rotl_i32(t1, t1, t0);
+ tcg_gen_extu_i32_tl(t_ra, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else {
+#if defined(TARGET_PPC64)
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(t0, t_rb, 0x1f);
+ tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
+ tcg_gen_rotl_i64(t_ra, t_ra, t0);
+ tcg_temp_free_i64(t0);
+#else
+ g_assert_not_reached();
+#endif
+ }
+
+ tcg_gen_andi_tl(t_ra, t_ra, mask);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+#if defined(TARGET_PPC64)
+#define GEN_PPC64_R2(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1); \
+}
+#define GEN_PPC64_R4(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 1); \
+} \
+ \
+static void glue(gen_, name##2)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 0); \
+} \
+ \
+static void glue(gen_, name##3)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 1); \
+}
+
+static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ int len = me - mb + 1;
+ int rsh = (64 - sh) & 63;
+
+ if (sh != 0 && len > 0 && me == (63 - sh)) {
+ tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
+ } else if (me == 63 && rsh + len <= 64) {
+ tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
+ } else {
+ tcg_gen_rotli_tl(t_ra, t_rs, sh);
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rldicl - rldicl. */
+static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63, sh);
+}
+GEN_PPC64_R4(rldicl, 0x1E, 0x00);
+
+/* rldicr - rldicr. */
+static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
+{
+ uint32_t sh, me;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldinm(ctx, 0, me, sh);
+}
+GEN_PPC64_R4(rldicr, 0x1E, 0x02);
+
+/* rldic - rldic. */
+static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63 - sh, sh);
+}
+GEN_PPC64_R4(rldic, 0x1E, 0x04);
+
+static void gen_rldnm(DisasContext *ctx, int mb, int me)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, t_rb, 0x3f);
+ tcg_gen_rotl_tl(t_ra, t_rs, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rldcl - rldcl. */
+static inline void gen_rldcl(DisasContext *ctx, int mbn)
+{
+ uint32_t mb;
+
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldnm(ctx, mb, 63);
+}
+GEN_PPC64_R2(rldcl, 0x1E, 0x08);
+
+/* rldcr - rldcr. */
+static inline void gen_rldcr(DisasContext *ctx, int men)
+{
+ uint32_t me;
+
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldnm(ctx, 0, me);
+}
+GEN_PPC64_R2(rldcr, 0x1E, 0x09);
+
+/* rldimi - rldimi. */
+static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode) | (shn << 5);
+ uint32_t mb = MB(ctx->opcode) | (mbn << 5);
+ uint32_t me = 63 - sh;
+
+ if (mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
+ } else {
+ target_ulong mask = MASK(mb, me);
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_rotli_tl(t1, t_rs, sh);
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+GEN_PPC64_R4(rldimi, 0x1E, 0x06);
+#endif
+
+/*** Integer shift ***/
+
+/* slw & slw. */
+static void gen_slw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
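+/*
+ * The branch-free "mask that is 0 when rB >= 0x20" trick above: shift
+ * rB left so that its bit 5 lands in the sign bit, then an arithmetic
+ * shift right replicates it across the word.  A 32-bit sketch
+ * (illustrative only):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ref_slw(uint32_t rs, uint32_t rb)
+ *   {
+ *       uint32_t kill = (uint32_t)((int32_t)(rb << 26) >> 31);
+ *       rs &= ~kill;                // all-ones kill mask if rb & 0x20
+ *       return rs << (rb & 0x1f);   // shift amount modulo 32
+ *   }
+ */
+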
+/* sraw & sraw. */
+static void gen_sraw(DisasContext *ctx)
+{
+ gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srawi & srawi. */
+static void gen_srawi(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+ if (sh == 0) {
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ if (is_isa300(ctx)) {
+ tcg_gen_movi_tl(cpu_ca32, 0);
+ }
+ } else {
+ TCGv t0;
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
+ tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
+ tcg_temp_free(t0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ca32, cpu_ca);
+ }
+ tcg_gen_sari_tl(dst, dst, sh);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
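+/*
+ * For srawi, CA is set iff the sign-extended source is negative and
+ * any 1 bits are shifted out of the low sh bits.  A sketch of that
+ * predicate (illustrative only):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static bool ref_srawi_ca(int32_t src, unsigned sh)
+ *   {
+ *       uint32_t lost = sh ? (uint32_t)src & ((1u << sh) - 1) : 0;
+ *       return src < 0 && lost != 0;   // bits lost from a negative value
+ *   }
+ */
+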
+/* srw & srw. */
+static void gen_srw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_ext32u_tl(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+#if defined(TARGET_PPC64)
+/* sld & sld. */
+static void gen_sld(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srad & srad. */
+static void gen_srad(DisasContext *ctx)
+{
+ gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+/* sradi & sradi. */
+static inline void gen_sradi(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+ if (sh == 0) {
+ tcg_gen_mov_tl(dst, src);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ if (is_isa300(ctx)) {
+ tcg_gen_movi_tl(cpu_ca32, 0);
+ }
+ } else {
+ TCGv t0;
+ tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
+ tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
+ tcg_temp_free(t0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
+ if (is_isa300(ctx)) {
+ tcg_gen_mov_tl(cpu_ca32, cpu_ca);
+ }
+ tcg_gen_sari_tl(dst, src, sh);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_sradi0(DisasContext *ctx)
+{
+ gen_sradi(ctx, 0);
+}
+
+static void gen_sradi1(DisasContext *ctx)
+{
+ gen_sradi(ctx, 1);
+}
+
+/* extswsli & extswsli. */
+static inline void gen_extswsli(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_shli_tl(dst, dst, sh);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_extswsli0(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 0);
+}
+
+static void gen_extswsli1(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 1);
+}
+
+/* srd & srd. */
+static void gen_srd(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+#endif
+
+/*** Addressing modes ***/
+/* Register indirect with immediate index: EA = (rA|0) + SIMM */
+static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
+ target_long maskl)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ simm &= ~maskl;
+ if (rA(ctx->opcode) == 0) {
+ if (NARROW_MODE(ctx)) {
+ simm = (uint32_t)simm;
+ }
+ tcg_gen_movi_tl(EA, simm);
+ } else if (likely(simm != 0)) {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ } else {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+ }
+}
+
+static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ }
+ } else {
+ tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ }
+}
+
+static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, 0);
+ } else if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ target_long val)
+{
+ tcg_gen_addi_tl(ret, arg1, val);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(ret, ret);
+ }
+}
+
+static inline void gen_align_no_le(DisasContext *ctx)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
+ (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
+}
+
+static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
+{
+ TCGv ea = tcg_temp_new();
+ if (ra) {
+ tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
+ } else {
+ tcg_gen_mov_tl(ea, displ);
+ }
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(ea, ea);
+ }
+ return ea;
+}
+
+/*** Integer load ***/
+#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
+#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
+
+#define GEN_QEMU_LOAD_TL(ldop, op) \
+static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
+GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
+
+GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
+
+#define GEN_QEMU_LOAD_64(ldop, op) \
+static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+#endif
+
+#define GEN_QEMU_STORE_TL(stop, op) \
+static void glue(gen_qemu_, stop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
+}
+
+#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
+GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
+#endif
+GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
+
+GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
+
+#define GEN_QEMU_STORE_64(stop, op) \
+static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+#endif
+
+#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ chk; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDX(name, ldop, opc2, opc3, type) \
+ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)
+
+#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \
+ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
+
+#define GEN_LDEPX(name, ldop, opc2, opc3) \
+static void glue(gen_, name##epx)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ CHK_SV; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
+ tcg_temp_free(EA); \
+}
+
+GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
+GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
+GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
+#if defined(TARGET_PPC64)
+GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+#endif
+
+#if defined(TARGET_PPC64)
+/* CI load/store variants */
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
+GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
+GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
+#endif
+
+/*** Integer store ***/
+#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ chk; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+#define GEN_STX(name, stop, opc2, opc3, type) \
+ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
+
+#define GEN_STX_HVRM(name, stop, opc2, opc3, type) \
+ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
+
+#define GEN_STEPX(name, stop, opc2, opc3) \
+static void glue(gen_, name##epx)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ CHK_SV; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_qemu_st_tl( \
+ cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \
+ tcg_temp_free(EA); \
+}
+
+GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
+GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
+GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
+#if defined(TARGET_PPC64)
+GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
+#endif
+
+#if defined(TARGET_PPC64)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
+GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
+GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
+#endif
+/*** Integer load and store with byte reverse ***/
+
+/* lhbrx */
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
+
+/* lwbrx */
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
+
+#if defined(TARGET_PPC64)
+/* ldbrx */
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
+#endif /* TARGET_PPC64 */
+
+/* sthbrx */
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
+/* stwbrx */
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
+
+/*** Integer load and store multiple ***/
+
+/* lmw */
+static void gen_lmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_lmw(cpu_env, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/* stmw */
+static void gen_stmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rS(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_stmw(cpu_env, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/*** Integer load and store strings ***/
+
+/* lswi */
+/*
+ * The PowerPC32 specification says we must generate an exception if rA
+ * is in the range of registers to be loaded. On the other hand, IBM
+ * says this is valid, but rA won't be loaded. For now, I'll follow the
+ * spec...
+ */
+static void gen_lswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+ int start = rD(ctx->opcode);
+ int ra = rA(ctx->opcode);
+ int nr;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ if (nb == 0) {
+ nb = 32;
+ }
+ nr = DIV_ROUND_UP(nb, 4);
+ if (unlikely(lsw_reg_in_range(start, nr, ra))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(start);
+ gen_helper_lsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* lswx */
+static void gen_lswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2, t3;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ t2 = tcg_const_i32(rA(ctx->opcode));
+ t3 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_lswx(cpu_env, t0, t1, t2, t3);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+}
+
+/* stswi */
+static void gen_stswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ if (nb == 0) {
+ nb = 32;
+ }
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* stswx */
+static void gen_stswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t1, cpu_xer);
+ tcg_gen_andi_i32(t1, t1, 0x7F);
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/*** Memory synchronisation ***/
+/* eieio */
+static void gen_eieio(DisasContext *ctx)
+{
+ TCGBar bar = TCG_MO_LD_ST;
+
+ /*
+ * POWER9 has an eieio instruction variant that uses bit 6 as a hint
+ * to tell the CPU it is a store-forwarding barrier.
+ */
+ if (ctx->opcode & 0x2000000) {
+ /*
+ * The ISA says that "Reserved fields in instructions are ignored
+ * by the processor". So ignore bit 6 on non-POWER9 CPUs but, as
+ * this is not an instruction software should be using, complain
+ * to the user.
+ */
+ if (!(ctx->insns_flags2 & PPC2_ISA300)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
+ TARGET_FMT_lx "\n", ctx->cia);
+ } else {
+ bar = TCG_MO_ST_LD;
+ }
+ }
+
+ tcg_gen_mb(bar | TCG_BAR_SC);
+}
+
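+/*
+ * The 0x2000000 test above checks "bit 6" in the ISA's big-endian bit
+ * numbering, i.e. bit 25 counting from the LSB of the 32-bit opcode.
+ * A sketch of the decode (illustrative only):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static bool eieio_has_hint(uint32_t insn)
+ *   {
+ *       return (insn >> 25) & 1;   // IBM bit 6 == 1u << 25 == 0x2000000
+ *   }
+ */
+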
+#if !defined(CONFIG_USER_ONLY)
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
+{
+ TCGv_i32 t;
+ TCGLabel *l;
+
+ if (!ctx->lazy_tlb_flush) {
+ return;
+ }
+ l = gen_new_label();
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
+ if (global) {
+ gen_helper_check_tlb_flush_global(cpu_env);
+ } else {
+ gen_helper_check_tlb_flush_local(cpu_env);
+ }
+ gen_set_label(l);
+ tcg_temp_free_i32(t);
+}
+#else
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
+#endif
+
+/* isync */
+static void gen_isync(DisasContext *ctx)
+{
+ /*
+ * We need to check for a pending TLB flush. This can only happen
+ * in kernel mode, however, so check MSR_PR.
+ */
+ if (!ctx->pr) {
+ gen_check_tlb_flush(ctx, false);
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+}
+
+#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
+
+static void gen_load_locked(DisasContext *ctx, MemOp memop)
+{
+ TCGv gpr = cpu_gpr[rD(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
+ tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_mov_tl(cpu_reserve_val, gpr);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ tcg_temp_free(t0);
+}
+
+#define LARX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ gen_load_locked(ctx, memop); \
+}
+
+/* lbarx lharx lwarx */
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
+
+static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
+ TCGv EA, TCGCond cond, int addend)
+{
+ TCGv t = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv u = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
+ tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(u, t, addend);
+
+ /* E.g. for fetch and increment bounded... */
+ /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
+ tcg_gen_movcond_tl(cond, u, t, t2, u, t);
+ tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
+
+ /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
+ tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
+ tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
+
+ tcg_temp_free(t);
+ tcg_temp_free(t2);
+ tcg_temp_free(u);
+}
+
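+/*
+ * A plain-C sketch of the serial "fetch and increment bounded" path
+ * above, for 32-bit operands (illustrative only; mem stands for the
+ * two adjacent words at EA):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ref_fetch_inc_bounded(uint32_t *mem)
+ *   {
+ *       uint32_t t = mem[0], t2 = mem[1];
+ *       if (t != t2) {
+ *           mem[0] = t + 1;   // not at the bound: increment in place
+ *           return t;         // RT gets the old value
+ *       }
+ *       return 1u << 31;      // at the bound: RT gets 1 << (s * 8 - 1)
+ *   }
+ */
+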
+static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
+{
+ uint32_t gpr_FC = FC(ctx->opcode);
+ TCGv EA = tcg_temp_new();
+ int rt = rD(ctx->opcode);
+ bool need_serial;
+ TCGv src, dst;
+
+ gen_addr_register(ctx, EA);
+ dst = cpu_gpr[rt];
+ src = cpu_gpr[(rt + 1) & 31];
+
+ need_serial = false;
+ memop |= MO_ALIGN;
+ switch (gpr_FC) {
+ case 0: /* Fetch and add */
+ tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 1: /* Fetch and xor */
+ tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 2: /* Fetch and or */
+ tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 3: /* Fetch and 'and' */
+ tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 4: /* Fetch and max unsigned */
+ tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 5: /* Fetch and max signed */
+ tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 6: /* Fetch and min unsigned */
+ tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 7: /* Fetch and min signed */
+ tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+ case 8: /* Swap */
+ tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
+ break;
+
+ case 16: /* Compare and swap not equal */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
+ if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
+ tcg_gen_mov_tl(t1, src);
+ } else {
+ tcg_gen_ext32u_tl(t1, src);
+ }
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
+ cpu_gpr[(rt + 2) & 31], t0);
+ tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
+ tcg_gen_mov_tl(dst, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+
+ case 24: /* Fetch and increment bounded */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
+ }
+ break;
+ case 25: /* Fetch and increment equal */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
+ }
+ break;
+ case 28: /* Fetch and decrement bounded */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ need_serial = true;
+ } else {
+ gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
+ }
+ break;
+
+ default:
+ /* invoke data storage error handler */
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
+ }
+ tcg_temp_free(EA);
+
+ if (need_serial) {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+}
+
+static void gen_lwat(DisasContext *ctx)
+{
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
+}
+
+#ifdef TARGET_PPC64
+static void gen_ldat(DisasContext *ctx)
+{
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
+}
+#endif
+
+static void gen_st_atomic(DisasContext *ctx, MemOp memop)
+{
+ uint32_t gpr_FC = FC(ctx->opcode);
+ TCGv EA = tcg_temp_new();
+ TCGv src, discard;
+
+ gen_addr_register(ctx, EA);
+ src = cpu_gpr[rD(ctx->opcode)];
+ discard = tcg_temp_new();
+
+ memop |= MO_ALIGN;
+ switch (gpr_FC) {
+ case 0: /* add and Store */
+ tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 1: /* xor and Store */
+ tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 2: /* Or and Store */
+ tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 3: /* 'and' and Store */
+ tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 4: /* Store max unsigned */
+ tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 5: /* Store max signed */
+ tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 6: /* Store min unsigned */
+ tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 7: /* Store min signed */
+ tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+ break;
+ case 24: /* Store twin */
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ } else {
+ TCGv t = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv s = tcg_temp_new();
+ TCGv s2 = tcg_temp_new();
+ TCGv ea_plus_s = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
+ tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
+ tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
+ tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
+ tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
+ tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
+
+ tcg_temp_free(ea_plus_s);
+ tcg_temp_free(s2);
+ tcg_temp_free(s);
+ tcg_temp_free(t2);
+ tcg_temp_free(t);
+ }
+ break;
+ default:
+ /* invoke data storage error handler */
+ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
+ }
+ tcg_temp_free(discard);
+ tcg_temp_free(EA);
+}
+
+static void gen_stwat(DisasContext *ctx)
+{
+ gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
+}
+
+#ifdef TARGET_PPC64
+static void gen_stdat(DisasContext *ctx)
+{
+ gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+}
+#endif
+
+static void gen_conditional_store(DisasContext *ctx, MemOp memop)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ int reg = rS(ctx->opcode);
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
+ tcg_temp_free(t0);
+
+ t0 = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
+ cpu_gpr[reg], ctx->mem_idx,
+ DEF_MEMOP(memop) | MO_ALIGN);
+ tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
+ tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
+ tcg_gen_or_tl(t0, t0, cpu_so);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
+ tcg_temp_free(t0);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+
+ /*
+ * Address mismatch implies failure. But we still need to provide
+ * the memory barrier semantics of the instruction.
+ */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+
+ gen_set_label(l2);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+}
+
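+/*
+ * After a conditional store, CR0 is (0, 0, EQ, SO), with EQ set iff
+ * the reservation was intact and the cmpxchg stored the new value.  A
+ * sketch of the CR0 computation (illustrative only; assumes QEMU's
+ * CRF_EQ_BIT == 1 and that so_bit already sits at CRF_SO_BIT):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdint.h>
+ *
+ *   static uint32_t ref_stcx_cr0(bool store_succeeded, uint32_t so_bit)
+ *   {
+ *       return ((uint32_t)store_succeeded << 1) | so_bit;
+ *   }
+ */
+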
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ gen_conditional_store(ctx, memop); \
+}
+
+STCX(stbcx_, DEF_MEMOP(MO_UB))
+STCX(sthcx_, DEF_MEMOP(MO_UW))
+STCX(stwcx_, DEF_MEMOP(MO_UL))
+
+#if defined(TARGET_PPC64)
+/* ldarx */
+LARX(ldarx, DEF_MEMOP(MO_Q))
+/* stdcx. */
+STCX(stdcx_, DEF_MEMOP(MO_Q))
+
+/* lqarx */
+static void gen_lqarx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv EA, hi, lo;
+
+ if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
+ (rd == rB(ctx->opcode)))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+
+ /* Note that the low part is always in RD+1, even in LE mode. */
+ lo = cpu_gpr[rd + 1];
+ hi = cpu_gpr[rd];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ if (HAVE_ATOMIC128) {
+ TCGv_i32 oi = tcg_temp_new_i32();
+ if (ctx->le_mode) {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
+ ctx->mem_idx));
+ gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+ } else {
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
+ ctx->mem_idx));
+ gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+ }
+ tcg_temp_free_i32(oi);
+ tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
+ } else {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ tcg_temp_free(EA);
+ return;
+ }
+ } else if (ctx->le_mode) {
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
+ } else {
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+
+ tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
+ tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
+}
+
+/* stqcx. */
+static void gen_stqcx_(DisasContext *ctx)
+{
+ int rs = rS(ctx->opcode);
+ TCGv EA, hi, lo;
+
+ if (unlikely(rs & 1)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+
+ /* Note that the low part is always in RS+1, even in LE mode. */
+ lo = cpu_gpr[rs + 1];
+ hi = cpu_gpr[rs];
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ if (HAVE_CMPXCHG128) {
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
+ if (ctx->le_mode) {
+ gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
+ EA, lo, hi, oi);
+ } else {
+ gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
+ EA, lo, hi, oi);
+ }
+ tcg_temp_free_i32(oi);
+ } else {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+ tcg_temp_free(EA);
+ } else {
+ TCGLabel *lab_fail = gen_new_label();
+ TCGLabel *lab_over = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+ tcg_temp_free(EA);
+
+ gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val2)
+ : offsetof(CPUPPCState, reserve_val)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_ld64_i64(ctx, t0, t0);
+ tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
+ ? offsetof(CPUPPCState, reserve_val)
+ : offsetof(CPUPPCState, reserve_val2)));
+ tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+
+ /* Success */
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
+ tcg_gen_addi_i64(t0, cpu_reserve, 8);
+ gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ tcg_gen_br(lab_over);
+
+ gen_set_label(lab_fail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+
+ gen_set_label(lab_over);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ }
+}
+#endif /* defined(TARGET_PPC64) */
+
+/* sync */
+static void gen_sync(DisasContext *ctx)
+{
+ uint32_t l = (ctx->opcode >> 21) & 3;
+
+ /*
+ * We may need to check for a pending TLB flush.
+ *
+ * We do this on ptesync (l == 2) on ppc64 and on any sync on ppc32.
+ *
+ * Additionally, this can only happen in kernel mode, so check
+ * MSR_PR as well.
+ */
+ if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
+ gen_check_tlb_flush(ctx, true);
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+}
+
+/* wait */
+static void gen_wait(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(1);
+ tcg_gen_st_i32(t0, cpu_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_doze(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_DOZE);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_nap(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_NAP);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_stop(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_STOP);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_sleep(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_SLEEP);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rvwinkle(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_RVWINKLE);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ /* Stop translation, as the CPU is supposed to sleep from now on */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+#endif /* #if defined(TARGET_PPC64) */
+
+static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->has_cfar) {
+ tcg_gen_movi_tl(cpu_cfar, nip);
+ }
+#endif
+}
+
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ return translator_use_goto_tb(&ctx->base, dest);
+}
+
+static void gen_lookup_and_goto_ptr(DisasContext *ctx)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ gen_debug_exception(ctx);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
+
+/*** Branch ***/
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (NARROW_MODE(ctx)) {
+ dest = (uint32_t) dest;
+ }
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ tcg_gen_exit_tb(ctx->base.tb, n);
+ } else {
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ gen_lookup_and_goto_ptr(ctx);
+ }
+}
+
+static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
+{
+ if (NARROW_MODE(ctx)) {
+ nip = (uint32_t)nip;
+ }
+ tcg_gen_movi_tl(cpu_lr, nip);
+}
+
+/* b ba bl bla */
+static void gen_b(DisasContext *ctx)
+{
+ target_ulong li, target;
+
+ /* sign extend LI */
+ li = LI(ctx->opcode);
+ li = (li ^ 0x02000000) - 0x02000000;
+ if (likely(AA(ctx->opcode) == 0)) {
+ target = ctx->cia + li;
+ } else {
+ target = li;
+ }
+ if (LK(ctx->opcode)) {
+ gen_setlr(ctx, ctx->base.pc_next);
+ }
+ gen_update_cfar(ctx, ctx->cia);
+ gen_goto_tb(ctx, 0, target);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
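+/*
+ * A minimal illustrative sketch (hypothetical name, not used by the
+ * translator) of the xor/subtract sign extension used by gen_b() below:
+ * LI is a 26-bit displacement (24 bits << 2), so xoring with and then
+ * subtracting the sign-bit weight 0x02000000 propagates bit 25 into the
+ * upper bits.
+ */
+static inline target_long example_sext_li(target_ulong li)
+{
+    /* equivalent to: (li & 0x02000000) ? li - 0x04000000 : li */
+    return (li ^ 0x02000000) - 0x02000000;
+}
+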
+#define BCOND_IM 0
+#define BCOND_LR 1
+#define BCOND_CTR 2
+#define BCOND_TAR 3
+
+static void gen_bcond(DisasContext *ctx, int type)
+{
+ uint32_t bo = BO(ctx->opcode);
+ TCGLabel *l1;
+ TCGv target;
+
+ if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
+ target = tcg_temp_local_new();
+ if (type == BCOND_CTR) {
+ tcg_gen_mov_tl(target, cpu_ctr);
+ } else if (type == BCOND_TAR) {
+ gen_load_spr(target, SPR_TAR);
+ } else {
+ tcg_gen_mov_tl(target, cpu_lr);
+ }
+ } else {
+ target = NULL;
+ }
+ if (LK(ctx->opcode)) {
+ gen_setlr(ctx, ctx->base.pc_next);
+ }
+ l1 = gen_new_label();
+ if ((bo & 0x4) == 0) {
+ /* Decrement and test CTR */
+ TCGv temp = tcg_temp_new();
+
+ if (type == BCOND_CTR) {
+            /*
+             * All ISAs up to v3 describe this form of bcctr as invalid, but
+             * some processors, i.e. 64-bit server processors compliant with
+             * arch 2.x, implement a "test and decrement" logic instead, as
+             * described in their respective UMs. This logic makes CTR act as
+             * both the branch target and a counter, which makes the form
+             * basically useless and thus never used in real code.
+             *
+             * This form was hence chosen to trigger the extra
+             * micro-architectural side-effect on real HW that the Spectre v2
+             * workaround needs. It is up to guests that implement such a
+             * workaround, e.g. Linux, to use this form in a way that only
+             * triggers the side-effect without doing anything else harmful.
+             */
+ if (unlikely(!is_book3s_arch2x(ctx))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ tcg_temp_free(temp);
+ tcg_temp_free(target);
+ return;
+ }
+
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(temp, cpu_ctr);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_ctr);
+ }
+ if (bo & 0x2) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
+ }
+ tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
+ } else {
+ tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(temp, cpu_ctr);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_ctr);
+ }
+ if (bo & 0x2) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
+ }
+ }
+ tcg_temp_free(temp);
+ }
+ if ((bo & 0x10) == 0) {
+ /* Test CR */
+ uint32_t bi = BI(ctx->opcode);
+ uint32_t mask = 0x08 >> (bi & 0x03);
+ TCGv_i32 temp = tcg_temp_new_i32();
+
+ if (bo & 0x8) {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
+ } else {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
+ }
+ tcg_temp_free_i32(temp);
+ }
+ gen_update_cfar(ctx, ctx->cia);
+ if (type == BCOND_IM) {
+ target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
+ if (likely(AA(ctx->opcode) == 0)) {
+ gen_goto_tb(ctx, 0, ctx->cia + li);
+ } else {
+ gen_goto_tb(ctx, 0, li);
+ }
+ } else {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
+ } else {
+ tcg_gen_andi_tl(cpu_nip, target, ~3);
+ }
+ gen_lookup_and_goto_ptr(ctx);
+ tcg_temp_free(target);
+ }
+ if ((bo & 0x14) != 0x14) {
+ /* fallthrough case */
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
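+/*
+ * Illustrative sketch (hypothetical helper) of the branch-taken predicate
+ * that gen_bcond() open-codes with TCG branches, for the normal
+ * decrement-then-test path: BO bit 0x10 masks the CR test, 0x08 selects
+ * the wanted CR bit value, 0x04 masks the CTR test and 0x02 selects
+ * "branch if the decremented CTR is zero".
+ */
+static inline bool example_bcond_taken(uint32_t bo,
+                                       target_ulong ctr_after_dec,
+                                       bool cr_bit)
+{
+    bool ctr_ok = (bo & 0x4) || ((ctr_after_dec == 0) == !!(bo & 0x2));
+    bool cond_ok = (bo & 0x10) || (cr_bit == !!(bo & 0x8));
+
+    return ctr_ok && cond_ok;
+}
+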
+static void gen_bc(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_IM);
+}
+
+static void gen_bcctr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_CTR);
+}
+
+static void gen_bclr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_LR);
+}
+
+static void gen_bctar(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_TAR);
+}
+
+/*** Condition register logical ***/
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ uint8_t bitmask; \
+ int sh; \
+ TCGv_i32 t0, t1; \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \
+ t0 = tcg_temp_new_i32(); \
+    if (sh > 0) {                                                            \
+        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);           \
+    } else if (sh < 0) {                                                     \
+        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);          \
+    } else {                                                                 \
+        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                \
+    }                                                                        \
+ t1 = tcg_temp_new_i32(); \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \
+    if (sh > 0) {                                                            \
+        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);           \
+    } else if (sh < 0) {                                                     \
+        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);          \
+    } else {                                                                 \
+        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                \
+    }                                                                        \
+ tcg_op(t0, t0, t1); \
+ bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03); \
+ tcg_gen_andi_i32(t0, t0, bitmask); \
+ tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \
+ tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+/* crand */
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
+/* crandc */
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
+/* creqv */
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
+/* crnand */
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
+/* crnor */
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
+/* cror */
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
+/* crorc */
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
+/* crxor */
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
+
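+/*
+ * Sketch of the bit arithmetic behind GEN_CRLOGIC (hypothetical helper):
+ * CR bit number crb lives in field crb >> 2 and, within that 4-bit field,
+ * has the mask 0x08 >> (crb & 3), i.e. nibble bit 3 - (crb & 3).
+ */
+static inline uint32_t example_cr_bit(uint32_t crf_val, int crb)
+{
+    return (crf_val >> (3 - (crb & 3))) & 1;
+}
+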
+/* mcrf */
+static void gen_mcrf(DisasContext *ctx)
+{
+ tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
+}
+
+/*** System linkage ***/
+
+/* rfi (supervisor only) */
+static void gen_rfi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /*
+ * This instruction doesn't exist anymore on 64-bit server
+ * processors compliant with arch 2.x
+ */
+ if (is_book3s_arch2x(ctx)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ /* Restore CPU state */
+ CHK_SV;
+ gen_icount_io_start(ctx);
+ gen_update_cfar(ctx, ctx->cia);
+ gen_helper_rfi(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif
+}
+
+#if defined(TARGET_PPC64)
+static void gen_rfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Restore CPU state */
+ CHK_SV;
+ gen_icount_io_start(ctx);
+ gen_update_cfar(ctx, ctx->cia);
+ gen_helper_rfid(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void gen_rfscv(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Restore CPU state */
+ CHK_SV;
+ gen_icount_io_start(ctx);
+ gen_update_cfar(ctx, ctx->cia);
+ gen_helper_rfscv(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif
+}
+#endif
+
+static void gen_hrfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Restore CPU state */
+ CHK_HV;
+ gen_helper_hrfid(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif
+}
+#endif
+
+/* sc */
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
+#else
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
+#define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
+#endif
+static void gen_sc(DisasContext *ctx)
+{
+ uint32_t lev;
+
+ lev = (ctx->opcode >> 5) & 0x7F;
+ gen_exception_err(ctx, POWERPC_SYSCALL, lev);
+}
+
+#if defined(TARGET_PPC64)
+#if !defined(CONFIG_USER_ONLY)
+static void gen_scv(DisasContext *ctx)
+{
+ uint32_t lev = (ctx->opcode >> 5) & 0x7F;
+
+ /* Set the PC back to the faulting instruction. */
+ gen_update_nip(ctx, ctx->cia);
+ gen_helper_scv(cpu_env, tcg_constant_i32(lev));
+
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+#endif
+#endif
+
+/*** Trap ***/
+
+/* Check for unconditional traps (always or never) */
+static bool check_unconditional_trap(DisasContext *ctx)
+{
+ /* Trap never */
+ if (TO(ctx->opcode) == 0) {
+ return true;
+ }
+ /* Trap always */
+ if (TO(ctx->opcode) == 31) {
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+ return true;
+ }
+ return false;
+}
+
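+/*
+ * Sketch of the TO decode the tw/td helpers are assumed to implement
+ * (illustrative only): each TO bit enables one comparison, so TO=0 never
+ * traps and TO=31 always does, which is exactly what
+ * check_unconditional_trap() folds away at translation time.
+ */
+static inline bool example_trap_taken(uint32_t to, int64_t a, int64_t b)
+{
+    return ((to & 0x10) && a < b) ||
+           ((to & 0x08) && a > b) ||
+           ((to & 0x04) && a == b) ||
+           ((to & 0x02) && (uint64_t)a < (uint64_t)b) ||
+           ((to & 0x01) && (uint64_t)a > (uint64_t)b);
+}
+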
+/* tw */
+static void gen_tw(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* twi */
+static void gen_twi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#if defined(TARGET_PPC64)
+/* td */
+static void gen_td(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* tdi */
+static void gen_tdi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+#endif
+
+/*** Processor control ***/
+
+/* mcrxr */
+static void gen_mcrxr(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];
+
+ tcg_gen_trunc_tl_i32(t0, cpu_so);
+ tcg_gen_trunc_tl_i32(t1, cpu_ov);
+ tcg_gen_trunc_tl_i32(dst, cpu_ca);
+ tcg_gen_shli_i32(t0, t0, 3);
+ tcg_gen_shli_i32(t1, t1, 2);
+ tcg_gen_shli_i32(dst, dst, 1);
+ tcg_gen_or_i32(dst, dst, t0);
+ tcg_gen_or_i32(dst, dst, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+
+ tcg_gen_movi_tl(cpu_so, 0);
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_movi_tl(cpu_ca, 0);
+}
+
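+/*
+ * Worked sketch of the CR field gen_mcrxr() assembles above (hypothetical
+ * helper): SO lands in the field's bit 3, OV in bit 2, CA in bit 1 and
+ * bit 0 is zero, e.g. SO=1, OV=0, CA=1 gives 0b1010.
+ */
+static inline uint32_t example_mcrxr_field(bool so, bool ov, bool ca)
+{
+    return ((uint32_t)so << 3) | ((uint32_t)ov << 2) | ((uint32_t)ca << 1);
+}
+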
+#ifdef TARGET_PPC64
+/* mcrxrx */
+static void gen_mcrxrx(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];
+
+ /* copy OV and OV32 */
+ tcg_gen_shli_tl(t0, cpu_ov, 1);
+ tcg_gen_or_tl(t0, t0, cpu_ov32);
+ tcg_gen_shli_tl(t0, t0, 2);
+ /* copy CA and CA32 */
+ tcg_gen_shli_tl(t1, cpu_ca, 1);
+ tcg_gen_or_tl(t1, t1, cpu_ca32);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_trunc_tl_i32(dst, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+#endif
+
+/* mfcr mfocrf */
+static void gen_mfcr(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ if (likely(ctx->opcode & 0x00100000)) {
+ crm = CRM(ctx->opcode);
+ if (likely(crm && ((crm & (crm - 1)) == 0))) {
+ crn = ctz32(crm);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rD(ctx->opcode)], crn * 4);
+ }
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t0, cpu_crf[0]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[1]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[2]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[3]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[4]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[5]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[6]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[7]);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
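+/*
+ * Sketch of the full-CR assembly done by the mfcr path above
+ * (illustrative helper, not used by the translator): the eight 4-bit
+ * fields are concatenated with CR0 ending up in the most significant
+ * nibble of the 32-bit value.
+ */
+static inline uint32_t example_assemble_cr(const uint32_t crf[8])
+{
+    uint32_t cr = 0;
+    int i;
+
+    for (i = 0; i < 8; i++) {
+        cr = (cr << 4) | (crf[i] & 0xf);
+    }
+    return cr;
+}
+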
+/* mfmsr */
+static void gen_mfmsr(DisasContext *ctx)
+{
+ CHK_SV;
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
+}
+
+/* mfspr */
+static inline void gen_op_mfspr(DisasContext *ctx)
+{
+ void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
+#if defined(CONFIG_USER_ONLY)
+ read_cb = ctx->spr_cb[sprn].uea_read;
+#else
+ if (ctx->pr) {
+ read_cb = ctx->spr_cb[sprn].uea_read;
+ } else if (ctx->hv) {
+ read_cb = ctx->spr_cb[sprn].hea_read;
+ } else {
+ read_cb = ctx->spr_cb[sprn].oea_read;
+ }
+#endif
+ if (likely(read_cb != NULL)) {
+ if (likely(read_cb != SPR_NOACCESS)) {
+ (*read_cb)(ctx, rD(ctx->opcode), sprn);
+ } else {
+ /* Privilege exception */
+ /*
+ * This is a hack to avoid warnings when running Linux:
+ * this OS breaks the PowerPC virtualisation model,
+             * allowing userland applications to read the PVR
+ */
+ if (sprn != SPR_PVR) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
+ "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
+ ctx->cia);
+ }
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* ISA 2.07 defines these as no-ops */
+ if ((ctx->insns_flags2 & PPC2_ISA207S) &&
+ (sprn >= 808 && sprn <= 811)) {
+ /* This is a nop */
+ return;
+ }
+ /* Not defined */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Trying to read invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);
+
+        /*
+         * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
+         * generate a privileged exception, a hypervisor emulation
+         * assist, or a no-op.
+         */
+ if (sprn & 0x10) {
+ if (ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ } else {
+ if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ }
+ }
+}
+
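+/*
+ * Decision table behind the invalid-SPR fallthrough in gen_op_mfspr(),
+ * as a sketch (names and return codes are illustrative; the mtspr path
+ * checks only sprn 0): SPR numbers with bit 0x10 set are privileged, the
+ * others may raise a hypervisor emulation assist.
+ */
+static inline int example_invalid_spr_action(uint32_t sprn, bool pr)
+{
+    if (sprn & 0x10) {
+        return pr ? 1 /* privileged exception */ : 0 /* no-op */;
+    }
+    return (pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6)
+           ? 2 /* hypervisor emulation assist */ : 0 /* no-op */;
+}
+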
+static void gen_mfspr(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mftb */
+static void gen_mftb(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mtcrf mtocrf */
+static void gen_mtcrf(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ crm = CRM(ctx->opcode);
+ if (likely((ctx->opcode & 0x00100000))) {
+ if (crm && ((crm & (crm - 1)) == 0)) {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ crn = ctz32(crm);
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_shri_i32(temp, temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
+ tcg_temp_free_i32(temp);
+ }
+ } else {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ for (crn = 0 ; crn < 8 ; crn++) {
+ if (crm & (1 << crn)) {
+ tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
+ }
+ }
+ tcg_temp_free_i32(temp);
+ }
+}
+
+/* mtmsr */
+#if defined(TARGET_PPC64)
+static void gen_mtmsrd(DisasContext *ctx)
+{
+ if (unlikely(!is_book3s_arch2x(ctx))) {
+ gen_invalid(ctx);
+ return;
+ }
+
+ CHK_SV;
+
+#if !defined(CONFIG_USER_ONLY)
+ TCGv t0, t1;
+ target_ulong mask;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_icount_io_start(ctx);
+
+ if (ctx->opcode & 0x00010000) {
+ /* L=1 form only updates EE and RI */
+ mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
+ } else {
+ /* mtmsrd does not alter HV, S, ME, or LE */
+ mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
+ (1ULL << MSR_HV));
+        /*
+         * XXX: we need to update nip before the store in case we
+         * enter power saving mode, as we will then exit the execution
+         * loop directly from ppc_store_msr.
+         */
+ gen_update_nip(ctx, ctx->base.pc_next);
+ }
+
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
+ tcg_gen_andi_tl(t1, cpu_msr, ~mask);
+ tcg_gen_or_tl(t0, t0, t1);
+
+ gen_helper_store_msr(cpu_env, t0);
+
+ /* Must stop the translation as machine state (may have) changed */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#endif /* !defined(CONFIG_USER_ONLY) */
+}
+#endif /* defined(TARGET_PPC64) */
+
+static void gen_mtmsr(DisasContext *ctx)
+{
+ CHK_SV;
+
+#if !defined(CONFIG_USER_ONLY)
+ TCGv t0, t1;
+ target_ulong mask = 0xFFFFFFFF;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_icount_io_start(ctx);
+ if (ctx->opcode & 0x00010000) {
+ /* L=1 form only updates EE and RI */
+ mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
+ } else {
+ /* mtmsr does not alter S, ME, or LE */
+ mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));
+
+        /*
+         * XXX: we need to update nip before the store in case we
+         * enter power saving mode, as we will then exit the execution
+         * loop directly from ppc_store_msr.
+         */
+ gen_update_nip(ctx, ctx->base.pc_next);
+ }
+
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
+ tcg_gen_andi_tl(t1, cpu_msr, ~mask);
+ tcg_gen_or_tl(t0, t0, t1);
+
+ gen_helper_store_msr(cpu_env, t0);
+
+ /* Must stop the translation as machine state (may have) changed */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#endif
+}
+
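+/*
+ * Both mtmsr paths above reduce to a single masked merge; a minimal
+ * sketch (hypothetical helper name), assuming msr and gpr are full
+ * MSR-width values:
+ */
+static inline target_ulong example_msr_merge(target_ulong msr,
+                                             target_ulong gpr,
+                                             target_ulong mask)
+{
+    /* bits inside the mask come from the register, the rest are kept */
+    return (gpr & mask) | (msr & ~mask);
+}
+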
+/* mtspr */
+static void gen_mtspr(DisasContext *ctx)
+{
+ void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
+#if defined(CONFIG_USER_ONLY)
+ write_cb = ctx->spr_cb[sprn].uea_write;
+#else
+ if (ctx->pr) {
+ write_cb = ctx->spr_cb[sprn].uea_write;
+ } else if (ctx->hv) {
+ write_cb = ctx->spr_cb[sprn].hea_write;
+ } else {
+ write_cb = ctx->spr_cb[sprn].oea_write;
+ }
+#endif
+ if (likely(write_cb != NULL)) {
+ if (likely(write_cb != SPR_NOACCESS)) {
+ (*write_cb)(ctx, sprn, rS(ctx->opcode));
+ } else {
+ /* Privilege exception */
+ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
+ "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
+ ctx->cia);
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* ISA 2.07 defines these as no-ops */
+ if ((ctx->insns_flags2 & PPC2_ISA207S) &&
+ (sprn >= 808 && sprn <= 811)) {
+ /* This is a nop */
+ return;
+ }
+
+ /* Not defined */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Trying to write invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);
+
+        /*
+         * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
+         * generate a privileged exception, a hypervisor emulation
+         * assist, or a no-op.
+         */
+ if (sprn & 0x10) {
+ if (ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ } else {
+ if (ctx->pr || sprn == 0) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ }
+ }
+}
+
+#if defined(TARGET_PPC64)
+/* setb */
+static void gen_setb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t8 = tcg_constant_i32(8);
+ TCGv_i32 tm1 = tcg_constant_i32(-1);
+ int crf = crfS(ctx->opcode);
+
+ tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
+ tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
+ tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_temp_free_i32(t0);
+}
+#endif
+
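+/*
+ * Scalar sketch of the movcond chain in gen_setb() above (hypothetical
+ * helper): the CR field's LT bit selects -1, otherwise its GT bit
+ * selects 1, otherwise the result is 0.
+ */
+static inline target_long example_setb(uint32_t crf)
+{
+    return (crf & 8) ? -1 : ((crf & 4) ? 1 : 0);
+}
+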
+/*** Cache management ***/
+
+/* dcbf */
+static void gen_dcbf(DisasContext *ctx)
+{
+ /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbfep (external PID dcbf) */
+static void gen_dcbfep(DisasContext *ctx)
+{
+ /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ CHK_SV;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
+ tcg_temp_free(t0);
+}
+
+/* dcbi (Supervisor only) */
+static void gen_dcbi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv EA, val;
+
+ CHK_SV;
+ EA = tcg_temp_new();
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ /* XXX: specification says this should be treated as a store by the MMU */
+ gen_qemu_ld8u(ctx, val, EA);
+ gen_qemu_st8(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_temp_free(EA);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* dcbst */
+static void gen_dcbst(DisasContext *ctx)
+{
+    /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbstep (external PID dcbst) */
+static void gen_dcbstep(DisasContext *ctx)
+{
+    /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
+ tcg_temp_free(t0);
+}
+
+/* dcbt */
+static void gen_dcbt(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+ * does not generate any exception
+ */
+}
+
+/* dcbtep */
+static void gen_dcbtep(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+ * does not generate any exception
+ */
+}
+
+/* dcbtst */
+static void gen_dcbtst(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+ * does not generate any exception
+ */
+}
+
+/* dcbtstep */
+static void gen_dcbtstep(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+     * XXX: specification says this is treated as a load by the MMU but
+ * does not generate any exception
+ */
+}
+
+/* dcbtls */
+static void gen_dcbtls(DisasContext *ctx)
+{
+    /* Always fails to lock the cache line */
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_Exxx_L1CSR0);
+ tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
+ gen_store_spr(SPR_Exxx_L1CSR0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbz */
+static void gen_dcbz(DisasContext *ctx)
+{
+ TCGv tcgv_addr;
+ TCGv_i32 tcgv_op;
+
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ tcgv_addr = tcg_temp_new();
+ tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ gen_addr_reg_index(ctx, tcgv_addr);
+ gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
+ tcg_temp_free(tcgv_addr);
+ tcg_temp_free_i32(tcgv_op);
+}
+
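+/*
+ * Conceptual effect of dcbz as a self-contained sketch (assumes the line
+ * size is a power of two and buf stands in for guest memory); the helper
+ * called above is additionally assumed to handle alignment and MMU
+ * permission checks.
+ */
+static inline void example_dcbz_effect(uint8_t *buf, uint64_t ea,
+                                       unsigned line_size)
+{
+    uint64_t base = ea & ~(uint64_t)(line_size - 1);
+
+    memset(buf + base, 0, line_size);  /* zero the whole aligned block */
+}
+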
+/* dcbzep */
+static void gen_dcbzep(DisasContext *ctx)
+{
+ TCGv tcgv_addr;
+ TCGv_i32 tcgv_op;
+
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ tcgv_addr = tcg_temp_new();
+ tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ gen_addr_reg_index(ctx, tcgv_addr);
+ gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
+ tcg_temp_free(tcgv_addr);
+ tcg_temp_free_i32(tcgv_op);
+}
+
+/* dst / dstt */
+static void gen_dst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dstst / dststt */
+static void gen_dstst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dss / dssall */
+static void gen_dss(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* icbi */
+static void gen_icbi(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_icbi(cpu_env, t0);
+ tcg_temp_free(t0);
+}
+
+/* icbiep */
+static void gen_icbiep(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_icbiep(cpu_env, t0);
+ tcg_temp_free(t0);
+}
+
+/* Optional: */
+/* dcba */
+static void gen_dcba(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+     * XXX: specification says this is treated as a store by the MMU
+ * but does not generate any exception
+ */
+}
+
+/*** Segment register manipulation ***/
+/* Supervisor only: */
+
+/* mfsr */
+static void gen_mfsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfsrin */
+static void gen_mfsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsr */
+static void gen_mtsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsrin */
+static void gen_mtsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+ CHK_SV;
+
+ t0 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#if defined(TARGET_PPC64)
+/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
+
+/* mfsr */
+static void gen_mfsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfsrin */
+static void gen_mfsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsr */
+static void gen_mtsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsrin */
+static void gen_mtsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbmte */
+static void gen_slbmte(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbmfee(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbmfev(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbfee_(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGLabel *l1, *l2;
+
+ if (unlikely(ctx->pr)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0);
+ gen_set_label(l2);
+#endif
+}
+#endif /* defined(TARGET_PPC64) */
+
+/*** Lookaside buffer management ***/
+/* Optional & supervisor only: */
+
+/* tlbia */
+static void gen_tlbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+
+ gen_helper_tlbia(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbiel */
+static void gen_tlbiel(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ bool psr = (ctx->opcode >> 17) & 0x1;
+
+ if (ctx->pr || (!ctx->hv && !psr && ctx->hr)) {
+ /*
+ * tlbiel is privileged except when PSR=0 and HR=1, making it
+ * hypervisor privileged.
+ */
+ GEN_PRIV;
+ }
+
+ gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbie */
+static void gen_tlbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ bool psr = (ctx->opcode >> 17) & 0x1;
+ TCGv_i32 t1;
+
+ if (ctx->pr) {
+ /* tlbie is privileged... */
+ GEN_PRIV;
+ } else if (!ctx->hv) {
+ if (!ctx->gtse || (!psr && ctx->hr)) {
+ /*
+ * ... except when GTSE=0 or when PSR=0 and HR=1, making it
+ * hypervisor privileged.
+ */
+ GEN_PRIV;
+ }
+ }
+
+ if (NARROW_MODE(ctx)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbie(cpu_env, t0);
+ tcg_temp_free(t0);
+ } else {
+ gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ }
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_temp_free_i32(t1);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsync */
+static void gen_tlbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+
+ if (ctx->gtse) {
+ CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */
+ } else {
+ CHK_HV; /* Else hypervisor privileged */
+ }
+
+    /*
+     * On BookS the flush is already done at ptesync time, which makes
+     * tlbsync a no-op for server CPUs.
+     */
+ if (ctx->insns_flags & PPC_BOOKE) {
+ gen_check_tlb_flush(ctx, true);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#if defined(TARGET_PPC64)
+/* slbia */
+static void gen_slbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ uint32_t ih = (ctx->opcode >> 21) & 0x7;
+ TCGv_i32 t0 = tcg_const_i32(ih);
+
+ CHK_SV;
+
+ gen_helper_slbia(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbie */
+static void gen_slbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbieg */
+static void gen_slbieg(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbsync */
+static void gen_slbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_check_tlb_flush(ctx, true);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#endif /* defined(TARGET_PPC64) */
+
+/*** External control ***/
+/* Optional: */
+
+/* eciwx */
+static void gen_eciwx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
+ DEF_MEMOP(MO_UL | MO_ALIGN));
+ tcg_temp_free(t0);
+}
+
+/* ecowx */
+static void gen_ecowx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
+ DEF_MEMOP(MO_UL | MO_ALIGN));
+ tcg_temp_free(t0);
+}
+
+/* PowerPC 601 specific instructions */
+
+/* abs - abs. */
+static void gen_abs(DisasContext *ctx)
+{
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, d);
+ }
+}
+
+/* abso - abso. */
+static void gen_abso(DisasContext *ctx)
+{
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_ov, a, 0x80000000);
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, d);
+ }
+}
+
+/* clcs */
+static void gen_clcs(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
+ gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ /* Rc=1 sets CR0 to an undefined state */
+}
+
+/* div - div. */
+static void gen_div(DisasContext *ctx)
+{
+ gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* divo - divo. */
+static void gen_divo(DisasContext *ctx)
+{
+ gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* divs - divs. */
+static void gen_divs(DisasContext *ctx)
+{
+ gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* divso - divso. */
+static void gen_divso(DisasContext *ctx)
+{
+ gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* doz - doz. */
+static void gen_doz(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* dozo - dozo. */
+static void gen_dozo(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* dozi */
+static void gen_dozi(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1);
+ tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* lscbx - lscbx. */
+static void gen_lscbx(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+ TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
+
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
+ tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t0);
+ }
+ tcg_temp_free(t0);
+}
+
+/* maskg - maskg. */
+static void gen_maskg(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_movi_tl(t3, 0xFFFFFFFF);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F);
+ tcg_gen_addi_tl(t2, t0, 1);
+ tcg_gen_shr_tl(t2, t3, t2);
+ tcg_gen_shr_tl(t3, t3, t1);
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3);
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* maskir - maskir. */
+static void gen_maskir(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* mul - mul. */
+static void gen_mul(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
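+/*
+ * Arithmetic sketch of the 601 mul above (illustrative helper): the
+ * 64-bit product is split, with the low half going to MQ and the high
+ * half to rD.
+ */
+static inline void example_mul_601(uint32_t a, uint32_t b,
+                                   uint32_t *rd, uint32_t *mq)
+{
+    uint64_t prod = (uint64_t)a * b;
+
+    *mq = (uint32_t)prod;          /* low 32 bits */
+    *rd = (uint32_t)(prod >> 32);  /* high 32 bits */
+}
+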
+/* mulo - mulo. */
+static void gen_mulo(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_gen_ext32s_i64(t1, t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ gen_set_label(l1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* nabs - nabs. */
+static void gen_nabs(DisasContext *ctx)
+{
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_neg_tl(d, d);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, d);
+ }
+}
+
+/* nabso - nabso. */
+static void gen_nabso(DisasContext *ctx)
+{
+ TCGv d = cpu_gpr[rD(ctx->opcode)];
+ TCGv a = cpu_gpr[rA(ctx->opcode)];
+
+ tcg_gen_abs_tl(d, a);
+ tcg_gen_neg_tl(d, d);
+ /* nabs never overflows */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, d);
+ }
+}
+
+/* rlmi - rlmi. */
+static void gen_rlmi(DisasContext *ctx)
+{
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_andi_tl(t0, t0, MASK(mb, me));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ ~MASK(mb, me));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* rrib - rrib. */
+static void gen_rrib(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0x80000000);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sle - sle. */
+static void gen_sle(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
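+/*
+ * Scalar sketch of sle (hypothetical helper, n = rB & 0x1F): rA gets the
+ * plain left shift while MQ receives the 32-bit left rotation, which is
+ * what the shl/shr/or sequence above computes.
+ */
+static inline void example_sle(uint32_t rs, unsigned n,
+                               uint32_t *ra, uint32_t *mq)
+{
+    *mq = (rs << n) | (rs >> ((32 - n) & 31));  /* rotate left by n */
+    *ra = rs << n;
+}
+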
+/* sleq - sleq. */
+static void gen_sleq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t2, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t2, t2, t0);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sliq - sliq. */
+static void gen_sliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* slliq - slliq. */
+static void gen_slliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sllq - sllq. */
+static void gen_sllq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ gen_load_spr(t2, SPR_MQ);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* slq - slq. */
+static void gen_slq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sraiq - sraiq. */
+static void gen_sraiq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t0, t0, t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+ tcg_gen_movi_tl(cpu_ca, 1);
+ gen_set_label(l1);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
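+/*
+ * Sketch of the carry rule gen_sraiq() implements (assumed helper, valid
+ * for 1 <= sh <= 31): CA is set only when the source is negative and
+ * ones were shifted out, and MQ receives the 32-bit rotation.
+ */
+static inline int32_t example_sraiq(int32_t rs, int sh,
+                                    bool *ca, uint32_t *mq)
+{
+    uint32_t lost = (uint32_t)rs << (32 - sh);  /* bits shifted out */
+
+    *mq = ((uint32_t)rs >> sh) | lost;
+    *ca = (rs < 0) && (lost != 0);
+    return rs >> sh;  /* arithmetic shift */
+}
+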
+/* sraq - sraq. */
+static void gen_sraq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2);
+ tcg_gen_movi_tl(cpu_ca, 1);
+ gen_set_label(l2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sre - sre. */
+static void gen_sre(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srea - srea. */
+static void gen_srea(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sreq */
+static void gen_sreq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t2, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t2, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* sriq */
+static void gen_sriq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srliq */
+static void gen_srliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srlq */
+static void gen_srlq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t2, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ gen_load_spr(t1, SPR_MQ);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* srq */
+static void gen_srq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* PowerPC 602 specific instructions */
+
+/* dsa */
+static void gen_dsa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* esa */
+static void gen_esa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* mfrom */
+static void gen_mfrom(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* 602 - 603 - G2 TLB management */
+
+/* tlbld */
+static void gen_tlbld_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbli */
+static void gen_tlbli_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* 74xx TLB management */
+
+/* tlbld */
+static void gen_tlbld_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbli */
+static void gen_tlbli_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* POWER instructions not in PowerPC 601 */
+
+/* clf */
+static void gen_clf(DisasContext *ctx)
+{
+ /* Cache line flush: implemented as no-op */
+}
+
+/* cli */
+static void gen_cli(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Cache line invalidate: privileged and treated as no-op */
+ CHK_SV;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* dclst */
+static void gen_dclst(DisasContext *ctx)
+{
+ /* Data cache line store: treated as no-op */
+}
+
+static void gen_mfsri(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_extract_tl(t0, t0, 28, 4);
+ gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (ra != 0 && ra != rd) {
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rac(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rfsvc(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_rfsvc(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* svc is not implemented for now */
+
+/* BookE specific instructions */
+
+/* XXX: not implemented on 440 ? */
+static void gen_mfapidi(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_tlbiva(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+    gen_helper_tlbiva(cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* All 405 MAC instructions are translated here */
+static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
+ int ra, int rb, int rt, int Rc)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+
+ switch (opc3 & 0x0D) {
+ case 0x05:
+ /* macchw - macchw. - macchwo - macchwo. */
+ /* macchws - macchws. - macchwso - macchwso. */
+ /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
+ /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
+ /* mulchw - mulchw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x04:
+ /* macchwu - macchwu. - macchwuo - macchwuo. */
+ /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
+ /* mulchwu - mulchwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x01:
+ /* machhw - machhw. - machhwo - machhwo. */
+ /* machhws - machhws. - machhwso - machhwso. */
+ /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
+ /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
+ /* mulhhw - mulhhw. */
+ tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16s_tl(t0, t0);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x00:
+ /* machhwu - machhwu. - machhwuo - machhwuo. */
+ /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
+ /* mulhhwu - mulhhwu. */
+ tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16u_tl(t0, t0);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x0D:
+ /* maclhw - maclhw. - maclhwo - maclhwo. */
+ /* maclhws - maclhws. - maclhwso - maclhwso. */
+ /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
+ /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
+ /* mullhw - mullhw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
+ break;
+ case 0x0C:
+ /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
+ /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
+ /* mullhwu - mullhwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
+ break;
+ }
+ if (opc2 & 0x04) {
+ /* (n)multiply-and-accumulate (0x0C / 0x0E) */
+ tcg_gen_mul_tl(t1, t0, t1);
+ if (opc2 & 0x02) {
+ /* nmultiply-and-accumulate (0x0E) */
+ tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
+ } else {
+ /* multiply-and-accumulate (0x0C) */
+ tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
+ }
+
+ if (opc3 & 0x12) {
+ /* Check overflow and/or saturate */
+ TCGLabel *l1 = gen_new_label();
+
+ if (opc3 & 0x10) {
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ }
+ if (opc3 & 0x01) {
+ /* Signed */
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
+ tcg_gen_xori_tl(t0, t0, 0x7fffffff);
+ }
+ } else {
+ /* Unsigned */
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_movi_tl(t0, UINT32_MAX);
+ }
+ }
+ if (opc3 & 0x10) {
+ /* Check overflow */
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ }
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
+ }
+ } else {
+ tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+    if (unlikely(Rc != 0)) {
+ /* Update Rc0 */
+ gen_set_Rc0(ctx, cpu_gpr[rt]);
+ }
+}
+
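+/*
+ * Textbook signed-overflow/saturation sketch for context (illustrative
+ * helper; the branch-based test above is a variant of this): overflow
+ * happens iff both operands share a sign that the sum does not, and
+ * saturation picks the extreme matching the accumulator's sign, as the
+ * sari/xori sequence in gen_405_mulladd_insn() does.
+ */
+static inline int32_t example_mac_sat_add(int32_t acc, int32_t prod,
+                                          bool *ov)
+{
+    int32_t sum = (int32_t)((uint32_t)acc + (uint32_t)prod);
+
+    *ov = ((uint32_t)(~(acc ^ prod) & (acc ^ sum)) >> 31) != 0;
+    return *ov ? ((acc < 0) ? INT32_MIN : INT32_MAX) : sum;
+}
+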
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \
+ rD(ctx->opcode), Rc(ctx->opcode)); \
+}
+
+/* macchw - macchw. */
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
+/* macchwo - macchwo. */
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
+/* macchws - macchws. */
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
+/* macchwso - macchwso. */
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
+/* macchwsu - macchwsu. */
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
+/* macchwsuo - macchwsuo. */
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
+/* macchwu - macchwu. */
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
+/* macchwuo - macchwuo. */
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
+/* machhw - machhw. */
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
+/* machhwo - machhwo. */
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
+/* machhws - machhws. */
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
+/* machhwso - machhwso. */
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
+/* machhwsu - machhwsu. */
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
+/* machhwsuo - machhwsuo. */
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
+/* machhwu - machhwu. */
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
+/* machhwuo - machhwuo. */
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
+/* maclhw - maclhw. */
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
+/* maclhwo - maclhwo. */
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
+/* maclhws - maclhws. */
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
+/* maclhwso - maclhwso. */
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
+/* maclhwu - maclhwu. */
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
+/* maclhwuo - maclhwuo. */
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
+/* maclhwsu - maclhwsu. */
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
+/* maclhwsuo - maclhwsuo. */
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
+/* nmacchw - nmacchw. */
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
+/* nmacchwo - nmacchwo. */
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
+/* nmacchws - nmacchws. */
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
+/* nmacchwso - nmacchwso. */
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
+/* nmachhw - nmachhw. */
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
+/* nmachhwo - nmachhwo. */
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
+/* nmachhws - nmachhws. */
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
+/* nmachhwso - nmachhwso. */
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
+/* nmaclhw - nmaclhw. */
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
+/* nmaclhwo - nmaclhwo. */
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
+/* nmaclhws - nmaclhws. */
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
+/* nmaclhwso - nmaclhwso. */
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);
+
+/* mulchw - mulchw. */
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
+/* mulchwu - mulchwu. */
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
+/* mulhhw - mulhhw. */
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
+/* mulhhwu - mulhhwu. */
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
+/* mullhw - mullhw. */
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
+/* mullhwu - mullhwu. */
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
+
+/* mfdcr */
+static void gen_mfdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv dcrn;
+
+ CHK_SV;
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
+ tcg_temp_free(dcrn);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtdcr */
+static void gen_mtdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv dcrn;
+
+ CHK_SV;
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(dcrn);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
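+
+/*
+ * Note (for illustration): mfdcr/mtdcr encode the DCR number in the
+ * same split field layout as SPR numbers, which is why SPR() is
+ * reused to extract it; the indexed forms below take the DCR number
+ * from a GPR instead.
+ */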
+
+/* mfdcrx */
+/* XXX: not implemented on 440? */
+static void gen_mfdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtdcrx */
+/* XXX: not implemented on 440? */
+static void gen_mtdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfdcrux (PPC 460) : user-mode access to DCR */
+static void gen_mfdcrux(DisasContext *ctx)
+{
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* mtdcrux (PPC 460) : user-mode access to DCR */
+static void gen_mtdcrux(DisasContext *ctx)
+{
+ gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ /* Note: Rc update flag set leads to undefined state of Rc0 */
+}
+
+/* dccci */
+static void gen_dccci(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* dcread */
+static void gen_dcread(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv EA, val;
+
+ CHK_SV;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ gen_qemu_ld32u(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* icbt */
+static void gen_icbt_40x(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* iccci */
+static void gen_iccci(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* icread */
+static void gen_icread(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* rfci (supervisor only) */
+static void gen_rfci_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_40x_rfci(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rfci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfci(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* BookE specific */
+
+/* XXX: not implemented on 440? */
+static void gen_rfdi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfdi(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* XXX: not implemented on 440? */
+static void gen_rfmci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfmci(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC 405 implementation */
+
+/* tlbre */
+static void gen_tlbre_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC 440 implementation */
+
+/* tlbre */
+static void gen_tlbre_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC BookE 2.06 implementation */
+
+/* tlbre */
+static void gen_tlbre_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_booke206_tlbre(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ if (rA(ctx->opcode)) {
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ t0 = tcg_const_tl(0);
+ }
+
+ tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_booke206_tlbsx(cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_booke206_tlbwe(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_tlbivax_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_booke206_tlbivax(cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_tlbilx_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+
+ switch ((ctx->opcode >> 21) & 0x3) {
+ case 0:
+ gen_helper_booke206_tlbilx0(cpu_env, t0);
+ break;
+ case 1:
+ gen_helper_booke206_tlbilx1(cpu_env, t0);
+ break;
+ case 3:
+ gen_helper_booke206_tlbilx3(cpu_env, t0);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* wrtee */
+static void gen_wrtee(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ /*
+ * Stop translation to have a chance to raise an exception if we
+ * just set msr_ee to 1
+ */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* wrteei */
+static void gen_wrteei(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ if (ctx->opcode & 0x00008000) {
+ tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
+ /* Stop translation to have a chance to raise an exception */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+ } else {
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
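+
+/*
+ * For illustration: bit 0x00008000 tested above is the E field of
+ * wrteei, so "wrteei 1" sets MSR[EE] and ends the TB (allowing a
+ * pending interrupt to be taken), while "wrteei 0" clears it inline.
+ */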
+
+/* PowerPC 440 specific instructions */
+
+/* dlmzb */
+static void gen_dlmzb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* mbar replaces eieio on 440 */
+static void gen_mbar(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* msync replaces sync on 440 */
+static void gen_msync_4xx(DisasContext *ctx)
+{
+ /* Only e500 seems to treat reserved bits as invalid */
+ if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
+ (ctx->opcode & 0x03FFF801)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ }
+ /* otherwise interpreted as no-op */
+}
+
+/* icbt */
+static void gen_icbt_440(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+ * XXX: the specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* Embedded.Processor Control */
+
+static void gen_msgclr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+ if (is_book3s_arch2x(ctx)) {
+ gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_msgsnd(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+ if (is_book3s_arch2x(ctx)) {
+ gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#if defined(TARGET_PPC64)
+static void gen_msgclrp(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_msgsndp(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+#endif
+
+static void gen_msgsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+#endif /* defined(CONFIG_USER_ONLY) */
+ /* interpreted as no-op */
+}
+
+#if defined(TARGET_PPC64)
+static void gen_maddld(DisasContext *ctx)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
+ tcg_temp_free_i64(t1);
+}
+
+/* maddhd maddhdu */
+static void gen_maddhd_maddhdu(DisasContext *ctx)
+{
+ TCGv_i64 lo = tcg_temp_new_i64();
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ if (Rc(ctx->opcode)) {
+ tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_movi_i64(t1, 0);
+ } else {
+ tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
+ }
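+ /*
+ * 128-bit add: (hi:lo) + (t1:RC), where t1 is 0 for the unsigned
+ * form or the sign extension of RC for the signed one; only the
+ * high half is kept in RT, the low half written to t1 is discarded.
+ * The insn's low opcode bit (extracted via Rc()) selects maddhdu.
+ */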
+ tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
+ cpu_gpr[rC(ctx->opcode)], t1);
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(t1);
+}
+#endif /* defined(TARGET_PPC64) */
+
+static void gen_tbegin(DisasContext *ctx)
+{
+ if (unlikely(!ctx->tm_enabled)) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
+ return;
+ }
+ gen_helper_tbegin(cpu_env);
+}
+
+#define GEN_TM_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->tm_enabled)) { \
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
+ return; \
+ } \
+ /* \
+ * Because tbegin always fails in QEMU, these user \
+ * space instructions all have a simple implementation: \
+ * \
+ * CR[0] = 0b0 || MSR[TS] || 0b0 \
+ * = 0b0 || 0b00 || 0b0 \
+ */ \
+ tcg_gen_movi_i32(cpu_crf[0], 0); \
+}
+
+GEN_TM_NOOP(tend);
+GEN_TM_NOOP(tabort);
+GEN_TM_NOOP(tabortwc);
+GEN_TM_NOOP(tabortwci);
+GEN_TM_NOOP(tabortdc);
+GEN_TM_NOOP(tabortdci);
+GEN_TM_NOOP(tsr);
+
+static inline void gen_cp_abort(DisasContext *ctx)
+{
+ /* Do Nothing */
+}
+
+#define GEN_CP_PASTE_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ /* \
+ * Generate invalid exception until we have an \
+ * implementation of the copy paste facility \
+ */ \
+ gen_invalid(ctx); \
+}
+
+GEN_CP_PASTE_NOOP(copy)
+GEN_CP_PASTE_NOOP(paste)
+
+static void gen_tcheck(DisasContext *ctx)
+{
+ if (unlikely(!ctx->tm_enabled)) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
+ return;
+ }
+ /*
+ * Because tbegin always fails, the tcheck implementation is
+ * simple:
+ *
+ * CR[CRF] = TDOOMED || MSR[TS] || 0b0
+ * = 0b1 || 0b00 || 0b0
+ */
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
+}
+
+#if defined(CONFIG_USER_ONLY)
+#define GEN_TM_PRIV_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \
+}
+
+#else
+
+#define GEN_TM_PRIV_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ CHK_SV; \
+ if (unlikely(!ctx->tm_enabled)) { \
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
+ return; \
+ } \
+ /* \
+ * Because tbegin always fails, the implementation is \
+ * simple: \
+ * \
+ * CR[0] = 0b0 || MSR[TS] || 0b0 \
+ * = 0b0 || 0b00 || 0b0 \
+ */ \
+ tcg_gen_movi_i32(cpu_crf[0], 0); \
+}
+
+#endif
+
+GEN_TM_PRIV_NOOP(treclaim);
+GEN_TM_PRIV_NOOP(trechkpt);
+
+static inline void get_fpr(TCGv_i64 dst, int regno)
+{
+ tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
+}
+
+static inline void set_fpr(int regno, TCGv_i64 src)
+{
+ tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
+}
+
+static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
+{
+ tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
+}
+
+static inline void set_avr64(int regno, TCGv_i64 src, bool high)
+{
+ tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
+}
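+
+/*
+ * Illustrative note: Altivec registers are 128 bits wide, so they are
+ * accessed here as two 64-bit halves selected by the 'high' flag.
+ */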
+
+/*
+ * Helpers for decodetree used by !function for decoding arguments.
+ */
+static int times_2(DisasContext *ctx, int x)
+{
+ return x * 2;
+}
+
+static int times_4(DisasContext *ctx, int x)
+{
+ return x * 4;
+}
+
+static int times_16(DisasContext *ctx, int x)
+{
+ return x * 16;
+}
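+
+/*
+ * Illustrative use (assumed decodetree syntax, not taken from this
+ * patch): a .decode field such as
+ * %ds_si 2:s14 !function=times_4
+ * extracts a signed 14-bit field and scales it by 4 through times_4()
+ * before it reaches the trans_* function.
+ */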
+
+/*
+ * Helpers for trans_* functions to check for specific insns flags.
+ * Use token pasting to ensure that we use the proper flag with the
+ * proper variable.
+ */
+#define REQUIRE_INSNS_FLAGS(CTX, NAME) \
+ do { \
+ if (((CTX)->insns_flags & PPC_##NAME) == 0) { \
+ return false; \
+ } \
+ } while (0)
+
+#define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
+ do { \
+ if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
+ return false; \
+ } \
+ } while (0)
+
+/* Then special-case the check for 64-bit so that we elide code for ppc32. */
+#if TARGET_LONG_BITS == 32
+# define REQUIRE_64BIT(CTX) return false
+#else
+# define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B)
+#endif
+
+#define REQUIRE_VECTOR(CTX) \
+ do { \
+ if (unlikely(!(CTX)->altivec_enabled)) { \
+ gen_exception((CTX), POWERPC_EXCP_VPU); \
+ return true; \
+ } \
+ } while (0)
+
+#define REQUIRE_VSX(CTX) \
+ do { \
+ if (unlikely(!(CTX)->vsx_enabled)) { \
+ gen_exception((CTX), POWERPC_EXCP_VSXU); \
+ return true; \
+ } \
+ } while (0)
+
+#define REQUIRE_FPU(ctx) \
+ do { \
+ if (unlikely(!(ctx)->fpu_enabled)) { \
+ gen_exception((ctx), POWERPC_EXCP_FPU); \
+ return true; \
+ } \
+ } while (0)
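+
+/*
+ * Sketch of how these guards compose in a trans_* function (the names
+ * here are illustrative, not from this patch):
+ *
+ * static bool trans_FOO(DisasContext *ctx, arg_FOO *a)
+ * {
+ * REQUIRE_INSNS_FLAGS2(ctx, ISA300); // decode fails -> invalid insn
+ * REQUIRE_VSX(ctx); // raises the VSX-unavailable exception
+ * ... emit TCG ops ...
+ * return true;
+ * }
+ */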
+
+/*
+ * Helpers for implementing sets of trans_* functions.
+ * Defer the implementation of NAME to FUNC, with optional extra arguments.
+ */
+#define TRANS(NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { return FUNC(ctx, a, __VA_ARGS__); }
+
+#define TRANS64(NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
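+
+/*
+ * E.g. (illustrative) TRANS64(FOO, do_foo, true) defines trans_FOO(),
+ * which rejects the insn on CPUs without the 64B flag and otherwise
+ * tail-calls do_foo(ctx, a, true).
+ */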
+
+/* TODO: More TRANS* helpers for extra insn_flags checks. */
+
+#include "decode-insn32.c.inc"
+#include "decode-insn64.c.inc"
+#include "power8-pmu-regs.c.inc"
+
+/*
+ * Incorporate CIA into the constant when R=1.
+ * Validate that when R=1, RA=0.
+ */
+static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
+{
+ d->rt = a->rt;
+ d->ra = a->ra;
+ d->si = a->si;
+ if (a->r) {
+ if (unlikely(a->ra != 0)) {
+ gen_invalid(ctx);
+ return false;
+ }
+ d->si += ctx->cia;
+ }
+ return true;
+}
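+
+/*
+ * For illustration: with R=1 a prefixed load such as plwz becomes
+ * PC-relative, i.e. EA = CIA + si, and RA must be 0; with R=0 the
+ * usual base-register addressing applies.
+ */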
+
+#include "translate/fixedpoint-impl.c.inc"
+
+#include "translate/fp-impl.c.inc"
+
+#include "translate/vmx-impl.c.inc"
+
+#include "translate/vsx-impl.c.inc"
+
+#include "translate/dfp-impl.c.inc"
+
+#include "translate/spe-impl.c.inc"
+
+/* Handles lfdp, lxsd, lxssp */
+static void gen_dform39(DisasContext *ctx)
+{
+ switch (ctx->opcode & 0x3) {
+ case 0: /* lfdp */
+ if (ctx->insns_flags2 & PPC2_ISA205) {
+ return gen_lfdp(ctx);
+ }
+ break;
+ case 2: /* lxsd */
+ if (ctx->insns_flags2 & PPC2_ISA300) {
+ return gen_lxsd(ctx);
+ }
+ break;
+ case 3: /* lxssp */
+ if (ctx->insns_flags2 & PPC2_ISA300) {
+ return gen_lxssp(ctx);
+ }
+ break;
+ }
+ return gen_invalid(ctx);
+}
+
+/* handles stfdp, stxsd, stxssp */
+static void gen_dform3D(DisasContext *ctx)
+{
+ if ((ctx->opcode & 3) != 1) { /* DS-FORM */
+ switch (ctx->opcode & 0x3) {
+ case 0: /* stfdp */
+ if (ctx->insns_flags2 & PPC2_ISA205) {
+ return gen_stfdp(ctx);
+ }
+ break;
+ case 2: /* stxsd */
+ if (ctx->insns_flags2 & PPC2_ISA300) {
+ return gen_stxsd(ctx);
+ }
+ break;
+ case 3: /* stxssp */
+ if (ctx->insns_flags2 & PPC2_ISA300) {
+ return gen_stxssp(ctx);
+ }
+ break;
+ }
+ }
+ return gen_invalid(ctx);
+}
+
+#if defined(TARGET_PPC64)
+/* brd */
+static void gen_brd(DisasContext *ctx)
+{
+ tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+/* brw */
+static void gen_brw(DisasContext *ctx)
+{
+ tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);
+}
+
+/* brh */
+static void gen_brh(DisasContext *ctx)
+{
+ TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
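+ /*
+ * Swap the two bytes of each 16-bit half: shri+and moves the high
+ * bytes down, and+shli moves the low bytes up, e.g. (illustrative)
+ * 0x1122334455667788 -> 0x2211443366558877.
+ */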
+ tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
+ tcg_gen_and_i64(t2, t1, mask);
+ tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
+ tcg_gen_shli_i64(t1, t1, 8);
+ tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+#endif
+
+static opcode_t opcodes[] = {
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
+GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
+GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
+#endif
+GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
+GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
+GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
+GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
+GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
+GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
+#endif
+GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
+GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
+GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+#endif
+/* handles lfdp, lxsd, lxssp */
+GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
+/* handles stfdp, stxsd, stxssp */
+GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
+GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
+GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
+GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
+GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
+GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
+GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
+GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
+GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
+#endif
+GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
+GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
+GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
+GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
+GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
+GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
+#if !defined(CONFIG_USER_ONLY)
+/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
+GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
+#endif
+/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
+GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
+GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
+GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
+GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
+GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
+GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
+GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
+GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
+GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
+GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
+GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
+GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
+GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
+GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
+GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
+GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
+GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
+GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
+GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
+GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
+GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
+GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
+GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
+GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
+GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
+#if defined(TARGET_PPC64)
+GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B),
+#endif
+GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
+/*
+ * XXX Those instructions will need to be handled differently for
+ * different ISA versions
+ */
+GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
+GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
+GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC),
+GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER),
+GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER),
+GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER),
+GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER),
+GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER),
+GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER),
+GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2),
+GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2),
+GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
+GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
+GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
+GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
+GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
+GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
+GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
+GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
+GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
+GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
+GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
+GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
+GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001,
+ PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001,
+ PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
+GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
+GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
+GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
+ PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
+GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
+ PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
+ PPC_440_SPEC),
+GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
+GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
+GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
+ PPC2_ISA300),
+GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001,
+ PPC_NONE, PPC2_ISA207S),
+GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001,
+ PPC_NONE, PPC2_ISA207S),
+#endif
+
+#undef GEN_INT_ARITH_ADD
+#undef GEN_INT_ARITH_ADD_CONST
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_INT_ARITH_DIVW
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
+GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
+
+#if defined(TARGET_PPC64)
+#undef GEN_INT_ARITH_DIVD
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
+
+GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
+
+#undef GEN_INT_ARITH_MUL_HELPER
+#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
+GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
+GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
+#endif
+
+#undef GEN_INT_ARITH_SUBF
+#undef GEN_INT_ARITH_SUBF_CONST
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_LOGICAL1
+#undef GEN_LOGICAL2
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
+#endif
+
+#if defined(TARGET_PPC64)
+#undef GEN_PPC64_R2
+#undef GEN_PPC64_R4
+#define GEN_PPC64_R2(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B)
+#define GEN_PPC64_R4(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
+ PPC_64B)
+GEN_PPC64_R4(rldicl, 0x1E, 0x00),
+GEN_PPC64_R4(rldicr, 0x1E, 0x02),
+GEN_PPC64_R4(rldic, 0x1E, 0x04),
+GEN_PPC64_R2(rldcl, 0x1E, 0x08),
+GEN_PPC64_R2(rldcr, 0x1E, 0x09),
+GEN_PPC64_R4(rldimi, 0x1E, 0x06),
+#endif
+
+#undef GEN_LDX_E
+#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
+GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
+
+#if defined(TARGET_PPC64)
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
+
+/* HV/P7 and later only */
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
+GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
+GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
+#endif
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
+
+/* External PID based load */
+#undef GEN_LDEPX
+#define GEN_LDEPX(name, ldop, opc2, opc3) \
+GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
+ 0x00000001, PPC_NONE, PPC2_BOOKE206),
+
+GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
+GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
+GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
+#if defined(TARGET_PPC64)
+GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+#endif
+
+#undef GEN_STX_E
+#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
+GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
+
+#if defined(TARGET_PPC64)
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
+GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
+GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
+#endif
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
+
+#undef GEN_STEPX
+#define GEN_STEPX(name, ldop, opc2, opc3) \
+GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
+ 0x00000001, PPC_NONE, PPC2_BOOKE206),
+
+GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
+GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
+GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
+#if defined(TARGET_PPC64)
+GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04)
+#endif
+
+#undef GEN_CRLOGIC
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
+
+#undef GEN_MAC_HANDLER
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
+
+GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
+ PPC_NONE, PPC2_TM),
+
+#include "translate/fp-ops.c.inc"
+
+#include "translate/vmx-ops.c.inc"
+
+#include "translate/vsx-ops.c.inc"
+
+#include "translate/spe-ops.c.inc"
+};
+
+/*****************************************************************************/
+/* Opcode types */
+enum {
+ PPC_DIRECT = 0, /* Opcode routine */
+ PPC_INDIRECT = 1, /* Indirect opcode table */
+};
+
+#define PPC_OPCODE_MASK 0x3
+
+static inline int is_indirect_opcode(void *handler)
+{
+ return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
+}
+
+static inline opc_handler_t **ind_table(void *handler)
+{
+ return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
+}
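+
+/*
+ * The low bits of a table entry tag its kind: a direct handler is a
+ * naturally aligned pointer (tag bits 0), while an indirect entry is
+ * a sub-table pointer with PPC_INDIRECT or'ed in, e.g. (as done in
+ * create_new_table() below):
+ * table[idx] = (opc_handler_t *)((uintptr_t)sub_table | PPC_INDIRECT);
+ * ind_table() masks the tag back off before the pointer is indexed.
+ */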
+
+/* Instruction table creation */
+/* Opcodes tables creation */
+static void fill_new_table(opc_handler_t **table, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ table[i] = &invalid_handler;
+ }
+}
+
+static int create_new_table(opc_handler_t **table, unsigned char idx)
+{
+ opc_handler_t **tmp;
+
+ tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
+ fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
+ table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
+
+ return 0;
+}
+
+static int insert_in_table(opc_handler_t **table, unsigned char idx,
+ opc_handler_t *handler)
+{
+ if (table[idx] != &invalid_handler) {
+ return -1;
+ }
+ table[idx] = handler;
+
+ return 0;
+}
+
+static int register_direct_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx, opc_handler_t *handler)
+{
+ if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in main "
+ "opcode table\n", idx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_ind_in_table(opc_handler_t **table,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ if (table[idx1] == &invalid_handler) {
+ if (create_new_table(table, idx1) < 0) {
+ printf("*** ERROR: unable to create indirect table "
+ "idx=%02x\n", idx1);
+ return -1;
+ }
+ } else {
+ if (!is_indirect_opcode(table[idx1])) {
+ printf("*** ERROR: idx %02x already assigned to a direct "
+ "opcode\n", idx1);
+ return -1;
+ }
+ }
+ if (handler != NULL &&
+ insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in "
+ "opcode table %02x\n", idx2, idx1);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_ind_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
+}
+
+static int register_dblind_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, opc_handler_t *handler)
+{
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
+ handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_trplind_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, unsigned char idx4,
+ opc_handler_t *handler)
+{
+ opc_handler_t **table;
+
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ table = ind_table(ppc_opcodes[idx1]);
+ if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
+ printf("*** ERROR: unable to join 2nd-level indirect table idx "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+ table = ind_table(table[idx2]);
+ if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
+ return -1;
+ }
+ return 0;
+}
+
+static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
+{
+ if (insn->opc2 != 0xFF) {
+ if (insn->opc3 != 0xFF) {
+ if (insn->opc4 != 0xFF) {
+ if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, insn->opc4,
+ &insn->handler) < 0) {
+ return -1;
+ }
+ } else {
+ if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, &insn->handler) < 0) {
+ return -1;
+ }
+ }
+ } else {
+ if (register_ind_insn(ppc_opcodes, insn->opc1,
+ insn->opc2, &insn->handler) < 0) {
+ return -1;
+ }
+ }
+ } else {
+ if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
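+
+/*
+ * For illustration: tlbie above (opc1 0x1F, opc2 0x12, opc3 0x09, no
+ * opc4 level) takes the register_dblind_insn() path: opc1 selects a
+ * first-level indirect table, opc2 a second-level one, and opc3 the
+ * final handler slot.
+ */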
+
+static int test_opcode_table(opc_handler_t **table, int len)
+{
+ int i, count, tmp;
+
+ for (i = 0, count = 0; i < len; i++) {
+ /* Consistency fixup */
+ if (table[i] == NULL) {
+ table[i] = &invalid_handler;
+ }
+ if (table[i] != &invalid_handler) {
+ if (is_indirect_opcode(table[i])) {
+ tmp = test_opcode_table(ind_table(table[i]),
+ PPC_CPU_INDIRECT_OPCODES_LEN);
+ if (tmp == 0) {
+ free(table[i]);
+ table[i] = &invalid_handler;
+ } else {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
+{
+ if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
+ printf("*** WARNING: no opcode defined !\n");
+ }
+}
+
+/*****************************************************************************/
+void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ opcode_t *opc;
+
+ fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
+ for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
+ if (((opc->handler.type & pcc->insns_flags) != 0) ||
+ ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
+ if (register_insn(cpu->opcodes, opc) < 0) {
+ error_setg(errp, "ERROR initializing PowerPC instruction "
+ "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
+ opc->opc3);
+ return;
+ }
+ }
+ }
+ fix_opcode_tables(cpu->opcodes);
+ fflush(stdout);
+ fflush(stderr);
+}
+
+void destroy_ppc_opcodes(PowerPCCPU *cpu)
+{
+ opc_handler_t **table, **table_2;
+ int i, j, k;
+
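+ /*
+ * Indirect entries are pointers tagged with PPC_INDIRECT in their
+ * low bits (see is_indirect_opcode()), so the tag must be masked
+ * off before a table is handed to g_free().
+ */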
+ for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
+ if (cpu->opcodes[i] == &invalid_handler) {
+ continue;
+ }
+ if (is_indirect_opcode(cpu->opcodes[i])) {
+ table = ind_table(cpu->opcodes[i]);
+ for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
+ if (table[j] == &invalid_handler) {
+ continue;
+ }
+ if (is_indirect_opcode(table[j])) {
+ table_2 = ind_table(table[j]);
+ for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
+ if (table_2[k] != &invalid_handler &&
+ is_indirect_opcode(table_2[k])) {
+ g_free((opc_handler_t *)((uintptr_t)table_2[k] &
+ ~PPC_INDIRECT));
+ }
+ }
+ g_free((opc_handler_t *)((uintptr_t)table[j] &
+ ~PPC_INDIRECT));
+ }
+ }
+ g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
+ ~PPC_INDIRECT));
+ }
+ }
+}
+
+int ppc_fixup_cpu(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ /*
+ * TCG doesn't (yet) emulate some groups of instructions that are
+ * implemented on some otherwise supported CPUs (e.g. VSX and
+ * decimal floating point instructions on POWER7). We remove
+ * unsupported instruction groups from the cpu state's instruction
+ * masks and hope the guest can cope. For at least the pseries
+ * machine, the unavailability of these instructions can be
+ * advertised to the guest via the device tree.
+ */
+ if ((env->insns_flags & ~PPC_TCG_INSNS)
+ || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
+ warn_report("Disabling some instructions which are not "
+ "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
+ env->insns_flags & ~PPC_TCG_INSNS,
+ env->insns_flags2 & ~PPC_TCG_INSNS2);
+ }
+ env->insns_flags &= PPC_TCG_INSNS;
+ env->insns_flags2 &= PPC_TCG_INSNS2;
+ return 0;
+}
+
+static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
+{
+ opc_handler_t **table, *handler;
+ uint32_t inval;
+
+ ctx->opcode = insn;
+
+ LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
+ insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
+ ctx->le_mode ? "little" : "big");
+
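+ /*
+ * Walk the nested tables built by create_ppc_opcodes(): every level
+ * that is still an indirect entry consumes the next opcode field
+ * (opc1, then opc2, opc3 and opc4).
+ */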
+ table = cpu->opcodes;
+ handler = table[opc1(insn)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc2(insn)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc3(insn)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc4(insn)];
+ }
+ }
+ }
+
+ /* Is opcode *REALLY* valid? */
+ if (unlikely(handler->handler == &gen_invalid)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx "\n",
+ opc1(insn), opc2(insn), opc3(insn), opc4(insn),
+ insn, ctx->cia);
+ return false;
+ }
+
+ if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
+ && Rc(insn))) {
+ inval = handler->inval2;
+ } else {
+ inval = handler->inval1;
+ }
+
+ if (unlikely((insn & inval) != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx "\n", insn & inval,
+ opc1(insn), opc2(insn), opc3(insn), opc4(insn),
+ insn, ctx->cia);
+ return false;
+ }
+
+ handler->handler(ctx);
+ return true;
+}
+
+static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUPPCState *env = cs->env_ptr;
+ uint32_t hflags = ctx->base.tb->flags;
+
+ ctx->spr_cb = env->spr_cb;
+ ctx->pr = (hflags >> HFLAGS_PR) & 1;
+ ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
+ ctx->dr = (hflags >> HFLAGS_DR) & 1;
+ ctx->hv = (hflags >> HFLAGS_HV) & 1;
+ ctx->insns_flags = env->insns_flags;
+ ctx->insns_flags2 = env->insns_flags2;
+ ctx->access_type = -1;
+ ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
+ ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
+ ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
+ ctx->flags = env->flags;
+#if defined(TARGET_PPC64)
+ ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
+ ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
+#endif
+ ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
+ || env->mmu_model == POWERPC_MMU_601
+ || env->mmu_model & POWERPC_MMU_64;
+
+ ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
+ ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
+ ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
+ ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
+ ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
+ ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
+ ctx->hr = (hflags >> HFLAGS_HR) & 1;
+ ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
+ ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
+
+ ctx->singlestep_enabled = 0;
+ if ((hflags >> HFLAGS_SE) & 1) {
+ ctx->singlestep_enabled |= CPU_SINGLE_STEP;
+ ctx->base.max_insns = 1;
+ }
+ if ((hflags >> HFLAGS_BE) & 1) {
+ ctx->singlestep_enabled |= CPU_BRANCH_STEP;
+ }
+}
+
+static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
+
+static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ tcg_gen_insn_start(dcbase->pc_next);
+}
+
+static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ return opc1(insn) == 1;
+}
+
+static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = cs->env_ptr;
+ target_ulong pc;
+ uint32_t insn;
+ bool ok;
+
+ LOG_DISAS("----------------\n");
+ LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
+ ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
+
+ ctx->cia = pc = ctx->base.pc_next;
+ insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
+ ctx->base.pc_next = pc += 4;
+
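+ /*
+ * Non-prefixed words go to the decodetree decoder first; whatever
+ * it does not recognize falls back to the legacy opcode tables.
+ */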
+ if (!is_prefix_insn(ctx, insn)) {
+ ok = (decode_insn32(ctx, insn) ||
+ decode_legacy(cpu, ctx, insn));
+ } else if ((pc & 63) == 0) {
+ /*
+ * Power ISA v3.1, section 1.9 Exceptions:
+ * attempt to execute a prefixed instruction that crosses a
+ * 64-byte address boundary (system alignment error). pc has
+ * already been advanced past the prefix word, so a 64-byte
+ * aligned pc means the suffix would start in the next block.
+ */
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
+ ok = true;
+ } else {
+ uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
+ need_byteswap(ctx));
+ ctx->base.pc_next = pc += 4;
+ ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
+ }
+ if (!ok) {
+ gen_invalid(ctx);
+ }
+
+ /* End the TB when crossing a page boundary. */
+ if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+
+ translator_loop_temp_check(&ctx->base);
+}
+
+static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ DisasJumpType is_jmp = ctx->base.is_jmp;
+ target_ulong nip = ctx->base.pc_next;
+
+ if (is_jmp == DISAS_NORETURN) {
+ /* We have already exited the TB. */
+ return;
+ }
+
+ /*
+ * Honor single stepping. The nip check leaves addresses inside
+ * the exception vector range 0x100..0xf00 alone.
+ */
+ if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
+ && (nip <= 0x100 || nip > 0xf00)) {
+ switch (is_jmp) {
+ case DISAS_TOO_MANY:
+ case DISAS_EXIT_UPDATE:
+ case DISAS_CHAIN_UPDATE:
+ gen_update_nip(ctx, nip);
+ break;
+ case DISAS_EXIT:
+ case DISAS_CHAIN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_debug_exception(ctx);
+ return;
+ }
+
+ switch (is_jmp) {
+ case DISAS_TOO_MANY:
+ if (use_goto_tb(ctx, nip)) {
+ tcg_gen_goto_tb(0);
+ gen_update_nip(ctx, nip);
+ tcg_gen_exit_tb(ctx->base.tb, 0);
+ break;
+ }
+ /* fall through */
+ case DISAS_CHAIN_UPDATE:
+ gen_update_nip(ctx, nip);
+ /* fall through */
+ case DISAS_CHAIN:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+
+ case DISAS_EXIT_UPDATE:
+ gen_update_nip(ctx, nip);
+ /* fall through */
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void ppc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps ppc_tr_ops = {
+ .init_disas_context = ppc_tr_init_disas_context,
+ .tb_start = ppc_tr_tb_start,
+ .insn_start = ppc_tr_insn_start,
+ .translate_insn = ppc_tr_translate_insn,
+ .tb_stop = ppc_tr_tb_stop,
+ .disas_log = ppc_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->nip = data[0];
+}
diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc
new file mode 100644
index 000000000..f9f1d58d4
--- /dev/null
+++ b/target/ppc/translate/dfp-impl.c.inc
@@ -0,0 +1,229 @@
+/*** Decimal Floating Point ***/
+
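+/*
+ * DFP operands live in the floating-point halves of the VSRs
+ * (vsr[n].u64[0]); helpers receive raw pointers into env.
+ */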
+static inline TCGv_ptr gen_fprp_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
+ return r;
+}
+
+#define TRANS_DFP_T_A_B_Rc(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ ra = gen_fprp_ptr(a->ra); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_env, rt, ra, rb); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
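+
+/*
+ * Expansion sketch (for reference): TRANS_DFP_T_A_B_Rc(DADD) below
+ * produces a trans_DADD() that builds FPR-pair pointers for rt, ra
+ * and rb, calls gen_helper_DADD(cpu_env, rt, ra, rb), and for the
+ * Rc=1 form copies the FPSCR summary into CR1 via
+ * gen_set_cr1_from_fpscr().
+ */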
+
+#define TRANS_DFP_BF_A_B(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ ra = gen_fprp_ptr(a->ra); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ cpu_env, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
+
+#define TRANS_DFP_BF_I_B(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ cpu_env, tcg_constant_i32(a->uim), rb);\
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
+
+#define TRANS_DFP_BF_A_DCM(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr ra; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ ra = gen_fprp_ptr(a->fra); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ cpu_env, ra, tcg_constant_i32(a->dm)); \
+ tcg_temp_free_ptr(ra); \
+ return true; \
+}
+
+#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->frt); \
+ rb = gen_fprp_ptr(a->frb); \
+ gen_helper_##NAME(cpu_env, rt, rb, \
+ tcg_constant_i32(a->U32F1), \
+ tcg_constant_i32(a->U32F2)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
+
+#define TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->frt); \
+ ra = gen_fprp_ptr(a->fra); \
+ rb = gen_fprp_ptr(a->frb); \
+ gen_helper_##NAME(cpu_env, rt, ra, rb, \
+ tcg_constant_i32(a->I32FLD)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
+
+#define TRANS_DFP_T_B_Rc(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_env, rt, rb); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ return true; \
+}
+
+#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rx; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ rx = gen_fprp_ptr(a->FPRFLD); \
+ gen_helper_##NAME(cpu_env, rt, rx, \
+ tcg_constant_i32(a->I32FLD)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rx); \
+ return true; \
+}
+
+TRANS_DFP_T_A_B_Rc(DADD)
+TRANS_DFP_T_A_B_Rc(DADDQ)
+TRANS_DFP_T_A_B_Rc(DSUB)
+TRANS_DFP_T_A_B_Rc(DSUBQ)
+TRANS_DFP_T_A_B_Rc(DMUL)
+TRANS_DFP_T_A_B_Rc(DMULQ)
+TRANS_DFP_T_A_B_Rc(DDIV)
+TRANS_DFP_T_A_B_Rc(DDIVQ)
+TRANS_DFP_BF_A_B(DCMPU)
+TRANS_DFP_BF_A_B(DCMPUQ)
+TRANS_DFP_BF_A_B(DCMPO)
+TRANS_DFP_BF_A_B(DCMPOQ)
+TRANS_DFP_BF_A_DCM(DTSTDC)
+TRANS_DFP_BF_A_DCM(DTSTDCQ)
+TRANS_DFP_BF_A_DCM(DTSTDG)
+TRANS_DFP_BF_A_DCM(DTSTDGQ)
+TRANS_DFP_BF_A_B(DTSTEX)
+TRANS_DFP_BF_A_B(DTSTEXQ)
+TRANS_DFP_BF_A_B(DTSTSF)
+TRANS_DFP_BF_A_B(DTSTSFQ)
+TRANS_DFP_BF_I_B(DTSTSFI)
+TRANS_DFP_BF_I_B(DTSTSFIQ)
+TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc)
+TRANS_DFP_T_B_Rc(DCTDP)
+TRANS_DFP_T_B_Rc(DCTQPQ)
+TRANS_DFP_T_B_Rc(DRSP)
+TRANS_DFP_T_B_Rc(DRDPQ)
+TRANS_DFP_T_B_Rc(DCFFIX)
+TRANS_DFP_T_B_Rc(DCFFIXQ)
+TRANS_DFP_T_B_Rc(DCTFIX)
+TRANS_DFP_T_B_Rc(DCTFIXQ)
+TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp)
+TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp)
+TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s)
+TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s)
+TRANS_DFP_T_B_Rc(DXEX)
+TRANS_DFP_T_B_Rc(DXEXQ)
+TRANS_DFP_T_A_B_Rc(DIEX)
+TRANS_DFP_T_A_B_Rc(DIEXQ)
+TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh)
+
+static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a)
+{
+ TCGv_ptr rt, rb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, DFP);
+ REQUIRE_FPU(ctx);
+ REQUIRE_VECTOR(ctx);
+
+ rt = gen_fprp_ptr(a->frtp);
+ rb = gen_avr_ptr(a->vrb);
+ gen_helper_DCFFIXQQ(cpu_env, rt, rb);
+ tcg_temp_free_ptr(rt);
+ tcg_temp_free_ptr(rb);
+
+ return true;
+}
+
+static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a)
+{
+ TCGv_ptr rt, rb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, DFP);
+ REQUIRE_FPU(ctx);
+ REQUIRE_VECTOR(ctx);
+
+ rt = gen_avr_ptr(a->vrt);
+ rb = gen_fprp_ptr(a->frbp);
+ gen_helper_DCTFIXQQ(cpu_env, rt, rb);
+ tcg_temp_free_ptr(rt);
+ tcg_temp_free_ptr(rb);
+
+ return true;
+}
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
new file mode 100644
index 000000000..7fecff457
--- /dev/null
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -0,0 +1,494 @@
+/*
+ * Power ISA decode for Fixed-Point Facility instructions
+ *
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Fixed-Point Load/Store Instructions
+ */
+
+static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
+ bool store, MemOp mop)
+{
+ TCGv ea;
+
+ if (update && (ra == 0 || (!store && ra == rt))) {
+ gen_invalid(ctx);
+ return true;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+
+ ea = do_ea_calc(ctx, ra, displ);
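+ /*
+ * Apply the guest's current default endianness to the memop; the
+ * XOR (rather than OR) also lets a caller pass a pre-set endian
+ * bit to request the byte-reversed access instead.
+ */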
+ mop ^= ctx->default_tcg_memop_mask;
+ if (store) {
+ tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
+ } else {
+ tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
+ }
+ if (update) {
+ tcg_gen_mov_tl(cpu_gpr[ra], ea);
+ }
+ tcg_temp_free(ea);
+
+ return true;
+}
+
+static bool do_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store,
+ MemOp mop)
+{
+ return do_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, mop);
+}
+
+static bool do_ldst_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
+ bool store, MemOp mop)
+{
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+ return do_ldst_D(ctx, &d, update, store, mop);
+}
+
+static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
+ bool store, MemOp mop)
+{
+ return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
+}
+
+static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
+{
+#if defined(TARGET_PPC64)
+ TCGv ea;
+ TCGv_i64 low_addr_gpr, high_addr_gpr;
+ MemOp mop;
+
+ REQUIRE_INSNS_FLAGS(ctx, 64BX);
+
+ if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
+ if (ctx->pr) {
+ /* lq and stq were privileged prior to ISA v2.07 */
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return true;
+ }
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return true;
+ }
+ }
+
+ if (!store && unlikely(a->ra == a->rt)) {
+ gen_invalid(ctx);
+ return true;
+ }
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));
+
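+ /*
+ * lq/stq move a GPR pair (rt, rt + 1); in little-endian mode the
+ * doubleword at the lower address belongs in rt + 1, so the two
+ * halves swap roles below.
+ */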
+ if (prefixed || !ctx->le_mode) {
+ low_addr_gpr = cpu_gpr[a->rt];
+ high_addr_gpr = cpu_gpr[a->rt + 1];
+ } else {
+ low_addr_gpr = cpu_gpr[a->rt + 1];
+ high_addr_gpr = cpu_gpr[a->rt];
+ }
+
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ if (HAVE_ATOMIC128) {
+ mop = DEF_MEMOP(MO_128);
+ TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
+ if (store) {
+ if (ctx->le_mode) {
+ gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
+ high_addr_gpr, oi);
+ } else {
+ gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
+ low_addr_gpr, oi);
+ }
+ } else {
+ if (ctx->le_mode) {
+ gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
+ tcg_gen_ld_i64(high_addr_gpr, cpu_env,
+ offsetof(CPUPPCState, retxh));
+ } else {
+ gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
+ tcg_gen_ld_i64(low_addr_gpr, cpu_env,
+ offsetof(CPUPPCState, retxh));
+ }
+ }
+ } else {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+ } else {
+ mop = DEF_MEMOP(MO_Q);
+ if (store) {
+ tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
+ }
+
+ gen_addr_add(ctx, ea, ea, 8);
+
+ if (store) {
+ tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
+ }
+ }
+ tcg_temp_free(ea);
+#else
+ qemu_build_not_reached();
+#endif
+
+ return true;
+}
+
+static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
+{
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_ldst_quad(ctx, &d, store, true);
+}
+
+/* Load Byte and Zero */
+TRANS(LBZ, do_ldst_D, false, false, MO_UB)
+TRANS(LBZX, do_ldst_X, false, false, MO_UB)
+TRANS(LBZU, do_ldst_D, true, false, MO_UB)
+TRANS(LBZUX, do_ldst_X, true, false, MO_UB)
+TRANS(PLBZ, do_ldst_PLS_D, false, false, MO_UB)
+
+/* Load Halfword and Zero */
+TRANS(LHZ, do_ldst_D, false, false, MO_UW)
+TRANS(LHZX, do_ldst_X, false, false, MO_UW)
+TRANS(LHZU, do_ldst_D, true, false, MO_UW)
+TRANS(LHZUX, do_ldst_X, true, false, MO_UW)
+TRANS(PLHZ, do_ldst_PLS_D, false, false, MO_UW)
+
+/* Load Halfword Algebraic */
+TRANS(LHA, do_ldst_D, false, false, MO_SW)
+TRANS(LHAX, do_ldst_X, false, false, MO_SW)
+TRANS(LHAU, do_ldst_D, true, false, MO_SW)
+TRANS(LHAXU, do_ldst_X, true, false, MO_SW)
+TRANS(PLHA, do_ldst_PLS_D, false, false, MO_SW)
+
+/* Load Word and Zero */
+TRANS(LWZ, do_ldst_D, false, false, MO_UL)
+TRANS(LWZX, do_ldst_X, false, false, MO_UL)
+TRANS(LWZU, do_ldst_D, true, false, MO_UL)
+TRANS(LWZUX, do_ldst_X, true, false, MO_UL)
+TRANS(PLWZ, do_ldst_PLS_D, false, false, MO_UL)
+
+/* Load Word Algebraic */
+TRANS64(LWA, do_ldst_D, false, false, MO_SL)
+TRANS64(LWAX, do_ldst_X, false, false, MO_SL)
+TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
+TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)
+
+/* Load Doubleword */
+TRANS64(LD, do_ldst_D, false, false, MO_Q)
+TRANS64(LDX, do_ldst_X, false, false, MO_Q)
+TRANS64(LDU, do_ldst_D, true, false, MO_Q)
+TRANS64(LDUX, do_ldst_X, true, false, MO_Q)
+TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q)
+
+/* Load Quadword */
+TRANS64(LQ, do_ldst_quad, false, false);
+TRANS64(PLQ, do_ldst_quad_PLS_D, false);
+
+/* Store Byte */
+TRANS(STB, do_ldst_D, false, true, MO_UB)
+TRANS(STBX, do_ldst_X, false, true, MO_UB)
+TRANS(STBU, do_ldst_D, true, true, MO_UB)
+TRANS(STBUX, do_ldst_X, true, true, MO_UB)
+TRANS(PSTB, do_ldst_PLS_D, false, true, MO_UB)
+
+/* Store Halfword */
+TRANS(STH, do_ldst_D, false, true, MO_UW)
+TRANS(STHX, do_ldst_X, false, true, MO_UW)
+TRANS(STHU, do_ldst_D, true, true, MO_UW)
+TRANS(STHUX, do_ldst_X, true, true, MO_UW)
+TRANS(PSTH, do_ldst_PLS_D, false, true, MO_UW)
+
+/* Store Word */
+TRANS(STW, do_ldst_D, false, true, MO_UL)
+TRANS(STWX, do_ldst_X, false, true, MO_UL)
+TRANS(STWU, do_ldst_D, true, true, MO_UL)
+TRANS(STWUX, do_ldst_X, true, true, MO_UL)
+TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)
+
+/* Store Doubleword */
+TRANS64(STD, do_ldst_D, false, true, MO_Q)
+TRANS64(STDX, do_ldst_X, false, true, MO_Q)
+TRANS64(STDU, do_ldst_D, true, true, MO_Q)
+TRANS64(STDUX, do_ldst_X, true, true, MO_Q)
+TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q)
+
+/* Store Quadword */
+TRANS64(STQ, do_ldst_quad, true, false);
+TRANS64(PSTQ, do_ldst_quad_PLS_D, true);
+
+/*
+ * Fixed-Point Compare Instructions
+ */
+
+static bool do_cmp_X(DisasContext *ctx, arg_X_bfl *a, bool s)
+{
+ if ((ctx->insns_flags & PPC_64B) == 0) {
+ /*
+ * For 32-bit implementations, The Programming Environments Manual says
+ * that "the L field must be cleared, otherwise the instruction form is
+ * invalid." It seems, however, that most 32-bit CPUs ignore invalid
+ * forms (e.g., section "Instruction Formats" of the 405 and 440
+ * manuals, "Integer Compare Instructions" of the 601 manual), with the
+ * notable exception of the e500 and e500mc, where L=1 was reported to
+ * cause an exception.
+ */
+ if (a->l) {
+ if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
+ /*
+ * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
+ * generate an illegal instruction exception.
+ */
+ return false;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
+ s ? "" : "L", ctx->cia);
+ }
+ }
+ gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
+ return true;
+ }
+
+ /* For 64-bit implementations, deal with bit L accordingly. */
+ if (a->l) {
+ gen_op_cmp(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
+ } else {
+ gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
+ }
+ return true;
+}
+
+static bool do_cmp_D(DisasContext *ctx, arg_D_bf *a, bool s)
+{
+ if ((ctx->insns_flags & PPC_64B) == 0) {
+ /*
+ * For 32-bit implementations, The Programming Environments Manual says
+ * that "the L field must be cleared, otherwise the instruction form is
+ * invalid." It seems, however, that most 32-bit CPUs ignore invalid
+ * forms (e.g., section "Instruction Formats" of the 405 and 440
+ * manuals, "Integer Compare Instructions" of the 601 manual), with the
+ * notable exception of the e500 and e500mc, where L=1 was reported to
+ * cause an exception.
+ */
+ if (a->l) {
+ if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
+ /*
+ * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
+ * generate an illegal instruction exception.
+ */
+ return false;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
+ s ? "I" : "LI", ctx->cia);
+ }
+ }
+ gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
+ return true;
+ }
+
+ /* For 64-bit implementations, deal with bit L accordingly. */
+ if (a->l) {
+ gen_op_cmp(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
+ } else {
+ gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
+ }
+ return true;
+}
+
+TRANS(CMP, do_cmp_X, true);
+TRANS(CMPL, do_cmp_X, false);
+TRANS(CMPI, do_cmp_D, true);
+TRANS(CMPLI, do_cmp_D, false);
+
+/*
+ * Fixed-Point Arithmetic Instructions
+ */
+
+static bool trans_ADDI(DisasContext *ctx, arg_D *a)
+{
+ if (a->ra) {
+ tcg_gen_addi_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[a->rt], a->si);
+ }
+ return true;
+}
+
+static bool trans_PADDI(DisasContext *ctx, arg_PLS_D *a)
+{
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+ return trans_ADDI(ctx, &d);
+}
+
+static bool trans_ADDIS(DisasContext *ctx, arg_D *a)
+{
+ a->si <<= 16;
+ return trans_ADDI(ctx, a);
+}
+
+static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
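+ /* pc_next already points past this instruction, i.e. at the NIA. */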
+ tcg_gen_movi_tl(cpu_gpr[a->rt], ctx->base.pc_next + (a->d << 16));
+ return true;
+}
+
+static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
+{
+ gen_invalid(ctx);
+ return true;
+}
+
+static bool trans_PNOP(DisasContext *ctx, arg_PNOP *a)
+{
+ return true;
+}
+
+static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
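+ /*
+ * a->bi names one of the 32 CR bits: bits [4:2] select the CR
+ * field, bits [1:0] the bit within it (0x8 is the MSB of the
+ * 4-bit field).
+ */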
+ uint32_t mask = 0x08 >> (a->bi & 0x03);
+ TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
+ tcg_gen_andi_tl(temp, temp, mask);
+ tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
+ if (neg) {
+ tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
+ }
+ tcg_temp_free(temp);
+
+ return true;
+}
+
+TRANS(SETBC, do_set_bool_cond, false, false)
+TRANS(SETBCR, do_set_bool_cond, false, true)
+TRANS(SETNBC, do_set_bool_cond, true, false)
+TRANS(SETNBCR, do_set_bool_cond, true, true)
+
+static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
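+/*
+ * Common core of CNTLZDM/CNTTZDM (ISA v3.1), a sketch as read from
+ * the TCG ops below: find the first set bit of (src & mask) from the
+ * chosen end, shift mask so that only its bits strictly beyond that
+ * position (toward the counting end) survive, then popcount them.
+ * When src & mask == 0, the ctz/clz sentinel of -1 makes t1 zero,
+ * suppressing the extra one-bit shift so the result is
+ * popcount(mask).
+ */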
+static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
+{
+ TCGv_i64 t0, t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t0, src, mask);
+ if (trail) {
+ tcg_gen_ctzi_i64(t0, t0, -1);
+ } else {
+ tcg_gen_clzi_i64(t0, t0, -1);
+ }
+
+ tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
+ tcg_gen_andi_i64(t0, t0, 63);
+ tcg_gen_xori_i64(t0, t0, 63);
+ if (trail) {
+ tcg_gen_shl_i64(t0, mask, t0);
+ tcg_gen_shl_i64(t0, t0, t1);
+ } else {
+ tcg_gen_shr_i64(t0, mask, t0);
+ tcg_gen_shr_i64(t0, t0, t1);
+ }
+
+ tcg_gen_ctpop_i64(dst, t0);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
new file mode 100644
index 000000000..c9e05201d
--- /dev/null
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -0,0 +1,1404 @@
+/*
+ * translate/fp-impl.c.inc
+ *
+ * Standard FPU translation
+ */
+
+static inline void gen_reset_fpstatus(void)
+{
+ gen_helper_reset_fpstatus(cpu_env);
+}
+
+static inline void gen_compute_fprf_float64(TCGv_i64 arg)
+{
+ gen_helper_compute_fprf_float64(cpu_env, arg);
+ gen_helper_float_check_status(cpu_env);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
+ tcg_temp_free_i32(tmp);
+}
+#else
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
+}
+#endif
+
+/*** Floating-Point arithmetic ***/
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ TCGv_i64 t2; \
+ TCGv_i64 t3; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ t2 = tcg_temp_new_i64(); \
+ t3 = tcg_temp_new_i64(); \
+ gen_reset_fpstatus(); \
+ get_fpr(t0, rA(ctx->opcode)); \
+ get_fpr(t1, rC(ctx->opcode)); \
+ get_fpr(t2, rB(ctx->opcode)); \
+ gen_helper_f##op(t3, cpu_env, t0, t1, t2); \
+ if (isfloat) { \
+ gen_helper_frsp(t3, cpu_env, t3); \
+ } \
+ set_fpr(rD(ctx->opcode), t3); \
+ if (set_fprf) { \
+ gen_compute_fprf_float64(t3); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+ tcg_temp_free_i64(t3); \
+}
+
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
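+
+/*
+ * Expansion sketch: GEN_FLOAT_ACB(madd, ...) used below instantiates
+ * both gen_fmadd() and gen_fmadds(); the two share the
+ * double-precision helper, with the single-precision variant
+ * (isfloat = 1) rounding the result through gen_helper_frsp().
+ */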
+
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ TCGv_i64 t2; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ t2 = tcg_temp_new_i64(); \
+ gen_reset_fpstatus(); \
+ get_fpr(t0, rA(ctx->opcode)); \
+ get_fpr(t1, rB(ctx->opcode)); \
+ gen_helper_f##op(t2, cpu_env, t0, t1); \
+ if (isfloat) { \
+ gen_helper_frsp(t2, cpu_env, t2); \
+ } \
+ set_fpr(rD(ctx->opcode), t2); \
+ if (set_fprf) { \
+ gen_compute_fprf_float64(t2); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+}
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ TCGv_i64 t2; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ t2 = tcg_temp_new_i64(); \
+ gen_reset_fpstatus(); \
+ get_fpr(t0, rA(ctx->opcode)); \
+ get_fpr(t1, rC(ctx->opcode)); \
+ gen_helper_f##op(t2, cpu_env, t0, t1); \
+ if (isfloat) { \
+ gen_helper_frsp(t2, cpu_env, t2); \
+ } \
+ set_fpr(rD(ctx->opcode), t2); \
+ if (set_fprf) { \
+ gen_compute_fprf_float64(t2); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+}
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_reset_fpstatus(); \
+ get_fpr(t0, rB(ctx->opcode)); \
+ gen_helper_f##name(t1, cpu_env, t0); \
+ set_fpr(rD(ctx->opcode), t1); \
+ if (set_fprf) { \
+ gen_compute_fprf_float64(t1); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_reset_fpstatus(); \
+ get_fpr(t0, rB(ctx->opcode)); \
+ gen_helper_f##name(t1, cpu_env, t0); \
+ set_fpr(rD(ctx->opcode), t1); \
+ if (set_fprf) { \
+ gen_compute_fprf_float64(t1); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+/* fadd - fadds */
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
+/* fdiv - fdivs */
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
+/* fmul - fmuls */
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
+
+/* fre */
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
+
+/* fres */
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
+
+/* frsqrte */
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
+
+/* frsqrtes */
+static void gen_frsqrtes(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ get_fpr(t0, rB(ctx->opcode));
+ gen_helper_frsqrte(t1, cpu_env, t0);
+ gen_helper_frsp(t1, cpu_env, t1);
+ set_fpr(rD(ctx->opcode), t1);
+ gen_compute_fprf_float64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* fsel */
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
+/* fsub - fsubs */
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
+/* Optional: */
+
+/* fsqrt */
+static void gen_fsqrt(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ get_fpr(t0, rB(ctx->opcode));
+ gen_helper_fsqrt(t1, cpu_env, t0);
+ set_fpr(rD(ctx->opcode), t1);
+ gen_compute_fprf_float64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static void gen_fsqrts(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ get_fpr(t0, rB(ctx->opcode));
+ gen_helper_fsqrt(t1, cpu_env, t0);
+ gen_helper_frsp(t1, cpu_env, t1);
+ set_fpr(rD(ctx->opcode), t1);
+ gen_compute_fprf_float64(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/*** Floating-Point multiply-and-add ***/
+/* fmadd - fmadds */
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
+/* fmsub - fmsubs */
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
+/* fnmadd - fnmadds */
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
+/* fnmsub - fnmsubs */
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
+
+/*** Floating-Point round & convert ***/
+/* fctiw */
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
+/* fctiwu */
+GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* fctiwz */
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
+/* fctiwuz */
+GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* frsp */
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
+/* fcfid */
+GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
+/* fcfids */
+GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
+/* fcfidu */
+GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fcfidus */
+GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fctid */
+GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctidu */
+GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
+/* fctidz */
+GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctiduz */
+GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
+
+/* frin */
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
+/* friz */
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
+/* frip */
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
+/* frim */
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
+
+static void gen_ftdiv(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ get_fpr(t0, rA(ctx->opcode));
+ get_fpr(t1, rB(ctx->opcode));
+ gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static void gen_ftsqrt(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
+}
+
+
+/*** Floating-Point compare ***/
+
+/* fcmpo */
+static void gen_fcmpo(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ get_fpr(t0, rA(ctx->opcode));
+ get_fpr(t1, rB(ctx->opcode));
+ gen_helper_fcmpo(cpu_env, t0, t1, crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* fcmpu */
+static void gen_fcmpu(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ get_fpr(t0, rA(ctx->opcode));
+ get_fpr(t1, rB(ctx->opcode));
+ gen_helper_fcmpu(cpu_env, t0, t1, crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/*** Floating-point move ***/
+/* fabs */
+/* XXX: beware that fabs never checks for NaNs nor updates the FPSCR */
+static void gen_fabs(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ tcg_gen_andi_i64(t1, t0, ~(1ULL << 63));
+ set_fpr(rD(ctx->opcode), t1);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* fmr - fmr. */
+/* XXX: beware that fmr never checks for NaNs nor updates the FPSCR */
+static void gen_fmr(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ set_fpr(rD(ctx->opcode), t0);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+}
+
+/* fnabs */
+/* XXX: beware that fnabs never checks for NaNs nor updates the FPSCR */
+static void gen_fnabs(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ tcg_gen_ori_i64(t1, t0, 1ULL << 63);
+ set_fpr(rD(ctx->opcode), t1);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* fneg */
+/* XXX: beware that fneg never checks for NaNs nor updates the FPSCR */
+static void gen_fneg(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ tcg_gen_xori_i64(t1, t0, 1ULL << 63);
+ set_fpr(rD(ctx->opcode), t1);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* fcpsgn: PowerPC 2.05 specification */
+/* XXX: beware that fcpsgn never checks for NaNs nor updates the FPSCR */
+static void gen_fcpsgn(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ get_fpr(t0, rA(ctx->opcode));
+ get_fpr(t1, rB(ctx->opcode));
+ tcg_gen_deposit_i64(t2, t0, t1, 0, 63);
+ set_fpr(rD(ctx->opcode), t2);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static void gen_fmrgew(DisasContext *ctx)
+{
+ TCGv_i64 b0;
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ b0 = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ tcg_gen_shri_i64(b0, t0, 32);
+ get_fpr(t0, rA(ctx->opcode));
+ tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
+ set_fpr(rD(ctx->opcode), t1);
+ tcg_temp_free_i64(b0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static void gen_fmrgow(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ get_fpr(t0, rB(ctx->opcode));
+ get_fpr(t1, rA(ctx->opcode));
+ tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
+ set_fpr(rD(ctx->opcode), t2);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+/*** Floating-Point status & ctrl register ***/
+
+/* mcrfs */
+static void gen_mcrfs(DisasContext *ctx)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv_i32 tmask;
+ TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
+ int bfa;
+ int nibble;
+ int shift;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ bfa = crfS(ctx->opcode);
+ nibble = 7 - bfa;
+ shift = 4 * nibble;
+ tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
+ tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
+ 0xf);
+ tcg_temp_free(tmp);
+ tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+ /* Only the exception bits (including FX) should be cleared if read */
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
+ ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ /* FEX and VX need to be updated, so don't set fpscr directly */
+ tmask = tcg_const_i32(1 << nibble);
+ gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ tcg_temp_free_i32(tmask);
+ tcg_temp_free_i64(tnew_fpscr);
+}
+
+/* mffs */
+static void gen_mffs(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(t0, cpu_fpscr);
+ set_fpr(rD(ctx->opcode), t0);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+ tcg_temp_free_i64(t0);
+}
+
+/* mffsl */
+static void gen_mffsl(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+
+ if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
+ return gen_mffs(ctx);
+ }
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(t0, cpu_fpscr);
+ /* Mask everything except mode, status, and enables. */
+ tcg_gen_andi_i64(t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN);
+ set_fpr(rD(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
+}
+
+/* mffsce */
+static void gen_mffsce(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ TCGv_i32 mask;
+
+ if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
+ return gen_mffs(ctx);
+ }
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(t0, cpu_fpscr);
+ set_fpr(rD(ctx->opcode), t0);
+
+ /* Clear exception enable bits in the FPSCR. */
+ tcg_gen_andi_i64(t0, t0, ~FP_ENABLES);
+ mask = tcg_const_i32(0x0003);
+ gen_helper_store_fpscr(cpu_env, t0, mask);
+
+ tcg_temp_free_i32(mask);
+ tcg_temp_free_i64(t0);
+}
+
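+/*
+ * Common tail of mffscrn/mffscrni. Note this is an ordinary local
+ * function, not a generated TCG helper, despite the gen_helper_
+ * prefix.
+ */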
+static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i32 mask = tcg_const_i32(0x0001);
+
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(t0, cpu_fpscr);
+ tcg_gen_andi_i64(t0, t0, FP_DRN | FP_ENABLES | FP_RN);
+ set_fpr(rD(ctx->opcode), t0);
+
+ /* Mask FPSCR value to clear RN. */
+ tcg_gen_andi_i64(t0, t0, ~FP_RN);
+
+ /* Merge RN into FPSCR value. */
+ tcg_gen_or_i64(t0, t0, t1);
+
+ gen_helper_store_fpscr(cpu_env, t0, mask);
+
+ tcg_temp_free_i32(mask);
+ tcg_temp_free_i64(t0);
+}
+
+/* mffscrn */
+static void gen_mffscrn(DisasContext *ctx)
+{
+ TCGv_i64 t1;
+
+ if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
+ return gen_mffs(ctx);
+ }
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+
+ t1 = tcg_temp_new_i64();
+ get_fpr(t1, rB(ctx->opcode));
+ /* Mask FRB to get just RN. */
+ tcg_gen_andi_i64(t1, t1, FP_RN);
+
+ gen_helper_mffscrn(ctx, t1);
+
+ tcg_temp_free_i64(t1);
+}
+
+/* mffscrni */
+static void gen_mffscrni(DisasContext *ctx)
+{
+ TCGv_i64 t1;
+
+ if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
+ return gen_mffs(ctx);
+ }
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+
+ t1 = tcg_const_i64((uint64_t)RM(ctx->opcode));
+
+ gen_helper_mffscrn(ctx, t1);
+
+ tcg_temp_free_i64(t1);
+}
+
+/* mtfsb0 */
+static void gen_mtfsb0(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_clrbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+}
+
+/* mtfsb1 */
+static void gen_mtfsb1(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ /* XXX: we pretend we can only do IEEE floating-point computations */
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_setbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* mtfsf */
+static void gen_mtfsf(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+ TCGv_i64 t1;
+ int flm, l, w;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ flm = FPFLM(ctx->opcode);
+ l = FPL(ctx->opcode);
+ w = FPW(ctx->opcode);
+ if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_reset_fpstatus();
+ if (l) {
+ t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ } else {
+ t0 = tcg_const_i32(flm << (w * 8));
+ }
+ t1 = tcg_temp_new_i64();
+ get_fpr(t1, rB(ctx->opcode));
+ gen_helper_store_fpscr(cpu_env, t1, t0);
+ tcg_temp_free_i32(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+ tcg_temp_free_i64(t1);
+}
+
+/* mtfsfi */
+static void gen_mtfsfi(DisasContext *ctx)
+{
+ int bf, sh, w;
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ w = FPW(ctx->opcode);
+ bf = FPBF(ctx->opcode);
+ if (unlikely(w && !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ sh = (8 * w) + 7 - bf;
+ gen_reset_fpstatus();
+ t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_const_i32(1 << sh);
+ gen_helper_store_fpscr(cpu_env, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+ /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
+ gen_helper_todouble(dest, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+/* lfdepx (external PID lfdx) */
+static void gen_lfdepx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ CHK_SV;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
+ set_fpr(rD(ctx->opcode), t0);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* lfdp */
+static void gen_lfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0);
+ t0 = tcg_temp_new_i64();
+ /*
+ * We only need to swap high and low halves. gen_qemu_ld64_i64
+ * does necessary 64-bit byteswap already.
+ */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode) + 1, t0);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode), t0);
+ } else {
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode), t0);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode) + 1, t0);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* lfdpx */
+static void gen_lfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ t0 = tcg_temp_new_i64();
+ /*
+ * We only need to swap high and low halves. gen_qemu_ld64_i64
+ * does necessary 64-bit byteswap already.
+ */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode) + 1, t0);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode), t0);
+ } else {
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode), t0);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode) + 1, t0);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* lfiwax */
+static void gen_lfiwax(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv t0;
+ TCGv_i64 t1;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32s(ctx, t0, EA);
+ tcg_gen_ext_tl_i64(t1, t0);
+ set_fpr(rD(ctx->opcode), t1);
+ tcg_temp_free(EA);
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* lfiwzx */
+static void gen_lfiwzx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32u_i64(ctx, t0, EA);
+ set_fpr(rD(ctx->opcode), t0);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 t0; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ t0 = tcg_temp_new_i64(); \
+ gen_addr_reg_index(ctx, EA); \
+ get_fpr(t0, rS(ctx->opcode)); \
+ gen_qemu_##stop(ctx, t0, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(t0); \
+}
+
+static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_tosingle(tmp, src);
+ tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
+ tcg_temp_free_i32(tmp);
+}
+
+/* stfdepx (external PID stfdx) */
+static void gen_stfdepx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ CHK_SV;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, EA);
+ get_fpr(t0, rD(ctx->opcode));
+ tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* stfdp */
+static void gen_stfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new_i64();
+ gen_addr_imm_index(ctx, EA, 0);
+ /*
+ * We only need to swap high and low halves. gen_qemu_st64_i64
+ * does necessary 64-bit byteswap already.
+ */
+ if (unlikely(ctx->le_mode)) {
+ get_fpr(t0, rD(ctx->opcode) + 1);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ get_fpr(t0, rD(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
+ } else {
+ get_fpr(t0, rD(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ get_fpr(t0, rD(ctx->opcode) + 1);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* stfdpx */
+static void gen_stfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, EA);
+ /*
+ * We only need to swap high and low halves. gen_qemu_st64_i64
+ * does necessary 64-bit byteswap already.
+ */
+ if (unlikely(ctx->le_mode)) {
+ get_fpr(t0, rD(ctx->opcode) + 1);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ get_fpr(t0, rD(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
+ } else {
+ get_fpr(t0, rD(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ get_fpr(t0, rD(ctx->opcode) + 1);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+/* Optional: */
+static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_trunc_i64_tl(t0, arg1);
+ gen_qemu_st32(ctx, t0, arg2);
+ tcg_temp_free(t0);
+}
+/* stfiwx */
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
+
+/* POWER2 specific instructions */
+/* Quad manipulation (load/store two floats at a time) */
+
+/* lfq */
+static void gen_lfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ TCGv_i64 t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new_i64();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, t1, t0);
+ set_fpr(rd, t1);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, t1, t0);
+ set_fpr((rd + 1) % 32, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* lfqu */
+static void gen_lfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ TCGv_i64 t2;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new_i64();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, t2, t0);
+ set_fpr(rd, t2);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, t2, t1);
+ set_fpr((rd + 1) % 32, t2);
+ if (ra != 0) {
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i64(t2);
+}
+
+/* lfqux */
+static void gen_lfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ TCGv t0, t1;
+ TCGv_i64 t2;
+ t2 = tcg_temp_new_i64();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, t2, t0);
+ set_fpr(rd, t2);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, t2, t1);
+ set_fpr((rd + 1) % 32, t2);
+ tcg_temp_free(t1);
+ if (ra != 0) {
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t2);
+}
+
+/* lfqx */
+static void gen_lfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ TCGv_i64 t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new_i64();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, t1, t0);
+ set_fpr(rd, t1);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, t1, t0);
+ set_fpr((rd + 1) % 32, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* stfq */
+static void gen_stfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ TCGv_i64 t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new_i64();
+ gen_addr_imm_index(ctx, t0, 0);
+ get_fpr(t1, rd);
+ gen_qemu_st64_i64(ctx, t1, t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ get_fpr(t1, (rd + 1) % 32);
+ gen_qemu_st64_i64(ctx, t1, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* stfqu */
+static void gen_stfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ TCGv_i64 t2;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t2 = tcg_temp_new_i64();
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ get_fpr(t2, rd);
+ gen_qemu_st64_i64(ctx, t2, t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ get_fpr(t2, (rd + 1) % 32);
+ gen_qemu_st64_i64(ctx, t2, t1);
+ tcg_temp_free(t1);
+ if (ra != 0) {
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t2);
+}
+
+/* stfqux */
+static void gen_stfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ TCGv_i64 t2;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t2 = tcg_temp_new_i64();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ get_fpr(t2, rd);
+ gen_qemu_st64_i64(ctx, t2, t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ get_fpr(t2, (rd + 1) % 32);
+ gen_qemu_st64_i64(ctx, t2, t1);
+ tcg_temp_free(t1);
+ if (ra != 0) {
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t2);
+}
+
+/* stfqx */
+static void gen_stfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ TCGv_i64 t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t1 = tcg_temp_new_i64();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ get_fpr(t1, rd);
+ gen_qemu_st64_i64(ctx, t1, t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ get_fpr(t1, (rd + 1) % 32);
+ gen_qemu_st64_i64(ctx, t1, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* Floating-point Load/Store Instructions */
+static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
+ bool update, bool store, bool single)
+{
+ TCGv ea;
+ TCGv_i64 t0;
+ REQUIRE_INSNS_FLAGS(ctx, FLOAT);
+ REQUIRE_FPU(ctx);
+ if (update && ra == 0) {
+ gen_invalid(ctx);
+ return true;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new_i64();
+ ea = do_ea_calc(ctx, ra, displ);
+ if (store) {
+ get_fpr(t0, rt);
+ if (single) {
+ gen_qemu_st32fs(ctx, t0, ea);
+ } else {
+ gen_qemu_st64_i64(ctx, t0, ea);
+ }
+ } else {
+ if (single) {
+ gen_qemu_ld32fs(ctx, t0, ea);
+ } else {
+ gen_qemu_ld64_i64(ctx, t0, ea);
+ }
+ set_fpr(rt, t0);
+ }
+ if (update) {
+ tcg_gen_mov_tl(cpu_gpr[ra], ea);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free(ea);
+ return true;
+}
+
+static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
+ bool single)
+{
+ return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
+ single);
+}
+
+static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
+ bool store, bool single)
+{
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+ return do_lsfp_D(ctx, &d, update, store, single);
+}
+
+static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
+ bool store, bool single)
+{
+ return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
+}
+
+TRANS(LFS, do_lsfp_D, false, false, true)
+TRANS(LFSU, do_lsfp_D, true, false, true)
+TRANS(LFSX, do_lsfp_X, false, false, true)
+TRANS(LFSUX, do_lsfp_X, true, false, true)
+TRANS(PLFS, do_lsfp_PLS_D, false, false, true)
+
+TRANS(LFD, do_lsfp_D, false, false, false)
+TRANS(LFDU, do_lsfp_D, true, false, false)
+TRANS(LFDX, do_lsfp_X, false, false, false)
+TRANS(LFDUX, do_lsfp_X, true, false, false)
+TRANS(PLFD, do_lsfp_PLS_D, false, false, false)
+
+TRANS(STFS, do_lsfp_D, false, true, true)
+TRANS(STFSU, do_lsfp_D, true, true, true)
+TRANS(STFSX, do_lsfp_X, false, true, true)
+TRANS(STFSUX, do_lsfp_X, true, true, true)
+TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)
+
+TRANS(STFD, do_lsfp_D, false, true, false)
+TRANS(STFDU, do_lsfp_D, true, true, false)
+TRANS(STFDX, do_lsfp_X, false, true, false)
+TRANS(STFDUX, do_lsfp_X, true, true, false)
+TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)
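+
+/*
+ * The three boolean arguments above are (update, store, single); e.g.
+ * LFSU is (true, false, true): a single-precision load that also
+ * writes the effective address back to rA.
+ */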
+
+#undef _GEN_FLOAT_ACB
+#undef GEN_FLOAT_ACB
+#undef _GEN_FLOAT_AB
+#undef GEN_FLOAT_AB
+#undef _GEN_FLOAT_AC
+#undef GEN_FLOAT_AC
+#undef GEN_FLOAT_B
+#undef GEN_FLOAT_BS
+
+#undef GEN_LDF
+#undef GEN_LDUF
+#undef GEN_LDUXF
+#undef GEN_LDXF
+#undef GEN_LDFS
+
+#undef GEN_STF
+#undef GEN_STUF
+#undef GEN_STUXF
+#undef GEN_STXF
+#undef GEN_STFS
diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc
new file mode 100644
index 000000000..4260635a1
--- /dev/null
+++ b/target/ppc/translate/fp-ops.c.inc
@@ -0,0 +1,90 @@
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type)
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type)
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type)
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type)
+
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT),
+GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT),
+GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
+
+GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
+GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
+GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
+GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE),
+GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT,
+ PPC2_ISA300),
+GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT,
+ PPC2_ISA300),
+GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, PPC_FLOAT,
+ PPC_NONE),
+GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT,
+ PPC_NONE),
+GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),
+GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT),
diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc
new file mode 100644
index 000000000..2e6e799a2
--- /dev/null
+++ b/target/ppc/translate/spe-impl.c.inc
@@ -0,0 +1,1252 @@
+/*
+ * translate/spe-impl.c.inc
+ *
+ * Freescale SPE extension translation
+ */
+
+/*** SPE extension ***/
+/* Register moves */
+
+static inline void gen_evmra(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* tmp := rA_lo + rA_hi << 32 */
+ tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)],
+ cpu_gprh[rA(ctx->opcode)]);
+
+ /* spe_acc := tmp */
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+
+ /* rD := rA */
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+
+static inline void gen_load_gpr64(TCGv_i64 t, int reg)
+{
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg], cpu_gprh[reg]);
+}
+
+static inline void gen_store_gpr64(int reg, TCGv_i64 t)
+{
+ tcg_gen_extr_i64_tl(cpu_gpr[reg], cpu_gprh[reg], t);
+}
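+
+/*
+ * Illustrative model of the 64-bit SPE register view (hypothetical
+ * helpers, not used by the translator): cpu_gpr holds the low word and
+ * cpu_gprh the high word, mirroring the concat/extr pairs above.
+ */
+#if 0
+static uint64_t spe_concat(uint32_t lo, uint32_t hi)
+{
+    return ((uint64_t)hi << 32) | lo;
+}
+static void spe_extract(uint64_t v, uint32_t *lo, uint32_t *hi)
+{
+    *lo = (uint32_t)v;
+    *hi = (uint32_t)(v >> 32);
+}
+#endif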
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if (Rc(ctx->opcode)) \
+ gen_##name1(ctx); \
+ else \
+ gen_##name0(ctx); \
+}
+
+/* Handler for undefined SPE opcodes */
+static inline void gen_speundef(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* SPE logic */
+#define GEN_SPEOP_LOGIC2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)]); \
+}
+
+GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
+GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
+GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
+GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
+GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
+GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
+GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
+GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
+
+/* SPE logic immediate */
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
+
+/* SPE arithmetic */
+#define GEN_SPEOP_ARITH1(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+
+GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32);
+GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
+GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
+GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
+static inline void gen_op_evrndw(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ tcg_gen_addi_i32(ret, arg1, 0x8000);
+ tcg_gen_ext16u_i32(ret, ret);
+}
+GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
+GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
+GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
+
+#define GEN_SPEOP_ARITH2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
+static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
+static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
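+/*
+ * Reference model for the shift semantics generated above (illustration
+ * only): the amount is taken from the low 6 bits, and any amount >= 32
+ * yields zero, matching the branch to l1.
+ */
+#if 0
+static uint32_t evslw_model(uint32_t x, uint32_t n)
+{
+    n &= 0x3F;
+    return n >= 32 ? 0 : x << n;
+}
+#endif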
+static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, arg2, 0x1F);
+ tcg_gen_rotl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
+static inline void gen_evmergehi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
+static inline void gen_op_evsubf(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ tcg_gen_sub_i32(ret, arg2, arg1);
+}
+GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
+
+/* SPE arithmetic immediate */
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
+GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
+
+/* SPE comparison */
+#define GEN_SPEOP_COMP(name, tcg_cond) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ TCGLabel *l1 = gen_new_label(); \
+ TCGLabel *l2 = gen_new_label(); \
+ TCGLabel *l3 = gen_new_label(); \
+ TCGLabel *l4 = gen_new_label(); \
+ \
+ tcg_gen_ext32s_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \
+ \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)], l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0); \
+ tcg_gen_br(l2); \
+ gen_set_label(l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
+ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
+ gen_set_label(l2); \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)], l3); \
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ ~(CRF_CH | CRF_CH_AND_CL)); \
+ tcg_gen_br(l4); \
+ gen_set_label(l3); \
+ tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ CRF_CH | CRF_CH_OR_CL); \
+ gen_set_label(l4); \
+}
+GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
+GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
+GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
+GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
+GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
+
+/* SPE misc */
+static inline void gen_brinc(DisasContext *ctx)
+{
+ /* Note: brinc is usable even if SPE is disabled */
+ gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergelo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergehilo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+static inline void gen_evmergelohi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ if (rD(ctx->opcode) == rA(ctx->opcode)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+static inline void gen_evsplati(DisasContext *ctx)
+{
+ uint64_t imm;
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
+static inline void gen_evsplatfi(DisasContext *ctx)
+{
+ uint64_t imm;
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ imm = rA(ctx->opcode) << 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
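+
+/*
+ * Worked example of the immediate decoding above (illustration only):
+ * the 5-bit rA field is sign-extended for evsplati, so 0x1F splats -1
+ * and 0x0F splats 15; evsplatfi instead left-justifies the field.
+ */
+#if 0
+static int32_t evsplati_imm(uint32_t field5)
+{
+    return ((int32_t)(field5 << 27)) >> 27;   /* 0x1F -> -1 */
+}
+#endif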
+
+static inline void gen_evsel(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ TCGLabel *l4 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ gen_set_label(l2);
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l4);
+ gen_set_label(l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ gen_set_label(l4);
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_evsel0(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ gen_evsel(ctx);
+}
+
+static void gen_evsel1(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ gen_evsel(ctx);
+}
+
+static void gen_evsel2(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ gen_evsel(ctx);
+}
+
+static void gen_evsel3(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ gen_evsel(ctx);
+}
+
+/* Multiply */
+
+static inline void gen_evmwumi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwumia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwumiaa(DisasContext *ctx)
+{
+ TCGv_i64 acc;
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
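+
+/*
+ * Reference model for evmwumiaa above (illustration only): unsigned
+ * 32x32->64 multiply accumulated into the SPE accumulator, with the
+ * sum written to both ACC and rD.
+ */
+#if 0
+static uint64_t evmwumiaa_model(uint32_t a, uint32_t b, uint64_t *acc)
+{
+    *acc += (uint64_t)a * (uint64_t)b;
+    return *acc;
+}
+#endif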
+
+static inline void gen_evmwsmi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwsmia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwsmiaa(DisasContext *ctx)
+{
+ TCGv_i64 acc;
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
+
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); //
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); //
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE);
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); ////
+
+/* SPE load and stores */
+static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
+{
+ target_ulong uimm = rB(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, uimm << sh);
+ } else {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ }
+}
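+
+/*
+ * EA model for the SPE immediate forms (illustration only; the
+ * NARROW_MODE truncation is omitted): the 5-bit UIMM field is scaled
+ * by the access size, so evldd (sh = 3) reaches offsets 0..248 in
+ * steps of 8.
+ */
+#if 0
+static target_ulong spe_imm_ea(target_ulong ra_val, int ra,
+                               unsigned uimm, int sh)
+{
+    return (ra == 0 ? 0 : ra_val) + ((target_ulong)uimm << sh);
+}
+#endif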
+
+static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_qemu_ld64_i64(ctx, t0, addr);
+ gen_store_gpr64(rD(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
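+
+/*
+ * Packing produced by evldh above (illustration only): the four
+ * successively loaded halfwords h0..h3 end up as gprh = h0:h1 and
+ * gpr = h2:h3.
+ */
+#if 0
+static void evldh_model(const uint16_t h[4], uint32_t *hi, uint32_t *lo)
+{
+    *hi = ((uint32_t)h[0] << 16) | h[1];
+    *lo = ((uint32_t)h[2] << 16) | h[3];
+}
+#endif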
+
+static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_load_gpr64(t0, rS(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, addr);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ t0 = tcg_temp_new(); \
+ if (Rc(ctx->opcode)) { \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
+ } else { \
+ gen_addr_reg_index(ctx, t0); \
+ } \
+ gen_op_##name(ctx, t0); \
+ tcg_temp_free(t0); \
+}
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
+
+/* Multiply and add - TODO */
+#if 0
+GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);//
+GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumian, evmhogsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+#endif
+
+/*** SPE floating-point extension ***/
+#define GEN_SPEFPUOP_CONV_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+}
+#define GEN_SPEFPUOP_CONV_32_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i32 t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i32(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t1, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i32 t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+}
+#define GEN_SPEFPUOP_ARITH2_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_ARITH2_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+#define GEN_SPEFPUOP_COMP_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_COMP_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+/* Single precision floating-point vectors operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(evfsadd);
+GEN_SPEFPUOP_ARITH2_64_64(evfssub);
+GEN_SPEFPUOP_ARITH2_64_64(evfsmul);
+GEN_SPEFPUOP_ARITH2_64_64(evfsdiv);
+static inline void gen_evfsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ ~0x80000000);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_evfsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_evfsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_64(evfscfui);
+GEN_SPEFPUOP_CONV_64_64(evfscfsi);
+GEN_SPEFPUOP_CONV_64_64(evfscfuf);
+GEN_SPEFPUOP_CONV_64_64(evfscfsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctui);
+GEN_SPEFPUOP_CONV_64_64(evfsctsi);
+GEN_SPEFPUOP_CONV_64_64(evfsctuf);
+GEN_SPEFPUOP_CONV_64_64(evfsctsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctuiz);
+GEN_SPEFPUOP_CONV_64_64(evfsctsiz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(evfscmpgt);
+GEN_SPEFPUOP_COMP_64(evfscmplt);
+GEN_SPEFPUOP_COMP_64(evfscmpeq);
+GEN_SPEFPUOP_COMP_64(evfststgt);
+GEN_SPEFPUOP_COMP_64(evfststlt);
+GEN_SPEFPUOP_COMP_64(evfststeq);
+
+/* Opcodes definitions */
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Single precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_32_32(efsadd);
+GEN_SPEFPUOP_ARITH2_32_32(efssub);
+GEN_SPEFPUOP_ARITH2_32_32(efsmul);
+GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
+static inline void gen_efsabs(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ (target_long)~0x80000000LL);
+}
+static inline void gen_efsnabs(DisasContext *ctx)
+{
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_efsneg(DisasContext *ctx)
+{
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_32_32(efscfui);
+GEN_SPEFPUOP_CONV_32_32(efscfsi);
+GEN_SPEFPUOP_CONV_32_32(efscfuf);
+GEN_SPEFPUOP_CONV_32_32(efscfsf);
+GEN_SPEFPUOP_CONV_32_32(efsctui);
+GEN_SPEFPUOP_CONV_32_32(efsctsi);
+GEN_SPEFPUOP_CONV_32_32(efsctuf);
+GEN_SPEFPUOP_CONV_32_32(efsctsf);
+GEN_SPEFPUOP_CONV_32_32(efsctuiz);
+GEN_SPEFPUOP_CONV_32_32(efsctsiz);
+GEN_SPEFPUOP_CONV_32_64(efscfd);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_32(efscmpgt);
+GEN_SPEFPUOP_COMP_32(efscmplt);
+GEN_SPEFPUOP_COMP_32(efscmpeq);
+GEN_SPEFPUOP_COMP_32(efststgt);
+GEN_SPEFPUOP_COMP_32(efststlt);
+GEN_SPEFPUOP_COMP_32(efststeq);
+
+/* Opcodes definitions */
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Double precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(efdadd);
+GEN_SPEFPUOP_ARITH2_64_64(efdsub);
+GEN_SPEFPUOP_ARITH2_64_64(efdmul);
+GEN_SPEFPUOP_ARITH2_64_64(efddiv);
+static inline void gen_efdabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_efdnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_efdneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_32(efdcfui);
+GEN_SPEFPUOP_CONV_64_32(efdcfsi);
+GEN_SPEFPUOP_CONV_64_32(efdcfuf);
+GEN_SPEFPUOP_CONV_64_32(efdcfsf);
+GEN_SPEFPUOP_CONV_32_64(efdctui);
+GEN_SPEFPUOP_CONV_32_64(efdctsi);
+GEN_SPEFPUOP_CONV_32_64(efdctuf);
+GEN_SPEFPUOP_CONV_32_64(efdctsf);
+GEN_SPEFPUOP_CONV_32_64(efdctuiz);
+GEN_SPEFPUOP_CONV_32_64(efdctsiz);
+GEN_SPEFPUOP_CONV_64_32(efdcfs);
+GEN_SPEFPUOP_CONV_64_64(efdcfuid);
+GEN_SPEFPUOP_CONV_64_64(efdcfsid);
+GEN_SPEFPUOP_CONV_64_64(efdctuidz);
+GEN_SPEFPUOP_CONV_64_64(efdctsidz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(efdcmpgt);
+GEN_SPEFPUOP_COMP_64(efdcmplt);
+GEN_SPEFPUOP_COMP_64(efdcmpeq);
+GEN_SPEFPUOP_COMP_64(efdtstgt);
+GEN_SPEFPUOP_COMP_64(efdtstlt);
+GEN_SPEFPUOP_COMP_64(efdtsteq);
+
+/* Opcodes definitions */
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); //
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+
+#undef GEN_SPE
+#undef GEN_SPEOP_LDST
diff --git a/target/ppc/translate/spe-ops.c.inc b/target/ppc/translate/spe-ops.c.inc
new file mode 100644
index 000000000..7efe8b874
--- /dev/null
+++ b/target/ppc/translate/spe-ops.c.inc
@@ -0,0 +1,105 @@
+GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+ GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE),
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE),
+
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE),
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
+GEN_SPEOP_LDST(evldd, 0x00, 3),
+GEN_SPEOP_LDST(evldw, 0x01, 3),
+GEN_SPEOP_LDST(evldh, 0x02, 3),
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
+GEN_SPEOP_LDST(evlwhe, 0x08, 2),
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3),
+GEN_SPEOP_LDST(evstdw, 0x11, 3),
+GEN_SPEOP_LDST(evstdh, 0x12, 3),
+GEN_SPEOP_LDST(evstwhe, 0x18, 2),
+GEN_SPEOP_LDST(evstwho, 0x1A, 2),
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2),
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
new file mode 100644
index 000000000..8eb8d3a06
--- /dev/null
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -0,0 +1,1894 @@
+/*
+ * translate/vmx-impl.c
+ *
+ * Altivec/VMX translation
+ */
+
+/*** Altivec vector extension ***/
+/* Altivec registers moves */
+
+static inline TCGv_ptr gen_avr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
+ return r;
+}
+
+#define GEN_VR_LDX(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 avr; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ avr = tcg_temp_new_i64(); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ /* \
+ * We only need to swap high and low halves. gen_qemu_ld64_i64 \
+ * does necessary 64-bit byteswap already. \
+ */ \
+ if (ctx->le_mode) { \
+ gen_qemu_ld64_i64(ctx, avr, EA); \
+ set_avr64(rD(ctx->opcode), avr, false); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, avr, EA); \
+ set_avr64(rD(ctx->opcode), avr, true); \
+ } else { \
+ gen_qemu_ld64_i64(ctx, avr, EA); \
+ set_avr64(rD(ctx->opcode), avr, true); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, avr, EA); \
+ set_avr64(rD(ctx->opcode), avr, false); \
+ } \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(avr); \
+}
+
+#define GEN_VR_STX(name, opc2, opc3) \
+static void gen_st##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 avr; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ avr = tcg_temp_new_i64(); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+ /* \
+ * We only need to swap high and low halves. gen_qemu_st64_i64 \
+ * does necessary 64-bit byteswap already. \
+ */ \
+ if (ctx->le_mode) { \
+ get_avr64(avr, rD(ctx->opcode), false); \
+ gen_qemu_st64_i64(ctx, avr, EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ get_avr64(avr, rD(ctx->opcode), true); \
+ gen_qemu_st64_i64(ctx, avr, EA); \
+ } else { \
+ get_avr64(avr, rD(ctx->opcode), true); \
+ gen_qemu_st64_i64(ctx, avr, EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ get_avr64(avr, rD(ctx->opcode), false); \
+ gen_qemu_st64_i64(ctx, avr, EA); \
+ } \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(avr); \
+}
+
+#define GEN_VR_LVE(name, opc2, opc3, size) \
+static void gen_lve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_lve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+#define GEN_VR_STVE(name, opc2, opc3, size) \
+static void gen_stve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_stve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+GEN_VR_LDX(lvx, 0x07, 0x03);
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
+
+GEN_VR_LVE(bx, 0x07, 0x00, 1);
+GEN_VR_LVE(hx, 0x07, 0x01, 2);
+GEN_VR_LVE(wx, 0x07, 0x02, 4);
+
+GEN_VR_STX(svx, 0x07, 0x07);
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
+GEN_VR_STX(svxl, 0x07, 0x0F);
+
+GEN_VR_STVE(bx, 0x07, 0x04, 1);
+GEN_VR_STVE(hx, 0x07, 0x05, 2);
+GEN_VR_STVE(wx, 0x07, 0x06, 4);
+
+static void gen_mfvscr(DisasContext *ctx)
+{
+ TCGv_i32 t;
+ TCGv_i64 avr;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ avr = tcg_temp_new_i64();
+ tcg_gen_movi_i64(avr, 0);
+ set_avr64(rD(ctx->opcode), avr, true);
+ t = tcg_temp_new_i32();
+ gen_helper_mfvscr(t, cpu_env);
+ tcg_gen_extu_i32_i64(avr, t);
+ set_avr64(rD(ctx->opcode), avr, false);
+ tcg_temp_free_i32(t);
+ tcg_temp_free_i64(avr);
+}
+
+static void gen_mtvscr(DisasContext *ctx)
+{
+ TCGv_i32 val;
+ int bofs;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ val = tcg_temp_new_i32();
+ bofs = avr_full_offset(rB(ctx->opcode));
+#ifdef HOST_WORDS_BIGENDIAN
+ bofs += 3 * 4;
+#endif
+
+ tcg_gen_ld_i32(val, cpu_env, bofs);
+ gen_helper_mtvscr(cpu_env, val);
+ tcg_temp_free_i32(val);
+}
+
+#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ TCGv_i64 t2; \
+ TCGv_i64 avr; \
+ TCGv_i64 ten, z; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ t2 = tcg_temp_new_i64(); \
+ avr = tcg_temp_new_i64(); \
+ ten = tcg_const_i64(10); \
+ z = tcg_const_i64(0); \
+ \
+ if (add_cin) { \
+ get_avr64(avr, rA(ctx->opcode), false); \
+ tcg_gen_mulu2_i64(t0, t1, avr, ten); \
+ get_avr64(avr, rB(ctx->opcode), false); \
+ tcg_gen_andi_i64(t2, avr, 0xF); \
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
+ set_avr64(rD(ctx->opcode), avr, false); \
+ } else { \
+ get_avr64(avr, rA(ctx->opcode), false); \
+ tcg_gen_mulu2_i64(avr, t2, avr, ten); \
+ set_avr64(rD(ctx->opcode), avr, false); \
+ } \
+ \
+ if (ret_carry) { \
+ get_avr64(avr, rA(ctx->opcode), true); \
+ tcg_gen_mulu2_i64(t0, t1, avr, ten); \
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
+ set_avr64(rD(ctx->opcode), avr, false); \
+ set_avr64(rD(ctx->opcode), z, true); \
+ } else { \
+ get_avr64(avr, rA(ctx->opcode), true); \
+ tcg_gen_mul_i64(t0, avr, ten); \
+ tcg_gen_add_i64(avr, t0, t2); \
+ set_avr64(rD(ctx->opcode), avr, true); \
+ } \
+ \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+ tcg_temp_free_i64(avr); \
+ tcg_temp_free_i64(ten); \
+ tcg_temp_free_i64(z); \
+} \
+
+GEN_VX_VMUL10(vmul10uq, 0, 0);
+GEN_VX_VMUL10(vmul10euq, 1, 0);
+GEN_VX_VMUL10(vmul10cuq, 0, 1);
+GEN_VX_VMUL10(vmul10ecuq, 1, 1);
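+/*
+ * Annotation (not part of the original comments): the 128-bit multiply by
+ * ten above is composed from two 64-bit multiplies. mulu2 on the lower
+ * doubleword produces a 128-bit partial product whose high half is the
+ * carry into the upper doubleword; add_cin additionally folds in a decimal
+ * carry-in taken from the low nibble of vB, and ret_carry returns the
+ * carry-out quadword instead of the product. In outline, for vmul10uq:
+ *   lo' = lo * 10;  carry = high 64 bits of (lo * 10);
+ *   hi' = hi * 10 + carry
+ */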
+
+#define GEN_VXFORM_V(name, vece, tcg_op, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ tcg_op(vece, \
+ avr_full_offset(rD(ctx->opcode)), \
+ avr_full_offset(rA(ctx->opcode)), \
+ avr_full_offset(rB(ctx->opcode)), \
+ 16, 16); \
+}
+
+/* Logical operations */
+GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16);
+GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17);
+GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18);
+GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19);
+GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20);
+GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26);
+GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22);
+GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21);
+
+#define GEN_VXFORM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM_TRANS(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ trans_##name(ctx); \
+}
+
+#define GEN_VXFORM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM3(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, ra, rb, rc); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+}
+
+/*
+ * Support for Altivec instruction pairs that use bit 31 (Rc) as
+ * an opcode bit. In general, these pairs come from different
+ * versions of the ISA, so we must also support a pair of flags for
+ * each instruction.
+ */
+#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+/*
+ * We use this macro when one instruction is realized with direct
+ * translation and the other one with a helper.
+ */
+#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1)\
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ trans_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+/* Like GEN_VXFORM_DUAL, with additional support for an invalid-bits mask */
+#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \
+ name1, flg1, flg2_1, inval1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \
+ !(ctx->opcode & inval0)) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
+ !(ctx->opcode & inval1)) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+#define GEN_VXFORM_HETRO(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
+ tcg_temp_free_ptr(rb); \
+}
+
+GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
+GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM_V(vadduhm, MO_16, tcg_gen_gvec_add, 0, 1);
+GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \
+ vmul10ecuq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_V(vadduwm, MO_32, tcg_gen_gvec_add, 0, 2);
+GEN_VXFORM_V(vaddudm, MO_64, tcg_gen_gvec_add, 0, 3);
+GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16);
+GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17);
+GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18);
+GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19);
+GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0);
+GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1);
+GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2);
+GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3);
+GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4);
+GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5);
+GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6);
+GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7);
+GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8);
+GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9);
+GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10);
+GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11);
+GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
+GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
+GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
+GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
+GEN_VXFORM(vavgub, 1, 16);
+GEN_VXFORM(vabsdub, 1, 16);
+GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
+ vabsdub, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguh, 1, 17);
+GEN_VXFORM(vabsduh, 1, 17);
+GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
+ vabsduh, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguw, 1, 18);
+GEN_VXFORM(vabsduw, 1, 18);
+GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
+ vabsduw, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavgsb, 1, 20);
+GEN_VXFORM(vavgsh, 1, 21);
+GEN_VXFORM(vavgsw, 1, 22);
+GEN_VXFORM(vmrghb, 6, 0);
+GEN_VXFORM(vmrghh, 6, 1);
+GEN_VXFORM(vmrghw, 6, 2);
+GEN_VXFORM(vmrglb, 6, 4);
+GEN_VXFORM(vmrglh, 6, 5);
+GEN_VXFORM(vmrglw, 6, 6);
+
+static void trans_vmrgew(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VA = rA(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
+
+ get_avr64(avr, VB, true);
+ tcg_gen_shri_i64(tmp, avr, 32);
+ get_avr64(avr, VA, true);
+ tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
+ set_avr64(VT, avr, true);
+
+ get_avr64(avr, VB, false);
+ tcg_gen_shri_i64(tmp, avr, 32);
+ get_avr64(avr, VA, false);
+ tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
+ set_avr64(VT, avr, false);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(avr);
+}
+
+static void trans_vmrgow(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VA = rA(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 avr = tcg_temp_new_i64();
+
+ get_avr64(t0, VB, true);
+ get_avr64(t1, VA, true);
+ tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
+ set_avr64(VT, avr, true);
+
+ get_avr64(t0, VB, false);
+ get_avr64(t1, VA, false);
+ tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
+ set_avr64(VT, avr, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(avr);
+}
+
+/*
+ * lvsl VRT,RA,RB - Load Vector for Shift Left
+ *
+ * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
+ * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
+ * Bytes sh:sh+15 of X are placed into vD.
+ */
+static void trans_lvsl(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ TCGv_i64 result = tcg_temp_new_i64();
+ TCGv_i64 sh = tcg_temp_new_i64();
+ TCGv EA = tcg_temp_new();
+
+    /* Get sh (from the description) by ANDing EA with 0xf. */
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_extu_tl_i64(sh, EA);
+ tcg_gen_andi_i64(sh, sh, 0xfULL);
+
+ /*
+     * Create bytes sh:sh+7 of X (from the description) and place them in
+     * the higher doubleword of vD.
+ */
+ tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
+ tcg_gen_addi_i64(result, sh, 0x0001020304050607ull);
+ set_avr64(VT, result, true);
+ /*
+     * Create bytes sh+8:sh+15 of X (from the description) and place them in
+     * the lower doubleword of vD.
+ */
+ tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
+ set_avr64(VT, result, false);
+
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(sh);
+ tcg_temp_free(EA);
+}
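+
+/*
+ * Worked example (annotation): for sh = 3, sh * 0x0101010101010101 is
+ * 0x0303030303030303, so the higher doubleword becomes 0x030405060708090A
+ * and the lower one 0x0B0C0D0E0F101112, i.e. bytes 3:18 of X, as the
+ * description requires.
+ */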
+
+/*
+ * lvsr VRT,RA,RB - Load Vector for Shift Right
+ *
+ * Let the EA be the sum (rA|0)+(rB). Let sh=EA[28–31].
+ * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F.
+ * Bytes (16-sh):(31-sh) of X are placed into vD.
+ */
+static void trans_lvsr(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ TCGv_i64 result = tcg_temp_new_i64();
+ TCGv_i64 sh = tcg_temp_new_i64();
+ TCGv EA = tcg_temp_new();
+
+
+    /* Get sh (from the description) by ANDing EA with 0xf. */
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_extu_tl_i64(sh, EA);
+ tcg_gen_andi_i64(sh, sh, 0xfULL);
+
+ /*
+     * Create bytes (16-sh):(23-sh) of X (from the description) and place them
+     * in the higher doubleword of vD.
+ */
+ tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL);
+ tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh);
+ set_avr64(VT, result, true);
+ /*
+     * Create bytes (24-sh):(31-sh) of X (from the description) and place them
+     * in the lower doubleword of vD.
+ */
+ tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
+ set_avr64(VT, result, false);
+
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(sh);
+ tcg_temp_free(EA);
+}
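+
+/*
+ * Worked example (annotation): for sh = 3, the higher doubleword becomes
+ * 0x1011121314151617 - 0x0303030303030303 = 0x0D0E0F1011121314 and the
+ * lower one 0x15161718191A1B1C, i.e. bytes 13:28 of X, matching
+ * (16-sh):(31-sh) from the description.
+ */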
+
+/*
+ * vsl VRT,VRA,VRB - Vector Shift Left
+ *
+ * Shift the 128-bit value of vA left by the amount specified in bits 125-127
+ * of vB. The lowest 3 bits in each byte element of register vB must be
+ * identical, otherwise the result is undefined.
+ */
+static void trans_vsl(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VA = rA(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 avr = tcg_temp_new_i64();
+ TCGv_i64 sh = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
+
+ /*
+     * Save the highest 'sh' bits of the lower doubleword element of vA in
+     * 'carry' and shift the lower doubleword.
+ */
+ get_avr64(avr, VA, false);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shri_i64(carry, avr, 32);
+ tcg_gen_shr_i64(carry, carry, tmp);
+ tcg_gen_shl_i64(avr, avr, sh);
+ set_avr64(VT, avr, false);
+
+ /*
+     * Shift the higher doubleword element of vA and replace its lowest
+     * 'sh' bits with 'carry'.
+ */
+ get_avr64(avr, VA, true);
+ tcg_gen_shl_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, true);
+
+ tcg_temp_free_i64(avr);
+ tcg_temp_free_i64(sh);
+ tcg_temp_free_i64(carry);
+ tcg_temp_free_i64(tmp);
+}
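+
+/*
+ * Annotation: the 128-bit shift above is composed from two 64-bit shifts.
+ * The two-step right shift of 'carry' (by 32, then by 32 - sh) computes
+ * lo >> (64 - sh) without ever using a shift count of 64, which would be
+ * undefined for sh = 0; the carried bits are then OR-ed into the shifted
+ * higher doubleword.
+ */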
+
+/*
+ * vsr VRT,VRA,VRB - Vector Shift Right
+ *
+ * Shift the 128-bit value of vA right by the amount specified in bits 125-127
+ * of vB. The lowest 3 bits in each byte element of register vB must be
+ * identical, otherwise the result is undefined.
+ */
+static void trans_vsr(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VA = rA(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 avr = tcg_temp_new_i64();
+ TCGv_i64 sh = tcg_temp_new_i64();
+ TCGv_i64 carry = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* Place bits 125-127 of vB in 'sh'. */
+ get_avr64(avr, VB, false);
+ tcg_gen_andi_i64(sh, avr, 0x07ULL);
+
+ /*
+     * Save the lowest 'sh' bits of the higher doubleword element of vA in
+     * 'carry' and shift the higher doubleword.
+ */
+ get_avr64(avr, VA, true);
+ tcg_gen_subfi_i64(tmp, 32, sh);
+ tcg_gen_shli_i64(carry, avr, 32);
+ tcg_gen_shl_i64(carry, carry, tmp);
+ tcg_gen_shr_i64(avr, avr, sh);
+ set_avr64(VT, avr, true);
+ /*
+     * Shift the lower doubleword element of vA and replace its highest
+     * 'sh' bits with 'carry'.
+ */
+ get_avr64(avr, VA, false);
+ tcg_gen_shr_i64(avr, avr, sh);
+ tcg_gen_or_i64(avr, avr, carry);
+ set_avr64(VT, avr, false);
+
+ tcg_temp_free_i64(avr);
+ tcg_temp_free_i64(sh);
+ tcg_temp_free_i64(carry);
+ tcg_temp_free_i64(tmp);
+}
+
+/*
+ * vgbbd VRT,VRB - Vector Gather Bits by Bytes by Doubleword
+ *
+ * The i-th bits (i in the range 1 to 8) of each byte of a doubleword element
+ * in the source register are concatenated and placed into the i-th byte of
+ * the corresponding doubleword element in the destination register.
+ *
+ * The solution below handles both doubleword elements of the source register
+ * in parallel, in order to reduce the number of instructions needed (that is
+ * why arrays are used):
+ * First, both doubleword elements of source register vB are placed in the
+ * corresponding elements of array avr. Bits are gathered in 2x8 iterations
+ * (two for loops). In the first iteration, bit 1 of byte 1, bit 2 of byte 2,
+ * ... bit 8 of byte 8 are already in their final spots, so avr[i], i={0,1},
+ * can be AND-ed with tcg_mask. In every following iteration, avr[i] and
+ * tcg_mask have to be shifted right by 7 and 8 places respectively, in order
+ * to get bit 1 of byte 2, bit 2 of byte 3, ... bit 7 of byte 8 into their
+ * final spots, so the shifted avr values (saved in tmp) can be AND-ed with
+ * the new value of tcg_mask, and so on. After the first 8 iterations (the
+ * first loop), all the first bits are in their final places, all the second
+ * bits except the second bit of the eighth byte are in their places, ...,
+ * and only one eighth bit (the one from the eighth byte) is in its place.
+ * The second loop performs all operations symmetrically, in order to get the
+ * other half of the bits into their final spots. The results for the first
+ * and second doubleword elements are saved in result[0] and result[1]
+ * respectively, and in the end are stored in the corresponding doubleword
+ * elements of destination register vD.
+ */
+static void trans_vgbbd(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ uint64_t mask = 0x8040201008040201ULL;
+ int i, j;
+
+ TCGv_i64 result[2];
+ result[0] = tcg_temp_new_i64();
+ result[1] = tcg_temp_new_i64();
+ TCGv_i64 avr[2];
+ avr[0] = tcg_temp_new_i64();
+ avr[1] = tcg_temp_new_i64();
+ TCGv_i64 tcg_mask = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(tcg_mask, mask);
+ for (j = 0; j < 2; j++) {
+ get_avr64(avr[j], VB, j);
+ tcg_gen_and_i64(result[j], avr[j], tcg_mask);
+ }
+ for (i = 1; i < 8; i++) {
+ tcg_gen_movi_i64(tcg_mask, mask >> (i * 8));
+ for (j = 0; j < 2; j++) {
+ tcg_gen_shri_i64(tmp, avr[j], i * 7);
+ tcg_gen_and_i64(tmp, tmp, tcg_mask);
+ tcg_gen_or_i64(result[j], result[j], tmp);
+ }
+ }
+ for (i = 1; i < 8; i++) {
+ tcg_gen_movi_i64(tcg_mask, mask << (i * 8));
+ for (j = 0; j < 2; j++) {
+ tcg_gen_shli_i64(tmp, avr[j], i * 7);
+ tcg_gen_and_i64(tmp, tmp, tcg_mask);
+ tcg_gen_or_i64(result[j], result[j], tmp);
+ }
+ }
+ for (j = 0; j < 2; j++) {
+ set_avr64(VT, result[j], j);
+ }
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(tcg_mask);
+ tcg_temp_free_i64(result[0]);
+ tcg_temp_free_i64(result[1]);
+ tcg_temp_free_i64(avr[0]);
+ tcg_temp_free_i64(avr[1]);
+}
+
+/*
+ * vclzw VRT,VRB - Vector Count Leading Zeros Word
+ *
+ * Count the number of leading zero bits of each word element in the source
+ * register and place the result in the corresponding word element of the
+ * destination register.
+ */
+static void trans_vclzw(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ int i;
+
+ /* Perform count for every word element using tcg_gen_clzi_i32. */
+ for (i = 0; i < 4; i++) {
+ tcg_gen_ld_i32(tmp, cpu_env,
+ offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
+ tcg_gen_clzi_i32(tmp, tmp, 32);
+ tcg_gen_st_i32(tmp, cpu_env,
+ offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
+ }
+
+ tcg_temp_free_i32(tmp);
+}
+
+/*
+ * vclzd VRT,VRB - Vector Count Leading Zeros Doubleword
+ *
+ * Count the number of leading zero bits of each doubleword element in the
+ * source register and place the result in the corresponding doubleword
+ * element of the destination register.
+ */
+static void trans_vclzd(DisasContext *ctx)
+{
+ int VT = rD(ctx->opcode);
+ int VB = rB(ctx->opcode);
+ TCGv_i64 avr = tcg_temp_new_i64();
+
+ /* high doubleword */
+ get_avr64(avr, VB, true);
+ tcg_gen_clzi_i64(avr, avr, 64);
+ set_avr64(VT, avr, true);
+
+ /* low doubleword */
+ get_avr64(avr, VB, false);
+ tcg_gen_clzi_i64(avr, avr, 64);
+ set_avr64(VT, avr, false);
+
+ tcg_temp_free_i64(avr);
+}
+
+GEN_VXFORM(vmuloub, 4, 0);
+GEN_VXFORM(vmulouh, 4, 1);
+GEN_VXFORM(vmulouw, 4, 2);
+GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
+GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
+ vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vmulosb, 4, 4);
+GEN_VXFORM(vmulosh, 4, 5);
+GEN_VXFORM(vmulosw, 4, 6);
+GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7);
+GEN_VXFORM(vmuleub, 4, 8);
+GEN_VXFORM(vmuleuh, 4, 9);
+GEN_VXFORM(vmuleuw, 4, 10);
+GEN_VXFORM(vmulhuw, 4, 10);
+GEN_VXFORM(vmulhud, 4, 11);
+GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE,
+ vmulhuw, PPC_NONE, PPC2_ISA310);
+GEN_VXFORM(vmulesb, 4, 12);
+GEN_VXFORM(vmulesh, 4, 13);
+GEN_VXFORM(vmulesw, 4, 14);
+GEN_VXFORM(vmulhsw, 4, 14);
+GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE,
+ vmulhsw, PPC_NONE, PPC2_ISA310);
+GEN_VXFORM(vmulhsd, 4, 15);
+GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
+GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
+GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
+GEN_VXFORM(vrlwnm, 2, 6);
+GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23);
+GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8);
+GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9);
+GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10);
+GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27);
+GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12);
+GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13);
+GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14);
+GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15);
+GEN_VXFORM(vsrv, 2, 28);
+GEN_VXFORM(vslv, 2, 29);
+GEN_VXFORM(vslo, 6, 16);
+GEN_VXFORM(vsro, 6, 17);
+GEN_VXFORM(vaddcuw, 0, 6);
+GEN_VXFORM(vsubcuw, 0, 22);
+
+#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \
+static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \
+ TCGv_vec sat, TCGv_vec a, \
+ TCGv_vec b) \
+{ \
+ TCGv_vec x = tcg_temp_new_vec_matching(t); \
+ glue(glue(tcg_gen_, NORM), _vec)(VECE, x, a, b); \
+ glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \
+ tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \
+ tcg_gen_or_vec(VECE, sat, sat, x); \
+ tcg_temp_free_vec(x); \
+} \
+static void glue(gen_, NAME)(DisasContext *ctx) \
+{ \
+ static const TCGOpcode vecop_list[] = { \
+ glue(glue(INDEX_op_, NORM), _vec), \
+ glue(glue(INDEX_op_, SAT), _vec), \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen4 g = { \
+ .fniv = glue(glue(gen_, NAME), _vec), \
+ .fno = glue(gen_helper_, NAME), \
+ .opt_opc = vecop_list, \
+ .write_aofs = true, \
+ .vece = VECE, \
+ }; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ tcg_gen_gvec_4(avr_full_offset(rD(ctx->opcode)), \
+ offsetof(CPUPPCState, vscr_sat), \
+ avr_full_offset(rA(ctx->opcode)), \
+ avr_full_offset(rB(ctx->opcode)), \
+ 16, 16, &g); \
+}
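+
+/*
+ * Annotation: saturation is detected by computing both the modular (NORM)
+ * and the saturating (SAT) result and comparing them; every lane in which
+ * they differ sets its bits in vscr_sat, which the implementation uses to
+ * derive VSCR[SAT] when VSCR is read back.
+ */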
+
+GEN_VXFORM_SAT(vaddubs, MO_8, add, usadd, 0, 8);
+GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM_SAT(vadduhs, MO_16, add, usadd, 0, 9);
+GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
+ vmul10euq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_SAT(vadduws, MO_32, add, usadd, 0, 10);
+GEN_VXFORM_SAT(vaddsbs, MO_8, add, ssadd, 0, 12);
+GEN_VXFORM_SAT(vaddshs, MO_16, add, ssadd, 0, 13);
+GEN_VXFORM_SAT(vaddsws, MO_32, add, ssadd, 0, 14);
+GEN_VXFORM_SAT(vsububs, MO_8, sub, ussub, 0, 24);
+GEN_VXFORM_SAT(vsubuhs, MO_16, sub, ussub, 0, 25);
+GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
+GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
+GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
+GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
+GEN_VXFORM(vadduqm, 0, 4);
+GEN_VXFORM(vaddcuq, 0, 5);
+GEN_VXFORM3(vaddeuqm, 30, 0);
+GEN_VXFORM3(vaddecuq, 30, 0);
+GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vsubuqm, 0, 20);
+GEN_VXFORM(vsubcuq, 0, 21);
+GEN_VXFORM3(vsubeuqm, 31, 0);
+GEN_VXFORM3(vsubecuq, 31, 0);
+GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0);
+GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1);
+GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2);
+GEN_VXFORM(vrlwmi, 2, 2);
+GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3);
+GEN_VXFORM(vrldmi, 2, 3);
+GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
+ vrldmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_TRANS(vsl, 2, 7);
+GEN_VXFORM(vrldnm, 2, 7);
+GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
+ vrldnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_TRANS(vsr, 2, 11);
+GEN_VXFORM_ENV(vpkuhum, 7, 0);
+GEN_VXFORM_ENV(vpkuwum, 7, 1);
+GEN_VXFORM_ENV(vpkudum, 7, 17);
+GEN_VXFORM_ENV(vpkuhus, 7, 2);
+GEN_VXFORM_ENV(vpkuwus, 7, 3);
+GEN_VXFORM_ENV(vpkudus, 7, 19);
+GEN_VXFORM_ENV(vpkshus, 7, 4);
+GEN_VXFORM_ENV(vpkswus, 7, 5);
+GEN_VXFORM_ENV(vpksdus, 7, 21);
+GEN_VXFORM_ENV(vpkshss, 7, 6);
+GEN_VXFORM_ENV(vpkswss, 7, 7);
+GEN_VXFORM_ENV(vpksdss, 7, 23);
+GEN_VXFORM(vpkpx, 7, 12);
+GEN_VXFORM_ENV(vsum4ubs, 4, 24);
+GEN_VXFORM_ENV(vsum4sbs, 4, 28);
+GEN_VXFORM_ENV(vsum4shs, 4, 25);
+GEN_VXFORM_ENV(vsum2sws, 4, 26);
+GEN_VXFORM_ENV(vsumsws, 4, 30);
+GEN_VXFORM_ENV(vaddfp, 5, 0);
+GEN_VXFORM_ENV(vsubfp, 5, 1);
+GEN_VXFORM_ENV(vmaxfp, 5, 16);
+GEN_VXFORM_ENV(vminfp, 5, 17);
+GEN_VXFORM_HETRO(vextublx, 6, 24)
+GEN_VXFORM_HETRO(vextuhlx, 6, 25)
+GEN_VXFORM_HETRO(vextuwlx, 6, 26)
+GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
+ vextuwlx, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_HETRO(vextubrx, 6, 28)
+GEN_VXFORM_HETRO(vextuhrx, 6, 29)
+GEN_VXFORM_HETRO(vextuwrx, 6, 30)
+GEN_VXFORM_TRANS(lvsl, 6, 31)
+GEN_VXFORM_TRANS(lvsr, 6, 32)
+GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
+ vextuwrx, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##opname(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+/*
+ * Support for Altivec instructions that use bit 31 (Rc) as an opcode
+ * bit but also use bit 21 as an actual Rc bit. In general, these pairs
+ * come from different versions of the ISA, so we must also support a
+ * pair of flags for each instruction.
+ */
+#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name0(ctx); \
+ } else { \
+ gen_##name0##_(ctx); \
+ } \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_##name1##_(ctx); \
+ } \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+GEN_VXRFORM(vcmpequb, 3, 0)
+GEN_VXRFORM(vcmpequh, 3, 1)
+GEN_VXRFORM(vcmpequw, 3, 2)
+GEN_VXRFORM(vcmpequd, 3, 3)
+GEN_VXRFORM(vcmpnezb, 3, 4)
+GEN_VXRFORM(vcmpnezh, 3, 5)
+GEN_VXRFORM(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtsd, 3, 15)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM(vcmpgtud, 3, 11)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
+GEN_VXRFORM(vcmpneb, 3, 0)
+GEN_VXRFORM(vcmpneh, 3, 1)
+GEN_VXRFORM(vcmpnew, 3, 2)
+
+GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneb, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneh, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
+ vcmpnew, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
+
+static void gen_vsplti(DisasContext *ctx, int vece)
+{
+ int simm;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ simm = SIMM5(ctx->opcode);
+ tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
+}
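+
+/*
+ * Annotation: SIMM5 is sign-extended and replicated to every element, so
+ * e.g. vspltisb vD,-1 fills all 16 bytes of vD with 0xFF.
+ */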
+
+#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
+
+GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
+GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
+GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \
+ tcg_temp_free_ptr(rb); \
+ }
+GEN_VXFORM_NOA(vupkhsb, 7, 8);
+GEN_VXFORM_NOA(vupkhsh, 7, 9);
+GEN_VXFORM_NOA(vupkhsw, 7, 25);
+GEN_VXFORM_NOA(vupklsb, 7, 10);
+GEN_VXFORM_NOA(vupklsh, 7, 11);
+GEN_VXFORM_NOA(vupklsw, 7, 27);
+GEN_VXFORM_NOA(vupkhpx, 7, 13);
+GEN_VXFORM_NOA(vupklpx, 7, 15);
+GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
+GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
+GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
+GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
+GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
+GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
+GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
+GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
+GEN_VXFORM_NOA(vprtybw, 1, 24);
+GEN_VXFORM_NOA(vprtybd, 1, 24);
+GEN_VXFORM_NOA(vprtybq, 1, 24);
+
+static void gen_vsplt(DisasContext *ctx, int vece)
+{
+ int uimm, dofs, bofs;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ uimm = UIMM5(ctx->opcode);
+ bofs = avr_full_offset(rB(ctx->opcode));
+ dofs = avr_full_offset(rD(ctx->opcode));
+
+ /* Experimental testing shows that hardware masks the immediate. */
+ bofs += (uimm << vece) & 15;
+#ifndef HOST_WORDS_BIGENDIAN
+ bofs ^= 15;
+ bofs &= ~((1 << vece) - 1);
+#endif
+
+ tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
+}
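+
+/*
+ * Annotation: the adjustment above converts the big-endian element
+ * numbering used by the ISA into the host memory layout: on a
+ * little-endian host, byte b (big-endian numbering) lives at offset
+ * 15 - b, and the final mask re-aligns the offset to the element size.
+ */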
+
+#define GEN_VXFORM_VSPLT(name, vece, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) { gen_vsplt(ctx, vece); }
+
+#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ uint8_t uimm = UIMM4(ctx->opcode); \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ if (uimm > splat_max) { \
+ uimm = 0; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ tcg_gen_movi_i32(t0, uimm); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
+GEN_VXFORM_VSPLT(vsplth, MO_16, 6, 9);
+GEN_VXFORM_VSPLT(vspltw, MO_32, 6, 10);
+GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
+GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
+GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
+GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
+GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
+GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
+GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
+GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
+GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
+ vextractub, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
+ vextractuh, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
+ vextractuw, PPC_NONE, PPC2_ISA300);
+
+static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
+{
+ TCGv_ptr vrt, vra, vrb;
+ TCGv rc;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ vrt = gen_avr_ptr(a->vrt);
+ vra = gen_avr_ptr(a->vra);
+ vrb = gen_avr_ptr(a->vrb);
+ rc = tcg_temp_new();
+
+ tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
+ if (right) {
+ tcg_gen_subfi_tl(rc, 32 - size, rc);
+ }
+ gen_helper(cpu_env, vrt, vra, vrb, rc);
+
+ tcg_temp_free_ptr(vrt);
+ tcg_temp_free_ptr(vra);
+ tcg_temp_free_ptr(vrb);
+ tcg_temp_free(rc);
+ return true;
+}
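+
+/*
+ * Annotation: rc indexes into the 32-byte concatenation VRA || VRB; the
+ * right-indexed forms below reuse the left-indexed helpers with the index
+ * mirrored to (32 - size) - rc, so one helper per element size suffices.
+ */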
+
+TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
+TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
+TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
+TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
+
+TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
+TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
+TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
+TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
+
+static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
+ TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ TCGv_ptr t;
+ TCGv idx;
+
+ t = gen_avr_ptr(vrt);
+ idx = tcg_temp_new();
+
+ tcg_gen_andi_tl(idx, ra, 0xF);
+ if (right) {
+ tcg_gen_subfi_tl(idx, 16 - size, idx);
+ }
+
+ gen_helper(cpu_env, t, rb, idx);
+
+ tcg_temp_free_ptr(t);
+ tcg_temp_free(idx);
+
+ return true;
+}
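+
+/*
+ * Annotation: the right-indexed variants reuse the left-indexed helpers
+ * with the byte index mirrored to (16 - size) - idx (see the VINS*RX
+ * TRANS entries below), so only one helper per element size is needed.
+ */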
+
+static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
+ int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ bool ok;
+ TCGv_i64 val;
+
+ val = tcg_temp_new_i64();
+ get_avr64(val, vrb, true);
+ ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
+
+ tcg_temp_free_i64(val);
+ return ok;
+}
+
+static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ bool ok;
+ TCGv_i64 val;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ val = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
+
+ ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
+
+ tcg_temp_free_i64(val);
+ return ok;
+}
+
+static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
+ gen_helper);
+}
+
+static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ bool ok;
+ TCGv_i64 val;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ if (a->uim > (16 - size)) {
+ /*
+ * PowerISA v3.1 says that the resulting value is undefined in this
+ * case, so just log a guest error and leave VRT unchanged. The
+ * real hardware would do a partial insert, e.g. if VRT is zeroed and
+ * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
+ * VRT = 0x0000...00001234, but we don't bother to reproduce this
+ * behavior as software shouldn't rely on it.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
+ " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
+ 16 - size);
+ return true;
+ }
+
+ val = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
+
+ ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
+ gen_helper);
+
+ tcg_temp_free_i64(val);
+ return ok;
+}
+
+static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ if (a->uim > (16 - size)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
+ " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
+ 16 - size);
+ return true;
+ }
+
+ return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
+ gen_helper);
+}
+
+TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
+TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
+TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
+TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
+
+TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
+TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
+TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
+TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
+
+TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
+TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
+
+TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
+TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
+TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
+
+TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
+TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
+TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
+
+TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
+TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
+TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
+TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
+
+static void gen_vsldoi(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rd;
+ TCGv_i32 sh;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ sh = tcg_const_i32(VSH(ctx->opcode));
+ gen_helper_vsldoi(rd, ra, rb, sh);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rd);
+ tcg_temp_free_i32(sh);
+}
+
+static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
+{
+ TCGv_i64 t0, t1, t2;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vra, true);
+ get_avr64(t1, a->vra, false);
+
+ if (a->sh != 0) {
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t2, a->vrb, true);
+
+ tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
+ tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
+
+ tcg_temp_free_i64(t2);
+ }
+
+ set_avr64(a->vrt, t0, true);
+ set_avr64(a->vrt, t1, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ return true;
+}
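+
+/*
+ * Annotation: tcg_gen_extract2_i64(ret, lo, hi, 64 - sh) computes
+ * (hi << sh) | (lo >> (64 - sh)), so the two calls above select the high
+ * 128 bits of the 256-bit concatenation VRA || VRB shifted left by sh
+ * bits, which is what VSLDBI requires.
+ */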
+
+static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
+{
+ TCGv_i64 t2, t1, t0;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vrb, false);
+ get_avr64(t1, a->vrb, true);
+
+ if (a->sh != 0) {
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t2, a->vra, false);
+
+ tcg_gen_extract2_i64(t0, t0, t1, a->sh);
+ tcg_gen_extract2_i64(t1, t1, t2, a->sh);
+
+ tcg_temp_free_i64(t2);
+ }
+
+ set_avr64(a->vrt, t0, false);
+ set_avr64(a->vrt, t1, true);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ return true;
+}
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ if (Rc(ctx->opcode)) { \
+ gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
+ } else { \
+ gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
+ } \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
+
+static void gen_vmladduhm(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vmladduhm(rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_vpermr(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
+GEN_VAFORM_PAIRED(vsel, vperm, 21)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+
+GEN_VXFORM_NOA(vclzb, 1, 28)
+GEN_VXFORM_NOA(vclzh, 1, 29)
+GEN_VXFORM_TRANS(vclzw, 1, 30)
+GEN_VXFORM_TRANS(vclzd, 1, 31)
+GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
+GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
+GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
+GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
+GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
+GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
+GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
+GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
+GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
+GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
+GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
+GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
+GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
+GEN_VXFORM_NOA(vpopcntb, 1, 28)
+GEN_VXFORM_NOA(vpopcnth, 1, 29)
+GEN_VXFORM_NOA(vpopcntw, 1, 30)
+GEN_VXFORM_NOA(vpopcntd, 1, 31)
+GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vbpermd, 6, 23);
+GEN_VXFORM(vbpermq, 6, 21);
+GEN_VXFORM_TRANS(vgbbd, 6, 20);
+GEN_VXFORM(vpmsumb, 4, 16)
+GEN_VXFORM(vpmsumh, 4, 17)
+GEN_VXFORM(vpmsumw, 4, 18)
+GEN_VXFORM(vpmsumd, 4, 19)
+
+#define GEN_BCD(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
+ \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+#define GEN_BCD2(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr rd, rb; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, rb, ps); \
+ \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+GEN_BCD(bcdadd)
+GEN_BCD(bcdsub)
+GEN_BCD2(bcdcfn)
+GEN_BCD2(bcdctn)
+GEN_BCD2(bcdcfz)
+GEN_BCD2(bcdctz)
+GEN_BCD2(bcdcfsq)
+GEN_BCD2(bcdctsq)
+GEN_BCD2(bcdsetsgn)
+GEN_BCD(bcdcpsgn);
+GEN_BCD(bcds);
+GEN_BCD(bcdus);
+GEN_BCD(bcdsr);
+GEN_BCD(bcdtrunc);
+GEN_BCD(bcdutrunc);
+
+static void gen_xpnd04_1(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 0:
+ gen_bcdctsq(ctx);
+ break;
+ case 2:
+ gen_bcdcfsq(ctx);
+ break;
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 5:
+ gen_bcdctn(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ case 31:
+ gen_bcdsetsgn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+static void gen_xpnd04_2(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 0:
+ gen_bcdctsq(ctx);
+ break;
+ case 2:
+ gen_bcdcfsq(ctx);
+ break;
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ case 31:
+ gen_bcdsetsgn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+
+GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_1, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_2, PPC_NONE, PPC2_ISA300)
+
+GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vaddshs, PPC_ALTIVEC, PPC_NONE, \
+ bcdcpsgn, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubudm, PPC2_ALTIVEC_207, PPC_NONE, \
+ bcds, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
+ bcdus, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
+ bcdtrunc, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
+ bcdtrunc, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
+ bcdutrunc, PPC_NONE, PPC2_ISA300)
+
+
+static void gen_vsbox(DisasContext *ctx)
+{
+ TCGv_ptr ra, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vsbox(rd, ra);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VXFORM(vcipher, 4, 20)
+GEN_VXFORM(vcipherlast, 4, 20)
+GEN_VXFORM(vncipher, 4, 21)
+GEN_VXFORM(vncipherlast, 4, 21)
+
+GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+
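+/*
+ * vshasigmaw/vshasigmad implement the SHA-256 and SHA-512 sigma
+ * functions; here the rB field carries the ST and SIX immediates
+ * rather than a register number, so it is passed to the helper as a
+ * constant.
+ */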
+#define VSHASIGMA(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rd; \
+ TCGv_i32 st_six; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ st_six = tcg_const_i32(rB(ctx->opcode)); \
+ gen_helper_##op(rd, ra, st_six); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(st_six); \
+}
+
+VSHASIGMA(vshasigmaw)
+VSHASIGMA(vshasigmad)
+
+GEN_VXFORM3(vpermxor, 22, 0xFF)
+GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
+ vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
+
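+/*
+ * ISA 3.1 vector bit-manipulation instructions, implemented with the
+ * decodetree interface: a gvec expansion applies the 64-bit helper to
+ * each doubleword of the 16-byte vector.
+ */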
+static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_CFUGED,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3i g = {
+ .fni8 = do_cntzdm,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, false, &g);
+
+ return true;
+}
+
+static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3i g = {
+ .fni8 = do_cntzdm,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, true, &g);
+
+ return true;
+}
+
+static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_PDEPD,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_PEXTD,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+#undef GEN_VR_LDX
+#undef GEN_VR_STX
+#undef GEN_VR_LVE
+#undef GEN_VR_STVE
+
+#undef GEN_VX_LOGICAL
+#undef GEN_VX_LOGICAL_207
+#undef GEN_VXFORM
+#undef GEN_VXFORM_207
+#undef GEN_VXFORM_DUAL
+#undef GEN_VXRFORM_DUAL
+#undef GEN_VXRFORM1
+#undef GEN_VXRFORM
+#undef GEN_VXFORM_VSPLTI
+#undef GEN_VXFORM_NOA
+#undef GEN_VXFORM_UIMM
+#undef GEN_VAFORM_PAIRED
+
+#undef GEN_BCD2
diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc
new file mode 100644
index 000000000..25ee715b4
--- /dev/null
+++ b/target/ppc/translate/vmx-ops.c.inc
@@ -0,0 +1,303 @@
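+/*
+ * Opcode table entries for the AltiVec instructions: each GEN_* macro
+ * expands to a table entry that binds the corresponding gen_* callback
+ * from vmx-impl.c.inc to its primary and extended opcode fields.
+ */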
+#define GEN_VR_LDX(name, opc2, opc3) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STX(name, opc2, opc3) \
+GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_LVE(name, opc2, opc3) \
+ GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STVE(name, opc2, opc3) \
+ GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+GEN_VR_LDX(lvx, 0x07, 0x03),
+GEN_VR_LDX(lvxl, 0x07, 0x0B),
+GEN_VR_LVE(bx, 0x07, 0x00),
+GEN_VR_LVE(hx, 0x07, 0x01),
+GEN_VR_LVE(wx, 0x07, 0x02),
+GEN_VR_STX(svx, 0x07, 0x07),
+GEN_VR_STX(svxl, 0x07, 0x0F),
+GEN_VR_STVE(bx, 0x07, 0x04),
+GEN_VR_STVE(hx, 0x07, 0x05),
+GEN_VR_STVE(wx, 0x07, 0x06),
+
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16),
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17),
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18),
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19),
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20),
+GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26),
+GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22),
+GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21),
+
+#define GEN_VXFORM(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VXFORM_207(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define GEN_VXFORM_300(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \
+GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
+ PPC2_ISA300)
+
+#define GEN_VXFORM_310(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA310)
+
+#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
+
+#define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1),
+
+GEN_VXFORM_DUAL(vaddubm, vmul10cuq, 0, 0, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhm, vmul10ecuq, 0, 1, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduwm, 0, 2),
+GEN_VXFORM_207(vaddudm, 0, 3),
+GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuwm, bcdus, 0, 18, PPC_ALTIVEC, PPC2_ISA300),
+GEN_VXFORM_DUAL(vsubudm, bcds, 0, 19, PPC2_ALTIVEC_207, PPC2_ISA300),
+GEN_VXFORM_300(bcds, 0, 27),
+GEN_VXFORM(vmaxub, 1, 0),
+GEN_VXFORM(vmaxuh, 1, 1),
+GEN_VXFORM(vmaxuw, 1, 2),
+GEN_VXFORM_207(vmaxud, 1, 3),
+GEN_VXFORM(vmaxsb, 1, 4),
+GEN_VXFORM(vmaxsh, 1, 5),
+GEN_VXFORM(vmaxsw, 1, 6),
+GEN_VXFORM_207(vmaxsd, 1, 7),
+GEN_VXFORM(vminub, 1, 8),
+GEN_VXFORM(vminuh, 1, 9),
+GEN_VXFORM(vminuw, 1, 10),
+GEN_VXFORM_207(vminud, 1, 11),
+GEN_VXFORM(vminsb, 1, 12),
+GEN_VXFORM(vminsh, 1, 13),
+GEN_VXFORM(vminsw, 1, 14),
+GEN_VXFORM_207(vminsd, 1, 15),
+GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vavgsb, 1, 20),
+GEN_VXFORM(vavgsh, 1, 21),
+GEN_VXFORM(vavgsw, 1, 22),
+GEN_VXFORM(vmrghb, 6, 0),
+GEN_VXFORM(vmrghh, 6, 1),
+GEN_VXFORM(vmrghw, 6, 2),
+GEN_VXFORM(vmrglb, 6, 4),
+GEN_VXFORM(vmrglh, 6, 5),
+GEN_VXFORM(vmrglw, 6, 6),
+GEN_VXFORM_300(vextublx, 6, 24),
+GEN_VXFORM_300(vextuhlx, 6, 25),
+GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_300(vextubrx, 6, 28),
+GEN_VXFORM_300(vextuhrx, 6, 29),
+GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM(vmuloub, 4, 0),
+GEN_VXFORM(vmulouh, 4, 1),
+GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vmulosb, 4, 4),
+GEN_VXFORM(vmulosh, 4, 5),
+GEN_VXFORM_207(vmulosw, 4, 6),
+GEN_VXFORM_310(vmulld, 4, 7),
+GEN_VXFORM(vmuleub, 4, 8),
+GEN_VXFORM(vmuleuh, 4, 9),
+GEN_VXFORM_DUAL(vmuleuw, vmulhuw, 4, 10, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_310(vmulhud, 4, 11),
+GEN_VXFORM(vmulesb, 4, 12),
+GEN_VXFORM(vmulesh, 4, 13),
+GEN_VXFORM_DUAL(vmulesw, vmulhsw, 4, 14, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_310(vmulhsd, 4, 15),
+GEN_VXFORM(vslb, 2, 4),
+GEN_VXFORM(vslh, 2, 5),
+GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vsld, 2, 23),
+GEN_VXFORM(vsrb, 2, 8),
+GEN_VXFORM(vsrh, 2, 9),
+GEN_VXFORM(vsrw, 2, 10),
+GEN_VXFORM_207(vsrd, 2, 27),
+GEN_VXFORM(vsrab, 2, 12),
+GEN_VXFORM(vsrah, 2, 13),
+GEN_VXFORM(vsraw, 2, 14),
+GEN_VXFORM_207(vsrad, 2, 15),
+GEN_VXFORM_300(vsrv, 2, 28),
+GEN_VXFORM_300(vslv, 2, 29),
+GEN_VXFORM(vslo, 6, 16),
+GEN_VXFORM(vsro, 6, 17),
+GEN_VXFORM(vaddcuw, 0, 6),
+GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_300(bcdsr, 0, 23),
+GEN_VXFORM_300(bcdsr, 0, 31),
+GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduws, 0, 10),
+GEN_VXFORM(vaddsbs, 0, 12),
+GEN_VXFORM_DUAL(vaddshs, bcdcpsgn, 0, 13, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vaddsws, 0, 14),
+GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsubuws, 0, 26),
+GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300),
+GEN_VXFORM(vsubshs, 0, 29),
+GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vadduqm, 0, 4),
+GEN_VXFORM_207(vaddcuq, 0, 5),
+GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300),
+GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300),
+GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM(vrlb, 2, 0),
+GEN_VXFORM(vrlh, 2, 1),
+GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsr, 2, 11),
+GEN_VXFORM(vpkuhum, 7, 0),
+GEN_VXFORM(vpkuwum, 7, 1),
+GEN_VXFORM_207(vpkudum, 7, 17),
+GEN_VXFORM(vpkuhus, 7, 2),
+GEN_VXFORM(vpkuwus, 7, 3),
+GEN_VXFORM_207(vpkudus, 7, 19),
+GEN_VXFORM(vpkshus, 7, 4),
+GEN_VXFORM(vpkswus, 7, 5),
+GEN_VXFORM_207(vpksdus, 7, 21),
+GEN_VXFORM(vpkshss, 7, 6),
+GEN_VXFORM(vpkswss, 7, 7),
+GEN_VXFORM_207(vpksdss, 7, 23),
+GEN_VXFORM(vpkpx, 7, 12),
+GEN_VXFORM(vsum4ubs, 4, 24),
+GEN_VXFORM(vsum4sbs, 4, 28),
+GEN_VXFORM(vsum4shs, 4, 25),
+GEN_VXFORM(vsum2sws, 4, 26),
+GEN_VXFORM(vsumsws, 4, 30),
+GEN_VXFORM(vaddfp, 5, 0),
+GEN_VXFORM(vsubfp, 5, 1),
+GEN_VXFORM(vmaxfp, 5, 16),
+GEN_VXFORM(vminfp, 5, 17),
+
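+/*
+ * VXR-form compares: each instruction also has a record ("dot") form
+ * at opc3 | 0x10 which updates CR field 6, hence the paired entries.
+ */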
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+ GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC),
+#define GEN_VXRFORM1_300(opname, name, str, opc2, opc3) \
+GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300),
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+#define GEN_VXRFORM_300(name, opc2, opc3) \
+ GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+GEN_VXRFORM_300(vcmpnezb, 3, 4)
+GEN_VXRFORM_300(vcmpnezh, 3, 5)
+GEN_VXRFORM_300(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE)
+
+#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
+GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
+ PPC_NONE)
+GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
+GEN_VXFORM(vspltisb, 6, 12),
+GEN_VXFORM(vspltish, 6, 13),
+GEN_VXFORM(vspltisw, 6, 14),
+GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
+GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
+GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),
+GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11),
+GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18),
+GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19),
+GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A),
+GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
+GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
+GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
+GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
+GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0),
+GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1),
+GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
+GEN_VXFORM_NOA(vupkhsb, 7, 8),
+GEN_VXFORM_NOA(vupkhsh, 7, 9),
+GEN_VXFORM_207(vupkhsw, 7, 25),
+GEN_VXFORM_NOA(vupklsb, 7, 10),
+GEN_VXFORM_NOA(vupklsh, 7, 11),
+GEN_VXFORM_207(vupklsw, 7, 27),
+GEN_VXFORM_NOA(vupkhpx, 7, 13),
+GEN_VXFORM_NOA(vupklpx, 7, 15),
+GEN_VXFORM_NOA(vrefp, 5, 4),
+GEN_VXFORM_NOA(vrsqrtefp, 5, 5),
+GEN_VXFORM_NOA(vexptefp, 5, 6),
+GEN_VXFORM_NOA(vlogefp, 5, 7),
+GEN_VXFORM_NOA(vrfim, 5, 11),
+GEN_VXFORM_NOA(vrfin, 5, 8),
+GEN_VXFORM_NOA(vrfip, 5, 10),
+GEN_VXFORM_NOA(vrfiz, 5, 9),
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM_UIMM(vcfux, 5, 12),
+GEN_VXFORM_UIMM(vcfsx, 5, 13),
+GEN_VXFORM_UIMM(vctuxs, 5, 14),
+GEN_VXFORM_UIMM(vctsxs, 5, 15),
+
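+/*
+ * VA-form instructions: two mnemonics share each opc2 slot and the
+ * generated stub dispatches on the remaining opcode bits.
+ */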
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
+GEN_VAFORM_PAIRED(vsel, vperm, 21),
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
+
+GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_300(vbpermd, 6, 23),
+GEN_VXFORM_207(vbpermq, 6, 21),
+GEN_VXFORM_207(vgbbd, 6, 20),
+GEN_VXFORM_207(vpmsumb, 4, 16),
+GEN_VXFORM_207(vpmsumh, 4, 17),
+GEN_VXFORM_207(vpmsumw, 4, 18),
+GEN_VXFORM_207(vpmsumd, 4, 19),
+
+GEN_VXFORM_207(vsbox, 4, 23),
+
+GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_207(vshasigmaw, 1, 26),
+GEN_VXFORM_207(vshasigmad, 1, 27),
+
+GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE),
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
new file mode 100644
index 000000000..c0e38060b
--- /dev/null
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -0,0 +1,2193 @@
+/*** VSX extension ***/
+
+static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
+{
+ tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high));
+}
+
+static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
+{
+ tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high));
+}
+
+static inline TCGv_ptr gen_vsr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
+ return r;
+}
+
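+/*
+ * Scalar VSX loads only define the upper doubleword of the target VSR;
+ * the lower doubleword is left architecturally undefined (see the
+ * cpu_vsrl note in the macro body).
+ */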
+#define VSX_LOAD_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 t0; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##operation(ctx, t0, EA); \
+ set_cpu_vsr(xT(ctx->opcode), t0, true); \
+ /* NOTE: cpu_vsrl is undefined */ \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(t0); \
+}
+
+VSX_LOAD_SCALAR(lxsdx, ld64_i64)
+VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
+VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
+VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
+VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
+VSX_LOAD_SCALAR(lxsspx, ld32fs)
+
+static void gen_lxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_lxvw4x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
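+        /*
+         * In little-endian mode the two 32-bit words within each
+         * doubleword are swapped to recover big-endian element order:
+         * load 64 bits little-endian, then exchange the halves with a
+         * shift and deposit.
+         */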
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+static void gen_lxvwsx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i32 data;
+
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+
+ gen_addr_reg_index(ctx, EA);
+
+ data = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
+ tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+
+ tcg_temp_free(EA);
+ tcg_temp_free_i32(data);
+}
+
+static void gen_lxvdsx(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 data;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+
+ gen_addr_reg_index(ctx, EA);
+
+ data = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(data);
+}
+
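+/*
+ * Byte-swap helpers shared by the element-reversing loads and stores:
+ * gen_bswap16x8 reverses the bytes within each of the eight halfwords,
+ * gen_bswap32x4 within each of the four words of the 128-bit pair.
+ */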
+static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
+ tcg_gen_and_i64(t0, inh, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inh, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outh, t0, t1);
+
+ /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
+ tcg_gen_and_i64(t0, inl, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inl, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outl, t0, t1);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(mask);
+}
+
+static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 lo = tcg_temp_new_i64();
+
+ tcg_gen_bswap64_i64(hi, inh);
+ tcg_gen_bswap64_i64(lo, inl);
+ tcg_gen_shri_i64(outh, hi, 32);
+ tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
+ tcg_gen_shri_i64(outl, lo, 32);
+ tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
+
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(lo);
+}
+
+static void gen_lxvh8x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ if (ctx->le_mode) {
+ gen_bswap16x8(xth, xtl, xth, xtl);
+ }
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+static void gen_lxvb16x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
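+/*
+ * lxvl/lxvll and stxvl/stxvll transfer a byte count taken from the top
+ * byte of GPR[RB]; they exist only on 64-bit implementations and are
+ * handled entirely in out-of-line helpers.
+ */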
+#ifdef TARGET_PPC64
+#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_ptr xt; \
+ \
+ if (xT(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ EA = tcg_temp_new(); \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ gen_addr_register(ctx, EA); \
+ gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(xt); \
+}
+
+VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
+VSX_VECTOR_LOAD_STORE_LENGTH(lxvll)
+VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
+VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
+#endif
+
+#define VSX_LOAD_SCALAR_DS(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 xth; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ xth = tcg_temp_new_i64(); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ gen_qemu_##operation(ctx, xth, EA); \
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); \
+ /* NOTE: cpu_vsrl is undefined */ \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(xth); \
+}
+
+VSX_LOAD_SCALAR_DS(lxsd, ld64_i64)
+VSX_LOAD_SCALAR_DS(lxssp, ld32fs)
+
+#define VSX_STORE_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 t0; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ get_cpu_vsr(t0, xS(ctx->opcode), true); \
+ gen_qemu_##operation(ctx, t0, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(t0); \
+}
+
+VSX_STORE_SCALAR(stxsdx, st64_i64)
+
+VSX_STORE_SCALAR(stxsibx, st8_i64)
+VSX_STORE_SCALAR(stxsihx, st16_i64)
+VSX_STORE_SCALAR(stxsiwx, st32_i64)
+VSX_STORE_SCALAR(stxsspx, st32fs)
+
+static void gen_stxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 t0;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ get_cpu_vsr(t0, xS(ctx->opcode), true);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ get_cpu_vsr(t0, xS(ctx->opcode), false);
+ gen_qemu_st64_i64(ctx, t0, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_stxvw4x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t0, xsh, 32);
+ tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_shri_i64(t0, xsl, 32);
+ tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
+}
+
+static void gen_stxvh8x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 outh = tcg_temp_new_i64();
+ TCGv_i64 outl = tcg_temp_new_i64();
+
+ gen_bswap16x8(outh, outl, xsh, xsl);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
+}
+
+static void gen_stxvb16x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
+}
+
+#define VSX_STORE_SCALAR_DS(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_i64 xth; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ xth = tcg_temp_new_i64(); \
+ get_cpu_vsr(xth, rD(ctx->opcode) + 32, true); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ gen_qemu_##operation(ctx, xth, EA); \
+ /* NOTE: cpu_vsrl is undefined */ \
+ tcg_temp_free(EA); \
+ tcg_temp_free_i64(xth); \
+}
+
+VSX_STORE_SCALAR_DS(stxsd, st64_i64)
+VSX_STORE_SCALAR_DS(stxssp, st32fs)
+
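+/*
+ * Moves between GPRs and VSRs: availability is checked against the FP
+ * or AltiVec enable bit depending on which half of the VSR file is
+ * addressed, since VSRs 0-31 overlay the FPRs and VSRs 32-63 overlay
+ * the vector registers.
+ */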
+static void gen_mfvsrwz(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ tcg_gen_ext32u_i64(tmp, xsh);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
+}
+
+static void gen_mtvsrwa(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(xsh, tmp);
+ set_cpu_vsr(xT(ctx->opcode), xsh, true);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
+}
+
+static void gen_mtvsrwz(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(xsh, tmp);
+ set_cpu_vsr(xT(ctx->opcode), xsh, true);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_mfvsrd(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsr(t0, xS(ctx->opcode), true);
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_mtvsrd(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_mfvsrld(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsr(t0, xS(ctx->opcode), false);
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_mtvsrdd(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ t0 = tcg_temp_new_i64();
+ if (!rA(ctx->opcode)) {
+ tcg_gen_movi_i64(t0, 0);
+ } else {
+ tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ }
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
+
+ tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_mtvsrws(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ t0 = tcg_temp_new_i64();
+ tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], 32, 32);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
+ tcg_temp_free_i64(t0);
+}
+
+#endif
+
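+/*
+ * xxpermdi selects one doubleword from each source according to the
+ * 2-bit DM field; the aliased case (XT equal to XA or XB) reads both
+ * inputs before any write to the target.
+ */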
+static void gen_xxpermdi(DisasContext *ctx)
+{
+ TCGv_i64 xh, xl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ xh = tcg_temp_new_i64();
+ xl = tcg_temp_new_i64();
+
+ if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
+ (xT(ctx->opcode) == xB(ctx->opcode)))) {
+ get_cpu_vsr(xh, xA(ctx->opcode), (DM(ctx->opcode) & 2) == 0);
+ get_cpu_vsr(xl, xB(ctx->opcode), (DM(ctx->opcode) & 1) == 0);
+
+ set_cpu_vsr(xT(ctx->opcode), xh, true);
+ set_cpu_vsr(xT(ctx->opcode), xl, false);
+ } else {
+ if ((DM(ctx->opcode) & 2) == 0) {
+ get_cpu_vsr(xh, xA(ctx->opcode), true);
+ set_cpu_vsr(xT(ctx->opcode), xh, true);
+ } else {
+ get_cpu_vsr(xh, xA(ctx->opcode), false);
+ set_cpu_vsr(xT(ctx->opcode), xh, true);
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ get_cpu_vsr(xl, xB(ctx->opcode), true);
+ set_cpu_vsr(xT(ctx->opcode), xl, false);
+ } else {
+ get_cpu_vsr(xl, xB(ctx->opcode), false);
+ set_cpu_vsr(xT(ctx->opcode), xl, false);
+ }
+ }
+ tcg_temp_free_i64(xh);
+ tcg_temp_free_i64(xl);
+}
+
+#define OP_ABS 1
+#define OP_NABS 2
+#define OP_NEG 3
+#define OP_CPSGN 4
+#define SGN_MASK_DP 0x8000000000000000ull
+#define SGN_MASK_SP 0x8000000080000000ull
+
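+/*
+ * The abs/nabs/neg/cpsgn operations below are pure sign-bit
+ * manipulations, implemented with and/andc/or/xor against the sign
+ * mask instead of floating-point helpers.
+ */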
+#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_i64 xb, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xb = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ get_cpu_vsr(xb, xB(ctx->opcode), true); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xa = tcg_temp_new_i64(); \
+ get_cpu_vsr(xa, xA(ctx->opcode), true); \
+ tcg_gen_and_i64(xa, xa, sgm); \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ tcg_gen_or_i64(xb, xb, xa); \
+ tcg_temp_free_i64(xa); \
+ break; \
+ } \
+ } \
+ set_cpu_vsr(xT(ctx->opcode), xb, true); \
+ tcg_temp_free_i64(xb); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
+
+#define VSX_SCALAR_MOVE_QP(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ int xa; \
+ int xt = rD(ctx->opcode) + 32; \
+ int xb = rB(ctx->opcode) + 32; \
+ TCGv_i64 xah, xbh, xbl, sgm, tmp; \
+ \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new_i64(); \
+ xbl = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tmp = tcg_temp_new_i64(); \
+ get_cpu_vsr(xbh, xb, true); \
+ get_cpu_vsr(xbl, xb, false); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ break; \
+ case OP_NABS: \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ break; \
+ case OP_NEG: \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ break; \
+ case OP_CPSGN: \
+ xah = tcg_temp_new_i64(); \
+ xa = rA(ctx->opcode) + 32; \
+ get_cpu_vsr(tmp, xa, true); \
+ tcg_gen_and_i64(xah, tmp, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_temp_free_i64(xah); \
+ break; \
+ } \
+ set_cpu_vsr(xt, xbh, true); \
+ set_cpu_vsr(xt, xbl, false); \
+ tcg_temp_free_i64(xbl); \
+ tcg_temp_free_i64(xbh); \
+ tcg_temp_free_i64(sgm); \
+ tcg_temp_free_i64(tmp); \
+}
+
+VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
+VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
+
+#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_i64 xbh, xbl, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new_i64(); \
+ xbl = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ get_cpu_vsr(xbh, xB(ctx->opcode), true); \
+ get_cpu_vsr(xbl, xB(ctx->opcode), false); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ tcg_gen_xor_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xah = tcg_temp_new_i64(); \
+ TCGv_i64 xal = tcg_temp_new_i64(); \
+ get_cpu_vsr(xah, xA(ctx->opcode), true); \
+ get_cpu_vsr(xal, xA(ctx->opcode), false); \
+ tcg_gen_and_i64(xah, xah, sgm); \
+ tcg_gen_and_i64(xal, xal, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_gen_or_i64(xbl, xbl, xal); \
+ tcg_temp_free_i64(xah); \
+ tcg_temp_free_i64(xal); \
+ break; \
+ } \
+ } \
+ set_cpu_vsr(xT(ctx->opcode), xbh, true); \
+ set_cpu_vsr(xT(ctx->opcode), xbl, false); \
+ tcg_temp_free_i64(xbh); \
+ tcg_temp_free_i64(xbl); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
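+/*
+ * VSX vector compares write CR field 6 only when the record bit (bit
+ * 21) is set; otherwise the CR result computed by the helper is
+ * discarded.
+ */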
+#define VSX_CMP(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 ignored; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ if ((ctx->opcode >> (31 - 21)) & 1) { \
+ gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
+ } else { \
+ ignored = tcg_temp_new_i32(); \
+ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
+ tcg_temp_free_i32(ignored); \
+ } \
+ gen_helper_float_check_status(cpu_env); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
+VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
+VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
+VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
+VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+
+static void gen_xscvqpdp(DisasContext *ctx)
+{
+ TCGv_i32 opc;
+ TCGv_ptr xt, xb;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ opc = tcg_const_i32(ctx->opcode);
+ xt = gen_vsr_ptr(xT(ctx->opcode));
+ xb = gen_vsr_ptr(xB(ctx->opcode));
+ gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
+ tcg_temp_free_i32(opc);
+ tcg_temp_free_ptr(xt);
+ tcg_temp_free_ptr(xb);
+}
+
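+/*
+ * Boilerplate generators for helper-backed VSX operations. The X*
+ * variants take VSX XT/XA/XB register fields, the R* variants address
+ * the upper half of the VSR file (VR number + 32), and the
+ * op1/op2/inval/type arguments only document the opcode assignment.
+ */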
+#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ gen_helper_##name(cpu_env, opc); \
+ tcg_temp_free_i32(opc); \
+}
+
+#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xa, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ get_cpu_vsr(t0, xB(ctx->opcode), true); \
+ gen_helper_##name(t1, cpu_env, t0); \
+ set_cpu_vsr(xT(ctx->opcode), t1, true); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
+GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
+GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300)
+
+GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
+
+GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+
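+/*
+ * Fused multiply-add: opcode bit 25 selects between the A*T + B and
+ * A*B + T operand orders, so one helper serves both the "a" and "m"
+ * mnemonic forms (e.g. xsmaddadp/xsmaddmdp).
+ */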
+#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, b, c; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ if (ctx->opcode & PPC_BIT32(25)) { \
+ /* \
+ * AxT + B \
+ */ \
+ b = gen_vsr_ptr(xT(ctx->opcode)); \
+ c = gen_vsr_ptr(xB(ctx->opcode)); \
+ } else { \
+ /* \
+ * AxB + T \
+ */ \
+ b = gen_vsr_ptr(xB(ctx->opcode)); \
+ c = gen_vsr_ptr(xT(ctx->opcode)); \
+ } \
+ gen_helper_##name(cpu_env, xt, xa, b, c); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(b); \
+ tcg_temp_free_ptr(c); \
+}
+
+GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
+
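+/*
+ * xxbrh/xxbrw/xxbrd/xxbrq byte-reverse the vector elements of the
+ * given width; xxbrq also exchanges the two doublewords so that the
+ * whole quadword is reversed.
+ */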
+static void gen_xxbrd(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ tcg_gen_bswap64_i64(xth, xbh);
+ tcg_gen_bswap64_i64(xtl, xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xxbrh(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ gen_bswap16x8(xth, xtl, xbh, xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xxbrq(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+ TCGv_i64 t0;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+ t0 = tcg_temp_new_i64();
+
+ tcg_gen_bswap64_i64(t0, xbl);
+ tcg_gen_bswap64_i64(xtl, xbh);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+ tcg_gen_mov_i64(xth, t0);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xxbrw(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ gen_bswap32x4(xth, xtl, xbh, xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+#define VSX_LOGICAL(name, vece, tcg_op) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ tcg_op(vece, vsr_full_offset(xT(ctx->opcode)), \
+ vsr_full_offset(xA(ctx->opcode)), \
+ vsr_full_offset(xB(ctx->opcode)), 16, 16); \
+ }
+
+VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
+VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
+VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
+VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
+VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
+VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
+VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
+VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
+
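+/*
+ * xxmrghw/xxmrglw interleave 32-bit words from the selected doubleword
+ * of each source: the shifted copies expose one word, the unshifted
+ * copies the other, and deposit packs them as { A.w, B.w } pairs.
+ */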
+#define VSX_XXMRG(name, high) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_i64 a0, a1, b0, b1, tmp; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ a0 = tcg_temp_new_i64(); \
+ a1 = tcg_temp_new_i64(); \
+ b0 = tcg_temp_new_i64(); \
+ b1 = tcg_temp_new_i64(); \
+ tmp = tcg_temp_new_i64(); \
+ get_cpu_vsr(a0, xA(ctx->opcode), high); \
+ get_cpu_vsr(a1, xA(ctx->opcode), high); \
+ get_cpu_vsr(b0, xB(ctx->opcode), high); \
+ get_cpu_vsr(b1, xB(ctx->opcode), high); \
+ tcg_gen_shri_i64(a0, a0, 32); \
+ tcg_gen_shri_i64(b0, b0, 32); \
+ tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \
+ set_cpu_vsr(xT(ctx->opcode), tmp, true); \
+ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \
+ set_cpu_vsr(xT(ctx->opcode), tmp, false); \
+ tcg_temp_free_i64(a0); \
+ tcg_temp_free_i64(a1); \
+ tcg_temp_free_i64(b0); \
+ tcg_temp_free_i64(b1); \
+ tcg_temp_free_i64(tmp); \
+ }
+
+VSX_XXMRG(xxmrghw, 1)
+VSX_XXMRG(xxmrglw, 0)
+
+static void gen_xxsel(DisasContext *ctx)
+{
+ int rt = xT(ctx->opcode);
+ int ra = xA(ctx->opcode);
+ int rb = xB(ctx->opcode);
+ int rc = xC(ctx->opcode);
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(rt), vsr_full_offset(rc),
+ vsr_full_offset(rb), vsr_full_offset(ra), 16, 16);
+}
+
+static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a)
+{
+ int tofs, bofs;
+
+ REQUIRE_VSX(ctx);
+
+ tofs = vsr_full_offset(a->xt);
+ bofs = vsr_full_offset(a->xb);
+ bofs += a->uim << MO_32;
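+    /*
+     * vsr_full_offset() is a host-memory-order offset, so on
+     * little-endian hosts the word index must be flipped within the
+     * 16-byte register, hence the xor below.
+     */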
+#ifndef HOST_WORDS_BIG_ENDIAN
+ bofs ^= 8 | 4;
+#endif
+
+ tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
+ return true;
+}
+
+#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
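+/* e.g. pattern(0xAB) == 0xABABABABABABABABull (byte replicated to all lanes) */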
+
+static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
+{
+ if (a->xt < 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+ tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
+ return true;
+}
+
+static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);
+
+ return true;
+}
+
+static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
+ helper_todouble(a->si));
+ return true;
+}
+
+static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
+{
+ TCGv_i32 imm;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ imm = tcg_constant_i32(a->si);
+
+ tcg_gen_st_i32(imm, cpu_env,
+ offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
+ tcg_gen_st_i32(imm, cpu_env,
+ offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
+
+ return true;
+}
+
+static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
+{
+ static const uint64_t values[32] = {
+ 0, /* Unspecified */
+ 0x3FFF000000000000llu, /* QP +1.0 */
+ 0x4000000000000000llu, /* QP +2.0 */
+ 0x4000800000000000llu, /* QP +3.0 */
+ 0x4001000000000000llu, /* QP +4.0 */
+ 0x4001400000000000llu, /* QP +5.0 */
+ 0x4001800000000000llu, /* QP +6.0 */
+ 0x4001C00000000000llu, /* QP +7.0 */
+ 0x7FFF000000000000llu, /* QP +Inf */
+ 0x7FFF800000000000llu, /* QP dQNaN */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0x8000000000000000llu, /* QP -0.0 */
+ 0xBFFF000000000000llu, /* QP -1.0 */
+ 0xC000000000000000llu, /* QP -2.0 */
+ 0xC000800000000000llu, /* QP -3.0 */
+ 0xC001000000000000llu, /* QP -4.0 */
+ 0xC001400000000000llu, /* QP -5.0 */
+ 0xC001800000000000llu, /* QP -6.0 */
+ 0xC001C00000000000llu, /* QP -7.0 */
+ 0xFFFF000000000000llu, /* QP -Inf */
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
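+ /*
+ * Zero entries, including the implicit 25..31 tail of the table,
+ * are reserved UIM encodings and decode as an invalid opcode.
+ */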
+ if (values[a->uim]) {
+ set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
+ set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
+ } else {
+ gen_invalid(ctx);
+ }
+
+ return true;
+}
+
+static void gen_xxsldwi(DisasContext *ctx)
+{
+ TCGv_i64 xth, xtl;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+
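+ /*
+ * Conceptually, concatenate VSR[XA]:VSR[XB] into a 256-bit value and
+ * take the 128 bits starting SHW words (SHW * 32 bits) from the left.
+ */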
+ switch (SHW(ctx->opcode)) {
+ case 0: {
+ get_cpu_vsr(xth, xA(ctx->opcode), true);
+ get_cpu_vsr(xtl, xA(ctx->opcode), false);
+ break;
+ }
+ case 1: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ get_cpu_vsr(xth, xA(ctx->opcode), true);
+ tcg_gen_shli_i64(xth, xth, 32);
+ get_cpu_vsr(t0, xA(ctx->opcode), false);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ get_cpu_vsr(xtl, xA(ctx->opcode), false);
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ case 2: {
+ get_cpu_vsr(xth, xA(ctx->opcode), false);
+ get_cpu_vsr(xtl, xB(ctx->opcode), true);
+ break;
+ }
+ case 3: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ get_cpu_vsr(xth, xA(ctx->opcode), false);
+ tcg_gen_shli_i64(xth, xth, 32);
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ get_cpu_vsr(xtl, xB(ctx->opcode), true);
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ get_cpu_vsr(t0, xB(ctx->opcode), false);
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ }
+
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+#define VSX_EXTRACT_INSERT(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xb; \
+ TCGv_i32 t0; \
+ TCGv_i64 t1; \
+ uint8_t uimm = UIMM4(ctx->opcode); \
+ \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i64(); \
+ /* \
+ * uimm > 15 is out of bounds; 12 < uimm <= 15 is \
+ * handled as per hardware in the helper \
+ */ \
+ if (uimm > 15) { \
+ tcg_gen_movi_i64(t1, 0); \
+ set_cpu_vsr(xT(ctx->opcode), t1, true); \
+ set_cpu_vsr(xT(ctx->opcode), t1, false); \
+ tcg_temp_free_ptr(xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i64(t1); \
+ return; \
+ } \
+ tcg_gen_movi_i32(t0, uimm); \
+ gen_helper_##name(cpu_env, xt, xb, t0); \
+ tcg_temp_free_ptr(xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+VSX_EXTRACT_INSERT(xxextractuw)
+VSX_EXTRACT_INSERT(xxinsertw)
+
+#ifdef TARGET_PPC64
+static void gen_xsxexpdp(DisasContext *ctx)
+{
+ TCGv rt = cpu_gpr[rD(ctx->opcode)];
+ TCGv_i64 t0;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
+ tcg_gen_extract_i64(rt, t0, 52, 11);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_xsxexpqp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
+
+ tcg_gen_extract_i64(xth, xbh, 48, 15);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
+ tcg_gen_movi_i64(xtl, 0);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
+
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+static void gen_xsiexpdp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv rb = cpu_gpr[rB(ctx->opcode)];
+ TCGv_i64 t0;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ t0 = tcg_temp_new_i64();
+ xth = tcg_temp_new_i64();
+ tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
+ tcg_gen_andi_i64(t0, rb, 0x7FF);
+ tcg_gen_shli_i64(t0, t0, 52);
+ tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ /* dword[1] is undefined */
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+}
+
+static void gen_xsiexpqp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
+ TCGv_i64 t0;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
+ get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
+ xbh = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
+ t0 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
+ tcg_gen_andi_i64(t0, xbh, 0x7FFF);
+ tcg_gen_shli_i64(t0, t0, 48);
+ tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
+ tcg_gen_mov_i64(xtl, xal);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
+}
+
+static void gen_xsxsigdp(DisasContext *ctx)
+{
+ TCGv rt = cpu_gpr[rD(ctx->opcode)];
+ TCGv_i64 t0, t1, zr, nan, exp;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ exp = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ zr = tcg_const_i64(0);
+ nan = tcg_const_i64(2047);
+
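+ /*
+ * The implicit leading significand bit (0x0010000000000000) is
+ * suppressed when the biased exponent is 0 (zero/denormal) or
+ * 2047 (Inf/NaN).
+ */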
+ get_cpu_vsr(t1, xB(ctx->opcode), true);
+ tcg_gen_extract_i64(exp, t1, 52, 11);
+ tcg_gen_movi_i64(t0, 0x0010000000000000);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
+ get_cpu_vsr(t1, xB(ctx->opcode), true);
+ tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(exp);
+ tcg_temp_free_i64(zr);
+ tcg_temp_free_i64(nan);
+}
+
+static void gen_xsxsigqp(DisasContext *ctx)
+{
+ TCGv_i64 t0, zr, nan, exp;
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
+ get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
+ exp = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ zr = tcg_const_i64(0);
+ nan = tcg_const_i64(32767);
+
+ tcg_gen_extract_i64(exp, xbh, 48, 15);
+ tcg_gen_movi_i64(t0, 0x0001000000000000);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
+ tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
+ tcg_gen_mov_i64(xtl, xbl);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(exp);
+ tcg_temp_free_i64(zr);
+ tcg_temp_free_i64(nan);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+#endif
+
+static void gen_xviexpsp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+ TCGv_i64 t0;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xah, xA(ctx->opcode), true);
+ get_cpu_vsr(xal, xA(ctx->opcode), false);
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+ t0 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
+ tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
+ tcg_gen_shli_i64(t0, t0, 23);
+ tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
+ tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
+ tcg_gen_shli_i64(t0, t0, 23);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xviexpdp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xah, xA(ctx->opcode), true);
+ get_cpu_vsr(xal, xA(ctx->opcode), false);
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+
+ tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xvxexpsp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ tcg_gen_shri_i64(xth, xbh, 23);
+ tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ tcg_gen_shri_i64(xtl, xbl, 23);
+ tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+static void gen_xvxexpdp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+
+ tcg_gen_extract_i64(xth, xbh, 52, 11);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ tcg_gen_extract_i64(xtl, xbl, 52, 11);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
+GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
+
+static void gen_xvxsigdp(DisasContext *ctx)
+{
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+ TCGv_i64 t0, zr, nan, exp;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
+ exp = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ zr = tcg_const_i64(0);
+ nan = tcg_const_i64(2047);
+
+ tcg_gen_extract_i64(exp, xbh, 52, 11);
+ tcg_gen_movi_i64(t0, 0x0010000000000000);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
+ tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+
+ tcg_gen_extract_i64(exp, xbl, 52, 11);
+ tcg_gen_movi_i64(t0, 0x0010000000000000);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
+ tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(exp);
+ tcg_temp_free_i64(zr);
+ tcg_temp_free_i64(nan);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
+}
+
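+/*
+ * Common body for the LXV/STXV family: the two 64-bit halves of each VSR
+ * are transferred in guest-endian order (le_mode selects which half goes
+ * first), and the paired variants additionally swap the register pair in
+ * little-endian mode.
+ */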
+static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
+ int rt, bool store, bool paired)
+{
+ TCGv ea;
+ TCGv_i64 xt;
+ MemOp mop;
+ int rt1, rt2;
+
+ xt = tcg_temp_new_i64();
+
+ mop = DEF_MEMOP(MO_Q);
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, ra, displ);
+
+ if (paired && ctx->le_mode) {
+ rt1 = rt + 1;
+ rt2 = rt;
+ } else {
+ rt1 = rt;
+ rt2 = rt + 1;
+ }
+
+ if (store) {
+ get_cpu_vsr(xt, rt1, !ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt1, ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ if (paired) {
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt2, !ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt2, ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ }
+ } else {
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt1, xt, !ctx->le_mode);
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt1, xt, ctx->le_mode);
+ if (paired) {
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt2, xt, !ctx->le_mode);
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt2, xt, ctx->le_mode);
+ }
+ }
+
+ tcg_temp_free(ea);
+ tcg_temp_free_i64(xt);
+ return true;
+}
+
+static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
+{
+ if (paired) {
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ } else {
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ }
+
+ if (paired || a->rt >= 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+
+ return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
+}
+
+static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
+ bool store, bool paired)
+{
+ arg_D d;
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
+}
+
+static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
+{
+ if (paired) {
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ } else {
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ }
+
+ if (paired || a->rt >= 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+
+ return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
+}
+
+TRANS(STXV, do_lstxv_D, true, false)
+TRANS(LXV, do_lstxv_D, false, false)
+TRANS(STXVP, do_lstxv_D, true, true)
+TRANS(LXVP, do_lstxv_D, false, true)
+TRANS(STXVX, do_lstxv_X, true, false)
+TRANS(LXVX, do_lstxv_X, false, false)
+TRANS(STXVPX, do_lstxv_X, true, true)
+TRANS(LXVPX, do_lstxv_X, false, true)
+TRANS64(PSTXV, do_lstxv_PLS_D, true, false)
+TRANS64(PLXV, do_lstxv_PLS_D, false, false)
+TRANS64(PSTXVP, do_lstxv_PLS_D, true, true)
+TRANS64(PLXVP, do_lstxv_PLS_D, false, true)
+
+static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ TCGv_vec c)
+{
+ TCGv_vec tmp = tcg_temp_new_vec_matching(c);
+ tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
+ tcg_gen_bitsel_vec(vece, t, tmp, b, a);
+ tcg_temp_free_vec(tmp);
+}
+
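+/*
+ * Each element of XC acts as a mask via its sign bit: sign-replicating
+ * the top bit yields all-ones or all-zeroes, and the bitsel then picks
+ * the element from B (mask set) or from A (mask clear).
+ */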
+static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVB,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVH,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVW,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVD,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ }
+ };
+
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+ vsr_full_offset(a->xb), vsr_full_offset(a->xc),
+ 16, 16, &ops[vece]);
+
+ return true;
+}
+
+TRANS(XXBLENDVB, do_xxblendv, MO_8)
+TRANS(XXBLENDVH, do_xxblendv, MO_16)
+TRANS(XXBLENDVW, do_xxblendv, MO_32)
+TRANS(XXBLENDVD, do_xxblendv, MO_64)
+
+#undef GEN_XX2FORM
+#undef GEN_XX3FORM
+#undef GEN_XX2IFORM
+#undef GEN_XX3_RC_FORM
+#undef GEN_XX3FORM_DM
+#undef VSX_LOGICAL
diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
new file mode 100644
index 000000000..152d1e5c3
--- /dev/null
+++ b/target/ppc/translate/vsx-ops.c.inc
@@ -0,0 +1,398 @@
+GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvwsx, 0x1F, 0x0C, 0x0B, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300),
+#endif
+
+GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300),
+#endif
+
+GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mfvsrld, 0x1F, 0x13, 0x09, 0x0000F800, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrdd, 0x1F, 0x13, 0x0D, 0x0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrws, 0x1F, 0x13, 0x0C, 0x0000F800, PPC_NONE, PPC2_ISA300),
+#endif
+
+#define GEN_XX1FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM_EXT(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0x00100000, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0x00100000, PPC_NONE, fl2)
+
+#define GEN_XX2FORM_EO(name, opc2, opc3, opc4, fl2) \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 0, opc3, opc4, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 1, opc3, opc4, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
+
+#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM_DM(name, opc2, opc3) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX)
+
+#define GEN_VSX_XFORM_300(name, opc2, opc3, inval) \
+GEN_HANDLER_E(name, 0x3F, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VSX_XFORM_300_EO(name, opc2, opc3, opc4, inval) \
+GEN_HANDLER_E_2(name, 0x3F, opc2, opc3, opc4, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VSX_Z23FORM_300(name, opc2, opc3, opc4, inval) \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x0, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x00, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x08, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x10, opc4 | 0x1, inval), \
+GEN_VSX_XFORM_300_EO(name, opc2, opc3 | 0x18, opc4 | 0x1, inval)
+
+GEN_VSX_Z23FORM_300(xsrqpi, 0x05, 0x0, 0x0, 0x0),
+GEN_VSX_Z23FORM_300(xsrqpxp, 0x05, 0x1, 0x0, 0x0),
+GEN_VSX_XFORM_300_EO(xssqrtqp, 0x04, 0x19, 0x1B, 0x0),
+GEN_VSX_XFORM_300(xssubqp, 0x04, 0x10, 0x0),
+
+GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
+GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
+
+GEN_VSX_XFORM_300_EO(xsabsqp, 0x04, 0x19, 0x00, 0x00000001),
+GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001),
+GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001),
+GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0),
+GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001),
+GEN_VSX_XFORM_300_EO(xscvqpuwz, 0x04, 0x1A, 0x01, 0x00000001),
+
+#ifdef TARGET_PPC64
+GEN_XX2FORM_EO(xsxexpdp, 0x16, 0x15, 0x00, PPC2_ISA300),
+GEN_VSX_XFORM_300_EO(xsxexpqp, 0x04, 0x19, 0x02, 0x00000001),
+GEN_XX2FORM_EO(xsxsigdp, 0x16, 0x15, 0x01, PPC2_ISA300),
+GEN_VSX_XFORM_300_EO(xsxsigqp, 0x04, 0x19, 0x12, 0x00000001),
+GEN_HANDLER_E(xsiexpdp, 0x3C, 0x16, 0x1C, 0, PPC_NONE, PPC2_ISA300),
+GEN_VSX_XFORM_300(xsiexpqp, 0x4, 0x1B, 0x00000001),
+#endif
+
+GEN_XX2FORM(xststdcdp, 0x14, 0x16, PPC2_ISA300),
+GEN_XX2FORM(xststdcsp, 0x14, 0x12, PPC2_ISA300),
+GEN_VSX_XFORM_300(xststdcqp, 0x04, 0x16, 0x00000001),
+
+GEN_XX3FORM(xviexpsp, 0x00, 0x1B, PPC2_ISA300),
+GEN_XX3FORM(xviexpdp, 0x00, 0x1F, PPC2_ISA300),
+GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300),
+GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300),
+GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300),
+GEN_XX2FORM_EO(xvxsigsp, 0x16, 0x1D, 0x09, PPC2_ISA300),
+
+/* DCMX = bit[25] << 6 | bit[29] << 5 | bit[11:15] */
+#define GEN_XX2FORM_DCMX(name, opc2, opc3, fl2) \
+GEN_XX3FORM(name, opc2, opc3 | 0, fl2), \
+GEN_XX3FORM(name, opc2, opc3 | 1, fl2)
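+/*
+ * DCMX overlaps the opcode fields, so each xvtstdc* form is registered
+ * under both settings of the opc3 bit it consumes (and the four opc2
+ * variants expanded by GEN_XX3FORM).
+ */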
+
+GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300),
+GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300),
+
+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
+
+GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
+GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0),
+GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
+GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX),
+GEN_VSX_XFORM_300(xsmulqp, 0x04, 0x01, 0x0),
+GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX),
+GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
+GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
+GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
+GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
+GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
+GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
+GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300),
+GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001),
+GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
+GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
+GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
+GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
+GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
+GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300),
+GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300),
+GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300),
+GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300),
+GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
+GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
+GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
+GEN_XX2FORM_EO(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300),
+GEN_VSX_XFORM_300_EO(xscvsdqp, 0x04, 0x1A, 0x0A, 0x00000001),
+GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207),
+GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX),
+GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX),
+GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX),
+GEN_VSX_XFORM_300_EO(xscvudqp, 0x04, 0x1A, 0x02, 0x00000001),
+GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsrdpi, 0x12, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX),
+
+GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207),
+GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207),
+GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207),
+GEN_VSX_XFORM_300(xsdivqp, 0x04, 0x11, 0x0),
+GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
+GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
+GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
+GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
+
+GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
+GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
+GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300),
+GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX),
+
+GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX),
+GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
+GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300),
+GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX),
+GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrw, 0x16, 0x1D, 0x0F, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrd, 0x16, 0x1D, 0x17, PPC2_ISA300),
+GEN_XX2FORM_EO(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300),
+GEN_XX2FORM_EO(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300),
+
+#define VSX_LOGICAL(name, opc2, opc3, fl2) \
+GEN_XX3FORM(name, opc2, opc3, fl2)
+
+VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
+VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
+VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
+VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
+VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
+VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207),
+VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
+VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
+GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
+GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
+GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300),
+GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300),
+GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
+GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300),
+GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300),
+
+#define GEN_XXSEL_ROW(opc3) \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
+
+GEN_XXSEL_ROW(0x00)
+GEN_XXSEL_ROW(0x01)
+GEN_XXSEL_ROW(0x02)
+GEN_XXSEL_ROW(0x03)
+GEN_XXSEL_ROW(0x04)
+GEN_XXSEL_ROW(0x05)
+GEN_XXSEL_ROW(0x06)
+GEN_XXSEL_ROW(0x07)
+GEN_XXSEL_ROW(0x08)
+GEN_XXSEL_ROW(0x09)
+GEN_XXSEL_ROW(0x0A)
+GEN_XXSEL_ROW(0x0B)
+GEN_XXSEL_ROW(0x0C)
+GEN_XXSEL_ROW(0x0D)
+GEN_XXSEL_ROW(0x0E)
+GEN_XXSEL_ROW(0x0F)
+GEN_XXSEL_ROW(0x10)
+GEN_XXSEL_ROW(0x11)
+GEN_XXSEL_ROW(0x12)
+GEN_XXSEL_ROW(0x13)
+GEN_XXSEL_ROW(0x14)
+GEN_XXSEL_ROW(0x15)
+GEN_XXSEL_ROW(0x16)
+GEN_XXSEL_ROW(0x17)
+GEN_XXSEL_ROW(0x18)
+GEN_XXSEL_ROW(0x19)
+GEN_XXSEL_ROW(0x1A)
+GEN_XXSEL_ROW(0x1B)
+GEN_XXSEL_ROW(0x1C)
+GEN_XXSEL_ROW(0x1D)
+GEN_XXSEL_ROW(0x1E)
+GEN_XXSEL_ROW(0x1F)
+
+GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
new file mode 100644
index 000000000..7ff76f7a0
--- /dev/null
+++ b/target/ppc/user_only_helper.c
@@ -0,0 +1,56 @@
+/*
+ * PowerPC MMU stub handling for user mode emulation
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "internal.h"
+
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int exception, error_code;
+
+ /*
+ * Both DSISR and the "trap number" (exception vector offset,
+ * looked up from exception_index) are present in the linux-user
+ * signal frame.
+ * FIXME: we don't actually populate the trap number properly.
+ * It would be easiest to fill in an env->trap value now.
+ */
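+ /*
+ * 0x40000000 is the DSISR/SRR1 bit for "no translation found";
+ * stores additionally set the store-access bit 0x02000000.
+ */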
+ if (access_type == MMU_INST_FETCH) {
+ exception = POWERPC_EXCP_ISI;
+ error_code = 0x40000000;
+ } else {
+ exception = POWERPC_EXCP_DSI;
+ error_code = 0x40000000;
+ if (access_type == MMU_DATA_STORE) {
+ error_code |= 0x02000000;
+ }
+ env->spr[SPR_DAR] = address;
+ env->spr[SPR_DSISR] = error_code;
+ }
+ cs->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit_restore(cs, retaddr);
+}
diff --git a/target/riscv/Kconfig b/target/riscv/Kconfig
new file mode 100644
index 000000000..b9e5932f1
--- /dev/null
+++ b/target/riscv/Kconfig
@@ -0,0 +1,5 @@
+config RISCV32
+ bool
+
+config RISCV64
+ bool
diff --git a/target/riscv/arch_dump.c b/target/riscv/arch_dump.c
new file mode 100644
index 000000000..709f621d8
--- /dev/null
+++ b/target/riscv/arch_dump.c
@@ -0,0 +1,202 @@
+/* Support for writing ELF notes for RISC-V architectures
+ *
+ * Copyright (C) 2021 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+
+/* struct user_regs_struct from arch/riscv/include/uapi/asm/ptrace.h */
+struct riscv64_user_regs {
+ uint64_t pc;
+ uint64_t regs[31];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct riscv64_user_regs) != 256);
+
+/* struct elf_prstatus from include/linux/elfcore.h */
+struct riscv64_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct riscv64_user_regs pr_reg;
+ char pad3[8];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct riscv64_elf_prstatus) != 376);
+
+struct riscv64_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ struct riscv64_elf_prstatus prstatus;
+} QEMU_PACKED;
+
+#define RISCV64_NOTE_HEADER_SIZE offsetof(struct riscv64_note, prstatus)
+#define RISCV64_PRSTATUS_NOTE_SIZE \
+ (RISCV64_NOTE_HEADER_SIZE + sizeof(struct riscv64_elf_prstatus))
+
+static void riscv64_note_init(struct riscv64_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct riscv64_note note;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ DumpState *s = opaque;
+ int ret, i = 0;
+ const char name[] = "CORE";
+
+ riscv64_note_init(&note, s, name, sizeof(name),
+ NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+
+ note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
+
+ for (i = 0; i < 31; i++) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->gpr[i + 1]);
+ }
+
+ ret = f(&note, RISCV64_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return ret;
+}
+
+struct riscv32_user_regs {
+ uint32_t pc;
+ uint32_t regs[31];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct riscv32_user_regs) != 128);
+
+struct riscv32_elf_prstatus {
+ char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct riscv32_user_regs pr_reg;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct riscv32_elf_prstatus) != 204);
+
+struct riscv32_note {
+ Elf32_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ struct riscv32_elf_prstatus prstatus;
+} QEMU_PACKED;
+
+#define RISCV32_NOTE_HEADER_SIZE offsetof(struct riscv32_note, prstatus)
+#define RISCV32_PRSTATUS_NOTE_SIZE \
+ (RISCV32_NOTE_HEADER_SIZE + sizeof(struct riscv32_elf_prstatus))
+
+static void riscv32_note_init(struct riscv32_note *note, DumpState *s,
+ const char *name, Elf32_Word namesz,
+ Elf32_Word type, Elf32_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct riscv32_note note;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ DumpState *s = opaque;
+ int ret, i;
+ const char name[] = "CORE";
+
+ riscv32_note_init(&note, s, name, sizeof(name),
+ NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+
+ note.prstatus.pr_reg.pc = cpu_to_dump32(s, env->pc);
+
+ for (i = 0; i < 31; i++) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->gpr[i + 1]);
+ }
+
+ ret = f(&note, RISCV32_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return ret;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ RISCVCPU *cpu;
+ CPURISCVState *env;
+
+ if (first_cpu == NULL) {
+ return -1;
+ }
+ cpu = RISCV_CPU(first_cpu);
+ env = &cpu->env;
+
+ info->d_machine = EM_RISCV;
+
+#if defined(TARGET_RISCV64)
+ info->d_class = ELFCLASS64;
+#else
+ info->d_class = ELFCLASS32;
+#endif
+
+ info->d_endian = (env->mstatus & MSTATUS_UBE) != 0
+ ? ELFDATA2MSB : ELFDATA2LSB;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ size_t note_size;
+
+ if (class == ELFCLASS64) {
+ note_size = RISCV64_PRSTATUS_NOTE_SIZE;
+ } else {
+ note_size = RISCV32_PRSTATUS_NOTE_SIZE;
+ }
+
+ return note_size * nr_cpus;
+}
diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c
new file mode 100644
index 000000000..f1b5e5549
--- /dev/null
+++ b/target/riscv/bitmanip_helper.c
@@ -0,0 +1,51 @@
+/*
+ * RISC-V Bitmanip Extension Helpers for QEMU.
+ *
+ * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com
+ * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com
+ * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg.h"
+
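+/*
+ * Carry-less multiply: each set bit i of rs2 XORs (rs1 << i) into the
+ * result, e.g. clmul(0b101, 0b110) = (0b101 << 1) ^ (0b101 << 2) = 0b11110.
+ */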
+target_ulong HELPER(clmul)(target_ulong rs1, target_ulong rs2)
+{
+ target_ulong result = 0;
+
+ for (int i = 0; i < TARGET_LONG_BITS; i++) {
+ if ((rs2 >> i) & 1) {
+ result ^= (rs1 << i);
+ }
+ }
+
+ return result;
+}
+
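+/*
+ * clmulr is the bit-reversed variant: bit i of rs2 contributes
+ * rs1 >> (XLEN - 1 - i), which equals the bit reversal of clmul applied
+ * to the bit-reversed operands.
+ */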
+target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2)
+{
+ target_ulong result = 0;
+
+ for (int i = 0; i < TARGET_LONG_BITS; i++) {
+ if ((rs2 >> i) & 1) {
+ result ^= (rs1 >> (TARGET_LONG_BITS - i - 1));
+ }
+ }
+
+ return result;
+}
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
new file mode 100644
index 000000000..80eb615f9
--- /dev/null
+++ b/target/riscv/cpu-param.h
@@ -0,0 +1,32 @@
+/*
+ * RISC-V cpu parameters for qemu.
+ *
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef RISCV_CPU_PARAM_H
+#define RISCV_CPU_PARAM_H 1
+
+#if defined(TARGET_RISCV64)
+# define TARGET_LONG_BITS 64
+# define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
+# define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
+#elif defined(TARGET_RISCV32)
+# define TARGET_LONG_BITS 32
+# define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
+# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
+#endif
+#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
+/*
+ * The current MMU Modes are:
+ * - U mode 0b000
+ * - S mode 0b001
+ * - M mode 0b011
+ * - U mode HLV/HLVX/HSV 0b100
+ * - S mode HLV/HLVX/HSV 0b101
+ * - M mode HLV/HLVX/HSV 0b111
+ */
+#define NB_MMU_MODES 8
+
+#endif
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
new file mode 100644
index 000000000..f81299812
--- /dev/null
+++ b/target/riscv/cpu.c
@@ -0,0 +1,819 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "qemu/ctype.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "fpu/softfloat-helpers.h"
+
+/* RISC-V CPU definitions */
+
+static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG";
+
+const char * const riscv_int_regnames[] = {
+ "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
+ "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
+ "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
+ "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
+ "x28/t3", "x29/t4", "x30/t5", "x31/t6"
+};
+
+const char * const riscv_fpr_regnames[] = {
+ "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
+ "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
+ "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
+ "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
+ "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
+ "f30/ft10", "f31/ft11"
+};
+
+static const char * const riscv_excp_names[] = {
+ "misaligned_fetch",
+ "fault_fetch",
+ "illegal_instruction",
+ "breakpoint",
+ "misaligned_load",
+ "fault_load",
+ "misaligned_store",
+ "fault_store",
+ "user_ecall",
+ "supervisor_ecall",
+ "hypervisor_ecall",
+ "machine_ecall",
+ "exec_page_fault",
+ "load_page_fault",
+ "reserved",
+ "store_page_fault",
+ "reserved",
+ "reserved",
+ "reserved",
+ "reserved",
+ "guest_exec_page_fault",
+ "guest_load_page_fault",
+ "reserved",
+ "guest_store_page_fault",
+};
+
+static const char * const riscv_intr_names[] = {
+ "u_software",
+ "s_software",
+ "vs_software",
+ "m_software",
+ "u_timer",
+ "s_timer",
+ "vs_timer",
+ "m_timer",
+ "u_external",
+ "s_external",
+ "vs_external",
+ "m_external",
+ "reserved",
+ "reserved",
+ "reserved",
+ "reserved"
+};
+
+const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
+{
+ if (async) {
+ return (cause < ARRAY_SIZE(riscv_intr_names)) ?
+ riscv_intr_names[cause] : "(unknown)";
+ } else {
+ return (cause < ARRAY_SIZE(riscv_excp_names)) ?
+ riscv_excp_names[cause] : "(unknown)";
+ }
+}
+
+static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
+{
+ env->misa_mxl_max = env->misa_mxl = mxl;
+ env->misa_ext_mask = env->misa_ext = ext;
+}
+
+static void set_priv_version(CPURISCVState *env, int priv_ver)
+{
+ env->priv_ver = priv_ver;
+}
+
+static void set_vext_version(CPURISCVState *env, int vext_ver)
+{
+ env->vext_ver = vext_ver;
+}
+
+static void set_feature(CPURISCVState *env, int feature)
+{
+ env->features |= (1ULL << feature);
+}
+
+static void set_resetvec(CPURISCVState *env, target_ulong resetvec)
+{
+#ifndef CONFIG_USER_ONLY
+ env->resetvec = resetvec;
+#endif
+}
+
+static void riscv_any_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+#if defined(TARGET_RISCV32)
+ set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
+#elif defined(TARGET_RISCV64)
+ set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
+#endif
+ set_priv_version(env, PRIV_VERSION_1_11_0);
+}
+
+#if defined(TARGET_RISCV64)
+static void rv64_base_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ /* We set this in the realize function */
+ set_misa(env, MXL_RV64, 0);
+}
+
+static void rv64_sifive_u_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+}
+
+static void rv64_sifive_e_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+ qdev_prop_set_bit(DEVICE(obj), "mmu", false);
+}
+#else
+static void rv32_base_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ /* We set this in the realize function */
+ set_misa(env, MXL_RV32, 0);
+}
+
+static void rv32_sifive_u_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+}
+
+static void rv32_sifive_e_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+ qdev_prop_set_bit(DEVICE(obj), "mmu", false);
+}
+
+static void rv32_ibex_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+ qdev_prop_set_bit(DEVICE(obj), "mmu", false);
+ qdev_prop_set_bit(DEVICE(obj), "x-epmp", true);
+}
+
+static void rv32_imafcu_nommu_cpu_init(Object *obj)
+{
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
+ set_priv_version(env, PRIV_VERSION_1_10_0);
+ set_resetvec(env, DEFAULT_RSTVEC);
+ qdev_prop_set_bit(DEVICE(obj), "mmu", false);
+}
+#endif
+
+static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ char **cpuname;
+
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
+ oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ int i;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env));
+ }
+#endif
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
+#ifndef CONFIG_USER_ONLY
+ {
+ static const int dump_csrs[] = {
+ CSR_MHARTID,
+ CSR_MSTATUS,
+ CSR_MSTATUSH,
+ CSR_HSTATUS,
+ CSR_VSSTATUS,
+ CSR_MIP,
+ CSR_MIE,
+ CSR_MIDELEG,
+ CSR_HIDELEG,
+ CSR_MEDELEG,
+ CSR_HEDELEG,
+ CSR_MTVEC,
+ CSR_STVEC,
+ CSR_VSTVEC,
+ CSR_MEPC,
+ CSR_SEPC,
+ CSR_VSEPC,
+ CSR_MCAUSE,
+ CSR_SCAUSE,
+ CSR_VSCAUSE,
+ CSR_MTVAL,
+ CSR_STVAL,
+ CSR_HTVAL,
+ CSR_MTVAL2,
+ CSR_MSCRATCH,
+ CSR_SSCRATCH,
+ CSR_SATP,
+ CSR_MMTE,
+ CSR_UPMBASE,
+ CSR_UPMMASK,
+ CSR_SPMBASE,
+ CSR_SPMMASK,
+ CSR_MPMBASE,
+ CSR_MPMMASK,
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
+ int csrno = dump_csrs[i];
+ target_ulong val = 0;
+ RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
+
+ /*
+ * Rely on the smode, hmode, etc, predicates within csr.c
+ * to do the filtering of the registers that are present.
+ */
+ if (res == RISCV_EXCP_NONE) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
+ csr_ops[csrno].name, val);
+ }
+ }
+ }
+#endif
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
+ riscv_int_regnames[i], env->gpr[i]);
+ if ((i & 3) == 3) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ if (flags & CPU_DUMP_FPU) {
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, " %-8s %016" PRIx64,
+ riscv_fpr_regnames[i], env->fpr[i]);
+ if ((i & 3) == 3) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ }
+}
+
+static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ env->pc = value;
+}
+
+static void riscv_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ env->pc = tb->pc;
+}
+
+static bool riscv_cpu_has_work(CPUState *cs)
+{
+#ifndef CONFIG_USER_ONLY
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ /*
+ * Definition of the WFI instruction requires it to ignore the privilege
+ * mode and delegation registers, but respect individual enables
+ */
+ return (env->mip & env->mie) != 0;
+#else
+ return true;
+#endif
+}
+
+void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+static void riscv_cpu_reset(DeviceState *dev)
+{
+ CPUState *cs = CPU(dev);
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+ CPURISCVState *env = &cpu->env;
+
+ mcc->parent_reset(dev);
+#ifndef CONFIG_USER_ONLY
+ env->misa_mxl = env->misa_mxl_max;
+ env->priv = PRV_M;
+ env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
+ if (env->misa_mxl > MXL_RV32) {
+ /*
+ * The reset status of SXL/UXL is undefined, but mstatus is WARL
+ * and we must ensure that the value after init is valid for read.
+ */
+ env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
+ env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
+ }
+ env->mcause = 0;
+ env->pc = env->resetvec;
+ env->two_stage_lookup = false;
+ /* mmte is supposed to have pm.current hardwired to 1 */
+ env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
+#endif
+ cs->exception_index = RISCV_EXCP_NONE;
+ env->load_res = -1;
+ set_default_nan_mode(1, &env->fp_status);
+}
+
+static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ RISCVCPU *cpu = RISCV_CPU(s);
+
+ switch (riscv_cpu_mxl(&cpu->env)) {
+ case MXL_RV32:
+ info->print_insn = print_insn_riscv32;
+ break;
+ case MXL_RV64:
+ info->print_insn = print_insn_riscv64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ RISCVCPU *cpu = RISCV_CPU(dev);
+ CPURISCVState *env = &cpu->env;
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+ int priv_version = 0;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (cpu->cfg.priv_spec) {
+ if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
+ priv_version = PRIV_VERSION_1_11_0;
+ } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
+ priv_version = PRIV_VERSION_1_10_0;
+ } else {
+ error_setg(errp,
+ "Unsupported privilege spec version '%s'",
+ cpu->cfg.priv_spec);
+ return;
+ }
+ }
+
+ if (priv_version) {
+ set_priv_version(env, priv_version);
+ } else if (!env->priv_ver) {
+ set_priv_version(env, PRIV_VERSION_1_11_0);
+ }
+
+ if (cpu->cfg.mmu) {
+ set_feature(env, RISCV_FEATURE_MMU);
+ }
+
+ if (cpu->cfg.pmp) {
+ set_feature(env, RISCV_FEATURE_PMP);
+
+ /*
+ * Enhanced PMP should only be available
+ * on harts with PMP support
+ */
+ if (cpu->cfg.epmp) {
+ set_feature(env, RISCV_FEATURE_EPMP);
+ }
+ }
+
+ set_resetvec(env, cpu->cfg.resetvec);
+
+ /* Validate that MISA_MXL is set properly. */
+ switch (env->misa_mxl_max) {
+#ifdef TARGET_RISCV64
+ case MXL_RV64:
+ break;
+#endif
+ case MXL_RV32:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ assert(env->misa_mxl_max == env->misa_mxl);
+
+ /* If only MISA_EXT is unset for misa, then set it from properties */
+ if (env->misa_ext == 0) {
+ uint32_t ext = 0;
+
+ /* Do some ISA extension error checking */
+ if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
+ error_setg(errp,
+ "I and E extensions are incompatible");
+ return;
+ }
+
+ if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
+ error_setg(errp,
+ "Either I or E extension must be set");
+ return;
+ }
+
+ if (cpu->cfg.ext_g && !(cpu->cfg.ext_i & cpu->cfg.ext_m &
+ cpu->cfg.ext_a & cpu->cfg.ext_f &
+ cpu->cfg.ext_d)) {
+ warn_report("Setting G will also set IMAFD");
+ cpu->cfg.ext_i = true;
+ cpu->cfg.ext_m = true;
+ cpu->cfg.ext_a = true;
+ cpu->cfg.ext_f = true;
+ cpu->cfg.ext_d = true;
+ }
+
+ /* Set the ISA extensions, checks should have happened above */
+ if (cpu->cfg.ext_i) {
+ ext |= RVI;
+ }
+ if (cpu->cfg.ext_e) {
+ ext |= RVE;
+ }
+ if (cpu->cfg.ext_m) {
+ ext |= RVM;
+ }
+ if (cpu->cfg.ext_a) {
+ ext |= RVA;
+ }
+ if (cpu->cfg.ext_f) {
+ ext |= RVF;
+ }
+ if (cpu->cfg.ext_d) {
+ ext |= RVD;
+ }
+ if (cpu->cfg.ext_c) {
+ ext |= RVC;
+ }
+ if (cpu->cfg.ext_s) {
+ ext |= RVS;
+ }
+ if (cpu->cfg.ext_u) {
+ ext |= RVU;
+ }
+ if (cpu->cfg.ext_h) {
+ ext |= RVH;
+ }
+ if (cpu->cfg.ext_v) {
+ int vext_version = VEXT_VERSION_0_07_1;
+ ext |= RVV;
+ if (!is_power_of_2(cpu->cfg.vlen)) {
+ error_setg(errp,
+ "Vector extension VLEN must be power of 2");
+ return;
+ }
+ if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
+ error_setg(errp,
+ "Vector extension implementation only supports VLEN "
+ "in the range [128, %d]", RV_VLEN_MAX);
+ return;
+ }
+ if (!is_power_of_2(cpu->cfg.elen)) {
+ error_setg(errp,
+ "Vector extension ELEN must be power of 2");
+ return;
+ }
+ if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
+ error_setg(errp,
+ "Vector extension implementation only supports ELEN "
+ "in the range [8, 64]");
+ return;
+ }
+ if (cpu->cfg.vext_spec) {
+ if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) {
+ vext_version = VEXT_VERSION_0_07_1;
+ } else {
+ error_setg(errp,
+ "Unsupported vector spec version '%s'",
+ cpu->cfg.vext_spec);
+ return;
+ }
+ } else {
+ qemu_log("vector version is not specified, "
+ "use the default value v0.7.1\n");
+ }
+ set_vext_version(env, vext_version);
+ }
+ if (cpu->cfg.ext_j) {
+ ext |= RVJ;
+ }
+
+ set_misa(env, env->misa_mxl, ext);
+ }
+
+ riscv_cpu_register_gdb_regs_for_features(cs);
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void riscv_cpu_set_irq(void *opaque, int irq, int level)
+{
+ RISCVCPU *cpu = RISCV_CPU(opaque);
+
+ switch (irq) {
+ case IRQ_U_SOFT:
+ case IRQ_S_SOFT:
+ case IRQ_VS_SOFT:
+ case IRQ_M_SOFT:
+ case IRQ_U_TIMER:
+ case IRQ_S_TIMER:
+ case IRQ_VS_TIMER:
+ case IRQ_M_TIMER:
+ case IRQ_U_EXT:
+ case IRQ_S_EXT:
+ case IRQ_VS_EXT:
+ case IRQ_M_EXT:
+ riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+#endif /* CONFIG_USER_ONLY */
+
+static void riscv_cpu_init(Object *obj)
+{
+ RISCVCPU *cpu = RISCV_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+
+#ifndef CONFIG_USER_ONLY
+ qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 12);
+#endif /* CONFIG_USER_ONLY */
+}
+
+static Property riscv_cpu_properties[] = {
+ /* Defaults for standard extensions */
+ DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
+ DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
+ DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, true),
+ DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
+ DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
+ DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
+ DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
+ DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
+ DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
+ DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
+ DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
+ DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
+ DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
+ DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
+ DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
+
+ DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
+
+ /* These are experimental so mark with 'x-' */
+ DEFINE_PROP_BOOL("x-zba", RISCVCPU, cfg.ext_zba, false),
+ DEFINE_PROP_BOOL("x-zbb", RISCVCPU, cfg.ext_zbb, false),
+ DEFINE_PROP_BOOL("x-zbc", RISCVCPU, cfg.ext_zbc, false),
+ DEFINE_PROP_BOOL("x-zbs", RISCVCPU, cfg.ext_zbs, false),
+ DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
+ DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
+ DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false),
+ DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
+ DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
+ DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
+ /* ePMP 0.9.3 */
+ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
+
+ DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC),
+ DEFINE_PROP_END_OF_LIST(),
+};
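+
+/*
+ * These properties are settable from the command line, e.g. (illustrative
+ * invocation): -cpu rv64,x-v=true,vlen=256,elen=64,priv_spec=v1.10.0
+ */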
+
+static gchar *riscv_gdb_arch_name(CPUState *cs)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ switch (riscv_cpu_mxl(env)) {
+ case MXL_RV32:
+ return g_strdup("riscv:rv32");
+ case MXL_RV64:
+ return g_strdup("riscv:rv64");
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+
+ if (strcmp(xmlname, "riscv-csr.xml") == 0) {
+ return cpu->dyn_csr_xml;
+ }
+
+ return NULL;
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps riscv_sysemu_ops = {
+ .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
+ .write_elf64_note = riscv_cpu_write_elf64_note,
+ .write_elf32_note = riscv_cpu_write_elf32_note,
+ .legacy_vmsd = &vmstate_riscv_cpu,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps riscv_tcg_ops = {
+ .initialize = riscv_translate_init,
+ .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = riscv_cpu_tlb_fill,
+ .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
+ .do_interrupt = riscv_cpu_do_interrupt,
+ .do_transaction_failed = riscv_cpu_do_transaction_failed,
+ .do_unaligned_access = riscv_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void riscv_cpu_class_init(ObjectClass *c, void *data)
+{
+ RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_parent_realize(dc, riscv_cpu_realize,
+ &mcc->parent_realize);
+
+ device_class_set_parent_reset(dc, riscv_cpu_reset, &mcc->parent_reset);
+
+ cc->class_by_name = riscv_cpu_class_by_name;
+ cc->has_work = riscv_cpu_has_work;
+ cc->dump_state = riscv_cpu_dump_state;
+ cc->set_pc = riscv_cpu_set_pc;
+ cc->gdb_read_register = riscv_cpu_gdb_read_register;
+ cc->gdb_write_register = riscv_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 33;
+#if defined(TARGET_RISCV32)
+ cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
+#elif defined(TARGET_RISCV64)
+ cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
+#endif
+ cc->gdb_stop_before_watchpoint = true;
+ cc->disas_set_info = riscv_cpu_disas_set_info;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &riscv_sysemu_ops;
+#endif
+ cc->gdb_arch_name = riscv_gdb_arch_name;
+ cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
+ cc->tcg_ops = &riscv_tcg_ops;
+
+ device_class_set_props(dc, riscv_cpu_properties);
+}
+
+char *riscv_isa_string(RISCVCPU *cpu)
+{
+ int i;
+ const size_t maxlen = sizeof("rv128") + sizeof(riscv_exts) + 1;
+ char *isa_str = g_new(char, maxlen);
+ char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
+ for (i = 0; i < sizeof(riscv_exts); i++) {
+ if (cpu->env.misa_ext & RV(riscv_exts[i])) {
+ *p++ = qemu_tolower(riscv_exts[i]);
+ }
+ }
+ *p = '\0';
+ return isa_str;
+}
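+
+/*
+ * E.g. an rv64 cpu with misa_ext == (RVI | RVM | RVA | RVF | RVD | RVC)
+ * yields "rv64imafdc": the extension letters are emitted lower-cased, in
+ * the canonical order of riscv_exts.
+ */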
+
+static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ const char *typename = object_class_get_name(OBJECT_CLASS(data));
+ int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
+
+ qemu_printf("%.*s\n", len, typename);
+}
+
+void riscv_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list(TYPE_RISCV_CPU, false);
+ list = g_slist_sort(list, riscv_cpu_list_compare);
+ g_slist_foreach(list, riscv_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+#define DEFINE_CPU(type_name, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_RISCV_CPU, \
+ .instance_init = initfn \
+ }
+
+static const TypeInfo riscv_cpu_type_infos[] = {
+ {
+ .name = TYPE_RISCV_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(RISCVCPU),
+ .instance_align = __alignof__(RISCVCPU),
+ .instance_init = riscv_cpu_init,
+ .abstract = true,
+ .class_size = sizeof(RISCVCPUClass),
+ .class_init = riscv_cpu_class_init,
+ },
+ DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
+#if defined(TARGET_RISCV32)
+ DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
+#elif defined(TARGET_RISCV64)
+ DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
+#endif
+};
+
+DEFINE_TYPES(riscv_cpu_type_infos)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
new file mode 100644
index 000000000..0760c0af9
--- /dev/null
+++ b/target/riscv/cpu.h
@@ -0,0 +1,495 @@
+/*
+ * QEMU RISC-V CPU
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_CPU_H
+#define RISCV_CPU_H
+
+#include "hw/core/cpu.h"
+#include "hw/registerfields.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat-types.h"
+#include "qom/object.h"
+#include "cpu_bits.h"
+
+#define TCG_GUEST_DEFAULT_MO 0
+
+#define TYPE_RISCV_CPU "riscv-cpu"
+
+#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
+#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
+
+#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
+#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
+#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
+#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
+#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
+#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
+#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
+#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
+#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
+
+#if defined(TARGET_RISCV32)
+# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
+#elif defined(TARGET_RISCV64)
+# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64
+#endif
+
+#define RV(x) ((target_ulong)1 << (x - 'A'))
+
+#define RVI RV('I')
+#define RVE RV('E') /* E and I are mutually exclusive */
+#define RVM RV('M')
+#define RVA RV('A')
+#define RVF RV('F')
+#define RVD RV('D')
+#define RVV RV('V')
+#define RVC RV('C')
+#define RVS RV('S')
+#define RVU RV('U')
+#define RVH RV('H')
+#define RVJ RV('J')
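+
+/*
+ * Example: RV('I') == 1 << ('I' - 'A') == 1 << 8 == 0x100, so a hart
+ * implementing rv32imac carries (RVI | RVM | RVA | RVC) in its misa
+ * extension mask.
+ */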
+
+/*
+ * The S extension denotes that Supervisor mode exists. However, it is
+ * possible to have a core that supports S mode but does not have an MMU,
+ * and there is currently no bit in misa to indicate whether an MMU exists,
+ * so a cpu features bitfield is required; likewise for optional PMP support.
+ */
+enum {
+ RISCV_FEATURE_MMU,
+ RISCV_FEATURE_PMP,
+ RISCV_FEATURE_EPMP,
+ RISCV_FEATURE_MISA
+};
+
+#define PRIV_VERSION_1_10_0 0x00011000
+#define PRIV_VERSION_1_11_0 0x00011100
+
+#define VEXT_VERSION_0_07_1 0x00000701
+
+enum {
+ TRANSLATE_SUCCESS,
+ TRANSLATE_FAIL,
+ TRANSLATE_PMP_FAIL,
+ TRANSLATE_G_STAGE_FAIL
+};
+
+#define MMU_USER_IDX 3
+
+#define MAX_RISCV_PMPS (16)
+
+typedef struct CPURISCVState CPURISCVState;
+
+#if !defined(CONFIG_USER_ONLY)
+#include "pmp.h"
+#endif
+
+#define RV_VLEN_MAX 256
+
+FIELD(VTYPE, VLMUL, 0, 2)
+FIELD(VTYPE, VSEW, 2, 3)
+FIELD(VTYPE, VEDIV, 5, 2)
+FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9)
+FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
+
+struct CPURISCVState {
+ target_ulong gpr[32];
+ uint64_t fpr[32]; /* assume both F and D extensions */
+
+ /* vector coprocessor state. */
+ uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
+ target_ulong vxrm;
+ target_ulong vxsat;
+ target_ulong vl;
+ target_ulong vstart;
+ target_ulong vtype;
+
+ target_ulong pc;
+ target_ulong load_res;
+ target_ulong load_val;
+
+ target_ulong frm;
+
+ target_ulong badaddr;
+ target_ulong guest_phys_fault_addr;
+
+ target_ulong priv_ver;
+ target_ulong bext_ver;
+ target_ulong vext_ver;
+
+ /* RISCVMXL, but uint32_t for vmstate migration */
+ uint32_t misa_mxl; /* current mxl */
+ uint32_t misa_mxl_max; /* max mxl for this cpu */
+ uint32_t misa_ext; /* current extensions */
+ uint32_t misa_ext_mask; /* max ext for this cpu */
+
+ uint32_t features;
+
+#ifdef CONFIG_USER_ONLY
+ uint32_t elf_flags;
+#endif
+
+#ifndef CONFIG_USER_ONLY
+ target_ulong priv;
+ /* This contains QEMU specific information about the virt state. */
+ target_ulong virt;
+ target_ulong resetvec;
+
+ target_ulong mhartid;
+ /*
+ * For RV32 this is 32-bit mstatus and 32-bit mstatush.
+ * For RV64 this is a 64-bit mstatus.
+ */
+ uint64_t mstatus;
+
+ target_ulong mip;
+
+ uint32_t miclaim;
+
+ target_ulong mie;
+ target_ulong mideleg;
+
+ target_ulong satp; /* since: priv-1.10.0 */
+ target_ulong stval;
+ target_ulong medeleg;
+
+ target_ulong stvec;
+ target_ulong sepc;
+ target_ulong scause;
+
+ target_ulong mtvec;
+ target_ulong mepc;
+ target_ulong mcause;
+ target_ulong mtval; /* since: priv-1.10.0 */
+
+ /* Hypervisor CSRs */
+ target_ulong hstatus;
+ target_ulong hedeleg;
+ target_ulong hideleg;
+ target_ulong hcounteren;
+ target_ulong htval;
+ target_ulong htinst;
+ target_ulong hgatp;
+ uint64_t htimedelta;
+
+ /* Virtual CSRs */
+ /*
+ * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
+ * For RV64 this is a 64-bit vsstatus.
+ */
+ uint64_t vsstatus;
+ target_ulong vstvec;
+ target_ulong vsscratch;
+ target_ulong vsepc;
+ target_ulong vscause;
+ target_ulong vstval;
+ target_ulong vsatp;
+
+ target_ulong mtval2;
+ target_ulong mtinst;
+
+ /* HS Backup CSRs */
+ target_ulong stvec_hs;
+ target_ulong sscratch_hs;
+ target_ulong sepc_hs;
+ target_ulong scause_hs;
+ target_ulong stval_hs;
+ target_ulong satp_hs;
+ uint64_t mstatus_hs;
+
+ /*
+ * Signals whether the current exception occurred with two-stage address
+ * translation active.
+ */
+ bool two_stage_lookup;
+
+ target_ulong scounteren;
+ target_ulong mcounteren;
+
+ target_ulong sscratch;
+ target_ulong mscratch;
+
+ /* temporary htif regs */
+ uint64_t mfromhost;
+ uint64_t mtohost;
+ uint64_t timecmp;
+
+ /* physical memory protection */
+ pmp_table_t pmp_state;
+ target_ulong mseccfg;
+
+ /* machine specific rdtime callback */
+ uint64_t (*rdtime_fn)(uint32_t);
+ uint32_t rdtime_fn_arg;
+
+ /* True if in debugger mode. */
+ bool debugger;
+
+ /*
+ * CSRs for PointerMasking extension
+ */
+ target_ulong mmte;
+ target_ulong mpmmask;
+ target_ulong mpmbase;
+ target_ulong spmmask;
+ target_ulong spmbase;
+ target_ulong upmmask;
+ target_ulong upmbase;
+#endif
+
+ float_status fp_status;
+
+ /* Fields from here on are preserved across CPU reset. */
+ QEMUTimer *timer; /* Internal timer */
+};
+
+OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass,
+ RISCV_CPU)
+
+/**
+ * RISCVCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A RISCV CPU model.
+ */
+struct RISCVCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+/**
+ * RISCVCPU:
+ * @env: #CPURISCVState
+ *
+ * A RISCV CPU.
+ */
+struct RISCVCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+ CPUNegativeOffsetState neg;
+ CPURISCVState env;
+
+ char *dyn_csr_xml;
+
+ /* Configuration Settings */
+ struct {
+ bool ext_i;
+ bool ext_e;
+ bool ext_g;
+ bool ext_m;
+ bool ext_a;
+ bool ext_f;
+ bool ext_d;
+ bool ext_c;
+ bool ext_s;
+ bool ext_u;
+ bool ext_h;
+ bool ext_j;
+ bool ext_v;
+ bool ext_zba;
+ bool ext_zbb;
+ bool ext_zbc;
+ bool ext_zbs;
+ bool ext_counters;
+ bool ext_ifencei;
+ bool ext_icsr;
+
+ char *priv_spec;
+ char *user_spec;
+ char *bext_spec;
+ char *vext_spec;
+ uint16_t vlen;
+ uint16_t elen;
+ bool mmu;
+ bool pmp;
+ bool epmp;
+ uint64_t resetvec;
+ } cfg;
+};
+
+static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
+{
+ return (env->misa_ext & ext) != 0;
+}
+
+static inline bool riscv_feature(CPURISCVState *env, int feature)
+{
+ return env->features & (1ULL << feature);
+}
+
+#include "cpu_user.h"
+
+extern const char * const riscv_int_regnames[];
+extern const char * const riscv_fpr_regnames[];
+
+const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
+void riscv_cpu_do_interrupt(CPUState *cpu);
+int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+bool riscv_cpu_fp_enabled(CPURISCVState *env);
+bool riscv_cpu_virt_enabled(CPURISCVState *env);
+void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
+bool riscv_cpu_two_stage_lookup(int mmu_idx);
+int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+char *riscv_isa_string(RISCVCPU *cpu);
+void riscv_cpu_list(void);
+
+#define cpu_list riscv_cpu_list
+#define cpu_mmu_index riscv_cpu_mmu_index
+
+#ifndef CONFIG_USER_ONLY
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
+void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
+int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
+uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
+#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
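+/*
+ * BOOL_TO_MASK(1) is an all-ones mask and BOOL_TO_MASK(0) is 0, so
+ * riscv_cpu_update_mip(cpu, mask, BOOL_TO_MASK(level)) sets or clears
+ * exactly the bits in @mask according to @level.
+ */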
+void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
+ uint32_t arg);
+#endif
+void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
+
+void riscv_translate_init(void);
+void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
+ uint32_t exception, uintptr_t pc);
+
+target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
+void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
+
+#define TB_FLAGS_PRIV_MMU_MASK 3
+#define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2)
+#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
+
+typedef CPURISCVState CPUArchState;
+typedef RISCVCPU ArchCPU;
+#include "exec/cpu-all.h"
+
+FIELD(TB_FLAGS, MEM_IDX, 0, 3)
+FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1)
+FIELD(TB_FLAGS, LMUL, 4, 2)
+FIELD(TB_FLAGS, SEW, 6, 3)
+FIELD(TB_FLAGS, VILL, 9, 1)
+/* Is a Hypervisor instruction load/store allowed? */
+FIELD(TB_FLAGS, HLSX, 10, 1)
+FIELD(TB_FLAGS, MSTATUS_HS_FS, 11, 2)
+/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
+FIELD(TB_FLAGS, XL, 13, 2)
+/* If PointerMasking should be applied */
+FIELD(TB_FLAGS, PM_ENABLED, 15, 1)
+
+#ifdef TARGET_RISCV32
+#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
+#else
+static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
+{
+ return env->misa_mxl;
+}
+#endif
+
+/*
+ * A simplification for VLMAX
+ * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
+ * = (VLEN << LMUL) / (8 << SEW)
+ * = (VLEN << LMUL) >> (SEW + 3)
+ * = VLEN >> (SEW + 3 - LMUL)
+ */
+static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
+{
+ uint8_t sew, lmul;
+
+ sew = FIELD_EX64(vtype, VTYPE, VSEW);
+ lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
+ return cpu->cfg.vlen >> (sew + 3 - lmul);
+}
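+
+/*
+ * Example: vlen = 128 with vtype.vsew = 2 (SEW = 32) and vtype.vlmul = 1
+ * (LMUL = 2) gives VLMAX = 128 >> (2 + 3 - 1) = 8 elements.
+ */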
+
+void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags);
+
+RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask);
+RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value,
+ target_ulong write_mask);
+
+static inline void riscv_csr_write(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
+}
+
+static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
+{
+ target_ulong val = 0;
+ riscv_csrrw(env, csrno, &val, 0, 0);
+ return val;
+}
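+
+/*
+ * Usage sketch: riscv_csr_write(env, CSR_MSCRATCH, val) performs a
+ * full-width write (all-ones write mask), while
+ * riscv_csr_read(env, CSR_MSCRATCH) passes a zero write mask and so
+ * never modifies the register.
+ */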
+
+typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
+ int csrno);
+typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
+ target_ulong *ret_value);
+typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
+ target_ulong new_value);
+typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value,
+ target_ulong write_mask);
+
+typedef struct {
+ const char *name;
+ riscv_csr_predicate_fn predicate;
+ riscv_csr_read_fn read;
+ riscv_csr_write_fn write;
+ riscv_csr_op_fn op;
+} riscv_csr_operations;
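+
+/*
+ * A sketch of how csr.c is expected to populate the table (the helper
+ * names here are illustrative, not part of this header):
+ *
+ *   riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
+ *       [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus },
+ *   };
+ *
+ * where the predicate filters by privilege level and extension before
+ * any read or write is attempted.
+ */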
+
+/* CSR function table constants */
+enum {
+ CSR_TABLE_SIZE = 0x1000
+};
+
+/* CSR function table */
+extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
+
+void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
+void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
+
+void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
+
+#endif /* RISCV_CPU_H */
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
new file mode 100644
index 000000000..9913fa9f7
--- /dev/null
+++ b/target/riscv/cpu_bits.h
@@ -0,0 +1,618 @@
+/* RISC-V ISA constants */
+
+#ifndef TARGET_RISCV_CPU_BITS_H
+#define TARGET_RISCV_CPU_BITS_H
+
+#define get_field(reg, mask) (((reg) & \
+ (uint64_t)(mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(uint64_t)(mask)) | \
+ (((uint64_t)(val) * ((mask) & ~((mask) << 1))) & \
+ (uint64_t)(mask)))
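+
+/*
+ * Worked example: MSTATUS_MPP is 0x1800 and its lowest set bit is 0x800,
+ * so get_field(0x1880, MSTATUS_MPP) == 0x1800 / 0x800 == 3 (PRV_M), and
+ * set_field(0, MSTATUS_MPP, PRV_S) == 1 * 0x800 == 0x800.
+ */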
+
+/* Floating point round mode */
+#define FSR_RD_SHIFT 5
+#define FSR_RD (0x7 << FSR_RD_SHIFT)
+
+/* Floating point accrued exception flags */
+#define FPEXC_NX 0x01
+#define FPEXC_UF 0x02
+#define FPEXC_OF 0x04
+#define FPEXC_DZ 0x08
+#define FPEXC_NV 0x10
+
+/* Floating point status register bits */
+#define FSR_AEXC_SHIFT 0
+#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT)
+#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT)
+#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT)
+#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT)
+#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
+#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+/* Vector Fixed-Point round model */
+#define FSR_VXRM_SHIFT 9
+#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT)
+
+/* Vector Fixed-Point saturation flag */
+#define FSR_VXSAT_SHIFT 8
+#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT)
+
+/* Control and Status Registers */
+
+/* User Trap Setup */
+#define CSR_USTATUS 0x000
+#define CSR_UIE 0x004
+#define CSR_UTVEC 0x005
+
+/* User Trap Handling */
+#define CSR_USCRATCH 0x040
+#define CSR_UEPC 0x041
+#define CSR_UCAUSE 0x042
+#define CSR_UTVAL 0x043
+#define CSR_UIP 0x044
+
+/* User Floating-Point CSRs */
+#define CSR_FFLAGS 0x001
+#define CSR_FRM 0x002
+#define CSR_FCSR 0x003
+
+/* User Vector CSRs */
+#define CSR_VSTART 0x008
+#define CSR_VXSAT 0x009
+#define CSR_VXRM 0x00a
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+
+/* User Timers and Counters */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+
+/* Machine Timers and Counters */
+#define CSR_MCYCLE 0xb00
+#define CSR_MINSTRET 0xb02
+#define CSR_MCYCLEH 0xb80
+#define CSR_MINSTRETH 0xb82
+
+/* Machine Information Registers */
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+
+/* Machine Trap Setup */
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MEDELEG 0x302
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MCOUNTEREN 0x306
+
+/* 32-bit only */
+#define CSR_MSTATUSH 0x310
+
+/* Machine Trap Handling */
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+
+/* Supervisor Trap Setup */
+#define CSR_SSTATUS 0x100
+#define CSR_SEDELEG 0x102
+#define CSR_SIDELEG 0x103
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+
+/* Supervisor Trap Handling */
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+
+/* Supervisor Protection and Translation */
+#define CSR_SPTBR 0x180
+#define CSR_SATP 0x180
+
+/* Hypervisor CSRs */
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HTVAL 0x643
+#define CSR_HVIP 0x645
+#define CSR_HIP 0x644
+#define CSR_HTINST 0x64A
+#define CSR_HGEIP 0xE12
+#define CSR_HGATP 0x680
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HTIMEDELTAH 0x615
+
+/* Virtual CSRs */
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+
+#define CSR_MTINST 0x34a
+#define CSR_MTVAL2 0x34b
+
+/* Enhanced Physical Memory Protection (ePMP) */
+#define CSR_MSECCFG 0x747
+#define CSR_MSECCFGH 0x757
+/* Physical Memory Protection */
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+
+/* Debug/Trace Registers (shared with Debug Mode) */
+#define CSR_TSELECT 0x7a0
+#define CSR_TDATA1 0x7a1
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA3 0x7a3
+
+/* Debug Mode Registers */
+#define CSR_DCSR 0x7b0
+#define CSR_DPC 0x7b1
+#define CSR_DSCRATCH 0x7b2
+
+/* Performance Counters */
+#define CSR_MHPMCOUNTER3 0xb03
+#define CSR_MHPMCOUNTER4 0xb04
+#define CSR_MHPMCOUNTER5 0xb05
+#define CSR_MHPMCOUNTER6 0xb06
+#define CSR_MHPMCOUNTER7 0xb07
+#define CSR_MHPMCOUNTER8 0xb08
+#define CSR_MHPMCOUNTER9 0xb09
+#define CSR_MHPMCOUNTER10 0xb0a
+#define CSR_MHPMCOUNTER11 0xb0b
+#define CSR_MHPMCOUNTER12 0xb0c
+#define CSR_MHPMCOUNTER13 0xb0d
+#define CSR_MHPMCOUNTER14 0xb0e
+#define CSR_MHPMCOUNTER15 0xb0f
+#define CSR_MHPMCOUNTER16 0xb10
+#define CSR_MHPMCOUNTER17 0xb11
+#define CSR_MHPMCOUNTER18 0xb12
+#define CSR_MHPMCOUNTER19 0xb13
+#define CSR_MHPMCOUNTER20 0xb14
+#define CSR_MHPMCOUNTER21 0xb15
+#define CSR_MHPMCOUNTER22 0xb16
+#define CSR_MHPMCOUNTER23 0xb17
+#define CSR_MHPMCOUNTER24 0xb18
+#define CSR_MHPMCOUNTER25 0xb19
+#define CSR_MHPMCOUNTER26 0xb1a
+#define CSR_MHPMCOUNTER27 0xb1b
+#define CSR_MHPMCOUNTER28 0xb1c
+#define CSR_MHPMCOUNTER29 0xb1d
+#define CSR_MHPMCOUNTER30 0xb1e
+#define CSR_MHPMCOUNTER31 0xb1f
+#define CSR_MHPMEVENT3 0x323
+#define CSR_MHPMEVENT4 0x324
+#define CSR_MHPMEVENT5 0x325
+#define CSR_MHPMEVENT6 0x326
+#define CSR_MHPMEVENT7 0x327
+#define CSR_MHPMEVENT8 0x328
+#define CSR_MHPMEVENT9 0x329
+#define CSR_MHPMEVENT10 0x32a
+#define CSR_MHPMEVENT11 0x32b
+#define CSR_MHPMEVENT12 0x32c
+#define CSR_MHPMEVENT13 0x32d
+#define CSR_MHPMEVENT14 0x32e
+#define CSR_MHPMEVENT15 0x32f
+#define CSR_MHPMEVENT16 0x330
+#define CSR_MHPMEVENT17 0x331
+#define CSR_MHPMEVENT18 0x332
+#define CSR_MHPMEVENT19 0x333
+#define CSR_MHPMEVENT20 0x334
+#define CSR_MHPMEVENT21 0x335
+#define CSR_MHPMEVENT22 0x336
+#define CSR_MHPMEVENT23 0x337
+#define CSR_MHPMEVENT24 0x338
+#define CSR_MHPMEVENT25 0x339
+#define CSR_MHPMEVENT26 0x33a
+#define CSR_MHPMEVENT27 0x33b
+#define CSR_MHPMEVENT28 0x33c
+#define CSR_MHPMEVENT29 0x33d
+#define CSR_MHPMEVENT30 0x33e
+#define CSR_MHPMEVENT31 0x33f
+#define CSR_MHPMCOUNTER3H 0xb83
+#define CSR_MHPMCOUNTER4H 0xb84
+#define CSR_MHPMCOUNTER5H 0xb85
+#define CSR_MHPMCOUNTER6H 0xb86
+#define CSR_MHPMCOUNTER7H 0xb87
+#define CSR_MHPMCOUNTER8H 0xb88
+#define CSR_MHPMCOUNTER9H 0xb89
+#define CSR_MHPMCOUNTER10H 0xb8a
+#define CSR_MHPMCOUNTER11H 0xb8b
+#define CSR_MHPMCOUNTER12H 0xb8c
+#define CSR_MHPMCOUNTER13H 0xb8d
+#define CSR_MHPMCOUNTER14H 0xb8e
+#define CSR_MHPMCOUNTER15H 0xb8f
+#define CSR_MHPMCOUNTER16H 0xb90
+#define CSR_MHPMCOUNTER17H 0xb91
+#define CSR_MHPMCOUNTER18H 0xb92
+#define CSR_MHPMCOUNTER19H 0xb93
+#define CSR_MHPMCOUNTER20H 0xb94
+#define CSR_MHPMCOUNTER21H 0xb95
+#define CSR_MHPMCOUNTER22H 0xb96
+#define CSR_MHPMCOUNTER23H 0xb97
+#define CSR_MHPMCOUNTER24H 0xb98
+#define CSR_MHPMCOUNTER25H 0xb99
+#define CSR_MHPMCOUNTER26H 0xb9a
+#define CSR_MHPMCOUNTER27H 0xb9b
+#define CSR_MHPMCOUNTER28H 0xb9c
+#define CSR_MHPMCOUNTER29H 0xb9d
+#define CSR_MHPMCOUNTER30H 0xb9e
+#define CSR_MHPMCOUNTER31H 0xb9f
+
+/*
+ * User PointerMasking registers
+ * NB: actual CSR numbers might be changed in future
+ */
+#define CSR_UMTE 0x4c0
+#define CSR_UPMMASK 0x4c1
+#define CSR_UPMBASE 0x4c2
+
+/*
+ * Machine PointerMasking registers
+ * NB: actual CSR numbers might be changed in future
+ */
+#define CSR_MMTE 0x3c0
+#define CSR_MPMMASK 0x3c1
+#define CSR_MPMBASE 0x3c2
+
+/*
+ * Supervisor PointerMasking registers
+ * NB: actual CSR numbers might be changed in future
+ */
+#define CSR_SMTE 0x1c0
+#define CSR_SPMMASK 0x1c1
+#define CSR_SPMBASE 0x1c2
+
+/*
+ * Hypervisor PointerMasking registers
+ * NB: actual CSR numbers might be changed in future
+ */
+#define CSR_VSMTE 0x2c0
+#define CSR_VSPMMASK 0x2c1
+#define CSR_VSPMBASE 0x2c2
+
+/* mstatus CSR bits */
+#define MSTATUS_UIE 0x00000001
+#define MSTATUS_SIE 0x00000002
+#define MSTATUS_MIE 0x00000008
+#define MSTATUS_UPIE 0x00000010
+#define MSTATUS_SPIE 0x00000020
+#define MSTATUS_UBE 0x00000040
+#define MSTATUS_MPIE 0x00000080
+#define MSTATUS_SPP 0x00000100
+#define MSTATUS_MPP 0x00001800
+#define MSTATUS_FS 0x00006000
+#define MSTATUS_XS 0x00018000
+#define MSTATUS_MPRV 0x00020000
+#define MSTATUS_SUM 0x00040000 /* since: priv-1.10 */
+#define MSTATUS_MXR 0x00080000
+#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */
+#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
+#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
+#define MSTATUS_GVA 0x4000000000ULL
+#define MSTATUS_MPV 0x8000000000ULL
+
+#define MSTATUS64_UXL 0x0000000300000000ULL
+#define MSTATUS64_SXL 0x0000000C00000000ULL
+
+#define MSTATUS32_SD 0x80000000
+#define MSTATUS64_SD 0x8000000000000000ULL
+
+#define MISA32_MXL 0xC0000000
+#define MISA64_MXL 0xC000000000000000ULL
+
+typedef enum {
+ MXL_RV32 = 1,
+ MXL_RV64 = 2,
+ MXL_RV128 = 3,
+} RISCVMXL;
+
+/* sstatus CSR bits */
+#define SSTATUS_UIE 0x00000001
+#define SSTATUS_SIE 0x00000002
+#define SSTATUS_UPIE 0x00000010
+#define SSTATUS_SPIE 0x00000020
+#define SSTATUS_SPP 0x00000100
+#define SSTATUS_FS 0x00006000
+#define SSTATUS_XS 0x00018000
+#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
+#define SSTATUS_MXR 0x00080000
+
+#define SSTATUS32_SD 0x80000000
+#define SSTATUS64_SD 0x8000000000000000ULL
+
+/* hstatus CSR bits */
+#define HSTATUS_VSBE 0x00000020
+#define HSTATUS_GVA 0x00000040
+#define HSTATUS_SPV 0x00000080
+#define HSTATUS_SPVP 0x00000100
+#define HSTATUS_HU 0x00000200
+#define HSTATUS_VGEIN 0x0003F000
+#define HSTATUS_VTVM 0x00100000
+#define HSTATUS_VTW 0x00200000
+#define HSTATUS_VTSR 0x00400000
+#define HSTATUS_VSXL 0x300000000
+
+#define HSTATUS32_WPRI 0xFF8FF87E
+#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
+
+#define COUNTEREN_CY (1 << 0)
+#define COUNTEREN_TM (1 << 1)
+#define COUNTEREN_IR (1 << 2)
+#define COUNTEREN_HPM3 (1 << 3)
+
+/* Privilege modes */
+#define PRV_U 0
+#define PRV_S 1
+#define PRV_H 2 /* Reserved */
+#define PRV_M 3
+
+/* Virtualisation Register Fields */
+#define VIRT_ONOFF 1
+
+/* RV32 satp CSR field masks */
+#define SATP32_MODE 0x80000000
+#define SATP32_ASID 0x7fc00000
+#define SATP32_PPN 0x003fffff
+
+/* RV64 satp CSR field masks */
+#define SATP64_MODE 0xF000000000000000ULL
+#define SATP64_ASID 0x0FFFF00000000000ULL
+#define SATP64_PPN 0x00000FFFFFFFFFFFULL
+
+/* VM modes (satp.mode) privileged ISA 1.10 */
+#define VM_1_10_MBARE 0
+#define VM_1_10_SV32 1
+#define VM_1_10_SV39 8
+#define VM_1_10_SV48 9
+#define VM_1_10_SV57 10
+#define VM_1_10_SV64 11
+
+/* Page table entry (PTE) fields */
+#define PTE_V 0x001 /* Valid */
+#define PTE_R 0x002 /* Read */
+#define PTE_W 0x004 /* Write */
+#define PTE_X 0x008 /* Execute */
+#define PTE_U 0x010 /* User */
+#define PTE_G 0x020 /* Global */
+#define PTE_A 0x040 /* Accessed */
+#define PTE_D 0x080 /* Dirty */
+#define PTE_SOFT 0x300 /* Reserved for Software */
+
+/* Page table PPN shift amount */
+#define PTE_PPN_SHIFT 10
+
+/* Leaf page shift amount */
+#define PGSHIFT 12
+
+/* Default Reset Vector address */
+#define DEFAULT_RSTVEC 0x1000
+
+/* Exception causes */
+typedef enum RISCVException {
+ RISCV_EXCP_NONE = -1, /* sentinel value */
+ RISCV_EXCP_INST_ADDR_MIS = 0x0,
+ RISCV_EXCP_INST_ACCESS_FAULT = 0x1,
+ RISCV_EXCP_ILLEGAL_INST = 0x2,
+ RISCV_EXCP_BREAKPOINT = 0x3,
+ RISCV_EXCP_LOAD_ADDR_MIS = 0x4,
+ RISCV_EXCP_LOAD_ACCESS_FAULT = 0x5,
+ RISCV_EXCP_STORE_AMO_ADDR_MIS = 0x6,
+ RISCV_EXCP_STORE_AMO_ACCESS_FAULT = 0x7,
+ RISCV_EXCP_U_ECALL = 0x8,
+ RISCV_EXCP_S_ECALL = 0x9,
+ RISCV_EXCP_VS_ECALL = 0xa,
+ RISCV_EXCP_M_ECALL = 0xb,
+ RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
+ RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
+ RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
+ RISCV_EXCP_SEMIHOST = 0x10,
+ RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
+ RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT = 0x15,
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT = 0x16,
+ RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT = 0x17,
+} RISCVException;
+
+#define RISCV_EXCP_INT_FLAG 0x80000000
+#define RISCV_EXCP_INT_MASK 0x7fffffff
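+
+/*
+ * An asynchronous cause is reported with the top bit set: a machine timer
+ * interrupt, for instance, is RISCV_EXCP_INT_FLAG | IRQ_M_TIMER
+ * (0x80000007), and masking with RISCV_EXCP_INT_MASK recovers the
+ * interrupt number.
+ */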
+
+/* Interrupt causes */
+#define IRQ_U_SOFT 0
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_U_TIMER 4
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_U_EXT 8
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+
+/* mip masks */
+#define MIP_USIP (1 << IRQ_U_SOFT)
+#define MIP_SSIP (1 << IRQ_S_SOFT)
+#define MIP_VSSIP (1 << IRQ_VS_SOFT)
+#define MIP_MSIP (1 << IRQ_M_SOFT)
+#define MIP_UTIP (1 << IRQ_U_TIMER)
+#define MIP_STIP (1 << IRQ_S_TIMER)
+#define MIP_VSTIP (1 << IRQ_VS_TIMER)
+#define MIP_MTIP (1 << IRQ_M_TIMER)
+#define MIP_UEIP (1 << IRQ_U_EXT)
+#define MIP_SEIP (1 << IRQ_S_EXT)
+#define MIP_VSEIP (1 << IRQ_VS_EXT)
+#define MIP_MEIP (1 << IRQ_M_EXT)
+
+/* sip masks */
+#define SIP_SSIP MIP_SSIP
+#define SIP_STIP MIP_STIP
+#define SIP_SEIP MIP_SEIP
+
+/* MIE masks */
+#define MIE_SEIE (1 << IRQ_S_EXT)
+#define MIE_UEIE (1 << IRQ_U_EXT)
+#define MIE_STIE (1 << IRQ_S_TIMER)
+#define MIE_UTIE (1 << IRQ_U_TIMER)
+#define MIE_SSIE (1 << IRQ_S_SOFT)
+#define MIE_USIE (1 << IRQ_U_SOFT)
+
+/* General PointerMasking CSR bits */
+#define PM_ENABLE 0x00000001ULL
+#define PM_CURRENT 0x00000002ULL
+#define PM_INSN 0x00000004ULL
+#define PM_XS_MASK 0x00000003ULL
+
+/* PointerMasking XS bits values */
+#define PM_EXT_DISABLE 0x00000000ULL
+#define PM_EXT_INITIAL 0x00000001ULL
+#define PM_EXT_CLEAN 0x00000002ULL
+#define PM_EXT_DIRTY 0x00000003ULL
+
+/* Offsets for every pair of control bits per each priv level */
+#define XS_OFFSET 0ULL
+#define U_OFFSET 2ULL
+#define S_OFFSET 5ULL
+#define M_OFFSET 8ULL
+
+#define PM_XS_BITS (PM_XS_MASK << XS_OFFSET)
+#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
+#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
+#define U_PM_INSN (PM_INSN << U_OFFSET)
+#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
+#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
+#define S_PM_INSN (PM_INSN << S_OFFSET)
+#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
+#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
+#define M_PM_INSN (PM_INSN << M_OFFSET)
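+
+/*
+ * Example: M_PM_ENABLE == PM_ENABLE << M_OFFSET == 1 << 8 == 0x100; each
+ * privilege level thus owns a three-bit ENABLE/CURRENT/INSN group at its
+ * own offset, plus the two shared XS bits at offset 0.
+ */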
+
+/* mmte CSR bits */
+#define MMTE_PM_XS_BITS PM_XS_BITS
+#define MMTE_U_PM_ENABLE U_PM_ENABLE
+#define MMTE_U_PM_CURRENT U_PM_CURRENT
+#define MMTE_U_PM_INSN U_PM_INSN
+#define MMTE_S_PM_ENABLE S_PM_ENABLE
+#define MMTE_S_PM_CURRENT S_PM_CURRENT
+#define MMTE_S_PM_INSN S_PM_INSN
+#define MMTE_M_PM_ENABLE M_PM_ENABLE
+#define MMTE_M_PM_CURRENT M_PM_CURRENT
+#define MMTE_M_PM_INSN M_PM_INSN
+#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
+ MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
+ MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
+ MMTE_PM_XS_BITS)
+
+/* (v)smte CSR bits */
+#define SMTE_PM_XS_BITS PM_XS_BITS
+#define SMTE_U_PM_ENABLE U_PM_ENABLE
+#define SMTE_U_PM_CURRENT U_PM_CURRENT
+#define SMTE_U_PM_INSN U_PM_INSN
+#define SMTE_S_PM_ENABLE S_PM_ENABLE
+#define SMTE_S_PM_CURRENT S_PM_CURRENT
+#define SMTE_S_PM_INSN S_PM_INSN
+#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
+ SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
+ SMTE_PM_XS_BITS)
+
+/* umte CSR bits */
+#define UMTE_U_PM_ENABLE U_PM_ENABLE
+#define UMTE_U_PM_CURRENT U_PM_CURRENT
+#define UMTE_U_PM_INSN U_PM_INSN
+#define UMTE_MASK (UMTE_U_PM_ENABLE | UMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
+
+#endif
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
new file mode 100644
index 000000000..9eeed38c7
--- /dev/null
+++ b/target/riscv/cpu_helper.c
@@ -0,0 +1,1138 @@
+/*
+ * RISC-V CPU helpers for qemu.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "trace.h"
+#include "semihosting/common-semi.h"
+
+int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return 0;
+#else
+ return env->priv;
+#endif
+}
+
+static RISCVMXL cpu_get_xl(CPURISCVState *env)
+{
+#if defined(TARGET_RISCV32)
+ return MXL_RV32;
+#elif defined(CONFIG_USER_ONLY)
+ return MXL_RV64;
+#else
+ RISCVMXL xl = riscv_cpu_mxl(env);
+
+ /*
+ * When emulating a 32-bit-only cpu, use RV32.
+ * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
+ * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
+ * back to RV64 for lower privs.
+ */
+ if (xl != MXL_RV32) {
+ switch (env->priv) {
+ case PRV_M:
+ break;
+ case PRV_U:
+ xl = get_field(env->mstatus, MSTATUS64_UXL);
+ break;
+ default: /* PRV_S | PRV_H */
+ xl = get_field(env->mstatus, MSTATUS64_SXL);
+ break;
+ }
+ }
+ return xl;
+#endif
+}
+
+void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ uint32_t flags = 0;
+
+ *pc = env->pc;
+ *cs_base = 0;
+
+ if (riscv_has_ext(env, RVV)) {
+ uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
+ bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL,
+ FIELD_EX64(env->vtype, VTYPE, VILL));
+ flags = FIELD_DP32(flags, TB_FLAGS, SEW,
+ FIELD_EX64(env->vtype, VTYPE, VSEW));
+ flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+ FIELD_EX64(env->vtype, VTYPE, VLMUL));
+ flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+ } else {
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ flags |= TB_FLAGS_MSTATUS_FS;
+#else
+ flags |= cpu_mmu_index(env, 0);
+ if (riscv_cpu_fp_enabled(env)) {
+ flags |= env->mstatus & MSTATUS_FS;
+ }
+
+ if (riscv_has_ext(env, RVH)) {
+ if (env->priv == PRV_M ||
+ (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
+ (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
+ get_field(env->hstatus, HSTATUS_HU))) {
+ flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
+ }
+
+ flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
+ get_field(env->mstatus_hs, MSTATUS_FS));
+ }
+ if (riscv_has_ext(env, RVJ)) {
+ int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
+ bool pm_enabled = false;
+ switch (priv) {
+ case PRV_U:
+ pm_enabled = env->mmte & U_PM_ENABLE;
+ break;
+ case PRV_S:
+ pm_enabled = env->mmte & S_PM_ENABLE;
+ break;
+ case PRV_M:
+ pm_enabled = env->mmte & M_PM_ENABLE;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled);
+ }
+#endif
+
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, cpu_get_xl(env));
+
+ *pflags = flags;
+}
+
+#ifndef CONFIG_USER_ONLY
+static int riscv_cpu_local_irq_pending(CPURISCVState *env)
+{
+ target_ulong virt_enabled = riscv_cpu_virt_enabled(env);
+
+ target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
+ target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
+
+ target_ulong pending = env->mip & env->mie;
+
+ target_ulong mie = env->priv < PRV_M ||
+ (env->priv == PRV_M && mstatus_mie);
+ target_ulong sie = env->priv < PRV_S ||
+ (env->priv == PRV_S && mstatus_sie);
+ target_ulong hsie = virt_enabled || sie;
+ target_ulong vsie = virt_enabled && sie;
+
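+ /*
+ * mie/sie/hsie/vsie are 0 or 1 here, so negating them (-mie etc.)
+ * yields an all-zeros or all-ones mask: each pending interrupt is
+ * kept only if the level that delegation routes it to is enabled.
+ */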
+ target_ulong irqs =
+ (pending & ~env->mideleg & -mie) |
+ (pending & env->mideleg & ~env->hideleg & -hsie) |
+ (pending & env->mideleg & env->hideleg & -vsie);
+
+ if (irqs) {
+ return ctz64(irqs); /* since non-zero */
+ } else {
+ return RISCV_EXCP_NONE; /* indicates no pending interrupt */
+ }
+}
+
+bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ int interruptno = riscv_cpu_local_irq_pending(env);
+ if (interruptno >= 0) {
+ cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
+ riscv_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Return true if floating point support is currently enabled */
+bool riscv_cpu_fp_enabled(CPURISCVState *env)
+{
+ if (env->mstatus & MSTATUS_FS) {
+ if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
+{
+ uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
+ MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
+ MSTATUS64_UXL;
+ bool current_virt = riscv_cpu_virt_enabled(env);
+
+ g_assert(riscv_has_ext(env, RVH));
+
+ if (current_virt) {
+ /* Current V=1 and we are about to change to V=0 */
+ env->vsstatus = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->mstatus_hs;
+
+ env->vstvec = env->stvec;
+ env->stvec = env->stvec_hs;
+
+ env->vsscratch = env->sscratch;
+ env->sscratch = env->sscratch_hs;
+
+ env->vsepc = env->sepc;
+ env->sepc = env->sepc_hs;
+
+ env->vscause = env->scause;
+ env->scause = env->scause_hs;
+
+ env->vstval = env->stval;
+ env->stval = env->stval_hs;
+
+ env->vsatp = env->satp;
+ env->satp = env->satp_hs;
+ } else {
+ /* Current V=0 and we are about to change to V=1 */
+ env->mstatus_hs = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->vsstatus;
+
+ env->stvec_hs = env->stvec;
+ env->stvec = env->vstvec;
+
+ env->sscratch_hs = env->sscratch;
+ env->sscratch = env->vsscratch;
+
+ env->sepc_hs = env->sepc;
+ env->sepc = env->vsepc;
+
+ env->scause_hs = env->scause;
+ env->scause = env->vscause;
+
+ env->stval_hs = env->stval;
+ env->stval = env->vstval;
+
+ env->satp_hs = env->satp;
+ env->satp = env->vsatp;
+ }
+}
+
+bool riscv_cpu_virt_enabled(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return false;
+ }
+
+ return get_field(env->virt, VIRT_ONOFF);
+}
+
+void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return;
+ }
+
+ /* Flush the TLB on all virt mode changes. */
+ if (get_field(env->virt, VIRT_ONOFF) != enable) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->virt = set_field(env->virt, VIRT_ONOFF, enable);
+}
+
+bool riscv_cpu_two_stage_lookup(int mmu_idx)
+{
+ return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
+}
+
+int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
+{
+ CPURISCVState *env = &cpu->env;
+ if (env->miclaim & interrupts) {
+ return -1;
+ } else {
+ env->miclaim |= interrupts;
+ return 0;
+ }
+}
+
+uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
+{
+ CPURISCVState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t old = env->mip;
+ bool locked = false;
+
+ if (!qemu_mutex_iothread_locked()) {
+ locked = true;
+ qemu_mutex_lock_iothread();
+ }
+
+ env->mip = (env->mip & ~mask) | (value & mask);
+
+ if (env->mip) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
+
+ return old;
+}
+
+void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
+ uint32_t arg)
+{
+ env->rdtime_fn = fn;
+ env->rdtime_fn_arg = arg;
+}
+
+void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
+{
+ if (newpriv > PRV_M) {
+ g_assert_not_reached();
+ }
+ if (newpriv == PRV_H) {
+ newpriv = PRV_U;
+ }
+ /* tlb_flush is unnecessary as mode is contained in mmu_idx */
+ env->priv = newpriv;
+
+ /*
+ * Clear the load reservation - otherwise a reservation placed in one
+ * context/process can be used by another, resulting in an SC succeeding
+ * incorrectly. Version 2.2 of the ISA specification explicitly requires
+ * this behaviour, while later revisions say that the kernel "should" use
+ * an SC instruction to force the yielding of a load reservation on a
+ * preemptive context switch. As a result, do both.
+ */
+ env->load_res = -1;
+}
+
+/*
+ * get_physical_address_pmp - check PMP permission for this physical address
+ *
+ * Match the PMP region and check permission for this physical address and its
+ * TLB page. Returns 0 if the permission check was successful.
+ *
+ * @env: CPURISCVState
+ * @prot: The returned protection attributes
+ * @tlb_size: size of the TLB page containing addr; it may be reduced by the
+ * PMP permission check. Pass NULL if no TLB page is set up for addr.
+ * @addr: The physical address to be checked permission
+ * @access_type: The type of MMU access
+ * @mode: Indicates current privilege level.
+ */
+static int get_physical_address_pmp(CPURISCVState *env, int *prot,
+ target_ulong *tlb_size, hwaddr addr,
+ int size, MMUAccessType access_type,
+ int mode)
+{
+ pmp_priv_t pmp_priv;
+ target_ulong tlb_size_pmp = 0;
+
+ if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TRANSLATE_SUCCESS;
+ }
+
+ if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
+ mode)) {
+ *prot = 0;
+ return TRANSLATE_PMP_FAIL;
+ }
+
+ *prot = pmp_priv_to_page_prot(pmp_priv);
+ if (tlb_size != NULL) {
+ if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
+ *tlb_size = tlb_size_pmp;
+ }
+ }
+
+ return TRANSLATE_SUCCESS;
+}
+
+/*
+ * get_physical_address - get the physical address for this virtual address
+ *
+ * Do a page table walk to obtain the physical address corresponding to a
+ * virtual address. Returns 0 (TRANSLATE_SUCCESS) if the translation was
+ * successful.
+ *
+ * Adapted from Spike's mmu_t::translate and mmu_t::walk
+ *
+ * @env: CPURISCVState
+ * @physical: This will be set to the calculated physical address
+ * @prot: The returned protection attributes
+ * @addr: The virtual address to be translated
+ * @fault_pte_addr: If not NULL, this will be set to the fault pte address
+ * when an error occurs on pte address translation.
+ * This will already be shifted to match htval.
+ * @access_type: The type of MMU access
+ * @mmu_idx: Indicates current privilege level
+ * @first_stage: Are we in first stage translation?
+ * Second stage is used for hypervisor guest translation
+ * @two_stage: Are we going to perform two stage translation
+ * @is_debug: Is this access from a debugger or the monitor?
+ */
+static int get_physical_address(CPURISCVState *env, hwaddr *physical,
+ int *prot, target_ulong addr,
+ target_ulong *fault_pte_addr,
+ int access_type, int mmu_idx,
+ bool first_stage, bool two_stage,
+ bool is_debug)
+{
+ /*
+ * NOTE: the env->pc value visible here will not be correct, but
+ * the value visible to the exception handler
+ * (riscv_cpu_do_interrupt) is correct.
+ */
+ MemTxResult res;
+ MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
+ bool use_background = false;
+
+ /*
+ * Check if we should use the background registers for the two
+ * stage translation. We don't need to check if we actually need
+ * two stage translation as that happened before this function
+ * was called. Background registers will be used if the guest has
+ * forced a two stage translation to be on (in HS or M mode).
+ */
+ if (!riscv_cpu_virt_enabled(env) && two_stage) {
+ use_background = true;
+ }
+
+ /*
+ * MPRV does not affect the virtual-machine load/store
+ * instructions, HLV, HLVX, and HSV.
+ */
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
+ if (get_field(env->mstatus, MSTATUS_MPRV)) {
+ mode = get_field(env->mstatus, MSTATUS_MPP);
+ }
+ }
+
+ if (first_stage == false) {
+ /* We are in stage 2 translation, this is similar to stage 1. */
+ /* Stage 2 is always taken as U-mode */
+ mode = PRV_U;
+ }
+
+ if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
+ *physical = addr;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TRANSLATE_SUCCESS;
+ }
+
+ *prot = 0;
+
+ hwaddr base;
+ int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
+
+ if (first_stage == true) {
+ mxr = get_field(env->mstatus, MSTATUS_MXR);
+ } else {
+ mxr = get_field(env->vsstatus, MSTATUS_MXR);
+ }
+
+ if (first_stage == true) {
+ if (use_background) {
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
+ vm = get_field(env->vsatp, SATP32_MODE);
+ } else {
+ base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
+ vm = get_field(env->vsatp, SATP64_MODE);
+ }
+ } else {
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP32_MODE);
+ } else {
+ base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP64_MODE);
+ }
+ }
+ widened = 0;
+ } else {
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
+ vm = get_field(env->hgatp, SATP32_MODE);
+ } else {
+ base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
+ vm = get_field(env->hgatp, SATP64_MODE);
+ }
+ widened = 2;
+ }
+ /* status.SUM is ignored when executing on the background registers */
+ sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
+ switch (vm) {
+ case VM_1_10_SV32:
+ levels = 2; ptidxbits = 10; ptesize = 4; break;
+ case VM_1_10_SV39:
+ levels = 3; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_SV48:
+ levels = 4; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_SV57:
+ levels = 5; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_MBARE:
+ *physical = addr;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TRANSLATE_SUCCESS;
+ default:
+ g_assert_not_reached();
+ }
+
+ CPUState *cs = env_cpu(env);
+ int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ target_ulong mask, masked_msbs;
+
+ if (TARGET_LONG_BITS > (va_bits - 1)) {
+ mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ } else {
+ mask = 0;
+ }
+ masked_msbs = (addr >> (va_bits - 1)) & mask;
+
+ if (masked_msbs != 0 && masked_msbs != mask) {
+ return TRANSLATE_FAIL;
+ }
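+
+ /*
+ * Worked example: for Sv39 (levels == 3, ptidxbits == 9, widened == 0)
+ * va_bits is 12 + 3 * 9 = 39, so bits [63:38] of the address must all
+ * equal bit 38; any other pattern fails translation here.
+ */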
+
+ int ptshift = (levels - 1) * ptidxbits;
+ int i;
+
+#if !TCG_OVERSIZED_GUEST
+restart:
+#endif
+ for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
+ target_ulong idx;
+ if (i == 0) {
+ idx = (addr >> (PGSHIFT + ptshift)) &
+ ((1 << (ptidxbits + widened)) - 1);
+ } else {
+ idx = (addr >> (PGSHIFT + ptshift)) &
+ ((1 << ptidxbits) - 1);
+ }
+
+ /* check that physical address of PTE is legal */
+ hwaddr pte_addr;
+
+ if (two_stage && first_stage) {
+ int vbase_prot;
+ hwaddr vbase;
+
+ /* Do the second stage translation on the base PTE address. */
+ int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
+ base, NULL, MMU_DATA_LOAD,
+ mmu_idx, false, true,
+ is_debug);
+
+ if (vbase_ret != TRANSLATE_SUCCESS) {
+ if (fault_pte_addr) {
+ *fault_pte_addr = (base + idx * ptesize) >> 2;
+ }
+ return TRANSLATE_G_STAGE_FAIL;
+ }
+
+ pte_addr = vbase + idx * ptesize;
+ } else {
+ pte_addr = base + idx * ptesize;
+ }
+
+ int pmp_prot;
+ int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
+ sizeof(target_ulong),
+ MMU_DATA_LOAD, PRV_S);
+ if (pmp_ret != TRANSLATE_SUCCESS) {
+ return TRANSLATE_PMP_FAIL;
+ }
+
+ target_ulong pte;
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
+ } else {
+ pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
+ }
+
+ if (res != MEMTX_OK) {
+ return TRANSLATE_FAIL;
+ }
+
+ hwaddr ppn = pte >> PTE_PPN_SHIFT;
+
+ if (!(pte & PTE_V)) {
+ /* Invalid PTE */
+ return TRANSLATE_FAIL;
+ } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
+ /* Inner PTE, continue walking */
+ base = ppn << PGSHIFT;
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+ /* Reserved leaf PTE flags: PTE_W */
+ return TRANSLATE_FAIL;
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+ /* Reserved leaf PTE flags: PTE_W + PTE_X */
+ return TRANSLATE_FAIL;
+ } else if ((pte & PTE_U) && ((mode != PRV_U) &&
+ (!sum || access_type == MMU_INST_FETCH))) {
+ /*
+ * User PTE flags when not U mode and mstatus.SUM is not set,
+ * or the access type is an instruction fetch
+ */
+ return TRANSLATE_FAIL;
+ } else if (!(pte & PTE_U) && (mode != PRV_S)) {
+ /* Supervisor PTE flags when not S mode */
+ return TRANSLATE_FAIL;
+ } else if (ppn & ((1ULL << ptshift) - 1)) {
+ /* Misaligned PPN */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
+ ((pte & PTE_X) && mxr))) {
+ /* Read access check failed */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
+ /* Write access check failed */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
+ /* Fetch access check failed */
+ return TRANSLATE_FAIL;
+ } else {
+ /* if necessary, set accessed and dirty bits. */
+ target_ulong updated_pte = pte | PTE_A |
+ (access_type == MMU_DATA_STORE ? PTE_D : 0);
+
+ /* Page table updates need to be atomic with MTTCG enabled */
+ if (updated_pte != pte) {
+ /*
+ * - if accessed or dirty bits need updating, and the PTE is
+ * in RAM, then we do so atomically with a compare and swap.
+ * - if the PTE is in IO space or ROM, then it can't be updated
+ * and we return TRANSLATE_FAIL.
+ * - if the PTE changed by the time we went to update it, then
+ * it is no longer valid and we must re-walk the page table.
+ */
+ MemoryRegion *mr;
+ hwaddr l = sizeof(target_ulong), addr1;
+ mr = address_space_translate(cs->as, pte_addr,
+ &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
+ if (memory_region_is_ram(mr)) {
+ target_ulong *pte_pa =
+ qemu_map_ram_ptr(mr->ram_block, addr1);
+#if TCG_OVERSIZED_GUEST
+ /* MTTCG is not enabled on oversized TCG guests so
+ * page table updates do not need to be atomic */
+ *pte_pa = pte = updated_pte;
+#else
+ target_ulong old_pte =
+ qatomic_cmpxchg(pte_pa, pte, updated_pte);
+ if (old_pte != pte) {
+ goto restart;
+ } else {
+ pte = updated_pte;
+ }
+#endif
+ } else {
+ /*
+ * misconfigured PTE in ROM (AD bits are not pre-set) or
+ * PTE is in IO space and can't be updated atomically
+ */
+ return TRANSLATE_FAIL;
+ }
+ }
+
+ /*
+ * for superpage mappings, make a fake leaf PTE for the TLB's
+ * benefit.
+ */
+ target_ulong vpn = addr >> PGSHIFT;
+ *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
+ (addr & ~TARGET_PAGE_MASK);
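+
+ /*
+ * Example: a leaf found at level i == 1 of an Sv39 walk (ptshift == 9)
+ * is a 2 MiB megapage; the low 9 bits of the VPN are folded into the
+ * PPN above so each 4 KiB page within it gets a correct TLB entry.
+ */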
+
+ /* set permissions on the TLB entry */
+ if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
+ *prot |= PAGE_READ;
+ }
+ if ((pte & PTE_X)) {
+ *prot |= PAGE_EXEC;
+ }
+ /*
+ * add write permission on stores or if the page is already dirty,
+ * so that we TLB miss on later writes to update the dirty bit
+ */
+ if ((pte & PTE_W) &&
+ (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
+ *prot |= PAGE_WRITE;
+ }
+ return TRANSLATE_SUCCESS;
+ }
+ }
+ return TRANSLATE_FAIL;
+}
+
+static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
+ MMUAccessType access_type, bool pmp_violation,
+ bool first_stage, bool two_stage)
+{
+ CPUState *cs = env_cpu(env);
+ int page_fault_exceptions, vm;
+ uint64_t satp_mode;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ satp_mode = SATP32_MODE;
+ } else {
+ satp_mode = SATP64_MODE;
+ }
+
+ if (first_stage) {
+ vm = get_field(env->satp, satp_mode);
+ } else {
+ vm = get_field(env->hgatp, satp_mode);
+ }
+
+ page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+ }
+ break;
+ case MMU_DATA_LOAD:
+ if (two_stage && !first_stage) {
+ cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
+ break;
+ case MMU_DATA_STORE:
+ if (two_stage && !first_stage) {
+ cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ env->badaddr = address;
+ env->two_stage_lookup = two_stage;
+}
+
+hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ hwaddr phys_addr;
+ int prot;
+ int mmu_idx = cpu_mmu_index(&cpu->env, false);
+
+ if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
+ true, riscv_cpu_virt_enabled(env), true)) {
+ return -1;
+ }
+
+ if (riscv_cpu_virt_enabled(env)) {
+ if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
+ 0, mmu_idx, false, true, true)) {
+ return -1;
+ }
+ }
+
+ return phys_addr & TARGET_PAGE_MASK;
+}
+
+void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ if (access_type == MMU_DATA_STORE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ } else if (access_type == MMU_DATA_LOAD) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ } else {
+ cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
+ }
+
+ env->badaddr = addr;
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx);
+ riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
+}
+
+void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
+ break;
+ case MMU_DATA_LOAD:
+ cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ break;
+ case MMU_DATA_STORE:
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ env->badaddr = addr;
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx);
+ riscv_raise_exception(env, cs->exception_index, retaddr);
+}
+
+bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ vaddr im_address;
+ hwaddr pa = 0;
+ int prot, prot2, prot_pmp;
+ bool pmp_violation = false;
+ bool first_stage_error = true;
+ bool two_stage_lookup = false;
+ int ret = TRANSLATE_FAIL;
+ int mode = mmu_idx;
+ /* default TLB page size */
+ target_ulong tlb_size = TARGET_PAGE_SIZE;
+
+ env->guest_phys_fault_addr = 0;
+
+ qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, address, access_type, mmu_idx);
+
+ /*
+ * MPRV does not affect the virtual-machine load/store
+ * instructions, HLV, HLVX, and HSV.
+ */
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
+ get_field(env->mstatus, MSTATUS_MPRV)) {
+ mode = get_field(env->mstatus, MSTATUS_MPP);
+ if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
+ two_stage_lookup = true;
+ }
+ }
+
+ if (riscv_cpu_virt_enabled(env) ||
+ ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
+ access_type != MMU_INST_FETCH)) {
+ /* Two stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address,
+ &env->guest_phys_fault_addr, access_type,
+ mmu_idx, true, true, false);
+
+ /*
+ * A G-stage exception may be triggered during two-stage lookup,
+ * in which case env->guest_phys_fault_addr has already been set
+ * in get_physical_address().
+ */
+ if (ret == TRANSLATE_G_STAGE_FAIL) {
+ first_stage_error = false;
+ access_type = MMU_DATA_LOAD;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+
+ if (ret == TRANSLATE_SUCCESS) {
+ /* Second stage lookup */
+ im_address = pa;
+
+ ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
+ access_type, mmu_idx, false, true,
+ false);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, im_address, ret, pa, prot2);
+
+ prot &= prot2;
+
+ if (ret == TRANSLATE_SUCCESS) {
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ size, access_type, mode);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ " %d tlb_size " TARGET_FMT_lu "\n",
+ __func__, pa, ret, prot_pmp, tlb_size);
+
+ prot &= prot_pmp;
+ }
+
+ if (ret != TRANSLATE_SUCCESS) {
+ /*
+ * Guest physical address translation failed, this is a HS
+ * level exception
+ */
+ first_stage_error = false;
+ env->guest_phys_fault_addr = (im_address |
+ (address &
+ (TARGET_PAGE_SIZE - 1))) >> 2;
+ }
+ }
+ } else {
+ /* Single stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address, NULL,
+ access_type, mmu_idx, true, false, false);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+
+ if (ret == TRANSLATE_SUCCESS) {
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ size, access_type, mode);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
+ " %d tlb_size " TARGET_FMT_lu "\n",
+ __func__, pa, ret, prot_pmp, tlb_size);
+
+ prot &= prot_pmp;
+ }
+ }
+
+ if (ret == TRANSLATE_PMP_FAIL) {
+ pmp_violation = true;
+ }
+
+ if (ret == TRANSLATE_SUCCESS) {
+ tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
+ prot, mmu_idx, tlb_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ raise_mmu_exception(env, address, access_type, pmp_violation,
+ first_stage_error,
+ riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx));
+ riscv_raise_exception(env, cs->exception_index, retaddr);
+ }
+
+ return true;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+/*
+ * Handle Traps
+ *
+ * Adapted from Spike's processor_t::take_trap.
+ *
+ */
+void riscv_cpu_do_interrupt(CPUState *cs)
+{
+#if !defined(CONFIG_USER_ONLY)
+
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ uint64_t s;
+
+ /*
+ * cs->exception_index is 32 bits wide, unlike mcause which is
+ * XLEN bits wide, so we mask off the MSB and separate into trap
+ * type and cause.
+ */
+ bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
+ target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
+ target_ulong deleg = async ? env->mideleg : env->medeleg;
+ bool write_tval = false;
+ target_ulong tval = 0;
+ target_ulong htval = 0;
+ target_ulong mtval2 = 0;
+
+ if (cause == RISCV_EXCP_SEMIHOST) {
+ if (env->priv >= PRV_S) {
+ env->gpr[xA0] = do_common_semihosting(cs);
+ env->pc += 4;
+ return;
+ }
+ cause = RISCV_EXCP_BREAKPOINT;
+ }
+
+ if (!async) {
+ /* set tval to badaddr for traps with address information */
+ switch (cause) {
+ case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
+ case RISCV_EXCP_INST_ADDR_MIS:
+ case RISCV_EXCP_INST_ACCESS_FAULT:
+ case RISCV_EXCP_LOAD_ADDR_MIS:
+ case RISCV_EXCP_STORE_AMO_ADDR_MIS:
+ case RISCV_EXCP_LOAD_ACCESS_FAULT:
+ case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
+ case RISCV_EXCP_INST_PAGE_FAULT:
+ case RISCV_EXCP_LOAD_PAGE_FAULT:
+ case RISCV_EXCP_STORE_PAGE_FAULT:
+ write_tval = true;
+ tval = env->badaddr;
+ break;
+ default:
+ break;
+ }
+ /* ecall is dispatched as one cause so translate based on mode */
+ if (cause == RISCV_EXCP_U_ECALL) {
+ assert(env->priv <= PRV_M);
+
+ if (env->priv == PRV_M) {
+ cause = RISCV_EXCP_M_ECALL;
+ } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_VS_ECALL;
+ } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_S_ECALL;
+ } else if (env->priv == PRV_U) {
+ cause = RISCV_EXCP_U_ECALL;
+ }
+ }
+ }
+
+ trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
+ riscv_cpu_get_trap_name(cause, async));
+
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
+ "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
+ __func__, env->mhartid, async, cause, env->pc, tval,
+ riscv_cpu_get_trap_name(cause, async));
+
+ if (env->priv <= PRV_S &&
+ cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
+ /* handle the trap in S-mode */
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
+
+ if (env->two_stage_lookup && write_tval) {
+ /*
+ * If we are writing a guest virtual address to stval, set
+ * this to 1. If we are trapping to VS we will set this to 0
+ * later.
+ */
+ env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
+ } else {
+ /* For other HS-mode traps, we set this to 0. */
+ env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
+ }
+
+ if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
+ /* Trap to VS mode */
+ /*
+ * See if we need to adjust the cause: yes if it's a VS-mode
+ * interrupt, no if the hypervisor has delegated one of HS mode's
+ * interrupts.
+ */
+ if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
+ cause == IRQ_VS_EXT) {
+ cause = cause - 1;
+ }
+ env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
+ } else if (riscv_cpu_virt_enabled(env)) {
+ /* Trap into HS mode, from virt */
+ riscv_cpu_swap_hypervisor_regs(env);
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
+ env->priv);
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
+ riscv_cpu_virt_enabled(env));
+
+ htval = env->guest_phys_fault_addr;
+
+ riscv_cpu_set_virt_enabled(env, 0);
+ } else {
+ /* Trap into HS mode */
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
+ htval = env->guest_phys_fault_addr;
+ }
+ }
+
+ s = env->mstatus;
+ s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
+ s = set_field(s, MSTATUS_SPP, env->priv);
+ s = set_field(s, MSTATUS_SIE, 0);
+ env->mstatus = s;
+ env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
+ env->sepc = env->pc;
+ env->stval = tval;
+ env->htval = htval;
+ env->pc = (env->stvec >> 2 << 2) +
+ ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
+ riscv_cpu_set_mode(env, PRV_S);
+ } else {
+ /* handle the trap in M-mode */
+ if (riscv_has_ext(env, RVH)) {
+ if (riscv_cpu_virt_enabled(env)) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
+ riscv_cpu_virt_enabled(env));
+ if (riscv_cpu_virt_enabled(env) && tval) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
+ }
+
+ mtval2 = env->guest_phys_fault_addr;
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_virt_enabled(env, 0);
+ }
+
+ s = env->mstatus;
+ s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
+ s = set_field(s, MSTATUS_MPP, env->priv);
+ s = set_field(s, MSTATUS_MIE, 0);
+ env->mstatus = s;
+ env->mcause = cause | ~(((target_ulong)-1) >> async);
+ env->mepc = env->pc;
+ env->mtval = tval;
+ env->mtval2 = mtval2;
+ env->pc = (env->mtvec >> 2 << 2) +
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+ riscv_cpu_set_mode(env, PRV_M);
+ }
+
+ /*
+ * NOTE: it is not necessary to yield load reservations here. It is only
+ * necessary for an SC from "another hart" to cause a load reservation
+ * to be yielded. Refer to the memory consistency model section of the
+ * RISC-V ISA Specification.
+ */
+
+ env->two_stage_lookup = false;
+#endif
+ cs->exception_index = RISCV_EXCP_NONE; /* mark as handled to QEMU */
+}
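+
+/*
+ * Worked example of the vectored dispatch above: with stvec == 0x80000001
+ * (BASE 0x80000000, MODE 1) and asynchronous cause 5 (supervisor timer
+ * interrupt), the new pc is 0x80000000 + 5 * 4 = 0x80000014; synchronous
+ * traps always enter at BASE itself.
+ */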
diff --git a/target/riscv/cpu_user.h b/target/riscv/cpu_user.h
new file mode 100644
index 000000000..02afad608
--- /dev/null
+++ b/target/riscv/cpu_user.h
@@ -0,0 +1,19 @@
+#ifndef TARGET_RISCV_CPU_USER_H
+#define TARGET_RISCV_CPU_USER_H
+
+#define xRA 1 /* return address (aka link register) */
+#define xSP 2 /* stack pointer */
+#define xGP 3 /* global pointer */
+#define xTP 4 /* thread pointer */
+
+#define xA0 10 /* gpr[10-17] are syscall arguments */
+#define xA1 11
+#define xA2 12
+#define xA3 13
+#define xA4 14
+#define xA5 15
+#define xA6 16
+#define xA7 17 /* syscall number for RVI ABI */
+#define xT0 5 /* syscall number for RVE ABI */
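+
+/*
+ * Example: the semihosting path in riscv_cpu_do_interrupt() returns its
+ * result in a0 via env->gpr[xA0].
+ */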
+
+#endif
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
new file mode 100644
index 000000000..9f4195489
--- /dev/null
+++ b/target/riscv/csr.c
@@ -0,0 +1,2098 @@
+/*
+ * RISC-V Control and Status Registers.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+
+/* CSR function table public API */
+void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
+{
+ *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
+}
+
+void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
+{
+ csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
+}
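+
+/*
+ * Sketch of how a board or custom extension might hook its own CSR through
+ * this API (CSR_MYCSR and the my_* callbacks are hypothetical names; the
+ * designated initializers assume the riscv_csr_operations fields declared
+ * in cpu.h):
+ *
+ *     static riscv_csr_operations my_csr_ops = {
+ *         .name = "mycsr",
+ *         .predicate = any,
+ *         .read = read_mycsr,
+ *         .write = write_mycsr,
+ *     };
+ *
+ *     riscv_set_csr_ops(CSR_MYCSR, &my_csr_ops);
+ */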
+
+/* Predicates */
+static RISCVException fs(CPURISCVState *env, int csrno)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* loosen the check condition for fcsr when the vector extension is enabled */
+ if ((csrno == CSR_FCSR) && (env->misa_ext & RVV)) {
+ return RISCV_EXCP_NONE;
+ }
+ if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+#endif
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException vs(CPURISCVState *env, int csrno)
+{
+ if (env->misa_ext & RVV) {
+ return RISCV_EXCP_NONE;
+ }
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException ctr(CPURISCVState *env, int csrno)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CPUState *cs = env_cpu(env);
+ RISCVCPU *cpu = RISCV_CPU(cs);
+
+ if (!cpu->cfg.ext_counters) {
+ /* The Counters extension is not enabled */
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (riscv_cpu_virt_enabled(env)) {
+ switch (csrno) {
+ case CSR_CYCLE:
+ if (!get_field(env->hcounteren, COUNTEREN_CY) &&
+ get_field(env->mcounteren, COUNTEREN_CY)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_TIME:
+ if (!get_field(env->hcounteren, COUNTEREN_TM) &&
+ get_field(env->mcounteren, COUNTEREN_TM)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_INSTRET:
+ if (!get_field(env->hcounteren, COUNTEREN_IR) &&
+ get_field(env->mcounteren, COUNTEREN_IR)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
+ if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
+ get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ }
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ switch (csrno) {
+ case CSR_CYCLEH:
+ if (!get_field(env->hcounteren, COUNTEREN_CY) &&
+ get_field(env->mcounteren, COUNTEREN_CY)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_TIMEH:
+ if (!get_field(env->hcounteren, COUNTEREN_TM) &&
+ get_field(env->mcounteren, COUNTEREN_TM)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_INSTRETH:
+ if (!get_field(env->hcounteren, COUNTEREN_IR) &&
+ get_field(env->mcounteren, COUNTEREN_IR)) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
+ if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
+ get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ break;
+ }
+ }
+ }
+#endif
+ return RISCV_EXCP_NONE;
+}
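+
+/*
+ * Gating example for the checks above: with mcounteren.TM == 1 and
+ * hcounteren.TM == 0, a guest (V == 1) read of the time CSR raises a
+ * virtual instruction fault so the hypervisor can emulate it.
+ */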
+
+static RISCVException ctr32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return ctr(env, csrno);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static RISCVException any(CPURISCVState *env, int csrno)
+{
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException any32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return any(env, csrno);
+}
+
+static RISCVException smode(CPURISCVState *env, int csrno)
+{
+ if (riscv_has_ext(env, RVS)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException hmode(CPURISCVState *env, int csrno)
+{
+ if (riscv_has_ext(env, RVS) &&
+ riscv_has_ext(env, RVH)) {
+ /* Hypervisor extension is supported */
+ if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
+ env->priv == PRV_M) {
+ return RISCV_EXCP_NONE;
+ } else {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException hmode32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ if (!riscv_cpu_virt_enabled(env)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ } else {
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+ }
+ }
+
+ return hmode(env, csrno);
+}
+
+/* Check whether the Pointer Masking registers may be accessed */
+static RISCVException pointer_masking(CPURISCVState *env, int csrno)
+{
+ /* Check if j-ext is present */
+ if (riscv_has_ext(env, RVJ)) {
+ return RISCV_EXCP_NONE;
+ }
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException pmp(CPURISCVState *env, int csrno)
+{
+ if (riscv_feature(env, RISCV_FEATURE_PMP)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+
+static RISCVException epmp(CPURISCVState *env, int csrno)
+{
+ if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ return RISCV_EXCP_ILLEGAL_INST;
+}
+#endif
+
+/* User Floating-Point CSRs */
+static RISCVException read_fflags(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = riscv_cpu_get_fflags(env);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_fflags(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->mstatus |= MSTATUS_FS;
+#endif
+ riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_frm(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->frm;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_frm(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->mstatus |= MSTATUS_FS;
+#endif
+ env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_fcsr(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
+ | (env->frm << FSR_RD_SHIFT);
+ if (vs(env, csrno) == RISCV_EXCP_NONE) {
+ *val |= (env->vxrm << FSR_VXRM_SHIFT)
+ | (env->vxsat << FSR_VXSAT_SHIFT);
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_fcsr(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->mstatus |= MSTATUS_FS;
+#endif
+ env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
+ if (vs(env, csrno) == RISCV_EXCP_NONE) {
+ env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT;
+ env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT;
+ }
+ riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
+ return RISCV_EXCP_NONE;
+}
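+
+/*
+ * fcsr layout reminder: fflags occupies bits [4:0] and frm bits [7:5];
+ * when the vector extension is present, vxsat and vxrm are folded in as
+ * well, which is why the accessors above consult the vs() predicate.
+ */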
+
+static RISCVException read_vtype(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vtype;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vl(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vl;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vxrm(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vxrm;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vxrm(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vxrm = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vxsat(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vxsat;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vxsat(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vxsat = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vstart(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vstart;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vstart(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vstart = val;
+ return RISCV_EXCP_NONE;
+}
+
+/* User Timers and Counters */
+static RISCVException read_instret(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+#if !defined(CONFIG_USER_ONLY)
+ if (icount_enabled()) {
+ *val = icount_get();
+ } else {
+ *val = cpu_get_host_ticks();
+ }
+#else
+ *val = cpu_get_host_ticks();
+#endif
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_instreth(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+#if !defined(CONFIG_USER_ONLY)
+ if (icount_enabled()) {
+ *val = icount_get() >> 32;
+ } else {
+ *val = cpu_get_host_ticks() >> 32;
+ }
+#else
+ *val = cpu_get_host_ticks() >> 32;
+#endif
+ return RISCV_EXCP_NONE;
+}
+
+#if defined(CONFIG_USER_ONLY)
+static RISCVException read_time(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = cpu_get_host_ticks();
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_timeh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = cpu_get_host_ticks() >> 32;
+ return RISCV_EXCP_NONE;
+}
+
+#else /* CONFIG_USER_ONLY */
+
+static RISCVException read_time(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
+
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_timeh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
+
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
+ return RISCV_EXCP_NONE;
+}
+
+/* Machine constants */
+
+#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP)
+#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP)
+#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)
+
+static const target_ulong delegable_ints = S_MODE_INTERRUPTS |
+ VS_MODE_INTERRUPTS;
+static const target_ulong vs_delegable_ints = VS_MODE_INTERRUPTS;
+static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
+ VS_MODE_INTERRUPTS;
+#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
+ (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
+ (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
+ (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
+ (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
+ (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
+ (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
+ (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
+ (1ULL << (RISCV_EXCP_U_ECALL)) | \
+ (1ULL << (RISCV_EXCP_S_ECALL)) | \
+ (1ULL << (RISCV_EXCP_VS_ECALL)) | \
+ (1ULL << (RISCV_EXCP_M_ECALL)) | \
+ (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
+ (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
+ (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
+ (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
+static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
+ ~((1ULL << (RISCV_EXCP_S_ECALL)) |
+ (1ULL << (RISCV_EXCP_VS_ECALL)) |
+ (1ULL << (RISCV_EXCP_M_ECALL)) |
+ (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
+ (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
+ (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
+ (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
+static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
+ SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
+ SSTATUS_SUM | SSTATUS_MXR;
+static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
+static const target_ulong hip_writable_mask = MIP_VSSIP;
+static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
+static const target_ulong vsip_writable_mask = MIP_VSSIP;
+
+static const char valid_vm_1_10_32[16] = {
+ [VM_1_10_MBARE] = 1,
+ [VM_1_10_SV32] = 1
+};
+
+static const char valid_vm_1_10_64[16] = {
+ [VM_1_10_MBARE] = 1,
+ [VM_1_10_SV39] = 1,
+ [VM_1_10_SV48] = 1,
+ [VM_1_10_SV57] = 1
+};
+
+/* Machine Information Registers */
+static RISCVException read_zero(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = 0;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mhartid(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mhartid;
+ return RISCV_EXCP_NONE;
+}
+
+/* Machine Trap Setup */
+
+/* We do not store SD explicitly, only compute it on demand. */
+static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
+{
+ if ((status & MSTATUS_FS) == MSTATUS_FS ||
+ (status & MSTATUS_XS) == MSTATUS_XS) {
+ switch (xl) {
+ case MXL_RV32:
+ return status | MSTATUS32_SD;
+ case MXL_RV64:
+ return status | MSTATUS64_SD;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return status;
+}
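+
+/*
+ * Example: on RV64, an mstatus value with FS == 3 (dirty) reads back with
+ * bit 63 (MSTATUS64_SD) set, so a context switcher can test a single sign
+ * bit instead of decoding the FS/XS fields.
+ */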
+
+static RISCVException read_mstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static int validate_vm(CPURISCVState *env, target_ulong vm)
+{
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ return valid_vm_1_10_32[vm & 0xf];
+ } else {
+ return valid_vm_1_10_64[vm & 0xf];
+ }
+}
+
+static RISCVException write_mstatus(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus = env->mstatus;
+ uint64_t mask = 0;
+
+ /* flush tlb on mstatus fields that affect VM */
+ if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
+ MSTATUS_MPRV | MSTATUS_SUM)) {
+ tlb_flush(env_cpu(env));
+ }
+ mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
+ MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
+ MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
+ MSTATUS_TW;
+
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ /*
+ * RV32: MPV and GVA are not in mstatus. The current plan is to
+ * add them to mstatush. For now, we just don't support it.
+ */
+ mask |= MSTATUS_MPV | MSTATUS_GVA;
+ }
+
+ mstatus = (mstatus & ~mask) | (val & mask);
+
+ if (riscv_cpu_mxl(env) == MXL_RV64) {
+ /* SXL and UXL fields are for now read only */
+ mstatus = set_field(mstatus, MSTATUS64_SXL, MXL_RV64);
+ mstatus = set_field(mstatus, MSTATUS64_UXL, MXL_RV64);
+ }
+ env->mstatus = mstatus;
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mstatush(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mstatus >> 32;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mstatush(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t valh = (uint64_t)val << 32;
+ uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
+
+ if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->mstatus = (env->mstatus & ~mask) | (valh & mask);
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_misa(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ target_ulong misa;
+
+ switch (env->misa_mxl) {
+ case MXL_RV32:
+ misa = (target_ulong)MXL_RV32 << 30;
+ break;
+#ifdef TARGET_RISCV64
+ case MXL_RV64:
+ misa = (target_ulong)MXL_RV64 << 62;
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+
+ *val = misa | env->misa_ext;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_misa(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
+ /* drop write to misa */
+ return RISCV_EXCP_NONE;
+ }
+
+ /* 'I' or 'E' must be present */
+ if (!(val & (RVI | RVE))) {
+ /* It is not, drop write to misa */
+ return RISCV_EXCP_NONE;
+ }
+
+ /* 'E' excludes all other extensions */
+ if (val & RVE) {
+ /*
+ * When we support 'E' we can do "val = RVE;"; however,
+ * for now we just drop writes if 'E' is present.
+ */
+ return RISCV_EXCP_NONE;
+ }
+
+ /*
+ * misa.MXL writes are not supported by QEMU.
+ * Drop writes to those bits.
+ */
+
+ /* Mask extensions that are not supported by this hart */
+ val &= env->misa_ext_mask;
+
+ /* Mask extensions that are not supported by QEMU */
+ val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
+
+ /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
+ if ((val & RVD) && !(val & RVF)) {
+ val &= ~RVD;
+ }
+
+ /*
+ * Suppress 'C' if the next instruction is not aligned.
+ * TODO: this should check next_pc
+ */
+ if ((val & RVC) && (GETPC() & ~3) != 0) {
+ val &= ~RVC;
+ }
+
+ /* If nothing changed, do nothing. */
+ if (val == env->misa_ext) {
+ return RISCV_EXCP_NONE;
+ }
+
+ /* flush translation cache */
+ tb_flush(env_cpu(env));
+ env->misa_ext = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_medeleg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->medeleg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_medeleg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mideleg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mideleg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mideleg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints);
+ if (riscv_has_ext(env, RVH)) {
+ env->mideleg |= VS_MODE_INTERRUPTS;
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mie(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mie;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mie(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mie = (env->mie & ~all_ints) | (val & all_ints);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mtvec(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mtvec;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mtvec(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
+ if ((val & 3) < 2) {
+ env->mtvec = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mcounteren = val;
+ return RISCV_EXCP_NONE;
+}
+
+/* Machine Trap Handling */
+static RISCVException read_mscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mscratch;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mscratch(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mscratch = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mepc;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mepc(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mepc = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mcause(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcause;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcause(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mcause = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mtval(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mtval;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mtval(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mtval = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_mip(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+ /* Allow software control of delegable interrupts not claimed by hardware */
+ target_ulong mask = write_mask & delegable_ints & ~env->miclaim;
+ uint32_t old_mip;
+
+ if (mask) {
+ old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
+ } else {
+ old_mip = env->mip;
+ }
+
+ if (ret_value) {
+ *ret_value = old_mip;
+ }
+
+ return RISCV_EXCP_NONE;
+}
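+
+/*
+ * The rmw_*() helpers fold read-modify-write into one call: the CSR
+ * effectively becomes (old & ~write_mask) | (new_value & write_mask) and
+ * *ret_value receives the old value, so a pure read is a call with a zero
+ * write_mask.
+ */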
+
+/* Supervisor Trap Setup */
+static RISCVException read_sstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ target_ulong mask = (sstatus_v1_10_mask);
+
+ /* TODO: Use SXL not MXL. */
+ *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_sstatus(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong mask = (sstatus_v1_10_mask);
+ target_ulong newval = (env->mstatus & ~mask) | (val & mask);
+ return write_mstatus(env, CSR_MSTATUS, newval);
+}
+
+static RISCVException read_vsie(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ /* Shift the VS bits to their S bit location in vsie */
+ *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_sie(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ if (riscv_cpu_virt_enabled(env)) {
+ read_vsie(env, CSR_VSIE, val);
+ } else {
+ *val = env->mie & env->mideleg;
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vsie(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ /* Shift the S bits to their VS bit location in mie */
+ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) |
+ ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS);
+ return write_mie(env, CSR_MIE, newval);
+}
+
+static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
+{
+ if (riscv_cpu_virt_enabled(env)) {
+ write_vsie(env, CSR_VSIE, val);
+ } else {
+ target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) |
+ (val & S_MODE_INTERRUPTS);
+ write_mie(env, CSR_MIE, newval);
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_stvec(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->stvec;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_stvec(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
+ if ((val & 3) < 2) {
+ env->stvec = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_scounteren(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->scounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_scounteren(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->scounteren = val;
+ return RISCV_EXCP_NONE;
+}
+
+/* Supervisor Trap Handling */
+static RISCVException read_sscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->sscratch;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_sscratch(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->sscratch = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_sepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->sepc;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_sepc(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->sepc = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_scause(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->scause;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_scause(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->scause = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_stval(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->stval;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_stval(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->stval = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ /* Shift the S bits to their VS bit location in mip */
+ int ret = rmw_mip(env, 0, ret_value, new_value << 1,
+ (write_mask << 1) & vsip_writable_mask & env->hideleg);
+
+ if (ret_value) {
+ *ret_value &= VS_MODE_INTERRUPTS;
+ /* Shift the VS bits to their S bit location in vsip */
+ *ret_value >>= 1;
+ }
+ return ret;
+}
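+
+/*
+ * Example of the shifting above: MIP_VSSIP lives at bit 2 of mip, but a
+ * guest running at VS level sees it as SSIP (bit 1) in vsip, hence the
+ * << 1 on the way in and the >> 1 on the value handed back.
+ */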
+
+static RISCVException rmw_sip(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ int ret;
+
+ if (riscv_cpu_virt_enabled(env)) {
+ ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask);
+ } else {
+ ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
+ write_mask & env->mideleg & sip_writable_mask);
+ }
+
+ if (ret_value) {
+ *ret_value &= env->mideleg;
+ }
+ return ret;
+}
+
+/* Supervisor Protection and Translation */
+static RISCVException read_satp(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ *val = 0;
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ } else {
+ *val = env->satp;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_satp(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong vm, mask, asid;
+
+ if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ vm = validate_vm(env, get_field(val, SATP32_MODE));
+ mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
+ asid = (val ^ env->satp) & SATP32_ASID;
+ } else {
+ vm = validate_vm(env, get_field(val, SATP64_MODE));
+ mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
+ asid = (val ^ env->satp) & SATP64_ASID;
+ }
+
+ if (vm && mask) {
+ if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ } else {
+ if (asid) {
+ tlb_flush(env_cpu(env));
+ }
+ env->satp = val;
+ }
+ }
+ return RISCV_EXCP_NONE;
+}
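+
+/*
+ * satp layout reminder (RV64): MODE in bits [63:60] (Sv39 == 8), ASID in
+ * bits [59:44], PPN in bits [43:0]. A write that changes the ASID triggers
+ * the tlb_flush() above, and a write with an unsupported MODE is silently
+ * dropped.
+ */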
+
+/* Hypervisor Extensions */
+static RISCVException read_hstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->hstatus;
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ /* We only support 64-bit VSXL */
+ *val = set_field(*val, HSTATUS_VSXL, 2);
+ }
+ /* We only support little endian */
+ *val = set_field(*val, HSTATUS_VSBE, 0);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hstatus(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->hstatus = val;
+ if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
+ qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
+ }
+ if (get_field(val, HSTATUS_VSBE) != 0) {
+ qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->hedeleg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->hedeleg = val & vs_delegable_excps;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_hideleg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->hideleg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hideleg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->hideleg = val & vs_delegable_ints;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ int ret = rmw_mip(env, 0, ret_value, new_value,
+ write_mask & hvip_writable_mask);
+
+ if (ret_value) {
+ *ret_value &= hvip_writable_mask;
+ }
+ return ret;
+}
+
+static RISCVException rmw_hip(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ int ret = rmw_mip(env, 0, ret_value, new_value,
+ write_mask & hip_writable_mask);
+
+ if (ret_value) {
+ *ret_value &= hip_writable_mask;
+ }
+ return ret;
+}
+
+static RISCVException read_hie(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mie & VS_MODE_INTERRUPTS;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hie(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS);
+ return write_mie(env, CSR_MIE, newval);
+}
+
+static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->hcounteren;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->hcounteren = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hgeie(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ if (val) {
+ qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_htval(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->htval;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_htval(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->htval = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_htinst(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->htinst;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_htinst(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hgeip(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ if (val) {
+ qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_hgatp(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->hgatp;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_hgatp(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->hgatp = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ *val = env->htimedelta;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
+ } else {
+ env->htimedelta = val;
+ }
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ *val = env->htimedelta >> 32;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ if (!env->rdtime_fn) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
+ return RISCV_EXCP_NONE;
+}
+
+/* Virtual CSR Registers */
+static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vsstatus;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mask = (target_ulong)-1;
+ env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
+ return RISCV_EXCP_NONE;
+}
+
+static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vstvec;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vstvec(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vstvec = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vsscratch;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vsscratch = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vsepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vsepc;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vsepc(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vsepc = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vscause(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vscause;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vscause(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vscause = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vstval(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vstval;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vstval(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vstval = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_vsatp(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->vsatp;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_vsatp(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->vsatp = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mtval2(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mtval2;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mtval2(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mtval2 = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mtinst(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mtinst;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mtinst(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mtinst = val;
+ return RISCV_EXCP_NONE;
+}
+
+/* Physical Memory Protection */
+static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = mseccfg_csr_read(env);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ mseccfg_csr_write(env, val);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
+ return RISCV_EXCP_NONE;
+}
+
+/*
+ * Functions to access the Pointer Masking feature registers.
+ * We have to check whether the current privilege level may modify
+ * a CSR of the given mode.
+ */
+static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
+{
+ int csr_priv = get_field(csrno, 0x300);
+ int pm_current;
+
+ /*
+ * If the privilege levels differ, we are accessing the csr from a
+ * higher privilege level, so allow the access
+ */
+ if (env->priv != csr_priv) {
+ return false;
+ }
+ switch (env->priv) {
+ case PRV_M:
+ pm_current = get_field(env->mmte, M_PM_CURRENT);
+ break;
+ case PRV_S:
+ pm_current = get_field(env->mmte, S_PM_CURRENT);
+ break;
+ case PRV_U:
+ pm_current = get_field(env->mmte, U_PM_CURRENT);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ /* Same privilege level, so the csr may only be modified if pm.current==1 */
+ return !pm_current;
+}
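+
+/*
+ * Worked example of the check above, relying on the RISC-V convention that
+ * csr[9:8] encodes the lowest privilege level allowed to access the CSR
+ * (which is what get_field(csrno, 0x300) extracts): for CSR_UMTE that
+ * field is 0 (U), for CSR_SMTE it is 1 (S) and for CSR_MMTE it is 3 (M).
+ * From M-mode, a write to CSR_SMTE comes from a higher privilege level
+ * and is always allowed; from S-mode the same write is same-privilege
+ * and only goes through when S_PM_CURRENT is set in mmte.
+ */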
+
+static RISCVException read_mmte(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mmte & MMTE_MASK;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mmte(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+ target_ulong wpri_val = val & MMTE_MASK;
+
+ if (val != wpri_val) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
+ "MMTE: WPRI violation written 0x", val,
+ "vs expected 0x", wpri_val);
+ }
+ /* for machine mode pm.current is hardwired to 1 */
+ wpri_val |= MMTE_M_PM_CURRENT;
+
+ /* hardwiring pm.instruction bit to 0, since it's not supported yet */
+ wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
+ env->mmte = wpri_val | PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_smte(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mmte & SMTE_MASK;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_smte(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong wpri_val = val & SMTE_MASK;
+
+ if (val != wpri_val) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
+ "SMTE: WPRI violation written 0x", val,
+ "vs expected 0x", wpri_val);
+ }
+
+ /* if pm.current==0 we can't modify current PM CSRs */
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ wpri_val |= (env->mmte & ~SMTE_MASK);
+ write_mmte(env, csrno, wpri_val);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_umte(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mmte & UMTE_MASK;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_umte(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ target_ulong wpri_val = val & UMTE_MASK;
+
+ if (val != wpri_val) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
+ "UMTE: WPRI violation written 0x", val,
+ "vs expected 0x", wpri_val);
+ }
+
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ wpri_val |= (env->mmte & ~UMTE_MASK);
+ write_mmte(env, csrno, wpri_val);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mpmmask;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ env->mpmmask = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_spmmask(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->spmmask;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_spmmask(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ /* if pm.current==0 we can't modify current PM CSRs */
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+ env->spmmask = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_upmmask(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->upmmask;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_upmmask(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ /* if pm.current==0 we can't modify current PM CSRs */
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+ env->upmmask = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mpmbase;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ env->mpmbase = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_spmbase(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->spmbase;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_spmbase(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ /* if pm.current==0 we can't modify current PM CSRs */
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+ env->spmbase = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_upmbase(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->upmbase;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_upmbase(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ uint64_t mstatus;
+
+ /* if pm.current==0 we can't modify current PM CSRs */
+ if (check_pm_current_disabled(env, csrno)) {
+ return RISCV_EXCP_NONE;
+ }
+ env->upmbase = val;
+ env->mmte |= PM_EXT_DIRTY;
+
+ /* Set XS and SD bits, since PM CSRs are dirty */
+ mstatus = env->mstatus | MSTATUS_XS;
+ write_mstatus(env, csrno, mstatus);
+ return RISCV_EXCP_NONE;
+}
+
+#endif
+
+/*
+ * riscv_csrrw - read and/or update control and status register
+ *
+ * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
+ * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
+ * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
+ * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
+ */
+
+RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ RISCVException ret;
+ target_ulong old_value;
+ RISCVCPU *cpu = env_archcpu(env);
+ int read_only = get_field(csrno, 0xC00) == 3;
+
+ /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
+#if !defined(CONFIG_USER_ONLY)
+ int effective_priv = env->priv;
+
+ if (riscv_has_ext(env, RVH) &&
+ env->priv == PRV_S &&
+ !riscv_cpu_virt_enabled(env)) {
+ /*
+ * We are in S mode without virtualisation, therefore we are in HS Mode.
+ * Add 1 to the effective privilege level to allow us to access the
+ * Hypervisor CSRs.
+ */
+ effective_priv++;
+ }
+
+ if (!env->debugger && (effective_priv < get_field(csrno, 0x300))) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+#endif
+ if (write_mask && read_only) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ /* ensure the CSR extension (Zicsr) is enabled */
+ if (!cpu->cfg.ext_icsr) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ /* check predicate */
+ if (!csr_ops[csrno].predicate) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+ ret = csr_ops[csrno].predicate(env, csrno);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* execute combined read/write operation if it exists */
+ if (csr_ops[csrno].op) {
+ return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
+ }
+
+ /* if no accessor exists then return failure */
+ if (!csr_ops[csrno].read) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+ /* read old value */
+ ret = csr_ops[csrno].read(env, csrno, &old_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* write value if writable and write mask set, otherwise drop writes */
+ if (write_mask) {
+ new_value = (old_value & ~write_mask) | (new_value & write_mask);
+ if (csr_ops[csrno].write) {
+ ret = csr_ops[csrno].write(env, csrno, new_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+ }
+ }
+
+ /* return old value */
+ if (ret_value) {
+ *ret_value = old_value;
+ }
+
+ return RISCV_EXCP_NONE;
+}
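+
+/*
+ * A minimal sketch of how the write_mask encoding above reproduces the
+ * four instruction behaviours, given the merge
+ *     new_value = (old_value & ~write_mask) | (new_value & write_mask);
+ * csrrw (mask == -1):  every bit comes from new_value, a plain replace;
+ * csrrs (value == -1): bits set in the mask are forced to 1, others kept;
+ * csrrc (value == 0):  bits set in the mask are cleared, others kept;
+ * csrr  (mask == 0):   the write is dropped and only the read happens.
+ */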
+
+/*
+ * Debugger support. If not in user mode, set env->debugger before the
+ * riscv_csrrw call and clear it after the call.
+ */
+RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value,
+ target_ulong write_mask)
+{
+ RISCVException ret;
+#if !defined(CONFIG_USER_ONLY)
+ env->debugger = true;
+#endif
+ ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
+#if !defined(CONFIG_USER_ONLY)
+ env->debugger = false;
+#endif
+ return ret;
+}
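+
+/*
+ * The two call shapes used by the gdb stub illustrate the contract:
+ *   riscv_csrrw_debug(env, n, &val, 0, 0);     pure read, write dropped
+ *   riscv_csrrw_debug(env, n, NULL, val, -1);  unconditional write
+ */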
+
+/* Control and Status Register function table */
+riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
+ /* User Floating-Point CSRs */
+ [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
+ [CSR_FRM] = { "frm", fs, read_frm, write_frm },
+ [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
+ /* Vector CSRs */
+ [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
+ [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
+ [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
+ [CSR_VL] = { "vl", vs, read_vl },
+ [CSR_VTYPE] = { "vtype", vs, read_vtype },
+ /* User Timers and Counters */
+ [CSR_CYCLE] = { "cycle", ctr, read_instret },
+ [CSR_INSTRET] = { "instret", ctr, read_instret },
+ [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
+ [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },
+
+ /*
+ * In privileged mode, the monitor will have to emulate TIME CSRs only if
+ * the rdtime callback is not provided by machine/platform emulation.
+ */
+ [CSR_TIME] = { "time", ctr, read_time },
+ [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Machine Timers and Counters */
+ [CSR_MCYCLE] = { "mcycle", any, read_instret },
+ [CSR_MINSTRET] = { "minstret", any, read_instret },
+ [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
+ [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },
+
+ /* Machine Information Registers */
+ [CSR_MVENDORID] = { "mvendorid", any, read_zero },
+ [CSR_MARCHID] = { "marchid", any, read_zero },
+ [CSR_MIMPID] = { "mimpid", any, read_zero },
+ [CSR_MHARTID] = { "mhartid", any, read_mhartid },
+
+ /* Machine Trap Setup */
+ [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus },
+ [CSR_MISA] = { "misa", any, read_misa, write_misa },
+ [CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg },
+ [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
+ [CSR_MIE] = { "mie", any, read_mie, write_mie },
+ [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
+ [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },
+
+ [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
+
+ /* Machine Trap Handling */
+ [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch },
+ [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
+ [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
+ [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
+ [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
+
+ /* Supervisor Trap Setup */
+ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus },
+ [CSR_SIE] = { "sie", smode, read_sie, write_sie },
+ [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
+ [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
+
+ /* Supervisor Trap Handling */
+ [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch },
+ [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
+ [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
+ [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
+ [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
+
+ /* Supervisor Protection and Translation */
+ [CSR_SATP] = { "satp", smode, read_satp, write_satp },
+
+ [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus },
+ [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg },
+ [CSR_HIDELEG] = { "hideleg", hmode, read_hideleg, write_hideleg },
+ [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip },
+ [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip },
+ [CSR_HIE] = { "hie", hmode, read_hie, write_hie },
+ [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren },
+ [CSR_HGEIE] = { "hgeie", hmode, read_zero, write_hgeie },
+ [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval },
+ [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst },
+ [CSR_HGEIP] = { "hgeip", hmode, read_zero, write_hgeip },
+ [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp },
+ [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta },
+ [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah },
+
+ [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus },
+ [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip },
+ [CSR_VSIE] = { "vsie", hmode, read_vsie, write_vsie },
+ [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec },
+ [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch },
+ [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc },
+ [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause },
+ [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval },
+ [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp },
+
+ [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
+ [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },
+
+ /* Physical Memory Protection */
+ [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg },
+ [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
+ [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
+ [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
+ [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
+ [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
+ [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
+
+ /* User Pointer Masking */
+ [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
+ [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
+ [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
+ /* Machine Pointer Masking */
+ [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
+ [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
+ [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
+ /* Supervisor Pointer Masking */
+ [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
+ [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
+ [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },
+
+ /* Performance Counters */
+ [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
+ [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
+ [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
+ [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
+ [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
+ [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
+ [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
+ [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
+ [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
+ [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
+ [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
+ [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
+ [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
+ [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
+ [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
+ [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
+ [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
+ [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
+ [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
+ [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
+ [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
+ [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
+ [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
+ [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
+ [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
+ [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
+ [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
+ [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
+ [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },
+
+ [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero },
+ [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero },
+ [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero },
+ [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero },
+ [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero },
+ [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero },
+ [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero },
+ [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
+ [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
+ [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
+ [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
+ [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
+ [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
+ [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
+ [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
+ [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
+ [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
+ [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
+ [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
+ [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
+ [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
+ [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
+ [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
+ [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
+ [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
+ [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
+ [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
+ [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
+ [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },
+
+ [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
+ [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
+ [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
+ [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
+ [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
+ [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
+ [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
+ [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
+ [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
+ [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
+ [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
+ [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
+ [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
+ [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
+ [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
+ [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
+ [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
+ [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
+ [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
+ [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
+ [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
+ [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
+ [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
+ [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
+ [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
+ [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
+ [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
+ [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
+ [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },
+
+ [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
+ [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
+ [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
+ [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
+ [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
+ [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
+ [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
+ [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
+ [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
+ [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
+ [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
+ [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
+ [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
+ [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
+ [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
+ [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
+ [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
+ [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
+ [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
+ [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
+ [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
+ [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
+ [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
+ [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
+ [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
+ [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
+ [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
+ [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
+ [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },
+
+ [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
+ [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
+ [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
+ [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
+ [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
+ [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
+ [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
+ [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
+ [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
+ [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
+ [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
+ [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
+ [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
+ [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
+ [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
+ [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
+ [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
+ [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
+ [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
+ [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
+ [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
+ [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
+ [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
+ [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
+ [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
+ [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
+ [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
+ [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
+ [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
+#endif /* !CONFIG_USER_ONLY */
+};
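+
+/*
+ * Illustrative walk through the table above: an fcsr access (CSR number
+ * 0x003) dispatches to csr_ops[CSR_FCSR]; the fs predicate must return
+ * RISCV_EXCP_NONE (i.e. the FPU is enabled) before riscv_csrrw() invokes
+ * read_fcsr()/write_fcsr(). A CSR such as mip instead supplies the
+ * combined .op hook (rmw_mip), so its read-modify-write happens in a
+ * single callback.
+ */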
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
new file mode 100644
index 000000000..d62f47090
--- /dev/null
+++ b/target/riscv/fpu_helper.c
@@ -0,0 +1,376 @@
+/*
+ * RISC-V FPU Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "internals.h"
+
+target_ulong riscv_cpu_get_fflags(CPURISCVState *env)
+{
+ int soft = get_float_exception_flags(&env->fp_status);
+ target_ulong hard = 0;
+
+ hard |= (soft & float_flag_inexact) ? FPEXC_NX : 0;
+ hard |= (soft & float_flag_underflow) ? FPEXC_UF : 0;
+ hard |= (soft & float_flag_overflow) ? FPEXC_OF : 0;
+ hard |= (soft & float_flag_divbyzero) ? FPEXC_DZ : 0;
+ hard |= (soft & float_flag_invalid) ? FPEXC_NV : 0;
+
+ return hard;
+}
+
+void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong hard)
+{
+ int soft = 0;
+
+ soft |= (hard & FPEXC_NX) ? float_flag_inexact : 0;
+ soft |= (hard & FPEXC_UF) ? float_flag_underflow : 0;
+ soft |= (hard & FPEXC_OF) ? float_flag_overflow : 0;
+ soft |= (hard & FPEXC_DZ) ? float_flag_divbyzero : 0;
+ soft |= (hard & FPEXC_NV) ? float_flag_invalid : 0;
+
+ set_float_exception_flags(soft, &env->fp_status);
+}
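+
+/*
+ * For reference, the fflags layout mirrored by the two helpers above is
+ * the one defined by the F extension: bit 0 NX (inexact), bit 1 UF
+ * (underflow), bit 2 OF (overflow), bit 3 DZ (divide by zero) and
+ * bit 4 NV (invalid operation).
+ */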
+
+void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
+{
+ int softrm;
+
+ if (rm == 7) {
+ rm = env->frm;
+ }
+ switch (rm) {
+ case 0:
+ softrm = float_round_nearest_even;
+ break;
+ case 1:
+ softrm = float_round_to_zero;
+ break;
+ case 2:
+ softrm = float_round_down;
+ break;
+ case 3:
+ softrm = float_round_up;
+ break;
+ case 4:
+ softrm = float_round_ties_away;
+ break;
+ default:
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ set_float_rounding_mode(softrm, &env->fp_status);
+}
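+
+/*
+ * The rm encodings handled above follow the base ISA: 0 RNE (round to
+ * nearest, ties to even), 1 RTZ (towards zero), 2 RDN (down), 3 RUP (up),
+ * 4 RMM (nearest, ties to max magnitude) and 7 DYN (use the frm CSR);
+ * anything else is reserved and raises an illegal instruction exception.
+ */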
+
+static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
+ uint64_t rs3, int flags)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ float32 frs3 = check_nanbox_s(rs3);
+ return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status));
+}
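+
+/*
+ * check_nanbox_s() and nanbox_s() (from internals.h) implement the
+ * NaN-boxing that the D extension mandates for 32-bit values held in
+ * 64-bit FP registers: a valid single is stored with its upper 32 bits
+ * all ones. nanbox_s() sets those bits, and check_nanbox_s() substitutes
+ * the canonical quiet NaN when the input is not properly boxed.
+ */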
+
+uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return do_fmadd_s(env, frs1, frs2, frs3, 0);
+}
+
+uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status);
+}
+
+uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return do_fmadd_s(env, frs1, frs2, frs3, float_muladd_negate_c);
+}
+
+uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c,
+ &env->fp_status);
+}
+
+uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return do_fmadd_s(env, frs1, frs2, frs3, float_muladd_negate_product);
+}
+
+uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return float64_muladd(frs1, frs2, frs3, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return do_fmadd_s(env, frs1, frs2, frs3,
+ float_muladd_negate_c | float_muladd_negate_product);
+}
+
+uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
+ uint64_t frs3)
+{
+ return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c |
+ float_muladd_negate_product, &env->fp_status);
+}
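+
+/*
+ * The softfloat flag combinations used by the fused helpers above map
+ * onto the ISA semantics as follows: fmadd computes rs1 * rs2 + rs3 (no
+ * flags), fmsub negates the addend (float_muladd_negate_c), fnmsub
+ * negates the product (float_muladd_negate_product), and fnmadd negates
+ * both, giving -(rs1 * rs2) - rs3.
+ */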
+
+uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(float32_add(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(float32_sub(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(float32_mul(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(float32_div(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
+ float32_minnum(frs1, frs2, &env->fp_status) :
+ float32_minimum_number(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
+ float32_maxnum(frs1, frs2, &env->fp_status) :
+ float32_maximum_number(frs1, frs2, &env->fp_status));
+}
+
+uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return nanbox_s(float32_sqrt(frs1, &env->fp_status));
+}
+
+target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return float32_le(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return float32_lt(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ float32 frs2 = check_nanbox_s(rs2);
+ return float32_eq_quiet(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return float32_to_int32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return (int32_t)float32_to_uint32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return float32_to_int64(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return float32_to_uint64(frs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
+{
+ return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
+{
+ return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1)
+{
+ return nanbox_s(int64_to_float32(rs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1)
+{
+ return nanbox_s(uint64_to_float32(rs1, &env->fp_status));
+}
+
+target_ulong helper_fclass_s(uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return fclass_s(frs1);
+}
+
+uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_add(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_sub(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_mul(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_div(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return env->priv_ver < PRIV_VERSION_1_11_0 ?
+ float64_minnum(frs1, frs2, &env->fp_status) :
+ float64_minimum_number(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return env->priv_ver < PRIV_VERSION_1_11_0 ?
+ float64_maxnum(frs1, frs2, &env->fp_status) :
+ float64_maximum_number(frs1, frs2, &env->fp_status);
+}
+
+uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
+{
+ return nanbox_s(float64_to_float32(rs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(rs1);
+ return float32_to_float64(frs1, &env->fp_status);
+}
+
+uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_sqrt(frs1, &env->fp_status);
+}
+
+target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_le(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_lt(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_eq_quiet(frs1, frs2, &env->fp_status);
+}
+
+target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_to_int32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1)
+{
+ return (int32_t)float64_to_uint32(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_to_int64(frs1, &env->fp_status);
+}
+
+target_ulong helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_to_uint64(frs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_w(CPURISCVState *env, target_ulong rs1)
+{
+ return int32_to_float64((int32_t)rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1)
+{
+ return uint32_to_float64((uint32_t)rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_l(CPURISCVState *env, target_ulong rs1)
+{
+ return int64_to_float64(rs1, &env->fp_status);
+}
+
+uint64_t helper_fcvt_d_lu(CPURISCVState *env, target_ulong rs1)
+{
+ return uint64_to_float64(rs1, &env->fp_status);
+}
+
+target_ulong helper_fclass_d(uint64_t frs1)
+{
+ return fclass_d(frs1);
+}
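+
+/*
+ * fclass_s()/fclass_d() (from internals.h) return the one-hot class mask
+ * defined by the ISA: bits 0..9 flag -inf, negative normal, negative
+ * subnormal, -0, +0, positive subnormal, positive normal, +inf,
+ * signaling NaN and quiet NaN, respectively.
+ */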
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
new file mode 100644
index 000000000..23429179e
--- /dev/null
+++ b/target/riscv/gdbstub.c
@@ -0,0 +1,212 @@
+/*
+ * RISC-V GDB Server Stub
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "exec/gdbstub.h"
+#include "cpu.h"
+
+int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_regl(mem_buf, env->gpr[n]);
+ } else if (n == 32) {
+ return gdb_get_regl(mem_buf, env->pc);
+ }
+ return 0;
+}
+
+int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ if (n == 0) {
+ /* discard writes to x0 */
+ return sizeof(target_ulong);
+ } else if (n < 32) {
+ env->gpr[n] = ldtul_p(mem_buf);
+ return sizeof(target_ulong);
+ } else if (n == 32) {
+ env->pc = ldtul_p(mem_buf);
+ return sizeof(target_ulong);
+ }
+ return 0;
+}
+
+static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n)
+{
+ if (n < 32) {
+ if (env->misa_ext & RVD) {
+ return gdb_get_reg64(buf, env->fpr[n]);
+ }
+ if (env->misa_ext & RVF) {
+ return gdb_get_reg32(buf, env->fpr[n]);
+ }
+ /* there is a hole between ft11 and fflags in fpu.xml */
+ } else if (n < 36 && n > 32) {
+ target_ulong val = 0;
+ int result;
+ /*
+ * CSR_FFLAGS is at index 1 in csr_register, and gdb says it is FP
+ * register 33, so we recalculate the map index.
+ * This also works for CSR_FRM and CSR_FCSR.
+ */
+ result = riscv_csrrw_debug(env, n - 32, &val,
+ 0, 0);
+ if (result == RISCV_EXCP_NONE) {
+ return gdb_get_regl(buf, val);
+ }
+ }
+ return 0;
+}
+
+static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ env->fpr[n] = ldq_p(mem_buf); /* always 64-bit */
+ return sizeof(uint64_t);
+ /* there is a hole between ft11 and fflags in fpu.xml */
+ } else if (n < 36 && n > 32) {
+ target_ulong val = ldtul_p(mem_buf);
+ int result;
+ /*
+ * CSR_FFLAGS is at index 1 in csr_register, and gdb says it is FP
+ * register 33, so we recalculate the map index.
+ * This also works for CSR_FRM and CSR_FCSR.
+ */
+ result = riscv_csrrw_debug(env, n - 32, NULL,
+ val, -1);
+ if (result == RISCV_EXCP_NONE) {
+ return sizeof(target_ulong);
+ }
+ }
+ return 0;
+}
+
+static int riscv_gdb_get_csr(CPURISCVState *env, GByteArray *buf, int n)
+{
+ if (n < CSR_TABLE_SIZE) {
+ target_ulong val = 0;
+ int result;
+
+ result = riscv_csrrw_debug(env, n, &val, 0, 0);
+ if (result == RISCV_EXCP_NONE) {
+ return gdb_get_regl(buf, val);
+ }
+ }
+ return 0;
+}
+
+static int riscv_gdb_set_csr(CPURISCVState *env, uint8_t *mem_buf, int n)
+{
+ if (n < CSR_TABLE_SIZE) {
+ target_ulong val = ldtul_p(mem_buf);
+ int result;
+
+ result = riscv_csrrw_debug(env, n, NULL, val, -1);
+ if (result == RISCV_EXCP_NONE) {
+ return sizeof(target_ulong);
+ }
+ }
+ return 0;
+}
+
+static int riscv_gdb_get_virtual(CPURISCVState *cs, GByteArray *buf, int n)
+{
+ if (n == 0) {
+#ifdef CONFIG_USER_ONLY
+ return gdb_get_regl(buf, 0);
+#else
+ return gdb_get_regl(buf, cs->priv);
+#endif
+ }
+ return 0;
+}
+
+static int riscv_gdb_set_virtual(CPURISCVState *cs, uint8_t *mem_buf, int n)
+{
+ if (n == 0) {
+#ifndef CONFIG_USER_ONLY
+ cs->priv = ldtul_p(mem_buf) & 0x3;
+ if (cs->priv == PRV_H) {
+ cs->priv = PRV_S;
+ }
+#endif
+ return sizeof(target_ulong);
+ }
+ return 0;
+}
+
+static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ GString *s = g_string_new(NULL);
+ riscv_csr_predicate_fn predicate;
+ int bitsize = 16 << env->misa_mxl_max;
+ int i;
+
+ g_string_printf(s, "<?xml version=\"1.0\"?>");
+ g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">");
+ g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">");
+
+ for (i = 0; i < CSR_TABLE_SIZE; i++) {
+ predicate = csr_ops[i].predicate;
+ if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) {
+ if (csr_ops[i].name) {
+ g_string_append_printf(s, "<reg name=\"%s\"", csr_ops[i].name);
+ } else {
+ g_string_append_printf(s, "<reg name=\"csr%03x\"", i);
+ }
+ g_string_append_printf(s, " bitsize=\"%d\"", bitsize);
+ g_string_append_printf(s, " regnum=\"%d\"/>", base_reg + i);
+ }
+ }
+
+ g_string_append_printf(s, "</feature>");
+
+ cpu->dyn_csr_xml = g_string_free(s, false);
+ return CSR_TABLE_SIZE;
+}
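+
+/*
+ * The generated feature description looks roughly like the following
+ * (regnum values depend on base_reg, i.e. how many registers gdb already
+ * knows about; bitsize is 16 << misa_mxl_max, so 64 on an RV64 hart):
+ *
+ *   <?xml version="1.0"?>
+ *   <!DOCTYPE feature SYSTEM "gdb-target.dtd">
+ *   <feature name="org.gnu.gdb.riscv.csr">
+ *     <reg name="fflags" bitsize="64" regnum="base+1"/>
+ *     ...
+ *   </feature>
+ */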
+
+void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ if (env->misa_ext & RVD) {
+ gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
+ 36, "riscv-64bit-fpu.xml", 0);
+ } else if (env->misa_ext & RVF) {
+ gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu,
+ 36, "riscv-32bit-fpu.xml", 0);
+ }
+#if defined(TARGET_RISCV32)
+ gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
+ 1, "riscv-32bit-virtual.xml", 0);
+#elif defined(TARGET_RISCV64)
+ gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual,
+ 1, "riscv-64bit-virtual.xml", 0);
+#endif
+
+ gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr,
+ riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs),
+ "riscv-csr.xml", 0);
+}
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
new file mode 100644
index 000000000..c7a537622
--- /dev/null
+++ b/target/riscv/helper.h
@@ -0,0 +1,1149 @@
+/* Exceptions */
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+
+/* Floating Point - rounding mode */
+DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
+
+/* Floating Point - fused */
+DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+
+/* Floating Point - Single Precision */
+DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
+
+/* Floating Point - Double Precision */
+DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl)
+DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
+
+/* Bitmanip */
+DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+/* Special functions */
+DEF_HELPER_2(csrr, tl, env, int)
+DEF_HELPER_3(csrw, void, env, int, tl)
+DEF_HELPER_4(csrrw, tl, env, int, tl, tl)
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_2(sret, tl, env, tl)
+DEF_HELPER_2(mret, tl, env, tl)
+DEF_HELPER_1(wfi, void, env)
+DEF_HELPER_1(tlb_flush, void, env)
+#endif
+
+/* Hypervisor functions */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(hyp_tlb_flush, void, env)
+DEF_HELPER_1(hyp_gvma_tlb_flush, void, env)
+DEF_HELPER_2(hyp_hlvx_hu, tl, env, tl)
+DEF_HELPER_2(hyp_hlvx_wu, tl, env, tl)
+#endif
+
+/* Vector functions */
+DEF_HELPER_3(vsetvl, tl, env, tl, tl)
+DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32)
+DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32)
+DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrsub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrsub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrsub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrsub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_FLAGS_4(vec_rsubs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vec_rsubs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vec_rsubs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vec_rsubs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_6(vwaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwadd_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_wv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsub_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwaddu_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsubu_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwadd_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_wx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_wx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsub_wx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vadc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsbc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsbc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsbc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsbc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vadc_vxm_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadc_vxm_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadc_vxm_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vadc_vxm_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsbc_vxm_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsbc_vxm_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsbc_vxm_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsbc_vxm_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadc_vxm_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadc_vxm_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadc_vxm_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadc_vxm_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vxm_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vxm_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vxm_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsbc_vxm_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vand_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vand_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vand_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vand_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vor_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vor_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vor_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vor_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vxor_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vxor_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vxor_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vxor_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vand_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vand_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vand_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vand_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vor_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vor_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vor_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vor_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vxor_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vxor_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vxor_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vxor_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vsll_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsll_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsll_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsll_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsra_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsll_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsll_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsll_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsll_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsrl_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmseq_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmseq_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsne_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsne_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsne_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsne_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmslt_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmslt_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmslt_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmslt_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsle_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsle_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsle_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmsle_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmseq_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmseq_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmseq_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmseq_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsne_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsne_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsne_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsne_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsltu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmslt_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmslt_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmslt_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmslt_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsleu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsle_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsle_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsle_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsle_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgtu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgtu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgtu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgtu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgt_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgt_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgt_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmsgt_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vminu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vminu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vminu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vminu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmin_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmax_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmax_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmax_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmax_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vminu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vminu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vminu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vminu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmin_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmin_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmin_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmin_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmaxu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmax_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmax_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmax_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmax_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulh_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulh_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulh_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulh_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulh_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulh_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulh_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulh_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmulhsu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vdivu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdivu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdivu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdivu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdiv_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdiv_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdiv_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdiv_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vremu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vremu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vremu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vremu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrem_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrem_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrem_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrem_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vdivu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdivu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdivu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdivu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdiv_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdiv_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdiv_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vdiv_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vremu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vremu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vremu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vremu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrem_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrem_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrem_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrem_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vwmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmulsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmacc_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsac_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnmsub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vwmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vmerge_vvm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmerge_vvm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmerge_vvm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmerge_vvm_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmerge_vxm_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmerge_vxm_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmerge_vxm_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vmerge_vxm_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_4(vmv_v_v_b, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vmv_v_v_h, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vmv_v_v_w, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vmv_v_v_d, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vmv_v_x_b, void, ptr, i64, env, i32)
+DEF_HELPER_4(vmv_v_x_h, void, ptr, i64, env, i32)
+DEF_HELPER_4(vmv_v_x_w, void, ptr, i64, env, i32)
+DEF_HELPER_4(vmv_v_x_d, void, ptr, i64, env, i32)
+
+DEF_HELPER_6(vsaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssubu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssubu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssubu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssubu_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vwsmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwsmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssra_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vssrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssrl_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfwadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwadd_wf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwsub_wf_w, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmul_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmul_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmul_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfdiv_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrdiv_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrdiv_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfrdiv_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfwmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmul_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwmul_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfnmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfwmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwnmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfwnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_5(vfsqrt_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfsqrt_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfsqrt_v_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmax_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmax_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmax_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfmin_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmin_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmin_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmax_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmax_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmax_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vfsgnj_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnj_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnj_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfsgnj_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnj_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnj_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjn_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfsgnjx_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_6(vmfeq_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfne_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmflt_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfle_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfeq_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfne_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmflt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfle_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_5(vfclass_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfclass_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfclass_v_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfmerge_vfm_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmerge_vfm_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vfmerge_vfm_d, void, ptr, ptr, i64, ptr, env, i32)
+
+DEF_HELPER_5(vfcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_xu_f_v_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_x_f_v_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_xu_v_d, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfcvt_f_x_v_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vfwcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vfncvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_x_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_x_f_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmaxu_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmaxu_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmaxu_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmaxu_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmax_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredminu_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredminu_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredminu_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredminu_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmin_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredand_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredand_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredand_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredand_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredor_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredor_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredor_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredor_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredxor_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredxor_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredxor_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vredxor_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vwredsumu_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwredsumu_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwredsumu_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_4(vmpopc_m, tl, ptr, ptr, env, i32)
+
+DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vmsbf_m, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vmsif_m, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vmsof_m, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(viota_m_b, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(viota_m_h, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(viota_m_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(viota_m_d, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_4(vid_v_b, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vid_v_h, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vid_v_w, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vid_v_d, void, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vslideup_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32)
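Each DEF_HELPER_N line above registers one call-out that generated TCG code can make into C. As a rough sketch (the authoritative macros live in QEMU's include/exec/helper-head.h; the expansion below is illustrative, using the vd/v0/vs1/vs2/desc parameter names conventional in the vector helpers), a six-argument declaration becomes an ordinary prototype:

/* Illustrative expansion only -- see include/exec/helper-proto.h for
 * the real macros.  A declaration such as
 *   DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
 * yields roughly the prototype below: "ptr" maps to void *, "env" to
 * the target's CPU state pointer, and "i32" to uint32_t. */
void helper_vadd_vv_b(void *vd, void *v0, void *vs1, void *vs2,
                      CPURISCVState *env, uint32_t desc);

The same macros also emit matching gen_helper_vadd_vv_b() stubs; on the translation side the decodetree-generated trans_* routines hand those to the tcg_gen_gvec_*_ptr() calls so the vector body runs out of line.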
diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode
new file mode 100644
index 000000000..2e9212663
--- /dev/null
+++ b/target/riscv/insn16.decode
@@ -0,0 +1,162 @@
+#
+# RISC-V translation routines for the RVXI Base Integer Instruction Set.
+#
+# Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+# Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2 or later, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Fields:
+%rd 7:5
+%rs1_3 7:3 !function=ex_rvc_register
+%rs2_3 2:3 !function=ex_rvc_register
+%rs2_5 2:5
+
+# Immediates:
+%imm_ci 12:s1 2:5
+%nzuimm_ciw 7:4 11:2 5:1 6:1 !function=ex_shift_2
+%uimm_cl_d 5:2 10:3 !function=ex_shift_3
+%uimm_cl_w 5:1 10:3 6:1 !function=ex_shift_2
+%imm_cb 12:s1 5:2 2:1 10:2 3:2 !function=ex_shift_1
+%imm_cj 12:s1 8:1 9:2 6:1 7:1 2:1 11:1 3:3 !function=ex_shift_1
+
+%shimm_6bit 12:1 2:5 !function=ex_rvc_shifti
+%uimm_6bit_ld 2:3 12:1 5:2 !function=ex_shift_3
+%uimm_6bit_lw 2:2 12:1 4:3 !function=ex_shift_2
+%uimm_6bit_sd 7:3 10:3 !function=ex_shift_3
+%uimm_6bit_sw 7:2 9:4 !function=ex_shift_2
+
+%imm_addi16sp 12:s1 3:2 5:1 2:1 6:1 !function=ex_shift_4
+%imm_lui 12:s1 2:5 !function=ex_shift_12
+
+
+# Argument sets imported from insn32.decode:
+&empty !extern
+&r rd rs1 rs2 !extern
+&i imm rs1 rd !extern
+&s imm rs1 rs2 !extern
+&j imm rd !extern
+&b imm rs2 rs1 !extern
+&u imm rd !extern
+&shift shamt rs1 rd !extern
+
+
+# Formats 16:
+@cr .... ..... ..... .. &r rs2=%rs2_5 rs1=%rd %rd
+@ci ... . ..... ..... .. &i imm=%imm_ci rs1=%rd %rd
+@cl_d ... ... ... .. ... .. &i imm=%uimm_cl_d rs1=%rs1_3 rd=%rs2_3
+@cl_w ... ... ... .. ... .. &i imm=%uimm_cl_w rs1=%rs1_3 rd=%rs2_3
+@cs_2 ... ... ... .. ... .. &r rs2=%rs2_3 rs1=%rs1_3 rd=%rs1_3
+@cs_d ... ... ... .. ... .. &s imm=%uimm_cl_d rs1=%rs1_3 rs2=%rs2_3
+@cs_w ... ... ... .. ... .. &s imm=%uimm_cl_w rs1=%rs1_3 rs2=%rs2_3
+@cj ... ........... .. &j imm=%imm_cj
+@cb_z ... ... ... .. ... .. &b imm=%imm_cb rs1=%rs1_3 rs2=0
+
+@c_ldsp ... . ..... ..... .. &i imm=%uimm_6bit_ld rs1=2 %rd
+@c_lwsp ... . ..... ..... .. &i imm=%uimm_6bit_lw rs1=2 %rd
+@c_sdsp ... . ..... ..... .. &s imm=%uimm_6bit_sd rs1=2 rs2=%rs2_5
+@c_swsp ... . ..... ..... .. &s imm=%uimm_6bit_sw rs1=2 rs2=%rs2_5
+@c_li ... . ..... ..... .. &i imm=%imm_ci rs1=0 %rd
+@c_lui ... . ..... ..... .. &u imm=%imm_lui %rd
+@c_jalr ... . ..... ..... .. &i imm=0 rs1=%rd
+@c_mv ... . ..... ..... .. &i imm=0 rs1=%rs2_5 %rd
+
+@c_addi4spn ... . ..... ..... .. &i imm=%nzuimm_ciw rs1=2 rd=%rs2_3
+@c_addi16sp ... . ..... ..... .. &i imm=%imm_addi16sp rs1=2 rd=2
+
+@c_shift ... . .. ... ..... .. \
+ &shift rd=%rs1_3 rs1=%rs1_3 shamt=%shimm_6bit
+@c_shift2 ... . .. ... ..... .. \
+ &shift rd=%rd rs1=%rd shamt=%shimm_6bit
+
+@c_andi ... . .. ... ..... .. &i imm=%imm_ci rs1=%rs1_3 rd=%rs1_3
+
+# *** RV32/64C Standard Extension (Quadrant 0) ***
+{
+ # Opcode of all zeros is illegal; rd != 0, nzuimm == 0 is reserved.
+ illegal 000 000 000 00 --- 00
+ addi 000 ... ... .. ... 00 @c_addi4spn
+}
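+# Note: patterns inside a '{ }' block form a decodetree overlap group and
+# are tried in source order, so the all-zero encoding decodes as 'illegal'
+# before the general c.addi4spn pattern can match.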
+fld 001 ... ... .. ... 00 @cl_d
+lw 010 ... ... .. ... 00 @cl_w
+fsd 101 ... ... .. ... 00 @cs_d
+sw 110 ... ... .. ... 00 @cs_w
+
+# *** RV32C and RV64C specific Standard Extension (Quadrant 0) ***
+{
+ ld 011 ... ... .. ... 00 @cl_d
+ flw 011 ... ... .. ... 00 @cl_w
+}
+{
+ sd 111 ... ... .. ... 00 @cs_d
+ fsw 111 ... ... .. ... 00 @cs_w
+}
+
+# *** RV32/64C Standard Extension (Quadrant 1) ***
+addi 000 . ..... ..... 01 @ci
+addi 010 . ..... ..... 01 @c_li
+{
+ illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0
+ addi 011 . 00010 ..... 01 @c_addi16sp
+ lui 011 . ..... ..... 01 @c_lui
+}
+srli 100 . 00 ... ..... 01 @c_shift
+srai 100 . 01 ... ..... 01 @c_shift
+andi 100 . 10 ... ..... 01 @c_andi
+sub 100 0 11 ... 00 ... 01 @cs_2
+xor 100 0 11 ... 01 ... 01 @cs_2
+or 100 0 11 ... 10 ... 01 @cs_2
+and 100 0 11 ... 11 ... 01 @cs_2
+jal 101 ........... 01 @cj rd=0 # C.J
+beq 110 ... ... ..... 01 @cb_z
+bne 111 ... ... ..... 01 @cb_z
+
+# *** RV64C and RV32C specific Standard Extension (Quadrant 1) ***
+{
+ c64_illegal 001 - 00000 ----- 01 # c.addiw, RES rd=0
+ addiw 001 . ..... ..... 01 @ci
+ jal 001 ........... 01 @cj rd=1 # C.JAL
+}
+subw 100 1 11 ... 00 ... 01 @cs_2
+addw 100 1 11 ... 01 ... 01 @cs_2
+
+# *** RV32/64C Standard Extension (Quadrant 2) ***
+slli 000 . ..... ..... 10 @c_shift2
+fld 001 . ..... ..... 10 @c_ldsp
+{
+ illegal 010 - 00000 ----- 10 # c.lwsp, RES rd=0
+ lw 010 . ..... ..... 10 @c_lwsp
+}
+{
+ illegal 100 0 00000 00000 10 # c.jr, RES rs1=0
+ jalr 100 0 ..... 00000 10 @c_jalr rd=0 # C.JR
+ addi 100 0 ..... ..... 10 @c_mv
+}
+{
+ ebreak 100 1 00000 00000 10
+ jalr 100 1 ..... 00000 10 @c_jalr rd=1 # C.JALR
+ add 100 1 ..... ..... 10 @cr
+}
+fsd 101 ...... ..... 10 @c_sdsp
+sw 110 . ..... ..... 10 @c_swsp
+
+# *** RV32C and RV64C specific Standard Extension (Quadrant 2) ***
+{
+ c64_illegal 011 - 00000 ----- 10 # c.ldsp, RES rd=0
+ ld 011 . ..... ..... 10 @c_ldsp
+ flw 011 . ..... ..... 10 @c_lwsp
+}
+{
+ sd 111 . ..... ..... 10 @c_sdsp
+ fsw 111 . ..... ..... 10 @c_swsp
+}
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
new file mode 100644
index 000000000..2f251dac1
--- /dev/null
+++ b/target/riscv/insn32.decode
@@ -0,0 +1,728 @@
+#
+# RISC-V translation routines for the RVXI Base Integer Instruction Set.
+#
+# Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+# Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2 or later, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Fields:
+%rs3 27:5
+%rs2 20:5
+%rs1 15:5
+%rd 7:5
+%sh5 20:5
+
+%sh7 20:7
+%csr 20:12
+%rm 12:3
+%nf 29:3 !function=ex_plus_1
+
+# immediates:
+%imm_i 20:s12
+%imm_s 25:s7 7:5
+%imm_b 31:s1 7:1 25:6 8:4 !function=ex_shift_1
+%imm_j 31:s1 12:8 20:1 21:10 !function=ex_shift_1
+%imm_u 12:s20 !function=ex_shift_12
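+# Field syntax: 'pos:len' extracts an unsigned field and 'pos:sN' a
+# sign-extended one; multiple pos:len pairs are concatenated, most
+# significant first, and !function post-processes the extracted value
+# (e.g. ex_shift_1 restores the implicit low zero bit of offsets).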
+
+# Argument sets:
+&empty
+&b imm rs2 rs1
+&i imm rs1 rd
+&j imm rd
+&r rd rs1 rs2
+&r2 rd rs1
+&r2_s rs1 rs2
+&s imm rs1 rs2
+&u imm rd
+&shift shamt rs1 rd
+&atomic aq rl rs2 rs1 rd
+&rmrr vm rd rs1 rs2
+&rmr vm rd rs2
+&rwdvm vm wd rd rs1 rs2
+&r2nfvm vm rd rs1 nf
+&rnfvm vm rd rs1 rs2 nf
+
+# Formats 32:
+@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd
+@i ............ ..... ... ..... ....... &i imm=%imm_i %rs1 %rd
+@b ....... ..... ..... ... ..... ....... &b imm=%imm_b %rs2 %rs1
+@s ....... ..... ..... ... ..... ....... &s imm=%imm_s %rs2 %rs1
+@u .................... ..... ....... &u imm=%imm_u %rd
+@j .................... ..... ....... &j imm=%imm_j %rd
+
+@sh ...... ...... ..... ... ..... ....... &shift shamt=%sh7 %rs1 %rd
+@csr ............ ..... ... ..... ....... %csr %rs1 %rd
+
+@atom_ld ..... aq:1 rl:1 ..... ........ ..... ....... &atomic rs2=0 %rs1 %rd
+@atom_st ..... aq:1 rl:1 ..... ........ ..... ....... &atomic %rs2 %rs1 %rd
+
+@r4_rm ..... .. ..... ..... ... ..... ....... %rs3 %rs2 %rs1 %rm %rd
+@r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
+@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
+@r2 ....... ..... ..... ... ..... ....... &r2 %rs1 %rd
+@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
+@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
+@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
+@r_nfvm ... ... vm:1 ..... ..... ... ..... ....... &rnfvm %nf %rs2 %rs1 %rd
+@r2rd ....... ..... ..... ... ..... ....... %rs2 %rd
+@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
+@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
+@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
+@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
+@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd
+@r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
+
+@hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1
+@hfence_vvma ....... ..... ..... ... ..... ....... %rs2 %rs1
+
+@sfence_vma ....... ..... ..... ... ..... ....... %rs2 %rs1
+@sfence_vm ....... ..... ..... ... ..... ....... %rs1
+
+# Formats 64:
+@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd
+
+# *** Privileged Instructions ***
+ecall 000000000000 00000 000 00000 1110011
+ebreak 000000000001 00000 000 00000 1110011
+uret 0000000 00010 00000 000 00000 1110011
+sret 0001000 00010 00000 000 00000 1110011
+mret 0011000 00010 00000 000 00000 1110011
+wfi 0001000 00101 00000 000 00000 1110011
+sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
+sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
+
+# *** RV32I Base Instruction Set ***
+lui .................... ..... 0110111 @u
+auipc .................... ..... 0010111 @u
+jal .................... ..... 1101111 @j
+jalr ............ ..... 000 ..... 1100111 @i
+beq ....... ..... ..... 000 ..... 1100011 @b
+bne ....... ..... ..... 001 ..... 1100011 @b
+blt ....... ..... ..... 100 ..... 1100011 @b
+bge ....... ..... ..... 101 ..... 1100011 @b
+bltu ....... ..... ..... 110 ..... 1100011 @b
+bgeu ....... ..... ..... 111 ..... 1100011 @b
+lb ............ ..... 000 ..... 0000011 @i
+lh ............ ..... 001 ..... 0000011 @i
+lw ............ ..... 010 ..... 0000011 @i
+lbu ............ ..... 100 ..... 0000011 @i
+lhu ............ ..... 101 ..... 0000011 @i
+sb ....... ..... ..... 000 ..... 0100011 @s
+sh ....... ..... ..... 001 ..... 0100011 @s
+sw ....... ..... ..... 010 ..... 0100011 @s
+addi ............ ..... 000 ..... 0010011 @i
+slti ............ ..... 010 ..... 0010011 @i
+sltiu ............ ..... 011 ..... 0010011 @i
+xori ............ ..... 100 ..... 0010011 @i
+ori ............ ..... 110 ..... 0010011 @i
+andi ............ ..... 111 ..... 0010011 @i
+slli 00000. ...... ..... 001 ..... 0010011 @sh
+srli 00000. ...... ..... 101 ..... 0010011 @sh
+srai 01000. ...... ..... 101 ..... 0010011 @sh
+add 0000000 ..... ..... 000 ..... 0110011 @r
+sub 0100000 ..... ..... 000 ..... 0110011 @r
+sll 0000000 ..... ..... 001 ..... 0110011 @r
+slt 0000000 ..... ..... 010 ..... 0110011 @r
+sltu 0000000 ..... ..... 011 ..... 0110011 @r
+xor 0000000 ..... ..... 100 ..... 0110011 @r
+srl 0000000 ..... ..... 101 ..... 0110011 @r
+sra 0100000 ..... ..... 101 ..... 0110011 @r
+or 0000000 ..... ..... 110 ..... 0110011 @r
+and 0000000 ..... ..... 111 ..... 0110011 @r
+fence ---- pred:4 succ:4 ----- 000 ----- 0001111
+fence_i ---- ---- ---- ----- 001 ----- 0001111
+csrrw ............ ..... 001 ..... 1110011 @csr
+csrrs ............ ..... 010 ..... 1110011 @csr
+csrrc ............ ..... 011 ..... 1110011 @csr
+csrrwi ............ ..... 101 ..... 1110011 @csr
+csrrsi ............ ..... 110 ..... 1110011 @csr
+csrrci ............ ..... 111 ..... 1110011 @csr
+
+# *** RV64I Base Instruction Set (in addition to RV32I) ***
+lwu ............ ..... 110 ..... 0000011 @i
+ld ............ ..... 011 ..... 0000011 @i
+sd ....... ..... ..... 011 ..... 0100011 @s
+addiw ............ ..... 000 ..... 0011011 @i
+slliw 0000000 ..... ..... 001 ..... 0011011 @sh5
+srliw 0000000 ..... ..... 101 ..... 0011011 @sh5
+sraiw 0100000 ..... ..... 101 ..... 0011011 @sh5
+addw 0000000 ..... ..... 000 ..... 0111011 @r
+subw 0100000 ..... ..... 000 ..... 0111011 @r
+sllw 0000000 ..... ..... 001 ..... 0111011 @r
+srlw 0000000 ..... ..... 101 ..... 0111011 @r
+sraw 0100000 ..... ..... 101 ..... 0111011 @r
+
+# *** RV32M Standard Extension ***
+mul 0000001 ..... ..... 000 ..... 0110011 @r
+mulh 0000001 ..... ..... 001 ..... 0110011 @r
+mulhsu 0000001 ..... ..... 010 ..... 0110011 @r
+mulhu 0000001 ..... ..... 011 ..... 0110011 @r
+div 0000001 ..... ..... 100 ..... 0110011 @r
+divu 0000001 ..... ..... 101 ..... 0110011 @r
+rem 0000001 ..... ..... 110 ..... 0110011 @r
+remu 0000001 ..... ..... 111 ..... 0110011 @r
+
+# *** RV64M Standard Extension (in addition to RV32M) ***
+mulw 0000001 ..... ..... 000 ..... 0111011 @r
+divw 0000001 ..... ..... 100 ..... 0111011 @r
+divuw 0000001 ..... ..... 101 ..... 0111011 @r
+remw 0000001 ..... ..... 110 ..... 0111011 @r
+remuw 0000001 ..... ..... 111 ..... 0111011 @r
+
+# *** RV32A Standard Extension ***
+lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
+sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
+amoswap_w 00001 . . ..... ..... 010 ..... 0101111 @atom_st
+amoadd_w 00000 . . ..... ..... 010 ..... 0101111 @atom_st
+amoxor_w 00100 . . ..... ..... 010 ..... 0101111 @atom_st
+amoand_w 01100 . . ..... ..... 010 ..... 0101111 @atom_st
+amoor_w 01000 . . ..... ..... 010 ..... 0101111 @atom_st
+amomin_w 10000 . . ..... ..... 010 ..... 0101111 @atom_st
+amomax_w 10100 . . ..... ..... 010 ..... 0101111 @atom_st
+amominu_w 11000 . . ..... ..... 010 ..... 0101111 @atom_st
+amomaxu_w 11100 . . ..... ..... 010 ..... 0101111 @atom_st
+
+# *** RV64A Standard Extension (in addition to RV32A) ***
+lr_d 00010 . . 00000 ..... 011 ..... 0101111 @atom_ld
+sc_d 00011 . . ..... ..... 011 ..... 0101111 @atom_st
+amoswap_d 00001 . . ..... ..... 011 ..... 0101111 @atom_st
+amoadd_d 00000 . . ..... ..... 011 ..... 0101111 @atom_st
+amoxor_d 00100 . . ..... ..... 011 ..... 0101111 @atom_st
+amoand_d 01100 . . ..... ..... 011 ..... 0101111 @atom_st
+amoor_d 01000 . . ..... ..... 011 ..... 0101111 @atom_st
+amomin_d 10000 . . ..... ..... 011 ..... 0101111 @atom_st
+amomax_d 10100 . . ..... ..... 011 ..... 0101111 @atom_st
+amominu_d 11000 . . ..... ..... 011 ..... 0101111 @atom_st
+amomaxu_d 11100 . . ..... ..... 011 ..... 0101111 @atom_st
+
+# *** RV32F Standard Extension ***
+flw ............ ..... 010 ..... 0000111 @i
+fsw ....... ..... ..... 010 ..... 0100111 @s
+fmadd_s ..... 00 ..... ..... ... ..... 1000011 @r4_rm
+fmsub_s ..... 00 ..... ..... ... ..... 1000111 @r4_rm
+fnmsub_s ..... 00 ..... ..... ... ..... 1001011 @r4_rm
+fnmadd_s ..... 00 ..... ..... ... ..... 1001111 @r4_rm
+fadd_s 0000000 ..... ..... ... ..... 1010011 @r_rm
+fsub_s 0000100 ..... ..... ... ..... 1010011 @r_rm
+fmul_s 0001000 ..... ..... ... ..... 1010011 @r_rm
+fdiv_s 0001100 ..... ..... ... ..... 1010011 @r_rm
+fsqrt_s 0101100 00000 ..... ... ..... 1010011 @r2_rm
+fsgnj_s 0010000 ..... ..... 000 ..... 1010011 @r
+fsgnjn_s 0010000 ..... ..... 001 ..... 1010011 @r
+fsgnjx_s 0010000 ..... ..... 010 ..... 1010011 @r
+fmin_s 0010100 ..... ..... 000 ..... 1010011 @r
+fmax_s 0010100 ..... ..... 001 ..... 1010011 @r
+fcvt_w_s 1100000 00000 ..... ... ..... 1010011 @r2_rm
+fcvt_wu_s 1100000 00001 ..... ... ..... 1010011 @r2_rm
+fmv_x_w 1110000 00000 ..... 000 ..... 1010011 @r2
+feq_s 1010000 ..... ..... 010 ..... 1010011 @r
+flt_s 1010000 ..... ..... 001 ..... 1010011 @r
+fle_s 1010000 ..... ..... 000 ..... 1010011 @r
+fclass_s 1110000 00000 ..... 001 ..... 1010011 @r2
+fcvt_s_w 1101000 00000 ..... ... ..... 1010011 @r2_rm
+fcvt_s_wu 1101000 00001 ..... ... ..... 1010011 @r2_rm
+fmv_w_x 1111000 00000 ..... 000 ..... 1010011 @r2
+
+# *** RV64F Standard Extension (in addition to RV32F) ***
+fcvt_l_s 1100000 00010 ..... ... ..... 1010011 @r2_rm
+fcvt_lu_s 1100000 00011 ..... ... ..... 1010011 @r2_rm
+fcvt_s_l 1101000 00010 ..... ... ..... 1010011 @r2_rm
+fcvt_s_lu 1101000 00011 ..... ... ..... 1010011 @r2_rm
+
+# *** RV32D Standard Extension ***
+fld ............ ..... 011 ..... 0000111 @i
+fsd ....... ..... ..... 011 ..... 0100111 @s
+fmadd_d ..... 01 ..... ..... ... ..... 1000011 @r4_rm
+fmsub_d ..... 01 ..... ..... ... ..... 1000111 @r4_rm
+fnmsub_d ..... 01 ..... ..... ... ..... 1001011 @r4_rm
+fnmadd_d ..... 01 ..... ..... ... ..... 1001111 @r4_rm
+fadd_d 0000001 ..... ..... ... ..... 1010011 @r_rm
+fsub_d 0000101 ..... ..... ... ..... 1010011 @r_rm
+fmul_d 0001001 ..... ..... ... ..... 1010011 @r_rm
+fdiv_d 0001101 ..... ..... ... ..... 1010011 @r_rm
+fsqrt_d 0101101 00000 ..... ... ..... 1010011 @r2_rm
+fsgnj_d 0010001 ..... ..... 000 ..... 1010011 @r
+fsgnjn_d 0010001 ..... ..... 001 ..... 1010011 @r
+fsgnjx_d 0010001 ..... ..... 010 ..... 1010011 @r
+fmin_d 0010101 ..... ..... 000 ..... 1010011 @r
+fmax_d 0010101 ..... ..... 001 ..... 1010011 @r
+fcvt_s_d 0100000 00001 ..... ... ..... 1010011 @r2_rm
+fcvt_d_s 0100001 00000 ..... ... ..... 1010011 @r2_rm
+feq_d 1010001 ..... ..... 010 ..... 1010011 @r
+flt_d 1010001 ..... ..... 001 ..... 1010011 @r
+fle_d 1010001 ..... ..... 000 ..... 1010011 @r
+fclass_d 1110001 00000 ..... 001 ..... 1010011 @r2
+fcvt_w_d 1100001 00000 ..... ... ..... 1010011 @r2_rm
+fcvt_wu_d 1100001 00001 ..... ... ..... 1010011 @r2_rm
+fcvt_d_w 1101001 00000 ..... ... ..... 1010011 @r2_rm
+fcvt_d_wu 1101001 00001 ..... ... ..... 1010011 @r2_rm
+
+# *** RV64D Standard Extension (in addition to RV32D) ***
+fcvt_l_d 1100001 00010 ..... ... ..... 1010011 @r2_rm
+fcvt_lu_d 1100001 00011 ..... ... ..... 1010011 @r2_rm
+fmv_x_d 1110001 00000 ..... 000 ..... 1010011 @r2
+fcvt_d_l 1101001 00010 ..... ... ..... 1010011 @r2_rm
+fcvt_d_lu 1101001 00011 ..... ... ..... 1010011 @r2_rm
+fmv_d_x 1111001 00000 ..... 000 ..... 1010011 @r2
+
+# *** RV32H Base Instruction Set ***
+hlv_b 0110000 00000 ..... 100 ..... 1110011 @r2
+hlv_bu 0110000 00001 ..... 100 ..... 1110011 @r2
+hlv_h 0110010 00000 ..... 100 ..... 1110011 @r2
+hlv_hu 0110010 00001 ..... 100 ..... 1110011 @r2
+hlvx_hu 0110010 00011 ..... 100 ..... 1110011 @r2
+hlv_w 0110100 00000 ..... 100 ..... 1110011 @r2
+hlvx_wu 0110100 00011 ..... 100 ..... 1110011 @r2
+hsv_b 0110001 ..... ..... 100 00000 1110011 @r2_s
+hsv_h 0110011 ..... ..... 100 00000 1110011 @r2_s
+hsv_w 0110101 ..... ..... 100 00000 1110011 @r2_s
+hfence_gvma 0110001 ..... ..... 000 00000 1110011 @hfence_gvma
+hfence_vvma 0010001 ..... ..... 000 00000 1110011 @hfence_vvma
+
+# *** RV64H Base Instruction Set ***
+hlv_wu 0110100 00001 ..... 100 ..... 1110011 @r2
+hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2
+hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s
+
+# *** Vector loads and stores are encoded within LOADFP/STORE-FP ***
+vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm
+vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm
+vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm
+vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm
+vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm
+vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm
+vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm
+vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm
+vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm
+vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm
+vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm
+vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm
+vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm
+vsw_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm
+vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm
+
+vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm
+vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm
+vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm
+vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm
+vssh_v ... 010 . ..... ..... 101 ..... 0100111 @r_nfvm
+vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm
+vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm
+
+vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm
+vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm
+vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm
+vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm
+vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm
+# Vector ordered-indexed and unordered-indexed store insns.
+vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm
+vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm
+vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm
+vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm
+
+# *** Vector AMO operations are encoded under the standard AMO major opcode ***
+vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoxorw_v 00100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoandw_v 01100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamoorw_v 01000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominw_v 10000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxw_v 10100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamominuw_v 11000 . . ..... ..... 110 ..... 0101111 @r_wdvm
+vamomaxuw_v 11100 . . ..... ..... 110 ..... 0101111 @r_wdvm
+
+# *** new major opcode OP-V ***
+vadd_vv 000000 . ..... ..... 000 ..... 1010111 @r_vm
+vadd_vx 000000 . ..... ..... 100 ..... 1010111 @r_vm
+vadd_vi 000000 . ..... ..... 011 ..... 1010111 @r_vm
+vsub_vv 000010 . ..... ..... 000 ..... 1010111 @r_vm
+vsub_vx 000010 . ..... ..... 100 ..... 1010111 @r_vm
+vrsub_vx 000011 . ..... ..... 100 ..... 1010111 @r_vm
+vrsub_vi 000011 . ..... ..... 011 ..... 1010111 @r_vm
+vwaddu_vv 110000 . ..... ..... 010 ..... 1010111 @r_vm
+vwaddu_vx 110000 . ..... ..... 110 ..... 1010111 @r_vm
+vwadd_vv 110001 . ..... ..... 010 ..... 1010111 @r_vm
+vwadd_vx 110001 . ..... ..... 110 ..... 1010111 @r_vm
+vwsubu_vv 110010 . ..... ..... 010 ..... 1010111 @r_vm
+vwsubu_vx 110010 . ..... ..... 110 ..... 1010111 @r_vm
+vwsub_vv 110011 . ..... ..... 010 ..... 1010111 @r_vm
+vwsub_vx 110011 . ..... ..... 110 ..... 1010111 @r_vm
+vwaddu_wv 110100 . ..... ..... 010 ..... 1010111 @r_vm
+vwaddu_wx 110100 . ..... ..... 110 ..... 1010111 @r_vm
+vwadd_wv 110101 . ..... ..... 010 ..... 1010111 @r_vm
+vwadd_wx 110101 . ..... ..... 110 ..... 1010111 @r_vm
+vwsubu_wv 110110 . ..... ..... 010 ..... 1010111 @r_vm
+vwsubu_wx 110110 . ..... ..... 110 ..... 1010111 @r_vm
+vwsub_wv 110111 . ..... ..... 010 ..... 1010111 @r_vm
+vwsub_wx 110111 . ..... ..... 110 ..... 1010111 @r_vm
+vadc_vvm 010000 1 ..... ..... 000 ..... 1010111 @r_vm_1
+vadc_vxm 010000 1 ..... ..... 100 ..... 1010111 @r_vm_1
+vadc_vim 010000 1 ..... ..... 011 ..... 1010111 @r_vm_1
+vmadc_vvm 010001 1 ..... ..... 000 ..... 1010111 @r_vm_1
+vmadc_vxm 010001 1 ..... ..... 100 ..... 1010111 @r_vm_1
+vmadc_vim 010001 1 ..... ..... 011 ..... 1010111 @r_vm_1
+vsbc_vvm 010010 1 ..... ..... 000 ..... 1010111 @r_vm_1
+vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r_vm_1
+vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r_vm_1
+vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r_vm_1
+vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm
+vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm
+vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm
+vor_vv 001010 . ..... ..... 000 ..... 1010111 @r_vm
+vor_vx 001010 . ..... ..... 100 ..... 1010111 @r_vm
+vor_vi 001010 . ..... ..... 011 ..... 1010111 @r_vm
+vxor_vv 001011 . ..... ..... 000 ..... 1010111 @r_vm
+vxor_vx 001011 . ..... ..... 100 ..... 1010111 @r_vm
+vxor_vi 001011 . ..... ..... 011 ..... 1010111 @r_vm
+vsll_vv 100101 . ..... ..... 000 ..... 1010111 @r_vm
+vsll_vx 100101 . ..... ..... 100 ..... 1010111 @r_vm
+vsll_vi 100101 . ..... ..... 011 ..... 1010111 @r_vm
+vsrl_vv 101000 . ..... ..... 000 ..... 1010111 @r_vm
+vsrl_vx 101000 . ..... ..... 100 ..... 1010111 @r_vm
+vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm
+vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm
+vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm
+vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm
+vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm
+vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm
+vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm
+vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm
+vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm
+vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm
+vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm
+vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm
+vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm
+vmsne_vv 011001 . ..... ..... 000 ..... 1010111 @r_vm
+vmsne_vx 011001 . ..... ..... 100 ..... 1010111 @r_vm
+vmsne_vi 011001 . ..... ..... 011 ..... 1010111 @r_vm
+vmsltu_vv 011010 . ..... ..... 000 ..... 1010111 @r_vm
+vmsltu_vx 011010 . ..... ..... 100 ..... 1010111 @r_vm
+vmslt_vv 011011 . ..... ..... 000 ..... 1010111 @r_vm
+vmslt_vx 011011 . ..... ..... 100 ..... 1010111 @r_vm
+vmsleu_vv 011100 . ..... ..... 000 ..... 1010111 @r_vm
+vmsleu_vx 011100 . ..... ..... 100 ..... 1010111 @r_vm
+vmsleu_vi 011100 . ..... ..... 011 ..... 1010111 @r_vm
+vmsle_vv 011101 . ..... ..... 000 ..... 1010111 @r_vm
+vmsle_vx 011101 . ..... ..... 100 ..... 1010111 @r_vm
+vmsle_vi 011101 . ..... ..... 011 ..... 1010111 @r_vm
+vmsgtu_vx 011110 . ..... ..... 100 ..... 1010111 @r_vm
+vmsgtu_vi 011110 . ..... ..... 011 ..... 1010111 @r_vm
+vmsgt_vx 011111 . ..... ..... 100 ..... 1010111 @r_vm
+vmsgt_vi 011111 . ..... ..... 011 ..... 1010111 @r_vm
+vminu_vv 000100 . ..... ..... 000 ..... 1010111 @r_vm
+vminu_vx 000100 . ..... ..... 100 ..... 1010111 @r_vm
+vmin_vv 000101 . ..... ..... 000 ..... 1010111 @r_vm
+vmin_vx 000101 . ..... ..... 100 ..... 1010111 @r_vm
+vmaxu_vv 000110 . ..... ..... 000 ..... 1010111 @r_vm
+vmaxu_vx 000110 . ..... ..... 100 ..... 1010111 @r_vm
+vmax_vv 000111 . ..... ..... 000 ..... 1010111 @r_vm
+vmax_vx 000111 . ..... ..... 100 ..... 1010111 @r_vm
+vmul_vv 100101 . ..... ..... 010 ..... 1010111 @r_vm
+vmul_vx 100101 . ..... ..... 110 ..... 1010111 @r_vm
+vmulh_vv 100111 . ..... ..... 010 ..... 1010111 @r_vm
+vmulh_vx 100111 . ..... ..... 110 ..... 1010111 @r_vm
+vmulhu_vv 100100 . ..... ..... 010 ..... 1010111 @r_vm
+vmulhu_vx 100100 . ..... ..... 110 ..... 1010111 @r_vm
+vmulhsu_vv 100110 . ..... ..... 010 ..... 1010111 @r_vm
+vmulhsu_vx 100110 . ..... ..... 110 ..... 1010111 @r_vm
+vdivu_vv 100000 . ..... ..... 010 ..... 1010111 @r_vm
+vdivu_vx 100000 . ..... ..... 110 ..... 1010111 @r_vm
+vdiv_vv 100001 . ..... ..... 010 ..... 1010111 @r_vm
+vdiv_vx 100001 . ..... ..... 110 ..... 1010111 @r_vm
+vremu_vv 100010 . ..... ..... 010 ..... 1010111 @r_vm
+vremu_vx 100010 . ..... ..... 110 ..... 1010111 @r_vm
+vrem_vv 100011 . ..... ..... 010 ..... 1010111 @r_vm
+vrem_vx 100011 . ..... ..... 110 ..... 1010111 @r_vm
+vwmulu_vv 111000 . ..... ..... 010 ..... 1010111 @r_vm
+vwmulu_vx 111000 . ..... ..... 110 ..... 1010111 @r_vm
+vwmulsu_vv 111010 . ..... ..... 010 ..... 1010111 @r_vm
+vwmulsu_vx 111010 . ..... ..... 110 ..... 1010111 @r_vm
+vwmul_vv 111011 . ..... ..... 010 ..... 1010111 @r_vm
+vwmul_vx 111011 . ..... ..... 110 ..... 1010111 @r_vm
+vmacc_vv 101101 . ..... ..... 010 ..... 1010111 @r_vm
+vmacc_vx 101101 . ..... ..... 110 ..... 1010111 @r_vm
+vnmsac_vv 101111 . ..... ..... 010 ..... 1010111 @r_vm
+vnmsac_vx 101111 . ..... ..... 110 ..... 1010111 @r_vm
+vmadd_vv 101001 . ..... ..... 010 ..... 1010111 @r_vm
+vmadd_vx 101001 . ..... ..... 110 ..... 1010111 @r_vm
+vnmsub_vv 101011 . ..... ..... 010 ..... 1010111 @r_vm
+vnmsub_vx 101011 . ..... ..... 110 ..... 1010111 @r_vm
+vwmaccu_vv 111100 . ..... ..... 010 ..... 1010111 @r_vm
+vwmaccu_vx 111100 . ..... ..... 110 ..... 1010111 @r_vm
+vwmacc_vv 111101 . ..... ..... 010 ..... 1010111 @r_vm
+vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm
+vwmaccsu_vv 111110 . ..... ..... 010 ..... 1010111 @r_vm
+vwmaccsu_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm
+vwmaccus_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm
+vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2
+vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2
+vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2
+vmerge_vvm 010111 0 ..... ..... 000 ..... 1010111 @r_vm_0
+vmerge_vxm 010111 0 ..... ..... 100 ..... 1010111 @r_vm_0
+vmerge_vim 010111 0 ..... ..... 011 ..... 1010111 @r_vm_0
+vsaddu_vv 100000 . ..... ..... 000 ..... 1010111 @r_vm
+vsaddu_vx 100000 . ..... ..... 100 ..... 1010111 @r_vm
+vsaddu_vi 100000 . ..... ..... 011 ..... 1010111 @r_vm
+vsadd_vv 100001 . ..... ..... 000 ..... 1010111 @r_vm
+vsadd_vx 100001 . ..... ..... 100 ..... 1010111 @r_vm
+vsadd_vi 100001 . ..... ..... 011 ..... 1010111 @r_vm
+vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm
+vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm
+vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm
+vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm
+vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm
+vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm
+vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm
+vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm
+vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm
+vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm
+vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm
+vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm
+vwsmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm
+vwsmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm
+vwsmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm
+vwsmaccsu_vv 111110 . ..... ..... 000 ..... 1010111 @r_vm
+vwsmaccsu_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm
+vwsmaccus_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm
+vssrl_vv 101010 . ..... ..... 000 ..... 1010111 @r_vm
+vssrl_vx 101010 . ..... ..... 100 ..... 1010111 @r_vm
+vssrl_vi 101010 . ..... ..... 011 ..... 1010111 @r_vm
+vssra_vv 101011 . ..... ..... 000 ..... 1010111 @r_vm
+vssra_vx 101011 . ..... ..... 100 ..... 1010111 @r_vm
+vssra_vi 101011 . ..... ..... 011 ..... 1010111 @r_vm
+vnclipu_vv 101110 . ..... ..... 000 ..... 1010111 @r_vm
+vnclipu_vx 101110 . ..... ..... 100 ..... 1010111 @r_vm
+vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm
+vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm
+vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm
+vnclip_vi 101111 . ..... ..... 011 ..... 1010111 @r_vm
+vfadd_vv 000000 . ..... ..... 001 ..... 1010111 @r_vm
+vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm
+vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm
+vfsub_vf 000010 . ..... ..... 101 ..... 1010111 @r_vm
+vfrsub_vf 100111 . ..... ..... 101 ..... 1010111 @r_vm
+vfwadd_vv 110000 . ..... ..... 001 ..... 1010111 @r_vm
+vfwadd_vf 110000 . ..... ..... 101 ..... 1010111 @r_vm
+vfwadd_wv 110100 . ..... ..... 001 ..... 1010111 @r_vm
+vfwadd_wf 110100 . ..... ..... 101 ..... 1010111 @r_vm
+vfwsub_vv 110010 . ..... ..... 001 ..... 1010111 @r_vm
+vfwsub_vf 110010 . ..... ..... 101 ..... 1010111 @r_vm
+vfwsub_wv 110110 . ..... ..... 001 ..... 1010111 @r_vm
+vfwsub_wf 110110 . ..... ..... 101 ..... 1010111 @r_vm
+vfmul_vv 100100 . ..... ..... 001 ..... 1010111 @r_vm
+vfmul_vf 100100 . ..... ..... 101 ..... 1010111 @r_vm
+vfdiv_vv 100000 . ..... ..... 001 ..... 1010111 @r_vm
+vfdiv_vf 100000 . ..... ..... 101 ..... 1010111 @r_vm
+vfrdiv_vf 100001 . ..... ..... 101 ..... 1010111 @r_vm
+vfwmul_vv 111000 . ..... ..... 001 ..... 1010111 @r_vm
+vfwmul_vf 111000 . ..... ..... 101 ..... 1010111 @r_vm
+vfmacc_vv 101100 . ..... ..... 001 ..... 1010111 @r_vm
+vfnmacc_vv 101101 . ..... ..... 001 ..... 1010111 @r_vm
+vfnmacc_vf 101101 . ..... ..... 101 ..... 1010111 @r_vm
+vfmacc_vf 101100 . ..... ..... 101 ..... 1010111 @r_vm
+vfmsac_vv 101110 . ..... ..... 001 ..... 1010111 @r_vm
+vfmsac_vf 101110 . ..... ..... 101 ..... 1010111 @r_vm
+vfnmsac_vv 101111 . ..... ..... 001 ..... 1010111 @r_vm
+vfnmsac_vf 101111 . ..... ..... 101 ..... 1010111 @r_vm
+vfmadd_vv 101000 . ..... ..... 001 ..... 1010111 @r_vm
+vfmadd_vf 101000 . ..... ..... 101 ..... 1010111 @r_vm
+vfnmadd_vv 101001 . ..... ..... 001 ..... 1010111 @r_vm
+vfnmadd_vf 101001 . ..... ..... 101 ..... 1010111 @r_vm
+vfmsub_vv 101010 . ..... ..... 001 ..... 1010111 @r_vm
+vfmsub_vf 101010 . ..... ..... 101 ..... 1010111 @r_vm
+vfnmsub_vv 101011 . ..... ..... 001 ..... 1010111 @r_vm
+vfnmsub_vf 101011 . ..... ..... 101 ..... 1010111 @r_vm
+vfwmacc_vv 111100 . ..... ..... 001 ..... 1010111 @r_vm
+vfwmacc_vf 111100 . ..... ..... 101 ..... 1010111 @r_vm
+vfwnmacc_vv 111101 . ..... ..... 001 ..... 1010111 @r_vm
+vfwnmacc_vf 111101 . ..... ..... 101 ..... 1010111 @r_vm
+vfwmsac_vv 111110 . ..... ..... 001 ..... 1010111 @r_vm
+vfwmsac_vf 111110 . ..... ..... 101 ..... 1010111 @r_vm
+vfwnmsac_vv 111111 . ..... ..... 001 ..... 1010111 @r_vm
+vfwnmsac_vf 111111 . ..... ..... 101 ..... 1010111 @r_vm
+vfsqrt_v 100011 . ..... 00000 001 ..... 1010111 @r2_vm
+vfmin_vv 000100 . ..... ..... 001 ..... 1010111 @r_vm
+vfmin_vf 000100 . ..... ..... 101 ..... 1010111 @r_vm
+vfmax_vv 000110 . ..... ..... 001 ..... 1010111 @r_vm
+vfmax_vf 000110 . ..... ..... 101 ..... 1010111 @r_vm
+vfsgnj_vv 001000 . ..... ..... 001 ..... 1010111 @r_vm
+vfsgnj_vf 001000 . ..... ..... 101 ..... 1010111 @r_vm
+vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm
+vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm
+vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm
+vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm
+vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm
+vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm
+vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm
+vmfne_vf 011100 . ..... ..... 101 ..... 1010111 @r_vm
+vmflt_vv 011011 . ..... ..... 001 ..... 1010111 @r_vm
+vmflt_vf 011011 . ..... ..... 101 ..... 1010111 @r_vm
+vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm
+vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm
+vmfgt_vf 011101 . ..... ..... 101 ..... 1010111 @r_vm
+vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm
+vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm
+vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm
+vfclass_v 100011 . ..... 10000 001 ..... 1010111 @r2_vm
+vfmerge_vfm 010111 0 ..... ..... 101 ..... 1010111 @r_vm_0
+vfmv_v_f 010111 1 00000 ..... 101 ..... 1010111 @r2
+vfcvt_xu_f_v 100010 . ..... 00000 001 ..... 1010111 @r2_vm
+vfcvt_x_f_v 100010 . ..... 00001 001 ..... 1010111 @r2_vm
+vfcvt_f_xu_v 100010 . ..... 00010 001 ..... 1010111 @r2_vm
+vfcvt_f_x_v 100010 . ..... 00011 001 ..... 1010111 @r2_vm
+vfwcvt_xu_f_v 100010 . ..... 01000 001 ..... 1010111 @r2_vm
+vfwcvt_x_f_v 100010 . ..... 01001 001 ..... 1010111 @r2_vm
+vfwcvt_f_xu_v 100010 . ..... 01010 001 ..... 1010111 @r2_vm
+vfwcvt_f_x_v 100010 . ..... 01011 001 ..... 1010111 @r2_vm
+vfwcvt_f_f_v 100010 . ..... 01100 001 ..... 1010111 @r2_vm
+vfncvt_xu_f_v 100010 . ..... 10000 001 ..... 1010111 @r2_vm
+vfncvt_x_f_v 100010 . ..... 10001 001 ..... 1010111 @r2_vm
+vfncvt_f_xu_v 100010 . ..... 10010 001 ..... 1010111 @r2_vm
+vfncvt_f_x_v 100010 . ..... 10011 001 ..... 1010111 @r2_vm
+vfncvt_f_f_v 100010 . ..... 10100 001 ..... 1010111 @r2_vm
+vredsum_vs 000000 . ..... ..... 010 ..... 1010111 @r_vm
+vredand_vs 000001 . ..... ..... 010 ..... 1010111 @r_vm
+vredor_vs 000010 . ..... ..... 010 ..... 1010111 @r_vm
+vredxor_vs 000011 . ..... ..... 010 ..... 1010111 @r_vm
+vredminu_vs 000100 . ..... ..... 010 ..... 1010111 @r_vm
+vredmin_vs 000101 . ..... ..... 010 ..... 1010111 @r_vm
+vredmaxu_vs 000110 . ..... ..... 010 ..... 1010111 @r_vm
+vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm
+vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm
+vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm
+# Vector ordered and unordered reduction sum
+vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm
+vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm
+vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
+# Vector widening ordered and unordered float reduction sum
+vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm
+vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
+vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
+vmandnot_mm 011000 - ..... ..... 010 ..... 1010111 @r
+vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
+vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
+vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
+vmornot_mm 011100 - ..... ..... 010 ..... 1010111 @r
+vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
+vmpopc_m 010100 . ..... ----- 010 ..... 1010111 @r2_vm
+vmfirst_m 010101 . ..... ----- 010 ..... 1010111 @r2_vm
+vmsbf_m 010110 . ..... 00001 010 ..... 1010111 @r2_vm
+vmsif_m 010110 . ..... 00011 010 ..... 1010111 @r2_vm
+vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm
+viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm
+vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm
+vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r
+vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2
+vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd
+vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2
+vslideup_vx 001110 . ..... ..... 100 ..... 1010111 @r_vm
+vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm
+vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm
+vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm
+vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
+vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
+vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
+vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
+
+vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
+vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
+
+# *** Vector AMO operations (in addition to Zvamo) ***
+vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm
+vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm
+
+# *** RV32 Zba Standard Extension ***
+sh1add 0010000 .......... 010 ..... 0110011 @r
+sh2add 0010000 .......... 100 ..... 0110011 @r
+sh3add 0010000 .......... 110 ..... 0110011 @r
+
+# *** RV64 Zba Standard Extension (in addition to RV32 Zba) ***
+add_uw 0000100 .......... 000 ..... 0111011 @r
+sh1add_uw 0010000 .......... 010 ..... 0111011 @r
+sh2add_uw 0010000 .......... 100 ..... 0111011 @r
+sh3add_uw 0010000 .......... 110 ..... 0111011 @r
+slli_uw 00001 ............ 001 ..... 0011011 @sh
+
+# *** RV32 Zbb Standard Extension ***
+andn 0100000 .......... 111 ..... 0110011 @r
+clz 011000 000000 ..... 001 ..... 0010011 @r2
+cpop 011000 000010 ..... 001 ..... 0010011 @r2
+ctz 011000 000001 ..... 001 ..... 0010011 @r2
+max 0000101 .......... 110 ..... 0110011 @r
+maxu 0000101 .......... 111 ..... 0110011 @r
+min 0000101 .......... 100 ..... 0110011 @r
+minu 0000101 .......... 101 ..... 0110011 @r
+orc_b 001010 000111 ..... 101 ..... 0010011 @r2
+orn 0100000 .......... 110 ..... 0110011 @r
+# The encoding for rev8 differs between RV32 and RV64.
+# rev8_32 denotes the RV32 variant.
+rev8_32 011010 011000 ..... 101 ..... 0010011 @r2
+rol 0110000 .......... 001 ..... 0110011 @r
+ror 0110000 .......... 101 ..... 0110011 @r
+rori 01100 ............ 101 ..... 0010011 @sh
+sext_b 011000 000100 ..... 001 ..... 0010011 @r2
+sext_h 011000 000101 ..... 001 ..... 0010011 @r2
+xnor 0100000 .......... 100 ..... 0110011 @r
+# The encoding for zext.h differs between RV32 and RV64.
+# zext_h_32 denotes the RV32 variant.
+zext_h_32 0000100 00000 ..... 100 ..... 0110011 @r2
+
+# *** RV64 Zbb Standard Extension (in addition to RV32 Zbb) ***
+clzw 0110000 00000 ..... 001 ..... 0011011 @r2
+ctzw 0110000 00001 ..... 001 ..... 0011011 @r2
+cpopw 0110000 00010 ..... 001 ..... 0011011 @r2
+# The encoding for rev8 differs between RV32 and RV64.
+# When executing on RV64, the encoding used in RV32 is an illegal
+# instruction, so we use different handler functions to differentiate.
+rev8_64 011010 111000 ..... 101 ..... 0010011 @r2
+rolw 0110000 .......... 001 ..... 0111011 @r
+roriw 0110000 .......... 101 ..... 0011011 @sh5
+rorw 0110000 .......... 101 ..... 0111011 @r
+# The encoding for zext.h differs between RV32 and RV64.
+# When executing on RV64, the encoding used in RV32 is an illegal
+# instruction, so we use different handler functions to differentiate.
+zext_h_64 0000100 00000 ..... 100 ..... 0111011 @r2
+
+# *** RV32 Zbc Standard Extension ***
+clmul 0000101 .......... 001 ..... 0110011 @r
+clmulh 0000101 .......... 011 ..... 0110011 @r
+clmulr 0000101 .......... 010 ..... 0110011 @r
+
+# *** RV32 Zbs Standard Extension ***
+bclr 0100100 .......... 001 ..... 0110011 @r
+bclri 01001. ........... 001 ..... 0010011 @sh
+bext 0100100 .......... 101 ..... 0110011 @r
+bexti 01001. ........... 101 ..... 0010011 @sh
+binv 0110100 .......... 001 ..... 0110011 @r
+binvi 01101. ........... 001 ..... 0010011 @sh
+bset 0010100 .......... 001 ..... 0110011 @r
+bseti 00101. ........... 001 ..... 0010011 @sh
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
new file mode 100644
index 000000000..75c6ef80a
--- /dev/null
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -0,0 +1,128 @@
+/*
+ * RISC-V translation routines for the RISC-V privileged instructions.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
+{
+    /* Always generate a U-level ECALL; the cause is fixed up in do_interrupt. */
+ generate_exception(ctx, RISCV_EXCP_U_ECALL);
+ return true;
+}
+
+static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
+{
+ target_ulong ebreak_addr = ctx->base.pc_next;
+ target_ulong pre_addr = ebreak_addr - 4;
+ target_ulong post_addr = ebreak_addr + 4;
+ uint32_t pre = 0;
+ uint32_t ebreak = 0;
+ uint32_t post = 0;
+
+ /*
+     * The RISC-V semihosting spec defines the following
+     * three-instruction sequence to flag a semihosting call:
+ *
+ * slli zero, zero, 0x1f 0x01f01013
+ * ebreak 0x00100073
+ * srai zero, zero, 0x7 0x40705013
+ *
+ * The two shift operations on the zero register are no-ops, used
+ * here to signify a semihosting exception, rather than a breakpoint.
+ *
+ * Uncompressed instructions are required so that the sequence is easy
+ * to validate.
+ *
+ * The three instructions are required to lie in the same page so
+ * that no exception will be raised when fetching them.
+ */
+
+ if ((pre_addr & TARGET_PAGE_MASK) == (post_addr & TARGET_PAGE_MASK)) {
+ pre = opcode_at(&ctx->base, pre_addr);
+ ebreak = opcode_at(&ctx->base, ebreak_addr);
+ post = opcode_at(&ctx->base, post_addr);
+ }
+
+ if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
+ generate_exception(ctx, RISCV_EXCP_SEMIHOST);
+ } else {
+ generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
+ }
+ return true;
+}
+
+static bool trans_uret(DisasContext *ctx, arg_uret *a)
+{
+ return false;
+}
+
+static bool trans_sret(DisasContext *ctx, arg_sret *a)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+
+ if (has_ext(ctx, RVS)) {
+ gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ } else {
+ return false;
+ }
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool trans_mret(DisasContext *ctx, arg_mret *a)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+ gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+ gen_helper_wfi(cpu_env);
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_tlb_flush(cpu_env);
+ return true;
+#endif
+ return false;
+}
+
+static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
+{
+ return false;
+}
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
new file mode 100644
index 000000000..40fe132b0
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -0,0 +1,226 @@
+/*
+ * RISC-V translation routines for the RV64A Standard Extension.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
+{
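+    /*
+     * LR is modelled as a plain load whose address and value are
+     * recorded in load_res/load_val; the matching SC below implements
+     * the conditional store as a compare-and-swap against that value.
+     */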
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ if (a->rl) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ src1 = gen_pm_adjust_address(ctx, src1);
+ tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
+ if (a->aq) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+
+ /* Put addr in load_res, data in load_val. */
+ tcg_gen_mov_tl(load_res, src1);
+ gen_set_gpr(ctx, a->rd, load_val);
+
+ return true;
+}
+
+static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
+{
+ TCGv dest, src1, src2;
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
+ src1 = gen_pm_adjust_address(ctx, src1);
+ tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
+
+ /*
+     * Note that the TCG atomic primitives are sequentially consistent,
+     * so we can ignore AQ/RL along this path.
+ */
+ dest = dest_gpr(ctx, a->rd);
+ src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ tcg_gen_atomic_cmpxchg_tl(dest, load_res, load_val, src2,
+ ctx->mem_idx, mop);
+ tcg_gen_setcond_tl(TCG_COND_NE, dest, dest, load_val);
+ gen_set_gpr(ctx, a->rd, dest);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ /*
+ * Address comparison failure. However, we still need to
+ * provide the memory barrier implied by AQ/RL.
+ */
+ tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
+ gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));
+
+ gen_set_label(l2);
+ /*
+ * Clear the load reservation, since an SC must fail if there is
+     * an SC to any address in between an LR and SC pair.
+ */
+ tcg_gen_movi_tl(load_res, -1);
+
+ return true;
+}
+
+static bool gen_amo(DisasContext *ctx, arg_atomic *a,
+ void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
+ MemOp mop)
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ src1 = gen_pm_adjust_address(ctx, src1);
+ func(dest, src1, src2, ctx->mem_idx, mop);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
+{
+ REQUIRE_EXT(ctx, RVA);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
+}
+
+static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
+}
+
+static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
+}
+
+static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
+}
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
new file mode 100644
index 000000000..c8d31907c
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
@@ -0,0 +1,506 @@
+/*
+ * RISC-V translation routines for the Zb[abcs] Standard Extension.
+ *
+ * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com
+ * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com
+ * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZBA(ctx) do { \
+ if (!RISCV_CPU(ctx->cs)->cfg.ext_zba) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZBB(ctx) do { \
+ if (!RISCV_CPU(ctx->cs)->cfg.ext_zbb) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZBC(ctx) do { \
+ if (!RISCV_CPU(ctx->cs)->cfg.ext_zbc) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZBS(ctx) do { \
+ if (!RISCV_CPU(ctx->cs)->cfg.ext_zbs) { \
+ return false; \
+ } \
+} while (0)
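+
+/*
+ * Returning false from a trans_* function makes the decoder treat the
+ * encoding as an illegal instruction, so the checks above cause the
+ * Zb[abcs] instructions to trap when the extension is not enabled.
+ */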
+
+static void gen_clz(TCGv ret, TCGv arg1)
+{
+ tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS);
+}
+
+static void gen_clzw(TCGv ret, TCGv arg1)
+{
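+    /*
+     * Shift the operand into the high 32 bits so that a 64-bit count of
+     * leading zeros equals the 32-bit count, returning 32 for zero input.
+     */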
+ TCGv t = tcg_temp_new();
+ tcg_gen_shli_tl(t, arg1, 32);
+ tcg_gen_clzi_tl(ret, t, 32);
+ tcg_temp_free(t);
+}
+
+static bool trans_clz(DisasContext *ctx, arg_clz *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary_per_ol(ctx, a, EXT_NONE, gen_clz, gen_clzw);
+}
+
+static void gen_ctz(TCGv ret, TCGv arg1)
+{
+ tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS);
+}
+
+static void gen_ctzw(TCGv ret, TCGv arg1)
+{
+ tcg_gen_ctzi_tl(ret, arg1, 32);
+}
+
+static bool trans_ctz(DisasContext *ctx, arg_ctz *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary_per_ol(ctx, a, EXT_ZERO, gen_ctz, gen_ctzw);
+}
+
+static bool trans_cpop(DisasContext *ctx, arg_cpop *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl);
+}
+
+static bool trans_andn(DisasContext *ctx, arg_andn *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_andc_tl);
+}
+
+static bool trans_orn(DisasContext *ctx, arg_orn *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_orc_tl);
+}
+
+static bool trans_xnor(DisasContext *ctx, arg_xnor *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl);
+}
+
+static bool trans_min(DisasContext *ctx, arg_min *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl);
+}
+
+static bool trans_max(DisasContext *ctx, arg_max *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl);
+}
+
+static bool trans_minu(DisasContext *ctx, arg_minu *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl);
+}
+
+static bool trans_maxu(DisasContext *ctx, arg_maxu *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl);
+}
+
+static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8s_tl);
+}
+
+static bool trans_sext_h(DisasContext *ctx, arg_sext_h *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16s_tl);
+}
+
+static void gen_sbop_mask(TCGv ret, TCGv shamt)
+{
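+    /* Build the single-bit mask 1 << shamt used by bset/bclr/binv. */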
+ tcg_gen_movi_tl(ret, 1);
+ tcg_gen_shl_tl(ret, ret, shamt);
+}
+
+static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
+{
+ TCGv t = tcg_temp_new();
+
+ gen_sbop_mask(t, shamt);
+ tcg_gen_or_tl(ret, arg1, t);
+
+ tcg_temp_free(t);
+}
+
+static bool trans_bset(DisasContext *ctx, arg_bset *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift(ctx, a, EXT_NONE, gen_bset);
+}
+
+static bool trans_bseti(DisasContext *ctx, arg_bseti *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset);
+}
+
+static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
+{
+ TCGv t = tcg_temp_new();
+
+ gen_sbop_mask(t, shamt);
+ tcg_gen_andc_tl(ret, arg1, t);
+
+ tcg_temp_free(t);
+}
+
+static bool trans_bclr(DisasContext *ctx, arg_bclr *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift(ctx, a, EXT_NONE, gen_bclr);
+}
+
+static bool trans_bclri(DisasContext *ctx, arg_bclri *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr);
+}
+
+static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
+{
+ TCGv t = tcg_temp_new();
+
+ gen_sbop_mask(t, shamt);
+ tcg_gen_xor_tl(ret, arg1, t);
+
+ tcg_temp_free(t);
+}
+
+static bool trans_binv(DisasContext *ctx, arg_binv *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift(ctx, a, EXT_NONE, gen_binv);
+}
+
+static bool trans_binvi(DisasContext *ctx, arg_binvi *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv);
+}
+
+static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
+{
+ tcg_gen_shr_tl(ret, arg1, shamt);
+ tcg_gen_andi_tl(ret, ret, 1);
+}
+
+static bool trans_bext(DisasContext *ctx, arg_bext *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift(ctx, a, EXT_NONE, gen_bext);
+}
+
+static bool trans_bexti(DisasContext *ctx, arg_bexti *a)
+{
+ REQUIRE_ZBS(ctx);
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext);
+}
+
+static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+    /* truncate to 32 bits */
+ tcg_gen_trunc_tl_i32(t1, arg1);
+ tcg_gen_trunc_tl_i32(t2, arg2);
+
+ tcg_gen_rotr_i32(t1, t1, t2);
+
+    /* sign-extend the 32-bit result to the register width */
+ tcg_gen_ext_i32_tl(ret, t1);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+static bool trans_ror(DisasContext *ctx, arg_ror *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw);
+}
+
+static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t1, arg1);
+ tcg_gen_rotri_i32(t1, t1, shamt);
+ tcg_gen_ext_i32_tl(ret, t1);
+
+ tcg_temp_free_i32(t1);
+}
+
+static bool trans_rori(DisasContext *ctx, arg_rori *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
+ tcg_gen_rotri_tl, gen_roriw);
+}
+
+static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+
+    /* truncate to 32 bits */
+ tcg_gen_trunc_tl_i32(t1, arg1);
+ tcg_gen_trunc_tl_i32(t2, arg2);
+
+ tcg_gen_rotl_i32(t1, t1, t2);
+
+    /* sign-extend the 32-bit result to the register width */
+ tcg_gen_ext_i32_tl(ret, t1);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+static bool trans_rol(DisasContext *ctx, arg_rol *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw);
+}
+
+static void gen_rev8_32(TCGv ret, TCGv src1)
+{
+ tcg_gen_bswap32_tl(ret, src1, TCG_BSWAP_OS);
+}
+
+static bool trans_rev8_32(DisasContext *ctx, arg_rev8_32 *a)
+{
+ REQUIRE_32BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, gen_rev8_32);
+}
+
+static bool trans_rev8_64(DisasContext *ctx, arg_rev8_64 *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_bswap_tl);
+}
+
+static void gen_orc_b(TCGv ret, TCGv source1)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv low7 = tcg_constant_tl(dup_const_tl(MO_8, 0x7f));
+
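+    /*
+     * Per-byte sketch: for source byte 0x40, (0x40 & 0x7f) + 0x7f = 0xbf
+     * sets the msb; OR-ing the source back in also covers bytes whose
+     * only set bit is bit 7, while a zero byte yields 0x7f, msb clear.
+     */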
+ /* Set msb in each byte if the byte was non-zero. */
+ tcg_gen_and_tl(tmp, source1, low7);
+ tcg_gen_add_tl(tmp, tmp, low7);
+ tcg_gen_or_tl(tmp, tmp, source1);
+
+    /* Extract the msb to the lsb in each byte. */
+ tcg_gen_andc_tl(tmp, tmp, low7);
+ tcg_gen_shri_tl(tmp, tmp, 7);
+
+ /* Replicate the lsb of each byte across the byte. */
+ tcg_gen_muli_tl(ret, tmp, 0xff);
+
+ tcg_temp_free(tmp);
+}
+
+static bool trans_orc_b(DisasContext *ctx, arg_orc_b *a)
+{
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_ZERO, gen_orc_b);
+}
+
+#define GEN_SHADD(SHAMT) \
+static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \
+{ \
+ TCGv t = tcg_temp_new(); \
+ \
+ tcg_gen_shli_tl(t, arg1, SHAMT); \
+ tcg_gen_add_tl(ret, t, arg2); \
+ \
+ tcg_temp_free(t); \
+}
+
+GEN_SHADD(1)
+GEN_SHADD(2)
+GEN_SHADD(3)
+
+#define GEN_TRANS_SHADD(SHAMT) \
+static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \
+{ \
+ REQUIRE_ZBA(ctx); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \
+}
+
+GEN_TRANS_SHADD(1)
+GEN_TRANS_SHADD(2)
+GEN_TRANS_SHADD(3)
+
+static bool trans_zext_h_32(DisasContext *ctx, arg_zext_h_32 *a)
+{
+ REQUIRE_32BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl);
+}
+
+static bool trans_zext_h_64(DisasContext *ctx, arg_zext_h_64 *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl);
+}
+
+static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_NONE, gen_clzw);
+}
+
+static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ return gen_unary(ctx, a, EXT_ZERO, gen_ctzw);
+}
+
+static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
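+    /* Force a 32-bit operand length so only the low word is counted. */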
+ ctx->ol = MXL_RV32;
+ return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl);
+}
+
+static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift(ctx, a, EXT_NONE, gen_rorw);
+}
+
+static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw);
+}
+
+static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBB(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift(ctx, a, EXT_NONE, gen_rolw);
+}
+
+#define GEN_SHADD_UW(SHAMT) \
+static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \
+{ \
+ TCGv t = tcg_temp_new(); \
+ \
+ tcg_gen_ext32u_tl(t, arg1); \
+ \
+ tcg_gen_shli_tl(t, t, SHAMT); \
+ tcg_gen_add_tl(ret, t, arg2); \
+ \
+ tcg_temp_free(t); \
+}
+
+GEN_SHADD_UW(1)
+GEN_SHADD_UW(2)
+GEN_SHADD_UW(3)
+
+#define GEN_TRANS_SHADD_UW(SHAMT) \
+static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \
+ arg_sh##SHAMT##add_uw *a) \
+{ \
+ REQUIRE_64BIT(ctx); \
+ REQUIRE_ZBA(ctx); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw); \
+}
+
+GEN_TRANS_SHADD_UW(1)
+GEN_TRANS_SHADD_UW(2)
+GEN_TRANS_SHADD_UW(3)
+
+static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t = tcg_temp_new();
+ tcg_gen_ext32u_tl(t, arg1);
+ tcg_gen_add_tl(ret, t, arg2);
+ tcg_temp_free(t);
+}
+
+static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBA(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_add_uw);
+}
+
+static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt)
+{
+ tcg_gen_deposit_z_tl(dest, src, shamt, MIN(32, TARGET_LONG_BITS - shamt));
+}
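+
+/*
+ * tcg_gen_deposit_z_tl(dest, src, ofs, len) deposits the low len bits
+ * of src at offset ofs into an otherwise-zero result, so for shamt <= 32
+ * this is zext32(src) << shamt; for larger shifts the MIN() clamps the
+ * field length so that only bits still inside the register are kept.
+ */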
+
+static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_ZBA(ctx);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw);
+}
+
+static bool trans_clmul(DisasContext *ctx, arg_clmul *a)
+{
+ REQUIRE_ZBC(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul);
+}
+
+static void gen_clmulh(TCGv dst, TCGv src1, TCGv src2)
+{
+ gen_helper_clmulr(dst, src1, src2);
+ tcg_gen_shri_tl(dst, dst, 1);
+}
+
+static bool trans_clmulh(DisasContext *ctx, arg_clmulr *a)
+{
+ REQUIRE_ZBC(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_clmulh);
+}
+
+static bool trans_clmulr(DisasContext *ctx, arg_clmulh *a)
+{
+ REQUIRE_ZBC(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr);
+}
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
new file mode 100644
index 000000000..64fb0046f
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -0,0 +1,449 @@
+/*
+ * RISC-V translation routines for the RV64D Standard Extension.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+static bool trans_fld(DisasContext *ctx, arg_fld *a)
+{
+ TCGv addr;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ if (a->imm) {
+ TCGv temp = temp_new(ctx);
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEQ);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
+{
+ TCGv addr;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ if (a->imm) {
+ TCGv temp = temp_new(ctx);
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEQ);
+
+ return true;
+}
+
+static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ if (a->rs1 == a->rs2) { /* FMOV */
+ tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
+ } else {
+ tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
+ cpu_fpr[a->rs1], 0, 63);
+ }
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ if (a->rs1 == a->rs2) { /* FNEG */
+ tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
+ tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
+ tcg_temp_free_i64(t0);
+ }
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+ if (a->rs1 == a->rs2) { /* FABS */
+ tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
+ } else {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
+ tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
+ tcg_temp_free_i64(t0);
+ }
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_feq_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_flt_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_fle_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_fclass_d(dest, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_w_d(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_wu_d(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_l_d(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_lu_d(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+#ifdef TARGET_RISCV64
+ gen_set_gpr(ctx, a->rd, cpu_fpr[a->rs1]);
+ return true;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVD);
+
+#ifdef TARGET_RISCV64
+ tcg_gen_mov_tl(cpu_fpr[a->rd], get_gpr(ctx, a->rs1, EXT_NONE));
+ mark_fs_dirty(ctx);
+ return true;
+#else
+ qemu_build_not_reached();
+#endif
+}
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
new file mode 100644
index 000000000..b5459249c
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -0,0 +1,473 @@
+/*
+ * RISC-V translation routines for the RV64F Standard Extension.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_FPU do {\
+ if (ctx->mstatus_fs == 0) \
+ return false; \
+} while (0)
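+
+/*
+ * mstatus.FS == 0 means the floating-point unit is disabled, in which
+ * case every RVF/RVD instruction must raise an illegal-instruction
+ * exception; returning false lets the decoder's fallback generate it.
+ */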
+
+static bool trans_flw(DisasContext *ctx, arg_flw *a)
+{
+ TCGv_i64 dest;
+ TCGv addr;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ if (a->imm) {
+ TCGv temp = temp_new(ctx);
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ dest = cpu_fpr[a->rd];
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
+ gen_nanbox_s(dest, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
+{
+ TCGv addr;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ if (a->imm) {
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
+
+ return true;
+}
+
+static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env,
+ cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ if (a->rs1 == a->rs2) { /* FMOV */
+ gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
+ } else { /* FSGNJ */
+ TCGv_i64 rs1 = tcg_temp_new_i64();
+ TCGv_i64 rs2 = tcg_temp_new_i64();
+
+ gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+ gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+ /* This formulation retains the nanboxing of rs2. */
+ tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
+ tcg_temp_free_i64(rs1);
+ tcg_temp_free_i64(rs2);
+ }
+ mark_fs_dirty(ctx);
+ return true;
+}
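+
+/*
+ * Single-precision values are NaN-boxed in the 64-bit FP registers: the
+ * upper 32 bits must be all-ones. Taking bit 31 and the upper 32 bits
+ * from the checked rs2 keeps the result boxed without a separate
+ * gen_nanbox_s() call.
+ */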
+
+static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
+{
+ TCGv_i64 rs1, rs2, mask;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ rs1 = tcg_temp_new_i64();
+ gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+
+ if (a->rs1 == a->rs2) { /* FNEG */
+ tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
+ } else {
+ rs2 = tcg_temp_new_i64();
+ gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+ /*
+ * Replace bit 31 in rs1 with inverse in rs2.
+ * This formulation retains the nanboxing of rs1.
+ */
+ mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
+ tcg_gen_nor_i64(rs2, rs2, mask);
+ tcg_gen_and_i64(rs1, mask, rs1);
+ tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
+
+ tcg_temp_free_i64(rs2);
+ }
+ tcg_temp_free_i64(rs1);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
+{
+ TCGv_i64 rs1, rs2;
+
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ rs1 = tcg_temp_new_i64();
+ gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+
+ if (a->rs1 == a->rs2) { /* FABS */
+ tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
+ } else {
+ rs2 = tcg_temp_new_i64();
+ gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+ /*
+ * Xor bit 31 in rs1 with that in rs2.
+ * This formulation retains the nanboxing of rs1.
+ */
+ tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
+ tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
+
+ tcg_temp_free_i64(rs2);
+ }
+ tcg_temp_free_i64(rs1);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
+ cpu_fpr[a->rs2]);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
+{
+ /* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+#if defined(TARGET_RISCV64)
+ tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
+#else
+ tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
+#endif
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
+{
+ /* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
+ gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_FPU;
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
new file mode 100644
index 000000000..ecbf77ff9
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
@@ -0,0 +1,186 @@
+/*
+ * RISC-V translation routines for the RVH Hypervisor Extension.
+ *
+ * Copyright (c) 2020 Western Digital
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CONFIG_USER_ONLY
+static bool check_access(DisasContext *ctx)
+{
+ if (!ctx->hlsx) {
+ if (ctx->virt_enabled) {
+ generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
+ } else {
+ generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
+ }
+ return false;
+ }
+ return true;
+}
+#endif
+
+static bool do_hlv(DisasContext *ctx, arg_r2 *a, MemOp mop)
+{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
+ if (check_access(ctx)) {
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
+ tcg_gen_qemu_ld_tl(dest, addr, mem_idx, mop);
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+ return true;
+#endif
+}
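+
+/*
+ * ORing TB_FLAGS_PRIV_HYP_ACCESS_MASK into mem_idx selects an MMU index
+ * under which the access is translated with the guest (two-stage)
+ * translation and permission rules that HLV/HSV require, rather than
+ * those of the current privilege mode.
+ */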
+
+static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_SB);
+}
+
+static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_TESW);
+}
+
+static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_TESL);
+}
+
+static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_UB);
+}
+
+static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_TEUW);
+}
+
+static bool do_hsv(DisasContext *ctx, arg_r2_s *a, MemOp mop)
+{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
+ if (check_access(ctx)) {
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
+ int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
+ tcg_gen_qemu_st_tl(data, addr, mem_idx, mop);
+ }
+ return true;
+#endif
+}
+
+static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hsv(ctx, a, MO_SB);
+}
+
+static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hsv(ctx, a, MO_TESW);
+}
+
+static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+ return do_hsv(ctx, a, MO_TESL);
+}
+
+static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_TEUL);
+}
+
+static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVH);
+ return do_hlv(ctx, a, MO_TEQ);
+}
+
+static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVH);
+ return do_hsv(ctx, a, MO_TEQ);
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool do_hlvx(DisasContext *ctx, arg_r2 *a,
+ void (*func)(TCGv, TCGv_env, TCGv))
+{
+ if (check_access(ctx)) {
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ func(dest, cpu_env, addr);
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+ return true;
+}
+#endif
+
+static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+#ifndef CONFIG_USER_ONLY
+ return do_hlvx(ctx, a, gen_helper_hyp_hlvx_hu);
+#else
+ return false;
+#endif
+}
+
+static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+#ifndef CONFIG_USER_ONLY
+ return do_hlvx(ctx, a, gen_helper_hyp_hlvx_wu);
+#else
+ return false;
+#endif
+}
+
+static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+#ifndef CONFIG_USER_ONLY
+ gen_helper_hyp_gvma_tlb_flush(cpu_env);
+ return true;
+#endif
+ return false;
+}
+
+static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a)
+{
+ REQUIRE_EXT(ctx, RVH);
+#ifndef CONFIG_USER_ONLY
+ gen_helper_hyp_tlb_flush(cpu_env);
+ return true;
+#endif
+ return false;
+}
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
new file mode 100644
index 000000000..e51dbc41c
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -0,0 +1,577 @@
+/*
+ * RISC-V translation routines for the RVXI Base Integer Instruction Set.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+static bool trans_illegal(DisasContext *ctx, arg_empty *a)
+{
+ gen_exception_illegal(ctx);
+ return true;
+}
+
+static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
+{
+ REQUIRE_64BIT(ctx);
+ return trans_illegal(ctx, a);
+}
+
+static bool trans_lui(DisasContext *ctx, arg_lui *a)
+{
+ if (a->rd != 0) {
+ tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm);
+ }
+ return true;
+}
+
+static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
+{
+ if (a->rd != 0) {
+ tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next);
+ }
+ return true;
+}
+
+static bool trans_jal(DisasContext *ctx, arg_jal *a)
+{
+ gen_jal(ctx, a->rd, a->imm);
+ return true;
+}
+
+static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
+{
+ TCGLabel *misaligned = NULL;
+
+ tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
+ tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
+
+ if (!has_ext(ctx, RVC)) {
+ TCGv t0 = tcg_temp_new();
+
+ misaligned = gen_new_label();
+ tcg_gen_andi_tl(t0, cpu_pc, 0x2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
+ tcg_temp_free(t0);
+ }
+
+ if (a->rd != 0) {
+ tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
+ }
+ tcg_gen_lookup_and_goto_ptr();
+
+ if (misaligned) {
+ gen_set_label(misaligned);
+ gen_exception_inst_addr_mis(ctx);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ return true;
+}
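+
+/*
+ * JALR clears bit 0 of the computed target, as required by the ISA, so
+ * without RVC (where all targets must be 4-byte aligned) only bit 1
+ * needs the dynamic misalignment check above.
+ */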
+
+static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
+{
+ TCGLabel *l = gen_new_label();
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
+
+ tcg_gen_brcond_tl(cond, src1, src2, l);
+ gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
+
+ gen_set_label(l); /* branch taken */
+
+ if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
+ /* misaligned */
+ gen_exception_inst_addr_mis(ctx);
+ } else {
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ return true;
+}
+
+static bool trans_beq(DisasContext *ctx, arg_beq *a)
+{
+ return gen_branch(ctx, a, TCG_COND_EQ);
+}
+
+static bool trans_bne(DisasContext *ctx, arg_bne *a)
+{
+ return gen_branch(ctx, a, TCG_COND_NE);
+}
+
+static bool trans_blt(DisasContext *ctx, arg_blt *a)
+{
+ return gen_branch(ctx, a, TCG_COND_LT);
+}
+
+static bool trans_bge(DisasContext *ctx, arg_bge *a)
+{
+ return gen_branch(ctx, a, TCG_COND_GE);
+}
+
+static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
+{
+ return gen_branch(ctx, a, TCG_COND_LTU);
+}
+
+static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
+{
+ return gen_branch(ctx, a, TCG_COND_GEU);
+}
+
+static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
+
+ if (a->imm) {
+ TCGv temp = temp_new(ctx);
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool trans_lb(DisasContext *ctx, arg_lb *a)
+{
+ return gen_load(ctx, a, MO_SB);
+}
+
+static bool trans_lh(DisasContext *ctx, arg_lh *a)
+{
+ return gen_load(ctx, a, MO_TESW);
+}
+
+static bool trans_lw(DisasContext *ctx, arg_lw *a)
+{
+ return gen_load(ctx, a, MO_TESL);
+}
+
+static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
+{
+ return gen_load(ctx, a, MO_UB);
+}
+
+static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
+{
+ return gen_load(ctx, a, MO_TEUW);
+}
+
+static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
+{
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ if (a->imm) {
+ TCGv temp = temp_new(ctx);
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+ addr = gen_pm_adjust_address(ctx, addr);
+
+ tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
+ return true;
+}
+
+static bool trans_sb(DisasContext *ctx, arg_sb *a)
+{
+ return gen_store(ctx, a, MO_SB);
+}
+
+static bool trans_sh(DisasContext *ctx, arg_sh *a)
+{
+ return gen_store(ctx, a, MO_TESW);
+}
+
+static bool trans_sw(DisasContext *ctx, arg_sw *a)
+{
+ return gen_store(ctx, a, MO_TESL);
+}
+
+static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_load(ctx, a, MO_TEUL);
+}
+
+static bool trans_ld(DisasContext *ctx, arg_ld *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_load(ctx, a, MO_TEQ);
+}
+
+static bool trans_sd(DisasContext *ctx, arg_sd *a)
+{
+ REQUIRE_64BIT(ctx);
+ return gen_store(ctx, a, MO_TEQ);
+}
+
+static bool trans_addi(DisasContext *ctx, arg_addi *a)
+{
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+}
+
+static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
+{
+ tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
+}
+
+static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
+{
+ tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
+}
+
+static bool trans_slti(DisasContext *ctx, arg_slti *a)
+{
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
+}
+
+static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
+{
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
+}
+
+static bool trans_xori(DisasContext *ctx, arg_xori *a)
+{
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_xori_tl);
+}
+
+static bool trans_ori(DisasContext *ctx, arg_ori *a)
+{
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_ori_tl);
+}
+
+static bool trans_andi(DisasContext *ctx, arg_andi *a)
+{
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_andi_tl);
+}
+
+static bool trans_slli(DisasContext *ctx, arg_slli *a)
+{
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+}
+
+static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
+{
+ tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
+}
+
+static bool trans_srli(DisasContext *ctx, arg_srli *a)
+{
+ return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
+ tcg_gen_shri_tl, gen_srliw);
+}
+
+static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
+{
+ tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
+}
+
+static bool trans_srai(DisasContext *ctx, arg_srai *a)
+{
+ return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
+ tcg_gen_sari_tl, gen_sraiw);
+}
+
+static bool trans_add(DisasContext *ctx, arg_add *a)
+{
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+}
+
+static bool trans_sub(DisasContext *ctx, arg_sub *a)
+{
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+}
+
+static bool trans_sll(DisasContext *ctx, arg_sll *a)
+{
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+}
+
+static bool trans_slt(DisasContext *ctx, arg_slt *a)
+{
+ return gen_arith(ctx, a, EXT_SIGN, gen_slt);
+}
+
+static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
+{
+ return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
+}
+
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
+{
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_xor_tl);
+}
+
+static bool trans_srl(DisasContext *ctx, arg_srl *a)
+{
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+}
+
+static bool trans_sra(DisasContext *ctx, arg_sra *a)
+{
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+}
+
+static bool trans_or(DisasContext *ctx, arg_or *a)
+{
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_or_tl);
+}
+
+static bool trans_and(DisasContext *ctx, arg_and *a)
+{
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_and_tl);
+}
+
+static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+}
+
+static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+}
+
+static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
+}
+
+static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
+}
+
+static bool trans_addw(DisasContext *ctx, arg_addw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+}
+
+static bool trans_subw(DisasContext *ctx, arg_subw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+}
+
+static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+}
+
+static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+}
+
+static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
+{
+ REQUIRE_64BIT(ctx);
+ ctx->ol = MXL_RV32;
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+}
+
+static bool trans_fence(DisasContext *ctx, arg_fence *a)
+{
+ /* FENCE is a full memory barrier. */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ return true;
+}
+
+static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
+{
+ if (!ctx->ext_ifencei) {
+ return false;
+ }
+
+ /*
+ * FENCE_I is a no-op in QEMU; however, we need to end the
+ * translation block.
+ */
+ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+static bool do_csr_post(DisasContext *ctx)
+{
+ /* We may have changed important cpu state -- exit to main loop. */
+ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+static bool do_csrr(DisasContext *ctx, int rd, int rc)
+{
+ TCGv dest = dest_gpr(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrr(dest, cpu_env, csr);
+ gen_set_gpr(ctx, rd, dest);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
+{
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrw(cpu_env, csr, src);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
+{
+ TCGv dest = dest_gpr(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrrw(dest, cpu_env, csr, src, mask);
+ gen_set_gpr(ctx, rd, dest);
+ return do_csr_post(ctx);
+}
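+
+/*
+ * The helpers above perform a single read-modify-write of the CSR,
+ * new = (old & ~mask) | (src & mask): csrrw passes mask = -1, csrrs
+ * passes src = -1 with mask = rs1 (set bits), and csrrc passes src = 0
+ * with mask = rs1 (clear bits).
+ */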
+
+static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
+{
+ TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+}
+
+static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
+{
+ /*
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
+ * cause any of the side effects that might occur on a csr write.
+ * Note that if rs1 specifies a register other than x0, holding
+ * a zero value, the instruction will still attempt to write the
+ * unmodified value back to the csr and will cause side effects.
+ */
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+}
+
+static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
+{
+ /*
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
+ * cause any of the side effects that might occur on a csr write.
+ * Note that if rs1 specifies a register other than x0, holding
+ * a zero value, the instruction will still attempt to write the
+ * unmodified value back to the csr and will cause side effects.
+ */
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+}
+
+static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
+{
+ TCGv src = tcg_constant_tl(a->rs1);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+}
+
+static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
+{
+ /*
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
+ * cause any of the side effects that might occur on a csr write.
+ * Note that if rs1 specifies a register other than x0, holding
+ * a zero value, the instruction will still attempt to write the
+ * unmodified value back to the csr and will cause side effects.
+ */
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+}
+
+static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
+{
+ /*
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
+ * cause any of the side effects that might occur on a csr write.
+ * Note that if rs1 specifies a register other than x0, holding
+ * a zero value, the instruction will still attempt to write the
+ * unmodified value back to the csr and will cause side effects.
+ */
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+}
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
new file mode 100644
index 000000000..2af0e5c13
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
@@ -0,0 +1,271 @@
+/*
+ * RISC-V translation routines for the RV64M Standard Extension.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
+ * Bastian Koppelmann, kbastian@mail.uni-paderborn.de
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+static bool trans_mul(DisasContext *ctx, arg_mul *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+}
+
+static void gen_mulh(TCGv ret, TCGv s1, TCGv s2)
+{
+ TCGv discard = tcg_temp_new();
+
+ tcg_gen_muls2_tl(discard, ret, s1, s2);
+ tcg_temp_free(discard);
+}
+
+static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2)
+{
+ tcg_gen_mul_tl(ret, s1, s2);
+ tcg_gen_sari_tl(ret, ret, 32);
+}
+
+static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w);
+}
+
+static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv rl = tcg_temp_new();
+ TCGv rh = tcg_temp_new();
+
+ tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
+ /* fix up for one negative */
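+ /*
+ * If arg1 is negative, its unsigned reinterpretation is
+ * arg1 + 2^XLEN, so the unsigned high product overshoots by exactly
+ * arg2; the arithmetic shift yields an all-ones mask when arg1 < 0,
+ * selecting arg2 to subtract from the high half.
+ */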
+ tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
+ tcg_gen_and_tl(rl, rl, arg2);
+ tcg_gen_sub_tl(ret, rh, rl);
+
+ tcg_temp_free(rl);
+ tcg_temp_free(rh);
+}
+
+static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_ext32s_tl(t1, arg1);
+ tcg_gen_ext32u_tl(t2, arg2);
+ tcg_gen_mul_tl(ret, t1, t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_gen_sari_tl(ret, ret, 32);
+}
+
+static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w);
+}
+
+static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2)
+{
+ TCGv discard = tcg_temp_new();
+
+ tcg_gen_mulu2_tl(discard, ret, s1, s2);
+ tcg_temp_free(discard);
+}
+
+static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ /* gen_mulh_w works for either sign as input. */
+ return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w);
+}
+
+static void gen_div(TCGv ret, TCGv source1, TCGv source2)
+{
+ TCGv temp1, temp2, zero, one, mone, min;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ zero = tcg_constant_tl(0);
+ one = tcg_constant_tl(1);
+ mone = tcg_constant_tl(-1);
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
+
+ /*
+ * If overflow, set temp2 to 1, else source2.
+ * This produces the required result of min.
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
+ tcg_gen_and_tl(temp1, temp1, temp2);
+ tcg_gen_movcond_tl(TCG_COND_NE, temp2, temp1, zero, one, source2);
+
+ /*
+ * If div by zero, set temp1 to -1 and temp2 to 1 to
+ * produce the required result of -1.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, mone, source1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2);
+
+ tcg_gen_div_tl(ret, temp1, temp2);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
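+
+/*
+ * The movcond fix-ups above encode the RISC-V corner cases without
+ * branches: on RV64, 0x8000000000000000 / -1 (signed overflow) becomes
+ * min / 1 = min, and x / 0 becomes -1 / 1 = -1, so the host division
+ * instruction can never trap.
+ */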
+
+static bool trans_div(DisasContext *ctx, arg_div *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith(ctx, a, EXT_SIGN, gen_div);
+}
+
+static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
+{
+ TCGv temp1, temp2, zero, one, max;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ zero = tcg_constant_tl(0);
+ one = tcg_constant_tl(1);
+ max = tcg_constant_tl(~0);
+
+ /*
+ * If div by zero, set temp1 to max and temp2 to 1 to
+ * produce the required result of max.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
+ tcg_gen_divu_tl(ret, temp1, temp2);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+static bool trans_divu(DisasContext *ctx, arg_divu *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+}
+
+static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
+{
+ TCGv temp1, temp2, zero, one, mone, min;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ zero = tcg_constant_tl(0);
+ one = tcg_constant_tl(1);
+ mone = tcg_constant_tl(-1);
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
+
+ /*
+ * If overflow, set temp1 to 0, else source1.
+ * This avoids a possible host trap, and produces the required result of 0.
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
+ tcg_gen_and_tl(temp1, temp1, temp2);
+ tcg_gen_movcond_tl(TCG_COND_NE, temp1, temp1, zero, zero, source1);
+
+ /*
+ * If div by zero, set temp2 to 1, else source2.
+ * This avoids a possible host trap; the incorrect intermediate
+ * result is fixed up below.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
+
+ tcg_gen_rem_tl(temp1, temp1, temp2);
+
+ /* If div by zero, the required result is the original dividend. */
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+static bool trans_rem(DisasContext *ctx, arg_rem *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+}
+
+static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
+{
+ TCGv temp, zero, one;
+
+ temp = tcg_temp_new();
+ zero = tcg_constant_tl(0);
+ one = tcg_constant_tl(1);
+
+ /*
+ * If div by zero, set temp to 1, else source2.
+ * This avoids a possible host trap; the incorrect intermediate
+ * result is fixed up below.
+ */
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp, source2, zero, one, source2);
+
+ tcg_gen_remu_tl(temp, source1, temp);
+
+ /* If div by zero, the required result is the original dividend. */
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp);
+
+ tcg_temp_free(temp);
+}
+
+static bool trans_remu(DisasContext *ctx, arg_remu *a)
+{
+ REQUIRE_EXT(ctx, RVM);
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+}
+
+static bool trans_mulw(DisasContext *ctx, arg_mulw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+}
+
+static bool trans_divw(DisasContext *ctx, arg_divw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_SIGN, gen_div);
+}
+
+static bool trans_divuw(DisasContext *ctx, arg_divuw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+}
+
+static bool trans_remw(DisasContext *ctx, arg_remw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+}
+
+static bool trans_remuw(DisasContext *ctx, arg_remuw *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV32;
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+}
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
new file mode 100644
index 000000000..17ee3babe
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -0,0 +1,2867 @@
+/*
+ * RISC-V translation routines for the RVV Standard Extension.
+ *
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "tcg/tcg-op-gvec.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "internals.h"
+
+static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
+{
+ TCGv s1, s2, dst;
+
+ if (!has_ext(ctx, RVV)) {
+ return false;
+ }
+
+ s2 = get_gpr(ctx, a->rs2, EXT_ZERO);
+ dst = dest_gpr(ctx, a->rd);
+
+ /* Using x0 as the rs1 register specifier encodes an infinite AVL. */
+ if (a->rs1 == 0) {
+ /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
+ s1 = tcg_constant_tl(RV_VLEN_MAX);
+ } else {
+ s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
+ }
+ gen_helper_vsetvl(dst, cpu_env, s1, s2);
+ gen_set_gpr(ctx, a->rd, dst);
+
+ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
+ tcg_gen_lookup_and_goto_ptr();
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
+{
+ TCGv s1, s2, dst;
+
+ if (!has_ext(ctx, RVV)) {
+ return false;
+ }
+
+ s2 = tcg_constant_tl(a->zimm);
+ dst = dest_gpr(ctx, a->rd);
+
+ /* Using x0 as the rs1 register specifier encodes an infinite AVL. */
+ if (a->rs1 == 0) {
+ /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
+ s1 = tcg_constant_tl(RV_VLEN_MAX);
+ } else {
+ s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
+ }
+ gen_helper_vsetvl(dst, cpu_env, s1, s2);
+ gen_set_gpr(ctx, a->rd, dst);
+
+ gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+/* vector register offset from env */
+static uint32_t vreg_ofs(DisasContext *s, int reg)
+{
+ return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
+}
+
+/* check functions */
+
+/*
+ * cpu_get_tb_cpu_state() sets VILL when RVV is not present,
+ * so RVV availability is also checked by this function.
+ */
+static bool vext_check_isa_ill(DisasContext *s)
+{
+ return !s->vill;
+}
+
+/*
+ * There are two rules checked here:
+ *
+ * 1. Vector register numbers must be multiples of LMUL. (Section 3.2)
+ *
+ * 2. For all widening instructions, the destination LMUL value must also be
+ * a supported LMUL value. (Section 11.2)
+ */
+static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
+{
+ /*
+ * The destination vector register group results are arranged as if both
+ * SEW and LMUL were at twice their current settings. (Section 11.2).
+ */
+ int legal = widen ? 2 << s->lmul : 1 << s->lmul;
+
+ return !((s->lmul == 0x3 && widen) || (reg % legal));
+}
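+
+/*
+ * For example, with LMUL = 2 (s->lmul == 1) an ordinary instruction may
+ * only name even-numbered vector registers, while a widening one (whose
+ * destination uses 2 * LMUL) may only name multiples of four; widening
+ * with LMUL = 8 is rejected outright.
+ */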
+
+/*
+ * There are two rules checked here:
+ *
+ * 1. The destination vector register group for a masked vector instruction can
+ * only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
+ *
+ * 2. In widening instructions and some other instructions, like vslideup.vx,
+ * there is no need to check whether LMUL=1.
+ */
+static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
+ bool force)
+{
+ return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
+}
+
+/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
+static bool vext_check_nf(DisasContext *s, uint32_t nf)
+{
+ return (1 << s->lmul) * nf <= 8;
+}
+
+/*
+ * The destination vector register group cannot overlap a source vector register
+ * group of a different element width. (Section 11.2)
+ */
+static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
+{
+ return ((rd >= rs + slen) || (rs >= rd + dlen));
+}
+
+/* common translation macro */
+#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
+{ \
+ if (CHECK(s, a)) { \
+ return OP(s, a, SEQ); \
+ } \
+ return false; \
+}
+
+/*
+ *** unit stride load and store
+ */
+typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
+ TCGv_env, TCGv_i32);
+
+static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+ gen_helper_ldst_us *fn, DisasContext *s)
+{
+ TCGv_ptr dest, mask;
+ TCGv base;
+ TCGv_i32 desc;
+
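+ /*
+ * When vl == 0 the instruction touches no elements, so branch over
+ * the helper call entirely.
+ */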
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+
+ /*
+ * simd_desc() supports at most 256 bytes, but in this implementation
+ * the maximum vector group length is 2048 bytes, so the descriptor is
+ * split into two parts:
+ *
+ * The first part is vlen in bytes, encoded in maxsz of simd_desc.
+ * The second part is lmul, encoded in data of simd_desc.
+ */
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, base, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ gen_set_label(over);
+ return true;
+}
+
+static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_us *fn;
+ static gen_helper_ldst_us * const fns[2][7][4] = {
+ /* masked unit stride load */
+ { { gen_helper_vlb_v_b_mask, gen_helper_vlb_v_h_mask,
+ gen_helper_vlb_v_w_mask, gen_helper_vlb_v_d_mask },
+ { NULL, gen_helper_vlh_v_h_mask,
+ gen_helper_vlh_v_w_mask, gen_helper_vlh_v_d_mask },
+ { NULL, NULL,
+ gen_helper_vlw_v_w_mask, gen_helper_vlw_v_d_mask },
+ { gen_helper_vle_v_b_mask, gen_helper_vle_v_h_mask,
+ gen_helper_vle_v_w_mask, gen_helper_vle_v_d_mask },
+ { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
+ gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
+ { NULL, gen_helper_vlhu_v_h_mask,
+ gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
+ { NULL, NULL,
+ gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
+ /* unmasked unit stride load */
+ { { gen_helper_vlb_v_b, gen_helper_vlb_v_h,
+ gen_helper_vlb_v_w, gen_helper_vlb_v_d },
+ { NULL, gen_helper_vlh_v_h,
+ gen_helper_vlh_v_w, gen_helper_vlh_v_d },
+ { NULL, NULL,
+ gen_helper_vlw_v_w, gen_helper_vlw_v_d },
+ { gen_helper_vle_v_b, gen_helper_vle_v_h,
+ gen_helper_vle_v_w, gen_helper_vle_v_d },
+ { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
+ gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
+ { NULL, gen_helper_vlhu_v_h,
+ gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
+ { NULL, NULL,
+ gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
+ };
+
+ fn = fns[a->vm][seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldst_us_trans(a->rd, a->rs1, data, fn, s);
+}
+
+static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_nf(s, a->nf));
+}
+
+GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
+GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)
+
+static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_us *fn;
+ static gen_helper_ldst_us * const fns[2][4][4] = {
+ /* masked unit stride store */
+ { { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask,
+ gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask },
+ { NULL, gen_helper_vsh_v_h_mask,
+ gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask },
+ { NULL, NULL,
+ gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask },
+ { gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask,
+ gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } },
+ /* unmasked unit stride store */
+ { { gen_helper_vsb_v_b, gen_helper_vsb_v_h,
+ gen_helper_vsb_v_w, gen_helper_vsb_v_d },
+ { NULL, gen_helper_vsh_v_h,
+ gen_helper_vsh_v_w, gen_helper_vsh_v_d },
+ { NULL, NULL,
+ gen_helper_vsw_v_w, gen_helper_vsw_v_d },
+ { gen_helper_vse_v_b, gen_helper_vse_v_h,
+ gen_helper_vse_v_w, gen_helper_vse_v_d } }
+ };
+
+ fn = fns[a->vm][seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldst_us_trans(a->rd, a->rs1, data, fn, s);
+}
+
+static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_nf(s, a->nf));
+}
+
+GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
+GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)
+
+/*
+ *** stride load and store
+ */
+typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
+ TCGv, TCGv_env, TCGv_i32);
+
+static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
+ uint32_t data, gen_helper_ldst_stride *fn,
+ DisasContext *s)
+{
+ TCGv_ptr dest, mask;
+ TCGv base, stride;
+ TCGv_i32 desc;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+ stride = get_gpr(s, rs2, EXT_NONE);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, base, stride, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ gen_set_label(over);
+ return true;
+}
+
+static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_stride *fn;
+ static gen_helper_ldst_stride * const fns[7][4] = {
+ { gen_helper_vlsb_v_b, gen_helper_vlsb_v_h,
+ gen_helper_vlsb_v_w, gen_helper_vlsb_v_d },
+ { NULL, gen_helper_vlsh_v_h,
+ gen_helper_vlsh_v_w, gen_helper_vlsh_v_d },
+ { NULL, NULL,
+ gen_helper_vlsw_v_w, gen_helper_vlsw_v_d },
+ { gen_helper_vlse_v_b, gen_helper_vlse_v_h,
+ gen_helper_vlse_v_w, gen_helper_vlse_v_d },
+ { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
+ gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
+ { NULL, gen_helper_vlshu_v_h,
+ gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
+ { NULL, NULL,
+ gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
+ };
+
+ fn = fns[seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_nf(s, a->nf));
+}
+
+GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
+GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)
+
+static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_stride *fn;
+ static gen_helper_ldst_stride * const fns[4][4] = {
+ /* masked stride store */
+ { gen_helper_vssb_v_b, gen_helper_vssb_v_h,
+ gen_helper_vssb_v_w, gen_helper_vssb_v_d },
+ { NULL, gen_helper_vssh_v_h,
+ gen_helper_vssh_v_w, gen_helper_vssh_v_d },
+ { NULL, NULL,
+ gen_helper_vssw_v_w, gen_helper_vssw_v_d },
+ { gen_helper_vsse_v_b, gen_helper_vsse_v_h,
+ gen_helper_vsse_v_w, gen_helper_vsse_v_d }
+ };
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ fn = fns[seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_nf(s, a->nf));
+}
+
+GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
+GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)
+
+/*
+ *** index load and store
+ */
+typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
+ TCGv_ptr, TCGv_env, TCGv_i32);
+
+static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+ uint32_t data, gen_helper_ldst_index *fn,
+ DisasContext *s)
+{
+ TCGv_ptr dest, mask, index;
+ TCGv base;
+ TCGv_i32 desc;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ index = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, base, index, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(index);
+ gen_set_label(over);
+ return true;
+}
+
+static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_index *fn;
+ static gen_helper_ldst_index * const fns[7][4] = {
+ { gen_helper_vlxb_v_b, gen_helper_vlxb_v_h,
+ gen_helper_vlxb_v_w, gen_helper_vlxb_v_d },
+ { NULL, gen_helper_vlxh_v_h,
+ gen_helper_vlxh_v_w, gen_helper_vlxh_v_d },
+ { NULL, NULL,
+ gen_helper_vlxw_v_w, gen_helper_vlxw_v_d },
+ { gen_helper_vlxe_v_b, gen_helper_vlxe_v_h,
+ gen_helper_vlxe_v_w, gen_helper_vlxe_v_d },
+ { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
+ gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
+ { NULL, gen_helper_vlxhu_v_h,
+ gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
+ { NULL, NULL,
+ gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
+ };
+
+ fn = fns[seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+/*
+ * For vector indexed segment loads, the destination vector register
+ * group cannot overlap the source vector register group (specified by
+ * `vs2`), else an illegal instruction exception is raised.
+ */
+static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_nf(s, a->nf) &&
+ ((a->nf == 1) ||
+ vext_check_overlap_group(a->rd, a->nf << s->lmul,
+ a->rs2, 1 << s->lmul)));
+}
+
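+/*
+ * For example, an indexed segment load with nf = 2 at LMUL = 1 writes
+ * v[rd] and v[rd + 1], so rs2 must lie outside that pair; with nf = 1
+ * the destination may alias rs2, since both use the same element width.
+ */
+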
+GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
+GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)
+
+static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_index *fn;
+ static gen_helper_ldst_index * const fns[4][4] = {
+ { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h,
+ gen_helper_vsxb_v_w, gen_helper_vsxb_v_d },
+ { NULL, gen_helper_vsxh_v_h,
+ gen_helper_vsxh_v_w, gen_helper_vsxh_v_d },
+ { NULL, NULL,
+ gen_helper_vsxw_v_w, gen_helper_vsxw_v_d },
+ { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h,
+ gen_helper_vsxe_v_w, gen_helper_vsxe_v_d }
+ };
+
+ fn = fns[seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+static bool st_index_check(DisasContext *s, arg_rnfvm* a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_nf(s, a->nf));
+}
+
+GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
+GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)
+
+/*
+ *** unit stride fault-only-first load
+ */
+static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
+ gen_helper_ldst_us *fn, DisasContext *s)
+{
+ TCGv_ptr dest, mask;
+ TCGv base;
+ TCGv_i32 desc;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, base, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ gen_set_label(over);
+ return true;
+}
+
+static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_ldst_us *fn;
+ static gen_helper_ldst_us * const fns[7][4] = {
+ { gen_helper_vlbff_v_b, gen_helper_vlbff_v_h,
+ gen_helper_vlbff_v_w, gen_helper_vlbff_v_d },
+ { NULL, gen_helper_vlhff_v_h,
+ gen_helper_vlhff_v_w, gen_helper_vlhff_v_d },
+ { NULL, NULL,
+ gen_helper_vlwff_v_w, gen_helper_vlwff_v_d },
+ { gen_helper_vleff_v_b, gen_helper_vleff_v_h,
+ gen_helper_vleff_v_w, gen_helper_vleff_v_d },
+ { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
+ gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
+ { NULL, gen_helper_vlhuff_v_h,
+ gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
+ { NULL, NULL,
+ gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
+ };
+
+ fn = fns[seq][s->sew];
+ if (fn == NULL) {
+ return false;
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, NF, a->nf);
+ return ldff_trans(a->rd, a->rs1, data, fn, s);
+}
+
+GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
+GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)
+
+/*
+ *** vector atomic operation
+ */
+typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
+ TCGv_env, TCGv_i32);
+
+static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+ uint32_t data, gen_helper_amo *fn, DisasContext *s)
+{
+ TCGv_ptr dest, mask, index;
+ TCGv base;
+ TCGv_i32 desc;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ index = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, base, index, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(index);
+ gen_set_label(over);
+ return true;
+}
+
+static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
+{
+ uint32_t data = 0;
+ gen_helper_amo *fn;
+ static gen_helper_amo *const fnsw[9] = {
+ /*
+ * No true atomic operation is performed by these helpers; under
+ * CF_PARALLEL the translation exits to an exclusive context below.
+ */
+ gen_helper_vamoswapw_v_w,
+ gen_helper_vamoaddw_v_w,
+ gen_helper_vamoxorw_v_w,
+ gen_helper_vamoandw_v_w,
+ gen_helper_vamoorw_v_w,
+ gen_helper_vamominw_v_w,
+ gen_helper_vamomaxw_v_w,
+ gen_helper_vamominuw_v_w,
+ gen_helper_vamomaxuw_v_w
+ };
+ static gen_helper_amo *const fnsd[18] = {
+ gen_helper_vamoswapw_v_d,
+ gen_helper_vamoaddw_v_d,
+ gen_helper_vamoxorw_v_d,
+ gen_helper_vamoandw_v_d,
+ gen_helper_vamoorw_v_d,
+ gen_helper_vamominw_v_d,
+ gen_helper_vamomaxw_v_d,
+ gen_helper_vamominuw_v_d,
+ gen_helper_vamomaxuw_v_d,
+ gen_helper_vamoswapd_v_d,
+ gen_helper_vamoaddd_v_d,
+ gen_helper_vamoxord_v_d,
+ gen_helper_vamoandd_v_d,
+ gen_helper_vamoord_v_d,
+ gen_helper_vamomind_v_d,
+ gen_helper_vamomaxd_v_d,
+ gen_helper_vamominud_v_d,
+ gen_helper_vamomaxud_v_d
+ };
+
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_exit_atomic(cpu_env);
+ s->base.is_jmp = DISAS_NORETURN;
+ return true;
+ }
+
+ switch (s->sew) {
+ case 0 ... 2:
+ assert(seq < ARRAY_SIZE(fnsw));
+ fn = fnsw[seq];
+ break;
+ case 3:
+ /* XLEN check done in amo_check(). */
+ assert(seq < ARRAY_SIZE(fnsd));
+ fn = fnsd[seq];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ data = FIELD_DP32(data, VDATA, WD, a->wd);
+ return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+}
+
+/*
+ * There are two rules checked here.
+ *
+ * 1. SEW must be at least as wide as the AMO memory element size.
+ *
+ * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
+ */
+static bool amo_check(DisasContext *s, arg_rwdvm* a)
+{
+ return (!s->vill && has_ext(s, RVA) &&
+ (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ ((1 << s->sew) <= sizeof(target_ulong)) &&
+ ((1 << s->sew) >= 4));
+}
+
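+/*
+ * For example, SEW = 16 fails rule 1 (narrower than the 32-bit AMO
+ * minimum), and SEW = 64 fails rule 2 on RV32, which is why the *d
+ * variants go through amo_check64() instead.
+ */
+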
+static bool amo_check64(DisasContext *s, arg_rwdvm* a)
+{
+ REQUIRE_64BIT(s);
+ return amo_check(s, a);
+}
+
+GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
+GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check64)
+GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check64)
+
+/*
+ *** Vector Integer Arithmetic Instructions
+ */
+#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
+
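+/*
+ * MAXSZ gives the vector group size in bytes: for example, VLEN = 128
+ * bits at LMUL = 4 (s->lmul == 2) yields 128 >> 1 = 64 bytes, i.e.
+ * four 16-byte registers.
+ */
+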
+static bool opivv_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false));
+}
+
+typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t);
+
+static inline bool
+do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
+ gen_helper_gvec_4_ptr *fn)
+{
+ TCGLabel *over = gen_new_label();
+ if (!opivv_check(s, a)) {
+ return false;
+ }
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ if (a->vm && s->vl_eq_vlmax) {
+ gvec_fn(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
+ MAXSZ(s), MAXSZ(s));
+ } else {
+ uint32_t data = 0;
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
+ cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
+ }
+ gen_set_label(over);
+ return true;
+}
+
+/* OPIVV with GVEC IR */
+#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
+}
+
+GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
+GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
+
+typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
+ TCGv_env, TCGv_i32);
+
+static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
+ gen_helper_opivx *fn, DisasContext *s)
+{
+ TCGv_ptr dest, src2, mask;
+ TCGv src1;
+ TCGv_i32 desc;
+ uint32_t data = 0;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ src1 = get_gpr(s, rs1, EXT_NONE);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, src1, src2, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ gen_set_label(over);
+ return true;
+}
+
+static bool opivx_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false));
+}
+
+typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
+ uint32_t, uint32_t);
+
+static inline bool
+do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
+ gen_helper_opivx *fn)
+{
+ if (!opivx_check(s, a)) {
+ return false;
+ }
+
+ if (a->vm && s->vl_eq_vlmax) {
+ TCGv_i64 src1 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
+ gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+ src1, MAXSZ(s), MAXSZ(s));
+
+ tcg_temp_free_i64(src1);
+ return true;
+ }
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+}
+
+/* OPIVX with GVEC IR */
+#define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_opivx * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
+}
+
+GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
+GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
+
+static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_vec_sub8_i64(d, b, a);
+}
+
+static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_vec_sub16_i64(d, b, a);
+}
+
+static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ tcg_gen_sub_i32(ret, arg2, arg1);
+}
+
+static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_sub_i64(ret, arg2, arg1);
+}
+
+static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_sub_vec(vece, r, b, a);
+}
+
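+/*
+ * Reverse subtract swaps the operand order: the helpers above compute
+ * d = b - a per element, so vrsub.vx yields vd[i] = rs1 - vs2[i].
+ */
+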
+static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
+ static const GVecGen2s rsub_op[4] = {
+ { .fni8 = gen_vec_rsub8_i64,
+ .fniv = gen_rsub_vec,
+ .fno = gen_helper_vec_rsubs8,
+ .opt_opc = vecop_list,
+ .vece = MO_8 },
+ { .fni8 = gen_vec_rsub16_i64,
+ .fniv = gen_rsub_vec,
+ .fno = gen_helper_vec_rsubs16,
+ .opt_opc = vecop_list,
+ .vece = MO_16 },
+ { .fni4 = gen_rsub_i32,
+ .fniv = gen_rsub_vec,
+ .fno = gen_helper_vec_rsubs32,
+ .opt_opc = vecop_list,
+ .vece = MO_32 },
+ { .fni8 = gen_rsub_i64,
+ .fniv = gen_rsub_vec,
+ .fno = gen_helper_vec_rsubs64,
+ .opt_opc = vecop_list,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+ };
+
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
+}
+
+GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
+
+static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+ gen_helper_opivx *fn, DisasContext *s, int zx)
+{
+ TCGv_ptr dest, src2, mask;
+ TCGv src1;
+ TCGv_i32 desc;
+ uint32_t data = 0;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ if (zx) {
+ src1 = tcg_constant_tl(imm);
+ } else {
+ src1 = tcg_constant_tl(sextract64(imm, 0, 5));
+ }
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, src1, src2, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ gen_set_label(over);
+ return true;
+}
+
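+/*
+ * The zx flag selects how the 5-bit immediate is extended: for example,
+ * imm = 0x1f becomes 31 when zero-extended (shifts, vsaddu_vi) but -1
+ * when sign-extended (vadd_vi, vrsub_vi).
+ */
+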
+typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
+ uint32_t, uint32_t);
+
+static inline bool
+do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
+ gen_helper_opivx *fn, int zx)
+{
+ if (!opivx_check(s, a)) {
+ return false;
+ }
+
+ if (a->vm && s->vl_eq_vlmax) {
+ if (zx) {
+ gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+ extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
+ } else {
+ gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+ sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
+ }
+ } else {
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
+ }
+ return true;
+}
+
+/* OPIVI with GVEC IR */
+#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_opivx * const fns[4] = { \
+ gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
+ gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
+ }; \
+ return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
+ fns[s->sew], ZX); \
+}
+
+GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)
+
+static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+ TCGv_i64 tmp = tcg_constant_i64(c);
+ tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
+}
+
+GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
+
+/* Vector Widening Integer Add/Subtract */
+
+/* OPIVV with WIDEN */
+static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+ 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
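+/*
+ * For example, a widening add at SEW = 8, LMUL = 2 reads two-register
+ * source groups and writes a four-register destination of SEW = 16
+ * elements; SEW = 64 and LMUL = 8 inputs (s->sew == 3, s->lmul == 3)
+ * are rejected because they cannot be doubled.
+ */
+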
+static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
+ gen_helper_gvec_4_ptr *fn,
+ bool (*checkfn)(DisasContext *, arg_rmrr *))
+{
+ if (checkfn(s, a)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs1),
+ vreg_ofs(s, a->rs2),
+ cpu_env, s->vlen / 8, s->vlen / 8,
+ data, fn);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w \
+ }; \
+ return do_opivv_widen(s, a, fns[s->sew], CHECK); \
+}
+
+GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
+
+/* OPIVX with WIDEN */
+static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
+static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
+ gen_helper_opivx *fn)
+{
+ if (opivx_widen_check(s, a)) {
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+ }
+ return false;
+}
+
+#define GEN_OPIVX_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_opivx * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w \
+ }; \
+ return do_opivx_widen(s, a, fns[s->sew]); \
+}
+
+GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
+GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
+
+/* WIDEN OPIVV with WIDEN */
+static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, true) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
+static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (opiwv_widen_check(s, a)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs1),
+ vreg_ofs(s, a->rs2),
+ cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+#define GEN_OPIWV_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_gvec_4_ptr * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w \
+ }; \
+ return do_opiwv_widen(s, a, fns[s->sew]); \
+}
+
+GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
+GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
+GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
+GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
+
+/* WIDEN OPIVX with WIDEN */
+static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, true) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
+static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
+ gen_helper_opivx *fn)
+{
+ if (opiwx_widen_check(s, a)) {
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+ }
+ return false;
+}
+
+#define GEN_OPIWX_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_opivx * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w \
+ }; \
+ return do_opiwx_widen(s, a, fns[s->sew]); \
+}
+
+GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
+GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
+GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
+GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
+
+/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
+/* OPIVV without GVEC IR */
+#define GEN_OPIVV_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+/*
+ * For vadc and vsbc, an illegal instruction exception is raised if the
+ * destination vector register is v0 and LMUL > 1. (Section 12.3)
+ */
+static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ ((a->rd != 0) || (s->lmul == 0)));
+}
+
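+/*
+ * For example, vadc.vvm with vd = v0 is only legal at LMUL = 1; at
+ * LMUL > 1 the destination group would overwrite the mask in v0.
+ */
+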
+GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
+GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
+
+/*
+ * For vmadc and vmsbc, an illegal instruction exception is raised if the
+ * destination vector register overlaps a source vector register group.
+ */
+static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
+}
+
+GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
+GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
+
+static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ ((a->rd != 0) || (s->lmul == 0)));
+}
+
+/* OPIVX without GVEC IR */
+#define GEN_OPIVX_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ static gen_helper_opivx * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ \
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
+ } \
+ return false; \
+}
+
+GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
+GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
+
+static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
+}
+
+GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
+GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
+
+/* OPIVI without GVEC IR */
+#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ static gen_helper_opivx * const fns[4] = { \
+ gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
+ gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
+ }; \
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
+ fns[s->sew], s, ZX); \
+ } \
+ return false; \
+}
+
+GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
+GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)
+
+/* Vector Bitwise Logical Instructions */
+GEN_OPIVV_GVEC_TRANS(vand_vv, and)
+GEN_OPIVV_GVEC_TRANS(vor_vv, or)
+GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
+GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
+GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
+GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
+GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi)
+GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx, ori)
+GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori)
+
+/* Vector Single-Width Bit Shift Instructions */
+GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
+GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
+GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
+
+typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
+ uint32_t, uint32_t);
+
+static inline bool
+do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
+ gen_helper_opivx *fn)
+{
+ if (!opivx_check(s, a)) {
+ return false;
+ }
+
+ if (a->vm && s->vl_eq_vlmax) {
+ TCGv_i32 src1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
+ tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
+ gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+ src1, MAXSZ(s), MAXSZ(s));
+
+ tcg_temp_free_i32(src1);
+ return true;
+ }
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+}
+
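+/*
+ * The extract_i32 above truncates the shift amount to log2(SEW) + 3
+ * bits: for example, at SEW = 32 (s->sew == 2) only the low 5 bits of
+ * rs1 are used, so shifts are taken modulo the element width.
+ */
+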
+#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ static gen_helper_opivx * const fns[4] = { \
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
+ }; \
+ \
+ return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
+}
+
+GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
+GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
+GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
+
+GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx, shli)
+GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx, shri)
+GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx, sari)
+
+/* Vector Narrowing Integer Right Shift Instructions */
+static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, true) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+ 2 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
+/* OPIVV with NARROW */
+#define GEN_OPIVV_NARROW_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opivv_narrow_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+GEN_OPIVV_NARROW_TRANS(vnsra_vv)
+GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
+
+static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, true) &&
+ vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+ 2 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3));
+}
+
+/* OPIVX with NARROW */
+#define GEN_OPIVX_NARROW_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opivx_narrow_check(s, a)) { \
+ static gen_helper_opivx * const fns[3] = { \
+ gen_helper_##NAME##_b, \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ }; \
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
+ } \
+ return false; \
+}
+
+GEN_OPIVX_NARROW_TRANS(vnsra_vx)
+GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
+
+/* OPIVI with NARROW */
+#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opivx_narrow_check(s, a)) { \
+ static gen_helper_opivx * const fns[3] = { \
+ gen_helper_##OPIVX##_b, \
+ gen_helper_##OPIVX##_h, \
+ gen_helper_##OPIVX##_w, \
+ }; \
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
+ fns[s->sew], s, ZX); \
+ } \
+ return false; \
+}
+
+GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx)
+GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
+
+/* Vector Integer Comparison Instructions */
+/*
+ * For all comparison instructions, an illegal instruction exception is raised
+ * if the destination vector register overlaps a source vector register group
+ * and LMUL > 1.
+ */
+static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
+ (s->lmul == 0)));
+}
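+
+/*
+ * The destination here is a single mask register: for example, at
+ * LMUL = 4 a vmseq.vv with vd = v2 and vs1 = v0 (group v0..v3) is
+ * rejected, while at LMUL = 1 any overlap is allowed.
+ */
+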
+GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
+GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
+GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
+GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
+GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
+GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
+
+static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
+ (s->lmul == 0)));
+}
+
+GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
+GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
+
+GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
+GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
+
+/* Vector Integer Min/Max Instructions */
+GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
+GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
+GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
+GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
+GEN_OPIVX_TRANS(vminu_vx, opivx_check)
+GEN_OPIVX_TRANS(vmin_vx, opivx_check)
+GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
+GEN_OPIVX_TRANS(vmax_vx, opivx_check)
+
+/* Vector Single-Width Integer Multiply Instructions */
+GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
+GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
+GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
+GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
+GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
+GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
+GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
+GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
+
+/* Vector Integer Divide Instructions */
+GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
+GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
+GEN_OPIVV_TRANS(vremu_vv, opivv_check)
+GEN_OPIVV_TRANS(vrem_vv, opivv_check)
+GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
+GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
+GEN_OPIVX_TRANS(vremu_vx, opivx_check)
+GEN_OPIVX_TRANS(vrem_vx, opivx_check)
+
+/* Vector Widening Integer Multiply Instructions */
+GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
+GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
+
+/* Vector Single-Width Integer Multiply-Add Instructions */
+GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
+GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
+GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
+GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
+GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
+GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
+GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
+
+/* Vector Widening Integer Multiply-Add Instructions */
+GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
+
+/* Vector Integer Merge and Move Instructions */
+static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs1, false)) {
+
+ if (s->vl_eq_vlmax) {
+ tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
+ vreg_ofs(s, a->rs1),
+ MAXSZ(s), MAXSZ(s));
+ } else {
+ uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
+ static gen_helper_gvec_2_ptr * const fns[4] = {
+ gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
+ gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
+ };
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
+ cpu_env, s->vlen / 8, s->vlen / 8, data,
+ fns[s->sew]);
+ gen_set_label(over);
+ }
+ return true;
+ }
+ return false;
+}
+
+typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
+static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false)) {
+
+ TCGv s1;
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ s1 = get_gpr(s, a->rs1, EXT_SIGN);
+
+ if (s->vl_eq_vlmax) {
+ tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), s1);
+ } else {
+ TCGv_i32 desc;
+ TCGv_i64 s1_i64 = tcg_temp_new_i64();
+ TCGv_ptr dest = tcg_temp_new_ptr();
+ uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
+ static gen_helper_vmv_vx * const fns[4] = {
+ gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
+ gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
+ };
+
+ tcg_gen_ext_tl_i64(s1_i64, s1);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
+ fns[s->sew](dest, s1_i64, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_i64(s1_i64);
+ }
+
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false)) {
+
+ int64_t simm = sextract64(a->rs1, 0, 5);
+ if (s->vl_eq_vlmax) {
+ tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), simm);
+ } else {
+ TCGv_i32 desc;
+ TCGv_i64 s1;
+ TCGv_ptr dest;
+ uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
+ static gen_helper_vmv_vx * const fns[4] = {
+ gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
+ gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
+ };
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ s1 = tcg_constant_i64(simm);
+ dest = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
+ fns[s->sew](dest, s1, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ gen_set_label(over);
+ }
+ return true;
+ }
+ return false;
+}
+
+GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
+GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
+GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check)
+
+/*
+ *** Vector Fixed-Point Arithmetic Instructions
+ */
+
+/* Vector Single-Width Saturating Add and Subtract */
+GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
+GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
+GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
+GEN_OPIVV_TRANS(vssub_vv, opivv_check)
+GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
+GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
+GEN_OPIVX_TRANS(vssub_vx, opivx_check)
+GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
+GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
+
+/* Vector Single-Width Averaging Add and Subtract */
+GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
+GEN_OPIVV_TRANS(vasub_vv, opivv_check)
+GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vasub_vx, opivx_check)
+GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
+GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
+
+/* Vector Widening Saturating Scaled Multiply-Add */
+GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
+GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
+GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
+
+/* Vector Single-Width Scaling Shift Instructions */
+GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
+GEN_OPIVV_TRANS(vssra_vv, opivv_check)
+GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
+GEN_OPIVX_TRANS(vssra_vx, opivx_check)
+GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
+GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
+GEN_OPIVV_NARROW_TRANS(vnclip_vv)
+GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
+GEN_OPIVX_NARROW_TRANS(vnclip_vx)
+GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
+GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
+
+/*
+ *** Vector Floating-Point Arithmetic Instructions
+ */
+/* Vector Single-Width Floating-Point Add/Subtract Instructions */
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised.
+ */
+static bool opfvv_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ (s->sew != 0));
+}
+
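+/*
+ * s->sew != 0 excludes 8-bit elements, so the fns[] tables below are
+ * indexed with s->sew - 1: SEW = 16, 32, 64 map to the _h, _w and _d
+ * helpers.
+ */
+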
+/* OPFVV without GVEC IR */
+#define GEN_OPFVV_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[3] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ gen_helper_##NAME##_d, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
+
+typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
+ TCGv_env, TCGv_i32);
+
+static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
+ uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
+{
+ TCGv_ptr dest, src2, mask;
+ TCGv_i32 desc;
+
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ gen_set_label(over);
+ return true;
+}
+
+static bool opfvf_check(DisasContext *s, arg_rmrr *a)
+{
+ /*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised.
+ */
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (s->sew != 0));
+}
+
+/* OPFVF without GVEC IR */
+#define GEN_OPFVF_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_opfvf *const fns[3] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ gen_helper_##NAME##_d, \
+ }; \
+ gen_set_rm(s, 7); \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
+ fns[s->sew - 1], s); \
+ } \
+ return false; \
+}
+
+GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
+
+/* Vector Widening Floating-Point Add/Subtract Instructions */
+static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+ 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* OPFVV with WIDEN */
+#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[2] = { \
+ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
+
+static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* OPFVF with WIDEN */
+#define GEN_OPFVF_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opfvf_widen_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_opfvf *const fns[2] = { \
+ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
+ }; \
+ gen_set_rm(s, 7); \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
+ fns[s->sew - 1], s); \
+ } \
+ return false; \
+}
+
+GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+
+static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, true) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* WIDEN OPFVV with WIDEN */
+#define GEN_OPFWV_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opfwv_widen_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_4_ptr * const fns[2] = { \
+ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
+GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
+
+static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, true) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+/* WIDEN OPFVF with WIDEN */
+#define GEN_OPFWF_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+{ \
+ if (opfwf_widen_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_opfvf *const fns[2] = { \
+ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
+ }; \
+ gen_set_rm(s, 7); \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
+ fns[s->sew - 1], s); \
+ } \
+ return false; \
+}
+
+GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
+GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
+
+/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
+GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
+GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
+
+/* Vector Widening Floating-Point Multiply */
+GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
+
+/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
+GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
+GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
+
+/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
+GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
+
+/* Vector Floating-Point Square-Root Instruction */
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised
+ */
+static bool opfv_check(DisasContext *s, arg_rmr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (s->sew != 0));
+}
+
+#define GEN_OPFV_TRANS(NAME, CHECK) \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
+{ \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_3_ptr * const fns[3] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ gen_helper_##NAME##_d, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
+
+/* Vector Floating-Point MIN/MAX Instructions */
+GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
+GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
+
+/* Vector Floating-Point Sign-Injection Instructions */
+GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
+GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
+GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
+GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
+
+/* Vector Floating-Point Compare Instructions */
+static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ (s->sew != 0) &&
+ ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
+ vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
+ (s->lmul == 0)));
+}
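+
+/*
+ * Compares produce a mask, so vd is a single register while each source
+ * spans 1 << lmul registers; any overlap of vd with a source group is
+ * rejected unless LMUL == 1, where the groups degenerate to single
+ * registers and full overlap is harmless.
+ */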
+
+GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
+GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
+
+static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (s->sew != 0) &&
+ (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
+ (s->lmul == 0)));
+}
+
+GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
+GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
+
+/* Vector Floating-Point Classify Instruction */
+GEN_OPFV_TRANS(vfclass_v, opfv_check)
+
+/* Vector Floating-Point Merge Instruction */
+GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
+
+static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ (s->sew != 0)) {
+
+ if (s->vl_eq_vlmax) {
+ tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
+ } else {
+ TCGv_ptr dest;
+ TCGv_i32 desc;
+ uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
+ static gen_helper_vmv_vx * const fns[3] = {
+ gen_helper_vmv_v_x_h,
+ gen_helper_vmv_v_x_w,
+ gen_helper_vmv_v_x_d,
+ };
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ dest = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+ tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
+ fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
+
+ tcg_temp_free_ptr(dest);
+ gen_set_label(over);
+ }
+ return true;
+ }
+ return false;
+}
+
+/* Single-Width Floating-Point/Integer Type-Convert Instructions */
+GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
+GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
+GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
+GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
+
+/* Widening Floating-Point/Integer Type-Convert Instructions */
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised
+ */
+static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, true) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
+ 1 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+#define GEN_OPFV_WIDEN_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
+{ \
+ if (opfv_widen_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_3_ptr * const fns[2] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
+
+/* Narrowing Floating-Point/Integer Type-Convert Instructions */
+
+/*
+ * If the current SEW does not correspond to a supported IEEE floating-point
+ * type, an illegal instruction exception is raised
+ */
+static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, true) &&
+ vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
+ 2 << s->lmul) &&
+ (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
+}
+
+#define GEN_OPFV_NARROW_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
+{ \
+ if (opfv_narrow_check(s, a)) { \
+ uint32_t data = 0; \
+ static gen_helper_gvec_3_ptr * const fns[2] = { \
+ gen_helper_##NAME##_h, \
+ gen_helper_##NAME##_w, \
+ }; \
+ TCGLabel *over = gen_new_label(); \
+ gen_set_rm(s, 7); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, \
+ fns[s->sew - 1]); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
+GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
+
+/*
+ *** Vector Reduction Operations
+ */
+/* Vector Single-Width Integer Reduction Instructions */
+static bool reduction_check(DisasContext *s, arg_rmrr *a)
+{
+ return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
+}
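+
+/*
+ * Reductions read vs1[0] and write vd[0] as scalars, so only the vs2
+ * source needs to be an aligned register group, which is why vd and
+ * vs1 are not checked here.
+ */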
+
+GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
+GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
+GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
+GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
+GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
+GEN_OPIVV_TRANS(vredand_vs, reduction_check)
+GEN_OPIVV_TRANS(vredor_vs, reduction_check)
+GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
+
+/* Vector Widening Integer Reduction Instructions */
+GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check)
+GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
+
+/* Vector Single-Width Floating-Point Reduction Instructions */
+GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
+GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
+GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
+
+/* Vector Widening Floating-Point Reduction Instructions */
+GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
+
+/*
+ *** Vector Mask Operations
+ */
+
+/* Vector Mask-Register Logical Instructions */
+#define GEN_MM_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_r *a) \
+{ \
+ if (vext_check_isa_ill(s)) { \
+ uint32_t data = 0; \
+ gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
+ TCGLabel *over = gen_new_label(); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
+ vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->vlen / 8, s->vlen / 8, data, fn); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_MM_TRANS(vmand_mm)
+GEN_MM_TRANS(vmnand_mm)
+GEN_MM_TRANS(vmandnot_mm)
+GEN_MM_TRANS(vmxor_mm)
+GEN_MM_TRANS(vmor_mm)
+GEN_MM_TRANS(vmnor_mm)
+GEN_MM_TRANS(vmornot_mm)
+GEN_MM_TRANS(vmxnor_mm)
+
+/* Vector mask population count vmpopc */
+static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
+{
+ if (vext_check_isa_ill(s)) {
+ TCGv_ptr src2, mask;
+ TCGv dst;
+ TCGv_i32 desc;
+ uint32_t data = 0;
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ dst = dest_gpr(s, a->rd);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
+ gen_set_gpr(s, a->rd, dst);
+
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ return true;
+ }
+ return false;
+}
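+
+/*
+ * simd_desc() above packs the operand size and the VDATA fields into a
+ * single 32-bit descriptor for the helper to unpack; the helper then
+ * returns the population count of the source mask (restricted to
+ * active elements when vm is clear) through the destination GPR.
+ */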
+
+/* vmfirst find-first-set mask bit */
+static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
+{
+ if (vext_check_isa_ill(s)) {
+ TCGv_ptr src2, mask;
+ TCGv dst;
+ TCGv_i32 desc;
+ uint32_t data = 0;
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+
+ mask = tcg_temp_new_ptr();
+ src2 = tcg_temp_new_ptr();
+ dst = dest_gpr(s, a->rd);
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
+
+ tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
+ tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+ gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
+ gen_set_gpr(s, a->rd, dst);
+
+ tcg_temp_free_ptr(mask);
+ tcg_temp_free_ptr(src2);
+ return true;
+ }
+ return false;
+}
+
+/* vmsbf.m set-before-first mask bit */
+/* vmsif.m set-include-first mask bit */
+/* vmsof.m set-only-first mask bit */
+#define GEN_M_TRANS(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
+{ \
+ if (vext_check_isa_ill(s)) { \
+ uint32_t data = 0; \
+ gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
+ TCGLabel *over = gen_new_label(); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
+ \
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
+ vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
+ cpu_env, s->vlen / 8, s->vlen / 8, \
+ data, fn); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+}
+
+GEN_M_TRANS(vmsbf_m)
+GEN_M_TRANS(vmsif_m)
+GEN_M_TRANS(vmsof_m)
+
+/* Vector Iota Instruction */
+static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) &&
+ (a->vm != 0 || a->rd != 0)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ static gen_helper_gvec_3_ptr * const fns[4] = {
+ gen_helper_viota_m_b, gen_helper_viota_m_h,
+ gen_helper_viota_m_w, gen_helper_viota_m_d,
+ };
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs2), cpu_env,
+ s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+/* Vector Element Index Instruction */
+static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
+{
+ if (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, false)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ static gen_helper_gvec_2_ptr * const fns[4] = {
+ gen_helper_vid_v_b, gen_helper_vid_v_h,
+ gen_helper_vid_v_w, gen_helper_vid_v_d,
+ };
+ tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ cpu_env, s->vlen / 8, s->vlen / 8,
+ data, fns[s->sew]);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+/*
+ *** Vector Permutation Instructions
+ */
+
+/* Integer Extract Instruction */
+
+static void load_element(TCGv_i64 dest, TCGv_ptr base,
+ int ofs, int sew)
+{
+ switch (sew) {
+ case MO_8:
+ tcg_gen_ld8u_i64(dest, base, ofs);
+ break;
+ case MO_16:
+ tcg_gen_ld16u_i64(dest, base, ofs);
+ break;
+ case MO_32:
+ tcg_gen_ld32u_i64(dest, base, ofs);
+ break;
+ case MO_64:
+ tcg_gen_ld_i64(dest, base, ofs);
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
+
+/* offset of element idx within vector register r */
+static uint32_t endian_ofs(DisasContext *s, int r, int idx)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
+#else
+ return vreg_ofs(s, r) + (idx << s->sew);
+#endif
+}
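+
+/*
+ * Vector registers are stored as host-endian 64-bit chunks, so on a
+ * big-endian host the element index is flipped within each chunk: with
+ * SEW == 8 (s->sew == 0), element 0 lives at byte offset 0 ^ 7 == 7,
+ * while 64-bit elements (7 >> 3 == 0) need no adjustment.
+ */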
+
+/* adjust the index for host endianness */
+static void endian_adjust(TCGv_i32 ofs, int sew)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
+#endif
+}
+
+/* Load idx >= VLMAX ? 0 : vreg[idx] */
+static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
+ int vreg, TCGv idx, int vlmax)
+{
+ TCGv_i32 ofs = tcg_temp_new_i32();
+ TCGv_ptr base = tcg_temp_new_ptr();
+ TCGv_i64 t_idx = tcg_temp_new_i64();
+ TCGv_i64 t_vlmax, t_zero;
+
+ /*
+ * Mask the index to the length so that we do
+ * not produce an out-of-range load.
+ */
+ tcg_gen_trunc_tl_i32(ofs, idx);
+ tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
+
+ /* Convert the index to an offset. */
+ endian_adjust(ofs, s->sew);
+ tcg_gen_shli_i32(ofs, ofs, s->sew);
+
+ /* Convert the index to a pointer. */
+ tcg_gen_ext_i32_ptr(base, ofs);
+ tcg_gen_add_ptr(base, base, cpu_env);
+
+ /* Perform the load. */
+ load_element(dest, base,
+ vreg_ofs(s, vreg), s->sew);
+ tcg_temp_free_ptr(base);
+ tcg_temp_free_i32(ofs);
+
+ /* Flush out-of-range indexing to zero. */
+ t_vlmax = tcg_constant_i64(vlmax);
+ t_zero = tcg_constant_i64(0);
+ tcg_gen_extu_tl_i64(t_idx, idx);
+
+ tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
+ t_vlmax, dest, t_zero);
+
+ tcg_temp_free_i64(t_idx);
+}
+
+static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
+ int vreg, int idx)
+{
+ load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew);
+}
+
+static bool trans_vext_x_v(DisasContext *s, arg_r *a)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv dest = dest_gpr(s, a->rd);
+
+ if (a->rs1 == 0) {
+ /* Special case vmv.x.s rd, vs2. */
+ vec_element_loadi(s, tmp, a->rs2, 0);
+ } else {
+ /* This instruction ignores LMUL and vector register groups */
+ int vlmax = s->vlen >> (3 + s->sew);
+ vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
+ }
+
+ tcg_gen_trunc_i64_tl(dest, tmp);
+ gen_set_gpr(s, a->rd, dest);
+
+ tcg_temp_free_i64(tmp);
+ return true;
+}
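+
+/*
+ * The vlmax above is VLEN / SEW in elements: s->vlen is in bits and
+ * 3 + s->sew converts the element width into a bit shift, e.g. with
+ * VLEN == 128 and SEW == 32 (s->sew == 2), vlmax = 128 >> 5 = 4.
+ */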
+
+/* Integer Scalar Move Instruction */
+
+static void store_element(TCGv_i64 val, TCGv_ptr base,
+ int ofs, int sew)
+{
+ switch (sew) {
+ case MO_8:
+ tcg_gen_st8_i64(val, base, ofs);
+ break;
+ case MO_16:
+ tcg_gen_st16_i64(val, base, ofs);
+ break;
+ case MO_32:
+ tcg_gen_st32_i64(val, base, ofs);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(val, base, ofs);
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
+
+/*
+ * Store vreg[idx] = val.
+ * The index must be in range of VLMAX.
+ */
+static void vec_element_storei(DisasContext *s, int vreg,
+ int idx, TCGv_i64 val)
+{
+ store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
+}
+
+/* vmv.s.x vd, rs1 # vd[0] = rs1 */
+static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
+{
+ if (vext_check_isa_ill(s)) {
+ /* This instruction ignores LMUL and vector register groups */
+ int maxsz = s->vlen >> 3;
+ TCGv_i64 t1;
+ TCGLabel *over = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0);
+ if (a->rs1 == 0) {
+ goto done;
+ }
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
+ vec_element_storei(s, a->rd, 0, t1);
+ tcg_temp_free_i64(t1);
+ done:
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+/* Floating-Point Scalar Move Instructions */
+static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
+{
+ if (!s->vill && has_ext(s, RVF) &&
+ (s->mstatus_fs != 0) && (s->sew != 0)) {
+ unsigned int len = 8 << s->sew;
+
+ vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
+ if (len < 64) {
+ tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
+ MAKE_64BIT_MASK(len, 64 - len));
+ }
+
+ mark_fs_dirty(s);
+ return true;
+ }
+ return false;
+}
+
+/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
+static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
+{
+ if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
+ TCGv_i64 t1;
+ /* This instruction ignores LMUL and vector register groups. */
+ uint32_t vlmax = s->vlen >> 3;
+
+ /* if vl == 0, skip the vector register write-back */
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ /* zero all elements first */
+ tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
+
+ /* NaN-box f[rs1] as necessary for SEW */
+ t1 = tcg_temp_new_i64();
+ if (s->sew == MO_64 && !has_ext(s, RVD)) {
+ tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
+ } else {
+ tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
+ }
+ vec_element_storei(s, a->rd, 0, t1);
+ tcg_temp_free_i64(t1);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+/* Vector Slide Instructions */
+static bool slideup_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2));
+}
+
+GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
+GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
+GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
+
+GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
+GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
+GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
+
+/* Vector Register Gather Instruction */
+static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2) && (a->rd != a->rs1));
+}
+
+GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
+
+static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2));
+}
+
+/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
+static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+{
+ if (!vrgather_vx_check(s, a)) {
+ return false;
+ }
+
+ if (a->vm && s->vl_eq_vlmax) {
+ int vlmax = s->vlen / s->mlen;
+ TCGv_i64 dest = tcg_temp_new_i64();
+
+ if (a->rs1 == 0) {
+ vec_element_loadi(s, dest, a->rs2, 0);
+ } else {
+ vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
+ }
+
+ tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), dest);
+ tcg_temp_free_i64(dest);
+ } else {
+ static gen_helper_opivx * const fns[4] = {
+ gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+ gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
+ };
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
+ }
+ return true;
+}
+
+/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
+static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+{
+ if (!vrgather_vx_check(s, a)) {
+ return false;
+ }
+
+ if (a->vm && s->vl_eq_vlmax) {
+ if (a->rs1 >= s->vlen / s->mlen) {
+ tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), 0);
+ } else {
+ tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
+ endian_ofs(s, a->rs2, a->rs1),
+ MAXSZ(s), MAXSZ(s));
+ }
+ } else {
+ static gen_helper_opivx * const fns[4] = {
+ gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+ gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
+ };
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
+ }
+ return true;
+}
+
+/* Vector Compress Instruction */
+static bool vcompress_vm_check(DisasContext *s, arg_r *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) &&
+ (a->rd != a->rs2));
+}
+
+static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
+{
+ if (vcompress_vm_check(s, a)) {
+ uint32_t data = 0;
+ static gen_helper_gvec_4_ptr * const fns[4] = {
+ gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
+ gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
+ };
+ TCGLabel *over = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+ data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
+ vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
+ cpu_env, s->vlen / 8, s->vlen / 8, data,
+ fns[s->sew]);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
diff --git a/target/riscv/instmap.h b/target/riscv/instmap.h
new file mode 100644
index 000000000..40b6d2b64
--- /dev/null
+++ b/target/riscv/instmap.h
@@ -0,0 +1,369 @@
+/*
+ * RISC-V emulation for qemu: Instruction decode helpers
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_RISCV_INSTMAP_H
+#define TARGET_RISCV_INSTMAP_H
+
+#define MASK_OP_MAJOR(op) (op & 0x7F)
+enum {
+ /* rv32i, rv64i, rv32m */
+ OPC_RISC_LUI = (0x37),
+ OPC_RISC_AUIPC = (0x17),
+ OPC_RISC_JAL = (0x6F),
+ OPC_RISC_JALR = (0x67),
+ OPC_RISC_BRANCH = (0x63),
+ OPC_RISC_LOAD = (0x03),
+ OPC_RISC_STORE = (0x23),
+ OPC_RISC_ARITH_IMM = (0x13),
+ OPC_RISC_ARITH = (0x33),
+ OPC_RISC_FENCE = (0x0F),
+ OPC_RISC_SYSTEM = (0x73),
+
+ /* rv64i, rv64m */
+ OPC_RISC_ARITH_IMM_W = (0x1B),
+ OPC_RISC_ARITH_W = (0x3B),
+
+ /* rv32a, rv64a */
+ OPC_RISC_ATOMIC = (0x2F),
+
+ /* floating point */
+ OPC_RISC_FP_LOAD = (0x7),
+ OPC_RISC_FP_STORE = (0x27),
+
+ OPC_RISC_FMADD = (0x43),
+ OPC_RISC_FMSUB = (0x47),
+ OPC_RISC_FNMSUB = (0x4B),
+ OPC_RISC_FNMADD = (0x4F),
+
+ OPC_RISC_FP_ARITH = (0x53),
+};
+
+#define MASK_OP_ARITH(op) (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | \
+ (0x7F << 25))))
+enum {
+ OPC_RISC_ADD = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25),
+ OPC_RISC_SUB = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25),
+ OPC_RISC_SLL = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25),
+ OPC_RISC_SLT = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25),
+ OPC_RISC_SLTU = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25),
+ OPC_RISC_XOR = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25),
+ OPC_RISC_SRL = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25),
+ OPC_RISC_SRA = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25),
+ OPC_RISC_OR = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25),
+ OPC_RISC_AND = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25),
+
+ /* RV64M */
+ OPC_RISC_MUL = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25),
+ OPC_RISC_MULH = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25),
+ OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25),
+ OPC_RISC_MULHU = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25),
+
+ OPC_RISC_DIV = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25),
+ OPC_RISC_DIVU = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25),
+ OPC_RISC_REM = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25),
+ OPC_RISC_REMU = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25),
+};
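+
+/*
+ * MASK_OP_ARITH keeps the major opcode plus funct3 and funct7, enough
+ * to identify an R-type instruction: "add x1, x2, x3" encodes as
+ * 0x003100b3, and masking drops rd/rs1/rs2 to leave 0x00000033, which
+ * is OPC_RISC_ADD.
+ */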
+
+
+#define MASK_OP_ARITH_IMM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_ADDI = OPC_RISC_ARITH_IMM | (0x0 << 12),
+ OPC_RISC_SLTI = OPC_RISC_ARITH_IMM | (0x2 << 12),
+ OPC_RISC_SLTIU = OPC_RISC_ARITH_IMM | (0x3 << 12),
+ OPC_RISC_XORI = OPC_RISC_ARITH_IMM | (0x4 << 12),
+ OPC_RISC_ORI = OPC_RISC_ARITH_IMM | (0x6 << 12),
+ OPC_RISC_ANDI = OPC_RISC_ARITH_IMM | (0x7 << 12),
+ OPC_RISC_SLLI = OPC_RISC_ARITH_IMM | (0x1 << 12), /* additional part of IMM */
+ OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) /* SRAI, SRLI */
+};
+
+#define MASK_OP_BRANCH(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_BEQ = OPC_RISC_BRANCH | (0x0 << 12),
+ OPC_RISC_BNE = OPC_RISC_BRANCH | (0x1 << 12),
+ OPC_RISC_BLT = OPC_RISC_BRANCH | (0x4 << 12),
+ OPC_RISC_BGE = OPC_RISC_BRANCH | (0x5 << 12),
+ OPC_RISC_BLTU = OPC_RISC_BRANCH | (0x6 << 12),
+ OPC_RISC_BGEU = OPC_RISC_BRANCH | (0x7 << 12)
+};
+
+enum {
+ OPC_RISC_ADDIW = OPC_RISC_ARITH_IMM_W | (0x0 << 12),
+ OPC_RISC_SLLIW = OPC_RISC_ARITH_IMM_W | (0x1 << 12), /* additional part of IMM */
+ OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W | (0x5 << 12) /* SRAIW, SRLIW */
+};
+
+enum {
+ OPC_RISC_ADDW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25),
+ OPC_RISC_SUBW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25),
+ OPC_RISC_SLLW = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25),
+ OPC_RISC_SRLW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25),
+ OPC_RISC_SRAW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25),
+
+ /* RV64M */
+ OPC_RISC_MULW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25),
+ OPC_RISC_DIVW = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25),
+ OPC_RISC_DIVUW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x01 << 25),
+ OPC_RISC_REMW = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25),
+ OPC_RISC_REMUW = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25),
+};
+
+#define MASK_OP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_LB = OPC_RISC_LOAD | (0x0 << 12),
+ OPC_RISC_LH = OPC_RISC_LOAD | (0x1 << 12),
+ OPC_RISC_LW = OPC_RISC_LOAD | (0x2 << 12),
+ OPC_RISC_LD = OPC_RISC_LOAD | (0x3 << 12),
+ OPC_RISC_LBU = OPC_RISC_LOAD | (0x4 << 12),
+ OPC_RISC_LHU = OPC_RISC_LOAD | (0x5 << 12),
+ OPC_RISC_LWU = OPC_RISC_LOAD | (0x6 << 12),
+};
+
+#define MASK_OP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_SB = OPC_RISC_STORE | (0x0 << 12),
+ OPC_RISC_SH = OPC_RISC_STORE | (0x1 << 12),
+ OPC_RISC_SW = OPC_RISC_STORE | (0x2 << 12),
+ OPC_RISC_SD = OPC_RISC_STORE | (0x3 << 12),
+};
+
+#define MASK_OP_JALR(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+/* no enum since OPC_RISC_JALR is the actual value */
+
+#define MASK_OP_ATOMIC(op) \
+ (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | (0x7F << 25))))
+#define MASK_OP_ATOMIC_NO_AQ_RL_SZ(op) \
+ (MASK_OP_MAJOR(op) | (op & (0x1F << 27)))
+
+enum {
+ OPC_RISC_LR = OPC_RISC_ATOMIC | (0x02 << 27),
+ OPC_RISC_SC = OPC_RISC_ATOMIC | (0x03 << 27),
+ OPC_RISC_AMOSWAP = OPC_RISC_ATOMIC | (0x01 << 27),
+ OPC_RISC_AMOADD = OPC_RISC_ATOMIC | (0x00 << 27),
+ OPC_RISC_AMOXOR = OPC_RISC_ATOMIC | (0x04 << 27),
+ OPC_RISC_AMOAND = OPC_RISC_ATOMIC | (0x0C << 27),
+ OPC_RISC_AMOOR = OPC_RISC_ATOMIC | (0x08 << 27),
+ OPC_RISC_AMOMIN = OPC_RISC_ATOMIC | (0x10 << 27),
+ OPC_RISC_AMOMAX = OPC_RISC_ATOMIC | (0x14 << 27),
+ OPC_RISC_AMOMINU = OPC_RISC_ATOMIC | (0x18 << 27),
+ OPC_RISC_AMOMAXU = OPC_RISC_ATOMIC | (0x1C << 27),
+};
+
+#define MASK_OP_SYSTEM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_ECALL = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_EBREAK = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_ERET = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_MRTS = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_MRTH = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_HRTS = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_WFI = OPC_RISC_SYSTEM | (0x0 << 12),
+ OPC_RISC_SFENCEVM = OPC_RISC_SYSTEM | (0x0 << 12),
+
+ OPC_RISC_CSRRW = OPC_RISC_SYSTEM | (0x1 << 12),
+ OPC_RISC_CSRRS = OPC_RISC_SYSTEM | (0x2 << 12),
+ OPC_RISC_CSRRC = OPC_RISC_SYSTEM | (0x3 << 12),
+ OPC_RISC_CSRRWI = OPC_RISC_SYSTEM | (0x5 << 12),
+ OPC_RISC_CSRRSI = OPC_RISC_SYSTEM | (0x6 << 12),
+ OPC_RISC_CSRRCI = OPC_RISC_SYSTEM | (0x7 << 12),
+};
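+
+/*
+ * The SYSTEM encodings with funct3 == 0 all alias to the same masked
+ * value; they are told apart by the 12-bit function field in imm[11:0]
+ * (0 for ECALL and 1 for EBREAK, for instance), which the decoder must
+ * inspect separately.
+ */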
+
+#define MASK_OP_FP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_FLW = OPC_RISC_FP_LOAD | (0x2 << 12),
+ OPC_RISC_FLD = OPC_RISC_FP_LOAD | (0x3 << 12),
+};
+
+#define MASK_OP_FP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
+enum {
+ OPC_RISC_FSW = OPC_RISC_FP_STORE | (0x2 << 12),
+ OPC_RISC_FSD = OPC_RISC_FP_STORE | (0x3 << 12),
+};
+
+#define MASK_OP_FP_FMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+ OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25),
+ OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+ OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25),
+ OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+ OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25),
+ OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25),
+};
+
+#define MASK_OP_FP_FNMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
+enum {
+ OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25),
+ OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25),
+};
+
+#define MASK_OP_FP_ARITH(op) (MASK_OP_MAJOR(op) | (op & (0x7F << 25)))
+enum {
+ /* float */
+ OPC_RISC_FADD_S = OPC_RISC_FP_ARITH | (0x0 << 25),
+ OPC_RISC_FSUB_S = OPC_RISC_FP_ARITH | (0x4 << 25),
+ OPC_RISC_FMUL_S = OPC_RISC_FP_ARITH | (0x8 << 25),
+ OPC_RISC_FDIV_S = OPC_RISC_FP_ARITH | (0xC << 25),
+
+ OPC_RISC_FSGNJ_S = OPC_RISC_FP_ARITH | (0x10 << 25),
+ OPC_RISC_FSGNJN_S = OPC_RISC_FP_ARITH | (0x10 << 25),
+ OPC_RISC_FSGNJX_S = OPC_RISC_FP_ARITH | (0x10 << 25),
+
+ OPC_RISC_FMIN_S = OPC_RISC_FP_ARITH | (0x14 << 25),
+ OPC_RISC_FMAX_S = OPC_RISC_FP_ARITH | (0x14 << 25),
+
+ OPC_RISC_FSQRT_S = OPC_RISC_FP_ARITH | (0x2C << 25),
+
+ OPC_RISC_FEQ_S = OPC_RISC_FP_ARITH | (0x50 << 25),
+ OPC_RISC_FLT_S = OPC_RISC_FP_ARITH | (0x50 << 25),
+ OPC_RISC_FLE_S = OPC_RISC_FP_ARITH | (0x50 << 25),
+
+ OPC_RISC_FCVT_W_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+ OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+ OPC_RISC_FCVT_L_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+ OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
+
+ OPC_RISC_FCVT_S_W = OPC_RISC_FP_ARITH | (0x68 << 25),
+ OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25),
+ OPC_RISC_FCVT_S_L = OPC_RISC_FP_ARITH | (0x68 << 25),
+ OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25),
+
+ OPC_RISC_FMV_X_S = OPC_RISC_FP_ARITH | (0x70 << 25),
+ OPC_RISC_FCLASS_S = OPC_RISC_FP_ARITH | (0x70 << 25),
+
+ OPC_RISC_FMV_S_X = OPC_RISC_FP_ARITH | (0x78 << 25),
+
+ /* double */
+ OPC_RISC_FADD_D = OPC_RISC_FP_ARITH | (0x1 << 25),
+ OPC_RISC_FSUB_D = OPC_RISC_FP_ARITH | (0x5 << 25),
+ OPC_RISC_FMUL_D = OPC_RISC_FP_ARITH | (0x9 << 25),
+ OPC_RISC_FDIV_D = OPC_RISC_FP_ARITH | (0xD << 25),
+
+ OPC_RISC_FSGNJ_D = OPC_RISC_FP_ARITH | (0x11 << 25),
+ OPC_RISC_FSGNJN_D = OPC_RISC_FP_ARITH | (0x11 << 25),
+ OPC_RISC_FSGNJX_D = OPC_RISC_FP_ARITH | (0x11 << 25),
+
+ OPC_RISC_FMIN_D = OPC_RISC_FP_ARITH | (0x15 << 25),
+ OPC_RISC_FMAX_D = OPC_RISC_FP_ARITH | (0x15 << 25),
+
+ OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25),
+
+ OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25),
+
+ OPC_RISC_FSQRT_D = OPC_RISC_FP_ARITH | (0x2D << 25),
+
+ OPC_RISC_FEQ_D = OPC_RISC_FP_ARITH | (0x51 << 25),
+ OPC_RISC_FLT_D = OPC_RISC_FP_ARITH | (0x51 << 25),
+ OPC_RISC_FLE_D = OPC_RISC_FP_ARITH | (0x51 << 25),
+
+ OPC_RISC_FCVT_W_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+ OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+ OPC_RISC_FCVT_L_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+ OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
+
+ OPC_RISC_FCVT_D_W = OPC_RISC_FP_ARITH | (0x69 << 25),
+ OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25),
+ OPC_RISC_FCVT_D_L = OPC_RISC_FP_ARITH | (0x69 << 25),
+ OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25),
+
+ OPC_RISC_FMV_X_D = OPC_RISC_FP_ARITH | (0x71 << 25),
+ OPC_RISC_FCLASS_D = OPC_RISC_FP_ARITH | (0x71 << 25),
+
+ OPC_RISC_FMV_D_X = OPC_RISC_FP_ARITH | (0x79 << 25),
+};
+
+#define GET_B_IMM(inst) ((extract32(inst, 8, 4) << 1) \
+ | (extract32(inst, 25, 6) << 5) \
+ | (extract32(inst, 7, 1) << 11) \
+ | (sextract64(inst, 31, 1) << 12))
+
+#define GET_STORE_IMM(inst) ((extract32(inst, 7, 5)) \
+ | (sextract64(inst, 25, 7) << 5))
+
+#define GET_JAL_IMM(inst) ((extract32(inst, 21, 10) << 1) \
+ | (extract32(inst, 20, 1) << 11) \
+ | (extract32(inst, 12, 8) << 12) \
+ | (sextract64(inst, 31, 1) << 20))
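+
+/*
+ * The sextract64() of the top instruction bit supplies the sign, so the
+ * reassembled immediates are already sign-extended: branches reach
+ * +/-4 KiB in 2-byte steps, stores +/-2 KiB, and JAL +/-1 MiB in
+ * 2-byte steps.
+ */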
+
+#define GET_RM(inst) extract32(inst, 12, 3)
+#define GET_RS3(inst) extract32(inst, 27, 5)
+#define GET_RS1(inst) extract32(inst, 15, 5)
+#define GET_RS2(inst) extract32(inst, 20, 5)
+#define GET_RD(inst) extract32(inst, 7, 5)
+#define GET_IMM(inst) sextract64(inst, 20, 12)
+
+/* RVC decoding macros */
+#define GET_C_IMM(inst) (extract32(inst, 2, 5) \
+ | (sextract64(inst, 12, 1) << 5))
+#define GET_C_ZIMM(inst) (extract32(inst, 2, 5) \
+ | (extract32(inst, 12, 1) << 5))
+#define GET_C_ADDI4SPN_IMM(inst) ((extract32(inst, 6, 1) << 2) \
+ | (extract32(inst, 5, 1) << 3) \
+ | (extract32(inst, 11, 2) << 4) \
+ | (extract32(inst, 7, 4) << 6))
+#define GET_C_ADDI16SP_IMM(inst) ((extract32(inst, 6, 1) << 4) \
+ | (extract32(inst, 2, 1) << 5) \
+ | (extract32(inst, 5, 1) << 6) \
+ | (extract32(inst, 3, 2) << 7) \
+ | (sextract64(inst, 12, 1) << 9))
+#define GET_C_LWSP_IMM(inst) ((extract32(inst, 4, 3) << 2) \
+ | (extract32(inst, 12, 1) << 5) \
+ | (extract32(inst, 2, 2) << 6))
+#define GET_C_LDSP_IMM(inst) ((extract32(inst, 5, 2) << 3) \
+ | (extract32(inst, 12, 1) << 5) \
+ | (extract32(inst, 2, 3) << 6))
+#define GET_C_SWSP_IMM(inst) ((extract32(inst, 9, 4) << 2) \
+ | (extract32(inst, 7, 2) << 6))
+#define GET_C_SDSP_IMM(inst) ((extract32(inst, 10, 3) << 3) \
+ | (extract32(inst, 7, 3) << 6))
+#define GET_C_LW_IMM(inst) ((extract32(inst, 6, 1) << 2) \
+ | (extract32(inst, 10, 3) << 3) \
+ | (extract32(inst, 5, 1) << 6))
+#define GET_C_LD_IMM(inst) ((extract16(inst, 10, 3) << 3) \
+ | (extract16(inst, 5, 2) << 6))
+#define GET_C_J_IMM(inst) ((extract32(inst, 3, 3) << 1) \
+ | (extract32(inst, 11, 1) << 4) \
+ | (extract32(inst, 2, 1) << 5) \
+ | (extract32(inst, 7, 1) << 6) \
+ | (extract32(inst, 6, 1) << 7) \
+ | (extract32(inst, 9, 2) << 8) \
+ | (extract32(inst, 8, 1) << 10) \
+ | (sextract64(inst, 12, 1) << 11))
+#define GET_C_B_IMM(inst) ((extract32(inst, 3, 2) << 1) \
+ | (extract32(inst, 10, 2) << 3) \
+ | (extract32(inst, 2, 1) << 5) \
+ | (extract32(inst, 5, 2) << 6) \
+ | (sextract64(inst, 12, 1) << 8))
+#define GET_C_SIMM3(inst) extract32(inst, 10, 3)
+#define GET_C_RD(inst) GET_RD(inst)
+#define GET_C_RS1(inst) GET_RD(inst)
+#define GET_C_RS2(inst) extract32(inst, 2, 5)
+#define GET_C_RS1S(inst) (8 + extract16(inst, 7, 3))
+#define GET_C_RS2S(inst) (8 + extract16(inst, 2, 3))
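+
+/*
+ * The 3-bit rs1'/rs2' fields of the compressed encodings can only name
+ * registers x8..x15, hence the "8 +" bias in GET_C_RS1S/GET_C_RS2S.
+ */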
+
+#endif
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
new file mode 100644
index 000000000..b15ad394b
--- /dev/null
+++ b/target/riscv/internals.h
@@ -0,0 +1,61 @@
+/*
+ * QEMU RISC-V CPU -- internal functions and types
+ *
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_CPU_INTERNALS_H
+#define RISCV_CPU_INTERNALS_H
+
+#include "hw/registerfields.h"
+
+/* share data between vector helpers and decode code */
+FIELD(VDATA, MLEN, 0, 8)
+FIELD(VDATA, VM, 8, 1)
+FIELD(VDATA, LMUL, 9, 2)
+FIELD(VDATA, NF, 11, 4)
+FIELD(VDATA, WD, 11, 1)
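+/*
+ * NF (segment loads/stores) and WD (vector AMOs) overlap on purpose:
+ * no single encoding carries both fields.
+ */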
+
+/* floating-point classify helpers */
+target_ulong fclass_h(uint64_t frs1);
+target_ulong fclass_s(uint64_t frs1);
+target_ulong fclass_d(uint64_t frs1);
+
+#define SEW8 0
+#define SEW16 1
+#define SEW32 2
+#define SEW64 3
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_riscv_cpu;
+#endif
+
+static inline uint64_t nanbox_s(float32 f)
+{
+ return f | MAKE_64BIT_MASK(32, 32);
+}
+
+static inline float32 check_nanbox_s(uint64_t f)
+{
+ uint64_t mask = MAKE_64BIT_MASK(32, 32);
+
+ if (likely((f & mask) == mask)) {
+ return (uint32_t)f;
+ } else {
+ return 0x7fc00000u; /* default qnan */
+ }
+}
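+
+/*
+ * For example, nanbox_s(0x3f800000), i.e. 1.0f, yields
+ * 0xffffffff3f800000, while check_nanbox_s() of a value whose upper
+ * half is not all ones, such as 0x000000003f800000, returns the
+ * canonical quiet NaN 0x7fc00000, as the RV64 NaN-boxing rules require.
+ */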
+
+#endif
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
new file mode 100644
index 000000000..ad8248ebf
--- /dev/null
+++ b/target/riscv/machine.c
@@ -0,0 +1,223 @@
+/*
+ * RISC-V VMState Description
+ *
+ * Copyright (c) 2020 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "migration/cpu.h"
+
+static bool pmp_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return riscv_feature(env, RISCV_FEATURE_PMP);
+}
+
+static int pmp_post_load(void *opaque, int version_id)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ pmp_update_rule_addr(env, i);
+ }
+ pmp_update_rule_nums(env);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_pmp_entry = {
+ .name = "cpu/pmp/entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(addr_reg, pmp_entry_t),
+ VMSTATE_UINT8(cfg_reg, pmp_entry_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_pmp = {
+ .name = "cpu/pmp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmp_needed,
+ .post_load = pmp_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
+ 0, vmstate_pmp_entry, pmp_entry_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyper_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return riscv_has_ext(env, RVH);
+}
+
+static const VMStateDescription vmstate_hyper = {
+ .name = "cpu/hyper",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyper_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(env.hstatus, RISCVCPU),
+ VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
+ VMSTATE_UINTTL(env.hideleg, RISCVCPU),
+ VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
+ VMSTATE_UINTTL(env.htval, RISCVCPU),
+ VMSTATE_UINTTL(env.htinst, RISCVCPU),
+ VMSTATE_UINTTL(env.hgatp, RISCVCPU),
+ VMSTATE_UINT64(env.htimedelta, RISCVCPU),
+
+ VMSTATE_UINT64(env.vsstatus, RISCVCPU),
+ VMSTATE_UINTTL(env.vstvec, RISCVCPU),
+ VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
+ VMSTATE_UINTTL(env.vsepc, RISCVCPU),
+ VMSTATE_UINTTL(env.vscause, RISCVCPU),
+ VMSTATE_UINTTL(env.vstval, RISCVCPU),
+ VMSTATE_UINTTL(env.vsatp, RISCVCPU),
+
+ VMSTATE_UINTTL(env.mtval2, RISCVCPU),
+ VMSTATE_UINTTL(env.mtinst, RISCVCPU),
+
+ VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
+ VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
+ VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
+ VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
+ VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
+ VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
+ VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vector_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return riscv_has_ext(env, RVV);
+}
+
+static const VMStateDescription vmstate_vector = {
+ .name = "cpu/vector",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vector_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
+ VMSTATE_UINTTL(env.vxrm, RISCVCPU),
+ VMSTATE_UINTTL(env.vxsat, RISCVCPU),
+ VMSTATE_UINTTL(env.vl, RISCVCPU),
+ VMSTATE_UINTTL(env.vstart, RISCVCPU),
+ VMSTATE_UINTTL(env.vtype, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pointermasking_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return riscv_has_ext(env, RVJ);
+}
+
+static const VMStateDescription vmstate_pointermasking = {
+ .name = "cpu/pointer_masking",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pointermasking_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(env.mmte, RISCVCPU),
+ VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
+ VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
+ VMSTATE_UINTTL(env.spmmask, RISCVCPU),
+ VMSTATE_UINTTL(env.spmbase, RISCVCPU),
+ VMSTATE_UINTTL(env.upmmask, RISCVCPU),
+ VMSTATE_UINTTL(env.upmbase, RISCVCPU),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_riscv_cpu = {
+ .name = "cpu",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
+ VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
+ VMSTATE_UINTTL(env.pc, RISCVCPU),
+ VMSTATE_UINTTL(env.load_res, RISCVCPU),
+ VMSTATE_UINTTL(env.load_val, RISCVCPU),
+ VMSTATE_UINTTL(env.frm, RISCVCPU),
+ VMSTATE_UINTTL(env.badaddr, RISCVCPU),
+ VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
+ VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
+ VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
+ VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
+ VMSTATE_UINT32(env.misa_ext, RISCVCPU),
+ VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
+ VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
+ VMSTATE_UINT32(env.features, RISCVCPU),
+ VMSTATE_UINTTL(env.priv, RISCVCPU),
+ VMSTATE_UINTTL(env.virt, RISCVCPU),
+ VMSTATE_UINTTL(env.resetvec, RISCVCPU),
+ VMSTATE_UINTTL(env.mhartid, RISCVCPU),
+ VMSTATE_UINT64(env.mstatus, RISCVCPU),
+ VMSTATE_UINTTL(env.mip, RISCVCPU),
+ VMSTATE_UINT32(env.miclaim, RISCVCPU),
+ VMSTATE_UINTTL(env.mie, RISCVCPU),
+ VMSTATE_UINTTL(env.mideleg, RISCVCPU),
+ VMSTATE_UINTTL(env.satp, RISCVCPU),
+ VMSTATE_UINTTL(env.stval, RISCVCPU),
+ VMSTATE_UINTTL(env.medeleg, RISCVCPU),
+ VMSTATE_UINTTL(env.stvec, RISCVCPU),
+ VMSTATE_UINTTL(env.sepc, RISCVCPU),
+ VMSTATE_UINTTL(env.scause, RISCVCPU),
+ VMSTATE_UINTTL(env.mtvec, RISCVCPU),
+ VMSTATE_UINTTL(env.mepc, RISCVCPU),
+ VMSTATE_UINTTL(env.mcause, RISCVCPU),
+ VMSTATE_UINTTL(env.mtval, RISCVCPU),
+ VMSTATE_UINTTL(env.scounteren, RISCVCPU),
+ VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
+ VMSTATE_UINTTL(env.sscratch, RISCVCPU),
+ VMSTATE_UINTTL(env.mscratch, RISCVCPU),
+ VMSTATE_UINT64(env.mfromhost, RISCVCPU),
+ VMSTATE_UINT64(env.mtohost, RISCVCPU),
+ VMSTATE_UINT64(env.timecmp, RISCVCPU),
+
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_pmp,
+ &vmstate_hyper,
+ &vmstate_vector,
+ &vmstate_pointermasking,
+ NULL
+ }
+};
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
new file mode 100644
index 000000000..d5e0bc93e
--- /dev/null
+++ b/target/riscv/meson.build
@@ -0,0 +1,32 @@
+# FIXME extra_args should accept files()
+dir = meson.current_source_dir()
+
+gen = [
+ decodetree.process('insn16.decode', extra_args: ['--static-decode=decode_insn16', '--insnwidth=16']),
+ decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'),
+]
+
+riscv_ss = ss.source_set()
+riscv_ss.add(gen)
+riscv_ss.add(files(
+ 'cpu.c',
+ 'cpu_helper.c',
+ 'csr.c',
+ 'fpu_helper.c',
+ 'gdbstub.c',
+ 'op_helper.c',
+ 'vector_helper.c',
+ 'bitmanip_helper.c',
+ 'translate.c',
+))
+
+riscv_softmmu_ss = ss.source_set()
+riscv_softmmu_ss.add(files(
+ 'arch_dump.c',
+ 'pmp.c',
+ 'monitor.c',
+ 'machine.c'
+))
+
+target_arch += {'riscv': riscv_ss}
+target_softmmu_arch += {'riscv': riscv_softmmu_ss}
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
new file mode 100644
index 000000000..7efb4b62c
--- /dev/null
+++ b/target/riscv/monitor.c
@@ -0,0 +1,236 @@
+/*
+ * QEMU monitor for RISC-V
+ *
+ * Copyright (c) 2019 Bin Meng <bmeng.cn@gmail.com>
+ *
+ * RISC-V specific monitor commands implementation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu_bits.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+
+#ifdef TARGET_RISCV64
+#define PTE_HEADER_FIELDS "vaddr paddr "\
+ "size attr\n"
+#define PTE_HEADER_DELIMITER "---------------- ---------------- "\
+ "---------------- -------\n"
+#else
+#define PTE_HEADER_FIELDS "vaddr paddr size attr\n"
+#define PTE_HEADER_DELIMITER "-------- ---------------- -------- -------\n"
+#endif
+
+/* Perform linear address sign extension */
+static target_ulong addr_canonical(int va_bits, target_ulong addr)
+{
+#ifdef TARGET_RISCV64
+ if (addr & (1UL << (va_bits - 1))) {
+ addr |= (hwaddr)-(1L << va_bits);
+ }
+#endif
+
+ return addr;
+}
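+
+/*
+ * For Sv39 (va_bits == 39) this turns, say, 0x4000000000 (bit 38 set)
+ * into the sign-extended 0xffffffc000000000; RV32 addresses are
+ * returned unchanged.
+ */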
+
+static void print_pte_header(Monitor *mon)
+{
+ monitor_printf(mon, PTE_HEADER_FIELDS);
+ monitor_printf(mon, PTE_HEADER_DELIMITER);
+}
+
+static void print_pte(Monitor *mon, int va_bits, target_ulong vaddr,
+ hwaddr paddr, target_ulong size, int attr)
+{
+ /* sanity check on vaddr */
+ if (vaddr >= (1UL << va_bits)) {
+ return;
+ }
+
+ if (!size) {
+ return;
+ }
+
+ monitor_printf(mon, TARGET_FMT_lx " " TARGET_FMT_plx " " TARGET_FMT_lx
+ " %c%c%c%c%c%c%c\n",
+ addr_canonical(va_bits, vaddr),
+ paddr, size,
+ attr & PTE_R ? 'r' : '-',
+ attr & PTE_W ? 'w' : '-',
+ attr & PTE_X ? 'x' : '-',
+ attr & PTE_U ? 'u' : '-',
+ attr & PTE_G ? 'g' : '-',
+ attr & PTE_A ? 'a' : '-',
+ attr & PTE_D ? 'd' : '-');
+}
+
+static void walk_pte(Monitor *mon, hwaddr base, target_ulong start,
+ int level, int ptidxbits, int ptesize, int va_bits,
+ target_ulong *vbase, hwaddr *pbase, hwaddr *last_paddr,
+ target_ulong *last_size, int *last_attr)
+{
+ hwaddr pte_addr;
+ hwaddr paddr;
+ target_ulong pgsize;
+ target_ulong pte;
+ int ptshift;
+ int attr;
+ int idx;
+
+ if (level < 0) {
+ return;
+ }
+
+ ptshift = level * ptidxbits;
+ pgsize = 1UL << (PGSHIFT + ptshift);
+
+ for (idx = 0; idx < (1UL << ptidxbits); idx++) {
+ pte_addr = base + idx * ptesize;
+ cpu_physical_memory_read(pte_addr, &pte, ptesize);
+
+ paddr = (hwaddr)(pte >> PTE_PPN_SHIFT) << PGSHIFT;
+ attr = pte & 0xff;
+
+ /* PTE has to be valid */
+ if (attr & PTE_V) {
+ if (attr & (PTE_R | PTE_W | PTE_X)) {
+ /*
+ * A leaf PTE has been found
+ *
+ * If current PTE's permission bits differ from the last one,
+ * or current PTE's ppn does not make a contiguous physical
+ * address block together with the last one, print out the last
+ * contiguous mapped block details.
+ */
+ if ((*last_attr != attr) ||
+ (*last_paddr + *last_size != paddr)) {
+ print_pte(mon, va_bits, *vbase, *pbase,
+ *last_paddr + *last_size - *pbase, *last_attr);
+
+ *vbase = start;
+ *pbase = paddr;
+ *last_attr = attr;
+ }
+
+ *last_paddr = paddr;
+ *last_size = pgsize;
+ } else {
+ /* pointer to the next level of the page table */
+ walk_pte(mon, paddr, start, level - 1, ptidxbits, ptesize,
+ va_bits, vbase, pbase, last_paddr,
+ last_size, last_attr);
+ }
+ }
+
+ start += pgsize;
+ }
+
+}
+
+static void mem_info_svxx(Monitor *mon, CPUArchState *env)
+{
+ int levels, ptidxbits, ptesize, vm, va_bits;
+ hwaddr base;
+ target_ulong vbase;
+ hwaddr pbase;
+ hwaddr last_paddr;
+ target_ulong last_size;
+ int last_attr;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP32_MODE);
+ } else {
+ base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP64_MODE);
+ }
+
+ switch (vm) {
+ case VM_1_10_SV32:
+ levels = 2;
+ ptidxbits = 10;
+ ptesize = 4;
+ break;
+ case VM_1_10_SV39:
+ levels = 3;
+ ptidxbits = 9;
+ ptesize = 8;
+ break;
+ case VM_1_10_SV48:
+ levels = 4;
+ ptidxbits = 9;
+ ptesize = 8;
+ break;
+ case VM_1_10_SV57:
+ levels = 5;
+ ptidxbits = 9;
+ ptesize = 8;
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+
+ /* calculate virtual address bits */
+ va_bits = PGSHIFT + levels * ptidxbits;
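+ /* e.g. Sv32: 12 + 2 * 10 = 32; Sv39: 12 + 3 * 9 = 39; Sv48: 12 + 4 * 9 = 48 */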
+
+ /* print header */
+ print_pte_header(mon);
+
+ vbase = -1;
+ pbase = -1;
+ last_paddr = -1;
+ last_size = 0;
+ last_attr = 0;
+
+ /* walk page tables, starting from address 0 */
+ walk_pte(mon, base, 0, levels - 1, ptidxbits, ptesize, va_bits,
+ &vbase, &pbase, &last_paddr, &last_size, &last_attr);
+
+ /* don't forget the last one */
+ print_pte(mon, va_bits, vbase, pbase,
+ last_paddr + last_size - pbase, last_attr);
+}
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env(mon);
+ if (!env) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+
+ if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+ monitor_printf(mon, "S-mode MMU unavailable\n");
+ return;
+ }
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ if (!(env->satp & SATP32_MODE)) {
+ monitor_printf(mon, "No translation or protection\n");
+ return;
+ }
+ } else {
+ if (!(env->satp & SATP64_MODE)) {
+ monitor_printf(mon, "No translation or protection\n");
+ return;
+ }
+ }
+
+ mem_info_svxx(mon, env);
+}
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
new file mode 100644
index 000000000..ee7c24efe
--- /dev/null
+++ b/target/riscv/op_helper.c
@@ -0,0 +1,249 @@
+/*
+ * RISC-V Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Exceptions processing helpers */
+void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
+ uint32_t exception, uintptr_t pc)
+{
+ CPUState *cs = env_cpu(env);
+ cs->exception_index = exception;
+ cpu_loop_exit_restore(cs, pc);
+}
+
+void helper_raise_exception(CPURISCVState *env, uint32_t exception)
+{
+ riscv_raise_exception(env, exception, 0);
+}
+
+target_ulong helper_csrr(CPURISCVState *env, int csr)
+{
+ target_ulong val = 0;
+ RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+ return val;
+}
+
+void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
+{
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, -1);
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+}
+
+target_ulong helper_csrrw(CPURISCVState *env, int csr,
+ target_ulong src, target_ulong write_mask)
+{
+ target_ulong val = 0;
+ RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+ return val;
+}
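+
+/*
+ * All three CSR helpers map onto the single riscv_csrrw() primitive: a
+ * write mask of 0 makes it a pure read (helper_csrr), a NULL value
+ * pointer with an all-ones mask makes it a pure write (helper_csrw),
+ * and helper_csrrw passes both through for read-modify-write.
+ */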
+
+#ifndef CONFIG_USER_ONLY
+
+target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+ uint64_t mstatus;
+ target_ulong prev_priv, prev_virt;
+
+ if (!(env->priv >= PRV_S)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ target_ulong retpc = env->sepc;
+ if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+ }
+
+ if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
+ get_field(env->hstatus, HSTATUS_VTSR)) {
+ riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
+ }
+
+ mstatus = env->mstatus;
+
+ if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
+ /* The Hypervisor extension is present and virtualisation is disabled */
+ target_ulong hstatus = env->hstatus;
+
+ prev_priv = get_field(mstatus, MSTATUS_SPP);
+ prev_virt = get_field(hstatus, HSTATUS_SPV);
+
+ hstatus = set_field(hstatus, HSTATUS_SPV, 0);
+ mstatus = set_field(mstatus, MSTATUS_SPP, 0);
+ mstatus = set_field(mstatus, SSTATUS_SIE,
+ get_field(mstatus, SSTATUS_SPIE));
+ mstatus = set_field(mstatus, SSTATUS_SPIE, 1);
+
+ env->mstatus = mstatus;
+ env->hstatus = hstatus;
+
+ if (prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_virt_enabled(env, prev_virt);
+ } else {
+ prev_priv = get_field(mstatus, MSTATUS_SPP);
+
+ mstatus = set_field(mstatus, MSTATUS_SIE,
+ get_field(mstatus, MSTATUS_SPIE));
+ mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
+ mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+ env->mstatus = mstatus;
+ }
+
+ riscv_cpu_set_mode(env, prev_priv);
+
+ return retpc;
+}
+
+target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
+{
+ if (!(env->priv >= PRV_M)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ target_ulong retpc = env->mepc;
+ if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
+ riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+ }
+
+ uint64_t mstatus = env->mstatus;
+ target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+
+ if (!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
+ mstatus = set_field(mstatus, MSTATUS_MIE,
+ get_field(mstatus, MSTATUS_MPIE));
+ mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
+ mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
+ mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+ env->mstatus = mstatus;
+ riscv_cpu_set_mode(env, prev_priv);
+
+ if (riscv_has_ext(env, RVH)) {
+ if (prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_virt_enabled(env, prev_virt);
+ }
+
+ return retpc;
+}
+
+void helper_wfi(CPURISCVState *env)
+{
+ CPUState *cs = env_cpu(env);
+ bool rvs = riscv_has_ext(env, RVS);
+ bool prv_u = env->priv == PRV_U;
+ bool prv_s = env->priv == PRV_S;
+
+ if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) ||
+ (rvs && prv_u && !riscv_cpu_virt_enabled(env))) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ } else if (riscv_cpu_virt_enabled(env) && (prv_u ||
+ (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) {
+ riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
+ } else {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+ }
+}
+
+void helper_tlb_flush(CPURISCVState *env)
+{
+ CPUState *cs = env_cpu(env);
+ if (!(env->priv >= PRV_S) ||
+ (env->priv == PRV_S &&
+ get_field(env->mstatus, MSTATUS_TVM))) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ } else if (riscv_has_ext(env, RVH) && riscv_cpu_virt_enabled(env) &&
+ get_field(env->hstatus, HSTATUS_VTVM)) {
+ riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
+ } else {
+ tlb_flush(cs);
+ }
+}
+
+void helper_hyp_tlb_flush(CPURISCVState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
+ riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
+ }
+
+ if (env->priv == PRV_M ||
+ (env->priv == PRV_S && !riscv_cpu_virt_enabled(env))) {
+ tlb_flush(cs);
+ return;
+ }
+
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+}
+
+void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
+{
+ if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env) &&
+ get_field(env->mstatus, MSTATUS_TVM)) {
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ }
+
+ helper_hyp_tlb_flush(env);
+}
+
+target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong address)
+{
+ int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
+
+ return cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
+}
+
+target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong address)
+{
+ int mmu_idx = cpu_mmu_index(env, true) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
+
+ return cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
new file mode 100644
index 000000000..54abf4258
--- /dev/null
+++ b/target/riscv/pmp.c
@@ -0,0 +1,664 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ * Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection implementation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "trace.h"
+#include "exec/exec-all.h"
+
+static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
+ uint8_t val);
+static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
+static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
+
+/*
+ * Accessor to extract the address-matching type (the 'A' field) from a cfg reg
+ */
+static inline uint8_t pmp_get_a_field(uint8_t cfg)
+{
+ uint8_t a = cfg >> 3;
+ return a & 0x3;
+}
+
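+/*
+ * Illustrative example: a cfg value of 0x18 (binary 0b11000) has bits
+ * [4:3] equal to 0b11, so pmp_get_a_field() returns PMP_AMATCH_NAPOT.
+ */
+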
+/*
+ * Check whether a PMP entry is locked or not.
+ */
+static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
+{
+ if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
+ return 1;
+ }
+
+ /* Top PMP has no 'next' to check */
+ if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Count the number of active rules.
+ */
+uint32_t pmp_get_num_rules(CPURISCVState *env)
+{
+ return env->pmp_state.num_rules;
+}
+
+/*
+ * Accessor to get the cfg reg for a specific PMP/HART
+ */
+static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
+{
+ if (pmp_index < MAX_RISCV_PMPS) {
+ return env->pmp_state.pmp[pmp_index].cfg_reg;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Accessor to set the cfg reg for a specific PMP/HART.
+ * Performs bounds checks and honours the relevant lock bit.
+ */
+static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
+{
+ if (pmp_index < MAX_RISCV_PMPS) {
+ bool locked = true;
+
+ if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ /* mseccfg.RLB is set */
+ if (MSECCFG_RLB_ISSET(env)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is not set */
+ if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is set */
+ if (MSECCFG_MML_ISSET(env)) {
+ /* not adding execute bit */
+ if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
+ locked = false;
+ }
+ /* shared region and not adding X bit */
+ if ((val & PMP_LOCK) != PMP_LOCK &&
+ (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
+ locked = false;
+ }
+ }
+ } else {
+ if (!pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+ }
+
+ if (locked) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
+ } else {
+ env->pmp_state.pmp[pmp_index].cfg_reg = val;
+ pmp_update_rule(env, pmp_index);
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - out of bounds\n");
+ }
+}
+
+static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
+{
+ /*
+ aaaa...aaa0 8-byte NAPOT range
+ aaaa...aa01 16-byte NAPOT range
+ aaaa...a011 32-byte NAPOT range
+ ...
+ aa01...1111 2^XLEN-byte NAPOT range
+ a011...1111 2^(XLEN+1)-byte NAPOT range
+ 0111...1111 2^(XLEN+2)-byte NAPOT range
+ 1111...1111 Reserved
+ */
+ if (a == -1) {
+ *sa = 0u;
+ *ea = -1;
+ return;
+ } else {
+ target_ulong t1 = ctz64(~a);
+ target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
+ target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
+ *sa = base;
+ *ea = base + range;
+ }
+}
+
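+/*
+ * Worked example (illustrative value): pmpaddr = 0x2001 has one
+ * trailing set bit, so t1 = 1, base = (0x2000 << 2) = 0x8000 and
+ * range = (1 << 4) - 1 = 15, i.e. the 16-byte NAPOT region
+ * [0x8000, 0x800f].
+ */
+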
+void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
+{
+ uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
+ target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
+ target_ulong prev_addr = 0u;
+ target_ulong sa = 0u;
+ target_ulong ea = 0u;
+
+ if (pmp_index >= 1u) {
+ prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
+ }
+
+ switch (pmp_get_a_field(this_cfg)) {
+ case PMP_AMATCH_OFF:
+ sa = 0u;
+ ea = -1;
+ break;
+
+ case PMP_AMATCH_TOR:
+ sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
+ ea = (this_addr << 2) - 1u;
+ break;
+
+ case PMP_AMATCH_NA4:
+ sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
+ ea = (sa + 4u) - 1u;
+ break;
+
+ case PMP_AMATCH_NAPOT:
+ pmp_decode_napot(this_addr, &sa, &ea);
+ break;
+
+ default:
+ sa = 0u;
+ ea = 0u;
+ break;
+ }
+
+ env->pmp_state.addr[pmp_index].sa = sa;
+ env->pmp_state.addr[pmp_index].ea = ea;
+}
+
+void pmp_update_rule_nums(CPURISCVState *env)
+{
+ int i;
+
+ env->pmp_state.num_rules = 0;
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ const uint8_t a_field =
+ pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
+ if (PMP_AMATCH_OFF != a_field) {
+ env->pmp_state.num_rules++;
+ }
+ }
+}
+
+/*
+ * Convert cfg/addr reg values into simple 'sa' (start address) and 'ea'
+ * (end address) values.
+ * This function is called relatively infrequently, whereas the check that
+ * an address is within a PMP rule is called often, so optimise that one.
+ */
+static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
+{
+ pmp_update_rule_addr(env, pmp_index);
+ pmp_update_rule_nums(env);
+}
+
+static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
+{
+ int result = 0;
+
+ if ((addr >= env->pmp_state.addr[pmp_index].sa)
+ && (addr <= env->pmp_state.addr[pmp_index].ea)) {
+ result = 1;
+ } else {
+ result = 0;
+ }
+
+ return result;
+}
+
+/*
+ * Check if the address has required RWX privs when no PMP entry is matched.
+ */
+static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode)
+{
+ bool ret;
+
+ if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (MSECCFG_MMWP_ISSET(env)) {
+ /*
+ * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
+ * so we default to deny all, even for M-mode.
+ */
+ *allowed_privs = 0;
+ return false;
+ } else if (MSECCFG_MML_ISSET(env)) {
+ /*
+ * The Machine Mode Lockdown (mseccfg.MML) bit is set
+ * so we can only execute code in M-mode with an applicable
+ * rule. Other modes are disabled.
+ */
+ if (mode == PRV_M && !(privs & PMP_EXEC)) {
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ } else {
+ ret = false;
+ *allowed_privs = 0;
+ }
+
+ return ret;
+ }
+ }
+
+ if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+ /*
+ * Privileged spec v1.10 states if HW doesn't implement any PMP entry
+ * or no PMP entry matches an M-Mode access, the access succeeds.
+ */
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ } else {
+ /*
+ * Other modes are not allowed to succeed if they don't match a rule
+ * while rules exist; the no-rule case was checked earlier in this
+ * function.
+ */
+ ret = false;
+ *allowed_privs = 0;
+ }
+
+ return ret;
+}
+
+
+/*
+ * Public Interface
+ */
+
+/*
+ * Check if the address has required RWX privs to complete desired operation
+ */
+bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode)
+{
+ int i = 0;
+ int ret = -1;
+ int pmp_size = 0;
+ target_ulong s = 0;
+ target_ulong e = 0;
+
+ /* Short cut if no rules */
+ if (0 == pmp_get_num_rules(env)) {
+ return pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode);
+ }
+
+ if (size == 0) {
+ if (riscv_feature(env, RISCV_FEATURE_MMU)) {
+ /*
+ * If size is unknown (0), assume that all bytes
+ * from addr to the end of the page will be accessed.
+ */
+ pmp_size = -(addr | TARGET_PAGE_MASK);
+ } else {
+ pmp_size = sizeof(target_ulong);
+ }
+ } else {
+ pmp_size = size;
+ }
+
+ /*
+ * The 1.10 draft priv spec states there is an implicit order
+ * from low to high.
+ */
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ s = pmp_is_in_range(env, i, addr);
+ e = pmp_is_in_range(env, i, addr + pmp_size - 1);
+
+ /* partially inside */
+ if ((s + e) == 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pmp violation - access is partially inside\n");
+ ret = 0;
+ break;
+ }
+
+ /* fully inside */
+ const uint8_t a_field =
+ pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
+
+ /*
+ * Convert the PMP permissions to match the truth table in the
+ * ePMP spec.
+ */
+ const uint8_t epmp_operation =
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
+ (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
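+ /*
+ * The layout above is L<<3 | R<<2 | W<<1 | X; e.g. an entry with
+ * only R and X set gives epmp_operation == 0b0101 == 5.
+ */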
+
+ if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
+ /*
+ * If the PMP entry is not off and the address is in range,
+ * do the priv check
+ */
+ if (!MSECCFG_MML_ISSET(env)) {
+ /*
+ * If mseccfg.MML Bit is not set, do pmp priv check
+ * This will always apply to regular PMP.
+ */
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ if ((mode != PRV_M) || pmp_is_locked(env, i)) {
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ }
+ } else {
+ /*
+ * If mseccfg.MML Bit set, do the enhanced pmp priv check
+ */
+ if (mode == PRV_M) {
+ switch (epmp_operation) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ *allowed_privs = 0;
+ break;
+ case 2:
+ case 3:
+ case 14:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 9:
+ case 10:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 11:
+ case 13:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 12:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (epmp_operation) {
+ case 0:
+ case 8:
+ case 9:
+ case 12:
+ case 13:
+ case 14:
+ *allowed_privs = 0;
+ break;
+ case 1:
+ case 10:
+ case 11:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 2:
+ case 4:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ case 3:
+ case 6:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 5:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 7:
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ }
+
+ ret = ((privs & *allowed_privs) == privs);
+ break;
+ }
+ }
+
+ /* No rule matched */
+ if (ret == -1) {
+ return pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode);
+ }
+
+ return ret == 1 ? true : false;
+}
+
+/*
+ * Handle a write to a pmpcfg CSR
+ */
+void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
+ target_ulong val)
+{
+ int i;
+ uint8_t cfg_val;
+
+ trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
+
+ if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - incorrect address\n");
+ return;
+ }
+
+ for (i = 0; i < sizeof(target_ulong); i++) {
+ cfg_val = (val >> 8 * i) & 0xff;
+ pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
+ }
+
+ /* If PMP permission of any addr has been changed, flush TLB pages. */
+ tlb_flush(env_cpu(env));
+}
+
+
+/*
+ * Handle a read from a pmpcfg CSR
+ */
+target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
+{
+ int i;
+ target_ulong cfg_val = 0;
+ target_ulong val = 0;
+
+ for (i = 0; i < sizeof(target_ulong); i++) {
+ val = pmp_read_cfg(env, (reg_index * 4) + i);
+ cfg_val |= (val << (i * 8));
+ }
+ trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);
+
+ return cfg_val;
+}
+
+
+/*
+ * Handle a write to a pmpaddr CSR
+ */
+void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
+ target_ulong val)
+{
+ trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
+
+ if (addr_index < MAX_RISCV_PMPS) {
+ /*
+ * In TOR mode, need to check the lock bit of the next pmp
+ * (if there is a next).
+ */
+ if (addr_index + 1 < MAX_RISCV_PMPS) {
+ uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
+
+ if (pmp_cfg & PMP_LOCK &&
+ PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ return;
+ }
+ }
+
+ if (!pmp_is_locked(env, addr_index)) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule(env, addr_index);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr write - locked\n");
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr write - out of bounds\n");
+ }
+}
+
+
+/*
+ * Handle a read from a pmpaddr CSR
+ */
+target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
+{
+ target_ulong val = 0;
+
+ if (addr_index < MAX_RISCV_PMPS) {
+ val = env->pmp_state.pmp[addr_index].addr_reg;
+ trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr read - out of bounds\n");
+ }
+
+ return val;
+}
+
+/*
+ * Handle a write to a mseccfg CSR
+ */
+void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
+{
+ int i;
+
+ trace_mseccfg_csr_write(env->mhartid, val);
+
+ /* RLB cannot be set while it is currently clear and any PMP region is locked */
+ if (!MSECCFG_RLB_ISSET(env)) {
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ if (pmp_is_locked(env, i)) {
+ val &= ~MSECCFG_RLB;
+ break;
+ }
+ }
+ }
+
+ /* Sticky bits */
+ val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
+
+ env->mseccfg = val;
+}
+
+/*
+ * Handle a read from a mseccfg CSR
+ */
+target_ulong mseccfg_csr_read(CPURISCVState *env)
+{
+ trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
+ return env->mseccfg;
+}
+
+/*
+ * Calculate the TLB size if the start or end address of a PMP entry
+ * falls within the TLB page.
+ */
+static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
+ target_ulong tlb_sa, target_ulong tlb_ea)
+{
+ target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
+ target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+
+ if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
+ return pmp_ea - pmp_sa + 1;
+ }
+
+ if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
+ return tlb_ea - pmp_sa + 1;
+ }
+
+ if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
+ return pmp_ea - tlb_sa + 1;
+ }
+
+ return 0;
+}
+
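+/*
+ * Example (illustrative): a 16-byte PMP region lying wholly inside the
+ * page yields a TLB size of 16, letting the caller shrink the mapping
+ * below TARGET_PAGE_SIZE.
+ */
+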
+/*
+ * Check whether any PMP entry's range covers this page. If so,
+ * find the minimum granularity to use for the TLB size.
+ */
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+ target_ulong *tlb_size)
+{
+ int i;
+ target_ulong val;
+ target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
+
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
+ if (val) {
+ if (*tlb_size == 0 || *tlb_size > val) {
+ *tlb_size = val;
+ }
+ }
+ }
+
+ if (*tlb_size != 0) {
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Convert PMP privilege to TLB page privilege.
+ */
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
+{
+ int prot = 0;
+
+ if (pmp_priv & PMP_READ) {
+ prot |= PAGE_READ;
+ }
+ if (pmp_priv & PMP_WRITE) {
+ prot |= PAGE_WRITE;
+ }
+ if (pmp_priv & PMP_EXEC) {
+ prot |= PAGE_EXEC;
+ }
+
+ return prot;
+}
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
new file mode 100644
index 000000000..a9a0b363a
--- /dev/null
+++ b/target/riscv/pmp.h
@@ -0,0 +1,85 @@
+/*
+ * QEMU RISC-V PMP (Physical Memory Protection)
+ *
+ * Author: Daire McNamara, daire.mcnamara@emdalo.com
+ * Ivan Griffin, ivan.griffin@emdalo.com
+ *
+ * This provides a RISC-V Physical Memory Protection interface
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RISCV_PMP_H
+#define RISCV_PMP_H
+
+typedef enum {
+ PMP_READ = 1 << 0,
+ PMP_WRITE = 1 << 1,
+ PMP_EXEC = 1 << 2,
+ PMP_LOCK = 1 << 7
+} pmp_priv_t;
+
+typedef enum {
+ PMP_AMATCH_OFF, /* Null (off) */
+ PMP_AMATCH_TOR, /* Top of Range */
+ PMP_AMATCH_NA4, /* Naturally aligned four-byte region */
+ PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
+} pmp_am_t;
+
+typedef enum {
+ MSECCFG_MML = 1 << 0,
+ MSECCFG_MMWP = 1 << 1,
+ MSECCFG_RLB = 1 << 2
+} mseccfg_field_t;
+
+typedef struct {
+ target_ulong addr_reg;
+ uint8_t cfg_reg;
+} pmp_entry_t;
+
+typedef struct {
+ target_ulong sa;
+ target_ulong ea;
+} pmp_addr_t;
+
+typedef struct {
+ pmp_entry_t pmp[MAX_RISCV_PMPS];
+ pmp_addr_t addr[MAX_RISCV_PMPS];
+ uint32_t num_rules;
+} pmp_table_t;
+
+void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
+ target_ulong val);
+target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);
+
+void mseccfg_csr_write(CPURISCVState *env, target_ulong val);
+target_ulong mseccfg_csr_read(CPURISCVState *env);
+
+void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
+ target_ulong val);
+target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
+bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
+ target_ulong mode);
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+ target_ulong *tlb_size);
+void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
+void pmp_update_rule_nums(CPURISCVState *env);
+uint32_t pmp_get_num_rules(CPURISCVState *env);
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);
+
+#define MSECCFG_MML_ISSET(env) get_field(env->mseccfg, MSECCFG_MML)
+#define MSECCFG_MMWP_ISSET(env) get_field(env->mseccfg, MSECCFG_MMWP)
+#define MSECCFG_RLB_ISSET(env) get_field(env->mseccfg, MSECCFG_RLB)
+
+#endif
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
new file mode 100644
index 000000000..49ec4d3b7
--- /dev/null
+++ b/target/riscv/trace-events
@@ -0,0 +1,11 @@
+# cpu_helper.c
+riscv_trap(uint64_t hartid, bool async, uint64_t cause, uint64_t epc, uint64_t tval, const char *desc) "hart:%"PRId64", async:%d, cause:%"PRId64", epc:0x%"PRIx64", tval:0x%"PRIx64", desc=%s"
+
+# pmp.c
+pmpcfg_csr_read(uint64_t mhartid, uint32_t reg_index, uint64_t val) "hart %" PRIu64 ": read reg%" PRIu32", val: 0x%" PRIx64
+pmpcfg_csr_write(uint64_t mhartid, uint32_t reg_index, uint64_t val) "hart %" PRIu64 ": write reg%" PRIu32", val: 0x%" PRIx64
+pmpaddr_csr_read(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %" PRIu64 ": read addr%" PRIu32", val: 0x%" PRIx64
+pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %" PRIu64 ": write addr%" PRIu32", val: 0x%" PRIx64
+
+mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
+mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
diff --git a/target/riscv/trace.h b/target/riscv/trace.h
new file mode 100644
index 000000000..03a89fcd9
--- /dev/null
+++ b/target/riscv/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_riscv.h"
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
new file mode 100644
index 000000000..1d57bc97b
--- /dev/null
+++ b/target/riscv/translate.c
@@ -0,0 +1,773 @@
+/*
+ * RISC-V emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "tcg/tcg-op.h"
+#include "disas/disas.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/translator.h"
+#include "exec/log.h"
+
+#include "instmap.h"
+
+/* global register indices */
+static TCGv cpu_gpr[32], cpu_pc, cpu_vl;
+static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
+static TCGv load_res;
+static TCGv load_val;
+/* globals for PM CSRs */
+static TCGv pm_mask[4];
+static TCGv pm_base[4];
+
+#include "exec/gen-icount.h"
+
+/*
+ * If an operation is being performed on less than TARGET_LONG_BITS,
+ * it may require the inputs to be sign- or zero-extended, which will
+ * depend on the exact operation being performed.
+ */
+typedef enum {
+ EXT_NONE,
+ EXT_SIGN,
+ EXT_ZERO,
+} DisasExtend;
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ /* pc_succ_insn points to the instruction following base.pc_next */
+ target_ulong pc_succ_insn;
+ target_ulong priv_ver;
+ RISCVMXL xl;
+ uint32_t misa_ext;
+ uint32_t opcode;
+ uint32_t mstatus_fs;
+ uint32_t mstatus_hs_fs;
+ uint32_t mem_idx;
+ /*
+ * Remember the rounding mode encoded in the previous fp instruction,
+ * which we have already installed into env->fp_status. Or -1 for
+ * no previous fp instruction. Note that we exit the TB when writing
+ * to any system register, which includes CSR_FRM, so we do not have
+ * to reset this known value.
+ */
+ int frm;
+ RISCVMXL ol;
+ bool virt_enabled;
+ bool ext_ifencei;
+ bool hlsx;
+ /* vector extension */
+ bool vill;
+ uint8_t lmul;
+ uint8_t sew;
+ uint16_t vlen;
+ uint16_t mlen;
+ bool vl_eq_vlmax;
+ uint8_t ntemp;
+ CPUState *cs;
+ TCGv zero;
+ /* Space for 3 operands plus 1 extra for address computation. */
+ TCGv temp[4];
+ /* PointerMasking extension */
+ bool pm_enabled;
+ TCGv pm_mask;
+ TCGv pm_base;
+} DisasContext;
+
+static inline bool has_ext(DisasContext *ctx, uint32_t ext)
+{
+ return ctx->misa_ext & ext;
+}
+
+#ifdef TARGET_RISCV32
+#define get_xl(ctx) MXL_RV32
+#elif defined(CONFIG_USER_ONLY)
+#define get_xl(ctx) MXL_RV64
+#else
+#define get_xl(ctx) ((ctx)->xl)
+#endif
+
+/* The word size for this machine mode. */
+static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
+{
+ return 16 << get_xl(ctx);
+}
+
+/* The operation length, as opposed to the xlen. */
+#ifdef TARGET_RISCV32
+#define get_ol(ctx) MXL_RV32
+#else
+#define get_ol(ctx) ((ctx)->ol)
+#endif
+
+static inline int get_olen(DisasContext *ctx)
+{
+ return 16 << get_ol(ctx);
+}
+
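+/*
+ * MXL_RV32 is 1 and MXL_RV64 is 2, so the helpers above evaluate to
+ * 32 (16 << 1) and 64 (16 << 2) respectively.
+ */
+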
+/*
+ * RISC-V requires NaN-boxing of narrower width floating point values.
+ * This applies when a 32-bit value is assigned to a 64-bit FP register.
+ * For consistency and simplicity, we nanbox results even when the RVD
+ * extension is not present.
+ */
+static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+{
+ tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
+}
+
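+/*
+ * For example, the single-precision value 1.0f (0x3f800000) is held
+ * in a 64-bit FPR as 0xffffffff3f800000.
+ */
+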
+/*
+ * A narrow n-bit operation, where n < FLEN, checks that input operands
+ * are correctly NaN-boxed, i.e., all upper FLEN - n bits are 1.
+ * If so, the least-significant bits of the input are used, otherwise the
+ * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
+ *
+ * Here, the result is always NaN-boxed, even the canonical NaN.
+ */
+static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+{
+ TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
+ TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);
+
+ tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
+}
+
+static void generate_exception(DisasContext *ctx, int excp)
+{
+ tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void generate_exception_mtval(DisasContext *ctx, int excp)
+{
+ tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
+ tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_exception_illegal(DisasContext *ctx)
+{
+ generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
+}
+
+static void gen_exception_inst_addr_mis(DisasContext *ctx)
+{
+ generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&ctx->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_pc, dest);
+ tcg_gen_exit_tb(ctx->base.tb, n);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
+
+/*
+ * Wrappers for getting reg values.
+ *
+ * The $zero register does not have cpu_gpr[0] allocated -- we supply the
+ * constant zero as a source, and an uninitialized sink as destination.
+ *
+ * Further, we may provide an extension for word operations.
+ */
+static TCGv temp_new(DisasContext *ctx)
+{
+ assert(ctx->ntemp < ARRAY_SIZE(ctx->temp));
+ return ctx->temp[ctx->ntemp++] = tcg_temp_new();
+}
+
+static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
+{
+ TCGv t;
+
+ if (reg_num == 0) {
+ return ctx->zero;
+ }
+
+ switch (get_ol(ctx)) {
+ case MXL_RV32:
+ switch (ext) {
+ case EXT_NONE:
+ break;
+ case EXT_SIGN:
+ t = temp_new(ctx);
+ tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
+ return t;
+ case EXT_ZERO:
+ t = temp_new(ctx);
+ tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
+ return t;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case MXL_RV64:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return cpu_gpr[reg_num];
+}
+
+static TCGv dest_gpr(DisasContext *ctx, int reg_num)
+{
+ if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
+ return temp_new(ctx);
+ }
+ return cpu_gpr[reg_num];
+}
+
+static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
+{
+ if (reg_num != 0) {
+ switch (get_ol(ctx)) {
+ case MXL_RV32:
+ tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
+ break;
+ case MXL_RV64:
+ tcg_gen_mov_tl(cpu_gpr[reg_num], t);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
+{
+ target_ulong next_pc;
+
+ /* check misaligned: */
+ next_pc = ctx->base.pc_next + imm;
+ if (!has_ext(ctx, RVC)) {
+ if ((next_pc & 0x3) != 0) {
+ gen_exception_inst_addr_mis(ctx);
+ return;
+ }
+ }
+ if (rd != 0) {
+ tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
+ }
+
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+/*
+ * Generates address adjustment for PointerMasking
+ */
+static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src)
+{
+ TCGv temp;
+ if (!s->pm_enabled) {
+ /* Load unmodified address */
+ return src;
+ } else {
+ temp = temp_new(s);
+ tcg_gen_andc_tl(temp, src, s->pm_mask);
+ tcg_gen_or_tl(temp, temp, s->pm_base);
+ return temp;
+ }
+}
+
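+/*
+ * Illustrative example (mask/base values are assumptions): with
+ * pm_mask = 0xff00000000000000 and pm_base = 0, the computation above
+ * clears the top address byte, stripping a software pointer tag
+ * before the access.
+ */
+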
+#ifndef CONFIG_USER_ONLY
+/*
+ * The states of mstatus_fs are:
+ * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
+ * We will have already diagnosed disabled state,
+ * and need to turn initial/clean into dirty.
+ */
+static void mark_fs_dirty(DisasContext *ctx)
+{
+ TCGv tmp;
+
+ if (ctx->mstatus_fs != MSTATUS_FS) {
+ /* Remember the state change for the rest of the TB. */
+ ctx->mstatus_fs = MSTATUS_FS;
+
+ tmp = tcg_temp_new();
+ tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
+ tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+ tcg_temp_free(tmp);
+ }
+
+ if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) {
+ /* Remember the state change for the rest of the TB. */
+ ctx->mstatus_hs_fs = MSTATUS_FS;
+
+ tmp = tcg_temp_new();
+ tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS);
+ tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_temp_free(tmp);
+ }
+}
+#else
+static inline void mark_fs_dirty(DisasContext *ctx) { }
+#endif
+
+static void gen_set_rm(DisasContext *ctx, int rm)
+{
+ if (ctx->frm == rm) {
+ return;
+ }
+ ctx->frm = rm;
+ gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
+}
+
+static int ex_plus_1(DisasContext *ctx, int nf)
+{
+ return nf + 1;
+}
+
+#define EX_SH(amount) \
+ static int ex_shift_##amount(DisasContext *ctx, int imm) \
+ { \
+ return imm << amount; \
+ }
+EX_SH(1)
+EX_SH(2)
+EX_SH(3)
+EX_SH(4)
+EX_SH(12)
+
+#define REQUIRE_EXT(ctx, ext) do { \
+ if (!has_ext(ctx, ext)) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_32BIT(ctx) do { \
+ if (get_xl(ctx) != MXL_RV32) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_64BIT(ctx) do { \
+ if (get_xl(ctx) < MXL_RV64) { \
+ return false; \
+ } \
+} while (0)
+
+static int ex_rvc_register(DisasContext *ctx, int reg)
+{
+ return 8 + reg;
+}
+
+static int ex_rvc_shifti(DisasContext *ctx, int imm)
+{
+ /* For RV128 a shamt of 0 means a shift by 64. */
+ return imm ? imm : 64;
+}
+
+/* Include the auto-generated decoder for 32 bit insn */
+#include "decode-insn32.c.inc"
+
+static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, target_long))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ func(dest, src1, a->imm);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+ TCGv src2 = tcg_constant_tl(a->imm);
+
+ func(dest, src1, src2);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+ TCGv src2 = get_gpr(ctx, a->rs2, ext);
+
+ func(dest, src1, src2);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
+ void (*f_tl)(TCGv, TCGv, TCGv),
+ void (*f_32)(TCGv, TCGv, TCGv))
+{
+ int olen = get_olen(ctx);
+
+ if (olen != TARGET_LONG_BITS) {
+ if (olen == 32) {
+ f_tl = f_32;
+ } else {
+ g_assert_not_reached();
+ }
+ }
+ return gen_arith(ctx, a, ext, f_tl);
+}
+
+static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, target_long))
+{
+ TCGv dest, src1;
+ int max_len = get_olen(ctx);
+
+ if (a->shamt >= max_len) {
+ return false;
+ }
+
+ dest = dest_gpr(ctx, a->rd);
+ src1 = get_gpr(ctx, a->rs1, ext);
+
+ func(dest, src1, a->shamt);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
+ DisasExtend ext,
+ void (*f_tl)(TCGv, TCGv, target_long),
+ void (*f_32)(TCGv, TCGv, target_long))
+{
+ int olen = get_olen(ctx);
+ if (olen != TARGET_LONG_BITS) {
+ if (olen == 32) {
+ f_tl = f_32;
+ } else {
+ g_assert_not_reached();
+ }
+ }
+ return gen_shift_imm_fn(ctx, a, ext, f_tl);
+}
+
+static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest, src1, src2;
+ int max_len = get_olen(ctx);
+
+ if (a->shamt >= max_len) {
+ return false;
+ }
+
+ dest = dest_gpr(ctx, a->rd);
+ src1 = get_gpr(ctx, a->rs1, ext);
+ src2 = tcg_constant_tl(a->shamt);
+
+ func(dest, src1, src2);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ TCGv ext2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1);
+ func(dest, src1, ext2);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ tcg_temp_free(ext2);
+ return true;
+}
+
+static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
+ void (*f_tl)(TCGv, TCGv, TCGv),
+ void (*f_32)(TCGv, TCGv, TCGv))
+{
+ int olen = get_olen(ctx);
+ if (olen != TARGET_LONG_BITS) {
+ if (olen == 32) {
+ f_tl = f_32;
+ } else {
+ g_assert_not_reached();
+ }
+ }
+ return gen_shift(ctx, a, ext, f_tl);
+}
+
+static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ func(dest, src1);
+
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
+ void (*f_tl)(TCGv, TCGv),
+ void (*f_32)(TCGv, TCGv))
+{
+ int olen = get_olen(ctx);
+
+ if (olen != TARGET_LONG_BITS) {
+ if (olen == 32) {
+ f_tl = f_32;
+ } else {
+ g_assert_not_reached();
+ }
+ }
+ return gen_unary(ctx, a, ext, f_tl);
+}
+
+static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUState *cpu = ctx->cs;
+ CPURISCVState *env = cpu->env_ptr;
+
+ return cpu_ldl_code(env, pc);
+}
+
+/* Include insn module translation function */
+#include "insn_trans/trans_rvi.c.inc"
+#include "insn_trans/trans_rvm.c.inc"
+#include "insn_trans/trans_rva.c.inc"
+#include "insn_trans/trans_rvf.c.inc"
+#include "insn_trans/trans_rvd.c.inc"
+#include "insn_trans/trans_rvh.c.inc"
+#include "insn_trans/trans_rvv.c.inc"
+#include "insn_trans/trans_rvb.c.inc"
+#include "insn_trans/trans_privileged.c.inc"
+
+/* Include the auto-generated decoder for 16 bit insn */
+#include "decode-insn16.c.inc"
+
+static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
+{
+ /* check for compressed insn */
+ if (extract16(opcode, 0, 2) != 3) {
+ if (!has_ext(ctx, RVC)) {
+ gen_exception_illegal(ctx);
+ } else {
+ ctx->pc_succ_insn = ctx->base.pc_next + 2;
+ if (!decode_insn16(ctx, opcode)) {
+ gen_exception_illegal(ctx);
+ }
+ }
+ } else {
+ uint32_t opcode32 = opcode;
+ opcode32 = deposit32(opcode32, 16, 16,
+ translator_lduw(env, &ctx->base,
+ ctx->base.pc_next + 2));
+ ctx->pc_succ_insn = ctx->base.pc_next + 4;
+ if (!decode_insn32(ctx, opcode32)) {
+ gen_exception_illegal(ctx);
+ }
+ }
+}
+
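+/*
+ * For example, the canonical NOP 0x0001 (c.addi x0, x0, 0) has low
+ * bits 0b01, so it takes the 16-bit compressed path above.
+ */
+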
+static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPURISCVState *env = cs->env_ptr;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint32_t tb_flags = ctx->base.tb->flags;
+
+ ctx->pc_succ_insn = ctx->base.pc_first;
+ ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
+ ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
+ ctx->priv_ver = env->priv_ver;
+#if !defined(CONFIG_USER_ONLY)
+ if (riscv_has_ext(env, RVH)) {
+ ctx->virt_enabled = riscv_cpu_virt_enabled(env);
+ } else {
+ ctx->virt_enabled = false;
+ }
+#else
+ ctx->virt_enabled = false;
+#endif
+ ctx->misa_ext = env->misa_ext;
+ ctx->frm = -1; /* unknown rounding mode */
+ ctx->ext_ifencei = cpu->cfg.ext_ifencei;
+ ctx->vlen = cpu->cfg.vlen;
+ ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
+ ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
+ ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
+ ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
+ ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
+ ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
+ ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+ ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
+ ctx->cs = cs;
+ ctx->ntemp = 0;
+ memset(ctx->temp, 0, sizeof(ctx->temp));
+ ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED);
+ int priv = tb_flags & TB_FLAGS_PRIV_MMU_MASK;
+ ctx->pm_mask = pm_mask[priv];
+ ctx->pm_base = pm_base[priv];
+
+ ctx->zero = tcg_constant_tl(0);
+}
+
+static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next);
+}
+
+static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPURISCVState *env = cpu->env_ptr;
+ uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
+
+ ctx->ol = ctx->xl;
+ decode_opc(env, ctx, opcode16);
+ ctx->base.pc_next = ctx->pc_succ_insn;
+
+ for (int i = ctx->ntemp - 1; i >= 0; --i) {
+ tcg_temp_free(ctx->temp[i]);
+ ctx->temp[i] = NULL;
+ }
+ ctx->ntemp = 0;
+
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ target_ulong page_start;
+
+ page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_TOO_MANY:
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+#ifndef CONFIG_USER_ONLY
+ RISCVCPU *rvcpu = RISCV_CPU(cpu);
+ CPURISCVState *env = &rvcpu->env;
+#endif
+
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+#ifndef CONFIG_USER_ONLY
+ qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
+#endif
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps riscv_tr_ops = {
+ .init_disas_context = riscv_tr_init_disas_context,
+ .tb_start = riscv_tr_tb_start,
+ .insn_start = riscv_tr_insn_start,
+ .translate_insn = riscv_tr_translate_insn,
+ .tb_stop = riscv_tr_tb_stop,
+ .disas_log = riscv_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void riscv_translate_init(void)
+{
+ int i;
+
+ /*
+ * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
+ * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
+ * unless you specifically block reads/writes to reg 0.
+ */
+ cpu_gpr[0] = NULL;
+
+ for (i = 1; i < 32; i++) {
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
+ }
+
+ for (i = 0; i < 32; i++) {
+ cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPURISCVState, fpr[i]), riscv_fpr_regnames[i]);
+ }
+
+ cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc");
+ cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl");
+ load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res),
+ "load_res");
+ load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
+ "load_val");
+#ifndef CONFIG_USER_ONLY
+ /* Assign PM CSRs to tcg globals */
+ pm_mask[PRV_U] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmmask), "upmmask");
+ pm_base[PRV_U] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmbase), "upmbase");
+ pm_mask[PRV_S] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmmask), "spmmask");
+ pm_base[PRV_S] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmbase), "spmbase");
+ pm_mask[PRV_M] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmmask), "mpmmask");
+ pm_base[PRV_M] =
+ tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmbase), "mpmbase");
+#endif
+}
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
new file mode 100644
index 000000000..12c31aa4b
--- /dev/null
+++ b/target/riscv/vector_helper.c
@@ -0,0 +1,4872 @@
+/*
+ * RISC-V Vector Extension Helpers for QEMU.
+ *
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/memop.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "internals.h"
+#include <math.h>
+
+target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
+ target_ulong s2)
+{
+ int vlmax, vl;
+ RISCVCPU *cpu = env_archcpu(env);
+ uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
+ uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
+ bool vill = FIELD_EX64(s2, VTYPE, VILL);
+ target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
+
+ if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) {
+ /* only set vill bit. */
+ env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
+ env->vl = 0;
+ env->vstart = 0;
+ return 0;
+ }
+
+ vlmax = vext_get_vlmax(cpu, s2);
+ if (s1 <= vlmax) {
+ vl = s1;
+ } else {
+ vl = vlmax;
+ }
+ env->vl = vl;
+ env->vtype = s2;
+ env->vstart = 0;
+ return vl;
+}
+
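+/*
+ * Illustrative example (parameter values are assumptions): with
+ * VLEN = 128 bits, SEW = 32 and LMUL = 1, vlmax is 4, so a request
+ * of s1 = 10 is clamped to vl = 4.
+ */
+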
+/*
+ * Note that vector data is stored in host-endian 64-bit chunks,
+ * so addressing units smaller than that need a host-endian fixup.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define H1(x) ((x) ^ 7)
+#define H1_2(x) ((x) ^ 6)
+#define H1_4(x) ((x) ^ 4)
+#define H2(x) ((x) ^ 3)
+#define H4(x) ((x) ^ 1)
+#define H8(x) ((x))
+#else
+#define H1(x) (x)
+#define H1_2(x) (x)
+#define H1_4(x) (x)
+#define H2(x) (x)
+#define H4(x) (x)
+#define H8(x) (x)
+#endif
+
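+/*
+ * Example: on a big-endian host, byte element 0 lives at offset
+ * H1(0) = 7 within the first 64-bit chunk, matching the element
+ * order a little-endian guest expects.
+ */
+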
+static inline uint32_t vext_nf(uint32_t desc)
+{
+ return FIELD_EX32(simd_data(desc), VDATA, NF);
+}
+
+static inline uint32_t vext_mlen(uint32_t desc)
+{
+ return FIELD_EX32(simd_data(desc), VDATA, MLEN);
+}
+
+static inline uint32_t vext_vm(uint32_t desc)
+{
+ return FIELD_EX32(simd_data(desc), VDATA, VM);
+}
+
+static inline uint32_t vext_lmul(uint32_t desc)
+{
+ return FIELD_EX32(simd_data(desc), VDATA, LMUL);
+}
+
+static uint32_t vext_wd(uint32_t desc)
+{
+ return (simd_data(desc) >> 11) & 0x1;
+}
+
+/*
+ * Get vector group length in bytes. Its range is [64, 2048].
+ *
+ * As simd_desc supports at most 256 bytes, the max vlen is 512 bits.
+ * So vlen in bytes is encoded as maxsz.
+ */
+static inline uint32_t vext_maxsz(uint32_t desc)
+{
+ return simd_maxsz(desc) << vext_lmul(desc);
+}
+
+/*
+ * This function checks watchpoints before the real load operation.
+ *
+ * In softmmu mode, the TLB API probe_access is enough for the watchpoint check.
+ * In user mode, there is no watchpoint support now.
+ *
+ * It will trigger an exception if there is no mapping in the TLB
+ * and the page table walk can't fill the TLB entry. The guest software
+ * can then return here after processing the exception, or never return.
+ */
+static void probe_pages(CPURISCVState *env, target_ulong addr,
+ target_ulong len, uintptr_t ra,
+ MMUAccessType access_type)
+{
+ target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
+ target_ulong curlen = MIN(pagelen, len);
+
+ probe_access(env, addr, curlen, access_type,
+ cpu_mmu_index(env, false), ra);
+ if (len > curlen) {
+ addr += curlen;
+ curlen = len - curlen;
+ probe_access(env, addr, curlen, access_type,
+ cpu_mmu_index(env, false), ra);
+ }
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
+{
+ /*
+ * Split the remaining range into two parts.
+ * The first part is in the last uint64_t unit.
+ * The second part starts from the next uint64_t unit.
+ */
+ int part1 = 0, part2 = tot - cnt;
+ if (cnt % 8) {
+ part1 = 8 - (cnt % 8);
+ part2 = tot - cnt - part1;
+ memset(QEMU_ALIGN_PTR_DOWN(tail, 8), 0, part1);
+ memset(QEMU_ALIGN_PTR_UP(tail, 8), 0, part2);
+ } else {
+ memset(tail, 0, part2);
+ }
+}
+#else
+static void vext_clear(void *tail, uint32_t cnt, uint32_t tot)
+{
+ memset(tail, 0, tot - cnt);
+}
+#endif
+
+static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+{
+ int8_t *cur = ((int8_t *)vd + H1(idx));
+ vext_clear(cur, cnt, tot);
+}
+
+static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+{
+ int16_t *cur = ((int16_t *)vd + H2(idx));
+ vext_clear(cur, cnt, tot);
+}
+
+static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+{
+ int32_t *cur = ((int32_t *)vd + H4(idx));
+ vext_clear(cur, cnt, tot);
+}
+
+static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot)
+{
+ int64_t *cur = (int64_t *)vd + idx;
+ vext_clear(cur, cnt, tot);
+}
+
+static inline void vext_set_elem_mask(void *v0, int mlen, int index,
+ uint8_t value)
+{
+ int idx = (index * mlen) / 64;
+ int pos = (index * mlen) % 64;
+ uint64_t old = ((uint64_t *)v0)[idx];
+ ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value);
+}
+
+static inline int vext_elem_mask(void *v0, int mlen, int index)
+{
+ int idx = (index * mlen) / 64;
+ int pos = (index * mlen) % 64;
+ return (((uint64_t *)v0)[idx] >> pos) & 1;
+}
+
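+/*
+ * Example: with mlen = 8, the mask bit for element 3 sits at bit
+ * position 24 of v0[0] (idx = 0, pos = 24).
+ */
+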
+/* element operations for load and store */
+typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr,
+ uint32_t idx, void *vd, uintptr_t retaddr);
+typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot);
+
+#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \
+static void NAME(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr)\
+{ \
+ MTYPE data; \
+ ETYPE *cur = ((ETYPE *)vd + H(idx)); \
+ data = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
+ *cur = data; \
+} \
+
+GEN_VEXT_LD_ELEM(ldb_b, int8_t, int8_t, H1, ldsb)
+GEN_VEXT_LD_ELEM(ldb_h, int8_t, int16_t, H2, ldsb)
+GEN_VEXT_LD_ELEM(ldb_w, int8_t, int32_t, H4, ldsb)
+GEN_VEXT_LD_ELEM(ldb_d, int8_t, int64_t, H8, ldsb)
+GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw)
+GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw)
+GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw)
+GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl)
+GEN_VEXT_LD_ELEM(lde_b, int8_t, int8_t, H1, ldsb)
+GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw)
+GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq)
+GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, uint8_t, H1, ldub)
+GEN_VEXT_LD_ELEM(ldbu_h, uint8_t, uint16_t, H2, ldub)
+GEN_VEXT_LD_ELEM(ldbu_w, uint8_t, uint32_t, H4, ldub)
+GEN_VEXT_LD_ELEM(ldbu_d, uint8_t, uint64_t, H8, ldub)
+GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw)
+GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw)
+GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw)
+GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl)
+GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl)
+
+#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
+static void NAME(CPURISCVState *env, abi_ptr addr, \
+ uint32_t idx, void *vd, uintptr_t retaddr)\
+{ \
+ ETYPE data = *((ETYPE *)vd + H(idx)); \
+ cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
+}
+
+GEN_VEXT_ST_ELEM(stb_b, int8_t, H1, stb)
+GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb)
+GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb)
+GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb)
+GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw)
+GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw)
+GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw)
+GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl)
+GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl)
+GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb)
+GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
+GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
+GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
+
+/*
+ *** stride: access vector element from strided memory
+ */
+static void
+vext_ldst_stride(void *vd, void *v0, target_ulong base,
+ target_ulong stride, CPURISCVState *env,
+ uint32_t desc, uint32_t vm,
+ vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra,
+ MMUAccessType access_type)
+{
+ uint32_t i, k;
+ uint32_t nf = vext_nf(desc);
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+
+ /* probe every access */
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ probe_pages(env, base + stride * i, nf * msz, ra, access_type);
+ }
+ /* do real access */
+ for (i = 0; i < env->vl; i++) {
+ k = 0;
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ while (k < nf) {
+ target_ulong addr = base + stride * i + k * msz;
+ ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ k++;
+ }
+ }
+ /* clear tail elements */
+ if (clear_elem) {
+ for (k = 0; k < nf; k++) {
+ clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+ }
+ }
+}
+
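+/*
+ * Illustrative layout: for nf = 2 and msz = 4, element i of field k
+ * is accessed at base + stride * i + k * 4 and placed at register
+ * slot i + k * vlmax.
+ */
+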
+#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ target_ulong stride, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vm = vext_vm(desc); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
+ CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b, clearb)
+GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h, clearh)
+GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w, clearl)
+GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d, clearq)
+GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h, clearh)
+GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w, clearl)
+GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d, clearq)
+GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w, clearl)
+GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d, clearq)
+GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b, clearb)
+GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h, clearh)
+GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w, clearl)
+GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d, clearq)
+GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b, clearb)
+GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h, clearh)
+GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w, clearl)
+GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d, clearq)
+GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
+
+#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ target_ulong stride, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vm = vext_vm(desc); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
+ NULL, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t, int8_t, stb_b)
+GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t, int16_t, stb_h)
+GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t, int32_t, stb_w)
+GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t, int64_t, stb_d)
+GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h)
+GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w)
+GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d)
+GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w)
+GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d)
+GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t, int8_t, ste_b)
+GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h)
+GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w)
+GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d)
+
+/*
+ *** unit-stride: access elements stored contiguously in memory
+ */
+
+/* unmasked unit-stride load and store operations */
+static void
+vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
+ vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra,
+ MMUAccessType access_type)
+{
+ uint32_t i, k;
+ uint32_t nf = vext_nf(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+
+ /* probe every access */
+ probe_pages(env, base, env->vl * nf * msz, ra, access_type);
+ /* load bytes from guest memory */
+ for (i = 0; i < env->vl; i++) {
+ k = 0;
+ while (k < nf) {
+ target_ulong addr = base + (i * nf + k) * msz;
+ ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ k++;
+ }
+ }
+ /* clear tail elements */
+ if (clear_elem) {
+ for (k = 0; k < nf; k++) {
+ clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+ }
+ }
+}
+
+/*
+ * Masked unit-stride load and store operations are a special case of the
+ * strided forms, with stride = NF * sizeof(MTYPE).
+ */
+
+#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
+ CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC(), MMU_DATA_LOAD); \
+} \
+ \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN, \
+ sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b, clearb)
+GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h, clearh)
+GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w, clearl)
+GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d, clearq)
+GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h, clearh)
+GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w, clearl)
+GEN_VEXT_LD_US(vlh_v_d, int16_t, int64_t, ldh_d, clearq)
+GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w, clearl)
+GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d, clearq)
+GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b, clearb)
+GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h, clearh)
+GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w, clearl)
+GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d, clearq)
+GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b, clearb)
+GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h, clearh)
+GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w, clearl)
+GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d, clearq)
+GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq)
+
+#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \
+ vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
+ NULL, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC(), MMU_DATA_STORE); \
+} \
+ \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_us(vd, base, env, desc, STORE_FN, NULL, \
+ sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\
+}
+
+GEN_VEXT_ST_US(vsb_v_b, int8_t, int8_t, stb_b)
+GEN_VEXT_ST_US(vsb_v_h, int8_t, int16_t, stb_h)
+GEN_VEXT_ST_US(vsb_v_w, int8_t, int32_t, stb_w)
+GEN_VEXT_ST_US(vsb_v_d, int8_t, int64_t, stb_d)
+GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h)
+GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w)
+GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d)
+GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w)
+GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d)
+GEN_VEXT_ST_US(vse_v_b, int8_t, int8_t, ste_b)
+GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h)
+GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w)
+GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d)
+
+/*
+ *** index: access vector elements at addresses taken from an index vector
+ */
+typedef target_ulong vext_get_index_addr(target_ulong base,
+ uint32_t idx, void *vs2);
+
+#define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H) \
+static target_ulong NAME(target_ulong base, \
+ uint32_t idx, void *vs2) \
+{ \
+ return (base + *((ETYPE *)vs2 + H(idx))); \
+}
+
+GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t, H1)
+GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2)
+GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4)
+GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8)
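+
+/*
+ * For example, idx_w(base, i, vs2) expands to
+ *     base + *((int32_t *)vs2 + H4(i))
+ * so each address offset is a signed 32-bit element of the index
+ * vector vs2.
+ */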
+
+static inline void
+vext_ldst_index(void *vd, void *v0, target_ulong base,
+ void *vs2, CPURISCVState *env, uint32_t desc,
+ vext_get_index_addr get_index_addr,
+ vext_ldst_elem_fn *ldst_elem,
+ clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra,
+ MMUAccessType access_type)
+{
+ uint32_t i, k;
+ uint32_t nf = vext_nf(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+
+    /* probe every access */
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra,
+ access_type);
+ }
+ /* load bytes from guest memory */
+ for (i = 0; i < env->vl; i++) {
+ k = 0;
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ while (k < nf) {
+ abi_ptr addr = get_index_addr(base, i, vs2) + k * msz;
+ ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ k++;
+ }
+ }
+ /* clear tail elements */
+ if (clear_elem) {
+ for (k = 0; k < nf; k++) {
+ clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+ }
+ }
+}
+
+#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
+ LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC(), MMU_DATA_LOAD); \
+}
+
+GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b, clearb)
+GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h, clearh)
+GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w, clearl)
+GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d, clearq)
+GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h, clearh)
+GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w, clearl)
+GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d, clearq)
+GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w, clearl)
+GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d, clearq)
+GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b, clearb)
+GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h, clearh)
+GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w, clearl)
+GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d, clearq)
+GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b, clearb)
+GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h, clearh)
+GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w, clearl)
+GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d, clearq)
+GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh)
+GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl)
+GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq)
+GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl)
+GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq)
+
+#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
+ STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\
+ GETPC(), MMU_DATA_STORE); \
+}
+
+GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t, int8_t, idx_b, stb_b)
+GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t, int16_t, idx_h, stb_h)
+GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t, int32_t, idx_w, stb_w)
+GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d)
+GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h)
+GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w)
+GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d)
+GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w)
+GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d)
+GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b)
+GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h)
+GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w)
+GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d)
+
+/*
+ *** unit-stride fault-only-first load instructions
+ */
+static inline void
+vext_ldff(void *vd, void *v0, target_ulong base,
+ CPURISCVState *env, uint32_t desc,
+ vext_ldst_elem_fn *ldst_elem,
+ clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+ void *host;
+ uint32_t i, k, vl = 0;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t nf = vext_nf(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+ target_ulong addr, offset, remain;
+
+    /* probe every access */
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ addr = base + nf * i * msz;
+ if (i == 0) {
+ probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
+ } else {
+ /* if it triggers an exception, no need to check watchpoint */
+ remain = nf * msz;
+ while (remain > 0) {
+ offset = -(addr | TARGET_PAGE_MASK);
+ host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
+ cpu_mmu_index(env, false));
+ if (host) {
+#ifdef CONFIG_USER_ONLY
+ if (page_check_range(addr, nf * msz, PAGE_READ) < 0) {
+ vl = i;
+ goto ProbeSuccess;
+ }
+#else
+ probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD);
+#endif
+ } else {
+ vl = i;
+ goto ProbeSuccess;
+ }
+ if (remain <= offset) {
+ break;
+ }
+ remain -= offset;
+ addr += offset;
+ }
+ }
+ }
+ProbeSuccess:
+ /* load bytes from guest memory */
+ if (vl != 0) {
+ env->vl = vl;
+ }
+ for (i = 0; i < env->vl; i++) {
+ k = 0;
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ while (k < nf) {
+ target_ulong addr = base + (i * nf + k) * msz;
+ ldst_elem(env, addr, i + k * vlmax, vd, ra);
+ k++;
+ }
+ }
+ /* clear tail elements */
+ if (vl != 0) {
+ return;
+ }
+ for (k = 0; k < nf; k++) {
+ clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz);
+ }
+}
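+
+/*
+ * Fault-only-first semantics: element 0 is probed with faults raised as
+ * usual, so a trap there behaves like an ordinary load; a fault at any
+ * later element instead truncates vl to the number of elements already
+ * known to be accessible, and the load completes for those elements.
+ */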
+
+#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \
+ sizeof(ETYPE), sizeof(MTYPE), GETPC()); \
+}
+
+GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb)
+GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh)
+GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl)
+GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq)
+GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh)
+GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl)
+GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq)
+GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl)
+GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq)
+GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb)
+GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh)
+GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl)
+GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq)
+GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb)
+GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh)
+GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl)
+GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq)
+GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh)
+GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
+GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
+GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
+GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
+
+/*
+ *** Vector AMO Operations (Zvamo)
+ */
+typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
+ uint32_t wd, uint32_t idx, CPURISCVState *env,
+ uintptr_t retaddr);
+
+/* no atomic operation for vector atomic instructions */
+#define DO_SWAP(N, M) (M)
+#define DO_AND(N, M) (N & M)
+#define DO_XOR(N, M) (N ^ M)
+#define DO_OR(N, M) (N | M)
+#define DO_ADD(N, M) (N + M)
+
+#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
+static void \
+vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \
+ uint32_t wd, uint32_t idx, \
+ CPURISCVState *env, uintptr_t retaddr)\
+{ \
+ typedef int##ESZ##_t ETYPE; \
+ typedef int##MSZ##_t MTYPE; \
+ typedef uint##MSZ##_t UMTYPE __attribute__((unused)); \
+ ETYPE *pe3 = (ETYPE *)vs3 + H(idx); \
+ MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3; \
+ \
+ cpu_st##SUF##_data(env, addr, DO_OP(a, b)); \
+ if (wd) { \
+ *pe3 = a; \
+ } \
+}
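+
+/*
+ * As an illustration, GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4,
+ * DO_ADD, l) yields a function that loads a 32-bit value a from memory,
+ * stores a + vs3[idx] back to the same address, and, when wd is set,
+ * writes the old memory value a into vs3[idx].
+ */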
+
+/* Signed min/max */
+#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
+
+/* Unsigned min/max */
+#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
+#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
+
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
+
+static inline void
+vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+ void *vs2, CPURISCVState *env, uint32_t desc,
+ vext_get_index_addr get_index_addr,
+ vext_amo_noatomic_fn *noatomic_op,
+ clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+ uint32_t i;
+ target_long addr;
+ uint32_t wd = vext_wd(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
+ probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
+ }
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ addr = get_index_addr(base, i, vs2);
+ noatomic_op(vs3, addr, wd, i, env, ra);
+ }
+ clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
+}
+
+#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \
+ INDEX_FN, vext_##NAME##_noatomic_op, \
+ CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC()); \
+}
+
+GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
+
+/*
+ *** Vector Integer Arithmetic Instructions
+ */
+
+/* expand macro args before macro */
+#define RVVCALL(macro, ...) macro(__VA_ARGS__)
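+
+/*
+ * e.g. RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD) below
+ * first expands OP_SSS_B into its five type arguments, so OPIVV2 is
+ * invoked as OPIVV2(vadd_vv_b, int8_t, int8_t, int8_t, int8_t, int8_t,
+ * H1, H1, H1, DO_ADD).
+ */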
+
+/* (TD, T1, T2, TX1, TX2) */
+#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
+#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
+#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
+#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
+#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
+#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
+#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
+#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
+#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
+#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
+#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
+#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
+#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
+#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
+#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
+#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
+#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
+#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
+#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
+#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
+#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
+#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
+#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
+#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
+#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
+#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
+#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
+#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
+#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
+#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
+
+/* operation of two vector elements */
+typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
+
+#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1); \
+}
+#define DO_SUB(N, M) (N - M)
+#define DO_RSUB(N, M) (M - N)
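+
+/*
+ * The vadd_vv_b line below, for instance, generates:
+ *
+ *     static void do_vadd_vv_b(void *vd, void *vs1, void *vs2, int i)
+ *     {
+ *         int8_t s1 = *((int8_t *)vs1 + H1(i));
+ *         int8_t s2 = *((int8_t *)vs2 + H1(i));
+ *         *((int8_t *)vd + H1(i)) = DO_ADD(s2, s1);
+ *     }
+ */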
+
+RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD)
+RVVCALL(OPIVV2, vadd_vv_h, OP_SSS_H, H2, H2, H2, DO_ADD)
+RVVCALL(OPIVV2, vadd_vv_w, OP_SSS_W, H4, H4, H4, DO_ADD)
+RVVCALL(OPIVV2, vadd_vv_d, OP_SSS_D, H8, H8, H8, DO_ADD)
+RVVCALL(OPIVV2, vsub_vv_b, OP_SSS_B, H1, H1, H1, DO_SUB)
+RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
+RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
+RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
+
+static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
+ CPURISCVState *env, uint32_t desc,
+ uint32_t esz, uint32_t dsz,
+ opivv2_fn *fn, clear_fn *clearfn)
+{
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ uint32_t i;
+
+ for (i = 0; i < vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ fn(vd, vs1, vs2, i);
+ }
+ clearfn(vd, vl, vl * dsz, vlmax * dsz);
+}
+
+/* generate the helpers for OPIVV */
+#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
+ do_##NAME, CLEAR_FN); \
+}
+
+GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq)
+
+typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
+
+/*
+ * (T1)s1 gives the scalar its real operand type.
+ * (TX1)(T1)s1 then expands it to the extended operand type used by
+ * widening or narrowing operations.
+ */
+#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
+}
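+
+/*
+ * For instance, vadd_vx_b below uses (int8_t)s1, truncating the scalar
+ * to the element width, while the widening vwadd_vx_b further on uses
+ * (int16_t)(int8_t)s1, which truncates and then sign-extends the scalar
+ * to the wider operand type.
+ */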
+
+RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
+RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
+RVVCALL(OPIVX2, vadd_vx_w, OP_SSS_W, H4, H4, DO_ADD)
+RVVCALL(OPIVX2, vadd_vx_d, OP_SSS_D, H8, H8, DO_ADD)
+RVVCALL(OPIVX2, vsub_vx_b, OP_SSS_B, H1, H1, DO_SUB)
+RVVCALL(OPIVX2, vsub_vx_h, OP_SSS_H, H2, H2, DO_SUB)
+RVVCALL(OPIVX2, vsub_vx_w, OP_SSS_W, H4, H4, DO_SUB)
+RVVCALL(OPIVX2, vsub_vx_d, OP_SSS_D, H8, H8, DO_SUB)
+RVVCALL(OPIVX2, vrsub_vx_b, OP_SSS_B, H1, H1, DO_RSUB)
+RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
+RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
+RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
+
+static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
+ CPURISCVState *env, uint32_t desc,
+ uint32_t esz, uint32_t dsz,
+ opivx2_fn fn, clear_fn *clearfn)
+{
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ uint32_t i;
+
+ for (i = 0; i < vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ fn(vd, s1, vs2, i);
+ }
+ clearfn(vd, vl, vl * dsz, vlmax * dsz);
+}
+
+/* generate the helpers for OPIVX */
+#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
+ do_##NAME, CLEAR_FN); \
+}
+
+GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq)
+
+void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint8_t)) {
+ *(uint8_t *)(d + i) = (uint8_t)b - *(uint8_t *)(a + i);
+ }
+}
+
+void HELPER(vec_rsubs16)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint16_t)) {
+ *(uint16_t *)(d + i) = (uint16_t)b - *(uint16_t *)(a + i);
+ }
+}
+
+void HELPER(vec_rsubs32)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint32_t)) {
+ *(uint32_t *)(d + i) = (uint32_t)b - *(uint32_t *)(a + i);
+ }
+}
+
+void HELPER(vec_rsubs64)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+ intptr_t i;
+
+ for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+ *(uint64_t *)(d + i) = b - *(uint64_t *)(a + i);
+ }
+}
+
+/* Vector Widening Integer Add/Subtract */
+#define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t
+#define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t
+#define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t
+#define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t
+#define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t
+#define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t
+RVVCALL(OPIVV2, vwaddu_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD)
+RVVCALL(OPIVV2, vwaddu_vv_h, WOP_UUU_H, H4, H2, H2, DO_ADD)
+RVVCALL(OPIVV2, vwaddu_vv_w, WOP_UUU_W, H8, H4, H4, DO_ADD)
+RVVCALL(OPIVV2, vwsubu_vv_b, WOP_UUU_B, H2, H1, H1, DO_SUB)
+RVVCALL(OPIVV2, vwsubu_vv_h, WOP_UUU_H, H4, H2, H2, DO_SUB)
+RVVCALL(OPIVV2, vwsubu_vv_w, WOP_UUU_W, H8, H4, H4, DO_SUB)
+RVVCALL(OPIVV2, vwadd_vv_b, WOP_SSS_B, H2, H1, H1, DO_ADD)
+RVVCALL(OPIVV2, vwadd_vv_h, WOP_SSS_H, H4, H2, H2, DO_ADD)
+RVVCALL(OPIVV2, vwadd_vv_w, WOP_SSS_W, H8, H4, H4, DO_ADD)
+RVVCALL(OPIVV2, vwsub_vv_b, WOP_SSS_B, H2, H1, H1, DO_SUB)
+RVVCALL(OPIVV2, vwsub_vv_h, WOP_SSS_H, H4, H2, H2, DO_SUB)
+RVVCALL(OPIVV2, vwsub_vv_w, WOP_SSS_W, H8, H4, H4, DO_SUB)
+RVVCALL(OPIVV2, vwaddu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_ADD)
+RVVCALL(OPIVV2, vwaddu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_ADD)
+RVVCALL(OPIVV2, vwaddu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_ADD)
+RVVCALL(OPIVV2, vwsubu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_SUB)
+RVVCALL(OPIVV2, vwsubu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_SUB)
+RVVCALL(OPIVV2, vwsubu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_SUB)
+RVVCALL(OPIVV2, vwadd_wv_b, WOP_WSSS_B, H2, H1, H1, DO_ADD)
+RVVCALL(OPIVV2, vwadd_wv_h, WOP_WSSS_H, H4, H2, H2, DO_ADD)
+RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD)
+RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB)
+RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB)
+RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB)
+GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq)
+
+RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD)
+RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD)
+RVVCALL(OPIVX2, vwaddu_vx_w, WOP_UUU_W, H8, H4, DO_ADD)
+RVVCALL(OPIVX2, vwsubu_vx_b, WOP_UUU_B, H2, H1, DO_SUB)
+RVVCALL(OPIVX2, vwsubu_vx_h, WOP_UUU_H, H4, H2, DO_SUB)
+RVVCALL(OPIVX2, vwsubu_vx_w, WOP_UUU_W, H8, H4, DO_SUB)
+RVVCALL(OPIVX2, vwadd_vx_b, WOP_SSS_B, H2, H1, DO_ADD)
+RVVCALL(OPIVX2, vwadd_vx_h, WOP_SSS_H, H4, H2, DO_ADD)
+RVVCALL(OPIVX2, vwadd_vx_w, WOP_SSS_W, H8, H4, DO_ADD)
+RVVCALL(OPIVX2, vwsub_vx_b, WOP_SSS_B, H2, H1, DO_SUB)
+RVVCALL(OPIVX2, vwsub_vx_h, WOP_SSS_H, H4, H2, DO_SUB)
+RVVCALL(OPIVX2, vwsub_vx_w, WOP_SSS_W, H8, H4, DO_SUB)
+RVVCALL(OPIVX2, vwaddu_wx_b, WOP_WUUU_B, H2, H1, DO_ADD)
+RVVCALL(OPIVX2, vwaddu_wx_h, WOP_WUUU_H, H4, H2, DO_ADD)
+RVVCALL(OPIVX2, vwaddu_wx_w, WOP_WUUU_W, H8, H4, DO_ADD)
+RVVCALL(OPIVX2, vwsubu_wx_b, WOP_WUUU_B, H2, H1, DO_SUB)
+RVVCALL(OPIVX2, vwsubu_wx_h, WOP_WUUU_H, H4, H2, DO_SUB)
+RVVCALL(OPIVX2, vwsubu_wx_w, WOP_WUUU_W, H8, H4, DO_SUB)
+RVVCALL(OPIVX2, vwadd_wx_b, WOP_WSSS_B, H2, H1, DO_ADD)
+RVVCALL(OPIVX2, vwadd_wx_h, WOP_WSSS_H, H4, H2, DO_ADD)
+RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD)
+RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB)
+RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB)
+RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB)
+GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq)
+
+/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
+#define DO_VADC(N, M, C) (N + M + C)
+#define DO_VSBC(N, M, C) (N - M - C)
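+
+/*
+ * Worked 8-bit example: s2 = 0xff, s1 = 0x01, carry = 1 gives
+ * DO_VADC = 0xff + 0x01 + 1 = 0x101, stored as 0x01 after truncation
+ * to uint8_t; the carry-out itself is produced by the vmadc/vmsbc
+ * helpers below.
+ */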
+
+#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ uint8_t carry = vext_elem_mask(v0, mlen, i); \
+ \
+ *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC, clearb)
+GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh)
+GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl)
+GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq)
+
+GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC, clearb)
+GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh)
+GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl)
+GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq)
+
+#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ uint8_t carry = vext_elem_mask(v0, mlen, i); \
+ \
+ *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC, clearb)
+GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh)
+GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl)
+GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq)
+
+GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC, clearb)
+GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh)
+GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl)
+GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq)
+
+#define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \
+ (__typeof(N))(N + M) < N)
+#define DO_MSBC(N, M, C) (C ? N <= M : N < M)
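+
+/*
+ * DO_MADC computes the carry-out of N + M + C via unsigned wraparound:
+ * with no carry-in, the truncated sum is below N exactly when the
+ * addition overflowed; with carry-in, N + M + 1 wraps to a value <= N
+ * exactly when it overflows.  DO_MSBC is the analogous borrow-out test
+ * for N - M - C.
+ */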
+
+#define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ uint8_t carry = vext_elem_mask(v0, mlen, i); \
+ \
+ vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t, H1, DO_MADC)
+GEN_VEXT_VMADC_VVM(vmadc_vvm_h, uint16_t, H2, DO_MADC)
+GEN_VEXT_VMADC_VVM(vmadc_vvm_w, uint32_t, H4, DO_MADC)
+GEN_VEXT_VMADC_VVM(vmadc_vvm_d, uint64_t, H8, DO_MADC)
+
+GEN_VEXT_VMADC_VVM(vmsbc_vvm_b, uint8_t, H1, DO_MSBC)
+GEN_VEXT_VMADC_VVM(vmsbc_vvm_h, uint16_t, H2, DO_MSBC)
+GEN_VEXT_VMADC_VVM(vmsbc_vvm_w, uint32_t, H4, DO_MSBC)
+GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC)
+
+#define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ uint8_t carry = vext_elem_mask(v0, mlen, i); \
+ \
+ vext_set_elem_mask(vd, mlen, i, \
+ DO_OP(s2, (ETYPE)(target_long)s1, carry)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t, H1, DO_MADC)
+GEN_VEXT_VMADC_VXM(vmadc_vxm_h, uint16_t, H2, DO_MADC)
+GEN_VEXT_VMADC_VXM(vmadc_vxm_w, uint32_t, H4, DO_MADC)
+GEN_VEXT_VMADC_VXM(vmadc_vxm_d, uint64_t, H8, DO_MADC)
+
+GEN_VEXT_VMADC_VXM(vmsbc_vxm_b, uint8_t, H1, DO_MSBC)
+GEN_VEXT_VMADC_VXM(vmsbc_vxm_h, uint16_t, H2, DO_MSBC)
+GEN_VEXT_VMADC_VXM(vmsbc_vxm_w, uint32_t, H4, DO_MSBC)
+GEN_VEXT_VMADC_VXM(vmsbc_vxm_d, uint64_t, H8, DO_MSBC)
+
+/* Vector Bitwise Logical Instructions */
+RVVCALL(OPIVV2, vand_vv_b, OP_SSS_B, H1, H1, H1, DO_AND)
+RVVCALL(OPIVV2, vand_vv_h, OP_SSS_H, H2, H2, H2, DO_AND)
+RVVCALL(OPIVV2, vand_vv_w, OP_SSS_W, H4, H4, H4, DO_AND)
+RVVCALL(OPIVV2, vand_vv_d, OP_SSS_D, H8, H8, H8, DO_AND)
+RVVCALL(OPIVV2, vor_vv_b, OP_SSS_B, H1, H1, H1, DO_OR)
+RVVCALL(OPIVV2, vor_vv_h, OP_SSS_H, H2, H2, H2, DO_OR)
+RVVCALL(OPIVV2, vor_vv_w, OP_SSS_W, H4, H4, H4, DO_OR)
+RVVCALL(OPIVV2, vor_vv_d, OP_SSS_D, H8, H8, H8, DO_OR)
+RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR)
+RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR)
+RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR)
+RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR)
+GEN_VEXT_VV(vand_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vand_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vand_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vand_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vor_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vor_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vor_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vor_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND)
+RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND)
+RVVCALL(OPIVX2, vand_vx_w, OP_SSS_W, H4, H4, DO_AND)
+RVVCALL(OPIVX2, vand_vx_d, OP_SSS_D, H8, H8, DO_AND)
+RVVCALL(OPIVX2, vor_vx_b, OP_SSS_B, H1, H1, DO_OR)
+RVVCALL(OPIVX2, vor_vx_h, OP_SSS_H, H2, H2, DO_OR)
+RVVCALL(OPIVX2, vor_vx_w, OP_SSS_W, H4, H4, DO_OR)
+RVVCALL(OPIVX2, vor_vx_d, OP_SSS_D, H8, H8, DO_OR)
+RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR)
+RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR)
+RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR)
+RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR)
+GEN_VEXT_VX(vand_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vand_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vand_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vand_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vor_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vor_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vor_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vor_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Bit Shift Instructions */
+#define DO_SLL(N, M) (N << (M))
+#define DO_SRL(N, M) (N >> (M))
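+
+/*
+ * Note that DO_SRL is also used for the arithmetic-shift variants: when
+ * instantiated with signed types (as in vsra below), N >> M relies on
+ * the compiler implementing signed right shift as a sign-propagating
+ * shift, as QEMU assumes throughout.
+ */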
+
+/* generate the helpers for shift instructions with two vector operands */
+#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(TS1); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
+ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+ *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7, clearb)
+GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh)
+GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq)
+
+GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
+GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
+GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
+
+GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
+GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
+GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
+
+/* generate the helpers for shift instructions with one vector and one scalar operand */
+#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(TD); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb)
+GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh)
+GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq)
+
+GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb)
+GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh)
+GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq)
+
+GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb)
+GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh)
+GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl)
+GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq)
+
+/* Vector Narrowing Integer Right Shift Instructions */
+GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl)
+GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb)
+GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh)
+GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl)
+
+/* Vector Integer Comparison Instructions */
+#define DO_MSEQ(N, M) (N == M)
+#define DO_MSNE(N, M) (N != M)
+#define DO_MSLT(N, M) (N < M)
+#define DO_MSLE(N, M) (N <= M)
+#define DO_MSGT(N, M) (N > M)
+
+#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ)
+GEN_VEXT_CMP_VV(vmseq_vv_h, uint16_t, H2, DO_MSEQ)
+GEN_VEXT_CMP_VV(vmseq_vv_w, uint32_t, H4, DO_MSEQ)
+GEN_VEXT_CMP_VV(vmseq_vv_d, uint64_t, H8, DO_MSEQ)
+
+GEN_VEXT_CMP_VV(vmsne_vv_b, uint8_t, H1, DO_MSNE)
+GEN_VEXT_CMP_VV(vmsne_vv_h, uint16_t, H2, DO_MSNE)
+GEN_VEXT_CMP_VV(vmsne_vv_w, uint32_t, H4, DO_MSNE)
+GEN_VEXT_CMP_VV(vmsne_vv_d, uint64_t, H8, DO_MSNE)
+
+GEN_VEXT_CMP_VV(vmsltu_vv_b, uint8_t, H1, DO_MSLT)
+GEN_VEXT_CMP_VV(vmsltu_vv_h, uint16_t, H2, DO_MSLT)
+GEN_VEXT_CMP_VV(vmsltu_vv_w, uint32_t, H4, DO_MSLT)
+GEN_VEXT_CMP_VV(vmsltu_vv_d, uint64_t, H8, DO_MSLT)
+
+GEN_VEXT_CMP_VV(vmslt_vv_b, int8_t, H1, DO_MSLT)
+GEN_VEXT_CMP_VV(vmslt_vv_h, int16_t, H2, DO_MSLT)
+GEN_VEXT_CMP_VV(vmslt_vv_w, int32_t, H4, DO_MSLT)
+GEN_VEXT_CMP_VV(vmslt_vv_d, int64_t, H8, DO_MSLT)
+
+GEN_VEXT_CMP_VV(vmsleu_vv_b, uint8_t, H1, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsleu_vv_h, uint16_t, H2, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsleu_vv_w, uint32_t, H4, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsleu_vv_d, uint64_t, H8, DO_MSLE)
+
+GEN_VEXT_CMP_VV(vmsle_vv_b, int8_t, H1, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE)
+GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE)
+
+#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, \
+ DO_OP(s2, (ETYPE)(target_long)s1)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ)
+GEN_VEXT_CMP_VX(vmseq_vx_h, uint16_t, H2, DO_MSEQ)
+GEN_VEXT_CMP_VX(vmseq_vx_w, uint32_t, H4, DO_MSEQ)
+GEN_VEXT_CMP_VX(vmseq_vx_d, uint64_t, H8, DO_MSEQ)
+
+GEN_VEXT_CMP_VX(vmsne_vx_b, uint8_t, H1, DO_MSNE)
+GEN_VEXT_CMP_VX(vmsne_vx_h, uint16_t, H2, DO_MSNE)
+GEN_VEXT_CMP_VX(vmsne_vx_w, uint32_t, H4, DO_MSNE)
+GEN_VEXT_CMP_VX(vmsne_vx_d, uint64_t, H8, DO_MSNE)
+
+GEN_VEXT_CMP_VX(vmsltu_vx_b, uint8_t, H1, DO_MSLT)
+GEN_VEXT_CMP_VX(vmsltu_vx_h, uint16_t, H2, DO_MSLT)
+GEN_VEXT_CMP_VX(vmsltu_vx_w, uint32_t, H4, DO_MSLT)
+GEN_VEXT_CMP_VX(vmsltu_vx_d, uint64_t, H8, DO_MSLT)
+
+GEN_VEXT_CMP_VX(vmslt_vx_b, int8_t, H1, DO_MSLT)
+GEN_VEXT_CMP_VX(vmslt_vx_h, int16_t, H2, DO_MSLT)
+GEN_VEXT_CMP_VX(vmslt_vx_w, int32_t, H4, DO_MSLT)
+GEN_VEXT_CMP_VX(vmslt_vx_d, int64_t, H8, DO_MSLT)
+
+GEN_VEXT_CMP_VX(vmsleu_vx_b, uint8_t, H1, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsleu_vx_h, uint16_t, H2, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsleu_vx_w, uint32_t, H4, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsleu_vx_d, uint64_t, H8, DO_MSLE)
+
+GEN_VEXT_CMP_VX(vmsle_vx_b, int8_t, H1, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsle_vx_h, int16_t, H2, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsle_vx_w, int32_t, H4, DO_MSLE)
+GEN_VEXT_CMP_VX(vmsle_vx_d, int64_t, H8, DO_MSLE)
+
+GEN_VEXT_CMP_VX(vmsgtu_vx_b, uint8_t, H1, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgtu_vx_h, uint16_t, H2, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgtu_vx_w, uint32_t, H4, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgtu_vx_d, uint64_t, H8, DO_MSGT)
+
+GEN_VEXT_CMP_VX(vmsgt_vx_b, int8_t, H1, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgt_vx_h, int16_t, H2, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgt_vx_w, int32_t, H4, DO_MSGT)
+GEN_VEXT_CMP_VX(vmsgt_vx_d, int64_t, H8, DO_MSGT)
+
+/* Vector Integer Min/Max Instructions */
+RVVCALL(OPIVV2, vminu_vv_b, OP_UUU_B, H1, H1, H1, DO_MIN)
+RVVCALL(OPIVV2, vminu_vv_h, OP_UUU_H, H2, H2, H2, DO_MIN)
+RVVCALL(OPIVV2, vminu_vv_w, OP_UUU_W, H4, H4, H4, DO_MIN)
+RVVCALL(OPIVV2, vminu_vv_d, OP_UUU_D, H8, H8, H8, DO_MIN)
+RVVCALL(OPIVV2, vmin_vv_b, OP_SSS_B, H1, H1, H1, DO_MIN)
+RVVCALL(OPIVV2, vmin_vv_h, OP_SSS_H, H2, H2, H2, DO_MIN)
+RVVCALL(OPIVV2, vmin_vv_w, OP_SSS_W, H4, H4, H4, DO_MIN)
+RVVCALL(OPIVV2, vmin_vv_d, OP_SSS_D, H8, H8, H8, DO_MIN)
+RVVCALL(OPIVV2, vmaxu_vv_b, OP_UUU_B, H1, H1, H1, DO_MAX)
+RVVCALL(OPIVV2, vmaxu_vv_h, OP_UUU_H, H2, H2, H2, DO_MAX)
+RVVCALL(OPIVV2, vmaxu_vv_w, OP_UUU_W, H4, H4, H4, DO_MAX)
+RVVCALL(OPIVV2, vmaxu_vv_d, OP_UUU_D, H8, H8, H8, DO_MAX)
+RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX)
+RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX)
+RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX)
+RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX)
+GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN)
+RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN)
+RVVCALL(OPIVX2, vminu_vx_w, OP_UUU_W, H4, H4, DO_MIN)
+RVVCALL(OPIVX2, vminu_vx_d, OP_UUU_D, H8, H8, DO_MIN)
+RVVCALL(OPIVX2, vmin_vx_b, OP_SSS_B, H1, H1, DO_MIN)
+RVVCALL(OPIVX2, vmin_vx_h, OP_SSS_H, H2, H2, DO_MIN)
+RVVCALL(OPIVX2, vmin_vx_w, OP_SSS_W, H4, H4, DO_MIN)
+RVVCALL(OPIVX2, vmin_vx_d, OP_SSS_D, H8, H8, DO_MIN)
+RVVCALL(OPIVX2, vmaxu_vx_b, OP_UUU_B, H1, H1, DO_MAX)
+RVVCALL(OPIVX2, vmaxu_vx_h, OP_UUU_H, H2, H2, DO_MAX)
+RVVCALL(OPIVX2, vmaxu_vx_w, OP_UUU_W, H4, H4, DO_MAX)
+RVVCALL(OPIVX2, vmaxu_vx_d, OP_UUU_D, H8, H8, DO_MAX)
+RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX)
+RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX)
+RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX)
+RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX)
+GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Integer Multiply Instructions */
+#define DO_MUL(N, M) (N * M)
+RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL)
+RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL)
+RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL)
+RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL)
+GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmul_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq)
+
+static int8_t do_mulh_b(int8_t s2, int8_t s1)
+{
+ return (int16_t)s2 * (int16_t)s1 >> 8;
+}
+
+static int16_t do_mulh_h(int16_t s2, int16_t s1)
+{
+ return (int32_t)s2 * (int32_t)s1 >> 16;
+}
+
+static int32_t do_mulh_w(int32_t s2, int32_t s1)
+{
+ return (int64_t)s2 * (int64_t)s1 >> 32;
+}
+
+static int64_t do_mulh_d(int64_t s2, int64_t s1)
+{
+ uint64_t hi_64, lo_64;
+
+ muls64(&lo_64, &hi_64, s1, s2);
+ return hi_64;
+}
+
+static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
+{
+ return (uint16_t)s2 * (uint16_t)s1 >> 8;
+}
+
+static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
+{
+ return (uint32_t)s2 * (uint32_t)s1 >> 16;
+}
+
+static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
+{
+ return (uint64_t)s2 * (uint64_t)s1 >> 32;
+}
+
+static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1)
+{
+ uint64_t hi_64, lo_64;
+
+ mulu64(&lo_64, &hi_64, s2, s1);
+ return hi_64;
+}
+
+static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
+{
+ return (int16_t)s2 * (uint16_t)s1 >> 8;
+}
+
+static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
+{
+ return (int32_t)s2 * (uint32_t)s1 >> 16;
+}
+
+static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
+{
+ return (int64_t)s2 * (uint64_t)s1 >> 32;
+}
+
+/*
+ * Let  A = signed operand (s2),
+ *      B = unsigned operand (s1),
+ *      P = mulu64(A, B), the product of the raw bit patterns.
+ *
+ * If A >= 0, the bit pattern of A equals its value, so the signed
+ * product SP is simply P.
+ *
+ * If A < 0, mulu64 interprets A's bit pattern as A + 2 ** 64, so
+ *      P  = (A + 2 ** 64) * B = A * B + 2 ** 64 * B
+ *      SP = A * B = P - 2 ** 64 * B
+ * i.e. the high half of the signed product is the high half of P
+ * minus B.  Hence:
+ *      HI_P -= (A < 0 ? B : 0)
+ */
+
+static int64_t do_mulhsu_d(int64_t s2, uint64_t s1)
+{
+ uint64_t hi_64, lo_64;
+
+ mulu64(&lo_64, &hi_64, s2, s1);
+
+ hi_64 -= s2 < 0 ? s1 : 0;
+ return hi_64;
+}
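+
+/*
+ * Sanity check with an 8-bit analogue: s2 = -1 (0xff), s1 = 2 gives an
+ * unsigned product 0xff * 0x02 = 0x01fe with high byte 0x01; subtracting
+ * s1 yields 0xff, the high byte of the true signed product -2 (0xfffe).
+ */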
+
+RVVCALL(OPIVV2, vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b)
+RVVCALL(OPIVV2, vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h)
+RVVCALL(OPIVV2, vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w)
+RVVCALL(OPIVV2, vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d)
+RVVCALL(OPIVV2, vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b)
+RVVCALL(OPIVV2, vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h)
+RVVCALL(OPIVV2, vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w)
+RVVCALL(OPIVV2, vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d)
+RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b)
+RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h)
+RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w)
+RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d)
+GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL)
+RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL)
+RVVCALL(OPIVX2, vmul_vx_w, OP_SSS_W, H4, H4, DO_MUL)
+RVVCALL(OPIVX2, vmul_vx_d, OP_SSS_D, H8, H8, DO_MUL)
+RVVCALL(OPIVX2, vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b)
+RVVCALL(OPIVX2, vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h)
+RVVCALL(OPIVX2, vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w)
+RVVCALL(OPIVX2, vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d)
+RVVCALL(OPIVX2, vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b)
+RVVCALL(OPIVX2, vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h)
+RVVCALL(OPIVX2, vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w)
+RVVCALL(OPIVX2, vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d)
+RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b)
+RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h)
+RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w)
+RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d)
+GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq)
+
+/* Vector Integer Divide Instructions */
+#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
+#define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M)
+#define DO_DIV(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) :\
+ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
+#define DO_REM(N, M) (unlikely(M == 0) ? N :\
+ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
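+
+/*
+ * (N == -N) holds only for 0 and the most negative value of the type, so
+ * combined with M == -1 it flags the overflowing INT_MIN / -1 case, for
+ * which RISC-V defines quotient = N and remainder = 0.  (For N == 0 the
+ * guarded and unguarded paths give the same result.)
+ */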
+
+RVVCALL(OPIVV2, vdivu_vv_b, OP_UUU_B, H1, H1, H1, DO_DIVU)
+RVVCALL(OPIVV2, vdivu_vv_h, OP_UUU_H, H2, H2, H2, DO_DIVU)
+RVVCALL(OPIVV2, vdivu_vv_w, OP_UUU_W, H4, H4, H4, DO_DIVU)
+RVVCALL(OPIVV2, vdivu_vv_d, OP_UUU_D, H8, H8, H8, DO_DIVU)
+RVVCALL(OPIVV2, vdiv_vv_b, OP_SSS_B, H1, H1, H1, DO_DIV)
+RVVCALL(OPIVV2, vdiv_vv_h, OP_SSS_H, H2, H2, H2, DO_DIV)
+RVVCALL(OPIVV2, vdiv_vv_w, OP_SSS_W, H4, H4, H4, DO_DIV)
+RVVCALL(OPIVV2, vdiv_vv_d, OP_SSS_D, H8, H8, H8, DO_DIV)
+RVVCALL(OPIVV2, vremu_vv_b, OP_UUU_B, H1, H1, H1, DO_REMU)
+RVVCALL(OPIVV2, vremu_vv_h, OP_UUU_H, H2, H2, H2, DO_REMU)
+RVVCALL(OPIVV2, vremu_vv_w, OP_UUU_W, H4, H4, H4, DO_REMU)
+RVVCALL(OPIVV2, vremu_vv_d, OP_UUU_D, H8, H8, H8, DO_REMU)
+RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM)
+RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM)
+RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM)
+RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM)
+GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU)
+RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU)
+RVVCALL(OPIVX2, vdivu_vx_w, OP_UUU_W, H4, H4, DO_DIVU)
+RVVCALL(OPIVX2, vdivu_vx_d, OP_UUU_D, H8, H8, DO_DIVU)
+RVVCALL(OPIVX2, vdiv_vx_b, OP_SSS_B, H1, H1, DO_DIV)
+RVVCALL(OPIVX2, vdiv_vx_h, OP_SSS_H, H2, H2, DO_DIV)
+RVVCALL(OPIVX2, vdiv_vx_w, OP_SSS_W, H4, H4, DO_DIV)
+RVVCALL(OPIVX2, vdiv_vx_d, OP_SSS_D, H8, H8, DO_DIV)
+RVVCALL(OPIVX2, vremu_vx_b, OP_UUU_B, H1, H1, DO_REMU)
+RVVCALL(OPIVX2, vremu_vx_h, OP_UUU_H, H2, H2, DO_REMU)
+RVVCALL(OPIVX2, vremu_vx_w, OP_UUU_W, H4, H4, DO_REMU)
+RVVCALL(OPIVX2, vremu_vx_d, OP_UUU_D, H8, H8, DO_REMU)
+RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM)
+RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM)
+RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM)
+RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM)
+GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq)
+
+/* Vector Widening Integer Multiply Instructions */
+RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL)
+RVVCALL(OPIVV2, vwmul_vv_h, WOP_SSS_H, H4, H2, H2, DO_MUL)
+RVVCALL(OPIVV2, vwmul_vv_w, WOP_SSS_W, H8, H4, H4, DO_MUL)
+RVVCALL(OPIVV2, vwmulu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MUL)
+RVVCALL(OPIVV2, vwmulu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MUL)
+RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL)
+RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL)
+RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL)
+RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL)
+GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq)
+
+RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL)
+RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL)
+RVVCALL(OPIVX2, vwmul_vx_w, WOP_SSS_W, H8, H4, DO_MUL)
+RVVCALL(OPIVX2, vwmulu_vx_b, WOP_UUU_B, H2, H1, DO_MUL)
+RVVCALL(OPIVX2, vwmulu_vx_h, WOP_UUU_H, H4, H2, DO_MUL)
+RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL)
+RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL)
+RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL)
+RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL)
+GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq)
+
+/* Vector Single-Width Integer Multiply-Add Instructions */
+#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1, d); \
+}
+
+#define DO_MACC(N, M, D) ((M) * (N) + (D))
+#define DO_NMSAC(N, M, D) (-((M) * (N)) + (D))
+#define DO_MADD(N, M, D) ((M) * (D) + (N))
+#define DO_NMSUB(N, M, D) (-((M) * (D)) + (N))
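+/*
+ * The expanders pass OP(s2, s1, d), i.e. N = vs2, M = vs1 (or rs1),
+ * D = vd.  So vmacc/vnmsac compute +/-(vs1 * vs2) + vd, while
+ * vmadd/vnmsub compute +/-(vs1 * vd) + vs2.
+ */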
+RVVCALL(OPIVV3, vmacc_vv_b, OP_SSS_B, H1, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vmacc_vv_h, OP_SSS_H, H2, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vmacc_vv_w, OP_SSS_W, H4, H4, H4, DO_MACC)
+RVVCALL(OPIVV3, vmacc_vv_d, OP_SSS_D, H8, H8, H8, DO_MACC)
+RVVCALL(OPIVV3, vnmsac_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSAC)
+RVVCALL(OPIVV3, vnmsac_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSAC)
+RVVCALL(OPIVV3, vnmsac_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSAC)
+RVVCALL(OPIVV3, vnmsac_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSAC)
+RVVCALL(OPIVV3, vmadd_vv_b, OP_SSS_B, H1, H1, H1, DO_MADD)
+RVVCALL(OPIVV3, vmadd_vv_h, OP_SSS_H, H2, H2, H2, DO_MADD)
+RVVCALL(OPIVV3, vmadd_vv_w, OP_SSS_W, H4, H4, H4, DO_MADD)
+RVVCALL(OPIVV3, vmadd_vv_d, OP_SSS_D, H8, H8, H8, DO_MADD)
+RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB)
+RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB)
+RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB)
+RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB)
+GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq)
+GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq)
+
+#define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d); \
+}
+
+RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC)
+RVVCALL(OPIVX3, vmacc_vx_h, OP_SSS_H, H2, H2, DO_MACC)
+RVVCALL(OPIVX3, vmacc_vx_w, OP_SSS_W, H4, H4, DO_MACC)
+RVVCALL(OPIVX3, vmacc_vx_d, OP_SSS_D, H8, H8, DO_MACC)
+RVVCALL(OPIVX3, vnmsac_vx_b, OP_SSS_B, H1, H1, DO_NMSAC)
+RVVCALL(OPIVX3, vnmsac_vx_h, OP_SSS_H, H2, H2, DO_NMSAC)
+RVVCALL(OPIVX3, vnmsac_vx_w, OP_SSS_W, H4, H4, DO_NMSAC)
+RVVCALL(OPIVX3, vnmsac_vx_d, OP_SSS_D, H8, H8, DO_NMSAC)
+RVVCALL(OPIVX3, vmadd_vx_b, OP_SSS_B, H1, H1, DO_MADD)
+RVVCALL(OPIVX3, vmadd_vx_h, OP_SSS_H, H2, H2, DO_MADD)
+RVVCALL(OPIVX3, vmadd_vx_w, OP_SSS_W, H4, H4, DO_MADD)
+RVVCALL(OPIVX3, vmadd_vx_d, OP_SSS_D, H8, H8, DO_MADD)
+RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB)
+RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB)
+RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB)
+RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB)
+GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq)
+GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq)
+
+/* Vector Widening Integer Multiply-Add Instructions */
+RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vwmaccu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vwmaccu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MACC)
+RVVCALL(OPIVV3, vwmacc_vv_b, WOP_SSS_B, H2, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vwmacc_vv_h, WOP_SSS_H, H4, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC)
+RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC)
+RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC)
+RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC)
+GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq)
+GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq)
+
+RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC)
+RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC)
+RVVCALL(OPIVX3, vwmaccu_vx_w, WOP_UUU_W, H8, H4, DO_MACC)
+RVVCALL(OPIVX3, vwmacc_vx_b, WOP_SSS_B, H2, H1, DO_MACC)
+RVVCALL(OPIVX3, vwmacc_vx_h, WOP_SSS_H, H4, H2, DO_MACC)
+RVVCALL(OPIVX3, vwmacc_vx_w, WOP_SSS_W, H8, H4, DO_MACC)
+RVVCALL(OPIVX3, vwmaccsu_vx_b, WOP_SSU_B, H2, H1, DO_MACC)
+RVVCALL(OPIVX3, vwmaccsu_vx_h, WOP_SSU_H, H4, H2, DO_MACC)
+RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC)
+RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC)
+RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC)
+RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC)
+GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq)
+GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh)
+GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl)
+GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq)
+
+/* Vector Integer Merge and Move Instructions */
+#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1, clearb)
+GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh)
+GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl)
+GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq)
+
+#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ *((ETYPE *)vd + H(i)) = (ETYPE)s1; \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1, clearb)
+GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh)
+GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl)
+GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq)
+
+#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? vs2 : vs1); \
+ *((ETYPE *)vd + H(i)) = *(vt + H(i)); \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1, clearb)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl)
+GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq)
+
+#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 : \
+ (ETYPE)(target_long)s1); \
+ *((ETYPE *)vd + H(i)) = d; \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1, clearb)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl)
+GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq)
+
+/*
+ *** Vector Fixed-Point Arithmetic Instructions
+ */
+
+/* Vector Single-Width Saturating Add and Subtract */
+
+/*
+ * Fixed-point instructions carry a rounding mode and may saturate,
+ * so define common macros for fixed point here.
+ */
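+/*
+ * The vxrm field selects the rounding mode:
+ *   0 (rnu) round-to-nearest-up,   1 (rne) round-to-nearest-even,
+ *   2 (rdn) round-down (truncate), 3 (rod) round-to-odd.
+ */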
+typedef void opivv2_rm_fn(void *vd, void *vs1, void *vs2, int i,
+ CPURISCVState *env, int vxrm);
+
+#define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static inline void \
+do_##NAME(void *vd, void *vs1, void *vs2, int i, \
+ CPURISCVState *env, int vxrm) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1); \
+}
+
+static inline void
+vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
+ CPURISCVState *env,
+ uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
+ opivv2_rm_fn *fn)
+{
+ for (uint32_t i = 0; i < vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ fn(vd, vs1, vs2, i, env, vxrm);
+ }
+}
+
+static inline void
+vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
+ CPURISCVState *env,
+ uint32_t desc, uint32_t esz, uint32_t dsz,
+ opivv2_rm_fn *fn, clear_fn *clearfn)
+{
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+
+ switch (env->vxrm) {
+ case 0: /* rnu */
+ vext_vv_rm_1(vd, v0, vs1, vs2,
+ env, vl, vm, mlen, 0, fn);
+ break;
+ case 1: /* rne */
+ vext_vv_rm_1(vd, v0, vs1, vs2,
+ env, vl, vm, mlen, 1, fn);
+ break;
+ case 2: /* rdn */
+ vext_vv_rm_1(vd, v0, vs1, vs2,
+ env, vl, vm, mlen, 2, fn);
+ break;
+ default: /* rod */
+ vext_vv_rm_1(vd, v0, vs1, vs2,
+ env, vl, vm, mlen, 3, fn);
+ break;
+ }
+
+ clearfn(vd, vl, vl * dsz, vlmax * dsz);
+}
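+
+/*
+ * The switch above passes vxrm as a literal constant, so the compiler
+ * can specialize vext_vv_rm_1 for each rounding mode instead of
+ * reloading env->vxrm on every element.
+ */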
+
+/* generate helpers for fixed point instructions with OPIVV format */
+#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
+ do_##NAME, CLEAR_FN); \
+}
+
+static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+{
+ uint8_t res = a + b;
+ if (res < a) {
+ res = UINT8_MAX;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a,
+ uint16_t b)
+{
+ uint16_t res = a + b;
+ if (res < a) {
+ res = UINT16_MAX;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a,
+ uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ res = UINT32_MAX;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a,
+ uint64_t b)
+{
+ uint64_t res = a + b;
+ if (res < a) {
+ res = UINT64_MAX;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8)
+RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16)
+RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32)
+RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64)
+GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq)
+
+typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i,
+ CPURISCVState *env, int vxrm);
+
+#define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static inline void \
+do_##NAME(void *vd, target_long s1, void *vs2, int i, \
+ CPURISCVState *env, int vxrm) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1); \
+}
+
+static inline void
+vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
+ CPURISCVState *env,
+ uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm,
+ opivx2_rm_fn *fn)
+{
+ for (uint32_t i = 0; i < vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ fn(vd, s1, vs2, i, env, vxrm);
+ }
+}
+
+static inline void
+vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
+ CPURISCVState *env,
+ uint32_t desc, uint32_t esz, uint32_t dsz,
+ opivx2_rm_fn *fn, clear_fn *clearfn)
+{
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+
+ switch (env->vxrm) {
+ case 0: /* rnu */
+ vext_vx_rm_1(vd, v0, s1, vs2,
+ env, vl, vm, mlen, 0, fn);
+ break;
+ case 1: /* rne */
+ vext_vx_rm_1(vd, v0, s1, vs2,
+ env, vl, vm, mlen, 1, fn);
+ break;
+ case 2: /* rdn */
+ vext_vx_rm_1(vd, v0, s1, vs2,
+ env, vl, vm, mlen, 2, fn);
+ break;
+ default: /* rod */
+ vext_vx_rm_1(vd, v0, s1, vs2,
+ env, vl, vm, mlen, 3, fn);
+ break;
+ }
+
+ clearfn(vd, vl, vl * dsz, vlmax * dsz);
+}
+
+/* generate helpers for fixed point instructions with OPIVX format */
+#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
+ do_##NAME, CLEAR_FN); \
+}
+
+RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8)
+RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16)
+RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32)
+RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64)
+GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq)
+
+static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+ int8_t res = a + b;
+ if ((res ^ a) & (res ^ b) & INT8_MIN) {
+ res = a > 0 ? INT8_MAX : INT8_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
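+
+/*
+ * Signed-overflow test: (res ^ a) & (res ^ b) has the sign bit set
+ * only when a and b share a sign that res does not, which is exactly
+ * the two's-complement overflow condition.
+ */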
+
+static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+ int16_t res = a + b;
+ if ((res ^ a) & (res ^ b) & INT16_MIN) {
+ res = a > 0 ? INT16_MAX : INT16_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int32_t res = a + b;
+ if ((res ^ a) & (res ^ b) & INT32_MIN) {
+ res = a > 0 ? INT32_MAX : INT32_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = a + b;
+ if ((res ^ a) & (res ^ b) & INT64_MIN) {
+ res = a > 0 ? INT64_MAX : INT64_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8)
+RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16)
+RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32)
+RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64)
+GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8)
+RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16)
+RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32)
+RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64)
+GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq)
+
+static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+{
+ uint8_t res = a - b;
+ if (res > a) {
+ res = 0;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a,
+ uint16_t b)
+{
+ uint16_t res = a - b;
+ if (res > a) {
+ res = 0;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a,
+ uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ res = 0;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a,
+ uint64_t b)
+{
+ uint64_t res = a - b;
+ if (res > a) {
+ res = 0;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8)
+RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16)
+RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32)
+RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64)
+GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8)
+RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16)
+RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32)
+RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64)
+GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq)
+
+static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+ int8_t res = a - b;
+ if ((res ^ a) & (a ^ b) & INT8_MIN) {
+ res = a >= 0 ? INT8_MAX : INT8_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+ int16_t res = a - b;
+ if ((res ^ a) & (a ^ b) & INT16_MIN) {
+ res = a >= 0 ? INT16_MAX : INT16_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int32_t res = a - b;
+ if ((res ^ a) & (a ^ b) & INT32_MIN) {
+ res = a >= 0 ? INT32_MAX : INT32_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = a - b;
+ if ((res ^ a) & (a ^ b) & INT64_MIN) {
+ res = a >= 0 ? INT64_MAX : INT64_MIN;
+ env->vxsat = 0x1;
+ }
+ return res;
+}
+
+RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8)
+RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16)
+RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32)
+RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64)
+GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8)
+RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16)
+RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32)
+RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64)
+GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Averaging Add and Subtract */
+static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
+{
+    uint8_t d, d1;
+    uint64_t D1, D2;
+
+    if (shift == 0 || shift > 64) {
+        return 0;
+    }
+
+    /*
+     * Extract only after the range check, so an out-of-range shift
+     * cannot trip extract64()'s assertion.
+     */
+    d = extract64(v, shift, 1);
+    d1 = extract64(v, shift - 1, 1);
+ D1 = extract64(v, 0, shift);
+ if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
+ return d1;
+ } else if (vxrm == 1) { /* round-to-nearest-even */
+ if (shift > 1) {
+ D2 = extract64(v, 0, shift - 1);
+ return d1 & ((D2 != 0) | d);
+ } else {
+ return d1 & d;
+ }
+ } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
+ return !d & (D1 != 0);
+ }
+ return 0; /* round-down (truncate) */
+}
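+
+/*
+ * Example: v = 6 (0b110), shift = 2, so the discarded bits are 0b10:
+ *   rnu: d1 = 1               -> (6 >> 2) + 1 = 2
+ *   rne: d1 & ((D2 != 0) | d) -> 1 & (0 | 1)  = 1, so 2
+ *   rdn: 0                    -> 1
+ *   rod: !d & (D1 != 0)       -> 0, so 1 (the result is already odd)
+ */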
+
+static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int64_t res = (int64_t)a + b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = a + b;
+ uint8_t round = get_round(vxrm, res, 1);
+ int64_t over = (res ^ a) & (res ^ b) & INT64_MIN;
+
+ /* With signed overflow, bit 64 is inverse of bit 63. */
+ return ((res >> 1) ^ over) + round;
+}
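+
+/*
+ * Example: a = b = INT64_MAX.  res wraps to -2 and 'over' is set, so
+ * (res >> 1) ^ over recovers INT64_MAX, the exact average, without
+ * needing a wider type.
+ */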
+
+RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
+GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
+GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)
+
+static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int64_t res = (int64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = (int64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
+ int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;
+
+ /* With signed overflow, bit 64 is inverse of bit 63. */
+ return ((res >> 1) ^ over) + round;
+}
+
+RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
+GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
+GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+ uint8_t round;
+ int16_t res;
+
+ res = (int16_t)a * (int16_t)b;
+ round = get_round(vxrm, res, 7);
+ res = (res >> 7) + round;
+
+ if (res > INT8_MAX) {
+ env->vxsat = 0x1;
+ return INT8_MAX;
+ } else if (res < INT8_MIN) {
+ env->vxsat = 0x1;
+ return INT8_MIN;
+ } else {
+ return res;
+ }
+}
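+
+/*
+ * Example: a = b = INT8_MIN (-1.0 * -1.0 viewed as Q7 fractions).
+ * The raw product is 0x4000, (0x4000 >> 7) = 128 > INT8_MAX, so the
+ * result saturates to 127 and vxsat is set.
+ */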
+
+static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+ uint8_t round;
+ int32_t res;
+
+ res = (int32_t)a * (int32_t)b;
+ round = get_round(vxrm, res, 15);
+ res = (res >> 15) + round;
+
+ if (res > INT16_MAX) {
+ env->vxsat = 0x1;
+ return INT16_MAX;
+ } else if (res < INT16_MIN) {
+ env->vxsat = 0x1;
+ return INT16_MIN;
+ } else {
+ return res;
+ }
+}
+
+static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ uint8_t round;
+ int64_t res;
+
+ res = (int64_t)a * (int64_t)b;
+ round = get_round(vxrm, res, 31);
+ res = (res >> 31) + round;
+
+ if (res > INT32_MAX) {
+ env->vxsat = 0x1;
+ return INT32_MAX;
+ } else if (res < INT32_MIN) {
+ env->vxsat = 0x1;
+ return INT32_MIN;
+ } else {
+ return res;
+ }
+}
+
+static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ uint8_t round;
+ uint64_t hi_64, lo_64;
+ int64_t res;
+
+ if (a == INT64_MIN && b == INT64_MIN) {
+ env->vxsat = 1;
+ return INT64_MAX;
+ }
+
+ muls64(&lo_64, &hi_64, a, b);
+ round = get_round(vxrm, lo_64, 63);
+ /*
+ * Cannot overflow, as there are always
+ * 2 sign bits after multiply.
+ */
+ res = (hi_64 << 1) | (lo_64 >> 63);
+ if (round) {
+ if (res == INT64_MAX) {
+ env->vxsat = 1;
+ } else {
+ res += 1;
+ }
+ }
+ return res;
+}
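+
+/*
+ * (hi_64 << 1) | (lo_64 >> 63) is the 128-bit product shifted right
+ * by 63, the same (a * b) >> (N - 1) as the narrower cases, computed
+ * without a 128-bit type.
+ */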
+
+RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
+RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
+RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
+RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
+GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
+RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
+RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
+RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
+GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq)
+
+/* Vector Widening Saturating Scaled Multiply-Add */
+static inline uint16_t
+vwsmaccu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b,
+ uint16_t c)
+{
+ uint8_t round;
+ uint16_t res = (uint16_t)a * b;
+
+ round = get_round(vxrm, res, 4);
+ res = (res >> 4) + round;
+ return saddu16(env, vxrm, c, res);
+}
+
+static inline uint32_t
+vwsmaccu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b,
+ uint32_t c)
+{
+ uint8_t round;
+ uint32_t res = (uint32_t)a * b;
+
+ round = get_round(vxrm, res, 8);
+ res = (res >> 8) + round;
+ return saddu32(env, vxrm, c, res);
+}
+
+static inline uint64_t
+vwsmaccu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b,
+ uint64_t c)
+{
+ uint8_t round;
+ uint64_t res = (uint64_t)a * b;
+
+ round = get_round(vxrm, res, 16);
+ res = (res >> 16) + round;
+ return saddu64(env, vxrm, c, res);
+}
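+
+/*
+ * Note the scaling: each variant shifts the widened product right by
+ * half the source element width (4, 8 or 16 bits) before the
+ * saturating accumulate.
+ */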
+
+#define OPIVV3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static inline void \
+do_##NAME(void *vd, void *vs1, void *vs2, int i, \
+ CPURISCVState *env, int vxrm) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1, d); \
+}
+
+RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8)
+RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16)
+RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32)
+GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq)
+
+#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static inline void \
+do_##NAME(void *vd, target_long s1, void *vs2, int i, \
+ CPURISCVState *env, int vxrm) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1, d); \
+}
+
+RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8)
+RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16)
+RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32)
+GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq)
+
+static inline int16_t
+vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c)
+{
+ uint8_t round;
+ int16_t res = (int16_t)a * b;
+
+ round = get_round(vxrm, res, 4);
+ res = (res >> 4) + round;
+ return sadd16(env, vxrm, c, res);
+}
+
+static inline int32_t
+vwsmacc16(CPURISCVState *env, int vxrm, int16_t a, int16_t b, int32_t c)
+{
+ uint8_t round;
+ int32_t res = (int32_t)a * b;
+
+ round = get_round(vxrm, res, 8);
+ res = (res >> 8) + round;
+ return sadd32(env, vxrm, c, res);
+}
+
+static inline int64_t
+vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c)
+{
+ uint8_t round;
+ int64_t res = (int64_t)a * b;
+
+ round = get_round(vxrm, res, 16);
+ res = (res >> 16) + round;
+ return sadd64(env, vxrm, c, res);
+}
+
+RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8)
+RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16)
+RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32)
+GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh)
+GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq)
+RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8)
+RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16)
+RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32)
+GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh)
+GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl)
+GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq)
+
+static inline int16_t
+vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c)
+{
+ uint8_t round;
+ int16_t res = a * (int16_t)b;
+
+ round = get_round(vxrm, res, 4);
+ res = (res >> 4) + round;
+ return ssub16(env, vxrm, c, res);
+}
+
+static inline int32_t
+vwsmaccsu16(CPURISCVState *env, int vxrm, uint16_t a, int16_t b, int32_t c)
+{
+ uint8_t round;
+ int32_t res = a * (int32_t)b;
+
+ round = get_round(vxrm, res, 8);
+ res = (res >> 8) + round;
+ return ssub32(env, vxrm, c, res);
+}
+
+static inline int64_t
+vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c)
+{
+ uint8_t round;
+ int64_t res = a * (int64_t)b;
+
+ round = get_round(vxrm, res, 16);
+ res = (res >> 16) + round;
+ return ssub64(env, vxrm, c, res);
+}
+
+RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8)
+RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16)
+RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq)
+RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8)
+RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16)
+RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl)
+GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq)
+
+static inline int16_t
+vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c)
+{
+ uint8_t round;
+ int16_t res = (int16_t)a * b;
+
+ round = get_round(vxrm, res, 4);
+ res = (res >> 4) + round;
+ return ssub16(env, vxrm, c, res);
+}
+
+static inline int32_t
+vwsmaccus16(CPURISCVState *env, int vxrm, int16_t a, uint16_t b, int32_t c)
+{
+ uint8_t round;
+ int32_t res = (int32_t)a * b;
+
+ round = get_round(vxrm, res, 8);
+ res = (res >> 8) + round;
+ return ssub32(env, vxrm, c, res);
+}
+
+static inline int64_t
+vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c)
+{
+ uint8_t round;
+ int64_t res = (int64_t)a * b;
+
+ round = get_round(vxrm, res, 16);
+ res = (res >> 16) + round;
+ return ssub64(env, vxrm, c, res);
+}
+
+RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, WOP_SUS_B, H2, H1, vwsmaccus8)
+RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16)
+RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
+GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh)
+GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
+GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)
+
+/* Vector Single-Width Scaling Shift Instructions */
+static inline uint8_t
+vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+{
+ uint8_t round, shift = b & 0x7;
+ uint8_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline uint16_t
+vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
+{
+ uint8_t round, shift = b & 0xf;
+ uint16_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline uint32_t
+vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
+{
+ uint8_t round, shift = b & 0x1f;
+ uint32_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline uint64_t
+vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
+{
+ uint8_t round, shift = b & 0x3f;
+ uint64_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
+RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
+RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
+RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
+GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
+RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
+RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
+RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
+GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)
+
+static inline int8_t
+vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+{
+ uint8_t round, shift = b & 0x7;
+ int8_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline int16_t
+vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+{
+ uint8_t round, shift = b & 0xf;
+ int16_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline int32_t
+vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ uint8_t round, shift = b & 0x1f;
+ int32_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+static inline int64_t
+vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ uint8_t round, shift = b & 0x3f;
+ int64_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ return res;
+}
+
+RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
+RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
+RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
+RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
+GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
+RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
+RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
+RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
+GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+static inline int8_t
+vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
+{
+ uint8_t round, shift = b & 0xf;
+ int16_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > INT8_MAX) {
+ env->vxsat = 0x1;
+ return INT8_MAX;
+ } else if (res < INT8_MIN) {
+ env->vxsat = 0x1;
+ return INT8_MIN;
+ } else {
+ return res;
+ }
+}
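+
+/*
+ * Example: a = 0x7fff with shift = 0 rounds to 32767, which exceeds
+ * INT8_MAX, so the result clips to 127 and vxsat is set.
+ */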
+
+static inline int16_t
+vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
+{
+ uint8_t round, shift = b & 0x1f;
+ int32_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > INT16_MAX) {
+ env->vxsat = 0x1;
+ return INT16_MAX;
+ } else if (res < INT16_MIN) {
+ env->vxsat = 0x1;
+ return INT16_MIN;
+ } else {
+ return res;
+ }
+}
+
+static inline int32_t
+vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+{
+ uint8_t round, shift = b & 0x3f;
+ int64_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > INT32_MAX) {
+ env->vxsat = 0x1;
+ return INT32_MAX;
+ } else if (res < INT32_MIN) {
+ env->vxsat = 0x1;
+ return INT32_MIN;
+ } else {
+ return res;
+ }
+}
+
+RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl)
+
+RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl)
+
+static inline uint8_t
+vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+{
+ uint8_t round, shift = b & 0xf;
+ uint16_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > UINT8_MAX) {
+ env->vxsat = 0x1;
+ return UINT8_MAX;
+ } else {
+ return res;
+ }
+}
+
+static inline uint16_t
+vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
+{
+ uint8_t round, shift = b & 0x1f;
+ uint32_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > UINT16_MAX) {
+ env->vxsat = 0x1;
+ return UINT16_MAX;
+ } else {
+ return res;
+ }
+}
+
+static inline uint32_t
+vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+{
+ uint8_t round, shift = b & 0x3f;
+ int64_t res;
+
+ round = get_round(vxrm, a, shift);
+ res = (a >> shift) + round;
+ if (res > UINT32_MAX) {
+ env->vxsat = 0x1;
+ return UINT32_MAX;
+ } else {
+ return res;
+ }
+}
+
+RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl)
+
+RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl)
+
+/*
+ *** Vector Floating-Point Arithmetic Instructions
+ */
+/* Vector Single-Width Floating-Point Add/Subtract Instructions */
+#define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \
+}
+
+#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, vs1, vs2, i, env); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
+
+RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add)
+RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add)
+RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add)
+GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq)
+
+#define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
+}
+
+#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, s1, vs2, i, env); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
+
+RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add)
+RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add)
+RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add)
+GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq)
+
+RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub)
+RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub)
+RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub)
+GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub)
+RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub)
+RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub)
+GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq)
+
+static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s)
+{
+ return float16_sub(b, a, s);
+}
+
+static uint32_t float32_rsub(uint32_t a, uint32_t b, float_status *s)
+{
+ return float32_sub(b, a, s);
+}
+
+static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s)
+{
+ return float64_sub(b, a, s);
+}
+
+RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub)
+RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub)
+RVVCALL(OPFVF2, vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub)
+GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq)
+
+/* Vector Widening Floating-Point Add/Subtract Instructions */
+static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s)
+{
+ return float32_add(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), s);
+}
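+
+/*
+ * The 'true' argument selects IEEE half-precision semantics for the
+ * float16 conversion (as opposed to the ARM alternative format).
+ */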
+
+static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s)
+{
+ return float64_add(float32_to_float64(a, s),
+ float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16)
+RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32)
+GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16)
+RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32)
+GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq)
+
+static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s)
+{
+ return float32_sub(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s)
+{
+ return float64_sub(float32_to_float64(a, s),
+ float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16)
+RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32)
+GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16)
+RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32)
+GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq)
+
+static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s)
+{
+ return float32_add(a, float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s)
+{
+ return float64_add(a, float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16)
+RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32)
+GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16)
+RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32)
+GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq)
+
+static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s)
+{
+ return float32_sub(a, float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s)
+{
+ return float64_sub(a, float32_to_float64(b, s), s);
+}
+
+RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16)
+RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32)
+GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16)
+RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32)
+GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq)
+
+/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
+RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul)
+RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul)
+RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul)
+GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul)
+RVVCALL(OPFVF2, vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul)
+RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul)
+GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq)
+
+RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div)
+RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div)
+RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div)
+GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div)
+RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div)
+RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div)
+GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq)
+
+static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s)
+{
+ return float16_div(b, a, s);
+}
+
+static uint32_t float32_rdiv(uint32_t a, uint32_t b, float_status *s)
+{
+ return float32_div(b, a, s);
+}
+
+static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s)
+{
+ return float64_div(b, a, s);
+}
+
+RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv)
+RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv)
+RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv)
+GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq)
+
+/* Vector Widening Floating-Point Multiply */
+static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s)
+{
+ return float32_mul(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), s);
+}
+
+static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s)
+{
+ return float64_mul(float32_to_float64(a, s),
+ float32_to_float64(b, s), s);
+}
+RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16)
+RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32)
+GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16)
+RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32)
+GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq)
+
+/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
+#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status); \
+}
+
+static uint16_t fmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(a, b, d, 0, s);
+}
+
+static uint32_t fmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(a, b, d, 0, s);
+}
+
+static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(a, b, d, 0, s);
+}
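+
+/*
+ * The muladd flag combinations below map the four FMA variants:
+ *   vfmacc:   (a * b) + d    (no flags)
+ *   vfnmacc: -(a * b) - d    (negate_product | negate_c)
+ *   vfmsac:   (a * b) - d    (negate_c)
+ *   vfnmsac: -(a * b) + d    (negate_product)
+ * and likewise for vfmadd/vfnmadd/vfmsub/vfnmsub, which multiply by d
+ * and add the source operand instead.
+ */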
+
+RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16)
+RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32)
+RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64)
+GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq)
+
+#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ TD d = *((TD *)vd + HD(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status);\
+}
+
+RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16)
+RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32)
+RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64)
+GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq)
+
+static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(a, b, d,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(a, b, d,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(a, b, d,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16)
+RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32)
+RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64)
+GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16)
+RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32)
+RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64)
+GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq)
+
+static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(a, b, d, float_muladd_negate_c, s);
+}
+
+static uint32_t fmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(a, b, d, float_muladd_negate_c, s);
+}
+
+static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(a, b, d, float_muladd_negate_c, s);
+}
+
+RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16)
+RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32)
+RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64)
+GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16)
+RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32)
+RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64)
+GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq)
+
+static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(a, b, d, float_muladd_negate_product, s);
+}
+
+static uint32_t fnmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(a, b, d, float_muladd_negate_product, s);
+}
+
+static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(a, b, d, float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16)
+RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32)
+RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64)
+GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16)
+RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32)
+RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, fnmsac64)
+GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq)
+
+static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(d, b, a, 0, s);
+}
+
+static uint32_t fmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(d, b, a, 0, s);
+}
+
+static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(d, b, a, 0, s);
+}
+
+RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16)
+RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32)
+RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64)
+GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16)
+RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32)
+RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64)
+GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq)
+
+static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(d, b, a,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(d, b, a,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(d, b, a,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16)
+RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32)
+RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64)
+GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16)
+RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32)
+RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64)
+GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq)
+
+static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(d, b, a, float_muladd_negate_c, s);
+}
+
+static uint32_t fmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(d, b, a, float_muladd_negate_c, s);
+}
+
+static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(d, b, a, float_muladd_negate_c, s);
+}
+
+RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16)
+RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32)
+RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64)
+GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16)
+RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32)
+RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64)
+GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq)
+
+static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s)
+{
+ return float16_muladd(d, b, a, float_muladd_negate_product, s);
+}
+
+static uint32_t fnmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(d, b, a, float_muladd_negate_product, s);
+}
+
+static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(d, b, a, float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16)
+RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32)
+RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64)
+GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16)
+RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32)
+RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64)
+GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq)
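+/*
+ * Recap of the operand mapping above (RVV spec semantics, for reference):
+ *   vfmadd:  vd[i] =  (vd[i] * vs1[i]) + vs2[i]
+ *   vfnmadd: vd[i] = -(vd[i] * vs1[i]) - vs2[i]
+ *   vfmsub:  vd[i] =  (vd[i] * vs1[i]) - vs2[i]
+ *   vfnmsub: vd[i] = -(vd[i] * vs1[i]) + vs2[i]
+ * The helpers receive (vs2, vs1, vd) as (a, b, d) and call
+ * floatN_muladd(d, b, a, flags), so float_muladd_negate_product negates
+ * vd[i] * vs1[i] and float_muladd_negate_c negates the vs2[i] addend.
+ */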
+
+/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
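+/*
+ * Both factors are converted up to 2*SEW before the fused operation,
+ * e.g. vfwmacc computes vd[i] (2*SEW) = vs1[i] * vs2[i] + vd[i] entirely
+ * in the wider format; the nmacc/msac/nmsac variants apply the same
+ * negation flags as their single-width counterparts.
+ */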
+static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), d, 0, s);
+}
+
+static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(float32_to_float64(a, s),
+ float32_to_float64(b, s), d, 0, s);
+}
+
+RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16)
+RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32)
+GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16)
+RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
+GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq)
+
+static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), d,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(float32_to_float64(a, s),
+ float32_to_float64(b, s), d,
+ float_muladd_negate_c | float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16)
+RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32)
+GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16)
+RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32)
+GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq)
+
+static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), d,
+ float_muladd_negate_c, s);
+}
+
+static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(float32_to_float64(a, s),
+ float32_to_float64(b, s), d,
+ float_muladd_negate_c, s);
+}
+
+RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16)
+RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32)
+GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16)
+RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32)
+GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq)
+
+static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(float16_to_float32(a, true, s),
+ float16_to_float32(b, true, s), d,
+ float_muladd_negate_product, s);
+}
+
+static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s)
+{
+ return float64_muladd(float32_to_float64(a, s),
+ float32_to_float64(b, s), d,
+ float_muladd_negate_product, s);
+}
+
+RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16)
+RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32)
+GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl)
+GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq)
+RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16)
+RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32)
+GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl)
+GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq)
+
+/* Vector Floating-Point Square-Root Instruction */
+/* (TD, T2, TX2) */
+#define OP_UU_H uint16_t, uint16_t, uint16_t
+#define OP_UU_W uint32_t, uint32_t, uint32_t
+#define OP_UU_D uint64_t, uint64_t, uint64_t
+
+#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, void *vs2, int i, \
+ CPURISCVState *env) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \
+}
+
+#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ if (vl == 0) { \
+ return; \
+ } \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, vs2, i, env); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
+
+RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt)
+RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt)
+RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt)
+GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq)
+
+/* Vector Floating-Point MIN/MAX Instructions */
+RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum)
+RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum)
+RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum)
+GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum)
+RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum)
+RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum)
+GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq)
+
+RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum)
+RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum)
+RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum)
+GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum)
+RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum)
+RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum)
+GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq)
+
+/* Vector Floating-Point Sign-Injection Instructions */
+static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s)
+{
+ return deposit64(b, 0, 15, a);
+}
+
+static uint32_t fsgnj32(uint32_t a, uint32_t b, float_status *s)
+{
+ return deposit64(b, 0, 31, a);
+}
+
+static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s)
+{
+ return deposit64(b, 0, 63, a);
+}
+
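+/*
+ * deposit64(b, 0, n, a) keeps the sign bit of b and takes the low n
+ * exponent/mantissa bits from a, so vd gets vs2's magnitude with vs1's
+ * sign. For example, fsgnj16(0x3c00, 0x8000) = 0xbc00, i.e. +1.0 with
+ * a negative sign source yields -1.0.
+ */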
+RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16)
+RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32)
+RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64)
+GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16)
+RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32)
+RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64)
+GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq)
+
+static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s)
+{
+ return deposit64(~b, 0, 15, a);
+}
+
+static uint32_t fsgnjn32(uint32_t a, uint32_t b, float_status *s)
+{
+ return deposit64(~b, 0, 31, a);
+}
+
+static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s)
+{
+ return deposit64(~b, 0, 63, a);
+}
+
+RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16)
+RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32)
+RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16)
+RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32)
+RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64)
+GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq)
+
+static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s)
+{
+ return deposit64(b ^ a, 0, 15, a);
+}
+
+static uint32_t fsgnjx32(uint32_t a, uint32_t b, float_status *s)
+{
+ return deposit64(b ^ a, 0, 31, a);
+}
+
+static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s)
+{
+ return deposit64(b ^ a, 0, 63, a);
+}
+
+RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16)
+RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32)
+RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq)
+RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16)
+RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32)
+RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64)
+GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh)
+GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl)
+GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq)
+
+/* Vector Floating-Point Compare Instructions */
+#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, \
+ DO_OP(s2, s1, &env->fp_status)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
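+/*
+ * The destination is a mask register: one result bit per element, with
+ * every bit past vl (up to vlmax) cleared to zero, while inactive
+ * elements keep their previous mask value.
+ */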
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet)
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w, uint32_t, H4, float32_eq_quiet)
+GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet)
+
+#define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ vext_set_elem_mask(vd, mlen, i, \
+ DO_OP(s2, (ETYPE)s1, &env->fp_status)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet)
+GEN_VEXT_CMP_VF(vmfeq_vf_w, uint32_t, H4, float32_eq_quiet)
+GEN_VEXT_CMP_VF(vmfeq_vf_d, uint64_t, H8, float64_eq_quiet)
+
+static bool vmfne16(uint16_t a, uint16_t b, float_status *s)
+{
+ FloatRelation compare = float16_compare_quiet(a, b, s);
+ return compare != float_relation_equal;
+}
+
+static bool vmfne32(uint32_t a, uint32_t b, float_status *s)
+{
+ FloatRelation compare = float32_compare_quiet(a, b, s);
+ return compare != float_relation_equal;
+}
+
+static bool vmfne64(uint64_t a, uint64_t b, float_status *s)
+{
+ FloatRelation compare = float64_compare_quiet(a, b, s);
+ return compare != float_relation_equal;
+}
+
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_h, uint16_t, H2, vmfne16)
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_w, uint32_t, H4, vmfne32)
+GEN_VEXT_CMP_VV_ENV(vmfne_vv_d, uint64_t, H8, vmfne64)
+GEN_VEXT_CMP_VF(vmfne_vf_h, uint16_t, H2, vmfne16)
+GEN_VEXT_CMP_VF(vmfne_vf_w, uint32_t, H4, vmfne32)
+GEN_VEXT_CMP_VF(vmfne_vf_d, uint64_t, H8, vmfne64)
+
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_h, uint16_t, H2, float16_lt)
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_w, uint32_t, H4, float32_lt)
+GEN_VEXT_CMP_VV_ENV(vmflt_vv_d, uint64_t, H8, float64_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_h, uint16_t, H2, float16_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_w, uint32_t, H4, float32_lt)
+GEN_VEXT_CMP_VF(vmflt_vf_d, uint64_t, H8, float64_lt)
+
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_h, uint16_t, H2, float16_le)
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_w, uint32_t, H4, float32_le)
+GEN_VEXT_CMP_VV_ENV(vmfle_vv_d, uint64_t, H8, float64_le)
+GEN_VEXT_CMP_VF(vmfle_vf_h, uint16_t, H2, float16_le)
+GEN_VEXT_CMP_VF(vmfle_vf_w, uint32_t, H4, float32_le)
+GEN_VEXT_CMP_VF(vmfle_vf_d, uint64_t, H8, float64_le)
+
+static bool vmfgt16(uint16_t a, uint16_t b, float_status *s)
+{
+ FloatRelation compare = float16_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+static bool vmfgt32(uint32_t a, uint32_t b, float_status *s)
+{
+ FloatRelation compare = float32_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+static bool vmfgt64(uint64_t a, uint64_t b, float_status *s)
+{
+ FloatRelation compare = float64_compare(a, b, s);
+ return compare == float_relation_greater;
+}
+
+GEN_VEXT_CMP_VF(vmfgt_vf_h, uint16_t, H2, vmfgt16)
+GEN_VEXT_CMP_VF(vmfgt_vf_w, uint32_t, H4, vmfgt32)
+GEN_VEXT_CMP_VF(vmfgt_vf_d, uint64_t, H8, vmfgt64)
+
+static bool vmfge16(uint16_t a, uint16_t b, float_status *s)
+{
+ FloatRelation compare = float16_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+static bool vmfge32(uint32_t a, uint32_t b, float_status *s)
+{
+ FloatRelation compare = float32_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+static bool vmfge64(uint64_t a, uint64_t b, float_status *s)
+{
+ FloatRelation compare = float64_compare(a, b, s);
+ return compare == float_relation_greater ||
+ compare == float_relation_equal;
+}
+
+GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16)
+GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
+GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
+
+GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet)
+GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet)
+GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet)
+GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet)
+
+/* Vector Floating-Point Classify Instruction */
+#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
+static void do_##NAME(void *vd, void *vs2, int i) \
+{ \
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
+ *((TD *)vd + HD(i)) = OP(s2); \
+}
+
+#define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t vlmax = vext_maxsz(desc) / ESZ; \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ do_##NAME(vd, vs2, i); \
+ } \
+ CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \
+}
+
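+/*
+ * The ten result bits follow the scalar FCLASS encoding:
+ *   bit 0: -infinity          bit 5: positive subnormal
+ *   bit 1: negative normal    bit 6: positive normal
+ *   bit 2: negative subnormal bit 7: +infinity
+ *   bit 3: -0                 bit 8: signaling NaN
+ *   bit 4: +0                 bit 9: quiet NaN
+ */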
+target_ulong fclass_h(uint64_t frs1)
+{
+ float16 f = frs1;
+ bool sign = float16_is_neg(f);
+
+ if (float16_is_infinity(f)) {
+ return sign ? 1 << 0 : 1 << 7;
+ } else if (float16_is_zero(f)) {
+ return sign ? 1 << 3 : 1 << 4;
+ } else if (float16_is_zero_or_denormal(f)) {
+ return sign ? 1 << 2 : 1 << 5;
+ } else if (float16_is_any_nan(f)) {
+ float_status s = { }; /* for snan_bit_is_one */
+ return float16_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
+ } else {
+ return sign ? 1 << 1 : 1 << 6;
+ }
+}
+
+target_ulong fclass_s(uint64_t frs1)
+{
+ float32 f = frs1;
+ bool sign = float32_is_neg(f);
+
+ if (float32_is_infinity(f)) {
+ return sign ? 1 << 0 : 1 << 7;
+ } else if (float32_is_zero(f)) {
+ return sign ? 1 << 3 : 1 << 4;
+ } else if (float32_is_zero_or_denormal(f)) {
+ return sign ? 1 << 2 : 1 << 5;
+ } else if (float32_is_any_nan(f)) {
+ float_status s = { }; /* for snan_bit_is_one */
+ return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
+ } else {
+ return sign ? 1 << 1 : 1 << 6;
+ }
+}
+
+target_ulong fclass_d(uint64_t frs1)
+{
+ float64 f = frs1;
+ bool sign = float64_is_neg(f);
+
+ if (float64_is_infinity(f)) {
+ return sign ? 1 << 0 : 1 << 7;
+ } else if (float64_is_zero(f)) {
+ return sign ? 1 << 3 : 1 << 4;
+ } else if (float64_is_zero_or_denormal(f)) {
+ return sign ? 1 << 2 : 1 << 5;
+ } else if (float64_is_any_nan(f)) {
+ float_status s = { }; /* for snan_bit_is_one */
+ return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
+ } else {
+ return sign ? 1 << 1 : 1 << 6;
+ }
+}
+
+RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h)
+RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s)
+RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d)
+GEN_VEXT_V(vfclass_v_h, 2, 2, clearh)
+GEN_VEXT_V(vfclass_v_w, 4, 4, clearl)
+GEN_VEXT_V(vfclass_v_d, 8, 8, clearq)
+
+/* Vector Floating-Point Merge Instruction */
+#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t esz = sizeof(ETYPE); \
+ uint32_t vlmax = vext_maxsz(desc) / esz; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
+ *((ETYPE *)vd + H(i)) \
+ = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1); \
+ } \
+ CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \
+}
+
+GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh)
+GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl)
+GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq)
+
+/* Single-Width Floating-Point/Integer Type-Convert Instructions */
+/* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
+RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16)
+RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32)
+RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq)
+
+/* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
+RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16)
+RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32)
+RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq)
+
+/* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
+RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16)
+RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32)
+RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq)
+
+/* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
+RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16)
+RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32)
+RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl)
+GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq)
+
+/* Widening Floating-Point/Integer Type-Convert Instructions */
+/* (TD, T2, TX2) */
+#define WOP_UU_H uint32_t, uint16_t, uint16_t
+#define WOP_UU_W uint64_t, uint32_t, uint32_t
+/* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer. */
+RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32)
+RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64)
+GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl)
+GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq)
+
+/* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
+RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32)
+RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64)
+GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl)
+GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq)
+
+/* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float. */
+RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32)
+RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64)
+GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl)
+GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq)
+
+/* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
+RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32)
+RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64)
+GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl)
+GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq)
+
+/*
+ * vfwcvt.f.f.v vd, vs2, vm #
+ * Convert single-width float to double-width float.
+ */
+static uint32_t vfwcvtffv16(uint16_t a, float_status *s)
+{
+ return float16_to_float32(a, true, s);
+}
+
+RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16)
+RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
+GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl)
+GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq)
+
+/* Narrowing Floating-Point/Integer Type-Convert Instructions */
+/* (TD, T2, TX2) */
+#define NOP_UU_H uint16_t, uint32_t, uint32_t
+#define NOP_UU_W uint32_t, uint64_t, uint64_t
+/* vfncvt.xu.f.v vd, vs2, vm # Convert double-width float to unsigned integer. */
+RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16)
+RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32)
+GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl)
+
+/* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
+RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16)
+RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32)
+GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl)
+
+/* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float. */
+RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16)
+RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32)
+GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl)
+
+/* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
+RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16)
+RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32)
+GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl)
+
+/* vfncvt.f.f.v vd, vs2, vm # Convert double-width float to single-width float. */
+static uint16_t vfncvtffv16(uint32_t a, float_status *s)
+{
+ return float32_to_float16(a, true, s);
+}
+
+RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16)
+RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32)
+GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh)
+GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl)
+
+/*
+ *** Vector Reduction Operations
+ */
+/* Vector Single-Width Integer Reduction Instructions */
+#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \
+ TD s1 = *((TD *)vs1 + HD(0)); \
+ \
+ for (i = 0; i < vl; i++) { \
+ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ s1 = OP(s1, (TD)s2); \
+ } \
+ *((TD *)vd + HD(0)) = s1; \
+ CLEAR_FN(vd, 1, sizeof(TD), tot); \
+}
+
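+/*
+ * Example: vredsum with vs1[0] = 10, vs2 = {1, 2, 3} and vl = 3 yields
+ * vd[0] = 16; the remainder of vd is cleared rather than left as-is.
+ */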
+/* vd[0] = sum(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb)
+GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh)
+GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl)
+GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq)
+
+/* vd[0] = maxu(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb)
+GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh)
+GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl)
+GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq)
+
+/* vd[0] = max(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb)
+GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh)
+GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl)
+GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq)
+
+/* vd[0] = minu(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb)
+GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh)
+GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl)
+GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq)
+
+/* vd[0] = min(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb)
+GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh)
+GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl)
+GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq)
+
+/* vd[0] = and(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb)
+GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh)
+GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl)
+GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq)
+
+/* vd[0] = or(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb)
+GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh)
+GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl)
+GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq)
+
+/* vd[0] = xor(vs1[0], vs2[*]) */
+GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb)
+GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh)
+GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl)
+GEN_VEXT_RED(vredxor_vs_d, int64_t, int64_t, H8, H8, DO_XOR, clearq)
+
+/* Vector Widening Integer Reduction Instructions */
+/* Signed sum reduction into double-width accumulator */
+GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh)
+GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl)
+GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq)
+
+/* Unsigned sum reduction into double-width accumulator */
+GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh)
+GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl)
+GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq)
+
+/* Vector Single-Width Floating-Point Reduction Instructions */
+#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \
+ TD s1 = *((TD *)vs1 + HD(0)); \
+ \
+ for (i = 0; i < vl; i++) { \
+ TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ s1 = OP(s1, (TD)s2, &env->fp_status); \
+ } \
+ *((TD *)vd + HD(0)) = s1; \
+ CLEAR_FN(vd, 1, sizeof(TD), tot); \
+}
+
+/* Unordered sum */
+GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh)
+GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl)
+GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq)
+
+/* Maximum value */
+GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh)
+GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum, clearl)
+GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq)
+
+/* Minimum value */
+GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh)
+GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl)
+GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq)
+
+/* Vector Widening Floating-Point Reduction Instructions */
+/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
+void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1,
+ void *vs2, CPURISCVState *env, uint32_t desc)
+{
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ uint32_t i;
+ uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
+ uint32_t s1 = *((uint32_t *)vs1 + H4(0));
+
+ for (i = 0; i < vl; i++) {
+ uint16_t s2 = *((uint16_t *)vs2 + H2(i));
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status),
+ &env->fp_status);
+ }
+ *((uint32_t *)vd + H4(0)) = s1;
+ clearl(vd, 1, sizeof(uint32_t), tot);
+}
+
+void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1,
+ void *vs2, CPURISCVState *env, uint32_t desc)
+{
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ uint32_t i;
+ uint32_t tot = env_archcpu(env)->cfg.vlen / 8;
+ uint64_t s1 = *((uint64_t *)vs1);
+
+ for (i = 0; i < vl; i++) {
+ uint32_t s2 = *((uint32_t *)vs2 + H4(i));
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status),
+ &env->fp_status);
+ }
+ *((uint64_t *)vd) = s1;
+ clearq(vd, 1, sizeof(uint64_t), tot);
+}
+
+/*
+ *** Vector Mask Operations
+ */
+/* Vector Mask-Register Logical Instructions */
+#define GEN_VEXT_MASK_VV(NAME, OP) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
+ void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ int a, b; \
+ \
+ for (i = 0; i < vl; i++) { \
+ a = vext_elem_mask(vs1, mlen, i); \
+ b = vext_elem_mask(vs2, mlen, i); \
+ vext_set_elem_mask(vd, mlen, i, OP(b, a)); \
+ } \
+ for (; i < vlmax; i++) { \
+ vext_set_elem_mask(vd, mlen, i, 0); \
+ } \
+}
+
+#define DO_NAND(N, M) (!(N & M))
+#define DO_ANDNOT(N, M) (N & !M)
+#define DO_NOR(N, M) (!(N | M))
+#define DO_ORNOT(N, M) (N | !M)
+#define DO_XNOR(N, M) (!(N ^ M))
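+/*
+ * a and b are single 0/1 mask bits here, so logical '!' acts as a
+ * one-bit NOT, e.g. DO_ANDNOT(1, 0) = 1 & !0 = 1.
+ */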
+
+GEN_VEXT_MASK_VV(vmand_mm, DO_AND)
+GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND)
+GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT)
+GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR)
+GEN_VEXT_MASK_VV(vmor_mm, DO_OR)
+GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR)
+GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT)
+GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR)
+
+/* Vector mask population count vmpopc */
+target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ target_ulong cnt = 0;
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ int i;
+
+ for (i = 0; i < vl; i++) {
+ if (vm || vext_elem_mask(v0, mlen, i)) {
+ if (vext_elem_mask(vs2, mlen, i)) {
+ cnt++;
+ }
+ }
+ }
+ return cnt;
+}
+
+/* vmfirst find-first-set mask bit */
+target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ int i;
+
+ for (i = 0; i < vl; i++) {
+ if (vm || vext_elem_mask(v0, mlen, i)) {
+ if (vext_elem_mask(vs2, mlen, i)) {
+ return i;
+ }
+ }
+ }
+ return -1LL;
+}
+
+enum set_mask_type {
+ ONLY_FIRST = 1,
+ INCLUDE_FIRST,
+ BEFORE_FIRST,
+};
+
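+/*
+ * Illustration with source mask vs2 = 0 0 1 0 1 (element 2 holds the
+ * first set bit) and all elements active:
+ *   vmsbf (BEFORE_FIRST):  1 1 0 0 0   set-before-first
+ *   vmsif (INCLUDE_FIRST): 1 1 1 0 0   set-including-first
+ *   vmsof (ONLY_FIRST):    0 0 1 0 0   set-only-first
+ */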
+static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc, enum set_mask_type type)
+{
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;
+ uint32_t vm = vext_vm(desc);
+ uint32_t vl = env->vl;
+ int i;
+ bool first_mask_bit = false;
+
+ for (i = 0; i < vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ /* write a zero to all following active elements */
+ if (first_mask_bit) {
+ vext_set_elem_mask(vd, mlen, i, 0);
+ continue;
+ }
+ if (vext_elem_mask(vs2, mlen, i)) {
+ first_mask_bit = true;
+ if (type == BEFORE_FIRST) {
+ vext_set_elem_mask(vd, mlen, i, 0);
+ } else {
+ vext_set_elem_mask(vd, mlen, i, 1);
+ }
+ } else {
+ if (type == ONLY_FIRST) {
+ vext_set_elem_mask(vd, mlen, i, 0);
+ } else {
+ vext_set_elem_mask(vd, mlen, i, 1);
+ }
+ }
+ }
+ for (; i < vlmax; i++) {
+ vext_set_elem_mask(vd, mlen, i, 0);
+ }
+}
+
+void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ vmsetm(vd, v0, vs2, env, desc, BEFORE_FIRST);
+}
+
+void HELPER(vmsif_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ vmsetm(vd, v0, vs2, env, desc, INCLUDE_FIRST);
+}
+
+void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ vmsetm(vd, v0, vs2, env, desc, ONLY_FIRST);
+}
+
+/* Vector Iota Instruction */
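+/*
+ * Each active element receives the count of set source bits strictly
+ * before it, e.g. vs2 = 1 0 0 1 0 1 produces vd = 0 1 1 1 2 2 when all
+ * elements are active.
+ */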
+#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
+ uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t sum = 0; \
+ int i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = sum; \
+ if (vext_elem_mask(vs2, mlen, i)) { \
+ sum++; \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb)
+GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh)
+GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl)
+GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq)
+
+/* Vector Element Index Instruction */
+#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ int i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = i; \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb)
+GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh)
+GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl)
+GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq)
+
+/*
+ *** Vector Permutation Instructions
+ */
+
+/* Vector Slide Instructions */
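+/*
+ * vslideup leaves vd[0..offset-1] undisturbed (its copy loop starts at
+ * i = offset), while vslidedown supplies zero for any source index at
+ * or beyond vlmax.
+ */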
+#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ target_ulong offset = s1, i; \
+ \
+ for (i = offset; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ target_ulong offset = s1, i; \
+ \
+ for (i = 0; i < vl; ++i) { \
+ target_ulong j = i + offset; \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(i)) = j >= vlmax ? 0 : *((ETYPE *)vs2 + H(j)); \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (i == 0) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (i == vl - 1) { \
+ *((ETYPE *)vd + H(i)) = s1; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
+
+/* Vector Register Gather Instruction */
+#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint64_t index; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ index = *((ETYPE *)vs1 + H(i)); \
+ if (index >= vlmax) { \
+ *((ETYPE *)vd + H(i)) = 0; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
+GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+ uint64_t index = s1; \
+ uint32_t i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (index >= vlmax) { \
+ *((ETYPE *)vd + H(i)) = 0; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
+GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)
+
+/* Vector Compress Instruction */
+#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vl = env->vl; \
+ uint32_t num = 0, i; \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vext_elem_mask(vs1, mlen, i)) { \
+ continue; \
+ } \
+ *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
+ num++; \
+ } \
+ CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* Compress into vd elements of vs2 where vs1 is enabled */
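+/*
+ * Example: mask vs1 = 1 0 1 1 with vs2 = {a, b, c, d} packs the enabled
+ * elements to the front, vd = {a, c, d, ...}, and the tail is cleared.
+ */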
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl)
+GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq)
diff --git a/target/rx/Kconfig b/target/rx/Kconfig
new file mode 100644
index 000000000..aceb5ed28
--- /dev/null
+++ b/target/rx/Kconfig
@@ -0,0 +1,2 @@
+config RX
+ bool
diff --git a/target/rx/cpu-param.h b/target/rx/cpu-param.h
new file mode 100644
index 000000000..b156ad1ca
--- /dev/null
+++ b/target/rx/cpu-param.h
@@ -0,0 +1,30 @@
+/*
+ * RX cpu parameters
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RX_CPU_PARAM_H
+#define RX_CPU_PARAM_H
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define NB_MMU_MODES 1
+
+#endif
diff --git a/target/rx/cpu-qom.h b/target/rx/cpu-qom.h
new file mode 100644
index 000000000..7310558e0
--- /dev/null
+++ b/target/rx/cpu-qom.h
@@ -0,0 +1,50 @@
+/*
+ * RX CPU
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RX_CPU_QOM_H
+#define RX_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_RX_CPU "rx-cpu"
+
+#define TYPE_RX62N_CPU RX_CPU_TYPE_NAME("rx62n")
+
+OBJECT_DECLARE_TYPE(RXCPU, RXCPUClass,
+ RX_CPU)
+
+/*
+ * RXCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An RX CPU model.
+ */
+struct RXCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+#define CPUArchState struct CPURXState
+
+#endif
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
new file mode 100644
index 000000000..25a4aa297
--- /dev/null
+++ b/target/rx/cpu.c
@@ -0,0 +1,246 @@
+/*
+ * QEMU RX CPU
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+#include "hw/loader.h"
+#include "fpu/softfloat.h"
+
+static void rx_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ RXCPU *cpu = RX_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static void rx_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ RXCPU *cpu = RX_CPU(cs);
+
+ cpu->env.pc = tb->pc;
+}
+
+static bool rx_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request &
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
+}
+
+static void rx_cpu_reset(DeviceState *dev)
+{
+ RXCPU *cpu = RX_CPU(dev);
+ RXCPUClass *rcc = RX_CPU_GET_CLASS(cpu);
+ CPURXState *env = &cpu->env;
+ uint32_t *resetvec;
+
+ rcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPURXState, end_reset_fields));
+
+ resetvec = rom_ptr(0xfffffffc, 4);
+ if (resetvec) {
+ /* A directly loaded kernel installs no reset vector, so this is skipped. */
+ env->pc = ldl_p(resetvec);
+ }
+ rx_cpu_unpack_psw(env, 0, 1);
+ env->regs[0] = env->isp = env->usp = 0;
+ env->fpsw = 0;
+ set_flush_to_zero(1, &env->fp_status);
+ set_flush_inputs_to_zero(1, &env->fp_status);
+}
+
+static void rx_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+
+ qemu_printf(" %s\n", object_class_get_name(oc));
+}
+
+void rx_cpu_list(void)
+{
+ GSList *list;
+ list = object_class_get_list_sorted(TYPE_RX_CPU, false);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, rx_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+static ObjectClass *rx_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL && object_class_dynamic_cast(oc, TYPE_RX_CPU) != NULL &&
+ !object_class_is_abstract(oc)) {
+ return oc;
+ }
+ typename = g_strdup_printf(RX_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+
+ return oc;
+}
+
+static void rx_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ RXCPUClass *rcc = RX_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ rcc->parent_realize(dev, errp);
+}
+
+static void rx_cpu_set_irq(void *opaque, int no, int request)
+{
+ RXCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int irq = request & 0xff;
+
+ static const int mask[] = {
+ [RX_CPU_IRQ] = CPU_INTERRUPT_HARD,
+ [RX_CPU_FIR] = CPU_INTERRUPT_FIR,
+ };
+ if (irq) {
+ cpu->env.req_irq = irq;
+ cpu->env.req_ipl = (request >> 8) & 0x0f;
+ cpu_interrupt(cs, mask[no]);
+ } else {
+ cpu_reset_interrupt(cs, mask[no]);
+ }
+}
+
+static void rx_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_rx;
+ info->print_insn = print_insn_rx;
+}
+
+static bool rx_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ uint32_t address, physical, prot;
+
+ /* Linear mapping */
+ address = physical = addr & TARGET_PAGE_MASK;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+}
+
+static void rx_cpu_init(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ RXCPU *cpu = RX_CPU(obj);
+ CPURXState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+ cs->env_ptr = env;
+ qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
+}
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps rx_sysemu_ops = {
+ .get_phys_page_debug = rx_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps rx_tcg_ops = {
+ .initialize = rx_translate_init,
+ .synchronize_from_tb = rx_cpu_synchronize_from_tb,
+ .tlb_fill = rx_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .cpu_exec_interrupt = rx_cpu_exec_interrupt,
+ .do_interrupt = rx_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void rx_cpu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ CPUClass *cc = CPU_CLASS(klass);
+ RXCPUClass *rcc = RX_CPU_CLASS(klass);
+
+ device_class_set_parent_realize(dc, rx_cpu_realize,
+ &rcc->parent_realize);
+ device_class_set_parent_reset(dc, rx_cpu_reset,
+ &rcc->parent_reset);
+
+ cc->class_by_name = rx_cpu_class_by_name;
+ cc->has_work = rx_cpu_has_work;
+ cc->dump_state = rx_cpu_dump_state;
+ cc->set_pc = rx_cpu_set_pc;
+
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &rx_sysemu_ops;
+#endif
+ cc->gdb_read_register = rx_cpu_gdb_read_register;
+ cc->gdb_write_register = rx_cpu_gdb_write_register;
+ cc->disas_set_info = rx_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 26;
+ cc->gdb_core_xml_file = "rx-core.xml";
+ cc->tcg_ops = &rx_tcg_ops;
+}
+
+static const TypeInfo rx_cpu_info = {
+ .name = TYPE_RX_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(RXCPU),
+ .instance_init = rx_cpu_init,
+ .abstract = true,
+ .class_size = sizeof(RXCPUClass),
+ .class_init = rx_cpu_class_init,
+};
+
+static const TypeInfo rx62n_rx_cpu_info = {
+ .name = TYPE_RX62N_CPU,
+ .parent = TYPE_RX_CPU,
+};
+
+static void rx_cpu_register_types(void)
+{
+ type_register_static(&rx_cpu_info);
+ type_register_static(&rx62n_rx_cpu_info);
+}
+
+type_init(rx_cpu_register_types)
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
new file mode 100644
index 000000000..4ac71aec3
--- /dev/null
+++ b/target/rx/cpu.h
@@ -0,0 +1,177 @@
+/*
+ * RX emulation definition
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef RX_CPU_H
+#define RX_CPU_H
+
+#include "qemu/bitops.h"
+#include "qemu-common.h"
+#include "hw/registerfields.h"
+#include "cpu-qom.h"
+
+#include "exec/cpu-defs.h"
+
+/* PSW define */
+REG32(PSW, 0)
+FIELD(PSW, C, 0, 1)
+FIELD(PSW, Z, 1, 1)
+FIELD(PSW, S, 2, 1)
+FIELD(PSW, O, 3, 1)
+FIELD(PSW, I, 16, 1)
+FIELD(PSW, U, 17, 1)
+FIELD(PSW, PM, 20, 1)
+FIELD(PSW, IPL, 24, 4)
+
+/* FPSW define */
+REG32(FPSW, 0)
+FIELD(FPSW, RM, 0, 2)
+FIELD(FPSW, CV, 2, 1)
+FIELD(FPSW, CO, 3, 1)
+FIELD(FPSW, CZ, 4, 1)
+FIELD(FPSW, CU, 5, 1)
+FIELD(FPSW, CX, 6, 1)
+FIELD(FPSW, CE, 7, 1)
+FIELD(FPSW, CAUSE, 2, 6)
+FIELD(FPSW, DN, 8, 1)
+FIELD(FPSW, EV, 10, 1)
+FIELD(FPSW, EO, 11, 1)
+FIELD(FPSW, EZ, 12, 1)
+FIELD(FPSW, EU, 13, 1)
+FIELD(FPSW, EX, 14, 1)
+FIELD(FPSW, ENABLE, 10, 5)
+FIELD(FPSW, FV, 26, 1)
+FIELD(FPSW, FO, 27, 1)
+FIELD(FPSW, FZ, 28, 1)
+FIELD(FPSW, FU, 29, 1)
+FIELD(FPSW, FX, 30, 1)
+FIELD(FPSW, FLAGS, 26, 4)
+FIELD(FPSW, FS, 31, 1)
+
+enum {
+ NUM_REGS = 16,
+};
+
+typedef struct CPURXState {
+ /* CPU registers */
+ uint32_t regs[NUM_REGS]; /* general registers */
+ uint32_t psw_o; /* O bit of status register */
+ uint32_t psw_s; /* S bit of status register */
+ uint32_t psw_z; /* Z bit of status register */
+ uint32_t psw_c; /* C bit of status register */
+ uint32_t psw_u;
+ uint32_t psw_i;
+ uint32_t psw_pm;
+ uint32_t psw_ipl;
+ uint32_t bpsw; /* backup status */
+ uint32_t bpc; /* backup pc */
+ uint32_t isp; /* interrupt stack pointer */
+ uint32_t usp; /* user stack pointer */
+ uint32_t pc; /* program counter */
+ uint32_t intb; /* interrupt vector table base */
+ uint32_t fintv;
+ uint32_t fpsw;
+ uint64_t acc;
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Internal use */
+ uint32_t in_sleep;
+ uint32_t req_irq; /* Requested interrupt no (hard) */
+ uint32_t req_ipl; /* Requested interrupt level */
+ uint32_t ack_irq; /* execute irq */
+ uint32_t ack_ipl; /* execute ipl */
+ float_status fp_status;
+ qemu_irq ack; /* Interrupt acknowledge */
+} CPURXState;
+
+/*
+ * RXCPU:
+ * @env: #CPURXState
+ *
+ * An RX CPU
+ */
+struct RXCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPURXState env;
+};
+
+typedef RXCPU ArchCPU;
+
+#define ENV_OFFSET offsetof(RXCPU, env)
+
+#define RX_CPU_TYPE_SUFFIX "-" TYPE_RX_CPU
+#define RX_CPU_TYPE_NAME(model) model RX_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_RX_CPU
+
+const char *rx_crname(uint8_t cr);
+#ifndef CONFIG_USER_ONLY
+void rx_cpu_do_interrupt(CPUState *cpu);
+bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req);
+#endif /* !CONFIG_USER_ONLY */
+void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+void rx_translate_init(void);
+void rx_cpu_list(void);
+void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
+
+#define cpu_list rx_cpu_list
+
+#include "exec/cpu-all.h"
+
+#define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_FIR CPU_INTERRUPT_TGT_INT_1
+
+#define RX_CPU_IRQ 0
+#define RX_CPU_FIR 1
+
+static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = FIELD_DP32(0, PSW, PM, env->psw_pm);
+}
+
+static inline int cpu_mmu_index(CPURXState *env, bool ifetch)
+{
+ return 0;
+}
+
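+/*
+ * Flag storage convention, as used by the shifts below: psw_o and psw_s
+ * keep their flag in bit 31, psw_z holds the last result value (a zero
+ * result means Z = 1), and psw_c is a plain 0/1 value.
+ */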
+static inline uint32_t rx_cpu_pack_psw(CPURXState *env)
+{
+ uint32_t psw = 0;
+ psw = FIELD_DP32(psw, PSW, IPL, env->psw_ipl);
+ psw = FIELD_DP32(psw, PSW, PM, env->psw_pm);
+ psw = FIELD_DP32(psw, PSW, U, env->psw_u);
+ psw = FIELD_DP32(psw, PSW, I, env->psw_i);
+ psw = FIELD_DP32(psw, PSW, O, env->psw_o >> 31);
+ psw = FIELD_DP32(psw, PSW, S, env->psw_s >> 31);
+ psw = FIELD_DP32(psw, PSW, Z, env->psw_z == 0);
+ psw = FIELD_DP32(psw, PSW, C, env->psw_c);
+ return psw;
+}
+
+#endif /* RX_CPU_H */
diff --git a/target/rx/disas.c b/target/rx/disas.c
new file mode 100644
index 000000000..67b932882
--- /dev/null
+++ b/target/rx/disas.c
@@ -0,0 +1,1446 @@
+/*
+ * Renesas RX Disassembler
+ *
+ * Copyright (c) 2019 Yoshinori Sato <ysato@users.sourceforge.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/dis-asm.h"
+#include "qemu/bitops.h"
+#include "cpu.h"
+
+typedef struct DisasContext {
+ disassemble_info *dis;
+ uint32_t addr;
+ uint32_t pc;
+ uint8_t len;
+ uint8_t bytes[8];
+} DisasContext;
+
+
+static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
+ int i, int n)
+{
+ uint32_t addr = ctx->addr;
+
+ g_assert(ctx->len == i);
+ g_assert(n <= ARRAY_SIZE(ctx->bytes));
+
+ while (++i <= n) {
+ ctx->dis->read_memory_func(addr++, &ctx->bytes[i - 1], 1, ctx->dis);
+ insn |= ctx->bytes[i - 1] << (32 - i * 8);
+ }
+ ctx->addr = addr;
+ ctx->len = n;
+
+ return insn;
+}
+
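+/*
+ * Read an immediate operand: sz = 1, 2 or 3 selects a sign-extended
+ * 1-, 2- or 3-byte value; sz = 0 selects a full 32-bit word.
+ */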
+static int32_t li(DisasContext *ctx, int sz)
+{
+ uint32_t addr = ctx->addr;
+ uintptr_t len = ctx->len;
+
+ switch (sz) {
+ case 1:
+ g_assert(len + 1 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 1;
+ ctx->len += 1;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 1, ctx->dis);
+ return (int8_t)ctx->bytes[len];
+ case 2:
+ g_assert(len + 2 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 2;
+ ctx->len += 2;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 2, ctx->dis);
+ return ldsw_le_p(ctx->bytes + len);
+ case 3:
+ g_assert(len + 3 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 3;
+ ctx->len += 3;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 3, ctx->dis);
+ return (int8_t)ctx->bytes[len + 2] << 16 | lduw_le_p(ctx->bytes + len);
+ case 0:
+ g_assert(len + 4 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 4;
+ ctx->len += 4;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 4, ctx->dis);
+ return ldl_le_p(ctx->bytes + len);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static int bdsp_s(DisasContext *ctx, int d)
+{
+ /*
+ * 0 -> 8
+ * 1 -> 9
+ * 2 -> 10
+ * 3 -> 3
+ * :
+ * 7 -> 7
+ */
+ if (d < 3) {
+ d += 8;
+ }
+ return d;
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+static void dump_bytes(DisasContext *ctx)
+{
+ int i, len = ctx->len;
+
+ for (i = 0; i < len; ++i) {
+ ctx->dis->fprintf_func(ctx->dis->stream, "%02x ", ctx->bytes[i]);
+ }
+ ctx->dis->fprintf_func(ctx->dis->stream, "%*c", (8 - i) * 3, '\t');
+}
+
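+/* Emit the raw instruction bytes, then the formatted mnemonic and operands. */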
+#define prt(...) \
+ do { \
+ dump_bytes(ctx); \
+ ctx->dis->fprintf_func(ctx->dis->stream, __VA_ARGS__); \
+ } while (0)
+
+#define RX_MEMORY_BYTE 0
+#define RX_MEMORY_WORD 1
+#define RX_MEMORY_LONG 2
+
+#define RX_IM_BYTE 0
+#define RX_IM_WORD 1
+#define RX_IM_LONG 2
+#define RX_IM_UWORD 3
+
+static const char size[] = {'b', 'w', 'l'};
+static const char cond[][4] = {
+ "eq", "ne", "c", "nc", "gtu", "leu", "pz", "n",
+ "ge", "lt", "gt", "le", "o", "no", "ra", "f"
+};
+static const char psw[] = {
+ 'c', 'z', 's', 'o', 0, 0, 0, 0,
+ 'i', 'u', 0, 0, 0, 0, 0, 0,
+};
+
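+/*
+ * Format an indexed-addressing displacement: ld selects 0, 1 or 2
+ * displacement bytes, which are scaled by the operand size encoded
+ * in mi.
+ */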
+static void rx_index_addr(DisasContext *ctx, char out[8], int ld, int mi)
+{
+ uint32_t addr = ctx->addr;
+ uintptr_t len = ctx->len;
+ uint16_t dsp;
+
+ switch (ld) {
+ case 0:
+ /* No index; return empty string. */
+ out[0] = '\0';
+ return;
+ case 1:
+ g_assert(len + 1 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 1;
+ ctx->len += 1;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 1, ctx->dis);
+ dsp = ctx->bytes[len];
+ break;
+ case 2:
+ g_assert(len + 2 <= ARRAY_SIZE(ctx->bytes));
+ ctx->addr += 2;
+ ctx->len += 2;
+ ctx->dis->read_memory_func(addr, ctx->bytes + len, 2, ctx->dis);
+ dsp = lduw_le_p(ctx->bytes + len);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ sprintf(out, "%u", dsp << (mi < 3 ? mi : 4 - mi));
+}
+
+static void prt_ldmi(DisasContext *ctx, const char *insn,
+ int ld, int mi, int rs, int rd)
+{
+ static const char sizes[][4] = {".b", ".w", ".l", ".uw", ".ub"};
+ char dsp[8];
+
+ if (ld < 3) {
+ rx_index_addr(ctx, dsp, ld, mi);
+ prt("%s\t%s[r%d]%s, r%d", insn, dsp, rs, sizes[mi], rd);
+ } else {
+ prt("%s\tr%d, r%d", insn, rs, rd);
+ }
+}
+
+static void prt_ir(DisasContext *ctx, const char *insn, int imm, int rd)
+{
+ if (imm < 0x100) {
+ prt("%s\t#%d, r%d", insn, imm, rd);
+ } else {
+ prt("%s\t#0x%08x, r%d", insn, imm, rd);
+ }
+}
+
+/* mov.[bwl] rs,dsp:[rd] */
+static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
+{
+ if (a->dsp > 0) {
+ prt("mov.%c\tr%d,%d[r%d]",
+ size[a->sz], a->rs, a->dsp << a->sz, a->rd);
+ } else {
+ prt("mov.%c\tr%d,[r%d]",
+ size[a->sz], a->rs, a->rd);
+ }
+ return true;
+}
+
+/* mov.[bwl] dsp:[rs],rd */
+static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
+{
+ if (a->dsp > 0) {
+ prt("mov.%c\t%d[r%d], r%d",
+ size[a->sz], a->dsp << a->sz, a->rs, a->rd);
+ } else {
+ prt("mov.%c\t[r%d], r%d",
+ size[a->sz], a->rs, a->rd);
+ }
+ return true;
+}
+
+/* mov.l #uimm4,rd */
+/* mov.l #uimm8,rd */
+/* mov.l #imm,rd */
+static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
+{
+ prt_ir(ctx, "mov.l", a->imm, a->rd);
+ return true;
+}
+
+/* mov.[bwl] #uimm8,dsp:[rd] */
+/* mov #imm, dsp:[rd] */
+static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
+{
+ if (a->dsp > 0) {
+ prt("mov.%c\t#%d,%d[r%d]",
+ size[a->sz], a->imm, a->dsp << a->sz, a->rd);
+ } else {
+ prt("mov.%c\t#%d,[r%d]",
+ size[a->sz], a->imm, a->rd);
+ }
+ return true;
+}
+
+/* mov.[bwl] [ri,rb],rd */
+static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
+{
+ prt("mov.%c\t[r%d,r%d], r%d", size[a->sz], a->ri, a->rb, a->rd);
+ return true;
+}
+
+/* mov.[bwl] rd,[ri,rb] */
+static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
+{
+ prt("mov.%c\tr%d, [r%d, r%d]", size[a->sz], a->rs, a->ri, a->rb);
+ return true;
+}
+
+
+/* mov.[bwl] dsp:[rs],dsp:[rd] */
+/* mov.[bwl] rs,dsp:[rd] */
+/* mov.[bwl] dsp:[rs],rd */
+/* mov.[bwl] rs,rd */
+static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
+{
+ char dspd[8], dsps[8], szc = size[a->sz];
+
+ if (a->lds == 3 && a->ldd == 3) {
+ /* mov.[bwl] rs,rd */
+ prt("mov.%c\tr%d, r%d", szc, a->rs, a->rd);
+ } else if (a->lds == 3) {
+ rx_index_addr(ctx, dspd, a->ldd, a->sz);
+ prt("mov.%c\tr%d, %s[r%d]", szc, a->rs, dspd, a->rd);
+ } else if (a->ldd == 3) {
+ rx_index_addr(ctx, dsps, a->lds, a->sz);
+ prt("mov.%c\t%s[r%d], r%d", szc, dsps, a->rs, a->rd);
+ } else {
+ rx_index_addr(ctx, dsps, a->lds, a->sz);
+ rx_index_addr(ctx, dspd, a->ldd, a->sz);
+ prt("mov.%c\t%s[r%d], %s[r%d]", szc, dsps, a->rs, dspd, a->rd);
+ }
+ return true;
+}
+
+/* mov.[bwl] rs,[rd+] */
+/* mov.[bwl] rs,[-rd] */
+static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
+{
+ if (a->ad) {
+ prt("mov.%c\tr%d, [-r%d]", size[a->sz], a->rs, a->rd);
+ } else {
+ prt("mov.%c\tr%d, [r%d+]", size[a->sz], a->rs, a->rd);
+ }
+ return true;
+}
+
+/* mov.[bwl] [rd+],rs */
+/* mov.[bwl] [-rd],rs */
+static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
+{
+ if (a->ad) {
+ prt("mov.%c\t[-r%d], r%d", size[a->sz], a->rd, a->rs);
+ } else {
+ prt("mov.%c\t[r%d+], r%d", size[a->sz], a->rd, a->rs);
+ }
+ return true;
+}
+
+/* movu.[bw] dsp5:[rs],rd */
+static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
+{
+ if (a->dsp > 0) {
+ prt("movu.%c\t%d[r%d], r%d", size[a->sz],
+ a->dsp << a->sz, a->rs, a->rd);
+ } else {
+ prt("movu.%c\t[r%d], r%d", size[a->sz], a->rs, a->rd);
+ }
+ return true;
+}
+
+/* movu.[bw] rs,rd */
+static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
+{
+ prt("movu.%c\tr%d, r%d", size[a->sz], a->rs, a->rd);
+ return true;
+}
+
+/* movu.[bw] [ri,rb],rd */
+static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
+{
+ prt("mov.%c\t[r%d,r%d], r%d", size[a->sz], a->ri, a->rb, a->rd);
+ return true;
+}
+
+/* movu.[bw] [rs+],rd */
+/* movu.[bw] [-rs],rd */
+static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
+{
+ if (a->ad) {
+ prt("movu.%c\t[-r%d], r%d", size[a->sz], a->rd, a->rs);
+ } else {
+ prt("movu.%c\t[r%d+], r%d", size[a->sz], a->rd, a->rs);
+ }
+ return true;
+}
+
+/* pop rd */
+static bool trans_POP(DisasContext *ctx, arg_POP *a)
+{
+ prt("pop\tr%d", a->rd);
+ return true;
+}
+
+/* popc rx */
+static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
+{
+ prt("pop\tr%s", rx_crname(a->cr));
+ return true;
+}
+
+/* popm rd-rd2 */
+static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
+{
+ prt("popm\tr%d-r%d", a->rd, a->rd2);
+ return true;
+}
+
+/* push rs */
+static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
+{
+ prt("push\tr%d", a->rs);
+ return true;
+}
+
+/* push dsp[rs] */
+static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
+{
+ char dsp[8];
+
+ rx_index_addr(ctx, dsp, a->ld, a->sz);
+ prt("push\t%s[r%d]", dsp, a->rs);
+ return true;
+}
+
+/* pushc rx */
+static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
+{
+ prt("push\t%s", rx_crname(a->cr));
+ return true;
+}
+
+/* pushm rs-rs2*/
+static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
+{
+ prt("pushm\tr%d-r%d", a->rs, a->rs2);
+ return true;
+}
+
+/* xchg rs,rd */
+static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
+{
+ prt("xchg\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+/* xchg dsp[rs].<mi>,rd */
+static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
+{
+ prt_ldmi(ctx, "xchg", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* stz #imm,rd */
+static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
+{
+ prt_ir(ctx, "stz", a->imm, a->rd);
+ return true;
+}
+
+/* stnz #imm,rd */
+static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
+{
+ prt_ir(ctx, "stnz", a->imm, a->rd);
+ return true;
+}
+
+/* rtsd #imm */
+static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
+{
+ prt("rtsd\t#%d", a->imm << 2);
+ return true;
+}
+
+/* rtsd #imm, rd-rd2 */
+static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
+{
+ prt("rtsd\t#%d, r%d - r%d", a->imm << 2, a->rd, a->rd2);
+ return true;
+}
+
+/* and #uimm:4, rd */
+/* and #imm, rd */
+static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
+{
+ prt_ir(ctx, "and", a->imm, a->rd);
+ return true;
+}
+
+/* and dsp[rs], rd */
+/* and rs,rd */
+static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
+{
+ prt_ldmi(ctx, "and", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* and rs,rs2,rd */
+static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
+{
+ prt("and\tr%d,r%d, r%d", a->rs, a->rs2, a->rd);
+ return true;
+}
+
+/* or #uimm:4, rd */
+/* or #imm, rd */
+static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
+{
+ prt_ir(ctx, "or", a->imm, a->rd);
+ return true;
+}
+
+/* or dsp[rs], rd */
+/* or rs,rd */
+static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
+{
+ prt_ldmi(ctx, "or", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* or rs,rs2,rd */
+static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
+{
+ prt("or\tr%d, r%d, r%d", a->rs, a->rs2, a->rd);
+ return true;
+}
+
+/* xor #imm, rd */
+static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
+{
+ prt_ir(ctx, "xor", a->imm, a->rd);
+ return true;
+}
+
+/* xor dsp[rs], rd */
+/* xor rs,rd */
+static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
+{
+ prt_ldmi(ctx, "xor", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* tst #imm, rd */
+static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
+{
+ prt_ir(ctx, "tst", a->imm, a->rd);
+ return true;
+}
+
+/* tst dsp[rs], rd */
+/* tst rs, rd */
+static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
+{
+ prt_ldmi(ctx, "tst", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* not rd */
+/* not rs, rd */
+static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
+{
+ if (a->rs != a->rd) {
+ prt("not\tr%d, r%d", a->rs, a->rd);
+ } else {
+ prt("not\tr%d", a->rs);
+ }
+ return true;
+}
+
+/* neg rd */
+/* neg rs, rd */
+static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
+{
+ if (a->rs != a->rd) {
+ prt("neg\tr%d, r%d", a->rs, a->rd);
+ } else {
+ prt("neg\tr%d", a->rs);
+ }
+ return true;
+}
+
+/* adc #imm, rd */
+static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
+{
+ prt_ir(ctx, "adc", a->imm, a->rd);
+ return true;
+}
+
+/* adc rs, rd */
+static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
+{
+ prt("adc\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* adc dsp[rs], rd */
+static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
+{
+ char dsp[8];
+
+ rx_index_addr(ctx, dsp, a->ld, 2);
+ prt("adc\t%s[r%d], r%d", dsp, a->rs, a->rd);
+ return true;
+}
+
+/* add #uimm4, rd */
+/* add #imm, rs, rd */
+static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
+{
+ if (a->imm < 0x10 && a->rs2 == a->rd) {
+ prt("add\t#%d, r%d", a->imm, a->rd);
+ } else {
+ prt("add\t#0x%08x, r%d, r%d", a->imm, a->rs2, a->rd);
+ }
+ return true;
+}
+
+/* add rs, rd */
+/* add dsp[rs], rd */
+static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
+{
+ prt_ldmi(ctx, "add", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* add rs, rs2, rd */
+static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
+{
+ prt("add\tr%d, r%d, r%d", a->rs, a->rs2, a->rd);
+ return true;
+}
+
+/* cmp #imm4, rd */
+/* cmp #imm8, rd */
+/* cmp #imm, rs2 */
+static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
+{
+ prt_ir(ctx, "cmp", a->imm, a->rs2);
+ return true;
+}
+
+/* cmp rs, rs2 */
+/* cmp dsp[rs], rs2 */
+static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
+{
+ prt_ldmi(ctx, "cmp", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* sub #imm4, rd */
+static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
+{
+ prt("sub\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* sub rs, rd */
+/* sub dsp[rs], rd */
+static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
+{
+ prt_ldmi(ctx, "sub", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* sub rs, rs2, rd */
+static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
+{
+ prt("sub\tr%d, r%d, r%d", a->rs, a->rs2, a->rd);
+ return true;
+}
+
+/* sbb rs, rd */
+static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
+{
+ prt("sbb\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* sbb dsp[rs], rd */
+static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
+{
+ prt_ldmi(ctx, "sbb", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* abs rd */
+/* abs rs, rd */
+static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
+{
+ if (a->rs != a->rd) {
+ prt("abs\tr%d, r%d", a->rs, a->rd);
+ } else {
+ prt("abs\tr%d", a->rs);
+ }
+ return true;
+}
+
+/* max #imm, rd */
+static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
+{
+ prt_ir(ctx, "max", a->imm, a->rd);
+ return true;
+}
+
+/* max rs, rd */
+/* max dsp[rs], rd */
+static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
+{
+ prt_ldmi(ctx, "max", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* min #imm, rd */
+static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
+{
+ prt_ir(ctx, "min", a->imm, a->rd);
+ return true;
+}
+
+/* min rs, rd */
+/* min dsp[rs], rd */
+static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
+{
+ prt_ldmi(ctx, "min", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* mul #uimm4, rd */
+/* mul #imm, rd */
+static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
+{
+ prt_ir(ctx, "mul", a->imm, a->rd);
+ return true;
+}
+
+/* mul rs, rd */
+/* mul dsp[rs], rd */
+static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
+{
+ prt_ldmi(ctx, "mul", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* mul rs, rs2, rd */
+static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
+{
+ prt("mul\tr%d,r%d,r%d", a->rs, a->rs2, a->rd);
+ return true;
+}
+
+/* emul #imm, rd */
+static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
+{
+ prt_ir(ctx, "emul", a->imm, a->rd);
+ return true;
+}
+
+/* emul rs, rd */
+/* emul dsp[rs], rd */
+static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
+{
+ prt_ldmi(ctx, "emul", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* emulu #imm, rd */
+static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
+{
+ prt_ir(ctx, "emulu", a->imm, a->rd);
+ return true;
+}
+
+/* emulu rs, rd */
+/* emulu dsp[rs], rd */
+static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
+{
+ prt_ldmi(ctx, "emulu", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* div #imm, rd */
+static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
+{
+ prt_ir(ctx, "div", a->imm, a->rd);
+ return true;
+}
+
+/* div rs, rd */
+/* div dsp[rs], rd */
+static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
+{
+ prt_ldmi(ctx, "div", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+/* divu #imm, rd */
+static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
+{
+ prt_ir(ctx, "divu", a->imm, a->rd);
+ return true;
+}
+
+/* divu rs, rd */
+/* divu dsp[rs], rd */
+static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
+{
+ prt_ldmi(ctx, "divu", a->ld, a->mi, a->rs, a->rd);
+ return true;
+}
+
+
+/* shll #imm:5, rd */
+/* shll #imm:5, rs, rd */
+static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
+{
+ if (a->rs2 != a->rd) {
+ prt("shll\t#%d, r%d, r%d", a->imm, a->rs2, a->rd);
+ } else {
+ prt("shll\t#%d, r%d", a->imm, a->rd);
+ }
+ return true;
+}
+
+/* shll rs, rd */
+static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
+{
+ prt("shll\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* shar #imm:5, rd */
+/* shar #imm:5, rs, rd */
+static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
+{
+ if (a->rs2 != a->rd) {
+ prt("shar\t#%d, r%d, r%d", a->imm, a->rs2, a->rd);
+ } else {
+ prt("shar\t#%d, r%d", a->imm, a->rd);
+ }
+ return true;
+}
+
+/* shar rs, rd */
+static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
+{
+ prt("shar\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* shlr #imm:5, rd */
+/* shlr #imm:5, rs, rd */
+static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
+{
+ if (a->rs2 != a->rd) {
+ prt("shlr\t#%d, r%d, r%d", a->imm, a->rs2, a->rd);
+ } else {
+ prt("shlr\t#%d, r%d", a->imm, a->rd);
+ }
+ return true;
+}
+
+/* shlr rs, rd */
+static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
+{
+ prt("shlr\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* rolc rd */
+static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
+{
+ prt("rorc\tr%d", a->rd);
+ return true;
+}
+
+/* rorc rd */
+static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
+{
+ prt("rorc\tr%d", a->rd);
+ return true;
+}
+
+/* rotl #imm, rd */
+static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
+{
+ prt("rotl\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* rotl rs, rd */
+static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
+{
+ prt("rotl\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* rotr #imm, rd */
+static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
+{
+ prt("rotr\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* rotr rs, rd */
+static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
+{
+ prt("rotr\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* revl rs, rd */
+static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
+{
+ prt("revl\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* revw rs, rd */
+static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
+{
+ prt("revw\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* Conditional branch helper: len is the opcode length in bytes and selects the .s/.b/.w/.a suffix. */
+static void rx_bcnd_main(DisasContext *ctx, int cd, int len, int dst)
+{
+ static const char sz[] = {'s', 'b', 'w', 'a'};
+ prt("b%s.%c\t%08x", cond[cd], sz[len - 1], ctx->pc + dst);
+}
+
+/* beq dsp:3 / bne dsp:3 */
+/* beq dsp:8 / bne dsp:8 */
+/* bc dsp:8 / bnc dsp:8 */
+/* bgtu dsp:8 / bleu dsp:8 */
+/* bpz dsp:8 / bn dsp:8 */
+/* bge dsp:8 / blt dsp:8 */
+/* bgt dsp:8 / ble dsp:8 */
+/* bo dsp:8 / bno dsp:8 */
+/* beq dsp:16 / bne dsp:16 */
+static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
+{
+ rx_bcnd_main(ctx, a->cd, a->sz, a->dsp);
+ return true;
+}
+
+/* bra dsp:3 */
+/* bra dsp:8 */
+/* bra dsp:16 */
+/* bra dsp:24 */
+static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
+{
+ rx_bcnd_main(ctx, 14, a->sz, a->dsp);
+ return true;
+}
+
+/* bra rs */
+static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
+{
+ prt("bra.l\tr%d", a->rd);
+ return true;
+}
+
+/* jmp rs */
+static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
+{
+ prt("jmp\tr%d", a->rs);
+ return true;
+}
+
+/* jsr rs */
+static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
+{
+ prt("jsr\tr%d", a->rs);
+ return true;
+}
+
+/* bsr dsp:16 */
+/* bsr dsp:24 */
+static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
+{
+ static const char sz[] = {'w', 'a'};
+ prt("bsr.%c\t%08x", sz[a->sz - 3], ctx->pc + a->dsp);
+ return true;
+}
+
+/* bsr rs */
+static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
+{
+ prt("bsr.l\tr%d", a->rd);
+ return true;
+}
+
+/* rts */
+static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
+{
+ prt("rts");
+ return true;
+}
+
+/* nop */
+static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
+{
+ prt("nop");
+ return true;
+}
+
+/* scmpu */
+static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
+{
+ prt("scmpu");
+ return true;
+}
+
+/* smovu */
+static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
+{
+ prt("smovu");
+ return true;
+}
+
+/* smovf */
+static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
+{
+ prt("smovf");
+ return true;
+}
+
+/* smovb */
+static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
+{
+ prt("smovb");
+ return true;
+}
+
+/* suntil */
+static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
+{
+ prt("suntil.%c", size[a->sz]);
+ return true;
+}
+
+/* swhile */
+static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
+{
+ prt("swhile.%c", size[a->sz]);
+ return true;
+}
+/* sstr */
+static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
+{
+ prt("sstr.%c", size[a->sz]);
+ return true;
+}
+
+/* rmpa */
+static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
+{
+ prt("rmpa.%c", size[a->sz]);
+ return true;
+}
+
+/* mulhi rs,rs2 */
+static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
+{
+ prt("mulhi\tr%d,r%d", a->rs, a->rs2);
+ return true;
+}
+
+/* mullo rs,rs2 */
+static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
+{
+ prt("mullo\tr%d, r%d", a->rs, a->rs2);
+ return true;
+}
+
+/* machi rs,rs2 */
+static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
+{
+ prt("machi\tr%d, r%d", a->rs, a->rs2);
+ return true;
+}
+
+/* maclo rs,rs2 */
+static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
+{
+ prt("maclo\tr%d, r%d", a->rs, a->rs2);
+ return true;
+}
+
+/* mvfachi rd */
+static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
+{
+ prt("mvfachi\tr%d", a->rd);
+ return true;
+}
+
+/* mvfacmi rd */
+static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
+{
+ prt("mvfacmi\tr%d", a->rd);
+ return true;
+}
+
+/* mvtachi rs */
+static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
+{
+ prt("mvtachi\tr%d", a->rs);
+ return true;
+}
+
+/* mvtaclo rs */
+static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
+{
+ prt("mvtaclo\tr%d", a->rs);
+ return true;
+}
+
+/* racw #imm */
+static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
+{
+ prt("racw\t#%d", a->imm + 1);
+ return true;
+}
+
+/* sat rd */
+static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
+{
+ prt("sat\tr%d", a->rd);
+ return true;
+}
+
+/* satr */
+static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
+{
+ prt("satr");
+ return true;
+}
+
+/* fadd #imm, rd */
+static bool trans_FADD_ir(DisasContext *ctx, arg_FADD_ir *a)
+{
+ prt("fadd\t#%d,r%d", li(ctx, 0), a->rd);
+ return true;
+}
+
+/* fadd dsp[rs], rd */
+/* fadd rs, rd */
+static bool trans_FADD_mr(DisasContext *ctx, arg_FADD_mr *a)
+{
+ prt_ldmi(ctx, "fadd", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* fcmp #imm, rd */
+static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir *a)
+{
+ prt("fadd\t#%d,r%d", li(ctx, 0), a->rd);
+ return true;
+}
+
+/* fcmp dsp[rs], rd */
+/* fcmp rs, rd */
+static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
+{
+ prt_ldmi(ctx, "fcmp", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* fsub #imm, rd */
+static bool trans_FSUB_ir(DisasContext *ctx, arg_FSUB_ir *a)
+{
+ prt("fsub\t#%d,r%d", li(ctx, 0), a->rd);
+ return true;
+}
+
+/* fsub dsp[rs], rd */
+/* fsub rs, rd */
+static bool trans_FSUB_mr(DisasContext *ctx, arg_FSUB_mr *a)
+{
+ prt_ldmi(ctx, "fsub", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* ftoi dsp[rs], rd */
+/* ftoi rs, rd */
+static bool trans_FTOI(DisasContext *ctx, arg_FTOI *a)
+{
+ prt_ldmi(ctx, "ftoi", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* fmul #imm, rd */
+static bool trans_FMUL_ir(DisasContext *ctx, arg_FMUL_ir *a)
+{
+ prt("fmul\t#%d,r%d", li(ctx, 0), a->rd);
+ return true;
+}
+
+/* fmul dsp[rs], rd */
+/* fmul rs, rd */
+static bool trans_FMUL_mr(DisasContext *ctx, arg_FMUL_mr *a)
+{
+ prt_ldmi(ctx, "fmul", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* fdiv #imm, rd */
+static bool trans_FDIV_ir(DisasContext *ctx, arg_FDIV_ir *a)
+{
+ prt("fdiv\t#%d,r%d", li(ctx, 0), a->rd);
+ return true;
+}
+
+/* fdiv dsp[rs], rd */
+/* fdiv rs, rd */
+static bool trans_FDIV_mr(DisasContext *ctx, arg_FDIV_mr *a)
+{
+ prt_ldmi(ctx, "fdiv", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* round dsp[rs], rd */
+/* round rs, rd */
+static bool trans_ROUND(DisasContext *ctx, arg_ROUND *a)
+{
+ prt_ldmi(ctx, "round", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+/* itof rs, rd */
+/* itof dsp[rs], rd */
+static bool trans_ITOF(DisasContext *ctx, arg_ITOF *a)
+{
+ prt_ldmi(ctx, "itof", a->ld, RX_IM_LONG, a->rs, a->rd);
+ return true;
+}
+
+#define BOP_IM(name, reg) \
+ do { \
+ char dsp[8]; \
+ rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE); \
+ prt("b%s\t#%d, %s[r%d]", #name, a->imm, dsp, reg); \
+ return true; \
+ } while (0)
+
+#define BOP_RM(name) \
+ do { \
+ char dsp[8]; \
+ rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE); \
+ prt("b%s\tr%d, %s[r%d]", #name, a->rd, dsp, a->rs); \
+ return true; \
+ } while (0)
+
+/* bset #imm, dsp[rd] */
+static bool trans_BSET_im(DisasContext *ctx, arg_BSET_im *a)
+{
+ BOP_IM(set, a->rs);
+}
+
+/* bset rs, dsp[rd] */
+static bool trans_BSET_rm(DisasContext *ctx, arg_BSET_rm *a)
+{
+ BOP_RM(set);
+}
+
+/* bset rs, rd */
+static bool trans_BSET_rr(DisasContext *ctx, arg_BSET_rr *a)
+{
+ prt("bset\tr%d,r%d", a->rs, a->rd);
+ return true;
+}
+
+/* bset #imm, rd */
+static bool trans_BSET_ir(DisasContext *ctx, arg_BSET_ir *a)
+{
+ prt("bset\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* bclr #imm, dsp[rd] */
+static bool trans_BCLR_im(DisasContext *ctx, arg_BCLR_im *a)
+{
+ BOP_IM(clr, a->rs);
+}
+
+/* bclr rs, dsp[rd] */
+static bool trans_BCLR_rm(DisasContext *ctx, arg_BCLR_rm *a)
+{
+ BOP_RM(clr);
+}
+
+/* bclr rs, rd */
+static bool trans_BCLR_rr(DisasContext *ctx, arg_BCLR_rr *a)
+{
+ prt("bclr\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* bclr #imm, rd */
+static bool trans_BCLR_ir(DisasContext *ctx, arg_BCLR_ir *a)
+{
+ prt("bclr\t#%d,r%d", a->imm, a->rd);
+ return true;
+}
+
+/* btst #imm, dsp[rd] */
+static bool trans_BTST_im(DisasContext *ctx, arg_BTST_im *a)
+{
+ BOP_IM(tst, a->rs);
+}
+
+/* btst rs, dsp[rd] */
+static bool trans_BTST_rm(DisasContext *ctx, arg_BTST_rm *a)
+{
+ BOP_RM(tst);
+}
+
+/* btst rs, rd */
+static bool trans_BTST_rr(DisasContext *ctx, arg_BTST_rr *a)
+{
+ prt("btst\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* btst #imm, rd */
+static bool trans_BTST_ir(DisasContext *ctx, arg_BTST_ir *a)
+{
+ prt("btst\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* bnot rs, dsp[rd] */
+static bool trans_BNOT_rm(DisasContext *ctx, arg_BNOT_rm *a)
+{
+ BOP_RM(not);
+}
+
+/* bnot rs, rd */
+static bool trans_BNOT_rr(DisasContext *ctx, arg_BNOT_rr *a)
+{
+ prt("bnot\tr%d, r%d", a->rs, a->rd);
+ return true;
+}
+
+/* bnot #imm, dsp[rd] */
+static bool trans_BNOT_im(DisasContext *ctx, arg_BNOT_im *a)
+{
+ BOP_IM(not, a->rs);
+}
+
+/* bnot #imm, rd */
+static bool trans_BNOT_ir(DisasContext *ctx, arg_BNOT_ir *a)
+{
+ prt("bnot\t#%d, r%d", a->imm, a->rd);
+ return true;
+}
+
+/* bmcond #imm, dsp[rd] */
+static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
+{
+ char dsp[8];
+
+ rx_index_addr(ctx, dsp, a->ld, RX_MEMORY_BYTE);
+ prt("bm%s\t#%d, %s[r%d]", cond[a->cd], a->imm, dsp, a->rd);
+ return true;
+}
+
+/* bmcond #imm, rd */
+static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
+{
+ prt("bm%s\t#%d, r%d", cond[a->cd], a->imm, a->rd);
+ return true;
+}
+
+/* clrpsw psw */
+static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
+{
+ prt("clrpsw\t%c", psw[a->cb]);
+ return true;
+}
+
+/* setpsw psw */
+static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
+{
+ prt("setpsw\t%c", psw[a->cb]);
+ return true;
+}
+
+/* mvtipl #imm */
+static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
+{
+ prt("movtipl\t#%d", a->imm);
+ return true;
+}
+
+/* mvtc #imm, rd */
+static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
+{
+ prt("mvtc\t#0x%08x, %s", a->imm, rx_crname(a->cr));
+ return true;
+}
+
+/* mvtc rs, rd */
+static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
+{
+ prt("mvtc\tr%d, %s", a->rs, rx_crname(a->cr));
+ return true;
+}
+
+/* mvfc rs, rd */
+static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
+{
+ prt("mvfc\t%s, r%d", rx_crname(a->cr), a->rd);
+ return true;
+}
+
+/* rtfi */
+static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
+{
+ prt("rtfi");
+ return true;
+}
+
+/* rte */
+static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
+{
+ prt("rte");
+ return true;
+}
+
+/* brk */
+static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
+{
+ prt("brk");
+ return true;
+}
+
+/* int #imm */
+static bool trans_INT(DisasContext *ctx, arg_INT *a)
+{
+ prt("int\t#%d", a->imm);
+ return true;
+}
+
+/* wait */
+static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
+{
+ prt("wait");
+ return true;
+}
+
+/* sccnd.[bwl] rd */
+/* sccnd.[bwl] dsp:[rd] */
+static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
+{
+ if (a->ld < 3) {
+ char dsp[8];
+ rx_index_addr(ctx, dsp, a->sz, a->ld);
+ prt("sc%s.%c\t%s[r%d]", cond[a->cd], size[a->sz], dsp, a->rd);
+ } else {
+ prt("sc%s.%c\tr%d", cond[a->cd], size[a->sz], a->rd);
+ }
+ return true;
+}
+
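+/*
+ * Disassembler entry point. If the decoder rejects the opcode, the
+ * bytes consumed so far are emitted as a .byte directive instead.
+ * Returns the number of bytes consumed.
+ */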
+int print_insn_rx(bfd_vma addr, disassemble_info *dis)
+{
+ DisasContext ctx;
+ uint32_t insn;
+ int i;
+
+ ctx.dis = dis;
+ ctx.pc = ctx.addr = addr;
+ ctx.len = 0;
+
+ insn = decode_load(&ctx);
+ if (!decode(&ctx, insn)) {
+ ctx.dis->fprintf_func(ctx.dis->stream, ".byte\t");
+ for (i = 0; i < ctx.addr - addr; i++) {
+ if (i > 0) {
+ ctx.dis->fprintf_func(ctx.dis->stream, ",");
+ }
+ ctx.dis->fprintf_func(ctx.dis->stream, "0x%02x", insn >> 24);
+ insn <<= 8;
+ }
+ }
+ return ctx.addr - addr;
+}
diff --git a/target/rx/gdbstub.c b/target/rx/gdbstub.c
new file mode 100644
index 000000000..c811d4810
--- /dev/null
+++ b/target/rx/gdbstub.c
@@ -0,0 +1,112 @@
+/*
+ * RX gdb server stub
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
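+/*
+ * GDB register map: 0-15 general registers, 16 usp, 17 isp, 18 psw,
+ * 19 pc, 20 intb, 21 bpsw, 22 bpc, 23 fintv, 24 fpsw; register 25 is
+ * not supported.
+ */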
+int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ RXCPU *cpu = RX_CPU(cs);
+ CPURXState *env = &cpu->env;
+
+ switch (n) {
+ case 0 ... 15:
+ return gdb_get_regl(mem_buf, env->regs[n]);
+ case 16:
+ return gdb_get_regl(mem_buf, (env->psw_u) ? env->regs[0] : env->usp);
+ case 17:
+ return gdb_get_regl(mem_buf, (!env->psw_u) ? env->regs[0] : env->isp);
+ case 18:
+ return gdb_get_regl(mem_buf, rx_cpu_pack_psw(env));
+ case 19:
+ return gdb_get_regl(mem_buf, env->pc);
+ case 20:
+ return gdb_get_regl(mem_buf, env->intb);
+ case 21:
+ return gdb_get_regl(mem_buf, env->bpsw);
+ case 22:
+ return gdb_get_regl(mem_buf, env->bpc);
+ case 23:
+ return gdb_get_regl(mem_buf, env->fintv);
+ case 24:
+ return gdb_get_regl(mem_buf, env->fpsw);
+ case 25:
+ return 0;
+ }
+ return 0;
+}
+
+int rx_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ RXCPU *cpu = RX_CPU(cs);
+ CPURXState *env = &cpu->env;
+ uint32_t psw;
+ switch (n) {
+ case 0 ... 15:
+ env->regs[n] = ldl_p(mem_buf);
+ if (n == 0) {
+ if (env->psw_u) {
+ env->usp = env->regs[0];
+ } else {
+ env->isp = env->regs[0];
+ }
+ }
+ break;
+ case 16:
+ env->usp = ldl_p(mem_buf);
+ if (env->psw_u) {
+ env->regs[0] = ldl_p(mem_buf);
+ }
+ break;
+ case 17:
+ env->isp = ldl_p(mem_buf);
+ if (!env->psw_u) {
+ env->regs[0] = ldl_p(mem_buf);
+ }
+ break;
+ case 18:
+ psw = ldl_p(mem_buf);
+ rx_cpu_unpack_psw(env, psw, 1);
+ break;
+ case 19:
+ env->pc = ldl_p(mem_buf);
+ break;
+ case 20:
+ env->intb = ldl_p(mem_buf);
+ break;
+ case 21:
+ env->bpsw = ldl_p(mem_buf);
+ break;
+ case 22:
+ env->bpc = ldl_p(mem_buf);
+ break;
+ case 23:
+ env->fintv = ldl_p(mem_buf);
+ break;
+ case 24:
+ env->fpsw = ldl_p(mem_buf);
+ break;
+ case 25:
+ return 8;
+ default:
+ return 0;
+ }
+
+ return 4;
+}
diff --git a/target/rx/helper.c b/target/rx/helper.c
new file mode 100644
index 000000000..f34945e7e
--- /dev/null
+++ b/target/rx/helper.c
@@ -0,0 +1,152 @@
+/*
+ * RX emulation
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "cpu.h"
+#include "exec/log.h"
+#include "exec/cpu_ldst.h"
+#include "hw/irq.h"
+
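+/*
+ * Update the unpacked PSW fields from an architectural PSW value.
+ * IPL, U and I can only change in supervisor mode (PSW.PM == 0), and
+ * PM itself can only be rewritten by RTE/RTFI (rte != 0).
+ */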
+void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
+{
+ if (env->psw_pm == 0) {
+ env->psw_ipl = FIELD_EX32(psw, PSW, IPL);
+ if (rte) {
+ /* PSW.PM can only be written by RTE and RTFI */
+ env->psw_pm = FIELD_EX32(psw, PSW, PM);
+ }
+ env->psw_u = FIELD_EX32(psw, PSW, U);
+ env->psw_i = FIELD_EX32(psw, PSW, I);
+ }
+ env->psw_o = FIELD_EX32(psw, PSW, O) << 31;
+ env->psw_s = FIELD_EX32(psw, PSW, S) << 31;
+ env->psw_z = 1 - FIELD_EX32(psw, PSW, Z);
+ env->psw_c = FIELD_EX32(psw, PSW, C);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
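+/*
+ * Enter an interrupt or exception handler. Fast interrupts save
+ * PC/PSW into BPC/BPSW and vector through FINTV; normal interrupts
+ * push PSW and PC onto the interrupt stack and vector through INTB.
+ * Exceptions use the fixed vector table at the top of the address
+ * space for vectors below 0x100, and INTB-relative vectors otherwise.
+ */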
+void rx_cpu_do_interrupt(CPUState *cs)
+{
+ RXCPU *cpu = RX_CPU(cs);
+ CPURXState *env = &cpu->env;
+ int do_irq = cs->interrupt_request & INT_FLAGS;
+ uint32_t save_psw;
+
+ env->in_sleep = 0;
+
+ if (env->psw_u) {
+ env->usp = env->regs[0];
+ } else {
+ env->isp = env->regs[0];
+ }
+ save_psw = rx_cpu_pack_psw(env);
+ env->psw_pm = env->psw_i = env->psw_u = 0;
+
+ if (do_irq) {
+ if (do_irq & CPU_INTERRUPT_FIR) {
+ env->bpc = env->pc;
+ env->bpsw = save_psw;
+ env->pc = env->fintv;
+ env->psw_ipl = 15;
+ cs->interrupt_request &= ~CPU_INTERRUPT_FIR;
+ qemu_set_irq(env->ack, env->ack_irq);
+ qemu_log_mask(CPU_LOG_INT, "fast interrupt raised\n");
+ } else if (do_irq & CPU_INTERRUPT_HARD) {
+ env->isp -= 4;
+ cpu_stl_data(env, env->isp, save_psw);
+ env->isp -= 4;
+ cpu_stl_data(env, env->isp, env->pc);
+ env->pc = cpu_ldl_data(env, env->intb + env->ack_irq * 4);
+ env->psw_ipl = env->ack_ipl;
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ qemu_set_irq(env->ack, env->ack_irq);
+ qemu_log_mask(CPU_LOG_INT,
+ "interrupt 0x%02x raised\n", env->ack_irq);
+ }
+ } else {
+ uint32_t vec = cs->exception_index;
+ const char *expname = "unknown exception";
+
+ env->isp -= 4;
+ cpu_stl_data(env, env->isp, save_psw);
+ env->isp -= 4;
+ cpu_stl_data(env, env->isp, env->pc);
+
+ if (vec < 0x100) {
+ env->pc = cpu_ldl_data(env, 0xffffffc0 + vec * 4);
+ } else {
+ env->pc = cpu_ldl_data(env, env->intb + (vec & 0xff) * 4);
+ }
+ switch (vec) {
+ case 20:
+ expname = "privilege violation";
+ break;
+ case 21:
+ expname = "access exception";
+ break;
+ case 23:
+ expname = "illegal instruction";
+ break;
+ case 25:
+ expname = "fpu exception";
+ break;
+ case 30:
+ expname = "non-maskable interrupt";
+ break;
+ case 0x100 ... 0x1ff:
+ expname = "unconditional trap";
+ }
+ qemu_log_mask(CPU_LOG_INT, "exception 0x%02x [%s] raised\n",
+ (vec & 0xff), expname);
+ }
+ env->regs[0] = env->isp;
+}
+
+bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ RXCPU *cpu = RX_CPU(cs);
+ CPURXState *env = &cpu->env;
+ int accept = 0;
+ /* hardware interrupt (Normal) */
+ if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ env->psw_i && (env->psw_ipl < env->req_ipl)) {
+ env->ack_irq = env->req_irq;
+ env->ack_ipl = env->req_ipl;
+ accept = 1;
+ }
+ /* hardware interrupt (FIR) */
+ if ((interrupt_request & CPU_INTERRUPT_FIR) &&
+ env->psw_i && (env->psw_ipl < 15)) {
+ accept = 1;
+ }
+ if (accept) {
+ rx_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ return addr;
+}
diff --git a/target/rx/helper.h b/target/rx/helper.h
new file mode 100644
index 000000000..ebb473947
--- /dev/null
+++ b/target/rx/helper.h
@@ -0,0 +1,30 @@
+DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
+DEF_HELPER_1(raise_access_fault, noreturn, env)
+DEF_HELPER_1(raise_privilege_violation, noreturn, env)
+DEF_HELPER_1(wait, noreturn, env)
+DEF_HELPER_2(rxint, noreturn, env, i32)
+DEF_HELPER_1(rxbrk, noreturn, env)
+DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsub, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmul, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdiv, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmp, TCG_CALL_NO_WG, void, env, f32, f32)
+DEF_HELPER_FLAGS_2(ftoi, TCG_CALL_NO_WG, i32, env, f32)
+DEF_HELPER_FLAGS_2(round, TCG_CALL_NO_WG, i32, env, f32)
+DEF_HELPER_FLAGS_2(itof, TCG_CALL_NO_WG, f32, env, i32)
+DEF_HELPER_2(set_fpsw, void, env, i32)
+DEF_HELPER_FLAGS_2(racw, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(set_psw_rte, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(set_psw, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_1(pack_psw, i32, env)
+DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(divu, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(scmpu, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_1(smovu, void, env)
+DEF_HELPER_1(smovf, void, env)
+DEF_HELPER_1(smovb, void, env)
+DEF_HELPER_2(sstr, void, env, i32)
+DEF_HELPER_FLAGS_2(swhile, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(suntil, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(rmpa, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_1(satr, void, env)
diff --git a/target/rx/insns.decode b/target/rx/insns.decode
new file mode 100644
index 000000000..ca9334b37
--- /dev/null
+++ b/target/rx/insns.decode
@@ -0,0 +1,621 @@
+#
+# Renesas RX instruction decode definitions.
+#
+# Copyright (c) 2019 Richard Henderson <richard.henderson@linaro.org>
+# Copyright (c) 2019 Yoshinori Sato <ysato@users.sourceforge.jp>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+&bcnd cd dsp sz
+&jdsp dsp sz
+&jreg rs
+&rr rd rs
+&ri rd imm
+&rrr rd rs rs2
+&rri rd imm rs2
+&rm rd rs ld mi
+&mi rs ld mi imm
+&mr rs ld mi rs2
+&mcnd ld sz rd cd
+########
+%b1_bdsp 24:3 !function=bdsp_s
+
+@b1_bcnd_s .... cd:1 ... &bcnd dsp=%b1_bdsp sz=1
+@b1_bra_s .... .... &jdsp dsp=%b1_bdsp sz=1
+
+%b2_r_0 16:4
+%b2_li_2 18:2 !function=li
+%b2_li_8 24:2 !function=li
+%b2_dsp5_3 23:4 19:1
+
+@b2_rds .... .... .... rd:4 &rr rs=%b2_r_0
+@b2_rds_li .... .... .... rd:4 &rri rs2=%b2_r_0 imm=%b2_li_8
+@b2_rds_uimm4 .... .... imm:4 rd:4 &rri rs2=%b2_r_0
+@b2_rs2_uimm4 .... .... imm:4 rs2:4 &rri rd=0
+@b2_rds_imm5 .... ... imm:5 rd:4 &rri rs2=%b2_r_0
+@b2_rd_rs_li .... .... rs2:4 rd:4 &rri imm=%b2_li_8
+@b2_rd_ld_ub .... .. ld:2 rs:4 rd:4 &rm mi=4
+@b2_ld_imm3 .... .. ld:2 rs:4 . imm:3 &mi mi=4
+@b2_bcnd_b .... cd:4 dsp:s8 &bcnd sz=2
+@b2_bra_b .... .... dsp:s8 &jdsp sz=2
+
+########
+
+%b3_r_0 8:4
+%b3_li_10 18:2 !function=li
+%b3_dsp5_8 23:1 16:4
+%b3_bdsp 8:s8 16:8
+
+@b3_rd_rs .... .... .... .... rs:4 rd:4 &rr
+@b3_rs_rd .... .... .... .... rd:4 rs:4 &rr
+@b3_rd_li .... .... .... .... .... rd:4 \
+ &rri rs2=%b3_r_0 imm=%b3_li_10
+@b3_rd_ld .... .... mi:2 .... ld:2 rs:4 rd:4 &rm
+@b3_rd_ld_ub .... .... .... .. ld:2 rs:4 rd:4 &rm mi=4
+@b3_rd_ld_ul .... .... .... .. ld:2 rs:4 rd:4 &rm mi=2
+@b3_rd_rs_rs2 .... .... .... rd:4 rs:4 rs2:4 &rrr
+@b3_rds_imm5 .... .... ....... imm:5 rd:4 &rri rs2=%b3_r_0
+@b3_rd_rs_imm5 .... .... ... imm:5 rs2:4 rd:4 &rri
+@b3_bcnd_w .... ... cd:1 .... .... .... .... &bcnd dsp=%b3_bdsp sz=3
+@b3_bra_w .... .... .... .... .... .... &jdsp dsp=%b3_bdsp sz=3
+@b3_ld_rd_rs .... .... .... .. ld:2 rs:4 rd:4 &rm mi=0
+@b3_sz_ld_rd_cd .... .... .... sz:2 ld:2 rd:4 cd:4 &mcnd
+
+########
+
+%b4_li_18 18:2 !function=li
+%b4_dsp_16 0:s8 8:8
+%b4_bdsp 0:s8 8:8 16:8
+
+@b4_rd_ldmi .... .... mi:2 .... ld:2 .... .... rs:4 rd:4 &rm
+@b4_bra_a .... .... .... .... .... .... .... .... \
+ &jdsp dsp=%b4_bdsp sz=4
+########
+# ABS rd
+ABS_rr 0111 1110 0010 .... @b2_rds
+# ABS rs, rd
+ABS_rr 1111 1100 0000 1111 .... .... @b3_rd_rs
+
+# ADC #imm, rd
+ADC_ir 1111 1101 0111 ..00 0010 .... @b3_rd_li
+# ADC rs, rd
+ADC_rr 1111 1100 0000 1011 .... .... @b3_rd_rs
+# ADC dsp[rs].l, rd
+# Note only mi==2 allowed.
+ADC_mr 0000 0110 ..10 00.. 0000 0010 .... .... @b4_rd_ldmi
+
+# ADD #uimm4, rd
+ADD_irr 0110 0010 .... .... @b2_rds_uimm4
+# ADD #imm, rs, rd
+ADD_irr 0111 00.. .... .... @b2_rd_rs_li
+# ADD dsp[rs].ub, rd
+# ADD rs, rd
+ADD_mr 0100 10.. .... .... @b2_rd_ld_ub
+# ADD dsp[rs], rd
+ADD_mr 0000 0110 ..00 10.. .... .... @b3_rd_ld
+# ADD rs, rs2, rd
+ADD_rrr 1111 1111 0010 .... .... .... @b3_rd_rs_rs2
+
+# AND #uimm4, rd
+AND_ir 0110 0100 .... .... @b2_rds_uimm4
+# AND #imm, rd
+AND_ir 0111 01.. 0010 .... @b2_rds_li
+# AND dsp[rs].ub, rd
+# AND rs, rd
+AND_mr 0101 00.. .... .... @b2_rd_ld_ub
+# AND dsp[rs], rd
+AND_mr 0000 0110 ..01 00.. .... .... @b3_rd_ld
+# AND rs, rs2, rd
+AND_rrr 1111 1111 0100 .... .... .... @b3_rd_rs_rs2
+
+# BCLR #imm, dsp[rd]
+BCLR_im 1111 00.. .... 1... @b2_ld_imm3
+# BCLR #imm, rs
+BCLR_ir 0111 101. .... .... @b2_rds_imm5
+# BCLR rs, rd
+# BCLR rs, dsp[rd]
+{
+ BCLR_rr 1111 1100 0110 0111 .... .... @b3_rs_rd
+ BCLR_rm 1111 1100 0110 01.. .... .... @b3_rd_ld_ub
+}
+
+# BCnd.s dsp
+BCnd 0001 .... @b1_bcnd_s
+# BRA.b dsp
+# BCnd.b dsp
+{
+ BRA 0010 1110 .... .... @b2_bra_b
+ BCnd 0010 .... .... .... @b2_bcnd_b
+}
+
+# BCnd.w dsp
+BCnd 0011 101 . .... .... .... .... @b3_bcnd_w
+
+# BNOT #imm, dsp[rd]
+# BMCnd #imm, dsp[rd]
+{
+ BNOT_im 1111 1100 111 imm:3 ld:2 rs:4 1111
+ BMCnd_im 1111 1100 111 imm:3 ld:2 rd:4 cd:4
+}
+
+# BNOT #imm, rd
+# BMCnd #imm, rd
+{
+ BNOT_ir 1111 1101 111 imm:5 1111 rd:4
+ BMCnd_ir 1111 1101 111 imm:5 cd:4 rd:4
+}
+
+# BNOT rs, rd
+# BNOT rs, dsp[rd]
+{
+ BNOT_rr 1111 1100 0110 1111 .... .... @b3_rs_rd
+ BNOT_rm 1111 1100 0110 11.. .... .... @b3_rd_ld_ub
+}
+
+# BRA.s dsp
+BRA 0000 1 ... @b1_bra_s
+# BRA.w dsp
+BRA 0011 1000 .... .... .... .... @b3_bra_w
+# BRA.a dsp
+BRA 0000 0100 .... .... .... .... .... .... @b4_bra_a
+# BRA.l rs
+BRA_l 0111 1111 0100 rd:4
+
+BRK 0000 0000
+
+# BSET #imm, dsp[rd]
+BSET_im 1111 00.. .... 0... @b2_ld_imm3
+# BSET #imm, rd
+BSET_ir 0111 100. .... .... @b2_rds_imm5
+# BSET rs, rd
+# BSET rs, dsp[rd]
+{
+ BSET_rr 1111 1100 0110 0011 .... .... @b3_rs_rd
+ BSET_rm 1111 1100 0110 00.. .... .... @b3_rd_ld_ub
+}
+
+# BSR.w dsp
+BSR 0011 1001 .... .... .... .... @b3_bra_w
+# BSR.a dsp
+BSR 0000 0101 .... .... .... .... .... .... @b4_bra_a
+# BSR.l rs
+BSR_l 0111 1111 0101 rd:4
+
+# BTST #imm, dsp[rd]
+BTST_im 1111 01.. .... 0... @b2_ld_imm3
+# BTST #imm, rd
+BTST_ir 0111 110. .... .... @b2_rds_imm5
+# BTST rs, rd
+# BTST rs, dsp[rd]
+{
+ BTST_rr 1111 1100 0110 1011 .... .... @b3_rs_rd
+ BTST_rm 1111 1100 0110 10.. .... .... @b3_rd_ld_ub
+}
+
+# CLRPSW psw
+CLRPSW 0111 1111 1011 cb:4
+
+# CMP #uimm4, rs2
+CMP_ir 0110 0001 .... .... @b2_rs2_uimm4
+# CMP #uimm8, rs2
+CMP_ir 0111 0101 0101 rs2:4 imm:8 &rri rd=0
+# CMP #imm, rs2
+CMP_ir 0111 01.. 0000 rs2:4 &rri imm=%b2_li_8 rd=0
+# CMP dsp[rs].ub, rs2
+# CMP rs, rs2
+CMP_mr 0100 01.. .... .... @b2_rd_ld_ub
+# CMP dsp[rs], rs2
+CMP_mr 0000 0110 ..00 01.. .... .... @b3_rd_ld
+
+# DIV #imm, rd
+DIV_ir 1111 1101 0111 ..00 1000 .... @b3_rd_li
+# DIV dsp[rs].ub, rd
+# DIV rs, rd
+DIV_mr 1111 1100 0010 00.. .... .... @b3_rd_ld_ub
+# DIV dsp[rs], rd
+DIV_mr 0000 0110 ..10 00.. 0000 1000 .... .... @b4_rd_ldmi
+
+# DIVU #imm, rd
+DIVU_ir 1111 1101 0111 ..00 1001 .... @b3_rd_li
+# DIVU dsp[rs].ub, rd
+# DIVU rs, rd
+DIVU_mr 1111 1100 0010 01.. .... .... @b3_rd_ld_ub
+# DIVU dsp[rs], rd
+DIVU_mr 0000 0110 ..10 00.. 0000 1001 .... .... @b4_rd_ldmi
+
+# EMUL #imm, rd
+EMUL_ir 1111 1101 0111 ..00 0110 .... @b3_rd_li
+# EMUL dsp[rs].ub, rd
+# EMUL rs, rd
+EMUL_mr 1111 1100 0001 10.. .... .... @b3_rd_ld_ub
+# EMUL dsp[rs], rd
+EMUL_mr 0000 0110 ..10 00.. 0000 0110 .... .... @b4_rd_ldmi
+
+# EMULU #imm, rd
+EMULU_ir 1111 1101 0111 ..00 0111 .... @b3_rd_li
+# EMULU dsp[rs].ub, rd
+# EMULU rs, rd
+EMULU_mr 1111 1100 0001 11.. .... .... @b3_rd_ld_ub
+# EMULU dsp[rs], rd
+EMULU_mr 0000 0110 ..10 00.. 0000 0111 .... .... @b4_rd_ldmi
+
+# FADD #imm, rd
+FADD_ir 1111 1101 0111 0010 0010 rd:4
+# FADD rs, rd
+# FADD dsp[rs], rd
+FADD_mr 1111 1100 1000 10.. .... .... @b3_rd_ld_ul
+
+# FCMP #imm, rd
+FCMP_ir 1111 1101 0111 0010 0001 rd:4
+# FCMP rs, rd
+# FCMP dsp[rs], rd
+FCMP_mr 1111 1100 1000 01.. .... .... @b3_rd_ld_ul
+
+# FDIV #imm, rd
+FDIV_ir 1111 1101 0111 0010 0100 rd:4
+# FDIV rs, rd
+# FDIV dsp[rs], rd
+FDIV_mr 1111 1100 1001 00.. .... .... @b3_rd_ld_ul
+
+# FMUL #imm, rd
+FMUL_ir 1111 1101 0111 0010 0011 rd:4
+# FMUL rs, rd
+# FMUL dsp[rs], rd
+FMUL_mr 1111 1100 1000 11.. .... .... @b3_rd_ld_ul
+
+# FSUB #imm, rd
+FSUB_ir 1111 1101 0111 0010 0000 rd:4
+# FSUB rs, rd
+# FSUB dsp[rs], rd
+FSUB_mr 1111 1100 1000 00.. .... .... @b3_rd_ld_ul
+
+# FTOI rs, rd
+# FTOI dsp[rs], rd
+FTOI 1111 1100 1001 01.. .... .... @b3_rd_ld_ul
+
+# INT #uimm8
+INT 0111 0101 0110 0000 imm:8
+
+# ITOF dsp[rs].ub, rd
+# ITOF rs, rd
+ITOF 1111 1100 0100 01.. .... .... @b3_rd_ld_ub
+# ITOF dsp[rs], rd
+ITOF 0000 0110 ..10 00.. 0001 0001 .... .... @b4_rd_ldmi
+
+# JMP rs
+JMP 0111 1111 0000 rs:4 &jreg
+# JSR rs
+JSR 0111 1111 0001 rs:4 &jreg
+
+# MACHI rs, rs2
+MACHI 1111 1101 0000 0100 rs:4 rs2:4
+# MACLO rs, rs2
+MACLO 1111 1101 0000 0101 rs:4 rs2:4
+
+# MAX #imm, rd
+MAX_ir 1111 1101 0111 ..00 0100 .... @b3_rd_li
+# MAX dsp[rs].ub, rd
+# MAX rs, rd
+MAX_mr 1111 1100 0001 00.. .... .... @b3_rd_ld_ub
+# MAX dsp[rs], rd
+MAX_mr 0000 0110 ..10 00.. 0000 0100 .... .... @b4_rd_ldmi
+
+# MIN #imm, rd
+MIN_ir 1111 1101 0111 ..00 0101 .... @b3_rd_li
+# MIN dsp[rs].ub, rd
+# MIN rs, rd
+MIN_mr 1111 1100 0001 01.. .... .... @b3_rd_ld_ub
+# MIN dsp[rs], rd
+MIN_mr 0000 0110 ..10 00.. 0000 0101 .... .... @b4_rd_ldmi
+
+# MOV.b rs, dsp5[rd]
+MOV_rm 1000 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=0
+# MOV.w rs, dsp5[rd]
+MOV_rm 1001 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=1
+# MOV.l rs, dsp5[rd]
+MOV_rm 1010 0 .... rd:3 . rs:3 dsp=%b2_dsp5_3 sz=2
+# MOV.b dsp5[rs], rd
+MOV_mr 1000 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=0
+# MOV.w dsp5[rs], rd
+MOV_mr 1001 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=1
+# MOV.l dsp5[rs], rd
+MOV_mr 1010 1 .... rs:3 . rd:3 dsp=%b2_dsp5_3 sz=2
+# MOV.l #uimm4, rd
+MOV_ir 0110 0110 imm:4 rd:4
+# MOV.b #imm8, dsp5[rd]
+MOV_im 0011 1100 . rd:3 .... imm:8 sz=0 dsp=%b3_dsp5_8
+# MOV.w #imm8, dsp5[rd]
+MOV_im 0011 1101 . rd:3 .... imm:8 sz=1 dsp=%b3_dsp5_8
+# MOV.l #imm8, dsp5[rd]
+MOV_im 0011 1110 . rd:3 .... imm:8 sz=2 dsp=%b3_dsp5_8
+# MOV.l #imm8, rd
+MOV_ir 0111 0101 0100 rd:4 imm:8
+# MOV.l #imm, rd
+MOV_ir 1111 1011 rd:4 .. 10 imm=%b2_li_2
+# MOV.<bwl> #imm, [rd]
+MOV_im 1111 1000 rd:4 .. sz:2 dsp=0 imm=%b2_li_2
+# MOV.<bwl> #imm, dsp8[rd]
+MOV_im 1111 1001 rd:4 .. sz:2 dsp:8 imm=%b3_li_10
+# MOV.<bwl> #imm, dsp16[rd]
+MOV_im 1111 1010 rd:4 .. sz:2 .... .... .... .... \
+ imm=%b4_li_18 dsp=%b4_dsp_16
+# MOV.<bwl> [ri,rb], rd
+MOV_ar 1111 1110 01 sz:2 ri:4 rb:4 rd:4
+# MOV.<bwl> rs, [ri,rb]
+MOV_ra 1111 1110 00 sz:2 ri:4 rb:4 rs:4
+# Note ldd=3 and lds=3 indicate register src or dst
+# MOV.b rs, rd
+# MOV.b rs, dsp[rd]
+# MOV.b dsp[rs], rd
+# MOV.b dsp[rs], dsp[rd]
+MOV_mm 1100 ldd:2 lds:2 rs:4 rd:4 sz=0
+# MOV.w rs, rd
+# MOV.w rs, dsp[rd]
+# MOV.w dsp[rs], rd
+# MOV.w dsp[rs], dsp[rd]
+MOV_mm 1101 ldd:2 lds:2 rs:4 rd:4 sz=1
+# MOV.l rs, rd
+# MOV.l rs, dsp[rd]
+# MOV.l dsp[rs], rd
+# MOV.l dsp[rs], dsp[rd]
+MOV_mm 1110 ldd:2 lds:2 rs:4 rd:4 sz=2
+# MOV.l rs, [rd+]
+# MOV.l rs, [-rd]
+MOV_rp 1111 1101 0010 0 ad:1 sz:2 rd:4 rs:4
+# MOV.l [rs+], rd
+# MOV.l [-rs], rd
+MOV_pr 1111 1101 0010 1 ad:1 sz:2 rd:4 rs:4
+
+# MOVU.<bw> dsp5[rs], rd
+MOVU_mr 1011 sz:1 ... . rs:3 . rd:3 dsp=%b2_dsp5_3
+# MOVU.<bw> [rs], rd
+MOVU_mr 0101 1 sz:1 00 rs:4 rd:4 dsp=0
+# MOVU.<bw> dsp8[rs], rd
+MOVU_mr 0101 1 sz:1 01 rs:4 rd:4 dsp:8
+# MOVU.<bw> dsp16[rs], rd
+MOVU_mr 0101 1 sz:1 10 rs:4 rd:4 .... .... .... .... dsp=%b4_dsp_16
+# MOVU.<bw> rs, rd
+MOVU_rr 0101 1 sz:1 11 rs:4 rd:4
+# MOVU.<bw> [ri, rb], rd
+MOVU_ar 1111 1110 110 sz:1 ri:4 rb:4 rd:4
+# MOVU.<bw> [rs+], rd
+MOVU_pr 1111 1101 0011 1 ad:1 0 sz:1 rd:4 rs:4
+
+# MUL #uimm4, rd
+MUL_ir 0110 0011 .... .... @b2_rds_uimm4
+# MUL #imm, rd
+MUL_ir 0111 01.. 0001 .... @b2_rds_li
+# MUL dsp[rs].ub, rd
+# MUL rs, rd
+MUL_mr 0100 11.. .... .... @b2_rd_ld_ub
+# MUL dsp[rs], rd
+MUL_mr 0000 0110 ..00 11.. .... .... @b3_rd_ld
+# MUL rs, rs2, rd
+MUL_rrr 1111 1111 0011 .... .... .... @b3_rd_rs_rs2
+
+# MULHI rs, rs2
+MULHI 1111 1101 0000 0000 rs:4 rs2:4
+# MULLO rs, rs2
+MULLO 1111 1101 0000 0001 rs:4 rs2:4
+
+# MVFACHI rd
+MVFACHI 1111 1101 0001 1111 0000 rd:4
+# MVFACMI rd
+MVFACMI 1111 1101 0001 1111 0010 rd:4
+
+# MVFC cr, rd
+MVFC 1111 1101 0110 1010 cr:4 rd:4
+
+# MVTACHI rs
+MVTACHI 1111 1101 0001 0111 0000 rs:4
+# MVTACLO rs
+MVTACLO 1111 1101 0001 0111 0001 rs:4
+
+# MVTC #imm, cr
+MVTC_i 1111 1101 0111 ..11 0000 cr:4 imm=%b3_li_10
+# MVTC rs, cr
+MVTC_r 1111 1101 0110 1000 rs:4 cr:4
+
+# MVTIPL #imm
+MVTIPL 0111 0101 0111 0000 0000 imm:4
+
+# NEG rd
+NEG_rr 0111 1110 0001 .... @b2_rds
+# NEG rs, rd
+NEG_rr 1111 1100 0000 0111 .... .... @b3_rd_rs
+
+NOP 0000 0011
+
+# NOT rd
+NOT_rr 0111 1110 0000 .... @b2_rds
+# NOT rs, rd
+NOT_rr 1111 1100 0011 1011 .... .... @b3_rd_rs
+
+# OR #uimm4, rd
+OR_ir 0110 0101 .... .... @b2_rds_uimm4
+# OR #imm, rd
+OR_ir 0111 01.. 0011 .... @b2_rds_li
+# OR dsp[rs].ub, rd
+# OR rs, rd
+OR_mr 0101 01.. .... .... @b2_rd_ld_ub
+# OR dsp[rs], rd
+OR_mr 0000 0110 .. 0101 .. .... .... @b3_rd_ld
+# OR rs, rs2, rd
+OR_rrr 1111 1111 0101 .... .... .... @b3_rd_rs_rs2
+
+# POP cr
+POPC 0111 1110 1110 cr:4
+# POP rd-rd2
+POPM 0110 1111 rd:4 rd2:4
+
+# POP rd
+# PUSH.<bwl> rs
+{
+ POP 0111 1110 1011 rd:4
+ PUSH_r 0111 1110 10 sz:2 rs:4
+}
+# PUSH.<bwl> dsp[rs]
+PUSH_m 1111 01 ld:2 rs:4 10 sz:2
+# PUSH cr
+PUSHC 0111 1110 1100 cr:4
+# PUSHM rs-rs2
+PUSHM 0110 1110 rs:4 rs2:4
+
+# RACW #imm
+RACW 1111 1101 0001 1000 000 imm:1 0000
+
+# REVL rs,rd
+REVL 1111 1101 0110 0111 .... .... @b3_rd_rs
+# REVW rs,rd
+REVW 1111 1101 0110 0101 .... .... @b3_rd_rs
+
+# SMOVF
+# RPMA.<bwl>
+{
+ SMOVF 0111 1111 1000 1111
+ RMPA 0111 1111 1000 11 sz:2
+}
+
+# ROLC rd
+ROLC 0111 1110 0101 .... @b2_rds
+# RORC rd
+RORC 0111 1110 0100 .... @b2_rds
+
+# ROTL #imm, rd
+ROTL_ir 1111 1101 0110 111. .... .... @b3_rds_imm5
+# ROTL rs, rd
+ROTL_rr 1111 1101 0110 0110 .... .... @b3_rd_rs
+
+# ROTR #imm, rd
+ROTR_ir 1111 1101 0110 110. .... .... @b3_rds_imm5
+# ROTR rs, rd
+ROTR_rr 1111 1101 0110 0100 .... .... @b3_rd_rs
+
+# ROUND rs,rd
+# ROUND dsp[rs],rd
+ROUND 1111 1100 1001 10 .. .... .... @b3_ld_rd_rs
+
+RTE 0111 1111 1001 0101
+
+RTFI 0111 1111 1001 0100
+
+RTS 0000 0010
+
+# RTSD #imm
+RTSD_i 0110 0111 imm:8
+# RTSD #imm, rd-rd2
+RTSD_irr 0011 1111 rd:4 rd2:4 imm:8
+
+# SAT rd
+SAT 0111 1110 0011 .... @b2_rds
+# SATR
+SATR 0111 1111 1001 0011
+
+# SBB rs, rd
+SBB_rr 1111 1100 0000 0011 .... .... @b3_rd_rs
+# SBB dsp[rs].l, rd
+# Note only mi==2 allowed.
+SBB_mr 0000 0110 ..10 00.. 0000 0000 .... .... @b4_rd_ldmi
+
+# SCCnd dsp[rd]
+# SCCnd rd
+SCCnd 1111 1100 1101 .... .... .... @b3_sz_ld_rd_cd
+
+# SETPSW psw
+SETPSW 0111 1111 1010 cb:4
+
+# SHAR #imm, rd
+SHAR_irr 0110 101. .... .... @b2_rds_imm5
+# SHAR #imm, rs, rd
+SHAR_irr 1111 1101 101. .... .... .... @b3_rd_rs_imm5
+# SHAR rs, rd
+SHAR_rr 1111 1101 0110 0001 .... .... @b3_rd_rs
+
+# SHLL #imm, rd
+SHLL_irr 0110 110. .... .... @b2_rds_imm5
+# SHLL #imm, rs, rd
+SHLL_irr 1111 1101 110. .... .... .... @b3_rd_rs_imm5
+# SHLL rs, rd
+SHLL_rr 1111 1101 0110 0010 .... .... @b3_rd_rs
+
+# SHLR #imm, rd
+SHLR_irr 0110 100. .... .... @b2_rds_imm5
+# SHLR #imm, rs, rd
+SHLR_irr 1111 1101 100. .... .... .... @b3_rd_rs_imm5
+# SHLR rs, rd
+SHLR_rr 1111 1101 0110 0000 .... .... @b3_rd_rs
+
+# SMOVB
+# SSTR.<bwl>
+{
+ SMOVB 0111 1111 1000 1011
+ SSTR 0111 1111 1000 10 sz:2
+}
+
+# STNZ #imm, rd
+STNZ 1111 1101 0111 ..00 1111 .... @b3_rd_li
+# STZ #imm, rd
+STZ 1111 1101 0111 ..00 1110 .... @b3_rd_li
+
+# SUB #uimm4, rd
+SUB_ir 0110 0000 .... .... @b2_rds_uimm4
+# SUB dsp[rs].ub, rd
+# SUB rs, rd
+SUB_mr 0100 00.. .... .... @b2_rd_ld_ub
+# SUB dsp[rs], rd
+SUB_mr 0000 0110 ..00 00.. .... .... @b3_rd_ld
+# SUB rs, rs2, rd
+SUB_rrr 1111 1111 0000 .... .... .... @b3_rd_rs_rs2
+
+# SCMPU
+# SUNTIL.<bwl>
+{
+ SCMPU 0111 1111 1000 0011
+ SUNTIL 0111 1111 1000 00 sz:2
+}
+
+# SMOVU
+# SWHILE.<bwl>
+{
+ SMOVU 0111 1111 1000 0111
+ SWHILE 0111 1111 1000 01 sz:2
+}
+
+# TST #imm, rd
+TST_ir 1111 1101 0111 ..00 1100 .... @b3_rd_li
+# TST dsp[rs].ub, rd
+# TST rs, rd
+TST_mr 1111 1100 0011 00.. .... .... @b3_rd_ld_ub
+# TST dsp[rs], rd
+TST_mr 0000 0110 ..10 00.. 0000 1100 .... .... @b4_rd_ldmi
+
+WAIT 0111 1111 1001 0110
+
+# XCHG rs, rd
+# XCHG dsp[rs].ub, rd
+{
+ XCHG_rr 1111 1100 0100 0011 .... .... @b3_rd_rs
+ XCHG_mr 1111 1100 0100 00.. .... .... @b3_rd_ld_ub
+}
+# XCHG dsp[rs], rd
+XCHG_mr 0000 0110 ..10 00.. 0001 0000 .... .... @b4_rd_ldmi
+
+# XOR #imm, rd
+XOR_ir 1111 1101 0111 ..00 1101 .... @b3_rd_li
+# XOR dsp[rs].ub, rd
+# XOR rs, rd
+XOR_mr 1111 1100 0011 01.. .... .... @b3_rd_ld_ub
+# XOR dsp[rs], rd
+XOR_mr 0000 0110 ..10 00.. 0000 1101 .... .... @b4_rd_ldmi
diff --git a/target/rx/meson.build b/target/rx/meson.build
new file mode 100644
index 000000000..8de0ad49b
--- /dev/null
+++ b/target/rx/meson.build
@@ -0,0 +1,16 @@
+gen = [
+ decodetree.process('insns.decode', extra_args: [ '--varinsnwidth', '32' ])
+]
+
+rx_ss = ss.source_set()
+rx_ss.add(gen)
+rx_ss.add(files(
+ 'translate.c',
+ 'op_helper.c',
+ 'helper.c',
+ 'cpu.c',
+ 'gdbstub.c',
+ 'disas.c'))
+
+target_arch += {'rx': rx_ss}
+target_softmmu_arch += {'rx': ss.source_set()}
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
new file mode 100644
index 000000000..11f952d34
--- /dev/null
+++ b/target/rx/op_helper.c
@@ -0,0 +1,462 @@
+/*
+ * RX helper functions
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "fpu/softfloat.h"
+
+static inline void QEMU_NORETURN raise_exception(CPURXState *env, int index,
+ uintptr_t retaddr);
+
+static void _set_psw(CPURXState *env, uint32_t psw, uint32_t rte)
+{
+ uint32_t prev_u;
+ prev_u = env->psw_u;
+ rx_cpu_unpack_psw(env, psw, rte);
+ if (prev_u != env->psw_u) {
+ /* switch r0 */
+ if (env->psw_u) {
+ env->isp = env->regs[0];
+ env->regs[0] = env->usp;
+ } else {
+ env->usp = env->regs[0];
+ env->regs[0] = env->isp;
+ }
+ }
+}
+
+void helper_set_psw(CPURXState *env, uint32_t psw)
+{
+ _set_psw(env, psw, 0);
+}
+
+void helper_set_psw_rte(CPURXState *env, uint32_t psw)
+{
+ _set_psw(env, psw, 1);
+}
+
+uint32_t helper_pack_psw(CPURXState *env)
+{
+ return rx_cpu_pack_psw(env);
+}
+
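+/*
+ * Record floating-point exception <b>: the cause bit is always set;
+ * the sticky flag bit is set only when the exception is not enabled
+ * to trap.
+ */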
+#define SET_FPSW(b) \
+ do { \
+ env->fpsw = FIELD_DP32(env->fpsw, FPSW, C ## b, 1); \
+ if (!FIELD_EX32(env->fpsw, FPSW, E ## b)) { \
+ env->fpsw = FIELD_DP32(env->fpsw, FPSW, F ## b, 1); \
+ } \
+ } while (0)
+
+/* fp operations */
+static void update_fpsw(CPURXState *env, float32 ret, uintptr_t retaddr)
+{
+ int xcpt, cause, enable;
+
+ env->psw_z = ret & ~(1 << 31); /* mask sign bit */
+ env->psw_s = ret;
+
+ xcpt = get_float_exception_flags(&env->fp_status);
+
+ /* Clear the cause entries */
+ env->fpsw = FIELD_DP32(env->fpsw, FPSW, CAUSE, 0);
+
+ /* set FPSW */
+ if (unlikely(xcpt)) {
+ if (xcpt & float_flag_invalid) {
+ SET_FPSW(V);
+ }
+ if (xcpt & float_flag_divbyzero) {
+ SET_FPSW(Z);
+ }
+ if (xcpt & float_flag_overflow) {
+ SET_FPSW(O);
+ }
+ if (xcpt & float_flag_underflow) {
+ SET_FPSW(U);
+ }
+ if (xcpt & float_flag_inexact) {
+ SET_FPSW(X);
+ }
+ if ((xcpt & (float_flag_input_denormal
+ | float_flag_output_denormal))
+ && !FIELD_EX32(env->fpsw, FPSW, DN)) {
+ env->fpsw = FIELD_DP32(env->fpsw, FPSW, CE, 1);
+ }
+
+ /* update FPSW_FLAG_S */
+ if (FIELD_EX32(env->fpsw, FPSW, FLAGS) != 0) {
+ env->fpsw = FIELD_DP32(env->fpsw, FPSW, FS, 1);
+ }
+
+ /* Generate an exception if enabled */
+ cause = FIELD_EX32(env->fpsw, FPSW, CAUSE);
+ enable = FIELD_EX32(env->fpsw, FPSW, ENABLE);
+ enable |= 1 << 5; /* CE always enabled */
+ if (cause & enable) {
+ raise_exception(env, 21, retaddr);
+ }
+ }
+}
+
+void helper_set_fpsw(CPURXState *env, uint32_t val)
+{
+ static const int roundmode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+ uint32_t fpsw = env->fpsw;
+ fpsw |= 0x7fffff03;
+ val &= ~0x80000000;
+ fpsw &= val;
+    fpsw = FIELD_DP32(fpsw, FPSW, FS, FIELD_EX32(fpsw, FPSW, FLAGS) != 0);
+ env->fpsw = fpsw;
+ set_float_rounding_mode(roundmode[FIELD_EX32(env->fpsw, FPSW, RM)],
+ &env->fp_status);
+}
+
+#define FLOATOP(op, func) \
+ float32 helper_##op(CPURXState *env, float32 t0, float32 t1) \
+ { \
+ float32 ret; \
+ ret = func(t0, t1, &env->fp_status); \
+ update_fpsw(env, *(uint32_t *)&ret, GETPC()); \
+ return ret; \
+ }
+
+FLOATOP(fadd, float32_add)
+FLOATOP(fsub, float32_sub)
+FLOATOP(fmul, float32_mul)
+FLOATOP(fdiv, float32_div)
+
+void helper_fcmp(CPURXState *env, float32 t0, float32 t1)
+{
+ int st;
+ st = float32_compare(t0, t1, &env->fp_status);
+ update_fpsw(env, 0, GETPC());
+ env->psw_z = 1;
+ env->psw_s = env->psw_o = 0;
+ switch (st) {
+ case float_relation_equal:
+ env->psw_z = 0;
+ break;
+ case float_relation_less:
+ env->psw_s = -1;
+ break;
+ case float_relation_unordered:
+ env->psw_o = -1;
+ break;
+ }
+}
+
+uint32_t helper_ftoi(CPURXState *env, float32 t0)
+{
+ uint32_t ret;
+ ret = float32_to_int32_round_to_zero(t0, &env->fp_status);
+ update_fpsw(env, ret, GETPC());
+ return ret;
+}
+
+uint32_t helper_round(CPURXState *env, float32 t0)
+{
+ uint32_t ret;
+ ret = float32_to_int32(t0, &env->fp_status);
+ update_fpsw(env, ret, GETPC());
+ return ret;
+}
+
+float32 helper_itof(CPURXState *env, uint32_t t0)
+{
+ float32 ret;
+ ret = int32_to_float32(t0, &env->fp_status);
+ update_fpsw(env, ret, GETPC());
+ return ret;
+}
+
+/* string operations */
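+/*
+ * SCMPU: compare the byte strings at [r1] and [r2] for at most r3
+ * bytes, stopping at the first mismatch or NUL; Z and C reflect the
+ * last pair of bytes compared.
+ */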
+void helper_scmpu(CPURXState *env)
+{
+ uint8_t tmp0, tmp1;
+ if (env->regs[3] == 0) {
+ return;
+ }
+ do {
+ tmp0 = cpu_ldub_data_ra(env, env->regs[1]++, GETPC());
+ tmp1 = cpu_ldub_data_ra(env, env->regs[2]++, GETPC());
+ env->regs[3]--;
+ if (tmp0 != tmp1 || tmp0 == '\0') {
+ break;
+ }
+ } while (env->regs[3] != 0);
+ env->psw_z = tmp0 - tmp1;
+ env->psw_c = (tmp0 >= tmp1);
+}
+
+static uint32_t (* const cpu_ldufn[])(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr) = {
+ cpu_ldub_data_ra, cpu_lduw_data_ra, cpu_ldl_data_ra,
+};
+
+static uint32_t (* const cpu_ldfn[])(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr) = {
+ cpu_ldub_data_ra, cpu_lduw_data_ra, cpu_ldl_data_ra,
+};
+
+static void (* const cpu_stfn[])(CPUArchState *env,
+ target_ulong ptr,
+ uint32_t val,
+ uintptr_t retaddr) = {
+ cpu_stb_data_ra, cpu_stw_data_ra, cpu_stl_data_ra,
+};
+
+void helper_sstr(CPURXState *env, uint32_t sz)
+{
+ tcg_debug_assert(sz < 3);
+ while (env->regs[3] != 0) {
+ cpu_stfn[sz](env, env->regs[1], env->regs[2], GETPC());
+ env->regs[1] += 1 << sz;
+ env->regs[3]--;
+ }
+}
+
+#define OP_SMOVU 1
+#define OP_SMOVF 0
+#define OP_SMOVB 2
+
+static void smov(uint32_t mode, CPURXState *env)
+{
+ uint8_t tmp;
+ int dir;
+
+ dir = (mode & OP_SMOVB) ? -1 : 1;
+ while (env->regs[3] != 0) {
+ tmp = cpu_ldub_data_ra(env, env->regs[2], GETPC());
+ cpu_stb_data_ra(env, env->regs[1], tmp, GETPC());
+ env->regs[1] += dir;
+ env->regs[2] += dir;
+ env->regs[3]--;
+ if ((mode & OP_SMOVU) && tmp == 0) {
+ break;
+ }
+ }
+}
+
+void helper_smovu(CPURXState *env)
+{
+ smov(OP_SMOVU, env);
+}
+
+void helper_smovf(CPURXState *env)
+{
+ smov(OP_SMOVF, env);
+}
+
+void helper_smovb(CPURXState *env)
+{
+ smov(OP_SMOVB, env);
+}
+
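+/*
+ * SUNTIL: scan the elements at [r1] until one equal to r2 is found or
+ * r3 elements have been consumed; Z is set on a match, C as for an
+ * unsigned compare of the last element against r2.
+ */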
+void helper_suntil(CPURXState *env, uint32_t sz)
+{
+ uint32_t tmp;
+ tcg_debug_assert(sz < 3);
+ if (env->regs[3] == 0) {
+        return;
+ }
+ do {
+ tmp = cpu_ldufn[sz](env, env->regs[1], GETPC());
+ env->regs[1] += 1 << sz;
+ env->regs[3]--;
+ if (tmp == env->regs[2]) {
+ break;
+ }
+ } while (env->regs[3] != 0);
+ env->psw_z = tmp - env->regs[2];
+ env->psw_c = (tmp <= env->regs[2]);
+}
+
+void helper_swhile(CPURXState *env, uint32_t sz)
+{
+ uint32_t tmp;
+ tcg_debug_assert(sz < 3);
+ if (env->regs[3] == 0) {
+        return;
+ }
+ do {
+ tmp = cpu_ldufn[sz](env, env->regs[1], GETPC());
+ env->regs[1] += 1 << sz;
+ env->regs[3]--;
+ if (tmp != env->regs[2]) {
+ break;
+ }
+ } while (env->regs[3] != 0);
+ env->psw_z = env->regs[3];
+ env->psw_c = (tmp <= env->regs[2]);
+}
+
+/* accumulator operations */
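+/*
+ * RMPA: multiply-accumulate over memory.  r1 and r2 walk the two
+ * source arrays, r3 counts the remaining elements, and the wide
+ * result lives in r6:r5:r4.
+ */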
+void helper_rmpa(CPURXState *env, uint32_t sz)
+{
+ uint64_t result_l, prev;
+ int32_t result_h;
+ int64_t tmp0, tmp1;
+
+ if (env->regs[3] == 0) {
+ return;
+ }
+ result_l = env->regs[5];
+ result_l <<= 32;
+ result_l |= env->regs[4];
+ result_h = env->regs[6];
+ env->psw_o = 0;
+
+ while (env->regs[3] != 0) {
+ tmp0 = cpu_ldfn[sz](env, env->regs[1], GETPC());
+ tmp1 = cpu_ldfn[sz](env, env->regs[2], GETPC());
+ tmp0 *= tmp1;
+ prev = result_l;
+ result_l += tmp0;
+        /* carry / borrow */
+ if (tmp0 < 0) {
+ if (prev > result_l) {
+ result_h--;
+ }
+ } else {
+ if (prev < result_l) {
+ result_h++;
+ }
+ }
+
+ env->regs[1] += 1 << sz;
+ env->regs[2] += 1 << sz;
+ }
+ env->psw_s = result_h;
+ env->psw_o = (result_h != 0 && result_h != -1) << 31;
+ env->regs[6] = result_h;
+ env->regs[5] = result_l >> 32;
+ env->regs[4] = result_l & 0xffffffff;
+}
+
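+/*
+ * RACW: shift the 64-bit accumulator left, round by adding 0.5 in the
+ * upper-word fixed-point format, then saturate and clear the low word.
+ */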
+void helper_racw(CPURXState *env, uint32_t imm)
+{
+ int64_t acc;
+ acc = env->acc;
+ acc <<= (imm + 1);
+ acc += 0x0000000080000000LL;
+ if (acc > 0x00007fff00000000LL) {
+ acc = 0x00007fff00000000LL;
+ } else if (acc < -0x800000000000LL) {
+ acc = -0x800000000000LL;
+ } else {
+ acc &= 0xffffffff00000000LL;
+ }
+ env->acc = acc;
+}
+
+void helper_satr(CPURXState *env)
+{
+ if (env->psw_o >> 31) {
+ if ((int)env->psw_s < 0) {
+ env->regs[6] = 0x00000000;
+ env->regs[5] = 0x7fffffff;
+ env->regs[4] = 0xffffffff;
+ } else {
+ env->regs[6] = 0xffffffff;
+ env->regs[5] = 0x80000000;
+ env->regs[4] = 0x00000000;
+ }
+ }
+}
+
+/* div */
+uint32_t helper_div(CPURXState *env, uint32_t num, uint32_t den)
+{
+ uint32_t ret = num;
+ if (!((num == INT_MIN && den == -1) || den == 0)) {
+ ret = (int32_t)num / (int32_t)den;
+ env->psw_o = 0;
+ } else {
+ env->psw_o = -1;
+ }
+ return ret;
+}
+
+uint32_t helper_divu(CPURXState *env, uint32_t num, uint32_t den)
+{
+ uint32_t ret = num;
+ if (den != 0) {
+ ret = num / den;
+ env->psw_o = 0;
+ } else {
+ env->psw_o = -1;
+ }
+ return ret;
+}
+
+/* exception */
+static inline void QEMU_NORETURN raise_exception(CPURXState *env, int index,
+ uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = index;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void QEMU_NORETURN helper_raise_privilege_violation(CPURXState *env)
+{
+ raise_exception(env, 20, GETPC());
+}
+
+void QEMU_NORETURN helper_raise_access_fault(CPURXState *env)
+{
+ raise_exception(env, 21, GETPC());
+}
+
+void QEMU_NORETURN helper_raise_illegal_instruction(CPURXState *env)
+{
+ raise_exception(env, 23, GETPC());
+}
+
+void QEMU_NORETURN helper_wait(CPURXState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+ env->in_sleep = 1;
+ raise_exception(env, EXCP_HLT, 0);
+}
+
+void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec)
+{
+ raise_exception(env, 0x100 + vec, 0);
+}
+
+void QEMU_NORETURN helper_rxbrk(CPURXState *env)
+{
+ raise_exception(env, 0x100, 0);
+}
diff --git a/target/rx/translate.c b/target/rx/translate.c
new file mode 100644
index 000000000..5db8f79a8
--- /dev/null
+++ b/target/rx/translate.c
@@ -0,0 +1,2408 @@
+/*
+ * RX translation
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bswap.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ CPURXState *env;
+ uint32_t pc;
+} DisasContext;
+
+typedef struct DisasCompare {
+ TCGv value;
+ TCGv temp;
+ TCGCond cond;
+} DisasCompare;
+
+const char *rx_crname(uint8_t cr)
+{
+ static const char *cr_names[] = {
+ "psw", "pc", "usp", "fpsw", "", "", "", "",
+ "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
+ };
+ if (cr >= ARRAY_SIZE(cr_names)) {
+ return "illegal";
+ }
+ return cr_names[cr];
+}
+
+/* Target-specific values for dc->base.is_jmp. */
+#define DISAS_JUMP DISAS_TARGET_0
+#define DISAS_UPDATE DISAS_TARGET_1
+#define DISAS_EXIT DISAS_TARGET_2
+
+/* global register indexes */
+static TCGv cpu_regs[16];
+static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
+static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
+static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
+static TCGv cpu_fintv, cpu_intb, cpu_pc;
+static TCGv_i64 cpu_acc;
+
+#define cpu_sp cpu_regs[0]
+
+#include "exec/gen-icount.h"
+
+/* decoder helper */
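+/*
+ * Fetch bytes i+1..n of a variable-length instruction and pack them
+ * MSB-first into the upper bits of the 32-bit insn word.
+ */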
+static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
+ int i, int n)
+{
+ while (++i <= n) {
+ uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
+ insn |= b << (32 - i * 8);
+ }
+ return insn;
+}
+
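+/*
+ * Load an immediate that follows the opcode.  sz encodes the width:
+ * 1 = sign-extended byte, 2 = sign-extended word, 3 = sign-extended
+ * 24 bits, 0 = full 32-bit longword.
+ */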
+static uint32_t li(DisasContext *ctx, int sz)
+{
+ int32_t tmp, addr;
+ CPURXState *env = ctx->env;
+ addr = ctx->base.pc_next;
+
+ tcg_debug_assert(sz < 4);
+ switch (sz) {
+ case 1:
+ ctx->base.pc_next += 1;
+ return cpu_ldsb_code(env, addr);
+ case 2:
+ ctx->base.pc_next += 2;
+ return cpu_ldsw_code(env, addr);
+ case 3:
+ ctx->base.pc_next += 3;
+ tmp = cpu_ldsb_code(env, addr + 2) << 16;
+ tmp |= cpu_lduw_code(env, addr) & 0xffff;
+ return tmp;
+ case 0:
+ ctx->base.pc_next += 4;
+ return cpu_ldl_code(env, addr);
+ }
+ return 0;
+}
+
+static int bdsp_s(DisasContext *ctx, int d)
+{
+ /*
+ * 0 -> 8
+ * 1 -> 9
+ * 2 -> 10
+ * 3 -> 3
+ * :
+ * 7 -> 7
+ */
+ if (d < 3) {
+ d += 8;
+ }
+ return d;
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-insns.c.inc"
+
+void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ RXCPU *cpu = RX_CPU(cs);
+ CPURXState *env = &cpu->env;
+ int i;
+ uint32_t psw;
+
+ psw = rx_cpu_pack_psw(env);
+ qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
+ env->pc, psw);
+ for (i = 0; i < 16; i += 4) {
+ qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
+ i, env->regs[i], i + 1, env->regs[i + 1],
+ i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
+ }
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&dc->base, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb(dc->base.tb, n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+/* generic load wrapper */
+static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
+{
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
+}
+
+/* unsigned load wrapper */
+static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
+{
+ tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
+}
+
+/* generic store wrapper */
+static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
+{
+ tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
+}
+
+/* [ri, rb] */
+static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
+ int size, int ri, int rb)
+{
+ tcg_gen_shli_i32(mem, cpu_regs[ri], size);
+ tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
+}
+
+/* dsp[reg] */
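+/*
+ * ld selects the displacement form: 0 = register indirect (no dsp),
+ * 1 = dsp8, 2 = dsp16; the displacement is scaled by the access size.
+ */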
+static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
+ int ld, int size, int reg)
+{
+ uint32_t dsp;
+
+ tcg_debug_assert(ld < 3);
+ switch (ld) {
+ case 0:
+ return cpu_regs[reg];
+ case 1:
+ dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
+ tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
+ ctx->base.pc_next += 1;
+ return mem;
+ case 2:
+ dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
+ tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
+ ctx->base.pc_next += 2;
+ return mem;
+ }
+ return NULL;
+}
+
+static inline MemOp mi_to_mop(unsigned mi)
+{
+ static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
+ tcg_debug_assert(mi < 5);
+ return mop[mi];
+}
+
+/* load source operand */
+static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
+ int ld, int mi, int rs)
+{
+ TCGv addr;
+ MemOp mop;
+ if (ld < 3) {
+ mop = mi_to_mop(mi);
+ addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
+ tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
+ return mem;
+ } else {
+ return cpu_regs[rs];
+ }
+}
+
+/* Processor mode check */
+static int is_privileged(DisasContext *ctx, int is_exception)
+{
+ if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) {
+ if (is_exception) {
+ gen_helper_raise_privilege_violation(cpu_env);
+ }
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+/* generate QEMU condition */
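+/*
+ * Flags are kept unpacked: psw_z holds the last result (Z is set when
+ * it is zero), psw_s its sign, psw_c a 0/1 carry, and the O flag is
+ * read from the sign bit of psw_o.
+ */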
+static void psw_cond(DisasCompare *dc, uint32_t cond)
+{
+ tcg_debug_assert(cond < 16);
+ switch (cond) {
+ case 0: /* z */
+ dc->cond = TCG_COND_EQ;
+ dc->value = cpu_psw_z;
+ break;
+ case 1: /* nz */
+ dc->cond = TCG_COND_NE;
+ dc->value = cpu_psw_z;
+ break;
+ case 2: /* c */
+ dc->cond = TCG_COND_NE;
+ dc->value = cpu_psw_c;
+ break;
+ case 3: /* nc */
+ dc->cond = TCG_COND_EQ;
+ dc->value = cpu_psw_c;
+ break;
+    case 4: /* gtu (C & ~Z) == 1 */
+    case 5: /* leu (C & ~Z) == 0 */
+ tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
+ tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
+ dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
+ dc->value = dc->temp;
+ break;
+ case 6: /* pz (S == 0) */
+ dc->cond = TCG_COND_GE;
+ dc->value = cpu_psw_s;
+ break;
+ case 7: /* n (S == 1) */
+ dc->cond = TCG_COND_LT;
+ dc->value = cpu_psw_s;
+ break;
+ case 8: /* ge (S^O)==0 */
+ case 9: /* lt (S^O)==1 */
+ tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
+ dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
+ dc->value = dc->temp;
+ break;
+ case 10: /* gt ((S^O)|Z)==0 */
+ case 11: /* le ((S^O)|Z)==1 */
+ tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
+ tcg_gen_sari_i32(dc->temp, dc->temp, 31);
+ tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
+ dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
+ dc->value = dc->temp;
+ break;
+ case 12: /* o */
+ dc->cond = TCG_COND_LT;
+ dc->value = cpu_psw_o;
+ break;
+ case 13: /* no */
+ dc->cond = TCG_COND_GE;
+ dc->value = cpu_psw_o;
+ break;
+ case 14: /* always true */
+ dc->cond = TCG_COND_ALWAYS;
+ dc->value = dc->temp;
+ break;
+ case 15: /* always false */
+ dc->cond = TCG_COND_NEVER;
+ dc->value = dc->temp;
+ break;
+ }
+}
+
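+/*
+ * USP/ISP reads must honour r0 banking: return r0 when PSW.U selects
+ * the register being read, the shadow copy otherwise.
+ */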
+static void move_from_cr(TCGv ret, int cr, uint32_t pc)
+{
+ TCGv z = tcg_const_i32(0);
+ switch (cr) {
+ case 0: /* PSW */
+ gen_helper_pack_psw(ret, cpu_env);
+ break;
+ case 1: /* PC */
+ tcg_gen_movi_i32(ret, pc);
+ break;
+ case 2: /* USP */
+ tcg_gen_movcond_i32(TCG_COND_NE, ret,
+ cpu_psw_u, z, cpu_sp, cpu_usp);
+ break;
+ case 3: /* FPSW */
+ tcg_gen_mov_i32(ret, cpu_fpsw);
+ break;
+ case 8: /* BPSW */
+ tcg_gen_mov_i32(ret, cpu_bpsw);
+ break;
+ case 9: /* BPC */
+ tcg_gen_mov_i32(ret, cpu_bpc);
+ break;
+ case 10: /* ISP */
+ tcg_gen_movcond_i32(TCG_COND_EQ, ret,
+ cpu_psw_u, z, cpu_sp, cpu_isp);
+ break;
+ case 11: /* FINTV */
+ tcg_gen_mov_i32(ret, cpu_fintv);
+ break;
+ case 12: /* INTB */
+ tcg_gen_mov_i32(ret, cpu_intb);
+ break;
+ default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Unimplemented control register %d\n", cr);
+        /* Unimplemented registers read as 0 */
+ tcg_gen_movi_i32(ret, 0);
+ break;
+ }
+ tcg_temp_free(z);
+}
+
+static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
+{
+ TCGv z;
+ if (cr >= 8 && !is_privileged(ctx, 0)) {
+ /* Some control registers can only be written in privileged mode. */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "disallowed control register write: %s\n", rx_crname(cr));
+ return;
+ }
+ z = tcg_const_i32(0);
+ switch (cr) {
+ case 0: /* PSW */
+ gen_helper_set_psw(cpu_env, val);
+ break;
+    /* case 1: writing to the PC is not supported */
+ case 2: /* USP */
+ tcg_gen_mov_i32(cpu_usp, val);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp,
+ cpu_psw_u, z, cpu_usp, cpu_sp);
+ break;
+ case 3: /* FPSW */
+ gen_helper_set_fpsw(cpu_env, val);
+ break;
+ case 8: /* BPSW */
+ tcg_gen_mov_i32(cpu_bpsw, val);
+ break;
+ case 9: /* BPC */
+ tcg_gen_mov_i32(cpu_bpc, val);
+ break;
+ case 10: /* ISP */
+ tcg_gen_mov_i32(cpu_isp, val);
+ /* if PSW.U is 0, copy isp to r0 */
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp,
+ cpu_psw_u, z, cpu_isp, cpu_sp);
+ break;
+ case 11: /* FINTV */
+ tcg_gen_mov_i32(cpu_fintv, val);
+ break;
+ case 12: /* INTB */
+ tcg_gen_mov_i32(cpu_intb, val);
+ break;
+ default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Unimplemented control register %d\n", cr);
+ break;
+ }
+ tcg_temp_free(z);
+}
+
+static void push(TCGv val)
+{
+ tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
+ rx_gen_st(MO_32, val, cpu_sp);
+}
+
+static void pop(TCGv ret)
+{
+ rx_gen_ld(MO_32, ret, cpu_sp);
+ tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
+}
+
+/* mov.<bwl> rs,dsp5[rd] */
+static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
+ rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.<bwl> dsp5[rs],rd */
+static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
+ rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.l #uimm4,rd */
+/* mov.l #uimm8,rd */
+/* mov.l #imm,rd */
+static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
+{
+ tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
+ return true;
+}
+
+/* mov.<bwl> #uimm8,dsp[rd] */
+/* mov.<bwl> #imm, dsp[rd] */
+static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
+{
+ TCGv imm, mem;
+ imm = tcg_const_i32(a->imm);
+ mem = tcg_temp_new();
+ tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
+ rx_gen_st(a->sz, imm, mem);
+ tcg_temp_free(imm);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.<bwl> [ri,rb],rd */
+static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
+ rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.<bwl> rd,[ri,rb] */
+static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
+ rx_gen_st(a->sz, cpu_regs[a->rs], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.<bwl> dsp[rs],dsp[rd] */
+/* mov.<bwl> rs,dsp[rd] */
+/* mov.<bwl> dsp[rs],rd */
+/* mov.<bwl> rs,rd */
+static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
+{
+ static void (* const mov[])(TCGv ret, TCGv arg) = {
+ tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
+ };
+ TCGv tmp, mem, addr;
+ if (a->lds == 3 && a->ldd == 3) {
+ /* mov.<bwl> rs,rd */
+ mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
+ return true;
+ }
+
+ mem = tcg_temp_new();
+ if (a->lds == 3) {
+ /* mov.<bwl> rs,dsp[rd] */
+ addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
+ rx_gen_st(a->sz, cpu_regs[a->rd], addr);
+ } else if (a->ldd == 3) {
+ /* mov.<bwl> dsp[rs],rd */
+ addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
+ rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
+ } else {
+ /* mov.<bwl> dsp[rs],dsp[rd] */
+ tmp = tcg_temp_new();
+ addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
+ rx_gen_ld(a->sz, tmp, addr);
+ addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
+ rx_gen_st(a->sz, tmp, addr);
+ tcg_temp_free(tmp);
+ }
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* mov.<bwl> rs,[rd+] */
+/* mov.<bwl> rs,[-rd] */
+static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ tcg_gen_mov_i32(val, cpu_regs[a->rs]);
+ if (a->ad == 1) {
+ tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ rx_gen_st(a->sz, val, cpu_regs[a->rd]);
+ if (a->ad == 0) {
+ tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ tcg_temp_free(val);
+ return true;
+}
+
+/* mov.<bwl> [rd+],rs */
+/* mov.<bwl> [-rd],rs */
+static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ if (a->ad == 1) {
+ tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
+ if (a->ad == 0) {
+ tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ tcg_gen_mov_i32(cpu_regs[a->rs], val);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* movu.<bw> dsp5[rs],rd */
+/* movu.<bw> dsp[rs],rd */
+static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
+ rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* movu.<bw> rs,rd */
+static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
+{
+ static void (* const ext[])(TCGv ret, TCGv arg) = {
+ tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
+ };
+ ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
+ return true;
+}
+
+/* movu.<bw> [ri,rb],rd */
+static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
+{
+ TCGv mem;
+ mem = tcg_temp_new();
+ rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
+ rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* movu.<bw> [rd+],rs */
+/* movu.<bw> [-rd],rs */
+static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ if (a->ad == 1) {
+ tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
+ if (a->ad == 0) {
+ tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
+ }
+ tcg_gen_mov_i32(cpu_regs[a->rs], val);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* pop rd */
+static bool trans_POP(DisasContext *ctx, arg_POP *a)
+{
+ /* mov.l [r0+], rd */
+ arg_MOV_rp mov_a;
+ mov_a.rd = 0;
+ mov_a.rs = a->rd;
+ mov_a.ad = 0;
+ mov_a.sz = MO_32;
+ trans_MOV_pr(ctx, &mov_a);
+ return true;
+}
+
+/* popc cr */
+static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ pop(val);
+ move_to_cr(ctx, val, a->cr);
+ if (a->cr == 0 && is_privileged(ctx, 0)) {
+ /* PSW.I may be updated here. exit TB. */
+ ctx->base.is_jmp = DISAS_UPDATE;
+ }
+ tcg_temp_free(val);
+ return true;
+}
+
+/* popm rd-rd2 */
+static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
+{
+ int r;
+ if (a->rd == 0 || a->rd >= a->rd2) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Invalid register range r%d-r%d\n", a->rd, a->rd2);
+ }
+ r = a->rd;
+ while (r <= a->rd2 && r < 16) {
+ pop(cpu_regs[r++]);
+ }
+ return true;
+}
+
+/* push.<bwl> rs */
+static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ tcg_gen_mov_i32(val, cpu_regs[a->rs]);
+ tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
+ rx_gen_st(a->sz, val, cpu_sp);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* push.<bwl> dsp[rs] */
+static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
+{
+ TCGv mem, val, addr;
+ mem = tcg_temp_new();
+ val = tcg_temp_new();
+ addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
+ rx_gen_ld(a->sz, val, addr);
+ tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
+ rx_gen_st(a->sz, val, cpu_sp);
+ tcg_temp_free(mem);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* pushc cr */
+static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ move_from_cr(val, a->cr, ctx->pc);
+ push(val);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* pushm rs-rs2 */
+static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
+{
+ int r;
+
+ if (a->rs == 0 || a->rs >= a->rs2) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Invalid register range r%d-r%d\n", a->rs, a->rs2);
+ }
+ r = a->rs2;
+ while (r >= a->rs && r >= 0) {
+ push(cpu_regs[r--]);
+ }
+ return true;
+}
+
+/* xchg rs,rd */
+static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
+ tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
+ tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
+ tcg_temp_free(tmp);
+ return true;
+}
+
+/* xchg dsp[rs].<mi>,rd */
+static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
+{
+ TCGv mem, addr;
+ mem = tcg_temp_new();
+ switch (a->mi) {
+ case 0: /* dsp[rs].b */
+ case 1: /* dsp[rs].w */
+ case 2: /* dsp[rs].l */
+ addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
+ break;
+ case 3: /* dsp[rs].uw */
+ case 4: /* dsp[rs].ub */
+ addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
+ 0, mi_to_mop(a->mi));
+ tcg_temp_free(mem);
+ return true;
+}
+
+static inline void stcond(TCGCond cond, int rd, int imm)
+{
+ TCGv z;
+ TCGv _imm;
+ z = tcg_const_i32(0);
+ _imm = tcg_const_i32(imm);
+ tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
+ _imm, cpu_regs[rd]);
+ tcg_temp_free(z);
+ tcg_temp_free(_imm);
+}
+
+/* stz #imm,rd */
+static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
+{
+ stcond(TCG_COND_EQ, a->rd, a->imm);
+ return true;
+}
+
+/* stnz #imm,rd */
+static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
+{
+ stcond(TCG_COND_NE, a->rd, a->imm);
+ return true;
+}
+
+/* sccnd.<bwl> rd */
+/* sccnd.<bwl> dsp:[rd] */
+static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
+{
+ DisasCompare dc;
+ TCGv val, mem, addr;
+ dc.temp = tcg_temp_new();
+ psw_cond(&dc, a->cd);
+ if (a->ld < 3) {
+ val = tcg_temp_new();
+ mem = tcg_temp_new();
+ tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
+        addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rd);
+ rx_gen_st(a->sz, val, addr);
+ tcg_temp_free(val);
+ tcg_temp_free(mem);
+ } else {
+ tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
+ }
+ tcg_temp_free(dc.temp);
+ return true;
+}
+
+/* rtsd #imm */
+static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
+{
+ tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
+ pop(cpu_pc);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+/* rtsd #imm, rd-rd2 */
+static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
+{
+ int dst;
+ int adj;
+
+ if (a->rd2 >= a->rd) {
+ adj = a->imm - (a->rd2 - a->rd + 1);
+ } else {
+ adj = a->imm - (15 - a->rd + 1);
+ }
+
+ tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
+ dst = a->rd;
+ while (dst <= a->rd2 && dst < 16) {
+ pop(cpu_regs[dst++]);
+ }
+ pop(cpu_pc);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+typedef void (*op2fn)(TCGv ret, TCGv arg1);
+typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
+
+static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
+{
+ opr(cpu_regs[dst], cpu_regs[src]);
+}
+
+static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
+{
+ opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
+}
+
+static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
+{
+ TCGv imm = tcg_const_i32(src2);
+ opr(cpu_regs[dst], cpu_regs[src], imm);
+ tcg_temp_free(imm);
+}
+
+static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
+ int dst, int src, int ld, int mi)
+{
+ TCGv val, mem;
+ mem = tcg_temp_new();
+ val = rx_load_source(ctx, mem, ld, mi, src);
+ opr(cpu_regs[dst], cpu_regs[dst], val);
+ tcg_temp_free(mem);
+}
+
+static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+}
+
+/* and #uimm:4, rd */
+/* and #imm, rd */
+static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
+{
+ rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* and dsp[rs], rd */
+/* and rs,rd */
+static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
+{
+ rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* and rs,rs2,rd */
+static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
+{
+ rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
+ return true;
+}
+
+static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+}
+
+/* or #uimm:4, rd */
+/* or #imm, rd */
+static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
+{
+ rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* or dsp[rs], rd */
+/* or rs,rd */
+static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
+{
+ rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* or rs,rs2,rd */
+static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
+{
+ rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
+ return true;
+}
+
+static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+}
+
+/* xor #imm, rd */
+static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
+{
+ rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* xor dsp[rs], rd */
+/* xor rs,rd */
+static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
+{
+ rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+}
+
+/* tst #imm, rd */
+static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
+{
+ rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* tst dsp[rs], rd */
+/* tst rs, rd */
+static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
+{
+ rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+static void rx_not(TCGv ret, TCGv arg1)
+{
+ tcg_gen_not_i32(ret, arg1);
+ tcg_gen_mov_i32(cpu_psw_z, ret);
+ tcg_gen_mov_i32(cpu_psw_s, ret);
+}
+
+/* not rd */
+/* not rs, rd */
+static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
+{
+ rx_gen_op_rr(rx_not, a->rd, a->rs);
+ return true;
+}
+
+static void rx_neg(TCGv ret, TCGv arg1)
+{
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
+ tcg_gen_neg_i32(ret, arg1);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
+ tcg_gen_mov_i32(cpu_psw_z, ret);
+ tcg_gen_mov_i32(cpu_psw_s, ret);
+}
+
+/* neg rd */
+/* neg rs, rd */
+static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
+{
+ rx_gen_op_rr(rx_neg, a->rd, a->rs);
+ return true;
+}
+
+/* ret = arg1 + arg2 + psw_c */
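+/*
+ * The double-width add2 yields C directly; O follows the usual rule:
+ * overflow iff both operands have the same sign and the result's sign
+ * differs, i.e. (res ^ arg1) & ~(arg1 ^ arg2) has bit 31 set.
+ */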
+static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv z;
+ z = tcg_const_i32(0);
+ tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
+ tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
+ tcg_gen_xor_i32(z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+ tcg_temp_free(z);
+}
+
+/* adc #imm, rd */
+static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
+{
+ rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* adc rs, rd */
+static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
+{
+ rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
+ return true;
+}
+
+/* adc dsp[rs], rd */
+static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
+{
+    /* only the longword form (mi == 2) exists */
+ if (a->mi != 2) {
+ return false;
+ }
+ rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* ret = arg1 + arg2 */
+static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv z;
+ z = tcg_const_i32(0);
+ tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
+ tcg_gen_xor_i32(z, arg1, arg2);
+ tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+ tcg_temp_free(z);
+}
+
+/* add #uimm4, rd */
+/* add #imm, rs, rd */
+static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
+{
+ rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
+ return true;
+}
+
+/* add rs, rd */
+/* add dsp[rs], rd */
+static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
+{
+ rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* add rs, rs2, rd */
+static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
+{
+ rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
+ return true;
+}
+
+/* ret = arg1 - arg2 */
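+/*
+ * C is the unsigned no-borrow condition (arg1 >= arg2); O is set when
+ * the operands differ in sign and the result's sign differs from
+ * arg1, i.e. (res ^ arg1) & (arg1 ^ arg2) has bit 31 set.
+ */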
+static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv temp;
+ tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
+ tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
+ temp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(temp, arg1, arg2);
+ tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
+ tcg_temp_free_i32(temp);
+    /* CMP does not store the result */
+ if (ret) {
+ tcg_gen_mov_i32(ret, cpu_psw_s);
+ }
+}
+
+static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
+{
+ rx_sub(NULL, arg1, arg2);
+}
+
+/* ret = arg1 - arg2 - !psw_c */
+/* -> ret = arg1 + ~arg2 + psw_c */
+static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv temp;
+ temp = tcg_temp_new();
+ tcg_gen_not_i32(temp, arg2);
+ rx_adc(ret, arg1, temp);
+ tcg_temp_free(temp);
+}
+
+/* cmp #imm4, rs2 */
+/* cmp #imm8, rs2 */
+/* cmp #imm, rs2 */
+static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
+{
+ rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
+ return true;
+}
+
+/* cmp rs, rs2 */
+/* cmp dsp[rs], rs2 */
+static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
+{
+ rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* sub #imm4, rd */
+static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
+{
+ rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* sub rs, rd */
+/* sub dsp[rs], rd */
+static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
+{
+ rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* sub rs2, rs, rd */
+static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
+{
+ rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
+ return true;
+}
+
+/* sbb rs, rd */
+static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
+{
+ rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
+ return true;
+}
+
+/* sbb dsp[rs], rd */
+static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
+{
+    /* only the longword form (mi == 2) exists */
+ if (a->mi != 2) {
+ return false;
+ }
+ rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+static void rx_abs(TCGv ret, TCGv arg1)
+{
+ TCGv neg;
+ TCGv zero;
+ neg = tcg_temp_new();
+ zero = tcg_const_i32(0);
+ tcg_gen_neg_i32(neg, arg1);
+ tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
+ tcg_temp_free(neg);
+ tcg_temp_free(zero);
+}
+
+/* abs rd */
+/* abs rs, rd */
+static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
+{
+ rx_gen_op_rr(rx_abs, a->rd, a->rs);
+ return true;
+}
+
+/* max #imm, rd */
+static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
+{
+ rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* max rs, rd */
+/* max dsp[rs], rd */
+static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
+{
+ rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* min #imm, rd */
+static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
+{
+ rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* min rs, rd */
+/* min dsp[rs], rd */
+static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
+{
+ rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* mul #uimm4, rd */
+/* mul #imm, rd */
+static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
+{
+ rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* mul rs, rd */
+/* mul dsp[rs], rd */
+static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
+{
+ rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* mul rs, rs2, rd */
+static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
+{
+ rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
+ return true;
+}
+
+/* emul #imm, rd */
+static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
+{
+ TCGv imm = tcg_const_i32(a->imm);
+ if (a->rd > 14) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
+ }
+ tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
+ cpu_regs[a->rd], imm);
+ tcg_temp_free(imm);
+ return true;
+}
+
+/* emul rs, rd */
+/* emul dsp[rs], rd */
+static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
+{
+ TCGv val, mem;
+ if (a->rd > 14) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
+ }
+ mem = tcg_temp_new();
+ val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
+ tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
+ cpu_regs[a->rd], val);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* emulu #imm, rd */
+static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
+{
+ TCGv imm = tcg_const_i32(a->imm);
+ if (a->rd > 14) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
+ }
+ tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
+ cpu_regs[a->rd], imm);
+ tcg_temp_free(imm);
+ return true;
+}
+
+/* emulu rs, rd */
+/* emulu dsp[rs], rd */
+static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
+{
+ TCGv val, mem;
+ if (a->rd > 14) {
+ qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
+ }
+ mem = tcg_temp_new();
+ val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
+ tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
+ cpu_regs[a->rd], val);
+ tcg_temp_free(mem);
+ return true;
+}
+
+static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ gen_helper_div(ret, cpu_env, arg1, arg2);
+}
+
+static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
+{
+ gen_helper_divu(ret, cpu_env, arg1, arg2);
+}
+
+/* div #imm, rd */
+static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
+{
+ rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* div rs, rd */
+/* div dsp[rs], rd */
+static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
+{
+ rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* divu #imm, rd */
+static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
+{
+ rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
+ return true;
+}
+
+/* divu rs, rd */
+/* divu dsp[rs], rd */
+static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
+{
+ rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
+ return true;
+}
+
+/* shll #imm:5, rd */
+/* shll #imm:5, rs2, rd */
+static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new();
+ if (a->imm) {
+ tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
+ tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
+ tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
+ } else {
+ tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
+ tcg_gen_movi_i32(cpu_psw_c, 0);
+ tcg_gen_movi_i32(cpu_psw_o, 0);
+ }
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
+ return true;
+}
+
+/* shll rs, rd */
+static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
+{
+ TCGLabel *noshift, *done;
+ TCGv count, tmp;
+
+ noshift = gen_new_label();
+ done = gen_new_label();
+ /* if (cpu_regs[a->rs]) { */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
+ count = tcg_const_i32(32);
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
+ tcg_gen_sub_i32(count, count, tmp);
+ tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
+ tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
+ tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
+ tcg_gen_br(done);
+ /* } else { */
+ gen_set_label(noshift);
+ tcg_gen_movi_i32(cpu_psw_c, 0);
+ tcg_gen_movi_i32(cpu_psw_o, 0);
+ /* } */
+ gen_set_label(done);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
+ tcg_temp_free(count);
+ tcg_temp_free(tmp);
+ return true;
+}
+
+static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
+ unsigned int alith)
+{
+ static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ tcg_gen_shri_i32, tcg_gen_sari_i32,
+ };
+ tcg_debug_assert(alith < 2);
+ if (imm) {
+ gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
+ tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
+ gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
+ } else {
+ tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
+ tcg_gen_movi_i32(cpu_psw_c, 0);
+ }
+ tcg_gen_movi_i32(cpu_psw_o, 0);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
+}
+
+static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
+{
+ TCGLabel *noshift, *done;
+ TCGv count;
+ static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
+ tcg_gen_shri_i32, tcg_gen_sari_i32,
+ };
+ static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
+ tcg_gen_shr_i32, tcg_gen_sar_i32,
+ };
+ tcg_debug_assert(alith < 2);
+ noshift = gen_new_label();
+ done = gen_new_label();
+ count = tcg_temp_new();
+ /* if (cpu_regs[rs]) { */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
+ tcg_gen_andi_i32(count, cpu_regs[rs], 31);
+ tcg_gen_subi_i32(count, count, 1);
+ gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
+ tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
+ gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
+ tcg_gen_br(done);
+ /* } else { */
+ gen_set_label(noshift);
+ tcg_gen_movi_i32(cpu_psw_c, 0);
+ /* } */
+ gen_set_label(done);
+ tcg_gen_movi_i32(cpu_psw_o, 0);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
+ tcg_temp_free(count);
+}
+
+/* shar #imm:5, rd */
+/* shar #imm:5, rs2, rd */
+static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
+{
+ shiftr_imm(a->rd, a->rs2, a->imm, 1);
+ return true;
+}
+
+/* shar rs, rd */
+static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
+{
+ shiftr_reg(a->rd, a->rs, 1);
+ return true;
+}
+
+/* shlr #imm:5, rd */
+/* shlr #imm:5, rs2, rd */
+static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
+{
+ shiftr_imm(a->rd, a->rs2, a->imm, 0);
+ return true;
+}
+
+/* shlr rs, rd */
+static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
+{
+ shiftr_reg(a->rd, a->rs, 0);
+ return true;
+}
+
+/* rolc rd */
+static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new();
+ tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
+ tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
+ tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
+ tcg_gen_mov_i32(cpu_psw_c, tmp);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
+ tcg_temp_free(tmp);
+ return true;
+}
+
+/* rorc rd */
+static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
+ tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
+ tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
+ tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
+ tcg_gen_mov_i32(cpu_psw_c, tmp);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
+ return true;
+}
+
+enum {ROTR = 0, ROTL = 1};
+enum {ROT_IMM = 0, ROT_REG = 1};
+static inline void rx_rot(int ir, int dir, int rd, int src)
+{
+ switch (dir) {
+ case ROTL:
+ if (ir == ROT_IMM) {
+ tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
+ } else {
+ tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
+ }
+ tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
+ break;
+ case ROTR:
+ if (ir == ROT_IMM) {
+ tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
+ } else {
+ tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
+ }
+ tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
+ break;
+ }
+ tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
+ tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
+}
+
+/* rotl #imm, rd */
+static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
+{
+ rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
+ return true;
+}
+
+/* rotl rs, rd */
+static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
+{
+ rx_rot(ROT_REG, ROTL, a->rd, a->rs);
+ return true;
+}
+
+/* rotr #imm, rd */
+static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
+{
+ rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
+ return true;
+}
+
+/* rotr rs, rd */
+static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
+{
+ rx_rot(ROT_REG, ROTR, a->rd, a->rs);
+ return true;
+}
+
+/* revl rs, rd */
+static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
+{
+ tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
+ return true;
+}
+
+/* revw rs, rd */
+static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
+{
+ TCGv tmp;
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
+ tcg_gen_shli_i32(tmp, tmp, 8);
+ tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
+ tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
+ tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
+ tcg_temp_free(tmp);
+ return true;
+}
+
+/* conditional branch helper */
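+/*
+ * Emit both arms as goto_tb exits: fall through to pc_next when the
+ * condition fails, jump to pc + dst when it holds.  cd 14 and 15 are
+ * the always-true and always-false encodings.
+ */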
+static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
+{
+ DisasCompare dc;
+ TCGLabel *t, *done;
+
+ switch (cd) {
+ case 0 ... 13:
+ dc.temp = tcg_temp_new();
+ psw_cond(&dc, cd);
+ t = gen_new_label();
+ done = gen_new_label();
+ tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ tcg_gen_br(done);
+ gen_set_label(t);
+ gen_goto_tb(ctx, 1, ctx->pc + dst);
+ gen_set_label(done);
+ tcg_temp_free(dc.temp);
+ break;
+ case 14:
+ /* always true case */
+ gen_goto_tb(ctx, 0, ctx->pc + dst);
+ break;
+ case 15:
+ /* always false case */
+        /* Nothing to do */
+ break;
+ }
+}
+
+/* beq dsp:3 / bne dsp:3 */
+/* beq dsp:8 / bne dsp:8 */
+/* bc dsp:8 / bnc dsp:8 */
+/* bgtu dsp:8 / bleu dsp:8 */
+/* bpz dsp:8 / bn dsp:8 */
+/* bge dsp:8 / blt dsp:8 */
+/* bgt dsp:8 / ble dsp:8 */
+/* bo dsp:8 / bno dsp:8 */
+/* beq dsp:16 / bne dsp:16 */
+static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
+{
+ rx_bcnd_main(ctx, a->cd, a->dsp);
+ return true;
+}
+
+/* bra dsp:3 */
+/* bra dsp:8 */
+/* bra dsp:16 */
+/* bra dsp:24 */
+static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
+{
+ rx_bcnd_main(ctx, 14, a->dsp);
+ return true;
+}
+
+/* bra rs */
+static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
+{
+ tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+static inline void rx_save_pc(DisasContext *ctx)
+{
+ TCGv pc = tcg_const_i32(ctx->base.pc_next);
+ push(pc);
+ tcg_temp_free(pc);
+}
+
+/* jmp rs */
+static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
+{
+ tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+/* jsr rs */
+static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
+{
+ rx_save_pc(ctx);
+ tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+/* bsr dsp:16 */
+/* bsr dsp:24 */
+static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
+{
+ rx_save_pc(ctx);
+ rx_bcnd_main(ctx, 14, a->dsp);
+ return true;
+}
+
+/* bsr rs */
+static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
+{
+ rx_save_pc(ctx);
+ tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+/* rts */
+static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
+{
+ pop(cpu_pc);
+ ctx->base.is_jmp = DISAS_JUMP;
+ return true;
+}
+
+/* nop */
+static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
+{
+ return true;
+}
+
+/* scmpu */
+static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
+{
+ gen_helper_scmpu(cpu_env);
+ return true;
+}
+
+/* smovu */
+static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
+{
+ gen_helper_smovu(cpu_env);
+ return true;
+}
+
+/* smovf */
+static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
+{
+ gen_helper_smovf(cpu_env);
+ return true;
+}
+
+/* smovb */
+static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
+{
+ gen_helper_smovb(cpu_env);
+ return true;
+}
+
+#define STRING(op) \
+ do { \
+ TCGv size = tcg_const_i32(a->sz); \
+ gen_helper_##op(cpu_env, size); \
+ tcg_temp_free(size); \
+ } while (0)
+
+/* suntil.<bwl> */
+static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
+{
+ STRING(suntil);
+ return true;
+}
+
+/* swhile.<bwl> */
+static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
+{
+ STRING(swhile);
+ return true;
+}
+
+/* sstr.<bwl> */
+static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
+{
+ STRING(sstr);
+ return true;
+}
+
+/* rmpa.<bwl> */
+static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
+{
+ STRING(rmpa);
+ return true;
+}
+
+static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
+{
+ TCGv_i64 tmp0, tmp1;
+ tmp0 = tcg_temp_new_i64();
+ tmp1 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
+ tcg_gen_sari_i64(tmp0, tmp0, 16);
+ tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
+ tcg_gen_sari_i64(tmp1, tmp1, 16);
+ tcg_gen_mul_i64(ret, tmp0, tmp1);
+ tcg_gen_shli_i64(ret, ret, 16);
+ tcg_temp_free_i64(tmp0);
+ tcg_temp_free_i64(tmp1);
+}
+
+static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
+{
+ TCGv_i64 tmp0, tmp1;
+ tmp0 = tcg_temp_new_i64();
+ tmp1 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
+ tcg_gen_ext16s_i64(tmp0, tmp0);
+ tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
+ tcg_gen_ext16s_i64(tmp1, tmp1);
+ tcg_gen_mul_i64(ret, tmp0, tmp1);
+ tcg_gen_shli_i64(ret, ret, 16);
+ tcg_temp_free_i64(tmp0);
+ tcg_temp_free_i64(tmp1);
+}
+
+/* mulhi rs,rs2 */
+static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
+{
+ rx_mul64hi(cpu_acc, a->rs, a->rs2);
+ return true;
+}
+
+/* mullo rs,rs2 */
+static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
+{
+ rx_mul64lo(cpu_acc, a->rs, a->rs2);
+ return true;
+}
+
+/* machi rs,rs2 */
+static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
+{
+ TCGv_i64 tmp;
+ tmp = tcg_temp_new_i64();
+ rx_mul64hi(tmp, a->rs, a->rs2);
+ tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+/* maclo rs,rs2 */
+static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
+{
+ TCGv_i64 tmp;
+ tmp = tcg_temp_new_i64();
+ rx_mul64lo(tmp, a->rs, a->rs2);
+ tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
+ tcg_temp_free_i64(tmp);
+ return true;
+}
+
+/* mvfachi rd */
+static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
+{
+ tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
+ return true;
+}
+
+/* mvfacmi rd */
+static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
+{
+ TCGv_i64 rd64;
+ rd64 = tcg_temp_new_i64();
+ tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
+ tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
+ tcg_temp_free_i64(rd64);
+ return true;
+}
+
+/* mvtachi rs */
+static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
+{
+ TCGv_i64 rs64;
+ rs64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
+ tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
+ tcg_temp_free_i64(rs64);
+ return true;
+}
+
+/* mvtaclo rs */
+static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
+{
+ TCGv_i64 rs64;
+ rs64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
+ tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
+ tcg_temp_free_i64(rs64);
+ return true;
+}
+
+/* racw #imm */
+static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
+{
+ TCGv imm = tcg_const_i32(a->imm + 1);
+ gen_helper_racw(cpu_env, imm);
+ tcg_temp_free(imm);
+ return true;
+}
+
+/* sat rd */
+static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
+{
+ TCGv tmp, z;
+ tmp = tcg_temp_new();
+ z = tcg_const_i32(0);
+ /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
+ tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
+ /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
+ tcg_gen_xori_i32(tmp, tmp, 0x80000000);
+ tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
+ cpu_psw_o, z, tmp, cpu_regs[a->rd]);
+ tcg_temp_free(tmp);
+ tcg_temp_free(z);
+ return true;
+}
+
+/* satr */
+static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
+{
+ gen_helper_satr(cpu_env);
+ return true;
+}
+
+#define cat3(a, b, c) a##b##c
+#define FOP(name, op) \
+ static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
+ cat3(arg_, name, _ir) * a) \
+ { \
+ TCGv imm = tcg_const_i32(li(ctx, 0)); \
+ gen_helper_##op(cpu_regs[a->rd], cpu_env, \
+ cpu_regs[a->rd], imm); \
+ tcg_temp_free(imm); \
+ return true; \
+ } \
+ static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
+ cat3(arg_, name, _mr) * a) \
+ { \
+ TCGv val, mem; \
+ mem = tcg_temp_new(); \
+ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
+ gen_helper_##op(cpu_regs[a->rd], cpu_env, \
+ cpu_regs[a->rd], val); \
+ tcg_temp_free(mem); \
+ return true; \
+ }
+
+#define FCONVOP(name, op) \
+ static bool trans_##name(DisasContext *ctx, arg_##name * a) \
+ { \
+ TCGv val, mem; \
+ mem = tcg_temp_new(); \
+ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
+ gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
+ tcg_temp_free(mem); \
+ return true; \
+ }
+
+FOP(FADD, fadd)
+FOP(FSUB, fsub)
+FOP(FMUL, fmul)
+FOP(FDIV, fdiv)
+
+/* fcmp #imm, rd */
+static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
+{
+ TCGv imm = tcg_const_i32(li(ctx, 0));
+ gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
+ tcg_temp_free(imm);
+ return true;
+}
+
+/* fcmp dsp[rs], rd */
+/* fcmp rs, rd */
+static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
+{
+ TCGv val, mem;
+ mem = tcg_temp_new();
+ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
+ gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
+ tcg_temp_free(mem);
+ return true;
+}
+
+FCONVOP(FTOI, ftoi)
+FCONVOP(ROUND, round)
+
+/* itof rs, rd */
+/* itof dsp[rs], rd */
+static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
+{
+ TCGv val, mem;
+ mem = tcg_temp_new();
+ val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
+ gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
+ tcg_temp_free(mem);
+ return true;
+}
+
+static void rx_bsetm(TCGv mem, TCGv mask)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ rx_gen_ld(MO_8, val, mem);
+ tcg_gen_or_i32(val, val, mask);
+ rx_gen_st(MO_8, val, mem);
+ tcg_temp_free(val);
+}
+
+static void rx_bclrm(TCGv mem, TCGv mask)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ rx_gen_ld(MO_8, val, mem);
+ tcg_gen_andc_i32(val, val, mask);
+ rx_gen_st(MO_8, val, mem);
+ tcg_temp_free(val);
+}
+
+static void rx_btstm(TCGv mem, TCGv mask)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ rx_gen_ld(MO_8, val, mem);
+ tcg_gen_and_i32(val, val, mask);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
+ tcg_temp_free(val);
+}
+
+static void rx_bnotm(TCGv mem, TCGv mask)
+{
+ TCGv val;
+ val = tcg_temp_new();
+ rx_gen_ld(MO_8, val, mem);
+ tcg_gen_xor_i32(val, val, mask);
+ rx_gen_st(MO_8, val, mem);
+ tcg_temp_free(val);
+}
+
+static void rx_bsetr(TCGv reg, TCGv mask)
+{
+ tcg_gen_or_i32(reg, reg, mask);
+}
+
+static void rx_bclrr(TCGv reg, TCGv mask)
+{
+ tcg_gen_andc_i32(reg, reg, mask);
+}
+
+static inline void rx_btstr(TCGv reg, TCGv mask)
+{
+ TCGv t0;
+ t0 = tcg_temp_new();
+ tcg_gen_and_i32(t0, reg, mask);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
+ tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
+ tcg_temp_free(t0);
+}
+
+static inline void rx_bnotr(TCGv reg, TCGv mask)
+{
+ tcg_gen_xor_i32(reg, reg, mask);
+}
+
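+/*
+ * Bit operations come in four addressing forms.  When the bit number is
+ * supplied in a register, it is masked to 5 bits for the register forms
+ * and to 3 bits for the memory (single byte) forms.
+ */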
+#define BITOP(name, op) \
+ static bool cat3(trans_, name, _im)(DisasContext *ctx, \
+ cat3(arg_, name, _im) * a) \
+ { \
+ TCGv mask, mem, addr; \
+ mem = tcg_temp_new(); \
+ mask = tcg_const_i32(1 << a->imm); \
+ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
+ cat3(rx_, op, m)(addr, mask); \
+ tcg_temp_free(mask); \
+ tcg_temp_free(mem); \
+ return true; \
+ } \
+ static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
+ cat3(arg_, name, _ir) * a) \
+ { \
+ TCGv mask; \
+ mask = tcg_const_i32(1 << a->imm); \
+ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ tcg_temp_free(mask); \
+ return true; \
+ } \
+ static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
+ cat3(arg_, name, _rr) * a) \
+ { \
+ TCGv mask, b; \
+ mask = tcg_const_i32(1); \
+ b = tcg_temp_new(); \
+ tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
+ tcg_gen_shl_i32(mask, mask, b); \
+ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
+ tcg_temp_free(mask); \
+ tcg_temp_free(b); \
+ return true; \
+ } \
+ static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
+ cat3(arg_, name, _rm) * a) \
+ { \
+ TCGv mask, mem, addr, b; \
+ mask = tcg_const_i32(1); \
+ b = tcg_temp_new(); \
+ tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
+ tcg_gen_shl_i32(mask, mask, b); \
+ mem = tcg_temp_new(); \
+ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
+ cat3(rx_, op, m)(addr, mask); \
+ tcg_temp_free(mem); \
+ tcg_temp_free(mask); \
+ tcg_temp_free(b); \
+ return true; \
+ }
+
+BITOP(BSET, bset)
+BITOP(BCLR, bclr)
+BITOP(BTST, btst)
+BITOP(BNOT, bnot)
+
+static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
+{
+ TCGv bit;
+ DisasCompare dc;
+ dc.temp = tcg_temp_new();
+ bit = tcg_temp_new();
+ psw_cond(&dc, cond);
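+    /* clear bit pos, then deposit the 0/1 condition result into it */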
+ tcg_gen_andi_i32(val, val, ~(1 << pos));
+ tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
+ tcg_gen_deposit_i32(val, val, bit, pos, 1);
+ tcg_temp_free(bit);
+ tcg_temp_free(dc.temp);
+}
+
+/* bmcnd #imm, dsp[rd] */
+static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
+{
+ TCGv val, mem, addr;
+ val = tcg_temp_new();
+ mem = tcg_temp_new();
+ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
+ rx_gen_ld(MO_8, val, addr);
+ bmcnd_op(val, a->cd, a->imm);
+ rx_gen_st(MO_8, val, addr);
+ tcg_temp_free(val);
+ tcg_temp_free(mem);
+ return true;
+}
+
+/* bmcond #imm, rd */
+static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
+{
+ bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
+ return true;
+}
+
+enum {
+ PSW_C = 0,
+ PSW_Z = 1,
+ PSW_S = 2,
+ PSW_O = 3,
+ PSW_I = 8,
+ PSW_U = 9,
+};
+
+static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
+{
+ if (cb < 8) {
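+        /*
+         * The flags are not stored as plain bits: psw_z is zero iff Z is
+         * set, psw_s and psw_o keep their flag in bit 31, and psw_c is a
+         * 0/1 value.
+         */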
+ switch (cb) {
+ case PSW_C:
+ tcg_gen_movi_i32(cpu_psw_c, val);
+ break;
+ case PSW_Z:
+ tcg_gen_movi_i32(cpu_psw_z, val == 0);
+ break;
+ case PSW_S:
+ tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
+ break;
+ case PSW_O:
+ tcg_gen_movi_i32(cpu_psw_o, val << 31);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
+ break;
+ }
+ } else if (is_privileged(ctx, 0)) {
+ switch (cb) {
+ case PSW_I:
+ tcg_gen_movi_i32(cpu_psw_i, val);
+ ctx->base.is_jmp = DISAS_UPDATE;
+ break;
+ case PSW_U:
+ tcg_gen_movi_i32(cpu_psw_u, val);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
+ break;
+ }
+ }
+}
+
+/* clrpsw psw */
+static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
+{
+ clrsetpsw(ctx, a->cb, 0);
+ return true;
+}
+
+/* setpsw psw */
+static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
+{
+ clrsetpsw(ctx, a->cb, 1);
+ return true;
+}
+
+/* mvtipl #imm */
+static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
+{
+ if (is_privileged(ctx, 1)) {
+ tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
+ ctx->base.is_jmp = DISAS_UPDATE;
+ }
+ return true;
+}
+
+/* mvtc #imm, rd */
+static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
+{
+ TCGv imm;
+
+ imm = tcg_const_i32(a->imm);
+ move_to_cr(ctx, imm, a->cr);
+ if (a->cr == 0 && is_privileged(ctx, 0)) {
+ ctx->base.is_jmp = DISAS_UPDATE;
+ }
+ tcg_temp_free(imm);
+ return true;
+}
+
+/* mvtc rs, rd */
+static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
+{
+ move_to_cr(ctx, cpu_regs[a->rs], a->cr);
+ if (a->cr == 0 && is_privileged(ctx, 0)) {
+ ctx->base.is_jmp = DISAS_UPDATE;
+ }
+ return true;
+}
+
+/* mvfc rs, rd */
+static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
+{
+ move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc);
+ return true;
+}
+
+/* rtfi */
+static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
+{
+ TCGv psw;
+ if (is_privileged(ctx, 1)) {
+ psw = tcg_temp_new();
+ tcg_gen_mov_i32(cpu_pc, cpu_bpc);
+ tcg_gen_mov_i32(psw, cpu_bpsw);
+ gen_helper_set_psw_rte(cpu_env, psw);
+ ctx->base.is_jmp = DISAS_EXIT;
+ tcg_temp_free(psw);
+ }
+ return true;
+}
+
+/* rte */
+static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
+{
+ TCGv psw;
+ if (is_privileged(ctx, 1)) {
+ psw = tcg_temp_new();
+ pop(cpu_pc);
+ pop(psw);
+ gen_helper_set_psw_rte(cpu_env, psw);
+ ctx->base.is_jmp = DISAS_EXIT;
+ tcg_temp_free(psw);
+ }
+ return true;
+}
+
+/* brk */
+static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
+{
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
+ gen_helper_rxbrk(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+/* int #imm */
+static bool trans_INT(DisasContext *ctx, arg_INT *a)
+{
+ TCGv vec;
+
+ tcg_debug_assert(a->imm < 0x100);
+ vec = tcg_const_i32(a->imm);
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
+ gen_helper_rxint(cpu_env, vec);
+ tcg_temp_free(vec);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+/* wait */
+static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
+{
+ if (is_privileged(ctx, 1)) {
+ tcg_gen_addi_i32(cpu_pc, cpu_pc, 2);
+ gen_helper_wait(cpu_env);
+ }
+ return true;
+}
+
+static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ CPURXState *env = cs->env_ptr;
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ ctx->env = env;
+}
+
+static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+}
+
+static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next);
+}
+
+static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ uint32_t insn;
+
+ ctx->pc = ctx->base.pc_next;
+ insn = decode_load(ctx);
+ if (!decode(ctx, insn)) {
+ gen_helper_raise_illegal_instruction(cpu_env);
+ }
+}
+
+static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ gen_goto_tb(ctx, 0, dcbase->pc_next);
+ break;
+ case DISAS_JUMP:
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_UPDATE:
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
+ /* fall through */
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps rx_tr_ops = {
+ .init_disas_context = rx_tr_init_disas_context,
+ .tb_start = rx_tr_tb_start,
+ .insn_start = rx_tr_insn_start,
+ .translate_insn = rx_tr_translate_insn,
+ .tb_stop = rx_tr_tb_stop,
+ .disas_log = rx_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+
+ translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+#define ALLOC_REGISTER(sym, name) \
+ cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
+ offsetof(CPURXState, sym), name)
+
+void rx_translate_init(void)
+{
+ static const char * const regnames[NUM_REGS] = {
+ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
+ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
+ };
+ int i;
+
+ for (i = 0; i < NUM_REGS; i++) {
+ cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPURXState, regs[i]),
+ regnames[i]);
+ }
+ ALLOC_REGISTER(pc, "PC");
+ ALLOC_REGISTER(psw_o, "PSW(O)");
+ ALLOC_REGISTER(psw_s, "PSW(S)");
+ ALLOC_REGISTER(psw_z, "PSW(Z)");
+ ALLOC_REGISTER(psw_c, "PSW(C)");
+ ALLOC_REGISTER(psw_u, "PSW(U)");
+ ALLOC_REGISTER(psw_i, "PSW(I)");
+ ALLOC_REGISTER(psw_pm, "PSW(PM)");
+ ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
+ ALLOC_REGISTER(usp, "USP");
+ ALLOC_REGISTER(fpsw, "FPSW");
+ ALLOC_REGISTER(bpsw, "BPSW");
+ ALLOC_REGISTER(bpc, "BPC");
+ ALLOC_REGISTER(isp, "ISP");
+ ALLOC_REGISTER(fintv, "FINTV");
+ ALLOC_REGISTER(intb, "INTB");
+ cpu_acc = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPURXState, acc), "ACC");
+}
diff --git a/target/s390x/Kconfig b/target/s390x/Kconfig
new file mode 100644
index 000000000..72da48136
--- /dev/null
+++ b/target/s390x/Kconfig
@@ -0,0 +1,2 @@
+config S390X
+ bool
diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
new file mode 100644
index 000000000..08daf93ae
--- /dev/null
+++ b/target/s390x/arch_dump.c
@@ -0,0 +1,282 @@
+/*
+ * Writing ELF notes for s390x arch
+ *
+ * Copyright IBM Corp. 2012, 2013
+ *
+ * Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+
+
+struct S390xUserRegsStruct {
+ uint64_t psw[2];
+ uint64_t gprs[16];
+ uint32_t acrs[16];
+} QEMU_PACKED;
+
+typedef struct S390xUserRegsStruct S390xUserRegs;
+
+struct S390xElfPrstatusStruct {
+ uint8_t pad1[32];
+ uint32_t pid;
+ uint8_t pad2[76];
+ S390xUserRegs regs;
+ uint8_t pad3[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfPrstatusStruct S390xElfPrstatus;
+
+struct S390xElfFpregsetStruct {
+ uint32_t fpc;
+ uint32_t pad;
+ uint64_t fprs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfFpregsetStruct S390xElfFpregset;
+
+struct S390xElfVregsLoStruct {
+ uint64_t vregs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsLoStruct S390xElfVregsLo;
+
+struct S390xElfVregsHiStruct {
+ uint64_t vregs[16][2];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsHiStruct S390xElfVregsHi;
+
+struct S390xElfGSCBStruct {
+ uint64_t gsregs[4];
+} QEMU_PACKED;
+
+typedef struct S390xElfGSCBStruct S390xElfGSCB;
+
+typedef struct noteStruct {
+ Elf64_Nhdr hdr;
+ char name[8];
+ union {
+ S390xElfPrstatus prstatus;
+ S390xElfFpregset fpregset;
+ S390xElfVregsLo vregslo;
+ S390xElfVregsHi vregshi;
+ S390xElfGSCB gscb;
+ uint32_t prefix;
+ uint64_t timer;
+ uint64_t todcmp;
+ uint32_t todpreg;
+ uint64_t ctrs[16];
+ } contents;
+} QEMU_PACKED Note;
+
+static void s390x_write_elf64_prstatus(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+ S390xUserRegs *regs;
+
+ note->hdr.n_type = cpu_to_be32(NT_PRSTATUS);
+
+ regs = &(note->contents.prstatus.regs);
+ regs->psw[0] = cpu_to_be64(cpu->env.psw.mask);
+ regs->psw[1] = cpu_to_be64(cpu->env.psw.addr);
+ for (i = 0; i <= 15; i++) {
+ regs->acrs[i] = cpu_to_be32(cpu->env.aregs[i]);
+ regs->gprs[i] = cpu_to_be64(cpu->env.regs[i]);
+ }
+ note->contents.prstatus.pid = id;
+}
+
+static void s390x_write_elf64_fpregset(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+ CPUS390XState *cs = &cpu->env;
+
+ note->hdr.n_type = cpu_to_be32(NT_FPREGSET);
+ note->contents.fpregset.fpc = cpu_to_be32(cpu->env.fpc);
+ for (i = 0; i <= 15; i++) {
+ note->contents.fpregset.fprs[i] = cpu_to_be64(*get_freg(cs, i));
+ }
+}
+
+static void s390x_write_elf64_vregslo(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_LOW);
+ for (i = 0; i <= 15; i++) {
+ note->contents.vregslo.vregs[i] = cpu_to_be64(cpu->env.vregs[i][1]);
+ }
+}
+
+static void s390x_write_elf64_vregshi(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+ S390xElfVregsHi *temp_vregshi;
+
+ temp_vregshi = &note->contents.vregshi;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_HIGH);
+ for (i = 0; i <= 15; i++) {
+ temp_vregshi->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i + 16][0]);
+ temp_vregshi->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i + 16][1]);
+ }
+}
+
+static void s390x_write_elf64_gscb(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_GS_CB);
+ for (i = 0; i < 4; i++) {
+ note->contents.gscb.gsregs[i] = cpu_to_be64(cpu->env.gscb[i]);
+ }
+}
+
+static void s390x_write_elf64_timer(Note *note, S390CPU *cpu, int id)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TIMER);
+ note->contents.timer = cpu_to_be64((uint64_t)(cpu->env.cputm));
+}
+
+static void s390x_write_elf64_todcmp(Note *note, S390CPU *cpu, int id)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TODCMP);
+ note->contents.todcmp = cpu_to_be64((uint64_t)(cpu->env.ckc));
+}
+
+static void s390x_write_elf64_todpreg(Note *note, S390CPU *cpu, int id)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TODPREG);
+ note->contents.todpreg = cpu_to_be32((uint32_t)(cpu->env.todpr));
+}
+
+static void s390x_write_elf64_ctrs(Note *note, S390CPU *cpu, int id)
+{
+ int i;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_CTRS);
+
+ for (i = 0; i <= 15; i++) {
+ note->contents.ctrs[i] = cpu_to_be64(cpu->env.cregs[i]);
+ }
+}
+
+static void s390x_write_elf64_prefix(Note *note, S390CPU *cpu, int id)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_PREFIX);
+ note->contents.prefix = cpu_to_be32((uint32_t)(cpu->env.psa));
+}
+
+
+typedef struct NoteFuncDescStruct {
+ int contents_size;
+ void (*note_contents_func)(Note *note, S390CPU *cpu, int id);
+} NoteFuncDesc;
+
+static const NoteFuncDesc note_core[] = {
+ {sizeof_field(Note, contents.prstatus), s390x_write_elf64_prstatus},
+ {sizeof_field(Note, contents.fpregset), s390x_write_elf64_fpregset},
+ { 0, NULL}
+};
+
+static const NoteFuncDesc note_linux[] = {
+ {sizeof_field(Note, contents.prefix), s390x_write_elf64_prefix},
+ {sizeof_field(Note, contents.ctrs), s390x_write_elf64_ctrs},
+ {sizeof_field(Note, contents.timer), s390x_write_elf64_timer},
+ {sizeof_field(Note, contents.todcmp), s390x_write_elf64_todcmp},
+ {sizeof_field(Note, contents.todpreg), s390x_write_elf64_todpreg},
+ {sizeof_field(Note, contents.vregslo), s390x_write_elf64_vregslo},
+ {sizeof_field(Note, contents.vregshi), s390x_write_elf64_vregshi},
+ {sizeof_field(Note, contents.gscb), s390x_write_elf64_gscb},
+ { 0, NULL}
+};
+
+static int s390x_write_elf64_notes(const char *note_name,
+ WriteCoreDumpFunction f,
+ S390CPU *cpu, int id,
+ void *opaque,
+ const NoteFuncDesc *funcs)
+{
+ Note note;
+ const NoteFuncDesc *nf;
+ int note_size;
+ int ret = -1;
+
+ assert(strlen(note_name) < sizeof(note.name));
+
+ for (nf = funcs; nf->note_contents_func; nf++) {
+ memset(&note, 0, sizeof(note));
+ note.hdr.n_namesz = cpu_to_be32(strlen(note_name) + 1);
+ note.hdr.n_descsz = cpu_to_be32(nf->contents_size);
+ g_strlcpy(note.name, note_name, sizeof(note.name));
+ (*nf->note_contents_func)(&note, cpu, id);
+
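+        /* write out only the populated part of the contents union */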
+ note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size;
+ ret = f(&note, note_size, opaque);
+
+ if (ret < 0) {
+ return -1;
+ }
+
+ }
+
+ return 0;
+}
+
+
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ int r;
+
+ r = s390x_write_elf64_notes("CORE", f, cpu, cpuid, opaque, note_core);
+ if (r) {
+ return r;
+ }
+ return s390x_write_elf64_notes("LINUX", f, cpu, cpuid, opaque, note_linux);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks)
+{
+ info->d_machine = EM_S390;
+ info->d_endian = ELFDATA2MSB;
+ info->d_class = ELFCLASS64;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 8; /* "LINUX" or "CORE" + pad */
+ size_t elf_note_size = 0;
+ int note_head_size;
+ const NoteFuncDesc *nf;
+
+ assert(class == ELFCLASS64);
+ assert(machine == EM_S390);
+
+ note_head_size = sizeof(Elf64_Nhdr);
+
+ for (nf = note_core; nf->note_contents_func; nf++) {
+ elf_note_size = elf_note_size + note_head_size + name_size +
+ nf->contents_size;
+ }
+ for (nf = note_linux; nf->note_contents_func; nf++) {
+ elf_note_size = elf_note_size + note_head_size + name_size +
+ nf->contents_size;
+ }
+
+ return (elf_note_size) * nr_cpus;
+}
diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c
new file mode 100644
index 000000000..0f5c06299
--- /dev/null
+++ b/target/s390x/cpu-dump.c
@@ -0,0 +1,134 @@
+/*
+ * S/390 CPU dump to FILE
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "qemu/qemu-print.h"
+#include "sysemu/tcg.h"
+
+void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64,
+ s390_cpu_get_psw_mask(env), env->psw.addr);
+ if (!tcg_enabled()) {
+ qemu_fprintf(f, "\n");
+ } else if (env->cc_op > 3) {
+ qemu_fprintf(f, " cc %15s\n", cc_name(env->cc_op));
+ } else {
+ qemu_fprintf(f, " cc %02x\n", env->cc_op);
+ }
+
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+
+ if (flags & CPU_DUMP_FPU) {
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
+ i, env->vregs[i][0], env->vregs[i][1],
+ i % 2 ? '\n' : ' ');
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "F%02d=%016" PRIx64 "%c",
+ i, *get_freg(env, i),
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+ }
+ }
+
+#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+#endif
+
+#ifdef DEBUG_INLINE_BRANCHES
+ for (i = 0; i < CC_OP_MAX; i++) {
+ qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
+ inline_branch_miss[i], inline_branch_hit[i]);
+ }
+#endif
+
+ qemu_fprintf(f, "\n");
+}
+
+const char *cc_name(enum cc_op cc_op)
+{
+ static const char * const cc_names[] = {
+ [CC_OP_CONST0] = "CC_OP_CONST0",
+ [CC_OP_CONST1] = "CC_OP_CONST1",
+ [CC_OP_CONST2] = "CC_OP_CONST2",
+ [CC_OP_CONST3] = "CC_OP_CONST3",
+ [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
+ [CC_OP_STATIC] = "CC_OP_STATIC",
+ [CC_OP_NZ] = "CC_OP_NZ",
+ [CC_OP_ADDU] = "CC_OP_ADDU",
+ [CC_OP_SUBU] = "CC_OP_SUBU",
+ [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
+ [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
+ [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
+ [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
+ [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
+ [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
+ [CC_OP_ADD_64] = "CC_OP_ADD_64",
+ [CC_OP_SUB_64] = "CC_OP_SUB_64",
+ [CC_OP_ABS_64] = "CC_OP_ABS_64",
+ [CC_OP_NABS_64] = "CC_OP_NABS_64",
+ [CC_OP_ADD_32] = "CC_OP_ADD_32",
+ [CC_OP_SUB_32] = "CC_OP_SUB_32",
+ [CC_OP_ABS_32] = "CC_OP_ABS_32",
+ [CC_OP_NABS_32] = "CC_OP_NABS_32",
+ [CC_OP_COMP_32] = "CC_OP_COMP_32",
+ [CC_OP_COMP_64] = "CC_OP_COMP_64",
+ [CC_OP_TM_32] = "CC_OP_TM_32",
+ [CC_OP_TM_64] = "CC_OP_TM_64",
+ [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
+ [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
+ [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
+ [CC_OP_ICM] = "CC_OP_ICM",
+ [CC_OP_SLA_32] = "CC_OP_SLA_32",
+ [CC_OP_SLA_64] = "CC_OP_SLA_64",
+ [CC_OP_FLOGR] = "CC_OP_FLOGR",
+ [CC_OP_LCBB] = "CC_OP_LCBB",
+ [CC_OP_VC] = "CC_OP_VC",
+ [CC_OP_MULS_32] = "CC_OP_MULS_32",
+ [CC_OP_MULS_64] = "CC_OP_MULS_64",
+ };
+
+ return cc_names[cc_op];
+}
diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h
new file mode 100644
index 000000000..472db648d
--- /dev/null
+++ b/target/s390x/cpu-param.h
@@ -0,0 +1,17 @@
+/*
+ * S/390 cpu parameters for qemu.
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef S390_CPU_PARAM_H
+#define S390_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 64
+#define TARGET_PAGE_BITS 12
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#define NB_MMU_MODES 4
+
+#endif
diff --git a/target/s390x/cpu-qom.h b/target/s390x/cpu-qom.h
new file mode 100644
index 000000000..9f3a0d86c
--- /dev/null
+++ b/target/s390x/cpu-qom.h
@@ -0,0 +1,68 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_S390_CPU_QOM_H
+#define QEMU_S390_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_S390_CPU "s390x-cpu"
+
+OBJECT_DECLARE_TYPE(S390CPU, S390CPUClass,
+ S390_CPU)
+
+typedef struct S390CPUModel S390CPUModel;
+typedef struct S390CPUDef S390CPUDef;
+
+typedef enum cpu_reset_type {
+ S390_CPU_RESET_NORMAL,
+ S390_CPU_RESET_INITIAL,
+ S390_CPU_RESET_CLEAR,
+} cpu_reset_type;
+
+/**
+ * S390CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @load_normal: Performs a load normal.
+ * @reset: Performs a CPU reset of a given type.
+ *
+ * An S/390 CPU model.
+ */
+struct S390CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ const S390CPUDef *cpu_def;
+ bool kvm_required;
+ bool is_static;
+ bool is_migration_safe;
+ const char *desc;
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+ void (*load_normal)(CPUState *cpu);
+ void (*reset)(CPUState *cpu, cpu_reset_type type);
+};
+
+typedef struct CPUS390XState CPUS390XState;
+
+#endif
diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
new file mode 100644
index 000000000..5471e01ee
--- /dev/null
+++ b/target/s390x/cpu-sysemu.c
@@ -0,0 +1,308 @@
+/*
+ * QEMU S/390 CPU - System Emulation-only code
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+#include "qemu/timer.h"
+#include "trace.h"
+#include "qapi/qapi-visit-run-state.h"
+#include "sysemu/hw_accel.h"
+
+#include "hw/s390x/pv.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/tcg.h"
+#include "hw/core/sysemu-cpu-ops.h"
+
+/* S390CPUClass::load_normal() */
+static void s390_cpu_load_normal(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ uint64_t spsw;
+
+ if (!s390_is_pv()) {
+ spsw = ldq_phys(s->as, 0);
+ cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL;
+ /*
+ * Invert short psw indication, so SIE will report a specification
+ * exception if it was not set.
+ */
+ cpu->env.psw.mask ^= PSW_MASK_SHORTPSW;
+ cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR;
+ } else {
+ /*
+ * Firmware requires us to set the load state before we set
+ * the cpu to operating on protected guests.
+ */
+ s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu);
+ }
+ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+}
+
+void s390_cpu_machine_reset_cb(void *opaque)
+{
+ S390CPU *cpu = opaque;
+
+ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+}
+
+static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs)
+{
+ GuestPanicInformation *panic_info;
+ S390CPU *cpu = S390_CPU(cs);
+
+ cpu_synchronize_state(cs);
+ panic_info = g_malloc0(sizeof(GuestPanicInformation));
+
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390;
+ panic_info->u.s390.core = cpu->env.core_id;
+ panic_info->u.s390.psw_mask = cpu->env.psw.mask;
+ panic_info->u.s390.psw_addr = cpu->env.psw.addr;
+ panic_info->u.s390.reason = cpu->env.crash_reason;
+
+ return panic_info;
+}
+
+static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ CPUState *cs = CPU(obj);
+ GuestPanicInformation *panic_info;
+
+ if (!cs->crash_occurred) {
+ error_setg(errp, "No crash occurred");
+ return;
+ }
+
+ panic_info = s390_cpu_get_crash_info(cs);
+
+ visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
+ errp);
+ qapi_free_GuestPanicInformation(panic_info);
+}
+
+void s390_cpu_init_sysemu(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ S390CPU *cpu = S390_CPU(obj);
+
+ cs->start_powered_off = true;
+ object_property_add(obj, "crash-information", "GuestPanicInformation",
+ s390_cpu_get_crash_info_qom, NULL, NULL, NULL);
+ cpu->env.tod_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
+ cpu->env.cpu_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
+ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+}
+
+bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp)
+{
+ S390CPU *cpu = S390_CPU(dev);
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int max_cpus = ms->smp.max_cpus;
+
+ if (cpu->env.core_id >= max_cpus) {
+ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
+ ", maximum core-id: %d", cpu->env.core_id,
+ max_cpus - 1);
+ return false;
+ }
+
+ if (cpu_exists(cpu->env.core_id)) {
+ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32
+ ", it already exists", cpu->env.core_id);
+ return false;
+ }
+
+ /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. */
+ CPU(cpu)->cpu_index = cpu->env.core_id;
+ return true;
+}
+
+void s390_cpu_finalize(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ timer_free(cpu->env.tod_timer);
+ timer_free(cpu->env.cpu_timer);
+
+ qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu);
+ g_free(cpu->irqstate);
+}
+
+static const struct SysemuCPUOps s390_sysemu_ops = {
+ .get_phys_page_debug = s390_cpu_get_phys_page_debug,
+ .get_crash_info = s390_cpu_get_crash_info,
+ .write_elf64_note = s390_cpu_write_elf64_note,
+ .legacy_vmsd = &vmstate_s390_cpu,
+};
+
+void s390_cpu_class_init_sysemu(CPUClass *cc)
+{
+ S390CPUClass *scc = S390_CPU_CLASS(cc);
+
+ scc->load_normal = s390_cpu_load_normal;
+ cc->sysemu_ops = &s390_sysemu_ops;
+}
+
+static bool disabled_wait(CPUState *cpu)
+{
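+    /* disabled wait: halted with I/O, external and machine-check masked */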
+ return cpu->halted && !(S390_CPU(cpu)->env.psw.mask &
+ (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
+}
+
+static unsigned s390_count_running_cpus(void)
+{
+ CPUState *cpu;
+ int nr_running = 0;
+
+ CPU_FOREACH(cpu) {
+ uint8_t state = S390_CPU(cpu)->env.cpu_state;
+ if (state == S390_CPU_STATE_OPERATING ||
+ state == S390_CPU_STATE_LOAD) {
+ if (!disabled_wait(cpu)) {
+ nr_running++;
+ }
+ }
+ }
+
+ return nr_running;
+}
+
+unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_halt(cs->cpu_index);
+
+ if (!cs->halted) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+
+ return s390_count_running_cpus();
+}
+
+void s390_cpu_unhalt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_unhalt(cs->cpu_index);
+
+ if (cs->halted) {
+ cs->halted = 0;
+ cs->exception_index = -1;
+ }
+}
+
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+ trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state);
+
+ switch (cpu_state) {
+ case S390_CPU_STATE_STOPPED:
+ case S390_CPU_STATE_CHECK_STOP:
+ /* halt the cpu for common infrastructure */
+ s390_cpu_halt(cpu);
+ break;
+ case S390_CPU_STATE_OPERATING:
+ case S390_CPU_STATE_LOAD:
+ /*
+ * Starting a CPU with a PSW WAIT bit set:
+ * KVM: handles this internally and triggers another WAIT exit.
+         * TCG: will actually try to continue to run. Don't unhalt; that
+         * will be done once the CPU actually has work (an interrupt).
+ */
+ if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) {
+ s390_cpu_unhalt(cpu);
+ }
+ break;
+ default:
+ error_report("Requested CPU state is not a valid S390 CPU state: %u",
+ cpu_state);
+ exit(1);
+ }
+ if (kvm_enabled() && cpu->env.cpu_state != cpu_state) {
+ kvm_s390_set_cpu_state(cpu, cpu_state);
+ }
+ cpu->env.cpu_state = cpu_state;
+
+ return s390_count_running_cpus();
+}
+
+int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_set_mem_limit(new_limit, hw_limit);
+ }
+ return 0;
+}
+
+void s390_set_max_pagesize(uint64_t pagesize, Error **errp)
+{
+ if (kvm_enabled()) {
+ kvm_s390_set_max_pagesize(pagesize, errp);
+ }
+}
+
+void s390_cmma_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_cmma_reset();
+ }
+}
+
+int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
+ int vq, bool assign)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
+ } else {
+ return 0;
+ }
+}
+
+void s390_crypto_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_crypto_reset();
+ }
+}
+
+void s390_enable_css_support(S390CPU *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_s390_enable_css_support(cpu);
+ }
+}
+
+void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg)
+{
+ if (kvm_enabled()) {
+ kvm_s390_set_diag318(cs, arg.host_ulong);
+ }
+}
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
new file mode 100644
index 000000000..ccdbaf84d
--- /dev/null
+++ b/target/s390x/cpu.c
@@ -0,0 +1,339 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "qapi/qapi-types-machine.h"
+#include "sysemu/hw_accel.h"
+#include "hw/qdev-properties.h"
+#include "fpu/softfloat-helpers.h"
+#include "disas/capstone.h"
+#include "sysemu/tcg.h"
+
+#define CR0_RESET 0xE0UL
+#define CR14_RESET 0xC2000000UL
+
+void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ uint64_t old_mask = env->psw.mask;
+#endif
+
+ env->psw.addr = addr;
+ env->psw.mask = mask;
+
+ /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
+ if (!tcg_enabled()) {
+ return;
+ }
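+    /* the condition code occupies bits 44..45 of the PSW mask */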
+ env->cc_op = (mask >> 44) & 3;
+
+#ifndef CONFIG_USER_ONLY
+ if ((old_mask ^ mask) & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(env_cpu(env));
+ }
+
+ if (mask & PSW_MASK_WAIT) {
+ s390_handle_wait(env_archcpu(env));
+ }
+#endif
+}
+
+uint64_t s390_cpu_get_psw_mask(CPUS390XState *env)
+{
+ uint64_t r = env->psw.mask;
+
+ if (tcg_enabled()) {
+ uint64_t cc = calc_cc(env, env->cc_op, env->cc_src,
+ env->cc_dst, env->cc_vr);
+
+ assert(cc <= 3);
+ r &= ~PSW_MASK_CC;
+ r |= cc << 44;
+ }
+
+ return r;
+}
+
+static void s390_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ cpu->env.psw.addr = value;
+}
+
+static bool s390_cpu_has_work(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ /* STOPPED cpus can never wake up */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD &&
+ s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
+ return false;
+ }
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+
+ return s390_cpu_has_int(cpu);
+}
+
+/* S390CPUClass::reset() */
+static void s390_cpu_reset(CPUState *s, cpu_reset_type type)
+{
+ S390CPU *cpu = S390_CPU(s);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ CPUS390XState *env = &cpu->env;
+ DeviceState *dev = DEVICE(s);
+
+ scc->parent_reset(dev);
+ cpu->env.sigp_order = 0;
+ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+
+ switch (type) {
+ case S390_CPU_RESET_CLEAR:
+ memset(env, 0, offsetof(CPUS390XState, start_initial_reset_fields));
+ /* fall through */
+ case S390_CPU_RESET_INITIAL:
+ /* initial reset does not clear everything! */
+ memset(&env->start_initial_reset_fields, 0,
+ offsetof(CPUS390XState, start_normal_reset_fields) -
+ offsetof(CPUS390XState, start_initial_reset_fields));
+
+        /* architected initial value for Breaking-Event-Address register */
+ env->gbea = 1;
+
+        /* architected initial values for CR 0 and 14 */
+ env->cregs[0] = CR0_RESET;
+ env->cregs[14] = CR14_RESET;
+
+#if defined(CONFIG_USER_ONLY)
+ /* user mode should always be allowed to use the full FPU */
+ env->cregs[0] |= CR0_AFP;
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ env->cregs[0] |= CR0_VECTOR;
+ }
+#endif
+
+ /* tininess for underflow is detected before rounding */
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->fpu_status);
+ /* fall through */
+ case S390_CPU_RESET_NORMAL:
+ env->psw.mask &= ~PSW_MASK_RI;
+ memset(&env->start_normal_reset_fields, 0,
+ offsetof(CPUS390XState, end_reset_fields) -
+ offsetof(CPUS390XState, start_normal_reset_fields));
+
+ env->pfault_token = -1UL;
+ env->bpbc = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Reset state inside the kernel that we cannot access yet from QEMU. */
+ if (kvm_enabled()) {
+ switch (type) {
+ case S390_CPU_RESET_CLEAR:
+ kvm_s390_reset_vcpu_clear(cpu);
+ break;
+ case S390_CPU_RESET_INITIAL:
+ kvm_s390_reset_vcpu_initial(cpu);
+ break;
+ case S390_CPU_RESET_NORMAL:
+ kvm_s390_reset_vcpu_normal(cpu);
+ break;
+ }
+ }
+}
+
+static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_s390_64;
+ info->print_insn = print_insn_s390;
+ info->cap_arch = CS_ARCH_SYSZ;
+ info->cap_insn_unit = 2;
+ info->cap_insn_split = 6;
+}
+
+static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(dev);
+ Error *err = NULL;
+
+ /* the model has to be realized before qemu_init_vcpu() due to kvm */
+ s390_realize_cpu_model(cs, &err);
+ if (err) {
+ goto out;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ if (!s390_cpu_realize_sysemu(dev, &err)) {
+ goto out;
+ }
+#endif
+
+ cpu_exec_realizefn(cs, &err);
+ if (err != NULL) {
+ goto out;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ qemu_register_reset(s390_cpu_machine_reset_cb, S390_CPU(dev));
+#endif
+ s390_cpu_gdb_init(cs);
+ qemu_init_vcpu(cs);
+
+ /*
+ * KVM requires the initial CPU reset ioctl to be executed on the target
+ * CPU thread. CPU hotplug under single-threaded TCG will not work with
+ * run_on_cpu(), as run_on_cpu() will not work properly if called while
+ * the main thread is already running but the CPU hasn't been realized.
+ */
+ if (kvm_enabled()) {
+ run_on_cpu(cs, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+ } else {
+ cpu_reset(cs);
+ }
+
+ scc->parent_realize(dev, &err);
+out:
+ error_propagate(errp, err);
+}
+
+static void s390_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ S390CPU *cpu = S390_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+ cs->exception_index = EXCP_HLT;
+
+#if !defined(CONFIG_USER_ONLY)
+ s390_cpu_init_sysemu(obj);
+#endif
+}
+
+static gchar *s390_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("s390:64-bit");
+}
+
+static Property s390x_cpu_properties[] = {
+#if !defined(CONFIG_USER_ONLY)
+ DEFINE_PROP_UINT32("core-id", S390CPU, env.core_id, 0),
+#endif
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void s390_cpu_reset_full(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+    s390_cpu_reset(s, S390_CPU_RESET_CLEAR);
+}
+
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps s390_tcg_ops = {
+ .initialize = s390x_translate_init,
+
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = s390_cpu_record_sigsegv,
+ .record_sigbus = s390_cpu_record_sigbus,
+#else
+ .tlb_fill = s390_cpu_tlb_fill,
+ .cpu_exec_interrupt = s390_cpu_exec_interrupt,
+ .do_interrupt = s390_cpu_do_interrupt,
+ .debug_excp_handler = s390x_cpu_debug_excp_handler,
+ .do_unaligned_access = s390x_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void s390_cpu_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *scc = S390_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(scc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_parent_realize(dc, s390_cpu_realizefn,
+ &scc->parent_realize);
+ device_class_set_props(dc, s390x_cpu_properties);
+ dc->user_creatable = true;
+
+ device_class_set_parent_reset(dc, s390_cpu_reset_full, &scc->parent_reset);
+
+ scc->reset = s390_cpu_reset;
+    cc->class_by_name = s390_cpu_class_by_name;
+ cc->has_work = s390_cpu_has_work;
+ cc->dump_state = s390_cpu_dump_state;
+ cc->set_pc = s390_cpu_set_pc;
+ cc->gdb_read_register = s390_cpu_gdb_read_register;
+ cc->gdb_write_register = s390_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ s390_cpu_class_init_sysemu(cc);
+#endif
+ cc->disas_set_info = s390_cpu_disas_set_info;
+ cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
+ cc->gdb_core_xml_file = "s390x-core64.xml";
+ cc->gdb_arch_name = s390_gdb_arch_name;
+
+ s390_cpu_model_class_register_props(oc);
+
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &s390_tcg_ops;
+#endif /* CONFIG_TCG */
+}
+
+static const TypeInfo s390_cpu_type_info = {
+ .name = TYPE_S390_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(S390CPU),
+ .instance_align = __alignof__(S390CPU),
+ .instance_init = s390_cpu_initfn,
+
+#ifndef CONFIG_USER_ONLY
+ .instance_finalize = s390_cpu_finalize,
+#endif /* !CONFIG_USER_ONLY */
+
+ .abstract = true,
+ .class_size = sizeof(S390CPUClass),
+ .class_init = s390_cpu_class_init,
+};
+
+static void s390_cpu_register_types(void)
+{
+ type_register_static(&s390_cpu_type_info);
+}
+
+type_init(s390_cpu_register_types)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
new file mode 100644
index 000000000..ca3845d02
--- /dev/null
+++ b/target/s390x/cpu.h
@@ -0,0 +1,848 @@
+/*
+ * S/390 virtual CPU header
+ *
+ * For details on the s390x architecture and used definitions (e.g.,
+ * PSW, PER and DAT (Dynamic Address Translation)), please refer to
+ * the "z/Architecture Principles of Operations" - a.k.a. PoP.
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright IBM Corp. 2012, 2018
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef S390X_CPU_H
+#define S390X_CPU_H
+
+#include "cpu-qom.h"
+#include "cpu_models.h"
+#include "exec/cpu-defs.h"
+
+#define ELF_MACHINE_UNAME "S390X"
+
+/* The z/Architecture has a strong memory model with some store-after-load re-ordering */
+#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
+
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+#define MMU_USER_IDX 0
+
+#define S390_MAX_CPUS 248
+
+#ifndef CONFIG_KVM
+#define S390_ADAPTER_SUPPRESSIBLE 0x01
+#else
+#define S390_ADAPTER_SUPPRESSIBLE KVM_S390_ADAPTER_SUPPRESSIBLE
+#endif
+
+typedef struct PSW {
+ uint64_t mask;
+ uint64_t addr;
+} PSW;
+
+struct CPUS390XState {
+ uint64_t regs[16]; /* GP registers */
+ /*
+ * The floating point registers are part of the vector registers.
+ * vregs[0][0] -> vregs[15][0] are 16 floating point registers
+ */
+ uint64_t vregs[32][2] QEMU_ALIGNED(16); /* vector registers */
+ uint32_t aregs[16]; /* access registers */
+ uint64_t gscb[4]; /* guarded storage control */
+ uint64_t etoken; /* etoken */
+ uint64_t etoken_extension; /* etoken extension */
+
+ /* Fields up to this point are not cleared by initial CPU reset */
+ struct {} start_initial_reset_fields;
+
+ uint32_t fpc; /* floating-point control register */
+ uint32_t cc_op;
+ bool bpbc; /* branch prediction blocking */
+
+ float_status fpu_status; /* passed to softfloat lib */
+
+ /* The low part of a 128-bit return, or remainder of a divide. */
+ uint64_t retxl;
+
+ PSW psw;
+
+ S390CrashReason crash_reason;
+
+ uint64_t cc_src;
+ uint64_t cc_dst;
+ uint64_t cc_vr;
+
+ uint64_t ex_value;
+
+ uint64_t __excp_addr;
+ uint64_t psa;
+
+ uint32_t int_pgm_code;
+ uint32_t int_pgm_ilen;
+
+ uint32_t int_svc_code;
+ uint32_t int_svc_ilen;
+
+ uint64_t per_address;
+ uint16_t per_perc_atmid;
+
+ uint64_t cregs[16]; /* control registers */
+
+ uint64_t ckc;
+ uint64_t cputm;
+ uint32_t todpr;
+
+ uint64_t pfault_token;
+ uint64_t pfault_compare;
+ uint64_t pfault_select;
+
+ uint64_t gbea;
+ uint64_t pp;
+
+ /* Fields up to this point are not cleared by normal CPU reset */
+ struct {} start_normal_reset_fields;
+ uint8_t riccb[64]; /* runtime instrumentation control */
+
+ int pending_int;
+ uint16_t external_call_addr;
+ DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS);
+
+ uint64_t diag318_info;
+
+#if !defined(CONFIG_USER_ONLY)
+ uint64_t tlb_fill_tec; /* translation exception code during tlb_fill */
+ int tlb_fill_exc; /* exception number seen during tlb_fill */
+#endif
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+#if !defined(CONFIG_USER_ONLY)
+ uint32_t core_id; /* PoP "CPU address", same as cpu_index */
+ uint64_t cpuid;
+#endif
+
+ QEMUTimer *tod_timer;
+
+ QEMUTimer *cpu_timer;
+
+ /*
+ * The cpu state represents the logical state of a cpu. In contrast to other
+ * architectures, there is a difference between a halt and a stop on s390.
+ * If all cpus are either stopped (including check stop) or in the disabled
+ * wait state, the vm can be shut down.
+ * The acceptable cpu_state values are defined in the CpuInfoS390State
+ * enum.
+ */
+ uint8_t cpu_state;
+
+ /* currently processed sigp order */
+ uint8_t sigp_order;
+
+};
+
+static inline uint64_t *get_freg(CPUS390XState *cs, int nr)
+{
+ return &cs->vregs[nr][0];
+}
+
+/**
+ * S390CPU:
+ * @env: #CPUS390XState.
+ *
+ * An S/390 CPU.
+ */
+struct S390CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUS390XState env;
+ S390CPUModel *model;
+ /* needed for live migration */
+ void *irqstate;
+ uint32_t irqstate_saved_size;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_s390_cpu;
+#endif
+
+/* distinguish between 24 bit and 31 bit addressing */
+#define HIGH_ORDER_BIT 0x80000000
+
+/* Interrupt Codes */
+/* Program Interrupts */
+#define PGM_OPERATION 0x0001
+#define PGM_PRIVILEGED 0x0002
+#define PGM_EXECUTE 0x0003
+#define PGM_PROTECTION 0x0004
+#define PGM_ADDRESSING 0x0005
+#define PGM_SPECIFICATION 0x0006
+#define PGM_DATA 0x0007
+#define PGM_FIXPT_OVERFLOW 0x0008
+#define PGM_FIXPT_DIVIDE 0x0009
+#define PGM_DEC_OVERFLOW 0x000a
+#define PGM_DEC_DIVIDE 0x000b
+#define PGM_HFP_EXP_OVERFLOW 0x000c
+#define PGM_HFP_EXP_UNDERFLOW 0x000d
+#define PGM_HFP_SIGNIFICANCE 0x000e
+#define PGM_HFP_DIVIDE 0x000f
+#define PGM_SEGMENT_TRANS 0x0010
+#define PGM_PAGE_TRANS 0x0011
+#define PGM_TRANS_SPEC 0x0012
+#define PGM_SPECIAL_OP 0x0013
+#define PGM_OPERAND 0x0015
+#define PGM_TRACE_TABLE 0x0016
+#define PGM_VECTOR_PROCESSING 0x001b
+#define PGM_SPACE_SWITCH 0x001c
+#define PGM_HFP_SQRT 0x001d
+#define PGM_PC_TRANS_SPEC 0x001f
+#define PGM_AFX_TRANS 0x0020
+#define PGM_ASX_TRANS 0x0021
+#define PGM_LX_TRANS 0x0022
+#define PGM_EX_TRANS 0x0023
+#define PGM_PRIM_AUTH 0x0024
+#define PGM_SEC_AUTH 0x0025
+#define PGM_ALET_SPEC 0x0028
+#define PGM_ALEN_SPEC 0x0029
+#define PGM_ALE_SEQ 0x002a
+#define PGM_ASTE_VALID 0x002b
+#define PGM_ASTE_SEQ 0x002c
+#define PGM_EXT_AUTH 0x002d
+#define PGM_STACK_FULL 0x0030
+#define PGM_STACK_EMPTY 0x0031
+#define PGM_STACK_SPEC 0x0032
+#define PGM_STACK_TYPE 0x0033
+#define PGM_STACK_OP 0x0034
+#define PGM_ASCE_TYPE 0x0038
+#define PGM_REG_FIRST_TRANS 0x0039
+#define PGM_REG_SEC_TRANS 0x003a
+#define PGM_REG_THIRD_TRANS 0x003b
+#define PGM_MONITOR 0x0040
+#define PGM_PER 0x0080
+#define PGM_CRYPTO 0x0119
+
+/* External Interrupts */
+#define EXT_INTERRUPT_KEY 0x0040
+#define EXT_CLOCK_COMP 0x1004
+#define EXT_CPU_TIMER 0x1005
+#define EXT_MALFUNCTION 0x1200
+#define EXT_EMERGENCY 0x1201
+#define EXT_EXTERNAL_CALL 0x1202
+#define EXT_ETR 0x1406
+#define EXT_SERVICE 0x2401
+#define EXT_VIRTIO 0x2603
+
+/* PSW defines */
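+/* undef first: host headers may define these when building on s390x */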
+#undef PSW_MASK_PER
+#undef PSW_MASK_UNUSED_2
+#undef PSW_MASK_UNUSED_3
+#undef PSW_MASK_DAT
+#undef PSW_MASK_IO
+#undef PSW_MASK_EXT
+#undef PSW_MASK_KEY
+#undef PSW_SHIFT_KEY
+#undef PSW_MASK_MCHECK
+#undef PSW_MASK_WAIT
+#undef PSW_MASK_PSTATE
+#undef PSW_MASK_ASC
+#undef PSW_SHIFT_ASC
+#undef PSW_MASK_CC
+#undef PSW_MASK_PM
+#undef PSW_MASK_RI
+#undef PSW_SHIFT_MASK_PM
+#undef PSW_MASK_64
+#undef PSW_MASK_32
+#undef PSW_MASK_ESA_ADDR
+
+#define PSW_MASK_PER 0x4000000000000000ULL
+#define PSW_MASK_UNUSED_2 0x2000000000000000ULL
+#define PSW_MASK_UNUSED_3 0x1000000000000000ULL
+#define PSW_MASK_DAT 0x0400000000000000ULL
+#define PSW_MASK_IO 0x0200000000000000ULL
+#define PSW_MASK_EXT 0x0100000000000000ULL
+#define PSW_MASK_KEY 0x00F0000000000000ULL
+#define PSW_SHIFT_KEY 52
+#define PSW_MASK_SHORTPSW 0x0008000000000000ULL
+#define PSW_MASK_MCHECK 0x0004000000000000ULL
+#define PSW_MASK_WAIT 0x0002000000000000ULL
+#define PSW_MASK_PSTATE 0x0001000000000000ULL
+#define PSW_MASK_ASC 0x0000C00000000000ULL
+#define PSW_SHIFT_ASC 46
+#define PSW_MASK_CC 0x0000300000000000ULL
+#define PSW_MASK_PM 0x00000F0000000000ULL
+#define PSW_SHIFT_MASK_PM 40
+#define PSW_MASK_RI 0x0000008000000000ULL
+#define PSW_MASK_64 0x0000000100000000ULL
+#define PSW_MASK_32 0x0000000080000000ULL
+#define PSW_MASK_SHORT_ADDR 0x000000007fffffffULL
+#define PSW_MASK_SHORT_CTRL 0xffffffff80000000ULL
+
+#undef PSW_ASC_PRIMARY
+#undef PSW_ASC_ACCREG
+#undef PSW_ASC_SECONDARY
+#undef PSW_ASC_HOME
+
+#define PSW_ASC_PRIMARY 0x0000000000000000ULL
+#define PSW_ASC_ACCREG 0x0000400000000000ULL
+#define PSW_ASC_SECONDARY 0x0000800000000000ULL
+#define PSW_ASC_HOME 0x0000C00000000000ULL
+
+/* the address space values shifted */
+#define AS_PRIMARY 0
+#define AS_ACCREG 1
+#define AS_SECONDARY 2
+#define AS_HOME 3
+
+/* tb flags */
+
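+/* TB flags are the relevant PSW mask bits shifted right by 31 to fit 32 bits */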
+#define FLAG_MASK_PSW_SHIFT 31
+#define FLAG_MASK_PER (PSW_MASK_PER >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_DAT (PSW_MASK_DAT >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_ASC (PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_64 (PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_32 (PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_DAT | FLAG_MASK_PSTATE \
+ | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32)
+
+/* we'll use some unused PSW positions to store CR flags in tb flags */
+#define FLAG_MASK_AFP (PSW_MASK_UNUSED_2 >> FLAG_MASK_PSW_SHIFT)
+#define FLAG_MASK_VECTOR (PSW_MASK_UNUSED_3 >> FLAG_MASK_PSW_SHIFT)
+
+/* Control register 0 bits */
+#define CR0_LOWPROT 0x0000000010000000ULL
+#define CR0_SECONDARY 0x0000000004000000ULL
+#define CR0_EDAT 0x0000000000800000ULL
+#define CR0_AFP 0x0000000000040000ULL
+#define CR0_VECTOR 0x0000000000020000ULL
+#define CR0_IEP 0x0000000000100000ULL
+#define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL
+#define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL
+#define CR0_CKC_SC 0x0000000000000800ULL
+#define CR0_CPU_TIMER_SC 0x0000000000000400ULL
+#define CR0_SERVICE_SC 0x0000000000000200ULL
+
+/* Control register 14 bits */
+#define CR14_CHANNEL_REPORT_SC 0x0000000010000000ULL
+
+/* MMU */
+#define MMU_PRIMARY_IDX 0
+#define MMU_SECONDARY_IDX 1
+#define MMU_HOME_IDX 2
+#define MMU_REAL_IDX 3
+
+static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ return MMU_REAL_IDX;
+ }
+
+ if (ifetch) {
+ if ((env->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) {
+ return MMU_HOME_IDX;
+ }
+ return MMU_PRIMARY_IDX;
+ }
+
+ switch (env->psw.mask & PSW_MASK_ASC) {
+ case PSW_ASC_PRIMARY:
+ return MMU_PRIMARY_IDX;
+ case PSW_ASC_SECONDARY:
+ return MMU_SECONDARY_IDX;
+ case PSW_ASC_HOME:
+ return MMU_HOME_IDX;
+ case PSW_ASC_ACCREG:
+ /* Fallthrough: access register mode is not yet supported */
+ default:
+ abort();
+ }
+#endif
+}
+
+static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->psw.addr;
+ *cs_base = env->ex_value;
+ *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
+ if (env->cregs[0] & CR0_AFP) {
+ *flags |= FLAG_MASK_AFP;
+ }
+ if (env->cregs[0] & CR0_VECTOR) {
+ *flags |= FLAG_MASK_VECTOR;
+ }
+}
+
+/* PER bits from control register 9 */
+#define PER_CR9_EVENT_BRANCH 0x80000000
+#define PER_CR9_EVENT_IFETCH 0x40000000
+#define PER_CR9_EVENT_STORE 0x20000000
+#define PER_CR9_EVENT_STORE_REAL 0x08000000
+#define PER_CR9_EVENT_NULLIFICATION 0x01000000
+#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
+#define PER_CR9_CONTROL_ALTERATION 0x00200000
+
+/* PER bits from the PER CODE/ATMID/AI in lowcore */
+#define PER_CODE_EVENT_BRANCH 0x8000
+#define PER_CODE_EVENT_IFETCH 0x4000
+#define PER_CODE_EVENT_STORE 0x2000
+#define PER_CODE_EVENT_STORE_REAL 0x0800
+#define PER_CODE_EVENT_NULLIFICATION 0x0100
+
+#define EXCP_EXT 1 /* external interrupt */
+#define EXCP_SVC 2 /* supervisor call (syscall) */
+#define EXCP_PGM 3 /* program interruption */
+#define EXCP_RESTART 4 /* restart interrupt */
+#define EXCP_STOP 5 /* stop interrupt */
+#define EXCP_IO 7 /* I/O interrupt */
+#define EXCP_MCHK 8 /* machine check */
+
+#define INTERRUPT_EXT_CPU_TIMER (1 << 3)
+#define INTERRUPT_EXT_CLOCK_COMPARATOR (1 << 4)
+#define INTERRUPT_EXTERNAL_CALL (1 << 5)
+#define INTERRUPT_EMERGENCY_SIGNAL (1 << 6)
+#define INTERRUPT_RESTART (1 << 7)
+#define INTERRUPT_STOP (1 << 8)
+
+/* Program Status Word. */
+#define S390_PSWM_REGNUM 0
+#define S390_PSWA_REGNUM 1
+/* General Purpose Registers. */
+#define S390_R0_REGNUM 2
+#define S390_R1_REGNUM 3
+#define S390_R2_REGNUM 4
+#define S390_R3_REGNUM 5
+#define S390_R4_REGNUM 6
+#define S390_R5_REGNUM 7
+#define S390_R6_REGNUM 8
+#define S390_R7_REGNUM 9
+#define S390_R8_REGNUM 10
+#define S390_R9_REGNUM 11
+#define S390_R10_REGNUM 12
+#define S390_R11_REGNUM 13
+#define S390_R12_REGNUM 14
+#define S390_R13_REGNUM 15
+#define S390_R14_REGNUM 16
+#define S390_R15_REGNUM 17
+/* Total Core Registers. */
+#define S390_NUM_CORE_REGS 18
+
+static inline void setcc(S390CPU *cpu, uint64_t cc)
+{
+ CPUS390XState *env = &cpu->env;
+
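+    /* keep the architected CC field (PSW bits 44..45) and cc_op in sync */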
+ env->psw.mask &= ~(3ull << 44);
+ env->psw.mask |= (cc & 3) << 44;
+ env->cc_op = cc;
+}
+
+/* STSI */
+#define STSI_R0_FC_MASK 0x00000000f0000000ULL
+#define STSI_R0_FC_CURRENT 0x0000000000000000ULL
+#define STSI_R0_FC_LEVEL_1 0x0000000010000000ULL
+#define STSI_R0_FC_LEVEL_2 0x0000000020000000ULL
+#define STSI_R0_FC_LEVEL_3 0x0000000030000000ULL
+#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
+#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
+#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
+#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
+
+/* Basic Machine Configuration */
+typedef struct SysIB_111 {
+ uint8_t res1[32];
+ uint8_t manuf[16];
+ uint8_t type[4];
+ uint8_t res2[12];
+ uint8_t model[16];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res3[3996];
+} SysIB_111;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_111) != 4096);
+
+/* Basic Machine CPU */
+typedef struct SysIB_121 {
+ uint8_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res2[2];
+ uint16_t cpu_addr;
+ uint8_t res3[3992];
+} SysIB_121;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_121) != 4096);
+
+/* Basic Machine CPUs */
+typedef struct SysIB_122 {
+ uint8_t res1[32];
+ uint32_t capability;
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint16_t adjustments[2026];
+} SysIB_122;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_122) != 4096);
+
+/* LPAR CPU */
+typedef struct SysIB_221 {
+ uint8_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint16_t cpu_id;
+ uint16_t cpu_addr;
+ uint8_t res3[3992];
+} SysIB_221;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_221) != 4096);
+
+/* LPAR CPUs */
+typedef struct SysIB_222 {
+ uint8_t res1[32];
+ uint16_t lpar_num;
+ uint8_t res2;
+ uint8_t lcpuc;
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t res3[16];
+ uint16_t dedicated_cpus;
+ uint16_t shared_cpus;
+ uint8_t res4[4020];
+} SysIB_222;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_222) != 4096);
+
+/* VM CPUs */
+typedef struct SysIB_322 {
+ uint8_t res1[31];
+ uint8_t count;
+ struct {
+ uint8_t res2[4];
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t cpi[16];
+ uint8_t res5[3];
+ uint8_t ext_name_encoding;
+ uint32_t res3;
+ uint8_t uuid[16];
+ } vm[8];
+ uint8_t res4[1504];
+ uint8_t ext_names[8][256];
+} SysIB_322;
+QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096);
+
+typedef union SysIB {
+ SysIB_111 sysib_111;
+ SysIB_121 sysib_121;
+ SysIB_122 sysib_122;
+ SysIB_221 sysib_221;
+ SysIB_222 sysib_222;
+ SysIB_322 sysib_322;
+} SysIB;
+QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
+
+/* MMU defines */
+#define ASCE_ORIGIN (~0xfffULL) /* segment table origin */
+#define ASCE_SUBSPACE 0x200 /* subspace group control */
+#define ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define ASCE_SPACE_SWITCH 0x40 /* space switch event */
+#define ASCE_REAL_SPACE 0x20 /* real space control */
+#define ASCE_TYPE_MASK 0x0c /* asce table type mask */
+#define ASCE_TYPE_REGION1 0x0c /* region first table type */
+#define ASCE_TYPE_REGION2 0x08 /* region second table type */
+#define ASCE_TYPE_REGION3 0x04 /* region third table type */
+#define ASCE_TYPE_SEGMENT 0x00 /* segment table type */
+#define ASCE_TABLE_LENGTH 0x03 /* region table length */
+
+#define REGION_ENTRY_ORIGIN 0xfffffffffffff000ULL
+#define REGION_ENTRY_P 0x0000000000000200ULL
+#define REGION_ENTRY_TF 0x00000000000000c0ULL
+#define REGION_ENTRY_I 0x0000000000000020ULL
+#define REGION_ENTRY_TT 0x000000000000000cULL
+#define REGION_ENTRY_TL 0x0000000000000003ULL
+
+#define REGION_ENTRY_TT_REGION1 0x000000000000000cULL
+#define REGION_ENTRY_TT_REGION2 0x0000000000000008ULL
+#define REGION_ENTRY_TT_REGION3 0x0000000000000004ULL
+
+#define REGION3_ENTRY_RFAA 0xffffffff80000000ULL
+#define REGION3_ENTRY_AV 0x0000000000010000ULL
+#define REGION3_ENTRY_ACC 0x000000000000f000ULL
+#define REGION3_ENTRY_F 0x0000000000000800ULL
+#define REGION3_ENTRY_FC 0x0000000000000400ULL
+#define REGION3_ENTRY_IEP 0x0000000000000100ULL
+#define REGION3_ENTRY_CR 0x0000000000000010ULL
+
+#define SEGMENT_ENTRY_ORIGIN 0xfffffffffffff800ULL
+#define SEGMENT_ENTRY_SFAA 0xfffffffffff00000ULL
+#define SEGMENT_ENTRY_AV 0x0000000000010000ULL
+#define SEGMENT_ENTRY_ACC 0x000000000000f000ULL
+#define SEGMENT_ENTRY_F 0x0000000000000800ULL
+#define SEGMENT_ENTRY_FC 0x0000000000000400ULL
+#define SEGMENT_ENTRY_P 0x0000000000000200ULL
+#define SEGMENT_ENTRY_IEP 0x0000000000000100ULL
+#define SEGMENT_ENTRY_I 0x0000000000000020ULL
+#define SEGMENT_ENTRY_CS 0x0000000000000010ULL
+#define SEGMENT_ENTRY_TT 0x000000000000000cULL
+
+#define SEGMENT_ENTRY_TT_SEGMENT 0x0000000000000000ULL
+
+#define PAGE_ENTRY_0 0x0000000000000800ULL
+#define PAGE_ENTRY_I 0x0000000000000400ULL
+#define PAGE_ENTRY_P 0x0000000000000200ULL
+#define PAGE_ENTRY_IEP 0x0000000000000100ULL
+
+#define VADDR_REGION1_TX_MASK 0xffe0000000000000ULL
+#define VADDR_REGION2_TX_MASK 0x001ffc0000000000ULL
+#define VADDR_REGION3_TX_MASK 0x000003ff80000000ULL
+#define VADDR_SEGMENT_TX_MASK 0x000000007ff00000ULL
+#define VADDR_PAGE_TX_MASK 0x00000000000ff000ULL
+
+#define VADDR_REGION1_TX(vaddr) (((vaddr) & VADDR_REGION1_TX_MASK) >> 53)
+#define VADDR_REGION2_TX(vaddr) (((vaddr) & VADDR_REGION2_TX_MASK) >> 42)
+#define VADDR_REGION3_TX(vaddr) (((vaddr) & VADDR_REGION3_TX_MASK) >> 31)
+#define VADDR_SEGMENT_TX(vaddr) (((vaddr) & VADDR_SEGMENT_TX_MASK) >> 20)
+#define VADDR_PAGE_TX(vaddr) (((vaddr) & VADDR_PAGE_TX_MASK) >> 12)
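+
+/*
+ * Worked example (illustration only): each *_TX macro extracts the table
+ * index for one translation level, e.g. for vaddr 0x12345000:
+ *   VADDR_SEGMENT_TX(0x12345000) == 0x123  (segment-table index)
+ *   VADDR_PAGE_TX(0x12345000)    == 0x45   (page-table index)
+ */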
+
+#define VADDR_REGION1_TL(vaddr) (((vaddr) & 0xc000000000000000ULL) >> 62)
+#define VADDR_REGION2_TL(vaddr) (((vaddr) & 0x0018000000000000ULL) >> 51)
+#define VADDR_REGION3_TL(vaddr) (((vaddr) & 0x0000030000000000ULL) >> 40)
+#define VADDR_SEGMENT_TL(vaddr) (((vaddr) & 0x0000000060000000ULL) >> 29)
+
+#define SK_C (0x1 << 1)
+#define SK_R (0x1 << 2)
+#define SK_F (0x1 << 3)
+#define SK_ACC_MASK (0xf << 4)
+
+/* SIGP order codes */
+#define SIGP_SENSE 0x01
+#define SIGP_EXTERNAL_CALL 0x02
+#define SIGP_EMERGENCY 0x03
+#define SIGP_START 0x04
+#define SIGP_STOP 0x05
+#define SIGP_RESTART 0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET 0x0c
+#define SIGP_SET_PREFIX 0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH 0x12
+#define SIGP_COND_EMERGENCY 0x13
+#define SIGP_SENSE_RUNNING 0x15
+#define SIGP_STORE_ADTL_STATUS 0x17
+
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED 1
+#define SIGP_CC_BUSY 2
+#define SIGP_CC_NOT_OPERATIONAL 3
+
+/* SIGP status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_NOT_RUNNING 0x00000400UL
+#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STAT_STOPPED 0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
+#define SIGP_STAT_CHECK_STOP 0x00000010UL
+#define SIGP_STAT_INOPERATIVE 0x00000004UL
+#define SIGP_STAT_INVALID_ORDER 0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+
+/* SIGP order code mask corresponding to bit positions 56-63 */
+#define SIGP_ORDER_MASK 0x000000ff
+
+/* machine check interruption code */
+
+/* subclasses */
+#define MCIC_SC_SD 0x8000000000000000ULL
+#define MCIC_SC_PD 0x4000000000000000ULL
+#define MCIC_SC_SR 0x2000000000000000ULL
+#define MCIC_SC_CD 0x0800000000000000ULL
+#define MCIC_SC_ED 0x0400000000000000ULL
+#define MCIC_SC_DG 0x0100000000000000ULL
+#define MCIC_SC_W 0x0080000000000000ULL
+#define MCIC_SC_CP 0x0040000000000000ULL
+#define MCIC_SC_SP 0x0020000000000000ULL
+#define MCIC_SC_CK 0x0010000000000000ULL
+
+/* subclass modifiers */
+#define MCIC_SCM_B 0x0002000000000000ULL
+#define MCIC_SCM_DA 0x0000000020000000ULL
+#define MCIC_SCM_AP 0x0000000000080000ULL
+
+/* storage errors */
+#define MCIC_SE_SE 0x0000800000000000ULL
+#define MCIC_SE_SC 0x0000400000000000ULL
+#define MCIC_SE_KE 0x0000200000000000ULL
+#define MCIC_SE_DS 0x0000100000000000ULL
+#define MCIC_SE_IE 0x0000000080000000ULL
+
+/* validity bits */
+#define MCIC_VB_WP 0x0000080000000000ULL
+#define MCIC_VB_MS 0x0000040000000000ULL
+#define MCIC_VB_PM 0x0000020000000000ULL
+#define MCIC_VB_IA 0x0000010000000000ULL
+#define MCIC_VB_FA 0x0000008000000000ULL
+#define MCIC_VB_VR 0x0000004000000000ULL
+#define MCIC_VB_EC 0x0000002000000000ULL
+#define MCIC_VB_FP 0x0000001000000000ULL
+#define MCIC_VB_GR 0x0000000800000000ULL
+#define MCIC_VB_CR 0x0000000400000000ULL
+#define MCIC_VB_ST 0x0000000100000000ULL
+#define MCIC_VB_AR 0x0000000040000000ULL
+#define MCIC_VB_GS 0x0000000008000000ULL
+#define MCIC_VB_PR 0x0000000000200000ULL
+#define MCIC_VB_FC 0x0000000000100000ULL
+#define MCIC_VB_CT 0x0000000000020000ULL
+#define MCIC_VB_CC 0x0000000000010000ULL
+
+static inline uint64_t s390_build_validity_mcic(void)
+{
+ uint64_t mcic;
+
+ /*
+ * Indicate only the validity bits (no damage). Other bits (storage
+ * errors, subclasses and subclass modifiers) have to be added by the
+ * caller.
+ */
+ mcic = MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
+ MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
+ MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ mcic |= MCIC_VB_VR;
+ }
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
+ mcic |= MCIC_VB_GS;
+ }
+ return mcic;
+}
+
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ cpu_reset(cs);
+}
+
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->reset(cs, S390_CPU_RESET_NORMAL);
+}
+
+static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->reset(cs, S390_CPU_RESET_INITIAL);
+}
+
+static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->load_normal(cs);
+}
+
+
+/* cpu.c */
+void s390_crypto_reset(void);
+int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit);
+void s390_set_max_pagesize(uint64_t pagesize, Error **errp);
+void s390_cmma_reset(void);
+void s390_enable_css_support(S390CPU *cpu);
+void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg);
+int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id,
+ int vq, bool assign);
+#ifndef CONFIG_USER_ONLY
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
+#else
+static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+ return 0;
+}
+#endif /* CONFIG_USER_ONLY */
+static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
+{
+ return cpu->env.cpu_state;
+}
+
+
+/* cpu_models.c */
+void s390_cpu_list(void);
+#define cpu_list s390_cpu_list
+void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ const S390FeatInit feat_init);
+
+
+/* helper.c */
+#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU
+#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX)
+#define CPU_RESOLVING_TYPE TYPE_S390_CPU
+
+/* interrupt.c */
+#define RA_IGNORED 0
+void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra);
+/* service interrupts are floating, therefore we must not pass a cpustate */
+void s390_sclp_extint(uint32_t parm);
+
+/* mmu_helper.c */
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_read(cpu, laddr, ar, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, false)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
+void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);
+int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
+ int len, bool is_write);
+#define s390_cpu_pv_mem_read(cpu, offset, dest, len) \
+ s390_cpu_pv_mem_rw(cpu, offset, dest, len, false)
+#define s390_cpu_pv_mem_write(cpu, offset, dest, len) \
+ s390_cpu_pv_mem_rw(cpu, offset, dest, len, true)
+
+/* sigp.c */
+int s390_cpu_restart(S390CPU *cpu);
+void s390_init_sigp(void);
+
+/* helper.c */
+void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+uint64_t s390_cpu_get_psw_mask(CPUS390XState *env);
+
+/* outside of target/s390x/ */
+S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
+
+typedef CPUS390XState CPUArchState;
+typedef S390CPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+#endif
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
new file mode 100644
index 000000000..5528acd08
--- /dev/null
+++ b/target/s390x/cpu_features.c
@@ -0,0 +1,255 @@
+/*
+ * CPU features/facilities for s390x
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "cpu_features.h"
+#include "hw/s390x/pv.h"
+
+#define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \
+ [S390_FEAT_##_FEAT] = { \
+ .name = _NAME, \
+ .type = S390_FEAT_TYPE_##_TYPE, \
+ .bit = _BIT, \
+ .desc = _DESC, \
+ },
+static const S390FeatDef s390_features[S390_FEAT_MAX] = {
+ #include "cpu_features_def.h.inc"
+};
+#undef DEF_FEAT
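+
+/*
+ * For illustration (a sketch of the expansion, not an additional
+ * definition): a line like DEF_FEAT(ZARCH, "zarch", STFL, 1, "...") in
+ * cpu_features_def.h.inc expands above to
+ *   [S390_FEAT_ZARCH] = { .name = "zarch", .type = S390_FEAT_TYPE_STFL,
+ *                         .bit = 1, .desc = "..." },
+ * so the table is indexed directly by S390Feat enum values.
+ */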
+
+const S390FeatDef *s390_feat_def(S390Feat feat)
+{
+ return &s390_features[feat];
+}
+
+S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit)
+{
+ S390Feat feat;
+
+ for (feat = 0; feat < ARRAY_SIZE(s390_features); feat++) {
+ if (s390_features[feat].type == type &&
+ s390_features[feat].bit == bit) {
+ return feat;
+ }
+ }
+ return S390_FEAT_MAX;
+}
+
+void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap)
+{
+ int i, j;
+
+ for (i = 0; i < (S390_FEAT_MAX / 64 + 1); i++) {
+ if (init[i]) {
+ for (j = 0; j < 64; j++) {
+ if (init[i] & 1ULL << j) {
+ set_bit(i * 64 + j, bitmap);
+ }
+ }
+ }
+ }
+}
+
+void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
+ uint8_t *data)
+{
+ S390Feat feat;
+ int bit_nr;
+
+ switch (type) {
+ case S390_FEAT_TYPE_STFL:
+ if (test_bit(S390_FEAT_ZARCH, features)) {
+ /* Features that are always active */
+ set_be_bit(2, data); /* z/Architecture */
+ set_be_bit(138, data); /* Configuration-z-architectural-mode */
+ }
+ break;
+ case S390_FEAT_TYPE_PTFF:
+ case S390_FEAT_TYPE_KMAC:
+ case S390_FEAT_TYPE_KMC:
+ case S390_FEAT_TYPE_KM:
+ case S390_FEAT_TYPE_KIMD:
+ case S390_FEAT_TYPE_KLMD:
+ case S390_FEAT_TYPE_PCKMO:
+ case S390_FEAT_TYPE_KMCTR:
+ case S390_FEAT_TYPE_KMF:
+ case S390_FEAT_TYPE_KMO:
+ case S390_FEAT_TYPE_PCC:
+ case S390_FEAT_TYPE_PPNO:
+ case S390_FEAT_TYPE_KMA:
+ case S390_FEAT_TYPE_KDSA:
+ case S390_FEAT_TYPE_SORTL:
+ case S390_FEAT_TYPE_DFLTCC:
+ set_be_bit(0, data); /* query is always available */
+ break;
+ default:
+ break;
+ }
+
+ feat = find_first_bit(features, S390_FEAT_MAX);
+ while (feat < S390_FEAT_MAX) {
+ if (s390_features[feat].type == type) {
+ bit_nr = s390_features[feat].bit;
+ /* big-endian bit number within the uint8_t array */
+ set_be_bit(bit_nr, data);
+ }
+ feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
+ }
+
+ if (!s390_is_pv()) {
+ return;
+ }
+
+ /*
+ * Some facilities are not available for CPUs in protected mode:
+ * - All SIE facilities because SIE is not available
+ * - DIAG318
+ *
+ * Since VMs can move in and out of protected mode, the CPU model
+ * alone doesn't protect us from this problem, because it is only
+ * validated at the start of the VM.
+ */
+ switch (type) {
+ case S390_FEAT_TYPE_SCLP_CPU:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_F2)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SKEY)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_GPERE)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SIIF)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_SIGPIF)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_IB)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_CEI)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_CONF_CHAR:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_GSLS)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_HPMA2)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_KSS)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT:
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_64BSCAO)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_CMMA)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_PFMFI)->bit, data);
+ clear_be_bit(s390_feat_def(S390_FEAT_SIE_IBS)->bit, data);
+ break;
+ case S390_FEAT_TYPE_SCLP_FAC134:
+ clear_be_bit(s390_feat_def(S390_FEAT_DIAG_318)->bit, data);
+ break;
+ default:
+ return;
+ }
+}
+
+void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
+ uint8_t *data)
+{
+ int nr_bits, le_bit;
+
+ switch (type) {
+ case S390_FEAT_TYPE_STFL:
+ nr_bits = 16384;
+ break;
+ case S390_FEAT_TYPE_PLO:
+ case S390_FEAT_TYPE_SORTL:
+ case S390_FEAT_TYPE_DFLTCC:
+ nr_bits = 256;
+ break;
+ default:
+ /* all other CPU subfunction blocks are 128 bits */
+ nr_bits = 128;
+ }
+
+ le_bit = find_first_bit((unsigned long *) data, nr_bits);
+ while (le_bit < nr_bits) {
+ /* convert the bit number to a big-endian bit number */
+ S390Feat feat = s390_feat_by_type_and_bit(type, BE_BIT_NR(le_bit));
+ /* ignore unknown bits */
+ if (feat < S390_FEAT_MAX) {
+ set_bit(feat, features);
+ }
+ le_bit = find_next_bit((unsigned long *) data, nr_bits, le_bit + 1);
+ }
+}
+
+void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
+ void (*fn)(const char *name, void *opaque))
+{
+ S390FeatBitmap bitmap, tmp;
+ S390FeatGroup group;
+ S390Feat feat;
+
+ bitmap_copy(bitmap, features, S390_FEAT_MAX);
+
+ /* process whole groups first */
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+
+ bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX);
+ if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) {
+ bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX);
+ fn(def->name, opaque);
+ }
+ }
+
+ /* report leftovers as separate features */
+ feat = find_first_bit(bitmap, S390_FEAT_MAX);
+ while (feat < S390_FEAT_MAX) {
+ fn(s390_feat_def(feat)->name, opaque);
+ feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1);
+ }
+}
+
+#define FEAT_GROUP_INIT(_name, _group, _desc) \
+ { \
+ .name = _name, \
+ .desc = _desc, \
+ .init = { S390_FEAT_GROUP_LIST_ ## _group }, \
+ }
+
+/* indexed by feature group number for easy lookup */
+static S390FeatGroupDef s390_feature_groups[] = {
+ FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"),
+ FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"),
+ FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"),
+ FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"),
+ FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"),
+ FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"),
+ FEAT_GROUP_INIT("msa3", MSA_EXT_3, "Message-security-assist-extension 3 facility"),
+ FEAT_GROUP_INIT("msa4", MSA_EXT_4, "Message-security-assist-extension 4 facility"),
+ FEAT_GROUP_INIT("msa5", MSA_EXT_5, "Message-security-assist-extension 5 facility"),
+ FEAT_GROUP_INIT("msa6", MSA_EXT_6, "Message-security-assist-extension 6 facility"),
+ FEAT_GROUP_INIT("msa7", MSA_EXT_7, "Message-security-assist-extension 7 facility"),
+ FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"),
+ FEAT_GROUP_INIT("msa9", MSA_EXT_9, "Message-security-assist-extension 9 facility"),
+ FEAT_GROUP_INIT("msa9_pckmo", MSA_EXT_9_PCKMO, "Message-security-assist-extension 9 PCKMO subfunctions"),
+ FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"),
+ FEAT_GROUP_INIT("esort", ENH_SORT, "Enhanced-sort facility"),
+ FEAT_GROUP_INIT("deflate", DEFLATE_CONVERSION, "Deflate-conversion facility"),
+};
+
+const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group)
+{
+ return &s390_feature_groups[group];
+}
+
+static void init_groups(void)
+{
+ int i;
+
+ /* initialize all bitmaps from the generated data */
+ for (i = 0; i < ARRAY_SIZE(s390_feature_groups); i++) {
+ s390_init_feat_bitmap(s390_feature_groups[i].init,
+ s390_feature_groups[i].feat);
+ }
+}
+
+type_init(init_groups)
diff --git a/target/s390x/cpu_features.h b/target/s390x/cpu_features.h
new file mode 100644
index 000000000..87463f064
--- /dev/null
+++ b/target/s390x/cpu_features.h
@@ -0,0 +1,96 @@
+/*
+ * CPU features/facilities helper structs and utility functions for s390
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_FEATURES_H
+#define TARGET_S390X_CPU_FEATURES_H
+
+#include "qemu/bitmap.h"
+#include "cpu_features_def.h"
+#include "target/s390x/gen-features.h"
+
+/* CPU features are announced in different ways */
+typedef enum {
+ S390_FEAT_TYPE_STFL,
+ S390_FEAT_TYPE_SCLP_CONF_CHAR,
+ S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
+ S390_FEAT_TYPE_SCLP_FAC134,
+ S390_FEAT_TYPE_SCLP_CPU,
+ S390_FEAT_TYPE_MISC,
+ S390_FEAT_TYPE_PLO,
+ S390_FEAT_TYPE_PTFF,
+ S390_FEAT_TYPE_KMAC,
+ S390_FEAT_TYPE_KMC,
+ S390_FEAT_TYPE_KM,
+ S390_FEAT_TYPE_KIMD,
+ S390_FEAT_TYPE_KLMD,
+ S390_FEAT_TYPE_PCKMO,
+ S390_FEAT_TYPE_KMCTR,
+ S390_FEAT_TYPE_KMF,
+ S390_FEAT_TYPE_KMO,
+ S390_FEAT_TYPE_PCC,
+ S390_FEAT_TYPE_PPNO,
+ S390_FEAT_TYPE_KMA,
+ S390_FEAT_TYPE_KDSA,
+ S390_FEAT_TYPE_SORTL,
+ S390_FEAT_TYPE_DFLTCC,
+} S390FeatType;
+
+/* Definition of a CPU feature */
+typedef struct {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ S390FeatType type; /* feature type (way of indication) */
+ int bit; /* bit within the feature type area (fixed) */
+} S390FeatDef;
+
+/* use ordinary bitmap operations to work with features */
+typedef unsigned long S390FeatBitmap[BITS_TO_LONGS(S390_FEAT_MAX)];
+
+/* 64-bit based bitmap used to init S390FeatBitmap from generated data */
+typedef uint64_t S390FeatInit[S390_FEAT_MAX / 64 + 1];
+
+const S390FeatDef *s390_feat_def(S390Feat feat);
+S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit);
+void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap);
+void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
+ uint8_t *data);
+void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
+ uint8_t *data);
+void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
+ void (*fn)(const char *name, void *opaque));
+
+/* Definition of a CPU feature group */
+typedef struct {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ S390FeatBitmap feat; /* features contained in the group */
+ S390FeatInit init; /* used to init feat from generated data */
+} S390FeatGroupDef;
+
+const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group);
+
+#define BE_BIT_NR(BIT) (BIT ^ (BITS_PER_LONG - 1))
+
+static inline void clear_be_bit(unsigned int bit_nr, uint8_t *array)
+{
+ array[bit_nr / 8] &= ~(0x80 >> (bit_nr % 8));
+}
+static inline void set_be_bit(unsigned int bit_nr, uint8_t *array)
+{
+ array[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
+}
+static inline bool test_be_bit(unsigned int bit_nr, const uint8_t *array)
+{
+ return array[bit_nr / 8] & (0x80 >> (bit_nr % 8));
+}
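+
+/*
+ * Example (illustration only): facility lists use IBM (big-endian) bit
+ * numbering, where bit 0 is the most significant bit of byte 0. Hence
+ * set_be_bit(0, data) sets data[0] |= 0x80, and set_be_bit(9, data) sets
+ * data[1] |= 0x40.
+ */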
+#endif /* TARGET_S390X_CPU_FEATURES_H */
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
new file mode 100644
index 000000000..87df31848
--- /dev/null
+++ b/target/s390x/cpu_features_def.h
@@ -0,0 +1,25 @@
+/*
+ * CPU features/facilities for s390
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_FEATURES_DEF_H
+#define TARGET_S390X_CPU_FEATURES_DEF_H
+
+#define DEF_FEAT(_FEAT, ...) S390_FEAT_##_FEAT,
+typedef enum {
+ #include "cpu_features_def.h.inc"
+ S390_FEAT_MAX,
+} S390Feat;
+#undef DEF_FEAT
+
+#endif /* TARGET_S390X_CPU_FEATURES_DEF_H */
diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc
new file mode 100644
index 000000000..e86662bb3
--- /dev/null
+++ b/target/s390x/cpu_features_def.h.inc
@@ -0,0 +1,380 @@
+/*
+ * RAW s390x CPU feature definitions:
+ *
+ * DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC):
+ * - _FEAT: Feature (enum) name used internally (S390_FEAT_##_FEAT)
+ * - _NAME: Feature name exposed to the user.
+ * - _TYPE: Feature type (S390_FEAT_TYPE_##_TYPE).
+ * - _BIT: Feature bit number within feature type block (unused for MISC).
+ * - _DESC: Feature description, exposed to the user.
+ *
+ * Copyright IBM Corp. 2016, 2018
+ * Copyright Red Hat, Inc. 2019
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+/* Features exposed via the STFL(E) instruction. */
+DEF_FEAT(ESAN3, "esan3", STFL, 0, "Instructions marked as n3")
+DEF_FEAT(ZARCH, "zarch", STFL, 1, "z/Architecture architectural mode")
+DEF_FEAT(DAT_ENH, "dateh", STFL, 3, "DAT-enhancement facility")
+DEF_FEAT(IDTE_SEGMENT, "idtes", STFL, 4, "IDTE selective TLB segment-table clearing")
+DEF_FEAT(IDTE_REGION, "idter", STFL, 5, "IDTE selective TLB region-table clearing")
+DEF_FEAT(ASN_LX_REUSE, "asnlxr", STFL, 6, "ASN-and-LX reuse facility")
+DEF_FEAT(STFLE, "stfle", STFL, 7, "Store-facility-list-extended facility")
+DEF_FEAT(EDAT, "edat", STFL, 8, "Enhanced-DAT facility")
+DEF_FEAT(SENSE_RUNNING_STATUS, "srs", STFL, 9, "Sense-running-status facility")
+DEF_FEAT(CONDITIONAL_SSKE, "csske", STFL, 10, "Conditional-SSKE facility")
+DEF_FEAT(CONFIGURATION_TOPOLOGY, "ctop", STFL, 11, "Configuration-topology facility")
+DEF_FEAT(AP_QUERY_CONFIG_INFO, "apqci", STFL, 12, "Query AP Configuration Information facility")
+DEF_FEAT(IPTE_RANGE, "ipter", STFL, 13, "IPTE-range facility")
+DEF_FEAT(NONQ_KEY_SETTING, "nonqks", STFL, 14, "Nonquiescing key-setting facility")
+DEF_FEAT(AP_FACILITIES_TEST, "apft", STFL, 15, "AP Facilities Test facility")
+DEF_FEAT(EXTENDED_TRANSLATION_2, "etf2", STFL, 16, "Extended-translation facility 2")
+DEF_FEAT(MSA, "msa-base", STFL, 17, "Message-security-assist facility (excluding subfunctions)")
+DEF_FEAT(LONG_DISPLACEMENT, "ldisp", STFL, 18, "Long-displacement facility")
+DEF_FEAT(LONG_DISPLACEMENT_FAST, "ldisphp", STFL, 19, "Long-displacement facility has high performance")
+DEF_FEAT(HFP_MADDSUB, "hfpm", STFL, 20, "HFP-multiply-add/subtract facility")
+DEF_FEAT(EXTENDED_IMMEDIATE, "eimm", STFL, 21, "Extended-immediate facility")
+DEF_FEAT(EXTENDED_TRANSLATION_3, "etf3", STFL, 22, "Extended-translation facility 3")
+DEF_FEAT(HFP_UNNORMALIZED_EXT, "hfpue", STFL, 23, "HFP-unnormalized-extension facility")
+DEF_FEAT(ETF2_ENH, "etf2eh", STFL, 24, "ETF2-enhancement facility")
+DEF_FEAT(STORE_CLOCK_FAST, "stckf", STFL, 25, "Store-clock-fast facility")
+DEF_FEAT(PARSING_ENH, "parseh", STFL, 26, "Parsing-enhancement facility")
+DEF_FEAT(MOVE_WITH_OPTIONAL_SPEC, "mvcos", STFL, 27, "Move-with-optional-specification facility")
+DEF_FEAT(TOD_CLOCK_STEERING, "tods-base", STFL, 28, "TOD-clock-steering facility (excluding subfunctions)")
+DEF_FEAT(ETF3_ENH, "etf3eh", STFL, 30, "ETF3-enhancement facility")
+DEF_FEAT(EXTRACT_CPU_TIME, "ectg", STFL, 31, "Extract-CPU-time facility")
+DEF_FEAT(COMPARE_AND_SWAP_AND_STORE, "csst", STFL, 32, "Compare-and-swap-and-store facility")
+DEF_FEAT(COMPARE_AND_SWAP_AND_STORE_2, "csst2", STFL, 33, "Compare-and-swap-and-store facility 2")
+DEF_FEAT(GENERAL_INSTRUCTIONS_EXT, "ginste", STFL, 34, "General-instructions-extension facility")
+DEF_FEAT(EXECUTE_EXT, "exrl", STFL, 35, "Execute-extensions facility")
+DEF_FEAT(ENHANCED_MONITOR, "emon", STFL, 36, "Enhanced-monitor facility")
+DEF_FEAT(FLOATING_POINT_EXT, "fpe", STFL, 37, "Floating-point extension facility")
+DEF_FEAT(ORDER_PRESERVING_COMPRESSION, "opc", STFL, 38, "Order Preserving Compression facility")
+DEF_FEAT(SET_PROGRAM_PARAMETERS, "sprogp", STFL, 40, "Set-program-parameters facility")
+DEF_FEAT(FLOATING_POINT_SUPPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities")
+DEF_FEAT(DFP, "dfp", STFL, 42, "DFP (decimal-floating-point) facility")
+DEF_FEAT(DFP_FAST, "dfphp", STFL, 43, "DFP (decimal-floating-point) facility has high performance")
+DEF_FEAT(PFPO, "pfpo", STFL, 44, "PFPO instruction")
+DEF_FEAT(STFLE_45, "stfle45", STFL, 45, "Various facilities introduced with z196")
+DEF_FEAT(CMPSC_ENH, "cmpsceh", STFL, 47, "CMPSC-enhancement facility")
+DEF_FEAT(DFP_ZONED_CONVERSION, "dfpzc", STFL, 48, "Decimal-floating-point zoned-conversion facility")
+DEF_FEAT(STFLE_49, "stfle49", STFL, 49, "Various facilities introduced with zEC12")
+DEF_FEAT(CONSTRAINT_TRANSACTIONAL_EXE, "cte", STFL, 50, "Constrained transactional-execution facility")
+DEF_FEAT(LOCAL_TLB_CLEARING, "ltlbc", STFL, 51, "Local-TLB-clearing facility")
+DEF_FEAT(INTERLOCKED_ACCESS_2, "iacc2", STFL, 52, "Interlocked-access facility 2")
+DEF_FEAT(STFLE_53, "stfle53", STFL, 53, "Various facilities introduced with z13")
+DEF_FEAT(ENTROPY_ENC_COMP, "eec", STFL, 54, "Entropy encoding compression facility")
+DEF_FEAT(MSA_EXT_5, "msa5-base", STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)")
+DEF_FEAT(MISC_INSTRUCTION_EXT2, "minste2", STFL, 58, "Miscellaneous-instruction-extensions facility 2")
+DEF_FEAT(SEMAPHORE_ASSIST, "sema", STFL, 59, "Semaphore-assist facility")
+DEF_FEAT(TIME_SLICE_INSTRUMENTATION, "tsi", STFL, 60, "Time-slice Instrumentation facility")
+DEF_FEAT(MISC_INSTRUCTION_EXT3, "minste3", STFL, 61, "Miscellaneous-Instruction-Extensions Facility 3")
+DEF_FEAT(RUNTIME_INSTRUMENTATION, "ri", STFL, 64, "CPU runtime-instrumentation facility")
+DEF_FEAT(AP_QUEUE_INTERRUPT_CONTROL, "apqi", STFL, 65, "AP-Queue interruption facility")
+DEF_FEAT(ZPCI, "zpci", STFL, 69, "z/PCI facility")
+DEF_FEAT(ADAPTER_EVENT_NOTIFICATION, "aen", STFL, 71, "General-purpose-adapter-event-notification facility")
+DEF_FEAT(ADAPTER_INT_SUPPRESSION, "ais", STFL, 72, "General-purpose-adapter-interruption-suppression facility")
+DEF_FEAT(TRANSACTIONAL_EXE, "te", STFL, 73, "Transactional-execution facility")
+DEF_FEAT(STORE_HYPERVISOR_INFO, "sthyi", STFL, 74, "Store-hypervisor-information facility")
+DEF_FEAT(ACCESS_EXCEPTION_FS_INDICATION, "aefsi", STFL, 75, "Access-exception-fetch/store-indication facility")
+DEF_FEAT(MSA_EXT_3, "msa3-base", STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)")
+DEF_FEAT(MSA_EXT_4, "msa4-base", STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)")
+DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2")
+DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility")
+DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed")
+DEF_FEAT(BPB, "bpb", STFL, 82, "Branch prediction blocking")
+DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility")
+DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility")
+DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2")
+DEF_FEAT(GUARDED_STORAGE, "gs", STFL, 133, "Guarded-storage facility")
+DEF_FEAT(VECTOR_PACKED_DECIMAL, "vxpd", STFL, 134, "Vector packed decimal facility")
+DEF_FEAT(VECTOR_ENH, "vxeh", STFL, 135, "Vector enhancements facility")
+DEF_FEAT(MULTIPLE_EPOCH, "mepoch", STFL, 139, "Multiple-epoch facility")
+DEF_FEAT(EXTENDED_LENGTH_SCCB, "els", STFL, 140, "Extended-length SCCB facility")
+DEF_FEAT(TEST_PENDING_EXT_INTERRUPTION, "tpei", STFL, 144, "Test-pending-external-interruption facility")
+DEF_FEAT(INSERT_REFERENCE_BITS_MULT, "irbm", STFL, 145, "Insert-reference-bits-multiple facility")
+DEF_FEAT(MSA_EXT_8, "msa8-base", STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)")
+DEF_FEAT(CMM_NT, "cmmnt", STFL, 147, "CMM: ESSA-enhancement (no translate) facility")
+DEF_FEAT(VECTOR_ENH2, "vxeh2", STFL, 148, "Vector Enhancements facility 2")
+DEF_FEAT(ESORT_BASE, "esort-base", STFL, 150, "Enhanced-sort facility (excluding subfunctions)")
+DEF_FEAT(DEFLATE_BASE, "deflate-base", STFL, 151, "Deflate-conversion facility (excluding subfunctions)")
+DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH, "vxpdeh", STFL, 152, "Vector-Packed-Decimal-Enhancement Facility")
+DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)")
+DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility")
+DEF_FEAT(UNPACK, "unpack", STFL, 161, "Unpack facility")
+DEF_FEAT(NNPA, "nnpa", STFL, 165, "NNPA facility")
+DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH2, "vxpdeh2", STFL, 192, "Vector-Packed-Decimal-Enhancement facility 2")
+DEF_FEAT(BEAR_ENH, "beareh", STFL, 193, "BEAR-enhancement facility")
+DEF_FEAT(RDP, "rdp", STFL, 194, "Reset-DAT-protection facility")
+DEF_FEAT(PAI, "pai", STFL, 196, "Processor-Activity-Instrumentation facility")
+
+/* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */
+DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility")
+DEF_FEAT(ESOP, "esop", SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility")
+DEF_FEAT(HPMA2, "hpma2", SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility") /* 91-2 */
+DEF_FEAT(SIE_KSS, "kss", SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility") /* 98-7 */
+
+/* Features exposed via SCLP SCCB Byte 116 - 119 (bit numbers relative to byte-116) */
+DEF_FEAT(SIE_64BSCAO, "64bscao", SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility")
+DEF_FEAT(SIE_CMMA, "cmma", SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist")
+DEF_FEAT(SIE_PFMFI, "pfmfi", SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility")
+DEF_FEAT(SIE_IBS, "ibs", SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility")
+
+/* Features exposed via SCLP SCCB Facilities byte 134 (bit numbers relative to byte-134) */
+DEF_FEAT(DIAG_318, "diag318", SCLP_FAC134, 0, "Control program name and version codes")
+
+/* Features exposed via SCLP CPU info. */
+DEF_FEAT(SIE_F2, "sief2", SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)")
+DEF_FEAT(SIE_SKEY, "skey", SCLP_CPU, 5, "SIE: Storage-key facility")
+DEF_FEAT(SIE_GPERE, "gpereh", SCLP_CPU, 10, "SIE: Guest-PER enhancement facility")
+DEF_FEAT(SIE_SIIF, "siif", SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility")
+DEF_FEAT(SIE_SIGPIF, "sigpif", SCLP_CPU, 12, "SIE: SIGP interpretation facility")
+DEF_FEAT(SIE_IB, "ib", SCLP_CPU, 42, "SIE: Intervention bypass facility")
+DEF_FEAT(SIE_CEI, "cei", SCLP_CPU, 43, "SIE: Conditional-external-interception facility")
+
+/*
+ * Features exposed via no feature bit (detected e.g. via instruction
+ * sensing) -> the feature bit number is irrelevant
+ */
+DEF_FEAT(DAT_ENH_2, "dateh2", MISC, 0, "DAT-enhancement facility 2")
+DEF_FEAT(CMM, "cmm", MISC, 0, "Collaborative-memory-management facility")
+DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed")
+
+/* Features exposed via the PLO instruction. */
+DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)")
+DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)")
+DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)")
+DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)")
+DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)")
+DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)")
+DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)")
+DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)")
+DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)")
+DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)")
+DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)")
+DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)")
+DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)")
+DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)")
+DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)")
+DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)")
+DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)")
+
+/* Features exposed via the PTFF instruction. */
+DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset")
+DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information")
+DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock")
+DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information")
+DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User")
+DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended")
+DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended")
+DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset")
+DEF_FEAT(PTFF_STOU, "ptff-stou", PTFF, 69, "PTFF Set TOD Offset User")
+DEF_FEAT(PTFF_STOE, "ptff-stoe", PTFF, 73, "PTFF Set TOD Offset Extended")
+DEF_FEAT(PTFF_STOUE, "ptff-stoue", PTFF, 77, "PTFF Set TOD Offset User Extended")
+
+/* Features exposed via the KMAC instruction. */
+DEF_FEAT(KMAC_DEA, "kmac-dea", KMAC, 1, "KMAC DEA")
+DEF_FEAT(KMAC_TDEA_128, "kmac-tdea-128", KMAC, 2, "KMAC TDEA-128")
+DEF_FEAT(KMAC_TDEA_192, "kmac-tdea-192", KMAC, 3, "KMAC TDEA-192")
+DEF_FEAT(KMAC_EDEA, "kmac-edea", KMAC, 9, "KMAC Encrypted-DEA")
+DEF_FEAT(KMAC_ETDEA_128, "kmac-etdea-128", KMAC, 10, "KMAC Encrypted-TDEA-128")
+DEF_FEAT(KMAC_ETDEA_192, "kmac-etdea-192", KMAC, 11, "KMAC Encrypted-TDEA-192")
+DEF_FEAT(KMAC_AES_128, "kmac-aes-128", KMAC, 18, "KMAC AES-128")
+DEF_FEAT(KMAC_AES_192, "kmac-aes-192", KMAC, 19, "KMAC AES-192")
+DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256")
+DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128")
+DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192")
+DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256")
+
+/* Features exposed via the KMC instruction. */
+DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA")
+DEF_FEAT(KMC_TDEA_128, "kmc-tdea-128", KMC, 2, "KMC TDEA-128")
+DEF_FEAT(KMC_TDEA_192, "kmc-tdea-192", KMC, 3, "KMC TDEA-192")
+DEF_FEAT(KMC_EDEA, "kmc-edea", KMC, 9, "KMC Encrypted-DEA")
+DEF_FEAT(KMC_ETDEA_128, "kmc-etdea-128", KMC, 10, "KMC Encrypted-TDEA-128")
+DEF_FEAT(KMC_ETDEA_192, "kmc-etdea-192", KMC, 11, "KMC Encrypted-TDEA-192")
+DEF_FEAT(KMC_AES_128, "kmc-aes-128", KMC, 18, "KMC AES-128")
+DEF_FEAT(KMC_AES_192, "kmc-aes-192", KMC, 19, "KMC AES-192")
+DEF_FEAT(KMC_AES_256, "kmc-aes-256", KMC, 20, "KMC AES-256")
+DEF_FEAT(KMC_EAES_128, "kmc-eaes-128", KMC, 26, "KMC Encrypted-AES-128")
+DEF_FEAT(KMC_EAES_192, "kmc-eaes-192", KMC, 27, "KMC Encrypted-AES-192")
+DEF_FEAT(KMC_EAES_256, "kmc-eaes-256", KMC, 28, "KMC Encrypted-AES-256")
+DEF_FEAT(KMC_PRNG, "kmc-prng", KMC, 67, "KMC PRNG")
+
+/* Features exposed via the KM instruction. */
+DEF_FEAT(KM_DEA, "km-dea", KM, 1, "KM DEA")
+DEF_FEAT(KM_TDEA_128, "km-tdea-128", KM, 2, "KM TDEA-128")
+DEF_FEAT(KM_TDEA_192, "km-tdea-192", KM, 3, "KM TDEA-192")
+DEF_FEAT(KM_EDEA, "km-edea", KM, 9, "KM Encrypted-DEA")
+DEF_FEAT(KM_ETDEA_128, "km-etdea-128", KM, 10, "KM Encrypted-TDEA-128")
+DEF_FEAT(KM_ETDEA_192, "km-etdea-192", KM, 11, "KM Encrypted-TDEA-192")
+DEF_FEAT(KM_AES_128, "km-aes-128", KM, 18, "KM AES-128")
+DEF_FEAT(KM_AES_192, "km-aes-192", KM, 19, "KM AES-192")
+DEF_FEAT(KM_AES_256, "km-aes-256", KM, 20, "KM AES-256")
+DEF_FEAT(KM_EAES_128, "km-eaes-128", KM, 26, "KM Encrypted-AES-128")
+DEF_FEAT(KM_EAES_192, "km-eaes-192", KM, 27, "KM Encrypted-AES-192")
+DEF_FEAT(KM_EAES_256, "km-eaes-256", KM, 28, "KM Encrypted-AES-256")
+DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128")
+DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256")
+DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128")
+DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256")
+
+/* Features exposed via the KIMD instruction. */
+DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1")
+DEF_FEAT(KIMD_SHA_256, "kimd-sha-256", KIMD, 2, "KIMD SHA-256")
+DEF_FEAT(KIMD_SHA_512, "kimd-sha-512", KIMD, 3, "KIMD SHA-512")
+DEF_FEAT(KIMD_SHA3_224, "kimd-sha3-224", KIMD, 32, "KIMD SHA3-224")
+DEF_FEAT(KIMD_SHA3_256, "kimd-sha3-256", KIMD, 33, "KIMD SHA3-256")
+DEF_FEAT(KIMD_SHA3_384, "kimd-sha3-384", KIMD, 34, "KIMD SHA3-384")
+DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512")
+DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128")
+DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256")
+DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH")
+
+/* Features exposed via the KLMD instruction. */
+DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1")
+DEF_FEAT(KLMD_SHA_256, "klmd-sha-256", KLMD, 2, "KLMD SHA-256")
+DEF_FEAT(KLMD_SHA_512, "klmd-sha-512", KLMD, 3, "KLMD SHA-512")
+DEF_FEAT(KLMD_SHA3_224, "klmd-sha3-224", KLMD, 32, "KLMD SHA3-224")
+DEF_FEAT(KLMD_SHA3_256, "klmd-sha3-256", KLMD, 33, "KLMD SHA3-256")
+DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384")
+DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512")
+DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128")
+DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256")
+
+/* Features exposed via the PCKMO instruction. */
+DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key")
+DEF_FEAT(PCKMO_ETDEA_128, "pckmo-etdea-128", PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key")
+DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key")
+DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key")
+DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key")
+DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key")
+DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key")
+DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key")
+DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key")
+DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key")
+DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key")
+
+/* Features exposed via the KMCTR instruction. */
+DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA")
+DEF_FEAT(KMCTR_TDEA_128, "kmctr-tdea-128", KMCTR, 2, "KMCTR TDEA-128")
+DEF_FEAT(KMCTR_TDEA_192, "kmctr-tdea-192", KMCTR, 3, "KMCTR TDEA-192")
+DEF_FEAT(KMCTR_EDEA, "kmctr-edea", KMCTR, 9, "KMCTR Encrypted-DEA")
+DEF_FEAT(KMCTR_ETDEA_128, "kmctr-etdea-128", KMCTR, 10, "KMCTR Encrypted-TDEA-128")
+DEF_FEAT(KMCTR_ETDEA_192, "kmctr-etdea-192", KMCTR, 11, "KMCTR Encrypted-TDEA-192")
+DEF_FEAT(KMCTR_AES_128, "kmctr-aes-128", KMCTR, 18, "KMCTR AES-128")
+DEF_FEAT(KMCTR_AES_192, "kmctr-aes-192", KMCTR, 19, "KMCTR AES-192")
+DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256")
+DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128")
+DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192")
+DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256")
+
+/* Features exposed via the KMF instruction. */
+DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA")
+DEF_FEAT(KMF_TDEA_128, "kmf-tdea-128", KMF, 2, "KMF TDEA-128")
+DEF_FEAT(KMF_TDEA_192, "kmf-tdea-192", KMF, 3, "KMF TDEA-192")
+DEF_FEAT(KMF_EDEA, "kmf-edea", KMF, 9, "KMF Encrypted-DEA")
+DEF_FEAT(KMF_ETDEA_128, "kmf-etdea-128", KMF, 10, "KMF Encrypted-TDEA-128")
+DEF_FEAT(KMF_ETDEA_192, "kmf-etdea-192", KMF, 11, "KMF Encrypted-TDEA-192")
+DEF_FEAT(KMF_AES_128, "kmf-aes-128", KMF, 18, "KMF AES-128")
+DEF_FEAT(KMF_AES_192, "kmf-aes-192", KMF, 19, "KMF AES-192")
+DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256")
+DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128")
+DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192")
+DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256")
+
+/* Features exposed via the KMO instruction. */
+DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA")
+DEF_FEAT(KMO_TDEA_128, "kmo-tdea-128", KMO, 2, "KMO TDEA-128")
+DEF_FEAT(KMO_TDEA_192, "kmo-tdea-192", KMO, 3, "KMO TDEA-192")
+DEF_FEAT(KMO_EDEA, "kmo-edea", KMO, 9, "KMO Encrypted-DEA")
+DEF_FEAT(KMO_ETDEA_128, "kmo-etdea-128", KMO, 10, "KMO Encrypted-TDEA-128")
+DEF_FEAT(KMO_ETDEA_192, "kmo-etdea-192", KMO, 11, "KMO Encrypted-TDEA-192")
+DEF_FEAT(KMO_AES_128, "kmo-aes-128", KMO, 18, "KMO AES-128")
+DEF_FEAT(KMO_AES_192, "kmo-aes-192", KMO, 19, "KMO AES-192")
+DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256")
+DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128")
+DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192")
+DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256")
+
+/* Features exposed via the PCC instruction. */
+DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA")
+DEF_FEAT(PCC_CMAC_TDEA_128, "pcc-cmac-tdea-128", PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128")
+DEF_FEAT(PCC_CMAC_TDEA_192, "pcc-cmac-tdea-192", PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192")
+DEF_FEAT(PCC_CMAC_ETDEA_128, "pcc-cmac-edea", PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA")
+DEF_FEAT(PCC_CMAC_ETDEA_192, "pcc-cmac-etdea-128", PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128")
+DEF_FEAT(PCC_CMAC_TDEA, "pcc-cmac-etdea-192", PCC, 11, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-192")
+DEF_FEAT(PCC_CMAC_AES_128, "pcc-cmac-aes-128", PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128")
+DEF_FEAT(PCC_CMAC_AES_192, "pcc-cmac-aes-192", PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192")
+DEF_FEAT(PCC_CMAC_AES_256, "pcc-cmac-aes-256", PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256")
+DEF_FEAT(PCC_CMAC_EAES_128, "pcc-cmac-eaes-128", PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128")
+DEF_FEAT(PCC_CMAC_EAES_192, "pcc-cmac-eaes-192", PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192")
+DEF_FEAT(PCC_CMAC_EAES_256, "pcc-cmac-eaes-256", PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256")
+DEF_FEAT(PCC_XTS_AES_128, "pcc-xts-aes-128", PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128")
+DEF_FEAT(PCC_XTS_AES_256, "pcc-xts-aes-256", PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256")
+DEF_FEAT(PCC_XTS_EAES_128, "pcc-xts-eaes-128", PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128")
+DEF_FEAT(PCC_XTS_EAES_256, "pcc-xts-eaes-256", PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256")
+DEF_FEAT(PCC_SCALAR_MULT_P256, "pcc-scalar-mult-p256", PCC, 64, "PCC Scalar-Multiply-P256")
+DEF_FEAT(PCC_SCALAR_MULT_P384, "pcc-scalar-mult-p384", PCC, 65, "PCC Scalar-Multiply-P384")
+DEF_FEAT(PCC_SCALAR_MULT_P512, "pcc-scalar-mult-p521", PCC, 66, "PCC Scalar-Multiply-P521")
+DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scalar-Multiply-Ed25519")
+DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448")
+DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519")
+DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448")
+
+/* Features exposed via the PPNO/PRNO instruction. */
+DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG")
+DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio")
+DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG")
+
+/* Features exposed via the KMA instruction. */
+DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128")
+DEF_FEAT(KMA_GCM_AES_192, "kma-gcm-aes-192", KMA, 19, "KMA GCM-AES-192")
+DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256")
+DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128")
+DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192")
+DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256")
+
+/* Features exposed via the KDSA instruction. */
+DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256")
+DEF_FEAT(KDSA_ECDSA_VERIFY_P384, "kdsa-ecdsa-verify-p384", KDSA, 2, "KDSA ECDSA-Verify-P384")
+DEF_FEAT(KDSA_ECDSA_VERIFY_P512, "kdsa-ecdsa-verify-p521", KDSA, 3, "KDSA ECDSA-Verify-P521")
+DEF_FEAT(KDSA_ECDSA_SIGN_P256, "kdsa-ecdsa-sign-p256", KDSA, 9, "KDSA ECDSA-Sign-P256")
+DEF_FEAT(KDSA_ECDSA_SIGN_P384, "kdsa-ecdsa-sign-p384", KDSA, 10, "KDSA ECDSA-Sign-P384")
+DEF_FEAT(KDSA_ECDSA_SIGN_P512, "kdsa-ecdsa-sign-p521", KDSA, 11, "KDSA ECDSA-Sign-P521")
+DEF_FEAT(KDSA_EECDSA_SIGN_P256, "kdsa-eecdsa-sign-p256", KDSA, 17, "KDSA Encrypted-ECDSA-Sign-P256")
+DEF_FEAT(KDSA_EECDSA_SIGN_P384, "kdsa-eecdsa-sign-p384", KDSA, 18, "KDSA Encrypted-ECDSA-Sign-P384")
+DEF_FEAT(KDSA_EECDSA_SIGN_P512, "kdsa-eecdsa-sign-p521", KDSA, 19, "KDSA Encrypted-ECDSA-Sign-P521")
+DEF_FEAT(KDSA_EDDSA_VERIFY_ED25519, "kdsa-eddsa-verify-ed25519", KDSA, 32, "KDSA EdDSA-Verify-Ed25519")
+DEF_FEAT(KDSA_EDDSA_VERIFY_ED448, "kdsa-eddsa-verify-ed448", KDSA, 36, "KDSA EdDSA-Verify-Ed448")
+DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdDSA-Sign-Ed25519")
+DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448")
+DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519")
+DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448")
+
+/* Features exposed via the SORTL instruction. */
+DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR")
+DEF_FEAT(SORTL_SVLR, "sortl-svlr", SORTL, 2, "SORTL SVLR")
+DEF_FEAT(SORTL_32, "sortl-32", SORTL, 130, "SORTL 32 input lists")
+DEF_FEAT(SORTL_128, "sortl-128", SORTL, 132, "SORTL 128 input lists")
+DEF_FEAT(SORTL_F0, "sortl-f0", SORTL, 192, "SORTL format 0 parameter-block")
+
+/* Features exposed via the DEFLATE instruction. */
+DEF_FEAT(DEFLATE_GHDT, "dfltcc-gdht", DFLTCC, 1, "DFLTCC GDHT")
+DEF_FEAT(DEFLATE_CMPR, "dfltcc-cmpr", DFLTCC, 2, "DFLTCC CMPR")
+DEF_FEAT(DEFLATE_XPND, "dfltcc-xpnd", DFLTCC, 4, "DFLTCC XPND")
+DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block")
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
new file mode 100644
index 000000000..11e06cc51
--- /dev/null
+++ b/target/s390x/cpu_models.c
@@ -0,0 +1,1021 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qemu/module.h"
+#include "qemu/qemu-print.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#endif
+#include "hw/s390x/pv.h"
+
+#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
+ { \
+ .name = _name, \
+ .type = _type, \
+ .gen = _gen, \
+ .ec_ga = _ec_ga, \
+ .mha_pow = _mha_pow, \
+ .hmfai = _hmfai, \
+ .desc = _desc, \
+ .base_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _BASE }, \
+ .default_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _DEFAULT }, \
+ .full_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _FULL }, \
+ }
+
+/*
+ * CPU definition list in order of release. Up to generation 14, the base
+ * features of a following release have been a superset of those of the
+ * previous release. With generation 15, one base feature and one optional
+ * feature have been deprecated.
+ */
+static S390CPUDef s390_cpu_defs[] = {
+ CPUDEF_INIT(0x2064, 7, 1, 38, 0x00000000U, "z900", "IBM zSeries 900 GA1"),
+ CPUDEF_INIT(0x2064, 7, 2, 38, 0x00000000U, "z900.2", "IBM zSeries 900 GA2"),
+ CPUDEF_INIT(0x2064, 7, 3, 38, 0x00000000U, "z900.3", "IBM zSeries 900 GA3"),
+ CPUDEF_INIT(0x2066, 7, 3, 38, 0x00000000U, "z800", "IBM zSeries 800 GA1"),
+ CPUDEF_INIT(0x2084, 8, 1, 38, 0x00000000U, "z990", "IBM zSeries 990 GA1"),
+ CPUDEF_INIT(0x2084, 8, 2, 38, 0x00000000U, "z990.2", "IBM zSeries 990 GA2"),
+ CPUDEF_INIT(0x2084, 8, 3, 38, 0x00000000U, "z990.3", "IBM zSeries 990 GA3"),
+ CPUDEF_INIT(0x2086, 8, 3, 38, 0x00000000U, "z890", "IBM zSeries 890 GA1"),
+ CPUDEF_INIT(0x2084, 8, 4, 38, 0x00000000U, "z990.4", "IBM zSeries 990 GA4"),
+ CPUDEF_INIT(0x2086, 8, 4, 38, 0x00000000U, "z890.2", "IBM zSeries 890 GA2"),
+ CPUDEF_INIT(0x2084, 8, 5, 38, 0x00000000U, "z990.5", "IBM zSeries 990 GA5"),
+ CPUDEF_INIT(0x2086, 8, 5, 38, 0x00000000U, "z890.3", "IBM zSeries 890 GA3"),
+ CPUDEF_INIT(0x2094, 9, 1, 40, 0x00000000U, "z9EC", "IBM System z9 EC GA1"),
+ CPUDEF_INIT(0x2094, 9, 2, 40, 0x00000000U, "z9EC.2", "IBM System z9 EC GA2"),
+ CPUDEF_INIT(0x2096, 9, 2, 40, 0x00000000U, "z9BC", "IBM System z9 BC GA1"),
+ CPUDEF_INIT(0x2094, 9, 3, 40, 0x00000000U, "z9EC.3", "IBM System z9 EC GA3"),
+ CPUDEF_INIT(0x2096, 9, 3, 40, 0x00000000U, "z9BC.2", "IBM System z9 BC GA2"),
+ CPUDEF_INIT(0x2097, 10, 1, 43, 0x00000000U, "z10EC", "IBM System z10 EC GA1"),
+ CPUDEF_INIT(0x2097, 10, 2, 43, 0x00000000U, "z10EC.2", "IBM System z10 EC GA2"),
+ CPUDEF_INIT(0x2098, 10, 2, 43, 0x00000000U, "z10BC", "IBM System z10 BC GA1"),
+ CPUDEF_INIT(0x2097, 10, 3, 43, 0x00000000U, "z10EC.3", "IBM System z10 EC GA3"),
+ CPUDEF_INIT(0x2098, 10, 3, 43, 0x00000000U, "z10BC.2", "IBM System z10 BC GA2"),
+ CPUDEF_INIT(0x2817, 11, 1, 44, 0x08000000U, "z196", "IBM zEnterprise 196 GA1"),
+ CPUDEF_INIT(0x2817, 11, 2, 44, 0x08000000U, "z196.2", "IBM zEnterprise 196 GA2"),
+ CPUDEF_INIT(0x2818, 11, 2, 44, 0x08000000U, "z114", "IBM zEnterprise 114 GA1"),
+ CPUDEF_INIT(0x2827, 12, 1, 44, 0x08000000U, "zEC12", "IBM zEnterprise EC12 GA1"),
+ CPUDEF_INIT(0x2827, 12, 2, 44, 0x08000000U, "zEC12.2", "IBM zEnterprise EC12 GA2"),
+ CPUDEF_INIT(0x2828, 12, 2, 44, 0x08000000U, "zBC12", "IBM zEnterprise BC12 GA1"),
+ CPUDEF_INIT(0x2964, 13, 1, 47, 0x08000000U, "z13", "IBM z13 GA1"),
+ CPUDEF_INIT(0x2964, 13, 2, 47, 0x08000000U, "z13.2", "IBM z13 GA2"),
+ CPUDEF_INIT(0x2965, 13, 2, 47, 0x08000000U, "z13s", "IBM z13s GA1"),
+ CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"),
+ CPUDEF_INIT(0x3906, 14, 2, 47, 0x08000000U, "z14.2", "IBM z14 GA2"),
+ CPUDEF_INIT(0x3907, 14, 1, 47, 0x08000000U, "z14ZR1", "IBM z14 Model ZR1 GA1"),
+ CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 T01 GA1"),
+ CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"),
+ CPUDEF_INIT(0x3931, 16, 1, 47, 0x08000000U, "gen16a", "IBM 3931 GA1"),
+ CPUDEF_INIT(0x3932, 16, 1, 47, 0x08000000U, "gen16b", "IBM 3932 GA1"),
+};
+
+#define QEMU_MAX_CPU_TYPE 0x3906
+#define QEMU_MAX_CPU_GEN 14
+#define QEMU_MAX_CPU_EC_GA 2
+static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX };
+static S390FeatBitmap qemu_max_cpu_feat;
+
+/* features part of a base model but not relevant for finding a base model */
+S390FeatBitmap ignored_base_feat;
+
+void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat)
+{
+ const S390CPUDef *def;
+
+ def = s390_find_cpu_def(0, gen, ec_ga, NULL);
+ clear_bit(feat, (unsigned long *)&def->default_feat);
+}
+
+void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ const S390CPUDef *def = &s390_cpu_defs[i];
+
+ if (def->gen < gen) {
+ continue;
+ }
+ if (def->gen == gen && def->ec_ga < ec_ga) {
+ continue;
+ }
+
+ clear_bit(feat, (unsigned long *)&def->default_feat);
+ }
+}
+
+void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga,
+ S390FeatGroup group)
+{
+ const S390FeatGroupDef *group_def = s390_feat_group_def(group);
+ S390FeatBitmap group_def_off;
+ int i;
+
+ bitmap_complement(group_def_off, group_def->feat, S390_FEAT_MAX);
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ const S390CPUDef *cpu_def = &s390_cpu_defs[i];
+
+ if (cpu_def->gen < gen) {
+ continue;
+ }
+ if (cpu_def->gen == gen && cpu_def->ec_ga < ec_ga) {
+ continue;
+ }
+
+ bitmap_and((unsigned long *)&cpu_def->default_feat,
+ cpu_def->default_feat, group_def_off, S390_FEAT_MAX);
+ }
+}
+
+uint32_t s390_get_hmfai(void)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ return cpu->model->def->hmfai;
+}
+
+uint8_t s390_get_mha_pow(void)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ return cpu->model->def->mha_pow;
+}
+
+uint32_t s390_get_ibc_val(void)
+{
+ uint16_t unblocked_ibc, lowest_ibc;
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ unblocked_ibc = s390_ibc_from_cpu_model(cpu->model);
+ lowest_ibc = cpu->model->lowest_ibc;
+ /* the lowest_ibc always has to be <= unblocked_ibc */
+ if (!lowest_ibc || lowest_ibc > unblocked_ibc) {
+ return 0;
+ }
+ return ((uint32_t) lowest_ibc << 16) | unblocked_ibc;
+}
+
+void s390_get_feat_block(S390FeatType type, uint8_t *data)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return;
+ }
+ s390_fill_feat_block(cpu->model->features, type, data);
+}
+
+bool s390_has_feat(S390Feat feat)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ if (feat == S390_FEAT_VECTOR) {
+ return kvm_check_extension(kvm_state,
+ KVM_CAP_S390_VECTOR_REGISTERS);
+ }
+ if (feat == S390_FEAT_RUNTIME_INSTRUMENTATION) {
+ return kvm_s390_get_ri();
+ }
+ if (feat == S390_FEAT_MSA_EXT_3) {
+ return true;
+ }
+ }
+#endif
+ if (feat == S390_FEAT_ZPCI) {
+ return true;
+ }
+        return false;
+ }
+
+ if (s390_is_pv()) {
+ switch (feat) {
+ case S390_FEAT_DIAG_318:
+ case S390_FEAT_HPMA2:
+ case S390_FEAT_SIE_F2:
+ case S390_FEAT_SIE_SKEY:
+ case S390_FEAT_SIE_GPERE:
+ case S390_FEAT_SIE_SIIF:
+ case S390_FEAT_SIE_SIGPIF:
+ case S390_FEAT_SIE_IB:
+ case S390_FEAT_SIE_CEI:
+ case S390_FEAT_SIE_KSS:
+ case S390_FEAT_SIE_GSLS:
+ case S390_FEAT_SIE_64BSCAO:
+ case S390_FEAT_SIE_CMMA:
+ case S390_FEAT_SIE_PFMFI:
+ case S390_FEAT_SIE_IBS:
+            return false;
+ default:
+ break;
+ }
+ }
+ return test_bit(feat, cpu->model->features);
+}
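
For illustration, not part of the patch: all of the queries above reduce to
bitmap operations on S390FeatBitmap, an array of unsigned long indexed by
S390Feat. A self-contained sketch of the test_bit()/set_bit() semantics they
rely on, with invented feature numbers standing in for the generated enum:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* stand-ins for S390Feat; the real enum is generated */
    enum { FEAT_A = 3, FEAT_B = 77, FEAT_MAX = 128 };

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_LONGS  ((FEAT_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG)

    typedef unsigned long FeatBitmap[BITMAP_LONGS];

    static void set_bit_(int n, unsigned long *map)
    {
        map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
    }

    static bool test_bit_(int n, const unsigned long *map)
    {
        return map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG) & 1;
    }

    int main(void)
    {
        FeatBitmap features = { 0 };

        set_bit_(FEAT_B, features);
        /* mirrors the final test_bit() in s390_has_feat() */
        printf("A=%d B=%d\n", test_bit_(FEAT_A, features),
               test_bit_(FEAT_B, features));
        return 0;
    }
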
+
+uint8_t s390_get_gen_for_cpu_type(uint16_t type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ if (s390_cpu_defs[i].type == type) {
+ return s390_cpu_defs[i].gen;
+ }
+ }
+ return 0;
+}
+
+const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ S390FeatBitmap features)
+{
+ const S390CPUDef *last_compatible = NULL;
+ const S390CPUDef *matching_cpu_type = NULL;
+ int i;
+
+ if (!gen) {
+ ec_ga = 0;
+ }
+ if (!gen && type) {
+ gen = s390_get_gen_for_cpu_type(type);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ const S390CPUDef *def = &s390_cpu_defs[i];
+ S390FeatBitmap missing;
+
+ /* don't even try newer generations if we know the generation */
+ if (gen) {
+ if (def->gen > gen) {
+ break;
+ } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
+ break;
+ }
+ }
+
+ if (features) {
+ /* see if the model satisfies the minimum features */
+ bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
+ /*
+ * Ignore certain features that are in the base model, but not
+ * relevant for the search (esp. MSA subfunctions).
+ */
+ bitmap_andnot(missing, missing, ignored_base_feat, S390_FEAT_MAX);
+ if (!bitmap_empty(missing, S390_FEAT_MAX)) {
+ break;
+ }
+ }
+
+ /* stop the search if we found the exact model */
+ if (def->type == type && def->ec_ga == ec_ga) {
+ return def;
+ }
+ /* remember if we've at least seen one with the same cpu type */
+ if (def->type == type) {
+ matching_cpu_type = def;
+ }
+ last_compatible = def;
+ }
+ /* prefer the model with the same cpu type, esp. don't take the BC for EC */
+ if (matching_cpu_type) {
+ return matching_cpu_type;
+ }
+ return last_compatible;
+}
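
For illustration, not part of the patch: the walk above stops before any
definition newer than the requested generation/GA level, returns an exact
match immediately, and otherwise prefers the newest same-type entry over the
plain last compatible one. A reduced standalone sketch over a toy,
release-ordered table (not the real s390_cpu_defs):

    #include <stdint.h>
    #include <stdio.h>

    struct def { uint16_t type; uint8_t gen, ec_ga; const char *name; };

    static const struct def defs[] = {      /* toy table, release-ordered */
        { 0x2964, 13, 1, "z13" },
        { 0x2964, 13, 2, "z13.2" },
        { 0x3906, 14, 1, "z14" },
    };

    static const struct def *find(uint16_t type, uint8_t gen, uint8_t ec_ga)
    {
        const struct def *last = NULL, *same_type = NULL;
        unsigned i;

        for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++) {
            const struct def *d = &defs[i];

            /* never return a model newer than requested */
            if (gen && (d->gen > gen ||
                        (d->gen == gen && ec_ga && d->ec_ga > ec_ga))) {
                break;
            }
            if (d->type == type && d->ec_ga == ec_ga) {
                return d;                   /* exact match */
            }
            if (d->type == type) {
                same_type = d;              /* best same-type candidate */
            }
            last = d;
        }
        return same_type ? same_type : last;
    }

    int main(void)
    {
        /* no exact z13 GA3 entry exists, so this falls back to z13.2 */
        printf("%s\n", find(0x2964, 13, 3)->name);
        return 0;
    }
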
+
+static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data)
+{
+ const S390CPUClass *scc = S390_CPU_CLASS((ObjectClass *)data);
+ char *name = g_strdup(object_class_get_name((ObjectClass *)data));
+ const char *details = "";
+
+ if (scc->is_static) {
+ details = "(static, migration-safe)";
+ } else if (scc->is_migration_safe) {
+ details = "(migration-safe)";
+ }
+
+ /* strip off the -s390x-cpu */
+ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
+ qemu_printf("s390 %-15s %-35s %s\n", name, scc->desc, details);
+ g_free(name);
+}
+
+static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ const S390CPUClass *cc_a = S390_CPU_CLASS((ObjectClass *)a);
+ const S390CPUClass *cc_b = S390_CPU_CLASS((ObjectClass *)b);
+ const char *name_a = object_class_get_name((ObjectClass *)a);
+ const char *name_b = object_class_get_name((ObjectClass *)b);
+
+ /*
+ * Move qemu, host and max to the top of the list, qemu first, host second,
+ * max third.
+ */
+ if (name_a[0] == 'q') {
+ return -1;
+ } else if (name_b[0] == 'q') {
+ return 1;
+ } else if (name_a[0] == 'h') {
+ return -1;
+ } else if (name_b[0] == 'h') {
+ return 1;
+ } else if (name_a[0] == 'm') {
+ return -1;
+ } else if (name_b[0] == 'm') {
+ return 1;
+ }
+
+ /* keep the same order we have in our table (sorted by release date) */
+ if (cc_a->cpu_def != cc_b->cpu_def) {
+ return cc_a->cpu_def - cc_b->cpu_def;
+ }
+
+ /* exact same definition - list base model first */
+ return cc_a->is_static ? -1 : 1;
+}
+
+void s390_cpu_list(void)
+{
+ S390FeatGroup group;
+ S390Feat feat;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_S390_CPU, false);
+ list = g_slist_sort(list, s390_cpu_list_compare);
+ g_slist_foreach(list, s390_print_cpu_model_list_entry, NULL);
+ g_slist_free(list);
+
+ qemu_printf("\nRecognized feature flags:\n");
+ for (feat = 0; feat < S390_FEAT_MAX; feat++) {
+ const S390FeatDef *def = s390_feat_def(feat);
+
+ qemu_printf("%-20s %s\n", def->name, def->desc);
+ }
+
+ qemu_printf("\nRecognized feature groups:\n");
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+
+ qemu_printf("%-20s %s\n", def->name, def->desc);
+ }
+}
+
+static void check_consistency(const S390CPUModel *model)
+{
+ static int dep[][2] = {
+ { S390_FEAT_IPTE_RANGE, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_SEGMENT, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_REGION, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_REGION, S390_FEAT_IDTE_SEGMENT },
+        { S390_FEAT_LOCAL_TLB_CLEARING, S390_FEAT_DAT_ENH },
+ { S390_FEAT_LONG_DISPLACEMENT_FAST, S390_FEAT_LONG_DISPLACEMENT },
+ { S390_FEAT_DFP_FAST, S390_FEAT_DFP },
+ { S390_FEAT_TRANSACTIONAL_EXE, S390_FEAT_STFLE_49 },
+        { S390_FEAT_EDAT_2, S390_FEAT_EDAT },
+ { S390_FEAT_MSA_EXT_5, S390_FEAT_KIMD_SHA_512 },
+ { S390_FEAT_MSA_EXT_5, S390_FEAT_KLMD_SHA_512 },
+ { S390_FEAT_MSA_EXT_4, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_SIE_CMMA, S390_FEAT_CMM },
+ { S390_FEAT_SIE_CMMA, S390_FEAT_SIE_GSLS },
+ { S390_FEAT_SIE_PFMFI, S390_FEAT_EDAT },
+ { S390_FEAT_MSA_EXT_8, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_MSA_EXT_9, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_MSA_EXT_9, S390_FEAT_MSA_EXT_4 },
+ { S390_FEAT_MULTIPLE_EPOCH, S390_FEAT_TOD_CLOCK_STEERING },
+ { S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR },
+ { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, S390_FEAT_VECTOR_PACKED_DECIMAL },
+ { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH },
+ { S390_FEAT_VECTOR_ENH, S390_FEAT_VECTOR },
+ { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2 },
+ { S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP },
+ { S390_FEAT_CMM_NT, S390_FEAT_CMM },
+ { S390_FEAT_GUARDED_STORAGE, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2 },
+ { S390_FEAT_MULTIPLE_EPOCH, S390_FEAT_STORE_CLOCK_FAST },
+ { S390_FEAT_SEMAPHORE_ASSIST, S390_FEAT_STFLE_49 },
+ { S390_FEAT_KIMD_SHA3_224, S390_FEAT_MSA },
+ { S390_FEAT_KIMD_SHA3_256, S390_FEAT_MSA },
+ { S390_FEAT_KIMD_SHA3_384, S390_FEAT_MSA },
+ { S390_FEAT_KIMD_SHA3_512, S390_FEAT_MSA },
+ { S390_FEAT_KIMD_SHAKE_128, S390_FEAT_MSA },
+ { S390_FEAT_KIMD_SHAKE_256, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHA3_224, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHA3_256, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHA3_384, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHA3_512, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHAKE_128, S390_FEAT_MSA },
+ { S390_FEAT_KLMD_SHAKE_256, S390_FEAT_MSA },
+ { S390_FEAT_PRNO_TRNG_QRTCR, S390_FEAT_MSA_EXT_5 },
+ { S390_FEAT_PRNO_TRNG, S390_FEAT_MSA_EXT_5 },
+ { S390_FEAT_SIE_KSS, S390_FEAT_SIE_F2 },
+ { S390_FEAT_AP_QUERY_CONFIG_INFO, S390_FEAT_AP },
+ { S390_FEAT_AP_FACILITIES_TEST, S390_FEAT_AP },
+ { S390_FEAT_PTFF_QSIE, S390_FEAT_MULTIPLE_EPOCH },
+ { S390_FEAT_PTFF_QTOUE, S390_FEAT_MULTIPLE_EPOCH },
+ { S390_FEAT_PTFF_STOE, S390_FEAT_MULTIPLE_EPOCH },
+ { S390_FEAT_PTFF_STOUE, S390_FEAT_MULTIPLE_EPOCH },
+ { S390_FEAT_AP_QUEUE_INTERRUPT_CONTROL, S390_FEAT_AP },
+ { S390_FEAT_DIAG_318, S390_FEAT_EXTENDED_LENGTH_SCCB },
+ { S390_FEAT_NNPA, S390_FEAT_VECTOR },
+ { S390_FEAT_RDP, S390_FEAT_LOCAL_TLB_CLEARING },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dep); i++) {
+ if (test_bit(dep[i][0], model->features) &&
+ !test_bit(dep[i][1], model->features)) {
+ warn_report("\'%s\' requires \'%s\'.",
+ s390_feat_def(dep[i][0])->name,
+ s390_feat_def(dep[i][1])->name);
+ }
+ }
+}
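
For illustration, not part of the patch: the scan above is purely advisory,
it warns and never clears a bit. The same table-driven pattern in a
standalone sketch, with invented feature numbers and a plain uint64_t set in
place of S390FeatBitmap:

    #include <stdint.h>
    #include <stdio.h>

    enum { VECTOR = 1, VECTOR_ENH = 2 };   /* stand-ins for S390Feat values */

    static const int dep[][2] = {          /* each row: feature -> requirement */
        { VECTOR_ENH, VECTOR },
    };

    int main(void)
    {
        uint64_t features = 1ULL << VECTOR_ENH; /* enh without base vector */
        unsigned i;

        for (i = 0; i < sizeof(dep) / sizeof(dep[0]); i++) {
            if ((features >> dep[i][0] & 1) && !(features >> dep[i][1] & 1)) {
                fprintf(stderr, "feature %d requires feature %d\n",
                        dep[i][0], dep[i][1]);
            }
        }
        return 0;
    }
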
+
+static void error_prepend_missing_feat(const char *name, void *opaque)
+{
+ error_prepend((Error **) opaque, "%s ", name);
+}
+
+static void check_compatibility(const S390CPUModel *max_model,
+ const S390CPUModel *model, Error **errp)
+{
+ S390FeatBitmap missing;
+
+ if (model->def->gen > max_model->def->gen) {
+ error_setg(errp, "Selected CPU generation is too new. Maximum "
+ "supported model in the configuration: \'%s\'",
+ max_model->def->name);
+ return;
+ } else if (model->def->gen == max_model->def->gen &&
+ model->def->ec_ga > max_model->def->ec_ga) {
+ error_setg(errp, "Selected CPU GA level is too new. Maximum "
+ "supported model in the configuration: \'%s\'",
+ max_model->def->name);
+ return;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (only_migratable && test_bit(S390_FEAT_UNPACK, model->features)) {
+ error_setg(errp, "The unpack facility is not compatible with "
+ "the --only-migratable option. You must remove either "
+ "the 'unpack' facility or the --only-migratable option");
+ return;
+ }
+#endif
+
+ /* detect the missing features to properly report them */
+ bitmap_andnot(missing, model->features, max_model->features, S390_FEAT_MAX);
+ if (bitmap_empty(missing, S390_FEAT_MAX)) {
+ return;
+ }
+
+ error_setg(errp, " ");
+ s390_feat_bitmap_to_ascii(missing, errp, error_prepend_missing_feat);
+ error_prepend(errp, "Some features requested in the CPU model are not "
+ "available in the configuration: ");
+}
+
+S390CPUModel *get_max_cpu_model(Error **errp)
+{
+ Error *err = NULL;
+ static S390CPUModel max_model;
+ static bool cached;
+
+ if (cached) {
+ return &max_model;
+ }
+
+ if (kvm_enabled()) {
+ kvm_s390_get_host_cpu_model(&max_model, &err);
+ } else {
+ max_model.def = s390_find_cpu_def(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
+ QEMU_MAX_CPU_EC_GA, NULL);
+ bitmap_copy(max_model.features, qemu_max_cpu_feat, S390_FEAT_MAX);
+ }
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+ cached = true;
+ return &max_model;
+}
+
+void s390_realize_cpu_model(CPUState *cs, Error **errp)
+{
+ Error *err = NULL;
+ S390CPUClass *xcc = S390_CPU_GET_CLASS(cs);
+ S390CPU *cpu = S390_CPU(cs);
+ const S390CPUModel *max_model;
+
+ if (xcc->kvm_required && !kvm_enabled()) {
+ error_setg(errp, "CPU definition requires KVM");
+ return;
+ }
+
+ if (!cpu->model) {
+        /* no host model support -> perform compatibility handling */
+ apply_cpu_model(NULL, errp);
+ return;
+ }
+
+ max_model = get_max_cpu_model(errp);
+ if (!max_model) {
+ error_prepend(errp, "CPU models are not available: ");
+ return;
+ }
+
+ /* copy over properties that can vary */
+ cpu->model->lowest_ibc = max_model->lowest_ibc;
+ cpu->model->cpu_id = max_model->cpu_id;
+ cpu->model->cpu_id_format = max_model->cpu_id_format;
+ cpu->model->cpu_ver = max_model->cpu_ver;
+
+ check_consistency(cpu->model);
+ check_compatibility(max_model, cpu->model, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ apply_cpu_model(cpu->model, errp);
+
+#if !defined(CONFIG_USER_ONLY)
+ cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model);
+ if (tcg_enabled()) {
+        /* basic mode, write the cpu address into the first 4 bits of the ID */
+ cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id);
+ }
+#endif
+}
+
+static void get_feature(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390Feat feat = (S390Feat) (uintptr_t) opaque;
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be queried.");
+ return;
+ }
+
+ value = test_bit(feat, cpu->model->features);
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void set_feature(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390Feat feat = (S390Feat) (uintptr_t) opaque;
+ DeviceState *dev = DEVICE(obj);
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set property '%s' on '%s' after "
+ "it was realized", name, object_get_typename(obj));
+ return;
+ } else if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be changed.");
+ return;
+ }
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+ if (value) {
+ if (!test_bit(feat, cpu->model->def->full_feat)) {
+ error_setg(errp, "Feature '%s' is not available for CPU model '%s',"
+ " it was introduced with later models.",
+ name, cpu->model->def->name);
+ return;
+ }
+ set_bit(feat, cpu->model->features);
+ } else {
+ clear_bit(feat, cpu->model->features);
+ }
+}
+
+static void get_feature_group(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390FeatGroup group = (S390FeatGroup) (uintptr_t) opaque;
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ S390CPU *cpu = S390_CPU(obj);
+ S390FeatBitmap tmp;
+ bool value;
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be queried.");
+ return;
+ }
+
+ /* a group is enabled if all features are enabled */
+ bitmap_and(tmp, cpu->model->features, def->feat, S390_FEAT_MAX);
+ value = bitmap_equal(tmp, def->feat, S390_FEAT_MAX);
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void set_feature_group(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390FeatGroup group = (S390FeatGroup) (uintptr_t) opaque;
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ DeviceState *dev = DEVICE(obj);
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set property '%s' on '%s' after "
+ "it was realized", name, object_get_typename(obj));
+ return;
+ } else if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be changed.");
+ return;
+ }
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+ if (value) {
+ /* groups are added in one shot, so an intersect is sufficient */
+ if (!bitmap_intersects(def->feat, cpu->model->def->full_feat,
+ S390_FEAT_MAX)) {
+ error_setg(errp, "Group '%s' is not available for CPU model '%s',"
+ " it was introduced with later models.",
+ name, cpu->model->def->name);
+ return;
+ }
+ bitmap_or(cpu->model->features, cpu->model->features, def->feat,
+ S390_FEAT_MAX);
+ } else {
+ bitmap_andnot(cpu->model->features, cpu->model->features, def->feat,
+ S390_FEAT_MAX);
+ }
+}
+
+static void s390_cpu_model_initfn(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+ S390CPUClass *xcc = S390_CPU_GET_CLASS(cpu);
+
+ cpu->model = g_malloc0(sizeof(*cpu->model));
+ /* copy the model, so we can modify it */
+ cpu->model->def = xcc->cpu_def;
+ if (xcc->is_static) {
+ /* base model - features will never change */
+ bitmap_copy(cpu->model->features, cpu->model->def->base_feat,
+ S390_FEAT_MAX);
+ } else {
+ /* latest model - features can change */
+ bitmap_copy(cpu->model->features,
+ cpu->model->def->default_feat, S390_FEAT_MAX);
+ }
+}
+
+static S390CPUDef s390_qemu_cpu_def;
+static S390CPUModel s390_qemu_cpu_model;
+
+/* Set the qemu CPU model (on machine initialization). Must not be called
+ * once CPUs have been created.
+ */
+void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ const S390FeatInit feat_init)
+{
+ const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL);
+
+ g_assert(def);
+ g_assert(QTAILQ_EMPTY_RCU(&cpus));
+
+    /* TCG emulates some features that usually cannot be enabled with
+ * the emulated machine generation. Make sure they can be enabled
+ * when using the QEMU model by adding them to full_feat. We have
+ * to copy the definition to do that.
+ */
+ memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def));
+ bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat,
+ qemu_max_cpu_feat, S390_FEAT_MAX);
+
+ /* build the CPU model */
+ s390_qemu_cpu_model.def = &s390_qemu_cpu_def;
+ bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX);
+ s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features);
+}
+
+static void s390_qemu_cpu_model_initfn(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ cpu->model = g_malloc0(sizeof(*cpu->model));
+ /* copy the CPU model so we can modify it */
+ memcpy(cpu->model, &s390_qemu_cpu_model, sizeof(*cpu->model));
+}
+
+static void s390_max_cpu_model_initfn(Object *obj)
+{
+ const S390CPUModel *max_model;
+ S390CPU *cpu = S390_CPU(obj);
+ Error *local_err = NULL;
+
+ if (kvm_enabled() && !kvm_s390_cpu_models_supported()) {
+ /* "max" and "host" always work, even without CPU model support */
+ return;
+ }
+
+ max_model = get_max_cpu_model(&local_err);
+ if (local_err) {
+ /* we expect errors only under KVM, when actually querying the kernel */
+ g_assert(kvm_enabled());
+ error_report_err(local_err);
+        /* fall back to running without CPU model support */
+ return;
+ }
+
+ cpu->model = g_new(S390CPUModel, 1);
+ /* copy the CPU model so we can modify it */
+ memcpy(cpu->model, max_model, sizeof(*cpu->model));
+}
+
+static void s390_cpu_model_finalize(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ g_free(cpu->model);
+ cpu->model = NULL;
+}
+
+static bool get_is_migration_safe(Object *obj, Error **errp)
+{
+ return S390_CPU_GET_CLASS(obj)->is_migration_safe;
+}
+
+static bool get_is_static(Object *obj, Error **errp)
+{
+ return S390_CPU_GET_CLASS(obj)->is_static;
+}
+
+static char *get_description(Object *obj, Error **errp)
+{
+ return g_strdup(S390_CPU_GET_CLASS(obj)->desc);
+}
+
+void s390_cpu_model_class_register_props(ObjectClass *oc)
+{
+ S390FeatGroup group;
+ S390Feat feat;
+
+ object_class_property_add_bool(oc, "migration-safe", get_is_migration_safe,
+ NULL);
+ object_class_property_add_bool(oc, "static", get_is_static,
+ NULL);
+ object_class_property_add_str(oc, "description", get_description, NULL);
+
+ for (feat = 0; feat < S390_FEAT_MAX; feat++) {
+ const S390FeatDef *def = s390_feat_def(feat);
+ object_class_property_add(oc, def->name, "bool", get_feature,
+ set_feature, NULL, (void *) feat);
+ object_class_property_set_description(oc, def->name, def->desc);
+ }
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ object_class_property_add(oc, def->name, "bool", get_feature_group,
+ set_feature_group, NULL, (void *) group);
+ object_class_property_set_description(oc, def->name, def->desc);
+ }
+}
+
+#ifdef CONFIG_KVM
+static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ xcc->kvm_required = true;
+ xcc->desc = "KVM only: All recognized features";
+}
+#endif
+
+static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ /* all base models are migration safe */
+ xcc->cpu_def = (const S390CPUDef *) data;
+ xcc->is_migration_safe = true;
+ xcc->is_static = true;
+ xcc->desc = xcc->cpu_def->desc;
+}
+
+static void s390_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ /* model that can change between QEMU versions */
+ xcc->cpu_def = (const S390CPUDef *) data;
+ xcc->is_migration_safe = true;
+ xcc->desc = xcc->cpu_def->desc;
+}
+
+static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ xcc->is_migration_safe = true;
+ xcc->desc = g_strdup_printf("QEMU Virtual CPU version %s",
+ qemu_hw_version());
+}
+
+static void s390_max_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ /*
+ * The "max" model is neither static nor migration safe. Under KVM
+ * it represents the "host" model. Under TCG it represents some kind of
+ * "qemu" CPU model without compat handling and maybe with some additional
+ * CPU features that are not yet unlocked in the "qemu" model.
+ */
+ xcc->desc =
+ "Enables all features supported by the accelerator in the current host";
+}
+
+/* Generate type name for a cpu model. Caller has to free the string. */
+static char *s390_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(S390_CPU_TYPE_NAME("%s"), model_name);
+}
+
+/* Generate type name for a base cpu model. Caller has to free the string. */
+static char *s390_base_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(S390_CPU_TYPE_NAME("%s-base"), model_name);
+}
+
+ObjectClass *s390_cpu_class_by_name(const char *name)
+{
+ char *typename = s390_cpu_type_name(name);
+ ObjectClass *oc;
+
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static const TypeInfo qemu_s390_cpu_type_info = {
+ .name = S390_CPU_TYPE_NAME("qemu"),
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_qemu_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_qemu_cpu_model_class_init,
+};
+
+static const TypeInfo max_s390_cpu_type_info = {
+ .name = S390_CPU_TYPE_NAME("max"),
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_max_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_max_cpu_model_class_init,
+};
+
+#ifdef CONFIG_KVM
+static const TypeInfo host_s390_cpu_type_info = {
+ .name = S390_CPU_TYPE_NAME("host"),
+ .parent = S390_CPU_TYPE_NAME("max"),
+ .class_init = s390_host_cpu_model_class_init,
+};
+#endif
+
+static void init_ignored_base_feat(void)
+{
+ static const int feats[] = {
+        /* MSA subfunctions that might not be available on certain machines */
+ S390_FEAT_KMAC_DEA,
+ S390_FEAT_KMAC_TDEA_128,
+ S390_FEAT_KMAC_TDEA_192,
+ S390_FEAT_KMC_DEA,
+ S390_FEAT_KMC_TDEA_128,
+ S390_FEAT_KMC_TDEA_192,
+ S390_FEAT_KM_DEA,
+ S390_FEAT_KM_TDEA_128,
+ S390_FEAT_KM_TDEA_192,
+ S390_FEAT_KIMD_SHA_1,
+ S390_FEAT_KLMD_SHA_1,
+ /* CSSKE is deprecated on newer generations */
+ S390_FEAT_CONDITIONAL_SSKE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(feats); i++) {
+ set_bit(feats[i], ignored_base_feat);
+ }
+}
+
+static void register_types(void)
+{
+ static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST };
+ int i;
+
+ init_ignored_base_feat();
+
+    /* initialize all bitmaps from the generated data */
+ s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat);
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
+ s390_cpu_defs[i].base_feat);
+ s390_init_feat_bitmap(s390_cpu_defs[i].default_init,
+ s390_cpu_defs[i].default_feat);
+ s390_init_feat_bitmap(s390_cpu_defs[i].full_init,
+ s390_cpu_defs[i].full_feat);
+ }
+
+ /* initialize the qemu model with latest definition */
+ s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
+ QEMU_MAX_CPU_EC_GA, qemu_latest_init);
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name);
+ TypeInfo ti_base = {
+ .name = base_name,
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_base_cpu_model_class_init,
+ .class_data = (void *) &s390_cpu_defs[i],
+ };
+ char *name = s390_cpu_type_name(s390_cpu_defs[i].name);
+ TypeInfo ti = {
+ .name = name,
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_cpu_model_class_init,
+ .class_data = (void *) &s390_cpu_defs[i],
+ };
+
+ type_register_static(&ti_base);
+ type_register_static(&ti);
+ g_free(base_name);
+ g_free(name);
+ }
+
+ type_register_static(&qemu_s390_cpu_type_info);
+ type_register_static(&max_s390_cpu_type_info);
+#ifdef CONFIG_KVM
+ type_register_static(&host_s390_cpu_type_info);
+#endif
+}
+
+type_init(register_types)
diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h
new file mode 100644
index 000000000..74d1f87e4
--- /dev/null
+++ b/target/s390x/cpu_models.h
@@ -0,0 +1,128 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_MODELS_H
+#define TARGET_S390X_CPU_MODELS_H
+
+#include "cpu_features.h"
+#include "target/s390x/gen-features.h"
+#include "hw/core/cpu.h"
+
+/* static CPU definition */
+struct S390CPUDef {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ uint8_t gen; /* hw generation identification */
+ uint16_t type; /* cpu type identification */
+ uint8_t ec_ga; /* EC GA version (on which also the BC is based) */
+    uint8_t mha_pow;        /* Maximum Host Address Power, mha = 2^pow-1 */
+ uint32_t hmfai; /* hypervisor-managed facilities */
+ /* base/min features, must never be changed between QEMU versions */
+ S390FeatBitmap base_feat;
+ /* used to init base_feat from generated data */
+ S390FeatInit base_init;
+    /* default features, QEMU version specific */
+ S390FeatBitmap default_feat;
+ /* used to init default_feat from generated data */
+ S390FeatInit default_init;
+ /* max allowed features, QEMU version specific */
+ S390FeatBitmap full_feat;
+ /* used to init full_feat from generated data */
+ S390FeatInit full_init;
+};
+
+/* CPU model based on a CPU definition */
+struct S390CPUModel {
+ const S390CPUDef *def;
+ S390FeatBitmap features;
+ /* values copied from the "host" model, can change during migration */
+ uint16_t lowest_ibc; /* lowest IBC that the hardware supports */
+ uint32_t cpu_id; /* CPU id */
+ uint8_t cpu_id_format; /* CPU id format bit */
+ uint8_t cpu_ver; /* CPU version, usually "ff" for kvm */
+};
+
+/*
+ * CPU ID
+ *
+ * bits 0-7: Zeroes (ff for kvm)
+ * bits 8-31: CPU ID (serial number)
+ * bits 32-47: Machine type
+ * bit 48: CPU ID format
+ * bits 49-63: Zeroes
+ */
+#define cpuid_type(x) (((x) >> 16) & 0xffff)
+#define cpuid_id(x) (((x) >> 32) & 0xffffff)
+#define cpuid_ver(x) (((x) >> 56) & 0xff)
+#define cpuid_format(x) (((x) >> 15) & 0x1)
+
+#define lowest_ibc(x) (((uint32_t)(x) >> 16) & 0xfff)
+#define unblocked_ibc(x) ((uint32_t)(x) & 0xfff)
+#define has_ibc(x) (lowest_ibc(x) != 0)
+
+#define S390_GEN_Z10 0xa
+#define ibc_gen(x) ((x) == 0 ? 0 : (((x) >> 4) + S390_GEN_Z10))
+#define ibc_ec_ga(x) ((x) & 0xf)
+
+void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat);
+void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat);
+void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga,
+ S390FeatGroup group);
+uint32_t s390_get_hmfai(void);
+uint8_t s390_get_mha_pow(void);
+uint32_t s390_get_ibc_val(void);
+static inline uint16_t s390_ibc_from_cpu_model(const S390CPUModel *model)
+{
+ uint16_t ibc = 0;
+
+ if (model->def->gen >= S390_GEN_Z10) {
+ ibc = ((model->def->gen - S390_GEN_Z10) << 4) + model->def->ec_ga;
+ }
+ return ibc;
+}
+void s390_get_feat_block(S390FeatType type, uint8_t *data);
+bool s390_has_feat(S390Feat feat);
+uint8_t s390_get_gen_for_cpu_type(uint16_t type);
+static inline bool s390_known_cpu_type(uint16_t type)
+{
+ return s390_get_gen_for_cpu_type(type) != 0;
+}
+static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model)
+{
+ return ((uint64_t)model->cpu_ver << 56) |
+ ((uint64_t)model->cpu_id << 32) |
+ ((uint64_t)model->def->type << 16) |
+ (model->def->gen == 7 ? 0 : (uint64_t)model->cpu_id_format << 15);
+}
+S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ S390FeatBitmap features);
+
+#ifdef CONFIG_KVM
+bool kvm_s390_cpu_models_supported(void);
+void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp);
+void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp);
+#else
+static inline void kvm_s390_get_host_cpu_model(S390CPUModel *model,
+ Error **errp)
+{
+}
+static inline void kvm_s390_apply_cpu_model(const S390CPUModel *model,
+ Error **errp)
+{
+}
+static inline bool kvm_s390_cpu_models_supported(void)
+{
+ return false;
+}
+#endif
+
+#endif /* TARGET_S390X_CPU_MODELS_H */
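
For illustration, not part of the patch: the CPU ID layout above can be
checked round-trip. A standalone sketch of the s390_cpuid_from_cpu_model()
packing (gen != 7 case) and the cpuid_*() extractor macros, inlined, using
hypothetical values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical model values: ver ff (kvm), some serial, z14 type */
        uint64_t ver = 0xff, id = 0x12345, type = 0x3906, fmt = 1;

        /* same packing as s390_cpuid_from_cpu_model() for gen != 7 */
        uint64_t cpuid = ver << 56 | id << 32 | type << 16 | fmt << 15;

        /* the cpuid_type/id/ver/format extractor macros, inlined */
        printf("type=0x%" PRIx64 " id=0x%" PRIx64
               " ver=0x%" PRIx64 " fmt=%" PRIu64 "\n",
               cpuid >> 16 & 0xffff, cpuid >> 32 & 0xffffff,
               cpuid >> 56 & 0xff, cpuid >> 15 & 1);
        return 0;
    }
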
diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c
new file mode 100644
index 000000000..05c3ccaaf
--- /dev/null
+++ b/target/s390x/cpu_models_sysemu.c
@@ -0,0 +1,426 @@
+/*
+ * CPU models for s390x - System Emulation-only
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-machine-target.h"
+
+static void list_add_feat(const char *name, void *opaque);
+
+static void check_unavailable_features(const S390CPUModel *max_model,
+ const S390CPUModel *model,
+ strList **unavailable)
+{
+ S390FeatBitmap missing;
+
+ /* check general model compatibility */
+ if (max_model->def->gen < model->def->gen ||
+ (max_model->def->gen == model->def->gen &&
+ max_model->def->ec_ga < model->def->ec_ga)) {
+ list_add_feat("type", unavailable);
+ }
+
+    /* detect missing features, if any, to report them properly */
+ bitmap_andnot(missing, model->features, max_model->features,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(missing, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat);
+ }
+}
+
+struct CpuDefinitionInfoListData {
+ CpuDefinitionInfoList *list;
+ S390CPUModel *model;
+};
+
+static void create_cpu_model_list(ObjectClass *klass, void *opaque)
+{
+ struct CpuDefinitionInfoListData *cpu_list_data = opaque;
+ CpuDefinitionInfoList **cpu_list = &cpu_list_data->list;
+ CpuDefinitionInfo *info;
+ char *name = g_strdup(object_class_get_name(klass));
+ S390CPUClass *scc = S390_CPU_CLASS(klass);
+
+ /* strip off the -s390x-cpu */
+ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
+ info = g_new0(CpuDefinitionInfo, 1);
+ info->name = name;
+ info->has_migration_safe = true;
+ info->migration_safe = scc->is_migration_safe;
+ info->q_static = scc->is_static;
+ info->q_typename = g_strdup(object_class_get_name(klass));
+ /* check for unavailable features */
+ if (cpu_list_data->model) {
+ Object *obj;
+ S390CPU *sc;
+ obj = object_new_with_class(klass);
+ sc = S390_CPU(obj);
+ if (sc->model) {
+ info->has_unavailable_features = true;
+ check_unavailable_features(cpu_list_data->model, sc->model,
+ &info->unavailable_features);
+ }
+ object_unref(obj);
+ }
+
+ QAPI_LIST_PREPEND(*cpu_list, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ struct CpuDefinitionInfoListData list_data = {
+ .list = NULL,
+ };
+
+ list_data.model = get_max_cpu_model(NULL);
+
+ object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false,
+ &list_data);
+
+ return list_data.list;
+}
+
+static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info,
+ Error **errp)
+{
+ Error *err = NULL;
+ const QDict *qdict = NULL;
+ const QDictEntry *e;
+ Visitor *visitor;
+ ObjectClass *oc;
+ S390CPU *cpu;
+ Object *obj;
+
+ if (info->props) {
+ qdict = qobject_to(QDict, info->props);
+ if (!qdict) {
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
+ return;
+ }
+ }
+
+ oc = cpu_class_by_name(TYPE_S390_CPU, info->name);
+ if (!oc) {
+ error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name);
+ return;
+ }
+ if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) {
+ error_setg(errp, "The CPU definition '%s' requires KVM", info->name);
+ return;
+ }
+ obj = object_new_with_class(oc);
+ cpu = S390_CPU(obj);
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "it cannot be used.");
+ object_unref(obj);
+ return;
+ }
+
+ if (qdict) {
+ visitor = qobject_input_visitor_new(info->props);
+ if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) {
+ visit_free(visitor);
+ object_unref(obj);
+ return;
+ }
+ for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
+ if (!object_property_set(obj, e->key, visitor, &err)) {
+ break;
+ }
+ }
+ if (!err) {
+ visit_check_struct(visitor, &err);
+ }
+ visit_end_struct(visitor, NULL);
+ visit_free(visitor);
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(obj);
+ return;
+ }
+ }
+
+ /* copy the model and throw the cpu away */
+ memcpy(model, cpu->model, sizeof(*model));
+ object_unref(obj);
+}
+
+static void qdict_add_disabled_feat(const char *name, void *opaque)
+{
+ qdict_put_bool(opaque, name, false);
+}
+
+static void qdict_add_enabled_feat(const char *name, void *opaque)
+{
+ qdict_put_bool(opaque, name, true);
+}
+
+/* convert an S390CPUModel into a static CpuModelInfo */
+static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
+ bool delta_changes)
+{
+ QDict *qdict = qdict_new();
+ S390FeatBitmap bitmap;
+
+    /* always fall back to the static base model */
+ info->name = g_strdup_printf("%s-base", model->def->name);
+
+ if (delta_changes) {
+ /* features deleted from the base feature set */
+ bitmap_andnot(bitmap, model->def->base_feat, model->features,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ /* features added to the base feature set */
+ bitmap_andnot(bitmap, model->features, model->def->base_feat,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
+ }
+ } else {
+ /* expand all features */
+ s390_feat_bitmap_to_ascii(model->features, qdict,
+ qdict_add_enabled_feat);
+ bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ if (!qdict_size(qdict)) {
+ qobject_unref(qdict);
+ } else {
+ info->props = QOBJECT(qdict);
+ info->has_props = true;
+ }
+}
+
+CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelExpansionInfo *expansion_info = NULL;
+ S390CPUModel s390_model;
+ bool delta_changes = false;
+
+ /* convert it to our internal representation */
+ cpu_model_from_info(&s390_model, model, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) {
+ delta_changes = true;
+ } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
+ error_setg(errp, "The requested expansion type is not supported.");
+ return NULL;
+ }
+
+ /* convert it back to a static representation */
+ expansion_info = g_new0(CpuModelExpansionInfo, 1);
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
+ cpu_info_from_model(expansion_info->model, &s390_model, delta_changes);
+ return expansion_info;
+}
+
+static void list_add_feat(const char *name, void *opaque)
+{
+ strList **last = (strList **) opaque;
+
+ QAPI_LIST_PREPEND(*last, g_strdup(name));
+}
+
+CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelCompareResult feat_result, gen_result;
+ CpuModelCompareInfo *compare_info;
+ S390FeatBitmap missing, added;
+ S390CPUModel modela, modelb;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+ cpu_model_from_info(&modelb, infob, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+ compare_info = g_new0(CpuModelCompareInfo, 1);
+
+ /* check the cpu generation and ga level */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->ec_ga == modelb.def->ec_ga) {
+ /* ec and corresponding bc are identical */
+ gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else if (modela.def->ec_ga < modelb.def->ec_ga) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ } else if (modela.def->gen < modelb.def->gen) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ /* both models cannot be made identical */
+ list_add_feat("type", &compare_info->responsible_properties);
+ }
+
+ /* check the feature set */
+ if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else {
+ bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(missing,
+ &compare_info->responsible_properties,
+ list_add_feat);
+ bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties,
+ list_add_feat);
+ if (bitmap_empty(missing, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else if (bitmap_empty(added, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ } else {
+ feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ }
+
+ /* combine the results */
+ if (gen_result == feat_result) {
+ compare_info->result = gen_result;
+ } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = gen_result;
+ } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = feat_result;
+ } else {
+ compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ return compare_info;
+}
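
For illustration, not part of the patch: stripped of the QAPI plumbing, the
feature comparison is two set differences. A tiny standalone version over
uint64_t sets (the real code uses S390FeatBitmap with bitmap_andnot() and
bitmap_empty()):

    #include <stdint.h>
    #include <stdio.h>

    static const char *compare(uint64_t a, uint64_t b)
    {
        uint64_t missing = a & ~b;      /* in a but not in b */
        uint64_t added   = b & ~a;      /* in b but not in a */

        if (!missing && !added) {
            return "identical";
        }
        if (!missing) {
            return "a is a subset of b";
        }
        if (!added) {
            return "a is a superset of b";
        }
        return "incompatible";
    }

    int main(void)
    {
        printf("%s\n", compare(0x0f, 0xff));    /* subset */
        printf("%s\n", compare(0x0f, 0xf0));    /* incompatible */
        return 0;
    }
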
+
+CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ Error *err = NULL;
+ CpuModelBaselineInfo *baseline_info;
+ S390CPUModel modela, modelb, model;
+ uint16_t cpu_type;
+ uint8_t max_gen_ga;
+ uint8_t max_gen;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ cpu_model_from_info(&modelb, infob, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return NULL;
+ }
+
+ /* features both models support */
+ bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX);
+
+    /* detect the maximum model, ignoring features */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->type == modelb.def->type) {
+ cpu_type = modela.def->type;
+ } else {
+ cpu_type = 0;
+ }
+ max_gen = modela.def->gen;
+ max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga);
+ } else if (modela.def->gen > modelb.def->gen) {
+ cpu_type = modelb.def->type;
+ max_gen = modelb.def->gen;
+ max_gen_ga = modelb.def->ec_ga;
+ } else {
+ cpu_type = modela.def->type;
+ max_gen = modela.def->gen;
+ max_gen_ga = modela.def->ec_ga;
+ }
+
+ model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga,
+ model.features);
+
+ /* models without early base features (esan3) are bad */
+ if (!model.def) {
+ error_setg(errp, "No compatible CPU model could be created as"
+ " important base features are disabled");
+ return NULL;
+ }
+
+ /* strip off features not part of the max model */
+ bitmap_and(model.features, model.features, model.def->full_feat,
+ S390_FEAT_MAX);
+
+ baseline_info = g_new0(CpuModelBaselineInfo, 1);
+ baseline_info->model = g_malloc0(sizeof(*baseline_info->model));
+ cpu_info_from_model(baseline_info->model, &model, true);
+ return baseline_info;
+}
+
+void apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+ Error *err = NULL;
+ static S390CPUModel applied_model;
+ static bool applied;
+
+ /*
+ * We have the same model for all VCPUs. KVM can only be configured before
+ * any VCPUs are defined in KVM.
+ */
+ if (applied) {
+ if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) {
+ error_setg(errp, "Mixed CPU models are not supported on s390x.");
+ }
+ return;
+ }
+
+ if (kvm_enabled()) {
+ kvm_s390_apply_cpu_model(model, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ applied = true;
+ if (model) {
+ applied_model = *model;
+ }
+}
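
For illustration, not part of the patch: the apply-once pattern above caches
the first model and merely verifies later calls, since all VCPUs must carry
the identical model. A standalone sketch with a plain struct and memcmp:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct model { int gen, ec_ga; };

    static int apply_model(const struct model *m)
    {
        static struct model applied;
        static bool done;

        if (done) {
            /* every further CPU must carry the very same model */
            return memcmp(&applied, m, sizeof(*m)) ? -1 : 0;
        }
        applied = *m;
        done = true;
        return 0;
    }

    int main(void)
    {
        struct model a = { 14, 2 }, b = { 13, 1 };
        int r1 = apply_model(&a);
        int r2 = apply_model(&a);
        int r3 = apply_model(&b);

        printf("%d %d %d\n", r1, r2, r3);   /* prints: 0 0 -1 */
        return 0;
    }
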
diff --git a/target/s390x/cpu_models_user.c b/target/s390x/cpu_models_user.c
new file mode 100644
index 000000000..df24d12d9
--- /dev/null
+++ b/target/s390x/cpu_models_user.c
@@ -0,0 +1,20 @@
+/*
+ * CPU models for s390x - User-mode
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "qapi/error.h"
+
+void apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+}
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
new file mode 100644
index 000000000..76b01dcd6
--- /dev/null
+++ b/target/s390x/diag.c
@@ -0,0 +1,185 @@
+/*
+ * S390x DIAG instruction helper functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "hw/watchdog/wdt_diag288.h"
+#include "sysemu/cpus.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/pv.h"
+#include "sysemu/kvm.h"
+#include "kvm/kvm_s390x.h"
+
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+ uint64_t func = env->regs[r1];
+ uint64_t timeout = env->regs[r1 + 1];
+ uint64_t action = env->regs[r3];
+ Object *obj;
+ DIAG288State *diag288;
+ DIAG288Class *diag288_class;
+
+ if (r1 % 2 || action != 0) {
+ return -1;
+ }
+
+    /* Timeout must be at least 15 seconds, except for timer deletion */
+ if (func != WDT_DIAG288_CANCEL && timeout < 15) {
+ return -1;
+ }
+
+ obj = object_resolve_path_type("", TYPE_WDT_DIAG288, NULL);
+ if (!obj) {
+ return -1;
+ }
+
+ diag288 = DIAG288(obj);
+ diag288_class = DIAG288_GET_CLASS(diag288);
+ return diag288_class->handle_timer(diag288, func, timeout);
+}
+
+static int diag308_parm_check(CPUS390XState *env, uint64_t r1, uint64_t addr,
+ uintptr_t ra, bool write)
+{
+ /* Handled by the Ultravisor */
+ if (s390_is_pv()) {
+ return 0;
+ }
+ if ((r1 & 1) || (addr & ~TARGET_PAGE_MASK)) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return -1;
+ }
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(IplParameterBlock), write,
+ MEMTXATTRS_UNSPECIFIED)) {
+ s390_program_interrupt(env, PGM_ADDRESSING, ra);
+ return -1;
+ }
+ return 0;
+}
+
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
+{
+ bool valid;
+ CPUState *cs = env_cpu(env);
+ S390CPU *cpu = S390_CPU(cs);
+ uint64_t addr = env->regs[r1];
+ uint64_t subcode = env->regs[r3];
+ IplParameterBlock *iplb;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ s390_program_interrupt(env, PGM_PRIVILEGED, ra);
+ return;
+ }
+
+ if (subcode & ~0x0ffffULL) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
+ if (subcode >= DIAG308_PV_SET && !s390_has_feat(S390_FEAT_UNPACK)) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
+ switch (subcode) {
+ case DIAG308_RESET_MOD_CLR:
+ s390_ipl_reset_request(cs, S390_RESET_MODIFIED_CLEAR);
+ break;
+ case DIAG308_RESET_LOAD_NORM:
+ s390_ipl_reset_request(cs, S390_RESET_LOAD_NORMAL);
+ break;
+ case DIAG308_LOAD_CLEAR:
+ /* Well we still lack the clearing bit... */
+ s390_ipl_reset_request(cs, S390_RESET_REIPL);
+ break;
+ case DIAG308_SET:
+ case DIAG308_PV_SET:
+ if (diag308_parm_check(env, r1, addr, ra, false)) {
+ return;
+ }
+ iplb = g_new0(IplParameterBlock, 1);
+ if (!s390_is_pv()) {
+ cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
+ } else {
+ s390_cpu_pv_mem_read(cpu, 0, iplb, sizeof(iplb->len));
+ }
+
+ if (!iplb_valid_len(iplb)) {
+ env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ goto out;
+ }
+
+ if (!s390_is_pv()) {
+ cpu_physical_memory_read(addr, iplb, be32_to_cpu(iplb->len));
+ } else {
+ s390_cpu_pv_mem_read(cpu, 0, iplb, be32_to_cpu(iplb->len));
+ }
+
+ valid = subcode == DIAG308_PV_SET ? iplb_valid_pv(iplb) : iplb_valid(iplb);
+ if (!valid) {
+ env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ goto out;
+ }
+
+ s390_ipl_update_diag308(iplb);
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
+out:
+ g_free(iplb);
+ return;
+ case DIAG308_STORE:
+ case DIAG308_PV_STORE:
+ if (diag308_parm_check(env, r1, addr, ra, true)) {
+ return;
+ }
+ if (subcode == DIAG308_PV_STORE) {
+ iplb = s390_ipl_get_iplb_pv();
+ } else {
+ iplb = s390_ipl_get_iplb();
+ }
+ if (!iplb) {
+ env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
+ return;
+ }
+
+ if (!s390_is_pv()) {
+ cpu_physical_memory_write(addr, iplb, be32_to_cpu(iplb->len));
+ } else {
+ s390_cpu_pv_mem_write(cpu, 0, iplb, be32_to_cpu(iplb->len));
+ }
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
+ return;
+ case DIAG308_PV_START:
+ iplb = s390_ipl_get_iplb_pv();
+ if (!iplb) {
+ env->regs[r1 + 1] = DIAG_308_RC_NO_PV_CONF;
+ return;
+ }
+
+ if (kvm_enabled() && kvm_s390_get_hpage_1m()) {
+            error_report("Protected VMs currently cannot be backed with "
+                         "huge pages");
+ env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
+ return;
+ }
+
+ s390_ipl_reset_request(cs, S390_RESET_PV);
+ break;
+ default:
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ break;
+ }
+}
diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
new file mode 100644
index 000000000..a5d69d0e0
--- /dev/null
+++ b/target/s390x/gdbstub.c
@@ -0,0 +1,329 @@
+/*
+ * s390x gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/bitops.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/tcg.h"
+
+int s390_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ switch (n) {
+ case S390_PSWM_REGNUM:
+ return gdb_get_regl(mem_buf, s390_cpu_get_psw_mask(env));
+ case S390_PSWA_REGNUM:
+ return gdb_get_regl(mem_buf, env->psw.addr);
+ case S390_R0_REGNUM ... S390_R15_REGNUM:
+ return gdb_get_regl(mem_buf, env->regs[n - S390_R0_REGNUM]);
+ }
+ return 0;
+}
+
+int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ target_ulong tmpl = ldtul_p(mem_buf);
+
+ switch (n) {
+ case S390_PSWM_REGNUM:
+ s390_cpu_set_psw(env, tmpl, env->psw.addr);
+ break;
+ case S390_PSWA_REGNUM:
+ env->psw.addr = tmpl;
+ break;
+ case S390_R0_REGNUM ... S390_R15_REGNUM:
+ env->regs[n - S390_R0_REGNUM] = tmpl;
+ break;
+ default:
+ return 0;
+ }
+ return 8;
+}
+
+/* the values represent the positions in s390-acr.xml */
+#define S390_A0_REGNUM 0
+#define S390_A15_REGNUM 15
+/* total number of registers in s390-acr.xml */
+#define S390_NUM_AC_REGS 16
+
+static int cpu_read_ac_reg(CPUS390XState *env, GByteArray *buf, int n)
+{
+ switch (n) {
+ case S390_A0_REGNUM ... S390_A15_REGNUM:
+ return gdb_get_reg32(buf, env->aregs[n]);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_A0_REGNUM ... S390_A15_REGNUM:
+ env->aregs[n] = ldl_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-fpr.xml */
+#define S390_FPC_REGNUM 0
+#define S390_F0_REGNUM 1
+#define S390_F15_REGNUM 16
+/* total number of registers in s390-fpr.xml */
+#define S390_NUM_FP_REGS 17
+
+static int cpu_read_fp_reg(CPUS390XState *env, GByteArray *buf, int n)
+{
+ switch (n) {
+ case S390_FPC_REGNUM:
+ return gdb_get_reg32(buf, env->fpc);
+ case S390_F0_REGNUM ... S390_F15_REGNUM:
+ return gdb_get_reg64(buf, *get_freg(env, n - S390_F0_REGNUM));
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_FPC_REGNUM:
+ env->fpc = ldl_p(mem_buf);
+ return 4;
+ case S390_F0_REGNUM ... S390_F15_REGNUM:
+ *get_freg(env, n - S390_F0_REGNUM) = ldtul_p(mem_buf);
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-vx.xml */
+#define S390_V0L_REGNUM 0
+#define S390_V15L_REGNUM 15
+#define S390_V16_REGNUM 16
+#define S390_V31_REGNUM 31
+/* total number of registers in s390-vx.xml */
+#define S390_NUM_VREGS 32
+
+static int cpu_read_vreg(CPUS390XState *env, GByteArray *buf, int n)
+{
+ int ret;
+
+ switch (n) {
+ case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+ ret = gdb_get_reg64(buf, env->vregs[n][1]);
+ break;
+ case S390_V16_REGNUM ... S390_V31_REGNUM:
+ ret = gdb_get_reg64(buf, env->vregs[n][0]);
+ ret += gdb_get_reg64(buf, env->vregs[n][1]);
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+ env->vregs[n][1] = ldtul_p(mem_buf + 8);
+ return 8;
+ case S390_V16_REGNUM ... S390_V31_REGNUM:
+ env->vregs[n][0] = ldtul_p(mem_buf);
+ env->vregs[n][1] = ldtul_p(mem_buf + 8);
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-cr.xml */
+#define S390_C0_REGNUM 0
+#define S390_C15_REGNUM 15
+/* total number of registers in s390-cr.xml */
+#define S390_NUM_C_REGS 16
+
+#ifndef CONFIG_USER_ONLY
+static int cpu_read_c_reg(CPUS390XState *env, GByteArray *buf, int n)
+{
+ switch (n) {
+ case S390_C0_REGNUM ... S390_C15_REGNUM:
+ return gdb_get_regl(buf, env->cregs[n]);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_C0_REGNUM ... S390_C15_REGNUM:
+ env->cregs[n] = ldtul_p(mem_buf);
+ if (tcg_enabled()) {
+ tlb_flush(env_cpu(env));
+ }
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-virt.xml */
+#define S390_VIRT_CKC_REGNUM 0
+#define S390_VIRT_CPUTM_REGNUM 1
+#define S390_VIRT_BEA_REGNUM 2
+#define S390_VIRT_PREFIX_REGNUM 3
+#define S390_VIRT_PP_REGNUM 4
+#define S390_VIRT_PFT_REGNUM 5
+#define S390_VIRT_PFS_REGNUM 6
+#define S390_VIRT_PFC_REGNUM 7
+/* total number of registers in s390-virt.xml */
+#define S390_NUM_VIRT_REGS 8
+
+static int cpu_read_virt_reg(CPUS390XState *env, GByteArray *mem_buf, int n)
+{
+ switch (n) {
+ case S390_VIRT_CKC_REGNUM:
+ return gdb_get_regl(mem_buf, env->ckc);
+ case S390_VIRT_CPUTM_REGNUM:
+ return gdb_get_regl(mem_buf, env->cputm);
+ case S390_VIRT_BEA_REGNUM:
+ return gdb_get_regl(mem_buf, env->gbea);
+ case S390_VIRT_PREFIX_REGNUM:
+ return gdb_get_regl(mem_buf, env->psa);
+ case S390_VIRT_PP_REGNUM:
+ return gdb_get_regl(mem_buf, env->pp);
+ case S390_VIRT_PFT_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_token);
+ case S390_VIRT_PFS_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_select);
+ case S390_VIRT_PFC_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_compare);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_VIRT_CKC_REGNUM:
+ env->ckc = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_CPUTM_REGNUM:
+ env->cputm = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_BEA_REGNUM:
+ env->gbea = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_PREFIX_REGNUM:
+ env->psa = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_PP_REGNUM:
+ env->pp = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_PFT_REGNUM:
+ env->pfault_token = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_PFS_REGNUM:
+ env->pfault_select = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ case S390_VIRT_PFC_REGNUM:
+ env->pfault_compare = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+ default:
+ return 0;
+ }
+}
+#endif
+
+/* the values represent the positions in s390-gs.xml */
+#define S390_GS_RESERVED_REGNUM 0
+#define S390_GS_GSD_REGNUM 1
+#define S390_GS_GSSM_REGNUM 2
+#define S390_GS_GSEPLA_REGNUM 3
+/* total number of registers in s390-gs.xml */
+#define S390_NUM_GS_REGS 4
+
+static int cpu_read_gs_reg(CPUS390XState *env, GByteArray *buf, int n)
+{
+ return gdb_get_regl(buf, env->gscb[n]);
+}
+
+static int cpu_write_gs_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ env->gscb[n] = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(env_cpu(env));
+ return 8;
+}
+
+void s390_cpu_gdb_init(CPUState *cs)
+{
+ gdb_register_coprocessor(cs, cpu_read_ac_reg,
+ cpu_write_ac_reg,
+ S390_NUM_AC_REGS, "s390-acr.xml", 0);
+
+ gdb_register_coprocessor(cs, cpu_read_fp_reg,
+ cpu_write_fp_reg,
+ S390_NUM_FP_REGS, "s390-fpr.xml", 0);
+
+ gdb_register_coprocessor(cs, cpu_read_vreg,
+ cpu_write_vreg,
+ S390_NUM_VREGS, "s390-vx.xml", 0);
+
+ gdb_register_coprocessor(cs, cpu_read_gs_reg,
+ cpu_write_gs_reg,
+ S390_NUM_GS_REGS, "s390-gs.xml", 0);
+
+#ifndef CONFIG_USER_ONLY
+ gdb_register_coprocessor(cs, cpu_read_c_reg,
+ cpu_write_c_reg,
+ S390_NUM_C_REGS, "s390-cr.xml", 0);
+
+ if (kvm_enabled()) {
+ gdb_register_coprocessor(cs, cpu_read_virt_reg,
+ cpu_write_virt_reg,
+ S390_NUM_VIRT_REGS, "s390-virt.xml", 0);
+ }
+#endif
+}
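
For illustration, not part of the patch: each read callback returns the
number of bytes it appended to the reply, and gdb_get_reg32()/gdb_get_reg64()
store the value in target byte order, big-endian on s390x. A standalone
sketch of that contract, with a plain byte array standing in for GByteArray:

    #include <stdint.h>
    #include <stdio.h>

    /* minimal stand-in for gdb_get_reg64(): append big-endian, return length */
    static int get_reg64(uint8_t *buf, uint64_t val)
    {
        int i;

        for (i = 0; i < 8; i++) {
            buf[i] = val >> (56 - 8 * i) & 0xff;
        }
        return 8;
    }

    int main(void)
    {
        uint8_t buf[8];
        int len = get_reg64(buf, 0x0102030405060708ULL), i;

        for (i = 0; i < len; i++) {
            printf("%02x", buf[i]);
        }
        printf("\n");   /* prints: 0102030405060708 */
        return 0;
    }
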
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
new file mode 100644
index 000000000..7cb1a6ec1
--- /dev/null
+++ b/target/s390x/gen-features.c
@@ -0,0 +1,1015 @@
+/*
+ * S390 feature list generator
+ *
+ * Copyright IBM Corp. 2016, 2018
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include "cpu_features_def.h"
+
+#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
+
+/***** BEGIN FEATURE DEFS *****/
+
+#define S390_FEAT_GROUP_PLO \
+ S390_FEAT_PLO_CL, \
+ S390_FEAT_PLO_CLG, \
+ S390_FEAT_PLO_CLGR, \
+ S390_FEAT_PLO_CLX, \
+ S390_FEAT_PLO_CS, \
+ S390_FEAT_PLO_CSG, \
+ S390_FEAT_PLO_CSGR, \
+ S390_FEAT_PLO_CSX, \
+ S390_FEAT_PLO_DCS, \
+ S390_FEAT_PLO_DCSG, \
+ S390_FEAT_PLO_DCSGR, \
+ S390_FEAT_PLO_DCSX, \
+ S390_FEAT_PLO_CSST, \
+ S390_FEAT_PLO_CSSTG, \
+ S390_FEAT_PLO_CSSTGR, \
+ S390_FEAT_PLO_CSSTX, \
+ S390_FEAT_PLO_CSDST, \
+ S390_FEAT_PLO_CSDSTG, \
+ S390_FEAT_PLO_CSDSTGR, \
+ S390_FEAT_PLO_CSDSTX, \
+ S390_FEAT_PLO_CSTST, \
+ S390_FEAT_PLO_CSTSTG, \
+ S390_FEAT_PLO_CSTSTGR, \
+ S390_FEAT_PLO_CSTSTX
+
+#define S390_FEAT_GROUP_TOD_CLOCK_STEERING \
+ S390_FEAT_TOD_CLOCK_STEERING, \
+ S390_FEAT_PTFF_QTO, \
+ S390_FEAT_PTFF_QSI, \
+ S390_FEAT_PTFF_QPT, \
+ S390_FEAT_PTFF_STO
+
+#define S390_FEAT_GROUP_GEN13_PTFF \
+ S390_FEAT_PTFF_QUI, \
+ S390_FEAT_PTFF_QTOU, \
+ S390_FEAT_PTFF_STOU
+
+#define S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF \
+ S390_FEAT_PTFF_QSIE, \
+ S390_FEAT_PTFF_QTOUE, \
+ S390_FEAT_PTFF_STOE, \
+ S390_FEAT_PTFF_STOUE
+
+#define S390_FEAT_GROUP_MSA \
+ S390_FEAT_MSA, \
+ S390_FEAT_KMAC_DEA, \
+ S390_FEAT_KMAC_TDEA_128, \
+ S390_FEAT_KMAC_TDEA_192, \
+ S390_FEAT_KMC_DEA, \
+ S390_FEAT_KMC_TDEA_128, \
+ S390_FEAT_KMC_TDEA_192, \
+ S390_FEAT_KM_DEA, \
+ S390_FEAT_KM_TDEA_128, \
+ S390_FEAT_KM_TDEA_192, \
+ S390_FEAT_KIMD_SHA_1, \
+ S390_FEAT_KLMD_SHA_1
+
+#define S390_FEAT_GROUP_MSA_EXT_1 \
+ S390_FEAT_KMC_AES_128, \
+ S390_FEAT_KM_AES_128, \
+ S390_FEAT_KIMD_SHA_256, \
+ S390_FEAT_KLMD_SHA_256
+
+#define S390_FEAT_GROUP_MSA_EXT_2 \
+ S390_FEAT_KMC_AES_192, \
+ S390_FEAT_KMC_AES_256, \
+ S390_FEAT_KMC_PRNG, \
+ S390_FEAT_KM_AES_192, \
+ S390_FEAT_KM_AES_256, \
+ S390_FEAT_KIMD_SHA_512, \
+ S390_FEAT_KLMD_SHA_512
+
+#define S390_FEAT_GROUP_MSA_EXT_3 \
+ S390_FEAT_MSA_EXT_3, \
+ S390_FEAT_KMAC_EDEA, \
+ S390_FEAT_KMAC_ETDEA_128, \
+ S390_FEAT_KMAC_ETDEA_192, \
+ S390_FEAT_KMC_EAES_128, \
+ S390_FEAT_KMC_EAES_192, \
+ S390_FEAT_KMC_EAES_256, \
+ S390_FEAT_KMC_EDEA, \
+ S390_FEAT_KMC_ETDEA_128, \
+ S390_FEAT_KMC_ETDEA_192, \
+ S390_FEAT_KM_EDEA, \
+ S390_FEAT_KM_ETDEA_128, \
+ S390_FEAT_KM_ETDEA_192, \
+ S390_FEAT_KM_EAES_128, \
+ S390_FEAT_KM_EAES_192, \
+ S390_FEAT_KM_EAES_256, \
+ S390_FEAT_PCKMO_EDEA, \
+ S390_FEAT_PCKMO_ETDEA_128, \
+ S390_FEAT_PCKMO_ETDEA_256, \
+ S390_FEAT_PCKMO_AES_128, \
+ S390_FEAT_PCKMO_AES_192, \
+ S390_FEAT_PCKMO_AES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_4 \
+ S390_FEAT_MSA_EXT_4, \
+ S390_FEAT_KMAC_AES_128, \
+ S390_FEAT_KMAC_AES_192, \
+ S390_FEAT_KMAC_AES_256, \
+ S390_FEAT_KMAC_EAES_128, \
+ S390_FEAT_KMAC_EAES_192, \
+ S390_FEAT_KMAC_EAES_256, \
+ S390_FEAT_KM_XTS_AES_128, \
+ S390_FEAT_KM_XTS_AES_256, \
+ S390_FEAT_KM_XTS_EAES_128, \
+ S390_FEAT_KM_XTS_EAES_256, \
+ S390_FEAT_KIMD_GHASH, \
+ S390_FEAT_KMCTR_DEA, \
+ S390_FEAT_KMCTR_TDEA_128, \
+ S390_FEAT_KMCTR_TDEA_192, \
+ S390_FEAT_KMCTR_EDEA, \
+ S390_FEAT_KMCTR_ETDEA_128, \
+ S390_FEAT_KMCTR_ETDEA_192, \
+ S390_FEAT_KMCTR_AES_128, \
+ S390_FEAT_KMCTR_AES_192, \
+ S390_FEAT_KMCTR_AES_256, \
+ S390_FEAT_KMCTR_EAES_128, \
+ S390_FEAT_KMCTR_EAES_192, \
+ S390_FEAT_KMCTR_EAES_256, \
+ S390_FEAT_KMF_DEA, \
+ S390_FEAT_KMF_TDEA_128, \
+ S390_FEAT_KMF_TDEA_192, \
+ S390_FEAT_KMF_EDEA, \
+ S390_FEAT_KMF_ETDEA_128, \
+ S390_FEAT_KMF_ETDEA_192, \
+ S390_FEAT_KMF_AES_128, \
+ S390_FEAT_KMF_AES_192, \
+ S390_FEAT_KMF_AES_256, \
+ S390_FEAT_KMF_EAES_128, \
+ S390_FEAT_KMF_EAES_192, \
+ S390_FEAT_KMF_EAES_256, \
+ S390_FEAT_KMO_DEA, \
+ S390_FEAT_KMO_TDEA_128, \
+ S390_FEAT_KMO_TDEA_192, \
+ S390_FEAT_KMO_EDEA, \
+ S390_FEAT_KMO_ETDEA_128, \
+ S390_FEAT_KMO_ETDEA_192, \
+ S390_FEAT_KMO_AES_128, \
+ S390_FEAT_KMO_AES_192, \
+ S390_FEAT_KMO_AES_256, \
+ S390_FEAT_KMO_EAES_128, \
+ S390_FEAT_KMO_EAES_192, \
+ S390_FEAT_KMO_EAES_256, \
+ S390_FEAT_PCC_CMAC_DEA, \
+ S390_FEAT_PCC_CMAC_TDEA_128, \
+ S390_FEAT_PCC_CMAC_TDEA_192, \
+ S390_FEAT_PCC_CMAC_ETDEA_128, \
+ S390_FEAT_PCC_CMAC_ETDEA_192, \
+ S390_FEAT_PCC_CMAC_TDEA, \
+ S390_FEAT_PCC_CMAC_AES_128, \
+ S390_FEAT_PCC_CMAC_AES_192, \
+ S390_FEAT_PCC_CMAC_AES_256, \
+ S390_FEAT_PCC_CMAC_EAES_128, \
+ S390_FEAT_PCC_CMAC_EAES_192, \
+ S390_FEAT_PCC_CMAC_EAES_256, \
+ S390_FEAT_PCC_XTS_AES_128, \
+ S390_FEAT_PCC_XTS_AES_256, \
+ S390_FEAT_PCC_XTS_EAES_128, \
+ S390_FEAT_PCC_XTS_EAES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_5 \
+ S390_FEAT_MSA_EXT_5, \
+ S390_FEAT_PPNO_SHA_512_DRNG
+
+#define S390_FEAT_GROUP_MSA_EXT_6 \
+ S390_FEAT_KIMD_SHA3_224, \
+ S390_FEAT_KIMD_SHA3_256, \
+ S390_FEAT_KIMD_SHA3_384, \
+ S390_FEAT_KIMD_SHA3_512, \
+ S390_FEAT_KIMD_SHAKE_128, \
+ S390_FEAT_KIMD_SHAKE_256, \
+ S390_FEAT_KLMD_SHA3_224, \
+ S390_FEAT_KLMD_SHA3_256, \
+ S390_FEAT_KLMD_SHA3_384, \
+ S390_FEAT_KLMD_SHA3_512, \
+ S390_FEAT_KLMD_SHAKE_128, \
+ S390_FEAT_KLMD_SHAKE_256
+
+#define S390_FEAT_GROUP_MSA_EXT_7 \
+ S390_FEAT_PRNO_TRNG_QRTCR, \
+ S390_FEAT_PRNO_TRNG
+
+#define S390_FEAT_GROUP_MSA_EXT_8 \
+ S390_FEAT_MSA_EXT_8, \
+ S390_FEAT_KMA_GCM_AES_128, \
+ S390_FEAT_KMA_GCM_AES_192, \
+ S390_FEAT_KMA_GCM_AES_256, \
+ S390_FEAT_KMA_GCM_EAES_128, \
+ S390_FEAT_KMA_GCM_EAES_192, \
+ S390_FEAT_KMA_GCM_EAES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_9 \
+ S390_FEAT_MSA_EXT_9, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P256, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P384, \
+ S390_FEAT_KDSA_ECDSA_VERIFY_P512, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P256, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P384, \
+ S390_FEAT_KDSA_ECDSA_SIGN_P512, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P256, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P384, \
+ S390_FEAT_KDSA_EECDSA_SIGN_P512, \
+ S390_FEAT_KDSA_EDDSA_VERIFY_ED25519, \
+ S390_FEAT_KDSA_EDDSA_VERIFY_ED448, \
+ S390_FEAT_KDSA_EDDSA_SIGN_ED25519, \
+ S390_FEAT_KDSA_EDDSA_SIGN_ED448, \
+ S390_FEAT_KDSA_EEDDSA_SIGN_ED25519, \
+ S390_FEAT_KDSA_EEDDSA_SIGN_ED448, \
+ S390_FEAT_PCC_SCALAR_MULT_P256, \
+ S390_FEAT_PCC_SCALAR_MULT_P384, \
+ S390_FEAT_PCC_SCALAR_MULT_P512, \
+ S390_FEAT_PCC_SCALAR_MULT_ED25519, \
+ S390_FEAT_PCC_SCALAR_MULT_ED448, \
+ S390_FEAT_PCC_SCALAR_MULT_X25519, \
+ S390_FEAT_PCC_SCALAR_MULT_X448
+
+#define S390_FEAT_GROUP_MSA_EXT_9_PCKMO \
+ S390_FEAT_PCKMO_ECC_P256, \
+ S390_FEAT_PCKMO_ECC_P384, \
+ S390_FEAT_PCKMO_ECC_P521, \
+ S390_FEAT_PCKMO_ECC_ED25519, \
+ S390_FEAT_PCKMO_ECC_ED448
+
+#define S390_FEAT_GROUP_ENH_SORT \
+ S390_FEAT_ESORT_BASE, \
+ S390_FEAT_SORTL_SFLR, \
+ S390_FEAT_SORTL_SVLR, \
+ S390_FEAT_SORTL_32, \
+ S390_FEAT_SORTL_128, \
+ S390_FEAT_SORTL_F0
+
+#define S390_FEAT_GROUP_DEFLATE_CONVERSION \
+ S390_FEAT_DEFLATE_BASE, \
+ S390_FEAT_DEFLATE_GHDT, \
+ S390_FEAT_DEFLATE_CMPR, \
+ S390_FEAT_DEFLATE_XPND, \
+ S390_FEAT_DEFLATE_F0
+
+/* cpu feature groups */
+static uint16_t group_PLO[] = {
+ S390_FEAT_GROUP_PLO,
+};
+static uint16_t group_TOD_CLOCK_STEERING[] = {
+ S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+};
+static uint16_t group_GEN13_PTFF[] = {
+ S390_FEAT_GROUP_GEN13_PTFF,
+};
+static uint16_t group_MULTIPLE_EPOCH_PTFF[] = {
+ S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
+};
+static uint16_t group_MSA[] = {
+ S390_FEAT_GROUP_MSA,
+};
+static uint16_t group_MSA_EXT_1[] = {
+ S390_FEAT_GROUP_MSA_EXT_1,
+};
+static uint16_t group_MSA_EXT_2[] = {
+ S390_FEAT_GROUP_MSA_EXT_2,
+};
+static uint16_t group_MSA_EXT_3[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+};
+static uint16_t group_MSA_EXT_4[] = {
+ S390_FEAT_GROUP_MSA_EXT_4,
+};
+static uint16_t group_MSA_EXT_5[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+};
+static uint16_t group_MSA_EXT_6[] = {
+ S390_FEAT_GROUP_MSA_EXT_6,
+};
+static uint16_t group_MSA_EXT_7[] = {
+ S390_FEAT_GROUP_MSA_EXT_7,
+};
+static uint16_t group_MSA_EXT_8[] = {
+ S390_FEAT_GROUP_MSA_EXT_8,
+};
+
+static uint16_t group_MSA_EXT_9[] = {
+ S390_FEAT_GROUP_MSA_EXT_9,
+};
+
+static uint16_t group_MSA_EXT_9_PCKMO[] = {
+ S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
+};
+
+static uint16_t group_ENH_SORT[] = {
+ S390_FEAT_GROUP_ENH_SORT,
+};
+
+static uint16_t group_DEFLATE_CONVERSION[] = {
+ S390_FEAT_GROUP_DEFLATE_CONVERSION,
+};
+
+/* Base features (in order of release)
+ * Only non-hypervisor-managed features belong here.
+ * Base feature sets are static, meaning they do not change in future QEMU
+ * releases.
+ */
+static uint16_t base_GEN7_GA1[] = {
+ S390_FEAT_GROUP_PLO,
+ S390_FEAT_ESAN3,
+ S390_FEAT_ZARCH,
+};
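+
+/*
+ * The S390_FEAT_GROUP_* macros above expand to comma-separated enum
+ * constants, so each feature array stays a flat uint16_t list; e.g.
+ * base_GEN7_GA1 expands to the 24 PLO features plus ESAN3 and ZARCH,
+ * 26 entries in total.
+ */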
+
+#define base_GEN7_GA2 EmptyFeat
+#define base_GEN7_GA3 EmptyFeat
+
+static uint16_t base_GEN8_GA1[] = {
+ S390_FEAT_DAT_ENH,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_GROUP_MSA,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_HFP_MADDSUB,
+};
+
+#define base_GEN8_GA2 EmptyFeat
+#define base_GEN8_GA3 EmptyFeat
+#define base_GEN8_GA4 EmptyFeat
+#define base_GEN8_GA5 EmptyFeat
+
+static uint16_t base_GEN9_GA1[] = {
+ S390_FEAT_IDTE_SEGMENT,
+ S390_FEAT_ASN_LX_REUSE,
+ S390_FEAT_STFLE,
+ S390_FEAT_SENSE_RUNNING_STATUS,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+ S390_FEAT_HFP_UNNORMALIZED_EXT,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+ S390_FEAT_ETF3_ENH,
+ S390_FEAT_DAT_ENH_2,
+};
+
+#define base_GEN9_GA2 EmptyFeat
+#define base_GEN9_GA3 EmptyFeat
+
+static uint16_t base_GEN10_GA1[] = {
+ S390_FEAT_CONDITIONAL_SSKE,
+ S390_FEAT_PARSING_ENH,
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_DFP,
+ S390_FEAT_DFP_FAST,
+ S390_FEAT_PFPO,
+};
+#define base_GEN10_GA2 EmptyFeat
+#define base_GEN10_GA3 EmptyFeat
+
+static uint16_t base_GEN11_GA1[] = {
+ S390_FEAT_NONQ_KEY_SETTING,
+ S390_FEAT_ENHANCED_MONITOR,
+ S390_FEAT_FLOATING_POINT_EXT,
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_STFLE_45,
+ S390_FEAT_CMPSC_ENH,
+ S390_FEAT_INTERLOCKED_ACCESS_2,
+};
+
+#define base_GEN11_GA2 EmptyFeat
+
+static uint16_t base_GEN12_GA1[] = {
+ S390_FEAT_DFP_ZONED_CONVERSION,
+ S390_FEAT_STFLE_49,
+ S390_FEAT_LOCAL_TLB_CLEARING,
+};
+
+#define base_GEN12_GA2 EmptyFeat
+
+static uint16_t base_GEN13_GA1[] = {
+ S390_FEAT_STFLE_53,
+ S390_FEAT_DFP_PACKED_CONVERSION,
+ S390_FEAT_GROUP_GEN13_PTFF,
+};
+
+#define base_GEN13_GA2 EmptyFeat
+
+static uint16_t base_GEN14_GA1[] = {
+ S390_FEAT_ENTROPY_ENC_COMP,
+ S390_FEAT_MISC_INSTRUCTION_EXT2,
+ S390_FEAT_SEMAPHORE_ASSIST,
+ S390_FEAT_TIME_SLICE_INSTRUMENTATION,
+ S390_FEAT_ORDER_PRESERVING_COMPRESSION,
+};
+
+#define base_GEN14_GA2 EmptyFeat
+
+static uint16_t base_GEN15_GA1[] = {
+ S390_FEAT_MISC_INSTRUCTION_EXT3,
+};
+
+#define base_GEN16_GA1 EmptyFeat
+
+/* Full features (in order of release)
+ * Automatically includes corresponding base features.
+ * Full features are all features the hardware supports, even if KVM/QEMU
+ * do not support them yet.
+ */
+static uint16_t full_GEN7_GA1[] = {
+ S390_FEAT_PPA15,
+ S390_FEAT_BPB,
+ S390_FEAT_SIE_F2,
+ S390_FEAT_SIE_SKEY,
+ S390_FEAT_SIE_GPERE,
+ S390_FEAT_SIE_IB,
+ S390_FEAT_SIE_CEI,
+};
+
+static uint16_t full_GEN7_GA2[] = {
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+};
+
+static uint16_t full_GEN7_GA3[] = {
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_SIE_SIIF,
+};
+
+static uint16_t full_GEN8_GA1[] = {
+ S390_FEAT_SIE_GSLS,
+ S390_FEAT_SIE_64BSCAO,
+};
+
+#define full_GEN8_GA2 EmptyFeat
+
+static uint16_t full_GEN8_GA3[] = {
+ S390_FEAT_ASN_LX_REUSE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+};
+
+#define full_GEN8_GA4 EmptyFeat
+#define full_GEN8_GA5 EmptyFeat
+
+static uint16_t full_GEN9_GA1[] = {
+ S390_FEAT_STORE_HYPERVISOR_INFO,
+ S390_FEAT_GROUP_MSA_EXT_1,
+ S390_FEAT_CMM,
+ S390_FEAT_SIE_CMMA,
+};
+
+static uint16_t full_GEN9_GA2[] = {
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_DFP,
+};
+
+static uint16_t full_GEN9_GA3[] = {
+ S390_FEAT_CONDITIONAL_SSKE,
+ S390_FEAT_PFPO,
+};
+
+static uint16_t full_GEN10_GA1[] = {
+ S390_FEAT_EDAT,
+ S390_FEAT_CONFIGURATION_TOPOLOGY,
+ S390_FEAT_GROUP_MSA_EXT_2,
+ S390_FEAT_ESOP,
+ S390_FEAT_SIE_PFMFI,
+ S390_FEAT_SIE_SIGPIF,
+};
+
+static uint16_t full_GEN10_GA2[] = {
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_SIE_IBS,
+};
+
+static uint16_t full_GEN10_GA3[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+};
+
+static uint16_t full_GEN11_GA1[] = {
+ S390_FEAT_IPTE_RANGE,
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_GROUP_MSA_EXT_4,
+};
+
+#define full_GEN11_GA2 EmptyFeat
+
+static uint16_t full_GEN12_GA1[] = {
+ S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
+ S390_FEAT_TRANSACTIONAL_EXE,
+ S390_FEAT_RUNTIME_INSTRUMENTATION,
+ S390_FEAT_ZPCI,
+ S390_FEAT_ADAPTER_EVENT_NOTIFICATION,
+ S390_FEAT_ADAPTER_INT_SUPPRESSION,
+ S390_FEAT_EDAT_2,
+ S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
+ S390_FEAT_AP_QUERY_CONFIG_INFO,
+ S390_FEAT_AP_QUEUE_INTERRUPT_CONTROL,
+ S390_FEAT_AP_FACILITIES_TEST,
+ S390_FEAT_AP,
+ S390_FEAT_EXTENDED_LENGTH_SCCB,
+ S390_FEAT_DIAG_318,
+};
+
+static uint16_t full_GEN12_GA2[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+};
+
+static uint16_t full_GEN13_GA1[] = {
+ S390_FEAT_VECTOR,
+};
+
+#define full_GEN13_GA2 EmptyFeat
+
+static uint16_t full_GEN14_GA1[] = {
+ S390_FEAT_INSTRUCTION_EXEC_PROT,
+ S390_FEAT_GUARDED_STORAGE,
+ S390_FEAT_VECTOR_PACKED_DECIMAL,
+ S390_FEAT_VECTOR_ENH,
+ S390_FEAT_MULTIPLE_EPOCH,
+ S390_FEAT_TEST_PENDING_EXT_INTERRUPTION,
+ S390_FEAT_INSERT_REFERENCE_BITS_MULT,
+ S390_FEAT_GROUP_MSA_EXT_6,
+ S390_FEAT_GROUP_MSA_EXT_7,
+ S390_FEAT_GROUP_MSA_EXT_8,
+ S390_FEAT_CMM_NT,
+ S390_FEAT_ETOKEN,
+ S390_FEAT_HPMA2,
+ S390_FEAT_SIE_KSS,
+ S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
+};
+
+#define full_GEN14_GA2 EmptyFeat
+
+static uint16_t full_GEN15_GA1[] = {
+ S390_FEAT_VECTOR_ENH2,
+ S390_FEAT_GROUP_ENH_SORT,
+ S390_FEAT_GROUP_DEFLATE_CONVERSION,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH,
+ S390_FEAT_GROUP_MSA_EXT_9,
+ S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
+ S390_FEAT_ETOKEN,
+ S390_FEAT_UNPACK,
+};
+
+static uint16_t full_GEN16_GA1[] = {
+ S390_FEAT_NNPA,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2,
+ S390_FEAT_BEAR_ENH,
+ S390_FEAT_RDP,
+ S390_FEAT_PAI,
+};
+
+/* Default features (in order of release)
+ * Automatically includes corresponding base features.
+ * Default features are all features this version of QEMU supports for this
+ * hardware model. Default feature sets can grow with new QEMU releases.
+ */
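+/*
+ * The three tiers are meant to nest: print_feature_defs() below ORs the
+ * base bits into both the default and the full set, and the default
+ * features are intended to be a subset of the full hardware set, although
+ * the generator itself does not enforce that.
+ */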
+#define default_GEN7_GA1 EmptyFeat
+#define default_GEN7_GA2 EmptyFeat
+#define default_GEN7_GA3 EmptyFeat
+#define default_GEN8_GA1 EmptyFeat
+#define default_GEN8_GA2 EmptyFeat
+#define default_GEN8_GA3 EmptyFeat
+#define default_GEN8_GA4 EmptyFeat
+#define default_GEN8_GA5 EmptyFeat
+
+static uint16_t default_GEN9_GA1[] = {
+ S390_FEAT_STORE_HYPERVISOR_INFO,
+ S390_FEAT_GROUP_MSA_EXT_1,
+ S390_FEAT_CMM,
+};
+
+#define default_GEN9_GA2 EmptyFeat
+#define default_GEN9_GA3 EmptyFeat
+
+static uint16_t default_GEN10_GA1[] = {
+ S390_FEAT_EDAT,
+ S390_FEAT_GROUP_MSA_EXT_2,
+};
+
+#define default_GEN10_GA2 EmptyFeat
+#define default_GEN10_GA3 EmptyFeat
+
+static uint16_t default_GEN11_GA1[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+ S390_FEAT_IPTE_RANGE,
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_GROUP_MSA_EXT_4,
+ S390_FEAT_PPA15,
+ S390_FEAT_BPB,
+};
+
+#define default_GEN11_GA2 EmptyFeat
+
+static uint16_t default_GEN12_GA1[] = {
+ S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
+ S390_FEAT_TRANSACTIONAL_EXE,
+ S390_FEAT_RUNTIME_INSTRUMENTATION,
+ S390_FEAT_ZPCI,
+ S390_FEAT_ADAPTER_EVENT_NOTIFICATION,
+ S390_FEAT_EDAT_2,
+ S390_FEAT_ESOP,
+ S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
+};
+
+#define default_GEN12_GA2 EmptyFeat
+
+static uint16_t default_GEN13_GA1[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+ S390_FEAT_VECTOR,
+};
+
+#define default_GEN13_GA2 EmptyFeat
+
+static uint16_t default_GEN14_GA1[] = {
+ S390_FEAT_INSTRUCTION_EXEC_PROT,
+ S390_FEAT_GUARDED_STORAGE,
+ S390_FEAT_VECTOR_PACKED_DECIMAL,
+ S390_FEAT_VECTOR_ENH,
+ S390_FEAT_GROUP_MSA_EXT_6,
+ S390_FEAT_GROUP_MSA_EXT_7,
+ S390_FEAT_GROUP_MSA_EXT_8,
+ S390_FEAT_MULTIPLE_EPOCH,
+ S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
+};
+
+#define default_GEN14_GA2 EmptyFeat
+
+static uint16_t default_GEN15_GA1[] = {
+ S390_FEAT_VECTOR_ENH2,
+ S390_FEAT_GROUP_DEFLATE_CONVERSION,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH,
+ S390_FEAT_GROUP_MSA_EXT_9,
+ S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
+ S390_FEAT_ETOKEN,
+};
+
+static uint16_t default_GEN16_GA1[] = {
+ S390_FEAT_NNPA,
+ S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2,
+ S390_FEAT_BEAR_ENH,
+ S390_FEAT_RDP,
+ S390_FEAT_PAI,
+};
+
+/* QEMU (CPU model) features */
+
+static uint16_t qemu_V2_11[] = {
+ S390_FEAT_GROUP_PLO,
+ S390_FEAT_ESAN3,
+ S390_FEAT_ZARCH,
+};
+
+static uint16_t qemu_V3_1[] = {
+ S390_FEAT_DAT_ENH,
+ S390_FEAT_IDTE_SEGMENT,
+ S390_FEAT_STFLE,
+ S390_FEAT_SENSE_RUNNING_STATUS,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_MSA,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_ETF3_ENH,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_STFLE_45,
+ S390_FEAT_STFLE_49,
+ S390_FEAT_LOCAL_TLB_CLEARING,
+ S390_FEAT_INTERLOCKED_ACCESS_2,
+ S390_FEAT_ADAPTER_EVENT_NOTIFICATION,
+ S390_FEAT_ADAPTER_INT_SUPPRESSION,
+ S390_FEAT_MSA_EXT_3,
+ S390_FEAT_MSA_EXT_4,
+};
+
+static uint16_t qemu_V4_0[] = {
+ /*
+ * Only the BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER
+ * are not implemented yet).
+ */
+ S390_FEAT_FLOATING_POINT_EXT,
+ S390_FEAT_ZPCI,
+};
+
+static uint16_t qemu_V4_1[] = {
+ S390_FEAT_STFLE_53,
+ S390_FEAT_VECTOR,
+};
+
+static uint16_t qemu_V6_0[] = {
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2,
+ S390_FEAT_ESOP,
+};
+
+static uint16_t qemu_LATEST[] = {
+ S390_FEAT_INSTRUCTION_EXEC_PROT,
+ S390_FEAT_MISC_INSTRUCTION_EXT2,
+ S390_FEAT_MSA_EXT_8,
+ S390_FEAT_VECTOR_ENH,
+};
+
+/* add all new definitions before this point */
+static uint16_t qemu_MAX[] = {
+ /* generates a dependency warning, leave it out for now */
+ S390_FEAT_MSA_EXT_5,
+};
+
+/****** END FEATURE DEFS ******/
+
+#define _YEARS "2016"
+#define _NAME_H "TARGET_S390X_GEN_FEATURES_H"
+
+#define CPU_FEAT_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_LIST_" #_name, \
+ .base_bits = \
+ { .data = base_##_name, \
+ .len = ARRAY_SIZE(base_##_name) }, \
+ .default_bits = \
+ { .data = default_##_name, \
+ .len = ARRAY_SIZE(default_##_name) }, \
+ .full_bits = \
+ { .data = full_##_name, \
+ .len = ARRAY_SIZE(full_##_name) }, \
+ }
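+
+/*
+ * Illustrative expansion (not part of the generated output):
+ * CPU_FEAT_INITIALIZER(GEN15_GA1) yields an entry named
+ * "S390_FEAT_LIST_GEN15_GA1" whose three BitSpecs reference
+ * base_GEN15_GA1, default_GEN15_GA1 and full_GEN15_GA1 together with
+ * their ARRAY_SIZEs.
+ */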
+
+typedef struct BitSpec {
+ uint16_t *data;
+ uint32_t len;
+} BitSpec;
+
+typedef struct {
+ const char *name;
+ BitSpec base_bits;
+ BitSpec default_bits;
+ BitSpec full_bits;
+} CpuFeatDefSpec;
+
+static uint16_t EmptyFeat[] = {};
+
+/*******************************
+ * processor GA series
+ *******************************/
+static CpuFeatDefSpec CpuFeatDef[] = {
+ CPU_FEAT_INITIALIZER(GEN7_GA1),
+ CPU_FEAT_INITIALIZER(GEN7_GA2),
+ CPU_FEAT_INITIALIZER(GEN7_GA3),
+ CPU_FEAT_INITIALIZER(GEN8_GA1),
+ CPU_FEAT_INITIALIZER(GEN8_GA2),
+ CPU_FEAT_INITIALIZER(GEN8_GA3),
+ CPU_FEAT_INITIALIZER(GEN8_GA4),
+ CPU_FEAT_INITIALIZER(GEN8_GA5),
+ CPU_FEAT_INITIALIZER(GEN9_GA1),
+ CPU_FEAT_INITIALIZER(GEN9_GA2),
+ CPU_FEAT_INITIALIZER(GEN9_GA3),
+ CPU_FEAT_INITIALIZER(GEN10_GA1),
+ CPU_FEAT_INITIALIZER(GEN10_GA2),
+ CPU_FEAT_INITIALIZER(GEN10_GA3),
+ CPU_FEAT_INITIALIZER(GEN11_GA1),
+ CPU_FEAT_INITIALIZER(GEN11_GA2),
+ CPU_FEAT_INITIALIZER(GEN12_GA1),
+ CPU_FEAT_INITIALIZER(GEN12_GA2),
+ CPU_FEAT_INITIALIZER(GEN13_GA1),
+ CPU_FEAT_INITIALIZER(GEN13_GA2),
+ CPU_FEAT_INITIALIZER(GEN14_GA1),
+ CPU_FEAT_INITIALIZER(GEN14_GA2),
+ CPU_FEAT_INITIALIZER(GEN15_GA1),
+ CPU_FEAT_INITIALIZER(GEN16_GA1),
+};
+
+#define FEAT_GROUP_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_GROUP_LIST_" #_name, \
+ .enum_name = "S390_FEAT_GROUP_" #_name, \
+ .bits = \
+ { .data = group_##_name, \
+ .len = ARRAY_SIZE(group_##_name) }, \
+ }
+
+typedef struct {
+ const char *name;
+ const char *enum_name;
+ BitSpec bits;
+} FeatGroupDefSpec;
+
+/*******************************
+ * feature groups
+ *******************************/
+static FeatGroupDefSpec FeatGroupDef[] = {
+ FEAT_GROUP_INITIALIZER(PLO),
+ FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING),
+ FEAT_GROUP_INITIALIZER(GEN13_PTFF),
+ FEAT_GROUP_INITIALIZER(MSA),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_1),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_2),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_3),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_4),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_5),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_6),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_7),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_8),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_9),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_9_PCKMO),
+ FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF),
+ FEAT_GROUP_INITIALIZER(ENH_SORT),
+ FEAT_GROUP_INITIALIZER(DEFLATE_CONVERSION),
+};
+
+#define QEMU_FEAT_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_LIST_QEMU_" #_name, \
+ .bits = \
+ { .data = qemu_##_name, \
+ .len = ARRAY_SIZE(qemu_##_name) }, \
+ }
+
+/*******************************
+ * QEMU (CPU model) features
+ *******************************/
+static FeatGroupDefSpec QemuFeatDef[] = {
+ QEMU_FEAT_INITIALIZER(V2_11),
+ QEMU_FEAT_INITIALIZER(V3_1),
+ QEMU_FEAT_INITIALIZER(V4_0),
+ QEMU_FEAT_INITIALIZER(V4_1),
+ QEMU_FEAT_INITIALIZER(V6_0),
+ QEMU_FEAT_INITIALIZER(LATEST),
+ QEMU_FEAT_INITIALIZER(MAX),
+};
+
+static void set_bits(uint64_t list[], BitSpec bits)
+{
+ uint32_t i;
+
+ for (i = 0; i < bits.len; i++) {
+ list[bits.data[i] / 64] |= 1ULL << (bits.data[i] % 64);
+ }
+}
+
+static inline void clear_bit(uint64_t list[], unsigned long nr)
+{
+ list[nr / 64] &= ~(1ULL << (nr % 64));
+}
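+
+/*
+ * Worked example for the bitmap indexing above: feature number 70 lives
+ * in list[70 / 64] = list[1] at bit 70 % 64 = 6, so set_bits() ORs in
+ * 1ULL << 6 and clear_bit() masks the same bit back out.
+ */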
+
+static void print_feature_defs(void)
+{
+ uint64_t base_feat[S390_FEAT_MAX / 64 + 1] = {};
+ uint64_t default_feat[S390_FEAT_MAX / 64 + 1] = {};
+ uint64_t full_feat[S390_FEAT_MAX / 64 + 1] = {};
+ int i, j;
+
+ printf("\n/* CPU model feature list data */\n");
+
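+ /*
+ * The three accumulators are deliberately not reset inside the loop:
+ * each generation inherits all bits of the previous generations, so a
+ * model only needs to list its newly added (or, for gen15, explicitly
+ * cleared) features.
+ */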
+ for (i = 0; i < ARRAY_SIZE(CpuFeatDef); i++) {
+ /* With gen15, CSSKE and BPB are deprecated */
+ if (strcmp(CpuFeatDef[i].name, "S390_FEAT_LIST_GEN15_GA1") == 0) {
+ clear_bit(base_feat, S390_FEAT_CONDITIONAL_SSKE);
+ clear_bit(default_feat, S390_FEAT_CONDITIONAL_SSKE);
+ clear_bit(default_feat, S390_FEAT_BPB);
+ }
+ set_bits(base_feat, CpuFeatDef[i].base_bits);
+ /* add the base to the default features */
+ set_bits(default_feat, CpuFeatDef[i].base_bits);
+ set_bits(default_feat, CpuFeatDef[i].default_bits);
+ /* add the base to the full features */
+ set_bits(full_feat, CpuFeatDef[i].base_bits);
+ set_bits(full_feat, CpuFeatDef[i].full_bits);
+
+ printf("#define %s_BASE\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(base_feat); j++) {
+ printf("0x%016"PRIx64"ULL", base_feat[j]);
+ if (j < ARRAY_SIZE(base_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ printf("#define %s_DEFAULT\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(default_feat); j++) {
+ printf("0x%016"PRIx64"ULL", default_feat[j]);
+ if (j < ARRAY_SIZE(default_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ printf("#define %s_FULL\t\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(full_feat); j++) {
+ printf("0x%016"PRIx64"ULL", full_feat[j]);
+ if (j < ARRAY_SIZE(full_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
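+
+/*
+ * Shape of the emitted defines (illustrative; the hex values depend on
+ * the numbering in cpu_features_def.h):
+ *
+ *   #define S390_FEAT_LIST_GEN7_GA1_BASE 0x...ULL,0x...ULL,...
+ *
+ * i.e. one 64-bit literal per bitmap word, comma-separated so the macro
+ * can be used directly as an array initializer.
+ */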
+
+static void print_qemu_feature_defs(void)
+{
+ uint64_t feat[S390_FEAT_MAX / 64 + 1] = {};
+ int i, j;
+
+ printf("\n/* QEMU (CPU model) feature list data */\n");
+
+ /* for now we assume that we only add new features */
+ for (i = 0; i < ARRAY_SIZE(QemuFeatDef); i++) {
+ set_bits(feat, QemuFeatDef[i].bits);
+
+ printf("#define %s\t", QemuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(feat); j++) {
+ printf("0x%016"PRIx64"ULL", feat[j]);
+ if (j < ARRAY_SIZE(feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
+
+static void print_feature_group_defs(void)
+{
+ int i, j;
+
+ printf("\n/* CPU feature group list data */\n");
+
+ for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) {
+ uint64_t feat[S390_FEAT_MAX / 64 + 1] = {};
+
+ set_bits(feat, FeatGroupDef[i].bits);
+ printf("#define %s\t", FeatGroupDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(feat); j++) {
+ printf("0x%016"PRIx64"ULL", feat[j]);
+ if (j < ARRAY_SIZE(feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
+
+static void print_feature_group_enum_type(void)
+{
+ int i;
+
+ printf("\n/* CPU feature group enum type */\n"
+ "typedef enum {\n");
+ for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) {
+ printf("\t%s,\n", FeatGroupDef[i].enum_name);
+ }
+ printf("\tS390_FEAT_GROUP_MAX,\n"
+ "} S390FeatGroup;\n");
+}
+
+int main(int argc, char *argv[])
+{
+ printf("/*\n"
+ " * AUTOMATICALLY GENERATED, DO NOT MODIFY HERE, EDIT\n"
+ " * SOURCE FILE \"%s\" INSTEAD.\n"
+ " *\n"
+ " * Copyright %s IBM Corp.\n"
+ " *\n"
+ " * This work is licensed under the terms of the GNU GPL, "
+ "version 2 or (at\n * your option) any later version. See "
+ "the COPYING file in the top-level\n * directory.\n"
+ " */\n\n"
+ "#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H);
+ print_feature_defs();
+ print_feature_group_defs();
+ print_qemu_feature_defs();
+ print_feature_group_enum_type();
+ printf("\n#endif\n");
+ return 0;
+}
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
new file mode 100644
index 000000000..6e35473c7
--- /dev/null
+++ b/target/s390x/helper.c
@@ -0,0 +1,283 @@
+/*
+ * S/390 helpers - sysemu only
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "exec/gdbstub.h"
+#include "qemu/timer.h"
+#include "hw/s390x/ioinst.h"
+#include "hw/s390x/pv.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/runstate.h"
+#include "sysemu/tcg.h"
+
+void s390x_tod_timer(void *opaque)
+{
+ cpu_inject_clock_comparator((S390CPU *) opaque);
+}
+
+void s390x_cpu_timer(void *opaque)
+{
+ cpu_inject_cpu_timer((S390CPU *) opaque);
+}
+
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ target_ulong raddr;
+ int prot;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ uint64_t tec;
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+
+ /* We want to read the code (e.g., to see what we are single-stepping). */
+ if (asc != PSW_ASC_HOME) {
+ asc = PSW_ASC_PRIMARY;
+ }
+
+ /*
+ * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
+ * of MMU_INST_FETCH.
+ */
+ if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
+ return -1;
+ }
+ return raddr;
+}
+
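+/*
+ * Split the virtual address into page and offset, translate the page via
+ * the debug hook and re-add the offset: e.g. with 4 KiB target pages,
+ * vaddr 0x12345 becomes page 0x12000 plus offset 0x345.
+ */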
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
+{
+ hwaddr phys_addr;
+ target_ulong page;
+
+ page = vaddr & TARGET_PAGE_MASK;
+ phys_addr = cpu_get_phys_page_debug(cs, page);
+ phys_addr += (vaddr & ~TARGET_PAGE_MASK);
+
+ return phys_addr;
+}
+
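+/*
+ * A wait PSW whose address ends in 0xfff follows the "signal quiesce"
+ * convention Linux guests use to request an orderly shutdown; see
+ * s390_handle_wait() below.
+ */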
+static inline bool is_special_wait_psw(uint64_t psw_addr)
+{
+ /* signal quiesce */
+ return (psw_addr & 0xfffUL) == 0xfffUL;
+}
+
+void s390_handle_wait(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (s390_cpu_halt(cpu) == 0) {
+ if (is_special_wait_psw(cpu->env.psw.addr)) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ } else {
+ cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
+ qemu_system_guest_panicked(cpu_get_crash_info(cs));
+ }
+ }
+}
+
+LowCore *cpu_map_lowcore(CPUS390XState *env)
+{
+ LowCore *lowcore;
+ hwaddr len = sizeof(LowCore);
+
+ lowcore = cpu_physical_memory_map(env->psa, &len, true);
+
+ if (len < sizeof(LowCore)) {
+ cpu_abort(env_cpu(env), "Could not map lowcore\n");
+ }
+
+ return lowcore;
+}
+
+void cpu_unmap_lowcore(LowCore *lowcore)
+{
+ cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+}
+
+void do_restart_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->restart_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->restart_new_psw.mask);
+ addr = be64_to_cpu(lowcore->restart_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+ env->pending_int &= ~INTERRUPT_RESTART;
+
+ s390_cpu_set_psw(env, mask, addr);
+}
+
+void s390_cpu_recompute_watchpoints(CPUState *cs)
+{
+ const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ /*
+ * We are called when the watchpoints have changed. First remove
+ * them all.
+ */
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+
+ /* Return if PER is not enabled */
+ if (!(env->psw.mask & PSW_MASK_PER)) {
+ return;
+ }
+
+ /* Return if storage-alteration event is not enabled. */
+ if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
+ return;
+ }
+
+ if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
+ /*
+ * We can't create a watchpoint spanning the whole memory range, so
+ * split it into two parts.
+ */
+ cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
+ cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
+ } else if (env->cregs[10] > env->cregs[11]) {
+ /* The address range loops, create two watchpoints. */
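+ /*
+ * E.g. CR10 = 0xffffffffffff0000, CR11 = 0xfff covers the ranges
+ * [CR10, 2^64 - 1] (length -CR10) and [0, CR11] (length CR11 + 1).
+ */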
+ cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
+ wp_flags, NULL);
+ cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
+
+ } else {
+ /* Default case, create a single watchpoint. */
+ cpu_watchpoint_insert(cs, env->cregs[10],
+ env->cregs[11] - env->cregs[10] + 1,
+ wp_flags, NULL);
+ }
+}
+
+typedef struct SigpSaveArea {
+ uint64_t fprs[16]; /* 0x0000 */
+ uint64_t grs[16]; /* 0x0080 */
+ PSW psw; /* 0x0100 */
+ uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
+ uint32_t prefix; /* 0x0118 */
+ uint32_t fpc; /* 0x011c */
+ uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
+ uint32_t todpr; /* 0x0124 */
+ uint64_t cputm; /* 0x0128 */
+ uint64_t ckc; /* 0x0130 */
+ uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
+ uint32_t ars[16]; /* 0x0140 */
+ uint64_t crs[16]; /* 0x0384 */
+} SigpSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
+
+int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+ static const uint8_t ar_id = 1;
+ SigpSaveArea *sa;
+ hwaddr len = sizeof(*sa);
+ int i;
+
+ /* For PVMs, storing will occur when this CPU enters SIE again */
+ if (s390_is_pv()) {
+ return 0;
+ }
+
+ sa = cpu_physical_memory_map(addr, &len, true);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (len != sizeof(*sa)) {
+ cpu_physical_memory_unmap(sa, len, 1, 0);
+ return -EFAULT;
+ }
+
+ if (store_arch) {
+ cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
+ }
+ sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
+ sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
+ sa->prefix = cpu_to_be32(cpu->env.psa);
+ sa->fpc = cpu_to_be32(cpu->env.fpc);
+ sa->todpr = cpu_to_be32(cpu->env.todpr);
+ sa->cputm = cpu_to_be64(cpu->env.cputm);
+ sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
+ for (i = 0; i < 16; ++i) {
+ sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
+ }
+
+ cpu_physical_memory_unmap(sa, len, 1, len);
+
+ return 0;
+}
+
+typedef struct SigpAdtlSaveArea {
+ uint64_t vregs[32][2]; /* 0x0000 */
+ uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
+ uint64_t gscb[4]; /* 0x0400 */
+ uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */
+} SigpAdtlSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);
+
+#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
+int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
+{
+ SigpAdtlSaveArea *sa;
+ hwaddr save = len;
+ int i;
+
+ sa = cpu_physical_memory_map(addr, &save, true);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (save != len) {
+ cpu_physical_memory_unmap(sa, len, 1, 0);
+ return -EFAULT;
+ }
+
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ for (i = 0; i < 32; i++) {
+ sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
+ sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
+ }
+ }
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
+ for (i = 0; i < 4; i++) {
+ sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
+ }
+ }
+
+ cpu_physical_memory_unmap(sa, len, 1, len);
+ return 0;
+}
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
new file mode 100644
index 000000000..271b081e8
--- /dev/null
+++ b/target/s390x/helper.h
@@ -0,0 +1,381 @@
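+/*
+ * TCG helper declarations. DEF_HELPER_n(name, ret, t1, ..., tn) declares
+ * helper_name() taking n arguments; DEF_HELPER_FLAGS_n additionally
+ * passes TCG call flags: TCG_CALL_NO_WG marks a helper that does not
+ * write TCG globals, TCG_CALL_NO_RWG one that neither reads nor writes
+ * them, and the _SE variants are side-effect free, so the call can be
+ * optimized away if its result is unused.
+ */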
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_2(data_exception, noreturn, env, i32)
+DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_3(mvcl, i32, env, i32, i32)
+DEF_HELPER_3(clcl, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
+DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_3(srst, void, env, i32, i32)
+DEF_HELPER_3(srstu, void, env, i32, i32)
+DEF_HELPER_4(clst, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i32, i32)
+DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_3(mvst, i32, env, i32, i32)
+DEF_HELPER_4(ex, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_4(mvcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(mvclu, i32, env, i32, i64, i32)
+DEF_HELPER_4(clcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(clclu, i32, env, i32, i64, i32)
+DEF_HELPER_3(cegb, i64, env, s64, i32)
+DEF_HELPER_3(cdgb, i64, env, s64, i32)
+DEF_HELPER_3(cxgb, i64, env, s64, i32)
+DEF_HELPER_3(celgb, i64, env, i64, i32)
+DEF_HELPER_3(cdlgb, i64, env, i64, i32)
+DEF_HELPER_3(cxlgb, i64, env, i64, i32)
+DEF_HELPER_4(cdsg, void, env, i64, i32, i32)
+DEF_HELPER_4(cdsg_parallel, void, env, i64, i32, i32)
+DEF_HELPER_4(csst, i32, env, i32, i64, i64)
+DEF_HELPER_4(csst_parallel, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(seb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(sdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(sxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(deb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(ddb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(dxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(meeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(mxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mxdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(ldeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_4(ldxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_2(lxdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lxeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(ledb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(lexb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64)
+DEF_HELPER_3(cgeb, i64, env, i64, i32)
+DEF_HELPER_3(cgdb, i64, env, i64, i32)
+DEF_HELPER_4(cgxb, i64, env, i64, i64, i32)
+DEF_HELPER_3(cfeb, i64, env, i64, i32)
+DEF_HELPER_3(cfdb, i64, env, i64, i32)
+DEF_HELPER_4(cfxb, i64, env, i64, i64, i32)
+DEF_HELPER_3(clgeb, i64, env, i64, i32)
+DEF_HELPER_3(clgdb, i64, env, i64, i32)
+DEF_HELPER_4(clgxb, i64, env, i64, i64, i32)
+DEF_HELPER_3(clfeb, i64, env, i64, i32)
+DEF_HELPER_3(clfdb, i64, env, i64, i32)
+DEF_HELPER_4(clfxb, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32)
+DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(pka, TCG_CALL_NO_WG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(unpka, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_4(unpku, TCG_CALL_NO_WG, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_3(tp, TCG_CALL_NO_WG, i32, env, i64, i32)
+DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_4(tre, i64, env, i64, i64, i64)
+DEF_HELPER_4(trt, i32, env, i32, i64, i64)
+DEF_HELPER_4(trtr, i32, env, i32, i64, i64)
+DEF_HELPER_5(trXX, i32, env, i32, i32, i32, i32)
+DEF_HELPER_4(cksm, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(srnm, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_2(stfle, i32, env, i64)
+DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64)
+DEF_HELPER_4(mvcos, i32, env, i64, i64, i64)
+DEF_HELPER_4(cu12, i32, env, i32, i32, i32)
+DEF_HELPER_4(cu14, i32, env, i32, i32, i32)
+DEF_HELPER_4(cu21, i32, env, i32, i32, i32)
+DEF_HELPER_4(cu24, i32, env, i32, i32, i32)
+DEF_HELPER_4(cu41, i32, env, i32, i32, i32)
+DEF_HELPER_4(cu42, i32, env, i32, i32, i32)
+DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_3(probe_write_access, TCG_CALL_NO_WG, void, env, i64, i64)
+
+/* === Vector Support Instructions === */
+DEF_HELPER_FLAGS_4(gvec_vbperm, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(vll, TCG_CALL_NO_WG, void, env, ptr, i64, i64)
+DEF_HELPER_FLAGS_4(gvec_vpk16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpk32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpk64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpks16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpks32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpks64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_5(gvec_vpks_cc16, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vpks_cc32, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vpks_cc64, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vpkls16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpkls32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vpkls64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_5(gvec_vpkls_cc16, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vpkls_cc32, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vpkls_cc64, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vperm, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(vstl, TCG_CALL_NO_WG, void, env, cptr, i64, i64)
+
+/* === Vector Integer Instructions === */
+DEF_HELPER_FLAGS_4(gvec_vavg8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vavg16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vavgl8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vavgl16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vclz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vclz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vctz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vctz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vgfm8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vgfm16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vgfm32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vgfm64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vgfma8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vgfma16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vgfma32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vgfma64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmal8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmal16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmah8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmah16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmalh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmalh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmale8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmale16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmale32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmao8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmao16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmao32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmalo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmalo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vmalo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vme32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmle32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vpopct8, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vpopct16, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_verim8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_verim16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vsl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_vsra, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_vsrl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_vscbi8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vscbi16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_4(gvec_vtm, void, ptr, cptr, env, i32)
+
+/* === Vector String Instructions === */
+DEF_HELPER_FLAGS_4(gvec_vfae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_5(gvec_vfae_cc8, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfae_cc16, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfae_cc32, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfee8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfee16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfee32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_5(gvec_vfee_cc8, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfee_cc16, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfee_cc32, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfene8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfene16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_4(gvec_vfene32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32)
+DEF_HELPER_5(gvec_vfene_cc8, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfene_cc16, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfene_cc32, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_3(gvec_vistr8, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vistr16, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_FLAGS_3(gvec_vistr32, TCG_CALL_NO_RWG, void, ptr, cptr, i32)
+DEF_HELPER_4(gvec_vistr_cc8, void, ptr, cptr, env, i32)
+DEF_HELPER_4(gvec_vistr_cc16, void, ptr, cptr, env, i32)
+DEF_HELPER_4(gvec_vistr_cc32, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc_rt8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc_rt16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_FLAGS_5(gvec_vstrc_rt32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32)
+DEF_HELPER_6(gvec_vstrc_cc8, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc16, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc32, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt8, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt16, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt32, void, ptr, cptr, cptr, cptr, env, i32)
+
+/* === Vector Floating-Point Instructions === */
+DEF_HELPER_FLAGS_5(gvec_vfa32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfa64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfa128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfc32, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfk32, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfc64, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfk64, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfc128, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfk128, void, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfce32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfce32_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfce64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfce64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfce128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfce128_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfch32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfch32_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfch64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfch64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfch128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfch128_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfche32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfche32_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfche64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfche64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfche128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfche128_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdlg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vclgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfd32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfd64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfd128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfi32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfi64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfi128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfll32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfll64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vflr64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vflr128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfm32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfm64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfm128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmax32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmax64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmax128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmin32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmin64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfmin128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfma32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfma128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfms32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfms128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnma32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnma128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnms32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfnms128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq128, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfs32, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfs64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfs128, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_vftci32, void, ptr, cptr, env, i32)
+DEF_HELPER_4(gvec_vftci64, void, ptr, cptr, env, i32)
+DEF_HELPER_4(gvec_vftci128, void, ptr, cptr, env, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(servc, i32, env, i64, i64)
+DEF_HELPER_4(diag, void, env, i32, i32, i32)
+DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
+DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(sck, TCG_CALL_NO_RWG, i32, env, i64)
+DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(sckpf, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_4(stsi, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64)
+DEF_HELPER_2(iske, i64, env, i64)
+DEF_HELPER_3(sske, void, env, i64, i64)
+DEF_HELPER_2(rrbe, i32, env, i64)
+DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
+DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
+DEF_HELPER_4(sigp, i32, env, i64, i32, i32)
+DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_4(idte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32)
+DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_2(lra, i64, env, i64)
+DEF_HELPER_1(per_check_exception, void, env)
+DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(per_store_real, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_2(xsch, void, env, i64)
+DEF_HELPER_2(csch, void, env, i64)
+DEF_HELPER_2(hsch, void, env, i64)
+DEF_HELPER_3(msch, void, env, i64, i64)
+DEF_HELPER_2(rchp, void, env, i64)
+DEF_HELPER_2(rsch, void, env, i64)
+DEF_HELPER_2(sal, void, env, i64)
+DEF_HELPER_4(schm, void, env, i64, i64, i64)
+DEF_HELPER_3(ssch, void, env, i64, i64)
+DEF_HELPER_2(stcrw, void, env, i64)
+DEF_HELPER_3(stsch, void, env, i64, i64)
+DEF_HELPER_2(tpi, i32, env, i64)
+DEF_HELPER_3(tsch, void, env, i64, i64)
+DEF_HELPER_2(chsc, void, env, i64)
+
+DEF_HELPER_2(clp, void, env, i32)
+DEF_HELPER_3(pcilg, void, env, i32, i32)
+DEF_HELPER_3(pcistg, void, env, i32, i32)
+DEF_HELPER_4(stpcifc, void, env, i32, i64, i32)
+DEF_HELPER_3(sic, void, env, i64, i64)
+DEF_HELPER_3(rpcit, void, env, i32, i32)
+DEF_HELPER_5(pcistb, void, env, i32, i32, i64, i32)
+DEF_HELPER_4(mpcifc, void, env, i32, i64, i32)
+DEF_HELPER_3(monitor_call, void, env, i64, i32)
+#endif
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
new file mode 100644
index 000000000..5195f060e
--- /dev/null
+++ b/target/s390x/interrupt.c
@@ -0,0 +1,244 @@
+/*
+ * QEMU S/390 Interrupt support
+ *
+ * Copyright IBM Corp. 2012, 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "kvm/kvm_s390x.h"
+#include "s390x-internal.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+#include "hw/s390x/ioinst.h"
+#include "tcg/tcg_s390x.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/s390x/s390_flic.h"
+#endif
+
+/* Make sure to exit the TB after this call! */
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_PGM;
+ env->int_pgm_code = code;
+ /* env->int_pgm_ilen is already set, or will be set during unwinding */
+}
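+
+/*
+ * A minimal caller sketch (illustrative, assuming cpu_loop_exit_restore()
+ * from "exec/exec-all.h"): a TCG helper raising a specification exception
+ * honors the contract above by leaving the TB immediately:
+ *
+ *     trigger_pgm_exception(env, PGM_SPECIFICATION);
+ *     cpu_loop_exit_restore(env_cpu(env), ra);
+ */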
+
+void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
+{
+ if (kvm_enabled()) {
+ kvm_s390_program_interrupt(env_archcpu(env), code);
+ } else if (tcg_enabled()) {
+ tcg_s390_program_interrupt(env, code, ra);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void cpu_inject_clock_comparator(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_EXT_CLOCK_COMPARATOR;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_cpu_timer(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_EXT_CPU_TIMER;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr)
+{
+ CPUS390XState *env = &cpu->env;
+
+ g_assert(src_cpu_addr < S390_MAX_CPUS);
+ set_bit(src_cpu_addr, env->emergency_signals);
+
+ env->pending_int |= INTERRUPT_EMERGENCY_SIGNAL;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr)
+{
+ CPUS390XState *env = &cpu->env;
+
+ g_assert(src_cpu_addr < S390_MAX_CPUS);
+ if (env->pending_int & INTERRUPT_EXTERNAL_CALL) {
+ return -EBUSY;
+ }
+ env->external_call_addr = src_cpu_addr;
+
+ env->pending_int |= INTERRUPT_EXTERNAL_CALL;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ return 0;
+}
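+
+/*
+ * Usage sketch (illustrative, assuming the SIGP_STAT_EXT_CALL_PENDING
+ * status bit from cpu.h): a SIGP external-call handler treats -EBUSY
+ * as "an external call is already pending" and rejects the order:
+ *
+ *     if (cpu_inject_external_call(dst_cpu, src_cpu_addr) == -EBUSY) {
+ *         *status_reg |= SIGP_STAT_EXT_CALL_PENDING;
+ *     }
+ */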
+
+void cpu_inject_restart(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (kvm_enabled()) {
+ kvm_s390_restart_interrupt(cpu);
+ return;
+ }
+
+ env->pending_int |= INTERRUPT_RESTART;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void cpu_inject_stop(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (kvm_enabled()) {
+ kvm_s390_stop_interrupt(cpu);
+ return;
+ }
+
+ env->pending_int |= INTERRUPT_STOP;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+/*
+ * All of the following interrupts are floating, i.e. not per-vcpu.
+ * We just need a dummy cpustate in order to be able to inject in the
+ * non-kvm case.
+ */
+void s390_sclp_extint(uint32_t parm)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = s390_get_flic_class(fs);
+
+ fsc->inject_service(fs, parm);
+}
+
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+ uint32_t io_int_parm, uint32_t io_int_word)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = s390_get_flic_class(fs);
+
+ fsc->inject_io(fs, subchannel_id, subchannel_nr, io_int_parm, io_int_word);
+}
+
+void s390_crw_mchk(void)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = s390_get_flic_class(fs);
+
+ fsc->inject_crw_mchk(fs);
+}
+
+bool s390_cpu_has_mcck_int(S390CPU *cpu)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_MCHECK)) {
+ return false;
+ }
+
+ /* for now we only support channel report machine checks (floating) */
+ if (qemu_s390_flic_has_crw_mchk(flic) &&
+ (env->cregs[14] & CR14_CHANNEL_REPORT_SC)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool s390_cpu_has_ext_int(S390CPU *cpu)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_EXT)) {
+ return false;
+ }
+
+ if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
+ (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
+ (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
+ (env->cregs[0] & CR0_CKC_SC)) {
+ return true;
+ }
+
+ if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
+ (env->cregs[0] & CR0_CPU_TIMER_SC)) {
+ return true;
+ }
+
+ if (qemu_s390_flic_has_service(flic) &&
+ (env->cregs[0] & CR0_SERVICE_SC)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool s390_cpu_has_io_int(S390CPU *cpu)
+{
+ QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
+ CPUS390XState *env = &cpu->env;
+
+ if (!(env->psw.mask & PSW_MASK_IO)) {
+ return false;
+ }
+
+ return qemu_s390_flic_has_io(flic, env->cregs[6]);
+}
+
+bool s390_cpu_has_restart_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ return env->pending_int & INTERRUPT_RESTART;
+}
+
+bool s390_cpu_has_stop_int(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ return env->pending_int & INTERRUPT_STOP;
+}
+#endif
+
+bool s390_cpu_has_int(S390CPU *cpu)
+{
+#ifndef CONFIG_USER_ONLY
+ if (!tcg_enabled()) {
+ return false;
+ }
+ return s390_cpu_has_mcck_int(cpu) ||
+ s390_cpu_has_ext_int(cpu) ||
+ s390_cpu_has_io_int(cpu) ||
+ s390_cpu_has_restart_int(cpu) ||
+ s390_cpu_has_stop_int(cpu);
+#else
+ return false;
+#endif
+}
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
new file mode 100644
index 000000000..bdae5090b
--- /dev/null
+++ b/target/s390x/ioinst.c
@@ -0,0 +1,818 @@
+/*
+ * I/O instructions for S/390
+ *
+ * Copyright 2012, 2015 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "hw/s390x/ioinst.h"
+#include "trace.h"
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/pv.h"
+
+/* All I/O instructions except CHSC use the S format. */
+static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb,
+ uint8_t *ar)
+{
+ /*
+ * Addresses for protected guests are all offsets into the
+     * satellite block which holds the IO control structures. Those
+     * control structures always start at offset 0 and are always
+     * aligned and accessible, so we can return 0 here, which will
+     * pass the subsequent address checks.
+ */
+ if (s390_is_pv()) {
+ *ar = 0;
+ return 0;
+ }
+ return decode_basedisp_s(env, ipb, ar);
+}
+
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+ int *schid)
+{
+ if (!IOINST_SCHID_ONE(value)) {
+ return -EINVAL;
+ }
+ if (!IOINST_SCHID_M(value)) {
+ if (IOINST_SCHID_CSSID(value)) {
+ return -EINVAL;
+ }
+ *cssid = 0;
+ *m = 0;
+ } else {
+ *cssid = IOINST_SCHID_CSSID(value);
+ *m = 1;
+ }
+ *ssid = IOINST_SCHID_SSID(value);
+ *schid = IOINST_SCHID_NR(value);
+ return 0;
+}
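+
+/*
+ * Worked example (assuming the IOINST_SCHID_* field layout from
+ * "hw/s390x/ioinst.h"): reg1 = 0x00010005 has the "one" bit set and
+ * m = 0, so it decodes to cssid 0, ssid 0, schid 5:
+ *
+ *     int m, cssid, ssid, schid;
+ *     if (!ioinst_disassemble_sch_ident(0x00010005, &m, &cssid,
+ *                                       &ssid, &schid)) {
+ *         assert(m == 0 && cssid == 0 && ssid == 0 && schid == 5);
+ *     }
+ */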
+
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("xsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_xsch(sch));
+}
+
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("csch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_csch(sch));
+}
+
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("hsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_hsch(sch));
+}
+
+static int ioinst_schib_valid(SCHIB *schib)
+{
+ if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) ||
+ (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) {
+ return 0;
+ }
+ /* Disallow extended measurements for now. */
+ if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) {
+ return 0;
+ }
+    /*
+     * For MB format 1, bits 26-31 of word 11 must be 0. The MBA
+     * occupies words 10 and 11, which means it must be aligned on a
+     * 2**6 (64-byte) boundary.
+     */
+ if ((be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_MBFC) &&
+ (be64_to_cpu(schib->mba) & 0x03fUL)) {
+ return 0;
+ }
+ return 1;
+}
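+
+/*
+ * Worked example (values hypothetical): with PMCW_CHARS_MASK_MBFC set,
+ * mba = 0x1040 passes the check above (0x1040 & 0x3f == 0, i.e.
+ * 64-byte aligned), while mba = 0x1020 is rejected
+ * (0x1020 & 0x3f == 0x20).
+ */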
+
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ SCHIB schib;
+ uint64_t addr;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = get_address_from_regs(env, ipb, &ar);
+ if (addr & 3) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, &schib, sizeof(schib));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return;
+ }
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+ !ioinst_schib_valid(&schib)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("msch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_msch(sch, &schib));
+}
+
+static void copy_orb_from_guest(ORB *dest, const ORB *src)
+{
+ dest->intparm = be32_to_cpu(src->intparm);
+ dest->ctrl0 = be16_to_cpu(src->ctrl0);
+ dest->lpm = src->lpm;
+ dest->ctrl1 = src->ctrl1;
+ dest->cpa = be32_to_cpu(src->cpa);
+}
+
+static int ioinst_orb_valid(ORB *orb)
+{
+ if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) ||
+ (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) {
+ return 0;
+ }
+ /* We don't support MIDA. */
+ if (orb->ctrl1 & ORB_CTRL1_MASK_MIDAW) {
+ return 0;
+ }
+ if ((orb->cpa & HIGH_ORDER_BIT) != 0) {
+ return 0;
+ }
+ return 1;
+}
+
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ ORB orig_orb, orb;
+ uint64_t addr;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = get_address_from_regs(env, ipb, &ar);
+ if (addr & 3) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, &orig_orb, sizeof(orb));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return;
+ }
+ copy_orb_from_guest(&orb, &orig_orb);
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+ !ioinst_orb_valid(&orb)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("ssch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_ssch(sch, &orb));
+}
+
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
+{
+ CRW crw;
+ uint64_t addr;
+ int cc;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = get_address_from_regs(env, ipb, &ar);
+ if (addr & 3) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
+ cc = css_do_stcrw(&crw);
+ /* 0 - crw stored, 1 - zeroes stored */
+
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &crw, sizeof(crw));
+ setcc(cpu, cc);
+ } else {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
+ setcc(cpu, cc);
+ } else {
+ if (cc == 0) {
+ /* Write failed: requeue CRW since STCRW is suppressing */
+ css_undo_stcrw(&crw);
+ }
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
+ }
+}
+
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ uint64_t addr;
+ int cc;
+ SCHIB schib;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = get_address_from_regs(env, ipb, &ar);
+ if (addr & 3) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+        /*
+         * The Ultravisor checks that schid bit 16 is one and bits 0-12
+         * are 0 and injects an operand exception itself.
+         *
+         * Hence we should never end up here.
+         */
+        g_assert(!s390_is_pv());
+        /*
+         * As operand exceptions have a lower priority than access
+         * exceptions, we first check whether the memory area is
+         * writable (injecting the access exception if it is not).
+         */
+ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
+ return;
+ }
+ trace_ioinst_sch_id("stsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch) {
+ if (css_subch_visible(sch)) {
+ cc = css_do_stsch(sch, &schib);
+ } else {
+ /* Indicate no more subchannels in this css/ss */
+ cc = 3;
+ }
+ } else {
+ if (css_schid_final(m, cssid, ssid, schid)) {
+ cc = 3; /* No more subchannels in this css/ss */
+ } else {
+ /* Store an empty schib. */
+ memset(&schib, 0, sizeof(schib));
+ cc = 0;
+ }
+ }
+ if (cc != 3) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &schib, sizeof(schib));
+ } else if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+ sizeof(schib)) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return;
+ }
+ } else {
+ /* Access exceptions have a higher priority than cc3 */
+ if (!s390_is_pv() &&
+ s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return;
+ }
+ }
+ setcc(cpu, cc);
+}
+
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
+{
+ CPUS390XState *env = &cpu->env;
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ IRB irb;
+ uint64_t addr;
+ int cc, irb_len;
+ uint8_t ar;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("tsch", cssid, ssid, schid);
+ addr = get_address_from_regs(env, ipb, &ar);
+ if (addr & 3) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return -EIO;
+ }
+
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ cc = css_do_tsch_get_irb(sch, &irb, &irb_len);
+ } else {
+ cc = 3;
+ }
+ /* 0 - status pending, 1 - not status pending, 3 - not operational */
+ if (cc != 3) {
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr, &irb, irb_len);
+ } else if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return -EFAULT;
+ }
+ css_do_tsch_update_subch(sch);
+ } else {
+ irb_len = sizeof(irb) - sizeof(irb.emw);
+ /* Access exceptions have a higher priority than cc3 */
+ if (!s390_is_pv() &&
+ s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return -EFAULT;
+ }
+ }
+
+ setcc(cpu, cc);
+ return 0;
+}
+
+typedef struct ChscReq {
+ uint16_t len;
+ uint16_t command;
+ uint32_t param0;
+ uint32_t param1;
+ uint32_t param2;
+} QEMU_PACKED ChscReq;
+
+typedef struct ChscResp {
+ uint16_t len;
+ uint16_t code;
+ uint32_t param;
+ char data[];
+} QEMU_PACKED ChscResp;
+
+#define CHSC_MIN_RESP_LEN 0x0008
+
+#define CHSC_SCPD 0x0002
+#define CHSC_SCSC 0x0010
+#define CHSC_SDA 0x0031
+#define CHSC_SEI 0x000e
+
+#define CHSC_SCPD_0_M 0x20000000
+#define CHSC_SCPD_0_C 0x10000000
+#define CHSC_SCPD_0_FMT 0x0f000000
+#define CHSC_SCPD_0_CSSID 0x00ff0000
+#define CHSC_SCPD_0_RFMT 0x00000f00
+#define CHSC_SCPD_0_RES 0xc000f000
+#define CHSC_SCPD_1_RES 0xffffff00
+#define CHSC_SCPD_01_CHPID 0x000000ff
+static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint32_t param1 = be32_to_cpu(req->param1);
+ uint16_t resp_code;
+ int rfmt;
+ uint16_t cssid;
+ uint8_t f_chpid, l_chpid;
+ int desc_size;
+ int m;
+
+ rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8;
+ if ((rfmt == 0) || (rfmt == 1)) {
+ rfmt = !!(param0 & CHSC_SCPD_0_C);
+ }
+ if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) ||
+ (param1 & CHSC_SCPD_1_RES) || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ if (param0 & CHSC_SCPD_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16;
+ m = param0 & CHSC_SCPD_0_M;
+ if (cssid != 0) {
+ if (!m || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ f_chpid = param0 & CHSC_SCPD_01_CHPID;
+ l_chpid = param1 & CHSC_SCPD_01_CHPID;
+ if (l_chpid < f_chpid) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ /* css_collect_chp_desc() is endian-aware */
+ desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt,
+ &res->data);
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(8 + desc_size);
+ res->param = cpu_to_be32(rfmt);
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = cpu_to_be32(rfmt);
+}
+
+#define CHSC_SCSC_0_M 0x20000000
+#define CHSC_SCSC_0_FMT 0x000f0000
+#define CHSC_SCSC_0_CSSID 0x0000ff00
+#define CHSC_SCSC_0_RES 0xdff000ff
+static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint8_t cssid;
+ uint16_t resp_code;
+ uint32_t general_chars[510];
+ uint32_t chsc_chars[508];
+
+ if (len != 0x0010) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+
+ if (param0 & CHSC_SCSC_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8;
+ if (cssid != 0) {
+ if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(4080);
+ res->param = 0;
+
+ memset(general_chars, 0, sizeof(general_chars));
+ memset(chsc_chars, 0, sizeof(chsc_chars));
+
+ general_chars[0] = cpu_to_be32(0x03000000);
+ general_chars[1] = cpu_to_be32(0x00079000);
+ general_chars[3] = cpu_to_be32(0x00080000);
+
+ chsc_chars[0] = cpu_to_be32(0x40000000);
+ chsc_chars[3] = cpu_to_be32(0x00040000);
+
+ memcpy(res->data, general_chars, sizeof(general_chars));
+ memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars));
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+#define CHSC_SDA_0_FMT 0x0f000000
+#define CHSC_SDA_0_OC 0x0000ffff
+#define CHSC_SDA_0_RES 0xf0ff0000
+#define CHSC_SDA_OC_MCSSE 0x0
+#define CHSC_SDA_OC_MSS 0x2
+static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res)
+{
+ uint16_t resp_code = 0x0001;
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint16_t oc;
+ int ret;
+
+ if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) {
+ resp_code = 0x0003;
+ goto out;
+ }
+
+ if (param0 & CHSC_SDA_0_FMT) {
+ resp_code = 0x0007;
+ goto out;
+ }
+
+ oc = param0 & CHSC_SDA_0_OC;
+ switch (oc) {
+ case CHSC_SDA_OC_MCSSE:
+ ret = css_enable_mcsse();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ case CHSC_SDA_OC_MSS:
+ ret = css_enable_mss();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ default:
+ resp_code = 0x0003;
+ goto out;
+ }
+
+out:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+static int chsc_sei_nt0_get_event(void *res)
+{
+ /* no events yet */
+ return 1;
+}
+
+static int chsc_sei_nt0_have_event(void)
+{
+ /* no events yet */
+ return 0;
+}
+
+static int chsc_sei_nt2_get_event(void *res)
+{
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return pci_chsc_sei_nt2_get_event(res);
+ }
+ return 1;
+}
+
+static int chsc_sei_nt2_have_event(void)
+{
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return pci_chsc_sei_nt2_have_event();
+ }
+ return 0;
+}
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res)
+{
+ uint64_t selection_mask = ldq_p(&req->param1);
+ uint8_t *res_flags = (uint8_t *)res->data;
+ int have_event = 0;
+ int have_more = 0;
+
+    /* Per the architecture, NT0 cannot be masked. */
+ have_event = !chsc_sei_nt0_get_event(res);
+ have_more = chsc_sei_nt0_have_event();
+
+ if (selection_mask & CHSC_SEI_NT2) {
+ if (!have_event) {
+ have_event = !chsc_sei_nt2_get_event(res);
+ }
+
+ if (!have_more) {
+ have_more = chsc_sei_nt2_have_event();
+ }
+ }
+
+ if (have_event) {
+ res->code = cpu_to_be16(0x0001);
+ if (have_more) {
+ (*res_flags) |= 0x80;
+ } else {
+ (*res_flags) &= ~0x80;
+ css_clear_sei_pending();
+ }
+ } else {
+ res->code = cpu_to_be16(0x0005);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ }
+}
+
+static void ioinst_handle_chsc_unimplemented(ChscResp *res)
+{
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->code = cpu_to_be16(0x0004);
+ res->param = 0;
+}
+
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
+{
+ ChscReq *req;
+ ChscResp *res;
+ uint64_t addr = 0;
+ int reg;
+ uint16_t len;
+ uint16_t command;
+ CPUS390XState *env = &cpu->env;
+ uint8_t buf[TARGET_PAGE_SIZE];
+
+ trace_ioinst("chsc");
+ reg = (ipb >> 20) & 0x00f;
+ if (!s390_is_pv()) {
+ addr = env->regs[reg];
+ }
+ /* Page boundary? */
+ if (addr & 0xfff) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ return;
+ }
+ /*
+ * Reading sizeof(ChscReq) bytes is currently enough for all of our
+ * present CHSC sub-handlers ... if we ever need more, we should take
+ * care of req->len here first.
+ */
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, addr, buf, sizeof(ChscReq));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return;
+ }
+ req = (ChscReq *)buf;
+ len = be16_to_cpu(req->len);
+ /* Length field valid? */
+ if ((len < 16) || (len > 4088) || (len & 7)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+ memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
+ res = (void *)((char *)req + len);
+ command = be16_to_cpu(req->command);
+ trace_ioinst_chsc_cmd(command, len);
+ switch (command) {
+ case CHSC_SCSC:
+ ioinst_handle_chsc_scsc(req, res);
+ break;
+ case CHSC_SCPD:
+ ioinst_handle_chsc_scpd(req, res);
+ break;
+ case CHSC_SDA:
+ ioinst_handle_chsc_sda(req, res);
+ break;
+ case CHSC_SEI:
+ ioinst_handle_chsc_sei(req, res);
+ break;
+ default:
+ ioinst_handle_chsc_unimplemented(res);
+ break;
+ }
+
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, addr + len, res, be16_to_cpu(res->len));
+ setcc(cpu, 0); /* Command execution complete */
+ } else {
+ if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+ be16_to_cpu(res->len))) {
+ setcc(cpu, 0); /* Command execution complete */
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
+ }
+}
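+
+/*
+ * Worked layout example (values hypothetical): for a request with
+ * req->len = 0x0010 at a page-aligned addr, the response block starts
+ * at byte 0x10 of the same page, and on success the write-back above
+ * copies be16_to_cpu(res->len) bytes to addr + 0x10 before CC 0 is
+ * set.
+ */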
+
+#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
+#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
+#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
+#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
+
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb, uintptr_t ra)
+{
+ uint8_t mbk;
+ int update;
+ int dct;
+ CPUS390XState *env = &cpu->env;
+
+ trace_ioinst("schm");
+
+ if (SCHM_REG1_RES(reg1)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+
+ mbk = SCHM_REG1_MBK(reg1);
+ update = SCHM_REG1_UPD(reg1);
+ dct = SCHM_REG1_DCT(reg1);
+
+ if (update && (reg2 & 0x000000000000001f)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+
+ css_do_schm(mbk, update, dct, update ? reg2 : 0);
+}
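+
+/*
+ * Worked example (value hypothetical): reg1 = 0x10000002 has no
+ * reserved bits set and decodes to mbk = 1, update = 1, dct = 0;
+ * since update is set, reg2 must then be 32-byte aligned
+ * (reg2 & 0x1f == 0) and is passed to css_do_schm() as the
+ * measurement-block origin.
+ */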
+
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
+ return;
+ }
+ trace_ioinst_sch_id("rsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ setcc(cpu, 3);
+ return;
+ }
+ setcc(cpu, css_do_rsch(sch));
+}
+
+#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
+#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
+#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ int cc;
+ uint8_t cssid;
+ uint8_t chpid;
+ int ret;
+ CPUS390XState *env = &cpu->env;
+
+ if (RCHP_REG1_RES(reg1)) {
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+
+ cssid = RCHP_REG1_CSSID(reg1);
+ chpid = RCHP_REG1_CHPID(reg1);
+
+ trace_ioinst_chp_id("rchp", cssid, chpid);
+
+ ret = css_do_rchp(cssid, chpid);
+
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ /* Invalid channel subsystem. */
+ s390_program_interrupt(env, PGM_OPERAND, ra);
+ return;
+ }
+ setcc(cpu, cc);
+}
+
+#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
+{
+ /* We do not provide address limit checking, so let's suppress it. */
+ if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
+ }
+}
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
new file mode 100644
index 000000000..5b1fdb55c
--- /dev/null
+++ b/target/s390x/kvm/kvm.c
@@ -0,0 +1,2564 @@
+/*
+ * QEMU S390x KVM implementation
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp. 2012
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+#include <asm/ptrace.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm_s390x.h"
+#include "sysemu/kvm_int.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "qemu/units.h"
+#include "qemu/main-loop.h"
+#include "qemu/mmap-alloc.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/runstate.h"
+#include "sysemu/device_tree.h"
+#include "exec/gdbstub.h"
+#include "exec/ram_addr.h"
+#include "trace.h"
+#include "hw/s390x/s390-pci-inst.h"
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/ebcdic.h"
+#include "exec/memattrs.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+#include "hw/s390x/s390-virtio-hcall.h"
+#include "hw/s390x/pv.h"
+
+#ifndef DEBUG_KVM
+#define DEBUG_KVM 0
+#endif
+
+#define DPRINTF(fmt, ...) do { \
+ if (DEBUG_KVM) { \
+ fprintf(stderr, fmt, ## __VA_ARGS__); \
+ } \
+} while (0)
+
+#define kvm_vm_check_mem_attr(s, attr) \
+ kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
+
+#define IPA0_DIAG 0x8300
+#define IPA0_SIGP 0xae00
+#define IPA0_B2 0xb200
+#define IPA0_B9 0xb900
+#define IPA0_EB 0xeb00
+#define IPA0_E3 0xe300
+
+#define PRIV_B2_SCLP_CALL 0x20
+#define PRIV_B2_CSCH 0x30
+#define PRIV_B2_HSCH 0x31
+#define PRIV_B2_MSCH 0x32
+#define PRIV_B2_SSCH 0x33
+#define PRIV_B2_STSCH 0x34
+#define PRIV_B2_TSCH 0x35
+#define PRIV_B2_TPI 0x36
+#define PRIV_B2_SAL 0x37
+#define PRIV_B2_RSCH 0x38
+#define PRIV_B2_STCRW 0x39
+#define PRIV_B2_STCPS 0x3a
+#define PRIV_B2_RCHP 0x3b
+#define PRIV_B2_SCHM 0x3c
+#define PRIV_B2_CHSC 0x5f
+#define PRIV_B2_SIGA 0x74
+#define PRIV_B2_XSCH 0x76
+
+#define PRIV_EB_SQBS 0x8a
+#define PRIV_EB_PCISTB 0xd0
+#define PRIV_EB_SIC 0xd1
+
+#define PRIV_B9_EQBS 0x9c
+#define PRIV_B9_CLP 0xa0
+#define PRIV_B9_PCISTG 0xd0
+#define PRIV_B9_PCILG 0xd2
+#define PRIV_B9_RPCIT 0xd3
+
+#define PRIV_E3_MPCIFC 0xd0
+#define PRIV_E3_STPCIFC 0xd4
+
+#define DIAG_TIMEREVENT 0x288
+#define DIAG_IPL 0x308
+#define DIAG_SET_CONTROL_PROGRAM_CODES 0x318
+#define DIAG_KVM_HYPERCALL 0x500
+#define DIAG_KVM_BREAKPOINT 0x501
+
+#define ICPT_INSTRUCTION 0x04
+#define ICPT_PROGRAM 0x08
+#define ICPT_EXT_INT 0x14
+#define ICPT_WAITPSW 0x1c
+#define ICPT_SOFT_INTERCEPT 0x24
+#define ICPT_CPU_STOP 0x28
+#define ICPT_OPEREXC 0x2c
+#define ICPT_IO 0x40
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_INSTR_NOTIFICATION 0x6c
+
+#define NR_LOCAL_IRQS 32
+/*
+ * Needs to be big enough to contain max_cpus emergency signals
+ * and in addition NR_LOCAL_IRQS interrupts
+ */
+#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
+ (max_cpus + NR_LOCAL_IRQS))
+/*
+ * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
+ * as the dirty bitmap must be managed by bitops that take an int as
+ * their position indicator. This would end at an unaligned address
+ * (0x7fffff00000). As future variants might provide larger pages, and
+ * to keep all addresses properly aligned, let us split at 4 TiB.
+ */
+#define KVM_SLOT_MAX_BYTES (4UL * TiB)
+
+static CPUWatchpoint hw_watchpoint;
+/*
+ * We don't use a list because this structure is also used to transmit the
+ * hardware breakpoints to the kernel.
+ */
+static struct kvm_hw_breakpoint *hw_breakpoints;
+static int nb_hw_breakpoints;
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static int cap_sync_regs;
+static int cap_async_pf;
+static int cap_mem_op;
+static int cap_s390_irq;
+static int cap_ri;
+static int cap_hpage_1m;
+static int cap_vcpu_resets;
+static int cap_protected;
+
+static int active_cmma;
+
+static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (uint64_t) memory_limit,
+ };
+
+ return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
+{
+ int rc;
+
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (uint64_t) &new_limit,
+ };
+
+ if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
+ return 0;
+ }
+
+ rc = kvm_s390_query_mem_limit(hw_limit);
+ if (rc) {
+ return rc;
+ } else if (*hw_limit < new_limit) {
+ return -E2BIG;
+ }
+
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_cmma_active(void)
+{
+ return active_cmma;
+}
+
+static bool kvm_s390_cmma_available(void)
+{
+ static bool initialized, value;
+
+ if (!initialized) {
+ initialized = true;
+ value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
+ kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
+ }
+ return value;
+}
+
+void kvm_s390_cmma_reset(void)
+{
+ int rc;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_CLR_CMMA,
+ };
+
+ if (!kvm_s390_cmma_active()) {
+ return;
+ }
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ trace_kvm_clear_cmma(rc);
+}
+
+static void kvm_s390_enable_cmma(void)
+{
+ int rc;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
+ };
+
+ if (cap_hpage_1m) {
+ warn_report("CMM will not be enabled because it is not "
+ "compatible with huge memory backings.");
+ return;
+ }
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ active_cmma = !rc;
+ trace_kvm_enable_cmma(rc);
+}
+
+static void kvm_s390_set_attr(uint64_t attr)
+{
+ struct kvm_device_attr attribute = {
+ .group = KVM_S390_VM_CRYPTO,
+ .attr = attr,
+ };
+
+ int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
+
+ if (ret) {
+ error_report("Failed to set crypto device attribute %lu: %s",
+ attr, strerror(-ret));
+ }
+}
+
+static void kvm_s390_init_aes_kw(void)
+{
+ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
+
+ if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
+ NULL)) {
+ attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
+ }
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
+static void kvm_s390_init_dea_kw(void)
+{
+ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
+
+ if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
+ NULL)) {
+ attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
+ }
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
+void kvm_s390_crypto_reset(void)
+{
+ if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
+ kvm_s390_init_aes_kw();
+ kvm_s390_init_dea_kw();
+ }
+}
+
+void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
+{
+ if (pagesize == 4 * KiB) {
+ return;
+ }
+
+ if (!hpage_1m_allowed()) {
+ error_setg(errp, "This QEMU machine does not support huge page "
+ "mappings");
+ return;
+ }
+
+ if (pagesize != 1 * MiB) {
+ error_setg(errp, "Memory backing with 2G pages was specified, "
+ "but KVM does not support this memory backing");
+ return;
+ }
+
+ if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
+ error_setg(errp, "Memory backing with 1M pages was specified, "
+ "but KVM does not support this memory backing");
+ return;
+ }
+
+ cap_hpage_1m = 1;
+}
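+
+/*
+ * Usage sketch (illustrative): machine init code would call this with
+ * the largest page size of the memory backend, e.g.:
+ *
+ *     Error *local_err = NULL;
+ *     kvm_s390_set_max_pagesize(1 * MiB, &local_err);
+ *     if (local_err) {
+ *         error_report_err(local_err);
+ *     }
+ */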
+
+int kvm_s390_get_hpage_1m(void)
+{
+ return cap_hpage_1m;
+}
+
+static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
+ false, NULL);
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
+ error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
+ "please use kernel 3.15 or newer");
+ return -1;
+ }
+ if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
+ error_report("KVM is missing capability KVM_CAP_S390_COW - "
+ "unsupported environment");
+ return -1;
+ }
+
+ cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
+ cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
+ cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
+ cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
+ cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
+ cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
+
+ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
+ if (ri_allowed()) {
+ if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
+ cap_ri = 1;
+ }
+ }
+ if (cpu_model_allowed()) {
+ kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
+ }
+
+ /*
+ * The migration interface for ais was introduced with kernel 4.13
+ * but the capability itself had been active since 4.12. As migration
+ * support is considered necessary, we only try to enable this for
+ * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
+ */
+ if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
+ kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
+ kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
+ }
+
+ kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
+ return 0;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ return 0;
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return cpu->cpu_index;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
+ S390CPU *cpu = S390_CPU(cs);
+ kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+ cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
+ return 0;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ g_free(cpu->irqstate);
+ cpu->irqstate = NULL;
+
+ return 0;
+}
+
+static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
+{
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * The reset call is needed here to reset in-kernel vcpu data that
+ * we can't access directly from QEMU (i.e. with older kernels
+ * which don't support sync_regs/ONE_REG). Before this ioctl
+ * cpu_synchronize_state() is called in common kvm code
+ * (kvm-all).
+ */
+ if (kvm_vcpu_ioctl(cs, type)) {
+ error_report("CPU reset failed on CPU %i type %lx",
+ cs->cpu_index, type);
+ }
+}
+
+void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
+{
+ kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
+}
+
+void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
+{
+ if (cap_vcpu_resets) {
+ kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
+ } else {
+ kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
+ }
+}
+
+void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
+{
+ if (cap_vcpu_resets) {
+ kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
+ }
+}
+
+static int can_sync_regs(CPUState *cs, int regs)
+{
+ return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ struct kvm_fpu fpu = {};
+ int r;
+ int i;
+
+    /* Always save the PSW and the GPRs. */
+ cs->kvm_run->psw_addr = env->psw.addr;
+ cs->kvm_run->psw_mask = env->psw.mask;
+
+    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+        for (i = 0; i < 16; i++) {
+            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
+        }
+        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+    } else {
+ for (i = 0; i < 16; i++) {
+ regs.gprs[i] = env->regs[i];
+ }
+ r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+ for (i = 0; i < 32; i++) {
+ cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
+ cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
+ }
+ cs->kvm_run->s.regs.fpc = env->fpc;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
+ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
+ for (i = 0; i < 16; i++) {
+ cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
+ }
+ cs->kvm_run->s.regs.fpc = env->fpc;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
+ } else {
+ /* Floating point */
+ for (i = 0; i < 16; i++) {
+ fpu.fprs[i] = *get_freg(env, i);
+ }
+ fpu.fpc = env->fpc;
+
+ r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ /* Do we need to save more than that? */
+ if (level == KVM_PUT_RUNTIME_STATE) {
+ return 0;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+ cs->kvm_run->s.regs.cputm = env->cputm;
+ cs->kvm_run->s.regs.ckc = env->ckc;
+ cs->kvm_run->s.regs.todpr = env->todpr;
+ cs->kvm_run->s.regs.gbea = env->gbea;
+ cs->kvm_run->s.regs.pp = env->pp;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
+ } else {
+ /*
+         * These ONE_REGS are not protected by a capability. As they are
+         * only necessary for migration, we just trace a possible error
+         * but don't return an error code.
+ */
+ kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
+ memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
+ }
+
+ /* pfault parameters */
+ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+ cs->kvm_run->s.regs.pft = env->pfault_token;
+ cs->kvm_run->s.regs.pfs = env->pfault_select;
+ cs->kvm_run->s.regs.pfc = env->pfault_compare;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
+ } else if (cap_async_pf) {
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+    /* Access registers and control registers */
+ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+ for (i = 0; i < 16; i++) {
+ cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
+ cs->kvm_run->s.regs.crs[i] = env->cregs[i];
+ }
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
+ } else {
+ for (i = 0; i < 16; i++) {
+ sregs.acrs[i] = env->aregs[i];
+ sregs.crs[i] = env->cregs[i];
+ }
+ r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
+ memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
+ cs->kvm_run->s.regs.bpbc = env->bpbc;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
+ cs->kvm_run->s.regs.etoken = env->etoken;
+ cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
+ cs->kvm_run->s.regs.diag318 = env->diag318_info;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
+ }
+
+ /* Finally the prefix */
+ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+ cs->kvm_run->s.regs.prefix = env->psa;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
+ } else {
+ /* prefix is only supported via sync regs */
+ }
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ struct kvm_fpu fpu;
+ int i, r;
+
+ /* get the PSW */
+ env->psw.addr = cs->kvm_run->psw_addr;
+ env->psw.mask = cs->kvm_run->psw_mask;
+
+ /* the GPRS */
+ if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = cs->kvm_run->s.regs.gprs[i];
+ }
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = regs.gprs[i];
+ }
+ }
+
+ /* The ACRS and CRS */
+ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+ for (i = 0; i < 16; i++) {
+ env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
+ env->cregs[i] = cs->kvm_run->s.regs.crs[i];
+ }
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ env->aregs[i] = sregs.acrs[i];
+ env->cregs[i] = sregs.crs[i];
+ }
+ }
+
+ /* Floating point and vector registers */
+ if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+ for (i = 0; i < 32; i++) {
+ env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
+ env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
+ }
+ env->fpc = cs->kvm_run->s.regs.fpc;
+ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
+ for (i = 0; i < 16; i++) {
+ *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
+ }
+ env->fpc = cs->kvm_run->s.regs.fpc;
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ *get_freg(env, i) = fpu.fprs[i];
+ }
+ env->fpc = fpu.fpc;
+ }
+
+ /* The prefix */
+ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+ env->psa = cs->kvm_run->s.regs.prefix;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+ env->cputm = cs->kvm_run->s.regs.cputm;
+ env->ckc = cs->kvm_run->s.regs.ckc;
+ env->todpr = cs->kvm_run->s.regs.todpr;
+ env->gbea = cs->kvm_run->s.regs.gbea;
+ env->pp = cs->kvm_run->s.regs.pp;
+ } else {
+ /*
+         * These ONE_REGS are not protected by a capability. As they are
+         * only necessary for migration, we just trace a possible error
+         * but don't return an error code.
+ */
+ kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
+ memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
+ memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
+ env->bpbc = cs->kvm_run->s.regs.bpbc;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
+ env->etoken = cs->kvm_run->s.regs.etoken;
+ env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
+ }
+
+ /* pfault parameters */
+ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+ env->pfault_token = cs->kvm_run->s.regs.pft;
+ env->pfault_select = cs->kvm_run->s.regs.pfs;
+ env->pfault_compare = cs->kvm_run->s.regs.pfc;
+ } else if (cap_async_pf) {
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
+ env->diag318_info = cs->kvm_run->s.regs.diag318;
+ }
+
+ return 0;
+}
+
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ int r;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_LOW,
+ .addr = (uint64_t)tod_low,
+ };
+
+ r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (r) {
+ return r;
+ }
+
+ attr.attr = KVM_S390_VM_TOD_HIGH;
+ attr.addr = (uint64_t)tod_high;
+ return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
+{
+ int r;
+ struct kvm_s390_vm_tod_clock gtod;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_EXT,
+ .addr = (uint64_t)&gtod,
+ };
+
+ r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ *tod_high = gtod.epoch_idx;
+ *tod_low = gtod.tod;
+
+ return r;
+}
+
+int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
+{
+ int r;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_LOW,
+ .addr = (uint64_t)&tod_low,
+ };
+
+ r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ if (r) {
+ return r;
+ }
+
+ attr.attr = KVM_S390_VM_TOD_HIGH;
+ attr.addr = (uint64_t)&tod_high;
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
+{
+ struct kvm_s390_vm_tod_clock gtod = {
+ .epoch_idx = tod_high,
+ .tod = tod_low,
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_EXT,
+ .addr = (uint64_t)&gtod,
+ };
+
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
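+
+/*
+ * Usage sketch (illustrative): reading and writing the full TOD clock,
+ * including the epoch index, goes through the *_ext pair:
+ *
+ *     uint8_t tod_high;
+ *     uint64_t tod_low;
+ *     if (!kvm_s390_get_clock_ext(&tod_high, &tod_low)) {
+ *         kvm_s390_set_clock_ext(tod_high, tod_low);
+ *     }
+ */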
+
+/**
+ * kvm_s390_mem_op:
+ * @addr: the logical start address in guest memory
+ * @ar: the access register number
+ * @hostbuf: buffer in host memory; NULL = only check, without copying
+ * @len: length that should be transferred
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception or error occurred
+ *
+ * Use KVM ioctl to read/write from/to guest memory. An access exception
+ * is injected into the vCPU in case of translation errors.
+ */
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write)
+{
+ struct kvm_s390_mem_op mem_op = {
+ .gaddr = addr,
+ .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
+ .size = len,
+ .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
+ : KVM_S390_MEMOP_LOGICAL_READ,
+ .buf = (uint64_t)hostbuf,
+ .ar = ar,
+ };
+ int ret;
+
+ if (!cap_mem_op) {
+ return -ENOSYS;
+ }
+ if (!hostbuf) {
+ mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+ if (ret < 0) {
+ warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
+ }
+ return ret;
+}
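+
+/*
+ * Usage sketch (illustrative): read four bytes from a guest logical
+ * address, with any translation fault injected into the vCPU:
+ *
+ *     uint8_t buf[4];
+ *     int ret = kvm_s390_mem_op(cpu, addr, ar, buf, sizeof(buf), false);
+ *
+ * ret == 0 means buf now holds the guest data; a non-zero value means
+ * an exception was injected or an error occurred.
+ */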
+
+int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
+ int len, bool is_write)
+{
+ struct kvm_s390_mem_op mem_op = {
+ .sida_offset = offset,
+ .size = len,
+ .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
+ : KVM_S390_MEMOP_SIDA_READ,
+ .buf = (uint64_t)hostbuf,
+ };
+ int ret;
+
+ if (!cap_mem_op || !cap_protected) {
+ return -ENOSYS;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+ if (ret < 0) {
+ error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
+ abort();
+ }
+ return ret;
+}
+
+static uint8_t const *sw_bp_inst;
+static uint8_t sw_bp_ilen;
+
+static void determine_sw_breakpoint_instr(void)
+{
+ /* DIAG 501 is used for sw breakpoints with old kernels */
+ static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
+ /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
+ static const uint8_t instr_0x0000[] = {0x00, 0x00};
+
+ if (sw_bp_inst) {
+ return;
+ }
+ if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
+ sw_bp_inst = diag_501;
+ sw_bp_ilen = sizeof(diag_501);
+ DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
+ } else {
+ sw_bp_inst = instr_0x0000;
+ sw_bp_ilen = sizeof(instr_0x0000);
+ DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
+ }
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ determine_sw_breakpoint_instr();
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sw_bp_ilen, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint8_t t[MAX_ILEN];
+
+ if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
+ return -EINVAL;
+ } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
+ return -EINVAL;
+ } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sw_bp_ilen, 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
+ int len, int type)
+{
+ int n;
+
+ for (n = 0; n < nb_hw_breakpoints; n++) {
+ if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
+ (hw_breakpoints[n].len == len || len == -1)) {
+ return &hw_breakpoints[n];
+ }
+ }
+
+ return NULL;
+}
+
+static int insert_hw_breakpoint(target_ulong addr, int len, int type)
+{
+ int size;
+
+ if (find_hw_breakpoint(addr, len, type)) {
+ return -EEXIST;
+ }
+
+ size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
+
+ if (!hw_breakpoints) {
+ nb_hw_breakpoints = 0;
+ hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
+ } else {
+ hw_breakpoints =
+ (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
+ }
+
+ if (!hw_breakpoints) {
+ nb_hw_breakpoints = 0;
+ return -ENOMEM;
+ }
+
+ hw_breakpoints[nb_hw_breakpoints].addr = addr;
+ hw_breakpoints[nb_hw_breakpoints].len = len;
+ hw_breakpoints[nb_hw_breakpoints].type = type;
+
+ nb_hw_breakpoints++;
+
+ return 0;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ type = KVM_HW_BP;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ if (len < 1) {
+ return -EINVAL;
+ }
+ type = KVM_HW_WP_WRITE;
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return insert_hw_breakpoint(addr, len, type);
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int size;
+ struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
+
+ if (bp == NULL) {
+ return -ENOENT;
+ }
+
+ nb_hw_breakpoints--;
+ if (nb_hw_breakpoints > 0) {
+ /*
+         * To trim the array, move the last element into the slot being
+         * removed, if necessary.
+ */
+ if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
+ *bp = hw_breakpoints[nb_hw_breakpoints];
+ }
+ size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
+ hw_breakpoints =
+ (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
+ } else {
+ g_free(hw_breakpoints);
+ hw_breakpoints = NULL;
+ }
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoints = 0;
+ g_free(hw_breakpoints);
+ hw_breakpoints = NULL;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+ int i;
+
+ if (nb_hw_breakpoints > 0) {
+ dbg->arch.nr_hw_bp = nb_hw_breakpoints;
+ dbg->arch.hw_bp = hw_breakpoints;
+
+ for (i = 0; i < nb_hw_breakpoints; ++i) {
+ hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
+ hw_breakpoints[i].addr);
+ }
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ } else {
+ dbg->arch.nr_hw_bp = 0;
+ dbg->arch.hw_bp = NULL;
+ }
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
+ struct kvm_s390_interrupt *interrupt)
+{
+ int r = 0;
+
+ interrupt->type = irq->type;
+ switch (irq->type) {
+ case KVM_S390_INT_VIRTIO:
+ interrupt->parm = irq->u.ext.ext_params;
+ /* fall through */
+ case KVM_S390_INT_PFAULT_INIT:
+ case KVM_S390_INT_PFAULT_DONE:
+ interrupt->parm64 = irq->u.ext.ext_params2;
+ break;
+ case KVM_S390_PROGRAM_INT:
+ interrupt->parm = irq->u.pgm.code;
+ break;
+ case KVM_S390_SIGP_SET_PREFIX:
+ interrupt->parm = irq->u.prefix.address;
+ break;
+ case KVM_S390_INT_SERVICE:
+ interrupt->parm = irq->u.ext.ext_params;
+ break;
+ case KVM_S390_MCHK:
+ interrupt->parm = irq->u.mchk.cr14;
+ interrupt->parm64 = irq->u.mchk.mcic;
+ break;
+ case KVM_S390_INT_EXTERNAL_CALL:
+ interrupt->parm = irq->u.extcall.code;
+ break;
+ case KVM_S390_INT_EMERGENCY:
+ interrupt->parm = irq->u.emerg.code;
+ break;
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_RESTART:
+ break; /* These types have no parameters */
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ interrupt->parm = irq->u.io.subchannel_id << 16;
+ interrupt->parm |= irq->u.io.subchannel_nr;
+ interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
+ interrupt->parm64 |= irq->u.io.io_int_word;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
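+
+/*
+ * Worked example (values hypothetical): for an I/O interrupt with
+ * subchannel_id 0x0001, subchannel_nr 0x0003, io_int_parm 0xcafe and
+ * io_int_word 0xbeef, the legacy format above packs
+ * parm = 0x00010003 and parm64 = 0x0000cafe0000beef.
+ */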
+
+static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_interrupt kvmint = {};
+ int r;
+
+ r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+ exit(1);
+ }
+
+ r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "KVM failed to inject interrupt\n");
+ exit(1);
+ }
+}
+
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
+{
+ CPUState *cs = CPU(cpu);
+ int r;
+
+ if (cap_s390_irq) {
+ r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
+ if (!r) {
+ return;
+ }
+ error_report("KVM failed to inject interrupt %llx", irq->type);
+ exit(1);
+ }
+
+ inject_vcpu_irq_legacy(cs, irq);
+}
+
+void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_interrupt kvmint = {};
+ int r;
+
+ r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+ exit(1);
+ }
+
+ r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "KVM failed to inject interrupt\n");
+ exit(1);
+ }
+}
+
+void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ };
+ qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
+ cpu->env.psw.addr);
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ .u.pgm.trans_exc_code = te_code,
+ .u.pgm.exc_access_id = te_code & 3,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
+ uint16_t ipbh0)
+{
+ CPUS390XState *env = &cpu->env;
+ uint64_t sccb;
+ uint32_t code;
+ int r;
+
+ sccb = env->regs[ipbh0 & 0xf];
+ code = env->regs[(ipbh0 & 0xf0) >> 4];
+
+ switch (run->s390_sieic.icptcode) {
+ case ICPT_PV_INSTR_NOTIFICATION:
+ g_assert(s390_is_pv());
+ /* The notification intercepts are currently handled by KVM */
+ error_report("unexpected SCLP PV notification");
+ exit(1);
+ break;
+ case ICPT_PV_INSTR:
+ g_assert(s390_is_pv());
+ sclp_service_call_protected(env, sccb, code);
+ /* Setting the CC is done by the Ultravisor. */
+ break;
+ case ICPT_INSTRUCTION:
+ g_assert(!s390_is_pv());
+ r = sclp_service_call(env, sccb, code);
+ if (r < 0) {
+ kvm_s390_program_interrupt(cpu, -r);
+ return;
+ }
+ setcc(cpu, r);
+ }
+}
+
+static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+ CPUS390XState *env = &cpu->env;
+ int rc = 0;
+ uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
+
+ switch (ipa1) {
+ case PRIV_B2_XSCH:
+ ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_CSCH:
+ ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_HSCH:
+ ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_MSCH:
+ ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_SSCH:
+ ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_STCRW:
+ ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_STSCH:
+ ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_TSCH:
+ /* We should only get tsch via KVM_EXIT_S390_TSCH. */
+ fprintf(stderr, "Spurious tsch intercept\n");
+ break;
+ case PRIV_B2_CHSC:
+ ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_TPI:
+ /* This should have been handled by kvm already. */
+ fprintf(stderr, "Spurious tpi intercept\n");
+ break;
+ case PRIV_B2_SCHM:
+ ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
+ run->s390_sieic.ipb, RA_IGNORED);
+ break;
+ case PRIV_B2_RSCH:
+ ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_RCHP:
+ ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_STCPS:
+ /* We do not provide this instruction, it is suppressed. */
+ break;
+ case PRIV_B2_SAL:
+ ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
+ break;
+ case PRIV_B2_SIGA:
+ /* Not provided, set CC = 3 for subchannel not operational */
+ setcc(cpu, 3);
+ break;
+ case PRIV_B2_SCLP_CALL:
+ kvm_sclp_service_call(cpu, run, ipbh0);
+ break;
+ default:
+ rc = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
+ break;
+ }
+
+ return rc;
+}
+
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
+ uint32_t base2 = run->s390_sieic.ipb >> 28;
+ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+ ((run->s390_sieic.ipb & 0xff00) << 4);
+
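+    /* disp2 is a signed 20-bit displacement; sign-extend it to 32 bits. */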
+ if (disp2 & 0x80000) {
+ disp2 += 0xfff00000;
+ }
+ if (ar) {
+ *ar = base2;
+ }
+
+ return (base2 ? env->regs[base2] : 0) +
+ (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
+}
+
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t base2 = run->s390_sieic.ipb >> 28;
+ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+ ((run->s390_sieic.ipb & 0xff00) << 4);
+
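+    /* disp2 is a signed 20-bit displacement; sign-extend it to 32 bits. */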
+ if (disp2 & 0x80000) {
+ disp2 += 0xfff00000;
+ }
+ if (ar) {
+ *ar = base2;
+ }
+
+ return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
+}
+
+static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return clp_service_call(cpu, r2, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint64_t fiba;
+ uint8_t ar;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ fiba = get_base_disp_rxy(cpu, run, &ar);
+
+ return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUS390XState *env = &cpu->env;
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint8_t r3 = run->s390_sieic.ipa & 0x000f;
+ uint8_t isc;
+ uint16_t mode;
+ int r;
+
+ mode = env->regs[r1] & 0xffff;
+ isc = (env->regs[r3] >> 27) & 0x7;
+ r = css_do_sic(env, isc, mode);
+ if (r) {
+ kvm_s390_program_interrupt(cpu, -r);
+ }
+
+ return 0;
+}
+
+static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint8_t r3 = run->s390_sieic.ipa & 0x000f;
+ uint64_t gaddr;
+ uint8_t ar;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ gaddr = get_base_disp_rsy(cpu, run, &ar);
+
+ return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint64_t fiba;
+ uint8_t ar;
+
+ if (s390_has_feat(S390_FEAT_ZPCI)) {
+ fiba = get_base_disp_rxy(cpu, run, &ar);
+
+ return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
+ } else {
+ return -1;
+ }
+}
+
+static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+ int r = 0;
+
+ switch (ipa1) {
+ case PRIV_B9_CLP:
+ r = kvm_clp_service_call(cpu, run);
+ break;
+ case PRIV_B9_PCISTG:
+ r = kvm_pcistg_service_call(cpu, run);
+ break;
+ case PRIV_B9_PCILG:
+ r = kvm_pcilg_service_call(cpu, run);
+ break;
+ case PRIV_B9_RPCIT:
+ r = kvm_rpcit_service_call(cpu, run);
+ break;
+ case PRIV_B9_EQBS:
+ /* just inject exception */
+ r = -1;
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+ int r = 0;
+
+ switch (ipbl) {
+ case PRIV_EB_PCISTB:
+ r = kvm_pcistb_service_call(cpu, run);
+ break;
+ case PRIV_EB_SIC:
+ r = kvm_sic_service_call(cpu, run);
+ break;
+ case PRIV_EB_SQBS:
+ /* just inject exception */
+ r = -1;
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+ int r = 0;
+
+ switch (ipbl) {
+ case PRIV_E3_MPCIFC:
+ r = kvm_mpcifc_service_call(cpu, run);
+ break;
+ case PRIV_E3_STPCIFC:
+ r = kvm_stpcifc_service_call(cpu, run);
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUS390XState *env = &cpu->env;
+ int ret;
+
+ ret = s390_virtio_hypercall(env);
+ if (ret == -EINVAL) {
+ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
+ return 0;
+ }
+
+ return ret;
+}
+
+static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
+{
+ uint64_t r1, r3;
+ int rc;
+
+ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ r3 = run->s390_sieic.ipa & 0x000f;
+ rc = handle_diag_288(&cpu->env, r1, r3);
+ if (rc) {
+ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
+ }
+}
+
+static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
+{
+ uint64_t r1, r3;
+
+ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ r3 = run->s390_sieic.ipa & 0x000f;
+ handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
+}
+
+static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUS390XState *env = &cpu->env;
+ unsigned long pc;
+
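+    /*
+     * The PSW already points past the breakpoint instruction; step back
+     * by its length (sw_bp_ilen) to locate the breakpoint itself.
+     */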
+ pc = env->psw.addr - sw_bp_ilen;
+ if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
+ env->psw.addr = pc;
+ return EXCP_DEBUG;
+ }
+
+ return -ENOENT;
+}
+
+void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
+{
+ CPUS390XState *env = &S390_CPU(cs)->env;
+
+ /* Feat bit is set only if KVM supports sync for diag318 */
+ if (s390_has_feat(S390_FEAT_DIAG_318)) {
+ env->diag318_info = diag318_info;
+ cs->kvm_run->s.regs.diag318 = diag318_info;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
+ }
+}
+
+static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
+{
+ uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint64_t diag318_info = run->s.regs.gprs[reg];
+ CPUState *t;
+
+ /*
+ * DIAG 318 can only be enabled with KVM support. As such, let's
+ * ensure a guest cannot execute this instruction erroneously.
+ */
+ if (!s390_has_feat(S390_FEAT_DIAG_318)) {
+ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
+ return;
+ }
+
+ CPU_FOREACH(t) {
+ run_on_cpu(t, s390_do_cpu_set_diag318,
+ RUN_ON_CPU_HOST_ULONG(diag318_info));
+ }
+}
+
+#define DIAG_KVM_CODE_MASK 0x000000000000ffff
+
+static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
+{
+ int r = 0;
+ uint16_t func_code;
+
+ /*
+ * For any diagnose call we support, bits 48-63 of the resulting
+ * address specify the function code; the remainder is ignored.
+ */
+ func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
+ switch (func_code) {
+ case DIAG_TIMEREVENT:
+ kvm_handle_diag_288(cpu, run);
+ break;
+ case DIAG_IPL:
+ kvm_handle_diag_308(cpu, run);
+ break;
+ case DIAG_SET_CONTROL_PROGRAM_CODES:
+ handle_diag_318(cpu, run);
+ break;
+ case DIAG_KVM_HYPERCALL:
+ r = handle_hypercall(cpu, run);
+ break;
+ case DIAG_KVM_BREAKPOINT:
+ r = handle_sw_breakpoint(cpu, run);
+ break;
+ default:
+ DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
+ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
+{
+ CPUS390XState *env = &cpu->env;
+ const uint8_t r1 = ipa1 >> 4;
+ const uint8_t r3 = ipa1 & 0x0f;
+ int ret;
+ uint8_t order;
+
+ /* get order code */
+ order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;
+
+ ret = handle_sigp(env, order, r1, r3);
+ setcc(cpu, ret);
+ return 0;
+}
+
+static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
+{
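+    /*
+     * ipa holds the first two instruction bytes: the high byte (ipa0)
+     * selects the opcode group, the low byte (ipa1) the instruction.
+     */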
+ unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
+ uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
+ int r = -1;
+
+ DPRINTF("handle_instruction 0x%x 0x%x\n",
+ run->s390_sieic.ipa, run->s390_sieic.ipb);
+ switch (ipa0) {
+ case IPA0_B2:
+ r = handle_b2(cpu, run, ipa1);
+ break;
+ case IPA0_B9:
+ r = handle_b9(cpu, run, ipa1);
+ break;
+ case IPA0_EB:
+ r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
+ break;
+ case IPA0_E3:
+ r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
+ break;
+ case IPA0_DIAG:
+ r = handle_diag(cpu, run, run->s390_sieic.ipb);
+ break;
+ case IPA0_SIGP:
+ r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
+ break;
+ }
+
+ if (r < 0) {
+ r = 0;
+ kvm_s390_program_interrupt(cpu, PGM_OPERATION);
+ }
+
+ return r;
+}
+
+static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
+ int pswoffset)
+{
+ CPUState *cs = CPU(cpu);
+
+ s390_cpu_halt(cpu);
+ cpu->env.crash_reason = reason;
+ qemu_system_guest_panicked(cpu_get_crash_info(cs));
+}
+
+/* try to detect pgm check loops */
+static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUState *cs = CPU(cpu);
+ PSW oldpsw, newpsw;
+
+ newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
+ offsetof(LowCore, program_new_psw));
+ newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
+ offsetof(LowCore, program_new_psw) + 8);
+ oldpsw.mask = run->psw_mask;
+ oldpsw.addr = run->psw_addr;
+ /*
+ * Avoid endless loops of operation exceptions, if the pgm new
+ * PSW will cause a new operation exception.
+ * The heuristic checks if the pgm new psw is within 6 bytes before
+ * the faulting psw address (with same DAT, AS settings) and the
+ * new psw is not a wait psw and the fault was not triggered by
+ * problem state. In that case go into crashed state.
+ */
+
+ if (oldpsw.addr - newpsw.addr <= 6 &&
+ !(newpsw.mask & PSW_MASK_WAIT) &&
+ !(oldpsw.mask & PSW_MASK_PSTATE) &&
+ (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
+ (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
+ unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
+ offsetof(LowCore, program_new_psw));
+ return EXCP_HALTED;
+ }
+ return 0;
+}
+
+static int handle_intercept(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+ int icpt_code = run->s390_sieic.icptcode;
+ int r = 0;
+
+ DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, (long)run->psw_addr);
+ switch (icpt_code) {
+ case ICPT_INSTRUCTION:
+ case ICPT_PV_INSTR:
+ case ICPT_PV_INSTR_NOTIFICATION:
+ r = handle_instruction(cpu, run);
+ break;
+ case ICPT_PROGRAM:
+ unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
+ offsetof(LowCore, program_new_psw));
+ r = EXCP_HALTED;
+ break;
+ case ICPT_EXT_INT:
+ unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
+ offsetof(LowCore, external_new_psw));
+ r = EXCP_HALTED;
+ break;
+ case ICPT_WAITPSW:
+ /* disabled wait, since enabled wait is handled in kernel */
+ s390_handle_wait(cpu);
+ r = EXCP_HALTED;
+ break;
+ case ICPT_CPU_STOP:
+ do_stop_interrupt(&cpu->env);
+ r = EXCP_HALTED;
+ break;
+ case ICPT_OPEREXC:
+ /* check for break points */
+ r = handle_sw_breakpoint(cpu, run);
+ if (r == -ENOENT) {
+ /* Then check for potential pgm check loops */
+ r = handle_oper_loop(cpu, run);
+ if (r == 0) {
+ kvm_s390_program_interrupt(cpu, PGM_OPERATION);
+ }
+ }
+ break;
+ case ICPT_SOFT_INTERCEPT:
+ fprintf(stderr, "KVM unimplemented icpt SOFT\n");
+ exit(1);
+ break;
+ case ICPT_IO:
+ fprintf(stderr, "KVM unimplemented icpt IO\n");
+ exit(1);
+ break;
+ default:
+ fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
+ exit(1);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_tsch(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+ int ret;
+
+ ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
+ RA_IGNORED);
+ if (ret < 0) {
+ /*
+ * Failure.
+ * If an I/O interrupt had been dequeued, we have to reinject it.
+ */
+ if (run->s390_tsch.dequeued) {
+ s390_io_interrupt(run->s390_tsch.subchannel_id,
+ run->s390_tsch.subchannel_nr,
+ run->s390_tsch.io_int_parm,
+ run->s390_tsch.io_int_word);
+ }
+ ret = 0;
+ }
+ return ret;
+}
+
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
+{
+ const MachineState *ms = MACHINE(qdev_get_machine());
+ uint16_t conf_cpus = 0, reserved_cpus = 0;
+ SysIB_322 sysib;
+ int del, i;
+
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
+ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
+ return;
+ }
+ /* Shift the stack of Extended Names to prepare for our own data */
+ memmove(&sysib.ext_names[1], &sysib.ext_names[0],
+ sizeof(sysib.ext_names[0]) * (sysib.count - 1));
+    /* The first virtualization level that doesn't provide Extended Names
+     * delimits the stack; it is assumed to be incapable of managing
+     * Extended Names for lower levels.
+     */
+ for (del = 1; del < sysib.count; del++) {
+ if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
+ break;
+ }
+ }
+ if (del < sysib.count) {
+ memset(sysib.ext_names[del], 0,
+ sizeof(sysib.ext_names[0]) * (sysib.count - del));
+ }
+
+ /* count the cpus and split them into configured and reserved ones */
+ for (i = 0; i < ms->possible_cpus->len; i++) {
+ if (ms->possible_cpus->cpus[i].cpu) {
+ conf_cpus++;
+ } else {
+ reserved_cpus++;
+ }
+ }
+ sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
+ sysib.vm[0].conf_cpus = conf_cpus;
+ sysib.vm[0].reserved_cpus = reserved_cpus;
+
+ /* Insert short machine name in EBCDIC, padded with blanks */
+ if (qemu_name) {
+ memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
+ ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
+ strlen(qemu_name)));
+ }
+ sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
+    /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
+     * SYSIB, s390 considers it incapable of providing any Extended Name.
+     * Therefore, if no name was given on the qemu command line, go with the
+     * same "KVMguest" default that KVM filled into the short name field.
+     */
+ strpadcpy((char *)sysib.ext_names[0],
+ sizeof(sysib.ext_names[0]),
+ qemu_name ?: "KVMguest", '\0');
+
+ /* Insert UUID */
+ memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
+
+ if (s390_is_pv()) {
+ s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
+ } else {
+ s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+ }
+}
+
+static int handle_stsi(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ switch (run->s390_stsi.fc) {
+ case 3:
+ if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
+ return 0;
+ }
+ /* Only sysib 3.2.2 needs post-handling for now. */
+ insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static int kvm_arch_handle_debug_exit(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ int ret = 0;
+ struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+
+ switch (arch_info->type) {
+ case KVM_HW_WP_WRITE:
+ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = arch_info->addr;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ ret = EXCP_DEBUG;
+ }
+ break;
+ case KVM_HW_BP:
+ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+ ret = EXCP_DEBUG;
+ }
+ break;
+ case KVM_SINGLESTEP:
+ if (cs->singlestep_enabled) {
+ ret = EXCP_DEBUG;
+ }
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ int ret = 0;
+
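+    /* Exit handling may touch global machine state, so take the BQL. */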
+ qemu_mutex_lock_iothread();
+
+ kvm_cpu_synchronize_state(cs);
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ ret = handle_intercept(cpu);
+ break;
+ case KVM_EXIT_S390_RESET:
+ s390_ipl_reset_request(cs, S390_RESET_REIPL);
+ break;
+ case KVM_EXIT_S390_TSCH:
+ ret = handle_tsch(cpu);
+ break;
+ case KVM_EXIT_S390_STSI:
+ ret = handle_stsi(cpu);
+ break;
+ case KVM_EXIT_DEBUG:
+ ret = kvm_arch_handle_debug_exit(cpu);
+ break;
+ default:
+ fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
+ break;
+ }
+ qemu_mutex_unlock_iothread();
+
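+    /*
+     * ret == 0 means the exit was fully handled; report EXCP_INTERRUPT so
+     * the vcpu loop can process pending work before re-entering the guest.
+     */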
+ if (ret == 0) {
+ ret = EXCP_INTERRUPT;
+ }
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
+{
+ return true;
+}
+
+void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+ int r;
+
+ /* Activate host kernel channel subsystem support. */
+ r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
+ assert(r == 0);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+ /*
+ * Note that while irqchip capabilities generally imply that cpustates
+ * are handled in-kernel, it is not true for s390 (yet); therefore, we
+ * have to override the common code kvm_halt_in_kernel_allowed setting.
+ */
+ if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+ kvm_gsi_routing_allowed = true;
+ kvm_halt_in_kernel_allowed = false;
+ }
+}
+
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+ int vq, bool assign)
+{
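+    /*
+     * Arm a datamatch ioeventfd: a write of this vq index to the
+     * subchannel's notify "address" fires the eventfd in the kernel.
+     */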
+ struct kvm_ioeventfd kick = {
+ .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
+ KVM_IOEVENTFD_FLAG_DATAMATCH,
+ .fd = event_notifier_get_fd(notifier),
+ .datamatch = vq,
+ .addr = sch,
+ .len = 8,
+ };
+ trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
+ kick.datamatch);
+ if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
+ return -ENOSYS;
+ }
+ if (!assign) {
+ kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+}
+
+int kvm_s390_get_ri(void)
+{
+ return cap_ri;
+}
+
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
+{
+ struct kvm_mp_state mp_state = {};
+ int ret;
+
+ /* the kvm part might not have been initialized yet */
+ if (CPU(cpu)->kvm_state == NULL) {
+ return 0;
+ }
+
+ switch (cpu_state) {
+ case S390_CPU_STATE_STOPPED:
+ mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ break;
+ case S390_CPU_STATE_CHECK_STOP:
+ mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
+ break;
+ case S390_CPU_STATE_OPERATING:
+ mp_state.mp_state = KVM_MP_STATE_OPERATING;
+ break;
+ case S390_CPU_STATE_LOAD:
+ mp_state.mp_state = KVM_MP_STATE_LOAD;
+ break;
+ default:
+ error_report("Requested CPU state is not a valid S390 CPU state: %u",
+ cpu_state);
+ exit(1);
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+ if (ret) {
+ trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
+ strerror(-ret));
+ }
+
+ return ret;
+}
+
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
+{
+ unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
+ struct kvm_s390_irq_state irq_state = {
+ .buf = (uint64_t) cpu->irqstate,
+ .len = VCPU_IRQ_BUF_SIZE(max_cpus),
+ };
+ CPUState *cs = CPU(cpu);
+ int32_t bytes;
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+ return;
+ }
+
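+    /* On success, the ioctl returns the number of bytes put into the buffer. */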
+ bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
+ if (bytes < 0) {
+ cpu->irqstate_saved_size = 0;
+ error_report("Migration of interrupt state failed");
+ return;
+ }
+
+ cpu->irqstate_saved_size = bytes;
+}
+
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_s390_irq_state irq_state = {
+ .buf = (uint64_t) cpu->irqstate,
+ .len = cpu->irqstate_saved_size,
+ };
+ int r;
+
+ if (cpu->irqstate_saved_size == 0) {
+ return 0;
+ }
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+ return -ENOSYS;
+ }
+
+ r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
+ if (r) {
+ error_report("Setting interrupt state failed %d", r);
+ }
+ return r;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ S390PCIBusDevice *pbdev;
+ uint32_t vec = data & ZPCI_MSI_VEC_MASK;
+
+ if (!dev) {
+ DPRINTF("add_msi_route no pci device\n");
+ return -ENODEV;
+ }
+
+ pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
+ if (!pbdev) {
+ DPRINTF("add_msi_route no zpci device\n");
+ return -ENODEV;
+ }
+
+ route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
+ route->flags = 0;
+ route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
+ route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
+ route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
+ route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
+ route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ abort();
+}
+
+static int query_cpu_subfunc(S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_subfunc prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ return rc;
+ }
+
+    /*
+     * Add all subfunctions now, provided the corresponding feature that
+     * unlocks their query functions is available.
+     */
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
+ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
+ }
+ if (test_bit(S390_FEAT_MSA, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
+ }
+ if (test_bit(S390_FEAT_ESORT_BASE, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
+ }
+ if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
+ }
+ return 0;
+}
+
+static int configure_cpu_subfunc(const S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_subfunc prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
+ .addr = (uint64_t) &prop,
+ };
+
+ if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
+ /* hardware support might be missing, IBC will handle most of this */
+ return 0;
+ }
+
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
+ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
+ }
+ if (test_bit(S390_FEAT_MSA, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
+ }
+ if (test_bit(S390_FEAT_ESORT_BASE, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
+ }
+ if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
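+/* Map KVM CPU-model feature bits to QEMU's S390 feature enumeration. */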
+static int kvm_to_feat[][2] = {
+ { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
+ { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
+    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
+ { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
+ { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
+ { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
+ { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
+ { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
+ { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
+ { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
+ { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
+    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
+    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
+    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
+};
+
+static int query_cpu_feat(S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_feat prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+ int i;
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
+ if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
+ set_bit(kvm_to_feat[i][1], features);
+ }
+ }
+ return 0;
+}
+
+static int configure_cpu_feat(const S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_feat prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
+ .addr = (uint64_t) &prop,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
+ if (test_bit(kvm_to_feat[i][1], features)) {
+ set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
+ }
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+bool kvm_s390_cpu_models_supported(void)
+{
+ if (!cpu_model_allowed()) {
+ /* compatibility machines interfere with the cpu model */
+ return false;
+ }
+ return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE_FEAT) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE_SUBFUNC);
+}
+
+void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
+{
+ struct kvm_s390_vm_cpu_machine prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE,
+ .addr = (uint64_t) &prop,
+ };
+ uint16_t unblocked_ibc = 0, cpu_type = 0;
+ int rc;
+
+ memset(model, 0, sizeof(*model));
+
+ if (!kvm_s390_cpu_models_supported()) {
+ error_setg(errp, "KVM doesn't support CPU models");
+ return;
+ }
+
+ /* query the basic cpu model properties */
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
+ return;
+ }
+
+ cpu_type = cpuid_type(prop.cpuid);
+ if (has_ibc(prop.ibc)) {
+ model->lowest_ibc = lowest_ibc(prop.ibc);
+ unblocked_ibc = unblocked_ibc(prop.ibc);
+ }
+ model->cpu_id = cpuid_id(prop.cpuid);
+ model->cpu_id_format = cpuid_format(prop.cpuid);
+ model->cpu_ver = 0xff;
+
+ /* get supported cpu features indicated via STFL(E) */
+ s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
+ (uint8_t *) prop.fac_mask);
+ /* dat-enhancement facility 2 has no bit but was introduced with stfle */
+ if (test_bit(S390_FEAT_STFLE, model->features)) {
+ set_bit(S390_FEAT_DAT_ENH_2, model->features);
+ }
+ /* get supported cpu features indicated e.g. via SCLP */
+ rc = query_cpu_feat(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying CPU features: %d", rc);
+ return;
+ }
+ /* get supported cpu subfunctions indicated via query / test bit */
+ rc = query_cpu_subfunc(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
+ return;
+ }
+
+    /* PTFF subfunctions might be indicated although kernel support is missing */
+ if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
+ clear_bit(S390_FEAT_PTFF_QSIE, model->features);
+ clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
+ clear_bit(S390_FEAT_PTFF_STOE, model->features);
+ clear_bit(S390_FEAT_PTFF_STOUE, model->features);
+ }
+
+ /* with cpu model support, CMM is only indicated if really available */
+ if (kvm_s390_cmma_available()) {
+ set_bit(S390_FEAT_CMM, model->features);
+ } else {
+ /* no cmm -> no cmm nt */
+ clear_bit(S390_FEAT_CMM_NT, model->features);
+ }
+
+ /* bpb needs kernel support for migration, VSIE and reset */
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
+ clear_bit(S390_FEAT_BPB, model->features);
+ }
+
+ /*
+ * If we have support for protected virtualization, indicate
+ * the protected virtualization IPL unpack facility.
+ */
+ if (cap_protected) {
+ set_bit(S390_FEAT_UNPACK, model->features);
+ }
+
+ /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
+ set_bit(S390_FEAT_ZPCI, model->features);
+ set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);
+
+ if (s390_known_cpu_type(cpu_type)) {
+ /* we want the exact model, even if some features are missing */
+ model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
+ ibc_ec_ga(unblocked_ibc), NULL);
+ } else {
+ /* model unknown, e.g. too new - search using features */
+ model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
+ ibc_ec_ga(unblocked_ibc),
+ model->features);
+ }
+ if (!model->def) {
+ error_setg(errp, "KVM: host CPU model could not be identified");
+ return;
+ }
+ /* for now, we can only provide the AP feature with HW support */
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
+ KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
+ set_bit(S390_FEAT_AP, model->features);
+ }
+
+ /*
+ * Extended-Length SCCB is handled entirely within QEMU.
+ * For PV guests this is completely fenced by the Ultravisor, as Service
+ * Call error checking and STFLE interpretation are handled via SIE.
+ */
+ set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);
+
+ if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
+ set_bit(S390_FEAT_DIAG_318, model->features);
+ }
+
+    /* strip off features that are not part of the maximum model */
+ bitmap_and(model->features, model->features, model->def->full_feat,
+ S390_FEAT_MAX);
+}
+
+static void kvm_s390_configure_apie(bool interpret)
+{
+ uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
+ KVM_S390_VM_CRYPTO_DISABLE_APIE;
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
+void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+ struct kvm_s390_vm_cpu_processor prop = {
+ .fac_list = { 0 },
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+
+ if (!model) {
+ /* compatibility handling if cpu models are disabled */
+ if (kvm_s390_cmma_available()) {
+ kvm_s390_enable_cmma();
+ }
+ return;
+ }
+ if (!kvm_s390_cpu_models_supported()) {
+ error_setg(errp, "KVM doesn't support CPU models");
+ return;
+ }
+ prop.cpuid = s390_cpuid_from_cpu_model(model);
+ prop.ibc = s390_ibc_from_cpu_model(model);
+ /* configure cpu features indicated via STFL(e) */
+ s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
+ (uint8_t *) prop.fac_list);
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
+ return;
+ }
+ /* configure cpu features indicated e.g. via SCLP */
+ rc = configure_cpu_feat(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
+ return;
+ }
+ /* configure cpu subfunctions indicated via query / test bit */
+ rc = configure_cpu_subfunc(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
+ return;
+ }
+ /* enable CMM via CMMA */
+ if (test_bit(S390_FEAT_CMM, model->features)) {
+ kvm_s390_enable_cmma();
+ }
+
+ if (test_bit(S390_FEAT_AP, model->features)) {
+ kvm_s390_configure_apie(true);
+ }
+}
+
+void kvm_s390_restart_interrupt(S390CPU *cpu)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_RESTART,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+void kvm_s390_stop_interrupt(S390CPU *cpu)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+ return true;
+}
diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h
new file mode 100644
index 000000000..05a5e1e6f
--- /dev/null
+++ b/target/s390x/kvm/kvm_s390x.h
@@ -0,0 +1,49 @@
+/*
+ * QEMU KVM support -- s390x specific functions.
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef KVM_S390X_H
+#define KVM_S390X_H
+
+#include "cpu-qom.h"
+
+struct kvm_s390_irq;
+
+void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq);
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
+int kvm_s390_mem_op_pv(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+ bool is_write);
+void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
+int kvm_s390_get_hpage_1m(void);
+int kvm_s390_get_ri(void);
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
+int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
+int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_clock);
+int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_clock);
+void kvm_s390_enable_css_support(S390CPU *cpu);
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+ int vq, bool assign);
+int kvm_s390_cmma_active(void);
+void kvm_s390_cmma_reset(void);
+void kvm_s390_reset_vcpu_clear(S390CPU *cpu);
+void kvm_s390_reset_vcpu_normal(S390CPU *cpu);
+void kvm_s390_reset_vcpu_initial(S390CPU *cpu);
+int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit);
+void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp);
+void kvm_s390_crypto_reset(void);
+void kvm_s390_restart_interrupt(S390CPU *cpu);
+void kvm_s390_stop_interrupt(S390CPU *cpu);
+void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
+
+#endif /* KVM_S390X_H */
diff --git a/target/s390x/kvm/meson.build b/target/s390x/kvm/meson.build
new file mode 100644
index 000000000..d1356356b
--- /dev/null
+++ b/target/s390x/kvm/meson.build
@@ -0,0 +1,17 @@
+
+s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
+ 'kvm.c'
+))
+
+# Newer kernels on s390 check for an S390_PGSTE program header and
+# enable the pgste page table extensions in that case. This makes
+# the vm.allocate_pgste sysctl unnecessary. We enable this program
+# header if
+# - we build on s390x
+# - we build the system emulation for s390x (qemu-system-s390x)
+# - KVM is enabled
+# - the linker supports --s390-pgste
+if host_machine.cpu_family() == 's390x' and cc.has_link_argument('-Wl,--s390-pgste')
+ s390x_softmmu_ss.add(when: 'CONFIG_KVM',
+ if_true: declare_dependency(link_args: ['-Wl,--s390-pgste']))
+endif
diff --git a/target/s390x/kvm/trace-events b/target/s390x/kvm/trace-events
new file mode 100644
index 000000000..5289f5f67
--- /dev/null
+++ b/target/s390x/kvm/trace-events
@@ -0,0 +1,7 @@
+# See docs/devel/tracing.txt for syntax documentation.
+
+# kvm.c
+kvm_enable_cmma(int rc) "CMMA: enabling with result code %d"
+kvm_clear_cmma(int rc) "CMMA: clearing with result code %d"
+kvm_failed_cpu_state_set(int cpu_index, uint8_t state, const char *msg) "Warning: Unable to set cpu %d state %" PRIu8 " to KVM: %s"
+kvm_assign_subch_ioeventfd(int fd, uint32_t addr, bool assign, int datamatch) "fd: %d sch: @0x%x assign: %d vq: %d"
diff --git a/target/s390x/kvm/trace.h b/target/s390x/kvm/trace.h
new file mode 100644
index 000000000..ae195b130
--- /dev/null
+++ b/target/s390x/kvm/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_s390x_kvm.h"
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
new file mode 100644
index 000000000..37a076858
--- /dev/null
+++ b/target/s390x/machine.c
@@ -0,0 +1,292 @@
+/*
+ * S390x machine definitions and functions
+ *
+ * Copyright IBM Corp. 2014, 2018
+ *
+ * Authors:
+ * Thomas Huth <thuth@linux.vnet.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ * Jason J. Herne <jjherne@us.ibm.com>
+ *
+ * This work is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "migration/vmstate.h"
+#include "tcg/tcg_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ S390CPU *cpu = opaque;
+
+ /*
+ * As the cpu state is pushed to kvm via kvm_set_mp_state rather
+     * than via cpu_synchronize_state, we need to update kvm here.
+ */
+ if (kvm_enabled()) {
+ kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+ return kvm_s390_vcpu_interrupt_post_load(cpu);
+ }
+
+ if (tcg_enabled()) {
+ /* Rearm the CKC timer if necessary */
+ tcg_s390_tod_updated(CPU(cpu), RUN_ON_CPU_NULL);
+ }
+
+ return 0;
+}
+
+static int cpu_pre_save(void *opaque)
+{
+ S390CPU *cpu = opaque;
+
+ if (kvm_enabled()) {
+ kvm_s390_vcpu_interrupt_pre_save(cpu);
+ }
+
+ return 0;
+}
+
+static inline bool fpu_needed(void *opaque)
+{
+ /* This looks odd, but we might want to NOT transfer fprs in the future */
+ return true;
+}
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.vregs[0][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[1][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[2][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[3][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[4][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[5][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[6][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[7][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[8][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[9][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[10][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[11][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[12][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[13][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[14][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[15][0], S390CPU),
+ VMSTATE_UINT32(env.fpc, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vregs_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_VECTOR);
+}
+
+static const VMStateDescription vmstate_vregs = {
+ .name = "cpu/vregs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vregs_needed,
+ .fields = (VMStateField[]) {
+ /* vregs[0][0] -> vregs[15][0] and fregs are overlays */
+ VMSTATE_UINT64(env.vregs[16][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[17][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[18][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[19][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[20][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[21][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[22][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[23][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[24][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[25][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[26][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[27][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[28][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[29][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[30][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[31][0], S390CPU),
+ VMSTATE_UINT64(env.vregs[0][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[1][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[2][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[3][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[4][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[5][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[6][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[7][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[8][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[9][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[10][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[11][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[12][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[13][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[14][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[15][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[16][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[17][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[18][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[19][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[20][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[21][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[22][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[23][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[24][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[25][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[26][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[27][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[28][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[29][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[30][1], S390CPU),
+ VMSTATE_UINT64(env.vregs[31][1], S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool riccb_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_RUNTIME_INSTRUMENTATION);
+}
+
+const VMStateDescription vmstate_riccb = {
+ .name = "cpu/riccb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riccb_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(env.riccb, S390CPU, 64),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool exval_needed(void *opaque)
+{
+ S390CPU *cpu = opaque;
+ return cpu->env.ex_value != 0;
+}
+
+const VMStateDescription vmstate_exval = {
+ .name = "cpu/exval",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = exval_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.ex_value, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool gscb_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_GUARDED_STORAGE);
+}
+
+const VMStateDescription vmstate_gscb = {
+ .name = "cpu/gscb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gscb_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.gscb, S390CPU, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool bpbc_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_BPB);
+}
+
+const VMStateDescription vmstate_bpbc = {
+ .name = "cpu/bpbc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = bpbc_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(env.bpbc, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool etoken_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_ETOKEN);
+}
+
+const VMStateDescription vmstate_etoken = {
+ .name = "cpu/etoken",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = etoken_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.etoken, S390CPU),
+ VMSTATE_UINT64(env.etoken_extension, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool diag318_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_DIAG_318);
+}
+
+const VMStateDescription vmstate_diag318 = {
+ .name = "cpu/diag318",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = diag318_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.diag318_info, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_s390_cpu = {
+ .name = "cpu",
+ .post_load = cpu_post_load,
+ .pre_save = cpu_pre_save,
+ .version_id = 4,
+ .minimum_version_id = 3,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16),
+ VMSTATE_UINT64(env.psw.mask, S390CPU),
+ VMSTATE_UINT64(env.psw.addr, S390CPU),
+ VMSTATE_UINT64(env.psa, S390CPU),
+ VMSTATE_UINT32(env.todpr, S390CPU),
+ VMSTATE_UINT64(env.pfault_token, S390CPU),
+ VMSTATE_UINT64(env.pfault_compare, S390CPU),
+ VMSTATE_UINT64(env.pfault_select, S390CPU),
+ VMSTATE_UINT64(env.cputm, S390CPU),
+ VMSTATE_UINT64(env.ckc, S390CPU),
+ VMSTATE_UINT64(env.gbea, S390CPU),
+ VMSTATE_UINT64(env.pp, S390CPU),
+ VMSTATE_UINT32_ARRAY(env.aregs, S390CPU, 16),
+ VMSTATE_UINT64_ARRAY(env.cregs, S390CPU, 16),
+ VMSTATE_UINT8(env.cpu_state, S390CPU),
+ VMSTATE_UINT8(env.sigp_order, S390CPU),
+ VMSTATE_UINT32_V(irqstate_saved_size, S390CPU, 4),
+ VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL,
+ irqstate_saved_size),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fpu,
+ &vmstate_vregs,
+ &vmstate_riccb,
+ &vmstate_exval,
+ &vmstate_gscb,
+ &vmstate_bpbc,
+ &vmstate_etoken,
+ &vmstate_diag318,
+ NULL
+ },
+};
diff --git a/target/s390x/meson.build b/target/s390x/meson.build
new file mode 100644
index 000000000..84c1402a6
--- /dev/null
+++ b/target/s390x/meson.build
@@ -0,0 +1,44 @@
+s390x_ss = ss.source_set()
+s390x_ss.add(files(
+ 'cpu.c',
+ 'cpu_features.c',
+ 'cpu_models.c',
+ 'gdbstub.c',
+ 'interrupt.c',
+ 'cpu-dump.c',
+))
+
+gen_features = executable('gen-features', 'gen-features.c', native: true,
+ build_by_default: false)
+
+gen_features_h = custom_target('gen-features.h',
+ output: 'gen-features.h',
+ capture: true,
+ command: gen_features)
+
+s390x_ss.add(gen_features_h)
+
+s390x_softmmu_ss = ss.source_set()
+s390x_softmmu_ss.add(files(
+ 'helper.c',
+ 'arch_dump.c',
+ 'diag.c',
+ 'ioinst.c',
+ 'machine.c',
+ 'mmu_helper.c',
+ 'sigp.c',
+ 'cpu-sysemu.c',
+ 'cpu_models_sysemu.c',
+))
+
+s390x_user_ss = ss.source_set()
+s390x_user_ss.add(files(
+ 'cpu_models_user.c',
+))
+
+subdir('tcg')
+subdir('kvm')
+
+target_arch += {'s390x': s390x_ss}
+target_softmmu_arch += {'s390x': s390x_softmmu_ss}
+target_user_arch += {'s390x': s390x_user_ss}
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
new file mode 100644
index 000000000..b04b57c23
--- /dev/null
+++ b/target/s390x/mmu_helper.c
@@ -0,0 +1,610 @@
+/*
+ * S390x MMU related functions
+ *
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2015 Thomas Huth, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "exec/address-spaces.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "kvm/kvm_s390x.h"
+#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
+#include "exec/exec-all.h"
+#include "trace.h"
+#include "hw/hw.h"
+#include "hw/s390x/storage-keys.h"
+#include "hw/boards.h"
+
+/* Fetch/store bits in the translation exception code: */
+#define FS_READ 0x800
+#define FS_WRITE 0x400
+
+static void trigger_access_exception(CPUS390XState *env, uint32_t type,
+ uint64_t tec)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ if (kvm_enabled()) {
+ kvm_s390_access_exception(cpu, type, tec);
+ } else {
+ CPUState *cs = env_cpu(env);
+ if (type != PGM_ADDRESSING) {
+ stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
+ }
+ trigger_pgm_exception(env, type);
+ }
+}
+
+/* check whether the address would be protected by Low-Address Protection */
+static bool is_low_address(uint64_t addr)
+{
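+    /* The protected ranges are the first 512 bytes of pages 0 and 1. */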
+ return addr <= 511 || (addr >= 4096 && addr <= 4607);
+}
+
+/* check whether Low-Address Protection is enabled for mmu_translate() */
+static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
+{
+ if (!(env->cregs[0] & CR0_LOWPROT)) {
+ return false;
+ }
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ return true;
+ }
+
+ /* Check the private-space control bit */
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ return !(env->cregs[1] & ASCE_PRIVATE_SPACE);
+ case PSW_ASC_SECONDARY:
+ return !(env->cregs[7] & ASCE_PRIVATE_SPACE);
+ case PSW_ASC_HOME:
+ return !(env->cregs[13] & ASCE_PRIVATE_SPACE);
+ default:
+ /* We don't support access register mode */
+ error_report("unsupported addressing mode");
+ exit(1);
+ }
+}
+
+/**
+ * Translate real address to absolute (= physical)
+ * address by taking care of the prefix mapping.
+ */
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+{
+ if (raddr < 0x2000) {
+ return raddr + env->psa; /* Map the lowcore. */
+ } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
+ return raddr - env->psa; /* Map the 0 page. */
+ }
+ return raddr;
+}
+
+bool mmu_absolute_addr_valid(target_ulong addr, bool is_write)
+{
+ return address_space_access_valid(&address_space_memory,
+ addr & TARGET_PAGE_MASK,
+ TARGET_PAGE_SIZE, is_write,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr,
+ uint64_t *entry)
+{
+ CPUState *cs = env_cpu(env);
+
+ /*
+ * According to the PoP, these table addresses are "unpredictably real
+ * or absolute". Also, "it is unpredictable whether the address wraps
+ * or an addressing exception is recognized".
+ *
+ * We treat them as absolute addresses and don't wrap them.
+ */
+ if (unlikely(address_space_read(cs->as, gaddr, MEMTXATTRS_UNSPECIFIED,
+ entry, sizeof(*entry)) !=
+ MEMTX_OK)) {
+ return false;
+ }
+ *entry = be64_to_cpu(*entry);
+ return true;
+}
+
+static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, uint64_t asce, target_ulong *raddr,
+ int *flags)
+{
+ const bool edat1 = (env->cregs[0] & CR0_EDAT) &&
+ s390_has_feat(S390_FEAT_EDAT);
+ const bool edat2 = edat1 && s390_has_feat(S390_FEAT_EDAT_2);
+ const bool iep = (env->cregs[0] & CR0_IEP) &&
+ s390_has_feat(S390_FEAT_INSTRUCTION_EXEC_PROT);
+ const int asce_tl = asce & ASCE_TABLE_LENGTH;
+ const int asce_p = asce & ASCE_PRIVATE_SPACE;
+ hwaddr gaddr = asce & ASCE_ORIGIN;
+ uint64_t entry;
+
+ if (asce & ASCE_REAL_SPACE) {
+ /* direct mapping */
+ *raddr = vaddr;
+ return 0;
+ }
+
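+    /*
+     * The first switch locates the top-level table entry for vaddr and
+     * checks the ASCE table length; the second switch then walks down the
+     * levels (note the fall-throughs).
+     */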
+ switch (asce & ASCE_TYPE_MASK) {
+ case ASCE_TYPE_REGION1:
+ if (VADDR_REGION1_TL(vaddr) > asce_tl) {
+ return PGM_REG_FIRST_TRANS;
+ }
+ gaddr += VADDR_REGION1_TX(vaddr) * 8;
+ break;
+ case ASCE_TYPE_REGION2:
+ if (VADDR_REGION1_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
+ }
+ if (VADDR_REGION2_TL(vaddr) > asce_tl) {
+ return PGM_REG_SEC_TRANS;
+ }
+ gaddr += VADDR_REGION2_TX(vaddr) * 8;
+ break;
+ case ASCE_TYPE_REGION3:
+ if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
+ }
+ if (VADDR_REGION3_TL(vaddr) > asce_tl) {
+ return PGM_REG_THIRD_TRANS;
+ }
+ gaddr += VADDR_REGION3_TX(vaddr) * 8;
+ break;
+ case ASCE_TYPE_SEGMENT:
+ if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr) ||
+ VADDR_REGION3_TX(vaddr)) {
+ return PGM_ASCE_TYPE;
+ }
+ if (VADDR_SEGMENT_TL(vaddr) > asce_tl) {
+ return PGM_SEGMENT_TRANS;
+ }
+ gaddr += VADDR_SEGMENT_TX(vaddr) * 8;
+ break;
+ }
+
+ switch (asce & ASCE_TYPE_MASK) {
+ case ASCE_TYPE_REGION1:
+ if (!read_table_entry(env, gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_FIRST_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION1) {
+ return PGM_TRANS_SPEC;
+ }
+ if (VADDR_REGION2_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_REGION2_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_REG_SEC_TRANS;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8;
+ /* fall through */
+ case ASCE_TYPE_REGION2:
+ if (!read_table_entry(env, gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_SEC_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION2) {
+ return PGM_TRANS_SPEC;
+ }
+ if (VADDR_REGION3_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_REGION3_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_REG_THIRD_TRANS;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION3_TX(vaddr) * 8;
+ /* fall through */
+ case ASCE_TYPE_REGION3:
+ if (!read_table_entry(env, gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & REGION_ENTRY_I) {
+ return PGM_REG_THIRD_TRANS;
+ }
+ if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION3) {
+ return PGM_TRANS_SPEC;
+ }
+ if (edat2 && (entry & REGION3_ENTRY_CR) && asce_p) {
+ return PGM_TRANS_SPEC;
+ }
+ if (edat1 && (entry & REGION_ENTRY_P)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ if (edat2 && (entry & REGION3_ENTRY_FC)) {
+ if (iep && (entry & REGION3_ENTRY_IEP)) {
+ *flags &= ~PAGE_EXEC;
+ }
+ *raddr = (entry & REGION3_ENTRY_RFAA) |
+ (vaddr & ~REGION3_ENTRY_RFAA);
+ return 0;
+ }
+ if (VADDR_SEGMENT_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+ VADDR_SEGMENT_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+ return PGM_SEGMENT_TRANS;
+ }
+ gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_SEGMENT_TX(vaddr) * 8;
+ /* fall through */
+ case ASCE_TYPE_SEGMENT:
+ if (!read_table_entry(env, gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & SEGMENT_ENTRY_I) {
+ return PGM_SEGMENT_TRANS;
+ }
+ if ((entry & SEGMENT_ENTRY_TT) != SEGMENT_ENTRY_TT_SEGMENT) {
+ return PGM_TRANS_SPEC;
+ }
+ if ((entry & SEGMENT_ENTRY_CS) && asce_p) {
+ return PGM_TRANS_SPEC;
+ }
+ if (entry & SEGMENT_ENTRY_P) {
+ *flags &= ~PAGE_WRITE;
+ }
+ if (edat1 && (entry & SEGMENT_ENTRY_FC)) {
+ if (iep && (entry & SEGMENT_ENTRY_IEP)) {
+ *flags &= ~PAGE_EXEC;
+ }
+ *raddr = (entry & SEGMENT_ENTRY_SFAA) |
+ (vaddr & ~SEGMENT_ENTRY_SFAA);
+ return 0;
+ }
+ gaddr = (entry & SEGMENT_ENTRY_ORIGIN) + VADDR_PAGE_TX(vaddr) * 8;
+ break;
+ }
+
+ if (!read_table_entry(env, gaddr, &entry)) {
+ return PGM_ADDRESSING;
+ }
+ if (entry & PAGE_ENTRY_I) {
+ return PGM_PAGE_TRANS;
+ }
+ if (entry & PAGE_ENTRY_0) {
+ return PGM_TRANS_SPEC;
+ }
+ if (entry & PAGE_ENTRY_P) {
+ *flags &= ~PAGE_WRITE;
+ }
+ if (iep && (entry & PAGE_ENTRY_IEP)) {
+ *flags &= ~PAGE_EXEC;
+ }
+
+ *raddr = entry & TARGET_PAGE_MASK;
+ return 0;
+}
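+
+/*
+ * For orientation (assuming the VADDR_*_TX/TL macros follow the usual
+ * z/Architecture layout): a 64-bit virtual address decomposes into
+ *
+ *   bits 63..53  region-first table index   (VADDR_REGION1_TX)
+ *   bits 52..42  region-second table index  (VADDR_REGION2_TX)
+ *   bits 41..31  region-third table index   (VADDR_REGION3_TX)
+ *   bits 30..20  segment table index        (VADDR_SEGMENT_TX)
+ *   bits 19..12  page table index           (VADDR_PAGE_TX)
+ *   bits 11..0   byte offset within the page
+ *
+ * mmu_translate_asce() enters the walk at the level selected by the ASCE
+ * type and falls through one level at a time until a page-table entry (or
+ * a region/segment entry with FC set, i.e. a large frame) yields the real
+ * address.
+ */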
+
+static void mmu_handle_skey(target_ulong addr, int rw, int *flags)
+{
+ static S390SKeysClass *skeyclass;
+ static S390SKeysState *ss;
+ uint8_t key, old_key;
+ int rc;
+
+ /*
+ * We expect to be called with an absolute address that has already been
+     * validated, such that we can reliably use it to look up the storage key.
+ */
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ }
+
+ /*
+ * Don't enable storage keys if they are still disabled, i.e., no actual
+ * storage key instruction was issued yet.
+ */
+ if (!skeyclass->skeys_are_enabled(ss)) {
+ return;
+ }
+
+ /*
+ * Whenever we create a new TLB entry, we set the storage key reference
+ * bit. In case we allow write accesses, we set the storage key change
+ * bit. Whenever the guest changes the storage key, we have to flush the
+ * TLBs of all CPUs (the whole TLB or all affected entries), so that the
+ * next reference/change will result in an MMU fault and make us properly
+ * update the storage key here.
+ *
+ * Note 1: "record of references ... is not necessarily accurate",
+ * "change bit may be set in case no storing has occurred".
+ * -> We can set reference/change bits even on exceptions.
+ * Note 2: certain accesses seem to ignore storage keys. For example,
+ * DAT translation does not set reference bits for table accesses.
+ *
+ * TODO: key-controlled protection. Only CPU accesses make use of the
+ * PSW key. CSS accesses are different - we have to pass in the key.
+ *
+ * TODO: we have races between getting and setting the key.
+ */
+ rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_get_skeys_nonzero(rc);
+ return;
+ }
+ old_key = key;
+
+ switch (rw) {
+ case MMU_DATA_LOAD:
+ case MMU_INST_FETCH:
+ /*
+ * The TLB entry has to remain write-protected on read-faults if
+ * the storage key does not indicate a change already. Otherwise
+ * we might miss setting the change bit on write accesses.
+ */
+ if (!(key & SK_C)) {
+ *flags &= ~PAGE_WRITE;
+ }
+ break;
+ case MMU_DATA_STORE:
+ key |= SK_C;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Any store/fetch sets the reference bit */
+ key |= SK_R;
+
+ if (key != old_key) {
+ rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_set_skeys_nonzero(rc);
+ }
+ }
+}
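+
+/*
+ * Example of the resulting behaviour: a load from a page whose storage key
+ * has the change bit clear installs a read-only TLB entry and sets the
+ * reference bit; a subsequent store then faults again, sets the change bit,
+ * and only then is PAGE_WRITE granted.
+ */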
+
+/**
+ * Translate a virtual (logical) address into a physical (absolute) address.
+ * @param vaddr the virtual address
+ * @param rw 0 = read, 1 = write, 2 = code fetch, < 0 = load real address
+ * @param asc address space control (one of the PSW_ASC_* modes)
+ * @param raddr the translated address is stored to this pointer
+ * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
+ * @param tec the translation exception code is stored to this pointer if
+ *            there is an exception to raise
+ * @return 0 on success; otherwise the exception to raise
+ */
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags, uint64_t *tec)
+{
+ uint64_t asce;
+ int r;
+
+ *tec = (vaddr & TARGET_PAGE_MASK) | (asc >> 46) |
+ (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ);
+ *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) {
+ /*
+ * If any part of this page is currently protected, make sure the
+ * TLB entry will not be reused.
+ *
+         * As the protected range is always the first 512 bytes of the
+         * first two pages, we are able to catch all writes to these areas
+ * just by looking at the start address (triggering the tlb miss).
+ */
+ *flags |= PAGE_WRITE_INV;
+ if (is_low_address(vaddr) && rw == MMU_DATA_STORE) {
+ /* LAP sets bit 56 */
+ *tec |= 0x80;
+ return PGM_PROTECTION;
+ }
+ }
+
+ vaddr &= TARGET_PAGE_MASK;
+
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ *raddr = vaddr;
+ goto nodat;
+ }
+
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ asce = env->cregs[1];
+ break;
+ case PSW_ASC_HOME:
+ asce = env->cregs[13];
+ break;
+ case PSW_ASC_SECONDARY:
+ asce = env->cregs[7];
+ break;
+ case PSW_ASC_ACCREG:
+ default:
+ hw_error("guest switched to unknown asc mode\n");
+ break;
+ }
+
+ /* perform the DAT translation */
+ r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags);
+ if (unlikely(r)) {
+ return r;
+ }
+
+ /* check for DAT protection */
+ if (unlikely(rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE))) {
+ /* DAT sets bit 61 only */
+ *tec |= 0x4;
+ return PGM_PROTECTION;
+ }
+
+ /* check for Instruction-Execution-Protection */
+ if (unlikely(rw == MMU_INST_FETCH && !(*flags & PAGE_EXEC))) {
+ /* IEP sets bit 56 and 61 */
+ *tec |= 0x84;
+ return PGM_PROTECTION;
+ }
+
+nodat:
+ if (rw >= 0) {
+ /* Convert real address -> absolute address */
+ *raddr = mmu_real2abs(env, *raddr);
+
+ if (!mmu_absolute_addr_valid(*raddr, rw == MMU_DATA_STORE)) {
+ *tec = 0; /* unused */
+ return PGM_ADDRESSING;
+ }
+
+ mmu_handle_skey(*raddr, rw, flags);
+ }
+ return 0;
+}
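+
+/*
+ * Summary of the TEC constructed above: the page-aligned virtual address
+ * sits in the high bits, the ASC code (0..3) in the low two bits, and
+ * FS_READ/FS_WRITE encodes the access type; on protection exceptions, LAP
+ * sets 0x80, DAT protection sets 0x4, and IEP sets both.
+ */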
+
+/**
+ * translate_pages: Translate a set of consecutive logical page addresses
+ * to absolute addresses. This function is used for TCG and old KVM without
+ * the MEMOP interface.
+ */
+static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
+ target_ulong *pages, bool is_write, uint64_t *tec)
+{
+ uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
+ CPUS390XState *env = &cpu->env;
+ int ret, i, pflags;
+
+ for (i = 0; i < nr_pages; i++) {
+ ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, tec);
+ if (ret) {
+ return ret;
+ }
+ addr += TARGET_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
+ int len, bool is_write)
+{
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_mem_op_pv(cpu, offset, hostbuf, len, is_write);
+ } else {
+ /* Protected Virtualization is a KVM/Hardware only feature */
+ g_assert_not_reached();
+ }
+ return ret;
+}
+
+/**
+ * s390_cpu_virt_mem_rw:
+ * @laddr: the logical start address
+ * @ar: the access register number
+ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
+ * @len: length that should be transferred
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception occurred
+ *
+ * Copy from/to guest memory using logical addresses. Note that we inject a
+ * program interrupt in case there is an error while accessing the memory.
+ *
+ * This function will always return (also for TCG); make sure to call
+ * s390_cpu_virt_mem_handle_exc() to properly exit the CPU loop.
+ */
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ int len, bool is_write)
+{
+ int currlen, nr_pages, i;
+ target_ulong *pages;
+ uint64_t tec;
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
+ if (ret >= 0) {
+ return ret;
+ }
+ }
+
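+    /*
+     * Worked example (assuming 4k target pages): a laddr with page offset
+     * 0xff0 and len 0x20 touches two pages:
+     * ((0xff0 + 0x1f) >> 12) + 1 == 2.
+     */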
+ nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
+ + 1;
+ pages = g_malloc(nr_pages * sizeof(*pages));
+
+ ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec);
+ if (ret) {
+ trigger_access_exception(&cpu->env, ret, tec);
+ } else if (hostbuf != NULL) {
+ /* Copy data by stepping through the area page by page */
+ for (i = 0; i < nr_pages; i++) {
+ currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
+ cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
+ hostbuf, currlen, is_write);
+ laddr += currlen;
+ hostbuf += currlen;
+ len -= currlen;
+ }
+ }
+
+ g_free(pages);
+ return ret;
+}
+
+void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra)
+{
+ /* KVM will handle the interrupt automatically, TCG has to exit the TB */
+#ifdef CONFIG_TCG
+ if (tcg_enabled()) {
+ cpu_loop_exit_restore(CPU(cpu), ra);
+ }
+#endif
+}
+
+/**
+ * Translate a real address into a physical (absolute) address.
+ * @param raddr the real address
+ * @param rw 0 = read, 1 = write, 2 = code fetch
+ * @param addr the translated address is stored to this pointer
+ * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
+ * @param tec the translation exception code is stored to this pointer if
+ *            there is an exception to raise
+ * @return 0 on success; otherwise the exception to raise
+ */
+int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
+ target_ulong *addr, int *flags, uint64_t *tec)
+{
+ const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT;
+
+ *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ if (is_low_address(raddr & TARGET_PAGE_MASK) && lowprot_enabled) {
+ /* see comment in mmu_translate() how this works */
+ *flags |= PAGE_WRITE_INV;
+ if (is_low_address(raddr) && rw == MMU_DATA_STORE) {
+ /* LAP sets bit 56 */
+ *tec = (raddr & TARGET_PAGE_MASK) | FS_WRITE | 0x80;
+ return PGM_PROTECTION;
+ }
+ }
+
+ *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK);
+
+ if (!mmu_absolute_addr_valid(*addr, rw == MMU_DATA_STORE)) {
+ /* unused */
+ *tec = 0;
+ return PGM_ADDRESSING;
+ }
+
+ mmu_handle_skey(*addr, rw, flags);
+ return 0;
+}
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
new file mode 100644
index 000000000..1a178aed4
--- /dev/null
+++ b/target/s390x/s390x-internal.h
@@ -0,0 +1,408 @@
+/*
+ * s390x internal definitions and helpers
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef S390X_INTERNAL_H
+#define S390X_INTERNAL_H
+
+#include "cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+typedef struct LowCore {
+ /* prefix area: defined by architecture */
+ uint32_t ccw1[2]; /* 0x000 */
+ uint32_t ccw2[4]; /* 0x008 */
+ uint8_t pad1[0x80 - 0x18]; /* 0x018 */
+ uint32_t ext_params; /* 0x080 */
+ uint16_t cpu_addr; /* 0x084 */
+ uint16_t ext_int_code; /* 0x086 */
+ uint16_t svc_ilen; /* 0x088 */
+ uint16_t svc_code; /* 0x08a */
+ uint16_t pgm_ilen; /* 0x08c */
+ uint16_t pgm_code; /* 0x08e */
+ uint32_t data_exc_code; /* 0x090 */
+ uint16_t mon_class_num; /* 0x094 */
+ uint16_t per_perc_atmid; /* 0x096 */
+ uint64_t per_address; /* 0x098 */
+ uint8_t exc_access_id; /* 0x0a0 */
+ uint8_t per_access_id; /* 0x0a1 */
+ uint8_t op_access_id; /* 0x0a2 */
+ uint8_t ar_access_id; /* 0x0a3 */
+ uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */
+ uint64_t trans_exc_code; /* 0x0a8 */
+ uint64_t monitor_code; /* 0x0b0 */
+ uint16_t subchannel_id; /* 0x0b8 */
+ uint16_t subchannel_nr; /* 0x0ba */
+ uint32_t io_int_parm; /* 0x0bc */
+ uint32_t io_int_word; /* 0x0c0 */
+ uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */
+ uint32_t stfl_fac_list; /* 0x0c8 */
+ uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */
+ uint64_t mcic; /* 0x0e8 */
+ uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */
+ uint32_t external_damage_code; /* 0x0f4 */
+ uint64_t failing_storage_address; /* 0x0f8 */
+ uint8_t pad6[0x110 - 0x100]; /* 0x100 */
+ uint64_t per_breaking_event_addr; /* 0x110 */
+ uint8_t pad7[0x120 - 0x118]; /* 0x118 */
+ PSW restart_old_psw; /* 0x120 */
+ PSW external_old_psw; /* 0x130 */
+ PSW svc_old_psw; /* 0x140 */
+ PSW program_old_psw; /* 0x150 */
+ PSW mcck_old_psw; /* 0x160 */
+ PSW io_old_psw; /* 0x170 */
+ uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */
+ PSW restart_new_psw; /* 0x1a0 */
+ PSW external_new_psw; /* 0x1b0 */
+ PSW svc_new_psw; /* 0x1c0 */
+ PSW program_new_psw; /* 0x1d0 */
+ PSW mcck_new_psw; /* 0x1e0 */
+ PSW io_new_psw; /* 0x1f0 */
+ uint8_t pad13[0x11b0 - 0x200]; /* 0x200 */
+
+ uint64_t mcesad; /* 0x11B0 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ uint64_t ext_params2; /* 0x11B8 */
+
+ uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */
+
+ /* System info area */
+
+ uint64_t floating_pt_save_area[16]; /* 0x1200 */
+ uint64_t gpregs_save_area[16]; /* 0x1280 */
+ uint32_t st_status_fixed_logout[4]; /* 0x1300 */
+ uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */
+ uint32_t prefixreg_save_area; /* 0x1318 */
+ uint32_t fpt_creg_save_area; /* 0x131c */
+ uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */
+ uint32_t tod_progreg_save_area; /* 0x1324 */
+ uint64_t cpu_timer_save_area; /* 0x1328 */
+ uint64_t clock_comp_save_area; /* 0x1330 */
+ uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */
+ uint32_t access_regs_save_area[16]; /* 0x1340 */
+ uint64_t cregs_save_area[16]; /* 0x1380 */
+
+ /* align to the top of the prefix area */
+
+ uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */
+} QEMU_PACKED LowCore;
+QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192);
+#endif /* CONFIG_USER_ONLY */
+
+#define MAX_ILEN 6
+
+/* While the PoO talks about the ILC (a number between 1 and 3), what is
+   actually stored in LowCore is that value shifted left by one bit (an even
+   number between 2 and 6).  As this is the actual length of the insn and
+   therefore more useful, it is what we pass around and manipulate.  To make
+   sure that we apply this distinction universally, we rename the "ILC" to
+   "ILEN".  */
+static inline int get_ilen(uint8_t opc)
+{
+ switch (opc >> 6) {
+ case 0:
+ return 2;
+ case 1:
+ case 2:
+ return 4;
+ default:
+ return 6;
+ }
+}
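+
+/*
+ * Examples: opcode 0x07 (top two bits 00) has ilen 2, 0x47 (01) and 0xb2
+ * (10) have ilen 4, and 0xe3 (11) has ilen 6.
+ */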
+
+/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
+ entry when a PER exception is triggered. */
+static inline uint8_t get_per_atmid(CPUS390XState *env)
+{
+ return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
+ (1 << 6) |
+ ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
+ ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
+ ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
+ ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
+}
+
+static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
+{
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode */
+ a &= 0x00ffffff;
+ } else {
+ /* 31-Bit mode */
+ a &= 0x7fffffff;
+ }
+ }
+ return a;
+}
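+
+/*
+ * E.g. with a = 0xdeadbeef: 24-bit mode yields 0x00adbeef, 31-bit mode
+ * yields 0x5eadbeef, and 64-bit mode leaves the address untouched.
+ */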
+
+/* CC optimization */
+
+/* Instead of computing the condition codes after each s390x instruction,
+ * QEMU just stores the result (called CC_DST), the type of operation
+ * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
+ * CC_VR). When the condition codes are needed, the condition codes can
+ * be calculated using this information. Condition codes are not generated
+ * if they are only needed for conditional branches.
+ */
+enum cc_op {
+ CC_OP_CONST0 = 0, /* CC is 0 */
+ CC_OP_CONST1, /* CC is 1 */
+ CC_OP_CONST2, /* CC is 2 */
+ CC_OP_CONST3, /* CC is 3 */
+
+ CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
+ CC_OP_STATIC, /* CC value is env->cc_op */
+
+ CC_OP_NZ, /* env->cc_dst != 0 */
+ CC_OP_ADDU, /* dst != 0, src = carry out (0,1) */
+ CC_OP_SUBU, /* dst != 0, src = borrow out (0,-1) */
+
+ CC_OP_LTGT_32, /* signed less/greater than (32bit) */
+ CC_OP_LTGT_64, /* signed less/greater than (64bit) */
+ CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
+ CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
+ CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
+ CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
+
+ CC_OP_ADD_64, /* overflow on add (64bit) */
+ CC_OP_SUB_64, /* overflow on subtraction (64bit) */
+ CC_OP_ABS_64, /* sign eval on abs (64bit) */
+ CC_OP_NABS_64, /* sign eval on nabs (64bit) */
+ CC_OP_MULS_64, /* overflow on signed multiply (64bit) */
+
+ CC_OP_ADD_32, /* overflow on add (32bit) */
+ CC_OP_SUB_32, /* overflow on subtraction (32bit) */
+    CC_OP_ABS_32,            /* sign eval on abs (32bit) */
+    CC_OP_NABS_32,           /* sign eval on nabs (32bit) */
+ CC_OP_MULS_32, /* overflow on signed multiply (32bit) */
+
+ CC_OP_COMP_32, /* complement */
+ CC_OP_COMP_64, /* complement */
+
+ CC_OP_TM_32, /* test under mask (32bit) */
+ CC_OP_TM_64, /* test under mask (64bit) */
+
+ CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
+ CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
+ CC_OP_NZ_F128, /* FP dst != 0 (128bit) */
+
+ CC_OP_ICM, /* insert characters under mask */
+ CC_OP_SLA_32, /* Calculate shift left signed (32bit) */
+ CC_OP_SLA_64, /* Calculate shift left signed (64bit) */
+ CC_OP_FLOGR, /* find leftmost one */
+ CC_OP_LCBB, /* load count to block boundary */
+ CC_OP_VC, /* vector compare result */
+ CC_OP_MAX
+};
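+
+/*
+ * Typical flow: an ADD LOGICAL leaves cc_op = CC_OP_ADDU with the carry in
+ * cc_src and the result in cc_dst; the cc is only materialized via
+ * calc_cc() when its value is actually consumed (e.g. by IPM), while plain
+ * conditional branches can test the stored operands directly.
+ */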
+
+#ifndef CONFIG_USER_ONLY
+
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+ uint8_t *ar)
+{
+ hwaddr addr = 0;
+ uint8_t reg;
+
+ reg = ipb >> 28;
+ if (reg > 0) {
+ addr = env->regs[reg];
+ }
+ addr += (ipb >> 16) & 0xfff;
+ if (ar) {
+ *ar = reg;
+ }
+
+ return addr;
+}
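+
+/*
+ * Example: ipb = 0x5a340000 decodes to base register 5 and displacement
+ * 0xa34, i.e. the address regs[5] + 0xa34 (with *ar = 5 for
+ * access-register mode).
+ */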
+
+/* Base/displacement are at the same locations. */
+#define decode_basedisp_rs decode_basedisp_s
+
+#endif /* CONFIG_USER_ONLY */
+
+/* arch_dump.c */
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+
+
+/* cc_helper.c */
+const char *cc_name(enum cc_op cc_op);
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr);
+
+/* cpu.c */
+#ifndef CONFIG_USER_ONLY
+unsigned int s390_cpu_halt(S390CPU *cpu);
+void s390_cpu_unhalt(S390CPU *cpu);
+void s390_cpu_init_sysemu(Object *obj);
+bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp);
+void s390_cpu_finalize(Object *obj);
+void s390_cpu_class_init_sysemu(CPUClass *cc);
+void s390_cpu_machine_reset_cb(void *opaque);
+
+#else
+static inline unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+ return 0;
+}
+
+static inline void s390_cpu_unhalt(S390CPU *cpu)
+{
+}
+#endif /* CONFIG_USER_ONLY */
+
+
+/* cpu_models.c */
+void s390_cpu_model_class_register_props(ObjectClass *oc);
+void s390_realize_cpu_model(CPUState *cs, Error **errp);
+S390CPUModel *get_max_cpu_model(Error **errp);
+void apply_cpu_model(const S390CPUModel *model, Error **errp);
+ObjectClass *s390_cpu_class_by_name(const char *name);
+
+
+/* excp_helper.c */
+void s390x_cpu_debug_excp_handler(CPUState *cs);
+void s390_cpu_do_interrupt(CPUState *cpu);
+bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+#ifdef CONFIG_USER_ONLY
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr);
+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr);
+#else
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+#endif
+
+
+/* fpu_helper.c */
+uint32_t set_cc_nz_f32(float32 v);
+uint32_t set_cc_nz_f64(float64 v);
+uint32_t set_cc_nz_f128(float128 v);
+#define S390_IEEE_MASK_INVALID 0x80
+#define S390_IEEE_MASK_DIVBYZERO 0x40
+#define S390_IEEE_MASK_OVERFLOW 0x20
+#define S390_IEEE_MASK_UNDERFLOW 0x10
+#define S390_IEEE_MASK_INEXACT 0x08
+#define S390_IEEE_MASK_QUANTUM 0x04
+uint8_t s390_softfloat_exc_to_ieee(unsigned int exc);
+int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3);
+void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode);
+int float_comp_to_cc(CPUS390XState *env, int float_compare);
+
+#define DCMASK_ZERO 0x0c00
+#define DCMASK_NORMAL 0x0300
+#define DCMASK_SUBNORMAL 0x00c0
+#define DCMASK_INFINITY 0x0030
+#define DCMASK_QUIET_NAN 0x000c
+#define DCMASK_SIGNALING_NAN 0x0003
+#define DCMASK_NAN 0x000f
+#define DCMASK_NEGATIVE 0x0555
+uint16_t float32_dcmask(CPUS390XState *env, float32 f1);
+uint16_t float64_dcmask(CPUS390XState *env, float64 f1);
+uint16_t float128_dcmask(CPUS390XState *env, float128 f1);
+
+
+/* gdbstub.c */
+int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void s390_cpu_gdb_init(CPUState *cs);
+
+
+/* helper.c */
+void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+void do_restart_interrupt(CPUS390XState *env);
+#ifndef CONFIG_USER_ONLY
+void s390_cpu_recompute_watchpoints(CPUState *cs);
+void s390x_tod_timer(void *opaque);
+void s390x_cpu_timer(void *opaque);
+void s390_handle_wait(S390CPU *cpu);
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
+#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
+int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch);
+int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len);
+LowCore *cpu_map_lowcore(CPUS390XState *env);
+void cpu_unmap_lowcore(LowCore *lowcore);
+#endif /* CONFIG_USER_ONLY */
+
+
+/* interrupt.c */
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code);
+void cpu_inject_clock_comparator(S390CPU *cpu);
+void cpu_inject_cpu_timer(S390CPU *cpu);
+void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
+int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr);
+bool s390_cpu_has_io_int(S390CPU *cpu);
+bool s390_cpu_has_ext_int(S390CPU *cpu);
+bool s390_cpu_has_mcck_int(S390CPU *cpu);
+bool s390_cpu_has_int(S390CPU *cpu);
+bool s390_cpu_has_restart_int(S390CPU *cpu);
+bool s390_cpu_has_stop_int(S390CPU *cpu);
+void cpu_inject_restart(S390CPU *cpu);
+void cpu_inject_stop(S390CPU *cpu);
+
+
+/* ioinst.c */
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra);
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb, uintptr_t ra);
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+
+
+/* mem_helper.c */
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr);
+void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
+ uintptr_t ra);
+
+
+/* mmu_helper.c */
+bool mmu_absolute_addr_valid(target_ulong addr, bool is_write);
+/* Special access mode only valid for mmu_translate() */
+#define MMU_S390_LRA -1
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags, uint64_t *tec);
+int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
+ target_ulong *addr, int *flags, uint64_t *tec);
+
+
+/* misc_helper.c */
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
+ uintptr_t ra);
+
+
+/* translate.c */
+void s390x_translate_init(void);
+
+
+/* sigp.c */
+int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3);
+void do_stop_interrupt(CPUS390XState *env);
+
+#endif /* S390X_INTERNAL_H */
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
new file mode 100644
index 000000000..51c727834
--- /dev/null
+++ b/target/s390x/sigp.c
@@ -0,0 +1,495 @@
+/*
+ * s390x SIGP instruction handling
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp. 2012
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/runstate.h"
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "sysemu/tcg.h"
+#include "trace.h"
+#include "qapi/qapi-types-machine.h"
+
+QemuMutex qemu_sigp_mutex;
+
+typedef struct SigpInfo {
+ uint64_t param;
+ int cc;
+ uint64_t *status_reg;
+} SigpInfo;
+
+static void set_sigp_status(SigpInfo *si, uint64_t status)
+{
+ *si->status_reg &= 0xffffffff00000000ULL;
+ *si->status_reg |= status;
+ si->cc = SIGP_CC_STATUS_STORED;
+}
+
+static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
+{
+ uint8_t state = s390_cpu_get_state(dst_cpu);
+ bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL;
+ uint64_t status = 0;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* sensing without locks is racy, but it's the same for real hw */
+ if (state != S390_CPU_STATE_STOPPED && !ext_call) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ if (ext_call) {
+ status |= SIGP_STAT_EXT_CALL_PENDING;
+ }
+ if (state == S390_CPU_STATE_STOPPED) {
+ status |= SIGP_STAT_STOPPED;
+ }
+ set_sigp_status(si, status);
+ }
+}
+
+static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+ int ret;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id);
+ if (!ret) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
+ }
+}
+
+static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_start(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ /* disabled wait - sleeping in user space */
+ if (cs->halted) {
+ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+ } else {
+ /* execute the stop function */
+ cpu->env.sigp_order = SIGP_STOP;
+ cpu_inject_stop(cpu);
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ /* disabled wait - sleeping in user space */
+ if (s390_cpu_get_state(cpu) == S390_CPU_STATE_OPERATING && cs->halted) {
+ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+ }
+
+ switch (s390_cpu_get_state(cpu)) {
+ case S390_CPU_STATE_OPERATING:
+ cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+ cpu_inject_stop(cpu);
+        /* store will be performed in do_stop_interrupt() */
+ break;
+ case S390_CPU_STATE_STOPPED:
+ /* already stopped, just store the status */
+ cpu_synchronize_state(cs);
+ s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t address = si->param & 0x7ffffe00u;
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (s390_store_status(cpu, address, false)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define ADTL_SAVE_LC_MASK 0xfUL
+static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
+ hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
+ hwaddr len = 1UL << (lc ? lc : 10);
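+    /* lc = 0 or 10 selects 1k, lc = 11 selects 2k, lc = 12 selects 4k */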
+
+ if (!s390_has_feat(S390_FEAT_VECTOR) &&
+ !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ /* address must be aligned to length */
+ if (addr & (len - 1)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* no GS: only lc == 0 is valid */
+ if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
+ lc != 0) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* GS: 0, 10, 11, 12 are valid */
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
+ lc != 0 &&
+ lc != 10 &&
+ lc != 11 &&
+ lc != 12) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (s390_store_adtl_status(cpu, addr, len)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ switch (s390_cpu_get_state(cpu)) {
+ case S390_CPU_STATE_STOPPED:
+ /* the restart irq has to be delivered prior to any other pending irq */
+ cpu_synchronize_state(cs);
+ /*
+ * Set OPERATING (and unhalting) before loading the restart PSW.
+ * s390_cpu_set_psw() will then properly halt the CPU again if
+ * necessary (TCG).
+ */
+ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+ do_restart_interrupt(&cpu->env);
+ break;
+ case S390_CPU_STATE_OPERATING:
+ cpu_inject_restart(cpu);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->reset(cs, S390_CPU_RESET_INITIAL);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->reset(cs, S390_CPU_RESET_NORMAL);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t addr = si->param & 0x7fffe000u;
+
+ cpu_synchronize_state(cs);
+
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(struct LowCore), false,
+ MEMTXATTRS_UNSPECIFIED)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu->env.psa = addr;
+ tlb_flush(cs);
+ cpu_synchronize_post_init(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu,
+ SigpInfo *si)
+{
+ const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
+ uint16_t p_asn, s_asn, asn;
+ uint64_t psw_addr, psw_mask;
+ bool idle;
+
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* this looks racy, but these values are only used when STOPPED */
+ idle = CPU(dst_cpu)->halted;
+ psw_addr = dst_cpu->env.psw.addr;
+ psw_mask = dst_cpu->env.psw.mask;
+ asn = si->param;
+ p_asn = dst_cpu->env.cregs[4] & 0xffff; /* Primary ASN */
+ s_asn = dst_cpu->env.cregs[3] & 0xffff; /* Secondary ASN */
+
+ if (s390_cpu_get_state(dst_cpu) != S390_CPU_STATE_STOPPED ||
+ (psw_mask & psw_int_mask) != psw_int_mask ||
+ (idle && psw_addr != 0) ||
+ (!idle && (asn == p_asn || asn == s_asn))) {
+ cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
+ } else {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ }
+
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si)
+{
+ if (!tcg_enabled()) {
+ /* handled in KVM */
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* sensing without locks is racy, but it's the same for real hw */
+ if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* If halted (which includes also STOPPED), it is not running */
+ if (CPU(dst_cpu)->halted) {
+ set_sigp_status(si, SIGP_STAT_NOT_RUNNING);
+ } else {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ }
+}
+
+static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order,
+ uint64_t param, uint64_t *status_reg)
+{
+ SigpInfo si = {
+ .param = param,
+ .status_reg = status_reg,
+ };
+
+ /* cpu available? */
+ if (dst_cpu == NULL) {
+ return SIGP_CC_NOT_OPERATIONAL;
+ }
+
+ /* only resets can break pending orders */
+ if (dst_cpu->env.sigp_order != 0 &&
+ order != SIGP_CPU_RESET &&
+ order != SIGP_INITIAL_CPU_RESET) {
+ return SIGP_CC_BUSY;
+ }
+
+ switch (order) {
+ case SIGP_SENSE:
+ sigp_sense(dst_cpu, &si);
+ break;
+ case SIGP_EXTERNAL_CALL:
+ sigp_external_call(cpu, dst_cpu, &si);
+ break;
+ case SIGP_EMERGENCY:
+ sigp_emergency(cpu, dst_cpu, &si);
+ break;
+ case SIGP_START:
+ run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP:
+ run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_RESTART:
+ run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP_STORE_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_STATUS_ADDR:
+ run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_ADTL_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_SET_PREFIX:
+ run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_COND_EMERGENCY:
+ sigp_cond_emergency(cpu, dst_cpu, &si);
+ break;
+ case SIGP_SENSE_RUNNING:
+ sigp_sense_running(dst_cpu, &si);
+ break;
+ default:
+ set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
+ }
+
+ return si.cc;
+}
+
+static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
+ uint64_t *status_reg)
+{
+ *status_reg &= 0xffffffff00000000ULL;
+
+    /* Reject the set-arch order; with czam we're always in z/Arch mode. */
+ *status_reg |= SIGP_STAT_INVALID_PARAMETER;
+ return SIGP_CC_STATUS_STORED;
+}
+
+int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3)
+{
+ uint64_t *status_reg = &env->regs[r1];
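+    /*
+     * The parameter is taken from r1 itself when r1 is odd, or from r1 + 1
+     * when r1 is even; the 32-bit status is stored back into r1 either way.
+     */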
+ uint64_t param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
+ S390CPU *cpu = env_archcpu(env);
+ S390CPU *dst_cpu = NULL;
+ int ret;
+
+ if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
+ ret = SIGP_CC_BUSY;
+ goto out;
+ }
+
+ switch (order) {
+ case SIGP_SET_ARCH:
+ ret = sigp_set_architecture(cpu, param, status_reg);
+ break;
+ default:
+ /* all other sigp orders target a single vcpu */
+ dst_cpu = s390_cpu_addr2state(env->regs[r3]);
+ ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg);
+ }
+ qemu_mutex_unlock(&qemu_sigp_mutex);
+
+out:
+ trace_sigp_finished(order, CPU(cpu)->cpu_index,
+ dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
+ g_assert(ret >= 0);
+
+ return ret;
+}
+
+int s390_cpu_restart(S390CPU *cpu)
+{
+ SigpInfo si = {};
+
+ run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ return 0;
+}
+
+void do_stop_interrupt(CPUS390XState *env)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+ if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
+ s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+ }
+ env->sigp_order = 0;
+ env->pending_int &= ~INTERRUPT_STOP;
+}
+
+void s390_init_sigp(void)
+{
+ qemu_mutex_init(&qemu_sigp_mutex);
+}
diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c
new file mode 100644
index 000000000..c2c96c3a3
--- /dev/null
+++ b/target/s390x/tcg/cc_helper.c
@@ -0,0 +1,538 @@
+/*
+ * S/390 condition code helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltgt0_32(int32_t dst)
+{
+ return cc_calc_ltgt_32(dst, 0);
+}
+
+static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltgt0_64(int64_t dst)
+{
+ return cc_calc_ltgt_64(dst, 0);
+}
+
+static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
+{
+ uint32_t r = val & mask;
+
+ if (r == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ return 1;
+ }
+}
+
+static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
+{
+ uint64_t r = val & mask;
+
+ if (r == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ int top = clz64(mask);
+ if ((int64_t)(val << top) < 0) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+}
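+
+/*
+ * Example for the mixed case: val = 0xf0, mask = 0xff gives r = 0xf0;
+ * clz64(mask) = 56, and val << 56 has the sign bit set (the leftmost
+ * tested bit is one), so the result is cc 2.
+ */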
+
+static uint32_t cc_calc_nz(uint64_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_addu(uint64_t carry_out, uint64_t result)
+{
+ g_assert(carry_out <= 1);
+ return (result != 0) + 2 * carry_out;
+}
+
+static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result)
+{
+ return cc_calc_addu(borrow_out + 1, result);
+}
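+
+/*
+ * The logical cc encoding is thus 2 * carry + (result != 0): 0 = zero and
+ * no carry, 1 = nonzero and no carry, 2 = zero with carry, 3 = nonzero
+ * with carry. For subtraction the borrow (0 or -1) is first mapped to a
+ * carry (1 or 0) by adding one.
+ */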
+
+static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_abs_64(int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t cc_calc_nabs_64(int64_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_comp_64(int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_abs_32(int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t cc_calc_nabs_32(int32_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_comp_32(int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+/* calculate condition code for insert character under mask insn */
+static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
+{
+ if ((val & mask) == 0) {
+ return 0;
+ } else {
+ int top = clz64(mask);
+ if ((int64_t)(val << top) < 0) {
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+}
+
+static uint32_t cc_calc_sla_32(uint32_t src, int shift)
+{
+ uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
+ uint32_t sign = 1U << 31;
+ uint32_t match;
+ int32_t r;
+
+ /* Check if the sign bit stays the same. */
+ if (src & sign) {
+ match = mask;
+ } else {
+ match = 0;
+ }
+ if ((src & mask) != match) {
+ /* Overflow. */
+ return 3;
+ }
+
+ r = ((src << shift) & ~sign) | (src & sign);
+ if (r == 0) {
+ return 0;
+ } else if (r < 0) {
+ return 1;
+ }
+ return 2;
+}
+
+static uint32_t cc_calc_sla_64(uint64_t src, int shift)
+{
+ uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
+ uint64_t sign = 1ULL << 63;
+ uint64_t match;
+ int64_t r;
+
+ /* Check if the sign bit stays the same. */
+ if (src & sign) {
+ match = mask;
+ } else {
+ match = 0;
+ }
+ if ((src & mask) != match) {
+ /* Overflow. */
+ return 3;
+ }
+
+ r = ((src << shift) & ~sign) | (src & sign);
+ if (r == 0) {
+ return 0;
+ } else if (r < 0) {
+ return 1;
+ }
+ return 2;
+}
+
+static uint32_t cc_calc_flogr(uint64_t dst)
+{
+ return dst ? 2 : 0;
+}
+
+static uint32_t cc_calc_lcbb(uint64_t dst)
+{
+ return dst == 16 ? 0 : 3;
+}
+
+static uint32_t cc_calc_vc(uint64_t low, uint64_t high)
+{
+ if (high == -1ull && low == -1ull) {
+ /* all elements match */
+ return 0;
+ } else if (high == 0 && low == 0) {
+ /* no elements match */
+ return 3;
+ } else {
+ /* some elements but not all match */
+ return 1;
+ }
+}
+
+static uint32_t cc_calc_muls_32(int64_t res)
+{
+ const int64_t tmp = res >> 31;
+
+ if (!res) {
+ return 0;
+ } else if (tmp && tmp != -1) {
+ return 3;
+ } else if (res < 0) {
+ return 1;
+ }
+ return 2;
+}
+
+static uint32_t cc_calc_muls_64(int64_t res_high, uint64_t res_low)
+{
+ if (!res_high && !res_low) {
+ return 0;
+ } else if (res_high + (res_low >> 63) != 0) {
+ return 3;
+ } else if (res_high < 0) {
+ return 1;
+ }
+ return 2;
+}
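+
+/*
+ * The product fits in 64 bits exactly when res_high is the sign extension
+ * of res_low; res_high + (res_low >> 63) is zero precisely in that case
+ * (0 + 0 for non-negative results, -1 + 1 for negative ones).
+ */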
+
+static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
+ uint64_t src, uint64_t dst, uint64_t vr)
+{
+ uint32_t r = 0;
+
+ switch (cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* cc_op value _is_ cc */
+ r = cc_op;
+ break;
+ case CC_OP_LTGT0_32:
+ r = cc_calc_ltgt0_32(dst);
+ break;
+ case CC_OP_LTGT0_64:
+ r = cc_calc_ltgt0_64(dst);
+ break;
+ case CC_OP_LTGT_32:
+ r = cc_calc_ltgt_32(src, dst);
+ break;
+ case CC_OP_LTGT_64:
+ r = cc_calc_ltgt_64(src, dst);
+ break;
+ case CC_OP_LTUGTU_32:
+ r = cc_calc_ltugtu_32(src, dst);
+ break;
+ case CC_OP_LTUGTU_64:
+ r = cc_calc_ltugtu_64(src, dst);
+ break;
+ case CC_OP_TM_32:
+ r = cc_calc_tm_32(src, dst);
+ break;
+ case CC_OP_TM_64:
+ r = cc_calc_tm_64(src, dst);
+ break;
+ case CC_OP_NZ:
+ r = cc_calc_nz(dst);
+ break;
+ case CC_OP_ADDU:
+ r = cc_calc_addu(src, dst);
+ break;
+ case CC_OP_SUBU:
+ r = cc_calc_subu(src, dst);
+ break;
+ case CC_OP_ADD_64:
+ r = cc_calc_add_64(src, dst, vr);
+ break;
+ case CC_OP_SUB_64:
+ r = cc_calc_sub_64(src, dst, vr);
+ break;
+ case CC_OP_ABS_64:
+ r = cc_calc_abs_64(dst);
+ break;
+ case CC_OP_NABS_64:
+ r = cc_calc_nabs_64(dst);
+ break;
+ case CC_OP_COMP_64:
+ r = cc_calc_comp_64(dst);
+ break;
+ case CC_OP_MULS_64:
+ r = cc_calc_muls_64(src, dst);
+ break;
+
+ case CC_OP_ADD_32:
+ r = cc_calc_add_32(src, dst, vr);
+ break;
+ case CC_OP_SUB_32:
+ r = cc_calc_sub_32(src, dst, vr);
+ break;
+ case CC_OP_ABS_32:
+ r = cc_calc_abs_32(dst);
+ break;
+ case CC_OP_NABS_32:
+ r = cc_calc_nabs_32(dst);
+ break;
+ case CC_OP_COMP_32:
+ r = cc_calc_comp_32(dst);
+ break;
+ case CC_OP_MULS_32:
+ r = cc_calc_muls_32(dst);
+ break;
+
+ case CC_OP_ICM:
+ r = cc_calc_icm(src, dst);
+ break;
+ case CC_OP_SLA_32:
+ r = cc_calc_sla_32(src, dst);
+ break;
+ case CC_OP_SLA_64:
+ r = cc_calc_sla_64(src, dst);
+ break;
+ case CC_OP_FLOGR:
+ r = cc_calc_flogr(dst);
+ break;
+ case CC_OP_LCBB:
+ r = cc_calc_lcbb(dst);
+ break;
+ case CC_OP_VC:
+ r = cc_calc_vc(src, dst);
+ break;
+
+ case CC_OP_NZ_F32:
+ r = set_cc_nz_f32(dst);
+ break;
+ case CC_OP_NZ_F64:
+ r = set_cc_nz_f64(dst);
+ break;
+ case CC_OP_NZ_F128:
+ r = set_cc_nz_f128(make_float128(src, dst));
+ break;
+
+ default:
+ cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op));
+ }
+
+ HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
+ cc_name(cc_op), src, dst, vr, r);
+ return r;
+}
+
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
+ uint64_t dst, uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+ s390_cpu_set_psw(env, mask, addr);
+ cpu_loop_exit(env_cpu(env));
+}
+
+void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
+{
+ HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
+
+ switch (a1 & 0xf00) {
+ case 0x000:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_PRIMARY;
+ break;
+ case 0x100:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_SECONDARY;
+ break;
+ case 0x300:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_HOME;
+ break;
+ default:
+ HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+}
+#endif
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
new file mode 100644
index 000000000..138d9e7ad
--- /dev/null
+++ b/target/s390x/tcg/crypto_helper.c
@@ -0,0 +1,61 @@
+/*
+ * s390x crypto helpers
+ *
+ * Copyright (c) 2017 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
+ uint32_t type)
+{
+ const uintptr_t ra = GETPC();
+ const uint8_t mod = env->regs[0] & 0x80ULL;
+ const uint8_t fc = env->regs[0] & 0x7fULL;
+ uint8_t subfunc[16] = { 0 };
+ uint64_t param_addr;
+ int i;
+
+ switch (type) {
+ case S390_FEAT_TYPE_KMAC:
+ case S390_FEAT_TYPE_KIMD:
+ case S390_FEAT_TYPE_KLMD:
+ case S390_FEAT_TYPE_PCKMO:
+ case S390_FEAT_TYPE_PCC:
+ if (mod) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+ break;
+ }
+
+ s390_get_feat_block(type, subfunc);
+ if (!test_be_bit(fc, subfunc)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ switch (fc) {
+ case 0: /* query subfunction */
+ for (i = 0; i < 16; i++) {
+ param_addr = wrap_address(env, env->regs[1] + i);
+ cpu_stb_data_ra(env, param_addr, subfunc[i], ra);
+ }
+ break;
+ default:
+ /* we don't implement any other subfunction yet */
+ g_assert_not_reached();
+ }
+
+ return 0;
+}
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
new file mode 100644
index 000000000..4e7648f30
--- /dev/null
+++ b/target/s390x/tcg/excp_helper.c
@@ -0,0 +1,645 @@
+/*
+ * s390x exception / interrupt helpers
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "exec/helper-proto.h"
+#include "qemu/timer.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "hw/s390x/ioinst.h"
+#include "exec/address-spaces.h"
+#include "tcg_s390x.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/s390x/s390_flic.h"
+#include "hw/boards.h"
+#endif
+
+void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
+ uint32_t code, uintptr_t ra)
+{
+ CPUState *cs = env_cpu(env);
+
+ cpu_restore_state(cs, ra, true);
+ qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
+ env->psw.addr);
+ trigger_pgm_exception(env, code);
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
+ uintptr_t ra)
+{
+ g_assert(dxc <= 0xff);
+#if !defined(CONFIG_USER_ONLY)
+ /* Store the DXC into the lowcore */
+ stl_phys(env_cpu(env)->as,
+ env->psa + offsetof(LowCore, data_exc_code), dxc);
+#endif
+
+ /* Store the DXC into the FPC if AFP is enabled */
+ if (env->cregs[0] & CR0_AFP) {
+ env->fpc = deposit32(env->fpc, 8, 8, dxc);
+ }
+ tcg_s390_program_interrupt(env, PGM_DATA, ra);
+}
+
+void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
+ uintptr_t ra)
+{
+ g_assert(vxc <= 0xff);
+#if !defined(CONFIG_USER_ONLY)
+    /* Always store the VXC into the lowcore; without AFP it is undefined */
+ stl_phys(env_cpu(env)->as,
+ env->psa + offsetof(LowCore, data_exc_code), vxc);
+#endif
+
+    /* Always store the VXC into the FPC; without AFP it is undefined */
+ env->fpc = deposit32(env->fpc, 8, 8, vxc);
+ tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
+}
+
+void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
+{
+ tcg_s390_data_exception(env, dxc, GETPC());
+}
+
+/*
+ * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
+ * this is only for the atomic operations, for which we want to raise a
+ * specification exception.
+ */
+static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
+ /*
+ * On real machines this value is dropped into LowMem. Since this
+ * is userland, simply put this someplace that cpu_loop can find it.
+ * S390 only gives the page of the fault, not the exact address.
+     * Cf. the construction of the TEC in mmu_translate().
+ */
+ cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ do_unaligned_access(cs, retaddr);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
+{
+ switch (mmu_idx) {
+ case MMU_PRIMARY_IDX:
+ return PSW_ASC_PRIMARY;
+ case MMU_SECONDARY_IDX:
+ return PSW_ASC_SECONDARY;
+ case MMU_HOME_IDX:
+ return PSW_ASC_HOME;
+ default:
+ abort();
+ }
+}
+
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ target_ulong vaddr, raddr;
+ uint64_t asc, tec;
+ int prot, excp;
+
+ qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, address, access_type, mmu_idx);
+
+ vaddr = address;
+
+ if (mmu_idx < MMU_REAL_IDX) {
+ asc = cpu_mmu_idx_to_asc(mmu_idx);
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+ excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
+ } else if (mmu_idx == MMU_REAL_IDX) {
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+ excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
+ } else {
+ g_assert_not_reached();
+ }
+
+ env->tlb_fill_exc = excp;
+ env->tlb_fill_tec = tec;
+
+ if (!excp) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
+ __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+
+ if (excp != PGM_ADDRESSING) {
+ stq_phys(env_cpu(env)->as,
+ env->psa + offsetof(LowCore, trans_exc_code), tec);
+ }
+
+ /*
+ * For data accesses, ILEN will be filled in from the unwind info,
+ * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
+ * and so unwinding will not occur. However, ILEN is also undefined
+ * for that case -- we choose to set ILEN = 2.
+ */
+ env->int_pgm_ilen = 2;
+ trigger_pgm_exception(env, excp);
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+static void do_program_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ int ilen = env->int_pgm_ilen;
+
+ assert(ilen == 2 || ilen == 4 || ilen == 6);
+
+ switch (env->int_pgm_code) {
+ case PGM_PER:
+ if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
+ break;
+ }
+ /* FALL THROUGH */
+ case PGM_OPERATION:
+ case PGM_PRIVILEGED:
+ case PGM_EXECUTE:
+ case PGM_PROTECTION:
+ case PGM_ADDRESSING:
+ case PGM_SPECIFICATION:
+ case PGM_DATA:
+ case PGM_FIXPT_OVERFLOW:
+ case PGM_FIXPT_DIVIDE:
+ case PGM_DEC_OVERFLOW:
+ case PGM_DEC_DIVIDE:
+ case PGM_HFP_EXP_OVERFLOW:
+ case PGM_HFP_EXP_UNDERFLOW:
+ case PGM_HFP_SIGNIFICANCE:
+ case PGM_HFP_DIVIDE:
+ case PGM_TRANS_SPEC:
+ case PGM_SPECIAL_OP:
+ case PGM_OPERAND:
+ case PGM_HFP_SQRT:
+ case PGM_PC_TRANS_SPEC:
+ case PGM_ALET_SPEC:
+ case PGM_MONITOR:
+ /* advance the PSW if our exception is not nullifying */
+ env->psw.addr += ilen;
+ break;
+ }
+
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
+ __func__, env->int_pgm_code, ilen, env->psw.mask,
+ env->psw.addr);
+
+ lowcore = cpu_map_lowcore(env);
+
+ /* Signal PER events with the exception. */
+ if (env->per_perc_atmid) {
+ env->int_pgm_code |= PGM_PER;
+ lowcore->per_address = cpu_to_be64(env->per_address);
+ lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
+ env->per_perc_atmid = 0;
+ }
+
+ lowcore->pgm_ilen = cpu_to_be16(ilen);
+ lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
+ lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->program_new_psw.mask);
+ addr = be64_to_cpu(lowcore->program_new_psw.addr);
+ lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);
+
+ cpu_unmap_lowcore(lowcore);
+
+ s390_cpu_set_psw(env, mask, addr);
+}
+
+static void do_svc_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->svc_code = cpu_to_be16(env->int_svc_code);
+ lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
+ lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
+ mask = be64_to_cpu(lowcore->svc_new_psw.mask);
+ addr = be64_to_cpu(lowcore->svc_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ s390_cpu_set_psw(env, mask, addr);
+
+ /* When a PER event is pending, the PER exception has to happen
+ immediately after the SERVICE CALL one. */
+ if (env->per_perc_atmid) {
+ env->int_pgm_code = PGM_PER;
+ env->int_pgm_ilen = env->int_svc_ilen;
+ do_program_interrupt(env);
+ }
+}
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+static void do_ext_interrupt(CPUS390XState *env)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
+ S390CPU *cpu = env_archcpu(env);
+ uint64_t mask, addr;
+ uint16_t cpu_addr;
+ LowCore *lowcore;
+
+ if (!(env->psw.mask & PSW_MASK_EXT)) {
+ cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
+ }
+
+ lowcore = cpu_map_lowcore(env);
+
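+ /*
+ * The else-if chain below encodes the delivery order among the pending
+ * external-interrupt sources: emergency signal first, then external
+ * call, clock comparator, CPU timer, and finally the service signal.
+ */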
+ if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
+ (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int max_cpus = ms->smp.max_cpus;
+
+ lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
+ cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
+ g_assert(cpu_addr < S390_MAX_CPUS);
+ lowcore->cpu_addr = cpu_to_be16(cpu_addr);
+ clear_bit(cpu_addr, env->emergency_signals);
+ if (bitmap_empty(env->emergency_signals, max_cpus)) {
+ env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
+ }
+ } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
+ (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
+ lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
+ env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
+ } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
+ (env->cregs[0] & CR0_CKC_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
+ lowcore->cpu_addr = 0;
+ env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
+ } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
+ (env->cregs[0] & CR0_CPU_TIMER_SC)) {
+ lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
+ lowcore->cpu_addr = 0;
+ env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
+ } else if (qemu_s390_flic_has_service(flic) &&
+ (env->cregs[0] & CR0_SERVICE_SC)) {
+ uint32_t param;
+
+ param = qemu_s390_flic_dequeue_service(flic);
+ lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
+ lowcore->ext_params = cpu_to_be32(param);
+ lowcore->cpu_addr = 0;
+ } else {
+ g_assert_not_reached();
+ }
+
+ mask = be64_to_cpu(lowcore->external_new_psw.mask);
+ addr = be64_to_cpu(lowcore->external_new_psw.addr);
+ lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ s390_cpu_set_psw(env, mask, addr);
+}
+
+static void do_io_interrupt(CPUS390XState *env)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
+ uint64_t mask, addr;
+ QEMUS390FlicIO *io;
+ LowCore *lowcore;
+
+ g_assert(env->psw.mask & PSW_MASK_IO);
+ io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
+ g_assert(io);
+
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->subchannel_id = cpu_to_be16(io->id);
+ lowcore->subchannel_nr = cpu_to_be16(io->nr);
+ lowcore->io_int_parm = cpu_to_be32(io->parm);
+ lowcore->io_int_word = cpu_to_be32(io->word);
+ lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->io_new_psw.mask);
+ addr = be64_to_cpu(lowcore->io_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+ g_free(io);
+
+ s390_cpu_set_psw(env, mask, addr);
+}
+
+typedef struct MchkExtSaveArea {
+ uint64_t vregs[32][2]; /* 0x0000 */
+ uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
+} MchkExtSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
+
+static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
+{
+ hwaddr len = sizeof(MchkExtSaveArea);
+ MchkExtSaveArea *sa;
+ int i;
+
+ sa = cpu_physical_memory_map(mcesao, &len, true);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (len != sizeof(MchkExtSaveArea)) {
+ cpu_physical_memory_unmap(sa, len, 1, 0);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < 32; i++) {
+ sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
+ sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
+ }
+
+ cpu_physical_memory_unmap(sa, len, 1, len);
+ return 0;
+}
+
+static void do_mchk_interrupt(CPUS390XState *env)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
+ uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
+ uint64_t mask, addr, mcesao = 0;
+ LowCore *lowcore;
+ int i;
+
+ /* for now we only support channel report machine checks (floating) */
+ g_assert(env->psw.mask & PSW_MASK_MCHECK);
+ g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);
+
+ qemu_s390_flic_dequeue_crw_mchk(flic);
+
+ lowcore = cpu_map_lowcore(env);
+
+ /* extended save area */
+ if (mcic & MCIC_VB_VR) {
+ /* length and alignment are 1024 bytes */
+ mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
+ }
+
+ /* try to store vector registers */
+ if (!mcesao || mchk_store_vregs(env, mcesao)) {
+ mcic &= ~MCIC_VB_VR;
+ }
+
+ /* we are always in z/Architecture mode */
+ lowcore->ar_access_id = 1;
+
+ for (i = 0; i < 16; i++) {
+ lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
+ lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
+ lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
+ lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
+ }
+ lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
+ lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
+ lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
+ lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
+ lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);
+
+ lowcore->mcic = cpu_to_be64(mcic);
+ lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
+ lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
+ addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ s390_cpu_set_psw(env, mask, addr);
+}
+
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ bool stopped = false;
+
+ qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
+ __func__, cs->exception_index, env->psw.mask, env->psw.addr);
+
+try_deliver:
+ /* handle machine checks */
+ if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
+ cs->exception_index = EXCP_MCHK;
+ }
+ /* handle external interrupts */
+ if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
+ cs->exception_index = EXCP_EXT;
+ }
+ /* handle I/O interrupts */
+ if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
+ cs->exception_index = EXCP_IO;
+ }
+ /* RESTART interrupt */
+ if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
+ cs->exception_index = EXCP_RESTART;
+ }
+ /* STOP interrupt has least priority */
+ if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
+ cs->exception_index = EXCP_STOP;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_PGM:
+ do_program_interrupt(env);
+ break;
+ case EXCP_SVC:
+ do_svc_interrupt(env);
+ break;
+ case EXCP_EXT:
+ do_ext_interrupt(env);
+ break;
+ case EXCP_IO:
+ do_io_interrupt(env);
+ break;
+ case EXCP_MCHK:
+ do_mchk_interrupt(env);
+ break;
+ case EXCP_RESTART:
+ do_restart_interrupt(env);
+ break;
+ case EXCP_STOP:
+ do_stop_interrupt(env);
+ stopped = true;
+ break;
+ }
+
+ if (cs->exception_index != -1 && !stopped) {
+ /* check if there are more pending interrupts to deliver */
+ cs->exception_index = -1;
+ goto try_deliver;
+ }
+ cs->exception_index = -1;
+
+ /* we might still have pending interrupts, but not deliverable */
+ if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+
+ /* WAIT PSW during interrupt injection or STOP interrupt */
+ if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
+ /* don't trigger a cpu_loop_exit(), use an interrupt instead */
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+ } else if (cs->halted) {
+ /* unhalt if we had a WAIT PSW somewhere in our injection chain */
+ s390_cpu_unhalt(cpu);
+ }
+}
+
+bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ if (env->ex_value) {
+ /* Execution of the target insn is indivisible from
+ the parent EXECUTE insn. */
+ return false;
+ }
+ if (s390_cpu_has_int(cpu)) {
+ s390_cpu_do_interrupt(cs);
+ return true;
+ }
+ if (env->psw.mask & PSW_MASK_WAIT) {
+ /* Woken up because of a floating interrupt but it has already
+ * been delivered. Go back to sleep. */
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+ }
+ }
+ return false;
+}
+
+void s390x_cpu_debug_excp_handler(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit && wp_hit->flags & BP_CPU) {
+ /* FIXME: When the storage-alteration-space control bit is set,
+ the exception should only be triggered if the memory access
+ is done using an address space with the storage-alteration-event
+ bit set. We have no way to detect that with the current
+ watchpoint code. */
+ cs->watchpoint_hit = NULL;
+
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
+ /* FIXME: We currently have no way to detect the address space used
+ to trigger the watchpoint. For now just assume it is the current
+ default ASC. This turns out to be true except when the MVCP and
+ MVCS instructions are used. */
+ env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;
+
+ /*
+ * Remove all watchpoints to re-execute the code. A PER exception
+ * will be triggered, it will call s390_cpu_set_psw which will
+ * recompute the watchpoints.
+ */
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+ cpu_loop_exit_noexc(cs);
+ }
+}
+
+void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ do_unaligned_access(cs, retaddr);
+}
+
+static void QEMU_NORETURN monitor_event(CPUS390XState *env,
+ uint64_t monitor_code,
+ uint8_t monitor_class, uintptr_t ra)
+{
+ /* Store the Monitor Code and the Monitor Class Number into the lowcore */
+ stq_phys(env_cpu(env)->as,
+ env->psa + offsetof(LowCore, monitor_code), monitor_code);
+ stw_phys(env_cpu(env)->as,
+ env->psa + offsetof(LowCore, mon_class_num), monitor_class);
+
+ tcg_s390_program_interrupt(env, PGM_MONITOR, ra);
+}
+
+void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
+ uint32_t monitor_class)
+{
+ g_assert(monitor_class <= 0xff);
+
+ if (env->cregs[8] & (0x8000 >> monitor_class)) {
+ monitor_event(env, monitor_code, monitor_class, GETPC());
+ }
+}
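+
+/*
+ * Illustration: for monitor class 5, the test above probes CR8 bit
+ * 0x8000 >> 5 = 0x0400; only when that class-mask bit is set does
+ * MONITOR CALL raise the monitor event.
+ */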
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c
new file mode 100644
index 000000000..406720540
--- /dev/null
+++ b/target/s390x/tcg/fpu_helper.c
@@ -0,0 +1,976 @@
+/*
+ * S/390 FPU helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
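+/*
+ * Convention for returning a 128-bit result from a helper: the low 64 bits
+ * are stashed in env->retxl and the high 64 bits become the helper's return
+ * value, as encoded by RET128() below.
+ */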
+#define RET128(F) (env->retxl = F.low, F.high)
+
+uint8_t s390_softfloat_exc_to_ieee(unsigned int exc)
+{
+ uint8_t s390_exc = 0;
+
+ s390_exc |= (exc & float_flag_invalid) ? S390_IEEE_MASK_INVALID : 0;
+ s390_exc |= (exc & float_flag_divbyzero) ? S390_IEEE_MASK_DIVBYZERO : 0;
+ s390_exc |= (exc & float_flag_overflow) ? S390_IEEE_MASK_OVERFLOW : 0;
+ s390_exc |= (exc & float_flag_underflow) ? S390_IEEE_MASK_UNDERFLOW : 0;
+ s390_exc |= (exc & float_flag_inexact) ? S390_IEEE_MASK_INEXACT : 0;
+
+ return s390_exc;
+}
+
+/* Should be called after any operation that may raise IEEE exceptions. */
+static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr)
+{
+ unsigned s390_exc, qemu_exc;
+
+ /* Get the exceptions raised by the current operation. Reset the
+ fpu_status contents so that the next operation has a clean slate. */
+ qemu_exc = env->fpu_status.float_exception_flags;
+ if (qemu_exc == 0) {
+ return;
+ }
+ env->fpu_status.float_exception_flags = 0;
+ s390_exc = s390_softfloat_exc_to_ieee(qemu_exc);
+
+ /*
+ * An IEEE-underflow exception is recognized if a tininess condition
+ * (underflow) exists and either
+ * - the mask bit in the FPC is zero and the result is inexact, or
+ * - the mask bit in the FPC is one.
+ * So a tininess condition that is not inexact triggers no underflow
+ * action when the mask bit is zero.
+ */
+ if (!(s390_exc & S390_IEEE_MASK_INEXACT) &&
+ !((env->fpc >> 24) & S390_IEEE_MASK_UNDERFLOW)) {
+ s390_exc &= ~S390_IEEE_MASK_UNDERFLOW;
+ }
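+
+ /*
+ * Example: an underflowing result that is exact (no inexact flag) with
+ * the FPC underflow mask bit zero is dropped right here and causes
+ * neither a trap nor a status-flag update below.
+ */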
+
+ /*
+ * FIXME:
+ * 1. Right now, all inexact conditions are indicated as
+ * "truncated" (0) and never as "incremented" (1) in the DXC.
+ * 2. Only traps due to invalid/divbyzero are suppressing. Other traps
+ * are completing, meaning the target register has to be written!
+ * This, however, means that we have to write the register before
+ * triggering the trap - impossible right now.
+ */
+
+ /*
+ * invalid/divbyzero cannot coexist with other conditions.
+ * overflow/underflow however can coexist with inexact, we have to
+ * handle it separately.
+ */
+ if (s390_exc & ~S390_IEEE_MASK_INEXACT) {
+ if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+ /* trap condition - inexact reported along */
+ tcg_s390_data_exception(env, s390_exc, retaddr);
+ }
+ /* nontrap condition - inexact handled differently */
+ env->fpc |= (s390_exc & ~S390_IEEE_MASK_INEXACT) << 16;
+ }
+
+ /* inexact handling */
+ if (s390_exc & S390_IEEE_MASK_INEXACT && !XxC) {
+ /* trap condition - overflow/underflow _not_ reported along */
+ if (s390_exc & S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+ tcg_s390_data_exception(env, s390_exc & S390_IEEE_MASK_INEXACT,
+ retaddr);
+ }
+ /* nontrap condition */
+ env->fpc |= (s390_exc & S390_IEEE_MASK_INEXACT) << 16;
+ }
+}
+
+int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare)
+{
+ switch (float_compare) {
+ case float_relation_equal:
+ return 0;
+ case float_relation_less:
+ return 1;
+ case float_relation_greater:
+ return 2;
+ case float_relation_unordered:
+ return 3;
+ default:
+ cpu_abort(env_cpu(env), "unknown return value for float compare\n");
+ }
+}
+
+/* condition codes for unary FP ops */
+uint32_t set_cc_nz_f32(float32 v)
+{
+ if (float32_is_any_nan(v)) {
+ return 3;
+ } else if (float32_is_zero(v)) {
+ return 0;
+ } else if (float32_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+uint32_t set_cc_nz_f64(float64 v)
+{
+ if (float64_is_any_nan(v)) {
+ return 3;
+ } else if (float64_is_zero(v)) {
+ return 0;
+ } else if (float64_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+uint32_t set_cc_nz_f128(float128 v)
+{
+ if (float128_is_any_nan(v)) {
+ return 3;
+ } else if (float128_is_zero(v)) {
+ return 0;
+ } else if (float128_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* condition codes for FP to integer conversion ops */
+static uint32_t set_cc_conv_f32(float32 v, float_status *stat)
+{
+ if (stat->float_exception_flags & float_flag_invalid) {
+ return 3;
+ } else {
+ return set_cc_nz_f32(v);
+ }
+}
+
+static uint32_t set_cc_conv_f64(float64 v, float_status *stat)
+{
+ if (stat->float_exception_flags & float_flag_invalid) {
+ return 3;
+ } else {
+ return set_cc_nz_f64(v);
+ }
+}
+
+static uint32_t set_cc_conv_f128(float128 v, float_status *stat)
+{
+ if (stat->float_exception_flags & float_flag_invalid) {
+ return 3;
+ } else {
+ return set_cc_nz_f128(v);
+ }
+}
+
+static inline uint8_t round_from_m34(uint32_t m34)
+{
+ return extract32(m34, 0, 4);
+}
+
+static inline bool xxc_from_m34(uint32_t m34)
+{
+ /* XxC is bit 1 of the m4 field (IBM numbering, bit 0 = leftmost) */
+ return extract32(m34, 4 + 3 - 1, 1);
+}
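+
+/*
+ * Worked example, assuming the translator packs the fields as
+ * m34 = (m4 << 4) | m3: m3 = 5 (round toward zero) with the XxC bit of
+ * m4 set gives m34 = (0x4 << 4) | 0x5 = 0x45, for which round_from_m34()
+ * returns 5 and xxc_from_m34() returns true (bit 6 is set).
+ */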
+
+/* 32-bit FP addition */
+uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_add(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP addition */
+uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_add(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 128-bit FP addition */
+uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_add(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP subtraction */
+uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_sub(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP subtraction */
+uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_sub(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 128-bit FP subtraction */
+uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_sub(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP division */
+uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_div(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP division */
+uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_div(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 128-bit FP division */
+uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_div(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP multiplication */
+uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_mul(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiplication */
+uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_mul(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64/32-bit FP multiplication */
+uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float32_to_float64(f2, &env->fpu_status);
+ ret = float64_mul(f1, ret, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 128-bit FP multiplication */
+uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_mul(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* 128/64-bit FP multiplication */
+uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t f2)
+{
+ float128 ret = float64_to_float128(f2, &env->fpu_status);
+ ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit float */
+uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2)
+{
+ float64 ret = float32_to_float64(f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 64-bit float */
+uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 64-bit float to 128-bit float */
+uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2)
+{
+ float128 ret = float64_to_float128(f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* convert 32-bit float to 128-bit float */
+uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2)
+{
+ float128 ret = float32_to_float128(f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+/* convert 64-bit float to 32-bit float */
+uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float32 ret = float64_to_float32(f2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 32-bit float */
+uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* 32-bit FP compare */
+uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ FloatRelation cmp = float32_compare_quiet(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare */
+uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ FloatRelation cmp = float64_compare_quiet(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare */
+uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ FloatRelation cmp = float128_compare_quiet(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3)
+{
+ int ret = env->fpu_status.float_rounding_mode;
+
+ switch (m3) {
+ case 0:
+ /* current mode */
+ break;
+ case 1:
+ /* round to nearest with ties away from 0 */
+ set_float_rounding_mode(float_round_ties_away, &env->fpu_status);
+ break;
+ case 3:
+ /* round to prepare for shorter precision */
+ set_float_rounding_mode(float_round_to_odd, &env->fpu_status);
+ break;
+ case 4:
+ /* round to nearest with ties to even */
+ set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
+ break;
+ case 5:
+ /* round to zero */
+ set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
+ break;
+ case 6:
+ /* round to +inf */
+ set_float_rounding_mode(float_round_up, &env->fpu_status);
+ break;
+ case 7:
+ /* round to -inf */
+ set_float_rounding_mode(float_round_down, &env->fpu_status);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return ret;
+}
+
+void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode)
+{
+ set_float_rounding_mode(old_mode, &env->fpu_status);
+}
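+
+/*
+ * The m34-taking conversion and rounding helpers below all follow the same
+ * pattern:
+ *
+ *     int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ *     ret = <softfloat operation>(..., &env->fpu_status);
+ *     s390_restore_bfp_rounding_mode(env, old_mode);
+ *     handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ */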
+
+/* convert 64-bit int to 32-bit float */
+uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float32 ret = int64_to_float32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 64-bit int to 64-bit float */
+uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float64 ret = int64_to_float64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 64-bit int to 128-bit float */
+uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 ret = int64_to_float128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return RET128(ret);
+}
+
+/* convert 64-bit uint to 32-bit float */
+uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float32 ret = uint64_to_float32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 64-bit uint to 64-bit float */
+uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float64 ret = uint64_to_float64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* convert 64-bit uint to 128-bit float */
+uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 ret = uint64_to_float128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit int */
+uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ int64_t ret = float32_to_int64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float32_is_any_nan(v2)) {
+ return INT64_MIN;
+ }
+ return ret;
+}
+
+/* convert 64-bit float to 64-bit int */
+uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ int64_t ret = float64_to_int64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float64_is_any_nan(v2)) {
+ return INT64_MIN;
+ }
+ return ret;
+}
+
+/* convert 128-bit float to 64-bit int */
+uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 v2 = make_float128(h, l);
+ int64_t ret = float128_to_int64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float128_is_any_nan(v2)) {
+ return INT64_MIN;
+ }
+ return ret;
+}
+
+/* convert 32-bit float to 32-bit int */
+uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ int32_t ret = float32_to_int32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float32_is_any_nan(v2)) {
+ return INT32_MIN;
+ }
+ return ret;
+}
+
+/* convert 64-bit float to 32-bit int */
+uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ int32_t ret = float64_to_int32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float64_is_any_nan(v2)) {
+ return INT32_MIN;
+ }
+ return ret;
+}
+
+/* convert 128-bit float to 32-bit int */
+uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 v2 = make_float128(h, l);
+ int32_t ret = float128_to_int32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float128_is_any_nan(v2)) {
+ return INT32_MIN;
+ }
+ return ret;
+}
+
+/* convert 32-bit float to 64-bit uint */
+uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ uint64_t ret = float32_to_uint64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float32_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* convert 64-bit float to 64-bit uint */
+uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ uint64_t ret = float64_to_uint64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float64_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* convert 128-bit float to 64-bit uint */
+uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 v2 = make_float128(h, l);
+ uint64_t ret = float128_to_uint64(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float128_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* convert 32-bit float to 32-bit uint */
+uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ uint32_t ret = float32_to_uint32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float32_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* convert 64-bit float to 32-bit uint */
+uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ uint32_t ret = float64_to_uint32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float64_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* convert 128-bit float to 32-bit uint */
+uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 v2 = make_float128(h, l);
+ uint32_t ret = float128_to_uint32(v2, &env->fpu_status);
+ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ env->cc_op = cc;
+ if (float128_is_any_nan(v2)) {
+ return 0;
+ }
+ return ret;
+}
+
+/* round to integer 32-bit */
+uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float32 ret = float32_round_to_int(f2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* round to integer 64-bit */
+uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float64 ret = float64_round_to_int(f2, &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return ret;
+}
+
+/* round to integer 128-bit */
+uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint32_t m34)
+{
+ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34));
+ float128 ret = float128_round_to_int(make_float128(ah, al),
+ &env->fpu_status);
+
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_exceptions(env, xxc_from_m34(m34), GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP compare and signal */
+uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ FloatRelation cmp = float32_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare and signal */
+uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ FloatRelation cmp = float64_compare(f1, f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare and signal */
+uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ FloatRelation cmp = float128_compare(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 32-bit FP multiply and add */
+uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiply and add */
+uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 32-bit FP multiply and subtract */
+uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c,
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiply and subtract */
+uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c,
+ &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* The rightmost bit of the 12-bit data-class mask has the number 11. */
+static inline uint16_t dcmask(int bit, bool neg)
+{
+ return 1 << (11 - bit - neg);
+}
+
+#define DEF_FLOAT_DCMASK(_TYPE) \
+uint16_t _TYPE##_dcmask(CPUS390XState *env, _TYPE f1) \
+{ \
+ const bool neg = _TYPE##_is_neg(f1); \
+ \
+ /* Sorted by most common cases - only one class is possible */ \
+ if (_TYPE##_is_normal(f1)) { \
+ return dcmask(2, neg); \
+ } else if (_TYPE##_is_zero(f1)) { \
+ return dcmask(0, neg); \
+ } else if (_TYPE##_is_denormal(f1)) { \
+ return dcmask(4, neg); \
+ } else if (_TYPE##_is_infinity(f1)) { \
+ return dcmask(6, neg); \
+ } else if (_TYPE##_is_quiet_nan(f1, &env->fpu_status)) { \
+ return dcmask(8, neg); \
+ } \
+ /* signaling nan, as last remaining case */ \
+ return dcmask(10, neg); \
+}
+DEF_FLOAT_DCMASK(float32)
+DEF_FLOAT_DCMASK(float64)
+DEF_FLOAT_DCMASK(float128)
+
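+/*
+ * Worked example: for +0.0 the zero case matches, giving
+ * dcmask(0, false) = 1 << 11 = 0x800; for -infinity the infinity case
+ * matches, giving dcmask(6, true) = 1 << (11 - 6 - 1) = 0x010. The TEST
+ * DATA CLASS helpers below return nonzero iff the corresponding bit is
+ * set in the m2 mask.
+ */
+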
+/* test data class 32-bit */
+uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2)
+{
+ return (m2 & float32_dcmask(env, f1)) != 0;
+}
+
+/* test data class 64-bit */
+uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2)
+{
+ return (m2 & float64_dcmask(env, v1)) != 0;
+}
+
+/* test data class 128-bit */
+uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t m2)
+{
+ return (m2 & float128_dcmask(env, make_float128(ah, al))) != 0;
+}
+
+/* square root 32-bit */
+uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2)
+{
+ float32 ret = float32_sqrt(f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* square root 64-bit */
+uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2)
+{
+ float64 ret = float64_sqrt(f2, &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return ret;
+}
+
+/* square root 128-bit */
+uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+ float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status);
+ handle_exceptions(env, false, GETPC());
+ return RET128(ret);
+}
+
+static const int fpc_to_rnd[8] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ -1,
+ -1,
+ -1,
+ float_round_to_odd,
+};
+
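+/*
+ * FPC bit usage as relied upon in this file (a summary, not the full
+ * architected layout): the low 3 bits select the BFP rounding mode,
+ * bits 16-23 hold the IEEE status flags (hence the "<< 16" in
+ * handle_exceptions()), and bits 24-31 hold the IEEE trap masks (hence
+ * the ">> 24" tests).
+ */
+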
+/* set fpc */
+void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc)
+{
+ if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
+ (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+
+ /* Install everything in the main FPC. */
+ env->fpc = fpc;
+
+ /* Install the rounding mode in the shadow fpu_status. */
+ set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status);
+}
+
+/* set fpc and signal */
+void HELPER(sfas)(CPUS390XState *env, uint64_t fpc)
+{
+ uint32_t signalling = env->fpc;
+ uint32_t s390_exc;
+
+ if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
+ (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+
+ /*
+ * FPC is set to the FPC operand with a bitwise OR of the signalling
+ * flags.
+ */
+ env->fpc = fpc | (signalling & 0x00ff0000);
+ set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status);
+
+ /*
+ * If any signalling flag is enabled in the new FPC mask, a
+ * simulated-IEEE-exception event occurs.
+ */
+ s390_exc = (signalling >> 16) & (fpc >> 24);
+ if (s390_exc) {
+ if (s390_exc & S390_IEEE_MASK_INVALID) {
+ s390_exc = S390_IEEE_MASK_INVALID;
+ } else if (s390_exc & S390_IEEE_MASK_DIVBYZERO) {
+ s390_exc = S390_IEEE_MASK_DIVBYZERO;
+ } else if (s390_exc & S390_IEEE_MASK_OVERFLOW) {
+ s390_exc &= (S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT);
+ } else if (s390_exc & S390_IEEE_MASK_UNDERFLOW) {
+ s390_exc &= (S390_IEEE_MASK_UNDERFLOW | S390_IEEE_MASK_INEXACT);
+ } else if (s390_exc & S390_IEEE_MASK_INEXACT) {
+ s390_exc = S390_IEEE_MASK_INEXACT;
+ } else if (s390_exc & S390_IEEE_MASK_QUANTUM) {
+ s390_exc = S390_IEEE_MASK_QUANTUM;
+ }
+ tcg_s390_data_exception(env, s390_exc | 3, GETPC());
+ }
+}
+
+/* set bfp rounding mode */
+void HELPER(srnm)(CPUS390XState *env, uint64_t rnd)
+{
+ if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+
+ env->fpc = deposit32(env->fpc, 0, 3, rnd);
+ set_float_rounding_mode(fpc_to_rnd[rnd & 0x7], &env->fpu_status);
+}
diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def
new file mode 100644
index 000000000..3e5594210
--- /dev/null
+++ b/target/s390x/tcg/insn-data.def
@@ -0,0 +1,1398 @@
+/*
+ * Arguments to the opcode prototypes
+ *
+ * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC)
+ * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA)
+ * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS)
+ * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS)
+ *
+ * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode
+ * NAME = name of the opcode, used internally
+ * FMT = format of the opcode (defined in insn-format.def)
+ * FAC = facility the opcode is available in (defined in DisasFacility)
+ * I1 = func in1_xx fills o->in1
+ * I2 = func in2_xx fills o->in2
+ * P = func prep_xx initializes o->*out*
+ * W = func wout_xx writes o->*out* somewhere
+ * OP = func op_xx does the bulk of the operation
+ * CC = func cout_xx defines how cc should get set
+ * DATA = immediate argument to op_xx function
+ * FLAGS = categorize the type of instruction (e.g. for advanced checks)
+ *
+ * The helpers get called in order: I1, I2, P, OP, W, CC
+ */
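+
+/*
+ * As a worked example, the first ADD entry below,
+ *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
+ * reads: opcode 0x1a (AR) in format RR_a, available in the base (Z)
+ * facility; per the naming convention above, in1_r1 and in2_r2 load the
+ * operands, prep_new allocates a fresh output temporary, op_add performs
+ * the addition, wout_r1_32 writes the low 32 bits back to r1, and
+ * cout_adds32 sets the cc for a signed 32-bit add.
+ */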
+
+/* ADD */
+ C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
+ C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32)
+ C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32)
+ C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32)
+ C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64)
+ C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64)
+ C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64)
+ C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64)
+ C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64)
+ F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP)
+ F(0xb31a, ADBR, RRE, Z, f1, f2, new, f1, adb, f64, IF_BFP)
+ F(0xb34a, AXBR, RRE, Z, x2h, x2l, x1, x1, axb, f128, IF_BFP)
+ F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP)
+ F(0xed1a, ADB, RXE, Z, f1, m2_64, new, f1, adb, f64, IF_BFP)
+/* ADD HIGH */
+ C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32)
+ C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32)
+/* ADD IMMEDIATE */
+ C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32)
+ D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL)
+ C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
+ C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
+ D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ)
+ C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
+/* ADD IMMEDIATE HIGH */
+ C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
+/* ADD HALFWORD */
+ C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32)
+ C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32)
+ C(0xe338, AGH, RXY_a, MIE2,r1, m2_16s, r1, 0, add, adds64)
+/* ADD HALFWORD IMMEDIATE */
+ C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32)
+ C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64)
+
+/* ADD LOGICAL */
+ C(0x1e00, ALR, RR_a, Z, r1_32u, r2_32u, new, r1_32, add, addu32)
+ C(0xb9fa, ALRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, add, addu32)
+ C(0x5e00, AL, RX_a, Z, r1_32u, m2_32u, new, r1_32, add, addu32)
+ C(0xe35e, ALY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, add, addu32)
+ C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, addu64, addu64)
+ C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, addu64, addu64)
+ C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, addu64, addu64)
+ C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, addu64, addu64)
+ C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, addu64, addu64)
+/* ADD LOGICAL HIGH */
+ C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32)
+ C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, add, addu32)
+/* ADD LOGICAL IMMEDIATE */
+ C(0xc20b, ALFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, add, addu32)
+ C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, addu64, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE */
+ D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL)
+ C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32)
+ D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ)
+ C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
+ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32)
+ C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, 0)
+/* ADD LOGICAL WITH CARRY */
+ C(0xb998, ALCR, RRE, Z, r1_32u, r2_32u, new, r1_32, addc32, addu32)
+ C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc64, addu64)
+ C(0xe398, ALC, RXY_a, Z, r1_32u, m2_32u, new, r1_32, addc32, addu32)
+ C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc64, addu64)
+
+/* AND */
+ C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32)
+ C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32)
+ C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32)
+ C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32)
+ C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64)
+ C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64)
+ C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64)
+ C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0)
+/* AND IMMEDIATE */
+ D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020)
+ D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000)
+ D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030)
+ D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020)
+ D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010)
+ D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000)
+ D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB)
+ D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB)
+
+/* BRANCH AND LINK */
+ C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0)
+ C(0x4500, BAL, RX_a, Z, 0, a2, r1, 0, bal, 0)
+/* BRANCH AND SAVE */
+ C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0)
+ C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0)
+/* BRANCH RELATIVE AND SAVE */
+ C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0)
+ C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0)
+/* BRANCH INDIRECT ON CONDITION */
+ C(0xe347, BIC, RXY_b, MIE2,0, m2_64w, 0, 0, bc, 0)
+/* BRANCH ON CONDITION */
+ C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0)
+ C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0)
+/* BRANCH RELATIVE ON CONDITION */
+ C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0)
+ C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0)
+/* BRANCH ON COUNT */
+ C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0)
+ C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0)
+ C(0x4600, BCT, RX_a, Z, 0, a2, 0, 0, bct32, 0)
+ C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT */
+ C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0)
+ C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT HIGH */
+ C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0)
+/* BRANCH ON INDEX */
+ D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0)
+ D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1)
+ D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0)
+ D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1)
+/* BRANCH RELATIVE ON INDEX */
+ D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0)
+ D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1)
+ D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0)
+ D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1)
+/* BRANCH PREDICTION PRELOAD */
+ /* ??? Format is SMI, but implemented as NOP, so we need no fields. */
+ C(0xc700, BPP, E, EH, 0, 0, 0, 0, 0, 0)
+/* BRANCH PREDICTION RELATIVE PRELOAD */
+ /* ??? Format is MII, but implemented as NOP, so we need no fields. */
+ C(0xc500, BPRP, E, EH, 0, 0, 0, 0, 0, 0)
+/* NEXT INSTRUCTION ACCESS INTENT */
+ /* ??? Format is IE, but implemented as NOP, so we need no fields. */
+ C(0xb2fa, NIAI, E, EH, 0, 0, 0, 0, 0, 0)
+
+/* CHECKSUM */
+ C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0)
+
+/* COPY SIGN */
+ F(0xb372, CPSDR, RRF_b, FPSSH, f3, f2, new, f1, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3)
+
+/* COMPARE */
+ C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32)
+ C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32)
+ C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32)
+ C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64)
+ C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64)
+ C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64)
+ C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64)
+ F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP)
+ F(0xb319, CDBR, RRE, Z, f1, f2, 0, 0, cdb, 0, IF_BFP)
+ F(0xb349, CXBR, RRE, Z, x2h, x2l, x1, 0, cxb, 0, IF_BFP)
+ F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP)
+ F(0xed19, CDB, RXE, Z, f1, m2_64, 0, 0, cdb, 0, IF_BFP)
+/* COMPARE AND SIGNAL */
+ F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP)
+ F(0xb318, KDBR, RRE, Z, f1, f2, 0, 0, kdb, 0, IF_BFP)
+ F(0xb348, KXBR, RRE, Z, x2h, x2l, x1, 0, kxb, 0, IF_BFP)
+ F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP)
+ F(0xed18, KDB, RXE, Z, f1, m2_64, 0, 0, kdb, 0, IF_BFP)
+/* COMPARE IMMEDIATE */
+ C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32)
+ C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64)
+/* COMPARE RELATIVE LONG */
+ C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32)
+ C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64)
+ C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD */
+ C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32)
+ C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32)
+ C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD IMMEDIATE */
+ C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32)
+ C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64)
+ C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64)
+ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64)
+ C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD RELATIVE LONG */
+ C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32)
+ C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64)
+/* COMPARE HIGH */
+ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32)
+ C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32)
+ C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32)
+/* COMPARE IMMEDIATE HIGH */
+ C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32)
+
+/* COMPARE LOGICAL */
+ C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32)
+ C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32)
+ C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32)
+ C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64)
+ C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64)
+ C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64)
+ C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64)
+ C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0)
+/* COMPARE LOGICAL HIGH */
+ C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32)
+ C(0xb9df, CLHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmpu32)
+ C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL IMMEDIATE */
+ C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32)
+ C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64)
+ C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64)
+ C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64)
+ C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64)
+ C(0xe55d, CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64)
+ C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL IMMEDIATE HIGH */
+ C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL RELATIVE LONG */
+ C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32)
+ C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64)
+ C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64)
+ C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32)
+ C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL LONG */
+ C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0)
+/* COMPARE LOGICAL LONG EXTENDED */
+ C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0)
+/* COMPARE LOGICAL LONG UNICODE */
+ C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0)
+/* COMPARE LOGICAL CHARACTERS UNDER MASK */
+ C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0)
+ C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0)
+ C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0)
+/* COMPARE LOGICAL STRING */
+ C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0)
+
+/* COMPARE AND BRANCH */
+ D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+ D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+ D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+ D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+ D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+ D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0)
+ D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+ D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0)
+/* COMPARE LOGICAL AND BRANCH */
+ D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+ D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+ D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+ D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+ D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+ D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+ D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+ D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+
+/* COMPARE AND SWAP */
+ D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
+ D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
+ D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ)
+/* COMPARE DOUBLE AND SWAP */
+ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
+ D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
+ C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0)
+/* COMPARE AND SWAP AND STORE */
+ C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0)
+
+/* COMPARE AND TRAP */
+ D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0)
+ D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0)
+ D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0)
+ D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0)
+/* COMPARE LOGICAL AND TRAP */
+ D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1)
+ D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1)
+ D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1)
+ D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1)
+ D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1)
+ D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1)
+
+/* CONVERT TO DECIMAL */
+ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0)
+ C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0)
+/* CONVERT TO FIXED */
+ F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP)
+ F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP)
+ F(0xb39a, CFXBR, RRF_e, Z, x2h, x2l, new, r1_32, cfxb, 0, IF_BFP)
+ F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP)
+ F(0xb3a9, CGDBR, RRF_e, Z, 0, f2, r1, 0, cgdb, 0, IF_BFP)
+ F(0xb3aa, CGXBR, RRF_e, Z, x2h, x2l, r1, 0, cgxb, 0, IF_BFP)
+/* CONVERT FROM FIXED */
+ F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP)
+ F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, new, f1, cdgb, 0, IF_BFP)
+ F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, new_P, x1, cxgb, 0, IF_BFP)
+ F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP)
+ F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, new, f1, cdgb, 0, IF_BFP)
+ F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, new_P, x1, cxgb, 0, IF_BFP)
+/* CONVERT TO LOGICAL */
+ F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP)
+ F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2, new, r1_32, clfdb, 0, IF_BFP)
+ F(0xb39e, CLFXBR, RRF_e, FPE, x2h, x2l, new, r1_32, clfxb, 0, IF_BFP)
+ F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP)
+ F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2, r1, 0, clgdb, 0, IF_BFP)
+ F(0xb3ae, CLGXBR, RRF_e, FPE, x2h, x2l, r1, 0, clgxb, 0, IF_BFP)
+/* CONVERT FROM LOGICAL */
+ F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP)
+ F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, new, f1, cdlgb, 0, IF_BFP)
+ F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, new_P, x1, cxlgb, 0, IF_BFP)
+ F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP)
+ F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, new, f1, cdlgb, 0, IF_BFP)
+ F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, new_P, x1, cxlgb, 0, IF_BFP)
+
+/* CONVERT UTF-8 TO UTF-16 */
+ D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12)
+/* CONVERT UTF-8 TO UTF-32 */
+ D(0xb9b0, CU14, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 14)
+/* CONVERT UTF-16 TO UTF-8 */
+ D(0xb2a6, CU21, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 21)
+/* CONVERT UTF-16 TO UTF-32 */
+ D(0xb9b1, CU24, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 24)
+/* CONVERT UTF-32 TO UTF-8 */
+ D(0xb9b2, CU41, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 41)
+/* CONVERT UTF-32 TO UTF-16 */
+ D(0xb9b3, CU42, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 42)
+
+/* DIVIDE */
+ C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0)
+ C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0)
+ F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP)
+ F(0xb31d, DDBR, RRE, Z, f1, f2, new, f1, ddb, 0, IF_BFP)
+ F(0xb34d, DXBR, RRE, Z, x2h, x2l, x1, x1, dxb, 0, IF_BFP)
+ F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP)
+ F(0xed1d, DDB, RXE, Z, f1, m2_64, new, f1, ddb, 0, IF_BFP)
+/* DIVIDE LOGICAL */
+ C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0)
+ C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0)
+ C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0)
+ C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0)
+/* DIVIDE SINGLE */
+ C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0)
+ C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0)
+ C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0)
+ C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0)
+
+/* EXCLUSIVE OR */
+ C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32)
+ C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32)
+ C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32)
+ C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32)
+ C(0xb982, XGR, RRE, Z, r1, r2, r1, 0, xor, nz64)
+ C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64)
+ C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64)
+ C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0)
+/* EXCLUSIVE OR IMMEDIATE */
+ D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020)
+ D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000)
+ D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB)
+ D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB)
+
+/* EXECUTE */
+ C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0)
+/* EXECUTE RELATIVE LONG */
+ C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0)
+
+/* EXTRACT ACCESS */
+ C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
+/* EXTRACT CPU ATTRIBUTE */
+ C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0)
+/* EXTRACT CPU TIME */
+ F(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0, IF_IO)
+/* EXTRACT FPC */
+ F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0, IF_BFP)
+/* EXTRACT PSW */
+ C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0)
+
+/* FIND LEFTMOST ONE */
+ C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0)
+
+/* INSERT CHARACTER */
+ C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0)
+ C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0)
+/* INSERT CHARACTERS UNDER MASK */
+ D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0)
+ D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0)
+ D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32)
+/* INSERT IMMEDIATE */
+ D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020)
+ D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000)
+ D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030)
+ D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020)
+ D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010)
+ D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000)
+/* INSERT PROGRAM MASK */
+ C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0)
+
+/* LOAD */
+ C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0)
+ C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0)
+ C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0)
+ C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0)
+ C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0)
+ C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0)
+ C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0)
+ F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2)
+ F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1)
+ F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1)
+ F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2)
+ F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1)
+ F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1)
+ F(0xb365, LXR, RRE, Z, x2h, x2l, 0, x1, movx, 0, IF_AFP1)
+/* LOAD IMMEDIATE */
+ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0)
+/* LOAD RELATIVE LONG */
+ C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0)
+ C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0)
+ C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0)
+/* LOAD ADDRESS */
+ C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0)
+ C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0)
+/* LOAD ADDRESS EXTENDED */
+ C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0)
+ C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0)
+/* LOAD ADDRESS RELATIVE LONG */
+ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
+/* LOAD AND ADD */
+ D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL)
+ D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ)
+/* LOAD AND ADD LOGICAL */
+ D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL)
+ D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ)
+/* LOAD AND AND */
+ D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL)
+ D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ)
+/* LOAD AND EXCLUSIVE OR */
+ D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL)
+ D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ)
+/* LOAD AND OR */
+ D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL)
+ D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ)
+/* LOAD AND TEST */
+ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
+ C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
+ C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64)
+ C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64)
+ C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64)
+ C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64)
+ F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP)
+ F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP)
+ F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1, movx, f128, IF_BFP)
+/* LOAD AND TRAP */
+ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0)
+ C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0)
+/* LOAD AND ZERO RIGHTMOST BYTE */
+ C(0xe33b, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0)
+ C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0)
+/* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */
+ C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0)
+/* LOAD BYTE */
+ C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0)
+ C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0)
+ C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0)
+ C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0)
+/* LOAD BYTE HIGH */
+ C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0)
+/* LOAD COMPLEMENT */
+ C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32)
+ C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64)
+ C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64)
+ F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP)
+ F(0xb313, LCDBR, RRE, Z, 0, f2, new, f1, negf64, f64, IF_BFP)
+ F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP)
+ F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2)
+/* LOAD COUNT TO BLOCK BOUNDARY */
+ C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0)
+/* LOAD HALFWORD */
+ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0)
+ C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0)
+ C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0)
+ C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0)
+ C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0)
+/* LOAD HALFWORD HIGH */
+ C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0)
+/* LOAD HALFWORD IMMEDIATE */
+ C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0)
+ C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0)
+/* LOAD HALFWORD RELATIVE LONG */
+ C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0)
+ C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0)
+/* LOAD HIGH */
+ C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0)
+/* LOAD HIGH AND TRAP */
+ C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0)
+/* LOAD LOGICAL */
+ C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0)
+ C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL AND TRAP */
+ C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0)
+/* LOAD LOGICAL RELATIVE LONG */
+ C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL CHARACTER */
+ C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0)
+ C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0)
+ C(0xe394, LLC, RXY_a, EI, 0, a2, new, r1_32, ld8u, 0)
+ C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0)
+/* LOAD LOGICAL CHARACTER HIGH */
+ C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0)
+/* LOAD LOGICAL HALFWORD */
+ C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0)
+ C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0)
+ C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0)
+ C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL HALFWORD HIGH */
+ C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0)
+/* LOAD LOGICAL HALFWORD RELATIVE LONG */
+ C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0)
+ C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL IMMEDIATE */
+ D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32)
+ D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0)
+ D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48)
+ D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32)
+ D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16)
+ D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0)
+/* LOAD LOGICAL THIRTY ONE BITS */
+ C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0)
+ C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0)
+/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */
+ C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0)
+
+/* LOAD FPR FROM GR */
+ F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1)
+/* LOAD GR FROM FPR */
+ F(0xb3cd, LGDR, RRE, FPRGR, 0, f2, 0, r1, mov2, 0, IF_AFP2)
+/* LOAD NEGATIVE */
+ C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32)
+ C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64)
+ C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64)
+ F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP)
+ F(0xb311, LNDBR, RRE, Z, 0, f2, new, f1, nabsf64, f64, IF_BFP)
+ F(0xb341, LNXBR, RRE, Z, x2h, x2l, new_P, x1, nabsf128, f128, IF_BFP)
+ F(0xb371, LNDFR, RRE, FPSSH, 0, f2, new, f1, nabsf64, 0, IF_AFP1 | IF_AFP2)
+/* LOAD ON CONDITION */
+ C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0)
+ C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0)
+ C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0)
+ C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0)
+/* LOAD HALFWORD IMMEDIATE ON CONDITION */
+ C(0xec42, LOCHI, RIE_g, LOC2, r1, i2, new, r1_32, loc, 0)
+ C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0)
+ C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0)
+/* LOAD HIGH ON CONDITION */
+ C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0)
+ C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0)
+/* LOAD PAIR DISJOINT */
+ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
+ D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
+/* LOAD PAIR FROM QUADWORD */
+ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0)
+/* LOAD POSITIVE */
+ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32)
+ C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64)
+ C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64)
+ F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP)
+ F(0xb310, LPDBR, RRE, Z, 0, f2, new, f1, absf64, f64, IF_BFP)
+ F(0xb340, LPXBR, RRE, Z, x2h, x2l, new_P, x1, absf128, f128, IF_BFP)
+ F(0xb370, LPDFR, RRE, FPSSH, 0, f2, new, f1, absf64, 0, IF_AFP1 | IF_AFP2)
+/* LOAD REVERSED */
+ C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0)
+ C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0)
+ C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0)
+ C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0)
+ C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0)
+/* LOAD ZERO */
+ F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1)
+ F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0, IF_AFP1)
+ F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1)
+
+/* LOAD FPC */
+ F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP)
+/* LOAD FPC AND SIGNAL */
+ F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP)
+/* LOAD FP INTEGER */
+ F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP)
+ F(0xb35f, FIDBR, RRF_e, Z, 0, f2, new, f1, fidb, 0, IF_BFP)
+ F(0xb347, FIXBR, RRF_e, Z, x2h, x2l, new_P, x1, fixb, 0, IF_BFP)
+
+/* LOAD LENGTHENED */
+ F(0xb304, LDEBR, RRE, Z, 0, e2, new, f1, ldeb, 0, IF_BFP)
+ F(0xb305, LXDBR, RRE, Z, 0, f2, new_P, x1, lxdb, 0, IF_BFP)
+ F(0xb306, LXEBR, RRE, Z, 0, e2, new_P, x1, lxeb, 0, IF_BFP)
+ F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP)
+ F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP)
+ F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP)
+ F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1)
+ F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1)
+/* LOAD ROUNDED */
+ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP)
+ F(0xb345, LDXBR, RRF_e, Z, x2h, x2l, new, f1, ldxb, 0, IF_BFP)
+ F(0xb346, LEXBR, RRF_e, Z, x2h, x2l, new, e1, lexb, 0, IF_BFP)
+
+/* LOAD MULTIPLE */
+ C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0)
+ C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0)
+ C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0)
+/* LOAD MULTIPLE HIGH */
+ C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0)
+/* LOAD ACCESS MULTIPLE */
+ C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0)
+ C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0)
+
+/* MONITOR CALL */
+ C(0xaf00, MC, SI, Z, la1, 0, 0, 0, mc, 0)
+
+/* MOVE */
+ C(0xd200, MVC, SS_a, Z, la1, a2, 0, 0, mvc, 0)
+ C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0)
+ C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0)
+ C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0)
+ C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0)
+ C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0)
+/* MOVE INVERSE */
+ C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0)
+/* MOVE LONG */
+ C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0)
+/* MOVE LONG EXTENDED */
+ C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0)
+/* MOVE LONG UNICODE */
+ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0)
+/* MOVE NUMERICS */
+ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0)
+/* MOVE PAGE */
+ C(0xb254, MVPG, RRE, Z, 0, 0, 0, 0, mvpg, 0)
+/* MOVE STRING */
+ C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0)
+/* MOVE WITH OPTIONAL SPECIFICATION */
+ C(0xc800, MVCOS, SSF, MVCOS, la1, a2, 0, 0, mvcos, 0)
+/* MOVE WITH OFFSET */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
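+ /* For illustration: with L1 = 2 and L2 = 5 the packed byte is 0x25, and
+ the helper splits the two 4-bit length-minus-one fields back out. */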
+ C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0)
+/* MOVE ZONES */
+ C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0)
+
+/* MULTIPLY */
+ C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0)
+ C(0xb9ec, MGRK, RRF_a, MIE2,r3_o, r2_o, r1_P, 0, muls128, 0)
+ C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+ C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+ C(0xe384, MG, RXY_a, MIE2,r1p1_o, m2_64, r1_P, 0, muls128, 0)
+ F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP)
+ F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP)
+ F(0xb34c, MXBR, RRE, Z, x2h, x2l, x1, x1, mxb, 0, IF_BFP)
+ F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP)
+ F(0xb307, MXDBR, RRE, Z, 0, f2, x1, x1, mxdb, 0, IF_BFP)
+ F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP)
+ F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP)
+ F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP)
+ F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, x1, mxdb, 0, IF_BFP)
+/* MULTIPLY HALFWORD */
+ C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0)
+ C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0)
+ C(0xe33c, MGH, RXY_a, MIE2,r1_o, m2_16s, r1, 0, mul, 0)
+/* MULTIPLY HALFWORD IMMEDIATE */
+ C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0)
+ C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0)
+/* MULTIPLY LOGICAL */
+ C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0)
+ C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0)
+ C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0)
+ C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0)
+/* MULTIPLY SINGLE */
+ C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0)
+ C(0xb9fd, MSRKC, RRF_a, MIE2,r3_32s, r2_32s, new, r1_32, mul, muls32)
+ C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0)
+ C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0)
+ C(0xe353, MSC, RXY_a, MIE2,r1_32s, m2_32s, new, r1_32, mul, muls32)
+ C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0)
+ C(0xb9ed, MSGRKC, RRF_a, MIE2,r3_o, r2_o, new_P, out2_r1, muls128, muls64)
+ C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0)
+ C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0)
+ C(0xe383, MSGC, RXY_a, MIE2,r1_o, m2_64, new_P, out2_r1, muls128, muls64)
+ C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0)
+/* MULTIPLY SINGLE IMMEDIATE */
+ C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0)
+ C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0)
+
+/* MULTIPLY AND ADD */
+ F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP)
+ F(0xb31e, MADBR, RRD, Z, f1, f2, new, f1, madb, 0, IF_BFP)
+ F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP)
+ F(0xed1e, MADB, RXF, Z, f1, m2_64, new, f1, madb, 0, IF_BFP)
+/* MULTIPLY AND SUBTRACT */
+ F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP)
+ F(0xb31f, MSDBR, RRD, Z, f1, f2, new, f1, msdb, 0, IF_BFP)
+ F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP)
+ F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP)
+
+/* OR */
+ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32)
+ C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32)
+ C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32)
+ C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32)
+ C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64)
+ C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64)
+ C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64)
+ C(0xd600, OC, SS_a, Z, la1, a2, 0, 0, oc, 0)
+/* OR IMMEDIATE */
+ D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020)
+ D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000)
+ D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030)
+ D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020)
+ D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010)
+ D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000)
+ D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB)
+ D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB)
+
+/* PACK */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
+ C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0)
+/* PACK ASCII */
+ C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0)
+/* PACK UNICODE */
+ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0)
+
+/* PREFETCH */
+ /* Implemented as nops of course. */
+ C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0)
+ C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0)
+/* PERFORM PROCESSOR ASSIST */
+ /* Implemented as nop of course. */
+ C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0)
+
+/* POPULATION COUNT */
+ C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64)
+
+/* ROTATE LEFT SINGLE LOGICAL */
+ C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0)
+ C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0)
+
+/* ROTATE THEN INSERT SELECTED BITS */
+ C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64)
+ C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0)
+ C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0)
+ C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0)
+/* ROTATE THEN <OP> SELECTED BITS */
+ C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+ C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+ C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+
+/* SEARCH STRING */
+ C(0xb25e, SRST, RRE, Z, 0, 0, 0, 0, srst, 0)
+/* SEARCH STRING UNICODE */
+ C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0)
+
+/* SET ACCESS */
+ C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0)
+/* SET ADDRESSING MODE */
+ D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0)
+ D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1)
+ D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3)
+/* SET FPC */
+ F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP)
+/* SET FPC AND SIGNAL */
+ F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP)
+/* SET BFP ROUNDING MODE */
+ F(0xb299, SRNM, S, Z, la2, 0, 0, 0, srnm, 0, IF_BFP)
+ F(0xb2b8, SRNMB, S, FPE, la2, 0, 0, 0, srnmb, 0, IF_BFP)
+/* SET DFP ROUNDING MODE */
+ F(0xb2b9, SRNMT, S, DFPR, la2, 0, 0, 0, srnmt, 0, IF_DFP)
+/* SET PROGRAM MASK */
+ C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0)
+
+/* SHIFT LEFT SINGLE */
+ D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31)
+ D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31)
+ D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63)
+/* SHIFT LEFT SINGLE LOGICAL */
+ C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0)
+ C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0)
+ C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0)
+/* SHIFT RIGHT SINGLE */
+ C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32)
+ C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32)
+ C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64)
+/* SHIFT RIGHT SINGLE LOGICAL */
+ C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0)
+ C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0)
+ C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0)
+/* SHIFT LEFT DOUBLE */
+ D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31)
+/* SHIFT LEFT DOUBLE LOGICAL */
+ C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0)
+/* SHIFT RIGHT DOUBLE */
+ C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64)
+/* SHIFT RIGHT DOUBLE LOGICAL */
+ C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0)
+
+/* SQUARE ROOT */
+ F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP)
+ F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP)
+ F(0xb316, SQXBR, RRE, Z, x2h, x2l, new_P, x1, sqxb, 0, IF_BFP)
+ F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP)
+ F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP)
+
+/* STORE */
+ C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0)
+ C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0)
+ C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0)
+ F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1)
+ F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1)
+ F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1)
+ F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1)
+/* STORE RELATIVE LONG */
+ C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0)
+ C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0)
+/* STORE CHARACTER */
+ C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0)
+ C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0)
+/* STORE CHARACTER HIGH */
+ C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0)
+/* STORE CHARACTERS UNDER MASK */
+ D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0)
+ D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0)
+ D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32)
+/* STORE HALFWORD */
+ C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0)
+ C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0)
+/* STORE HALFWORD HIGH */
+ C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0)
+/* STORE HALFWORD RELATIVE LONG */
+ C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0)
+/* STORE HIGH */
+ C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0)
+/* STORE ON CONDITION */
+ D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0)
+ D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1)
+/* STORE HIGH ON CONDITION */
+ D(0xebe1, STOCFH, RSY_b, LOC2, 0, 0, 0, 0, soc, 0, 2)
+/* STORE REVERSED */
+ C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0)
+ C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0)
+ C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0)
+
+/* STORE CLOCK */
+ F(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0, IF_IO)
+ F(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0, IF_IO)
+/* STORE CLOCK EXTENDED */
+ F(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0, IF_IO)
+
+/* STORE FACILITY LIST EXTENDED */
+ C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0)
+/* STORE FPC */
+ F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP)
+
+/* STORE MULTIPLE */
+ D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4)
+ D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4)
+ D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8)
+/* STORE MULTIPLE HIGH */
+ C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0)
+/* STORE ACCESS MULTIPLE */
+ C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0)
+ C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0)
+/* STORE PAIR TO QUADWORD */
+ C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0)
+
+/* SUBTRACT */
+ C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32)
+ C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32)
+ C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32)
+ C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32)
+ C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64)
+ C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64)
+ C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64)
+ C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64)
+ C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64)
+ F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP)
+ F(0xb31b, SDBR, RRE, Z, f1, f2, new, f1, sdb, f64, IF_BFP)
+ F(0xb34b, SXBR, RRE, Z, x2h, x2l, x1, x1, sxb, f128, IF_BFP)
+ F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, IF_BFP)
+ F(0xed1b, SDB, RXE, Z, f1, m2_64, new, f1, sdb, f64, IF_BFP)
+/* SUBTRACT HALFWORD */
+ C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32)
+ C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32)
+ C(0xe339, SGH, RXY_a, MIE2,r1, m2_16s, r1, 0, sub, subs64)
+/* SUBTRACT HIGH */
+ C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32)
+ C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32)
+/* SUBTRACT LOGICAL */
+ C(0x1f00, SLR, RR_a, Z, r1_32u, r2_32u, new, r1_32, sub, subu32)
+ C(0xb9fb, SLRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, sub, subu32)
+ C(0x5f00, SL, RX_a, Z, r1_32u, m2_32u, new, r1_32, sub, subu32)
+ C(0xe35f, SLY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, sub, subu32)
+ C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, subu64, subu64)
+ C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, subu64, subu64)
+ C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, subu64, subu64)
+ C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, subu64, subu64)
+ C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, subu64, subu64)
+/* SUBTRACT LOGICAL HIGH */
+ C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32)
+ C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, sub, subu32)
+/* SUBTRACT LOGICAL IMMEDIATE */
+ C(0xc205, SLFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, sub, subu32)
+ C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, subu64, subu64)
+/* SUBTRACT LOGICAL WITH BORROW */
+ C(0xb999, SLBR, RRE, Z, r1_32u, r2_32u, new, r1_32, subb32, subu32)
+ C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb64, subu64)
+ C(0xe399, SLB, RXY_a, Z, r1_32u, m2_32u, new, r1_32, subb32, subu32)
+ C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb64, subu64)
+
+/* SUPERVISOR CALL */
+ C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0)
+
+/* TEST ADDRESSING MODE */
+ C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0)
+
+/* TEST AND SET */
+ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0)
+
+/* TEST DATA CLASS */
+ F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP)
+ F(0xed11, TCDB, RXE, Z, f1, a2, 0, 0, tcdb, 0, IF_BFP)
+ F(0xed12, TCXB, RXE, Z, 0, a2, x1, 0, tcxb, 0, IF_BFP)
+
+/* TEST DECIMAL */
+ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0)
+
+/* TEST UNDER MASK */
+ C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32)
+ C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32)
+ D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48)
+ D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32)
+ D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16)
+ D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0)
+
+/* TRANSLATE */
+ C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0)
+/* TRANSLATE AND TEST */
+ C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0)
+/* TRANSLATE AND TEST REVERSE */
+ C(0xd000, TRTR, SS_a, ETF3, la1, a2, 0, 0, trtr, 0)
+/* TRANSLATE EXTENDED */
+ C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0)
+
+/* TRANSLATE ONE TO ONE */
+ C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE ONE TO TWO */
+ C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO ONE */
+ C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+/* TRANSLATE TWO TO TWO */
+ C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0)
+
+/* UNPACK */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
+ C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0)
+/* UNPACK ASCII */
+ C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0)
+/* UNPACK UNICODE */
+ C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0)
+
+/* MSA Instructions */
+ D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC)
+ D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO)
+ D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF)
+ D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO)
+ D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC)
+ D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR)
+ D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM)
+ D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC)
+ D(0xb929, KMA, RRF_b, MSA8, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMA)
+ D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO)
+ D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD)
+ D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD)
+
+/* === Vector Support Instructions === */
+
+/* VECTOR BIT PERMUTE */
+ E(0xe785, VBPERM, VRR_c, VE, 0, 0, 0, 0, vbperm, 0, 0, IF_VEC)
+/* VECTOR GATHER ELEMENT */
+ E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC)
+ E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC)
+/* VECTOR GENERATE BYTE MASK */
+ F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC)
+/* VECTOR GENERATE MASK */
+ F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC)
+/* VECTOR LOAD */
+ F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC)
+ F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC)
+/* VECTOR LOAD AND REPLICATE */
+ F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC)
+/* VECTOR LOAD ELEMENT */
+ E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC)
+ E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC)
+ E(0xe703, VLEF, VRX, V, la2, 0, 0, 0, vle, 0, ES_32, IF_VEC)
+ E(0xe702, VLEG, VRX, V, la2, 0, 0, 0, vle, 0, ES_64, IF_VEC)
+/* VECTOR LOAD ELEMENT IMMEDIATE */
+ E(0xe740, VLEIB, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_8, IF_VEC)
+ E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC)
+ E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC)
+ E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC)
+/* VECTOR LOAD GR FROM VR ELEMENT */
+ F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC)
+/* VECTOR LOAD LOGICAL ELEMENT AND ZERO */
+ F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC)
+/* VECTOR LOAD MULTIPLE */
+ F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC)
+/* VECTOR LOAD TO BLOCK BOUNDARY */
+ F(0xe707, VLBB, VRX, V, la2, 0, 0, 0, vlbb, 0, IF_VEC)
+/* VECTOR LOAD VR ELEMENT FROM GR */
+ F(0xe722, VLVG, VRS_b, V, la2, r3, 0, 0, vlvg, 0, IF_VEC)
+/* VECTOR LOAD VR FROM GRS DISJOINT */
+ F(0xe762, VLVGP, VRR_f, V, r2, r3, 0, 0, vlvgp, 0, IF_VEC)
+/* VECTOR LOAD WITH LENGTH */
+ F(0xe737, VLL, VRS_b, V, la2, r3_32u, 0, 0, vll, 0, IF_VEC)
+/* VECTOR MERGE HIGH */
+ F(0xe761, VMRH, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC)
+/* VECTOR MERGE LOW */
+ F(0xe760, VMRL, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC)
+/* VECTOR PACK */
+ F(0xe794, VPK, VRR_c, V, 0, 0, 0, 0, vpk, 0, IF_VEC)
+/* VECTOR PACK SATURATE */
+ F(0xe797, VPKS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC)
+/* VECTOR PACK LOGICAL SATURATE */
+ F(0xe795, VPKLS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC)
+/* VECTOR PERMUTE */
+ F(0xe78c, VPERM, VRR_e, V, 0, 0, 0, 0, vperm, 0, IF_VEC)
+/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
+ F(0xe784, VPDI, VRR_c, V, 0, 0, 0, 0, vpdi, 0, IF_VEC)
+/* VECTOR REPLICATE */
+ F(0xe74d, VREP, VRI_c, V, 0, 0, 0, 0, vrep, 0, IF_VEC)
+/* VECTOR REPLICATE IMMEDIATE */
+ F(0xe745, VREPI, VRI_a, V, 0, 0, 0, 0, vrepi, 0, IF_VEC)
+/* VECTOR SCATTER ELEMENT */
+ E(0xe71b, VSCEF, VRV, V, la2, 0, 0, 0, vsce, 0, ES_32, IF_VEC)
+ E(0xe71a, VSCEG, VRV, V, la2, 0, 0, 0, vsce, 0, ES_64, IF_VEC)
+/* VECTOR SELECT */
+ F(0xe78d, VSEL, VRR_e, V, 0, 0, 0, 0, vsel, 0, IF_VEC)
+/* VECTOR SIGN EXTEND TO DOUBLEWORD */
+ F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC)
+/* VECTOR STORE */
+ F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC)
+/* VECTOR STORE ELEMENT */
+ E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC)
+ E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC)
+ E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC)
+ E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC)
+/* VECTOR STORE MULTIPLE */
+ F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC)
+/* VECTOR STORE WITH LENGTH */
+ F(0xe73f, VSTL, VRS_b, V, la2, r3_32u, 0, 0, vstl, 0, IF_VEC)
+/* VECTOR UNPACK HIGH */
+ F(0xe7d7, VUPH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC)
+/* VECTOR UNPACK LOGICAL HIGH */
+ F(0xe7d5, VUPLH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC)
+/* VECTOR UNPACK LOW */
+ F(0xe7d6, VUPL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC)
+/* VECTOR UNPACK LOGICAL LOW */
+ F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC)
+
+/* === Vector Integer Instructions === */
+
+/* VECTOR ADD */
+ F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC)
+/* VECTOR ADD COMPUTE CARRY */
+ F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC)
+/* VECTOR ADD WITH CARRY */
+ F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC)
+/* VECTOR ADD WITH CARRY COMPUTE CARRY */
+ F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC)
+/* VECTOR AND */
+ F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC)
+/* VECTOR AND WITH COMPLEMENT */
+ F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC)
+/* VECTOR AVERAGE */
+ F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC)
+/* VECTOR AVERAGE LOGICAL */
+ F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC)
+/* VECTOR CHECKSUM */
+ F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC)
+/* VECTOR ELEMENT COMPARE */
+ F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC)
+/* VECTOR ELEMENT COMPARE LOGICAL */
+ F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC)
+/* VECTOR COMPARE EQUAL */
+ E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC)
+/* VECTOR COMPARE HIGH */
+ E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC)
+/* VECTOR COMPARE HIGH LOGICAL */
+ E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC)
+/* VECTOR COUNT LEADING ZEROS */
+ F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC)
+/* VECTOR COUNT TRAILING ZEROS */
+ F(0xe752, VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC)
+/* VECTOR EXCLUSIVE OR */
+ F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC)
+/* VECTOR GALOIS FIELD MULTIPLY SUM */
+ F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC)
+/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
+ F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC)
+/* VECTOR LOAD COMPLEMENT */
+ F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC)
+/* VECTOR LOAD POSITIVE */
+ F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC)
+/* VECTOR MAXIMUM */
+ F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MAXIMUM LOGICAL */
+ F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MINIMUM */
+ F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MINIMUM LOGICAL */
+ F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOW */
+ F(0xe7aa, VMAL, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD HIGH */
+ F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL HIGH */
+ F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD EVEN */
+ F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL EVEN */
+ F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD ODD */
+ F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY AND ADD LOGICAL ODD */
+ F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC)
+/* VECTOR MULTIPLY HIGH */
+ F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL HIGH */
+ F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOW */
+ F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY EVEN */
+ F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL EVEN */
+ F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY ODD */
+ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY LOGICAL ODD */
+ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC)
+/* VECTOR MULTIPLY SUM LOGICAL */
+ F(0xe7b8, VMSL, VRR_d, VE, 0, 0, 0, 0, vmsl, 0, IF_VEC)
+/* VECTOR NAND */
+ F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC)
+/* VECTOR NOR */
+ F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC)
+/* VECTOR NOT EXCLUSIVE OR */
+ F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC)
+/* VECTOR OR */
+ F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC)
+/* VECTOR OR WITH COMPLEMENT */
+ F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC)
+/* VECTOR POPULATION COUNT */
+ F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC)
+/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
+ F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+ F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */
+ F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT LEFT */
+ F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+ F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+ F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+ F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR ELEMENT SHIFT RIGHT LOGICAL */
+ F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC)
+ F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC)
+/* VECTOR SHIFT LEFT */
+ F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC)
+/* VECTOR SHIFT LEFT BY BYTE */
+ F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC)
+/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
+ F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT ARITHMETIC */
+ F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */
+ F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT LOGICAL */
+ F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC)
+/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
+ F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC)
+/* VECTOR SUBTRACT */
+ F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC)
+/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */
+ F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC)
+/* VECTOR SUBTRACT WITH BORROW INDICATION */
+ F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC)
+/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */
+ F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC)
+/* VECTOR SUM ACROSS DOUBLEWORD */
+ F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC)
+/* VECTOR SUM ACROSS QUADWORD */
+ F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC)
+/* VECTOR SUM ACROSS WORD */
+ F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC)
+/* VECTOR TEST UNDER MASK */
+ F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC)
+
+/* === Vector String Instructions === */
+
+/* VECTOR FIND ANY ELEMENT EQUAL */
+ F(0xe782, VFAE, VRR_b, V, 0, 0, 0, 0, vfae, 0, IF_VEC)
+/* VECTOR FIND ELEMENT EQUAL */
+ F(0xe780, VFEE, VRR_b, V, 0, 0, 0, 0, vfee, 0, IF_VEC)
+/* VECTOR FIND ELEMENT NOT EQUAL */
+ F(0xe781, VFENE, VRR_b, V, 0, 0, 0, 0, vfene, 0, IF_VEC)
+/* VECTOR ISOLATE STRING */
+ F(0xe75c, VISTR, VRR_a, V, 0, 0, 0, 0, vistr, 0, IF_VEC)
+/* VECTOR STRING RANGE COMPARE */
+ F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC)
+
+/* === Vector Floating-Point Instructions === */
+
+/* VECTOR FP ADD */
+ F(0xe7e3, VFA, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
+/* VECTOR FP COMPARE SCALAR */
+ F(0xe7cb, WFC, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC)
+/* VECTOR FP COMPARE AND SIGNAL SCALAR */
+ F(0xe7ca, WFK, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC)
+/* VECTOR FP COMPARE EQUAL */
+ F(0xe7e8, VFCE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
+/* VECTOR FP COMPARE HIGH */
+ F(0xe7eb, VFCH, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
+/* VECTOR FP COMPARE HIGH OR EQUAL */
+ F(0xe7ea, VFCHE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC)
+/* VECTOR FP CONVERT FROM FIXED 64-BIT */
+ F(0xe7c3, VCDG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP CONVERT FROM LOGICAL 64-BIT */
+ F(0xe7c1, VCDLG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP CONVERT TO FIXED 64-BIT */
+ F(0xe7c2, VCGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP CONVERT TO LOGICAL 64-BIT */
+ F(0xe7c0, VCLGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP DIVIDE */
+ F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
+/* VECTOR LOAD FP INTEGER */
+ F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP LOAD LENGTHENED */
+ F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC)
+/* VECTOR FP LOAD ROUNDED */
+ F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC)
+/* VECTOR FP MAXIMUM */
+ F(0xe7ef, VFMAX, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC)
+/* VECTOR FP MINIMUM */
+ F(0xe7ee, VFMIN, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC)
+/* VECTOR FP MULTIPLY */
+ F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
+/* VECTOR FP MULTIPLY AND ADD */
+ F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC)
+/* VECTOR FP MULTIPLY AND SUBTRACT */
+ F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC)
+/* VECTOR FP NEGATIVE MULTIPLY AND ADD */
+ F(0xe79f, VFNMA, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC)
+/* VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */
+ F(0xe79e, VFNMS, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC)
+/* VECTOR FP PERFORM SIGN OPERATION */
+ F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC)
+/* VECTOR FP SQUARE ROOT */
+ F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC)
+/* VECTOR FP SUBTRACT */
+ F(0xe7e2, VFS, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC)
+/* VECTOR FP TEST DATA CLASS IMMEDIATE */
+ F(0xe74a, VFTCI, VRI_e, V, 0, 0, 0, 0, vftci, 0, IF_VEC)
+
+#ifndef CONFIG_USER_ONLY
+/* COMPARE AND SWAP AND PURGE */
+ E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV)
+ E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV)
+/* DIAGNOSE (KVM hypercall) */
+ F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO)
+/* INSERT STORAGE KEY EXTENDED */
+ F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV)
+/* INVALIDATE DAT TABLE ENTRY */
+ F(0xb98e, IDTE, RRF_b, DAT_ENH, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV)
+/* INVALIDATE PAGE TABLE ENTRY */
+ F(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV)
+/* LOAD CONTROL */
+ F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV)
+ F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV)
+/* LOAD PROGRAM PARAMETER */
+ F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV)
+/* LOAD PSW */
+ F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV)
+/* LOAD PSW EXTENDED */
+ F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV)
+/* LOAD REAL ADDRESS */
+ F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
+ F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV)
+ F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
+/* LOAD USING REAL ADDRESS */
+ E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV)
+ E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV)
+/* MOVE TO PRIMARY */
+ F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV)
+/* MOVE TO SECONDARY */
+ F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV)
+/* PURGE TLB */
+ F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV)
+/* RESET REFERENCE BIT EXTENDED */
+ F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV)
+/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */
+ F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV | IF_IO)
+/* SET ADDRESS SPACE CONTROL FAST */
+ F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV)
+/* SET CLOCK */
+ F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV | IF_IO)
+/* SET CLOCK COMPARATOR */
+ F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV | IF_IO)
+/* SET CLOCK PROGRAMMABLE FIELD */
+ F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV)
+/* SET CPU TIMER */
+ F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV | IF_IO)
+/* SET PREFIX */
+ F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV)
+/* SET PSW KEY FROM ADDRESS */
+ F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV)
+/* SET STORAGE KEY EXTENDED */
+ F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV)
+/* SET SYSTEM MASK */
+ F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV)
+/* SIGNAL PROCESSOR */
+ F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV | IF_IO)
+/* STORE CLOCK COMPARATOR */
+ F(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV)
+/* STORE CONTROL */
+ F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV)
+ F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV)
+/* STORE CPU ADDRESS */
+ F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV)
+/* STORE CPU ID */
+ F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV)
+/* STORE CPU TIMER */
+ F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV | IF_IO)
+/* STORE FACILITY LIST */
+ F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV)
+/* STORE PREFIX */
+ F(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV)
+/* STORE SYSTEM INFORMATION */
+ F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV)
+/* STORE THEN AND SYSTEM MASK */
+ F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV)
+/* STORE THEN OR SYSTEM MASK */
+ F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV)
+/* STORE USING REAL ADDRESS */
+ E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV)
+ E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV)
+/* TEST BLOCK */
+ F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV)
+/* TEST PROTECTION */
+ C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0)
+
+/* CCW I/O Instructions */
+ F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV | IF_IO)
+ F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV | IF_IO)
+ F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV | IF_IO)
+ F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV | IF_IO)
+ F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV | IF_IO)
+ F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV | IF_IO)
+ F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV | IF_IO)
+ F(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV | IF_IO)
+ F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV | IF_IO)
+ F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV | IF_IO)
+ F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV | IF_IO)
+ F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV | IF_IO)
+ F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV | IF_IO)
+ F(0xb236, TPI, S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV | IF_IO)
+ F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV | IF_IO)
+ /* ??? Not listed in PoO ninth edition, but there's a linux driver that
+ uses it: "A CHSC subchannel is usually present on LPAR only." */
+ F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV | IF_IO)
+
+/* zPCI Instructions */
+ /* None of these instructions are documented in the PoP, so this is all
+ based upon target/s390x/kvm.c and Linux code and likely incomplete */
+ F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV | IF_IO)
+ F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV | IF_IO)
+ F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV | IF_IO)
+ F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV | IF_IO)
+ F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV | IF_IO)
+ F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV | IF_IO)
+ F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV | IF_IO)
+ F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV | IF_IO)
+
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/s390x/tcg/insn-format.def b/target/s390x/tcg/insn-format.def
new file mode 100644
index 000000000..6253edbd1
--- /dev/null
+++ b/target/s390x/tcg/insn-format.def
@@ -0,0 +1,81 @@
+/* Description of s390 insn formats. */
+/* NAME F1, F2... */
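+/* Each field macro gives the operand number and the starting bit within the
+ instruction; I and L fields also carry a width in bits.  For example,
+ F2(RI_a, R(1, 8), I(2,16,16)) says operand 1 is the 4-bit register number
+ at bit 8 and operand 2 a 16-bit immediate starting at bit 16. */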
+F0(E)
+F1(I, I(1, 8, 8))
+F2(RI_a, R(1, 8), I(2,16,16))
+F2(RI_b, R(1, 8), I(2,16,16))
+F2(RI_c, M(1, 8), I(2,16,16))
+F3(RIE_a, R(1, 8), I(2,16,16), M(3,32))
+F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16))
+F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16))
+F3(RIE_d, R(1, 8), I(2,16,16), R(3,12))
+F3(RIE_e, R(1, 8), I(2,16,16), R(3,12))
+F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8))
+F3(RIE_g, R(1, 8), I(2,16,16), M(3,12))
+F2(RIL_a, R(1, 8), I(2,16,32))
+F2(RIL_b, R(1, 8), I(2,16,32))
+F2(RIL_c, M(1, 8), I(2,16,32))
+F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20))
+/* ??? The PoO does not call out subtypes _a and _b for RR, as it does
+ for e.g. RX. Our checking requires this for e.g. BCR. */
+F2(RR_a, R(1, 8), R(2,12))
+F2(RR_b, M(1, 8), R(2,12))
+F2(RRE, R(1,24), R(2,28))
+F3(RRD, R(1,16), R(2,28), R(3,24))
+F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20))
+F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20))
+F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20))
+F3(RS_a, R(1, 8), BD(2,16,20), R(3,12))
+F3(RS_b, R(1, 8), BD(2,16,20), M(3,12))
+F3(RSI, R(1, 8), I(2,16,16), R(3,12))
+F2(RSL, L(1, 8, 4), BD(1,16,20))
+F3(RSY_a, R(1, 8), BDL(2), R(3,12))
+F3(RSY_b, R(1, 8), BDL(2), M(3,12))
+F2(RX_a, R(1, 8), BXD(2))
+F2(RX_b, M(1, 8), BXD(2))
+F3(RXE, R(1, 8), BXD(2), M(3,32))
+F3(RXF, R(1,32), BXD(2), R(3, 8))
+F2(RXY_a, R(1, 8), BXDL(2))
+F2(RXY_b, M(1, 8), BXDL(2))
+F1(S, BD(2,16,20))
+F2(SI, BD(1,16,20), I(2,8,8))
+F2(SIL, BD(1,16,20), I(2,32,16))
+F2(SIY, BDL(1), I(2, 8, 8))
+F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36))
+F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36))
+F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4))
+/* ??? Odd man out. The L1 field here is really a register, but the
+ easy way to compress the fields has R1 and B1 overlap. */
+F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12))
+F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36))
+F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36))
+F2(SSE, BD(1,16,20), BD(2,32,36))
+F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8))
+F3(VRI_a, V(1,8), I(2,16,16), M(3,32))
+F4(VRI_b, V(1,8), I(2,16,8), I(3,24,8), M(4,32))
+F4(VRI_c, V(1,8), V(3,12), I(2,16,16), M(4,32))
+F5(VRI_d, V(1,8), V(2,12), V(3,16), I(4,24,8), M(5,32))
+F5(VRI_e, V(1,8), V(2,12), I(3,16,12), M(5,28), M(4,32))
+F5(VRI_f, V(1,8), V(2,12), V(3,16), M(5,24), I(4,28,8))
+F5(VRI_g, V(1,8), V(2,12), I(4,16,8), M(5,24), I(3,28,8))
+F3(VRI_h, V(1,8), I(2,16,16), I(3,32,4))
+F4(VRI_i, V(1,8), R(2,12), M(4,24), I(3,28,8))
+F5(VRR_a, V(1,8), V(2,12), M(5,24), M(4,28), M(3,32))
+F5(VRR_b, V(1,8), V(2,12), V(3,16), M(5,24), M(4,32))
+F6(VRR_c, V(1,8), V(2,12), V(3,16), M(6,24), M(5,28), M(4,32))
+F6(VRR_d, V(1,8), V(2,12), V(3,16), M(5,20), M(6,24), V(4,32))
+F6(VRR_e, V(1,8), V(2,12), V(3,16), M(6,20), M(5,28), V(4,32))
+F3(VRR_f, V(1,8), R(2,12), R(3,16))
+F1(VRR_g, V(1,12))
+F3(VRR_h, V(1,12), V(2,16), M(3,24))
+F3(VRR_i, R(1,8), V(2,12), M(3,24))
+F4(VRS_a, V(1,8), V(3,12), BD(2,16,20), M(4,32))
+F4(VRS_b, V(1,8), R(3,12), BD(2,16,20), M(4,32))
+F4(VRS_c, R(1,8), V(3,12), BD(2,16,20), M(4,32))
+F3(VRS_d, R(3,12), BD(2,16,20), V(1,32))
+F4(VRV, V(1,8), V(2,12), BD(2,16,20), M(3,32))
+F3(VRX, V(1,8), BXD(2), M(3,32))
+F3(VSI, I(3,8,8), BD(2,16,20), V(1,32))
diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c
new file mode 100644
index 000000000..954542388
--- /dev/null
+++ b/target/s390x/tcg/int_helper.c
@@ -0,0 +1,148 @@
+/*
+ * S/390 integer helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* 64/32 -> 32 signed division */
+int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
+{
+ int32_t ret, b = b64;
+ int64_t q;
+
+ if (b == 0) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ ret = q = a / b;
+ env->retxl = a % b;
+
+ /* Catch non-representable quotient. */
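+ /* e.g. 0x100000000 / 1: q needs 33 bits, so the truncated 32-bit ret
+ no longer equals q and a fixed-point-divide exception is raised. */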
+ if (ret != q) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ return ret;
+}
+
+/* 64/32 -> 32 unsigned division */
+uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
+{
+ uint32_t ret, b = b64;
+ uint64_t q;
+
+ if (b == 0) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ ret = q = a / b;
+ env->retxl = a % b;
+
+ /* Catch non-representable quotient. */
+ if (ret != q) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ return ret;
+}
+
+/* 64/64 -> 64 signed division */
+int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
+{
+ /* Catch divide by zero, and non-representable quotient (MIN / -1). */
+ if (b == 0 || (b == -1 && a == (1ll << 63))) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+ env->retxl = a % b;
+ return a / b;
+}
+
+/* 128 -> 64/64 unsigned division */
+uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t b)
+{
+ uint64_t ret;
+ /* Signal divide by zero. */
+ if (b == 0) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+ if (ah == 0) {
+ /* 64 -> 64/64 case */
+ env->retxl = al % b;
+ ret = al / b;
+ } else {
+ /* ??? Move i386 idivq helper to host-utils. */
+#ifdef CONFIG_INT128
+ __uint128_t a = ((__uint128_t)ah << 64) | al;
+ __uint128_t q = a / b;
+ env->retxl = a % b;
+ ret = q;
+ if (ret != q) {
+ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+#else
+ /* 32-bit hosts would need special wrapper functionality - just abort if
+ we encounter such a case; it's very unlikely anyway. */
+ cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n");
+#endif
+ }
+ return ret;
+}
+
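+/* CONVERT TO DECIMAL packs the value into right-aligned BCD digits with a
+ trailing sign nibble, 0xc for plus and 0xd for minus; e.g. cvd(-123)
+ returns 0x123d. */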
+uint64_t HELPER(cvd)(int32_t reg)
+{
+ /* positive 0 */
+ uint64_t dec = 0x0c;
+ int64_t bin = reg;
+ int shift;
+
+ if (bin < 0) {
+ bin = -bin;
+ dec = 0x0d;
+ }
+
+ for (shift = 4; (shift < 64) && bin; shift += 4) {
+ dec |= (bin % 10) << shift;
+ bin /= 10;
+ }
+
+ return dec;
+}
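+
+/*
+ * For illustration, cvd(-123) builds its result right to left:
+ *   dec = 0x0d (minus sign nibble), bin = 123
+ *   shift 4:  dec |= (123 % 10) << 4   -> 0x3d,   bin = 12
+ *   shift 8:  dec |= (12 % 10) << 8    -> 0x23d,  bin = 1
+ *   shift 12: dec |= (1 % 10) << 12    -> 0x123d, bin = 0
+ * i.e. the packed decimal value 0x000000000000123d.
+ */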
+
+uint64_t HELPER(popcnt)(uint64_t val)
+{
+ /* Note that we don't fold past bytes. */
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
+ return val;
+}
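+
+/*
+ * For illustration, popcnt(0xff03) returns 0x0802: each byte holds the
+ * population count of that byte alone (8 for 0xff, 2 for 0x03), matching
+ * the per-byte definition of the POPCNT instruction.
+ */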
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
new file mode 100644
index 000000000..362a30d99
--- /dev/null
+++ b/target/s390x/tcg/mem_helper.c
@@ -0,0 +1,3017 @@
+/*
+ * S/390 memory access helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "qemu/atomic128.h"
+#include "trace.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/s390x/storage-keys.h"
+#include "hw/boards.h"
+#endif
+
+/*****************************************************************************/
+/* Softmmu support */
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
+{
+ uint16_t pkm = env->cregs[3] >> 16;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
+ return pkm & (0x80 >> psw_key);
+ }
+ return true;
+}
+
+static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
+ uint64_t src, uint32_t len)
+{
+ if (!len || src == dest) {
+ return false;
+ }
+ /* Take care of wrapping at the end of address space. */
+ if (unlikely(wrap_address(env, src + len - 1) < src)) {
+ return dest > src || dest <= wrap_address(env, src + len - 1);
+ }
+ return dest > src && dest <= src + len - 1;
+}
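+
+/*
+ * For illustration: dest = 0x1002, src = 0x1000, len = 4 is destructive,
+ * since the first destination byte overwrites the third source byte
+ * before that source byte is read; dest = 0x0ffe, src = 0x1000, len = 4
+ * is not, because dest < src.
+ */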
+
+/* Trigger a SPECIFICATION exception if an address or a length is not
+ naturally aligned. */
+static inline void check_alignment(CPUS390XState *env, uint64_t v,
+ int wordsize, uintptr_t ra)
+{
+ if (v % wordsize) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+}
+
+/* Load a value from memory according to its size. */
+static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
+ int wordsize, uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ return cpu_ldub_data_ra(env, addr, ra);
+ case 2:
+ return cpu_lduw_data_ra(env, addr, ra);
+ default:
+ abort();
+ }
+}
+
+/* Store a value to memory according to its size. */
+static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
+ uint64_t value, int wordsize,
+ uintptr_t ra)
+{
+ switch (wordsize) {
+ case 1:
+ cpu_stb_data_ra(env, addr, value, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr, value, ra);
+ break;
+ default:
+ abort();
+ }
+}
+
+/* An access covers at most 4096 bytes and therefore at most two pages. */
+typedef struct S390Access {
+ target_ulong vaddr1;
+ target_ulong vaddr2;
+ char *haddr1;
+ char *haddr2;
+ uint16_t size1;
+ uint16_t size2;
+    /*
+     * If we can't access the host page directly, we'll have to do I/O access
+     * via ld/st helpers. These are internal details, so we store the
+     * mmu idx to do the access here instead of passing it around in the
+     * helpers. Maybe one day we can get rid of ld/st access - once we can
+     * handle TLB_NOTDIRTY differently. We don't expect these special
+     * accesses to trigger exceptions - only with TLB_NOTDIRTY on LAP pages
+     * might we trigger a new MMU translation, and it is very unlikely that
+     * the mapping changes in between and that we would then trigger a fault.
+     */
+ int mmu_idx;
+} S390Access;
+
+/*
+ * With nonfault=1, return the PGM_ exception that would have been injected
+ * into the guest; return 0 if no exception was detected.
+ *
+ * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
+ * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
+ */
+static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t ra)
+{
+#if defined(CONFIG_USER_ONLY)
+ return probe_access_flags(env, addr, access_type, mmu_idx,
+ nonfault, phost, ra);
+#else
+ int flags;
+
+ /*
+ * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
+ * to detect if there was an exception during tlb_fill().
+ */
+ env->tlb_fill_exc = 0;
+ flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
+ ra);
+ if (env->tlb_fill_exc) {
+ return env->tlb_fill_exc;
+ }
+
+ if (unlikely(flags & TLB_WATCHPOINT)) {
+ /* S390 does not presently use transaction attributes. */
+ cpu_check_watchpoint(env_cpu(env), addr, size,
+ MEMTXATTRS_UNSPECIFIED,
+ (access_type == MMU_DATA_STORE
+ ? BP_MEM_WRITE : BP_MEM_READ), ra);
+ }
+ return 0;
+#endif
+}
+
+static int access_prepare_nf(S390Access *access, CPUS390XState *env,
+ bool nonfault, vaddr vaddr1, int size,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t ra)
+{
+ void *haddr1, *haddr2 = NULL;
+ int size1, size2, exc;
+ vaddr vaddr2 = 0;
+
+ assert(size > 0 && size <= 4096);
+
+    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK));
+ size2 = size - size1;
+
+ exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
+ &haddr1, ra);
+ if (exc) {
+ return exc;
+ }
+ if (unlikely(size2)) {
+ /* The access crosses page boundaries. */
+ vaddr2 = wrap_address(env, vaddr1 + size1);
+ exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
+ nonfault, &haddr2, ra);
+ if (exc) {
+ return exc;
+ }
+ }
+
+ *access = (S390Access) {
+ .vaddr1 = vaddr1,
+ .vaddr2 = vaddr2,
+ .haddr1 = haddr1,
+ .haddr2 = haddr2,
+ .size1 = size1,
+ .size2 = size2,
+ .mmu_idx = mmu_idx
+ };
+ return 0;
+}
+
+static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t ra)
+{
+ S390Access ret;
+ int exc = access_prepare_nf(&ret, env, false, vaddr, size,
+ access_type, mmu_idx, ra);
+ assert(!exc);
+ return ret;
+}
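+
+/*
+ * For illustration (assuming 4K pages): a 0x20-byte access at vaddr 0x1ff0
+ * is split into size1 = 0x10 bytes up to the page boundary and
+ * size2 = 0x10 bytes at vaddr2 = 0x2000; -(vaddr1 | TARGET_PAGE_MASK) is
+ * exactly the distance from vaddr1 to the end of its page.
+ */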
+
+/* Helper to handle memset on a single page. */
+static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
+ uint8_t byte, uint16_t size, int mmu_idx,
+ uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ g_assert(haddr);
+ memset(haddr, byte, size);
+#else
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ int i;
+
+ if (likely(haddr)) {
+ memset(haddr, byte, size);
+ } else {
+ /*
+ * Do a single access and test if we can then get access to the
+ * page. This is especially relevant to speed up TLB_NOTDIRTY.
+ */
+ g_assert(size > 0);
+ cpu_stb_mmu(env, vaddr, byte, oi, ra);
+ haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
+ if (likely(haddr)) {
+ memset(haddr + 1, byte, size - 1);
+ } else {
+ for (i = 1; i < size; i++) {
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
+ }
+ }
+ }
+#endif
+}
+
+static void access_memset(CPUS390XState *env, S390Access *desta,
+ uint8_t byte, uintptr_t ra)
+{
+ do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
+ desta->mmu_idx, ra);
+ if (likely(!desta->size2)) {
+ return;
+ }
+ do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
+ desta->mmu_idx, ra);
+}
+
+static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
+ int offset, int mmu_idx, uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ return ldub_p(*haddr + offset);
+#else
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ uint8_t byte;
+
+ if (likely(*haddr)) {
+ return ldub_p(*haddr + offset);
+ }
+ /*
+ * Do a single access and test if we can then get access to the
+ * page. This is especially relevant to speed up TLB_NOTDIRTY.
+ */
+ byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
+ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
+ return byte;
+#endif
+}
+
+static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
+ int offset, uintptr_t ra)
+{
+ if (offset < access->size1) {
+ return do_access_get_byte(env, access->vaddr1, &access->haddr1,
+ offset, access->mmu_idx, ra);
+ }
+ return do_access_get_byte(env, access->vaddr2, &access->haddr2,
+ offset - access->size1, access->mmu_idx, ra);
+}
+
+static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
+ int offset, uint8_t byte, int mmu_idx,
+ uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ stb_p(*haddr + offset, byte);
+#else
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+ if (likely(*haddr)) {
+ stb_p(*haddr + offset, byte);
+ return;
+ }
+ /*
+ * Do a single access and test if we can then get access to the
+ * page. This is especially relevant to speed up TLB_NOTDIRTY.
+ */
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
+ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
+#endif
+}
+
+static void access_set_byte(CPUS390XState *env, S390Access *access,
+ int offset, uint8_t byte, uintptr_t ra)
+{
+ if (offset < access->size1) {
+ do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
+ access->mmu_idx, ra);
+ } else {
+ do_access_set_byte(env, access->vaddr2, &access->haddr2,
+ offset - access->size1, byte, access->mmu_idx, ra);
+ }
+}
+
+/*
+ * Move data with the same semantics as memmove() in case ranges don't overlap
+ * or src > dest. Undefined behavior on destructive overlaps.
+ */
+static void access_memmove(CPUS390XState *env, S390Access *desta,
+ S390Access *srca, uintptr_t ra)
+{
+ int diff;
+
+ g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);
+
+ /* Fallback to slow access in case we don't have access to all host pages */
+ if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
+ !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
+ int i;
+
+ for (i = 0; i < desta->size1 + desta->size2; i++) {
+ uint8_t byte = access_get_byte(env, srca, i, ra);
+
+ access_set_byte(env, desta, i, byte, ra);
+ }
+ return;
+ }
+
+ if (srca->size1 == desta->size1) {
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ if (unlikely(srca->size2)) {
+ memmove(desta->haddr2, srca->haddr2, srca->size2);
+ }
+ } else if (srca->size1 < desta->size1) {
+ diff = desta->size1 - srca->size1;
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
+ if (likely(desta->size2)) {
+ memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
+ }
+ } else {
+ diff = srca->size1 - desta->size1;
+ memmove(desta->haddr1, srca->haddr1, desta->size1);
+ memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
+ if (likely(srca->size2)) {
+ memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
+ }
+ }
+}
+
+static int mmu_idx_from_as(uint8_t as)
+{
+ switch (as) {
+ case AS_PRIMARY:
+ return MMU_PRIMARY_IDX;
+ case AS_SECONDARY:
+ return MMU_SECONDARY_IDX;
+ case AS_HOME:
+ return MMU_HOME_IDX;
+ default:
+ /* FIXME AS_ACCREG */
+ g_assert_not_reached();
+ }
+}
+
+/* and on array */
+static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
+ uint32_t i;
+ uint8_t c = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* NC always processes one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) &
+ access_get_byte(env, &srca2, i, ra);
+
+ c |= x;
+ access_set_byte(env, &desta, i, x, ra);
+ }
+ return c != 0;
+}
+
+uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ return do_helper_nc(env, l, dest, src, GETPC());
+}
+
+/* xor on array */
+static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
+ uint32_t i;
+ uint8_t c = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* XC always processes one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+
+ /* xor with itself is the same as memset(0) */
+ if (src == dest) {
+ access_memset(env, &desta, 0, ra);
+ return 0;
+ }
+
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
+ access_get_byte(env, &srca2, i, ra);
+
+ c |= x;
+ access_set_byte(env, &desta, i, x, ra);
+ }
+ return c != 0;
+}
+
+uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ return do_helper_xc(env, l, dest, src, GETPC());
+}
+
+/* or on array */
+static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
+ uint32_t i;
+ uint8_t c = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* OC always processes one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) |
+ access_get_byte(env, &srca2, i, ra);
+
+ c |= x;
+ access_set_byte(env, &desta, i, x, ra);
+ }
+ return c != 0;
+}
+
+uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ return do_helper_oc(env, l, dest, src, GETPC());
+}
+
+/* memmove */
+static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src, uintptr_t ra)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca, desta;
+ uint32_t i;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* MVC always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+
+ /*
+ * "When the operands overlap, the result is obtained as if the operands
+ * were processed one byte at a time". Only non-destructive overlaps
+ * behave like memmove().
+ */
+ if (dest == src + 1) {
+ access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
+ } else if (!is_destructive_overlap(env, dest, src, l)) {
+ access_memmove(env, &desta, &srca, ra);
+ } else {
+ for (i = 0; i < l; i++) {
+ uint8_t byte = access_get_byte(env, &srca, i, ra);
+
+ access_set_byte(env, &desta, i, byte, ra);
+ }
+ }
+
+ return env->cc_op;
+}
+
+void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ do_helper_mvc(env, l, dest, src, GETPC());
+}
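+
+/*
+ * The dest == src + 1 special case above is the classic MVC memset idiom:
+ * e.g. MVC 1(255,R1),0(R1) propagates the byte at 0(R1) through the
+ * following 255 bytes when processed one byte at a time.
+ */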
+
+/* move inverse */
+void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca, desta;
+ uintptr_t ra = GETPC();
+ int i;
+
+ /* MVCIN always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ src = wrap_address(env, src - l + 1);
+ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);
+
+ access_set_byte(env, &desta, i, x, ra);
+ }
+}
+
+/* move numerics */
+void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
+ uintptr_t ra = GETPC();
+ int i;
+
+ /* MVN always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
+ (access_get_byte(env, &srca2, i, ra) & 0xf0);
+
+ access_set_byte(env, &desta, i, x, ra);
+ }
+}
+
+/* move with offset */
+void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ /* MVO always processes one more byte than specified - maximum is 16 */
+ const int len_dest = (l >> 4) + 1;
+ const int len_src = (l & 0xf) + 1;
+ uintptr_t ra = GETPC();
+ uint8_t byte_dest, byte_src;
+ S390Access srca, desta;
+ int i, j;
+
+ srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
+
+ /* Handle rightmost byte */
+ byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
+ byte_src = access_get_byte(env, &srca, len_src - 1, ra);
+ byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
+ access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);
+
+ /* Process remaining bytes from right to left */
+ for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) {
+ byte_dest = byte_src >> 4;
+ if (j >= 0) {
+ byte_src = access_get_byte(env, &srca, j, ra);
+ } else {
+ byte_src = 0;
+ }
+ byte_dest |= byte_src << 4;
+ access_set_byte(env, &desta, i, byte_dest, ra);
+ }
+}
+
+/* move zones */
+void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
+ uintptr_t ra = GETPC();
+ int i;
+
+ /* MVZ always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
+ (access_get_byte(env, &srca2, i, ra) & 0x0f);
+
+ access_set_byte(env, &desta, i, x, ra);
+ }
+}
+
+/* compare unsigned byte arrays */
+static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
+ uint64_t s2, uintptr_t ra)
+{
+ uint32_t i;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
+ __func__, l, s1, s2);
+
+ for (i = 0; i <= l; i++) {
+ uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
+ uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
+ HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
+ if (x < y) {
+ cc = 1;
+ break;
+ } else if (x > y) {
+ cc = 2;
+ break;
+ }
+ }
+
+ HELPER_LOG("\n");
+ return cc;
+}
+
+uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+{
+ return do_helper_clc(env, l, s1, s2, GETPC());
+}
+
+/* compare logical under mask */
+uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
+ uint64_t addr)
+{
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
+ mask, addr);
+
+ while (mask) {
+ if (mask & 8) {
+ uint8_t d = cpu_ldub_data_ra(env, addr, ra);
+ uint8_t r = extract32(r1, 24, 8);
+ HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
+ addr);
+ if (r < d) {
+ cc = 1;
+ break;
+ } else if (r > d) {
+ cc = 2;
+ break;
+ }
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+
+ HELPER_LOG("\n");
+ return cc;
+}
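+
+/*
+ * For illustration: CLM with mask 0b0101 compares byte 1 of the 32-bit r1
+ * value with the byte at addr and byte 3 with the byte at addr + 1;
+ * register bytes whose mask bit is 0 are skipped and consume no storage
+ * bytes.
+ */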
+
+static inline uint64_t get_address(CPUS390XState *env, int reg)
+{
+ return wrap_address(env, env->regs[reg]);
+}
+
+/*
+ * Store the address to the given register, zeroing out unused leftmost
+ * bits in bit positions 32-63 (24-bit and 31-bit mode only).
+ */
+static inline void set_address_zero(CPUS390XState *env, int reg,
+ uint64_t address)
+{
+ if (env->psw.mask & PSW_MASK_64) {
+ env->regs[reg] = address;
+ } else {
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ address &= 0x00ffffff;
+ } else {
+ address &= 0x7fffffff;
+ }
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
+ }
+}
+
+static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
+{
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = address;
+ } else {
+ if (!(env->psw.mask & PSW_MASK_32)) {
+ /* 24-Bit mode. According to the PoO it is implementation
+ dependent if bits 32-39 remain unchanged or are set to
+ zeros. Choose the former so that the function can also be
+ used for TRT. */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
+ } else {
+ /* 31-Bit mode. According to the PoO it is implementation
+ dependent if bit 32 remains unchanged or is set to zero.
+ Choose the latter so that the function can also be used for
+ TRT. */
+ address &= 0x7fffffff;
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
+ }
+ }
+}
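+
+/*
+ * For illustration, storing address 0x12345678 in 24-bit mode:
+ * set_address_zero() writes 0x00345678 into bits 32-63 of the register,
+ * while set_address() deposits only the low 24 bits, leaving bits 32-39
+ * of the register unchanged and discarding the high address byte 0x12.
+ */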
+
+static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
+{
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ return (uint32_t)length;
+ }
+ return length;
+}
+
+static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
+{
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ /* 24-Bit and 31-Bit mode */
+ length &= 0x7fffffff;
+ }
+ return length;
+}
+
+static inline uint64_t get_length(CPUS390XState *env, int reg)
+{
+ return wrap_length31(env, env->regs[reg]);
+}
+
+static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
+{
+ if (env->psw.mask & PSW_MASK_64) {
+ /* 64-Bit mode */
+ env->regs[reg] = length;
+ } else {
+ /* 24-Bit and 31-Bit mode */
+ env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
+ }
+}
+
+/* search string (c is byte to search, r2 is string, r1 end of string) */
+void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t end, str;
+ uint32_t len;
+ uint8_t v, c = env->regs[0];
+
+    /* Bits 32-55 of R0 must be zero. */
+ if (env->regs[0] & 0xffffff00u) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ str = get_address(env, r2);
+ end = get_address(env, r1);
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; ++len) {
+ if (str + len == end) {
+ /* Character not found. R1 & R2 are unmodified. */
+ env->cc_op = 2;
+ return;
+ }
+ v = cpu_ldub_data_ra(env, str + len, ra);
+ if (v == c) {
+ /* Character found. Set R1 to the location; R2 is unmodified. */
+ env->cc_op = 1;
+ set_address(env, r1, str + len);
+ return;
+ }
+ }
+
+ /* CPU-determined bytes processed. Advance R2 to next byte to process. */
+ env->cc_op = 3;
+ set_address(env, r2, str + len);
+}
+
+void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint32_t len;
+ uint16_t v, c = env->regs[0];
+ uint64_t end, str, adj_end;
+
+ /* Bits 32-47 of R0 must be zero. */
+ if (env->regs[0] & 0xffff0000u) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ str = get_address(env, r2);
+ end = get_address(env, r1);
+
+ /* If the LSB of the two addresses differ, use one extra byte. */
+ adj_end = end + ((str ^ end) & 1);
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; len += 2) {
+ if (str + len == adj_end) {
+ /* End of input found. */
+ env->cc_op = 2;
+ return;
+ }
+ v = cpu_lduw_data_ra(env, str + len, ra);
+ if (v == c) {
+ /* Character found. Set R1 to the location; R2 is unmodified. */
+ env->cc_op = 1;
+ set_address(env, r1, str + len);
+ return;
+ }
+ }
+
+ /* CPU-determined bytes processed. Advance R2 to next byte to process. */
+ env->cc_op = 3;
+ set_address(env, r2, str + len);
+}
+
+/* unsigned string compare (c is string terminator) */
+uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
+{
+ uintptr_t ra = GETPC();
+ uint32_t len;
+
+ c = c & 0xff;
+ s1 = wrap_address(env, s1);
+ s2 = wrap_address(env, s2);
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; ++len) {
+ uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
+ uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
+ if (v1 == v2) {
+ if (v1 == c) {
+ /* Equal. CC=0, and don't advance the registers. */
+ env->cc_op = 0;
+ env->retxl = s2;
+ return s1;
+ }
+ } else {
+ /* Unequal. CC={1,2}, and advance the registers. Note that
+ the terminator need not be zero, but the string that contains
+ the terminator is by definition "low". */
+ env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
+ env->retxl = s2 + len;
+ return s1 + len;
+ }
+ }
+
+ /* CPU-determined bytes equal; advance the registers. */
+ env->cc_op = 3;
+ env->retxl = s2 + len;
+ return s1 + len;
+}
+
+/* move page */
+uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2)
+{
+ const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK;
+ const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK;
+ const int mmu_idx = cpu_mmu_index(env, false);
+ const bool f = extract64(r0, 11, 1);
+ const bool s = extract64(r0, 10, 1);
+ const bool cco = extract64(r0, 8, 1);
+ uintptr_t ra = GETPC();
+ S390Access srca, desta;
+ int exc;
+
+ if ((f && s) || extract64(r0, 12, 4)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+
+ /*
+ * We always manually handle exceptions such that we can properly store
+ * r1/r2 to the lowcore on page-translation exceptions.
+ *
+ * TODO: Access key handling
+ */
+ exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE,
+ MMU_DATA_LOAD, mmu_idx, ra);
+ if (exc) {
+ if (cco) {
+ return 2;
+ }
+ goto inject_exc;
+ }
+ exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE,
+ MMU_DATA_STORE, mmu_idx, ra);
+ if (exc) {
+ if (cco && exc != PGM_PROTECTION) {
+ return 1;
+ }
+ goto inject_exc;
+ }
+ access_memmove(env, &desta, &srca, ra);
+ return 0; /* data moved */
+inject_exc:
+#if !defined(CONFIG_USER_ONLY)
+ if (exc != PGM_ADDRESSING) {
+ stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code),
+ env->tlb_fill_tec);
+ }
+ if (exc == PGM_PAGE_TRANS) {
+ stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id),
+ r1 << 4 | r2);
+ }
+#endif
+ tcg_s390_program_interrupt(env, exc, ra);
+}
+
+/* string copy */
+uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ const uint64_t d = get_address(env, r1);
+ const uint64_t s = get_address(env, r2);
+ const uint8_t c = env->regs[0];
+ const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
+ S390Access srca, desta;
+ uintptr_t ra = GETPC();
+ int i;
+
+ if (env->regs[0] & 0xffffff00ull) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ /*
+ * Our access should not exceed single pages, as we must not report access
+ * exceptions exceeding the actually copied range (which we don't know at
+ * this point). We might over-indicate watchpoints within the pages
+ * (if we ever care, we have to limit processing to a single byte).
+ */
+ srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < len; i++) {
+ const uint8_t v = access_get_byte(env, &srca, i, ra);
+
+ access_set_byte(env, &desta, i, v, ra);
+ if (v == c) {
+ set_address_zero(env, r1, d + i);
+ return 1;
+ }
+ }
+ set_address_zero(env, r1, d + len);
+ set_address_zero(env, r2, s + len);
+ return 3;
+}
+
+/* load access registers r1 to r3 from memory at a2 */
+void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ if (a2 & 0x3) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* store access registers r1 to r3 in memory at a2 */
+void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ if (a2 & 0x3) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stl_data_ra(env, a2, env->aregs[i], ra);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* move long helper */
+static inline uint32_t do_mvcl(CPUS390XState *env,
+ uint64_t *dest, uint64_t *destlen,
+ uint64_t *src, uint64_t *srclen,
+ uint16_t pad, int wordsize, uintptr_t ra)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
+ S390Access srca, desta;
+ int i, cc;
+
+ if (*destlen == *srclen) {
+ cc = 0;
+ } else if (*destlen < *srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (!*destlen) {
+ return cc;
+ }
+
+ /*
+     * Only perform one type of operation (move/pad) at a time.
+ * Stay within single pages.
+ */
+ if (*srclen) {
+ /* Copy the src array */
+ len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
+ *destlen -= len;
+ *srclen -= len;
+ srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_memmove(env, &desta, &srca, ra);
+ *src = wrap_address(env, *src + len);
+ *dest = wrap_address(env, *dest + len);
+ } else if (wordsize == 1) {
+ /* Pad the remaining area */
+ *destlen -= len;
+ desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_memset(env, &desta, pad, ra);
+ *dest = wrap_address(env, *dest + len);
+ } else {
+ desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+
+ /* The remaining length selects the padding byte. */
+ for (i = 0; i < len; (*destlen)--, i++) {
+ if (*destlen & 1) {
+ access_set_byte(env, &desta, i, pad, ra);
+ } else {
+ access_set_byte(env, &desta, i, pad >> 8, ra);
+ }
+ }
+ *dest = wrap_address(env, *dest + len);
+ }
+
+ return *destlen ? 3 : cc;
+}
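+
+/*
+ * For illustration of the two-byte pad above: pad = 0xabcd written into a
+ * remaining destination length of 4 stores the bytes ab cd ab cd, since
+ * even values of *destlen select pad >> 8 and odd values select the low
+ * pad byte.
+ */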
+
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ CPUState *cs = env_cpu(env);
+ S390Access srca, desta;
+ uint32_t cc, cur_len;
+
+ if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
+ cc = 3;
+ } else if (srclen == destlen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ /* We might have to zero-out some bits even if there was no action. */
+ if (unlikely(!destlen || cc == 3)) {
+ set_address_zero(env, r2, src);
+ set_address_zero(env, r1, dest);
+ return cc;
+ } else if (!srclen) {
+ set_address_zero(env, r2, src);
+ }
+
+ /*
+     * Only perform one type of operation (move/pad) in one step.
+ * Stay within single pages.
+ */
+ while (destlen) {
+ cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
+ if (!srclen) {
+ desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
+ ra);
+ access_memset(env, &desta, pad, ra);
+ } else {
+ cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
+
+ srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
+ ra);
+ desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
+ ra);
+ access_memmove(env, &desta, &srca, ra);
+ src = wrap_address(env, src + cur_len);
+ srclen -= cur_len;
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
+ set_address_zero(env, r2, src);
+ }
+ dest = wrap_address(env, dest + cur_len);
+ destlen -= cur_len;
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
+ set_address_zero(env, r1, dest);
+
+ /*
+ * MVCL is interruptible. Return to the main loop if requested after
+ * writing back all state to registers. If no interrupt will get
+ * injected, we'll end up back in this handler and continue processing
+ * the remaining parts.
+ */
+ if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
+ cpu_loop_exit_restore(cs, ra);
+ }
+ }
+ return cc;
+}
+
+/* move long extended */
+uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint8_t pad = a2;
+ uint32_t cc;
+
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);
+
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
+
+ return cc;
+}
+
+/* move long unicode */
+uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t destlen = get_length(env, r1 + 1);
+ uint64_t dest = get_address(env, r1);
+ uint64_t srclen = get_length(env, r3 + 1);
+ uint64_t src = get_address(env, r3);
+ uint16_t pad = a2;
+ uint32_t cc;
+
+ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);
+
+ set_length(env, r1 + 1, destlen);
+ set_length(env, r3 + 1, srclen);
+ set_address(env, r1, dest);
+ set_address(env, r3, src);
+
+ return cc;
+}
+
+/* compare logical long helper */
+static inline uint32_t do_clcl(CPUS390XState *env,
+ uint64_t *src1, uint64_t *src1len,
+ uint64_t *src3, uint64_t *src3len,
+ uint16_t pad, uint64_t limit,
+ int wordsize, uintptr_t ra)
+{
+ uint64_t len = MAX(*src1len, *src3len);
+ uint32_t cc = 0;
+
+ check_alignment(env, *src1len | *src3len, wordsize, ra);
+
+ if (!len) {
+ return cc;
+ }
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. */
+ if (len > limit) {
+ len = limit;
+ cc = 3;
+ }
+
+ for (; len; len -= wordsize) {
+ uint16_t v1 = pad;
+ uint16_t v3 = pad;
+
+ if (*src1len) {
+ v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
+ }
+ if (*src3len) {
+ v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
+ }
+
+ if (v1 != v3) {
+ cc = (v1 < v3) ? 1 : 2;
+ break;
+ }
+
+ if (*src1len) {
+ *src1 += wordsize;
+ *src1len -= wordsize;
+ }
+ if (*src3len) {
+ *src3 += wordsize;
+ *src3len -= wordsize;
+ }
+ }
+
+ return cc;
+}
+
+/* compare logical long */
+uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
+ uint64_t src3 = get_address(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);
+
+ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
+ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r2, src3);
+
+ return cc;
+}
+
+/* compare logical long extended memcompare insn with padding */
+uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint8_t pad = a2;
+ uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
+
+ return cc;
+}
+
+/* compare logical long unicode memcompare insn with padding */
+uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t src1len = get_length(env, r1 + 1);
+ uint64_t src1 = get_address(env, r1);
+ uint64_t src3len = get_length(env, r3 + 1);
+ uint64_t src3 = get_address(env, r3);
+ uint16_t pad = a2;
+    uint32_t cc;
+
+ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);
+
+ set_length(env, r1 + 1, src1len);
+ set_length(env, r3 + 1, src3len);
+ set_address(env, r1, src1);
+ set_address(env, r3, src3);
+
+ return cc;
+}
+
+/* checksum */
+uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
+ uint64_t src, uint64_t src_len)
+{
+ uintptr_t ra = GETPC();
+ uint64_t max_len, len;
+ uint64_t cksm = (uint32_t)r1;
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ max_len = (src_len > 0x2000 ? 0x2000 : src_len);
+
+ /* Process full words as available. */
+ for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
+ cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
+ }
+
+ switch (max_len - len) {
+ case 1:
+ cksm += cpu_ldub_data_ra(env, src, ra) << 24;
+ len += 1;
+ break;
+ case 2:
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
+ len += 2;
+ break;
+ case 3:
+ cksm += cpu_lduw_data_ra(env, src, ra) << 16;
+ cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
+ len += 3;
+ break;
+ }
+
+ /* Fold the carry from the checksum. Note that we can see carry-out
+ during folding more than once (but probably not more than twice). */
+ while (cksm > 0xffffffffull) {
+ cksm = (uint32_t)cksm + (cksm >> 32);
+ }
+
+ /* Indicate whether or not we've processed everything. */
+ env->cc_op = (len == src_len ? 0 : 3);
+
+ /* Return both cksm and processed length. */
+ env->retxl = cksm;
+ return len;
+}
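+
+/*
+ * Folding example: a running sum of 0x123456789 exceeds 32 bits, so one
+ * fold yields 0x23456789 + 1 = 0x2345678a; a second fold is needed only
+ * if that addition itself carries out of bit 31 again.
+ */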
+
+void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
+ src--;
+ len_src--;
+
+    /* now pack the remaining digit nibbles, two per byte */
+ while (len_dest > 0) {
+ b = 0;
+
+ if (len_src >= 0) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src--;
+ len_src--;
+ }
+ if (len_src >= 0) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src--;
+ len_src--;
+ }
+
+ len_dest--;
+ dest--;
+ cpu_stb_data_ra(env, dest, b, ra);
+ }
+}
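+
+/*
+ * For illustration: packing the zoned digits f1 f2 f3 ("123") produces
+ * 12 3f - the rightmost byte has its nibbles swapped, yielding 3f
+ * (digit 3, sign f), then the remaining digit nibbles are packed two per
+ * byte from right to left.
+ */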
+
+static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen, int ssize, uintptr_t ra)
+{
+ int i;
+ /* The destination operand is always 16 bytes long. */
+ const int destlen = 16;
+
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - 1;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = 0;
+
+ /* Start with a positive sign */
+ if (i == 0) {
+ b = 0xc;
+ } else if (srclen > ssize) {
+ b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ if (srclen > ssize) {
+ b |= cpu_ldub_data_ra(env, src, ra) << 4;
+ src -= ssize;
+ srclen -= ssize;
+ }
+
+ cpu_stb_data_ra(env, dest, b, ra);
+ dest--;
+ }
+}
+
+void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 1, GETPC());
+}
+
+void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t srclen)
+{
+ do_pkau(env, dest, src, srclen, 2, GETPC());
+}
+
+void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
+ uint64_t src)
+{
+ uintptr_t ra = GETPC();
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+ int second_nibble = 0;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = cpu_ldub_data_ra(env, src, ra);
+ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
+ src--;
+ len_src--;
+
+    /* now pad every digit nibble with a 0xf zone nibble */
+
+ while (len_dest > 0) {
+ uint8_t cur_byte = 0;
+
+ if (len_src > 0) {
+ cur_byte = cpu_ldub_data_ra(env, src, ra);
+ }
+
+ len_dest--;
+ dest--;
+
+ /* only advance one nibble at a time */
+ if (second_nibble) {
+ cur_byte >>= 4;
+ len_src--;
+ src--;
+ }
+ second_nibble = !second_nibble;
+
+ /* digit */
+ cur_byte = (cur_byte & 0xf);
+ /* zone bits */
+ cur_byte |= 0xf0;
+
+ cpu_stb_data_ra(env, dest, cur_byte, ra);
+ }
+}
+
+static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
+ uint32_t destlen, int dsize, uint64_t src,
+ uintptr_t ra)
+{
+ int i;
+ uint32_t cc;
+ uint8_t b;
+ /* The source operand is always 16 bytes long. */
+ const int srclen = 16;
+
+ /* The operands are processed from right to left. */
+ src += srclen - 1;
+ dest += destlen - dsize;
+
+ /* Check for the sign. */
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ switch (b & 0xf) {
+ case 0xa:
+ case 0xc:
+ case 0xe ... 0xf:
+ cc = 0; /* plus */
+ break;
+ case 0xb:
+ case 0xd:
+ cc = 1; /* minus */
+ break;
+ default:
+ case 0x0 ... 0x9:
+ cc = 3; /* invalid */
+ break;
+ }
+
+ /* Now pad every nibble with 0x30, advancing one nibble at a time. */
+ for (i = 0; i < destlen; i += dsize) {
+ if (i == (31 * dsize)) {
+ /* If length is 32/64 bytes, the leftmost byte is 0. */
+ b = 0;
+ } else if (i % (2 * dsize)) {
+ b = cpu_ldub_data_ra(env, src, ra);
+ src--;
+ } else {
+ b >>= 4;
+ }
+ cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
+ dest -= dsize;
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 1, src, GETPC());
+}
+
+uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+ uint64_t src)
+{
+ return do_unpkau(env, dest, destlen, 2, src, GETPC());
+}
+
+uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
+{
+ uintptr_t ra = GETPC();
+ uint32_t cc = 0;
+ int i;
+
+ for (i = 0; i < destlen; i++) {
+ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
+ /* digit */
+ cc |= (b & 0xf0) > 0x90 ? 2 : 0;
+
+ if (i == (destlen - 1)) {
+ /* sign */
+ cc |= (b & 0xf) < 0xa ? 1 : 0;
+ } else {
+ /* digit */
+ cc |= (b & 0xf) > 0x9 ? 2 : 0;
+ }
+ }
+
+ return cc;
+}
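+
+/*
+ * For illustration: TP on the packed field 12 3c returns cc 0 (all digit
+ * nibbles valid, sign nibble 0xc valid), whereas 1a 3c returns cc 2
+ * because the digit nibble 0xa is invalid; an invalid sign nibble would
+ * additionally set cc bit 0.
+ */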
+
+static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans, uintptr_t ra)
+{
+ uint32_t i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+ uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
+ }
+
+ return env->cc_op;
+}
+
+void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ do_helper_tr(env, len, array, trans, GETPC());
+}
+
+uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
+ uint64_t len, uint64_t trans)
+{
+ uintptr_t ra = GETPC();
+ uint8_t end = env->regs[0] & 0xff;
+ uint64_t l = len;
+ uint64_t i;
+ uint32_t cc = 0;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ array &= 0x7fffffff;
+ l = (uint32_t)l;
+ }
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ if (l > 0x2000) {
+ l = 0x2000;
+ cc = 3;
+ }
+
+ for (i = 0; i < l; i++) {
+ uint8_t byte, new_byte;
+
+ byte = cpu_ldub_data_ra(env, array + i, ra);
+
+ if (byte == end) {
+ cc = 1;
+ break;
+ }
+
+ new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+ cpu_stb_data_ra(env, array + i, new_byte, ra);
+ }
+
+ env->cc_op = cc;
+ env->retxl = len - i;
+ return array + i;
+}
+
+static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
+ uint64_t array, uint64_t trans,
+ int inc, uintptr_t ra)
+{
+ int i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
+ uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);
+
+ if (sbyte != 0) {
+ set_address(env, 1, array + i * inc);
+ env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
+ return (i == len) ? 2 : 1;
+ }
+ }
+
+ return 0;
+}
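+
+/*
+ * For illustration: with a translation table whose only nonzero entry is
+ * at index 0x2e, a forward TRT stops at the first 0x2e argument byte,
+ * stores its address in register 1 and the table byte in register 2, and
+ * returns cc 1 (cc 2 if it was the last byte, cc 0 if none was found).
+ */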
+
+static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
+ uint64_t array, uint64_t trans,
+ uintptr_t ra)
+{
+ return do_helper_trt(env, len, array, trans, 1, ra);
+}
+
+uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ return do_helper_trt(env, len, array, trans, 1, GETPC());
+}
+
+static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
+ uint64_t array, uint64_t trans,
+ uintptr_t ra)
+{
+ return do_helper_trt(env, len, array, trans, -1, ra);
+}
+
+uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ return do_helper_trt(env, len, array, trans, -1, GETPC());
+}
+
+/* Translate one/two to one/two */
+uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
+ uint32_t tst, uint32_t sizes)
+{
+ uintptr_t ra = GETPC();
+ int dsize = (sizes & 1) ? 1 : 2;
+ int ssize = (sizes & 2) ? 1 : 2;
+ uint64_t tbl = get_address(env, 1);
+ uint64_t dst = get_address(env, r1);
+ uint64_t len = get_length(env, r1 + 1);
+ uint64_t src = get_address(env, r2);
+ uint32_t cc = 3;
+ int i;
+
+ /* The lower address bits of TBL are ignored. For TROO, TROT, it's
+ the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
+ the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
+ if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
+ tbl &= -4096;
+ } else {
+ tbl &= -8;
+ }
+
+ check_alignment(env, len, ssize, ra);
+
+    /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do. */
+ for (i = 0; i < 0x2000; i++) {
+ uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
+ uint64_t tble = tbl + (sval * dsize);
+ uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
+ if (dval == tst) {
+ cc = 1;
+ break;
+ }
+ cpu_stsize_data_ra(env, dst, dval, dsize, ra);
+
+ len -= ssize;
+ src += ssize;
+ dst += dsize;
+
+ if (len == 0) {
+ cc = 0;
+ break;
+ }
+ }
+
+ set_address(env, r1, dst);
+ set_length(env, r1 + 1, len);
+ set_address(env, r2, src);
+
+ return cc;
+}
+
+void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
+ uint32_t r1, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
+ Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+ Int128 oldv;
+ uint64_t oldh, oldl;
+ bool fail;
+
+ check_alignment(env, addr, 16, ra);
+
+ oldh = cpu_ldq_data_ra(env, addr + 0, ra);
+ oldl = cpu_ldq_data_ra(env, addr + 8, ra);
+
+ oldv = int128_make128(oldl, oldh);
+ fail = !int128_eq(oldv, cmpv);
+ if (fail) {
+ newv = oldv;
+ }
+
+ cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
+ cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
+
+ env->cc_op = fail;
+ env->regs[r1] = int128_gethi(oldv);
+ env->regs[r1 + 1] = int128_getlo(oldv);
+}
+
+void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
+ uint32_t r1, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
+ Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+ int mem_idx;
+ MemOpIdx oi;
+ Int128 oldv;
+ bool fail;
+
+ assert(HAVE_CMPXCHG128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
+ oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+ fail = !int128_eq(oldv, cmpv);
+
+ env->cc_op = fail;
+ env->regs[r1] = int128_gethi(oldv);
+ env->regs[r1 + 1] = int128_getlo(oldv);
+}
+
+static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
+ uint64_t a2, bool parallel)
+{
+ uint32_t mem_idx = cpu_mmu_index(env, false);
+ uintptr_t ra = GETPC();
+ uint32_t fc = extract32(env->regs[0], 0, 8);
+ uint32_t sc = extract32(env->regs[0], 8, 8);
+ uint64_t pl = get_address(env, 1) & -16;
+ uint64_t svh, svl;
+ uint32_t cc;
+
+ /* Sanity check the function code and storage characteristic. */
+ if (fc > 1 || sc > 3) {
+ if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
+ goto spec_exception;
+ }
+ if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
+ goto spec_exception;
+ }
+ }
+
+ /* Sanity check the alignments. */
+ if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
+ goto spec_exception;
+ }
+
+ /* Sanity check writability of the store address. */
+ probe_write(env, a2, 1 << sc, mem_idx, ra);
+
+ /*
+ * Note that the compare-and-swap is atomic, and the store is atomic,
+ * but the complete operation is not. Therefore we do not need to
+ * assert serial context in order to implement this. That said,
+ * restart early if we can't support either operation that is supposed
+ * to be atomic.
+ */
+ if (parallel) {
+ uint32_t max = 2;
+#ifdef CONFIG_ATOMIC64
+ max = 3;
+#endif
+ if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
+ (HAVE_ATOMIC128 ? 0 : sc > max)) {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ }
+
+ /* All loads happen before all stores. For simplicity, load the entire
+ store value area from the parameter list. */
+ svh = cpu_ldq_data_ra(env, pl + 16, ra);
+ svl = cpu_ldq_data_ra(env, pl + 24, ra);
+
+ switch (fc) {
+ case 0:
+ {
+ uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
+ uint32_t cv = env->regs[r3];
+ uint32_t ov;
+
+ if (parallel) {
+#ifdef CONFIG_USER_ONLY
+ uint32_t *haddr = g2h(env_cpu(env), a1);
+ ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
+#else
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
+ ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
+#endif
+ } else {
+ ov = cpu_ldl_data_ra(env, a1, ra);
+ cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
+ }
+ cc = (ov != cv);
+ env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
+ }
+ break;
+
+ case 1:
+ {
+ uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
+ uint64_t cv = env->regs[r3];
+ uint64_t ov;
+
+ if (parallel) {
+#ifdef CONFIG_ATOMIC64
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
+ ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
+#else
+ /* Note that we asserted !parallel above. */
+ g_assert_not_reached();
+#endif
+ } else {
+ ov = cpu_ldq_data_ra(env, a1, ra);
+ cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
+ }
+ cc = (ov != cv);
+ env->regs[r3] = ov;
+ }
+ break;
+
+ case 2:
+ {
+ uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
+ uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
+ Int128 nv = int128_make128(nvl, nvh);
+ Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+ Int128 ov;
+
+ if (!parallel) {
+ uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
+ uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);
+
+ ov = int128_make128(ol, oh);
+ cc = !int128_eq(ov, cv);
+ if (cc) {
+ nv = ov;
+ }
+
+ cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
+ cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
+ } else if (HAVE_CMPXCHG128) {
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
+ ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
+ cc = !int128_eq(ov, cv);
+ } else {
+ /* Note that we asserted !parallel above. */
+ g_assert_not_reached();
+ }
+
+ env->regs[r3 + 0] = int128_gethi(ov);
+ env->regs[r3 + 1] = int128_getlo(ov);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Store only if the comparison succeeded. Note that above we use a pair
+ of 64-bit big-endian loads, so for sc < 3 we must extract the value
+ from the most-significant bits of svh. */
+ if (cc == 0) {
+ switch (sc) {
+ case 0:
+ cpu_stb_data_ra(env, a2, svh >> 56, ra);
+ break;
+ case 1:
+ cpu_stw_data_ra(env, a2, svh >> 48, ra);
+ break;
+ case 2:
+ cpu_stl_data_ra(env, a2, svh >> 32, ra);
+ break;
+ case 3:
+ cpu_stq_data_ra(env, a2, svh, ra);
+ break;
+ case 4:
+ if (!parallel) {
+ cpu_stq_data_ra(env, a2 + 0, svh, ra);
+ cpu_stq_data_ra(env, a2 + 8, svl, ra);
+ } else if (HAVE_ATOMIC128) {
+ MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ Int128 sv = int128_make128(svl, svh);
+ cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
+ } else {
+ /* Note that we asserted !parallel above. */
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ return cc;
+
+ spec_exception:
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+}
+
+uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
+{
+ return do_csst(env, r3, a1, a2, false);
+}
+
+uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
+ uint64_t a2)
+{
+ return do_csst(env, r3, a1, a2, true);
+}
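+
+/*
+ * The parameter-list layout implied above: the replacement value is read
+ * from offset 0 and the 16-byte store value from offsets 16-31. E.g. for
+ * fc 0 / sc 2, the word at pl is compare-and-swapped into a1 and, on
+ * success, the leftmost word of the store-value area (svh >> 32) is
+ * stored to a2.
+ */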
+
+#if !defined(CONFIG_USER_ONLY)
+void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ bool PERchanged = false;
+ uint64_t src = a2;
+ uint32_t i;
+
+ if (src & 0x7) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ uint64_t val = cpu_ldq_data_ra(env, src, ra);
+ if (env->cregs[i] != val && i >= 9 && i <= 11) {
+ PERchanged = true;
+ }
+ env->cregs[i] = val;
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+ i, src, val);
+ src += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(env_cpu(env));
+ }
+
+ tlb_flush(env_cpu(env));
+}
+
+void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ bool PERchanged = false;
+ uint64_t src = a2;
+ uint32_t i;
+
+ if (src & 0x3) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ uint32_t val = cpu_ldl_data_ra(env, src, ra);
+ if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
+ PERchanged = true;
+ }
+ env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
+ src += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(env_cpu(env));
+ }
+
+ tlb_flush(env_cpu(env));
+}
+
+void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t dest = a2;
+ uint32_t i;
+
+ if (dest & 0x7) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stq_data_ra(env, dest, env->cregs[i], ra);
+ dest += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ uintptr_t ra = GETPC();
+ uint64_t dest = a2;
+ uint32_t i;
+
+ if (dest & 0x3) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stl_data_ra(env, dest, env->cregs[i], ra);
+ dest += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
+{
+ uintptr_t ra = GETPC();
+ int i;
+
+ real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
+
+ for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
+ cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
+ }
+
+ return 0;
+}
+
+uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
+{
+ S390CPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ /*
+ * TODO: we currently don't handle all access protection types
+     * (including access-list and key-controlled), nor AR mode.
+ */
+ if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
+ /* Fetching permitted; storing permitted */
+ return 0;
+ }
+
+ if (env->int_pgm_code == PGM_PROTECTION) {
+ /* retry if reading is possible */
+ cs->exception_index = -1;
+ if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
+ /* Fetching permitted; storing not permitted */
+ return 1;
+ }
+ }
+
+ switch (env->int_pgm_code) {
+ case PGM_PROTECTION:
+ /* Fetching not permitted; storing not permitted */
+ cs->exception_index = -1;
+ return 2;
+ case PGM_ADDRESSING:
+ case PGM_TRANS_SPEC:
+ /* exceptions forwarded to the guest */
+ s390_cpu_virt_mem_handle_exc(cpu, GETPC());
+ return 0;
+ }
+
+ /* Translation not available */
+ cs->exception_index = -1;
+ return 3;
+}
+
+/* insert storage key extended */
+uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint64_t addr = wrap_address(env, r2);
+ uint8_t key;
+ int rc;
+
+ addr = mmu_real2abs(env, addr);
+ if (!mmu_absolute_addr_valid(addr, false)) {
+ tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
+ tlb_flush_all_cpus_synced(env_cpu(env));
+ }
+ }
+
+ rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_get_skeys_nonzero(rc);
+ return 0;
+ }
+ return key;
+}
+
+/* set storage key extended */
+void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint64_t addr = wrap_address(env, r2);
+ uint8_t key;
+ int rc;
+
+ addr = mmu_real2abs(env, addr);
+ if (!mmu_absolute_addr_valid(addr, false)) {
+ tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
+ tlb_flush_all_cpus_synced(env_cpu(env));
+ }
+ }
+
+ key = r1 & 0xfe;
+ rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_set_skeys_nonzero(rc);
+ }
+ /*
+ * As we can only flush by virtual address and not all the entries
+ * that point to a physical address we have to flush the whole TLB.
+ */
+ tlb_flush_all_cpus_synced(env_cpu(env));
+}
+
+/* reset reference bit extended */
+uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
+{
+ uint64_t addr = wrap_address(env, r2);
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint8_t re, key;
+ int rc;
+
+ addr = mmu_real2abs(env, addr);
+ if (!mmu_absolute_addr_valid(addr, false)) {
+ tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC());
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ if (skeyclass->enable_skeys && !skeyclass->enable_skeys(ss)) {
+ tlb_flush_all_cpus_synced(env_cpu(env));
+ }
+ }
+
+ rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_get_skeys_nonzero(rc);
+ return 0;
+ }
+
+ re = key & (SK_R | SK_C);
+ key &= ~SK_R;
+
+ rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (rc) {
+ trace_set_skeys_nonzero(rc);
+ return 0;
+ }
+ /*
+ * As we can only flush by virtual address and not all the entries
+ * that point to a physical address we have to flush the whole TLB.
+ */
+ tlb_flush_all_cpus_synced(env_cpu(env));
+
+ /*
+ * cc
+ *
+ * 0 Reference bit zero; change bit zero
+ * 1 Reference bit zero; change bit one
+ * 2 Reference bit one; change bit zero
+ * 3 Reference bit one; change bit one
+ */
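+    /*
+     * Worked example (assuming the usual storage-key layout, with SK_R
+     * as bit 0x04 and SK_C as bit 0x02): R=1, C=0 gives re == 0x04,
+     * so re >> 1 == 2, matching the table above.
+     */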
+
+ return re >> 1;
+}
+
+uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+ S390Access srca, desta;
+ uintptr_t ra = GETPC();
+ int cc = 0;
+
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __func__, l, a1, a2);
+
+ if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
+ psw_as == AS_HOME || psw_as == AS_ACCREG) {
+ s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+ }
+
+ l = wrap_length32(env, l);
+ if (l > 256) {
+ /* max 256 */
+ l = 256;
+ cc = 3;
+ } else if (!l) {
+ return cc;
+ }
+
+ /* TODO: Access key handling */
+ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
+ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
+ access_memmove(env, &desta, &srca, ra);
+ return cc;
+}
+
+uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+ S390Access srca, desta;
+ uintptr_t ra = GETPC();
+ int cc = 0;
+
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __func__, l, a1, a2);
+
+ if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
+ psw_as == AS_HOME || psw_as == AS_ACCREG) {
+ s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+ }
+
+ l = wrap_length32(env, l);
+ if (l > 256) {
+ /* max 256 */
+ l = 256;
+ cc = 3;
+ } else if (!l) {
+ return cc;
+ }
+
+ /* TODO: Access key handling */
+ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
+ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
+ access_memmove(env, &desta, &srca, ra);
+ return cc;
+}
+
+void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
+{
+ CPUState *cs = env_cpu(env);
+ const uintptr_t ra = GETPC();
+ uint64_t table, entry, raddr;
+ uint16_t entries, i, index = 0;
+
+ if (r2 & 0xff000) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ if (!(r2 & 0x800)) {
+ /* invalidation-and-clearing operation */
+ table = r1 & ASCE_ORIGIN;
+ entries = (r2 & 0x7ff) + 1;
+
+ switch (r1 & ASCE_TYPE_MASK) {
+ case ASCE_TYPE_REGION1:
+ index = (r2 >> 53) & 0x7ff;
+ break;
+ case ASCE_TYPE_REGION2:
+ index = (r2 >> 42) & 0x7ff;
+ break;
+ case ASCE_TYPE_REGION3:
+ index = (r2 >> 31) & 0x7ff;
+ break;
+ case ASCE_TYPE_SEGMENT:
+ index = (r2 >> 20) & 0x7ff;
+ break;
+ }
+ for (i = 0; i < entries; i++) {
+            /* addresses are not wrapped in 24/31-bit mode, but the table index is */
+ raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
+ entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
+ if (!(entry & REGION_ENTRY_I)) {
+ /* we are allowed to not store if already invalid */
+ entry |= REGION_ENTRY_I;
+ cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
+ }
+ }
+ }
+
+    /* We simply flush the complete tlb, so we can ignore the r3 field. */
+ if (m4 & 1) {
+ tlb_flush(cs);
+ } else {
+ tlb_flush_all_cpus_synced(cs);
+ }
+}
+
+/* invalidate pte */
+void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
+ uint32_t m4)
+{
+ CPUState *cs = env_cpu(env);
+ const uintptr_t ra = GETPC();
+ uint64_t page = vaddr & TARGET_PAGE_MASK;
+ uint64_t pte_addr, pte;
+
+ /* Compute the page table entry address */
+ pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
+ pte_addr += VADDR_PAGE_TX(vaddr) * 8;
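+    /* each PTE is 8 bytes, indexed by the page-index (PX) bits of vaddr */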
+
+ /* Mark the page table entry as invalid */
+ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
+ pte |= PAGE_ENTRY_I;
+ cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
+
+ /* XXX we exploit the fact that Linux passes the exact virtual
+ address here - it's not obliged to! */
+ if (m4 & 1) {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
+ tlb_flush_page(cs, page);
+ /* XXX 31-bit hack */
+ tlb_flush_page(cs, page ^ 0x80000000);
+ } else {
+ /* looks like we don't have a valid virtual address */
+ tlb_flush(cs);
+ }
+ } else {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
+ tlb_flush_page_all_cpus_synced(cs, page);
+ /* XXX 31-bit hack */
+ tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
+ } else {
+ /* looks like we don't have a valid virtual address */
+ tlb_flush_all_cpus_synced(cs);
+ }
+ }
+}
+
+/* flush local tlb */
+void HELPER(ptlb)(CPUS390XState *env)
+{
+ tlb_flush(env_cpu(env));
+}
+
+/* flush global tlb */
+void HELPER(purge)(CPUS390XState *env)
+{
+ tlb_flush_all_cpus_synced(env_cpu(env));
+}
+
+/* load real address */
+uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
+{
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ uint64_t ret, tec;
+ int flags, exc, cc;
+
+ /* XXX incomplete - has more corner cases */
+ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
+ }
+
+ exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec);
+ if (exc) {
+ cc = 3;
+ ret = exc | 0x80000000;
+ } else {
+ cc = 0;
+ ret |= addr & ~TARGET_PAGE_MASK;
+ }
+
+ env->cc_op = cc;
+ return ret;
+}
+#endif
+
+/* load pair from quadword */
+uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t hi, lo;
+
+ check_alignment(env, addr, 16, ra);
+ hi = cpu_ldq_data_ra(env, addr + 0, ra);
+ lo = cpu_ldq_data_ra(env, addr + 8, ra);
+
+ env->retxl = lo;
+ return hi;
+}
+
+uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t hi, lo;
+ int mem_idx;
+ MemOpIdx oi;
+ Int128 v;
+
+ assert(HAVE_ATOMIC128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
+ hi = int128_gethi(v);
+ lo = int128_getlo(v);
+
+ env->retxl = lo;
+ return hi;
+}
+
+/* store pair to quadword */
+void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ uintptr_t ra = GETPC();
+
+ check_alignment(env, addr, 16, ra);
+ cpu_stq_data_ra(env, addr + 0, high, ra);
+ cpu_stq_data_ra(env, addr + 8, low, ra);
+}
+
+void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
+ uint64_t low, uint64_t high)
+{
+ uintptr_t ra = GETPC();
+ int mem_idx;
+ MemOpIdx oi;
+ Int128 v;
+
+ assert(HAVE_ATOMIC128);
+
+ mem_idx = cpu_mmu_index(env, false);
+ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ v = int128_make128(low, high);
+ cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
+}
+
+/* Execute instruction.  This instruction executes an insn modified by
+ the contents of r1. It does not change the executed instruction in memory;
+ it does not change the program counter.
+
+ Perform this by recording the modified instruction in env->ex_value.
+ This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
+*/
+void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
+{
+ uint64_t insn = cpu_lduw_code(env, addr);
+ uint8_t opc = insn >> 8;
+
+ /* Or in the contents of R1[56:63]. */
+ insn |= r1 & 0xff;
+
+ /* Load the rest of the instruction. */
+ insn <<= 48;
+ switch (get_ilen(opc)) {
+ case 2:
+ break;
+ case 4:
+ insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
+ break;
+ case 6:
+ insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+    /* The most common cases can be sped up by avoiding a new TB. */
+ if ((opc & 0xf0) == 0xd0) {
+ typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
+ uint64_t, uintptr_t);
+ static const dx_helper dx[16] = {
+ [0x0] = do_helper_trt_bkwd,
+ [0x2] = do_helper_mvc,
+ [0x4] = do_helper_nc,
+ [0x5] = do_helper_clc,
+ [0x6] = do_helper_oc,
+ [0x7] = do_helper_xc,
+ [0xc] = do_helper_tr,
+ [0xd] = do_helper_trt_fwd,
+ };
+ dx_helper helper = dx[opc & 0xf];
+
+ if (helper) {
+ uint32_t l = extract64(insn, 48, 8);
+ uint32_t b1 = extract64(insn, 44, 4);
+ uint32_t d1 = extract64(insn, 32, 12);
+ uint32_t b2 = extract64(insn, 28, 4);
+ uint32_t d2 = extract64(insn, 16, 12);
+ uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
+ uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);
+
+ env->cc_op = helper(env, l, a1, a2, 0);
+ env->psw.addr += ilen;
+ return;
+ }
+ } else if (opc == 0x0a) {
+ env->int_svc_code = extract64(insn, 48, 8);
+ env->int_svc_ilen = ilen;
+ helper_exception(env, EXCP_SVC);
+ g_assert_not_reached();
+ }
+
+ /* Record the insn we want to execute as well as the ilen to use
+ during the execution of the target insn. This will also ensure
+ that ex_value is non-zero, which flags that we are in a state
+ that requires such execution. */
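+    /*
+     * Layout sketch: after the shifts above, a 2-byte insn occupies bits
+     * 63..48, a 4-byte insn bits 63..32 and a 6-byte insn bits 63..16,
+     * so the low byte is always free to carry ilen (2, 4 or 6).
+     */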
+ env->ex_value = insn | ilen;
+}
+
+uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint64_t len)
+{
+ const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
+ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+ const uint64_t r0 = env->regs[0];
+ const uintptr_t ra = GETPC();
+ uint8_t dest_key, dest_as, dest_k, dest_a;
+ uint8_t src_key, src_as, src_k, src_a;
+ uint64_t val;
+ int cc = 0;
+
+ HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
+ __func__, dest, src, len);
+
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+ }
+
+ /* OAC (operand access control) for the first operand -> dest */
+ val = (r0 & 0xffff0000ULL) >> 16;
+ dest_key = (val >> 12) & 0xf;
+ dest_as = (val >> 6) & 0x3;
+ dest_k = (val >> 1) & 0x1;
+ dest_a = val & 0x1;
+
+ /* OAC (operand access control) for the second operand -> src */
+ val = (r0 & 0x0000ffffULL);
+ src_key = (val >> 12) & 0xf;
+ src_as = (val >> 6) & 0x3;
+ src_k = (val >> 1) & 0x1;
+ src_a = val & 0x1;
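+    /*
+     * Sketch of the OAC halfword as decoded above (big-endian bit
+     * numbering): access key in bits 0-3, AS in bits 8-9, the key
+     * validity bit K in bit 14 and the AS validity bit A in bit 15.
+     * E.g. a (hypothetical) OAC of 0x9041 selects key 9 and AS 1
+     * (secondary), with only the A bit valid.
+     */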
+
+ if (!dest_k) {
+ dest_key = psw_key;
+ }
+ if (!src_k) {
+ src_key = psw_key;
+ }
+ if (!dest_a) {
+ dest_as = psw_as;
+ }
+ if (!src_a) {
+ src_as = psw_as;
+ }
+
+ if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+ }
+ if (!(env->cregs[0] & CR0_SECONDARY) &&
+ (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+ }
+ if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
+ tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
+ }
+
+ len = wrap_length32(env, len);
+ if (len > 4096) {
+ cc = 3;
+ len = 4096;
+ }
+
+ /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
+ if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
+ (env->psw.mask & PSW_MASK_PSTATE)) {
+ qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
+ __func__);
+ tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
+ }
+
+ /* FIXME: Access using correct keys and AR-mode */
+ if (len) {
+ S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
+ mmu_idx_from_as(src_as), ra);
+ S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
+ mmu_idx_from_as(dest_as), ra);
+
+ access_memmove(env, &desta, &srca, ra);
+ }
+
+ return cc;
+}
+
+/* Decode a Unicode character. A return value < 0 indicates success, storing
+ the UTF-32 result into OCHAR and the input length into OLEN. A return
+ value >= 0 indicates failure, and the CC value to be returned. */
+typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
+ uint64_t ilen, bool enh_check, uintptr_t ra,
+ uint32_t *ochar, uint32_t *olen);
+
+/* Encode a Unicode character. A return value < 0 indicates success, storing
+ the bytes into ADDR and the output length into OLEN. A return value >= 0
+ indicates failure, and the CC value to be returned. */
+typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
+ uint64_t ilen, uintptr_t ra, uint32_t c,
+ uint32_t *olen);
+
+static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ bool enh_check, uintptr_t ra,
+ uint32_t *ochar, uint32_t *olen)
+{
+ uint8_t s0, s1, s2, s3;
+ uint32_t c, l;
+
+ if (ilen < 1) {
+ return 0;
+ }
+ s0 = cpu_ldub_data_ra(env, addr, ra);
+ if (s0 <= 0x7f) {
+ /* one byte character */
+ l = 1;
+ c = s0;
+ } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
+ /* invalid character */
+ return 2;
+ } else if (s0 <= 0xdf) {
+ /* two byte character */
+ l = 2;
+ if (ilen < 2) {
+ return 0;
+ }
+ s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+ c = s0 & 0x1f;
+ c = (c << 6) | (s1 & 0x3f);
+ if (enh_check && (s1 & 0xc0) != 0x80) {
+ return 2;
+ }
+ } else if (s0 <= 0xef) {
+ /* three byte character */
+ l = 3;
+ if (ilen < 3) {
+ return 0;
+ }
+ s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+ s2 = cpu_ldub_data_ra(env, addr + 2, ra);
+ c = s0 & 0x0f;
+ c = (c << 6) | (s1 & 0x3f);
+ c = (c << 6) | (s2 & 0x3f);
+ /* Fold the byte-by-byte range descriptions in the PoO into
+           tests against the complete value.  This disallows encodings
+           that could have been shorter, as well as the UTF-16 surrogates. */
+ if (enh_check
+ && ((s1 & 0xc0) != 0x80
+ || (s2 & 0xc0) != 0x80
+ || c < 0x1000
+ || (c >= 0xd800 && c <= 0xdfff))) {
+ return 2;
+ }
+ } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
+ /* four byte character */
+ l = 4;
+ if (ilen < 4) {
+ return 0;
+ }
+ s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+ s2 = cpu_ldub_data_ra(env, addr + 2, ra);
+ s3 = cpu_ldub_data_ra(env, addr + 3, ra);
+ c = s0 & 0x07;
+ c = (c << 6) | (s1 & 0x3f);
+ c = (c << 6) | (s2 & 0x3f);
+ c = (c << 6) | (s3 & 0x3f);
+ /* See above. */
+ if (enh_check
+ && ((s1 & 0xc0) != 0x80
+ || (s2 & 0xc0) != 0x80
+ || (s3 & 0xc0) != 0x80
+ || c < 0x010000
+ || c > 0x10ffff)) {
+ return 2;
+ }
+ } else {
+ /* invalid character */
+ return 2;
+ }
+
+ *ochar = c;
+ *olen = l;
+ return -1;
+}
+
+static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ bool enh_check, uintptr_t ra,
+ uint32_t *ochar, uint32_t *olen)
+{
+ uint16_t s0, s1;
+ uint32_t c, l;
+
+ if (ilen < 2) {
+ return 0;
+ }
+ s0 = cpu_lduw_data_ra(env, addr, ra);
+ if ((s0 & 0xfc00) != 0xd800) {
+ /* one word character */
+ l = 2;
+ c = s0;
+ } else {
+ /* two word character */
+ l = 4;
+ if (ilen < 4) {
+ return 0;
+ }
+ s1 = cpu_lduw_data_ra(env, addr + 2, ra);
+ c = extract32(s0, 6, 4) + 1;
+ c = (c << 6) | (s0 & 0x3f);
+ c = (c << 10) | (s1 & 0x3ff);
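+        /*
+         * e.g. the pair 0xd801/0xdc37: ((0 + 1) << 6) | 0x01 == 0x41,
+         * then (0x41 << 10) | 0x037 == U+10437 -- the usual UTF-16
+         * surrogate math, with the "+ 1" supplying the 0x10000 offset.
+         */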
+ if (enh_check && (s1 & 0xfc00) != 0xdc00) {
+ /* invalid surrogate character */
+ return 2;
+ }
+ }
+
+ *ochar = c;
+ *olen = l;
+ return -1;
+}
+
+static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ bool enh_check, uintptr_t ra,
+ uint32_t *ochar, uint32_t *olen)
+{
+ uint32_t c;
+
+ if (ilen < 4) {
+ return 0;
+ }
+ c = cpu_ldl_data_ra(env, addr, ra);
+ if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
+ /* invalid unicode character */
+ return 2;
+ }
+
+ *ochar = c;
+ *olen = 4;
+ return -1;
+}
+
+static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ uintptr_t ra, uint32_t c, uint32_t *olen)
+{
+ uint8_t d[4];
+ uint32_t l, i;
+
+ if (c <= 0x7f) {
+ /* one byte character */
+ l = 1;
+ d[0] = c;
+ } else if (c <= 0x7ff) {
+ /* two byte character */
+ l = 2;
+ d[1] = 0x80 | extract32(c, 0, 6);
+ d[0] = 0xc0 | extract32(c, 6, 5);
+ } else if (c <= 0xffff) {
+ /* three byte character */
+ l = 3;
+ d[2] = 0x80 | extract32(c, 0, 6);
+ d[1] = 0x80 | extract32(c, 6, 6);
+ d[0] = 0xe0 | extract32(c, 12, 4);
+ } else {
+ /* four byte character */
+ l = 4;
+ d[3] = 0x80 | extract32(c, 0, 6);
+ d[2] = 0x80 | extract32(c, 6, 6);
+ d[1] = 0x80 | extract32(c, 12, 6);
+ d[0] = 0xf0 | extract32(c, 18, 3);
+ }
+
+ if (ilen < l) {
+ return 1;
+ }
+ for (i = 0; i < l; ++i) {
+ cpu_stb_data_ra(env, addr + i, d[i], ra);
+ }
+
+ *olen = l;
+ return -1;
+}
+
+static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ uintptr_t ra, uint32_t c, uint32_t *olen)
+{
+ uint16_t d0, d1;
+
+ if (c <= 0xffff) {
+ /* one word character */
+ if (ilen < 2) {
+ return 1;
+ }
+ cpu_stw_data_ra(env, addr, c, ra);
+ *olen = 2;
+ } else {
+ /* two word character */
+ if (ilen < 4) {
+ return 1;
+ }
+ d1 = 0xdc00 | extract32(c, 0, 10);
+ d0 = 0xd800 | extract32(c, 10, 6);
+ d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
+ cpu_stw_data_ra(env, addr + 0, d0, ra);
+ cpu_stw_data_ra(env, addr + 2, d1, ra);
+ *olen = 4;
+ }
+
+ return -1;
+}
+
+static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+ uintptr_t ra, uint32_t c, uint32_t *olen)
+{
+ if (ilen < 4) {
+ return 1;
+ }
+ cpu_stl_data_ra(env, addr, c, ra);
+ *olen = 4;
+ return -1;
+}
+
+static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
+ uint32_t r2, uint32_t m3, uintptr_t ra,
+ decode_unicode_fn decode,
+ encode_unicode_fn encode)
+{
+ uint64_t dst = get_address(env, r1);
+ uint64_t dlen = get_length(env, r1 + 1);
+ uint64_t src = get_address(env, r2);
+ uint64_t slen = get_length(env, r2 + 1);
+ bool enh_check = m3 & 1;
+ int cc, i;
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+       amount of work we're willing to do.  For now, cap it at 256 characters. */
+ for (i = 0; i < 256; ++i) {
+ uint32_t c, ilen, olen;
+
+ cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
+ if (unlikely(cc >= 0)) {
+ break;
+ }
+ cc = encode(env, dst, dlen, ra, c, &olen);
+ if (unlikely(cc >= 0)) {
+ break;
+ }
+
+ src += ilen;
+ slen -= ilen;
+ dst += olen;
+ dlen -= olen;
+ cc = 3;
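+        /* if the cap ends the loop, cc 3 tells the guest to retry */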
+ }
+
+ set_address(env, r1, dst);
+ set_length(env, r1 + 1, dlen);
+ set_address(env, r2, src);
+ set_length(env, r2 + 1, slen);
+
+ return cc;
+}
+
+uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf8, encode_utf16);
+}
+
+uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf8, encode_utf32);
+}
+
+uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf16, encode_utf8);
+}
+
+uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf16, encode_utf32);
+}
+
+uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf32, encode_utf8);
+}
+
+uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+{
+ return convert_unicode(env, r1, r2, m3, GETPC(),
+ decode_utf32, encode_utf16);
+}
+
+void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
+ uintptr_t ra)
+{
+ /* test the actual access, not just any access to the page due to LAP */
+ while (len) {
+ const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
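+        /*
+         * pagelen is the number of bytes left in the current page: with
+         * 4k pages, addr 0x1234 gives -(0x1234 | ~0xfff) == 0xdcc.
+         */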
+ const uint64_t curlen = MIN(pagelen, len);
+
+ probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
+ addr = wrap_address(env, addr + curlen);
+ len -= curlen;
+ }
+}
+
+void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
+{
+ probe_write_access(env, addr, len, GETPC());
+}
diff --git a/target/s390x/tcg/meson.build b/target/s390x/tcg/meson.build
new file mode 100644
index 000000000..ee4e8fec7
--- /dev/null
+++ b/target/s390x/tcg/meson.build
@@ -0,0 +1,14 @@
+s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'cc_helper.c',
+ 'crypto_helper.c',
+ 'excp_helper.c',
+ 'fpu_helper.c',
+ 'int_helper.c',
+ 'mem_helper.c',
+ 'misc_helper.c',
+ 'translate.c',
+ 'vec_fpu_helper.c',
+ 'vec_helper.c',
+ 'vec_int_helper.c',
+ 'vec_string_helper.c',
+))
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
new file mode 100644
index 000000000..aab9c4774
--- /dev/null
+++ b/target/s390x/tcg/misc_helper.c
@@ -0,0 +1,798 @@
+/*
+ * S/390 misc helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "exec/memory.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "qemu/timer.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qapi/error.h"
+#include "tcg_s390x.h"
+#include "s390-tod.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/ebcdic.h"
+#include "hw/s390x/s390-virtio-hcall.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/ioinst.h"
+#include "hw/s390x/s390-pci-inst.h"
+#include "hw/boards.h"
+#include "hw/s390x/tod.h"
+#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Raise an exception statically from a TB. */
+void HELPER(exception)(CPUS390XState *env, uint32_t excp)
+{
+ CPUState *cs = env_cpu(env);
+
+ HELPER_LOG("%s: exception %d\n", __func__, excp);
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+/* Store CPU Timer (also used for EXTRACT CPU TIME) */
+uint64_t HELPER(stpt)(CPUS390XState *env)
+{
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * Fake a descending CPU timer. We could get negative values here,
+ * but we don't care as it is up to the OS when to process that
+ * interrupt and reset to > 0.
+ */
+ return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
+#else
+ return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+#endif
+}
+
+/* Store Clock */
+uint64_t HELPER(stck)(CPUS390XState *env)
+{
+#ifdef CONFIG_USER_ONLY
+ struct timespec ts;
+ uint64_t ns;
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;
+
+ return TOD_UNIX_EPOCH + time2tod(ns);
+#else
+ S390TODState *td = s390_get_todstate();
+ S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+ S390TOD tod;
+
+ tdc->get(td, &tod, &error_abort);
+ return tod.low;
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+/* SCLP service call */
+uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+ qemu_mutex_lock_iothread();
+ int r = sclp_service_call(env, r1, r2);
+ qemu_mutex_unlock_iothread();
+ if (r < 0) {
+ tcg_s390_program_interrupt(env, -r, GETPC());
+ }
+ return r;
+}
+
+void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
+{
+ uint64_t r;
+
+ switch (num) {
+ case 0x500:
+ /* KVM hypercall */
+ qemu_mutex_lock_iothread();
+ r = s390_virtio_hypercall(env);
+ qemu_mutex_unlock_iothread();
+ break;
+ case 0x44:
+ /* yield */
+ r = 0;
+ break;
+ case 0x308:
+ /* ipl */
+ qemu_mutex_lock_iothread();
+ handle_diag_308(env, r1, r3, GETPC());
+ qemu_mutex_unlock_iothread();
+ r = 0;
+ break;
+ case 0x288:
+ /* time bomb (watchdog) */
+ r = handle_diag_288(env, r1, r3);
+ break;
+ default:
+ r = -1;
+ break;
+ }
+
+ if (r) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+}
+
+/* Set Prefix */
+void HELPER(spx)(CPUS390XState *env, uint64_t a1)
+{
+ const uint32_t prefix = a1 & 0x7fffe000;
+ const uint32_t old_prefix = env->psa;
+ CPUState *cs = env_cpu(env);
+
+ if (prefix == old_prefix) {
+ return;
+ }
+
+ env->psa = prefix;
+ HELPER_LOG("prefix: %#x\n", prefix);
+ tlb_flush_page(cs, 0);
+ tlb_flush_page(cs, TARGET_PAGE_SIZE);
+ if (prefix != 0) {
+ tlb_flush_page(cs, prefix);
+ tlb_flush_page(cs, prefix + TARGET_PAGE_SIZE);
+ }
+ if (old_prefix != 0) {
+ tlb_flush_page(cs, old_prefix);
+ tlb_flush_page(cs, old_prefix + TARGET_PAGE_SIZE);
+ }
+}
+
+static void update_ckc_timer(CPUS390XState *env)
+{
+ S390TODState *td = s390_get_todstate();
+ uint64_t time;
+
+ /* stop the timer and remove pending CKC IRQs */
+ timer_del(env->tod_timer);
+ g_assert(qemu_mutex_iothread_locked());
+ env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
+
+    /*
+     * The TOD has to exceed the CKC before the timer fires; that can
+     * never happen if the CKC is all 1's.
+     */
+ if (env->ckc == -1ULL) {
+ return;
+ }
+
+ /* difference between origins */
+ time = env->ckc - td->base.low;
+
+ /* nanoseconds */
+ time = tod2time(time);
+
+ timer_mod(env->tod_timer, time);
+}
+
+/* Set Clock Comparator */
+void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
+{
+ env->ckc = ckc;
+
+ qemu_mutex_lock_iothread();
+ update_ckc_timer(env);
+ qemu_mutex_unlock_iothread();
+}
+
+void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ update_ckc_timer(&cpu->env);
+}
+
+/* Set Clock */
+uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
+{
+ S390TODState *td = s390_get_todstate();
+ S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+ S390TOD tod = {
+ .high = 0,
+ .low = tod_low,
+ };
+
+ qemu_mutex_lock_iothread();
+ tdc->set(td, &tod, &error_abort);
+ qemu_mutex_unlock_iothread();
+ return 0;
+}
+
+/* Set Tod Programmable Field */
+void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
+{
+ uint32_t val = r0;
+
+ if (val & 0xffff0000) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+ }
+ env->todpr = val;
+}
+
+/* Store Clock Comparator */
+uint64_t HELPER(stckc)(CPUS390XState *env)
+{
+ return env->ckc;
+}
+
+/* Set CPU Timer */
+void HELPER(spt)(CPUS390XState *env, uint64_t time)
+{
+ if (time == -1ULL) {
+ return;
+ }
+
+ /* nanoseconds */
+ time = tod2time(time);
+
+ env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
+
+ timer_mod(env->cpu_timer, env->cputm);
+}
+
+/* Store System Information */
+uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
+{
+ const uintptr_t ra = GETPC();
+ const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
+ const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
+ const MachineState *ms = MACHINE(qdev_get_machine());
+ uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
+ S390CPU *cpu = env_archcpu(env);
+ SysIB sysib = { };
+ int i, cc = 0;
+
+ if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
+ /* invalid function code: no other checks are performed */
+ return 3;
+ }
+
+ if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
+ /* query the current level: no further checks are performed */
+ env->regs[0] = STSI_R0_FC_LEVEL_3;
+ return 0;
+ }
+
+ if (a0 & ~TARGET_PAGE_MASK) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ /* count the cpus and split them into configured and reserved ones */
+ for (i = 0; i < ms->possible_cpus->len; i++) {
+ total_cpus++;
+ if (ms->possible_cpus->cpus[i].cpu) {
+ conf_cpus++;
+ } else {
+ reserved_cpus++;
+ }
+ }
+
+ /*
+ * In theory, we could report Level 1 / Level 2 as current. However,
+ * the Linux kernel will detect this as running under LPAR and assume
+     * that we have an sclp linemode console (which is always present on
+ * LPAR, but not the default for QEMU), therefore not displaying boot
+ * messages and making booting a Linux kernel under TCG harder.
+ *
+ * For now we fake the same SMP configuration on all levels.
+ *
+ * TODO: We could later make the level configurable via the machine
+ * and change defaults (linemode console) based on machine type
+ * and accelerator.
+ */
+ switch (r0 & STSI_R0_FC_MASK) {
+ case STSI_R0_FC_LEVEL_1:
+ if ((sel1 == 1) && (sel2 == 1)) {
+ /* Basic Machine Configuration */
+ char type[5] = {};
+
+ ebcdic_put(sysib.sysib_111.manuf, "QEMU ", 16);
+ /* same as machine type number in STORE CPU ID, but in EBCDIC */
+ snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
+ ebcdic_put(sysib.sysib_111.type, type, 4);
+            /* model number (not stored in STORE CPU ID for z/Architecture) */
+ ebcdic_put(sysib.sysib_111.model, "QEMU ", 16);
+ ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16);
+ ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
+ } else if ((sel1 == 2) && (sel2 == 1)) {
+ /* Basic Machine CPU */
+ ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
+ sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* Basic Machine CPUs */
+ sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
+ sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
+ sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
+ sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
+ } else {
+ cc = 3;
+ }
+ break;
+ case STSI_R0_FC_LEVEL_2:
+ if ((sel1 == 2) && (sel2 == 1)) {
+ /* LPAR CPU */
+ ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
+ sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* LPAR CPUs */
+ sysib.sysib_222.lcpuc = 0x80; /* dedicated */
+ sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
+ sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
+ sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
+ ebcdic_put(sysib.sysib_222.name, "QEMU ", 8);
+ sysib.sysib_222.caf = cpu_to_be32(1000);
+ sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
+ } else {
+ cc = 3;
+ }
+ break;
+ case STSI_R0_FC_LEVEL_3:
+ if ((sel1 == 2) && (sel2 == 2)) {
+ /* VM CPUs */
+ sysib.sysib_322.count = 1;
+ sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
+ sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
+ sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
+ sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
+            /* the Linux kernel uses this to distinguish us from z/VM */
+ ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux ", 16);
+ sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */
+
+ /* If our VM has a name, use the real name */
+ if (qemu_name) {
+ memset(sysib.sysib_322.vm[0].name, 0x40,
+ sizeof(sysib.sysib_322.vm[0].name));
+ ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
+ MIN(sizeof(sysib.sysib_322.vm[0].name),
+ strlen(qemu_name)));
+ strpadcpy((char *)sysib.sysib_322.ext_names[0],
+ sizeof(sysib.sysib_322.ext_names[0]),
+ qemu_name, '\0');
+
+ } else {
+ ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
+ strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
+ }
+
+ /* add the uuid */
+ memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
+ sizeof(sysib.sysib_322.vm[0].uuid));
+ } else {
+ cc = 3;
+ }
+ break;
+ }
+
+ if (cc == 0) {
+ if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ }
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
+ uint32_t r3)
+{
+ int cc;
+
+ /* TODO: needed to inject interrupts - push further down */
+ qemu_mutex_lock_iothread();
+ cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
+ qemu_mutex_unlock_iothread();
+
+ return cc;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_xsch(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(csch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_csch(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_hsch(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_rchp(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_rsch(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(sal)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_sal(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
+{
+ const uintptr_t ra = GETPC();
+ S390CPU *cpu = env_archcpu(env);
+ QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
+ QEMUS390FlicIO *io = NULL;
+ LowCore *lowcore;
+
+ if (addr & 0x3) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ qemu_mutex_lock_iothread();
+ io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
+ if (!io) {
+ qemu_mutex_unlock_iothread();
+ return 0;
+ }
+
+ if (addr) {
+ struct {
+ uint16_t id;
+ uint16_t nr;
+ uint32_t parm;
+ } intc = {
+ .id = cpu_to_be16(io->id),
+ .nr = cpu_to_be16(io->nr),
+ .parm = cpu_to_be32(io->parm),
+ };
+
+ if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
+ /* writing failed, reinject and properly clean up */
+ s390_io_interrupt(io->id, io->nr, io->parm, io->word);
+ qemu_mutex_unlock_iothread();
+ g_free(io);
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
+ return 0;
+ }
+ } else {
+ /* no protection applies */
+ lowcore = cpu_map_lowcore(env);
+ lowcore->subchannel_id = cpu_to_be16(io->id);
+ lowcore->subchannel_nr = cpu_to_be16(io->nr);
+ lowcore->io_int_parm = cpu_to_be32(io->parm);
+ lowcore->io_int_word = cpu_to_be32(io->word);
+ cpu_unmap_lowcore(lowcore);
+ }
+
+ g_free(io);
+ qemu_mutex_unlock_iothread();
+ return 1;
+}
+
+void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
+{
+ S390CPU *cpu = env_archcpu(env);
+ qemu_mutex_lock_iothread();
+ ioinst_handle_chsc(cpu, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(per_check_exception)(CPUS390XState *env)
+{
+ if (env->per_perc_atmid) {
+ tcg_s390_program_interrupt(env, PGM_PER, GETPC());
+ }
+}
+
+/* Check if an address is within the PER starting address and the PER
+ ending address. The address range might loop. */
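+/* e.g. cregs[10] == 0xf000 and cregs[11] == 0x1000 describe a wrapped
+   range, so both 0xf800 and 0x0800 count as "in range" below. */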
+static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
+{
+ if (env->cregs[10] <= env->cregs[11]) {
+ return env->cregs[10] <= addr && addr <= env->cregs[11];
+ } else {
+ return env->cregs[10] <= addr || addr <= env->cregs[11];
+ }
+}
+
+void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
+{
+ if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
+ if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
+ || get_per_in_range(env, to)) {
+ env->per_address = from;
+ env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
+ }
+ }
+}
+
+void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
+{
+ if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
+ env->per_address = addr;
+ env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
+
+ /* If the instruction has to be nullified, trigger the
+ exception immediately. */
+ if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
+ CPUState *cs = env_cpu(env);
+
+ env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
+ env->int_pgm_code = PGM_PER;
+ env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));
+
+ cs->exception_index = EXCP_PGM;
+ cpu_loop_exit(cs);
+ }
+ }
+}
+
+void HELPER(per_store_real)(CPUS390XState *env)
+{
+ if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+ /* PSW is saved just before calling the helper. */
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+ }
+}
+#endif
+
+static uint8_t stfl_bytes[2048];
+static unsigned int used_stfl_bytes;
+
+static void prepare_stfl(void)
+{
+ static bool initialized;
+ int i;
+
+    /* racy, but we don't care: the same values are always written */
+ if (initialized) {
+ return;
+ }
+
+ s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+ for (i = 0; i < sizeof(stfl_bytes); i++) {
+ if (stfl_bytes[i]) {
+ used_stfl_bytes = i + 1;
+ }
+ }
+ initialized = true;
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(stfl)(CPUS390XState *env)
+{
+ LowCore *lowcore;
+
+ lowcore = cpu_map_lowcore(env);
+ prepare_stfl();
+ memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
+ cpu_unmap_lowcore(lowcore);
+}
+#endif
+
+uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
+{
+ const uintptr_t ra = GETPC();
+ const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
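+    /* bits 56-63 of r0 hold (number of doublewords - 1), so 0 -> 8 bytes */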
+ int max_bytes;
+ int i;
+
+ if (addr & 0x7) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ prepare_stfl();
+ max_bytes = ROUND_UP(used_stfl_bytes, 8);
+
+ /*
+ * The PoP says that doublewords beyond the highest-numbered facility
+ * bit may or may not be stored. However, existing hardware appears to
+     * not store the words, and existing software depends on that.
+ */
+ for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
+ cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
+ }
+
+ env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
+ return count_bytes >= max_bytes ? 0 : 3;
+}
+
+#ifndef CONFIG_USER_ONLY
+/*
+ * Note: we ignore any return code of the functions called for the pci
+ * instructions, as the only time they return !0 is when the stub is
+ * called, and in that case we didn't even offer the zpci facility.
+ * The only exception is SIC, where program checks need to be handled
+ * by the caller.
+ */
+void HELPER(clp)(CPUS390XState *env, uint32_t r2)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ clp_service_call(cpu, r2, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ pcilg_service_call(cpu, r1, r2, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ pcistg_service_call(cpu, r1, r2, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
+ uint32_t ar)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+ int r;
+
+ qemu_mutex_lock_iothread();
+ r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
+ qemu_mutex_unlock_iothread();
+ /* css_do_sic() may actually return a PGM_xxx value to inject */
+ if (r) {
+ tcg_s390_program_interrupt(env, -r, GETPC());
+ }
+}
+
+void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ rpcit_service_call(cpu, r1, r2, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
+ uint64_t gaddr, uint32_t ar)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
+ uint32_t ar)
+{
+ S390CPU *cpu = env_archcpu(env);
+
+ qemu_mutex_lock_iothread();
+ mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+#endif
diff --git a/target/s390x/tcg/s390-tod.h b/target/s390x/tcg/s390-tod.h
new file mode 100644
index 000000000..8b74d6a6d
--- /dev/null
+++ b/target/s390x/tcg/s390-tod.h
@@ -0,0 +1,29 @@
+/*
+ * TOD (Time Of Day) clock
+ *
+ * Copyright 2018 Red Hat, Inc.
+ * Author(s): David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TARGET_S390_TOD_H
+#define TARGET_S390_TOD_H
+
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
+/* Converts ns to s390's clock format */
+static inline uint64_t time2tod(uint64_t ns)
+{
+ return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9);
+}
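+/*
+ * One TOD unit is 1/4096 us (bit 51 steps once per microsecond), so the
+ * factor is 4096/1000 == 512/125; the second term re-adds the bits that
+ * the "<< 9" shifts out of the top.  E.g. time2tod(1000) == 4096.
+ */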
+
+/* Converts s390's clock format to ns */
+static inline uint64_t tod2time(uint64_t t)
+{
+ return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9);
+}
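+/* the inverse mapping: e.g. tod2time(4096) == (8 * 125) + 0 == 1000 ns */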
+
+#endif
diff --git a/target/s390x/tcg/tcg_s390x.h b/target/s390x/tcg/tcg_s390x.h
new file mode 100644
index 000000000..2f54ccb02
--- /dev/null
+++ b/target/s390x/tcg/tcg_s390x.h
@@ -0,0 +1,24 @@
+/*
+ * QEMU TCG support -- s390x specific functions.
+ *
+ * Copyright 2018 Red Hat, Inc.
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_S390X_H
+#define TCG_S390X_H
+
+void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque);
+void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
+ uint32_t code, uintptr_t ra);
+void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
+ uintptr_t ra);
+void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
+ uintptr_t ra);
+
+#endif /* TCG_S390X_H */
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
new file mode 100644
index 000000000..dcc249a19
--- /dev/null
+++ b/target/s390x/tcg/translate.c
@@ -0,0 +1,6652 @@
+/*
+ * S/390 translation
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2010 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* #define DEBUG_INLINE_BRANCHES */
+#define S390X_DEBUG_DISAS
+/* #define S390X_DEBUG_DISAS_VERBOSE */
+
+#ifdef S390X_DEBUG_DISAS_VERBOSE
+# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+#include "qemu/log.h"
+#include "qemu/host-utils.h"
+#include "exec/cpu_ldst.h"
+#include "exec/gen-icount.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/translator.h"
+#include "exec/log.h"
+#include "qemu/atomic128.h"
+
+
+/* Information that (most) every instruction needs to manipulate. */
+typedef struct DisasContext DisasContext;
+typedef struct DisasInsn DisasInsn;
+typedef struct DisasFields DisasFields;
+
+/*
+ * Define a structure to hold the decoded fields. We'll store each inside
+ * an array indexed by an enum. In order to conserve memory, we'll arrange
+ * for fields that do not exist at the same time to overlap, thus the "C"
+ * for compact.  For checking purposes there is also an "O" for the
+ * original index, which is applied to availability bitmaps.
+ */
+
+enum DisasFieldIndexO {
+ FLD_O_r1,
+ FLD_O_r2,
+ FLD_O_r3,
+ FLD_O_m1,
+ FLD_O_m3,
+ FLD_O_m4,
+ FLD_O_m5,
+ FLD_O_m6,
+ FLD_O_b1,
+ FLD_O_b2,
+ FLD_O_b4,
+ FLD_O_d1,
+ FLD_O_d2,
+ FLD_O_d4,
+ FLD_O_x2,
+ FLD_O_l1,
+ FLD_O_l2,
+ FLD_O_i1,
+ FLD_O_i2,
+ FLD_O_i3,
+ FLD_O_i4,
+ FLD_O_i5,
+ FLD_O_v1,
+ FLD_O_v2,
+ FLD_O_v3,
+ FLD_O_v4,
+};
+
+enum DisasFieldIndexC {
+ FLD_C_r1 = 0,
+ FLD_C_m1 = 0,
+ FLD_C_b1 = 0,
+ FLD_C_i1 = 0,
+ FLD_C_v1 = 0,
+
+ FLD_C_r2 = 1,
+ FLD_C_b2 = 1,
+ FLD_C_i2 = 1,
+
+ FLD_C_r3 = 2,
+ FLD_C_m3 = 2,
+ FLD_C_i3 = 2,
+ FLD_C_v3 = 2,
+
+ FLD_C_m4 = 3,
+ FLD_C_b4 = 3,
+ FLD_C_i4 = 3,
+ FLD_C_l1 = 3,
+ FLD_C_v4 = 3,
+
+ FLD_C_i5 = 4,
+ FLD_C_d1 = 4,
+ FLD_C_m5 = 4,
+
+ FLD_C_d2 = 5,
+ FLD_C_m6 = 5,
+
+ FLD_C_d4 = 6,
+ FLD_C_x2 = 6,
+ FLD_C_l2 = 6,
+ FLD_C_v2 = 6,
+
+ NUM_C_FIELD = 7
+};
+
+struct DisasFields {
+ uint64_t raw_insn;
+ unsigned op:8;
+ unsigned op2:8;
+ unsigned presentC:16;
+ unsigned int presentO;
+ int c[NUM_C_FIELD];
+};
+
+struct DisasContext {
+ DisasContextBase base;
+ const DisasInsn *insn;
+ TCGOp *insn_start;
+ DisasFields fields;
+ uint64_t ex_value;
+ /*
+ * During translate_one(), pc_tmp is used to determine the instruction
+ * to be executed after base.pc_next - e.g. next sequential instruction
+ * or a branch target.
+ */
+ uint64_t pc_tmp;
+ uint32_t ilen;
+ enum cc_op cc_op;
+};
+
+/* Information carried about a condition to be evaluated. */
+typedef struct {
+ TCGCond cond:8;
+ bool is_64;
+ bool g1;
+ bool g2;
+ union {
+ struct { TCGv_i64 a, b; } s64;
+ struct { TCGv_i32 a, b; } s32;
+ } u;
+} DisasCompare;
+
+#ifdef DEBUG_INLINE_BRANCHES
+static uint64_t inline_branch_hit[CC_OP_MAX];
+static uint64_t inline_branch_miss[CC_OP_MAX];
+#endif
+
+static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
+{
+ TCGv_i64 tmp;
+
+ if (s->base.tb->flags & FLAG_MASK_32) {
+ if (s->base.tb->flags & FLAG_MASK_64) {
+ tcg_gen_movi_i64(out, pc);
+ return;
+ }
+ pc |= 0x80000000;
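+        /* the leftmost bit of the 32-bit link info is the addressing mode */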
+ }
+ assert(!(s->base.tb->flags & FLAG_MASK_64));
+ tmp = tcg_const_i64(pc);
+ tcg_gen_deposit_i64(out, out, tmp, 0, 32);
+ tcg_temp_free_i64(tmp);
+}
+
+static TCGv_i64 psw_addr;
+static TCGv_i64 psw_mask;
+static TCGv_i64 gbea;
+
+static TCGv_i32 cc_op;
+static TCGv_i64 cc_src;
+static TCGv_i64 cc_dst;
+static TCGv_i64 cc_vr;
+
+static char cpu_reg_names[16][4];
+static TCGv_i64 regs[16];
+
+void s390x_translate_init(void)
+{
+ int i;
+
+ psw_addr = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, psw.addr),
+ "psw_addr");
+ psw_mask = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, psw.mask),
+ "psw_mask");
+ gbea = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, gbea),
+ "gbea");
+
+ cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
+ "cc_op");
+ cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
+ "cc_src");
+ cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
+ "cc_dst");
+ cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
+ "cc_vr");
+
+ for (i = 0; i < 16; i++) {
+ snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
+ regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUS390XState, regs[i]),
+ cpu_reg_names[i]);
+ }
+}
+
+static inline int vec_full_reg_offset(uint8_t reg)
+{
+ g_assert(reg < 32);
+ return offsetof(CPUS390XState, vregs[reg][0]);
+}
+
+static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
+{
+ /* Convert element size (es) - e.g. MO_8 - to bytes */
+ const uint8_t bytes = 1 << es;
+ int offs = enr * bytes;
+
+ /*
+     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest
+     * 8 bytes of the 16-byte vector, on both little- and big-endian systems.
+ *
+ * Big Endian (target/possible host)
+ * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
+ * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
+ * W: [ 0][ 1] - [ 2][ 3]
+ * DW: [ 0] - [ 1]
+ *
+ * Little Endian (possible host)
+ * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
+ * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
+ * W: [ 1][ 0] - [ 3][ 2]
+ * DW: [ 0] - [ 1]
+ *
+ * For 16 byte elements, the two 8 byte halves will not form a host
+ * int128 if the host is little endian, since they're in the wrong order.
+ * Some operations (e.g. xor) do not care. For operations like addition,
+ * the two 8 byte elements have to be loaded separately. Let's force all
+ * 16 byte operations to handle it in a special way.
+ */
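+    /*
+     * e.g. es == MO_32, enr == 1: offs starts at 4; on a little-endian
+     * host the XOR below turns that into 0, which is where big-endian
+     * word 1 of the first doubleword actually lives in host memory.
+     */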
+ g_assert(es <= MO_64);
+#ifndef HOST_WORDS_BIGENDIAN
+ offs ^= (8 - bytes);
+#endif
+ return offs + vec_full_reg_offset(reg);
+}
+
+static inline int freg64_offset(uint8_t reg)
+{
+ g_assert(reg < 16);
+ return vec_reg_offset(reg, 0, MO_64);
+}
+
+static inline int freg32_offset(uint8_t reg)
+{
+ g_assert(reg < 16);
+ return vec_reg_offset(reg, 0, MO_32);
+}
+
+static TCGv_i64 load_reg(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_mov_i64(r, regs[reg]);
+ return r;
+}
+
+static TCGv_i64 load_freg(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
+ return r;
+}
+
+static TCGv_i64 load_freg32_i64(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+
+ tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
+ return r;
+}
+
+static void store_reg(int reg, TCGv_i64 v)
+{
+ tcg_gen_mov_i64(regs[reg], v);
+}
+
+static void store_freg(int reg, TCGv_i64 v)
+{
+ tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
+}
+
+static void store_reg32_i64(int reg, TCGv_i64 v)
+{
+ /* 32 bit register writes keep the upper half */
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
+}
+
+static void store_reg32h_i64(int reg, TCGv_i64 v)
+{
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
+}
+
+static void store_freg32_i64(int reg, TCGv_i64 v)
+{
+ tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
+}
+
+static void return_low128(TCGv_i64 dest)
+{
+ tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
+}
+
+static void update_psw_addr(DisasContext *s)
+{
+ /* psw.addr */
+ tcg_gen_movi_i64(psw_addr, s->base.pc_next);
+}
+
+static void per_branch(DisasContext *s, bool to_next)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_i64(gbea, s->base.pc_next);
+
+ if (s->base.tb->flags & FLAG_MASK_PER) {
+ TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
+ gen_helper_per_branch(cpu_env, gbea, next_pc);
+ if (to_next) {
+ tcg_temp_free_i64(next_pc);
+ }
+ }
+#endif
+}
+
+static void per_branch_cond(DisasContext *s, TCGCond cond,
+ TCGv_i64 arg1, TCGv_i64 arg2)
+{
+#ifndef CONFIG_USER_ONLY
+ if (s->base.tb->flags & FLAG_MASK_PER) {
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
+
+ tcg_gen_movi_i64(gbea, s->base.pc_next);
+ gen_helper_per_branch(cpu_env, gbea, psw_addr);
+
+ gen_set_label(lab);
+ } else {
+ TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
+ tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
+ tcg_temp_free_i64(pc);
+ }
+#endif
+}
+
+static void per_breaking_event(DisasContext *s)
+{
+ tcg_gen_movi_i64(gbea, s->base.pc_next);
+}
+
+static void update_cc_op(DisasContext *s)
+{
+ if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+ tcg_gen_movi_i32(cc_op, s->cc_op);
+ }
+}
+
+static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
+{
+ return (uint64_t)translator_lduw(env, &s->base, pc);
+}
+
+static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
+{
+ return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
+}
+
+static int get_mem_index(DisasContext *s)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
+ return MMU_REAL_IDX;
+ }
+
+ switch (s->base.tb->flags & FLAG_MASK_ASC) {
+ case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
+ return MMU_PRIMARY_IDX;
+ case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
+ return MMU_SECONDARY_IDX;
+ case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
+ return MMU_HOME_IDX;
+ default:
+ tcg_abort();
+ break;
+ }
+#endif
+}
+
+static void gen_exception(int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_program_exception(DisasContext *s, int code)
+{
+ TCGv_i32 tmp;
+
+    /* Remember what pgm exception this was.  */
+ tmp = tcg_const_i32(code);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
+ tcg_temp_free_i32(tmp);
+
+ tmp = tcg_const_i32(s->ilen);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
+ tcg_temp_free_i32(tmp);
+
+ /* update the psw */
+ update_psw_addr(s);
+
+ /* Save off cc. */
+ update_cc_op(s);
+
+ /* Trigger exception. */
+ gen_exception(EXCP_PGM);
+}
+
+static inline void gen_illegal_opcode(DisasContext *s)
+{
+ gen_program_exception(s, PGM_OPERATION);
+}
+
+static inline void gen_data_exception(uint8_t dxc)
+{
+ TCGv_i32 tmp = tcg_const_i32(dxc);
+ gen_helper_data_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void gen_trap(DisasContext *s)
+{
+ /* Set DXC to 0xff */
+ gen_data_exception(0xff);
+}
+
+static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
+ int64_t imm)
+{
+ tcg_gen_addi_i64(dst, src, imm);
+ if (!(s->base.tb->flags & FLAG_MASK_64)) {
+ if (s->base.tb->flags & FLAG_MASK_32) {
+ tcg_gen_andi_i64(dst, dst, 0x7fffffff);
+ } else {
+ tcg_gen_andi_i64(dst, dst, 0x00ffffff);
+ }
+ }
+}
+
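+/*
+ * Compute the base + index + displacement effective address, wrapped
+ * to the current addressing mode: 64-bit addresses are used as-is,
+ * while 31-bit and 24-bit modes mask the result with 0x7fffffff and
+ * 0x00ffffff respectively.
+ */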
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /*
+ * Note that d2 is limited to 20 bits, signed. If we crop negative
+     * displacements early, we create larger immediate addends.
+ */
+ if (b2 && x2) {
+ tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
+ gen_addi_and_wrap_i64(s, tmp, tmp, d2);
+ } else if (b2) {
+ gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
+ } else if (x2) {
+ gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
+ } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
+ if (s->base.tb->flags & FLAG_MASK_32) {
+ tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
+ } else {
+ tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
+ }
+ } else {
+ tcg_gen_movi_i64(tmp, d2);
+ }
+
+ return tmp;
+}
+
+static inline bool live_cc_data(DisasContext *s)
+{
+ return (s->cc_op != CC_OP_DYNAMIC
+ && s->cc_op != CC_OP_STATIC
+ && s->cc_op > 3);
+}
+
+static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_dst);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ tcg_gen_mov_i64(cc_dst, dst);
+ s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_vr);
+ }
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst, TCGv_i64 vr)
+{
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ tcg_gen_mov_i64(cc_vr, vr);
+ s->cc_op = op;
+}
+
+static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+/* CC value is in env->cc_op */
+static void set_cc_static(DisasContext *s)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_dst);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ s->cc_op = CC_OP_STATIC;
+}
+
+/* Calculate the cc value into cc_op. */
+static void gen_op_calc_cc(DisasContext *s)
+{
+ TCGv_i32 local_cc_op = NULL;
+ TCGv_i64 dummy = NULL;
+
+ switch (s->cc_op) {
+ default:
+ dummy = tcg_const_i64(0);
+ /* FALLTHRU */
+ case CC_OP_ADD_64:
+ case CC_OP_SUB_64:
+ case CC_OP_ADD_32:
+ case CC_OP_SUB_32:
+ local_cc_op = tcg_const_i32(s->cc_op);
+ break;
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ case CC_OP_STATIC:
+ case CC_OP_DYNAMIC:
+ break;
+ }
+
+ switch (s->cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* s->cc_op is the cc value */
+ tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
+ break;
+ case CC_OP_STATIC:
+ /* env->cc_op already is the cc value */
+ break;
+ case CC_OP_NZ:
+ case CC_OP_ABS_64:
+ case CC_OP_NABS_64:
+ case CC_OP_ABS_32:
+ case CC_OP_NABS_32:
+ case CC_OP_LTGT0_32:
+ case CC_OP_LTGT0_64:
+ case CC_OP_COMP_32:
+ case CC_OP_COMP_64:
+ case CC_OP_NZ_F32:
+ case CC_OP_NZ_F64:
+ case CC_OP_FLOGR:
+ case CC_OP_LCBB:
+ case CC_OP_MULS_32:
+ /* 1 argument */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
+ break;
+ case CC_OP_ADDU:
+ case CC_OP_ICM:
+ case CC_OP_LTGT_32:
+ case CC_OP_LTGT_64:
+ case CC_OP_LTUGTU_32:
+ case CC_OP_LTUGTU_64:
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ case CC_OP_SLA_32:
+ case CC_OP_SLA_64:
+ case CC_OP_SUBU:
+ case CC_OP_NZ_F128:
+ case CC_OP_VC:
+ case CC_OP_MULS_64:
+ /* 2 arguments */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
+ break;
+ case CC_OP_ADD_64:
+ case CC_OP_SUB_64:
+ case CC_OP_ADD_32:
+ case CC_OP_SUB_32:
+ /* 3 arguments */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ case CC_OP_DYNAMIC:
+ /* unknown operation - assume 3 arguments and cc_op in env */
+ gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ default:
+ tcg_abort();
+ }
+
+ if (local_cc_op) {
+ tcg_temp_free_i32(local_cc_op);
+ }
+ if (dummy) {
+ tcg_temp_free_i64(dummy);
+ }
+
+ /* We now have cc in cc_op as constant */
+ set_cc_static(s);
+}
+
+static bool use_goto_tb(DisasContext *s, uint64_t dest)
+{
+ if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
+ return false;
+ }
+ return translator_use_goto_tb(&s->base, dest);
+}
+
+static void account_noninline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_miss[cc_op]++;
+#endif
+}
+
+static void account_inline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_hit[cc_op]++;
+#endif
+}
+
+/* Table of mask values to comparison codes, given a comparison as input.
+   For such an input, CC=3 should not be possible. */
+static const TCGCond ltgt_cond[16] = {
+ TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
+ TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
+ TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
+ TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
+ TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
+ TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
+};
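+
+/* Worked example: mask bit 8 selects CC0, 4 selects CC1, 2 selects CC2
+   and 1 selects CC3.  After a comparison CC0 means equal, CC1 low and
+   CC2 high, so mask 8|4 (table indices 12-13 above) branches on "equal
+   or low", i.e. TCG_COND_LE.  Entries are duplicated because bit 0
+   (CC3) is a don't-care here. */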
+
+/* Table of mask values to comparison codes, given a logic op as input.
+   For such an input, only CC=0 and CC=1 should be possible. */
+static const TCGCond nz_cond[16] = {
+ TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
+ TCG_COND_NEVER, TCG_COND_NEVER,
+ TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
+ TCG_COND_NE, TCG_COND_NE,
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
+ TCG_COND_EQ, TCG_COND_EQ,
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS,
+};
+
+/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
+ details required to generate a TCG comparison. */
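+/* The four mask bits select CC values 0-3 from most to least significant
+   bit, matching the "(8 >> cc) & mask" test used for the generic case
+   below. */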
+static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
+{
+ TCGCond cond;
+ enum cc_op old_cc_op = s->cc_op;
+
+ if (mask == 15 || mask == 0) {
+ c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
+ c->u.s32.a = cc_op;
+ c->u.s32.b = cc_op;
+ c->g1 = c->g2 = true;
+ c->is_64 = false;
+ return;
+ }
+
+ /* Find the TCG condition for the mask + cc op. */
+ switch (old_cc_op) {
+ case CC_OP_LTGT0_32:
+ case CC_OP_LTGT0_64:
+ case CC_OP_LTGT_32:
+ case CC_OP_LTGT_64:
+ cond = ltgt_cond[mask];
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_LTUGTU_32:
+ case CC_OP_LTUGTU_64:
+ cond = tcg_unsigned_cond(ltgt_cond[mask]);
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_NZ:
+ cond = nz_cond[mask];
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ switch (mask) {
+ case 8:
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 2 | 1:
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_ICM:
+ switch (mask) {
+ case 8:
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 2 | 1:
+ case 4 | 2:
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_FLOGR:
+ switch (mask & 0xa) {
+ case 8: /* src == 0 -> no one bit found */
+ cond = TCG_COND_EQ;
+ break;
+ case 2: /* src != 0 -> one bit found */
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_ADDU:
+ case CC_OP_SUBU:
+ switch (mask) {
+ case 8 | 2: /* result == 0 */
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 1: /* result != 0 */
+ cond = TCG_COND_NE;
+ break;
+ case 8 | 4: /* !carry (borrow) */
+ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
+ break;
+ case 2 | 1: /* carry (!borrow) */
+ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ default:
+ do_dynamic:
+ /* Calculate cc value. */
+ gen_op_calc_cc(s);
+ /* FALLTHRU */
+
+ case CC_OP_STATIC:
+ /* Jump based on CC. We'll load up the real cond below;
+ the assignment here merely avoids a compiler warning. */
+ account_noninline_branch(s, old_cc_op);
+ old_cc_op = CC_OP_STATIC;
+ cond = TCG_COND_NEVER;
+ break;
+ }
+
+ /* Load up the arguments of the comparison. */
+ c->is_64 = true;
+ c->g1 = c->g2 = false;
+ switch (old_cc_op) {
+ case CC_OP_LTGT0_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case CC_OP_LTGT_32:
+ case CC_OP_LTUGTU_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
+ c->u.s32.b = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
+ break;
+
+ case CC_OP_LTGT0_64:
+ case CC_OP_NZ:
+ case CC_OP_FLOGR:
+ c->u.s64.a = cc_dst;
+ c->u.s64.b = tcg_const_i64(0);
+ c->g1 = true;
+ break;
+ case CC_OP_LTGT_64:
+ case CC_OP_LTUGTU_64:
+ c->u.s64.a = cc_src;
+ c->u.s64.b = cc_dst;
+ c->g1 = c->g2 = true;
+ break;
+
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ case CC_OP_ICM:
+ c->u.s64.a = tcg_temp_new_i64();
+ c->u.s64.b = tcg_const_i64(0);
+ tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
+ break;
+
+ case CC_OP_ADDU:
+ case CC_OP_SUBU:
+ c->is_64 = true;
+ c->u.s64.b = tcg_const_i64(0);
+ c->g1 = true;
+ switch (mask) {
+ case 8 | 2:
+ case 4 | 1: /* result */
+ c->u.s64.a = cc_dst;
+ break;
+ case 8 | 4:
+ case 2 | 1: /* carry */
+ c->u.s64.a = cc_src;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case CC_OP_STATIC:
+ c->is_64 = false;
+ c->u.s32.a = cc_op;
+ c->g1 = true;
+ switch (mask) {
+ case 0x8 | 0x4 | 0x2: /* cc != 3 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(3);
+ break;
+ case 0x8 | 0x4 | 0x1: /* cc != 2 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x8 | 0x2 | 0x1: /* cc != 1 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
+ cond = TCG_COND_EQ;
+ c->g1 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+ break;
+ case 0x8 | 0x4: /* cc < 2 */
+ cond = TCG_COND_LTU;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x8: /* cc == 0 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
+ cond = TCG_COND_NE;
+ c->g1 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+ break;
+ case 0x4: /* cc == 1 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x2 | 0x1: /* cc > 1 */
+ cond = TCG_COND_GTU;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x2: /* cc == 2 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x1: /* cc == 3 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(3);
+ break;
+ default:
+ /* CC is masked by something else: (8 >> cc) & mask. */
+ cond = TCG_COND_NE;
+ c->g1 = false;
+ c->u.s32.a = tcg_const_i32(8);
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
+ tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
+ break;
+ }
+ break;
+
+ default:
+ abort();
+ }
+ c->cond = cond;
+}
+
+static void free_compare(DisasCompare *c)
+{
+ if (!c->g1) {
+ if (c->is_64) {
+ tcg_temp_free_i64(c->u.s64.a);
+ } else {
+ tcg_temp_free_i32(c->u.s32.a);
+ }
+ }
+ if (!c->g2) {
+ if (c->is_64) {
+ tcg_temp_free_i64(c->u.s64.b);
+ } else {
+ tcg_temp_free_i32(c->u.s32.b);
+ }
+ }
+}
+
+/* ====================================================================== */
+/* Define the insn format enumeration. */
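+/* This is an X-macro expansion: insn-format.def invokes one F<n> line
+   per instruction format (e.g. F2(RR, R(1, 8), R(2, 12))).  Included
+   here it produces the FMT_* enumerators; included again below, with
+   different definitions, it produces the matching field-layout table. */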
+#define F0(N) FMT_##N,
+#define F1(N, X1) F0(N)
+#define F2(N, X1, X2) F0(N)
+#define F3(N, X1, X2, X3) F0(N)
+#define F4(N, X1, X2, X3, X4) F0(N)
+#define F5(N, X1, X2, X3, X4, X5) F0(N)
+#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)
+
+typedef enum {
+#include "insn-format.def"
+} DisasFormat;
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+#undef F6
+
+/* This is the way fields are to be accessed out of DisasFields. */
+#define have_field(S, F) have_field1((S), FLD_O_##F)
+#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
+
+static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
+{
+ return (s->fields.presentO >> c) & 1;
+}
+
+static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
+ enum DisasFieldIndexC c)
+{
+ assert(have_field1(s, o));
+ return s->fields.c[c];
+}
+
+/* Describe the layout of each field in each format. */
+typedef struct DisasField {
+ unsigned int beg:8;
+ unsigned int size:8;
+ unsigned int type:2;
+ unsigned int indexC:6;
+ enum DisasFieldIndexO indexO:8;
+} DisasField;
+
+typedef struct DisasFormatInfo {
+ DisasField op[NUM_C_FIELD];
+} DisasFormatInfo;
+
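+/* Each macro below describes one operand field as (beginning bit, size,
+   type): R and M are 4-bit register/mask nibbles, V a vector-register
+   nibble (its extension bit is applied when the field is extracted),
+   I a signed immediate, L an unsigned length, and the BD/BXD variants
+   are base(+index)+displacement groups, with the *DL forms carrying
+   the 20-bit long displacement. */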
+#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
+#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
+#define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
+#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
+ { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
+ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
+#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
+
+#define F0(N) { { } },
+#define F1(N, X1) { { X1 } },
+#define F2(N, X1, X2) { { X1, X2 } },
+#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
+#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
+#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
+#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
+
+static const DisasFormatInfo format_info[] = {
+#include "insn-format.def"
+};
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+#undef F6
+#undef R
+#undef M
+#undef V
+#undef BD
+#undef BXD
+#undef BDL
+#undef BXDL
+#undef I
+#undef L
+
+/* Generally, we'll extract operands into this structure, operate upon
+ them, and store them back. See the "in1", "in2", "prep", "wout" sets
+ of routines below for more details. */
+typedef struct {
+ bool g_out, g_out2, g_in1, g_in2;
+ TCGv_i64 out, out2, in1, in2;
+ TCGv_i64 addr1;
+} DisasOps;
+
+/* Instructions can place constraints on their operands, raising specification
+ exceptions if they are violated. To make this easy to automate, each "in1",
+ "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
+ of the following, or 0. To make this easy to document, we'll put the
+ SPEC_<name> defines next to <name>. */
+
+#define SPEC_r1_even 1
+#define SPEC_r2_even 2
+#define SPEC_r3_even 4
+#define SPEC_r1_f128 8
+#define SPEC_r2_f128 16
+
+/* Return values from translate_one, indicating the state of the TB. */
+
+/* We are not using a goto_tb (for whatever reason), but have updated
+ the PC (for whatever reason), so there's no need to do it again on
+ exiting the TB. */
+#define DISAS_PC_UPDATED DISAS_TARGET_0
+
+/* We have emitted one or more goto_tb. No fixup required. */
+#define DISAS_GOTO_TB DISAS_TARGET_1
+
+/* We have updated the PC and CC values. */
+#define DISAS_PC_CC_UPDATED DISAS_TARGET_2
+
+/* We are exiting the TB, but have neither emitted a goto_tb, nor
+ updated the PC for the next instruction to be executed. */
+#define DISAS_PC_STALE DISAS_TARGET_3
+
+/* We are exiting the TB to the main loop. */
+#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4
+
+
+/* Instruction flags */
+#define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */
+#define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */
+#define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */
+#define IF_BFP 0x0008 /* binary floating point instruction */
+#define IF_DFP 0x0010 /* decimal floating point instruction */
+#define IF_PRIV 0x0020 /* privileged instruction */
+#define IF_VEC 0x0040 /* vector instruction */
+#define IF_IO 0x0080 /* input/output instruction */
+
+struct DisasInsn {
+ unsigned opc:16;
+ unsigned flags:16;
+ DisasFormat fmt:8;
+ unsigned fac:8;
+ unsigned spec:8;
+
+ const char *name;
+
+ /* Pre-process arguments before HELP_OP. */
+ void (*help_in1)(DisasContext *, DisasOps *);
+ void (*help_in2)(DisasContext *, DisasOps *);
+ void (*help_prep)(DisasContext *, DisasOps *);
+
+ /*
+ * Post-process output after HELP_OP.
+ * Note that these are not called if HELP_OP returns DISAS_NORETURN.
+ */
+ void (*help_wout)(DisasContext *, DisasOps *);
+ void (*help_cout)(DisasContext *, DisasOps *);
+
+ /* Implement the operation itself. */
+ DisasJumpType (*help_op)(DisasContext *, DisasOps *);
+
+ uint64_t data;
+};
+
+/* ====================================================================== */
+/* Miscellaneous helpers, used by several operations. */
+
+static void help_l2_shift(DisasContext *s, DisasOps *o, int mask)
+{
+ int b2 = get_field(s, b2);
+ int d2 = get_field(s, d2);
+
+ if (b2 == 0) {
+ o->in2 = tcg_const_i64(d2 & mask);
+ } else {
+ o->in2 = get_address(s, 0, b2, d2);
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ }
+}
+
+static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
+{
+ if (dest == s->pc_tmp) {
+ per_branch(s, true);
+ return DISAS_NEXT;
+ }
+ if (use_goto_tb(s, dest)) {
+ update_cc_op(s);
+ per_breaking_event(s);
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, dest);
+ tcg_gen_exit_tb(s->base.tb, 0);
+ return DISAS_GOTO_TB;
+ } else {
+ tcg_gen_movi_i64(psw_addr, dest);
+ per_branch(s, false);
+ return DISAS_PC_UPDATED;
+ }
+}
+
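+/*
+ * Emit a conditional branch to either an immediate target (IS_IMM) or
+ * the address in CDEST.  Three strategies, in decreasing order of
+ * preference: goto_tb on both edges, goto_tb on the fallthru edge only,
+ * or a movcond on psw_addr followed by an exit from the TB.
+ */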
+static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
+ bool is_imm, int imm, TCGv_i64 cdest)
+{
+ DisasJumpType ret;
+ uint64_t dest = s->base.pc_next + 2 * imm;
+ TCGLabel *lab;
+
+ /* Take care of the special cases first. */
+ if (c->cond == TCG_COND_NEVER) {
+ ret = DISAS_NEXT;
+ goto egress;
+ }
+ if (is_imm) {
+ if (dest == s->pc_tmp) {
+ /* Branch to next. */
+ per_branch(s, true);
+ ret = DISAS_NEXT;
+ goto egress;
+ }
+ if (c->cond == TCG_COND_ALWAYS) {
+ ret = help_goto_direct(s, dest);
+ goto egress;
+ }
+ } else {
+ if (!cdest) {
+ /* E.g. bcr %r0 -> no branch. */
+ ret = DISAS_NEXT;
+ goto egress;
+ }
+ if (c->cond == TCG_COND_ALWAYS) {
+ tcg_gen_mov_i64(psw_addr, cdest);
+ per_branch(s, false);
+ ret = DISAS_PC_UPDATED;
+ goto egress;
+ }
+ }
+
+ if (use_goto_tb(s, s->pc_tmp)) {
+ if (is_imm && use_goto_tb(s, dest)) {
+ /* Both exits can use goto_tb. */
+ update_cc_op(s);
+
+ lab = gen_new_label();
+ if (c->is_64) {
+ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+ }
+
+ /* Branch not taken. */
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ tcg_gen_exit_tb(s->base.tb, 0);
+
+ /* Branch taken. */
+ gen_set_label(lab);
+ per_breaking_event(s);
+ tcg_gen_goto_tb(1);
+ tcg_gen_movi_i64(psw_addr, dest);
+ tcg_gen_exit_tb(s->base.tb, 1);
+
+ ret = DISAS_GOTO_TB;
+ } else {
+ /* Fallthru can use goto_tb, but taken branch cannot. */
+ /* Store taken branch destination before the brcond. This
+ avoids having to allocate a new local temp to hold it.
+ We'll overwrite this in the not taken case anyway. */
+ if (!is_imm) {
+ tcg_gen_mov_i64(psw_addr, cdest);
+ }
+
+ lab = gen_new_label();
+ if (c->is_64) {
+ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+ }
+
+ /* Branch not taken. */
+ update_cc_op(s);
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ tcg_gen_exit_tb(s->base.tb, 0);
+
+ gen_set_label(lab);
+ if (is_imm) {
+ tcg_gen_movi_i64(psw_addr, dest);
+ }
+ per_breaking_event(s);
+ ret = DISAS_PC_UPDATED;
+ }
+ } else {
+ /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
+ Most commonly we're single-stepping or some other condition that
+ disables all use of goto_tb. Just update the PC and exit. */
+
+ TCGv_i64 next = tcg_const_i64(s->pc_tmp);
+ if (is_imm) {
+ cdest = tcg_const_i64(dest);
+ }
+
+ if (c->is_64) {
+ tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
+ cdest, next);
+ per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 z = tcg_const_i64(0);
+ tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
+ tcg_gen_extu_i32_i64(t1, t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
+ per_branch_cond(s, TCG_COND_NE, t1, z);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(z);
+ }
+
+ if (is_imm) {
+ tcg_temp_free_i64(cdest);
+ }
+ tcg_temp_free_i64(next);
+
+ ret = DISAS_PC_UPDATED;
+ }
+
+ egress:
+ free_compare(c);
+ return ret;
+}
+
+/* ====================================================================== */
+/* The operations. These perform the bulk of the work for any insn,
+ usually after the operands have been loaded and output initialized. */
+
+static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_abs_i64(o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_add(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+ return DISAS_NEXT;
+}
+
+/* Compute carry into cc_src. */
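+/*
+ * For CC_OP_ADDU the carry is already held in cc_src as 0/1; for
+ * CC_OP_SUBU cc_src holds -1 (borrow) or 0, so adding 1 yields the
+ * 0/1 carry.  In the general case the CC is computed first, and its
+ * msb is the carry bit for logical add/subtract.
+ */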
+static void compute_carry(DisasContext *s)
+{
+ switch (s->cc_op) {
+ case CC_OP_ADDU:
+ /* The carry value is already in cc_src (1,0). */
+ break;
+ case CC_OP_SUBU:
+ tcg_gen_addi_i64(cc_src, cc_src, 1);
+ break;
+ default:
+ gen_op_calc_cc(s);
+ /* fall through */
+ case CC_OP_STATIC:
+ /* The carry flag is the msb of CC; compute into cc_src. */
+ tcg_gen_extu_i32_i64(cc_src, cc_op);
+ tcg_gen_shri_i64(cc_src, cc_src, 1);
+ break;
+ }
+}
+
+static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
+{
+ compute_carry(s);
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+ tcg_gen_add_i64(o->out, o->out, cc_src);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
+{
+ compute_carry(s);
+
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
+ tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
+ tcg_temp_free_i64(zero);
+
+ return DISAS_NEXT;
+}
+
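+/*
+ * ADD IMMEDIATE to storage.  With the interlocked-access facility
+ * (STFLE bit 45) the update must be performed atomically; without it,
+ * a plain load/add/store sequence is architecturally sufficient.
+ */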
+static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
+{
+ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
+
+ o->in1 = tcg_temp_new_i64();
+ if (non_atomic) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic addition in memory. */
+ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for atomic case: needed for setting CC. */
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+
+ if (non_atomic) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
+{
+ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);
+
+ o->in1 = tcg_temp_new_i64();
+ if (non_atomic) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic addition in memory. */
+ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for atomic case: needed for setting CC. */
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+
+ if (non_atomic) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_and(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for atomic case: needed for setting CC. */
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
+{
+ pc_to_link_info(o->out, s, s->pc_tmp);
+ if (o->in2) {
+ tcg_gen_mov_i64(psw_addr, o->in2);
+ per_branch(s, false);
+ return DISAS_PC_UPDATED;
+ } else {
+ return DISAS_NEXT;
+ }
+}
+
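+/*
+ * For BAL in 24-bit mode the link word is ILC in bits 0-1, CC in bits
+ * 2-3, the program mask in bits 4-7 and the 24-bit return address; in
+ * 31- and 64-bit mode the ordinary link information is saved instead.
+ */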
+static void save_link_info(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t;
+
+ if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
+ pc_to_link_info(o->out, s, s->pc_tmp);
+ return;
+ }
+ gen_op_calc_cc(s);
+ tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
+ tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
+ t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, psw_mask, 16);
+ tcg_gen_andi_i64(t, t, 0x0f000000);
+ tcg_gen_or_i64(o->out, o->out, t);
+ tcg_gen_extu_i32_i64(t, cc_op);
+ tcg_gen_shli_i64(t, t, 28);
+ tcg_gen_or_i64(o->out, o->out, t);
+ tcg_temp_free_i64(t);
+}
+
+static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
+{
+ save_link_info(s, o);
+ if (o->in2) {
+ tcg_gen_mov_i64(psw_addr, o->in2);
+ per_branch(s, false);
+ return DISAS_PC_UPDATED;
+ } else {
+ return DISAS_NEXT;
+ }
+}
+
+static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
+{
+ pc_to_link_info(o->out, s, s->pc_tmp);
+ return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2));
+}
+
+static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
+{
+ int m1 = get_field(s, m1);
+ bool is_imm = have_field(s, i2);
+ int imm = is_imm ? get_field(s, i2) : 0;
+ DisasCompare c;
+
+ /* BCR with R2 = 0 causes no branching */
+ if (have_field(s, r2) && get_field(s, r2) == 0) {
+ if (m1 == 14) {
+ /* Perform serialization */
+ /* FIXME: check for fast-BCR-serialization facility */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ }
+ if (m1 == 15) {
+ /* Perform serialization */
+ /* FIXME: perform checkpoint-synchronisation */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ }
+ return DISAS_NEXT;
+ }
+
+ disas_jcc(s, &c, m1);
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ bool is_imm = have_field(s, i2);
+ int imm = is_imm ? get_field(s, i2) : 0;
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_subi_i64(t, regs[r1], 1);
+ store_reg32_i64(r1, t);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int imm = get_field(s, i2);
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, regs[r1], 32);
+ tcg_gen_subi_i64(t, t, 1);
+ store_reg32h_i64(r1, t);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, 1, imm, o->in2);
+}
+
+static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ bool is_imm = have_field(s, i2);
+ int imm = is_imm ? get_field(s, i2) : 0;
+ DisasCompare c;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = true;
+ c.g1 = true;
+ c.g2 = false;
+
+ tcg_gen_subi_i64(regs[r1], regs[r1], 1);
+ c.u.s64.a = regs[r1];
+ c.u.s64.b = tcg_const_i64(0);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ bool is_imm = have_field(s, i2);
+ int imm = is_imm ? get_field(s, i2) : 0;
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_add_i64(t, regs[r1], regs[r3]);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
+ store_reg32_i64(r1, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ bool is_imm = have_field(s, i2);
+ int imm = is_imm ? get_field(s, i2) : 0;
+ DisasCompare c;
+
+ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+ c.is_64 = true;
+
+ if (r1 == (r3 | 1)) {
+ c.u.s64.b = load_reg(r3 | 1);
+ c.g2 = false;
+ } else {
+ c.u.s64.b = regs[r3 | 1];
+ c.g2 = true;
+ }
+
+ tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
+ c.u.s64.a = regs[r1];
+ c.g1 = true;
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
+{
+ int imm, m3 = get_field(s, m3);
+ bool is_imm;
+ DisasCompare c;
+
+ c.cond = ltgt_cond[m3];
+ if (s->insn->data) {
+ c.cond = tcg_unsigned_cond(c.cond);
+ }
+ c.is_64 = c.g1 = c.g2 = true;
+ c.u.s64.a = o->in1;
+ c.u.s64.b = o->in2;
+
+ is_imm = have_field(s, i4);
+ if (is_imm) {
+ imm = get_field(s, i4);
+ } else {
+ imm = 0;
+ o->out = get_address(s, 0, get_field(s, b4),
+ get_field(s, d4));
+ }
+
+ return help_branch(s, &c, is_imm, imm, o->out);
+}
+
+static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
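+/*
+ * Validate and pack the m3 (rounding mode) and m4 (modifier) fields of
+ * a floating-point instruction into a single i32, raising a
+ * specification exception for rounding modes the CPU model does not
+ * provide.
+ */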
+static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
+ bool m4_with_fpe)
+{
+ const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
+ uint8_t m3 = get_field(s, m3);
+ uint8_t m4 = get_field(s, m4);
+
+ /* m3 field was introduced with FPE */
+ if (!fpe && m3_with_fpe) {
+ m3 = 0;
+ }
+ /* m4 field was introduced with FPE */
+ if (!fpe && m4_with_fpe) {
+ m4 = 0;
+ }
+
+ /* Check for valid rounding modes. Mode 3 was introduced later. */
+ if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return NULL;
+ }
+
+ return tcg_const_i32(deposit32(m3, 4, 4, m4));
+}
+
+static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cegb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_celgb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
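+/*
+ * CHECKSUM: the helper returns the number of bytes processed and
+ * leaves the accumulated checksum to be fetched via return_low128;
+ * the R2/R2+1 address/length pair is then advanced by that count.
+ */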
+static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s, r2);
+ TCGv_i64 len = tcg_temp_new_i64();
+
+ gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
+ set_cc_static(s);
+ return_low128(o->out);
+
+ tcg_gen_add_i64(regs[r2], regs[r2], len);
+ tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
+ tcg_temp_free_i64(len);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
+{
+ int l = get_field(s, l1);
+ TCGv_i32 vl;
+
+ switch (l + 1) {
+ case 1:
+ tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 2:
+ tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 4:
+ tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 8:
+ tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
+ break;
+ default:
+ vl = tcg_const_i32(l);
+ gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
+ tcg_temp_free_i32(vl);
+ set_cc_static(s);
+ return DISAS_NEXT;
+ }
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r2 = get_field(s, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_clcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, o->in1);
+ gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
+ set_cc_static(s);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(m3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
+{
+ gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+ set_cc_static(s);
+ return_low128(o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+ tcg_gen_or_i64(o->out, o->out, t);
+ tcg_temp_free_i64(t);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
+{
+ int d2 = get_field(s, d2);
+ int b2 = get_field(s, b2);
+ TCGv_i64 addr, cc;
+
+ /* Note that in1 = R3 (new value) and
+ in2 = (zero-extended) R1 (expected value). */
+
+ addr = get_address(s, 0, b2, d2);
+ tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
+ get_mem_index(s), s->insn->data | MO_ALIGN);
+ tcg_temp_free_i64(addr);
+
+ /* Are the memory and expected values (un)equal? Note that this setcond
+ produces the output CC value, thus the NE sense of the test. */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(cc);
+ set_cc_static(s);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ int d2 = get_field(s, d2);
+ int b2 = get_field(s, b2);
+ DisasJumpType ret = DISAS_NEXT;
+ TCGv_i64 addr;
+ TCGv_i32 t_r1, t_r3;
+
+ /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
+ addr = get_address(s, 0, b2, d2);
+ t_r1 = tcg_const_i32(r1);
+ t_r3 = tcg_const_i32(r3);
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
+ } else if (HAVE_CMPXCHG128) {
+ gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
+ } else {
+ gen_helper_exit_atomic(cpu_env);
+ ret = DISAS_NORETURN;
+ }
+ tcg_temp_free_i64(addr);
+ tcg_temp_free_i32(t_r1);
+ tcg_temp_free_i32(t_r3);
+
+ set_cc_static(s);
+ return ret;
+}
+
+static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s, r3);
+ TCGv_i32 t_r3 = tcg_const_i32(r3);
+
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+ } else {
+ gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+ }
+ tcg_temp_free_i32(t_r3);
+
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
+{
+ MemOp mop = s->insn->data;
+ TCGv_i64 addr, old, cc;
+ TCGLabel *lab = gen_new_label();
+
+ /* Note that in1 = R1 (zero-extended expected value),
+ out = R1 (original reg), out2 = R1+1 (new value). */
+
+ addr = tcg_temp_new_i64();
+ old = tcg_temp_new_i64();
+ tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
+ tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
+ get_mem_index(s), mop | MO_ALIGN);
+ tcg_temp_free_i64(addr);
+
+ /* Are the memory and expected values (un)equal? */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+
+    /* Write back the output now, before the following branch,
+       so that we don't need local temps. */
+ if ((mop & MO_SIZE) == MO_32) {
+ tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
+ } else {
+ tcg_gen_mov_i64(o->out, old);
+ }
+ tcg_temp_free_i64(old);
+
+ /* If the comparison was equal, and the LSB of R2 was set,
+ then we need to flush the TLB (for all cpus). */
+ tcg_gen_xori_i64(cc, cc, 1);
+ tcg_gen_and_i64(cc, cc, o->in2);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
+ tcg_temp_free_i64(cc);
+
+ gen_helper_purge(cpu_env);
+ gen_set_label(lab);
+
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, o->in1);
+ gen_helper_cvd(t1, t2);
+ tcg_temp_free_i32(t2);
+ tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+ tcg_temp_free_i64(t1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s, m3);
+ TCGLabel *lab = gen_new_label();
+ TCGCond c;
+
+ c = tcg_invert_cond(ltgt_cond[m3]);
+ if (s->insn->data) {
+ c = tcg_unsigned_cond(c);
+ }
+ tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
+
+ /* Trap. */
+ gen_trap(s);
+
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s, m3);
+ int r1 = get_field(s, r1);
+ int r2 = get_field(s, r2);
+ TCGv_i32 tr1, tr2, chk;
+
+ /* R1 and R2 must both be even. */
+ if ((r1 | r2) & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
+ m3 = 0;
+ }
+
+ tr1 = tcg_const_i32(r1);
+ tr2 = tcg_const_i32(r2);
+ chk = tcg_const_i32(m3);
+
+ switch (s->insn->data) {
+ case 12:
+ gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ case 14:
+ gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ case 21:
+ gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ case 24:
+ gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ case 41:
+ gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ case 42:
+ gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_i32(tr1);
+ tcg_temp_free_i32(tr2);
+ tcg_temp_free_i32(chk);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
+
+ gen_helper_diag(cpu_env, r1, r3, func_code);
+
+ tcg_temp_free_i32(func_code);
+ tcg_temp_free_i32(r3);
+ tcg_temp_free_i32(r1);
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s, r2);
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
+{
+ /* No cache information provided. */
+ tcg_gen_movi_i64(o->out, -1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r2 = get_field(s, r2);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ /* Note the "subsequently" in the PoO, which implies a defined result
+ if r1 == r2. Thus we cannot defer these writes to an output hook. */
+ tcg_gen_shri_i64(t, psw_mask, 32);
+ store_reg32_i64(r1, t);
+ if (r2 != 0) {
+ store_reg32_i64(r2, psw_mask);
+ }
+
+ tcg_temp_free_i64(t);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ TCGv_i32 ilen;
+ TCGv_i64 v1;
+
+ /* Nested EXECUTE is not allowed. */
+ if (unlikely(s->ex_value)) {
+ gen_program_exception(s, PGM_EXECUTE);
+ return DISAS_NORETURN;
+ }
+
+ update_psw_addr(s);
+ update_cc_op(s);
+
+ if (r1 == 0) {
+ v1 = tcg_const_i64(0);
+ } else {
+ v1 = regs[r1];
+ }
+
+ ilen = tcg_const_i32(s->ilen);
+ gen_helper_ex(cpu_env, ilen, v1, o->in2);
+ tcg_temp_free_i32(ilen);
+
+ if (r1 == 0) {
+ tcg_temp_free_i64(v1);
+ }
+
+ return DISAS_PC_CC_UPDATED;
+}
+
+static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_fieb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_fidb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
+ return_low128(o->out2);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
+{
+ /* We'll use the original input for cc computation, since we get to
+ compare that against 0, which ought to be better than comparing
+ the real output against 64. It also lets cc_dst be a convenient
+ temporary during our computation. */
+ gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
+
+ /* R1 = IN ? CLZ(IN) : 64. */
+ tcg_gen_clzi_i64(o->out, o->in2, 64);
+
+ /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
+ value by 64, which is undefined. But since the shift is 64 iff the
+ input is zero, we still get the correct result after and'ing. */
+ tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
+ tcg_gen_shr_i64(o->out2, o->out2, o->out);
+ tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
+ return DISAS_NEXT;
+}
+
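+/*
+ * INSERT CHARACTERS UNDER MASK: each set bit of m3, from msb to lsb,
+ * pulls one byte from memory into the corresponding byte of R1.
+ * Contiguous masks are handled as a single wider load; CC is computed
+ * from just the inserted bits by way of the ccm mask.
+ */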
+static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s, m3);
+ int pos, len, base = s->insn->data;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ uint64_t ccm;
+
+ switch (m3) {
+ case 0xf:
+ /* Effectively a 32-bit load. */
+ tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+ len = 32;
+ goto one_insert;
+
+ case 0xc:
+ case 0x6:
+ case 0x3:
+ /* Effectively a 16-bit load. */
+ tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+ len = 16;
+ goto one_insert;
+
+ case 0x8:
+ case 0x4:
+ case 0x2:
+ case 0x1:
+ /* Effectively an 8-bit load. */
+ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ len = 8;
+ goto one_insert;
+
+ one_insert:
+ pos = base + ctz32(m3) * 8;
+ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
+ ccm = ((1ull << len) - 1) << pos;
+ break;
+
+ default:
+ /* This is going to be a sequence of loads and inserts. */
+ pos = base + 32 - 8;
+ ccm = 0;
+ while (m3) {
+ if (m3 & 0x8) {
+ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
+ ccm |= 0xff << pos;
+ }
+ m3 = (m3 << 1) & 0xf;
+ pos -= 8;
+ }
+ break;
+ }
+
+ tcg_gen_movi_i64(tmp, ccm);
+ gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
+ return DISAS_NEXT;
+}
+
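+/*
+ * INSERT PROGRAM MASK: deposit the condition code (bits 34-35) and
+ * program mask (bits 36-39) of the PSW into R1, zeroing bits 32-33
+ * and leaving the rest of the register unchanged.
+ */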
+static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1, t2;
+
+ gen_op_calc_cc(s);
+ t1 = tcg_temp_new_i64();
+ tcg_gen_extract_i64(t1, psw_mask, 40, 4);
+ t2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t2, cc_op);
+ tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
+ tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m4;
+
+ if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
+ m4 = tcg_const_i32(get_field(s, m4));
+ } else {
+ m4 = tcg_const_i32(0);
+ }
+ gen_helper_idte(cpu_env, o->in1, o->in2, m4);
+ tcg_temp_free_i32(m4);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m4;
+
+ if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
+ m4 = tcg_const_i32(get_field(s, m4));
+ } else {
+ m4 = tcg_const_i32(0);
+ }
+ gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
+ tcg_temp_free_i32(m4);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
+{
+ gen_helper_iske(o->out, cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+#endif
+
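+/*
+ * Common dispatcher for the message-security-assist instructions
+ * (KM, KMC, KIMD, ...).  The per-type register constraints from the
+ * PoO are checked cumulatively through the fall-through cases below.
+ */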
+static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
+{
+ int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
+ int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
+ int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
+ TCGv_i32 t_r1, t_r2, t_r3, type;
+
+ switch (s->insn->data) {
+ case S390_FEAT_TYPE_KMA:
+ if (r3 == r1 || r3 == r2) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ /* FALL THROUGH */
+ case S390_FEAT_TYPE_KMCTR:
+ if (r3 & 1 || !r3) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ /* FALL THROUGH */
+ case S390_FEAT_TYPE_PPNO:
+ case S390_FEAT_TYPE_KMF:
+ case S390_FEAT_TYPE_KMC:
+ case S390_FEAT_TYPE_KMO:
+ case S390_FEAT_TYPE_KM:
+ if (r1 & 1 || !r1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ /* FALL THROUGH */
+ case S390_FEAT_TYPE_KMAC:
+ case S390_FEAT_TYPE_KIMD:
+ case S390_FEAT_TYPE_KLMD:
+ if (r2 & 1 || !r2) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ /* FALL THROUGH */
+ case S390_FEAT_TYPE_PCKMO:
+ case S390_FEAT_TYPE_PCC:
+ break;
+ default:
+ g_assert_not_reached();
+    }
+
+ t_r1 = tcg_const_i32(r1);
+ t_r2 = tcg_const_i32(r2);
+ t_r3 = tcg_const_i32(r3);
+ type = tcg_const_i32(s->insn->data);
+ gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
+ set_cc_static(s);
+ tcg_temp_free_i32(t_r1);
+ tcg_temp_free_i32(t_r2);
+ tcg_temp_free_i32(t_r3);
+ tcg_temp_free_i32(type);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory;
+       the atomic fetch-add returns it into in2. */
+ tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+ s->insn->data | MO_ALIGN);
+ /* However, we need to recompute the addition for setting CC. */
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory;
+       the atomic fetch-and returns it into in2. */
+ tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+ s->insn->data | MO_ALIGN);
+ /* However, we need to recompute the operation for setting CC. */
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
+{
+    /* The real output is indeed the original value in memory;
+       the atomic fetch-or returns it into in2. */
+ tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+ s->insn->data | MO_ALIGN);
+ /* However, we need to recompute the operation for setting CC. */
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
+{
+ /* The real output is the original value in memory. */
+ tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+ s->insn->data | MO_ALIGN);
+ /* However, we need to recompute the operation for setting CC. */
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ldeb(o->out, cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_ledb(o->out, cpu_env, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+
+ if (!m34) {
+ return DISAS_NORETURN;
+ }
+ gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
+ tcg_temp_free_i32(m34);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lxdb(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lxeb(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shli_i64(o->out, o->in2, 32);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ store_reg32_i64(get_field(s, r1), o->in2);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ store_reg32h_i64(get_field(s, r1), o->in2);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
+{
+ DisasCompare c;
+
+ disas_jcc(s, &c, get_field(s, m3));
+
+ if (c.is_64) {
+ tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
+ o->in2, o->in1);
+ free_compare(&c);
+ } else {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ TCGv_i64 t, z;
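+
+ /* Materialize the 32-bit comparison as a 0/1 value, widen it, and
+ select with a 64-bit movcond against zero. */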
+
+ tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
+ free_compare(&c);
+
+ t = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t, t32);
+ tcg_temp_free_i32(t32);
+
+ z = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(z);
+ }
+
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_lctl(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return DISAS_PC_STALE_NOCHAIN;
+}
+
+static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_lctlg(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return DISAS_PC_STALE_NOCHAIN;
+}
+
+static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lra(o->out, cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1, t2;
+
+ per_breaking_event(s);
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
+ MO_TEUL | MO_ALIGN_8);
+ tcg_gen_addi_i64(o->in2, o->in2, 4);
+ tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
+ /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
+ tcg_gen_shli_i64(t1, t1, 32);
+ gen_helper_load_psw(cpu_env, t1, t2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ return DISAS_NORETURN;
+}
+
+static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1, t2;
+
+ per_breaking_event(s);
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
+ MO_TEQ | MO_ALIGN_8);
+ tcg_gen_addi_i64(o->in2, o->in2, 8);
+ tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+ gen_helper_load_psw(cpu_env, t1, t2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ return DISAS_NORETURN;
+}
+#endif
+
+static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_lam(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ t1 = tcg_temp_new_i64();
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ tcg_temp_free(t1);
+ return DISAS_NEXT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ store_reg32_i64(r3, t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ return DISAS_NEXT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t2, 4);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t2);
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ t1 = tcg_temp_new_i64();
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ tcg_temp_free(t1);
+ return DISAS_NEXT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ store_reg32h_i64(r3, t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ return DISAS_NEXT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t2, 4);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t2);
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+ tcg_gen_mov_i64(regs[r1], t1);
+ tcg_temp_free(t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t1);
+ return DISAS_NEXT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t1, 8);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t1);
+ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ }
+ tcg_temp_free(t1);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 a1, a2;
+ MemOp mop = s->insn->data;
+
+ /* In a parallel context, stop the world and single step. */
+ if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+ update_psw_addr(s);
+ update_cc_op(s);
+ gen_exception(EXCP_ATOMIC);
+ return DISAS_NORETURN;
+ }
+
+ /* In a serial context, perform the two loads ... */
+ a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
+ a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
+ tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
+ tcg_temp_free_i64(a1);
+ tcg_temp_free_i64(a2);
+
+ /* ... and indicate that we performed them while interlocked. */
+ gen_op_movi_cc(s, 0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
+{
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_helper_lpq(o->out, cpu_env, o->in2);
+ } else if (HAVE_ATOMIC128) {
+ gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
+ } else {
+ gen_helper_exit_atomic(cpu_env);
+ return DISAS_NORETURN;
+ }
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, -256);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
+{
+ const int64_t block_size = (1ull << (get_field(s, m3) + 6));
+
+ if (get_field(s, m3) > 6) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
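+ /* addr1 = -(addr1 | -block_size) = block_size - (addr1 % block_size),
+ i.e. the distance to the next block boundary, capped at 16 below. */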
+ tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
+ tcg_gen_neg_i64(o->addr1, o->addr1);
+ tcg_gen_movi_i64(o->out, 16);
+ tcg_gen_umin_i64(o->out, o->out, o->addr1);
+ gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
+{
+#if !defined(CONFIG_USER_ONLY)
+ TCGv_i32 i2;
+#endif
+ const uint16_t monitor_class = get_field(s, i2);
+
+ if (monitor_class & 0xff00) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ i2 = tcg_const_i32(monitor_class);
+ gen_helper_monitor_call(cpu_env, o->addr1, i2);
+ tcg_temp_free_i32(i2);
+#endif
+ /* Defaults to a NOP. */
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
+{
+ o->out = o->in2;
+ o->g_out = o->g_in2;
+ o->in2 = NULL;
+ o->g_in2 = false;
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
+{
+ int b2 = get_field(s, b2);
+ TCGv ar1 = tcg_temp_new_i64();
+
+ o->out = o->in2;
+ o->g_out = o->g_in2;
+ o->in2 = NULL;
+ o->g_in2 = false;
+
+ switch (s->base.tb->flags & FLAG_MASK_ASC) {
+ case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
+ tcg_gen_movi_i64(ar1, 0);
+ break;
+ case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
+ tcg_gen_movi_i64(ar1, 1);
+ break;
+ case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
+ if (b2) {
+ tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
+ } else {
+ tcg_gen_movi_i64(ar1, 0);
+ }
+ break;
+ case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
+ tcg_gen_movi_i64(ar1, 2);
+ break;
+ }
+
+ tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
+ tcg_temp_free_i64(ar1);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
+{
+ o->out = o->in1;
+ o->out2 = o->in2;
+ o->g_out = o->g_in1;
+ o->g_out2 = o->g_in2;
+ o->in1 = NULL;
+ o->in2 = NULL;
+ o->g_in1 = o->g_in2 = false;
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r2 = get_field(s, r2);
+ TCGv_i32 t1, t2;
+
+ /* r1 and r2 must be even. */
+ if (r1 & 1 || r2 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t2 = tcg_const_i32(r2);
+ gen_helper_mvcl(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i32 t1, t3;
+
+ /* r1 and r3 must be even. */
+ if (r1 & 1 || r3 & 1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t1 = tcg_const_i32(r1);
+ t3 = tcg_const_i32(r3);
+ gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t3);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s, r3);
+ gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, l1);
+ gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, l1);
+ gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_mvst(cc_op, cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_mul_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
+ gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg(get_field(s, r3));
+ gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
+ gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg(get_field(s, r3));
+ gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 z, n;
+ z = tcg_const_i64(0);
+ n = tcg_temp_new_i64();
+ tcg_gen_neg_i64(n, o->in2);
+ tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
+ tcg_temp_free_i64(n);
+ tcg_temp_free_i64(z);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_neg_i64(o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_or(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute the result even in the atomic case; it is needed to set CC. */
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_pack(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l2 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pka(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
+{
+ int l2 = get_field(s, l2) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and must not exceed 64 bytes. */
+ if ((l2 & 1) || (l2 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ l = tcg_const_i32(l2);
+ gen_helper_pku(cpu_env, o->addr1, o->in2, l);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
+{
+ gen_helper_popcnt(o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ptlb(cpu_env);
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
+{
+ int i3 = get_field(s, i3);
+ int i4 = get_field(s, i4);
+ int i5 = get_field(s, i5);
+ int do_zero = i4 & 0x80;
+ uint64_t mask, imask, pmask;
+ int pos, len, rot;
+
+ /* Adjust the arguments for the specific insn. */
+ switch (s->fields.op2) {
+ case 0x55: /* risbg */
+ case 0x59: /* risbgn */
+ i3 &= 63;
+ i4 &= 63;
+ pmask = ~0;
+ break;
+ case 0x5d: /* risbhg */
+ i3 &= 31;
+ i4 &= 31;
+ pmask = 0xffffffff00000000ull;
+ break;
+ case 0x51: /* risblg */
+ i3 = (i3 & 31) + 32;
+ i4 = (i4 & 31) + 32;
+ pmask = 0x00000000ffffffffull;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* MASK is the set of bits to be inserted from R2. */
+ if (i3 <= i4) {
+ /* [0...i3---i4...63] */
+ mask = (-1ull >> i3) & (-1ull << (63 - i4));
+ } else {
+ /* [0---i4...i3---63] */
+ mask = (-1ull >> i3) | (-1ull << (63 - i4));
+ }
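+ /* E.g. i3 = 40, i4 = 47 selects bits 40-47 (IBM numbering, bit 0
+ is the msb): mask = 0x0000000000ff0000ull. */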
+ /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
+ mask &= pmask;
+
+ /* IMASK is the set of bits to be kept from R1. In the case of the high/low
+ insns, we need to keep the other half of the register. */
+ imask = ~mask | ~pmask;
+ if (do_zero) {
+ imask = ~pmask;
+ }
+
+ len = i4 - i3 + 1;
+ pos = 63 - i4;
+ rot = i5 & 63;
+
+ /* In some cases we can implement this with extract. */
+ if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
+ tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
+ return DISAS_NEXT;
+ }
+
+ /* In some cases we can implement this with deposit. */
+ if (len > 0 && (imask == 0 || ~mask == imask)) {
+ /* Note that we rotate the bits to be inserted to the lsb, not to
+ the position as described in the PoO. */
+ rot = (rot - pos) & 63;
+ } else {
+ pos = -1;
+ }
+
+ /* Rotate the input as necessary. */
+ tcg_gen_rotli_i64(o->in2, o->in2, rot);
+
+ /* Insert the selected bits into the output. */
+ if (pos >= 0) {
+ if (imask == 0) {
+ tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
+ } else {
+ tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+ }
+ } else if (imask == 0) {
+ tcg_gen_andi_i64(o->out, o->in2, mask);
+ } else {
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_andi_i64(o->out, o->out, imask);
+ tcg_gen_or_i64(o->out, o->out, o->in2);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
+{
+ int i3 = get_field(s, i3);
+ int i4 = get_field(s, i4);
+ int i5 = get_field(s, i5);
+ uint64_t mask;
+
+ /* If this is a test-only form, arrange to discard the result. */
+ if (i3 & 0x80) {
+ o->out = tcg_temp_new_i64();
+ o->g_out = false;
+ }
+
+ i3 &= 63;
+ i4 &= 63;
+ i5 &= 63;
+
+ /* MASK is the set of bits to be operated on from R2.
+ Take care of the I3/I4 wraparound. */
+ mask = ~0ull >> i3;
+ if (i3 <= i4) {
+ mask ^= ~0ull >> i4 >> 1;
+ } else {
+ mask |= ~(~0ull >> i4 >> 1);
+ }
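+ /* E.g. i3 = 8, i4 = 15 yields mask = 0x00ff000000000000ull. */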
+
+ /* Rotate the input as necessary. */
+ tcg_gen_rotli_i64(o->in2, o->in2, i5);
+
+ /* Operate. */
+ switch (s->fields.op2) {
+ case 0x54: /* AND */
+ tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+ tcg_gen_and_i64(o->out, o->out, o->in2);
+ break;
+ case 0x56: /* OR */
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_or_i64(o->out, o->out, o->in2);
+ break;
+ case 0x57: /* XOR */
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_xor_i64(o->out, o->out, o->in2);
+ break;
+ default:
+ abort();
+ }
+
+ /* Set the CC. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap64_i64(o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 to = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, o->in1);
+ tcg_gen_extrl_i64_i32(t2, o->in2);
+ tcg_gen_rotl_i32(to, t1, t2);
+ tcg_gen_extu_i32_i64(o->out, to);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(to);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_rotl_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
+{
+ gen_helper_rrbe(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sacf(cpu_env, o->in2);
+ /* Addressing mode has changed, so end the block. */
+ return DISAS_PC_STALE;
+}
+#endif
+
+static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
+{
+ int sam = s->insn->data;
+ TCGv_i64 tsam;
+ uint64_t mask;
+
+ switch (sam) {
+ case 0:
+ mask = 0xffffff;
+ break;
+ case 1:
+ mask = 0x7fffffff;
+ break;
+ default:
+ mask = -1;
+ break;
+ }
+
+ /* Bizarre but true, we check the address of the current insn for the
+ specification exception, not the next to be executed. Thus the PoO
+ documents that Bad Things Happen two bytes before the end. */
+ if (s->base.pc_next & ~mask) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ s->pc_tmp &= mask;
+
+ tsam = tcg_const_i64(sam);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
+ tcg_temp_free_i64(tsam);
+
+ /* Always exit the TB, since we (may have) changed execution mode. */
+ return DISAS_PC_STALE;
+}
+
+static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqeb(o->out, cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqdb(o->out, cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
+ return_low128(o->out2);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
+ set_cc_static(s);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
+{
+ DisasCompare c;
+ TCGv_i64 a, h;
+ TCGLabel *lab;
+ int r1;
+
+ disas_jcc(s, &c, get_field(s, m3));
+
+ /* We want to store when the condition is fulfilled, so branch
+ out when it's not. */
+ c.cond = tcg_invert_cond(c.cond);
+
+ lab = gen_new_label();
+ if (c.is_64) {
+ tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
+ }
+ free_compare(&c);
+
+ r1 = get_field(s, r1);
+ a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
+ switch (s->insn->data) {
+ case 1: /* STOCG */
+ tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+ break;
+ case 0: /* STOC */
+ tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+ break;
+ case 2: /* STOCFH */
+ h = tcg_temp_new_i64();
+ tcg_gen_shri_i64(h, regs[r1], 32);
+ tcg_gen_qemu_st32(h, a, get_mem_index(s));
+ tcg_temp_free_i64(h);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(a);
+
+ gen_set_label(lab);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
+{
+ uint64_t sign = 1ull << s->insn->data;
+ enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
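+ /* Record the CC inputs before in1 is clobbered below. */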
+ gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
+ tcg_gen_shl_i64(o->out, o->in1, o->in2);
+ /* The arithmetic left shift is curious in that it does not affect
+ the sign bit. Copy that over from the source unchanged. */
+ tcg_gen_andi_i64(o->out, o->out, ~sign);
+ tcg_gen_andi_i64(o->in1, o->in1, sign);
+ tcg_gen_or_i64(o->out, o->out, o->in1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shl_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_sar_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shr_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sfpc(cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sfas(cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
+{
+ /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
+ tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
+ gen_helper_srnm(cpu_env, o->addr1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
+{
+ /* Bits 0-55 are ignored. */
+ tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
+ gen_helper_srnm(cpu_env, o->addr1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* Bits other than 61-63 are ignored. */
+ tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
+
+ /* No need to call a helper; we don't implement DFP. */
+ tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
+ tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
+{
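+ /* Bits 2-3 of the operand's low word become the CC; bits 4-7
+ become the program mask. */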
+ tcg_gen_extrl_i64_i32(cc_op, o->in1);
+ tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
+ set_cc_static(s);
+
+ tcg_gen_shri_i64(o->in1, o->in1, 24);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
+{
+ int b1 = get_field(s, b1);
+ int d1 = get_field(s, d1);
+ int b2 = get_field(s, b2);
+ int d2 = get_field(s, d2);
+ int r3 = get_field(s, r3);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* fetch all operands first */
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(o->in1, regs[b1], d1);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(o->in2, regs[b2], d2);
+ o->addr1 = tcg_temp_new_i64();
+ gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
+
+ /* load the third operand into r3 before modifying anything */
+ tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
+
+ /* subtract CPU timer from first operand and store in GR0 */
+ gen_helper_stpt(tmp, cpu_env);
+ tcg_gen_sub_i64(regs[0], o->in1, tmp);
+
+ /* store second operand in GR1 */
+ tcg_gen_mov_i64(regs[1], o->in2);
+
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shri_i64(o->in2, o->in2, 4);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sske(cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return DISAS_PC_STALE_NOCHAIN;
+}
+
+static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stck(o->out, cpu_env);
+ /* ??? We don't implement clock states. */
+ gen_op_movi_cc(s, 0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 c1 = tcg_temp_new_i64();
+ TCGv_i64 c2 = tcg_temp_new_i64();
+ TCGv_i64 todpr = tcg_temp_new_i64();
+ gen_helper_stck(c1, cpu_env);
+ /* The 16-bit value is stored in a uint32_t (only valid bits set). */
+ tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
+ /* Shift the 64-bit value into its place as a zero-extended
+ 104-bit value. Note that "bit positions 64-103 are always
+ non-zero so that they compare differently to STCK"; we set
+ the least significant bit to 1. */
+ tcg_gen_shli_i64(c2, c1, 56);
+ tcg_gen_shri_i64(c1, c1, 8);
+ tcg_gen_ori_i64(c2, c2, 0x10000);
+ tcg_gen_or_i64(c2, c2, todpr);
+ tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 8);
+ tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+ tcg_temp_free_i64(c1);
+ tcg_temp_free_i64(c2);
+ tcg_temp_free_i64(todpr);
+ /* ??? We don't implement clock states. */
+ gen_op_movi_cc(s, 0);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ gen_helper_sck(cc_op, cpu_env, o->in1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sckc(cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sckpf(cpu_env, regs[0]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stckc(o->out, cpu_env);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_stctg(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_stctl(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
+{
+ gen_helper_spt(cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stfl(cpu_env);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stpt(o->out, cpu_env);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
+{
+ gen_helper_spx(cpu_env, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_xsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_csch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_hsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_msch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
+{
+ gen_helper_rchp(cpu_env, regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_rsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sal(cpu_env, regs[1]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
+{
+ gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
+{
+ /* From KVM code: not provided; set CC = 3 for subchannel not operational. */
+ gen_op_movi_cc(s, 3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
+{
+ /* The instruction is suppressed if not provided. */
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ssch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stsch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stcrw(cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tpi(cc_op, cpu_env, o->addr1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tsch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_chsc(cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
+ tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
+{
+ uint64_t i2 = get_field(s, i2);
+ TCGv_i64 t;
+
+ /* It is important to do what the instruction name says: STORE THEN.
+ If we let the output hook perform the store, then if we fault and
+ restart we'll have the wrong SYSTEM MASK in place. */
+ t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, psw_mask, 56);
+ tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+ tcg_temp_free_i64(t);
+
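+ /* Opcode 0xac is STNSM (AND the mask in); STOSM (0xad) ORs it in. */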
+ if (s->fields.op == 0xac) {
+ tcg_gen_andi_i64(psw_mask, psw_mask,
+ (i2 << 56) | 0x00ffffffffffffffull);
+ } else {
+ tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
+ }
+
+ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+ return DISAS_PC_STALE_NOCHAIN;
+}
+
+static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
+
+ if (s->base.tb->flags & FLAG_MASK_PER) {
+ update_psw_addr(s);
+ gen_helper_per_store_real(cpu_env);
+ }
+ return DISAS_NEXT;
+}
+#endif
+
+static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stfle(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ gen_helper_stam(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s, m3);
+ int pos, base = s->insn->data;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ pos = base + ctz32(m3) * 8;
+ switch (m3) {
+ case 0xf:
+ /* Effectively a 32-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+ break;
+
+ case 0xc:
+ case 0x6:
+ case 0x3:
+ /* Effectively a 16-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+ break;
+
+ case 0x8:
+ case 0x4:
+ case 0x2:
+ case 0x1:
+ /* Effectively an 8-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ break;
+
+ default:
+ /* This is going to be a sequence of shifts and stores. */
+ pos = base + 32 - 8;
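+ /* Walk M3 from its msb; each selected byte of R1 is stored at
+ the next successive byte address. */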
+ while (m3) {
+ if (m3 & 0x8) {
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ }
+ m3 = (m3 << 1) & 0xf;
+ pos -= 8;
+ }
+ break;
+ }
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ int size = s->insn->data;
+ TCGv_i64 tsize = tcg_const_i64(size);
+
+ while (1) {
+ if (size == 8) {
+ tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
+ }
+ if (r1 == r3) {
+ break;
+ }
+ tcg_gen_add_i64(o->in2, o->in2, tsize);
+ r1 = (r1 + 1) & 15;
+ }
+
+ tcg_temp_free_i64(tsize);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ int r3 = get_field(s, r3);
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_const_i64(4);
+ TCGv_i64 t32 = tcg_const_i64(32);
+
+ while (1) {
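+ /* The 32-bit store takes the low bits, so bring the high half down. */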
+ tcg_gen_shr_i64(t, regs[r1], t32);
+ tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
+ if (r1 == r3) {
+ break;
+ }
+ tcg_gen_add_i64(o->in2, o->in2, t4);
+ r1 = (r1 + 1) & 15;
+ }
+
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(t4);
+ tcg_temp_free_i64(t32);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
+{
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
+ } else if (HAVE_ATOMIC128) {
+ gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
+ } else {
+ gen_helper_exit_atomic(cpu_env);
+ return DISAS_NORETURN;
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_srst(cpu_env, r1, r2);
+
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_srstu(cpu_env, r1, r2);
+
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_sub_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_movi_i64(cc_src, 0);
+ tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
+ return DISAS_NEXT;
+}
+
+/* Compute borrow (0, -1) into cc_src. */
+static void compute_borrow(DisasContext *s)
+{
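+ /* The cases fall through: computing the CC leaves it static, and
+ extracting the carry from a static CC leaves an ADDU-style carry
+ that is then converted to a borrow. */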
+ switch (s->cc_op) {
+ case CC_OP_SUBU:
+ /* The borrow value is already in cc_src (0,-1). */
+ break;
+ default:
+ gen_op_calc_cc(s);
+ /* fall through */
+ case CC_OP_STATIC:
+ /* The carry flag is the msb of CC; compute into cc_src. */
+ tcg_gen_extu_i32_i64(cc_src, cc_op);
+ tcg_gen_shri_i64(cc_src, cc_src, 1);
+ /* fall through */
+ case CC_OP_ADDU:
+ /* Convert carry (1,0) to borrow (0,-1). */
+ tcg_gen_subi_i64(cc_src, cc_src, 1);
+ break;
+ }
+}
+
+static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
+{
+ compute_borrow(s);
+
+ /* Borrow is {0, -1}, so add to subtract. */
+ tcg_gen_add_i64(o->out, o->in1, cc_src);
+ tcg_gen_sub_i64(o->out, o->out, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
+{
+ compute_borrow(s);
+
+ /*
+ * Borrow is {0, -1}, so add to subtract; replicate the
+ * borrow input to produce 128-bit -1 for the addition.
+ */
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
+ tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
+ tcg_temp_free_i64(zero);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t;
+
+ update_psw_addr(s);
+ update_cc_op(s);
+
+ t = tcg_const_i32(get_field(s, i1) & 0xff);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
+ tcg_temp_free_i32(t);
+
+ t = tcg_const_i32(s->ilen);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
+ tcg_temp_free_i32(t);
+
+ gen_exception(EXCP_SVC);
+ return DISAS_NORETURN;
+}
+
+static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
+{
+ int cc = 0;
+
+ cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
+ cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
+ gen_op_movi_cc(s, cc);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
+{
+ gen_helper_testblock(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+#endif
+
+static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
+ gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
+ tcg_temp_free_i32(l1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_tr(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+ TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
+ TCGv_i32 tst = tcg_temp_new_i32();
+ int m3 = get_field(s, m3);
+
+ if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
+ m3 = 0;
+ }
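+ /* With the low bit of m3 set, pass -1 as the test character;
+ otherwise take it from regs[0], truncated to the character size. */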
+ if (m3 & 1) {
+ tcg_gen_movi_i32(tst, -1);
+ } else {
+ tcg_gen_extrl_i64_i32(tst, regs[0]);
+ if (s->insn->opc & 3) {
+ tcg_gen_ext8u_i32(tst, tst);
+ } else {
+ tcg_gen_ext16u_i32(tst, tst);
+ }
+ }
+ gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
+
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ tcg_temp_free_i32(sizes);
+ tcg_temp_free_i32(tst);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_const_i32(0xff);
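+ /* Atomically set the byte to all ones; the CC is the byte's
+ former leftmost bit. */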
+ tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
+ tcg_gen_extract_i32(cc_op, t1, 7, 1);
+ tcg_temp_free_i32(t1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+ gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must not exceed 32 bytes. */
+ if (l1 > 32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
+{
+ int l1 = get_field(s, l1) + 1;
+ TCGv_i32 l;
+
+ /* The length must be even and must not exceed 64 bytes. */
+ if ((l1 & 1) || (l1 > 64)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ l = tcg_const_i32(l1);
+ gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
+{
+ int d1 = get_field(s, d1);
+ int d2 = get_field(s, d2);
+ int b1 = get_field(s, b1);
+ int b2 = get_field(s, b2);
+ int l = get_field(s, l1);
+ TCGv_i32 t32;
+
+ o->addr1 = get_address(s, 0, b1, d1);
+
+ /* If the addresses are identical, this is a store/memset of zero. */
+ if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
+ o->in2 = tcg_const_i64(0);
+
+ l++;
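+ /* Store the zero in the largest chunks possible: 8, 4, 2, then 1 byte. */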
+ while (l >= 8) {
+ tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
+ l -= 8;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 8);
+ }
+ }
+ if (l >= 4) {
+ tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
+ l -= 4;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 4);
+ }
+ }
+ if (l >= 2) {
+ tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
+ l -= 2;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 2);
+ }
+ }
+ if (l) {
+ tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
+ }
+ gen_op_movi_cc(s, 0);
+ return DISAS_NEXT;
+ }
+
+ /* But in general we'll defer to a helper. */
+ o->in2 = get_address(s, 0, b2, d2);
+ t32 = tcg_const_i32(l);
+ gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
+ tcg_temp_free_i32(t32);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for atomic case: needed for setting CC. */
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_const_i64(0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_const_i64(0);
+ o->out2 = o->out;
+ o->g_out2 = true;
+ return DISAS_NEXT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_clp(cpu_env, r2);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_pcilg(cpu_env, r1, r2);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_pcistg(cpu_env, r1, r2);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+
+ gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
+ tcg_temp_free_i32(ar);
+ tcg_temp_free_i32(r1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sic(cpu_env, o->in1, o->in2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
+
+ gen_helper_rpcit(cpu_env, r1, r2);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+ TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+
+ gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
+ tcg_temp_free_i32(ar);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+ TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
+
+ gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
+ tcg_temp_free_i32(ar);
+ tcg_temp_free_i32(r1);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+#endif
+
+#include "translate_vx.c.inc"
+
+/* ====================================================================== */
+/* The "Cc OUTput" generators. Given the generated output (and in some cases
+ the original inputs), update the various cc data structures in order to
+ be able to compute the new condition code. */
+
+static void cout_abs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
+}
+
+static void cout_abs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
+}
+
+static void cout_adds32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
+}
+
+static void cout_adds64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
+}
+
+static void cout_addu32(DisasContext *s, DisasOps *o)
+{
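+ /* The inputs were zero-extended to 64 bits, so the carry-out of the
+ 32-bit addition is bit 32 of the result. */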
+ tcg_gen_shri_i64(cc_src, o->out, 32);
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
+}
+
+static void cout_addu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
+}
+
+static void cout_cmps32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
+}
+
+static void cout_cmps64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
+}
+
+static void cout_cmpu32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
+}
+
+static void cout_cmpu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
+}
+
+static void cout_f32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
+}
+
+static void cout_f64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
+}
+
+static void cout_f128(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
+}
+
+static void cout_nabs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
+}
+
+static void cout_nabs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
+}
+
+static void cout_neg32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
+}
+
+static void cout_neg64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
+}
+
+static void cout_nz32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
+}
+
+static void cout_nz64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
+}
+
+static void cout_s32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
+}
+
+static void cout_s64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
+}
+
+static void cout_subs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
+}
+
+static void cout_subu32(DisasContext *s, DisasOps *o)
+{
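+ /* For a 32-bit unsigned subtraction done in 64 bits, bits 63..32 of
+ the result are all ones exactly when a borrow occurred. */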
+ tcg_gen_sari_i64(cc_src, o->out, 32);
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
+}
+
+static void cout_subu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
+}
+
+static void cout_tm32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
+}
+
+static void cout_tm64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
+}
+
+static void cout_muls32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
+}
+
+static void cout_muls64(DisasContext *s, DisasOps *o)
+{
+ /* out contains "high" part, out2 contains "low" part of 128 bit result */
+ gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
+}
+
+/* ====================================================================== */
+/* The "PREParation" generators. These initialize the DisasOps.OUT fields
+ with the TCG register to which we will write. Used in combination with
+ the "wout" generators, in some cases we need a new temporary, and in
+ some cases we can write to a TCG global. */
+
+static void prep_new(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_temp_new_i64();
+}
+#define SPEC_prep_new 0
+
+static void prep_new_P(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_temp_new_i64();
+ o->out2 = tcg_temp_new_i64();
+}
+#define SPEC_prep_new_P 0
+
+static void prep_r1(DisasContext *s, DisasOps *o)
+{
+ o->out = regs[get_field(s, r1)];
+ o->g_out = true;
+}
+#define SPEC_prep_r1 0
+
+static void prep_r1_P(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ o->out = regs[r1];
+ o->out2 = regs[r1 + 1];
+ o->g_out = o->g_out2 = true;
+}
+#define SPEC_prep_r1_P SPEC_r1_even
+
+/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
+static void prep_x1(DisasContext *s, DisasOps *o)
+{
+ o->out = load_freg(get_field(s, r1));
+ o->out2 = load_freg(get_field(s, r1) + 2);
+}
+#define SPEC_prep_x1 SPEC_r1_f128
+
+/* ====================================================================== */
+/* The "Write OUTput" generators. These generally perform some non-trivial
+ copy of data to TCG globals, or to main memory. The trivial cases are
+ generally handled by having a "prep" generator install the TCG global
+ as the destination of the operation. */
+
+static void wout_r1(DisasContext *s, DisasOps *o)
+{
+ store_reg(get_field(s, r1), o->out);
+}
+#define SPEC_wout_r1 0
+
+static void wout_out2_r1(DisasContext *s, DisasOps *o)
+{
+ store_reg(get_field(s, r1), o->out2);
+}
+#define SPEC_wout_out2_r1 0
+
+static void wout_r1_8(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
+}
+#define SPEC_wout_r1_8 0
+
+static void wout_r1_16(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
+}
+#define SPEC_wout_r1_16 0
+
+static void wout_r1_32(DisasContext *s, DisasOps *o)
+{
+ store_reg32_i64(get_field(s, r1), o->out);
+}
+#define SPEC_wout_r1_32 0
+
+static void wout_r1_32h(DisasContext *s, DisasOps *o)
+{
+ store_reg32h_i64(get_field(s, r1), o->out);
+}
+#define SPEC_wout_r1_32h 0
+
+static void wout_r1_P32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ store_reg32_i64(r1, o->out);
+ store_reg32_i64(r1 + 1, o->out2);
+}
+#define SPEC_wout_r1_P32 SPEC_r1_even
+
+static void wout_r1_D32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ store_reg32_i64(r1 + 1, o->out);
+ tcg_gen_shri_i64(o->out, o->out, 32);
+ store_reg32_i64(r1, o->out);
+}
+#define SPEC_wout_r1_D32 SPEC_r1_even
+
+static void wout_r3_P32(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s, r3);
+ store_reg32_i64(r3, o->out);
+ store_reg32_i64(r3 + 1, o->out2);
+}
+#define SPEC_wout_r3_P32 SPEC_r3_even
+
+static void wout_r3_P64(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s, r3);
+ store_reg(r3, o->out);
+ store_reg(r3 + 1, o->out2);
+}
+#define SPEC_wout_r3_P64 SPEC_r3_even
+
+static void wout_e1(DisasContext *s, DisasOps *o)
+{
+ store_freg32_i64(get_field(s, r1), o->out);
+}
+#define SPEC_wout_e1 0
+
+static void wout_f1(DisasContext *s, DisasOps *o)
+{
+ store_freg(get_field(s, r1), o->out);
+}
+#define SPEC_wout_f1 0
+
+static void wout_x1(DisasContext *s, DisasOps *o)
+{
+ int f1 = get_field(s, r1);
+ store_freg(f1, o->out);
+ store_freg(f1 + 2, o->out2);
+}
+#define SPEC_wout_x1 SPEC_r1_f128
+
+static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, r1) != get_field(s, r2)) {
+ store_reg32_i64(get_field(s, r1), o->out);
+ }
+}
+#define SPEC_wout_cond_r1r2_32 0
+
+static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, r1) != get_field(s, r2)) {
+ store_freg32_i64(get_field(s, r1), o->out);
+ }
+}
+#define SPEC_wout_cond_e1e2 0
+
+static void wout_m1_8(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_8 0
+
+static void wout_m1_16(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_16 0
+
+#ifndef CONFIG_USER_ONLY
+static void wout_m1_16a(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
+}
+#define SPEC_wout_m1_16a 0
+#endif
+
+static void wout_m1_32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_32 0
+
+#ifndef CONFIG_USER_ONLY
+static void wout_m1_32a(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
+}
+#define SPEC_wout_m1_32a 0
+#endif
+
+static void wout_m1_64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_64 0
+
+#ifndef CONFIG_USER_ONLY
+static void wout_m1_64a(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+}
+#define SPEC_wout_m1_64a 0
+#endif
+
+static void wout_m2_32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
+}
+#define SPEC_wout_m2_32 0
+
+static void wout_in2_r1(DisasContext *s, DisasOps *o)
+{
+ store_reg(get_field(s, r1), o->in2);
+}
+#define SPEC_wout_in2_r1 0
+
+static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
+{
+ store_reg32_i64(get_field(s, r1), o->in2);
+}
+#define SPEC_wout_in2_r1_32 0
+
+/* ====================================================================== */
+/* The "INput 1" generators. These load the first operand to an insn. */
+
+static void in1_r1(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(s, r1));
+}
+#define SPEC_in1_r1 0
+
+static void in1_r1_o(DisasContext *s, DisasOps *o)
+{
+ o->in1 = regs[get_field(s, r1)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_r1_o 0
+
+static void in1_r1_32s(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
+}
+#define SPEC_in1_r1_32s 0
+
+static void in1_r1_32u(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
+}
+#define SPEC_in1_r1_32u 0
+
+static void in1_r1_sr32(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
+}
+#define SPEC_in1_r1_sr32 0
+
+static void in1_r1p1(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(s, r1) + 1);
+}
+#define SPEC_in1_r1p1 SPEC_r1_even
+
+static void in1_r1p1_o(DisasContext *s, DisasOps *o)
+{
+ o->in1 = regs[get_field(s, r1) + 1];
+ o->g_in1 = true;
+}
+#define SPEC_in1_r1p1_o SPEC_r1_even
+
+static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32s SPEC_r1_even
+
+static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32u SPEC_r1_even
+
+static void in1_r1_D32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in1_r1_D32 SPEC_r1_even
+
+static void in1_r2(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(s, r2));
+}
+#define SPEC_in1_r2 0
+
+static void in1_r2_sr32(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
+}
+#define SPEC_in1_r2_sr32 0
+
+static void in1_r2_32u(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
+}
+#define SPEC_in1_r2_32u 0
+
+static void in1_r3(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(s, r3));
+}
+#define SPEC_in1_r3 0
+
+static void in1_r3_o(DisasContext *s, DisasOps *o)
+{
+ o->in1 = regs[get_field(s, r3)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_r3_o 0
+
+static void in1_r3_32s(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
+}
+#define SPEC_in1_r3_32s 0
+
+static void in1_r3_32u(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
+}
+#define SPEC_in1_r3_32u 0
+
+static void in1_r3_D32(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s, r3);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
+}
+#define SPEC_in1_r3_D32 SPEC_r3_even
+
+static void in1_e1(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_freg32_i64(get_field(s, r1));
+}
+#define SPEC_in1_e1 0
+
+static void in1_f1(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_freg(get_field(s, r1));
+}
+#define SPEC_in1_f1 0
+
+/* Load the high double word of an extended (128-bit) format FP number */
+static void in1_x2h(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_freg(get_field(s, r2));
+}
+#define SPEC_in1_x2h SPEC_r2_f128
+
+static void in1_f3(DisasContext *s, DisasOps *o)
+{
+ o->in1 = load_freg(get_field(s, r3));
+}
+#define SPEC_in1_f3 0
+
+static void in1_la1(DisasContext *s, DisasOps *o)
+{
+ o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
+}
+#define SPEC_in1_la1 0
+
+static void in1_la2(DisasContext *s, DisasOps *o)
+{
+ int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
+ o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
+}
+#define SPEC_in1_la2 0
+
+static void in1_m1_8u(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_8u 0
+
+static void in1_m1_16s(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16s 0
+
+static void in1_m1_16u(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16u 0
+
+static void in1_m1_32s(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32s 0
+
+static void in1_m1_32u(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32u 0
+
+static void in1_m1_64(DisasContext *s, DisasOps *o)
+{
+ in1_la1(s, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_64 0
+
+/* ====================================================================== */
+/* The "INput 2" generators. These load the second operand to an insn. */
+
+static void in2_r1_o(DisasContext *s, DisasOps *o)
+{
+ o->in2 = regs[get_field(s, r1)];
+ o->g_in2 = true;
+}
+#define SPEC_in2_r1_o 0
+
+static void in2_r1_16u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
+}
+#define SPEC_in2_r1_16u 0
+
+static void in2_r1_32u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
+}
+#define SPEC_in2_r1_32u 0
+
+static void in2_r1_D32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s, r1);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in2_r1_D32 SPEC_r1_even
+
+static void in2_r2(DisasContext *s, DisasOps *o)
+{
+ o->in2 = load_reg(get_field(s, r2));
+}
+#define SPEC_in2_r2 0
+
+static void in2_r2_o(DisasContext *s, DisasOps *o)
+{
+ o->in2 = regs[get_field(s, r2)];
+ o->g_in2 = true;
+}
+#define SPEC_in2_r2_o 0
+
+static void in2_r2_nz(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s, r2);
+ if (r2 != 0) {
+ o->in2 = load_reg(r2);
+ }
+}
+#define SPEC_in2_r2_nz 0
+
+static void in2_r2_8s(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_8s 0
+
+static void in2_r2_8u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_8u 0
+
+static void in2_r2_16s(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_16s 0
+
+static void in2_r2_16u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_16u 0
+
+static void in2_r3(DisasContext *s, DisasOps *o)
+{
+ o->in2 = load_reg(get_field(s, r3));
+}
+#define SPEC_in2_r3 0
+
+static void in2_r3_sr32(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
+}
+#define SPEC_in2_r3_sr32 0
+
+static void in2_r3_32u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
+}
+#define SPEC_in2_r3_32u 0
+
+static void in2_r2_32s(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_32s 0
+
+static void in2_r2_32u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
+}
+#define SPEC_in2_r2_32u 0
+
+static void in2_r2_sr32(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
+}
+#define SPEC_in2_r2_sr32 0
+
+static void in2_e2(DisasContext *s, DisasOps *o)
+{
+ o->in2 = load_freg32_i64(get_field(s, r2));
+}
+#define SPEC_in2_e2 0
+
+static void in2_f2(DisasContext *s, DisasOps *o)
+{
+ o->in2 = load_freg(get_field(s, r2));
+}
+#define SPEC_in2_f2 0
+
+/* Load the low double word of an extended (128-bit) format FP number */
+static void in2_x2l(DisasContext *s, DisasOps *o)
+{
+ o->in2 = load_freg(get_field(s, r2) + 2);
+}
+#define SPEC_in2_x2l SPEC_r2_f128
+
+static void in2_ra2(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s, r2);
+
+ /* Note: *don't* treat !r2 as 0, use the reg value. */
+ o->in2 = tcg_temp_new_i64();
+ gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
+}
+#define SPEC_in2_ra2 0
+
+static void in2_a2(DisasContext *s, DisasOps *o)
+{
+ int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
+ o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
+}
+#define SPEC_in2_a2 0
+
+static void in2_ri2(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
+}
+#define SPEC_in2_ri2 0
+
+static void in2_sh32(DisasContext *s, DisasOps *o)
+{
+ help_l2_shift(s, o, 31);
+}
+#define SPEC_in2_sh32 0
+
+static void in2_sh64(DisasContext *s, DisasOps *o)
+{
+ help_l2_shift(s, o, 63);
+}
+#define SPEC_in2_sh64 0
+
+static void in2_m2_8u(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_8u 0
+
+static void in2_m2_16s(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16s 0
+
+static void in2_m2_16u(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16u 0
+
+static void in2_m2_32s(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32s 0
+
+static void in2_m2_32u(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32u 0
+
+#ifndef CONFIG_USER_ONLY
+static void in2_m2_32ua(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
+}
+#define SPEC_in2_m2_32ua 0
+#endif
+
+static void in2_m2_64(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_64 0
+
+static void in2_m2_64w(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+ gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
+}
+#define SPEC_in2_m2_64w 0
+
+#ifndef CONFIG_USER_ONLY
+static void in2_m2_64a(DisasContext *s, DisasOps *o)
+{
+ in2_a2(s, o);
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
+}
+#define SPEC_in2_m2_64a 0
+#endif
+
+static void in2_mri2_16u(DisasContext *s, DisasOps *o)
+{
+ in2_ri2(s, o);
+ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_16u 0
+
+static void in2_mri2_32s(DisasContext *s, DisasOps *o)
+{
+ in2_ri2(s, o);
+ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32s 0
+
+static void in2_mri2_32u(DisasContext *s, DisasOps *o)
+{
+ in2_ri2(s, o);
+ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32u 0
+
+static void in2_mri2_64(DisasContext *s, DisasOps *o)
+{
+ in2_ri2(s, o);
+ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_64 0
+
+static void in2_i2(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64(get_field(s, i2));
+}
+#define SPEC_in2_i2 0
+
+static void in2_i2_8u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
+}
+#define SPEC_in2_i2_8u 0
+
+static void in2_i2_16u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
+}
+#define SPEC_in2_i2_16u 0
+
+static void in2_i2_32u(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
+}
+#define SPEC_in2_i2_32u 0
+
+static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
+{
+ uint64_t i2 = (uint16_t)get_field(s, i2);
+ o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_16u_shl 0
+
+static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
+{
+ uint64_t i2 = (uint32_t)get_field(s, i2);
+ o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_32u_shl 0
+
+#ifndef CONFIG_USER_ONLY
+static void in2_insn(DisasContext *s, DisasOps *o)
+{
+ o->in2 = tcg_const_i64(s->fields.raw_insn);
+}
+#define SPEC_in2_insn 0
+#endif
+
+/* ====================================================================== */
+
+/* Find opc within the table of insns. This is formulated as a switch
+ statement so that (1) we get compile-time notice of cut-paste errors
+ for duplicated opcodes, and (2) the compiler generates the binary
+ search tree, rather than us having to post-process the table. */
+
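+/* insn-data.def is included three times below: to build the DisasInsnEnum
+ enum, the insn_info[] table, and the opcode switch in lookup_opc(). */
+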
+#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
+ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
+
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
+ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
+
+#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
+ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
+
+#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
+
+enum DisasInsnEnum {
+#include "insn-data.def"
+};
+
+#undef E
+#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
+ .opc = OPC, \
+ .flags = FL, \
+ .fmt = FMT_##FT, \
+ .fac = FAC_##FC, \
+ .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
+ .name = #NM, \
+ .help_in1 = in1_##I1, \
+ .help_in2 = in2_##I2, \
+ .help_prep = prep_##P, \
+ .help_wout = wout_##W, \
+ .help_cout = cout_##CC, \
+ .help_op = op_##OP, \
+ .data = D \
+ },
+
+/* Allow 0 to be used for NULL in the table below. */
+#define in1_0 NULL
+#define in2_0 NULL
+#define prep_0 NULL
+#define wout_0 NULL
+#define cout_0 NULL
+#define op_0 NULL
+
+#define SPEC_in1_0 0
+#define SPEC_in2_0 0
+#define SPEC_prep_0 0
+#define SPEC_wout_0 0
+
+/* Give smaller names to the various facilities. */
+#define FAC_Z S390_FEAT_ZARCH
+#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
+#define FAC_DFP S390_FEAT_DFP
+#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
+#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
+#define FAC_EE S390_FEAT_EXECUTE_EXT
+#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
+#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
+#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
+#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
+#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
+#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
+#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
+#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
+#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
+#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
+#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
+#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
+#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
+#define FAC_PC S390_FEAT_STFLE_45 /* population count */
+#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
+#define FAC_SFLE S390_FEAT_STFLE
+#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
+#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
+#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
+#define FAC_DAT_ENH S390_FEAT_DAT_ENH
+#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
+#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
+#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
+#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
+#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
+#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
+#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
+#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
+#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
+#define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
+#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
+#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
+#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
+#define FAC_V S390_FEAT_VECTOR /* vector facility */
+#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
+#define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
+
+static const DisasInsn insn_info[] = {
+#include "insn-data.def"
+};
+
+#undef E
+#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
+ case OPC: return &insn_info[insn_ ## NM];
+
+static const DisasInsn *lookup_opc(uint16_t opc)
+{
+ switch (opc) {
+#include "insn-data.def"
+ default:
+ return NULL;
+ }
+}
+
+#undef F
+#undef E
+#undef D
+#undef C
+
+/* Extract a field from the insn. The INSN should be left-aligned in
+ the uint64_t so that we can more easily utilize the big-bit-endian
+ definitions we extract from the Principles of Operation. */
+
+static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
+{
+ uint32_t r, m;
+
+ if (f->size == 0) {
+ return;
+ }
+
+ /* Zero extract the field from the insn. */
+ r = (insn << f->beg) >> (64 - f->size);
+
+ /* Sign-extend, or un-swap the field as necessary. */
+ switch (f->type) {
+ case 0: /* unsigned */
+ break;
+ case 1: /* signed */
+ assert(f->size <= 32);
+ m = 1u << (f->size - 1);
+ r = (r ^ m) - m;
+ break;
+ case 2: /* dl+dh split, signed 20 bit. */
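+ /* The raw 20-bit field is DL(12 bits):DH(8 bits); reassemble it as
+ the sign-extended displacement DH:DL. */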
+ r = ((int8_t)r << 12) | (r >> 8);
+ break;
+ case 3: /* MSB stored in RXB */
+ g_assert(f->size == 4);
+ switch (f->beg) {
+ case 8:
+ r |= extract64(insn, 63 - 36, 1) << 4;
+ break;
+ case 12:
+ r |= extract64(insn, 63 - 37, 1) << 4;
+ break;
+ case 16:
+ r |= extract64(insn, 63 - 38, 1) << 4;
+ break;
+ case 32:
+ r |= extract64(insn, 63 - 39, 1) << 4;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ abort();
+ }
+
+ /*
+ * Validate that the "compressed" encoding we selected above is valid.
+ * I.e. we haven't made two different original fields overlap.
+ */
+ assert(((o->presentC >> f->indexC) & 1) == 0);
+ o->presentC |= 1 << f->indexC;
+ o->presentO |= 1 << f->indexO;
+
+ o->c[f->indexC] = r;
+}
+
+/* Lookup the insn at the current PC, extracting the operands into O and
+ returning the info struct for the insn. Returns NULL for invalid insn. */
+
+static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
+{
+ uint64_t insn, pc = s->base.pc_next;
+ int op, op2, ilen;
+ const DisasInsn *info;
+
+ if (unlikely(s->ex_value)) {
+ /* Drop the EX data now, so that it's clear on exception paths. */
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
+ tcg_temp_free_i64(zero);
+
+ /* Extract the values saved by EXECUTE. */
+ insn = s->ex_value & 0xffffffffffff0000ull;
+ ilen = s->ex_value & 0xf;
+ op = insn >> 56;
+ } else {
+ insn = ld_code2(env, s, pc);
+ op = (insn >> 8) & 0xff;
+ ilen = get_ilen(op);
+ switch (ilen) {
+ case 2:
+ insn = insn << 48;
+ break;
+ case 4:
+ insn = ld_code4(env, s, pc) << 32;
+ break;
+ case 6:
+ insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ s->pc_tmp = s->base.pc_next + ilen;
+ s->ilen = ilen;
+
+ /* We can't actually determine the insn format until we've looked up
+ the full insn opcode, which we can't do without locating the
+ secondary opcode. Assume by default that OP2 is at bit 40; for
+ those smaller insns that don't actually have a secondary opcode
+ this will correctly result in OP2 = 0. */
+ switch (op) {
+ case 0x01: /* E */
+ case 0x80: /* S */
+ case 0x82: /* S */
+ case 0x93: /* S */
+ case 0xb2: /* S, RRF, RRE, IE */
+ case 0xb3: /* RRE, RRD, RRF */
+ case 0xb9: /* RRE, RRF */
+ case 0xe5: /* SSE, SIL */
+ op2 = (insn << 8) >> 56;
+ break;
+ case 0xa5: /* RI */
+ case 0xa7: /* RI */
+ case 0xc0: /* RIL */
+ case 0xc2: /* RIL */
+ case 0xc4: /* RIL */
+ case 0xc6: /* RIL */
+ case 0xc8: /* SSF */
+ case 0xcc: /* RIL */
+ op2 = (insn << 12) >> 60;
+ break;
+ case 0xc5: /* MII */
+ case 0xc7: /* SMI */
+ case 0xd0 ... 0xdf: /* SS */
+ case 0xe1: /* SS */
+ case 0xe2: /* SS */
+ case 0xe8: /* SS */
+ case 0xe9: /* SS */
+ case 0xea: /* SS */
+ case 0xee ... 0xf3: /* SS */
+ case 0xf8 ... 0xfd: /* SS */
+ op2 = 0;
+ break;
+ default:
+ op2 = (insn << 40) >> 56;
+ break;
+ }
+
+ memset(&s->fields, 0, sizeof(s->fields));
+ s->fields.raw_insn = insn;
+ s->fields.op = op;
+ s->fields.op2 = op2;
+
+ /* Lookup the instruction. */
+ info = lookup_opc(op << 8 | op2);
+ s->insn = info;
+
+ /* If we found it, extract the operands. */
+ if (info != NULL) {
+ DisasFormat fmt = info->fmt;
+ int i;
+
+ for (i = 0; i < NUM_C_FIELD; ++i) {
+ extract_field(&s->fields, &format_info[fmt].op[i], insn);
+ }
+ }
+ return info;
+}
+
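+/* Without the additional-floating-point facility, only FPRs 0, 2, 4 and 6
+ are available; any other FPR is an AFP register. */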
+static bool is_afp_reg(int reg)
+{
+ return reg % 2 || reg > 6;
+}
+
+static bool is_fp_pair(int reg)
+{
+ /* 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
+ return !(reg & 0x2);
+}
+
+static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
+{
+ const DisasInsn *insn;
+ DisasJumpType ret = DISAS_NEXT;
+ DisasOps o = {};
+ bool icount = false;
+
+ /* Search for the insn in the table. */
+ insn = extract_insn(env, s);
+
+ /* Update insn_start now that we know the ILEN. */
+ tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
+
+ /* Not found means unimplemented/illegal opcode. */
+ if (insn == NULL) {
+ qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
+ s->fields.op, s->fields.op2);
+ gen_illegal_opcode(s);
+ ret = DISAS_NORETURN;
+ goto out;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (s->base.tb->flags & FLAG_MASK_PER) {
+ TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
+ gen_helper_per_ifetch(cpu_env, addr);
+ tcg_temp_free_i64(addr);
+ }
+#endif
+
+ /* process flags */
+ if (insn->flags) {
+ /* privileged instruction */
+ if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
+ gen_program_exception(s, PGM_PRIVILEGED);
+ ret = DISAS_NORETURN;
+ goto out;
+ }
+
+ /* if AFP is not enabled, instructions and registers are forbidden */
+ if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
+ uint8_t dxc = 0;
+
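+ /* Architected data-exception codes: 1 = AFP register, 2 = BFP
+ instruction, 3 = DFP instruction, 0xfe = vector instruction. */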
+ if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
+ dxc = 1;
+ }
+ if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
+ dxc = 1;
+ }
+ if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
+ dxc = 1;
+ }
+ if (insn->flags & IF_BFP) {
+ dxc = 2;
+ }
+ if (insn->flags & IF_DFP) {
+ dxc = 3;
+ }
+ if (insn->flags & IF_VEC) {
+ dxc = 0xfe;
+ }
+ if (dxc) {
+ gen_data_exception(dxc);
+ ret = DISAS_NORETURN;
+ goto out;
+ }
+ }
+
+ /* if vector instructions not enabled, executing them is forbidden */
+ if (insn->flags & IF_VEC) {
+ if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
+ gen_data_exception(0xfe);
+ ret = DISAS_NORETURN;
+ goto out;
+ }
+ }
+
+ /* input/output is the special case for icount mode */
+ if (unlikely(insn->flags & IF_IO)) {
+ icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
+ if (icount) {
+ gen_io_start();
+ }
+ }
+ }
+
+ /* Check for insn specification exceptions. */
+ if (insn->spec) {
+ if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
+ (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
+ (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
+ (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
+ (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ ret = DISAS_NORETURN;
+ goto out;
+ }
+ }
+
+ /* Implement the instruction. */
+ if (insn->help_in1) {
+ insn->help_in1(s, &o);
+ }
+ if (insn->help_in2) {
+ insn->help_in2(s, &o);
+ }
+ if (insn->help_prep) {
+ insn->help_prep(s, &o);
+ }
+ if (insn->help_op) {
+ ret = insn->help_op(s, &o);
+ }
+ if (ret != DISAS_NORETURN) {
+ if (insn->help_wout) {
+ insn->help_wout(s, &o);
+ }
+ if (insn->help_cout) {
+ insn->help_cout(s, &o);
+ }
+ }
+
+ /* Free any temporaries created by the helpers. */
+ if (o.out && !o.g_out) {
+ tcg_temp_free_i64(o.out);
+ }
+ if (o.out2 && !o.g_out2) {
+ tcg_temp_free_i64(o.out2);
+ }
+ if (o.in1 && !o.g_in1) {
+ tcg_temp_free_i64(o.in1);
+ }
+ if (o.in2 && !o.g_in2) {
+ tcg_temp_free_i64(o.in2);
+ }
+ if (o.addr1) {
+ tcg_temp_free_i64(o.addr1);
+ }
+
+ /* io should be the last instruction in tb when icount is enabled */
+ if (unlikely(icount && ret == DISAS_NEXT)) {
+ ret = DISAS_PC_STALE;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (s->base.tb->flags & FLAG_MASK_PER) {
+ /* An exception might be triggered, save PSW if not already done. */
+ if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
+ tcg_gen_movi_i64(psw_addr, s->pc_tmp);
+ }
+
+ /* Call the helper to check for a possible PER exception. */
+ gen_helper_per_check_exception(cpu_env);
+ }
+#endif
+
+out:
+ /* Advance to the next instruction. */
+ s->base.pc_next = s->pc_tmp;
+ return ret;
+}
+
+static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ /* 31-bit mode */
+ if (!(dc->base.tb->flags & FLAG_MASK_64)) {
+ dc->base.pc_first &= 0x7fffffff;
+ dc->base.pc_next = dc->base.pc_first;
+ }
+
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->ex_value = dc->base.tb->cs_base;
+}
+
+static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
+
+static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ /* Delay setting ilen until we've read the insn. */
+ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
+ dc->insn_start = tcg_last_op();
+}
+
+static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ CPUS390XState *env = cs->env_ptr;
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ dc->base.is_jmp = translate_one(env, dc);
+ if (dc->base.is_jmp == DISAS_NEXT) {
+ uint64_t page_start;
+
+ page_start = dc->base.pc_first & TARGET_PAGE_MASK;
+ if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ switch (dc->base.is_jmp) {
+ case DISAS_GOTO_TB:
+ case DISAS_NORETURN:
+ break;
+ case DISAS_TOO_MANY:
+ case DISAS_PC_STALE:
+ case DISAS_PC_STALE_NOCHAIN:
+ update_psw_addr(dc);
+ /* FALLTHRU */
+ case DISAS_PC_UPDATED:
+ /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
+ cc op type is in env */
+ update_cc_op(dc);
+ /* FALLTHRU */
+ case DISAS_PC_CC_UPDATED:
+ /* Exit the TB, either by raising a debug exception or by return. */
+ if ((dc->base.tb->flags & FLAG_MASK_PER) ||
+ dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
+ tcg_gen_exit_tb(NULL, 0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (unlikely(dc->ex_value)) {
+ /* ??? Unfortunately log_target_disas can't use host memory. */
+ qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value);
+ } else {
+ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
+ log_target_disas(cs, dc->base.pc_first, dc->base.tb->size);
+ }
+}
+
+static const TranslatorOps s390x_tr_ops = {
+ .init_disas_context = s390x_tr_init_disas_context,
+ .tb_start = s390x_tr_tb_start,
+ .insn_start = s390x_tr_insn_start,
+ .translate_insn = s390x_tr_translate_insn,
+ .tb_stop = s390x_tr_tb_stop,
+ .disas_log = s390x_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc;
+
+ translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ int cc_op = data[1];
+
+ env->psw.addr = data[0];
+
+ /* Update the CC opcode if it is not already up-to-date. */
+ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+ env->cc_op = cc_op;
+ }
+
+ /* Record ILEN. */
+ env->int_pgm_ilen = data[2];
+}
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
new file mode 100644
index 000000000..28bf5a23b
--- /dev/null
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -0,0 +1,3109 @@
+/*
+ * QEMU TCG support -- s390x vector instruction translation functions
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * For most instructions that use the same element size for reads and
+ * writes, we can use real gvec vector expansion, which potentially uses
+ * real host vector instructions. As they only work up to 64 bit elements,
+ * 128 bit elements (vector is a single element) have to be handled
+ * differently. Operations that are too complicated to encode via TCG ops
+ * are handled via gvec ool (out-of-line) handlers.
+ *
+ * As soon as instructions use different element sizes for reads and writes
+ * or access elements "out of their element scope", we expand them manually
+ * in fancy loops, as gvec expansion does not deal with actual element
+ * numbers and also does not support access to other elements.
+ *
+ * 128 bit elements:
+ * As we only have i32/i64, such elements have to be loaded into two
+ * i64 values and can then be processed e.g. by tcg_gen_add2_i64.
+ *
+ * Sizes:
+ * On s390x, the operand size (oprsz) and the maximum size (maxsz) are
+ * always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
+ * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
+ * 128 bit element size has to be treated in a special way (MO_64 + 1).
+ * We will use ES_* instead of MO_* for this reason in this file.
+ *
+ * CC handling:
+ * As gvec ool-helpers currently cannot return values (besides via
+ * pointers like vectors or cpu_env), whenever we have to set the CC and
+ * can't conclude the value from the result vector, we will directly
+ * set it in "env->cc_op" and mark it as static via set_cc_static()".
+ * Whenever this is done, the helper writes globals (cc_op).
+ */
+
+#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
+#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
+#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)
+
+#define ES_8 MO_8
+#define ES_16 MO_16
+#define ES_32 MO_32
+#define ES_64 MO_64
+#define ES_128 4
+
+/* Floating-Point Format */
+#define FPF_SHORT 2
+#define FPF_LONG 3
+#define FPF_EXT 4
+
+static inline bool valid_vec_element(uint8_t enr, MemOp es)
+{
+ return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
+}
+
+static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
+ MemOp memop)
+{
+ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
+
+ switch ((unsigned)memop) {
+ case ES_8:
+ tcg_gen_ld8u_i64(dst, cpu_env, offs);
+ break;
+ case ES_16:
+ tcg_gen_ld16u_i64(dst, cpu_env, offs);
+ break;
+ case ES_32:
+ tcg_gen_ld32u_i64(dst, cpu_env, offs);
+ break;
+ case ES_8 | MO_SIGN:
+ tcg_gen_ld8s_i64(dst, cpu_env, offs);
+ break;
+ case ES_16 | MO_SIGN:
+ tcg_gen_ld16s_i64(dst, cpu_env, offs);
+ break;
+ case ES_32 | MO_SIGN:
+ tcg_gen_ld32s_i64(dst, cpu_env, offs);
+ break;
+ case ES_64:
+ case ES_64 | MO_SIGN:
+ tcg_gen_ld_i64(dst, cpu_env, offs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
+ MemOp memop)
+{
+ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
+
+ switch (memop) {
+ case ES_8:
+ tcg_gen_ld8u_i32(dst, cpu_env, offs);
+ break;
+ case ES_16:
+ tcg_gen_ld16u_i32(dst, cpu_env, offs);
+ break;
+ case ES_8 | MO_SIGN:
+ tcg_gen_ld8s_i32(dst, cpu_env, offs);
+ break;
+ case ES_16 | MO_SIGN:
+ tcg_gen_ld16s_i32(dst, cpu_env, offs);
+ break;
+ case ES_32:
+ case ES_32 | MO_SIGN:
+ tcg_gen_ld_i32(dst, cpu_env, offs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
+ MemOp memop)
+{
+ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
+
+ switch (memop) {
+ case ES_8:
+ tcg_gen_st8_i64(src, cpu_env, offs);
+ break;
+ case ES_16:
+ tcg_gen_st16_i64(src, cpu_env, offs);
+ break;
+ case ES_32:
+ tcg_gen_st32_i64(src, cpu_env, offs);
+ break;
+ case ES_64:
+ tcg_gen_st_i64(src, cpu_env, offs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
+ MemOp memop)
+{
+ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
+
+ switch (memop) {
+ case ES_8:
+ tcg_gen_st8_i32(src, cpu_env, offs);
+ break;
+ case ES_16:
+ tcg_gen_st16_i32(src, cpu_env, offs);
+ break;
+ case ES_32:
+ tcg_gen_st_i32(src, cpu_env, offs);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
+ uint8_t es)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* mask off invalid parts from the element nr */
+ tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);
+
+ /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
+ tcg_gen_shli_i64(tmp, tmp, es);
+#ifndef HOST_WORDS_BIGENDIAN
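+ /* On little-endian hosts, mirror the element's byte offset within its
+ 8-byte doubleword. */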
+ tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
+#endif
+ tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));
+
+ /* generate the final ptr by adding cpu_env */
+ tcg_gen_trunc_i64_ptr(ptr, tmp);
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
+
+ tcg_temp_free_i64(tmp);
+}
+
+#define gen_gvec_2(v1, v2, gen) \
+ tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ 16, 16, gen)
+#define gen_gvec_2s(v1, v2, c, gen) \
+ tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ 16, 16, c, gen)
+#define gen_gvec_2_ool(v1, v2, data, fn) \
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ 16, 16, data, fn)
+#define gen_gvec_2i_ool(v1, v2, c, data, fn) \
+ tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ c, 16, 16, data, fn)
+#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ ptr, 16, 16, data, fn)
+#define gen_gvec_3(v1, v2, v3, gen) \
+ tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), 16, 16, gen)
+#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), 16, 16, data, fn)
+#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
+#define gen_gvec_3i(v1, v2, v3, c, gen) \
+ tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), 16, 16, c, gen)
+#define gen_gvec_4(v1, v2, v3, v4, gen) \
+ tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
+ 16, 16, gen)
+#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
+ tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
+ 16, 16, data, fn)
+#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
+ tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
+ ptr, 16, 16, data, fn)
+#define gen_gvec_dup_i64(es, v1, c) \
+ tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
+#define gen_gvec_mov(v1, v2) \
+ tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
+ 16)
+#define gen_gvec_dup_imm(es, v1, c) \
+ tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c);
+#define gen_gvec_fn_2(fn, es, v1, v2) \
+ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ 16, 16)
+#define gen_gvec_fn_2i(fn, es, v1, v2, c) \
+ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ c, 16, 16)
+#define gen_gvec_fn_2s(fn, es, v1, v2, s) \
+ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ s, 16, 16)
+#define gen_gvec_fn_3(fn, es, v1, v2, v3) \
+ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), 16, 16)
+#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
+ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
+ vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)
+
+/*
+ * Helper to carry out a 128 bit vector computation using 2 i64 values per
+ * vector.
+ */
+typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
+static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
+ uint8_t b)
+{
+ TCGv_i64 dh = tcg_temp_new_i64();
+ TCGv_i64 dl = tcg_temp_new_i64();
+ TCGv_i64 ah = tcg_temp_new_i64();
+ TCGv_i64 al = tcg_temp_new_i64();
+ TCGv_i64 bh = tcg_temp_new_i64();
+ TCGv_i64 bl = tcg_temp_new_i64();
+
+ read_vec_element_i64(ah, a, 0, ES_64);
+ read_vec_element_i64(al, a, 1, ES_64);
+ read_vec_element_i64(bh, b, 0, ES_64);
+ read_vec_element_i64(bl, b, 1, ES_64);
+ fn(dl, dh, al, ah, bl, bh);
+ write_vec_element_i64(dh, d, 0, ES_64);
+ write_vec_element_i64(dl, d, 1, ES_64);
+
+ tcg_temp_free_i64(dh);
+ tcg_temp_free_i64(dl);
+ tcg_temp_free_i64(ah);
+ tcg_temp_free_i64(al);
+ tcg_temp_free_i64(bh);
+ tcg_temp_free_i64(bl);
+}
+
+typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
+ TCGv_i64 cl, TCGv_i64 ch);
+static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
+ uint8_t b, uint8_t c)
+{
+ TCGv_i64 dh = tcg_temp_new_i64();
+ TCGv_i64 dl = tcg_temp_new_i64();
+ TCGv_i64 ah = tcg_temp_new_i64();
+ TCGv_i64 al = tcg_temp_new_i64();
+ TCGv_i64 bh = tcg_temp_new_i64();
+ TCGv_i64 bl = tcg_temp_new_i64();
+ TCGv_i64 ch = tcg_temp_new_i64();
+ TCGv_i64 cl = tcg_temp_new_i64();
+
+ read_vec_element_i64(ah, a, 0, ES_64);
+ read_vec_element_i64(al, a, 1, ES_64);
+ read_vec_element_i64(bh, b, 0, ES_64);
+ read_vec_element_i64(bl, b, 1, ES_64);
+ read_vec_element_i64(ch, c, 0, ES_64);
+ read_vec_element_i64(cl, c, 1, ES_64);
+ fn(dl, dh, al, ah, bl, bh, cl, ch);
+ write_vec_element_i64(dh, d, 0, ES_64);
+ write_vec_element_i64(dl, d, 1, ES_64);
+
+ tcg_temp_free_i64(dh);
+ tcg_temp_free_i64(dl);
+ tcg_temp_free_i64(ah);
+ tcg_temp_free_i64(al);
+ tcg_temp_free_i64(bh);
+ tcg_temp_free_i64(bl);
+ tcg_temp_free_i64(ch);
+ tcg_temp_free_i64(cl);
+}
+
+static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
+ uint64_t b)
+{
+ TCGv_i64 bl = tcg_const_i64(b);
+ TCGv_i64 bh = tcg_const_i64(0);
+
+ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
+ tcg_temp_free_i64(bl);
+ tcg_temp_free_i64(bh);
+}
+
+static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0,
+ gen_helper_gvec_vbperm);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, get_field(s, v2), enr, es);
+ tcg_gen_add_i64(o->addr1, o->addr1, tmp);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
+
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
+ write_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
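+/* Expand each set bit of an 8-bit mask into a 0xff byte of the result,
+ e.g. generate_byte_mask(0x81) == 0xff000000000000ffull. */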
+static uint64_t generate_byte_mask(uint8_t mask)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 1) {
+ r |= 0xffull << (i * 8);
+ }
+ }
+ return r;
+}
+
+static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
+{
+ const uint16_t i2 = get_field(s, i2);
+
+ if (i2 == (i2 & 0xff) * 0x0101) {
+ /*
+ * Masks for both 64 bit elements of the vector are the same.
+ * Trust tcg to produce a good constant load.
+ */
+ gen_gvec_dup_imm(ES_64, get_field(s, v1),
+ generate_byte_mask(i2 & 0xff));
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
+ write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
+ tcg_gen_movi_i64(t, generate_byte_mask(i2));
+ write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
+ tcg_temp_free_i64(t);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
+ const uint8_t i2 = get_field(s, i2) & (bits - 1);
+ const uint8_t i3 = get_field(s, i3) & (bits - 1);
+ uint64_t mask = 0;
+ int i;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /* generate the mask - take care of wrapping */
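+ /* e.g. es=ES_32, i2=30, i3=1 selects bits 30, 31, 0 and 1: mask 0xc0000003 */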
+ for (i = i2; ; i = (i + 1) % bits) {
+ mask |= 1ull << (bits - i - 1);
+ if (i == i3) {
+ break;
+ }
+ }
+
+ gen_gvec_dup_imm(es, get_field(s, v1), mask);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_mov(get_field(s, v1), get_field(s, v2));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
+ gen_gvec_dup_i64(es, get_field(s, v1), tmp);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
+ write_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_const_i64((int16_t)get_field(s, i2));
+ write_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ TCGv_ptr ptr;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /* fast path if we don't need the register content */
+ if (!get_field(s, b2)) {
+ uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);
+
+ read_vec_element_i64(o->out, get_field(s, v3), enr, es);
+ return DISAS_NEXT;
+ }
+
+ ptr = tcg_temp_new_ptr();
+ get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es);
+ switch (es) {
+ case ES_8:
+ tcg_gen_ld8u_i64(o->out, ptr, 0);
+ break;
+ case ES_16:
+ tcg_gen_ld16u_i64(o->out, ptr, 0);
+ break;
+ case ES_32:
+ tcg_gen_ld32u_i64(o->out, ptr, 0);
+ break;
+ case ES_64:
+ tcg_gen_ld_i64(o->out, ptr, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_ptr(ptr);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
+{
+ uint8_t es = get_field(s, m3);
+ uint8_t enr;
+ TCGv_i64 t;
+
+ switch (es) {
+ /* rightmost sub-element of leftmost doubleword */
+ case ES_8:
+ enr = 7;
+ break;
+ case ES_16:
+ enr = 3;
+ break;
+ case ES_32:
+ enr = 1;
+ break;
+ case ES_64:
+ enr = 0;
+ break;
+ /* leftmost sub-element of leftmost doubleword */
+ case 6:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ es = ES_32;
+ enr = 0;
+ break;
+ }
+ /* fallthrough */
+ default:
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ t = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
+ gen_gvec_dup_imm(es, get_field(s, v1), 0);
+ write_vec_element_i64(t, get_field(s, v1), enr, es);
+ tcg_temp_free_i64(t);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v3 = get_field(s, v3);
+ uint8_t v1 = get_field(s, v1);
+ TCGv_i64 t0, t1;
+
+ if (v3 < v1 || (v3 - v1 + 1) > 16) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /*
+ * Check for possible access exceptions by trying to load the last
+     * element. The first element will be checked first anyway.
+ */
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
+ tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);
+
+ for (;; v1++) {
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ write_vec_element_i64(t1, v1, 0, ES_64);
+ if (v1 == v3) {
+ break;
+ }
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ write_vec_element_i64(t1, v1, 1, ES_64);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ }
+
+ /* Store the last element, loaded first */
+ write_vec_element_i64(t0, v1, 1, ES_64);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
+{
+ const int64_t block_size = (1ull << (get_field(s, m3) + 6));
+ const int v1_offs = vec_full_reg_offset(get_field(s, v1));
+ TCGv_ptr a0;
+ TCGv_i64 bytes;
+
+ if (get_field(s, m3) > 6) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ bytes = tcg_temp_new_i64();
+ a0 = tcg_temp_new_ptr();
+ /* calculate the number of bytes until the next block boundary */
+ tcg_gen_ori_i64(bytes, o->addr1, -block_size);
+ tcg_gen_neg_i64(bytes, bytes);
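+    /*
+     * -(addr | -block_size) == block_size - (addr % block_size); e.g. a
+     * 64-byte block (m3 == 0) and an address ending in 0x07 yield 57.
+     */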
+
+ tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
+ gen_helper_vll(cpu_env, a0, o->addr1, bytes);
+ tcg_temp_free_i64(bytes);
+ tcg_temp_free_ptr(a0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ TCGv_ptr ptr;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /* fast path if we don't need the register content */
+ if (!get_field(s, b2)) {
+ uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);
+
+ write_vec_element_i64(o->in2, get_field(s, v1), enr, es);
+ return DISAS_NEXT;
+ }
+
+ ptr = tcg_temp_new_ptr();
+ get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es);
+ switch (es) {
+ case ES_8:
+ tcg_gen_st8_i64(o->in2, ptr, 0);
+ break;
+ case ES_16:
+ tcg_gen_st16_i64(o->in2, ptr, 0);
+ break;
+ case ES_32:
+ tcg_gen_st32_i64(o->in2, ptr, 0);
+ break;
+ case ES_64:
+ tcg_gen_st_i64(o->in2, ptr, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_ptr(ptr);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
+{
+ write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
+{
+ const int v1_offs = vec_full_reg_offset(get_field(s, v1));
+ TCGv_ptr a0 = tcg_temp_new_ptr();
+
+ /* convert highest index into an actual length */
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
+ gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
+ tcg_temp_free_ptr(a0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t v3 = get_field(s, v3);
+ const uint8_t es = get_field(s, m4);
+ int dst_idx, src_idx;
+ TCGv_i64 tmp;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ if (s->fields.op2 == 0x61) {
+ /* iterate backwards to avoid overwriting data we might need later */
+ for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
+ src_idx = dst_idx / 2;
+ if (dst_idx % 2 == 0) {
+ read_vec_element_i64(tmp, v2, src_idx, es);
+ } else {
+ read_vec_element_i64(tmp, v3, src_idx, es);
+ }
+ write_vec_element_i64(tmp, v1, dst_idx, es);
+ }
+ } else {
+ /* iterate forward to avoid overwriting data we might need later */
+ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
+ src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
+ if (dst_idx % 2 == 0) {
+ read_vec_element_i64(tmp, v2, src_idx, es);
+ } else {
+ read_vec_element_i64(tmp, v3, src_idx, es);
+ }
+ write_vec_element_i64(tmp, v1, dst_idx, es);
+ }
+ }
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t v3 = get_field(s, v3);
+ const uint8_t es = get_field(s, m4);
+ static gen_helper_gvec_3 * const vpk[3] = {
+ gen_helper_gvec_vpk16,
+ gen_helper_gvec_vpk32,
+ gen_helper_gvec_vpk64,
+ };
+ static gen_helper_gvec_3 * const vpks[3] = {
+ gen_helper_gvec_vpks16,
+ gen_helper_gvec_vpks32,
+ gen_helper_gvec_vpks64,
+ };
+ static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
+ gen_helper_gvec_vpks_cc16,
+ gen_helper_gvec_vpks_cc32,
+ gen_helper_gvec_vpks_cc64,
+ };
+ static gen_helper_gvec_3 * const vpkls[3] = {
+ gen_helper_gvec_vpkls16,
+ gen_helper_gvec_vpkls32,
+ gen_helper_gvec_vpkls64,
+ };
+ static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
+ gen_helper_gvec_vpkls_cc16,
+ gen_helper_gvec_vpkls_cc32,
+ gen_helper_gvec_vpkls_cc64,
+ };
+
+ if (es == ES_8 || es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields.op2) {
+ case 0x97:
+ if (get_field(s, m5) & 0x1) {
+ gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
+ }
+ break;
+ case 0x95:
+ if (get_field(s, m5) & 0x1) {
+ gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
+ }
+ break;
+ case 0x94:
+        /* If sources and destination don't overlap -> fast path */
+ if (v1 != v2 && v1 != v3) {
+ const uint8_t src_es = get_field(s, m4);
+ const uint8_t dst_es = src_es - 1;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ int dst_idx, src_idx;
+
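+            /*
+             * v1's left half takes v2's elements, its right half v3's,
+             * each truncated to the next-smaller element size.
+             */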
+ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
+ src_idx = dst_idx;
+ if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
+ read_vec_element_i64(tmp, v2, src_idx, src_es);
+ } else {
+ src_idx -= NUM_VEC_ELEMENTS(src_es);
+ read_vec_element_i64(tmp, v3, src_idx, src_es);
+ }
+ write_vec_element_i64(tmp, v1, dst_idx, dst_es);
+ }
+ tcg_temp_free_i64(tmp);
+ } else {
+ gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4),
+ 0, gen_helper_gvec_vperm);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
+{
+ const uint8_t i2 = extract32(get_field(s, m4), 2, 1);
+ const uint8_t i3 = extract32(get_field(s, m4), 0, 1);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ read_vec_element_i64(t0, get_field(s, v2), i2, ES_64);
+ read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
+ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
+{
+ const uint8_t enr = get_field(s, i2);
+ const uint8_t es = get_field(s, m4);
+
+ if (es > ES_64 || !valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)),
+ vec_reg_offset(get_field(s, v3), enr, es),
+ 16, 16);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
+{
+ const int64_t data = (int16_t)get_field(s, i2);
+ const uint8_t es = get_field(s, m3);
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_dup_imm(es, get_field(s, v1), data);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, get_field(s, v2), enr, es);
+ tcg_gen_add_i64(o->addr1, o->addr1, tmp);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);
+
+ read_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1),
+ get_field(s, v4), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ int idx1, idx2;
+ TCGv_i64 tmp;
+
+ switch (es) {
+ case ES_8:
+ idx1 = 7;
+ idx2 = 15;
+ break;
+ case ES_16:
+ idx1 = 3;
+ idx2 = 7;
+ break;
+ case ES_32:
+ idx1 = 1;
+ idx2 = 3;
+ break;
+ default:
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN);
+ write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
+ read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
+ write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 tmp = tcg_const_i64(16);
+
+ /* Probe write access before actually modifying memory */
+ gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+
+ read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = s->insn->data;
+ const uint8_t enr = get_field(s, m3);
+ TCGv_i64 tmp;
+
+ if (!valid_vec_element(enr, es)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, get_field(s, v1), enr, es);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v3 = get_field(s, v3);
+ uint8_t v1 = get_field(s, v1);
+ TCGv_i64 tmp;
+
+    if (v3 < v1 || (v3 - v1 + 1) > 16) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /* Probe write access before actually modifying memory */
+ tmp = tcg_const_i64((v3 - v1 + 1) * 16);
+ gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
+
+ for (;; v1++) {
+ read_vec_element_i64(tmp, v1, 0, ES_64);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ read_vec_element_i64(tmp, v1, 1, ES_64);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ if (v1 == v3) {
+ break;
+ }
+ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+ }
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
+{
+ const int v1_offs = vec_full_reg_offset(get_field(s, v1));
+ TCGv_ptr a0 = tcg_temp_new_ptr();
+
+ /* convert highest index into an actual length */
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
+ gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
+ tcg_temp_free_ptr(a0);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
+{
+ const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t src_es = get_field(s, m3);
+ const uint8_t dst_es = src_es + 1;
+ int dst_idx, src_idx;
+ TCGv_i64 tmp;
+
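+    /*
+     * VUPH (0xd7) and VUPLH (0xd5) unpack the leftmost half of v2, VUPL
+     * (0xd6) and VUPLL (0xd4) the rightmost; the "logical" forms
+     * zero-extend each element, the others sign-extend.
+     */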
+ if (src_es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tmp = tcg_temp_new_i64();
+ if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
+ /* iterate backwards to avoid overwriting data we might need later */
+ for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
+ src_idx = dst_idx;
+ read_vec_element_i64(tmp, v2, src_idx,
+ src_es | (logical ? 0 : MO_SIGN));
+ write_vec_element_i64(tmp, v1, dst_idx, dst_es);
+ }
+
+ } else {
+ /* iterate forward to avoid overwriting data we might need later */
+ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
+ src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
+ read_vec_element_i64(tmp, v2, src_idx,
+ src_es | (logical ? 0 : MO_SIGN));
+ write_vec_element_i64(tmp, v1, dst_idx, dst_es);
+ }
+ }
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_va(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+
+ if (es > ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ } else if (es == ES_128) {
+ gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+ }
+ gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
+{
+ const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
+ TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
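+    /*
+     * Per element, this evaluates the full-adder identity at the MSB:
+     * carry_out = (a & b) | ((a ^ b) & carry_in), where carry_in is the
+     * carry out of the sum over the non-MSB bits computed just below.
+     */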
+ /* Calculate the carry into the MSB, ignoring the old MSBs */
+ tcg_gen_andc_i64(t1, a, msb_mask);
+ tcg_gen_andc_i64(t2, b, msb_mask);
+ tcg_gen_add_i64(t1, t1, t2);
+ /* Calculate the MSB without any carry into it */
+ tcg_gen_xor_i64(t3, a, b);
+ /* Calculate the carry out of the MSB in the MSB bit position */
+ tcg_gen_and_i64(d, a, b);
+ tcg_gen_and_i64(t1, t1, t3);
+ tcg_gen_or_i64(d, d, t1);
+ /* Isolate and shift the carry into position */
+ tcg_gen_and_i64(d, d, msb_mask);
+ tcg_gen_shri_i64(d, d, msb_bit_nr);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ gen_acc(d, a, b, ES_8);
+}
+
+static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ gen_acc(d, a, b, ES_16);
+}
+
+static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_add_i32(t, a, b);
+ tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(t, a, b);
+ tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
+{
+ TCGv_i64 th = tcg_temp_new_i64();
+ TCGv_i64 tl = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+
+ tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
+ tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
+ tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
+ tcg_gen_mov_i64(dh, zero);
+
+ tcg_temp_free_i64(th);
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(zero);
+}
+
+static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g[4] = {
+ { .fni8 = gen_acc8_i64, },
+ { .fni8 = gen_acc16_i64, },
+ { .fni4 = gen_acc_i32, },
+ { .fni8 = gen_acc_i64, },
+ };
+
+ if (es > ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ } else if (es == ES_128) {
+ gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+ }
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), &g[es]);
+ return DISAS_NEXT;
+}
+
+static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
+ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
+{
+ TCGv_i64 tl = tcg_temp_new_i64();
+ TCGv_i64 th = tcg_const_i64(0);
+
+ /* extract the carry only */
+ tcg_gen_extract_i64(tl, cl, 0, 1);
+ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
+ tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);
+
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(th);
+}
+
+static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, m5) != ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3),
+ get_field(s, v4));
+ return DISAS_NEXT;
+}
+
+static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
+ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
+{
+ TCGv_i64 tl = tcg_temp_new_i64();
+ TCGv_i64 th = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+
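+    /*
+     * Chain the 64-bit add2 ops so that dl receives the carry out of the
+     * 128-bit addition ah:al + bh:bl + (cl & 1); dh is always zero.
+     */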
+ tcg_gen_andi_i64(tl, cl, 1);
+ tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
+ tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
+ tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
+ tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
+ tcg_gen_mov_i64(dh, zero);
+
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(th);
+ tcg_temp_free_i64(zero);
+}
+
+static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, m5) != ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3),
+ get_field(s, v4));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(andc, ES_8, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
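+    /* widen to 64 bit so the rounded average (a + b + 1) >> 1 can't overflow */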
+ tcg_gen_ext_i32_i64(t0, a);
+ tcg_gen_ext_i32_i64(t1, b);
+ tcg_gen_add_i64(t0, t0, t1);
+ tcg_gen_addi_i64(t0, t0, 1);
+ tcg_gen_shri_i64(t0, t0, 1);
+ tcg_gen_extrl_i64_i32(d, t0);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+}
+
+static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
+{
+ TCGv_i64 dh = tcg_temp_new_i64();
+ TCGv_i64 ah = tcg_temp_new_i64();
+ TCGv_i64 bh = tcg_temp_new_i64();
+
+ /* extending the sign by one bit is sufficient */
+ tcg_gen_extract_i64(ah, al, 63, 1);
+ tcg_gen_extract_i64(bh, bl, 63, 1);
+ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
+ gen_addi2_i64(dl, dh, dl, dh, 1);
+ tcg_gen_extract2_i64(dl, dl, dh, 1);
+
+ tcg_temp_free_i64(dh);
+ tcg_temp_free_i64(ah);
+ tcg_temp_free_i64(bh);
+}
+
+static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g[4] = {
+ { .fno = gen_helper_gvec_vavg8, },
+ { .fno = gen_helper_gvec_vavg16, },
+ { .fni4 = gen_avg_i32, },
+ { .fni8 = gen_avg_i64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), &g[es]);
+ return DISAS_NEXT;
+}
+
+static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t0, a);
+ tcg_gen_extu_i32_i64(t1, b);
+ tcg_gen_add_i64(t0, t0, t1);
+ tcg_gen_addi_i64(t0, t0, 1);
+ tcg_gen_shri_i64(t0, t0, 1);
+ tcg_gen_extrl_i64_i32(d, t0);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+}
+
+static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
+{
+ TCGv_i64 dh = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+
+ tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
+ gen_addi2_i64(dl, dh, dl, dh, 1);
+ tcg_gen_extract2_i64(dl, dl, dh, 1);
+
+ tcg_temp_free_i64(dh);
+ tcg_temp_free_i64(zero);
+}
+
+static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g[4] = {
+ { .fno = gen_helper_gvec_vavgl8, },
+ { .fno = gen_helper_gvec_vavgl16, },
+ { .fni4 = gen_avgl_i32, },
+ { .fni8 = gen_avgl_i64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), &g[es]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 sum = tcg_temp_new_i32();
+ int i;
+
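+    /*
+     * End-around-carry accumulation: passing identical low and high halves
+     * to add2 makes the new sum equal sum + tmp + carry(sum + tmp).
+     */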
+ read_vec_element_i32(sum, get_field(s, v3), 1, ES_32);
+ for (i = 0; i < 4; i++) {
+ read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
+ tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
+ }
+ gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
+ write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(sum);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
+{
+ uint8_t es = get_field(s, m3);
+ const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ if (s->fields.op2 == 0xdb) {
+ es |= MO_SIGN;
+ }
+
+ o->in1 = tcg_temp_new_i64();
+ o->in2 = tcg_temp_new_i64();
+ read_vec_element_i64(o->in1, get_field(s, v1), enr, es);
+ read_vec_element_i64(o->in2, get_field(s, v2), enr, es);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ TCGCond cond = s->insn->data;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ tcg_gen_gvec_cmp(cond, es,
+ vec_full_reg_offset(get_field(s, v1)),
+ vec_full_reg_offset(get_field(s, v2)),
+ vec_full_reg_offset(get_field(s, v3)), 16, 16);
+ if (get_field(s, m5) & 0x1) {
+ TCGv_i64 low = tcg_temp_new_i64();
+ TCGv_i64 high = tcg_temp_new_i64();
+
+ read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
+ read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
+ gen_op_update2_cc_i64(s, CC_OP_VC, low, high);
+
+ tcg_temp_free_i64(low);
+ tcg_temp_free_i64(high);
+ }
+ return DISAS_NEXT;
+}
+
+static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_clzi_i32(d, a, 32);
+}
+
+static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_clzi_i64(d, a, 64);
+}
+
+static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ static const GVecGen2 g[4] = {
+ { .fno = gen_helper_gvec_vclz8, },
+ { .fno = gen_helper_gvec_vclz16, },
+ { .fni4 = gen_clz_i32, },
+ { .fni8 = gen_clz_i64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
+ return DISAS_NEXT;
+}
+
+static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
+{
+ tcg_gen_ctzi_i32(d, a, 32);
+}
+
+static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
+{
+ tcg_gen_ctzi_i64(d, a, 64);
+}
+
+static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ static const GVecGen2 g[4] = {
+ { .fno = gen_helper_gvec_vctz8, },
+ { .fno = gen_helper_gvec_vctz16, },
+ { .fni4 = gen_ctz_i32, },
+ { .fni8 = gen_ctz_i64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g[4] = {
+ { .fno = gen_helper_gvec_vgfm8, },
+ { .fno = gen_helper_gvec_vgfm16, },
+ { .fno = gen_helper_gvec_vgfm32, },
+ { .fno = gen_helper_gvec_vgfm64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), &g[es]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m5);
+ static const GVecGen4 g[4] = {
+ { .fno = gen_helper_gvec_vgfma8, },
+ { .fno = gen_helper_gvec_vgfma16, },
+ { .fno = gen_helper_gvec_vgfma32, },
+ { .fno = gen_helper_gvec_vgfma64, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+ gen_gvec_4(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4), &g[es]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t v3 = get_field(s, v3);
+ const uint8_t es = get_field(s, m4);
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields.op2) {
+ case 0xff:
+ gen_gvec_fn_3(smax, es, v1, v2, v3);
+ break;
+ case 0xfd:
+ gen_gvec_fn_3(umax, es, v1, v2, v3);
+ break;
+ case 0xfe:
+ gen_gvec_fn_3(smin, es, v1, v2, v3);
+ break;
+ case 0xfc:
+ gen_gvec_fn_3(umin, es, v1, v2, v3);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return DISAS_NEXT;
+}
+
+static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_mul_i32(t0, a, b);
+ tcg_gen_add_i32(d, t0, c);
+
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t0, a);
+ tcg_gen_ext_i32_i64(t1, b);
+ tcg_gen_ext_i32_i64(t2, c);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_add_i64(t0, t0, t2);
+ tcg_gen_extrh_i64_i32(d, t0);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t0, a);
+ tcg_gen_extu_i32_i64(t1, b);
+ tcg_gen_extu_i32_i64(t2, c);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_add_i64(t0, t0, t2);
+ tcg_gen_extrh_i64_i32(d, t0);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m5);
+ static const GVecGen4 g_vmal[3] = {
+ { .fno = gen_helper_gvec_vmal8, },
+ { .fno = gen_helper_gvec_vmal16, },
+ { .fni4 = gen_mal_i32, },
+ };
+ static const GVecGen4 g_vmah[3] = {
+ { .fno = gen_helper_gvec_vmah8, },
+ { .fno = gen_helper_gvec_vmah16, },
+ { .fni4 = gen_mah_i32, },
+ };
+ static const GVecGen4 g_vmalh[3] = {
+ { .fno = gen_helper_gvec_vmalh8, },
+ { .fno = gen_helper_gvec_vmalh16, },
+ { .fni4 = gen_malh_i32, },
+ };
+ static const GVecGen4 g_vmae[3] = {
+ { .fno = gen_helper_gvec_vmae8, },
+ { .fno = gen_helper_gvec_vmae16, },
+ { .fno = gen_helper_gvec_vmae32, },
+ };
+ static const GVecGen4 g_vmale[3] = {
+ { .fno = gen_helper_gvec_vmale8, },
+ { .fno = gen_helper_gvec_vmale16, },
+ { .fno = gen_helper_gvec_vmale32, },
+ };
+ static const GVecGen4 g_vmao[3] = {
+ { .fno = gen_helper_gvec_vmao8, },
+ { .fno = gen_helper_gvec_vmao16, },
+ { .fno = gen_helper_gvec_vmao32, },
+ };
+ static const GVecGen4 g_vmalo[3] = {
+ { .fno = gen_helper_gvec_vmalo8, },
+ { .fno = gen_helper_gvec_vmalo16, },
+ { .fno = gen_helper_gvec_vmalo32, },
+ };
+ const GVecGen4 *fn;
+
+ if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields.op2) {
+ case 0xaa:
+ fn = &g_vmal[es];
+ break;
+ case 0xab:
+ fn = &g_vmah[es];
+ break;
+ case 0xa9:
+ fn = &g_vmalh[es];
+ break;
+ case 0xae:
+ fn = &g_vmae[es];
+ break;
+ case 0xac:
+ fn = &g_vmale[es];
+ break;
+ case 0xaf:
+ fn = &g_vmao[es];
+ break;
+ case 0xad:
+ fn = &g_vmalo[es];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_gvec_4(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4), fn);
+ return DISAS_NEXT;
+}
+
+static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_muls2_i32(t, d, a, b);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_mulu2_i32(t, d, a, b);
+ tcg_temp_free_i32(t);
+}
+
+static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g_vmh[3] = {
+ { .fno = gen_helper_gvec_vmh8, },
+ { .fno = gen_helper_gvec_vmh16, },
+ { .fni4 = gen_mh_i32, },
+ };
+ static const GVecGen3 g_vmlh[3] = {
+ { .fno = gen_helper_gvec_vmlh8, },
+ { .fno = gen_helper_gvec_vmlh16, },
+ { .fni4 = gen_mlh_i32, },
+ };
+ static const GVecGen3 g_vme[3] = {
+ { .fno = gen_helper_gvec_vme8, },
+ { .fno = gen_helper_gvec_vme16, },
+ { .fno = gen_helper_gvec_vme32, },
+ };
+ static const GVecGen3 g_vmle[3] = {
+ { .fno = gen_helper_gvec_vmle8, },
+ { .fno = gen_helper_gvec_vmle16, },
+ { .fno = gen_helper_gvec_vmle32, },
+ };
+ static const GVecGen3 g_vmo[3] = {
+ { .fno = gen_helper_gvec_vmo8, },
+ { .fno = gen_helper_gvec_vmo16, },
+ { .fno = gen_helper_gvec_vmo32, },
+ };
+ static const GVecGen3 g_vmlo[3] = {
+ { .fno = gen_helper_gvec_vmlo8, },
+ { .fno = gen_helper_gvec_vmlo16, },
+ { .fno = gen_helper_gvec_vmlo32, },
+ };
+ const GVecGen3 *fn;
+
+ if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields.op2) {
+ case 0xa2:
+ gen_gvec_fn_3(mul, es, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+ case 0xa3:
+ fn = &g_vmh[es];
+ break;
+ case 0xa1:
+ fn = &g_vmlh[es];
+ break;
+ case 0xa6:
+ fn = &g_vme[es];
+ break;
+ case 0xa4:
+ fn = &g_vmle[es];
+ break;
+ case 0xa7:
+ fn = &g_vmo[es];
+ break;
+ case 0xa5:
+ fn = &g_vmlo[es];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 l1, h1, l2, h2;
+
+ if (get_field(s, m5) != ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ l1 = tcg_temp_new_i64();
+ h1 = tcg_temp_new_i64();
+ l2 = tcg_temp_new_i64();
+ h2 = tcg_temp_new_i64();
+
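+    /*
+     * v1 = (v2[0] * v3[0]) + (v2[1] * v3[1]) + v4 as an unsigned 128-bit
+     * sum; bit 3 of m6 doubles the even product, bit 2 the odd one.
+     */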
+    /* Multiply both even elements from v2 and v3 */
+ read_vec_element_i64(l1, get_field(s, v2), 0, ES_64);
+ read_vec_element_i64(h1, get_field(s, v3), 0, ES_64);
+ tcg_gen_mulu2_i64(l1, h1, l1, h1);
+ /* Shift result left by one (x2) if requested */
+ if (extract32(get_field(s, m6), 3, 1)) {
+ tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1);
+ }
+
+    /* Multiply both odd elements from v2 and v3 */
+ read_vec_element_i64(l2, get_field(s, v2), 1, ES_64);
+ read_vec_element_i64(h2, get_field(s, v3), 1, ES_64);
+ tcg_gen_mulu2_i64(l2, h2, l2, h2);
+ /* Shift result left by one (x2) if requested */
+ if (extract32(get_field(s, m6), 2, 1)) {
+ tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2);
+ }
+
+ /* Add both intermediate results */
+ tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);
+ /* Add whole v4 */
+ read_vec_element_i64(h2, get_field(s, v4), 0, ES_64);
+ read_vec_element_i64(l2, get_field(s, v4), 1, ES_64);
+ tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);
+
+ /* Store final result into v1. */
+ write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);
+
+ tcg_temp_free_i64(l1);
+ tcg_temp_free_i64(h1);
+ tcg_temp_free_i64(l2);
+ tcg_temp_free_i64(h2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(nand, ES_8, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m3);
+ static const GVecGen2 g[4] = {
+ { .fno = gen_helper_gvec_vpopct8, },
+ { .fno = gen_helper_gvec_vpopct16, },
+ { .fni4 = tcg_gen_ctpop_i32, },
+ { .fni8 = tcg_gen_ctpop_i64, },
+ };
+
+ if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
+ return DISAS_NEXT;
+}
+
+static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
+{
+ TCGv_i32 t = tcg_temp_new_i32();
+
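+    /* d = (rol(a, c) & b) | (d & ~b): rotate a, then insert under mask b */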
+ tcg_gen_rotli_i32(t, a, c & 31);
+ tcg_gen_and_i32(t, t, b);
+ tcg_gen_andc_i32(d, d, b);
+ tcg_gen_or_i32(d, d, t);
+
+ tcg_temp_free_i32(t);
+}
+
+static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_rotli_i64(t, a, c & 63);
+ tcg_gen_and_i64(t, t, b);
+ tcg_gen_andc_i64(d, d, b);
+ tcg_gen_or_i64(d, d, t);
+
+ tcg_temp_free_i64(t);
+}
+
+static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m5);
+ const uint8_t i4 = get_field(s, i4) &
+ (NUM_VEC_ELEMENT_BITS(es) - 1);
+ static const GVecGen3i g[4] = {
+ { .fno = gen_helper_gvec_verim8, },
+ { .fno = gen_helper_gvec_verim16, },
+ { .fni4 = gen_rim_i32,
+ .load_dest = true, },
+ { .fni8 = gen_rim_i64,
+ .load_dest = true, },
+ };
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_3i(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), i4, &g[es]);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t v3 = get_field(s, v3);
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (s->fields.op2) {
+ case 0x70:
+ gen_gvec_fn_3(shlv, es, v1, v2, v3);
+ break;
+ case 0x73:
+ gen_gvec_fn_3(rotlv, es, v1, v2, v3);
+ break;
+ case 0x7a:
+ gen_gvec_fn_3(sarv, es, v1, v2, v3);
+ break;
+ case 0x78:
+ gen_gvec_fn_3(shrv, es, v1, v2, v3);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t d2 = get_field(s, d2) &
+ (NUM_VEC_ELEMENT_BITS(es) - 1);
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v3 = get_field(s, v3);
+ TCGv_i32 shift;
+
+ if (es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
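+    /*
+     * With b2 == 0 the shift count is the immediate d2; otherwise it is
+     * taken from the computed address, reduced modulo the element width.
+     */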
+ if (likely(!get_field(s, b2))) {
+ switch (s->fields.op2) {
+ case 0x30:
+ gen_gvec_fn_2i(shli, es, v1, v3, d2);
+ break;
+ case 0x33:
+ gen_gvec_fn_2i(rotli, es, v1, v3, d2);
+ break;
+ case 0x3a:
+ gen_gvec_fn_2i(sari, es, v1, v3, d2);
+ break;
+ case 0x38:
+ gen_gvec_fn_2i(shri, es, v1, v3, d2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ shift = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(shift, o->addr1);
+ tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
+ switch (s->fields.op2) {
+ case 0x30:
+ gen_gvec_fn_2s(shls, es, v1, v3, shift);
+ break;
+ case 0x33:
+ gen_gvec_fn_2s(rotls, es, v1, v3, shift);
+ break;
+ case 0x3a:
+ gen_gvec_fn_2s(sars, es, v1, v3, shift);
+ break;
+ case 0x38:
+ gen_gvec_fn_2s(shrs, es, v1, v3, shift);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(shift);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 shift = tcg_temp_new_i64();
+
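+    /*
+     * VSL (0x74) uses the low three bits of byte 7 of v3 as a bit count;
+     * VSLB masks with 0x78, i.e. shifts by a whole number of bytes.
+     */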
+ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
+ if (s->fields.op2 == 0x74) {
+ tcg_gen_andi_i64(shift, shift, 0x7);
+ } else {
+ tcg_gen_andi_i64(shift, shift, 0x78);
+ }
+
+ gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
+ shift, 0, gen_helper_gvec_vsl);
+ tcg_temp_free_i64(shift);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
+{
+ const uint8_t i4 = get_field(s, i4) & 0xf;
+ const int left_shift = (i4 & 7) * 8;
+ const int right_shift = 64 - left_shift;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
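+    /*
+     * Shift the 32-byte concatenation v2:v3 left by i4 bytes: pick the
+     * three doublewords covering the result and funnel-shift each pair.
+     */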
+ if ((i4 & 8) == 0) {
+ read_vec_element_i64(t0, get_field(s, v2), 0, ES_64);
+ read_vec_element_i64(t1, get_field(s, v2), 1, ES_64);
+ read_vec_element_i64(t2, get_field(s, v3), 0, ES_64);
+ } else {
+ read_vec_element_i64(t0, get_field(s, v2), 1, ES_64);
+ read_vec_element_i64(t1, get_field(s, v3), 0, ES_64);
+ read_vec_element_i64(t2, get_field(s, v3), 1, ES_64);
+ }
+ tcg_gen_extract2_i64(t0, t1, t0, right_shift);
+ tcg_gen_extract2_i64(t1, t2, t1, right_shift);
+ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
+
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 shift = tcg_temp_new_i64();
+
+ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
+ if (s->fields.op2 == 0x7e) {
+ tcg_gen_andi_i64(shift, shift, 0x7);
+ } else {
+ tcg_gen_andi_i64(shift, shift, 0x78);
+ }
+
+ gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
+ shift, 0, gen_helper_gvec_vsra);
+ tcg_temp_free_i64(shift);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 shift = tcg_temp_new_i64();
+
+ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
+ if (s->fields.op2 == 0x7c) {
+ tcg_gen_andi_i64(shift, shift, 0x7);
+ } else {
+ tcg_gen_andi_i64(shift, shift, 0x78);
+ }
+
+ gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
+ shift, 0, gen_helper_gvec_vsrl);
+ tcg_temp_free_i64(shift);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+
+ if (es > ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ } else if (es == ES_128) {
+ gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+ }
+ gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2),
+ get_field(s, v3));
+ return DISAS_NEXT;
+}
+
+static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b);
+}
+
+static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b);
+}
+
+static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
+{
+ TCGv_i64 th = tcg_temp_new_i64();
+ TCGv_i64 tl = tcg_temp_new_i64();
+ TCGv_i64 zero = tcg_const_i64(0);
+
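+    /*
+     * Propagate the borrow of the low halves through the high halves; th
+     * ends up -1 if the full 128-bit subtraction borrows and 0 otherwise.
+     */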
+ tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
+ tcg_gen_andi_i64(th, th, 1);
+ tcg_gen_sub2_i64(tl, th, ah, zero, th, zero);
+ tcg_gen_sub2_i64(tl, th, tl, th, bh, zero);
+ /* "invert" the result: -1 -> 0; 0 -> 1 */
+ tcg_gen_addi_i64(dl, th, 1);
+ tcg_gen_mov_i64(dh, zero);
+
+ tcg_temp_free_i64(th);
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(zero);
+}
+
+static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ static const GVecGen3 g[4] = {
+ { .fno = gen_helper_gvec_vscbi8, },
+ { .fno = gen_helper_gvec_vscbi16, },
+ { .fni4 = gen_scbi_i32, },
+ { .fni8 = gen_scbi_i64, },
+ };
+
+ if (es > ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ } else if (es == ES_128) {
+ gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3));
+ return DISAS_NEXT;
+ }
+ gen_gvec_3(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), &g[es]);
+ return DISAS_NEXT;
+}
+
+static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
+ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
+{
+ TCGv_i64 tl = tcg_temp_new_i64();
+ TCGv_i64 th = tcg_temp_new_i64();
+
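+    /* a + ~b + carry == a - b - 1 + carry: reuse the add-with-carry logic */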
+ tcg_gen_not_i64(tl, bl);
+ tcg_gen_not_i64(th, bh);
+ gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(th);
+}
+
+static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, m5) != ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3),
+ get_field(s, v4));
+ return DISAS_NEXT;
+}
+
+static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
+ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
+{
+ TCGv_i64 th = tcg_temp_new_i64();
+ TCGv_i64 tl = tcg_temp_new_i64();
+
+ tcg_gen_not_i64(tl, bl);
+ tcg_gen_not_i64(th, bh);
+ gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
+
+ tcg_temp_free_i64(tl);
+ tcg_temp_free_i64(th);
+}
+
+static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
+{
+ if (get_field(s, m5) != ES_128) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1),
+ get_field(s, v2), get_field(s, v3),
+ get_field(s, v4));
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ TCGv_i64 sum, tmp;
+ uint8_t dst_idx;
+
+ if (es == ES_8 || es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ sum = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+ for (dst_idx = 0; dst_idx < 2; dst_idx++) {
+ uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2;
+ const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1;
+
+ read_vec_element_i64(sum, get_field(s, v3), max_idx, es);
+ for (; idx <= max_idx; idx++) {
+ read_vec_element_i64(tmp, get_field(s, v2), idx, es);
+ tcg_gen_add_i64(sum, sum, tmp);
+ }
+ write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
+ }
+ tcg_temp_free_i64(sum);
+ tcg_temp_free_i64(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1;
+ TCGv_i64 sumh, suml, zero, tmpl;
+ uint8_t idx;
+
+ if (es < ES_32 || es > ES_64) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ sumh = tcg_const_i64(0);
+ suml = tcg_temp_new_i64();
+ zero = tcg_const_i64(0);
+ tmpl = tcg_temp_new_i64();
+
+ read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
+ for (idx = 0; idx <= max_idx; idx++) {
+ read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
+ tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero);
+ }
+ write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
+ write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);
+
+ tcg_temp_free_i64(sumh);
+ tcg_temp_free_i64(suml);
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i64(tmpl);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ TCGv_i32 sum, tmp;
+ uint8_t dst_idx;
+
+ if (es > ES_16) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ sum = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+ for (dst_idx = 0; dst_idx < 4; dst_idx++) {
+ uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4;
+ const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1;
+
+ read_vec_element_i32(sum, get_field(s, v3), max_idx, es);
+ for (; idx <= max_idx; idx++) {
+ read_vec_element_i32(tmp, get_field(s, v2), idx, es);
+ tcg_gen_add_i32(sum, sum, tmp);
+ }
+ write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
+ }
+ tcg_temp_free_i32(sum);
+ tcg_temp_free_i32(tmp);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
+{
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
+ cpu_env, 0, gen_helper_gvec_vtm);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfae(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ static gen_helper_gvec_3 * const g[3] = {
+ gen_helper_gvec_vfae8,
+ gen_helper_gvec_vfae16,
+ gen_helper_gvec_vfae32,
+ };
+ static gen_helper_gvec_3_ptr * const g_cc[3] = {
+ gen_helper_gvec_vfae_cc8,
+ gen_helper_gvec_vfae_cc16,
+ gen_helper_gvec_vfae_cc32,
+ };
+
+    if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ if (extract32(m5, 0, 1)) {
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), cpu_env, m5, g_cc[es]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), m5, g[es]);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfee(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ static gen_helper_gvec_3 * const g[3] = {
+ gen_helper_gvec_vfee8,
+ gen_helper_gvec_vfee16,
+ gen_helper_gvec_vfee32,
+ };
+ static gen_helper_gvec_3_ptr * const g_cc[3] = {
+ gen_helper_gvec_vfee_cc8,
+ gen_helper_gvec_vfee_cc16,
+ gen_helper_gvec_vfee_cc32,
+ };
+
+ if (es > ES_32 || m5 & ~0x3) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ if (extract32(m5, 0, 1)) {
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), cpu_env, m5, g_cc[es]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), m5, g[es]);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ static gen_helper_gvec_3 * const g[3] = {
+ gen_helper_gvec_vfene8,
+ gen_helper_gvec_vfene16,
+ gen_helper_gvec_vfene32,
+ };
+ static gen_helper_gvec_3_ptr * const g_cc[3] = {
+ gen_helper_gvec_vfene_cc8,
+ gen_helper_gvec_vfene_cc16,
+ gen_helper_gvec_vfene_cc32,
+ };
+
+ if (es > ES_32 || m5 & ~0x3) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ if (extract32(m5, 0, 1)) {
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), cpu_env, m5, g_cc[es]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), m5, g[es]);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ static gen_helper_gvec_2 * const g[3] = {
+ gen_helper_gvec_vistr8,
+ gen_helper_gvec_vistr16,
+ gen_helper_gvec_vistr32,
+ };
+ static gen_helper_gvec_2_ptr * const g_cc[3] = {
+ gen_helper_gvec_vistr_cc8,
+ gen_helper_gvec_vistr_cc16,
+ gen_helper_gvec_vistr_cc32,
+ };
+
+ if (es > ES_32 || m5 & ~0x1) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ if (extract32(m5, 0, 1)) {
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
+ cpu_env, 0, g_cc[es]);
+ set_cc_static(s);
+ } else {
+ gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0,
+ g[es]);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t es = get_field(s, m5);
+ const uint8_t m6 = get_field(s, m6);
+ static gen_helper_gvec_4 * const g[3] = {
+ gen_helper_gvec_vstrc8,
+ gen_helper_gvec_vstrc16,
+ gen_helper_gvec_vstrc32,
+ };
+ static gen_helper_gvec_4 * const g_rt[3] = {
+ gen_helper_gvec_vstrc_rt8,
+ gen_helper_gvec_vstrc_rt16,
+ gen_helper_gvec_vstrc_rt32,
+ };
+ static gen_helper_gvec_4_ptr * const g_cc[3] = {
+ gen_helper_gvec_vstrc_cc8,
+ gen_helper_gvec_vstrc_cc16,
+ gen_helper_gvec_vstrc_cc32,
+ };
+ static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
+ gen_helper_gvec_vstrc_cc_rt8,
+ gen_helper_gvec_vstrc_cc_rt16,
+ gen_helper_gvec_vstrc_cc_rt32,
+ };
+
+ if (es > ES_32) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ if (extract32(m6, 0, 1)) {
+ if (extract32(m6, 2, 1)) {
+ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4),
+ cpu_env, m6, g_cc_rt[es]);
+ } else {
+ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4),
+ cpu_env, m6, g_cc[es]);
+ }
+ set_cc_static(s);
+ } else {
+ if (extract32(m6, 2, 1)) {
+ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4),
+ m6, g_rt[es]);
+ } else {
+ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4),
+ m6, g[es]);
+ }
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ gen_helper_gvec_3_ptr *fn = NULL;
+
+ switch (s->fields.op2) {
+ case 0xe3:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfa32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfa64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfa128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xe5:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfd32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfd64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfd128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xe7:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfm32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfm64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfm128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xe2:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfs32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfs64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfs128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fn || extract32(m5, 0, 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), cpu_env, m5, fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m3);
+ const uint8_t m4 = get_field(s, m4);
+ gen_helper_gvec_2_ptr *fn = NULL;
+
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_wfk32;
+ if (s->fields.op2 == 0xcb) {
+ fn = gen_helper_gvec_wfc32;
+ }
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_wfk64;
+ if (s->fields.op2 == 0xcb) {
+ fn = gen_helper_gvec_wfc64;
+ }
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_wfk128;
+ if (s->fields.op2 == 0xcb) {
+ fn = gen_helper_gvec_wfc128;
+ }
+ }
+ break;
+ default:
+ break;
+    }
+
+ if (!fn || m4) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ const uint8_t m6 = get_field(s, m6);
+ const bool cs = extract32(m6, 0, 1);
+ const bool sq = extract32(m5, 2, 1);
+ gen_helper_gvec_3_ptr *fn = NULL;
+
+ switch (s->fields.op2) {
+ case 0xe8:
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32;
+ break;
+ case FPF_LONG:
+ fn = cs ? gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64;
+ break;
+ case FPF_EXT:
+ fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xeb:
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32;
+ break;
+ case FPF_LONG:
+ fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64;
+ break;
+ case FPF_EXT:
+ fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xea:
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32;
+ break;
+ case FPF_LONG:
+ fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64;
+ break;
+ case FPF_EXT:
+ fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) ||
+ (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
+ cpu_env, m5, fn);
+ if (cs) {
+ set_cc_static(s);
+ }
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m3);
+ const uint8_t m4 = get_field(s, m4);
+ const uint8_t erm = get_field(s, m5);
+ gen_helper_gvec_2_ptr *fn = NULL;
+
+ switch (s->fields.op2) {
+ case 0xc3:
+ if (fpf == FPF_LONG) {
+ fn = gen_helper_gvec_vcdg64;
+ }
+ break;
+ case 0xc1:
+ if (fpf == FPF_LONG) {
+ fn = gen_helper_gvec_vcdlg64;
+ }
+ break;
+ case 0xc2:
+ if (fpf == FPF_LONG) {
+ fn = gen_helper_gvec_vcgd64;
+ }
+ break;
+ case 0xc0:
+ if (fpf == FPF_LONG) {
+ fn = gen_helper_gvec_vclgd64;
+ }
+ break;
+ case 0xc7:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfi32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfi64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfi128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0xc5:
+ switch (fpf) {
+ case FPF_LONG:
+ fn = gen_helper_gvec_vflr64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vflr128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
+ deposit32(m4, 4, 4, erm), fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m3);
+ const uint8_t m4 = get_field(s, m4);
+ gen_helper_gvec_2_ptr *fn = NULL;
+
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = gen_helper_gvec_vfll32;
+ break;
+ case FPF_LONG:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfll64;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!fn || extract32(m4, 0, 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfmax(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m4);
+ const uint8_t m6 = get_field(s, m6);
+ const uint8_t m5 = get_field(s, m5);
+ gen_helper_gvec_3_ptr *fn;
+
+ if (m6 == 5 || m6 == 6 || m6 == 7 || m6 > 13) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s->fields.op2 == 0xef) {
+ fn = gen_helper_gvec_vfmax32;
+ } else {
+ fn = gen_helper_gvec_vfmin32;
+ }
+ break;
+ case FPF_LONG:
+ if (s->fields.op2 == 0xef) {
+ fn = gen_helper_gvec_vfmax64;
+ } else {
+ fn = gen_helper_gvec_vfmin64;
+ }
+ break;
+ case FPF_EXT:
+ if (s->fields.op2 == 0xef) {
+ fn = gen_helper_gvec_vfmax128;
+ } else {
+ fn = gen_helper_gvec_vfmin128;
+ }
+ break;
+ default:
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
+ cpu_env, deposit32(m5, 4, 4, m6), fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
+{
+ const uint8_t m5 = get_field(s, m5);
+ const uint8_t fpf = get_field(s, m6);
+ gen_helper_gvec_4_ptr *fn = NULL;
+
+ switch (s->fields.op2) {
+ case 0x8f:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfma32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfma64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfma128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x8e:
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfms32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfms64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfms128;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x9f:
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = gen_helper_gvec_vfnma32;
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfnma64;
+ break;
+ case FPF_EXT:
+ fn = gen_helper_gvec_vfnma128;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 0x9e:
+ switch (fpf) {
+ case FPF_SHORT:
+ fn = gen_helper_gvec_vfnms32;
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfnms64;
+ break;
+ case FPF_EXT:
+ fn = gen_helper_gvec_vfnms128;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fn || extract32(m5, 0, 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
+ get_field(s, v3), get_field(s, v4), cpu_env, m5, fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
+{
+ const uint8_t v1 = get_field(s, v1);
+ const uint8_t v2 = get_field(s, v2);
+ const uint8_t fpf = get_field(s, m3);
+ const uint8_t m4 = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ const bool se = extract32(m4, 3, 1);
+ TCGv_i64 tmp;
+
+ if ((fpf != FPF_LONG && !s390_has_feat(S390_FEAT_VECTOR_ENH)) ||
+ extract32(m4, 0, 3) || m5 > 2) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ switch (fpf) {
+ case FPF_SHORT:
+ if (!se) {
+ switch (m5) {
+ case 0:
+ /* sign bit is inverted (complement) */
+ gen_gvec_fn_2i(xori, ES_32, v1, v2, 1ull << 31);
+ break;
+ case 1:
+ /* sign bit is set to one (negative) */
+ gen_gvec_fn_2i(ori, ES_32, v1, v2, 1ull << 31);
+ break;
+ case 2:
+ /* sign bit is set to zero (positive) */
+ gen_gvec_fn_2i(andi, ES_32, v1, v2, (1ull << 31) - 1);
+ break;
+ }
+ return DISAS_NEXT;
+ }
+ break;
+ case FPF_LONG:
+ if (!se) {
+ switch (m5) {
+ case 0:
+ /* sign bit is inverted (complement) */
+ gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
+ break;
+ case 1:
+ /* sign bit is set to one (negative) */
+ gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
+ break;
+ case 2:
+ /* sign bit is set to zero (positive) */
+ gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
+ break;
+ }
+ return DISAS_NEXT;
+ }
+ break;
+ case FPF_EXT:
+ /* Only a single element. */
+ break;
+ default:
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ /* With a single element, we are only interested in bit 0. */
+ tmp = tcg_temp_new_i64();
+ read_vec_element_i64(tmp, v2, 0, ES_64);
+ switch (m5) {
+ case 0:
+ /* sign bit is inverted (complement) */
+ tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
+ break;
+ case 1:
+ /* sign bit is set to one (negative) */
+ tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
+ break;
+ case 2:
+ /* sign bit is set to zero (positive) */
+ tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
+ break;
+ }
+ write_vec_element_i64(tmp, v1, 0, ES_64);
+
+ if (fpf == FPF_EXT) {
+ read_vec_element_i64(tmp, v2, 1, ES_64);
+ write_vec_element_i64(tmp, v1, 1, ES_64);
+ }
+
+ tcg_temp_free_i64(tmp);
+
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
+{
+ const uint8_t fpf = get_field(s, m3);
+ const uint8_t m4 = get_field(s, m4);
+ gen_helper_gvec_2_ptr *fn = NULL;
+
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfsq32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vfsq64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vfsq128;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!fn || extract32(m4, 0, 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
+ return DISAS_NEXT;
+}
+
+static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
+{
+ const uint16_t i3 = get_field(s, i3);
+ const uint8_t fpf = get_field(s, m4);
+ const uint8_t m5 = get_field(s, m5);
+ gen_helper_gvec_2_ptr *fn = NULL;
+
+ switch (fpf) {
+ case FPF_SHORT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vftci32;
+ }
+ break;
+ case FPF_LONG:
+ fn = gen_helper_gvec_vftci64;
+ break;
+ case FPF_EXT:
+ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
+ fn = gen_helper_gvec_vftci128;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!fn || extract32(m5, 0, 3)) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return DISAS_NORETURN;
+ }
+
+ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
+ deposit32(m5, 4, 12, i3), fn);
+ set_cc_static(s);
+ return DISAS_NEXT;
+}
diff --git a/target/s390x/tcg/vec.h b/target/s390x/tcg/vec.h
new file mode 100644
index 000000000..a6e361869
--- /dev/null
+++ b/target/s390x/tcg/vec.h
@@ -0,0 +1,141 @@
+/*
+ * QEMU TCG support -- s390x vector utilities
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef S390X_VEC_H
+#define S390X_VEC_H
+
+#include "tcg/tcg.h"
+
+typedef union S390Vector {
+ uint64_t doubleword[2];
+ uint32_t word[4];
+ uint16_t halfword[8];
+ uint8_t byte[16];
+} S390Vector;
+
+/*
+ * Each vector is stored as two 64bit host values. So when talking about
+ * byte/halfword/word numbers, we have to take care of proper translation
+ * between element numbers.
+ *
+ * Big Endian (target/possible host)
+ * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
+ * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
+ * W: [ 0][ 1] - [ 2][ 3]
+ * DW: [ 0] - [ 1]
+ *
+ * Little Endian (possible host)
+ * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
+ * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
+ * W: [ 1][ 0] - [ 3][ 2]
+ * DW: [ 0] - [ 1]
+ */
+#ifndef HOST_WORDS_BIGENDIAN
+#define H1(x) ((x) ^ 7)
+#define H2(x) ((x) ^ 3)
+#define H4(x) ((x) ^ 1)
+#else
+#define H1(x) (x)
+#define H2(x) (x)
+#define H4(x) (x)
+#endif
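+
+/*
+ * For example, on a little-endian host, target byte element 0 lives in host
+ * byte 7 of the first doubleword, so H1(0) == 7 and H1(7) == 0, and word
+ * element 1 maps to host word 0, i.e. H4(1) == 0. On a big-endian host all
+ * three macros are the identity.
+ */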
+
+static inline uint8_t s390_vec_read_element8(const S390Vector *v, uint8_t enr)
+{
+ g_assert(enr < 16);
+ return v->byte[H1(enr)];
+}
+
+static inline uint16_t s390_vec_read_element16(const S390Vector *v, uint8_t enr)
+{
+ g_assert(enr < 8);
+ return v->halfword[H2(enr)];
+}
+
+static inline uint32_t s390_vec_read_element32(const S390Vector *v, uint8_t enr)
+{
+ g_assert(enr < 4);
+ return v->word[H4(enr)];
+}
+
+static inline uint64_t s390_vec_read_element64(const S390Vector *v, uint8_t enr)
+{
+ g_assert(enr < 2);
+ return v->doubleword[enr];
+}
+
+static inline uint64_t s390_vec_read_element(const S390Vector *v, uint8_t enr,
+ uint8_t es)
+{
+ switch (es) {
+ case MO_8:
+ return s390_vec_read_element8(v, enr);
+ case MO_16:
+ return s390_vec_read_element16(v, enr);
+ case MO_32:
+ return s390_vec_read_element32(v, enr);
+ case MO_64:
+ return s390_vec_read_element64(v, enr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline void s390_vec_write_element8(S390Vector *v, uint8_t enr,
+ uint8_t data)
+{
+ g_assert(enr < 16);
+ v->byte[H1(enr)] = data;
+}
+
+static inline void s390_vec_write_element16(S390Vector *v, uint8_t enr,
+ uint16_t data)
+{
+ g_assert(enr < 8);
+ v->halfword[H2(enr)] = data;
+}
+
+static inline void s390_vec_write_element32(S390Vector *v, uint8_t enr,
+ uint32_t data)
+{
+ g_assert(enr < 4);
+ v->word[H4(enr)] = data;
+}
+
+static inline void s390_vec_write_element64(S390Vector *v, uint8_t enr,
+ uint64_t data)
+{
+ g_assert(enr < 2);
+ v->doubleword[enr] = data;
+}
+
+static inline void s390_vec_write_element(S390Vector *v, uint8_t enr,
+ uint8_t es, uint64_t data)
+{
+ switch (es) {
+ case MO_8:
+ s390_vec_write_element8(v, enr, data);
+ break;
+ case MO_16:
+ s390_vec_write_element16(v, enr, data);
+ break;
+ case MO_32:
+ s390_vec_write_element32(v, enr, data);
+ break;
+ case MO_64:
+ s390_vec_write_element64(v, enr, data);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#endif /* S390X_VEC_H */
diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c
new file mode 100644
index 000000000..1a7799347
--- /dev/null
+++ b/target/s390x/tcg/vec_fpu_helper.c
@@ -0,0 +1,1072 @@
+/*
+ * QEMU TCG support -- s390x vector floating point instruction support
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "vec.h"
+#include "tcg_s390x.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define VIC_INVALID 0x1
+#define VIC_DIVBYZERO 0x2
+#define VIC_OVERFLOW 0x3
+#define VIC_UNDERFLOW 0x4
+#define VIC_INEXACT 0x5
+
+/* returns the VXC. If the VXC is 0, there is no trap */
+static uint8_t check_ieee_exc(CPUS390XState *env, uint8_t enr, bool XxC,
+ uint8_t *vec_exc)
+{
+ uint8_t vece_exc = 0, trap_exc;
+ unsigned qemu_exc;
+
+ /* Retrieve and clear the softfloat exceptions */
+ qemu_exc = env->fpu_status.float_exception_flags;
+ if (qemu_exc == 0) {
+ return 0;
+ }
+ env->fpu_status.float_exception_flags = 0;
+
+ vece_exc = s390_softfloat_exc_to_ieee(qemu_exc);
+
+ /* Add them to the vector-wide s390x exception bits */
+ *vec_exc |= vece_exc;
+
+ /* Check for traps and construct the VXC */
+ trap_exc = vece_exc & env->fpc >> 24;
+ if (trap_exc) {
+ if (trap_exc & S390_IEEE_MASK_INVALID) {
+ return enr << 4 | VIC_INVALID;
+ } else if (trap_exc & S390_IEEE_MASK_DIVBYZERO) {
+ return enr << 4 | VIC_DIVBYZERO;
+ } else if (trap_exc & S390_IEEE_MASK_OVERFLOW) {
+ return enr << 4 | VIC_OVERFLOW;
+ } else if (trap_exc & S390_IEEE_MASK_UNDERFLOW) {
+ return enr << 4 | VIC_UNDERFLOW;
+ } else if (!XxC) {
+ g_assert(trap_exc & S390_IEEE_MASK_INEXACT);
+ /* inexact has lowest priority on traps */
+ return enr << 4 | VIC_INEXACT;
+ }
+ }
+ return 0;
+}
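+
+/*
+ * For example, an invalid operation that traps on element 2 yields a VXC of
+ * (2 << 4) | VIC_INVALID == 0x21: the element index is placed in the high
+ * nibble and the interruption code in the low nibble.
+ */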
+
+static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc,
+ uintptr_t retaddr)
+{
+ if (vxc) {
+ /* on traps, the fpc flags are not updated, instruction is suppressed */
+ tcg_s390_vector_exception(env, vxc, retaddr);
+ }
+ if (vec_exc) {
+ /* indicate exceptions for all elements combined */
+ env->fpc |= vec_exc << 16;
+ }
+}
+
+static float32 s390_vec_read_float32(const S390Vector *v, uint8_t enr)
+{
+ return make_float32(s390_vec_read_element32(v, enr));
+}
+
+static float64 s390_vec_read_float64(const S390Vector *v, uint8_t enr)
+{
+ return make_float64(s390_vec_read_element64(v, enr));
+}
+
+static float128 s390_vec_read_float128(const S390Vector *v)
+{
+ return make_float128(s390_vec_read_element64(v, 0),
+ s390_vec_read_element64(v, 1));
+}
+
+static void s390_vec_write_float32(S390Vector *v, uint8_t enr, float32 data)
+{
+ return s390_vec_write_element32(v, enr, data);
+}
+
+static void s390_vec_write_float64(S390Vector *v, uint8_t enr, float64 data)
+{
+ return s390_vec_write_element64(v, enr, data);
+}
+
+static void s390_vec_write_float128(S390Vector *v, float128 data)
+{
+ s390_vec_write_element64(v, 0, data.high);
+ s390_vec_write_element64(v, 1, data.low);
+}
+
+typedef float32 (*vop32_2_fn)(float32 a, float_status *s);
+static void vop32_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env,
+ bool s, bool XxC, uint8_t erm, vop32_2_fn fn,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i, old_mode;
+
+ old_mode = s390_swap_bfp_rounding_mode(env, erm);
+ for (i = 0; i < 4; i++) {
+ const float32 a = s390_vec_read_float32(v2, i);
+
+ s390_vec_write_float32(&tmp, i, fn(a, &env->fpu_status));
+ vxc = check_ieee_exc(env, i, XxC, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+typedef float64 (*vop64_2_fn)(float64 a, float_status *s);
+static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env,
+ bool s, bool XxC, uint8_t erm, vop64_2_fn fn,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i, old_mode;
+
+ old_mode = s390_swap_bfp_rounding_mode(env, erm);
+ for (i = 0; i < 2; i++) {
+ const float64 a = s390_vec_read_float64(v2, i);
+
+ s390_vec_write_float64(&tmp, i, fn(a, &env->fpu_status));
+ vxc = check_ieee_exc(env, i, XxC, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+typedef float128 (*vop128_2_fn)(float128 a, float_status *s);
+static void vop128_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env,
+ bool s, bool XxC, uint8_t erm, vop128_2_fn fn,
+ uintptr_t retaddr)
+{
+ const float128 a = s390_vec_read_float128(v2);
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int old_mode;
+
+ old_mode = s390_swap_bfp_rounding_mode(env, erm);
+ s390_vec_write_float128(&tmp, fn(a, &env->fpu_status));
+ vxc = check_ieee_exc(env, 0, XxC, &vec_exc);
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+static float64 vcdg64(float64 a, float_status *s)
+{
+ return int64_to_float64(a, s);
+}
+
+static float64 vcdlg64(float64 a, float_status *s)
+{
+ return uint64_to_float64(a, s);
+}
+
+static float64 vcgd64(float64 a, float_status *s)
+{
+ const float64 tmp = float64_to_int64(a, s);
+
+ return float64_is_any_nan(a) ? INT64_MIN : tmp;
+}
+
+static float64 vclgd64(float64 a, float_status *s)
+{
+ const float64 tmp = float64_to_uint64(a, s);
+
+ return float64_is_any_nan(a) ? 0 : tmp;
+}
+
+#define DEF_GVEC_VOP2_FN(NAME, FN, BITS) \
+void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, CPUS390XState *env, \
+ uint32_t desc) \
+{ \
+ const uint8_t erm = extract32(simd_data(desc), 4, 4); \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ const bool XxC = extract32(simd_data(desc), 2, 1); \
+ \
+ vop##BITS##_2(v1, v2, env, se, XxC, erm, FN, GETPC()); \
+}
+
+#define DEF_GVEC_VOP2_64(NAME) \
+DEF_GVEC_VOP2_FN(NAME, NAME##64, 64)
+
+#define DEF_GVEC_VOP2(NAME, OP) \
+DEF_GVEC_VOP2_FN(NAME, float32_##OP, 32) \
+DEF_GVEC_VOP2_FN(NAME, float64_##OP, 64) \
+DEF_GVEC_VOP2_FN(NAME, float128_##OP, 128)
+
+DEF_GVEC_VOP2_64(vcdg)
+DEF_GVEC_VOP2_64(vcdlg)
+DEF_GVEC_VOP2_64(vcgd)
+DEF_GVEC_VOP2_64(vclgd)
+DEF_GVEC_VOP2(vfi, round_to_int)
+DEF_GVEC_VOP2(vfsq, sqrt)
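+
+/*
+ * The simd_data layout decoded above mirrors what op_vcdg packs via
+ * deposit32(m4, 4, 4, erm): bit 2 is XxC, bit 3 the single-element flag and
+ * bits 4-7 the effective rounding mode.
+ */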
+
+typedef float32 (*vop32_3_fn)(float32 a, float32 b, float_status *s);
+static void vop32_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vop32_3_fn fn,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ const float32 a = s390_vec_read_float32(v2, i);
+ const float32 b = s390_vec_read_float32(v3, i);
+
+ s390_vec_write_float32(&tmp, i, fn(a, b, &env->fpu_status));
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+typedef float64 (*vop64_3_fn)(float64 a, float64 b, float_status *s);
+static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vop64_3_fn fn,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ const float64 a = s390_vec_read_float64(v2, i);
+ const float64 b = s390_vec_read_float64(v3, i);
+
+ s390_vec_write_float64(&tmp, i, fn(a, b, &env->fpu_status));
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+typedef float128 (*vop128_3_fn)(float128 a, float128 b, float_status *s);
+static void vop128_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vop128_3_fn fn,
+ uintptr_t retaddr)
+{
+ const float128 a = s390_vec_read_float128(v2);
+ const float128 b = s390_vec_read_float128(v3);
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+
+ s390_vec_write_float128(&tmp, fn(a, b, &env->fpu_status));
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+#define DEF_GVEC_VOP3_B(NAME, OP, BITS) \
+void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ \
+ vop##BITS##_3(v1, v2, v3, env, se, float##BITS##_##OP, GETPC()); \
+}
+
+#define DEF_GVEC_VOP3(NAME, OP) \
+DEF_GVEC_VOP3_B(NAME, OP, 32) \
+DEF_GVEC_VOP3_B(NAME, OP, 64) \
+DEF_GVEC_VOP3_B(NAME, OP, 128)
+
+DEF_GVEC_VOP3(vfa, add)
+DEF_GVEC_VOP3(vfs, sub)
+DEF_GVEC_VOP3(vfd, div)
+DEF_GVEC_VOP3(vfm, mul)
+
+static int wfc32(const S390Vector *v1, const S390Vector *v2,
+ CPUS390XState *env, bool signal, uintptr_t retaddr)
+{
+ /* only the zero-indexed elements are compared */
+ const float32 a = s390_vec_read_float32(v1, 0);
+ const float32 b = s390_vec_read_float32(v2, 0);
+ uint8_t vxc, vec_exc = 0;
+ int cmp;
+
+ if (signal) {
+ cmp = float32_compare(a, b, &env->fpu_status);
+ } else {
+ cmp = float32_compare_quiet(a, b, &env->fpu_status);
+ }
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+
+ return float_comp_to_cc(env, cmp);
+}
+
+static int wfc64(const S390Vector *v1, const S390Vector *v2,
+ CPUS390XState *env, bool signal, uintptr_t retaddr)
+{
+ /* only the zero-indexed elements are compared */
+ const float64 a = s390_vec_read_float64(v1, 0);
+ const float64 b = s390_vec_read_float64(v2, 0);
+ uint8_t vxc, vec_exc = 0;
+ int cmp;
+
+ if (signal) {
+ cmp = float64_compare(a, b, &env->fpu_status);
+ } else {
+ cmp = float64_compare_quiet(a, b, &env->fpu_status);
+ }
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+
+ return float_comp_to_cc(env, cmp);
+}
+
+static int wfc128(const S390Vector *v1, const S390Vector *v2,
+ CPUS390XState *env, bool signal, uintptr_t retaddr)
+{
+ /* only the zero-indexed elements are compared */
+ const float128 a = s390_vec_read_float128(v1);
+ const float128 b = s390_vec_read_float128(v2);
+ uint8_t vxc, vec_exc = 0;
+ int cmp;
+
+ if (signal) {
+ cmp = float128_compare(a, b, &env->fpu_status);
+ } else {
+ cmp = float128_compare_quiet(a, b, &env->fpu_status);
+ }
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+
+ return float_comp_to_cc(env, cmp);
+}
+
+#define DEF_GVEC_WFC_B(NAME, SIGNAL, BITS) \
+void HELPER(gvec_##NAME##BITS)(const void *v1, const void *v2, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ env->cc_op = wfc##BITS(v1, v2, env, SIGNAL, GETPC()); \
+}
+
+#define DEF_GVEC_WFC(NAME, SIGNAL) \
+ DEF_GVEC_WFC_B(NAME, SIGNAL, 32) \
+ DEF_GVEC_WFC_B(NAME, SIGNAL, 64) \
+ DEF_GVEC_WFC_B(NAME, SIGNAL, 128)
+
+DEF_GVEC_WFC(wfc, false)
+DEF_GVEC_WFC(wfk, true)
+
+typedef bool (*vfc32_fn)(float32 a, float32 b, float_status *status);
+static int vfc32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vfc32_fn fn, uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int match = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ const float32 a = s390_vec_read_float32(v2, i);
+ const float32 b = s390_vec_read_float32(v3, i);
+
+ /* swap the order of the parameters, so we can use existing functions */
+ if (fn(b, a, &env->fpu_status)) {
+ match++;
+ s390_vec_write_element32(&tmp, i, -1u);
+ }
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+ if (match) {
+ return s || match == 4 ? 0 : 1;
+ }
+ return 3;
+}
+
+typedef bool (*vfc64_fn)(float64 a, float64 b, float_status *status);
+static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int match = 0;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ const float64 a = s390_vec_read_float64(v2, i);
+ const float64 b = s390_vec_read_float64(v3, i);
+
+ /* swap the order of the parameters, so we can use existing functions */
+ if (fn(b, a, &env->fpu_status)) {
+ match++;
+ s390_vec_write_element64(&tmp, i, -1ull);
+ }
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+ if (match) {
+ return s || match == 2 ? 0 : 1;
+ }
+ return 3;
+}
+
+typedef bool (*vfc128_fn)(float128 a, float128 b, float_status *status);
+static int vfc128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ CPUS390XState *env, bool s, vfc128_fn fn, uintptr_t retaddr)
+{
+ const float128 a = s390_vec_read_float128(v2);
+ const float128 b = s390_vec_read_float128(v3);
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ bool match = false;
+
+ /* swap the order of the parameters, so we can use existing functions */
+ if (fn(b, a, &env->fpu_status)) {
+ match = true;
+ s390_vec_write_element64(&tmp, 0, -1ull);
+ s390_vec_write_element64(&tmp, 1, -1ull);
+ }
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+ return match ? 0 : 3;
+}
+
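+/*
+ * The value returned by vfc32/vfc64/vfc128 above is the condition code for
+ * the _cc helper variants below: 0 if all elements (or the single element)
+ * matched, 1 if only some matched, 3 if none did; cc 2 is never produced.
+ */
+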
+#define DEF_GVEC_VFC_B(NAME, OP, BITS) \
+void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ const bool sq = extract32(simd_data(desc), 2, 1); \
+ vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \
+ \
+ vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \
+} \
+ \
+void HELPER(gvec_##NAME##BITS##_cc)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ const bool sq = extract32(simd_data(desc), 2, 1); \
+ vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \
+ \
+ env->cc_op = vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \
+}
+
+#define DEF_GVEC_VFC(NAME, OP) \
+DEF_GVEC_VFC_B(NAME, OP, 32) \
+DEF_GVEC_VFC_B(NAME, OP, 64) \
+DEF_GVEC_VFC_B(NAME, OP, 128)
+
+DEF_GVEC_VFC(vfce, eq)
+DEF_GVEC_VFC(vfch, lt)
+DEF_GVEC_VFC(vfche, le)
+
+void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ const bool s = extract32(simd_data(desc), 3, 1);
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /* load from even element */
+ const float32 a = s390_vec_read_element32(v2, i * 2);
+ const uint64_t ret = float32_to_float64(a, &env->fpu_status);
+
+ s390_vec_write_element64(&tmp, i, ret);
+ /* indicate the source element */
+ vxc = check_ieee_exc(env, i * 2, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, GETPC());
+ *(S390Vector *)v1 = tmp;
+}
+
+void HELPER(gvec_vfll64)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ /* load from even element */
+ const float128 ret = float64_to_float128(s390_vec_read_float64(v2, 0),
+ &env->fpu_status);
+ uint8_t vxc, vec_exc = 0;
+
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, GETPC());
+ s390_vec_write_float128(v1, ret);
+}
+
+void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ const uint8_t erm = extract32(simd_data(desc), 4, 4);
+ const bool s = extract32(simd_data(desc), 3, 1);
+ const bool XxC = extract32(simd_data(desc), 2, 1);
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i, old_mode;
+
+ old_mode = s390_swap_bfp_rounding_mode(env, erm);
+ for (i = 0; i < 2; i++) {
+ float64 a = s390_vec_read_element64(v2, i);
+ uint32_t ret = float64_to_float32(a, &env->fpu_status);
+
+ /* place at even element */
+ s390_vec_write_element32(&tmp, i * 2, ret);
+ /* indicate the source element */
+ vxc = check_ieee_exc(env, i, XxC, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_ieee_exc(env, vxc, vec_exc, GETPC());
+ *(S390Vector *)v1 = tmp;
+}
+
+void HELPER(gvec_vflr128)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ const uint8_t erm = extract32(simd_data(desc), 4, 4);
+ const bool XxC = extract32(simd_data(desc), 2, 1);
+ uint8_t vxc, vec_exc = 0;
+ int old_mode;
+ float64 ret;
+
+ old_mode = s390_swap_bfp_rounding_mode(env, erm);
+ ret = float128_to_float64(s390_vec_read_float128(v2), &env->fpu_status);
+ vxc = check_ieee_exc(env, 0, XxC, &vec_exc);
+ s390_restore_bfp_rounding_mode(env, old_mode);
+ handle_ieee_exc(env, vxc, vec_exc, GETPC());
+
+ /* place at even element, odd element is unpredictable */
+ s390_vec_write_float64(v1, 0, ret);
+}
+
+static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ const S390Vector *v4, CPUS390XState *env, bool s, int flags,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ const float32 a = s390_vec_read_float32(v2, i);
+ const float32 b = s390_vec_read_float32(v3, i);
+ const float32 c = s390_vec_read_float32(v4, i);
+ float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status);
+
+ s390_vec_write_float32(&tmp, i, ret);
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ const S390Vector *v4, CPUS390XState *env, bool s, int flags,
+ uintptr_t retaddr)
+{
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ const float64 a = s390_vec_read_float64(v2, i);
+ const float64 b = s390_vec_read_float64(v3, i);
+ const float64 c = s390_vec_read_float64(v4, i);
+ const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status);
+
+ s390_vec_write_float64(&tmp, i, ret);
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (s || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+static void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3,
+ const S390Vector *v4, CPUS390XState *env, bool s, int flags,
+ uintptr_t retaddr)
+{
+ const float128 a = s390_vec_read_float128(v2);
+ const float128 b = s390_vec_read_float128(v3);
+ const float128 c = s390_vec_read_float128(v4);
+ uint8_t vxc, vec_exc = 0;
+ float128 ret;
+
+ ret = float128_muladd(a, b, c, flags, &env->fpu_status);
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ s390_vec_write_float128(v1, ret);
+}
+
+#define DEF_GVEC_VFMA_B(NAME, FLAGS, BITS) \
+void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, CPUS390XState *env, \
+ uint32_t desc) \
+{ \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ \
+ vfma##BITS(v1, v2, v3, v4, env, se, FLAGS, GETPC()); \
+}
+
+#define DEF_GVEC_VFMA(NAME, FLAGS) \
+ DEF_GVEC_VFMA_B(NAME, FLAGS, 32) \
+ DEF_GVEC_VFMA_B(NAME, FLAGS, 64) \
+ DEF_GVEC_VFMA_B(NAME, FLAGS, 128)
+
+DEF_GVEC_VFMA(vfma, 0)
+DEF_GVEC_VFMA(vfms, float_muladd_negate_c)
+DEF_GVEC_VFMA(vfnma, float_muladd_negate_result)
+DEF_GVEC_VFMA(vfnms, float_muladd_negate_c | float_muladd_negate_result)
+
+void HELPER(gvec_vftci32)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ uint16_t i3 = extract32(simd_data(desc), 4, 12);
+ bool s = extract32(simd_data(desc), 3, 1);
+ int i, match = 0;
+
+ for (i = 0; i < 4; i++) {
+ float32 a = s390_vec_read_float32(v2, i);
+
+ if (float32_dcmask(env, a) & i3) {
+ match++;
+ s390_vec_write_element32(v1, i, -1u);
+ } else {
+ s390_vec_write_element32(v1, i, 0);
+ }
+ if (s) {
+ break;
+ }
+ }
+
+ if (match == 4 || (s && match)) {
+ env->cc_op = 0;
+ } else if (match) {
+ env->cc_op = 1;
+ } else {
+ env->cc_op = 3;
+ }
+}
+
+void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ const uint16_t i3 = extract32(simd_data(desc), 4, 12);
+ const bool s = extract32(simd_data(desc), 3, 1);
+ int i, match = 0;
+
+ for (i = 0; i < 2; i++) {
+ const float64 a = s390_vec_read_float64(v2, i);
+
+ if (float64_dcmask(env, a) & i3) {
+ match++;
+ s390_vec_write_element64(v1, i, -1ull);
+ } else {
+ s390_vec_write_element64(v1, i, 0);
+ }
+ if (s) {
+ break;
+ }
+ }
+
+ if (match == 2 || (s && match)) {
+ env->cc_op = 0;
+ } else if (match) {
+ env->cc_op = 1;
+ } else {
+ env->cc_op = 3;
+ }
+}
+
+void HELPER(gvec_vftci128)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ const float128 a = s390_vec_read_float128(v2);
+ uint16_t i3 = extract32(simd_data(desc), 4, 12);
+
+ if (float128_dcmask(env, a) & i3) {
+ env->cc_op = 0;
+ s390_vec_write_element64(v1, 0, -1ull);
+ s390_vec_write_element64(v1, 1, -1ull);
+ } else {
+ env->cc_op = 3;
+ s390_vec_write_element64(v1, 0, 0);
+ s390_vec_write_element64(v1, 1, 0);
+ }
+}
+
+typedef enum S390MinMaxType {
+ S390_MINMAX_TYPE_IEEE = 0,
+ S390_MINMAX_TYPE_JAVA,
+ S390_MINMAX_TYPE_C_MACRO,
+ S390_MINMAX_TYPE_CPP,
+ S390_MINMAX_TYPE_F,
+} S390MinMaxType;
+
+typedef enum S390MinMaxRes {
+ S390_MINMAX_RES_MINMAX = 0,
+ S390_MINMAX_RES_A,
+ S390_MINMAX_RES_B,
+ S390_MINMAX_RES_SILENCE_A,
+ S390_MINMAX_RES_SILENCE_B,
+} S390MinMaxRes;
+
+static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b,
+ S390MinMaxType type, float_status *s)
+{
+ const bool neg_a = dcmask_a & DCMASK_NEGATIVE;
+ const bool nan_a = dcmask_a & DCMASK_NAN;
+ const bool nan_b = dcmask_b & DCMASK_NAN;
+
+ g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F);
+
+ if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) {
+ const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN;
+ const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN;
+
+ if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) {
+ s->float_exception_flags |= float_flag_invalid;
+ }
+ switch (type) {
+ case S390_MINMAX_TYPE_JAVA:
+ if (sig_a) {
+ return S390_MINMAX_RES_SILENCE_A;
+ } else if (sig_b) {
+ return S390_MINMAX_RES_SILENCE_B;
+ }
+ return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_F:
+ return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_C_MACRO:
+ s->float_exception_flags |= float_flag_invalid;
+ return S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_CPP:
+ s->float_exception_flags |= float_flag_invalid;
+ return S390_MINMAX_RES_A;
+ default:
+ g_assert_not_reached();
+ }
+ } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) {
+ switch (type) {
+ case S390_MINMAX_TYPE_JAVA:
+ return neg_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_C_MACRO:
+ return S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_F:
+ return !neg_a ? S390_MINMAX_RES_B : S390_MINMAX_RES_A;
+ case S390_MINMAX_TYPE_CPP:
+ return S390_MINMAX_RES_A;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return S390_MINMAX_RES_MINMAX;
+}
+
+static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b,
+ S390MinMaxType type, float_status *s)
+{
+ g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F);
+
+ if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) {
+ const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN;
+ const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN;
+ const bool nan_a = dcmask_a & DCMASK_NAN;
+ const bool nan_b = dcmask_b & DCMASK_NAN;
+
+ if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) {
+ s->float_exception_flags |= float_flag_invalid;
+ }
+ switch (type) {
+ case S390_MINMAX_TYPE_JAVA:
+ if (sig_a) {
+ return S390_MINMAX_RES_SILENCE_A;
+ } else if (sig_b) {
+ return S390_MINMAX_RES_SILENCE_B;
+ }
+ return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_F:
+ return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_C_MACRO:
+ s->float_exception_flags |= float_flag_invalid;
+ return S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_CPP:
+ s->float_exception_flags |= float_flag_invalid;
+ return S390_MINMAX_RES_A;
+ default:
+ g_assert_not_reached();
+ }
+ } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) {
+ const bool neg_a = dcmask_a & DCMASK_NEGATIVE;
+
+ switch (type) {
+ case S390_MINMAX_TYPE_JAVA:
+ case S390_MINMAX_TYPE_F:
+ return neg_a ? S390_MINMAX_RES_B : S390_MINMAX_RES_A;
+ case S390_MINMAX_TYPE_C_MACRO:
+ return S390_MINMAX_RES_B;
+ case S390_MINMAX_TYPE_CPP:
+ return S390_MINMAX_RES_A;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return S390_MINMAX_RES_MINMAX;
+}
+
+static S390MinMaxRes vfminmax_res(uint16_t dcmask_a, uint16_t dcmask_b,
+ S390MinMaxType type, bool is_min,
+ float_status *s)
+{
+ return is_min ? vfmin_res(dcmask_a, dcmask_b, type, s) :
+ vfmax_res(dcmask_a, dcmask_b, type, s);
+}
+
+static void vfminmax32(S390Vector *v1, const S390Vector *v2,
+ const S390Vector *v3, CPUS390XState *env,
+ S390MinMaxType type, bool is_min, bool is_abs, bool se,
+ uintptr_t retaddr)
+{
+ float_status *s = &env->fpu_status;
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ float32 a = s390_vec_read_float32(v2, i);
+ float32 b = s390_vec_read_float32(v3, i);
+ float32 result;
+
+ if (type != S390_MINMAX_TYPE_IEEE) {
+ S390MinMaxRes res;
+
+ if (is_abs) {
+ a = float32_abs(a);
+ b = float32_abs(b);
+ }
+
+ res = vfminmax_res(float32_dcmask(env, a), float32_dcmask(env, b),
+ type, is_min, s);
+ switch (res) {
+ case S390_MINMAX_RES_MINMAX:
+ result = is_min ? float32_min(a, b, s) : float32_max(a, b, s);
+ break;
+ case S390_MINMAX_RES_A:
+ result = a;
+ break;
+ case S390_MINMAX_RES_B:
+ result = b;
+ break;
+ case S390_MINMAX_RES_SILENCE_A:
+ result = float32_silence_nan(a, s);
+ break;
+ case S390_MINMAX_RES_SILENCE_B:
+ result = float32_silence_nan(b, s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else if (!is_abs) {
+ result = is_min ? float32_minnum(a, b, &env->fpu_status) :
+ float32_maxnum(a, b, &env->fpu_status);
+ } else {
+ result = is_min ? float32_minnummag(a, b, &env->fpu_status) :
+ float32_maxnummag(a, b, &env->fpu_status);
+ }
+
+ s390_vec_write_float32(&tmp, i, result);
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (se || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+static void vfminmax64(S390Vector *v1, const S390Vector *v2,
+ const S390Vector *v3, CPUS390XState *env,
+ S390MinMaxType type, bool is_min, bool is_abs, bool se,
+ uintptr_t retaddr)
+{
+ float_status *s = &env->fpu_status;
+ uint8_t vxc, vec_exc = 0;
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ float64 a = s390_vec_read_float64(v2, i);
+ float64 b = s390_vec_read_float64(v3, i);
+ float64 result;
+
+ if (type != S390_MINMAX_TYPE_IEEE) {
+ S390MinMaxRes res;
+
+ if (is_abs) {
+ a = float64_abs(a);
+ b = float64_abs(b);
+ }
+
+ res = vfminmax_res(float64_dcmask(env, a), float64_dcmask(env, b),
+ type, is_min, s);
+ switch (res) {
+ case S390_MINMAX_RES_MINMAX:
+ result = is_min ? float64_min(a, b, s) : float64_max(a, b, s);
+ break;
+ case S390_MINMAX_RES_A:
+ result = a;
+ break;
+ case S390_MINMAX_RES_B:
+ result = b;
+ break;
+ case S390_MINMAX_RES_SILENCE_A:
+ result = float64_silence_nan(a, s);
+ break;
+ case S390_MINMAX_RES_SILENCE_B:
+ result = float64_silence_nan(b, s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else if (!is_abs) {
+ result = is_min ? float64_minnum(a, b, &env->fpu_status) :
+ float64_maxnum(a, b, &env->fpu_status);
+ } else {
+ result = is_min ? float64_minnummag(a, b, &env->fpu_status) :
+ float64_maxnummag(a, b, &env->fpu_status);
+ }
+
+ s390_vec_write_float64(&tmp, i, result);
+ vxc = check_ieee_exc(env, i, false, &vec_exc);
+ if (se || vxc) {
+ break;
+ }
+ }
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ *v1 = tmp;
+}
+
+static void vfminmax128(S390Vector *v1, const S390Vector *v2,
+ const S390Vector *v3, CPUS390XState *env,
+ S390MinMaxType type, bool is_min, bool is_abs, bool se,
+ uintptr_t retaddr)
+{
+ float128 a = s390_vec_read_float128(v2);
+ float128 b = s390_vec_read_float128(v3);
+ float_status *s = &env->fpu_status;
+ uint8_t vxc, vec_exc = 0;
+ float128 result;
+
+ if (type != S390_MINMAX_TYPE_IEEE) {
+ S390MinMaxRes res;
+
+ if (is_abs) {
+ a = float128_abs(a);
+ b = float128_abs(b);
+ }
+
+ res = vfminmax_res(float128_dcmask(env, a), float128_dcmask(env, b),
+ type, is_min, s);
+ switch (res) {
+ case S390_MINMAX_RES_MINMAX:
+ result = is_min ? float128_min(a, b, s) : float128_max(a, b, s);
+ break;
+ case S390_MINMAX_RES_A:
+ result = a;
+ break;
+ case S390_MINMAX_RES_B:
+ result = b;
+ break;
+ case S390_MINMAX_RES_SILENCE_A:
+ result = float128_silence_nan(a, s);
+ break;
+ case S390_MINMAX_RES_SILENCE_B:
+ result = float128_silence_nan(b, s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else if (!is_abs) {
+ result = is_min ? float128_minnum(a, b, &env->fpu_status) :
+ float128_maxnum(a, b, &env->fpu_status);
+ } else {
+ result = is_min ? float128_minnummag(a, b, &env->fpu_status) :
+ float128_maxnummag(a, b, &env->fpu_status);
+ }
+
+ vxc = check_ieee_exc(env, 0, false, &vec_exc);
+ handle_ieee_exc(env, vxc, vec_exc, retaddr);
+ s390_vec_write_float128(v1, result);
+}
+
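+/*
+ * The m6 field of VECTOR FP (MINIMUM|MAXIMUM) is passed in bits 4-7 of
+ * simd_data; bit 3 of that field selects the absolute-value variants, which
+ * the macro below strips off into is_abs before dispatching on the type.
+ */
+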
+#define DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, BITS) \
+void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool se = extract32(simd_data(desc), 3, 1); \
+ uint8_t type = extract32(simd_data(desc), 4, 4); \
+ bool is_abs = false; \
+ \
+ if (type >= 8) { \
+ is_abs = true; \
+ type -= 8; \
+ } \
+ \
+ vfminmax##BITS(v1, v2, v3, env, type, IS_MIN, is_abs, se, GETPC()); \
+}
+
+#define DEF_GVEC_VFMINMAX(NAME, IS_MIN) \
+ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 32) \
+ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 64) \
+ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 128)
+
+DEF_GVEC_VFMINMAX(vfmax, false)
+DEF_GVEC_VFMINMAX(vfmin, true)
diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c
new file mode 100644
index 000000000..ededf13cf
--- /dev/null
+++ b/target/s390x/tcg/vec_helper.c
@@ -0,0 +1,214 @@
+/*
+ * QEMU TCG support -- s390x vector support instructions
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "vec.h"
+#include "tcg/tcg.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+
+void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3,
+ uint32_t desc)
+{
+ S390Vector tmp = {};
+ uint16_t result = 0;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const uint8_t bit_nr = s390_vec_read_element8(v3, i);
+ uint16_t bit;
+
+ if (bit_nr >= 128) {
+ continue;
+ }
+ bit = (s390_vec_read_element8(v2, bit_nr / 8)
+ >> (7 - (bit_nr % 8))) & 1;
+ result |= (bit << (15 - i));
+ }
+ s390_vec_write_element16(&tmp, 3, result);
+ *(S390Vector *)v1 = tmp;
+}
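+
+/*
+ * For example, if every selector in v3 is 0, each result bit is the
+ * most-significant bit of byte 0 of v2; the 16-bit result lands in halfword
+ * element 3 of v1 and all other elements are zeroed.
+ */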
+
+void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes)
+{
+ if (likely(bytes >= 16)) {
+ uint64_t t0, t1;
+
+ t0 = cpu_ldq_data_ra(env, addr, GETPC());
+ addr = wrap_address(env, addr + 8);
+ t1 = cpu_ldq_data_ra(env, addr, GETPC());
+ s390_vec_write_element64(v1, 0, t0);
+ s390_vec_write_element64(v1, 1, t1);
+ } else {
+ S390Vector tmp = {};
+ int i;
+
+ for (i = 0; i < bytes; i++) {
+ uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC());
+
+ s390_vec_write_element8(&tmp, i, byte);
+ addr = wrap_address(env, addr + 1);
+ }
+ *(S390Vector *)v1 = tmp;
+ }
+}
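+
+/*
+ * For example, with bytes == 4 only the first four bytes are read from
+ * memory and the remaining twelve bytes of the destination are zeroed.
+ */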
+
+#define DEF_VPK_HFN(BITS, TBITS) \
+typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *); \
+static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2, \
+ const S390Vector *v3, vpk##BITS##_fn fn) \
+{ \
+ int i, saturated = 0; \
+ S390Vector tmp; \
+ \
+ for (i = 0; i < (128 / TBITS); i++) { \
+ uint##BITS##_t src; \
+ \
+ if (i < (128 / BITS)) { \
+ src = s390_vec_read_element##BITS(v2, i); \
+ } else { \
+ src = s390_vec_read_element##BITS(v3, i - (128 / BITS)); \
+ } \
+ s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated)); \
+ } \
+ *v1 = tmp; \
+ return saturated; \
+}
+DEF_VPK_HFN(64, 32)
+DEF_VPK_HFN(32, 16)
+DEF_VPK_HFN(16, 8)
+
+#define DEF_VPK(BITS, TBITS) \
+static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated) \
+{ \
+ return src; \
+} \
+void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e); \
+}
+DEF_VPK(64, 32)
+DEF_VPK(32, 16)
+DEF_VPK(16, 8)
+
+#define DEF_VPKS(BITS, TBITS) \
+static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated) \
+{ \
+ if ((int##BITS##_t)src > INT##TBITS##_MAX) { \
+ (*saturated)++; \
+ return INT##TBITS##_MAX; \
+ } else if ((int##BITS##_t)src < INT##TBITS##_MIN) { \
+ (*saturated)++; \
+ return INT##TBITS##_MIN; \
+ } \
+ return src; \
+} \
+void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \
+} \
+void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ int saturated = vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \
+ \
+ if (saturated == (128 / TBITS)) { \
+ env->cc_op = 3; \
+ } else if (saturated) { \
+ env->cc_op = 1; \
+ } else { \
+ env->cc_op = 0; \
+ } \
+}
+DEF_VPKS(64, 32)
+DEF_VPKS(32, 16)
+DEF_VPKS(16, 8)
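+
+/*
+ * For example, packing the doubleword 0x100000000 with signed saturation
+ * yields INT32_MAX and counts one saturated element; the _cc variant then
+ * sets cc 1 if only some of the result elements saturated and cc 3 if all
+ * of them did.
+ */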
+
+#define DEF_VPKLS(BITS, TBITS) \
+static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated) \
+{ \
+ if (src > UINT##TBITS##_MAX) { \
+ (*saturated)++; \
+ return UINT##TBITS##_MAX; \
+ } \
+ return src; \
+} \
+void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \
+} \
+void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \
+ \
+ if (saturated == (128 / TBITS)) { \
+ env->cc_op = 3; \
+ } else if (saturated) { \
+ env->cc_op = 1; \
+ } else { \
+ env->cc_op = 0; \
+ } \
+}
+DEF_VPKLS(64, 32)
+DEF_VPKLS(32, 16)
+DEF_VPKLS(16, 8)
+
+void HELPER(gvec_vperm)(void *v1, const void *v2, const void *v3,
+ const void *v4, uint32_t desc)
+{
+ S390Vector tmp;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f;
+ uint8_t byte;
+
+ if (selector < 16) {
+ byte = s390_vec_read_element8(v2, selector);
+ } else {
+ byte = s390_vec_read_element8(v3, selector - 16);
+ }
+ s390_vec_write_element8(&tmp, i, byte);
+ }
+ *(S390Vector *)v1 = tmp;
+}
+
+void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr,
+ uint64_t bytes)
+{
+ /* Probe write access before actually modifying memory */
+ probe_write_access(env, addr, bytes, GETPC());
+
+ if (likely(bytes >= 16)) {
+ cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC());
+ addr = wrap_address(env, addr + 8);
+ cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC());
+ } else {
+ int i;
+
+ for (i = 0; i < bytes; i++) {
+ uint8_t byte = s390_vec_read_element8(v1, i);
+
+ cpu_stb_data_ra(env, addr, byte, GETPC());
+ addr = wrap_address(env, addr + 1);
+ }
+ }
+}
diff --git a/target/s390x/tcg/vec_int_helper.c b/target/s390x/tcg/vec_int_helper.c
new file mode 100644
index 000000000..5561b3ed9
--- /dev/null
+++ b/target/s390x/tcg/vec_int_helper.c
@@ -0,0 +1,587 @@
+/*
+ * QEMU TCG support -- s390x vector integer instruction support
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "vec.h"
+#include "exec/helper-proto.h"
+#include "tcg/tcg-gvec-desc.h"
+
+static bool s390_vec_is_zero(const S390Vector *v)
+{
+ return !v->doubleword[0] && !v->doubleword[1];
+}
+
+static void s390_vec_xor(S390Vector *res, const S390Vector *a,
+ const S390Vector *b)
+{
+ res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0];
+ res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1];
+}
+
+static void s390_vec_and(S390Vector *res, const S390Vector *a,
+ const S390Vector *b)
+{
+ res->doubleword[0] = a->doubleword[0] & b->doubleword[0];
+ res->doubleword[1] = a->doubleword[1] & b->doubleword[1];
+}
+
+static bool s390_vec_equal(const S390Vector *a, const S390Vector *b)
+{
+ return a->doubleword[0] == b->doubleword[0] &&
+ a->doubleword[1] == b->doubleword[1];
+}
+
+static void s390_vec_shl(S390Vector *d, const S390Vector *a, uint64_t count)
+{
+ uint64_t tmp;
+
+ g_assert(count < 128);
+ if (count == 0) {
+ d->doubleword[0] = a->doubleword[0];
+ d->doubleword[1] = a->doubleword[1];
+ } else if (count == 64) {
+ d->doubleword[0] = a->doubleword[1];
+ d->doubleword[1] = 0;
+ } else if (count < 64) {
+ tmp = extract64(a->doubleword[1], 64 - count, count);
+ d->doubleword[1] = a->doubleword[1] << count;
+ d->doubleword[0] = (a->doubleword[0] << count) | tmp;
+ } else {
+ d->doubleword[0] = a->doubleword[1] << (count - 64);
+ d->doubleword[1] = 0;
+ }
+}
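+
+/*
+ * For example, with a = {0x0, 1ull << 63}, s390_vec_shl(d, a, 1) carries
+ * the single set bit across the doubleword boundary, giving d = {0x1, 0x0}.
+ */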
+
+static void s390_vec_sar(S390Vector *d, const S390Vector *a, uint64_t count)
+{
+ uint64_t tmp;
+
+ if (count == 0) {
+ d->doubleword[0] = a->doubleword[0];
+ d->doubleword[1] = a->doubleword[1];
+ } else if (count == 64) {
+ tmp = (int64_t)a->doubleword[0] >> 63;
+ d->doubleword[1] = a->doubleword[0];
+ d->doubleword[0] = tmp;
+ } else if (count < 64) {
+ tmp = a->doubleword[1] >> count;
+ d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]);
+ d->doubleword[0] = (int64_t)a->doubleword[0] >> count;
+ } else {
+ tmp = (int64_t)a->doubleword[0] >> 63;
+ d->doubleword[1] = (int64_t)a->doubleword[0] >> (count - 64);
+ d->doubleword[0] = tmp;
+ }
+}
+
+static void s390_vec_shr(S390Vector *d, const S390Vector *a, uint64_t count)
+{
+ uint64_t tmp;
+
+ g_assert(count < 128);
+ if (count == 0) {
+ d->doubleword[0] = a->doubleword[0];
+ d->doubleword[1] = a->doubleword[1];
+ } else if (count == 64) {
+ d->doubleword[1] = a->doubleword[0];
+ d->doubleword[0] = 0;
+ } else if (count < 64) {
+ tmp = a->doubleword[1] >> count;
+ d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]);
+ d->doubleword[0] = a->doubleword[0] >> count;
+ } else {
+ d->doubleword[1] = a->doubleword[0] >> (count - 64);
+ d->doubleword[0] = 0;
+ }
+}
+
+#define DEF_VAVG(BITS)                                                         \
+void HELPER(gvec_vavg##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \
+ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \
+ } \
+}
+DEF_VAVG(8)
+DEF_VAVG(16)
+
+#define DEF_VAVGL(BITS) \
+void HELPER(gvec_vavgl##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \
+ } \
+}
+DEF_VAVGL(8)
+DEF_VAVGL(16)
+
+#define DEF_VCLZ(BITS) \
+void HELPER(gvec_vclz##BITS)(void *v1, const void *v2, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, clz32(a) - 32 + BITS); \
+ } \
+}
+DEF_VCLZ(8)
+DEF_VCLZ(16)
+
+#define DEF_VCTZ(BITS) \
+void HELPER(gvec_vctz##BITS)(void *v1, const void *v2, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, a ? ctz32(a) : BITS); \
+ } \
+}
+DEF_VCTZ(8)
+DEF_VCTZ(16)
+
+/* like binary multiplication, but XOR instead of addition */
+#define DEF_GALOIS_MULTIPLY(BITS, TBITS) \
+static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \
+ uint##TBITS##_t b) \
+{ \
+ uint##TBITS##_t res = 0; \
+ \
+ while (b) { \
+ if (b & 0x1) { \
+ res = res ^ a; \
+ } \
+ a = a << 1; \
+ b = b >> 1; \
+ } \
+ return res; \
+}
+DEF_GALOIS_MULTIPLY(8, 16)
+DEF_GALOIS_MULTIPLY(16, 32)
+DEF_GALOIS_MULTIPLY(32, 64)
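+
+/*
+ * Worked example: galois_multiply8(0x3, 0x3) is the carry-less product
+ * (x + 1) * (x + 1) = x^2 + 1 = 0x5; the partial products 0x3 and 0x6 are
+ * combined with XOR instead of addition, so no carries propagate.
+ */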
+
+static S390Vector galois_multiply64(uint64_t a, uint64_t b)
+{
+ S390Vector res = {};
+ S390Vector va = {
+ .doubleword[1] = a,
+ };
+ S390Vector vb = {
+ .doubleword[1] = b,
+ };
+
+ while (!s390_vec_is_zero(&vb)) {
+ if (vb.doubleword[1] & 0x1) {
+ s390_vec_xor(&res, &res, &va);
+ }
+ s390_vec_shl(&va, &va, 1);
+ s390_vec_shr(&vb, &vb, 1);
+ }
+ return res;
+}
+
+#define DEF_VGFM(BITS, TBITS) \
+void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / TBITS); i++) { \
+ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \
+ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \
+ uint##TBITS##_t d = galois_multiply##BITS(a, b); \
+ \
+ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \
+ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \
+        d = d ^ galois_multiply##BITS(a, b);                                   \
+ s390_vec_write_element##TBITS(v1, i, d); \
+ } \
+}
+DEF_VGFM(8, 16)
+DEF_VGFM(16, 32)
+DEF_VGFM(32, 64)
+
+void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3,
+ uint32_t desc)
+{
+ S390Vector tmp1, tmp2;
+ uint64_t a, b;
+
+ a = s390_vec_read_element64(v2, 0);
+ b = s390_vec_read_element64(v3, 0);
+ tmp1 = galois_multiply64(a, b);
+ a = s390_vec_read_element64(v2, 1);
+ b = s390_vec_read_element64(v3, 1);
+ tmp2 = galois_multiply64(a, b);
+ s390_vec_xor(v1, &tmp1, &tmp2);
+}
+
+#define DEF_VGFMA(BITS, TBITS) \
+void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / TBITS); i++) { \
+ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \
+ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \
+ uint##TBITS##_t d = galois_multiply##BITS(a, b); \
+ \
+ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \
+ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \
+        d = d ^ galois_multiply##BITS(a, b);                                   \
+ d = d ^ s390_vec_read_element##TBITS(v4, i); \
+ s390_vec_write_element##TBITS(v1, i, d); \
+ } \
+}
+DEF_VGFMA(8, 16)
+DEF_VGFMA(16, 32)
+DEF_VGFMA(32, 64)
+
+void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3,
+ const void *v4, uint32_t desc)
+{
+ S390Vector tmp1, tmp2;
+ uint64_t a, b;
+
+ a = s390_vec_read_element64(v2, 0);
+ b = s390_vec_read_element64(v3, 0);
+ tmp1 = galois_multiply64(a, b);
+ a = s390_vec_read_element64(v2, 1);
+ b = s390_vec_read_element64(v3, 1);
+ tmp2 = galois_multiply64(a, b);
+ s390_vec_xor(&tmp1, &tmp1, &tmp2);
+ s390_vec_xor(v1, &tmp1, v4);
+}
+
+#define DEF_VMAL(BITS) \
+void HELPER(gvec_vmal##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \
+ const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, a * b + c); \
+ } \
+}
+DEF_VMAL(8)
+DEF_VMAL(16)
+
+#define DEF_VMAH(BITS) \
+void HELPER(gvec_vmah##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \
+ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \
+ const int32_t c = (int##BITS##_t)s390_vec_read_element##BITS(v4, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \
+ } \
+}
+DEF_VMAH(8)
+DEF_VMAH(16)
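+
+/*
+ * For example, the 8-bit variant computes the product in 32-bit arithmetic
+ * and shifts right by 8, so a == b == 0x7f with c == 0 yields
+ * (127 * 127) >> 8 == 0x3f.
+ */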
+
+#define DEF_VMALH(BITS) \
+void HELPER(gvec_vmalh##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \
+ const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \
+ } \
+}
+DEF_VMALH(8)
+DEF_VMALH(16)
+
+#define DEF_VMAE(BITS, TBITS) \
+void HELPER(gvec_vmae##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \
+ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b + c); \
+ } \
+}
+DEF_VMAE(8, 16)
+DEF_VMAE(16, 32)
+DEF_VMAE(32, 64)
+
+#define DEF_VMALE(BITS, TBITS) \
+void HELPER(gvec_vmale##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \
+ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \
+ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \
+ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b + c); \
+ } \
+}
+DEF_VMALE(8, 16)
+DEF_VMALE(16, 32)
+DEF_VMALE(32, 64)
+
+#define DEF_VMAO(BITS, TBITS) \
+void HELPER(gvec_vmao##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \
+ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b + c); \
+ } \
+}
+DEF_VMAO(8, 16)
+DEF_VMAO(16, 32)
+DEF_VMAO(32, 64)
+
+#define DEF_VMALO(BITS, TBITS) \
+void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \
+ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \
+ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \
+ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b + c); \
+ } \
+}
+DEF_VMALO(8, 16)
+DEF_VMALO(16, 32)
+DEF_VMALO(32, 64)
+
+#define DEF_VMH(BITS) \
+void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \
+ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \
+ } \
+}
+DEF_VMH(8)
+DEF_VMH(16)
+
+#define DEF_VMLH(BITS) \
+void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+        const uint32_t a = s390_vec_read_element##BITS(v2, i);               \
+        const uint32_t b = s390_vec_read_element##BITS(v3, i);               \
+ \
+ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \
+ } \
+}
+DEF_VMLH(8)
+DEF_VMLH(16)
+
+#define DEF_VME(BITS, TBITS) \
+void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b); \
+ } \
+}
+DEF_VME(8, 16)
+DEF_VME(16, 32)
+DEF_VME(32, 64)
+
+#define DEF_VMLE(BITS, TBITS) \
+void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b); \
+ } \
+}
+DEF_VMLE(8, 16)
+DEF_VMLE(16, 32)
+DEF_VMLE(32, 64)
+
+#define DEF_VMO(BITS, TBITS) \
+void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \
+ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \
+ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b); \
+ } \
+}
+DEF_VMO(8, 16)
+DEF_VMO(16, 32)
+DEF_VMO(32, 64)
+
+#define DEF_VMLO(BITS, TBITS) \
+void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i, j; \
+ \
+ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \
+ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \
+ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \
+ \
+ s390_vec_write_element##TBITS(v1, i, a * b); \
+ } \
+}
+DEF_VMLO(8, 16)
+DEF_VMLO(16, 32)
+DEF_VMLO(32, 64)
+
+#define DEF_VPOPCT(BITS) \
+void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, ctpop32(a)); \
+ } \
+}
+DEF_VPOPCT(8)
+DEF_VPOPCT(16)
+
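+/*
+ * Element rotate and insert under mask: the destination keeps its old
+ * bits where the mask (v3) is zero and takes the rotated v2 bits where
+ * the mask is one.
+ */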
+#define DEF_VERIM(BITS) \
+void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ const uint8_t count = simd_data(desc); \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v1, i); \
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v2, i); \
+ const uint##BITS##_t mask = s390_vec_read_element##BITS(v3, i); \
+ const uint##BITS##_t d = (a & ~mask) | (rol##BITS(b, count) & mask); \
+ \
+ s390_vec_write_element##BITS(v1, i, d); \
+ } \
+}
+DEF_VERIM(8)
+DEF_VERIM(16)
+
+void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count,
+ uint32_t desc)
+{
+ s390_vec_shl(v1, v2, count);
+}
+
+void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count,
+ uint32_t desc)
+{
+ s390_vec_sar(v1, v2, count);
+}
+
+void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count,
+ uint32_t desc)
+{
+ s390_vec_shr(v1, v2, count);
+}
+
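+/*
+ * Subtract compute borrow indication: each result element is 1 when
+ * a - b would not borrow (i.e. a >= b), and 0 otherwise.
+ */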
+#define DEF_VSCBI(BITS) \
+void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < (128 / BITS); i++) { \
+ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \
+ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \
+ \
+ s390_vec_write_element##BITS(v1, i, a >= b); \
+ } \
+}
+DEF_VSCBI(8)
+DEF_VSCBI(16)
+
+void HELPER(gvec_vtm)(void *v1, const void *v2, CPUS390XState *env,
+ uint32_t desc)
+{
+ S390Vector tmp;
+
+ s390_vec_and(&tmp, v1, v2);
+ if (s390_vec_is_zero(&tmp)) {
+ /* Selected bits all zeros; or all mask bits zero */
+ env->cc_op = 0;
+ } else if (s390_vec_equal(&tmp, v2)) {
+ /* Selected bits all ones */
+ env->cc_op = 3;
+ } else {
+ /* Selected bits a mix of zeros and ones */
+ env->cc_op = 1;
+ }
+}
diff --git a/target/s390x/tcg/vec_string_helper.c b/target/s390x/tcg/vec_string_helper.c
new file mode 100644
index 000000000..ac315eb09
--- /dev/null
+++ b/target/s390x/tcg/vec_string_helper.c
@@ -0,0 +1,473 @@
+/*
+ * QEMU TCG support -- s390x vector string instruction support
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "s390x-internal.h"
+#include "vec.h"
+#include "tcg/tcg.h"
+#include "tcg/tcg-gvec-desc.h"
+#include "exec/helper-proto.h"
+
+/*
+ * Returns a bit set in the MSB of each element that is zero,
+ * as defined by the mask.
+ */
+static inline uint64_t zero_search(uint64_t a, uint64_t mask)
+{
+ return ~(((a & mask) + mask) | a | mask);
+}
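+/* Worked example for 8-bit elements (mask = 0x7f7f...7f): for each byte x,
+ * ((x & 0x7f) + 0x7f) sets bit 7 iff the low seven bits of x are non-zero,
+ * and OR-ing in x itself catches its bit 7; after inverting and clearing
+ * the non-MSB bits, bit 7 remains set exactly when x == 0. */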
+
+/*
+ * Returns a bit set in the MSB of each element that is not zero,
+ * as defined by the mask.
+ */
+static inline uint64_t nonzero_search(uint64_t a, uint64_t mask)
+{
+ return (((a & mask) + mask) | a) & ~mask;
+}
+
+/*
+ * Returns the byte offset for the first match, or 16 for no match.
+ */
+static inline int match_index(uint64_t c0, uint64_t c1)
+{
+ return (c0 ? clz64(c0) : clz64(c1) + 64) >> 3;
+}
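+/* E.g. a flag in byte 5 of c0 gives clz64(c0) == 40 and 40 >> 3 == 5;
+ * with c0 == c1 == 0, (clz64(c1) + 64) >> 3 == 16 signals "no match". */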
+
+/*
+ * Returns the number of bits composing one element.
+ */
+static uint8_t get_element_bits(uint8_t es)
+{
+ return (1 << es) * BITS_PER_BYTE;
+}
+
+/*
+ * Returns the bitmask for a single element.
+ */
+static uint64_t get_single_element_mask(uint8_t es)
+{
+ return -1ull >> (64 - get_element_bits(es));
+}
+
+/*
+ * Returns the bitmask for a single element (excluding the MSB).
+ */
+static uint64_t get_single_element_lsbs_mask(uint8_t es)
+{
+ return -1ull >> (65 - get_element_bits(es));
+}
+
+/*
+ * Returns the bitmasks for multiple elements (excluding the MSBs).
+ */
+static uint64_t get_element_lsbs_mask(uint8_t es)
+{
+ return dup_const(es, get_single_element_lsbs_mask(es));
+}
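+/* E.g. 0x7f7f7f7f7f7f7f7f for MO_8 and 0x7fff7fff7fff7fff for MO_16. */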
+
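+/*
+ * Rather than comparing each element of v2 against each element of v3
+ * with nested scalar loops, v3 is rotated one element at a time and
+ * XOR-ed with v2, so that zero_search() flags all matching lanes of a
+ * 64-bit chunk at once.
+ */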
+static int vfae(void *v1, const void *v2, const void *v3, bool in,
+ bool rt, bool zs, uint8_t es)
+{
+ const uint64_t mask = get_element_lsbs_mask(es);
+ const int bits = get_element_bits(es);
+ uint64_t a0, a1, b0, b1, e0, e1, t0, t1, z0, z1;
+ uint64_t first_zero = 16;
+ uint64_t first_equal;
+ int i;
+
+ a0 = s390_vec_read_element64(v2, 0);
+ a1 = s390_vec_read_element64(v2, 1);
+ b0 = s390_vec_read_element64(v3, 0);
+ b1 = s390_vec_read_element64(v3, 1);
+ e0 = 0;
+ e1 = 0;
+ /* compare against equality with every other element */
+ for (i = 0; i < 64; i += bits) {
+ t0 = rol64(b0, i);
+ t1 = rol64(b1, i);
+ e0 |= zero_search(a0 ^ t0, mask);
+ e0 |= zero_search(a0 ^ t1, mask);
+ e1 |= zero_search(a1 ^ t0, mask);
+ e1 |= zero_search(a1 ^ t1, mask);
+ }
+ /* invert the result if requested - invert only the MSBs */
+ if (in) {
+ e0 = ~e0 & ~mask;
+ e1 = ~e1 & ~mask;
+ }
+ first_equal = match_index(e0, e1);
+
+ if (zs) {
+ z0 = zero_search(a0, mask);
+ z1 = zero_search(a1, mask);
+ first_zero = match_index(z0, z1);
+ }
+
+ if (rt) {
+ e0 = (e0 >> (bits - 1)) * get_single_element_mask(es);
+ e1 = (e1 >> (bits - 1)) * get_single_element_mask(es);
+ s390_vec_write_element64(v1, 0, e0);
+ s390_vec_write_element64(v1, 1, e1);
+ } else {
+ s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero));
+ s390_vec_write_element64(v1, 1, 0);
+ }
+
+ if (first_zero == 16 && first_equal == 16) {
+ return 3; /* no match */
+ } else if (first_zero == 16) {
+ return 1; /* matching elements, no match for zero */
+ } else if (first_equal < first_zero) {
+ return 2; /* matching elements before match for zero */
+ }
+ return 0; /* match for zero */
+}
+
+#define DEF_VFAE_HELPER(BITS) \
+void HELPER(gvec_vfae##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool rt = extract32(simd_data(desc), 2, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \
+}
+DEF_VFAE_HELPER(8)
+DEF_VFAE_HELPER(16)
+DEF_VFAE_HELPER(32)
+
+#define DEF_VFAE_CC_HELPER(BITS) \
+void HELPER(gvec_vfae_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool rt = extract32(simd_data(desc), 2, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ env->cc_op = vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \
+}
+DEF_VFAE_CC_HELPER(8)
+DEF_VFAE_CC_HELPER(16)
+DEF_VFAE_CC_HELPER(32)
+
+static int vfee(void *v1, const void *v2, const void *v3, bool zs, uint8_t es)
+{
+ const uint64_t mask = get_element_lsbs_mask(es);
+ uint64_t a0, a1, b0, b1, e0, e1, z0, z1;
+ uint64_t first_zero = 16;
+ uint64_t first_equal;
+
+ a0 = s390_vec_read_element64(v2, 0);
+ a1 = s390_vec_read_element64(v2, 1);
+ b0 = s390_vec_read_element64(v3, 0);
+ b1 = s390_vec_read_element64(v3, 1);
+ e0 = zero_search(a0 ^ b0, mask);
+ e1 = zero_search(a1 ^ b1, mask);
+ first_equal = match_index(e0, e1);
+
+ if (zs) {
+ z0 = zero_search(a0, mask);
+ z1 = zero_search(a1, mask);
+ first_zero = match_index(z0, z1);
+ }
+
+ s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero));
+ s390_vec_write_element64(v1, 1, 0);
+ if (first_zero == 16 && first_equal == 16) {
+ return 3; /* no match */
+ } else if (first_zero == 16) {
+ return 1; /* matching elements, no match for zero */
+ } else if (first_equal < first_zero) {
+ return 2; /* matching elements before match for zero */
+ }
+ return 0; /* match for zero */
+}
+
+#define DEF_VFEE_HELPER(BITS) \
+void HELPER(gvec_vfee##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ vfee(v1, v2, v3, zs, MO_##BITS); \
+}
+DEF_VFEE_HELPER(8)
+DEF_VFEE_HELPER(16)
+DEF_VFEE_HELPER(32)
+
+#define DEF_VFEE_CC_HELPER(BITS) \
+void HELPER(gvec_vfee_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ env->cc_op = vfee(v1, v2, v3, zs, MO_##BITS); \
+}
+DEF_VFEE_CC_HELPER(8)
+DEF_VFEE_CC_HELPER(16)
+DEF_VFEE_CC_HELPER(32)
+
+static int vfene(void *v1, const void *v2, const void *v3, bool zs, uint8_t es)
+{
+ const uint64_t mask = get_element_lsbs_mask(es);
+ uint64_t a0, a1, b0, b1, e0, e1, z0, z1;
+ uint64_t first_zero = 16;
+ uint64_t first_inequal;
+ bool smaller = false;
+
+ a0 = s390_vec_read_element64(v2, 0);
+ a1 = s390_vec_read_element64(v2, 1);
+ b0 = s390_vec_read_element64(v3, 0);
+ b1 = s390_vec_read_element64(v3, 1);
+ e0 = nonzero_search(a0 ^ b0, mask);
+ e1 = nonzero_search(a1 ^ b1, mask);
+ first_inequal = match_index(e0, e1);
+
+ /* identify the smaller element */
+ if (first_inequal < 16) {
+ uint8_t enr = first_inequal / (1 << es);
+ uint32_t a = s390_vec_read_element(v2, enr, es);
+ uint32_t b = s390_vec_read_element(v3, enr, es);
+
+ smaller = a < b;
+ }
+
+ if (zs) {
+ z0 = zero_search(a0, mask);
+ z1 = zero_search(a1, mask);
+ first_zero = match_index(z0, z1);
+ }
+
+ s390_vec_write_element64(v1, 0, MIN(first_inequal, first_zero));
+ s390_vec_write_element64(v1, 1, 0);
+ if (first_zero == 16 && first_inequal == 16) {
+ return 3;
+ } else if (first_zero < first_inequal) {
+ return 0;
+ }
+ return smaller ? 1 : 2;
+}
+
+#define DEF_VFENE_HELPER(BITS) \
+void HELPER(gvec_vfene##BITS)(void *v1, const void *v2, const void *v3, \
+ uint32_t desc) \
+{ \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ vfene(v1, v2, v3, zs, MO_##BITS); \
+}
+DEF_VFENE_HELPER(8)
+DEF_VFENE_HELPER(16)
+DEF_VFENE_HELPER(32)
+
+#define DEF_VFENE_CC_HELPER(BITS) \
+void HELPER(gvec_vfene_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ CPUS390XState *env, uint32_t desc) \
+{ \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ env->cc_op = vfene(v1, v2, v3, zs, MO_##BITS); \
+}
+DEF_VFENE_CC_HELPER(8)
+DEF_VFENE_CC_HELPER(16)
+DEF_VFENE_CC_HELPER(32)
+
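+/*
+ * Isolate string: every element after the first zero element is cleared;
+ * cc 0 signals that a zero element was found, cc 3 that none was.
+ */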
+static int vistr(void *v1, const void *v2, uint8_t es)
+{
+ const uint64_t mask = get_element_lsbs_mask(es);
+ uint64_t a0 = s390_vec_read_element64(v2, 0);
+ uint64_t a1 = s390_vec_read_element64(v2, 1);
+ uint64_t z;
+ int cc = 3;
+
+ z = zero_search(a0, mask);
+ if (z) {
+ a0 &= ~(-1ull >> clz64(z));
+ a1 = 0;
+ cc = 0;
+ } else {
+ z = zero_search(a1, mask);
+ if (z) {
+ a1 &= ~(-1ull >> clz64(z));
+ cc = 0;
+ }
+ }
+
+ s390_vec_write_element64(v1, 0, a0);
+ s390_vec_write_element64(v1, 1, a1);
+ return cc;
+}
+
+#define DEF_VISTR_HELPER(BITS) \
+void HELPER(gvec_vistr##BITS)(void *v1, const void *v2, uint32_t desc) \
+{ \
+ vistr(v1, v2, MO_##BITS); \
+}
+DEF_VISTR_HELPER(8)
+DEF_VISTR_HELPER(16)
+DEF_VISTR_HELPER(32)
+
+#define DEF_VISTR_CC_HELPER(BITS) \
+void HELPER(gvec_vistr_cc##BITS)(void *v1, const void *v2, CPUS390XState *env, \
+ uint32_t desc) \
+{ \
+ env->cc_op = vistr(v1, v2, MO_##BITS); \
+}
+DEF_VISTR_CC_HELPER(8)
+DEF_VISTR_CC_HELPER(16)
+DEF_VISTR_CC_HELPER(32)
+
+static bool element_compare(uint32_t data, uint32_t l, uint8_t c)
+{
+ const bool equal = extract32(c, 7, 1);
+ const bool lower = extract32(c, 6, 1);
+ const bool higher = extract32(c, 5, 1);
+
+ if (data < l) {
+ return lower;
+ } else if (data > l) {
+ return higher;
+ }
+ return equal;
+}
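+/* E.g. a control byte of 0xa0 (equal | higher) makes element_compare()
+ * accept exactly the values with data >= l. */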
+
+static int vstrc(void *v1, const void *v2, const void *v3, const void *v4,
+ bool in, bool rt, bool zs, uint8_t es)
+{
+ const uint64_t mask = get_element_lsbs_mask(es);
+ uint64_t a0 = s390_vec_read_element64(v2, 0);
+ uint64_t a1 = s390_vec_read_element64(v2, 1);
+ int first_zero = 16, first_match = 16;
+ S390Vector rt_result = {};
+ uint64_t z0, z1;
+ int i, j;
+
+ if (zs) {
+ z0 = zero_search(a0, mask);
+ z1 = zero_search(a1, mask);
+ first_zero = match_index(z0, z1);
+ }
+
+ for (i = 0; i < 16 / (1 << es); i++) {
+ const uint32_t data = s390_vec_read_element(v2, i, es);
+ const int cur_byte = i * (1 << es);
+ bool any_match = false;
+
+ /* if we don't need a bit vector, we can stop early */
+ if (cur_byte == first_zero && !rt) {
+ break;
+ }
+
+ for (j = 0; j < 16 / (1 << es); j += 2) {
+ const uint32_t l1 = s390_vec_read_element(v3, j, es);
+ const uint32_t l2 = s390_vec_read_element(v3, j + 1, es);
+ /* we are only interested in the highest byte of each element */
+ const uint8_t c1 = s390_vec_read_element8(v4, j * (1 << es));
+ const uint8_t c2 = s390_vec_read_element8(v4, (j + 1) * (1 << es));
+
+ if (element_compare(data, l1, c1) &&
+ element_compare(data, l2, c2)) {
+ any_match = true;
+ break;
+ }
+ }
+ /* invert the result if requested */
+ any_match = in ^ any_match;
+
+ if (any_match) {
+ /* indicate bit vector if requested */
+ if (rt) {
+ const uint64_t val = -1ull;
+
+ first_match = MIN(cur_byte, first_match);
+ s390_vec_write_element(&rt_result, i, es, val);
+ } else {
+ /* stop on the first match */
+ first_match = cur_byte;
+ break;
+ }
+ }
+ }
+
+ if (rt) {
+ *(S390Vector *)v1 = rt_result;
+ } else {
+ s390_vec_write_element64(v1, 0, MIN(first_match, first_zero));
+ s390_vec_write_element64(v1, 1, 0);
+ }
+
+ if (first_zero == 16 && first_match == 16) {
+ return 3; /* no match */
+ } else if (first_zero == 16) {
+ return 1; /* matching elements, no match for zero */
+ } else if (first_match < first_zero) {
+ return 2; /* matching elements before match for zero */
+ }
+ return 0; /* match for zero */
+}
+
+#define DEF_VSTRC_HELPER(BITS) \
+void HELPER(gvec_vstrc##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \
+}
+DEF_VSTRC_HELPER(8)
+DEF_VSTRC_HELPER(16)
+DEF_VSTRC_HELPER(32)
+
+#define DEF_VSTRC_RT_HELPER(BITS) \
+void HELPER(gvec_vstrc_rt##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \
+}
+DEF_VSTRC_RT_HELPER(8)
+DEF_VSTRC_RT_HELPER(16)
+DEF_VSTRC_RT_HELPER(32)
+
+#define DEF_VSTRC_CC_HELPER(BITS) \
+void HELPER(gvec_vstrc_cc##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, CPUS390XState *env, \
+ uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ env->cc_op = vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \
+}
+DEF_VSTRC_CC_HELPER(8)
+DEF_VSTRC_CC_HELPER(16)
+DEF_VSTRC_CC_HELPER(32)
+
+#define DEF_VSTRC_CC_RT_HELPER(BITS) \
+void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \
+ const void *v4, CPUS390XState *env, \
+ uint32_t desc) \
+{ \
+ const bool in = extract32(simd_data(desc), 3, 1); \
+ const bool zs = extract32(simd_data(desc), 1, 1); \
+ \
+ env->cc_op = vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \
+}
+DEF_VSTRC_CC_RT_HELPER(8)
+DEF_VSTRC_CC_RT_HELPER(16)
+DEF_VSTRC_CC_RT_HELPER(32)
diff --git a/target/s390x/trace-events b/target/s390x/trace-events
new file mode 100644
index 000000000..729cb012b
--- /dev/null
+++ b/target/s390x/trace-events
@@ -0,0 +1,19 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# mmu_helper.c
+get_skeys_nonzero(int rc) "SKEY: Call to get_skeys unexpectedly returned %d"
+set_skeys_nonzero(int rc) "SKEY: Call to set_skeys unexpectedly returned %d"
+
+# ioinst.c
+ioinst(const char *insn) "IOINST: %s"
+ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x.%x.%04x)"
+ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)"
+ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command 0x%04x, len 0x%04x"
+
+# cpu-sysemu.c
+cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
+cpu_halt(int cpu_index) "halting cpu %d"
+cpu_unhalt(int cpu_index) "unhalting cpu %d"
+
+# sigp.c
+sigp_finished(uint8_t order, int cpu_index, int dst_index, int cc) "SIGP: Finished order %u on cpu %d -> cpu %d with cc=%d"
diff --git a/target/s390x/trace.h b/target/s390x/trace.h
new file mode 100644
index 000000000..d7d59d4ab
--- /dev/null
+++ b/target/s390x/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_s390x.h"
diff --git a/target/sh4/Kconfig b/target/sh4/Kconfig
new file mode 100644
index 000000000..2397c8602
--- /dev/null
+++ b/target/sh4/Kconfig
@@ -0,0 +1,2 @@
+config SH4
+ bool
diff --git a/target/sh4/README.sh4 b/target/sh4/README.sh4
new file mode 100644
index 000000000..a192ca754
--- /dev/null
+++ b/target/sh4/README.sh4
@@ -0,0 +1,150 @@
+qemu target: sh4
+author: Samuel Tardieu <sam@rfc1149.net>
+last modified: Tue Dec 6 07:22:44 CET 2005
+
+The sh4 target is not yet ready for integration in qemu. This file
+describes the current state of the implementation.
+
+Most places requiring attention and/or modification can be detected by
+looking for "XXXXX" or "abort()".
+
+The sh4 core is located in target/sh4/*, while the 7750 peripheral
+features (IO ports for example) are located in hw/sh7750.[ch]. The
+main board description is in hw/shix.c, and the NAND flash in
+hw/tc58128.[ch].
+
+All the shortcomings indicated here will eventually be resolved. This
+is a work in progress. Features are added in a semi-random order: if a
+point is blocking progress on booting the Linux kernel for the shix
+board, it is addressed first; if feedback is necessary and no progress
+can be made on blocking points until it is received, a random feature
+is worked on.
+
+Goals
+-----
+
+The primary model being worked on is the soft MMU target, in order to
+emulate the Shix 2.0 board by Alexis Polti, described at
+https://web.archive.org/web/20070917001736/http://perso.enst.fr/~polti/realisations/shix20/
+
+Ultimately, qemu will be coupled with a SystemC or Verilog simulator
+to simulate the whole board's functionality.
+
+An sh4 user-mode target has also been started, but will be worked on
+afterwards. The goal is to automate testing of the GNAT (GNU Ada)
+compiler, which I recently ported to the sh4-linux target.
+
+Registers
+---------
+
+16 general purpose registers are available at any time. The first 8
+registers are banked; the bank that is not currently visible can be
+accessed by privileged instructions. In qemu, we define 24 general
+purpose registers and the code generation uses either [0-7]+[8-15] or
+[16-23]+[8-15] depending on the MD and RB flags in the sr status
+register, as sketched below.
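+
+A minimal sketch of the bank selection, mirroring the test used by the
+gdb stub in this tree (the helper name is illustrative only):
+
+/* Bank 1 (gregs[16-23]) is visible as r0-r7 when both MD and RB are set. */
+static int gregs_bank_base(uint32_t sr)
+{
+    return ((sr & (1u << SR_MD)) && (sr & (1u << SR_RB))) ? 16 : 0;
+}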
+
+Instructions
+------------
+
+Most sh4 instructions have been implemented. The missing ones at this
+time are:
+ - FPU related instructions
+ - LDTLB to load a new MMU entry
+ - SLEEP to put the processor in sleep mode
+
+Most instructions could be optimized a lot. This will be worked on
+after the current model is fully functional unless debugging
+convenience requires that it is done early.
+
+Many instructions have not been tested yet. The plan is to add unit
+and regression tests for them in the future.
+
+MMU
+---
+
+The MMU is implemented in the sh4 core. MMU management has not been
+tested yet. In the sh7750, the MMU can be manipulated through
+memory-mapped registers; this part has not yet been implemented.
+
+Exceptions
+----------
+
+Exceptions are implemented as described in the sh4 reference manual
+but have not been tested yet. They do not use qemu EXCP_ features
+yet.
+
+IRQ
+---
+
+IRQs are not implemented yet.
+
+Peripheral features
+-------------------
+
+ + Serial ports
+
+Configuration and use of the first serial port (SCI) without
+interrupts are supported. Input has not yet been tested.
+
+Configuration of the second serial port (SCIF) is supported. FIFO
+handling infrastructure has been started but is not yet complete.
+
+ + GPIO ports
+
+GPIO ports have been implemented. A registration function allows
+external modules to register interest in changes to some ports (see
+hw/tc58128.[ch] for an example); registered modules are then called
+back on changes. Interrupt generation is not yet supported, but some
+infrastructure is in place for this purpose. Note that in the current
+model a peripheral module cannot directly simulate an H->L->H input
+port transition and have an interrupt generated on the low level.
+
+ + TC58128 NAND flash
+
+TC58128 NAND flash is partially implemented through GPIO ports. It
+supports reading from flash.
+
+GDB
+---
+
+GDB remote target support has been implemented and lightly tested.
+
+Files
+-----
+
+File names are hardcoded at this time. The bootloader must be stored in
+shix_bios.bin in the current directory. The initial Linux image must
+be stored in shix_linux_nand.bin in the current directory in NAND
+format. Test files, as well as the various datasheets I use, can be
+obtained from http://perso.enst.fr/~polti/robot/.
+
+The qemu disk parameter on the command line is unused: you can supply
+any existing image and it will be ignored. As the goal is to simulate
+an embedded target, it is not clear how this parameter will be handled
+in the future.
+
+To build an ELF kernel image from the NAND image, 16 bytes have to be
+stripped off the end of every 528 bytes, keeping only 512 of them. The
+following Python code snippet does it:
+
+#! /usr/bin/python
+
+def denand (infd, outfd):
+ while True:
+ d = infd.read (528)
+ if not d: return
+ outfd.write (d[:512])
+
+if __name__ == '__main__':
+ import sys
+ denand (open (sys.argv[1], 'rb'),
+ open (sys.argv[2], 'wb'))
+
+Style issues
+------------
+
+There is currently a mix between my style (space before opening
+parenthesis) and qemu style. This will be resolved before final
+integration is proposed.
diff --git a/target/sh4/cpu-param.h b/target/sh4/cpu-param.h
new file mode 100644
index 000000000..81ace3503
--- /dev/null
+++ b/target/sh4/cpu-param.h
@@ -0,0 +1,21 @@
+/*
+ * SH4 cpu parameters for qemu.
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef SH4_CPU_PARAM_H
+#define SH4_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12 /* 4k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#ifdef CONFIG_USER_ONLY
+# define TARGET_VIRT_ADDR_SPACE_BITS 31
+#else
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define NB_MMU_MODES 2
+
+#endif
diff --git a/target/sh4/cpu-qom.h b/target/sh4/cpu-qom.h
new file mode 100644
index 000000000..8903b4b9c
--- /dev/null
+++ b/target/sh4/cpu-qom.h
@@ -0,0 +1,59 @@
+/*
+ * QEMU SuperH CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_SUPERH_CPU_QOM_H
+#define QEMU_SUPERH_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_SUPERH_CPU "superh-cpu"
+
+#define TYPE_SH7750R_CPU SUPERH_CPU_TYPE_NAME("sh7750r")
+#define TYPE_SH7751R_CPU SUPERH_CPU_TYPE_NAME("sh7751r")
+#define TYPE_SH7785_CPU SUPERH_CPU_TYPE_NAME("sh7785")
+
+OBJECT_DECLARE_TYPE(SuperHCPU, SuperHCPUClass,
+ SUPERH_CPU)
+
+/**
+ * SuperHCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @pvr: Processor Version Register
+ * @prr: Processor Revision Register
+ * @cvr: Cache Version Register
+ *
+ * A SuperH CPU model.
+ */
+struct SuperHCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+
+ uint32_t pvr;
+ uint32_t prr;
+ uint32_t cvr;
+};
+
+#endif
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
new file mode 100644
index 000000000..06b2691dc
--- /dev/null
+++ b/target/sh4/cpu.c
@@ -0,0 +1,302 @@
+/*
+ * QEMU SuperH CPU
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/qemu-print.h"
+#include "cpu.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat-helpers.h"
+
+static void superh_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static void superh_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+
+ cpu->env.pc = tb->pc;
+ cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool superh_io_recompile_replay_branch(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
+ && env->pc != tb->pc) {
+ env->pc -= 2;
+ env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ return true;
+ }
+ return false;
+}
+#endif
+
+static bool superh_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+static void superh_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ SuperHCPU *cpu = SUPERH_CPU(s);
+ SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(cpu);
+ CPUSH4State *env = &cpu->env;
+
+ scc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUSH4State, end_reset_fields));
+
+ env->pc = 0xA0000000;
+#if defined(CONFIG_USER_ONLY)
+ env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
+#else
+ env->sr = (1u << SR_MD) | (1u << SR_RB) | (1u << SR_BL) |
+ (1u << SR_I3) | (1u << SR_I2) | (1u << SR_I1) | (1u << SR_I0);
+ env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ set_flush_to_zero(1, &env->fp_status);
+#endif
+ set_default_nan_mode(1, &env->fp_status);
+}
+
+static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_sh4;
+ info->print_insn = print_insn_sh;
+}
+
+static void superh_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ const char *typename = object_class_get_name(OBJECT_CLASS(data));
+ int len = strlen(typename) - strlen(SUPERH_CPU_TYPE_SUFFIX);
+
+ qemu_printf("%.*s\n", len, typename);
+}
+
+void sh4_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list_sorted(TYPE_SUPERH_CPU, false);
+ g_slist_foreach(list, superh_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
+static ObjectClass *superh_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *s, *typename = NULL;
+
+ s = g_ascii_strdown(cpu_model, -1);
+ if (strcmp(s, "any") == 0) {
+ oc = object_class_by_name(TYPE_SH7750R_CPU);
+ goto out;
+ }
+
+ typename = g_strdup_printf(SUPERH_CPU_TYPE_NAME("%s"), s);
+ oc = object_class_by_name(typename);
+ if (oc != NULL && object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+
+out:
+ g_free(s);
+ g_free(typename);
+ return oc;
+}
+
+static void sh7750r_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7750R;
+ env->features = SH_FEATURE_BCR3_AND_BCR4;
+}
+
+static void sh7750r_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->pvr = 0x00050000;
+ scc->prr = 0x00000100;
+ scc->cvr = 0x00110000;
+}
+
+static void sh7751r_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7751R;
+ env->features = SH_FEATURE_BCR3_AND_BCR4;
+}
+
+static void sh7751r_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->pvr = 0x04050005;
+ scc->prr = 0x00000113;
+ scc->cvr = 0x00110000; /* Neutered caches, should be 0x20480000 */
+}
+
+static void sh7785_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7785;
+ env->features = SH_FEATURE_SH4A;
+}
+
+static void sh7785_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->pvr = 0x10300700;
+ scc->prr = 0x00000200;
+ scc->cvr = 0x71440211;
+}
+
+static void superh_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ scc->parent_realize(dev, errp);
+}
+
+static void superh_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ env->movcal_backup_tail = &(env->movcal_backup);
+}
+
+#ifndef CONFIG_USER_ONLY
+static const VMStateDescription vmstate_sh_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps sh4_sysemu_ops = {
+ .get_phys_page_debug = superh_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps superh_tcg_ops = {
+ .initialize = sh4_translate_init,
+ .synchronize_from_tb = superh_cpu_synchronize_from_tb,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = superh_cpu_tlb_fill,
+ .cpu_exec_interrupt = superh_cpu_exec_interrupt,
+ .do_interrupt = superh_cpu_do_interrupt,
+ .do_unaligned_access = superh_cpu_do_unaligned_access,
+ .io_recompile_replay_branch = superh_io_recompile_replay_branch,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void superh_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ device_class_set_parent_realize(dc, superh_cpu_realizefn,
+ &scc->parent_realize);
+
+ device_class_set_parent_reset(dc, superh_cpu_reset, &scc->parent_reset);
+
+ cc->class_by_name = superh_cpu_class_by_name;
+ cc->has_work = superh_cpu_has_work;
+ cc->dump_state = superh_cpu_dump_state;
+ cc->set_pc = superh_cpu_set_pc;
+ cc->gdb_read_register = superh_cpu_gdb_read_register;
+ cc->gdb_write_register = superh_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &sh4_sysemu_ops;
+ dc->vmsd = &vmstate_sh_cpu;
+#endif
+ cc->disas_set_info = superh_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 59;
+ cc->tcg_ops = &superh_tcg_ops;
+}
+
+#define DEFINE_SUPERH_CPU_TYPE(type_name, cinit, initfn) \
+ { \
+ .name = type_name, \
+ .parent = TYPE_SUPERH_CPU, \
+ .class_init = cinit, \
+ .instance_init = initfn, \
+ }
+static const TypeInfo superh_cpu_type_infos[] = {
+ {
+ .name = TYPE_SUPERH_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(SuperHCPU),
+ .instance_init = superh_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(SuperHCPUClass),
+ .class_init = superh_cpu_class_init,
+ },
+ DEFINE_SUPERH_CPU_TYPE(TYPE_SH7750R_CPU, sh7750r_class_init,
+ sh7750r_cpu_initfn),
+ DEFINE_SUPERH_CPU_TYPE(TYPE_SH7751R_CPU, sh7751r_class_init,
+ sh7751r_cpu_initfn),
+ DEFINE_SUPERH_CPU_TYPE(TYPE_SH7785_CPU, sh7785_class_init,
+ sh7785_cpu_initfn),
+};
+
+DEFINE_TYPES(superh_cpu_type_infos)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
new file mode 100644
index 000000000..4cfb109f5
--- /dev/null
+++ b/target/sh4/cpu.h
@@ -0,0 +1,378 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SH4_CPU_H
+#define SH4_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+/* CPU Subtypes */
+#define SH_CPU_SH7750 (1 << 0)
+#define SH_CPU_SH7750S (1 << 1)
+#define SH_CPU_SH7750R (1 << 2)
+#define SH_CPU_SH7751 (1 << 3)
+#define SH_CPU_SH7751R (1 << 4)
+#define SH_CPU_SH7785 (1 << 5)
+#define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R)
+#define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R)
+
+#define SR_MD 30
+#define SR_RB 29
+#define SR_BL 28
+#define SR_FD 15
+#define SR_M 9
+#define SR_Q 8
+#define SR_I3 7
+#define SR_I2 6
+#define SR_I1 5
+#define SR_I0 4
+#define SR_S 1
+#define SR_T 0
+
+#define FPSCR_MASK (0x003fffff)
+#define FPSCR_FR (1 << 21)
+#define FPSCR_SZ (1 << 20)
+#define FPSCR_PR (1 << 19)
+#define FPSCR_DN (1 << 18)
+#define FPSCR_CAUSE_MASK (0x3f << 12)
+#define FPSCR_CAUSE_SHIFT (12)
+#define FPSCR_CAUSE_E (1 << 17)
+#define FPSCR_CAUSE_V (1 << 16)
+#define FPSCR_CAUSE_Z (1 << 15)
+#define FPSCR_CAUSE_O (1 << 14)
+#define FPSCR_CAUSE_U (1 << 13)
+#define FPSCR_CAUSE_I (1 << 12)
+#define FPSCR_ENABLE_MASK (0x1f << 7)
+#define FPSCR_ENABLE_SHIFT (7)
+#define FPSCR_ENABLE_V (1 << 11)
+#define FPSCR_ENABLE_Z (1 << 10)
+#define FPSCR_ENABLE_O (1 << 9)
+#define FPSCR_ENABLE_U (1 << 8)
+#define FPSCR_ENABLE_I (1 << 7)
+#define FPSCR_FLAG_MASK (0x1f << 2)
+#define FPSCR_FLAG_SHIFT (2)
+#define FPSCR_FLAG_V (1 << 6)
+#define FPSCR_FLAG_Z (1 << 5)
+#define FPSCR_FLAG_O (1 << 4)
+#define FPSCR_FLAG_U (1 << 3)
+#define FPSCR_FLAG_I (1 << 2)
+#define FPSCR_RM_MASK (0x03 << 0)
+#define FPSCR_RM_NEAREST (0 << 0)
+#define FPSCR_RM_ZERO (1 << 0)
+
+#define DELAY_SLOT_MASK 0x7
+#define DELAY_SLOT (1 << 0)
+#define DELAY_SLOT_CONDITIONAL (1 << 1)
+#define DELAY_SLOT_RTE (1 << 2)
+
+#define TB_FLAG_PENDING_MOVCA (1 << 3)
+
+#define GUSA_SHIFT 4
+#ifdef CONFIG_USER_ONLY
+#define GUSA_EXCLUSIVE (1 << 12)
+#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE)
+#else
+/* Provide dummy versions of the above to allow tests against tbflags
+ to be elided while avoiding ifdefs. */
+#define GUSA_EXCLUSIVE 0
+#define GUSA_MASK 0
+#endif
+
+#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK)
+
+typedef struct tlb_t {
+ uint32_t vpn; /* virtual page number */
+ uint32_t ppn; /* physical page number */
+ uint32_t size; /* mapped page size in bytes */
+ uint8_t asid; /* address space identifier */
+ uint8_t v:1; /* validity */
+ uint8_t sz:2; /* page size */
+ uint8_t sh:1; /* share status */
+ uint8_t c:1; /* cacheability */
+ uint8_t pr:2; /* protection key */
+ uint8_t d:1; /* dirty */
+ uint8_t wt:1; /* write through */
+ uint8_t sa:3; /* space attribute (PCMCIA) */
+ uint8_t tc:1; /* timing control */
+} tlb_t;
+
+#define UTLB_SIZE 64
+#define ITLB_SIZE 4
+
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+enum sh_features {
+ SH_FEATURE_SH4A = 1,
+ SH_FEATURE_BCR3_AND_BCR4 = 2,
+};
+
+typedef struct memory_content {
+ uint32_t address;
+ uint32_t value;
+ struct memory_content *next;
+} memory_content;
+
+typedef struct CPUSH4State {
+ uint32_t flags; /* general execution flags */
+ uint32_t gregs[24]; /* general registers */
+ float32 fregs[32]; /* floating point registers */
+ uint32_t sr; /* status register (with T split out) */
+ uint32_t sr_m; /* M bit of status register */
+ uint32_t sr_q; /* Q bit of status register */
+ uint32_t sr_t; /* T bit of status register */
+ uint32_t ssr; /* saved status register */
+ uint32_t spc; /* saved program counter */
+ uint32_t gbr; /* global base register */
+ uint32_t vbr; /* vector base register */
+ uint32_t sgr; /* saved global register 15 */
+ uint32_t dbr; /* debug base register */
+ uint32_t pc; /* program counter */
+ uint32_t delayed_pc; /* target of delayed branch */
+ uint32_t delayed_cond; /* condition of delayed branch */
+ uint32_t mach; /* multiply and accumulate high */
+ uint32_t macl; /* multiply and accumulate low */
+ uint32_t pr; /* procedure register */
+ uint32_t fpscr; /* floating point status/control register */
+ uint32_t fpul; /* floating point communication register */
+
+    /* floating point status register */
+ float_status fp_status;
+
+ /* Those belong to the specific unit (SH7750) but are handled here */
+ uint32_t mmucr; /* MMU control register */
+ uint32_t pteh; /* page table entry high register */
+ uint32_t ptel; /* page table entry low register */
+ uint32_t ptea; /* page table entry assistance register */
+ uint32_t ttb; /* translation table base register */
+ uint32_t tea; /* TLB exception address register */
+ uint32_t tra; /* TRAPA exception register */
+ uint32_t expevt; /* exception event register */
+ uint32_t intevt; /* interrupt event register */
+
+ tlb_t itlb[ITLB_SIZE]; /* instruction translation table */
+ tlb_t utlb[UTLB_SIZE]; /* unified translation table */
+
+ /* LDST = LOCK_ADDR != -1. */
+ uint32_t lock_addr;
+ uint32_t lock_value;
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved over CPU reset. */
+ int id; /* CPU model */
+
+ /* The features that we should emulate. See sh_features above. */
+ uint32_t features;
+
+ void *intc_handle;
+ int in_sleep; /* SR_BL ignored during sleep */
+ memory_content *movcal_backup;
+ memory_content **movcal_backup_tail;
+} CPUSH4State;
+
+/**
+ * SuperHCPU:
+ * @env: #CPUSH4State
+ *
+ * A SuperH CPU.
+ */
+struct SuperHCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUSH4State env;
+};
+
+
+void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+
+void sh4_translate_init(void);
+void sh4_cpu_list(void);
+
+#if !defined(CONFIG_USER_ONLY)
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void superh_cpu_do_interrupt(CPUState *cpu);
+bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void cpu_sh4_invalidate_tlb(CPUSH4State *s);
+uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+#endif
+
+int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr);
+
+void cpu_load_tlb(CPUSH4State * env);
+
+#define SUPERH_CPU_TYPE_SUFFIX "-" TYPE_SUPERH_CPU
+#define SUPERH_CPU_TYPE_NAME(model) model SUPERH_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU
+
+#define cpu_list sh4_cpu_list
+
+/* MMU modes definitions */
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
+{
+ /* The instruction in a RTE delay slot is fetched in privileged
+ mode, but executed in user mode. */
+ if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
+ return 0;
+ } else {
+ return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+ }
+}
+
+typedef CPUSH4State CPUArchState;
+typedef SuperHCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+/* MMU control register */
+#define MMUCR 0x1F000010
+#define MMUCR_AT (1<<0)
+#define MMUCR_TI (1<<2)
+#define MMUCR_SV (1<<8)
+#define MMUCR_URC_BITS (6)
+#define MMUCR_URC_OFFSET (10)
+#define MMUCR_URC_SIZE (1 << MMUCR_URC_BITS)
+#define MMUCR_URC_MASK (((MMUCR_URC_SIZE) - 1) << MMUCR_URC_OFFSET)
+static inline int cpu_mmucr_urc (uint32_t mmucr)
+{
+ return ((mmucr & MMUCR_URC_MASK) >> MMUCR_URC_OFFSET);
+}
+
+/* PTEH : Page Translation Entry High register */
+#define PTEH_ASID_BITS (8)
+#define PTEH_ASID_SIZE (1 << PTEH_ASID_BITS)
+#define PTEH_ASID_MASK (PTEH_ASID_SIZE - 1)
+#define cpu_pteh_asid(pteh) ((pteh) & PTEH_ASID_MASK)
+#define PTEH_VPN_BITS (22)
+#define PTEH_VPN_OFFSET (10)
+#define PTEH_VPN_SIZE (1 << PTEH_VPN_BITS)
+#define PTEH_VPN_MASK (((PTEH_VPN_SIZE) - 1) << PTEH_VPN_OFFSET)
+static inline int cpu_pteh_vpn (uint32_t pteh)
+{
+ return ((pteh & PTEH_VPN_MASK) >> PTEH_VPN_OFFSET);
+}
+
+/* PTEL : Page Translation Entry Low register */
+#define PTEL_V (1 << 8)
+#define cpu_ptel_v(ptel) (((ptel) & PTEL_V) >> 8)
+#define PTEL_C (1 << 3)
+#define cpu_ptel_c(ptel) (((ptel) & PTEL_C) >> 3)
+#define PTEL_D (1 << 2)
+#define cpu_ptel_d(ptel) (((ptel) & PTEL_D) >> 2)
+#define PTEL_SH (1 << 1)
+#define cpu_ptel_sh(ptel)(((ptel) & PTEL_SH) >> 1)
+#define PTEL_WT (1 << 0)
+#define cpu_ptel_wt(ptel) ((ptel) & PTEL_WT)
+
+#define PTEL_SZ_HIGH_OFFSET (7)
+#define PTEL_SZ_HIGH (1 << PTEL_SZ_HIGH_OFFSET)
+#define PTEL_SZ_LOW_OFFSET (4)
+#define PTEL_SZ_LOW (1 << PTEL_SZ_LOW_OFFSET)
+static inline int cpu_ptel_sz (uint32_t ptel)
+{
+ int sz;
+ sz = (ptel & PTEL_SZ_HIGH) >> PTEL_SZ_HIGH_OFFSET;
+ sz <<= 1;
+ sz |= (ptel & PTEL_SZ_LOW) >> PTEL_SZ_LOW_OFFSET;
+ return sz;
+}
+
+#define PTEL_PPN_BITS (19)
+#define PTEL_PPN_OFFSET (10)
+#define PTEL_PPN_SIZE (1 << PTEL_PPN_BITS)
+#define PTEL_PPN_MASK (((PTEL_PPN_SIZE) - 1) << PTEL_PPN_OFFSET)
+static inline int cpu_ptel_ppn (uint32_t ptel)
+{
+ return ((ptel & PTEL_PPN_MASK) >> PTEL_PPN_OFFSET);
+}
+
+#define PTEL_PR_BITS (2)
+#define PTEL_PR_OFFSET (5)
+#define PTEL_PR_SIZE (1 << PTEL_PR_BITS)
+#define PTEL_PR_MASK (((PTEL_PR_SIZE) - 1) << PTEL_PR_OFFSET)
+static inline int cpu_ptel_pr (uint32_t ptel)
+{
+ return ((ptel & PTEL_PR_MASK) >> PTEL_PR_OFFSET);
+}
+
+/* PTEA : Page Translation Entry Assistance register */
+#define PTEA_SA_BITS (3)
+#define PTEA_SA_SIZE (1 << PTEA_SA_BITS)
+#define PTEA_SA_MASK (PTEA_SA_SIZE - 1)
+#define cpu_ptea_sa(ptea) ((ptea) & PTEA_SA_MASK)
+#define PTEA_TC (1 << 3)
+#define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3)
+
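+/*
+ * M, Q and T are stored outside env->sr (see CPUSH4State), which lets the
+ * translated code update them individually; the helpers below reassemble
+ * and split the architectural SR value.
+ */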
+static inline target_ulong cpu_read_sr(CPUSH4State *env)
+{
+ return env->sr | (env->sr_m << SR_M) |
+ (env->sr_q << SR_Q) |
+ (env->sr_t << SR_T);
+}
+
+static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
+{
+ env->sr_m = (sr >> SR_M) & 1;
+ env->sr_q = (sr >> SR_Q) & 1;
+ env->sr_t = (sr >> SR_T) & 1;
+ env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
+}
+
+static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ /* For a gUSA region, notice the end of the region. */
+ *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0;
+ *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */
+ | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
+ | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
+ | (env->sr & (1u << SR_FD)) /* Bit 15 */
+ | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+}
+
+#endif /* SH4_CPU_H */
diff --git a/target/sh4/gdbstub.c b/target/sh4/gdbstub.c
new file mode 100644
index 000000000..3488f68e3
--- /dev/null
+++ b/target/sh4/gdbstub.c
@@ -0,0 +1,144 @@
+/*
+ * SuperH gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
+/* FIXME: We should use XML for this. */
+
+int superh_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ switch (n) {
+ case 0 ... 7:
+ if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
+ return gdb_get_regl(mem_buf, env->gregs[n + 16]);
+ } else {
+ return gdb_get_regl(mem_buf, env->gregs[n]);
+ }
+ case 8 ... 15:
+ return gdb_get_regl(mem_buf, env->gregs[n]);
+ case 16:
+ return gdb_get_regl(mem_buf, env->pc);
+ case 17:
+ return gdb_get_regl(mem_buf, env->pr);
+ case 18:
+ return gdb_get_regl(mem_buf, env->gbr);
+ case 19:
+ return gdb_get_regl(mem_buf, env->vbr);
+ case 20:
+ return gdb_get_regl(mem_buf, env->mach);
+ case 21:
+ return gdb_get_regl(mem_buf, env->macl);
+ case 22:
+ return gdb_get_regl(mem_buf, cpu_read_sr(env));
+ case 23:
+ return gdb_get_regl(mem_buf, env->fpul);
+ case 24:
+ return gdb_get_regl(mem_buf, env->fpscr);
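+    /* With FPSCR.FR set, fr0-fr15 live in the second register bank,
+       fregs[16..31], hence the n - 9 (= n - 25 + 16) index below. */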
+ case 25 ... 40:
+ if (env->fpscr & FPSCR_FR) {
+ return gdb_get_reg32(mem_buf, env->fregs[n - 9]);
+ }
+ return gdb_get_reg32(mem_buf, env->fregs[n - 25]);
+ case 41:
+ return gdb_get_regl(mem_buf, env->ssr);
+ case 42:
+ return gdb_get_regl(mem_buf, env->spc);
+ case 43 ... 50:
+ return gdb_get_regl(mem_buf, env->gregs[n - 43]);
+ case 51 ... 58:
+ return gdb_get_regl(mem_buf, env->gregs[n - (51 - 16)]);
+ }
+
+ return 0;
+}
+
+int superh_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ switch (n) {
+ case 0 ... 7:
+ if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
+ env->gregs[n + 16] = ldl_p(mem_buf);
+ } else {
+ env->gregs[n] = ldl_p(mem_buf);
+ }
+ break;
+ case 8 ... 15:
+ env->gregs[n] = ldl_p(mem_buf);
+ break;
+ case 16:
+ env->pc = ldl_p(mem_buf);
+ break;
+ case 17:
+ env->pr = ldl_p(mem_buf);
+ break;
+ case 18:
+ env->gbr = ldl_p(mem_buf);
+ break;
+ case 19:
+ env->vbr = ldl_p(mem_buf);
+ break;
+ case 20:
+ env->mach = ldl_p(mem_buf);
+ break;
+ case 21:
+ env->macl = ldl_p(mem_buf);
+ break;
+ case 22:
+ cpu_write_sr(env, ldl_p(mem_buf));
+ break;
+ case 23:
+ env->fpul = ldl_p(mem_buf);
+ break;
+ case 24:
+ env->fpscr = ldl_p(mem_buf);
+ break;
+ case 25 ... 40:
+ if (env->fpscr & FPSCR_FR) {
+ env->fregs[n - 9] = ldl_p(mem_buf);
+ } else {
+ env->fregs[n - 25] = ldl_p(mem_buf);
+ }
+ break;
+ case 41:
+ env->ssr = ldl_p(mem_buf);
+ break;
+ case 42:
+ env->spc = ldl_p(mem_buf);
+ break;
+ case 43 ... 50:
+ env->gregs[n - 43] = ldl_p(mem_buf);
+ break;
+ case 51 ... 58:
+ env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
+ break;
+ default:
+ return 0;
+ }
+
+ return 4;
+}
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
new file mode 100644
index 000000000..6a620e36f
--- /dev/null
+++ b/target/sh4/helper.c
@@ -0,0 +1,863 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/log.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/sh4/sh_intc.h"
+#include "sysemu/runstate.h"
+#endif
+
+#define MMU_OK 0
+#define MMU_ITLB_MISS (-1)
+#define MMU_ITLB_MULTIPLE (-2)
+#define MMU_ITLB_VIOLATION (-3)
+#define MMU_DTLB_MISS_READ (-4)
+#define MMU_DTLB_MISS_WRITE (-5)
+#define MMU_DTLB_INITIAL_WRITE (-6)
+#define MMU_DTLB_VIOLATION_READ (-7)
+#define MMU_DTLB_VIOLATION_WRITE (-8)
+#define MMU_DTLB_MULTIPLE (-9)
+#define MMU_DTLB_MISS (-10)
+#define MMU_IADDR_ERROR (-11)
+#define MMU_DADDR_ERROR_READ (-12)
+#define MMU_DADDR_ERROR_WRITE (-13)
+
+#if defined(CONFIG_USER_ONLY)
+
+int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
+{
+ /* For user mode, only U0 area is cacheable. */
+ return !(addr & 0x80000000);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+void superh_cpu_do_interrupt(CPUState *cs)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
+ int do_exp, irq_vector = cs->exception_index;
+
+ /* prioritize exceptions over interrupts */
+
+ do_exp = cs->exception_index != -1;
+ do_irq = do_irq && (cs->exception_index == -1);
+
+ if (env->sr & (1u << SR_BL)) {
+ if (do_exp && cs->exception_index != 0x1e0) {
+ /* In theory a masked exception generates a reset exception,
+ which in turn jumps to the reset vector. However this only
+ works when using a bootloader. When using a kernel and an
+ initrd, they need to be reloaded and the program counter
+ should be loaded with the kernel entry point.
+ qemu_system_reset_request takes care of that. */
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ return;
+ }
+ if (do_irq && !env->in_sleep) {
+ return; /* masked */
+ }
+ }
+ env->in_sleep = 0;
+
+ if (do_irq) {
+ irq_vector = sh_intc_get_pending_vector(env->intc_handle,
+ (env->sr >> 4) & 0xf);
+ if (irq_vector == -1) {
+ return; /* masked */
+ }
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *expname;
+ switch (cs->exception_index) {
+ case 0x0e0:
+ expname = "addr_error";
+ break;
+ case 0x040:
+ expname = "tlb_miss";
+ break;
+ case 0x0a0:
+ expname = "tlb_violation";
+ break;
+ case 0x180:
+ expname = "illegal_instruction";
+ break;
+ case 0x1a0:
+ expname = "slot_illegal_instruction";
+ break;
+ case 0x800:
+ expname = "fpu_disable";
+ break;
+ case 0x820:
+ expname = "slot_fpu";
+ break;
+ case 0x100:
+ expname = "data_write";
+ break;
+ case 0x060:
+ expname = "dtlb_miss_write";
+ break;
+ case 0x0c0:
+ expname = "dtlb_violation_write";
+ break;
+ case 0x120:
+ expname = "fpu_exception";
+ break;
+ case 0x080:
+ expname = "initial_page_write";
+ break;
+ case 0x160:
+ expname = "trapa";
+ break;
+ default:
+ expname = do_irq ? "interrupt" : "???";
+ break;
+ }
+ qemu_log("exception 0x%03x [%s] raised\n",
+ irq_vector, expname);
+ log_cpu_state(cs, 0);
+ }
+
+ env->ssr = cpu_read_sr(env);
+ env->spc = env->pc;
+ env->sgr = env->gregs[15];
+ env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
+ env->lock_addr = -1;
+
+ if (env->flags & DELAY_SLOT_MASK) {
+ /* Branch instruction should be executed again before delay slot. */
+ env->spc -= 2;
+ /* Clear flags for exception/interrupt routine. */
+ env->flags &= ~DELAY_SLOT_MASK;
+ }
+
+ if (do_exp) {
+ env->expevt = cs->exception_index;
+ switch (cs->exception_index) {
+ case 0x000:
+ case 0x020:
+ case 0x140:
+ env->sr &= ~(1u << SR_FD);
+ env->sr |= 0xf << 4; /* IMASK */
+ env->pc = 0xa0000000;
+ break;
+ case 0x040:
+ case 0x060:
+ env->pc = env->vbr + 0x400;
+ break;
+ case 0x160:
+ env->spc += 2; /* special case for TRAPA */
+ /* fall through */
+ default:
+ env->pc = env->vbr + 0x100;
+ break;
+ }
+ return;
+ }
+
+ if (do_irq) {
+ env->intevt = irq_vector;
+ env->pc = env->vbr + 0x600;
+ return;
+ }
+}
+
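+/*
+ * The top bits of MMUCR form the LRUI field, a pairwise least-recently-used
+ * matrix over the four ITLB entries: update_itlb_use() rewrites the bits
+ * that involve the entry just used, and itlb_replacement() searches for the
+ * bit pattern identifying the least recently used entry.
+ */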
+static void update_itlb_use(CPUSH4State * env, int itlbnb)
+{
+    uint8_t or_mask = 0, and_mask = (uint8_t)-1;
+
+ switch (itlbnb) {
+ case 0:
+ and_mask = 0x1f;
+ break;
+ case 1:
+ and_mask = 0xe7;
+ or_mask = 0x80;
+ break;
+ case 2:
+ and_mask = 0xfb;
+ or_mask = 0x50;
+ break;
+ case 3:
+ or_mask = 0x2c;
+ break;
+ }
+
+ env->mmucr &= (and_mask << 24) | 0x00ffffff;
+ env->mmucr |= (or_mask << 24);
+}
+
+static int itlb_replacement(CPUSH4State * env)
+{
+ if ((env->mmucr & 0xe0000000) == 0xe0000000) {
+ return 0;
+ }
+ if ((env->mmucr & 0x98000000) == 0x18000000) {
+ return 1;
+ }
+ if ((env->mmucr & 0x54000000) == 0x04000000) {
+ return 2;
+ }
+ if ((env->mmucr & 0x2c000000) == 0x00000000) {
+ return 3;
+ }
+ cpu_abort(env_cpu(env), "Unhandled itlb_replacement");
+}
+
+/* Find the corresponding entry in the right TLB
+ Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
+*/
+static int find_tlb_entry(CPUSH4State * env, target_ulong address,
+ tlb_t * entries, uint8_t nbtlb, int use_asid)
+{
+ int match = MMU_DTLB_MISS;
+ uint32_t start, end;
+ uint8_t asid;
+ int i;
+
+ asid = env->pteh & 0xff;
+
+ for (i = 0; i < nbtlb; i++) {
+ if (!entries[i].v)
+ continue; /* Invalid entry */
+ if (!entries[i].sh && use_asid && entries[i].asid != asid)
+ continue; /* Bad ASID */
+ start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
+ end = start + entries[i].size - 1;
+ if (address >= start && address <= end) { /* Match */
+ if (match != MMU_DTLB_MISS)
+ return MMU_DTLB_MULTIPLE; /* Multiple match */
+ match = i;
+ }
+ }
+ return match;
+}
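+
+/* Note that in find_tlb_entry() above, use_asid is false only when
+   MMUCR.SV is set in privileged mode; non-shared entries then match
+   regardless of the current ASID, and two overlapping hits are reported
+   as MMU_DTLB_MULTIPLE. */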
+
+static void increment_urc(CPUSH4State * env)
+{
+ uint8_t urb, urc;
+
+ /* Increment URC */
+ urb = ((env->mmucr) >> 18) & 0x3f;
+ urc = ((env->mmucr) >> 10) & 0x3f;
+ urc++;
+ if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
+ urc = 0;
+ env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
+}
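+
+/* URC (MMUCR bits 15:10) is the UTLB replacement index later consumed
+   by ldtlb through cpu_load_tlb(); it wraps to 0 once it passes URB
+   (bits 23:18, when URB is non-zero) or the last UTLB entry. */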
+
+/* Copy a UTLB entry into the ITLB.
+   Return the ITLB entry used.
+*/
+static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
+{
+ int itlb;
+
+ tlb_t * ientry;
+ itlb = itlb_replacement(env);
+ ientry = &env->itlb[itlb];
+ if (ientry->v) {
+ tlb_flush_page(env_cpu(env), ientry->vpn << 10);
+ }
+ *ientry = env->utlb[utlb];
+ update_itlb_use(env, itlb);
+ return itlb;
+}
+
+/* Find itlb entry
+   Return entry, MMU_ITLB_MISS or MMU_ITLB_MULTIPLE
+*/
+static int find_itlb_entry(CPUSH4State * env, target_ulong address,
+ int use_asid)
+{
+ int e;
+
+ e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
+ if (e == MMU_DTLB_MULTIPLE) {
+ e = MMU_ITLB_MULTIPLE;
+ } else if (e == MMU_DTLB_MISS) {
+ e = MMU_ITLB_MISS;
+ } else if (e >= 0) {
+ update_itlb_use(env, e);
+ }
+ return e;
+}
+
+/* Find utlb entry
+ Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
+static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid)
+{
+ /* per utlb access */
+ increment_urc(env);
+
+ /* Return entry */
+ return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
+}
+
+/* Match address against MMU
+ Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
+ MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
+ MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
+ MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
+ MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
+*/
+static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
+{
+ int use_asid, n;
+ tlb_t *matching = NULL;
+
+ use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
+
+ if (access_type == MMU_INST_FETCH) {
+ n = find_itlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->itlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = MMU_ITLB_VIOLATION;
+ } else {
+ *prot = PAGE_EXEC;
+ }
+ } else {
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ n = copy_utlb_entry_itlb(env, n);
+ matching = &env->itlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = MMU_ITLB_VIOLATION;
+ } else {
+ *prot = PAGE_READ | PAGE_EXEC;
+ if ((matching->pr & 1) && matching->d) {
+ *prot |= PAGE_WRITE;
+ }
+ }
+ } else if (n == MMU_DTLB_MULTIPLE) {
+ n = MMU_ITLB_MULTIPLE;
+ } else if (n == MMU_DTLB_MISS) {
+ n = MMU_ITLB_MISS;
+ }
+ }
+ } else {
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->utlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_VIOLATION_WRITE : MMU_DTLB_VIOLATION_READ;
+ } else if ((access_type == MMU_DATA_STORE) && !(matching->pr & 1)) {
+ n = MMU_DTLB_VIOLATION_WRITE;
+ } else if ((access_type == MMU_DATA_STORE) && !matching->d) {
+ n = MMU_DTLB_INITIAL_WRITE;
+ } else {
+ *prot = PAGE_READ;
+ if ((matching->pr & 1) && matching->d) {
+ *prot |= PAGE_WRITE;
+ }
+ }
+ } else if (n == MMU_DTLB_MISS) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_MISS_WRITE : MMU_DTLB_MISS_READ;
+ }
+ }
+ if (n >= 0) {
+ n = MMU_OK;
+ *physical = ((matching->ppn << 10) & ~(matching->size - 1))
+ | (address & (matching->size - 1));
+ }
+ return n;
+}
+
+static int get_physical_address(CPUSH4State * env, target_ulong * physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
+{
+ /* P1, P2 and P4 areas do not use translation */
+ if ((address >= 0x80000000 && address < 0xc0000000) || address >= 0xe0000000) {
+ if (!(env->sr & (1u << SR_MD))
+ && (address < 0xe0000000 || address >= 0xe4000000)) {
+ /* Unauthorized access in user mode (only store queues are available) */
+ qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
+ if (access_type == MMU_DATA_LOAD) {
+ return MMU_DADDR_ERROR_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ return MMU_DADDR_ERROR_WRITE;
+ } else {
+ return MMU_IADDR_ERROR;
+ }
+ }
+ if (address >= 0x80000000 && address < 0xc0000000) {
+ /* Mask upper 3 bits for P1 and P2 areas */
+ *physical = address & 0x1fffffff;
+ } else {
+ *physical = address;
+ }
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
+ }
+
+ /* If MMU is disabled, return the corresponding physical page */
+ if (!(env->mmucr & MMUCR_AT)) {
+ *physical = address & 0x1FFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
+ }
+
+ /* We need to resort to the MMU */
+ return get_mmu_address(env, physical, prot, address, access_type);
+}
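+
+/* Worked example: a P1 access at 0x8c001000 is untranslated and cached,
+   yielding physical 0x0c001000 (the top three bits are masked), whereas
+   a U0 access at 0x0c001000 with MMUCR.AT set goes through
+   get_mmu_address() and the UTLB. */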
+
+hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ target_ulong physical;
+ int prot;
+
+ if (get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD)
+ == MMU_OK) {
+ return physical;
+ }
+
+ return -1;
+}
+
+void cpu_load_tlb(CPUSH4State * env)
+{
+ CPUState *cs = env_cpu(env);
+ int n = cpu_mmucr_urc(env->mmucr);
+ tlb_t * entry = &env->utlb[n];
+
+ if (entry->v) {
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(cs, address);
+ }
+
+    /* Fill the entry from the PTEH/PTEL/PTEA registers. */
+ entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
+ entry->vpn = cpu_pteh_vpn(env->pteh);
+ entry->v = (uint8_t)cpu_ptel_v(env->ptel);
+ entry->ppn = cpu_ptel_ppn(env->ptel);
+ entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
+ switch (entry->sz) {
+ case 0: /* 00 */
+ entry->size = 1024; /* 1K */
+ break;
+ case 1: /* 01 */
+ entry->size = 1024 * 4; /* 4K */
+ break;
+ case 2: /* 10 */
+ entry->size = 1024 * 64; /* 64K */
+ break;
+ case 3: /* 11 */
+ entry->size = 1024 * 1024; /* 1M */
+ break;
+ default:
+ cpu_abort(cs, "Unhandled load_tlb");
+ break;
+ }
+ entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
+ entry->c = (uint8_t)cpu_ptel_c(env->ptel);
+ entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
+ entry->d = (uint8_t)cpu_ptel_d(env->ptel);
+ entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
+ entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
+ entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
+}
+
+void cpu_sh4_invalidate_tlb(CPUSH4State *s)
+{
+ int i;
+
+ /* UTLB */
+ for (i = 0; i < UTLB_SIZE; i++) {
+ tlb_t * entry = &s->utlb[i];
+ entry->v = 0;
+ }
+ /* ITLB */
+ for (i = 0; i < ITLB_SIZE; i++) {
+ tlb_t * entry = &s->itlb[i];
+ entry->v = 0;
+ }
+
+ tlb_flush(env_cpu(s));
+}
+
+uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
+ hwaddr addr)
+{
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ return (entry->vpn << 10) |
+ (entry->v << 8) |
+ (entry->asid);
+}
+
+void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
+ uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
+ uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
+
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+ if (entry->v) {
+ /* Overwriting valid entry in itlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(env_cpu(s), address);
+ }
+ entry->asid = asid;
+ entry->vpn = vpn;
+ entry->v = v;
+}
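+
+/* For the memory-mapped ITLB arrays only address bits 9:8 select one of
+   the four entries, hence the 0x300 masks in the accessors here; the
+   UTLB accessors below use bits 13:8 (0x3f00) for their 64 entries. */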
+
+uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
+ hwaddr addr)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ if (array == 0) {
+ /* ITLB Data Array 1 */
+ return (entry->ppn << 10) |
+ (entry->v << 8) |
+ (entry->pr << 5) |
+ ((entry->sz & 1) << 6) |
+ ((entry->sz & 2) << 4) |
+ (entry->c << 3) |
+ (entry->sh << 1);
+ } else {
+ /* ITLB Data Array 2 */
+ return (entry->tc << 1) |
+ (entry->sa);
+ }
+}
+
+void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ if (array == 0) {
+ /* ITLB Data Array 1 */
+ if (entry->v) {
+            /* Overwriting valid entry in itlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(env_cpu(s), address);
+ }
+ entry->ppn = (mem_value & 0x1ffffc00) >> 10;
+ entry->v = (mem_value & 0x00000100) >> 8;
+ entry->sz = (mem_value & 0x00000080) >> 6 |
+ (mem_value & 0x00000010) >> 4;
+ entry->pr = (mem_value & 0x00000040) >> 5;
+ entry->c = (mem_value & 0x00000008) >> 3;
+ entry->sh = (mem_value & 0x00000002) >> 1;
+ } else {
+ /* ITLB Data Array 2 */
+ entry->tc = (mem_value & 0x00000008) >> 3;
+ entry->sa = (mem_value & 0x00000007);
+ }
+}
+
+uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
+ hwaddr addr)
+{
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ return (entry->vpn << 10) |
+ (entry->v << 8) |
+ (entry->asid);
+}
+
+void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int associate = addr & 0x0000080;
+ uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
+ uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
+ uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
+ uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
+ int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD));
+
+ if (associate) {
+ int i;
+ tlb_t * utlb_match_entry = NULL;
+ int needs_tlb_flush = 0;
+
+ /* search UTLB */
+ for (i = 0; i < UTLB_SIZE; i++) {
+ tlb_t * entry = &s->utlb[i];
+ if (!entry->v)
+ continue;
+
+ if (entry->vpn == vpn
+ && (!use_asid || entry->asid == asid || entry->sh)) {
+ if (utlb_match_entry) {
+ CPUState *cs = env_cpu(s);
+
+ /* Multiple TLB Exception */
+ cs->exception_index = 0x140;
+ s->tea = addr;
+ break;
+ }
+ if (entry->v && !v)
+ needs_tlb_flush = 1;
+ entry->v = v;
+ entry->d = d;
+ utlb_match_entry = entry;
+ }
+ increment_urc(s); /* per utlb access */
+ }
+
+ /* search ITLB */
+ for (i = 0; i < ITLB_SIZE; i++) {
+ tlb_t * entry = &s->itlb[i];
+ if (entry->vpn == vpn
+ && (!use_asid || entry->asid == asid || entry->sh)) {
+ if (entry->v && !v)
+ needs_tlb_flush = 1;
+ if (utlb_match_entry)
+ *entry = *utlb_match_entry;
+ else
+ entry->v = v;
+ break;
+ }
+ }
+
+ if (needs_tlb_flush) {
+ tlb_flush_page(env_cpu(s), vpn << 10);
+ }
+ } else {
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+ if (entry->v) {
+ CPUState *cs = env_cpu(s);
+
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(cs, address);
+ }
+ entry->asid = asid;
+ entry->vpn = vpn;
+ entry->d = d;
+ entry->v = v;
+ increment_urc(s);
+ }
+}
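+
+/* Address bit 7 selects the "association" mode handled above: the write
+   then searches both TLBs by VPN/ASID instead of indexing one entry, and
+   a double UTLB hit raises the multiple-hit exception (0x140). Kernels
+   typically use associative writes to invalidate individual pages. */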
+
+uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
+ hwaddr addr)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ if (array == 0) {
+        /* UTLB Data Array 1 */
+ return (entry->ppn << 10) |
+ (entry->v << 8) |
+ (entry->pr << 5) |
+ ((entry->sz & 1) << 6) |
+ ((entry->sz & 2) << 4) |
+ (entry->c << 3) |
+ (entry->d << 2) |
+ (entry->sh << 1) |
+ (entry->wt);
+ } else {
+        /* UTLB Data Array 2 */
+ return (entry->tc << 1) |
+ (entry->sa);
+ }
+}
+
+void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ if (array == 0) {
+ /* UTLB Data Array 1 */
+ if (entry->v) {
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(env_cpu(s), address);
+ }
+ entry->ppn = (mem_value & 0x1ffffc00) >> 10;
+ entry->v = (mem_value & 0x00000100) >> 8;
+ entry->sz = (mem_value & 0x00000080) >> 6 |
+ (mem_value & 0x00000010) >> 4;
+ entry->pr = (mem_value & 0x00000060) >> 5;
+ entry->c = (mem_value & 0x00000008) >> 3;
+ entry->d = (mem_value & 0x00000004) >> 2;
+ entry->sh = (mem_value & 0x00000002) >> 1;
+ entry->wt = (mem_value & 0x00000001);
+ } else {
+ /* UTLB Data Array 2 */
+ entry->tc = (mem_value & 0x00000008) >> 3;
+ entry->sa = (mem_value & 0x00000007);
+ }
+}
+
+int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
+{
+ int n;
+ int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
+
+ /* check area */
+ if (env->sr & (1u << SR_MD)) {
+ /* For privileged mode, P2 and P4 area is not cacheable. */
+ if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
+ return 0;
+ } else {
+ /* For user mode, only U0 area is cacheable. */
+ if (0x80000000 <= addr)
+ return 0;
+ }
+
+ /*
+ * TODO : Evaluate CCR and check if the cache is on or off.
+ * Now CCR is not in CPUSH4State, but in SH7750State.
+ * When you move the ccr into CPUSH4State, the code will be
+ * as follows.
+ */
+#if 0
+ /* check if operand cache is enabled or not. */
+ if (!(env->ccr & 1))
+ return 0;
+#endif
+
+    /* if MMU is off, no check for TLB. */
+    if (!(env->mmucr & MMUCR_AT))
+        return 1;
+
+ /* check TLB */
+ n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
+ if (n >= 0)
+ return env->itlb[n].c;
+
+ n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
+ if (n >= 0)
+ return env->utlb[n].c;
+
+ return 0;
+}
+
+bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ /* Delay slots are indivisible, ignore interrupts */
+ if (env->flags & DELAY_SLOT_MASK) {
+ return false;
+ } else {
+ superh_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int ret;
+
+ target_ulong physical;
+ int prot;
+
+ ret = get_physical_address(env, &physical, &prot, address, access_type);
+
+ if (ret == MMU_OK) {
+ address &= TARGET_PAGE_MASK;
+ physical &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+
+ if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
+ env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
+ }
+
+ env->tea = address;
+ switch (ret) {
+ case MMU_ITLB_MISS:
+ case MMU_DTLB_MISS_READ:
+ cs->exception_index = 0x040;
+ break;
+ case MMU_DTLB_MULTIPLE:
+ case MMU_ITLB_MULTIPLE:
+ cs->exception_index = 0x140;
+ break;
+ case MMU_ITLB_VIOLATION:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_MISS_WRITE:
+ cs->exception_index = 0x060;
+ break;
+ case MMU_DTLB_INITIAL_WRITE:
+ cs->exception_index = 0x080;
+ break;
+ case MMU_DTLB_VIOLATION_READ:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_VIOLATION_WRITE:
+ cs->exception_index = 0x0c0;
+ break;
+ case MMU_IADDR_ERROR:
+ case MMU_DADDR_ERROR_READ:
+ cs->exception_index = 0x0e0;
+ break;
+ case MMU_DADDR_ERROR_WRITE:
+ cs->exception_index = 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled MMU fault");
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sh4/helper.h b/target/sh4/helper.h
new file mode 100644
index 000000000..8d792f6b5
--- /dev/null
+++ b/target/sh4/helper.h
@@ -0,0 +1,43 @@
+DEF_HELPER_1(ldtlb, void, env)
+DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
+DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env)
+DEF_HELPER_1(raise_fpu_disable, noreturn, env)
+DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env)
+DEF_HELPER_1(sleep, noreturn, env)
+DEF_HELPER_2(trapa, noreturn, env, i32)
+DEF_HELPER_1(exclusive, noreturn, env)
+
+DEF_HELPER_3(movcal, void, env, i32, i32)
+DEF_HELPER_1(discard_movcal_backup, void, env)
+DEF_HELPER_2(ocbi, void, env, i32)
+
+DEF_HELPER_3(macl, void, env, i32, i32)
+DEF_HELPER_3(macw, void, env, i32, i32)
+
+DEF_HELPER_2(ld_fpscr, void, env, i32)
+
+DEF_HELPER_FLAGS_3(fadd_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fadd_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(fcnvsd_FT_DT, TCG_CALL_NO_WG, f64, env, f32)
+DEF_HELPER_FLAGS_2(fcnvds_DT_FT, TCG_CALL_NO_WG, f32, env, f64)
+
+DEF_HELPER_FLAGS_3(fcmp_eq_FT, TCG_CALL_NO_WG, i32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmp_eq_DT, TCG_CALL_NO_WG, i32, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmp_gt_FT, TCG_CALL_NO_WG, i32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmp_gt_DT, TCG_CALL_NO_WG, i32, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdiv_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdiv_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(float_FT, TCG_CALL_NO_WG, f32, env, i32)
+DEF_HELPER_FLAGS_2(float_DT, TCG_CALL_NO_WG, f64, env, i32)
+DEF_HELPER_FLAGS_4(fmac_FT, TCG_CALL_NO_WG, f32, env, f32, f32, f32)
+DEF_HELPER_FLAGS_3(fmul_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmul_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsub_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsub_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(fsqrt_FT, TCG_CALL_NO_WG, f32, env, f32)
+DEF_HELPER_FLAGS_2(fsqrt_DT, TCG_CALL_NO_WG, f64, env, f64)
+DEF_HELPER_FLAGS_2(fsrra_FT, TCG_CALL_NO_WG, f32, env, f32)
+DEF_HELPER_FLAGS_2(ftrc_FT, TCG_CALL_NO_WG, i32, env, f32)
+DEF_HELPER_FLAGS_2(ftrc_DT, TCG_CALL_NO_WG, i32, env, f64)
+DEF_HELPER_3(fipr, void, env, i32, i32)
+DEF_HELPER_2(ftrv, void, env, i32)
diff --git a/target/sh4/meson.build b/target/sh4/meson.build
new file mode 100644
index 000000000..56a57576d
--- /dev/null
+++ b/target/sh4/meson.build
@@ -0,0 +1,14 @@
+sh4_ss = ss.source_set()
+sh4_ss.add(files(
+ 'cpu.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'op_helper.c',
+ 'translate.c',
+))
+
+sh4_softmmu_ss = ss.source_set()
+sh4_softmmu_ss.add(files('monitor.c'))
+
+target_arch += {'sh4': sh4_ss}
+target_softmmu_arch += {'sh4': sh4_softmmu_ss}
diff --git a/target/sh4/monitor.c b/target/sh4/monitor.c
new file mode 100644
index 000000000..2da6a5426
--- /dev/null
+++ b/target/sh4/monitor.c
@@ -0,0 +1,58 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+
+static void print_tlb(Monitor *mon, int idx, tlb_t *tlb)
+{
+ monitor_printf(mon, " tlb%i:\t"
+ "asid=%hhu vpn=%x\tppn=%x\tsz=%hhu size=%u\t"
+ "v=%hhu shared=%hhu cached=%hhu prot=%hhu "
+ "dirty=%hhu writethrough=%hhu\n",
+ idx,
+ tlb->asid, tlb->vpn, tlb->ppn, tlb->sz, tlb->size,
+ tlb->v, tlb->sh, tlb->c, tlb->pr,
+ tlb->d, tlb->wt);
+}
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ int i;
+
+ if (!env) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+
+ monitor_printf (mon, "ITLB:\n");
+ for (i = 0 ; i < ITLB_SIZE ; i++)
+ print_tlb (mon, i, &env->itlb[i]);
+ monitor_printf (mon, "UTLB:\n");
+ for (i = 0 ; i < UTLB_SIZE ; i++)
+ print_tlb (mon, i, &env->utlb[i]);
+}
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
new file mode 100644
index 000000000..752669825
--- /dev/null
+++ b/target/sh4/op_helper.c
@@ -0,0 +1,491 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "fpu/softfloat.h"
+
+#ifndef CONFIG_USER_ONLY
+
+void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUSH4State *env = cs->env_ptr;
+
+ env->tea = addr;
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ case MMU_DATA_LOAD:
+ cs->exception_index = 0x0e0;
+ break;
+ case MMU_DATA_STORE:
+ cs->exception_index = 0x100;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+#endif
+
+void helper_ldtlb(CPUSH4State *env)
+{
+#ifdef CONFIG_USER_ONLY
+ cpu_abort(env_cpu(env), "Unhandled ldtlb");
+#else
+ cpu_load_tlb(env);
+#endif
+}
+
+static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index,
+ uintptr_t retaddr)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = index;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void helper_raise_illegal_instruction(CPUSH4State *env)
+{
+ raise_exception(env, 0x180, 0);
+}
+
+void helper_raise_slot_illegal_instruction(CPUSH4State *env)
+{
+ raise_exception(env, 0x1a0, 0);
+}
+
+void helper_raise_fpu_disable(CPUSH4State *env)
+{
+ raise_exception(env, 0x800, 0);
+}
+
+void helper_raise_slot_fpu_disable(CPUSH4State *env)
+{
+ raise_exception(env, 0x820, 0);
+}
+
+void helper_sleep(CPUSH4State *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+ env->in_sleep = 1;
+ raise_exception(env, EXCP_HLT, 0);
+}
+
+void helper_trapa(CPUSH4State *env, uint32_t tra)
+{
+ env->tra = tra << 2;
+ raise_exception(env, 0x160, 0);
+}
+
+void helper_exclusive(CPUSH4State *env)
+{
+ /* We do not want cpu_restore_state to run. */
+ cpu_loop_exit_atomic(env_cpu(env), 0);
+}
+
+void helper_movcal(CPUSH4State *env, uint32_t address, uint32_t value)
+{
+    if (cpu_sh4_is_cached(env, address)) {
+        memory_content *r = g_new(memory_content, 1);
+
+        r->address = address;
+        r->value = value;
+        r->next = NULL;
+
+        *(env->movcal_backup_tail) = r;
+        env->movcal_backup_tail = &(r->next);
+    }
+}
+
+void helper_discard_movcal_backup(CPUSH4State *env)
+{
+    memory_content *current = env->movcal_backup;
+
+    while (current) {
+        memory_content *next = current->next;
+        g_free(current);
+        env->movcal_backup = current = next;
+        if (current == NULL) {
+            env->movcal_backup_tail = &(env->movcal_backup);
+        }
+    }
+}
+
+void helper_ocbi(CPUSH4State *env, uint32_t address)
+{
+    memory_content **current = &(env->movcal_backup);
+
+    while (*current) {
+        uint32_t a = (*current)->address;
+        if ((a & ~0x1F) == (address & ~0x1F)) {
+            memory_content *next = (*current)->next;
+            cpu_stl_data(env, a, (*current)->value);
+
+            if (next == NULL) {
+                env->movcal_backup_tail = current;
+            }
+
+            g_free(*current);
+            *current = next;
+            break;
+        }
+    }
+}
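+
+/* The ~0x1F masks above compare 32-byte operand-cache lines: ocbi
+   invalidates a whole line, so the pre-movca.l content saved for that
+   line is written back, undoing the speculative store. */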
+
+void helper_macl(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
+{
+ int64_t res;
+
+ res = ((uint64_t) env->mach << 32) | env->macl;
+    res += (int64_t) (int32_t) arg0 * (int64_t) (int32_t) arg1;
+ env->mach = (res >> 32) & 0xffffffff;
+ env->macl = res & 0xffffffff;
+ if (env->sr & (1u << SR_S)) {
+ if (res < 0)
+ env->mach |= 0xffff0000;
+ else
+ env->mach &= 0x00007fff;
+ }
+}
+
+void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
+{
+ int64_t res;
+
+ res = ((uint64_t) env->mach << 32) | env->macl;
+    res += (int64_t) (int16_t) arg0 * (int64_t) (int16_t) arg1;
+ env->mach = (res >> 32) & 0xffffffff;
+ env->macl = res & 0xffffffff;
+ if (env->sr & (1u << SR_S)) {
+        /* Note: a bare -0x80000000 literal is unsigned in C and would
+           compare as +2^31; the LL suffix keeps the bound signed. */
+        if (res < -0x80000000LL) {
+ env->mach = 1;
+ env->macl = 0x80000000;
+ } else if (res > 0x000000007fffffff) {
+ env->mach = 1;
+ env->macl = 0x7fffffff;
+ }
+ }
+}
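+
+/* With SR.S set, the architecture calls for 32-bit saturation in mac.w
+   (MACH then only records that an overflow happened) and 48-bit
+   saturation in mac.l; the comparisons against +/-2^31 above implement
+   the former. */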
+
+void helper_ld_fpscr(CPUSH4State *env, uint32_t val)
+{
+ env->fpscr = val & FPSCR_MASK;
+ if ((val & FPSCR_RM_MASK) == FPSCR_RM_ZERO) {
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ } else {
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ }
+ set_flush_to_zero((val & FPSCR_DN) != 0, &env->fp_status);
+}
+
+static void update_fpscr(CPUSH4State *env, uintptr_t retaddr)
+{
+ int xcpt, cause, enable;
+
+ xcpt = get_float_exception_flags(&env->fp_status);
+
+ /* Clear the cause entries */
+ env->fpscr &= ~FPSCR_CAUSE_MASK;
+
+ if (unlikely(xcpt)) {
+ if (xcpt & float_flag_invalid) {
+ env->fpscr |= FPSCR_CAUSE_V;
+ }
+ if (xcpt & float_flag_divbyzero) {
+ env->fpscr |= FPSCR_CAUSE_Z;
+ }
+ if (xcpt & float_flag_overflow) {
+ env->fpscr |= FPSCR_CAUSE_O;
+ }
+ if (xcpt & float_flag_underflow) {
+ env->fpscr |= FPSCR_CAUSE_U;
+ }
+ if (xcpt & float_flag_inexact) {
+ env->fpscr |= FPSCR_CAUSE_I;
+ }
+
+ /* Accumulate in flag entries */
+ env->fpscr |= (env->fpscr & FPSCR_CAUSE_MASK)
+ >> (FPSCR_CAUSE_SHIFT - FPSCR_FLAG_SHIFT);
+
+ /* Generate an exception if enabled */
+ cause = (env->fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT;
+ enable = (env->fpscr & FPSCR_ENABLE_MASK) >> FPSCR_ENABLE_SHIFT;
+ if (cause & enable) {
+ raise_exception(env, 0x120, retaddr);
+ }
+ }
+}
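+
+/* FPSCR carries three parallel bit fields: cause (per-operation), flag
+   (sticky) and enable. update_fpscr() copies the new cause bits into the
+   sticky flags and raises the FPU exception (0x120) only for causes
+   whose enable bit is set. */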
+
+float32 helper_fadd_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_add(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fadd_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_add(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+uint32_t helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float32_compare(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return relation == float_relation_equal;
+}
+
+uint32_t helper_fcmp_eq_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float64_compare(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return relation == float_relation_equal;
+}
+
+uint32_t helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float32_compare(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return relation == float_relation_greater;
+}
+
+uint32_t helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float64_compare(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return relation == float_relation_greater;
+}
+
+float64 helper_fcnvsd_FT_DT(CPUSH4State *env, float32 t0)
+{
+ float64 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float32_to_float64(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fcnvds_DT_FT(CPUSH4State *env, float64 t0)
+{
+ float32 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float64_to_float32(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fdiv_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_div(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fdiv_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_div(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_float_FT(CPUSH4State *env, uint32_t t0)
+{
+ float32 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = int32_to_float32(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float64 helper_float_DT(CPUSH4State *env, uint32_t t0)
+{
+ float64 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = int32_to_float64(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fmac_FT(CPUSH4State *env, float32 t0, float32 t1, float32 t2)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_muladd(t0, t1, t2, 0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fmul_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_mul(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fmul_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_mul(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fsqrt_FT(CPUSH4State *env, float32 t0)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_sqrt(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fsqrt_DT(CPUSH4State *env, float64 t0)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_sqrt(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fsrra_FT(CPUSH4State *env, float32 t0)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ /* "Approximate" 1/sqrt(x) via actual computation. */
+ t0 = float32_sqrt(t0, &env->fp_status);
+ t0 = float32_div(float32_one, t0, &env->fp_status);
+    /*
+     * Since this is supposed to be an approximation, an inexact
+     * exception is required. One supposes this also follows the usual
+     * IEEE rule that other exceptions take precedence.
+     */
+ if (get_float_exception_flags(&env->fp_status) == 0) {
+ set_float_exception_flags(float_flag_inexact, &env->fp_status);
+ }
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fsub_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_sub(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fsub_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_sub(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+uint32_t helper_ftrc_FT(CPUSH4State *env, float32 t0)
+{
+ uint32_t ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float32_to_int32_round_to_zero(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+uint32_t helper_ftrc_DT(CPUSH4State *env, float64 t0)
+{
+ uint32_t ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float64_to_int32_round_to_zero(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+void helper_fipr(CPUSH4State *env, uint32_t m, uint32_t n)
+{
+ int bank, i;
+ float32 r, p;
+
+    bank = (env->fpscr & FPSCR_FR) ? 16 : 0;
+ r = float32_zero;
+ set_float_exception_flags(0, &env->fp_status);
+
+ for (i = 0 ; i < 4 ; i++) {
+ p = float32_mul(env->fregs[bank + m + i],
+ env->fregs[bank + n + i],
+ &env->fp_status);
+ r = float32_add(r, p, &env->fp_status);
+ }
+ update_fpscr(env, GETPC());
+
+ env->fregs[bank + n + 3] = r;
+}
+
+void helper_ftrv(CPUSH4State *env, uint32_t n)
+{
+ int bank_matrix, bank_vector;
+ int i, j;
+ float32 r[4];
+ float32 p;
+
+    bank_matrix = (env->fpscr & FPSCR_FR) ? 0 : 16;
+    bank_vector = (env->fpscr & FPSCR_FR) ? 16 : 0;
+ set_float_exception_flags(0, &env->fp_status);
+ for (i = 0 ; i < 4 ; i++) {
+ r[i] = float32_zero;
+ for (j = 0 ; j < 4 ; j++) {
+ p = float32_mul(env->fregs[bank_matrix + 4 * j + i],
+ env->fregs[bank_vector + j],
+ &env->fp_status);
+ r[i] = float32_add(r[i], p, &env->fp_status);
+ }
+ }
+ update_fpscr(env, GETPC());
+
+ for (i = 0 ; i < 4 ; i++) {
+ env->fregs[bank_vector + i] = r[i];
+ }
+}
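+
+/* ftrv multiplies the 4x4 XMTRX matrix, held in the bank opposite to
+   FPSCR.FR, by a vector in the current bank, which is why the two bank
+   selectors above are complementary. */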
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
new file mode 100644
index 000000000..ce5d674a5
--- /dev/null
+++ b/target/sh4/translate.c
@@ -0,0 +1,2367 @@
+/*
+ * SH4 translation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DEBUG_DISAS
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+#include "qemu/qemu-print.h"
+
+
+typedef struct DisasContext {
+ DisasContextBase base;
+
+ uint32_t tbflags; /* should stay unmodified during the TB translation */
+ uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
+ int memidx;
+ int gbank;
+ int fbank;
+ uint32_t delayed_pc;
+ uint32_t features;
+
+ uint16_t opcode;
+
+ bool has_movcal;
+} DisasContext;
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(ctx) 1
+#else
+#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
+#endif
+
+/* Target-specific values for ctx->base.is_jmp. */
+/* We want to exit back to the cpu loop for some reason.
+ Usually this is to recognize interrupts immediately. */
+#define DISAS_STOP DISAS_TARGET_0
+
+/* global register indexes */
+static TCGv cpu_gregs[32];
+static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
+static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
+static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
+static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
+static TCGv cpu_lock_addr, cpu_lock_value;
+static TCGv cpu_fregs[32];
+
+/* internal register indexes */
+static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
+
+#include "exec/gen-icount.h"
+
+void sh4_translate_init(void)
+{
+ int i;
+ static const char * const gregnames[24] = {
+ "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
+ "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
+ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
+ "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
+ "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
+ };
+ static const char * const fregnames[32] = {
+ "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
+ "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
+ "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
+ "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
+ "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
+ "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
+ "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
+ "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
+ };
+
+ for (i = 0; i < 24; i++) {
+ cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, gregs[i]),
+ gregnames[i]);
+ }
+ memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
+
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, pc), "PC");
+ cpu_sr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr), "SR");
+ cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_m), "SR_M");
+ cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_q), "SR_Q");
+ cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_t), "SR_T");
+ cpu_ssr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, ssr), "SSR");
+ cpu_spc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, spc), "SPC");
+ cpu_gbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, gbr), "GBR");
+ cpu_vbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, vbr), "VBR");
+ cpu_sgr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sgr), "SGR");
+ cpu_dbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, dbr), "DBR");
+ cpu_mach = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, mach), "MACH");
+ cpu_macl = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, macl), "MACL");
+ cpu_pr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, pr), "PR");
+ cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fpscr), "FPSCR");
+ cpu_fpul = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fpul), "FPUL");
+
+ cpu_flags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, flags), "_flags_");
+ cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, delayed_pc),
+ "_delayed_pc_");
+ cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State,
+ delayed_cond),
+ "_delayed_cond_");
+ cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, lock_addr),
+ "_lock_addr_");
+ cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, lock_value),
+ "_lock_value_");
+
+ for (i = 0; i < 32; i++)
+ cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fregs[i]),
+ fregnames[i]);
+}
+
+void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int i;
+
+ qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
+ env->pc, cpu_read_sr(env), env->pr, env->fpscr);
+ qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
+ env->spc, env->ssr, env->gbr, env->vbr);
+ qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
+ env->sgr, env->dbr, env->delayed_pc, env->fpul);
+ for (i = 0; i < 24; i += 4) {
+ qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
+ i, env->gregs[i], i + 1, env->gregs[i + 1],
+ i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
+ }
+ if (env->flags & DELAY_SLOT) {
+ qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
+ } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
+ qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
+ } else if (env->flags & DELAY_SLOT_RTE) {
+ qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
+ }
+}
+
+static void gen_read_sr(TCGv dst)
+{
+ TCGv t0 = tcg_temp_new();
+    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
+    tcg_gen_or_i32(dst, cpu_sr, t0);
+    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
+    tcg_gen_or_i32(dst, dst, t0);
+    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
+    tcg_gen_or_i32(dst, dst, t0);
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_write_sr(TCGv src)
+{
+ tcg_gen_andi_i32(cpu_sr, src,
+ ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
+ tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
+ tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
+ tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
+}
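+
+/*
+ * SR is kept split across TCG globals: the frequently-written Q, M and T
+ * bits live in cpu_sr_q/cpu_sr_m/cpu_sr_t, while cpu_sr holds the other
+ * bits with those three masked out. gen_read_sr() and gen_write_sr()
+ * convert between the split and the architectural forms.
+ */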
+
+static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
+{
+ if (save_pc) {
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
+ }
+ if (ctx->delayed_pc != (uint32_t) -1) {
+ tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
+ }
+ if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags);
+ }
+}
+
+static inline bool use_exit_tb(DisasContext *ctx)
+{
+ return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
+}
+
+static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (use_exit_tb(ctx)) {
+ return false;
+ }
+ return translator_use_goto_tb(&ctx->base, dest);
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb(ctx->base.tb, n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ if (use_exit_tb(ctx)) {
+ tcg_gen_exit_tb(NULL, 0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_jump(DisasContext * ctx)
+{
+ if (ctx->delayed_pc == -1) {
+        /* The target is not statically known: it necessarily comes from
+           a delayed jump, since immediate jumps are conditional jumps. */
+ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
+ tcg_gen_discard_i32(cpu_delayed_pc);
+ if (use_exit_tb(ctx)) {
+ tcg_gen_exit_tb(NULL, 0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+ } else {
+ gen_goto_tb(ctx, 0, ctx->delayed_pc);
+ }
+}
+
+/* Immediate conditional jump (bt or bf) */
+static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
+ bool jump_if_true)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
+
+ if (ctx->tbflags & GUSA_EXCLUSIVE) {
+ /* When in an exclusive region, we must continue to the end.
+ Therefore, exit the region on a taken branch, but otherwise
+ fall through to the next instruction. */
+ tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
+ /* Note that this won't actually use a goto_tb opcode because we
+ disallow it in use_goto_tb, but it handles exit + singlestep. */
+ gen_goto_tb(ctx, 0, dest);
+ gen_set_label(l1);
+ ctx->base.is_jmp = DISAS_NEXT;
+ return;
+ }
+
+ gen_save_cpu_state(ctx, false);
+ tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
+ gen_goto_tb(ctx, 0, dest);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Delayed conditional jump (bt or bf) */
+static void gen_delayed_conditional_jump(DisasContext * ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv ds = tcg_temp_new();
+
+ tcg_gen_mov_i32(ds, cpu_delayed_cond);
+ tcg_gen_discard_i32(cpu_delayed_cond);
+
+ if (ctx->tbflags & GUSA_EXCLUSIVE) {
+ /* When in an exclusive region, we must continue to the end.
+ Therefore, exit the region on a taken branch, but otherwise
+ fall through to the next instruction. */
+ tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
+
+ /* Leave the gUSA region. */
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
+ gen_jump(ctx);
+
+ gen_set_label(l1);
+ ctx->base.is_jmp = DISAS_NEXT;
+ return;
+ }
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
+ gen_set_label(l1);
+ gen_jump(ctx);
+}
+
+static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ /* We have already signaled illegal instruction for odd Dr. */
+ tcg_debug_assert((reg & 1) == 0);
+ reg ^= ctx->fbank;
+ tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
+}
+
+static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ /* We have already signaled illegal instruction for odd Dr. */
+ tcg_debug_assert((reg & 1) == 0);
+ reg ^= ctx->fbank;
+ tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
+}
+
+#define B3_0 (ctx->opcode & 0xf)
+#define B6_4 ((ctx->opcode >> 4) & 0x7)
+#define B7_4 ((ctx->opcode >> 4) & 0xf)
+#define B7_0 (ctx->opcode & 0xff)
+#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
+#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
+ (ctx->opcode & 0xfff))
+#define B11_8 ((ctx->opcode >> 8) & 0xf)
+#define B15_12 ((ctx->opcode >> 12) & 0xf)
+
+#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
+#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
+#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
+
+#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
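+/* XHACK folds an XD register specifier onto the flat fregs[] index:
+   bit 0 of the specifier moves to bit 4 (the bank-select bit), which is
+   presumably how odd-numbered FMOV operands reach XD0..XD14 when
+   FPSCR.SZ is set. */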
+
+#define CHECK_NOT_DELAY_SLOT \
+ if (ctx->envflags & DELAY_SLOT_MASK) { \
+ goto do_illegal_slot; \
+ }
+
+#define CHECK_PRIVILEGED \
+ if (IS_USER(ctx)) { \
+ goto do_illegal; \
+ }
+
+#define CHECK_FPU_ENABLED \
+ if (ctx->tbflags & (1u << SR_FD)) { \
+ goto do_fpu_disabled; \
+ }
+
+#define CHECK_FPSCR_PR_0 \
+ if (ctx->tbflags & FPSCR_PR) { \
+ goto do_illegal; \
+ }
+
+#define CHECK_FPSCR_PR_1 \
+ if (!(ctx->tbflags & FPSCR_PR)) { \
+ goto do_illegal; \
+ }
+
+#define CHECK_SH4A \
+ if (!(ctx->features & SH_FEATURE_SH4A)) { \
+ goto do_illegal; \
+ }
+
+static void _decode_opc(DisasContext * ctx)
+{
+    /* This code tries to make movca.l emulation sufficiently
+       accurate for Linux purposes. This instruction writes
+       memory, and prior to that, always allocates a cache line.
+       It is used in two contexts:
+       - in memcpy, where data is copied in blocks, the first write
+         to a block uses movca.l for performance;
+       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
+         used to flush the cache. Here, the data written by movca.l is
+         never meant to reach memory, and the values written are bogus.
+
+       To simulate this, when we emulate movca.l we store the value to
+       memory, but we also remember the previous content. If we then
+       see an ocbi, we check whether a movca.l was previously done for
+       that address. If so, the write should not have hit memory, so
+       we restore the previous content. When we see an instruction
+       that is neither movca.l nor ocbi, the previous content is
+       discarded.
+
+       To optimize, we only try to flush stores at the start of a TB,
+       or if we already saw a movca.l in this TB and have not yet
+       flushed the stores. */
+ if (ctx->has_movcal)
+ {
+ int opcode = ctx->opcode & 0xf0ff;
+ if (opcode != 0x0093 /* ocbi */
+ && opcode != 0x00c3 /* movca.l */)
+ {
+ gen_helper_discard_movcal_backup(cpu_env);
+ ctx->has_movcal = 0;
+ }
+ }
+
+#if 0
+ fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
+#endif
+
+ switch (ctx->opcode) {
+ case 0x0019: /* div0u */
+ tcg_gen_movi_i32(cpu_sr_m, 0);
+ tcg_gen_movi_i32(cpu_sr_q, 0);
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+ return;
+ case 0x000b: /* rts */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
+ ctx->envflags |= DELAY_SLOT;
+        ctx->delayed_pc = (uint32_t)-1;
+ return;
+ case 0x0028: /* clrmac */
+ tcg_gen_movi_i32(cpu_mach, 0);
+ tcg_gen_movi_i32(cpu_macl, 0);
+ return;
+ case 0x0048: /* clrs */
+ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
+ return;
+ case 0x0008: /* clrt */
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+ return;
+ case 0x0038: /* ldtlb */
+ CHECK_PRIVILEGED
+ gen_helper_ldtlb(cpu_env);
+ return;
+ case 0x002b: /* rte */
+ CHECK_PRIVILEGED
+ CHECK_NOT_DELAY_SLOT
+ gen_write_sr(cpu_ssr);
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
+ ctx->envflags |= DELAY_SLOT_RTE;
+        ctx->delayed_pc = (uint32_t)-1;
+ ctx->base.is_jmp = DISAS_STOP;
+ return;
+ case 0x0058: /* sets */
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
+ return;
+ case 0x0018: /* sett */
+ tcg_gen_movi_i32(cpu_sr_t, 1);
+ return;
+ case 0xfbfd: /* frchg */
+ CHECK_FPSCR_PR_0
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
+ ctx->base.is_jmp = DISAS_STOP;
+ return;
+ case 0xf3fd: /* fschg */
+ CHECK_FPSCR_PR_0
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
+ ctx->base.is_jmp = DISAS_STOP;
+ return;
+ case 0xf7fd: /* fpchg */
+ CHECK_SH4A
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
+ ctx->base.is_jmp = DISAS_STOP;
+ return;
+ case 0x0009: /* nop */
+ return;
+ case 0x001b: /* sleep */
+ CHECK_PRIVILEGED
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
+ gen_helper_sleep(cpu_env);
+ return;
+ }
+
+ switch (ctx->opcode & 0xf000) {
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xe000: /* mov #imm,Rn */
+#ifdef CONFIG_USER_ONLY
+ /* Detect the start of a gUSA region. If so, update envflags
+ and end the TB. This will allow us to see the end of the
+ region (stored in R0) in the next TB. */
+ if (B11_8 == 15 && B7_0s < 0 &&
+ (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
+ ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
+ ctx->base.is_jmp = DISAS_STOP;
+ }
+#endif
+ tcg_gen_movi_i32(REG(B11_8), B7_0s);
+ return;
+ case 0x9000: /* mov.w @(disp,PC),Rn */
+ {
+ TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xd000: /* mov.l @(disp,PC),Rn */
+ {
+ TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x7000: /* add #imm,Rn */
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
+ return;
+ case 0xa000: /* bra disp */
+ CHECK_NOT_DELAY_SLOT
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
+ ctx->envflags |= DELAY_SLOT;
+ return;
+ case 0xb000: /* bsr disp */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
+ ctx->envflags |= DELAY_SLOT;
+ return;
+ }
+
+ switch (ctx->opcode & 0xf00f) {
+ case 0x6003: /* mov Rm,Rn */
+ tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x2000: /* mov.b Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
+ return;
+ case 0x2001: /* mov.w Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
+ return;
+ case 0x2002: /* mov.l Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ return;
+ case 0x6000: /* mov.b @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
+ return;
+ case 0x6001: /* mov.w @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ return;
+ case 0x6002: /* mov.l @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ return;
+ case 0x2004: /* mov.b Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 1);
+ /* might cause re-execution */
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
+ tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x2005: /* mov.w Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 2);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x2006: /* mov.l Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+    case 0x6004: /* mov.b @Rm+,Rn */
+        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
+        if (B11_8 != B7_4) {
+            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
+        }
+        return;
+    case 0x6005: /* mov.w @Rm+,Rn */
+        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+        if (B11_8 != B7_4) {
+            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+        }
+        return;
+    case 0x6006: /* mov.l @Rm+,Rn */
+        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+        if (B11_8 != B7_4) {
+            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+        }
+        return;
+ case 0x0004: /* mov.b Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x0005: /* mov.w Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x0006: /* mov.l Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000c: /* mov.b @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000d: /* mov.w @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000e: /* mov.l @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x6008: /* swap.b Rm,Rn */
+ {
+ TCGv low = tcg_temp_new();
+ tcg_gen_bswap16_i32(low, REG(B7_4), 0);
+ tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
+ tcg_temp_free(low);
+ }
+ return;
+ case 0x6009: /* swap.w Rm,Rn */
+ tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
+ return;
+ case 0x200d: /* xtrct Rm,Rn */
+ {
+ TCGv high, low;
+ high = tcg_temp_new();
+ tcg_gen_shli_i32(high, REG(B7_4), 16);
+ low = tcg_temp_new();
+ tcg_gen_shri_i32(low, REG(B11_8), 16);
+ tcg_gen_or_i32(REG(B11_8), high, low);
+ tcg_temp_free(low);
+ tcg_temp_free(high);
+ }
+ return;
+ case 0x300c: /* add Rm,Rn */
+ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300e: /* addc Rm,Rn */
+ {
+ TCGv t0, t1;
+ t0 = tcg_const_tl(0);
+ t1 = tcg_temp_new();
+ tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
+ tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), t0, t1, cpu_sr_t);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ return;
+ case 0x300f: /* addv Rm,Rn */
+ {
+ TCGv t0, t1, t2;
+ t0 = tcg_temp_new();
+ tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
+ t1 = tcg_temp_new();
+ tcg_gen_xor_i32(t1, t0, REG(B11_8));
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
+ tcg_gen_andc_i32(cpu_sr_t, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
+ tcg_temp_free(t1);
+            tcg_gen_mov_i32(REG(B11_8), t0); /* Rn is the destination */
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x2009: /* and Rm,Rn */
+ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x3000: /* cmp/eq Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3003: /* cmp/ge Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3007: /* cmp/gt Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3006: /* cmp/hi Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3002: /* cmp/hs Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x200c: /* cmp/str Rm,Rn */
+ {
+ TCGv cmp1 = tcg_temp_new();
+ TCGv cmp2 = tcg_temp_new();
+ tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
+ tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
+ tcg_gen_andc_i32(cmp1, cmp1, cmp2);
+ tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
+ tcg_temp_free(cmp2);
+ tcg_temp_free(cmp1);
+ }
+ return;
+ case 0x2007: /* div0s Rm,Rn */
+ tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
+ tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
+ tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
+ return;
+ case 0x3004: /* div1 Rm,Rn */
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv zero = tcg_const_i32(0);
+
+ /* shift left arg1, saving the bit being pushed out and inserting
+ T on the right */
+ tcg_gen_shri_i32(t0, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
+
+ /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
+ using 64-bit temps, we compute arg0's high part from q ^ m, so
+ that it is 0x00000000 when adding the value or 0xffffffff when
+ subtracting it. */
+ tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
+ tcg_gen_subi_i32(t1, t1, 1);
+ tcg_gen_neg_i32(t2, REG(B7_4));
+ tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
+ tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
+
+ /* compute T and Q depending on carry */
+ tcg_gen_andi_i32(t1, t1, 1);
+ tcg_gen_xor_i32(t1, t1, t0);
+ tcg_gen_xori_i32(cpu_sr_t, t1, 1);
+ tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
+
+ tcg_temp_free(zero);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x300d: /* dmuls.l Rm,Rn */
+ tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
+ return;
+ case 0x3005: /* dmulu.l Rm,Rn */
+ tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
+ return;
+ case 0x600e: /* exts.b Rm,Rn */
+ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600f: /* exts.w Rm,Rn */
+ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600c: /* extu.b Rm,Rn */
+ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600d: /* extu.w Rm,Rn */
+ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x000f: /* mac.l @Rm+,@Rn+ */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+ arg1 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+ gen_helper_macl(cpu_env, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ }
+ return;
+ case 0x400f: /* mac.w @Rm+,@Rn+ */
+ {
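+ /* The mac.w operands are 16-bit words in memory; load them
+ sign-extended for gen_helper_macw. */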
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESW);
+ arg1 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESW);
+ gen_helper_macw(cpu_env, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+ }
+ return;
+ case 0x0007: /* mul.l Rm,Rn */
+ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
+ return;
+ case 0x200f: /* muls.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ }
+ return;
+ case 0x200e: /* mulu.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ }
+ return;
+ case 0x600b: /* neg Rm,Rn */
+ tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600a: /* negc Rm,Rn */
+ {
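+ /* Rn:T = 0 - (Rm + T); the resulting borrow ends up in bit 0 of T. */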
+ TCGv t0 = tcg_const_i32(0);
+ tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
+ REG(B7_4), t0, cpu_sr_t, t0);
+ tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
+ t0, t0, REG(B11_8), cpu_sr_t);
+ tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x6007: /* not Rm,Rn */
+ tcg_gen_not_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x200b: /* or Rm,Rn */
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x400c: /* shad Rm,Rn */
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
+
+ /* positive case: shift to the left */
+ tcg_gen_shl_i32(t1, REG(B11_8), t0);
+
+ /* negative case: shift to the right in two steps to
+ correctly handle the -32 case */
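+ /* e.g. for Rm = -32: (Rm & 0x1f) = 0, xor with 0x1f gives 31,
+ so the value is shifted right by 31 and then by 1 more,
+ 32 bits in total. */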
+ tcg_gen_xori_i32(t0, t0, 0x1f);
+ tcg_gen_sar_i32(t2, REG(B11_8), t0);
+ tcg_gen_sari_i32(t2, t2, 1);
+
+ /* select between the two cases */
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ return;
+ case 0x400d: /* shld Rm,Rn */
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
+
+ /* positive case: shift to the left */
+ tcg_gen_shl_i32(t1, REG(B11_8), t0);
+
+ /* negative case: shift to the right in two steps to
+ correctly handle the -32 case */
+ tcg_gen_xori_i32(t0, t0, 0x1f);
+ tcg_gen_shr_i32(t2, REG(B11_8), t0);
+ tcg_gen_shri_i32(t2, t2, 1);
+
+ /* select between the two cases */
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ return;
+ case 0x3008: /* sub Rm,Rn */
+ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300a: /* subc Rm,Rn */
+ {
+ TCGv t0, t1;
+ t0 = tcg_const_tl(0);
+ t1 = tcg_temp_new();
+ tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
+ tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), t0, t1, cpu_sr_t);
+ tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ return;
+ case 0x300b: /* subv Rm,Rn */
+ {
+ TCGv t0, t1, t2;
+ t0 = tcg_temp_new();
+ tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
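+ /* T = signed overflow of the subtraction: ((result ^ Rn) & (Rn ^ Rm)) >> 31 */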
+ t1 = tcg_temp_new();
+ tcg_gen_xor_i32(t1, t0, REG(B11_8));
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
+ tcg_gen_and_i32(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_shri_i32(cpu_sr_t, t1, 31);
+ tcg_temp_free(t1);
+ tcg_gen_mov_i32(REG(B11_8), t0);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x2008: /* tst Rm,Rn */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0x200a: /* xor Rm,Rn */
+ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_SZ) {
+ int xsrc = XHACK(B7_4);
+ int xdst = XHACK(B11_8);
+ tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
+ tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
+ } else {
+ tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
+ }
+ return;
+ case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, XHACK(B7_4));
+ tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ }
+ return;
+ case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ gen_store_fpr64(ctx, fp, XHACK(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+ }
+ return;
+ case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ gen_store_fpr64(ctx, fp, XHACK(B11_8));
+ tcg_temp_free_i64(fp);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
+ } else {
+ tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ }
+ return;
+ case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new_i32();
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, XHACK(B7_4));
+ tcg_gen_subi_i32(addr, REG(B11_8), 8);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+ }
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
+ gen_store_fpr64(ctx, fp, XHACK(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
+ }
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ if (ctx->tbflags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, XHACK(B7_4));
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
+ }
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ {
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_PR) {
+ TCGv_i64 fp0, fp1;
+
+ if (ctx->opcode & 0x0110) {
+ goto do_illegal;
+ }
+ fp0 = tcg_temp_new_i64();
+ fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, B11_8);
+ gen_load_fpr64(ctx, fp1, B7_4);
+ switch (ctx->opcode & 0xf00f) {
+ case 0xf000: /* fadd Rm,Rn */
+ gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf001: /* fsub Rm,Rn */
+ gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf002: /* fmul Rm,Rn */
+ gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf003: /* fdiv Rm,Rn */
+ gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf004: /* fcmp/eq Rm,Rn */
+ gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
+ return;
+ case 0xf005: /* fcmp/gt Rm,Rn */
+ gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
+ return;
+ }
+ gen_store_fpr64(ctx, fp0, B11_8);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ } else {
+ switch (ctx->opcode & 0xf00f) {
+ case 0xf000: /* fadd Rm,Rn */
+ gen_helper_fadd_FT(FREG(B11_8), cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ break;
+ case 0xf001: /* fsub Rm,Rn */
+ gen_helper_fsub_FT(FREG(B11_8), cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ break;
+ case 0xf002: /* fmul Rm,Rn */
+ gen_helper_fmul_FT(FREG(B11_8), cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ break;
+ case 0xf003: /* fdiv Rm,Rn */
+ gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ break;
+ case 0xf004: /* fcmp/eq Rm,Rn */
+ gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ return;
+ case 0xf005: /* fcmp/gt Rm,Rn */
+ gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
+ FREG(B11_8), FREG(B7_4));
+ return;
+ }
+ }
+ }
+ return;
+ case 0xf00e: /* fmac FR0,FRm,FRn */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_0
+ gen_helper_fmac_FT(FREG(B11_8), cpu_env,
+ FREG(0), FREG(B7_4), FREG(B11_8));
+ return;
+ }
+
+ switch (ctx->opcode & 0xff00) {
+ case 0xc900: /* and #imm,R0 */
+ tcg_gen_andi_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcd00: /* and.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8b00: /* bf label */
+ CHECK_NOT_DELAY_SLOT
+ gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
+ return;
+ case 0x8f00: /* bf/s label */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
+ ctx->envflags |= DELAY_SLOT_CONDITIONAL;
+ return;
+ case 0x8900: /* bt label */
+ CHECK_NOT_DELAY_SLOT
+ gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
+ return;
+ case 0x8d00: /* bt/s label */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
+ ctx->envflags |= DELAY_SLOT_CONDITIONAL;
+ return;
+ case 0x8800: /* cmp/eq #imm,R0 */
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
+ return;
+ case 0xc400: /* mov.b @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc500: /* mov.w @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc600: /* mov.l @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc000: /* mov.b R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc100: /* mov.w R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc200: /* mov.l R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8000: /* mov.b R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8100: /* mov.w R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8400: /* mov.b @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8500: /* mov.w @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc700: /* mova @(disp,PC),R0 */
+ tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
+ 4 + B7_0 * 4) & ~3);
+ return;
+ case 0xcb00: /* or #imm,R0 */
+ tcg_gen_ori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcf00: /* or.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_ori_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc300: /* trapa #imm */
+ {
+ TCGv imm;
+ CHECK_NOT_DELAY_SLOT
+ gen_save_cpu_state(ctx, true);
+ imm = tcg_const_i32(B7_0);
+ gen_helper_trapa(cpu_env, imm);
+ tcg_temp_free(imm);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
+ return;
+ case 0xc800: /* tst #imm,R0 */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_andi_i32(val, REG(0), B7_0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0xcc00: /* tst.b #imm,@(R0,GBR) */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_add_i32(val, REG(0), cpu_gbr);
+ tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
+ tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0xca00: /* xor #imm,R0 */
+ tcg_gen_xori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xce00: /* xor.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_xori_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ }
+
+ switch (ctx->opcode & 0xf08f) {
+ case 0x408e: /* ldc Rm,Rn_BANK */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
+ return;
+ case 0x4087: /* ldc.l @Rm+,Rn_BANK */
+ CHECK_PRIVILEGED
+ tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ return;
+ case 0x0082: /* stc Rm_BANK,Rn */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
+ return;
+ case 0x4083: /* stc.l Rm_BANK,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ }
+
+ switch (ctx->opcode & 0xf0ff) {
+ case 0x0023: /* braf Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
+ ctx->envflags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0003: /* bsrf Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
+ tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
+ ctx->envflags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x4015: /* cmp/pl Rn */
+ tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x4011: /* cmp/pz Rn */
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x4010: /* dt Rn */
+ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x402b: /* jmp @Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ ctx->envflags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400b: /* jsr @Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ ctx->envflags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400e: /* ldc Rm,SR */
+ CHECK_PRIVILEGED
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
+ gen_write_sr(val);
+ tcg_temp_free(val);
+ ctx->base.is_jmp = DISAS_STOP;
+ }
+ return;
+ case 0x4007: /* ldc.l @Rm+,SR */
+ CHECK_PRIVILEGED
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_andi_i32(val, val, 0x700083f3);
+ gen_write_sr(val);
+ tcg_temp_free(val);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ ctx->base.is_jmp = DISAS_STOP;
+ }
+ return;
+ case 0x0002: /* stc SR,Rn */
+ CHECK_PRIVILEGED
+ gen_read_sr(REG(B11_8));
+ return;
+ case 0x4003: /* stc SR,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
+ TCGv val = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ gen_read_sr(val);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+#define LD(reg,ldnum,ldpnum,prechk) \
+ case ldnum: \
+ prechk \
+ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
+ return; \
+ case ldpnum: \
+ prechk \
+ tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
+ return;
+#define ST(reg,stnum,stpnum,prechk) \
+ case stnum: \
+ prechk \
+ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
+ return; \
+ case stpnum: \
+ prechk \
+ { \
+ TCGv addr = tcg_temp_new(); \
+ tcg_gen_subi_i32(addr, REG(B11_8), 4); \
+ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
+ tcg_gen_mov_i32(REG(B11_8), addr); \
+ tcg_temp_free(addr); \
+ } \
+ return;
+#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
+ LD(reg,ldnum,ldpnum,prechk) \
+ ST(reg,stnum,stpnum,prechk)
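+ /* e.g. LDST(gbr, ...) below expands to four cases: "lds Rm,GBR",
+ "lds.l @Rm+,GBR", "sts GBR,Rn" and "sts.l GBR,@-Rn". */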
+ LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
+ LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
+ LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
+ LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
+ ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
+ LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
+ LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
+ LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
+ LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
+ LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
+ LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
+ case 0x406a: /* lds Rm,FPSCR */
+ CHECK_FPU_ENABLED
+ gen_helper_ld_fpscr(cpu_env, REG(B11_8));
+ ctx->base.is_jmp = DISAS_STOP;
+ return;
+ case 0x4066: /* lds.l @Rm+,FPSCR */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ gen_helper_ld_fpscr(cpu_env, addr);
+ tcg_temp_free(addr);
+ ctx->base.is_jmp = DISAS_STOP;
+ }
+ return;
+ case 0x006a: /* sts FPSCR,Rn */
+ CHECK_FPU_ENABLED
+ tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
+ return;
+ case 0x4062: /* sts FPSCR,@-Rn */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr, val;
+ val = tcg_temp_new();
+ tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
+ addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0x00c3: /* movca.l R0,@Rn */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
+ gen_helper_movcal(cpu_env, REG(B11_8), val);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_temp_free(val);
+ }
+ ctx->has_movcal = 1;
+ return;
+ case 0x40a9: /* movua.l @Rm,R0 */
+ CHECK_SH4A
+ /* Load non-boundary-aligned data */
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_UNALN);
+ return;
+ case 0x40e9: /* movua.l @Rm+,R0 */
+ CHECK_SH4A
+ /* Load non-boundary-aligned data */
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
+ MO_TEUL | MO_UNALN);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ return;
+ case 0x0029: /* movt Rn */
+ tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
+ return;
+ case 0x0073:
+ /* MOVCO.L
+ * LDST -> T
+ * If (T == 1) R0 -> (Rn)
+ * 0 -> LDST
+ *
+ * The above description doesn't work in a parallel context.
+ * Since we currently support no smp boards, this implies user-mode.
+ * But we can still support the official mechanism while user-mode
+ * is single-threaded. */
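+ /* An illustrative guest pairing (register numbers arbitrary):
+ * movli.l @r1,r0 ! load-linked
+ * add #1,r0
+ * movco.l r0,@r1 ! store-conditional; T = 1 on success
+ * bf retry
+ */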
+ CHECK_SH4A
+ {
+ TCGLabel *fail = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
+ if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
+ TCGv tmp;
+
+ tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
+ cpu_lock_addr, fail);
+ tmp = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
+ REG(0), ctx->memidx, MO_TEUL);
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_movi_i32(cpu_sr_t, 1);
+ }
+ tcg_gen_br(done);
+
+ gen_set_label(fail);
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+
+ gen_set_label(done);
+ tcg_gen_movi_i32(cpu_lock_addr, -1);
+ }
+ return;
+ case 0x0063:
+ /* MOVLI.L @Rm,R0
+ * 1 -> LDST
+ * (Rm) -> R0
+ * When interrupt/exception
+ * occurred 0 -> LDST
+ *
+ * In a parallel context, we must also save the loaded value
+ * for use with the cmpxchg that we'll use with movco.l. */
+ CHECK_SH4A
+ if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, REG(B11_8));
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_mov_i32(cpu_lock_value, REG(0));
+ tcg_gen_mov_i32(cpu_lock_addr, tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_movi_i32(cpu_lock_addr, 0);
+ }
+ return;
+ case 0x0093: /* ocbi @Rn */
+ gen_helper_ocbi(cpu_env, REG(B11_8));
+ return;
+ case 0x00a3: /* ocbp @Rn */
+ case 0x00b3: /* ocbwb @Rn */
+ /* These instructions are supposed to do nothing in case of
+ a cache miss. Given that we only partially emulate caches
+ it is safe to simply ignore them. */
+ return;
+ case 0x0083: /* pref @Rn */
+ return;
+ case 0x00d3: /* prefi @Rn */
+ CHECK_SH4A
+ return;
+ case 0x00e3: /* icbi @Rn */
+ CHECK_SH4A
+ return;
+ case 0x00ab: /* synco */
+ CHECK_SH4A
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ return;
+ case 0x4024: /* rotcl Rn */
+ {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, cpu_sr_t);
+ tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
+ tcg_temp_free(tmp);
+ }
+ return;
+ case 0x4025: /* rotcr Rn */
+ {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
+ tcg_temp_free(tmp);
+ }
+ return;
+ case 0x4004: /* rotl Rn */
+ tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ return;
+ case 0x4005: /* rotr Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4000: /* shll Rn */
+ case 0x4020: /* shal Rn */
+ tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4021: /* shar Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4001: /* shlr Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4008: /* shll2 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4018: /* shll8 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4028: /* shll16 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x4009: /* shlr2 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4019: /* shlr8 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4029: /* shlr16 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x401b: /* tas.b @Rn */
+ {
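+ /* Atomically set bit 7 of the byte at @Rn; T = 1 iff the old
+ value was zero. */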
+ TCGv val = tcg_const_i32(0x80);
+ tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
+ ctx->memidx, MO_UB);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
+ return;
+ case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
+ return;
+ case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_PR) {
+ TCGv_i64 fp;
+ if (ctx->opcode & 0x0100) {
+ goto do_illegal;
+ }
+ fp = tcg_temp_new_i64();
+ gen_helper_float_DT(fp, cpu_env, cpu_fpul);
+ gen_store_fpr64(ctx, fp, B11_8);
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
+ }
+ return;
+ case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_PR) {
+ TCGv_i64 fp;
+ if (ctx->opcode & 0x0100) {
+ goto do_illegal;
+ }
+ fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, B11_8);
+ gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
+ }
+ return;
+ case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
+ return;
+ case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
+ return;
+ case 0xf06d: /* fsqrt FRn */
+ CHECK_FPU_ENABLED
+ if (ctx->tbflags & FPSCR_PR) {
+ if (ctx->opcode & 0x0100) {
+ goto do_illegal;
+ }
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, B11_8);
+ gen_helper_fsqrt_DT(fp, cpu_env, fp);
+ gen_store_fpr64(ctx, fp, B11_8);
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
+ }
+ return;
+ case 0xf07d: /* fsrra FRn */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_0
+ gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
+ return;
+ case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_0
+ tcg_gen_movi_i32(FREG(B11_8), 0);
+ return;
+ case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_0
+ tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
+ return;
+ case 0xf0ad: /* fcnvsd FPUL,DRn */
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
+ gen_store_fpr64(ctx, fp, B11_8);
+ tcg_temp_free_i64(fp);
+ }
+ return;
+ case 0xf0bd: /* fcnvds DRn,FPUL */
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp, B11_8);
+ gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
+ tcg_temp_free_i64(fp);
+ }
+ return;
+ case 0xf0ed: /* fipr FVm,FVn */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_1
+ {
+ TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
+ TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ gen_helper_fipr(cpu_env, m, n);
+ tcg_temp_free(m);
+ tcg_temp_free(n);
+ return;
+ }
+ case 0xf0fd: /* ftrv XMTRX,FVn */
+ CHECK_FPU_ENABLED
+ CHECK_FPSCR_PR_1
+ {
+ if ((ctx->opcode & 0x0300) != 0x0100) {
+ goto do_illegal;
+ }
+ TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ gen_helper_ftrv(cpu_env, n);
+ tcg_temp_free(n);
+ return;
+ }
+ }
+#if 0
+ fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
+ ctx->opcode, ctx->base.pc_next);
+ fflush(stderr);
+#endif
+ do_illegal:
+ if (ctx->envflags & DELAY_SLOT_MASK) {
+ do_illegal_slot:
+ gen_save_cpu_state(ctx, true);
+ gen_helper_raise_slot_illegal_instruction(cpu_env);
+ } else {
+ gen_save_cpu_state(ctx, true);
+ gen_helper_raise_illegal_instruction(cpu_env);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return;
+
+ do_fpu_disabled:
+ gen_save_cpu_state(ctx, true);
+ if (ctx->envflags & DELAY_SLOT_MASK) {
+ gen_helper_raise_slot_fpu_disable(cpu_env);
+ } else {
+ gen_helper_raise_fpu_disable(cpu_env);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return;
+}
+
+static void decode_opc(DisasContext * ctx)
+{
+ uint32_t old_flags = ctx->envflags;
+
+ _decode_opc(ctx);
+
+ if (old_flags & DELAY_SLOT_MASK) {
+ /* go out of the delay slot */
+ ctx->envflags &= ~DELAY_SLOT_MASK;
+
+ /* When in an exclusive region, we must continue to the end
+ for conditional branches. */
+ if (ctx->tbflags & GUSA_EXCLUSIVE
+ && old_flags & DELAY_SLOT_CONDITIONAL) {
+ gen_delayed_conditional_jump(ctx);
+ return;
+ }
+ /* Otherwise this is probably an invalid gUSA region.
+ Drop the GUSA bits so the next TB doesn't see them. */
+ ctx->envflags &= ~GUSA_MASK;
+
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags);
+ if (old_flags & DELAY_SLOT_CONDITIONAL) {
+ gen_delayed_conditional_jump(ctx);
+ } else {
+ gen_jump(ctx);
+ }
+ }
+}
+
+#ifdef CONFIG_USER_ONLY
+/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
+ Upon an interrupt, a real kernel would simply notice magic values in
+ the registers and reset the PC to the start of the sequence.
+
+ For QEMU, we cannot do this in quite the same way. Instead, we notice
+ the normal start of such a sequence (mov #-x,r15). While we can handle
+ any sequence via cpu_exec_step_atomic, we can recognize the "normal"
+ sequences and transform them into atomic operations as seen by the host.
+*/
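+/* An illustrative region, as recognized by the state machine in
+ decode_gusa below (register numbers arbitrary):
+ mov.l @r2,r3 ! load
+ add r4,r3 ! operation
+ mov.l r3,@r2 ! store
+ which is translated into a single host atomic add. */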
+static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
+{
+ uint16_t insns[5];
+ int ld_adr, ld_dst, ld_mop;
+ int op_dst, op_src, op_opc;
+ int mv_src, mt_dst, st_src, st_mop;
+ TCGv op_arg;
+ uint32_t pc = ctx->base.pc_next;
+ uint32_t pc_end = ctx->base.tb->cs_base;
+ int max_insns = (pc_end - pc) / 2;
+ int i;
+
+ /* The state machine below will consume only a few insns.
+ If there are more than that in a region, fail now. */
+ if (max_insns > ARRAY_SIZE(insns)) {
+ goto fail;
+ }
+
+ /* Read all of the insns for the region. */
+ for (i = 0; i < max_insns; ++i) {
+ insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
+ }
+
+ ld_adr = ld_dst = ld_mop = -1;
+ mv_src = -1;
+ op_dst = op_src = op_opc = -1;
+ mt_dst = -1;
+ st_src = st_mop = -1;
+ op_arg = NULL;
+ i = 0;
+
+#define NEXT_INSN \
+ do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
+
+ /*
+ * Expect a load to begin the region.
+ */
+ NEXT_INSN;
+ switch (ctx->opcode & 0xf00f) {
+ case 0x6000: /* mov.b @Rm,Rn */
+ ld_mop = MO_SB;
+ break;
+ case 0x6001: /* mov.w @Rm,Rn */
+ ld_mop = MO_TESW;
+ break;
+ case 0x6002: /* mov.l @Rm,Rn */
+ ld_mop = MO_TESL;
+ break;
+ default:
+ goto fail;
+ }
+ ld_adr = B7_4;
+ ld_dst = B11_8;
+ if (ld_adr == ld_dst) {
+ goto fail;
+ }
+ /* Unless we see a mov, any two-operand operation must use ld_dst. */
+ op_dst = ld_dst;
+
+ /*
+ * Expect an optional register move.
+ */
+ NEXT_INSN;
+ switch (ctx->opcode & 0xf00f) {
+ case 0x6003: /* mov Rm,Rn */
+ /*
+ * Here we want to recognize ld_dst being saved for later consumption,
+ * or for another input register being copied so that ld_dst need not
+ * be clobbered during the operation.
+ */
+ op_dst = B11_8;
+ mv_src = B7_4;
+ if (op_dst == ld_dst) {
+ /* Overwriting the load output. */
+ goto fail;
+ }
+ if (mv_src != ld_dst) {
+ /* Copying a new input; constrain op_src to match the load. */
+ op_src = ld_dst;
+ }
+ break;
+
+ default:
+ /* Put back and re-examine as operation. */
+ --i;
+ }
+
+ /*
+ * Expect the operation.
+ */
+ NEXT_INSN;
+ switch (ctx->opcode & 0xf00f) {
+ case 0x300c: /* add Rm,Rn */
+ op_opc = INDEX_op_add_i32;
+ goto do_reg_op;
+ case 0x2009: /* and Rm,Rn */
+ op_opc = INDEX_op_and_i32;
+ goto do_reg_op;
+ case 0x200a: /* xor Rm,Rn */
+ op_opc = INDEX_op_xor_i32;
+ goto do_reg_op;
+ case 0x200b: /* or Rm,Rn */
+ op_opc = INDEX_op_or_i32;
+ do_reg_op:
+ /* The operation register should be as expected, and the
+ other input cannot depend on the load. */
+ if (op_dst != B11_8) {
+ goto fail;
+ }
+ if (op_src < 0) {
+ /* Unconstrained input. */
+ op_src = B7_4;
+ } else if (op_src == B7_4) {
+ /* Constrained input matched load. All operations are
+ commutative; "swap" them by "moving" the load output
+ to the (implicit) first argument and the move source
+ to the (explicit) second argument. */
+ op_src = mv_src;
+ } else {
+ goto fail;
+ }
+ op_arg = REG(op_src);
+ break;
+
+ case 0x6007: /* not Rm,Rn */
+ if (ld_dst != B7_4 || mv_src >= 0) {
+ goto fail;
+ }
+ op_dst = B11_8;
+ op_opc = INDEX_op_xor_i32;
+ op_arg = tcg_const_i32(-1);
+ break;
+
+ case 0x7000 ... 0x700f: /* add #imm,Rn */
+ if (op_dst != B11_8 || mv_src >= 0) {
+ goto fail;
+ }
+ op_opc = INDEX_op_add_i32;
+ op_arg = tcg_const_i32(B7_0s);
+ break;
+
+ case 0x3000: /* cmp/eq Rm,Rn */
+ /* Looking for the middle of a compare-and-swap sequence,
+ beginning with the compare. Operands can be either order,
+ but with only one overlapping the load. */
+ if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
+ goto fail;
+ }
+ op_opc = INDEX_op_setcond_i32; /* placeholder */
+ op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
+ op_arg = REG(op_src);
+
+ NEXT_INSN;
+ switch (ctx->opcode & 0xff00) {
+ case 0x8b00: /* bf label */
+ case 0x8f00: /* bf/s label */
+ if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
+ goto fail;
+ }
+ if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
+ break;
+ }
+ /* We're looking to unconditionally modify Rn with the
+ result of the comparison, within the delay slot of
+ the branch. This is used by older gcc. */
+ NEXT_INSN;
+ if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
+ mt_dst = B11_8;
+ } else {
+ goto fail;
+ }
+ break;
+
+ default:
+ goto fail;
+ }
+ break;
+
+ case 0x2008: /* tst Rm,Rn */
+ /* Looking for a compare-and-swap against zero. */
+ if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
+ goto fail;
+ }
+ op_opc = INDEX_op_setcond_i32;
+ op_arg = tcg_const_i32(0);
+
+ NEXT_INSN;
+ if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
+ || pc + (i + 1 + B7_0s) * 2 != pc_end) {
+ goto fail;
+ }
+ break;
+
+ default:
+ /* Put back and re-examine as store. */
+ --i;
+ }
+
+ /*
+ * Expect the store.
+ */
+ /* The store must be the last insn. */
+ if (i != max_insns - 1) {
+ goto fail;
+ }
+ NEXT_INSN;
+ switch (ctx->opcode & 0xf00f) {
+ case 0x2000: /* mov.b Rm,@Rn */
+ st_mop = MO_UB;
+ break;
+ case 0x2001: /* mov.w Rm,@Rn */
+ st_mop = MO_UW;
+ break;
+ case 0x2002: /* mov.l Rm,@Rn */
+ st_mop = MO_UL;
+ break;
+ default:
+ goto fail;
+ }
+ /* The store must match the load. */
+ if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
+ goto fail;
+ }
+ st_src = B7_4;
+
+#undef NEXT_INSN
+
+ /*
+ * Emit the operation.
+ */
+ switch (op_opc) {
+ case -1:
+ /* No operation found. Look for exchange pattern. */
+ if (st_src == ld_dst || mv_src >= 0) {
+ goto fail;
+ }
+ tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
+ ctx->memidx, ld_mop);
+ break;
+
+ case INDEX_op_add_i32:
+ if (op_dst != st_src) {
+ goto fail;
+ }
+ if (op_dst == ld_dst && st_mop == MO_UL) {
+ tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ } else {
+ tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ if (op_dst != ld_dst) {
+ /* Note that mop sizes < 4 cannot use add_fetch
+ because it won't carry into the higher bits. */
+ tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
+ }
+ }
+ break;
+
+ case INDEX_op_and_i32:
+ if (op_dst != st_src) {
+ goto fail;
+ }
+ if (op_dst == ld_dst) {
+ tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ } else {
+ tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
+ }
+ break;
+
+ case INDEX_op_or_i32:
+ if (op_dst != st_src) {
+ goto fail;
+ }
+ if (op_dst == ld_dst) {
+ tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ } else {
+ tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
+ }
+ break;
+
+ case INDEX_op_xor_i32:
+ if (op_dst != st_src) {
+ goto fail;
+ }
+ if (op_dst == ld_dst) {
+ tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ } else {
+ tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
+ op_arg, ctx->memidx, ld_mop);
+ tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
+ }
+ break;
+
+ case INDEX_op_setcond_i32:
+ if (st_src == ld_dst) {
+ goto fail;
+ }
+ tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
+ REG(st_src), ctx->memidx, ld_mop);
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
+ if (mt_dst >= 0) {
+ tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* If op_src is not a valid register, then op_arg was a constant. */
+ if (op_src < 0 && op_arg) {
+ tcg_temp_free_i32(op_arg);
+ }
+
+ /* The entire region has been translated. */
+ ctx->envflags &= ~GUSA_MASK;
+ ctx->base.pc_next = pc_end;
+ ctx->base.num_insns += max_insns - 1;
+ return;
+
+ fail:
+ qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
+ pc, pc_end);
+
+ /* Restart with the EXCLUSIVE bit set, within a TB run via
+ cpu_exec_step_atomic holding the exclusive lock. */
+ ctx->envflags |= GUSA_EXCLUSIVE;
+ gen_save_cpu_state(ctx, false);
+ gen_helper_exclusive(cpu_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ /* We're not executing an instruction, but we must report one for the
+ purposes of accounting within the TB. We might as well report the
+ entire region consumed via ctx->base.pc_next so that it's immediately
+ available in the disassembly dump. */
+ ctx->base.pc_next = pc_end;
+ ctx->base.num_insns += max_insns - 1;
+}
+#endif
+
+static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUSH4State *env = cs->env_ptr;
+ uint32_t tbflags;
+ int bound;
+
+ ctx->tbflags = tbflags = ctx->base.tb->flags;
+ ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
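+ /* memidx 1 selects user-mode accesses (SR.MD clear); 0 is privileged. */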
+ ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
+ /* We don't know if the delayed pc came from a dynamic or static branch,
+ so assume it is a dynamic branch. */
+ ctx->delayed_pc = -1; /* use delayed pc from env pointer */
+ ctx->features = env->features;
+ ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
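+ /* Bank 1 general registers are in use only when both SR.MD
+ and SR.RB are set. */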
+ ctx->gbank = ((tbflags & (1 << SR_MD)) &&
+ (tbflags & (1 << SR_RB))) * 0x10;
+ ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
+
+ if (tbflags & GUSA_MASK) {
+ uint32_t pc = ctx->base.pc_next;
+ uint32_t pc_end = ctx->base.tb->cs_base;
+ int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
+ int max_insns = (pc_end - pc) / 2;
+
+ if (pc != pc_end + backup || max_insns < 2) {
+ /* This is a malformed gUSA region. Don't do anything special,
+ since the interpreter is likely to get confused. */
+ ctx->envflags &= ~GUSA_MASK;
+ } else if (tbflags & GUSA_EXCLUSIVE) {
+ /* Regardless of single-stepping or the end of the page,
+ we must complete execution of the gUSA region while
+ holding the exclusive lock. */
+ ctx->base.max_insns = max_insns;
+ return;
+ }
+ }
+
+ /* Since the ISA is fixed-width, we can bound by the number
+ of instructions remaining on the page. */
+ bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
+ ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+}
+
+static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+}
+
+static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
+}
+
+static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ CPUSH4State *env = cs->env_ptr;
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+#ifdef CONFIG_USER_ONLY
+ if (unlikely(ctx->envflags & GUSA_MASK)
+ && !(ctx->envflags & GUSA_EXCLUSIVE)) {
+ /* We're in a gUSA region, and we have not already fallen
+ back on using an exclusive region. Attempt to parse the
+ region into a single supported atomic operation. Failure
+ is handled within the parser by raising an exception to
+ retry using an exclusive region. */
+ decode_gusa(ctx, env);
+ return;
+ }
+#endif
+
+ ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
+ decode_opc(ctx);
+ ctx->base.pc_next += 2;
+}
+
+static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ if (ctx->tbflags & GUSA_EXCLUSIVE) {
+ /* Ending the region of exclusivity. Clear the bits. */
+ ctx->envflags &= ~GUSA_MASK;
+ }
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_STOP:
+ gen_save_cpu_state(ctx, true);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ gen_save_cpu_state(ctx, false);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps sh4_tr_ops = {
+ .init_disas_context = sh4_tr_init_disas_context,
+ .tb_start = sh4_tr_tb_start,
+ .insn_start = sh4_tr_insn_start,
+ .translate_insn = sh4_tr_translate_insn,
+ .tb_stop = sh4_tr_tb_stop,
+ .disas_log = sh4_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+ env->flags = data[1];
+ /* Theoretically delayed_pc should also be restored. In practice the
+ branch instruction is re-executed after exception, so the delayed
+ branch target will be recomputed. */
+}
diff --git a/target/sparc/Kconfig b/target/sparc/Kconfig
new file mode 100644
index 000000000..70cc0f3a2
--- /dev/null
+++ b/target/sparc/Kconfig
@@ -0,0 +1,5 @@
+config SPARC
+ bool
+
+config SPARC64
+ bool
diff --git a/target/sparc/asi.h b/target/sparc/asi.h
new file mode 100644
index 000000000..bb58735dd
--- /dev/null
+++ b/target/sparc/asi.h
@@ -0,0 +1,312 @@
+#ifndef SPARC_ASI_H
+#define SPARC_ASI_H
+
+/* asi.h: Address Space Identifier values for the sparc.
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au)
+ * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su>
+ */
+
+/* The first batch are for the sun4c. */
+
+#define ASI_NULL1 0x00
+#define ASI_NULL2 0x01
+
+/* sun4c and sun4 control registers and mmu/vac ops */
+#define ASI_CONTROL 0x02
+#define ASI_SEGMAP 0x03
+#define ASI_PTE 0x04
+#define ASI_HWFLUSHSEG 0x05
+#define ASI_HWFLUSHPAGE 0x06
+#define ASI_REGMAP 0x06
+#define ASI_HWFLUSHCONTEXT 0x07
+
+#define ASI_USERTXT 0x08
+#define ASI_KERNELTXT 0x09
+#define ASI_USERDATA 0x0a
+#define ASI_KERNELDATA 0x0b
+
+/* VAC Cache flushing on sun4c and sun4 */
+#define ASI_FLUSHSEG 0x0c
+#define ASI_FLUSHPG 0x0d
+#define ASI_FLUSHCTX 0x0e
+
+/* SPARCstation-5: only 6 bits are decoded. */
+/* wo = Write Only, rw = Read Write; */
+/* ss = Single Size, as = All Sizes; */
+#define ASI_M_RES00 0x00 /* Don't touch... */
+#define ASI_M_UNA01 0x01 /* Same here... */
+#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
+#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
+#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
+#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
+#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
+#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
+#define ASI_M_USERTXT 0x08 /* Same as ASI_USERTXT; rw, as */
+#define ASI_M_KERNELTXT 0x09 /* Same as ASI_KERNELTXT; rw, as */
+#define ASI_M_USERDATA 0x0A /* Same as ASI_USERDATA; rw, as */
+#define ASI_M_KERNELDATA 0x0B /* Same as ASI_KERNELDATA; rw, as */
+#define ASI_M_TXTC_TAG 0x0C /* Instruction Cache Tag; rw, ss */
+#define ASI_M_TXTC_DATA 0x0D /* Instruction Cache Data; rw, ss */
+#define ASI_M_DATAC_TAG 0x0E /* Data Cache Tag; rw, ss */
+#define ASI_M_DATAC_DATA 0x0F /* Data Cache Data; rw, ss */
+
+/* The following cache flushing ASIs work only with the 'sta'
+ * instruction. Results are unpredictable for 'swap' and 'ldstuba',
+ * so don't do it.
+ */
+
+/* These ASI flushes affect external caches too. */
+#define ASI_M_FLUSH_PAGE 0x10 /* Flush I&D Cache Line (page); wo, ss */
+#define ASI_M_FLUSH_SEG 0x11 /* Flush I&D Cache Line (seg); wo, ss */
+#define ASI_M_FLUSH_REGION 0x12 /* Flush I&D Cache Line (region); wo, ss */
+#define ASI_M_FLUSH_CTX 0x13 /* Flush I&D Cache Line (context); wo, ss */
+#define ASI_M_FLUSH_USER 0x14 /* Flush I&D Cache Line (user); wo, ss */
+
+/* Block-copy operations are available only on certain V8 cpus. */
+#define ASI_M_BCOPY 0x17 /* Block copy */
+
+/* These affect only the ICACHE and are Ross HyperSparc and TurboSparc specific. */
+#define ASI_M_IFLUSH_PAGE 0x18 /* Flush I Cache Line (page); wo, ss */
+#define ASI_M_IFLUSH_SEG 0x19 /* Flush I Cache Line (seg); wo, ss */
+#define ASI_M_IFLUSH_REGION 0x1A /* Flush I Cache Line (region); wo, ss */
+#define ASI_M_IFLUSH_CTX 0x1B /* Flush I Cache Line (context); wo, ss */
+#define ASI_M_IFLUSH_USER 0x1C /* Flush I Cache Line (user); wo, ss */
+
+/* Block-fill operations are available on certain V8 cpus */
+#define ASI_M_BFILL 0x1F
+
+/* This allows direct access to main memory, actually 0x20 to 0x2f are
+ * the available ASI's for physical ram pass-through, but I don't have
+ * any idea what the other ones do....
+ */
+
+#define ASI_M_BYPASS 0x20 /* Reference MMU bypass; rw, as */
+#define ASI_M_FBMEM 0x29 /* Graphics card frame buffer access */
+#define ASI_M_VMEUS 0x2A /* VME user 16-bit access */
+#define ASI_M_VMEPS 0x2B /* VME priv 16-bit access */
+#define ASI_M_VMEUT 0x2C /* VME user 32-bit access */
+#define ASI_M_VMEPT 0x2D /* VME priv 32-bit access */
+#define ASI_M_SBUS 0x2E /* Direct SBus access */
+#define ASI_M_CTL 0x2F /* Control Space (ECC and MXCC are here) */
+
+
+/* This is ROSS HyperSparc only. */
+#define ASI_M_FLUSH_IWHOLE 0x31 /* Flush entire ICACHE; wo, ss */
+
+/* Tsunami/Viking/TurboSparc i/d cache flash clear. */
+#define ASI_M_IC_FLCLEAR 0x36
+#define ASI_M_DC_FLCLEAR 0x37
+
+#define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */
+
+#define ASI_M_VIKING_TMP1 0x40 /* Emulation temporary 1 on Viking */
+/* only available on SuperSparc I */
+/* #define ASI_M_VIKING_TMP2 0x41 */ /* Emulation temporary 2 on Viking */
+
+#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
+
+/* LEON ASI */
+#define ASI_LEON_NOCACHE 0x01
+
+#define ASI_LEON_DCACHE_MISS 0x01
+
+#define ASI_LEON_CACHEREGS 0x02
+#define ASI_LEON_IFLUSH 0x10
+#define ASI_LEON_DFLUSH 0x11
+
+#define ASI_LEON_MMUFLUSH 0x18
+#define ASI_LEON_MMUREGS 0x19
+#define ASI_LEON_BYPASS 0x1c
+#define ASI_LEON_FLUSH_PAGE 0x10
+
+/* V9 Architecture mandatory ASIs. */
+#define ASI_N 0x04 /* Nucleus */
+#define ASI_NL 0x0c /* Nucleus, little endian */
+#define ASI_AIUP 0x10 /* Primary, user */
+#define ASI_AIUS 0x11 /* Secondary, user */
+#define ASI_AIUPL 0x18 /* Primary, user, little endian */
+#define ASI_AIUSL 0x19 /* Secondary, user, little endian */
+#define ASI_P 0x80 /* Primary, implicit */
+#define ASI_S 0x81 /* Secondary, implicit */
+#define ASI_PNF 0x82 /* Primary, no fault */
+#define ASI_SNF 0x83 /* Secondary, no fault */
+#define ASI_PL 0x88 /* Primary, implicit, l-endian */
+#define ASI_SL 0x89 /* Secondary, implicit, l-endian */
+#define ASI_PNFL 0x8a /* Primary, no fault, l-endian */
+#define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */
+
+/* SpitFire and later extended ASIs. The "(III)" marker designates
+ * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
+ * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
+ * ASIs, "(4V)" designates SUN4V specific ASIs. "(NG4)" designates SPARC-T4
+ * and later ASIs.
+ */
+#define ASI_REAL 0x14 /* Real address, cachable */
+#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
+#define ASI_REAL_IO 0x15 /* Real address, non-cachable */
+#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
+#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
+#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
+#define ASI_REAL_L 0x1c /* Real address, cachable, LE */
+#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
+#define ASI_REAL_IO_L 0x1d /* Real address, non-cachable, LE */
+#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
+#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
+#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
+#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
+#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
+#define ASI_TWINX_AIUP 0x22 /* twin load, primary user */
+#define ASI_TWINX_AIUS 0x23 /* twin load, secondary user */
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
+ * secondary, user
+ */
+#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
+#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
+#define ASI_TWINX_REAL 0x26 /* twin load, real, cachable */
+#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
+#define ASI_TWINX_N 0x27 /* twin load, nucleus */
+#define ASI_TWINX_AIUP_L 0x2a /* twin load, primary user, LE */
+#define ASI_TWINX_AIUS_L 0x2b /* twin load, secondary user, LE */
+#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
+#define ASI_TWINX_REAL_L 0x2e /* twin load, real, cachable, LE */
+#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
+#define ASI_TWINX_NL 0x2f /* twin load, nucleus, LE */
+#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
+#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
+#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
+#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */
+#define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */
+#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */
+#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */
+#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */
+#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */
+#define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */
+#define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */
+#define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */
+#define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */
+#define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */
+#define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */
+#define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */
+#define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */
+#define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */
+#define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */
+#define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */
+#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */
+#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */
+#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */
+#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */
+#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */
+#define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */
+#define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/
+#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */
+#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */
+#define ASI_UPA_CONFIG 0x4a /* UPA config space */
+#define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */
+#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */
+#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */
+#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */
+#define ASI_AFSR 0x4c /* Async fault status register */
+#define ASI_AFAR 0x4d /* Async fault address register */
+#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */
+#define ASI_HYP_SCRATCHPAD 0x4f /* (4V) Hypervisor scratchpad */
+#define ASI_IMMU 0x50 /* Insn-MMU main register space */
+#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */
+#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */
+#define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */
+#define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */
+#define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */
+#define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */
+#define ASI_DMMU 0x58 /* Data-MMU main register space */
+#define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */
+#define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 64KB TSB pointer reg */
+#define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */
+#define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */
+#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */
+#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */
+#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */
+#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */
+#define ASI_INTR_ID 0x63 /* (CMT) Interrupt ID register */
+#define ASI_CORE_ID 0x63 /* (CMT) LP ID register */
+#define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */
+#define ASI_IC_INSTR 0x66 /* Insn cache instruction ram diag */
+#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */
+#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */
+#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */
+#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */
+#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/
+#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
+#define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */
+#define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */
+#define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */
+#define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */
+#define ASI_EC_W 0x76 /* E-cache diag write access */
+#define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */
+#define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */
+#define ASI_INTR_W 0x77 /* IRQ vector dispatch write */
+#define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */
+#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */
+#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/
+#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/
+#define ASI_EC_R 0x7e /* E-cache diag read access */
+#define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */
+#define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */
+#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */
+#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/
+#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */
+#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */
+#define ASI_PIC 0xb0 /* (NG4) PIC registers */
+#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
+#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
+#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
+#define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */
+#define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */
+#define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */
+#define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */
+#define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */
+#define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */
+#define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */
+#define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */
+#define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */
+#define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */
+#define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */
+#define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */
+#define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */
+#define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */
+#define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/
+#define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */
+#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
+#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
+#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
+#define ASI_TWINX_P 0xe2 /* twin load, primary implicit */
+#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
+ * primary, implicit */
+#define ASI_TWINX_S 0xe3 /* twin load, secondary implicit */
+#define ASI_BLK_INIT_QUAD_LDD_S 0xe3 /* (NG) init-store, twin load,
+ * secondary, implicit */
+#define ASI_TWINX_PL 0xea /* twin load, primary implicit, LE */
+#define ASI_TWINX_SL 0xeb /* twin load, secondary implicit, LE */
+#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
+#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
+#define ASI_ST_BLKINIT_MRU_P 0xf2 /* (NG4) init-store, twin load,
+ * Most-Recently-Used, primary,
+ * implicit
+ */
+#define ASI_ST_BLKINIT_MRU_S 0xf3 /* (NG4) init-store, twin load,
+ * Most-Recently-Used, secondary,
+ * implicit
+ */
+#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
+#define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */
+#define ASI_ST_BLKINIT_MRU_PL 0xfa /* (NG4) init-store, twin load,
+ * Most-Recently-Used, primary,
+ * implicit, little-endian
+ */
+#define ASI_ST_BLKINIT_MRU_SL 0xfb /* (NG4) init-store, twin load,
+ * Most-Recently-Used, secondary,
+ * implicit, little-endian
+ */
+
+#endif /* SPARC_ASI_H */
diff --git a/target/sparc/cc_helper.c b/target/sparc/cc_helper.c
new file mode 100644
index 000000000..7ad5b9b29
--- /dev/null
+++ b/target/sparc/cc_helper.c
@@ -0,0 +1,471 @@
+/*
+ * Helpers for lazy condition code handling
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+static uint32_t compute_all_flags(CPUSPARCState *env)
+{
+ return env->psr & PSR_ICC;
+}
+
+static uint32_t compute_C_flags(CPUSPARCState *env)
+{
+ return env->psr & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_icc(int32_t dst)
+{
+ uint32_t ret = 0;
+
+ if (dst == 0) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_flags_xcc(CPUSPARCState *env)
+{
+ return env->xcc & PSR_ICC;
+}
+
+static uint32_t compute_C_flags_xcc(CPUSPARCState *env)
+{
+ return env->xcc & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_xcc(target_long dst)
+{
+ uint32_t ret = 0;
+
+ if (!dst) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+#endif
+
+static inline uint32_t get_V_div_icc(target_ulong src2)
+{
+ uint32_t ret = 0;
+
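+ /* The division helpers leave a precomputed overflow indicator in
+ * CC_SRC2, so V reduces to "src2 != 0"; divisions never set C.
+ */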
+ if (src2 != 0) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_div(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_V_div_icc(CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_div(CPUSPARCState *env)
+{
+ return 0;
+}
+
+static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
+{
+ uint32_t ret = 0;
+
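+ /* Unsigned wraparound reveals the carry: dst = src1 + src2 is less
+ * than src1 iff the addition carried out of bit 31.
+ */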
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
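+ /* Reconstruct the carry out of bit 31 for add-with-carry: a carry
+ * is generated when both source sign bits are set, and propagated
+ * when either is set while the result's sign bit ends up clear.
+ */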
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
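+ /* Signed overflow: the operands have equal sign bits (src1^src2^-1
+ * has bit 31 set) while the result's sign differs from src1's.
+ */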
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
+{
+ uint32_t ret = 0;
+
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_add_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_add_xcc(CC_DST, CC_SRC);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add_xcc(CPUSPARCState *env)
+{
+ return get_C_add_xcc(CC_DST, CC_SRC);
+}
+#endif
+
+static uint32_t compute_all_add(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add(CPUSPARCState *env)
+{
+ return get_C_add_icc(CC_DST, CC_SRC);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_addx_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx_xcc(CPUSPARCState *env)
+{
+ return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_addx(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx(CPUSPARCState *env)
+{
+ return get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
+static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
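+ /* Tagged arithmetic: a non-zero tag in the low two bits of either
+ * operand forces the overflow bit.
+ */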
+ if ((src1 | src2) & 0x3) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_tadd(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
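+/*
+ * TADDccTV traps on tag overflow, so if execution reaches the point
+ * of evaluating the flags, V is known to be clear and only N, Z and
+ * C need to be reconstructed.
+ */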
+static uint32_t compute_all_taddtv(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ return ret;
+}
+
+static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
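+ /* Reconstruct the borrow out of bit 31 for subtract-with-carry: a
+ * borrow is generated when src2's sign bit is set while src1's is
+ * clear, and propagated when the result's sign bit is set while
+ * src1's is clear or src2's is set.
+ */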
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
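+ /* Signed overflow on subtraction: the operand sign bits differ and
+ * the result's sign differs from src1's.
+ */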
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_sub_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub_xcc(CPUSPARCState *env)
+{
+ return get_C_sub_xcc(CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_sub(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub(CPUSPARCState *env)
+{
+ return get_C_sub_icc(CC_SRC, CC_SRC2);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_subx_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx_xcc(CPUSPARCState *env)
+{
+ return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_subx(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx(CPUSPARCState *env)
+{
+ return get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
+static uint32_t compute_all_tsub(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
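+/*
+ * As with TADDccTV, TSUBccTV traps on tag overflow, so V is known to
+ * be clear whenever these flags are computed.
+ */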
+static uint32_t compute_all_tsubtv(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_logic(CPUSPARCState *env)
+{
+ return get_NZ_icc(CC_DST);
+}
+
+static uint32_t compute_C_logic(CPUSPARCState *env)
+{
+ return 0;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_logic_xcc(CPUSPARCState *env)
+{
+ return get_NZ_xcc(CC_DST);
+}
+#endif
+
+typedef struct CCTable {
+ uint32_t (*compute_all)(CPUSPARCState *env); /* return all the flags */
+ uint32_t (*compute_c)(CPUSPARCState *env); /* return the C flag */
+} CCTable;
+
+static const CCTable icc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
+ [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
+ [CC_OP_DIV] = { compute_all_div, compute_C_div },
+ [CC_OP_ADD] = { compute_all_add, compute_C_add },
+ [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
+ [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
+ [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
+ [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
+ [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
+ [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
+ [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
+ [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
+};
+
+#ifdef TARGET_SPARC64
+static const CCTable xcc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
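+ /* Tag overflow checks only affect the 32-bit icc, so the tagged
+ * ops fold to their plain add/sub equivalents here. */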
+ [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
+ [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
+ [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
+ [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
+ [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
+};
+#endif
+
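+/*
+ * Fold the lazily-tracked operands (CC_DST/CC_SRC/CC_SRC2, selected
+ * by CC_OP) into the architectural condition codes, then switch to
+ * CC_OP_FLAGS so that later reads take the values straight from psr
+ * (and xcc on sparc64).
+ */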
+void helper_compute_psr(CPUSPARCState *env)
+{
+ uint32_t new_psr;
+
+ new_psr = icc_table[CC_OP].compute_all(env);
+ env->psr = new_psr;
+#ifdef TARGET_SPARC64
+ new_psr = xcc_table[CC_OP].compute_all(env);
+ env->xcc = new_psr;
+#endif
+ CC_OP = CC_OP_FLAGS;
+}
+
+uint32_t helper_compute_C_icc(CPUSPARCState *env)
+{
+ return icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT;
+}
diff --git a/target/sparc/cpu-param.h b/target/sparc/cpu-param.h
new file mode 100644
index 000000000..4746d8941
--- /dev/null
+++ b/target/sparc/cpu-param.h
@@ -0,0 +1,28 @@
+/*
+ * Sparc cpu parameters for qemu.
+ *
+ * SPDX-License-Identifier: LGPL-2.0+
+ */
+
+#ifndef SPARC_CPU_PARAM_H
+#define SPARC_CPU_PARAM_H 1
+
+#ifdef TARGET_SPARC64
+# define TARGET_LONG_BITS 64
+# define TARGET_PAGE_BITS 13 /* 8k */
+# define TARGET_PHYS_ADDR_SPACE_BITS 41
+# ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 44
+# endif
+# define NB_MMU_MODES 6
+#else
+# define TARGET_LONG_BITS 32
+# define TARGET_PAGE_BITS 12 /* 4k */
+# define TARGET_PHYS_ADDR_SPACE_BITS 36
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# define NB_MMU_MODES 3
+#endif
+
+#endif
diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h
new file mode 100644
index 000000000..f33949aae
--- /dev/null
+++ b/target/sparc/cpu-qom.h
@@ -0,0 +1,54 @@
+/*
+ * QEMU SPARC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_SPARC_CPU_QOM_H
+#define QEMU_SPARC_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#ifdef TARGET_SPARC64
+#define TYPE_SPARC_CPU "sparc64-cpu"
+#else
+#define TYPE_SPARC_CPU "sparc-cpu"
+#endif
+
+OBJECT_DECLARE_TYPE(SPARCCPU, SPARCCPUClass,
+ SPARC_CPU)
+
+typedef struct sparc_def_t sparc_def_t;
+/**
+ * SPARCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A SPARC CPU model.
+ */
+struct SPARCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+ sparc_def_t *cpu_def;
+};
+
+#endif
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
new file mode 100644
index 000000000..55268ed2a
--- /dev/null
+++ b/target/sparc/cpu.c
@@ -0,0 +1,954 @@
+/*
+ * Sparc CPU init helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu/module.h"
+#include "qemu/qemu-print.h"
+#include "exec/exec-all.h"
+#include "hw/qdev-properties.h"
+#include "qapi/visitor.h"
+
+//#define DEBUG_FEATURES
+
+static void sparc_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ SPARCCPU *cpu = SPARC_CPU(s);
+ SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);
+ CPUSPARCState *env = &cpu->env;
+
+ scc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPUSPARCState, end_reset_fields));
+ env->cwp = 0;
+#ifndef TARGET_SPARC64
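+ /* mark register window 0 as invalid */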
+ env->wim = 1;
+#endif
+ env->regwptr = env->regbase + (env->cwp * 16);
+ CC_OP = CC_OP_FLAGS;
+#if defined(CONFIG_USER_ONLY)
+#ifdef TARGET_SPARC64
+ env->cleanwin = env->nwindows - 2;
+ env->cansave = env->nwindows - 2;
+ env->pstate = PS_RMO | PS_PEF | PS_IE;
+ env->asi = 0x82; /* Primary no-fault */
+#endif
+#else
+#if !defined(TARGET_SPARC64)
+ env->psret = 0;
+ env->psrs = 1;
+ env->psrps = 1;
+#endif
+#ifdef TARGET_SPARC64
+ env->pstate = PS_PRIV | PS_RED | PS_PEF;
+ if (!cpu_has_hypervisor(env)) {
+ env->pstate |= PS_AG;
+ }
+ env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
+ env->tl = env->maxtl;
+ env->gl = 2;
+ cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
+ env->lsu = 0;
+#else
+ env->mmuregs[0] &= ~(MMU_E | MMU_NF);
+ env->mmuregs[0] |= env->def.mmu_bm;
+#endif
+ env->pc = 0;
+ env->npc = env->pc + 4;
+#endif
+ env->cache_control = 0;
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) {
+ int pil = env->interrupt_index & 0xf;
+ int type = env->interrupt_index & 0xf0;
+
+ if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) {
+ cs->exception_index = env->interrupt_index;
+ sparc_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->print_insn = print_insn_sparc;
+#ifdef TARGET_SPARC64
+ info->mach = bfd_mach_sparc_v9b;
+#endif
+}
+
+static void
+cpu_add_feat_as_prop(const char *typename, const char *name, const char *val)
+{
+ GlobalProperty *prop = g_new0(typeof(*prop), 1);
+ prop->driver = typename;
+ prop->property = g_strdup(name);
+ prop->value = g_strdup(val);
+ qdev_prop_register_global(prop);
+}
+
+/* Parse "+feature,-feature,feature=foo" CPU feature string */
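+/* e.g. "+hypv,-mul,nwindows=8" as given after the model name on -cpu */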
+static void sparc_cpu_parse_features(const char *typename, char *features,
+ Error **errp)
+{
+ GList *l, *plus_features = NULL, *minus_features = NULL;
+ char *featurestr; /* Single "key=value" string being parsed */
+ static bool cpu_globals_initialized;
+
+ if (cpu_globals_initialized) {
+ return;
+ }
+ cpu_globals_initialized = true;
+
+ if (!features) {
+ return;
+ }
+
+ for (featurestr = strtok(features, ",");
+ featurestr;
+ featurestr = strtok(NULL, ",")) {
+ const char *name;
+ const char *val = NULL;
+ char *eq = NULL;
+
+ /* Compatibility syntax: */
+ if (featurestr[0] == '+') {
+ plus_features = g_list_append(plus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ } else if (featurestr[0] == '-') {
+ minus_features = g_list_append(minus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ }
+
+ eq = strchr(featurestr, '=');
+ name = featurestr;
+ if (eq) {
+ *eq++ = 0;
+ val = eq;
+
+ /*
+ * Temporarily, only +feat/-feat will be supported
+ * for boolean properties until we remove the
+ * minus-overrides-plus semantics and just follow
+ * the order options appear on the command-line.
+ *
+ * TODO: warn if user is relying on minus-override-plus semantics
+ * TODO: remove minus-override-plus semantics after
+ * warning for a few releases
+ */
+ if (!strcasecmp(val, "on") ||
+ !strcasecmp(val, "off") ||
+ !strcasecmp(val, "true") ||
+ !strcasecmp(val, "false")) {
+ error_setg(errp, "Boolean properties in format %s=%s"
+ " are not supported", name, val);
+ return;
+ }
+ } else {
+ error_setg(errp, "Unsupported property format: %s", name);
+ return;
+ }
+ cpu_add_feat_as_prop(typename, name, val);
+ }
+
+ for (l = plus_features; l; l = l->next) {
+ const char *name = l->data;
+ cpu_add_feat_as_prop(typename, name, "on");
+ }
+ g_list_free_full(plus_features, g_free);
+
+ for (l = minus_features; l; l = l->next) {
+ const char *name = l->data;
+ cpu_add_feat_as_prop(typename, name, "off");
+ }
+ g_list_free_full(minus_features, g_free);
+}
+
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu)
+{
+#if !defined(TARGET_SPARC64)
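+ /* sun4m: MXCC register 7 carries the MBus module ID; processor N
+ * sits in MBus slot 8 + N. */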
+ env->mxccregs[7] = ((cpu + 8) & 0xf) << 24;
+#endif
+}
+
+static const sparc_def_t sparc_defs[] = {
+#ifdef TARGET_SPARC64
+ {
+ .name = "Fujitsu Sparc64",
+ .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 4,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 III",
+ .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 5,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 IV",
+ .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 V",
+ .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc I",
+ .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc II",
+ .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIi",
+ .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIe",
+ .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III",
+ .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III Cu",
+ .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IIIi",
+ .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV",
+ .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_4,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV+",
+ .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT,
+ },
+ {
+ .name = "Sun UltraSparc IIIi+",
+ .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc T1",
+ /* defined in sparc_ifu_fdp.v and ctu.h */
+ .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "Sun UltraSparc T2",
+ /* defined in tlu_asi_ctl.v and n2_revid_cust.v */
+ .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "NEC UltraSparc I",
+ .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+#else
+ {
+ .name = "Fujitsu MB86904",
+ .iu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu MB86907",
+ .iu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI MicroSparc I",
+ .iu_version = 0x41000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x41000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x0000003f,
+ .nwindows = 7,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
+ CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FMUL,
+ },
+ {
+ .name = "TI MicroSparc II",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x02000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI MicroSparc IIep",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x04000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016bff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 40", /* STP1020NPGA */
+ .iu_version = 0x41000000, /* SuperSPARC 2.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 50", /* STP1020PGA */
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 51",
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 60", /* STP1020APGA */
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 61",
+ .iu_version = 0x44000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc II",
+ .iu_version = 0x40000000, /* SuperSPARC II 1.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "LEON2",
+ .iu_version = 0xf2000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf2000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN,
+ },
+ {
+ .name = "LEON3",
+ .iu_version = 0xf3000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf3000000,
+ .mmu_bm = 0x00000000,
+ .mmu_ctpr_mask = 0xfffffffc,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN |
+ CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN |
+ CPU_FEATURE_CASA,
+ },
+#endif
+};
+
+static const char * const feature_name[] = {
+ "float",
+ "float128",
+ "swap",
+ "mul",
+ "div",
+ "flush",
+ "fsqrt",
+ "fmul",
+ "vis1",
+ "vis2",
+ "fsmuld",
+ "hypv",
+ "cmt",
+ "gl",
+};
+
+static void print_features(uint32_t features, const char *prefix)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(feature_name); i++) {
+ if (feature_name[i] && (features & (1 << i))) {
+ if (prefix) {
+ qemu_printf("%s", prefix);
+ }
+ qemu_printf("%s ", feature_name[i]);
+ }
+ }
+}
+
+void sparc_cpu_list(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ qemu_printf("Sparc %16s IU " TARGET_FMT_lx
+ " FPU %08x MMU %08x NWINS %d ",
+ sparc_defs[i].name,
+ sparc_defs[i].iu_version,
+ sparc_defs[i].fpu_version,
+ sparc_defs[i].mmu_version,
+ sparc_defs[i].nwindows);
+ print_features(CPU_DEFAULT_FEATURES & ~sparc_defs[i].features, "-");
+ print_features(~CPU_DEFAULT_FEATURES & sparc_defs[i].features, "+");
+ qemu_printf("\n");
+ }
+ qemu_printf("Default CPU feature flags (use '-' to remove): ");
+ print_features(CPU_DEFAULT_FEATURES, NULL);
+ qemu_printf("\n");
+ qemu_printf("Available CPU feature flags (use '+' to add): ");
+ print_features(~CPU_DEFAULT_FEATURES, NULL);
+ qemu_printf("\n");
+ qemu_printf("Numerical features (use '=' to set): iu_version "
+ "fpu_version mmu_version nwindows\n");
+}
+
+static void cpu_print_cc(FILE *f, uint32_t cc)
+{
+ qemu_fprintf(f, "%c%c%c%c", cc & PSR_NEG ? 'N' : '-',
+ cc & PSR_ZERO ? 'Z' : '-', cc & PSR_OVF ? 'V' : '-',
+ cc & PSR_CARRY ? 'C' : '-');
+}
+
+#ifdef TARGET_SPARC64
+#define REGS_PER_LINE 4
+#else
+#define REGS_PER_LINE 8
+#endif
+
+static void sparc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int i, x;
+
+ qemu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc,
+ env->npc);
+
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ qemu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1);
+ }
+ qemu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ for (x = 0; x < 3; x++) {
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ qemu_fprintf(f, "%%%c%d-%d: ",
+ x == 0 ? 'o' : (x == 1 ? 'l' : 'i'),
+ i, i + REGS_PER_LINE - 1);
+ }
+ qemu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ }
+
+ if (flags & CPU_DUMP_FPU) {
+ for (i = 0; i < TARGET_DPREGS; i++) {
+ if ((i & 3) == 0) {
+ qemu_fprintf(f, "%%f%02d: ", i * 2);
+ }
+ qemu_fprintf(f, " %016" PRIx64, env->fpr[i].ll);
+ if ((i & 3) == 3) {
+ qemu_fprintf(f, "\n");
+ }
+ }
+ }
+
+#ifdef TARGET_SPARC64
+ qemu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate,
+ (unsigned)cpu_get_ccr(env));
+ cpu_print_cc(f, cpu_get_ccr(env) << PSR_CARRY_SHIFT);
+ qemu_fprintf(f, " xcc: ");
+ cpu_print_cc(f, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4));
+ qemu_fprintf(f, ") asi: %02x tl: %d pil: %x gl: %d\n", env->asi, env->tl,
+ env->psrpil, env->gl);
+ qemu_fprintf(f, "tbr: " TARGET_FMT_lx " hpstate: " TARGET_FMT_lx " htba: "
+ TARGET_FMT_lx "\n", env->tbr, env->hpstate, env->htba);
+ qemu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d "
+ "cleanwin: %d cwp: %d\n",
+ env->cansave, env->canrestore, env->otherwin, env->wstate,
+ env->cleanwin, env->nwindows - 1 - env->cwp);
+ qemu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: "
+ TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs);
+
+#else
+ qemu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env));
+ cpu_print_cc(f, cpu_get_psr(env));
+ qemu_fprintf(f, " SPE: %c%c%c) wim: %08x\n", env->psrs ? 'S' : '-',
+ env->psrps ? 'P' : '-', env->psret ? 'E' : '-',
+ env->wim);
+ qemu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n",
+ env->fsr, env->y);
+#endif
+ qemu_fprintf(f, "\n");
+}
+
+static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+
+ cpu->env.pc = value;
+ cpu->env.npc = value + 4;
+}
+
+static void sparc_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+
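+ /* On SPARC, tb->cs_base carries the next PC, so both halves of the
+ * PC/nPC pair can be restored. */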
+ cpu->env.pc = tb->pc;
+ cpu->env.npc = tb->cs_base;
+}
+
+static bool sparc_cpu_has_work(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_interrupts_enabled(env);
+}
+
+static char *sparc_cpu_type_name(const char *cpu_model)
+{
+ char *name = g_strdup_printf(SPARC_CPU_TYPE_NAME("%s"), cpu_model);
+ char *s = name;
+
+ /* SPARC CPU model names happen to contain whitespace; since type
+ * names must not have spaces, replace them with '-'.
+ */
+ while ((s = strchr(s, ' '))) {
+ *s = '-';
+ }
+
+ return name;
+}
+
+static ObjectClass *sparc_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = sparc_cpu_type_name(cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+ SPARCCPU *cpu = SPARC_CPU(dev);
+ CPUSPARCState *env = &cpu->env;
+
+#if defined(CONFIG_USER_ONLY)
+ if ((env->def.features & CPU_FEATURE_FLOAT)) {
+ env->def.features |= CPU_FEATURE_FLOAT128;
+ }
+#endif
+
+ env->version = env->def.iu_version;
+ env->fsr = env->def.fpu_version;
+ env->nwindows = env->def.nwindows;
+#if !defined(TARGET_SPARC64)
+ env->mmuregs[0] |= env->def.mmu_version;
+ cpu_sparc_set_id(env, 0);
+ env->mxccregs[7] |= env->def.mxcc_version;
+#else
+ env->mmu_version = env->def.mmu_version;
+ env->maxtl = env->def.maxtl;
+ env->version |= env->def.maxtl << 8;
+ env->version |= env->def.nwindows - 1;
+#endif
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ scc->parent_realize(dev, errp);
+}
+
+static void sparc_cpu_initfn(Object *obj)
+{
+ SPARCCPU *cpu = SPARC_CPU(obj);
+ SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj);
+ CPUSPARCState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+
+ if (scc->cpu_def) {
+ env->def = *scc->cpu_def;
+ }
+}
+
+static void sparc_get_nwindows(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ SPARCCPU *cpu = SPARC_CPU(obj);
+ int64_t value = cpu->env.def.nwindows;
+
+ visit_type_int(v, name, &value, errp);
+}
+
+static void sparc_set_nwindows(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ const int64_t min = MIN_NWINDOWS;
+ const int64_t max = MAX_NWINDOWS;
+ SPARCCPU *cpu = SPARC_CPU(obj);
+ int64_t value;
+
+ if (!visit_type_int(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value < min || value > max) {
+ error_setg(errp, "Property %s.%s doesn't take value %" PRId64
+ " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+ object_get_typename(obj), name ? name : "null",
+ value, min, max);
+ return;
+ }
+ cpu->env.def.nwindows = value;
+}
+
+static PropertyInfo qdev_prop_nwindows = {
+ .name = "int",
+ .get = sparc_get_nwindows,
+ .set = sparc_set_nwindows,
+};
+
+static Property sparc_cpu_properties[] = {
+ DEFINE_PROP_BIT("float", SPARCCPU, env.def.features, 0, false),
+ DEFINE_PROP_BIT("float128", SPARCCPU, env.def.features, 1, false),
+ DEFINE_PROP_BIT("swap", SPARCCPU, env.def.features, 2, false),
+ DEFINE_PROP_BIT("mul", SPARCCPU, env.def.features, 3, false),
+ DEFINE_PROP_BIT("div", SPARCCPU, env.def.features, 4, false),
+ DEFINE_PROP_BIT("flush", SPARCCPU, env.def.features, 5, false),
+ DEFINE_PROP_BIT("fsqrt", SPARCCPU, env.def.features, 6, false),
+ DEFINE_PROP_BIT("fmul", SPARCCPU, env.def.features, 7, false),
+ DEFINE_PROP_BIT("vis1", SPARCCPU, env.def.features, 8, false),
+ DEFINE_PROP_BIT("vis2", SPARCCPU, env.def.features, 9, false),
+ DEFINE_PROP_BIT("fsmuld", SPARCCPU, env.def.features, 10, false),
+ DEFINE_PROP_BIT("hypv", SPARCCPU, env.def.features, 11, false),
+ DEFINE_PROP_BIT("cmt", SPARCCPU, env.def.features, 12, false),
+ DEFINE_PROP_BIT("gl", SPARCCPU, env.def.features, 13, false),
+ DEFINE_PROP_UNSIGNED("iu-version", SPARCCPU, env.def.iu_version, 0,
+ qdev_prop_uint64, target_ulong),
+ DEFINE_PROP_UINT32("fpu-version", SPARCCPU, env.def.fpu_version, 0),
+ DEFINE_PROP_UINT32("mmu-version", SPARCCPU, env.def.mmu_version, 0),
+ DEFINE_PROP("nwindows", SPARCCPU, env.def.nwindows,
+ qdev_prop_nwindows, uint32_t),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps sparc_sysemu_ops = {
+ .get_phys_page_debug = sparc_cpu_get_phys_page_debug,
+ .legacy_vmsd = &vmstate_sparc_cpu,
+};
+#endif
+
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps sparc_tcg_ops = {
+ .initialize = sparc_tcg_init,
+ .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = sparc_cpu_tlb_fill,
+ .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
+ .do_interrupt = sparc_cpu_do_interrupt,
+ .do_transaction_failed = sparc_cpu_do_transaction_failed,
+ .do_unaligned_access = sparc_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+static void sparc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ device_class_set_parent_realize(dc, sparc_cpu_realizefn,
+ &scc->parent_realize);
+ device_class_set_props(dc, sparc_cpu_properties);
+
+ device_class_set_parent_reset(dc, sparc_cpu_reset, &scc->parent_reset);
+
+ cc->class_by_name = sparc_cpu_class_by_name;
+ cc->parse_features = sparc_cpu_parse_features;
+ cc->has_work = sparc_cpu_has_work;
+ cc->dump_state = sparc_cpu_dump_state;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
+#endif
+ cc->set_pc = sparc_cpu_set_pc;
+ cc->gdb_read_register = sparc_cpu_gdb_read_register;
+ cc->gdb_write_register = sparc_cpu_gdb_write_register;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &sparc_sysemu_ops;
+#endif
+ cc->disas_set_info = cpu_sparc_disas_set_info;
+
+#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+ cc->gdb_num_core_regs = 86;
+#else
+ cc->gdb_num_core_regs = 72;
+#endif
+ cc->tcg_ops = &sparc_tcg_ops;
+}
+
+static const TypeInfo sparc_cpu_type_info = {
+ .name = TYPE_SPARC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(SPARCCPU),
+ .instance_init = sparc_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(SPARCCPUClass),
+ .class_init = sparc_cpu_class_init,
+};
+
+static void sparc_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
+ scc->cpu_def = data;
+}
+
+static void sparc_register_cpudef_type(const struct sparc_def_t *def)
+{
+ char *typename = sparc_cpu_type_name(def->name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_SPARC_CPU,
+ .class_init = sparc_cpu_cpudef_class_init,
+ .class_data = (void *)def,
+ };
+
+ type_register(&ti);
+ g_free(typename);
+}
+
+static void sparc_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&sparc_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ sparc_register_cpudef_type(&sparc_defs[i]);
+ }
+}
+
+type_init(sparc_cpu_register_types)
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
new file mode 100644
index 000000000..5a7f1ed5d
--- /dev/null
+++ b/target/sparc/cpu.h
@@ -0,0 +1,839 @@
+#ifndef SPARC_CPU_H
+#define SPARC_CPU_H
+
+#include "qemu/bswap.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+#if !defined(TARGET_SPARC64)
+#define TARGET_DPREGS 16
+#else
+#define TARGET_DPREGS 32
+#endif
+
+/*#define EXCP_INTERRUPT 0x100*/
+
+/* Windowed register indexes. */
+enum {
+ WREG_O0,
+ WREG_O1,
+ WREG_O2,
+ WREG_O3,
+ WREG_O4,
+ WREG_O5,
+ WREG_O6,
+ WREG_O7,
+
+ WREG_L0,
+ WREG_L1,
+ WREG_L2,
+ WREG_L3,
+ WREG_L4,
+ WREG_L5,
+ WREG_L6,
+ WREG_L7,
+
+ WREG_I0,
+ WREG_I1,
+ WREG_I2,
+ WREG_I3,
+ WREG_I4,
+ WREG_I5,
+ WREG_I6,
+ WREG_I7,
+
+ WREG_SP = WREG_O6,
+ WREG_FP = WREG_I6,
+};
+
+/* trap definitions */
+#ifndef TARGET_SPARC64
+#define TT_TFAULT 0x01
+#define TT_ILL_INSN 0x02
+#define TT_PRIV_INSN 0x03
+#define TT_NFPU_INSN 0x04
+#define TT_WIN_OVF 0x05
+#define TT_WIN_UNF 0x06
+#define TT_UNALIGNED 0x07
+#define TT_FP_EXCP 0x08
+#define TT_DFAULT 0x09
+#define TT_TOVF 0x0a
+#define TT_EXTINT 0x10
+#define TT_CODE_ACCESS 0x21
+#define TT_UNIMP_FLUSH 0x25
+#define TT_DATA_ACCESS 0x29
+#define TT_DIV_ZERO 0x2a
+#define TT_NCP_INSN 0x24
+#define TT_TRAP 0x80
+#else
+#define TT_POWER_ON_RESET 0x01
+#define TT_TFAULT 0x08
+#define TT_CODE_ACCESS 0x0a
+#define TT_ILL_INSN 0x10
+#define TT_UNIMP_FLUSH TT_ILL_INSN
+#define TT_PRIV_INSN 0x11
+#define TT_NFPU_INSN 0x20
+#define TT_FP_EXCP 0x21
+#define TT_TOVF 0x23
+#define TT_CLRWIN 0x24
+#define TT_DIV_ZERO 0x28
+#define TT_DFAULT 0x30
+#define TT_DATA_ACCESS 0x32
+#define TT_UNALIGNED 0x34
+#define TT_PRIV_ACT 0x37
+#define TT_INSN_REAL_TRANSLATION_MISS 0x3e
+#define TT_DATA_REAL_TRANSLATION_MISS 0x3f
+#define TT_EXTINT 0x40
+#define TT_IVEC 0x60
+#define TT_TMISS 0x64
+#define TT_DMISS 0x68
+#define TT_DPROT 0x6c
+#define TT_SPILL 0x80
+#define TT_FILL 0xc0
+#define TT_WOTHER (1 << 5)
+#define TT_TRAP 0x100
+#define TT_HTRAP 0x180
+#endif
+
+#define PSR_NEG_SHIFT 23
+#define PSR_NEG (1 << PSR_NEG_SHIFT)
+#define PSR_ZERO_SHIFT 22
+#define PSR_ZERO (1 << PSR_ZERO_SHIFT)
+#define PSR_OVF_SHIFT 21
+#define PSR_OVF (1 << PSR_OVF_SHIFT)
+#define PSR_CARRY_SHIFT 20
+#define PSR_CARRY (1 << PSR_CARRY_SHIFT)
+#define PSR_ICC (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY)
+#if !defined(TARGET_SPARC64)
+#define PSR_EF (1<<12)
+#define PSR_PIL 0xf00
+#define PSR_S (1<<7)
+#define PSR_PS (1<<6)
+#define PSR_ET (1<<5)
+#define PSR_CWP 0x1f
+#endif
+
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_DST (env->cc_dst)
+#define CC_OP (env->cc_op)
+
+/* Lazy evaluation of condition codes tends to matter less on RISC
+ * systems, where the flags are only updated when explicitly
+ * requested; SPARC nevertheless uses it so that a single deferred
+ * computation yields both the 32-bit (icc) and 64-bit (xcc)
+ * condition codes.
+ */
+enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_FLAGS, /* all cc are back in status register */
+ CC_OP_DIV, /* modify N, Z and V, C = 0*/
+ CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */
+ CC_OP_NB,
+};
+
+/* Trap base register */
+#define TBR_BASE_MASK 0xfffff000
+
+#if defined(TARGET_SPARC64)
+#define PS_TCT (1<<12) /* UA2007, impl.dep. trap on control transfer */
+#define PS_IG (1<<11) /* v9, zero on UA2007 */
+#define PS_MG (1<<10) /* v9, zero on UA2007 */
+#define PS_CLE (1<<9) /* UA2007 */
+#define PS_TLE (1<<8) /* UA2007 */
+#define PS_RMO (1<<7)
+#define PS_RED (1<<5) /* v9, zero on UA2007 */
+#define PS_PEF (1<<4) /* enable fpu */
+#define PS_AM (1<<3) /* address mask */
+#define PS_PRIV (1<<2)
+#define PS_IE (1<<1)
+#define PS_AG (1<<0) /* v9, zero on UA2007 */
+
+#define FPRS_DL (1 << 0)
+#define FPRS_DU (1 << 1)
+#define FPRS_FEF (1 << 2)
+
+#define HS_PRIV (1<<2)
+#endif
+
+/* Fcc */
+#define FSR_RD1 (1ULL << 31)
+#define FSR_RD0 (1ULL << 30)
+#define FSR_RD_MASK (FSR_RD1 | FSR_RD0)
+#define FSR_RD_NEAREST 0
+#define FSR_RD_ZERO FSR_RD0
+#define FSR_RD_POS FSR_RD1
+#define FSR_RD_NEG (FSR_RD1 | FSR_RD0)
+
+#define FSR_NVM (1ULL << 27)
+#define FSR_OFM (1ULL << 26)
+#define FSR_UFM (1ULL << 25)
+#define FSR_DZM (1ULL << 24)
+#define FSR_NXM (1ULL << 23)
+#define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM)
+
+#define FSR_NVA (1ULL << 9)
+#define FSR_OFA (1ULL << 8)
+#define FSR_UFA (1ULL << 7)
+#define FSR_DZA (1ULL << 6)
+#define FSR_NXA (1ULL << 5)
+#define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+#define FSR_NVC (1ULL << 4)
+#define FSR_OFC (1ULL << 3)
+#define FSR_UFC (1ULL << 2)
+#define FSR_DZC (1ULL << 1)
+#define FSR_NXC (1ULL << 0)
+#define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC)
+
+#define FSR_FTT2 (1ULL << 16)
+#define FSR_FTT1 (1ULL << 15)
+#define FSR_FTT0 (1ULL << 14)
+//gcc warns about constant overflow for ~FSR_FTT_MASK
+//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
+#ifdef TARGET_SPARC64
+#define FSR_FTT_NMASK 0xfffffffffffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL
+#define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL
+#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL
+#else
+#define FSR_FTT_NMASK 0xfffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x000fc000ULL
+#endif
+#define FSR_LDFSR_MASK 0xcfc00fffULL
+#define FSR_FTT_IEEE_EXCP (1ULL << 14)
+#define FSR_FTT_UNIMPFPOP (3ULL << 14)
+#define FSR_FTT_SEQ_ERROR (4ULL << 14)
+#define FSR_FTT_INVAL_FPR (6ULL << 14)
+
+#define FSR_FCC1_SHIFT 11
+#define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT)
+#define FSR_FCC0_SHIFT 10
+#define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT)
+
+/* MMU */
+#define MMU_E (1<<0)
+#define MMU_NF (1<<1)
+
+#define PTE_ENTRYTYPE_MASK 3
+#define PTE_ACCESS_MASK 0x1c
+#define PTE_ACCESS_SHIFT 2
+#define PTE_PPN_SHIFT 7
+#define PTE_ADDR_MASK 0xffffff00
+
+#define PG_ACCESSED_BIT 5
+#define PG_MODIFIED_BIT 6
+#define PG_CACHE_BIT 7
+
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
+#define PG_CACHE_MASK (1 << PG_CACHE_BIT)
+
+/* 3 <= NWINDOWS <= 32. */
+#define MIN_NWINDOWS 3
+#define MAX_NWINDOWS 32
+
+#ifdef TARGET_SPARC64
+typedef struct trap_state {
+ uint64_t tpc;
+ uint64_t tnpc;
+ uint64_t tstate;
+ uint32_t tt;
+} trap_state;
+#endif
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+struct sparc_def_t {
+ const char *name;
+ target_ulong iu_version;
+ uint32_t fpu_version;
+ uint32_t mmu_version;
+ uint32_t mmu_bm;
+ uint32_t mmu_ctpr_mask;
+ uint32_t mmu_cxr_mask;
+ uint32_t mmu_sfsr_mask;
+ uint32_t mmu_trcr_mask;
+ uint32_t mxcc_version;
+ uint32_t features;
+ uint32_t nwindows;
+ uint32_t maxtl;
+};
+
+#define CPU_FEATURE_FLOAT (1 << 0)
+#define CPU_FEATURE_FLOAT128 (1 << 1)
+#define CPU_FEATURE_SWAP (1 << 2)
+#define CPU_FEATURE_MUL (1 << 3)
+#define CPU_FEATURE_DIV (1 << 4)
+#define CPU_FEATURE_FLUSH (1 << 5)
+#define CPU_FEATURE_FSQRT (1 << 6)
+#define CPU_FEATURE_FMUL (1 << 7)
+#define CPU_FEATURE_VIS1 (1 << 8)
+#define CPU_FEATURE_VIS2 (1 << 9)
+#define CPU_FEATURE_FSMULD (1 << 10)
+#define CPU_FEATURE_HYPV (1 << 11)
+#define CPU_FEATURE_CMT (1 << 12)
+#define CPU_FEATURE_GL (1 << 13)
+#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
+#define CPU_FEATURE_ASR17 (1 << 15)
+#define CPU_FEATURE_CACHE_CTRL (1 << 16)
+#define CPU_FEATURE_POWERDOWN (1 << 17)
+#define CPU_FEATURE_CASA (1 << 18)
+
+#ifndef TARGET_SPARC64
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
+#else
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \
+ CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \
+ CPU_FEATURE_CASA)
+enum {
+ mmu_us_12, // Ultrasparc < III (64 entry TLB)
+ mmu_us_3, // Ultrasparc III (512 entry TLB)
+ mmu_us_4, // Ultrasparc IV (several TLBs, 32 and 256MB pages)
+ mmu_sun4v, // T1, T2
+};
+#endif
+
+#define TTE_VALID_BIT (1ULL << 63)
+#define TTE_NFO_BIT (1ULL << 60)
+#define TTE_IE_BIT (1ULL << 59)
+#define TTE_USED_BIT (1ULL << 41)
+#define TTE_LOCKED_BIT (1ULL << 6)
+#define TTE_SIDEEFFECT_BIT (1ULL << 3)
+#define TTE_PRIV_BIT (1ULL << 2)
+#define TTE_W_OK_BIT (1ULL << 1)
+#define TTE_GLOBAL_BIT (1ULL << 0)
+
+#define TTE_NFO_BIT_UA2005 (1ULL << 62)
+#define TTE_USED_BIT_UA2005 (1ULL << 47)
+#define TTE_LOCKED_BIT_UA2005 (1ULL << 61)
+#define TTE_SIDEEFFECT_BIT_UA2005 (1ULL << 11)
+#define TTE_PRIV_BIT_UA2005 (1ULL << 8)
+#define TTE_W_OK_BIT_UA2005 (1ULL << 6)
+
+#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
+#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT)
+#define TTE_IS_IE(tte) ((tte) & TTE_IE_BIT)
+#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
+#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
+#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
+#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
+#define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT)
+#define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT)
+
+#define TTE_IS_NFO_UA2005(tte) ((tte) & TTE_NFO_BIT_UA2005)
+#define TTE_IS_USED_UA2005(tte) ((tte) & TTE_USED_BIT_UA2005)
+#define TTE_IS_LOCKED_UA2005(tte) ((tte) & TTE_LOCKED_BIT_UA2005)
+#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
+#define TTE_IS_PRIV_UA2005(tte) ((tte) & TTE_PRIV_BIT_UA2005)
+#define TTE_IS_W_OK_UA2005(tte) ((tte) & TTE_W_OK_BIT_UA2005)
+
+#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT)
+
+#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT)
+#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
+
+#define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL)
+#define TTE_PGSIZE_UA2005(tte) ((tte) & 7ULL)
+#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL)
+
+/* UltraSPARC T1 specific */
+#define TLB_UST1_IS_REAL_BIT (1ULL << 9) /* Real translation entry */
+#define TLB_UST1_IS_SUN4V_BIT (1ULL << 10) /* sun4u/sun4v TTE format switch */
+
+#define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */
+#define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */
+#define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */
+#define SFSR_FT_VA_DMMU_BIT (1ULL << 12) /* USIIi VA out of range (DMMU) */
+#define SFSR_FT_NFO_BIT (1ULL << 11) /* NFO page access */
+#define SFSR_FT_ILL_BIT (1ULL << 10) /* illegal LDA/STA ASI */
+#define SFSR_FT_ATOMIC_BIT (1ULL << 9) /* atomic op on noncacheable area */
+#define SFSR_FT_NF_E_BIT (1ULL << 8) /* NF access on side effect area */
+#define SFSR_FT_PRIV_BIT (1ULL << 7) /* privilege violation */
+#define SFSR_PR_BIT (1ULL << 3) /* privilege mode */
+#define SFSR_WRITE_BIT (1ULL << 2) /* write access mode */
+#define SFSR_OW_BIT (1ULL << 1) /* status overwritten */
+#define SFSR_VALID_BIT (1ULL << 0) /* status valid */
+
+#define SFSR_ASI_SHIFT 16 /* 23:16 ASI value */
+#define SFSR_ASI_MASK (0xffULL << SFSR_ASI_SHIFT)
+#define SFSR_CT_PRIMARY (0ULL << 4) /* 5:4 context type */
+#define SFSR_CT_SECONDARY (1ULL << 4)
+#define SFSR_CT_NUCLEUS (2ULL << 4)
+#define SFSR_CT_NOTRANS (3ULL << 4)
+#define SFSR_CT_MASK (3ULL << 4)
+
+/* Leon3 cache control */
+
+/* Cache control: emulate the behavior of the cache control registers,
+ * but with no effect on the emulated CPU: the caches themselves are
+ * not modelled. */
+
+#define CACHE_STATE_MASK 0x3
+#define CACHE_DISABLED 0x0
+#define CACHE_FROZEN 0x1
+#define CACHE_ENABLED 0x3
+
+/* Cache Control register fields */
+
+#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */
+#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */
+#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */
+#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */
+#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */
+#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */
+#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
+#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
+
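+/* Move the bit selected by single-bit mask SRC in X to the position
+ * selected by single-bit mask DST. */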
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+typedef struct SparcTLBEntry {
+ uint64_t tag;
+ uint64_t tte;
+} SparcTLBEntry;
+
+struct CPUTimer
+{
+ const char *name;
+ uint32_t frequency;
+ uint32_t disabled;
+ uint64_t disabled_mask;
+ uint32_t npt;
+ uint64_t npt_mask;
+ int64_t clock_offset;
+ QEMUTimer *qtimer;
+};
+
+typedef struct CPUTimer CPUTimer;
+
+typedef struct CPUSPARCState CPUSPARCState;
+#if defined(TARGET_SPARC64)
+typedef union {
+ uint64_t mmuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t mmu_primary_context;
+ uint64_t mmu_secondary_context;
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ uint64_t virtual_watchpoint;
+ uint64_t physical_watchpoint;
+ uint64_t sun4v_ctx_config[2];
+ uint64_t sun4v_tsb_pointers[4];
+ };
+} SparcV9MMU;
+#endif
+struct CPUSPARCState {
+ target_ulong gregs[8]; /* general registers */
+ target_ulong *regwptr; /* pointer to current register window */
+ target_ulong pc; /* program counter */
+ target_ulong npc; /* next program counter */
+ target_ulong y; /* multiply/divide register */
+
+ /* emulator internal flags handling */
+ target_ulong cc_src, cc_src2;
+ target_ulong cc_dst;
+ uint32_t cc_op;
+
+ target_ulong cond; /* conditional branch result (XXX: save it in a
+ temporary register when possible) */
+
+ uint32_t psr; /* processor state register */
+ target_ulong fsr; /* FPU state register */
+ CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */
+ uint32_t cwp; /* index of current register window (extracted
+ from PSR) */
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
+ uint32_t wim; /* window invalid mask */
+#endif
+ target_ulong tbr; /* trap base register */
+#if !defined(TARGET_SPARC64)
+ int psrs; /* supervisor mode (extracted from PSR) */
+ int psrps; /* previous supervisor mode */
+ int psret; /* enable traps */
+#endif
+ uint32_t psrpil; /* interrupt blocking level */
+ uint32_t pil_in; /* incoming interrupt level bitmap */
+#if !defined(TARGET_SPARC64)
+ int psref; /* enable fpu */
+#endif
+ int interrupt_index;
+ /* NOTE: we allow 8 more registers to handle wrapping */
+ target_ulong regbase[MAX_NWINDOWS * 16 + 8];
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved across CPU reset. */
+ target_ulong version;
+ uint32_t nwindows;
+
+ /* MMU regs */
+#if defined(TARGET_SPARC64)
+ uint64_t lsu;
+#define DMMU_E 0x8
+#define IMMU_E 0x4
+ SparcV9MMU immu;
+ SparcV9MMU dmmu;
+ SparcTLBEntry itlb[64];
+ SparcTLBEntry dtlb[64];
+ uint32_t mmu_version;
+#else
+ uint32_t mmuregs[32];
+ uint64_t mxccdata[4];
+ uint64_t mxccregs[8];
+ uint32_t mmubpctrv, mmubpctrc, mmubpctrs;
+ uint64_t mmubpaction;
+ uint64_t mmubpregs[4];
+ uint64_t prom_addr;
+#endif
+ /* temporary float registers */
+ float128 qt0, qt1;
+ float_status fp_status;
+#if defined(TARGET_SPARC64)
+#define MAXTL_MAX 8
+#define MAXTL_MASK (MAXTL_MAX - 1)
+ trap_state ts[MAXTL_MAX];
+ uint32_t xcc; /* Extended integer condition codes */
+ uint32_t asi;
+ uint32_t pstate;
+ uint32_t tl;
+ uint32_t maxtl;
+ uint32_t cansave, canrestore, otherwin, wstate, cleanwin;
+ uint64_t agregs[8]; /* alternate general registers */
+ uint64_t bgregs[8]; /* backup for normal global registers */
+ uint64_t igregs[8]; /* interrupt general registers */
+ uint64_t mgregs[8]; /* mmu general registers */
+ uint64_t glregs[8 * MAXTL_MAX];
+ uint64_t fprs;
+ uint64_t tick_cmpr, stick_cmpr;
+ CPUTimer *tick, *stick;
+#define TICK_NPT_MASK 0x8000000000000000ULL
+#define TICK_INT_DIS 0x8000000000000000ULL
+ uint64_t gsr;
+    uint32_t gl; /* UA 2005 */
+ /* UA 2005 hyperprivileged registers */
+ uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
+ uint64_t scratch[8];
+    CPUTimer *hstick; /* UA 2005 */
+ /* Interrupt vector registers */
+ uint64_t ivec_status;
+ uint64_t ivec_data[3];
+ uint32_t softint;
+#define SOFTINT_TIMER 1
+#define SOFTINT_STIMER (1 << 16)
+#define SOFTINT_INTRMASK (0xFFFE)
+#define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
+#endif
+ sparc_def_t def;
+
+ void *irq_manager;
+ void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno);
+
+ /* Leon3 cache control */
+ uint32_t cache_control;
+};
+
+/**
+ * SPARCCPU:
+ * @env: #CPUSPARCState
+ *
+ * A SPARC CPU.
+ */
+struct SPARCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUSPARCState env;
+};
+
+#ifndef CONFIG_USER_ONLY
+extern const VMStateDescription vmstate_sparc_cpu;
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cpu);
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int sparc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr);
+void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
+
+#ifndef NO_CPU_IO_DEFS
+/* cpu_init.c */
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
+void sparc_cpu_list(void);
+/* mmu_helper.c */
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
+void dump_mmu(CPUSPARCState *env);
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ uint8_t *buf, int len, bool is_write);
+#endif
+
+/* translate.c */
+void sparc_tcg_init(void);
+
+/* cpu-exec.c */
+
+/* win_helper.c */
+target_ulong cpu_get_psr(CPUSPARCState *env1);
+void cpu_put_psr(CPUSPARCState *env1, target_ulong val);
+void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val);
+#ifdef TARGET_SPARC64
+void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate);
+void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl);
+#endif
+int cpu_cwp_inc(CPUSPARCState *env1, int cwp);
+int cpu_cwp_dec(CPUSPARCState *env1, int cwp);
+void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
+
+/* sun4m.c, sun4u.c */
+void cpu_check_irqs(CPUSPARCState *env);
+
+#if defined (TARGET_SPARC64)
+
+static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
+{
+ return (x & mask) == (y & mask);
+}
+
+#define MMU_CONTEXT_BITS 13
+#define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1)
+
+static inline int tlb_compare_context(const SparcTLBEntry *tlb,
+ uint64_t context)
+{
+ return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
+}
+
+#endif
+#endif
+
+/* cpu-exec.c */
+#if !defined(CONFIG_USER_ONLY)
+void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+ int mmu_idx);
+#endif
+#endif
+
+#define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU
+#define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_SPARC_CPU
+
+#define cpu_list sparc_cpu_list
+
+/* MMU modes definitions */
+#if defined (TARGET_SPARC64)
+#define MMU_USER_IDX 0
+#define MMU_USER_SECONDARY_IDX 1
+#define MMU_KERNEL_IDX 2
+#define MMU_KERNEL_SECONDARY_IDX 3
+#define MMU_NUCLEUS_IDX 4
+#define MMU_PHYS_IDX 5
+#else
+#define MMU_USER_IDX 0
+#define MMU_KERNEL_IDX 1
+#define MMU_PHYS_IDX 2
+#endif
+
+#if defined (TARGET_SPARC64)
+static inline int cpu_has_hypervisor(CPUSPARCState *env1)
+{
+ return env1->def.features & CPU_FEATURE_HYPV;
+}
+
+static inline int cpu_hypervisor_mode(CPUSPARCState *env1)
+{
+ return cpu_has_hypervisor(env1) && (env1->hpstate & HS_PRIV);
+}
+
+static inline int cpu_supervisor_mode(CPUSPARCState *env1)
+{
+ return env1->pstate & PS_PRIV;
+}
+#else
+static inline int cpu_supervisor_mode(CPUSPARCState *env1)
+{
+ return env1->psrs;
+}
+#endif
+
+static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
+{
+#if defined(CONFIG_USER_ONLY)
+ return MMU_USER_IDX;
+#elif !defined(TARGET_SPARC64)
+ if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ return MMU_PHYS_IDX;
+ } else {
+ return env->psrs;
+ }
+#else
+ /* IMMU or DMMU disabled. */
+ if (ifetch
+ ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
+ : (env->lsu & DMMU_E) == 0) {
+ return MMU_PHYS_IDX;
+ } else if (cpu_hypervisor_mode(env)) {
+ return MMU_PHYS_IDX;
+ } else if (env->tl > 0) {
+ return MMU_NUCLEUS_IDX;
+ } else if (cpu_supervisor_mode(env)) {
+ return MMU_KERNEL_IDX;
+ } else {
+ return MMU_USER_IDX;
+ }
+#endif
+}
+
+static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
+{
+#if !defined (TARGET_SPARC64)
+ if (env1->psret != 0)
+ return 1;
+#else
+ if ((env1->pstate & PS_IE) && !cpu_hypervisor_mode(env1)) {
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
+{
+#if !defined(TARGET_SPARC64)
+ /* level 15 is non-maskable on sparc v8 */
+ return pil == 15 || pil > env1->psrpil;
+#else
+ return pil > env1->psrpil;
+#endif
+}
+
+typedef CPUSPARCState CPUArchState;
+typedef SPARCCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+#ifdef TARGET_SPARC64
+/* sun4u.c */
+void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
+uint64_t cpu_tick_get_count(CPUTimer *timer);
+void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
+trap_state* cpu_tsptr(CPUSPARCState* env);
+#endif
+
+#define TB_FLAG_MMU_MASK 7
+#define TB_FLAG_FPU_ENABLED (1 << 4)
+#define TB_FLAG_AM_ENABLED (1 << 5)
+#define TB_FLAG_SUPER (1 << 6)
+#define TB_FLAG_HYPER (1 << 7)
+#define TB_FLAG_ASI_SHIFT 24
+
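+/* The MMU index occupies bits 2:0 of the TB flags, the enable/mode bits
+   above occupy bits 4 to 7, and on sparc64 the current ASI is stored in
+   bits 31:24. */
+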
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ uint32_t flags;
+ *pc = env->pc;
+ *cs_base = env->npc;
+ flags = cpu_mmu_index(env, false);
+#ifndef CONFIG_USER_ONLY
+ if (cpu_supervisor_mode(env)) {
+ flags |= TB_FLAG_SUPER;
+ }
+#endif
+#ifdef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+ if (cpu_hypervisor_mode(env)) {
+ flags |= TB_FLAG_HYPER;
+ }
+#endif
+ if (env->pstate & PS_AM) {
+ flags |= TB_FLAG_AM_ENABLED;
+ }
+ if ((env->def.features & CPU_FEATURE_FLOAT)
+ && (env->pstate & PS_PEF)
+ && (env->fprs & FPRS_FEF)) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+ flags |= env->asi << TB_FLAG_ASI_SHIFT;
+#else
+ if ((env->def.features & CPU_FEATURE_FLOAT) && env->psref) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+#endif
+ *pflags = flags;
+}
+
+static inline bool tb_fpu_enabled(int tb_flags)
+{
+#if defined(CONFIG_USER_ONLY)
+ return true;
+#else
+ return tb_flags & TB_FLAG_FPU_ENABLED;
+#endif
+}
+
+static inline bool tb_am_enabled(int tb_flags)
+{
+#ifndef TARGET_SPARC64
+ return false;
+#else
+ return tb_flags & TB_FLAG_AM_ENABLED;
+#endif
+}
+
+#ifdef TARGET_SPARC64
+/* win_helper.c */
+target_ulong cpu_get_ccr(CPUSPARCState *env1);
+void cpu_put_ccr(CPUSPARCState *env1, target_ulong val);
+target_ulong cpu_get_cwp64(CPUSPARCState *env1);
+void cpu_put_cwp64(CPUSPARCState *env1, int cwp);
+
+static inline uint64_t sparc64_tstate(CPUSPARCState *env)
+{
+ uint64_t tstate = (cpu_get_ccr(env) << 32) |
+ ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
+ cpu_get_cwp64(env);
+
+ if (env->def.features & CPU_FEATURE_GL) {
+ tstate |= (env->gl & 7ULL) << 40;
+ }
+ return tstate;
+}
+#endif
+
+#endif
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
new file mode 100644
index 000000000..f54fa9b95
--- /dev/null
+++ b/target/sparc/fop_helper.c
@@ -0,0 +1,401 @@
+/*
+ * FPU op helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra)
+{
+ target_ulong status = get_float_exception_flags(&env->fp_status);
+ target_ulong fsr = env->fsr;
+
+ if (unlikely(status)) {
+ /* Keep exception flags clear for next time. */
+ set_float_exception_flags(0, &env->fp_status);
+
+ /* Copy IEEE 754 flags into FSR */
+ if (status & float_flag_invalid) {
+ fsr |= FSR_NVC;
+ }
+ if (status & float_flag_overflow) {
+ fsr |= FSR_OFC;
+ }
+ if (status & float_flag_underflow) {
+ fsr |= FSR_UFC;
+ }
+ if (status & float_flag_divbyzero) {
+ fsr |= FSR_DZC;
+ }
+ if (status & float_flag_inexact) {
+ fsr |= FSR_NXC;
+ }
+
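+        /* The trap-enable mask (TEM) sits in FSR bits 27:23, directly
+           above the matching current-exception bits 4:0, so shifting it
+           down by 23 aligns the two fields for the test below. */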
+ if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) {
+ CPUState *cs = env_cpu(env);
+
+ /* Unmasked exception, generate a trap. Note that while
+ the helper is marked as NO_WG, we can get away with
+ writing to cpu state along the exception path, since
+ TCG generated code will never see the write. */
+ env->fsr = fsr | FSR_FTT_IEEE_EXCP;
+ cs->exception_index = TT_FP_EXCP;
+ cpu_loop_exit_restore(cs, ra);
+ } else {
+ /* Accumulate exceptions */
+ fsr |= (fsr & FSR_CEXC_MASK) << 5;
+ }
+ }
+
+ return fsr;
+}
+
+target_ulong helper_check_ieee_exceptions(CPUSPARCState *env)
+{
+ return do_check_ieee_exceptions(env, GETPC());
+}
+
+#define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env)
+
+#define F_BINOP(name) \
+ float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \
+ float32 src2) \
+ { \
+ return float32_ ## name (src1, src2, &env->fp_status); \
+ } \
+ float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\
+ float64 src2) \
+ { \
+ return float64_ ## name (src1, src2, &env->fp_status); \
+ } \
+ F_HELPER(name, q) \
+ { \
+ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
+ }
+
+F_BINOP(add);
+F_BINOP(sub);
+F_BINOP(mul);
+F_BINOP(div);
+#undef F_BINOP
+
+float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2)
+{
+ return float64_mul(float32_to_float64(src1, &env->fp_status),
+ float32_to_float64(src2, &env->fp_status),
+ &env->fp_status);
+}
+
+void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2)
+{
+ QT0 = float128_mul(float64_to_float128(src1, &env->fp_status),
+ float64_to_float128(src2, &env->fp_status),
+ &env->fp_status);
+}
+
+float32 helper_fnegs(float32 src)
+{
+ return float32_chs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fnegd(float64 src)
+{
+ return float64_chs(src);
+}
+
+F_HELPER(neg, q)
+{
+ QT0 = float128_chs(QT1);
+}
+#endif
+
+/* Integer to float conversion. */
+float32 helper_fitos(CPUSPARCState *env, int32_t src)
+{
+ return int32_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fitod(CPUSPARCState *env, int32_t src)
+{
+ return int32_to_float64(src, &env->fp_status);
+}
+
+void helper_fitoq(CPUSPARCState *env, int32_t src)
+{
+ QT0 = int32_to_float128(src, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+float32 helper_fxtos(CPUSPARCState *env, int64_t src)
+{
+ return int64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fxtod(CPUSPARCState *env, int64_t src)
+{
+ return int64_to_float64(src, &env->fp_status);
+}
+
+void helper_fxtoq(CPUSPARCState *env, int64_t src)
+{
+ QT0 = int64_to_float128(src, &env->fp_status);
+}
+#endif
+#undef F_HELPER
+
+/* floating point conversion */
+float32 helper_fdtos(CPUSPARCState *env, float64 src)
+{
+ return float64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fstod(CPUSPARCState *env, float32 src)
+{
+ return float32_to_float64(src, &env->fp_status);
+}
+
+float32 helper_fqtos(CPUSPARCState *env)
+{
+ return float128_to_float32(QT1, &env->fp_status);
+}
+
+void helper_fstoq(CPUSPARCState *env, float32 src)
+{
+ QT0 = float32_to_float128(src, &env->fp_status);
+}
+
+float64 helper_fqtod(CPUSPARCState *env)
+{
+ return float128_to_float64(QT1, &env->fp_status);
+}
+
+void helper_fdtoq(CPUSPARCState *env, float64 src)
+{
+ QT0 = float64_to_float128(src, &env->fp_status);
+}
+
+/* Float to integer conversion. */
+int32_t helper_fstoi(CPUSPARCState *env, float32 src)
+{
+ return float32_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fdtoi(CPUSPARCState *env, float64 src)
+{
+ return float64_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fqtoi(CPUSPARCState *env)
+{
+ return float128_to_int32_round_to_zero(QT1, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_fstox(CPUSPARCState *env, float32 src)
+{
+ return float32_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fdtox(CPUSPARCState *env, float64 src)
+{
+ return float64_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fqtox(CPUSPARCState *env)
+{
+ return float128_to_int64_round_to_zero(QT1, &env->fp_status);
+}
+#endif
+
+float32 helper_fabss(float32 src)
+{
+ return float32_abs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fabsd(float64 src)
+{
+ return float64_abs(src);
+}
+
+void helper_fabsq(CPUSPARCState *env)
+{
+ QT0 = float128_abs(QT1);
+}
+#endif
+
+float32 helper_fsqrts(CPUSPARCState *env, float32 src)
+{
+ return float32_sqrt(src, &env->fp_status);
+}
+
+float64 helper_fsqrtd(CPUSPARCState *env, float64 src)
+{
+ return float64_sqrt(src, &env->fp_status);
+}
+
+void helper_fsqrtq(CPUSPARCState *env)
+{
+ QT0 = float128_sqrt(QT1, &env->fp_status);
+}
+
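+/* FS is the bit offset of the target fcc field within the FSR (0 for
+   fcc0; 22, 24 and 26 shift the two fcc bits up to fcc1..fcc3), and E
+   selects a signaling compare that raises invalid on quiet NaNs.  The
+   quad variants take their operands from QT0/QT1. */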
+#define GEN_FCMP(name, size, reg1, reg2, FS, E) \
+ target_ulong glue(helper_, name) (CPUSPARCState *env) \
+ { \
+ FloatRelation ret; \
+ target_ulong fsr; \
+ if (E) { \
+ ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \
+ } else { \
+ ret = glue(size, _compare_quiet)(reg1, reg2, \
+ &env->fp_status); \
+ } \
+ fsr = do_check_ieee_exceptions(env, GETPC()); \
+ switch (ret) { \
+ case float_relation_unordered: \
+ fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ fsr |= FSR_NVA; \
+ break; \
+ case float_relation_less: \
+            fsr &= ~(FSR_FCC1 << FS);                               \
+ fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+            fsr &= ~(FSR_FCC0 << FS);                               \
+ fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ return fsr; \
+ }
+#define GEN_FCMP_T(name, size, FS, E) \
+ target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\
+ { \
+ FloatRelation ret; \
+ target_ulong fsr; \
+ if (E) { \
+ ret = glue(size, _compare)(src1, src2, &env->fp_status); \
+ } else { \
+ ret = glue(size, _compare_quiet)(src1, src2, \
+ &env->fp_status); \
+ } \
+ fsr = do_check_ieee_exceptions(env, GETPC()); \
+ switch (ret) { \
+ case float_relation_unordered: \
+ fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ break; \
+ case float_relation_less: \
+ fsr &= ~(FSR_FCC1 << FS); \
+ fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+ fsr &= ~(FSR_FCC0 << FS); \
+ fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ return fsr; \
+ }
+
+GEN_FCMP_T(fcmps, float32, 0, 0);
+GEN_FCMP_T(fcmpd, float64, 0, 0);
+
+GEN_FCMP_T(fcmpes, float32, 0, 1);
+GEN_FCMP_T(fcmped, float64, 0, 1);
+
+GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
+GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
+
+#ifdef TARGET_SPARC64
+GEN_FCMP_T(fcmps_fcc1, float32, 22, 0);
+GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0);
+GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
+
+GEN_FCMP_T(fcmps_fcc2, float32, 24, 0);
+GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0);
+GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
+
+GEN_FCMP_T(fcmps_fcc3, float32, 26, 0);
+GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0);
+GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
+
+GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1);
+GEN_FCMP_T(fcmped_fcc1, float64, 22, 1);
+GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
+
+GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1);
+GEN_FCMP_T(fcmped_fcc2, float64, 24, 1);
+GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
+
+GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1);
+GEN_FCMP_T(fcmped_fcc3, float64, 26, 1);
+GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
+#endif
+#undef GEN_FCMP_T
+#undef GEN_FCMP
+
+static void set_fsr(CPUSPARCState *env, target_ulong fsr)
+{
+ int rnd_mode;
+
+ switch (fsr & FSR_RD_MASK) {
+ case FSR_RD_NEAREST:
+ rnd_mode = float_round_nearest_even;
+ break;
+ default:
+ case FSR_RD_ZERO:
+ rnd_mode = float_round_to_zero;
+ break;
+ case FSR_RD_POS:
+ rnd_mode = float_round_up;
+ break;
+ case FSR_RD_NEG:
+ rnd_mode = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_mode, &env->fp_status);
+}
+
+target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr,
+ uint32_t new_fsr)
+{
+ old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK);
+ set_fsr(env, old_fsr);
+ return old_fsr;
+}
+
+#ifdef TARGET_SPARC64
+target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr,
+ uint64_t new_fsr)
+{
+ old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK);
+ set_fsr(env, old_fsr);
+ return old_fsr;
+}
+#endif
diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
new file mode 100644
index 000000000..5d1e808e8
--- /dev/null
+++ b/target/sparc/gdbstub.c
@@ -0,0 +1,208 @@
+/*
+ * SPARC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_ABI32
+#define gdb_get_rega(buf, val) gdb_get_reg32(buf, val)
+#else
+#define gdb_get_rega(buf, val) gdb_get_regl(buf, val)
+#endif
+
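+/* GDB numbers the SPARC registers as: 0..7 globals, 8..31 the current
+   window (%o, %l, %i), 32..63 single-precision floats, and control
+   registers from 64 (sparc32) or 80 (sparc64) upwards. */
+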
+int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ if (n < 8) {
+ /* g0..g7 */
+ return gdb_get_rega(mem_buf, env->gregs[n]);
+ }
+ if (n < 32) {
+ /* register window */
+ return gdb_get_rega(mem_buf, env->regwptr[n - 8]);
+ }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+ if (n < 64) {
+ /* fprs */
+ if (n & 1) {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+ } else {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+ }
+ }
+ /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+ switch (n) {
+ case 64:
+ return gdb_get_rega(mem_buf, env->y);
+ case 65:
+ return gdb_get_rega(mem_buf, cpu_get_psr(env));
+ case 66:
+ return gdb_get_rega(mem_buf, env->wim);
+ case 67:
+ return gdb_get_rega(mem_buf, env->tbr);
+ case 68:
+ return gdb_get_rega(mem_buf, env->pc);
+ case 69:
+ return gdb_get_rega(mem_buf, env->npc);
+ case 70:
+ return gdb_get_rega(mem_buf, env->fsr);
+ case 71:
+ return gdb_get_rega(mem_buf, 0); /* csr */
+ default:
+ return gdb_get_rega(mem_buf, 0);
+ }
+#else
+ if (n < 64) {
+ /* f0-f31 */
+ if (n & 1) {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+ } else {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+ }
+ }
+ if (n < 80) {
+ /* f32-f62 (double width, even numbers only) */
+ return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+ }
+ switch (n) {
+ case 80:
+ return gdb_get_regl(mem_buf, env->pc);
+ case 81:
+ return gdb_get_regl(mem_buf, env->npc);
+ case 82:
+ return gdb_get_regl(mem_buf, (cpu_get_ccr(env) << 32) |
+ ((env->asi & 0xff) << 24) |
+ ((env->pstate & 0xfff) << 8) |
+ cpu_get_cwp64(env));
+ case 83:
+ return gdb_get_regl(mem_buf, env->fsr);
+ case 84:
+ return gdb_get_regl(mem_buf, env->fprs);
+ case 85:
+ return gdb_get_regl(mem_buf, env->y);
+ }
+#endif
+ return 0;
+}
+
+int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+#if defined(TARGET_ABI32)
+ abi_ulong tmp;
+
+ tmp = ldl_p(mem_buf);
+#else
+ target_ulong tmp;
+
+ tmp = ldtul_p(mem_buf);
+#endif
+
+ if (n < 8) {
+ /* g0..g7 */
+ env->gregs[n] = tmp;
+ } else if (n < 32) {
+ /* register window */
+ env->regwptr[n - 8] = tmp;
+ }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+ else if (n < 64) {
+ /* fprs */
+ /* f0-f31 */
+ if (n & 1) {
+ env->fpr[(n - 32) / 2].l.lower = tmp;
+ } else {
+ env->fpr[(n - 32) / 2].l.upper = tmp;
+ }
+ } else {
+ /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+ switch (n) {
+ case 64:
+ env->y = tmp;
+ break;
+ case 65:
+ cpu_put_psr(env, tmp);
+ break;
+ case 66:
+ env->wim = tmp;
+ break;
+ case 67:
+ env->tbr = tmp;
+ break;
+ case 68:
+ env->pc = tmp;
+ break;
+ case 69:
+ env->npc = tmp;
+ break;
+ case 70:
+ env->fsr = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 4;
+#else
+ else if (n < 64) {
+ /* f0-f31 */
+ tmp = ldl_p(mem_buf);
+ if (n & 1) {
+ env->fpr[(n - 32) / 2].l.lower = tmp;
+ } else {
+ env->fpr[(n - 32) / 2].l.upper = tmp;
+ }
+ return 4;
+ } else if (n < 80) {
+ /* f32-f62 (double width, even numbers only) */
+ env->fpr[(n - 32) / 2].ll = tmp;
+ } else {
+ switch (n) {
+ case 80:
+ env->pc = tmp;
+ break;
+ case 81:
+ env->npc = tmp;
+ break;
+ case 82:
+ cpu_put_ccr(env, tmp >> 32);
+ env->asi = (tmp >> 24) & 0xff;
+ env->pstate = (tmp >> 8) & 0xfff;
+ cpu_put_cwp64(env, tmp & 0xff);
+ break;
+ case 83:
+ env->fsr = tmp;
+ break;
+ case 84:
+ env->fprs = tmp;
+ break;
+ case 85:
+ env->y = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 8;
+#endif
+}
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
new file mode 100644
index 000000000..c7bcaa3a2
--- /dev/null
+++ b/target/sparc/helper.c
@@ -0,0 +1,253 @@
+/*
+ * Misc Sparc helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = tt;
+ cpu_loop_exit_restore(cs, ra);
+}
+
+void helper_raise_exception(CPUSPARCState *env, int tt)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUSPARCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+#ifdef TARGET_SPARC64
+void helper_tick_set_count(void *opaque, uint64_t count)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_count(opaque, count);
+#endif
+}
+
+uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CPUTimer *timer = opaque;
+
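+    /* With the NPT (non-privileged trap) bit set, reading the counter
+       from user mode raises a privileged-instruction trap. */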
+ if (timer->npt && mem_idx < MMU_KERNEL_IDX) {
+ cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC());
+ }
+
+ return cpu_tick_get_count(timer);
+#else
+ /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
+ Just pass through the host cpu clock ticks. */
+ return cpu_get_host_ticks();
+#endif
+}
+
+void helper_tick_set_limit(void *opaque, uint64_t limit)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_limit(opaque, limit);
+#endif
+}
+#endif
+
+static target_ulong do_udiv(CPUSPARCState *env, target_ulong a,
+ target_ulong b, int cc, uintptr_t ra)
+{
+ int overflow = 0;
+ uint64_t x0;
+ uint32_t x1;
+
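+    /* SPARC v8 division uses a 64-bit dividend formed from Y (high word)
+       and the rs1 operand (low word). */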
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+ }
+
+ x0 = x0 / x1;
+ if (x0 > UINT32_MAX) {
+ x0 = UINT32_MAX;
+ overflow = 1;
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_udiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_udiv(env, a, b, 1, GETPC());
+}
+
+static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a,
+ target_ulong b, int cc, uintptr_t ra)
+{
+ int overflow = 0;
+ int64_t x0;
+ int32_t x1;
+
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+ } else if (x1 == -1 && x0 == INT64_MIN) {
+ x0 = INT32_MAX;
+ overflow = 1;
+ } else {
+ x0 = x0 / x1;
+ if ((int32_t) x0 != x0) {
+ x0 = x0 < 0 ? INT32_MIN : INT32_MAX;
+ overflow = 1;
+ }
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_sdiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_sdiv(env, a, b, 1, GETPC());
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
+{
+ if (b == 0) {
+ /* Raise divide by zero trap. */
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+ } else if (b == -1) {
+ /* Avoid overflow trap with i386 divide insn. */
+ return -a;
+ } else {
+ return a / b;
+ }
+}
+
+uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
+{
+ if (b == 0) {
+ /* Raise divide by zero trap. */
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+ }
+ return a / b;
+}
+#endif
+
+target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
+ target_ulong src2)
+{
+ target_ulong dst;
+
+ /* Tag overflow occurs if either input has bits 0 or 1 set. */
+ if ((src1 | src2) & 3) {
+ goto tag_overflow;
+ }
+
+ dst = src1 + src2;
+
+ /* Tag overflow occurs if the addition overflows. */
+ if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+ goto tag_overflow;
+ }
+
+ /* Only modify the CC after any exceptions have been generated. */
+ env->cc_op = CC_OP_TADDTV;
+ env->cc_src = src1;
+ env->cc_src2 = src2;
+ env->cc_dst = dst;
+ return dst;
+
+ tag_overflow:
+ cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
+ target_ulong src2)
+{
+ target_ulong dst;
+
+ /* Tag overflow occurs if either input has bits 0 or 1 set. */
+ if ((src1 | src2) & 3) {
+ goto tag_overflow;
+ }
+
+ dst = src1 - src2;
+
+ /* Tag overflow occurs if the subtraction overflows. */
+ if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+ goto tag_overflow;
+ }
+
+ /* Only modify the CC after any exceptions have been generated. */
+ env->cc_op = CC_OP_TSUBTV;
+ env->cc_src = src1;
+ env->cc_src2 = src2;
+ env->cc_dst = dst;
+ return dst;
+
+ tag_overflow:
+ cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+#ifndef TARGET_SPARC64
+void helper_power_down(CPUSPARCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ env->pc = env->npc;
+ env->npc = env->pc + 4;
+ cpu_loop_exit(cs);
+}
+#endif
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
new file mode 100644
index 000000000..b8f1e78c7
--- /dev/null
+++ b/target/sparc/helper.h
@@ -0,0 +1,168 @@
+#ifndef TARGET_SPARC64
+DEF_HELPER_1(rett, void, env)
+DEF_HELPER_2(wrpsr, void, env, tl)
+DEF_HELPER_1(rdpsr, tl, env)
+DEF_HELPER_1(power_down, void, env)
+#else
+DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(wrgl, void, env, tl)
+DEF_HELPER_2(wrpstate, void, env, tl)
+DEF_HELPER_1(done, void, env)
+DEF_HELPER_1(retry, void, env)
+DEF_HELPER_FLAGS_1(flushw, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_1(saved, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(restored, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_1(rdccr, tl, env)
+DEF_HELPER_2(wrccr, void, env, tl)
+DEF_HELPER_1(rdcwp, tl, env)
+DEF_HELPER_2(wrcwp, void, env, tl)
+DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64)
+DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int)
+DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64)
+#endif
+DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(save, void, env)
+DEF_HELPER_1(restore, void, env)
+DEF_HELPER_3(udiv, tl, env, tl, tl)
+DEF_HELPER_3(udiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(sdiv, tl, env, tl, tl)
+DEF_HELPER_3(sdiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(taddcctv, tl, env, tl, tl)
+DEF_HELPER_3(tsubcctv, tl, env, tl, tl)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64)
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
+DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
+#endif
+DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32)
+DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64)
+DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env)
+#endif
+DEF_HELPER_2(raise_exception, noreturn, env, int)
+#define F_HELPER_0_1(name) \
+ DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+F_HELPER_0_1(addq)
+F_HELPER_0_1(subq)
+F_HELPER_0_1(mulq)
+F_HELPER_0_1(divq)
+
+DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+
+DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64)
+
+DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32)
+DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32)
+
+DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32)
+
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64)
+DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64)
+DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64)
+#endif
+DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64)
+DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32)
+DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env)
+DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32)
+DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env)
+DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64)
+DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64)
+DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env)
+
+DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+#define VIS_HELPER(name) \
+ DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \
+ i32, i32, i32) \
+ DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
+ i32, i32, i32)
+
+VIS_HELPER(padd)
+VIS_HELPER(psub)
+#define VIS_CMPHELPER(name) \
+ DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64)
+VIS_CMPHELPER(cmpgt)
+VIS_CMPHELPER(cmpeq)
+VIS_CMPHELPER(cmple)
+VIS_CMPHELPER(cmpne)
+#endif
+#undef F_HELPER_0_1
+#undef VIS_HELPER
+#undef VIS_CMPHELPER
+DEF_HELPER_1(compute_psr, void, env)
+DEF_HELPER_FLAGS_1(compute_C_icc, TCG_CALL_NO_WG_SE, i32, env)
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
new file mode 100644
index 000000000..82e8418e4
--- /dev/null
+++ b/target/sparc/int32_helper.c
@@ -0,0 +1,171 @@
+/*
+ * Sparc32 interrupt helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "trace.h"
+#include "exec/log.h"
+#include "sysemu/runstate.h"
+
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_WIN_OVF] = "Window Overflow",
+ [TT_WIN_UNF] = "Window Underflow",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_NCP_INSN] = "Coprocessor Disabled",
+};
+
+static const char *excp_name_str(int32_t exception_index)
+{
+ if (exception_index < 0 || exception_index >= ARRAY_SIZE(excp_names)) {
+ return "Unknown";
+ }
+ return excp_names[exception_index];
+}
+
+void cpu_check_irqs(CPUSPARCState *env)
+{
+ CPUState *cs;
+
+ /* We should be holding the BQL before we mess with IRQs */
+ g_assert(qemu_mutex_iothread_locked());
+
+ if (env->pil_in && (env->interrupt_index == 0 ||
+ (env->interrupt_index & ~15) == TT_EXTINT)) {
+ unsigned int i;
+
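+        /* Scan from level 15 down so that the highest pending external
+           interrupt is taken first. */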
+ for (i = 15; i > 0; i--) {
+ if (env->pil_in & (1 << i)) {
+ int old_interrupt = env->interrupt_index;
+
+ env->interrupt_index = TT_EXTINT | i;
+ if (old_interrupt != env->interrupt_index) {
+ cs = env_cpu(env);
+ trace_sun4m_cpu_interrupt(i);
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ break;
+ }
+ }
+ } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) {
+ cs = env_cpu(env);
+ trace_sun4m_cpu_reset_interrupt(env->interrupt_index & 15);
+ env->interrupt_index = 0;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int cwp, intno = cs->exception_index;
+
+ /* Compute PSR before exposing state. */
+ if (env->cc_op != CC_OP_FLAGS) {
+ cpu_get_psr(env);
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x100) {
+ name = "Unknown";
+ } else if (intno >= 0x80) {
+ name = "Trap Instruction";
+ } else {
+ name = excp_name_str(intno);
+ }
+
+ qemu_log("%6d: %s (v=%02x)\n", count, name, intno);
+ log_cpu_state(cs, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#if !defined(CONFIG_USER_ONLY)
+ if (env->psret == 0) {
+ if (cs->exception_index == 0x80 &&
+ env->def.features & CPU_FEATURE_TA0_SHUTDOWN) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ } else {
+ cpu_abort(cs, "Trap 0x%02x (%s) while interrupts disabled, "
+ "Error state",
+ cs->exception_index, excp_name_str(cs->exception_index));
+ }
+ return;
+ }
+#endif
+ env->psret = 0;
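+    /* Enter the trap window: decrement CWP and save PC/nPC in the new
+       window's %l1 and %l2 (regwptr[9] and regwptr[10]). */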
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
+ cpu_set_cwp(env, cwp);
+ env->regwptr[9] = env->pc;
+ env->regwptr[10] = env->npc;
+ env->psrps = env->psrs;
+ env->psrs = 1;
+ env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
+ env->pc = env->tbr;
+ env->npc = env->pc + 4;
+ cs->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* IRQ acknowledgment */
+ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
+ env->qemu_irq_ack(env, env->irq_manager, intno);
+ }
+#endif
+}
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
new file mode 100644
index 000000000..793e57c53
--- /dev/null
+++ b/target/sparc/int64_helper.c
@@ -0,0 +1,304 @@
+/*
+ * Sparc64 interrupt helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+#include "trace.h"
+
+#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_TMISS] = "Instruction Access MMU Miss",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_CLRWIN] = "Clean Windows",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_DMISS] = "Data Access MMU Miss",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DPROT] = "Data Protection Error",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_PRIV_ACT] = "Privileged Action",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+};
+#endif
+
+void cpu_check_irqs(CPUSPARCState *env)
+{
+ CPUState *cs;
+ uint32_t pil = env->pil_in |
+ (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));
+
+ /* We should be holding the BQL before we mess with IRQs */
+ g_assert(qemu_mutex_iothread_locked());
+
+ /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
+ if (env->ivec_status & 0x20) {
+ return;
+ }
+ cs = env_cpu(env);
+ /*
+ * check if TM or SM in SOFTINT are set
+ * setting these also causes interrupt 14
+ */
+ if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
+ pil |= 1 << 14;
+ }
+
+ /*
+ * The bit corresponding to psrpil is (1<< psrpil),
+ * the next bit is (2 << psrpil).
+ */
+ if (pil < (2 << env->psrpil)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index);
+ env->interrupt_index = 0;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ return;
+ }
+
+ if (cpu_interrupts_enabled(env)) {
+
+ unsigned int i;
+
+ for (i = 15; i > env->psrpil; i--) {
+ if (pil & (1 << i)) {
+ int old_interrupt = env->interrupt_index;
+ int new_interrupt = TT_EXTINT | i;
+
+ if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt
+ && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) {
+ trace_sparc64_cpu_check_irqs_noset_irq(env->tl,
+ cpu_tsptr(env)->tt,
+ new_interrupt);
+ } else if (old_interrupt != new_interrupt) {
+ env->interrupt_index = new_interrupt;
+ trace_sparc64_cpu_check_irqs_set_irq(i, old_interrupt,
+ new_interrupt);
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ break;
+ }
+ }
+ } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint,
+ env->interrupt_index);
+ env->interrupt_index = 0;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int intno = cs->exception_index;
+ trap_state *tsptr;
+
+ /* Compute PSR before exposing state. */
+ if (env->cc_op != CC_OP_FLAGS) {
+ cpu_get_psr(env);
+ }
+
+#ifdef DEBUG_PCALL
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x1ff) {
+ name = "Unknown";
+ } else if (intno >= 0x180) {
+ name = "Hyperprivileged Trap Instruction";
+ } else if (intno >= 0x100) {
+ name = "Trap Instruction";
+ } else if (intno >= 0xc0) {
+ name = "Window Fill";
+ } else if (intno >= 0x80) {
+ name = "Window Spill";
+ } else {
+ name = excp_names[intno];
+ if (!name) {
+ name = "Unknown";
+ }
+ }
+
+ qemu_log("%6d: %s (v=%04x)\n", count, name, intno);
+ log_cpu_state(cs, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+ if (env->tl >= env->maxtl) {
+ cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
+ " Error state", cs->exception_index, env->tl, env->maxtl);
+ return;
+ }
+#endif
+ if (env->tl < env->maxtl - 1) {
+ env->tl++;
+ } else {
+ env->pstate |= PS_RED;
+ if (env->tl < env->maxtl) {
+ env->tl++;
+ }
+ }
+ tsptr = cpu_tsptr(env);
+
+ tsptr->tstate = sparc64_tstate(env);
+ tsptr->tpc = env->pc;
+ tsptr->tnpc = env->npc;
+ tsptr->tt = intno;
+
+ if (cpu_has_hypervisor(env)) {
+ env->htstate[env->tl] = env->hpstate;
+ /* XXX OpenSPARC T1 - UltraSPARC T3 have MAXPTL=2
+ but this may change in the future */
+ if (env->tl > 2) {
+ env->hpstate |= HS_PRIV;
+ }
+ }
+
+ if (env->def.features & CPU_FEATURE_GL) {
+ cpu_gl_switch_gregs(env, env->gl + 1);
+ env->gl++;
+ }
+
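+    /* CPUs without the GL feature bank their globals by trap type:
+       interrupt vectors run on the interrupt globals (IG), MMU faults on
+       the MMU globals (MG), and other traps on the alternate globals
+       (AG). */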
+ switch (intno) {
+ case TT_IVEC:
+ if (!cpu_has_hypervisor(env)) {
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
+ }
+ break;
+ case TT_TFAULT:
+ case TT_DFAULT:
+ case TT_TMISS ... TT_TMISS + 3:
+ case TT_DMISS ... TT_DMISS + 3:
+ case TT_DPROT ... TT_DPROT + 3:
+ if (cpu_has_hypervisor(env)) {
+ env->hpstate |= HS_PRIV;
+ env->pstate = PS_PEF | PS_PRIV;
+ } else {
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
+ }
+ break;
+ case TT_INSN_REAL_TRANSLATION_MISS ... TT_DATA_REAL_TRANSLATION_MISS:
+ case TT_HTRAP ... TT_HTRAP + 127:
+ env->hpstate |= HS_PRIV;
+ break;
+ default:
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
+ break;
+ }
+
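+    /* Window traps rotate CWP: CLEANWIN and SPILL traps move to the
+       window being cleaned or spilled, FILL traps to the window being
+       filled. */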
+ if (intno == TT_CLRWIN) {
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
+ } else if ((intno & 0x1c0) == TT_SPILL) {
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
+ } else if ((intno & 0x1c0) == TT_FILL) {
+ cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
+ }
+
+ if (cpu_hypervisor_mode(env)) {
+ env->pc = (env->htba & ~0x3fffULL) | (intno << 5);
+ } else {
+ env->pc = env->tbr & ~0x7fffULL;
+ env->pc |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+ }
+ env->npc = env->pc + 4;
+ cs->exception_index = -1;
+}
+
+trap_state *cpu_tsptr(CPUSPARCState* env)
+{
+ return &env->ts[env->tl & MAXTL_MASK];
+}
+
+static bool do_modify_softint(CPUSPARCState *env, uint32_t value)
+{
+ if (env->softint != value) {
+ env->softint = value;
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ qemu_mutex_lock_iothread();
+ cpu_check_irqs(env);
+ qemu_mutex_unlock_iothread();
+ }
+#endif
+ return true;
+ }
+ return false;
+}
+
+void helper_set_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, env->softint | (uint32_t)value)) {
+ trace_int_helper_set_softint(env->softint);
+ }
+}
+
+void helper_clear_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, env->softint & (uint32_t)~value)) {
+ trace_int_helper_clear_softint(env->softint);
+ }
+}
+
+void helper_write_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, (uint32_t)value)) {
+ trace_int_helper_write_softint(env->softint);
+ }
+}
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
new file mode 100644
index 000000000..a3e1cf9b6
--- /dev/null
+++ b/target/sparc/ldst_helper.c
@@ -0,0 +1,1955 @@
+/*
+ * Helpers for loads and stores
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg/tcg.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "asi.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_MXCC
+//#define DEBUG_UNASSIGNED
+//#define DEBUG_ASI
+//#define DEBUG_CACHE_CONTROL
+
+#ifdef DEBUG_MMU
+#define DPRINTF_MMU(fmt, ...) \
+ do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MMU(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_MXCC
+#define DPRINTF_MXCC(fmt, ...) \
+ do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MXCC(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_ASI
+#define DPRINTF_ASI(fmt, ...) \
+ do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
+#endif
+
+#ifdef DEBUG_CACHE_CONTROL
+#define DPRINTF_CACHE_CONTROL(fmt, ...) \
+ do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
+#else
+#define AM_CHECK(env1) (1)
+#endif
+#endif
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+/* Calculates TSB pointer value for fault page size
+ * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
+ * UA2005 holds the page size configuration in mmu_ctx registers */
+static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
+ const SparcV9MMU *mmu, const int idx)
+{
+ uint64_t tsb_register;
+ int page_size;
+ if (cpu_has_hypervisor(env)) {
+ int tsb_index = 0;
+ int ctx = mmu->tag_access & 0x1fffULL;
+ uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
+ tsb_index = idx;
+ tsb_index |= ctx ? 2 : 0;
+ page_size = idx ? ctx_register >> 8 : ctx_register;
+ page_size &= 7;
+ tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
+ } else {
+ page_size = idx;
+ tsb_register = mmu->tsb;
+ }
+ int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
+ int tsb_size = tsb_register & 0xf;
+
+ uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;
+
+ /* move va bits to correct position,
+ * the context bits will be masked out later */
+ uint64_t va = mmu->tag_access >> (3 * page_size + 9);
+
+ /* calculate tsb_base mask and adjust va if split is in use */
+ if (tsb_split) {
+ if (idx == 0) {
+ va &= ~(1ULL << (13 + tsb_size));
+ } else {
+ va |= (1ULL << (13 + tsb_size));
+ }
+ tsb_base_mask <<= 1;
+ }
+
+ return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
+}
+
+/* Calculates tag target register value by reordering bits
+ in tag access register */
+static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
+{
+ return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
+}
+
+static void replace_tlb_entry(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ CPUSPARCState *env)
+{
+ target_ulong mask, size, va, offset;
+
+ /* flush page range if translation is valid */
+ if (TTE_IS_VALID(tlb->tte)) {
+ CPUState *cs = env_cpu(env);
+
+ size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
+ mask = 1ULL + ~size;
+
+ va = tlb->tag & mask;
+
+ for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, va + offset);
+ }
+ }
+
+ tlb->tag = tlb_tag;
+ tlb->tte = tlb_tte;
+}
+
+static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
+ const char *strmmu, CPUSPARCState *env1)
+{
+ unsigned int i;
+ target_ulong mask;
+ uint64_t context;
+
+ int is_demap_context = (demap_addr >> 6) & 1;
+
+ /* demap context */
+ switch ((demap_addr >> 4) & 3) {
+ case 0: /* primary */
+ context = env1->dmmu.mmu_primary_context;
+ break;
+ case 1: /* secondary */
+ context = env1->dmmu.mmu_secondary_context;
+ break;
+ case 2: /* nucleus */
+ context = 0;
+ break;
+ case 3: /* reserved */
+ default:
+ return;
+ }
+
+ for (i = 0; i < 64; i++) {
+ if (TTE_IS_VALID(tlb[i].tte)) {
+
+ if (is_demap_context) {
+ /* will remove non-global entries matching context value */
+ if (TTE_IS_GLOBAL(tlb[i].tte) ||
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ } else {
+ /* demap page
+ will remove any entry matching VA */
+ mask = 0xffffffffffffe000ULL;
+ mask <<= 3 * ((tlb[i].tte >> 61) & 3);
+
+ if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
+ continue;
+ }
+
+ /* entry should be global or matching context value */
+ if (!TTE_IS_GLOBAL(tlb[i].tte) &&
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ }
+
+ replace_tlb_entry(&tlb[i], 0, 0, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
+ dump_mmu(env1);
+#endif
+ }
+ }
+}
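
For reference, the demap-address fields consumed above, written as hypothetical helpers (the names are mine; the bit positions come straight from the code):

    #include <stdint.h>

    /* bit 6: 1 = demap-context operation, 0 = demap-page */
    static int demap_is_context(uint64_t demap_addr)
    {
        return (demap_addr >> 6) & 1;
    }

    /* bits 5:4: 0 = primary, 1 = secondary, 2 = nucleus, 3 = reserved */
    static int demap_context_select(uint64_t demap_addr)
    {
        return (demap_addr >> 4) & 3;
    }
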
+
+static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
+ uint64_t sun4v_tte)
+{
+ uint64_t sun4u_tte;
+ if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
+ /* is already in the sun4u format */
+ return sun4v_tte;
+ }
+ sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
+ sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
+ TTE_SIDEEFFECT_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
+ return sun4u_tte;
+}
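
CONVERT_BIT() is defined elsewhere in the SPARC headers; a plausible minimal definition, assuming both mask arguments are single bits, is sketched below (QEMU's actual macro may be written differently, but the effect relied on here is the same): if the source bit is set in the value, the destination bit appears in the result.

    /* sketch only, for illustration */
    #define CONVERT_BIT(val, src_bit, dst_bit) \
        (((val) & (src_bit)) ? (dst_bit) : 0)
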
+
+static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ const char *strmmu, CPUSPARCState *env1,
+ uint64_t addr)
+{
+ unsigned int i, replace_used;
+
+ tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
+ if (cpu_has_hypervisor(env1)) {
+ uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
+ uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
+ uint32_t new_ctx = tlb_tag & 0x1fffU;
+ for (i = 0; i < 64; i++) {
+ uint32_t ctx = tlb[i].tag & 0x1fffU;
+ /* check if new mapping overlaps an existing one */
+ if (new_ctx == ctx) {
+ uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
+ uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
+ if (new_vaddr == vaddr
+ || (new_vaddr < vaddr + size
+ && vaddr < new_vaddr + new_size)) {
+ DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
+ new_vaddr);
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+ return;
+ }
+ }
+ }
+ }
+ /* Try replacing invalid entry */
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_VALID(tlb[i].tte)) {
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
+ dump_mmu(env1);
+#endif
+ return;
+ }
+ }
+
+ /* All entries are valid, try replacing unlocked entry */
+
+ for (replace_used = 0; replace_used < 2; ++replace_used) {
+
+ /* Used entries are not replaced on first pass */
+
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
+
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
+ strmmu, (replace_used ? "used" : "unused"), i);
+ dump_mmu(env1);
+#endif
+ return;
+ }
+ }
+
+ /* Now reset used bit and search for unused entries again */
+
+ for (i = 0; i < 64; i++) {
+ TTE_SET_UNUSED(tlb[i].tte);
+ }
+ }
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replacement: no free entries available, "
+ "replacing the last one\n", strmmu);
+#endif
+ /* corner case: the last entry is replaced anyway */
+ replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
+}
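
The auto-demap condition above is the standard overlap test for half-open intervals; a small standalone illustration (mine, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* [a, a+sa) and [b, b+sb) overlap iff a < b + sb && b < a + sa */
    static int overlaps(uint64_t a, uint64_t sa, uint64_t b, uint64_t sb)
    {
        return a < b + sb && b < a + sa;
    }

    int main(void)
    {
        assert(overlaps(0x2000, 0x2000, 0x3000, 0x1000));  /* nested   */
        assert(!overlaps(0x2000, 0x1000, 0x3000, 0x1000)); /* adjacent */
        return 0;
    }
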
+
+#endif
+
+#ifdef TARGET_SPARC64
+/* Returns non-zero if an access using this ASI has its address translated
+   by the MMU; otherwise the access goes to the raw physical address. */
+/* TODO: check sparc32 bits */
+static inline int is_translating_asi(int asi)
+{
+    /* UltraSPARC IIi translating ASIs
+       - note this list is defined by the CPU implementation
+     */
+ switch (asi) {
+ case 0x04 ... 0x11:
+ case 0x16 ... 0x19:
+ case 0x1E ... 0x1F:
+ case 0x24 ... 0x2C:
+ case 0x70 ... 0x73:
+ case 0x78 ... 0x79:
+ case 0x80 ... 0xFF:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
+{
+ if (AM_CHECK(env1)) {
+ addr &= 0xffffffffULL;
+ }
+ return addr;
+}
+
+static inline target_ulong asi_address_mask(CPUSPARCState *env,
+ int asi, target_ulong addr)
+{
+ if (is_translating_asi(asi)) {
+ addr = address_mask(env, addr);
+ }
+ return addr;
+}
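
With PSTATE.AM set (or unconditionally under TARGET_ABI32), address_mask() truncates a 64-bit address to its low 32 bits before it is used; for example (hypothetical value):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0xffffffff80001000ULL; /* arbitrary 64-bit VA */
        assert((addr & 0xffffffffULL) == 0x80001000ULL);
        return 0;
    }
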
+
+#ifndef CONFIG_USER_ONLY
+static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
+{
+ /* ASIs >= 0x80 are user mode.
+ * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
+ * ASIs <= 0x2f are super mode.
+ */
+ if (asi < 0x80
+ && !cpu_hypervisor_mode(env)
+ && (!cpu_supervisor_mode(env)
+ || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+#endif
+
+static void do_check_align(CPUSPARCState *env, target_ulong addr,
+ uint32_t align, uintptr_t ra)
+{
+ if (addr & align) {
+ cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
+ }
+}
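
Callers pass align == size - 1 for a power-of-two access size, so any set low bit signals a misaligned address. A minimal sketch of the check:

    #include <assert.h>
    #include <stdint.h>

    static int is_misaligned(uint64_t addr, unsigned size /* power of 2 */)
    {
        return (addr & (size - 1)) != 0;
    }

    int main(void)
    {
        assert(!is_misaligned(0x1000, 8));
        assert(is_misaligned(0x1002, 4));
        return 0;
    }
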
+
+void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
+{
+ do_check_align(env, addr, align, GETPC());
+}
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
+ defined(DEBUG_MXCC)
+static void dump_mxcc(CPUSPARCState *env)
+{
+ printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccdata[0], env->mxccdata[1],
+ env->mxccdata[2], env->mxccdata[3]);
+ printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n"
+ " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccregs[0], env->mxccregs[1],
+ env->mxccregs[2], env->mxccregs[3],
+ env->mxccregs[4], env->mxccregs[5],
+ env->mxccregs[6], env->mxccregs[7]);
+}
+#endif
+
+#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
+ && defined(DEBUG_ASI)
+static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
+ uint64_t r1)
+{
+ switch (size) {
+ case 1:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xff);
+ break;
+ case 2:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffff);
+ break;
+ case 4:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffffffff);
+ break;
+ case 8:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
+ addr, asi, r1);
+ break;
+ }
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+#ifndef TARGET_SPARC64
+static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size, uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int fault_type;
+
+#ifdef DEBUG_UNASSIGNED
+ if (is_asi) {
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " asi 0x%02x from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, is_asi, env->pc);
+ } else {
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, env->pc);
+ }
+#endif
+ /* Don't overwrite translation and access faults */
+ fault_type = (env->mmuregs[3] & 0x1c) >> 2;
+ if ((fault_type > 4) || (fault_type == 0)) {
+ env->mmuregs[3] = 0; /* Fault status register */
+ if (is_asi) {
+ env->mmuregs[3] |= 1 << 16;
+ }
+ if (env->psrs) {
+ env->mmuregs[3] |= 1 << 5;
+ }
+ if (is_exec) {
+ env->mmuregs[3] |= 1 << 6;
+ }
+ if (is_write) {
+ env->mmuregs[3] |= 1 << 7;
+ }
+ env->mmuregs[3] |= (5 << 2) | 2;
+ /* SuperSPARC will never place instruction fault addresses in the FAR */
+ if (!is_exec) {
+ env->mmuregs[4] = addr; /* Fault address register */
+ }
+ }
+ /* overflow (same type fault was not read before another fault) */
+    if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
+ env->mmuregs[3] |= 1;
+ }
+
+ if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
+ int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
+ cpu_raise_exception_ra(env, tt, retaddr);
+ }
+
+ /*
+ * flush neverland mappings created during no-fault mode,
+ * so the sequential MMU faults report proper fault types
+ */
+ if (env->mmuregs[0] & MMU_NF) {
+ tlb_flush(cs);
+ }
+}
+#else
+static void sparc_raise_mmu_fault(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size, uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+
+ if (is_exec) { /* XXX has_hypervisor */
+ if (env->lsu & (IMMU_E)) {
+ cpu_raise_exception_ra(env, TT_CODE_ACCESS, retaddr);
+ } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
+ cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, retaddr);
+ }
+ } else {
+ if (env->lsu & (DMMU_E)) {
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, retaddr);
+ } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
+ cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, retaddr);
+ }
+ }
+}
+#endif
+#endif
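
To make the fault-status updates in the 32-bit sparc_raise_mmu_fault() above easier to follow, here is the SFSR bit layout as the code uses it. The bit positions are taken from the code; the names are my own shorthand, not official SRMMU mnemonics:

    enum srmmu_sfsr_bits {
        SFSR_OW       = 1 << 0,  /* overflow: fault type repeated before read */
        SFSR_FAV      = 1 << 1,  /* fault address register holds a valid VA */
        SFSR_FT_SHIFT = 2,       /* fault type field, bits 4:2 */
        SFSR_PRIV     = 1 << 5,  /* access made in supervisor state */
        SFSR_EXEC     = 1 << 6,  /* instruction access */
        SFSR_WRITE    = 1 << 7,  /* write access */
        SFSR_ASI      = 1 << 16, /* set for unassigned ASI accesses */
    };
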
+
+#ifndef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+
+/* Leon3 cache control */
+
+static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
+ uint64_t val, int size)
+{
+ DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
+ addr, val, size);
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+
+ /* These values must always be read as zeros */
+ val &= ~CACHE_CTRL_FD;
+ val &= ~CACHE_CTRL_FI;
+ val &= ~CACHE_CTRL_IB;
+ val &= ~CACHE_CTRL_IP;
+ val &= ~CACHE_CTRL_DP;
+
+ env->cache_control = val;
+ break;
+ case 0x04: /* Instruction cache configuration */
+ case 0x08: /* Data cache configuration */
+ /* Read Only */
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
+ break;
+    }
+}
+
+static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
+ int size)
+{
+ uint64_t ret = 0;
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return 0;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+ ret = env->cache_control;
+ break;
+
+    /* The configuration registers are read-only and always return
+       these predefined values */
+
+ case 0x04: /* Instruction cache configuration */
+ ret = 0x10220000;
+ break;
+ case 0x08: /* Data cache configuration */
+ ret = 0x18220000;
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
+ break;
+    }
+ DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
+ addr, ret, size);
+ return ret;
+}
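
Condensed, the LEON3 read path above implements this small register map (offsets within the cache-control ASI; only 32-bit accesses are honoured). A sketch with the device state passed in directly:

    #include <stdint.h>

    static uint64_t leon3_cc_read(uint32_t cache_control, uint32_t addr)
    {
        switch (addr) {
        case 0x00: return cache_control; /* cache control register */
        case 0x04: return 0x10220000;    /* I-cache config, fixed   */
        case 0x08: return 0x18220000;    /* D-cache config, fixed   */
        default:   return 0;             /* unknown register        */
        }
    }
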
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ CPUState *cs = env_cpu(env);
+ uint64_t ret = 0;
+#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
+ uint32_t last_addr = addr;
+#endif
+
+ do_check_align(env, addr, size - 1, GETPC());
+ switch (asi) {
+ case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+ /* case ASI_LEON_CACHEREGS: Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+        case 0x0C: /* Leon3 Data Cache config */
+ if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
+ ret = leon3_cache_control_ld(env, addr, size);
+ }
+ break;
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8) {
+ ret = env->mxccregs[3];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4) {
+ ret = env->mxccregs[3];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00c00: /* Module reset register */
+ if (size == 8) {
+ ret = env->mxccregs[5];
+ /* should we do something here? */
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8) {
+ ret = env->mxccregs[7];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
+ "addr = %08x -> ret = %" PRIx64 ","
+ "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
+ case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ if (mmulev > 4) {
+ ret = 0;
+ } else {
+ ret = mmu_probe(env, addr, mmulev);
+ }
+ DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
+ addr, mmulev, ret);
+ }
+ break;
+ case ASI_M_MMUREGS: /* SuperSparc MMU regs */
+ case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+
+ ret = env->mmuregs[reg];
+ if (reg == 3) { /* Fault status cleared on read */
+ env->mmuregs[3] = 0;
+ } else if (reg == 0x13) { /* Fault status read */
+ ret = env->mmuregs[3];
+ } else if (reg == 0x14) { /* Fault address read */
+ ret = env->mmuregs[4];
+ }
+ DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
+ }
+ break;
+ case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
+ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
+ break;
+ case ASI_KERNELTXT: /* Supervisor code access */
+ switch (size) {
+ case 1:
+ ret = cpu_ldub_code(env, addr);
+ break;
+ case 2:
+ ret = cpu_lduw_code(env, addr);
+ break;
+        default: /* other sizes are handled as 32-bit loads */
+ case 4:
+ ret = cpu_ldl_code(env, addr);
+ break;
+ case 8:
+ ret = cpu_ldq_code(env, addr);
+ break;
+ }
+ break;
+ case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
+ case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
+ case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
+ case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+ {
+ MemTxResult result;
+ hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
+
+ switch (size) {
+ case 1:
+ ret = address_space_ldub(cs->as, access_addr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ case 2:
+ ret = address_space_lduw(cs->as, access_addr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+            default: /* other sizes are handled as 32-bit loads */
+ case 4:
+ ret = address_space_ldl(cs->as, access_addr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ case 8:
+ ret = address_space_ldq(cs->as, access_addr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ }
+
+ if (result != MEMTX_OK) {
+ sparc_raise_mmu_fault(cs, access_addr, false, false, false,
+ size, GETPC());
+ }
+ break;
+ }
+ case 0x30: /* Turbosparc secondary cache diagnostic */
+ case 0x31: /* Turbosparc RAM snoop */
+ case 0x32: /* Turbosparc page table descriptor diagnostic */
+ case 0x39: /* data cache diagnostic register */
+ ret = 0;
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
+ {
+ int reg = (addr >> 8) & 3;
+
+            switch (reg) {
+            case 0: /* Breakpoint Value (Addr) */
+            case 1: /* Breakpoint Mask */
+            case 2: /* Breakpoint Control */
+                ret = env->mmubpregs[reg];
+                break;
+            case 3: /* Breakpoint Status */
+                ret = env->mmubpregs[reg];
+                env->mmubpregs[reg] = 0ULL;
+                break;
+            }
+ DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
+ ret);
+ }
+ break;
+ case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+ ret = env->mmubpctrv;
+ break;
+ case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+ ret = env->mmubpctrc;
+ break;
+ case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+ ret = env->mmubpctrs;
+ break;
+ case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+ ret = env->mmubpaction;
+ break;
+ case ASI_USERTXT: /* User code access, XXX */
+ default:
+ sparc_raise_mmu_fault(cs, addr, false, false, asi, size, GETPC());
+ ret = 0;
+ break;
+
+ case ASI_USERDATA: /* User data access */
+ case ASI_KERNELDATA: /* Supervisor data access */
+ case ASI_P: /* Implicit primary context data access (v9 only?) */
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+ }
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ CPUState *cs = env_cpu(env);
+
+ do_check_align(env, addr, size - 1, GETPC());
+ switch (asi) {
+ case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+ /* case ASI_LEON_CACHEREGS: Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+        case 0x0C: /* Leon3 Data Cache config */
+ if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
+ leon3_cache_control_st(env, addr, val, size);
+ }
+ break;
+
+ case 0x01c00000: /* MXCC stream data register 0 */
+ if (size == 8) {
+ env->mxccdata[0] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00008: /* MXCC stream data register 1 */
+ if (size == 8) {
+ env->mxccdata[1] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00010: /* MXCC stream data register 2 */
+ if (size == 8) {
+ env->mxccdata[2] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00018: /* MXCC stream data register 3 */
+ if (size == 8) {
+ env->mxccdata[3] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00100: /* MXCC stream source */
+ {
+ int i;
+
+ if (size == 8) {
+ env->mxccregs[0] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+
+ for (i = 0; i < 4; i++) {
+ MemTxResult result;
+ hwaddr access_addr = (env->mxccregs[0] & 0xffffffffULL) + 8 * i;
+
+ env->mxccdata[i] = address_space_ldq(cs->as,
+ access_addr,
+ MEMTXATTRS_UNSPECIFIED,
+ &result);
+ if (result != MEMTX_OK) {
+ /* TODO: investigate whether this is the right behaviour */
+ sparc_raise_mmu_fault(cs, access_addr, false, false,
+ false, size, GETPC());
+ }
+ }
+ break;
+ }
+ case 0x01c00200: /* MXCC stream destination */
+ {
+ int i;
+
+ if (size == 8) {
+ env->mxccregs[1] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+
+ for (i = 0; i < 4; i++) {
+ MemTxResult result;
+ hwaddr access_addr = (env->mxccregs[1] & 0xffffffffULL) + 8 * i;
+
+ address_space_stq(cs->as, access_addr, env->mxccdata[i],
+ MEMTXATTRS_UNSPECIFIED, &result);
+
+ if (result != MEMTX_OK) {
+ /* TODO: investigate whether this is the right behaviour */
+ sparc_raise_mmu_fault(cs, access_addr, true, false,
+ false, size, GETPC());
+ }
+ }
+ break;
+ }
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8) {
+ env->mxccregs[3] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4) {
+ env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
+ | val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00e00: /* MXCC error register */
+ /* writing a 1 bit clears the error */
+ if (size == 8) {
+ env->mxccregs[6] &= ~val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8) {
+ env->mxccregs[7] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
+ asi, size, addr, val);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
+ case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ DPRINTF_MMU("mmu flush level %d\n", mmulev);
+ switch (mmulev) {
+ case 0: /* flush page */
+ tlb_flush_page(cs, addr & 0xfffff000);
+ break;
+ case 1: /* flush segment (256k) */
+ case 2: /* flush region (16M) */
+ case 3: /* flush context (4G) */
+ case 4: /* flush entire */
+ tlb_flush(cs);
+ break;
+ default:
+ break;
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(env);
+#endif
+ }
+ break;
+ case ASI_M_MMUREGS: /* write MMU regs */
+ case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+ uint32_t oldreg;
+
+ oldreg = env->mmuregs[reg];
+ switch (reg) {
+ case 0: /* Control Register */
+ env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
+ (val & 0x00ffffff);
+ /* Mappings generated during no-fault mode
+ are invalid in normal mode. */
+ if ((oldreg ^ env->mmuregs[reg])
+ & (MMU_NF | env->def.mmu_bm)) {
+ tlb_flush(cs);
+ }
+ break;
+ case 1: /* Context Table Pointer Register */
+ env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
+ break;
+ case 2: /* Context Register */
+ env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
+ if (oldreg != env->mmuregs[reg]) {
+ /* we flush when the MMU context changes because
+ QEMU has no MMU context support */
+ tlb_flush(cs);
+ }
+ break;
+ case 3: /* Synchronous Fault Status Register with Clear */
+ case 4: /* Synchronous Fault Address Register */
+ break;
+ case 0x10: /* TLB Replacement Control Register */
+ env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
+ break;
+ case 0x13: /* Synchronous Fault Status Register with Read
+ and Clear */
+ env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
+ break;
+ case 0x14: /* Synchronous Fault Address Register */
+ env->mmuregs[4] = val;
+ break;
+ default:
+ env->mmuregs[reg] = val;
+ break;
+ }
+ if (oldreg != env->mmuregs[reg]) {
+ DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
+ reg, oldreg, env->mmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(env);
+#endif
+ }
+ break;
+ case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
+ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
+ break;
+ case ASI_M_TXTC_TAG: /* I-cache tag */
+ case ASI_M_TXTC_DATA: /* I-cache data */
+ case ASI_M_DATAC_TAG: /* D-cache tag */
+ case ASI_M_DATAC_DATA: /* D-cache data */
+ case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
+ case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
+ case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
+ case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
+ case ASI_M_FLUSH_USER: /* I/D-cache flush user */
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+ {
+ MemTxResult result;
+ hwaddr access_addr = (hwaddr)addr | ((hwaddr)(asi & 0xf) << 32);
+
+ switch (size) {
+ case 1:
+ address_space_stb(cs->as, access_addr, val,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ case 2:
+ address_space_stw(cs->as, access_addr, val,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ case 4:
+            default: /* other sizes are handled as 32-bit stores */
+ address_space_stl(cs->as, access_addr, val,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ case 8:
+ address_space_stq(cs->as, access_addr, val,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ break;
+ }
+ if (result != MEMTX_OK) {
+ sparc_raise_mmu_fault(cs, access_addr, true, false, false,
+ size, GETPC());
+ }
+ }
+ break;
+ case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
+ case 0x31: /* store buffer data, Ross RT620 I-cache flush or
+ Turbosparc snoop RAM */
+ case 0x32: /* store buffer control or Turbosparc page table
+ descriptor diagnostic */
+ case 0x36: /* I-cache flash clear */
+ case 0x37: /* D-cache flash clear */
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
+ {
+ int reg = (addr >> 8) & 3;
+
+ switch (reg) {
+ case 0: /* Breakpoint Value (Addr) */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 1: /* Breakpoint Mask */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 2: /* Breakpoint Control */
+ env->mmubpregs[reg] = (val & 0x7fULL);
+ break;
+ case 3: /* Breakpoint Status */
+ env->mmubpregs[reg] = (val & 0xfULL);
+ break;
+ }
+ DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
+ env->mmuregs[reg]);
+ }
+ break;
+ case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+ env->mmubpctrv = val & 0xffffffff;
+ break;
+ case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+ env->mmubpctrc = val & 0x3;
+ break;
+ case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+ env->mmubpctrs = val & 0x3;
+ break;
+ case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+ env->mmubpaction = val & 0x1fff;
+ break;
+ case ASI_USERTXT: /* User code access, XXX */
+ case ASI_KERNELTXT: /* Supervisor code access, XXX */
+ default:
+ sparc_raise_mmu_fault(cs, addr, true, false, asi, size, GETPC());
+ break;
+
+ case ASI_USERDATA: /* User data access */
+ case ASI_KERNELDATA: /* Supervisor data access */
+ case ASI_P:
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ case ASI_M_BCOPY: /* Block copy, sta access */
+ case ASI_M_BFILL: /* Block fill, stda access */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+ }
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+}
+
+#endif /* CONFIG_USER_ONLY */
+#else /* TARGET_SPARC64 */
+
+#ifdef CONFIG_USER_ONLY
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ uint64_t ret = 0;
+
+ if (asi < 0x80) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_PNF: /* Primary no-fault */
+ case ASI_PNFL: /* Primary no-fault LE */
+ case ASI_SNF: /* Secondary no-fault */
+ case ASI_SNFL: /* Secondary no-fault LE */
+ if (page_check_range(addr, size, PAGE_READ) == -1) {
+ ret = 0;
+ break;
+ }
+ switch (size) {
+ case 1:
+ ret = cpu_ldub_data(env, addr);
+ break;
+ case 2:
+ ret = cpu_lduw_data(env, addr);
+ break;
+ case 4:
+ ret = cpu_ldl_data(env, addr);
+ break;
+ case 8:
+ ret = cpu_ldq_data(env, addr);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ default:
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+ }
+
+ /* Convert from little endian */
+ switch (asi) {
+ case ASI_PNFL: /* Primary no-fault LE */
+ case ASI_SNFL: /* Secondary no-fault LE */
+ switch (size) {
+ case 2:
+ ret = bswap16(ret);
+ break;
+ case 4:
+ ret = bswap32(ret);
+ break;
+ case 8:
+ ret = bswap64(ret);
+ break;
+ }
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read", addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+ if (asi < 0x80) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+ do_check_align(env, addr, size - 1, GETPC());
+
+ switch (asi) {
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ case ASI_PNF: /* Primary no-fault, RO */
+ case ASI_SNF: /* Secondary no-fault, RO */
+ case ASI_PNFL: /* Primary no-fault LE, RO */
+ case ASI_SNFL: /* Secondary no-fault LE, RO */
+ default:
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+ }
+}
+
+#else /* CONFIG_USER_ONLY */
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ CPUState *cs = env_cpu(env);
+ uint64_t ret = 0;
+#if defined(DEBUG_ASI)
+ target_ulong last_addr = addr;
+#endif
+
+ asi &= 0xff;
+
+ do_check_asi(env, asi, GETPC());
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_PNF:
+ case ASI_PNFL:
+ case ASI_SNF:
+ case ASI_SNFL:
+ {
+ MemOpIdx oi;
+ int idx = (env->pstate & PS_PRIV
+ ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
+ : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
+
+ if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ /* exception_index is set in get_physical_address_data. */
+ cpu_raise_exception_ra(env, cs->exception_index, GETPC());
+ }
+ oi = make_memop_idx(memop, idx);
+ switch (size) {
+ case 1:
+ ret = cpu_ldb_mmu(env, addr, oi, GETPC());
+ break;
+ case 2:
+ if (asi & 8) {
+ ret = cpu_ldw_le_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = cpu_ldw_be_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ case 4:
+ if (asi & 8) {
+ ret = cpu_ldl_le_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = cpu_ldl_be_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ case 8:
+ if (asi & 8) {
+ ret = cpu_ldq_le_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = cpu_ldq_be_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ break;
+
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_P: /* Primary */
+ case ASI_S: /* Secondary */
+ case ASI_PL: /* Primary LE */
+ case ASI_SL: /* Secondary LE */
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus Little Endian (LE) */
+ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
+ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+ case ASI_TWINX_AIUP: /* As if user primary, twinx */
+ case ASI_TWINX_AIUS: /* As if user secondary, twinx */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_TWINX_N: /* Nucleus, twinx */
+ case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+ case ASI_TWINX_P: /* Primary, twinx */
+ case ASI_TWINX_PL: /* Primary, twinx, LE */
+ case ASI_TWINX_S: /* Secondary, twinx */
+ case ASI_TWINX_SL: /* Secondary, twinx, LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ case ASI_UPA_CONFIG: /* UPA config */
+ /* XXX */
+ break;
+ case ASI_LSU_CONTROL: /* LSU */
+ ret = env->lsu;
+ break;
+ case ASI_IMMU: /* I-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ switch (reg) {
+ case 0:
+ /* 0x00 I-TSB Tag Target register */
+ ret = ultrasparc_tag_target(env->immu.tag_access);
+ break;
+ case 3: /* SFSR */
+ ret = env->immu.sfsr;
+ break;
+ case 5: /* TSB access */
+ ret = env->immu.tsb;
+ break;
+ case 6:
+ /* 0x30 I-TSB Tag Access register */
+ ret = env->immu.tag_access;
+ break;
+ default:
+ sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
+ ret = 0;
+ }
+ break;
+ }
+ case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
+ {
+        /* env->immu.tsb holds the I-MMU TSB register value;
+           env->immu.tag_access holds the I-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
+ break;
+ }
+ case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
+ {
+        /* env->immu.tsb holds the I-MMU TSB register value;
+           env->immu.tag_access holds the I-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
+ break;
+ }
+ case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tte;
+ break;
+ }
+ case ASI_ITLB_TAG_READ: /* I-MMU tag read */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tag;
+ break;
+ }
+ case ASI_DMMU: /* D-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ switch (reg) {
+ case 0:
+ /* 0x00 D-TSB Tag Target register */
+ ret = ultrasparc_tag_target(env->dmmu.tag_access);
+ break;
+ case 1: /* 0x08 Primary Context */
+ ret = env->dmmu.mmu_primary_context;
+ break;
+ case 2: /* 0x10 Secondary Context */
+ ret = env->dmmu.mmu_secondary_context;
+ break;
+ case 3: /* SFSR */
+ ret = env->dmmu.sfsr;
+ break;
+ case 4: /* 0x20 SFAR */
+ ret = env->dmmu.sfar;
+ break;
+ case 5: /* 0x28 TSB access */
+ ret = env->dmmu.tsb;
+ break;
+ case 6: /* 0x30 D-TSB Tag Access register */
+ ret = env->dmmu.tag_access;
+ break;
+ case 7:
+ ret = env->dmmu.virtual_watchpoint;
+ break;
+ case 8:
+ ret = env->dmmu.physical_watchpoint;
+ break;
+ default:
+ sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
+ ret = 0;
+ }
+ break;
+ }
+ case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
+ {
+        /* env->dmmu.tsb holds the D-MMU TSB register value;
+           env->dmmu.tag_access holds the D-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
+ break;
+ }
+ case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
+ {
+        /* env->dmmu.tsb holds the D-MMU TSB register value;
+           env->dmmu.tag_access holds the D-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
+ break;
+ }
+ case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tte;
+ break;
+ }
+ case ASI_DTLB_TAG_READ: /* D-MMU tag read */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tag;
+ break;
+ }
+ case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+ break;
+ case ASI_INTR_RECEIVE: /* Interrupt data receive */
+ ret = env->ivec_status;
+ break;
+ case ASI_INTR_R: /* Incoming interrupt vector, RO */
+ {
+ int reg = (addr >> 4) & 0x3;
+ if (reg < 3) {
+ ret = env->ivec_data[reg];
+ }
+ break;
+ }
+ case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
+ if (unlikely((addr >= 0x20) && (addr < 0x30))) {
+ /* Hyperprivileged access only */
+ sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
+ }
+ /* fall through */
+ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
+ {
+ unsigned int i = (addr >> 3) & 0x7;
+ ret = env->scratch[i];
+ break;
+ }
+ case ASI_MMU: /* UA2005 Context ID registers */
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ ret = env->dmmu.mmu_primary_context;
+ break;
+ case 2:
+ ret = env->dmmu.mmu_secondary_context;
+ break;
+ default:
+            sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
+ }
+ break;
+ case ASI_DCACHE_DATA: /* D-cache data */
+ case ASI_DCACHE_TAG: /* D-cache tag access */
+ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+ case ASI_AFSR: /* E-cache asynchronous fault status */
+ case ASI_AFAR: /* E-cache asynchronous fault address */
+ case ASI_EC_TAG_DATA: /* E-cache tag data */
+ case ASI_IC_INSTR: /* I-cache instruction access */
+ case ASI_IC_TAG: /* I-cache tag access */
+ case ASI_IC_PRE_DECODE: /* I-cache predecode */
+ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
+ case ASI_EC_W: /* E-cache tag */
+ case ASI_EC_R: /* E-cache tag */
+ break;
+ case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
+ case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
+ case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
+ case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
+ case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
+ case ASI_INTR_W: /* Interrupt vector, WO */
+ default:
+ sparc_raise_mmu_fault(cs, addr, false, false, 1, size, GETPC());
+ ret = 0;
+ break;
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ CPUState *cs = env_cpu(env);
+
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+
+ asi &= 0xff;
+
+ do_check_asi(env, asi, GETPC());
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_P: /* Primary */
+ case ASI_S: /* Secondary */
+ case ASI_PL: /* Primary LE */
+ case ASI_SL: /* Secondary LE */
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus Little Endian (LE) */
+ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
+ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+ case ASI_TWINX_AIUP: /* As if user primary, twinx */
+ case ASI_TWINX_AIUS: /* As if user secondary, twinx */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_TWINX_N: /* Nucleus, twinx */
+ case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+ case ASI_TWINX_P: /* Primary, twinx */
+ case ASI_TWINX_PL: /* Primary, twinx, LE */
+ case ASI_TWINX_S: /* Secondary, twinx */
+ case ASI_TWINX_SL: /* Secondary, twinx, LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+ /* these ASIs have different functions on UltraSPARC-IIIi
+ * and UA2005 CPUs. Use the explicit numbers to avoid confusion
+ */
+ case 0x31:
+ case 0x32:
+ case 0x39:
+ case 0x3a:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
+ * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
+ * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
+ * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
+ */
+ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
+ env->dmmu.sun4v_tsb_pointers[idx] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x33:
+ case 0x3b:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_DMMU_CTX_ZERO_CONFIG
+ * ASI_DMMU_CTX_NONZERO_CONFIG
+ */
+ env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x35:
+ case 0x36:
+ case 0x3d:
+ case 0x3e:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
+ * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
+ * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
+ * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
+ */
+ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
+ env->immu.sun4v_tsb_pointers[idx] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x37:
+ case 0x3f:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_IMMU_CTX_ZERO_CONFIG
+ * ASI_IMMU_CTX_NONZERO_CONFIG
+ */
+ env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case ASI_UPA_CONFIG: /* UPA config */
+ /* XXX */
+ return;
+ case ASI_LSU_CONTROL: /* LSU */
+ env->lsu = val & (DMMU_E | IMMU_E);
+ return;
+ case ASI_IMMU: /* I-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->immu.mmuregs[reg];
+ switch (reg) {
+ case 0: /* RO */
+ return;
+ case 1: /* Not in I-MMU */
+ case 2:
+ return;
+ case 3: /* SFSR */
+ if ((val & 1) == 0) {
+ val = 0; /* Clear SFSR */
+ }
+ env->immu.sfsr = val;
+ break;
+ case 4: /* RO */
+ return;
+ case 5: /* TSB access */
+ DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->immu.tsb, val);
+ env->immu.tsb = val;
+ break;
+ case 6: /* Tag access */
+ env->immu.tag_access = val;
+ break;
+ case 7:
+ case 8:
+ return;
+ default:
+ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
+ break;
+ }
+
+ if (oldreg != env->immu.mmuregs[reg]) {
+ DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(env);
+#endif
+ return;
+ }
+ case ASI_ITLB_DATA_IN: /* I-MMU data in */
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
+ val, "immu", env, addr);
+ }
+ return;
+ case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+ {
+ /* TODO: auto demap */
+
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
+ dump_mmu(env);
+#endif
+ return;
+ }
+ case ASI_IMMU_DEMAP: /* I-MMU demap */
+ demap_tlb(env->itlb, addr, "immu", env);
+ return;
+ case ASI_DMMU: /* D-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->dmmu.mmuregs[reg];
+ switch (reg) {
+ case 0: /* RO */
+ case 4:
+ return;
+ case 3: /* SFSR */
+ if ((val & 1) == 0) {
+ val = 0; /* Clear SFSR, Fault address */
+ env->dmmu.sfar = 0;
+ }
+ env->dmmu.sfsr = val;
+ break;
+ case 1: /* Primary context */
+ env->dmmu.mmu_primary_context = val;
+ /* can be optimized to only flush MMU_USER_IDX
+ and MMU_KERNEL_IDX entries */
+ tlb_flush(cs);
+ break;
+ case 2: /* Secondary context */
+ env->dmmu.mmu_secondary_context = val;
+ /* can be optimized to only flush MMU_USER_SECONDARY_IDX
+ and MMU_KERNEL_SECONDARY_IDX entries */
+ tlb_flush(cs);
+ break;
+ case 5: /* TSB access */
+ DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->dmmu.tsb, val);
+ env->dmmu.tsb = val;
+ break;
+ case 6: /* Tag access */
+ env->dmmu.tag_access = val;
+ break;
+ case 7: /* Virtual Watchpoint */
+ env->dmmu.virtual_watchpoint = val;
+ break;
+ case 8: /* Physical Watchpoint */
+ env->dmmu.physical_watchpoint = val;
+ break;
+ default:
+ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
+ break;
+ }
+
+ if (oldreg != env->dmmu.mmuregs[reg]) {
+ DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(env);
+#endif
+ return;
+ }
+ case ASI_DTLB_DATA_IN: /* D-MMU data in */
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
+ val, "dmmu", env, addr);
+ }
+ return;
+ case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+ {
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
+ dump_mmu(env);
+#endif
+ return;
+ }
+ case ASI_DMMU_DEMAP: /* D-MMU demap */
+ demap_tlb(env->dtlb, addr, "dmmu", env);
+ return;
+ case ASI_INTR_RECEIVE: /* Interrupt data receive */
+ env->ivec_status = val & 0x20;
+ return;
+ case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
+ if (unlikely((addr >= 0x20) && (addr < 0x30))) {
+ /* Hyperprivileged access only */
+ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
+ }
+ /* fall through */
+ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
+ {
+ unsigned int i = (addr >> 3) & 0x7;
+ env->scratch[i] = val;
+ return;
+ }
+ case ASI_MMU: /* UA2005 Context ID registers */
+ {
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ env->dmmu.mmu_primary_context = val;
+ env->immu.mmu_primary_context = val;
+ tlb_flush_by_mmuidx(cs,
+ (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
+ break;
+ case 2:
+ env->dmmu.mmu_secondary_context = val;
+ env->immu.mmu_secondary_context = val;
+ tlb_flush_by_mmuidx(cs,
+ (1 << MMU_USER_SECONDARY_IDX) |
+ (1 << MMU_KERNEL_SECONDARY_IDX));
+ break;
+ default:
+ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
+ }
+ }
+ return;
+ case ASI_QUEUE: /* UA2005 CPU mondo queue */
+ case ASI_DCACHE_DATA: /* D-cache data */
+ case ASI_DCACHE_TAG: /* D-cache tag access */
+ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+ case ASI_AFSR: /* E-cache asynchronous fault status */
+ case ASI_AFAR: /* E-cache asynchronous fault address */
+ case ASI_EC_TAG_DATA: /* E-cache tag data */
+ case ASI_IC_INSTR: /* I-cache instruction access */
+ case ASI_IC_TAG: /* I-cache tag access */
+ case ASI_IC_PRE_DECODE: /* I-cache predecode */
+ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
+ case ASI_EC_W: /* E-cache tag */
+ case ASI_EC_R: /* E-cache tag */
+ return;
+ case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
+ case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
+ case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
+ case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
+ case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
+ case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
+ case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
+ case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+ case ASI_INTR_R: /* Incoming interrupt vector, RO */
+ case ASI_PNF: /* Primary no-fault, RO */
+ case ASI_SNF: /* Secondary no-fault, RO */
+ case ASI_PNFL: /* Primary no-fault LE, RO */
+ case ASI_SNFL: /* Secondary no-fault LE, RO */
+ default:
+ sparc_raise_mmu_fault(cs, addr, true, false, 1, size, GETPC());
+ return;
+ }
+}
+#endif /* CONFIG_USER_ONLY */
+#endif /* TARGET_SPARC64 */
+
+#if !defined(CONFIG_USER_ONLY)
+
+void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ bool is_write = access_type == MMU_DATA_STORE;
+ bool is_exec = access_type == MMU_INST_FETCH;
+ bool is_asi = false;
+
+ sparc_raise_mmu_fault(cs, physaddr, is_write, is_exec,
+ is_asi, size, retaddr);
+}
+#endif
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
new file mode 100644
index 000000000..917375c3a
--- /dev/null
+++ b/target/sparc/machine.c
@@ -0,0 +1,196 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/timer.h"
+
+#include "migration/cpu.h"
+
+#ifdef TARGET_SPARC64
+static const VMStateDescription vmstate_cpu_timer = {
+ .name = "cpu_timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(frequency, CPUTimer),
+ VMSTATE_UINT32(disabled, CPUTimer),
+ VMSTATE_UINT64(disabled_mask, CPUTimer),
+ VMSTATE_UINT32(npt, CPUTimer),
+ VMSTATE_UINT64(npt_mask, CPUTimer),
+ VMSTATE_INT64(clock_offset, CPUTimer),
+ VMSTATE_TIMER_PTR(qtimer, CPUTimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_CPU_TIMER(_f, _s) \
+ VMSTATE_STRUCT_POINTER(_f, _s, vmstate_cpu_timer, CPUTimer)
+
+static const VMStateDescription vmstate_trap_state = {
+ .name = "trap_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tpc, trap_state),
+ VMSTATE_UINT64(tnpc, trap_state),
+ VMSTATE_UINT64(tstate, trap_state),
+ VMSTATE_UINT32(tt, trap_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_tlb_entry = {
+ .name = "tlb_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tag, SparcTLBEntry),
+ VMSTATE_UINT64(tte, SparcTLBEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+static int get_psr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ /* needed to ensure that the wrapping registers are correctly updated */
+ env->cwp = 0;
+ cpu_put_psr_raw(env, val);
+
+ return 0;
+}
+
+static int put_psr(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+ uint32_t val;
+
+ val = cpu_get_psr(env);
+
+ qemu_put_be32(f, val);
+ return 0;
+}
+
+static const VMStateInfo vmstate_psr = {
+ .name = "psr",
+ .get = get_psr,
+ .put = put_psr,
+};
+
+static int cpu_pre_save(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+
+ /* if env->cwp == env->nwindows - 1, this will set the ins of the last
+ * window as the outs of the first window
+ */
+ cpu_set_cwp(env, env->cwp);
+
+ return 0;
+}
+
+/* 32-bit SPARC retains migration compatibility with older versions
+ * of QEMU; 64-bit SPARC has had a migration break since then, so the
+ * versions are different.
+ */
+#ifndef TARGET_SPARC64
+#define SPARC_VMSTATE_VER 7
+#else
+#define SPARC_VMSTATE_VER 9
+#endif
+
+const VMStateDescription vmstate_sparc_cpu = {
+ .name = "cpu",
+ .version_id = SPARC_VMSTATE_VER,
+ .minimum_version_id = SPARC_VMSTATE_VER,
+ .minimum_version_id_old = SPARC_VMSTATE_VER,
+ .pre_save = cpu_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8),
+ VMSTATE_UINT32(env.nwindows, SPARCCPU),
+ VMSTATE_VARRAY_MULTIPLY(env.regbase, SPARCCPU, env.nwindows, 16,
+ vmstate_info_uinttl, target_ulong),
+ VMSTATE_CPUDOUBLE_ARRAY(env.fpr, SPARCCPU, TARGET_DPREGS),
+ VMSTATE_UINTTL(env.pc, SPARCCPU),
+ VMSTATE_UINTTL(env.npc, SPARCCPU),
+ VMSTATE_UINTTL(env.y, SPARCCPU),
+        {
+            .name = "psr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_psr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_UINTTL(env.fsr, SPARCCPU),
+ VMSTATE_UINTTL(env.tbr, SPARCCPU),
+ VMSTATE_INT32(env.interrupt_index, SPARCCPU),
+ VMSTATE_UINT32(env.pil_in, SPARCCPU),
+#ifndef TARGET_SPARC64
+ /* MMU */
+ VMSTATE_UINT32(env.wim, SPARCCPU),
+ VMSTATE_UINT32_ARRAY(env.mmuregs, SPARCCPU, 32),
+ VMSTATE_UINT64_ARRAY(env.mxccdata, SPARCCPU, 4),
+ VMSTATE_UINT64_ARRAY(env.mxccregs, SPARCCPU, 8),
+ VMSTATE_UINT32(env.mmubpctrv, SPARCCPU),
+ VMSTATE_UINT32(env.mmubpctrc, SPARCCPU),
+ VMSTATE_UINT32(env.mmubpctrs, SPARCCPU),
+ VMSTATE_UINT64(env.mmubpaction, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.mmubpregs, SPARCCPU, 4),
+#else
+ VMSTATE_UINT64(env.lsu, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.immu.mmuregs, SPARCCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.dmmu.mmuregs, SPARCCPU, 16),
+ VMSTATE_STRUCT_ARRAY(env.itlb, SPARCCPU, 64, 0,
+ vmstate_tlb_entry, SparcTLBEntry),
+ VMSTATE_STRUCT_ARRAY(env.dtlb, SPARCCPU, 64, 0,
+ vmstate_tlb_entry, SparcTLBEntry),
+ VMSTATE_UINT32(env.mmu_version, SPARCCPU),
+ VMSTATE_STRUCT_ARRAY(env.ts, SPARCCPU, MAXTL_MAX, 0,
+ vmstate_trap_state, trap_state),
+ VMSTATE_UINT32(env.xcc, SPARCCPU),
+ VMSTATE_UINT32(env.asi, SPARCCPU),
+ VMSTATE_UINT32(env.pstate, SPARCCPU),
+ VMSTATE_UINT32(env.tl, SPARCCPU),
+ VMSTATE_UINT32(env.cansave, SPARCCPU),
+ VMSTATE_UINT32(env.canrestore, SPARCCPU),
+ VMSTATE_UINT32(env.otherwin, SPARCCPU),
+ VMSTATE_UINT32(env.wstate, SPARCCPU),
+ VMSTATE_UINT32(env.cleanwin, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.agregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.bgregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.igregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.mgregs, SPARCCPU, 8),
+ VMSTATE_UINT64(env.fprs, SPARCCPU),
+ VMSTATE_UINT64(env.tick_cmpr, SPARCCPU),
+ VMSTATE_UINT64(env.stick_cmpr, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.tick, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.stick, SPARCCPU),
+ VMSTATE_UINT64(env.gsr, SPARCCPU),
+ VMSTATE_UINT32(env.gl, SPARCCPU),
+ VMSTATE_UINT64(env.hpstate, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.htstate, SPARCCPU, MAXTL_MAX),
+ VMSTATE_UINT64(env.hintp, SPARCCPU),
+ VMSTATE_UINT64(env.htba, SPARCCPU),
+ VMSTATE_UINT64(env.hver, SPARCCPU),
+ VMSTATE_UINT64(env.hstick_cmpr, SPARCCPU),
+ VMSTATE_UINT64(env.ssr, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.hstick, SPARCCPU),
+ /* On SPARC32 env.psrpil and env.cwp are migrated as part of the PSR */
+ VMSTATE_UINT32(env.psrpil, SPARCCPU),
+ VMSTATE_UINT32(env.cwp, SPARCCPU),
+#endif
+ VMSTATE_END_OF_LIST()
+ },
+};
diff --git a/target/sparc/meson.build b/target/sparc/meson.build
new file mode 100644
index 000000000..a801802ee
--- /dev/null
+++ b/target/sparc/meson.build
@@ -0,0 +1,23 @@
+sparc_ss = ss.source_set()
+sparc_ss.add(files(
+ 'cc_helper.c',
+ 'cpu.c',
+ 'fop_helper.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'ldst_helper.c',
+ 'translate.c',
+ 'win_helper.c',
+))
+sparc_ss.add(when: 'TARGET_SPARC', if_true: files('int32_helper.c'))
+sparc_ss.add(when: 'TARGET_SPARC64', if_true: files('int64_helper.c', 'vis_helper.c'))
+
+sparc_softmmu_ss = ss.source_set()
+sparc_softmmu_ss.add(files(
+ 'machine.c',
+ 'mmu_helper.c',
+ 'monitor.c',
+))
+
+target_arch += {'sparc': sparc_ss}
+target_softmmu_arch += {'sparc': sparc_softmmu_ss}
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
new file mode 100644
index 000000000..f2668389b
--- /dev/null
+++ b/target/sparc/mmu_helper.c
@@ -0,0 +1,944 @@
+/*
+ * Sparc MMU helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/qemu-print.h"
+#include "trace.h"
+
+/* Sparc MMU emulation */
+
+#ifndef TARGET_SPARC64
+/*
+ * Sparc V8 Reference MMU (SRMMU)
+ */
+static const int access_table[8][8] = {
+ { 0, 0, 0, 0, 8, 0, 12, 12 },
+ { 0, 0, 0, 0, 8, 0, 0, 0 },
+ { 8, 8, 0, 0, 0, 8, 12, 12 },
+ { 8, 8, 0, 0, 0, 8, 0, 0 },
+ { 8, 0, 8, 0, 8, 8, 12, 12 },
+ { 8, 0, 8, 0, 8, 0, 8, 0 },
+ { 8, 8, 8, 0, 8, 8, 12, 12 },
+ { 8, 8, 8, 0, 8, 8, 8, 0 }
+};
+
+static const int perm_table[2][8] = {
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC
+ },
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ,
+ 0,
+ 0,
+ }
+};
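
Both tables are indexed by the access_index that get_physical_address() below computes as ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1): bit 2 means store, bit 1 means instruction fetch, bit 0 means supervisor. A standalone sketch of the encoding:

    #include <assert.h>

    /* rw: 0 = load, 1 = store, 2 = instruction fetch */
    static int access_index(int rw, int is_user)
    {
        return ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    }

    int main(void)
    {
        assert(access_index(0, 1) == 0); /* user load        */
        assert(access_index(1, 1) == 4); /* user store       */
        assert(access_index(2, 0) == 3); /* supervisor fetch */
        return 0;
    }
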
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+ int *prot, int *access_index, MemTxAttrs *attrs,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ int access_perms = 0;
+ hwaddr pde_ptr;
+ uint32_t pde;
+ int error_code = 0, is_dirty, is_user;
+ unsigned long page_offset;
+ CPUState *cs = env_cpu(env);
+ MemTxResult result;
+
+ is_user = mmu_idx == MMU_USER_IDX;
+
+ if (mmu_idx == MMU_PHYS_IDX) {
+ *page_size = TARGET_PAGE_SIZE;
+ /* Boot mode: instruction fetches are taken from PROM */
+ if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
+ *physical = env->prom_addr | (address & 0x7ffffULL);
+ *prot = PAGE_READ | PAGE_EXEC;
+ return 0;
+ }
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
+ *physical = 0xffffffffffff0000ULL;
+
+ /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
+ /* Context base + context number */
+ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+ pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return 4 << 2; /* Translation fault, L = 0 */
+ }
+
+ /* Ctx pde */
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return 1 << 2;
+ case 2: /* L0 PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 4 << 2;
+ case 1: /* L0 PDE */
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (1 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (1 << 8) | (4 << 2);
+ case 1: /* L1 PDE */
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (2 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (2 << 8) | (4 << 2);
+ case 1: /* L2 PDE */
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (3 << 8) | (1 << 2);
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return (3 << 8) | (4 << 2);
+ case 2: /* L3 PTE */
+ page_offset = 0;
+ }
+ *page_size = TARGET_PAGE_SIZE;
+ break;
+ case 2: /* L2 PTE */
+ page_offset = address & 0x3f000;
+ *page_size = 0x40000;
+ }
+ break;
+ case 2: /* L1 PTE */
+ page_offset = address & 0xfff000;
+ *page_size = 0x1000000;
+ }
+ }
+
+ /* check access */
+ access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
+ error_code = access_table[*access_index][access_perms];
+ if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
+ return error_code;
+ }
+
+ /* update page modified and dirty bits */
+ is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
+ if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
+ pde |= PG_ACCESSED_MASK;
+ if (is_dirty) {
+ pde |= PG_MODIFIED_MASK;
+ }
+ stl_phys_notdirty(cs->as, pde_ptr, pde);
+ }
+
+ /* the page can be put in the TLB */
+ *prot = perm_table[is_user][access_perms];
+ if (!(pde & PG_MODIFIED_MASK)) {
+ /* Only set write access if the page is already dirty; otherwise
+ wait for a write access to set the dirty bit */
+ *prot &= ~PAGE_WRITE;
+ }
+
+ /* Even with large PTEs, we map only one 4KB page in the TLB to
+ avoid filling it too quickly */
+ *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
+ return error_code;
+}
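+
+/*
+ * For illustration, a hypothetical decoder for the fault codes returned
+ * above (not used by the code): the table-walk level lands in bits [9:8]
+ * and the SRMMU fault type in bits [4:2]:
+ *
+ *   static void decode_srmmu_fault(int code, int *level, int *ft)
+ *   {
+ *       *level = (code >> 8) & 3;  // 0 = context table, 1..3 = L1..L3
+ *       *ft = (code >> 2) & 7;     // 1 = invalid, 4 = translation error
+ *   }
+ *
+ * sparc_cpu_tlb_fill() below ORs this code into mmuregs[3] together with
+ * the access type (bits [7:5]) and the FAV bit (bit 1).
+ */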
+
+/* Perform address translation */
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ hwaddr paddr;
+ target_ulong vaddr;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
+ MemTxAttrs attrs = {};
+
+ /*
+ * TODO: If we ever need tlb_vaddr_to_host for this target,
+ * then we must figure out how to manipulate FSR and FAR
+ * when both MMU_NF and probe are set. In the meantime,
+ * do not support this use case.
+ */
+ assert(!probe);
+
+ address &= TARGET_PAGE_MASK;
+ error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
+ address, access_type,
+ mmu_idx, &page_size);
+ vaddr = address;
+ if (likely(error_code == 0)) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "Translate at %" VADDR_PRIx " -> "
+ TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
+ address, paddr, vaddr);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
+ return true;
+ }
+
+ if (env->mmuregs[3]) { /* Fault status register */
+ env->mmuregs[3] = 1; /* overflow (not read before another fault) */
+ }
+ env->mmuregs[3] |= (access_index << 5) | error_code | 2;
+ env->mmuregs[4] = address; /* Fault address register */
+
+ if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
+ /* No fault mode: if a mapping is available, just override
+ permissions. If no mapping is available, redirect accesses to
+ neverland. Fake/overridden mappings will be flushed when
+ switching to normal mode. */
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ } else {
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = TT_TFAULT;
+ } else {
+ cs->exception_index = TT_DFAULT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
+{
+ CPUState *cs = env_cpu(env);
+ hwaddr pde_ptr;
+ uint32_t pde;
+ MemTxResult result;
+
+ /*
+ * TODO: MMU probe operations are supposed to set the fault
+ * status registers, but we don't do this.
+ */
+
+ /* Context base + context number */
+ pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
+ (env->mmuregs[2] << 2);
+ pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return 0;
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 2: /* PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 0;
+ case 1: /* L1 PDE */
+ if (mmulev == 3) {
+ return pde;
+ }
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return 0;
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L1 PTE */
+ return pde;
+ case 1: /* L2 PDE */
+ if (mmulev == 2) {
+ return pde;
+ }
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return 0;
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L2 PTE */
+ return pde;
+ case 1: /* L3 PDE */
+ if (mmulev == 1) {
+ return pde;
+ }
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = address_space_ldl(cs->as, pde_ptr,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ return 0;
+ }
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L3 PTE */
+ return pde;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+void dump_mmu(CPUSPARCState *env)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong va, va1, va2;
+ unsigned int n, m, o;
+ hwaddr pa;
+ uint32_t pde;
+
+ qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
+ (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
+ for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
+ pde = mmu_probe(env, va, 2);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va);
+ qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
+ " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
+ for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
+ pde = mmu_probe(env, va1, 1);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va1);
+ qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
+ va1, pa, pde);
+ for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
+ pde = mmu_probe(env, va2, 0);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va2);
+ qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PTE: "
+ TARGET_FMT_lx "\n",
+ va2, pa, pde);
+ }
+ }
+ }
+ }
+ }
+ }
+}
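+
+/*
+ * For illustration of the loop bounds above: with 4 KB pages the SRMMU
+ * divides the 4 GB virtual space into 256 L1 regions of 16 MB, each
+ * holding 64 L2 regions of 256 KB, each holding 64 L3 pages of 4 KB.
+ */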
+
+/* GDB expects all register windows to be flushed to RAM. This function
+ * handles reads (and only reads) within stack frames as if the windows
+ * had been flushed. We assume that the SPARC ABI is followed.
+ */
+int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
+ uint8_t *buf, int len, bool is_write)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ target_ulong addr = address;
+ int i;
+ int len1;
+ int cwp = env->cwp;
+
+ if (!is_write) {
+ for (i = 0; i < env->nwindows; i++) {
+ int off;
+ target_ulong fp = env->regbase[cwp * 16 + 22];
+
+ /* Assume fp == 0 means end of frame. */
+ if (fp == 0) {
+ break;
+ }
+
+ cwp = cpu_cwp_inc(env, cwp + 1);
+
+ /* Invalid window? */
+ if (env->wim & (1 << cwp)) {
+ break;
+ }
+
+ /* According to the ABI, the stack grows downward. */
+ if (addr + len < fp) {
+ break;
+ }
+
+ /* Not in this frame. */
+ if (addr > fp + 64) {
+ continue;
+ }
+
+ /* Handle access before this window. */
+ if (addr < fp) {
+ len1 = fp - addr;
+ if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
+ return -1;
+ }
+ addr += len1;
+ len -= len1;
+ buf += len1;
+ }
+
+ /* Access the registers byte by byte. Not very efficient, but speed
+ * is not critical here.
+ */
+ off = addr - fp;
+ len1 = 64 - off;
+
+ if (len1 > len) {
+ len1 = len;
+ }
+
+ for (; len1; len1--) {
+ int reg = cwp * 16 + 8 + (off >> 2);
+ union {
+ uint32_t v;
+ uint8_t c[4];
+ } u;
+ u.v = cpu_to_be32(env->regbase[reg]);
+ *buf++ = u.c[off & 3];
+ addr++;
+ len--;
+ off++;
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+ }
+ }
+ return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
+}
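+
+/*
+ * For illustration of the layout assumed above (per the SPARC ABI): each
+ * frame pointer points at a 64-byte save area holding the next window's
+ * registers as big-endian 32-bit words:
+ *
+ *   fp + 0  .. fp + 31   %l0..%l7   regbase[cwp * 16 + 8 + n]
+ *   fp + 32 .. fp + 63   %i0..%i7   regbase[cwp * 16 + 16 + n]
+ *
+ * hence the byte loop reads regbase[cwp * 16 + 8 + (off >> 2)].
+ */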
+
+#else /* !TARGET_SPARC64 */
+
+/* 41 bit physical address space */
+static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
+{
+ return x & 0x1ffffffffffULL;
+}
+
+/*
+ * UltraSparc IIi I/DMMUs
+ */
+
+/* Returns true if the TTE tag is valid and matches the virtual address
+ value for the given context. Requires the virtual address mask value
+ calculated from the TTE entry size. */
+static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
+ uint64_t address, uint64_t context,
+ hwaddr *physical)
+{
+ uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));
+
+ /* valid, context match, virtual address match? */
+ if (TTE_IS_VALID(tlb->tte) &&
+ (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
+ && compare_masked(address, tlb->tag, mask)) {
+ /* decode physical address */
+ *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
+ return 1;
+ }
+
+ return 0;
+}
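+
+/*
+ * For illustration: TTE_PGSIZE() selects one of four page sizes, each
+ * eight times the previous, so the comparison mask above works out to
+ *
+ *   size = 8192ULL << (3 * TTE_PGSIZE(tte))    8 KB, 64 KB, 512 KB, 4 MB
+ *   mask = -size                               e.g. ~0x7ffffULL for 512 KB
+ *
+ * The physical address takes the TTE bits covered by the mask and the
+ * untranslated virtual-address bits below it.
+ */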
+
+static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
+{
+ uint64_t sfsr = SFSR_VALID_BIT;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ sfsr |= SFSR_CT_NOTRANS;
+ break;
+ case MMU_USER_IDX:
+ case MMU_KERNEL_IDX:
+ sfsr |= SFSR_CT_PRIMARY;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ sfsr |= SFSR_CT_SECONDARY;
+ break;
+ case MMU_NUCLEUS_IDX:
+ sfsr |= SFSR_CT_NUCLEUS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (rw == 1) {
+ sfsr |= SFSR_WRITE_BIT;
+ } else if (rw == 4) {
+ sfsr |= SFSR_NF_BIT;
+ }
+
+ if (env->pstate & PS_PRIV) {
+ sfsr |= SFSR_PR_BIT;
+ }
+
+ if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+ sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+
+ return sfsr;
+}
+
+static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
+ int *prot, MemTxAttrs *attrs,
+ target_ulong address, int rw, int mmu_idx)
+{
+ CPUState *cs = env_cpu(env);
+ unsigned int i;
+ uint64_t sfsr;
+ uint64_t context;
+ bool is_user = false;
+
+ sfsr = build_sfsr(env, mmu_idx, rw);
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ g_assert_not_reached();
+ case MMU_USER_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_IDX:
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_SECONDARY_IDX:
+ context = env->dmmu.mmu_secondary_context & 0x1fff;
+ break;
+ default:
+ context = 0;
+ break;
+ }
+
+ for (i = 0; i < 64; i++) {
+ /* ctx match, vaddr match, valid? */
+ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
+ int do_fault = 0;
+
+ if (TTE_IS_IE(env->dtlb[i].tte)) {
+ attrs->byte_swap = true;
+ }
+
+ /* access ok? */
+ /* multiple bits in SFSR.FT may be set on TT_DFAULT */
+ if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
+ trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
+ }
+ if (rw == 4) {
+ if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NF_E_BIT;
+ }
+ } else {
+ if (TTE_IS_NFO(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NFO_BIT;
+ }
+ }
+
+ if (do_fault) {
+ /* faults above are reported with TT_DFAULT. */
+ cs->exception_index = TT_DFAULT;
+ } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
+ do_fault = 1;
+ cs->exception_index = TT_DPROT;
+
+ trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
+ }
+
+ if (!do_fault) {
+ *prot = PAGE_READ;
+ if (TTE_IS_W_OK(env->dtlb[i].tte)) {
+ *prot |= PAGE_WRITE;
+ }
+
+ TTE_SET_USED(env->dtlb[i].tte);
+
+ return 0;
+ }
+
+ env->dmmu.sfsr = sfsr;
+ env->dmmu.sfar = address; /* Fault address register */
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+ return 1;
+ }
+ }
+
+ trace_mmu_helper_dmiss(address, context);
+
+ /*
+ * On MMU misses:
+ * - UltraSPARC IIi: SFSR and SFAR unmodified
+ * - JPS1: SFAR updated and some fields of SFSR updated
+ */
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+ cs->exception_index = TT_DMISS;
+ return 1;
+}
+
+static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical,
+ int *prot, MemTxAttrs *attrs,
+ target_ulong address, int mmu_idx)
+{
+ CPUState *cs = env_cpu(env);
+ unsigned int i;
+ uint64_t context;
+ bool is_user = false;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ g_assert_not_reached();
+ case MMU_USER_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_IDX:
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ break;
+ default:
+ context = 0;
+ break;
+ }
+
+ if (env->tl == 0) {
+ /* PRIMARY context */
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ } else {
+ /* NUCLEUS context */
+ context = 0;
+ }
+
+ for (i = 0; i < 64; i++) {
+ /* ctx match, vaddr match, valid? */
+ if (ultrasparc_tag_match(&env->itlb[i],
+ address, context, physical)) {
+ /* access ok? */
+ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
+ /* Fault status register */
+ if (env->immu.sfsr & SFSR_VALID_BIT) {
+ env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
+ another fault) */
+ } else {
+ env->immu.sfsr = 0;
+ }
+ if (env->pstate & PS_PRIV) {
+ env->immu.sfsr |= SFSR_PR_BIT;
+ }
+ if (env->tl > 0) {
+ env->immu.sfsr |= SFSR_CT_NUCLEUS;
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+ env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
+ cs->exception_index = TT_TFAULT;
+
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+
+ trace_mmu_helper_tfault(address, context);
+
+ return 1;
+ }
+ *prot = PAGE_EXEC;
+ TTE_SET_USED(env->itlb[i].tte);
+ return 0;
+ }
+ }
+
+ trace_mmu_helper_tmiss(address, context);
+
+ /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+ cs->exception_index = TT_TMISS;
+ return 1;
+}
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+ int *prot, int *access_index, MemTxAttrs *attrs,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ /* ??? We treat everything as a small page, then explicitly flush
+ everything when an entry is evicted. */
+ *page_size = TARGET_PAGE_SIZE;
+
+ /* safety net to catch wrong softmmu index use from dynamic code */
+ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
+ if (rw == 2) {
+ trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context,
+ address);
+ } else {
+ trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context,
+ address);
+ }
+ }
+
+ if (mmu_idx == MMU_PHYS_IDX) {
+ *physical = ultrasparc_truncate_physical(address);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ if (rw == 2) {
+ return get_physical_address_code(env, physical, prot, attrs, address,
+ mmu_idx);
+ } else {
+ return get_physical_address_data(env, physical, prot, attrs, address,
+ rw, mmu_idx);
+ }
+}
+
+/* Perform address translation */
+bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ target_ulong vaddr;
+ hwaddr paddr;
+ target_ulong page_size;
+ MemTxAttrs attrs = {};
+ int error_code = 0, prot, access_index;
+
+ address &= TARGET_PAGE_MASK;
+ error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
+ address, access_type,
+ mmu_idx, &page_size);
+ if (likely(error_code == 0)) {
+ vaddr = address;
+
+ trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+
+ tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx,
+ page_size);
+ return true;
+ }
+ if (probe) {
+ return false;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void dump_mmu(CPUSPARCState *env)
+{
+ unsigned int i;
+ const char *mask;
+
+ qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
+ PRId64 "\n",
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+ qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
+ "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
+ if ((env->lsu & DMMU_E) == 0) {
+ qemu_printf("DMMU disabled\n");
+ } else {
+ qemu_printf("DMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch (TTE_PGSIZE(env->dtlb[i].tte)) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if (TTE_IS_VALID(env->dtlb[i].tte)) {
+ qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
+ ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
+ i,
+ env->dtlb[i].tag & (uint64_t)~0x1fffULL,
+ TTE_PA(env->dtlb[i].tte),
+ mask,
+ TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
+ TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
+ TTE_IS_LOCKED(env->dtlb[i].tte) ?
+ "locked" : "unlocked",
+ TTE_IS_IE(env->dtlb[i].tte) ?
+ "yes" : "no",
+ env->dtlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->dtlb[i].tte) ?
+ "global" : "local");
+ }
+ }
+ }
+ if ((env->lsu & IMMU_E) == 0) {
+ qemu_printf("IMMU disabled\n");
+ } else {
+ qemu_printf("IMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch (TTE_PGSIZE(env->itlb[i].tte)) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if (TTE_IS_VALID(env->itlb[i].tte)) {
+ qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
+ ", %s, %s, %s, ctx %" PRId64 " %s\n",
+ i,
+ env->itlb[i].tag & (uint64_t)~0x1fffULL,
+ TTE_PA(env->itlb[i].tte),
+ mask,
+ TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
+ TTE_IS_LOCKED(env->itlb[i].tte) ?
+ "locked" : "unlocked",
+ env->itlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->itlb[i].tte) ?
+ "global" : "local");
+ }
+ }
+ }
+}
+
+#endif /* TARGET_SPARC64 */
+
+static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
+ target_ulong addr, int rw, int mmu_idx)
+{
+ target_ulong page_size;
+ int prot, access_index;
+ MemTxAttrs attrs = {};
+
+ return get_physical_address(env, phys, &prot, &access_index, &attrs, addr,
+ rw, mmu_idx, &page_size);
+}
+
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+ int mmu_idx)
+{
+ hwaddr phys_addr;
+
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
+
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ hwaddr phys_addr;
+ int mmu_idx = cpu_mmu_index(env, false);
+
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
+ return -1;
+ }
+ }
+ return phys_addr;
+}
+
+#ifndef CONFIG_USER_ONLY
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+#ifdef TARGET_SPARC64
+ env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
+ env->dmmu.sfar = addr;
+#else
+ env->mmuregs[4] = addr;
+#endif
+
+ cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sparc/monitor.c b/target/sparc/monitor.c
new file mode 100644
index 000000000..318413686
--- /dev/null
+++ b/target/sparc/monitor.c
@@ -0,0 +1,165 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env(mon);
+
+ if (!env1) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+ dump_mmu(env1);
+}
+
+#ifndef TARGET_SPARC64
+static target_long monitor_get_psr(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+
+ return cpu_get_psr(env);
+}
+#endif
+
+static target_long monitor_get_reg(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return env->regwptr[val];
+}
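+
+/*
+ * The val argument indexes the current register window via regwptr:
+ * 0..7 = %o0..%o7, 8..15 = %l0..%l7, 16..23 = %i0..%i7, matching the
+ * table below.
+ */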
+
+const MonitorDef monitor_defs[] = {
+ { "g0", offsetof(CPUSPARCState, gregs[0]) },
+ { "g1", offsetof(CPUSPARCState, gregs[1]) },
+ { "g2", offsetof(CPUSPARCState, gregs[2]) },
+ { "g3", offsetof(CPUSPARCState, gregs[3]) },
+ { "g4", offsetof(CPUSPARCState, gregs[4]) },
+ { "g5", offsetof(CPUSPARCState, gregs[5]) },
+ { "g6", offsetof(CPUSPARCState, gregs[6]) },
+ { "g7", offsetof(CPUSPARCState, gregs[7]) },
+ { "o0", 0, monitor_get_reg },
+ { "o1", 1, monitor_get_reg },
+ { "o2", 2, monitor_get_reg },
+ { "o3", 3, monitor_get_reg },
+ { "o4", 4, monitor_get_reg },
+ { "o5", 5, monitor_get_reg },
+ { "o6", 6, monitor_get_reg },
+ { "o7", 7, monitor_get_reg },
+ { "l0", 8, monitor_get_reg },
+ { "l1", 9, monitor_get_reg },
+ { "l2", 10, monitor_get_reg },
+ { "l3", 11, monitor_get_reg },
+ { "l4", 12, monitor_get_reg },
+ { "l5", 13, monitor_get_reg },
+ { "l6", 14, monitor_get_reg },
+ { "l7", 15, monitor_get_reg },
+ { "i0", 16, monitor_get_reg },
+ { "i1", 17, monitor_get_reg },
+ { "i2", 18, monitor_get_reg },
+ { "i3", 19, monitor_get_reg },
+ { "i4", 20, monitor_get_reg },
+ { "i5", 21, monitor_get_reg },
+ { "i6", 22, monitor_get_reg },
+ { "i7", 23, monitor_get_reg },
+ { "pc", offsetof(CPUSPARCState, pc) },
+ { "npc", offsetof(CPUSPARCState, npc) },
+ { "y", offsetof(CPUSPARCState, y) },
+#ifndef TARGET_SPARC64
+ { "psr", 0, &monitor_get_psr, },
+ { "wim", offsetof(CPUSPARCState, wim) },
+#endif
+ { "tbr", offsetof(CPUSPARCState, tbr) },
+ { "fsr", offsetof(CPUSPARCState, fsr) },
+ { "f0", offsetof(CPUSPARCState, fpr[0].l.upper) },
+ { "f1", offsetof(CPUSPARCState, fpr[0].l.lower) },
+ { "f2", offsetof(CPUSPARCState, fpr[1].l.upper) },
+ { "f3", offsetof(CPUSPARCState, fpr[1].l.lower) },
+ { "f4", offsetof(CPUSPARCState, fpr[2].l.upper) },
+ { "f5", offsetof(CPUSPARCState, fpr[2].l.lower) },
+ { "f6", offsetof(CPUSPARCState, fpr[3].l.upper) },
+ { "f7", offsetof(CPUSPARCState, fpr[3].l.lower) },
+ { "f8", offsetof(CPUSPARCState, fpr[4].l.upper) },
+ { "f9", offsetof(CPUSPARCState, fpr[4].l.lower) },
+ { "f10", offsetof(CPUSPARCState, fpr[5].l.upper) },
+ { "f11", offsetof(CPUSPARCState, fpr[5].l.lower) },
+ { "f12", offsetof(CPUSPARCState, fpr[6].l.upper) },
+ { "f13", offsetof(CPUSPARCState, fpr[6].l.lower) },
+ { "f14", offsetof(CPUSPARCState, fpr[7].l.upper) },
+ { "f15", offsetof(CPUSPARCState, fpr[7].l.lower) },
+ { "f16", offsetof(CPUSPARCState, fpr[8].l.upper) },
+ { "f17", offsetof(CPUSPARCState, fpr[8].l.lower) },
+ { "f18", offsetof(CPUSPARCState, fpr[9].l.upper) },
+ { "f19", offsetof(CPUSPARCState, fpr[9].l.lower) },
+ { "f20", offsetof(CPUSPARCState, fpr[10].l.upper) },
+ { "f21", offsetof(CPUSPARCState, fpr[10].l.lower) },
+ { "f22", offsetof(CPUSPARCState, fpr[11].l.upper) },
+ { "f23", offsetof(CPUSPARCState, fpr[11].l.lower) },
+ { "f24", offsetof(CPUSPARCState, fpr[12].l.upper) },
+ { "f25", offsetof(CPUSPARCState, fpr[12].l.lower) },
+ { "f26", offsetof(CPUSPARCState, fpr[13].l.upper) },
+ { "f27", offsetof(CPUSPARCState, fpr[13].l.lower) },
+ { "f28", offsetof(CPUSPARCState, fpr[14].l.upper) },
+ { "f29", offsetof(CPUSPARCState, fpr[14].l.lower) },
+ { "f30", offsetof(CPUSPARCState, fpr[15].l.upper) },
+ { "f31", offsetof(CPUSPARCState, fpr[15].l.lower) },
+#ifdef TARGET_SPARC64
+ { "f32", offsetof(CPUSPARCState, fpr[16]) },
+ { "f34", offsetof(CPUSPARCState, fpr[17]) },
+ { "f36", offsetof(CPUSPARCState, fpr[18]) },
+ { "f38", offsetof(CPUSPARCState, fpr[19]) },
+ { "f40", offsetof(CPUSPARCState, fpr[20]) },
+ { "f42", offsetof(CPUSPARCState, fpr[21]) },
+ { "f44", offsetof(CPUSPARCState, fpr[22]) },
+ { "f46", offsetof(CPUSPARCState, fpr[23]) },
+ { "f48", offsetof(CPUSPARCState, fpr[24]) },
+ { "f50", offsetof(CPUSPARCState, fpr[25]) },
+ { "f52", offsetof(CPUSPARCState, fpr[26]) },
+ { "f54", offsetof(CPUSPARCState, fpr[27]) },
+ { "f56", offsetof(CPUSPARCState, fpr[28]) },
+ { "f58", offsetof(CPUSPARCState, fpr[29]) },
+ { "f60", offsetof(CPUSPARCState, fpr[30]) },
+ { "f62", offsetof(CPUSPARCState, fpr[31]) },
+ { "asi", offsetof(CPUSPARCState, asi) },
+ { "pstate", offsetof(CPUSPARCState, pstate) },
+ { "cansave", offsetof(CPUSPARCState, cansave) },
+ { "canrestore", offsetof(CPUSPARCState, canrestore) },
+ { "otherwin", offsetof(CPUSPARCState, otherwin) },
+ { "wstate", offsetof(CPUSPARCState, wstate) },
+ { "cleanwin", offsetof(CPUSPARCState, cleanwin) },
+ { "fprs", offsetof(CPUSPARCState, fprs) },
+#endif
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
diff --git a/target/sparc/trace-events b/target/sparc/trace-events
new file mode 100644
index 000000000..de9833283
--- /dev/null
+++ b/target/sparc/trace-events
@@ -0,0 +1,32 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# mmu_helper.c
+mmu_helper_dfault(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DFAULT at 0x%"PRIx64" context 0x%"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dprot(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DPROT at 0x%"PRIx64" context 0x%"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dmiss(uint64_t address, uint64_t context) "DMISS at 0x%"PRIx64" context 0x%"PRIx64
+mmu_helper_tfault(uint64_t address, uint64_t context) "TFAULT at 0x%"PRIx64" context 0x%"PRIx64
+mmu_helper_tmiss(uint64_t address, uint64_t context) "TMISS at 0x%"PRIx64" context 0x%"PRIx64
+mmu_helper_get_phys_addr_code(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=0x%"PRIx64" secondary context=0x%"PRIx64" address=0x%"PRIx64
+mmu_helper_get_phys_addr_data(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=0x%"PRIx64" secondary context=0x%"PRIx64" address=0x%"PRIx64
+mmu_helper_mmu_fault(uint64_t address, uint64_t paddr, int mmu_idx, uint32_t tl, uint64_t prim_context, uint64_t sec_context) "Translate at 0x%"PRIx64" -> 0x%"PRIx64", mmu_idx=%d tl=%d primary context=0x%"PRIx64" secondary context=0x%"PRIx64
+
+# int32_helper.c
+sun4m_cpu_interrupt(unsigned int level) "Set CPU IRQ %d"
+sun4m_cpu_reset_interrupt(unsigned int level) "Reset CPU IRQ %d"
+
+# int64_helper.c
+int_helper_set_softint(uint32_t softint) "new 0x%08x"
+int_helper_clear_softint(uint32_t softint) "new 0x%08x"
+int_helper_write_softint(uint32_t softint) "new 0x%08x"
+sparc64_cpu_check_irqs_reset_irq(int intno) "Reset CPU IRQ (current interrupt 0x%x)"
+sparc64_cpu_check_irqs_noset_irq(uint32_t tl, uint32_t tt, int intno) "Not setting CPU IRQ: TL=%d current 0x%x >= pending 0x%x"
+sparc64_cpu_check_irqs_set_irq(unsigned int i, int old, int new) "Set CPU IRQ %d old=0x%x new=0x%x"
+sparc64_cpu_check_irqs_disabled(uint32_t pil, uint32_t pil_in, uint32_t softint, int intno) "Interrupts disabled, pil=0x%08x pil_in=0x%08x softint=0x%08x current interrupt 0x%x"
+
+# win_helper.c
+win_helper_gregset_error(uint32_t pstate) "ERROR in get_gregset: active pstate bits=0x%x"
+win_helper_switch_pstate(uint32_t pstate_regs, uint32_t new_pstate_regs) "change_pstate: switching regs old=0x%x new=0x%x"
+win_helper_no_switch_pstate(uint32_t new_pstate_regs) "change_pstate: regs new=0x%x (unchanged)"
+win_helper_wrpil(uint32_t psrpil, uint32_t new_pil) "old=0x%x new=0x%x"
+win_helper_done(uint32_t tl) "tl=%d"
+win_helper_retry(uint32_t tl) "tl=%d"
diff --git a/target/sparc/trace.h b/target/sparc/trace.h
new file mode 100644
index 000000000..3b2f5a8e2
--- /dev/null
+++ b/target/sparc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-target_sparc.h"
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
new file mode 100644
index 000000000..fdb8bbe5d
--- /dev/null
+++ b/target/sparc/translate.c
@@ -0,0 +1,6031 @@
+/*
+ SPARC translation
+
+ Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
+ Copyright (C) 2003-2005 Fabrice Bellard
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-gen.h"
+
+#include "exec/translator.h"
+#include "exec/log.h"
+#include "asi.h"
+
+
+#define DEBUG_DISAS
+
+#define DYNAMIC_PC 1 /* dynamic pc value */
+#define JUMP_PC 2 /* dynamic pc value which takes only two possible
+ values, selected via jump_pc[] */
+
+#define DISAS_EXIT DISAS_TARGET_0
+
+/* global register indexes */
+static TCGv_ptr cpu_regwptr;
+static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
+static TCGv_i32 cpu_cc_op;
+static TCGv_i32 cpu_psr;
+static TCGv cpu_fsr, cpu_pc, cpu_npc;
+static TCGv cpu_regs[32];
+static TCGv cpu_y;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_tbr;
+#endif
+static TCGv cpu_cond;
+#ifdef TARGET_SPARC64
+static TCGv_i32 cpu_xcc, cpu_fprs;
+static TCGv cpu_gsr;
+static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
+static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
+#else
+static TCGv cpu_wim;
+#endif
+/* Floating point registers */
+static TCGv_i64 cpu_fpr[TARGET_DPREGS];
+
+#include "exec/gen-icount.h"
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
+ target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
+ target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
+ int mem_idx;
+ bool fpu_enabled;
+ bool address_mask_32bit;
+#ifndef CONFIG_USER_ONLY
+ bool supervisor;
+#ifdef TARGET_SPARC64
+ bool hypervisor;
+#endif
+#endif
+
+ uint32_t cc_op; /* current CC operation */
+ sparc_def_t *def;
+ TCGv_i32 t32[3];
+ TCGv ttl[5];
+ int n_t32;
+ int n_ttl;
+#ifdef TARGET_SPARC64
+ int fprs_dirty;
+ int asi;
+#endif
+} DisasContext;
+
+typedef struct {
+ TCGCond cond;
+ bool is_bool;
+ bool g1, g2;
+ TCGv c1, c2;
+} DisasCompare;
+
+// This macro uses non-native bit order (bit 0 is the MSB)
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+
+// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 31 - (TO), 31 - (FROM))
+
+#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
+#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
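+
+/*
+ * For illustration: GET_FIELD counts bits from the MSB, so for a SPARC
+ * instruction word
+ *
+ *   GET_FIELD(insn, 0, 1)       == (insn >> 30) & 0x3    the op field
+ *   GET_FIELD_SP(insn, 25, 29)  == (insn >> 25) & 0x1f   the rd field
+ */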
+
+#ifdef TARGET_SPARC64
+#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
+#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
+#else
+#define DFPREG(r) (r & 0x1e)
+#define QFPREG(r) (r & 0x1c)
+#endif
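+
+/*
+ * For illustration: in the SPARC64 encoding the LSB of a double/quad
+ * register field is the MSB of the register number, so an encoded field
+ * of 3 (0b00011) names %f34: DFPREG(3) = (1 << 5) | 2 = 34.
+ */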
+
+#define UA2005_HTRAP_MASK 0xff
+#define V8_TRAP_MASK 0x7f
+
+static int sign_extend(int x, int len)
+{
+ len = 32 - len;
+ return (x << len) >> len;
+}
+
+#define IS_IMM (insn & (1<<13))
+
+static inline TCGv_i32 get_temp_i32(DisasContext *dc)
+{
+ TCGv_i32 t;
+ assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
+ dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
+ return t;
+}
+
+static inline TCGv get_temp_tl(DisasContext *dc)
+{
+ TCGv t;
+ assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
+ dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
+ return t;
+}
+
+static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
+{
+#if defined(TARGET_SPARC64)
+ int bit = (rd < 32) ? 1 : 2;
+ /* If we know we've already set this bit within the TB,
+ we can avoid setting it again. */
+ if (!(dc->fprs_dirty & bit)) {
+ dc->fprs_dirty |= bit;
+ tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
+ }
+#endif
+}
+
+/* floating point registers moves */
+static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
+{
+#if TCG_TARGET_REG_BITS == 32
+ if (src & 1) {
+ return TCGV_LOW(cpu_fpr[src / 2]);
+ } else {
+ return TCGV_HIGH(cpu_fpr[src / 2]);
+ }
+#else
+ TCGv_i32 ret = get_temp_i32(dc);
+ if (src & 1) {
+ tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
+ } else {
+ tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
+ }
+ return ret;
+#endif
+}
+
+static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
+{
+#if TCG_TARGET_REG_BITS == 32
+ if (dst & 1) {
+ tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
+ } else {
+ tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
+ }
+#else
+ TCGv_i64 t = (TCGv_i64)v;
+ tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
+ (dst & 1 ? 0 : 32), 32);
+#endif
+ gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
+{
+ return get_temp_i32(dc);
+}
+
+static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
+{
+ src = DFPREG(src);
+ return cpu_fpr[src / 2];
+}
+
+static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
+{
+ dst = DFPREG(dst);
+ tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
+ gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
+{
+ return cpu_fpr[DFPREG(dst) / 2];
+}
+
+static void gen_op_load_fpr_QT0(unsigned int src)
+{
+ tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_load_fpr_QT1(unsigned int src)
+{
+ tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_store_QT0_fpr(unsigned int dst)
+{
+ tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
+ TCGv_i64 v1, TCGv_i64 v2)
+{
+ dst = QFPREG(dst);
+
+ tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
+ tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
+ gen_update_fprs_dirty(dc, dst);
+}
+
+#ifdef TARGET_SPARC64
+static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
+{
+ src = QFPREG(src);
+ return cpu_fpr[src / 2];
+}
+
+static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
+{
+ src = QFPREG(src);
+ return cpu_fpr[src / 2 + 1];
+}
+
+static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
+{
+ rd = QFPREG(rd);
+ rs = QFPREG(rs);
+
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+ gen_update_fprs_dirty(dc, rd);
+}
+#endif
+
+/* moves */
+#ifdef CONFIG_USER_ONLY
+#define supervisor(dc) 0
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) 0
+#endif
+#else
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) (dc->hypervisor)
+#define supervisor(dc) (dc->supervisor | dc->hypervisor)
+#else
+#define supervisor(dc) (dc->supervisor)
+#endif
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(dc) ((dc)->address_mask_32bit)
+#else
+#define AM_CHECK(dc) (1)
+#endif
+#endif
+
+static inline void gen_address_mask(DisasContext *dc, TCGv addr)
+{
+#ifdef TARGET_SPARC64
+ if (AM_CHECK(dc))
+ tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
+#endif
+}
+
+static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ return cpu_regs[reg];
+ } else {
+ TCGv t = get_temp_tl(dc);
+ tcg_gen_movi_tl(t, 0);
+ return t;
+ }
+}
+
+static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ tcg_gen_mov_tl(cpu_regs[reg], v);
+ }
+}
+
+static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ return cpu_regs[reg];
+ } else {
+ return get_temp_tl(dc);
+ }
+}
+
+static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
+{
+ return translator_use_goto_tb(&s->base, pc) &&
+ translator_use_goto_tb(&s->base, npc);
+}
+
+static void gen_goto_tb(DisasContext *s, int tb_num,
+ target_ulong pc, target_ulong npc)
+{
+ if (use_goto_tb(s, pc, npc)) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb(s->base.tb, tb_num);
+ } else {
+ /* jump to another page: currently not optimized */
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb(NULL, 0);
+ }
+}
+
+// XXX suboptimal
+static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
+}
+
+static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
+}
+
+static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
+}
+
+static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
+}
+
+static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static TCGv_i32 gen_add32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous add: (dst < src) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
+#else
+ cc_src1_32 = cpu_cc_dst;
+ cc_src2_32 = cpu_cc_src;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
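+
+/*
+ * For illustration: (dst < src) is the usual unsigned-overflow test.
+ * If dst = src1 + src2 wrapped past 2^32, then dst < src1; otherwise
+ * dst >= src1. E.g. 0xffffffff + 2 = 0x00000001 < 0xffffffff, carry = 1.
+ */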
+
+static TCGv_i32 gen_sub32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous borrow: (src1 < src2) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
+#else
+ cc_src1_32 = cpu_cc_src;
+ cc_src2_32 = cpu_cc_src2;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
+
+static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain ADD. */
+ if (update_cc) {
+ gen_op_add_cc(dst, src1, src2);
+ } else {
+ tcg_gen_add_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ if (TARGET_LONG_BITS == 32) {
+ /* We can re-use the host's hardware carry generation by using
+ an ADD2 opcode. We discard the low part of the output.
+ Ideally we'd combine this operation with the add that
+ generated the carry in the first place. */
+ carry = tcg_temp_new();
+ tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(carry);
+ goto add_done;
+ }
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32, cpu_env);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_add_tl(dst, src1, src2);
+ tcg_gen_add_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+ add_done:
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
+ dc->cc_op = CC_OP_ADDX;
+ }
+}
+
+static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain SUB. */
+ if (update_cc) {
+ gen_op_sub_cc(dst, src1, src2);
+ } else {
+ tcg_gen_sub_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ if (TARGET_LONG_BITS == 32) {
+ /* We can re-use the host's hardware carry generation by using
+ a SUB2 opcode. We discard the low part of the output.
+ Ideally we'd combine this operation with the subtraction that
+ generated the carry in the first place. */
+ carry = tcg_temp_new();
+ tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(carry);
+ goto sub_done;
+ }
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32, cpu_env);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_sub_tl(dst, src1, src2);
+ tcg_gen_sub_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+ sub_done:
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
+ dc->cc_op = CC_OP_SUBX;
+ }
+}
+
+static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
+{
+ TCGv r_temp, zero, t0;
+
+ r_temp = tcg_temp_new();
+ t0 = tcg_temp_new();
+
+ /* old op:
+ if (!(env->y & 1))
+ T1 = 0;
+ */
+ zero = tcg_const_tl(0);
+ tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
+ tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
+ tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
+ zero, cpu_cc_src2);
+ tcg_temp_free(zero);
+
+ // b2 = T0 & 1;
+ // env->y = (b2 << 31) | (env->y >> 1);
+ tcg_gen_extract_tl(t0, cpu_y, 1, 31);
+ tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);
+
+ // b1 = N ^ V;
+ gen_mov_reg_N(t0, cpu_psr);
+ gen_mov_reg_V(r_temp, cpu_psr);
+ tcg_gen_xor_tl(t0, t0, r_temp);
+ tcg_temp_free(r_temp);
+
+ // T0 = (b1 << 31) | (T0 >> 1);
+ // src1 = T0;
+ tcg_gen_shli_tl(t0, t0, 31);
+ tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
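+
+/*
+ * For illustration: MULScc performs one step of a 32-step shift-and-add
+ * multiply. Each step zeroes the addend if Y's LSB is clear, shifts Y
+ * right inserting src1's LSB at bit 31, shifts src1 right inserting
+ * (N ^ V) at bit 31, then adds. Executed 32 times, this leaves the
+ * 64-bit product spread across the result register and Y.
+ */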
+
+static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
+{
+#if TARGET_LONG_BITS == 32
+ if (sign_ext) {
+ tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
+ } else {
+ tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
+ }
+#else
+ TCGv t0 = tcg_temp_new_i64();
+ TCGv t1 = tcg_temp_new_i64();
+
+ if (sign_ext) {
+ tcg_gen_ext32s_i64(t0, src1);
+ tcg_gen_ext32s_i64(t1, src2);
+ } else {
+ tcg_gen_ext32u_i64(t0, src1);
+ tcg_gen_ext32u_i64(t1, src2);
+ }
+
+ tcg_gen_mul_i64(dst, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+
+ tcg_gen_shri_i64(cpu_y, dst, 32);
+#endif
+}
+
+static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* zero-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 0);
+}
+
+static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* sign-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 1);
+}
+
+// 1
+static inline void gen_op_eval_ba(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 1);
+}
+
+// Z
+static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+}
+
+// Z | (N ^ V)
+static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_N(t0, src);
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xor_tl(dst, dst, t0);
+ gen_mov_reg_Z(t0, src);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// N ^ V
+static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_V(t0, src);
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// C | Z
+static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_Z(t0, src);
+ gen_mov_reg_C(dst, src);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// C
+static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+}
+
+// V
+static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+}
+
+// 0
+static inline void gen_op_eval_bn(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 0);
+}
+
+// N
+static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+}
+
+// !Z
+static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(Z | (N ^ V))
+static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_ble(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(N ^ V)
+static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_bl(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(C | Z)
+static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_bleu(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !C
+static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !N
+static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !V
+static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+/*
+ FSR bit field FCC1 | FCC0:
+ 0 = equal
+ 1 = less than
+ 2 = greater than
+ 3 = unordered
+*/
+static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+// !0: FCC0 | FCC1
+static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 1 or 2: FCC0 ^ FCC1
+static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 1 or 3: FCC0
+static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+}
+
+// 1: FCC0 & !FCC1
+static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 2 or 3: FCC1
+static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+}
+
+// 2: !FCC0 & FCC1
+static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, t0, dst);
+ tcg_temp_free(t0);
+}
+
+// 3: FCC0 & FCC1
+static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 0: !(FCC0 | FCC1)
+static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 3: !(FCC0 ^ FCC1)
+static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 2: !FCC0
+static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !1: !(FCC0 & !FCC1)
+static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 1: !FCC1
+static inline void gen_op_eval_fble(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !2: !(!FCC0 & FCC1)
+static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, t0, dst);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// !3: !(FCC0 & FCC1)
+static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
+ target_ulong pc2, TCGv r_cond)
+{
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, pc1, pc1 + 4);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, pc2, pc2 + 4);
+}
+
+static void gen_branch_a(DisasContext *dc, target_ulong pc1)
+{
+ TCGLabel *l1 = gen_new_label();
+ target_ulong npc = dc->npc;
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, npc, pc1);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, npc + 4, npc + 8);
+
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_branch_n(DisasContext *dc, target_ulong pc1)
+{
+ target_ulong npc = dc->npc;
+
+ if (likely(npc != DYNAMIC_PC)) {
+ dc->pc = npc;
+ dc->jump_pc[0] = pc1;
+ dc->jump_pc[1] = npc + 4;
+ dc->npc = JUMP_PC;
+ } else {
+ TCGv t, z;
+
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+ t = tcg_const_tl(pc1);
+ z = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
+ tcg_temp_free(t);
+ tcg_temp_free(z);
+
+ dc->pc = DYNAMIC_PC;
+ }
+}
+
+static inline void gen_generic_branch(DisasContext *dc)
+{
+ TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
+ TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
+ TCGv zero = tcg_const_tl(0);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
+
+ tcg_temp_free(npc0);
+ tcg_temp_free(npc1);
+ tcg_temp_free(zero);
+}
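+
+/*
+ * For illustration: a conditional branch whose outcome is only known at
+ * run time leaves npc == JUMP_PC with jump_pc[0] = branch target and
+ * jump_pc[1] = fall-through (npc + 4); gen_generic_branch() above then
+ * resolves it as cpu_npc = (cpu_cond != 0) ? jump_pc[0] : jump_pc[1].
+ */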
+
+/* Call this function before using the condition register, as it may
+ have been set up for a pending conditional branch */
+static inline void flush_cond(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ dc->npc = DYNAMIC_PC;
+ }
+}
+
+static inline void save_npc(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ dc->npc = DYNAMIC_PC;
+ } else if (dc->npc != DYNAMIC_PC) {
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ }
+}
+
+static inline void update_psr(DisasContext *dc)
+{
+ if (dc->cc_op != CC_OP_FLAGS) {
+ dc->cc_op = CC_OP_FLAGS;
+ gen_helper_compute_psr(cpu_env);
+ }
+}
+
+static inline void save_state(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ save_npc(dc);
+}
+
+static void gen_exception(DisasContext *dc, int which)
+{
+ TCGv_i32 t;
+
+ save_state(dc);
+ t = tcg_const_i32(which);
+ gen_helper_raise_exception(cpu_env, t);
+ tcg_temp_free_i32(t);
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_check_align(TCGv addr, int mask)
+{
+ TCGv_i32 r_mask = tcg_const_i32(mask);
+ gen_helper_check_align(cpu_env, addr, r_mask);
+ tcg_temp_free_i32(r_mask);
+}
+
+static inline void gen_mov_pc_npc(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else if (dc->npc == DYNAMIC_PC) {
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else {
+ dc->pc = dc->npc;
+ }
+}
+
+static inline void gen_op_next_insn(void)
+{
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+}
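+
+/*
+ * For illustration of the pc/npc model: every instruction step performs
+ * pc <- npc; npc <- npc + 4, so a delayed control transfer only replaces
+ * npc, letting the delay-slot instruction at the old npc execute first.
+ */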
+
+static void free_compare(DisasCompare *cmp)
+{
+ if (!cmp->g1) {
+ tcg_temp_free(cmp->c1);
+ }
+ if (!cmp->g2) {
+ tcg_temp_free(cmp->c2);
+ }
+}
+
+static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
+ DisasContext *dc)
+{
+ static int subcc_cond[16] = {
+ TCG_COND_NEVER,
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+ TCG_COND_LEU,
+ TCG_COND_LTU,
+ -1, /* neg */
+ -1, /* overflow */
+ TCG_COND_ALWAYS,
+ TCG_COND_NE,
+ TCG_COND_GT,
+ TCG_COND_GE,
+ TCG_COND_GTU,
+ TCG_COND_GEU,
+ -1, /* pos */
+ -1, /* no overflow */
+ };
+
+ static int logic_cond[16] = {
+ TCG_COND_NEVER,
+ TCG_COND_EQ, /* eq: Z */
+ TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
+ TCG_COND_LT, /* lt: N ^ V -> N */
+ TCG_COND_EQ, /* leu: C | Z -> Z */
+ TCG_COND_NEVER, /* ltu: C -> 0 */
+ TCG_COND_LT, /* neg: N */
+ TCG_COND_NEVER, /* vs: V -> 0 */
+ TCG_COND_ALWAYS,
+ TCG_COND_NE, /* ne: !Z */
+ TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
+ TCG_COND_GE, /* ge: !(N ^ V) -> !N */
+ TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
+ TCG_COND_ALWAYS, /* geu: !C -> 1 */
+ TCG_COND_GE, /* pos: !N */
+ TCG_COND_ALWAYS, /* vc: !V -> 1 */
+ };
+
+ TCGv_i32 r_src;
+ TCGv r_dst;
+
+#ifdef TARGET_SPARC64
+ if (xcc) {
+ r_src = cpu_xcc;
+ } else {
+ r_src = cpu_psr;
+ }
+#else
+ r_src = cpu_psr;
+#endif
+
+ switch (dc->cc_op) {
+ case CC_OP_LOGIC:
+ cmp->cond = logic_cond[cond];
+ do_compare_dst_0:
+ cmp->is_bool = false;
+ cmp->g2 = false;
+ cmp->c2 = tcg_const_tl(0);
+#ifdef TARGET_SPARC64
+ if (!xcc) {
+ cmp->g1 = false;
+ cmp->c1 = tcg_temp_new();
+ tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
+ break;
+ }
+#endif
+ cmp->g1 = true;
+ cmp->c1 = cpu_cc_dst;
+ break;
+
+ case CC_OP_SUB:
+ switch (cond) {
+ case 6: /* neg */
+ case 14: /* pos */
+ cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
+ goto do_compare_dst_0;
+
+ case 7: /* overflow */
+ case 15: /* !overflow */
+ goto do_dynamic;
+
+ default:
+ cmp->cond = subcc_cond[cond];
+ cmp->is_bool = false;
+#ifdef TARGET_SPARC64
+ if (!xcc) {
+ /* Note that sign-extension works for unsigned compares as
+ long as both operands are sign-extended. */
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = tcg_temp_new();
+ cmp->c2 = tcg_temp_new();
+ tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
+ tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
+ break;
+ }
+#endif
+ cmp->g1 = cmp->g2 = true;
+ cmp->c1 = cpu_cc_src;
+ cmp->c2 = cpu_cc_src2;
+ break;
+ }
+ break;
+
+ default:
+ do_dynamic:
+ gen_helper_compute_psr(cpu_env);
+ dc->cc_op = CC_OP_FLAGS;
+ /* FALLTHRU */
+
+ case CC_OP_FLAGS:
+ /* We're going to generate a boolean result. */
+ cmp->cond = TCG_COND_NE;
+ cmp->is_bool = true;
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = r_dst = tcg_temp_new();
+ cmp->c2 = tcg_const_tl(0);
+
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_be(r_dst, r_src);
+ break;
+ case 0x2:
+ gen_op_eval_ble(r_dst, r_src);
+ break;
+ case 0x3:
+ gen_op_eval_bl(r_dst, r_src);
+ break;
+ case 0x4:
+ gen_op_eval_bleu(r_dst, r_src);
+ break;
+ case 0x5:
+ gen_op_eval_bcs(r_dst, r_src);
+ break;
+ case 0x6:
+ gen_op_eval_bneg(r_dst, r_src);
+ break;
+ case 0x7:
+ gen_op_eval_bvs(r_dst, r_src);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_bne(r_dst, r_src);
+ break;
+ case 0xa:
+ gen_op_eval_bg(r_dst, r_src);
+ break;
+ case 0xb:
+ gen_op_eval_bge(r_dst, r_src);
+ break;
+ case 0xc:
+ gen_op_eval_bgu(r_dst, r_src);
+ break;
+ case 0xd:
+ gen_op_eval_bcc(r_dst, r_src);
+ break;
+ case 0xe:
+ gen_op_eval_bpos(r_dst, r_src);
+ break;
+ case 0xf:
+ gen_op_eval_bvc(r_dst, r_src);
+ break;
+ }
+ break;
+ }
+}
+
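+/* Fill in CMP with a description of float condition COND of field
+ fcc[CC]. OFFSET rebases fcc1..fcc3 (FSR bits 33:32, 35:34, 37:36)
+ onto the fcc0 position (FSR bits 11:10) for the eval helpers. */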
+static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
+{
+ unsigned int offset;
+ TCGv r_dst;
+
+ /* For now we still generate a straight boolean result. */
+ cmp->cond = TCG_COND_NE;
+ cmp->is_bool = true;
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = r_dst = tcg_temp_new();
+ cmp->c2 = tcg_const_tl(0);
+
+ switch (cc) {
+ default:
+ case 0x0:
+ offset = 0;
+ break;
+ case 0x1:
+ offset = 32 - 10;
+ break;
+ case 0x2:
+ offset = 34 - 10;
+ break;
+ case 0x3:
+ offset = 36 - 10;
+ break;
+ }
+
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_fbne(r_dst, cpu_fsr, offset);
+ break;
+ case 0x2:
+ gen_op_eval_fblg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x3:
+ gen_op_eval_fbul(r_dst, cpu_fsr, offset);
+ break;
+ case 0x4:
+ gen_op_eval_fbl(r_dst, cpu_fsr, offset);
+ break;
+ case 0x5:
+ gen_op_eval_fbug(r_dst, cpu_fsr, offset);
+ break;
+ case 0x6:
+ gen_op_eval_fbg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x7:
+ gen_op_eval_fbu(r_dst, cpu_fsr, offset);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_fbe(r_dst, cpu_fsr, offset);
+ break;
+ case 0xa:
+ gen_op_eval_fbue(r_dst, cpu_fsr, offset);
+ break;
+ case 0xb:
+ gen_op_eval_fbge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xc:
+ gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xd:
+ gen_op_eval_fble(r_dst, cpu_fsr, offset);
+ break;
+ case 0xe:
+ gen_op_eval_fbule(r_dst, cpu_fsr, offset);
+ break;
+ case 0xf:
+ gen_op_eval_fbo(r_dst, cpu_fsr, offset);
+ break;
+ }
+}
+
+static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
+ DisasContext *dc)
+{
+ DisasCompare cmp;
+ gen_compare(&cmp, cc, cond, dc);
+
+ /* The interface is to return a boolean in r_dst. */
+ if (cmp.is_bool) {
+ tcg_gen_mov_tl(r_dst, cmp.c1);
+ } else {
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+ }
+
+ free_compare(&cmp);
+}
+
+static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
+{
+ DisasCompare cmp;
+ gen_fcompare(&cmp, cc, cond);
+
+ /* The interface is to return a boolean in r_dst. */
+ if (cmp.is_bool) {
+ tcg_gen_mov_tl(r_dst, cmp.c1);
+ } else {
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+ }
+
+ free_compare(&cmp);
+}
+
+#ifdef TARGET_SPARC64
+/* Inverted logic: the table stores the negation of each register
+ condition; gen_compare_reg applies tcg_invert_cond to undo it. */
+static const int gen_tcg_cond_reg[8] = {
+ -1,
+ TCG_COND_NE,
+ TCG_COND_GT,
+ TCG_COND_GE,
+ -1,
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+};
+
+static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
+{
+ cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
+ cmp->is_bool = false;
+ cmp->g1 = true;
+ cmp->g2 = false;
+ cmp->c1 = r_src;
+ cmp->c2 = tcg_const_tl(0);
+}
+
+static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+{
+ DisasCompare cmp;
+ gen_compare_reg(&cmp, cond, r_src);
+
+ /* The interface is to return a boolean in r_dst. */
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+
+ free_compare(&cmp);
+}
+#endif
+
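+/* Branch on integer condition codes. Bit 29 of the instruction is the
+ annul bit: for ba/bn it unconditionally annuls the delay slot, while
+ for the conditional cases the delay slot is annulled only when the
+ branch is not taken. */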
+static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc);
+ gen_cond(cpu_cond, cc, cond, dc);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+ }
+}
+
+static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc);
+ gen_fcond(cpu_cond, cc, cond);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+ }
+}
+
+#ifdef TARGET_SPARC64
+static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
+ TCGv r_reg)
+{
+ unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+ flush_cond(dc);
+ gen_cond_reg(cpu_cond, cond, r_reg);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+}
+
+static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpq(cpu_fsr, cpu_env);
+ break;
+ case 1:
+ gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
+ break;
+ case 2:
+ gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
+ break;
+ case 3:
+ gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpeq(cpu_fsr, cpu_env);
+ break;
+ case 1:
+ gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
+ break;
+ case 2:
+ gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
+ break;
+ case 3:
+ gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
+ break;
+ }
+}
+
+#else
+
+static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ gen_helper_fcmpq(cpu_fsr, cpu_env);
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ gen_helper_fcmpeq(cpu_fsr, cpu_env);
+}
+#endif
+
+static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
+{
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
+ tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
+ gen_exception(dc, TT_FP_EXCP);
+}
+
+static int gen_trap_ifnofpu(DisasContext *dc)
+{
+#if !defined(CONFIG_USER_ONLY)
+ if (!dc->fpu_enabled) {
+ gen_exception(dc, TT_NFPU_INSN);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static inline void gen_op_clear_ieee_excp_and_FTT(void)
+{
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
+}
+
+static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i32 dst, src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, src);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, src1, src2);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_gsr, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src0, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ src0 = gen_load_fpr_D(dc, rd);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src0, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT1(QFPREG(rs));
+
+ gen(cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT1(QFPREG(rs));
+
+ gen(cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+#endif
+
+static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+
+ gen(cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+
+ gen(cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr))
+{
+ TCGv_i32 dst;
+
+ gen_op_load_fpr_QT1(QFPREG(rs));
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr))
+{
+ TCGv_i64 dst;
+
+ gen_op_load_fpr_QT1(QFPREG(rs));
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i32))
+{
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+
+ gen(cpu_env, src);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i64))
+{
+ TCGv_i64 src;
+
+ src = gen_load_fpr_D(dc, rs);
+
+ gen(cpu_env, src);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
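+/* SWAP: atomically exchange a register with the memory word at ADDR. */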
+static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
+ TCGv addr, int mmu_idx, MemOp memop)
+{
+ gen_address_mask(dc, addr);
+ tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+}
+
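+/* LDSTUB: atomically load the byte at ADDR and replace it with 0xff. */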
+static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
+{
+ TCGv m1 = tcg_const_tl(0xff);
+ gen_address_mask(dc, addr);
+ tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
+ tcg_temp_free(m1);
+}
+
+/* asi moves */
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+typedef enum {
+ GET_ASI_HELPER,
+ GET_ASI_EXCP,
+ GET_ASI_DIRECT,
+ GET_ASI_DTWINX,
+ GET_ASI_BLOCK,
+ GET_ASI_SHORT,
+ GET_ASI_BCOPY,
+ GET_ASI_BFILL,
+} ASIType;
+
+typedef struct {
+ ASIType type;
+ int asi;
+ int mem_idx;
+ MemOp memop;
+} DisasASI;
+
+static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
+{
+ int asi = GET_FIELD(insn, 19, 26);
+ ASIType type = GET_ASI_HELPER;
+ int mem_idx = dc->mem_idx;
+
+#ifndef TARGET_SPARC64
+ /* Before v9, all asis are immediate and privileged. */
+ if (IS_IMM) {
+ gen_exception(dc, TT_ILL_INSN);
+ type = GET_ASI_EXCP;
+ } else if (supervisor(dc)
+ /* Note that LEON accepts ASI_USERDATA in user mode, for
+ use with CASA. Also note that previous versions of
+ QEMU allowed (and old versions of gcc emitted) ASI_P
+ for LEON, which is incorrect. */
+ || (asi == ASI_USERDATA
+ && (dc->def->features & CPU_FEATURE_CASA))) {
+ switch (asi) {
+ case ASI_USERDATA: /* User data access */
+ mem_idx = MMU_USER_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_KERNELDATA: /* Supervisor data access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ mem_idx = MMU_PHYS_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_M_BCOPY: /* Block copy, sta access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_BCOPY;
+ break;
+ case ASI_M_BFILL: /* Block fill, stda access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_BFILL;
+ break;
+ }
+
+ /* MMU_PHYS_IDX is used when the MMU is disabled, bypassing the
+ * permission check in get_physical_address().
+ */
+ mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
+ } else {
+ gen_exception(dc, TT_PRIV_INSN);
+ type = GET_ASI_EXCP;
+ }
+#else
+ if (IS_IMM) {
+ asi = dc->asi;
+ }
+ /* With v9, all asis below 0x80 are privileged. */
+ /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
+ down that bit into DisasContext. For the moment that's ok,
+ since the direct implementations below don't have any ASIs
+ in the restricted [0x30, 0x7f] range, and the check will be
+ done properly in the helper. */
+ if (!supervisor(dc) && asi < 0x80) {
+ gen_exception(dc, TT_PRIV_ACT);
+ type = GET_ASI_EXCP;
+ } else {
+ switch (asi) {
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_QUAD_LDD_PHYS:
+ case ASI_QUAD_LDD_PHYS_L:
+ mem_idx = MMU_PHYS_IDX;
+ break;
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus LE */
+ case ASI_TWINX_N:
+ case ASI_TWINX_NL:
+ case ASI_NUCLEUS_QUAD_LDD:
+ case ASI_NUCLEUS_QUAD_LDD_L:
+ if (hypervisor(dc)) {
+ mem_idx = MMU_PHYS_IDX;
+ } else {
+ mem_idx = MMU_NUCLEUS_IDX;
+ }
+ break;
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_TWINX_AIUP:
+ case ASI_TWINX_AIUP_L:
+ case ASI_BLK_AIUP_4V:
+ case ASI_BLK_AIUP_L_4V:
+ case ASI_BLK_AIUP:
+ case ASI_BLK_AIUPL:
+ mem_idx = MMU_USER_IDX;
+ break;
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_TWINX_AIUS:
+ case ASI_TWINX_AIUS_L:
+ case ASI_BLK_AIUS_4V:
+ case ASI_BLK_AIUS_L_4V:
+ case ASI_BLK_AIUS:
+ case ASI_BLK_AIUSL:
+ mem_idx = MMU_USER_SECONDARY_IDX;
+ break;
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ case ASI_TWINX_S:
+ case ASI_TWINX_SL:
+ case ASI_BLK_COMMIT_S:
+ case ASI_BLK_S:
+ case ASI_BLK_SL:
+ case ASI_FL8_S:
+ case ASI_FL8_SL:
+ case ASI_FL16_S:
+ case ASI_FL16_SL:
+ if (mem_idx == MMU_USER_IDX) {
+ mem_idx = MMU_USER_SECONDARY_IDX;
+ } else if (mem_idx == MMU_KERNEL_IDX) {
+ mem_idx = MMU_KERNEL_SECONDARY_IDX;
+ }
+ break;
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_TWINX_P:
+ case ASI_TWINX_PL:
+ case ASI_BLK_COMMIT_P:
+ case ASI_BLK_P:
+ case ASI_BLK_PL:
+ case ASI_FL8_P:
+ case ASI_FL8_PL:
+ case ASI_FL16_P:
+ case ASI_FL16_PL:
+ break;
+ }
+ switch (asi) {
+ case ASI_REAL:
+ case ASI_REAL_IO:
+ case ASI_REAL_L:
+ case ASI_REAL_IO_L:
+ case ASI_N:
+ case ASI_NL:
+ case ASI_AIUP:
+ case ASI_AIUPL:
+ case ASI_AIUS:
+ case ASI_AIUSL:
+ case ASI_S:
+ case ASI_SL:
+ case ASI_P:
+ case ASI_PL:
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_TWINX_REAL:
+ case ASI_TWINX_REAL_L:
+ case ASI_TWINX_N:
+ case ASI_TWINX_NL:
+ case ASI_TWINX_AIUP:
+ case ASI_TWINX_AIUP_L:
+ case ASI_TWINX_AIUS:
+ case ASI_TWINX_AIUS_L:
+ case ASI_TWINX_P:
+ case ASI_TWINX_PL:
+ case ASI_TWINX_S:
+ case ASI_TWINX_SL:
+ case ASI_QUAD_LDD_PHYS:
+ case ASI_QUAD_LDD_PHYS_L:
+ case ASI_NUCLEUS_QUAD_LDD:
+ case ASI_NUCLEUS_QUAD_LDD_L:
+ type = GET_ASI_DTWINX;
+ break;
+ case ASI_BLK_COMMIT_P:
+ case ASI_BLK_COMMIT_S:
+ case ASI_BLK_AIUP_4V:
+ case ASI_BLK_AIUP_L_4V:
+ case ASI_BLK_AIUP:
+ case ASI_BLK_AIUPL:
+ case ASI_BLK_AIUS_4V:
+ case ASI_BLK_AIUS_L_4V:
+ case ASI_BLK_AIUS:
+ case ASI_BLK_AIUSL:
+ case ASI_BLK_S:
+ case ASI_BLK_SL:
+ case ASI_BLK_P:
+ case ASI_BLK_PL:
+ type = GET_ASI_BLOCK;
+ break;
+ case ASI_FL8_S:
+ case ASI_FL8_SL:
+ case ASI_FL8_P:
+ case ASI_FL8_PL:
+ memop = MO_UB;
+ type = GET_ASI_SHORT;
+ break;
+ case ASI_FL16_S:
+ case ASI_FL16_SL:
+ case ASI_FL16_P:
+ case ASI_FL16_PL:
+ memop = MO_TEUW;
+ type = GET_ASI_SHORT;
+ break;
+ }
+ /* The little-endian asis all have bit 3 set. */
+ if (asi & 8) {
+ memop ^= MO_BSWAP;
+ }
+ }
+#endif
+
+ return (DisasASI){ type, asi, mem_idx, memop };
+}
+
+static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
+ int insn, MemOp memop)
+{
+ DisasASI da = get_asi(dc, insn, memop);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DTWINX: /* Reserved for ldda. */
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(memop);
+
+ save_state(dc);
+#ifdef TARGET_SPARC64
+ gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
+#else
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ tcg_gen_trunc_i64_tl(dst, t64);
+ tcg_temp_free_i64(t64);
+ }
+#endif
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+}
+
+static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
+ int insn, MemOp memop)
+{
+ DisasASI da = get_asi(dc, insn, memop);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DTWINX: /* Reserved for stda. */
+#ifndef TARGET_SPARC64
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+#else
+ if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+ /* Pre OpenSPARC CPUs don't have these */
+ gen_exception(dc, TT_ILL_INSN);
+ return;
+ }
+ /* In OpenSPARC T1+ CPUs, TWINX ASIs in store instructions
+ * are ST_BLKINIT_ ASIs. */
+#endif
+ /* fall through */
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+ break;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ case GET_ASI_BCOPY:
+ /* Copy 32 bytes from the address in SRC to ADDR. */
+ /* ??? The original qemu code suggests 4-byte alignment, dropping
+ the low bits, but the only place I can see this used is in the
+ Linux kernel with 32 byte alignment, which would make more sense
+ as a cacheline-style operation. */
+ {
+ TCGv saddr = tcg_temp_new();
+ TCGv daddr = tcg_temp_new();
+ TCGv four = tcg_const_tl(4);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ int i;
+
+ tcg_gen_andi_tl(saddr, src, -4);
+ tcg_gen_andi_tl(daddr, addr, -4);
+ for (i = 0; i < 32; i += 4) {
+ /* Since the loads and stores are paired, allow the
+ copy to happen in the host endianness. */
+ tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
+ tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+ tcg_gen_add_tl(saddr, saddr, four);
+ tcg_gen_add_tl(daddr, daddr, four);
+ }
+
+ tcg_temp_free(saddr);
+ tcg_temp_free(daddr);
+ tcg_temp_free(four);
+ tcg_temp_free_i32(tmp);
+ }
+ break;
+#endif
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
+
+ save_state(dc);
+#ifdef TARGET_SPARC64
+ gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
+#else
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(t64, src);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i64(t64);
+ }
+#endif
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+
+ /* A write to a TLB register may alter page maps. End the TB. */
+ dc->npc = DYNAMIC_PC;
+ }
+ break;
+ }
+}
+
+static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
+ TCGv addr, int insn)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEUL);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEUL);
+ TCGv oldv;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+ case GET_ASI_DIRECT:
+ oldv = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+ da.mem_idx, da.memop);
+ gen_store_gpr(dc, rd, oldv);
+ tcg_temp_free(oldv);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+{
+ DisasASI da = get_asi(dc, insn, MO_UB);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_ldstub(dc, dst, addr, da.mem_idx);
+ break;
+ default:
+ /* ??? In theory, this should raise DAE_invalid_asi.
+ But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
+ if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
+ gen_helper_exit_atomic(cpu_env);
+ } else {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(MO_UB);
+ TCGv_i64 s64, t64;
+
+ save_state(dc);
+ t64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+
+ s64 = tcg_const_i64(0xff);
+ gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop);
+ tcg_temp_free_i64(s64);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+
+ tcg_gen_trunc_i64_tl(dst, t64);
+ tcg_temp_free_i64(t64);
+
+ /* End the TB. */
+ dc->npc = DYNAMIC_PC;
+ }
+ break;
+ }
+}
+#endif
+
+#ifdef TARGET_SPARC64
+static void gen_ldf_asi(DisasContext *dc, TCGv addr,
+ int insn, int size, int rd)
+{
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ TCGv_i32 d32;
+ TCGv_i64 d64;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ switch (size) {
+ case 4:
+ d32 = gen_dest_fpr_F(dc);
+ tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+ gen_store_fpr_F(dc, rd, d32);
+ break;
+ case 8:
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ break;
+ case 16:
+ d64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+ tcg_temp_free_i64(d64);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case GET_ASI_BLOCK:
+ /* Valid for lddfa on aligned registers only. */
+ if (size == 8 && (rd & 7) == 0) {
+ MemOp memop;
+ TCGv eight;
+ int i;
+
+ gen_address_mask(dc, addr);
+
+ /* The first operation checks required alignment. */
+ memop = da.memop | MO_ALIGN_64;
+ eight = tcg_const_tl(8);
+ for (i = 0; ; ++i) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
+ da.mem_idx, memop);
+ if (i == 7) {
+ break;
+ }
+ tcg_gen_add_tl(addr, addr, eight);
+ memop = da.memop;
+ }
+ tcg_temp_free(eight);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ case GET_ASI_SHORT:
+ /* Valid for lddfa only. */
+ if (size == 8) {
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+
+ save_state(dc);
+ /* According to the table in the UA2011 manual, the only
+ other asis that are valid for ldfa/lddfa/ldqfa are
+ the NO_FAULT asis. We still need a helper for these,
+ but we can just use the integer asi helper for them. */
+ switch (size) {
+ case 4:
+ d64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ d32 = gen_dest_fpr_F(dc);
+ tcg_gen_extrl_i64_i32(d32, d64);
+ tcg_temp_free_i64(d64);
+ gen_store_fpr_F(dc, rd, d32);
+ break;
+ case 8:
+ gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
+ break;
+ case 16:
+ d64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ tcg_gen_addi_tl(addr, addr, 8);
+ gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+ tcg_temp_free_i64(d64);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+}
+
+static void gen_stf_asi(DisasContext *dc, TCGv addr,
+ int insn, int size, int rd)
+{
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ TCGv_i32 d32;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ switch (size) {
+ case 4:
+ d32 = gen_load_fpr_F(dc, rd);
+ tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+ break;
+ case 8:
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ break;
+ case 16:
+ /* Only 4-byte alignment required. However, it is legal for the
+ cpu to signal the alignment fault, and the OS trap handler is
+ required to fix it up. Requiring 16-byte alignment here avoids
+ having to probe the second page before performing the first
+ write. */
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case GET_ASI_BLOCK:
+ /* Valid for stdfa on aligned registers only. */
+ if (size == 8 && (rd & 7) == 0) {
+ MemOp memop;
+ TCGv eight;
+ int i;
+
+ gen_address_mask(dc, addr);
+
+ /* The first operation checks required alignment. */
+ memop = da.memop | MO_ALIGN_64;
+ eight = tcg_const_tl(8);
+ for (i = 0; ; ++i) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
+ da.mem_idx, memop);
+ if (i == 7) {
+ break;
+ }
+ tcg_gen_add_tl(addr, addr, eight);
+ memop = da.memop;
+ }
+ tcg_temp_free(eight);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ case GET_ASI_SHORT:
+ /* Valid for stdfa only. */
+ if (size == 8) {
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ default:
+ /* According to the table in the UA2011 manual, the only
+ other asis that are valid for stfa/stdfa/stqfa are
+ the PST* asis, which aren't currently handled. */
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ }
+}
+
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv_i64 hi = gen_dest_gpr(dc, rd);
+ TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+
+ case GET_ASI_DTWINX:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+ break;
+
+ case GET_ASI_DIRECT:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+
+ /* Note that LE ldda acts as if each 32-bit register
+ result is byte swapped. Having just performed one
+ 64-bit bswap, we need now to swap the writebacks. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr32_i64(lo, hi, tmp);
+ } else {
+ tcg_gen_extr32_i64(hi, lo, tmp);
+ }
+ tcg_temp_free_i64(tmp);
+ }
+ break;
+
+ default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for ldda, and this should raise DAE_invalid_asi. However,
+ real hardware allows others. This can be seen with e.g.
+ FreeBSD 10.3 wrt ASI_IC_TAG. */
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ save_state(dc);
+ gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free_i32(r_mop);
+
+ /* See above. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr32_i64(lo, hi, tmp);
+ } else {
+ tcg_gen_extr32_i64(hi, lo, tmp);
+ }
+ tcg_temp_free_i64(tmp);
+ }
+ break;
+ }
+
+ gen_store_gpr(dc, rd, hi);
+ gen_store_gpr(dc, rd + 1, lo);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv lo = gen_load_gpr(dc, rd + 1);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DTWINX:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
+ break;
+
+ case GET_ASI_DIRECT:
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* Note that LE stda acts as if each 32-bit register result is
+ byte swapped. We will perform one 64-bit LE store, so now
+ we must swap the order of the construction. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat32_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat32_i64(t64, hi, lo);
+ }
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+
+ default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for stda, and this should raise DAE_invalid_asi. */
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* See above. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat32_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat32_i64(t64, hi, lo);
+ }
+
+ save_state(dc);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+ }
+}
+
+static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv oldv;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+ case GET_ASI_DIRECT:
+ oldv = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+ da.mem_idx, da.memop);
+ gen_store_gpr(dc, rd, oldv);
+ tcg_temp_free(oldv);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+#elif !defined(CONFIG_USER_ONLY)
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+ /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
+ whereby "rd + 1" elicits "error: array subscript is above array".
+ Since we have already asserted that rd is even, the semantics
+ are unchanged. */
+ TCGv lo = gen_dest_gpr(dc, rd | 1);
+ TCGv hi = gen_dest_gpr(dc, rd);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ tcg_temp_free_i64(t64);
+ return;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+ save_state(dc);
+ gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+
+ tcg_gen_extr_i64_i32(lo, hi, t64);
+ tcg_temp_free_i64(t64);
+ gen_store_gpr(dc, rd | 1, lo);
+ gen_store_gpr(dc, rd, hi);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv lo = gen_load_gpr(dc, rd + 1);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ tcg_gen_concat_tl_i64(t64, lo, hi);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ break;
+ case GET_ASI_BFILL:
+ /* Store 32 bytes of T64 to ADDR. */
+ /* ??? The original qemu code suggests 8-byte alignment, dropping
+ the low bits, but the only place I can see this used is in the
+ Linux kernel with 32 byte alignment, which would make more sense
+ as a cacheline-style operation. */
+ {
+ TCGv d_addr = tcg_temp_new();
+ TCGv eight = tcg_const_tl(8);
+ int i;
+
+ tcg_gen_andi_tl(d_addr, addr, -8);
+ for (i = 0; i < 32; i += 8) {
+ tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+ tcg_gen_add_tl(d_addr, d_addr, eight);
+ }
+
+ tcg_temp_free(d_addr);
+ tcg_temp_free(eight);
+ }
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+ save_state(dc);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+
+ tcg_temp_free_i64(t64);
+}
+#endif
+
+static TCGv get_src1(DisasContext *dc, unsigned int insn)
+{
+ unsigned int rs1 = GET_FIELD(insn, 13, 17);
+ return gen_load_gpr(dc, rs1);
+}
+
+static TCGv get_src2(DisasContext *dc, unsigned int insn)
+{
+ if (IS_IMM) { /* immediate */
+ target_long simm = GET_FIELDs(insn, 19, 31);
+ TCGv t = get_temp_tl(dc);
+ tcg_gen_movi_tl(t, simm);
+ return t;
+ } else { /* register */
+ unsigned int rs2 = GET_FIELD(insn, 27, 31);
+ return gen_load_gpr(dc, rs2);
+ }
+}
+
+#ifdef TARGET_SPARC64
+static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ TCGv_i32 c32, zero, dst, s1, s2;
+
+ /* We have two choices here: extend the 32 bit data and use movcond_i64,
+ or fold the comparison down to 32 bits and use movcond_i32. Choose
+ the latter. */
+ c32 = tcg_temp_new_i32();
+ if (cmp->is_bool) {
+ tcg_gen_extrl_i64_i32(c32, cmp->c1);
+ } else {
+ TCGv_i64 c64 = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
+ tcg_gen_extrl_i64_i32(c32, c64);
+ tcg_temp_free_i64(c64);
+ }
+
+ s1 = gen_load_fpr_F(dc, rs);
+ s2 = gen_load_fpr_F(dc, rd);
+ dst = gen_dest_fpr_F(dc);
+ zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
+
+ tcg_temp_free_i32(c32);
+ tcg_temp_free_i32(zero);
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
+ gen_load_fpr_D(dc, rs),
+ gen_load_fpr_D(dc, rd));
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ int qd = QFPREG(rd);
+ int qs = QFPREG(rs);
+
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
+ cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
+ cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
+
+ gen_update_fprs_dirty(dc, qd);
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
+{
+ TCGv_i32 r_tl = tcg_temp_new_i32();
+
+ /* load env->tl into r_tl */
+ tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
+
+ /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
+ tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
+
+ /* calculate offset to current trap state from env->ts, reuse r_tl */
+ tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
+ tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
+
+ /* tsptr = env->ts[env->tl & MAXTL_MASK] */
+ {
+ TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
+ tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
+ tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
+ tcg_temp_free_ptr(r_tl_tmp);
+ }
+
+ tcg_temp_free_i32(r_tl);
+}
+#endif
+
+static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
+ int width, bool cc, bool left)
+{
+ TCGv lo1, lo2, t1, t2;
+ uint64_t amask, tabl, tabr;
+ int shift, imask, omask;
+
+ if (cc) {
+ tcg_gen_mov_tl(cpu_cc_src, s1);
+ tcg_gen_mov_tl(cpu_cc_src2, s2);
+ tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ }
+
+ /* Theory of operation: there are two tables, left and right (not to
+ be confused with the left and right versions of the opcode). These
+ are indexed by the low 3 bits of the inputs. To make things "easy",
+ these tables are loaded into two constants, TABL and TABR below.
+ The operation index = (input & imask) << shift calculates the index
+ into the constant, while val = (table >> index) & omask calculates
+ the value we're looking for. */
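+ /* Worked example, width 8, left, input low bits 3:
+ index = (3 & 0x7) << 3 = 24, and (tabl >> 24) & 0xff = 0xf8,
+ the left-edge mask for a 3-byte offset into an 8-byte group. */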
+ switch (width) {
+ case 8:
+ imask = 0x7;
+ shift = 3;
+ omask = 0xff;
+ if (left) {
+ tabl = 0x80c0e0f0f8fcfeffULL;
+ tabr = 0xff7f3f1f0f070301ULL;
+ } else {
+ tabl = 0x0103070f1f3f7fffULL;
+ tabr = 0xfffefcf8f0e0c080ULL;
+ }
+ break;
+ case 16:
+ imask = 0x6;
+ shift = 1;
+ omask = 0xf;
+ if (left) {
+ tabl = 0x8cef;
+ tabr = 0xf731;
+ } else {
+ tabl = 0x137f;
+ tabr = 0xfec8;
+ }
+ break;
+ case 32:
+ imask = 0x4;
+ shift = 0;
+ omask = 0x3;
+ if (left) {
+ tabl = (2 << 2) | 3;
+ tabr = (3 << 2) | 1;
+ } else {
+ tabl = (1 << 2) | 3;
+ tabr = (3 << 2) | 2;
+ }
+ break;
+ default:
+ abort();
+ }
+
+ lo1 = tcg_temp_new();
+ lo2 = tcg_temp_new();
+ tcg_gen_andi_tl(lo1, s1, imask);
+ tcg_gen_andi_tl(lo2, s2, imask);
+ tcg_gen_shli_tl(lo1, lo1, shift);
+ tcg_gen_shli_tl(lo2, lo2, shift);
+
+ t1 = tcg_const_tl(tabl);
+ t2 = tcg_const_tl(tabr);
+ tcg_gen_shr_tl(lo1, t1, lo1);
+ tcg_gen_shr_tl(lo2, t2, lo2);
+ tcg_gen_andi_tl(dst, lo1, omask);
+ tcg_gen_andi_tl(lo2, lo2, omask);
+
+ amask = -8;
+ if (AM_CHECK(dc)) {
+ amask &= 0xffffffffULL;
+ }
+ tcg_gen_andi_tl(s1, s1, amask);
+ tcg_gen_andi_tl(s2, s2, amask);
+
+ /* We want to compute
+ dst = (s1 == s2 ? lo1 : lo1 & lo2).
+ We've already done dst = lo1, so this reduces to
+ dst &= (s1 == s2 ? -1 : lo2)
+ Which we perform by
+ lo2 |= -(s1 == s2)
+ dst &= lo2
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
+ tcg_gen_neg_tl(t1, t1);
+ tcg_gen_or_tl(lo2, lo2, t1);
+ tcg_gen_and_tl(dst, dst, lo2);
+
+ tcg_temp_free(lo1);
+ tcg_temp_free(lo2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
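+/* ALIGNADDR: DST = (S1 + S2) & ~7, with the discarded low three bits
+ (negated when LEFT is set, for the "little" variant) deposited into
+ GSR.align for a subsequent faligndata. */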
+static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
+{
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_add_tl(tmp, s1, s2);
+ tcg_gen_andi_tl(dst, tmp, -8);
+ if (left) {
+ tcg_gen_neg_tl(tmp, tmp);
+ }
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+
+ tcg_temp_free(tmp);
+}
+
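+/* FALIGNDATA: concatenate S1:S2 and extract the aligned 8 bytes at
+ byte offset GSR.align, i.e. (s1 << 8n) | (s2 >> (64 - 8n)) with
+ n = GSR.align; the two-step right shift avoids a shift by 64. */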
+static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
+{
+ TCGv t1, t2, shift;
+
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ shift = tcg_temp_new();
+
+ tcg_gen_andi_tl(shift, gsr, 7);
+ tcg_gen_shli_tl(shift, shift, 3);
+ tcg_gen_shl_tl(t1, s1, shift);
+
+ /* A shift of 64 does not produce 0 in TCG. Divide this into a
+ shift of (up to 63) followed by a constant shift of 1. */
+ tcg_gen_xori_tl(shift, shift, 63);
+ tcg_gen_shr_tl(t2, s2, shift);
+ tcg_gen_shri_tl(t2, t2, 1);
+
+ tcg_gen_or_tl(dst, t1, t2);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(shift);
+}
+#endif
+
+#define CHECK_IU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto illegal_insn;
+#define CHECK_FPU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto nfpu_insn;
+
+/* before an instruction, dc->pc must be static */
+static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
+{
+ unsigned int opc, rs1, rs2, rd;
+ TCGv cpu_src1, cpu_src2;
+ TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
+ TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
+ target_long simm;
+
+ opc = GET_FIELD(insn, 0, 1);
+ rd = GET_FIELD(insn, 2, 6);
+
+ switch (opc) {
+ case 0: /* branches/sethi */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 9);
+ int32_t target;
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x1: /* V9 BPcc */
+ {
+ int cc;
+
+ target = GET_FIELD_SP(insn, 0, 18);
+ target = sign_extend(target, 19);
+ target <<= 2;
+ cc = GET_FIELD_SP(insn, 20, 21);
+ if (cc == 0) {
+ do_branch(dc, target, insn, 0);
+ } else if (cc == 2) {
+ do_branch(dc, target, insn, 1);
+ } else {
+ goto illegal_insn;
+ }
+ goto jmp_insn;
+ }
+ case 0x3: /* V9 BPr */
+ {
+ target = GET_FIELD_SP(insn, 0, 13) |
+ (GET_FIELD_SP(insn, 20, 21) << 14);
+ target = sign_extend(target, 16);
+ target <<= 2;
+ cpu_src1 = get_src1(dc, insn);
+ do_branch_reg(dc, target, insn, cpu_src1);
+ goto jmp_insn;
+ }
+ case 0x5: /* V9 FBPcc */
+ {
+ int cc = GET_FIELD_SP(insn, 20, 21);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ target = GET_FIELD_SP(insn, 0, 18);
+ target = sign_extend(target, 19);
+ target <<= 2;
+ do_fbranch(dc, target, insn, cc);
+ goto jmp_insn;
+ }
+#else
+ case 0x7: /* CBN+x */
+ {
+ goto ncp_insn;
+ }
+#endif
+ case 0x2: /* BN+x */
+ {
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_branch(dc, target, insn, 0);
+ goto jmp_insn;
+ }
+ case 0x6: /* FBN+x */
+ {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_fbranch(dc, target, insn, 0);
+ goto jmp_insn;
+ }
+ case 0x4: /* SETHI */
+ /* Special-case %g0 because that's the canonical nop. */
+ if (rd) {
+ uint32_t value = GET_FIELD(insn, 10, 31);
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, value << 10);
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
+ case 0x0: /* UNIMPL */
+ default:
+ goto illegal_insn;
+ }
+ break;
+ }
+ break;
+ case 1: /*CALL*/
+ {
+ target_long target = GET_FIELDs(insn, 2, 31) << 2;
+ TCGv o7 = gen_dest_gpr(dc, 15);
+
+ tcg_gen_movi_tl(o7, dc->pc);
+ gen_store_gpr(dc, 15, o7);
+ target += dc->pc;
+ gen_mov_pc_npc(dc);
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ dc->npc = target;
+ }
+ goto jmp_insn;
+ case 2: /* FPU & Logical Operations */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+ TCGv cpu_dst = get_temp_tl(dc);
+ TCGv cpu_tmp0;
+
+ if (xop == 0x3a) { /* generate trap */
+ int cond = GET_FIELD(insn, 3, 6);
+ TCGv_i32 trap;
+ TCGLabel *l1 = NULL;
+ int mask;
+
+ if (cond == 0) {
+ /* Trap never. */
+ break;
+ }
+
+ save_state(dc);
+
+ if (cond != 8) {
+ /* Conditional trap. */
+ DisasCompare cmp;
+#ifdef TARGET_SPARC64
+ /* V9 icc/xcc */
+ int cc = GET_FIELD_SP(insn, 11, 12);
+ if (cc == 0) {
+ gen_compare(&cmp, 0, cond, dc);
+ } else if (cc == 2) {
+ gen_compare(&cmp, 1, cond, dc);
+ } else {
+ goto illegal_insn;
+ }
+#else
+ gen_compare(&cmp, 0, cond, dc);
+#endif
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
+ cmp.c1, cmp.c2, l1);
+ free_compare(&cmp);
+ }
+
+ mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
+ ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
+
+ /* Don't use the normal temporaries, as they may well have
+ gone out of scope with the branch above. While we're
+ doing that we might as well pre-truncate to 32-bit. */
+ trap = tcg_temp_new_i32();
+
+ rs1 = GET_FIELD_SP(insn, 14, 18);
+ if (IS_IMM) {
+ rs2 = GET_FIELD_SP(insn, 0, 7);
+ if (rs1 == 0) {
+ tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
+ /* Signal that the trap value is fully constant. */
+ mask = 0;
+ } else {
+ TCGv t1 = gen_load_gpr(dc, rs1);
+ tcg_gen_trunc_tl_i32(trap, t1);
+ tcg_gen_addi_i32(trap, trap, rs2);
+ }
+ } else {
+ TCGv t1, t2;
+ rs2 = GET_FIELD_SP(insn, 0, 4);
+ t1 = gen_load_gpr(dc, rs1);
+ t2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(t1, t1, t2);
+ tcg_gen_trunc_tl_i32(trap, t1);
+ }
+ if (mask != 0) {
+ tcg_gen_andi_i32(trap, trap, mask);
+ tcg_gen_addi_i32(trap, trap, TT_TRAP);
+ }
+
+ gen_helper_raise_exception(cpu_env, trap);
+ tcg_temp_free_i32(trap);
+
+ if (cond == 8) {
+ /* An unconditional trap ends the TB. */
+ dc->base.is_jmp = DISAS_NORETURN;
+ goto jmp_insn;
+ } else {
+ /* A conditional trap falls through to the next insn. */
+ gen_set_label(l1);
+ break;
+ }
+ } else if (xop == 0x28) {
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: /* rdy */
+#ifndef TARGET_SPARC64
+ case 0x01 ... 0x0e: /* undefined in the SPARCv8
+ manual, rdy on the microSPARC
+ II */
+ case 0x0f: /* stbar in the SPARCv8 manual,
+ rdy on the microSPARC II */
+ case 0x10 ... 0x1f: /* implementation-dependent in the
+ SPARCv8 manual, rdy on the
+ microSPARC II */
+ /* Read Asr17 */
+ if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
+ TCGv t = gen_dest_gpr(dc, rd);
+ /* Read Asr17 for a Leon3 monoprocessor */
+ tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
+ gen_store_gpr(dc, rd, t);
+ break;
+ }
+#endif
+ gen_store_gpr(dc, rd, cpu_y);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 rdccr */
+ update_psr(dc);
+ gen_helper_rdccr(cpu_dst, cpu_env);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x3: /* V9 rdasi */
+ tcg_gen_movi_tl(cpu_dst, dc->asi);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x4: /* V9 rdtick */
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ gen_store_gpr(dc, rd, cpu_dst);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ /* I/O operations in icount mode must end the TB */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ }
+ break;
+ case 0x5: /* V9 rdpc */
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ if (unlikely(AM_CHECK(dc))) {
+ tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
+ } else {
+ tcg_gen_movi_tl(t, dc->pc);
+ }
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
+ case 0x6: /* V9 rdfprs */
+ tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0xf: /* V9 membar */
+ break; /* no effect */
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_gsr);
+ break;
+ case 0x16: /* Softint */
+ tcg_gen_ld32s_tl(cpu_dst, cpu_env,
+ offsetof(CPUSPARCState, softint));
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x17: /* Tick compare */
+ gen_store_gpr(dc, rd, cpu_tick_cmpr);
+ break;
+ case 0x18: /* System tick */
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ gen_store_gpr(dc, rd, cpu_dst);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ /* I/O operations in icount mode must end the TB */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ }
+ break;
+ case 0x19: /* System tick compare */
+ gen_store_gpr(dc, rd, cpu_stick_cmpr);
+ break;
+ case 0x1a: /* UltraSPARC-T1 Strand status */
+ /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
+ * this ASR as impl. dep
+ */
+ CHECK_IU_FEATURE(dc, HYPV);
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, 1UL);
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
+ case 0x10: /* Performance Control */
+ case 0x11: /* Performance Instrumentation Counter */
+ case 0x12: /* Dispatch Control */
+ case 0x14: /* Softint set, WO */
+ case 0x15: /* Softint clear, WO */
+#endif
+ default:
+ goto illegal_insn;
+ }
+#if !defined(CONFIG_USER_ONLY)
+ } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc)) {
+ goto priv_insn;
+ }
+ update_psr(dc);
+ gen_helper_rdpsr(cpu_dst, cpu_env);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc)) {
+ goto priv_insn;
+ }
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: /* hpstate */
+ tcg_gen_ld_i64(cpu_dst, cpu_env,
+ offsetof(CPUSPARCState, hpstate));
+ break;
+ case 1: /* htstate */
+ /* gen_op_rdhtstate(); */
+ break;
+ case 3: /* hintp */
+ tcg_gen_mov_tl(cpu_dst, cpu_hintp);
+ break;
+ case 5: /* htba */
+ tcg_gen_mov_tl(cpu_dst, cpu_htba);
+ break;
+ case 6: /* hver */
+ tcg_gen_mov_tl(cpu_dst, cpu_hver);
+ break;
+ case 31: /* hstick_cmpr */
+ tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#endif
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
+ if (!supervisor(dc)) {
+ goto priv_insn;
+ }
+ cpu_tmp0 = get_temp_tl(dc);
+#ifdef TARGET_SPARC64
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: /* tpc */
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: /* tnpc */
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: /* tstate */
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: /* tt */
+ {
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 4: /* tick */
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_get_count(cpu_tmp0, cpu_env,
+ r_tickptr, r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ /* I/O operations in icount mode must end the TB */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
+ break;
+ case 6: // pstate
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, pstate));
+ break;
+ case 7: // tl
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ break;
+ case 8: // pil
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, psrpil));
+ break;
+ case 9: // cwp
+ gen_helper_rdcwp(cpu_tmp0, cpu_env);
+ break;
+ case 10: // cansave
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, cansave));
+ break;
+ case 11: // canrestore
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, canrestore));
+ break;
+ case 12: // cleanwin
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, cleanwin));
+ break;
+ case 13: // otherwin
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, otherwin));
+ break;
+ case 14: // wstate
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, wstate));
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, gl));
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
+ break;
+ case 31: // ver
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
+ break;
+ case 15: // fq
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
+#endif
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+#endif
+#if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
+ } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
+#ifdef TARGET_SPARC64
+ gen_helper_flushw(cpu_env);
+#else
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_store_gpr(dc, rd, cpu_tbr);
+#endif
+ break;
+#endif
+ } else if (xop == 0x34) { /* FPop1: FP arithmetic and conversions */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+
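+ /* The gen_fop_* suffixes encode operand widths: F = single
+ (32-bit), D = double (64-bit), Q = quad (128-bit). */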
+ switch (xop) {
+ case 0x1: /* fmovs */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x5: /* fnegs */
+ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
+ break;
+ case 0x9: /* fabss */
+ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
+ break;
+ case 0x29: /* fsqrts */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
+ break;
+ case 0x2a: /* fsqrtd */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
+ break;
+ case 0x2b: /* fsqrtq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
+ break;
+ case 0x41: /* fadds */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
+ break;
+ case 0x42: /* faddd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
+ break;
+ case 0x43: /* faddq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
+ break;
+ case 0x45: /* fsubs */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
+ break;
+ case 0x46: /* fsubd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
+ break;
+ case 0x47: /* fsubq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
+ break;
+ case 0x49: /* fmuls */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
+ break;
+ case 0x4a: /* fmuld */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
+ break;
+ case 0x4b: /* fmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
+ break;
+ case 0x4d: /* fdivs */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
+ break;
+ case 0x4e: /* fdivd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
+ break;
+ case 0x4f: /* fdivq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
+ break;
+ case 0x69: /* fsmuld */
+ CHECK_FPU_FEATURE(dc, FSMULD);
+ gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
+ break;
+ case 0x6e: /* fdmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
+ break;
+ case 0xc4: /* fitos */
+ gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
+ break;
+ case 0xc6: /* fdtos */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
+ break;
+ case 0xc7: /* fqtos */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
+ break;
+ case 0xc8: /* fitod */
+ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
+ break;
+ case 0xc9: /* fstod */
+ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
+ break;
+ case 0xcb: /* fqtod */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
+ break;
+ case 0xcc: /* fitoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
+ break;
+ case 0xcd: /* fstoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
+ break;
+ case 0xce: /* fdtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
+ break;
+ case 0xd1: /* fstoi */
+ gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
+ break;
+ case 0xd2: /* fdtoi */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
+ break;
+ case 0xd3: /* fqtoi */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 fmovd */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x3: /* V9 fmovq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_move_Q(dc, rd, rs2);
+ break;
+ case 0x6: /* V9 fnegd */
+ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
+ break;
+ case 0x7: /* V9 fnegq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
+ break;
+ case 0xa: /* V9 fabsd */
+ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
+ break;
+ case 0xb: /* V9 fabsq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
+ break;
+ case 0x81: /* V9 fstox */
+ gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
+ break;
+ case 0x82: /* V9 fdtox */
+ gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
+ break;
+ case 0x83: /* V9 fqtox */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
+ break;
+ case 0x84: /* V9 fxtos */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
+ break;
+ case 0x88: /* V9 fxtod */
+ gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
+ break;
+ case 0x8c: /* V9 fxtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop == 0x35) { /* FPop2: FP compares and conditional moves */
+#ifdef TARGET_SPARC64
+ int cond;
+#endif
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+
+#ifdef TARGET_SPARC64
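+ /* FMOVR: conditional FP move on an integer register value (V9).
+ The 3-bit rcond field and rs1 build a DisasCompare, which
+ gen_fmov{s,d,q} then applies to the FP register move. */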
+#define FMOVR(sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 10, 12); \
+ cpu_src1 = get_src1(dc, insn); \
+ gen_compare_reg(&cmp, cond, cpu_src1); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
+ FMOVR(s);
+ break;
+ } else if ((xop & 0x11f) == 0x006) { /* V9 fmovdr */
+ FMOVR(d);
+ break;
+ } else if ((xop & 0x11f) == 0x007) { /* V9 fmovqr */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVR(q);
+ break;
+ }
+#undef FMOVR
+#endif
+ switch (xop) {
+#ifdef TARGET_SPARC64
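+ /* FMOVCC on the FP condition codes: fcc selects %fcc0..%fcc3 and
+ the 4-bit cond field is evaluated by gen_fcompare(). */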
+#define FMOVCC(fcc, sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_fcompare(&cmp, fcc, cond); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ case 0x001: /* V9 fmovscc %fcc0 */
+ FMOVCC(0, s);
+ break;
+ case 0x002: /* V9 fmovdcc %fcc0 */
+ FMOVCC(0, d);
+ break;
+ case 0x003: /* V9 fmovqcc %fcc0 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(0, q);
+ break;
+ case 0x041: /* V9 fmovscc %fcc1 */
+ FMOVCC(1, s);
+ break;
+ case 0x042: /* V9 fmovdcc %fcc1 */
+ FMOVCC(1, d);
+ break;
+ case 0x043: /* V9 fmovqcc %fcc1 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(1, q);
+ break;
+ case 0x081: /* V9 fmovscc %fcc2 */
+ FMOVCC(2, s);
+ break;
+ case 0x082: /* V9 fmovdcc %fcc2 */
+ FMOVCC(2, d);
+ break;
+ case 0x083: /* V9 fmovqcc %fcc2 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(2, q);
+ break;
+ case 0x0c1: /* V9 fmovscc %fcc3 */
+ FMOVCC(3, s);
+ break;
+ case 0x0c2: /* V9 fmovdcc %fcc3 */
+ FMOVCC(3, d);
+ break;
+ case 0x0c3: /* V9 fmovqcc %fcc3 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(3, q);
+ break;
+#undef FMOVCC
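+ /* Redefined: these forms test the integer condition codes,
+ xcc = 0 selecting %icc and xcc = 1 selecting %xcc via
+ gen_compare(). */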
+#define FMOVCC(xcc, sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_compare(&cmp, xcc, cond, dc); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ case 0x101: /* V9 fmovscc %icc */
+ FMOVCC(0, s);
+ break;
+ case 0x102: /* V9 fmovdcc %icc */
+ FMOVCC(0, d);
+ break;
+ case 0x103: /* V9 fmovqcc %icc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(0, q);
+ break;
+ case 0x181: /* V9 fmovscc %xcc */
+ FMOVCC(1, s);
+ break;
+ case 0x182: /* V9 fmovdcc %xcc */
+ FMOVCC(1, d);
+ break;
+ case 0x183: /* V9 fmovqcc %xcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(1, q);
+ break;
+#undef FMOVCC
+#endif
+ case 0x51: /* fcmps, V9 %fcc */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+ gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
+ break;
+ case 0x52: /* fcmpd, V9 %fcc */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
+ break;
+ case 0x53: /* fcmpq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpq(rd & 3);
+ break;
+ case 0x55: /* fcmpes, V9 %fcc */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+ gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
+ break;
+ case 0x56: /* fcmped, V9 %fcc */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
+ break;
+ case 0x57: /* fcmpeq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpeq(rd & 3);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop == 0x2) {
+ TCGv dst = gen_dest_gpr(dc, rd);
+ rs1 = GET_FIELD(insn, 13, 17);
+ if (rs1 == 0) {
+ /* clr/mov shortcut: or %g0, x, y -> mov x, y */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_movi_tl(dst, simm);
+ gen_store_gpr(dc, rd, dst);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 == 0) {
+ tcg_gen_movi_tl(dst, 0);
+ gen_store_gpr(dc, rd, dst);
+ } else {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_store_gpr(dc, rd, cpu_src2);
+ }
+ }
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_ori_tl(dst, cpu_src1, simm);
+ gen_store_gpr(dc, rd, dst);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 == 0) {
+ /* mov shortcut: or x, %g0, y -> mov x, y */
+ gen_store_gpr(dc, rd, cpu_src1);
+ } else {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, dst);
+ }
+ }
+ }
+#ifdef TARGET_SPARC64
+ } else if (xop == 0x25) { /* sll, V9 sllx */
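+ /* Bit 12 of the instruction selects the extended 64-bit shift
+ (sllx/srlx/srax): the count is masked to 6 bits, versus
+ 5 bits for the 32-bit forms. */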
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ }
+ tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else if (xop == 0x26) { /* srl, V9 srlx */
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else if (xop == 0x27) { /* sra, V9 srax */
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+ tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+ tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+#endif
+ } else if (xop < 0x36) {
+ if (xop < 0x20) {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_src2 = get_src2(dc, insn);
+ switch (xop & ~0x10) {
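+ /* Bit 4 of xop selects the cc-setting variant of each ALU
+ operation; it is masked off here and tested as
+ (xop & 0x10) in the individual cases. */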
+ case 0x0: /* add */
+ if (xop & 0x10) {
+ gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ } else {
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ case 0x1: /* and */
+ tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x2: /* or */
+ tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x3: /* xor */
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x4: /* sub */
+ if (xop & 0x10) {
+ gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ } else {
+ tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ case 0x5: /* andn */
+ tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x6: /* orn */
+ tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x7: /* xorn */
+ tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x8: /* addx, V9 addc */
+ gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0x9: /* V9 mulx */
+ tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
+ break;
+#endif
+ case 0xa: /* umul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xb: /* smul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xc: /* subx, V9 subc */
+ gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0xd: /* V9 udivx */
+ gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ break;
+#endif
+ case 0xe: /* udiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ }
+ break;
+ case 0xf: /* sdiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ }
+ break;
+ default:
+ goto illegal_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_src2 = get_src2(dc, insn);
+ switch (xop) {
+ case 0x20: /* taddcc */
+ gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
+ dc->cc_op = CC_OP_TADD;
+ break;
+ case 0x21: /* tsubcc */
+ gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
+ dc->cc_op = CC_OP_TSUB;
+ break;
+ case 0x22: /* taddcctv */
+ gen_helper_taddcctv(cpu_dst, cpu_env,
+ cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ dc->cc_op = CC_OP_TADDTV;
+ break;
+ case 0x23: /* tsubcctv */
+ gen_helper_tsubcctv(cpu_dst, cpu_env,
+ cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ dc->cc_op = CC_OP_TSUBTV;
+ break;
+ case 0x24: /* mulscc */
+ update_psr(dc);
+ gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ break;
+#ifndef TARGET_SPARC64
+ case 0x25: /* sll */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x26: /* srl */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x27: /* sra */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+#endif
+ case 0x30:
+ {
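+ /* wr %asr: rd selects the ancillary state register; the
+ value written is rs1 ^ operand2. */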
+ cpu_tmp0 = get_temp_tl(dc);
+ switch (rd) {
+ case 0: /* wry */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+ break;
+#ifndef TARGET_SPARC64
+ case 0x01 ... 0x0f: /* undefined in the SPARCv8 manual,
+ nop on the microSPARC II */
+ case 0x10 ... 0x1f: /* implementation-dependent in the
+ SPARCv8 manual, nop on the
+ microSPARC II */
+ if ((rd == 0x13) && (dc->def->features &
+ CPU_FEATURE_POWERDOWN)) {
+ /* LEON3 power-down */
+ save_state(dc);
+ gen_helper_power_down(cpu_env);
+ }
+ break;
+#else
+ case 0x2: /* V9 wrccr */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_wrccr(cpu_env, cpu_tmp0);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ break;
+ case 0x3: /* V9 wrasi */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, asi));
+ /* End TB to notice changed ASI. */
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(NULL, 0);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ case 0x6: /* V9 wrfprs */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
+ dc->fprs_dirty = 0;
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(NULL, 0);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ case 0xf: /* V9 sir, nop if user */
+#if !defined(CONFIG_USER_ONLY)
+ if (supervisor(dc)) {
+ ; /* XXX: SIR not implemented */
+ }
+#endif
+ break;
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
+ break;
+ case 0x14: /* Softint set */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_set_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x15: /* Softint clear */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_clear_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x16: /* Softint write */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_write_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x17: /* Tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ if (tb_cflags(dc->base.tb) &
+ CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_tick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case 0x18: /* System tick */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ if (tb_cflags(dc->base.tb) &
+ CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case 0x19: /* System tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ if (tb_cflags(dc->base.tb) &
+ CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_stick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+
+ case 0x10: /* Performance Control */
+ case 0x11: /* Performance Instrumentation Counter */
+ case 0x12: /* Dispatch Control */
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case 0x31: /* wrpsr, V9 saved, restored */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+#ifdef TARGET_SPARC64
+ switch (rd) {
+ case 0:
+ gen_helper_saved(cpu_env);
+ break;
+ case 1:
+ gen_helper_restored(cpu_env);
+ break;
+ case 2: /* UA2005 allclean */
+ case 3: /* UA2005 otherw */
+ case 4: /* UA2005 normalw */
+ case 5: /* UA2005 invalw */
+ // XXX: not implemented, fall through to illegal_insn
+ default:
+ goto illegal_insn;
+ }
+#else
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_wrpsr(cpu_env, cpu_tmp0);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(NULL, 0);
+ dc->base.is_jmp = DISAS_NORETURN;
+#endif
+ }
+ break;
+ case 0x32: /* wrwim, V9 wrpr */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+#ifdef TARGET_SPARC64
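+ /* V9 wrpr: rd selects the privileged register to write;
+ cpu_tmp0 already holds rs1 ^ operand2. */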
+ switch (rd) {
+ case 0: // tpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: // tnpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: // tstate
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state,
+ tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: // tt
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 4: // tick
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ if (tb_cflags(dc->base.tb) &
+ CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
+ break;
+ case 6: // pstate
+ save_state(dc);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_wrpstate(cpu_env, cpu_tmp0);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ /* I/O ops in icount mode must end the TB */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 7: // tl
+ save_state(dc);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 8: // pil
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_wrpil(cpu_env, cpu_tmp0);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ /* I/O ops in icount mode must end the TB */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case 9: // cwp
+ gen_helper_wrcwp(cpu_env, cpu_tmp0);
+ break;
+ case 10: // cansave
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ cansave));
+ break;
+ case 11: // canrestore
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ canrestore));
+ break;
+ case 12: // cleanwin
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ cleanwin));
+ break;
+ case 13: // otherwin
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ otherwin));
+ break;
+ case 14: // wstate
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ wstate));
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ gen_helper_wrgl(cpu_env, cpu_tmp0);
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
+ if (dc->def->nwindows != 32) {
+ tcg_gen_andi_tl(cpu_wim, cpu_wim,
+ (1 << dc->def->nwindows) - 1);
+ }
+#endif
+ }
+ break;
+ case 0x33: /* wrtbr, UA2005 wrhpr */
+ {
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc))
+ goto priv_insn;
+ tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ switch (rd) {
+ case 0: // hpstate
+ tcg_gen_st_i64(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ hpstate));
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(NULL, 0);
+ dc->base.is_jmp = DISAS_NORETURN;
+ break;
+ case 1: // htstate
+ // XXX gen_op_wrhtstate() not implemented
+ break;
+ case 3: // hintp
+ tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
+ break;
+ case 5: // htba
+ tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
+ break;
+ case 31: // hstick_cmpr
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, hstick));
+ if (tb_cflags(dc->base.tb) &
+ CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_hstick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ /* End TB to handle timer interrupt */
+ dc->base.is_jmp = DISAS_EXIT;
+ }
+ break;
+ case 6: // hver is read-only
+ default:
+ goto illegal_insn;
+ }
+#endif
+ }
+ break;
+#endif
+#ifdef TARGET_SPARC64
+ case 0x2c: /* V9 movcc */
+ {
+ int cc = GET_FIELD_SP(insn, 11, 12);
+ int cond = GET_FIELD_SP(insn, 14, 17);
+ DisasCompare cmp;
+ TCGv dst;
+
+ if (insn & (1 << 18)) {
+ if (cc == 0) {
+ gen_compare(&cmp, 0, cond, dc);
+ } else if (cc == 2) {
+ gen_compare(&cmp, 1, cond, dc);
+ } else {
+ goto illegal_insn;
+ }
+ } else {
+ gen_fcompare(&cmp, cc, cond);
+ }
+
+ /* The get_src2 above loaded the normal 13-bit
+ immediate field, not the 11-bit field we have
+ in movcc. But it did handle the reg case. */
+ if (IS_IMM) {
+ simm = GET_FIELD_SPs(insn, 0, 10);
+ tcg_gen_movi_tl(cpu_src2, simm);
+ }
+
+ dst = gen_load_gpr(dc, rd);
+ tcg_gen_movcond_tl(cmp.cond, dst,
+ cmp.c1, cmp.c2,
+ cpu_src2, dst);
+ free_compare(&cmp);
+ gen_store_gpr(dc, rd, dst);
+ break;
+ }
+ case 0x2d: /* V9 sdivx */
+ gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x2e: /* V9 popc */
+ tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x2f: /* V9 movr */
+ {
+ int cond = GET_FIELD_SP(insn, 10, 12);
+ DisasCompare cmp;
+ TCGv dst;
+
+ gen_compare_reg(&cmp, cond, cpu_src1);
+
+ /* The get_src2 above loaded the normal 13-bit
+ immediate field, not the 10-bit field we have
+ in movr. But it did handle the reg case. */
+ if (IS_IMM) {
+ simm = GET_FIELD_SPs(insn, 0, 9);
+ tcg_gen_movi_tl(cpu_src2, simm);
+ }
+
+ dst = gen_load_gpr(dc, rd);
+ tcg_gen_movcond_tl(cmp.cond, dst,
+ cmp.c1, cmp.c2,
+ cpu_src2, dst);
+ free_compare(&cmp);
+ gen_store_gpr(dc, rd, dst);
+ break;
+ }
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
+#ifdef TARGET_SPARC64
+ int opf = GET_FIELD_SP(insn, 5, 13);
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+
+ switch (opf) {
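+ /* gen_edge(dc, dst, s1, s2, width, cc, left): the case mapping
+ below shows width = element size in bits, cc = 1 for the
+ condition-code-setting forms, and the final flag = 1 for the
+ little-endian ("l") variants. */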
+ case 0x000: /* VIS I edge8cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x001: /* VIS II edge8n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x002: /* VIS I edge8lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x003: /* VIS II edge8ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x004: /* VIS I edge16cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x005: /* VIS II edge16n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x006: /* VIS I edge16lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x007: /* VIS II edge16ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x008: /* VIS I edge32cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x009: /* VIS II edge32n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x00a: /* VIS I edge32lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x00b: /* VIS II edge32ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x010: /* VIS I array8 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x012: /* VIS I array16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x014: /* VIS I array32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x018: /* VIS I alignaddr */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x01a: /* VIS I alignaddrl */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x019: /* VIS II bmask */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x020: /* VIS I fcmple16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x022: /* VIS I fcmpne16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x024: /* VIS I fcmple32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x026: /* VIS I fcmpne32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x028: /* VIS I fcmpgt16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02a: /* VIS I fcmpeq16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02c: /* VIS I fcmpgt32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02e: /* VIS I fcmpeq32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x031: /* VIS I fmul8x16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
+ break;
+ case 0x033: /* VIS I fmul8x16au */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
+ break;
+ case 0x035: /* VIS I fmul8x16al */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
+ break;
+ case 0x036: /* VIS I fmul8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
+ break;
+ case 0x037: /* VIS I fmul8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
+ break;
+ case 0x038: /* VIS I fmuld8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
+ break;
+ case 0x039: /* VIS I fmuld8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
+ break;
+ case 0x03a: /* VIS I fpack32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
+ break;
+ case 0x03b: /* VIS I fpack16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x03d: /* VIS I fpackfix */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x03e: /* VIS I pdist */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
+ break;
+ case 0x048: /* VIS I faligndata */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
+ break;
+ case 0x04b: /* VIS I fpmerge */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
+ break;
+ case 0x04c: /* VIS II bshuffle */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
+ break;
+ case 0x04d: /* VIS I fexpand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
+ break;
+ case 0x050: /* VIS I fpadd16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
+ break;
+ case 0x051: /* VIS I fpadd16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
+ break;
+ case 0x052: /* VIS I fpadd32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
+ break;
+ case 0x053: /* VIS I fpadd32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
+ break;
+ case 0x054: /* VIS I fpsub16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
+ break;
+ case 0x055: /* VIS I fpsub16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
+ break;
+ case 0x056: /* VIS I fpsub32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
+ break;
+ case 0x057: /* VIS I fpsub32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
+ break;
+ case 0x060: /* VIS I fzero */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movi_i64(cpu_dst_64, 0);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ case 0x061: /* VIS I fzeros */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_movi_i32(cpu_dst_32, 0);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x062: /* VIS I fnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
+ break;
+ case 0x063: /* VIS I fnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
+ break;
+ case 0x064: /* VIS I fandnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
+ break;
+ case 0x065: /* VIS I fandnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
+ break;
+ case 0x066: /* VIS I fnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
+ break;
+ case 0x067: /* VIS I fnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
+ break;
+ case 0x068: /* VIS I fandnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
+ break;
+ case 0x069: /* VIS I fandnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
+ break;
+ case 0x06a: /* VIS I fnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
+ break;
+ case 0x06b: /* VIS I fnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
+ break;
+ case 0x06c: /* VIS I fxor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
+ break;
+ case 0x06d: /* VIS I fxors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
+ break;
+ case 0x06e: /* VIS I fnand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
+ break;
+ case 0x06f: /* VIS I fnands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
+ break;
+ case 0x070: /* VIS I fand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
+ break;
+ case 0x071: /* VIS I fands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
+ break;
+ case 0x072: /* VIS I fxnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
+ break;
+ case 0x073: /* VIS I fxnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
+ break;
+ case 0x074: /* VIS I fsrc1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x075: /* VIS I fsrc1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x076: /* VIS I fornot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
+ break;
+ case 0x077: /* VIS I fornot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
+ break;
+ case 0x078: /* VIS I fsrc2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x079: /* VIS I fsrc2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x07a: /* VIS I fornot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
+ break;
+ case 0x07b: /* VIS I fornot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
+ break;
+ case 0x07c: /* VIS I for */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
+ break;
+ case 0x07d: /* VIS I fors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
+ break;
+ case 0x07e: /* VIS I fone */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movi_i64(cpu_dst_64, -1);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ case 0x07f: /* VIS I fones */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_movi_i32(cpu_dst_32, -1);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x080: /* VIS I shutdown */
+ case 0x081: /* VIS II siam */
+ // XXX
+ goto illegal_insn;
+ default:
+ goto illegal_insn;
+ }
+#else
+ goto ncp_insn;
+#endif
+ } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
+#ifdef TARGET_SPARC64
+ goto illegal_insn;
+#else
+ goto ncp_insn;
+#endif
+#ifdef TARGET_SPARC64
+ } else if (xop == 0x39) { /* V9 return */
+ save_state(dc);
+ cpu_src1 = get_src1(dc, insn);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ } else {
+ tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+ }
+ }
+ gen_helper_restore(cpu_env);
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ goto jmp_insn;
+#endif
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ } else {
+ tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+ }
+ }
+ switch (xop) {
+ case 0x38: /* jmpl */
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, dc->pc);
+ gen_store_gpr(dc, rd, t);
+
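+ /* Delayed control transfer: pc <- npc, then the computed
+ target becomes the new npc. */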
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ gen_address_mask(dc, cpu_tmp0);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ }
+ goto jmp_insn;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ case 0x39: /* rett, V9 return */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ gen_helper_rett(cpu_env);
+ }
+ goto jmp_insn;
+#endif
+ case 0x3b: /* flush */
+ if (!(dc->def->features & CPU_FEATURE_FLUSH)) {
+ goto unimp_flush;
+ }
+ /* nop */
+ break;
+ case 0x3c: /* save */
+ gen_helper_save(cpu_env);
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+ case 0x3d: /* restore */
+ gen_helper_restore(cpu_env);
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+ case 0x3e: /* V9 done/retry */
+ {
+ switch (rd) {
+ case 0:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_done(cpu_env);
+ goto jmp_insn;
+ case 1:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_retry(cpu_env);
+ goto jmp_insn;
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+ }
+ break;
+ case 3: /* load/store instructions */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+ /* ??? gen_address_mask prevents us from using a source
+ register directly. Always generate a temporary. */
+ TCGv cpu_addr = get_temp_tl(dc);
+
+ tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
+ if (xop == 0x3c || xop == 0x3e) {
+ /* V9 casa/casxa : no offset */
+ } else if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ if (simm != 0) {
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 != 0) {
+ tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
+ }
+ }
+ if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
+ (xop > 0x17 && xop <= 0x1d ) ||
+ (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
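+ /* Integer load group: xop encodes access size and signedness;
+ the alternate-space forms dispatch via gen_ld_asi. */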
+ TCGv cpu_val = gen_dest_gpr(dc, rd);
+
+ switch (xop) {
+ case 0x0: /* ld, V9 lduw, load unsigned word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1: /* ldub, load unsigned byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x2: /* lduh, load unsigned halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x3: /* ldd, load double word */
+ if (rd & 1) {
+ goto illegal_insn;
+ } else {
+ TCGv_i64 t64;
+
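+ /* One 64-bit load, then split: the even register rd
+ receives the most-significant word and rd + 1 the
+ least-significant word. */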
+ gen_address_mask(dc, cpu_addr);
+ t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_trunc_i64_tl(cpu_val, t64);
+ tcg_gen_ext32u_tl(cpu_val, cpu_val);
+ gen_store_gpr(dc, rd + 1, cpu_val);
+ tcg_gen_shri_i64(t64, t64, 32);
+ tcg_gen_trunc_i64_tl(cpu_val, t64);
+ tcg_temp_free_i64(t64);
+ tcg_gen_ext32u_tl(cpu_val, cpu_val);
+ }
+ break;
+ case 0x9: /* ldsb, load signed byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0xa: /* ldsh, load signed halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0xd: /* ldstub */
+ gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x0f: /* swap: atomically swap a register with memory */
+ CHECK_IU_FEATURE(dc, SWAP);
+ cpu_src1 = gen_load_gpr(dc, rd);
+ gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x10: /* lda, V9 lduwa, load word alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+ break;
+ case 0x11: /* lduba, load unsigned byte alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+ break;
+ case 0x12: /* lduha, load unsigned halfword alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+ break;
+ case 0x13: /* ldda, load double word alternate */
+ if (rd & 1) {
+ goto illegal_insn;
+ }
+ gen_ldda_asi(dc, cpu_addr, insn, rd);
+ goto skip_move;
+ case 0x19: /* ldsba, load signed byte alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
+ break;
+ case 0x1a: /* ldsha, load signed halfword alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
+ break;
+ case 0x1d: /* ldstuba -- XXX: should be atomically */
+ gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
+ break;
+ case 0x1f: /* swapa: atomically swap a register with
+ alternate-space memory */
+ CHECK_IU_FEATURE(dc, SWAP);
+ cpu_src1 = gen_load_gpr(dc, rd);
+ gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
+ break;
+
+#ifndef TARGET_SPARC64
+ case 0x30: /* ldc */
+ case 0x31: /* ldcsr */
+ case 0x33: /* lddc */
+ goto ncp_insn;
+#endif
+#endif
+#ifdef TARGET_SPARC64
+ case 0x08: /* V9 ldsw */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x0b: /* V9 ldx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x18: /* V9 ldswa */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
+ break;
+ case 0x1b: /* V9 ldxa */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ break;
+ case 0x2d: /* V9 prefetch, no effect */
+ goto skip_move;
+ case 0x30: /* V9 ldfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
+ gen_update_fprs_dirty(dc, rd);
+ goto skip_move;
+ case 0x33: /* V9 lddfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+ gen_update_fprs_dirty(dc, DFPREG(rd));
+ goto skip_move;
+ case 0x3d: /* V9 prefetcha, no effect */
+ goto skip_move;
+ case 0x32: /* V9 ldqfa */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+ goto skip_move;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_val);
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ skip_move: ;
+#endif
+ } else if (xop >= 0x20 && xop < 0x24) {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ switch (xop) {
+ case 0x20: /* ldf, load fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x21: /* ldfsr, V9 ldxfsr */
+#ifdef TARGET_SPARC64
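+ /* rd distinguishes the two forms: rd == 1 is the 64-bit
+ V9 ldxfsr, rd == 0 the 32-bit ldfsr. */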
+ gen_address_mask(dc, cpu_addr);
+ if (rd == 1) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEQ);
+ gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
+ tcg_temp_free_i64(t64);
+ break;
+ }
+#endif
+ cpu_dst_32 = get_temp_i32(dc);
+ tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
+ break;
+ case 0x22: /* ldqf, load quad fpreg */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+ cpu_src2_64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
+ tcg_temp_free_i64(cpu_src1_64);
+ tcg_temp_free_i64(cpu_src2_64);
+ break;
+ case 0x23: /* lddf, load double fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
+ xop == 0xe || xop == 0x1e) {
+ TCGv cpu_val = gen_load_gpr(dc, rd);
+
+ switch (xop) {
+ case 0x4: /* st, store word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x5: /* stb, store byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x6: /* sth, store halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x7: /* std, store double word */
+ if (rd & 1) {
+ goto illegal_insn;
+ } else {
+ TCGv_i64 t64;
+ TCGv lo;
+
+ gen_address_mask(dc, cpu_addr);
+ lo = gen_load_gpr(dc, rd + 1);
+ t64 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t64, lo, cpu_val);
+ tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x14: /* sta, V9 stwa, store word alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+ break;
+ case 0x15: /* stba, store byte alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+ break;
+ case 0x16: /* stha, store halfword alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+ break;
+ case 0x17: /* stda, store double word alternate */
+ if (rd & 1) {
+ goto illegal_insn;
+ }
+ gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
+ break;
+#endif
+#ifdef TARGET_SPARC64
+ case 0x0e: /* V9 stx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1e: /* V9 stxa */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x23 && xop < 0x28) {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ switch (xop) {
+ case 0x24: /* stf, store fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_32 = gen_load_fpr_F(dc, rd);
+ tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ break;
+ case 0x25: /* stfsr, V9 stxfsr */
+ {
+#ifdef TARGET_SPARC64
+ gen_address_mask(dc, cpu_addr);
+ if (rd == 1) {
+ tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+ break;
+ }
+#endif
+ tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+ }
+ break;
+ case 0x26:
+#ifdef TARGET_SPARC64
+ /* V9 stqf, store quad fpreg */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_address_mask(dc, cpu_addr);
+ /* ??? While stqf only requires 4-byte alignment, it is
+ legal for the cpu to signal the unaligned exception.
+ The OS trap handler is then required to fix it up.
+ For qemu, this avoids having to probe the second page
+ before performing the first write. */
+ cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
+ tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
+ dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+ cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
+ tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
+ dc->mem_idx, MO_TEQ);
+ break;
+#else /* !TARGET_SPARC64 */
+ /* stdfq, store floating point queue */
+#if defined(CONFIG_USER_ONLY)
+ goto illegal_insn;
+#else
+ if (!supervisor(dc)) {
+ goto priv_insn;
+ }
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ goto nfq_insn;
+#endif
+#endif
+ case 0x27: /* stdf, store double fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_64 = gen_load_fpr_D(dc, rd);
+ tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x33 && xop < 0x3f) {
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x34: /* V9 stfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 4, rd);
+ break;
+ case 0x36: /* V9 stqfa */
+ {
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+ }
+ break;
+ case 0x37: /* V9 stdfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+ break;
+ case 0x3e: /* V9 casxa */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
+ break;
+#else
+ case 0x34: /* stc */
+ case 0x35: /* stcsr */
+ case 0x36: /* stdcq */
+ case 0x37: /* stdc */
+ goto ncp_insn;
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x3c: /* V9 or LEON3 casa */
+#ifndef TARGET_SPARC64
+ CHECK_IU_FEATURE(dc, CASA);
+#endif
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else {
+ goto illegal_insn;
+ }
+ }
+ break;
+ }
+ /* default case for non-jump instructions */
+ if (dc->npc == DYNAMIC_PC) {
+ dc->pc = DYNAMIC_PC;
+ gen_op_next_insn();
+ } else if (dc->npc == JUMP_PC) {
+ /* we can do a static jump */
+ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ dc->base.is_jmp = DISAS_NORETURN;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->npc + 4;
+ }
+ jmp_insn:
+ goto egress;
+ illegal_insn:
+ gen_exception(dc, TT_ILL_INSN);
+ goto egress;
+ unimp_flush:
+ gen_exception(dc, TT_UNIMP_FLUSH);
+ goto egress;
+#if !defined(CONFIG_USER_ONLY)
+ priv_insn:
+ gen_exception(dc, TT_PRIV_INSN);
+ goto egress;
+#endif
+ nfpu_insn:
+ gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
+ goto egress;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ nfq_insn:
+ gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+ goto egress;
+#endif
+#ifndef TARGET_SPARC64
+ ncp_insn:
+ gen_exception(dc, TT_NCP_INSN);
+ goto egress;
+#endif
+ egress:
+ if (dc->n_t32 != 0) {
+ int i;
+ for (i = dc->n_t32 - 1; i >= 0; --i) {
+ tcg_temp_free_i32(dc->t32[i]);
+ }
+ dc->n_t32 = 0;
+ }
+ if (dc->n_ttl != 0) {
+ int i;
+ for (i = dc->n_ttl - 1; i >= 0; --i) {
+ tcg_temp_free(dc->ttl[i]);
+ }
+ dc->n_ttl = 0;
+ }
+}
+
+static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUSPARCState *env = cs->env_ptr;
+ int bound;
+
+ dc->pc = dc->base.pc_first;
+ dc->npc = (target_ulong)dc->base.tb->cs_base;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
+ dc->def = &env->def;
+ dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
+ dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
+#ifndef CONFIG_USER_ONLY
+ dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
+#endif
+#ifdef TARGET_SPARC64
+ dc->fprs_dirty = 0;
+ dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
+#ifndef CONFIG_USER_ONLY
+ dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
+#endif
+#endif
+ /*
+ * if we reach a page boundary, we stop generation so that the
+ * PC of a TT_TFAULT exception is always in the right page
+ */
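+ /*
+ * Illustrative example: with 4 KiB pages and pc_first at page offset
+ * 0xff0, bound == (0x1000 - 0xff0) / 4 == 4 insn slots remain.
+ */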
+ bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
+ dc->base.max_insns = MIN(dc->base.max_insns, bound);
+}
+
+static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
+{
+}
+
+static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (dc->npc & JUMP_PC) {
+ assert(dc->jump_pc[1] == dc->pc + 4);
+ tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
+ } else {
+ tcg_gen_insn_start(dc->pc, dc->npc);
+ }
+}
+
+static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUSPARCState *env = cs->env_ptr;
+ unsigned int insn;
+
+ insn = translator_ldl(env, &dc->base, dc->pc);
+ dc->base.pc_next += 4;
+ disas_sparc_insn(dc, insn);
+
+ if (dc->base.is_jmp == DISAS_NORETURN) {
+ return;
+ }
+ if (dc->pc != dc->base.pc_next) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ switch (dc->base.is_jmp) {
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ if (dc->pc != DYNAMIC_PC &&
+ (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
+ /* static PC and NPC: we can use direct chaining */
+ gen_goto_tb(dc, 0, dc->pc, dc->npc);
+ } else {
+ if (dc->pc != DYNAMIC_PC) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ save_npc(dc);
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ break;
+
+ case DISAS_NORETURN:
+ break;
+
+ case DISAS_EXIT:
+ /* Exit TB */
+ save_state(dc);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void sparc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps sparc_tr_ops = {
+ .init_disas_context = sparc_tr_init_disas_context,
+ .tb_start = sparc_tr_tb_start,
+ .insn_start = sparc_tr_insn_start,
+ .translate_insn = sparc_tr_translate_insn,
+ .tb_stop = sparc_tr_tb_stop,
+ .disas_log = sparc_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc = {};
+
+ translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void sparc_tcg_init(void)
+{
+ static const char gregnames[32][4] = {
+ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
+ "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
+ "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
+ "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
+ };
+ static const char fregnames[32][4] = {
+ "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
+ "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
+ "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
+ "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
+ };
+
+ static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
+#ifdef TARGET_SPARC64
+ { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
+ { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
+#else
+ { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
+#endif
+ { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
+ { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
+ };
+
+ static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
+#ifdef TARGET_SPARC64
+ { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
+ { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
+ { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
+ { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
+ "hstick_cmpr" },
+ { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
+ { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
+ { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
+ { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
+ { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
+#endif
+ { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
+ { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
+ { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
+ { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
+ { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
+ { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
+ { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
+ { &cpu_y, offsetof(CPUSPARCState, y), "y" },
+#ifndef CONFIG_USER_ONLY
+ { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
+#endif
+ };
+
+ unsigned int i;
+
+ cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
+ offsetof(CPUSPARCState, regwptr),
+ "regwptr");
+
+ for (i = 0; i < ARRAY_SIZE(r32); ++i) {
+ *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
+ *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
+ }
+
+ cpu_regs[0] = NULL;
+ for (i = 1; i < 8; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUSPARCState, gregs[i]),
+ gregnames[i]);
+ }
+
+ for (i = 8; i < 32; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
+ (i - 8) * sizeof(target_ulong),
+ gregnames[i]);
+ }
+
+ for (i = 0; i < TARGET_DPREGS; i++) {
+ cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUSPARCState, fpr[i]),
+ fregnames[i]);
+ }
+}
+
+void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ target_ulong pc = data[0];
+ target_ulong npc = data[1];
+
+ env->pc = pc;
+ if (npc == DYNAMIC_PC) {
+ /* dynamic NPC: already stored */
+ } else if (npc & JUMP_PC) {
+ /* jump PC: use 'cond' and the jump targets of the translation */
+ if (env->cond) {
+ env->npc = npc & ~3;
+ } else {
+ env->npc = pc + 4;
+ }
+ } else {
+ env->npc = npc;
+ }
+}
diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c
new file mode 100644
index 000000000..f917e5992
--- /dev/null
+++ b/target/sparc/vis_helper.c
@@ -0,0 +1,490 @@
+/*
+ * VIS op helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* This macro uses non-native bit order, i.e. bit 0 is the MSB */
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
+
+/* This macro uses the order in the manuals, i.e. bit 0 is 2^0 (LSB) */
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 63 - (TO), 63 - (FROM))
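+
+/* e.g. GET_FIELD(x, 0, 7) extracts the most significant byte of x,
+ while GET_FIELD_SP(x, 0, 7) extracts the least significant byte. */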
+
+target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
+{
+ return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
+ (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
+ (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
+ (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
+ (((pixel_addr >> 55) & 1) << 4) |
+ (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
+ GET_FIELD_SP(pixel_addr, 11, 12);
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define VIS_B64(n) b[7 - (n)]
+#define VIS_W64(n) w[3 - (n)]
+#define VIS_SW64(n) sw[3 - (n)]
+#define VIS_L64(n) l[1 - (n)]
+#define VIS_B32(n) b[3 - (n)]
+#define VIS_W32(n) w[1 - (n)]
+#else
+#define VIS_B64(n) b[n]
+#define VIS_W64(n) w[n]
+#define VIS_SW64(n) sw[n]
+#define VIS_L64(n) l[n]
+#define VIS_B32(n) b[n]
+#define VIS_W32(n) w[n]
+#endif
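+
+/* Regardless of host byte order, VIS_B64(0) selects the least
+ significant byte of the 64-bit value. */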
+
+typedef union {
+ uint8_t b[8];
+ uint16_t w[4];
+ int16_t sw[4];
+ uint32_t l[2];
+ uint64_t ll;
+ float64 d;
+} VIS64;
+
+typedef union {
+ uint8_t b[4];
+ uint16_t w[2];
+ uint32_t l;
+ float32 f;
+} VIS32;
+
+uint64_t helper_fpmerge(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+
+ s.ll = src1;
+ d.ll = src2;
+
+ /* Reverse calculation order to handle overlap */
+ d.VIS_B64(7) = s.VIS_B64(3);
+ d.VIS_B64(6) = d.VIS_B64(3);
+ d.VIS_B64(5) = s.VIS_B64(2);
+ d.VIS_B64(4) = d.VIS_B64(2);
+ d.VIS_B64(3) = s.VIS_B64(1);
+ d.VIS_B64(2) = d.VIS_B64(1);
+ d.VIS_B64(1) = s.VIS_B64(0);
+ /* d.VIS_B64(0) = d.VIS_B64(0); */
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
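+ /*
+ * Each PMUL computes a 16 x 8 -> 16 bit partial product with
+ * round-to-nearest: if the low byte of the product is above 0x7f,
+ * round up before the final >> 8 discards it.
+ */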
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_L64(r) = tmp;
+
+ /* Reverse calculation order to handle overlap */
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_L64(r) = tmp;
+
+ /* Reverse calculation order to handle overlap */
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
+{
+ VIS32 s;
+ VIS64 d;
+
+ s.l = (uint32_t)src1;
+ d.ll = src2;
+ d.VIS_W64(0) = s.VIS_B32(0) << 4;
+ d.VIS_W64(1) = s.VIS_B32(1) << 4;
+ d.VIS_W64(2) = s.VIS_B32(2) << 4;
+ d.VIS_W64(3) = s.VIS_B32(3) << 4;
+
+ return d.ll;
+}
+
+#define VIS_HELPER(name, F) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
+ d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
+ d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
+ d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
+ \
+ return d.ll; \
+ } \
+ \
+ uint32_t name##16s(uint32_t src1, uint32_t src2) \
+ { \
+ VIS32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
+ d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
+ \
+ return d.l; \
+ } \
+ \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
+ d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
+ \
+ return d.ll; \
+ } \
+ \
+ uint32_t name##32s(uint32_t src1, uint32_t src2) \
+ { \
+ VIS32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.l = F(d.l, s.l); \
+ \
+ return d.l; \
+ }
+
+#define FADD(a, b) ((a) + (b))
+#define FSUB(a, b) ((a) - (b))
+VIS_HELPER(helper_fpadd, FADD)
+VIS_HELPER(helper_fpsub, FSUB)
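+
+/* Each invocation above expands to four helpers, e.g. helper_fpadd16,
+ helper_fpadd16s, helper_fpadd32 and helper_fpadd32s. */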
+
+#define VIS_CMPHELPER(name, F) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \
+ d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \
+ \
+ return d.ll; \
+ } \
+ \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \
+ d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \
+ d.VIS_L64(1) = 0; \
+ \
+ return d.ll; \
+ }
+
+#define FCMPGT(a, b) ((a) > (b))
+#define FCMPEQ(a, b) ((a) == (b))
+#define FCMPLE(a, b) ((a) <= (b))
+#define FCMPNE(a, b) ((a) != (b))
+
+VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
+VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
+VIS_CMPHELPER(helper_fcmple, FCMPLE)
+VIS_CMPHELPER(helper_fcmpne, FCMPNE)
+
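+/* Sum of absolute byte-wise differences, accumulated into 'sum'. */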
+uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2)
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ int s1, s2;
+
+ s1 = (src1 >> (56 - (i * 8))) & 0xff;
+ s2 = (src2 >> (56 - (i * 8))) & 0xff;
+
+ /* Absolute value of difference. */
+ s1 -= s2;
+ if (s1 < 0) {
+ s1 = -s1;
+ }
+
+ sum += s1;
+ }
+
+ return sum;
+}
+
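+/*
+ * Pack four 16-bit fixed-point values into unsigned bytes with
+ * saturation; the GSR scale field positions the binary point. E.g.
+ * with scale 0, 0x0080 packs to 1 and 0xff80 (negative) clamps to 0.
+ */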
+uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0xf;
+ uint32_t ret = 0;
+ int byte;
+
+ for (byte = 0; byte < 4; byte++) {
+ uint32_t val;
+ int16_t src = rs2 >> (byte * 16);
+ int32_t scaled = src << scale;
+ int32_t from_fixed = scaled >> 7;
+
+ val = (from_fixed < 0 ? 0 :
+ from_fixed > 255 ? 255 : from_fixed);
+
+ ret |= val << (8 * byte);
+ }
+
+ return ret;
+}
+
+uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint64_t ret = 0;
+ int word;
+
+ ret = (rs1 << 8) & ~(0x000000ff000000ffULL);
+ for (word = 0; word < 2; word++) {
+ uint64_t val;
+ int32_t src = rs2 >> (word * 32);
+ int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 23;
+
+ val = (from_fixed < 0 ? 0 :
+ (from_fixed > 255) ? 255 : from_fixed);
+
+ ret |= val << (32 * word);
+ }
+
+ return ret;
+}
+
+uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint32_t ret = 0;
+ int word;
+
+ for (word = 0; word < 2; word++) {
+ uint32_t val;
+ int32_t src = rs2 >> (word * 32);
+ int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 16;
+
+ val = (from_fixed < -32768 ? -32768 :
+ from_fixed > 32767 ? 32767 : from_fixed);
+
+ ret |= (val & 0xffff) << (word * 16);
+ }
+
+ return ret;
+}
+
+uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
+{
+ union {
+ uint64_t ll[2];
+ uint8_t b[16];
+ } s;
+ VIS64 r;
+ uint32_t i, mask, host;
+
+ /* Set up S such that we can index across all of the bytes. */
+#ifdef HOST_WORDS_BIGENDIAN
+ s.ll[0] = src1;
+ s.ll[1] = src2;
+ host = 0;
+#else
+ s.ll[1] = src1;
+ s.ll[0] = src2;
+ host = 15;
+#endif
+ mask = gsr >> 32;
+
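+ /* Each 4-bit field of the GSR mask selects one of the 16 source bytes. */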
+ for (i = 0; i < 8; ++i) {
+ unsigned e = (mask >> (28 - i*4)) & 0xf;
+ r.VIS_B64(i) = s.b[e ^ host];
+ }
+
+ return r.ll;
+}
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
new file mode 100644
index 000000000..3a7c0ff94
--- /dev/null
+++ b/target/sparc/win_helper.c
@@ -0,0 +1,455 @@
+/*
+ * Helpers for CWP and PSTATE handling
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "trace.h"
+
+static inline void memcpy32(target_ulong *dst, const target_ulong *src)
+{
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+}
+
+void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
+{
+ /* put the modified wrap registers at their proper location */
+ if (env->cwp == env->nwindows - 1) {
+ memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+ }
+ env->cwp = new_cwp;
+
+ /* put the wrap registers at their temporary location */
+ if (new_cwp == env->nwindows - 1) {
+ memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+ }
+ env->regwptr = env->regbase + (new_cwp * 16);
+}
+
+target_ulong cpu_get_psr(CPUSPARCState *env)
+{
+ helper_compute_psr(env);
+
+#if !defined(TARGET_SPARC64)
+ return env->version | (env->psr & PSR_ICC) |
+ (env->psref ? PSR_EF : 0) |
+ (env->psrpil << 8) |
+ (env->psrs ? PSR_S : 0) |
+ (env->psrps ? PSR_PS : 0) |
+ (env->psret ? PSR_ET : 0) | env->cwp;
+#else
+ return env->psr & PSR_ICC;
+#endif
+}
+
+void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val)
+{
+ env->psr = val & PSR_ICC;
+#if !defined(TARGET_SPARC64)
+ env->psref = (val & PSR_EF) ? 1 : 0;
+ env->psrpil = (val & PSR_PIL) >> 8;
+ env->psrs = (val & PSR_S) ? 1 : 0;
+ env->psrps = (val & PSR_PS) ? 1 : 0;
+ env->psret = (val & PSR_ET) ? 1 : 0;
+#endif
+ env->cc_op = CC_OP_FLAGS;
+#if !defined(TARGET_SPARC64)
+ cpu_set_cwp(env, val & PSR_CWP);
+#endif
+}
+
+/* Called with BQL held */
+void cpu_put_psr(CPUSPARCState *env, target_ulong val)
+{
+ cpu_put_psr_raw(env, val);
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ cpu_check_irqs(env);
+#endif
+}
+
+int cpu_cwp_inc(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp >= env->nwindows)) {
+ cwp -= env->nwindows;
+ }
+ return cwp;
+}
+
+int cpu_cwp_dec(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp < 0)) {
+ cwp += env->nwindows;
+ }
+ return cwp;
+}
+
+#ifndef TARGET_SPARC64
+void helper_rett(CPUSPARCState *env)
+{
+ unsigned int cwp;
+
+ if (env->psret == 1) {
+ cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+ }
+
+ env->psret = 1;
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+ env->psrs = env->psrps;
+}
+
+/* XXX: use another pointer for %iN registers to avoid slow wrap-around
+ handling? */
+void helper_save(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+}
+
+void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
+{
+ if ((new_psr & PSR_CWP) >= env->nwindows) {
+ cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+ } else {
+ /* cpu_put_psr may trigger interrupts, hence BQL */
+ qemu_mutex_lock_iothread();
+ cpu_put_psr(env, new_psr);
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+target_ulong helper_rdpsr(CPUSPARCState *env)
+{
+ return cpu_get_psr(env);
+}
+
+#else
+/* XXX: use another pointer for %iN registers to avoid slow wrap-around
+ handling? */
+void helper_save(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
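+ /*
+ * Raise spill_N_other if OTHERWIN != 0, else spill_N_normal; N is
+ * taken from the corresponding WSTATE field.
+ */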
+ if (env->cansave == 0) {
+ int tt = TT_SPILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ } else {
+ if (env->cleanwin - env->canrestore == 0) {
+ /* XXX Clean windows without trap */
+ cpu_raise_exception_ra(env, TT_CLRWIN, GETPC());
+ } else {
+ env->cansave--;
+ env->canrestore++;
+ cpu_set_cwp(env, cwp);
+ }
+ }
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->canrestore == 0) {
+ int tt = TT_FILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ } else {
+ env->cansave++;
+ env->canrestore--;
+ cpu_set_cwp(env, cwp);
+ }
+}
+
+void helper_flushw(CPUSPARCState *env)
+{
+ if (env->cansave != env->nwindows - 2) {
+ int tt = TT_SPILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ }
+}
+
+void helper_saved(CPUSPARCState *env)
+{
+ env->cansave++;
+ if (env->otherwin == 0) {
+ env->canrestore--;
+ } else {
+ env->otherwin--;
+ }
+}
+
+void helper_restored(CPUSPARCState *env)
+{
+ env->canrestore++;
+ if (env->cleanwin < env->nwindows - 1) {
+ env->cleanwin++;
+ }
+ if (env->otherwin == 0) {
+ env->cansave--;
+ } else {
+ env->otherwin--;
+ }
+}
+
+target_ulong cpu_get_ccr(CPUSPARCState *env)
+{
+ target_ulong psr;
+
+ psr = cpu_get_psr(env);
+
+ return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
+}
+
+void cpu_put_ccr(CPUSPARCState *env, target_ulong val)
+{
+ env->xcc = (val >> 4) << 20;
+ env->psr = (val & 0xf) << 20;
+ CC_OP = CC_OP_FLAGS;
+}
+
+target_ulong cpu_get_cwp64(CPUSPARCState *env)
+{
+ return env->nwindows - 1 - env->cwp;
+}
+
+void cpu_put_cwp64(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp >= env->nwindows || cwp < 0)) {
+ cwp %= env->nwindows;
+ }
+ cpu_set_cwp(env, env->nwindows - 1 - cwp);
+}
+
+target_ulong helper_rdccr(CPUSPARCState *env)
+{
+ return cpu_get_ccr(env);
+}
+
+void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr)
+{
+ cpu_put_ccr(env, new_ccr);
+}
+
+/* CWP handling is reversed in V9, but we still use the V8 register
+ order. */
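+/* e.g. with nwindows == 8, V9 CWP 0 is stored as V8 window 7. */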
+target_ulong helper_rdcwp(CPUSPARCState *env)
+{
+ return cpu_get_cwp64(env);
+}
+
+void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
+{
+ cpu_put_cwp64(env, new_cwp);
+}
+
+static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
+{
+ if (env->def.features & CPU_FEATURE_GL) {
+ return env->glregs + (env->gl & 7) * 8;
+ }
+
+ switch (pstate) {
+ default:
+ trace_win_helper_gregset_error(pstate);
+ /* fall through to normal set of global registers */
+ case 0:
+ return env->bgregs;
+ case PS_AG:
+ return env->agregs;
+ case PS_MG:
+ return env->mgregs;
+ case PS_IG:
+ return env->igregs;
+ }
+}
+
+static inline uint64_t *get_gl_gregset(CPUSPARCState *env, uint32_t gl)
+{
+ return env->glregs + (gl & 7) * 8;
+}
+
+/* Switch global register bank */
+void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl)
+{
+ uint64_t *src, *dst;
+ src = get_gl_gregset(env, new_gl);
+ dst = get_gl_gregset(env, env->gl);
+
+ if (src != dst) {
+ memcpy32(dst, env->gregs);
+ memcpy32(env->gregs, src);
+ }
+}
+
+void helper_wrgl(CPUSPARCState *env, target_ulong new_gl)
+{
+ cpu_gl_switch_gregs(env, new_gl & 7);
+ env->gl = new_gl & 7;
+}
+
+void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
+{
+ uint32_t pstate_regs, new_pstate_regs;
+ uint64_t *src, *dst;
+
+ if (env->def.features & CPU_FEATURE_GL) {
+ /* PS_AG, IG and MG are not implemented in this case */
+ new_pstate &= ~(PS_AG | PS_IG | PS_MG);
+ env->pstate = new_pstate;
+ return;
+ }
+
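+ /* 0xc01 == PS_IG | PS_MG | PS_AG, the bits that select a gregset */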
+ pstate_regs = env->pstate & 0xc01;
+ new_pstate_regs = new_pstate & 0xc01;
+
+ if (new_pstate_regs != pstate_regs) {
+ trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs);
+
+ /* Switch global register bank */
+ src = get_gregset(env, new_pstate_regs);
+ dst = get_gregset(env, pstate_regs);
+ memcpy32(dst, env->gregs);
+ memcpy32(env->gregs, src);
+ } else {
+ trace_win_helper_no_switch_pstate(new_pstate_regs);
+ }
+ env->pstate = new_pstate;
+}
+
+void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)
+{
+ cpu_change_pstate(env, new_state & 0xf3f);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ qemu_mutex_lock_iothread();
+ cpu_check_irqs(env);
+ qemu_mutex_unlock_iothread();
+ }
+#endif
+}
+
+void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
+{
+#if !defined(CONFIG_USER_ONLY)
+ trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil);
+
+ env->psrpil = new_pil;
+
+ if (cpu_interrupts_enabled(env)) {
+ qemu_mutex_lock_iothread();
+ cpu_check_irqs(env);
+ qemu_mutex_unlock_iothread();
+ }
+#endif
+}
+
+void helper_done(CPUSPARCState *env)
+{
+ trap_state *tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tnpc;
+ env->npc = tsptr->tnpc + 4;
+ cpu_put_ccr(env, tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+ cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ if (cpu_has_hypervisor(env)) {
+ uint32_t new_gl = (tsptr->tstate >> 40) & 7;
+ env->hpstate = env->htstate[env->tl];
+ cpu_gl_switch_gregs(env, new_gl);
+ env->gl = new_gl;
+ }
+ env->tl--;
+
+ trace_win_helper_done(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ qemu_mutex_lock_iothread();
+ cpu_check_irqs(env);
+ qemu_mutex_unlock_iothread();
+ }
+#endif
+}
+
+void helper_retry(CPUSPARCState *env)
+{
+ trap_state *tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tpc;
+ env->npc = tsptr->tnpc;
+ cpu_put_ccr(env, tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+ cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ if (cpu_has_hypervisor(env)) {
+ uint32_t new_gl = (tsptr->tstate >> 40) & 7;
+ env->hpstate = env->htstate[env->tl];
+ cpu_gl_switch_gregs(env, new_gl);
+ env->gl = new_gl;
+ }
+ env->tl--;
+
+ trace_win_helper_retry(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ qemu_mutex_lock_iothread();
+ cpu_check_irqs(env);
+ qemu_mutex_unlock_iothread();
+ }
+#endif
+}
+#endif
diff --git a/target/tricore/Kconfig b/target/tricore/Kconfig
new file mode 100644
index 000000000..931340930
--- /dev/null
+++ b/target/tricore/Kconfig
@@ -0,0 +1,2 @@
+config TRICORE
+ bool
diff --git a/target/tricore/cpu-param.h b/target/tricore/cpu-param.h
new file mode 100644
index 000000000..cf5d9af89
--- /dev/null
+++ b/target/tricore/cpu-param.h
@@ -0,0 +1,17 @@
+/*
+ * TriCore cpu parameters for qemu.
+ *
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ * SPDX-License-Identifier: LGPL-2.1+
+ */
+
+#ifndef TRICORE_CPU_PARAM_H
+#define TRICORE_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 14
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define NB_MMU_MODES 3
+
+#endif
diff --git a/target/tricore/cpu-qom.h b/target/tricore/cpu-qom.h
new file mode 100644
index 000000000..59bfd01bb
--- /dev/null
+++ b/target/tricore/cpu-qom.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_TRICORE_CPU_QOM_H
+#define QEMU_TRICORE_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+
+#define TYPE_TRICORE_CPU "tricore-cpu"
+
+OBJECT_DECLARE_TYPE(TriCoreCPU, TriCoreCPUClass,
+ TRICORE_CPU)
+
+struct TriCoreCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+};
+
+
+#endif /* QEMU_TRICORE_CPU_QOM_H */
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
new file mode 100644
index 000000000..b95682b7f
--- /dev/null
+++ b/target/tricore/cpu.c
@@ -0,0 +1,205 @@
+/*
+ * TriCore emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/error-report.h"
+
+static inline void set_feature(CPUTriCoreState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+static gchar *tricore_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("tricore");
+}
+
+static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ env->PC = value & ~(target_ulong)1;
+}
+
+static void tricore_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ env->PC = tb->pc;
+}
+
+static void tricore_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ TriCoreCPU *cpu = TRICORE_CPU(s);
+ TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu);
+ CPUTriCoreState *env = &cpu->env;
+
+ tcc->parent_reset(dev);
+
+ cpu_state_reset(env);
+}
+
+static bool tricore_cpu_has_work(CPUState *cs)
+{
+ return true;
+}
+
+static void tricore_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ TriCoreCPU *cpu = TRICORE_CPU(dev);
+ TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(dev);
+ CPUTriCoreState *env = &cpu->env;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* Some features automatically imply others */
+ if (tricore_feature(env, TRICORE_FEATURE_161)) {
+ set_feature(env, TRICORE_FEATURE_16);
+ }
+
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ set_feature(env, TRICORE_FEATURE_131);
+ }
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ set_feature(env, TRICORE_FEATURE_13);
+ }
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ tcc->parent_realize(dev, errp);
+}
+
+static void tricore_cpu_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ cpu_set_cpustate_pointers(cpu);
+}
+
+static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = g_strdup_printf(TRICORE_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void tc1796_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_13);
+}
+
+static void tc1797_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_131);
+}
+
+static void tc27x_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_161);
+}
+
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps tricore_sysemu_ops = {
+ .get_phys_page_debug = tricore_cpu_get_phys_page_debug,
+};
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps tricore_tcg_ops = {
+ .initialize = tricore_tcg_init,
+ .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
+ .tlb_fill = tricore_cpu_tlb_fill,
+};
+
+static void tricore_cpu_class_init(ObjectClass *c, void *data)
+{
+ TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_parent_realize(dc, tricore_cpu_realizefn,
+ &mcc->parent_realize);
+
+ device_class_set_parent_reset(dc, tricore_cpu_reset, &mcc->parent_reset);
+ cc->class_by_name = tricore_cpu_class_by_name;
+ cc->has_work = tricore_cpu_has_work;
+
+ cc->gdb_read_register = tricore_cpu_gdb_read_register;
+ cc->gdb_write_register = tricore_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 44;
+ cc->gdb_arch_name = tricore_gdb_arch_name;
+
+ cc->dump_state = tricore_cpu_dump_state;
+ cc->set_pc = tricore_cpu_set_pc;
+ cc->sysemu_ops = &tricore_sysemu_ops;
+ cc->tcg_ops = &tricore_tcg_ops;
+}
+
+#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \
+ { \
+ .parent = TYPE_TRICORE_CPU, \
+ .instance_init = initfn, \
+ .name = TRICORE_CPU_TYPE_NAME(cpu_model), \
+ }
+
+static const TypeInfo tricore_cpu_type_infos[] = {
+ {
+ .name = TYPE_TRICORE_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(TriCoreCPU),
+ .instance_init = tricore_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(TriCoreCPUClass),
+ .class_init = tricore_cpu_class_init,
+ },
+ DEFINE_TRICORE_CPU_TYPE("tc1796", tc1796_initfn),
+ DEFINE_TRICORE_CPU_TYPE("tc1797", tc1797_initfn),
+ DEFINE_TRICORE_CPU_TYPE("tc27x", tc27x_initfn),
+};
+
+DEFINE_TYPES(tricore_cpu_type_infos)
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
new file mode 100644
index 000000000..c461387e7
--- /dev/null
+++ b/target/tricore/cpu.h
@@ -0,0 +1,397 @@
+/*
+ * TriCore emulation for qemu: main CPU struct.
+ *
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TRICORE_CPU_H
+#define TRICORE_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "tricore-defs.h"
+
+struct tricore_boot_info;
+
+typedef struct tricore_def_t tricore_def_t;
+
+typedef struct CPUTriCoreState CPUTriCoreState;
+struct CPUTriCoreState {
+ /* GPR Register */
+ uint32_t gpr_a[16];
+ uint32_t gpr_d[16];
+ /* CSFR Register */
+ uint32_t PCXI;
+/* Frequently accessed PSW_USB bits are stored separately for efficiency.
+ This contains all the other bits. Use psw_{read,write} to access
+ the whole PSW. */
+ uint32_t PSW;
+
+ /* PSW flag cache for faster execution */
+ uint32_t PSW_USB_C;
+ uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */
+ uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */
+ uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */
+ uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. */
+
+ uint32_t PC;
+ uint32_t SYSCON;
+ uint32_t CPU_ID;
+ uint32_t CORE_ID;
+ uint32_t BIV;
+ uint32_t BTV;
+ uint32_t ISP;
+ uint32_t ICR;
+ uint32_t FCX;
+ uint32_t LCX;
+ uint32_t COMPAT;
+
+ /* Mem Protection Register */
+ uint32_t DPR0_0L;
+ uint32_t DPR0_0U;
+ uint32_t DPR0_1L;
+ uint32_t DPR0_1U;
+ uint32_t DPR0_2L;
+ uint32_t DPR0_2U;
+ uint32_t DPR0_3L;
+ uint32_t DPR0_3U;
+
+ uint32_t DPR1_0L;
+ uint32_t DPR1_0U;
+ uint32_t DPR1_1L;
+ uint32_t DPR1_1U;
+ uint32_t DPR1_2L;
+ uint32_t DPR1_2U;
+ uint32_t DPR1_3L;
+ uint32_t DPR1_3U;
+
+ uint32_t DPR2_0L;
+ uint32_t DPR2_0U;
+ uint32_t DPR2_1L;
+ uint32_t DPR2_1U;
+ uint32_t DPR2_2L;
+ uint32_t DPR2_2U;
+ uint32_t DPR2_3L;
+ uint32_t DPR2_3U;
+
+ uint32_t DPR3_0L;
+ uint32_t DPR3_0U;
+ uint32_t DPR3_1L;
+ uint32_t DPR3_1U;
+ uint32_t DPR3_2L;
+ uint32_t DPR3_2U;
+ uint32_t DPR3_3L;
+ uint32_t DPR3_3U;
+
+ uint32_t CPR0_0L;
+ uint32_t CPR0_0U;
+ uint32_t CPR0_1L;
+ uint32_t CPR0_1U;
+ uint32_t CPR0_2L;
+ uint32_t CPR0_2U;
+ uint32_t CPR0_3L;
+ uint32_t CPR0_3U;
+
+ uint32_t CPR1_0L;
+ uint32_t CPR1_0U;
+ uint32_t CPR1_1L;
+ uint32_t CPR1_1U;
+ uint32_t CPR1_2L;
+ uint32_t CPR1_2U;
+ uint32_t CPR1_3L;
+ uint32_t CPR1_3U;
+
+ uint32_t CPR2_0L;
+ uint32_t CPR2_0U;
+ uint32_t CPR2_1L;
+ uint32_t CPR2_1U;
+ uint32_t CPR2_2L;
+ uint32_t CPR2_2U;
+ uint32_t CPR2_3L;
+ uint32_t CPR2_3U;
+
+ uint32_t CPR3_0L;
+ uint32_t CPR3_0U;
+ uint32_t CPR3_1L;
+ uint32_t CPR3_1U;
+ uint32_t CPR3_2L;
+ uint32_t CPR3_2U;
+ uint32_t CPR3_3L;
+ uint32_t CPR3_3U;
+
+ uint32_t DPM0;
+ uint32_t DPM1;
+ uint32_t DPM2;
+ uint32_t DPM3;
+
+ uint32_t CPM0;
+ uint32_t CPM1;
+ uint32_t CPM2;
+ uint32_t CPM3;
+
+ /* Memory Management Registers */
+ uint32_t MMU_CON;
+ uint32_t MMU_ASI;
+ uint32_t MMU_TVA;
+ uint32_t MMU_TPA;
+ uint32_t MMU_TPX;
+ uint32_t MMU_TFA;
+ /* 1.3.1 only { */
+ uint32_t BMACON;
+ uint32_t SMACON;
+ uint32_t DIEAR;
+ uint32_t DIETR;
+ uint32_t CCDIER;
+ uint32_t MIECON;
+ uint32_t PIEAR;
+ uint32_t PIETR;
+ uint32_t CCPIER;
+ /* } end 1.3.1 only */
+ /* Debug Registers */
+ uint32_t DBGSR;
+ uint32_t EXEVT;
+ uint32_t CREVT;
+ uint32_t SWEVT;
+ uint32_t TR0EVT;
+ uint32_t TR1EVT;
+ uint32_t DMS;
+ uint32_t DCX;
+ uint32_t DBGTCR;
+ uint32_t CCTRL;
+ uint32_t CCNT;
+ uint32_t ICNT;
+ uint32_t M1CNT;
+ uint32_t M2CNT;
+ uint32_t M3CNT;
+ /* Floating Point Registers */
+ float_status fp_status;
+ /* QEMU */
+ int error_code;
+ uint32_t hflags; /* CPU State */
+
+ /* Internal CPU feature flags. */
+ uint64_t features;
+
+ const tricore_def_t *cpu_model;
+ void *irq[8];
+ struct QEMUTimer *timer; /* Internal timer */
+};
+
+/**
+ * TriCoreCPU:
+ * @env: #CPUTriCoreState
+ *
+ * A TriCore CPU.
+ */
+struct TriCoreCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUTriCoreState env;
+};
+
+
+hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void tricore_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+
+
+#define MASK_PCXI_PCPN 0xff000000
+#define MASK_PCXI_PIE_1_3 0x00800000
+#define MASK_PCXI_PIE_1_6 0x00200000
+#define MASK_PCXI_UL 0x00400000
+#define MASK_PCXI_PCXS 0x000f0000
+#define MASK_PCXI_PCXO 0x0000ffff
+
+#define MASK_PSW_USB 0xff000000
+#define MASK_USB_C 0x80000000
+#define MASK_USB_V 0x40000000
+#define MASK_USB_SV 0x20000000
+#define MASK_USB_AV 0x10000000
+#define MASK_USB_SAV 0x08000000
+#define MASK_PSW_PRS 0x00003000
+#define MASK_PSW_IO 0x00000c00
+#define MASK_PSW_IS 0x00000200
+#define MASK_PSW_GW 0x00000100
+#define MASK_PSW_CDE 0x00000080
+#define MASK_PSW_CDC 0x0000007f
+#define MASK_PSW_FPU_RM 0x3000000
+
+#define MASK_SYSCON_PRO_TEN 0x2
+#define MASK_SYSCON_FCD_SF 0x1
+
+#define MASK_CPUID_MOD 0xffff0000
+#define MASK_CPUID_MOD_32B 0x0000ff00
+#define MASK_CPUID_REV 0x000000ff
+
+#define MASK_ICR_PIPN 0x00ff0000
+#define MASK_ICR_IE_1_3 0x00000100
+#define MASK_ICR_IE_1_6 0x00008000
+#define MASK_ICR_CCPN 0x000000ff
+
+#define MASK_FCX_FCXS 0x000f0000
+#define MASK_FCX_FCXO 0x0000ffff
+
+#define MASK_LCX_LCXS 0x000f0000
+#define MASK_LCX_LCX0 0x0000ffff
+
+#define MASK_DBGSR_DE 0x1
+#define MASK_DBGSR_HALT 0x6
+#define MASK_DBGSR_SUSP 0x10
+#define MASK_DBGSR_PREVSUSP 0x20
+#define MASK_DBGSR_PEVT 0x40
+#define MASK_DBGSR_EVTSRC 0x1f00
+
+#define TRICORE_HFLAG_KUU 0x3
+#define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */
+#define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */
+#define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */
+
+enum tricore_features {
+ TRICORE_FEATURE_13,
+ TRICORE_FEATURE_131,
+ TRICORE_FEATURE_16,
+ TRICORE_FEATURE_161,
+};
+
+static inline int tricore_feature(CPUTriCoreState *env, int feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+/* TriCore trap classes */
+enum {
+ TRAPC_NONE = -1,
+ TRAPC_MMU = 0,
+ TRAPC_PROT = 1,
+ TRAPC_INSN_ERR = 2,
+ TRAPC_CTX_MNG = 3,
+ TRAPC_SYSBUS = 4,
+ TRAPC_ASSERT = 5,
+ TRAPC_SYSCALL = 6,
+ TRAPC_NMI = 7,
+ TRAPC_IRQ = 8
+};
+
+/* Class 0 TIN */
+enum {
+ TIN0_VAF = 0,
+ TIN0_VAP = 1,
+};
+
+/* Class 1 TIN */
+enum {
+ TIN1_PRIV = 1,
+ TIN1_MPR = 2,
+ TIN1_MPW = 3,
+ TIN1_MPX = 4,
+ TIN1_MPP = 5,
+ TIN1_MPN = 6,
+ TIN1_GRWP = 7,
+};
+
+/* Class 2 TIN */
+enum {
+ TIN2_IOPC = 1,
+ TIN2_UOPC = 2,
+ TIN2_OPD = 3,
+ TIN2_ALN = 4,
+ TIN2_MEM = 5,
+};
+
+/* Class 3 TIN */
+enum {
+ TIN3_FCD = 1,
+ TIN3_CDO = 2,
+ TIN3_CDU = 3,
+ TIN3_FCU = 4,
+ TIN3_CSU = 5,
+ TIN3_CTYP = 6,
+ TIN3_NEST = 7,
+};
+
+/* Class 4 TIN */
+enum {
+ TIN4_PSE = 1,
+ TIN4_DSE = 2,
+ TIN4_DAE = 3,
+ TIN4_CAE = 4,
+ TIN4_PIE = 5,
+ TIN4_DIE = 6,
+};
+
+/* Class 5 TIN */
+enum {
+ TIN5_OVF = 1,
+ TIN5_SOVF = 1,
+};
+
+/* Class 6 TIN is always TIN6_SYS */
+
+/* Class 7 TIN */
+enum {
+ TIN7_NMI = 0,
+};
+
+uint32_t psw_read(CPUTriCoreState *env);
+void psw_write(CPUTriCoreState *env, uint32_t val);
+int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n);
+int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n);
+
+void fpu_set_state(CPUTriCoreState *env);
+
+#define MMU_USER_IDX 2
+
+void tricore_cpu_list(void);
+
+#define cpu_list tricore_cpu_list
+
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
+{
+ return 0;
+}
+
+typedef CPUTriCoreState CPUArchState;
+typedef TriCoreCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+void cpu_state_reset(CPUTriCoreState *s);
+void tricore_tcg_init(void);
+
+static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->PC;
+ *cs_base = 0;
+ *flags = 0;
+}
+
+#define TRICORE_CPU_TYPE_SUFFIX "-" TYPE_TRICORE_CPU
+#define TRICORE_CPU_TYPE_NAME(model) model TRICORE_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
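+/* e.g. TRICORE_CPU_TYPE_NAME("tc1796") expands to "tc1796-tricore-cpu" */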
+
+/* helpers.c */
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+#endif /* TRICORE_CPU_H */
diff --git a/target/tricore/csfr.def b/target/tricore/csfr.def
new file mode 100644
index 000000000..ff004cbdd
--- /dev/null
+++ b/target/tricore/csfr.def
@@ -0,0 +1,125 @@
+/* A(ll) access permitted
+ R(ead only) access
+ E(nd init protected) access
+
+ A|R|E(offset, register, feature introducing reg)
+
+ NOTE: PSW is handled as a special case in gen_mtcr/mfcr */
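+
+/* Illustrative (hypothetical) consumer of this X-macro list; the actual
+ readers live in translate.c:
+
+ #define A(ADDR, REG, FEATURE) case ADDR: value = env->REG; break;
+ #define R A
+ #define E A
+ #include "csfr.def"
+ #undef A
+ #undef R
+ #undef E
+*/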
+
+A(0xfe00, PCXI, TRICORE_FEATURE_13)
+A(0xfe08, PC, TRICORE_FEATURE_13)
+A(0xfe14, SYSCON, TRICORE_FEATURE_13)
+R(0xfe18, CPU_ID, TRICORE_FEATURE_13)
+R(0xfe1c, CORE_ID, TRICORE_FEATURE_161)
+E(0xfe20, BIV, TRICORE_FEATURE_13)
+E(0xfe24, BTV, TRICORE_FEATURE_13)
+E(0xfe28, ISP, TRICORE_FEATURE_13)
+A(0xfe2c, ICR, TRICORE_FEATURE_13)
+A(0xfe38, FCX, TRICORE_FEATURE_13)
+A(0xfe3c, LCX, TRICORE_FEATURE_13)
+E(0x9400, COMPAT, TRICORE_FEATURE_131)
+/* memory protection register */
+A(0xC000, DPR0_0L, TRICORE_FEATURE_13)
+A(0xC004, DPR0_0U, TRICORE_FEATURE_13)
+A(0xC008, DPR0_1L, TRICORE_FEATURE_13)
+A(0xC00C, DPR0_1U, TRICORE_FEATURE_13)
+A(0xC010, DPR0_2L, TRICORE_FEATURE_13)
+A(0xC014, DPR0_2U, TRICORE_FEATURE_13)
+A(0xC018, DPR0_3L, TRICORE_FEATURE_13)
+A(0xC01C, DPR0_3U, TRICORE_FEATURE_13)
+A(0xC400, DPR1_0L, TRICORE_FEATURE_13)
+A(0xC404, DPR1_0U, TRICORE_FEATURE_13)
+A(0xC408, DPR1_1L, TRICORE_FEATURE_13)
+A(0xC40C, DPR1_1U, TRICORE_FEATURE_13)
+A(0xC410, DPR1_2L, TRICORE_FEATURE_13)
+A(0xC414, DPR1_2U, TRICORE_FEATURE_13)
+A(0xC418, DPR1_3L, TRICORE_FEATURE_13)
+A(0xC41C, DPR1_3U, TRICORE_FEATURE_13)
+A(0xC800, DPR2_0L, TRICORE_FEATURE_13)
+A(0xC804, DPR2_0U, TRICORE_FEATURE_13)
+A(0xC808, DPR2_1L, TRICORE_FEATURE_13)
+A(0xC80C, DPR2_1U, TRICORE_FEATURE_13)
+A(0xC810, DPR2_2L, TRICORE_FEATURE_13)
+A(0xC814, DPR2_2U, TRICORE_FEATURE_13)
+A(0xC818, DPR2_3L, TRICORE_FEATURE_13)
+A(0xC81C, DPR2_3U, TRICORE_FEATURE_13)
+A(0xCC00, DPR3_0L, TRICORE_FEATURE_13)
+A(0xCC04, DPR3_0U, TRICORE_FEATURE_13)
+A(0xCC08, DPR3_1L, TRICORE_FEATURE_13)
+A(0xCC0C, DPR3_1U, TRICORE_FEATURE_13)
+A(0xCC10, DPR3_2L, TRICORE_FEATURE_13)
+A(0xCC14, DPR3_2U, TRICORE_FEATURE_13)
+A(0xCC18, DPR3_3L, TRICORE_FEATURE_13)
+A(0xCC1C, DPR3_3U, TRICORE_FEATURE_13)
+A(0xD000, CPR0_0L, TRICORE_FEATURE_13)
+A(0xD004, CPR0_0U, TRICORE_FEATURE_13)
+A(0xD008, CPR0_1L, TRICORE_FEATURE_13)
+A(0xD00C, CPR0_1U, TRICORE_FEATURE_13)
+A(0xD010, CPR0_2L, TRICORE_FEATURE_13)
+A(0xD014, CPR0_2U, TRICORE_FEATURE_13)
+A(0xD018, CPR0_3L, TRICORE_FEATURE_13)
+A(0xD01C, CPR0_3U, TRICORE_FEATURE_13)
+A(0xD400, CPR1_0L, TRICORE_FEATURE_13)
+A(0xD404, CPR1_0U, TRICORE_FEATURE_13)
+A(0xD408, CPR1_1L, TRICORE_FEATURE_13)
+A(0xD40C, CPR1_1U, TRICORE_FEATURE_13)
+A(0xD410, CPR1_2L, TRICORE_FEATURE_13)
+A(0xD414, CPR1_2U, TRICORE_FEATURE_13)
+A(0xD418, CPR1_3L, TRICORE_FEATURE_13)
+A(0xD41C, CPR1_3U, TRICORE_FEATURE_13)
+A(0xD800, CPR2_0L, TRICORE_FEATURE_13)
+A(0xD804, CPR2_0U, TRICORE_FEATURE_13)
+A(0xD808, CPR2_1L, TRICORE_FEATURE_13)
+A(0xD80C, CPR2_1U, TRICORE_FEATURE_13)
+A(0xD810, CPR2_2L, TRICORE_FEATURE_13)
+A(0xD814, CPR2_2U, TRICORE_FEATURE_13)
+A(0xD818, CPR2_3L, TRICORE_FEATURE_13)
+A(0xD81C, CPR2_3U, TRICORE_FEATURE_13)
+A(0xDC00, CPR3_0L, TRICORE_FEATURE_13)
+A(0xDC04, CPR3_0U, TRICORE_FEATURE_13)
+A(0xDC08, CPR3_1L, TRICORE_FEATURE_13)
+A(0xDC0C, CPR3_1U, TRICORE_FEATURE_13)
+A(0xDC10, CPR3_2L, TRICORE_FEATURE_13)
+A(0xDC14, CPR3_2U, TRICORE_FEATURE_13)
+A(0xDC18, CPR3_3L, TRICORE_FEATURE_13)
+A(0xDC1C, CPR3_3U, TRICORE_FEATURE_13)
+A(0xE000, DPM0, TRICORE_FEATURE_13)
+A(0xE080, DPM1, TRICORE_FEATURE_13)
+A(0xE100, DPM2, TRICORE_FEATURE_13)
+A(0xE180, DPM3, TRICORE_FEATURE_13)
+A(0xE200, CPM0, TRICORE_FEATURE_13)
+A(0xE280, CPM1, TRICORE_FEATURE_13)
+A(0xE300, CPM2, TRICORE_FEATURE_13)
+A(0xE380, CPM3, TRICORE_FEATURE_13)
+/* memory management registers */
+A(0x8000, MMU_CON, TRICORE_FEATURE_13)
+A(0x8004, MMU_ASI, TRICORE_FEATURE_13)
+A(0x800C, MMU_TVA, TRICORE_FEATURE_13)
+A(0x8010, MMU_TPA, TRICORE_FEATURE_13)
+A(0x8014, MMU_TPX, TRICORE_FEATURE_13)
+A(0x8018, MMU_TFA, TRICORE_FEATURE_13)
+E(0x9004, BMACON, TRICORE_FEATURE_131)
+E(0x900C, SMACON, TRICORE_FEATURE_131)
+A(0x9020, DIEAR, TRICORE_FEATURE_131)
+A(0x9024, DIETR, TRICORE_FEATURE_131)
+A(0x9028, CCDIER, TRICORE_FEATURE_131)
+E(0x9044, MIECON, TRICORE_FEATURE_131)
+A(0x9210, PIEAR, TRICORE_FEATURE_131)
+A(0x9214, PIETR, TRICORE_FEATURE_131)
+A(0x9218, CCPIER, TRICORE_FEATURE_131)
+/* debug registers */
+A(0xFD00, DBGSR, TRICORE_FEATURE_13)
+A(0xFD08, EXEVT, TRICORE_FEATURE_13)
+A(0xFD0C, CREVT, TRICORE_FEATURE_13)
+A(0xFD10, SWEVT, TRICORE_FEATURE_13)
+A(0xFD20, TR0EVT, TRICORE_FEATURE_13)
+A(0xFD24, TR1EVT, TRICORE_FEATURE_13)
+A(0xFD40, DMS, TRICORE_FEATURE_13)
+A(0xFD44, DCX, TRICORE_FEATURE_13)
+A(0xFD48, DBGTCR, TRICORE_FEATURE_131)
+A(0xFC00, CCTRL, TRICORE_FEATURE_131)
+A(0xFC04, CCNT, TRICORE_FEATURE_131)
+A(0xFC08, ICNT, TRICORE_FEATURE_131)
+A(0xFC0C, M1CNT, TRICORE_FEATURE_131)
+A(0xFC10, M2CNT, TRICORE_FEATURE_131)
+A(0xFC14, M3CNT, TRICORE_FEATURE_131)
diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c
new file mode 100644
index 000000000..cb7ee7dd3
--- /dev/null
+++ b/target/tricore/fpu_helper.c
@@ -0,0 +1,471 @@
+/*
+ * TriCore emulation for qemu: fpu helper.
+ *
+ * Copyright (c) 2016 Bastian Koppelmann University of Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define QUIET_NAN 0x7fc00000
+#define ADD_NAN 0x7fc00001
+#define SQRT_NAN 0x7fc00004
+#define DIV_NAN 0x7fc00008
+#define MUL_NAN 0x7fc00002
+#define FPU_FS PSW_USB_C
+#define FPU_FI PSW_USB_V
+#define FPU_FV PSW_USB_SV
+#define FPU_FZ PSW_USB_AV
+#define FPU_FU PSW_USB_SAV
+
+#define float32_sqrt_nan make_float32(SQRT_NAN)
+#define float32_quiet_nan make_float32(QUIET_NAN)
+
+/* we don't care about input_denormal */
+static inline uint8_t f_get_excp_flags(CPUTriCoreState *env)
+{
+ return get_float_exception_flags(&env->fp_status)
+ & (float_flag_invalid
+ | float_flag_overflow
+ | float_flag_underflow
+ | float_flag_output_denormal
+ | float_flag_divbyzero
+ | float_flag_inexact);
+}
+
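+/*
+ * Pick the TriCore-specific NaN payload for a fused multiply-add whose
+ * result is NaN: QUIET_NAN if any input was already NaN, MUL_NAN for
+ * 0 * inf, and ADD_NAN for an effective inf - inf (signs taken after
+ * the optional negation of arg3).
+ */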
+static inline float32 f_maddsub_nan_result(float32 arg1, float32 arg2,
+ float32 arg3, float32 result,
+ uint32_t muladd_negate_c)
+{
+ uint32_t aSign, bSign, cSign;
+ uint32_t aExp, bExp, cExp;
+
+ if (float32_is_any_nan(arg1) || float32_is_any_nan(arg2) ||
+ float32_is_any_nan(arg3)) {
+ return QUIET_NAN;
+ } else if (float32_is_infinity(arg1) && float32_is_zero(arg2)) {
+ return MUL_NAN;
+ } else if (float32_is_zero(arg1) && float32_is_infinity(arg2)) {
+ return MUL_NAN;
+ } else {
+ aSign = arg1 >> 31;
+ bSign = arg2 >> 31;
+ cSign = arg3 >> 31;
+
+ aExp = (arg1 >> 23) & 0xff;
+ bExp = (arg2 >> 23) & 0xff;
+ cExp = (arg3 >> 23) & 0xff;
+
+ if (muladd_negate_c) {
+ cSign ^= 1;
+ }
+ if (((aExp == 0xff) || (bExp == 0xff)) && (cExp == 0xff)) {
+ if (aSign ^ bSign ^ cSign) {
+ return ADD_NAN;
+ }
+ }
+ }
+
+ return result;
+}
+
+static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags)
+{
+ uint8_t some_excp = 0;
+ set_float_exception_flags(0, &env->fp_status);
+
+ if (flags & float_flag_invalid) {
+ env->FPU_FI = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_overflow) {
+ env->FPU_FV = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_underflow || flags & float_flag_output_denormal) {
+ env->FPU_FU = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_divbyzero) {
+ env->FPU_FZ = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_inexact || flags & float_flag_output_denormal) {
+ env->PSW |= 1 << 26;
+ some_excp = 1;
+ }
+
+ env->FPU_FS = some_excp;
+}
+
+#define FADD_SUB(op) \
+uint32_t helper_f##op(CPUTriCoreState *env, uint32_t r1, uint32_t r2) \
+{ \
+ float32 arg1 = make_float32(r1); \
+ float32 arg2 = make_float32(r2); \
+ uint32_t flags; \
+ float32 f_result; \
+ \
+ f_result = float32_##op(arg2, arg1, &env->fp_status); \
+ flags = f_get_excp_flags(env); \
+ if (flags) { \
+ /* If the output is a NaN, but the inputs aren't, \
+ we return a unique value. */ \
+ if ((flags & float_flag_invalid) \
+ && !float32_is_any_nan(arg1) \
+ && !float32_is_any_nan(arg2)) { \
+ f_result = ADD_NAN; \
+ } \
+ f_update_psw_flags(env, flags); \
+ } else { \
+ env->FPU_FS = 0; \
+ } \
+ return (uint32_t)f_result; \
+}
+FADD_SUB(add)
+FADD_SUB(sub)
+
+uint32_t helper_fmul(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 f_result;
+
+ f_result = float32_mul(arg1, arg2, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ /* If the output is a NaN, but the inputs aren't,
+ we return a unique value. */
+ if ((flags & float_flag_invalid)
+ && !float32_is_any_nan(arg1)
+ && !float32_is_any_nan(arg2)) {
+ f_result = MUL_NAN;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+
+}
+
+/*
+ * Target TriCore QSEED.F significand Lookup Table
+ *
+ * The QSEED.F output significand depends on the least-significant
+ * exponent bit and the 6 most-significant significand bits.
+ *
+ * IEEE 754 float datatype
+ * partitioned into Sign (S), Exponent (E) and Significand (M):
+ *
+ * S E E E E E E E E M M M M M M ...
+ * | | |
+ * +------+------+-------+-------+
+ * | |
+ * for lookup table
+ * calculating index for
+ * output E output M
+ *
+ * This lookup table was extracted by analyzing QSEED output
+ * from the real hardware
+ */
+static const uint8_t target_qseed_significand_table[128] = {
+ 253, 252, 245, 244, 239, 238, 231, 230, 225, 224, 217, 216,
+ 211, 210, 205, 204, 201, 200, 195, 194, 189, 188, 185, 184,
+ 179, 178, 175, 174, 169, 168, 165, 164, 161, 160, 157, 156,
+ 153, 152, 149, 148, 145, 144, 141, 140, 137, 136, 133, 132,
+ 131, 130, 127, 126, 123, 122, 121, 120, 117, 116, 115, 114,
+ 111, 110, 109, 108, 103, 102, 99, 98, 93, 92, 89, 88, 83,
+ 82, 79, 78, 75, 74, 71, 70, 67, 66, 63, 62, 59, 58, 55,
+ 54, 53, 52, 49, 48, 45, 44, 43, 42, 39, 38, 37, 36, 33,
+ 32, 31, 30, 27, 26, 25, 24, 23, 22, 19, 18, 17, 16, 15,
+ 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2
+};
+
+uint32_t helper_qseed(CPUTriCoreState *env, uint32_t r1)
+{
+ uint32_t arg1, S, E, M, E_minus_one, m_idx;
+ uint32_t new_E, new_M, new_S, result;
+
+ arg1 = make_float32(r1);
+
+ /* fetch IEEE-754 fields S, E and the uppermost 6-bit of M */
+ S = extract32(arg1, 31, 1);
+ E = extract32(arg1, 23, 8);
+ M = extract32(arg1, 17, 6);
+
+ if (float32_is_any_nan(arg1)) {
+ result = float32_quiet_nan;
+ } else if (float32_is_zero_or_denormal(arg1)) {
+ if (float32_is_neg(arg1)) {
+ result = float32_infinity | (1 << 31);
+ } else {
+ result = float32_infinity;
+ }
+ } else if (float32_is_neg(arg1)) {
+ result = float32_sqrt_nan;
+ } else if (float32_is_infinity(arg1)) {
+ result = float32_zero;
+ } else {
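+ /* Normal positive input: 1/sqrt(x) roughly negates and halves the
+ unbiased exponent. 0xBD folds the IEEE bias back in, and the table
+ supplies a significand matched to the parity of E - 1 (a sketch of
+ the intent; the constants mirror observed hardware behaviour). */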
+ E_minus_one = E - 1;
+ m_idx = ((E_minus_one & 1) << 6) | M;
+ new_S = S;
+ new_E = 0xBD - E_minus_one / 2;
+ new_M = target_qseed_significand_table[m_idx];
+
+ result = 0;
+ result = deposit32(result, 31, 1, new_S);
+ result = deposit32(result, 23, 8, new_E);
+ result = deposit32(result, 15, 8, new_M);
+ }
+
+ if (float32_is_signaling_nan(arg1, &env->fp_status)
+ || result == float32_sqrt_nan) {
+ env->FPU_FI = 1 << 31;
+ env->FPU_FS = 1;
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return (uint32_t) result;
+}
+
+uint32_t helper_fdiv(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 f_result;
+
+ f_result = float32_div(arg1, arg2, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ /* If the output is a NaN, but the inputs aren't,
+ we return a unique value. */
+ if ((flags & float_flag_invalid)
+ && !float32_is_any_nan(arg1)
+ && !float32_is_any_nan(arg2)) {
+ f_result = DIV_NAN;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_fmadd(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 arg3 = make_float32(r3);
+ float32 f_result;
+
+ f_result = float32_muladd(arg1, arg2, arg3, 0, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (flags & float_flag_invalid) {
+ arg1 = float32_squash_input_denormal(arg1, &env->fp_status);
+ arg2 = float32_squash_input_denormal(arg2, &env->fp_status);
+ arg3 = float32_squash_input_denormal(arg3, &env->fp_status);
+ f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 0);
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_fmsub(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 arg3 = make_float32(r3);
+ float32 f_result;
+
+ f_result = float32_muladd(arg1, arg2, arg3, float_muladd_negate_product,
+ &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (flags & float_flag_invalid) {
+ arg1 = float32_squash_input_denormal(arg1, &env->fp_status);
+ arg2 = float32_squash_input_denormal(arg2, &env->fp_status);
+ arg3 = float32_squash_input_denormal(arg3, &env->fp_status);
+
+ f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 1);
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_fcmp(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t result, flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+
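+ /* Compare with denormal inputs preserved (input flushing is disabled
+ around the comparison). The relation is encoded one-hot: bit 0 less,
+ bit 1 equal, bit 2 greater, bit 3 unordered; bits 4/5 flag denormal
+ operands. */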
+ set_flush_inputs_to_zero(0, &env->fp_status);
+
+ result = 1 << (float32_compare_quiet(arg1, arg2, &env->fp_status) + 1);
+ result |= float32_is_denormal(arg1) << 4;
+ result |= float32_is_denormal(arg2) << 5;
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ set_flush_inputs_to_zero(1, &env->fp_status);
+ return result;
+}
+
+uint32_t helper_ftoi(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ int32_t result, flags;
+
+ result = float32_to_int32(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)result;
+}
+
+uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_result;
+ uint32_t flags;
+ f_result = int32_to_float32(arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_utof(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_result;
+ uint32_t flags;
+
+ f_result = uint32_to_float32(arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_ftoiz(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ uint32_t result;
+ int32_t flags;
+
+ result = float32_to_int32_round_to_zero(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags & float_flag_invalid) {
+ flags &= ~float_flag_inexact;
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return result;
+}
+
+uint32_t helper_ftouz(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ uint32_t result;
+ int32_t flags;
+
+ result = float32_to_uint32_round_to_zero(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags & float_flag_invalid) {
+ flags &= ~float_flag_inexact;
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ } else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) {
+ flags = float_flag_invalid;
+ result = 0;
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return result;
+}
+
+void helper_updfl(CPUTriCoreState *env, uint32_t arg)
+{
+ env->FPU_FS = extract32(arg, 7, 1) & extract32(arg, 15, 1);
+ env->FPU_FI = (extract32(arg, 6, 1) & extract32(arg, 14, 1)) << 31;
+ env->FPU_FV = (extract32(arg, 5, 1) & extract32(arg, 13, 1)) << 31;
+ env->FPU_FZ = (extract32(arg, 4, 1) & extract32(arg, 12, 1)) << 31;
+ env->FPU_FU = (extract32(arg, 3, 1) & extract32(arg, 11, 1)) << 31;
+ /* clear FX and RM */
+ env->PSW &= ~(extract32(arg, 10, 1) << 26);
+ env->PSW |= (extract32(arg, 2, 1) & extract32(arg, 10, 1)) << 26;
+
+ fpu_set_state(env);
+}
diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c
new file mode 100644
index 000000000..3ce55abb8
--- /dev/null
+++ b/target/tricore/gdbstub.c
@@ -0,0 +1,139 @@
+/*
+ * TriCore gdb server stub
+ *
+ * Copyright (c) 2019 Bastian Koppelmann, Paderborn University
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+
+
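+/* GDB register numbering: d0..d15 map to 0..15, a0..a15 to 16..31, and
+ the core special function registers (CSFRs) below follow from 32. */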
+#define LCX_REGNUM 32
+#define FCX_REGNUM 33
+#define PCXI_REGNUM 34
+#define TRICORE_PSW_REGNUM 35
+#define TRICORE_PC_REGNUM 36
+#define ICR_REGNUM 37
+#define ISP_REGNUM 38
+#define BTV_REGNUM 39
+#define BIV_REGNUM 40
+#define SYSCON_REGNUM 41
+#define PMUCON0_REGNUM 42
+#define DMUCON_REGNUM 43
+
+static uint32_t tricore_cpu_gdb_read_csfr(CPUTriCoreState *env, int n)
+{
+ switch (n) {
+ case LCX_REGNUM:
+ return env->LCX;
+ case FCX_REGNUM:
+ return env->FCX;
+ case PCXI_REGNUM:
+ return env->PCXI;
+ case TRICORE_PSW_REGNUM:
+ return psw_read(env);
+ case TRICORE_PC_REGNUM:
+ return env->PC;
+ case ICR_REGNUM:
+ return env->ICR;
+ case ISP_REGNUM:
+ return env->ISP;
+ case BTV_REGNUM:
+ return env->BTV;
+ case BIV_REGNUM:
+ return env->BIV;
+ case SYSCON_REGNUM:
+ return env->SYSCON;
+ case PMUCON0_REGNUM:
+ return 0; /* PMUCON0 */
+ case DMUCON_REGNUM:
+ return 0; /* DMUCON */
+ default:
+ return 0;
+ }
+}
+
+static void tricore_cpu_gdb_write_csfr(CPUTriCoreState *env, int n,
+ uint32_t val)
+{
+ switch (n) {
+ case LCX_REGNUM:
+ env->LCX = val;
+ break;
+ case FCX_REGNUM:
+ env->FCX = val;
+ break;
+ case PCXI_REGNUM:
+ env->PCXI = val;
+ break;
+ case TRICORE_PSW_REGNUM:
+ psw_write(env, val);
+ break;
+ case TRICORE_PC_REGNUM:
+ env->PC = val;
+ break;
+ case ICR_REGNUM:
+ env->ICR = val;
+ break;
+ case ISP_REGNUM:
+ env->ISP = val;
+ break;
+ case BTV_REGNUM:
+ env->BTV = val;
+ break;
+ case BIV_REGNUM:
+ env->BIV = val;
+ break;
+ case SYSCON_REGNUM:
+ env->SYSCON = val;
+ break;
+ }
+}
+
+
+int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ if (n < 16) { /* data registers */
+ return gdb_get_reg32(mem_buf, env->gpr_d[n]);
+ } else if (n < 32) { /* address registers */
+ return gdb_get_reg32(mem_buf, env->gpr_a[n - 16]);
+ } else { /* csfr */
+ return gdb_get_reg32(mem_buf, tricore_cpu_gdb_read_csfr(env, n));
+ }
+}
+
+int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ uint32_t tmp;
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 16) { /* data registers */
+ env->gpr_d[n] = tmp;
+ } else if (n < 32) { /* address registers */
+ env->gpr_a[n - 16] = tmp;
+ } else {
+ tricore_cpu_gdb_write_csfr(env, n, tmp);
+ }
+ return 4;
+}
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
new file mode 100644
index 000000000..c5e997f32
--- /dev/null
+++ b/target/tricore/helper.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat-helpers.h"
+#include "qemu/qemu-print.h"
+
+enum {
+ TLBRET_DIRTY = -4,
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+#if defined(CONFIG_SOFTMMU)
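+/* No MMU is modelled yet: addresses translate 1:1 and every page is
+ readable, writable and executable. */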
+static int get_physical_address(CPUTriCoreState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type, int mmu_idx)
+{
+ int ret = TLBRET_MATCH;
+
+ *physical = address & 0xFFFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ return ret;
+}
+
+hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ hwaddr phys_addr;
+ int prot;
+ int mmu_idx = cpu_mmu_index(&cpu->env, false);
+
+ if (get_physical_address(&cpu->env, &phys_addr, &prot, addr,
+ MMU_DATA_LOAD, mmu_idx)) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
+
+/* TODO: Add exception support */
+static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
+ int rw, int tlb_error)
+{
+}
+
+bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType rw, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ hwaddr physical;
+ int prot;
+ int ret = 0;
+
+ rw &= 1;
+ ret = get_physical_address(env, &physical, &prot,
+ address, rw, mmu_idx);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, (target_ulong)address, ret, physical, prot);
+
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ } else {
+ assert(ret < 0);
+ if (probe) {
+ return false;
+ }
+ raise_mmu_exception(env, address, rw, ret);
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+
+static void tricore_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU));
+ qemu_printf(" %s\n", name);
+ g_free(name);
+}
+
+void tricore_cpu_list(void)
+{
+ GSList *list;
+
+ list = object_class_get_list_sorted(TYPE_TRICORE_CPU, false);
+ qemu_printf("Available CPUs:\n");
+ g_slist_foreach(list, tricore_cpu_list_entry, NULL);
+ g_slist_free(list);
+}
+
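+/* Resync the softfloat context with the PSW: rounding mode from PSW.RM,
+ denormals flushed on input and output, and default (quiet) NaN results
+ instead of payload propagation. */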
+void fpu_set_state(CPUTriCoreState *env)
+{
+ set_float_rounding_mode(env->PSW & MASK_PSW_FPU_RM, &env->fp_status);
+ set_flush_inputs_to_zero(1, &env->fp_status);
+ set_flush_to_zero(1, &env->fp_status);
+ set_default_nan_mode(1, &env->fp_status);
+}
+
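+/* The user status bits C/V/SV/AV/SAV (PSW bits 31..27) are cached in
+ separate PSW_USB_* fields for fast updates; psw_read() folds them back
+ into the architectural PSW and psw_write() spreads them out again. */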
+uint32_t psw_read(CPUTriCoreState *env)
+{
+ /* clear all USB bits */
+ env->PSW &= 0x6ffffff;
+ /* now set them from the cache */
+ env->PSW |= ((env->PSW_USB_C != 0) << 31);
+ env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1);
+ env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2);
+ env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3);
+ env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4);
+
+ return env->PSW;
+}
+
+void psw_write(CPUTriCoreState *env, uint32_t val)
+{
+ env->PSW_USB_C = (val & MASK_USB_C);
+ env->PSW_USB_V = (val & MASK_USB_V) << 1;
+ env->PSW_USB_SV = (val & MASK_USB_SV) << 2;
+ env->PSW_USB_AV = (val & MASK_USB_AV) << 3;
+ env->PSW_USB_SAV = (val & MASK_USB_SAV) << 4;
+ env->PSW = val;
+
+ fpu_set_state(env);
+}
diff --git a/target/tricore/helper.h b/target/tricore/helper.h
new file mode 100644
index 000000000..b64780c37
--- /dev/null
+++ b/target/tricore/helper.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Arithmetic */
+DEF_HELPER_3(add_ssov, i32, env, i32, i32)
+DEF_HELPER_3(add64_ssov, i64, env, i64, i64)
+DEF_HELPER_3(add_suov, i32, env, i32, i32)
+DEF_HELPER_3(add_h_ssov, i32, env, i32, i32)
+DEF_HELPER_3(add_h_suov, i32, env, i32, i32)
+DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_3(sub_ssov, i32, env, i32, i32)
+DEF_HELPER_3(sub64_ssov, i64, env, i64, i64)
+DEF_HELPER_3(sub_suov, i32, env, i32, i32)
+DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32)
+DEF_HELPER_3(sub_h_suov, i32, env, i32, i32)
+DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_3(mul_ssov, i32, env, i32, i32)
+DEF_HELPER_3(mul_suov, i32, env, i32, i32)
+DEF_HELPER_3(sha_ssov, i32, env, i32, i32)
+DEF_HELPER_3(absdif_ssov, i32, env, i32, i32)
+DEF_HELPER_4(madd32_ssov, i32, env, i32, i32, i32)
+DEF_HELPER_4(madd32_suov, i32, env, i32, i32, i32)
+DEF_HELPER_4(madd64_ssov, i64, env, i32, i64, i32)
+DEF_HELPER_5(madd64_q_ssov, i64, env, i64, i32, i32, i32)
+DEF_HELPER_3(madd32_q_add_ssov, i32, env, i64, i64)
+DEF_HELPER_5(maddr_q_ssov, i32, env, i32, i32, i32, i32)
+DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32)
+DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32)
+DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32)
+DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32)
+DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32)
+DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64)
+DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32)
+DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32)
+DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32)
+DEF_HELPER_2(abs_ssov, i32, env, i32)
+DEF_HELPER_2(abs_h_ssov, i32, env, i32)
+/* hword/byte arithmetic */
+DEF_HELPER_2(abs_b, i32, env, i32)
+DEF_HELPER_2(abs_h, i32, env, i32)
+DEF_HELPER_3(absdif_b, i32, env, i32, i32)
+DEF_HELPER_3(absdif_h, i32, env, i32, i32)
+DEF_HELPER_4(addr_h, i32, env, i64, i32, i32)
+DEF_HELPER_4(addsur_h, i32, env, i64, i32, i32)
+DEF_HELPER_5(maddr_q, i32, env, i32, i32, i32, i32)
+DEF_HELPER_3(add_b, i32, env, i32, i32)
+DEF_HELPER_3(add_h, i32, env, i32, i32)
+DEF_HELPER_3(sub_b, i32, env, i32, i32)
+DEF_HELPER_3(sub_h, i32, env, i32, i32)
+DEF_HELPER_4(subr_h, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32)
+DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eqany_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(ixmax, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(ixmax_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(min_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+/* count leading ... */
+DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32)
+/* sh */
+DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(sh_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_3(sha, i32, env, i32, i32)
+DEF_HELPER_2(sha_h, i32, i32, i32)
+/* merge/split/parity */
+DEF_HELPER_FLAGS_2(bmerge, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_1(bsplit, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_1(parity, TCG_CALL_NO_RWG_SE, i32, i32)
+/* float */
+DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32)
+DEF_HELPER_1(unpack, i64, i32)
+DEF_HELPER_3(fadd, i32, env, i32, i32)
+DEF_HELPER_3(fsub, i32, env, i32, i32)
+DEF_HELPER_3(fmul, i32, env, i32, i32)
+DEF_HELPER_3(fdiv, i32, env, i32, i32)
+DEF_HELPER_4(fmadd, i32, env, i32, i32, i32)
+DEF_HELPER_4(fmsub, i32, env, i32, i32, i32)
+DEF_HELPER_3(fcmp, i32, env, i32, i32)
+DEF_HELPER_2(qseed, i32, env, i32)
+DEF_HELPER_2(ftoi, i32, env, i32)
+DEF_HELPER_2(itof, i32, env, i32)
+DEF_HELPER_2(utof, i32, env, i32)
+DEF_HELPER_2(ftoiz, i32, env, i32)
+DEF_HELPER_2(ftouz, i32, env, i32)
+DEF_HELPER_2(updfl, void, env, i32)
+/* dvinit */
+DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_h_13, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_h_131, i64, env, i32, i32)
+DEF_HELPER_FLAGS_2(dvadj, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(dvstep, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(dvstep_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_3(divide, i64, env, i32, i32)
+DEF_HELPER_3(divide_u, i64, env, i32, i32)
+/* mulh */
+DEF_HELPER_FLAGS_5(mul_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mulm_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mulr_h, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32, i32)
+/* crc32 */
+DEF_HELPER_FLAGS_2(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+/* CSA */
+DEF_HELPER_2(call, void, env, i32)
+DEF_HELPER_1(ret, void, env)
+DEF_HELPER_2(bisr, void, env, i32)
+DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfm, void, env)
+DEF_HELPER_2(ldlcx, void, env, i32)
+DEF_HELPER_2(lducx, void, env, i32)
+DEF_HELPER_2(stlcx, void, env, i32)
+DEF_HELPER_2(stucx, void, env, i32)
+DEF_HELPER_1(svlcx, void, env)
+DEF_HELPER_1(svucx, void, env)
+DEF_HELPER_1(rslcx, void, env)
+/* Address mode helper */
+DEF_HELPER_1(br_update, i32, i32)
+DEF_HELPER_2(circ_update, i32, i32, i32)
+/* PSW cache helper */
+DEF_HELPER_2(psw_write, void, env, i32)
+DEF_HELPER_1(psw_read, i32, env)
+/* Exceptions */
+DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32)
diff --git a/target/tricore/meson.build b/target/tricore/meson.build
new file mode 100644
index 000000000..0ccc82951
--- /dev/null
+++ b/target/tricore/meson.build
@@ -0,0 +1,15 @@
+tricore_ss = ss.source_set()
+tricore_ss.add(files(
+ 'cpu.c',
+ 'fpu_helper.c',
+ 'helper.c',
+ 'op_helper.c',
+ 'translate.c',
+ 'gdbstub.c',
+))
+tricore_ss.add(zlib)
+
+tricore_softmmu_ss = ss.source_set()
+
+target_arch += {'tricore': tricore_ss}
+target_softmmu_arch += {'tricore': tricore_softmmu_ss}
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
new file mode 100644
index 000000000..9476d10d0
--- /dev/null
+++ b/target/tricore/op_helper.c
@@ -0,0 +1,2795 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include <zlib.h> /* for crc32 */
+
+
+/* Exception helpers */
+
+static void QEMU_NORETURN
+raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin,
+ uintptr_t pc, uint32_t fcd_pc)
+{
+ CPUState *cs = env_cpu(env);
+ /* in case we come from a helper-call we need to restore the PC */
+ cpu_restore_state(cs, pc, true);
+
+ /* The TIN (trap identification number) is loaded into d[15] */
+ env->gpr_d[15] = tin;
+
+ if (class == TRAPC_CTX_MNG && tin == TIN3_FCU) {
+ /* upper context cannot be saved, if the context list is empty */
+ } else {
+ helper_svucx(env);
+ }
+
+ /* The return address in a[11] is updated */
+ if (class == TRAPC_CTX_MNG && tin == TIN3_FCD) {
+ env->SYSCON |= MASK_SYSCON_FCD_SF;
+ /* When we run out of CSAs after saving a context, an FCD trap is taken
+ and the return address is the start of the trap handler that used
+ the last CSA. */
+ env->gpr_a[11] = fcd_pc;
+ } else if (class == TRAPC_SYSCALL) {
+ env->gpr_a[11] = env->PC + 4;
+ } else {
+ env->gpr_a[11] = env->PC;
+ }
+ /* The stack pointer in A[10] is set to the Interrupt Stack Pointer (ISP)
+ when the processor was not previously using the interrupt stack
+ (in case of PSW.IS = 0). The stack pointer bit is set for using the
+ interrupt stack: PSW.IS = 1. */
+ if ((env->PSW & MASK_PSW_IS) == 0) {
+ env->gpr_a[10] = env->ISP;
+ }
+ env->PSW |= MASK_PSW_IS;
+ /* The I/O mode is set to Supervisor mode, which means all permissions
+ are enabled: PSW.IO = 10B. */
+ env->PSW |= (2 << 10);
+
+ /* The current Protection Register Set is set to 0: PSW.PRS = 00B. */
+ env->PSW &= ~MASK_PSW_PRS;
+
+ /* The Call Depth Counter (CDC) is cleared, and the call depth limit is
+ set to 64: PSW.CDC = 0000000B. */
+ env->PSW &= ~MASK_PSW_CDC;
+
+ /* Call Depth Counter is enabled, PSW.CDE = 1. */
+ env->PSW |= MASK_PSW_CDE;
+
+ /* Write permission to global registers A[0], A[1], A[8], A[9] is
+ disabled: PSW.GW = 0. */
+ env->PSW &= ~MASK_PSW_GW;
+
+ /* The interrupt system is globally disabled: ICR.IE = 0. The 'old'
+ ICR.IE and ICR.CCPN are saved. */
+
+ /* PCXI.PIE = ICR.IE */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) +
+ ((env->ICR & MASK_ICR_IE_1_3) << 15));
+ /* PCXI.PCPN = ICR.CCPN */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* Update PC using the trap vector table */
+ env->PC = env->BTV | (class << 5);
+
+ cpu_loop_exit(cs);
+}
+
+void helper_raise_exception_sync(CPUTriCoreState *env, uint32_t class,
+ uint32_t tin)
+{
+ raise_exception_sync_internal(env, class, tin, 0, 0);
+}
+
+static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class,
+ uint32_t tin, uintptr_t pc)
+{
+ raise_exception_sync_internal(env, class, tin, pc, 0);
+}
+
+/* Addressing mode helper */
+
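+/* Reverse the bits of a 16-bit value: each byte is bit-reversed with the
+ classic multiply/mask/mod-1023 trick, then the two halves swap places. */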
+static uint16_t reverse16(uint16_t val)
+{
+ uint8_t high = (uint8_t)(val >> 8);
+ uint8_t low = (uint8_t)(val & 0xff);
+
+ uint16_t rh, rl;
+
+ rl = (uint16_t)((high * 0x0202020202ULL & 0x010884422010ULL) % 1023);
+ rh = (uint16_t)((low * 0x0202020202ULL & 0x010884422010ULL) % 1023);
+
+ return (rh << 8) | rl;
+}
+
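+/* Bit-reverse addressing: the 16-bit index and increment are added in
+ bit-reversed order, as used for FFT-style access patterns. */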
+uint32_t helper_br_update(uint32_t reg)
+{
+ uint32_t index = reg & 0xffff;
+ uint32_t incr = reg >> 16;
+ uint32_t new_index = reverse16(reverse16(index) + reverse16(incr));
+ return reg - index + new_index;
+}
+
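+/* Circular addressing: advance the 16-bit index by off and wrap it into
+ [0, length), with length held in the upper half of reg. */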
+uint32_t helper_circ_update(uint32_t reg, uint32_t off)
+{
+ uint32_t index = reg & 0xffff;
+ uint32_t length = reg >> 16;
+ int32_t new_index = index + off;
+ if (new_index < 0) {
+ new_index += length;
+ } else {
+ new_index %= length;
+ }
+ return reg - index + new_index;
+}
+
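+/* Saturate a 64-bit intermediate to the signed 32-bit range and update
+ the sticky V/SV bits. "arg ^ arg * 2u" places bit 31 ^ bit 30 of the
+ result into bit 31, which is exactly the advance-overflow (AV) flag. */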
+static uint32_t ssov32(CPUTriCoreState *env, int64_t arg)
+{
+ uint32_t ret;
+ int64_t max_pos = INT32_MAX;
+ int64_t max_neg = INT32_MIN;
+ if (arg > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_pos;
+ } else {
+ if (arg < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_neg;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ }
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg)
+{
+ uint32_t ret;
+ uint64_t max_pos = UINT32_MAX;
+ if (arg > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_pos;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg)
+{
+ uint32_t ret;
+
+ if (arg < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t ssov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
+{
+ int32_t max_pos = INT16_MAX;
+ int32_t max_neg = INT16_MIN;
+ int32_t av0, av1;
+
+ env->PSW_USB_V = 0;
+ av0 = hw0 ^ hw0 * 2u;
+ if (hw0 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_pos;
+ } else if (hw0 < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_neg;
+ }
+
+ av1 = hw1 ^ hw1 * 2u;
+ if (hw1 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_pos;
+ } else if (hw1 < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_neg;
+ }
+
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (av0 | av1) << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return (hw0 & 0xffff) | (hw1 << 16);
+}
+
+static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
+{
+ int32_t max_pos = UINT16_MAX;
+ int32_t av0, av1;
+
+ env->PSW_USB_V = 0;
+ av0 = hw0 ^ hw0 * 2u;
+ if (hw0 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_pos;
+ } else if (hw0 < 0) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = 0;
+ }
+
+ av1 = hw1 ^ hw1 * 2u;
+ if (hw1 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_pos;
+ } else if (hw1 < 0) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = 0;
+ }
+
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (av0 | av1) << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return (hw0 & 0xffff) | (hw1 << 16);
+}
+
+target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 + t2;
+ return ssov32(env, result);
+}
+
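+/* 64-bit saturating add: signed overflow occurred iff the operands share
+ a sign and the result's sign differs, i.e.
+ (result ^ r1) & ~(r1 ^ r2) is negative. */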
+uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ uint64_t result;
+ int64_t ovf;
+
+ result = r1 + r2;
+ ovf = (result ^ r1) & ~(r1 ^ r2);
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if ((int64_t)r1 >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = sextract32(r1, 0, 16) + sextract32(r2, 0, 16);
+ ret_hw1 = sextract32(r1, 16, 16) + sextract32(r2, 16, 16);
+ return ssov16(env, ret_hw0, ret_hw1);
+}
+
+uint32_t helper_addr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+
+target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 + t2;
+ return suov32_pos(env, result);
+}
+
+target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = extract32(r1, 0, 16) + extract32(r2, 0, 16);
+ ret_hw1 = extract32(r1, 16, 16) + extract32(r2, 16, 16);
+ return suov16(env, ret_hw0, ret_hw1);
+}
+
+target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 - t2;
+ return ssov32(env, result);
+}
+
+uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ uint64_t result;
+ int64_t ovf;
+
+ result = r1 - r2;
+ ovf = (result ^ r1) & (r1 ^ r2);
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if ((int64_t)r1 >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = sextract32(r1, 0, 16) - sextract32(r2, 0, 16);
+ ret_hw1 = sextract32(r1, 16, 16) - sextract32(r2, 16, 16);
+ return ssov16(env, ret_hw0, ret_hw1);
+}
+
+uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 - t2;
+ return suov32_neg(env, result);
+}
+
+target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = extract32(r1, 0, 16) - extract32(r2, 0, 16);
+ ret_hw1 = extract32(r1, 16, 16) - extract32(r2, 16, 16);
+ return suov16(env, ret_hw0, ret_hw1);
+}
+
+target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 * t2;
+ return ssov32(env, result);
+}
+
+target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 * t2;
+
+ return suov32_pos(env, result);
+}
+
+target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int32_t t2 = sextract64(r2, 0, 6);
+ int64_t result;
+ if (t2 == 0) {
+ result = t1;
+ } else if (t2 > 0) {
+ result = t1 << t2;
+ } else {
+ result = t1 >> -t2;
+ }
+ return ssov32(env, result);
+}
+
+uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1)
+{
+ target_ulong result;
+ result = ((int32_t)r1 >= 0) ? r1 : (0 - r1);
+ return ssov32(env, result);
+}
+
+uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
+{
+ int32_t ret_h0, ret_h1;
+
+ ret_h0 = sextract32(r1, 0, 16);
+ ret_h0 = (ret_h0 >= 0) ? ret_h0 : (0 - ret_h0);
+
+ ret_h1 = sextract32(r1, 16, 16);
+ ret_h1 = (ret_h1 >= 0) ? ret_h1 : (0 - ret_h1);
+
+ return ssov16(env, ret_h0, ret_h1);
+}
+
+target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result;
+
+ if (t1 > t2) {
+ result = t1 - t2;
+ } else {
+ result = t2 - t1;
+ }
+ return ssov32(env, result);
+}
+
+uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t t1, t2;
+ int32_t ret_h0, ret_h1;
+
+ t1 = sextract32(r1, 0, 16);
+ t2 = sextract32(r2, 0, 16);
+ if (t1 > t2) {
+ ret_h0 = t1 - t2;
+ } else {
+ ret_h0 = t2 - t1;
+ }
+
+ t1 = sextract32(r1, 16, 16);
+ t2 = sextract32(r2, 16, 16);
+ if (t1 > t2) {
+ ret_h1 = t1 - t2;
+ } else {
+ ret_h1 = t2 - t1;
+ }
+
+ return ssov16(env, ret_h0, ret_h1);
+}
+
+target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 + (t1 * t3);
+ return ssov32(env, result);
+}
+
+target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t2 = extract64(r2, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 + (t1 * t3);
+ return suov32_pos(env, result);
+}
+
+uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, ovf;
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul;
+
+ mul = t1 * t3;
+ ret = mul + r2;
+ ovf = (ret ^ mul) & ~(mul ^ r2);
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if ((int64_t)ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ ret = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ ret = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+
+ return ret;
+}
+
+uint32_t
+helper_madd32_q_add_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ int64_t result;
+
+ result = (r1 + r2);
+
+ env->PSW_USB_AV = (result ^ result * 2u);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ /* we do the saturation by hand, since we produce an overflow on the host
+ if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+ case, we flip the saturated value. */
+ if (r2 == 0x8000000000000000LL) {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint32_t)result;
+}
+
+uint64_t helper_madd64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result, mul;
+ int64_t ovf;
+
+ mul = (t2 * t3) << n;
+ result = mul + t1;
+
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ ovf = (result ^ mul) & ~(mul ^ t1);
+ /* we do the saturation by hand, since we produce an overflow on the host
+ if the mul was ((0x80000000 * 0x80000000) << 1). If this is the
+ case, we flip the saturated value. */
+ if ((r2 == 0x80000000) && (r3 == 0x80000000) && (n == 1)) {
+ if (ovf >= 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint64_t)result;
+}
+
+uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
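+ /* (-1.0) * (-1.0) with the n == 1 shift is the one Q-format product
+ whose true value, +1.0, is unrepresentable; clamp it to 0x7fffffff
+ up front so the host arithmetic below stays exact. */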
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 + mul + 0x8000;
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > 0x7fffffffll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MAX;
+ } else if (ret < -0x80000000ll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret & 0xffff0000ll;
+}
+
+uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, mul;
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+
+ mul = t1 * t3;
+ ret = mul + r2;
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret < r2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* saturate */
+ ret = UINT64_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 - (t1 * t3);
+ return ssov32(env, result);
+}
+
+target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t2 = extract64(r2, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+ uint64_t result;
+ uint64_t mul;
+
+ mul = (t1 * t3);
+ result = t2 - mul;
+
+ env->PSW_USB_AV = result ^ result * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ /* we calculate ovf by hand here, because the multiplication can overflow on
+ the host, which would give false results if we compare to less than
+ zero */
+ if (mul > t2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, ovf;
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul;
+
+ mul = t1 * t3;
+ ret = r2 - mul;
+ ovf = (ret ^ r2) & (mul ^ r2);
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if ((int64_t)ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ ret = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ ret = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, mul;
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+
+ mul = t1 * t3;
+ ret = r2 - mul;
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > r2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* saturate */
+ ret = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+uint32_t
+helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ int64_t result;
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = (int64_t)r2;
+
+ result = t1 - t2;
+
+ env->PSW_USB_AV = (result ^ result * 2u);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ /* we do the saturation by hand, since we produce an overflow on the host
+ if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+ case, we flip the saturated value. */
+ if (r2 == 0x8000000000000000LL) {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint32_t)result;
+}
+
+uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result, mul;
+ int64_t ovf;
+
+ mul = (t2 * t3) << n;
+ result = t1 - mul;
+
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ ovf = (result ^ t1) & (t1 ^ mul);
+ /* we do the saturation by hand, since we produce an overflow on the host
+ if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+ case, we flip the saturated value. */
+ if (mul == 0x8000000000000000LL) {
+ if (ovf >= 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+
+ return (uint64_t)result;
+}
+
+uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 - mul + 0x8000;
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > 0x7fffffffll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MAX;
+ } else if (ret < -0x80000000ll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
+{
+ int32_t b, i;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ b = sextract32(arg, i * 8, 8);
+ b = (b >= 0) ? b : (0 - b);
+ ovf |= (b > 0x7F) || (b < -0x80);
+ avf |= b ^ b * 2u;
+ ret |= (b & 0xff) << (i * 8);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
+{
+ int32_t h, i;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ h = sextract32(arg, i * 16, 16);
+ h = (h >= 0) ? h : (0 - h);
+ ovf |= (h > 0x7FFF) || (h < -0x8000);
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r2 = sextract32(r2, i * 8, 8);
+ b = sextract32(r1, i * 8, 8);
+ b = (b > extr_r2) ? (b - extr_r2) : (extr_r2 - b);
+ ovf |= (b > 0x7F) || (b < -0x80);
+ avf |= b ^ b * 2u;
+ ret |= (b & 0xff) << (i * 8);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = sextract32(r1, i * 16, 16);
+ h = (h > extr_r2) ? (h - extr_r2) : (extr_r2 - h);
+ ovf |= (h > 0x7FFF) || (h < -0x8000);
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_addr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_addsur_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 + mul + 0x8000;
+
+ if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r1 = sextract32(r1, i * 8, 8);
+ extr_r2 = sextract32(r2, i * 8, 8);
+
+ b = extr_r1 + extr_r2;
+ ovf |= ((b > 0x7f) || (b < -0x80));
+ avf |= b ^ b * 2u;
+ ret |= ((b & 0xff) << (i*8));
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r1 = sextract32(r1, i * 16, 16);
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = extr_r1 + extr_r2;
+ ovf |= ((h > 0x7fff) || (h < -0x8000));
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (avf << 16);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 - mul + 0x8000;
+
+ if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r1 = sextract32(r1, i * 8, 8);
+ extr_r2 = sextract32(r2, i * 8, 8);
+
+ b = extr_r1 - extr_r2;
+ ovf |= ((b > 0x7f) || (b < -0x80));
+ avf |= b ^ b * 2u;
+ ret |= ((b & 0xff) << (i*8));
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r1 = sextract32(r1, i * 16, 16);
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = extr_r1 - extr_r2;
+ ovf |= ((h > 0x7fff) || (h < -0x8000));
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
+{
+ int32_t ret;
+ int32_t i, msk;
+
+ ret = 0;
+ msk = 0xff;
+ for (i = 0; i < 4; i++) {
+ if ((r1 & msk) == (r2 & msk)) {
+ ret |= msk;
+ }
+ msk = msk << 8;
+ }
+
+ return ret;
+}
+
+uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
+{
+ int32_t ret = 0;
+
+ if ((r1 & 0xffff) == (r2 & 0xffff)) {
+ ret = 0xffff;
+ }
+
+ if ((r1 & 0xffff0000) == (r2 & 0xffff0000)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ ret |= (sextract32(r1, i * 8, 8) == sextract32(r2, i * 8, 8));
+ }
+
+ return ret;
+}
+
+uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret;
+
+ ret = (sextract32(r1, 0, 16) == sextract32(r2, 0, 16));
+ ret |= (sextract32(r1, 16, 16) == sextract32(r2, 16, 16));
+
+ return ret;
+}
+
+uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (sextract32(r1, i * 8, 8) < sextract32(r2, i * 8, 8)) {
+ ret |= (0xff << (i * 8));
+ }
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (extract32(r1, i * 8, 8) < extract32(r2, i * 8, 8)) {
+ ret |= (0xff << (i * 8));
+ }
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret = 0;
+
+ if (sextract32(r1, 0, 16) < sextract32(r2, 0, 16)) {
+ ret |= 0xffff;
+ }
+
+ if (sextract32(r1, 16, 16) < sextract32(r2, 16, 16)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret = 0;
+
+ if (extract32(r1, 0, 16) < extract32(r2, 0, 16)) {
+ ret |= 0xffff;
+ }
+
+ if (extract32(r1, 16, 16) < extract32(r2, 16, 16)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+#define EXTREMA_H_B(name, op) \
+uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
+{ \
+ int32_t i, extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ for (i = 0; i < 4; i++) { \
+ extr_r1 = sextract32(r1, i * 8, 8); \
+ extr_r2 = sextract32(r2, i * 8, 8); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= (extr_r1 & 0xff) << (i * 8); \
+ } \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
+{ \
+ int32_t i; \
+ uint32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ for (i = 0; i < 4; i++) { \
+ extr_r1 = extract32(r1, i * 8, 8); \
+ extr_r2 = extract32(r2, i * 8, 8); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= (extr_r1 & 0xff) << (i * 8); \
+ } \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
+{ \
+ int32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ extr_r1 = sextract32(r1, 0, 16); \
+ extr_r2 = sextract32(r2, 0, 16); \
+ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret = ret & 0xffff; \
+ \
+ extr_r1 = sextract32(r1, 16, 16); \
+ extr_r2 = sextract32(r2, 16, 16); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= extr_r1 << 16; \
+ \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\
+{ \
+ uint32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ extr_r1 = extract32(r1, 0, 16); \
+ extr_r2 = extract32(r2, 0, 16); \
+ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret = ret & 0xffff; \
+ \
+ extr_r1 = extract32(r1, 16, 16); \
+ extr_r2 = extract32(r2, 16, 16); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= extr_r1 << (16); \
+ \
+ return ret; \
+} \
+ \
+uint64_t helper_ix##name(uint64_t r1, uint32_t r2) \
+{ \
+ int64_t r2l, r2h, r1hl; \
+ uint64_t ret = 0; \
+ \
+ ret = ((r1 + 2) & 0xffff); \
+ r2l = sextract64(r2, 0, 16); \
+ r2h = sextract64(r2, 16, 16); \
+ r1hl = sextract64(r1, 32, 16); \
+ \
+ if ((r2l op ## = r2h) && (r2l op r1hl)) { \
+ ret |= (r2l & 0xffff) << 32; \
+ ret |= extract64(r1, 0, 16) << 16; \
+ } else if ((r2h op r2l) && (r2h op r1hl)) { \
+ ret |= extract64(r2, 16, 16) << 32; \
+ ret |= extract64(r1 + 1, 0, 16) << 16; \
+ } else { \
+ ret |= r1 & 0xffffffff0000ull; \
+ } \
+ return ret; \
+} \
+ \
+uint64_t helper_ix##name ##_u(uint64_t r1, uint32_t r2) \
+{ \
+ int64_t r2l, r2h, r1hl; \
+ uint64_t ret = 0; \
+ \
+ ret = ((r1 + 2) & 0xffff); \
+ r2l = extract64(r2, 0, 16); \
+ r2h = extract64(r2, 16, 16); \
+ r1hl = extract64(r1, 32, 16); \
+ \
+ if ((r2l op ## = r2h) && (r2l op r1hl)) { \
+ ret |= (r2l & 0xffff) << 32; \
+ ret |= extract64(r1, 0, 16) << 16; \
+ } else if ((r2h op r2l) && (r2h op r1hl)) { \
+ ret |= extract64(r2, 16, 16) << 32; \
+ ret |= extract64(r1 + 1, 0, 16) << 16; \
+ } else { \
+ ret |= r1 & 0xffffffff0000ull; \
+ } \
+ return ret; \
+}
+
+EXTREMA_H_B(max, >)
+EXTREMA_H_B(min, <)
+
+#undef EXTREMA_H_B
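+
+/*
+ * Illustrative expansion: EXTREMA_H_B(max, >) generates helper_max_b,
+ * helper_max_bu, helper_max_h, helper_max_hu, helper_ixmax and
+ * helper_ixmax_u. E.g. helper_max_b(0x0180fe7f, 0x7f02ff80) compares
+ * the packed bytes as signed values and keeps the larger of each
+ * pair: {0x01, 0x80, 0xfe, 0x7f} vs {0x7f, 0x02, 0xff, 0x80} yields
+ * {0x7f, 0x02, 0xff, 0x7f}, i.e. 0x7f02ff7f.
+ */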
+
+uint32_t helper_clo_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clo32(ret_hw0 << 16);
+ ret_hw1 = clo32(ret_hw1 << 16);
+
+ if (ret_hw0 > 16) {
+ ret_hw0 = 16;
+ }
+ if (ret_hw1 > 16) {
+ ret_hw1 = 16;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_clz_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clz32(ret_hw0 << 16);
+ ret_hw1 = clz32(ret_hw1 << 16);
+
+ if (ret_hw0 > 16) {
+ ret_hw0 = 16;
+ }
+ if (ret_hw1 > 16) {
+ ret_hw1 = 16;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_cls_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clrsb32(ret_hw0 << 16);
+ ret_hw1 = clrsb32(ret_hw1 << 16);
+
+ if (ret_hw0 > 15) {
+ ret_hw0 = 15;
+ }
+ if (ret_hw1 > 15) {
+ ret_hw1 = 15;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_sh(target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count = sextract32(r2, 0, 6);
+
+ if (shift_count == -32) {
+ return 0;
+ } else if (shift_count < 0) {
+ return r1 >> -shift_count;
+ } else {
+ return r1 << shift_count;
+ }
+}
+
+uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+ int32_t shift_count;
+
+ shift_count = sextract32(r2, 0, 5);
+
+ if (shift_count == -16) {
+ return 0;
+ } else if (shift_count < 0) {
+ ret_hw0 = extract32(r1, 0, 16) >> -shift_count;
+ ret_hw1 = extract32(r1, 16, 16) >> -shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ } else {
+ ret_hw0 = extract32(r1, 0, 16) << shift_count;
+ ret_hw1 = extract32(r1, 16, 16) << shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ }
+}
+
+uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count;
+ int64_t result, t1;
+ uint32_t ret;
+
+ shift_count = sextract32(r2, 0, 6);
+ t1 = sextract32(r1, 0, 32);
+
+ if (shift_count == 0) {
+ env->PSW_USB_C = env->PSW_USB_V = 0;
+ ret = r1;
+ } else if (shift_count == -32) {
+ env->PSW_USB_C = r1;
+ env->PSW_USB_V = 0;
+ ret = t1 >> 31;
+ } else if (shift_count > 0) {
+ result = t1 << shift_count;
+ /* calc carry */
+ env->PSW_USB_C = ((result & 0xffffffff00000000ULL) != 0);
+ /* calc v */
+ env->PSW_USB_V = (((result > 0x7fffffffLL) ||
+ (result < -0x80000000LL)) << 31);
+ /* calc sv */
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = (uint32_t)result;
+ } else {
+ env->PSW_USB_V = 0;
+ env->PSW_USB_C = (r1 & ((1 << -shift_count) - 1));
+ ret = t1 >> -shift_count;
+ }
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count;
+ int32_t ret_hw0, ret_hw1;
+
+ shift_count = sextract32(r2, 0, 5);
+
+ if (shift_count == 0) {
+ return r1;
+ } else if (shift_count < 0) {
+ ret_hw0 = sextract32(r1, 0, 16) >> -shift_count;
+ ret_hw1 = sextract32(r1, 16, 16) >> -shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ } else {
+ ret_hw0 = sextract32(r1, 0, 16) << shift_count;
+ ret_hw1 = sextract32(r1, 16, 16) << shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ }
+}
+
+uint32_t helper_bmerge(target_ulong r1, target_ulong r2)
+{
+ uint32_t i, ret;
+
+ ret = 0;
+ for (i = 0; i < 16; i++) {
+ ret |= (r1 & 1) << (2 * i + 1);
+ ret |= (r2 & 1) << (2 * i);
+ r1 = r1 >> 1;
+ r2 = r2 >> 1;
+ }
+ return ret;
+}
+
+uint64_t helper_bsplit(uint32_t r1)
+{
+ int32_t i;
+ uint64_t ret;
+
+ ret = 0;
+ for (i = 0; i < 32; i = i + 2) {
+ /* even */
+ ret |= (r1 & 1) << (i/2);
+ r1 = r1 >> 1;
+ /* odd */
+ ret |= (uint64_t)(r1 & 1) << (i/2 + 32);
+ r1 = r1 >> 1;
+ }
+ return ret;
+}
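+
+/*
+ * helper_bsplit de-interleaves a word: the even-numbered bits of r1
+ * are compressed into the low word of the result and the odd-numbered
+ * bits into the high word. It is the inverse of helper_bmerge above,
+ * so bmerge(bsplit(x) >> 32, bsplit(x) & 0xffffffff) == x.
+ */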
+
+uint32_t helper_parity(target_ulong r1)
+{
+ uint32_t ret;
+ uint32_t nOnes, i;
+
+    ret = 0;
+    /* first byte */
+ for (i = 0; i < 8; i++) {
+ ret ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ /* second byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 8;
+ /* third byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 16;
+ /* fourth byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 24;
+
+ return ret;
+}
+
+uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
+ target_ulong r2)
+{
+ uint32_t ret;
+ int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac;
+ int32_t int_exp = r1_high;
+ int32_t int_mant = r1_low;
+ uint32_t flag_rnd = (int_mant & (1 << 7)) && (
+ (int_mant & (1 << 8)) ||
+ (int_mant & 0x7f) ||
+ (carry != 0));
+ if (((int_mant & (1<<31)) == 0) && (int_exp == 255)) {
+ fp_exp = 255;
+ fp_frac = extract32(int_mant, 8, 23);
+ } else if ((int_mant & (1<<31)) && (int_exp >= 127)) {
+ fp_exp = 255;
+ fp_frac = 0;
+ } else if ((int_mant & (1<<31)) && (int_exp <= -128)) {
+ fp_exp = 0;
+ fp_frac = 0;
+ } else if (int_mant == 0) {
+ fp_exp = 0;
+ fp_frac = 0;
+ } else {
+ if (((int_mant & (1 << 31)) == 0)) {
+ temp_exp = 0;
+ } else {
+ temp_exp = int_exp + 128;
+ }
+ fp_exp_frac = (((temp_exp & 0xff) << 23) |
+ extract32(int_mant, 8, 23))
+ + flag_rnd;
+ fp_exp = extract32(fp_exp_frac, 23, 8);
+ fp_frac = extract32(fp_exp_frac, 0, 23);
+ }
+ ret = r2 & (1 << 31);
+ ret = ret + (fp_exp << 23);
+ ret = ret + (fp_frac & 0x7fffff);
+
+ return ret;
+}
+
+uint64_t helper_unpack(target_ulong arg1)
+{
+ int32_t fp_exp = extract32(arg1, 23, 8);
+ int32_t fp_frac = extract32(arg1, 0, 23);
+ uint64_t ret;
+ int32_t int_exp, int_mant;
+
+ if (fp_exp == 255) {
+ int_exp = 255;
+ int_mant = (fp_frac << 7);
+ } else if ((fp_exp == 0) && (fp_frac == 0)) {
+ int_exp = -127;
+ int_mant = 0;
+ } else if ((fp_exp == 0) && (fp_frac != 0)) {
+ int_exp = -126;
+ int_mant = (fp_frac << 7);
+ } else {
+ int_exp = fp_exp - 127;
+ int_mant = (fp_frac << 7);
+ int_mant |= (1 << 30);
+ }
+ ret = int_exp;
+ ret = ret << 32;
+ ret |= int_mant;
+
+ return ret;
+}
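+
+/*
+ * Worked example: unpack(0x3f800000), i.e. 1.0f, sees fp_exp = 127
+ * and fp_frac = 0, so it returns int_exp = 0 and int_mant = 1 << 30
+ * (the implied leading one). pack() above reverses the split, with
+ * flag_rnd rounding the mantissa to nearest-even.
+ */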
+
+uint64_t helper_dvinit_b_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret;
+ int32_t abs_sig_dividend, abs_divisor;
+
+ ret = sextract32(r1, 0, 32);
+ ret = ret << 24;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffffff;
+ }
+
+ abs_sig_dividend = abs((int32_t)r1) >> 8;
+ abs_divisor = abs((int32_t)r2);
+    /* calc overflow
+       ovf if (a/b >= 255) <=> (a/255 >= b) */
+    env->PSW_USB_V = (abs_sig_dividend >= abs_divisor);
+    env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_b_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret = sextract32(r1, 0, 32);
+
+ ret = ret << 24;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffffff;
+ }
+ /* calc overflow */
+ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffffff80)));
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_h_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret;
+ int32_t abs_sig_dividend, abs_divisor;
+
+ ret = sextract32(r1, 0, 32);
+ ret = ret << 16;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffff;
+ }
+
+ abs_sig_dividend = abs((int32_t)r1) >> 16;
+ abs_divisor = abs((int32_t)r2);
+    /* calc overflow
+       ovf if (a/b >= 0xffff) <=> (a/0xffff >= b) */
+    env->PSW_USB_V = (abs_sig_dividend >= abs_divisor);
+    env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_h_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret = sextract32(r1, 0, 32);
+
+ ret = ret << 16;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffff;
+ }
+ /* calc overflow */
+ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffff8000)));
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvadj(uint64_t r1, uint32_t r2)
+{
+ int32_t x_sign = (r1 >> 63);
+ int32_t q_sign = x_sign ^ (r2 >> 31);
+ int32_t eq_pos = x_sign & ((r1 >> 32) == r2);
+ int32_t eq_neg = x_sign & ((r1 >> 32) == -r2);
+ uint32_t quotient;
+ uint64_t remainder;
+
+ if ((q_sign & ~eq_neg) | eq_pos) {
+ quotient = (r1 + 1) & 0xffffffff;
+ } else {
+ quotient = r1 & 0xffffffff;
+ }
+
+ if (eq_pos | eq_neg) {
+ remainder = 0;
+ } else {
+ remainder = (r1 & 0xffffffff00000000ull);
+ }
+ return remainder | quotient;
+}
+
+uint64_t helper_dvstep(uint64_t r1, uint32_t r2)
+{
+ int32_t dividend_sign = extract64(r1, 63, 1);
+ int32_t divisor_sign = extract32(r2, 31, 1);
+ int32_t quotient_sign = (dividend_sign != divisor_sign);
+ int32_t addend, dividend_quotient, remainder;
+ int32_t i, temp;
+
+ if (quotient_sign) {
+ addend = r2;
+ } else {
+ addend = -r2;
+ }
+ dividend_quotient = (int32_t)r1;
+ remainder = (int32_t)(r1 >> 32);
+
+ for (i = 0; i < 8; i++) {
+ remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1);
+ dividend_quotient <<= 1;
+ temp = remainder + addend;
+        if ((temp < 0) == dividend_sign) {
+            remainder = temp;
+            dividend_quotient = dividend_quotient | !quotient_sign;
+        } else {
+            dividend_quotient = dividend_quotient | quotient_sign;
+        }
+ }
+ return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient;
+}
+
+uint64_t helper_dvstep_u(uint64_t r1, uint32_t r2)
+{
+ int32_t dividend_quotient = extract64(r1, 0, 32);
+ int64_t remainder = extract64(r1, 32, 32);
+ int32_t i;
+ int64_t temp;
+ for (i = 0; i < 8; i++) {
+ remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1);
+ dividend_quotient <<= 1;
+ temp = (remainder & 0xffffffff) - r2;
+ if (temp >= 0) {
+ remainder = temp;
+ }
+ dividend_quotient = dividend_quotient | !(temp < 0);
+ }
+ return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient;
+}
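+
+/*
+ * These helpers compose into a full 32/32 signed division the same
+ * way guest code strings the insns together: DVINIT sets up the
+ * {remainder, dividend} accumulator, four DVSTEPs retire 8 quotient
+ * bits each (the loop above), and DVADJ repairs the final remainder
+ * of the non-restoring algorithm. Illustrative sequence, assuming a
+ * word-sized dvinit helper not shown in this hunk:
+ *
+ *     uint64_t acc = helper_dvinit(env, dividend, divisor);
+ *     for (int i = 0; i < 4; i++) {
+ *         acc = helper_dvstep(acc, divisor);
+ *     }
+ *     acc = helper_dvadj(acc, divisor);   quotient in bits [31:0]
+ */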
+
+uint64_t helper_divide(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ int32_t quotient, remainder;
+ int32_t dividend = (int32_t)r1;
+ int32_t divisor = (int32_t)r2;
+
+ if (divisor == 0) {
+ if (dividend >= 0) {
+ quotient = 0x7fffffff;
+ remainder = 0;
+ } else {
+ quotient = 0x80000000;
+ remainder = 0;
+ }
+ env->PSW_USB_V = (1 << 31);
+ } else if ((divisor == 0xffffffff) && (dividend == 0x80000000)) {
+ quotient = 0x7fffffff;
+ remainder = 0;
+ env->PSW_USB_V = (1 << 31);
+ } else {
+ remainder = dividend % divisor;
+ quotient = (dividend - remainder)/divisor;
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+ return ((uint64_t)remainder << 32) | (uint32_t)quotient;
+}
+
+uint64_t helper_divide_u(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t quotient, remainder;
+ uint32_t dividend = r1;
+ uint32_t divisor = r2;
+
+ if (divisor == 0) {
+ quotient = 0xffffffff;
+ remainder = 0;
+ env->PSW_USB_V = (1 << 31);
+ } else {
+ remainder = dividend % divisor;
+ quotient = (dividend - remainder)/divisor;
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+ return ((uint64_t)remainder << 32) | quotient;
+}
+
+uint64_t helper_mul_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint32_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = (((uint32_t)(arg00 * arg10)) << n);
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = (((uint32_t)(arg01 * arg11)) << n);
+ }
+ return (((uint64_t)result1 << 32)) | result0;
+}
+
+uint64_t helper_mulm_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint64_t ret;
+ int64_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = (((int32_t)arg00 * (int32_t)arg10) << n);
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = (((int32_t)arg01 * (int32_t)arg11) << n);
+ }
+ ret = (result1 + result0);
+ ret = ret << 16;
+ return ret;
+}
+
+uint32_t helper_mulr_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint32_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = ((arg00 * arg10) << n) + 0x8000;
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = ((arg01 * arg11) << n) + 0x8000;
+ }
+ return (result1 & 0xffff0000) | (result0 >> 16);
+}
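+
+/*
+ * mulr_h forms two q15 * q15 products, rounds each q31 result to q15
+ * by adding 0x8000 before taking the upper halfword, and saturates
+ * the one overflowing case (both inputs 0x8000, i.e. -1.0 * -1.0 with
+ * n == 1), which would otherwise wrap to -1.0 instead of +1.0.
+ */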
+
+uint32_t helper_crc32(uint32_t arg0, uint32_t arg1)
+{
+ uint8_t buf[4];
+ stl_be_p(buf, arg0);
+
+ return crc32(arg1, buf, 4);
+}
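+
+/*
+ * Example: helper_crc32(0x12345678, 0) runs the bytes 12 34 56 78 (in
+ * big-endian order, per stl_be_p) through zlib's crc32() with an
+ * initial value of 0.
+ */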
+
+/* context save area (CSA) related helpers */
+
+static int cdc_increment(target_ulong *psw)
+{
+ if ((*psw & MASK_PSW_CDC) == 0x7f) {
+ return 0;
+ }
+
+ (*psw)++;
+ /* check for overflow */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ if (count == 0) {
+ (*psw)--;
+ return 1;
+ }
+ return 0;
+}
+
+static int cdc_decrement(target_ulong *psw)
+{
+ if ((*psw & MASK_PSW_CDC) == 0x7f) {
+ return 0;
+ }
+ /* check for underflow */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ if (count == 0) {
+ return 1;
+ }
+ (*psw)--;
+ return 0;
+}
+
+static bool cdc_zero(target_ulong *psw)
+{
+ int cdc = *psw & MASK_PSW_CDC;
+ /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC ==
+ 7'b1111111, otherwise returns FALSE. */
+ if (cdc == 0x7f) {
+ return true;
+ }
+ /* find CDC.COUNT */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ return count == 0;
+}
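+
+/*
+ * PSW.CDC encodes a variable-width call depth counter: a run of
+ * leading ones selects the counter width, a zero separates it from
+ * the count bits, and 7'b1111111 disables counting. E.g. incrementing
+ * 7'b0111111 carries into the separator, giving 7'b1000000 whose
+ * count field is 0 -- which is how cdc_increment() detects overflow.
+ */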
+
+static void save_context_upper(CPUTriCoreState *env, int ea)
+{
+ cpu_stl_data(env, ea, env->PCXI);
+ cpu_stl_data(env, ea+4, psw_read(env));
+ cpu_stl_data(env, ea+8, env->gpr_a[10]);
+ cpu_stl_data(env, ea+12, env->gpr_a[11]);
+ cpu_stl_data(env, ea+16, env->gpr_d[8]);
+ cpu_stl_data(env, ea+20, env->gpr_d[9]);
+ cpu_stl_data(env, ea+24, env->gpr_d[10]);
+ cpu_stl_data(env, ea+28, env->gpr_d[11]);
+ cpu_stl_data(env, ea+32, env->gpr_a[12]);
+ cpu_stl_data(env, ea+36, env->gpr_a[13]);
+ cpu_stl_data(env, ea+40, env->gpr_a[14]);
+ cpu_stl_data(env, ea+44, env->gpr_a[15]);
+ cpu_stl_data(env, ea+48, env->gpr_d[12]);
+ cpu_stl_data(env, ea+52, env->gpr_d[13]);
+ cpu_stl_data(env, ea+56, env->gpr_d[14]);
+ cpu_stl_data(env, ea+60, env->gpr_d[15]);
+}
+
+static void save_context_lower(CPUTriCoreState *env, int ea)
+{
+ cpu_stl_data(env, ea, env->PCXI);
+ cpu_stl_data(env, ea+4, env->gpr_a[11]);
+ cpu_stl_data(env, ea+8, env->gpr_a[2]);
+ cpu_stl_data(env, ea+12, env->gpr_a[3]);
+ cpu_stl_data(env, ea+16, env->gpr_d[0]);
+ cpu_stl_data(env, ea+20, env->gpr_d[1]);
+ cpu_stl_data(env, ea+24, env->gpr_d[2]);
+ cpu_stl_data(env, ea+28, env->gpr_d[3]);
+ cpu_stl_data(env, ea+32, env->gpr_a[4]);
+ cpu_stl_data(env, ea+36, env->gpr_a[5]);
+ cpu_stl_data(env, ea+40, env->gpr_a[6]);
+ cpu_stl_data(env, ea+44, env->gpr_a[7]);
+ cpu_stl_data(env, ea+48, env->gpr_d[4]);
+ cpu_stl_data(env, ea+52, env->gpr_d[5]);
+ cpu_stl_data(env, ea+56, env->gpr_d[6]);
+ cpu_stl_data(env, ea+60, env->gpr_d[7]);
+}
+
+static void restore_context_upper(CPUTriCoreState *env, int ea,
+ target_ulong *new_PCXI, target_ulong *new_PSW)
+{
+ *new_PCXI = cpu_ldl_data(env, ea);
+ *new_PSW = cpu_ldl_data(env, ea+4);
+ env->gpr_a[10] = cpu_ldl_data(env, ea+8);
+ env->gpr_a[11] = cpu_ldl_data(env, ea+12);
+ env->gpr_d[8] = cpu_ldl_data(env, ea+16);
+ env->gpr_d[9] = cpu_ldl_data(env, ea+20);
+ env->gpr_d[10] = cpu_ldl_data(env, ea+24);
+ env->gpr_d[11] = cpu_ldl_data(env, ea+28);
+ env->gpr_a[12] = cpu_ldl_data(env, ea+32);
+ env->gpr_a[13] = cpu_ldl_data(env, ea+36);
+ env->gpr_a[14] = cpu_ldl_data(env, ea+40);
+ env->gpr_a[15] = cpu_ldl_data(env, ea+44);
+ env->gpr_d[12] = cpu_ldl_data(env, ea+48);
+ env->gpr_d[13] = cpu_ldl_data(env, ea+52);
+ env->gpr_d[14] = cpu_ldl_data(env, ea+56);
+ env->gpr_d[15] = cpu_ldl_data(env, ea+60);
+}
+
+static void restore_context_lower(CPUTriCoreState *env, int ea,
+ target_ulong *ra, target_ulong *pcxi)
+{
+ *pcxi = cpu_ldl_data(env, ea);
+ *ra = cpu_ldl_data(env, ea+4);
+ env->gpr_a[2] = cpu_ldl_data(env, ea+8);
+ env->gpr_a[3] = cpu_ldl_data(env, ea+12);
+ env->gpr_d[0] = cpu_ldl_data(env, ea+16);
+ env->gpr_d[1] = cpu_ldl_data(env, ea+20);
+ env->gpr_d[2] = cpu_ldl_data(env, ea+24);
+ env->gpr_d[3] = cpu_ldl_data(env, ea+28);
+ env->gpr_a[4] = cpu_ldl_data(env, ea+32);
+ env->gpr_a[5] = cpu_ldl_data(env, ea+36);
+ env->gpr_a[6] = cpu_ldl_data(env, ea+40);
+ env->gpr_a[7] = cpu_ldl_data(env, ea+44);
+ env->gpr_d[4] = cpu_ldl_data(env, ea+48);
+ env->gpr_d[5] = cpu_ldl_data(env, ea+52);
+ env->gpr_d[6] = cpu_ldl_data(env, ea+56);
+ env->gpr_d[7] = cpu_ldl_data(env, ea+60);
+}
+
+void helper_call(CPUTriCoreState *env, uint32_t next_pc)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+ target_ulong psw;
+
+ psw = psw_read(env);
+ /* if (FCX == 0) trap(FCU); */
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */
+ if (psw & MASK_PSW_CDE) {
+ if (cdc_increment(&psw)) {
+ /* CDO trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDO, GETPC());
+ }
+ }
+ /* PSW.CDE = 1;*/
+ psw |= MASK_PSW_CDE;
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11],
+ A[12], A[13], A[14], A[15], D[12], D[13], D[14],
+ D[15]}; */
+ save_context_upper(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) +
+ ((env->ICR & MASK_ICR_IE_1_3) << 15));
+ /* PCXI.UL = 1; */
+ env->PCXI |= MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+ /* A[11] = next_pc[31: 0]; */
+ env->gpr_a[11] = next_pc;
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+ psw_write(env, psw);
+}
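+
+/*
+ * The context save areas (CSAs) form a singly linked free list headed
+ * by FCX: the first word of each 16-word CSA links to the next one.
+ * helper_call() above pops the list head (new_FCX), fills it with the
+ * upper context and chains it onto the list of used contexts headed
+ * by PCXI; helper_ret() below performs the inverse.
+ */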
+
+void helper_ret(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ target_ulong new_PSW, psw;
+
+ psw = psw_read(env);
+ /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/
+ if (psw & MASK_PSW_CDE) {
+ if (cdc_decrement(&psw)) {
+ /* CDU trap */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDU, GETPC());
+ }
+ }
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* CSU trap */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 0) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) == 0) {
+ /* CTYP trap */
+ cdc_increment(&psw); /* restore to the start of helper */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* PC = {A11 [31: 1], 1’b0}; */
+ env->PC = env->gpr_a[11] & 0xfffffffe;
+
+ /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+ /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+ A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */
+ restore_context_upper(env, ea, &new_PCXI, &new_PSW);
+ /* M(EA, word) = FCX; */
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+
+ if (tricore_feature(env, TRICORE_FEATURE_13)) {
+ /* PSW = new_PSW */
+ psw_write(env, new_PSW);
+ } else {
+ /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */
+ psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000)));
+ }
+}
+
+void helper_bisr(CPUTriCoreState *env, uint32_t const9)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+
+ tmp_FCX = env->FCX;
+ ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6);
+
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4]
+ , A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */
+ save_context_lower(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) +
+ ((env->ICR & MASK_ICR_IE_1_3) << 15));
+ /* PCXI.UL = 0 */
+ env->PCXI &= ~(MASK_PCXI_UL);
+ /* PCXI[19: 0] = FCX[19: 0] */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FXC[19: 0] = new_FCX[19: 0] */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+ /* ICR.IE = 1 */
+ env->ICR |= MASK_ICR_IE_1_3;
+
+ env->ICR |= const9; /* ICR.CCPN = const9[7: 0];*/
+
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_rfe(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ target_ulong new_PSW;
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* raise csu trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 0) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) == 0) {
+ /* raise CTYP trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */
+ if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) {
+ /* raise NEST trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_NEST, GETPC());
+ }
+ env->PC = env->gpr_a[11] & ~0x1;
+ /* ICR.IE = PCXI.PIE; */
+ env->ICR = (env->ICR & ~MASK_ICR_IE_1_3)
+ + ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15);
+ /* ICR.CCPN = PCXI.PCPN; */
+ env->ICR = (env->ICR & ~MASK_ICR_CCPN) +
+ ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+ /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+ /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+ A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */
+ restore_context_upper(env, ea, &new_PCXI, &new_PSW);
+ /* M(EA, word) = FCX;*/
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+ /* write psw */
+ psw_write(env, new_PSW);
+}
+
+void helper_rfm(CPUTriCoreState *env)
+{
+ env->PC = (env->gpr_a[11] & ~0x1);
+ /* ICR.IE = PCXI.PIE; */
+ env->ICR = (env->ICR & ~MASK_ICR_IE_1_3)
+ | ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15);
+ /* ICR.CCPN = PCXI.PCPN; */
+ env->ICR = (env->ICR & ~MASK_ICR_CCPN) |
+ ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+ /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */
+ env->PCXI = cpu_ldl_data(env, env->DCX);
+ psw_write(env, cpu_ldl_data(env, env->DCX+4));
+ env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8);
+ env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12);
+
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ env->DBGTCR = 0;
+ }
+}
+
+void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
+{
+ uint32_t dummy;
+ /* insn doesn't load PCXI and RA */
+ restore_context_lower(env, ea, &dummy, &dummy);
+}
+
+void helper_lducx(CPUTriCoreState *env, uint32_t ea)
+{
+ uint32_t dummy;
+ /* insn doesn't load PCXI and PSW */
+ restore_context_upper(env, ea, &dummy, &dummy);
+}
+
+void helper_stlcx(CPUTriCoreState *env, uint32_t ea)
+{
+ save_context_lower(env, ea);
+}
+
+void helper_stucx(CPUTriCoreState *env, uint32_t ea)
+{
+ save_context_upper(env, ea);
+}
+
+void helper_svlcx(CPUTriCoreState *env)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+    /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2],
+                           D[3], A[4], A[5], A[6], A[7], D[4], D[5],
+                           D[6], D[7]}; */
+    save_context_lower(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) +
+ ((env->ICR & MASK_ICR_IE_1_3) << 15));
+ /* PCXI.UL = 0; */
+ env->PCXI &= ~MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_svucx(CPUTriCoreState *env)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11],
+ A[12], A[13], A[14], A[15], D[12], D[13], D[14],
+ D[15]}; */
+ save_context_upper(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) +
+ ((env->ICR & MASK_ICR_IE_1_3) << 15));
+ /* PCXI.UL = 1; */
+ env->PCXI |= MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_rslcx(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* CSU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 1) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) != 0) {
+ /* CTYP trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+    /* {new_PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4],
+       A[5], A[6], A[7], D[4], D[5], D[6], D[7]} = M(EA, 16 * word); */
+    restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI);
+ /* M(EA, word) = FCX; */
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+}
+
+void helper_psw_write(CPUTriCoreState *env, uint32_t arg)
+{
+ psw_write(env, arg);
+}
+
+uint32_t helper_psw_read(CPUTriCoreState *env)
+{
+ return psw_read(env);
+}
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
new file mode 100644
index 000000000..07084407c
--- /dev/null
+++ b/target/tricore/translate.c
@@ -0,0 +1,8949 @@
+/*
+ * TriCore emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/qemu-print.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "tricore-opcodes.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+/*
+ * TCG registers
+ */
+static TCGv cpu_PC;
+static TCGv cpu_PCXI;
+static TCGv cpu_PSW;
+static TCGv cpu_ICR;
+/* GPR registers */
+static TCGv cpu_gpr_a[16];
+static TCGv cpu_gpr_d[16];
+/* PSW Flag cache */
+static TCGv cpu_PSW_C;
+static TCGv cpu_PSW_V;
+static TCGv cpu_PSW_SV;
+static TCGv cpu_PSW_AV;
+static TCGv cpu_PSW_SAV;
+
+#include "exec/gen-icount.h"
+
+static const char *regnames_a[] = {
+ "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
+ "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
+ "a12" , "a13" , "a14" , "a15",
+ };
+
+static const char *regnames_d[] = {
+ "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
+ "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
+ "d12" , "d13" , "d14" , "d15",
+ };
+
+typedef struct DisasContext {
+ DisasContextBase base;
+ target_ulong pc_succ_insn;
+ uint32_t opcode;
+ /* Routine used to access memory */
+ int mem_idx;
+ uint32_t hflags, saved_hflags;
+ uint64_t features;
+} DisasContext;
+
+static int has_feature(DisasContext *ctx, int feature)
+{
+ return (ctx->features & (1ULL << feature)) != 0;
+}
+
+enum {
+ MODE_LL = 0,
+ MODE_LU = 1,
+ MODE_UL = 2,
+ MODE_UU = 3,
+};
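+
+/* Halfword selectors for the packed multiply-accumulate helpers: the
+   two letters name which half of each source operand (L = lower,
+   U = upper 16 bits) feeds the two 16x16 products, matching the
+   LL/LU/UL/UU insn variants; see the GEN_HELPER_* macros below. */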
+
+void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ uint32_t psw;
+ int i;
+
+ psw = psw_read(env);
+
+ qemu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
+ qemu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
+ qemu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
+ qemu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
+ qemu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
+ qemu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);
+
+ for (i = 0; i < 16; ++i) {
+ if ((i & 3) == 0) {
+ qemu_fprintf(f, "\nGPR A%02d:", i);
+ }
+ qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
+ }
+ for (i = 0; i < 16; ++i) {
+ if ((i & 3) == 0) {
+ qemu_fprintf(f, "\nGPR D%02d:", i);
+ }
+ qemu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
+ }
+ qemu_fprintf(f, "\n");
+}
+
+/*
+ * Functions to generate micro-ops
+ */
+
+/* Macros for generating helpers */
+
+#define gen_helper_1arg(name, arg) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ gen_helper_##name(cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_ext16s_tl(arg11, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg10 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_sari_tl(arg11, arg1, 16); \
+ tcg_gen_ext16s_tl(arg10, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg10); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg10 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_sari_tl(arg10, arg1, 16); \
+ tcg_gen_ext16s_tl(arg11, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg10); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg01, arg0, 16); \
+ tcg_gen_ext16s_tl(arg00, arg0); \
+ tcg_gen_sari_tl(arg11, arg1, 16); \
+ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
+ TCGv_i64 ret = tcg_temp_new_i64(); \
+ TCGv_i64 arg1 = tcg_temp_new_i64(); \
+ \
+ tcg_gen_concat_i32_i64(arg1, al1, ah1); \
+ gen_helper_##name(ret, arg1, arg2); \
+ tcg_gen_extr_i64_i32(rl, rh, ret); \
+ \
+ tcg_temp_free_i64(ret); \
+ tcg_temp_free_i64(arg1); \
+} while (0)
+
+#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
+ TCGv_i64 ret = tcg_temp_new_i64(); \
+ \
+ gen_helper_##name(ret, cpu_env, arg1, arg2); \
+ tcg_gen_extr_i64_i32(rl, rh, ret); \
+ \
+ tcg_temp_free_i64(ret); \
+} while (0)
+
+#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
+#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
+ ((offset & 0x0fffff) << 1))
+
+/* When a 64-bit operand is built from two 32-bit registers, the first
+   register number must be even; otherwise we trap. */
+static inline void generate_trap(DisasContext *ctx, int class, int tin);
+#define CHECK_REG_PAIR(reg) do { \
+ if (reg & 0x1) { \
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
+ } \
+} while (0)
+
+/* Functions for load/save to/from memory */
+
+static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, MemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, con);
+ tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, MemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, con);
+ tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_temp_free(temp);
+}
+
+static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ tcg_gen_concat_i32_i64(temp, rl, rh);
+ tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
+
+ tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+ DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, base, con);
+ gen_st_2regs_64(rh, rl, temp, ctx);
+ tcg_temp_free(temp);
+}
+
+static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ /* write back to two 32 bit regs */
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+ DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, base, con);
+ gen_ld_2regs_64(rh, rl, temp, ctx);
+ tcg_temp_free(temp);
+}
+
+static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+ MemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, off);
+ tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_tl(r2, temp);
+ tcg_temp_free(temp);
+}
+
+static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+ MemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, off);
+ tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_tl(r2, temp);
+ tcg_temp_free(temp);
+}
+
+/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
+static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ CHECK_REG_PAIR(ereg);
+ /* temp = (M(EA, word) */
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ /* temp = temp & ~E[a][63:32]) */
+ tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
+ /* temp2 = (E[a][31:0] & E[a][63:32]); */
+ tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
+ /* temp = temp | temp2; */
+ tcg_gen_or_tl(temp, temp, temp2);
+ /* M(EA, word) = temp; */
+ tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* tmp = M(EA, word);
+ M(EA, word) = D[a];
+ D[a] = tmp[31:0];*/
+static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+}
+
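+/* CMPSWAP.W: tmp = M(EA, word);
+   M(EA, word) = ((tmp == D[a+1]) ? D[a] : tmp);
+   D[a] = tmp; */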
+static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
+ cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
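+/* SWAPMSK.W: tmp = M(EA, word);
+   M(EA, word) = (D[a] & D[a+1]) | (tmp & ~D[a+1]);
+   D[a] = tmp; */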
+static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
+ tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
+ tcg_gen_or_tl(temp2, temp2, temp3);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
+
+/* We generate loads of and stores to the core special function registers
+   (CSFRs) through the functions gen_mfcr and gen_mtcr. To handle access
+   permissions, we use the three macros R, A and E, which allow read-only,
+   all-access and endinit-protected access respectively. These macros also
+   specify in which ISA version the CSFR was introduced. */
+#define R(ADDRESS, REG, FEATURE) \
+ case ADDRESS: \
+ if (has_feature(ctx, FEATURE)) { \
+ tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ } \
+ break;
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset)
+{
+ /* since we're caching PSW make this a special case */
+ if (offset == 0xfe04) {
+ gen_helper_psw_read(ret, cpu_env);
+ } else {
+ switch (offset) {
+#include "csfr.def"
+ }
+ }
+}
+#undef R
+#undef A
+#undef E
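+
+/* csfr.def supplies the case labels: one R/A/E entry per register, of
+   the (illustrative) form
+       A(0xfe00, PCXI, TRICORE_FEATURE_13)
+   i.e. the CSFR offset, the corresponding CPUTriCoreState field and
+   the first ISA version providing the register. */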
+
+#define R(ADDRESS, REG, FEATURE) /* don't generate writes to a read-only
+                                    reg, since no exception occurs */
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
+ case ADDRESS: \
+ if (has_feature(ctx, FEATURE)) { \
+ tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ } \
+ break;
+/* Endinit protected registers
+ TODO: Since the endinit bit is in a register of a not yet implemented
+ watchdog device, we handle endinit protected registers like
+ all-access registers for now. */
+#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
+static inline void gen_mtcr(DisasContext *ctx, TCGv r1,
+ int32_t offset)
+{
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
+ /* since we're caching PSW make this a special case */
+ if (offset == 0xfe04) {
+ gen_helper_psw_write(cpu_env, r1);
+ } else {
+ switch (offset) {
+#include "csfr.def"
+ }
+ }
+ } else {
+ /* generate privilege trap */
+ }
+}
+
+/* Functions for arithmetic instructions */
+
+static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+ /* Addition and set V/SV bits */
+ tcg_gen_add_tl(result, r1, r2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+}
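+
+/*
+ * The flag computation in gen_add_d() is the branch-free pattern used
+ * throughout this file: signed overflow (V) occurred iff the operands
+ * have equal signs and the result's sign differs, i.e. the MSB of
+ * (result ^ r1) & ~(r1 ^ r2). Advance overflow (AV) is the MSB of
+ * result ^ (result * 2), i.e. bit 31 differs from bit 30.
+ */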
+
+static inline void
+gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 result = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(result, r1, r2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t1, result, r1);
+ tcg_gen_xor_i64(t0, r1, r2);
+ tcg_gen_andc_i64(t1, t1, t0);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* calc AV/SAV bits */
+ tcg_gen_extrh_i64_i32(temp, result);
+ tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_i64(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void
+gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
+ void(*op2)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv temp4 = tcg_temp_new();
+
+ (*op1)(temp, r1_low, r2);
+ /* calc V0 bit */
+ tcg_gen_xor_tl(temp2, temp, r1_low);
+ tcg_gen_xor_tl(temp3, r1_low, r2);
+ if (op1 == tcg_gen_add_tl) {
+ tcg_gen_andc_tl(temp2, temp2, temp3);
+ } else {
+ tcg_gen_and_tl(temp2, temp2, temp3);
+ }
+
+ (*op2)(temp3, r1_high, r3);
+ /* calc V1 bit */
+ tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
+ tcg_gen_xor_tl(temp4, r1_high, r3);
+ if (op2 == tcg_gen_add_tl) {
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ } else {
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ }
+ /* combine V0/V1 bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
+ /* calc sv bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_mov_tl(ret_low, temp);
+ tcg_gen_mov_tl(ret_high, temp3);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, ret_low, ret_low);
+ tcg_gen_xor_tl(temp, temp, ret_low);
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free(temp4);
+}
+
+/* ret = r2 + (r1 * r3); */
+static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, r1);
+ tcg_gen_ext_i32_i64(t2, r2);
+ tcg_gen_ext_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_add_i64(t1, t2, t1);
+
+ tcg_gen_extrl_i64_i32(ret, t1);
+ /* calc V
+ t1 > 0x7fffffff */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+ /* t1 < -0x80000000 */
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madd32_d(ret, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ tcg_gen_muls2_tl(t1, t2, r1, r3);
+ /* only the add can overflow */
+ tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_tl(t1, r2_high, t2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back the result */
+ tcg_gen_mov_tl(ret_low, t3);
+ tcg_gen_mov_tl(ret_high, t4);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ tcg_temp_free(t4);
+}
+
+static inline void
+gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t1, r1);
+ tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+ tcg_gen_extu_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_add_i64(t2, t2, t1);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
+ /* only the add overflows, if t2 < t1
+ calc V bit */
+ tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_add_tl, tcg_gen_add_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_sub_tl, tcg_gen_add_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+
+ gen_add64_d(temp64_2, temp64_3, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
+
+static inline void
+gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_adds(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_adds(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
+
+static inline void
+gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_subs(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_adds(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
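+/* gen_maddsums_h: the cross term (high product - low product) << 16 is
+ added to the r1 register pair through the saturating add64_ssov helper
+ instead of a plain 64-bit add. */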
+static inline void
+gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+
+ gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_add64_d(temp64_3, temp64_2, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
+ uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
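+/* r1 holds two packed halfwords; widen each into its own 32-bit value
+ (kept in the upper 16 bits) so the 64-bit variant above can be reused. */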
+static inline void
+gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
+ uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
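+/* 32-bit Q-format multiply-accumulate: the operands are widened to 64 bit,
+ multiplied, shifted left by n (fractional doubling) and arithmetically
+ shifted right by up_shift to realign the binary point, then added to the
+ widened accumulator. V is set when the 64-bit sum leaves the signed
+ 32-bit range. */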
+static inline void
+gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_shli_i64(t2, t2, n);
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_sari_i64(t2, t2, up_shift);
+
+ tcg_gen_add_i64(t3, t1, t2);
+ tcg_gen_extrl_i64_i32(temp3, t3);
+ /* calc v bit */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
+ tcg_gen_or_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* We produce an overflow on the host if the preceding mul was
+ (0x80000000 * 0x80000000) << 1. If this is the case, we invert
+ the overflow bit. */
+ if (n == 1) {
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ }
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
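+/* For n == 1 the doubled product of 0x8000 * 0x8000 would be 0x80000000;
+ the setcond/sub pair below rewrites it to 0x7fffffff, i.e. the saturated
+ Q15 result of -1 * -1, before the accumulate. */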
+static inline void
+gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_add_d(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_adds(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ gen_add64_d(t3, t1, t2);
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+
+ gen_helper_add64_ssov(t1, cpu_env, t1, t2);
+ tcg_gen_extr_i64_i32(rl, rh, t1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
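+/* V for the 64-bit accumulate uses the classic sign trick: overflow
+ happens iff both addends have the same sign and the result differs,
+ i.e. (res ^ a) & ~(a ^ b) has bit 63 set; extrh moves that bit into
+ cpu_PSW_V. */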
+static inline void
+gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+ TCGv temp, temp2;
+
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ if (n != 0) {
+ tcg_gen_shli_i64(t2, t2, 1);
+ }
+ tcg_gen_add_i64(t4, t1, t2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t3, t4, t1);
+ tcg_gen_xor_i64(t2, t1, t2);
+ tcg_gen_andc_i64(t3, t3, t2);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
+ /* We produce an overflow on the host if the preceding mul was
+ (0x80000000 * 0x80000000) << 1. If this is the case, we invert
+ the overflow bit. */
+ if (n == 1) {
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ }
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t4);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_sari_i64(t2, t2, up_shift - n);
+
+ gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 r1 = tcg_temp_new_i64();
+ TCGv temp = tcg_const_i32(n);
+
+ tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+ gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ tcg_gen_extr_i64_i32(rl, rh, r1);
+
+ tcg_temp_free_i64(r1);
+ tcg_temp_free(temp);
+}
+
+/* ret = r2 - (r1 * r3); */
+static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, r1);
+ tcg_gen_ext_i32_i64(t2, r2);
+ tcg_gen_ext_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_sub_i64(t1, t2, t1);
+
+ tcg_gen_extrl_i64_i32(ret, t1);
+ /* calc V bit: result > 0x7fffffff */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+ /* result < -0x80000000 */
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msub32_d(ret, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ tcg_gen_muls2_tl(t1, t2, r1, r3);
+ /* only the sub can overflow */
+ tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_tl(t1, r2_high, t2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back the result */
+ tcg_gen_mov_tl(ret_low, t3);
+ tcg_gen_mov_tl(ret_high, t4);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ tcg_temp_free(t4);
+}
+
+static inline void
+gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t1, r1);
+ tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+ tcg_gen_extu_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_sub_i64(t3, t2, t1);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
+ /* calc V bit, only the sub can overflow, if t1 > t2 */
+ tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_add_d(ret, r1, temp);
+ tcg_temp_free(temp);
+}
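+/* Flag conventions used by the helpers below, as implemented here:
+ PSW_C holds the raw carry-out, PSW_V holds overflow in bit 31, and
+ PSW_AV ("advanced overflow") is result[31] ^ result[30], computed as
+ result ^ (result + result). SV and SAV are the sticky versions,
+ OR-accumulated after every operation. */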
+/* calculate the carry bit too */
+static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_movi_tl(t0, 0);
+ /* Addition and set C/V/SV bits */
+ tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_add_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv carry = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
+ /* Addition, carry and set C/V/SV bits */
+ tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
+ tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+ tcg_temp_free(carry);
+}
+
+static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_addc_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
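+/* Conditional add: mask is (cond(r4, 0) ? 0x80000000 : 0) and gates the
+ sticky-bit updates, while the movconds keep the old PSW_V/PSW_AV values
+ and write r1 through unchanged to r3 whenever the condition fails. */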
+static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
+ TCGv r4)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv result = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv t0 = tcg_const_i32(0);
+
+ /* create mask for sticky bits */
+ tcg_gen_setcond_tl(cond, mask, r4, t0);
+ tcg_gen_shli_tl(mask, mask, 31);
+
+ tcg_gen_add_tl(result, r1, r2);
+ /* Calc PSW_V */
+ tcg_gen_xor_tl(temp, result, r1);
+ tcg_gen_xor_tl(temp2, r1, r2);
+ tcg_gen_andc_tl(temp, temp, temp2);
+ tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ /* Set PSW_SV */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, result, result);
+ tcg_gen_xor_tl(temp, temp, result);
+ tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ /* write back result */
+ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(result);
+ tcg_temp_free(mask);
+}
+
+static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
+ TCGv r3, TCGv r4)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_cond_add(cond, r1, temp, r3, r4);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(result);
+}
+
+static inline void
+gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 result = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(result, r1, r2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t1, result, r1);
+ tcg_gen_xor_i64(t0, r1, r2);
+ tcg_gen_and_i64(t1, t1, t0);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* calc AV/SAV bits */
+ tcg_gen_extrh_i64_i32(temp, result);
+ tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_i64(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv result = tcg_temp_new();
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* calc C bit */
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(temp);
+}
+
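+/* Subtract with borrow, expressed as an add-with-carry of the one's
+ complement: r1 + ~r2 + C == r1 - r2 - 1 + C, i.e. carry acts as
+ "not borrow". */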
+static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_not_tl(temp, r2);
+ gen_addc_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
+ TCGv r4)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv result = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv t0 = tcg_const_i32(0);
+
+ /* create mask for sticky bits */
+ tcg_gen_setcond_tl(cond, mask, r4, t0);
+ tcg_gen_shli_tl(mask, mask, 31);
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* Calc PSW_V */
+ tcg_gen_xor_tl(temp, result, r1);
+ tcg_gen_xor_tl(temp2, r1, r2);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ /* Set PSW_SV */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, result, result);
+ tcg_gen_xor_tl(temp, temp, result);
+ tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ /* write back result */
+ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(result);
+ tcg_temp_free(mask);
+}
+
+static inline void
+gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_sub_tl, tcg_gen_sub_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_subs(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_subs(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_sub64_d(temp64_3, temp64_2, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
+ uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
+ uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
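+/* Unlike the madd variant, the msub path rounds when fraction bits are
+ shifted out: any non-zero discarded bits add 1 to the product after the
+ arithmetic shift by (up_shift - n). */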
+static inline void
+gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ /* if we shift part of the fraction out, we need to round up */
+ tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+ tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+ tcg_gen_sari_i64(t2, t2, up_shift - n);
+ tcg_gen_add_i64(t2, t2, t4);
+
+ tcg_gen_sub_i64(t3, t1, t2);
+ tcg_gen_extrl_i64_i32(temp3, t3);
+ /* calc v bit */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
+ tcg_gen_or_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_sub_d(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_subs(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ gen_sub64_d(t3, t1, t2);
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+
+ gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
+ tcg_gen_extr_i64_i32(rl, rh, t1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static inline void
+gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+ TCGv temp, temp2;
+
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ if (n != 0) {
+ tcg_gen_shli_i64(t2, t2, 1);
+ }
+ tcg_gen_sub_i64(t4, t1, t2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t3, t4, t1);
+ tcg_gen_xor_i64(t2, t1, t2);
+ tcg_gen_and_i64(t3, t3, t2);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
+ /* We produce an overflow on the host if the preceding mul was
+ (0x80000000 * 0x80000000) << 1. If this is the case, we invert
+ the overflow bit. */
+ if (n == 1) {
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ }
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t4);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ /* if we shift part of the fraction out, we need to round up */
+ tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+ tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+ tcg_gen_sari_i64(t3, t2, up_shift - n);
+ tcg_gen_add_i64(t3, t3, t4);
+
+ gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 r1 = tcg_temp_new_i64();
+ TCGv temp = tcg_const_i32(n);
+
+ tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+ gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ tcg_gen_extr_i64_i32(rl, rh, r1);
+
+ tcg_temp_free_i64(r1);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_add_tl, tcg_gen_sub_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+
+ gen_sub64_d(temp64_2, temp64_3, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_adds(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_subs(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+
+ gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void gen_abs(TCGv ret, TCGv r1)
+{
+ tcg_gen_abs_tl(ret, r1);
+ /* overflow can only happen if r1 = 0x80000000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_tl(temp, r2, r1);
+ tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
+
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, result, r2);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(result);
+}
+
+static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_absdif(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
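+/* Signed 32 x 32 -> 32 multiply: the product fits in 32 bits iff the high
+ word of the 64-bit result equals the sign extension of the low word, so
+ V = (high != low >> 31). */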
+static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv high = tcg_temp_new();
+ TCGv low = tcg_temp_new();
+
+ tcg_gen_muls2_tl(low, high, r1, r2);
+ tcg_gen_mov_tl(ret, low);
+ /* calc V bit */
+ tcg_gen_sari_tl(low, low, 31);
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(high);
+ tcg_temp_free(low);
+}
+
+static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i32s(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+ tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i64s(ret_low, ret_high, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+ tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i64u(ret_low, ret_high, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_mul_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_mul_suov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
+static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static void
+gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 temp_64 = tcg_temp_new_i64();
+ TCGv_i64 temp2_64 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ if (up_shift == 32) {
+ tcg_gen_muls2_tl(rh, rl, arg1, arg2);
+ } else if (up_shift == 16) {
+ tcg_gen_ext_i32_i64(temp_64, arg1);
+ tcg_gen_ext_i32_i64(temp2_64, arg2);
+
+ tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
+ tcg_gen_shri_i64(temp_64, temp_64, up_shift);
+ tcg_gen_extr_i64_i32(rl, rh, temp_64);
+ } else {
+ tcg_gen_muls2_tl(rl, rh, arg1, arg2);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ } else { /* n is expected to be 1 */
+ tcg_gen_ext_i32_i64(temp_64, arg1);
+ tcg_gen_ext_i32_i64(temp2_64, arg2);
+
+ tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
+
+ if (up_shift == 0) {
+ tcg_gen_shli_i64(temp_64, temp_64, 1);
+ } else {
+ tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, temp_64);
+ /* overflow only occurs if r1 = r2 = 0x8000 */
+ if (up_shift == 0) {/* result is 64 bit */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
+ 0x80000000);
+ } else { /* result is 32 bit */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
+ 0x80000000);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc sv overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ }
+ /* calc av overflow bit */
+ if (up_shift == 0) {
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ } else {
+ tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
+ tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
+ }
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp_64);
+ tcg_temp_free_i64(temp2_64);
+}
+
+static void
+gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_shli_tl(ret, ret, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
+ tcg_gen_sub_tl(ret, ret, temp);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+}
+
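+/* Rounded Q-format multiply: 0x8000 is added before the result is
+ truncated to its upper halfword. For n == 1 the 0x8000 * 0x8000 case
+ yields 0x80008000 after rounding; subtracting 0x8001 turns that into
+ 0x7fffffff, so the saturated 0x7fff survives the final mask. */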
+static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_addi_tl(ret, ret, 0x8000);
+ } else {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_shli_tl(ret, ret, 1);
+ tcg_gen_addi_tl(ret, ret, 0x8000);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
+ tcg_gen_muli_tl(temp, temp, 0x8001);
+ tcg_gen_sub_tl(ret, ret, temp);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* keep only the upper halfword */
+ tcg_gen_andi_tl(ret, ret, 0xffff0000);
+
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
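+/* Generic clamp of arg into [low, up] built from two movconds; no PSW
+ bits are touched here. */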
+static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
+{
+ TCGv sat_neg = tcg_const_i32(low);
+ TCGv temp = tcg_const_i32(up);
+
+ /* sat_neg = (arg < low) ? low : arg; */
+ tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
+
+ /* ret = (sat_neg > up) ? up : sat_neg; */
+ tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
+
+ tcg_temp_free(sat_neg);
+ tcg_temp_free(temp);
+}
+
+static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
+{
+ TCGv temp = tcg_const_i32(up);
+    /* ret = (arg > up) ? up : arg; */
+ tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
+ tcg_temp_free(temp);
+}
+
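+/* shift left for positive counts, shift right logically for negative
+   ones; a count of -32 clears the result */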
+static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ if (shift_count == -32) {
+ tcg_gen_movi_tl(ret, 0);
+ } else if (shift_count >= 0) {
+ tcg_gen_shli_tl(ret, r1, shift_count);
+ } else {
+ tcg_gen_shri_tl(ret, r1, -shift_count);
+ }
+}
+
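+/* per-halfword variant of gen_shi */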
+static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
+{
+ TCGv temp_low, temp_high;
+
+ if (shiftcount == -16) {
+ tcg_gen_movi_tl(ret, 0);
+ } else {
+ temp_high = tcg_temp_new();
+ temp_low = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp_low, r1, 0xffff);
+ tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
+ gen_shi(temp_low, temp_low, shiftcount);
+ gen_shi(ret, temp_high, shiftcount);
+ tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
+
+ tcg_temp_free(temp_low);
+ tcg_temp_free(temp_high);
+ }
+}
+
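+/* arithmetic shift by an immediate; updates PSW.C, V, SV, AV and SAV */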
+static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ uint32_t msk, msk_start;
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ if (shift_count == 0) {
+ /* Clear PSW.C and PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_C, 0);
+ tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
+ tcg_gen_mov_tl(ret, r1);
+ } else if (shift_count == -32) {
+ /* set PSW.C */
+ tcg_gen_mov_tl(cpu_PSW_C, r1);
+ /* fill ret completely with sign bit */
+ tcg_gen_sari_tl(ret, r1, 31);
+ /* clear PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ } else if (shift_count > 0) {
+ TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
+ TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
+
+ /* calc carry */
+ msk_start = 32 - shift_count;
+ msk = ((1 << shift_count) - 1) << msk_start;
+ tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ /* calc v/sv bits */
+ tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
+ tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
+ tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
+ /* do shift */
+ tcg_gen_shli_tl(ret, r1, shift_count);
+
+ tcg_temp_free(t_max);
+ tcg_temp_free(t_min);
+ } else {
+ /* clear PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc carry */
+ msk = (1 << -shift_count) - 1;
+ tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ /* do shift */
+ tcg_gen_sari_tl(ret, r1, -shift_count);
+ }
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
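+/* arithmetic shift with signed saturation, done in the helper */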
+static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sha_ssov(ret, cpu_env, r1, r2);
+}
+
+static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_shas(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ TCGv low, high;
+
+ if (shift_count == 0) {
+ tcg_gen_mov_tl(ret, r1);
+ } else if (shift_count > 0) {
+ low = tcg_temp_new();
+ high = tcg_temp_new();
+
+ tcg_gen_andi_tl(high, r1, 0xffff0000);
+ tcg_gen_shli_tl(low, r1, shift_count);
+ tcg_gen_shli_tl(ret, high, shift_count);
+ tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+ tcg_temp_free(low);
+ tcg_temp_free(high);
+ } else {
+ low = tcg_temp_new();
+
+ tcg_gen_ext16s_tl(low, r1);
+ tcg_gen_sari_tl(low, low, -shift_count);
+ tcg_gen_sari_tl(ret, r1, -shift_count);
+ tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+ tcg_temp_free(low);
+    }
+}
+
+/* ret = {ret[30:0], (r1 cond r2)}; */
+static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_shli_tl(temp, ret, 1);
+ tcg_gen_setcond_tl(cond, temp2, r1, r2);
+ tcg_gen_or_tl(ret, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_sh_cond(cond, ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_add_ssov(ret, cpu_env, r1, r2);
+}
+
+static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_add_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_add_suov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sub_ssov(ret, cpu_env, r1, r2);
+}
+
+static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sub_suov(ret, cpu_env, r1, r2);
+}
+
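+/* ret = {ret[31:1], ret[0] op2 (r1[pos1] op1 r2[pos2])}; */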
+static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv),
+ void(*op2)(TCGv, TCGv, TCGv))
+{
+ TCGv temp1, temp2;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, r2, pos2);
+ tcg_gen_shri_tl(temp1, r1, pos1);
+
+ (*op1)(temp1, temp1, temp2);
+    (*op2)(temp1, ret, temp1);
+
+ tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+/* ret = r1[pos1] op1 r2[pos2]; */
+static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv))
+{
+ TCGv temp1, temp2;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, r2, pos2);
+ tcg_gen_shri_tl(temp1, r1, pos1);
+
+ (*op1)(ret, temp1, temp2);
+
+ tcg_gen_andi_tl(ret, ret, 0x1);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
+ void(*op)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+    /* temp = (r1 cond r2) */
+    tcg_gen_setcond_tl(cond, temp, r1, r2);
+    /* temp2 = ret[0] */
+    tcg_gen_andi_tl(temp2, ret, 0x1);
+    /* temp = temp op temp2 */
+ (*op)(temp, temp, temp2);
+ /* ret = {ret[31:1], temp} */
+ tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
+ void(*op)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_accumulating_cond(cond, ret, r1, temp, op);
+ tcg_temp_free(temp);
+}
+
+/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
+static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
+{
+ tcg_gen_setcond_tl(cond, ret, r1, r2);
+ tcg_gen_neg_tl(ret, ret);
+}
+
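+/* ret = 1 if any byte of r1 equals the corresponding byte of con */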
+static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv b0 = tcg_temp_new();
+ TCGv b1 = tcg_temp_new();
+ TCGv b2 = tcg_temp_new();
+ TCGv b3 = tcg_temp_new();
+
+ /* byte 0 */
+ tcg_gen_andi_tl(b0, r1, 0xff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
+
+ /* byte 1 */
+ tcg_gen_andi_tl(b1, r1, 0xff00);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
+
+ /* byte 2 */
+ tcg_gen_andi_tl(b2, r1, 0xff0000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
+
+ /* byte 3 */
+ tcg_gen_andi_tl(b3, r1, 0xff000000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
+
+ /* combine them */
+ tcg_gen_or_tl(ret, b0, b1);
+ tcg_gen_or_tl(ret, ret, b2);
+ tcg_gen_or_tl(ret, ret, b3);
+
+ tcg_temp_free(b0);
+ tcg_temp_free(b1);
+ tcg_temp_free(b2);
+ tcg_temp_free(b3);
+}
+
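+/* halfword variant of gen_eqany_bi */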
+static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+
+ /* halfword 0 */
+ tcg_gen_andi_tl(h0, r1, 0xffff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
+
+ /* halfword 1 */
+ tcg_gen_andi_tl(h1, r1, 0xffff0000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
+
+ /* combine them */
+ tcg_gen_or_tl(ret, h0, h1);
+
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
+}
+
+/* mask = ((1 << width) - 1) << pos;
+   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
+static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+{
+ TCGv mask = tcg_temp_new();
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(mask, 1);
+ tcg_gen_shl_tl(mask, mask, width);
+ tcg_gen_subi_tl(mask, mask, 1);
+ tcg_gen_shl_tl(mask, mask, pos);
+
+ tcg_gen_shl_tl(temp, r2, pos);
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_andc_tl(temp2, r1, mask);
+ tcg_gen_or_tl(ret, temp, temp2);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ gen_helper_bsplit(temp, r1);
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
+static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ gen_helper_unpack(temp, r1);
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
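+/* pick the DVINIT.B helper matching the ISA revision (1.3 vs 1.3.1) */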
+static inline void
+gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+
+ if (!has_feature(ctx, TRICORE_FEATURE_131)) {
+ gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
+ } else {
+ gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, ret);
+
+ tcg_temp_free_i64(ret);
+}
+
+static inline void
+gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+
+ if (!has_feature(ctx, TRICORE_FEATURE_131)) {
+ gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
+ } else {
+ gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, ret);
+
+ tcg_temp_free_i64(ret);
+}
+
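+/* recompute PSW.AV/SAV from both halfword results and clear PSW.V */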
+static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
+{
+ TCGv temp = tcg_temp_new();
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, arg_low, arg_low);
+ tcg_gen_xor_tl(temp, temp, arg_low);
+ tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_temp_free(temp);
+}
+
+static void gen_calc_usb_mulr_h(TCGv arg)
+{
+ TCGv temp = tcg_temp_new();
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, arg, arg);
+ tcg_gen_xor_tl(temp, temp, arg);
+ tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_temp_free(temp);
+}
+
+/* helpers for generating program flow micro-ops */
+
+static inline void gen_save_pc(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_PC, pc);
+}
+
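+/* chain directly to the target TB when possible, otherwise jump via
+   the TB lookup table */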
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (translator_use_goto_tb(&ctx->base, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_save_pc(dest);
+ tcg_gen_exit_tb(ctx->base.tb, n);
+ } else {
+ gen_save_pc(dest);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+}
+
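+/* raise a synchronous trap with the given class and trap
+   identification number (TIN) at the current instruction */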
+static void generate_trap(DisasContext *ctx, int class, int tin)
+{
+ TCGv_i32 classtemp = tcg_const_i32(class);
+ TCGv_i32 tintemp = tcg_const_i32(tin);
+
+ gen_save_pc(ctx->base.pc_next);
+ gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ tcg_temp_free(classtemp);
+ tcg_temp_free(tintemp);
+}
+
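+/* conditional branch: taken -> pc + address * 2, not taken -> next insn */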
+static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
+ TCGv r2, int16_t address)
+{
+ TCGLabel *jumpLabel = gen_new_label();
+ tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
+
+ gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
+
+ gen_set_label(jumpLabel);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2);
+}
+
+static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
+ int r2, int16_t address)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_branch_cond(ctx, cond, r1, temp, address);
+ tcg_temp_free(temp);
+}
+
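+/* decrement a[r1] and branch back unless the counter wrapped to -1 */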
+static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
+{
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + offset);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
+}
+
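+/* FCALL: push a[11] onto the stack at a[10] and point a[11] at the
+   return address */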
+static void gen_fcall_save_ctx(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_mov_tl(cpu_gpr_a[10], temp);
+
+ tcg_temp_free(temp);
+}
+
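+/* FRET: jump to the halfword-aligned address in a[11] and pop the
+   caller's a[11] from the stack */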
+static void gen_fret(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
+ tcg_gen_mov_tl(cpu_PC, temp);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+
+ tcg_temp_free(temp);
+}
+
+static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
+ int r2 , int32_t constant , int32_t offset)
+{
+ TCGv temp, temp2;
+ int n;
+
+ switch (opc) {
+/* SB-format jumps */
+ case OPC1_16_SB_J:
+ case OPC1_32_B_J:
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
+ break;
+ case OPC1_32_B_CALL:
+ case OPC1_16_SB_CALL:
+ gen_helper_1arg(call, ctx->pc_succ_insn);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
+ break;
+ case OPC1_16_SB_JZ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
+ break;
+ case OPC1_16_SB_JNZ:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
+ break;
+/* SBC-format jumps */
+ case OPC1_16_SBC_JEQ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
+ break;
+ case OPC1_16_SBC_JEQ2:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant,
+ offset + 16);
+ break;
+ case OPC1_16_SBC_JNE:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
+ break;
+ case OPC1_16_SBC_JNE2:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15],
+ constant, offset + 16);
+ break;
+/* SBRN-format jumps */
+ case OPC1_16_SBRN_JZ_T:
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SBRN_JNZ_T:
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
+ tcg_temp_free(temp);
+ break;
+/* SBR-format jumps */
+ case OPC1_16_SBR_JEQ:
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset);
+ break;
+ case OPC1_16_SBR_JEQ2:
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset + 16);
+ break;
+ case OPC1_16_SBR_JNE:
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset);
+ break;
+ case OPC1_16_SBR_JNE2:
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset + 16);
+ break;
+ case OPC1_16_SBR_JNZ:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JNZ_A:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JGEZ:
+ gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JGTZ:
+ gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JLEZ:
+ gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JLTZ:
+ gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JZ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JZ_A:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_LOOP:
+ gen_loop(ctx, r1, offset * 2 - 32);
+ break;
+/* SR-format jumps */
+ case OPC1_16_SR_JI:
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case OPC2_32_SYS_RET:
+ case OPC2_16_SR_RET:
+ gen_helper_ret(cpu_env);
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+/* B-format */
+ case OPC1_32_B_CALLA:
+ gen_helper_1arg(call, ctx->pc_succ_insn);
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_FCALL:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
+ break;
+ case OPC1_32_B_FCALLA:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_JLA:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ /* fall through */
+ case OPC1_32_B_JA:
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_JL:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
+ break;
+/* BOL format */
+ case OPCM_32_BRC_EQ_NEQ:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) {
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset);
+ }
+ break;
+ case OPCM_32_BRC_GE:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) {
+ gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset);
+ } else {
+ constant = MASK_OP_BRC_CONST4(ctx->opcode);
+ gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant,
+ offset);
+ }
+ break;
+ case OPCM_32_BRC_JLT:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) {
+ gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset);
+ } else {
+ constant = MASK_OP_BRC_CONST4(ctx->opcode);
+ gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant,
+ offset);
+ }
+ break;
+ case OPCM_32_BRC_JNE:
+ temp = tcg_temp_new();
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* subi is unconditional */
+ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* addi is unconditional */
+ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+ }
+ tcg_temp_free(temp);
+ break;
+/* BRN format */
+ case OPCM_32_BRN_JTT:
+ n = MASK_OP_BRN_N(ctx->opcode);
+
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
+
+ if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
+ gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
+ }
+ tcg_temp_free(temp);
+ break;
+/* BRR Format */
+ case OPCM_32_BRR_EQ_NEQ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) {
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_ADDR_EQ_NEQ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) {
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_GE:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) {
+ gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_JLT:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) {
+ gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_LOOP:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) {
+ gen_loop(ctx, r2, offset * 2);
+ } else {
+ /* OPC2_32_BRR_LOOPU */
+ gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2);
+ }
+ break;
+ case OPCM_32_BRR_JNE:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* also save d[r2]; if r1 == r2 the compare must use the
+               value before the decrement */
+ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ /* subi is unconditional */
+ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* also save d[r2]; if r1 == r2 the compare must use the
+               value before the increment */
+ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ /* addi is unconditional */
+ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPCM_32_BRR_JNZ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) {
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+/*
+ * Functions for decoding instructions
+ */
+
+static void decode_src_opc(DisasContext *ctx, int op1)
+{
+ int r1;
+ int32_t const4;
+ TCGv temp, temp2;
+
+ r1 = MASK_OP_SRC_S1D(ctx->opcode);
+ const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SRC_ADD:
+ gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_ADD_A15:
+ gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
+ break;
+ case OPC1_16_SRC_ADD_15A:
+ gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_ADD_A:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
+ break;
+ case OPC1_16_SRC_CADD:
+ gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
+ cpu_gpr_d[15]);
+ break;
+ case OPC1_16_SRC_CADDN:
+ gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
+ cpu_gpr_d[15]);
+ break;
+ case OPC1_16_SRC_CMOV:
+ temp = tcg_const_tl(0);
+ temp2 = tcg_const_tl(const4);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp2, cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC1_16_SRC_CMOVN:
+ temp = tcg_const_tl(0);
+ temp2 = tcg_const_tl(const4);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp2, cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC1_16_SRC_EQ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ const4);
+ break;
+ case OPC1_16_SRC_LT:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ const4);
+ break;
+ case OPC1_16_SRC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_MOV_A:
+ const4 = MASK_OP_SRC_CONST4(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
+ break;
+ case OPC1_16_SRC_MOV_E:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_16_SRC_SH:
+ gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_SHA:
+ gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_srr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_SRR_S1D(ctx->opcode);
+ r2 = MASK_OP_SRR_S2(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SRR_ADD:
+ gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_A15:
+ gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_15A:
+ gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_A:
+ tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_ADDS:
+ gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_AND:
+ tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_CMOV:
+ temp = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SRR_CMOVN:
+ temp = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SRR_EQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_LT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV:
+ tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV_A:
+ tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV_AA:
+ tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_MOV_D:
+ tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_MUL:
+ gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_OR:
+ tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB:
+ gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB_A15B:
+ gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB_15AB:
+ gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUBS:
+ gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_XOR:
+ tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_ssr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+
+ r1 = MASK_OP_SSR_S1(ctx->opcode);
+ r2 = MASK_OP_SSR_S2(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SSR_ST_A:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ break;
+ case OPC1_16_SSR_ST_A_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ case OPC1_16_SSR_ST_B:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ break;
+ case OPC1_16_SSR_ST_B_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ break;
+ case OPC1_16_SSR_ST_H:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ break;
+ case OPC1_16_SSR_ST_H_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ break;
+ case OPC1_16_SSR_ST_W:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ break;
+ case OPC1_16_SSR_ST_W_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sc_opc(DisasContext *ctx, int op1)
+{
+ int32_t const16;
+
+ const16 = MASK_OP_SC_CONST8(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SC_AND:
+ tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_BISR:
+ gen_helper_1arg(bisr, const16 & 0xff);
+ break;
+ case OPC1_16_SC_LD_A:
+ gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_LD_W:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_OR:
+ tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_ST_A:
+ gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_ST_W:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_SUB_A:
+ tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_slr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+
+ r1 = MASK_OP_SLR_D(ctx->opcode);
+ r2 = MASK_OP_SLR_S2(ctx->opcode);
+
+ switch (op1) {
+/* SLR-format */
+ case OPC1_16_SLR_LD_A:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ break;
+ case OPC1_16_SLR_LD_A_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ case OPC1_16_SLR_LD_BU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ break;
+ case OPC1_16_SLR_LD_BU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ break;
+ case OPC1_16_SLR_LD_H:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ break;
+ case OPC1_16_SLR_LD_H_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ break;
+ case OPC1_16_SLR_LD_W:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ break;
+ case OPC1_16_SLR_LD_W_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sro_opc(DisasContext *ctx, int op1)
+{
+ int r2;
+ int32_t address;
+
+ r2 = MASK_OP_SRO_S2(ctx->opcode);
+ address = MASK_OP_SRO_OFF4(ctx->opcode);
+
+/* SRO-format */
+ switch (op1) {
+ case OPC1_16_SRO_LD_A:
+ gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_LD_BU:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+ break;
+ case OPC1_16_SRO_LD_H:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+ break;
+ case OPC1_16_SRO_LD_W:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_ST_A:
+ gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_ST_B:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+ break;
+ case OPC1_16_SRO_ST_H:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+ break;
+ case OPC1_16_SRO_ST_W:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sr_system(DisasContext *ctx)
+{
+ uint32_t op2;
+ op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_16_SR_NOP:
+ break;
+ case OPC2_16_SR_RET:
+ gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+ break;
+ case OPC2_16_SR_RFE:
+ gen_helper_rfe(cpu_env);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+ case OPC2_16_SR_DEBUG:
+        /* raise EXCP_DEBUG; not implemented, so DEBUG acts as a nop */
+ break;
+ case OPC2_16_SR_FRET:
+ gen_fret(ctx);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sr_accu(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1;
+ TCGv temp;
+
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_16_SR_RSUB:
+        /* overflow only if r1 == -0x80000000 */
+ temp = tcg_const_i32(-0x80000000);
+ /* calc V bit */
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* sub */
+ tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ /* calc av */
+ tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
+ /* calc sav */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_16_SR_SAT_B:
+ gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
+ break;
+ case OPC2_16_SR_SAT_BU:
+ gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
+ break;
+ case OPC2_16_SR_SAT_H:
+ gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
+ break;
+ case OPC2_16_SR_SAT_HU:
+ gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_16Bit_opc(DisasContext *ctx)
+{
+ int op1;
+ int r1, r2;
+ int32_t const16;
+ int32_t address;
+ TCGv temp;
+
+ op1 = MASK_OP_MAJOR(ctx->opcode);
+
+    /* handle ADDSC.A, whose primary opcode is only 6 bits long */
+ if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
+ op1 = OPC1_16_SRRS_ADDSC_A;
+ }
+
+ switch (op1) {
+ case OPC1_16_SRC_ADD:
+ case OPC1_16_SRC_ADD_A15:
+ case OPC1_16_SRC_ADD_15A:
+ case OPC1_16_SRC_ADD_A:
+ case OPC1_16_SRC_CADD:
+ case OPC1_16_SRC_CADDN:
+ case OPC1_16_SRC_CMOV:
+ case OPC1_16_SRC_CMOVN:
+ case OPC1_16_SRC_EQ:
+ case OPC1_16_SRC_LT:
+ case OPC1_16_SRC_MOV:
+ case OPC1_16_SRC_MOV_A:
+ case OPC1_16_SRC_MOV_E:
+ case OPC1_16_SRC_SH:
+ case OPC1_16_SRC_SHA:
+ decode_src_opc(ctx, op1);
+ break;
+/* SRR-format */
+ case OPC1_16_SRR_ADD:
+ case OPC1_16_SRR_ADD_A15:
+ case OPC1_16_SRR_ADD_15A:
+ case OPC1_16_SRR_ADD_A:
+ case OPC1_16_SRR_ADDS:
+ case OPC1_16_SRR_AND:
+ case OPC1_16_SRR_CMOV:
+ case OPC1_16_SRR_CMOVN:
+ case OPC1_16_SRR_EQ:
+ case OPC1_16_SRR_LT:
+ case OPC1_16_SRR_MOV:
+ case OPC1_16_SRR_MOV_A:
+ case OPC1_16_SRR_MOV_AA:
+ case OPC1_16_SRR_MOV_D:
+ case OPC1_16_SRR_MUL:
+ case OPC1_16_SRR_OR:
+ case OPC1_16_SRR_SUB:
+ case OPC1_16_SRR_SUB_A15B:
+ case OPC1_16_SRR_SUB_15AB:
+ case OPC1_16_SRR_SUBS:
+ case OPC1_16_SRR_XOR:
+ decode_srr_opc(ctx, op1);
+ break;
+/* SSR-format */
+ case OPC1_16_SSR_ST_A:
+ case OPC1_16_SSR_ST_A_POSTINC:
+ case OPC1_16_SSR_ST_B:
+ case OPC1_16_SSR_ST_B_POSTINC:
+ case OPC1_16_SSR_ST_H:
+ case OPC1_16_SSR_ST_H_POSTINC:
+ case OPC1_16_SSR_ST_W:
+ case OPC1_16_SSR_ST_W_POSTINC:
+ decode_ssr_opc(ctx, op1);
+ break;
+/* SRRS-format */
+ case OPC1_16_SRRS_ADDSC_A:
+ r2 = MASK_OP_SRRS_S2(ctx->opcode);
+ r1 = MASK_OP_SRRS_S1D(ctx->opcode);
+ const16 = MASK_OP_SRRS_N(ctx->opcode);
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
+ tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+/* SLRO-format */
+ case OPC1_16_SLRO_LD_A:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SLRO_LD_BU:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
+ break;
+ case OPC1_16_SLRO_LD_H:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
+ break;
+ case OPC1_16_SLRO_LD_W:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+/* SB-format */
+ case OPC1_16_SB_CALL:
+ case OPC1_16_SB_J:
+ case OPC1_16_SB_JNZ:
+ case OPC1_16_SB_JZ:
+ address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, 0, address);
+ break;
+/* SBC-format */
+ case OPC1_16_SBC_JEQ:
+ case OPC1_16_SBC_JNE:
+ address = MASK_OP_SBC_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ break;
+ case OPC1_16_SBC_JEQ2:
+ case OPC1_16_SBC_JNE2:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ address = MASK_OP_SBC_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+/* SBRN-format */
+ case OPC1_16_SBRN_JNZ_T:
+ case OPC1_16_SBRN_JZ_T:
+ address = MASK_OP_SBRN_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBRN_N(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ break;
+/* SBR-format */
+ case OPC1_16_SBR_JEQ2:
+ case OPC1_16_SBR_JNE2:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ r1 = MASK_OP_SBR_S2(ctx->opcode);
+ address = MASK_OP_SBR_DISP4(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, address);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_16_SBR_JEQ:
+ case OPC1_16_SBR_JGEZ:
+ case OPC1_16_SBR_JGTZ:
+ case OPC1_16_SBR_JLEZ:
+ case OPC1_16_SBR_JLTZ:
+ case OPC1_16_SBR_JNE:
+ case OPC1_16_SBR_JNZ:
+ case OPC1_16_SBR_JNZ_A:
+ case OPC1_16_SBR_JZ:
+ case OPC1_16_SBR_JZ_A:
+ case OPC1_16_SBR_LOOP:
+ r1 = MASK_OP_SBR_S2(ctx->opcode);
+ address = MASK_OP_SBR_DISP4(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, address);
+ break;
+/* SC-format */
+ case OPC1_16_SC_AND:
+ case OPC1_16_SC_BISR:
+ case OPC1_16_SC_LD_A:
+ case OPC1_16_SC_LD_W:
+ case OPC1_16_SC_MOV:
+ case OPC1_16_SC_OR:
+ case OPC1_16_SC_ST_A:
+ case OPC1_16_SC_ST_W:
+ case OPC1_16_SC_SUB_A:
+ decode_sc_opc(ctx, op1);
+ break;
+/* SLR-format */
+ case OPC1_16_SLR_LD_A:
+ case OPC1_16_SLR_LD_A_POSTINC:
+ case OPC1_16_SLR_LD_BU:
+ case OPC1_16_SLR_LD_BU_POSTINC:
+ case OPC1_16_SLR_LD_H:
+ case OPC1_16_SLR_LD_H_POSTINC:
+ case OPC1_16_SLR_LD_W:
+ case OPC1_16_SLR_LD_W_POSTINC:
+ decode_slr_opc(ctx, op1);
+ break;
+/* SRO-format */
+ case OPC1_16_SRO_LD_A:
+ case OPC1_16_SRO_LD_BU:
+ case OPC1_16_SRO_LD_H:
+ case OPC1_16_SRO_LD_W:
+ case OPC1_16_SRO_ST_A:
+ case OPC1_16_SRO_ST_B:
+ case OPC1_16_SRO_ST_H:
+ case OPC1_16_SRO_ST_W:
+ decode_sro_opc(ctx, op1);
+ break;
+/* SSRO-format */
+ case OPC1_16_SSRO_ST_A:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SSRO_ST_B:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
+ break;
+ case OPC1_16_SSRO_ST_H:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
+ break;
+ case OPC1_16_SSRO_ST_W:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+/* SR-format */
+ case OPCM_16_SR_SYSTEM:
+ decode_sr_system(ctx);
+ break;
+ case OPCM_16_SR_ACCU:
+ decode_sr_accu(ctx);
+ break;
+ case OPC1_16_SR_JI:
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, 0);
+ break;
+ case OPC1_16_SR_NOT:
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/*
+ * 32 bit instructions
+ */
+
+/* ABS-format */
+static void decode_abs_ldw(DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LD_A:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ case OPC2_32_ABS_LD_D:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_LD_DA:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_LD_W:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldb(DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LD_B:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
+ break;
+ case OPC2_32_ABS_LD_BU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ break;
+ case OPC2_32_ABS_LD_H:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
+ break;
+ case OPC2_32_ABS_LD_HU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_swap(DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LDMST:
+ gen_ldmst(ctx, r1, temp);
+ break;
+ case OPC2_32_ABS_SWAP_W:
+ gen_swap(ctx, r1, temp);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_context(DisasContext *ctx)
+{
+ uint32_t op2;
+ int32_t off18;
+
+ off18 = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_ABS_LDLCX:
+ gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_LDUCX:
+ gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_STLCX:
+ gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_STUCX:
+ gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_abs_store(DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_ST_A:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ case OPC2_32_ABS_ST_D:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_ST_DA:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_ST_W:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_storeb_h(DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_ST_B:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ break;
+ case OPC2_32_ABS_ST_H:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+/* Bit-format */
+
+static void decode_bit_andacc(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_AND_AND_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_AND_ANDN_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_AND_NOR_T:
+ if (TCG_TARGET_HAS_andc_i32) {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
+ } else {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
+ }
+ break;
+ case OPC2_32_BIT_AND_OR_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_logical_t(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_AND_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_ANDN_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl);
+ break;
+ case OPC2_32_BIT_NOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl);
+ break;
+ case OPC2_32_BIT_OR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
+ if (op2 == OPC2_32_BIT_INSN_T) {
+ tcg_gen_not_tl(temp, temp);
+ }
+ tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
+ tcg_temp_free(temp);
+}
+
+static void decode_bit_logical_t2(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_NAND_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nand_tl);
+ break;
+ case OPC2_32_BIT_ORN_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_orc_tl);
+ break;
+ case OPC2_32_BIT_XNOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_eqv_tl);
+ break;
+ case OPC2_32_BIT_XOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_orand(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_OR_AND_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_BIT_OR_ANDN_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_BIT_OR_NOR_T:
+ if (TCG_TARGET_HAS_orc_i32) {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
+ } else {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
+ }
+ break;
+ case OPC2_32_BIT_OR_OR_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_sh_logic1(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BIT_SH_AND_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_SH_ANDN_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl);
+ break;
+ case OPC2_32_BIT_SH_NOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl);
+ break;
+ case OPC2_32_BIT_SH_OR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+}
+
+static void decode_bit_sh_logic2(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BIT_SH_NAND_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nand_tl);
+ break;
+ case OPC2_32_BIT_SH_ORN_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_orc_tl);
+ break;
+ case OPC2_32_BIT_SH_XNOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_eqv_tl);
+ break;
+ case OPC2_32_BIT_SH_XOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+}
+
+/* BO-format */
+
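+/* base+offset addressing: short offset, post- and pre-increment,
+   plus bit-reverse and circular variants */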
+static void decode_bo_addrmode_post_pre_base(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BO_CACHEA_WI_SHORTOFF:
+ case OPC2_32_BO_CACHEA_W_SHORTOFF:
+ case OPC2_32_BO_CACHEA_I_SHORTOFF:
+ /* instruction to access the cache */
+ break;
+ case OPC2_32_BO_CACHEA_WI_POSTINC:
+ case OPC2_32_BO_CACHEA_W_POSTINC:
+ case OPC2_32_BO_CACHEA_I_POSTINC:
+ /* instruction to access the cache, but we still need to handle
+ the addressing mode */
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CACHEA_WI_PREINC:
+ case OPC2_32_BO_CACHEA_W_PREINC:
+ case OPC2_32_BO_CACHEA_I_PREINC:
+ /* instruction to access the cache, but we still need to handle
+ the addressing mode */
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CACHEI_WI_SHORTOFF:
+ case OPC2_32_BO_CACHEI_W_SHORTOFF:
+ if (!has_feature(ctx, TRICORE_FEATURE_131)) {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_CACHEI_W_POSTINC:
+ case OPC2_32_BO_CACHEI_WI_POSTINC:
+ if (has_feature(ctx, TRICORE_FEATURE_131)) {
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_CACHEI_W_PREINC:
+ case OPC2_32_BO_CACHEI_WI_PREINC:
+ if (has_feature(ctx, TRICORE_FEATURE_131)) {
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_ST_A_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+ break;
+ case OPC2_32_BO_ST_A_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_A_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+ break;
+ case OPC2_32_BO_ST_B_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_ST_B_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_B_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_ST_D_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_ST_D_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_D_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_DA_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_ST_DA_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_DA_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_H_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_ST_H_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_H_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_ST_Q_SHORTOFF:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_Q_POSTINC:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_Q_PREINC:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_W_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_ST_W_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_W_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_CACHEA_WI_BR:
+ case OPC2_32_BO_CACHEA_W_BR:
+ case OPC2_32_BO_CACHEA_I_BR:
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_CACHEA_WI_CIRC:
+ case OPC2_32_BO_CACHEA_W_CIRC:
+ case OPC2_32_BO_CACHEA_I_CIRC:
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_A_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_A_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_B_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_B_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_D_BR:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_D_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_DA_BR:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_DA_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_H_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_H_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_Q_BR:
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_Q_CIRC:
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_W_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_W_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
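+/*
+ * BO-format loads with base+offset addressing: SHORTOFF accesses
+ * A[b] + off10 without writeback, POSTINC accesses A[b] and then adds off10
+ * to it, and PREINC writes A[b] + off10 back to A[b] before the access.
+ */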
+static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BO_LD_A_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_A_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_A_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_B_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+ break;
+ case OPC2_32_BO_LD_B_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_SB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_B_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+ break;
+ case OPC2_32_BO_LD_BU_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_LD_BU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_BU_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_LD_D_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_LD_D_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_D_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_LD_DA_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_LD_DA_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_DA_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_LD_H_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+ break;
+ case OPC2_32_BO_LD_H_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LESW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_H_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+ break;
+ case OPC2_32_BO_LD_HU_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_LD_HU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_HU_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_LD_Q_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ break;
+ case OPC2_32_BO_LD_Q_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_Q_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ break;
+ case OPC2_32_BO_LD_W_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_W_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_W_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
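+/*
+ * BO-format loads with bit-reverse/circular addressing; the address setup
+ * mirrors the store variant above.  In circular mode the helper computes
+ * index = (index + off10) mod length, so e.g. off10 = 4 with length = 16
+ * and index = 14 wraps the index around to 2.
+ */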
+static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_LD_A_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_A_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_B_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_B_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_BU_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_BU_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_D_BR:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_D_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_DA_BR:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_DA_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_H_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_H_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_HU_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_HU_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_Q_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_Q_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_W_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_W_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
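+/*
+ * BO-format context and atomic operations: LDLCX/LDUCX and STLCX/STUCX load
+ * and store the lower/upper register contexts, LDMST is the atomic
+ * load-modify-store, and SWAP.W/CMPSWAP.W/SWAPMSK.W are its exchange
+ * variants, each in short-offset, post-increment and pre-increment form.
+ */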
+static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+    TCGv temp;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BO_LDLCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_ldlcx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_LDMST_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ldmst(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_LDMST_POSTINC:
+ gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LDMST_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_LDUCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_lducx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_LEA_SHORTOFF:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_STLCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_stlcx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_STUCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_stucx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_SWAP_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_swap(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_SWAP_W_POSTINC:
+ gen_swap(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_SWAP_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_swap(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_POSTINC:
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_POSTINC:
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
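+/*
+ * Bit-reverse and circular variants of LDMST and the SWAP family; the
+ * effective address is formed exactly as in the other BR/CIRC decoders.
+ */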
+static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_LDMST_BR:
+ gen_ldmst(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LDMST_CIRC:
+ gen_ldmst(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_SWAP_W_BR:
+ gen_swap(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_SWAP_W_CIRC:
+ gen_swap(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_BR:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_CIRC:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_BR:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_CIRC:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
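+/*
+ * BOL format carries a 16-bit sign-extended offset.  Only LD.A, LD.W, LEA
+ * and ST.W exist on all cores; the byte and halfword forms (and ST.A) were
+ * added with TriCore 1.6, so older cores raise an illegal-opcode trap.
+ */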
+static void decode_bol_opc(DisasContext *ctx, int32_t op1)
+{
+ int r1, r2;
+ int32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_BOL_S1D(ctx->opcode);
+ r2 = MASK_OP_BOL_S2(ctx->opcode);
+ address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_32_BOL_LD_A_LONGOFF:
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_BOL_LD_W_LONGOFF:
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_BOL_LEA_LONGOFF:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
+ break;
+ case OPC1_32_BOL_ST_A_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_W_LONGOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
+ break;
+ case OPC1_32_BOL_LD_B_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_BU_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_H_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_HU_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_B_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_H_LONGOFF:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+            gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RC format */
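+/*
+ * The RC logical group uses the 9-bit constant zero-extended; the shift ops
+ * re-interpret its low 6 bits (5 for the halfword forms) as a signed shift
+ * count, where a negative count shifts right.
+ */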
+static void decode_rc_logical_shift(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int32_t const9;
+ TCGv temp;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RC_AND:
+ tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ANDN:
+ tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ break;
+ case OPC2_32_RC_NAND:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_NOR:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_OR:
+ tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ORN:
+ tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ break;
+ case OPC2_32_RC_SH:
+ const9 = sextract32(const9, 0, 6);
+ gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_H:
+ const9 = sextract32(const9, 0, 5);
+ gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHA:
+ const9 = sextract32(const9, 0, 6);
+ gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHA_H:
+ const9 = sextract32(const9, 0, 5);
+ gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHAS:
+ gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_XNOR:
+ tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RC_XOR:
+ tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
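+/*
+ * RC arithmetic/compare group: const9 is sign-extended by default, and the
+ * unsigned variants re-read it zero-extended via MASK_OP_RC_CONST9.
+ */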
+static void decode_rc_accumulator(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int16_t const9;
+
+ TCGv temp;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RC_ABSDIF:
+ gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ABSDIFS:
+ gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADD:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDC:
+ gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDS:
+ gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDS_U:
+ gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDX:
+ gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_AND_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_EQ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_EQANY_B:
+ gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_EQANY_H:
+ gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_GE:
+ tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_LT:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MAX:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MAX_U:
+ tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MIN:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MIN_U:
+ tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_NE:
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_OR_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_RSUB:
+ tcg_gen_movi_tl(temp, const9);
+ gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_RSUBS:
+ tcg_gen_movi_tl(temp, const9);
+ gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_RSUBS_U:
+ tcg_gen_movi_tl(temp, const9);
+ gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_SH_EQ:
+ gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_GE:
+ gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_LT:
+ gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_NE:
+ gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_XOR_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
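+/*
+ * BISR enters an interrupt service routine via the bisr helper, which saves
+ * the lower context and takes the new interrupt priority from const9;
+ * SYSCALL is accepted, but its trap generation is still missing here.
+ */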
+static void decode_rc_serviceroutine(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t const9;
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RC_BISR:
+ gen_helper_1arg(bisr, const9);
+ break;
+ case OPC2_32_RC_SYSCALL:
+ /* TODO: Add exception generation */
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
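+/*
+ * RC multiplies: the 64-bit forms write their result to the register pair
+ * E[c], and the _U variants re-read const9 zero-extended.
+ */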
+static void decode_rc_mul(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int16_t const9;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RC_MUL_32:
+ gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MUL_64:
+ CHECK_REG_PAIR(r2);
+ gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MULS_32:
+ gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MUL_U_64:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ CHECK_REG_PAIR(r2);
+ gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MULS_U_32:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RCPW format */
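+/*
+ * RCPW format encodes pos and width statically.  IMASK writes the mask
+ * ((1 << width) - 1) << pos into the odd register of the pair and
+ * const4 << pos into the even one, so e.g. width = 4, pos = 8 yields the
+ * mask 0x00000f00.  INSERT deposits const4 into D[a] at [pos, pos+width).
+ */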
+static void decode_rcpw_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int32_t pos, width, const4;
+
+ TCGv temp;
+
+ op2 = MASK_OP_RCPW_OP2(ctx->opcode);
+ r1 = MASK_OP_RCPW_S1(ctx->opcode);
+ r2 = MASK_OP_RCPW_D(ctx->opcode);
+ const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
+ width = MASK_OP_RCPW_WIDTH(ctx->opcode);
+ pos = MASK_OP_RCPW_POS(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCPW_IMASK:
+ CHECK_REG_PAIR(r2);
+        /* the result is undefined if pos + width > 32 */
+ if (pos + width <= 32) {
+ tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
+ tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
+ }
+ break;
+ case OPC2_32_RCPW_INSERT:
+        /* the result is undefined if pos + width > 32 */
+ if (pos + width <= 32) {
+ temp = tcg_const_i32(const4);
+ tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
+ tcg_temp_free(temp);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RCRW format */
+
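+/*
+ * RCRW is the run-time-position variant: the insert position comes from the
+ * low five bits of D[d] at execution time, while width and const4 stay
+ * encoded in the instruction.
+ */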
+static void decode_rcrw_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t width, const4;
+
+ TCGv temp, temp2, temp3;
+
+ op2 = MASK_OP_RCRW_OP2(ctx->opcode);
+ r1 = MASK_OP_RCRW_S1(ctx->opcode);
+ r3 = MASK_OP_RCRW_S3(ctx->opcode);
+ r4 = MASK_OP_RCRW_D(ctx->opcode);
+ width = MASK_OP_RCRW_WIDTH(ctx->opcode);
+ const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+    case OPC2_32_RCRW_IMASK:
+        CHECK_REG_PAIR(r3);
+        tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
+ tcg_gen_movi_tl(temp2, (1 << width) - 1);
+ tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
+ tcg_gen_movi_tl(temp2, const4);
+ tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
+ break;
+ case OPC2_32_RCRW_INSERT:
+ temp3 = tcg_temp_new();
+
+ tcg_gen_movi_tl(temp, width);
+ tcg_gen_movi_tl(temp2, const4);
+ tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
+ gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);
+
+ tcg_temp_free(temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* RCR format */
+
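+/*
+ * RCR conditional ops: the S3 operand supplies the condition, so CADD/SEL
+ * act when D[d] != 0 and CADDN/SELN when D[d] == 0, adding or selecting the
+ * sign-extended const9.
+ */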
+static void decode_rcr_cond_select(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_CADD:
+ gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RCR_CADDN:
+ gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RCR_SEL:
+ temp = tcg_const_i32(0);
+ temp2 = tcg_const_i32(const9);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], temp2);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC2_32_RCR_SELN:
+ temp = tcg_const_i32(0);
+ temp2 = tcg_const_i32(const9);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], temp2);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
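+/*
+ * RCR multiply-accumulate: MADD computes D[a] * const9 added to the S3
+ * operand in 32- or 64-bit width (MSUB below mirrors this with a
+ * subtraction); the S forms saturate and the _U forms re-read const9
+ * zero-extended.
+ */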
+static void decode_rcr_madd(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_MADD_32:
+ gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADD_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADDS_32:
+ gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADDS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADD_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADDS_U_32:
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADDS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rcr_msub(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_MSUB_32:
+ gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUB_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_32:
+ gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUB_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_U_32:
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RLC format */
+
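+/*
+ * RLC format carries a 16-bit constant: ADDI/MOV use it sign-extended,
+ * MOV.U zero-extended, and the ADDIH/MOVH forms shift it into the upper
+ * halfword.  For MFCR/MTCR the constant is the address of a core special
+ * function register.
+ */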
+static void decode_rlc_opc(DisasContext *ctx,
+ uint32_t op1)
+{
+ int32_t const16;
+ int r1, r2;
+
+ const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
+ r1 = MASK_OP_RLC_S1(ctx->opcode);
+ r2 = MASK_OP_RLC_D(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_32_RLC_ADDI:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
+ break;
+ case OPC1_32_RLC_ADDIH:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
+ break;
+ case OPC1_32_RLC_ADDIH_A:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
+ break;
+ case OPC1_32_RLC_MFCR:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ gen_mfcr(ctx, cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV_64:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ CHECK_REG_PAIR(r2);
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_RLC_MOV_U:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV_H:
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
+ break;
+ case OPC1_32_RLC_MOVH_A:
+ tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
+ break;
+ case OPC1_32_RLC_MTCR:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ gen_mtcr(ctx, cpu_gpr_d[r1], const16);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RR format */
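+/*
+ * The RR accumulator group covers ABS/ADD/SUB and the compare ops; _B/_H
+ * suffixes are the packed byte/halfword forms and the S/U suffixes the
+ * saturating signed/unsigned forms, mostly implemented as helpers that also
+ * maintain the PSW status bits.
+ */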
+static void decode_rr_accumulator(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r3, r2, r1;
+
+ TCGv temp;
+
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_ABS:
+ gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABS_B:
+ gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABS_H:
+ gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF:
+ gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF_B:
+ gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF_H:
+ gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIFS:
+ gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIFS_H:
+ gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSS:
+ gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSS_H:
+ gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD:
+ gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD_B:
+ gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD_H:
+ gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDC:
+ gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS:
+ gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_H:
+ gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_HU:
+ gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_U:
+ gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDX:
+ gen_add_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_AND_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_EQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_B:
+ gen_helper_eq_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_H:
+ gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_W:
+ gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQANY_B:
+ gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQANY_H:
+ gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_GE:
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_GE_U:
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_U:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_B:
+ gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_BU:
+ gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_H:
+ gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_HU:
+ gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_W:
+ gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_WU:
+ gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX:
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_U:
+ tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_B:
+ gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_BU:
+ gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_H:
+ gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_HU:
+ gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN:
+ tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_U:
+ tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_B:
+ gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_BU:
+ gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_H:
+ gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_HU:
+ gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MOV:
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MOV_64:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ temp = tcg_temp_new();
+
+ CHECK_REG_PAIR(r3);
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+
+ tcg_temp_free(temp);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_MOVS_64:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ CHECK_REG_PAIR(r3);
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_sari_tl(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_NE:
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_OR_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_SAT_B:
+ gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80);
+ break;
+ case OPC2_32_RR_SAT_BU:
+ gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff);
+ break;
+ case OPC2_32_RR_SAT_H:
+ gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000);
+ break;
+ case OPC2_32_RR_SAT_HU:
+ gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff);
+ break;
+ case OPC2_32_RR_SH_EQ:
+ gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_GE:
+ gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_GE_U:
+ gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_LT:
+ gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_LT_U:
+ gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_NE:
+ gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB:
+ gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB_B:
+ gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB_H:
+ gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBC:
+ gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS:
+ gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_U:
+ gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_H:
+ gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_HU:
+ gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBX:
+ gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XOR_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
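+/*
+ * RR logical/count group: CLO (count leading ones) is clz of the inverted
+ * operand, CLS (count leading sign bits) maps to tcg_gen_clrsb_tl, and CLZ
+ * uses clzi with TARGET_LONG_BITS as the defined result for a zero input.
+ */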
+static void decode_rr_logical_shift(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r3, r2, r1;
+
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_AND:
+ tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ANDN:
+ tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_CLO:
+ tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
+ break;
+ case OPC2_32_RR_CLO_H:
+ gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLS:
+ tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLS_H:
+ gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLZ:
+ tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
+ break;
+ case OPC2_32_RR_CLZ_H:
+ gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_NAND:
+ tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_NOR:
+ tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_OR:
+ tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ORN:
+ tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH:
+ gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_H:
+ gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHA:
+ gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHA_H:
+ gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHAS:
+ gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XNOR:
+ tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XOR:
+ tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
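+/*
+ * RR address arithmetic: ADDSC.A computes A[c] = A[b] + (D[a] << n) with
+ * the 2-bit scale n from the opcode, and ADDSC.AT forms a word-aligned
+ * bit-array address, A[c] = (A[b] + (D[a] >> 3)) & ~3.
+ */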
+static void decode_rr_address(DisasContext *ctx)
+{
+ uint32_t op2, n;
+ int r1, r2, r3;
+ TCGv temp;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+ n = MASK_OP_RR_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_ADD_A:
+ tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_ADDSC_A:
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
+ tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RR_ADDSC_AT:
+ temp = tcg_temp_new();
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
+ tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
+ tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RR_EQ_A:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_EQZ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ break;
+ case OPC2_32_RR_GE_A:
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_LT_A:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_MOV_A:
+ tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MOV_AA:
+ tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_MOV_D:
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_NE_A:
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_NEZ_A:
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ break;
+ case OPC2_32_RR_SUB_A:
+ tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
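+/*
+ * Indirect jumps and calls take their target from A[a] with bit 0 cleared
+ * to keep the PC halfword-aligned.  JLI saves the return address in A[11],
+ * CALLI saves a context through the call helper, and every variant ends the
+ * translation block.
+ */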
+static void decode_rr_idirect(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_JI:
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_JLI:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_CALLI:
+ gen_helper_1arg(call, ctx->pc_succ_insn);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_FCALLI:
+ gen_fcall_save_ctx(ctx);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
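+/*
+ * Divide-step setup and single-precision FPU ops.  The DVINIT family seeds
+ * the register pair E[c] for the following divide steps and computes PSW.V:
+ * before TriCore 1.3.1 the unsigned forms compare the magnitudes of the
+ * remainder seed and the divisor, from 1.3.1 on only a zero divisor sets V.
+ */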
+static void decode_rr_divide(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+
+ TCGv temp, temp2, temp3;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_BMERGE:
+ gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_BSPLIT:
+ CHECK_REG_PAIR(r3);
+ gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_DVINIT_B:
+ CHECK_REG_PAIR(r3);
+ gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DVINIT_BU:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ if (!has_feature(ctx, TRICORE_FEATURE_131)) {
+ /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
+ tcg_gen_abs_tl(temp, temp3);
+ tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ } else {
+ /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
+ tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+ case OPC2_32_RR_DVINIT_H:
+ CHECK_REG_PAIR(r3);
+ gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DVINIT_HU:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ if (!has_feature(ctx, TRICORE_FEATURE_131)) {
+ /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
+ tcg_gen_abs_tl(temp, temp3);
+ tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ } else {
+ /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
+ tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+ case OPC2_32_RR_DVINIT:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ /* overflow = ((D[b] == 0) ||
+ ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
+ tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ /* write result */
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ /* sign extend to high reg */
+ tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+    case OPC2_32_RR_DVINIT_U:
+        CHECK_REG_PAIR(r3);
+        /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ /* write result */
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+        /* zero extend to high reg */
+ tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
+ break;
+ case OPC2_32_RR_PARITY:
+ gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_UNPACK:
+ CHECK_REG_PAIR(r3);
+ gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CRC32:
+ if (has_feature(ctx, TRICORE_FEATURE_161)) {
+ gen_helper_crc32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_DIV:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_DIV_U:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_MUL_F:
+ gen_helper_fmul(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DIV_F:
+ gen_helper_fdiv(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_CMP_F:
+ gen_helper_fcmp(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_FTOI:
+ gen_helper_ftoi(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_ITOF:
+ gen_helper_itof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_FTOUZ:
+ gen_helper_ftouz(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_UPDFL:
+ gen_helper_updfl(cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_UTOF:
+ gen_helper_utof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_FTOIZ:
+ gen_helper_ftoiz(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_QSEED_F:
+ gen_helper_qseed(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RR1 Format */
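+/*
+ * RR1 packed multiplies: the LL/LU/UL/UU suffix selects which halfword of
+ * each source operand enters the 16 x 16 product.  MUL.H writes both
+ * products to a register pair, MULM.H sums them into one 64-bit value and
+ * clears V/AV, and MULR.H rounds each product back to a 16-bit fraction.
+ */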
+static void decode_rr1_mul(DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3;
+ TCGv n;
+ TCGv_i64 temp64;
+
+ r1 = MASK_OP_RR1_S1(ctx->opcode);
+ r2 = MASK_OP_RR1_S2(ctx->opcode);
+ r3 = MASK_OP_RR1_D(ctx->opcode);
+ n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
+ op2 = MASK_OP_RR1_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR1_MUL_H_32_LL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_LU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_UL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_UU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_LL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_LU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_UL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_UU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULR_H_16_LL:
+ GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_LU:
+ GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_UL:
+ GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_UU:
+ GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(n);
+}
+
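+/* Q-format (fixed-point fractional) multiplies: the MUL.Q and MULR.Q variants. */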
+static void decode_rr1_mulq(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ uint32_t n;
+
+ TCGv temp, temp2;
+
+ r1 = MASK_OP_RR1_S1(ctx->opcode);
+ r2 = MASK_OP_RR1_S2(ctx->opcode);
+ r3 = MASK_OP_RR1_D(ctx->opcode);
+ n = MASK_OP_RR1_N(ctx->opcode);
+ op2 = MASK_OP_RR1_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RR1_MUL_Q_32:
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RR1_MUL_Q_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
+ break;
+ case OPC2_32_RR1_MUL_Q_64_L:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
+ break;
+ case OPC2_32_RR1_MUL_Q_64_U:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MULR_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MULR_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* RR2 format */
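+/* 32 x 32 -> 32/64-bit multiplies: plain, unsigned and saturating forms. */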
+static void decode_rr2_mul(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+
+ op2 = MASK_OP_RR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RR2_S1(ctx->opcode);
+ r2 = MASK_OP_RR2_S2(ctx->opcode);
+ r3 = MASK_OP_RR2_D(ctx->opcode);
+ switch (op2) {
+ case OPC2_32_RR2_MUL_32:
+ gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MUL_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MULS_32:
+ gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MUL_U_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MULS_U_32:
+ gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRPW format */
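+/* Bit-field extract/insert with immediate position and width fields. */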
+static void decode_rrpw_extract_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int32_t pos, width;
+ TCGv temp;
+
+ op2 = MASK_OP_RRPW_OP2(ctx->opcode);
+ r1 = MASK_OP_RRPW_S1(ctx->opcode);
+ r2 = MASK_OP_RRPW_S2(ctx->opcode);
+ r3 = MASK_OP_RRPW_D(ctx->opcode);
+ pos = MASK_OP_RRPW_POS(ctx->opcode);
+ width = MASK_OP_RRPW_WIDTH(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRPW_EXTR:
+ if (width == 0) {
+ tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ break;
+ }
+
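+ /* pos + width > 32 is architecturally undefined; leave D[c] unchanged */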
+ if (pos + width <= 32) {
+ /* optimize special cases */
+ if ((pos == 0) && (width == 8)) {
+ tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ } else if ((pos == 0) && (width == 16)) {
+ tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ } else {
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
+ tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
+ }
+ }
+ break;
+ case OPC2_32_RRPW_EXTR_U:
+ if (width == 0) {
+ tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ } else {
+ tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
+ tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32 - width));
+ }
+ break;
+ case OPC2_32_RRPW_IMASK:
+ CHECK_REG_PAIR(r3);
+
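+ /* as with EXTR, the result is undefined when pos + width > 32 */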
+ if (pos + width <= 32) {
+ temp = tcg_temp_new();
+ tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos);
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
+ tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+ tcg_temp_free(temp);
+ }
+
+ break;
+ case OPC2_32_RRPW_INSERT:
+ if (pos + width <= 32) {
+ tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos, width);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR format */
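+/* CADD(N)/CSUB(N)/SEL(N): add, subtract or select predicated on a condition register. */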
+static void decode_rrr_cond_select(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ TCGv temp;
+
+ op2 = MASK_OP_RRR_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR_CADD:
+ gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ cpu_gpr_d[r4], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CADDN:
+ gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CSUB:
+ gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CSUBN:
+ gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_SEL:
+ temp = tcg_const_i32(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RRR_SELN:
+ temp = tcg_const_i32(0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_temp_free(temp);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
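+/* DVADJ/DVSTEP divide primitives, IXMIN/IXMAX index search, PACK and the FP helpers. */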
+static void decode_rrr_divide(DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR_DVADJ:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_DVSTEP:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_DVSTEP_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMAX:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMAX_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMIN:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMIN_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_PACK:
+ CHECK_REG_PAIR(r3);
+ gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RRR_ADD_F:
+ gen_helper_fadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_SUB_F:
+ gen_helper_fsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_MADD_F:
+ gen_helper_fmadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_MSUB_F:
+ gen_helper_fmsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r3]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR2 format */
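+/* MADD(S): 32/64-bit multiply-accumulate, signed, unsigned and saturating. */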
+static void decode_rrr2_madd(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR2_S1(ctx->opcode);
+ r2 = MASK_OP_RRR2_S2(ctx->opcode);
+ r3 = MASK_OP_RRR2_S3(ctx->opcode);
+ r4 = MASK_OP_RRR2_D(ctx->opcode);
+ switch (op2) {
+ case OPC2_32_RRR2_MADD_32:
+ gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADD_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_32:
+ gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADD_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_U_32:
+ gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
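+/* MSUB(S): the matching multiply-subtract group. */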
+static void decode_rrr2_msub(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR2_S1(ctx->opcode);
+ r2 = MASK_OP_RRR2_S2(ctx->opcode);
+ r3 = MASK_OP_RRR2_S3(ctx->opcode);
+ r4 = MASK_OP_RRR2_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR2_MSUB_32:
+ gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUB_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_32:
+ gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUB_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_U_32:
+ gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR1 format */
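+/* MADD.H group: packed 16-bit multiply-add; LL/LU/UL/UU selects the half-words used. */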
+static void decode_rrr1_madd(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADD_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADD_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADD_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADD_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDM_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDM_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDM_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDM_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDR_H_LL:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDR_H_LU:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDR_H_UL:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDR_H_UU:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_LL:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_LU:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_UL:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_UU:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
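+/* MADD.Q group: Q-format multiply-add variants. */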
+static void decode_rrr1_maddq_h(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ temp = tcg_const_i32(n);
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADD_Q_32:
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32:
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDR_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MADDR_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDR_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDRS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDRS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
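+/* MADDSU.H group: adds one half-word product to the accumulator and subtracts the other. */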
+static void decode_rrr1_maddsu_h(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADDSU_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_LL:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_LU:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_UL:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_UU:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_LL:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_LU:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_UL:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_UU:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
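+/* MSUB.H group: packed 16-bit multiply-subtract variants. */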
+static void decode_rrr1_msub(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUB_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUB_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUB_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUB_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_LL:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_LU:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_UL:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_UU:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_LL:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_LU:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_UL:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_UU:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
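+/* MSUB.Q group: Q-format multiply-subtract variants. */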
+static void decode_rrr1_msubq_h(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ temp = tcg_const_i32(n);
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUB_Q_32:
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32:
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MSUBR_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBR_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBRS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBRS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
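+/* MSUBAD.H group: subtracts one half-word product from the accumulator and adds the other. */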
+static void decode_rrr1_msubad_h(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUBAD_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_LL:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_LU:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_UL:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_UU:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_LL:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_LU:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_UL:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_UU:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRRR format */
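+/* Extract/insert where position and width come from the data register pair E[d]. */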
+static void decode_rrrr_extract_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ TCGv tmp_width, tmp_pos;
+
+ r1 = MASK_OP_RRRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRRR_D(ctx->opcode);
+ op2 = MASK_OP_RRRR_OP2(ctx->opcode);
+
+ tmp_pos = tcg_temp_new();
+ tmp_width = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRRR_DEXTR:
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
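+ /* with identical source registers, DEXTR degenerates to a rotate left */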
+ if (r1 == r2) {
+ tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ } else {
+ tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+ tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
+ tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
+ }
+ break;
+ case OPC2_32_RRRR_EXTR:
+ case OPC2_32_RRRR_EXTR_U:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
+ tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
+ if (op2 == OPC2_32_RRRR_EXTR) {
+ tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ } else {
+ tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ }
+ break;
+ case OPC2_32_RRRR_INSERT:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
+ tmp_pos);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(tmp_pos);
+ tcg_temp_free(tmp_width);
+}
+
+/* RRRW format */
+static void decode_rrrw_extract_insert(DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ int32_t width;
+
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRRW_OP2(ctx->opcode);
+ r1 = MASK_OP_RRRW_S1(ctx->opcode);
+ r2 = MASK_OP_RRRW_S2(ctx->opcode);
+ r3 = MASK_OP_RRRW_S3(ctx->opcode);
+ r4 = MASK_OP_RRRW_D(ctx->opcode);
+ width = MASK_OP_RRRW_WIDTH(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRRW_EXTR:
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_addi_tl(temp, temp, width);
+ tcg_gen_subfi_tl(temp, 32, temp);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
+ break;
+ case OPC2_32_RRRW_EXTR_U:
+ if (width == 0) {
+ tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
+ } else {
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32 - width));
+ }
+ break;
+ case OPC2_32_RRRW_IMASK:
+ CHECK_REG_PAIR(r4);
+ temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_tl(temp2, (1u << width) - 1);
+ tcg_gen_shl_tl(temp2, temp2, temp);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
+ tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
+
+ tcg_temp_free(temp2);
+ break;
+ case OPC2_32_RRRW_INSERT:
+ temp2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(temp, width);
+ tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
+ gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
+
+ tcg_temp_free(temp2);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+/* SYS Format */
+static void decode_sys_interrupts(DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1;
+ TCGLabel *l1;
+ TCGv tmp;
+
+ op2 = MASK_OP_SYS_OP2(ctx->opcode);
+ r1 = MASK_OP_SYS_S1D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_SYS_DEBUG:
+ /* raise EXCP_DEBUG */
+ break;
+ case OPC2_32_SYS_DISABLE:
+ tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE_1_3);
+ break;
+ case OPC2_32_SYS_DSYNC:
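+ /* dsync has no observable effect under TCG; treat it as a no-op */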
+ break;
+ case OPC2_32_SYS_ENABLE:
+ tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE_1_3);
+ break;
+ case OPC2_32_SYS_ISYNC:
+ break;
+ case OPC2_32_SYS_NOP:
+ break;
+ case OPC2_32_SYS_RET:
+ gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+ break;
+ case OPC2_32_SYS_FRET:
+ gen_fret(ctx);
+ break;
+ case OPC2_32_SYS_RFE:
+ gen_helper_rfe(cpu_env);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ break;
+ case OPC2_32_SYS_RFM:
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
+ tmp = tcg_temp_new();
+ l1 = gen_new_label();
+
+ tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
+ tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
+ gen_helper_rfm(cpu_env);
+ gen_set_label(l1);
+ tcg_gen_exit_tb(NULL, 0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ tcg_temp_free(tmp);
+ } else {
+ /* generate privilege trap */
+ }
+ break;
+ case OPC2_32_SYS_RSLCX:
+ gen_helper_rslcx(cpu_env);
+ break;
+ case OPC2_32_SYS_SVLCX:
+ gen_helper_svlcx(cpu_env);
+ break;
+ case OPC2_32_SYS_RESTORE:
+ if (has_feature(ctx, TRICORE_FEATURE_16)) {
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
+ (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
+ tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
+ } /* else raise privilege trap */
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_SYS_TRAPSV:
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
+ generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
+ gen_set_label(l1);
+ break;
+ case OPC2_32_SYS_TRAPV:
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
+ generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
+ gen_set_label(l1);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_32Bit_opc(DisasContext *ctx)
+{
+ int op1;
+ int32_t r1, r2, r3;
+ int32_t address, const16;
+ int8_t b, const4;
+ int32_t bpos;
+ TCGv temp, temp2, temp3;
+
+ op1 = MASK_OP_MAJOR(ctx->opcode);
+
+ /* handle JNZ.T, whose primary opcode is only 7 bits long */
+ if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
+ op1 = OPCM_32_BRN_JTT;
+ }
+
+ switch (op1) {
+/* ABS-format */
+ case OPCM_32_ABS_LDW:
+ decode_abs_ldw(ctx);
+ break;
+ case OPCM_32_ABS_LDB:
+ decode_abs_ldb(ctx);
+ break;
+ case OPCM_32_ABS_LDMST_SWAP:
+ decode_abs_ldst_swap(ctx);
+ break;
+ case OPCM_32_ABS_LDST_CONTEXT:
+ decode_abs_ldst_context(ctx);
+ break;
+ case OPCM_32_ABS_STORE:
+ decode_abs_store(ctx);
+ break;
+ case OPCM_32_ABS_STOREB_H:
+ decode_abs_storeb_h(ctx);
+ break;
+ case OPC1_32_ABS_STOREQ:
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
+
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_ABS_LD_Q:
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_ABS_LEA:
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
+ break;
+/* ABSB-format */
+ case OPC1_32_ABSB_ST_T:
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ b = MASK_OP_ABSB_B(ctx->opcode);
+ bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp2 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
+ tcg_gen_ori_tl(temp2, temp2, (b << bpos));
+ tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+/* B-format */
+ case OPC1_32_B_CALL:
+ case OPC1_32_B_CALLA:
+ case OPC1_32_B_FCALL:
+ case OPC1_32_B_FCALLA:
+ case OPC1_32_B_J:
+ case OPC1_32_B_JA:
+ case OPC1_32_B_JL:
+ case OPC1_32_B_JLA:
+ address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, 0, address);
+ break;
+/* Bit-format */
+ case OPCM_32_BIT_ANDACC:
+ decode_bit_andacc(ctx);
+ break;
+ case OPCM_32_BIT_LOGICAL_T1:
+ decode_bit_logical_t(ctx);
+ break;
+ case OPCM_32_BIT_INSERT:
+ decode_bit_insert(ctx);
+ break;
+ case OPCM_32_BIT_LOGICAL_T2:
+ decode_bit_logical_t2(ctx);
+ break;
+ case OPCM_32_BIT_ORAND:
+ decode_bit_orand(ctx);
+ break;
+ case OPCM_32_BIT_SH_LOGIC1:
+ decode_bit_sh_logic1(ctx);
+ break;
+ case OPCM_32_BIT_SH_LOGIC2:
+ decode_bit_sh_logic2(ctx);
+ break;
+ /* BO Format */
+ case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
+ decode_bo_addrmode_post_pre_base(ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_bitreverse_circular(ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
+ decode_bo_addrmode_ld_post_pre_base(ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_ld_bitreverse_circular(ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
+ decode_bo_addrmode_stctx_post_pre_base(ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_ldmst_bitreverse_circular(ctx);
+ break;
+/* BOL-format */
+ case OPC1_32_BOL_LD_A_LONGOFF:
+ case OPC1_32_BOL_LD_W_LONGOFF:
+ case OPC1_32_BOL_LEA_LONGOFF:
+ case OPC1_32_BOL_ST_W_LONGOFF:
+ case OPC1_32_BOL_ST_A_LONGOFF:
+ case OPC1_32_BOL_LD_B_LONGOFF:
+ case OPC1_32_BOL_LD_BU_LONGOFF:
+ case OPC1_32_BOL_LD_H_LONGOFF:
+ case OPC1_32_BOL_LD_HU_LONGOFF:
+ case OPC1_32_BOL_ST_B_LONGOFF:
+ case OPC1_32_BOL_ST_H_LONGOFF:
+ decode_bol_opc(ctx, op1);
+ break;
+/* BRC Format */
+ case OPCM_32_BRC_EQ_NEQ:
+ case OPCM_32_BRC_GE:
+ case OPCM_32_BRC_JLT:
+ case OPCM_32_BRC_JNE:
+ const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
+ address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
+ r1 = MASK_OP_BRC_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, const4, address);
+ break;
+/* BRN Format */
+ case OPCM_32_BRN_JTT:
+ address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
+ r1 = MASK_OP_BRN_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, address);
+ break;
+/* BRR Format */
+ case OPCM_32_BRR_EQ_NEQ:
+ case OPCM_32_BRR_ADDR_EQ_NEQ:
+ case OPCM_32_BRR_GE:
+ case OPCM_32_BRR_JLT:
+ case OPCM_32_BRR_JNE:
+ case OPCM_32_BRR_JNZ:
+ case OPCM_32_BRR_LOOP:
+ address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
+ r2 = MASK_OP_BRR_S2(ctx->opcode);
+ r1 = MASK_OP_BRR_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, r2, 0, address);
+ break;
+/* RC Format */
+ case OPCM_32_RC_LOGICAL_SHIFT:
+ decode_rc_logical_shift(ctx);
+ break;
+ case OPCM_32_RC_ACCUMULATOR:
+ decode_rc_accumulator(ctx);
+ break;
+ case OPCM_32_RC_SERVICEROUTINE:
+ decode_rc_serviceroutine(ctx);
+ break;
+ case OPCM_32_RC_MUL:
+ decode_rc_mul(ctx);
+ break;
+/* RCPW Format */
+ case OPCM_32_RCPW_MASK_INSERT:
+ decode_rcpw_insert(ctx);
+ break;
+/* RCRR Format */
+ case OPC1_32_RCRR_INSERT:
+ r1 = MASK_OP_RCRR_S1(ctx->opcode);
+ r2 = MASK_OP_RCRR_S3(ctx->opcode);
+ r3 = MASK_OP_RCRR_D(ctx->opcode);
+ const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
+ temp = tcg_const_i32(const16);
+ temp2 = tcg_temp_new(); /* width */
+ temp3 = tcg_temp_new(); /* pos */
+
+ CHECK_REG_PAIR(r3);
+
+ tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+
+ gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+/* RCRW Format */
+ case OPCM_32_RCRW_MASK_INSERT:
+ decode_rcrw_insert(ctx);
+ break;
+/* RCR Format */
+ case OPCM_32_RCR_COND_SELECT:
+ decode_rcr_cond_select(ctx);
+ break;
+ case OPCM_32_RCR_MADD:
+ decode_rcr_madd(ctx);
+ break;
+ case OPCM_32_RCR_MSUB:
+ decode_rcr_msub(ctx);
+ break;
+/* RLC Format */
+ case OPC1_32_RLC_ADDI:
+ case OPC1_32_RLC_ADDIH:
+ case OPC1_32_RLC_ADDIH_A:
+ case OPC1_32_RLC_MFCR:
+ case OPC1_32_RLC_MOV:
+ case OPC1_32_RLC_MOV_64:
+ case OPC1_32_RLC_MOV_U:
+ case OPC1_32_RLC_MOV_H:
+ case OPC1_32_RLC_MOVH_A:
+ case OPC1_32_RLC_MTCR:
+ decode_rlc_opc(ctx, op1);
+ break;
+/* RR Format */
+ case OPCM_32_RR_ACCUMULATOR:
+ decode_rr_accumulator(ctx);
+ break;
+ case OPCM_32_RR_LOGICAL_SHIFT:
+ decode_rr_logical_shift(ctx);
+ break;
+ case OPCM_32_RR_ADDRESS:
+ decode_rr_address(ctx);
+ break;
+ case OPCM_32_RR_IDIRECT:
+ decode_rr_idirect(ctx);
+ break;
+ case OPCM_32_RR_DIVIDE:
+ decode_rr_divide(ctx);
+ break;
+/* RR1 Format */
+ case OPCM_32_RR1_MUL:
+ decode_rr1_mul(ctx);
+ break;
+ case OPCM_32_RR1_MULQ:
+ decode_rr1_mulq(ctx);
+ break;
+/* RR2 Format */
+ case OPCM_32_RR2_MUL:
+ decode_rr2_mul(ctx);
+ break;
+/* RRPW Format */
+ case OPCM_32_RRPW_EXTRACT_INSERT:
+ decode_rrpw_extract_insert(ctx);
+ break;
+ case OPC1_32_RRPW_DEXTR:
+ r1 = MASK_OP_RRPW_S1(ctx->opcode);
+ r2 = MASK_OP_RRPW_S2(ctx->opcode);
+ r3 = MASK_OP_RRPW_D(ctx->opcode);
+ const16 = MASK_OP_RRPW_POS(ctx->opcode);
+ if (r1 == r2) {
+ tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
+ } else {
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
+ tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
+ tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+ }
+ break;
+/* RRR Format */
+ case OPCM_32_RRR_COND_SELECT:
+ decode_rrr_cond_select(ctx);
+ break;
+ case OPCM_32_RRR_DIVIDE:
+ decode_rrr_divide(ctx);
+ break;
+/* RRR2 Format */
+ case OPCM_32_RRR2_MADD:
+ decode_rrr2_madd(ctx);
+ break;
+ case OPCM_32_RRR2_MSUB:
+ decode_rrr2_msub(ctx);
+ break;
+/* RRR1 Format */
+ case OPCM_32_RRR1_MADD:
+ decode_rrr1_madd(ctx);
+ break;
+ case OPCM_32_RRR1_MADDQ_H:
+ decode_rrr1_maddq_h(ctx);
+ break;
+ case OPCM_32_RRR1_MADDSU_H:
+ decode_rrr1_maddsu_h(ctx);
+ break;
+ case OPCM_32_RRR1_MSUB_H:
+ decode_rrr1_msub(ctx);
+ break;
+ case OPCM_32_RRR1_MSUB_Q:
+ decode_rrr1_msubq_h(ctx);
+ break;
+ case OPCM_32_RRR1_MSUBAD_H:
+ decode_rrr1_msubad_h(ctx);
+ break;
+/* RRRR Format */
+ case OPCM_32_RRRR_EXTRACT_INSERT:
+ decode_rrrr_extract_insert(ctx);
+ break;
+/* RRRW Format */
+ case OPCM_32_RRRW_EXTRACT_INSERT:
+ decode_rrrw_extract_insert(ctx);
+ break;
+/* SYS Format */
+ case OPCM_32_SYS_INTERRUPTS:
+ decode_sys_interrupts(ctx);
+ break;
+ case OPC1_32_SYS_RSTV:
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static bool tricore_insn_is_16bit(uint32_t insn)
+{
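+ /*
+ * TriCore encodes the insn length in bit 0 of the first halfword:
+ * it is clear for 16-bit and set for 32-bit instructions.
+ */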
+ return (insn & 0x1) == 0;
+}
+
+static void tricore_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUTriCoreState *env = cs->env_ptr;
+ ctx->mem_idx = cpu_mmu_index(env, false);
+ ctx->hflags = (uint32_t)ctx->base.tb->flags;
+ ctx->features = env->features;
+}
+
+static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu)
+{
+}
+
+static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next);
+}
+
+static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx)
+{
+ /*
+ * Return true if the insn at ctx->base.pc_next might cross a page boundary.
+ * (False positives are OK, false negatives are not.)
+ * Our caller ensures we are only called if ctx->base.pc_next is less than
+ * 4 bytes from the page boundary, so we cross the page if the first
+ * 16 bits indicate that this is a 32 bit insn.
+ */
+ uint16_t insn = cpu_lduw_code(env, ctx->base.pc_next);
+
+ return !tricore_insn_is_16bit(insn);
+}
+
+
+static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPUTriCoreState *env = cpu->env_ptr;
+ uint16_t insn_lo;
+ bool is_16bit;
+
+ insn_lo = cpu_lduw_code(env, ctx->base.pc_next);
+ is_16bit = tricore_insn_is_16bit(insn_lo);
+ if (is_16bit) {
+ ctx->opcode = insn_lo;
+ ctx->pc_succ_insn = ctx->base.pc_next + 2;
+ decode_16Bit_opc(ctx);
+ } else {
+ uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2);
+ ctx->opcode = insn_hi << 16 | insn_lo;
+ ctx->pc_succ_insn = ctx->base.pc_next + 4;
+ decode_32Bit_opc(ctx);
+ }
+ ctx->base.pc_next = ctx->pc_succ_insn;
+
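+ /*
+ * End the TB when the next insn would start on a new page, or when it
+ * is within 3 bytes of the page end and would cross into the next page
+ * (see insn_crosses_page() above).
+ */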
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ target_ulong page_start;
+
+ page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE
+ || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3
+ && insn_crosses_page(env, ctx))) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
+}
+
+static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ switch (ctx->base.is_jmp) {
+ case DISAS_TOO_MANY:
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tricore_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps tricore_tr_ops = {
+ .init_disas_context = tricore_tr_init_disas_context,
+ .tb_start = tricore_tr_tb_start,
+ .insn_start = tricore_tr_insn_start,
+ .translate_insn = tricore_tr_translate_insn,
+ .tb_stop = tricore_tr_tb_stop,
+ .disas_log = tricore_tr_disas_log,
+};
+
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+ translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->PC = data[0];
+}
+
+/*
+ *
+ * Initialization
+ *
+ */
+
+void cpu_state_reset(CPUTriCoreState *env)
+{
+ /* Reset Regs to Default Value */
+ env->PSW = 0xb80;
+ fpu_set_state(env);
+}
+
+static void tricore_tcg_init_csfr(void)
+{
+ cpu_PCXI = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PCXI), "PCXI");
+ cpu_PSW = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW), "PSW");
+ cpu_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PC), "PC");
+ cpu_ICR = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, ICR), "ICR");
+}
+
+void tricore_tcg_init(void)
+{
+ int i;
+
+ /* reg init */
+ for (i = 0 ; i < 16 ; i++) {
+ cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, gpr_a[i]),
+ regnames_a[i]);
+ }
+ for (i = 0 ; i < 16 ; i++) {
+ cpu_gpr_d[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, gpr_d[i]),
+ regnames_d[i]);
+ }
+ tricore_tcg_init_csfr();
+ /* init PSW flag cache */
+ cpu_PSW_C = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_C),
+ "PSW_C");
+ cpu_PSW_V = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_V),
+ "PSW_V");
+ cpu_PSW_SV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_SV),
+ "PSW_SV");
+ cpu_PSW_AV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_AV),
+ "PSW_AV");
+ cpu_PSW_SAV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_SAV),
+ "PSW_SAV");
+}
diff --git a/target/tricore/tricore-defs.h b/target/tricore/tricore-defs.h
new file mode 100644
index 000000000..f5e0a0bed
--- /dev/null
+++ b/target/tricore/tricore-defs.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_TRICORE_DEFS_H
+#define QEMU_TRICORE_DEFS_H
+
+#define TRICORE_TLB_MAX 128
+
+#endif /* QEMU_TRICORE_DEFS_H */
diff --git a/target/tricore/tricore-opcodes.h b/target/tricore/tricore-opcodes.h
new file mode 100644
index 000000000..f7135f183
--- /dev/null
+++ b/target/tricore/tricore-opcodes.h
@@ -0,0 +1,1474 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_TRICORE_TRICORE_OPCODES_H
+#define TARGET_TRICORE_TRICORE_OPCODES_H
+
+/*
+ * Opcode Masks for Tricore
+ * Format MASK_OP_InstrFormatName_Field
+ */
+
+/*
+ * These extract the bit field op[end:start] and shift it down to bit 0;
+ * the _SEXT variant additionally sign-extends the field.
+ */
+#define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \
+ (end) - (start) + 1))
+#define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\
+ (end) - (start) + 1))
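+
+/*
+ * Illustrative example (not part of the decoder): for op = 0x0000f19f,
+ * MASK_OP_MAJOR(op) below extracts bits 0..7 and yields 0x9f
+ * (OPCM_32_BRC_JNE), while MASK_BITS_SHIFT_SEXT(op, 12, 15) sign-extends
+ * the 4-bit field value 0xf to -1.
+ */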
+
+/* new opcode masks */
+
+#define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7)
+
+/* 16-Bit Formats */
+#define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15)
+#define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15)
+
+#define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15)
+
+#define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+#define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7)
+
+#define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* 32-Bit Formats */
+
+/* ABS Format */
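+/*
+ * The 18-bit offset is scattered across the insn word: insn[21:16] holds
+ * off18[5:0], insn[31:28] off18[9:6], insn[25:22] off18[13:10] and
+ * insn[15:12] off18[17:14].
+ */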
+#define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6) + \
+ (MASK_BITS_SHIFT(op, 22, 25) << 10) +\
+ (MASK_BITS_SHIFT(op, 12, 15) << 14))
+#define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27)
+#define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* ABSB Format */
+#define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op)
+#define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27)
+#define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11)
+#define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 8, 10)
+
+/* B Format */
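+/*
+ * disp24 is likewise split: insn[31:16] holds disp24[15:0] and insn[15:8]
+ * holds disp24[23:16]; the _SEXT variant sign-extends from bit 23. The BO
+ * off10 and BOL off16 fields below are reassembled the same way.
+ */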
+#define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \
+ (MASK_BITS_SHIFT(op, 8, 15) << 16))
+#define MASK_OP_B_DISP24_SEXT(op) (MASK_BITS_SHIFT(op, 16, 31) + \
+ (MASK_BITS_SHIFT_SEXT(op, 8, 15) << 16))
+/* BIT Format */
+#define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31)
+#define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BO Format */
+#define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6))
+#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6))
+#define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27)
+#define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BOL Format */
+#define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \
+ (MASK_BITS_SHIFT(op, 22, 27) << 10))
+#define MASK_OP_BOL_OFF16_SEXT(op) ((MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \
+ (MASK_BITS_SHIFT_SEXT(op, 22, 27) << 10))
+#define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BRC Format */
+#define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRC_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BRN Format */
+#define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRN_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \
+ (MASK_BITS_SHIFT(op, 7, 7) << 4))
+#define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+/* BRR Format */
+#define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRR_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* Meta masks for fields located at the same position in several formats */
+#define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31)
+#define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* RC Format */
+#define MASK_OP_RC_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27)
+#define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20)
+#define MASK_OP_RC_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20)
+#define MASK_OP_RC_S1(op) MASK_OP_META_S1(op)
+
+/* RCPW Format */
+
+#define MASK_OP_RCPW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op)
+
+/* RCR Format */
+
+#define MASK_OP_RCR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20)
+#define MASK_OP_RCR_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20)
+#define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op)
+
+/* RCRR Format */
+
+#define MASK_OP_RCRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op)
+
+/* RCRW Format */
+
+#define MASK_OP_RCRW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op)
+
+/* RLC Format */
+
+#define MASK_OP_RLC_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 12, 27)
+#define MASK_OP_RLC_CONST16_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 27)
+#define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op)
+
+/* RR Format */
+#define MASK_OP_RR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR_OP2(op) MASK_BITS_SHIFT(op, 20, 27)
+#define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR_S1(op) MASK_OP_META_S1(op)
+
+/* RR1 Format */
+#define MASK_OP_RR1_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27)
+#define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op)
+
+/* RR2 Format */
+#define MASK_OP_RR2_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27)
+#define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR2_S1(op) MASK_OP_META_S1(op)
+
+/* RRPW Format */
+#define MASK_OP_RRPW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op)
+
+/* RRR Format */
+#define MASK_OP_RRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23)
+#define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op)
+
+/* RRR1 Format */
+#define MASK_OP_RRR1_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23)
+#define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op)
+
+/* RRR2 Format */
+#define MASK_OP_RRR2_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23)
+#define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op)
+
+/* RRRR Format */
+#define MASK_OP_RRRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op)
+
+/* RRRW Format */
+#define MASK_OP_RRRW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op)
+
+/* SYS Format */
+#define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27)
+#define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op)
+
+
+
+/*
+ * Tricore Opcodes Enums
+ *
+ * Format: OPC(1|2|M)_InstrLen_Name
+ * OPC1 = only op1 field is used
+ * OPC2 = op1 and op2 field used part of OPCM
+ * OPCM = op1 field used to group Instr
+ * InstrLen = 16|32
+ * Name = Name of Instr
+ */
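+
+/*
+ * For example, OPCM_32_BRC_EQ_NEQ below only fixes op1; the one-bit BRC
+ * op2 field then selects between OPC2_32_BRC_JEQ and OPC2_32_BRC_JNE,
+ * whereas OPC1_32_B_J is fully identified by op1 alone.
+ */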
+
+/* 16-Bit */
+enum {
+ OPCM_16_SR_SYSTEM = 0x00,
+ OPCM_16_SR_ACCU = 0x32,
+
+ OPC1_16_SRC_ADD = 0xc2,
+ OPC1_16_SRC_ADD_A15 = 0x92,
+ OPC1_16_SRC_ADD_15A = 0x9a,
+ OPC1_16_SRR_ADD = 0x42,
+ OPC1_16_SRR_ADD_A15 = 0x12,
+ OPC1_16_SRR_ADD_15A = 0x1a,
+ OPC1_16_SRC_ADD_A = 0xb0,
+ OPC1_16_SRR_ADD_A = 0x30,
+ OPC1_16_SRR_ADDS = 0x22,
+ OPC1_16_SRRS_ADDSC_A = 0x10,
+ OPC1_16_SC_AND = 0x16,
+ OPC1_16_SRR_AND = 0x26,
+ OPC1_16_SC_BISR = 0xe0,
+ OPC1_16_SRC_CADD = 0x8a,
+ OPC1_16_SRC_CADDN = 0xca,
+ OPC1_16_SB_CALL = 0x5c,
+ OPC1_16_SRC_CMOV = 0xaa,
+ OPC1_16_SRR_CMOV = 0x2a,
+ OPC1_16_SRC_CMOVN = 0xea,
+ OPC1_16_SRR_CMOVN = 0x6a,
+ OPC1_16_SRC_EQ = 0xba,
+ OPC1_16_SRR_EQ = 0x3a,
+ OPC1_16_SB_J = 0x3c,
+ OPC1_16_SBC_JEQ = 0x1e,
+ OPC1_16_SBC_JEQ2 = 0x9e,
+ OPC1_16_SBR_JEQ = 0x3e,
+ OPC1_16_SBR_JEQ2 = 0xbe,
+ OPC1_16_SBR_JGEZ = 0xce,
+ OPC1_16_SBR_JGTZ = 0x4e,
+ OPC1_16_SR_JI = 0xdc,
+ OPC1_16_SBR_JLEZ = 0x8e,
+ OPC1_16_SBR_JLTZ = 0x0e,
+ OPC1_16_SBC_JNE = 0x5e,
+ OPC1_16_SBC_JNE2 = 0xde,
+ OPC1_16_SBR_JNE = 0x7e,
+ OPC1_16_SBR_JNE2 = 0xfe,
+ OPC1_16_SB_JNZ = 0xee,
+ OPC1_16_SBR_JNZ = 0xf6,
+ OPC1_16_SBR_JNZ_A = 0x7c,
+ OPC1_16_SBRN_JNZ_T = 0xae,
+ OPC1_16_SB_JZ = 0x6e,
+ OPC1_16_SBR_JZ = 0x76,
+ OPC1_16_SBR_JZ_A = 0xbc,
+ OPC1_16_SBRN_JZ_T = 0x2e,
+ OPC1_16_SC_LD_A = 0xd8,
+ OPC1_16_SLR_LD_A = 0xd4,
+ OPC1_16_SLR_LD_A_POSTINC = 0xc4,
+ OPC1_16_SLRO_LD_A = 0xc8,
+ OPC1_16_SRO_LD_A = 0xcc,
+ OPC1_16_SLR_LD_BU = 0x14,
+ OPC1_16_SLR_LD_BU_POSTINC = 0x04,
+ OPC1_16_SLRO_LD_BU = 0x08,
+ OPC1_16_SRO_LD_BU = 0x0c,
+ OPC1_16_SLR_LD_H = 0x94,
+ OPC1_16_SLR_LD_H_POSTINC = 0x84,
+ OPC1_16_SLRO_LD_H = 0x88,
+ OPC1_16_SRO_LD_H = 0x8c,
+ OPC1_16_SC_LD_W = 0x58,
+ OPC1_16_SLR_LD_W = 0x54,
+ OPC1_16_SLR_LD_W_POSTINC = 0x44,
+ OPC1_16_SLRO_LD_W = 0x48,
+ OPC1_16_SRO_LD_W = 0x4c,
+ OPC1_16_SBR_LOOP = 0xfc,
+ OPC1_16_SRC_LT = 0xfa,
+ OPC1_16_SRR_LT = 0x7a,
+ OPC1_16_SC_MOV = 0xda,
+ OPC1_16_SRC_MOV = 0x82,
+ OPC1_16_SRR_MOV = 0x02,
+ OPC1_16_SRC_MOV_E = 0xd2, /* 1.6 only */
+ OPC1_16_SRC_MOV_A = 0xa0,
+ OPC1_16_SRR_MOV_A = 0x60,
+ OPC1_16_SRR_MOV_AA = 0x40,
+ OPC1_16_SRR_MOV_D = 0x80,
+ OPC1_16_SRR_MUL = 0xe2,
+ OPC1_16_SR_NOT = 0x46,
+ OPC1_16_SC_OR = 0x96,
+ OPC1_16_SRR_OR = 0xa6,
+ OPC1_16_SRC_SH = 0x06,
+ OPC1_16_SRC_SHA = 0x86,
+ OPC1_16_SC_ST_A = 0xf8,
+ OPC1_16_SRO_ST_A = 0xec,
+ OPC1_16_SSR_ST_A = 0xf4,
+ OPC1_16_SSR_ST_A_POSTINC = 0xe4,
+ OPC1_16_SSRO_ST_A = 0xe8,
+ OPC1_16_SRO_ST_B = 0x2c,
+ OPC1_16_SSR_ST_B = 0x34,
+ OPC1_16_SSR_ST_B_POSTINC = 0x24,
+ OPC1_16_SSRO_ST_B = 0x28,
+ OPC1_16_SRO_ST_H = 0xac,
+ OPC1_16_SSR_ST_H = 0xb4,
+ OPC1_16_SSR_ST_H_POSTINC = 0xa4,
+ OPC1_16_SSRO_ST_H = 0xa8,
+ OPC1_16_SC_ST_W = 0x78,
+ OPC1_16_SRO_ST_W = 0x6c,
+ OPC1_16_SSR_ST_W = 0x74,
+ OPC1_16_SSR_ST_W_POSTINC = 0x64,
+ OPC1_16_SSRO_ST_W = 0x68,
+ OPC1_16_SRR_SUB = 0xa2,
+ OPC1_16_SRR_SUB_A15B = 0x52,
+ OPC1_16_SRR_SUB_15AB = 0x5a,
+ OPC1_16_SC_SUB_A = 0x20,
+ OPC1_16_SRR_SUBS = 0x62,
+ OPC1_16_SRR_XOR = 0xc6,
+};
+
+/*
+ * SR Format
+ */
+/* OPCM_16_SR_SYSTEM */
+enum {
+ OPC2_16_SR_NOP = 0x00,
+ OPC2_16_SR_RET = 0x09,
+ OPC2_16_SR_RFE = 0x08,
+ OPC2_16_SR_DEBUG = 0x0a,
+ OPC2_16_SR_FRET = 0x07,
+};
+/* OPCM_16_SR_ACCU */
+enum {
+ OPC2_16_SR_RSUB = 0x05,
+ OPC2_16_SR_SAT_B = 0x00,
+ OPC2_16_SR_SAT_BU = 0x01,
+ OPC2_16_SR_SAT_H = 0x02,
+ OPC2_16_SR_SAT_HU = 0x03,
+};
+
+/* 32-Bit */
+
+enum {
+/* ABS Format 1, M */
+ OPCM_32_ABS_LDW = 0x85,
+ OPCM_32_ABS_LDB = 0x05,
+ OPCM_32_ABS_LDMST_SWAP = 0xe5,
+ OPCM_32_ABS_LDST_CONTEXT = 0x15,
+ OPCM_32_ABS_STORE = 0xa5,
+ OPCM_32_ABS_STOREB_H = 0x25,
+ OPC1_32_ABS_STOREQ = 0x65,
+ OPC1_32_ABS_LD_Q = 0x45,
+ OPC1_32_ABS_LEA = 0xc5,
+/* ABSB Format */
+ OPC1_32_ABSB_ST_T = 0xd5,
+/* B Format */
+ OPC1_32_B_CALL = 0x6d,
+ OPC1_32_B_CALLA = 0xed,
+ OPC1_32_B_FCALL = 0x61,
+ OPC1_32_B_FCALLA = 0xe1,
+ OPC1_32_B_J = 0x1d,
+ OPC1_32_B_JA = 0x9d,
+ OPC1_32_B_JL = 0x5d,
+ OPC1_32_B_JLA = 0xdd,
+/* Bit Format */
+ OPCM_32_BIT_ANDACC = 0x47,
+ OPCM_32_BIT_LOGICAL_T1 = 0x87,
+ OPCM_32_BIT_INSERT = 0x67,
+ OPCM_32_BIT_LOGICAL_T2 = 0x07,
+ OPCM_32_BIT_ORAND = 0xc7,
+ OPCM_32_BIT_SH_LOGIC1 = 0x27,
+ OPCM_32_BIT_SH_LOGIC2 = 0xa7,
+/* BO Format */
+ OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89,
+ OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9,
+ OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09,
+ OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29,
+ OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49,
+ OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69,
+/* BOL Format */
+ OPC1_32_BOL_LD_A_LONGOFF = 0x99,
+ OPC1_32_BOL_LD_W_LONGOFF = 0x19,
+ OPC1_32_BOL_LEA_LONGOFF = 0xd9,
+ OPC1_32_BOL_ST_W_LONGOFF = 0x59,
+ OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */
+ OPC1_32_BOL_LD_B_LONGOFF = 0x79, /* 1.6 only */
+ OPC1_32_BOL_LD_BU_LONGOFF = 0x39, /* 1.6 only */
+ OPC1_32_BOL_LD_H_LONGOFF = 0xc9, /* 1.6 only */
+ OPC1_32_BOL_LD_HU_LONGOFF = 0xb9, /* 1.6 only */
+ OPC1_32_BOL_ST_B_LONGOFF = 0xe9, /* 1.6 only */
+ OPC1_32_BOL_ST_H_LONGOFF = 0xf9, /* 1.6 only */
+/* BRC Format */
+ OPCM_32_BRC_EQ_NEQ = 0xdf,
+ OPCM_32_BRC_GE = 0xff,
+ OPCM_32_BRC_JLT = 0xbf,
+ OPCM_32_BRC_JNE = 0x9f,
+/* BRN Format */
+ OPCM_32_BRN_JTT = 0x6f,
+/* BRR Format */
+ OPCM_32_BRR_EQ_NEQ = 0x5f,
+ OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d,
+ OPCM_32_BRR_GE = 0x7f,
+ OPCM_32_BRR_JLT = 0x3f,
+ OPCM_32_BRR_JNE = 0x1f,
+ OPCM_32_BRR_JNZ = 0xbd,
+ OPCM_32_BRR_LOOP = 0xfd,
+/* RC Format */
+ OPCM_32_RC_LOGICAL_SHIFT = 0x8f,
+ OPCM_32_RC_ACCUMULATOR = 0x8b,
+ OPCM_32_RC_SERVICEROUTINE = 0xad,
+ OPCM_32_RC_MUL = 0x53,
+/* RCPW Format */
+ OPCM_32_RCPW_MASK_INSERT = 0xb7,
+/* RCR Format */
+ OPCM_32_RCR_COND_SELECT = 0xab,
+ OPCM_32_RCR_MADD = 0x13,
+ OPCM_32_RCR_MSUB = 0x33,
+/* RCRR Format */
+ OPC1_32_RCRR_INSERT = 0x97,
+/* RCRW Format */
+ OPCM_32_RCRW_MASK_INSERT = 0xd7,
+/* RLC Format */
+ OPC1_32_RLC_ADDI = 0x1b,
+ OPC1_32_RLC_ADDIH = 0x9b,
+ OPC1_32_RLC_ADDIH_A = 0x11,
+ OPC1_32_RLC_MFCR = 0x4d,
+ OPC1_32_RLC_MOV = 0x3b,
+ OPC1_32_RLC_MOV_64 = 0xfb, /* 1.6 only */
+ OPC1_32_RLC_MOV_U = 0xbb,
+ OPC1_32_RLC_MOV_H = 0x7b,
+ OPC1_32_RLC_MOVH_A = 0x91,
+ OPC1_32_RLC_MTCR = 0xcd,
+/* RR Format */
+ OPCM_32_RR_LOGICAL_SHIFT = 0x0f,
+ OPCM_32_RR_ACCUMULATOR = 0x0b,
+ OPCM_32_RR_ADDRESS = 0x01,
+ OPCM_32_RR_DIVIDE = 0x4b,
+ OPCM_32_RR_IDIRECT = 0x2d,
+/* RR1 Format */
+ OPCM_32_RR1_MUL = 0xb3,
+ OPCM_32_RR1_MULQ = 0x93,
+/* RR2 Format */
+ OPCM_32_RR2_MUL = 0x73,
+/* RRPW Format */
+ OPCM_32_RRPW_EXTRACT_INSERT = 0x37,
+ OPC1_32_RRPW_DEXTR = 0x77,
+/* RRR Format */
+ OPCM_32_RRR_COND_SELECT = 0x2b,
+ OPCM_32_RRR_DIVIDE = 0x6b,
+/* RRR1 Format */
+ OPCM_32_RRR1_MADD = 0x83,
+ OPCM_32_RRR1_MADDQ_H = 0x43,
+ OPCM_32_RRR1_MADDSU_H = 0xc3,
+ OPCM_32_RRR1_MSUB_H = 0xa3,
+ OPCM_32_RRR1_MSUB_Q = 0x63,
+ OPCM_32_RRR1_MSUBAD_H = 0xe3,
+/* RRR2 Format */
+ OPCM_32_RRR2_MADD = 0x03,
+ OPCM_32_RRR2_MSUB = 0x23,
+/* RRRR Format */
+ OPCM_32_RRRR_EXTRACT_INSERT = 0x17,
+/* RRRW Format */
+ OPCM_32_RRRW_EXTRACT_INSERT = 0x57,
+/* SYS Format */
+ OPCM_32_SYS_INTERRUPTS = 0x0d,
+ OPC1_32_SYS_RSTV = 0x2f,
+};
+
+
+
+/*
+ * ABS Format
+ */
+
+/* OPCM_32_ABS_LDW */
+enum {
+ OPC2_32_ABS_LD_A = 0x02,
+ OPC2_32_ABS_LD_D = 0x01,
+ OPC2_32_ABS_LD_DA = 0x03,
+ OPC2_32_ABS_LD_W = 0x00,
+};
+
+/* OPCM_32_ABS_LDB */
+enum {
+ OPC2_32_ABS_LD_B = 0x00,
+ OPC2_32_ABS_LD_BU = 0x01,
+ OPC2_32_ABS_LD_H = 0x02,
+ OPC2_32_ABS_LD_HU = 0x03,
+};
+/* OPCM_32_ABS_LDMST_SWAP */
+enum {
+ OPC2_32_ABS_LDMST = 0x01,
+ OPC2_32_ABS_SWAP_W = 0x00,
+};
+/* OPCM_32_ABS_LDST_CONTEXT */
+enum {
+ OPC2_32_ABS_LDLCX = 0x02,
+ OPC2_32_ABS_LDUCX = 0x03,
+ OPC2_32_ABS_STLCX = 0x00,
+ OPC2_32_ABS_STUCX = 0x01,
+};
+/* OPCM_32_ABS_STORE */
+enum {
+ OPC2_32_ABS_ST_A = 0x02,
+ OPC2_32_ABS_ST_D = 0x01,
+ OPC2_32_ABS_ST_DA = 0x03,
+ OPC2_32_ABS_ST_W = 0x00,
+};
+/* OPCM_32_ABS_STOREB_H */
+enum {
+ OPC2_32_ABS_ST_B = 0x00,
+ OPC2_32_ABS_ST_H = 0x02,
+};
+/*
+ * Bit Format
+ */
+/* OPCM_32_BIT_ANDACC */
+enum {
+ OPC2_32_BIT_AND_AND_T = 0x00,
+ OPC2_32_BIT_AND_ANDN_T = 0x03,
+ OPC2_32_BIT_AND_NOR_T = 0x02,
+ OPC2_32_BIT_AND_OR_T = 0x01,
+};
+/* OPCM_32_BIT_LOGICAL_T1 */
+enum {
+ OPC2_32_BIT_AND_T = 0x00,
+ OPC2_32_BIT_ANDN_T = 0x03,
+ OPC2_32_BIT_NOR_T = 0x02,
+ OPC2_32_BIT_OR_T = 0x01,
+};
+/* OPCM_32_BIT_INSERT */
+enum {
+ OPC2_32_BIT_INS_T = 0x00,
+ OPC2_32_BIT_INSN_T = 0x01,
+};
+/* OPCM_32_BIT_LOGICAL_T2 */
+enum {
+ OPC2_32_BIT_NAND_T = 0x00,
+ OPC2_32_BIT_ORN_T = 0x01,
+ OPC2_32_BIT_XNOR_T = 0x02,
+ OPC2_32_BIT_XOR_T = 0x03,
+};
+/* OPCM_32_BIT_ORAND */
+enum {
+ OPC2_32_BIT_OR_AND_T = 0x00,
+ OPC2_32_BIT_OR_ANDN_T = 0x03,
+ OPC2_32_BIT_OR_NOR_T = 0x02,
+ OPC2_32_BIT_OR_OR_T = 0x01,
+};
+/* OPCM_32_BIT_SH_LOGIC1 */
+enum {
+ OPC2_32_BIT_SH_AND_T = 0x00,
+ OPC2_32_BIT_SH_ANDN_T = 0x03,
+ OPC2_32_BIT_SH_NOR_T = 0x02,
+ OPC2_32_BIT_SH_OR_T = 0x01,
+};
+/* OPCM_32_BIT_SH_LOGIC2 */
+enum {
+ OPC2_32_BIT_SH_NAND_T = 0x00,
+ OPC2_32_BIT_SH_ORN_T = 0x01,
+ OPC2_32_BIT_SH_XNOR_T = 0x02,
+ OPC2_32_BIT_SH_XOR_T = 0x03,
+};
+/*
+ * BO Format
+ */
+/* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e,
+ OPC2_32_BO_CACHEA_I_POSTINC = 0x0e,
+ OPC2_32_BO_CACHEA_I_PREINC = 0x1e,
+ OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c,
+ OPC2_32_BO_CACHEA_W_POSTINC = 0x0c,
+ OPC2_32_BO_CACHEA_W_PREINC = 0x1c,
+ OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d,
+ OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d,
+ OPC2_32_BO_CACHEA_WI_PREINC = 0x1d,
+ /* 1.3.1 only */
+ OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b,
+ OPC2_32_BO_CACHEI_W_POSTINC = 0x0b,
+ OPC2_32_BO_CACHEI_W_PREINC = 0x1b,
+ OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f,
+ OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f,
+ OPC2_32_BO_CACHEI_WI_PREINC = 0x1f,
+ /* end 1.3.1 only */
+ OPC2_32_BO_ST_A_SHORTOFF = 0x26,
+ OPC2_32_BO_ST_A_POSTINC = 0x06,
+ OPC2_32_BO_ST_A_PREINC = 0x16,
+ OPC2_32_BO_ST_B_SHORTOFF = 0x20,
+ OPC2_32_BO_ST_B_POSTINC = 0x00,
+ OPC2_32_BO_ST_B_PREINC = 0x10,
+ OPC2_32_BO_ST_D_SHORTOFF = 0x25,
+ OPC2_32_BO_ST_D_POSTINC = 0x05,
+ OPC2_32_BO_ST_D_PREINC = 0x15,
+ OPC2_32_BO_ST_DA_SHORTOFF = 0x27,
+ OPC2_32_BO_ST_DA_POSTINC = 0x07,
+ OPC2_32_BO_ST_DA_PREINC = 0x17,
+ OPC2_32_BO_ST_H_SHORTOFF = 0x22,
+ OPC2_32_BO_ST_H_POSTINC = 0x02,
+ OPC2_32_BO_ST_H_PREINC = 0x12,
+ OPC2_32_BO_ST_Q_SHORTOFF = 0x28,
+ OPC2_32_BO_ST_Q_POSTINC = 0x08,
+ OPC2_32_BO_ST_Q_PREINC = 0x18,
+ OPC2_32_BO_ST_W_SHORTOFF = 0x24,
+ OPC2_32_BO_ST_W_POSTINC = 0x04,
+ OPC2_32_BO_ST_W_PREINC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_CACHEA_I_BR = 0x0e,
+ OPC2_32_BO_CACHEA_I_CIRC = 0x1e,
+ OPC2_32_BO_CACHEA_W_BR = 0x0c,
+ OPC2_32_BO_CACHEA_W_CIRC = 0x1c,
+ OPC2_32_BO_CACHEA_WI_BR = 0x0d,
+ OPC2_32_BO_CACHEA_WI_CIRC = 0x1d,
+ OPC2_32_BO_ST_A_BR = 0x06,
+ OPC2_32_BO_ST_A_CIRC = 0x16,
+ OPC2_32_BO_ST_B_BR = 0x00,
+ OPC2_32_BO_ST_B_CIRC = 0x10,
+ OPC2_32_BO_ST_D_BR = 0x05,
+ OPC2_32_BO_ST_D_CIRC = 0x15,
+ OPC2_32_BO_ST_DA_BR = 0x07,
+ OPC2_32_BO_ST_DA_CIRC = 0x17,
+ OPC2_32_BO_ST_H_BR = 0x02,
+ OPC2_32_BO_ST_H_CIRC = 0x12,
+ OPC2_32_BO_ST_Q_BR = 0x08,
+ OPC2_32_BO_ST_Q_CIRC = 0x18,
+ OPC2_32_BO_ST_W_BR = 0x04,
+ OPC2_32_BO_ST_W_CIRC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_LD_A_SHORTOFF = 0x26,
+ OPC2_32_BO_LD_A_POSTINC = 0x06,
+ OPC2_32_BO_LD_A_PREINC = 0x16,
+ OPC2_32_BO_LD_B_SHORTOFF = 0x20,
+ OPC2_32_BO_LD_B_POSTINC = 0x00,
+ OPC2_32_BO_LD_B_PREINC = 0x10,
+ OPC2_32_BO_LD_BU_SHORTOFF = 0x21,
+ OPC2_32_BO_LD_BU_POSTINC = 0x01,
+ OPC2_32_BO_LD_BU_PREINC = 0x11,
+ OPC2_32_BO_LD_D_SHORTOFF = 0x25,
+ OPC2_32_BO_LD_D_POSTINC = 0x05,
+ OPC2_32_BO_LD_D_PREINC = 0x15,
+ OPC2_32_BO_LD_DA_SHORTOFF = 0x27,
+ OPC2_32_BO_LD_DA_POSTINC = 0x07,
+ OPC2_32_BO_LD_DA_PREINC = 0x17,
+ OPC2_32_BO_LD_H_SHORTOFF = 0x22,
+ OPC2_32_BO_LD_H_POSTINC = 0x02,
+ OPC2_32_BO_LD_H_PREINC = 0x12,
+ OPC2_32_BO_LD_HU_SHORTOFF = 0x23,
+ OPC2_32_BO_LD_HU_POSTINC = 0x03,
+ OPC2_32_BO_LD_HU_PREINC = 0x13,
+ OPC2_32_BO_LD_Q_SHORTOFF = 0x28,
+ OPC2_32_BO_LD_Q_POSTINC = 0x08,
+ OPC2_32_BO_LD_Q_PREINC = 0x18,
+ OPC2_32_BO_LD_W_SHORTOFF = 0x24,
+ OPC2_32_BO_LD_W_POSTINC = 0x04,
+ OPC2_32_BO_LD_W_PREINC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_LD_A_BR = 0x06,
+ OPC2_32_BO_LD_A_CIRC = 0x16,
+ OPC2_32_BO_LD_B_BR = 0x00,
+ OPC2_32_BO_LD_B_CIRC = 0x10,
+ OPC2_32_BO_LD_BU_BR = 0x01,
+ OPC2_32_BO_LD_BU_CIRC = 0x11,
+ OPC2_32_BO_LD_D_BR = 0x05,
+ OPC2_32_BO_LD_D_CIRC = 0x15,
+ OPC2_32_BO_LD_DA_BR = 0x07,
+ OPC2_32_BO_LD_DA_CIRC = 0x17,
+ OPC2_32_BO_LD_H_BR = 0x02,
+ OPC2_32_BO_LD_H_CIRC = 0x12,
+ OPC2_32_BO_LD_HU_BR = 0x03,
+ OPC2_32_BO_LD_HU_CIRC = 0x13,
+ OPC2_32_BO_LD_Q_BR = 0x08,
+ OPC2_32_BO_LD_Q_CIRC = 0x18,
+ OPC2_32_BO_LD_W_BR = 0x04,
+ OPC2_32_BO_LD_W_CIRC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_LDLCX_SHORTOFF = 0x24,
+ OPC2_32_BO_LDMST_SHORTOFF = 0x21,
+ OPC2_32_BO_LDMST_POSTINC = 0x01,
+ OPC2_32_BO_LDMST_PREINC = 0x11,
+ OPC2_32_BO_LDUCX_SHORTOFF = 0x25,
+ OPC2_32_BO_LEA_SHORTOFF = 0x28,
+ OPC2_32_BO_STLCX_SHORTOFF = 0x26,
+ OPC2_32_BO_STUCX_SHORTOFF = 0x27,
+ OPC2_32_BO_SWAP_W_SHORTOFF = 0x20,
+ OPC2_32_BO_SWAP_W_POSTINC = 0x00,
+ OPC2_32_BO_SWAP_W_PREINC = 0x10,
+ OPC2_32_BO_CMPSWAP_W_SHORTOFF = 0x23,
+ OPC2_32_BO_CMPSWAP_W_POSTINC = 0x03,
+ OPC2_32_BO_CMPSWAP_W_PREINC = 0x13,
+ OPC2_32_BO_SWAPMSK_W_SHORTOFF = 0x22,
+ OPC2_32_BO_SWAPMSK_W_POSTINC = 0x02,
+ OPC2_32_BO_SWAPMSK_W_PREINC = 0x12,
+};
+/* OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_LDMST_BR = 0x01,
+ OPC2_32_BO_LDMST_CIRC = 0x11,
+ OPC2_32_BO_SWAP_W_BR = 0x00,
+ OPC2_32_BO_SWAP_W_CIRC = 0x10,
+ OPC2_32_BO_CMPSWAP_W_BR = 0x03,
+ OPC2_32_BO_CMPSWAP_W_CIRC = 0x13,
+ OPC2_32_BO_SWAPMSK_W_BR = 0x02,
+ OPC2_32_BO_SWAPMSK_W_CIRC = 0x12,
+};
+/*
+ * BRC Format
+ */
+/* OPCM_32_BRC_EQ_NEQ */
+enum {
+ OPC2_32_BRC_JEQ = 0x00,
+ OPC2_32_BRC_JNE = 0x01,
+};
+/* OPCM_32_BRC_GE */
+enum {
+ OP2_32_BRC_JGE = 0x00,
+ OPC_32_BRC_JGE_U = 0x01,
+};
+/* OPCM_32_BRC_JLT */
+enum {
+ OPC2_32_BRC_JLT = 0x00,
+ OPC2_32_BRC_JLT_U = 0x01,
+};
+/* OPCM_32_BRC_JNE */
+enum {
+ OPC2_32_BRC_JNED = 0x01,
+ OPC2_32_BRC_JNEI = 0x00,
+};
+/*
+ * BRN Format
+ */
+/* OPCM_32_BRN_JTT */
+enum {
+ OPC2_32_BRN_JNZ_T = 0x01,
+ OPC2_32_BRN_JZ_T = 0x00,
+};
+/*
+ * BRR Format
+ */
+/* OPCM_32_BRR_EQ_NEQ */
+enum {
+ OPC2_32_BRR_JEQ = 0x00,
+ OPC2_32_BRR_JNE = 0x01,
+};
+/* OPCM_32_BRR_ADDR_EQ_NEQ */
+enum {
+ OPC2_32_BRR_JEQ_A = 0x00,
+ OPC2_32_BRR_JNE_A = 0x01,
+};
+/* OPCM_32_BRR_GE */
+enum {
+ OPC2_32_BRR_JGE = 0x00,
+ OPC2_32_BRR_JGE_U = 0x01,
+};
+/* OPCM_32_BRR_JLT */
+enum {
+ OPC2_32_BRR_JLT = 0x00,
+ OPC2_32_BRR_JLT_U = 0x01,
+};
+/* OPCM_32_BRR_JNE */
+enum {
+ OPC2_32_BRR_JNED = 0x01,
+ OPC2_32_BRR_JNEI = 0x00,
+};
+/* OPCM_32_BRR_JNZ */
+enum {
+ OPC2_32_BRR_JNZ_A = 0x01,
+ OPC2_32_BRR_JZ_A = 0x00,
+};
+/* OPCM_32_BRR_LOOP */
+enum {
+ OPC2_32_BRR_LOOP = 0x00,
+ OPC2_32_BRR_LOOPU = 0x01,
+};
+/*
+ * RC Format
+ */
+/* OPCM_32_RC_LOGICAL_SHIFT */
+enum {
+ OPC2_32_RC_AND = 0x08,
+ OPC2_32_RC_ANDN = 0x0e,
+ OPC2_32_RC_NAND = 0x09,
+ OPC2_32_RC_NOR = 0x0b,
+ OPC2_32_RC_OR = 0x0a,
+ OPC2_32_RC_ORN = 0x0f,
+ OPC2_32_RC_SH = 0x00,
+ OPC2_32_RC_SH_H = 0x40,
+ OPC2_32_RC_SHA = 0x01,
+ OPC2_32_RC_SHA_H = 0x41,
+ OPC2_32_RC_SHAS = 0x02,
+ OPC2_32_RC_XNOR = 0x0d,
+ OPC2_32_RC_XOR = 0x0c,
+};
+/* OPCM_32_RC_ACCUMULATOR */
+enum {
+ OPC2_32_RC_ABSDIF = 0x0e,
+ OPC2_32_RC_ABSDIFS = 0x0f,
+ OPC2_32_RC_ADD = 0x00,
+ OPC2_32_RC_ADDC = 0x05,
+ OPC2_32_RC_ADDS = 0x02,
+ OPC2_32_RC_ADDS_U = 0x03,
+ OPC2_32_RC_ADDX = 0x04,
+ OPC2_32_RC_AND_EQ = 0x20,
+ OPC2_32_RC_AND_GE = 0x24,
+ OPC2_32_RC_AND_GE_U = 0x25,
+ OPC2_32_RC_AND_LT = 0x22,
+ OPC2_32_RC_AND_LT_U = 0x23,
+ OPC2_32_RC_AND_NE = 0x21,
+ OPC2_32_RC_EQ = 0x10,
+ OPC2_32_RC_EQANY_B = 0x56,
+ OPC2_32_RC_EQANY_H = 0x76,
+ OPC2_32_RC_GE = 0x14,
+ OPC2_32_RC_GE_U = 0x15,
+ OPC2_32_RC_LT = 0x12,
+ OPC2_32_RC_LT_U = 0x13,
+ OPC2_32_RC_MAX = 0x1a,
+ OPC2_32_RC_MAX_U = 0x1b,
+ OPC2_32_RC_MIN = 0x18,
+ OPC2_32_RC_MIN_U = 0x19,
+ OPC2_32_RC_NE = 0x11,
+ OPC2_32_RC_OR_EQ = 0x27,
+ OPC2_32_RC_OR_GE = 0x2b,
+ OPC2_32_RC_OR_GE_U = 0x2c,
+ OPC2_32_RC_OR_LT = 0x29,
+ OPC2_32_RC_OR_LT_U = 0x2a,
+ OPC2_32_RC_OR_NE = 0x28,
+ OPC2_32_RC_RSUB = 0x08,
+ OPC2_32_RC_RSUBS = 0x0a,
+ OPC2_32_RC_RSUBS_U = 0x0b,
+ OPC2_32_RC_SH_EQ = 0x37,
+ OPC2_32_RC_SH_GE = 0x3b,
+ OPC2_32_RC_SH_GE_U = 0x3c,
+ OPC2_32_RC_SH_LT = 0x39,
+ OPC2_32_RC_SH_LT_U = 0x3a,
+ OPC2_32_RC_SH_NE = 0x38,
+ OPC2_32_RC_XOR_EQ = 0x2f,
+ OPC2_32_RC_XOR_GE = 0x33,
+ OPC2_32_RC_XOR_GE_U = 0x34,
+ OPC2_32_RC_XOR_LT = 0x31,
+ OPC2_32_RC_XOR_LT_U = 0x32,
+ OPC2_32_RC_XOR_NE = 0x30,
+};
+/* OPCM_32_RC_SERVICEROUTINE */
+enum {
+ OPC2_32_RC_BISR = 0x00,
+ OPC2_32_RC_SYSCALL = 0x04,
+};
+/* OPCM_32_RC_MUL */
+enum {
+ OPC2_32_RC_MUL_32 = 0x01,
+ OPC2_32_RC_MUL_64 = 0x03,
+ OPC2_32_RC_MULS_32 = 0x05,
+ OPC2_32_RC_MUL_U_64 = 0x02,
+ OPC2_32_RC_MULS_U_32 = 0x04,
+};
+/*
+ * RCPW Format
+ */
+/* OPCM_32_RCPW_MASK_INSERT */
+enum {
+ OPC2_32_RCPW_IMASK = 0x01,
+ OPC2_32_RCPW_INSERT = 0x00,
+};
+/*
+ * RCR Format
+ */
+/* OPCM_32_RCR_COND_SELECT */
+enum {
+ OPC2_32_RCR_CADD = 0x00,
+ OPC2_32_RCR_CADDN = 0x01,
+ OPC2_32_RCR_SEL = 0x04,
+ OPC2_32_RCR_SELN = 0x05,
+};
+/* OPCM_32_RCR_MADD */
+enum {
+ OPC2_32_RCR_MADD_32 = 0x01,
+ OPC2_32_RCR_MADD_64 = 0x03,
+ OPC2_32_RCR_MADDS_32 = 0x05,
+ OPC2_32_RCR_MADDS_64 = 0x07,
+ OPC2_32_RCR_MADD_U_64 = 0x02,
+ OPC2_32_RCR_MADDS_U_32 = 0x04,
+ OPC2_32_RCR_MADDS_U_64 = 0x06,
+};
+/* OPCM_32_RCR_MSUB */
+enum {
+ OPC2_32_RCR_MSUB_32 = 0x01,
+ OPC2_32_RCR_MSUB_64 = 0x03,
+ OPC2_32_RCR_MSUBS_32 = 0x05,
+ OPC2_32_RCR_MSUBS_64 = 0x07,
+ OPC2_32_RCR_MSUB_U_64 = 0x02,
+ OPC2_32_RCR_MSUBS_U_32 = 0x04,
+ OPC2_32_RCR_MSUBS_U_64 = 0x06,
+};
+/*
+ * RCRW Format
+ */
+/* OPCM_32_RCRW_MASK_INSERT */
+enum {
+ OPC2_32_RCRW_IMASK = 0x01,
+ OPC2_32_RCRW_INSERT = 0x00,
+};
+
+/*
+ * RR Format
+ */
+/* OPCM_32_RR_LOGICAL_SHIFT */
+enum {
+ OPC2_32_RR_AND = 0x08,
+ OPC2_32_RR_ANDN = 0x0e,
+ OPC2_32_RR_CLO = 0x1c,
+ OPC2_32_RR_CLO_H = 0x7d,
+ OPC2_32_RR_CLS = 0x1d,
+ OPC2_32_RR_CLS_H = 0x7e,
+ OPC2_32_RR_CLZ = 0x1b,
+ OPC2_32_RR_CLZ_H = 0x7c,
+ OPC2_32_RR_NAND = 0x09,
+ OPC2_32_RR_NOR = 0x0b,
+ OPC2_32_RR_OR = 0x0a,
+ OPC2_32_RR_ORN = 0x0f,
+ OPC2_32_RR_SH = 0x00,
+ OPC2_32_RR_SH_H = 0x40,
+ OPC2_32_RR_SHA = 0x01,
+ OPC2_32_RR_SHA_H = 0x41,
+ OPC2_32_RR_SHAS = 0x02,
+ OPC2_32_RR_XNOR = 0x0d,
+ OPC2_32_RR_XOR = 0x0c,
+};
+/* OPCM_32_RR_ACCUMULATOR */
+enum {
+ OPC2_32_RR_ABS = 0x1c,
+ OPC2_32_RR_ABS_B = 0x5c,
+ OPC2_32_RR_ABS_H = 0x7c,
+ OPC2_32_RR_ABSDIF = 0x0e,
+ OPC2_32_RR_ABSDIF_B = 0x4e,
+ OPC2_32_RR_ABSDIF_H = 0x6e,
+ OPC2_32_RR_ABSDIFS = 0x0f,
+ OPC2_32_RR_ABSDIFS_H = 0x6f,
+ OPC2_32_RR_ABSS = 0x1d,
+ OPC2_32_RR_ABSS_H = 0x7d,
+ OPC2_32_RR_ADD = 0x00,
+ OPC2_32_RR_ADD_B = 0x40,
+ OPC2_32_RR_ADD_H = 0x60,
+ OPC2_32_RR_ADDC = 0x05,
+ OPC2_32_RR_ADDS = 0x02,
+ OPC2_32_RR_ADDS_H = 0x62,
+ OPC2_32_RR_ADDS_HU = 0x63,
+ OPC2_32_RR_ADDS_U = 0x03,
+ OPC2_32_RR_ADDX = 0x04,
+ OPC2_32_RR_AND_EQ = 0x20,
+ OPC2_32_RR_AND_GE = 0x24,
+ OPC2_32_RR_AND_GE_U = 0x25,
+ OPC2_32_RR_AND_LT = 0x22,
+ OPC2_32_RR_AND_LT_U = 0x23,
+ OPC2_32_RR_AND_NE = 0x21,
+ OPC2_32_RR_EQ = 0x10,
+ OPC2_32_RR_EQ_B = 0x50,
+ OPC2_32_RR_EQ_H = 0x70,
+ OPC2_32_RR_EQ_W = 0x90,
+ OPC2_32_RR_EQANY_B = 0x56,
+ OPC2_32_RR_EQANY_H = 0x76,
+ OPC2_32_RR_GE = 0x14,
+ OPC2_32_RR_GE_U = 0x15,
+ OPC2_32_RR_LT = 0x12,
+ OPC2_32_RR_LT_U = 0x13,
+ OPC2_32_RR_LT_B = 0x52,
+ OPC2_32_RR_LT_BU = 0x53,
+ OPC2_32_RR_LT_H = 0x72,
+ OPC2_32_RR_LT_HU = 0x73,
+ OPC2_32_RR_LT_W = 0x92,
+ OPC2_32_RR_LT_WU = 0x93,
+ OPC2_32_RR_MAX = 0x1a,
+ OPC2_32_RR_MAX_U = 0x1b,
+ OPC2_32_RR_MAX_B = 0x5a,
+ OPC2_32_RR_MAX_BU = 0x5b,
+ OPC2_32_RR_MAX_H = 0x7a,
+ OPC2_32_RR_MAX_HU = 0x7b,
+ OPC2_32_RR_MIN = 0x18,
+ OPC2_32_RR_MIN_U = 0x19,
+ OPC2_32_RR_MIN_B = 0x58,
+ OPC2_32_RR_MIN_BU = 0x59,
+ OPC2_32_RR_MIN_H = 0x78,
+ OPC2_32_RR_MIN_HU = 0x79,
+ OPC2_32_RR_MOV = 0x1f,
+ OPC2_32_RR_MOVS_64 = 0x80,
+ OPC2_32_RR_MOV_64 = 0x81,
+ OPC2_32_RR_NE = 0x11,
+ OPC2_32_RR_OR_EQ = 0x27,
+ OPC2_32_RR_OR_GE = 0x2b,
+ OPC2_32_RR_OR_GE_U = 0x2c,
+ OPC2_32_RR_OR_LT = 0x29,
+ OPC2_32_RR_OR_LT_U = 0x2a,
+ OPC2_32_RR_OR_NE = 0x28,
+ OPC2_32_RR_SAT_B = 0x5e,
+ OPC2_32_RR_SAT_BU = 0x5f,
+ OPC2_32_RR_SAT_H = 0x7e,
+ OPC2_32_RR_SAT_HU = 0x7f,
+ OPC2_32_RR_SH_EQ = 0x37,
+ OPC2_32_RR_SH_GE = 0x3b,
+ OPC2_32_RR_SH_GE_U = 0x3c,
+ OPC2_32_RR_SH_LT = 0x39,
+ OPC2_32_RR_SH_LT_U = 0x3a,
+ OPC2_32_RR_SH_NE = 0x38,
+ OPC2_32_RR_SUB = 0x08,
+ OPC2_32_RR_SUB_B = 0x48,
+ OPC2_32_RR_SUB_H = 0x68,
+ OPC2_32_RR_SUBC = 0x0d,
+ OPC2_32_RR_SUBS = 0x0a,
+ OPC2_32_RR_SUBS_U = 0x0b,
+ OPC2_32_RR_SUBS_H = 0x6a,
+ OPC2_32_RR_SUBS_HU = 0x6b,
+ OPC2_32_RR_SUBX = 0x0c,
+ OPC2_32_RR_XOR_EQ = 0x2f,
+ OPC2_32_RR_XOR_GE = 0x33,
+ OPC2_32_RR_XOR_GE_U = 0x34,
+ OPC2_32_RR_XOR_LT = 0x31,
+ OPC2_32_RR_XOR_LT_U = 0x32,
+ OPC2_32_RR_XOR_NE = 0x30,
+};
+/* OPCM_32_RR_ADDRESS */
+enum {
+ OPC2_32_RR_ADD_A = 0x01,
+ OPC2_32_RR_ADDSC_A = 0x60,
+ OPC2_32_RR_ADDSC_AT = 0x62,
+ OPC2_32_RR_EQ_A = 0x40,
+ OPC2_32_RR_EQZ = 0x48,
+ OPC2_32_RR_GE_A = 0x43,
+ OPC2_32_RR_LT_A = 0x42,
+ OPC2_32_RR_MOV_A = 0x63,
+ OPC2_32_RR_MOV_AA = 0x00,
+ OPC2_32_RR_MOV_D = 0x4c,
+ OPC2_32_RR_NE_A = 0x41,
+ OPC2_32_RR_NEZ_A = 0x49,
+ OPC2_32_RR_SUB_A = 0x02,
+};
+/* OPCM_32_RR_DIVIDE */
+enum {
+ OPC2_32_RR_BMERGE = 0x01,
+ OPC2_32_RR_BSPLIT = 0x09,
+ OPC2_32_RR_DVINIT_B = 0x5a,
+ OPC2_32_RR_DVINIT_BU = 0x4a,
+ OPC2_32_RR_DVINIT_H = 0x3a,
+ OPC2_32_RR_DVINIT_HU = 0x2a,
+ OPC2_32_RR_DVINIT = 0x1a,
+ OPC2_32_RR_DVINIT_U = 0x0a,
+ OPC2_32_RR_PARITY = 0x02,
+ OPC2_32_RR_UNPACK = 0x08,
+ OPC2_32_RR_CRC32 = 0x03,
+ OPC2_32_RR_DIV = 0x20,
+ OPC2_32_RR_DIV_U = 0x21,
+ OPC2_32_RR_MUL_F = 0x04,
+ OPC2_32_RR_DIV_F = 0x05,
+ OPC2_32_RR_FTOI = 0x10,
+ OPC2_32_RR_ITOF = 0x14,
+ OPC2_32_RR_CMP_F = 0x00,
+ OPC2_32_RR_FTOIZ = 0x13,
+ OPC2_32_RR_FTOQ31 = 0x11,
+ OPC2_32_RR_FTOQ31Z = 0x18,
+ OPC2_32_RR_FTOU = 0x12,
+ OPC2_32_RR_FTOUZ = 0x17,
+ OPC2_32_RR_Q31TOF = 0x15,
+ OPC2_32_RR_QSEED_F = 0x19,
+ OPC2_32_RR_UPDFL = 0x0c,
+ OPC2_32_RR_UTOF = 0x16,
+};
+/* OPCM_32_RR_IDIRECT */
+enum {
+ OPC2_32_RR_JI = 0x03,
+ OPC2_32_RR_JLI = 0x02,
+ OPC2_32_RR_CALLI = 0x00,
+ OPC2_32_RR_FCALLI = 0x01,
+};
+/*
+ * RR1 Format
+ */
+/* OPCM_32_RR1_MUL */
+enum {
+ OPC2_32_RR1_MUL_H_32_LL = 0x1a,
+ OPC2_32_RR1_MUL_H_32_LU = 0x19,
+ OPC2_32_RR1_MUL_H_32_UL = 0x18,
+ OPC2_32_RR1_MUL_H_32_UU = 0x1b,
+ OPC2_32_RR1_MULM_H_64_LL = 0x1e,
+ OPC2_32_RR1_MULM_H_64_LU = 0x1d,
+ OPC2_32_RR1_MULM_H_64_UL = 0x1c,
+ OPC2_32_RR1_MULM_H_64_UU = 0x1f,
+ OPC2_32_RR1_MULR_H_16_LL = 0x0e,
+ OPC2_32_RR1_MULR_H_16_LU = 0x0d,
+ OPC2_32_RR1_MULR_H_16_UL = 0x0c,
+ OPC2_32_RR1_MULR_H_16_UU = 0x0f,
+};
+/* OPCM_32_RR1_MULQ */
+enum {
+ OPC2_32_RR1_MUL_Q_32 = 0x02,
+ OPC2_32_RR1_MUL_Q_64 = 0x1b,
+ OPC2_32_RR1_MUL_Q_32_L = 0x01,
+ OPC2_32_RR1_MUL_Q_64_L = 0x19,
+ OPC2_32_RR1_MUL_Q_32_U = 0x00,
+ OPC2_32_RR1_MUL_Q_64_U = 0x18,
+ OPC2_32_RR1_MUL_Q_32_LL = 0x05,
+ OPC2_32_RR1_MUL_Q_32_UU = 0x04,
+ OPC2_32_RR1_MULR_Q_32_L = 0x07,
+ OPC2_32_RR1_MULR_Q_32_U = 0x06,
+};
+/*
+ * RR2 Format
+ */
+/* OPCM_32_RR2_MUL */
+enum {
+ OPC2_32_RR2_MUL_32 = 0x0a,
+ OPC2_32_RR2_MUL_64 = 0x6a,
+ OPC2_32_RR2_MULS_32 = 0x8a,
+ OPC2_32_RR2_MUL_U_64 = 0x68,
+ OPC2_32_RR2_MULS_U_32 = 0x88,
+};
+/*
+ * RRPW Format
+ */
+/* OPCM_32_RRPW_EXTRACT_INSERT */
+enum {
+ OPC2_32_RRPW_EXTR = 0x02,
+ OPC2_32_RRPW_EXTR_U = 0x03,
+ OPC2_32_RRPW_IMASK = 0x01,
+ OPC2_32_RRPW_INSERT = 0x00,
+};
+/*
+ * RRR Format
+ */
+/* OPCM_32_RRR_COND_SELECT */
+enum {
+ OPC2_32_RRR_CADD = 0x00,
+ OPC2_32_RRR_CADDN = 0x01,
+ OPC2_32_RRR_CSUB = 0x02,
+ OPC2_32_RRR_CSUBN = 0x03,
+ OPC2_32_RRR_SEL = 0x04,
+ OPC2_32_RRR_SELN = 0x05,
+};
+/* OPCM_32_RRR_DIVIDE */
+enum {
+ OPC2_32_RRR_DVADJ = 0x0d,
+ OPC2_32_RRR_DVSTEP = 0x0f,
+ OPC2_32_RRR_DVSTEP_U = 0x0e,
+ OPC2_32_RRR_IXMAX = 0x0a,
+ OPC2_32_RRR_IXMAX_U = 0x0b,
+ OPC2_32_RRR_IXMIN = 0x08,
+ OPC2_32_RRR_IXMIN_U = 0x09,
+ OPC2_32_RRR_PACK = 0x00,
+ OPC2_32_RRR_ADD_F = 0x02,
+ OPC2_32_RRR_SUB_F = 0x03,
+ OPC2_32_RRR_MADD_F = 0x06,
+ OPC2_32_RRR_MSUB_F = 0x07,
+};
+/*
+ * RRR1 Format
+ */
+/* OPCM_32_RRR1_MADD */
+enum {
+ OPC2_32_RRR1_MADD_H_LL = 0x1a,
+ OPC2_32_RRR1_MADD_H_LU = 0x19,
+ OPC2_32_RRR1_MADD_H_UL = 0x18,
+ OPC2_32_RRR1_MADD_H_UU = 0x1b,
+ OPC2_32_RRR1_MADDS_H_LL = 0x3a,
+ OPC2_32_RRR1_MADDS_H_LU = 0x39,
+ OPC2_32_RRR1_MADDS_H_UL = 0x38,
+ OPC2_32_RRR1_MADDS_H_UU = 0x3b,
+ OPC2_32_RRR1_MADDM_H_LL = 0x1e,
+ OPC2_32_RRR1_MADDM_H_LU = 0x1d,
+ OPC2_32_RRR1_MADDM_H_UL = 0x1c,
+ OPC2_32_RRR1_MADDM_H_UU = 0x1f,
+ OPC2_32_RRR1_MADDMS_H_LL = 0x3e,
+ OPC2_32_RRR1_MADDMS_H_LU = 0x3d,
+ OPC2_32_RRR1_MADDMS_H_UL = 0x3c,
+ OPC2_32_RRR1_MADDMS_H_UU = 0x3f,
+ OPC2_32_RRR1_MADDR_H_LL = 0x0e,
+ OPC2_32_RRR1_MADDR_H_LU = 0x0d,
+ OPC2_32_RRR1_MADDR_H_UL = 0x0c,
+ OPC2_32_RRR1_MADDR_H_UU = 0x0f,
+ OPC2_32_RRR1_MADDRS_H_LL = 0x2e,
+ OPC2_32_RRR1_MADDRS_H_LU = 0x2d,
+ OPC2_32_RRR1_MADDRS_H_UL = 0x2c,
+ OPC2_32_RRR1_MADDRS_H_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MADDQ_H */
+enum {
+ OPC2_32_RRR1_MADD_Q_32 = 0x02,
+ OPC2_32_RRR1_MADD_Q_64 = 0x1b,
+ OPC2_32_RRR1_MADD_Q_32_L = 0x01,
+ OPC2_32_RRR1_MADD_Q_64_L = 0x19,
+ OPC2_32_RRR1_MADD_Q_32_U = 0x00,
+ OPC2_32_RRR1_MADD_Q_64_U = 0x18,
+ OPC2_32_RRR1_MADD_Q_32_LL = 0x05,
+ OPC2_32_RRR1_MADD_Q_64_LL = 0x1d,
+ OPC2_32_RRR1_MADD_Q_32_UU = 0x04,
+ OPC2_32_RRR1_MADD_Q_64_UU = 0x1c,
+ OPC2_32_RRR1_MADDS_Q_32 = 0x22,
+ OPC2_32_RRR1_MADDS_Q_64 = 0x3b,
+ OPC2_32_RRR1_MADDS_Q_32_L = 0x21,
+ OPC2_32_RRR1_MADDS_Q_64_L = 0x39,
+ OPC2_32_RRR1_MADDS_Q_32_U = 0x20,
+ OPC2_32_RRR1_MADDS_Q_64_U = 0x38,
+ OPC2_32_RRR1_MADDS_Q_32_LL = 0x25,
+ OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d,
+ OPC2_32_RRR1_MADDS_Q_32_UU = 0x24,
+ OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c,
+ OPC2_32_RRR1_MADDR_H_64_UL = 0x1e,
+ OPC2_32_RRR1_MADDRS_H_64_UL = 0x3e,
+ OPC2_32_RRR1_MADDR_Q_32_LL = 0x07,
+ OPC2_32_RRR1_MADDR_Q_32_UU = 0x06,
+ OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27,
+ OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26,
+};
+/* OPCM_32_RRR1_MADDSU_H */
+enum {
+ OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a,
+ OPC2_32_RRR1_MADDSU_H_32_LU = 0x19,
+ OPC2_32_RRR1_MADDSU_H_32_UL = 0x18,
+ OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b,
+ OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a,
+ OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39,
+ OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38,
+ OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b,
+ OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e,
+ OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d,
+ OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c,
+ OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f,
+ OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e,
+ OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d,
+ OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c,
+ OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f,
+ OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e,
+ OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d,
+ OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c,
+ OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f,
+ OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e,
+ OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d,
+ OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c,
+ OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MSUB_H */
+enum {
+ OPC2_32_RRR1_MSUB_H_LL = 0x1a,
+ OPC2_32_RRR1_MSUB_H_LU = 0x19,
+ OPC2_32_RRR1_MSUB_H_UL = 0x18,
+ OPC2_32_RRR1_MSUB_H_UU = 0x1b,
+ OPC2_32_RRR1_MSUBS_H_LL = 0x3a,
+ OPC2_32_RRR1_MSUBS_H_LU = 0x39,
+ OPC2_32_RRR1_MSUBS_H_UL = 0x38,
+ OPC2_32_RRR1_MSUBS_H_UU = 0x3b,
+ OPC2_32_RRR1_MSUBM_H_LL = 0x1e,
+ OPC2_32_RRR1_MSUBM_H_LU = 0x1d,
+ OPC2_32_RRR1_MSUBM_H_UL = 0x1c,
+ OPC2_32_RRR1_MSUBM_H_UU = 0x1f,
+ OPC2_32_RRR1_MSUBMS_H_LL = 0x3e,
+ OPC2_32_RRR1_MSUBMS_H_LU = 0x3d,
+ OPC2_32_RRR1_MSUBMS_H_UL = 0x3c,
+ OPC2_32_RRR1_MSUBMS_H_UU = 0x3f,
+ OPC2_32_RRR1_MSUBR_H_LL = 0x0e,
+ OPC2_32_RRR1_MSUBR_H_LU = 0x0d,
+ OPC2_32_RRR1_MSUBR_H_UL = 0x0c,
+ OPC2_32_RRR1_MSUBR_H_UU = 0x0f,
+ OPC2_32_RRR1_MSUBRS_H_LL = 0x2e,
+ OPC2_32_RRR1_MSUBRS_H_LU = 0x2d,
+ OPC2_32_RRR1_MSUBRS_H_UL = 0x2c,
+ OPC2_32_RRR1_MSUBRS_H_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MSUB_Q */
+enum {
+ OPC2_32_RRR1_MSUB_Q_32 = 0x02,
+ OPC2_32_RRR1_MSUB_Q_64 = 0x1b,
+ OPC2_32_RRR1_MSUB_Q_32_L = 0x01,
+ OPC2_32_RRR1_MSUB_Q_64_L = 0x19,
+ OPC2_32_RRR1_MSUB_Q_32_U = 0x00,
+ OPC2_32_RRR1_MSUB_Q_64_U = 0x18,
+ OPC2_32_RRR1_MSUB_Q_32_LL = 0x05,
+ OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d,
+ OPC2_32_RRR1_MSUB_Q_32_UU = 0x04,
+ OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c,
+ OPC2_32_RRR1_MSUBS_Q_32 = 0x22,
+ OPC2_32_RRR1_MSUBS_Q_64 = 0x3b,
+ OPC2_32_RRR1_MSUBS_Q_32_L = 0x21,
+ OPC2_32_RRR1_MSUBS_Q_64_L = 0x39,
+ OPC2_32_RRR1_MSUBS_Q_32_U = 0x20,
+ OPC2_32_RRR1_MSUBS_Q_64_U = 0x38,
+ OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25,
+ OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d,
+ OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24,
+ OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c,
+ OPC2_32_RRR1_MSUBR_H_64_UL = 0x1e,
+ OPC2_32_RRR1_MSUBRS_H_64_UL = 0x3e,
+ OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07,
+ OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06,
+ OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27,
+ OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26,
+};
+/* OPCM_32_RRR1_MSUBAD_H */
+enum {
+ OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a,
+ OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19,
+ OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18,
+ OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b,
+ OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a,
+ OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39,
+ OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38,
+ OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b,
+ OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e,
+ OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d,
+ OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c,
+ OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f,
+ OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e,
+ OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d,
+ OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c,
+ OPC2_32_RRR1_MSUBADMS_H_64_UU = 0x3f,
+ OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e,
+ OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d,
+ OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c,
+ OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f,
+ OPC2_32_RRR1_MSUBADRS_H_16_LL = 0x2e,
+ OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d,
+ OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c,
+ OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f,
+};
+/*
+ * RRR2 Format
+ */
+/* OPCM_32_RRR2_MADD */
+enum {
+ OPC2_32_RRR2_MADD_32 = 0x0a,
+ OPC2_32_RRR2_MADD_64 = 0x6a,
+ OPC2_32_RRR2_MADDS_32 = 0x8a,
+ OPC2_32_RRR2_MADDS_64 = 0xea,
+ OPC2_32_RRR2_MADD_U_64 = 0x68,
+ OPC2_32_RRR2_MADDS_U_32 = 0x88,
+ OPC2_32_RRR2_MADDS_U_64 = 0xe8,
+};
+/* OPCM_32_RRR2_MSUB */
+enum {
+ OPC2_32_RRR2_MSUB_32 = 0x0a,
+ OPC2_32_RRR2_MSUB_64 = 0x6a,
+ OPC2_32_RRR2_MSUBS_32 = 0x8a,
+ OPC2_32_RRR2_MSUBS_64 = 0xea,
+ OPC2_32_RRR2_MSUB_U_64 = 0x68,
+ OPC2_32_RRR2_MSUBS_U_32 = 0x88,
+ OPC2_32_RRR2_MSUBS_U_64 = 0xe8,
+};
+/*
+ * RRRR Format
+ */
+/* OPCM_32_RRRR_EXTRACT_INSERT */
+enum {
+ OPC2_32_RRRR_DEXTR = 0x04,
+ OPC2_32_RRRR_EXTR = 0x02,
+ OPC2_32_RRRR_EXTR_U = 0x03,
+ OPC2_32_RRRR_INSERT = 0x00,
+};
+/*
+ * RRRW Format
+ */
+/* OPCM_32_RRRW_EXTRACT_INSERT */
+enum {
+ OPC2_32_RRRW_EXTR = 0x02,
+ OPC2_32_RRRW_EXTR_U = 0x03,
+ OPC2_32_RRRW_IMASK = 0x01,
+ OPC2_32_RRRW_INSERT = 0x00,
+};
+/*
+ * SYS Format
+ */
+/* OPCM_32_SYS_INTERRUPTS */
+enum {
+ OPC2_32_SYS_DEBUG = 0x04,
+ OPC2_32_SYS_DISABLE = 0x0d,
+ OPC2_32_SYS_DSYNC = 0x12,
+ OPC2_32_SYS_ENABLE = 0x0c,
+ OPC2_32_SYS_ISYNC = 0x13,
+ OPC2_32_SYS_NOP = 0x00,
+ OPC2_32_SYS_RET = 0x06,
+ OPC2_32_SYS_RFE = 0x07,
+ OPC2_32_SYS_RFM = 0x05,
+ OPC2_32_SYS_RSLCX = 0x09,
+ OPC2_32_SYS_SVLCX = 0x08,
+ OPC2_32_SYS_TRAPSV = 0x15,
+ OPC2_32_SYS_TRAPV = 0x14,
+ OPC2_32_SYS_RESTORE = 0x0e,
+ OPC2_32_SYS_FRET = 0x03,
+};
+
+#endif
diff --git a/target/xtensa/Kconfig b/target/xtensa/Kconfig
new file mode 100644
index 000000000..a3c8dc7f6
--- /dev/null
+++ b/target/xtensa/Kconfig
@@ -0,0 +1,2 @@
+config XTENSA
+ bool
diff --git a/target/xtensa/core-dc232b.c b/target/xtensa/core-dc232b.c
new file mode 100644
index 000000000..c982d09c2
--- /dev/null
+++ b/target/xtensa/core-dc232b.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/timer.h"
+
+#include "core-dc232b/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_dc232b
+#include "core-dc232b/xtensa-modules.c.inc"
+
+static XtensaConfig dc232b __attribute__((unused)) = {
+ .name = "dc232b",
+ .gdb_regmap = {
+ .reg = {
+#include "core-dc232b/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = (NANOSECONDS_PER_SECOND / 64) / 1000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc232b)
diff --git a/target/xtensa/core-dc232b/core-isa.h b/target/xtensa/core-dc232b/core-isa.h
new file mode 100644
index 000000000..a9935b87a
--- /dev/null
+++ b/target/xtensa/core-dc232b/core-isa.h
@@ -0,0 +1,422 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef XTENSA_DC232B_CORE_ISA_H
+#define XTENSA_DC232B_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU insns */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 701001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc232b" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)"
+#define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 221001 /* major*100+minor */
+#define XCHAL_HW_REL_LX2 1
+#define XCHAL_HW_REL_LX2_1 1
+#define XCHAL_HW_REL_LX2_1_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
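Plugging the dc232b values into the formula above: 4 ways * 2^(5 + 7) = 4 * 4096 = 16384 bytes, which matches the XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE defines earlier in this header. A compile-time sketch of that consistency check, using C11 _Static_assert (illustrative only, not part of the generated file):

    /* Cache size == ways * 2^(linewidth + setwidth), per the comment above. */
    _Static_assert(XCHAL_ICACHE_WAYS *
                   (1 << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH))
                   == XCHAL_ICACHE_SIZE, "I-cache geometry is consistent");
    _Static_assert(XCHAL_DCACHE_WAYS *
                   (1 << (XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH))
                   == XCHAL_DCACHE_SIZE, "D-cache geometry is consistent");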
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
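Each ANDBELOW mask is the cumulative OR of the per-level masks up to that level: 0x001F80FF | 0x00000100 = 0x001F81FF for levels 1-2, and ORing all seven per-level masks yields 0x003FFFFF (level 6 contributes nothing since its mask is zero). A sketch of that relation for the first step (illustrative only):

    /* ANDBELOW masks accumulate the per-level masks defined above. */
    _Static_assert((XCHAL_INTLEVEL1_MASK | XCHAL_INTLEVEL2_MASK)
                   == XCHAL_INTLEVEL2_ANDBELOW_MASK,
                   "level-2 cumulative mask is consistent");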
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
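These assignments agree with the type masks above: XCHAL_INTTYPE_MASK_TIMER = 0x00002440 is exactly bits 6, 10 and 13 (the three CCOMPAREn interrupts), and XCHAL_INTTYPE_MASK_NMI = 0x00004000 is bit 14, the NMI number. A compile-time sketch of the correspondence (illustrative only):

    /* Timer and NMI type masks match the interrupt numbers just defined. */
    _Static_assert(XCHAL_INTTYPE_MASK_TIMER ==
                   ((1u << XCHAL_TIMER0_INTERRUPT) |
                    (1u << XCHAL_TIMER1_INTERRUPT) |
                    (1u << XCHAL_TIMER2_INTERRUPT)), "timer mask matches");
    _Static_assert(XCHAL_INTTYPE_MASK_NMI == (1u << XCHAL_NMI_INTERRUPT),
                   "NMI mask matches");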
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, e.g. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0xD00002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC232B_CORE_ISA_H */
diff --git a/target/xtensa/core-dc232b/gdb-config.c.inc b/target/xtensa/core-dc232b/gdb-config.c.inc
new file mode 100644
index 000000000..d87168628
--- /dev/null
+++ b/target/xtensa/core-dc232b/gdb-config.c.inc
@@ -0,0 +1,262 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(55, 220, 18, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(56, 224, 18, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(58, 232, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(59, 236, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(60, 240, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(61, 244, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(62, 248, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(63, 252, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(64, 256, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(65, 260, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(66, 264, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(67, 268, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(68, 272, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(69, 276, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(70, 280, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(71, 284, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(72, 288, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(73, 292, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(74, 296, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(75, 300, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(76, 304, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(77, 308, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(78, 312, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(79, 316, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(80, 320, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(81, 324, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(82, 328, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(83, 332, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(84, 336, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(85, 340, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(86, 344, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(87, 348, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(89, 356, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(90, 360, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(91, 364, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(92, 368, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(93, 372, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(94, 376, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(95, 380, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(96, 384, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(97, 388, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(98, 392, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(99, 396, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(100, 400, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(101, 404, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(102, 408, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(103, 412, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(104, 416, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(105, 420, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(106, 424, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(107, 428, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(108, 432, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(109, 436, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(110, 440, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(111, 444, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(112, 448, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(113, 452, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(114, 456, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(115, 460, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(116, 464, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(117, 468, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(118, 472, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(119, 476, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15,
+ 0, 0, 0, 0, 0, 0)
+ XTREG_END
diff --git a/target/xtensa/core-dc232b/xtensa-modules.c.inc b/target/xtensa/core-dc232b/xtensa-modules.c.inc
new file mode 100644
index 000000000..164df3b1a
--- /dev/null
+++ b/target/xtensa/core-dc232b/xtensa-modules.c.inc
@@ -0,0 +1,14078 @@
+/* Xtensa configuration-specific ISA information.
+ Copyright 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "ACCLO", 16, 0 },
+ { "ACCHI", 17, 0 },
+ { "M0", 32, 0 },
+ { "M1", 33, 0 },
+ { "M2", 34, 0 },
+ { "M3", 35, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "176", 176, 0 },
+ { "208", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "LITBASE", 5, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "THREADPTR", 231, 1 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 70
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 231
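The generated file hard-codes NUM_SYSREGS rather than deriving it from the table. A sketch of keeping the two in sync, assuming QEMU's ARRAY_SIZE and QEMU_BUILD_BUG_ON helpers from qemu/osdep.h (the same pattern would apply to NUM_STATES below):

    /* Illustrative only: the hard-coded count must track the table above. */
    QEMU_BUILD_BUG_ON(ARRAY_SIZE(sysregs) != NUM_SYSREGS);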
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EPS3", 15, 0 },
+ { "EPS4", 15, 0 },
+ { "EPS5", 15, 0 },
+ { "EPS6", 15, 0 },
+ { "EPS7", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "LITBADDR", 20, 0 },
+ { "LITBEN", 1, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "ACC", 40, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 },
+ { "CPENABLE", 8, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 73
+
+/* Macros for xtensa_state numbers (for use in iclasses because the
+ state numbers are not available when the iclass table is generated). */
+
+#define STATE_LCOUNT 0
+#define STATE_PC 1
+#define STATE_ICOUNT 2
+#define STATE_DDR 3
+#define STATE_INTERRUPT 4
+#define STATE_CCOUNT 5
+#define STATE_XTSYNC 6
+#define STATE_VECBASE 7
+#define STATE_EPC1 8
+#define STATE_EPC2 9
+#define STATE_EPC3 10
+#define STATE_EPC4 11
+#define STATE_EPC5 12
+#define STATE_EPC6 13
+#define STATE_EPC7 14
+#define STATE_EXCSAVE1 15
+#define STATE_EXCSAVE2 16
+#define STATE_EXCSAVE3 17
+#define STATE_EXCSAVE4 18
+#define STATE_EXCSAVE5 19
+#define STATE_EXCSAVE6 20
+#define STATE_EXCSAVE7 21
+#define STATE_EPS2 22
+#define STATE_EPS3 23
+#define STATE_EPS4 24
+#define STATE_EPS5 25
+#define STATE_EPS6 26
+#define STATE_EPS7 27
+#define STATE_EXCCAUSE 28
+#define STATE_PSINTLEVEL 29
+#define STATE_PSUM 30
+#define STATE_PSWOE 31
+#define STATE_PSRING 32
+#define STATE_PSEXCM 33
+#define STATE_DEPC 34
+#define STATE_EXCVADDR 35
+#define STATE_WindowBase 36
+#define STATE_WindowStart 37
+#define STATE_PSCALLINC 38
+#define STATE_PSOWB 39
+#define STATE_LBEG 40
+#define STATE_LEND 41
+#define STATE_SAR 42
+#define STATE_THREADPTR 43
+#define STATE_LITBADDR 44
+#define STATE_LITBEN 45
+#define STATE_MISC0 46
+#define STATE_MISC1 47
+#define STATE_ACC 48
+#define STATE_InOCDMode 49
+#define STATE_INTENABLE 50
+#define STATE_DBREAKA0 51
+#define STATE_DBREAKC0 52
+#define STATE_DBREAKA1 53
+#define STATE_DBREAKC1 54
+#define STATE_IBREAKA0 55
+#define STATE_IBREAKA1 56
+#define STATE_IBREAKENABLE 57
+#define STATE_ICOUNTLEVEL 58
+#define STATE_DEBUGCAUSE 59
+#define STATE_DBNUM 60
+#define STATE_CCOMPARE0 61
+#define STATE_CCOMPARE1 62
+#define STATE_CCOMPARE2 63
+#define STATE_ASID3 64
+#define STATE_ASID2 65
+#define STATE_ASID1 66
+#define STATE_INSTPGSZID4 67
+#define STATE_DATAPGSZID4 68
+#define STATE_PTBASE 69
+#define STATE_CPENABLE 70
+#define STATE_SCOMPARE1 71
+#define STATE_EXPSTATE 72
+
+
+/* Field definitions. */
+
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
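All of these accessors use the same shift-in/shift-out idiom: (insn[0] << 24) >> 28 isolates a 4-bit field at bit offset 4 without an explicit mask, and the setter clears the field with ~0xf0 before ORing the new value in. A mask-based equivalent, shown only for clarity (the field_t_* names are illustrative; the uint32/xtensa_insnbuf types are the ones this file already uses):

    /* Equivalent to Field_t_Slot_inst_get/_set above (illustrative). */
    static unsigned field_t_get (const xtensa_insnbuf insn)
    {
      return (insn[0] >> 4) & 0xf;            /* instruction bits [7:4] */
    }

    static void field_t_set (xtensa_insnbuf insn, uint32 val)
    {
      insn[0] = (insn[0] & ~0xf0) | ((val & 0xf) << 4);
    }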
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
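Fields that are not contiguous in the instruction word are assembled piecewise: bbi concatenates instruction bit 12 (the single bbi4 bit above) with bits [7:4], so the 5-bit value is (bit 12 << 4) | bits [7:4]. A mask-based equivalent of the getter, for illustration (the field_bbi_get name is not from the source):

    /* Equivalent to Field_bbi_Slot_inst_get above (illustrative only). */
    static unsigned field_bbi_get (const xtensa_insnbuf insn)
    {
      return (((insn[0] >> 12) & 0x1) << 4) | ((insn[0] >> 4) & 0xf);
    }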
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_rbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_rbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_rhi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_rhi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_tbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_tbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_tlo_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_tlo_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_w_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_w_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_y_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_y_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_x_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_x_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_mr0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_mr1_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+static unsigned
+Implicit_Field_mr2_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 2;
+}
+
+static unsigned
+Implicit_Field_mr3_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 3;
+}
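+
+/* Implicit operands are not encoded in the instruction at all: each
+   _get above returns a fixed register index (a4/a8/a12 for the
+   windowed-call return registers, m0..m3 for the MR accumulators),
+   and Implicit_Field_set is deliberately a no-op.  */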
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+
+};
+
+
+/* Register files. */
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", 0, 32, 32 },
+ { "MR", "m", 1, 32, 4 }
+};
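+
+/* Each row is (name, short prefix, index, width in bits, entries):
+   this core has the 32 x 32-bit AR general registers and the four
+   32-bit MR accumulators of the MAC16 option.  */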
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "IMPWIRE", 32, 0, 0, 'i' }
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
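+
+/* Each table holds the 16 encodable values plus a terminating 0.  A
+   4-bit instruction field indexes the table directly on decode (see
+   Operand_ai4const_decode and friends below), while encode performs
+   the reverse lookup with a switch.  ai4c is the ADDI.N immediate set
+   (-1, 1..15); b4c and b4cu are the signed and unsigned
+   branch-compare immediate sets.  */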
+
+
+/* Instruction operands. */
+
+static int
+Operand_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffsetx4_0 = 0x4 + ((((int) offset_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_encode (uint32 *valp)
+{
+ unsigned offset_0, soffsetx4_0;
+ soffsetx4_0 = *valp;
+ offset_0 = ((soffsetx4_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
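+
+/* soffsetx4 is the CALL-family target operand: decode turns the
+   18-bit field into a sign-extended, 4-scaled offset plus 4, and the
+   _ator/_rtoa hooks convert between that relative form and an
+   absolute address.  The low two PC bits are masked off because call
+   targets are computed from the word-aligned PC.  */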
+
+static int
+Operand_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ uimm12x8_0 = imm12_0 << 3;
+ *valp = uimm12x8_0;
+ return 0;
+}
+
+static int
+Operand_uimm12x8_encode (uint32 *valp)
+{
+ unsigned imm12_0, uimm12x8_0;
+ uimm12x8_0 = *valp;
+ imm12_0 = ((uimm12x8_0 >> 3) & 0xfff);
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_0, mn_0;
+ mn_0 = *valp & 0xf;
+ simm4_0 = ((int) mn_0 << 28) >> 28;
+ *valp = simm4_0;
+ return 0;
+}
+
+static int
+Operand_simm4_encode (uint32 *valp)
+{
+ unsigned mn_0, simm4_0;
+ simm4_0 = *valp;
+ mn_0 = (simm4_0 & 0xf);
+ *valp = mn_0;
+ return 0;
+}
+
+static int
+Operand_arr_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_arr_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ars_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_art_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_art_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ar0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar0_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar4_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar8_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar8_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar12_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar12_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ars_entry_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_entry_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_0, r_0;
+ r_0 = *valp & 0xf;
+ immrx4_0 = (((0xfffffff) << 4) | r_0) << 2;
+ *valp = immrx4_0;
+ return 0;
+}
+
+static int
+Operand_immrx4_encode (uint32 *valp)
+{
+ unsigned r_0, immrx4_0;
+ immrx4_0 = *valp;
+ r_0 = ((immrx4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_0, r_0;
+ r_0 = *valp & 0xf;
+ lsi4x4_0 = r_0 << 2;
+ *valp = lsi4x4_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_encode (uint32 *valp)
+{
+ unsigned r_0, lsi4x4_0;
+ lsi4x4_0 = *valp;
+ r_0 = ((lsi4x4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_0, imm7_0;
+ imm7_0 = *valp & 0x7f;
+ simm7_0 = ((((-((((imm7_0 >> 6) & 1)) & (((imm7_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | imm7_0;
+ *valp = simm7_0;
+ return 0;
+}
+
+static int
+Operand_simm7_encode (uint32 *valp)
+{
+ unsigned imm7_0, simm7_0;
+ simm7_0 = *valp;
+ imm7_0 = (simm7_0 & 0x7f);
+ *valp = imm7_0;
+ return 0;
+}
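+
+/* simm7 is the MOVI.N immediate.  Decode sign-extends only when bits
+   6 and 5 are both set, giving the asymmetric range -32..95:
+   encodings 0x60..0x7f map to -32..-1, and 0x00..0x5f to 0..95.  */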
+
+static int
+Operand_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_0, imm6_0;
+ imm6_0 = *valp & 0x3f;
+ uimm6_0 = 0x4 + (((0) << 6) | imm6_0);
+ *valp = uimm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_encode (uint32 *valp)
+{
+ unsigned imm6_0, uimm6_0;
+ uimm6_0 = *valp;
+ imm6_0 = (uimm6_0 - 0x4) & 0x3f;
+ *valp = imm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_0, t_0;
+ t_0 = *valp & 0xf;
+ ai4const_0 = CONST_TBL_ai4c_0[t_0 & 0xf];
+ *valp = ai4const_0;
+ return 0;
+}
+
+static int
+Operand_ai4const_encode (uint32 *valp)
+{
+ unsigned t_0, ai4const_0;
+ ai4const_0 = *valp;
+ switch (ai4const_0)
+ {
+ case 0xffffffff: t_0 = 0; break;
+ case 0x1: t_0 = 0x1; break;
+ case 0x2: t_0 = 0x2; break;
+ case 0x3: t_0 = 0x3; break;
+ case 0x4: t_0 = 0x4; break;
+ case 0x5: t_0 = 0x5; break;
+ case 0x6: t_0 = 0x6; break;
+ case 0x7: t_0 = 0x7; break;
+ case 0x8: t_0 = 0x8; break;
+ case 0x9: t_0 = 0x9; break;
+ case 0xa: t_0 = 0xa; break;
+ case 0xb: t_0 = 0xb; break;
+ case 0xc: t_0 = 0xc; break;
+ case 0xd: t_0 = 0xd; break;
+ case 0xe: t_0 = 0xe; break;
+ default: t_0 = 0xf; break;
+ }
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_0, r_0;
+ r_0 = *valp & 0xf;
+ b4const_0 = CONST_TBL_b4c_0[r_0 & 0xf];
+ *valp = b4const_0;
+ return 0;
+}
+
+static int
+Operand_b4const_encode (uint32 *valp)
+{
+ unsigned r_0, b4const_0;
+ b4const_0 = *valp;
+ switch (b4const_0)
+ {
+ case 0xffffffff: r_0 = 0; break;
+ case 0x1: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_0, r_0;
+ r_0 = *valp & 0xf;
+ b4constu_0 = CONST_TBL_b4cu_0[r_0 & 0xf];
+ *valp = b4constu_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_encode (uint32 *valp)
+{
+ unsigned r_0, b4constu_0;
+ b4constu_0 = *valp;
+ switch (b4constu_0)
+ {
+ case 0x8000: r_0 = 0; break;
+ case 0x10000: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8_0 = imm8_0;
+ *valp = uimm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8_0;
+ uimm8_0 = *valp;
+ imm8_0 = (uimm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x2_0 = imm8_0 << 1;
+ *valp = uimm8x2_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x2_0;
+ uimm8x2_0 = *valp;
+ imm8_0 = ((uimm8x2_0 >> 1) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x4_0 = imm8_0 << 2;
+ *valp = uimm8x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x4_0;
+ uimm8x4_0 = *valp;
+ imm8_0 = ((uimm8x4_0 >> 2) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_0, op2_0;
+ op2_0 = *valp & 0xf;
+ uimm4x16_0 = op2_0 << 4;
+ *valp = uimm4x16_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_encode (uint32 *valp)
+{
+ unsigned op2_0, uimm4x16_0;
+ uimm4x16_0 = *valp;
+ op2_0 = ((uimm4x16_0 >> 4) & 0xf);
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8_0 = ((int) imm8_0 << 24) >> 24;
+ *valp = simm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8_0;
+ simm8_0 = *valp;
+ imm8_0 = (simm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8x256_0 = (((int) imm8_0 << 24) >> 24) << 8;
+ *valp = simm8x256_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8x256_0;
+ simm8x256_0 = *valp;
+ imm8_0 = ((simm8x256_0 >> 8) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_0, imm12b_0;
+ imm12b_0 = *valp & 0xfff;
+ simm12b_0 = ((int) imm12b_0 << 20) >> 20;
+ *valp = simm12b_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_encode (uint32 *valp)
+{
+ unsigned imm12b_0, simm12b_0;
+ simm12b_0 = *valp;
+ imm12b_0 = (simm12b_0 & 0xfff);
+ *valp = imm12b_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_0, sal_0;
+ sal_0 = *valp & 0x1f;
+ msalp32_0 = 0x20 - sal_0;
+ *valp = msalp32_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_encode (uint32 *valp)
+{
+ unsigned sal_0, msalp32_0;
+ msalp32_0 = *valp;
+ sal_0 = (0x20 - msalp32_0) & 0x1f;
+ *valp = sal_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_0, op2_0;
+ op2_0 = *valp & 0xf;
+ op2p1_0 = op2_0 + 0x1;
+ *valp = op2p1_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_encode (uint32 *valp)
+{
+ unsigned op2_0, op2p1_0;
+ op2p1_0 = *valp;
+ op2_0 = (op2p1_0 - 0x1) & 0xf;
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_label8_decode (uint32 *valp)
+{
+ unsigned label8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ label8_0 = 0x4 + (((int) imm8_0 << 24) >> 24);
+ *valp = label8_0;
+ return 0;
+}
+
+static int
+Operand_label8_encode (uint32 *valp)
+{
+ unsigned imm8_0, label8_0;
+ label8_0 = *valp;
+ imm8_0 = (label8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ ulabel8_0 = 0x4 + (((0) << 8) | imm8_0);
+ *valp = ulabel8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_encode (uint32 *valp)
+{
+ unsigned imm8_0, ulabel8_0;
+ ulabel8_0 = *valp;
+ imm8_0 = (ulabel8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_decode (uint32 *valp)
+{
+ unsigned label12_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ label12_0 = 0x4 + (((int) imm12_0 << 20) >> 20);
+ *valp = label12_0;
+ return 0;
+}
+
+static int
+Operand_label12_encode (uint32 *valp)
+{
+ unsigned imm12_0, label12_0;
+ label12_0 = *valp;
+ imm12_0 = (label12_0 - 0x4) & 0xfff;
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffset_0 = 0x4 + (((int) offset_0 << 14) >> 14);
+ *valp = soffset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_encode (uint32 *valp)
+{
+ unsigned offset_0, soffset_0;
+ soffset_0 = *valp;
+ offset_0 = (soffset_0 - 0x4) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_0, imm16_0;
+ imm16_0 = *valp & 0xffff;
+ uimm16x4_0 = (((0xffff) << 16) | imm16_0) << 2;
+ *valp = uimm16x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_encode (uint32 *valp)
+{
+ unsigned imm16_0, uimm16x4_0;
+ uimm16x4_0 = *valp;
+ imm16_0 = (uimm16x4_0 >> 2) & 0xffff;
+ *valp = imm16_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
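+
+/* uimm16x4 is the L32R literal operand.  Decode forces the upper bits
+   to 1 before scaling by 4, so the word offset is always negative,
+   and _ator/_rtoa bias by ((pc + 3) & ~3); the literal therefore sits
+   in the 256 KB window below the instruction.  */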
+
+static int
+Operand_mx_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mx_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_my_decode (uint32 *valp)
+{
+ *valp += 2;
+ return 0;
+}
+
+static int
+Operand_my_encode (uint32 *valp)
+{
+ int error;
+ error = ((*valp & ~0x3) != 0) || ((*valp & 0x2) == 0);
+ *valp = *valp & 1;
+ return error;
+}
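+
+/* The "my" operand can only name m2/m3: decode biases the one-bit
+   field by 2, and encode rejects any register outside that pair
+   before storing the low bit.  */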
+
+static int
+Operand_mw_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mw_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr0_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr1_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr2_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr3_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_immt_decode (uint32 *valp)
+{
+ unsigned immt_0, t_0;
+ t_0 = *valp & 0xf;
+ immt_0 = t_0;
+ *valp = immt_0;
+ return 0;
+}
+
+static int
+Operand_immt_encode (uint32 *valp)
+{
+ unsigned t_0, immt_0;
+ immt_0 = *valp;
+ t_0 = immt_0 & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_imms_decode (uint32 *valp)
+{
+ unsigned imms_0, s_0;
+ s_0 = *valp & 0xf;
+ imms_0 = s_0;
+ *valp = imms_0;
+ return 0;
+}
+
+static int
+Operand_imms_encode (uint32 *valp)
+{
+ unsigned s_0, imms_0;
+ imms_0 = *valp;
+ s_0 = imms_0 & 0xf;
+ *valp = s_0;
+ return 0;
+}
+
+static int
+Operand_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_0, t_0;
+ t_0 = *valp & 0xf;
+ tp7_0 = t_0 + 0x7;
+ *valp = tp7_0;
+ return 0;
+}
+
+static int
+Operand_tp7_encode (uint32 *valp)
+{
+ unsigned t_0, tp7_0;
+ tp7_0 = *valp;
+ t_0 = (tp7_0 - 0x7) & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_0, xt_wbr15_imm_0;
+ xt_wbr15_imm_0 = *valp & 0x7fff;
+ xt_wbr15_label_0 = 0x4 + (((int) xt_wbr15_imm_0 << 17) >> 17);
+ *valp = xt_wbr15_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_imm_0, xt_wbr15_label_0;
+ xt_wbr15_label_0 = *valp;
+ xt_wbr15_imm_0 = (xt_wbr15_label_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_0, xt_wbr18_imm_0;
+ xt_wbr18_imm_0 = *valp & 0x3ffff;
+ xt_wbr18_label_0 = 0x4 + (((int) xt_wbr18_imm_0 << 14) >> 14);
+ *valp = xt_wbr18_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_imm_0, xt_wbr18_label_0;
+ xt_wbr18_label_0 = *valp;
+ xt_wbr18_imm_0 = (xt_wbr18_label_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", 10, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffsetx4_encode, Operand_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", 3, -1, 0,
+ 0,
+ Operand_uimm12x8_encode, Operand_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", 26, -1, 0,
+ 0,
+ Operand_simm4_encode, Operand_simm4_decode,
+ 0, 0 },
+ { "arr", 14, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_arr_encode, Operand_arr_decode,
+ 0, 0 },
+ { "ars", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "*ars_invisible", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "art", 0, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_art_encode, Operand_art_decode,
+ 0, 0 },
+ { "ar0", 48, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar0_encode, Operand_ar0_decode,
+ 0, 0 },
+ { "ar4", 49, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar4_encode, Operand_ar4_decode,
+ 0, 0 },
+ { "ar8", 50, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar8_encode, Operand_ar8_decode,
+ 0, 0 },
+ { "ar12", 51, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar12_encode, Operand_ar12_decode,
+ 0, 0 },
+ { "ars_entry", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_entry_encode, Operand_ars_entry_decode,
+ 0, 0 },
+ { "immrx4", 14, -1, 0,
+ 0,
+ Operand_immrx4_encode, Operand_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", 14, -1, 0,
+ 0,
+ Operand_lsi4x4_encode, Operand_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", 34, -1, 0,
+ 0,
+ Operand_simm7_encode, Operand_simm7_decode,
+ 0, 0 },
+ { "uimm6", 33, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm6_encode, Operand_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", 0, -1, 0,
+ 0,
+ Operand_ai4const_encode, Operand_ai4const_decode,
+ 0, 0 },
+ { "b4const", 14, -1, 0,
+ 0,
+ Operand_b4const_encode, Operand_b4const_decode,
+ 0, 0 },
+ { "b4constu", 14, -1, 0,
+ 0,
+ Operand_b4constu_encode, Operand_b4constu_decode,
+ 0, 0 },
+ { "uimm8", 4, -1, 0,
+ 0,
+ Operand_uimm8_encode, Operand_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", 4, -1, 0,
+ 0,
+ Operand_uimm8x2_encode, Operand_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", 4, -1, 0,
+ 0,
+ Operand_uimm8x4_encode, Operand_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", 13, -1, 0,
+ 0,
+ Operand_uimm4x16_encode, Operand_uimm4x16_decode,
+ 0, 0 },
+ { "simm8", 4, -1, 0,
+ 0,
+ Operand_simm8_encode, Operand_simm8_decode,
+ 0, 0 },
+ { "simm8x256", 4, -1, 0,
+ 0,
+ Operand_simm8x256_encode, Operand_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", 6, -1, 0,
+ 0,
+ Operand_simm12b_encode, Operand_simm12b_decode,
+ 0, 0 },
+ { "msalp32", 18, -1, 0,
+ 0,
+ Operand_msalp32_encode, Operand_msalp32_decode,
+ 0, 0 },
+ { "op2p1", 13, -1, 0,
+ 0,
+ Operand_op2p1_encode, Operand_op2p1_decode,
+ 0, 0 },
+ { "label8", 4, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label8_encode, Operand_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", 4, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_ulabel8_encode, Operand_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", 3, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label12_encode, Operand_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", 10, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffset_encode, Operand_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", 7, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm16x4_encode, Operand_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "mx", 43, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ Operand_mx_encode, Operand_mx_decode,
+ 0, 0 },
+ { "my", 42, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ Operand_my_encode, Operand_my_decode,
+ 0, 0 },
+ { "mw", 41, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_mw_encode, Operand_mw_decode,
+ 0, 0 },
+ { "mr0", 52, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr0_encode, Operand_mr0_decode,
+ 0, 0 },
+ { "mr1", 53, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr1_encode, Operand_mr1_decode,
+ 0, 0 },
+ { "mr2", 54, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr2_encode, Operand_mr2_decode,
+ 0, 0 },
+ { "mr3", 55, 1, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr3_encode, Operand_mr3_decode,
+ 0, 0 },
+ { "immt", 0, -1, 0,
+ 0,
+ Operand_immt_encode, Operand_immt_decode,
+ 0, 0 },
+ { "imms", 5, -1, 0,
+ 0,
+ Operand_imms_encode, Operand_imms_decode,
+ 0, 0 },
+ { "tp7", 0, -1, 0,
+ 0,
+ Operand_tp7_encode, Operand_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", 44, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr15_label_encode, Operand_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", 45, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr18_label_encode, Operand_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "t", 0, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", 1, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi", 2, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", 3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", 4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s", 5, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", 6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", 7, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", 8, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", 9, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", 10, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", 11, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", 12, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", 13, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", 14, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", 15, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", 16, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae", 17, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", 18, -1, 0, 0, 0, 0, 0, 0 },
+ { "sargt", 19, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", 20, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas", 21, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", 22, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", 23, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", 24, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", 25, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", 26, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", 27, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", 28, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", 29, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", 30, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", 31, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", 32, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", 33, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", 34, -1, 0, 0, 0, 0, 0, 0 },
+ { "r3", 35, -1, 0, 0, 0, 0, 0, 0 },
+ { "rbit2", 36, -1, 0, 0, 0, 0, 0, 0 },
+ { "rhi", 37, -1, 0, 0, 0, 0, 0, 0 },
+ { "t3", 38, -1, 0, 0, 0, 0, 0, 0 },
+ { "tbit2", 39, -1, 0, 0, 0, 0, 0, 0 },
+ { "tlo", 40, -1, 0, 0, 0, 0, 0, 0 },
+ { "w", 41, -1, 0, 0, 0, 0, 0, 0 },
+ { "y", 42, -1, 0, 0, 0, 0, 0, 0 },
+ { "x", 43, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", 44, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", 45, -1, 0, 0, 0, 0, 0, 0 },
+ { "bitindex", 46, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", 47, -1, 0, 0, 0, 0, 0, 0 }
+};
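+
+/* Each row is (name, field index, register file, register count,
+   flags, encode, decode, address-to-relative, relative-to-address).
+   The trailing rows with null handlers expose raw instruction fields
+   that need no conversion.  */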
+
+
+/* Iclass table. */
+
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 10 /* ar12 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
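+
+/* Argument lists pair an operand index (or a processor state) with a
+   direction code: 'i' input, 'o' output, 'm' modified (read-write).
+   CALL12, for example, reads its soffsetx4 target, writes the return
+   address to a12, and updates PS.CALLINC.  */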
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 9 /* ar8 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 8 /* ar4 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 10 /* ar12 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 9 /* ar8 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 8 /* ar4 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { 11 /* ars_entry */ }, 's' },
+ { { 4 /* ars */ }, 'i' },
+ { { 1 /* uimm12x8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { 2 /* simm4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 12 /* immrx4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 12 /* immrx4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
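+
+/* These RSR/WSR/XSR triples follow a fixed pattern: RSR writes the AR
+   operand and reads the state, WSR reads the AR and writes the state,
+   and XSR exchanges them, so both sides are marked 'm'.  PS.EXCM and
+   PS.RING appear as inputs on the privileged registers, presumably
+   feeding the privilege check.  */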
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 16 /* ai4const */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 15 /* uimm6 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 13 /* lsi4x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { 4 /* ars */ }, 'o' },
+ { { 14 /* simm7 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 13 /* lsi4x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { 3 /* arr */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 23 /* simm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 24 /* simm8x256 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 17 /* b4const */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 47 /* bbi */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 18 /* b4constu */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 30 /* label12 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 7 /* ar0 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 7 /* ar0 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 62 /* sae */ }, 'i' },
+ { { 27 /* op2p1 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { 31 /* soffset */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 32 /* uimm16x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 19 /* uimm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 29 /* ulabel8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 29 /* ulabel8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 25 /* simm12b */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { 3 /* arr */ }, 'm' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 19 /* uimm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { 66 /* sas */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 26 /* msalp32 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 64 /* sargt */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 50 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 50 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'o' },
+ { { STATE_LITBEN }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'm' },
+ { { STATE_LITBEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
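+/* PRID is a read-only identification register, so only an RSR iclass is
+   defined and no PRID state appears; the privileged-access check still
+   reads PS.EXCM and PS.RING. */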
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'm' }
+};
+
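+/*
+ * MAC16 iclasses: operands commented mx, my, mw, and mr0..mr3 refer to
+ * the MAC16 option's MR register file, and STATE_ACC is its 40-bit
+ * accumulator, exposed to software as the ACCLO/ACCHI special registers
+ * handled further below. The plain multiply forms write ACC ('o'), the
+ * accumulate forms modify it ('m'), and the "al" load variants also
+ * post-update the address register ars, hence its 'm' code.
+ */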
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 34 /* my */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_args[] = {
+ { { 33 /* mx */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_args[] = {
+ { { 33 /* mx */ }, 'i' },
+ { { 34 /* my */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 34 /* my */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_args[] = {
+ { { 33 /* mx */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_args[] = {
+ { { 33 /* mx */ }, 'i' },
+ { { 34 /* my */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_args[] = {
+ { { 35 /* mw */ }, 'o' },
+ { { 4 /* ars */ }, 'm' },
+ { { 33 /* mx */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_args[] = {
+ { { 35 /* mw */ }, 'o' },
+ { { 4 /* ars */ }, 'm' },
+ { { 33 /* mx */ }, 'i' },
+ { { 34 /* my */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_l_args[] = {
+ { { 35 /* mw */ }, 'o' },
+ { { 4 /* ars */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mul16_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m0_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 36 /* mr0 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m0_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 36 /* mr0 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m0_args[] = {
+ { { 6 /* art */ }, 'm' },
+ { { 36 /* mr0 */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m1_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 37 /* mr1 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m1_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 37 /* mr1 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m1_args[] = {
+ { { 6 /* art */ }, 'm' },
+ { { 37 /* mr1 */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m2_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 38 /* mr2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m2_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 38 /* mr2 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m2_args[] = {
+ { { 6 /* art */ }, 'm' },
+ { { 38 /* mr2 */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m3_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 39 /* mr3 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m3_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 39 /* mr3 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m3_args[] = {
+ { { 6 /* art */ }, 'm' },
+ { { 39 /* mr3 */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
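+/*
+ * RFI s returns from a level-s interrupt: PS is restored from EPS[s]
+ * and the return PC from EPC[s]. Because s is an instruction operand
+ * rather than a constant, every EPC/EPS register is listed as an input
+ * here; PS.EXCM and PS.RING are 'm' since the privilege check reads
+ * them before PS is overwritten.
+ */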
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { 50 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { 50 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { 41 /* imms */ }, 'i' },
+ { { 40 /* immt */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { 41 /* imms */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { 41 /* imms */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 22 /* uimm4x16 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 22 /* uimm4x16 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 22 /* uimm4x16 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
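+/*
+ * MMU option state: WSR.PTEVADDR sets the page-table base (PTBASE),
+ * while RSR.PTEVADDR combines PTBASE with the faulting address held in
+ * EXCVADDR to form the PTE address, which is why EXCVADDR appears as an
+ * input only on the read and exchange forms.
+ */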
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 42 /* tp7 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 42 /* tp7 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { 6 /* art */ }, 'm' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
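+/* S32C1I compares memory against SCOMPARE1; the configuration generator
+   emits the state twice here, both as reads, and the duplicate is
+   preserved as generated. */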
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { 3 /* arr */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ 0 /* IMPWIRE */
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { 91 /* bitindex */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { 91 /* bitindex */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
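+/*
+ * Master iclass table. Judging by the arrays above, each row reads
+ *   { num_args, args, num_state_args, state_args,
+ *     num_interface_args, interface_args },
+ * with zero/null pairs for iclasses that reference no operands, state,
+ * or TIE interfaces; the comments on null entries preserve the iclass
+ * names.
+ */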
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 4, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 2, Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_176_args,
+ 2, Iclass_xt_iclass_rsr_176_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_208_args,
+ 2, Iclass_xt_iclass_rsr_208_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 3, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 3, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 3, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 3, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 3, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 3, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 3, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 3, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 3, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 3, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 3, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 3, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 3, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 3, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 3, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 3, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 3, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 3, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 3, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 3, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 3, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 3, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 3, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 3, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 3, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 3, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 3, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 3, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 3, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 3, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 3, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 3, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 3, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 3, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 3, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 3, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 3, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 3, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 3, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 3, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 3, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 3, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 3, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 3, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 3, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 3, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 3, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 3, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_aa_args,
+ 1, Iclass_xt_iclass_mac16_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_ad_args,
+ 1, Iclass_xt_iclass_mac16_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_da_args,
+ 1, Iclass_xt_iclass_mac16_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_dd_args,
+ 1, Iclass_xt_iclass_mac16_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_aa_args,
+ 1, Iclass_xt_iclass_mac16a_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_ad_args,
+ 1, Iclass_xt_iclass_mac16a_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_da_args,
+ 1, Iclass_xt_iclass_mac16a_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_dd_args,
+ 1, Iclass_xt_iclass_mac16a_dd_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_da_args,
+ 1, Iclass_xt_iclass_mac16al_da_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_dd_args,
+ 1, Iclass_xt_iclass_mac16al_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_l_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_mul16_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m3_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acclo_args,
+ 1, Iclass_xt_iclass_rsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acclo_args,
+ 1, Iclass_xt_iclass_wsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acclo_args,
+ 1, Iclass_xt_iclass_xsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acchi_args,
+ 1, Iclass_xt_iclass_rsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acchi_args,
+ 1, Iclass_xt_iclass_wsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acchi_args,
+ 1, Iclass_xt_iclass_xsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 21, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 3, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 3, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 4, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 4, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 2, Iclass_xt_iclass_icache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 2, Iclass_xt_iclass_dcache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 3, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 3, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 3, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 2, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 2, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 2, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 1, Iclass_iclass_READ_IMPWIRE_stateArgs, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 2, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 2, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 2, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 }
+};
+
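+/* Each iclass entry above is assumed to follow the xtensa_iclass_internal
+   layout from xtensa-isa-internal.h: { num_operands, operands,
+   num_stateOperands, stateOperands, num_interfaceOperands,
+   interfaceOperands }. */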
+
+/* Opcode encodings. */
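+
+/* Assumed convention, inferred from this generated file's structure:
+   each Opcode_*_encode function below stores only the fixed opcode bits
+   of one instruction into slotbuf[0]; variable fields (register numbers,
+   immediates) are OR'ed in afterwards by the Field_*_set functions
+   defined elsewhere in this file. */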
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
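+
+/* The _Slot_inst16a/_Slot_inst16b encode functions, such as RETW.N
+   above, cover the 16-bit density-format instructions; the plain
+   _Slot_inst functions cover the 24-bit core format. */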
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e70;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e700;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_rsr_208_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b700;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b700;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b700;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d700;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d700;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c700;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c700;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c700;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
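+/* MAC16-option multiply/accumulate encodings follow. In the mnemonics,
+   aa/ad/da/dd name the operand kinds (a = AR address register, d = MAC16
+   m register) and ll/hl/lh/hh select the 16-bit operand halves. */
+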
+static void
+Opcode_mul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x740004;
+}
+
+static void
+Opcode_mul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x750004;
+}
+
+static void
+Opcode_mul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x760004;
+}
+
+static void
+Opcode_mul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770004;
+}
+
+static void
+Opcode_umul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700004;
+}
+
+static void
+Opcode_umul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x710004;
+}
+
+static void
+Opcode_umul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720004;
+}
+
+static void
+Opcode_umul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730004;
+}
+
+static void
+Opcode_mul_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340004;
+}
+
+static void
+Opcode_mul_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350004;
+}
+
+static void
+Opcode_mul_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x360004;
+}
+
+static void
+Opcode_mul_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370004;
+}
+
+static void
+Opcode_mul_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640004;
+}
+
+static void
+Opcode_mul_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650004;
+}
+
+static void
+Opcode_mul_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660004;
+}
+
+static void
+Opcode_mul_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670004;
+}
+
+static void
+Opcode_mul_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240004;
+}
+
+static void
+Opcode_mul_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250004;
+}
+
+static void
+Opcode_mul_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260004;
+}
+
+static void
+Opcode_mul_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270004;
+}
+
+static void
+Opcode_mula_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780004;
+}
+
+static void
+Opcode_mula_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x790004;
+}
+
+static void
+Opcode_mula_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7a0004;
+}
+
+static void
+Opcode_mula_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0004;
+}
+
+static void
+Opcode_muls_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c0004;
+}
+
+static void
+Opcode_muls_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d0004;
+}
+
+static void
+Opcode_muls_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e0004;
+}
+
+static void
+Opcode_muls_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f0004;
+}
+
+static void
+Opcode_mula_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_mula_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x390004;
+}
+
+static void
+Opcode_mula_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a0004;
+}
+
+static void
+Opcode_mula_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0004;
+}
+
+static void
+Opcode_muls_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0004;
+}
+
+static void
+Opcode_muls_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0004;
+}
+
+static void
+Opcode_muls_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0004;
+}
+
+static void
+Opcode_muls_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f0004;
+}
+
+static void
+Opcode_mula_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680004;
+}
+
+static void
+Opcode_mula_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690004;
+}
+
+static void
+Opcode_mula_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0004;
+}
+
+static void
+Opcode_mula_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0004;
+}
+
+static void
+Opcode_muls_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0004;
+}
+
+static void
+Opcode_muls_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0004;
+}
+
+static void
+Opcode_muls_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0004;
+}
+
+static void
+Opcode_muls_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0004;
+}
+
+static void
+Opcode_mula_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_mula_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x290004;
+}
+
+static void
+Opcode_mula_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0004;
+}
+
+static void
+Opcode_mula_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0004;
+}
+
+static void
+Opcode_muls_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0004;
+}
+
+static void
+Opcode_muls_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0004;
+}
+
+static void
+Opcode_muls_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0004;
+}
+
+static void
+Opcode_muls_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0004;
+}
+
+static void
+Opcode_mula_da_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580004;
+}
+
+static void
+Opcode_mula_da_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480004;
+}
+
+static void
+Opcode_mula_da_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590004;
+}
+
+static void
+Opcode_mula_da_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490004;
+}
+
+static void
+Opcode_mula_da_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0004;
+}
+
+static void
+Opcode_mula_da_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0004;
+}
+
+static void
+Opcode_mula_da_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0004;
+}
+
+static void
+Opcode_mula_da_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0004;
+}
+
+static void
+Opcode_mula_dd_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_mula_dd_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_mula_dd_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x190004;
+}
+
+static void
+Opcode_mula_dd_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90004;
+}
+
+static void
+Opcode_mula_dd_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0004;
+}
+
+static void
+Opcode_mula_dd_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0004;
+}
+
+static void
+Opcode_mula_dd_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0004;
+}
+
+static void
+Opcode_mula_dd_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0004;
+}
+
+static void
+Opcode_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900004;
+}
+
+static void
+Opcode_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800004;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_rsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32000;
+}
+
+static void
+Opcode_wsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132000;
+}
+
+static void
+Opcode_xsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612000;
+}
+
+static void
+Opcode_rsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32100;
+}
+
+static void
+Opcode_wsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132100;
+}
+
+static void
+Opcode_xsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612100;
+}
+
+static void
+Opcode_rsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32200;
+}
+
+static void
+Opcode_wsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132200;
+}
+
+static void
+Opcode_xsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612200;
+}
+
+static void
+Opcode_rsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32300;
+}
+
+static void
+Opcode_wsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132300;
+}
+
+static void
+Opcode_xsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612300;
+}
+
+static void
+Opcode_rsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31000;
+}
+
+static void
+Opcode_wsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131000;
+}
+
+static void
+Opcode_xsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611000;
+}
+
+static void
+Opcode_rsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31100;
+}
+
+static void
+Opcode_wsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131100;
+}
+
+static void
+Opcode_xsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611100;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f200;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f200;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f200;
+}
+
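+/* Cache-maintenance encodings: ipf/ihi/ipfl/ihu/iiu/iii operate on the
+   instruction cache; dhwb/dhwbi/diwb/diwbi/dhi/dii and the dpf* prefetch
+   variants operate on the data cache. */
+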
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70d2;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270d2;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370d2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7082;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x27082;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37082;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135300;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35300;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615300;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a00;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135a00;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615a00;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35b00;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135b00;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615b00;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35c00;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135c00;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615c00;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f000;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501000;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x509000;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e000;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e000;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e000;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20000;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20000;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20000;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf20000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e60;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e600;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0000;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1000;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1200;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe2000;
+}
+
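+/* Per-opcode encode function tables.  Each Slot_*_encode function above
+   stores a fixed opcode template word into the slot buffer; operand
+   fields are applied separately.  The arrays below hold one encoder per
+   slot, in the order inst (24-bit), inst16a, inst16b (the 16-bit density
+   formats); a zero entry means the opcode cannot be encoded in that
+   slot.  For example, RETW.N below is encodable only in the inst16b
+   slot, so only its third entry is non-zero.  */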
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_176_encode_fns[] = {
+ Opcode_rsr_176_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_208_encode_fns[] = {
+ Opcode_rsr_208_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_ll_encode_fns[] = {
+ Opcode_mul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hl_encode_fns[] = {
+ Opcode_mul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_lh_encode_fns[] = {
+ Opcode_mul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hh_encode_fns[] = {
+ Opcode_mul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_ll_encode_fns[] = {
+ Opcode_umul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hl_encode_fns[] = {
+ Opcode_umul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_lh_encode_fns[] = {
+ Opcode_umul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hh_encode_fns[] = {
+ Opcode_umul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_ll_encode_fns[] = {
+ Opcode_mul_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hl_encode_fns[] = {
+ Opcode_mul_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_lh_encode_fns[] = {
+ Opcode_mul_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hh_encode_fns[] = {
+ Opcode_mul_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_ll_encode_fns[] = {
+ Opcode_mul_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hl_encode_fns[] = {
+ Opcode_mul_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_lh_encode_fns[] = {
+ Opcode_mul_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hh_encode_fns[] = {
+ Opcode_mul_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_ll_encode_fns[] = {
+ Opcode_mul_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hl_encode_fns[] = {
+ Opcode_mul_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_lh_encode_fns[] = {
+ Opcode_mul_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hh_encode_fns[] = {
+ Opcode_mul_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_ll_encode_fns[] = {
+ Opcode_mula_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hl_encode_fns[] = {
+ Opcode_mula_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_lh_encode_fns[] = {
+ Opcode_mula_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hh_encode_fns[] = {
+ Opcode_mula_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_ll_encode_fns[] = {
+ Opcode_muls_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hl_encode_fns[] = {
+ Opcode_muls_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_lh_encode_fns[] = {
+ Opcode_muls_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hh_encode_fns[] = {
+ Opcode_muls_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_ll_encode_fns[] = {
+ Opcode_mula_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hl_encode_fns[] = {
+ Opcode_mula_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_lh_encode_fns[] = {
+ Opcode_mula_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hh_encode_fns[] = {
+ Opcode_mula_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_ll_encode_fns[] = {
+ Opcode_muls_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hl_encode_fns[] = {
+ Opcode_muls_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_lh_encode_fns[] = {
+ Opcode_muls_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hh_encode_fns[] = {
+ Opcode_muls_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_encode_fns[] = {
+ Opcode_mula_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_encode_fns[] = {
+ Opcode_mula_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_encode_fns[] = {
+ Opcode_mula_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_encode_fns[] = {
+ Opcode_mula_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_ll_encode_fns[] = {
+ Opcode_muls_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hl_encode_fns[] = {
+ Opcode_muls_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_lh_encode_fns[] = {
+ Opcode_muls_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hh_encode_fns[] = {
+ Opcode_muls_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_encode_fns[] = {
+ Opcode_mula_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_encode_fns[] = {
+ Opcode_mula_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_encode_fns[] = {
+ Opcode_mula_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_encode_fns[] = {
+ Opcode_mula_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_ll_encode_fns[] = {
+ Opcode_muls_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hl_encode_fns[] = {
+ Opcode_muls_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_lh_encode_fns[] = {
+ Opcode_muls_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hh_encode_fns[] = {
+ Opcode_muls_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_lddec_encode_fns[] = {
+ Opcode_mula_da_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_ldinc_encode_fns[] = {
+ Opcode_mula_da_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_lddec_encode_fns[] = {
+ Opcode_mula_da_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_ldinc_encode_fns[] = {
+ Opcode_mula_da_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_lddec_encode_fns[] = {
+ Opcode_mula_da_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_ldinc_encode_fns[] = {
+ Opcode_mula_da_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_lddec_encode_fns[] = {
+ Opcode_mula_da_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_ldinc_encode_fns[] = {
+ Opcode_mula_da_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_lddec_encode_fns[] = {
+ Opcode_mula_dd_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_ldinc_encode_fns[] = {
+ Opcode_mula_dd_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_lddec_encode_fns[] = {
+ Opcode_mula_dd_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_lddec_encode_fns[] = {
+ Opcode_mula_dd_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_lddec_encode_fns[] = {
+ Opcode_mula_dd_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddec_encode_fns[] = {
+ Opcode_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldinc_encode_fns[] = {
+ Opcode_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m0_encode_fns[] = {
+ Opcode_rsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m0_encode_fns[] = {
+ Opcode_wsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m0_encode_fns[] = {
+ Opcode_xsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m1_encode_fns[] = {
+ Opcode_rsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m1_encode_fns[] = {
+ Opcode_wsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m1_encode_fns[] = {
+ Opcode_xsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m2_encode_fns[] = {
+ Opcode_rsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m2_encode_fns[] = {
+ Opcode_wsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m2_encode_fns[] = {
+ Opcode_xsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m3_encode_fns[] = {
+ Opcode_rsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m3_encode_fns[] = {
+ Opcode_wsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m3_encode_fns[] = {
+ Opcode_xsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acclo_encode_fns[] = {
+ Opcode_rsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acclo_encode_fns[] = {
+ Opcode_wsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acclo_encode_fns[] = {
+ Opcode_xsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acchi_encode_fns[] = {
+ Opcode_rsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acchi_encode_fns[] = {
+ Opcode_wsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acchi_encode_fns[] = {
+ Opcode_xsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0
+};
+
+
+/* Opcode table. */
+
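+/* Each entry is an xtensa_opcode_internal: the mnemonic, the index of
+   the opcode's iclass (noted in the trailing comment), the
+   XTENSA_OPCODE_IS_* flags classifying it as a branch, jump, or call,
+   the per-slot encode function table, and what appears to be a
+   count/pointer pair for functional-unit uses (unused for this core,
+   hence 0, 0).  */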
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", 0 /* xt_iclass_excw */,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", 1 /* xt_iclass_rfe */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", 2 /* xt_iclass_rfde */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", 3 /* xt_iclass_syscall */,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "simcall", 4 /* xt_iclass_simcall */,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "call12", 5 /* xt_iclass_call12 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", 6 /* xt_iclass_call8 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", 7 /* xt_iclass_call4 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", 8 /* xt_iclass_callx12 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", 9 /* xt_iclass_callx8 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", 10 /* xt_iclass_callx4 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", 11 /* xt_iclass_entry */,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", 12 /* xt_iclass_movsp */,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", 13 /* xt_iclass_rotw */,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", 14 /* xt_iclass_retw */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", 14 /* xt_iclass_retw */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", 15 /* xt_iclass_rfwou */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", 15 /* xt_iclass_rfwou */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", 16 /* xt_iclass_l32e */,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", 17 /* xt_iclass_s32e */,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", 18 /* xt_iclass_rsr.windowbase */,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", 19 /* xt_iclass_wsr.windowbase */,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", 20 /* xt_iclass_xsr.windowbase */,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", 21 /* xt_iclass_rsr.windowstart */,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", 22 /* xt_iclass_wsr.windowstart */,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", 23 /* xt_iclass_xsr.windowstart */,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", 24 /* xt_iclass_add.n */,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", 25 /* xt_iclass_addi.n */,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", 26 /* xt_iclass_bz6 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", 26 /* xt_iclass_bz6 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", 27 /* xt_iclass_ill.n */,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", 28 /* xt_iclass_loadi4 */,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", 29 /* xt_iclass_mov.n */,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", 30 /* xt_iclass_movi.n */,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", 31 /* xt_iclass_nopn */,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", 32 /* xt_iclass_retn */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", 33 /* xt_iclass_storei4 */,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "rur.threadptr", 34 /* rur_threadptr */,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", 35 /* wur_threadptr */,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", 36 /* xt_iclass_addi */,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", 37 /* xt_iclass_addmi */,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", 38 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", 39 /* xt_iclass_bit */,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", 39 /* xt_iclass_bit */,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", 39 /* xt_iclass_bit */,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", 40 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", 40 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", 40 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", 40 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", 41 /* xt_iclass_bsi8b */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", 41 /* xt_iclass_bsi8b */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", 42 /* xt_iclass_bsi8u */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", 42 /* xt_iclass_bsi8u */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", 43 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", 44 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", 44 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", 44 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", 44 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", 45 /* xt_iclass_call0 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", 46 /* xt_iclass_callx0 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", 47 /* xt_iclass_exti */,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", 48 /* xt_iclass_ill */,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", 49 /* xt_iclass_jump */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", 50 /* xt_iclass_jumpx */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", 51 /* xt_iclass_l16ui */,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", 52 /* xt_iclass_l16si */,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", 53 /* xt_iclass_l32i */,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", 54 /* xt_iclass_l32r */,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", 55 /* xt_iclass_l8i */,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", 56 /* xt_iclass_loop */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", 57 /* xt_iclass_loopz */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", 57 /* xt_iclass_loopz */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", 58 /* xt_iclass_movi */,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", 59 /* xt_iclass_movz */,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", 59 /* xt_iclass_movz */,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", 59 /* xt_iclass_movz */,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", 59 /* xt_iclass_movz */,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", 60 /* xt_iclass_neg */,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", 60 /* xt_iclass_neg */,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", 61 /* xt_iclass_nop */,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", 62 /* xt_iclass_return */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "s16i", 63 /* xt_iclass_s16i */,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", 64 /* xt_iclass_s32i */,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s8i", 65 /* xt_iclass_s8i */,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", 66 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", 66 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", 66 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", 66 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", 67 /* xt_iclass_sari */,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", 68 /* xt_iclass_shifts */,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", 69 /* xt_iclass_shiftst */,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", 70 /* xt_iclass_shiftt */,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", 70 /* xt_iclass_shiftt */,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", 71 /* xt_iclass_slli */,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", 72 /* xt_iclass_srai */,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", 73 /* xt_iclass_srli */,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", 74 /* xt_iclass_memw */,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", 75 /* xt_iclass_extw */,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", 76 /* xt_iclass_isync */,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", 77 /* xt_iclass_sync */,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", 77 /* xt_iclass_sync */,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", 77 /* xt_iclass_sync */,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", 78 /* xt_iclass_rsil */,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", 79 /* xt_iclass_rsr.lend */,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", 80 /* xt_iclass_wsr.lend */,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", 81 /* xt_iclass_xsr.lend */,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", 82 /* xt_iclass_rsr.lcount */,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", 83 /* xt_iclass_wsr.lcount */,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", 84 /* xt_iclass_xsr.lcount */,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", 85 /* xt_iclass_rsr.lbeg */,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", 86 /* xt_iclass_wsr.lbeg */,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", 87 /* xt_iclass_xsr.lbeg */,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", 88 /* xt_iclass_rsr.sar */,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", 89 /* xt_iclass_wsr.sar */,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", 90 /* xt_iclass_xsr.sar */,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.litbase", 91 /* xt_iclass_rsr.litbase */,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", 92 /* xt_iclass_wsr.litbase */,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", 93 /* xt_iclass_xsr.litbase */,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.176", 94 /* xt_iclass_rsr.176 */,
+ 0,
+ Opcode_rsr_176_encode_fns, 0, 0 },
+ { "rsr.208", 95 /* xt_iclass_rsr.208 */,
+ 0,
+ Opcode_rsr_208_encode_fns, 0, 0 },
+ { "rsr.ps", 96 /* xt_iclass_rsr.ps */,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", 97 /* xt_iclass_wsr.ps */,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", 98 /* xt_iclass_xsr.ps */,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", 99 /* xt_iclass_rsr.epc1 */,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", 100 /* xt_iclass_wsr.epc1 */,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", 101 /* xt_iclass_xsr.epc1 */,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", 102 /* xt_iclass_rsr.excsave1 */,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", 103 /* xt_iclass_wsr.excsave1 */,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", 104 /* xt_iclass_xsr.excsave1 */,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", 105 /* xt_iclass_rsr.epc2 */,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", 106 /* xt_iclass_wsr.epc2 */,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", 107 /* xt_iclass_xsr.epc2 */,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", 108 /* xt_iclass_rsr.excsave2 */,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", 109 /* xt_iclass_wsr.excsave2 */,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", 110 /* xt_iclass_xsr.excsave2 */,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", 111 /* xt_iclass_rsr.epc3 */,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", 112 /* xt_iclass_wsr.epc3 */,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", 113 /* xt_iclass_xsr.epc3 */,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", 114 /* xt_iclass_rsr.excsave3 */,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", 115 /* xt_iclass_wsr.excsave3 */,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", 116 /* xt_iclass_xsr.excsave3 */,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", 117 /* xt_iclass_rsr.epc4 */,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", 118 /* xt_iclass_wsr.epc4 */,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", 119 /* xt_iclass_xsr.epc4 */,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", 120 /* xt_iclass_rsr.excsave4 */,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", 121 /* xt_iclass_wsr.excsave4 */,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", 122 /* xt_iclass_xsr.excsave4 */,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", 123 /* xt_iclass_rsr.epc5 */,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", 124 /* xt_iclass_wsr.epc5 */,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", 125 /* xt_iclass_xsr.epc5 */,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", 126 /* xt_iclass_rsr.excsave5 */,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", 127 /* xt_iclass_wsr.excsave5 */,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", 128 /* xt_iclass_xsr.excsave5 */,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", 129 /* xt_iclass_rsr.epc6 */,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", 130 /* xt_iclass_wsr.epc6 */,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", 131 /* xt_iclass_xsr.epc6 */,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", 132 /* xt_iclass_rsr.excsave6 */,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", 133 /* xt_iclass_wsr.excsave6 */,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", 134 /* xt_iclass_xsr.excsave6 */,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", 135 /* xt_iclass_rsr.epc7 */,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", 136 /* xt_iclass_wsr.epc7 */,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", 137 /* xt_iclass_xsr.epc7 */,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", 138 /* xt_iclass_rsr.excsave7 */,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", 139 /* xt_iclass_wsr.excsave7 */,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", 140 /* xt_iclass_xsr.excsave7 */,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", 141 /* xt_iclass_rsr.eps2 */,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", 142 /* xt_iclass_wsr.eps2 */,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", 143 /* xt_iclass_xsr.eps2 */,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", 144 /* xt_iclass_rsr.eps3 */,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", 145 /* xt_iclass_wsr.eps3 */,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", 146 /* xt_iclass_xsr.eps3 */,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", 147 /* xt_iclass_rsr.eps4 */,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", 148 /* xt_iclass_wsr.eps4 */,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", 149 /* xt_iclass_xsr.eps4 */,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", 150 /* xt_iclass_rsr.eps5 */,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", 151 /* xt_iclass_wsr.eps5 */,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", 152 /* xt_iclass_xsr.eps5 */,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", 153 /* xt_iclass_rsr.eps6 */,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", 154 /* xt_iclass_wsr.eps6 */,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", 155 /* xt_iclass_xsr.eps6 */,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", 156 /* xt_iclass_rsr.eps7 */,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", 157 /* xt_iclass_wsr.eps7 */,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", 158 /* xt_iclass_xsr.eps7 */,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", 159 /* xt_iclass_rsr.excvaddr */,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", 160 /* xt_iclass_wsr.excvaddr */,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", 161 /* xt_iclass_xsr.excvaddr */,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", 162 /* xt_iclass_rsr.depc */,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", 163 /* xt_iclass_wsr.depc */,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", 164 /* xt_iclass_xsr.depc */,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", 165 /* xt_iclass_rsr.exccause */,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", 166 /* xt_iclass_wsr.exccause */,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", 167 /* xt_iclass_xsr.exccause */,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", 168 /* xt_iclass_rsr.misc0 */,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", 169 /* xt_iclass_wsr.misc0 */,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", 170 /* xt_iclass_xsr.misc0 */,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", 171 /* xt_iclass_rsr.misc1 */,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", 172 /* xt_iclass_wsr.misc1 */,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", 173 /* xt_iclass_xsr.misc1 */,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", 174 /* xt_iclass_rsr.prid */,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", 175 /* xt_iclass_rsr.vecbase */,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", 176 /* xt_iclass_wsr.vecbase */,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", 177 /* xt_iclass_xsr.vecbase */,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul.aa.ll", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_mul_aa_ll_encode_fns, 0, 0 },
+ { "mul.aa.hl", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_mul_aa_hl_encode_fns, 0, 0 },
+ { "mul.aa.lh", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_mul_aa_lh_encode_fns, 0, 0 },
+ { "mul.aa.hh", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_mul_aa_hh_encode_fns, 0, 0 },
+ { "umul.aa.ll", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_umul_aa_ll_encode_fns, 0, 0 },
+ { "umul.aa.hl", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_umul_aa_hl_encode_fns, 0, 0 },
+ { "umul.aa.lh", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_umul_aa_lh_encode_fns, 0, 0 },
+ { "umul.aa.hh", 178 /* xt_iclass_mac16_aa */,
+ 0,
+ Opcode_umul_aa_hh_encode_fns, 0, 0 },
+ { "mul.ad.ll", 179 /* xt_iclass_mac16_ad */,
+ 0,
+ Opcode_mul_ad_ll_encode_fns, 0, 0 },
+ { "mul.ad.hl", 179 /* xt_iclass_mac16_ad */,
+ 0,
+ Opcode_mul_ad_hl_encode_fns, 0, 0 },
+ { "mul.ad.lh", 179 /* xt_iclass_mac16_ad */,
+ 0,
+ Opcode_mul_ad_lh_encode_fns, 0, 0 },
+ { "mul.ad.hh", 179 /* xt_iclass_mac16_ad */,
+ 0,
+ Opcode_mul_ad_hh_encode_fns, 0, 0 },
+ { "mul.da.ll", 180 /* xt_iclass_mac16_da */,
+ 0,
+ Opcode_mul_da_ll_encode_fns, 0, 0 },
+ { "mul.da.hl", 180 /* xt_iclass_mac16_da */,
+ 0,
+ Opcode_mul_da_hl_encode_fns, 0, 0 },
+ { "mul.da.lh", 180 /* xt_iclass_mac16_da */,
+ 0,
+ Opcode_mul_da_lh_encode_fns, 0, 0 },
+ { "mul.da.hh", 180 /* xt_iclass_mac16_da */,
+ 0,
+ Opcode_mul_da_hh_encode_fns, 0, 0 },
+ { "mul.dd.ll", 181 /* xt_iclass_mac16_dd */,
+ 0,
+ Opcode_mul_dd_ll_encode_fns, 0, 0 },
+ { "mul.dd.hl", 181 /* xt_iclass_mac16_dd */,
+ 0,
+ Opcode_mul_dd_hl_encode_fns, 0, 0 },
+ { "mul.dd.lh", 181 /* xt_iclass_mac16_dd */,
+ 0,
+ Opcode_mul_dd_lh_encode_fns, 0, 0 },
+ { "mul.dd.hh", 181 /* xt_iclass_mac16_dd */,
+ 0,
+ Opcode_mul_dd_hh_encode_fns, 0, 0 },
+ { "mula.aa.ll", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_mula_aa_ll_encode_fns, 0, 0 },
+ { "mula.aa.hl", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_mula_aa_hl_encode_fns, 0, 0 },
+ { "mula.aa.lh", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_mula_aa_lh_encode_fns, 0, 0 },
+ { "mula.aa.hh", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_mula_aa_hh_encode_fns, 0, 0 },
+ { "muls.aa.ll", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_muls_aa_ll_encode_fns, 0, 0 },
+ { "muls.aa.hl", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_muls_aa_hl_encode_fns, 0, 0 },
+ { "muls.aa.lh", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_muls_aa_lh_encode_fns, 0, 0 },
+ { "muls.aa.hh", 182 /* xt_iclass_mac16a_aa */,
+ 0,
+ Opcode_muls_aa_hh_encode_fns, 0, 0 },
+ { "mula.ad.ll", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_mula_ad_ll_encode_fns, 0, 0 },
+ { "mula.ad.hl", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_mula_ad_hl_encode_fns, 0, 0 },
+ { "mula.ad.lh", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_mula_ad_lh_encode_fns, 0, 0 },
+ { "mula.ad.hh", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_mula_ad_hh_encode_fns, 0, 0 },
+ { "muls.ad.ll", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_muls_ad_ll_encode_fns, 0, 0 },
+ { "muls.ad.hl", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_muls_ad_hl_encode_fns, 0, 0 },
+ { "muls.ad.lh", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_muls_ad_lh_encode_fns, 0, 0 },
+ { "muls.ad.hh", 183 /* xt_iclass_mac16a_ad */,
+ 0,
+ Opcode_muls_ad_hh_encode_fns, 0, 0 },
+ { "mula.da.ll", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_mula_da_ll_encode_fns, 0, 0 },
+ { "mula.da.hl", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_mula_da_hl_encode_fns, 0, 0 },
+ { "mula.da.lh", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_mula_da_lh_encode_fns, 0, 0 },
+ { "mula.da.hh", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_mula_da_hh_encode_fns, 0, 0 },
+ { "muls.da.ll", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_muls_da_ll_encode_fns, 0, 0 },
+ { "muls.da.hl", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_muls_da_hl_encode_fns, 0, 0 },
+ { "muls.da.lh", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_muls_da_lh_encode_fns, 0, 0 },
+ { "muls.da.hh", 184 /* xt_iclass_mac16a_da */,
+ 0,
+ Opcode_muls_da_hh_encode_fns, 0, 0 },
+ { "mula.dd.ll", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_mula_dd_ll_encode_fns, 0, 0 },
+ { "mula.dd.hl", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_mula_dd_hl_encode_fns, 0, 0 },
+ { "mula.dd.lh", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_mula_dd_lh_encode_fns, 0, 0 },
+ { "mula.dd.hh", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_mula_dd_hh_encode_fns, 0, 0 },
+ { "muls.dd.ll", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_muls_dd_ll_encode_fns, 0, 0 },
+ { "muls.dd.hl", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_muls_dd_hl_encode_fns, 0, 0 },
+ { "muls.dd.lh", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_muls_dd_lh_encode_fns, 0, 0 },
+ { "muls.dd.hh", 185 /* xt_iclass_mac16a_dd */,
+ 0,
+ Opcode_muls_dd_hh_encode_fns, 0, 0 },
+ { "mula.da.ll.lddec", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_ll_lddec_encode_fns, 0, 0 },
+ { "mula.da.ll.ldinc", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hl.lddec", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_hl_lddec_encode_fns, 0, 0 },
+ { "mula.da.hl.ldinc", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.da.lh.lddec", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_lh_lddec_encode_fns, 0, 0 },
+ { "mula.da.lh.ldinc", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hh.lddec", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_hh_lddec_encode_fns, 0, 0 },
+ { "mula.da.hh.ldinc", 186 /* xt_iclass_mac16al_da */,
+ 0,
+ Opcode_mula_da_hh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.ll.lddec", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_ll_lddec_encode_fns, 0, 0 },
+ { "mula.dd.ll.ldinc", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hl.lddec", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_hl_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hl.ldinc", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.lh.lddec", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_lh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.lh.ldinc", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hh.lddec", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_hh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hh.ldinc", 187 /* xt_iclass_mac16al_dd */,
+ 0,
+ Opcode_mula_dd_hh_ldinc_encode_fns, 0, 0 },
+ { "lddec", 188 /* xt_iclass_mac16_l */,
+ 0,
+ Opcode_lddec_encode_fns, 0, 0 },
+ { "ldinc", 188 /* xt_iclass_mac16_l */,
+ 0,
+ Opcode_ldinc_encode_fns, 0, 0 },
+ { "mul16u", 189 /* xt_iclass_mul16 */,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", 189 /* xt_iclass_mul16 */,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "rsr.m0", 190 /* xt_iclass_rsr.m0 */,
+ 0,
+ Opcode_rsr_m0_encode_fns, 0, 0 },
+ { "wsr.m0", 191 /* xt_iclass_wsr.m0 */,
+ 0,
+ Opcode_wsr_m0_encode_fns, 0, 0 },
+ { "xsr.m0", 192 /* xt_iclass_xsr.m0 */,
+ 0,
+ Opcode_xsr_m0_encode_fns, 0, 0 },
+ { "rsr.m1", 193 /* xt_iclass_rsr.m1 */,
+ 0,
+ Opcode_rsr_m1_encode_fns, 0, 0 },
+ { "wsr.m1", 194 /* xt_iclass_wsr.m1 */,
+ 0,
+ Opcode_wsr_m1_encode_fns, 0, 0 },
+ { "xsr.m1", 195 /* xt_iclass_xsr.m1 */,
+ 0,
+ Opcode_xsr_m1_encode_fns, 0, 0 },
+ { "rsr.m2", 196 /* xt_iclass_rsr.m2 */,
+ 0,
+ Opcode_rsr_m2_encode_fns, 0, 0 },
+ { "wsr.m2", 197 /* xt_iclass_wsr.m2 */,
+ 0,
+ Opcode_wsr_m2_encode_fns, 0, 0 },
+ { "xsr.m2", 198 /* xt_iclass_xsr.m2 */,
+ 0,
+ Opcode_xsr_m2_encode_fns, 0, 0 },
+ { "rsr.m3", 199 /* xt_iclass_rsr.m3 */,
+ 0,
+ Opcode_rsr_m3_encode_fns, 0, 0 },
+ { "wsr.m3", 200 /* xt_iclass_wsr.m3 */,
+ 0,
+ Opcode_wsr_m3_encode_fns, 0, 0 },
+ { "xsr.m3", 201 /* xt_iclass_xsr.m3 */,
+ 0,
+ Opcode_xsr_m3_encode_fns, 0, 0 },
+ { "rsr.acclo", 202 /* xt_iclass_rsr.acclo */,
+ 0,
+ Opcode_rsr_acclo_encode_fns, 0, 0 },
+ { "wsr.acclo", 203 /* xt_iclass_wsr.acclo */,
+ 0,
+ Opcode_wsr_acclo_encode_fns, 0, 0 },
+ { "xsr.acclo", 204 /* xt_iclass_xsr.acclo */,
+ 0,
+ Opcode_xsr_acclo_encode_fns, 0, 0 },
+ { "rsr.acchi", 205 /* xt_iclass_rsr.acchi */,
+ 0,
+ Opcode_rsr_acchi_encode_fns, 0, 0 },
+ { "wsr.acchi", 206 /* xt_iclass_wsr.acchi */,
+ 0,
+ Opcode_wsr_acchi_encode_fns, 0, 0 },
+ { "xsr.acchi", 207 /* xt_iclass_xsr.acchi */,
+ 0,
+ Opcode_xsr_acchi_encode_fns, 0, 0 },
+ { "rfi", 208 /* xt_iclass_rfi */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", 209 /* xt_iclass_wait */,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", 210 /* xt_iclass_rsr.interrupt */,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", 211 /* xt_iclass_wsr.intset */,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", 212 /* xt_iclass_wsr.intclear */,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", 213 /* xt_iclass_rsr.intenable */,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", 214 /* xt_iclass_wsr.intenable */,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", 215 /* xt_iclass_xsr.intenable */,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", 216 /* xt_iclass_break */,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", 217 /* xt_iclass_break.n */,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", 218 /* xt_iclass_rsr.dbreaka0 */,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", 219 /* xt_iclass_wsr.dbreaka0 */,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", 220 /* xt_iclass_xsr.dbreaka0 */,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", 221 /* xt_iclass_rsr.dbreakc0 */,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", 222 /* xt_iclass_wsr.dbreakc0 */,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", 223 /* xt_iclass_xsr.dbreakc0 */,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", 224 /* xt_iclass_rsr.dbreaka1 */,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", 225 /* xt_iclass_wsr.dbreaka1 */,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", 226 /* xt_iclass_xsr.dbreaka1 */,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", 227 /* xt_iclass_rsr.dbreakc1 */,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", 228 /* xt_iclass_wsr.dbreakc1 */,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", 229 /* xt_iclass_xsr.dbreakc1 */,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", 230 /* xt_iclass_rsr.ibreaka0 */,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", 231 /* xt_iclass_wsr.ibreaka0 */,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", 232 /* xt_iclass_xsr.ibreaka0 */,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", 233 /* xt_iclass_rsr.ibreaka1 */,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", 234 /* xt_iclass_wsr.ibreaka1 */,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", 235 /* xt_iclass_xsr.ibreaka1 */,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", 236 /* xt_iclass_rsr.ibreakenable */,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", 237 /* xt_iclass_wsr.ibreakenable */,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", 238 /* xt_iclass_xsr.ibreakenable */,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", 239 /* xt_iclass_rsr.debugcause */,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", 240 /* xt_iclass_wsr.debugcause */,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", 241 /* xt_iclass_xsr.debugcause */,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", 242 /* xt_iclass_rsr.icount */,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", 243 /* xt_iclass_wsr.icount */,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", 244 /* xt_iclass_xsr.icount */,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", 245 /* xt_iclass_rsr.icountlevel */,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", 246 /* xt_iclass_wsr.icountlevel */,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", 247 /* xt_iclass_xsr.icountlevel */,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", 248 /* xt_iclass_rsr.ddr */,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", 249 /* xt_iclass_wsr.ddr */,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", 250 /* xt_iclass_xsr.ddr */,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "rfdo", 251 /* xt_iclass_rfdo */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", 252 /* xt_iclass_rfdd */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", 253 /* xt_iclass_wsr.mmid */,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "rsr.ccount", 254 /* xt_iclass_rsr.ccount */,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", 255 /* xt_iclass_wsr.ccount */,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", 256 /* xt_iclass_xsr.ccount */,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", 257 /* xt_iclass_rsr.ccompare0 */,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", 258 /* xt_iclass_wsr.ccompare0 */,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", 259 /* xt_iclass_xsr.ccompare0 */,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", 260 /* xt_iclass_rsr.ccompare1 */,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", 261 /* xt_iclass_wsr.ccompare1 */,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", 262 /* xt_iclass_xsr.ccompare1 */,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", 263 /* xt_iclass_rsr.ccompare2 */,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", 264 /* xt_iclass_wsr.ccompare2 */,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", 265 /* xt_iclass_xsr.ccompare2 */,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ipf", 266 /* xt_iclass_icache */,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", 266 /* xt_iclass_icache */,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipfl", 267 /* xt_iclass_icache_lock */,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "ihu", 267 /* xt_iclass_icache_lock */,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", 267 /* xt_iclass_icache_lock */,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "iii", 268 /* xt_iclass_icache_inv */,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", 269 /* xt_iclass_licx */,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", 269 /* xt_iclass_licx */,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", 270 /* xt_iclass_sicx */,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", 270 /* xt_iclass_sicx */,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", 271 /* xt_iclass_dcache */,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", 271 /* xt_iclass_dcache */,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwb", 272 /* xt_iclass_dcache_ind */,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", 272 /* xt_iclass_dcache_ind */,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", 273 /* xt_iclass_dcache_inv */,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", 273 /* xt_iclass_dcache_inv */,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", 274 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", 274 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", 274 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", 274 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dpfl", 275 /* xt_iclass_dcache_lock */,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "dhu", 275 /* xt_iclass_dcache_lock */,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", 275 /* xt_iclass_dcache_lock */,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "sdct", 276 /* xt_iclass_sdct */,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", 277 /* xt_iclass_ldct */,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "wsr.ptevaddr", 278 /* xt_iclass_wsr.ptevaddr */,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", 279 /* xt_iclass_rsr.ptevaddr */,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", 280 /* xt_iclass_xsr.ptevaddr */,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", 281 /* xt_iclass_rsr.rasid */,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", 282 /* xt_iclass_wsr.rasid */,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", 283 /* xt_iclass_xsr.rasid */,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", 284 /* xt_iclass_rsr.itlbcfg */,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", 285 /* xt_iclass_wsr.itlbcfg */,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", 286 /* xt_iclass_xsr.itlbcfg */,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", 287 /* xt_iclass_rsr.dtlbcfg */,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", 288 /* xt_iclass_wsr.dtlbcfg */,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", 289 /* xt_iclass_xsr.dtlbcfg */,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", 290 /* xt_iclass_idtlb */,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", 291 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", 291 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", 291 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", 292 /* xt_iclass_wdtlb */,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", 293 /* xt_iclass_iitlb */,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", 294 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", 294 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", 294 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", 295 /* xt_iclass_witlb */,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", 296 /* xt_iclass_ldpte */,
+ 0,
+ Opcode_ldpte_encode_fns, 0, 0 },
+ { "hwwitlba", 297 /* xt_iclass_hwwitlba */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", 298 /* xt_iclass_hwwdtlba */,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "rsr.cpenable", 299 /* xt_iclass_rsr.cpenable */,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", 300 /* xt_iclass_wsr.cpenable */,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", 301 /* xt_iclass_xsr.cpenable */,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", 302 /* xt_iclass_clamp */,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", 303 /* xt_iclass_minmax */,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", 303 /* xt_iclass_minmax */,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", 303 /* xt_iclass_minmax */,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", 303 /* xt_iclass_minmax */,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", 304 /* xt_iclass_nsa */,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", 304 /* xt_iclass_nsa */,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", 305 /* xt_iclass_sx */,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", 306 /* xt_iclass_l32ai */,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", 307 /* xt_iclass_s32ri */,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", 308 /* xt_iclass_s32c1i */,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", 309 /* xt_iclass_rsr.scompare1 */,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", 310 /* xt_iclass_wsr.scompare1 */,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", 311 /* xt_iclass_xsr.scompare1 */,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "quou", 312 /* xt_iclass_div */,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "quos", 312 /* xt_iclass_div */,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "remu", 312 /* xt_iclass_div */,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rems", 312 /* xt_iclass_div */,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "mull", 313 /* xt_mul32 */,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "rur.expstate", 314 /* rur_expstate */,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", 315 /* wur_expstate */,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "read_impwire", 316 /* iclass_READ_IMPWIRE */,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", 317 /* iclass_SETB_EXPSTATE */,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", 318 /* iclass_CLRB_EXPSTATE */,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", 319 /* iclass_WRMSK_EXPSTATE */,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 }
+};
+
+
+/* Slot-specific opcode decode functions. */
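+/* Each decoder walks nested switches over the slot's opcode fields
+ (op0, then op1/op2/r/s/t as needed) and returns the index of the
+ matched entry in the opcodes[] table above; the mnemonic is repeated
+ in a comment next to each return value. */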
+
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return 79; /* ill */
+ break;
+ case 2:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 98; /* ret */
+ case 1:
+ return 14; /* retw */
+ case 2:
+ return 81; /* jx */
+ }
+ break;
+ case 3:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 77; /* callx0 */
+ case 1:
+ return 10; /* callx4 */
+ case 2:
+ return 9; /* callx8 */
+ case 3:
+ return 8; /* callx12 */
+ }
+ break;
+ }
+ break;
+ case 1:
+ return 12; /* movsp */
+ case 2:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return 116; /* isync */
+ case 1:
+ return 117; /* rsync */
+ case 2:
+ return 118; /* esync */
+ case 3:
+ return 119; /* dsync */
+ case 8:
+ return 0; /* excw */
+ case 12:
+ return 114; /* memw */
+ case 13:
+ return 115; /* extw */
+ case 15:
+ return 97; /* nop */
+ }
+ }
+ break;
+ case 3:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return 1; /* rfe */
+ case 2:
+ return 2; /* rfde */
+ case 4:
+ return 16; /* rfwo */
+ case 5:
+ return 17; /* rfwu */
+ }
+ break;
+ case 1:
+ return 310; /* rfi */
+ }
+ break;
+ case 4:
+ return 318; /* break */
+ case 5:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 3; /* syscall */
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 4; /* simcall */
+ break;
+ }
+ break;
+ case 6:
+ return 120; /* rsil */
+ case 7:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 311; /* waiti */
+ break;
+ }
+ break;
+ case 1:
+ return 49; /* and */
+ case 2:
+ return 50; /* or */
+ case 3:
+ return 51; /* xor */
+ case 4:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 102; /* ssr */
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 103; /* ssl */
+ break;
+ case 2:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 104; /* ssa8l */
+ break;
+ case 3:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 105; /* ssa8b */
+ break;
+ case 4:
+ if (Field_thi3_Slot_inst_get (insn) == 0)
+ return 106; /* ssai */
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 13; /* rotw */
+ break;
+ case 14:
+ return 426; /* nsa */
+ case 15:
+ return 427; /* nsau */
+ }
+ break;
+ case 5:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 1:
+ return 416; /* hwwitlba */
+ case 3:
+ return 412; /* ritlb0 */
+ case 4:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 410; /* iitlb */
+ break;
+ case 5:
+ return 411; /* pitlb */
+ case 6:
+ return 414; /* witlb */
+ case 7:
+ return 413; /* ritlb1 */
+ case 9:
+ return 417; /* hwwdtlba */
+ case 11:
+ return 407; /* rdtlb0 */
+ case 12:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 405; /* idtlb */
+ break;
+ case 13:
+ return 406; /* pdtlb */
+ case 14:
+ return 409; /* wdtlb */
+ case 15:
+ return 408; /* rdtlb1 */
+ }
+ break;
+ case 6:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return 95; /* neg */
+ case 1:
+ return 96; /* abs */
+ }
+ break;
+ case 8:
+ return 41; /* add */
+ case 9:
+ return 43; /* addx2 */
+ case 10:
+ return 44; /* addx4 */
+ case 11:
+ return 45; /* addx8 */
+ case 12:
+ return 42; /* sub */
+ case 13:
+ return 46; /* subx2 */
+ case 14:
+ return 47; /* subx4 */
+ case 15:
+ return 48; /* subx8 */
+ }
+ break;
+ case 1:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ case 1:
+ return 111; /* slli */
+ case 2:
+ case 3:
+ return 112; /* srai */
+ case 4:
+ return 113; /* srli */
+ case 6:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
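+ /* The sr field holds the architectural special-register number
+ (e.g. SAR = 3, PS = 230, EPC1 = 177); the same numbering applies
+ in the rsr and wsr switches below. */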
+ case 0:
+ return 129; /* xsr.lbeg */
+ case 1:
+ return 123; /* xsr.lend */
+ case 2:
+ return 126; /* xsr.lcount */
+ case 3:
+ return 132; /* xsr.sar */
+ case 5:
+ return 135; /* xsr.litbase */
+ case 12:
+ return 434; /* xsr.scompare1 */
+ case 16:
+ return 306; /* xsr.acclo */
+ case 17:
+ return 309; /* xsr.acchi */
+ case 32:
+ return 294; /* xsr.m0 */
+ case 33:
+ return 297; /* xsr.m1 */
+ case 34:
+ return 300; /* xsr.m2 */
+ case 35:
+ return 303; /* xsr.m3 */
+ case 72:
+ return 22; /* xsr.windowbase */
+ case 73:
+ return 25; /* xsr.windowstart */
+ case 83:
+ return 395; /* xsr.ptevaddr */
+ case 90:
+ return 398; /* xsr.rasid */
+ case 91:
+ return 401; /* xsr.itlbcfg */
+ case 92:
+ return 404; /* xsr.dtlbcfg */
+ case 96:
+ return 340; /* xsr.ibreakenable */
+ case 104:
+ return 352; /* xsr.ddr */
+ case 128:
+ return 334; /* xsr.ibreaka0 */
+ case 129:
+ return 337; /* xsr.ibreaka1 */
+ case 144:
+ return 322; /* xsr.dbreaka0 */
+ case 145:
+ return 328; /* xsr.dbreaka1 */
+ case 160:
+ return 325; /* xsr.dbreakc0 */
+ case 161:
+ return 331; /* xsr.dbreakc1 */
+ case 177:
+ return 143; /* xsr.epc1 */
+ case 178:
+ return 149; /* xsr.epc2 */
+ case 179:
+ return 155; /* xsr.epc3 */
+ case 180:
+ return 161; /* xsr.epc4 */
+ case 181:
+ return 167; /* xsr.epc5 */
+ case 182:
+ return 173; /* xsr.epc6 */
+ case 183:
+ return 179; /* xsr.epc7 */
+ case 192:
+ return 206; /* xsr.depc */
+ case 194:
+ return 185; /* xsr.eps2 */
+ case 195:
+ return 188; /* xsr.eps3 */
+ case 196:
+ return 191; /* xsr.eps4 */
+ case 197:
+ return 194; /* xsr.eps5 */
+ case 198:
+ return 197; /* xsr.eps6 */
+ case 199:
+ return 200; /* xsr.eps7 */
+ case 209:
+ return 146; /* xsr.excsave1 */
+ case 210:
+ return 152; /* xsr.excsave2 */
+ case 211:
+ return 158; /* xsr.excsave3 */
+ case 212:
+ return 164; /* xsr.excsave4 */
+ case 213:
+ return 170; /* xsr.excsave5 */
+ case 214:
+ return 176; /* xsr.excsave6 */
+ case 215:
+ return 182; /* xsr.excsave7 */
+ case 224:
+ return 420; /* xsr.cpenable */
+ case 228:
+ return 317; /* xsr.intenable */
+ case 230:
+ return 140; /* xsr.ps */
+ case 231:
+ return 219; /* xsr.vecbase */
+ case 232:
+ return 209; /* xsr.exccause */
+ case 233:
+ return 343; /* xsr.debugcause */
+ case 234:
+ return 358; /* xsr.ccount */
+ case 236:
+ return 346; /* xsr.icount */
+ case 237:
+ return 349; /* xsr.icountlevel */
+ case 238:
+ return 203; /* xsr.excvaddr */
+ case 240:
+ return 361; /* xsr.ccompare0 */
+ case 241:
+ return 364; /* xsr.ccompare1 */
+ case 242:
+ return 367; /* xsr.ccompare2 */
+ case 244:
+ return 212; /* xsr.misc0 */
+ case 245:
+ return 215; /* xsr.misc1 */
+ }
+ break;
+ case 8:
+ return 108; /* src */
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 109; /* srl */
+ break;
+ case 10:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 107; /* sll */
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 110; /* sra */
+ break;
+ case 12:
+ return 290; /* mul16u */
+ case 13:
+ return 291; /* mul16s */
+ case 15:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 374; /* lict */
+ case 1:
+ return 376; /* sict */
+ case 2:
+ return 375; /* licw */
+ case 3:
+ return 377; /* sicw */
+ case 8:
+ return 392; /* ldct */
+ case 9:
+ return 391; /* sdct */
+ case 14:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 353; /* rfdo */
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return 354; /* rfdd */
+ break;
+ case 15:
+ return 415; /* ldpte */
+ }
+ break;
+ }
+ break;
+ case 2:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 8:
+ return 439; /* mull */
+ case 12:
+ return 435; /* quou */
+ case 13:
+ return 436; /* quos */
+ case 14:
+ return 437; /* remu */
+ case 15:
+ return 438; /* rems */
+ }
+ break;
+ case 3:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return 127; /* rsr.lbeg */
+ case 1:
+ return 121; /* rsr.lend */
+ case 2:
+ return 124; /* rsr.lcount */
+ case 3:
+ return 130; /* rsr.sar */
+ case 5:
+ return 133; /* rsr.litbase */
+ case 12:
+ return 432; /* rsr.scompare1 */
+ case 16:
+ return 304; /* rsr.acclo */
+ case 17:
+ return 307; /* rsr.acchi */
+ case 32:
+ return 292; /* rsr.m0 */
+ case 33:
+ return 295; /* rsr.m1 */
+ case 34:
+ return 298; /* rsr.m2 */
+ case 35:
+ return 301; /* rsr.m3 */
+ case 72:
+ return 20; /* rsr.windowbase */
+ case 73:
+ return 23; /* rsr.windowstart */
+ case 83:
+ return 394; /* rsr.ptevaddr */
+ case 90:
+ return 396; /* rsr.rasid */
+ case 91:
+ return 399; /* rsr.itlbcfg */
+ case 92:
+ return 402; /* rsr.dtlbcfg */
+ case 96:
+ return 338; /* rsr.ibreakenable */
+ case 104:
+ return 350; /* rsr.ddr */
+ case 128:
+ return 332; /* rsr.ibreaka0 */
+ case 129:
+ return 335; /* rsr.ibreaka1 */
+ case 144:
+ return 320; /* rsr.dbreaka0 */
+ case 145:
+ return 326; /* rsr.dbreaka1 */
+ case 160:
+ return 323; /* rsr.dbreakc0 */
+ case 161:
+ return 329; /* rsr.dbreakc1 */
+ case 176:
+ return 136; /* rsr.176 */
+ case 177:
+ return 141; /* rsr.epc1 */
+ case 178:
+ return 147; /* rsr.epc2 */
+ case 179:
+ return 153; /* rsr.epc3 */
+ case 180:
+ return 159; /* rsr.epc4 */
+ case 181:
+ return 165; /* rsr.epc5 */
+ case 182:
+ return 171; /* rsr.epc6 */
+ case 183:
+ return 177; /* rsr.epc7 */
+ case 192:
+ return 204; /* rsr.depc */
+ case 194:
+ return 183; /* rsr.eps2 */
+ case 195:
+ return 186; /* rsr.eps3 */
+ case 196:
+ return 189; /* rsr.eps4 */
+ case 197:
+ return 192; /* rsr.eps5 */
+ case 198:
+ return 195; /* rsr.eps6 */
+ case 199:
+ return 198; /* rsr.eps7 */
+ case 208:
+ return 137; /* rsr.208 */
+ case 209:
+ return 144; /* rsr.excsave1 */
+ case 210:
+ return 150; /* rsr.excsave2 */
+ case 211:
+ return 156; /* rsr.excsave3 */
+ case 212:
+ return 162; /* rsr.excsave4 */
+ case 213:
+ return 168; /* rsr.excsave5 */
+ case 214:
+ return 174; /* rsr.excsave6 */
+ case 215:
+ return 180; /* rsr.excsave7 */
+ case 224:
+ return 418; /* rsr.cpenable */
+ case 226:
+ return 312; /* rsr.interrupt */
+ case 228:
+ return 315; /* rsr.intenable */
+ case 230:
+ return 138; /* rsr.ps */
+ case 231:
+ return 217; /* rsr.vecbase */
+ case 232:
+ return 207; /* rsr.exccause */
+ case 233:
+ return 341; /* rsr.debugcause */
+ case 234:
+ return 356; /* rsr.ccount */
+ case 235:
+ return 216; /* rsr.prid */
+ case 236:
+ return 344; /* rsr.icount */
+ case 237:
+ return 347; /* rsr.icountlevel */
+ case 238:
+ return 201; /* rsr.excvaddr */
+ case 240:
+ return 359; /* rsr.ccompare0 */
+ case 241:
+ return 362; /* rsr.ccompare1 */
+ case 242:
+ return 365; /* rsr.ccompare2 */
+ case 244:
+ return 210; /* rsr.misc0 */
+ case 245:
+ return 213; /* rsr.misc1 */
+ }
+ break;
+ case 1:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return 128; /* wsr.lbeg */
+ case 1:
+ return 122; /* wsr.lend */
+ case 2:
+ return 125; /* wsr.lcount */
+ case 3:
+ return 131; /* wsr.sar */
+ case 5:
+ return 134; /* wsr.litbase */
+ case 12:
+ return 433; /* wsr.scompare1 */
+ case 16:
+ return 305; /* wsr.acclo */
+ case 17:
+ return 308; /* wsr.acchi */
+ case 32:
+ return 293; /* wsr.m0 */
+ case 33:
+ return 296; /* wsr.m1 */
+ case 34:
+ return 299; /* wsr.m2 */
+ case 35:
+ return 302; /* wsr.m3 */
+ case 72:
+ return 21; /* wsr.windowbase */
+ case 73:
+ return 24; /* wsr.windowstart */
+ case 83:
+ return 393; /* wsr.ptevaddr */
+ case 89:
+ return 355; /* wsr.mmid */
+ case 90:
+ return 397; /* wsr.rasid */
+ case 91:
+ return 400; /* wsr.itlbcfg */
+ case 92:
+ return 403; /* wsr.dtlbcfg */
+ case 96:
+ return 339; /* wsr.ibreakenable */
+ case 104:
+ return 351; /* wsr.ddr */
+ case 128:
+ return 333; /* wsr.ibreaka0 */
+ case 129:
+ return 336; /* wsr.ibreaka1 */
+ case 144:
+ return 321; /* wsr.dbreaka0 */
+ case 145:
+ return 327; /* wsr.dbreaka1 */
+ case 160:
+ return 324; /* wsr.dbreakc0 */
+ case 161:
+ return 330; /* wsr.dbreakc1 */
+ case 177:
+ return 142; /* wsr.epc1 */
+ case 178:
+ return 148; /* wsr.epc2 */
+ case 179:
+ return 154; /* wsr.epc3 */
+ case 180:
+ return 160; /* wsr.epc4 */
+ case 181:
+ return 166; /* wsr.epc5 */
+ case 182:
+ return 172; /* wsr.epc6 */
+ case 183:
+ return 178; /* wsr.epc7 */
+ case 192:
+ return 205; /* wsr.depc */
+ case 194:
+ return 184; /* wsr.eps2 */
+ case 195:
+ return 187; /* wsr.eps3 */
+ case 196:
+ return 190; /* wsr.eps4 */
+ case 197:
+ return 193; /* wsr.eps5 */
+ case 198:
+ return 196; /* wsr.eps6 */
+ case 199:
+ return 199; /* wsr.eps7 */
+ case 209:
+ return 145; /* wsr.excsave1 */
+ case 210:
+ return 151; /* wsr.excsave2 */
+ case 211:
+ return 157; /* wsr.excsave3 */
+ case 212:
+ return 163; /* wsr.excsave4 */
+ case 213:
+ return 169; /* wsr.excsave5 */
+ case 214:
+ return 175; /* wsr.excsave6 */
+ case 215:
+ return 181; /* wsr.excsave7 */
+ case 224:
+ return 419; /* wsr.cpenable */
+ case 226:
+ return 313; /* wsr.intset */
+ case 227:
+ return 314; /* wsr.intclear */
+ case 228:
+ return 316; /* wsr.intenable */
+ case 230:
+ return 139; /* wsr.ps */
+ case 231:
+ return 218; /* wsr.vecbase */
+ case 232:
+ return 208; /* wsr.exccause */
+ case 233:
+ return 342; /* wsr.debugcause */
+ case 234:
+ return 357; /* wsr.ccount */
+ case 236:
+ return 345; /* wsr.icount */
+ case 237:
+ return 348; /* wsr.icountlevel */
+ case 238:
+ return 202; /* wsr.excvaddr */
+ case 240:
+ return 360; /* wsr.ccompare0 */
+ case 241:
+ return 363; /* wsr.ccompare1 */
+ case 242:
+ return 366; /* wsr.ccompare2 */
+ case 244:
+ return 211; /* wsr.misc0 */
+ case 245:
+ return 214; /* wsr.misc1 */
+ }
+ break;
+ case 2:
+ return 428; /* sext */
+ case 3:
+ return 421; /* clamps */
+ case 4:
+ return 422; /* min */
+ case 5:
+ return 423; /* max */
+ case 6:
+ return 424; /* minu */
+ case 7:
+ return 425; /* maxu */
+ case 8:
+ return 91; /* moveqz */
+ case 9:
+ return 92; /* movnez */
+ case 10:
+ return 93; /* movltz */
+ case 11:
+ return 94; /* movgez */
+ case 14:
+ switch (Field_st_Slot_inst_get (insn))
+ {
+ case 230:
+ return 440; /* rur.expstate */
+ case 231:
+ return 37; /* rur.threadptr */
+ }
+ break;
+ case 15:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 230:
+ return 441; /* wur.expstate */
+ case 231:
+ return 38; /* wur.threadptr */
+ }
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ return 78; /* extui */
+ case 9:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return 18; /* l32e */
+ case 4:
+ return 19; /* s32e */
+ }
+ break;
+ }
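+ /* The EXPSTATE/impwire opcodes (apparently a user TIE extension,
+ judging by their iclass names) are matched here once the core
+ op1/op2 dispatch above has fallen through. */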
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return 442; /* read_impwire */
+ break;
+ case 1:
+ if (Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return 443; /* setb_expstate */
+ if (Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return 444; /* clrb_expstate */
+ break;
+ case 2:
+ if (Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return 445; /* wrmsk_expstate */
+ break;
+ }
+ break;
+ case 1:
+ return 85; /* l32r */
+ case 2:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 86; /* l8ui */
+ case 1:
+ return 82; /* l16ui */
+ case 2:
+ return 84; /* l32i */
+ case 4:
+ return 101; /* s8i */
+ case 5:
+ return 99; /* s16i */
+ case 6:
+ return 100; /* s32i */
+ case 7:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return 384; /* dpfr */
+ case 1:
+ return 385; /* dpfw */
+ case 2:
+ return 386; /* dpfro */
+ case 3:
+ return 387; /* dpfwo */
+ case 4:
+ return 378; /* dhwb */
+ case 5:
+ return 379; /* dhwbi */
+ case 6:
+ return 382; /* dhi */
+ case 7:
+ return 383; /* dii */
+ case 8:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return 388; /* dpfl */
+ case 2:
+ return 389; /* dhu */
+ case 3:
+ return 390; /* diu */
+ case 4:
+ return 380; /* diwb */
+ case 5:
+ return 381; /* diwbi */
+ }
+ break;
+ case 12:
+ return 368; /* ipf */
+ case 13:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return 370; /* ipfl */
+ case 2:
+ return 371; /* ihu */
+ case 3:
+ return 372; /* iiu */
+ }
+ break;
+ case 14:
+ return 369; /* ihi */
+ case 15:
+ return 373; /* iii */
+ }
+ break;
+ case 9:
+ return 83; /* l16si */
+ case 10:
+ return 90; /* movi */
+ case 11:
+ return 429; /* l32ai */
+ case 12:
+ return 39; /* addi */
+ case 13:
+ return 40; /* addmi */
+ case 14:
+ return 431; /* s32c1i */
+ case 15:
+ return 430; /* s32ri */
+ }
+ break;
+ case 4:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 281; /* mula.dd.ll.ldinc */
+ break;
+ case 9:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 283; /* mula.dd.hl.ldinc */
+ break;
+ case 10:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 285; /* mula.dd.lh.ldinc */
+ break;
+ case 11:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 287; /* mula.dd.hh.ldinc */
+ break;
+ }
+ break;
+ case 1:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 280; /* mula.dd.ll.lddec */
+ break;
+ case 9:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 282; /* mula.dd.hl.lddec */
+ break;
+ case 10:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 284; /* mula.dd.lh.lddec */
+ break;
+ case 11:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 286; /* mula.dd.hh.lddec */
+ break;
+ }
+ break;
+ case 2:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 236; /* mul.dd.ll */
+ break;
+ case 5:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 237; /* mul.dd.hl */
+ break;
+ case 6:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 238; /* mul.dd.lh */
+ break;
+ case 7:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 239; /* mul.dd.hh */
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 264; /* mula.dd.ll */
+ break;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 265; /* mula.dd.hl */
+ break;
+ case 10:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 266; /* mula.dd.lh */
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 267; /* mula.dd.hh */
+ break;
+ case 12:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 268; /* muls.dd.ll */
+ break;
+ case 13:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 269; /* muls.dd.hl */
+ break;
+ case 14:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 270; /* muls.dd.lh */
+ break;
+ case 15:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 271; /* muls.dd.hh */
+ break;
+ }
+ break;
+ case 3:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 228; /* mul.ad.ll */
+ break;
+ case 5:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 229; /* mul.ad.hl */
+ break;
+ case 6:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 230; /* mul.ad.lh */
+ break;
+ case 7:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 231; /* mul.ad.hh */
+ break;
+ case 8:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 248; /* mula.ad.ll */
+ break;
+ case 9:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 249; /* mula.ad.hl */
+ break;
+ case 10:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 250; /* mula.ad.lh */
+ break;
+ case 11:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 251; /* mula.ad.hh */
+ break;
+ case 12:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 252; /* muls.ad.ll */
+ break;
+ case 13:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 253; /* muls.ad.hl */
+ break;
+ case 14:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 254; /* muls.ad.lh */
+ break;
+ case 15:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return 255; /* muls.ad.hh */
+ break;
+ }
+ break;
+ case 4:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 273; /* mula.da.ll.ldinc */
+ break;
+ case 9:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 275; /* mula.da.hl.ldinc */
+ break;
+ case 10:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 277; /* mula.da.lh.ldinc */
+ break;
+ case 11:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 279; /* mula.da.hh.ldinc */
+ break;
+ }
+ break;
+ case 5:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 272; /* mula.da.ll.lddec */
+ break;
+ case 9:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 274; /* mula.da.hl.lddec */
+ break;
+ case 10:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 276; /* mula.da.lh.lddec */
+ break;
+ case 11:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return 278; /* mula.da.hh.lddec */
+ break;
+ }
+ break;
+ case 6:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 232; /* mul.da.ll */
+ break;
+ case 5:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 233; /* mul.da.hl */
+ break;
+ case 6:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 234; /* mul.da.lh */
+ break;
+ case 7:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 235; /* mul.da.hh */
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 256; /* mula.da.ll */
+ break;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 257; /* mula.da.hl */
+ break;
+ case 10:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 258; /* mula.da.lh */
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 259; /* mula.da.hh */
+ break;
+ case 12:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 260; /* muls.da.ll */
+ break;
+ case 13:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 261; /* muls.da.hl */
+ break;
+ case 14:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 262; /* muls.da.lh */
+ break;
+ case 15:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return 263; /* muls.da.hh */
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 224; /* umul.aa.ll */
+ break;
+ case 1:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 225; /* umul.aa.hl */
+ break;
+ case 2:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 226; /* umul.aa.lh */
+ break;
+ case 3:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 227; /* umul.aa.hh */
+ break;
+ case 4:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 220; /* mul.aa.ll */
+ break;
+ case 5:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 221; /* mul.aa.hl */
+ break;
+ case 6:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 222; /* mul.aa.lh */
+ break;
+ case 7:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 223; /* mul.aa.hh */
+ break;
+ case 8:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 240; /* mula.aa.ll */
+ break;
+ case 9:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 241; /* mula.aa.hl */
+ break;
+ case 10:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 242; /* mula.aa.lh */
+ break;
+ case 11:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 243; /* mula.aa.hh */
+ break;
+ case 12:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 244; /* muls.aa.ll */
+ break;
+ case 13:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 245; /* muls.aa.hl */
+ break;
+ case 14:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 246; /* muls.aa.lh */
+ break;
+ case 15:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return 247; /* muls.aa.hh */
+ break;
+ }
+ break;
+ case 8:
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return 289; /* ldinc */
+ break;
+ case 9:
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return 288; /* lddec */
+ break;
+ }
+ break;
+ case 5:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 76; /* call0 */
+ case 1:
+ return 7; /* call4 */
+ case 2:
+ return 6; /* call8 */
+ case 3:
+ return 5; /* call12 */
+ }
+ break;
+ case 6:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 80; /* j */
+ case 1:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 72; /* beqz */
+ case 1:
+ return 73; /* bnez */
+ case 2:
+ return 75; /* bltz */
+ case 3:
+ return 74; /* bgez */
+ }
+ break;
+ case 2:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 52; /* beqi */
+ case 1:
+ return 53; /* bnei */
+ case 2:
+ return 55; /* blti */
+ case 3:
+ return 54; /* bgei */
+ }
+ break;
+ case 3:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 11; /* entry */
+ case 1:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 8:
+ return 87; /* loop */
+ case 9:
+ return 88; /* loopnez */
+ case 10:
+ return 89; /* loopgtz */
+ }
+ break;
+ case 2:
+ return 59; /* bltui */
+ case 3:
+ return 58; /* bgeui */
+ }
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 67; /* bnone */
+ case 1:
+ return 60; /* beq */
+ case 2:
+ return 63; /* blt */
+ case 3:
+ return 65; /* bltu */
+ case 4:
+ return 68; /* ball */
+ case 5:
+ return 70; /* bbc */
+ case 6:
+ case 7:
+ return 56; /* bbci */
+ case 8:
+ return 66; /* bany */
+ case 9:
+ return 61; /* bne */
+ case 10:
+ return 62; /* bge */
+ case 11:
+ return 64; /* bgeu */
+ case 12:
+ return 69; /* bnall */
+ case 13:
+ return 71; /* bbs */
+ case 14:
+ case 15:
+ return 57; /* bbsi */
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16b_get (insn))
+ {
+ case 12:
+ switch (Field_i_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 33; /* movi.n */
+ case 1:
+ switch (Field_z_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 28; /* beqz.n */
+ case 1:
+ return 29; /* bnez.n */
+ }
+ break;
+ }
+ break;
+ case 13:
+ switch (Field_r_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 32; /* mov.n */
+ case 15:
+ switch (Field_t_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 35; /* ret.n */
+ case 1:
+ return 15; /* retw.n */
+ case 2:
+ return 319; /* break.n */
+ case 3:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return 34; /* nop.n */
+ break;
+ case 6:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return 30; /* ill.n */
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16a_get (insn))
+ {
+ case 8:
+ return 31; /* l32i.n */
+ case 9:
+ return 36; /* s32i.n */
+ case 10:
+ return 26; /* add.n */
+ case 11:
+ return 27; /* addi.n */
+ }
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_get,
+ Field_rbit2_Slot_inst_get,
+ Field_rhi_Slot_inst_get,
+ Field_t3_Slot_inst_get,
+ Field_tbit2_Slot_inst_get,
+ Field_tlo_Slot_inst_get,
+ Field_w_Slot_inst_get,
+ Field_y_Slot_inst_get,
+ Field_x_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_set,
+ Field_rbit2_Slot_inst_set,
+ Field_rhi_Slot_inst_set,
+ Field_t3_Slot_inst_set,
+ Field_tbit2_Slot_inst_set,
+ Field_tlo_Slot_inst_set,
+ Field_w_Slot_inst_set,
+ Field_y_Slot_inst_set,
+ Field_x_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_get,
+ Field_s3to1_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_set,
+ Field_s3to1_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_get,
+ Field_s3to1_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_set,
+ Field_s3to1_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc)
+ return 2; /* x16b */
+ return -1;
+}
+
+static int length_table[16] = {
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int op0 = insn[0] & 0xf;
+ return length_table[op0];
+}
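+
+/*
+ * Illustrative note (editorial, not generated content): op0 is the low
+ * nibble of the first instruction byte.  A first byte with op0 == 10
+ * selects the 2-byte x16a format (length_table[10] == 2), and
+ * Slot_inst16a_decode() then maps op0 == 10 to add.n; op0 values 0..7
+ * select the 3-byte x24 format, while 14 and 15 are undefined (-1).
+ */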
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 3 /* insn_size */, 0,
+ 3, formats, format_decoder, length_decoder,
+ 3, slots,
+ 56 /* num_fields */,
+ 93, operands,
+ 320, iclasses,
+ 446, opcodes, 0,
+ 2, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 1, interfaces, 0,
+ 0, funcUnits, 0
+};
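+
+/* Editorial cross-check: the opcode count above (446) is consistent with
+   the decode functions earlier in this file, whose largest returned
+   opcode index is 445 (wrmsk_expstate). */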
diff --git a/target/xtensa/core-dc233c.c b/target/xtensa/core-dc233c.c
new file mode 100644
index 000000000..595ab9a90
--- /dev/null
+++ b/target/xtensa/core-dc233c.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-dc233c/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_dc233c
+#include "core-dc233c/xtensa-modules.c.inc"
+
+static XtensaConfig dc233c __attribute__((unused)) = {
+ .name = "dc233c",
+ .gdb_regmap = {
+ .reg = {
+#include "core-dc233c/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc233c)
diff --git a/target/xtensa/core-dc233c/core-isa.h b/target/xtensa/core-dc233c/core-isa.h
new file mode 100644
index 000000000..ff92b7f3e
--- /dev/null
+++ b/target/xtensa/core-dc233c/core-isa.h
@@ -0,0 +1,473 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_DC233C_CORE_ISA_H
+#define XTENSA_DC233C_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
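+/*
+ * A minimal usage sketch (editorial illustration; XCHAL_HAVE_DENSITY is
+ * one of the option macros defined below):
+ *
+ *     #if XCHAL_HAVE_DENSITY
+ *         ...code that emits 16-bit density instructions...
+ *     #endif
+ *
+ * Because the macros are always defined, plain #if is safe; no #ifdef
+ * guard is needed.
+ */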
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc. */
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+                                  (CoreID) set in the Xtensa
+                                  Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
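+
+/* Worked example (editorial note) of the size identity above: with a
+   line width of 5, a set width of 7 and 4 ways, each cache holds
+   4 * 2^(5 + 7) = 4 * 4096 = 16384 bytes, matching XCHAL_ICACHE_SIZE
+   and XCHAL_DCACHE_SIZE. */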
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+                                 (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
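+
+/* Editorial cross-check: each level mask is the OR of the interrupts
+   assigned that level below; e.g. level 1 covers interrupts 0-7 and
+   15-20, i.e. 0x000000FF | 0x001F8000 == 0x001F80FF. */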
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+                            EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (Levels 1 and 3 each have more than one interrupt configured.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, e.g. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+                               number: 1 == XEA1 (old)
+                               2 == XEA2 (new)
+                               0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+                                [autorefill] and protection)
+                                usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC233C_CORE_ISA_H */
diff --git a/target/xtensa/core-dc233c/gdb-config.c.inc b/target/xtensa/core-dc233c/gdb-config.c.inc
new file mode 100644
index 000000000..7e8963227
--- /dev/null
+++ b/target/xtensa/core-dc233c/gdb-config.c.inc
@@ -0,0 +1,146 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* idx ofs bi sz al targno flags cp typ group name */
+XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc, 0, 0, 0, 0, 0, 0)
+XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0, 0, 0, 0, 0, 0, 0)
+XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1, 0, 0, 0, 0, 0, 0)
+XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2, 0, 0, 0, 0, 0, 0)
+XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3, 0, 0, 0, 0, 0, 0)
+XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4, 0, 0, 0, 0, 0, 0)
+XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5, 0, 0, 0, 0, 0, 0)
+XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6, 0, 0, 0, 0, 0, 0)
+XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7, 0, 0, 0, 0, 0, 0)
+XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8, 0, 0, 0, 0, 0, 0)
+XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9, 0, 0, 0, 0, 0, 0)
+XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10, 0, 0, 0, 0, 0, 0)
+XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11, 0, 0, 0, 0, 0, 0)
+XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12, 0, 0, 0, 0, 0, 0)
+XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13, 0, 0, 0, 0, 0, 0)
+XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14, 0, 0, 0, 0, 0, 0)
+XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15, 0, 0, 0, 0, 0, 0)
+XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16, 0, 0, 0, 0, 0, 0)
+XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17, 0, 0, 0, 0, 0, 0)
+XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18, 0, 0, 0, 0, 0, 0)
+XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19, 0, 0, 0, 0, 0, 0)
+XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20, 0, 0, 0, 0, 0, 0)
+XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21, 0, 0, 0, 0, 0, 0)
+XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22, 0, 0, 0, 0, 0, 0)
+XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23, 0, 0, 0, 0, 0, 0)
+XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24, 0, 0, 0, 0, 0, 0)
+XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25, 0, 0, 0, 0, 0, 0)
+XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26, 0, 0, 0, 0, 0, 0)
+XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27, 0, 0, 0, 0, 0, 0)
+XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28, 0, 0, 0, 0, 0, 0)
+XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29, 0, 0, 0, 0, 0, 0)
+XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30, 0, 0, 0, 0, 0, 0)
+XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31, 0, 0, 0, 0, 0, 0)
+XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg, 0, 0, 0, 0, 0, 0)
+XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend, 0, 0, 0, 0, 0, 0)
+XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount, 0, 0, 0, 0, 0, 0)
+XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar, 0, 0, 0, 0, 0, 0)
+XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase, 0, 0, 0, 0, 0, 0)
+XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase, 0, 0, 0, 0, 0, 0)
+XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart, 0, 0, 0, 0, 0, 0)
+XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176, 0, 0, 0, 0, 0, 0)
+XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208, 0, 0, 0, 0, 0, 0)
+XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps, 0, 0, 0, 0, 0, 0)
+XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr, 0, 0, 0, 0, 0, 0)
+XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1, 0, 0, 0, 0, 0, 0)
+XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo, 0, 0, 0, 0, 0, 0)
+XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi, 0, 0, 0, 0, 0, 0)
+XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0, 0, 0, 0, 0, 0, 0)
+XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1, 0, 0, 0, 0, 0, 0)
+XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2, 0, 0, 0, 0, 0, 0)
+XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3, 0, 0, 0, 0, 0, 0)
+XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate, 0, 0, 0, 0, 0, 0)
+XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr, 0, 0, 0, 0, 0, 0)
+XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid, 0, 0, 0, 0, 0, 0)
+XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid, 0, 0, 0, 0, 0, 0)
+XTREG(55, 220, 25, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(56, 224, 25, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable, 0, 0, 0, 0, 0, 0)
+XTREG(58, 232, 6, 4, 4, 0x0263, 0x0007, -2, 2, 0x1000, atomctl, 0, 0, 0, 0, 0, 0)
+XTREG(59, 236, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr, 0, 0, 0, 0, 0, 0)
+XTREG(60, 240, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(61, 244, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(62, 248, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(63, 252, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(64, 256, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0, 0, 0, 0, 0, 0, 0)
+XTREG(65, 260, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1, 0, 0, 0, 0, 0, 0)
+XTREG(66, 264, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1, 0, 0, 0, 0, 0, 0)
+XTREG(67, 268, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2, 0, 0, 0, 0, 0, 0)
+XTREG(68, 272, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3, 0, 0, 0, 0, 0, 0)
+XTREG(69, 276, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4, 0, 0, 0, 0, 0, 0)
+XTREG(70, 280, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5, 0, 0, 0, 0, 0, 0)
+XTREG(71, 284, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6, 0, 0, 0, 0, 0, 0)
+XTREG(72, 288, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7, 0, 0, 0, 0, 0, 0)
+XTREG(73, 292, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc, 0, 0, 0, 0, 0, 0)
+XTREG(74, 296, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2, 0, 0, 0, 0, 0, 0)
+XTREG(75, 300, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3, 0, 0, 0, 0, 0, 0)
+XTREG(76, 304, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4, 0, 0, 0, 0, 0, 0)
+XTREG(77, 308, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5, 0, 0, 0, 0, 0, 0)
+XTREG(78, 312, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6, 0, 0, 0, 0, 0, 0)
+XTREG(79, 316, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7, 0, 0, 0, 0, 0, 0)
+XTREG(80, 320, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1, 0, 0, 0, 0, 0, 0)
+XTREG(81, 324, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2, 0, 0, 0, 0, 0, 0)
+XTREG(82, 328, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3, 0, 0, 0, 0, 0, 0)
+XTREG(83, 332, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4, 0, 0, 0, 0, 0, 0)
+XTREG(84, 336, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5, 0, 0, 0, 0, 0, 0)
+XTREG(85, 340, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6, 0, 0, 0, 0, 0, 0)
+XTREG(86, 344, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7, 0, 0, 0, 0, 0, 0)
+XTREG(87, 348, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable, 0, 0, 0, 0, 0, 0)
+XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt, 0, 0, 0, 0, 0, 0)
+XTREG(89, 356, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset, 0, 0, 0, 0, 0, 0)
+XTREG(90, 360, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear, 0, 0, 0, 0, 0, 0)
+XTREG(91, 364, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable, 0, 0, 0, 0, 0, 0)
+XTREG(92, 368, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase, 0, 0, 0, 0, 0, 0)
+XTREG(93, 372, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause, 0, 0, 0, 0, 0, 0)
+XTREG(94, 376, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause, 0, 0, 0, 0, 0, 0)
+XTREG(95, 380, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount, 0, 0, 0, 0, 0, 0)
+XTREG(96, 384, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid, 0, 0, 0, 0, 0, 0)
+XTREG(97, 388, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount, 0, 0, 0, 0, 0, 0)
+XTREG(98, 392, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel, 0, 0, 0, 0, 0, 0)
+XTREG(99, 396, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr, 0, 0, 0, 0, 0, 0)
+XTREG(100, 400, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0, 0, 0, 0, 0, 0, 0)
+XTREG(101, 404, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1, 0, 0, 0, 0, 0, 0)
+XTREG(102, 408, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2, 0, 0, 0, 0, 0, 0)
+XTREG(103, 412, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0, 0, 0, 0, 0, 0, 0)
+XTREG(104, 416, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1, 0, 0, 0, 0, 0, 0)
+XTREG(105, 420, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0, 0, 0, 0, 0, 0, 0)
+XTREG(106, 424, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1, 0, 0, 0, 0, 0, 0)
+XTREG(107, 428, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2, 0, 0, 0, 0, 0, 0)
+XTREG(108, 432, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3, 0, 0, 0, 0, 0, 0)
+XTREG(109, 436, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4, 0, 0, 0, 0, 0, 0)
+XTREG(110, 440, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5, 0, 0, 0, 0, 0, 0)
+XTREG(111, 444, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6, 0, 0, 0, 0, 0, 0)
+XTREG(112, 448, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7, 0, 0, 0, 0, 0, 0)
+XTREG(113, 452, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8, 0, 0, 0, 0, 0, 0)
+XTREG(114, 456, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9, 0, 0, 0, 0, 0, 0)
+XTREG(115, 460, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10, 0, 0, 0, 0, 0, 0)
+XTREG(116, 464, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11, 0, 0, 0, 0, 0, 0)
+XTREG(117, 468, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12, 0, 0, 0, 0, 0, 0)
+XTREG(118, 472, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13, 0, 0, 0, 0, 0, 0)
+XTREG(119, 476, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14, 0, 0, 0, 0, 0, 0)
+XTREG(120, 480, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15, 0, 0, 0, 0, 0, 0)
+XTREG_END
diff --git a/target/xtensa/core-dc233c/xtensa-modules.c.inc b/target/xtensa/core-dc233c/xtensa-modules.c.inc
new file mode 100644
index 000000000..0f32f0804
--- /dev/null
+++ b/target/xtensa/core-dc233c/xtensa-modules.c.inc
@@ -0,0 +1,15205 @@
+/* Xtensa configuration-specific ISA information.
+
+ Customer ID=4869; Build=0x2cfec; Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "ACCLO", 16, 0 },
+ { "ACCHI", 17, 0 },
+ { "M0", 32, 0 },
+ { "M1", 33, 0 },
+ { "M2", 34, 0 },
+ { "M3", 35, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "176", 176, 0 },
+ { "208", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "LITBASE", 5, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "THREADPTR", 231, 1 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 71
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 231
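+
+/* Each entry is { name, register number, is_user } per the structs in
+   xtensa-isa-internal.h.  is_user = 1 marks user registers (THREADPTR,
+   EXPSTATE) reached with RUR/WUR, so their numbers may collide with
+   special registers reached via RSR/WSR/XSR (PS is special register 230
+   while EXPSTATE is user register 230).  "176" and "208" are unnamed
+   special registers in this configuration.  */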
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EPS3", 15, 0 },
+ { "EPS4", 15, 0 },
+ { "EPS5", 15, 0 },
+ { "EPS6", 15, 0 },
+ { "EPS7", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "LITBADDR", 20, 0 },
+ { "LITBEN", 1, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "ACC", 40, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID6", 1, 0 },
+ { "INSTPGSZID5", 1, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID6", 1, 0 },
+ { "DATAPGSZID5", 1, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 },
+ { "CPENABLE", 8, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 78
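+
+/* Each state entry is { name, width in bits, flags }; EXPSTATE is the
+   only flagged state here, exported to the processor interface via
+   XTENSA_STATE_IS_EXPORTED.  The STATE_* enum that follows must stay in
+   table order, since its values serve as indices into states[].  */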
+
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EPC7,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EXCSAVE7,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EPS7,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSRING,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_THREADPTR,
+ STATE_LITBADDR,
+ STATE_LITBEN,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_ACC,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CCOMPARE2,
+ STATE_ASID3,
+ STATE_ASID2,
+ STATE_ASID1,
+ STATE_INSTPGSZID6,
+ STATE_INSTPGSZID5,
+ STATE_INSTPGSZID4,
+ STATE_DATAPGSZID6,
+ STATE_DATAPGSZID5,
+ STATE_DATAPGSZID4,
+ STATE_PTBASE,
+ STATE_CPENABLE,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_EXPSTATE
+};
+
+
+/* Field definitions. */
+
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
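+
+/* The accessor pairs below extract and deposit fixed bit ranges of the
+   instruction slot without separate mask constants: for a field in bits
+   [hi:lo] of the 32-bit word, (insn[0] << (31 - hi)) >> (31 - hi + lo)
+   reads it, and the setter rewrites exactly those bits.  Field_t above,
+   for example, uses (insn[0] << 24) >> 28 to read bits [7:4], matching
+   its setter's ~0xf0 mask.  */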
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
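+
+/* Wider fields are assembled from several chunks, most significant
+   first: Field_sr concatenates bits [15:12] (r) with bits [11:8] (s),
+   giving the 8-bit number sr = (r << 4) | s that RSR/WSR/XSR use to
+   select a special register.  */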
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_tlo_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_tlo_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_w_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_w_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_rhi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_rhi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_rbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_rbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_tbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_tbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_y_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_y_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_x_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_x_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_mr0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_mr1_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+static unsigned
+Implicit_Field_mr2_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 2;
+}
+
+static unsigned
+Implicit_Field_mr3_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 3;
+}
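+
+/* Implicit fields occupy no bits in the instruction: the setter is a
+   no-op and each getter returns a fixed register index.  These back the
+   invisible operands of windowed calls (return address in a4/a8/a12 for
+   CALL4/CALL8/CALL12) and the fixed MAC16 m0-m3 register operands.  */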
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_r3,
+ FIELD_rbit2,
+ FIELD_rhi,
+ FIELD_t3,
+ FIELD_tbit2,
+ FIELD_tlo,
+ FIELD_w,
+ FIELD_y,
+ FIELD_x,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_bitindex,
+ FIELD_s3to1,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__mr0,
+ FIELD__mr1,
+ FIELD__mr2,
+ FIELD__mr3
+};
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+
+};
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_MR
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "MR", "m", REGFILE_MR, 32, 4 }
+};
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "IMPWIRE", 32, 0, 0, 'i' }
+};
+
+enum xtensa_interface_id {
+ INTERFACE_IMPWIRE
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
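+
+/* These tables decode 4-bit encodings whose immediates are not simple
+   binary values: ai4c is the ADDI.N set (-1, 1..15, with encoding 0
+   meaning -1), and b4c/b4cu are the signed and unsigned B4CONST sets
+   used by the compare-and-branch immediates (BEQI/BNEI/BLTI/BGEI and
+   BLTUI/BGEUI).  The trailing 0 entry is never reached, since lookups
+   mask the index with & 0xf.  */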
+
+
+/* Instruction operands. */
+
+static int
+Operand_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffsetx4_0 = 0x4 + ((((int) offset_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_encode (uint32 *valp)
+{
+ unsigned offset_0, soffsetx4_0;
+ soffsetx4_0 = *valp;
+ offset_0 = ((soffsetx4_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
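+
+/* Together these four helpers implement call-target arithmetic: decode
+   turns the 18-bit offset field into 4 + (sign_extend(offset) << 2),
+   and rtoa/ator convert between that relative form and an absolute
+   address using the window-aligned PC (pc & ~3), so decoded targets
+   are always 4-byte aligned.  */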
+
+static int
+Operand_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ uimm12x8_0 = imm12_0 << 3;
+ *valp = uimm12x8_0;
+ return 0;
+}
+
+static int
+Operand_uimm12x8_encode (uint32 *valp)
+{
+ unsigned imm12_0, uimm12x8_0;
+ uimm12x8_0 = *valp;
+ imm12_0 = ((uimm12x8_0 >> 3) & 0xfff);
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_0, mn_0;
+ mn_0 = *valp & 0xf;
+ simm4_0 = ((int) mn_0 << 28) >> 28;
+ *valp = simm4_0;
+ return 0;
+}
+
+static int
+Operand_simm4_encode (uint32 *valp)
+{
+ unsigned mn_0, simm4_0;
+ simm4_0 = *valp;
+ mn_0 = (simm4_0 & 0xf);
+ *valp = mn_0;
+ return 0;
+}
+
+static int
+Operand_arr_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_arr_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ars_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_art_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_art_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ar0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar0_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar4_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar8_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar8_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ar12_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar12_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_ars_entry_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_entry_encode (uint32 *valp)
+{
+ return (*valp & ~0x1f) != 0;
+}
+
+static int
+Operand_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_0, r_0;
+ r_0 = *valp & 0xf;
+ immrx4_0 = (((0xfffffff) << 4) | r_0) << 2;
+ *valp = immrx4_0;
+ return 0;
+}
+
+static int
+Operand_immrx4_encode (uint32 *valp)
+{
+ unsigned r_0, immrx4_0;
+ immrx4_0 = *valp;
+ r_0 = ((immrx4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_0, r_0;
+ r_0 = *valp & 0xf;
+ lsi4x4_0 = r_0 << 2;
+ *valp = lsi4x4_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_encode (uint32 *valp)
+{
+ unsigned r_0, lsi4x4_0;
+ lsi4x4_0 = *valp;
+ r_0 = ((lsi4x4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_0, imm7_0;
+ imm7_0 = *valp & 0x7f;
+ simm7_0 = ((((-((((imm7_0 >> 6) & 1)) & (((imm7_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | imm7_0;
+ *valp = simm7_0;
+ return 0;
+}
+
+static int
+Operand_simm7_encode (uint32 *valp)
+{
+ unsigned imm7_0, simm7_0;
+ simm7_0 = *valp;
+ imm7_0 = (simm7_0 & 0x7f);
+ *valp = imm7_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_0, imm6_0;
+ imm6_0 = *valp & 0x3f;
+ uimm6_0 = 0x4 + (((0) << 6) | imm6_0);
+ *valp = uimm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_encode (uint32 *valp)
+{
+ unsigned imm6_0, uimm6_0;
+ uimm6_0 = *valp;
+ imm6_0 = (uimm6_0 - 0x4) & 0x3f;
+ *valp = imm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_0, t_0;
+ t_0 = *valp & 0xf;
+ ai4const_0 = CONST_TBL_ai4c_0[t_0 & 0xf];
+ *valp = ai4const_0;
+ return 0;
+}
+
+static int
+Operand_ai4const_encode (uint32 *valp)
+{
+ unsigned t_0, ai4const_0;
+ ai4const_0 = *valp;
+ switch (ai4const_0)
+ {
+ case 0xffffffff: t_0 = 0; break;
+ case 0x1: t_0 = 0x1; break;
+ case 0x2: t_0 = 0x2; break;
+ case 0x3: t_0 = 0x3; break;
+ case 0x4: t_0 = 0x4; break;
+ case 0x5: t_0 = 0x5; break;
+ case 0x6: t_0 = 0x6; break;
+ case 0x7: t_0 = 0x7; break;
+ case 0x8: t_0 = 0x8; break;
+ case 0x9: t_0 = 0x9; break;
+ case 0xa: t_0 = 0xa; break;
+ case 0xb: t_0 = 0xb; break;
+ case 0xc: t_0 = 0xc; break;
+ case 0xd: t_0 = 0xd; break;
+ case 0xe: t_0 = 0xe; break;
+ default: t_0 = 0xf; break;
+ }
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_0, r_0;
+ r_0 = *valp & 0xf;
+ b4const_0 = CONST_TBL_b4c_0[r_0 & 0xf];
+ *valp = b4const_0;
+ return 0;
+}
+
+static int
+Operand_b4const_encode (uint32 *valp)
+{
+ unsigned r_0, b4const_0;
+ b4const_0 = *valp;
+ switch (b4const_0)
+ {
+ case 0xffffffff: r_0 = 0; break;
+ case 0x1: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_0, r_0;
+ r_0 = *valp & 0xf;
+ b4constu_0 = CONST_TBL_b4cu_0[r_0 & 0xf];
+ *valp = b4constu_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_encode (uint32 *valp)
+{
+ unsigned r_0, b4constu_0;
+ b4constu_0 = *valp;
+ switch (b4constu_0)
+ {
+ case 0x8000: r_0 = 0; break;
+ case 0x10000: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8_0 = imm8_0;
+ *valp = uimm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8_0;
+ uimm8_0 = *valp;
+ imm8_0 = (uimm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x2_0 = imm8_0 << 1;
+ *valp = uimm8x2_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x2_0;
+ uimm8x2_0 = *valp;
+ imm8_0 = ((uimm8x2_0 >> 1) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x4_0 = imm8_0 << 2;
+ *valp = uimm8x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x4_0;
+ uimm8x4_0 = *valp;
+ imm8_0 = ((uimm8x4_0 >> 2) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_0, op2_0;
+ op2_0 = *valp & 0xf;
+ uimm4x16_0 = op2_0 << 4;
+ *valp = uimm4x16_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_encode (uint32 *valp)
+{
+ unsigned op2_0, uimm4x16_0;
+ uimm4x16_0 = *valp;
+ op2_0 = ((uimm4x16_0 >> 4) & 0xf);
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8_0 = ((int) imm8_0 << 24) >> 24;
+ *valp = simm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8_0;
+ simm8_0 = *valp;
+ imm8_0 = (simm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8x256_0 = (((int) imm8_0 << 24) >> 24) << 8;
+ *valp = simm8x256_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8x256_0;
+ simm8x256_0 = *valp;
+ imm8_0 = ((simm8x256_0 >> 8) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_0, imm12b_0;
+ imm12b_0 = *valp & 0xfff;
+ simm12b_0 = ((int) imm12b_0 << 20) >> 20;
+ *valp = simm12b_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_encode (uint32 *valp)
+{
+ unsigned imm12b_0, simm12b_0;
+ simm12b_0 = *valp;
+ imm12b_0 = (simm12b_0 & 0xfff);
+ *valp = imm12b_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_0, sal_0;
+ sal_0 = *valp & 0x1f;
+ msalp32_0 = 0x20 - sal_0;
+ *valp = msalp32_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_encode (uint32 *valp)
+{
+ unsigned sal_0, msalp32_0;
+ msalp32_0 = *valp;
+ sal_0 = (0x20 - msalp32_0) & 0x1f;
+ *valp = sal_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_0, op2_0;
+ op2_0 = *valp & 0xf;
+ op2p1_0 = op2_0 + 0x1;
+ *valp = op2p1_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_encode (uint32 *valp)
+{
+ unsigned op2_0, op2p1_0;
+ op2p1_0 = *valp;
+ op2_0 = (op2p1_0 - 0x1) & 0xf;
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_label8_decode (uint32 *valp)
+{
+ unsigned label8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ label8_0 = 0x4 + (((int) imm8_0 << 24) >> 24);
+ *valp = label8_0;
+ return 0;
+}
+
+static int
+Operand_label8_encode (uint32 *valp)
+{
+ unsigned imm8_0, label8_0;
+ label8_0 = *valp;
+ imm8_0 = (label8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ ulabel8_0 = 0x4 + (((0) << 8) | imm8_0);
+ *valp = ulabel8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_encode (uint32 *valp)
+{
+ unsigned imm8_0, ulabel8_0;
+ ulabel8_0 = *valp;
+ imm8_0 = (ulabel8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_decode (uint32 *valp)
+{
+ unsigned label12_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ label12_0 = 0x4 + (((int) imm12_0 << 20) >> 20);
+ *valp = label12_0;
+ return 0;
+}
+
+static int
+Operand_label12_encode (uint32 *valp)
+{
+ unsigned imm12_0, label12_0;
+ label12_0 = *valp;
+ imm12_0 = (label12_0 - 0x4) & 0xfff;
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffset_0 = 0x4 + (((int) offset_0 << 14) >> 14);
+ *valp = soffset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_encode (uint32 *valp)
+{
+ unsigned offset_0, soffset_0;
+ soffset_0 = *valp;
+ offset_0 = (soffset_0 - 0x4) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_0, imm16_0;
+ imm16_0 = *valp & 0xffff;
+ uimm16x4_0 = (((0xffff) << 16) | imm16_0) << 2;
+ *valp = uimm16x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_encode (uint32 *valp)
+{
+ unsigned imm16_0, uimm16x4_0;
+ uimm16x4_0 = *valp;
+ imm16_0 = (uimm16x4_0 >> 2) & 0xffff;
+ *valp = imm16_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_mx_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mx_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_my_decode (uint32 *valp)
+{
+ *valp += 2;
+ return 0;
+}
+
+static int
+Operand_my_encode (uint32 *valp)
+{
+ int error;
+ error = ((*valp & ~0x3) != 0) || ((*valp & 0x2) == 0);
+ *valp = *valp & 1;
+ return error;
+}
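+
+/* The one-bit y field can only name the upper half of the MR file:
+   decode adds 2 so "my" ranges over m2/m3, and encode rejects anything
+   outside that pair before folding the value back to a single bit.
+   "mx" presumably pairs with the one-bit x field for m0/m1, although
+   its generated encoder only range-checks against 0..3.  */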
+
+static int
+Operand_mw_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mw_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr0_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr1_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr2_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_mr3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_mr3_encode (uint32 *valp)
+{
+ return (*valp & ~0x3) != 0;
+}
+
+static int
+Operand_immt_decode (uint32 *valp)
+{
+ unsigned immt_0, t_0;
+ t_0 = *valp & 0xf;
+ immt_0 = t_0;
+ *valp = immt_0;
+ return 0;
+}
+
+static int
+Operand_immt_encode (uint32 *valp)
+{
+ unsigned t_0, immt_0;
+ immt_0 = *valp;
+ t_0 = immt_0 & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_imms_decode (uint32 *valp)
+{
+ unsigned imms_0, s_0;
+ s_0 = *valp & 0xf;
+ imms_0 = s_0;
+ *valp = imms_0;
+ return 0;
+}
+
+static int
+Operand_imms_encode (uint32 *valp)
+{
+ unsigned s_0, imms_0;
+ imms_0 = *valp;
+ s_0 = imms_0 & 0xf;
+ *valp = s_0;
+ return 0;
+}
+
+static int
+Operand_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_0, t_0;
+ t_0 = *valp & 0xf;
+ tp7_0 = t_0 + 0x7;
+ *valp = tp7_0;
+ return 0;
+}
+
+static int
+Operand_tp7_encode (uint32 *valp)
+{
+ unsigned t_0, tp7_0;
+ tp7_0 = *valp;
+ t_0 = (tp7_0 - 0x7) & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_0, xt_wbr15_imm_0;
+ xt_wbr15_imm_0 = *valp & 0x7fff;
+ xt_wbr15_label_0 = 0x4 + (((int) xt_wbr15_imm_0 << 17) >> 17);
+ *valp = xt_wbr15_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_imm_0, xt_wbr15_label_0;
+ xt_wbr15_label_0 = *valp;
+ xt_wbr15_imm_0 = (xt_wbr15_label_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_0, xt_wbr18_imm_0;
+ xt_wbr18_imm_0 = *valp & 0x3ffff;
+ xt_wbr18_label_0 = 0x4 + (((int) xt_wbr18_imm_0 << 14) >> 14);
+ *valp = xt_wbr18_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_imm_0, xt_wbr18_label_0;
+ xt_wbr18_label_0 = *valp;
+ xt_wbr18_imm_0 = (xt_wbr18_label_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffsetx4_encode, Operand_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ Operand_uimm12x8_encode, Operand_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ Operand_simm4_encode, Operand_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_arr_encode, Operand_arr_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_art_encode, Operand_art_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar0_encode, Operand_ar0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar4_encode, Operand_ar4_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar8_encode, Operand_ar8_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar12_encode, Operand_ar12_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_entry_encode, Operand_ars_entry_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ Operand_immrx4_encode, Operand_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ Operand_lsi4x4_encode, Operand_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ Operand_simm7_encode, Operand_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm6_encode, Operand_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ Operand_ai4const_encode, Operand_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ Operand_b4const_encode, Operand_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ Operand_b4constu_encode, Operand_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8_encode, Operand_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8x2_encode, Operand_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8x4_encode, Operand_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ Operand_uimm4x16_encode, Operand_uimm4x16_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ Operand_simm8_encode, Operand_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ Operand_simm8x256_encode, Operand_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ Operand_simm12b_encode, Operand_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ Operand_msalp32_encode, Operand_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ Operand_op2p1_encode, Operand_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label8_encode, Operand_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_ulabel8_encode, Operand_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label12_encode, Operand_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffset_encode, Operand_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm16x4_encode, Operand_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "mx", FIELD_x, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ Operand_mx_encode, Operand_mx_decode,
+ 0, 0 },
+ { "my", FIELD_y, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ Operand_my_encode, Operand_my_decode,
+ 0, 0 },
+ { "mw", FIELD_w, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_mw_encode, Operand_mw_decode,
+ 0, 0 },
+ { "mr0", FIELD__mr0, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr0_encode, Operand_mr0_decode,
+ 0, 0 },
+ { "mr1", FIELD__mr1, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr1_encode, Operand_mr1_decode,
+ 0, 0 },
+ { "mr2", FIELD__mr2, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr2_encode, Operand_mr2_decode,
+ 0, 0 },
+ { "mr3", FIELD__mr3, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_mr3_encode, Operand_mr3_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ Operand_immt_encode, Operand_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ Operand_imms_encode, Operand_imms_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ Operand_tp7_encode, Operand_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr15_label_encode, Operand_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr18_label_encode, Operand_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi", FIELD_bbi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s", FIELD_s, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae", FIELD_sae, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas", FIELD_sas, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "r3", FIELD_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "rbit2", FIELD_rbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "rhi", FIELD_rhi, -1, 0, 0, 0, 0, 0, 0 },
+ { "t3", FIELD_t3, -1, 0, 0, 0, 0, 0, 0 },
+ { "tbit2", FIELD_tbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "tlo", FIELD_tlo, -1, 0, 0, 0, 0, 0, 0 },
+ { "w", FIELD_w, -1, 0, 0, 0, 0, 0, 0 },
+ { "y", FIELD_y, -1, 0, 0, 0, 0, 0, 0 },
+ { "x", FIELD_x, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "bitindex", FIELD_bitindex, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", FIELD_s3to1, -1, 0, 0, 0, 0, 0, 0 }
+};
+
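+/* Operand identifiers.  The enumeration order must match the operands[]
+   table above, since the iclass argument lists below index the table by
+   these values.  */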
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_mx,
+ OPERAND_my,
+ OPERAND_mw,
+ OPERAND_mr0,
+ OPERAND_mr1,
+ OPERAND_mr2,
+ OPERAND_mr3,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_bbi,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_s,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sae,
+ OPERAND_sal,
+ OPERAND_sargt,
+ OPERAND_sas4,
+ OPERAND_sas,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_r3,
+ OPERAND_rbit2,
+ OPERAND_rhi,
+ OPERAND_t3,
+ OPERAND_tbit2,
+ OPERAND_tlo,
+ OPERAND_w,
+ OPERAND_y,
+ OPERAND_x,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_bitindex,
+ OPERAND_s3to1
+};
+
+
+/* Iclass table. */
+
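+/* Each iclass lists its explicit operands and implicit state together
+   with a direction code: 'i' = read, 'o' = written, 'm' = read and
+   written.  The 's' code appears only on the ars_entry operand of
+   ENTRY.  */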
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
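+/* Zero-overhead loop registers.  LBEG, LEND, and LCOUNT each get the
+   standard RSR/WSR/XSR triple; writes to LCOUNT also write XTSYNC.  */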
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'o' },
+ { { STATE_LITBEN }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'm' },
+ { { STATE_LITBEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_176_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_176_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
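+/* The EPCn, EXCSAVEn, and EPSn special registers below all follow the
+   same pattern: RSR reads into art ('o'), WSR writes from art ('i'),
+   XSR exchanges ('m'), and every access reads PS.EXCM and PS.RING.  */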
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
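+/* MAC16 iclasses.  The _aa/_ad/_da/_dd suffixes select AR or MR (mx/my)
+   sources; the plain mac16 forms overwrite ACC ('o') while the mac16a
+   accumulating forms read-modify-write it ('m').  */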
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_l_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m0_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m0_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m0_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m1_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m1_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m1_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m2_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m2_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m2_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m3_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m3_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m3_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
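+/* INTERRUPT is read via RSR; INTSET and INTCLEAR are write-only views
+   that modify the same INTERRUPT state and also write XTSYNC.  */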
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
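+/* Debug break address/control registers.  Writes to DBREAKAn and
+   DBREAKCn also write XTSYNC.  */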
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'i' },
+ { { STATE_INSTPGSZID5 }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'o' },
+ { { STATE_INSTPGSZID5 }, 'o' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'm' },
+ { { STATE_INSTPGSZID5 }, 'm' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'i' },
+ { { STATE_DATAPGSZID5 }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'o' },
+ { { STATE_DATAPGSZID5 }, 'o' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'm' },
+ { { STATE_DATAPGSZID5 }, 'm' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
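+/*
+ * Each row of iclasses[] below pairs an operand list with a state-operand
+ * list and, where one exists, an interface list:
+ *
+ *   { num_args, args, num_stateArgs, stateArgs, num_intfArgs, intfArgs }
+ *
+ * which follows the xtensa_iclass_internal layout declared in
+ * xtensa-isa-internal.h.  A zero count with a null (0) pointer means the
+ * iclass has no entries of that kind; rows whose argument list is empty
+ * keep the iclass name in a comment so the table stays readable.
+ */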
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 4, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 2, Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_176_args,
+ 2, Iclass_xt_iclass_rsr_176_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_176_args,
+ 2, Iclass_xt_iclass_wsr_176_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_208_args,
+ 2, Iclass_xt_iclass_rsr_208_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 3, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 3, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 3, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 3, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 3, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 3, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 3, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 3, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 3, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 3, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 3, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 3, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 3, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 3, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 3, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 3, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 3, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 3, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 3, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 3, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 3, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 3, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 3, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 3, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 3, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 3, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 3, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 3, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 3, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 3, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 3, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 3, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 3, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 3, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 3, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 3, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 3, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 3, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 3, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 3, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 3, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 3, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 3, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 3, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 3, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 3, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 3, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 3, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_aa_args,
+ 1, Iclass_xt_iclass_mac16_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_ad_args,
+ 1, Iclass_xt_iclass_mac16_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_da_args,
+ 1, Iclass_xt_iclass_mac16_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_dd_args,
+ 1, Iclass_xt_iclass_mac16_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_aa_args,
+ 1, Iclass_xt_iclass_mac16a_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_ad_args,
+ 1, Iclass_xt_iclass_mac16a_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_da_args,
+ 1, Iclass_xt_iclass_mac16a_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_dd_args,
+ 1, Iclass_xt_iclass_mac16a_dd_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_da_args,
+ 1, Iclass_xt_iclass_mac16al_da_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_dd_args,
+ 1, Iclass_xt_iclass_mac16al_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_l_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m3_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acclo_args,
+ 1, Iclass_xt_iclass_rsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acclo_args,
+ 1, Iclass_xt_iclass_wsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acclo_args,
+ 1, Iclass_xt_iclass_xsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acchi_args,
+ 1, Iclass_xt_iclass_rsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acchi_args,
+ 1, Iclass_xt_iclass_wsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acchi_args,
+ 1, Iclass_xt_iclass_xsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 21, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 3, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 3, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 4, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 4, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 2, Iclass_xt_iclass_icache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 2, Iclass_xt_iclass_dcache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 3, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 3, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 3, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 3, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 4, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 4, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rer */,
+ 2, Iclass_xt_iclass_rer_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_wer */,
+ 2, Iclass_xt_iclass_wer_stateArgs, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 2, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 2, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 1, Iclass_iclass_READ_IMPWIRE_stateArgs, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 2, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 2, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 2, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 }
+};
+
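+/*
+ * The enumerators below are the iclass ids referenced by the opcode
+ * tables: their declaration order must mirror the row order of iclasses[]
+ * above, because each id is used directly as an index into that array.
+ */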
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_rur_threadptr,
+ ICLASS_wur_threadptr,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_176,
+ ICLASS_xt_iclass_wsr_176,
+ ICLASS_xt_iclass_rsr_208,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_epc7,
+ ICLASS_xt_iclass_wsr_epc7,
+ ICLASS_xt_iclass_xsr_epc7,
+ ICLASS_xt_iclass_rsr_excsave7,
+ ICLASS_xt_iclass_wsr_excsave7,
+ ICLASS_xt_iclass_xsr_excsave7,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_eps7,
+ ICLASS_xt_iclass_wsr_eps7,
+ ICLASS_xt_iclass_xsr_eps7,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_iclass_mac16_aa,
+ ICLASS_xt_iclass_mac16_ad,
+ ICLASS_xt_iclass_mac16_da,
+ ICLASS_xt_iclass_mac16_dd,
+ ICLASS_xt_iclass_mac16a_aa,
+ ICLASS_xt_iclass_mac16a_ad,
+ ICLASS_xt_iclass_mac16a_da,
+ ICLASS_xt_iclass_mac16a_dd,
+ ICLASS_xt_iclass_mac16al_da,
+ ICLASS_xt_iclass_mac16al_dd,
+ ICLASS_xt_iclass_mac16_l,
+ ICLASS_xt_iclass_rsr_m0,
+ ICLASS_xt_iclass_wsr_m0,
+ ICLASS_xt_iclass_xsr_m0,
+ ICLASS_xt_iclass_rsr_m1,
+ ICLASS_xt_iclass_wsr_m1,
+ ICLASS_xt_iclass_xsr_m1,
+ ICLASS_xt_iclass_rsr_m2,
+ ICLASS_xt_iclass_wsr_m2,
+ ICLASS_xt_iclass_xsr_m2,
+ ICLASS_xt_iclass_rsr_m3,
+ ICLASS_xt_iclass_wsr_m3,
+ ICLASS_xt_iclass_xsr_m3,
+ ICLASS_xt_iclass_rsr_acclo,
+ ICLASS_xt_iclass_wsr_acclo,
+ ICLASS_xt_iclass_xsr_acclo,
+ ICLASS_xt_iclass_rsr_acchi,
+ ICLASS_xt_iclass_wsr_acchi,
+ ICLASS_xt_iclass_xsr_acchi,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_rsr_ccompare2,
+ ICLASS_xt_iclass_wsr_ccompare2,
+ ICLASS_xt_iclass_xsr_ccompare2,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_lock,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_dcache_lock,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_wsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_ptevaddr,
+ ICLASS_xt_iclass_xsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_rasid,
+ ICLASS_xt_iclass_wsr_rasid,
+ ICLASS_xt_iclass_xsr_rasid,
+ ICLASS_xt_iclass_rsr_itlbcfg,
+ ICLASS_xt_iclass_wsr_itlbcfg,
+ ICLASS_xt_iclass_xsr_itlbcfg,
+ ICLASS_xt_iclass_rsr_dtlbcfg,
+ ICLASS_xt_iclass_wsr_dtlbcfg,
+ ICLASS_xt_iclass_xsr_dtlbcfg,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_ldpte,
+ ICLASS_xt_iclass_hwwitlba,
+ ICLASS_xt_iclass_hwwdtlba,
+ ICLASS_xt_iclass_rsr_cpenable,
+ ICLASS_xt_iclass_wsr_cpenable,
+ ICLASS_xt_iclass_xsr_cpenable,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_div,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_iclass_READ_IMPWIRE,
+ ICLASS_iclass_SETB_EXPSTATE,
+ ICLASS_iclass_CLRB_EXPSTATE,
+ ICLASS_iclass_WRMSK_EXPSTATE
+};
+
+
+/* Opcode encodings. */
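+/*
+ * Each Opcode_*_Slot_*_encode function below stores only the fixed,
+ * operand-independent bits of one opcode into slotbuf[0]; operand
+ * fields are OR'ed in separately by the per-field set functions.
+ * A minimal usage sketch, assuming the libisa-style insnbuf API used
+ * by this file (isa is a hypothetical, already-initialized handle):
+ *
+ *   xtensa_insnbuf slotbuf = xtensa_insnbuf_alloc(isa);
+ *   Opcode_add_Slot_inst_encode(slotbuf);   // slotbuf[0] == 0x800000
+ *   // ...field set functions then insert the r/s/t operand bits...
+ */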
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e70;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e700;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
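+/*
+ * Note the regular RSR/WSR/XSR pattern in the special-register
+ * encodings that follow: the base opcodes appear to be 0x030000 (RSR),
+ * 0x130000 (WSR) and 0x610000 (XSR), with the special-register number
+ * in bits 8..15 (e.g. LEND is SR 1, PS is SR 230 = 0xe6).
+ */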
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_208_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b700;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b700;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b700;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d700;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d700;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c700;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c700;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c700;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
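+/*
+ * The MAC16 multiply variants that follow select the low or high half
+ * of each 16-bit operand; the ll/hl/lh/hh choice shows up as a two-bit
+ * field in the opcode, so the four encodings of each group step by
+ * 0x10000 (e.g. MUL.AA.LL 0x740004 ... MUL.AA.HH 0x770004).
+ */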
+static void
+Opcode_mul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x740004;
+}
+
+static void
+Opcode_mul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x750004;
+}
+
+static void
+Opcode_mul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x760004;
+}
+
+static void
+Opcode_mul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770004;
+}
+
+static void
+Opcode_umul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700004;
+}
+
+static void
+Opcode_umul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x710004;
+}
+
+static void
+Opcode_umul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720004;
+}
+
+static void
+Opcode_umul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730004;
+}
+
+static void
+Opcode_mul_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340004;
+}
+
+static void
+Opcode_mul_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350004;
+}
+
+static void
+Opcode_mul_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x360004;
+}
+
+static void
+Opcode_mul_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370004;
+}
+
+static void
+Opcode_mul_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640004;
+}
+
+static void
+Opcode_mul_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650004;
+}
+
+static void
+Opcode_mul_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660004;
+}
+
+static void
+Opcode_mul_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670004;
+}
+
+static void
+Opcode_mul_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240004;
+}
+
+static void
+Opcode_mul_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250004;
+}
+
+static void
+Opcode_mul_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260004;
+}
+
+static void
+Opcode_mul_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270004;
+}
+
+static void
+Opcode_mula_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780004;
+}
+
+static void
+Opcode_mula_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x790004;
+}
+
+static void
+Opcode_mula_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7a0004;
+}
+
+static void
+Opcode_mula_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0004;
+}
+
+static void
+Opcode_muls_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c0004;
+}
+
+static void
+Opcode_muls_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d0004;
+}
+
+static void
+Opcode_muls_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e0004;
+}
+
+static void
+Opcode_muls_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f0004;
+}
+
+static void
+Opcode_mula_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_mula_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x390004;
+}
+
+static void
+Opcode_mula_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a0004;
+}
+
+static void
+Opcode_mula_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0004;
+}
+
+static void
+Opcode_muls_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0004;
+}
+
+static void
+Opcode_muls_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0004;
+}
+
+static void
+Opcode_muls_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0004;
+}
+
+static void
+Opcode_muls_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f0004;
+}
+
+static void
+Opcode_mula_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680004;
+}
+
+static void
+Opcode_mula_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690004;
+}
+
+static void
+Opcode_mula_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0004;
+}
+
+static void
+Opcode_mula_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0004;
+}
+
+static void
+Opcode_muls_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0004;
+}
+
+static void
+Opcode_muls_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0004;
+}
+
+static void
+Opcode_muls_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0004;
+}
+
+static void
+Opcode_muls_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0004;
+}
+
+static void
+Opcode_mula_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_mula_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x290004;
+}
+
+static void
+Opcode_mula_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0004;
+}
+
+static void
+Opcode_mula_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0004;
+}
+
+static void
+Opcode_muls_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0004;
+}
+
+static void
+Opcode_muls_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0004;
+}
+
+static void
+Opcode_muls_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0004;
+}
+
+static void
+Opcode_muls_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0004;
+}
+
+static void
+Opcode_mula_da_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580004;
+}
+
+static void
+Opcode_mula_da_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480004;
+}
+
+static void
+Opcode_mula_da_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590004;
+}
+
+static void
+Opcode_mula_da_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490004;
+}
+
+static void
+Opcode_mula_da_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0004;
+}
+
+static void
+Opcode_mula_da_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0004;
+}
+
+static void
+Opcode_mula_da_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0004;
+}
+
+static void
+Opcode_mula_da_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0004;
+}
+
+static void
+Opcode_mula_dd_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_mula_dd_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_mula_dd_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x190004;
+}
+
+static void
+Opcode_mula_dd_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90004;
+}
+
+static void
+Opcode_mula_dd_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0004;
+}
+
+static void
+Opcode_mula_dd_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0004;
+}
+
+static void
+Opcode_mula_dd_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0004;
+}
+
+static void
+Opcode_mula_dd_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0004;
+}
+
+static void
+Opcode_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900004;
+}
+
+static void
+Opcode_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800004;
+}
+
+static void
+Opcode_rsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32000;
+}
+
+static void
+Opcode_wsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132000;
+}
+
+static void
+Opcode_xsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612000;
+}
+
+static void
+Opcode_rsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32100;
+}
+
+static void
+Opcode_wsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132100;
+}
+
+static void
+Opcode_xsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612100;
+}
+
+static void
+Opcode_rsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32200;
+}
+
+static void
+Opcode_wsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132200;
+}
+
+static void
+Opcode_xsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612200;
+}
+
+static void
+Opcode_rsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32300;
+}
+
+static void
+Opcode_wsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132300;
+}
+
+static void
+Opcode_xsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612300;
+}
+
+static void
+Opcode_rsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31000;
+}
+
+static void
+Opcode_wsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131000;
+}
+
+static void
+Opcode_xsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611000;
+}
+
+static void
+Opcode_rsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31100;
+}
+
+static void
+Opcode_wsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131100;
+}
+
+static void
+Opcode_xsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611100;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f200;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f200;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f200;
+}
+
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70d2;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270d2;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370d2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7082;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x27082;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37082;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135300;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35300;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615300;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a00;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135a00;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615a00;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35b00;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135b00;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615b00;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35c00;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135c00;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615c00;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f000;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501000;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x509000;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e000;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e000;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e000;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20000;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20000;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20000;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf20000;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e60;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e600;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0000;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1000;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1200;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe2000;
+}
+
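+/*
+ * The three entries of each encode-fns array below correspond, in
+ * order, to this core's three slots: the 24-bit "inst" slot and the
+ * narrow "inst16a"/"inst16b" slots.  A 0 entry means the opcode has
+ * no encoding in that slot (e.g. RETW.N exists only in inst16b).
+ */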
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_176_encode_fns[] = {
+ Opcode_rsr_176_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_176_encode_fns[] = {
+ Opcode_wsr_176_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_208_encode_fns[] = {
+ Opcode_rsr_208_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_ll_encode_fns[] = {
+ Opcode_mul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hl_encode_fns[] = {
+ Opcode_mul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_lh_encode_fns[] = {
+ Opcode_mul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hh_encode_fns[] = {
+ Opcode_mul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_ll_encode_fns[] = {
+ Opcode_umul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hl_encode_fns[] = {
+ Opcode_umul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_lh_encode_fns[] = {
+ Opcode_umul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hh_encode_fns[] = {
+ Opcode_umul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_ll_encode_fns[] = {
+ Opcode_mul_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hl_encode_fns[] = {
+ Opcode_mul_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_lh_encode_fns[] = {
+ Opcode_mul_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hh_encode_fns[] = {
+ Opcode_mul_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_ll_encode_fns[] = {
+ Opcode_mul_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hl_encode_fns[] = {
+ Opcode_mul_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_lh_encode_fns[] = {
+ Opcode_mul_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hh_encode_fns[] = {
+ Opcode_mul_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_ll_encode_fns[] = {
+ Opcode_mul_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hl_encode_fns[] = {
+ Opcode_mul_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_lh_encode_fns[] = {
+ Opcode_mul_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hh_encode_fns[] = {
+ Opcode_mul_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_ll_encode_fns[] = {
+ Opcode_mula_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hl_encode_fns[] = {
+ Opcode_mula_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_lh_encode_fns[] = {
+ Opcode_mula_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hh_encode_fns[] = {
+ Opcode_mula_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_ll_encode_fns[] = {
+ Opcode_muls_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hl_encode_fns[] = {
+ Opcode_muls_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_lh_encode_fns[] = {
+ Opcode_muls_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hh_encode_fns[] = {
+ Opcode_muls_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_ll_encode_fns[] = {
+ Opcode_mula_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hl_encode_fns[] = {
+ Opcode_mula_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_lh_encode_fns[] = {
+ Opcode_mula_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hh_encode_fns[] = {
+ Opcode_mula_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_ll_encode_fns[] = {
+ Opcode_muls_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hl_encode_fns[] = {
+ Opcode_muls_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_lh_encode_fns[] = {
+ Opcode_muls_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hh_encode_fns[] = {
+ Opcode_muls_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_encode_fns[] = {
+ Opcode_mula_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_encode_fns[] = {
+ Opcode_mula_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_encode_fns[] = {
+ Opcode_mula_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_encode_fns[] = {
+ Opcode_mula_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_ll_encode_fns[] = {
+ Opcode_muls_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hl_encode_fns[] = {
+ Opcode_muls_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_lh_encode_fns[] = {
+ Opcode_muls_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hh_encode_fns[] = {
+ Opcode_muls_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_encode_fns[] = {
+ Opcode_mula_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_encode_fns[] = {
+ Opcode_mula_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_encode_fns[] = {
+ Opcode_mula_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_encode_fns[] = {
+ Opcode_mula_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_ll_encode_fns[] = {
+ Opcode_muls_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hl_encode_fns[] = {
+ Opcode_muls_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_lh_encode_fns[] = {
+ Opcode_muls_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hh_encode_fns[] = {
+ Opcode_muls_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_lddec_encode_fns[] = {
+ Opcode_mula_da_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_ldinc_encode_fns[] = {
+ Opcode_mula_da_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_lddec_encode_fns[] = {
+ Opcode_mula_da_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_ldinc_encode_fns[] = {
+ Opcode_mula_da_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_lddec_encode_fns[] = {
+ Opcode_mula_da_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_ldinc_encode_fns[] = {
+ Opcode_mula_da_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_lddec_encode_fns[] = {
+ Opcode_mula_da_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_ldinc_encode_fns[] = {
+ Opcode_mula_da_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_lddec_encode_fns[] = {
+ Opcode_mula_dd_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_ldinc_encode_fns[] = {
+ Opcode_mula_dd_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_lddec_encode_fns[] = {
+ Opcode_mula_dd_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_lddec_encode_fns[] = {
+ Opcode_mula_dd_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_lddec_encode_fns[] = {
+ Opcode_mula_dd_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddec_encode_fns[] = {
+ Opcode_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldinc_encode_fns[] = {
+ Opcode_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m0_encode_fns[] = {
+ Opcode_rsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m0_encode_fns[] = {
+ Opcode_wsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m0_encode_fns[] = {
+ Opcode_xsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m1_encode_fns[] = {
+ Opcode_rsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m1_encode_fns[] = {
+ Opcode_wsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m1_encode_fns[] = {
+ Opcode_xsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m2_encode_fns[] = {
+ Opcode_rsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m2_encode_fns[] = {
+ Opcode_wsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m2_encode_fns[] = {
+ Opcode_xsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m3_encode_fns[] = {
+ Opcode_rsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m3_encode_fns[] = {
+ Opcode_wsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m3_encode_fns[] = {
+ Opcode_xsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acclo_encode_fns[] = {
+ Opcode_rsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acclo_encode_fns[] = {
+ Opcode_wsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acclo_encode_fns[] = {
+ Opcode_xsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acchi_encode_fns[] = {
+ Opcode_rsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acchi_encode_fns[] = {
+ Opcode_wsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acchi_encode_fns[] = {
+ Opcode_xsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0
+};
+
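+/* A minimal sketch of how the tables above might be consumed (hypothetical
+   caller, for illustration only; this file just provides the data):
+
+       xtensa_opcode_encode_fn fn = Opcode_add_encode_fns[slot_id];
+       if (fn != 0)
+           fn (slotbuf);
+
+   The encode function stores the opcode's fixed bit pattern into the slot
+   buffer; operand fields are encoded separately.  */
+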
+
+/* Opcode table. */
+
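+/* Each entry: mnemonic, instruction class, property flags
+   (XTENSA_OPCODE_IS_BRANCH, _IS_JUMP, _IS_CALL or _IS_LOOP), the per-slot
+   encode function table defined above, and a trailing functional-unit
+   usage count/list pair (0, 0 throughout for this core).  */
+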
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "rur.threadptr", ICLASS_rur_threadptr,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", ICLASS_wur_threadptr,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.176", ICLASS_xt_iclass_rsr_176,
+ 0,
+ Opcode_rsr_176_encode_fns, 0, 0 },
+ { "wsr.176", ICLASS_xt_iclass_wsr_176,
+ 0,
+ Opcode_wsr_176_encode_fns, 0, 0 },
+ { "rsr.208", ICLASS_xt_iclass_rsr_208,
+ 0,
+ Opcode_rsr_208_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", ICLASS_xt_iclass_rsr_epc7,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", ICLASS_xt_iclass_wsr_epc7,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", ICLASS_xt_iclass_xsr_epc7,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", ICLASS_xt_iclass_rsr_excsave7,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", ICLASS_xt_iclass_wsr_excsave7,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", ICLASS_xt_iclass_xsr_excsave7,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", ICLASS_xt_iclass_rsr_eps7,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", ICLASS_xt_iclass_wsr_eps7,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", ICLASS_xt_iclass_xsr_eps7,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "mul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_ll_encode_fns, 0, 0 },
+ { "mul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hl_encode_fns, 0, 0 },
+ { "mul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_lh_encode_fns, 0, 0 },
+ { "mul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hh_encode_fns, 0, 0 },
+ { "umul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_ll_encode_fns, 0, 0 },
+ { "umul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hl_encode_fns, 0, 0 },
+ { "umul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_lh_encode_fns, 0, 0 },
+ { "umul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hh_encode_fns, 0, 0 },
+ { "mul.ad.ll", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_ll_encode_fns, 0, 0 },
+ { "mul.ad.hl", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hl_encode_fns, 0, 0 },
+ { "mul.ad.lh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_lh_encode_fns, 0, 0 },
+ { "mul.ad.hh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hh_encode_fns, 0, 0 },
+ { "mul.da.ll", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_ll_encode_fns, 0, 0 },
+ { "mul.da.hl", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hl_encode_fns, 0, 0 },
+ { "mul.da.lh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_lh_encode_fns, 0, 0 },
+ { "mul.da.hh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hh_encode_fns, 0, 0 },
+ { "mul.dd.ll", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_ll_encode_fns, 0, 0 },
+ { "mul.dd.hl", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hl_encode_fns, 0, 0 },
+ { "mul.dd.lh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_lh_encode_fns, 0, 0 },
+ { "mul.dd.hh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hh_encode_fns, 0, 0 },
+ { "mula.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_ll_encode_fns, 0, 0 },
+ { "mula.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hl_encode_fns, 0, 0 },
+ { "mula.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_lh_encode_fns, 0, 0 },
+ { "mula.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hh_encode_fns, 0, 0 },
+ { "muls.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_ll_encode_fns, 0, 0 },
+ { "muls.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hl_encode_fns, 0, 0 },
+ { "muls.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_lh_encode_fns, 0, 0 },
+ { "muls.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hh_encode_fns, 0, 0 },
+ { "mula.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_ll_encode_fns, 0, 0 },
+ { "mula.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hl_encode_fns, 0, 0 },
+ { "mula.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_lh_encode_fns, 0, 0 },
+ { "mula.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hh_encode_fns, 0, 0 },
+ { "muls.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_ll_encode_fns, 0, 0 },
+ { "muls.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hl_encode_fns, 0, 0 },
+ { "muls.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_lh_encode_fns, 0, 0 },
+ { "muls.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hh_encode_fns, 0, 0 },
+ { "mula.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_ll_encode_fns, 0, 0 },
+ { "mula.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hl_encode_fns, 0, 0 },
+ { "mula.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_lh_encode_fns, 0, 0 },
+ { "mula.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hh_encode_fns, 0, 0 },
+ { "muls.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_ll_encode_fns, 0, 0 },
+ { "muls.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hl_encode_fns, 0, 0 },
+ { "muls.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_lh_encode_fns, 0, 0 },
+ { "muls.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hh_encode_fns, 0, 0 },
+ { "mula.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_ll_encode_fns, 0, 0 },
+ { "mula.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hl_encode_fns, 0, 0 },
+ { "mula.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_lh_encode_fns, 0, 0 },
+ { "mula.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hh_encode_fns, 0, 0 },
+ { "muls.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_ll_encode_fns, 0, 0 },
+ { "muls.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hl_encode_fns, 0, 0 },
+ { "muls.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_lh_encode_fns, 0, 0 },
+ { "muls.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hh_encode_fns, 0, 0 },
+ { "mula.da.ll.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_lddec_encode_fns, 0, 0 },
+ { "mula.da.ll.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hl.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_lddec_encode_fns, 0, 0 },
+ { "mula.da.hl.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.da.lh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_lddec_encode_fns, 0, 0 },
+ { "mula.da.lh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_lddec_encode_fns, 0, 0 },
+ { "mula.da.hh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.ll.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_lddec_encode_fns, 0, 0 },
+ { "mula.dd.ll.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hl.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hl.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.lh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.lh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_ldinc_encode_fns, 0, 0 },
+ { "lddec", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_lddec_encode_fns, 0, 0 },
+ { "ldinc", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_ldinc_encode_fns, 0, 0 },
+ { "rsr.m0", ICLASS_xt_iclass_rsr_m0,
+ 0,
+ Opcode_rsr_m0_encode_fns, 0, 0 },
+ { "wsr.m0", ICLASS_xt_iclass_wsr_m0,
+ 0,
+ Opcode_wsr_m0_encode_fns, 0, 0 },
+ { "xsr.m0", ICLASS_xt_iclass_xsr_m0,
+ 0,
+ Opcode_xsr_m0_encode_fns, 0, 0 },
+ { "rsr.m1", ICLASS_xt_iclass_rsr_m1,
+ 0,
+ Opcode_rsr_m1_encode_fns, 0, 0 },
+ { "wsr.m1", ICLASS_xt_iclass_wsr_m1,
+ 0,
+ Opcode_wsr_m1_encode_fns, 0, 0 },
+ { "xsr.m1", ICLASS_xt_iclass_xsr_m1,
+ 0,
+ Opcode_xsr_m1_encode_fns, 0, 0 },
+ { "rsr.m2", ICLASS_xt_iclass_rsr_m2,
+ 0,
+ Opcode_rsr_m2_encode_fns, 0, 0 },
+ { "wsr.m2", ICLASS_xt_iclass_wsr_m2,
+ 0,
+ Opcode_wsr_m2_encode_fns, 0, 0 },
+ { "xsr.m2", ICLASS_xt_iclass_xsr_m2,
+ 0,
+ Opcode_xsr_m2_encode_fns, 0, 0 },
+ { "rsr.m3", ICLASS_xt_iclass_rsr_m3,
+ 0,
+ Opcode_rsr_m3_encode_fns, 0, 0 },
+ { "wsr.m3", ICLASS_xt_iclass_wsr_m3,
+ 0,
+ Opcode_wsr_m3_encode_fns, 0, 0 },
+ { "xsr.m3", ICLASS_xt_iclass_xsr_m3,
+ 0,
+ Opcode_xsr_m3_encode_fns, 0, 0 },
+ { "rsr.acclo", ICLASS_xt_iclass_rsr_acclo,
+ 0,
+ Opcode_rsr_acclo_encode_fns, 0, 0 },
+ { "wsr.acclo", ICLASS_xt_iclass_wsr_acclo,
+ 0,
+ Opcode_wsr_acclo_encode_fns, 0, 0 },
+ { "xsr.acclo", ICLASS_xt_iclass_xsr_acclo,
+ 0,
+ Opcode_xsr_acclo_encode_fns, 0, 0 },
+ { "rsr.acchi", ICLASS_xt_iclass_rsr_acchi,
+ 0,
+ Opcode_rsr_acchi_encode_fns, 0, 0 },
+ { "wsr.acchi", ICLASS_xt_iclass_wsr_acchi,
+ 0,
+ Opcode_wsr_acchi_encode_fns, 0, 0 },
+ { "xsr.acchi", ICLASS_xt_iclass_xsr_acchi,
+ 0,
+ Opcode_xsr_acchi_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", ICLASS_xt_iclass_rsr_ccompare2,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", ICLASS_xt_iclass_wsr_ccompare2,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", ICLASS_xt_iclass_xsr_ccompare2,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipfl", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "ihu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dpfl", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "dhu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "wsr.ptevaddr", ICLASS_xt_iclass_wsr_ptevaddr,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", ICLASS_xt_iclass_rsr_ptevaddr,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", ICLASS_xt_iclass_xsr_ptevaddr,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", ICLASS_xt_iclass_rsr_rasid,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", ICLASS_xt_iclass_wsr_rasid,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", ICLASS_xt_iclass_xsr_rasid,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", ICLASS_xt_iclass_rsr_itlbcfg,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", ICLASS_xt_iclass_wsr_itlbcfg,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", ICLASS_xt_iclass_xsr_itlbcfg,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", ICLASS_xt_iclass_rsr_dtlbcfg,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", ICLASS_xt_iclass_wsr_dtlbcfg,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", ICLASS_xt_iclass_xsr_dtlbcfg,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", ICLASS_xt_iclass_ldpte,
+ 0,
+ Opcode_ldpte_encode_fns, 0, 0 },
+ { "hwwitlba", ICLASS_xt_iclass_hwwitlba,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", ICLASS_xt_iclass_hwwdtlba,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "rsr.cpenable", ICLASS_xt_iclass_rsr_cpenable,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", ICLASS_xt_iclass_wsr_cpenable,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", ICLASS_xt_iclass_xsr_cpenable,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "quou", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "quos", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "remu", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rems", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "read_impwire", ICLASS_iclass_READ_IMPWIRE,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", ICLASS_iclass_SETB_EXPSTATE,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", ICLASS_iclass_CLRB_EXPSTATE,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", ICLASS_iclass_WRMSK_EXPSTATE,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 }
+};
+
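+/*
+ * The enum below lists one identifier per entry of the opcode table
+ * above, in the same order (compare the bne..wrmsk_expstate sequence),
+ * so the decode functions further down can name an instruction by its
+ * position in that table.
+ */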
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_SIMCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_RUR_THREADPTR,
+ OPCODE_WUR_THREADPTR,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPNEZ,
+ OPCODE_LOOPGTZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_176,
+ OPCODE_WSR_176,
+ OPCODE_RSR_208,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPC7,
+ OPCODE_WSR_EPC7,
+ OPCODE_XSR_EPC7,
+ OPCODE_RSR_EXCSAVE7,
+ OPCODE_WSR_EXCSAVE7,
+ OPCODE_XSR_EXCSAVE7,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EPS7,
+ OPCODE_WSR_EPS7,
+ OPCODE_XSR_EPS7,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_MULL,
+ OPCODE_MUL_AA_LL,
+ OPCODE_MUL_AA_HL,
+ OPCODE_MUL_AA_LH,
+ OPCODE_MUL_AA_HH,
+ OPCODE_UMUL_AA_LL,
+ OPCODE_UMUL_AA_HL,
+ OPCODE_UMUL_AA_LH,
+ OPCODE_UMUL_AA_HH,
+ OPCODE_MUL_AD_LL,
+ OPCODE_MUL_AD_HL,
+ OPCODE_MUL_AD_LH,
+ OPCODE_MUL_AD_HH,
+ OPCODE_MUL_DA_LL,
+ OPCODE_MUL_DA_HL,
+ OPCODE_MUL_DA_LH,
+ OPCODE_MUL_DA_HH,
+ OPCODE_MUL_DD_LL,
+ OPCODE_MUL_DD_HL,
+ OPCODE_MUL_DD_LH,
+ OPCODE_MUL_DD_HH,
+ OPCODE_MULA_AA_LL,
+ OPCODE_MULA_AA_HL,
+ OPCODE_MULA_AA_LH,
+ OPCODE_MULA_AA_HH,
+ OPCODE_MULS_AA_LL,
+ OPCODE_MULS_AA_HL,
+ OPCODE_MULS_AA_LH,
+ OPCODE_MULS_AA_HH,
+ OPCODE_MULA_AD_LL,
+ OPCODE_MULA_AD_HL,
+ OPCODE_MULA_AD_LH,
+ OPCODE_MULA_AD_HH,
+ OPCODE_MULS_AD_LL,
+ OPCODE_MULS_AD_HL,
+ OPCODE_MULS_AD_LH,
+ OPCODE_MULS_AD_HH,
+ OPCODE_MULA_DA_LL,
+ OPCODE_MULA_DA_HL,
+ OPCODE_MULA_DA_LH,
+ OPCODE_MULA_DA_HH,
+ OPCODE_MULS_DA_LL,
+ OPCODE_MULS_DA_HL,
+ OPCODE_MULS_DA_LH,
+ OPCODE_MULS_DA_HH,
+ OPCODE_MULA_DD_LL,
+ OPCODE_MULA_DD_HL,
+ OPCODE_MULA_DD_LH,
+ OPCODE_MULA_DD_HH,
+ OPCODE_MULS_DD_LL,
+ OPCODE_MULS_DD_HL,
+ OPCODE_MULS_DD_LH,
+ OPCODE_MULS_DD_HH,
+ OPCODE_MULA_DA_LL_LDDEC,
+ OPCODE_MULA_DA_LL_LDINC,
+ OPCODE_MULA_DA_HL_LDDEC,
+ OPCODE_MULA_DA_HL_LDINC,
+ OPCODE_MULA_DA_LH_LDDEC,
+ OPCODE_MULA_DA_LH_LDINC,
+ OPCODE_MULA_DA_HH_LDDEC,
+ OPCODE_MULA_DA_HH_LDINC,
+ OPCODE_MULA_DD_LL_LDDEC,
+ OPCODE_MULA_DD_LL_LDINC,
+ OPCODE_MULA_DD_HL_LDDEC,
+ OPCODE_MULA_DD_HL_LDINC,
+ OPCODE_MULA_DD_LH_LDDEC,
+ OPCODE_MULA_DD_LH_LDINC,
+ OPCODE_MULA_DD_HH_LDDEC,
+ OPCODE_MULA_DD_HH_LDINC,
+ OPCODE_LDDEC,
+ OPCODE_LDINC,
+ OPCODE_RSR_M0,
+ OPCODE_WSR_M0,
+ OPCODE_XSR_M0,
+ OPCODE_RSR_M1,
+ OPCODE_WSR_M1,
+ OPCODE_XSR_M1,
+ OPCODE_RSR_M2,
+ OPCODE_WSR_M2,
+ OPCODE_XSR_M2,
+ OPCODE_RSR_M3,
+ OPCODE_WSR_M3,
+ OPCODE_XSR_M3,
+ OPCODE_RSR_ACCLO,
+ OPCODE_WSR_ACCLO,
+ OPCODE_XSR_ACCLO,
+ OPCODE_RSR_ACCHI,
+ OPCODE_WSR_ACCHI,
+ OPCODE_XSR_ACCHI,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_RSR_CCOMPARE2,
+ OPCODE_WSR_CCOMPARE2,
+ OPCODE_XSR_CCOMPARE2,
+ OPCODE_IPF,
+ OPCODE_IHI,
+ OPCODE_IPFL,
+ OPCODE_IHU,
+ OPCODE_IIU,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFW,
+ OPCODE_DPFRO,
+ OPCODE_DPFWO,
+ OPCODE_DPFL,
+ OPCODE_DHU,
+ OPCODE_DIU,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_WSR_PTEVADDR,
+ OPCODE_RSR_PTEVADDR,
+ OPCODE_XSR_PTEVADDR,
+ OPCODE_RSR_RASID,
+ OPCODE_WSR_RASID,
+ OPCODE_XSR_RASID,
+ OPCODE_RSR_ITLBCFG,
+ OPCODE_WSR_ITLBCFG,
+ OPCODE_XSR_ITLBCFG,
+ OPCODE_RSR_DTLBCFG,
+ OPCODE_WSR_DTLBCFG,
+ OPCODE_XSR_DTLBCFG,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_LDPTE,
+ OPCODE_HWWITLBA,
+ OPCODE_HWWDTLBA,
+ OPCODE_RSR_CPENABLE,
+ OPCODE_WSR_CPENABLE,
+ OPCODE_XSR_CPENABLE,
+ OPCODE_CLAMPS,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_QUOU,
+ OPCODE_QUOS,
+ OPCODE_REMU,
+ OPCODE_REMS,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_READ_IMPWIRE,
+ OPCODE_SETB_EXPSTATE,
+ OPCODE_CLRB_EXPSTATE,
+ OPCODE_WRMSK_EXPSTATE
+};
+
+
+/* Slot-specific opcode decode functions. */
+
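+/*
+ * Decode one instruction word for the "inst" slot: dispatch on the op0
+ * major-opcode field, then narrow the match through the op1/op2 and
+ * r/s/t subfields until a single opcode remains.
+ */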
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ break;
+ case 2:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RET;
+ case 1:
+ return OPCODE_RETW;
+ case 2:
+ return OPCODE_JX;
+ }
+ break;
+ case 3:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_CALLX0;
+ case 1:
+ return OPCODE_CALLX4;
+ case 2:
+ return OPCODE_CALLX8;
+ case 3:
+ return OPCODE_CALLX12;
+ }
+ break;
+ }
+ break;
+ case 1:
+ return OPCODE_MOVSP;
+ case 2:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ISYNC;
+ case 1:
+ return OPCODE_RSYNC;
+ case 2:
+ return OPCODE_ESYNC;
+ case 3:
+ return OPCODE_DSYNC;
+ case 8:
+ return OPCODE_EXCW;
+ case 12:
+ return OPCODE_MEMW;
+ case 13:
+ return OPCODE_EXTW;
+ case 15:
+ return OPCODE_NOP;
+ }
+ }
+ break;
+ case 3:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RFE;
+ case 2:
+ return OPCODE_RFDE;
+ case 4:
+ return OPCODE_RFWO;
+ case 5:
+ return OPCODE_RFWU;
+ }
+ break;
+ case 1:
+ return OPCODE_RFI;
+ }
+ break;
+ case 4:
+ return OPCODE_BREAK;
+ case 5:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ break;
+ }
+ break;
+ case 6:
+ return OPCODE_RSIL;
+ case 7:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ break;
+ }
+ break;
+ case 1:
+ return OPCODE_AND;
+ case 2:
+ return OPCODE_OR;
+ case 3:
+ return OPCODE_XOR;
+ case 4:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ break;
+ case 2:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ break;
+ case 3:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ break;
+ case 4:
+ if (Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ break;
+ case 6:
+ return OPCODE_RER;
+ case 7:
+ return OPCODE_WER;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ break;
+ case 14:
+ return OPCODE_NSA;
+ case 15:
+ return OPCODE_NSAU;
+ }
+ break;
+ case 5:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 1:
+ return OPCODE_HWWITLBA;
+ case 3:
+ return OPCODE_RITLB0;
+ case 4:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ break;
+ case 5:
+ return OPCODE_PITLB;
+ case 6:
+ return OPCODE_WITLB;
+ case 7:
+ return OPCODE_RITLB1;
+ case 9:
+ return OPCODE_HWWDTLBA;
+ case 11:
+ return OPCODE_RDTLB0;
+ case 12:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ break;
+ case 13:
+ return OPCODE_PDTLB;
+ case 14:
+ return OPCODE_WDTLB;
+ case 15:
+ return OPCODE_RDTLB1;
+ }
+ break;
+ case 6:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_NEG;
+ case 1:
+ return OPCODE_ABS;
+ }
+ break;
+ case 8:
+ return OPCODE_ADD;
+ case 9:
+ return OPCODE_ADDX2;
+ case 10:
+ return OPCODE_ADDX4;
+ case 11:
+ return OPCODE_ADDX8;
+ case 12:
+ return OPCODE_SUB;
+ case 13:
+ return OPCODE_SUBX2;
+ case 14:
+ return OPCODE_SUBX4;
+ case 15:
+ return OPCODE_SUBX8;
+ }
+ break;
+ case 1:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ case 1:
+ return OPCODE_SLLI;
+ case 2:
+ case 3:
+ return OPCODE_SRAI;
+ case 4:
+ return OPCODE_SRLI;
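+	/*
+	 * op2 == 6 selects XSR; the sr field carries the special-register
+	 * number (e.g. 3 = SAR, 177 = EPC1, 230 = PS), as the case labels
+	 * below show.
+	 */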
+ case 6:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_XSR_LBEG;
+ case 1:
+ return OPCODE_XSR_LEND;
+ case 2:
+ return OPCODE_XSR_LCOUNT;
+ case 3:
+ return OPCODE_XSR_SAR;
+ case 5:
+ return OPCODE_XSR_LITBASE;
+ case 12:
+ return OPCODE_XSR_SCOMPARE1;
+ case 16:
+ return OPCODE_XSR_ACCLO;
+ case 17:
+ return OPCODE_XSR_ACCHI;
+ case 32:
+ return OPCODE_XSR_M0;
+ case 33:
+ return OPCODE_XSR_M1;
+ case 34:
+ return OPCODE_XSR_M2;
+ case 35:
+ return OPCODE_XSR_M3;
+ case 72:
+ return OPCODE_XSR_WINDOWBASE;
+ case 73:
+ return OPCODE_XSR_WINDOWSTART;
+ case 83:
+ return OPCODE_XSR_PTEVADDR;
+ case 90:
+ return OPCODE_XSR_RASID;
+ case 91:
+ return OPCODE_XSR_ITLBCFG;
+ case 92:
+ return OPCODE_XSR_DTLBCFG;
+ case 96:
+ return OPCODE_XSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_XSR_ATOMCTL;
+ case 104:
+ return OPCODE_XSR_DDR;
+ case 128:
+ return OPCODE_XSR_IBREAKA0;
+ case 129:
+ return OPCODE_XSR_IBREAKA1;
+ case 144:
+ return OPCODE_XSR_DBREAKA0;
+ case 145:
+ return OPCODE_XSR_DBREAKA1;
+ case 160:
+ return OPCODE_XSR_DBREAKC0;
+ case 161:
+ return OPCODE_XSR_DBREAKC1;
+ case 177:
+ return OPCODE_XSR_EPC1;
+ case 178:
+ return OPCODE_XSR_EPC2;
+ case 179:
+ return OPCODE_XSR_EPC3;
+ case 180:
+ return OPCODE_XSR_EPC4;
+ case 181:
+ return OPCODE_XSR_EPC5;
+ case 182:
+ return OPCODE_XSR_EPC6;
+ case 183:
+ return OPCODE_XSR_EPC7;
+ case 192:
+ return OPCODE_XSR_DEPC;
+ case 194:
+ return OPCODE_XSR_EPS2;
+ case 195:
+ return OPCODE_XSR_EPS3;
+ case 196:
+ return OPCODE_XSR_EPS4;
+ case 197:
+ return OPCODE_XSR_EPS5;
+ case 198:
+ return OPCODE_XSR_EPS6;
+ case 199:
+ return OPCODE_XSR_EPS7;
+ case 209:
+ return OPCODE_XSR_EXCSAVE1;
+ case 210:
+ return OPCODE_XSR_EXCSAVE2;
+ case 211:
+ return OPCODE_XSR_EXCSAVE3;
+ case 212:
+ return OPCODE_XSR_EXCSAVE4;
+ case 213:
+ return OPCODE_XSR_EXCSAVE5;
+ case 214:
+ return OPCODE_XSR_EXCSAVE6;
+ case 215:
+ return OPCODE_XSR_EXCSAVE7;
+ case 224:
+ return OPCODE_XSR_CPENABLE;
+ case 228:
+ return OPCODE_XSR_INTENABLE;
+ case 230:
+ return OPCODE_XSR_PS;
+ case 231:
+ return OPCODE_XSR_VECBASE;
+ case 232:
+ return OPCODE_XSR_EXCCAUSE;
+ case 233:
+ return OPCODE_XSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_XSR_CCOUNT;
+ case 236:
+ return OPCODE_XSR_ICOUNT;
+ case 237:
+ return OPCODE_XSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_XSR_EXCVADDR;
+ case 240:
+ return OPCODE_XSR_CCOMPARE0;
+ case 241:
+ return OPCODE_XSR_CCOMPARE1;
+ case 242:
+ return OPCODE_XSR_CCOMPARE2;
+ case 244:
+ return OPCODE_XSR_MISC0;
+ case 245:
+ return OPCODE_XSR_MISC1;
+ }
+ break;
+ case 8:
+ return OPCODE_SRC;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ break;
+ case 10:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ break;
+ case 12:
+ return OPCODE_MUL16U;
+ case 13:
+ return OPCODE_MUL16S;
+ case 15:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_LICT;
+ case 1:
+ return OPCODE_SICT;
+ case 2:
+ return OPCODE_LICW;
+ case 3:
+ return OPCODE_SICW;
+ case 8:
+ return OPCODE_LDCT;
+ case 9:
+ return OPCODE_SDCT;
+ case 14:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ break;
+ case 15:
+ return OPCODE_LDPTE;
+ }
+ break;
+ }
+ break;
+ case 2:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 8:
+ return OPCODE_MULL;
+ case 12:
+ return OPCODE_QUOU;
+ case 13:
+ return OPCODE_QUOS;
+ case 14:
+ return OPCODE_REMU;
+ case 15:
+ return OPCODE_REMS;
+ }
+ break;
+ case 3:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RSR_LBEG;
+ case 1:
+ return OPCODE_RSR_LEND;
+ case 2:
+ return OPCODE_RSR_LCOUNT;
+ case 3:
+ return OPCODE_RSR_SAR;
+ case 5:
+ return OPCODE_RSR_LITBASE;
+ case 12:
+ return OPCODE_RSR_SCOMPARE1;
+ case 16:
+ return OPCODE_RSR_ACCLO;
+ case 17:
+ return OPCODE_RSR_ACCHI;
+ case 32:
+ return OPCODE_RSR_M0;
+ case 33:
+ return OPCODE_RSR_M1;
+ case 34:
+ return OPCODE_RSR_M2;
+ case 35:
+ return OPCODE_RSR_M3;
+ case 72:
+ return OPCODE_RSR_WINDOWBASE;
+ case 73:
+ return OPCODE_RSR_WINDOWSTART;
+ case 83:
+ return OPCODE_RSR_PTEVADDR;
+ case 90:
+ return OPCODE_RSR_RASID;
+ case 91:
+ return OPCODE_RSR_ITLBCFG;
+ case 92:
+ return OPCODE_RSR_DTLBCFG;
+ case 96:
+ return OPCODE_RSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_RSR_ATOMCTL;
+ case 104:
+ return OPCODE_RSR_DDR;
+ case 128:
+ return OPCODE_RSR_IBREAKA0;
+ case 129:
+ return OPCODE_RSR_IBREAKA1;
+ case 144:
+ return OPCODE_RSR_DBREAKA0;
+ case 145:
+ return OPCODE_RSR_DBREAKA1;
+ case 160:
+ return OPCODE_RSR_DBREAKC0;
+ case 161:
+ return OPCODE_RSR_DBREAKC1;
+ case 176:
+ return OPCODE_RSR_176;
+ case 177:
+ return OPCODE_RSR_EPC1;
+ case 178:
+ return OPCODE_RSR_EPC2;
+ case 179:
+ return OPCODE_RSR_EPC3;
+ case 180:
+ return OPCODE_RSR_EPC4;
+ case 181:
+ return OPCODE_RSR_EPC5;
+ case 182:
+ return OPCODE_RSR_EPC6;
+ case 183:
+ return OPCODE_RSR_EPC7;
+ case 192:
+ return OPCODE_RSR_DEPC;
+ case 194:
+ return OPCODE_RSR_EPS2;
+ case 195:
+ return OPCODE_RSR_EPS3;
+ case 196:
+ return OPCODE_RSR_EPS4;
+ case 197:
+ return OPCODE_RSR_EPS5;
+ case 198:
+ return OPCODE_RSR_EPS6;
+ case 199:
+ return OPCODE_RSR_EPS7;
+ case 208:
+ return OPCODE_RSR_208;
+ case 209:
+ return OPCODE_RSR_EXCSAVE1;
+ case 210:
+ return OPCODE_RSR_EXCSAVE2;
+ case 211:
+ return OPCODE_RSR_EXCSAVE3;
+ case 212:
+ return OPCODE_RSR_EXCSAVE4;
+ case 213:
+ return OPCODE_RSR_EXCSAVE5;
+ case 214:
+ return OPCODE_RSR_EXCSAVE6;
+ case 215:
+ return OPCODE_RSR_EXCSAVE7;
+ case 224:
+ return OPCODE_RSR_CPENABLE;
+ case 226:
+ return OPCODE_RSR_INTERRUPT;
+ case 228:
+ return OPCODE_RSR_INTENABLE;
+ case 230:
+ return OPCODE_RSR_PS;
+ case 231:
+ return OPCODE_RSR_VECBASE;
+ case 232:
+ return OPCODE_RSR_EXCCAUSE;
+ case 233:
+ return OPCODE_RSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_RSR_CCOUNT;
+ case 235:
+ return OPCODE_RSR_PRID;
+ case 236:
+ return OPCODE_RSR_ICOUNT;
+ case 237:
+ return OPCODE_RSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_RSR_EXCVADDR;
+ case 240:
+ return OPCODE_RSR_CCOMPARE0;
+ case 241:
+ return OPCODE_RSR_CCOMPARE1;
+ case 242:
+ return OPCODE_RSR_CCOMPARE2;
+ case 244:
+ return OPCODE_RSR_MISC0;
+ case 245:
+ return OPCODE_RSR_MISC1;
+ }
+ break;
+ case 1:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_WSR_LBEG;
+ case 1:
+ return OPCODE_WSR_LEND;
+ case 2:
+ return OPCODE_WSR_LCOUNT;
+ case 3:
+ return OPCODE_WSR_SAR;
+ case 5:
+ return OPCODE_WSR_LITBASE;
+ case 12:
+ return OPCODE_WSR_SCOMPARE1;
+ case 16:
+ return OPCODE_WSR_ACCLO;
+ case 17:
+ return OPCODE_WSR_ACCHI;
+ case 32:
+ return OPCODE_WSR_M0;
+ case 33:
+ return OPCODE_WSR_M1;
+ case 34:
+ return OPCODE_WSR_M2;
+ case 35:
+ return OPCODE_WSR_M3;
+ case 72:
+ return OPCODE_WSR_WINDOWBASE;
+ case 73:
+ return OPCODE_WSR_WINDOWSTART;
+ case 83:
+ return OPCODE_WSR_PTEVADDR;
+ case 89:
+ return OPCODE_WSR_MMID;
+ case 90:
+ return OPCODE_WSR_RASID;
+ case 91:
+ return OPCODE_WSR_ITLBCFG;
+ case 92:
+ return OPCODE_WSR_DTLBCFG;
+ case 96:
+ return OPCODE_WSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_WSR_ATOMCTL;
+ case 104:
+ return OPCODE_WSR_DDR;
+ case 128:
+ return OPCODE_WSR_IBREAKA0;
+ case 129:
+ return OPCODE_WSR_IBREAKA1;
+ case 144:
+ return OPCODE_WSR_DBREAKA0;
+ case 145:
+ return OPCODE_WSR_DBREAKA1;
+ case 160:
+ return OPCODE_WSR_DBREAKC0;
+ case 161:
+ return OPCODE_WSR_DBREAKC1;
+ case 176:
+ return OPCODE_WSR_176;
+ case 177:
+ return OPCODE_WSR_EPC1;
+ case 178:
+ return OPCODE_WSR_EPC2;
+ case 179:
+ return OPCODE_WSR_EPC3;
+ case 180:
+ return OPCODE_WSR_EPC4;
+ case 181:
+ return OPCODE_WSR_EPC5;
+ case 182:
+ return OPCODE_WSR_EPC6;
+ case 183:
+ return OPCODE_WSR_EPC7;
+ case 192:
+ return OPCODE_WSR_DEPC;
+ case 194:
+ return OPCODE_WSR_EPS2;
+ case 195:
+ return OPCODE_WSR_EPS3;
+ case 196:
+ return OPCODE_WSR_EPS4;
+ case 197:
+ return OPCODE_WSR_EPS5;
+ case 198:
+ return OPCODE_WSR_EPS6;
+ case 199:
+ return OPCODE_WSR_EPS7;
+ case 209:
+ return OPCODE_WSR_EXCSAVE1;
+ case 210:
+ return OPCODE_WSR_EXCSAVE2;
+ case 211:
+ return OPCODE_WSR_EXCSAVE3;
+ case 212:
+ return OPCODE_WSR_EXCSAVE4;
+ case 213:
+ return OPCODE_WSR_EXCSAVE5;
+ case 214:
+ return OPCODE_WSR_EXCSAVE6;
+ case 215:
+ return OPCODE_WSR_EXCSAVE7;
+ case 224:
+ return OPCODE_WSR_CPENABLE;
+ case 226:
+ return OPCODE_WSR_INTSET;
+ case 227:
+ return OPCODE_WSR_INTCLEAR;
+ case 228:
+ return OPCODE_WSR_INTENABLE;
+ case 230:
+ return OPCODE_WSR_PS;
+ case 231:
+ return OPCODE_WSR_VECBASE;
+ case 232:
+ return OPCODE_WSR_EXCCAUSE;
+ case 233:
+ return OPCODE_WSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_WSR_CCOUNT;
+ case 236:
+ return OPCODE_WSR_ICOUNT;
+ case 237:
+ return OPCODE_WSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_WSR_EXCVADDR;
+ case 240:
+ return OPCODE_WSR_CCOMPARE0;
+ case 241:
+ return OPCODE_WSR_CCOMPARE1;
+ case 242:
+ return OPCODE_WSR_CCOMPARE2;
+ case 244:
+ return OPCODE_WSR_MISC0;
+ case 245:
+ return OPCODE_WSR_MISC1;
+ }
+ break;
+ case 2:
+ return OPCODE_SEXT;
+ case 3:
+ return OPCODE_CLAMPS;
+ case 4:
+ return OPCODE_MIN;
+ case 5:
+ return OPCODE_MAX;
+ case 6:
+ return OPCODE_MINU;
+ case 7:
+ return OPCODE_MAXU;
+ case 8:
+ return OPCODE_MOVEQZ;
+ case 9:
+ return OPCODE_MOVNEZ;
+ case 10:
+ return OPCODE_MOVLTZ;
+ case 11:
+ return OPCODE_MOVGEZ;
+ case 14:
+ switch (Field_st_Slot_inst_get (insn))
+ {
+ case 230:
+ return OPCODE_RUR_EXPSTATE;
+ case 231:
+ return OPCODE_RUR_THREADPTR;
+ }
+ break;
+ case 15:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 230:
+ return OPCODE_WUR_EXPSTATE;
+ case 231:
+ return OPCODE_WUR_THREADPTR;
+ }
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ return OPCODE_EXTUI;
+ case 9:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_L32E;
+ case 4:
+ return OPCODE_S32E;
+ }
+ break;
+ }
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_READ_IMPWIRE;
+ break;
+ case 1:
+ if (Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_SETB_EXPSTATE;
+ if (Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_CLRB_EXPSTATE;
+ break;
+ case 2:
+ if (Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_WRMSK_EXPSTATE;
+ break;
+ }
+ break;
+ case 1:
+ return OPCODE_L32R;
+ case 2:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_L8UI;
+ case 1:
+ return OPCODE_L16UI;
+ case 2:
+ return OPCODE_L32I;
+ case 4:
+ return OPCODE_S8I;
+ case 5:
+ return OPCODE_S16I;
+ case 6:
+ return OPCODE_S32I;
+ case 7:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_DPFR;
+ case 1:
+ return OPCODE_DPFW;
+ case 2:
+ return OPCODE_DPFRO;
+ case 3:
+ return OPCODE_DPFWO;
+ case 4:
+ return OPCODE_DHWB;
+ case 5:
+ return OPCODE_DHWBI;
+ case 6:
+ return OPCODE_DHI;
+ case 7:
+ return OPCODE_DII;
+ case 8:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_DPFL;
+ case 2:
+ return OPCODE_DHU;
+ case 3:
+ return OPCODE_DIU;
+ case 4:
+ return OPCODE_DIWB;
+ case 5:
+ return OPCODE_DIWBI;
+ }
+ break;
+ case 12:
+ return OPCODE_IPF;
+ case 13:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_IPFL;
+ case 2:
+ return OPCODE_IHU;
+ case 3:
+ return OPCODE_IIU;
+ }
+ break;
+ case 14:
+ return OPCODE_IHI;
+ case 15:
+ return OPCODE_III;
+ }
+ break;
+ case 9:
+ return OPCODE_L16SI;
+ case 10:
+ return OPCODE_MOVI;
+ case 11:
+ return OPCODE_L32AI;
+ case 12:
+ return OPCODE_ADDI;
+ case 13:
+ return OPCODE_ADDMI;
+ case 14:
+ return OPCODE_S32C1I;
+ case 15:
+ return OPCODE_S32RI;
+ }
+ break;
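+    /*
+     * op0 == 4 covers the MAC16 multiply/accumulate group; each match
+     * below is only accepted when the unused t3/tlo/r3 (and, for the
+     * MUL/MULA/MULS variants, s and w) subfields are zero.
+     */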
+ case 4:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDINC;
+ break;
+ case 9:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDINC;
+ break;
+ case 10:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDINC;
+ break;
+ case 11:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDINC;
+ break;
+ }
+ break;
+ case 1:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDDEC;
+ break;
+ case 9:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDDEC;
+ break;
+ case 10:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDDEC;
+ break;
+ case 11:
+ if (Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDDEC;
+ break;
+ }
+ break;
+ case 2:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LL;
+ break;
+ case 5:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HL;
+ break;
+ case 6:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LH;
+ break;
+ case 7:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HH;
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL;
+ break;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL;
+ break;
+ case 10:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH;
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH;
+ break;
+ case 12:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LL;
+ break;
+ case 13:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HL;
+ break;
+ case 14:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LH;
+ break;
+ case 15:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HH;
+ break;
+ }
+ break;
+ case 3:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LL;
+ break;
+ case 5:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HL;
+ break;
+ case 6:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LH;
+ break;
+ case 7:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HH;
+ break;
+ case 8:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LL;
+ break;
+ case 9:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HL;
+ break;
+ case 10:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LH;
+ break;
+ case 11:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HH;
+ break;
+ case 12:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LL;
+ break;
+ case 13:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HL;
+ break;
+ case 14:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LH;
+ break;
+ case 15:
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HH;
+ break;
+ }
+ break;
+ case 4:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDINC;
+ break;
+ case 9:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDINC;
+ break;
+ case 10:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDINC;
+ break;
+ case 11:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDINC;
+ break;
+ }
+ break;
+ case 5:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 8:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDDEC;
+ break;
+ case 9:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDDEC;
+ break;
+ case 10:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDDEC;
+ break;
+ case 11:
+ if (Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDDEC;
+ break;
+ }
+ break;
+ case 6:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LL;
+ break;
+ case 5:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HL;
+ break;
+ case 6:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LH;
+ break;
+ case 7:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HH;
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL;
+ break;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL;
+ break;
+ case 10:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH;
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH;
+ break;
+ case 12:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LL;
+ break;
+ case 13:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HL;
+ break;
+ case 14:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LH;
+ break;
+ case 15:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HH;
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LL;
+ break;
+ case 1:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HL;
+ break;
+ case 2:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LH;
+ break;
+ case 3:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HH;
+ break;
+ case 4:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LL;
+ break;
+ case 5:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HL;
+ break;
+ case 6:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LH;
+ break;
+ case 7:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HH;
+ break;
+ case 8:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LL;
+ break;
+ case 9:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HL;
+ break;
+ case 10:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LH;
+ break;
+ case 11:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HH;
+ break;
+ case 12:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LL;
+ break;
+ case 13:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HL;
+ break;
+ case 14:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LH;
+ break;
+ case 15:
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HH;
+ break;
+ }
+ break;
+ case 8:
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDINC;
+ break;
+ case 9:
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDDEC;
+ break;
+ }
+ break;
+ case 5:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_CALL0;
+ case 1:
+ return OPCODE_CALL4;
+ case 2:
+ return OPCODE_CALL8;
+ case 3:
+ return OPCODE_CALL12;
+ }
+ break;
+ case 6:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_J;
+ case 1:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQZ;
+ case 1:
+ return OPCODE_BNEZ;
+ case 2:
+ return OPCODE_BLTZ;
+ case 3:
+ return OPCODE_BGEZ;
+ }
+ break;
+ case 2:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQI;
+ case 1:
+ return OPCODE_BNEI;
+ case 2:
+ return OPCODE_BLTI;
+ case 3:
+ return OPCODE_BGEI;
+ }
+ break;
+ case 3:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ENTRY;
+ case 1:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 8:
+ return OPCODE_LOOP;
+ case 9:
+ return OPCODE_LOOPNEZ;
+ case 10:
+ return OPCODE_LOOPGTZ;
+ }
+ break;
+ case 2:
+ return OPCODE_BLTUI;
+ case 3:
+ return OPCODE_BGEUI;
+ }
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BNONE;
+ case 1:
+ return OPCODE_BEQ;
+ case 2:
+ return OPCODE_BLT;
+ case 3:
+ return OPCODE_BLTU;
+ case 4:
+ return OPCODE_BALL;
+ case 5:
+ return OPCODE_BBC;
+ case 6:
+ case 7:
+ return OPCODE_BBCI;
+ case 8:
+ return OPCODE_BANY;
+ case 9:
+ return OPCODE_BNE;
+ case 10:
+ return OPCODE_BGE;
+ case 11:
+ return OPCODE_BGEU;
+ case 12:
+ return OPCODE_BNALL;
+ case 13:
+ return OPCODE_BBS;
+ case 14:
+ case 15:
+ return OPCODE_BBSI;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16b_get (insn))
+ {
+ case 12:
+ switch (Field_i_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_MOVI_N;
+ case 1:
+ switch (Field_z_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQZ_N;
+ case 1:
+ return OPCODE_BNEZ_N;
+ }
+ break;
+ }
+ break;
+ case 13:
+ switch (Field_r_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_MOV_N;
+ case 15:
+ switch (Field_t_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_RET_N;
+ case 1:
+ return OPCODE_RETW_N;
+ case 2:
+ return OPCODE_BREAK_N;
+ case 3:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ break;
+ case 6:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16a_get (insn))
+ {
+ case 8:
+ return OPCODE_L32I_N;
+ case 9:
+ return OPCODE_S32I_N;
+ case 10:
+ return OPCODE_ADD_N;
+ case 11:
+ return OPCODE_ADDI_N;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_get,
+ Field_rbit2_Slot_inst_get,
+ Field_rhi_Slot_inst_get,
+ Field_t3_Slot_inst_get,
+ Field_tbit2_Slot_inst_get,
+ Field_tlo_Slot_inst_get,
+ Field_w_Slot_inst_get,
+ Field_y_Slot_inst_get,
+ Field_x_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_set,
+ Field_rbit2_Slot_inst_set,
+ Field_rhi_Slot_inst_set,
+ Field_t3_Slot_inst_set,
+ Field_tbit2_Slot_inst_set,
+ Field_tlo_Slot_inst_set,
+ Field_w_Slot_inst_set,
+ Field_y_Slot_inst_set,
+ Field_x_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_get,
+ Field_s3to1_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_set,
+ Field_s3to1_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_get,
+ Field_s3to1_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_set,
+ Field_s3to1_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
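+/* Each entry below, read against how the table is used: { format name,
+   length in bytes, encode fn, slot count, slot id list }. */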
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc)
+ return 2; /* x16b */
+ return -1;
+}
+
+static int length_table[16] = {
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int op0 = insn[0] & 0xf;
+ return length_table[op0];
+}
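+
+/* Note: length_decoder and format_decoder agree on the op0 nibble:
+   values 0..7 select the 3-byte x24 format, 8..11 the 2-byte x16a
+   format, 12..13 the 2-byte x16b format, and 14..15 are invalid
+   (length -1 / format -1) on this core. */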
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 3 /* insn_size */, 0,
+ 3, formats, format_decoder, length_decoder,
+ 3, slots,
+ 56 /* num_fields */,
+ 93, operands,
+ 326, iclasses,
+ 452, opcodes, 0,
+ 2, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 1, interfaces, 0,
+ 0, funcUnits, 0
+};
diff --git a/target/xtensa/core-de212.c b/target/xtensa/core-de212.c
new file mode 100644
index 000000000..50c995ba7
--- /dev/null
+++ b/target/xtensa/core-de212.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-de212/core-isa.h"
+#include "overlay_tool.h"
+
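+/* The generated file included below defines a table named
+   xtensa_modules; the #define renames it to a core-specific symbol so
+   that several cores can be built into the same binary without
+   clashing. */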
+#define xtensa_modules xtensa_modules_de212
+#include "core-de212/xtensa-modules.c.inc"
+
+static XtensaConfig de212 __attribute__((unused)) = {
+ .name = "de212",
+ .gdb_regmap = {
+ .reg = {
+#include "core-de212/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 40000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(de212)
diff --git a/target/xtensa/core-de212/core-isa.h b/target/xtensa/core-de212/core-isa.h
new file mode 100644
index 000000000..90ac32923
--- /dev/null
+++ b/target/xtensa/core-de212/core-isa.h
@@ -0,0 +1,620 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_CORE_DE212_CORE_ISA_H
+#define XTENSA_CORE_DE212_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
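+
+/*
+ * A minimal usage sketch: because these macros are always defined with
+ * value 0 or 1, option-dependent code can test them with #if rather
+ * than #ifdef (which cannot tell "configured off" from "unknown"):
+ *
+ *     #if XCHAL_HAVE_DENSITY
+ *       (16-bit narrow instructions may be used here)
+ *     #endif
+ */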
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_EXCLUSIVE 0 /* L32EX/S32EX instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* num of contexts */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0 /* HiFi Mini Audio Engine pkg */
+
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_PDX8 0 /* PDX8 */
+#define XCHAL_HAVE_PDX16 0 /* PDX16 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+#define XCHAL_HAVE_VISION 0 /* Vision */
+#define XCHAL_VISION_TYPE 0 /* Vision P5 or P3 */
+#define XCHAL_VISION_SIMD16 0 /* Vision simd16 */
+#define XCHAL_HAVE_VISION_HISTOGRAM 0 /* histogram option on Vision */
+#define XCHAL_HAVE_VISION_SP_VFPU 0 /* sp_vfpu option on Vision */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1200000 /* sw version of this header */
+
+#define XCHAL_CORE_ID "de212_371077" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x0005A9EB /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC283DFFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C85A985 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound bus present */
+#define XCHAL_HAVE_AXI 0 /* AXI bus */
+#define XCHAL_HAVE_AXI_ECC 0 /* ECC on AXI bus */
+#define XCHAL_HAVE_ACELITE 0 /* ACELite bus */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
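+
+/* Worked example for this core: ways * 2^(linewidth + setwidth)
+   = 2 * 2^(5 + 7) = 8192 bytes, matching XCHAL_ICACHE_SIZE and
+   XCHAL_DCACHE_SIZE above. */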
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 1 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 1 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 1 /* number of core XLMI ports */
+
+/* Instruction RAM 0: */
+#define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */
+#define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */
+#define XCHAL_INSTRAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_HAVE_INSTRAM0 1
+#define XCHAL_INSTRAM0_HAVE_IDMA 0 /* idma supported by this local memory */
+
+/* Data RAM 0: */
+#define XCHAL_DATARAM0_VADDR 0x3FFE0000 /* virtual address */
+#define XCHAL_DATARAM0_PADDR 0x3FFE0000 /* physical address */
+#define XCHAL_DATARAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_DATARAM0_BANKS 1 /* number of banks */
+#define XCHAL_HAVE_DATARAM0 1
+#define XCHAL_DATARAM0_HAVE_IDMA 0 /* idma supported by this local memory */
+
+/* XLMI Port 0: */
+#define XCHAL_XLMI0_VADDR 0x3FFC0000 /* virtual address */
+#define XCHAL_XLMI0_PADDR 0x3FFC0000 /* physical address */
+#define XCHAL_XLMI0_SIZE 131072 /* size in bytes */
+#define XCHAL_XLMI0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+
+#define XCHAL_HAVE_IDMA 0
+#define XCHAL_HAVE_IDMA_TRANSPOSE 0
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
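+
+/* Cross-check with the per-interrupt levels listed below: the level-1
+   interrupts are 0..7 and 15..20, so XCHAL_INTLEVEL1_MASK
+   = 0x000000FF | 0x001F8000 = 0x001F80FF. */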
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_DONE 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_ERR 0x00000000
+#define XCHAL_INTTYPE_MASK_SG_ERR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
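+
+/* For example, on this core external pin BInterrupt6 is routed to core
+   interrupt 8 (XCHAL_EXTINT6_NUM == 8), and the reverse mapping below
+   gives XCHAL_INT8_EXTNUM == 6. */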
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x60000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x60000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR0_PADDR 0x50000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x40000400
+#define XCHAL_RESET_VECTOR1_PADDR 0x40000400
+#define XCHAL_RESET_VECTOR_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR_PADDR 0x50000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x60000340
+#define XCHAL_USER_VECTOR_PADDR 0x60000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x60000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x60000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x600003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x600003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x60000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x60000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x60000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x60000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x600001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x600001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x60000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x60000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x60000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x60000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x60000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x60000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x600002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x600002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
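+
+/* Taken together, the settings above describe region protection: a
+   single spanning TLB way identity-maps the whole 4 GB address space,
+   with software-controlled access/cache attributes per region and no
+   page tables. */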
+
+/* If none of the last five options above is set, it's a custom TLB configuration. */
+
+#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
+
+/*----------------------------------------------------------------------
+ MPU
+ ----------------------------------------------------------------------*/
+#define XCHAL_HAVE_MPU 0
+#define XCHAL_MPU_ENTRIES 0
+
+#define XCHAL_MPU_ALIGN_REQ 1 /* MPU requires alignment of entries to background map */
+#define XCHAL_MPU_BACKGROUND_ENTRIES 0 /* number of entries in background map */
+
+#define XCHAL_MPU_ALIGN_BITS 0
+#define XCHAL_MPU_ALIGN 0
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_CORE_DE212_CORE_ISA_H */
diff --git a/target/xtensa/core-de212/gdb-config.c.inc b/target/xtensa/core-de212/gdb-config.c.inc
new file mode 100644
index 000000000..25510fc34
--- /dev/null
+++ b/target/xtensa/core-de212/gdb-config.c.inc
@@ -0,0 +1,198 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132,32, 4, 4,0x0200,0x0006,-2, 2,0x1100,lbeg, 0,0,0,0,0,0)
+ XTREG( 34,136,32, 4, 4,0x0201,0x0006,-2, 2,0x1100,lend, 0,0,0,0,0,0)
+ XTREG( 35,140,32, 4, 4,0x0202,0x0006,-2, 2,0x1100,lcount, 0,0,0,0,0,0)
+ XTREG( 36,144, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 37,148, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 38,152, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 39,156,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,configid0, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,configid1, 0,0,0,0,0,0)
+ XTREG( 41,164,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 42,168,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 43,172,32, 4, 4,0x0210,0x0006,-1, 2,0x1100,acclo, 0,0,0,0,0,0)
+ XTREG( 44,176, 8, 4, 4,0x0211,0x0006,-1, 2,0x1100,acchi, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x0220,0x0006,-1, 2,0x1100,m0, 0,0,0,0,0,0)
+ XTREG( 46,184,32, 4, 4,0x0221,0x0006,-1, 2,0x1100,m1, 0,0,0,0,0,0)
+ XTREG( 47,188,32, 4, 4,0x0222,0x0006,-1, 2,0x1100,m2, 0,0,0,0,0,0)
+ XTREG( 48,192,32, 4, 4,0x0223,0x0006,-1, 2,0x1100,m3, 0,0,0,0,0,0)
+ XTREG( 49,196,32, 4, 4,0x03e6,0x000e,-1, 3,0x0110,expstate, 0,0,0,0,0,0)
+ XTREG( 50,200,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0)
+ XTREG( 51,204, 2, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0)
+ XTREG( 52,208, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG( 53,212,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG( 54,216,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0)
+ XTREG( 55,220,32, 4, 4,0x0281,0x0007,-2, 2,0x1000,ibreaka1, 0,0,0,0,0,0)
+ XTREG( 56,224,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0)
+ XTREG( 57,228,32, 4, 4,0x0291,0x0007,-2, 2,0x1000,dbreaka1, 0,0,0,0,0,0)
+ XTREG( 58,232,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0)
+ XTREG( 59,236,32, 4, 4,0x02a1,0x0007,-2, 2,0x1000,dbreakc1, 0,0,0,0,0,0)
+ XTREG( 60,240,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG( 61,244,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG( 62,248,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0)
+ XTREG( 63,252,32, 4, 4,0x02b4,0x0007,-2, 2,0x1000,epc4, 0,0,0,0,0,0)
+ XTREG( 64,256,32, 4, 4,0x02b5,0x0007,-2, 2,0x1000,epc5, 0,0,0,0,0,0)
+ XTREG( 65,260,32, 4, 4,0x02b6,0x0007,-2, 2,0x1000,epc6, 0,0,0,0,0,0)
+ XTREG( 66,264,32, 4, 4,0x02b7,0x0007,-2, 2,0x1000,epc7, 0,0,0,0,0,0)
+ XTREG( 67,268,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG( 68,272,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG( 69,276,19, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0)
+ XTREG( 70,280,19, 4, 4,0x02c4,0x0007,-2, 2,0x1000,eps4, 0,0,0,0,0,0)
+ XTREG( 71,284,19, 4, 4,0x02c5,0x0007,-2, 2,0x1000,eps5, 0,0,0,0,0,0)
+ XTREG( 72,288,19, 4, 4,0x02c6,0x0007,-2, 2,0x1000,eps6, 0,0,0,0,0,0)
+ XTREG( 73,292,19, 4, 4,0x02c7,0x0007,-2, 2,0x1000,eps7, 0,0,0,0,0,0)
+ XTREG( 74,296,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG( 75,300,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG( 76,304,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0)
+ XTREG( 77,308,32, 4, 4,0x02d4,0x0007,-2, 2,0x1000,excsave4, 0,0,0,0,0,0)
+ XTREG( 78,312,32, 4, 4,0x02d5,0x0007,-2, 2,0x1000,excsave5, 0,0,0,0,0,0)
+ XTREG( 79,316,32, 4, 4,0x02d6,0x0007,-2, 2,0x1000,excsave6, 0,0,0,0,0,0)
+ XTREG( 80,320,32, 4, 4,0x02d7,0x0007,-2, 2,0x1000,excsave7, 0,0,0,0,0,0)
+ XTREG( 81,324,22, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG( 82,328,22, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG( 83,332,22, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG( 84,336,22, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG( 85,340,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG( 86,344, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG( 87,348,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG( 88,352,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG( 89,356,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG( 90,360,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG( 91,364, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG( 92,368,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG( 93,372,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG( 94,376,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG( 95,380,32, 4, 4,0x02f2,0x000f,-2, 2,0x1000,ccompare2, 0,0,0,0,0,0)
+ XTREG( 96,384,32, 4, 4,0x02f4,0x0007,-2, 2,0x1000,misc0, 0,0,0,0,0,0)
+ XTREG( 97,388,32, 4, 4,0x02f5,0x0007,-2, 2,0x1000,misc1, 0,0,0,0,0,0)
+ XTREG( 98,392,32, 8, 4,0x2015,0x000f,-2, 4,0x0101,pwrctl,
+ "03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:20:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:20:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG( 99,400,32, 8, 4,0x2016,0x000f,-2, 4,0x0101,pwrstat,
+ "03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:24:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:24:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(100,408, 1, 8, 4,0x2017,0x000f,-2, 4,0x0101,eristat,
+ "03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:28:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0c:03:60:55:11:03:52:c5:28:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(101,416,32, 8, 4,0x2018,0x000f,-2, 4,0x0101,cs_itctrl,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:d5:03:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:d5:03:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(102,424,16, 8, 4,0x2019,0x000f,-2, 4,0x0101,cs_claimset,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:a0:03:52:d5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:a0:03:52:d5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(103,432,16, 8, 4,0x201a,0x000f,-2, 4,0x0101,cs_claimclr,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:a4:03:52:d5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:a4:03:52:d5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(104,440,32, 8, 4,0x201b,0x000f,-2, 4,0x0101,cs_lockaccess,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b0:03:52:d5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b0:03:52:d5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(105,448,32, 8, 4,0x201c,0x000f,-2, 4,0x0101,cs_lockstatus,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b4:03:52:d5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b4:03:52:d5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(106,456, 1, 8, 4,0x201d,0x000f,-2, 4,0x0101,cs_authstatus,
+ "03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b8:03:52:d5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:0f:03:60:55:11:03:52:c5:b8:03:52:d5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(107,464,32, 8, 4,0x202c,0x0007,-2, 4,0x0101,trax_id,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(108,472,32, 8, 4,0x202d,0x000f,-2, 4,0x0101,trax_control,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:04:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:04:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(109,480,32, 8, 4,0x202e,0x000f,-2, 4,0x0101,trax_status,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:08:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:08:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(110,488,32, 8, 4,0x202f,0x000f,-2, 4,0x0101,trax_data,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:0c:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:0c:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(111,496,32, 8, 4,0x2030,0x000f,-2, 4,0x0101,trax_address,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:10:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:10:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(112,504,32, 8, 4,0x2031,0x000f,-2, 4,0x0101,trax_pctrigger,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:14:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:14:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(113,512,32, 8, 4,0x2032,0x000f,-2, 4,0x0101,trax_pcmatch,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:18:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:18:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(114,520,32, 8, 4,0x2033,0x000f,-2, 4,0x0101,trax_delay,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:1c:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:1c:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(115,528,32, 8, 4,0x2034,0x000f,-2, 4,0x0101,trax_memstart,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:20:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:20:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(116,536,32, 8, 4,0x2035,0x000f,-2, 4,0x0101,trax_memend,
+ "03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:24:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:00:03:60:55:11:03:52:c5:24:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(117,544,32, 8, 4,0x2043,0x0007,-2, 4,0x0101,ocdid,
+ "03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(118,552,32, 8, 4,0x2044,0x000f,-2, 4,0x0101,ocd_dcrclr,
+ "03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:08:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:08:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(119,560,32, 8, 4,0x2045,0x000f,-2, 4,0x0101,ocd_dcrset,
+ "03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:0c:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:0c:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(120,568,32, 8, 4,0x2046,0x000f,-2, 4,0x0101,ocd_dsr,
+ "03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:10:03:60:65:40:03:62:64:00:03:52:24:01:03:62:24:02","03:52:64:01:03:62:64:02:03:52:a4:08:03:60:55:11:03:52:c5:10:03:62:24:00:03:60:75:40:03:52:24:01:03:62:24:02",0,0,0,0)
+ XTREG(121,576,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0)
+ XTREG(122,580,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0)
+ XTREG(123,584,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0)
+ XTREG(124,588,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0)
+ XTREG(125,592,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0)
+ XTREG(126,596,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0)
+ XTREG(127,600,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0)
+ XTREG(128,604,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0)
+ XTREG(129,608,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0)
+ XTREG(130,612,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0)
+ XTREG(131,616,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0)
+ XTREG(132,620,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0)
+ XTREG(133,624,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0)
+ XTREG(134,628,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0)
+ XTREG(135,632,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0)
+ XTREG(136,636,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0)
+ XTREG(137,640, 4, 4, 4,0x2008,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(138,644, 1, 4, 4,0x2009,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(139,648, 1, 4, 4,0x200a,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(140,652, 1, 4, 4,0x200b,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(141,656, 2, 4, 4,0x200c,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(142,660, 4, 4, 4,0x200d,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG(143,664,40, 8, 4,0x200e,0x0006,-2, 6,0x1010,acc,
+ 0,0,&xtensa_mask6,0,0,0)
+ XTREG_END
diff --git a/target/xtensa/core-de212/xtensa-modules.c.inc b/target/xtensa/core-de212/xtensa-modules.c.inc
new file mode 100644
index 000000000..480c68d3c
--- /dev/null
+++ b/target/xtensa/core-de212/xtensa-modules.c.inc
@@ -0,0 +1,14543 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "ACCLO", 16, 0 },
+ { "ACCHI", 17, 0 },
+ { "M0", 32, 0 },
+ { "M1", 33, 0 },
+ { "M2", 34, 0 },
+ { "M3", 35, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "CONFIGID0", 176, 0 },
+ { "CONFIGID1", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 64
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 230
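+
+/* Each sysregs[] entry above is (name, register number, is_user):
+   is_user is presumably non-zero for "user registers" accessed with
+   RUR/WUR (here only EXPSTATE, user register 230) and zero for special
+   registers accessed with RSR/WSR/XSR, which is how PS and EXPSTATE can
+   share the number 230 without clashing.  The MAX_* limits above bound
+   the two number spaces.  */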
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 13, 0 },
+ { "EPS3", 13, 0 },
+ { "EPS4", 13, 0 },
+ { "EPS5", 13, 0 },
+ { "EPS6", 13, 0 },
+ { "EPS7", 13, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "ACC", 40, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "ERI_RAW_INTERLOCK", 1, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 64
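+
+/* Each states[] entry above is (name, width in bits, flags); the only
+   flag used here marks EXPSTATE as exported TIE state.  The
+   xtensa_state_id enum below mirrors the array order, so the
+   enumerators double as indices into states[].  */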
+
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EPC7,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EXCSAVE7,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EPS7,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_ACC,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CCOMPARE2,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_ERI_RAW_INTERLOCK,
+ STATE_EXPSTATE
+};
+
+
+/* Field definitions. */
+
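+/* The Field_*_get/Field_*_set pairs below extract and deposit
+   instruction fields with paired shifts rather than masks:
+   (insn[0] << L) >> R reads the (32 - R)-bit field whose least
+   significant bit is bit (R - L) of the instruction word, so
+   Field_t_Slot_inst_get() is equivalent to (insn[0] >> 4) & 0xf.
+   A minimal sketch of the idiom (hypothetical helper, not part of the
+   generated configuration):
+
+       static unsigned field_get(uint32 word, int lo, int width)
+       {
+           return (word << (32 - lo - width)) >> (32 - width);
+       }
+
+   Multi-part fields such as sr concatenate several such reads.  */
+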
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
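+
+/* Note that sr spans two nibbles: the get above concatenates the r
+   field (bits 15:12) with the s field (bits 11:8), presumably forming
+   the 8-bit special-register number used by RSR/WSR/XSR.  */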
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_tlo_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_tlo_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_w_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_w_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_rhi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_rhi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_rbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_rbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_tbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_tbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_y_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_y_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_x_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_x_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
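+/* Implicit operands occupy no encoding bits: the shared set hook below
+   is a no-op and each get hook returns a fixed register number --
+   presumably a0/a4/a8/a12 for the windowed CALL/CALLX variants and
+   m0-m3 for the MAC16 accumulator registers.  The FIELD__ar*/FIELD__mr*
+   enumerators further below refer to these pseudo-fields.  */
+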
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_mr0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_mr1_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+static unsigned
+Implicit_Field_mr2_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 2;
+}
+
+static unsigned
+Implicit_Field_mr3_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 3;
+}
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_r3,
+ FIELD_rbit2,
+ FIELD_rhi,
+ FIELD_t3,
+ FIELD_tbit2,
+ FIELD_tlo,
+ FIELD_w,
+ FIELD_y,
+ FIELD_x,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_bitindex,
+ FIELD_s3to1,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__mr0,
+ FIELD__mr1,
+ FIELD__mr2,
+ FIELD__mr3
+};
+
+
+/* Functional units. */
+
+#define funcUnits 0
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_MR
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "MR", "m", REGFILE_MR, 32, 4 }
+};
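+
+/* Each regfiles[] entry is presumably (name, short prefix, view parent,
+   width in bits, number of entries): AR is the 32-entry, 32-bit
+   address-register file and MR the four MAC16 accumulator registers,
+   each register file being its own parent here.  */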
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "ERI_RD_Out", 14, 0, 0, 'o' },
+ { "ERI_RD_In", 32, 0, 1, 'i' },
+ { "ERI_RD_Rdy", 1, 0, 0, 'i' },
+ { "ERI_WR_Out", 46, 0, 2, 'o' },
+ { "ERI_WR_In", 1, 0, 3, 'i' },
+ { "IMPWIRE", 32, 0, 4, 'i' }
+};
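+
+/* TIE interfaces: each entry looks like (name, width in bits, flags,
+   class id, direction), with 'i' for inputs into the core and 'o' for
+   outputs.  The ERI_* ports are presumably the External Register
+   Interface backing the pwrctl/trax/OCD registers in gdb-config, and
+   IMPWIRE the import wire read by READ_IMPWIRE.  */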
+
+enum xtensa_interface_id {
+ INTERFACE_ERI_RD_Out,
+ INTERFACE_ERI_RD_In,
+ INTERFACE_ERI_RD_Rdy,
+ INTERFACE_ERI_WR_Out,
+ INTERFACE_ERI_WR_In,
+ INTERFACE_IMPWIRE
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
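+
+/* These 16-entry tables map 4-bit immediate encodings to operand
+   values; the trailing 0 looks like a sentinel.  ai4c is presumably the
+   ADDI.N table (encoding 0 stands for -1), while b4c and b4cu are the
+   signed and unsigned branch-immediate tables used by the
+   BEQI/BNEI/BLTUI-style compare-and-branch instructions.  */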
+
+
+/* Instruction operands. */
+
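+/* Operands come in decode/encode pairs: decode turns the raw field
+   value into the operand's semantic value, and encode performs the
+   inverse, returning non-zero when a value cannot be represented.  For
+   example, OperandSem_opnd_sem_MR_0 below maps field values 0-1 to
+   registers m2-m3 and rejects anything else, while
+   OperandSem_opnd_sem_soffsetx4 sign-extends an 18-bit field and
+   rescales it as the byte offset 4 + (value << 2), presumably for
+   CALL-style PC-relative targets.  */
+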
+static int
+OperandSem_opnd_sem_MR_0_decode (uint32 *valp)
+{
+ *valp += 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_0_encode (uint32 *valp)
+{
+ int error;
+ error = ((*valp & ~0x3) != 0) || ((*valp & 0x2) == 0);
+ *valp = *valp & 1;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_out_0;
+ unsigned soffsetx4_in_0;
+ soffsetx4_in_0 = *valp & 0x3ffff;
+ soffsetx4_out_0 = 0x4 + ((((int) soffsetx4_in_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_encode (uint32 *valp)
+{
+ unsigned soffsetx4_in_0;
+ unsigned soffsetx4_out_0;
+ soffsetx4_out_0 = *valp;
+ soffsetx4_in_0 = ((soffsetx4_out_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = soffsetx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_out_0;
+ unsigned uimm12x8_in_0;
+ uimm12x8_in_0 = *valp & 0xfff;
+ uimm12x8_out_0 = uimm12x8_in_0 << 3;
+ *valp = uimm12x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_encode (uint32 *valp)
+{
+ unsigned uimm12x8_in_0;
+ unsigned uimm12x8_out_0;
+ uimm12x8_out_0 = *valp;
+ uimm12x8_in_0 = ((uimm12x8_out_0 >> 3) & 0xfff);
+ *valp = uimm12x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_out_0;
+ unsigned simm4_in_0;
+ simm4_in_0 = *valp & 0xf;
+ simm4_out_0 = ((int) simm4_in_0 << 28) >> 28;
+ *valp = simm4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_encode (uint32 *valp)
+{
+ unsigned simm4_in_0;
+ unsigned simm4_out_0;
+ simm4_out_0 = *valp;
+ simm4_in_0 = (simm4_out_0 & 0xf);
+ *valp = simm4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_out_0;
+ unsigned immrx4_in_0;
+ immrx4_in_0 = *valp & 0xf;
+ immrx4_out_0 = (((0xfffffff) << 4) | immrx4_in_0) << 2;
+ *valp = immrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_encode (uint32 *valp)
+{
+ unsigned immrx4_in_0;
+ unsigned immrx4_out_0;
+ immrx4_out_0 = *valp;
+ immrx4_in_0 = ((immrx4_out_0 >> 2) & 0xf);
+ *valp = immrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_out_0;
+ unsigned lsi4x4_in_0;
+ lsi4x4_in_0 = *valp & 0xf;
+ lsi4x4_out_0 = lsi4x4_in_0 << 2;
+ *valp = lsi4x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_encode (uint32 *valp)
+{
+ unsigned lsi4x4_in_0;
+ unsigned lsi4x4_out_0;
+ lsi4x4_out_0 = *valp;
+ lsi4x4_in_0 = ((lsi4x4_out_0 >> 2) & 0xf);
+ *valp = lsi4x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_out_0;
+ unsigned simm7_in_0;
+ simm7_in_0 = *valp & 0x7f;
+ simm7_out_0 = ((((-((((simm7_in_0 >> 6) & 1)) & (((simm7_in_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | simm7_in_0;
+ *valp = simm7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_encode (uint32 *valp)
+{
+ unsigned simm7_in_0;
+ unsigned simm7_out_0;
+ simm7_out_0 = *valp;
+ simm7_in_0 = (simm7_out_0 & 0x7f);
+ *valp = simm7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_out_0;
+ unsigned uimm6_in_0;
+ uimm6_in_0 = *valp & 0x3f;
+ uimm6_out_0 = 0x4 + (((0) << 6) | uimm6_in_0);
+ *valp = uimm6_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_encode (uint32 *valp)
+{
+ unsigned uimm6_in_0;
+ unsigned uimm6_out_0;
+ uimm6_out_0 = *valp;
+ uimm6_in_0 = (uimm6_out_0 - 0x4) & 0x3f;
+ *valp = uimm6_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_out_0;
+ unsigned ai4const_in_0;
+ ai4const_in_0 = *valp & 0xf;
+ ai4const_out_0 = CONST_TBL_ai4c_0[ai4const_in_0 & 0xf];
+ *valp = ai4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_encode (uint32 *valp)
+{
+ unsigned ai4const_in_0;
+ unsigned ai4const_out_0;
+ ai4const_out_0 = *valp;
+ switch (ai4const_out_0)
+ {
+ case 0xffffffff: ai4const_in_0 = 0; break;
+ case 0x1: ai4const_in_0 = 0x1; break;
+ case 0x2: ai4const_in_0 = 0x2; break;
+ case 0x3: ai4const_in_0 = 0x3; break;
+ case 0x4: ai4const_in_0 = 0x4; break;
+ case 0x5: ai4const_in_0 = 0x5; break;
+ case 0x6: ai4const_in_0 = 0x6; break;
+ case 0x7: ai4const_in_0 = 0x7; break;
+ case 0x8: ai4const_in_0 = 0x8; break;
+ case 0x9: ai4const_in_0 = 0x9; break;
+ case 0xa: ai4const_in_0 = 0xa; break;
+ case 0xb: ai4const_in_0 = 0xb; break;
+ case 0xc: ai4const_in_0 = 0xc; break;
+ case 0xd: ai4const_in_0 = 0xd; break;
+ case 0xe: ai4const_in_0 = 0xe; break;
+ default: ai4const_in_0 = 0xf; break;
+ }
+ *valp = ai4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_out_0;
+ unsigned b4const_in_0;
+ b4const_in_0 = *valp & 0xf;
+ b4const_out_0 = CONST_TBL_b4c_0[b4const_in_0 & 0xf];
+ *valp = b4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_encode (uint32 *valp)
+{
+ unsigned b4const_in_0;
+ unsigned b4const_out_0;
+ b4const_out_0 = *valp;
+ switch (b4const_out_0)
+ {
+ case 0xffffffff: b4const_in_0 = 0; break;
+ case 0x1: b4const_in_0 = 0x1; break;
+ case 0x2: b4const_in_0 = 0x2; break;
+ case 0x3: b4const_in_0 = 0x3; break;
+ case 0x4: b4const_in_0 = 0x4; break;
+ case 0x5: b4const_in_0 = 0x5; break;
+ case 0x6: b4const_in_0 = 0x6; break;
+ case 0x7: b4const_in_0 = 0x7; break;
+ case 0x8: b4const_in_0 = 0x8; break;
+ case 0xa: b4const_in_0 = 0x9; break;
+ case 0xc: b4const_in_0 = 0xa; break;
+ case 0x10: b4const_in_0 = 0xb; break;
+ case 0x20: b4const_in_0 = 0xc; break;
+ case 0x40: b4const_in_0 = 0xd; break;
+ case 0x80: b4const_in_0 = 0xe; break;
+ default: b4const_in_0 = 0xf; break;
+ }
+ *valp = b4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_out_0;
+ unsigned b4constu_in_0;
+ b4constu_in_0 = *valp & 0xf;
+ b4constu_out_0 = CONST_TBL_b4cu_0[b4constu_in_0 & 0xf];
+ *valp = b4constu_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_encode (uint32 *valp)
+{
+ unsigned b4constu_in_0;
+ unsigned b4constu_out_0;
+ b4constu_out_0 = *valp;
+ switch (b4constu_out_0)
+ {
+ case 0x8000: b4constu_in_0 = 0; break;
+ case 0x10000: b4constu_in_0 = 0x1; break;
+ case 0x2: b4constu_in_0 = 0x2; break;
+ case 0x3: b4constu_in_0 = 0x3; break;
+ case 0x4: b4constu_in_0 = 0x4; break;
+ case 0x5: b4constu_in_0 = 0x5; break;
+ case 0x6: b4constu_in_0 = 0x6; break;
+ case 0x7: b4constu_in_0 = 0x7; break;
+ case 0x8: b4constu_in_0 = 0x8; break;
+ case 0xa: b4constu_in_0 = 0x9; break;
+ case 0xc: b4constu_in_0 = 0xa; break;
+ case 0x10: b4constu_in_0 = 0xb; break;
+ case 0x20: b4constu_in_0 = 0xc; break;
+ case 0x40: b4constu_in_0 = 0xd; break;
+ case 0x80: b4constu_in_0 = 0xe; break;
+ default: b4constu_in_0 = 0xf; break;
+ }
+ *valp = b4constu_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_out_0;
+ unsigned uimm8_in_0;
+ uimm8_in_0 = *valp & 0xff;
+ uimm8_out_0 = uimm8_in_0;
+ *valp = uimm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_encode (uint32 *valp)
+{
+ unsigned uimm8_in_0;
+ unsigned uimm8_out_0;
+ uimm8_out_0 = *valp;
+ uimm8_in_0 = (uimm8_out_0 & 0xff);
+ *valp = uimm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_out_0;
+ unsigned uimm8x2_in_0;
+ uimm8x2_in_0 = *valp & 0xff;
+ uimm8x2_out_0 = uimm8x2_in_0 << 1;
+ *valp = uimm8x2_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_encode (uint32 *valp)
+{
+ unsigned uimm8x2_in_0;
+ unsigned uimm8x2_out_0;
+ uimm8x2_out_0 = *valp;
+ uimm8x2_in_0 = ((uimm8x2_out_0 >> 1) & 0xff);
+ *valp = uimm8x2_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_out_0;
+ unsigned uimm8x4_in_0;
+ uimm8x4_in_0 = *valp & 0xff;
+ uimm8x4_out_0 = uimm8x4_in_0 << 2;
+ *valp = uimm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_encode (uint32 *valp)
+{
+ unsigned uimm8x4_in_0;
+ unsigned uimm8x4_out_0;
+ uimm8x4_out_0 = *valp;
+ uimm8x4_in_0 = ((uimm8x4_out_0 >> 2) & 0xff);
+ *valp = uimm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_out_0;
+ unsigned uimm4x16_in_0;
+ uimm4x16_in_0 = *valp & 0xf;
+ uimm4x16_out_0 = uimm4x16_in_0 << 4;
+ *valp = uimm4x16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_encode (uint32 *valp)
+{
+ unsigned uimm4x16_in_0;
+ unsigned uimm4x16_out_0;
+ uimm4x16_out_0 = *valp;
+ uimm4x16_in_0 = ((uimm4x16_out_0 >> 4) & 0xf);
+ *valp = uimm4x16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_decode (uint32 *valp)
+{
+ unsigned uimmrx4_out_0;
+ unsigned uimmrx4_in_0;
+ uimmrx4_in_0 = *valp & 0xf;
+ uimmrx4_out_0 = uimmrx4_in_0 << 2;
+ *valp = uimmrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_encode (uint32 *valp)
+{
+ unsigned uimmrx4_in_0;
+ unsigned uimmrx4_out_0;
+ uimmrx4_out_0 = *valp;
+ uimmrx4_in_0 = ((uimmrx4_out_0 >> 2) & 0xf);
+ *valp = uimmrx4_in_0;
+ return 0;
+}
+
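+/* The ((int) x << N) >> N idiom below sign-extends a narrow field.
+   Like the rest of this generated file it assumes a 32-bit int and
+   an arithmetic right shift of negative values, which ISO C leaves
+   implementation-defined but the hosts targeted here provide.  */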
+static int
+OperandSem_opnd_sem_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_out_0;
+ unsigned simm8_in_0;
+ simm8_in_0 = *valp & 0xff;
+ simm8_out_0 = ((int) simm8_in_0 << 24) >> 24;
+ *valp = simm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_encode (uint32 *valp)
+{
+ unsigned simm8_in_0;
+ unsigned simm8_out_0;
+ simm8_out_0 = *valp;
+ simm8_in_0 = (simm8_out_0 & 0xff);
+ *valp = simm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_out_0;
+ unsigned simm8x256_in_0;
+ simm8x256_in_0 = *valp & 0xff;
+ simm8x256_out_0 = (((int) simm8x256_in_0 << 24) >> 24) << 8;
+ *valp = simm8x256_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_encode (uint32 *valp)
+{
+ unsigned simm8x256_in_0;
+ unsigned simm8x256_out_0;
+ simm8x256_out_0 = *valp;
+ simm8x256_in_0 = ((simm8x256_out_0 >> 8) & 0xff);
+ *valp = simm8x256_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_out_0;
+ unsigned simm12b_in_0;
+ simm12b_in_0 = *valp & 0xfff;
+ simm12b_out_0 = ((int) simm12b_in_0 << 20) >> 20;
+ *valp = simm12b_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_encode (uint32 *valp)
+{
+ unsigned simm12b_in_0;
+ unsigned simm12b_out_0;
+ simm12b_out_0 = *valp;
+ simm12b_in_0 = (simm12b_out_0 & 0xfff);
+ *valp = simm12b_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_out_0;
+ unsigned msalp32_in_0;
+ msalp32_in_0 = *valp & 0x1f;
+ msalp32_out_0 = 0x20 - msalp32_in_0;
+ *valp = msalp32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_encode (uint32 *valp)
+{
+ unsigned msalp32_in_0;
+ unsigned msalp32_out_0;
+ msalp32_out_0 = *valp;
+ msalp32_in_0 = (0x20 - msalp32_out_0) & 0x1f;
+ *valp = msalp32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_out_0;
+ unsigned op2p1_in_0;
+ op2p1_in_0 = *valp & 0xf;
+ op2p1_out_0 = op2p1_in_0 + 0x1;
+ *valp = op2p1_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_encode (uint32 *valp)
+{
+ unsigned op2p1_in_0;
+ unsigned op2p1_out_0;
+ op2p1_out_0 = *valp;
+ op2p1_in_0 = (op2p1_out_0 - 0x1) & 0xf;
+ *valp = op2p1_in_0;
+ return 0;
+}
+
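+/* The label8/ulabel8/label12/soffset operands all decode to
+   4 + offset; combined with the _rtoa hooks further down, a branch
+   or jump target is the instruction address plus 4 plus the encoded
+   offset, regardless of the instruction's actual length.  */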
+static int
+OperandSem_opnd_sem_label8_decode (uint32 *valp)
+{
+ unsigned label8_out_0;
+ unsigned label8_in_0;
+ label8_in_0 = *valp & 0xff;
+ label8_out_0 = 0x4 + (((int) label8_in_0 << 24) >> 24);
+ *valp = label8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_encode (uint32 *valp)
+{
+ unsigned label8_in_0;
+ unsigned label8_out_0;
+ label8_out_0 = *valp;
+ label8_in_0 = (label8_out_0 - 0x4) & 0xff;
+ *valp = label8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_out_0;
+ unsigned ulabel8_in_0;
+ ulabel8_in_0 = *valp & 0xff;
+ ulabel8_out_0 = 0x4 + (((0) << 8) | ulabel8_in_0);
+ *valp = ulabel8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_encode (uint32 *valp)
+{
+ unsigned ulabel8_in_0;
+ unsigned ulabel8_out_0;
+ ulabel8_out_0 = *valp;
+ ulabel8_in_0 = (ulabel8_out_0 - 0x4) & 0xff;
+ *valp = ulabel8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_decode (uint32 *valp)
+{
+ unsigned label12_out_0;
+ unsigned label12_in_0;
+ label12_in_0 = *valp & 0xfff;
+ label12_out_0 = 0x4 + (((int) label12_in_0 << 20) >> 20);
+ *valp = label12_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_encode (uint32 *valp)
+{
+ unsigned label12_in_0;
+ unsigned label12_out_0;
+ label12_out_0 = *valp;
+ label12_in_0 = (label12_out_0 - 0x4) & 0xfff;
+ *valp = label12_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_out_0;
+ unsigned soffset_in_0;
+ soffset_in_0 = *valp & 0x3ffff;
+ soffset_out_0 = 0x4 + (((int) soffset_in_0 << 14) >> 14);
+ *valp = soffset_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_encode (uint32 *valp)
+{
+ unsigned soffset_in_0;
+ unsigned soffset_out_0;
+ soffset_out_0 = *valp;
+ soffset_in_0 = (soffset_out_0 - 0x4) & 0x3ffff;
+ *valp = soffset_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_out_0;
+ unsigned uimm16x4_in_0;
+ uimm16x4_in_0 = *valp & 0xffff;
+ uimm16x4_out_0 = (((0xffff) << 16) | uimm16x4_in_0) << 2;
+ *valp = uimm16x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_encode (uint32 *valp)
+{
+ unsigned uimm16x4_in_0;
+ unsigned uimm16x4_out_0;
+ uimm16x4_out_0 = *valp;
+ uimm16x4_in_0 = (uimm16x4_out_0 >> 2) & 0xffff;
+ *valp = uimm16x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_decode (uint32 *valp)
+{
+ unsigned bbi_out_0;
+ unsigned bbi_in_0;
+ bbi_in_0 = *valp & 0x1f;
+ bbi_out_0 = (0 << 5) | bbi_in_0;
+ *valp = bbi_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_encode (uint32 *valp)
+{
+ unsigned bbi_in_0;
+ unsigned bbi_out_0;
+ bbi_out_0 = *valp;
+ bbi_in_0 = (bbi_out_0 & 0x1f);
+ *valp = bbi_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_decode (uint32 *valp)
+{
+ unsigned s_out_0;
+ unsigned s_in_0;
+ s_in_0 = *valp & 0xf;
+ s_out_0 = (0 << 4) | s_in_0;
+ *valp = s_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_encode (uint32 *valp)
+{
+ unsigned s_in_0;
+ unsigned s_out_0;
+ s_out_0 = *valp;
+ s_in_0 = (s_out_0 & 0xf);
+ *valp = s_in_0;
+ return 0;
+}
+
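+/* MR operands name the MAC16 accumulator registers m0..m3: decoding
+   is the identity, and encoding only range-checks (values >= 4 are
+   rejected).  */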
+static int
+OperandSem_opnd_sem_MR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_MR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_1_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_MR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_2_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_MR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_3_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_MR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_4_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_MR_5_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_5_encode (uint32 *valp)
+{
+ return (*valp >= 4);
+}
+
+static int
+OperandSem_opnd_sem_immt_decode (uint32 *valp)
+{
+ unsigned immt_out_0;
+ unsigned immt_in_0;
+ immt_in_0 = *valp & 0xf;
+ immt_out_0 = immt_in_0;
+ *valp = immt_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_encode (uint32 *valp)
+{
+ unsigned immt_in_0;
+ unsigned immt_out_0;
+ immt_out_0 = *valp;
+ immt_in_0 = immt_out_0 & 0xf;
+ *valp = immt_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_out_0;
+ unsigned tp7_in_0;
+ tp7_in_0 = *valp & 0xf;
+ tp7_out_0 = tp7_in_0 + 0x7;
+ *valp = tp7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_encode (uint32 *valp)
+{
+ unsigned tp7_in_0;
+ unsigned tp7_out_0;
+ tp7_out_0 = *valp;
+ tp7_in_0 = (tp7_out_0 - 0x7) & 0xf;
+ *valp = tp7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_out_0;
+ unsigned xt_wbr15_label_in_0;
+ xt_wbr15_label_in_0 = *valp & 0x7fff;
+ xt_wbr15_label_out_0 = 0x4 + (((int) xt_wbr15_label_in_0 << 17) >> 17);
+ *valp = xt_wbr15_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_in_0;
+ unsigned xt_wbr15_label_out_0;
+ xt_wbr15_label_out_0 = *valp;
+ xt_wbr15_label_in_0 = (xt_wbr15_label_out_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_out_0;
+ unsigned xt_wbr18_label_in_0;
+ xt_wbr18_label_in_0 = *valp & 0x3ffff;
+ xt_wbr18_label_out_0 = 0x4 + (((int) xt_wbr18_label_in_0 << 14) >> 14);
+ *valp = xt_wbr18_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_in_0;
+ unsigned xt_wbr18_label_out_0;
+ xt_wbr18_label_out_0 = *valp;
+ xt_wbr18_label_in_0 = (xt_wbr18_label_out_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_decode (uint32 *valp)
+{
+ unsigned bitindex_out_0;
+ unsigned bitindex_in_0;
+ bitindex_in_0 = *valp & 0x1f;
+ bitindex_out_0 = (0 << 5) | bitindex_in_0;
+ *valp = bitindex_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_encode (uint32 *valp)
+{
+ unsigned bitindex_in_0;
+ unsigned bitindex_out_0;
+ bitindex_out_0 = *valp;
+ bitindex_in_0 = (bitindex_out_0 & 0x1f);
+ *valp = bitindex_in_0;
+ return 0;
+}
+
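+/* PC-relative conversions: _ator turns an absolute address into the
+   relative value that the _encode routines expect, and _rtoa is the
+   inverse.  CALL targets (soffsetx4) are relative to the call's
+   word-aligned PC (pc & ~3), and L32R literals (uimm16x4) are
+   relative to (pc + 3) & ~3.  */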
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
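+/* Operand table.  The columns are: name, field id, register file
+   (-1 for immediates), number of registers, flags, the encode/decode
+   hooks, and the ator/rtoa PC-relative hooks (0 when the operand is
+   not PC-relative).  The trailing entries with all-zero handlers
+   expose raw instruction fields directly as operands.  */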
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffsetx4_encode, OperandSem_opnd_sem_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm12x8_encode, OperandSem_opnd_sem_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm4_encode, OperandSem_opnd_sem_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_0_encode, OperandSem_opnd_sem_AR_0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_1_encode, OperandSem_opnd_sem_AR_1_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_2_encode, OperandSem_opnd_sem_AR_2_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_3_encode, OperandSem_opnd_sem_AR_3_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_4_encode, OperandSem_opnd_sem_AR_4_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immrx4_encode, OperandSem_opnd_sem_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm7_encode, OperandSem_opnd_sem_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm6_encode, OperandSem_opnd_sem_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ai4const_encode, OperandSem_opnd_sem_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4const_encode, OperandSem_opnd_sem_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4constu_encode, OperandSem_opnd_sem_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8_encode, OperandSem_opnd_sem_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x2_encode, OperandSem_opnd_sem_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x4_encode, OperandSem_opnd_sem_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm4x16_encode, OperandSem_opnd_sem_uimm4x16_decode,
+ 0, 0 },
+ { "uimmrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimmrx4_encode, OperandSem_opnd_sem_uimmrx4_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8_encode, OperandSem_opnd_sem_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8x256_encode, OperandSem_opnd_sem_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm12b_encode, OperandSem_opnd_sem_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ OperandSem_opnd_sem_msalp32_encode, OperandSem_opnd_sem_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_op2p1_encode, OperandSem_opnd_sem_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label8_encode, OperandSem_opnd_sem_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_ulabel8_encode, OperandSem_opnd_sem_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label12_encode, OperandSem_opnd_sem_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm16x4_encode, OperandSem_opnd_sem_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "bbi", FIELD_bbi, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sae", FIELD_sae, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sas", FIELD_sas, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_s_encode, OperandSem_opnd_sem_s_decode,
+ 0, 0 },
+ { "mx", FIELD_x, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_encode, OperandSem_opnd_sem_MR_decode,
+ 0, 0 },
+ { "my", FIELD_y, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_0_encode, OperandSem_opnd_sem_MR_0_decode,
+ 0, 0 },
+ { "mw", FIELD_w, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_MR_1_encode, OperandSem_opnd_sem_MR_1_decode,
+ 0, 0 },
+ { "mr0", FIELD__mr0, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_2_encode, OperandSem_opnd_sem_MR_2_decode,
+ 0, 0 },
+ { "mr1", FIELD__mr1, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_3_encode, OperandSem_opnd_sem_MR_3_decode,
+ 0, 0 },
+ { "mr2", FIELD__mr2, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_4_encode, OperandSem_opnd_sem_MR_4_decode,
+ 0, 0 },
+ { "mr3", FIELD__mr3, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_5_encode, OperandSem_opnd_sem_MR_5_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_tp7_encode, OperandSem_opnd_sem_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr15_label_encode, OperandSem_opnd_sem_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr18_label_encode, OperandSem_opnd_sem_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "bitindex", FIELD_bitindex, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bitindex_encode, OperandSem_opnd_sem_bitindex_decode,
+ 0, 0 },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "r3", FIELD_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "rbit2", FIELD_rbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "rhi", FIELD_rhi, -1, 0, 0, 0, 0, 0, 0 },
+ { "t3", FIELD_t3, -1, 0, 0, 0, 0, 0, 0 },
+ { "tbit2", FIELD_tbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "tlo", FIELD_tlo, -1, 0, 0, 0, 0, 0, 0 },
+ { "w", FIELD_w, -1, 0, 0, 0, 0, 0, 0 },
+ { "y", FIELD_y, -1, 0, 0, 0, 0, 0, 0 },
+ { "x", FIELD_x, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", FIELD_s3to1, -1, 0, 0, 0, 0, 0, 0 }
+};
+
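+/* Operand ids.  The enumerators must stay in the same order as the
+   operands[] table above, since the iclass argument lists below
+   refer to entries by these values.  */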
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_uimmrx4,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_bbi,
+ OPERAND_sae,
+ OPERAND_sas,
+ OPERAND_sargt,
+ OPERAND_s,
+ OPERAND_mx,
+ OPERAND_my,
+ OPERAND_mw,
+ OPERAND_mr0,
+ OPERAND_mr1,
+ OPERAND_mr2,
+ OPERAND_mr3,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_bitindex,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sal,
+ OPERAND_sas4,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_r3,
+ OPERAND_rbit2,
+ OPERAND_rhi,
+ OPERAND_t3,
+ OPERAND_tbit2,
+ OPERAND_tlo,
+ OPERAND_w,
+ OPERAND_y,
+ OPERAND_x,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_s3to1
+};
+
+
+/* Iclass table. */
+
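+/* Each iclass lists its visible operands and its implicit processor
+   state, each tagged with a direction code: 'i' input, 'o' output,
+   'm' both.  (The 's' tag on the ENTRY base register below appears
+   to be a special in/out case used by this generator; that reading
+   is an assumption.)  */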
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
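+/* CALLn/CALLXn implicitly write the return address into a4, a8 or
+   a12 (which becomes the callee's a0 once the window rotates),
+   modelled here as invisible output operands, and set PS.CALLINC.  */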
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
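+/* From here on the file repeats one pattern per special register:
+   RSR reads it (art is 'o', the state 'i'), WSR writes it, and XSR
+   exchanges it (both art and the state are 'm').  */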
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32nb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimmrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_memctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_memctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_memctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'm' }
+};
+
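+/*
+ * Multiplier and MAC16 operand tables. The mac16al (load-and-accumulate)
+ * forms also write an MW register and update the ARS address register,
+ * hence the extra 'o'/'m' operands relative to the plain mac16a forms.
+ */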
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_l_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m0_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m0_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m0_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m1_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m1_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m1_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m2_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m2_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m2_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m3_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m3_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m3_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
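+/*
+ * RFI s returns from a level-s interrupt: it reloads PS from EPS[s] and
+ * the return address from EPC[s], so the PS fields appear as outputs and
+ * every EPC/EPS level as an input in the table below.
+ */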
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
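+/*
+ * On-chip debug support: LDDR32.P/SDDR32.P transfer data through the
+ * Debug Data Register (DDR), and RFDO/RFDD return from OCD mode.
+ */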
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_stateArgs[] = {
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
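+/* Cache maintenance, prefetch, and TLB management operand tables. */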
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_dyn_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_stateArgs[] = {
+ { { STATE_ERI_RAW_INTERLOCK }, 'i' }
+};
+
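+/* RER/WER access off-core registers through the ERI interface ports. */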
+static xtensa_interface Iclass_xt_iclass_rer_intfArgs[] = {
+ INTERFACE_ERI_RD_In,
+ INTERFACE_ERI_RD_Out
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_stateArgs[] = {
+ { { STATE_ERI_RAW_INTERLOCK }, 'o' }
+};
+
+static xtensa_interface Iclass_xt_iclass_wer_intfArgs[] = {
+ INTERFACE_ERI_WR_In,
+ INTERFACE_ERI_WR_Out
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
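+/*
+ * Master iclass table: each row gives the operand count and table, the
+ * state-operand count and table, and the interface-operand count and
+ * table for one instruction class (0/0 where a class has none).
+ */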
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 2, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 1, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 1, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 5, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 5, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 1, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 1, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 1, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 1, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 1, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 1, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32nb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 6, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid0_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_configid0_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid1_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 6, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 6, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 6, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 1, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 1, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 1, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 1, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 1, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 1, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 1, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 1, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 1, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 1, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 1, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 1, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 1, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 1, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 1, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 1, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 1, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 1, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 1, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 1, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 1, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 1, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 1, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 1, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 1, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 1, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 1, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 1, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 1, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 1, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 1, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 1, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 1, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 1, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 1, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 1, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 1, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 1, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 1, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 1, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 1, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 1, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 1, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 1, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 1, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 1, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 1, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 1, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 1, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 1, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 1, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 1, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 1, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 1, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 1, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 1, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 1, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 1, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 1, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 1, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 1, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 1, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 1, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 1, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 1, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 1, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 2, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 1, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 1, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 1, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 1, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 1, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 1, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 1, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 1, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 1, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 1, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 1, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_aa_args,
+ 1, Iclass_xt_iclass_mac16_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_ad_args,
+ 1, Iclass_xt_iclass_mac16_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_da_args,
+ 1, Iclass_xt_iclass_mac16_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_dd_args,
+ 1, Iclass_xt_iclass_mac16_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_aa_args,
+ 1, Iclass_xt_iclass_mac16a_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_ad_args,
+ 1, Iclass_xt_iclass_mac16a_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_da_args,
+ 1, Iclass_xt_iclass_mac16a_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_dd_args,
+ 1, Iclass_xt_iclass_mac16a_dd_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_da_args,
+ 1, Iclass_xt_iclass_mac16al_da_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_dd_args,
+ 1, Iclass_xt_iclass_mac16al_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_l_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m3_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acclo_args,
+ 1, Iclass_xt_iclass_rsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acclo_args,
+ 1, Iclass_xt_iclass_wsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acclo_args,
+ 1, Iclass_xt_iclass_xsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acchi_args,
+ 1, Iclass_xt_iclass_rsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acchi_args,
+ 1, Iclass_xt_iclass_wsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acchi_args,
+ 1, Iclass_xt_iclass_xsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 20, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 1, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 1, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 2, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 2, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 1, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 1, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 1, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 2, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 2, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 2, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 1, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 2, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 2, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 1, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 1, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 1, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 1, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 2, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 2, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_lddr32_p_args,
+ 3, Iclass_xt_iclass_lddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sddr32_p_args,
+ 2, Iclass_xt_iclass_sddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 9, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 1, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 1, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 2, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 2, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 1, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 2, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 2, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 1, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 2, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 2, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 1, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 2, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 2, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_dcache_dyn_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 1, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 1, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 1, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 2, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 2, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rer_args,
+ 1, Iclass_xt_iclass_rer_stateArgs, 2, Iclass_xt_iclass_rer_intfArgs },
+ { 2, Iclass_xt_iclass_wer_args,
+ 1, Iclass_xt_iclass_wer_stateArgs, 2, Iclass_xt_iclass_wer_intfArgs },
+ { 1, Iclass_rur_expstate_args,
+ 1, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 1, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 0, 0, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 1, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 1, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 1, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 }
+};
+
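+/* The enumerators below index iclasses[] in declaration order. */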
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s32nb,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_memctl,
+ ICLASS_xt_iclass_wsr_memctl,
+ ICLASS_xt_iclass_xsr_memctl,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_configid0,
+ ICLASS_xt_iclass_wsr_configid0,
+ ICLASS_xt_iclass_rsr_configid1,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_epc7,
+ ICLASS_xt_iclass_wsr_epc7,
+ ICLASS_xt_iclass_xsr_epc7,
+ ICLASS_xt_iclass_rsr_excsave7,
+ ICLASS_xt_iclass_wsr_excsave7,
+ ICLASS_xt_iclass_xsr_excsave7,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_eps7,
+ ICLASS_xt_iclass_wsr_eps7,
+ ICLASS_xt_iclass_xsr_eps7,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_iclass_mac16_aa,
+ ICLASS_xt_iclass_mac16_ad,
+ ICLASS_xt_iclass_mac16_da,
+ ICLASS_xt_iclass_mac16_dd,
+ ICLASS_xt_iclass_mac16a_aa,
+ ICLASS_xt_iclass_mac16a_ad,
+ ICLASS_xt_iclass_mac16a_da,
+ ICLASS_xt_iclass_mac16a_dd,
+ ICLASS_xt_iclass_mac16al_da,
+ ICLASS_xt_iclass_mac16al_dd,
+ ICLASS_xt_iclass_mac16_l,
+ ICLASS_xt_iclass_rsr_m0,
+ ICLASS_xt_iclass_wsr_m0,
+ ICLASS_xt_iclass_xsr_m0,
+ ICLASS_xt_iclass_rsr_m1,
+ ICLASS_xt_iclass_wsr_m1,
+ ICLASS_xt_iclass_xsr_m1,
+ ICLASS_xt_iclass_rsr_m2,
+ ICLASS_xt_iclass_wsr_m2,
+ ICLASS_xt_iclass_xsr_m2,
+ ICLASS_xt_iclass_rsr_m3,
+ ICLASS_xt_iclass_wsr_m3,
+ ICLASS_xt_iclass_xsr_m3,
+ ICLASS_xt_iclass_rsr_acclo,
+ ICLASS_xt_iclass_wsr_acclo,
+ ICLASS_xt_iclass_xsr_acclo,
+ ICLASS_xt_iclass_rsr_acchi,
+ ICLASS_xt_iclass_wsr_acchi,
+ ICLASS_xt_iclass_xsr_acchi,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_lddr32_p,
+ ICLASS_xt_iclass_sddr32_p,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_rsr_ccompare2,
+ ICLASS_xt_iclass_wsr_ccompare2,
+ ICLASS_xt_iclass_xsr_ccompare2,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_lock,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_dyn,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_dcache_lock,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_div,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_iclass_READ_IMPWIRE,
+ ICLASS_iclass_SETB_EXPSTATE,
+ ICLASS_iclass_CLRB_EXPSTATE,
+ ICLASS_iclass_WRMSK_EXPSTATE
+};
+
+
+/* Opcode encodings. */
+
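+/*
+ * Each encode function writes the fixed opcode bits of one instruction
+ * into the slot buffer, with every operand field left at zero; operand
+ * values are merged in separately through the per-field setter functions.
+ * For instance, ADDI's template 0xc002 is the RRI8 format with op0 = 2
+ * and r = 0xc, while the t, s and imm8 fields remain clear.
+ */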
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s32nb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590000;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36100;
+}
+
+static void
+Opcode_wsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136100;
+}
+
+static void
+Opcode_xsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616100;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_configid1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b700;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b700;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b700;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d700;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d700;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c700;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c700;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c700;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
+static void
+Opcode_mul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x740004;
+}
+
+static void
+Opcode_mul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x750004;
+}
+
+static void
+Opcode_mul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x760004;
+}
+
+static void
+Opcode_mul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770004;
+}
+
+static void
+Opcode_umul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700004;
+}
+
+static void
+Opcode_umul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x710004;
+}
+
+static void
+Opcode_umul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720004;
+}
+
+static void
+Opcode_umul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730004;
+}
+
+static void
+Opcode_mul_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340004;
+}
+
+static void
+Opcode_mul_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350004;
+}
+
+static void
+Opcode_mul_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x360004;
+}
+
+static void
+Opcode_mul_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370004;
+}
+
+static void
+Opcode_mul_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640004;
+}
+
+static void
+Opcode_mul_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650004;
+}
+
+static void
+Opcode_mul_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660004;
+}
+
+static void
+Opcode_mul_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670004;
+}
+
+static void
+Opcode_mul_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240004;
+}
+
+static void
+Opcode_mul_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250004;
+}
+
+static void
+Opcode_mul_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260004;
+}
+
+static void
+Opcode_mul_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270004;
+}
+
+static void
+Opcode_mula_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780004;
+}
+
+static void
+Opcode_mula_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x790004;
+}
+
+static void
+Opcode_mula_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7a0004;
+}
+
+static void
+Opcode_mula_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0004;
+}
+
+static void
+Opcode_muls_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c0004;
+}
+
+static void
+Opcode_muls_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d0004;
+}
+
+static void
+Opcode_muls_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e0004;
+}
+
+static void
+Opcode_muls_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f0004;
+}
+
+static void
+Opcode_mula_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_mula_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x390004;
+}
+
+static void
+Opcode_mula_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a0004;
+}
+
+static void
+Opcode_mula_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0004;
+}
+
+static void
+Opcode_muls_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0004;
+}
+
+static void
+Opcode_muls_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0004;
+}
+
+static void
+Opcode_muls_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0004;
+}
+
+static void
+Opcode_muls_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f0004;
+}
+
+static void
+Opcode_mula_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680004;
+}
+
+static void
+Opcode_mula_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690004;
+}
+
+static void
+Opcode_mula_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0004;
+}
+
+static void
+Opcode_mula_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0004;
+}
+
+static void
+Opcode_muls_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0004;
+}
+
+static void
+Opcode_muls_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0004;
+}
+
+static void
+Opcode_muls_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0004;
+}
+
+static void
+Opcode_muls_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0004;
+}
+
+static void
+Opcode_mula_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_mula_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x290004;
+}
+
+static void
+Opcode_mula_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0004;
+}
+
+static void
+Opcode_mula_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0004;
+}
+
+static void
+Opcode_muls_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0004;
+}
+
+static void
+Opcode_muls_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0004;
+}
+
+static void
+Opcode_muls_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0004;
+}
+
+static void
+Opcode_muls_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0004;
+}
+
+static void
+Opcode_mula_da_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580004;
+}
+
+static void
+Opcode_mula_da_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480004;
+}
+
+static void
+Opcode_mula_da_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590004;
+}
+
+static void
+Opcode_mula_da_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490004;
+}
+
+static void
+Opcode_mula_da_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0004;
+}
+
+static void
+Opcode_mula_da_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0004;
+}
+
+static void
+Opcode_mula_da_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0004;
+}
+
+static void
+Opcode_mula_da_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0004;
+}
+
+static void
+Opcode_mula_dd_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_mula_dd_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_mula_dd_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x190004;
+}
+
+static void
+Opcode_mula_dd_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90004;
+}
+
+static void
+Opcode_mula_dd_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0004;
+}
+
+static void
+Opcode_mula_dd_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0004;
+}
+
+static void
+Opcode_mula_dd_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0004;
+}
+
+static void
+Opcode_mula_dd_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0004;
+}
+
+static void
+Opcode_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900004;
+}
+
+static void
+Opcode_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800004;
+}
+
+static void
+Opcode_rsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32000;
+}
+
+static void
+Opcode_wsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132000;
+}
+
+static void
+Opcode_xsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612000;
+}
+
+static void
+Opcode_rsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32100;
+}
+
+static void
+Opcode_wsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132100;
+}
+
+static void
+Opcode_xsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612100;
+}
+
+static void
+Opcode_rsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32200;
+}
+
+static void
+Opcode_wsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132200;
+}
+
+static void
+Opcode_xsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612200;
+}
+
+static void
+Opcode_rsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32300;
+}
+
+static void
+Opcode_wsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132300;
+}
+
+static void
+Opcode_xsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612300;
+}
+
+static void
+Opcode_rsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31000;
+}
+
+static void
+Opcode_wsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131000;
+}
+
+static void
+Opcode_xsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611000;
+}
+
+static void
+Opcode_rsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31100;
+}
+
+static void
+Opcode_wsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131100;
+}
+
+static void
+Opcode_xsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611100;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_lddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e0;
+}
+
+static void
+Opcode_sddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f0;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f200;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f200;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f200;
+}
+
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70d2;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270d2;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370d2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwbui_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf7082;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7082;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x27082;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37082;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20000;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20000;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20000;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf20000;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e60;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e600;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0000;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1000;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1200;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe2000;
+}
+
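+/*
+ * Per-opcode encode tables, indexed by slot: entry 0 is the 24-bit "inst"
+ * format, entries 1 and 2 the 16-bit density formats "inst16a" and
+ * "inst16b". A zero entry means the opcode has no encoding in that slot.
+ */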
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32nb_encode_fns[] = {
+ Opcode_s32nb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_memctl_encode_fns[] = {
+ Opcode_rsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_memctl_encode_fns[] = {
+ Opcode_wsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_memctl_encode_fns[] = {
+ Opcode_xsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid0_encode_fns[] = {
+ Opcode_rsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_configid0_encode_fns[] = {
+ Opcode_wsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid1_encode_fns[] = {
+ Opcode_rsr_configid1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_ll_encode_fns[] = {
+ Opcode_mul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hl_encode_fns[] = {
+ Opcode_mul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_lh_encode_fns[] = {
+ Opcode_mul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hh_encode_fns[] = {
+ Opcode_mul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_ll_encode_fns[] = {
+ Opcode_umul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hl_encode_fns[] = {
+ Opcode_umul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_lh_encode_fns[] = {
+ Opcode_umul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hh_encode_fns[] = {
+ Opcode_umul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_ll_encode_fns[] = {
+ Opcode_mul_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hl_encode_fns[] = {
+ Opcode_mul_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_lh_encode_fns[] = {
+ Opcode_mul_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hh_encode_fns[] = {
+ Opcode_mul_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_ll_encode_fns[] = {
+ Opcode_mul_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hl_encode_fns[] = {
+ Opcode_mul_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_lh_encode_fns[] = {
+ Opcode_mul_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hh_encode_fns[] = {
+ Opcode_mul_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_ll_encode_fns[] = {
+ Opcode_mul_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hl_encode_fns[] = {
+ Opcode_mul_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_lh_encode_fns[] = {
+ Opcode_mul_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hh_encode_fns[] = {
+ Opcode_mul_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_ll_encode_fns[] = {
+ Opcode_mula_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hl_encode_fns[] = {
+ Opcode_mula_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_lh_encode_fns[] = {
+ Opcode_mula_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hh_encode_fns[] = {
+ Opcode_mula_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_ll_encode_fns[] = {
+ Opcode_muls_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hl_encode_fns[] = {
+ Opcode_muls_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_lh_encode_fns[] = {
+ Opcode_muls_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hh_encode_fns[] = {
+ Opcode_muls_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_ll_encode_fns[] = {
+ Opcode_mula_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hl_encode_fns[] = {
+ Opcode_mula_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_lh_encode_fns[] = {
+ Opcode_mula_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hh_encode_fns[] = {
+ Opcode_mula_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_ll_encode_fns[] = {
+ Opcode_muls_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hl_encode_fns[] = {
+ Opcode_muls_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_lh_encode_fns[] = {
+ Opcode_muls_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hh_encode_fns[] = {
+ Opcode_muls_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_encode_fns[] = {
+ Opcode_mula_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_encode_fns[] = {
+ Opcode_mula_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_encode_fns[] = {
+ Opcode_mula_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_encode_fns[] = {
+ Opcode_mula_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_ll_encode_fns[] = {
+ Opcode_muls_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hl_encode_fns[] = {
+ Opcode_muls_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_lh_encode_fns[] = {
+ Opcode_muls_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hh_encode_fns[] = {
+ Opcode_muls_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_encode_fns[] = {
+ Opcode_mula_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_encode_fns[] = {
+ Opcode_mula_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_encode_fns[] = {
+ Opcode_mula_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_encode_fns[] = {
+ Opcode_mula_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_ll_encode_fns[] = {
+ Opcode_muls_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hl_encode_fns[] = {
+ Opcode_muls_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_lh_encode_fns[] = {
+ Opcode_muls_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hh_encode_fns[] = {
+ Opcode_muls_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_lddec_encode_fns[] = {
+ Opcode_mula_da_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_ldinc_encode_fns[] = {
+ Opcode_mula_da_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_lddec_encode_fns[] = {
+ Opcode_mula_da_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_ldinc_encode_fns[] = {
+ Opcode_mula_da_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_lddec_encode_fns[] = {
+ Opcode_mula_da_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_ldinc_encode_fns[] = {
+ Opcode_mula_da_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_lddec_encode_fns[] = {
+ Opcode_mula_da_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_ldinc_encode_fns[] = {
+ Opcode_mula_da_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_lddec_encode_fns[] = {
+ Opcode_mula_dd_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_ldinc_encode_fns[] = {
+ Opcode_mula_dd_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_lddec_encode_fns[] = {
+ Opcode_mula_dd_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_lddec_encode_fns[] = {
+ Opcode_mula_dd_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_lddec_encode_fns[] = {
+ Opcode_mula_dd_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddec_encode_fns[] = {
+ Opcode_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldinc_encode_fns[] = {
+ Opcode_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m0_encode_fns[] = {
+ Opcode_rsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m0_encode_fns[] = {
+ Opcode_wsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m0_encode_fns[] = {
+ Opcode_xsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m1_encode_fns[] = {
+ Opcode_rsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m1_encode_fns[] = {
+ Opcode_wsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m1_encode_fns[] = {
+ Opcode_xsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m2_encode_fns[] = {
+ Opcode_rsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m2_encode_fns[] = {
+ Opcode_wsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m2_encode_fns[] = {
+ Opcode_xsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m3_encode_fns[] = {
+ Opcode_rsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m3_encode_fns[] = {
+ Opcode_wsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m3_encode_fns[] = {
+ Opcode_xsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acclo_encode_fns[] = {
+ Opcode_rsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acclo_encode_fns[] = {
+ Opcode_wsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acclo_encode_fns[] = {
+ Opcode_xsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acchi_encode_fns[] = {
+ Opcode_rsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acchi_encode_fns[] = {
+ Opcode_wsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acchi_encode_fns[] = {
+ Opcode_xsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddr32_p_encode_fns[] = {
+ Opcode_lddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sddr32_p_encode_fns[] = {
+ Opcode_sddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbui_p_encode_fns[] = {
+ Opcode_diwbui_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0
+};
+
+
+
+
+
+/* Opcode table. */
+
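+/* Each entry gives the mnemonic, its iclass, an XTENSA_OPCODE_IS_*
+   flag (branch/jump/call/loop, or 0 for none), the per-slot encode
+   functions defined above, and two trailing fields that are unused
+   (0) for every opcode in this core. */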
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s32nb", ICLASS_xt_iclass_s32nb,
+ 0,
+ Opcode_s32nb_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.memctl", ICLASS_xt_iclass_rsr_memctl,
+ 0,
+ Opcode_rsr_memctl_encode_fns, 0, 0 },
+ { "wsr.memctl", ICLASS_xt_iclass_wsr_memctl,
+ 0,
+ Opcode_wsr_memctl_encode_fns, 0, 0 },
+ { "xsr.memctl", ICLASS_xt_iclass_xsr_memctl,
+ 0,
+ Opcode_xsr_memctl_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.configid0", ICLASS_xt_iclass_rsr_configid0,
+ 0,
+ Opcode_rsr_configid0_encode_fns, 0, 0 },
+ { "wsr.configid0", ICLASS_xt_iclass_wsr_configid0,
+ 0,
+ Opcode_wsr_configid0_encode_fns, 0, 0 },
+ { "rsr.configid1", ICLASS_xt_iclass_rsr_configid1,
+ 0,
+ Opcode_rsr_configid1_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", ICLASS_xt_iclass_rsr_epc7,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", ICLASS_xt_iclass_wsr_epc7,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", ICLASS_xt_iclass_xsr_epc7,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", ICLASS_xt_iclass_rsr_excsave7,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", ICLASS_xt_iclass_wsr_excsave7,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", ICLASS_xt_iclass_xsr_excsave7,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", ICLASS_xt_iclass_rsr_eps7,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", ICLASS_xt_iclass_wsr_eps7,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", ICLASS_xt_iclass_xsr_eps7,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "mul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_ll_encode_fns, 0, 0 },
+ { "mul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hl_encode_fns, 0, 0 },
+ { "mul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_lh_encode_fns, 0, 0 },
+ { "mul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hh_encode_fns, 0, 0 },
+ { "umul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_ll_encode_fns, 0, 0 },
+ { "umul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hl_encode_fns, 0, 0 },
+ { "umul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_lh_encode_fns, 0, 0 },
+ { "umul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hh_encode_fns, 0, 0 },
+ { "mul.ad.ll", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_ll_encode_fns, 0, 0 },
+ { "mul.ad.hl", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hl_encode_fns, 0, 0 },
+ { "mul.ad.lh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_lh_encode_fns, 0, 0 },
+ { "mul.ad.hh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hh_encode_fns, 0, 0 },
+ { "mul.da.ll", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_ll_encode_fns, 0, 0 },
+ { "mul.da.hl", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hl_encode_fns, 0, 0 },
+ { "mul.da.lh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_lh_encode_fns, 0, 0 },
+ { "mul.da.hh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hh_encode_fns, 0, 0 },
+ { "mul.dd.ll", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_ll_encode_fns, 0, 0 },
+ { "mul.dd.hl", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hl_encode_fns, 0, 0 },
+ { "mul.dd.lh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_lh_encode_fns, 0, 0 },
+ { "mul.dd.hh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hh_encode_fns, 0, 0 },
+ { "mula.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_ll_encode_fns, 0, 0 },
+ { "mula.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hl_encode_fns, 0, 0 },
+ { "mula.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_lh_encode_fns, 0, 0 },
+ { "mula.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hh_encode_fns, 0, 0 },
+ { "muls.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_ll_encode_fns, 0, 0 },
+ { "muls.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hl_encode_fns, 0, 0 },
+ { "muls.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_lh_encode_fns, 0, 0 },
+ { "muls.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hh_encode_fns, 0, 0 },
+ { "mula.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_ll_encode_fns, 0, 0 },
+ { "mula.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hl_encode_fns, 0, 0 },
+ { "mula.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_lh_encode_fns, 0, 0 },
+ { "mula.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hh_encode_fns, 0, 0 },
+ { "muls.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_ll_encode_fns, 0, 0 },
+ { "muls.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hl_encode_fns, 0, 0 },
+ { "muls.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_lh_encode_fns, 0, 0 },
+ { "muls.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hh_encode_fns, 0, 0 },
+ { "mula.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_ll_encode_fns, 0, 0 },
+ { "mula.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hl_encode_fns, 0, 0 },
+ { "mula.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_lh_encode_fns, 0, 0 },
+ { "mula.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hh_encode_fns, 0, 0 },
+ { "muls.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_ll_encode_fns, 0, 0 },
+ { "muls.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hl_encode_fns, 0, 0 },
+ { "muls.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_lh_encode_fns, 0, 0 },
+ { "muls.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hh_encode_fns, 0, 0 },
+ { "mula.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_ll_encode_fns, 0, 0 },
+ { "mula.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hl_encode_fns, 0, 0 },
+ { "mula.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_lh_encode_fns, 0, 0 },
+ { "mula.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hh_encode_fns, 0, 0 },
+ { "muls.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_ll_encode_fns, 0, 0 },
+ { "muls.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hl_encode_fns, 0, 0 },
+ { "muls.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_lh_encode_fns, 0, 0 },
+ { "muls.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hh_encode_fns, 0, 0 },
+ { "mula.da.ll.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_lddec_encode_fns, 0, 0 },
+ { "mula.da.ll.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hl.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_lddec_encode_fns, 0, 0 },
+ { "mula.da.hl.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.da.lh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_lddec_encode_fns, 0, 0 },
+ { "mula.da.lh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_lddec_encode_fns, 0, 0 },
+ { "mula.da.hh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.ll.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_lddec_encode_fns, 0, 0 },
+ { "mula.dd.ll.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hl.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hl.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.lh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.lh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_ldinc_encode_fns, 0, 0 },
+ { "lddec", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_lddec_encode_fns, 0, 0 },
+ { "ldinc", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_ldinc_encode_fns, 0, 0 },
+ { "rsr.m0", ICLASS_xt_iclass_rsr_m0,
+ 0,
+ Opcode_rsr_m0_encode_fns, 0, 0 },
+ { "wsr.m0", ICLASS_xt_iclass_wsr_m0,
+ 0,
+ Opcode_wsr_m0_encode_fns, 0, 0 },
+ { "xsr.m0", ICLASS_xt_iclass_xsr_m0,
+ 0,
+ Opcode_xsr_m0_encode_fns, 0, 0 },
+ { "rsr.m1", ICLASS_xt_iclass_rsr_m1,
+ 0,
+ Opcode_rsr_m1_encode_fns, 0, 0 },
+ { "wsr.m1", ICLASS_xt_iclass_wsr_m1,
+ 0,
+ Opcode_wsr_m1_encode_fns, 0, 0 },
+ { "xsr.m1", ICLASS_xt_iclass_xsr_m1,
+ 0,
+ Opcode_xsr_m1_encode_fns, 0, 0 },
+ { "rsr.m2", ICLASS_xt_iclass_rsr_m2,
+ 0,
+ Opcode_rsr_m2_encode_fns, 0, 0 },
+ { "wsr.m2", ICLASS_xt_iclass_wsr_m2,
+ 0,
+ Opcode_wsr_m2_encode_fns, 0, 0 },
+ { "xsr.m2", ICLASS_xt_iclass_xsr_m2,
+ 0,
+ Opcode_xsr_m2_encode_fns, 0, 0 },
+ { "rsr.m3", ICLASS_xt_iclass_rsr_m3,
+ 0,
+ Opcode_rsr_m3_encode_fns, 0, 0 },
+ { "wsr.m3", ICLASS_xt_iclass_wsr_m3,
+ 0,
+ Opcode_wsr_m3_encode_fns, 0, 0 },
+ { "xsr.m3", ICLASS_xt_iclass_xsr_m3,
+ 0,
+ Opcode_xsr_m3_encode_fns, 0, 0 },
+ { "rsr.acclo", ICLASS_xt_iclass_rsr_acclo,
+ 0,
+ Opcode_rsr_acclo_encode_fns, 0, 0 },
+ { "wsr.acclo", ICLASS_xt_iclass_wsr_acclo,
+ 0,
+ Opcode_wsr_acclo_encode_fns, 0, 0 },
+ { "xsr.acclo", ICLASS_xt_iclass_xsr_acclo,
+ 0,
+ Opcode_xsr_acclo_encode_fns, 0, 0 },
+ { "rsr.acchi", ICLASS_xt_iclass_rsr_acchi,
+ 0,
+ Opcode_rsr_acchi_encode_fns, 0, 0 },
+ { "wsr.acchi", ICLASS_xt_iclass_wsr_acchi,
+ 0,
+ Opcode_wsr_acchi_encode_fns, 0, 0 },
+ { "xsr.acchi", ICLASS_xt_iclass_xsr_acchi,
+ 0,
+ Opcode_xsr_acchi_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "lddr32.p", ICLASS_xt_iclass_lddr32_p,
+ 0,
+ Opcode_lddr32_p_encode_fns, 0, 0 },
+ { "sddr32.p", ICLASS_xt_iclass_sddr32_p,
+ 0,
+ Opcode_sddr32_p_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", ICLASS_xt_iclass_rsr_ccompare2,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", ICLASS_xt_iclass_wsr_ccompare2,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", ICLASS_xt_iclass_xsr_ccompare2,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipfl", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "ihu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwbui.p", ICLASS_xt_iclass_dcache_dyn,
+ 0,
+ Opcode_diwbui_p_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dpfl", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "dhu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "quou", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "quos", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "remu", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rems", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "read_impwire", ICLASS_iclass_READ_IMPWIRE,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", ICLASS_iclass_SETB_EXPSTATE,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", ICLASS_iclass_CLRB_EXPSTATE,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", ICLASS_iclass_WRMSK_EXPSTATE,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 }
+};
+
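+/* Numeric opcode IDs; the order mirrors the opcode table above. */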
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPNEZ,
+ OPCODE_LOOPGTZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_SIMCALL,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S32NB,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_MEMCTL,
+ OPCODE_WSR_MEMCTL,
+ OPCODE_XSR_MEMCTL,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_CONFIGID0,
+ OPCODE_WSR_CONFIGID0,
+ OPCODE_RSR_CONFIGID1,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPC7,
+ OPCODE_WSR_EPC7,
+ OPCODE_XSR_EPC7,
+ OPCODE_RSR_EXCSAVE7,
+ OPCODE_WSR_EXCSAVE7,
+ OPCODE_XSR_EXCSAVE7,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EPS7,
+ OPCODE_WSR_EPS7,
+ OPCODE_XSR_EPS7,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_MULL,
+ OPCODE_MUL_AA_LL,
+ OPCODE_MUL_AA_HL,
+ OPCODE_MUL_AA_LH,
+ OPCODE_MUL_AA_HH,
+ OPCODE_UMUL_AA_LL,
+ OPCODE_UMUL_AA_HL,
+ OPCODE_UMUL_AA_LH,
+ OPCODE_UMUL_AA_HH,
+ OPCODE_MUL_AD_LL,
+ OPCODE_MUL_AD_HL,
+ OPCODE_MUL_AD_LH,
+ OPCODE_MUL_AD_HH,
+ OPCODE_MUL_DA_LL,
+ OPCODE_MUL_DA_HL,
+ OPCODE_MUL_DA_LH,
+ OPCODE_MUL_DA_HH,
+ OPCODE_MUL_DD_LL,
+ OPCODE_MUL_DD_HL,
+ OPCODE_MUL_DD_LH,
+ OPCODE_MUL_DD_HH,
+ OPCODE_MULA_AA_LL,
+ OPCODE_MULA_AA_HL,
+ OPCODE_MULA_AA_LH,
+ OPCODE_MULA_AA_HH,
+ OPCODE_MULS_AA_LL,
+ OPCODE_MULS_AA_HL,
+ OPCODE_MULS_AA_LH,
+ OPCODE_MULS_AA_HH,
+ OPCODE_MULA_AD_LL,
+ OPCODE_MULA_AD_HL,
+ OPCODE_MULA_AD_LH,
+ OPCODE_MULA_AD_HH,
+ OPCODE_MULS_AD_LL,
+ OPCODE_MULS_AD_HL,
+ OPCODE_MULS_AD_LH,
+ OPCODE_MULS_AD_HH,
+ OPCODE_MULA_DA_LL,
+ OPCODE_MULA_DA_HL,
+ OPCODE_MULA_DA_LH,
+ OPCODE_MULA_DA_HH,
+ OPCODE_MULS_DA_LL,
+ OPCODE_MULS_DA_HL,
+ OPCODE_MULS_DA_LH,
+ OPCODE_MULS_DA_HH,
+ OPCODE_MULA_DD_LL,
+ OPCODE_MULA_DD_HL,
+ OPCODE_MULA_DD_LH,
+ OPCODE_MULA_DD_HH,
+ OPCODE_MULS_DD_LL,
+ OPCODE_MULS_DD_HL,
+ OPCODE_MULS_DD_LH,
+ OPCODE_MULS_DD_HH,
+ OPCODE_MULA_DA_LL_LDDEC,
+ OPCODE_MULA_DA_LL_LDINC,
+ OPCODE_MULA_DA_HL_LDDEC,
+ OPCODE_MULA_DA_HL_LDINC,
+ OPCODE_MULA_DA_LH_LDDEC,
+ OPCODE_MULA_DA_LH_LDINC,
+ OPCODE_MULA_DA_HH_LDDEC,
+ OPCODE_MULA_DA_HH_LDINC,
+ OPCODE_MULA_DD_LL_LDDEC,
+ OPCODE_MULA_DD_LL_LDINC,
+ OPCODE_MULA_DD_HL_LDDEC,
+ OPCODE_MULA_DD_HL_LDINC,
+ OPCODE_MULA_DD_LH_LDDEC,
+ OPCODE_MULA_DD_LH_LDINC,
+ OPCODE_MULA_DD_HH_LDDEC,
+ OPCODE_MULA_DD_HH_LDINC,
+ OPCODE_LDDEC,
+ OPCODE_LDINC,
+ OPCODE_RSR_M0,
+ OPCODE_WSR_M0,
+ OPCODE_XSR_M0,
+ OPCODE_RSR_M1,
+ OPCODE_WSR_M1,
+ OPCODE_XSR_M1,
+ OPCODE_RSR_M2,
+ OPCODE_WSR_M2,
+ OPCODE_XSR_M2,
+ OPCODE_RSR_M3,
+ OPCODE_WSR_M3,
+ OPCODE_XSR_M3,
+ OPCODE_RSR_ACCLO,
+ OPCODE_WSR_ACCLO,
+ OPCODE_XSR_ACCLO,
+ OPCODE_RSR_ACCHI,
+ OPCODE_WSR_ACCHI,
+ OPCODE_XSR_ACCHI,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_LDDR32_P,
+ OPCODE_SDDR32_P,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_RSR_CCOMPARE2,
+ OPCODE_WSR_CCOMPARE2,
+ OPCODE_XSR_CCOMPARE2,
+ OPCODE_IPF,
+ OPCODE_IHI,
+ OPCODE_IPFL,
+ OPCODE_IHU,
+ OPCODE_IIU,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWBUI_P,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFW,
+ OPCODE_DPFRO,
+ OPCODE_DPFWO,
+ OPCODE_DPFL,
+ OPCODE_DHU,
+ OPCODE_DIU,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_CLAMPS,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_QUOU,
+ OPCODE_QUOS,
+ OPCODE_REMU,
+ OPCODE_REMS,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_READ_IMPWIRE,
+ OPCODE_SETB_EXPSTATE,
+ OPCODE_CLRB_EXPSTATE,
+ OPCODE_WRMSK_EXPSTATE
+};
+
+
+/* Slot-specific opcode decode functions. */
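+/* Each decoder matches the slot's op0/op1/op2 and register fields,
+   read via the Field_*_Slot_inst_get accessors, and returns the
+   corresponding OPCODE_* identifier from the enum above. */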
+
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_RET;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_RETW;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_JX;
+ }
+ if (Field_m_Slot_inst_get (insn) == 3)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALLX0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALLX4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALLX8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALLX12;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_MOVSP;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_ISYNC;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RSYNC;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_ESYNC;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DSYNC;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_EXCW;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MEMW;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_EXTW;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_NOP;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 3)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_RFE;
+ if (Field_s_Slot_inst_get (insn) == 2)
+ return OPCODE_RFDE;
+ if (Field_s_Slot_inst_get (insn) == 4)
+ return OPCODE_RFWO;
+ if (Field_s_Slot_inst_get (insn) == 5)
+ return OPCODE_RFWU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFI;
+ }
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BREAK;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ }
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RSIL;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_LDDR32_P;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_SDDR32_P;
+ }
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OR;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_XOR;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RER;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_WER;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_NSA;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_NSAU;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_RITLB0;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_PITLB;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_WITLB;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_RITLB1;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_RDTLB0;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_PDTLB;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_WDTLB;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_RDTLB1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ADD;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_ADDX2;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_ADDX4;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_ADDX8;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_SUB;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_SUBX2;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_SUBX4;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_SUBX8;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 1)
+ {
+ if ((Field_op2_Slot_inst_get (insn) == 0 ||
+ Field_op2_Slot_inst_get (insn) == 1))
+ return OPCODE_SLLI;
+ if ((Field_op2_Slot_inst_get (insn) == 2 ||
+ Field_op2_Slot_inst_get (insn) == 3))
+ return OPCODE_SRAI;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SRLI;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_XSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_XSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_XSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_XSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_XSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_XSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_XSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_XSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_XSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_XSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_XSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_XSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_XSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_XSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_XSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_XSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_XSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_XSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_XSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_XSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_XSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_XSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_XSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_XSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_XSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_XSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_XSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_XSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_XSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_XSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_XSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_XSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_XSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_XSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_XSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_XSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_XSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_XSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_XSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_XSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_XSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_XSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_XSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_XSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_XSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_XSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_XSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_XSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_XSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_XSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_XSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_XSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_XSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_XSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_XSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_XSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_XSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_XSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_XSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_SRC;
+ if (Field_op2_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_op2_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_op2_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MUL16U;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MUL16S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_LICT;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_SICT;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_LICW;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_SICW;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LDCT;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_SDCT;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MULL;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_QUOU;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_QUOS;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_REMU;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_REMS;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_RSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_RSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_RSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_RSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_RSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_RSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_RSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_RSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_RSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_RSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_RSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_RSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_RSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_RSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_RSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_RSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_RSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_RSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_RSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_RSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_RSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_RSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_RSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_RSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_RSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_RSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_RSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_RSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_RSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_RSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_RSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_RSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_RSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_RSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_RSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_RSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_RSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_RSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_RSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 208)
+ return OPCODE_RSR_CONFIGID1;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_RSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_RSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_RSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_RSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_RSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_RSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_RSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_RSR_INTERRUPT;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_RSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_RSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_RSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_RSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_RSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_RSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 235)
+ return OPCODE_RSR_PRID;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_RSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_RSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_RSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_RSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_RSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_RSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_RSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_RSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_WSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_WSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_WSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_WSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_WSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_WSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_WSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_WSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_WSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_WSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_WSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_WSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_WSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_WSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 89)
+ return OPCODE_WSR_MMID;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_WSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_WSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_WSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_WSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_WSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_WSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_WSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_WSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_WSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_WSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_WSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_WSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_WSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_WSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_WSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_WSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_WSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_WSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_WSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_WSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_WSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_WSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_WSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_WSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_WSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_WSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_WSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_WSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_WSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_WSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_WSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_WSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_WSR_INTSET;
+ if (Field_sr_Slot_inst_get (insn) == 227)
+ return OPCODE_WSR_INTCLEAR;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_WSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_WSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_WSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_WSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_WSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_WSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_WSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_SEXT;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_CLAMPS;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MIN;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MAX;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MINU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_MAXU;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ {
+ if (Field_st_Slot_inst_get (insn) == 230)
+ return OPCODE_RUR_EXPSTATE;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WUR_EXPSTATE;
+ }
+ }
+ if ((Field_op1_Slot_inst_get (insn) == 4 ||
+ Field_op1_Slot_inst_get (insn) == 5))
+ return OPCODE_EXTUI;
+ if (Field_op1_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_L32E;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_S32E;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_S32NB;
+ }
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_READ_IMPWIRE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_SETB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_CLRB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_WRMSK_EXPSTATE;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 1)
+ return OPCODE_L32R;
+ if (Field_op0_Slot_inst_get (insn) == 2)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_L32I;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_S16I;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_S32I;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFR;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_DPFW;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_DPFRO;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DPFWO;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_DHWB;
+ if (Field_t_Slot_inst_get (insn) == 5)
+ return OPCODE_DHWBI;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_DHI;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DII;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_DHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_DIU;
+ if (Field_op1_Slot_inst_get (insn) == 4)
+ return OPCODE_DIWB;
+ if (Field_op1_Slot_inst_get (insn) == 5)
+ return OPCODE_DIWBI;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_DIWBUI_P;
+ }
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_IPF;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_IPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_IHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_IIU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_IHI;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_III;
+ }
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVI;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_L32AI;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_S32C1I;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_S32RI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 4)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDDEC;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 5)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALL0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALL4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALL8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALL12;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 6)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_J;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQZ;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTZ;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEZ;
+ }
+ if (Field_n_Slot_inst_get (insn) == 2)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEI;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEI;
+ }
+ if (Field_n_Slot_inst_get (insn) == 3)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_ENTRY;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ {
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LOOP;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_LOOPNEZ;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_LOOPGTZ;
+ }
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTUI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEUI;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 7)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BALL;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_BBC;
+ if ((Field_r_Slot_inst_get (insn) == 6 ||
+ Field_r_Slot_inst_get (insn) == 7))
+ return OPCODE_BBCI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_BANY;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_BNE;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_BGE;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_BNALL;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_BBS;
+ if ((Field_r_Slot_inst_get (insn) == 14 ||
+ Field_r_Slot_inst_get (insn) == 15))
+ return OPCODE_BBSI;
+ }
+ return XTENSA_UNDEFINED;
+}
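+
+/*
+ * Usage sketch (hypothetical helper, assuming the xtensa_insnbuf_word
+ * typedef from xtensa-isa.h): the decoder above is a pure function of
+ * the slot bits, so mapping a raw 24-bit instruction word to an opcode
+ * only takes loading the word into a slot buffer and walking the
+ * field-test tree once.
+ */
+static int __attribute__((unused))
+Slot_inst_decode_example (uint32_t word)
+{
+  xtensa_insnbuf_word buf[1];
+
+  buf[0] = word & 0xffffff;   /* the x24 slot covers the low 24 bits */
+  return Slot_inst_decode (buf);
+}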
+
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16b_get (insn) == 12)
+ {
+ if (Field_i_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_i_Slot_inst16b_get (insn) == 1)
+ {
+ if (Field_z_Slot_inst16b_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_z_Slot_inst16b_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ }
+ }
+ if (Field_op0_Slot_inst16b_get (insn) == 13)
+ {
+ if (Field_r_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_r_Slot_inst16b_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst16b_get (insn) == 0)
+ return OPCODE_RET_N;
+ if (Field_t_Slot_inst16b_get (insn) == 1)
+ return OPCODE_RETW_N;
+ if (Field_t_Slot_inst16b_get (insn) == 2)
+ return OPCODE_BREAK_N;
+ if (Field_t_Slot_inst16b_get (insn) == 3 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ if (Field_t_Slot_inst16b_get (insn) == 6 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ }
+ }
+ return XTENSA_UNDEFINED;
+}
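+
+/*
+ * Worked example: with the usual little-endian field layout (op0 in bits
+ * 0..3, t in 4..7, s in 8..11, r in 12..15), the RET.N encoding 0xf00d
+ * has op0 == 13, r == 15 and t == 0, so the decoder above takes the
+ * op0 == 13 / r == 15 / t == 0 branch and returns OPCODE_RET_N.
+ */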
+
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16a_get (insn) == 8)
+ return OPCODE_L32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 9)
+ return OPCODE_S32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 10)
+ return OPCODE_ADD_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 11)
+ return OPCODE_ADDI_N;
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
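+
+/*
+ * Round-trip sketch (hypothetical helper): an encoder extracts the slot
+ * with the *_get function, rewrites one field with the matching
+ * Field_*_set function, and merges the slot back with *_set; bits
+ * outside the slot are left untouched.
+ */
+static uint32_t __attribute__((unused))
+patch_r_field_example (uint32_t word, uint32_t r)
+{
+  xtensa_insnbuf_word insn[1], slot[1];
+
+  insn[0] = word;
+  Slot_x24_Format_inst_0_get (insn, slot);   /* pull out the 24-bit slot */
+  Field_r_Slot_inst_set (slot, r);           /* rewrite the r field */
+  Slot_x24_Format_inst_0_set (insn, slot);   /* merge it back */
+  return insn[0];
+}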
+
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_get,
+ Field_rbit2_Slot_inst_get,
+ Field_rhi_Slot_inst_get,
+ Field_t3_Slot_inst_get,
+ Field_tbit2_Slot_inst_get,
+ Field_tlo_Slot_inst_get,
+ Field_w_Slot_inst_get,
+ Field_y_Slot_inst_get,
+ Field_x_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_set,
+ Field_rbit2_Slot_inst_set,
+ Field_rhi_Slot_inst_set,
+ Field_t3_Slot_inst_set,
+ Field_tbit2_Slot_inst_set,
+ Field_tlo_Slot_inst_set,
+ Field_w_Slot_inst_set,
+ Field_y_Slot_inst_set,
+ Field_x_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_get,
+ Field_s3to1_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_set,
+ Field_s3to1_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_get,
+ Field_s3to1_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_set,
+ Field_s3to1_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
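+
+/*
+ * These per-slot tables run parallel to each other and are indexed by
+ * field number; a 0 entry means the field does not exist in that slot.
+ * Guarded-access sketch (hypothetical helper):
+ */
+static uint32_t __attribute__((unused))
+get_field_checked_example (xtensa_get_field_fn *fns, int fieldnum,
+                           const xtensa_insnbuf slotbuf)
+{
+  return fns[fieldnum] ? fns[fieldnum] (slotbuf) : 0;
+}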
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
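+
+/*
+ * Dispatch sketch (hypothetical helper, assuming the xtensa_slot_internal
+ * field names from xtensa-isa-internal.h): once format_decoder() below
+ * has picked a format and the format's slot list has yielded a slot
+ * index, decoding funnels through this table's function pointers.
+ */
+static int __attribute__((unused))
+decode_slot_example (int slot_id, const xtensa_insnbuf insn)
+{
+  xtensa_insnbuf_word slotbuf[1];
+
+  slots[slot_id].get_fn (insn, slotbuf);   /* extract the slot bits */
+  return slots[slot_id].opcode_decode_fn (slotbuf);
+}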
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc)
+ return 2; /* x16b */
+ return -1;
+}
+
+static int length_table[256] = {
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1,
+  3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, -1
+};
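+
+/*
+ * Only the low nibble (the op0 field) of the first instruction byte
+ * matters here, so the 256-entry table is the same 16-entry pattern
+ * repeated 16 times: op0 values 0..7 are 3-byte x24 instructions,
+ * 8..13 are 2-byte density (x16a/x16b) instructions, and 14..15 are
+ * undefined.  A table-free equivalent (hypothetical helper):
+ */
+static int __attribute__((unused))
+length_decoder_example (unsigned char first_byte)
+{
+  unsigned op0 = first_byte & 0xf;
+
+  if (op0 < 8)
+    return 3;    /* x24 */
+  if (op0 < 14)
+    return 2;    /* x16a / x16b */
+  return -1;     /* op0 14 and 15 name no format on this core */
+}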
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int l = insn[0];
+ return length_table[l];
+}
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 3 /* insn_size */, 0,
+ 3, formats, format_decoder, length_decoder,
+ 3, slots,
+ 56 /* num_fields */,
+ 94, operands,
+ 313, iclasses,
+ 439, opcodes, 0,
+ 2, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 6, interfaces, 0,
+ 0, funcUnits, 0
+};
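+
+/*
+ * Consistency note: num_fields (56) matches the length of each per-slot
+ * field-function table above, since both use the same field numbering.
+ * With C11 this could be checked at compile time, e.g. (sketch):
+ *
+ *   _Static_assert (sizeof (Slot_inst_get_field_fns)
+ *                   / sizeof (Slot_inst_get_field_fns[0]) == 56,
+ *                   "field table size must match num_fields");
+ */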
diff --git a/target/xtensa/core-de233_fpu.c b/target/xtensa/core-de233_fpu.c
new file mode 100644
index 000000000..c7cbeb1b4
--- /dev/null
+++ b/target/xtensa/core-de233_fpu.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-de233_fpu/core-isa.h"
+#include "core-de233_fpu/core-matmap.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_de233_fpu
+#include "core-de233_fpu/xtensa-modules.c.inc"
+
+static XtensaConfig de233_fpu __attribute__((unused)) = {
+ .name = "de233_fpu",
+ .gdb_regmap = {
+ .reg = {
+#include "core-de233_fpu/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 40000,
+ .opcode_translators = (const XtensaOpcodeTranslators *[]){
+ &xtensa_core_opcodes,
+ &xtensa_fpu_opcodes,
+ NULL,
+ },
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(de233_fpu)
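+
+/*
+ * Note on the pattern above: the generated xtensa-modules.c.inc always
+ * names its ISA description xtensa_modules, so each core wrapper renames
+ * it with a #define before inclusion; REGISTER_CORE (from overlay_tool.h)
+ * then registers the XtensaConfig with QEMU's list of Xtensa cores at
+ * startup, which is also why the otherwise-unreferenced config carries
+ * __attribute__((unused)).
+ */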
diff --git a/target/xtensa/core-de233_fpu/core-isa.h b/target/xtensa/core-de233_fpu/core-isa.h
new file mode 100644
index 000000000..f125619e8
--- /dev/null
+++ b/target/xtensa/core-de233_fpu/core-isa.h
@@ -0,0 +1,727 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2020 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_CORE_CONFIGURATION_H_
+#define XTENSA_CORE_CONFIGURATION_H_
+
+//depot/dev/Homewood/Xtensa/SWConfig/hal/core-common.h.tph#24 - edit change 444323 (text+ko)
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_EXCLUSIVE 0 /* L32EX/S32EX instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* number of contexts */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_LX 1 /* LX core */
+#define XCHAL_HAVE_NX 0 /* NX core (starting RH) */
+
+#define XCHAL_HAVE_SUPERGATHER 0 /* SuperGather */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_FUSION_VITERBI 0 /* Fusion Viterbi option */
+#define XCHAL_HAVE_FUSION_SOFTDEMAP 0 /* Fusion Soft Bit Demap option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI5 0 /* HiFi5 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI5_NN_MAC 0 /* HiFi5 Audio Engine NN-MAC option */
+#define XCHAL_HAVE_HIFI5_VFPU 0 /* HiFi5 Audio Engine Single-Precision VFPU option */
+#define XCHAL_HAVE_HIFI5_HP_VFPU 0 /* HiFi5 Audio Engine Half-Precision VFPU option */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3Z 0 /* HiFi3Z Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3Z_VFPU 0 /* HiFi3Z Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0 /* HiFi Mini Audio Engine pkg */
+
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 1 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 1 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 1 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 1 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 1 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 1 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 1 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 1 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 1 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 1 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 1 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+
+#define XCHAL_HAVE_FUSIONG 0 /* FusionG */
+#define XCHAL_HAVE_FUSIONG3 0 /* FusionG3 */
+#define XCHAL_HAVE_FUSIONG6 0 /* FusionG6 */
+#define XCHAL_HAVE_FUSIONG_SP_VFPU 0 /* sp_vfpu option on FusionG */
+#define XCHAL_HAVE_FUSIONG_DP_VFPU 0 /* dp_vfpu option on FusionG */
+#define XCHAL_FUSIONG_SIMD32 0 /* simd32 for FusionG */
+
+#define XCHAL_HAVE_FUSIONJ 0 /* FusionJ */
+#define XCHAL_HAVE_FUSIONJ6 0 /* FusionJ6 */
+#define XCHAL_HAVE_FUSIONJ_SP_VFPU 0 /* sp_vfpu option on FusionJ */
+#define XCHAL_HAVE_FUSIONJ_DP_VFPU 0 /* dp_vfpu option on FusionJ */
+#define XCHAL_FUSIONJ_SIMD32 0 /* simd32 for FusionJ */
+
+#define XCHAL_HAVE_PDX 0 /* PDX-LX */
+#define XCHAL_PDX_SIMD32 0 /* simd32 for PDX */
+#define XCHAL_HAVE_PDX4 0 /* PDX4-LX */
+#define XCHAL_HAVE_PDX8 0 /* PDX8-LX */
+#define XCHAL_HAVE_PDX16 0 /* PDX16-LX */
+#define XCHAL_HAVE_PDXNX 0 /* PDX-NX */
+
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BALL 0
+#define XCHAL_HAVE_BALLAP 0
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_CONNX_B10 0 /* ConnX B10 pkg*/
+#define XCHAL_HAVE_CONNX_B20 0 /* ConnX B20 pkg*/
+#define XCHAL_HAVE_CONNX_B_SP_VFPU 0 /* Single-precision Vector Floating-point option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_SPX_VFPU 0 /* Single-precision Extended Vector Floating-point option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_HPX_VFPU 0 /* Half-precision Extended Vector Floating-point option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_32B_MAC 0 /* 32-bit vector MAC (real and complex), FIR & FFT option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_VITERBI 0 /* Viterbi option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_TURBO 0 /* Turbo option on ConnX B10 & B20 */
+#define XCHAL_HAVE_CONNX_B_LDPC 0 /* LDPC option on ConnX B10 & B20 */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BBENEP_SP_VFPU 0 /* sp_vfpu option on BBE-EP */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+#define XCHAL_HAVE_VISION 0 /* Vision P5/P6 */
+#define XCHAL_VISION_SIMD16 0 /* simd16 for Vision P5/P6 */
+#define XCHAL_VISION_TYPE 0 /* Vision P5, P6, Q6 or Q7 */
+#define XCHAL_VISION_QUAD_MAC_TYPE 0 /* quad_mac option on Vision P6 */
+#define XCHAL_HAVE_VISION_HISTOGRAM 0 /* histogram option on Vision P5/P6 */
+#define XCHAL_HAVE_VISION_SP_VFPU 0 /* sp_vfpu option on Vision P5/P6/Q6/Q7 */
+#define XCHAL_HAVE_VISION_SP_VFPU_2XFMAC 0 /* sp_vfpu_2xfma option on Vision Q7 */
+#define XCHAL_HAVE_VISION_HP_VFPU 0 /* hp_vfpu option on Vision P6/Q6 */
+#define XCHAL_HAVE_VISION_HP_VFPU_2XFMAC 0 /* hp_vfpu_2xfma option on Vision Q7 */
+
+#define XCHAL_HAVE_VISIONC 0 /* Vision C */
+
+#define XCHAL_HAVE_XNNE 0 /* XNNE */
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 1 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 1 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_UNIFIED_LOADSTORE 0
+
+#define XCHAL_SW_VERSION 1403000 /* sw version of this header */
+#define XCHAL_SW_VERSION_MAJOR 14000 /* major ver# of sw */
+#define XCHAL_SW_VERSION_MINOR 3 /* minor ver# of sw */
+#define XCHAL_SW_VERSION_MICRO 0 /* micro ver# of sw */
+#define XCHAL_SW_MINOR_VERSION 1403000 /* with zeroed micro */
+#define XCHAL_SW_MICRO_VERSION 1403000
+
+#define XCHAL_CORE_ID "DE_233L_FPU" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x000872E0 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1039286 /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x28C872E0 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX7.1.3" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2810 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 3 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION_MICRO 0 /* subdot ver# of targeted hw */
+#define XCHAL_HW_VERSION 281030 /* major*100+(major<2810 ? minor : minor*10+micro) */
+#define XCHAL_HW_REL_LX7 1
+#define XCHAL_HW_REL_LX7_1 1
+#define XCHAL_HW_REL_LX7_1_3 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2810 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 3 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MICRO 0 /* micro v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 281030 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2810 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 3 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MICRO 0 /* micro v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 281030 /* latest targeted hw */
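+
+/* Worked check of the version formula above: major 2810 is not below
+   2810, so 2810 * 100 + 3 * 10 + 0 == 281030, matching XCHAL_HW_VERSION
+   and both range bounds. */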
+
+/* Config is enabled for functional safety: */
+#define XCHAL_HAVE_FUNC_SAFETY 0
+
+#define XCHAL_HAVE_APB 0
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_ICACHE_SIZE_LOG2 14
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE_LOG2 14
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 cache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_CME_DOWNGRADES 0
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+#define XCHAL_HAVE_ICACHE_DYN_ENABLE 0 /* Icache enabled via MEMCTL */
+#define XCHAL_HAVE_DCACHE_DYN_ENABLE 0 /* Dcache enabled via MEMCTL */
+
+#define XCHAL_L1SCACHE_SIZE 0
+#define XCHAL_L1SCACHE_SIZE_LOG2 0
+#define XCHAL_L1SCACHE_WAYS 1
+#define XCHAL_L1SCACHE_WAYS_LOG2 0
+#define XCHAL_L1SCACHE_ACCESS_SIZE 0
+#define XCHAL_L1SCACHE_BANKS 1
+
+#define XCHAL_HAVE_L2 0 /* NX L2 cache controller */
+
+/* Number of cores in cluster */
+#if XCHAL_HAVE_L2
+#define XCHAL_NUM_CORES_IN_CLUSTER XCHAL_L2CC_NUM_CORES_LOG2
+#else
+#define XCHAL_NUM_CORES_IN_CLUSTER 0
+#endif
+
+/* PRID_ID macros are for internal use only ... subject to removal */
+#define PRID_ID_SHIFT 0
+#define PRID_ID_BITS 4
+#define PRID_ID_MASK 0x0000000F
+
+/* This one is a form of caching, though not architecturally visible: */
+#define XCHAL_HAVE_BRANCH_PREDICTION 0 /* branch [target] prediction */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound bus present */
+
+#define XCHAL_HAVE_AXI 0 /* AXI bus */
+#define XCHAL_HAVE_AXI_ECC 0 /* ECC on AXI bus */
+#define XCHAL_HAVE_ACELITE 0 /* ACELite bus */
+
+#define XCHAL_HAVE_PIF_WR_RESP 0 /* pif write response */
+#define XCHAL_HAVE_PIF_REQ_ATTR 0 /* pif attribute */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_ICACHE_WAYS_LOG2 2
+#define XCHAL_DCACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS_LOG2 2
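+
+/* Worked example of the size identity above, using this core's I-cache:
+   4 ways << (linewidth 5 + setwidth 7) == 4 * 4096 == 16384 bytes, which
+   matches XCHAL_ICACHE_SIZE; the D-cache numbers agree the same way.
+   A compile-time sketch:
+
+     _Static_assert (XCHAL_ICACHE_WAYS
+                     << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH)
+                     == XCHAL_ICACHE_SIZE, "I-cache geometry mismatch");
+*/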
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+#define XCHAL_ICACHE_ECC_WIDTH 4
+#define XCHAL_DCACHE_ECC_WIDTH 1
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* The number of Cache lines associated with a single cache tag */
+#define XCHAL_DCACHE_LINES_PER_TAG_LOG2 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+#define XCHAL_HAVE_IRAMCFG 0 /* IRAMxCFG register present */
+#define XCHAL_HAVE_DRAMCFG 0 /* DRAMxCFG register present */
+
+
+#define XCHAL_HAVE_IDMA 0
+
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+
+
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
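+
+/*
+ * The per-level masks earlier in this section are simply the OR of
+ * (1 << n) over every interrupt n at that level: interrupt 8 is the only
+ * level-2 interrupt, so XCHAL_INTLEVEL2_MASK == 1 << 8 == 0x00000100,
+ * and the level-3 set {9, 10, 11, 21} gives
+ * ((1 << 9) | (1 << 10) | (1 << 11) | (1 << 21)) == 0x00200E00.
+ */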
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_DBG_REQUEST 0x00000000
+#define XCHAL_INTTYPE_MASK_BREAKIN 0x00000000
+#define XCHAL_INTTYPE_MASK_TRAX 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_DONE 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_ERR 0x00000000
+#define XCHAL_INTTYPE_MASK_GS_ERR 0x00000000
+#define XCHAL_INTTYPE_MASK_L2_ERR 0x00000000
+#define XCHAL_INTTYPE_MASK_L2_STATUS 0x00000000
+#define XCHAL_INTTYPE_MASK_COR_ECC_ERR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (Multiple interrupts are configured at each of levels 1 and 3.) */
+
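The ANDBELOW masks above exist so that software can unmask everything at or
below a given priority with a single table lookup. A minimal sketch of a
bare-metal helper follows; write_intenable() is a hypothetical wrapper around
WSR.INTENABLE, not something this header provides.

static const unsigned xtensa_level_masks[8] = {
    0,                              /* level 0: nothing enabled */
    XCHAL_INTLEVEL1_ANDBELOW_MASK,
    XCHAL_INTLEVEL2_ANDBELOW_MASK,
    XCHAL_INTLEVEL3_ANDBELOW_MASK,
    XCHAL_INTLEVEL4_ANDBELOW_MASK,
    XCHAL_INTLEVEL5_ANDBELOW_MASK,
    XCHAL_INTLEVEL6_ANDBELOW_MASK,
    XCHAL_INTLEVEL7_ANDBELOW_MASK,
};

/* Enable exactly the interrupts at or below 'level'. */
static void enable_ints_up_to(unsigned level)
{
    if (level < 8) {
        write_intenable(xtensa_level_masks[level]);
    }
}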
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */
+
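Because the two tables above are inverses of each other, they can be
spot-checked at build time. The checks below are purely illustrative (C11
_Static_assert); the generated header carries no such code itself.

/* pin -> core and core -> pin must round-trip: */
_Static_assert(XCHAL_EXTINT6_NUM == 8 && XCHAL_INT8_EXTNUM == 6,
               "BInterrupt6 <-> core interrupt 8 (level 2)");
_Static_assert(XCHAL_EXTINT9_NUM == 14 && XCHAL_INT14_EXTNUM == 9,
               "BInterrupt9 <-> core interrupt 14 (the NMI)");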
+#define XCHAL_HAVE_ISB 0 /* No ISB */
+#define XCHAL_ISB_VADDR 0 /* N/A */
+#define XCHAL_HAVE_ITB 0 /* No ITB */
+#define XCHAL_ITB_VADDR 0 /* N/A */
+
+#define XCHAL_HAVE_KSL 0 /* Kernel Stack Limit */
+#define XCHAL_HAVE_ISL 0 /* Interrupt Stack Limit */
+#define XCHAL_HAVE_PSL 0 /* Pageable Stack Limit */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (until T1050)
+ 2 == XEA2 (T1040 onwards)
+ 3 == XEA3 (LX8/NX/SX onwards)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEA3 0 /* Exception Architecture 3 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_IMPRECISE_EXCEPTIONS 0 /* imprecise exception option */
+#define XCHAL_EXCCAUSE_NUM 64 /* Number of exceptions */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0 /* UNUSED */
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR XCHAL_RESET_VECTOR0_VADDR
+#define XCHAL_RESET_VECTOR_PADDR XCHAL_RESET_VECTOR0_PADDR
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
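Since XCHAL_HAVE_VECBASE is set, every VECOFS-relative vector above sits at
VECBASE plus its fixed offset; the *_VECTOR_VADDR values are simply the reset
VECBASE (0x00002000) plus the corresponding *_VECOFS. A hedged sketch of
resolving a vector after software has reprogrammed VECBASE (read_vecbase() is
a hypothetical RSR.VECBASE wrapper):

static inline unsigned long user_vector_vaddr(void)
{
    return read_vecbase() + XCHAL_USER_VECOFS;  /* 0x2000 + 0x340 at reset */
}

/* Consistency with the reset-time constants in this header: */
_Static_assert(XCHAL_VECBASE_RESET_VADDR + XCHAL_USER_VECOFS
               == XCHAL_USER_VECTOR_VADDR, "user vector placement");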
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 0 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 0 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 0 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+
+/* If none of the five XCHAL_HAVE_* options above is set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
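With four rings and 8-bit ASIDs, the current ASID of every ring fits in the
single 32-bit RASID special register, one byte per ring. A sketch under two
stated assumptions: read_rasid() is a hypothetical RSR.RASID wrapper, and
ring 0 occupies the low byte.

/* Extract the ASID currently assigned to a ring (0..XCHAL_MMU_RINGS-1). */
static inline unsigned ring_asid(unsigned ring)
{
    unsigned mask = (1u << XCHAL_MMU_ASID_BITS) - 1;   /* 0xFF here */
    return (read_rasid() >> (ring * XCHAL_MMU_ASID_BITS)) & mask;
}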
+/*----------------------------------------------------------------------
+ MPU
+ ----------------------------------------------------------------------*/
+#define XCHAL_HAVE_MPU 0
+#define XCHAL_MPU_ENTRIES 0
+
+#define XCHAL_MPU_ALIGN_REQ 1 /* MPU requires alignment of entries to background map */
+#define XCHAL_MPU_BACKGROUND_ENTRIES 0 /* number of entries in bg map*/
+#define XCHAL_MPU_BG_CACHEADRDIS 0 /* default CACHEADRDIS for bg */
+
+#define XCHAL_MPU_ALIGN_BITS 0
+#define XCHAL_MPU_ALIGN 0
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_CORE_CONFIGURATION_H_ */
+
diff --git a/target/xtensa/core-de233_fpu/core-matmap.h b/target/xtensa/core-de233_fpu/core-matmap.h
new file mode 100644
index 000000000..cca51c7af
--- /dev/null
+++ b/target/xtensa/core-de233_fpu/core-matmap.h
@@ -0,0 +1,717 @@
+/*
+ * xtensa/config/core-matmap.h -- Memory access and translation mapping
+ * parameters (CHAL) of the Xtensa processor core configuration.
+ *
+ * If you are using Xtensa Tools, see <xtensa/config/core.h> (which includes
+ * this file) for more details.
+ *
+ * In the Xtensa processor products released to date, all parameters
+ * defined in this file are derivable (at least in theory) from
+ * information contained in the core-isa.h header file.
+ * In particular, the following core configuration parameters are relevant:
+ * XCHAL_HAVE_CACHEATTR
+ * XCHAL_HAVE_MIMIC_CACHEATTR
+ * XCHAL_HAVE_XLT_CACHEATTR
+ * XCHAL_HAVE_PTP_MMU
+ * XCHAL_ITLB_ARF_ENTRIES_LOG2
+ * XCHAL_DTLB_ARF_ENTRIES_LOG2
+ * XCHAL_DCACHE_IS_WRITEBACK
+ * XCHAL_ICACHE_SIZE (presence of I-cache)
+ * XCHAL_DCACHE_SIZE (presence of D-cache)
+ * XCHAL_HW_VERSION_MAJOR
+ * XCHAL_HW_VERSION_MINOR
+ */
+
+/* Copyright (c) 1999-2020 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+
+#ifndef XTENSA_CONFIG_CORE_MATMAP_H
+#define XTENSA_CONFIG_CORE_MATMAP_H
+
+
+/*----------------------------------------------------------------------
+ CACHE (MEMORY ACCESS) ATTRIBUTES
+ ----------------------------------------------------------------------*/
+
+
+
+/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
+#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION
+#define XCHAL_LCA_LIST XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION
+#define XCHAL_SCA_LIST XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_BYPASS XCHAL_SEP \
+ XTHAL_SAM_BYPASS XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITEBACK XCHAL_SEP \
+ XTHAL_SAM_WRITEBACK XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION
+
+#define XCHAL_CA_R (0xC0 | 0x40000000)
+#define XCHAL_CA_RX (0xD0 | 0x40000000)
+#define XCHAL_CA_RW (0xE0 | 0x40000000)
+#define XCHAL_CA_RWX (0xF0 | 0x40000000)
+
+/*
+ * Specific encoded cache attribute values of general interest.
+ * If a specific cache mode is not available, the closest available
+ * one is returned instead (eg. writethru instead of writeback,
+ * bypass instead of writethru).
+ */
+#define XCHAL_CA_BYPASS 3 /* cache disabled (bypassed) mode */
+#define XCHAL_CA_WRITETHRU 11 /* cache enabled (write-through) mode */
+#define XCHAL_CA_WRITEBACK 7 /* cache enabled (write-back) mode */
+#define XCHAL_HAVE_CA_WRITEBACK_NOALLOC 0 /* write-back no-allocate availability */
+#define XCHAL_CA_WRITEBACK_NOALLOC 7 /* cache enabled (write-back no-allocate) mode */
+#define XCHAL_CA_BYPASS_RX 1 /* cache disabled (bypassed) mode (no write) */
+#define XCHAL_CA_WRITETHRU_RX 9 /* cache enabled (write-through) mode (no write) */
+#define XCHAL_CA_WRITEBACK_RX 5 /* cache enabled (write-back) mode (no write) */
+#define XCHAL_CA_WRITEBACK_NOALLOC_RX 5 /* cache enabled (write-back no-allocate) mode (no write) */
+#define XCHAL_CA_BYPASS_RW 2 /* cache disabled (bypassed) mode (no exec) */
+#define XCHAL_CA_WRITETHRU_RW 10 /* cache enabled (write-through) mode (no exec) */
+#define XCHAL_CA_WRITEBACK_RW 6 /* cache enabled (write-back) mode (no exec) */
+#define XCHAL_CA_WRITEBACK_NOALLOC_RW 6 /* cache enabled (write-back no-allocate) mode (no exec) */
+#define XCHAL_CA_BYPASS_R 0 /* cache disabled (bypassed) mode (no exec, no write) */
+#define XCHAL_CA_WRITETHRU_R 8 /* cache enabled (write-through) mode (no exec, no write) */
+#define XCHAL_CA_WRITEBACK_R 4 /* cache enabled (write-back) mode (no exec, no write) */
+#define XCHAL_CA_WRITEBACK_NOALLOC_R 4 /* cache enabled (write-back no-allocate) mode (no exec, no write) */
+#define XCHAL_CA_ILLEGAL 12 /* no access allowed (all cause exceptions) mode */
+
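The base encodings come in _RX, _RW and _R variants that drop store and/or
fetch permission, and the pattern is visible in the FCA/SCA lists above: for
values below XCHAL_CA_ILLEGAL, bit 0 grants instruction fetch and bit 1
grants stores. Two illustrative predicates derived from those lists (a
sketch, not part of the header):

static inline int ca_allows_exec(unsigned ca)   /* per XCHAL_FCA_LIST */
{
    return ca < XCHAL_CA_ILLEGAL && (ca & 1);
}

static inline int ca_allows_store(unsigned ca)  /* per XCHAL_SCA_LIST */
{
    return ca < XCHAL_CA_ILLEGAL && (ca & 2);
}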
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/*
+ * General notes on MMU parameters.
+ *
+ * Terminology:
+ * ASID = address-space ID (acts as an "extension" of virtual addresses)
+ * VPN = virtual page number
+ * PPN = physical page number
+ * CA = encoded cache attribute (access modes)
+ * TLB = translation look-aside buffer (term is stretched somewhat here)
+ * I = instruction (fetch accesses)
+ * D = data (load and store accesses)
+ * way = each TLB (ITLB and DTLB) consists of a number of "ways"
+ * that simultaneously match the virtual address of an access;
+ * a TLB successfully translates a virtual address if exactly
+ * one way matches the vaddr; if none match, it is a miss;
+ * if multiple match, one gets a "multihit" exception;
+ * each way can be independently configured in terms of number of
+ * entries, page sizes, which fields are writable or constant, etc.
+ * set = group of contiguous ways with exactly identical parameters
+ * ARF = auto-refill; hardware services a 1st-level miss by loading a PTE
+ * from the page table and storing it in one of the auto-refill ways;
+ * if this PTE load also misses, a miss exception is posted for s/w.
+ * min-wired = a "min-wired" way can be used to map a single (minimum-sized)
+ * page arbitrarily under program control; it has a single entry,
+ * is non-auto-refill (some other way(s) must be auto-refill),
+ *              its fields (VPN, PPN, ASID, CA) are all writable, and it
+ * supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
+ * restriction is that this be the only page size it supports).
+ *
+ * TLB way entries are virtually indexed.
+ * TLB ways that support multiple page sizes:
+ * - must have all writable VPN and PPN fields;
+ *  - can only use one page size at any given time (eg. set up at startup),
+ * selected by the respective ITLBCFG or DTLBCFG special register,
+ * whose bits n*4+3 .. n*4 index the list of page sizes for way n
+ * (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
+ * this list may be sparse for auto-refill ways because auto-refill
+ * ways have independent lists of supported page sizes sharing a
+ * common encoding with PTE entries; the encoding is the index into
+ * this list; unsupported sizes for a given way are zero in the list;
+ *    selecting unsupported sizes results in undefined hardware behaviour;
+ * - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
+ */
+
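Per the note above, a way that supports several page sizes selects one
through a 4-bit field of ITLBCFG/DTLBCFG (bits n*4+3..n*4 for way n), the
field value being an index into that way's PAGESZ_LOG2_LIST. A hypothetical
sketch, using way 4 of this core (whose list further below is 20/22/24/26,
i.e. 1 MB through 64 MB):

/* Build the page-size select field for way 'way' of a TLBCFG register. */
static inline unsigned tlbcfg_field(unsigned way, unsigned list_index)
{
    return (list_index & 0xFu) << (way * 4);
}

/* e.g. select 4 MB pages (list index 1) for DTLB way 4, where
 * write_dtlbcfg() stands in for a WSR.DTLBCFG wrapper:
 *     write_dtlbcfg(tlbcfg_field(4, 1));
 */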
+#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */
+#define XCHAL_MMU_ASID_KERNEL 1 /* ASID value indicating kernel (ring 0) address space */
+#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */
+#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 12 /* max page size in a PTE structure (log2) */
+#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 12 /* min page size in a PTE structure (log2) */
+
+
+/*** Instruction TLB: ***/
+
+#define XCHAL_ITLB_WAY_BITS 3 /* number of bits holding the ways */
+#define XCHAL_ITLB_WAYS 7 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_ITLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_ITLB_SETS 7 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_ITLB_WAY0_SET 0
+#define XCHAL_ITLB_WAY1_SET 1
+#define XCHAL_ITLB_WAY2_SET 2
+#define XCHAL_ITLB_WAY3_SET 3
+#define XCHAL_ITLB_WAY4_SET 4
+#define XCHAL_ITLB_WAY5_SET 5
+#define XCHAL_ITLB_WAY6_SET 6
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_ITLB_ARF_SETS 4 /* number of auto-refill sets */
+#define XCHAL_ITLB_ARF_SET0 0 /* index of n'th auto-refill set */
+#define XCHAL_ITLB_ARF_SET1 1 /* index of n'th auto-refill set */
+#define XCHAL_ITLB_ARF_SET2 2 /* index of n'th auto-refill set */
+#define XCHAL_ITLB_ARF_SET3 3 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
+
+
+/* ITLB way set 0 (group of ways 0 thru 0): */
+#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_ITLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 1 (group of ways 1 thru 1): */
+#define XCHAL_ITLB_SET1_WAY 1 /* index of first way in this way set */
+#define XCHAL_ITLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET1_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET1_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET1_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 2 (group of ways 2 thru 2): */
+#define XCHAL_ITLB_SET2_WAY 2 /* index of first way in this way set */
+#define XCHAL_ITLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET2_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET2_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET2_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET2_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 3 (group of ways 3 thru 3): */
+#define XCHAL_ITLB_SET3_WAY 3 /* index of first way in this way set */
+#define XCHAL_ITLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET3_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET3_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET3_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET3_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 4 (group of ways 4 thru 4): */
+#define XCHAL_ITLB_SET4_WAY 4 /* index of first way in this way set */
+#define XCHAL_ITLB_SET4_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET4_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET4_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET4_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET4_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET4_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET4_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET4_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 5 (group of ways 5 thru 5): */
+#define XCHAL_ITLB_SET5_WAY 5 /* index of first way in this way set */
+#define XCHAL_ITLB_SET5_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET5_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET5_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET5_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET5_PAGESIZES 2 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET5_PAGESZ_BITS 1 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET5_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET5_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET5_PAGESZ_LOG2_LIST 27 XCHAL_SEP 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET5_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET5_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET5_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET5_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET5_ASID_RESET 1 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET5_VPN_RESET 1 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET5_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET5_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Reset ASID values for each entry of ITLB way set 5 (because SET5_ASID_RESET is non-zero): */
+#define XCHAL_ITLB_SET5_E0_ASID_RESET 0x00
+#define XCHAL_ITLB_SET5_E1_ASID_RESET 0x00
+#define XCHAL_ITLB_SET5_E2_ASID_RESET 0x00
+#define XCHAL_ITLB_SET5_E3_ASID_RESET 0x00
+/* Reset VPN values for each entry of ITLB way set 5 (because SET5_VPN_RESET is non-zero): */
+#define XCHAL_ITLB_SET5_E0_VPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E1_VPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E2_VPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E3_VPN_RESET 0x00000000
+/* Reset PPN values for each entry of ITLB way set 5 (because SET5_PPN_RESET is non-zero): */
+#define XCHAL_ITLB_SET5_E0_PPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E1_PPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E2_PPN_RESET 0x00000000
+#define XCHAL_ITLB_SET5_E3_PPN_RESET 0x00000000
+
+/* ITLB way set 6 (group of ways 6 thru 6): */
+#define XCHAL_ITLB_SET6_WAY 6 /* index of first way in this way set */
+#define XCHAL_ITLB_SET6_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET6_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET6_ENTRIES 8 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET6_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET6_PAGESIZES 2 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET6_PAGESZ_BITS 1 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET6_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET6_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET6_PAGESZ_LOG2_LIST 29 XCHAL_SEP 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET6_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET6_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET6_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET6_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET6_ASID_RESET 1 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET6_VPN_RESET 1 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET6_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET6_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Reset ASID values for each entry of ITLB way set 6 (because SET6_ASID_RESET is non-zero): */
+#define XCHAL_ITLB_SET6_E0_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E1_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E2_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E3_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E4_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E5_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E6_ASID_RESET 0x01
+#define XCHAL_ITLB_SET6_E7_ASID_RESET 0x01
+/* Reset VPN values for each entry of ITLB way set 6 (because SET6_VPN_RESET is non-zero): */
+#define XCHAL_ITLB_SET6_E0_VPN_RESET 0x00000000
+#define XCHAL_ITLB_SET6_E1_VPN_RESET 0x20000000
+#define XCHAL_ITLB_SET6_E2_VPN_RESET 0x40000000
+#define XCHAL_ITLB_SET6_E3_VPN_RESET 0x60000000
+#define XCHAL_ITLB_SET6_E4_VPN_RESET 0x80000000
+#define XCHAL_ITLB_SET6_E5_VPN_RESET 0xA0000000
+#define XCHAL_ITLB_SET6_E6_VPN_RESET 0xC0000000
+#define XCHAL_ITLB_SET6_E7_VPN_RESET 0xE0000000
+/* Reset PPN values for each entry of ITLB way set 6 (because SET6_PPN_RESET is non-zero): */
+#define XCHAL_ITLB_SET6_E0_PPN_RESET 0x00000000
+#define XCHAL_ITLB_SET6_E1_PPN_RESET 0x20000000
+#define XCHAL_ITLB_SET6_E2_PPN_RESET 0x40000000
+#define XCHAL_ITLB_SET6_E3_PPN_RESET 0x60000000
+#define XCHAL_ITLB_SET6_E4_PPN_RESET 0x80000000
+#define XCHAL_ITLB_SET6_E5_PPN_RESET 0xA0000000
+#define XCHAL_ITLB_SET6_E6_PPN_RESET 0xC0000000
+#define XCHAL_ITLB_SET6_E7_PPN_RESET 0xE0000000
+/* Reset CA values for each entry of ITLB way set 6 (because SET6_CA_RESET is non-zero): */
+#define XCHAL_ITLB_SET6_E0_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E1_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E2_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E3_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E4_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E5_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E6_CA_RESET 0x03
+#define XCHAL_ITLB_SET6_E7_CA_RESET 0x03
+
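Taken together, the way-6 reset values above describe the state the MMU wakes
up in: eight 512 MB (2^29) entries identity-map the entire 4 GB address space
with the kernel ASID (0x01) and CA 0x03, i.e. XCHAL_CA_BYPASS, which is what
lets reset code run before any page table exists. An illustrative check of
the pattern:

/* Entry n of way 6 identity-maps the 512 MB region starting at n << 29. */
_Static_assert(XCHAL_ITLB_SET6_E3_VPN_RESET == 3u << 29, "entry stride");
_Static_assert(XCHAL_ITLB_SET6_E3_VPN_RESET ==
               XCHAL_ITLB_SET6_E3_PPN_RESET, "identity mapping at reset");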
+
+/*** Data TLB: ***/
+
+#define XCHAL_DTLB_WAY_BITS 4 /* number of bits holding the ways */
+#define XCHAL_DTLB_WAYS 10 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_DTLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_DTLB_SETS 10 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_DTLB_WAY0_SET 0
+#define XCHAL_DTLB_WAY1_SET 1
+#define XCHAL_DTLB_WAY2_SET 2
+#define XCHAL_DTLB_WAY3_SET 3
+#define XCHAL_DTLB_WAY4_SET 4
+#define XCHAL_DTLB_WAY5_SET 5
+#define XCHAL_DTLB_WAY6_SET 6
+#define XCHAL_DTLB_WAY7_SET 7
+#define XCHAL_DTLB_WAY8_SET 8
+#define XCHAL_DTLB_WAY9_SET 9
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_DTLB_ARF_SETS 4 /* number of auto-refill sets */
+#define XCHAL_DTLB_ARF_SET0 0 /* index of n'th auto-refill set */
+#define XCHAL_DTLB_ARF_SET1 1 /* index of n'th auto-refill set */
+#define XCHAL_DTLB_ARF_SET2 2 /* index of n'th auto-refill set */
+#define XCHAL_DTLB_ARF_SET3 3 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_DTLB_MINWIRED_SETS 3 /* number of "min-wired" sets */
+#define XCHAL_DTLB_MINWIRED_SET0 7 /* index of n'th "min-wired" set */
+#define XCHAL_DTLB_MINWIRED_SET1 8 /* index of n'th "min-wired" set */
+#define XCHAL_DTLB_MINWIRED_SET2 9 /* index of n'th "min-wired" set */
+
+
+/* DTLB way set 0 (group of ways 0 thru 0): */
+#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_DTLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 1 (group of ways 1 thru 1): */
+#define XCHAL_DTLB_SET1_WAY 1 /* index of first way in this way set */
+#define XCHAL_DTLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET1_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET1_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET1_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 2 (group of ways 2 thru 2): */
+#define XCHAL_DTLB_SET2_WAY 2 /* index of first way in this way set */
+#define XCHAL_DTLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET2_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET2_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET2_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET2_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 3 (group of ways 3 thru 3): */
+#define XCHAL_DTLB_SET3_WAY 3 /* index of first way in this way set */
+#define XCHAL_DTLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET3_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET3_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET3_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET3_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 4 (group of ways 4 thru 4): */
+#define XCHAL_DTLB_SET4_WAY 4 /* index of first way in this way set */
+#define XCHAL_DTLB_SET4_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET4_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET4_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET4_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET4_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 5 (group of ways 5 thru 5): */
+#define XCHAL_DTLB_SET5_WAY 5 /* index of first way in this way set */
+#define XCHAL_DTLB_SET5_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET5_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET5_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET5_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET5_PAGESIZES 2 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET5_PAGESZ_BITS 1 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET5_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET5_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET5_PAGESZ_LOG2_LIST 27 XCHAL_SEP 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET5_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET5_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET5_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET5_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET5_ASID_RESET 1 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET5_VPN_RESET 1 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET5_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET5_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Reset ASID values for each entry of DTLB way set 5 (because SET5_ASID_RESET is non-zero): */
+#define XCHAL_DTLB_SET5_E0_ASID_RESET 0x00
+#define XCHAL_DTLB_SET5_E1_ASID_RESET 0x00
+#define XCHAL_DTLB_SET5_E2_ASID_RESET 0x00
+#define XCHAL_DTLB_SET5_E3_ASID_RESET 0x00
+/* Reset VPN values for each entry of DTLB way set 5 (because SET5_VPN_RESET is non-zero): */
+#define XCHAL_DTLB_SET5_E0_VPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E1_VPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E2_VPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E3_VPN_RESET 0x00000000
+/* Reset PPN values for each entry of DTLB way set 5 (because SET5_PPN_RESET is non-zero): */
+#define XCHAL_DTLB_SET5_E0_PPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E1_PPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E2_PPN_RESET 0x00000000
+#define XCHAL_DTLB_SET5_E3_PPN_RESET 0x00000000
+
+/* DTLB way set 6 (group of ways 6 thru 6): */
+#define XCHAL_DTLB_SET6_WAY 6 /* index of first way in this way set */
+#define XCHAL_DTLB_SET6_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET6_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET6_ENTRIES 8 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET6_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET6_PAGESIZES 2 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET6_PAGESZ_BITS 1 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET6_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET6_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET6_PAGESZ_LOG2_LIST 29 XCHAL_SEP 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET6_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET6_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET6_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET6_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET6_ASID_RESET 1 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET6_VPN_RESET 1 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET6_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET6_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Reset ASID values for each entry of DTLB way set 6 (because SET6_ASID_RESET is non-zero): */
+#define XCHAL_DTLB_SET6_E0_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E1_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E2_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E3_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E4_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E5_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E6_ASID_RESET 0x01
+#define XCHAL_DTLB_SET6_E7_ASID_RESET 0x01
+/* Reset VPN values for each entry of DTLB way set 6 (because SET6_VPN_RESET is non-zero): */
+#define XCHAL_DTLB_SET6_E0_VPN_RESET 0x00000000
+#define XCHAL_DTLB_SET6_E1_VPN_RESET 0x20000000
+#define XCHAL_DTLB_SET6_E2_VPN_RESET 0x40000000
+#define XCHAL_DTLB_SET6_E3_VPN_RESET 0x60000000
+#define XCHAL_DTLB_SET6_E4_VPN_RESET 0x80000000
+#define XCHAL_DTLB_SET6_E5_VPN_RESET 0xA0000000
+#define XCHAL_DTLB_SET6_E6_VPN_RESET 0xC0000000
+#define XCHAL_DTLB_SET6_E7_VPN_RESET 0xE0000000
+/* Reset PPN values for each entry of DTLB way set 6 (because SET6_PPN_RESET is non-zero): */
+#define XCHAL_DTLB_SET6_E0_PPN_RESET 0x00000000
+#define XCHAL_DTLB_SET6_E1_PPN_RESET 0x20000000
+#define XCHAL_DTLB_SET6_E2_PPN_RESET 0x40000000
+#define XCHAL_DTLB_SET6_E3_PPN_RESET 0x60000000
+#define XCHAL_DTLB_SET6_E4_PPN_RESET 0x80000000
+#define XCHAL_DTLB_SET6_E5_PPN_RESET 0xA0000000
+#define XCHAL_DTLB_SET6_E6_PPN_RESET 0xC0000000
+#define XCHAL_DTLB_SET6_E7_PPN_RESET 0xE0000000
+/* Reset CA values for each entry of DTLB way set 6 (because SET6_CA_RESET is non-zero): */
+#define XCHAL_DTLB_SET6_E0_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E1_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E2_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E3_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E4_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E5_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E6_CA_RESET 0x03
+#define XCHAL_DTLB_SET6_E7_CA_RESET 0x03
+
+/* DTLB way set 7 (group of ways 7 thru 7): */
+#define XCHAL_DTLB_SET7_WAY 7 /* index of first way in this way set */
+#define XCHAL_DTLB_SET7_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET7_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET7_ENTRIES 1 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET7_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET7_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET7_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET7_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET7_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET7_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET7_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET7_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET7_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET7_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET7_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET7_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET7_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET7_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 8 (group of ways 8 thru 8): */
+#define XCHAL_DTLB_SET8_WAY 8 /* index of first way in this way set */
+#define XCHAL_DTLB_SET8_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET8_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET8_ENTRIES 1 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET8_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET8_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET8_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET8_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET8_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET8_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET8_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET8_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET8_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET8_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET8_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET8_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET8_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET8_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 9 (group of ways 9 thru 9): */
+#define XCHAL_DTLB_SET9_WAY 9 /* index of first way in this way set */
+#define XCHAL_DTLB_SET9_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET9_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET9_ENTRIES 1 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET9_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET9_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET9_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET9_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET9_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET9_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET9_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET9_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET9_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET9_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET9_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET9_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET9_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET9_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
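+
+/*
+ * Editor's note (a hedged sketch, not part of the generated header):
+ * each XCHAL_DTLB_SETn_* group above describes the geometry of one
+ * group of DTLB ways.  A consumer could derive a set's capacity along
+ * these lines (the helper name is hypothetical; the macro names are
+ * the ones defined above):
+ *
+ *     static inline unsigned dtlb_set8_total_entries(void)
+ *     {
+ *         // WAYS contiguous ways, each with 2^ENTRIES_LOG2 entries;
+ *         // every entry maps one page of 2^PAGESZ_LOG2_MIN bytes
+ *         // (4 KiB here, since min and max are both 12).
+ *         return XCHAL_DTLB_SET8_WAYS << XCHAL_DTLB_SET8_ENTRIES_LOG2;
+ *     }
+ */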
+
+#endif /*XTENSA_CONFIG_CORE_MATMAP_H*/
+
diff --git a/target/xtensa/core-de233_fpu/gdb-config.c.inc b/target/xtensa/core-de233_fpu/gdb-config.c.inc
new file mode 100644
index 000000000..66fcb4894
--- /dev/null
+++ b/target/xtensa/core-de233_fpu/gdb-config.c.inc
@@ -0,0 +1,277 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2020 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x2100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132,32, 4, 4,0x0200,0x0006,-2, 2,0x1100,lbeg, 0,0,0,0,0,0)
+ XTREG( 34,136,32, 4, 4,0x0201,0x0006,-2, 2,0x1100,lend, 0,0,0,0,0,0)
+ XTREG( 35,140,32, 4, 4,0x0202,0x0006,-2, 2,0x1100,lcount, 0,0,0,0,0,0)
+ XTREG( 36,144, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 37,148, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 38,152, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 39,156,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,configid0, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,configid1, 0,0,0,0,0,0)
+ XTREG( 41,164,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 42,168,32, 4, 4,0x03e7,0x0006,-2, 3,0x0110,threadptr, 0,0,0,0,0,0)
+ XTREG( 43,172,16, 4, 4,0x0204,0x0006,-1, 2,0x1100,br, 0,0,0,0,0,0)
+ XTREG( 44,176,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x0210,0x0006,-1, 2,0x1100,acclo, 0,0,0,0,0,0)
+ XTREG( 46,184, 8, 4, 4,0x0211,0x0006,-1, 2,0x1100,acchi, 0,0,0,0,0,0)
+ XTREG( 47,188,32, 4, 4,0x0220,0x0006,-1, 2,0x1100,m0, 0,0,0,0,0,0)
+ XTREG( 48,192,32, 4, 4,0x0221,0x0006,-1, 2,0x1100,m1, 0,0,0,0,0,0)
+ XTREG( 49,196,32, 4, 4,0x0222,0x0006,-1, 2,0x1100,m2, 0,0,0,0,0,0)
+ XTREG( 50,200,32, 4, 4,0x0223,0x0006,-1, 2,0x1100,m3, 0,0,0,0,0,0)
+ XTREG( 51,204,32, 4, 4,0x03e6,0x000e,-1, 3,0x0110,expstate, 0,0,0,0,0,0)
+ XTREG( 52,208,64, 8, 8,0x0030,0x0006, 0, 4,0x0401,f0,
+ "03:03:54:00","03:03:14:00",0,0,0,0)
+ XTREG( 53,216,64, 8, 8,0x0031,0x0006, 0, 4,0x0401,f1,
+ "03:13:54:00","03:13:14:00",0,0,0,0)
+ XTREG( 54,224,64, 8, 8,0x0032,0x0006, 0, 4,0x0401,f2,
+ "03:23:54:00","03:23:14:00",0,0,0,0)
+ XTREG( 55,232,64, 8, 8,0x0033,0x0006, 0, 4,0x0401,f3,
+ "03:33:54:00","03:33:14:00",0,0,0,0)
+ XTREG( 56,240,64, 8, 8,0x0034,0x0006, 0, 4,0x0401,f4,
+ "03:43:54:00","03:43:14:00",0,0,0,0)
+ XTREG( 57,248,64, 8, 8,0x0035,0x0006, 0, 4,0x0401,f5,
+ "03:53:54:00","03:53:14:00",0,0,0,0)
+ XTREG( 58,256,64, 8, 8,0x0036,0x0006, 0, 4,0x0401,f6,
+ "03:63:54:00","03:63:14:00",0,0,0,0)
+ XTREG( 59,264,64, 8, 8,0x0037,0x0006, 0, 4,0x0401,f7,
+ "03:73:54:00","03:73:14:00",0,0,0,0)
+ XTREG( 60,272,64, 8, 8,0x0038,0x0006, 0, 4,0x0401,f8,
+ "03:83:54:00","03:83:14:00",0,0,0,0)
+ XTREG( 61,280,64, 8, 8,0x0039,0x0006, 0, 4,0x0401,f9,
+ "03:93:54:00","03:93:14:00",0,0,0,0)
+ XTREG( 62,288,64, 8, 8,0x003a,0x0006, 0, 4,0x0401,f10,
+ "03:a3:54:00","03:a3:14:00",0,0,0,0)
+ XTREG( 63,296,64, 8, 8,0x003b,0x0006, 0, 4,0x0401,f11,
+ "03:b3:54:00","03:b3:14:00",0,0,0,0)
+ XTREG( 64,304,64, 8, 8,0x003c,0x0006, 0, 4,0x0401,f12,
+ "03:c3:54:00","03:c3:14:00",0,0,0,0)
+ XTREG( 65,312,64, 8, 8,0x003d,0x0006, 0, 4,0x0401,f13,
+ "03:d3:54:00","03:d3:14:00",0,0,0,0)
+ XTREG( 66,320,64, 8, 8,0x003e,0x0006, 0, 4,0x0401,f14,
+ "03:e3:54:00","03:e3:14:00",0,0,0,0)
+ XTREG( 67,328,64, 8, 8,0x003f,0x0006, 0, 4,0x0401,f15,
+ "03:f3:54:00","03:f3:14:00",0,0,0,0)
+ XTREG( 68,336,32, 4, 4,0x03e8,0x0006, 0, 3,0x0100,fcr, 0,0,0,0,0,0)
+ XTREG( 69,340,32, 4, 4,0x03e9,0x0006, 0, 3,0x0100,fsr, 0,0,0,0,0,0)
+ XTREG( 70,344,32, 4, 4,0x0253,0x0007,-2, 2,0x1000,ptevaddr, 0,0,0,0,0,0)
+ XTREG( 71,348,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0)
+ XTREG( 72,352,32, 4, 4,0x025a,0x0007,-2, 2,0x1000,rasid, 0,0,0,0,0,0)
+ XTREG( 73,356,25, 4, 4,0x025b,0x0007,-2, 2,0x1000,itlbcfg, 0,0,0,0,0,0)
+ XTREG( 74,360,25, 4, 4,0x025c,0x0007,-2, 2,0x1000,dtlbcfg, 0,0,0,0,0,0)
+ XTREG( 75,364,16, 4, 4,0x025f,0x0007,-2, 2,0x1000,eraccess, 0,0,0,0,0,0)
+ XTREG( 76,368, 2, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0)
+ XTREG( 77,372, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG( 78,376,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG( 79,380,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0)
+ XTREG( 80,384,32, 4, 4,0x0281,0x0007,-2, 2,0x1000,ibreaka1, 0,0,0,0,0,0)
+ XTREG( 81,388,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0)
+ XTREG( 82,392,32, 4, 4,0x0291,0x0007,-2, 2,0x1000,dbreaka1, 0,0,0,0,0,0)
+ XTREG( 83,396,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0)
+ XTREG( 84,400,32, 4, 4,0x02a1,0x0007,-2, 2,0x1000,dbreakc1, 0,0,0,0,0,0)
+ XTREG( 85,404,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG( 86,408,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG( 87,412,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0)
+ XTREG( 88,416,32, 4, 4,0x02b4,0x0007,-2, 2,0x1000,epc4, 0,0,0,0,0,0)
+ XTREG( 89,420,32, 4, 4,0x02b5,0x0007,-2, 2,0x1000,epc5, 0,0,0,0,0,0)
+ XTREG( 90,424,32, 4, 4,0x02b6,0x0007,-2, 2,0x1000,epc6, 0,0,0,0,0,0)
+ XTREG( 91,428,32, 4, 4,0x02b7,0x0007,-2, 2,0x1000,epc7, 0,0,0,0,0,0)
+ XTREG( 92,432,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG( 93,436,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG( 94,440,19, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0)
+ XTREG( 95,444,19, 4, 4,0x02c4,0x0007,-2, 2,0x1000,eps4, 0,0,0,0,0,0)
+ XTREG( 96,448,19, 4, 4,0x02c5,0x0007,-2, 2,0x1000,eps5, 0,0,0,0,0,0)
+ XTREG( 97,452,19, 4, 4,0x02c6,0x0007,-2, 2,0x1000,eps6, 0,0,0,0,0,0)
+ XTREG( 98,456,19, 4, 4,0x02c7,0x0007,-2, 2,0x1000,eps7, 0,0,0,0,0,0)
+ XTREG( 99,460,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG(100,464,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG(101,468,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0)
+ XTREG(102,472,32, 4, 4,0x02d4,0x0007,-2, 2,0x1000,excsave4, 0,0,0,0,0,0)
+ XTREG(103,476,32, 4, 4,0x02d5,0x0007,-2, 2,0x1000,excsave5, 0,0,0,0,0,0)
+ XTREG(104,480,32, 4, 4,0x02d6,0x0007,-2, 2,0x1000,excsave6, 0,0,0,0,0,0)
+ XTREG(105,484,32, 4, 4,0x02d7,0x0007,-2, 2,0x1000,excsave7, 0,0,0,0,0,0)
+ XTREG(106,488, 8, 4, 4,0x02e0,0x0007,-2, 2,0x1000,cpenable, 0,0,0,0,0,0)
+ XTREG(107,492,22, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG(108,496,22, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG(109,500,22, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG(110,504,22, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG(111,508,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG(112,512, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG(113,516,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG(114,520,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG(115,524,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG(116,528,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG(117,532, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG(118,536,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG(119,540,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG(120,544,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG(121,548,32, 4, 4,0x02f2,0x000f,-2, 2,0x1000,ccompare2, 0,0,0,0,0,0)
+ XTREG(122,552,32, 4, 4,0x02f4,0x0007,-2, 2,0x1000,misc0, 0,0,0,0,0,0)
+ XTREG(123,556,32, 4, 4,0x02f5,0x0007,-2, 2,0x1000,misc1, 0,0,0,0,0,0)
+ XTREG(124,560,32, 4, 4,0x0000,0x0006,-2, 8,0x2100,a0, 0,0,0,0,0,0)
+ XTREG(125,564,32, 4, 4,0x0001,0x0006,-2, 8,0x2100,a1, 0,0,0,0,0,0)
+ XTREG(126,568,32, 4, 4,0x0002,0x0006,-2, 8,0x2100,a2, 0,0,0,0,0,0)
+ XTREG(127,572,32, 4, 4,0x0003,0x0006,-2, 8,0x2100,a3, 0,0,0,0,0,0)
+ XTREG(128,576,32, 4, 4,0x0004,0x0006,-2, 8,0x2100,a4, 0,0,0,0,0,0)
+ XTREG(129,580,32, 4, 4,0x0005,0x0006,-2, 8,0x2100,a5, 0,0,0,0,0,0)
+ XTREG(130,584,32, 4, 4,0x0006,0x0006,-2, 8,0x2100,a6, 0,0,0,0,0,0)
+ XTREG(131,588,32, 4, 4,0x0007,0x0006,-2, 8,0x2100,a7, 0,0,0,0,0,0)
+ XTREG(132,592,32, 4, 4,0x0008,0x0006,-2, 8,0x2100,a8, 0,0,0,0,0,0)
+ XTREG(133,596,32, 4, 4,0x0009,0x0006,-2, 8,0x2100,a9, 0,0,0,0,0,0)
+ XTREG(134,600,32, 4, 4,0x000a,0x0006,-2, 8,0x2100,a10, 0,0,0,0,0,0)
+ XTREG(135,604,32, 4, 4,0x000b,0x0006,-2, 8,0x2100,a11, 0,0,0,0,0,0)
+ XTREG(136,608,32, 4, 4,0x000c,0x0006,-2, 8,0x2100,a12, 0,0,0,0,0,0)
+ XTREG(137,612,32, 4, 4,0x000d,0x0006,-2, 8,0x2100,a13, 0,0,0,0,0,0)
+ XTREG(138,616,32, 4, 4,0x000e,0x0006,-2, 8,0x2100,a14, 0,0,0,0,0,0)
+ XTREG(139,620,32, 4, 4,0x000f,0x0006,-2, 8,0x2100,a15, 0,0,0,0,0,0)
+ XTREG(140,624, 1, 1, 1,0x0010,0x0006,-2, 6,0x1010,b0,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(141,625, 1, 1, 1,0x0011,0x0006,-2, 6,0x1010,b1,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(142,626, 1, 1, 1,0x0012,0x0006,-2, 6,0x1010,b2,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(143,627, 1, 1, 1,0x0013,0x0006,-2, 6,0x1010,b3,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(144,628, 1, 1, 1,0x0014,0x0006,-2, 6,0x1010,b4,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(145,629, 1, 1, 1,0x0015,0x0006,-2, 6,0x1010,b5,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG(146,630, 1, 1, 1,0x0016,0x0006,-2, 6,0x1010,b6,
+ 0,0,&xtensa_mask6,0,0,0)
+ XTREG(147,631, 1, 1, 1,0x0017,0x0006,-2, 6,0x1010,b7,
+ 0,0,&xtensa_mask7,0,0,0)
+ XTREG(148,632, 1, 1, 1,0x0018,0x0006,-2, 6,0x1010,b8,
+ 0,0,&xtensa_mask8,0,0,0)
+ XTREG(149,633, 1, 1, 1,0x0019,0x0006,-2, 6,0x1010,b9,
+ 0,0,&xtensa_mask9,0,0,0)
+ XTREG(150,634, 1, 1, 1,0x001a,0x0006,-2, 6,0x1010,b10,
+ 0,0,&xtensa_mask10,0,0,0)
+ XTREG(151,635, 1, 1, 1,0x001b,0x0006,-2, 6,0x1010,b11,
+ 0,0,&xtensa_mask11,0,0,0)
+ XTREG(152,636, 1, 1, 1,0x001c,0x0006,-2, 6,0x1010,b12,
+ 0,0,&xtensa_mask12,0,0,0)
+ XTREG(153,637, 1, 1, 1,0x001d,0x0006,-2, 6,0x1010,b13,
+ 0,0,&xtensa_mask13,0,0,0)
+ XTREG(154,638, 1, 1, 1,0x001e,0x0006,-2, 6,0x1010,b14,
+ 0,0,&xtensa_mask14,0,0,0)
+ XTREG(155,639, 1, 1, 1,0x001f,0x0006,-2, 6,0x1010,b15,
+ 0,0,&xtensa_mask15,0,0,0)
+ XTREG(156,640, 4, 4, 4,0x2008,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask16,0,0,0)
+ XTREG(157,644, 1, 4, 4,0x2009,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask17,0,0,0)
+ XTREG(158,648, 1, 4, 4,0x200a,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask18,0,0,0)
+ XTREG(159,652, 2, 4, 4,0x200b,0x0006,-2, 6,0x1010,psring,
+ 0,0,&xtensa_mask19,0,0,0)
+ XTREG(160,656, 1, 4, 4,0x200c,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask20,0,0,0)
+ XTREG(161,660, 2, 4, 4,0x200d,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask21,0,0,0)
+ XTREG(162,664, 4, 4, 4,0x200e,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask22,0,0,0)
+ XTREG(163,668,40, 8, 4,0x200f,0x0006,-2, 6,0x1010,acc,
+ 0,0,&xtensa_mask23,0,0,0)
+ XTREG(164,676, 4, 4, 4,0x2014,0x0006,-2, 6,0x1010,dbnum,
+ 0,0,&xtensa_mask24,0,0,0)
+ XTREG(165,680, 8, 4, 4,0x2015,0x0006,-2, 6,0x1010,asid3,
+ 0,0,&xtensa_mask25,0,0,0)
+ XTREG(166,684, 8, 4, 4,0x2016,0x0006,-2, 6,0x1010,asid2,
+ 0,0,&xtensa_mask26,0,0,0)
+ XTREG(167,688, 8, 4, 4,0x2017,0x0006,-2, 6,0x1010,asid1,
+ 0,0,&xtensa_mask27,0,0,0)
+ XTREG(168,692, 1, 4, 4,0x2018,0x0006,-2, 6,0x1010,instpgszid6,
+ 0,0,&xtensa_mask28,0,0,0)
+ XTREG(169,696, 1, 4, 4,0x2019,0x0006,-2, 6,0x1010,instpgszid5,
+ 0,0,&xtensa_mask29,0,0,0)
+ XTREG(170,700, 2, 4, 4,0x201a,0x0006,-2, 6,0x1010,instpgszid4,
+ 0,0,&xtensa_mask30,0,0,0)
+ XTREG(171,704, 1, 4, 4,0x201b,0x0006,-2, 6,0x1010,datapgszid6,
+ 0,0,&xtensa_mask31,0,0,0)
+ XTREG(172,708, 1, 4, 4,0x201c,0x0006,-2, 6,0x1010,datapgszid5,
+ 0,0,&xtensa_mask32,0,0,0)
+ XTREG(173,712, 2, 4, 4,0x201d,0x0006,-2, 6,0x1010,datapgszid4,
+ 0,0,&xtensa_mask33,0,0,0)
+ XTREG(174,716,10, 4, 4,0x201e,0x0006,-2, 6,0x1010,ptbase,
+ 0,0,&xtensa_mask34,0,0,0)
+ XTREG(175,720, 2, 4, 4,0x201f,0x0006, 0, 5,0x1010,roundmode,
+ 0,0,&xtensa_mask35,0,0,0)
+ XTREG(176,724, 1, 4, 4,0x2020,0x0006, 0, 5,0x1010,invalidenable,
+ 0,0,&xtensa_mask36,0,0,0)
+ XTREG(177,728, 1, 4, 4,0x2021,0x0006, 0, 5,0x1010,divzeroenable,
+ 0,0,&xtensa_mask37,0,0,0)
+ XTREG(178,732, 1, 4, 4,0x2022,0x0006, 0, 5,0x1010,overflowenable,
+ 0,0,&xtensa_mask38,0,0,0)
+ XTREG(179,736, 1, 4, 4,0x2023,0x0006, 0, 5,0x1010,underflowenable,
+ 0,0,&xtensa_mask39,0,0,0)
+ XTREG(180,740, 1, 4, 4,0x2024,0x0006, 0, 5,0x1010,inexactenable,
+ 0,0,&xtensa_mask40,0,0,0)
+ XTREG(181,744, 1, 4, 4,0x2025,0x0006, 0, 5,0x1010,invalidflag,
+ 0,0,&xtensa_mask41,0,0,0)
+ XTREG(182,748, 1, 4, 4,0x2026,0x0006, 0, 5,0x1010,divzeroflag,
+ 0,0,&xtensa_mask42,0,0,0)
+ XTREG(183,752, 1, 4, 4,0x2027,0x0006, 0, 5,0x1010,overflowflag,
+ 0,0,&xtensa_mask43,0,0,0)
+ XTREG(184,756, 1, 4, 4,0x2028,0x0006, 0, 5,0x1010,underflowflag,
+ 0,0,&xtensa_mask44,0,0,0)
+ XTREG(185,760, 1, 4, 4,0x2029,0x0006, 0, 5,0x1010,inexactflag,
+ 0,0,&xtensa_mask45,0,0,0)
+ XTREG(186,764,20, 4, 4,0x202a,0x0006, 0, 5,0x1010,fpreserved20,
+ 0,0,&xtensa_mask46,0,0,0)
+ XTREG(187,768,20, 4, 4,0x202b,0x0006, 0, 5,0x1010,fpreserved20a,
+ 0,0,&xtensa_mask47,0,0,0)
+ XTREG(188,772, 5, 4, 4,0x202c,0x0006, 0, 5,0x1010,fpreserved5,
+ 0,0,&xtensa_mask48,0,0,0)
+ XTREG_END
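+
+/*
+ * Editor's note (hedged): this file is an X-macro table; it does not
+ * define XTREG/XTREG_END itself, so the including translation unit
+ * supplies them.  The struct and macro below are purely illustrative,
+ * and the per-column meanings (register index, byte offset, bit width,
+ * gdb/target sizes, target register number, flags, coprocessor, type,
+ * group, name, then optional mask/format pointers) are inferred from
+ * the data, not taken from a published spec:
+ *
+ *     typedef struct {
+ *         int idx, ofs, bits, gdb_sz, tgt_sz;
+ *         int targno, flags, cp, type, group;
+ *         const char *name;
+ *     } ExampleGdbReg;
+ *
+ *     #define XTREG(idx, ofs, bi, sz, al, no, fl, cp, ty, gr, n, ...) \
+ *         { idx, ofs, bi, sz, al, no, fl, cp, ty, gr, #n },
+ *     #define XTREG_END
+ *     static const ExampleGdbReg example_regs[] = {
+ *     #include "gdb-config.c.inc"
+ *     };
+ */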
diff --git a/target/xtensa/core-de233_fpu/xtensa-modules.c.inc b/target/xtensa/core-de233_fpu/xtensa-modules.c.inc
new file mode 100644
index 000000000..129c02351
--- /dev/null
+++ b/target/xtensa/core-de233_fpu/xtensa-modules.c.inc
@@ -0,0 +1,20758 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2020 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "BR", 4, 0 },
+ { "ACCLO", 16, 0 },
+ { "ACCHI", 17, 0 },
+ { "M0", 32, 0 },
+ { "M1", 33, 0 },
+ { "M2", 34, 0 },
+ { "M3", 35, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "CONFIGID0", 176, 0 },
+ { "CONFIGID1", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "ERACCESS", 95, 0 },
+ { "THREADPTR", 231, 1 },
+ { "FCR", 232, 1 },
+ { "FSR", 233, 1 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 74
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 233
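+
+/*
+ * Editor's note (hedged): in the matching libisa layout the third field
+ * of xtensa_sysreg_internal is an is_user flag: 0 for special registers
+ * reached via RSR/WSR, 1 for user registers reached via RUR/WUR
+ * (THREADPTR, FCR, FSR and EXPSTATE above).  A minimal lookup over the
+ * table might read:
+ *
+ *     static const char *example_sysreg_name(int number, int is_user)
+ *     {
+ *         for (int i = 0; i < NUM_SYSREGS; i++) {
+ *             if (sysregs[i].number == number &&
+ *                 sysregs[i].is_user == is_user) {
+ *                 return sysregs[i].name;
+ *             }
+ *         }
+ *         return 0; // not present in this configuration
+ *     }
+ */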
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EPS3", 15, 0 },
+ { "EPS4", 15, 0 },
+ { "EPS5", 15, 0 },
+ { "EPS6", 15, 0 },
+ { "EPS7", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "ACC", 40, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID6", 1, 0 },
+ { "INSTPGSZID5", 1, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID6", 1, 0 },
+ { "DATAPGSZID5", 1, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 },
+ { "CPENABLE", 8, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "ERACCESS", 16, 0 },
+ { "RoundMode", 2, 0 },
+ { "InvalidEnable", 1, 0 },
+ { "DivZeroEnable", 1, 0 },
+ { "OverflowEnable", 1, 0 },
+ { "UnderflowEnable", 1, 0 },
+ { "InexactEnable", 1, 0 },
+ { "InvalidFlag", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "DivZeroFlag", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "OverflowFlag", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "UnderflowFlag", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "InexactFlag", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "FPreserved20", 20, 0 },
+ { "FPreserved20a", 20, 0 },
+ { "FPreserved5", 5, 0 },
+ { "FPreserved7", 7, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 92
+
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EPC7,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EXCSAVE7,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EPS7,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSRING,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_THREADPTR,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_ACC,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CCOMPARE2,
+ STATE_ASID3,
+ STATE_ASID2,
+ STATE_ASID1,
+ STATE_INSTPGSZID6,
+ STATE_INSTPGSZID5,
+ STATE_INSTPGSZID4,
+ STATE_DATAPGSZID6,
+ STATE_DATAPGSZID5,
+ STATE_DATAPGSZID4,
+ STATE_PTBASE,
+ STATE_CPENABLE,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_ERACCESS,
+ STATE_RoundMode,
+ STATE_InvalidEnable,
+ STATE_DivZeroEnable,
+ STATE_OverflowEnable,
+ STATE_UnderflowEnable,
+ STATE_InexactEnable,
+ STATE_InvalidFlag,
+ STATE_DivZeroFlag,
+ STATE_OverflowFlag,
+ STATE_UnderflowFlag,
+ STATE_InexactFlag,
+ STATE_FPreserved20,
+ STATE_FPreserved20a,
+ STATE_FPreserved5,
+ STATE_FPreserved7,
+ STATE_EXPSTATE
+};
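+
+/* Editor's note (hedged): the enumerators above are emitted in the same
+   order as the states[] array, so a STATE_<x> value can index it
+   directly, e.g. states[STATE_EXCCAUSE].name is "EXCCAUSE", and
+   NUM_STATES matches the array length (92 entries). */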
+
+
+/* Field definitions. */
+
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
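+
+/*
+ * Editor's note (hedged sketch): the Field_*_get/_set pairs below all
+ * follow one generated pattern.  For a w-bit field whose top bit sits
+ * at bit (31 - l) of the 32-bit slot word, the accessors are
+ *
+ *     tie_t = (insn[0] << l) >> (32 - w);              // extract
+ *     insn[0] = (insn[0] & ~mask) | (tie_t << shift);  // insert
+ *
+ * so Field_t above, with l = 24 and w = 4, reads bits [7:4] of the
+ * slot word (mask 0xf0, shift 4).  The shifts are well defined because
+ * the operands are unsigned; fields split across several bit groups
+ * simply repeat the step once per group.
+ */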
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_tlo_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_tlo_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_w_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_w_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_rhi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_rhi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_imms8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imms8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_disp_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r_disp_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r_3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_rbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_rbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_tbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_tbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_y_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_y_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_x_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_x_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_r2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_t4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_s4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_r8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 23) >> 27);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f0) | (tie_t << 4);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_mr0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_mr1_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+static unsigned
+Implicit_Field_mr2_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 2;
+}
+
+static unsigned
+Implicit_Field_mr3_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 3;
+}
+
+static unsigned
+Implicit_Field_bt16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_bs16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_br16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_brall_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
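+
+/* Editor's note (hedged): the Implicit_Field_* getters above model
+   operands that are not encoded in the instruction word at all; each
+   returns a fixed register number (for instance ar4 yields a4, as used
+   by the windowed call convention) and Implicit_Field_set is a no-op. */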
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_s8,
+ FIELD_imms8,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_r_disp,
+ FIELD_r_3,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_r3,
+ FIELD_rbit2,
+ FIELD_rhi,
+ FIELD_t3,
+ FIELD_tbit2,
+ FIELD_tlo,
+ FIELD_w,
+ FIELD_y,
+ FIELD_x,
+ FIELD_t2,
+ FIELD_s2,
+ FIELD_r2,
+ FIELD_t4,
+ FIELD_s4,
+ FIELD_r4,
+ FIELD_t8,
+ FIELD_r8,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_bitindex,
+ FIELD_s3to1,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__mr0,
+ FIELD__mr1,
+ FIELD__mr2,
+ FIELD__mr3,
+ FIELD__bt16,
+ FIELD__bs16,
+ FIELD__br16,
+ FIELD__brall
+};
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+ {"XT_LOADSTORE_UNIT", 1}
+};
+
+enum xtensa_funcUnit_id {
+ FUNCUNIT_XT_LOADSTORE_UNIT
+};
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_MR,
+ REGFILE_BR,
+ REGFILE_FR,
+ REGFILE_BR2,
+ REGFILE_BR4,
+ REGFILE_BR8,
+ REGFILE_BR16
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "MR", "m", REGFILE_MR, 32, 4 },
+ { "BR", "b", REGFILE_BR, 1, 16 },
+ { "FR", "f", REGFILE_FR, 64, 16 },
+ { "BR2", "b", REGFILE_BR, 2, 8 },
+ { "BR4", "b", REGFILE_BR, 4, 4 },
+ { "BR8", "b", REGFILE_BR, 8, 2 },
+ { "BR16", "b", REGFILE_BR, 16, 1 }
+};
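+
+/* Editor's note (hedged): BR2/BR4/BR8/BR16 do not add storage; they
+   share the short name "b" and the parent id REGFILE_BR, and their
+   width * depth products (2*8, 4*4, 8*2, 16*1) all equal the 16 bits
+   of the boolean register file, so they read as aliased views of BR at
+   coarser granularity for multi-bit boolean operands. */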
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "IMPWIRE", 32, 0, 0, 'i' }
+};
+
+enum xtensa_interface_id {
+ INTERFACE_IMPWIRE
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
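+
+/*
+ * Editor's note (hedged): the tables above decode 4-bit immediate
+ * encodings into operand values; the trailing 0 appears to be a
+ * generator sentinel.  b4c matches the classic Xtensa b4const set used
+ * by the branch-immediate instructions (encoding 0 stands for -1,
+ * hence the 0xffffffff), b4cu the unsigned b4constu variant, and ai4c
+ * the ADDI.N-style immediate where encoding 0 also stands for -1.
+ * Decoding is a plain lookup, e.g.:
+ *
+ *     int32_t example_b4const(unsigned r_field) // r_field in [0, 15]
+ *     {
+ *         return (int32_t)CONST_TBL_b4c_0[r_field & 0xf];
+ *     }
+ */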
+
+/* constant table RECIP_Data8 */
+static const unsigned CONST_TBL_RECIP_Data8_0[] = {
+ 0xff & 0xff,
+ 0xfd & 0xff,
+ 0xfb & 0xff,
+ 0xf9 & 0xff,
+ 0xf7 & 0xff,
+ 0xf5 & 0xff,
+ 0xf4 & 0xff,
+ 0xf2 & 0xff,
+ 0xf0 & 0xff,
+ 0xee & 0xff,
+ 0xed & 0xff,
+ 0xeb & 0xff,
+ 0xe9 & 0xff,
+ 0xe8 & 0xff,
+ 0xe6 & 0xff,
+ 0xe4 & 0xff,
+ 0xe3 & 0xff,
+ 0xe1 & 0xff,
+ 0xe0 & 0xff,
+ 0xde & 0xff,
+ 0xdd & 0xff,
+ 0xdb & 0xff,
+ 0xda & 0xff,
+ 0xd8 & 0xff,
+ 0xd7 & 0xff,
+ 0xd5 & 0xff,
+ 0xd4 & 0xff,
+ 0xd3 & 0xff,
+ 0xd1 & 0xff,
+ 0xd0 & 0xff,
+ 0xcf & 0xff,
+ 0xcd & 0xff,
+ 0xcc & 0xff,
+ 0xcb & 0xff,
+ 0xca & 0xff,
+ 0xc8 & 0xff,
+ 0xc7 & 0xff,
+ 0xc6 & 0xff,
+ 0xc5 & 0xff,
+ 0xc4 & 0xff,
+ 0xc2 & 0xff,
+ 0xc1 & 0xff,
+ 0xc0 & 0xff,
+ 0xbf & 0xff,
+ 0xbe & 0xff,
+ 0xbd & 0xff,
+ 0xbc & 0xff,
+ 0xbb & 0xff,
+ 0xba & 0xff,
+ 0xb9 & 0xff,
+ 0xb8 & 0xff,
+ 0xb7 & 0xff,
+ 0xb6 & 0xff,
+ 0xb5 & 0xff,
+ 0xb4 & 0xff,
+ 0xb3 & 0xff,
+ 0xb2 & 0xff,
+ 0xb1 & 0xff,
+ 0xb0 & 0xff,
+ 0xaf & 0xff,
+ 0xae & 0xff,
+ 0xad & 0xff,
+ 0xac & 0xff,
+ 0xab & 0xff,
+ 0xaa & 0xff,
+ 0xa9 & 0xff,
+ 0xa8 & 0xff,
+ 0xa8 & 0xff,
+ 0xa7 & 0xff,
+ 0xa6 & 0xff,
+ 0xa5 & 0xff,
+ 0xa4 & 0xff,
+ 0xa3 & 0xff,
+ 0xa3 & 0xff,
+ 0xa2 & 0xff,
+ 0xa1 & 0xff,
+ 0xa0 & 0xff,
+ 0x9f & 0xff,
+ 0x9f & 0xff,
+ 0x9e & 0xff,
+ 0x9d & 0xff,
+ 0x9c & 0xff,
+ 0x9c & 0xff,
+ 0x9b & 0xff,
+ 0x9a & 0xff,
+ 0x99 & 0xff,
+ 0x99 & 0xff,
+ 0x98 & 0xff,
+ 0x97 & 0xff,
+ 0x97 & 0xff,
+ 0x96 & 0xff,
+ 0x95 & 0xff,
+ 0x95 & 0xff,
+ 0x94 & 0xff,
+ 0x93 & 0xff,
+ 0x93 & 0xff,
+ 0x92 & 0xff,
+ 0x91 & 0xff,
+ 0x91 & 0xff,
+ 0x90 & 0xff,
+ 0x8f & 0xff,
+ 0x8f & 0xff,
+ 0x8e & 0xff,
+ 0x8e & 0xff,
+ 0x8d & 0xff,
+ 0x8c & 0xff,
+ 0x8c & 0xff,
+ 0x8b & 0xff,
+ 0x8b & 0xff,
+ 0x8a & 0xff,
+ 0x89 & 0xff,
+ 0x89 & 0xff,
+ 0x88 & 0xff,
+ 0x88 & 0xff,
+ 0x87 & 0xff,
+ 0x87 & 0xff,
+ 0x86 & 0xff,
+ 0x85 & 0xff,
+ 0x85 & 0xff,
+ 0x84 & 0xff,
+ 0x84 & 0xff,
+ 0x83 & 0xff,
+ 0x83 & 0xff,
+ 0x82 & 0xff,
+ 0x82 & 0xff,
+ 0x81 & 0xff,
+ 0x81 & 0xff,
+ 0x81 & 0xff,
+ 0
+};
+
+/* constant table RSQRT_Data8 */
+static const unsigned CONST_TBL_RSQRT_Data8_0[] = {
+ 0xb4 & 0xff,
+ 0xb3 & 0xff,
+ 0xb2 & 0xff,
+ 0xb0 & 0xff,
+ 0xaf & 0xff,
+ 0xae & 0xff,
+ 0xac & 0xff,
+ 0xab & 0xff,
+ 0xaa & 0xff,
+ 0xa9 & 0xff,
+ 0xa8 & 0xff,
+ 0xa7 & 0xff,
+ 0xa6 & 0xff,
+ 0xa5 & 0xff,
+ 0xa3 & 0xff,
+ 0xa2 & 0xff,
+ 0xa1 & 0xff,
+ 0xa0 & 0xff,
+ 0x9f & 0xff,
+ 0x9e & 0xff,
+ 0x9e & 0xff,
+ 0x9d & 0xff,
+ 0x9c & 0xff,
+ 0x9b & 0xff,
+ 0x9a & 0xff,
+ 0x99 & 0xff,
+ 0x98 & 0xff,
+ 0x97 & 0xff,
+ 0x97 & 0xff,
+ 0x96 & 0xff,
+ 0x95 & 0xff,
+ 0x94 & 0xff,
+ 0x93 & 0xff,
+ 0x93 & 0xff,
+ 0x92 & 0xff,
+ 0x91 & 0xff,
+ 0x90 & 0xff,
+ 0x90 & 0xff,
+ 0x8f & 0xff,
+ 0x8e & 0xff,
+ 0x8e & 0xff,
+ 0x8d & 0xff,
+ 0x8c & 0xff,
+ 0x8c & 0xff,
+ 0x8b & 0xff,
+ 0x8a & 0xff,
+ 0x8a & 0xff,
+ 0x89 & 0xff,
+ 0x89 & 0xff,
+ 0x88 & 0xff,
+ 0x87 & 0xff,
+ 0x87 & 0xff,
+ 0x86 & 0xff,
+ 0x86 & 0xff,
+ 0x85 & 0xff,
+ 0x84 & 0xff,
+ 0x84 & 0xff,
+ 0x83 & 0xff,
+ 0x83 & 0xff,
+ 0x82 & 0xff,
+ 0x82 & 0xff,
+ 0x81 & 0xff,
+ 0x81 & 0xff,
+ 0x80 & 0xff,
+ 0xff & 0xff,
+ 0xfd & 0xff,
+ 0xfb & 0xff,
+ 0xf9 & 0xff,
+ 0xf7 & 0xff,
+ 0xf6 & 0xff,
+ 0xf4 & 0xff,
+ 0xf2 & 0xff,
+ 0xf1 & 0xff,
+ 0xef & 0xff,
+ 0xed & 0xff,
+ 0xec & 0xff,
+ 0xea & 0xff,
+ 0xe9 & 0xff,
+ 0xe7 & 0xff,
+ 0xe6 & 0xff,
+ 0xe4 & 0xff,
+ 0xe3 & 0xff,
+ 0xe1 & 0xff,
+ 0xe0 & 0xff,
+ 0xdf & 0xff,
+ 0xdd & 0xff,
+ 0xdc & 0xff,
+ 0xdb & 0xff,
+ 0xda & 0xff,
+ 0xd8 & 0xff,
+ 0xd7 & 0xff,
+ 0xd6 & 0xff,
+ 0xd5 & 0xff,
+ 0xd4 & 0xff,
+ 0xd3 & 0xff,
+ 0xd2 & 0xff,
+ 0xd0 & 0xff,
+ 0xcf & 0xff,
+ 0xce & 0xff,
+ 0xcd & 0xff,
+ 0xcc & 0xff,
+ 0xcb & 0xff,
+ 0xca & 0xff,
+ 0xc9 & 0xff,
+ 0xc8 & 0xff,
+ 0xc7 & 0xff,
+ 0xc6 & 0xff,
+ 0xc6 & 0xff,
+ 0xc5 & 0xff,
+ 0xc4 & 0xff,
+ 0xc3 & 0xff,
+ 0xc2 & 0xff,
+ 0xc1 & 0xff,
+ 0xc0 & 0xff,
+ 0xbf & 0xff,
+ 0xbf & 0xff,
+ 0xbe & 0xff,
+ 0xbd & 0xff,
+ 0xbc & 0xff,
+ 0xbb & 0xff,
+ 0xbb & 0xff,
+ 0xba & 0xff,
+ 0xb9 & 0xff,
+ 0xb8 & 0xff,
+ 0xb8 & 0xff,
+ 0xb7 & 0xff,
+ 0xb6 & 0xff,
+ 0xb5 & 0xff,
+ 0
+};
+
+/* constant table RECIP_Data10_2 */
+static const unsigned CONST_TBL_RECIP_Data10_2_0[] = {
+ 0x3fc & 0x3ff,
+ 0x3f4 & 0x3ff,
+ 0x3ec & 0x3ff,
+ 0x3e5 & 0x3ff,
+ 0x3dd & 0x3ff,
+ 0x3d6 & 0x3ff,
+ 0x3cf & 0x3ff,
+ 0x3c7 & 0x3ff,
+ 0x3c0 & 0x3ff,
+ 0x3b9 & 0x3ff,
+ 0x3b2 & 0x3ff,
+ 0x3ac & 0x3ff,
+ 0x3a5 & 0x3ff,
+ 0x39e & 0x3ff,
+ 0x398 & 0x3ff,
+ 0x391 & 0x3ff,
+ 0x38b & 0x3ff,
+ 0x385 & 0x3ff,
+ 0x37f & 0x3ff,
+ 0x378 & 0x3ff,
+ 0x373 & 0x3ff,
+ 0x36c & 0x3ff,
+ 0x367 & 0x3ff,
+ 0x361 & 0x3ff,
+ 0x35c & 0x3ff,
+ 0x356 & 0x3ff,
+ 0x350 & 0x3ff,
+ 0x34b & 0x3ff,
+ 0x345 & 0x3ff,
+ 0x340 & 0x3ff,
+ 0x33b & 0x3ff,
+ 0x335 & 0x3ff,
+ 0x330 & 0x3ff,
+ 0x32c & 0x3ff,
+ 0x327 & 0x3ff,
+ 0x322 & 0x3ff,
+ 0x31c & 0x3ff,
+ 0x318 & 0x3ff,
+ 0x314 & 0x3ff,
+ 0x30e & 0x3ff,
+ 0x30a & 0x3ff,
+ 0x306 & 0x3ff,
+ 0x300 & 0x3ff,
+ 0x2fc & 0x3ff,
+ 0x2f8 & 0x3ff,
+ 0x2f4 & 0x3ff,
+ 0x2f0 & 0x3ff,
+ 0x2ea & 0x3ff,
+ 0x2e6 & 0x3ff,
+ 0x2e2 & 0x3ff,
+ 0x2de & 0x3ff,
+ 0x2da & 0x3ff,
+ 0x2d6 & 0x3ff,
+ 0x2d2 & 0x3ff,
+ 0x2ce & 0x3ff,
+ 0x2ca & 0x3ff,
+ 0x2c6 & 0x3ff,
+ 0x2c2 & 0x3ff,
+ 0x2be & 0x3ff,
+ 0x2ba & 0x3ff,
+ 0x2b8 & 0x3ff,
+ 0x2b4 & 0x3ff,
+ 0x2b0 & 0x3ff,
+ 0x2ac & 0x3ff,
+ 0x2a8 & 0x3ff,
+ 0x2a6 & 0x3ff,
+ 0x2a2 & 0x3ff,
+ 0x29e & 0x3ff,
+ 0x29c & 0x3ff,
+ 0x298 & 0x3ff,
+ 0x294 & 0x3ff,
+ 0x290 & 0x3ff,
+ 0x28e & 0x3ff,
+ 0x28a & 0x3ff,
+ 0x288 & 0x3ff,
+ 0x284 & 0x3ff,
+ 0x280 & 0x3ff,
+ 0x27e & 0x3ff,
+ 0x27a & 0x3ff,
+ 0x278 & 0x3ff,
+ 0x274 & 0x3ff,
+ 0x272 & 0x3ff,
+ 0x26e & 0x3ff,
+ 0x26c & 0x3ff,
+ 0x268 & 0x3ff,
+ 0x266 & 0x3ff,
+ 0x264 & 0x3ff,
+ 0x260 & 0x3ff,
+ 0x25e & 0x3ff,
+ 0x25a & 0x3ff,
+ 0x258 & 0x3ff,
+ 0x254 & 0x3ff,
+ 0x252 & 0x3ff,
+ 0x250 & 0x3ff,
+ 0x24c & 0x3ff,
+ 0x24a & 0x3ff,
+ 0x248 & 0x3ff,
+ 0x246 & 0x3ff,
+ 0x242 & 0x3ff,
+ 0x240 & 0x3ff,
+ 0x23e & 0x3ff,
+ 0x23c & 0x3ff,
+ 0x238 & 0x3ff,
+ 0x236 & 0x3ff,
+ 0x234 & 0x3ff,
+ 0x232 & 0x3ff,
+ 0x230 & 0x3ff,
+ 0x22c & 0x3ff,
+ 0x22a & 0x3ff,
+ 0x228 & 0x3ff,
+ 0x226 & 0x3ff,
+ 0x224 & 0x3ff,
+ 0x220 & 0x3ff,
+ 0x21e & 0x3ff,
+ 0x21c & 0x3ff,
+ 0x21a & 0x3ff,
+ 0x218 & 0x3ff,
+ 0x216 & 0x3ff,
+ 0x214 & 0x3ff,
+ 0x212 & 0x3ff,
+ 0x210 & 0x3ff,
+ 0x20e & 0x3ff,
+ 0x20c & 0x3ff,
+ 0x208 & 0x3ff,
+ 0x208 & 0x3ff,
+ 0x204 & 0x3ff,
+ 0x204 & 0x3ff,
+ 0x201 & 0x3ff,
+ 0
+};
+
+/* constant table RSQRT_10b_256 */
+static const unsigned CONST_TBL_RSQRT_10b_256_0[] = {
+ 0x1a5 & 0x3ff,
+ 0x1a0 & 0x3ff,
+ 0x19a & 0x3ff,
+ 0x195 & 0x3ff,
+ 0x18f & 0x3ff,
+ 0x18a & 0x3ff,
+ 0x185 & 0x3ff,
+ 0x180 & 0x3ff,
+ 0x17a & 0x3ff,
+ 0x175 & 0x3ff,
+ 0x170 & 0x3ff,
+ 0x16b & 0x3ff,
+ 0x166 & 0x3ff,
+ 0x161 & 0x3ff,
+ 0x15d & 0x3ff,
+ 0x158 & 0x3ff,
+ 0x153 & 0x3ff,
+ 0x14e & 0x3ff,
+ 0x14a & 0x3ff,
+ 0x145 & 0x3ff,
+ 0x140 & 0x3ff,
+ 0x13c & 0x3ff,
+ 0x138 & 0x3ff,
+ 0x133 & 0x3ff,
+ 0x12f & 0x3ff,
+ 0x12a & 0x3ff,
+ 0x126 & 0x3ff,
+ 0x122 & 0x3ff,
+ 0x11e & 0x3ff,
+ 0x11a & 0x3ff,
+ 0x115 & 0x3ff,
+ 0x111 & 0x3ff,
+ 0x10d & 0x3ff,
+ 0x109 & 0x3ff,
+ 0x105 & 0x3ff,
+ 0x101 & 0x3ff,
+ 0xfd & 0x3ff,
+ 0xfa & 0x3ff,
+ 0xf6 & 0x3ff,
+ 0xf2 & 0x3ff,
+ 0xee & 0x3ff,
+ 0xea & 0x3ff,
+ 0xe7 & 0x3ff,
+ 0xe3 & 0x3ff,
+ 0xdf & 0x3ff,
+ 0xdc & 0x3ff,
+ 0xd8 & 0x3ff,
+ 0xd5 & 0x3ff,
+ 0xd1 & 0x3ff,
+ 0xce & 0x3ff,
+ 0xca & 0x3ff,
+ 0xc7 & 0x3ff,
+ 0xc3 & 0x3ff,
+ 0xc0 & 0x3ff,
+ 0xbd & 0x3ff,
+ 0xb9 & 0x3ff,
+ 0xb6 & 0x3ff,
+ 0xb3 & 0x3ff,
+ 0xb0 & 0x3ff,
+ 0xad & 0x3ff,
+ 0xa9 & 0x3ff,
+ 0xa6 & 0x3ff,
+ 0xa3 & 0x3ff,
+ 0xa0 & 0x3ff,
+ 0x9d & 0x3ff,
+ 0x9a & 0x3ff,
+ 0x97 & 0x3ff,
+ 0x94 & 0x3ff,
+ 0x91 & 0x3ff,
+ 0x8e & 0x3ff,
+ 0x8b & 0x3ff,
+ 0x88 & 0x3ff,
+ 0x85 & 0x3ff,
+ 0x82 & 0x3ff,
+ 0x7f & 0x3ff,
+ 0x7d & 0x3ff,
+ 0x7a & 0x3ff,
+ 0x77 & 0x3ff,
+ 0x74 & 0x3ff,
+ 0x71 & 0x3ff,
+ 0x6f & 0x3ff,
+ 0x6c & 0x3ff,
+ 0x69 & 0x3ff,
+ 0x67 & 0x3ff,
+ 0x64 & 0x3ff,
+ 0x61 & 0x3ff,
+ 0x5f & 0x3ff,
+ 0x5c & 0x3ff,
+ 0x5a & 0x3ff,
+ 0x57 & 0x3ff,
+ 0x54 & 0x3ff,
+ 0x52 & 0x3ff,
+ 0x4f & 0x3ff,
+ 0x4d & 0x3ff,
+ 0x4a & 0x3ff,
+ 0x48 & 0x3ff,
+ 0x45 & 0x3ff,
+ 0x43 & 0x3ff,
+ 0x41 & 0x3ff,
+ 0x3e & 0x3ff,
+ 0x3c & 0x3ff,
+ 0x3a & 0x3ff,
+ 0x37 & 0x3ff,
+ 0x35 & 0x3ff,
+ 0x33 & 0x3ff,
+ 0x30 & 0x3ff,
+ 0x2e & 0x3ff,
+ 0x2c & 0x3ff,
+ 0x29 & 0x3ff,
+ 0x27 & 0x3ff,
+ 0x25 & 0x3ff,
+ 0x23 & 0x3ff,
+ 0x20 & 0x3ff,
+ 0x1e & 0x3ff,
+ 0x1c & 0x3ff,
+ 0x1a & 0x3ff,
+ 0x18 & 0x3ff,
+ 0x16 & 0x3ff,
+ 0x14 & 0x3ff,
+ 0x11 & 0x3ff,
+ 0xf & 0x3ff,
+ 0xd & 0x3ff,
+ 0xb & 0x3ff,
+ 0x9 & 0x3ff,
+ 0x7 & 0x3ff,
+ 0x5 & 0x3ff,
+ 0x3 & 0x3ff,
+ 0x1 & 0x3ff,
+ 0x3fc & 0x3ff,
+ 0x3f4 & 0x3ff,
+ 0x3ec & 0x3ff,
+ 0x3e5 & 0x3ff,
+ 0x3dd & 0x3ff,
+ 0x3d5 & 0x3ff,
+ 0x3ce & 0x3ff,
+ 0x3c7 & 0x3ff,
+ 0x3bf & 0x3ff,
+ 0x3b8 & 0x3ff,
+ 0x3b1 & 0x3ff,
+ 0x3aa & 0x3ff,
+ 0x3a3 & 0x3ff,
+ 0x39c & 0x3ff,
+ 0x395 & 0x3ff,
+ 0x38e & 0x3ff,
+ 0x388 & 0x3ff,
+ 0x381 & 0x3ff,
+ 0x37a & 0x3ff,
+ 0x374 & 0x3ff,
+ 0x36d & 0x3ff,
+ 0x367 & 0x3ff,
+ 0x361 & 0x3ff,
+ 0x35a & 0x3ff,
+ 0x354 & 0x3ff,
+ 0x34e & 0x3ff,
+ 0x348 & 0x3ff,
+ 0x342 & 0x3ff,
+ 0x33c & 0x3ff,
+ 0x336 & 0x3ff,
+ 0x330 & 0x3ff,
+ 0x32b & 0x3ff,
+ 0x325 & 0x3ff,
+ 0x31f & 0x3ff,
+ 0x31a & 0x3ff,
+ 0x314 & 0x3ff,
+ 0x30f & 0x3ff,
+ 0x309 & 0x3ff,
+ 0x304 & 0x3ff,
+ 0x2fe & 0x3ff,
+ 0x2f9 & 0x3ff,
+ 0x2f4 & 0x3ff,
+ 0x2ee & 0x3ff,
+ 0x2e9 & 0x3ff,
+ 0x2e4 & 0x3ff,
+ 0x2df & 0x3ff,
+ 0x2da & 0x3ff,
+ 0x2d5 & 0x3ff,
+ 0x2d0 & 0x3ff,
+ 0x2cb & 0x3ff,
+ 0x2c6 & 0x3ff,
+ 0x2c1 & 0x3ff,
+ 0x2bd & 0x3ff,
+ 0x2b8 & 0x3ff,
+ 0x2b3 & 0x3ff,
+ 0x2ae & 0x3ff,
+ 0x2aa & 0x3ff,
+ 0x2a5 & 0x3ff,
+ 0x2a1 & 0x3ff,
+ 0x29c & 0x3ff,
+ 0x298 & 0x3ff,
+ 0x293 & 0x3ff,
+ 0x28f & 0x3ff,
+ 0x28a & 0x3ff,
+ 0x286 & 0x3ff,
+ 0x282 & 0x3ff,
+ 0x27d & 0x3ff,
+ 0x279 & 0x3ff,
+ 0x275 & 0x3ff,
+ 0x271 & 0x3ff,
+ 0x26d & 0x3ff,
+ 0x268 & 0x3ff,
+ 0x264 & 0x3ff,
+ 0x260 & 0x3ff,
+ 0x25c & 0x3ff,
+ 0x258 & 0x3ff,
+ 0x254 & 0x3ff,
+ 0x250 & 0x3ff,
+ 0x24c & 0x3ff,
+ 0x249 & 0x3ff,
+ 0x245 & 0x3ff,
+ 0x241 & 0x3ff,
+ 0x23d & 0x3ff,
+ 0x239 & 0x3ff,
+ 0x235 & 0x3ff,
+ 0x232 & 0x3ff,
+ 0x22e & 0x3ff,
+ 0x22a & 0x3ff,
+ 0x227 & 0x3ff,
+ 0x223 & 0x3ff,
+ 0x220 & 0x3ff,
+ 0x21c & 0x3ff,
+ 0x218 & 0x3ff,
+ 0x215 & 0x3ff,
+ 0x211 & 0x3ff,
+ 0x20e & 0x3ff,
+ 0x20a & 0x3ff,
+ 0x207 & 0x3ff,
+ 0x204 & 0x3ff,
+ 0x200 & 0x3ff,
+ 0x1fd & 0x3ff,
+ 0x1f9 & 0x3ff,
+ 0x1f6 & 0x3ff,
+ 0x1f3 & 0x3ff,
+ 0x1f0 & 0x3ff,
+ 0x1ec & 0x3ff,
+ 0x1e9 & 0x3ff,
+ 0x1e6 & 0x3ff,
+ 0x1e3 & 0x3ff,
+ 0x1df & 0x3ff,
+ 0x1dc & 0x3ff,
+ 0x1d9 & 0x3ff,
+ 0x1d6 & 0x3ff,
+ 0x1d3 & 0x3ff,
+ 0x1d0 & 0x3ff,
+ 0x1cd & 0x3ff,
+ 0x1ca & 0x3ff,
+ 0x1c7 & 0x3ff,
+ 0x1c4 & 0x3ff,
+ 0x1c1 & 0x3ff,
+ 0x1be & 0x3ff,
+ 0x1bb & 0x3ff,
+ 0x1b8 & 0x3ff,
+ 0x1b5 & 0x3ff,
+ 0x1b2 & 0x3ff,
+ 0x1af & 0x3ff,
+ 0x1ac & 0x3ff,
+ 0x1aa & 0x3ff,
+ 0
+};
+
+/* constant table RECIP_10b_256 */
+static const unsigned CONST_TBL_RECIP_10b_256_0[] = {
+ 0x3fc & 0x3ff,
+ 0x3f4 & 0x3ff,
+ 0x3ec & 0x3ff,
+ 0x3e4 & 0x3ff,
+ 0x3dd & 0x3ff,
+ 0x3d5 & 0x3ff,
+ 0x3cd & 0x3ff,
+ 0x3c6 & 0x3ff,
+ 0x3be & 0x3ff,
+ 0x3b7 & 0x3ff,
+ 0x3af & 0x3ff,
+ 0x3a8 & 0x3ff,
+ 0x3a1 & 0x3ff,
+ 0x399 & 0x3ff,
+ 0x392 & 0x3ff,
+ 0x38b & 0x3ff,
+ 0x384 & 0x3ff,
+ 0x37d & 0x3ff,
+ 0x376 & 0x3ff,
+ 0x36f & 0x3ff,
+ 0x368 & 0x3ff,
+ 0x361 & 0x3ff,
+ 0x35b & 0x3ff,
+ 0x354 & 0x3ff,
+ 0x34d & 0x3ff,
+ 0x346 & 0x3ff,
+ 0x340 & 0x3ff,
+ 0x339 & 0x3ff,
+ 0x333 & 0x3ff,
+ 0x32c & 0x3ff,
+ 0x326 & 0x3ff,
+ 0x320 & 0x3ff,
+ 0x319 & 0x3ff,
+ 0x313 & 0x3ff,
+ 0x30d & 0x3ff,
+ 0x307 & 0x3ff,
+ 0x300 & 0x3ff,
+ 0x2fa & 0x3ff,
+ 0x2f4 & 0x3ff,
+ 0x2ee & 0x3ff,
+ 0x2e8 & 0x3ff,
+ 0x2e2 & 0x3ff,
+ 0x2dc & 0x3ff,
+ 0x2d7 & 0x3ff,
+ 0x2d1 & 0x3ff,
+ 0x2cb & 0x3ff,
+ 0x2c5 & 0x3ff,
+ 0x2bf & 0x3ff,
+ 0x2ba & 0x3ff,
+ 0x2b4 & 0x3ff,
+ 0x2af & 0x3ff,
+ 0x2a9 & 0x3ff,
+ 0x2a3 & 0x3ff,
+ 0x29e & 0x3ff,
+ 0x299 & 0x3ff,
+ 0x293 & 0x3ff,
+ 0x28e & 0x3ff,
+ 0x288 & 0x3ff,
+ 0x283 & 0x3ff,
+ 0x27e & 0x3ff,
+ 0x279 & 0x3ff,
+ 0x273 & 0x3ff,
+ 0x26e & 0x3ff,
+ 0x269 & 0x3ff,
+ 0x264 & 0x3ff,
+ 0x25f & 0x3ff,
+ 0x25a & 0x3ff,
+ 0x255 & 0x3ff,
+ 0x250 & 0x3ff,
+ 0x24b & 0x3ff,
+ 0x246 & 0x3ff,
+ 0x241 & 0x3ff,
+ 0x23c & 0x3ff,
+ 0x237 & 0x3ff,
+ 0x232 & 0x3ff,
+ 0x22e & 0x3ff,
+ 0x229 & 0x3ff,
+ 0x224 & 0x3ff,
+ 0x21f & 0x3ff,
+ 0x21b & 0x3ff,
+ 0x216 & 0x3ff,
+ 0x211 & 0x3ff,
+ 0x20d & 0x3ff,
+ 0x208 & 0x3ff,
+ 0x204 & 0x3ff,
+ 0x1ff & 0x3ff,
+ 0x1fb & 0x3ff,
+ 0x1f6 & 0x3ff,
+ 0x1f2 & 0x3ff,
+ 0x1ed & 0x3ff,
+ 0x1e9 & 0x3ff,
+ 0x1e5 & 0x3ff,
+ 0x1e0 & 0x3ff,
+ 0x1dc & 0x3ff,
+ 0x1d8 & 0x3ff,
+ 0x1d4 & 0x3ff,
+ 0x1cf & 0x3ff,
+ 0x1cb & 0x3ff,
+ 0x1c7 & 0x3ff,
+ 0x1c3 & 0x3ff,
+ 0x1bf & 0x3ff,
+ 0x1bb & 0x3ff,
+ 0x1b6 & 0x3ff,
+ 0x1b2 & 0x3ff,
+ 0x1ae & 0x3ff,
+ 0x1aa & 0x3ff,
+ 0x1a6 & 0x3ff,
+ 0x1a2 & 0x3ff,
+ 0x19e & 0x3ff,
+ 0x19a & 0x3ff,
+ 0x197 & 0x3ff,
+ 0x193 & 0x3ff,
+ 0x18f & 0x3ff,
+ 0x18b & 0x3ff,
+ 0x187 & 0x3ff,
+ 0x183 & 0x3ff,
+ 0x17f & 0x3ff,
+ 0x17c & 0x3ff,
+ 0x178 & 0x3ff,
+ 0x174 & 0x3ff,
+ 0x171 & 0x3ff,
+ 0x16d & 0x3ff,
+ 0x169 & 0x3ff,
+ 0x166 & 0x3ff,
+ 0x162 & 0x3ff,
+ 0x15e & 0x3ff,
+ 0x15b & 0x3ff,
+ 0x157 & 0x3ff,
+ 0x154 & 0x3ff,
+ 0x150 & 0x3ff,
+ 0x14d & 0x3ff,
+ 0x149 & 0x3ff,
+ 0x146 & 0x3ff,
+ 0x142 & 0x3ff,
+ 0x13f & 0x3ff,
+ 0x13b & 0x3ff,
+ 0x138 & 0x3ff,
+ 0x134 & 0x3ff,
+ 0x131 & 0x3ff,
+ 0x12e & 0x3ff,
+ 0x12a & 0x3ff,
+ 0x127 & 0x3ff,
+ 0x124 & 0x3ff,
+ 0x120 & 0x3ff,
+ 0x11d & 0x3ff,
+ 0x11a & 0x3ff,
+ 0x117 & 0x3ff,
+ 0x113 & 0x3ff,
+ 0x110 & 0x3ff,
+ 0x10d & 0x3ff,
+ 0x10a & 0x3ff,
+ 0x107 & 0x3ff,
+ 0x103 & 0x3ff,
+ 0x100 & 0x3ff,
+ 0xfd & 0x3ff,
+ 0xfa & 0x3ff,
+ 0xf7 & 0x3ff,
+ 0xf4 & 0x3ff,
+ 0xf1 & 0x3ff,
+ 0xee & 0x3ff,
+ 0xeb & 0x3ff,
+ 0xe8 & 0x3ff,
+ 0xe5 & 0x3ff,
+ 0xe2 & 0x3ff,
+ 0xdf & 0x3ff,
+ 0xdc & 0x3ff,
+ 0xd9 & 0x3ff,
+ 0xd6 & 0x3ff,
+ 0xd3 & 0x3ff,
+ 0xd0 & 0x3ff,
+ 0xcd & 0x3ff,
+ 0xca & 0x3ff,
+ 0xc8 & 0x3ff,
+ 0xc5 & 0x3ff,
+ 0xc2 & 0x3ff,
+ 0xbf & 0x3ff,
+ 0xbc & 0x3ff,
+ 0xb9 & 0x3ff,
+ 0xb7 & 0x3ff,
+ 0xb4 & 0x3ff,
+ 0xb1 & 0x3ff,
+ 0xae & 0x3ff,
+ 0xac & 0x3ff,
+ 0xa9 & 0x3ff,
+ 0xa6 & 0x3ff,
+ 0xa4 & 0x3ff,
+ 0xa1 & 0x3ff,
+ 0x9e & 0x3ff,
+ 0x9c & 0x3ff,
+ 0x99 & 0x3ff,
+ 0x96 & 0x3ff,
+ 0x94 & 0x3ff,
+ 0x91 & 0x3ff,
+ 0x8e & 0x3ff,
+ 0x8c & 0x3ff,
+ 0x89 & 0x3ff,
+ 0x87 & 0x3ff,
+ 0x84 & 0x3ff,
+ 0x82 & 0x3ff,
+ 0x7f & 0x3ff,
+ 0x7c & 0x3ff,
+ 0x7a & 0x3ff,
+ 0x77 & 0x3ff,
+ 0x75 & 0x3ff,
+ 0x73 & 0x3ff,
+ 0x70 & 0x3ff,
+ 0x6e & 0x3ff,
+ 0x6b & 0x3ff,
+ 0x69 & 0x3ff,
+ 0x66 & 0x3ff,
+ 0x64 & 0x3ff,
+ 0x61 & 0x3ff,
+ 0x5f & 0x3ff,
+ 0x5d & 0x3ff,
+ 0x5a & 0x3ff,
+ 0x58 & 0x3ff,
+ 0x56 & 0x3ff,
+ 0x53 & 0x3ff,
+ 0x51 & 0x3ff,
+ 0x4f & 0x3ff,
+ 0x4c & 0x3ff,
+ 0x4a & 0x3ff,
+ 0x48 & 0x3ff,
+ 0x45 & 0x3ff,
+ 0x43 & 0x3ff,
+ 0x41 & 0x3ff,
+ 0x3f & 0x3ff,
+ 0x3c & 0x3ff,
+ 0x3a & 0x3ff,
+ 0x38 & 0x3ff,
+ 0x36 & 0x3ff,
+ 0x33 & 0x3ff,
+ 0x31 & 0x3ff,
+ 0x2f & 0x3ff,
+ 0x2d & 0x3ff,
+ 0x2b & 0x3ff,
+ 0x29 & 0x3ff,
+ 0x26 & 0x3ff,
+ 0x24 & 0x3ff,
+ 0x22 & 0x3ff,
+ 0x20 & 0x3ff,
+ 0x1e & 0x3ff,
+ 0x1c & 0x3ff,
+ 0x1a & 0x3ff,
+ 0x18 & 0x3ff,
+ 0x15 & 0x3ff,
+ 0x13 & 0x3ff,
+ 0x11 & 0x3ff,
+ 0xf & 0x3ff,
+ 0xd & 0x3ff,
+ 0xb & 0x3ff,
+ 0x9 & 0x3ff,
+ 0x7 & 0x3ff,
+ 0x5 & 0x3ff,
+ 0x3 & 0x3ff,
+ 0x1 & 0x3ff,
+ 0
+};
+
+
+/* Instruction operands. */
+
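+/*
+ * For every operand, _decode rewrites the raw instruction field in
+ * *valp as the operand's semantic value, and _encode performs the
+ * inverse, returning nonzero when the value does not fit the field.
+ * MR_0 ("my"), for instance, maps field values 0/1 to registers m2/m3.
+ */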
+static int
+OperandSem_opnd_sem_MR_0_decode (uint32 *valp)
+{
+ *valp += 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_0_encode (uint32 *valp)
+{
+ int error;
+ error = ((*valp & ~0x3) != 0) || ((*valp & 0x2) == 0);
+ *valp = *valp & 1;
+ return error;
+}
+
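+/* soffsetx4: 18-bit signed CALL offset, scaled by 4 and biased by +4. */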
+static int
+OperandSem_opnd_sem_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_out_0;
+ unsigned soffsetx4_in_0;
+ soffsetx4_in_0 = *valp & 0x3ffff;
+ soffsetx4_out_0 = 0x4 + ((((int) soffsetx4_in_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_encode (uint32 *valp)
+{
+ unsigned soffsetx4_in_0;
+ unsigned soffsetx4_out_0;
+ soffsetx4_out_0 = *valp;
+ soffsetx4_in_0 = ((soffsetx4_out_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = soffsetx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immr_decode (uint32 *valp)
+{
+ unsigned immr_out_0;
+ unsigned immr_in_0;
+ immr_in_0 = *valp & 0xf;
+ immr_out_0 = immr_in_0;
+ *valp = immr_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immr_encode (uint32 *valp)
+{
+ unsigned immr_in_0;
+ unsigned immr_out_0;
+ immr_out_0 = *valp;
+ immr_in_0 = (immr_out_0 & 0xf);
+ *valp = immr_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_out_0;
+ unsigned uimm12x8_in_0;
+ uimm12x8_in_0 = *valp & 0xfff;
+ uimm12x8_out_0 = uimm12x8_in_0 << 3;
+ *valp = uimm12x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_encode (uint32 *valp)
+{
+ unsigned uimm12x8_in_0;
+ unsigned uimm12x8_out_0;
+ uimm12x8_out_0 = *valp;
+ uimm12x8_in_0 = ((uimm12x8_out_0 >> 3) & 0xfff);
+ *valp = uimm12x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_out_0;
+ unsigned simm4_in_0;
+ simm4_in_0 = *valp & 0xf;
+ simm4_out_0 = ((int) simm4_in_0 << 28) >> 28;
+ *valp = simm4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_encode (uint32 *valp)
+{
+ unsigned simm4_in_0;
+ unsigned simm4_out_0;
+ simm4_out_0 = *valp;
+ simm4_in_0 = (simm4_out_0 & 0xf);
+ *valp = simm4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_8_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_12_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_12_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_entry_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_entry_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_out_0;
+ unsigned immrx4_in_0;
+ immrx4_in_0 = *valp & 0xf;
+ immrx4_out_0 = (((0xfffffff) << 4) | immrx4_in_0) << 2;
+ *valp = immrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_encode (uint32 *valp)
+{
+ unsigned immrx4_in_0;
+ unsigned immrx4_out_0;
+ immrx4_out_0 = *valp;
+ immrx4_in_0 = ((immrx4_out_0 >> 2) & 0xf);
+ *valp = immrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_out_0;
+ unsigned lsi4x4_in_0;
+ lsi4x4_in_0 = *valp & 0xf;
+ lsi4x4_out_0 = lsi4x4_in_0 << 2;
+ *valp = lsi4x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_encode (uint32 *valp)
+{
+ unsigned lsi4x4_in_0;
+ unsigned lsi4x4_out_0;
+ lsi4x4_out_0 = *valp;
+ lsi4x4_in_0 = ((lsi4x4_out_0 >> 2) & 0xf);
+ *valp = lsi4x4_in_0;
+ return 0;
+}
+
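+/*
+ * simm7 (MOVI.N): the value is negative only when bits 6 and 5 of the
+ * field are both set, which yields the asymmetric -32..95 range.
+ */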
+static int
+OperandSem_opnd_sem_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_out_0;
+ unsigned simm7_in_0;
+ simm7_in_0 = *valp & 0x7f;
+ simm7_out_0 = ((((-((((simm7_in_0 >> 6) & 1)) & (((simm7_in_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | simm7_in_0;
+ *valp = simm7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_encode (uint32 *valp)
+{
+ unsigned simm7_in_0;
+ unsigned simm7_out_0;
+ simm7_out_0 = *valp;
+ simm7_in_0 = (simm7_out_0 & 0x7f);
+ *valp = simm7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_out_0;
+ unsigned uimm6_in_0;
+ uimm6_in_0 = *valp & 0x3f;
+ uimm6_out_0 = 0x4 + (((0) << 6) | uimm6_in_0);
+ *valp = uimm6_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_encode (uint32 *valp)
+{
+ unsigned uimm6_in_0;
+ unsigned uimm6_out_0;
+ uimm6_out_0 = *valp;
+ uimm6_in_0 = (uimm6_out_0 - 0x4) & 0x3f;
+ *valp = uimm6_in_0;
+ return 0;
+}
+
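+/* ai4const: ADDI.N immediate via CONST_TBL_ai4c_0; field 0 encodes -1. */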
+static int
+OperandSem_opnd_sem_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_out_0;
+ unsigned ai4const_in_0;
+ ai4const_in_0 = *valp & 0xf;
+ ai4const_out_0 = CONST_TBL_ai4c_0[ai4const_in_0 & 0xf];
+ *valp = ai4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_encode (uint32 *valp)
+{
+ unsigned ai4const_in_0;
+ unsigned ai4const_out_0;
+ ai4const_out_0 = *valp;
+ switch (ai4const_out_0)
+ {
+ case 0xffffffff: ai4const_in_0 = 0; break;
+ case 0x1: ai4const_in_0 = 0x1; break;
+ case 0x2: ai4const_in_0 = 0x2; break;
+ case 0x3: ai4const_in_0 = 0x3; break;
+ case 0x4: ai4const_in_0 = 0x4; break;
+ case 0x5: ai4const_in_0 = 0x5; break;
+ case 0x6: ai4const_in_0 = 0x6; break;
+ case 0x7: ai4const_in_0 = 0x7; break;
+ case 0x8: ai4const_in_0 = 0x8; break;
+ case 0x9: ai4const_in_0 = 0x9; break;
+ case 0xa: ai4const_in_0 = 0xa; break;
+ case 0xb: ai4const_in_0 = 0xb; break;
+ case 0xc: ai4const_in_0 = 0xc; break;
+ case 0xd: ai4const_in_0 = 0xd; break;
+ case 0xe: ai4const_in_0 = 0xe; break;
+ default: ai4const_in_0 = 0xf; break;
+ }
+ *valp = ai4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_out_0;
+ unsigned b4const_in_0;
+ b4const_in_0 = *valp & 0xf;
+ b4const_out_0 = CONST_TBL_b4c_0[b4const_in_0 & 0xf];
+ *valp = b4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_encode (uint32 *valp)
+{
+ unsigned b4const_in_0;
+ unsigned b4const_out_0;
+ b4const_out_0 = *valp;
+ switch (b4const_out_0)
+ {
+ case 0xffffffff: b4const_in_0 = 0; break;
+ case 0x1: b4const_in_0 = 0x1; break;
+ case 0x2: b4const_in_0 = 0x2; break;
+ case 0x3: b4const_in_0 = 0x3; break;
+ case 0x4: b4const_in_0 = 0x4; break;
+ case 0x5: b4const_in_0 = 0x5; break;
+ case 0x6: b4const_in_0 = 0x6; break;
+ case 0x7: b4const_in_0 = 0x7; break;
+ case 0x8: b4const_in_0 = 0x8; break;
+ case 0xa: b4const_in_0 = 0x9; break;
+ case 0xc: b4const_in_0 = 0xa; break;
+ case 0x10: b4const_in_0 = 0xb; break;
+ case 0x20: b4const_in_0 = 0xc; break;
+ case 0x40: b4const_in_0 = 0xd; break;
+ case 0x80: b4const_in_0 = 0xe; break;
+ default: b4const_in_0 = 0xf; break;
+ }
+ *valp = b4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_out_0;
+ unsigned b4constu_in_0;
+ b4constu_in_0 = *valp & 0xf;
+ b4constu_out_0 = CONST_TBL_b4cu_0[b4constu_in_0 & 0xf];
+ *valp = b4constu_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_encode (uint32 *valp)
+{
+ unsigned b4constu_in_0;
+ unsigned b4constu_out_0;
+ b4constu_out_0 = *valp;
+ switch (b4constu_out_0)
+ {
+ case 0x8000: b4constu_in_0 = 0; break;
+ case 0x10000: b4constu_in_0 = 0x1; break;
+ case 0x2: b4constu_in_0 = 0x2; break;
+ case 0x3: b4constu_in_0 = 0x3; break;
+ case 0x4: b4constu_in_0 = 0x4; break;
+ case 0x5: b4constu_in_0 = 0x5; break;
+ case 0x6: b4constu_in_0 = 0x6; break;
+ case 0x7: b4constu_in_0 = 0x7; break;
+ case 0x8: b4constu_in_0 = 0x8; break;
+ case 0xa: b4constu_in_0 = 0x9; break;
+ case 0xc: b4constu_in_0 = 0xa; break;
+ case 0x10: b4constu_in_0 = 0xb; break;
+ case 0x20: b4constu_in_0 = 0xc; break;
+ case 0x40: b4constu_in_0 = 0xd; break;
+ case 0x80: b4constu_in_0 = 0xe; break;
+ default: b4constu_in_0 = 0xf; break;
+ }
+ *valp = b4constu_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_decode (uint32 *valp)
+{
+ unsigned immt_out_0;
+ unsigned immt_in_0;
+ immt_in_0 = *valp & 0xf;
+ immt_out_0 = immt_in_0;
+ *valp = immt_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_encode (uint32 *valp)
+{
+ unsigned immt_in_0;
+ unsigned immt_out_0;
+ immt_out_0 = *valp;
+ immt_in_0 = immt_out_0 & 0xf;
+ *valp = immt_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimms8_decode (uint32 *valp)
+{
+ unsigned uimms8_out_0;
+ unsigned uimms8_in_0;
+ uimms8_in_0 = *valp & 0x7;
+ uimms8_out_0 = uimms8_in_0;
+ *valp = uimms8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimms8_encode (uint32 *valp)
+{
+ unsigned uimms8_in_0;
+ unsigned uimms8_out_0;
+ uimms8_out_0 = *valp;
+ uimms8_in_0 = uimms8_out_0 & 0x7;
+ *valp = uimms8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_out_0;
+ unsigned uimm8_in_0;
+ uimm8_in_0 = *valp & 0xff;
+ uimm8_out_0 = uimm8_in_0;
+ *valp = uimm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_encode (uint32 *valp)
+{
+ unsigned uimm8_in_0;
+ unsigned uimm8_out_0;
+ uimm8_out_0 = *valp;
+ uimm8_in_0 = (uimm8_out_0 & 0xff);
+ *valp = uimm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_out_0;
+ unsigned uimm8x2_in_0;
+ uimm8x2_in_0 = *valp & 0xff;
+ uimm8x2_out_0 = uimm8x2_in_0 << 1;
+ *valp = uimm8x2_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_encode (uint32 *valp)
+{
+ unsigned uimm8x2_in_0;
+ unsigned uimm8x2_out_0;
+ uimm8x2_out_0 = *valp;
+ uimm8x2_in_0 = ((uimm8x2_out_0 >> 1) & 0xff);
+ *valp = uimm8x2_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_out_0;
+ unsigned uimm8x4_in_0;
+ uimm8x4_in_0 = *valp & 0xff;
+ uimm8x4_out_0 = uimm8x4_in_0 << 2;
+ *valp = uimm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_encode (uint32 *valp)
+{
+ unsigned uimm8x4_in_0;
+ unsigned uimm8x4_out_0;
+ uimm8x4_out_0 = *valp;
+ uimm8x4_in_0 = ((uimm8x4_out_0 >> 2) & 0xff);
+ *valp = uimm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_out_0;
+ unsigned uimm4x16_in_0;
+ uimm4x16_in_0 = *valp & 0xf;
+ uimm4x16_out_0 = uimm4x16_in_0 << 4;
+ *valp = uimm4x16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_encode (uint32 *valp)
+{
+ unsigned uimm4x16_in_0;
+ unsigned uimm4x16_out_0;
+ uimm4x16_out_0 = *valp;
+ uimm4x16_in_0 = ((uimm4x16_out_0 >> 4) & 0xf);
+ *valp = uimm4x16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_decode (uint32 *valp)
+{
+ unsigned uimmrx4_out_0;
+ unsigned uimmrx4_in_0;
+ uimmrx4_in_0 = *valp & 0xf;
+ uimmrx4_out_0 = uimmrx4_in_0 << 2;
+ *valp = uimmrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_encode (uint32 *valp)
+{
+ unsigned uimmrx4_in_0;
+ unsigned uimmrx4_out_0;
+ uimmrx4_out_0 = *valp;
+ uimmrx4_in_0 = ((uimmrx4_out_0 >> 2) & 0xf);
+ *valp = uimmrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_out_0;
+ unsigned simm8_in_0;
+ simm8_in_0 = *valp & 0xff;
+ simm8_out_0 = ((int) simm8_in_0 << 24) >> 24;
+ *valp = simm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_encode (uint32 *valp)
+{
+ unsigned simm8_in_0;
+ unsigned simm8_out_0;
+ simm8_out_0 = *valp;
+ simm8_in_0 = (simm8_out_0 & 0xff);
+ *valp = simm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_out_0;
+ unsigned simm8x256_in_0;
+ simm8x256_in_0 = *valp & 0xff;
+ simm8x256_out_0 = (((int) simm8x256_in_0 << 24) >> 24) << 8;
+ *valp = simm8x256_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_encode (uint32 *valp)
+{
+ unsigned simm8x256_in_0;
+ unsigned simm8x256_out_0;
+ simm8x256_out_0 = *valp;
+ simm8x256_in_0 = ((simm8x256_out_0 >> 8) & 0xff);
+ *valp = simm8x256_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_out_0;
+ unsigned simm12b_in_0;
+ simm12b_in_0 = *valp & 0xfff;
+ simm12b_out_0 = ((int) simm12b_in_0 << 20) >> 20;
+ *valp = simm12b_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_encode (uint32 *valp)
+{
+ unsigned simm12b_in_0;
+ unsigned simm12b_out_0;
+ simm12b_out_0 = *valp;
+ simm12b_in_0 = (simm12b_out_0 & 0xfff);
+ *valp = simm12b_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_out_0;
+ unsigned msalp32_in_0;
+ msalp32_in_0 = *valp & 0x1f;
+ msalp32_out_0 = 0x20 - msalp32_in_0;
+ *valp = msalp32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_encode (uint32 *valp)
+{
+ unsigned msalp32_in_0;
+ unsigned msalp32_out_0;
+ msalp32_out_0 = *valp;
+ msalp32_in_0 = (0x20 - msalp32_out_0) & 0x1f;
+ *valp = msalp32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_out_0;
+ unsigned op2p1_in_0;
+ op2p1_in_0 = *valp & 0xf;
+ op2p1_out_0 = op2p1_in_0 + 0x1;
+ *valp = op2p1_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_encode (uint32 *valp)
+{
+ unsigned op2p1_in_0;
+ unsigned op2p1_out_0;
+ op2p1_out_0 = *valp;
+ op2p1_in_0 = (op2p1_out_0 - 0x1) & 0xf;
+ *valp = op2p1_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_decode (uint32 *valp)
+{
+ unsigned label8_out_0;
+ unsigned label8_in_0;
+ label8_in_0 = *valp & 0xff;
+ label8_out_0 = 0x4 + (((int) label8_in_0 << 24) >> 24);
+ *valp = label8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_encode (uint32 *valp)
+{
+ unsigned label8_in_0;
+ unsigned label8_out_0;
+ label8_out_0 = *valp;
+ label8_in_0 = (label8_out_0 - 0x4) & 0xff;
+ *valp = label8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_out_0;
+ unsigned ulabel8_in_0;
+ ulabel8_in_0 = *valp & 0xff;
+ ulabel8_out_0 = 0x4 + (((0) << 8) | ulabel8_in_0);
+ *valp = ulabel8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_encode (uint32 *valp)
+{
+ unsigned ulabel8_in_0;
+ unsigned ulabel8_out_0;
+ ulabel8_out_0 = *valp;
+ ulabel8_in_0 = (ulabel8_out_0 - 0x4) & 0xff;
+ *valp = ulabel8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_decode (uint32 *valp)
+{
+ unsigned label12_out_0;
+ unsigned label12_in_0;
+ label12_in_0 = *valp & 0xfff;
+ label12_out_0 = 0x4 + (((int) label12_in_0 << 20) >> 20);
+ *valp = label12_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_encode (uint32 *valp)
+{
+ unsigned label12_in_0;
+ unsigned label12_out_0;
+ label12_out_0 = *valp;
+ label12_in_0 = (label12_out_0 - 0x4) & 0xfff;
+ *valp = label12_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_out_0;
+ unsigned soffset_in_0;
+ soffset_in_0 = *valp & 0x3ffff;
+ soffset_out_0 = 0x4 + (((int) soffset_in_0 << 14) >> 14);
+ *valp = soffset_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_encode (uint32 *valp)
+{
+ unsigned soffset_in_0;
+ unsigned soffset_out_0;
+ soffset_out_0 = *valp;
+ soffset_in_0 = (soffset_out_0 - 0x4) & 0x3ffff;
+ *valp = soffset_in_0;
+ return 0;
+}
+
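+/*
+ * uimm16x4 (L32R): the 16-bit field is widened with 0xffff0000 and
+ * scaled by 4, relying on 32-bit wraparound to produce the negative,
+ * word-aligned offset from the rounded-up PC.
+ */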
+static int
+OperandSem_opnd_sem_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_out_0;
+ unsigned uimm16x4_in_0;
+ uimm16x4_in_0 = *valp & 0xffff;
+ uimm16x4_out_0 = (((0xffff) << 16) | uimm16x4_in_0) << 2;
+ *valp = uimm16x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_encode (uint32 *valp)
+{
+ unsigned uimm16x4_in_0;
+ unsigned uimm16x4_out_0;
+ uimm16x4_out_0 = *valp;
+ uimm16x4_in_0 = (uimm16x4_out_0 >> 2) & 0xffff;
+ *valp = uimm16x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_1_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_3_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_5_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_5_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_imms_decode (uint32 *valp)
+{
+ unsigned imms_out_0;
+ unsigned imms_in_0;
+ imms_in_0 = *valp & 0xf;
+ imms_out_0 = imms_in_0;
+ *valp = imms_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imms_encode (uint32 *valp)
+{
+ unsigned imms_in_0;
+ unsigned imms_out_0;
+ imms_out_0 = *valp;
+ imms_in_0 = imms_out_0 & 0xf;
+ *valp = imms_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16);
+ return error;
+}
+
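+/*
+ * BR2/BR4/BR8/BR16 operands name aligned groups of boolean registers:
+ * decode scales the field by the group size, while encode rejects
+ * unaligned or out-of-range register numbers.
+ */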
+static int
+OperandSem_opnd_sem_BR2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 1) != 0);
+ *valp = *valp >> 1;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 3) != 0);
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 7) != 0);
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 15) != 0);
+ *valp = *valp >> 4;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_out_0;
+ unsigned tp7_in_0;
+ tp7_in_0 = *valp & 0xf;
+ tp7_out_0 = tp7_in_0 + 0x7;
+ *valp = tp7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_encode (uint32 *valp)
+{
+ unsigned tp7_in_0;
+ unsigned tp7_out_0;
+ tp7_out_0 = *valp;
+ tp7_in_0 = (tp7_out_0 - 0x7) & 0xf;
+ *valp = tp7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_out_0;
+ unsigned xt_wbr15_label_in_0;
+ xt_wbr15_label_in_0 = *valp & 0x7fff;
+ xt_wbr15_label_out_0 = 0x4 + (((int) xt_wbr15_label_in_0 << 17) >> 17);
+ *valp = xt_wbr15_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_in_0;
+ unsigned xt_wbr15_label_out_0;
+ xt_wbr15_label_out_0 = *valp;
+ xt_wbr15_label_in_0 = (xt_wbr15_label_out_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_out_0;
+ unsigned xt_wbr18_label_in_0;
+ xt_wbr18_label_in_0 = *valp & 0x3ffff;
+ xt_wbr18_label_out_0 = 0x4 + (((int) xt_wbr18_label_in_0 << 14) >> 14);
+ *valp = xt_wbr18_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_in_0;
+ unsigned xt_wbr18_label_out_0;
+ xt_wbr18_label_out_0 = *valp;
+ xt_wbr18_label_in_0 = (xt_wbr18_label_out_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_FR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_FR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_imm_t_decode (uint32 *valp)
+{
+ unsigned imm_t_out_0;
+ unsigned imm_t_in_0;
+ imm_t_in_0 = *valp & 0xf;
+ imm_t_out_0 = (0 << 4) | imm_t_in_0;
+ *valp = imm_t_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imm_t_encode (uint32 *valp)
+{
+ unsigned imm_t_in_0;
+ unsigned imm_t_out_0;
+ imm_t_out_0 = *valp;
+ imm_t_in_0 = (imm_t_out_0 & 0xf);
+ *valp = imm_t_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imm8x4_decode (uint32 *valp)
+{
+ unsigned imm8x4_out_0;
+ unsigned imm8x4_in_0;
+ imm8x4_in_0 = *valp & 0xff;
+ imm8x4_out_0 = (0 << 10) | (imm8x4_in_0 << 2) | 0;
+ *valp = imm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imm8x4_encode (uint32 *valp)
+{
+ unsigned imm8x4_in_0;
+ unsigned imm8x4_out_0;
+ imm8x4_out_0 = *valp;
+ imm8x4_in_0 = ((imm8x4_out_0 >> 2) & 0xff);
+ *valp = imm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imm8x8_decode (uint32 *valp)
+{
+ unsigned imm8x8_out_0;
+ unsigned imm8x8_in_0;
+ imm8x8_in_0 = *valp & 0xff;
+ imm8x8_out_0 = (0 << 11) | (imm8x8_in_0 << 3) | 0;
+ *valp = imm8x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_imm8x8_encode (uint32 *valp)
+{
+ unsigned imm8x8_in_0;
+ unsigned imm8x8_out_0;
+ imm8x8_out_0 = *valp;
+ imm8x8_in_0 = ((imm8x8_out_0 >> 3) & 0xff);
+ *valp = imm8x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_decode (uint32 *valp)
+{
+ unsigned bbi_out_0;
+ unsigned bbi_in_0;
+ bbi_in_0 = *valp & 0x1f;
+ bbi_out_0 = (0 << 5) | bbi_in_0;
+ *valp = bbi_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_encode (uint32 *valp)
+{
+ unsigned bbi_in_0;
+ unsigned bbi_out_0;
+ bbi_out_0 = *valp;
+ bbi_in_0 = (bbi_out_0 & 0x1f);
+ *valp = bbi_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_decode (uint32 *valp)
+{
+ unsigned s_out_0;
+ unsigned s_in_0;
+ s_in_0 = *valp & 0xf;
+ s_out_0 = (0 << 4) | s_in_0;
+ *valp = s_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_encode (uint32 *valp)
+{
+ unsigned s_in_0;
+ unsigned s_out_0;
+ s_out_0 = *valp;
+ s_in_0 = (s_out_0 & 0xf);
+ *valp = s_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_decode (uint32 *valp)
+{
+ unsigned bitindex_out_0;
+ unsigned bitindex_in_0;
+ bitindex_in_0 = *valp & 0x1f;
+ bitindex_out_0 = (0 << 5) | bitindex_in_0;
+ *valp = bitindex_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_encode (uint32 *valp)
+{
+ unsigned bitindex_in_0;
+ unsigned bitindex_out_0;
+ bitindex_out_0 = *valp;
+ bitindex_in_0 = (bitindex_out_0 & 0x1f);
+ *valp = bitindex_in_0;
+ return 0;
+}
+
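+/*
+ * _ator converts an absolute target address into the PC-relative value
+ * an operand encodes; _rtoa converts back.  soffsetx4 and uimm16x4
+ * additionally align the PC before taking the difference.
+ */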
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
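+/*
+ * Operand entries appear to pack: name, instruction field, register
+ * file (-1 for immediates), number of consecutive registers, flags,
+ * the encode/decode pair, and the ator/rtoa hooks (0 when unused).
+ */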
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffsetx4_encode, OperandSem_opnd_sem_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "immr", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immr_encode, OperandSem_opnd_sem_immr_decode,
+ 0, 0 },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm12x8_encode, OperandSem_opnd_sem_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm4_encode, OperandSem_opnd_sem_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_0_encode, OperandSem_opnd_sem_AR_0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_4_encode, OperandSem_opnd_sem_AR_4_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_8_encode, OperandSem_opnd_sem_AR_8_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_12_encode, OperandSem_opnd_sem_AR_12_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_entry_encode, OperandSem_opnd_sem_AR_entry_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immrx4_encode, OperandSem_opnd_sem_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm7_encode, OperandSem_opnd_sem_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm6_encode, OperandSem_opnd_sem_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ai4const_encode, OperandSem_opnd_sem_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4const_encode, OperandSem_opnd_sem_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4constu_encode, OperandSem_opnd_sem_b4constu_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "uimms8", FIELD_imms8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimms8_encode, OperandSem_opnd_sem_uimms8_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8_encode, OperandSem_opnd_sem_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x2_encode, OperandSem_opnd_sem_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x4_encode, OperandSem_opnd_sem_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm4x16_encode, OperandSem_opnd_sem_uimm4x16_decode,
+ 0, 0 },
+ { "uimmrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimmrx4_encode, OperandSem_opnd_sem_uimmrx4_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8_encode, OperandSem_opnd_sem_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8x256_encode, OperandSem_opnd_sem_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm12b_encode, OperandSem_opnd_sem_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ OperandSem_opnd_sem_msalp32_encode, OperandSem_opnd_sem_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_op2p1_encode, OperandSem_opnd_sem_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label8_encode, OperandSem_opnd_sem_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_ulabel8_encode, OperandSem_opnd_sem_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label12_encode, OperandSem_opnd_sem_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm16x4_encode, OperandSem_opnd_sem_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "mx", FIELD_x, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_encode, OperandSem_opnd_sem_MR_decode,
+ 0, 0 },
+ { "my", FIELD_y, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_0_encode, OperandSem_opnd_sem_MR_0_decode,
+ 0, 0 },
+ { "mw", FIELD_w, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_MR_1_encode, OperandSem_opnd_sem_MR_1_decode,
+ 0, 0 },
+ { "mr0", FIELD__mr0, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_2_encode, OperandSem_opnd_sem_MR_2_decode,
+ 0, 0 },
+ { "mr1", FIELD__mr1, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_3_encode, OperandSem_opnd_sem_MR_3_decode,
+ 0, 0 },
+ { "mr2", FIELD__mr2, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_4_encode, OperandSem_opnd_sem_MR_4_decode,
+ 0, 0 },
+ { "mr3", FIELD__mr3, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_5_encode, OperandSem_opnd_sem_MR_5_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imms_encode, OperandSem_opnd_sem_imms_decode,
+ 0, 0 },
+ { "imms1", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imms_encode, OperandSem_opnd_sem_imms_decode,
+ 0, 0 },
+ { "bt", FIELD_t, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bs", FIELD_s, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "br", FIELD_r, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bt2", FIELD_t2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bs2", FIELD_s2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "br2", FIELD_r2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bt4", FIELD_t4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bs4", FIELD_s4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "br4", FIELD_r4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bt8", FIELD_t8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bs8", FIELD_s8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "br8", FIELD_r8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bt16", FIELD__bt16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "bs16", FIELD__bs16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "br16", FIELD__br16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "brall", FIELD__brall, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_tp7_encode, OperandSem_opnd_sem_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr15_label_encode, OperandSem_opnd_sem_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr18_label_encode, OperandSem_opnd_sem_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "frr", FIELD_r, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_FR_encode, OperandSem_opnd_sem_FR_decode,
+ 0, 0 },
+ { "frs", FIELD_s, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_FR_encode, OperandSem_opnd_sem_FR_decode,
+ 0, 0 },
+ { "frt", FIELD_t, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_FR_encode, OperandSem_opnd_sem_FR_decode,
+ 0, 0 },
+ { "imm_t", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imm_t_encode, OperandSem_opnd_sem_imm_t_decode,
+ 0, 0 },
+ { "imm_s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imm_t_encode, OperandSem_opnd_sem_imm_t_decode,
+ 0, 0 },
+ { "imm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imm8x4_encode, OperandSem_opnd_sem_imm8x4_decode,
+ 0, 0 },
+ { "imm8x8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_imm8x8_encode, OperandSem_opnd_sem_imm8x8_decode,
+ 0, 0 },
+ { "bbi", FIELD_bbi, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sae", FIELD_sae, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sas", FIELD_sas, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_s_encode, OperandSem_opnd_sem_s_decode,
+ 0, 0 },
+ { "bitindex", FIELD_bitindex, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bitindex_encode, OperandSem_opnd_sem_bitindex_decode,
+ 0, 0 },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s8", FIELD_s8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imms8", FIELD_imms8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "r_disp", FIELD_r_disp, -1, 0, 0, 0, 0, 0, 0 },
+ { "r_3", FIELD_r_3, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "r3", FIELD_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "rbit2", FIELD_rbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "rhi", FIELD_rhi, -1, 0, 0, 0, 0, 0, 0 },
+ { "t3", FIELD_t3, -1, 0, 0, 0, 0, 0, 0 },
+ { "tbit2", FIELD_tbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "tlo", FIELD_tlo, -1, 0, 0, 0, 0, 0, 0 },
+ { "w", FIELD_w, -1, 0, 0, 0, 0, 0, 0 },
+ { "y", FIELD_y, -1, 0, 0, 0, 0, 0, 0 },
+ { "x", FIELD_x, -1, 0, 0, 0, 0, 0, 0 },
+ { "t2", FIELD_t2, -1, 0, 0, 0, 0, 0, 0 },
+ { "s2", FIELD_s2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r2", FIELD_r2, -1, 0, 0, 0, 0, 0, 0 },
+ { "t4", FIELD_t4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s4", FIELD_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "r4", FIELD_r4, -1, 0, 0, 0, 0, 0, 0 },
+ { "t8", FIELD_t8, -1, 0, 0, 0, 0, 0, 0 },
+ { "r8", FIELD_r8, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", FIELD_s3to1, -1, 0, 0, 0, 0, 0, 0 }
+};
+
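+/* Operand ids, kept in the same order as the operands[] table above. */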
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_immr,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_immt,
+ OPERAND_uimms8,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_uimmrx4,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_mx,
+ OPERAND_my,
+ OPERAND_mw,
+ OPERAND_mr0,
+ OPERAND_mr1,
+ OPERAND_mr2,
+ OPERAND_mr3,
+ OPERAND_imms,
+ OPERAND_imms1,
+ OPERAND_bt,
+ OPERAND_bs,
+ OPERAND_br,
+ OPERAND_bt2,
+ OPERAND_bs2,
+ OPERAND_br2,
+ OPERAND_bt4,
+ OPERAND_bs4,
+ OPERAND_br4,
+ OPERAND_bt8,
+ OPERAND_bs8,
+ OPERAND_br8,
+ OPERAND_bt16,
+ OPERAND_bs16,
+ OPERAND_br16,
+ OPERAND_brall,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_frr,
+ OPERAND_frs,
+ OPERAND_frt,
+ OPERAND_imm_t,
+ OPERAND_imm_s,
+ OPERAND_imm8x4,
+ OPERAND_imm8x8,
+ OPERAND_bbi,
+ OPERAND_sae,
+ OPERAND_sas,
+ OPERAND_sargt,
+ OPERAND_s,
+ OPERAND_bitindex,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_s8,
+ OPERAND_imms8,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_r_disp,
+ OPERAND_r_3,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sal,
+ OPERAND_sas4,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_r3,
+ OPERAND_rbit2,
+ OPERAND_rhi,
+ OPERAND_t3,
+ OPERAND_tbit2,
+ OPERAND_tlo,
+ OPERAND_w,
+ OPERAND_y,
+ OPERAND_x,
+ OPERAND_t2,
+ OPERAND_s2,
+ OPERAND_r2,
+ OPERAND_t4,
+ OPERAND_s4,
+ OPERAND_r4,
+ OPERAND_t8,
+ OPERAND_r8,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_s3to1
+};
+
+
+/* Iclass table. */
+
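+/*
+ * Each iclass lists its visible operands and implicit state arguments
+ * with a direction code: 'i' read, 'o' written, 'm' read and written;
+ * 's' appears here only on ENTRY's stack-pointer operand.
+ */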
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
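+/*
+ * Special registers follow the usual RSR/WSR/XSR triple: RSR reads
+ * the state into art ('o' on art, 'i' on the state), WSR writes it
+ * from art, and XSR exchanges the two, so both art and the state are
+ * marked 'm'.
+ */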
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_simcall_args[] = {
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32nb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimmrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_memctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_memctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_memctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_salt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
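+/*
+ * The MAC16 iclass suffixes encode the operand kinds, matching the
+ * MUL.AA/MUL.AD/MUL.DA/MUL.DD mnemonics: 'a' is an address register
+ * (ars/art) and 'd' a MAC16 data register (mx/my).  The accumulating
+ * mac16a variants modify ACC ('m'), while the plain multiplies
+ * overwrite it ('o').
+ */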
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_l_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m0_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m0_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m0_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m1_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m1_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m1_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m2_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m2_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m2_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m3_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m3_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m3_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
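+/*
+ * RFI restores PS from the EPS/EPC pair selected by its level
+ * operand, which is presumably why every EPC1-EPC7 and EPS2-EPS7
+ * state is listed as an input below.
+ */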
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool1_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool4_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool8_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbranch_args[] = {
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bmove_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_RSR_BR_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_brall }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_WSR_BR_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_brall }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_XSR_BR_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_brall }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
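+/*
+ * The WSR/XSR CCOMPARE0..2 iclasses above also list STATE_INTERRUPT as
+ * modified: writing a CCOMPARE register clears the pending interrupt of
+ * the corresponding timer.
+ */
+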
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_dyn_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_dyn_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdcw_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdcw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldcw_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldcw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'i' },
+ { { STATE_INSTPGSZID5 }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'o' },
+ { { STATE_INSTPGSZID5 }, 'o' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'm' },
+ { { STATE_INSTPGSZID5 }, 'm' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'i' },
+ { { STATE_DATAPGSZID5 }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'o' },
+ { { STATE_DATAPGSZID5 }, 'o' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'm' },
+ { { STATE_DATAPGSZID5 }, 'm' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
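+/*
+ * S32C1I compares memory against SCOMPARE1 before conditionally
+ * storing; note that the generated list above names STATE_SCOMPARE1
+ * twice.
+ */
+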
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eraccess_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eraccess_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ERACCESS }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eraccess_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eraccess_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ERACCESS }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eraccess_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eraccess_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ERACCESS }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_stateArgs[] = {
+ { { STATE_ERACCESS }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_stateArgs[] = {
+ { { STATE_ERACCESS }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_fcr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_fcr_stateArgs[] = {
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_InvalidEnable }, 'i' },
+ { { STATE_DivZeroEnable }, 'i' },
+ { { STATE_OverflowEnable }, 'i' },
+ { { STATE_UnderflowEnable }, 'i' },
+ { { STATE_InexactEnable }, 'i' },
+ { { STATE_FPreserved20 }, 'i' },
+ { { STATE_FPreserved5 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fcr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fcr_stateArgs[] = {
+ { { STATE_RoundMode }, 'o' },
+ { { STATE_InvalidEnable }, 'o' },
+ { { STATE_DivZeroEnable }, 'o' },
+ { { STATE_OverflowEnable }, 'o' },
+ { { STATE_UnderflowEnable }, 'o' },
+ { { STATE_InexactEnable }, 'o' },
+ { { STATE_FPreserved20 }, 'o' },
+ { { STATE_FPreserved5 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_fsr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_fsr_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'i' },
+ { { STATE_DivZeroFlag }, 'i' },
+ { { STATE_OverflowFlag }, 'i' },
+ { { STATE_UnderflowFlag }, 'i' },
+ { { STATE_InexactFlag }, 'i' },
+ { { STATE_FPreserved20a }, 'i' },
+ { { STATE_FPreserved7 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fsr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fsr_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'o' },
+ { { STATE_DivZeroFlag }, 'o' },
+ { { STATE_OverflowFlag }, 'o' },
+ { { STATE_UnderflowFlag }, 'o' },
+ { { STATE_InexactFlag }, 'o' },
+ { { STATE_FPreserved20a }, 'o' },
+ { { STATE_FPreserved7 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
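+/*
+ * The RUR/WUR FCR and FSR iclasses above expose the FP control register
+ * (rounding mode and exception-enable bits) and the FP status register
+ * (exception-flag bits) as sets of individual state operands.
+ */
+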
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
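+/*
+ * Interface arguments such as INTERFACE_IMPWIRE name external (TIE)
+ * interfaces rather than register-file operands or processor state.
+ */
+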
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
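+/*
+ * Every FP iclass that follows reads STATE_CPENABLE: the
+ * coprocessor-enable check is modelled as an ordinary input state
+ * operand on each instruction.
+ */
+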
+static xtensa_arg_internal Iclass_LSI_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSI_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSIP_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_imm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSIP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSX_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSX_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSXP_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LSXP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSI_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSI_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSIP_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_imm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSIP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSX_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSX_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSXP_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SSXP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDI_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm8x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDI_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDIP_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_imm8x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDIP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDX_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDX_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDXP_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_LDXP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDI_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm8x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDI_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDIP_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_imm8x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDIP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDX_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDX_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDXP_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SDXP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ABS_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ABS_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEG_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEG_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ABS_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ABS_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEG_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEG_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOV_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOV_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOV_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOV_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVEQZ_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVEQZ_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVNEZ_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVNEZ_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVLTZ_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVLTZ_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVGEZ_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVGEZ_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVF_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVF_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVT_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MOVT_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_WFR_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_WFR_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RFR_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RFR_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RFRD_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RFRD_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_WFRD_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_WFRD_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ROUND_S_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ROUND_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ROUND_D_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ROUND_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CEIL_S_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CEIL_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CEIL_D_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CEIL_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOOR_S_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOOR_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOOR_D_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOOR_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_TRUNC_S_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_TRUNC_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_TRUNC_D_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_TRUNC_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UTRUNC_S_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UTRUNC_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UTRUNC_D_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UTRUNC_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOAT_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOAT_S_stateArgs[] = {
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOAT_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_FLOAT_D_stateArgs[] = {
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UFLOAT_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UFLOAT_S_stateArgs[] = {
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UFLOAT_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_imm_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UFLOAT_D_stateArgs[] = {
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CVTD_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CVTD_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CVTS_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CVTS_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UN_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UN_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UN_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UN_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULT_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULT_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULT_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULT_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULE_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULE_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULE_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ULE_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UEQ_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UEQ_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UEQ_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_UEQ_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLT_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLT_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLT_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLT_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLE_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLE_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLE_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OLE_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OEQ_S_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OEQ_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OEQ_D_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_OEQ_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADD_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADD_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADD_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADD_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SUB_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SUB_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SUB_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SUB_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MUL_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MUL_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MUL_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MUL_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADD_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADD_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADD_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADD_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MSUB_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MSUB_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MSUB_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MSUB_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SQRT0_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SQRT0_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SQRT0_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_SQRT0_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIV0_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIV0_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIV0_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIV0_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RECIP0_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RECIP0_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RECIP0_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RECIP0_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RSQRT0_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RSQRT0_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RSQRT0_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_RSQRT0_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADDN_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADDN_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADDN_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MADDN_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIVN_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIVN_S_stateArgs[] = {
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIVN_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_DIVN_D_stateArgs[] = {
+ { { STATE_OverflowFlag }, 'm' },
+ { { STATE_UnderflowFlag }, 'm' },
+ { { STATE_InexactFlag }, 'm' },
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CONST_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_imm_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CONST_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CONST_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_imm_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_CONST_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEXP01_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEXP01_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEXP01_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_NEXP01_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXP_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXP_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXP_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXP_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXPM_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXPM_S_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXPM_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ADDEXPM_D_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKDADJ_S_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKDADJ_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKDADJ_D_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKDADJ_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_DivZeroFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKSADJ_S_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKSADJ_S_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKSADJ_D_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_MKSADJ_D_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
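+/*
+ * Aggregate table: each entry packs three (count, array) pairs, in
+ * order register operands, processor-state operands and interface
+ * operands.  A zero count is paired with a null pointer; a comment
+ * then names the iclass when it has no register operands.
+ */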
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 5, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_simcall_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32nb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid0_args,
+ 2, Iclass_xt_iclass_rsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_configid0_args,
+ 2, Iclass_xt_iclass_wsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid1_args,
+ 2, Iclass_xt_iclass_rsr_configid1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 3, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 3, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 3, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 3, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 3, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 3, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 3, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 3, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 3, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 3, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 3, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 3, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 3, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 3, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 3, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 3, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 3, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 3, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 3, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 3, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 3, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 3, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 3, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 3, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 3, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 3, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 3, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 3, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 3, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 3, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 3, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 3, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 3, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 3, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 3, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 3, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 3, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 3, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 3, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 3, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 3, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 3, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 3, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 3, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 3, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 3, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 3, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 3, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_salt_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_aa_args,
+ 1, Iclass_xt_iclass_mac16_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_ad_args,
+ 1, Iclass_xt_iclass_mac16_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_da_args,
+ 1, Iclass_xt_iclass_mac16_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_dd_args,
+ 1, Iclass_xt_iclass_mac16_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_aa_args,
+ 1, Iclass_xt_iclass_mac16a_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_ad_args,
+ 1, Iclass_xt_iclass_mac16a_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_da_args,
+ 1, Iclass_xt_iclass_mac16a_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_dd_args,
+ 1, Iclass_xt_iclass_mac16a_dd_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_da_args,
+ 1, Iclass_xt_iclass_mac16al_da_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_dd_args,
+ 1, Iclass_xt_iclass_mac16al_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_l_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m3_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acclo_args,
+ 1, Iclass_xt_iclass_rsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acclo_args,
+ 1, Iclass_xt_iclass_wsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acclo_args,
+ 1, Iclass_xt_iclass_xsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acchi_args,
+ 1, Iclass_xt_iclass_rsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acchi_args,
+ 1, Iclass_xt_iclass_wsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acchi_args,
+ 1, Iclass_xt_iclass_xsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 21, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_lddr32_p_args,
+ 5, Iclass_xt_iclass_lddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sddr32_p_args,
+ 4, Iclass_xt_iclass_sddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 2, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 3, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_bbool1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbranch_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bmove_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_RSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_WSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_XSR_BR_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 3, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 4, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 4, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 2, Iclass_xt_iclass_icache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_dcache_dyn_args,
+ 2, Iclass_xt_iclass_dcache_dyn_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 2, Iclass_xt_iclass_dcache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sdcw_args,
+ 2, Iclass_xt_iclass_sdcw_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldcw_args,
+ 2, Iclass_xt_iclass_ldcw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 3, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 3, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 3, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 3, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 4, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 4, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eraccess_args,
+ 3, Iclass_xt_iclass_rsr_eraccess_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eraccess_args,
+ 3, Iclass_xt_iclass_wsr_eraccess_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eraccess_args,
+ 3, Iclass_xt_iclass_xsr_eraccess_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rer_args,
+ 3, Iclass_xt_iclass_rer_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wer_args,
+ 3, Iclass_xt_iclass_wer_stateArgs, 0, 0 },
+ { 1, Iclass_rur_fcr_args,
+ 9, Iclass_rur_fcr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_fcr_args,
+ 9, Iclass_wur_fcr_stateArgs, 0, 0 },
+ { 1, Iclass_rur_fsr_args,
+ 8, Iclass_rur_fsr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_fsr_args,
+ 8, Iclass_wur_fsr_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 1, Iclass_iclass_READ_IMPWIRE_stateArgs, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 2, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 2, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 2, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 2, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 2, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 3, Iclass_LSI_args,
+ 1, Iclass_LSI_stateArgs, 0, 0 },
+ { 3, Iclass_LSIP_args,
+ 1, Iclass_LSIP_stateArgs, 0, 0 },
+ { 3, Iclass_LSX_args,
+ 1, Iclass_LSX_stateArgs, 0, 0 },
+ { 3, Iclass_LSXP_args,
+ 1, Iclass_LSXP_stateArgs, 0, 0 },
+ { 3, Iclass_SSI_args,
+ 1, Iclass_SSI_stateArgs, 0, 0 },
+ { 3, Iclass_SSIP_args,
+ 1, Iclass_SSIP_stateArgs, 0, 0 },
+ { 3, Iclass_SSX_args,
+ 1, Iclass_SSX_stateArgs, 0, 0 },
+ { 3, Iclass_SSXP_args,
+ 1, Iclass_SSXP_stateArgs, 0, 0 },
+ { 3, Iclass_LDI_args,
+ 1, Iclass_LDI_stateArgs, 0, 0 },
+ { 3, Iclass_LDIP_args,
+ 1, Iclass_LDIP_stateArgs, 0, 0 },
+ { 3, Iclass_LDX_args,
+ 1, Iclass_LDX_stateArgs, 0, 0 },
+ { 3, Iclass_LDXP_args,
+ 1, Iclass_LDXP_stateArgs, 0, 0 },
+ { 3, Iclass_SDI_args,
+ 1, Iclass_SDI_stateArgs, 0, 0 },
+ { 3, Iclass_SDIP_args,
+ 1, Iclass_SDIP_stateArgs, 0, 0 },
+ { 3, Iclass_SDX_args,
+ 1, Iclass_SDX_stateArgs, 0, 0 },
+ { 3, Iclass_SDXP_args,
+ 1, Iclass_SDXP_stateArgs, 0, 0 },
+ { 2, Iclass_ABS_S_args,
+ 1, Iclass_ABS_S_stateArgs, 0, 0 },
+ { 2, Iclass_NEG_S_args,
+ 1, Iclass_NEG_S_stateArgs, 0, 0 },
+ { 2, Iclass_ABS_D_args,
+ 1, Iclass_ABS_D_stateArgs, 0, 0 },
+ { 2, Iclass_NEG_D_args,
+ 1, Iclass_NEG_D_stateArgs, 0, 0 },
+ { 2, Iclass_MOV_S_args,
+ 1, Iclass_MOV_S_stateArgs, 0, 0 },
+ { 2, Iclass_MOV_D_args,
+ 1, Iclass_MOV_D_stateArgs, 0, 0 },
+ { 3, Iclass_MOVEQZ_S_args,
+ 1, Iclass_MOVEQZ_S_stateArgs, 0, 0 },
+ { 3, Iclass_MOVNEZ_S_args,
+ 1, Iclass_MOVNEZ_S_stateArgs, 0, 0 },
+ { 3, Iclass_MOVLTZ_S_args,
+ 1, Iclass_MOVLTZ_S_stateArgs, 0, 0 },
+ { 3, Iclass_MOVGEZ_S_args,
+ 1, Iclass_MOVGEZ_S_stateArgs, 0, 0 },
+ { 3, Iclass_MOVF_S_args,
+ 1, Iclass_MOVF_S_stateArgs, 0, 0 },
+ { 3, Iclass_MOVT_S_args,
+ 1, Iclass_MOVT_S_stateArgs, 0, 0 },
+ { 2, Iclass_WFR_args,
+ 1, Iclass_WFR_stateArgs, 0, 0 },
+ { 2, Iclass_RFR_args,
+ 1, Iclass_RFR_stateArgs, 0, 0 },
+ { 2, Iclass_RFRD_args,
+ 1, Iclass_RFRD_stateArgs, 0, 0 },
+ { 3, Iclass_WFRD_args,
+ 1, Iclass_WFRD_stateArgs, 0, 0 },
+ { 3, Iclass_ROUND_S_args,
+ 3, Iclass_ROUND_S_stateArgs, 0, 0 },
+ { 3, Iclass_ROUND_D_args,
+ 3, Iclass_ROUND_D_stateArgs, 0, 0 },
+ { 3, Iclass_CEIL_S_args,
+ 3, Iclass_CEIL_S_stateArgs, 0, 0 },
+ { 3, Iclass_CEIL_D_args,
+ 3, Iclass_CEIL_D_stateArgs, 0, 0 },
+ { 3, Iclass_FLOOR_S_args,
+ 3, Iclass_FLOOR_S_stateArgs, 0, 0 },
+ { 3, Iclass_FLOOR_D_args,
+ 3, Iclass_FLOOR_D_stateArgs, 0, 0 },
+ { 3, Iclass_TRUNC_S_args,
+ 3, Iclass_TRUNC_S_stateArgs, 0, 0 },
+ { 3, Iclass_TRUNC_D_args,
+ 3, Iclass_TRUNC_D_stateArgs, 0, 0 },
+ { 3, Iclass_UTRUNC_S_args,
+ 3, Iclass_UTRUNC_S_stateArgs, 0, 0 },
+ { 3, Iclass_UTRUNC_D_args,
+ 3, Iclass_UTRUNC_D_stateArgs, 0, 0 },
+ { 3, Iclass_FLOAT_S_args,
+ 3, Iclass_FLOAT_S_stateArgs, 0, 0 },
+ { 3, Iclass_FLOAT_D_args,
+ 2, Iclass_FLOAT_D_stateArgs, 0, 0 },
+ { 3, Iclass_UFLOAT_S_args,
+ 3, Iclass_UFLOAT_S_stateArgs, 0, 0 },
+ { 3, Iclass_UFLOAT_D_args,
+ 2, Iclass_UFLOAT_D_stateArgs, 0, 0 },
+ { 2, Iclass_CVTD_S_args,
+ 2, Iclass_CVTD_S_stateArgs, 0, 0 },
+ { 2, Iclass_CVTS_D_args,
+ 6, Iclass_CVTS_D_stateArgs, 0, 0 },
+ { 3, Iclass_UN_S_args,
+ 2, Iclass_UN_S_stateArgs, 0, 0 },
+ { 3, Iclass_UN_D_args,
+ 2, Iclass_UN_D_stateArgs, 0, 0 },
+ { 3, Iclass_ULT_S_args,
+ 2, Iclass_ULT_S_stateArgs, 0, 0 },
+ { 3, Iclass_ULT_D_args,
+ 2, Iclass_ULT_D_stateArgs, 0, 0 },
+ { 3, Iclass_ULE_S_args,
+ 2, Iclass_ULE_S_stateArgs, 0, 0 },
+ { 3, Iclass_ULE_D_args,
+ 2, Iclass_ULE_D_stateArgs, 0, 0 },
+ { 3, Iclass_UEQ_S_args,
+ 2, Iclass_UEQ_S_stateArgs, 0, 0 },
+ { 3, Iclass_UEQ_D_args,
+ 2, Iclass_UEQ_D_stateArgs, 0, 0 },
+ { 3, Iclass_OLT_S_args,
+ 2, Iclass_OLT_S_stateArgs, 0, 0 },
+ { 3, Iclass_OLT_D_args,
+ 2, Iclass_OLT_D_stateArgs, 0, 0 },
+ { 3, Iclass_OLE_S_args,
+ 2, Iclass_OLE_S_stateArgs, 0, 0 },
+ { 3, Iclass_OLE_D_args,
+ 2, Iclass_OLE_D_stateArgs, 0, 0 },
+ { 3, Iclass_OEQ_S_args,
+ 2, Iclass_OEQ_S_stateArgs, 0, 0 },
+ { 3, Iclass_OEQ_D_args,
+ 2, Iclass_OEQ_D_stateArgs, 0, 0 },
+ { 3, Iclass_ADD_S_args,
+ 6, Iclass_ADD_S_stateArgs, 0, 0 },
+ { 3, Iclass_ADD_D_args,
+ 6, Iclass_ADD_D_stateArgs, 0, 0 },
+ { 3, Iclass_SUB_S_args,
+ 6, Iclass_SUB_S_stateArgs, 0, 0 },
+ { 3, Iclass_SUB_D_args,
+ 6, Iclass_SUB_D_stateArgs, 0, 0 },
+ { 3, Iclass_MUL_S_args,
+ 6, Iclass_MUL_S_stateArgs, 0, 0 },
+ { 3, Iclass_MUL_D_args,
+ 6, Iclass_MUL_D_stateArgs, 0, 0 },
+ { 3, Iclass_MADD_S_args,
+ 6, Iclass_MADD_S_stateArgs, 0, 0 },
+ { 3, Iclass_MADD_D_args,
+ 6, Iclass_MADD_D_stateArgs, 0, 0 },
+ { 3, Iclass_MSUB_S_args,
+ 6, Iclass_MSUB_S_stateArgs, 0, 0 },
+ { 3, Iclass_MSUB_D_args,
+ 6, Iclass_MSUB_D_stateArgs, 0, 0 },
+ { 2, Iclass_SQRT0_S_args,
+ 1, Iclass_SQRT0_S_stateArgs, 0, 0 },
+ { 2, Iclass_SQRT0_D_args,
+ 1, Iclass_SQRT0_D_stateArgs, 0, 0 },
+ { 2, Iclass_DIV0_S_args,
+ 1, Iclass_DIV0_S_stateArgs, 0, 0 },
+ { 2, Iclass_DIV0_D_args,
+ 1, Iclass_DIV0_D_stateArgs, 0, 0 },
+ { 2, Iclass_RECIP0_S_args,
+ 3, Iclass_RECIP0_S_stateArgs, 0, 0 },
+ { 2, Iclass_RECIP0_D_args,
+ 3, Iclass_RECIP0_D_stateArgs, 0, 0 },
+ { 2, Iclass_RSQRT0_S_args,
+ 3, Iclass_RSQRT0_S_stateArgs, 0, 0 },
+ { 2, Iclass_RSQRT0_D_args,
+ 3, Iclass_RSQRT0_D_stateArgs, 0, 0 },
+ { 3, Iclass_MADDN_S_args,
+ 1, Iclass_MADDN_S_stateArgs, 0, 0 },
+ { 3, Iclass_MADDN_D_args,
+ 1, Iclass_MADDN_D_stateArgs, 0, 0 },
+ { 3, Iclass_DIVN_S_args,
+ 5, Iclass_DIVN_S_stateArgs, 0, 0 },
+ { 3, Iclass_DIVN_D_args,
+ 5, Iclass_DIVN_D_stateArgs, 0, 0 },
+ { 2, Iclass_CONST_S_args,
+ 1, Iclass_CONST_S_stateArgs, 0, 0 },
+ { 2, Iclass_CONST_D_args,
+ 1, Iclass_CONST_D_stateArgs, 0, 0 },
+ { 2, Iclass_NEXP01_S_args,
+ 1, Iclass_NEXP01_S_stateArgs, 0, 0 },
+ { 2, Iclass_NEXP01_D_args,
+ 1, Iclass_NEXP01_D_stateArgs, 0, 0 },
+ { 2, Iclass_ADDEXP_S_args,
+ 1, Iclass_ADDEXP_S_stateArgs, 0, 0 },
+ { 2, Iclass_ADDEXP_D_args,
+ 1, Iclass_ADDEXP_D_stateArgs, 0, 0 },
+ { 2, Iclass_ADDEXPM_S_args,
+ 1, Iclass_ADDEXPM_S_stateArgs, 0, 0 },
+ { 2, Iclass_ADDEXPM_D_args,
+ 1, Iclass_ADDEXPM_D_stateArgs, 0, 0 },
+ { 2, Iclass_MKDADJ_S_args,
+ 3, Iclass_MKDADJ_S_stateArgs, 0, 0 },
+ { 2, Iclass_MKDADJ_D_args,
+ 3, Iclass_MKDADJ_D_stateArgs, 0, 0 },
+ { 2, Iclass_MKSADJ_S_args,
+ 2, Iclass_MKSADJ_S_stateArgs, 0, 0 },
+ { 2, Iclass_MKSADJ_D_args,
+ 2, Iclass_MKSADJ_D_stateArgs, 0, 0 }
+};
+
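+/*
+ * Note: each row above is assumed to follow the libisa iclass layout
+ * { num_args, args, num_stateArgs, stateArgs, num_intfArgs, intfArgs }.
+ * Rows with a null args pointer (e.g. xt_iclass_rfdd, xt_iclass_ldpte)
+ * describe iclasses with no encodable operand list, and only
+ * READ_IMPWIRE carries a non-empty interface-argument list.  The enum
+ * below parallels the table rows one-to-one (both end with MKSADJ_S,
+ * MKSADJ_D), so an xtensa_iclass_id value indexes this table.
+ */
+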
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_rur_threadptr,
+ ICLASS_wur_threadptr,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s32nb,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_memctl,
+ ICLASS_xt_iclass_wsr_memctl,
+ ICLASS_xt_iclass_xsr_memctl,
+ ICLASS_xt_iclass_rsr_configid0,
+ ICLASS_xt_iclass_wsr_configid0,
+ ICLASS_xt_iclass_rsr_configid1,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_epc7,
+ ICLASS_xt_iclass_wsr_epc7,
+ ICLASS_xt_iclass_xsr_epc7,
+ ICLASS_xt_iclass_rsr_excsave7,
+ ICLASS_xt_iclass_wsr_excsave7,
+ ICLASS_xt_iclass_xsr_excsave7,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_eps7,
+ ICLASS_xt_iclass_wsr_eps7,
+ ICLASS_xt_iclass_xsr_eps7,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_iclass_salt,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_iclass_mac16_aa,
+ ICLASS_xt_iclass_mac16_ad,
+ ICLASS_xt_iclass_mac16_da,
+ ICLASS_xt_iclass_mac16_dd,
+ ICLASS_xt_iclass_mac16a_aa,
+ ICLASS_xt_iclass_mac16a_ad,
+ ICLASS_xt_iclass_mac16a_da,
+ ICLASS_xt_iclass_mac16a_dd,
+ ICLASS_xt_iclass_mac16al_da,
+ ICLASS_xt_iclass_mac16al_dd,
+ ICLASS_xt_iclass_mac16_l,
+ ICLASS_xt_iclass_rsr_m0,
+ ICLASS_xt_iclass_wsr_m0,
+ ICLASS_xt_iclass_xsr_m0,
+ ICLASS_xt_iclass_rsr_m1,
+ ICLASS_xt_iclass_wsr_m1,
+ ICLASS_xt_iclass_xsr_m1,
+ ICLASS_xt_iclass_rsr_m2,
+ ICLASS_xt_iclass_wsr_m2,
+ ICLASS_xt_iclass_xsr_m2,
+ ICLASS_xt_iclass_rsr_m3,
+ ICLASS_xt_iclass_wsr_m3,
+ ICLASS_xt_iclass_xsr_m3,
+ ICLASS_xt_iclass_rsr_acclo,
+ ICLASS_xt_iclass_wsr_acclo,
+ ICLASS_xt_iclass_xsr_acclo,
+ ICLASS_xt_iclass_rsr_acchi,
+ ICLASS_xt_iclass_wsr_acchi,
+ ICLASS_xt_iclass_xsr_acchi,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_lddr32_p,
+ ICLASS_xt_iclass_sddr32_p,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_bbool1,
+ ICLASS_xt_iclass_bbool4,
+ ICLASS_xt_iclass_bbool8,
+ ICLASS_xt_iclass_bbranch,
+ ICLASS_xt_iclass_bmove,
+ ICLASS_xt_iclass_RSR_BR,
+ ICLASS_xt_iclass_WSR_BR,
+ ICLASS_xt_iclass_XSR_BR,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_rsr_ccompare2,
+ ICLASS_xt_iclass_wsr_ccompare2,
+ ICLASS_xt_iclass_xsr_ccompare2,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_lock,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_dyn,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_dcache_lock,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_sdcw,
+ ICLASS_xt_iclass_ldcw,
+ ICLASS_xt_iclass_wsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_ptevaddr,
+ ICLASS_xt_iclass_xsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_rasid,
+ ICLASS_xt_iclass_wsr_rasid,
+ ICLASS_xt_iclass_xsr_rasid,
+ ICLASS_xt_iclass_rsr_itlbcfg,
+ ICLASS_xt_iclass_wsr_itlbcfg,
+ ICLASS_xt_iclass_xsr_itlbcfg,
+ ICLASS_xt_iclass_rsr_dtlbcfg,
+ ICLASS_xt_iclass_wsr_dtlbcfg,
+ ICLASS_xt_iclass_xsr_dtlbcfg,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_ldpte,
+ ICLASS_xt_iclass_hwwitlba,
+ ICLASS_xt_iclass_hwwdtlba,
+ ICLASS_xt_iclass_rsr_cpenable,
+ ICLASS_xt_iclass_wsr_cpenable,
+ ICLASS_xt_iclass_xsr_cpenable,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_div,
+ ICLASS_xt_iclass_rsr_eraccess,
+ ICLASS_xt_iclass_wsr_eraccess,
+ ICLASS_xt_iclass_xsr_eraccess,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_fcr,
+ ICLASS_wur_fcr,
+ ICLASS_rur_fsr,
+ ICLASS_wur_fsr,
+ ICLASS_iclass_READ_IMPWIRE,
+ ICLASS_iclass_SETB_EXPSTATE,
+ ICLASS_iclass_CLRB_EXPSTATE,
+ ICLASS_iclass_WRMSK_EXPSTATE,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_LSI,
+ ICLASS_LSIP,
+ ICLASS_LSX,
+ ICLASS_LSXP,
+ ICLASS_SSI,
+ ICLASS_SSIP,
+ ICLASS_SSX,
+ ICLASS_SSXP,
+ ICLASS_LDI,
+ ICLASS_LDIP,
+ ICLASS_LDX,
+ ICLASS_LDXP,
+ ICLASS_SDI,
+ ICLASS_SDIP,
+ ICLASS_SDX,
+ ICLASS_SDXP,
+ ICLASS_ABS_S,
+ ICLASS_NEG_S,
+ ICLASS_ABS_D,
+ ICLASS_NEG_D,
+ ICLASS_MOV_S,
+ ICLASS_MOV_D,
+ ICLASS_MOVEQZ_S,
+ ICLASS_MOVNEZ_S,
+ ICLASS_MOVLTZ_S,
+ ICLASS_MOVGEZ_S,
+ ICLASS_MOVF_S,
+ ICLASS_MOVT_S,
+ ICLASS_WFR,
+ ICLASS_RFR,
+ ICLASS_RFRD,
+ ICLASS_WFRD,
+ ICLASS_ROUND_S,
+ ICLASS_ROUND_D,
+ ICLASS_CEIL_S,
+ ICLASS_CEIL_D,
+ ICLASS_FLOOR_S,
+ ICLASS_FLOOR_D,
+ ICLASS_TRUNC_S,
+ ICLASS_TRUNC_D,
+ ICLASS_UTRUNC_S,
+ ICLASS_UTRUNC_D,
+ ICLASS_FLOAT_S,
+ ICLASS_FLOAT_D,
+ ICLASS_UFLOAT_S,
+ ICLASS_UFLOAT_D,
+ ICLASS_CVTD_S,
+ ICLASS_CVTS_D,
+ ICLASS_UN_S,
+ ICLASS_UN_D,
+ ICLASS_ULT_S,
+ ICLASS_ULT_D,
+ ICLASS_ULE_S,
+ ICLASS_ULE_D,
+ ICLASS_UEQ_S,
+ ICLASS_UEQ_D,
+ ICLASS_OLT_S,
+ ICLASS_OLT_D,
+ ICLASS_OLE_S,
+ ICLASS_OLE_D,
+ ICLASS_OEQ_S,
+ ICLASS_OEQ_D,
+ ICLASS_ADD_S,
+ ICLASS_ADD_D,
+ ICLASS_SUB_S,
+ ICLASS_SUB_D,
+ ICLASS_MUL_S,
+ ICLASS_MUL_D,
+ ICLASS_MADD_S,
+ ICLASS_MADD_D,
+ ICLASS_MSUB_S,
+ ICLASS_MSUB_D,
+ ICLASS_SQRT0_S,
+ ICLASS_SQRT0_D,
+ ICLASS_DIV0_S,
+ ICLASS_DIV0_D,
+ ICLASS_RECIP0_S,
+ ICLASS_RECIP0_D,
+ ICLASS_RSQRT0_S,
+ ICLASS_RSQRT0_D,
+ ICLASS_MADDN_S,
+ ICLASS_MADDN_D,
+ ICLASS_DIVN_S,
+ ICLASS_DIVN_D,
+ ICLASS_CONST_S,
+ ICLASS_CONST_D,
+ ICLASS_NEXP01_S,
+ ICLASS_NEXP01_D,
+ ICLASS_ADDEXP_S,
+ ICLASS_ADDEXP_D,
+ ICLASS_ADDEXPM_S,
+ ICLASS_ADDEXPM_D,
+ ICLASS_MKDADJ_S,
+ ICLASS_MKDADJ_D,
+ ICLASS_MKSADJ_S,
+ ICLASS_MKSADJ_D
+};
+
+
+/* Opcode encodings. */
+
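+/*
+ * Each Opcode_*_Slot_*_encode function below stores the fixed opcode
+ * bits of one instruction, for one slot format, into slotbuf[0];
+ * operand fields are presumably OR'd into the buffer separately by
+ * the per-field setter functions.
+ */
+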
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e70;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e700;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s32nb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590000;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36100;
+}
+
+static void
+Opcode_wsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136100;
+}
+
+static void
+Opcode_xsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616100;
+}
+
+static void
+Opcode_rsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_configid1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b700;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b700;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b700;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d700;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d700;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c700;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c700;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c700;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
+static void
+Opcode_salt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720000;
+}
+
+static void
+Opcode_saltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x620000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
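+/* MAC16 multiply encodings: all share the low word 0x0004, while the
+ * high byte selects the operand class (aa/ad/da/dd) and the variant
+ * (mul/umul/mula/muls, plus the lddec/ldinc load-combined forms); its
+ * low two bits pick the half-words (ll=0, hl=1, lh=2, hh=3). */
+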
+static void
+Opcode_mul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770004;
+}
+
+static void
+Opcode_mul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x750004;
+}
+
+static void
+Opcode_mul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x760004;
+}
+
+static void
+Opcode_mul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x740004;
+}
+
+static void
+Opcode_umul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730004;
+}
+
+static void
+Opcode_umul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x710004;
+}
+
+static void
+Opcode_umul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720004;
+}
+
+static void
+Opcode_umul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700004;
+}
+
+static void
+Opcode_mul_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370004;
+}
+
+static void
+Opcode_mul_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350004;
+}
+
+static void
+Opcode_mul_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x360004;
+}
+
+static void
+Opcode_mul_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340004;
+}
+
+static void
+Opcode_mul_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670004;
+}
+
+static void
+Opcode_mul_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650004;
+}
+
+static void
+Opcode_mul_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660004;
+}
+
+static void
+Opcode_mul_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640004;
+}
+
+static void
+Opcode_mul_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270004;
+}
+
+static void
+Opcode_mul_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250004;
+}
+
+static void
+Opcode_mul_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260004;
+}
+
+static void
+Opcode_mul_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240004;
+}
+
+static void
+Opcode_mula_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0004;
+}
+
+static void
+Opcode_mula_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x790004;
+}
+
+static void
+Opcode_mula_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7a0004;
+}
+
+static void
+Opcode_mula_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780004;
+}
+
+static void
+Opcode_muls_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f0004;
+}
+
+static void
+Opcode_muls_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d0004;
+}
+
+static void
+Opcode_muls_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e0004;
+}
+
+static void
+Opcode_muls_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c0004;
+}
+
+static void
+Opcode_mula_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0004;
+}
+
+static void
+Opcode_mula_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x390004;
+}
+
+static void
+Opcode_mula_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a0004;
+}
+
+static void
+Opcode_mula_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_muls_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f0004;
+}
+
+static void
+Opcode_muls_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0004;
+}
+
+static void
+Opcode_muls_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0004;
+}
+
+static void
+Opcode_muls_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0004;
+}
+
+static void
+Opcode_mula_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0004;
+}
+
+static void
+Opcode_mula_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690004;
+}
+
+static void
+Opcode_mula_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0004;
+}
+
+static void
+Opcode_mula_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680004;
+}
+
+static void
+Opcode_muls_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0004;
+}
+
+static void
+Opcode_muls_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0004;
+}
+
+static void
+Opcode_muls_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0004;
+}
+
+static void
+Opcode_muls_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0004;
+}
+
+static void
+Opcode_mula_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0004;
+}
+
+static void
+Opcode_mula_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x290004;
+}
+
+static void
+Opcode_mula_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0004;
+}
+
+static void
+Opcode_mula_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_muls_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0004;
+}
+
+static void
+Opcode_muls_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0004;
+}
+
+static void
+Opcode_muls_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0004;
+}
+
+static void
+Opcode_muls_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0004;
+}
+
+static void
+Opcode_mula_da_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0004;
+}
+
+static void
+Opcode_mula_da_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0004;
+}
+
+static void
+Opcode_mula_da_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590004;
+}
+
+static void
+Opcode_mula_da_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490004;
+}
+
+static void
+Opcode_mula_da_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0004;
+}
+
+static void
+Opcode_mula_da_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0004;
+}
+
+static void
+Opcode_mula_da_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580004;
+}
+
+static void
+Opcode_mula_da_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480004;
+}
+
+static void
+Opcode_mula_dd_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0004;
+}
+
+static void
+Opcode_mula_dd_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0004;
+}
+
+static void
+Opcode_mula_dd_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x190004;
+}
+
+static void
+Opcode_mula_dd_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90004;
+}
+
+static void
+Opcode_mula_dd_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0004;
+}
+
+static void
+Opcode_mula_dd_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0004;
+}
+
+static void
+Opcode_mula_dd_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_mula_dd_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900004;
+}
+
+static void
+Opcode_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800004;
+}
+
+static void
+Opcode_rsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32000;
+}
+
+static void
+Opcode_wsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132000;
+}
+
+static void
+Opcode_xsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612000;
+}
+
+static void
+Opcode_rsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32100;
+}
+
+static void
+Opcode_wsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132100;
+}
+
+static void
+Opcode_xsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612100;
+}
+
+static void
+Opcode_rsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32200;
+}
+
+static void
+Opcode_wsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132200;
+}
+
+static void
+Opcode_xsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612200;
+}
+
+static void
+Opcode_rsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32300;
+}
+
+static void
+Opcode_wsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132300;
+}
+
+static void
+Opcode_xsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x612300;
+}
+
+static void
+Opcode_rsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31000;
+}
+
+static void
+Opcode_wsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131000;
+}
+
+static void
+Opcode_xsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611000;
+}
+
+static void
+Opcode_rsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31100;
+}
+
+static void
+Opcode_wsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131100;
+}
+
+static void
+Opcode_xsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x611100;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_lddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e0;
+}
+
+static void
+Opcode_sddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f0;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_andb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_andbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x120000;
+}
+
+static void
+Opcode_orb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x220000;
+}
+
+static void
+Opcode_orbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x320000;
+}
+
+static void
+Opcode_xorb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x420000;
+}
+
+static void
+Opcode_all4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9000;
+}
+
+static void
+Opcode_any4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_all8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_any8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_bf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x76;
+}
+
+static void
+Opcode_bt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1076;
+}
+
+static void
+Opcode_movf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc30000;
+}
+
+static void
+Opcode_movt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd30000;
+}
+
+static void
+Opcode_rsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30400;
+}
+
+static void
+Opcode_wsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130400;
+}
+
+static void
+Opcode_xsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610400;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f200;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f200;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f200;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270d2;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370d2;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70d2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwbui_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf7082;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x27082;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37082;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7082;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
+static void
+Opcode_sdcw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1b000;
+}
+
+static void
+Opcode_ldcw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1a000;
+}
+
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135300;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35300;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615300;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a00;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135a00;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615a00;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35b00;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135b00;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615b00;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35c00;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135c00;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615c00;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f000;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501000;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x509000;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e000;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e000;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e000;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20000;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20000;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf20000;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20000;
+}
+
+static void
+Opcode_rsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35f00;
+}
+
+static void
+Opcode_wsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135f00;
+}
+
+static void
+Opcode_xsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615f00;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
+static void
+Opcode_rur_fcr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e80;
+}
+
+static void
+Opcode_wur_fcr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e800;
+}
+
+static void
+Opcode_rur_fsr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e90;
+}
+
+static void
+Opcode_wur_fsr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e900;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0000;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1000;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1200;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe2000;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e60;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e600;
+}
+
+static void
+Opcode_lsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_lsip_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8003;
+}
+
+static void
+Opcode_lsx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80000;
+}
+
+static void
+Opcode_lsxp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180000;
+}
+
+static void
+Opcode_ssi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_ssip_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc003;
+}
+
+static void
+Opcode_ssx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480000;
+}
+
+static void
+Opcode_ssxp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580000;
+}
+
+static void
+Opcode_ldi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1003;
+}
+
+static void
+Opcode_ldip_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9003;
+}
+
+static void
+Opcode_ldx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280000;
+}
+
+static void
+Opcode_ldxp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380000;
+}
+
+static void
+Opcode_sdi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5003;
+}
+
+static void
+Opcode_sdip_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd003;
+}
+
+static void
+Opcode_sdx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680000;
+}
+
+static void
+Opcode_sdxp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780000;
+}
+
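+/* Floating-point encodings: each .S/.D pair differs only in the high
+ * opcode nibbles, e.g. ABS.S 0xfa0010 vs ABS.D 0xff0010, and OLT.S
+ * 0x4b0000 vs OLT.D 0x4e0000. */
+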
+static void
+Opcode_abs_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0010;
+}
+
+static void
+Opcode_neg_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0060;
+}
+
+static void
+Opcode_abs_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0010;
+}
+
+static void
+Opcode_neg_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0060;
+}
+
+static void
+Opcode_mov_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0000;
+}
+
+static void
+Opcode_mov_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0000;
+}
+
+static void
+Opcode_moveqz_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8b0000;
+}
+
+static void
+Opcode_movnez_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9b0000;
+}
+
+static void
+Opcode_movltz_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xab0000;
+}
+
+static void
+Opcode_movgez_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbb0000;
+}
+
+static void
+Opcode_movf_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcb0000;
+}
+
+static void
+Opcode_movt_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdb0000;
+}
+
+static void
+Opcode_wfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0050;
+}
+
+static void
+Opcode_rfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0040;
+}
+
+static void
+Opcode_rfrd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0040;
+}
+
+static void
+Opcode_wfrd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8e0000;
+}
+
+static void
+Opcode_round_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8a0000;
+}
+
+static void
+Opcode_round_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8f0000;
+}
+
+static void
+Opcode_ceil_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xba0000;
+}
+
+static void
+Opcode_ceil_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbf0000;
+}
+
+static void
+Opcode_floor_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaa0000;
+}
+
+static void
+Opcode_floor_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaf0000;
+}
+
+static void
+Opcode_trunc_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9a0000;
+}
+
+static void
+Opcode_trunc_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9f0000;
+}
+
+static void
+Opcode_utrunc_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea0000;
+}
+
+static void
+Opcode_utrunc_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xef0000;
+}
+
+static void
+Opcode_float_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca0000;
+}
+
+static void
+Opcode_float_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcf0000;
+}
+
+static void
+Opcode_ufloat_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xda0000;
+}
+
+static void
+Opcode_ufloat_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdf0000;
+}
+
+static void
+Opcode_cvtd_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0020;
+}
+
+static void
+Opcode_cvts_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0020;
+}
+
+static void
+Opcode_un_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0000;
+}
+
+static void
+Opcode_un_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e0000;
+}
+
+static void
+Opcode_ult_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0000;
+}
+
+static void
+Opcode_ult_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5e0000;
+}
+
+static void
+Opcode_ule_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0000;
+}
+
+static void
+Opcode_ule_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e0000;
+}
+
+static void
+Opcode_ueq_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0000;
+}
+
+static void
+Opcode_ueq_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0000;
+}
+
+static void
+Opcode_olt_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0000;
+}
+
+static void
+Opcode_olt_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e0000;
+}
+
+static void
+Opcode_ole_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0000;
+}
+
+static void
+Opcode_ole_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0000;
+}
+
+static void
+Opcode_oeq_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0000;
+}
+
+static void
+Opcode_oeq_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0000;
+}
+
+static void
+Opcode_add_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0000;
+}
+
+static void
+Opcode_add_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0000;
+}
+
+static void
+Opcode_sub_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0000;
+}
+
+static void
+Opcode_sub_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f0000;
+}
+
+static void
+Opcode_mul_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0000;
+}
+
+static void
+Opcode_mul_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0000;
+}
+
+static void
+Opcode_madd_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0000;
+}
+
+static void
+Opcode_madd_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f0000;
+}
+
+static void
+Opcode_msub_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0000;
+}
+
+static void
+Opcode_msub_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5f0000;
+}
+
+static void
+Opcode_sqrt0_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0090;
+}
+
+static void
+Opcode_sqrt0_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0090;
+}
+
+static void
+Opcode_div0_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0070;
+}
+
+static void
+Opcode_div0_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0070;
+}
+
+static void
+Opcode_recip0_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0080;
+}
+
+static void
+Opcode_recip0_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0080;
+}
+
+static void
+Opcode_rsqrt0_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00a0;
+}
+
+static void
+Opcode_rsqrt0_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00a0;
+}
+
+static void
+Opcode_maddn_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0000;
+}
+
+static void
+Opcode_maddn_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0000;
+}
+
+static void
+Opcode_divn_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7a0000;
+}
+
+static void
+Opcode_divn_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f0000;
+}
+
+static void
+Opcode_const_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0030;
+}
+
+static void
+Opcode_const_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff0030;
+}
+
+static void
+Opcode_nexp01_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00b0;
+}
+
+static void
+Opcode_nexp01_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00b0;
+}
+
+static void
+Opcode_addexp_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00e0;
+}
+
+static void
+Opcode_addexp_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00e0;
+}
+
+static void
+Opcode_addexpm_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00f0;
+}
+
+static void
+Opcode_addexpm_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00f0;
+}
+
+static void
+Opcode_mkdadj_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00d0;
+}
+
+static void
+Opcode_mkdadj_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00d0;
+}
+
+static void
+Opcode_mksadj_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa00c0;
+}
+
+static void
+Opcode_mksadj_d_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xff00c0;
+}
+
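+/* Per-opcode encode-function tables.  Each table holds one entry per
+ * slot format (inst, inst16a, inst16b); a zero entry means the opcode
+ * cannot be encoded in that slot. */
+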
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32nb_encode_fns[] = {
+ Opcode_s32nb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_memctl_encode_fns[] = {
+ Opcode_rsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_memctl_encode_fns[] = {
+ Opcode_wsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_memctl_encode_fns[] = {
+ Opcode_xsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid0_encode_fns[] = {
+ Opcode_rsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_configid0_encode_fns[] = {
+ Opcode_wsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid1_encode_fns[] = {
+ Opcode_rsr_configid1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_salt_encode_fns[] = {
+ Opcode_salt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_saltu_encode_fns[] = {
+ Opcode_saltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hh_encode_fns[] = {
+ Opcode_mul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hl_encode_fns[] = {
+ Opcode_mul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_lh_encode_fns[] = {
+ Opcode_mul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_ll_encode_fns[] = {
+ Opcode_mul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hh_encode_fns[] = {
+ Opcode_umul_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hl_encode_fns[] = {
+ Opcode_umul_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_lh_encode_fns[] = {
+ Opcode_umul_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_ll_encode_fns[] = {
+ Opcode_umul_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hh_encode_fns[] = {
+ Opcode_mul_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hl_encode_fns[] = {
+ Opcode_mul_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_lh_encode_fns[] = {
+ Opcode_mul_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_ll_encode_fns[] = {
+ Opcode_mul_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hh_encode_fns[] = {
+ Opcode_mul_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hl_encode_fns[] = {
+ Opcode_mul_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_lh_encode_fns[] = {
+ Opcode_mul_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_ll_encode_fns[] = {
+ Opcode_mul_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hh_encode_fns[] = {
+ Opcode_mul_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hl_encode_fns[] = {
+ Opcode_mul_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_lh_encode_fns[] = {
+ Opcode_mul_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_ll_encode_fns[] = {
+ Opcode_mul_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hh_encode_fns[] = {
+ Opcode_mula_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hl_encode_fns[] = {
+ Opcode_mula_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_lh_encode_fns[] = {
+ Opcode_mula_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_ll_encode_fns[] = {
+ Opcode_mula_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hh_encode_fns[] = {
+ Opcode_muls_aa_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hl_encode_fns[] = {
+ Opcode_muls_aa_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_lh_encode_fns[] = {
+ Opcode_muls_aa_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_ll_encode_fns[] = {
+ Opcode_muls_aa_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hh_encode_fns[] = {
+ Opcode_mula_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hl_encode_fns[] = {
+ Opcode_mula_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_lh_encode_fns[] = {
+ Opcode_mula_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_ll_encode_fns[] = {
+ Opcode_mula_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hh_encode_fns[] = {
+ Opcode_muls_ad_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hl_encode_fns[] = {
+ Opcode_muls_ad_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_lh_encode_fns[] = {
+ Opcode_muls_ad_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_ll_encode_fns[] = {
+ Opcode_muls_ad_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_encode_fns[] = {
+ Opcode_mula_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_encode_fns[] = {
+ Opcode_mula_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_encode_fns[] = {
+ Opcode_mula_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_encode_fns[] = {
+ Opcode_mula_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hh_encode_fns[] = {
+ Opcode_muls_da_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hl_encode_fns[] = {
+ Opcode_muls_da_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_lh_encode_fns[] = {
+ Opcode_muls_da_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_ll_encode_fns[] = {
+ Opcode_muls_da_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_encode_fns[] = {
+ Opcode_mula_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_encode_fns[] = {
+ Opcode_mula_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_encode_fns[] = {
+ Opcode_mula_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_encode_fns[] = {
+ Opcode_mula_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hh_encode_fns[] = {
+ Opcode_muls_dd_hh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hl_encode_fns[] = {
+ Opcode_muls_dd_hl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_lh_encode_fns[] = {
+ Opcode_muls_dd_lh_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_ll_encode_fns[] = {
+ Opcode_muls_dd_ll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_lddec_encode_fns[] = {
+ Opcode_mula_da_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_ldinc_encode_fns[] = {
+ Opcode_mula_da_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_lddec_encode_fns[] = {
+ Opcode_mula_da_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_ldinc_encode_fns[] = {
+ Opcode_mula_da_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_lddec_encode_fns[] = {
+ Opcode_mula_da_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_ldinc_encode_fns[] = {
+ Opcode_mula_da_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_lddec_encode_fns[] = {
+ Opcode_mula_da_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_ldinc_encode_fns[] = {
+ Opcode_mula_da_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_lddec_encode_fns[] = {
+ Opcode_mula_dd_hh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_lddec_encode_fns[] = {
+ Opcode_mula_dd_hl_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hl_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_lddec_encode_fns[] = {
+ Opcode_mula_dd_lh_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_lh_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_lddec_encode_fns[] = {
+ Opcode_mula_dd_ll_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_ldinc_encode_fns[] = {
+ Opcode_mula_dd_ll_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddec_encode_fns[] = {
+ Opcode_lddec_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldinc_encode_fns[] = {
+ Opcode_ldinc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m0_encode_fns[] = {
+ Opcode_rsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m0_encode_fns[] = {
+ Opcode_wsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m0_encode_fns[] = {
+ Opcode_xsr_m0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m1_encode_fns[] = {
+ Opcode_rsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m1_encode_fns[] = {
+ Opcode_wsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m1_encode_fns[] = {
+ Opcode_xsr_m1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m2_encode_fns[] = {
+ Opcode_rsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m2_encode_fns[] = {
+ Opcode_wsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m2_encode_fns[] = {
+ Opcode_xsr_m2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m3_encode_fns[] = {
+ Opcode_rsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m3_encode_fns[] = {
+ Opcode_wsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m3_encode_fns[] = {
+ Opcode_xsr_m3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acclo_encode_fns[] = {
+ Opcode_rsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acclo_encode_fns[] = {
+ Opcode_wsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acclo_encode_fns[] = {
+ Opcode_xsr_acclo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acchi_encode_fns[] = {
+ Opcode_rsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acchi_encode_fns[] = {
+ Opcode_wsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acchi_encode_fns[] = {
+ Opcode_xsr_acchi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddr32_p_encode_fns[] = {
+ Opcode_lddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sddr32_p_encode_fns[] = {
+ Opcode_sddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_andb_encode_fns[] = {
+ Opcode_andb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_andbc_encode_fns[] = {
+ Opcode_andbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_orb_encode_fns[] = {
+ Opcode_orb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_orbc_encode_fns[] = {
+ Opcode_orbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xorb_encode_fns[] = {
+ Opcode_xorb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_all4_encode_fns[] = {
+ Opcode_all4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_any4_encode_fns[] = {
+ Opcode_any4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_all8_encode_fns[] = {
+ Opcode_all8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_any8_encode_fns[] = {
+ Opcode_any8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bf_encode_fns[] = {
+ Opcode_bf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bt_encode_fns[] = {
+ Opcode_bt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_encode_fns[] = {
+ Opcode_movf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_encode_fns[] = {
+ Opcode_movt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_br_encode_fns[] = {
+ Opcode_rsr_br_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_br_encode_fns[] = {
+ Opcode_wsr_br_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_br_encode_fns[] = {
+ Opcode_xsr_br_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbui_p_encode_fns[] = {
+ Opcode_diwbui_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdcw_encode_fns[] = {
+ Opcode_sdcw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldcw_encode_fns[] = {
+ Opcode_ldcw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eraccess_encode_fns[] = {
+ Opcode_rsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eraccess_encode_fns[] = {
+ Opcode_wsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eraccess_encode_fns[] = {
+ Opcode_xsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_fcr_encode_fns[] = {
+ Opcode_rur_fcr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_fcr_encode_fns[] = {
+ Opcode_wur_fcr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_fsr_encode_fns[] = {
+ Opcode_rur_fsr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_fsr_encode_fns[] = {
+ Opcode_wur_fsr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsi_encode_fns[] = {
+ Opcode_lsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsip_encode_fns[] = {
+ Opcode_lsip_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsx_encode_fns[] = {
+ Opcode_lsx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsxp_encode_fns[] = {
+ Opcode_lsxp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssi_encode_fns[] = {
+ Opcode_ssi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssip_encode_fns[] = {
+ Opcode_ssip_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssx_encode_fns[] = {
+ Opcode_ssx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssxp_encode_fns[] = {
+ Opcode_ssxp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldi_encode_fns[] = {
+ Opcode_ldi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldip_encode_fns[] = {
+ Opcode_ldip_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldx_encode_fns[] = {
+ Opcode_ldx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldxp_encode_fns[] = {
+ Opcode_ldxp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdi_encode_fns[] = {
+ Opcode_sdi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdip_encode_fns[] = {
+ Opcode_sdip_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdx_encode_fns[] = {
+ Opcode_sdx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdxp_encode_fns[] = {
+ Opcode_sdxp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_s_encode_fns[] = {
+ Opcode_abs_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_s_encode_fns[] = {
+ Opcode_neg_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_d_encode_fns[] = {
+ Opcode_abs_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_d_encode_fns[] = {
+ Opcode_neg_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_s_encode_fns[] = {
+ Opcode_mov_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_d_encode_fns[] = {
+ Opcode_mov_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_s_encode_fns[] = {
+ Opcode_moveqz_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_s_encode_fns[] = {
+ Opcode_movnez_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_s_encode_fns[] = {
+ Opcode_movltz_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_s_encode_fns[] = {
+ Opcode_movgez_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_s_encode_fns[] = {
+ Opcode_movf_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_s_encode_fns[] = {
+ Opcode_movt_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wfr_encode_fns[] = {
+ Opcode_wfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfr_encode_fns[] = {
+ Opcode_rfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfrd_encode_fns[] = {
+ Opcode_rfrd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wfrd_encode_fns[] = {
+ Opcode_wfrd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_round_s_encode_fns[] = {
+ Opcode_round_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_round_d_encode_fns[] = {
+ Opcode_round_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ceil_s_encode_fns[] = {
+ Opcode_ceil_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ceil_d_encode_fns[] = {
+ Opcode_ceil_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_floor_s_encode_fns[] = {
+ Opcode_floor_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_floor_d_encode_fns[] = {
+ Opcode_floor_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_trunc_s_encode_fns[] = {
+ Opcode_trunc_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_trunc_d_encode_fns[] = {
+ Opcode_trunc_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_utrunc_s_encode_fns[] = {
+ Opcode_utrunc_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_utrunc_d_encode_fns[] = {
+ Opcode_utrunc_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_float_s_encode_fns[] = {
+ Opcode_float_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_float_d_encode_fns[] = {
+ Opcode_float_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ufloat_s_encode_fns[] = {
+ Opcode_ufloat_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ufloat_d_encode_fns[] = {
+ Opcode_ufloat_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cvtd_s_encode_fns[] = {
+ Opcode_cvtd_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cvts_d_encode_fns[] = {
+ Opcode_cvts_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_un_s_encode_fns[] = {
+ Opcode_un_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_un_d_encode_fns[] = {
+ Opcode_un_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ult_s_encode_fns[] = {
+ Opcode_ult_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ult_d_encode_fns[] = {
+ Opcode_ult_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ule_s_encode_fns[] = {
+ Opcode_ule_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ule_d_encode_fns[] = {
+ Opcode_ule_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ueq_s_encode_fns[] = {
+ Opcode_ueq_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ueq_d_encode_fns[] = {
+ Opcode_ueq_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_olt_s_encode_fns[] = {
+ Opcode_olt_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_olt_d_encode_fns[] = {
+ Opcode_olt_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ole_s_encode_fns[] = {
+ Opcode_ole_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ole_d_encode_fns[] = {
+ Opcode_ole_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_oeq_s_encode_fns[] = {
+ Opcode_oeq_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_oeq_d_encode_fns[] = {
+ Opcode_oeq_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_s_encode_fns[] = {
+ Opcode_add_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_d_encode_fns[] = {
+ Opcode_add_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_s_encode_fns[] = {
+ Opcode_sub_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_d_encode_fns[] = {
+ Opcode_sub_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_s_encode_fns[] = {
+ Opcode_mul_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_d_encode_fns[] = {
+ Opcode_mul_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_madd_s_encode_fns[] = {
+ Opcode_madd_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_madd_d_encode_fns[] = {
+ Opcode_madd_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_msub_s_encode_fns[] = {
+ Opcode_msub_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_msub_d_encode_fns[] = {
+ Opcode_msub_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sqrt0_s_encode_fns[] = {
+ Opcode_sqrt0_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sqrt0_d_encode_fns[] = {
+ Opcode_sqrt0_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_div0_s_encode_fns[] = {
+ Opcode_div0_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_div0_d_encode_fns[] = {
+ Opcode_div0_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_recip0_s_encode_fns[] = {
+ Opcode_recip0_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_recip0_d_encode_fns[] = {
+ Opcode_recip0_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsqrt0_s_encode_fns[] = {
+ Opcode_rsqrt0_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsqrt0_d_encode_fns[] = {
+ Opcode_rsqrt0_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maddn_s_encode_fns[] = {
+ Opcode_maddn_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maddn_d_encode_fns[] = {
+ Opcode_maddn_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_divn_s_encode_fns[] = {
+ Opcode_divn_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_divn_d_encode_fns[] = {
+ Opcode_divn_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_const_s_encode_fns[] = {
+ Opcode_const_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_const_d_encode_fns[] = {
+ Opcode_const_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nexp01_s_encode_fns[] = {
+ Opcode_nexp01_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nexp01_d_encode_fns[] = {
+ Opcode_nexp01_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addexp_s_encode_fns[] = {
+ Opcode_addexp_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addexp_d_encode_fns[] = {
+ Opcode_addexp_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addexpm_s_encode_fns[] = {
+ Opcode_addexpm_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addexpm_d_encode_fns[] = {
+ Opcode_addexpm_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mkdadj_s_encode_fns[] = {
+ Opcode_mkdadj_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mkdadj_d_encode_fns[] = {
+ Opcode_mkdadj_d_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mksadj_s_encode_fns[] = {
+ Opcode_mksadj_s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mksadj_d_encode_fns[] = {
+ Opcode_mksadj_d_Slot_inst_encode, 0, 0
+};
+
+
+
+
+
+/* Opcode table. */
+
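+/* Functional-unit usage: each entry below records that the opcode uses
+ * the load/store unit (FUNCUNIT_XT_LOADSTORE_UNIT) at stage 0. */
+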
+static xtensa_funcUnit_use Opcode_l32e_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32e_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l32i_n_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32i_n_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l16ui_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l16si_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l32i_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l32r_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l8ui_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s16i_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32i_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32nb_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s8i_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_hh_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_hh_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_hl_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_hl_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_lh_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_lh_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_ll_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_da_ll_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_hh_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_hh_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_hl_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_hl_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_lh_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_lh_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_ll_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_mula_dd_ll_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lddec_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldinc_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lddr32_p_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sddr32_p_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lict_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_licw_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sict_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sicw_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdct_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldct_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdcw_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldcw_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldpte_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_l32ai_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32ri_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_s32c1i_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lsi_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lsip_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lsx_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_lsxp_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ssi_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ssip_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ssx_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ssxp_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldi_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldip_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldx_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_ldxp_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdi_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdip_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdx_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
+static xtensa_funcUnit_use Opcode_sdxp_funcUnit_uses[] = {
+ { FUNCUNIT_XT_LOADSTORE_UNIT, 0 }
+};
+
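+/* Each entry: mnemonic, iclass, flags (branch/jump/call), per-slot encode
+ * functions, and the count and list of functional units used. */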
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 1, Opcode_l32e_funcUnit_uses },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 1, Opcode_s32e_funcUnit_uses },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 1, Opcode_l32i_n_funcUnit_uses },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 1, Opcode_s32i_n_funcUnit_uses },
+ { "rur.threadptr", ICLASS_rur_threadptr,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", ICLASS_wur_threadptr,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 1, Opcode_l16ui_funcUnit_uses },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 1, Opcode_l16si_funcUnit_uses },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 1, Opcode_l32i_funcUnit_uses },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 1, Opcode_l32r_funcUnit_uses },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 1, Opcode_l8ui_funcUnit_uses },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 1, Opcode_s16i_funcUnit_uses },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 1, Opcode_s32i_funcUnit_uses },
+ { "s32nb", ICLASS_xt_iclass_s32nb,
+ 0,
+ Opcode_s32nb_encode_fns, 1, Opcode_s32nb_funcUnit_uses },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 1, Opcode_s8i_funcUnit_uses },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.memctl", ICLASS_xt_iclass_rsr_memctl,
+ 0,
+ Opcode_rsr_memctl_encode_fns, 0, 0 },
+ { "wsr.memctl", ICLASS_xt_iclass_wsr_memctl,
+ 0,
+ Opcode_wsr_memctl_encode_fns, 0, 0 },
+ { "xsr.memctl", ICLASS_xt_iclass_xsr_memctl,
+ 0,
+ Opcode_xsr_memctl_encode_fns, 0, 0 },
+ { "rsr.configid0", ICLASS_xt_iclass_rsr_configid0,
+ 0,
+ Opcode_rsr_configid0_encode_fns, 0, 0 },
+ { "wsr.configid0", ICLASS_xt_iclass_wsr_configid0,
+ 0,
+ Opcode_wsr_configid0_encode_fns, 0, 0 },
+ { "rsr.configid1", ICLASS_xt_iclass_rsr_configid1,
+ 0,
+ Opcode_rsr_configid1_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", ICLASS_xt_iclass_rsr_epc7,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", ICLASS_xt_iclass_wsr_epc7,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", ICLASS_xt_iclass_xsr_epc7,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", ICLASS_xt_iclass_rsr_excsave7,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", ICLASS_xt_iclass_wsr_excsave7,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", ICLASS_xt_iclass_xsr_excsave7,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", ICLASS_xt_iclass_rsr_eps7,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", ICLASS_xt_iclass_wsr_eps7,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", ICLASS_xt_iclass_xsr_eps7,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "salt", ICLASS_xt_iclass_salt,
+ 0,
+ Opcode_salt_encode_fns, 0, 0 },
+ { "saltu", ICLASS_xt_iclass_salt,
+ 0,
+ Opcode_saltu_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "mul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hh_encode_fns, 0, 0 },
+ { "mul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hl_encode_fns, 0, 0 },
+ { "mul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_lh_encode_fns, 0, 0 },
+ { "mul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_ll_encode_fns, 0, 0 },
+ { "umul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hh_encode_fns, 0, 0 },
+ { "umul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hl_encode_fns, 0, 0 },
+ { "umul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_lh_encode_fns, 0, 0 },
+ { "umul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_ll_encode_fns, 0, 0 },
+ { "mul.ad.hh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hh_encode_fns, 0, 0 },
+ { "mul.ad.hl", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hl_encode_fns, 0, 0 },
+ { "mul.ad.lh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_lh_encode_fns, 0, 0 },
+ { "mul.ad.ll", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_ll_encode_fns, 0, 0 },
+ { "mul.da.hh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hh_encode_fns, 0, 0 },
+ { "mul.da.hl", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hl_encode_fns, 0, 0 },
+ { "mul.da.lh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_lh_encode_fns, 0, 0 },
+ { "mul.da.ll", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_ll_encode_fns, 0, 0 },
+ { "mul.dd.hh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hh_encode_fns, 0, 0 },
+ { "mul.dd.hl", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hl_encode_fns, 0, 0 },
+ { "mul.dd.lh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_lh_encode_fns, 0, 0 },
+ { "mul.dd.ll", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_ll_encode_fns, 0, 0 },
+ { "mula.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hh_encode_fns, 0, 0 },
+ { "mula.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hl_encode_fns, 0, 0 },
+ { "mula.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_lh_encode_fns, 0, 0 },
+ { "mula.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_ll_encode_fns, 0, 0 },
+ { "muls.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hh_encode_fns, 0, 0 },
+ { "muls.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hl_encode_fns, 0, 0 },
+ { "muls.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_lh_encode_fns, 0, 0 },
+ { "muls.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_ll_encode_fns, 0, 0 },
+ { "mula.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hh_encode_fns, 0, 0 },
+ { "mula.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hl_encode_fns, 0, 0 },
+ { "mula.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_lh_encode_fns, 0, 0 },
+ { "mula.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_ll_encode_fns, 0, 0 },
+ { "muls.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hh_encode_fns, 0, 0 },
+ { "muls.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hl_encode_fns, 0, 0 },
+ { "muls.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_lh_encode_fns, 0, 0 },
+ { "muls.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_ll_encode_fns, 0, 0 },
+ { "mula.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hh_encode_fns, 0, 0 },
+ { "mula.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hl_encode_fns, 0, 0 },
+ { "mula.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_lh_encode_fns, 0, 0 },
+ { "mula.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_ll_encode_fns, 0, 0 },
+ { "muls.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hh_encode_fns, 0, 0 },
+ { "muls.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hl_encode_fns, 0, 0 },
+ { "muls.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_lh_encode_fns, 0, 0 },
+ { "muls.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_ll_encode_fns, 0, 0 },
+ { "mula.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hh_encode_fns, 0, 0 },
+ { "mula.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hl_encode_fns, 0, 0 },
+ { "mula.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_lh_encode_fns, 0, 0 },
+ { "mula.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_ll_encode_fns, 0, 0 },
+ { "muls.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hh_encode_fns, 0, 0 },
+ { "muls.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hl_encode_fns, 0, 0 },
+ { "muls.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_lh_encode_fns, 0, 0 },
+ { "muls.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_ll_encode_fns, 0, 0 },
+ { "mula.da.hh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_lddec_encode_fns, 1, Opcode_mula_da_hh_lddec_funcUnit_uses },
+ { "mula.da.hh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_ldinc_encode_fns, 1, Opcode_mula_da_hh_ldinc_funcUnit_uses },
+ { "mula.da.hl.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_lddec_encode_fns, 1, Opcode_mula_da_hl_lddec_funcUnit_uses },
+ { "mula.da.hl.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_ldinc_encode_fns, 1, Opcode_mula_da_hl_ldinc_funcUnit_uses },
+ { "mula.da.lh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_lddec_encode_fns, 1, Opcode_mula_da_lh_lddec_funcUnit_uses },
+ { "mula.da.lh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_ldinc_encode_fns, 1, Opcode_mula_da_lh_ldinc_funcUnit_uses },
+ { "mula.da.ll.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_lddec_encode_fns, 1, Opcode_mula_da_ll_lddec_funcUnit_uses },
+ { "mula.da.ll.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_ldinc_encode_fns, 1, Opcode_mula_da_ll_ldinc_funcUnit_uses },
+ { "mula.dd.hh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_lddec_encode_fns, 1, Opcode_mula_dd_hh_lddec_funcUnit_uses },
+ { "mula.dd.hh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_ldinc_encode_fns, 1, Opcode_mula_dd_hh_ldinc_funcUnit_uses },
+ { "mula.dd.hl.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_lddec_encode_fns, 1, Opcode_mula_dd_hl_lddec_funcUnit_uses },
+ { "mula.dd.hl.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_ldinc_encode_fns, 1, Opcode_mula_dd_hl_ldinc_funcUnit_uses },
+ { "mula.dd.lh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_lddec_encode_fns, 1, Opcode_mula_dd_lh_lddec_funcUnit_uses },
+ { "mula.dd.lh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_ldinc_encode_fns, 1, Opcode_mula_dd_lh_ldinc_funcUnit_uses },
+ { "mula.dd.ll.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_lddec_encode_fns, 1, Opcode_mula_dd_ll_lddec_funcUnit_uses },
+ { "mula.dd.ll.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_ldinc_encode_fns, 1, Opcode_mula_dd_ll_ldinc_funcUnit_uses },
+ { "lddec", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_lddec_encode_fns, 1, Opcode_lddec_funcUnit_uses },
+ { "ldinc", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_ldinc_encode_fns, 1, Opcode_ldinc_funcUnit_uses },
+ { "rsr.m0", ICLASS_xt_iclass_rsr_m0,
+ 0,
+ Opcode_rsr_m0_encode_fns, 0, 0 },
+ { "wsr.m0", ICLASS_xt_iclass_wsr_m0,
+ 0,
+ Opcode_wsr_m0_encode_fns, 0, 0 },
+ { "xsr.m0", ICLASS_xt_iclass_xsr_m0,
+ 0,
+ Opcode_xsr_m0_encode_fns, 0, 0 },
+ { "rsr.m1", ICLASS_xt_iclass_rsr_m1,
+ 0,
+ Opcode_rsr_m1_encode_fns, 0, 0 },
+ { "wsr.m1", ICLASS_xt_iclass_wsr_m1,
+ 0,
+ Opcode_wsr_m1_encode_fns, 0, 0 },
+ { "xsr.m1", ICLASS_xt_iclass_xsr_m1,
+ 0,
+ Opcode_xsr_m1_encode_fns, 0, 0 },
+ { "rsr.m2", ICLASS_xt_iclass_rsr_m2,
+ 0,
+ Opcode_rsr_m2_encode_fns, 0, 0 },
+ { "wsr.m2", ICLASS_xt_iclass_wsr_m2,
+ 0,
+ Opcode_wsr_m2_encode_fns, 0, 0 },
+ { "xsr.m2", ICLASS_xt_iclass_xsr_m2,
+ 0,
+ Opcode_xsr_m2_encode_fns, 0, 0 },
+ { "rsr.m3", ICLASS_xt_iclass_rsr_m3,
+ 0,
+ Opcode_rsr_m3_encode_fns, 0, 0 },
+ { "wsr.m3", ICLASS_xt_iclass_wsr_m3,
+ 0,
+ Opcode_wsr_m3_encode_fns, 0, 0 },
+ { "xsr.m3", ICLASS_xt_iclass_xsr_m3,
+ 0,
+ Opcode_xsr_m3_encode_fns, 0, 0 },
+ { "rsr.acclo", ICLASS_xt_iclass_rsr_acclo,
+ 0,
+ Opcode_rsr_acclo_encode_fns, 0, 0 },
+ { "wsr.acclo", ICLASS_xt_iclass_wsr_acclo,
+ 0,
+ Opcode_wsr_acclo_encode_fns, 0, 0 },
+ { "xsr.acclo", ICLASS_xt_iclass_xsr_acclo,
+ 0,
+ Opcode_xsr_acclo_encode_fns, 0, 0 },
+ { "rsr.acchi", ICLASS_xt_iclass_rsr_acchi,
+ 0,
+ Opcode_rsr_acchi_encode_fns, 0, 0 },
+ { "wsr.acchi", ICLASS_xt_iclass_wsr_acchi,
+ 0,
+ Opcode_wsr_acchi_encode_fns, 0, 0 },
+ { "xsr.acchi", ICLASS_xt_iclass_xsr_acchi,
+ 0,
+ Opcode_xsr_acchi_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "lddr32.p", ICLASS_xt_iclass_lddr32_p,
+ 0,
+ Opcode_lddr32_p_encode_fns, 1, Opcode_lddr32_p_funcUnit_uses },
+ { "sddr32.p", ICLASS_xt_iclass_sddr32_p,
+ 0,
+ Opcode_sddr32_p_encode_fns, 1, Opcode_sddr32_p_funcUnit_uses },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "andb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andb_encode_fns, 0, 0 },
+ { "andbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andbc_encode_fns, 0, 0 },
+ { "orb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orb_encode_fns, 0, 0 },
+ { "orbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orbc_encode_fns, 0, 0 },
+ { "xorb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_xorb_encode_fns, 0, 0 },
+ { "all4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_all4_encode_fns, 0, 0 },
+ { "any4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_any4_encode_fns, 0, 0 },
+ { "all8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_all8_encode_fns, 0, 0 },
+ { "any8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_any8_encode_fns, 0, 0 },
+ { "bf", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bf_encode_fns, 0, 0 },
+ { "bt", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bt_encode_fns, 0, 0 },
+ { "movf", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movf_encode_fns, 0, 0 },
+ { "movt", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movt_encode_fns, 0, 0 },
+ { "rsr.br", ICLASS_xt_iclass_RSR_BR,
+ 0,
+ Opcode_rsr_br_encode_fns, 0, 0 },
+ { "wsr.br", ICLASS_xt_iclass_WSR_BR,
+ 0,
+ Opcode_wsr_br_encode_fns, 0, 0 },
+ { "xsr.br", ICLASS_xt_iclass_XSR_BR,
+ 0,
+ Opcode_xsr_br_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", ICLASS_xt_iclass_rsr_ccompare2,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", ICLASS_xt_iclass_wsr_ccompare2,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", ICLASS_xt_iclass_xsr_ccompare2,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "ipfl", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 1, Opcode_lict_funcUnit_uses },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 1, Opcode_licw_funcUnit_uses },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 1, Opcode_sict_funcUnit_uses },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 1, Opcode_sicw_funcUnit_uses },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwbui.p", ICLASS_xt_iclass_dcache_dyn,
+ 0,
+ Opcode_diwbui_p_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dhu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "dpfl", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 1, Opcode_sdct_funcUnit_uses },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 1, Opcode_ldct_funcUnit_uses },
+ { "sdcw", ICLASS_xt_iclass_sdcw,
+ 0,
+ Opcode_sdcw_encode_fns, 1, Opcode_sdcw_funcUnit_uses },
+ { "ldcw", ICLASS_xt_iclass_ldcw,
+ 0,
+ Opcode_ldcw_encode_fns, 1, Opcode_ldcw_funcUnit_uses },
+ { "wsr.ptevaddr", ICLASS_xt_iclass_wsr_ptevaddr,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", ICLASS_xt_iclass_rsr_ptevaddr,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", ICLASS_xt_iclass_xsr_ptevaddr,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", ICLASS_xt_iclass_rsr_rasid,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", ICLASS_xt_iclass_wsr_rasid,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", ICLASS_xt_iclass_xsr_rasid,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", ICLASS_xt_iclass_rsr_itlbcfg,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", ICLASS_xt_iclass_wsr_itlbcfg,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", ICLASS_xt_iclass_xsr_itlbcfg,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", ICLASS_xt_iclass_rsr_dtlbcfg,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", ICLASS_xt_iclass_wsr_dtlbcfg,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", ICLASS_xt_iclass_xsr_dtlbcfg,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", ICLASS_xt_iclass_ldpte,
+ 0,
+ Opcode_ldpte_encode_fns, 1, Opcode_ldpte_funcUnit_uses },
+ { "hwwitlba", ICLASS_xt_iclass_hwwitlba,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", ICLASS_xt_iclass_hwwdtlba,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "rsr.cpenable", ICLASS_xt_iclass_rsr_cpenable,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", ICLASS_xt_iclass_wsr_cpenable,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", ICLASS_xt_iclass_xsr_cpenable,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 1, Opcode_l32ai_funcUnit_uses },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 1, Opcode_s32ri_funcUnit_uses },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 1, Opcode_s32c1i_funcUnit_uses },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "quos", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "quou", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "rems", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "remu", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rsr.eraccess", ICLASS_xt_iclass_rsr_eraccess,
+ 0,
+ Opcode_rsr_eraccess_encode_fns, 0, 0 },
+ { "wsr.eraccess", ICLASS_xt_iclass_wsr_eraccess,
+ 0,
+ Opcode_wsr_eraccess_encode_fns, 0, 0 },
+ { "xsr.eraccess", ICLASS_xt_iclass_xsr_eraccess,
+ 0,
+ Opcode_xsr_eraccess_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.fcr", ICLASS_rur_fcr,
+ 0,
+ Opcode_rur_fcr_encode_fns, 0, 0 },
+ { "wur.fcr", ICLASS_wur_fcr,
+ 0,
+ Opcode_wur_fcr_encode_fns, 0, 0 },
+ { "rur.fsr", ICLASS_rur_fsr,
+ 0,
+ Opcode_rur_fsr_encode_fns, 0, 0 },
+ { "wur.fsr", ICLASS_wur_fsr,
+ 0,
+ Opcode_wur_fsr_encode_fns, 0, 0 },
+ { "read_impwire", ICLASS_iclass_READ_IMPWIRE,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", ICLASS_iclass_SETB_EXPSTATE,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", ICLASS_iclass_CLRB_EXPSTATE,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", ICLASS_iclass_WRMSK_EXPSTATE,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "lsi", ICLASS_LSI,
+ 0,
+ Opcode_lsi_encode_fns, 1, Opcode_lsi_funcUnit_uses },
+ { "lsip", ICLASS_LSIP,
+ 0,
+ Opcode_lsip_encode_fns, 1, Opcode_lsip_funcUnit_uses },
+ { "lsx", ICLASS_LSX,
+ 0,
+ Opcode_lsx_encode_fns, 1, Opcode_lsx_funcUnit_uses },
+ { "lsxp", ICLASS_LSXP,
+ 0,
+ Opcode_lsxp_encode_fns, 1, Opcode_lsxp_funcUnit_uses },
+ { "ssi", ICLASS_SSI,
+ 0,
+ Opcode_ssi_encode_fns, 1, Opcode_ssi_funcUnit_uses },
+ { "ssip", ICLASS_SSIP,
+ 0,
+ Opcode_ssip_encode_fns, 1, Opcode_ssip_funcUnit_uses },
+ { "ssx", ICLASS_SSX,
+ 0,
+ Opcode_ssx_encode_fns, 1, Opcode_ssx_funcUnit_uses },
+ { "ssxp", ICLASS_SSXP,
+ 0,
+ Opcode_ssxp_encode_fns, 1, Opcode_ssxp_funcUnit_uses },
+ { "ldi", ICLASS_LDI,
+ 0,
+ Opcode_ldi_encode_fns, 1, Opcode_ldi_funcUnit_uses },
+ { "ldip", ICLASS_LDIP,
+ 0,
+ Opcode_ldip_encode_fns, 1, Opcode_ldip_funcUnit_uses },
+ { "ldx", ICLASS_LDX,
+ 0,
+ Opcode_ldx_encode_fns, 1, Opcode_ldx_funcUnit_uses },
+ { "ldxp", ICLASS_LDXP,
+ 0,
+ Opcode_ldxp_encode_fns, 1, Opcode_ldxp_funcUnit_uses },
+ { "sdi", ICLASS_SDI,
+ 0,
+ Opcode_sdi_encode_fns, 1, Opcode_sdi_funcUnit_uses },
+ { "sdip", ICLASS_SDIP,
+ 0,
+ Opcode_sdip_encode_fns, 1, Opcode_sdip_funcUnit_uses },
+ { "sdx", ICLASS_SDX,
+ 0,
+ Opcode_sdx_encode_fns, 1, Opcode_sdx_funcUnit_uses },
+ { "sdxp", ICLASS_SDXP,
+ 0,
+ Opcode_sdxp_encode_fns, 1, Opcode_sdxp_funcUnit_uses },
+ { "abs.s", ICLASS_ABS_S,
+ 0,
+ Opcode_abs_s_encode_fns, 0, 0 },
+ { "neg.s", ICLASS_NEG_S,
+ 0,
+ Opcode_neg_s_encode_fns, 0, 0 },
+ { "abs.d", ICLASS_ABS_D,
+ 0,
+ Opcode_abs_d_encode_fns, 0, 0 },
+ { "neg.d", ICLASS_NEG_D,
+ 0,
+ Opcode_neg_d_encode_fns, 0, 0 },
+ { "mov.s", ICLASS_MOV_S,
+ 0,
+ Opcode_mov_s_encode_fns, 0, 0 },
+ { "mov.d", ICLASS_MOV_D,
+ 0,
+ Opcode_mov_d_encode_fns, 0, 0 },
+ { "moveqz.s", ICLASS_MOVEQZ_S,
+ 0,
+ Opcode_moveqz_s_encode_fns, 0, 0 },
+ { "movnez.s", ICLASS_MOVNEZ_S,
+ 0,
+ Opcode_movnez_s_encode_fns, 0, 0 },
+ { "movltz.s", ICLASS_MOVLTZ_S,
+ 0,
+ Opcode_movltz_s_encode_fns, 0, 0 },
+ { "movgez.s", ICLASS_MOVGEZ_S,
+ 0,
+ Opcode_movgez_s_encode_fns, 0, 0 },
+ { "movf.s", ICLASS_MOVF_S,
+ 0,
+ Opcode_movf_s_encode_fns, 0, 0 },
+ { "movt.s", ICLASS_MOVT_S,
+ 0,
+ Opcode_movt_s_encode_fns, 0, 0 },
+ { "wfr", ICLASS_WFR,
+ 0,
+ Opcode_wfr_encode_fns, 0, 0 },
+ { "rfr", ICLASS_RFR,
+ 0,
+ Opcode_rfr_encode_fns, 0, 0 },
+ { "rfrd", ICLASS_RFRD,
+ 0,
+ Opcode_rfrd_encode_fns, 0, 0 },
+ { "wfrd", ICLASS_WFRD,
+ 0,
+ Opcode_wfrd_encode_fns, 0, 0 },
+ { "round.s", ICLASS_ROUND_S,
+ 0,
+ Opcode_round_s_encode_fns, 0, 0 },
+ { "round.d", ICLASS_ROUND_D,
+ 0,
+ Opcode_round_d_encode_fns, 0, 0 },
+ { "ceil.s", ICLASS_CEIL_S,
+ 0,
+ Opcode_ceil_s_encode_fns, 0, 0 },
+ { "ceil.d", ICLASS_CEIL_D,
+ 0,
+ Opcode_ceil_d_encode_fns, 0, 0 },
+ { "floor.s", ICLASS_FLOOR_S,
+ 0,
+ Opcode_floor_s_encode_fns, 0, 0 },
+ { "floor.d", ICLASS_FLOOR_D,
+ 0,
+ Opcode_floor_d_encode_fns, 0, 0 },
+ { "trunc.s", ICLASS_TRUNC_S,
+ 0,
+ Opcode_trunc_s_encode_fns, 0, 0 },
+ { "trunc.d", ICLASS_TRUNC_D,
+ 0,
+ Opcode_trunc_d_encode_fns, 0, 0 },
+ { "utrunc.s", ICLASS_UTRUNC_S,
+ 0,
+ Opcode_utrunc_s_encode_fns, 0, 0 },
+ { "utrunc.d", ICLASS_UTRUNC_D,
+ 0,
+ Opcode_utrunc_d_encode_fns, 0, 0 },
+ { "float.s", ICLASS_FLOAT_S,
+ 0,
+ Opcode_float_s_encode_fns, 0, 0 },
+ { "float.d", ICLASS_FLOAT_D,
+ 0,
+ Opcode_float_d_encode_fns, 0, 0 },
+ { "ufloat.s", ICLASS_UFLOAT_S,
+ 0,
+ Opcode_ufloat_s_encode_fns, 0, 0 },
+ { "ufloat.d", ICLASS_UFLOAT_D,
+ 0,
+ Opcode_ufloat_d_encode_fns, 0, 0 },
+ { "cvtd.s", ICLASS_CVTD_S,
+ 0,
+ Opcode_cvtd_s_encode_fns, 0, 0 },
+ { "cvts.d", ICLASS_CVTS_D,
+ 0,
+ Opcode_cvts_d_encode_fns, 0, 0 },
+ { "un.s", ICLASS_UN_S,
+ 0,
+ Opcode_un_s_encode_fns, 0, 0 },
+ { "un.d", ICLASS_UN_D,
+ 0,
+ Opcode_un_d_encode_fns, 0, 0 },
+ { "ult.s", ICLASS_ULT_S,
+ 0,
+ Opcode_ult_s_encode_fns, 0, 0 },
+ { "ult.d", ICLASS_ULT_D,
+ 0,
+ Opcode_ult_d_encode_fns, 0, 0 },
+ { "ule.s", ICLASS_ULE_S,
+ 0,
+ Opcode_ule_s_encode_fns, 0, 0 },
+ { "ule.d", ICLASS_ULE_D,
+ 0,
+ Opcode_ule_d_encode_fns, 0, 0 },
+ { "ueq.s", ICLASS_UEQ_S,
+ 0,
+ Opcode_ueq_s_encode_fns, 0, 0 },
+ { "ueq.d", ICLASS_UEQ_D,
+ 0,
+ Opcode_ueq_d_encode_fns, 0, 0 },
+ { "olt.s", ICLASS_OLT_S,
+ 0,
+ Opcode_olt_s_encode_fns, 0, 0 },
+ { "olt.d", ICLASS_OLT_D,
+ 0,
+ Opcode_olt_d_encode_fns, 0, 0 },
+ { "ole.s", ICLASS_OLE_S,
+ 0,
+ Opcode_ole_s_encode_fns, 0, 0 },
+ { "ole.d", ICLASS_OLE_D,
+ 0,
+ Opcode_ole_d_encode_fns, 0, 0 },
+ { "oeq.s", ICLASS_OEQ_S,
+ 0,
+ Opcode_oeq_s_encode_fns, 0, 0 },
+ { "oeq.d", ICLASS_OEQ_D,
+ 0,
+ Opcode_oeq_d_encode_fns, 0, 0 },
+ { "add.s", ICLASS_ADD_S,
+ 0,
+ Opcode_add_s_encode_fns, 0, 0 },
+ { "add.d", ICLASS_ADD_D,
+ 0,
+ Opcode_add_d_encode_fns, 0, 0 },
+ { "sub.s", ICLASS_SUB_S,
+ 0,
+ Opcode_sub_s_encode_fns, 0, 0 },
+ { "sub.d", ICLASS_SUB_D,
+ 0,
+ Opcode_sub_d_encode_fns, 0, 0 },
+ { "mul.s", ICLASS_MUL_S,
+ 0,
+ Opcode_mul_s_encode_fns, 0, 0 },
+ { "mul.d", ICLASS_MUL_D,
+ 0,
+ Opcode_mul_d_encode_fns, 0, 0 },
+ { "madd.s", ICLASS_MADD_S,
+ 0,
+ Opcode_madd_s_encode_fns, 0, 0 },
+ { "madd.d", ICLASS_MADD_D,
+ 0,
+ Opcode_madd_d_encode_fns, 0, 0 },
+ { "msub.s", ICLASS_MSUB_S,
+ 0,
+ Opcode_msub_s_encode_fns, 0, 0 },
+ { "msub.d", ICLASS_MSUB_D,
+ 0,
+ Opcode_msub_d_encode_fns, 0, 0 },
+ { "sqrt0.s", ICLASS_SQRT0_S,
+ 0,
+ Opcode_sqrt0_s_encode_fns, 0, 0 },
+ { "sqrt0.d", ICLASS_SQRT0_D,
+ 0,
+ Opcode_sqrt0_d_encode_fns, 0, 0 },
+ { "div0.s", ICLASS_DIV0_S,
+ 0,
+ Opcode_div0_s_encode_fns, 0, 0 },
+ { "div0.d", ICLASS_DIV0_D,
+ 0,
+ Opcode_div0_d_encode_fns, 0, 0 },
+ { "recip0.s", ICLASS_RECIP0_S,
+ 0,
+ Opcode_recip0_s_encode_fns, 0, 0 },
+ { "recip0.d", ICLASS_RECIP0_D,
+ 0,
+ Opcode_recip0_d_encode_fns, 0, 0 },
+ { "rsqrt0.s", ICLASS_RSQRT0_S,
+ 0,
+ Opcode_rsqrt0_s_encode_fns, 0, 0 },
+ { "rsqrt0.d", ICLASS_RSQRT0_D,
+ 0,
+ Opcode_rsqrt0_d_encode_fns, 0, 0 },
+ { "maddn.s", ICLASS_MADDN_S,
+ 0,
+ Opcode_maddn_s_encode_fns, 0, 0 },
+ { "maddn.d", ICLASS_MADDN_D,
+ 0,
+ Opcode_maddn_d_encode_fns, 0, 0 },
+ { "divn.s", ICLASS_DIVN_S,
+ 0,
+ Opcode_divn_s_encode_fns, 0, 0 },
+ { "divn.d", ICLASS_DIVN_D,
+ 0,
+ Opcode_divn_d_encode_fns, 0, 0 },
+ { "const.s", ICLASS_CONST_S,
+ 0,
+ Opcode_const_s_encode_fns, 0, 0 },
+ { "const.d", ICLASS_CONST_D,
+ 0,
+ Opcode_const_d_encode_fns, 0, 0 },
+ { "nexp01.s", ICLASS_NEXP01_S,
+ 0,
+ Opcode_nexp01_s_encode_fns, 0, 0 },
+ { "nexp01.d", ICLASS_NEXP01_D,
+ 0,
+ Opcode_nexp01_d_encode_fns, 0, 0 },
+ { "addexp.s", ICLASS_ADDEXP_S,
+ 0,
+ Opcode_addexp_s_encode_fns, 0, 0 },
+ { "addexp.d", ICLASS_ADDEXP_D,
+ 0,
+ Opcode_addexp_d_encode_fns, 0, 0 },
+ { "addexpm.s", ICLASS_ADDEXPM_S,
+ 0,
+ Opcode_addexpm_s_encode_fns, 0, 0 },
+ { "addexpm.d", ICLASS_ADDEXPM_D,
+ 0,
+ Opcode_addexpm_d_encode_fns, 0, 0 },
+ { "mkdadj.s", ICLASS_MKDADJ_S,
+ 0,
+ Opcode_mkdadj_s_encode_fns, 0, 0 },
+ { "mkdadj.d", ICLASS_MKDADJ_D,
+ 0,
+ Opcode_mkdadj_d_encode_fns, 0, 0 },
+ { "mksadj.s", ICLASS_MKSADJ_S,
+ 0,
+ Opcode_mksadj_s_encode_fns, 0, 0 },
+ { "mksadj.d", ICLASS_MKSADJ_D,
+ 0,
+ Opcode_mksadj_d_encode_fns, 0, 0 }
+};
+
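+/*
+ * Opcode identifiers.  The enumerators below are kept in the same
+ * order as the opcodes[] table above, so a decoded opcode id can be
+ * used directly as an index into that table.
+ */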
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_RUR_THREADPTR,
+ OPCODE_WUR_THREADPTR,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUB,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BNEI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BALL,
+ OPCODE_BANY,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQ,
+ OPCODE_BGE,
+ OPCODE_BGEU,
+ OPCODE_BLT,
+ OPCODE_BLTU,
+ OPCODE_BNALL,
+ OPCODE_BNE,
+ OPCODE_BNONE,
+ OPCODE_BEQZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_BNEZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPGTZ,
+ OPCODE_LOOPNEZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVGEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVNEZ,
+ OPCODE_ABS,
+ OPCODE_NEG,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_SIMCALL,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S32NB,
+ OPCODE_S8I,
+ OPCODE_SSA8B,
+ OPCODE_SSA8L,
+ OPCODE_SSL,
+ OPCODE_SSR,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRA,
+ OPCODE_SRL,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_DSYNC,
+ OPCODE_ESYNC,
+ OPCODE_RSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_MEMCTL,
+ OPCODE_WSR_MEMCTL,
+ OPCODE_XSR_MEMCTL,
+ OPCODE_RSR_CONFIGID0,
+ OPCODE_WSR_CONFIGID0,
+ OPCODE_RSR_CONFIGID1,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPC7,
+ OPCODE_WSR_EPC7,
+ OPCODE_XSR_EPC7,
+ OPCODE_RSR_EXCSAVE7,
+ OPCODE_WSR_EXCSAVE7,
+ OPCODE_XSR_EXCSAVE7,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EPS7,
+ OPCODE_WSR_EPS7,
+ OPCODE_XSR_EPS7,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_SALT,
+ OPCODE_SALTU,
+ OPCODE_MUL16S,
+ OPCODE_MUL16U,
+ OPCODE_MULL,
+ OPCODE_MUL_AA_HH,
+ OPCODE_MUL_AA_HL,
+ OPCODE_MUL_AA_LH,
+ OPCODE_MUL_AA_LL,
+ OPCODE_UMUL_AA_HH,
+ OPCODE_UMUL_AA_HL,
+ OPCODE_UMUL_AA_LH,
+ OPCODE_UMUL_AA_LL,
+ OPCODE_MUL_AD_HH,
+ OPCODE_MUL_AD_HL,
+ OPCODE_MUL_AD_LH,
+ OPCODE_MUL_AD_LL,
+ OPCODE_MUL_DA_HH,
+ OPCODE_MUL_DA_HL,
+ OPCODE_MUL_DA_LH,
+ OPCODE_MUL_DA_LL,
+ OPCODE_MUL_DD_HH,
+ OPCODE_MUL_DD_HL,
+ OPCODE_MUL_DD_LH,
+ OPCODE_MUL_DD_LL,
+ OPCODE_MULA_AA_HH,
+ OPCODE_MULA_AA_HL,
+ OPCODE_MULA_AA_LH,
+ OPCODE_MULA_AA_LL,
+ OPCODE_MULS_AA_HH,
+ OPCODE_MULS_AA_HL,
+ OPCODE_MULS_AA_LH,
+ OPCODE_MULS_AA_LL,
+ OPCODE_MULA_AD_HH,
+ OPCODE_MULA_AD_HL,
+ OPCODE_MULA_AD_LH,
+ OPCODE_MULA_AD_LL,
+ OPCODE_MULS_AD_HH,
+ OPCODE_MULS_AD_HL,
+ OPCODE_MULS_AD_LH,
+ OPCODE_MULS_AD_LL,
+ OPCODE_MULA_DA_HH,
+ OPCODE_MULA_DA_HL,
+ OPCODE_MULA_DA_LH,
+ OPCODE_MULA_DA_LL,
+ OPCODE_MULS_DA_HH,
+ OPCODE_MULS_DA_HL,
+ OPCODE_MULS_DA_LH,
+ OPCODE_MULS_DA_LL,
+ OPCODE_MULA_DD_HH,
+ OPCODE_MULA_DD_HL,
+ OPCODE_MULA_DD_LH,
+ OPCODE_MULA_DD_LL,
+ OPCODE_MULS_DD_HH,
+ OPCODE_MULS_DD_HL,
+ OPCODE_MULS_DD_LH,
+ OPCODE_MULS_DD_LL,
+ OPCODE_MULA_DA_HH_LDDEC,
+ OPCODE_MULA_DA_HH_LDINC,
+ OPCODE_MULA_DA_HL_LDDEC,
+ OPCODE_MULA_DA_HL_LDINC,
+ OPCODE_MULA_DA_LH_LDDEC,
+ OPCODE_MULA_DA_LH_LDINC,
+ OPCODE_MULA_DA_LL_LDDEC,
+ OPCODE_MULA_DA_LL_LDINC,
+ OPCODE_MULA_DD_HH_LDDEC,
+ OPCODE_MULA_DD_HH_LDINC,
+ OPCODE_MULA_DD_HL_LDDEC,
+ OPCODE_MULA_DD_HL_LDINC,
+ OPCODE_MULA_DD_LH_LDDEC,
+ OPCODE_MULA_DD_LH_LDINC,
+ OPCODE_MULA_DD_LL_LDDEC,
+ OPCODE_MULA_DD_LL_LDINC,
+ OPCODE_LDDEC,
+ OPCODE_LDINC,
+ OPCODE_RSR_M0,
+ OPCODE_WSR_M0,
+ OPCODE_XSR_M0,
+ OPCODE_RSR_M1,
+ OPCODE_WSR_M1,
+ OPCODE_XSR_M1,
+ OPCODE_RSR_M2,
+ OPCODE_WSR_M2,
+ OPCODE_XSR_M2,
+ OPCODE_RSR_M3,
+ OPCODE_WSR_M3,
+ OPCODE_XSR_M3,
+ OPCODE_RSR_ACCLO,
+ OPCODE_WSR_ACCLO,
+ OPCODE_XSR_ACCLO,
+ OPCODE_RSR_ACCHI,
+ OPCODE_WSR_ACCHI,
+ OPCODE_XSR_ACCHI,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_LDDR32_P,
+ OPCODE_SDDR32_P,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_ANDB,
+ OPCODE_ANDBC,
+ OPCODE_ORB,
+ OPCODE_ORBC,
+ OPCODE_XORB,
+ OPCODE_ALL4,
+ OPCODE_ANY4,
+ OPCODE_ALL8,
+ OPCODE_ANY8,
+ OPCODE_BF,
+ OPCODE_BT,
+ OPCODE_MOVF,
+ OPCODE_MOVT,
+ OPCODE_RSR_BR,
+ OPCODE_WSR_BR,
+ OPCODE_XSR_BR,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_RSR_CCOMPARE2,
+ OPCODE_WSR_CCOMPARE2,
+ OPCODE_XSR_CCOMPARE2,
+ OPCODE_IHI,
+ OPCODE_IPF,
+ OPCODE_IHU,
+ OPCODE_IIU,
+ OPCODE_IPFL,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWBUI_P,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFRO,
+ OPCODE_DPFW,
+ OPCODE_DPFWO,
+ OPCODE_DHU,
+ OPCODE_DIU,
+ OPCODE_DPFL,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_SDCW,
+ OPCODE_LDCW,
+ OPCODE_WSR_PTEVADDR,
+ OPCODE_RSR_PTEVADDR,
+ OPCODE_XSR_PTEVADDR,
+ OPCODE_RSR_RASID,
+ OPCODE_WSR_RASID,
+ OPCODE_XSR_RASID,
+ OPCODE_RSR_ITLBCFG,
+ OPCODE_WSR_ITLBCFG,
+ OPCODE_XSR_ITLBCFG,
+ OPCODE_RSR_DTLBCFG,
+ OPCODE_WSR_DTLBCFG,
+ OPCODE_XSR_DTLBCFG,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_LDPTE,
+ OPCODE_HWWITLBA,
+ OPCODE_HWWDTLBA,
+ OPCODE_RSR_CPENABLE,
+ OPCODE_WSR_CPENABLE,
+ OPCODE_XSR_CPENABLE,
+ OPCODE_CLAMPS,
+ OPCODE_MAX,
+ OPCODE_MAXU,
+ OPCODE_MIN,
+ OPCODE_MINU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_QUOS,
+ OPCODE_QUOU,
+ OPCODE_REMS,
+ OPCODE_REMU,
+ OPCODE_RSR_ERACCESS,
+ OPCODE_WSR_ERACCESS,
+ OPCODE_XSR_ERACCESS,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_FCR,
+ OPCODE_WUR_FCR,
+ OPCODE_RUR_FSR,
+ OPCODE_WUR_FSR,
+ OPCODE_READ_IMPWIRE,
+ OPCODE_SETB_EXPSTATE,
+ OPCODE_CLRB_EXPSTATE,
+ OPCODE_WRMSK_EXPSTATE,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_LSI,
+ OPCODE_LSIP,
+ OPCODE_LSX,
+ OPCODE_LSXP,
+ OPCODE_SSI,
+ OPCODE_SSIP,
+ OPCODE_SSX,
+ OPCODE_SSXP,
+ OPCODE_LDI,
+ OPCODE_LDIP,
+ OPCODE_LDX,
+ OPCODE_LDXP,
+ OPCODE_SDI,
+ OPCODE_SDIP,
+ OPCODE_SDX,
+ OPCODE_SDXP,
+ OPCODE_ABS_S,
+ OPCODE_NEG_S,
+ OPCODE_ABS_D,
+ OPCODE_NEG_D,
+ OPCODE_MOV_S,
+ OPCODE_MOV_D,
+ OPCODE_MOVEQZ_S,
+ OPCODE_MOVNEZ_S,
+ OPCODE_MOVLTZ_S,
+ OPCODE_MOVGEZ_S,
+ OPCODE_MOVF_S,
+ OPCODE_MOVT_S,
+ OPCODE_WFR,
+ OPCODE_RFR,
+ OPCODE_RFRD,
+ OPCODE_WFRD,
+ OPCODE_ROUND_S,
+ OPCODE_ROUND_D,
+ OPCODE_CEIL_S,
+ OPCODE_CEIL_D,
+ OPCODE_FLOOR_S,
+ OPCODE_FLOOR_D,
+ OPCODE_TRUNC_S,
+ OPCODE_TRUNC_D,
+ OPCODE_UTRUNC_S,
+ OPCODE_UTRUNC_D,
+ OPCODE_FLOAT_S,
+ OPCODE_FLOAT_D,
+ OPCODE_UFLOAT_S,
+ OPCODE_UFLOAT_D,
+ OPCODE_CVTD_S,
+ OPCODE_CVTS_D,
+ OPCODE_UN_S,
+ OPCODE_UN_D,
+ OPCODE_ULT_S,
+ OPCODE_ULT_D,
+ OPCODE_ULE_S,
+ OPCODE_ULE_D,
+ OPCODE_UEQ_S,
+ OPCODE_UEQ_D,
+ OPCODE_OLT_S,
+ OPCODE_OLT_D,
+ OPCODE_OLE_S,
+ OPCODE_OLE_D,
+ OPCODE_OEQ_S,
+ OPCODE_OEQ_D,
+ OPCODE_ADD_S,
+ OPCODE_ADD_D,
+ OPCODE_SUB_S,
+ OPCODE_SUB_D,
+ OPCODE_MUL_S,
+ OPCODE_MUL_D,
+ OPCODE_MADD_S,
+ OPCODE_MADD_D,
+ OPCODE_MSUB_S,
+ OPCODE_MSUB_D,
+ OPCODE_SQRT0_S,
+ OPCODE_SQRT0_D,
+ OPCODE_DIV0_S,
+ OPCODE_DIV0_D,
+ OPCODE_RECIP0_S,
+ OPCODE_RECIP0_D,
+ OPCODE_RSQRT0_S,
+ OPCODE_RSQRT0_D,
+ OPCODE_MADDN_S,
+ OPCODE_MADDN_D,
+ OPCODE_DIVN_S,
+ OPCODE_DIVN_D,
+ OPCODE_CONST_S,
+ OPCODE_CONST_D,
+ OPCODE_NEXP01_S,
+ OPCODE_NEXP01_D,
+ OPCODE_ADDEXP_S,
+ OPCODE_ADDEXP_D,
+ OPCODE_ADDEXPM_S,
+ OPCODE_ADDEXPM_D,
+ OPCODE_MKDADJ_S,
+ OPCODE_MKDADJ_D,
+ OPCODE_MKSADJ_S,
+ OPCODE_MKSADJ_D
+};
+
+
+/* Slot-specific opcode decode functions. */
+
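+/* Decode the 24-bit "inst" slot: dispatch on op0, then on the op1/op2
+   and register fields, returning an index into the OPCODE_* enumeration
+   above, or XTENSA_UNDEFINED when no encoding matches.  */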
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_RET;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_RETW;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_JX;
+ }
+ if (Field_m_Slot_inst_get (insn) == 3)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALLX0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALLX4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALLX8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALLX12;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_MOVSP;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_ISYNC;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RSYNC;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_ESYNC;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DSYNC;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_EXCW;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MEMW;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_EXTW;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_NOP;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 3)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_RFE;
+ if (Field_s_Slot_inst_get (insn) == 2)
+ return OPCODE_RFDE;
+ if (Field_s_Slot_inst_get (insn) == 4)
+ return OPCODE_RFWO;
+ if (Field_s_Slot_inst_get (insn) == 5)
+ return OPCODE_RFWU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFI;
+ }
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BREAK;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_SIMCALL;
+ }
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RSIL;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_LDDR32_P;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_SDDR32_P;
+ }
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_ANY4;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_ALL4;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_ANY8;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_ALL8;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OR;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_XOR;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RER;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_WER;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_NSA;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_NSAU;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_HWWITLBA;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_RITLB0;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_PITLB;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_WITLB;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_RITLB1;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_HWWDTLBA;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_RDTLB0;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_PDTLB;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_WDTLB;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_RDTLB1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ADD;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_ADDX2;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_ADDX4;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_ADDX8;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_SUB;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_SUBX2;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_SUBX4;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_SUBX8;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 1)
+ {
+ if ((Field_op2_Slot_inst_get (insn) == 0 ||
+ Field_op2_Slot_inst_get (insn) == 1))
+ return OPCODE_SLLI;
+ if ((Field_op2_Slot_inst_get (insn) == 2 ||
+ Field_op2_Slot_inst_get (insn) == 3))
+ return OPCODE_SRAI;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SRLI;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_XSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_XSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_XSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_XSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_XSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_XSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_XSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_XSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_XSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_XSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_XSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_XSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_XSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_XSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_XSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_XSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_XSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_XSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_XSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_XSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_XSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_XSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_XSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_XSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_XSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_XSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_XSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_XSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_XSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_XSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_XSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_XSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_XSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_XSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_XSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_XSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_XSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_XSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_XSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_XSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_XSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_XSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_XSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_XSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_XSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_XSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_XSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_XSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_XSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_XSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_XSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_XSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_XSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_XSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_XSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_XSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_XSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_XSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_XSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_XSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_XSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_XSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_XSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_XSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_XSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_SRC;
+ if (Field_op2_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_op2_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_op2_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MUL16U;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MUL16S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_LICT;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_SICT;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_LICW;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_SICW;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LDCT;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_SDCT;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_LDCW;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_SDCW;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_LDPTE;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_ANDB;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_ANDBC;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_ORB;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_ORBC;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_XORB;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_SALTU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_SALT;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MULL;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_QUOU;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_QUOS;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_REMU;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_REMS;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_RSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_RSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_RSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_RSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_RSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_RSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_RSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_RSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_RSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_RSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_RSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_RSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_RSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_RSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_RSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_RSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_RSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_RSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_RSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_RSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_RSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_RSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_RSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_RSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_RSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_RSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_RSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_RSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_RSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_RSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_RSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_RSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_RSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_RSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_RSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_RSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_RSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_RSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_RSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_RSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_RSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_RSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_RSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_RSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 208)
+ return OPCODE_RSR_CONFIGID1;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_RSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_RSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_RSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_RSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_RSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_RSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_RSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_RSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_RSR_INTERRUPT;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_RSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_RSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_RSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_RSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_RSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_RSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 235)
+ return OPCODE_RSR_PRID;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_RSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_RSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_RSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_RSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_RSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_RSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_RSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_RSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_WSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_WSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_WSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_WSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_WSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_WSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_WSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_WSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_WSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_WSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_WSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_WSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_WSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_WSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_WSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 89)
+ return OPCODE_WSR_MMID;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_WSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_WSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_WSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_WSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_WSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_WSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_WSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_WSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_WSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_WSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_WSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_WSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_WSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_WSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_WSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_WSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_WSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_WSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_WSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_WSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_WSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_WSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_WSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_WSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_WSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_WSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_WSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_WSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_WSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_WSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_WSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_WSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_WSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_WSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_WSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_WSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_WSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_WSR_INTSET;
+ if (Field_sr_Slot_inst_get (insn) == 227)
+ return OPCODE_WSR_INTCLEAR;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_WSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_WSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_WSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_WSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_WSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_WSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_WSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_SEXT;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_CLAMPS;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MIN;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MAX;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MINU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_MAXU;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MOVF;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MOVT;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ {
+ if (Field_st_Slot_inst_get (insn) == 230)
+ return OPCODE_RUR_EXPSTATE;
+ if (Field_st_Slot_inst_get (insn) == 231)
+ return OPCODE_RUR_THREADPTR;
+ if (Field_st_Slot_inst_get (insn) == 232)
+ return OPCODE_RUR_FCR;
+ if (Field_st_Slot_inst_get (insn) == 233)
+ return OPCODE_RUR_FSR;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WUR_EXPSTATE;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WUR_THREADPTR;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WUR_FCR;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WUR_FSR;
+ }
+ }
+ if ((Field_op1_Slot_inst_get (insn) == 4 ||
+ Field_op1_Slot_inst_get (insn) == 5))
+ return OPCODE_EXTUI;
+ if (Field_op1_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_LSX;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_LSXP;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_LDX;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_LDXP;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SSX;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_SSXP;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_SDX;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_SDXP;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_L32E;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_S32E;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_S32NB;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 10)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_ADD_S;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_SUB_S;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_MUL_S;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MADD_S;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MSUB_S;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MADDN_S;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_DIVN_S;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ROUND_S;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_TRUNC_S;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_FLOOR_S;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_CEIL_S;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_FLOAT_S;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_UFLOAT_S;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_UTRUNC_S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV_S;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS_S;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_CVTD_S;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_CONST_S;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_RFR;
+ if (Field_t_Slot_inst_get (insn) == 5)
+ return OPCODE_WFR;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_NEG_S;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DIV0_S;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_RECIP0_S;
+ if (Field_t_Slot_inst_get (insn) == 9)
+ return OPCODE_SQRT0_S;
+ if (Field_t_Slot_inst_get (insn) == 10)
+ return OPCODE_RSQRT0_S;
+ if (Field_t_Slot_inst_get (insn) == 11)
+ return OPCODE_NEXP01_S;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MKSADJ_S;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_MKDADJ_S;
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_ADDEXP_S;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_ADDEXPM_S;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 11)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_UN_S;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OEQ_S;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_UEQ_S;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_OLT_S;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_ULT_S;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_OLE_S;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_ULE_S;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ_S;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ_S;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ_S;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ_S;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MOVF_S;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MOVT_S;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 14)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_UN_D;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OEQ_D;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_UEQ_D;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_OLT_D;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_ULT_D;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_OLE_D;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_ULE_D;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_WFRD;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 15)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_ADD_D;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_SUB_D;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_MUL_D;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MADD_D;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MSUB_D;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MADDN_D;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_DIVN_D;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ROUND_D;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_TRUNC_D;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_FLOOR_D;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_CEIL_D;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_FLOAT_D;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_UFLOAT_D;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_UTRUNC_D;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV_D;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS_D;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_CVTS_D;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_CONST_D;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_RFRD;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_NEG_D;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DIV0_D;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_RECIP0_D;
+ if (Field_t_Slot_inst_get (insn) == 9)
+ return OPCODE_SQRT0_D;
+ if (Field_t_Slot_inst_get (insn) == 10)
+ return OPCODE_RSQRT0_D;
+ if (Field_t_Slot_inst_get (insn) == 11)
+ return OPCODE_NEXP01_D;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MKSADJ_D;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_MKDADJ_D;
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_ADDEXP_D;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_ADDEXPM_D;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_READ_IMPWIRE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_SETB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_CLRB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_WRMSK_EXPSTATE;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 1)
+ return OPCODE_L32R;
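+  /* op0 == 2: loads, stores, MOVI/ADDI/ADDMI, and (under r == 7) the
+     data- and instruction-cache maintenance encodings.  */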
+ if (Field_op0_Slot_inst_get (insn) == 2)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_L32I;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_S16I;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_S32I;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFR;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_DPFW;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_DPFRO;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DPFWO;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_DHWB;
+ if (Field_t_Slot_inst_get (insn) == 5)
+ return OPCODE_DHWBI;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_DHI;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DII;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_DHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_DIU;
+ if (Field_op1_Slot_inst_get (insn) == 4)
+ return OPCODE_DIWB;
+ if (Field_op1_Slot_inst_get (insn) == 5)
+ return OPCODE_DIWBI;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_DIWBUI_P;
+ }
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_IPF;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_IPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_IHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_IIU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_IHI;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_III;
+ }
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVI;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_L32AI;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_S32C1I;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_S32RI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 3)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_LSI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_LDI;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_SSI;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_SDI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LSIP;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_LDIP;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_SSIP;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_SDIP;
+ }
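+  /* op0 == 4: the MAC16 multiply/accumulate (MUL*, MULA*, MULS*, UMUL*)
+     and LDINC/LDDEC encodings.  */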
+ if (Field_op0_Slot_inst_get (insn) == 4)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDDEC;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 5)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALL0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALL4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALL8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALL12;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 6)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_J;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQZ;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTZ;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEZ;
+ }
+ if (Field_n_Slot_inst_get (insn) == 2)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEI;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEI;
+ }
+ if (Field_n_Slot_inst_get (insn) == 3)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_ENTRY;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BF;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BT;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LOOP;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_LOOPNEZ;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_LOOPGTZ;
+ }
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTUI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEUI;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 7)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BALL;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_BBC;
+ if ((Field_r_Slot_inst_get (insn) == 6 ||
+ Field_r_Slot_inst_get (insn) == 7))
+ return OPCODE_BBCI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_BANY;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_BNE;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_BGE;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_BNALL;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_BBS;
+ if ((Field_r_Slot_inst_get (insn) == 14 ||
+ Field_r_Slot_inst_get (insn) == 15))
+ return OPCODE_BBSI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
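+/* Decode the 16-bit "inst16b" slot (MOVI.N, BEQZ.N/BNEZ.N, MOV.N,
+   RET.N/RETW.N, BREAK.N, NOP.N, ILL.N).  */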
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16b_get (insn) == 12)
+ {
+ if (Field_i_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_i_Slot_inst16b_get (insn) == 1)
+ {
+ if (Field_z_Slot_inst16b_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_z_Slot_inst16b_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ }
+ }
+ if (Field_op0_Slot_inst16b_get (insn) == 13)
+ {
+ if (Field_r_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_r_Slot_inst16b_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst16b_get (insn) == 0)
+ return OPCODE_RET_N;
+ if (Field_t_Slot_inst16b_get (insn) == 1)
+ return OPCODE_RETW_N;
+ if (Field_t_Slot_inst16b_get (insn) == 2)
+ return OPCODE_BREAK_N;
+ if (Field_t_Slot_inst16b_get (insn) == 3 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ if (Field_t_Slot_inst16b_get (insn) == 6 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ }
+ }
+ return XTENSA_UNDEFINED;
+}
+
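+/* Decode the 16-bit "inst16a" slot (L32I.N, S32I.N, ADD.N, ADDI.N).  */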
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16a_get (insn) == 8)
+ return OPCODE_L32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 9)
+ return OPCODE_S32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 10)
+ return OPCODE_ADD_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 11)
+ return OPCODE_ADDI_N;
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
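+/* Each format has a get/set pair that copies the slot's bits between
+   the full instruction buffer and a slot buffer: the low 24 bits for
+   the x24 format, the low 16 bits for x16a/x16b.  */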
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
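+/* Per-slot field accessor tables, indexed by field number.  A zero
+   entry means the field does not exist in that slot.  */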
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_s8_Slot_inst_get,
+ Field_imms8_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_r_disp_Slot_inst_get,
+ Field_r_3_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_get,
+ Field_rbit2_Slot_inst_get,
+ Field_rhi_Slot_inst_get,
+ Field_t3_Slot_inst_get,
+ Field_tbit2_Slot_inst_get,
+ Field_tlo_Slot_inst_get,
+ Field_w_Slot_inst_get,
+ Field_y_Slot_inst_get,
+ Field_x_Slot_inst_get,
+ Field_t2_Slot_inst_get,
+ Field_s2_Slot_inst_get,
+ Field_r2_Slot_inst_get,
+ Field_t4_Slot_inst_get,
+ Field_s4_Slot_inst_get,
+ Field_r4_Slot_inst_get,
+ Field_t8_Slot_inst_get,
+ Field_r8_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_s8_Slot_inst_set,
+ Field_imms8_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_r_disp_Slot_inst_set,
+ Field_r_3_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_set,
+ Field_rbit2_Slot_inst_set,
+ Field_rhi_Slot_inst_set,
+ Field_t3_Slot_inst_set,
+ Field_tbit2_Slot_inst_set,
+ Field_tlo_Slot_inst_set,
+ Field_w_Slot_inst_set,
+ Field_y_Slot_inst_set,
+ Field_x_Slot_inst_set,
+ Field_t2_Slot_inst_set,
+ Field_s2_Slot_inst_set,
+ Field_r2_Slot_inst_set,
+ Field_t4_Slot_inst_set,
+ Field_s4_Slot_inst_set,
+ Field_r4_Slot_inst_set,
+ Field_t8_Slot_inst_set,
+ Field_r8_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
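+
+/* The field-accessor arrays above are indexed by field ID.  A zero
+   entry means the field has no encoding in that slot; the trailing
+   Implicit_Field_* entries cover operands that are implied by the
+   opcode and occupy no instruction bits. */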
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
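+
+/* Each slot entry pairs a name and format with its slot extract/insert
+   functions, its field-accessor tables, its opcode decoder, and the
+   name of the NOP encoding usable in that slot ("" when the slot has
+   none, as for Inst16a). */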
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
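+
+/* Each format entry gives its name, its length in bytes (3 for x24,
+   2 for the two 16-bit formats), an encode function that seeds the
+   required op0 bits (0, 0x8, 0xc), and the list of slots it contains. */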
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc)
+ return 2; /* x16b */
+ return -1;
+}
+
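+/* op0 is the low nibble of the first instruction byte, so the pattern
+   below repeats for each value of the high nibble: op0 values 0..7
+   select the 3-byte x24 format, 8..13 one of the 2-byte x16 formats,
+   and 14..15 are undefined, hence each 16-entry row reading
+   3 (x8), 2 (x6), -1 (x2).  This agrees with format_decoder above. */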
+static int length_table[256] = {
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int l = insn[0];
+ return length_table[l];
+}
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 3 /* insn_size */, 0,
+ 3, formats, format_decoder, length_decoder,
+ 3, slots,
+ 72 /* num_fields */,
+ 132, operands,
+ 444, iclasses,
+ 579, opcodes, 0,
+ 8, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 1, interfaces, 0,
+ 1, funcUnits, 0
+};
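+
+/* The counts here size tables defined elsewhere in this generated
+   file: 3 formats and 3 slots (above), 72 fields, 132 operands,
+   444 instruction classes, 579 opcodes, and 8 register files. */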
diff --git a/target/xtensa/core-dsp3400.c b/target/xtensa/core-dsp3400.c
new file mode 100644
index 000000000..4e0bc8a8c
--- /dev/null
+++ b/target/xtensa/core-dsp3400.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2020, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-dsp3400/core-isa.h"
+#include "core-dsp3400/core-matmap.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_dsp3400
+#include "core-dsp3400/xtensa-modules.c.inc"
+
+static XtensaConfig dsp3400 __attribute__((unused)) = {
+ .name = "dsp3400",
+ .gdb_regmap = {
+ .reg = {
+#include "core-dsp3400/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 40000,
+ .opcode_translators = (const XtensaOpcodeTranslators *[]){
+ &xtensa_core_opcodes,
+ &xtensa_fpu2000_opcodes,
+ NULL,
+ },
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dsp3400)
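+
+/* REGISTER_CORE() (per overlay_tool.h) adds this configuration to
+   QEMU's global list of Xtensa cores at startup; it expands to nothing
+   when the core's endianness does not match the build target, which is
+   why the config itself is marked __attribute__((unused)). */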
diff --git a/target/xtensa/core-dsp3400/core-isa.h b/target/xtensa/core-dsp3400/core-isa.h
new file mode 100644
index 000000000..336b2467c
--- /dev/null
+++ b/target/xtensa/core-dsp3400/core-isa.h
@@ -0,0 +1,452 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 0 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 4 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 1 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 16 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc. */
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 800002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dsp3400_RC2" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x0002DC22 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC3F3DBFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1082C3B0 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX3.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2300 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 230001 /* major*100+minor */
+#define XCHAL_HW_REL_LX3 1
+#define XCHAL_HW_REL_LX3_0 1
+#define XCHAL_HW_REL_LX3_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2300 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 230001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2300 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 230001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
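+
+/* Worked out for this core: 2 ways * 2^(linewidth 5 + setwidth 7)
+   = 8192 bytes, matching XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE. */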
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 16
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 2 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 2 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+/* Instruction RAM 0: */
+#define XCHAL_INSTRAM0_VADDR 0x5FFE0000
+#define XCHAL_INSTRAM0_PADDR 0x5FFE0000
+#define XCHAL_INSTRAM0_SIZE 65536
+#define XCHAL_INSTRAM0_ECC_PARITY 0
+
+/* Instruction RAM 1: */
+#define XCHAL_INSTRAM1_VADDR 0x5FFF0000
+#define XCHAL_INSTRAM1_PADDR 0x5FFF0000
+#define XCHAL_INSTRAM1_SIZE 65536
+#define XCHAL_INSTRAM1_ECC_PARITY 0
+
+/* Data RAM 0: */
+#define XCHAL_DATARAM0_VADDR 0x5FFD0000
+#define XCHAL_DATARAM0_PADDR 0x5FFD0000
+#define XCHAL_DATARAM0_SIZE 32768
+#define XCHAL_DATARAM0_ECC_PARITY 0
+
+/* Data RAM 1: */
+#define XCHAL_DATARAM1_VADDR 0x5FFD8000
+#define XCHAL_DATARAM1_PADDR 0x5FFD8000
+#define XCHAL_DATARAM1_SIZE 32768
+#define XCHAL_DATARAM1_ECC_PARITY 0
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 2 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 13 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 4 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 9 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 4 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x00001200
+#define XCHAL_INTLEVEL2_MASK 0x00000980
+#define XCHAL_INTLEVEL3_MASK 0x00000460
+#define XCHAL_INTLEVEL4_MASK 0x00000019
+#define XCHAL_INTLEVEL5_MASK 0x00000006
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
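+
+/* Bit i of XCHAL_INTLEVELn_MASK is set when XCHAL_INTi_LEVEL == n (see
+   the per-interrupt levels below); e.g. level 1 holds interrupts 9 and
+   12, giving 0x00001200. */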
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x00001200
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x00001B80
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x00001FE0
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x00001FF9
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x00001FFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x00001FFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x00001FFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 4
+#define XCHAL_INT1_LEVEL 5
+#define XCHAL_INT2_LEVEL 5
+#define XCHAL_INT3_LEVEL 4
+#define XCHAL_INT4_LEVEL 4
+#define XCHAL_INT5_LEVEL 3
+#define XCHAL_INT6_LEVEL 3
+#define XCHAL_INT7_LEVEL 2
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 1
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 1
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_WRITE_ERROR
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFFE000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000400
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x000003FE
+#define XCHAL_INTTYPE_MASK_TIMER 0x00000801
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00001000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 0 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_WRITE_ERROR_INTERRUPT 12 /* write-error interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (There are many interrupts each at level(s) 1, 2, 3, 4, 5.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 1 /* (intlevel 5) */
+#define XCHAL_EXTINT1_NUM 2 /* (intlevel 5) */
+#define XCHAL_EXTINT2_NUM 3 /* (intlevel 4) */
+#define XCHAL_EXTINT3_NUM 4 /* (intlevel 4) */
+#define XCHAL_EXTINT4_NUM 5 /* (intlevel 3) */
+#define XCHAL_EXTINT5_NUM 6 /* (intlevel 3) */
+#define XCHAL_EXTINT6_NUM 7 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT8_NUM 9 /* (intlevel 1) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x5FFE0400 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x5FFE0400
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0x5FFE0000
+#define XCHAL_RESET_VECTOR0_PADDR 0x5FFE0000
+#define XCHAL_RESET_VECTOR1_VADDR 0xFFFF1000
+#define XCHAL_RESET_VECTOR1_PADDR 0xFFFF1000
+#define XCHAL_RESET_VECTOR_VADDR 0x5FFE0000
+#define XCHAL_RESET_VECTOR_PADDR 0x5FFE0000
+#define XCHAL_USER_VECOFS 0x0000023C
+#define XCHAL_USER_VECTOR_VADDR 0x5FFE063C
+#define XCHAL_USER_VECTOR_PADDR 0x5FFE063C
+#define XCHAL_KERNEL_VECOFS 0x0000021C
+#define XCHAL_KERNEL_VECTOR_VADDR 0x5FFE061C
+#define XCHAL_KERNEL_VECTOR_PADDR 0x5FFE061C
+#define XCHAL_DOUBLEEXC_VECOFS 0x0000025C
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x5FFE065C
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x5FFE065C
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x5FFE0400
+#define XCHAL_WINDOW_VECTORS_PADDR 0x5FFE0400
+#define XCHAL_INTLEVEL2_VECOFS 0x0000017C
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x5FFE057C
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x5FFE057C
+#define XCHAL_INTLEVEL3_VECOFS 0x0000019C
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x5FFE059C
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x5FFE059C
+#define XCHAL_INTLEVEL4_VECOFS 0x000001BC
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x5FFE05BC
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x5FFE05BC
+#define XCHAL_INTLEVEL5_VECOFS 0x000001DC
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x5FFE05DC
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x5FFE05DC
+#define XCHAL_INTLEVEL6_VECOFS 0x000001FC
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x5FFE05FC
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x5FFE05FC
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 1 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+
+#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
+
diff --git a/target/xtensa/core-dsp3400/core-matmap.h b/target/xtensa/core-dsp3400/core-matmap.h
new file mode 100644
index 000000000..8d1aa8336
--- /dev/null
+++ b/target/xtensa/core-dsp3400/core-matmap.h
@@ -0,0 +1,312 @@
+/*
+ * xtensa/config/core-matmap.h -- Memory access and translation mapping
+ * parameters (CHAL) of the Xtensa processor core configuration.
+ *
+ * If you are using Xtensa Tools, see <xtensa/config/core.h> (which includes
+ * this file) for more details.
+ *
+ * In the Xtensa processor products released to date, all parameters
+ * defined in this file are derivable (at least in theory) from
+ * information contained in the core-isa.h header file.
+ * In particular, the following core configuration parameters are relevant:
+ * XCHAL_HAVE_CACHEATTR
+ * XCHAL_HAVE_MIMIC_CACHEATTR
+ * XCHAL_HAVE_XLT_CACHEATTR
+ * XCHAL_HAVE_PTP_MMU
+ * XCHAL_ITLB_ARF_ENTRIES_LOG2
+ * XCHAL_DTLB_ARF_ENTRIES_LOG2
+ * XCHAL_DCACHE_IS_WRITEBACK
+ * XCHAL_ICACHE_SIZE (presence of I-cache)
+ * XCHAL_DCACHE_SIZE (presence of D-cache)
+ * XCHAL_HW_VERSION_MAJOR
+ * XCHAL_HW_VERSION_MINOR
+ */
+
+/* Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+
+#ifndef XTENSA_CONFIG_CORE_MATMAP_H
+#define XTENSA_CONFIG_CORE_MATMAP_H
+
+
+/*----------------------------------------------------------------------
+ CACHE (MEMORY ACCESS) ATTRIBUTES
+ ----------------------------------------------------------------------*/
+
+
+/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
+#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION
+#define XCHAL_LCA_LIST XTHAL_LAM_CACHED_NOALLOC XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_ISOLATE XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION
+#define XCHAL_SCA_LIST XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_BYPASS XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITEBACK XCHAL_SEP \
+ XTHAL_SAM_WRITEBACK_NOALLOC XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_ISOLATE XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION
+
+
+/*
+ * Specific encoded cache attribute values of general interest.
+ * If a specific cache mode is not available, the closest available
+ * one is returned instead (eg. writethru instead of writeback,
+ * bypass instead of writethru).
+ */
+#define XCHAL_CA_BYPASS 2 /* cache disabled (bypassed) mode */
+#define XCHAL_CA_WRITETHRU 1 /* cache enabled (write-through) mode */
+#define XCHAL_CA_WRITEBACK 4 /* cache enabled (write-back) mode */
+#define XCHAL_CA_WRITEBACK_NOALLOC 4 /* cache enabled (write-back no-allocate) mode */
+#define XCHAL_CA_ILLEGAL 15 /* no access allowed (all cause exceptions) mode */
+#define XCHAL_CA_ISOLATE 14 /* cache isolate (accesses go to cache not memory) mode */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/*
+ * General notes on MMU parameters.
+ *
+ * Terminology:
+ * ASID = address-space ID (acts as an "extension" of virtual addresses)
+ * VPN = virtual page number
+ * PPN = physical page number
+ * CA = encoded cache attribute (access modes)
+ * TLB = translation look-aside buffer (term is stretched somewhat here)
+ * I = instruction (fetch accesses)
+ * D = data (load and store accesses)
+ * way = each TLB (ITLB and DTLB) consists of a number of "ways"
+ * that simultaneously match the virtual address of an access;
+ * a TLB successfully translates a virtual address if exactly
+ * one way matches the vaddr; if none match, it is a miss;
+ * if multiple match, one gets a "multihit" exception;
+ * each way can be independently configured in terms of number of
+ * entries, page sizes, which fields are writable or constant, etc.
+ * set = group of contiguous ways with exactly identical parameters
+ * ARF = auto-refill; hardware services a 1st-level miss by loading a PTE
+ * from the page table and storing it in one of the auto-refill ways;
+ * if this PTE load also misses, a miss exception is posted for s/w.
+ * min-wired = a "min-wired" way can be used to map a single (minimum-sized)
+ * page arbitrarily under program control; it has a single entry,
+ * is non-auto-refill (some other way(s) must be auto-refill),
+ * all its fields (VPN, PPN, ASID, CA) are all writable, and it
+ * supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
+ * restriction is that this be the only page size it supports).
+ *
+ * TLB way entries are virtually indexed.
+ * TLB ways that support multiple page sizes:
+ * - must have all writable VPN and PPN fields;
+ * - can only use one page size at any given time (eg. setup at startup),
+ * selected by the respective ITLBCFG or DTLBCFG special register,
+ * whose bits n*4+3 .. n*4 index the list of page sizes for way n
+ * (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
+ * this list may be sparse for auto-refill ways because auto-refill
+ * ways have independent lists of supported page sizes sharing a
+ * common encoding with PTE entries; the encoding is the index into
+ * this list; unsupported sizes for a given way are zero in the list;
+ * selecting unsupported sizes results in undefined hardware behaviour;
+ * - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
+ */
+
+#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */
+#define XCHAL_MMU_ASID_KERNEL 0 /* ASID value indicating kernel (ring 0) address space */
+#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */
+#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 29 /* max page size in a PTE structure (log2) */
+#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 29 /* min page size in a PTE structure (log2) */
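+
+/* A log2 page size of 29 means a single 512 MB page size; the 8-entry
+   spanning ways below therefore cover the entire 4 GB address space. */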
+
+
+/*** Instruction TLB: ***/
+
+#define XCHAL_ITLB_WAY_BITS 0 /* number of bits holding the ways */
+#define XCHAL_ITLB_WAYS 1 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_ITLB_ARF_WAYS 0 /* number of auto-refill ways */
+#define XCHAL_ITLB_SETS 1 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_ITLB_WAY0_SET 0
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_ITLB_ARF_SETS 0 /* number of auto-refill sets */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
+
+
+/* ITLB way set 0 (group of ways 0 thru 0): */
+#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_ITLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET0_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET0_ENTRIES 8 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET0_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 29 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 29 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0x00000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant VPN values for each entry of ITLB way set 0 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET0_E0_VPN_CONST 0x00000000
+#define XCHAL_ITLB_SET0_E1_VPN_CONST 0x20000000
+#define XCHAL_ITLB_SET0_E2_VPN_CONST 0x40000000
+#define XCHAL_ITLB_SET0_E3_VPN_CONST 0x60000000
+#define XCHAL_ITLB_SET0_E4_VPN_CONST 0x80000000
+#define XCHAL_ITLB_SET0_E5_VPN_CONST 0xA0000000
+#define XCHAL_ITLB_SET0_E6_VPN_CONST 0xC0000000
+#define XCHAL_ITLB_SET0_E7_VPN_CONST 0xE0000000
+/* Reset PPN values for each entry of ITLB way set 0 (because SET0_PPN_RESET is non-zero): */
+#define XCHAL_ITLB_SET0_E0_PPN_RESET 0x00000000
+#define XCHAL_ITLB_SET0_E1_PPN_RESET 0x20000000
+#define XCHAL_ITLB_SET0_E2_PPN_RESET 0x40000000
+#define XCHAL_ITLB_SET0_E3_PPN_RESET 0x60000000
+#define XCHAL_ITLB_SET0_E4_PPN_RESET 0x80000000
+#define XCHAL_ITLB_SET0_E5_PPN_RESET 0xA0000000
+#define XCHAL_ITLB_SET0_E6_PPN_RESET 0xC0000000
+#define XCHAL_ITLB_SET0_E7_PPN_RESET 0xE0000000
+/* Reset CA values for each entry of ITLB way set 0 (because SET0_CA_RESET is non-zero): */
+#define XCHAL_ITLB_SET0_E0_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E1_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E2_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E3_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E4_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E5_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E6_CA_RESET 0x02
+#define XCHAL_ITLB_SET0_E7_CA_RESET 0x02
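+
+/* A reset CA of 0x02 is XCHAL_CA_BYPASS: all eight regions come out of
+   reset with the cache bypassed. */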
+
+
+/*** Data TLB: ***/
+
+#define XCHAL_DTLB_WAY_BITS 0 /* number of bits holding the ways */
+#define XCHAL_DTLB_WAYS 1 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_DTLB_ARF_WAYS 0 /* number of auto-refill ways */
+#define XCHAL_DTLB_SETS 1 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_DTLB_WAY0_SET 0
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_DTLB_ARF_SETS 0 /* number of auto-refill sets */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_DTLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
+
+
+/* DTLB way set 0 (group of ways 0 thru 0): */
+#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_DTLB_SET0_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET0_ENTRIES_LOG2 3 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET0_ENTRIES 8 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET0_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 29 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 29 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 29 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0x00000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_PPN_RESET 1 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_CA_RESET 1 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant VPN values for each entry of DTLB way set 0 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET0_E0_VPN_CONST 0x00000000
+#define XCHAL_DTLB_SET0_E1_VPN_CONST 0x20000000
+#define XCHAL_DTLB_SET0_E2_VPN_CONST 0x40000000
+#define XCHAL_DTLB_SET0_E3_VPN_CONST 0x60000000
+#define XCHAL_DTLB_SET0_E4_VPN_CONST 0x80000000
+#define XCHAL_DTLB_SET0_E5_VPN_CONST 0xA0000000
+#define XCHAL_DTLB_SET0_E6_VPN_CONST 0xC0000000
+#define XCHAL_DTLB_SET0_E7_VPN_CONST 0xE0000000
+/* Reset PPN values for each entry of DTLB way set 0 (because SET0_PPN_RESET is non-zero): */
+#define XCHAL_DTLB_SET0_E0_PPN_RESET 0x00000000
+#define XCHAL_DTLB_SET0_E1_PPN_RESET 0x20000000
+#define XCHAL_DTLB_SET0_E2_PPN_RESET 0x40000000
+#define XCHAL_DTLB_SET0_E3_PPN_RESET 0x60000000
+#define XCHAL_DTLB_SET0_E4_PPN_RESET 0x80000000
+#define XCHAL_DTLB_SET0_E5_PPN_RESET 0xA0000000
+#define XCHAL_DTLB_SET0_E6_PPN_RESET 0xC0000000
+#define XCHAL_DTLB_SET0_E7_PPN_RESET 0xE0000000
+/* Reset CA values for each entry of DTLB way set 0 (because SET0_CA_RESET is non-zero): */
+#define XCHAL_DTLB_SET0_E0_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E1_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E2_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E3_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E4_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E5_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E6_CA_RESET 0x02
+#define XCHAL_DTLB_SET0_E7_CA_RESET 0x02
+
+
+
+
+#endif /*XTENSA_CONFIG_CORE_MATMAP_H*/
+
diff --git a/target/xtensa/core-dsp3400/gdb-config.c.inc b/target/xtensa/core-dsp3400/gdb-config.c.inc
new file mode 100644
index 000000000..f7f5f75c9
--- /dev/null
+++ b/target/xtensa/core-dsp3400/gdb-config.c.inc
@@ -0,0 +1,400 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
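+
+/* Column guide (inferred from the entries below): each XTREG() gives
+   the GDB register number, byte offset into the register image, width
+   in bits, size and alignment in bytes, the Xtensa target register
+   number (e.g. 0x0100 + n for arN), flags, coprocessor index, a
+   type/group code, the register name, and, for coprocessor registers,
+   encoded fetch/store sequences. */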
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132,32, 4, 4,0x0200,0x0006,-2, 2,0x1100,lbeg, 0,0,0,0,0,0)
+ XTREG( 34,136,32, 4, 4,0x0201,0x0006,-2, 2,0x1100,lend, 0,0,0,0,0,0)
+ XTREG( 35,140,32, 4, 4,0x0202,0x0006,-2, 2,0x1100,lcount, 0,0,0,0,0,0)
+ XTREG( 36,144, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 37,148,32, 4, 4,0x0205,0x0006,-2, 2,0x1100,litbase, 0,0,0,0,0,0)
+ XTREG( 38,152, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 39,156, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,sr176, 0,0,0,0,0,0)
+ XTREG( 41,164,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,sr208, 0,0,0,0,0,0)
+ XTREG( 42,168,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 43,172,32, 4, 4,0x03e7,0x0006,-2, 3,0x0110,threadptr, 0,0,0,0,0,0)
+ XTREG( 44,176,16, 4, 4,0x0204,0x0006,-1, 2,0x1100,br, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 46,184,32, 4, 4,0x0300,0x000e,-1, 3,0x0210,expstate, 0,0,0,0,0,0)
+ XTREG( 47,188,32, 4, 4,0x0030,0x0006, 0, 4,0x0401,f0,
+ "03:03:44:00","03:03:04:00",0,0,0,0)
+ XTREG( 48,192,32, 4, 4,0x0031,0x0006, 0, 4,0x0401,f1,
+ "03:13:44:00","03:13:04:00",0,0,0,0)
+ XTREG( 49,196,32, 4, 4,0x0032,0x0006, 0, 4,0x0401,f2,
+ "03:23:44:00","03:23:04:00",0,0,0,0)
+ XTREG( 50,200,32, 4, 4,0x0033,0x0006, 0, 4,0x0401,f3,
+ "03:33:44:00","03:33:04:00",0,0,0,0)
+ XTREG( 51,204,32, 4, 4,0x0034,0x0006, 0, 4,0x0401,f4,
+ "03:43:44:00","03:43:04:00",0,0,0,0)
+ XTREG( 52,208,32, 4, 4,0x0035,0x0006, 0, 4,0x0401,f5,
+ "03:53:44:00","03:53:04:00",0,0,0,0)
+ XTREG( 53,212,32, 4, 4,0x0036,0x0006, 0, 4,0x0401,f6,
+ "03:63:44:00","03:63:04:00",0,0,0,0)
+ XTREG( 54,216,32, 4, 4,0x0037,0x0006, 0, 4,0x0401,f7,
+ "03:73:44:00","03:73:04:00",0,0,0,0)
+ XTREG( 55,220,32, 4, 4,0x0038,0x0006, 0, 4,0x0401,f8,
+ "03:83:44:00","03:83:04:00",0,0,0,0)
+ XTREG( 56,224,32, 4, 4,0x0039,0x0006, 0, 4,0x0401,f9,
+ "03:93:44:00","03:93:04:00",0,0,0,0)
+ XTREG( 57,228,32, 4, 4,0x003a,0x0006, 0, 4,0x0401,f10,
+ "03:a3:44:00","03:a3:04:00",0,0,0,0)
+ XTREG( 58,232,32, 4, 4,0x003b,0x0006, 0, 4,0x0401,f11,
+ "03:b3:44:00","03:b3:04:00",0,0,0,0)
+ XTREG( 59,236,32, 4, 4,0x003c,0x0006, 0, 4,0x0401,f12,
+ "03:c3:44:00","03:c3:04:00",0,0,0,0)
+ XTREG( 60,240,32, 4, 4,0x003d,0x0006, 0, 4,0x0401,f13,
+ "03:d3:44:00","03:d3:04:00",0,0,0,0)
+ XTREG( 61,244,32, 4, 4,0x003e,0x0006, 0, 4,0x0401,f14,
+ "03:e3:44:00","03:e3:04:00",0,0,0,0)
+ XTREG( 62,248,32, 4, 4,0x003f,0x0006, 0, 4,0x0401,f15,
+ "03:f3:44:00","03:f3:04:00",0,0,0,0)
+ XTREG( 63,252,32, 4, 4,0x03e8,0x0006, 0, 3,0x0100,fcr, 0,0,0,0,0,0)
+ XTREG( 64,256,32, 4, 4,0x03e9,0x0006, 0, 3,0x0100,fsr, 0,0,0,0,0,0)
+ XTREG( 65,260, 4, 4, 4,0x0301,0x0006, 2, 3,0x0210,sov, 0,0,0,0,0,0)
+ XTREG( 66,264, 1, 4, 4,0x0302,0x0006, 2, 3,0x0210,sat_mode, 0,0,0,0,0,0)
+ XTREG( 67,268, 6, 4, 4,0x0303,0x0006, 2, 3,0x0210,sar0, 0,0,0,0,0,0)
+ XTREG( 68,272, 6, 4, 4,0x0304,0x0006, 2, 3,0x0210,sar1, 0,0,0,0,0,0)
+ XTREG( 69,276, 6, 4, 4,0x0305,0x0006, 2, 3,0x0210,sar2, 0,0,0,0,0,0)
+ XTREG( 70,280, 6, 4, 4,0x0306,0x0006, 2, 3,0x0210,sar3, 0,0,0,0,0,0)
+ XTREG( 71,284, 6, 4, 4,0x0307,0x0006, 2, 3,0x0210,hsar0, 0,0,0,0,0,0)
+ XTREG( 72,288, 6, 4, 4,0x0308,0x0006, 2, 3,0x0210,hsar1, 0,0,0,0,0,0)
+ XTREG( 73,292, 6, 4, 4,0x0309,0x0006, 2, 3,0x0210,hsar2, 0,0,0,0,0,0)
+ XTREG( 74,296, 6, 4, 4,0x030a,0x0006, 2, 3,0x0210,hsar3, 0,0,0,0,0,0)
+ XTREG( 75,300,32, 4, 4,0x030b,0x0006, 2, 3,0x0200,max_reg_0, 0,0,0,0,0,0)
+ XTREG( 76,304,32, 4, 4,0x030c,0x0006, 2, 3,0x0200,max_reg_1, 0,0,0,0,0,0)
+ XTREG( 77,308,32, 4, 4,0x030d,0x0006, 2, 3,0x0200,max_reg_2, 0,0,0,0,0,0)
+ XTREG( 78,312,32, 4, 4,0x030e,0x0006, 2, 3,0x0200,max_reg_3, 0,0,0,0,0,0)
+ XTREG( 79,316,32, 4, 4,0x030f,0x0006, 2, 3,0x0200,arg_max_reg_0,0,0,0,0,0,0)
+ XTREG( 80,320,32, 4, 4,0x0310,0x0006, 2, 3,0x0200,arg_max_reg_1,0,0,0,0,0,0)
+ XTREG( 81,324,32, 4, 4,0x0311,0x0006, 2, 3,0x0200,arg_max_reg_2,0,0,0,0,0,0)
+ XTREG( 82,328,32, 4, 4,0x0312,0x0006, 2, 3,0x0200,arg_max_reg_3,0,0,0,0,0,0)
+ XTREG( 83,332,32, 4, 4,0x0313,0x0006, 2, 3,0x0200,nco_counter_0,0,0,0,0,0,0)
+ XTREG( 84,336,32, 4, 4,0x0314,0x0006, 2, 3,0x0200,nco_counter_1,0,0,0,0,0,0)
+ XTREG( 85,340,32, 4, 4,0x0315,0x0006, 2, 3,0x0200,nco_counter_2,0,0,0,0,0,0)
+ XTREG( 86,344,32, 4, 4,0x0316,0x0006, 2, 3,0x0200,nco_counter_3,0,0,0,0,0,0)
+ XTREG( 87,348, 4, 4, 4,0x0317,0x0006, 2, 3,0x0210,interp_ext_n,0,0,0,0,0,0)
+ XTREG( 88,352, 4, 4, 4,0x0318,0x0006, 2, 3,0x0210,interp_ext_l,0,0,0,0,0,0)
+ XTREG( 89,356,32, 4, 4,0x0319,0x0006, 2, 3,0x0200,llr_buf_0, 0,0,0,0,0,0)
+ XTREG( 90,360,32, 4, 4,0x031a,0x0006, 2, 3,0x0200,llr_buf_1, 0,0,0,0,0,0)
+ XTREG( 91,364,32, 4, 4,0x031b,0x0006, 2, 3,0x0200,llr_buf_2, 0,0,0,0,0,0)
+ XTREG( 92,368,32, 4, 4,0x031c,0x0006, 2, 3,0x0200,llr_buf_3, 0,0,0,0,0,0)
+ XTREG( 93,372,32, 4, 4,0x031d,0x0006, 2, 3,0x0200,llr_buf_4, 0,0,0,0,0,0)
+ XTREG( 94,376,32, 4, 4,0x031e,0x0006, 2, 3,0x0200,llr_buf_5, 0,0,0,0,0,0)
+ XTREG( 95,380,32, 4, 4,0x031f,0x0006, 2, 3,0x0200,llr_buf_6, 0,0,0,0,0,0)
+ XTREG( 96,384,32, 4, 4,0x0320,0x0006, 2, 3,0x0200,llr_buf_7, 0,0,0,0,0,0)
+ XTREG( 97,388,32, 4, 4,0x0321,0x0006, 2, 3,0x0200,llr_buf_8, 0,0,0,0,0,0)
+ XTREG( 98,392,32, 4, 4,0x0322,0x0006, 2, 3,0x0200,llr_buf_9, 0,0,0,0,0,0)
+ XTREG( 99,396,32, 4, 4,0x0323,0x0006, 2, 3,0x0200,llr_buf_10, 0,0,0,0,0,0)
+ XTREG(100,400,32, 4, 4,0x0324,0x0006, 2, 3,0x0200,llr_buf_11, 0,0,0,0,0,0)
+ XTREG(101,404,32, 4, 4,0x0325,0x0006, 2, 3,0x0200,llr_buf_12, 0,0,0,0,0,0)
+ XTREG(102,408,32, 4, 4,0x0326,0x0006, 2, 3,0x0200,llr_buf_13, 0,0,0,0,0,0)
+ XTREG(103,412,32, 4, 4,0x0327,0x0006, 2, 3,0x0200,llr_buf_14, 0,0,0,0,0,0)
+ XTREG(104,416,32, 4, 4,0x0328,0x0006, 2, 3,0x0200,llr_buf_15, 0,0,0,0,0,0)
+ XTREG(105,420,32, 4, 4,0x0329,0x0006, 2, 3,0x0200,llr_buf_16, 0,0,0,0,0,0)
+ XTREG(106,424,32, 4, 4,0x032a,0x0006, 2, 3,0x0200,llr_buf_17, 0,0,0,0,0,0)
+ XTREG(107,428,32, 4, 4,0x032b,0x0006, 2, 3,0x0200,llr_buf_18, 0,0,0,0,0,0)
+ XTREG(108,432,32, 4, 4,0x032c,0x0006, 2, 3,0x0200,llr_buf_19, 0,0,0,0,0,0)
+ XTREG(109,436,32, 4, 4,0x032d,0x0006, 2, 3,0x0200,llr_buf_20, 0,0,0,0,0,0)
+ XTREG(110,440,32, 4, 4,0x032e,0x0006, 2, 3,0x0200,llr_buf_21, 0,0,0,0,0,0)
+ XTREG(111,444,32, 4, 4,0x032f,0x0006, 2, 3,0x0200,llr_buf_22, 0,0,0,0,0,0)
+ XTREG(112,448,32, 4, 4,0x0330,0x0006, 2, 3,0x0200,llr_buf_23, 0,0,0,0,0,0)
+ XTREG(113,452,32, 4, 4,0x0331,0x0006, 2, 3,0x0200,smod_buf_0, 0,0,0,0,0,0)
+ XTREG(114,456,32, 4, 4,0x0332,0x0006, 2, 3,0x0200,smod_buf_1, 0,0,0,0,0,0)
+ XTREG(115,460,32, 4, 4,0x0333,0x0006, 2, 3,0x0200,smod_buf_2, 0,0,0,0,0,0)
+ XTREG(116,464,32, 4, 4,0x0334,0x0006, 2, 3,0x0200,smod_buf_3, 0,0,0,0,0,0)
+ XTREG(117,468,32, 4, 4,0x0335,0x0006, 2, 3,0x0200,smod_buf_4, 0,0,0,0,0,0)
+ XTREG(118,472,32, 4, 4,0x0336,0x0006, 2, 3,0x0200,smod_buf_5, 0,0,0,0,0,0)
+ XTREG(119,476,32, 4, 4,0x0337,0x0006, 2, 3,0x0200,smod_buf_6, 0,0,0,0,0,0)
+ XTREG(120,480,32, 4, 4,0x0338,0x0006, 2, 3,0x0200,smod_buf_7, 0,0,0,0,0,0)
+ XTREG(121,484, 8, 4, 4,0x0339,0x0006, 2, 3,0x0210,weight_reg, 0,0,0,0,0,0)
+ XTREG(122,488, 5, 4, 4,0x033a,0x0006, 2, 3,0x0210,scale_reg, 0,0,0,0,0,0)
+ XTREG(123,492, 6, 4, 4,0x033b,0x0006, 2, 3,0x0210,llr_pos, 0,0,0,0,0,0)
+ XTREG(124,496, 7, 4, 4,0x033c,0x0006, 2, 3,0x0210,smod_pos, 0,0,0,0,0,0)
+ XTREG(125,500,32, 4, 4,0x033d,0x0006, 2, 3,0x0210,perm_reg, 0,0,0,0,0,0)
+ XTREG(126,504,32, 4, 4,0x033e,0x0006, 2, 3,0x0200,smod_offset_table_0,0,0,0,0,0,0)
+ XTREG(127,508,32, 4, 4,0x033f,0x0006, 2, 3,0x0200,smod_offset_table_1,0,0,0,0,0,0)
+ XTREG(128,512,32, 4, 4,0x0340,0x0006, 2, 3,0x0200,smod_offset_table_2,0,0,0,0,0,0)
+ XTREG(129,516,32, 4, 4,0x0341,0x0006, 2, 3,0x0200,smod_offset_table_3,0,0,0,0,0,0)
+ XTREG(130,520, 4, 4, 4,0x0342,0x0006, 2, 3,0x0210,phasor_n, 0,0,0,0,0,0)
+ XTREG(131,524,16, 4, 4,0x0343,0x0006, 2, 3,0x0210,phasor_offset,0,0,0,0,0,0)
+ XTREG(132,528,320,64,16,0x1008,0x0006, 2, 4,0x0201,acu0,
+ "03:00:84:f8:03:10:84:8d:03:20:84:9d:03:30:84:ac","03:43:20:08:03:43:28:03:03:43:20:33:03:43:28:25",0,0,0,0)
+ XTREG(133,592,320,64,16,0x1009,0x0006, 2, 4,0x0201,acu1,
+ "03:00:94:f8:03:10:94:8d:03:20:94:9d:03:30:94:ac","03:43:21:08:03:43:29:03:03:43:21:33:03:43:29:25",0,0,0,0)
+ XTREG(134,656,320,64,16,0x100a,0x0006, 2, 4,0x0201,acu2,
+ "03:00:a4:f8:03:10:a4:8d:03:20:a4:9d:03:30:a4:ac","03:43:22:08:03:43:2a:03:03:43:22:33:03:43:2a:25",0,0,0,0)
+ XTREG(135,720,320,64,16,0x100b,0x0006, 2, 4,0x0201,acu3,
+ "03:00:b4:f8:03:10:b4:8d:03:20:b4:9d:03:30:b4:ac","03:43:23:08:03:43:2b:03:03:43:23:33:03:43:2b:25",0,0,0,0)
+ XTREG(136,784,320,64,16,0x100c,0x0006, 2, 4,0x0201,acu4,
+ "03:00:c4:f8:03:10:c4:8d:03:20:c4:9d:03:30:c4:ac","03:43:24:08:03:43:2c:03:03:43:24:33:03:43:2c:25",0,0,0,0)
+ XTREG(137,848,320,64,16,0x100d,0x0006, 2, 4,0x0201,acu5,
+ "03:00:d4:f8:03:10:d4:8d:03:20:d4:9d:03:30:d4:ac","03:43:25:08:03:43:2d:03:03:43:25:33:03:43:2d:25",0,0,0,0)
+ XTREG(138,912,320,64,16,0x100e,0x0006, 2, 4,0x0201,acu6,
+ "03:00:e4:f8:03:10:e4:8d:03:20:e4:9d:03:30:e4:ac","03:43:26:08:03:43:2e:03:03:43:26:33:03:43:2e:25",0,0,0,0)
+ XTREG(139,976,320,64,16,0x100f,0x0006, 2, 4,0x0201,acu7,
+ "03:00:f4:f8:03:10:f4:8d:03:20:f4:9d:03:30:f4:ac","03:43:27:08:03:43:2f:03:03:43:27:33:03:43:2f:25",0,0,0,0)
+ XTREG(140,1040,128,16,16,0x1010,0x0006, 2, 4,0x0201,cm0,
+ "03:00:04:5d","03:40:03:07",0,0,0,0)
+ XTREG(141,1056,128,16,16,0x1011,0x0006, 2, 4,0x0201,cm1,
+ "03:00:14:5d","03:40:13:07",0,0,0,0)
+ XTREG(142,1072,128,16,16,0x1012,0x0006, 2, 4,0x0201,cm2,
+ "03:00:24:5d","03:40:23:07",0,0,0,0)
+ XTREG(143,1088,128,16,16,0x1013,0x0006, 2, 4,0x0201,cm3,
+ "03:00:34:5d","03:40:33:07",0,0,0,0)
+ XTREG(144,1104,128,16,16,0x1014,0x0006, 2, 4,0x0201,cm4,
+ "03:00:44:5d","03:40:43:07",0,0,0,0)
+ XTREG(145,1120,128,16,16,0x1015,0x0006, 2, 4,0x0201,cm5,
+ "03:00:54:5d","03:40:53:07",0,0,0,0)
+ XTREG(146,1136,128,16,16,0x1016,0x0006, 2, 4,0x0201,cm6,
+ "03:00:64:5d","03:40:63:07",0,0,0,0)
+ XTREG(147,1152,128,16,16,0x1017,0x0006, 2, 4,0x0201,cm7,
+ "03:00:74:5d","03:40:73:07",0,0,0,0)
+ XTREG(148,1168,128,16,16,0x1018,0x0006, 2, 4,0x0201,cm8,
+ "03:00:84:5d","03:40:83:07",0,0,0,0)
+ XTREG(149,1184,128,16,16,0x1019,0x0006, 2, 4,0x0201,cm9,
+ "03:00:94:5d","03:40:93:07",0,0,0,0)
+ XTREG(150,1200,128,16,16,0x101a,0x0006, 2, 4,0x0201,cm10,
+ "03:00:a4:5d","03:40:a3:07",0,0,0,0)
+ XTREG(151,1216,128,16,16,0x101b,0x0006, 2, 4,0x0201,cm11,
+ "03:00:b4:5d","03:40:b3:07",0,0,0,0)
+ XTREG(152,1232,128,16,16,0x101c,0x0006, 2, 4,0x0201,cm12,
+ "03:00:c4:5d","03:40:c3:07",0,0,0,0)
+ XTREG(153,1248,128,16,16,0x101d,0x0006, 2, 4,0x0201,cm13,
+ "03:00:d4:5d","03:40:d3:07",0,0,0,0)
+ XTREG(154,1264,128,16,16,0x101e,0x0006, 2, 4,0x0201,cm14,
+ "03:00:e4:5d","03:40:e3:07",0,0,0,0)
+ XTREG(155,1280,128,16,16,0x101f,0x0006, 2, 4,0x0201,cm15,
+ "03:00:f4:5d","03:40:f3:07",0,0,0,0)
+ XTREG(156,1296,256,32,16,0x1020,0x0006, 2, 4,0x0201,pq0,
+ "03:00:04:7c:03:10:04:cc","03:40:02:07:03:40:0c:07",0,0,0,0)
+ XTREG(157,1328,256,32,16,0x1021,0x0006, 2, 4,0x0201,pq1,
+ "03:00:14:7c:03:10:14:cc","03:40:12:07:03:40:1c:07",0,0,0,0)
+ XTREG(158,1360,256,32,16,0x1022,0x0006, 2, 4,0x0201,pq2,
+ "03:00:24:7c:03:10:24:cc","03:40:22:07:03:40:2c:07",0,0,0,0)
+ XTREG(159,1392,256,32,16,0x1023,0x0006, 2, 4,0x0201,pq3,
+ "03:00:34:7c:03:10:34:cc","03:40:32:07:03:40:3c:07",0,0,0,0)
+ XTREG(160,1424,256,32,16,0x1024,0x0006, 2, 4,0x0201,pq4,
+ "03:00:44:7c:03:10:44:cc","03:40:42:07:03:40:4c:07",0,0,0,0)
+ XTREG(161,1456,256,32,16,0x1025,0x0006, 2, 4,0x0201,pq5,
+ "03:00:54:7c:03:10:54:cc","03:40:52:07:03:40:5c:07",0,0,0,0)
+ XTREG(162,1488,256,32,16,0x1026,0x0006, 2, 4,0x0201,pq6,
+ "03:00:64:7c:03:10:64:cc","03:40:62:07:03:40:6c:07",0,0,0,0)
+ XTREG(163,1520,256,32,16,0x1027,0x0006, 2, 4,0x0201,pq7,
+ "03:00:74:7c:03:10:74:cc","03:40:72:07:03:40:7c:07",0,0,0,0)
+ XTREG(164,1552,256,32,16,0x1028,0x0006, 2, 4,0x0201,pq8,
+ "03:00:84:7c:03:10:84:cc","03:40:82:07:03:40:8c:07",0,0,0,0)
+ XTREG(165,1584,256,32,16,0x1029,0x0006, 2, 4,0x0201,pq9,
+ "03:00:94:7c:03:10:94:cc","03:40:92:07:03:40:9c:07",0,0,0,0)
+ XTREG(166,1616,256,32,16,0x102a,0x0006, 2, 4,0x0201,pq10,
+ "03:00:a4:7c:03:10:a4:cc","03:40:a2:07:03:40:ac:07",0,0,0,0)
+ XTREG(167,1648,256,32,16,0x102b,0x0006, 2, 4,0x0201,pq11,
+ "03:00:b4:7c:03:10:b4:cc","03:40:b2:07:03:40:bc:07",0,0,0,0)
+ XTREG(168,1680,256,32,16,0x102c,0x0006, 2, 4,0x0201,pq12,
+ "03:00:c4:7c:03:10:c4:cc","03:40:c2:07:03:40:cc:07",0,0,0,0)
+ XTREG(169,1712,256,32,16,0x102d,0x0006, 2, 4,0x0201,pq13,
+ "03:00:d4:7c:03:10:d4:cc","03:40:d2:07:03:40:dc:07",0,0,0,0)
+ XTREG(170,1744,256,32,16,0x102e,0x0006, 2, 4,0x0201,pq14,
+ "03:00:e4:7c:03:10:e4:cc","03:40:e2:07:03:40:ec:07",0,0,0,0)
+ XTREG(171,1776,256,32,16,0x102f,0x0006, 2, 4,0x0201,pq15,
+ "03:00:f4:7c:03:10:f4:cc","03:40:f2:07:03:40:fc:07",0,0,0,0)
+ XTREG(172,1808,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0)
+ XTREG(173,1812, 2, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0)
+ XTREG(174,1816, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG(175,1820,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG(176,1824,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0)
+ XTREG(177,1828,32, 4, 4,0x0281,0x0007,-2, 2,0x1000,ibreaka1, 0,0,0,0,0,0)
+ XTREG(178,1832,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0)
+ XTREG(179,1836,32, 4, 4,0x0291,0x0007,-2, 2,0x1000,dbreaka1, 0,0,0,0,0,0)
+ XTREG(180,1840,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0)
+ XTREG(181,1844,32, 4, 4,0x02a1,0x0007,-2, 2,0x1000,dbreakc1, 0,0,0,0,0,0)
+ XTREG(182,1848,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG(183,1852,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG(184,1856,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0)
+ XTREG(185,1860,32, 4, 4,0x02b4,0x0007,-2, 2,0x1000,epc4, 0,0,0,0,0,0)
+ XTREG(186,1864,32, 4, 4,0x02b5,0x0007,-2, 2,0x1000,epc5, 0,0,0,0,0,0)
+ XTREG(187,1868,32, 4, 4,0x02b6,0x0007,-2, 2,0x1000,epc6, 0,0,0,0,0,0)
+ XTREG(188,1872,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG(189,1876,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG(190,1880,19, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0)
+ XTREG(191,1884,19, 4, 4,0x02c4,0x0007,-2, 2,0x1000,eps4, 0,0,0,0,0,0)
+ XTREG(192,1888,19, 4, 4,0x02c5,0x0007,-2, 2,0x1000,eps5, 0,0,0,0,0,0)
+ XTREG(193,1892,19, 4, 4,0x02c6,0x0007,-2, 2,0x1000,eps6, 0,0,0,0,0,0)
+ XTREG(194,1896,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG(195,1900,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG(196,1904,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0)
+ XTREG(197,1908,32, 4, 4,0x02d4,0x0007,-2, 2,0x1000,excsave4, 0,0,0,0,0,0)
+ XTREG(198,1912,32, 4, 4,0x02d5,0x0007,-2, 2,0x1000,excsave5, 0,0,0,0,0,0)
+ XTREG(199,1916,32, 4, 4,0x02d6,0x0007,-2, 2,0x1000,excsave6, 0,0,0,0,0,0)
+ XTREG(200,1920, 4, 4, 4,0x02e0,0x0007,-2, 2,0x1000,cpenable, 0,0,0,0,0,0)
+ XTREG(201,1924,13, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG(202,1928,13, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG(203,1932,13, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG(204,1936,13, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG(205,1940,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG(206,1944, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG(207,1948,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG(208,1952,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG(209,1956,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG(210,1960,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG(211,1964, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG(212,1968,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG(213,1972,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG(214,1976,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG(215,1980,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0)
+ XTREG(216,1984,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0)
+ XTREG(217,1988,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0)
+ XTREG(218,1992,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0)
+ XTREG(219,1996,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0)
+ XTREG(220,2000,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0)
+ XTREG(221,2004,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0)
+ XTREG(222,2008,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0)
+ XTREG(223,2012,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0)
+ XTREG(224,2016,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0)
+ XTREG(225,2020,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0)
+ XTREG(226,2024,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0)
+ XTREG(227,2028,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0)
+ XTREG(228,2032,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0)
+ XTREG(229,2036,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0)
+ XTREG(230,2040,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0)
+ XTREG(231,2044, 1, 1, 1,0x0010,0x0006,-2, 6,0x1010,b0,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(232,2045, 1, 1, 1,0x0011,0x0006,-2, 6,0x1010,b1,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(233,2046, 1, 1, 1,0x0012,0x0006,-2, 6,0x1010,b2,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(234,2047, 1, 1, 1,0x0013,0x0006,-2, 6,0x1010,b3,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(235,2048, 1, 1, 1,0x0014,0x0006,-2, 6,0x1010,b4,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(236,2049, 1, 1, 1,0x0015,0x0006,-2, 6,0x1010,b5,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG(237,2050, 1, 1, 1,0x0016,0x0006,-2, 6,0x1010,b6,
+ 0,0,&xtensa_mask6,0,0,0)
+ XTREG(238,2051, 1, 1, 1,0x0017,0x0006,-2, 6,0x1010,b7,
+ 0,0,&xtensa_mask7,0,0,0)
+ XTREG(239,2052, 1, 1, 1,0x0018,0x0006,-2, 6,0x1010,b8,
+ 0,0,&xtensa_mask8,0,0,0)
+ XTREG(240,2053, 1, 1, 1,0x0019,0x0006,-2, 6,0x1010,b9,
+ 0,0,&xtensa_mask9,0,0,0)
+ XTREG(241,2054, 1, 1, 1,0x001a,0x0006,-2, 6,0x1010,b10,
+ 0,0,&xtensa_mask10,0,0,0)
+ XTREG(242,2055, 1, 1, 1,0x001b,0x0006,-2, 6,0x1010,b11,
+ 0,0,&xtensa_mask11,0,0,0)
+ XTREG(243,2056, 1, 1, 1,0x001c,0x0006,-2, 6,0x1010,b12,
+ 0,0,&xtensa_mask12,0,0,0)
+ XTREG(244,2057, 1, 1, 1,0x001d,0x0006,-2, 6,0x1010,b13,
+ 0,0,&xtensa_mask13,0,0,0)
+ XTREG(245,2058, 1, 1, 1,0x001e,0x0006,-2, 6,0x1010,b14,
+ 0,0,&xtensa_mask14,0,0,0)
+ XTREG(246,2059, 1, 1, 1,0x001f,0x0006,-2, 6,0x1010,b15,
+ 0,0,&xtensa_mask15,0,0,0)
+ XTREG(247,2060, 4, 4, 4,0x2007,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask16,0,0,0)
+ XTREG(248,2064, 1, 4, 4,0x2008,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask17,0,0,0)
+ XTREG(249,2068, 1, 4, 4,0x2009,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask18,0,0,0)
+ XTREG(250,2072, 1, 4, 4,0x200a,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask19,0,0,0)
+ XTREG(251,2076, 2, 4, 4,0x200b,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask20,0,0,0)
+ XTREG(252,2080, 4, 4, 4,0x200c,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask21,0,0,0)
+ XTREG(253,2084,20, 4, 4,0x200d,0x0006,-2, 6,0x1010,litbaddr,
+ 0,0,&xtensa_mask22,0,0,0)
+ XTREG(254,2088, 1, 4, 4,0x200e,0x0006,-2, 6,0x1010,litben,
+ 0,0,&xtensa_mask23,0,0,0)
+ XTREG(255,2092, 4, 4, 4,0x2013,0x0006,-2, 6,0x1010,dbnum,
+ 0,0,&xtensa_mask24,0,0,0)
+ XTREG(256,2096, 2, 4, 4,0x2014,0x0006, 0, 5,0x1010,roundmode,
+ 0,0,&xtensa_mask25,0,0,0)
+ XTREG(257,2100, 1, 4, 4,0x2015,0x0006, 0, 5,0x1010,invalidenable,
+ 0,0,&xtensa_mask26,0,0,0)
+ XTREG(258,2104, 1, 4, 4,0x2016,0x0006, 0, 5,0x1010,divzeroenable,
+ 0,0,&xtensa_mask27,0,0,0)
+ XTREG(259,2108, 1, 4, 4,0x2017,0x0006, 0, 5,0x1010,overflowenable,
+ 0,0,&xtensa_mask28,0,0,0)
+ XTREG(260,2112, 1, 4, 4,0x2018,0x0006, 0, 5,0x1010,underflowenable,
+ 0,0,&xtensa_mask29,0,0,0)
+ XTREG(261,2116, 1, 4, 4,0x2019,0x0006, 0, 5,0x1010,inexactenable,
+ 0,0,&xtensa_mask30,0,0,0)
+ XTREG(262,2120, 1, 4, 4,0x201a,0x0006, 0, 5,0x1010,invalidflag,
+ 0,0,&xtensa_mask31,0,0,0)
+ XTREG(263,2124, 1, 4, 4,0x201b,0x0006, 0, 5,0x1010,divzeroflag,
+ 0,0,&xtensa_mask32,0,0,0)
+ XTREG(264,2128, 1, 4, 4,0x201c,0x0006, 0, 5,0x1010,overflowflag,
+ 0,0,&xtensa_mask33,0,0,0)
+ XTREG(265,2132, 1, 4, 4,0x201d,0x0006, 0, 5,0x1010,underflowflag,
+ 0,0,&xtensa_mask34,0,0,0)
+ XTREG(266,2136, 1, 4, 4,0x201e,0x0006, 0, 5,0x1010,inexactflag,
+ 0,0,&xtensa_mask35,0,0,0)
+ XTREG(267,2140,20, 4, 4,0x201f,0x0006, 0, 5,0x1010,fpreserved20,
+ 0,0,&xtensa_mask36,0,0,0)
+ XTREG(268,2144,20, 4, 4,0x2020,0x0006, 0, 5,0x1010,fpreserved20a,
+ 0,0,&xtensa_mask37,0,0,0)
+ XTREG(269,2148, 5, 4, 4,0x2021,0x0006, 0, 5,0x1010,fpreserved5,
+ 0,0,&xtensa_mask38,0,0,0)
+ XTREG(270,2152, 7, 4, 4,0x2022,0x0006, 0, 5,0x1010,fpreserved7,
+ 0,0,&xtensa_mask39,0,0,0)
+ XTREG(271,2156,128,16, 4,0x2023,0x0006, 2, 5,0x0210,max_reg,
+ 0,0,&xtensa_mask40,0,0,0)
+ XTREG(272,2172,128,16, 4,0x2024,0x0006, 2, 5,0x0210,arg_max_reg,
+ 0,0,&xtensa_mask41,0,0,0)
+ XTREG(273,2188,128,16, 4,0x2025,0x0006, 2, 5,0x0210,nco_counter,
+ 0,0,&xtensa_mask42,0,0,0)
+ XTREG(274,2204,768,96, 4,0x2026,0x0006, 2, 5,0x0210,llr_buf,
+ 0,0,&xtensa_mask43,0,0,0)
+ XTREG(275,2300,256,32, 4,0x2027,0x0006, 2, 5,0x0210,smod_buf,
+ 0,0,&xtensa_mask44,0,0,0)
+ XTREG_END
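+/* Editor's note (assumption): in QEMU these rows are consumed through an
+   XTREG(idx, ofs, bi, sz, al, no, flags, cp, typ, group, name, ...) overlay
+   macro (see target/xtensa/overlay_tool.h), i.e. GDB register index, byte
+   offset, bit width, byte size, alignment, target number, flags, coprocessor,
+   type and group, followed by optional codings and a mask pointer. */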
diff --git a/target/xtensa/core-dsp3400/xtensa-modules.c.inc b/target/xtensa/core-dsp3400/xtensa-modules.c.inc
new file mode 100644
index 000000000..28ea3d75f
--- /dev/null
+++ b/target/xtensa/core-dsp3400/xtensa-modules.c.inc
@@ -0,0 +1,171906 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
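+/* Each row is { name, register number, is_user } (xtensa_sysreg_internal).
+   is_user == 1 marks user registers (the RUR/WUR space: THREADPTR, FCR, FSR
+   and the TIE states below); is_user == 0 marks special registers (RSR/WSR).
+   Numbers may repeat across the two spaces (231 is both VECBASE and
+   THREADPTR), and unnamed registers appear under their number ("176",
+   "208"). */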
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "BR", 4, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "176", 176, 0 },
+ { "208", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "LITBASE", 5, 0 },
+ { "PS", 230, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "THREADPTR", 231, 1 },
+ { "FCR", 232, 1 },
+ { "FSR", 233, 1 },
+ { "EXPSTATE", 0, 1 },
+ { "SOV", 1, 1 },
+ { "SAT_MODE", 2, 1 },
+ { "SAR0", 3, 1 },
+ { "SAR1", 4, 1 },
+ { "SAR2", 5, 1 },
+ { "SAR3", 6, 1 },
+ { "HSAR0", 7, 1 },
+ { "HSAR1", 8, 1 },
+ { "HSAR2", 9, 1 },
+ { "HSAR3", 10, 1 },
+ { "MAX_REG_0", 11, 1 },
+ { "MAX_REG_1", 12, 1 },
+ { "MAX_REG_2", 13, 1 },
+ { "MAX_REG_3", 14, 1 },
+ { "ARG_MAX_REG_0", 15, 1 },
+ { "ARG_MAX_REG_1", 16, 1 },
+ { "ARG_MAX_REG_2", 17, 1 },
+ { "ARG_MAX_REG_3", 18, 1 },
+ { "NCO_COUNTER_0", 19, 1 },
+ { "NCO_COUNTER_1", 20, 1 },
+ { "NCO_COUNTER_2", 21, 1 },
+ { "NCO_COUNTER_3", 22, 1 },
+ { "INTERP_EXT_N", 23, 1 },
+ { "INTERP_EXT_L", 24, 1 },
+ { "LLR_BUF_0", 25, 1 },
+ { "LLR_BUF_1", 26, 1 },
+ { "LLR_BUF_2", 27, 1 },
+ { "LLR_BUF_3", 28, 1 },
+ { "LLR_BUF_4", 29, 1 },
+ { "LLR_BUF_5", 30, 1 },
+ { "LLR_BUF_6", 31, 1 },
+ { "LLR_BUF_7", 32, 1 },
+ { "LLR_BUF_8", 33, 1 },
+ { "LLR_BUF_9", 34, 1 },
+ { "LLR_BUF_10", 35, 1 },
+ { "LLR_BUF_11", 36, 1 },
+ { "LLR_BUF_12", 37, 1 },
+ { "LLR_BUF_13", 38, 1 },
+ { "LLR_BUF_14", 39, 1 },
+ { "LLR_BUF_15", 40, 1 },
+ { "LLR_BUF_16", 41, 1 },
+ { "LLR_BUF_17", 42, 1 },
+ { "LLR_BUF_18", 43, 1 },
+ { "LLR_BUF_19", 44, 1 },
+ { "LLR_BUF_20", 45, 1 },
+ { "LLR_BUF_21", 46, 1 },
+ { "LLR_BUF_22", 47, 1 },
+ { "LLR_BUF_23", 48, 1 },
+ { "SMOD_BUF_0", 49, 1 },
+ { "SMOD_BUF_1", 50, 1 },
+ { "SMOD_BUF_2", 51, 1 },
+ { "SMOD_BUF_3", 52, 1 },
+ { "SMOD_BUF_4", 53, 1 },
+ { "SMOD_BUF_5", 54, 1 },
+ { "SMOD_BUF_6", 55, 1 },
+ { "SMOD_BUF_7", 56, 1 },
+ { "WEIGHT_REG", 57, 1 },
+ { "SCALE_REG", 58, 1 },
+ { "LLR_POS", 59, 1 },
+ { "SMOD_POS", 60, 1 },
+ { "PERM_REG", 61, 1 },
+ { "SMOD_OFFSET_TABLE_0", 62, 1 },
+ { "SMOD_OFFSET_TABLE_1", 63, 1 },
+ { "SMOD_OFFSET_TABLE_2", 64, 1 },
+ { "SMOD_OFFSET_TABLE_3", 65, 1 },
+ { "PHASOR_N", 66, 1 },
+ { "PHASOR_OFFSET", 67, 1 }
+};
+
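+/* NUM_SYSREGS matches the 125 rows of sysregs[] above.  MAX_SPECIAL_REG is
+   the highest special-register number in the table (CCOMPARE1, 241) and
+   MAX_USER_REG the highest user-register number (FSR, 233). */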
+#define NUM_SYSREGS 125
+#define MAX_SPECIAL_REG 241
+#define MAX_USER_REG 233
+
+
+/* Processor states. */
+
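+/* Each row is { name, width in bits, flags } (xtensa_state_internal); the
+   only flags used here are XTENSA_STATE_IS_EXPORTED (EXPSTATE) and
+   XTENSA_STATE_IS_SHARED_OR (SOV). */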
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 13, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EPS2", 13, 0 },
+ { "EPS3", 13, 0 },
+ { "EPS4", 13, 0 },
+ { "EPS5", 13, 0 },
+ { "EPS6", 13, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "LITBADDR", 20, 0 },
+ { "LITBEN", 1, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 13, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CPENABLE", 4, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "RoundMode", 2, 0 },
+ { "InvalidEnable", 1, 0 },
+ { "DivZeroEnable", 1, 0 },
+ { "OverflowEnable", 1, 0 },
+ { "UnderflowEnable", 1, 0 },
+ { "InexactEnable", 1, 0 },
+ { "InvalidFlag", 1, 0 },
+ { "DivZeroFlag", 1, 0 },
+ { "OverflowFlag", 1, 0 },
+ { "UnderflowFlag", 1, 0 },
+ { "InexactFlag", 1, 0 },
+ { "FPreserved20", 20, 0 },
+ { "FPreserved20a", 20, 0 },
+ { "FPreserved5", 5, 0 },
+ { "FPreserved7", 7, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED },
+ { "SOV", 4, XTENSA_STATE_IS_SHARED_OR },
+ { "SAT_MODE", 1, 0 },
+ { "SAR0", 6, 0 },
+ { "SAR1", 6, 0 },
+ { "SAR2", 6, 0 },
+ { "SAR3", 6, 0 },
+ { "HSAR0", 6, 0 },
+ { "HSAR1", 6, 0 },
+ { "HSAR2", 6, 0 },
+ { "HSAR3", 6, 0 },
+ { "MAX_REG", 128, 0 },
+ { "ARG_MAX_REG", 128, 0 },
+ { "NCO_COUNTER", 128, 0 },
+ { "INTERP_EXT_N", 4, 0 },
+ { "INTERP_EXT_L", 4, 0 },
+ { "LLR_BUF", 768, 0 },
+ { "SMOD_BUF", 256, 0 },
+ { "WEIGHT_REG", 8, 0 },
+ { "SCALE_REG", 5, 0 },
+ { "LLR_POS", 6, 0 },
+ { "SMOD_POS", 7, 0 },
+ { "PERM_REG", 32, 0 },
+ { "SMOD_OFFSET_TABLE", 128, 0 },
+ { "PHASOR_N", 4, 0 },
+ { "PHASOR_OFFSET", 16, 0 }
+};
+
+#define NUM_STATES 100
+
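+/* The enumerators below mirror states[] in the same order, so each STATE_*
+   value is also that state's index into the array. */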
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_THREADPTR,
+ STATE_LITBADDR,
+ STATE_LITBEN,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CPENABLE,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_RoundMode,
+ STATE_InvalidEnable,
+ STATE_DivZeroEnable,
+ STATE_OverflowEnable,
+ STATE_UnderflowEnable,
+ STATE_InexactEnable,
+ STATE_InvalidFlag,
+ STATE_DivZeroFlag,
+ STATE_OverflowFlag,
+ STATE_UnderflowFlag,
+ STATE_InexactFlag,
+ STATE_FPreserved20,
+ STATE_FPreserved20a,
+ STATE_FPreserved5,
+ STATE_FPreserved7,
+ STATE_EXPSTATE,
+ STATE_SOV,
+ STATE_SAT_MODE,
+ STATE_SAR0,
+ STATE_SAR1,
+ STATE_SAR2,
+ STATE_SAR3,
+ STATE_HSAR0,
+ STATE_HSAR1,
+ STATE_HSAR2,
+ STATE_HSAR3,
+ STATE_MAX_REG,
+ STATE_ARG_MAX_REG,
+ STATE_NCO_COUNTER,
+ STATE_INTERP_EXT_N,
+ STATE_INTERP_EXT_L,
+ STATE_LLR_BUF,
+ STATE_SMOD_BUF,
+ STATE_WEIGHT_REG,
+ STATE_SCALE_REG,
+ STATE_LLR_POS,
+ STATE_SMOD_POS,
+ STATE_PERM_REG,
+ STATE_SMOD_OFFSET_TABLE,
+ STATE_PHASOR_N,
+ STATE_PHASOR_OFFSET
+};
+
+
+/* Field definitions. */
+
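+/* Note on the generated accessors below: a get helper isolates each bit
+   range of the 32-bit instruction word with the shift pair
+   ((insn[0] << a) >> b), which yields the (32 - b)-bit field whose most
+   significant bit sits at bit (31 - a), and concatenates multi-part fields
+   MSB first.  The matching set helper splits the value with the same idiom
+   and deposits each piece through a clear-and-or mask, so for example
+   Field_t covers instruction bits 7..4 and set(insn, get(insn)) leaves the
+   word unchanged.  The dsp340050b49a6c_fld* names are auto-generated TIE
+   field identifiers. */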
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2098inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2098inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2019_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2019_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2100inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2100inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2102inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2102inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2186inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2186inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2185inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2185inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2149inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2149inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3627inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3627inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2187inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2187inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2101inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2101inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2103inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2103inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2189inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2189inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2188inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2188inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2104inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2104inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2190inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2190inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2094inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2094inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2105inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2105inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2191inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2191inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2192inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2192inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2194inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2194inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2197inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2197inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2160inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2160inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2173inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2173inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2112inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2112inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2199inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2199inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2200inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2200inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2114inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2114inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2113inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2113inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2201inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2201inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2115inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2115inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2215inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2215inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3630inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3630inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2203inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2203inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2254_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2254_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2116inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2116inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2117inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2117inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2119inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2119inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2089inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2089inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_r2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3631inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3631inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2085inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 11) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2085inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2088inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2088inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3633inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3633inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2082inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2082inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2083inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2083inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2084inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2084inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2086inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2086inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3634_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3634_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2156inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2156inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2021_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2021_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2035_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2035_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2225inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2225inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2226inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2226inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2228inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2228inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2230inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2230inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
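+
+/*
+ * Note that several adjacent accessors here (fld2225inst, fld2226inst,
+ * fld2228inst, fld2230inst) are byte-for-byte identical and differ
+ * only in name; the generator apparently emits one accessor per TIE
+ * field reference rather than de-duplicating identical bodies.
+ */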
+
+static unsigned
+Field_dsp340050b49a6c_fld2222inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2222inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2221inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2221inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2238inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2238inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2239inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2239inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2241inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2241inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2223inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2223inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2232inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2232inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2234inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2234inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2237inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2237inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2240inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2240inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2229inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2229inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2224inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2224inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2227inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2227inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2231inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2231inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2247inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2247inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2091inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2091inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2153inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2153inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3635inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3635inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2154inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2154inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3636inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3636inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2155inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2155inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3637inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3637inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2134inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2134inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
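+
+/*
+ * For split fields such as fld2134inst above, the get() accessor
+ * concatenates the sub-fields into tie_t most-significant part first,
+ * while the set() accessor peels val apart least-significant part
+ * first, so the two stay exact inverses over the field's width.
+ */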
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 19) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x1e00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2096inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2096inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2244inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2244inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2245inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2245inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2246inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2246inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3638inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3638inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2235inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2235inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2157inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2157inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2253inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2253inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3639inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3639inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2255inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2255inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3640inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3640inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2171inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2171inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2172inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2172inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2174inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2174inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2158inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2158inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2205inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2205inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2159inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2159inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2161inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2161inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2168inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2168inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2136inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2136inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2090inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2090inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2184inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2184inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3642inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3642inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2252inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2252inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3643inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3643inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2092inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2092inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2216inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2216inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3644inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3644inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2217inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2217inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3645inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3645inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2208inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2208inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2209inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2209inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2210inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2210inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2212inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2212inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2213inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2213inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3647inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3647inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2214inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2214inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3648inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3648inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2120inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2120inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2122inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2122inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2123inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2123inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2125inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2125inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2129inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 5) | ((insn[0] << 23) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2129inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f0) | (tie_t << 4);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2124inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2124inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2126inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2126inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2127inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2127inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2128inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2128inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2131inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 5) | ((insn[0] << 23) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2131inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f0) | (tie_t << 4);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2138inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2138inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2146inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2146inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3649inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 19) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3649inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2147inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2147inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3650inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 19) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3650inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f00) | (tie_t << 8);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2139inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2139inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2140inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2140inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2142inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2142inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2248inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2248inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3651inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3651inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2250inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2250inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3653inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3653inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2257inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2257inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3654inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3654inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2249inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2249inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3655inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3655inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2107inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2107inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2118inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2118inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2108inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2108inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2109inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2109inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2111inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2111inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2110inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2110inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2145inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2145inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2141inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2141inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2143inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 16) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2143inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff00) | (tie_t << 8);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2144inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2144inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2204inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2204inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3656inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3656inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2195inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2195inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2196inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2196inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2198inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2198inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2169inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2169inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2220inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2220inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2106inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2106inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
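Fields split across non-contiguous bit ranges, such as fld2106 above (bits 23:20, 15:13, and bit 7), are read MSB-first one chunk at a time and written back by peeling chunks off the value with the same shift pairs. A minimal round-trip check of that layout, again as a standalone editorial sketch with uint32_t in place of the file's uint32:

/* Editor's sketch: set/get round trip for a three-chunk split field. */
#include <assert.h>
#include <stdint.h>

static unsigned split_get(uint32_t insn)
{
    unsigned t = 0;
    t = (t << 4) | ((insn << 8) >> 28);   /* bits 23:20 */
    t = (t << 3) | ((insn << 16) >> 29);  /* bits 15:13 */
    t = (t << 1) | ((insn << 24) >> 31);  /* bit 7 */
    return t;
}

static void split_set(uint32_t *insn, uint32_t val)
{
    uint32_t t;
    t = (val << 31) >> 31;                 /* lowest chunk -> bit 7 */
    *insn = (*insn & ~0x80u) | (t << 7);
    t = (val << 28) >> 29;                 /* middle chunk -> bits 15:13 */
    *insn = (*insn & ~0xe000u) | (t << 13);
    t = (val << 24) >> 28;                 /* top chunk -> bits 23:20 */
    *insn = (*insn & ~0xf00000u) | (t << 20);
}

int main(void)
{
    for (uint32_t v = 0; v < 0x100; v++) {  /* the field is 8 bits wide */
        uint32_t insn = 0xffffffffu;
        split_set(&insn, v);
        assert(split_get(insn) == v);       /* surrounding bits untouched */
    }
    return 0;
}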
+static unsigned
+Field_dsp340050b49a6c_fld2151inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2151inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3657inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3657inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2251inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2251inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3658inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3658inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2206inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2206inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2202inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2202inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2095inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2095inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2132inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2132inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3659inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3659inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2099inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2099inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2137inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2137inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2133inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2133inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3660inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3660inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2175inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2175inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2177inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2177inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2242inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2242inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3661inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3661inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2162inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2162inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2164inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2164inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2163inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2163inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2218inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2218inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2219inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2219inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2207inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2207inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2211inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2211inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2165inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2165inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2166inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2166inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2178inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2178inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2180inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2180inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2179inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2179inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2181inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2181inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2167inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2167inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2193inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2193inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3662inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3662inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2236inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2236inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2243inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2243inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2182inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2182inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2183inst_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 20) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2183inst_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0) | (tie_t << 4);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
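The 16-bit slots use the same idiom with smaller offsets: op0 lives in the low nibble and t in bits 7:4. One way to sanity-check a batch of such accessors is a table of get/set pairs plus field widths driven through a common round-trip loop. The standalone sketch below mirrors op0 and t of the inst16a/inst16b slots; the table-driven harness itself is an editorial assumption for illustration, not QEMU code.

/* Editor's sketch: table-driven round-trip test over generated-style accessors. */
#include <assert.h>
#include <stdint.h>

typedef unsigned (*field_get)(uint32_t insn);
typedef void (*field_set)(uint32_t *insn, uint32_t val);

static unsigned op0_get(uint32_t insn) { return (insn << 28) >> 28; }
static void op0_set(uint32_t *insn, uint32_t val)
{
    *insn = (*insn & ~0xfu) | ((val << 28) >> 28);
}
static unsigned t_get(uint32_t insn) { return (insn << 24) >> 28; }
static void t_set(uint32_t *insn, uint32_t val)
{
    *insn = (*insn & ~0xf0u) | (((val << 28) >> 28) << 4);
}

static const struct { field_get get; field_set set; unsigned width; } fields[] = {
    { op0_get, op0_set, 4 },
    { t_get,   t_set,   4 },
};

int main(void)
{
    for (unsigned f = 0; f < sizeof(fields) / sizeof(fields[0]); f++) {
        for (uint32_t v = 0; v < (1u << fields[f].width); v++) {
            uint32_t insn = 0;
            fields[f].set(&insn, v);
            assert(fields[f].get(insn) == v);
        }
    }
    return 0;
}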
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_sae_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s3_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s3_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2260gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2260gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2258gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2258gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2282gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2282gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2281gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2281gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2266gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 13) >> 18);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2266gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 17) >> 18;
+ insn[0] = (insn[0] & ~0x7ffe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2312gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2312gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2386_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2386_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2283gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2283gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2286gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2286gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2287gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2287gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2289gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2289gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2293gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2293gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2288gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2288gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2290gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2290gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2359gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 11) | ((insn[0] << 19) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2359gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x1ffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2361gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 11) | ((insn[0] << 19) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2361gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x1ffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2362gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 10) | ((insn[0] << 19) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2362gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x1ff8) | (tie_t << 3);
+ tie_t = (val << 21) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3663gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3663gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2364gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2364gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3664gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3664gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2366gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2366gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3665gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3665gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2308gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2308gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2259gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2259gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2262gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 13) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2262gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2284gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2284gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2275gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2275gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2354gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2354gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2333gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2333gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2310gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2310gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3667gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3667gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2357gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2357gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2376gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2376gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2343gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2343gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2342gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2342gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2344gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2344gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2345gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2345gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2346gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2346gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2373gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2373gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2374gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2374gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2375gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2375gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2335gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2335gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2339gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2339gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2334gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2334gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2336gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2336gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2337gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2337gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2338gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2338gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2340gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2340gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2341gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2341gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2369gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2369gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3668gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3668gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2280gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2280gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2309gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2309gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2261gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2261gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2321gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2321gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2322gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2322gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2355gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2355gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2324gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2324gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2372gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2372gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3669gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3669gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2263gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 17) | ((insn[0] << 13) >> 15);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2263gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 15) >> 15;
+ insn[0] = (insn[0] & ~0x7fffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2264gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 13) >> 18);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2264gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 17) >> 18;
+ insn[0] = (insn[0] & ~0x7ffe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2368gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2368gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3670gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3670gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2291gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2291gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2292gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2292gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2294gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2294gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2295gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2295gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2297gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2297gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2296gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2296gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2301gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2301gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2272_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2272_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2277gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2277gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2279gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2279gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2278gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2278gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2267gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2267gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2268gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2268gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2269gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2269gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2271gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2271gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2270gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2270gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2273gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2273gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2298gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2298gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2299gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2299gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2300gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2300gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2303gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2303gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3671gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3671gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2379gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2379gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3673gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3673gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2274gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 13) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2274gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2306gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2306gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2304gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2304gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3674gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3674gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2353gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2353gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2371gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2371gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3675gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3675gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2314gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2314gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2316gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2316gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2317gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2317gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2319gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2319gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2378gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2378gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3676gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3676gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2323gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2323gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2331gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2331gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2347gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2347gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2383gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2383gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3678gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 19) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3678gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2318gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2318gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2332gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2332gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2348gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2348gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2349gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2349gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2351gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2351gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2370gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2370gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2320gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2320gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2350gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2350gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2352gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2352gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2325gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2325gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2327gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2327gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2326gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2326gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2328gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2328gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2356gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2356gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2329gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2329gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2381gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2381gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3666_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3666_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2330gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2330gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2385gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2385gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2387gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2387gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2388gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2388gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3679gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3679gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2358gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2358gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2389gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2389gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3680gp_slot2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3680gp_slot2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
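+/*
+ * The same accessor pattern repeats from here for fields of the
+ * gp_slot1 slot; Field_op0_s4_Slot_gp_slot1 below assembles that
+ * slot's opcode field from the two slices insn[16:14] and insn[1:0].
+ */
+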
+static unsigned
+Field_dsp340050b49a6c_fld2399gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2399gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_op0_s4_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s4_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3681gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3681gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 23) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2395gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2395gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2394gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2394gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2397gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2397gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2400gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2400gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2402gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2402gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2403gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2403gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3684gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3684gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2405gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2405gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3686gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3686gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2398gp_slot1_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2398gp_slot1_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
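+/*
+ * Accessors for fields of the gp_slot0 slot start here; its 2-bit
+ * opcode field (Field_op0_s5_Slot_gp_slot0) is read from insn[1:0].
+ */
+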
+static unsigned
+Field_sae_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s5_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s5_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2447gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2447gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3688gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3688gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2449gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2449gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3689gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3689gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2418gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2418gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2462_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2462_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2464gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2464gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2454gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2454gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2436gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2436gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3690gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3690gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2438gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 13) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2438gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2437gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2437gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2526gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2526gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2460gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2460gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2459gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2459gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2445_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2445_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2427gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2427gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2420gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2420gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2424gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2424gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2425gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2425gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2480gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2480gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2479gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2479gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2419gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2419gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2481gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2481gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2483gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2483gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2487gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2487gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2482gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2482gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2426gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2426gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2484gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2484gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2422gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2422gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2429gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2429gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2485gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2485gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2486gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2486gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2488gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2488gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2531gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2531gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3691gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 23) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3691gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x1e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2493gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2493gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2506gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2506gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3692gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3692gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2494gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2494gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2407gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2407gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2413gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2413gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2415gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2415gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2417gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2417gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2441gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2441gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3693gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3693gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2409gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2409gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2410gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2410gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2416gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2416gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2411gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2411gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2412gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2412gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2453gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2453gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2451gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2451gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2452gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2452gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2516gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2516gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2512gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2512gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2514gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2514gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2515gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2515gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2443gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 13) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2443gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2444gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2444gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2067_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2067_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2439gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2439gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2423gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2423gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2508gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2508gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2509gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2509gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2510gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2510gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3696gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3696gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2496gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2496gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s8_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
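+/*
+ * s8 here (and op0_s6 further down) appear to be named core ISA fields
+ * rather than hashed TIE-generated ones; their accessors use the same
+ * shift/mask scheme.
+ */
+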
+static unsigned
+Field_dsp340050b49a6c_fld2527gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2527gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2528gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2528gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3697gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3697gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2523gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2523gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3698gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3698gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2455gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2455gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2456gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2456gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2458gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2458gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2457gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2457gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2440gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2440gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2524gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2524gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3699gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3699gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2503gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2503gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2507gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2507gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3700gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3700gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2498gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2498gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2500gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2500gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2501gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2501gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2502gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2502gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2504gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2504gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2505gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2505gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2433gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2433gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2430gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2430gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2431gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2431gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2432gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2432gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2434gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2434gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2435gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2435gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2517gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2517gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2518gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2518gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2519gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2519gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2520gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2520gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2489gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2489gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2491gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2491gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2490gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2490gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2492gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2492gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2529gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2529gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3702gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3702gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2468gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2468gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2470gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2470gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2448_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2448_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3703gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3703gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2521gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2521gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2530_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2530_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2461gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2461gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2463gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2463gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2497gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2497gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2499gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2499gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2465gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2465gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2467gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2467gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2471gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2471gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2473gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2473gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2477gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2477gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 21) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3705gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3705gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2472gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2472gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2495gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2495gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3706gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3706gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2466gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2466gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2474gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2474gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2475gp_slot0_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2475gp_slot0_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
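+
+/* Field accessors for the dot_slot2 slot follow; the extraction and
+   deposit pattern is the same as for the gp_slot0 accessors above. */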
+
+static unsigned
+Field_dsp340050b49a6c_fld2595dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 12) >> 25);
+ tie_t = (tie_t << 9) | ((insn[0] << 21) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2595dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 25;
+ insn[0] = (insn[0] & ~0xfe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_op0_s6_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s6_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
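+
+/*
+ * Usage sketch (hypothetical, for illustration only -- not part of
+ * the generated tables): each get/set pair round-trips, e.g.
+ *
+ *   uint32 w[1] = { 0 };
+ *   Field_op0_s6_Slot_dot_slot2_set (w, 2);
+ *   assert (Field_op0_s6_Slot_dot_slot2_get (w) == 2);
+ *
+ * Bits of val outside the field width are silently truncated by the
+ * leading "(val << 30) >> 30" shift pair in _set.
+ */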
+
+static unsigned
+Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2579dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2579dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2580dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2580dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2582dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2582dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2586dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2586dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2581dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2581dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2583dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2583dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2599dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 10) | ((insn[0] << 20) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2599dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2614_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2614_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2604dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2604dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3710dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3710dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2606dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2606dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3711dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3711dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2601dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 10) | ((insn[0] << 20) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2601dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2602dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2602dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3713dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3713dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2645dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2645dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3714dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3714dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2658dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2658dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3716dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3716dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3717dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3717dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2636dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 12) >> 18);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2636dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0xfffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2577dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2577dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2635dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2635dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3719dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3719dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2571dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2571dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2547dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2547dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2646dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2646dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3721dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3721dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2574dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2574dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2655dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2655dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2557dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2557dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2558dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2558dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2560dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2560dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2559dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2559dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2561dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2561dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2562dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2562dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2563dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2563dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2647dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2647dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2549dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2549dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2550dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2550dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2552dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2552dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2556dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2556dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2551dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2551dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2553dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2553dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2554dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2554dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2555dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2555dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2573dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2573dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2628dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2628dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3722_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3722_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2642dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2642dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2539dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2539dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2578_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2578_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2541dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2541dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2572dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2572dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2542dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2542dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3723dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 18) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3723dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x3ff0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2591dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 12) >> 14);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2591dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2596dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2596dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 19) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3724dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3724dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2592dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 12) >> 25);
+ tie_t = (tie_t << 9) | ((insn[0] << 21) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2592dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 25;
+ insn[0] = (insn[0] & ~0xfe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2598dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2598dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2584dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2584dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2585dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2585dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2587dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2587dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2588dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2588dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2590dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2590dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2589dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2589dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2608dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2608dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2609dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2609dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2610dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2610dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2626dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2626dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3715_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3715_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2613dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2613dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2616dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2616dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2621dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2621dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2615dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2615dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2617dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2617dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2618dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2618dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2637dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2637dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2641dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 12) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2641dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0xffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3726dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3726dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2619dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2619dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2620dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2620dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2622dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2622dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2623dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2623dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3727dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3727dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2654dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 5) | ((insn[0] << 21) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2654dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3728dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3728dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2611dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2611dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2612_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2612_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2624dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2624dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3729dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3729dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2625_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2625_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3731dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3731dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2643dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2643dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2640dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2640dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2569dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2569dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2632dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2632dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2532dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2532dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2644dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2644dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3732dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3732dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2533dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2533dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2534dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2534dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2536dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2536dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2652dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 5) | ((insn[0] << 21) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2652dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3718_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3718_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2540dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2540dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2548dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2548dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2564dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2564dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2657dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2657dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2535dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2535dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2537dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2537dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2565dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2565dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2566dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2566dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2568dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2568dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2630dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2630dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2538dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2538dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2633dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2633dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3733dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3733dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2567dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2567dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2544dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2544dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2543dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2543dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2545dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2545dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2546dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2546dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2575dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2575dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2648dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2648dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2649dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2649dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2651dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2651dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2656dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2656dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3712_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3712_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2576dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2576dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2650dot_slot2_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2650dot_slot2_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
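+/* Accessors for fields encoded in the dot_slot1 slot follow. */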
+static unsigned
+Field_op0_s7_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s7_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3734dot_slot1_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 15) >> 17);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3734dot_slot1_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0x1fffc) | (tie_t << 2);
+}
+
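+/* Accessors for fields encoded in the dot_slot0 slot follow. */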
+static unsigned
+Field_dsp340050b49a6c_fld2068_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2068_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s8_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s8_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2668dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2668dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2666dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2666dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2674dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2674dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2688dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2688dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3735dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3735dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3737dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3737dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2677dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2677dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2678dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2678dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2679dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2679dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2690dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2690dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2680dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2680dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_t_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2697dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2697dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3738dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3738dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2667dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2667dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2704dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 17) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2704dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x7800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3739dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 21) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3739dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2689dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2689dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2669dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2669dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2671dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2671dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2675dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2675dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2672dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2672dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2673dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2673dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2676dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2676dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2682dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2682dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2686dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2686dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3736_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3736_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2681dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2681dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2683dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2683dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2684dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2684dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2685dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2685dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2699dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2699dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3740dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3740dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2692dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2692dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2693dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2693dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2695dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2695dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3741dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3741dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3742dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3742dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2700dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2700dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2702dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2702dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2701dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2701dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2703dot_slot0_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2703dot_slot0_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
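+/* Accessors for fields encoded in the pq_slot2 slot follow. */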
+static unsigned
+Field_sae_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s9_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s9_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2707pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2707pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2739pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2739pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3744pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3744pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2717pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2717pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2713pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 13) >> 18);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2713pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 17) >> 18;
+ insn[0] = (insn[0] & ~0x7ffe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+}
+
+static unsigned
+Field_t_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2718pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2718pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2721pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2721pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2722pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2722pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2724pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2724pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2728pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2728pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2736pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2736pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2723pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2723pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2793pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 11) | ((insn[0] << 19) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2793pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x1ffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2795pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 11) | ((insn[0] << 19) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2795pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x1ffc) | (tie_t << 2);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2796pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 10) | ((insn[0] << 19) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2796pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x1ff8) | (tie_t << 3);
+ tie_t = (val << 21) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3746pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3746pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2798pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2798pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3747pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3747pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2801pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2801pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3749pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3749pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2743pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2743pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2706pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 13) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2706pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2709pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 13) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2709pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2719pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2719pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2715pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2715pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2788pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2788pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2747pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2747pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2791pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2791pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2775pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2775pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2777pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2777pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2776pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2776pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2778pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2778pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2779pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2779pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2780pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2780pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2810pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2810pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2811pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2811pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2767pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2767pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2769pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2769pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2773pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2773pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2768pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2768pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2770pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2770pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2771pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2771pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2772pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2772pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2774pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2774pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2805pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2805pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3751pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3751pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2741pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2741pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2746pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2746pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3752pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3752pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2755pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2755pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2756pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2756pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2789pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2789pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2758pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2758pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2809pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2809pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3753pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3753pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2710pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 17) | ((insn[0] << 13) >> 15);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2710pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 15) >> 15;
+ insn[0] = (insn[0] & ~0x7fffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2711pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 13) >> 18);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2711pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 17) >> 18;
+ insn[0] = (insn[0] & ~0x7ffe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
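+
+/*
+ * Unlike the generated dsp340050b49a6c_fld* fields, Field_s is a named
+ * operand field -- presumably the standard 4-bit Xtensa s register
+ * field -- re-encoded for this slot at insn[0] bits [5:2] (mask 0x3c).
+ */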
+
+static unsigned
+Field_dsp340050b49a6c_fld2803pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2803pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3754pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3754pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2725pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2725pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2726pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2726pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2727pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2727pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2729pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2729pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2730pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2730pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2732pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2732pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2731pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2731pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2733pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2733pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2734pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2734pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2735pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2735pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2807pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2807pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3756pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3756pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2742pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2742pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2738pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2738pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2708pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2708pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2714pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 13) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2714pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x7fe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2787pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2787pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2808pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2808pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3757pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3757pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2748pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2748pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2750pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2750pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2751pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2751pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2753pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2753pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2816pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2816pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_imm7_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
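+
+/*
+ * Field_imm7 is likewise a named operand -- evidently a 7-bit immediate
+ * -- encoded in this slot at insn[0] bits [12:6] (mask 0x1fc0).
+ */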
+
+static unsigned
+Field_dsp340050b49a6c_fld2757pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2757pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2765pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2765pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2781pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2781pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2814pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2814pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3758pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3758pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2752pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2752pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2766pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2766pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2782pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2782pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2783pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2783pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2785pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2785pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2806pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2806pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2754pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2754pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2784pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2784pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2786pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2786pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2759pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2759pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2761pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2761pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2760pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2760pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2762pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2762pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2790pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2790pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2763pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2763pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2812pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2812pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3748_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3748_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2764pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2764pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2818pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2818pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2820pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2820pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2821pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2821pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3759pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3759pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2792pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2792pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2823pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2823pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3760pq_slot2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3760pq_slot2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
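+/*
+ * End of the pq_slot2 fields.  Accessors for the pq_slot1 and then the
+ * pq_slot0 formats follow; the extract/deposit pattern is identical and
+ * only the field names and bit positions differ.
+ */
+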
+static unsigned
+Field_dsp340050b49a6c_fld2826pq_slot1_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2826pq_slot1_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_s10_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s10_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
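+
+/*
+ * op0_s10 occupies the low two bits of the slot word (mask 0x3); it is
+ * presumably the sub-opcode/format selector for pq_slot1, paralleling
+ * op0_s11 for pq_slot0 below.
+ */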
+
+static unsigned
+Field_dsp340050b49a6c_fld3761pq_slot1_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 7) | ((insn[0] << 23) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3761pq_slot1_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc) | (tie_t << 2);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2825pq_slot1_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2825pq_slot1_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_sae_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s11_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s11_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2867pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2867pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3763pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3763pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2869pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2869pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3764pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3764pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2838pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2838pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2882_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2882_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2884pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2884pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2874pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2874pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
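+
+/*
+ * Many adjacent accessors (e.g. fld2884 and fld2874 above, fld2879 and
+ * fld2880 further below) have identical bodies: the generator appears
+ * to emit one get/set pair per named field even when two fields share
+ * the same bit layout, so the duplication is deliberate rather than a
+ * copy-paste error.
+ */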
+
+static unsigned
+Field_dsp340050b49a6c_fld2856pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2856pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3765pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3765pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2858pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 13) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2858pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2857pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2857pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2946pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2946pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2880pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2880pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2879pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2879pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2865_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2865_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2847pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2847pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 21) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2840pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2840pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2844pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2844pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2845pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2845pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2900pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2900pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2899pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2899pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2839pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2839pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2901pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2901pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2903pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2903pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2907pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2907pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2902pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2902pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2846pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2846pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2904pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2904pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2842pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2842pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2849pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2849pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2905pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2905pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2906pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2906pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2908pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2908pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2950pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2950pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 23) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x1e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2913pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2913pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2926pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2926pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3767pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3767pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2914pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2914pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2827pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2827pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2833pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2833pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2835pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2835pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2837pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2837pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2861pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2861pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3768pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3768pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2829pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2829pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2830pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2830pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2836pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2836pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2831pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2831pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2832pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2832pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2873pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2873pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
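+
+/*
+ * A field can also be scattered across three non-contiguous ranges, as
+ * in fld2873 above: _get concatenates bit 18, bit 14 and bits [4:2] of
+ * the instruction word, most significant first, while _set deposits
+ * the sub-fields back starting from the least significant bits of val.
+ */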
+
+static unsigned
+Field_dsp340050b49a6c_fld2871pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2871pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2872pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2872pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2936pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2936pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2932pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2932pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2934pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2934pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2935pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2935pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2863pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 13) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2863pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2864pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2864pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2069_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2069_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2859pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2859pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2843pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2843pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2928pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2928pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2929pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2929pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2930pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2930pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3769pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3769pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2916pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2916pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s8_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2947pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2947pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2948pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2948pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3770pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3770pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2942pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2942pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2875pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2875pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2876pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2876pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2878pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2878pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2877pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2877pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2860pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2860pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2941pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2941pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3771pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3771pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2923pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2923pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2927pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2927pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3773pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3773pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2918pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2918pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2920pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2920pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2921pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2921pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2922pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2922pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2924pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2924pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2925pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2925pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2853pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2853pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2850pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2850pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2851pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2851pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2852pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2852pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2854pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2854pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2855pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2855pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2943pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2943pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3776pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3776pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3777pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3777pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2945pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2945pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2909pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2909pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2911pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2911pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2910pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2910pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2912pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2912pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2949pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2949pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3778pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3778pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2888pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2888pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2890pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2890pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2937pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2937pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2939pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2939pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3772_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3772_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2881pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2881pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2883pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2883pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2917pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2917pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2919pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2919pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2885pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2885pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2887pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2887pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2891pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2891pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2893pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2893pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2897pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2897pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 21) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3779pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3779pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2892pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2892pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2915pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2915pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3780pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3780pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2886pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2886pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2894pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2894pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2895pq_slot0_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2895pq_slot0_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2953acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2953acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_op0_s12_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s12_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
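+/*
+ * Worked example for the pair above: op0_s12 is a 3-bit field with
+ * bit 2 held in instruction bit 14 and bits 1..0 in instruction
+ * bits 1..0.  Setting val = 5 (0b101) therefore leaves insn[0] with
+ * bit 14 = 1 and bits 1..0 = 0b01, and the _get function reassembles
+ * the same 0b101.
+ */
+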
+static unsigned
+Field_dsp340050b49a6c_fld3782acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3782acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2956acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2956acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2967acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2967acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2966acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2966acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3783acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3783acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2957acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2957acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2958acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2958acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2960acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2960acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2964acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2964acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3785acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3785acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2959acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2959acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2963acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2963acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2954acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2954acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3786acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3786acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2955acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2955acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3788acc2_slot2_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3788acc2_slot2_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2969acc2_slot1_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2969acc2_slot1_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_s13_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s13_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3790acc2_slot1_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 8) >> 18);
+ tie_t = (tie_t << 7) | ((insn[0] << 23) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3790acc2_slot1_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc) | (tie_t << 2);
+ tie_t = (val << 11) >> 18;
+ insn[0] = (insn[0] & ~0xfffc00) | (tie_t << 10);
+}
+
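+/*
+ * The pair above encodes the widest field in this stretch: a 21-bit
+ * value whose high 14 bits live in instruction bits 10..23 and whose
+ * low 7 bits live in bits 2..8, showing that the slice idiom scales
+ * unchanged beyond small opcode fields.
+ */
+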
+static unsigned
+Field_dsp340050b49a6c_fld2968acc2_slot1_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2968acc2_slot1_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3793acc2_slot1_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 11) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3793acc2_slot1_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0x1e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2075_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2075_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 11) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x1e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_op0_s14_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s14_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2973acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2973acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3795acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3795acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2980acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2980acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3796acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3796acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2976acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 18) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2976acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2989acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2989acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3798acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3798acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2990acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2990acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3799acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3799acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2977acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2977acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3797_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3797_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2981acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2981acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3800acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3800acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2975acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 18) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2975acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2982acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2982acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2984acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2984acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2985acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2985acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3802acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3802acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2987acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2987acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3803acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3803acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2974acc2_slot0_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 18) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2974acc2_slot0_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f00) | (tie_t << 8);
+}
+
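+/*
+ * The smod_slot2 accessors that follow repeat the pattern once more.
+ * Note that the generator emits a distinct function for every
+ * (field, slot) combination, so many neighbouring bodies are
+ * byte-for-byte identical apart from the function name (e.g. the
+ * fld3008..fld3022 group further below).
+ */
+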
+static unsigned
+Field_dsp340050b49a6c_fld3043smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 15) | ((insn[0] << 15) >> 17);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3043smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0x1fffc) | (tie_t << 2);
+ tie_t = (val << 16) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_op0_s15_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s15_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3061smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3061smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3806smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3806smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3063smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3063smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3807smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3807smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3049smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3049smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3050smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3050smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3052smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3052smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3056smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3056smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+ tie_t = (val << 19) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3038smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 16) >> 18);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3038smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0xfffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3091smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3091smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3102smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3102smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3810smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3810smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3116smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3116smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3812smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3812smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3813smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3813smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3092smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 12) >> 18);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3092smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0xfffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3047smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3047smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3090smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3090smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3814smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3814smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3030smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3030smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3006smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3006smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3104smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3104smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3816smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3816smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3033smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3033smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3111smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3111smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3016smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3016smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3017smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3017smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3019smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3019smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3018smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3018smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3020smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3020smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3021smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3021smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3022smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3022smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3105smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3105smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3008smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3008smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3009smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3009smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3011smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3011smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3015smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3015smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3010smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3010smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3012smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3012smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3013smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3013smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3014smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3014smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3070_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3070_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3032smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3032smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3084smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3084smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3817_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3817_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3098smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3098smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2998smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2998smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3048_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3048_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3000smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3000smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3031smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3031smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3001smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3001smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3818smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 18) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3818smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x3ff0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3039smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 12) >> 14);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3039smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3044smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 9) | ((insn[0] << 21) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3044smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3819smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3819smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3040smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 15) | ((insn[0] << 15) >> 17);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3040smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0x1fffc) | (tie_t << 2);
+ tie_t = (val << 16) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3046smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3046smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3051smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3051smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3053smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3053smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3054smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3054smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3055smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3055smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3058smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3058smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+ tie_t = (val << 19) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3059smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3059smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 21) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3821smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3821smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3065smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3065smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3066smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3066smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3067smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3067smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3080smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3080smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3069smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3069smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3071smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3071smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3072smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3072smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3074smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3074smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3073smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3073smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3075smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3075smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 19) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3093smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3093smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3097smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 12) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3097smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0xffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3823smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3823smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3076smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3076smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3077smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3077smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3078smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3078smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3079smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3079smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3036smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 9) | ((insn[0] << 21) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3036smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc) | (tie_t << 2);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3068smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3068smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3081smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3081smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3082smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3082smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3824smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3824smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3100smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3100smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3825smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3825smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3106smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3106smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3099smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3099smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3096smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3096smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3028smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3028smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3087smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3087smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
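+/*
+ * Round-trip sketch (illustrative only, not part of the generated code):
+ * the setter clears the field's bits in the slot word and ORs the new
+ * value in, so a get after a set returns the value just written.
+ *
+ *   uint32 w[1] = { 0 };
+ *   Field_t_Slot_smod_slot2_set(w, 0xa);           // writes bits [9:6]
+ *   assert(Field_t_Slot_smod_slot2_get(w) == 0xa); // reads them back
+ */
+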
+static unsigned
+Field_dsp340050b49a6c_fld2991smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2991smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3101smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3101smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3826smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3826smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2992smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2992smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2993smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2993smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2995smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2995smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3113smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3113smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3827smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3827smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2999smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2999smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3007smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3007smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3023smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3023smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3115smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3115smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2994smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2994smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2996smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2996smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3024smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3024smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3025smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3025smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3027smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3027smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3085smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3085smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2997smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2997smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3088smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3088smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3828smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3828smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3026smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3026smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3003smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3003smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3002smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3002smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3004smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3004smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3005smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3005smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3034smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3034smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3107smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3107smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3109smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3109smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3114smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3114smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3808_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3808_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3108smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3108smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3035smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3035smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3110smod_slot2_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3110smod_slot2_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
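+/*
+ * Field accessors for the smod_slot1 slot follow; they use the same
+ * shift-pair extract/deposit pattern as the smod_slot2 accessors above.
+ */
+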
+static unsigned
+Field_dsp340050b49a6c_fld3118smod_slot1_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3118smod_slot1_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_s16_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s16_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3829smod_slot1_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 16) >> 26);
+ tie_t = (tie_t << 7) | ((insn[0] << 23) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3829smod_slot1_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0xfc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2080_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2080_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
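+/*
+ * Field accessors for the smod_slot0 slot, including the standard sae,
+ * op0 and imm6 fields, again using the same extract/deposit pattern.
+ */
+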
+static unsigned
+Field_sae_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s17_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s17_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3120smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 13) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3120smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3122smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3122smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3121smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3121smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3131smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3131smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3126smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3126smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3148smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 13) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3148smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0x7f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 20) >> 26);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3164smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 13) >> 21);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3164smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 20) >> 21;
+ insn[0] = (insn[0] & ~0x7ff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3160smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 13) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3160smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3161smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 13) >> 21);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3161smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 20) >> 21;
+ insn[0] = (insn[0] & ~0x7ff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3159smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3159smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3165smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3165smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3135smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3135smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3133smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3133smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3119smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3119smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3155smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3155smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3833smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3833smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3149smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3149smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3137smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3137smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3138smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3138smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3184smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3184smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3836smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3836smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3182smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3182smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sas_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3153smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3153smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3186smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3186smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3837smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 23) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3837smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3168smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3168smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3171smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3171smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3175smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3175smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3140smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3140smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3170smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3170smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3152smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3152smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3144smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3144smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3172smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3172smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3173smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3173smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3174smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3174smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3188smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3188smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3838smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3838smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3180smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3180smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3166smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3166smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 20) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3181smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3181smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 20) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s8_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3136smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3136smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3123smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3123smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3134smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3134smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3125smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3125smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3127smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3127smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3128smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3128smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3130smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3130smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3129smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3129smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3132smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3132smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3139smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3139smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3141smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3141smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3142smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3142smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3143smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3143smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3145smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3145smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3146smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 13) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3146smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x7f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3150_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3150_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3156smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3156smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3157smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3157smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3158smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3158smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3176smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3176smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3177smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3177smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3179smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3179smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3178smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3178smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 19) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3841smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3841smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3842smod_slot0_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3842smod_slot0_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3208llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3208llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 18) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_op0_s18_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s18_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3843llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3843llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3193llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3193llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3194llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3194llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3196llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3196llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3200llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3200llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3213llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3213llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3845llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3845llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3195llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3195llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3206_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 20) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3206_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3205_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3205_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3212_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3212_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3848llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3848llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3216llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3216llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3849llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3849llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3215llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3215llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3850llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3850llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3234llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3234llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3232llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3232llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3237llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 14) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3237llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0x3f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3192llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3192llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3230_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3230_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3191llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3191llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3243llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 14) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3243llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_t_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3218llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3218llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3851llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3851llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3244llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3244llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3853llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3853llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3242llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3242llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3855llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3855llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3231llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3231llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3856llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3856llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3203llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 14) >> 16);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3203llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0x3fffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3204llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ tie_t = (tie_t << 10) | ((insn[0] << 20) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3204llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc) | (tie_t << 2);
+ tie_t = (val << 17) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3207llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ tie_t = (tie_t << 10) | ((insn[0] << 20) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3207llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc) | (tie_t << 2);
+ tie_t = (val << 17) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3197llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3197llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3198llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3198llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3199llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3199llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3201llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3201llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3202llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3202llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3210llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3210llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3857llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3857llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3217_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3217_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3859llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 14) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3859llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0x3fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3221llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3221llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3222llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3222llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3860llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3860llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3224llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3224llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3861llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3861llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3220llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3220llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3246_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3246_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3214_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3214_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3863llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 15) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3863llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x1f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3226llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3226llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3864llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3864llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3235llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 14) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3235llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0x3f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3238llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 14) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3238llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x3f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3862_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3862_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3241llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3241llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3865llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3865llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3236_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 14) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3236_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3240llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3240llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3228llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3228llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3866llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3866llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3247llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3247llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3867llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3867llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3233_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3233_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3245llr_slot2_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3245llr_slot2_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3868_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3868_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
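+/*
+ * Field accessors for the llr_slot1 slot. Each _get() assembles the
+ * field value from one or more bit ranges of insn[0]: the idiom
+ * (insn[0] << l) >> r extracts the (32 - r)-bit range whose most
+ * significant bit is bit (31 - l). Each _set() clears the same
+ * ranges with a mask and ors in the corresponding bits of val.
+ */
+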
+static unsigned
+Field_dsp340050b49a6c_fld3250llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3250llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_op0_s19_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s19_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3869llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3869llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3254llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3254llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3872llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3872llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 21) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3252llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3252llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3875llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3875llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3253llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3253llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3876llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3876llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3251llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3251llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3878llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3878llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3248llr_slot1_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3248llr_slot1_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3870_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3870_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
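+/*
+ * Field accessors for the llr_slot0 slot, following the same
+ * extract/deposit pattern as the slots above.
+ */
+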
+static unsigned
+Field_dsp340050b49a6c_fld2071_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2071_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s20_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s20_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3260llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3260llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3258llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3258llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3266llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3266llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3286llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3286llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3879llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3879llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3303llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3303llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3269llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3269llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3293llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3293llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3297llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3297llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3881llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3881llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3272llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3272llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3274llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3274llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3302llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3302llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3883llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3883llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3275llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3275llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3289llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3289llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3292llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3292llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3277llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3277llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3276llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3276llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3270llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3270llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3312llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3312llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3885llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3885llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3304llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3304llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3283llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3283llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3310llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3310llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3888llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3888llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3288llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3288llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3284llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3284llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3305llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3305llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3306llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3306llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3890llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3890llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3259llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3259llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3291llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3291llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3261llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3261llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3263llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3263llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3267llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3267llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3264llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3264llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3265llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3265llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3268llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3268llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3308llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3308llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3892llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3892llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3294llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3294llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3295llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3295llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3296llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3296llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3298llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3298llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3299llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3299llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3300llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 17) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3300llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x7800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3893llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3893llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3278llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3278llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3279llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3279llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3280llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3280llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3282llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3282llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3281llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3281llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3311llr_slot0_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3311llr_slot0_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
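+/*
+ * Field accessors for the dual_slot2 slot, using the same
+ * extract/deposit pattern.
+ */
+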
+static unsigned
+Field_dsp340050b49a6c_fld3364_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3364_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s21_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s21_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3464dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3464dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3465dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3465dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3895dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3895dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3459dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3459dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3896dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 11) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3896dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3468dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3468dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3897dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3897dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3467dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3467dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3899dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3899dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3414dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3414dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3313dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3313dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3316dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3316dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3334dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3334dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_t_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3418dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3418dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3425dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3425dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3408dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3408dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3901dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3901dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3315dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3315dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3421dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3421dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3443dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3443dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3420dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3420dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3423dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3423dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3422dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3422dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3424dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3424dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3416dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3416dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3461dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3461dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3415dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3415dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3426dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3426dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3417dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3417dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3419dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3419dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
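+/*
+ * Several of the fld34xx accessors above and below have textually
+ * identical bodies (the same bit positions and masks); the
+ * generator emits a distinct get/set pair for every logical field
+ * name even when the encodings coincide.
+ */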
+static unsigned
+Field_dsp340050b49a6c_fld3318_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3318_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3904dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3904dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3482dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3482dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3903dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3903dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 8) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3407_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3407_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3905dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 9) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3905dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3479dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3479dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 23) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3906dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3906dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3475dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3475dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 20) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3353dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 8) >> 25);
+ tie_t = (tie_t << 13) | ((insn[0] << 17) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3353dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc) | (tie_t << 2);
+ tie_t = (val << 12) >> 25;
+ insn[0] = (insn[0] & ~0xfe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3388dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 8) >> 19);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3388dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 15) >> 19;
+ insn[0] = (insn[0] & ~0xfff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3913dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3913dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3387dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3387dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
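+/*
+ * Field widths in this slot vary widely: the accessor above covers
+ * an 18-bit field spanning bits [23:6] of the slot word, while
+ * others nearby are only one or two bits wide.  The extraction and
+ * insertion pattern is the same regardless of width.
+ */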
+
+static unsigned
+Field_dsp340050b49a6c_fld3396dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 8) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3396dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0xfe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3909dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 15) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3909dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3404dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3404dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3910dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3910dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3385dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3385dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3478dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3478dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 21) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3908dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3908dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3477dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3477dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 20) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3469dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 8) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3469dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3484dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3484dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3916dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3916dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3451dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3451dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 21) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3914dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3914dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3450dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3450dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3453dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 8) >> 18);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3453dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0xfffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3365dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3365dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3366dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3366dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3368dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3368dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3372dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3372dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3367dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3367dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3369dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3369dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3345dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 12) | ((insn[0] << 18) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3345dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc) | (tie_t << 2);
+ tie_t = (val << 14) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3457dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 8) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3457dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0xfe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3917dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3917dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3358dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 6) | ((insn[0] << 22) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3358dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0) | (tie_t << 4);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3918dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3918dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3347dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 12) | ((insn[0] << 18) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3347dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc) | (tie_t << 2);
+ tie_t = (val << 14) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3348dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 12) | ((insn[0] << 18) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3348dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc) | (tie_t << 2);
+ tie_t = (val << 14) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3354dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 10) | ((insn[0] << 18) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3354dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x3ff0) | (tie_t << 4);
+ tie_t = (val << 16) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_s4_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3397dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3397dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3462dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3462dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3394dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3394dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3920dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 16) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3920dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3363dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3363dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3481dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3481dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3339dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3339dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3384dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3384dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3360dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3360dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3437dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3437dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3439dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3439dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3438dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3438dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3440dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3440dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3441dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3441dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3442dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3442dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3444dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3444dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3460dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3460dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3921dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3921dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3429dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3429dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3431dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3431dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3435dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3435dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3430dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3430dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3432dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3432dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3433dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3433dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3434dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3434dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3436dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3436dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3361dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3361dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3403dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3403dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3922dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3922dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3386dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3386dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 18) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3923dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3923dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 18) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3324dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3324dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3325dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3325dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3340dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3340dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3327dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3327dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3342dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 6) | ((insn[0] << 22) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3342dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0) | (tie_t << 4);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3356dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 6) | ((insn[0] << 22) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3356dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0) | (tie_t << 4);
+ tie_t = (val << 23) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+ tie_t = (val << 17) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3924dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3924dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3349dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 22) | ((insn[0] << 8) >> 10);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3349dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 10) >> 10;
+ insn[0] = (insn[0] & ~0xfffffc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3350dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 8) >> 25);
+ tie_t = (tie_t << 13) | ((insn[0] << 17) >> 19);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3350dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0x7ffc) | (tie_t << 2);
+ tie_t = (val << 12) >> 25;
+ insn[0] = (insn[0] & ~0xfe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3341dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3341dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3370dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3370dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3371dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3371dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3373dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3373dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3374dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3374dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3376dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3376dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3375dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3375dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3377dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3377dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3378dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3378dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3379dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 8) >> 22);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3379dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 14) >> 22;
+ insn[0] = (insn[0] & ~0xffc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3381dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3381dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3448dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3448dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3925dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3925dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3454dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3454dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3412dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3412dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3927dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3927dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3382dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 7) | ((insn[0] << 22) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3382dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8) | (tie_t << 3);
+ tie_t = (val << 19) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3928dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3928dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3410_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3410_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3929dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3929dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+ tie_t = (val << 17) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3390dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3390dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3413dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3413dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3930dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 13) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3930dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3456dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3456dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3392dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 8) >> 21);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3392dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 17) >> 21;
+ insn[0] = (insn[0] & ~0xffe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3900_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3900_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3337dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3337dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3446dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3446dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 20) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3317dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3317dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3458dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3458dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3319dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3319dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3320dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3320dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3322dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3322dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3399dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3399dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3931dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 11) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3931dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x1c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3326dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3326dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3427dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3427dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3470dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3470dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3411dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3411dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3933dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3933dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3321dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3321dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3428dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3428dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
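+/*
+ * Unlike the generated fld* accessors around them, Field_s and Field_r
+ * below are presumably the standard Xtensa s and r register fields, giving
+ * their positions (bits 5..2 and 13..10 respectively) within the
+ * dual_slot2 encoding.
+ */
+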
+static unsigned
+Field_s_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_r_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3471dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3471dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3472dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3472dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3474dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3474dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3323dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3323dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3480dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3480dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3473dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3473dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3328dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3328dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3330dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3330dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3329dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3329dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3331dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3331dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3362dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3362dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3332dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3332dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3445dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3445dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3333dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3333dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3335dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3335dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3401dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 8) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3401dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3934dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3934dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3406dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3406dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3935dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 9) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3935dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3380dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3380dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 18) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3336dual_slot2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 8) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3336dual_slot2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0xfc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_op0_s22_Slot_dual_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_op0_s22_Slot_dual_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
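+/*
+ * Only the op0_s22 opcode bit of the dual_slot1 format appears above; the
+ * accessors from here on encode and decode fields of the dual_slot0 slot,
+ * following the same extract/insert pattern.
+ */
+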
+static unsigned
+Field_imm8_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_op0_s23_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_op0_s23_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3532dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3532dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 24) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3533dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 10) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3533dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 25) >> 27;
+ insn[0] = (insn[0] & ~0x3e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2057_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2057_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3625dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3625dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_r_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_t_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3487dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3487dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3584_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3584_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3937dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3937dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3489dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3489dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3488dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3488dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3588dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 10) >> 27);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3588dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x3e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3938dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3938dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3544dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3544dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3519dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3519dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 18) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3939dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3939dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3523dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 10) >> 21);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3523dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x3ff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_bbi_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 21) >> 27);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3520dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3520dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3507dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 10) >> 16);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3507dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0x3fffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3506dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 10) >> 17);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3506dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0x3fff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3614dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3614dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3550dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3550dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3549dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3549dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3531_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3531_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3500dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3500dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 18) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3502dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3502dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 17) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3606dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3606dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3607dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3607dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3608dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3608dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sae_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3619_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3619_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3940dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 11) >> 25);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3940dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3613dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 10) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3613dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0x3e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3509dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3509dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3510dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3510dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3562dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3562dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3560dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3560dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3499dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3499dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3563dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3563dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3565dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3565dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3564dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3564dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3566dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3566dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3512dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3512dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3567dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3567dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3504dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3504dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 17) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3516dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3516dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3568dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3568dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3570dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3570dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3580dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3580dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3559dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3559dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3573dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3573dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3599dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3599dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
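+
+/*
+ * Fields may also be split across more than two ranges: fld3599 above
+ * concatenates insn[0] bits [21:16], bit [14] and bits [5:3] into one
+ * 10-bit value, and its _set() scatters the low 3, next 1 and top 6 bits
+ * of 'val' back under the masks 0x38, 0x4000 and 0x3f0000 respectively.
+ */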
+
+static unsigned
+Field_dsp340050b49a6c_fld3941dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3941dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3575dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3575dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3535dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3535dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2066_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2066_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3494dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3494dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3497dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3497dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3496dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3496dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3498dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3498dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3490dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3490dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3491dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3491dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3493dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3493dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3527dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3527dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3492dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3492dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3589dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3589dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_s8_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3541dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3541dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3542dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3542dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3543dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3543dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3626dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3626dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3621dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3621dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3623dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3623dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3622dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3622dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3624dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3624dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3529dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 10) >> 22);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3529dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x3ff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3530dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 10) >> 25);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3530dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 23) >> 25;
+ insn[0] = (insn[0] & ~0x3f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2072_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2072_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3524dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 17) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3524dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x7800) | (tie_t << 11);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3943dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 6) | ((insn[0] << 21) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3943dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e0) | (tie_t << 5);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3508dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3508dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3601dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3601dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3603dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3603dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3604dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3604dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3946dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3946dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3545dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3545dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3616dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3616dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3947dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3947dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 25) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3618dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3618dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3949dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 11) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3949dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0x1fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3609dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3609dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3546dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3546dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3548dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3548dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3554dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3554dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3547dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3547dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3522dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3522dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3950dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3950dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3611dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3611dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3592dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3592dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3594dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3594dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3598dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3598dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3951dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3951dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3600dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 10) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3600dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 24) >> 27;
+ insn[0] = (insn[0] & ~0x3e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3952dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3952dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3593dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3593dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3595dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3595dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3596dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3596dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3597dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3597dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3511dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3511dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3513dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3513dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3514dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3514dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3515dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3515dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3517dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3517dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3518dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3518dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 17) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3536dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3536dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3539dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3539dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3954dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 14) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3954dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x3e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3537dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3537dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3538dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 10) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3538dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3957dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3957dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3587dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3587dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3610_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 22) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3610_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e0) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3571dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3571dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3572dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3572dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3574dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3574dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3569dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3569dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3505dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 10) >> 18);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3505dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0x3fff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3612dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 10) >> 26);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3612dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 22) >> 26;
+ insn[0] = (insn[0] & ~0x3f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3576dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3576dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 15) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3577dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3577dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 15) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3615dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3615dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3958dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 13) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3958dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3551dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3551dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3553dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3553dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3590dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3590dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3591dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3591dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3555dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3555dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3557dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3557dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3578dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3578dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 15) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3579dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3579dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 15) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3581dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 22) >> 24);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3581dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc) | (tie_t << 2);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3582dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3582dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 17) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3959dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3959dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3556dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3556dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3558dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 10) >> 23);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3558dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 19) >> 23;
+ insn[0] = (insn[0] & ~0x3fe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3583dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3583dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+ tie_t = (val << 18) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3960dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3960dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3585dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3585dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld3961dual_slot0_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld3961dual_slot0_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
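+/*
+ * Accessors for the core instruction fields (t, s, r, imm*, op2, ...)
+ * follow; there is one get/set pair per field and per format slot,
+ * because the same logical field sits at a different bit position in
+ * each slot (compare Field_t_Slot_inst16a, bits 7:4, with
+ * Field_t_Slot_gp_slot2, bits 9:6).
+ */
+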
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_t_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 21) >> 27);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 14) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm12_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 14) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_imm8_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 10) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_s_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 14) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_imm12b_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 14) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x3fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 10) >> 20);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 16) >> 20;
+ insn[0] = (insn[0] & ~0x3ffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_op2_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_op2_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_r_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
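+/*
+ * Shift-amount fields: sae/sal/sargt/sas encode 5-bit values whose top
+ * bit often lives apart from the 4-bit nibble.  Field_sae_Slot_inst_get,
+ * for example, takes insn bit 16 ((insn[0] << 15) >> 31) as the high bit
+ * and bits 11:8 ((insn[0] << 20) >> 28) as the low nibble, and its
+ * setter writes the two pieces back through masks 0xf00 and 0x10000.
+ */
+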
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sal_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sal_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sargt_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_sas_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_sas_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_sas_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 20) >> 26);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 20) >> 26);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 20) >> 26);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 20) >> 26);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm7_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 19) >> 25);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x1fc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_r2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_r2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_t4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_s4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_s4_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_s4_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_s4_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_r4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_t8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_t8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_r8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_r8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_r8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_fimm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_fimm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_fimm8_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 14) >> 24);
+ return tie_t;
+}
+
+static void
+Field_fimm8_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x3fc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2029_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2029_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2030_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2030_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2032_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2032_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2035_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2035_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2035_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2035_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2035_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2035_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2035_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 27) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2035_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2036_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2036_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2036_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2036_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2036_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2036_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2037_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2037_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
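+/*
+ * Some fields below (e.g. fld2038 in the inst slot) are split across two
+ * disjoint bit ranges: _get concatenates the pieces from most to least
+ * significant, and _set writes them back with one mask/shift step per
+ * piece, starting from the low piece of val.
+ */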
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2038_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2038_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2039_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2039_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2040_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2040_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2040_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2040_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2040_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2040_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2040_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2040_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2040_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2040_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2041_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2041_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2042_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2042_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2042_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2042_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2042_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2042_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2042_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 17) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2042_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2043_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2043_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 23) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2044_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2044_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2045_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2045_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2046_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2046_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2047_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2047_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2048_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2048_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2049_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2049_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 15) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x1e000) | (tie_t << 13);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2050_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2050_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 22) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2051_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 24) >> 26);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2051_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
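+/*
+ * Fields are not always contiguous.  When a field spans two chunks,
+ * the _get helper concatenates the chunks most-significant first, and
+ * the _set helper scatters the value back in the same order.  In
+ * Field_dsp340050b49a6c_fld2052_Slot_acc2_slot1 above, value bits
+ * [3:1] live in insn bits [9:7] and value bit [0] lives in insn
+ * bit [2].
+ */
+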
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2052_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2052_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2053_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 14) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2053_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x3c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_llr_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_llr_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2054_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2054_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_acc2_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_acc2_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2055_Slot_dual_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2055_Slot_dual_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_gp_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_gp_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_dot_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_dot_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_pq_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_pq_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_smod_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2056_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2025_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2025_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_gp_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_gp_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_dot_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_dot_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_pq_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_pq_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_acc2_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_acc2_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_smod_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_smod_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_llr_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_llr_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2027_Slot_dual_slot2_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2027_Slot_dual_slot2_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2026_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2026_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_gp_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_gp_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_dot_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_dot_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_pq_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_pq_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2031_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 22) >> 28);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2031_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2028_Slot_acc2_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 15) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2028_Slot_acc2_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2028_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 27) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2028_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c) | (tie_t << 2);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2033_Slot_smod_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 23) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2033_Slot_smod_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2033_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 19) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2033_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x1c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_dsp340050b49a6c_fld2034_Slot_llr_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_dsp340050b49a6c_fld2034_Slot_llr_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
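+/*
+ * The Implicit_Field accessors below cover operands that occupy no
+ * encoding bits: _set is a no-op, and each _get returns a constant
+ * (0, 4, 8 or 12 for the ar* variants, 0 for the b* variants),
+ * presumably so that implicit and encoded operands can share the same
+ * get/set tables as ordinary fields.
+ */
+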
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_bt16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_bs16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_br16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_brall_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_t2,
+ FIELD_s2,
+ FIELD_r2,
+ FIELD_t4,
+ FIELD_s4,
+ FIELD_r4,
+ FIELD_t8,
+ FIELD_s8,
+ FIELD_r8,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_fimm8,
+ FIELD_dsp340050b49a6c_fld2019,
+ FIELD_dsp340050b49a6c_fld2021,
+ FIELD_dsp340050b49a6c_fld2029,
+ FIELD_dsp340050b49a6c_fld2030,
+ FIELD_dsp340050b49a6c_fld2032,
+ FIELD_dsp340050b49a6c_fld2035,
+ FIELD_dsp340050b49a6c_fld2036,
+ FIELD_dsp340050b49a6c_fld2037,
+ FIELD_dsp340050b49a6c_fld2038,
+ FIELD_dsp340050b49a6c_fld2039,
+ FIELD_dsp340050b49a6c_fld2040,
+ FIELD_dsp340050b49a6c_fld2041,
+ FIELD_dsp340050b49a6c_fld2042,
+ FIELD_dsp340050b49a6c_fld2043,
+ FIELD_dsp340050b49a6c_fld2044,
+ FIELD_dsp340050b49a6c_fld2045,
+ FIELD_dsp340050b49a6c_fld2046,
+ FIELD_dsp340050b49a6c_fld2047,
+ FIELD_dsp340050b49a6c_fld2048,
+ FIELD_dsp340050b49a6c_fld2049,
+ FIELD_dsp340050b49a6c_fld2050,
+ FIELD_dsp340050b49a6c_fld2051,
+ FIELD_dsp340050b49a6c_fld2052,
+ FIELD_dsp340050b49a6c_fld2053,
+ FIELD_dsp340050b49a6c_fld2054,
+ FIELD_dsp340050b49a6c_fld2055,
+ FIELD_dsp340050b49a6c_fld2056,
+ FIELD_dsp340050b49a6c_fld2082Inst,
+ FIELD_dsp340050b49a6c_fld2083Inst,
+ FIELD_dsp340050b49a6c_fld2084Inst,
+ FIELD_dsp340050b49a6c_fld2085Inst,
+ FIELD_dsp340050b49a6c_fld2086Inst,
+ FIELD_dsp340050b49a6c_fld2088Inst,
+ FIELD_dsp340050b49a6c_fld2089Inst,
+ FIELD_dsp340050b49a6c_fld2090Inst,
+ FIELD_dsp340050b49a6c_fld2091Inst,
+ FIELD_dsp340050b49a6c_fld2092Inst,
+ FIELD_dsp340050b49a6c_fld2094Inst,
+ FIELD_dsp340050b49a6c_fld2095Inst,
+ FIELD_dsp340050b49a6c_fld2096Inst,
+ FIELD_dsp340050b49a6c_fld2098Inst,
+ FIELD_dsp340050b49a6c_fld2099Inst,
+ FIELD_dsp340050b49a6c_fld2100Inst,
+ FIELD_dsp340050b49a6c_fld2101Inst,
+ FIELD_dsp340050b49a6c_fld2102Inst,
+ FIELD_dsp340050b49a6c_fld2103Inst,
+ FIELD_dsp340050b49a6c_fld2104Inst,
+ FIELD_dsp340050b49a6c_fld2105Inst,
+ FIELD_dsp340050b49a6c_fld2106Inst,
+ FIELD_dsp340050b49a6c_fld2107Inst,
+ FIELD_dsp340050b49a6c_fld2108Inst,
+ FIELD_dsp340050b49a6c_fld2109Inst,
+ FIELD_dsp340050b49a6c_fld2110Inst,
+ FIELD_dsp340050b49a6c_fld2111Inst,
+ FIELD_dsp340050b49a6c_fld2112Inst,
+ FIELD_dsp340050b49a6c_fld2113Inst,
+ FIELD_dsp340050b49a6c_fld2114Inst,
+ FIELD_dsp340050b49a6c_fld2115Inst,
+ FIELD_dsp340050b49a6c_fld2116Inst,
+ FIELD_dsp340050b49a6c_fld2117Inst,
+ FIELD_dsp340050b49a6c_fld2118Inst,
+ FIELD_dsp340050b49a6c_fld2119Inst,
+ FIELD_dsp340050b49a6c_fld2120Inst,
+ FIELD_dsp340050b49a6c_fld2122Inst,
+ FIELD_dsp340050b49a6c_fld2123Inst,
+ FIELD_dsp340050b49a6c_fld2124Inst,
+ FIELD_dsp340050b49a6c_fld2125Inst,
+ FIELD_dsp340050b49a6c_fld2126Inst,
+ FIELD_dsp340050b49a6c_fld2127Inst,
+ FIELD_dsp340050b49a6c_fld2128Inst,
+ FIELD_dsp340050b49a6c_fld2129Inst,
+ FIELD_dsp340050b49a6c_fld2131Inst,
+ FIELD_dsp340050b49a6c_fld2132Inst,
+ FIELD_dsp340050b49a6c_fld2133Inst,
+ FIELD_dsp340050b49a6c_fld2134Inst,
+ FIELD_dsp340050b49a6c_fld2136Inst,
+ FIELD_dsp340050b49a6c_fld2137Inst,
+ FIELD_dsp340050b49a6c_fld2138Inst,
+ FIELD_dsp340050b49a6c_fld2139Inst,
+ FIELD_dsp340050b49a6c_fld2140Inst,
+ FIELD_dsp340050b49a6c_fld2141Inst,
+ FIELD_dsp340050b49a6c_fld2142Inst,
+ FIELD_dsp340050b49a6c_fld2143Inst,
+ FIELD_dsp340050b49a6c_fld2144Inst,
+ FIELD_dsp340050b49a6c_fld2145Inst,
+ FIELD_dsp340050b49a6c_fld2146Inst,
+ FIELD_dsp340050b49a6c_fld2147Inst,
+ FIELD_dsp340050b49a6c_fld2149Inst,
+ FIELD_dsp340050b49a6c_fld2151Inst,
+ FIELD_dsp340050b49a6c_fld2153Inst,
+ FIELD_dsp340050b49a6c_fld2154Inst,
+ FIELD_dsp340050b49a6c_fld2155Inst,
+ FIELD_dsp340050b49a6c_fld2156Inst,
+ FIELD_dsp340050b49a6c_fld2157Inst,
+ FIELD_dsp340050b49a6c_fld2158Inst,
+ FIELD_dsp340050b49a6c_fld2159Inst,
+ FIELD_dsp340050b49a6c_fld2160Inst,
+ FIELD_dsp340050b49a6c_fld2161Inst,
+ FIELD_dsp340050b49a6c_fld2162Inst,
+ FIELD_dsp340050b49a6c_fld2163Inst,
+ FIELD_dsp340050b49a6c_fld2164Inst,
+ FIELD_dsp340050b49a6c_fld2165Inst,
+ FIELD_dsp340050b49a6c_fld2166Inst,
+ FIELD_dsp340050b49a6c_fld2167Inst,
+ FIELD_dsp340050b49a6c_fld2168Inst,
+ FIELD_dsp340050b49a6c_fld2169Inst,
+ FIELD_dsp340050b49a6c_fld2171Inst,
+ FIELD_dsp340050b49a6c_fld2172Inst,
+ FIELD_dsp340050b49a6c_fld2173Inst,
+ FIELD_dsp340050b49a6c_fld2174Inst,
+ FIELD_dsp340050b49a6c_fld2175Inst,
+ FIELD_dsp340050b49a6c_fld2177Inst,
+ FIELD_dsp340050b49a6c_fld2178Inst,
+ FIELD_dsp340050b49a6c_fld2179Inst,
+ FIELD_dsp340050b49a6c_fld2180Inst,
+ FIELD_dsp340050b49a6c_fld2181Inst,
+ FIELD_dsp340050b49a6c_fld2182Inst,
+ FIELD_dsp340050b49a6c_fld2183Inst,
+ FIELD_dsp340050b49a6c_fld2184Inst,
+ FIELD_dsp340050b49a6c_fld2185Inst,
+ FIELD_dsp340050b49a6c_fld2186Inst,
+ FIELD_dsp340050b49a6c_fld2187Inst,
+ FIELD_dsp340050b49a6c_fld2188Inst,
+ FIELD_dsp340050b49a6c_fld2189Inst,
+ FIELD_dsp340050b49a6c_fld2190Inst,
+ FIELD_dsp340050b49a6c_fld2191Inst,
+ FIELD_dsp340050b49a6c_fld2192Inst,
+ FIELD_dsp340050b49a6c_fld2193Inst,
+ FIELD_dsp340050b49a6c_fld2194Inst,
+ FIELD_dsp340050b49a6c_fld2195Inst,
+ FIELD_dsp340050b49a6c_fld2196Inst,
+ FIELD_dsp340050b49a6c_fld2197Inst,
+ FIELD_dsp340050b49a6c_fld2198Inst,
+ FIELD_dsp340050b49a6c_fld2199Inst,
+ FIELD_dsp340050b49a6c_fld2200Inst,
+ FIELD_dsp340050b49a6c_fld2201Inst,
+ FIELD_dsp340050b49a6c_fld2202Inst,
+ FIELD_dsp340050b49a6c_fld2203Inst,
+ FIELD_dsp340050b49a6c_fld2204Inst,
+ FIELD_dsp340050b49a6c_fld2205Inst,
+ FIELD_dsp340050b49a6c_fld2206Inst,
+ FIELD_dsp340050b49a6c_fld2207Inst,
+ FIELD_dsp340050b49a6c_fld2208Inst,
+ FIELD_dsp340050b49a6c_fld2209Inst,
+ FIELD_dsp340050b49a6c_fld2210Inst,
+ FIELD_dsp340050b49a6c_fld2211Inst,
+ FIELD_dsp340050b49a6c_fld2212Inst,
+ FIELD_dsp340050b49a6c_fld2213Inst,
+ FIELD_dsp340050b49a6c_fld2214Inst,
+ FIELD_dsp340050b49a6c_fld2215Inst,
+ FIELD_dsp340050b49a6c_fld2216Inst,
+ FIELD_dsp340050b49a6c_fld2217Inst,
+ FIELD_dsp340050b49a6c_fld2218Inst,
+ FIELD_dsp340050b49a6c_fld2219Inst,
+ FIELD_dsp340050b49a6c_fld2220Inst,
+ FIELD_dsp340050b49a6c_fld2221Inst,
+ FIELD_dsp340050b49a6c_fld2222Inst,
+ FIELD_dsp340050b49a6c_fld2223Inst,
+ FIELD_dsp340050b49a6c_fld2224Inst,
+ FIELD_dsp340050b49a6c_fld2225Inst,
+ FIELD_dsp340050b49a6c_fld2226Inst,
+ FIELD_dsp340050b49a6c_fld2227Inst,
+ FIELD_dsp340050b49a6c_fld2228Inst,
+ FIELD_dsp340050b49a6c_fld2229Inst,
+ FIELD_dsp340050b49a6c_fld2230Inst,
+ FIELD_dsp340050b49a6c_fld2231Inst,
+ FIELD_dsp340050b49a6c_fld2232Inst,
+ FIELD_dsp340050b49a6c_fld2234Inst,
+ FIELD_dsp340050b49a6c_fld2235Inst,
+ FIELD_dsp340050b49a6c_fld2236Inst,
+ FIELD_dsp340050b49a6c_fld2237Inst,
+ FIELD_dsp340050b49a6c_fld2238Inst,
+ FIELD_dsp340050b49a6c_fld2239Inst,
+ FIELD_dsp340050b49a6c_fld2240Inst,
+ FIELD_dsp340050b49a6c_fld2241Inst,
+ FIELD_dsp340050b49a6c_fld2242Inst,
+ FIELD_dsp340050b49a6c_fld2243Inst,
+ FIELD_dsp340050b49a6c_fld2244Inst,
+ FIELD_dsp340050b49a6c_fld2245Inst,
+ FIELD_dsp340050b49a6c_fld2246Inst,
+ FIELD_dsp340050b49a6c_fld2247Inst,
+ FIELD_dsp340050b49a6c_fld2248Inst,
+ FIELD_dsp340050b49a6c_fld2249Inst,
+ FIELD_dsp340050b49a6c_fld2250Inst,
+ FIELD_dsp340050b49a6c_fld2251Inst,
+ FIELD_dsp340050b49a6c_fld2252Inst,
+ FIELD_dsp340050b49a6c_fld2253Inst,
+ FIELD_dsp340050b49a6c_fld2254,
+ FIELD_dsp340050b49a6c_fld2255Inst,
+ FIELD_dsp340050b49a6c_fld2257Inst,
+ FIELD_dsp340050b49a6c_fld3627Inst,
+ FIELD_dsp340050b49a6c_fld3630Inst,
+ FIELD_dsp340050b49a6c_fld3631Inst,
+ FIELD_dsp340050b49a6c_fld3633Inst,
+ FIELD_dsp340050b49a6c_fld3634,
+ FIELD_dsp340050b49a6c_fld3635Inst,
+ FIELD_dsp340050b49a6c_fld3636Inst,
+ FIELD_dsp340050b49a6c_fld3637Inst,
+ FIELD_dsp340050b49a6c_fld3638Inst,
+ FIELD_dsp340050b49a6c_fld3639Inst,
+ FIELD_dsp340050b49a6c_fld3640Inst,
+ FIELD_dsp340050b49a6c_fld3642Inst,
+ FIELD_dsp340050b49a6c_fld3643Inst,
+ FIELD_dsp340050b49a6c_fld3644Inst,
+ FIELD_dsp340050b49a6c_fld3645Inst,
+ FIELD_dsp340050b49a6c_fld3647Inst,
+ FIELD_dsp340050b49a6c_fld3648Inst,
+ FIELD_dsp340050b49a6c_fld3649Inst,
+ FIELD_dsp340050b49a6c_fld3650Inst,
+ FIELD_dsp340050b49a6c_fld3651Inst,
+ FIELD_dsp340050b49a6c_fld3653Inst,
+ FIELD_dsp340050b49a6c_fld3654Inst,
+ FIELD_dsp340050b49a6c_fld3655Inst,
+ FIELD_dsp340050b49a6c_fld3656Inst,
+ FIELD_dsp340050b49a6c_fld3657Inst,
+ FIELD_dsp340050b49a6c_fld3658Inst,
+ FIELD_dsp340050b49a6c_fld3659Inst,
+ FIELD_dsp340050b49a6c_fld3660Inst,
+ FIELD_dsp340050b49a6c_fld3661Inst,
+ FIELD_dsp340050b49a6c_fld3662Inst,
+ FIELD_op0_s3,
+ FIELD_dsp340050b49a6c_fld2025,
+ FIELD_dsp340050b49a6c_fld2027,
+ FIELD_dsp340050b49a6c_fld2258GP_slot2,
+ FIELD_dsp340050b49a6c_fld2259GP_slot2,
+ FIELD_dsp340050b49a6c_fld2260GP_slot2,
+ FIELD_dsp340050b49a6c_fld2261GP_slot2,
+ FIELD_dsp340050b49a6c_fld2262GP_slot2,
+ FIELD_dsp340050b49a6c_fld2263GP_slot2,
+ FIELD_dsp340050b49a6c_fld2264GP_slot2,
+ FIELD_dsp340050b49a6c_fld2266GP_slot2,
+ FIELD_dsp340050b49a6c_fld2267GP_slot2,
+ FIELD_dsp340050b49a6c_fld2268GP_slot2,
+ FIELD_dsp340050b49a6c_fld2269GP_slot2,
+ FIELD_dsp340050b49a6c_fld2270GP_slot2,
+ FIELD_dsp340050b49a6c_fld2271GP_slot2,
+ FIELD_dsp340050b49a6c_fld2272,
+ FIELD_dsp340050b49a6c_fld2273GP_slot2,
+ FIELD_dsp340050b49a6c_fld2274GP_slot2,
+ FIELD_dsp340050b49a6c_fld2275GP_slot2,
+ FIELD_dsp340050b49a6c_fld2277GP_slot2,
+ FIELD_dsp340050b49a6c_fld2278GP_slot2,
+ FIELD_dsp340050b49a6c_fld2279GP_slot2,
+ FIELD_dsp340050b49a6c_fld2280GP_slot2,
+ FIELD_dsp340050b49a6c_fld2281GP_slot2,
+ FIELD_dsp340050b49a6c_fld2282GP_slot2,
+ FIELD_dsp340050b49a6c_fld2283GP_slot2,
+ FIELD_dsp340050b49a6c_fld2284GP_slot2,
+ FIELD_dsp340050b49a6c_fld2286GP_slot2,
+ FIELD_dsp340050b49a6c_fld2287GP_slot2,
+ FIELD_dsp340050b49a6c_fld2288GP_slot2,
+ FIELD_dsp340050b49a6c_fld2289GP_slot2,
+ FIELD_dsp340050b49a6c_fld2290GP_slot2,
+ FIELD_dsp340050b49a6c_fld2291GP_slot2,
+ FIELD_dsp340050b49a6c_fld2292GP_slot2,
+ FIELD_dsp340050b49a6c_fld2293GP_slot2,
+ FIELD_dsp340050b49a6c_fld2294GP_slot2,
+ FIELD_dsp340050b49a6c_fld2295GP_slot2,
+ FIELD_dsp340050b49a6c_fld2296GP_slot2,
+ FIELD_dsp340050b49a6c_fld2297GP_slot2,
+ FIELD_dsp340050b49a6c_fld2298GP_slot2,
+ FIELD_dsp340050b49a6c_fld2299GP_slot2,
+ FIELD_dsp340050b49a6c_fld2300GP_slot2,
+ FIELD_dsp340050b49a6c_fld2301GP_slot2,
+ FIELD_dsp340050b49a6c_fld2302,
+ FIELD_dsp340050b49a6c_fld2303GP_slot2,
+ FIELD_dsp340050b49a6c_fld2304GP_slot2,
+ FIELD_dsp340050b49a6c_fld2305,
+ FIELD_dsp340050b49a6c_fld2306GP_slot2,
+ FIELD_dsp340050b49a6c_fld2308GP_slot2,
+ FIELD_dsp340050b49a6c_fld2309GP_slot2,
+ FIELD_dsp340050b49a6c_fld2310GP_slot2,
+ FIELD_dsp340050b49a6c_fld2312GP_slot2,
+ FIELD_dsp340050b49a6c_fld2313,
+ FIELD_dsp340050b49a6c_fld2314GP_slot2,
+ FIELD_dsp340050b49a6c_fld2316GP_slot2,
+ FIELD_dsp340050b49a6c_fld2317GP_slot2,
+ FIELD_dsp340050b49a6c_fld2318GP_slot2,
+ FIELD_dsp340050b49a6c_fld2319GP_slot2,
+ FIELD_dsp340050b49a6c_fld2320GP_slot2,
+ FIELD_dsp340050b49a6c_fld2321GP_slot2,
+ FIELD_dsp340050b49a6c_fld2322GP_slot2,
+ FIELD_dsp340050b49a6c_fld2323GP_slot2,
+ FIELD_dsp340050b49a6c_fld2324GP_slot2,
+ FIELD_dsp340050b49a6c_fld2325GP_slot2,
+ FIELD_dsp340050b49a6c_fld2326GP_slot2,
+ FIELD_dsp340050b49a6c_fld2327GP_slot2,
+ FIELD_dsp340050b49a6c_fld2328GP_slot2,
+ FIELD_dsp340050b49a6c_fld2329GP_slot2,
+ FIELD_dsp340050b49a6c_fld2330GP_slot2,
+ FIELD_dsp340050b49a6c_fld2331GP_slot2,
+ FIELD_dsp340050b49a6c_fld2332GP_slot2,
+ FIELD_dsp340050b49a6c_fld2333GP_slot2,
+ FIELD_dsp340050b49a6c_fld2334GP_slot2,
+ FIELD_dsp340050b49a6c_fld2335GP_slot2,
+ FIELD_dsp340050b49a6c_fld2336GP_slot2,
+ FIELD_dsp340050b49a6c_fld2337GP_slot2,
+ FIELD_dsp340050b49a6c_fld2338GP_slot2,
+ FIELD_dsp340050b49a6c_fld2339GP_slot2,
+ FIELD_dsp340050b49a6c_fld2340GP_slot2,
+ FIELD_dsp340050b49a6c_fld2341GP_slot2,
+ FIELD_dsp340050b49a6c_fld2342GP_slot2,
+ FIELD_dsp340050b49a6c_fld2343GP_slot2,
+ FIELD_dsp340050b49a6c_fld2344GP_slot2,
+ FIELD_dsp340050b49a6c_fld2345GP_slot2,
+ FIELD_dsp340050b49a6c_fld2346GP_slot2,
+ FIELD_dsp340050b49a6c_fld2347GP_slot2,
+ FIELD_dsp340050b49a6c_fld2348GP_slot2,
+ FIELD_dsp340050b49a6c_fld2349GP_slot2,
+ FIELD_dsp340050b49a6c_fld2350GP_slot2,
+ FIELD_dsp340050b49a6c_fld2351GP_slot2,
+ FIELD_dsp340050b49a6c_fld2352GP_slot2,
+ FIELD_dsp340050b49a6c_fld2353GP_slot2,
+ FIELD_dsp340050b49a6c_fld2354GP_slot2,
+ FIELD_dsp340050b49a6c_fld2355GP_slot2,
+ FIELD_dsp340050b49a6c_fld2356GP_slot2,
+ FIELD_dsp340050b49a6c_fld2357GP_slot2,
+ FIELD_dsp340050b49a6c_fld2358GP_slot2,
+ FIELD_dsp340050b49a6c_fld2359GP_slot2,
+ FIELD_dsp340050b49a6c_fld2361GP_slot2,
+ FIELD_dsp340050b49a6c_fld2362GP_slot2,
+ FIELD_dsp340050b49a6c_fld2364GP_slot2,
+ FIELD_dsp340050b49a6c_fld2366GP_slot2,
+ FIELD_dsp340050b49a6c_fld2368GP_slot2,
+ FIELD_dsp340050b49a6c_fld2369GP_slot2,
+ FIELD_dsp340050b49a6c_fld2370GP_slot2,
+ FIELD_dsp340050b49a6c_fld2371GP_slot2,
+ FIELD_dsp340050b49a6c_fld2372GP_slot2,
+ FIELD_dsp340050b49a6c_fld2373GP_slot2,
+ FIELD_dsp340050b49a6c_fld2374GP_slot2,
+ FIELD_dsp340050b49a6c_fld2375GP_slot2,
+ FIELD_dsp340050b49a6c_fld2376GP_slot2,
+ FIELD_dsp340050b49a6c_fld2378GP_slot2,
+ FIELD_dsp340050b49a6c_fld2379GP_slot2,
+ FIELD_dsp340050b49a6c_fld2381GP_slot2,
+ FIELD_dsp340050b49a6c_fld2383GP_slot2,
+ FIELD_dsp340050b49a6c_fld2384,
+ FIELD_dsp340050b49a6c_fld2385GP_slot2,
+ FIELD_dsp340050b49a6c_fld2386,
+ FIELD_dsp340050b49a6c_fld2387GP_slot2,
+ FIELD_dsp340050b49a6c_fld2388GP_slot2,
+ FIELD_dsp340050b49a6c_fld2389GP_slot2,
+ FIELD_dsp340050b49a6c_fld3663GP_slot2,
+ FIELD_dsp340050b49a6c_fld3664GP_slot2,
+ FIELD_dsp340050b49a6c_fld3665GP_slot2,
+ FIELD_dsp340050b49a6c_fld3666,
+ FIELD_dsp340050b49a6c_fld3667GP_slot2,
+ FIELD_dsp340050b49a6c_fld3668GP_slot2,
+ FIELD_dsp340050b49a6c_fld3669GP_slot2,
+ FIELD_dsp340050b49a6c_fld3670GP_slot2,
+ FIELD_dsp340050b49a6c_fld3671GP_slot2,
+ FIELD_dsp340050b49a6c_fld3673GP_slot2,
+ FIELD_dsp340050b49a6c_fld3674GP_slot2,
+ FIELD_dsp340050b49a6c_fld3675GP_slot2,
+ FIELD_dsp340050b49a6c_fld3676GP_slot2,
+ FIELD_dsp340050b49a6c_fld3678GP_slot2,
+ FIELD_dsp340050b49a6c_fld3679GP_slot2,
+ FIELD_dsp340050b49a6c_fld3680GP_slot2,
+ FIELD_op0_s4,
+ FIELD_dsp340050b49a6c_fld2026,
+ FIELD_dsp340050b49a6c_fld2031,
+ FIELD_dsp340050b49a6c_fld2394GP_slot1,
+ FIELD_dsp340050b49a6c_fld2395GP_slot1,
+ FIELD_dsp340050b49a6c_fld2397GP_slot1,
+ FIELD_dsp340050b49a6c_fld2398GP_slot1,
+ FIELD_dsp340050b49a6c_fld2399GP_slot1,
+ FIELD_dsp340050b49a6c_fld2400GP_slot1,
+ FIELD_dsp340050b49a6c_fld2402GP_slot1,
+ FIELD_dsp340050b49a6c_fld2403GP_slot1,
+ FIELD_dsp340050b49a6c_fld2405GP_slot1,
+ FIELD_dsp340050b49a6c_fld3681GP_slot1,
+ FIELD_dsp340050b49a6c_fld3683GP_slot1,
+ FIELD_dsp340050b49a6c_fld3684GP_slot1,
+ FIELD_dsp340050b49a6c_fld3686GP_slot1,
+ FIELD_op0_s5,
+ FIELD_dsp340050b49a6c_fld2058,
+ FIELD_dsp340050b49a6c_fld2067,
+ FIELD_dsp340050b49a6c_fld2407GP_slot0,
+ FIELD_dsp340050b49a6c_fld2409GP_slot0,
+ FIELD_dsp340050b49a6c_fld2410GP_slot0,
+ FIELD_dsp340050b49a6c_fld2411GP_slot0,
+ FIELD_dsp340050b49a6c_fld2412GP_slot0,
+ FIELD_dsp340050b49a6c_fld2413GP_slot0,
+ FIELD_dsp340050b49a6c_fld2415GP_slot0,
+ FIELD_dsp340050b49a6c_fld2416GP_slot0,
+ FIELD_dsp340050b49a6c_fld2417GP_slot0,
+ FIELD_dsp340050b49a6c_fld2418GP_slot0,
+ FIELD_dsp340050b49a6c_fld2419GP_slot0,
+ FIELD_dsp340050b49a6c_fld2420GP_slot0,
+ FIELD_dsp340050b49a6c_fld2422GP_slot0,
+ FIELD_dsp340050b49a6c_fld2423GP_slot0,
+ FIELD_dsp340050b49a6c_fld2424GP_slot0,
+ FIELD_dsp340050b49a6c_fld2425GP_slot0,
+ FIELD_dsp340050b49a6c_fld2426GP_slot0,
+ FIELD_dsp340050b49a6c_fld2427GP_slot0,
+ FIELD_dsp340050b49a6c_fld2429GP_slot0,
+ FIELD_dsp340050b49a6c_fld2430GP_slot0,
+ FIELD_dsp340050b49a6c_fld2431GP_slot0,
+ FIELD_dsp340050b49a6c_fld2432GP_slot0,
+ FIELD_dsp340050b49a6c_fld2433GP_slot0,
+ FIELD_dsp340050b49a6c_fld2434GP_slot0,
+ FIELD_dsp340050b49a6c_fld2435GP_slot0,
+ FIELD_dsp340050b49a6c_fld2436GP_slot0,
+ FIELD_dsp340050b49a6c_fld2437GP_slot0,
+ FIELD_dsp340050b49a6c_fld2438GP_slot0,
+ FIELD_dsp340050b49a6c_fld2439GP_slot0,
+ FIELD_dsp340050b49a6c_fld2440GP_slot0,
+ FIELD_dsp340050b49a6c_fld2441GP_slot0,
+ FIELD_dsp340050b49a6c_fld2443GP_slot0,
+ FIELD_dsp340050b49a6c_fld2444GP_slot0,
+ FIELD_dsp340050b49a6c_fld2445,
+ FIELD_dsp340050b49a6c_fld2447GP_slot0,
+ FIELD_dsp340050b49a6c_fld2448,
+ FIELD_dsp340050b49a6c_fld2449GP_slot0,
+ FIELD_dsp340050b49a6c_fld2451GP_slot0,
+ FIELD_dsp340050b49a6c_fld2452GP_slot0,
+ FIELD_dsp340050b49a6c_fld2453GP_slot0,
+ FIELD_dsp340050b49a6c_fld2454GP_slot0,
+ FIELD_dsp340050b49a6c_fld2455GP_slot0,
+ FIELD_dsp340050b49a6c_fld2456GP_slot0,
+ FIELD_dsp340050b49a6c_fld2457GP_slot0,
+ FIELD_dsp340050b49a6c_fld2458GP_slot0,
+ FIELD_dsp340050b49a6c_fld2459GP_slot0,
+ FIELD_dsp340050b49a6c_fld2460GP_slot0,
+ FIELD_dsp340050b49a6c_fld2461GP_slot0,
+ FIELD_dsp340050b49a6c_fld2462,
+ FIELD_dsp340050b49a6c_fld2463GP_slot0,
+ FIELD_dsp340050b49a6c_fld2464GP_slot0,
+ FIELD_dsp340050b49a6c_fld2465GP_slot0,
+ FIELD_dsp340050b49a6c_fld2466GP_slot0,
+ FIELD_dsp340050b49a6c_fld2467GP_slot0,
+ FIELD_dsp340050b49a6c_fld2468GP_slot0,
+ FIELD_dsp340050b49a6c_fld2470GP_slot0,
+ FIELD_dsp340050b49a6c_fld2471GP_slot0,
+ FIELD_dsp340050b49a6c_fld2472GP_slot0,
+ FIELD_dsp340050b49a6c_fld2473GP_slot0,
+ FIELD_dsp340050b49a6c_fld2474GP_slot0,
+ FIELD_dsp340050b49a6c_fld2475GP_slot0,
+ FIELD_dsp340050b49a6c_fld2477GP_slot0,
+ FIELD_dsp340050b49a6c_fld2479GP_slot0,
+ FIELD_dsp340050b49a6c_fld2480GP_slot0,
+ FIELD_dsp340050b49a6c_fld2481GP_slot0,
+ FIELD_dsp340050b49a6c_fld2482GP_slot0,
+ FIELD_dsp340050b49a6c_fld2483GP_slot0,
+ FIELD_dsp340050b49a6c_fld2484GP_slot0,
+ FIELD_dsp340050b49a6c_fld2485GP_slot0,
+ FIELD_dsp340050b49a6c_fld2486GP_slot0,
+ FIELD_dsp340050b49a6c_fld2487GP_slot0,
+ FIELD_dsp340050b49a6c_fld2488GP_slot0,
+ FIELD_dsp340050b49a6c_fld2489GP_slot0,
+ FIELD_dsp340050b49a6c_fld2490GP_slot0,
+ FIELD_dsp340050b49a6c_fld2491GP_slot0,
+ FIELD_dsp340050b49a6c_fld2492GP_slot0,
+ FIELD_dsp340050b49a6c_fld2493GP_slot0,
+ FIELD_dsp340050b49a6c_fld2494GP_slot0,
+ FIELD_dsp340050b49a6c_fld2495GP_slot0,
+ FIELD_dsp340050b49a6c_fld2496GP_slot0,
+ FIELD_dsp340050b49a6c_fld2497GP_slot0,
+ FIELD_dsp340050b49a6c_fld2498GP_slot0,
+ FIELD_dsp340050b49a6c_fld2499GP_slot0,
+ FIELD_dsp340050b49a6c_fld2500GP_slot0,
+ FIELD_dsp340050b49a6c_fld2501GP_slot0,
+ FIELD_dsp340050b49a6c_fld2502GP_slot0,
+ FIELD_dsp340050b49a6c_fld2503GP_slot0,
+ FIELD_dsp340050b49a6c_fld2504GP_slot0,
+ FIELD_dsp340050b49a6c_fld2505GP_slot0,
+ FIELD_dsp340050b49a6c_fld2506GP_slot0,
+ FIELD_dsp340050b49a6c_fld2507GP_slot0,
+ FIELD_dsp340050b49a6c_fld2508GP_slot0,
+ FIELD_dsp340050b49a6c_fld2509GP_slot0,
+ FIELD_dsp340050b49a6c_fld2510GP_slot0,
+ FIELD_dsp340050b49a6c_fld2512GP_slot0,
+ FIELD_dsp340050b49a6c_fld2514GP_slot0,
+ FIELD_dsp340050b49a6c_fld2515GP_slot0,
+ FIELD_dsp340050b49a6c_fld2516GP_slot0,
+ FIELD_dsp340050b49a6c_fld2517GP_slot0,
+ FIELD_dsp340050b49a6c_fld2518GP_slot0,
+ FIELD_dsp340050b49a6c_fld2519GP_slot0,
+ FIELD_dsp340050b49a6c_fld2520GP_slot0,
+ FIELD_dsp340050b49a6c_fld2521GP_slot0,
+ FIELD_dsp340050b49a6c_fld2523GP_slot0,
+ FIELD_dsp340050b49a6c_fld2524GP_slot0,
+ FIELD_dsp340050b49a6c_fld2526GP_slot0,
+ FIELD_dsp340050b49a6c_fld2527GP_slot0,
+ FIELD_dsp340050b49a6c_fld2528GP_slot0,
+ FIELD_dsp340050b49a6c_fld2529GP_slot0,
+ FIELD_dsp340050b49a6c_fld2530,
+ FIELD_dsp340050b49a6c_fld2531GP_slot0,
+ FIELD_dsp340050b49a6c_fld3688GP_slot0,
+ FIELD_dsp340050b49a6c_fld3689GP_slot0,
+ FIELD_dsp340050b49a6c_fld3690GP_slot0,
+ FIELD_dsp340050b49a6c_fld3691GP_slot0,
+ FIELD_dsp340050b49a6c_fld3692GP_slot0,
+ FIELD_dsp340050b49a6c_fld3693GP_slot0,
+ FIELD_dsp340050b49a6c_fld3695GP_slot0,
+ FIELD_dsp340050b49a6c_fld3696GP_slot0,
+ FIELD_dsp340050b49a6c_fld3697GP_slot0,
+ FIELD_dsp340050b49a6c_fld3698GP_slot0,
+ FIELD_dsp340050b49a6c_fld3699GP_slot0,
+ FIELD_dsp340050b49a6c_fld3700GP_slot0,
+ FIELD_dsp340050b49a6c_fld3702GP_slot0,
+ FIELD_dsp340050b49a6c_fld3703GP_slot0,
+ FIELD_dsp340050b49a6c_fld3705GP_slot0,
+ FIELD_dsp340050b49a6c_fld3706GP_slot0,
+ FIELD_op0_s6,
+ FIELD_dsp340050b49a6c_fld2532DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2533DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2534DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2535DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2536DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2537DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2538DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2539DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2540DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2541DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2542DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2543DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2544DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2545DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2546DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2547DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2548DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2549DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2550DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2551DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2552DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2553DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2554DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2555DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2556DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2557DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2558DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2559DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2560DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2561DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2562DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2563DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2564DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2565DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2566DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2567DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2568DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2569DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2571DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2572DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2573DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2574DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2575DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2576DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2577DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2578,
+ FIELD_dsp340050b49a6c_fld2579DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2580DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2581DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2582DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2583DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2584DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2585DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2586DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2587DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2588DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2589DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2590DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2591DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2592DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2595DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2596DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2598DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2599DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2601DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2602DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2604DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2605,
+ FIELD_dsp340050b49a6c_fld2606DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2608DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2609DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2610DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2611DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2612,
+ FIELD_dsp340050b49a6c_fld2613DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2614,
+ FIELD_dsp340050b49a6c_fld2615DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2616DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2617DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2618DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2619DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2620DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2621DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2622DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2623DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2624DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2625,
+ FIELD_dsp340050b49a6c_fld2626DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2628DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2630DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2632DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2633DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2635DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2636DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2637DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2640DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2641DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2642DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2643DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2644DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2645DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2646DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2647DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2648DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2649DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2650DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2651DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2652DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2654DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2655DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2656DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2657DOT_slot2,
+ FIELD_dsp340050b49a6c_fld2658DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3708DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3709DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3710DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3711DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3712,
+ FIELD_dsp340050b49a6c_fld3713DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3714DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3715,
+ FIELD_dsp340050b49a6c_fld3716DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3717DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3718,
+ FIELD_dsp340050b49a6c_fld3719DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3721DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3722,
+ FIELD_dsp340050b49a6c_fld3723DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3724DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3725DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3726DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3727DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3728DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3729DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3731DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3732DOT_slot2,
+ FIELD_dsp340050b49a6c_fld3733DOT_slot2,
+ FIELD_op0_s7,
+ FIELD_dsp340050b49a6c_fld3734DOT_slot1,
+ FIELD_op0_s8,
+ FIELD_dsp340050b49a6c_fld2068,
+ FIELD_dsp340050b49a6c_fld2666DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2667DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2668DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2669DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2671DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2672DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2673DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2674DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2675DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2676DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2677DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2678DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2679DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2680DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2681DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2682DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2683DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2684DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2685DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2686DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2688DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2689DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2690DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2692DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2693DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2695DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2697DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2699DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2700DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2701DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2702DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2703DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2704DOT_slot0,
+ FIELD_dsp340050b49a6c_fld2705DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3735DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3736,
+ FIELD_dsp340050b49a6c_fld3737DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3738DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3739DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3740DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3741DOT_slot0,
+ FIELD_dsp340050b49a6c_fld3742DOT_slot0,
+ FIELD_op0_s9,
+ FIELD_dsp340050b49a6c_fld2706PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2707PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2708PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2709PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2710PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2711PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2713PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2714PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2715PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2717PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2718PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2719PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2721PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2722PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2723PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2724PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2725PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2726PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2727PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2728PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2729PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2730PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2731PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2732PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2733PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2734PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2735PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2736PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2737,
+ FIELD_dsp340050b49a6c_fld2738PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2739PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2741PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2742PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2743PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2746PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2747PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2748PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2750PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2751PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2752PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2753PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2754PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2755PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2756PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2757PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2758PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2759PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2760PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2761PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2762PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2763PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2764PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2765PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2766PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2767PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2768PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2769PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2770PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2771PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2772PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2773PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2774PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2775PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2776PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2777PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2778PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2779PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2780PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2781PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2782PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2783PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2784PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2785PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2786PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2787PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2788PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2789PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2790PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2791PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2792PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2793PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2795PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2796PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2798PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2801PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2803PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2805PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2806PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2807PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2808PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2809PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2810PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2811PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2812PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2814PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2816PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2817,
+ FIELD_dsp340050b49a6c_fld2818PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2819,
+ FIELD_dsp340050b49a6c_fld2820PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2821PQ_slot2,
+ FIELD_dsp340050b49a6c_fld2823PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3744PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3745PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3746PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3747PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3748,
+ FIELD_dsp340050b49a6c_fld3749PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3750PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3751PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3752PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3753PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3754PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3756PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3757PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3758PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3759PQ_slot2,
+ FIELD_dsp340050b49a6c_fld3760PQ_slot2,
+ FIELD_op0_s10,
+ FIELD_dsp340050b49a6c_fld2825PQ_slot1,
+ FIELD_dsp340050b49a6c_fld2826PQ_slot1,
+ FIELD_dsp340050b49a6c_fld3761PQ_slot1,
+ FIELD_op0_s11,
+ FIELD_dsp340050b49a6c_fld2059,
+ FIELD_dsp340050b49a6c_fld2069,
+ FIELD_dsp340050b49a6c_fld2827PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2829PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2830PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2831PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2832PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2833PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2835PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2836PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2837PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2838PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2839PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2840PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2842PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2843PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2844PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2845PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2846PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2847PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2849PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2850PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2851PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2852PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2853PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2854PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2855PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2856PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2857PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2858PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2859PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2860PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2861PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2863PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2864PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2865,
+ FIELD_dsp340050b49a6c_fld2867PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2869PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2871PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2872PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2873PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2874PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2875PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2876PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2877PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2878PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2879PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2880PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2881PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2882,
+ FIELD_dsp340050b49a6c_fld2883PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2884PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2885PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2886PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2887PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2888PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2890PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2891PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2892PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2893PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2894PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2895PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2897PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2899PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2900PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2901PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2902PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2903PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2904PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2905PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2906PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2907PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2908PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2909PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2910PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2911PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2912PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2913PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2914PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2915PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2916PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2917PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2918PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2919PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2920PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2921PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2922PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2923PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2924PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2925PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2926PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2927PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2928PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2929PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2930PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2932PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2934PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2935PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2936PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2937PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2939PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2940,
+ FIELD_dsp340050b49a6c_fld2941PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2942PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2943PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2945PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2946PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2947PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2948PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2949PQ_slot0,
+ FIELD_dsp340050b49a6c_fld2950PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3763PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3764PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3765PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3766PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3767PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3768PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3769PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3770PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3771PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3772,
+ FIELD_dsp340050b49a6c_fld3773PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3775PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3776PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3777PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3778PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3779PQ_slot0,
+ FIELD_dsp340050b49a6c_fld3780PQ_slot0,
+ FIELD_op0_s12,
+ FIELD_dsp340050b49a6c_fld2953ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2954ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2955ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2956ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2957ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2958ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2959ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2960ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2963ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2964ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2966ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld2967ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3782ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3783ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3784ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3785ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3786ACC2_slot2,
+ FIELD_dsp340050b49a6c_fld3788ACC2_slot2,
+ FIELD_op0_s13,
+ FIELD_dsp340050b49a6c_fld2028,
+ FIELD_dsp340050b49a6c_fld2075,
+ FIELD_dsp340050b49a6c_fld2968ACC2_slot1,
+ FIELD_dsp340050b49a6c_fld2969ACC2_slot1,
+ FIELD_dsp340050b49a6c_fld3790ACC2_slot1,
+ FIELD_dsp340050b49a6c_fld3793ACC2_slot1,
+ FIELD_op0_s14,
+ FIELD_dsp340050b49a6c_fld2973ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2974ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2975ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2976ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2977ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2980ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2981ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2982ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2984ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2985ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2987ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2989ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld2990ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3795ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3796ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3797,
+ FIELD_dsp340050b49a6c_fld3798ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3799ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3800ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3801ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3802ACC2_slot0,
+ FIELD_dsp340050b49a6c_fld3803ACC2_slot0,
+ FIELD_op0_s15,
+ FIELD_dsp340050b49a6c_fld2991SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2992SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2993SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2994SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2995SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2996SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2997SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2998SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld2999SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3000SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3001SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3002SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3003SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3004SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3005SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3006SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3007SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3008SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3009SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3010SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3011SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3012SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3013SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3014SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3015SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3016SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3017SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3018SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3019SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3020SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3021SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3022SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3023SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3024SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3025SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3026SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3027SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3028SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3030SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3031SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3032SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3033SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3034SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3035SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3036SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3038SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3039SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3040SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3043SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3044SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3046SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3047SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3048,
+ FIELD_dsp340050b49a6c_fld3049SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3050SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3051SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3052SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3053SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3054SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3055SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3056SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3058SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3059SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3061SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3062,
+ FIELD_dsp340050b49a6c_fld3063SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3065SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3066SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3067SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3068SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3069SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3070,
+ FIELD_dsp340050b49a6c_fld3071SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3072SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3073SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3074SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3075SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3076SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3077SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3078SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3079SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3080SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3081SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3082SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3084SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3085SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3087SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3088SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3090SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3091SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3092SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3093SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3096SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3097SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3098SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3099SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3100SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3101SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3102SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3104SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3105SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3106SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3107SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3108SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3109SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3110SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3111SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3113SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3114SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3115SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3116SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3805SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3806SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3807SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3808,
+ FIELD_dsp340050b49a6c_fld3809SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3810SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3812SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3813SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3814SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3816SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3817,
+ FIELD_dsp340050b49a6c_fld3818SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3819SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3821SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3822SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3823SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3824SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3825SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3826SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3827SMOD_slot2,
+ FIELD_dsp340050b49a6c_fld3828SMOD_slot2,
+ FIELD_op0_s16,
+ FIELD_dsp340050b49a6c_fld2033,
+ FIELD_dsp340050b49a6c_fld2080,
+ FIELD_dsp340050b49a6c_fld3117SMOD_slot1,
+ FIELD_dsp340050b49a6c_fld3118SMOD_slot1,
+ FIELD_dsp340050b49a6c_fld3829SMOD_slot1,
+ FIELD_op0_s17,
+ FIELD_dsp340050b49a6c_fld3119SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3120SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3121SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3122SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3123SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3125SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3126SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3127SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3128SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3129SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3130SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3131SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3132SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3133SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3134SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3135SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3136SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3137SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3138SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3139SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3140SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3141SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3142SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3143SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3144SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3145SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3146SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3148SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3149SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3150,
+ FIELD_dsp340050b49a6c_fld3152SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3153SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3155SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3156SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3157SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3158SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3159SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3160SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3161SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3164SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3165SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3166SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3168SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3170SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3171SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3172SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3173SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3174SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3175SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3176SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3177SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3178SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3179SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3180SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3181SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3182SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3184SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3186SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3188SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3832SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3833SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3834,
+ FIELD_dsp340050b49a6c_fld3836SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3837SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3838SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3841SMOD_slot0,
+ FIELD_dsp340050b49a6c_fld3842SMOD_slot0,
+ FIELD_op0_s18,
+ FIELD_dsp340050b49a6c_fld2074,
+ FIELD_dsp340050b49a6c_fld3191LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3192LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3193LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3194LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3195LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3196LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3197LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3198LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3199LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3200LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3201LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3202LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3203LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3204LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3205,
+ FIELD_dsp340050b49a6c_fld3206,
+ FIELD_dsp340050b49a6c_fld3207LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3208LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3210LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3212,
+ FIELD_dsp340050b49a6c_fld3213LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3214,
+ FIELD_dsp340050b49a6c_fld3215LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3216LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3217,
+ FIELD_dsp340050b49a6c_fld3218LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3220LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3221LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3222LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3224LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3225,
+ FIELD_dsp340050b49a6c_fld3226LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3228LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3230,
+ FIELD_dsp340050b49a6c_fld3231LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3232LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3233,
+ FIELD_dsp340050b49a6c_fld3234LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3235LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3236,
+ FIELD_dsp340050b49a6c_fld3237LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3238LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3240LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3241LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3242LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3243LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3244LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3245LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3246,
+ FIELD_dsp340050b49a6c_fld3247LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3843LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3844,
+ FIELD_dsp340050b49a6c_fld3845LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3847LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3848LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3849LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3850LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3851LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3853LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3855LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3856LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3857LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3859LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3860LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3861LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3862,
+ FIELD_dsp340050b49a6c_fld3863LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3864LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3865LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3866LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3867LLR_slot2,
+ FIELD_dsp340050b49a6c_fld3868,
+ FIELD_op0_s19,
+ FIELD_dsp340050b49a6c_fld2034,
+ FIELD_dsp340050b49a6c_fld3248LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3250LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3251LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3252LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3253LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3254LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3869LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3870,
+ FIELD_dsp340050b49a6c_fld3872LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3875LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3876LLR_slot1,
+ FIELD_dsp340050b49a6c_fld3878LLR_slot1,
+ FIELD_op0_s20,
+ FIELD_dsp340050b49a6c_fld2071,
+ FIELD_dsp340050b49a6c_fld3258LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3259LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3260LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3261LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3263LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3264LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3265LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3266LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3267LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3268LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3269LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3270LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3272LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3274LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3275LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3276LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3277LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3278LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3279LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3280LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3281LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3282LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3283LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3284LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3286LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3288LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3289LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3291LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3292LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3293LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3294LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3295LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3296LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3297LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3298LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3299LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3300LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3302LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3303LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3304LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3305LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3306LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3308LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3310LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3311LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3312LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3879LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3881LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3883LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3885LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3887LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3888LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3890LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3892LLR_slot0,
+ FIELD_dsp340050b49a6c_fld3893LLR_slot0,
+ FIELD_op0_s21,
+ FIELD_dsp340050b49a6c_fld3313DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3314,
+ FIELD_dsp340050b49a6c_fld3315DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3316DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3317DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3318,
+ FIELD_dsp340050b49a6c_fld3319DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3320DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3321DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3322DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3323DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3324DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3325DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3326DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3327DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3328DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3329DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3330DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3331DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3332DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3333DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3334DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3335DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3336DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3337DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3339DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3340DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3341DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3342DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3345DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3347DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3348DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3349DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3350DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3353DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3354DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3356DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3358DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3360DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3361DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3362DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3363DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3364,
+ FIELD_dsp340050b49a6c_fld3365DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3366DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3367DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3368DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3369DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3370DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3371DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3372DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3373DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3374DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3375DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3376DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3377DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3378DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3379DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3380DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3381DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3382DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3384DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3385DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3386DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3387DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3388DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3390DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3392DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3394DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3396DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3397DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3399DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3401DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3403DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3404DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3406DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3407,
+ FIELD_dsp340050b49a6c_fld3408DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3410,
+ FIELD_dsp340050b49a6c_fld3411DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3412DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3413DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3414DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3415DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3416DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3417DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3418DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3419DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3420DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3421DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3422DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3423DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3424DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3425DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3426DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3427DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3428DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3429DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3430DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3431DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3432DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3433DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3434DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3435DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3436DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3437DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3438DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3439DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3440DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3441DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3442DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3443DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3444DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3445DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3446DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3448DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3450DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3451DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3453DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3454DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3456DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3457DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3458DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3459DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3460DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3461DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3462DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3464DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3465DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3466,
+ FIELD_dsp340050b49a6c_fld3467DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3468DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3469DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3470DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3471DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3472DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3473DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3474DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3475DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3477DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3478DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3479DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3480DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3481DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3482DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3484DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3894DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3895DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3896DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3897DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3898DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3899DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3900,
+ FIELD_dsp340050b49a6c_fld3901DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3903DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3904DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3905DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3906DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3907DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3908DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3909DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3910DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3913DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3914DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3916DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3917DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3918DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3919DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3920DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3921DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3922DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3923DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3924DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3925DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3927DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3928DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3929DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3930DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3931DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3933DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3934DUAL_slot2,
+ FIELD_dsp340050b49a6c_fld3935DUAL_slot2,
+ FIELD_op0_s22,
+ FIELD_op0_s23,
+ FIELD_dsp340050b49a6c_fld2057,
+ FIELD_dsp340050b49a6c_fld2060,
+ FIELD_dsp340050b49a6c_fld2066,
+ FIELD_dsp340050b49a6c_fld2072,
+ FIELD_dsp340050b49a6c_fld2079,
+ FIELD_dsp340050b49a6c_fld3487DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3488DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3489DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3490DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3491DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3492DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3493DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3494DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3496DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3497DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3498DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3499DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3500DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3502DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3504DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3505DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3506DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3507DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3508DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3509DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3510DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3511DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3512DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3513DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3514DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3515DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3516DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3517DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3518DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3519DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3520DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3522DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3523DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3524DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3527DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3529DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3530DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3531,
+ FIELD_dsp340050b49a6c_fld3532DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3533DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3535DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3536DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3537DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3538DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3539DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3541DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3542DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3543DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3544DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3545DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3546DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3547DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3548DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3549DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3550DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3551DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3552,
+ FIELD_dsp340050b49a6c_fld3553DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3554DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3555DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3556DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3557DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3558DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3559DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3560DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3562DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3563DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3564DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3565DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3566DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3567DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3568DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3569DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3570DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3571DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3572DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3573DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3574DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3575DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3576DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3577DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3578DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3579DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3580DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3581DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3582DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3583DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3584,
+ FIELD_dsp340050b49a6c_fld3585DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3587DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3588DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3589DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3590DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3591DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3592DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3593DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3594DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3595DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3596DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3597DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3598DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3599DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3600DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3601DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3602,
+ FIELD_dsp340050b49a6c_fld3603DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3604DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3606DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3607DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3608DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3609DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3610,
+ FIELD_dsp340050b49a6c_fld3611DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3612DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3613DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3614DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3615DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3616DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3618DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3619,
+ FIELD_dsp340050b49a6c_fld3620DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3621DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3622DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3623DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3624DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3625DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3626DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3936DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3937DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3938DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3939DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3940DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3941DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3943DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3945DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3946DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3947DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3949DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3950DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3951DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3952DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3954DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3957DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3958DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3959DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3960DUAL_slot0,
+ FIELD_dsp340050b49a6c_fld3961DUAL_slot0,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__bt16,
+ FIELD__bs16,
+ FIELD__br16,
+ FIELD__brall
+};
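+/*
+ * The trailing FIELD__ar0 .. FIELD__brall entries appear to be
+ * synthetic fields backing the implicit operands defined further down
+ * (ar0, ar4, ar8, ar12 and the whole-file boolean views); they occupy
+ * no bits in the instruction word.
+ */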
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+
+};
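+/*
+ * No shared TIE functional units are modelled for this core, so the
+ * table is left empty (an empty initializer list is a GNU C extension,
+ * accepted by the compilers QEMU supports).
+ */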
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_BR,
+ REGFILE_FR,
+ REGFILE_ACU,
+ REGFILE_CM,
+ REGFILE_PQ,
+ REGFILE_BR2,
+ REGFILE_BR4,
+ REGFILE_BR8,
+ REGFILE_BR16
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "BR", "b", REGFILE_BR, 1, 16 },
+ { "FR", "f", REGFILE_FR, 32, 16 },
+ { "ACU", "ACU", REGFILE_ACU, 320, 8 },
+ { "CM", "CM", REGFILE_CM, 128, 16 },
+ { "PQ", "PQ", REGFILE_PQ, 256, 16 },
+ { "BR2", "b", REGFILE_BR, 2, 8 },
+ { "BR4", "b", REGFILE_BR, 4, 4 },
+ { "BR8", "b", REGFILE_BR, 8, 2 },
+ { "BR16", "b", REGFILE_BR, 16, 1 }
+};
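+/*
+ * Regfile rows are { name, short name, parent, bits per entry, number
+ * of entries }.  BR2/BR4/BR8/BR16 reuse REGFILE_BR as their parent:
+ * they are not separate register files but views of the sixteen 1-bit
+ * boolean registers taken 2, 4, 8 or 16 at a time, so width times
+ * count is 16 in each of those rows.
+ */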
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "INQ0_32_Empty", 1, 0, 0, 'i' },
+ { "INQ0_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 0, 'i' },
+ { "INQ0_32_NOTRDY", 1, 0, 0, 'i' },
+ { "INQ0_32_KILL", 1, 0, 0, 'o' },
+ { "INQ1_32_Empty", 1, 0, 1, 'i' },
+ { "INQ1_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 1, 'i' },
+ { "INQ1_32_NOTRDY", 1, 0, 1, 'i' },
+ { "INQ1_32_KILL", 1, 0, 1, 'o' },
+ { "INQ2_32_Empty", 1, 0, 2, 'i' },
+ { "INQ2_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 2, 'i' },
+ { "INQ2_32_NOTRDY", 1, 0, 2, 'i' },
+ { "INQ2_32_KILL", 1, 0, 2, 'o' },
+ { "INQ3_32_Empty", 1, 0, 3, 'i' },
+ { "INQ3_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 3, 'i' },
+ { "INQ3_32_NOTRDY", 1, 0, 3, 'i' },
+ { "INQ3_32_KILL", 1, 0, 3, 'o' },
+ { "OUTQ0_32_Full", 1, 0, 4, 'i' },
+ { "OUTQ0_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 4, 'o' },
+ { "OUTQ0_32_NOTRDY", 1, 0, 4, 'i' },
+ { "OUTQ0_32_KILL", 1, 0, 4, 'o' },
+ { "OUTQ1_32_Full", 1, 0, 5, 'i' },
+ { "OUTQ1_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 5, 'o' },
+ { "OUTQ1_32_NOTRDY", 1, 0, 5, 'i' },
+ { "OUTQ1_32_KILL", 1, 0, 5, 'o' },
+ { "OUTQ2_32_Full", 1, 0, 6, 'i' },
+ { "OUTQ2_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 6, 'o' },
+ { "OUTQ2_32_NOTRDY", 1, 0, 6, 'i' },
+ { "OUTQ2_32_KILL", 1, 0, 6, 'o' },
+ { "OUTQ3_32_Full", 1, 0, 7, 'i' },
+ { "OUTQ3_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 7, 'o' },
+ { "OUTQ3_32_NOTRDY", 1, 0, 7, 'i' },
+ { "OUTQ3_32_KILL", 1, 0, 7, 'o' },
+ { "OUTQ4_32_Full", 1, 0, 8, 'i' },
+ { "OUTQ4_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 8, 'o' },
+ { "OUTQ4_32_NOTRDY", 1, 0, 8, 'i' },
+ { "OUTQ4_32_KILL", 1, 0, 8, 'o' },
+ { "OUTQ5_32_Full", 1, 0, 9, 'i' },
+ { "OUTQ5_32", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 9, 'o' },
+ { "OUTQ5_32_NOTRDY", 1, 0, 9, 'i' },
+ { "OUTQ5_32_KILL", 1, 0, 9, 'o' },
+ { "SIGNALQ_Full", 1, 0, 10, 'i' },
+ { "SIGNALQ", 32, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 10, 'o' },
+ { "SIGNALQ_NOTRDY", 1, 0, 10, 'i' },
+ { "SIGNALQ_KILL", 1, 0, 10, 'o' },
+ { "INQ0_128_Empty", 1, 0, 11, 'i' },
+ { "INQ0_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 11, 'i' },
+ { "INQ0_128_NOTRDY", 1, 0, 11, 'i' },
+ { "INQ0_128_KILL", 1, 0, 11, 'o' },
+ { "INQ1_128_Empty", 1, 0, 12, 'i' },
+ { "INQ1_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 12, 'i' },
+ { "INQ1_128_NOTRDY", 1, 0, 12, 'i' },
+ { "INQ1_128_KILL", 1, 0, 12, 'o' },
+ { "INQ2_128_Empty", 1, 0, 13, 'i' },
+ { "INQ2_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 13, 'i' },
+ { "INQ2_128_NOTRDY", 1, 0, 13, 'i' },
+ { "INQ2_128_KILL", 1, 0, 13, 'o' },
+ { "INQ3_128_Empty", 1, 0, 14, 'i' },
+ { "INQ3_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 14, 'i' },
+ { "INQ3_128_NOTRDY", 1, 0, 14, 'i' },
+ { "INQ3_128_KILL", 1, 0, 14, 'o' },
+ { "INQ4_128_Empty", 1, 0, 15, 'i' },
+ { "INQ4_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 15, 'i' },
+ { "INQ4_128_NOTRDY", 1, 0, 15, 'i' },
+ { "INQ4_128_KILL", 1, 0, 15, 'o' },
+ { "INQ5_128_Empty", 1, 0, 16, 'i' },
+ { "INQ5_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 16, 'i' },
+ { "INQ5_128_NOTRDY", 1, 0, 16, 'i' },
+ { "INQ5_128_KILL", 1, 0, 16, 'o' },
+ { "OUTQ0_128_Full", 1, 0, 17, 'i' },
+ { "OUTQ0_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 17, 'o' },
+ { "OUTQ0_128_NOTRDY", 1, 0, 17, 'i' },
+ { "OUTQ0_128_KILL", 1, 0, 17, 'o' },
+ { "OUTQ1_128_Full", 1, 0, 18, 'i' },
+ { "OUTQ1_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 18, 'o' },
+ { "OUTQ1_128_NOTRDY", 1, 0, 18, 'i' },
+ { "OUTQ1_128_KILL", 1, 0, 18, 'o' },
+ { "OUTQ2_128_Full", 1, 0, 19, 'i' },
+ { "OUTQ2_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 19, 'o' },
+ { "OUTQ2_128_NOTRDY", 1, 0, 19, 'i' },
+ { "OUTQ2_128_KILL", 1, 0, 19, 'o' },
+ { "OUTQ3_128_Full", 1, 0, 20, 'i' },
+ { "OUTQ3_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 20, 'o' },
+ { "OUTQ3_128_NOTRDY", 1, 0, 20, 'i' },
+ { "OUTQ3_128_KILL", 1, 0, 20, 'o' },
+ { "OUTQ4_128_Full", 1, 0, 21, 'i' },
+ { "OUTQ4_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 21, 'o' },
+ { "OUTQ4_128_NOTRDY", 1, 0, 21, 'i' },
+ { "OUTQ4_128_KILL", 1, 0, 21, 'o' },
+ { "OUTQ5_128_Full", 1, 0, 22, 'i' },
+ { "OUTQ5_128", 128, XTENSA_INTERFACE_HAS_SIDE_EFFECT, 22, 'o' },
+ { "OUTQ5_128_NOTRDY", 1, 0, 22, 'i' },
+ { "OUTQ5_128_KILL", 1, 0, 22, 'o' },
+ { "IMPWIRE", 32, 0, 23, 'i' },
+ { "LU128_Out", 128, 0, 24, 'o' },
+ { "LU128_In", 128, 0, 25, 'i' }
+};
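+/*
+ * Interface rows are { name, bit width, flags, class id, direction },
+ * with 'i' for core input and 'o' for core output.  The four signals
+ * of each TIE queue (Empty or Full, the data port, NOTRDY, KILL) share
+ * one class id, which is what groups the related ports together.
+ */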
+
+enum xtensa_interface_id {
+ INTERFACE_INQ0_32_Empty,
+ INTERFACE_INQ0_32,
+ INTERFACE_INQ0_32_NOTRDY,
+ INTERFACE_INQ0_32_KILL,
+ INTERFACE_INQ1_32_Empty,
+ INTERFACE_INQ1_32,
+ INTERFACE_INQ1_32_NOTRDY,
+ INTERFACE_INQ1_32_KILL,
+ INTERFACE_INQ2_32_Empty,
+ INTERFACE_INQ2_32,
+ INTERFACE_INQ2_32_NOTRDY,
+ INTERFACE_INQ2_32_KILL,
+ INTERFACE_INQ3_32_Empty,
+ INTERFACE_INQ3_32,
+ INTERFACE_INQ3_32_NOTRDY,
+ INTERFACE_INQ3_32_KILL,
+ INTERFACE_OUTQ0_32_Full,
+ INTERFACE_OUTQ0_32,
+ INTERFACE_OUTQ0_32_NOTRDY,
+ INTERFACE_OUTQ0_32_KILL,
+ INTERFACE_OUTQ1_32_Full,
+ INTERFACE_OUTQ1_32,
+ INTERFACE_OUTQ1_32_NOTRDY,
+ INTERFACE_OUTQ1_32_KILL,
+ INTERFACE_OUTQ2_32_Full,
+ INTERFACE_OUTQ2_32,
+ INTERFACE_OUTQ2_32_NOTRDY,
+ INTERFACE_OUTQ2_32_KILL,
+ INTERFACE_OUTQ3_32_Full,
+ INTERFACE_OUTQ3_32,
+ INTERFACE_OUTQ3_32_NOTRDY,
+ INTERFACE_OUTQ3_32_KILL,
+ INTERFACE_OUTQ4_32_Full,
+ INTERFACE_OUTQ4_32,
+ INTERFACE_OUTQ4_32_NOTRDY,
+ INTERFACE_OUTQ4_32_KILL,
+ INTERFACE_OUTQ5_32_Full,
+ INTERFACE_OUTQ5_32,
+ INTERFACE_OUTQ5_32_NOTRDY,
+ INTERFACE_OUTQ5_32_KILL,
+ INTERFACE_SIGNALQ_Full,
+ INTERFACE_SIGNALQ,
+ INTERFACE_SIGNALQ_NOTRDY,
+ INTERFACE_SIGNALQ_KILL,
+ INTERFACE_INQ0_128_Empty,
+ INTERFACE_INQ0_128,
+ INTERFACE_INQ0_128_NOTRDY,
+ INTERFACE_INQ0_128_KILL,
+ INTERFACE_INQ1_128_Empty,
+ INTERFACE_INQ1_128,
+ INTERFACE_INQ1_128_NOTRDY,
+ INTERFACE_INQ1_128_KILL,
+ INTERFACE_INQ2_128_Empty,
+ INTERFACE_INQ2_128,
+ INTERFACE_INQ2_128_NOTRDY,
+ INTERFACE_INQ2_128_KILL,
+ INTERFACE_INQ3_128_Empty,
+ INTERFACE_INQ3_128,
+ INTERFACE_INQ3_128_NOTRDY,
+ INTERFACE_INQ3_128_KILL,
+ INTERFACE_INQ4_128_Empty,
+ INTERFACE_INQ4_128,
+ INTERFACE_INQ4_128_NOTRDY,
+ INTERFACE_INQ4_128_KILL,
+ INTERFACE_INQ5_128_Empty,
+ INTERFACE_INQ5_128,
+ INTERFACE_INQ5_128_NOTRDY,
+ INTERFACE_INQ5_128_KILL,
+ INTERFACE_OUTQ0_128_Full,
+ INTERFACE_OUTQ0_128,
+ INTERFACE_OUTQ0_128_NOTRDY,
+ INTERFACE_OUTQ0_128_KILL,
+ INTERFACE_OUTQ1_128_Full,
+ INTERFACE_OUTQ1_128,
+ INTERFACE_OUTQ1_128_NOTRDY,
+ INTERFACE_OUTQ1_128_KILL,
+ INTERFACE_OUTQ2_128_Full,
+ INTERFACE_OUTQ2_128,
+ INTERFACE_OUTQ2_128_NOTRDY,
+ INTERFACE_OUTQ2_128_KILL,
+ INTERFACE_OUTQ3_128_Full,
+ INTERFACE_OUTQ3_128,
+ INTERFACE_OUTQ3_128_NOTRDY,
+ INTERFACE_OUTQ3_128_KILL,
+ INTERFACE_OUTQ4_128_Full,
+ INTERFACE_OUTQ4_128,
+ INTERFACE_OUTQ4_128_NOTRDY,
+ INTERFACE_OUTQ4_128_KILL,
+ INTERFACE_OUTQ5_128_Full,
+ INTERFACE_OUTQ5_128,
+ INTERFACE_OUTQ5_128_NOTRDY,
+ INTERFACE_OUTQ5_128_KILL,
+ INTERFACE_IMPWIRE,
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
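+/*
+ * ai4c maps the 4-bit immediate encoding of ADDI.N to its value:
+ * encoding 0 stands for -1 and encodings 1..15 for themselves.  The
+ * trailing 0 is a terminator and is never indexed, since every lookup
+ * below is masked with "& 0xf".
+ */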
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
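+/*
+ * b4c and b4cu are the immediate sets of the compare-and-branch
+ * instructions (BEQI/BNEI/BLTI/... and their unsigned forms).  b4cu
+ * replaces the two signed-only values -1 and 1 with 0x8000 and
+ * 0x10000; the remaining fourteen entries are shared.
+ */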
+
+/* constant table LLR_CONSTELLATION_TABLE */
+static const unsigned CONST_TBL_LLR_CONSTELLATION_TABLE_0[] = {
+ 0x1 & 0xf,
+ 0x2 & 0xf,
+ 0x8 & 0xf,
+ 0x4 & 0xf,
+ 0x8 & 0xf,
+ 0x6 & 0xf,
+ 0x8 & 0xf,
+ 0x8 & 0xf,
+ 0
+};
+
+
+/* Instruction operands. */
+
+static int
+Operand_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffsetx4_0 = 0x4 + ((((int) offset_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_encode (uint32 *valp)
+{
+ unsigned offset_0, soffsetx4_0;
+ soffsetx4_0 = *valp;
+ offset_0 = ((soffsetx4_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
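+/*
+ * Every operand follows the pattern above: decode turns the raw field
+ * bits into the operand value, encode is its inverse, and PC-relative
+ * operands add ator/rtoa to convert between absolute addresses and the
+ * relative values actually stored.  soffsetx4 is the CALL target:
+ * "((int) x << 14) >> 14" sign-extends the 18-bit field, which is then
+ * scaled by 4 and biased by 4, and ator/rtoa mask the PC with ~0x3
+ * because call targets are relative to the aligned PC.  As a worked
+ * example, field value 0x3ffff sign-extends to -1, so decode yields
+ * 0x4 + (-1 << 2) = 0: a call to the aligned address of the CALL
+ * itself.
+ */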
+
+static int
+Operand_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ uimm12x8_0 = imm12_0 << 3;
+ *valp = uimm12x8_0;
+ return 0;
+}
+
+static int
+Operand_uimm12x8_encode (uint32 *valp)
+{
+ unsigned imm12_0, uimm12x8_0;
+ uimm12x8_0 = *valp;
+ imm12_0 = ((uimm12x8_0 >> 3) & 0xfff);
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_0, mn_0;
+ mn_0 = *valp & 0xf;
+ simm4_0 = ((int) mn_0 << 28) >> 28;
+ *valp = simm4_0;
+ return 0;
+}
+
+static int
+Operand_simm4_encode (uint32 *valp)
+{
+ unsigned mn_0, simm4_0;
+ simm4_0 = *valp;
+ mn_0 = (simm4_0 & 0xf);
+ *valp = mn_0;
+ return 0;
+}
+
+static int
+Operand_arr_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_arr_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_ars_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_art_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_art_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
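+/*
+ * Register operands need no value transformation, so their decode
+ * hooks are identity and their encode hooks only range-check: arr,
+ * ars and art above are 4-bit AR indices, rejected if any bit above
+ * bit 3 is set.
+ */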
+
+static int
+Operand_ar0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar0_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x1f) != 0;
+ return error;
+}
+
+static int
+Operand_ar4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x1f) != 0;
+ return error;
+}
+
+static int
+Operand_ar8_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x1f) != 0;
+ return error;
+}
+
+static int
+Operand_ar12_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar12_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x1f) != 0;
+ return error;
+}
+
+static int
+Operand_ars_entry_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_entry_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x1f) != 0;
+ return error;
+}
+
+static int
+Operand_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_0, r_0;
+ r_0 = *valp & 0xf;
+ immrx4_0 = (((0xfffffff) << 4) | r_0) << 2;
+ *valp = immrx4_0;
+ return 0;
+}
+
+static int
+Operand_immrx4_encode (uint32 *valp)
+{
+ unsigned r_0, immrx4_0;
+ immrx4_0 = *valp;
+ r_0 = ((immrx4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_0, r_0;
+ r_0 = *valp & 0xf;
+ lsi4x4_0 = r_0 << 2;
+ *valp = lsi4x4_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_encode (uint32 *valp)
+{
+ unsigned r_0, lsi4x4_0;
+ lsi4x4_0 = *valp;
+ r_0 = ((lsi4x4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_0, imm7_0;
+ imm7_0 = *valp & 0x7f;
+ simm7_0 = ((((-((((imm7_0 >> 6) & 1)) & (((imm7_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | imm7_0;
+ *valp = simm7_0;
+ return 0;
+}
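+/*
+ * The branch-free expression above sign-extends MOVI.N's immediate:
+ * the value is negative only when bits 6 and 5 are both set, in which
+ * case the negated AND of those two bits becomes an all-ones mask that
+ * is shifted into bits 31..7.  The encodable range is therefore the
+ * asymmetric -32..95 rather than a plain two's-complement -64..63.
+ */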
+
+static int
+Operand_simm7_encode (uint32 *valp)
+{
+ unsigned imm7_0, simm7_0;
+ simm7_0 = *valp;
+ imm7_0 = (simm7_0 & 0x7f);
+ *valp = imm7_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_0, imm6_0;
+ imm6_0 = *valp & 0x3f;
+ uimm6_0 = 0x4 + (((0) << 6) | imm6_0);
+ *valp = uimm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_encode (uint32 *valp)
+{
+ unsigned imm6_0, uimm6_0;
+ uimm6_0 = *valp;
+ imm6_0 = (uimm6_0 - 0x4) & 0x3f;
+ *valp = imm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_0, t_0;
+ t_0 = *valp & 0xf;
+ ai4const_0 = CONST_TBL_ai4c_0[t_0 & 0xf];
+ *valp = ai4const_0;
+ return 0;
+}
+
+static int
+Operand_ai4const_encode (uint32 *valp)
+{
+ unsigned t_0, ai4const_0;
+ ai4const_0 = *valp;
+ switch (ai4const_0)
+ {
+ case 0xffffffff: t_0 = 0; break;
+ case 0x1: t_0 = 0x1; break;
+ case 0x2: t_0 = 0x2; break;
+ case 0x3: t_0 = 0x3; break;
+ case 0x4: t_0 = 0x4; break;
+ case 0x5: t_0 = 0x5; break;
+ case 0x6: t_0 = 0x6; break;
+ case 0x7: t_0 = 0x7; break;
+ case 0x8: t_0 = 0x8; break;
+ case 0x9: t_0 = 0x9; break;
+ case 0xa: t_0 = 0xa; break;
+ case 0xb: t_0 = 0xb; break;
+ case 0xc: t_0 = 0xc; break;
+ case 0xd: t_0 = 0xd; break;
+ case 0xe: t_0 = 0xe; break;
+ default: t_0 = 0xf; break;
+ }
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_0, r_0;
+ r_0 = *valp & 0xf;
+ b4const_0 = CONST_TBL_b4c_0[r_0 & 0xf];
+ *valp = b4const_0;
+ return 0;
+}
+
+static int
+Operand_b4const_encode (uint32 *valp)
+{
+ unsigned r_0, b4const_0;
+ b4const_0 = *valp;
+ switch (b4const_0)
+ {
+ case 0xffffffff: r_0 = 0; break;
+ case 0x1: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_0, r_0;
+ r_0 = *valp & 0xf;
+ b4constu_0 = CONST_TBL_b4cu_0[r_0 & 0xf];
+ *valp = b4constu_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_encode (uint32 *valp)
+{
+ unsigned r_0, b4constu_0;
+ b4constu_0 = *valp;
+ switch (b4constu_0)
+ {
+ case 0x8000: r_0 = 0; break;
+ case 0x10000: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8_0 = imm8_0;
+ *valp = uimm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8_0;
+ uimm8_0 = *valp;
+ imm8_0 = (uimm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x2_0 = imm8_0 << 1;
+ *valp = uimm8x2_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x2_0;
+ uimm8x2_0 = *valp;
+ imm8_0 = ((uimm8x2_0 >> 1) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x4_0 = imm8_0 << 2;
+ *valp = uimm8x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x4_0;
+ uimm8x4_0 = *valp;
+ imm8_0 = ((uimm8x4_0 >> 2) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_0, op2_0;
+ op2_0 = *valp & 0xf;
+ uimm4x16_0 = op2_0 << 4;
+ *valp = uimm4x16_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_encode (uint32 *valp)
+{
+ unsigned op2_0, uimm4x16_0;
+ uimm4x16_0 = *valp;
+ op2_0 = ((uimm4x16_0 >> 4) & 0xf);
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8_0 = ((int) imm8_0 << 24) >> 24;
+ *valp = simm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8_0;
+ simm8_0 = *valp;
+ imm8_0 = (simm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8x256_0 = (((int) imm8_0 << 24) >> 24) << 8;
+ *valp = simm8x256_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8x256_0;
+ simm8x256_0 = *valp;
+ imm8_0 = ((simm8x256_0 >> 8) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_0, imm12b_0;
+ imm12b_0 = *valp & 0xfff;
+ simm12b_0 = ((int) imm12b_0 << 20) >> 20;
+ *valp = simm12b_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_encode (uint32 *valp)
+{
+ unsigned imm12b_0, simm12b_0;
+ simm12b_0 = *valp;
+ imm12b_0 = (simm12b_0 & 0xfff);
+ *valp = imm12b_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_0, sal_0;
+ sal_0 = *valp & 0x1f;
+ msalp32_0 = 0x20 - sal_0;
+ *valp = msalp32_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_encode (uint32 *valp)
+{
+ unsigned sal_0, msalp32_0;
+ msalp32_0 = *valp;
+ sal_0 = (0x20 - msalp32_0) & 0x1f;
+ *valp = sal_0;
+ return 0;
+}
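+/*
+ * msalp32 is the left-shift amount of SLLI, which is stored as 32
+ * minus the amount: decode computes 0x20 - sal and encode inverts
+ * that, masking the result back into the 5-bit field.
+ */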
+
+static int
+Operand_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_0, op2_0;
+ op2_0 = *valp & 0xf;
+ op2p1_0 = op2_0 + 0x1;
+ *valp = op2p1_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_encode (uint32 *valp)
+{
+ unsigned op2_0, op2p1_0;
+ op2p1_0 = *valp;
+ op2_0 = (op2p1_0 - 0x1) & 0xf;
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_label8_decode (uint32 *valp)
+{
+ unsigned label8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ label8_0 = 0x4 + (((int) imm8_0 << 24) >> 24);
+ *valp = label8_0;
+ return 0;
+}
+
+static int
+Operand_label8_encode (uint32 *valp)
+{
+ unsigned imm8_0, label8_0;
+ label8_0 = *valp;
+ imm8_0 = (label8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
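+/*
+ * The label operands (label8 above, ulabel8, label12, the wide-branch
+ * labels further down) and uimm6 all carry a bias of 4: Xtensa branch
+ * offsets are relative to the address of the branch plus 4, even for
+ * the 2- and 3-byte encodings, so decode produces 4 + offset and
+ * ator/rtoa simply subtract or add the unaligned PC.
+ */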
+
+static int
+Operand_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ ulabel8_0 = 0x4 + (((0) << 8) | imm8_0);
+ *valp = ulabel8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_encode (uint32 *valp)
+{
+ unsigned imm8_0, ulabel8_0;
+ ulabel8_0 = *valp;
+ imm8_0 = (ulabel8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_decode (uint32 *valp)
+{
+ unsigned label12_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ label12_0 = 0x4 + (((int) imm12_0 << 20) >> 20);
+ *valp = label12_0;
+ return 0;
+}
+
+static int
+Operand_label12_encode (uint32 *valp)
+{
+ unsigned imm12_0, label12_0;
+ label12_0 = *valp;
+ imm12_0 = (label12_0 - 0x4) & 0xfff;
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffset_0 = 0x4 + (((int) offset_0 << 14) >> 14);
+ *valp = soffset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_encode (uint32 *valp)
+{
+ unsigned offset_0, soffset_0;
+ soffset_0 = *valp;
+ offset_0 = (soffset_0 - 0x4) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_0, imm16_0;
+ imm16_0 = *valp & 0xffff;
+ uimm16x4_0 = (((0xffff) << 16) | imm16_0) << 2;
+ *valp = uimm16x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_encode (uint32 *valp)
+{
+ unsigned imm16_0, uimm16x4_0;
+ uimm16x4_0 = *valp;
+ imm16_0 = (uimm16x4_0 >> 2) & 0xffff;
+ *valp = imm16_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
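+/*
+ * uimm16x4 is the L32R literal offset.  Decode ORs 0xffff into the
+ * high half before scaling, so the operand is always negative: L32R
+ * can only reach literals below the PC.  The base used by ator/rtoa is
+ * "(pc + 3) & ~0x3", i.e. the instruction address rounded up to the
+ * next 4-byte boundary.
+ */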
+
+static int
+Operand_immt_decode (uint32 *valp)
+{
+ unsigned immt_0, t_0;
+ t_0 = *valp & 0xf;
+ immt_0 = t_0;
+ *valp = immt_0;
+ return 0;
+}
+
+static int
+Operand_immt_encode (uint32 *valp)
+{
+ unsigned t_0, immt_0;
+ immt_0 = *valp;
+ t_0 = immt_0 & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_imms_decode (uint32 *valp)
+{
+ unsigned imms_0, s_0;
+ s_0 = *valp & 0xf;
+ imms_0 = s_0;
+ *valp = imms_0;
+ return 0;
+}
+
+static int
+Operand_imms_encode (uint32 *valp)
+{
+ unsigned s_0, imms_0;
+ imms_0 = *valp;
+ s_0 = imms_0 & 0xf;
+ *valp = s_0;
+ return 0;
+}
+
+static int
+Operand_bt_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_bt_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_bs_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_bs_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_br_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_br_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_bt2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+Operand_bt2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x7 << 1)) != 0;
+ *valp = *valp >> 1;
+ return error;
+}
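+/*
+ * bt2/bs2/br2 through bt8/bs8/br8 name aligned groups of 2, 4 or 8
+ * boolean registers.  The field holds the group number, so decode
+ * shifts it up into a register index, and encode rejects misaligned or
+ * out-of-range indices: "~(0x7 << 1)", for instance, admits only the
+ * even indices 0..14.
+ */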
+
+static int
+Operand_bs2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+Operand_bs2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x7 << 1)) != 0;
+ *valp = *valp >> 1;
+ return error;
+}
+
+static int
+Operand_br2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+Operand_br2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x7 << 1)) != 0;
+ *valp = *valp >> 1;
+ return error;
+}
+
+static int
+Operand_bt4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+Operand_bt4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x3 << 2)) != 0;
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+Operand_bs4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+Operand_bs4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x3 << 2)) != 0;
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+Operand_br4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+Operand_br4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x3 << 2)) != 0;
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+Operand_bt8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+Operand_bt8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x1 << 3)) != 0;
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+Operand_bs8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+Operand_bs8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x1 << 3)) != 0;
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+Operand_br8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+Operand_br8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0x1 << 3)) != 0;
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+Operand_bt16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+Operand_bt16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0 << 4)) != 0;
+ *valp = *valp >> 4;
+ return error;
+}
+
+static int
+Operand_bs16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+Operand_bs16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0 << 4)) != 0;
+ *valp = *valp >> 4;
+ return error;
+}
+
+static int
+Operand_br16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+Operand_br16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0 << 4)) != 0;
+ *valp = *valp >> 4;
+ return error;
+}
+
+static int
+Operand_brall_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+Operand_brall_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~(0 << 4)) != 0;
+ *valp = *valp >> 4;
+ return error;
+}
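+/*
+ * bt16/bs16/br16 and brall always name the entire boolean register
+ * file, so their synthetic fields are zero bits wide; "~(0 << 4)"
+ * folds to ~0, and the encoders accept register index 0 only.
+ */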
+
+static int
+Operand_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_0, t_0;
+ t_0 = *valp & 0xf;
+ tp7_0 = t_0 + 0x7;
+ *valp = tp7_0;
+ return 0;
+}
+
+static int
+Operand_tp7_encode (uint32 *valp)
+{
+ unsigned t_0, tp7_0;
+ tp7_0 = *valp;
+ t_0 = (tp7_0 - 0x7) & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_0, xt_wbr15_imm_0;
+ xt_wbr15_imm_0 = *valp & 0x7fff;
+ xt_wbr15_label_0 = 0x4 + (((int) xt_wbr15_imm_0 << 17) >> 17);
+ *valp = xt_wbr15_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_imm_0, xt_wbr15_label_0;
+ xt_wbr15_label_0 = *valp;
+ xt_wbr15_imm_0 = (xt_wbr15_label_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_0, xt_wbr18_imm_0;
+ xt_wbr18_imm_0 = *valp & 0x3ffff;
+ xt_wbr18_label_0 = 0x4 + (((int) xt_wbr18_imm_0 << 14) >> 14);
+ *valp = xt_wbr18_label_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_imm_0, xt_wbr18_label_0;
+ xt_wbr18_label_0 = *valp;
+ xt_wbr18_imm_0 = (xt_wbr18_label_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_imm_0;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_cimm8x4_decode (uint32 *valp)
+{
+ unsigned cimm8x4_0, fimm8_0;
+ fimm8_0 = *valp & 0xff;
+ cimm8x4_0 = (fimm8_0 << 2) | 0;
+ *valp = cimm8x4_0;
+ return 0;
+}
+
+static int
+Operand_cimm8x4_encode (uint32 *valp)
+{
+ unsigned fimm8_0, cimm8x4_0;
+ cimm8x4_0 = *valp;
+ fimm8_0 = (cimm8x4_0 >> 2) & 0xff;
+ *valp = fimm8_0;
+ return 0;
+}
+
+static int
+Operand_frr_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_frr_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_frs_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_frs_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_frt_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_frt_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper45_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper45_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper46_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper46_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper47_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper47_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper48_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper48_imm_0, dsp340050b49a6c_fld2047_0;
+ dsp340050b49a6c_fld2047_0 = *valp & 0x1;
+ dsp340050b49a6c_oper48_imm_0 = (0 << 1) | dsp340050b49a6c_fld2047_0;
+ *valp = dsp340050b49a6c_oper48_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper48_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2047_0, dsp340050b49a6c_oper48_imm_0;
+ dsp340050b49a6c_oper48_imm_0 = *valp;
+ dsp340050b49a6c_fld2047_0 = (((dsp340050b49a6c_oper48_imm_0 >> 0) & 1)) & 0x1;
+ *valp = dsp340050b49a6c_fld2047_0;
+ return 0;
+}
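+/*
+ * The dsp340050b49a6c_* operands are emitted mechanically from this
+ * core's TIE description (the hex string looks like a configuration
+ * hash).  Each *_imm operand repeats the shape above: decode masks the
+ * field, concatenates leading zero bits and optionally scales, and
+ * encode reverses the scaling and masking.
+ */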
+
+static int
+Operand_dsp340050b49a6c_oper49_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper49_imm_0, dsp340050b49a6c_fld2025_0;
+ dsp340050b49a6c_fld2025_0 = *valp & 0x1;
+ dsp340050b49a6c_oper49_imm_0 = (0 << 1) | dsp340050b49a6c_fld2025_0;
+ *valp = dsp340050b49a6c_oper49_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper49_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2025_0, dsp340050b49a6c_oper49_imm_0;
+ dsp340050b49a6c_oper49_imm_0 = *valp;
+ dsp340050b49a6c_fld2025_0 = (((dsp340050b49a6c_oper49_imm_0 >> 0) & 1)) & 0x1;
+ *valp = dsp340050b49a6c_fld2025_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper50_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper50_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper51_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper51_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper52_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper52_imm_0, dsp340050b49a6c_fld2049_0;
+ dsp340050b49a6c_fld2049_0 = *valp & 0x3;
+ dsp340050b49a6c_oper52_imm_0 = (0 << 2) | dsp340050b49a6c_fld2049_0;
+ *valp = dsp340050b49a6c_oper52_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper52_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2049_0, dsp340050b49a6c_oper52_imm_0;
+ dsp340050b49a6c_oper52_imm_0 = *valp;
+ dsp340050b49a6c_fld2049_0 = (dsp340050b49a6c_oper52_imm_0 & 0x3);
+ *valp = dsp340050b49a6c_fld2049_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper53_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper53_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper54_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper54_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper55_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper55_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper56_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper56_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper57_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper57_imm_0, dsp340050b49a6c_fld2041_0;
+ dsp340050b49a6c_fld2041_0 = *valp & 0x1;
+ dsp340050b49a6c_oper57_imm_0 = (0 << 1) | dsp340050b49a6c_fld2041_0;
+ *valp = dsp340050b49a6c_oper57_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper57_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2041_0, dsp340050b49a6c_oper57_imm_0;
+ dsp340050b49a6c_oper57_imm_0 = *valp;
+ dsp340050b49a6c_fld2041_0 = (((dsp340050b49a6c_oper57_imm_0 >> 0) & 1)) & 0x1;
+ *valp = dsp340050b49a6c_fld2041_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper58_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper58_imm_0, dsp340050b49a6c_fld2051_0;
+ dsp340050b49a6c_fld2051_0 = *valp & 0x3f;
+ dsp340050b49a6c_oper58_imm_0 = ((int) dsp340050b49a6c_fld2051_0 << 26) >> 26;
+ *valp = dsp340050b49a6c_oper58_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper58_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2051_0, dsp340050b49a6c_oper58_imm_0;
+ dsp340050b49a6c_oper58_imm_0 = *valp;
+ dsp340050b49a6c_fld2051_0 = (dsp340050b49a6c_oper58_imm_0 & 0x3f);
+ *valp = dsp340050b49a6c_fld2051_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper59_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper59_imm_0, dsp340050b49a6c_fld2052_0;
+ dsp340050b49a6c_fld2052_0 = *valp & 0xf;
+ dsp340050b49a6c_oper59_imm_0 = (0 << 4) | dsp340050b49a6c_fld2052_0;
+ *valp = dsp340050b49a6c_oper59_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper59_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2052_0, dsp340050b49a6c_oper59_imm_0;
+ dsp340050b49a6c_oper59_imm_0 = *valp;
+ dsp340050b49a6c_fld2052_0 = (dsp340050b49a6c_oper59_imm_0 & 0xf);
+ *valp = dsp340050b49a6c_fld2052_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper60_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper60_imm_0, dsp340050b49a6c_fld2053_0;
+ dsp340050b49a6c_fld2053_0 = *valp & 0x1f;
+ dsp340050b49a6c_oper60_imm_0 = (0 << 5) | dsp340050b49a6c_fld2053_0;
+ *valp = dsp340050b49a6c_oper60_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper60_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2053_0, dsp340050b49a6c_oper60_imm_0;
+ dsp340050b49a6c_oper60_imm_0 = *valp;
+ dsp340050b49a6c_fld2053_0 = (dsp340050b49a6c_oper60_imm_0 & 0x1f);
+ *valp = dsp340050b49a6c_fld2053_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper61_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper61_imm_0, dsp340050b49a6c_fld2044_0;
+ dsp340050b49a6c_fld2044_0 = *valp & 0x1f;
+ dsp340050b49a6c_oper61_imm_0 = (0 << 5) | dsp340050b49a6c_fld2044_0;
+ *valp = dsp340050b49a6c_oper61_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper61_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2044_0, dsp340050b49a6c_oper61_imm_0;
+ dsp340050b49a6c_oper61_imm_0 = *valp;
+ dsp340050b49a6c_fld2044_0 = (dsp340050b49a6c_oper61_imm_0 & 0x1f);
+ *valp = dsp340050b49a6c_fld2044_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper62_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper62_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper63_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper63_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper64_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper64_imm_0, dsp340050b49a6c_fld2032_0;
+ dsp340050b49a6c_fld2032_0 = *valp & 0x3;
+ dsp340050b49a6c_oper64_imm_0 = (0 << 2) | dsp340050b49a6c_fld2032_0;
+ *valp = dsp340050b49a6c_oper64_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper64_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2032_0, dsp340050b49a6c_oper64_imm_0;
+ dsp340050b49a6c_oper64_imm_0 = *valp;
+ dsp340050b49a6c_fld2032_0 = (dsp340050b49a6c_oper64_imm_0 & 0x3);
+ *valp = dsp340050b49a6c_fld2032_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper65_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper65_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper66_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper66_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper67_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper67_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper68_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper68_imm_0, dsp340050b49a6c_fld2035_0;
+ dsp340050b49a6c_fld2035_0 = *valp & 0x3;
+ dsp340050b49a6c_oper68_imm_0 = (0 << 2) | dsp340050b49a6c_fld2035_0;
+ *valp = dsp340050b49a6c_oper68_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper68_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2035_0, dsp340050b49a6c_oper68_imm_0;
+ dsp340050b49a6c_oper68_imm_0 = *valp;
+ dsp340050b49a6c_fld2035_0 = (dsp340050b49a6c_oper68_imm_0 & 0x3);
+ *valp = dsp340050b49a6c_fld2035_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper69_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper69_imm_0, dsp340050b49a6c_fld2036_0;
+ dsp340050b49a6c_fld2036_0 = *valp & 0x1;
+ dsp340050b49a6c_oper69_imm_0 = (0 << 1) | dsp340050b49a6c_fld2036_0;
+ *valp = dsp340050b49a6c_oper69_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper69_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2036_0, dsp340050b49a6c_oper69_imm_0;
+ dsp340050b49a6c_oper69_imm_0 = *valp;
+ dsp340050b49a6c_fld2036_0 = (((dsp340050b49a6c_oper69_imm_0 >> 0) & 1)) & 0x1;
+ *valp = dsp340050b49a6c_fld2036_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper70_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper70_imm_0, dsp340050b49a6c_fld2037_0;
+ dsp340050b49a6c_fld2037_0 = *valp & 0x7;
+ dsp340050b49a6c_oper70_imm_0 = (0 << 3) | dsp340050b49a6c_fld2037_0;
+ *valp = dsp340050b49a6c_oper70_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper70_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2037_0, dsp340050b49a6c_oper70_imm_0;
+ dsp340050b49a6c_oper70_imm_0 = *valp;
+ dsp340050b49a6c_fld2037_0 = (dsp340050b49a6c_oper70_imm_0 & 0x7);
+ *valp = dsp340050b49a6c_fld2037_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper71_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper71_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0x7) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper72_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper72_imm_0, dsp340050b49a6c_fld2042_0;
+ dsp340050b49a6c_fld2042_0 = *valp & 0x3;
+ dsp340050b49a6c_oper72_imm_0 = (0 << 2) | dsp340050b49a6c_fld2042_0;
+ *valp = dsp340050b49a6c_oper72_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper72_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2042_0, dsp340050b49a6c_oper72_imm_0;
+ dsp340050b49a6c_oper72_imm_0 = *valp;
+ dsp340050b49a6c_fld2042_0 = (dsp340050b49a6c_oper72_imm_0 & 0x3);
+ *valp = dsp340050b49a6c_fld2042_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper73_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper73_imm_0, dsp340050b49a6c_fld2038_0;
+ dsp340050b49a6c_fld2038_0 = *valp & 0xf;
+ dsp340050b49a6c_oper73_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2038_0) << 3;
+ *valp = dsp340050b49a6c_oper73_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper73_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2038_0, dsp340050b49a6c_oper73_imm_0;
+ dsp340050b49a6c_oper73_imm_0 = *valp;
+ dsp340050b49a6c_fld2038_0 = ((dsp340050b49a6c_oper73_imm_0 >> 3) & 0xf);
+ *valp = dsp340050b49a6c_fld2038_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper74_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper74_imm_0, dsp340050b49a6c_fld2038_0;
+ dsp340050b49a6c_fld2038_0 = *valp & 0xf;
+ dsp340050b49a6c_oper74_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2038_0) << 4;
+ *valp = dsp340050b49a6c_oper74_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper74_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2038_0, dsp340050b49a6c_oper74_imm_0;
+ dsp340050b49a6c_oper74_imm_0 = *valp;
+ dsp340050b49a6c_fld2038_0 = ((dsp340050b49a6c_oper74_imm_0 >> 4) & 0xf);
+ *valp = dsp340050b49a6c_fld2038_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper75_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper75_imm_0, dsp340050b49a6c_fld2038_0;
+ dsp340050b49a6c_fld2038_0 = *valp & 0xf;
+ dsp340050b49a6c_oper75_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2038_0) << 2;
+ *valp = dsp340050b49a6c_oper75_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper75_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2038_0, dsp340050b49a6c_oper75_imm_0;
+ dsp340050b49a6c_oper75_imm_0 = *valp;
+ dsp340050b49a6c_fld2038_0 = ((dsp340050b49a6c_oper75_imm_0 >> 2) & 0xf);
+ *valp = dsp340050b49a6c_fld2038_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper76_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper76_imm_0, dsp340050b49a6c_fld2053_0;
+ dsp340050b49a6c_fld2053_0 = *valp & 0x1f;
+ dsp340050b49a6c_oper76_imm_0 = ((0 << 5) | dsp340050b49a6c_fld2053_0) << 4;
+ *valp = dsp340050b49a6c_oper76_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper76_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2053_0, dsp340050b49a6c_oper76_imm_0;
+ dsp340050b49a6c_oper76_imm_0 = *valp;
+ dsp340050b49a6c_fld2053_0 = ((dsp340050b49a6c_oper76_imm_0 >> 4) & 0x1f);
+ *valp = dsp340050b49a6c_fld2053_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper77_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper77_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper78_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper78_imm_0, dsp340050b49a6c_fld2056_0;
+ dsp340050b49a6c_fld2056_0 = *valp & 0x7;
+ dsp340050b49a6c_oper78_imm_0 = (0 << 3) | dsp340050b49a6c_fld2056_0;
+ *valp = dsp340050b49a6c_oper78_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper78_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2056_0, dsp340050b49a6c_oper78_imm_0;
+ dsp340050b49a6c_oper78_imm_0 = *valp;
+ dsp340050b49a6c_fld2056_0 = (dsp340050b49a6c_oper78_imm_0 & 0x7);
+ *valp = dsp340050b49a6c_fld2056_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper79_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper79_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper80_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper80_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper81_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper81_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper82_reg_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper82_reg_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp & ~0xf) != 0;
+ return error;
+}
+
+static int
+Operand_dsp340050b49a6c_oper83_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper83_imm_0, dsp340050b49a6c_fld2040_0;
+ dsp340050b49a6c_fld2040_0 = *valp & 0x1f;
+ dsp340050b49a6c_oper83_imm_0 = ((0 << 5) | dsp340050b49a6c_fld2040_0) << 4;
+ *valp = dsp340050b49a6c_oper83_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper83_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2040_0, dsp340050b49a6c_oper83_imm_0;
+ dsp340050b49a6c_oper83_imm_0 = *valp;
+ dsp340050b49a6c_fld2040_0 = ((dsp340050b49a6c_oper83_imm_0 >> 4) & 0x1f);
+ *valp = dsp340050b49a6c_fld2040_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper84_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper84_imm_0, dsp340050b49a6c_fld2029_0;
+ dsp340050b49a6c_fld2029_0 = *valp & 0xf;
+ dsp340050b49a6c_oper84_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2029_0) << 3;
+ *valp = dsp340050b49a6c_oper84_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper84_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2029_0, dsp340050b49a6c_oper84_imm_0;
+ dsp340050b49a6c_oper84_imm_0 = *valp;
+ dsp340050b49a6c_fld2029_0 = ((dsp340050b49a6c_oper84_imm_0 >> 3) & 0xf);
+ *valp = dsp340050b49a6c_fld2029_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper85_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper85_imm_0, dsp340050b49a6c_fld2029_0;
+ dsp340050b49a6c_fld2029_0 = *valp & 0xf;
+ dsp340050b49a6c_oper85_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2029_0) << 4;
+ *valp = dsp340050b49a6c_oper85_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper85_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2029_0, dsp340050b49a6c_oper85_imm_0;
+ dsp340050b49a6c_oper85_imm_0 = *valp;
+ dsp340050b49a6c_fld2029_0 = ((dsp340050b49a6c_oper85_imm_0 >> 4) & 0xf);
+ *valp = dsp340050b49a6c_fld2029_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper86_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper86_imm_0, dsp340050b49a6c_fld2029_0;
+ dsp340050b49a6c_fld2029_0 = *valp & 0xf;
+ dsp340050b49a6c_oper86_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2029_0) << 2;
+ *valp = dsp340050b49a6c_oper86_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper86_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2029_0, dsp340050b49a6c_oper86_imm_0;
+ dsp340050b49a6c_oper86_imm_0 = *valp;
+ dsp340050b49a6c_fld2029_0 = ((dsp340050b49a6c_oper86_imm_0 >> 2) & 0xf);
+ *valp = dsp340050b49a6c_fld2029_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper87_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper87_imm_0, dsp340050b49a6c_fld2043_0;
+ dsp340050b49a6c_fld2043_0 = *valp & 0x1f;
+ dsp340050b49a6c_oper87_imm_0 = ((0 << 5) | dsp340050b49a6c_fld2043_0) << 4;
+ *valp = dsp340050b49a6c_oper87_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper87_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2043_0, dsp340050b49a6c_oper87_imm_0;
+ dsp340050b49a6c_oper87_imm_0 = *valp;
+ dsp340050b49a6c_fld2043_0 = ((dsp340050b49a6c_oper87_imm_0 >> 4) & 0x1f);
+ *valp = dsp340050b49a6c_fld2043_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper88_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper88_imm_0, dsp340050b49a6c_fld2030_0;
+ dsp340050b49a6c_fld2030_0 = *valp & 0xf;
+ dsp340050b49a6c_oper88_imm_0 = (0 << 4) | dsp340050b49a6c_fld2030_0;
+ *valp = dsp340050b49a6c_oper88_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper88_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2030_0, dsp340050b49a6c_oper88_imm_0;
+ dsp340050b49a6c_oper88_imm_0 = *valp;
+ dsp340050b49a6c_fld2030_0 = (dsp340050b49a6c_oper88_imm_0 & 0xf);
+ *valp = dsp340050b49a6c_fld2030_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper89_imm_decode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_oper89_imm_0, dsp340050b49a6c_fld2052_0;
+ dsp340050b49a6c_fld2052_0 = *valp & 0xf;
+ dsp340050b49a6c_oper89_imm_0 = ((0 << 4) | dsp340050b49a6c_fld2052_0) << 2;
+ *valp = dsp340050b49a6c_oper89_imm_0;
+ return 0;
+}
+
+static int
+Operand_dsp340050b49a6c_oper89_imm_encode (uint32 *valp)
+{
+ unsigned dsp340050b49a6c_fld2052_0, dsp340050b49a6c_oper89_imm_0;
+ dsp340050b49a6c_oper89_imm_0 = *valp;
+ dsp340050b49a6c_fld2052_0 = ((dsp340050b49a6c_oper89_imm_0 >> 2) & 0xf);
+ *valp = dsp340050b49a6c_fld2052_0;
+ return 0;
+}
+
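+/*
+ * Operand rows are { name, field, regfile, number of registers, flags,
+ * encode, decode, ator, rtoa }.  A regfile of -1 marks an immediate;
+ * XTENSA_OPERAND_IS_INVISIBLE entries such as "*ars_invisible", ar0 or
+ * brall are implicit register uses that never appear in assembly
+ * syntax.
+ */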
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffsetx4_encode, Operand_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ Operand_uimm12x8_encode, Operand_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ Operand_simm4_encode, Operand_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_arr_encode, Operand_arr_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_art_encode, Operand_art_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar0_encode, Operand_ar0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar4_encode, Operand_ar4_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar8_encode, Operand_ar8_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar12_encode, Operand_ar12_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_entry_encode, Operand_ars_entry_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ Operand_immrx4_encode, Operand_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ Operand_lsi4x4_encode, Operand_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ Operand_simm7_encode, Operand_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm6_encode, Operand_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ Operand_ai4const_encode, Operand_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ Operand_b4const_encode, Operand_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ Operand_b4constu_encode, Operand_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8_encode, Operand_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8x2_encode, Operand_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ Operand_uimm8x4_encode, Operand_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ Operand_uimm4x16_encode, Operand_uimm4x16_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ Operand_simm8_encode, Operand_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ Operand_simm8x256_encode, Operand_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ Operand_simm12b_encode, Operand_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ Operand_msalp32_encode, Operand_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ Operand_op2p1_encode, Operand_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label8_encode, Operand_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_ulabel8_encode, Operand_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label12_encode, Operand_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffset_encode, Operand_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm16x4_encode, Operand_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ Operand_immt_encode, Operand_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ Operand_imms_encode, Operand_imms_decode,
+ 0, 0 },
+ { "bt", FIELD_t, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bt_encode, Operand_bt_decode,
+ 0, 0 },
+ { "bs", FIELD_s, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bs_encode, Operand_bs_decode,
+ 0, 0 },
+ { "br", FIELD_r, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_br_encode, Operand_br_decode,
+ 0, 0 },
+ { "bt2", FIELD_t2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bt2_encode, Operand_bt2_decode,
+ 0, 0 },
+ { "bs2", FIELD_s2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bs2_encode, Operand_bs2_decode,
+ 0, 0 },
+ { "br2", FIELD_r2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_br2_encode, Operand_br2_decode,
+ 0, 0 },
+ { "bt4", FIELD_t4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bt4_encode, Operand_bt4_decode,
+ 0, 0 },
+ { "bs4", FIELD_s4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bs4_encode, Operand_bs4_decode,
+ 0, 0 },
+ { "br4", FIELD_r4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_br4_encode, Operand_br4_decode,
+ 0, 0 },
+ { "bt8", FIELD_t8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bt8_encode, Operand_bt8_decode,
+ 0, 0 },
+ { "bs8", FIELD_s8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bs8_encode, Operand_bs8_decode,
+ 0, 0 },
+ { "br8", FIELD_r8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_br8_encode, Operand_br8_decode,
+ 0, 0 },
+ { "bt16", FIELD__bt16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bt16_encode, Operand_bt16_decode,
+ 0, 0 },
+ { "bs16", FIELD__bs16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_bs16_encode, Operand_bs16_decode,
+ 0, 0 },
+ { "br16", FIELD__br16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_br16_encode, Operand_br16_decode,
+ 0, 0 },
+ { "brall", FIELD__brall, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_brall_encode, Operand_brall_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ Operand_tp7_encode, Operand_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr15_label_encode, Operand_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_xt_wbr18_label_encode, Operand_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "cimm8x4", FIELD_fimm8, -1, 0,
+ 0,
+ Operand_cimm8x4_encode, Operand_cimm8x4_decode,
+ 0, 0 },
+ { "frr", FIELD_r, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_frr_encode, Operand_frr_decode,
+ 0, 0 },
+ { "frs", FIELD_s, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_frs_encode, Operand_frs_decode,
+ 0, 0 },
+ { "frt", FIELD_t, REGFILE_FR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_frt_encode, Operand_frt_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper45_reg", FIELD_dsp340050b49a6c_fld2045, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper45_reg_encode, Operand_dsp340050b49a6c_oper45_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper46_reg", FIELD_dsp340050b49a6c_fld2046, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper46_reg_encode, Operand_dsp340050b49a6c_oper46_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper47_reg", FIELD_dsp340050b49a6c_fld2029, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper47_reg_encode, Operand_dsp340050b49a6c_oper47_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper48_imm", FIELD_dsp340050b49a6c_fld2047, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper48_imm_encode, Operand_dsp340050b49a6c_oper48_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper49_imm", FIELD_dsp340050b49a6c_fld2025, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper49_imm_encode, Operand_dsp340050b49a6c_oper49_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper50_reg", FIELD_dsp340050b49a6c_fld2026, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper50_reg_encode, Operand_dsp340050b49a6c_oper50_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper51_reg", FIELD_dsp340050b49a6c_fld2048, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper51_reg_encode, Operand_dsp340050b49a6c_oper51_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper52_imm", FIELD_dsp340050b49a6c_fld2049, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper52_imm_encode, Operand_dsp340050b49a6c_oper52_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper53_reg", FIELD_dsp340050b49a6c_fld2027, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper53_reg_encode, Operand_dsp340050b49a6c_oper53_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper54_reg", FIELD_dsp340050b49a6c_fld2028, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper54_reg_encode, Operand_dsp340050b49a6c_oper54_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper55_reg", FIELD_dsp340050b49a6c_fld2030, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper55_reg_encode, Operand_dsp340050b49a6c_oper55_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper56_reg", FIELD_dsp340050b49a6c_fld2050, REGFILE_PQ, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper56_reg_encode, Operand_dsp340050b49a6c_oper56_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper57_imm", FIELD_dsp340050b49a6c_fld2041, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper57_imm_encode, Operand_dsp340050b49a6c_oper57_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper58_imm", FIELD_dsp340050b49a6c_fld2051, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper58_imm_encode, Operand_dsp340050b49a6c_oper58_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper59_imm", FIELD_dsp340050b49a6c_fld2052, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper59_imm_encode, Operand_dsp340050b49a6c_oper59_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper60_imm", FIELD_dsp340050b49a6c_fld2053, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper60_imm_encode, Operand_dsp340050b49a6c_oper60_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper61_imm", FIELD_dsp340050b49a6c_fld2044, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper61_imm_encode, Operand_dsp340050b49a6c_oper61_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper62_reg", FIELD_dsp340050b49a6c_fld2031, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper62_reg_encode, Operand_dsp340050b49a6c_oper62_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper63_reg", FIELD_dsp340050b49a6c_fld2054, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper63_reg_encode, Operand_dsp340050b49a6c_oper63_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper64_imm", FIELD_dsp340050b49a6c_fld2032, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper64_imm_encode, Operand_dsp340050b49a6c_oper64_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper65_reg", FIELD_dsp340050b49a6c_fld2055, REGFILE_PQ, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper65_reg_encode, Operand_dsp340050b49a6c_oper65_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper66_reg", FIELD_dsp340050b49a6c_fld2033, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper66_reg_encode, Operand_dsp340050b49a6c_oper66_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper67_reg", FIELD_dsp340050b49a6c_fld2034, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper67_reg_encode, Operand_dsp340050b49a6c_oper67_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper68_imm", FIELD_dsp340050b49a6c_fld2035, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper68_imm_encode, Operand_dsp340050b49a6c_oper68_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper69_imm", FIELD_dsp340050b49a6c_fld2036, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper69_imm_encode, Operand_dsp340050b49a6c_oper69_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper70_imm", FIELD_dsp340050b49a6c_fld2037, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper70_imm_encode, Operand_dsp340050b49a6c_oper70_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper71_reg", FIELD_dsp340050b49a6c_fld2037, REGFILE_ACU, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper71_reg_encode, Operand_dsp340050b49a6c_oper71_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper72_imm", FIELD_dsp340050b49a6c_fld2042, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper72_imm_encode, Operand_dsp340050b49a6c_oper72_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper73_imm", FIELD_dsp340050b49a6c_fld2038, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper73_imm_encode, Operand_dsp340050b49a6c_oper73_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper74_imm", FIELD_dsp340050b49a6c_fld2038, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper74_imm_encode, Operand_dsp340050b49a6c_oper74_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper75_imm", FIELD_dsp340050b49a6c_fld2038, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper75_imm_encode, Operand_dsp340050b49a6c_oper75_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper76_imm", FIELD_dsp340050b49a6c_fld2053, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper76_imm_encode, Operand_dsp340050b49a6c_oper76_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper77_reg", FIELD_r, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper77_reg_encode, Operand_dsp340050b49a6c_oper77_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper78_imm", FIELD_dsp340050b49a6c_fld2056, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper78_imm_encode, Operand_dsp340050b49a6c_oper78_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper79_reg", FIELD_r, REGFILE_PQ, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper79_reg_encode, Operand_dsp340050b49a6c_oper79_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper80_reg", FIELD_s, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper80_reg_encode, Operand_dsp340050b49a6c_oper80_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper81_reg", FIELD_dsp340050b49a6c_fld2039, REGFILE_PQ, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper81_reg_encode, Operand_dsp340050b49a6c_oper81_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper82_reg", FIELD_dsp340050b49a6c_fld2052, REGFILE_CM, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_dsp340050b49a6c_oper82_reg_encode, Operand_dsp340050b49a6c_oper82_reg_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper83_imm", FIELD_dsp340050b49a6c_fld2040, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper83_imm_encode, Operand_dsp340050b49a6c_oper83_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper84_imm", FIELD_dsp340050b49a6c_fld2029, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper84_imm_encode, Operand_dsp340050b49a6c_oper84_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper85_imm", FIELD_dsp340050b49a6c_fld2029, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper85_imm_encode, Operand_dsp340050b49a6c_oper85_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper86_imm", FIELD_dsp340050b49a6c_fld2029, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper86_imm_encode, Operand_dsp340050b49a6c_oper86_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper87_imm", FIELD_dsp340050b49a6c_fld2043, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper87_imm_encode, Operand_dsp340050b49a6c_oper87_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper88_imm", FIELD_dsp340050b49a6c_fld2030, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper88_imm_encode, Operand_dsp340050b49a6c_oper88_imm_decode,
+ 0, 0 },
+ { "dsp340050b49a6c_oper89_imm", FIELD_dsp340050b49a6c_fld2052, -1, 0,
+ 0,
+ Operand_dsp340050b49a6c_oper89_imm_encode, Operand_dsp340050b49a6c_oper89_imm_decode,
+ 0, 0 },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi", FIELD_bbi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s", FIELD_s, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae", FIELD_sae, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas", FIELD_sas, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "t2", FIELD_t2, -1, 0, 0, 0, 0, 0, 0 },
+ { "s2", FIELD_s2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r2", FIELD_r2, -1, 0, 0, 0, 0, 0, 0 },
+ { "t4", FIELD_t4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s4", FIELD_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "r4", FIELD_r4, -1, 0, 0, 0, 0, 0, 0 },
+ { "t8", FIELD_t8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s8", FIELD_s8, -1, 0, 0, 0, 0, 0, 0 },
+ { "r8", FIELD_r8, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "fimm8", FIELD_fimm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2019", FIELD_dsp340050b49a6c_fld2019, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2021", FIELD_dsp340050b49a6c_fld2021, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2029", FIELD_dsp340050b49a6c_fld2029, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2030", FIELD_dsp340050b49a6c_fld2030, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2032", FIELD_dsp340050b49a6c_fld2032, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2035", FIELD_dsp340050b49a6c_fld2035, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2036", FIELD_dsp340050b49a6c_fld2036, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2037", FIELD_dsp340050b49a6c_fld2037, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2038", FIELD_dsp340050b49a6c_fld2038, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2039", FIELD_dsp340050b49a6c_fld2039, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2040", FIELD_dsp340050b49a6c_fld2040, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2041", FIELD_dsp340050b49a6c_fld2041, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2042", FIELD_dsp340050b49a6c_fld2042, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2043", FIELD_dsp340050b49a6c_fld2043, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2044", FIELD_dsp340050b49a6c_fld2044, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2045", FIELD_dsp340050b49a6c_fld2045, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2046", FIELD_dsp340050b49a6c_fld2046, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2047", FIELD_dsp340050b49a6c_fld2047, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2048", FIELD_dsp340050b49a6c_fld2048, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2049", FIELD_dsp340050b49a6c_fld2049, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2050", FIELD_dsp340050b49a6c_fld2050, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2051", FIELD_dsp340050b49a6c_fld2051, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2052", FIELD_dsp340050b49a6c_fld2052, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2053", FIELD_dsp340050b49a6c_fld2053, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2054", FIELD_dsp340050b49a6c_fld2054, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2055", FIELD_dsp340050b49a6c_fld2055, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2056", FIELD_dsp340050b49a6c_fld2056, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2082Inst", FIELD_dsp340050b49a6c_fld2082Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2083Inst", FIELD_dsp340050b49a6c_fld2083Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2084Inst", FIELD_dsp340050b49a6c_fld2084Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2085Inst", FIELD_dsp340050b49a6c_fld2085Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2086Inst", FIELD_dsp340050b49a6c_fld2086Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2088Inst", FIELD_dsp340050b49a6c_fld2088Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2089Inst", FIELD_dsp340050b49a6c_fld2089Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2090Inst", FIELD_dsp340050b49a6c_fld2090Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2091Inst", FIELD_dsp340050b49a6c_fld2091Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2092Inst", FIELD_dsp340050b49a6c_fld2092Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2094Inst", FIELD_dsp340050b49a6c_fld2094Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2095Inst", FIELD_dsp340050b49a6c_fld2095Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2096Inst", FIELD_dsp340050b49a6c_fld2096Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2098Inst", FIELD_dsp340050b49a6c_fld2098Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2099Inst", FIELD_dsp340050b49a6c_fld2099Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2100Inst", FIELD_dsp340050b49a6c_fld2100Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2101Inst", FIELD_dsp340050b49a6c_fld2101Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2102Inst", FIELD_dsp340050b49a6c_fld2102Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2103Inst", FIELD_dsp340050b49a6c_fld2103Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2104Inst", FIELD_dsp340050b49a6c_fld2104Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2105Inst", FIELD_dsp340050b49a6c_fld2105Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2106Inst", FIELD_dsp340050b49a6c_fld2106Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2107Inst", FIELD_dsp340050b49a6c_fld2107Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2108Inst", FIELD_dsp340050b49a6c_fld2108Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2109Inst", FIELD_dsp340050b49a6c_fld2109Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2110Inst", FIELD_dsp340050b49a6c_fld2110Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2111Inst", FIELD_dsp340050b49a6c_fld2111Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2112Inst", FIELD_dsp340050b49a6c_fld2112Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2113Inst", FIELD_dsp340050b49a6c_fld2113Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2114Inst", FIELD_dsp340050b49a6c_fld2114Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2115Inst", FIELD_dsp340050b49a6c_fld2115Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2116Inst", FIELD_dsp340050b49a6c_fld2116Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2117Inst", FIELD_dsp340050b49a6c_fld2117Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2118Inst", FIELD_dsp340050b49a6c_fld2118Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2119Inst", FIELD_dsp340050b49a6c_fld2119Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2120Inst", FIELD_dsp340050b49a6c_fld2120Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2122Inst", FIELD_dsp340050b49a6c_fld2122Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2123Inst", FIELD_dsp340050b49a6c_fld2123Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2124Inst", FIELD_dsp340050b49a6c_fld2124Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2125Inst", FIELD_dsp340050b49a6c_fld2125Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2126Inst", FIELD_dsp340050b49a6c_fld2126Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2127Inst", FIELD_dsp340050b49a6c_fld2127Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2128Inst", FIELD_dsp340050b49a6c_fld2128Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2129Inst", FIELD_dsp340050b49a6c_fld2129Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2131Inst", FIELD_dsp340050b49a6c_fld2131Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2132Inst", FIELD_dsp340050b49a6c_fld2132Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2133Inst", FIELD_dsp340050b49a6c_fld2133Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2134Inst", FIELD_dsp340050b49a6c_fld2134Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2136Inst", FIELD_dsp340050b49a6c_fld2136Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2137Inst", FIELD_dsp340050b49a6c_fld2137Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2138Inst", FIELD_dsp340050b49a6c_fld2138Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2139Inst", FIELD_dsp340050b49a6c_fld2139Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2140Inst", FIELD_dsp340050b49a6c_fld2140Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2141Inst", FIELD_dsp340050b49a6c_fld2141Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2142Inst", FIELD_dsp340050b49a6c_fld2142Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2143Inst", FIELD_dsp340050b49a6c_fld2143Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2144Inst", FIELD_dsp340050b49a6c_fld2144Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2145Inst", FIELD_dsp340050b49a6c_fld2145Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2146Inst", FIELD_dsp340050b49a6c_fld2146Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2147Inst", FIELD_dsp340050b49a6c_fld2147Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2149Inst", FIELD_dsp340050b49a6c_fld2149Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2151Inst", FIELD_dsp340050b49a6c_fld2151Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2153Inst", FIELD_dsp340050b49a6c_fld2153Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2154Inst", FIELD_dsp340050b49a6c_fld2154Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2155Inst", FIELD_dsp340050b49a6c_fld2155Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2156Inst", FIELD_dsp340050b49a6c_fld2156Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2157Inst", FIELD_dsp340050b49a6c_fld2157Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2158Inst", FIELD_dsp340050b49a6c_fld2158Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2159Inst", FIELD_dsp340050b49a6c_fld2159Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2160Inst", FIELD_dsp340050b49a6c_fld2160Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2161Inst", FIELD_dsp340050b49a6c_fld2161Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2162Inst", FIELD_dsp340050b49a6c_fld2162Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2163Inst", FIELD_dsp340050b49a6c_fld2163Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2164Inst", FIELD_dsp340050b49a6c_fld2164Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2165Inst", FIELD_dsp340050b49a6c_fld2165Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2166Inst", FIELD_dsp340050b49a6c_fld2166Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2167Inst", FIELD_dsp340050b49a6c_fld2167Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2168Inst", FIELD_dsp340050b49a6c_fld2168Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2169Inst", FIELD_dsp340050b49a6c_fld2169Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2171Inst", FIELD_dsp340050b49a6c_fld2171Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2172Inst", FIELD_dsp340050b49a6c_fld2172Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2173Inst", FIELD_dsp340050b49a6c_fld2173Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2174Inst", FIELD_dsp340050b49a6c_fld2174Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2175Inst", FIELD_dsp340050b49a6c_fld2175Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2177Inst", FIELD_dsp340050b49a6c_fld2177Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2178Inst", FIELD_dsp340050b49a6c_fld2178Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2179Inst", FIELD_dsp340050b49a6c_fld2179Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2180Inst", FIELD_dsp340050b49a6c_fld2180Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2181Inst", FIELD_dsp340050b49a6c_fld2181Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2182Inst", FIELD_dsp340050b49a6c_fld2182Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2183Inst", FIELD_dsp340050b49a6c_fld2183Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2184Inst", FIELD_dsp340050b49a6c_fld2184Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2185Inst", FIELD_dsp340050b49a6c_fld2185Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2186Inst", FIELD_dsp340050b49a6c_fld2186Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2187Inst", FIELD_dsp340050b49a6c_fld2187Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2188Inst", FIELD_dsp340050b49a6c_fld2188Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2189Inst", FIELD_dsp340050b49a6c_fld2189Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2190Inst", FIELD_dsp340050b49a6c_fld2190Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2191Inst", FIELD_dsp340050b49a6c_fld2191Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2192Inst", FIELD_dsp340050b49a6c_fld2192Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2193Inst", FIELD_dsp340050b49a6c_fld2193Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2194Inst", FIELD_dsp340050b49a6c_fld2194Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2195Inst", FIELD_dsp340050b49a6c_fld2195Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2196Inst", FIELD_dsp340050b49a6c_fld2196Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2197Inst", FIELD_dsp340050b49a6c_fld2197Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2198Inst", FIELD_dsp340050b49a6c_fld2198Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2199Inst", FIELD_dsp340050b49a6c_fld2199Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2200Inst", FIELD_dsp340050b49a6c_fld2200Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2201Inst", FIELD_dsp340050b49a6c_fld2201Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2202Inst", FIELD_dsp340050b49a6c_fld2202Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2203Inst", FIELD_dsp340050b49a6c_fld2203Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2204Inst", FIELD_dsp340050b49a6c_fld2204Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2205Inst", FIELD_dsp340050b49a6c_fld2205Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2206Inst", FIELD_dsp340050b49a6c_fld2206Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2207Inst", FIELD_dsp340050b49a6c_fld2207Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2208Inst", FIELD_dsp340050b49a6c_fld2208Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2209Inst", FIELD_dsp340050b49a6c_fld2209Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2210Inst", FIELD_dsp340050b49a6c_fld2210Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2211Inst", FIELD_dsp340050b49a6c_fld2211Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2212Inst", FIELD_dsp340050b49a6c_fld2212Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2213Inst", FIELD_dsp340050b49a6c_fld2213Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2214Inst", FIELD_dsp340050b49a6c_fld2214Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2215Inst", FIELD_dsp340050b49a6c_fld2215Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2216Inst", FIELD_dsp340050b49a6c_fld2216Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2217Inst", FIELD_dsp340050b49a6c_fld2217Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2218Inst", FIELD_dsp340050b49a6c_fld2218Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2219Inst", FIELD_dsp340050b49a6c_fld2219Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2220Inst", FIELD_dsp340050b49a6c_fld2220Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2221Inst", FIELD_dsp340050b49a6c_fld2221Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2222Inst", FIELD_dsp340050b49a6c_fld2222Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2223Inst", FIELD_dsp340050b49a6c_fld2223Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2224Inst", FIELD_dsp340050b49a6c_fld2224Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2225Inst", FIELD_dsp340050b49a6c_fld2225Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2226Inst", FIELD_dsp340050b49a6c_fld2226Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2227Inst", FIELD_dsp340050b49a6c_fld2227Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2228Inst", FIELD_dsp340050b49a6c_fld2228Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2229Inst", FIELD_dsp340050b49a6c_fld2229Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2230Inst", FIELD_dsp340050b49a6c_fld2230Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2231Inst", FIELD_dsp340050b49a6c_fld2231Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2232Inst", FIELD_dsp340050b49a6c_fld2232Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2234Inst", FIELD_dsp340050b49a6c_fld2234Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2235Inst", FIELD_dsp340050b49a6c_fld2235Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2236Inst", FIELD_dsp340050b49a6c_fld2236Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2237Inst", FIELD_dsp340050b49a6c_fld2237Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2238Inst", FIELD_dsp340050b49a6c_fld2238Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2239Inst", FIELD_dsp340050b49a6c_fld2239Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2240Inst", FIELD_dsp340050b49a6c_fld2240Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2241Inst", FIELD_dsp340050b49a6c_fld2241Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2242Inst", FIELD_dsp340050b49a6c_fld2242Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2243Inst", FIELD_dsp340050b49a6c_fld2243Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2244Inst", FIELD_dsp340050b49a6c_fld2244Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2245Inst", FIELD_dsp340050b49a6c_fld2245Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2246Inst", FIELD_dsp340050b49a6c_fld2246Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2247Inst", FIELD_dsp340050b49a6c_fld2247Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2248Inst", FIELD_dsp340050b49a6c_fld2248Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2249Inst", FIELD_dsp340050b49a6c_fld2249Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2250Inst", FIELD_dsp340050b49a6c_fld2250Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2251Inst", FIELD_dsp340050b49a6c_fld2251Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2252Inst", FIELD_dsp340050b49a6c_fld2252Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2253Inst", FIELD_dsp340050b49a6c_fld2253Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2254", FIELD_dsp340050b49a6c_fld2254, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2255Inst", FIELD_dsp340050b49a6c_fld2255Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2257Inst", FIELD_dsp340050b49a6c_fld2257Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3627Inst", FIELD_dsp340050b49a6c_fld3627Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3630Inst", FIELD_dsp340050b49a6c_fld3630Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3631Inst", FIELD_dsp340050b49a6c_fld3631Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3633Inst", FIELD_dsp340050b49a6c_fld3633Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3634", FIELD_dsp340050b49a6c_fld3634, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3635Inst", FIELD_dsp340050b49a6c_fld3635Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3636Inst", FIELD_dsp340050b49a6c_fld3636Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3637Inst", FIELD_dsp340050b49a6c_fld3637Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3638Inst", FIELD_dsp340050b49a6c_fld3638Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3639Inst", FIELD_dsp340050b49a6c_fld3639Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3640Inst", FIELD_dsp340050b49a6c_fld3640Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3642Inst", FIELD_dsp340050b49a6c_fld3642Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3643Inst", FIELD_dsp340050b49a6c_fld3643Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3644Inst", FIELD_dsp340050b49a6c_fld3644Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3645Inst", FIELD_dsp340050b49a6c_fld3645Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3647Inst", FIELD_dsp340050b49a6c_fld3647Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3648Inst", FIELD_dsp340050b49a6c_fld3648Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3649Inst", FIELD_dsp340050b49a6c_fld3649Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3650Inst", FIELD_dsp340050b49a6c_fld3650Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3651Inst", FIELD_dsp340050b49a6c_fld3651Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3653Inst", FIELD_dsp340050b49a6c_fld3653Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3654Inst", FIELD_dsp340050b49a6c_fld3654Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3655Inst", FIELD_dsp340050b49a6c_fld3655Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3656Inst", FIELD_dsp340050b49a6c_fld3656Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3657Inst", FIELD_dsp340050b49a6c_fld3657Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3658Inst", FIELD_dsp340050b49a6c_fld3658Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3659Inst", FIELD_dsp340050b49a6c_fld3659Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3660Inst", FIELD_dsp340050b49a6c_fld3660Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3661Inst", FIELD_dsp340050b49a6c_fld3661Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3662Inst", FIELD_dsp340050b49a6c_fld3662Inst, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s3", FIELD_op0_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2025", FIELD_dsp340050b49a6c_fld2025, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2027", FIELD_dsp340050b49a6c_fld2027, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2258GP_slot2", FIELD_dsp340050b49a6c_fld2258GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2259GP_slot2", FIELD_dsp340050b49a6c_fld2259GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2260GP_slot2", FIELD_dsp340050b49a6c_fld2260GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2261GP_slot2", FIELD_dsp340050b49a6c_fld2261GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2262GP_slot2", FIELD_dsp340050b49a6c_fld2262GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2263GP_slot2", FIELD_dsp340050b49a6c_fld2263GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2264GP_slot2", FIELD_dsp340050b49a6c_fld2264GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2266GP_slot2", FIELD_dsp340050b49a6c_fld2266GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2267GP_slot2", FIELD_dsp340050b49a6c_fld2267GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2268GP_slot2", FIELD_dsp340050b49a6c_fld2268GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2269GP_slot2", FIELD_dsp340050b49a6c_fld2269GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2270GP_slot2", FIELD_dsp340050b49a6c_fld2270GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2271GP_slot2", FIELD_dsp340050b49a6c_fld2271GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2272", FIELD_dsp340050b49a6c_fld2272, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2273GP_slot2", FIELD_dsp340050b49a6c_fld2273GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2274GP_slot2", FIELD_dsp340050b49a6c_fld2274GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2275GP_slot2", FIELD_dsp340050b49a6c_fld2275GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2277GP_slot2", FIELD_dsp340050b49a6c_fld2277GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2278GP_slot2", FIELD_dsp340050b49a6c_fld2278GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2279GP_slot2", FIELD_dsp340050b49a6c_fld2279GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2280GP_slot2", FIELD_dsp340050b49a6c_fld2280GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2281GP_slot2", FIELD_dsp340050b49a6c_fld2281GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2282GP_slot2", FIELD_dsp340050b49a6c_fld2282GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2283GP_slot2", FIELD_dsp340050b49a6c_fld2283GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2284GP_slot2", FIELD_dsp340050b49a6c_fld2284GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2286GP_slot2", FIELD_dsp340050b49a6c_fld2286GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2287GP_slot2", FIELD_dsp340050b49a6c_fld2287GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2288GP_slot2", FIELD_dsp340050b49a6c_fld2288GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2289GP_slot2", FIELD_dsp340050b49a6c_fld2289GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2290GP_slot2", FIELD_dsp340050b49a6c_fld2290GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2291GP_slot2", FIELD_dsp340050b49a6c_fld2291GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2292GP_slot2", FIELD_dsp340050b49a6c_fld2292GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2293GP_slot2", FIELD_dsp340050b49a6c_fld2293GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2294GP_slot2", FIELD_dsp340050b49a6c_fld2294GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2295GP_slot2", FIELD_dsp340050b49a6c_fld2295GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2296GP_slot2", FIELD_dsp340050b49a6c_fld2296GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2297GP_slot2", FIELD_dsp340050b49a6c_fld2297GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2298GP_slot2", FIELD_dsp340050b49a6c_fld2298GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2299GP_slot2", FIELD_dsp340050b49a6c_fld2299GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2300GP_slot2", FIELD_dsp340050b49a6c_fld2300GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2301GP_slot2", FIELD_dsp340050b49a6c_fld2301GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2302", FIELD_dsp340050b49a6c_fld2302, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2303GP_slot2", FIELD_dsp340050b49a6c_fld2303GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2304GP_slot2", FIELD_dsp340050b49a6c_fld2304GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2305", FIELD_dsp340050b49a6c_fld2305, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2306GP_slot2", FIELD_dsp340050b49a6c_fld2306GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2308GP_slot2", FIELD_dsp340050b49a6c_fld2308GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2309GP_slot2", FIELD_dsp340050b49a6c_fld2309GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2310GP_slot2", FIELD_dsp340050b49a6c_fld2310GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2312GP_slot2", FIELD_dsp340050b49a6c_fld2312GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2313", FIELD_dsp340050b49a6c_fld2313, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2314GP_slot2", FIELD_dsp340050b49a6c_fld2314GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2316GP_slot2", FIELD_dsp340050b49a6c_fld2316GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2317GP_slot2", FIELD_dsp340050b49a6c_fld2317GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2318GP_slot2", FIELD_dsp340050b49a6c_fld2318GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2319GP_slot2", FIELD_dsp340050b49a6c_fld2319GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2320GP_slot2", FIELD_dsp340050b49a6c_fld2320GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2321GP_slot2", FIELD_dsp340050b49a6c_fld2321GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2322GP_slot2", FIELD_dsp340050b49a6c_fld2322GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2323GP_slot2", FIELD_dsp340050b49a6c_fld2323GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2324GP_slot2", FIELD_dsp340050b49a6c_fld2324GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2325GP_slot2", FIELD_dsp340050b49a6c_fld2325GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2326GP_slot2", FIELD_dsp340050b49a6c_fld2326GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2327GP_slot2", FIELD_dsp340050b49a6c_fld2327GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2328GP_slot2", FIELD_dsp340050b49a6c_fld2328GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2329GP_slot2", FIELD_dsp340050b49a6c_fld2329GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2330GP_slot2", FIELD_dsp340050b49a6c_fld2330GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2331GP_slot2", FIELD_dsp340050b49a6c_fld2331GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2332GP_slot2", FIELD_dsp340050b49a6c_fld2332GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2333GP_slot2", FIELD_dsp340050b49a6c_fld2333GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2334GP_slot2", FIELD_dsp340050b49a6c_fld2334GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2335GP_slot2", FIELD_dsp340050b49a6c_fld2335GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2336GP_slot2", FIELD_dsp340050b49a6c_fld2336GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2337GP_slot2", FIELD_dsp340050b49a6c_fld2337GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2338GP_slot2", FIELD_dsp340050b49a6c_fld2338GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2339GP_slot2", FIELD_dsp340050b49a6c_fld2339GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2340GP_slot2", FIELD_dsp340050b49a6c_fld2340GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2341GP_slot2", FIELD_dsp340050b49a6c_fld2341GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2342GP_slot2", FIELD_dsp340050b49a6c_fld2342GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2343GP_slot2", FIELD_dsp340050b49a6c_fld2343GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2344GP_slot2", FIELD_dsp340050b49a6c_fld2344GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2345GP_slot2", FIELD_dsp340050b49a6c_fld2345GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2346GP_slot2", FIELD_dsp340050b49a6c_fld2346GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2347GP_slot2", FIELD_dsp340050b49a6c_fld2347GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2348GP_slot2", FIELD_dsp340050b49a6c_fld2348GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2349GP_slot2", FIELD_dsp340050b49a6c_fld2349GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2350GP_slot2", FIELD_dsp340050b49a6c_fld2350GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2351GP_slot2", FIELD_dsp340050b49a6c_fld2351GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2352GP_slot2", FIELD_dsp340050b49a6c_fld2352GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2353GP_slot2", FIELD_dsp340050b49a6c_fld2353GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2354GP_slot2", FIELD_dsp340050b49a6c_fld2354GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2355GP_slot2", FIELD_dsp340050b49a6c_fld2355GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2356GP_slot2", FIELD_dsp340050b49a6c_fld2356GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2357GP_slot2", FIELD_dsp340050b49a6c_fld2357GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2358GP_slot2", FIELD_dsp340050b49a6c_fld2358GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2359GP_slot2", FIELD_dsp340050b49a6c_fld2359GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2361GP_slot2", FIELD_dsp340050b49a6c_fld2361GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2362GP_slot2", FIELD_dsp340050b49a6c_fld2362GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2364GP_slot2", FIELD_dsp340050b49a6c_fld2364GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2366GP_slot2", FIELD_dsp340050b49a6c_fld2366GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2368GP_slot2", FIELD_dsp340050b49a6c_fld2368GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2369GP_slot2", FIELD_dsp340050b49a6c_fld2369GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2370GP_slot2", FIELD_dsp340050b49a6c_fld2370GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2371GP_slot2", FIELD_dsp340050b49a6c_fld2371GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2372GP_slot2", FIELD_dsp340050b49a6c_fld2372GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2373GP_slot2", FIELD_dsp340050b49a6c_fld2373GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2374GP_slot2", FIELD_dsp340050b49a6c_fld2374GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2375GP_slot2", FIELD_dsp340050b49a6c_fld2375GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2376GP_slot2", FIELD_dsp340050b49a6c_fld2376GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2378GP_slot2", FIELD_dsp340050b49a6c_fld2378GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2379GP_slot2", FIELD_dsp340050b49a6c_fld2379GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2381GP_slot2", FIELD_dsp340050b49a6c_fld2381GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2383GP_slot2", FIELD_dsp340050b49a6c_fld2383GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2384", FIELD_dsp340050b49a6c_fld2384, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2385GP_slot2", FIELD_dsp340050b49a6c_fld2385GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2386", FIELD_dsp340050b49a6c_fld2386, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2387GP_slot2", FIELD_dsp340050b49a6c_fld2387GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2388GP_slot2", FIELD_dsp340050b49a6c_fld2388GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2389GP_slot2", FIELD_dsp340050b49a6c_fld2389GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3663GP_slot2", FIELD_dsp340050b49a6c_fld3663GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3664GP_slot2", FIELD_dsp340050b49a6c_fld3664GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3665GP_slot2", FIELD_dsp340050b49a6c_fld3665GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3666", FIELD_dsp340050b49a6c_fld3666, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3667GP_slot2", FIELD_dsp340050b49a6c_fld3667GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3668GP_slot2", FIELD_dsp340050b49a6c_fld3668GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3669GP_slot2", FIELD_dsp340050b49a6c_fld3669GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3670GP_slot2", FIELD_dsp340050b49a6c_fld3670GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3671GP_slot2", FIELD_dsp340050b49a6c_fld3671GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3673GP_slot2", FIELD_dsp340050b49a6c_fld3673GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3674GP_slot2", FIELD_dsp340050b49a6c_fld3674GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3675GP_slot2", FIELD_dsp340050b49a6c_fld3675GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3676GP_slot2", FIELD_dsp340050b49a6c_fld3676GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3678GP_slot2", FIELD_dsp340050b49a6c_fld3678GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3679GP_slot2", FIELD_dsp340050b49a6c_fld3679GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3680GP_slot2", FIELD_dsp340050b49a6c_fld3680GP_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s4", FIELD_op0_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2026", FIELD_dsp340050b49a6c_fld2026, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2031", FIELD_dsp340050b49a6c_fld2031, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2394GP_slot1", FIELD_dsp340050b49a6c_fld2394GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2395GP_slot1", FIELD_dsp340050b49a6c_fld2395GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2397GP_slot1", FIELD_dsp340050b49a6c_fld2397GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2398GP_slot1", FIELD_dsp340050b49a6c_fld2398GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2399GP_slot1", FIELD_dsp340050b49a6c_fld2399GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2400GP_slot1", FIELD_dsp340050b49a6c_fld2400GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2402GP_slot1", FIELD_dsp340050b49a6c_fld2402GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2403GP_slot1", FIELD_dsp340050b49a6c_fld2403GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2405GP_slot1", FIELD_dsp340050b49a6c_fld2405GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3681GP_slot1", FIELD_dsp340050b49a6c_fld3681GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3683GP_slot1", FIELD_dsp340050b49a6c_fld3683GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3684GP_slot1", FIELD_dsp340050b49a6c_fld3684GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3686GP_slot1", FIELD_dsp340050b49a6c_fld3686GP_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s5", FIELD_op0_s5, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2058", FIELD_dsp340050b49a6c_fld2058, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2067", FIELD_dsp340050b49a6c_fld2067, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2407GP_slot0", FIELD_dsp340050b49a6c_fld2407GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2409GP_slot0", FIELD_dsp340050b49a6c_fld2409GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2410GP_slot0", FIELD_dsp340050b49a6c_fld2410GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2411GP_slot0", FIELD_dsp340050b49a6c_fld2411GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2412GP_slot0", FIELD_dsp340050b49a6c_fld2412GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2413GP_slot0", FIELD_dsp340050b49a6c_fld2413GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2415GP_slot0", FIELD_dsp340050b49a6c_fld2415GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2416GP_slot0", FIELD_dsp340050b49a6c_fld2416GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2417GP_slot0", FIELD_dsp340050b49a6c_fld2417GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2418GP_slot0", FIELD_dsp340050b49a6c_fld2418GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2419GP_slot0", FIELD_dsp340050b49a6c_fld2419GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2420GP_slot0", FIELD_dsp340050b49a6c_fld2420GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2422GP_slot0", FIELD_dsp340050b49a6c_fld2422GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2423GP_slot0", FIELD_dsp340050b49a6c_fld2423GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2424GP_slot0", FIELD_dsp340050b49a6c_fld2424GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2425GP_slot0", FIELD_dsp340050b49a6c_fld2425GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2426GP_slot0", FIELD_dsp340050b49a6c_fld2426GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2427GP_slot0", FIELD_dsp340050b49a6c_fld2427GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2429GP_slot0", FIELD_dsp340050b49a6c_fld2429GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2430GP_slot0", FIELD_dsp340050b49a6c_fld2430GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2431GP_slot0", FIELD_dsp340050b49a6c_fld2431GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2432GP_slot0", FIELD_dsp340050b49a6c_fld2432GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2433GP_slot0", FIELD_dsp340050b49a6c_fld2433GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2434GP_slot0", FIELD_dsp340050b49a6c_fld2434GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2435GP_slot0", FIELD_dsp340050b49a6c_fld2435GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2436GP_slot0", FIELD_dsp340050b49a6c_fld2436GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2437GP_slot0", FIELD_dsp340050b49a6c_fld2437GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2438GP_slot0", FIELD_dsp340050b49a6c_fld2438GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2439GP_slot0", FIELD_dsp340050b49a6c_fld2439GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2440GP_slot0", FIELD_dsp340050b49a6c_fld2440GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2441GP_slot0", FIELD_dsp340050b49a6c_fld2441GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2443GP_slot0", FIELD_dsp340050b49a6c_fld2443GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2444GP_slot0", FIELD_dsp340050b49a6c_fld2444GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2445", FIELD_dsp340050b49a6c_fld2445, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2447GP_slot0", FIELD_dsp340050b49a6c_fld2447GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2448", FIELD_dsp340050b49a6c_fld2448, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2449GP_slot0", FIELD_dsp340050b49a6c_fld2449GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2451GP_slot0", FIELD_dsp340050b49a6c_fld2451GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2452GP_slot0", FIELD_dsp340050b49a6c_fld2452GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2453GP_slot0", FIELD_dsp340050b49a6c_fld2453GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2454GP_slot0", FIELD_dsp340050b49a6c_fld2454GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2455GP_slot0", FIELD_dsp340050b49a6c_fld2455GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2456GP_slot0", FIELD_dsp340050b49a6c_fld2456GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2457GP_slot0", FIELD_dsp340050b49a6c_fld2457GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2458GP_slot0", FIELD_dsp340050b49a6c_fld2458GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2459GP_slot0", FIELD_dsp340050b49a6c_fld2459GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2460GP_slot0", FIELD_dsp340050b49a6c_fld2460GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2461GP_slot0", FIELD_dsp340050b49a6c_fld2461GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2462", FIELD_dsp340050b49a6c_fld2462, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2463GP_slot0", FIELD_dsp340050b49a6c_fld2463GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2464GP_slot0", FIELD_dsp340050b49a6c_fld2464GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2465GP_slot0", FIELD_dsp340050b49a6c_fld2465GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2466GP_slot0", FIELD_dsp340050b49a6c_fld2466GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2467GP_slot0", FIELD_dsp340050b49a6c_fld2467GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2468GP_slot0", FIELD_dsp340050b49a6c_fld2468GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2470GP_slot0", FIELD_dsp340050b49a6c_fld2470GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2471GP_slot0", FIELD_dsp340050b49a6c_fld2471GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2472GP_slot0", FIELD_dsp340050b49a6c_fld2472GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2473GP_slot0", FIELD_dsp340050b49a6c_fld2473GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2474GP_slot0", FIELD_dsp340050b49a6c_fld2474GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2475GP_slot0", FIELD_dsp340050b49a6c_fld2475GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2477GP_slot0", FIELD_dsp340050b49a6c_fld2477GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2479GP_slot0", FIELD_dsp340050b49a6c_fld2479GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2480GP_slot0", FIELD_dsp340050b49a6c_fld2480GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2481GP_slot0", FIELD_dsp340050b49a6c_fld2481GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2482GP_slot0", FIELD_dsp340050b49a6c_fld2482GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2483GP_slot0", FIELD_dsp340050b49a6c_fld2483GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2484GP_slot0", FIELD_dsp340050b49a6c_fld2484GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2485GP_slot0", FIELD_dsp340050b49a6c_fld2485GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2486GP_slot0", FIELD_dsp340050b49a6c_fld2486GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2487GP_slot0", FIELD_dsp340050b49a6c_fld2487GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2488GP_slot0", FIELD_dsp340050b49a6c_fld2488GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2489GP_slot0", FIELD_dsp340050b49a6c_fld2489GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2490GP_slot0", FIELD_dsp340050b49a6c_fld2490GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2491GP_slot0", FIELD_dsp340050b49a6c_fld2491GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2492GP_slot0", FIELD_dsp340050b49a6c_fld2492GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2493GP_slot0", FIELD_dsp340050b49a6c_fld2493GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2494GP_slot0", FIELD_dsp340050b49a6c_fld2494GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2495GP_slot0", FIELD_dsp340050b49a6c_fld2495GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2496GP_slot0", FIELD_dsp340050b49a6c_fld2496GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2497GP_slot0", FIELD_dsp340050b49a6c_fld2497GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2498GP_slot0", FIELD_dsp340050b49a6c_fld2498GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2499GP_slot0", FIELD_dsp340050b49a6c_fld2499GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2500GP_slot0", FIELD_dsp340050b49a6c_fld2500GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2501GP_slot0", FIELD_dsp340050b49a6c_fld2501GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2502GP_slot0", FIELD_dsp340050b49a6c_fld2502GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2503GP_slot0", FIELD_dsp340050b49a6c_fld2503GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2504GP_slot0", FIELD_dsp340050b49a6c_fld2504GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2505GP_slot0", FIELD_dsp340050b49a6c_fld2505GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2506GP_slot0", FIELD_dsp340050b49a6c_fld2506GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2507GP_slot0", FIELD_dsp340050b49a6c_fld2507GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2508GP_slot0", FIELD_dsp340050b49a6c_fld2508GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2509GP_slot0", FIELD_dsp340050b49a6c_fld2509GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2510GP_slot0", FIELD_dsp340050b49a6c_fld2510GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2512GP_slot0", FIELD_dsp340050b49a6c_fld2512GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2514GP_slot0", FIELD_dsp340050b49a6c_fld2514GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2515GP_slot0", FIELD_dsp340050b49a6c_fld2515GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2516GP_slot0", FIELD_dsp340050b49a6c_fld2516GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2517GP_slot0", FIELD_dsp340050b49a6c_fld2517GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2518GP_slot0", FIELD_dsp340050b49a6c_fld2518GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2519GP_slot0", FIELD_dsp340050b49a6c_fld2519GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2520GP_slot0", FIELD_dsp340050b49a6c_fld2520GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2521GP_slot0", FIELD_dsp340050b49a6c_fld2521GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2523GP_slot0", FIELD_dsp340050b49a6c_fld2523GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2524GP_slot0", FIELD_dsp340050b49a6c_fld2524GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2526GP_slot0", FIELD_dsp340050b49a6c_fld2526GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2527GP_slot0", FIELD_dsp340050b49a6c_fld2527GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2528GP_slot0", FIELD_dsp340050b49a6c_fld2528GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2529GP_slot0", FIELD_dsp340050b49a6c_fld2529GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2530", FIELD_dsp340050b49a6c_fld2530, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2531GP_slot0", FIELD_dsp340050b49a6c_fld2531GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3688GP_slot0", FIELD_dsp340050b49a6c_fld3688GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3689GP_slot0", FIELD_dsp340050b49a6c_fld3689GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3690GP_slot0", FIELD_dsp340050b49a6c_fld3690GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3691GP_slot0", FIELD_dsp340050b49a6c_fld3691GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3692GP_slot0", FIELD_dsp340050b49a6c_fld3692GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3693GP_slot0", FIELD_dsp340050b49a6c_fld3693GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3695GP_slot0", FIELD_dsp340050b49a6c_fld3695GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3696GP_slot0", FIELD_dsp340050b49a6c_fld3696GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3697GP_slot0", FIELD_dsp340050b49a6c_fld3697GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3698GP_slot0", FIELD_dsp340050b49a6c_fld3698GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3699GP_slot0", FIELD_dsp340050b49a6c_fld3699GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3700GP_slot0", FIELD_dsp340050b49a6c_fld3700GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3702GP_slot0", FIELD_dsp340050b49a6c_fld3702GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3703GP_slot0", FIELD_dsp340050b49a6c_fld3703GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3705GP_slot0", FIELD_dsp340050b49a6c_fld3705GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3706GP_slot0", FIELD_dsp340050b49a6c_fld3706GP_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s6", FIELD_op0_s6, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2532DOT_slot2", FIELD_dsp340050b49a6c_fld2532DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2533DOT_slot2", FIELD_dsp340050b49a6c_fld2533DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2534DOT_slot2", FIELD_dsp340050b49a6c_fld2534DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2535DOT_slot2", FIELD_dsp340050b49a6c_fld2535DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2536DOT_slot2", FIELD_dsp340050b49a6c_fld2536DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2537DOT_slot2", FIELD_dsp340050b49a6c_fld2537DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2538DOT_slot2", FIELD_dsp340050b49a6c_fld2538DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2539DOT_slot2", FIELD_dsp340050b49a6c_fld2539DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2540DOT_slot2", FIELD_dsp340050b49a6c_fld2540DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2541DOT_slot2", FIELD_dsp340050b49a6c_fld2541DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2542DOT_slot2", FIELD_dsp340050b49a6c_fld2542DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2543DOT_slot2", FIELD_dsp340050b49a6c_fld2543DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2544DOT_slot2", FIELD_dsp340050b49a6c_fld2544DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2545DOT_slot2", FIELD_dsp340050b49a6c_fld2545DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2546DOT_slot2", FIELD_dsp340050b49a6c_fld2546DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2547DOT_slot2", FIELD_dsp340050b49a6c_fld2547DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2548DOT_slot2", FIELD_dsp340050b49a6c_fld2548DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2549DOT_slot2", FIELD_dsp340050b49a6c_fld2549DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2550DOT_slot2", FIELD_dsp340050b49a6c_fld2550DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2551DOT_slot2", FIELD_dsp340050b49a6c_fld2551DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2552DOT_slot2", FIELD_dsp340050b49a6c_fld2552DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2553DOT_slot2", FIELD_dsp340050b49a6c_fld2553DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2554DOT_slot2", FIELD_dsp340050b49a6c_fld2554DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2555DOT_slot2", FIELD_dsp340050b49a6c_fld2555DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2556DOT_slot2", FIELD_dsp340050b49a6c_fld2556DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2557DOT_slot2", FIELD_dsp340050b49a6c_fld2557DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2558DOT_slot2", FIELD_dsp340050b49a6c_fld2558DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2559DOT_slot2", FIELD_dsp340050b49a6c_fld2559DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2560DOT_slot2", FIELD_dsp340050b49a6c_fld2560DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2561DOT_slot2", FIELD_dsp340050b49a6c_fld2561DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2562DOT_slot2", FIELD_dsp340050b49a6c_fld2562DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2563DOT_slot2", FIELD_dsp340050b49a6c_fld2563DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2564DOT_slot2", FIELD_dsp340050b49a6c_fld2564DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2565DOT_slot2", FIELD_dsp340050b49a6c_fld2565DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2566DOT_slot2", FIELD_dsp340050b49a6c_fld2566DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2567DOT_slot2", FIELD_dsp340050b49a6c_fld2567DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2568DOT_slot2", FIELD_dsp340050b49a6c_fld2568DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2569DOT_slot2", FIELD_dsp340050b49a6c_fld2569DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2571DOT_slot2", FIELD_dsp340050b49a6c_fld2571DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2572DOT_slot2", FIELD_dsp340050b49a6c_fld2572DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2573DOT_slot2", FIELD_dsp340050b49a6c_fld2573DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2574DOT_slot2", FIELD_dsp340050b49a6c_fld2574DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2575DOT_slot2", FIELD_dsp340050b49a6c_fld2575DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2576DOT_slot2", FIELD_dsp340050b49a6c_fld2576DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2577DOT_slot2", FIELD_dsp340050b49a6c_fld2577DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2578", FIELD_dsp340050b49a6c_fld2578, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2579DOT_slot2", FIELD_dsp340050b49a6c_fld2579DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2580DOT_slot2", FIELD_dsp340050b49a6c_fld2580DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2581DOT_slot2", FIELD_dsp340050b49a6c_fld2581DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2582DOT_slot2", FIELD_dsp340050b49a6c_fld2582DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2583DOT_slot2", FIELD_dsp340050b49a6c_fld2583DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2584DOT_slot2", FIELD_dsp340050b49a6c_fld2584DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2585DOT_slot2", FIELD_dsp340050b49a6c_fld2585DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2586DOT_slot2", FIELD_dsp340050b49a6c_fld2586DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2587DOT_slot2", FIELD_dsp340050b49a6c_fld2587DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2588DOT_slot2", FIELD_dsp340050b49a6c_fld2588DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2589DOT_slot2", FIELD_dsp340050b49a6c_fld2589DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2590DOT_slot2", FIELD_dsp340050b49a6c_fld2590DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2591DOT_slot2", FIELD_dsp340050b49a6c_fld2591DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2592DOT_slot2", FIELD_dsp340050b49a6c_fld2592DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2595DOT_slot2", FIELD_dsp340050b49a6c_fld2595DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2596DOT_slot2", FIELD_dsp340050b49a6c_fld2596DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2598DOT_slot2", FIELD_dsp340050b49a6c_fld2598DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2599DOT_slot2", FIELD_dsp340050b49a6c_fld2599DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2601DOT_slot2", FIELD_dsp340050b49a6c_fld2601DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2602DOT_slot2", FIELD_dsp340050b49a6c_fld2602DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2604DOT_slot2", FIELD_dsp340050b49a6c_fld2604DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2605", FIELD_dsp340050b49a6c_fld2605, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2606DOT_slot2", FIELD_dsp340050b49a6c_fld2606DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2608DOT_slot2", FIELD_dsp340050b49a6c_fld2608DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2609DOT_slot2", FIELD_dsp340050b49a6c_fld2609DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2610DOT_slot2", FIELD_dsp340050b49a6c_fld2610DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2611DOT_slot2", FIELD_dsp340050b49a6c_fld2611DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2612", FIELD_dsp340050b49a6c_fld2612, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2613DOT_slot2", FIELD_dsp340050b49a6c_fld2613DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2614", FIELD_dsp340050b49a6c_fld2614, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2615DOT_slot2", FIELD_dsp340050b49a6c_fld2615DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2616DOT_slot2", FIELD_dsp340050b49a6c_fld2616DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2617DOT_slot2", FIELD_dsp340050b49a6c_fld2617DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2618DOT_slot2", FIELD_dsp340050b49a6c_fld2618DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2619DOT_slot2", FIELD_dsp340050b49a6c_fld2619DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2620DOT_slot2", FIELD_dsp340050b49a6c_fld2620DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2621DOT_slot2", FIELD_dsp340050b49a6c_fld2621DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2622DOT_slot2", FIELD_dsp340050b49a6c_fld2622DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2623DOT_slot2", FIELD_dsp340050b49a6c_fld2623DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2624DOT_slot2", FIELD_dsp340050b49a6c_fld2624DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2625", FIELD_dsp340050b49a6c_fld2625, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2626DOT_slot2", FIELD_dsp340050b49a6c_fld2626DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2628DOT_slot2", FIELD_dsp340050b49a6c_fld2628DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2630DOT_slot2", FIELD_dsp340050b49a6c_fld2630DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2632DOT_slot2", FIELD_dsp340050b49a6c_fld2632DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2633DOT_slot2", FIELD_dsp340050b49a6c_fld2633DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2635DOT_slot2", FIELD_dsp340050b49a6c_fld2635DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2636DOT_slot2", FIELD_dsp340050b49a6c_fld2636DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2637DOT_slot2", FIELD_dsp340050b49a6c_fld2637DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2640DOT_slot2", FIELD_dsp340050b49a6c_fld2640DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2641DOT_slot2", FIELD_dsp340050b49a6c_fld2641DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2642DOT_slot2", FIELD_dsp340050b49a6c_fld2642DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2643DOT_slot2", FIELD_dsp340050b49a6c_fld2643DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2644DOT_slot2", FIELD_dsp340050b49a6c_fld2644DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2645DOT_slot2", FIELD_dsp340050b49a6c_fld2645DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2646DOT_slot2", FIELD_dsp340050b49a6c_fld2646DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2647DOT_slot2", FIELD_dsp340050b49a6c_fld2647DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2648DOT_slot2", FIELD_dsp340050b49a6c_fld2648DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2649DOT_slot2", FIELD_dsp340050b49a6c_fld2649DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2650DOT_slot2", FIELD_dsp340050b49a6c_fld2650DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2651DOT_slot2", FIELD_dsp340050b49a6c_fld2651DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2652DOT_slot2", FIELD_dsp340050b49a6c_fld2652DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2654DOT_slot2", FIELD_dsp340050b49a6c_fld2654DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2655DOT_slot2", FIELD_dsp340050b49a6c_fld2655DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2656DOT_slot2", FIELD_dsp340050b49a6c_fld2656DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2657DOT_slot2", FIELD_dsp340050b49a6c_fld2657DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2658DOT_slot2", FIELD_dsp340050b49a6c_fld2658DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3708DOT_slot2", FIELD_dsp340050b49a6c_fld3708DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3709DOT_slot2", FIELD_dsp340050b49a6c_fld3709DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3710DOT_slot2", FIELD_dsp340050b49a6c_fld3710DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3711DOT_slot2", FIELD_dsp340050b49a6c_fld3711DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3712", FIELD_dsp340050b49a6c_fld3712, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3713DOT_slot2", FIELD_dsp340050b49a6c_fld3713DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3714DOT_slot2", FIELD_dsp340050b49a6c_fld3714DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3715", FIELD_dsp340050b49a6c_fld3715, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3716DOT_slot2", FIELD_dsp340050b49a6c_fld3716DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3717DOT_slot2", FIELD_dsp340050b49a6c_fld3717DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3718", FIELD_dsp340050b49a6c_fld3718, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3719DOT_slot2", FIELD_dsp340050b49a6c_fld3719DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3721DOT_slot2", FIELD_dsp340050b49a6c_fld3721DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3722", FIELD_dsp340050b49a6c_fld3722, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3723DOT_slot2", FIELD_dsp340050b49a6c_fld3723DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3724DOT_slot2", FIELD_dsp340050b49a6c_fld3724DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3725DOT_slot2", FIELD_dsp340050b49a6c_fld3725DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3726DOT_slot2", FIELD_dsp340050b49a6c_fld3726DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3727DOT_slot2", FIELD_dsp340050b49a6c_fld3727DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3728DOT_slot2", FIELD_dsp340050b49a6c_fld3728DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3729DOT_slot2", FIELD_dsp340050b49a6c_fld3729DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3731DOT_slot2", FIELD_dsp340050b49a6c_fld3731DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3732DOT_slot2", FIELD_dsp340050b49a6c_fld3732DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3733DOT_slot2", FIELD_dsp340050b49a6c_fld3733DOT_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s7", FIELD_op0_s7, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3734DOT_slot1", FIELD_dsp340050b49a6c_fld3734DOT_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s8", FIELD_op0_s8, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2068", FIELD_dsp340050b49a6c_fld2068, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2666DOT_slot0", FIELD_dsp340050b49a6c_fld2666DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2667DOT_slot0", FIELD_dsp340050b49a6c_fld2667DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2668DOT_slot0", FIELD_dsp340050b49a6c_fld2668DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2669DOT_slot0", FIELD_dsp340050b49a6c_fld2669DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2671DOT_slot0", FIELD_dsp340050b49a6c_fld2671DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2672DOT_slot0", FIELD_dsp340050b49a6c_fld2672DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2673DOT_slot0", FIELD_dsp340050b49a6c_fld2673DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2674DOT_slot0", FIELD_dsp340050b49a6c_fld2674DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2675DOT_slot0", FIELD_dsp340050b49a6c_fld2675DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2676DOT_slot0", FIELD_dsp340050b49a6c_fld2676DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2677DOT_slot0", FIELD_dsp340050b49a6c_fld2677DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2678DOT_slot0", FIELD_dsp340050b49a6c_fld2678DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2679DOT_slot0", FIELD_dsp340050b49a6c_fld2679DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2680DOT_slot0", FIELD_dsp340050b49a6c_fld2680DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2681DOT_slot0", FIELD_dsp340050b49a6c_fld2681DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2682DOT_slot0", FIELD_dsp340050b49a6c_fld2682DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2683DOT_slot0", FIELD_dsp340050b49a6c_fld2683DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2684DOT_slot0", FIELD_dsp340050b49a6c_fld2684DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2685DOT_slot0", FIELD_dsp340050b49a6c_fld2685DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2686DOT_slot0", FIELD_dsp340050b49a6c_fld2686DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2688DOT_slot0", FIELD_dsp340050b49a6c_fld2688DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2689DOT_slot0", FIELD_dsp340050b49a6c_fld2689DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2690DOT_slot0", FIELD_dsp340050b49a6c_fld2690DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2692DOT_slot0", FIELD_dsp340050b49a6c_fld2692DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2693DOT_slot0", FIELD_dsp340050b49a6c_fld2693DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2695DOT_slot0", FIELD_dsp340050b49a6c_fld2695DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2697DOT_slot0", FIELD_dsp340050b49a6c_fld2697DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2699DOT_slot0", FIELD_dsp340050b49a6c_fld2699DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2700DOT_slot0", FIELD_dsp340050b49a6c_fld2700DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2701DOT_slot0", FIELD_dsp340050b49a6c_fld2701DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2702DOT_slot0", FIELD_dsp340050b49a6c_fld2702DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2703DOT_slot0", FIELD_dsp340050b49a6c_fld2703DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2704DOT_slot0", FIELD_dsp340050b49a6c_fld2704DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2705DOT_slot0", FIELD_dsp340050b49a6c_fld2705DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3735DOT_slot0", FIELD_dsp340050b49a6c_fld3735DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3736", FIELD_dsp340050b49a6c_fld3736, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3737DOT_slot0", FIELD_dsp340050b49a6c_fld3737DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3738DOT_slot0", FIELD_dsp340050b49a6c_fld3738DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3739DOT_slot0", FIELD_dsp340050b49a6c_fld3739DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3740DOT_slot0", FIELD_dsp340050b49a6c_fld3740DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3741DOT_slot0", FIELD_dsp340050b49a6c_fld3741DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3742DOT_slot0", FIELD_dsp340050b49a6c_fld3742DOT_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s9", FIELD_op0_s9, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2706PQ_slot2", FIELD_dsp340050b49a6c_fld2706PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2707PQ_slot2", FIELD_dsp340050b49a6c_fld2707PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2708PQ_slot2", FIELD_dsp340050b49a6c_fld2708PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2709PQ_slot2", FIELD_dsp340050b49a6c_fld2709PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2710PQ_slot2", FIELD_dsp340050b49a6c_fld2710PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2711PQ_slot2", FIELD_dsp340050b49a6c_fld2711PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2713PQ_slot2", FIELD_dsp340050b49a6c_fld2713PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2714PQ_slot2", FIELD_dsp340050b49a6c_fld2714PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2715PQ_slot2", FIELD_dsp340050b49a6c_fld2715PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2717PQ_slot2", FIELD_dsp340050b49a6c_fld2717PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2718PQ_slot2", FIELD_dsp340050b49a6c_fld2718PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2719PQ_slot2", FIELD_dsp340050b49a6c_fld2719PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2721PQ_slot2", FIELD_dsp340050b49a6c_fld2721PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2722PQ_slot2", FIELD_dsp340050b49a6c_fld2722PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2723PQ_slot2", FIELD_dsp340050b49a6c_fld2723PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2724PQ_slot2", FIELD_dsp340050b49a6c_fld2724PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2725PQ_slot2", FIELD_dsp340050b49a6c_fld2725PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2726PQ_slot2", FIELD_dsp340050b49a6c_fld2726PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2727PQ_slot2", FIELD_dsp340050b49a6c_fld2727PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2728PQ_slot2", FIELD_dsp340050b49a6c_fld2728PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2729PQ_slot2", FIELD_dsp340050b49a6c_fld2729PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2730PQ_slot2", FIELD_dsp340050b49a6c_fld2730PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2731PQ_slot2", FIELD_dsp340050b49a6c_fld2731PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2732PQ_slot2", FIELD_dsp340050b49a6c_fld2732PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2733PQ_slot2", FIELD_dsp340050b49a6c_fld2733PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2734PQ_slot2", FIELD_dsp340050b49a6c_fld2734PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2735PQ_slot2", FIELD_dsp340050b49a6c_fld2735PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2736PQ_slot2", FIELD_dsp340050b49a6c_fld2736PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2737", FIELD_dsp340050b49a6c_fld2737, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2738PQ_slot2", FIELD_dsp340050b49a6c_fld2738PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2739PQ_slot2", FIELD_dsp340050b49a6c_fld2739PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2741PQ_slot2", FIELD_dsp340050b49a6c_fld2741PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2742PQ_slot2", FIELD_dsp340050b49a6c_fld2742PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2743PQ_slot2", FIELD_dsp340050b49a6c_fld2743PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2746PQ_slot2", FIELD_dsp340050b49a6c_fld2746PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2747PQ_slot2", FIELD_dsp340050b49a6c_fld2747PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2748PQ_slot2", FIELD_dsp340050b49a6c_fld2748PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2750PQ_slot2", FIELD_dsp340050b49a6c_fld2750PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2751PQ_slot2", FIELD_dsp340050b49a6c_fld2751PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2752PQ_slot2", FIELD_dsp340050b49a6c_fld2752PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2753PQ_slot2", FIELD_dsp340050b49a6c_fld2753PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2754PQ_slot2", FIELD_dsp340050b49a6c_fld2754PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2755PQ_slot2", FIELD_dsp340050b49a6c_fld2755PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2756PQ_slot2", FIELD_dsp340050b49a6c_fld2756PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2757PQ_slot2", FIELD_dsp340050b49a6c_fld2757PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2758PQ_slot2", FIELD_dsp340050b49a6c_fld2758PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2759PQ_slot2", FIELD_dsp340050b49a6c_fld2759PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2760PQ_slot2", FIELD_dsp340050b49a6c_fld2760PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2761PQ_slot2", FIELD_dsp340050b49a6c_fld2761PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2762PQ_slot2", FIELD_dsp340050b49a6c_fld2762PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2763PQ_slot2", FIELD_dsp340050b49a6c_fld2763PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2764PQ_slot2", FIELD_dsp340050b49a6c_fld2764PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2765PQ_slot2", FIELD_dsp340050b49a6c_fld2765PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2766PQ_slot2", FIELD_dsp340050b49a6c_fld2766PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2767PQ_slot2", FIELD_dsp340050b49a6c_fld2767PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2768PQ_slot2", FIELD_dsp340050b49a6c_fld2768PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2769PQ_slot2", FIELD_dsp340050b49a6c_fld2769PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2770PQ_slot2", FIELD_dsp340050b49a6c_fld2770PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2771PQ_slot2", FIELD_dsp340050b49a6c_fld2771PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2772PQ_slot2", FIELD_dsp340050b49a6c_fld2772PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2773PQ_slot2", FIELD_dsp340050b49a6c_fld2773PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2774PQ_slot2", FIELD_dsp340050b49a6c_fld2774PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2775PQ_slot2", FIELD_dsp340050b49a6c_fld2775PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2776PQ_slot2", FIELD_dsp340050b49a6c_fld2776PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2777PQ_slot2", FIELD_dsp340050b49a6c_fld2777PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2778PQ_slot2", FIELD_dsp340050b49a6c_fld2778PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2779PQ_slot2", FIELD_dsp340050b49a6c_fld2779PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2780PQ_slot2", FIELD_dsp340050b49a6c_fld2780PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2781PQ_slot2", FIELD_dsp340050b49a6c_fld2781PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2782PQ_slot2", FIELD_dsp340050b49a6c_fld2782PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2783PQ_slot2", FIELD_dsp340050b49a6c_fld2783PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2784PQ_slot2", FIELD_dsp340050b49a6c_fld2784PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2785PQ_slot2", FIELD_dsp340050b49a6c_fld2785PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2786PQ_slot2", FIELD_dsp340050b49a6c_fld2786PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2787PQ_slot2", FIELD_dsp340050b49a6c_fld2787PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2788PQ_slot2", FIELD_dsp340050b49a6c_fld2788PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2789PQ_slot2", FIELD_dsp340050b49a6c_fld2789PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2790PQ_slot2", FIELD_dsp340050b49a6c_fld2790PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2791PQ_slot2", FIELD_dsp340050b49a6c_fld2791PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2792PQ_slot2", FIELD_dsp340050b49a6c_fld2792PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2793PQ_slot2", FIELD_dsp340050b49a6c_fld2793PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2795PQ_slot2", FIELD_dsp340050b49a6c_fld2795PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2796PQ_slot2", FIELD_dsp340050b49a6c_fld2796PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2798PQ_slot2", FIELD_dsp340050b49a6c_fld2798PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2801PQ_slot2", FIELD_dsp340050b49a6c_fld2801PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2803PQ_slot2", FIELD_dsp340050b49a6c_fld2803PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2805PQ_slot2", FIELD_dsp340050b49a6c_fld2805PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2806PQ_slot2", FIELD_dsp340050b49a6c_fld2806PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2807PQ_slot2", FIELD_dsp340050b49a6c_fld2807PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2808PQ_slot2", FIELD_dsp340050b49a6c_fld2808PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2809PQ_slot2", FIELD_dsp340050b49a6c_fld2809PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2810PQ_slot2", FIELD_dsp340050b49a6c_fld2810PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2811PQ_slot2", FIELD_dsp340050b49a6c_fld2811PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2812PQ_slot2", FIELD_dsp340050b49a6c_fld2812PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2814PQ_slot2", FIELD_dsp340050b49a6c_fld2814PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2816PQ_slot2", FIELD_dsp340050b49a6c_fld2816PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2817", FIELD_dsp340050b49a6c_fld2817, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2818PQ_slot2", FIELD_dsp340050b49a6c_fld2818PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2819", FIELD_dsp340050b49a6c_fld2819, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2820PQ_slot2", FIELD_dsp340050b49a6c_fld2820PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2821PQ_slot2", FIELD_dsp340050b49a6c_fld2821PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2823PQ_slot2", FIELD_dsp340050b49a6c_fld2823PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3744PQ_slot2", FIELD_dsp340050b49a6c_fld3744PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3745PQ_slot2", FIELD_dsp340050b49a6c_fld3745PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3746PQ_slot2", FIELD_dsp340050b49a6c_fld3746PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3747PQ_slot2", FIELD_dsp340050b49a6c_fld3747PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3748", FIELD_dsp340050b49a6c_fld3748, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3749PQ_slot2", FIELD_dsp340050b49a6c_fld3749PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3750PQ_slot2", FIELD_dsp340050b49a6c_fld3750PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3751PQ_slot2", FIELD_dsp340050b49a6c_fld3751PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3752PQ_slot2", FIELD_dsp340050b49a6c_fld3752PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3753PQ_slot2", FIELD_dsp340050b49a6c_fld3753PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3754PQ_slot2", FIELD_dsp340050b49a6c_fld3754PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3756PQ_slot2", FIELD_dsp340050b49a6c_fld3756PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3757PQ_slot2", FIELD_dsp340050b49a6c_fld3757PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3758PQ_slot2", FIELD_dsp340050b49a6c_fld3758PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3759PQ_slot2", FIELD_dsp340050b49a6c_fld3759PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3760PQ_slot2", FIELD_dsp340050b49a6c_fld3760PQ_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s10", FIELD_op0_s10, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2825PQ_slot1", FIELD_dsp340050b49a6c_fld2825PQ_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2826PQ_slot1", FIELD_dsp340050b49a6c_fld2826PQ_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3761PQ_slot1", FIELD_dsp340050b49a6c_fld3761PQ_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s11", FIELD_op0_s11, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2059", FIELD_dsp340050b49a6c_fld2059, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2069", FIELD_dsp340050b49a6c_fld2069, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2827PQ_slot0", FIELD_dsp340050b49a6c_fld2827PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2829PQ_slot0", FIELD_dsp340050b49a6c_fld2829PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2830PQ_slot0", FIELD_dsp340050b49a6c_fld2830PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2831PQ_slot0", FIELD_dsp340050b49a6c_fld2831PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2832PQ_slot0", FIELD_dsp340050b49a6c_fld2832PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2833PQ_slot0", FIELD_dsp340050b49a6c_fld2833PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2835PQ_slot0", FIELD_dsp340050b49a6c_fld2835PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2836PQ_slot0", FIELD_dsp340050b49a6c_fld2836PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2837PQ_slot0", FIELD_dsp340050b49a6c_fld2837PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2838PQ_slot0", FIELD_dsp340050b49a6c_fld2838PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2839PQ_slot0", FIELD_dsp340050b49a6c_fld2839PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2840PQ_slot0", FIELD_dsp340050b49a6c_fld2840PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2842PQ_slot0", FIELD_dsp340050b49a6c_fld2842PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2843PQ_slot0", FIELD_dsp340050b49a6c_fld2843PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2844PQ_slot0", FIELD_dsp340050b49a6c_fld2844PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2845PQ_slot0", FIELD_dsp340050b49a6c_fld2845PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2846PQ_slot0", FIELD_dsp340050b49a6c_fld2846PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2847PQ_slot0", FIELD_dsp340050b49a6c_fld2847PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2849PQ_slot0", FIELD_dsp340050b49a6c_fld2849PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2850PQ_slot0", FIELD_dsp340050b49a6c_fld2850PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2851PQ_slot0", FIELD_dsp340050b49a6c_fld2851PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2852PQ_slot0", FIELD_dsp340050b49a6c_fld2852PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2853PQ_slot0", FIELD_dsp340050b49a6c_fld2853PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2854PQ_slot0", FIELD_dsp340050b49a6c_fld2854PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2855PQ_slot0", FIELD_dsp340050b49a6c_fld2855PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2856PQ_slot0", FIELD_dsp340050b49a6c_fld2856PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2857PQ_slot0", FIELD_dsp340050b49a6c_fld2857PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2858PQ_slot0", FIELD_dsp340050b49a6c_fld2858PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2859PQ_slot0", FIELD_dsp340050b49a6c_fld2859PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2860PQ_slot0", FIELD_dsp340050b49a6c_fld2860PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2861PQ_slot0", FIELD_dsp340050b49a6c_fld2861PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2863PQ_slot0", FIELD_dsp340050b49a6c_fld2863PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2864PQ_slot0", FIELD_dsp340050b49a6c_fld2864PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2865", FIELD_dsp340050b49a6c_fld2865, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2867PQ_slot0", FIELD_dsp340050b49a6c_fld2867PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2869PQ_slot0", FIELD_dsp340050b49a6c_fld2869PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2871PQ_slot0", FIELD_dsp340050b49a6c_fld2871PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2872PQ_slot0", FIELD_dsp340050b49a6c_fld2872PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2873PQ_slot0", FIELD_dsp340050b49a6c_fld2873PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2874PQ_slot0", FIELD_dsp340050b49a6c_fld2874PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2875PQ_slot0", FIELD_dsp340050b49a6c_fld2875PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2876PQ_slot0", FIELD_dsp340050b49a6c_fld2876PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2877PQ_slot0", FIELD_dsp340050b49a6c_fld2877PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2878PQ_slot0", FIELD_dsp340050b49a6c_fld2878PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2879PQ_slot0", FIELD_dsp340050b49a6c_fld2879PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2880PQ_slot0", FIELD_dsp340050b49a6c_fld2880PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2881PQ_slot0", FIELD_dsp340050b49a6c_fld2881PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2882", FIELD_dsp340050b49a6c_fld2882, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2883PQ_slot0", FIELD_dsp340050b49a6c_fld2883PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2884PQ_slot0", FIELD_dsp340050b49a6c_fld2884PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2885PQ_slot0", FIELD_dsp340050b49a6c_fld2885PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2886PQ_slot0", FIELD_dsp340050b49a6c_fld2886PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2887PQ_slot0", FIELD_dsp340050b49a6c_fld2887PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2888PQ_slot0", FIELD_dsp340050b49a6c_fld2888PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2890PQ_slot0", FIELD_dsp340050b49a6c_fld2890PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2891PQ_slot0", FIELD_dsp340050b49a6c_fld2891PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2892PQ_slot0", FIELD_dsp340050b49a6c_fld2892PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2893PQ_slot0", FIELD_dsp340050b49a6c_fld2893PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2894PQ_slot0", FIELD_dsp340050b49a6c_fld2894PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2895PQ_slot0", FIELD_dsp340050b49a6c_fld2895PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2897PQ_slot0", FIELD_dsp340050b49a6c_fld2897PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2899PQ_slot0", FIELD_dsp340050b49a6c_fld2899PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2900PQ_slot0", FIELD_dsp340050b49a6c_fld2900PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2901PQ_slot0", FIELD_dsp340050b49a6c_fld2901PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2902PQ_slot0", FIELD_dsp340050b49a6c_fld2902PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2903PQ_slot0", FIELD_dsp340050b49a6c_fld2903PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2904PQ_slot0", FIELD_dsp340050b49a6c_fld2904PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2905PQ_slot0", FIELD_dsp340050b49a6c_fld2905PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2906PQ_slot0", FIELD_dsp340050b49a6c_fld2906PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2907PQ_slot0", FIELD_dsp340050b49a6c_fld2907PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2908PQ_slot0", FIELD_dsp340050b49a6c_fld2908PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2909PQ_slot0", FIELD_dsp340050b49a6c_fld2909PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2910PQ_slot0", FIELD_dsp340050b49a6c_fld2910PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2911PQ_slot0", FIELD_dsp340050b49a6c_fld2911PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2912PQ_slot0", FIELD_dsp340050b49a6c_fld2912PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2913PQ_slot0", FIELD_dsp340050b49a6c_fld2913PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2914PQ_slot0", FIELD_dsp340050b49a6c_fld2914PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2915PQ_slot0", FIELD_dsp340050b49a6c_fld2915PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2916PQ_slot0", FIELD_dsp340050b49a6c_fld2916PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2917PQ_slot0", FIELD_dsp340050b49a6c_fld2917PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2918PQ_slot0", FIELD_dsp340050b49a6c_fld2918PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2919PQ_slot0", FIELD_dsp340050b49a6c_fld2919PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2920PQ_slot0", FIELD_dsp340050b49a6c_fld2920PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2921PQ_slot0", FIELD_dsp340050b49a6c_fld2921PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2922PQ_slot0", FIELD_dsp340050b49a6c_fld2922PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2923PQ_slot0", FIELD_dsp340050b49a6c_fld2923PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2924PQ_slot0", FIELD_dsp340050b49a6c_fld2924PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2925PQ_slot0", FIELD_dsp340050b49a6c_fld2925PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2926PQ_slot0", FIELD_dsp340050b49a6c_fld2926PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2927PQ_slot0", FIELD_dsp340050b49a6c_fld2927PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2928PQ_slot0", FIELD_dsp340050b49a6c_fld2928PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2929PQ_slot0", FIELD_dsp340050b49a6c_fld2929PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2930PQ_slot0", FIELD_dsp340050b49a6c_fld2930PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2932PQ_slot0", FIELD_dsp340050b49a6c_fld2932PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2934PQ_slot0", FIELD_dsp340050b49a6c_fld2934PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2935PQ_slot0", FIELD_dsp340050b49a6c_fld2935PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2936PQ_slot0", FIELD_dsp340050b49a6c_fld2936PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2937PQ_slot0", FIELD_dsp340050b49a6c_fld2937PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2939PQ_slot0", FIELD_dsp340050b49a6c_fld2939PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2940", FIELD_dsp340050b49a6c_fld2940, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2941PQ_slot0", FIELD_dsp340050b49a6c_fld2941PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2942PQ_slot0", FIELD_dsp340050b49a6c_fld2942PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2943PQ_slot0", FIELD_dsp340050b49a6c_fld2943PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2945PQ_slot0", FIELD_dsp340050b49a6c_fld2945PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2946PQ_slot0", FIELD_dsp340050b49a6c_fld2946PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2947PQ_slot0", FIELD_dsp340050b49a6c_fld2947PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2948PQ_slot0", FIELD_dsp340050b49a6c_fld2948PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2949PQ_slot0", FIELD_dsp340050b49a6c_fld2949PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2950PQ_slot0", FIELD_dsp340050b49a6c_fld2950PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3763PQ_slot0", FIELD_dsp340050b49a6c_fld3763PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3764PQ_slot0", FIELD_dsp340050b49a6c_fld3764PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3765PQ_slot0", FIELD_dsp340050b49a6c_fld3765PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3766PQ_slot0", FIELD_dsp340050b49a6c_fld3766PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3767PQ_slot0", FIELD_dsp340050b49a6c_fld3767PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3768PQ_slot0", FIELD_dsp340050b49a6c_fld3768PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3769PQ_slot0", FIELD_dsp340050b49a6c_fld3769PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3770PQ_slot0", FIELD_dsp340050b49a6c_fld3770PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3771PQ_slot0", FIELD_dsp340050b49a6c_fld3771PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3772", FIELD_dsp340050b49a6c_fld3772, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3773PQ_slot0", FIELD_dsp340050b49a6c_fld3773PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3775PQ_slot0", FIELD_dsp340050b49a6c_fld3775PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3776PQ_slot0", FIELD_dsp340050b49a6c_fld3776PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3777PQ_slot0", FIELD_dsp340050b49a6c_fld3777PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3778PQ_slot0", FIELD_dsp340050b49a6c_fld3778PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3779PQ_slot0", FIELD_dsp340050b49a6c_fld3779PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3780PQ_slot0", FIELD_dsp340050b49a6c_fld3780PQ_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s12", FIELD_op0_s12, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2953ACC2_slot2", FIELD_dsp340050b49a6c_fld2953ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2954ACC2_slot2", FIELD_dsp340050b49a6c_fld2954ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2955ACC2_slot2", FIELD_dsp340050b49a6c_fld2955ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2956ACC2_slot2", FIELD_dsp340050b49a6c_fld2956ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2957ACC2_slot2", FIELD_dsp340050b49a6c_fld2957ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2958ACC2_slot2", FIELD_dsp340050b49a6c_fld2958ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2959ACC2_slot2", FIELD_dsp340050b49a6c_fld2959ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2960ACC2_slot2", FIELD_dsp340050b49a6c_fld2960ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2963ACC2_slot2", FIELD_dsp340050b49a6c_fld2963ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2964ACC2_slot2", FIELD_dsp340050b49a6c_fld2964ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2966ACC2_slot2", FIELD_dsp340050b49a6c_fld2966ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2967ACC2_slot2", FIELD_dsp340050b49a6c_fld2967ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3782ACC2_slot2", FIELD_dsp340050b49a6c_fld3782ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3783ACC2_slot2", FIELD_dsp340050b49a6c_fld3783ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3784ACC2_slot2", FIELD_dsp340050b49a6c_fld3784ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3785ACC2_slot2", FIELD_dsp340050b49a6c_fld3785ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3786ACC2_slot2", FIELD_dsp340050b49a6c_fld3786ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3788ACC2_slot2", FIELD_dsp340050b49a6c_fld3788ACC2_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s13", FIELD_op0_s13, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2028", FIELD_dsp340050b49a6c_fld2028, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2075", FIELD_dsp340050b49a6c_fld2075, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2968ACC2_slot1", FIELD_dsp340050b49a6c_fld2968ACC2_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2969ACC2_slot1", FIELD_dsp340050b49a6c_fld2969ACC2_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3790ACC2_slot1", FIELD_dsp340050b49a6c_fld3790ACC2_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3793ACC2_slot1", FIELD_dsp340050b49a6c_fld3793ACC2_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s14", FIELD_op0_s14, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2973ACC2_slot0", FIELD_dsp340050b49a6c_fld2973ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2974ACC2_slot0", FIELD_dsp340050b49a6c_fld2974ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2975ACC2_slot0", FIELD_dsp340050b49a6c_fld2975ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2976ACC2_slot0", FIELD_dsp340050b49a6c_fld2976ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2977ACC2_slot0", FIELD_dsp340050b49a6c_fld2977ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2980ACC2_slot0", FIELD_dsp340050b49a6c_fld2980ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2981ACC2_slot0", FIELD_dsp340050b49a6c_fld2981ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2982ACC2_slot0", FIELD_dsp340050b49a6c_fld2982ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2984ACC2_slot0", FIELD_dsp340050b49a6c_fld2984ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2985ACC2_slot0", FIELD_dsp340050b49a6c_fld2985ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2987ACC2_slot0", FIELD_dsp340050b49a6c_fld2987ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2989ACC2_slot0", FIELD_dsp340050b49a6c_fld2989ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2990ACC2_slot0", FIELD_dsp340050b49a6c_fld2990ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3795ACC2_slot0", FIELD_dsp340050b49a6c_fld3795ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3796ACC2_slot0", FIELD_dsp340050b49a6c_fld3796ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3797", FIELD_dsp340050b49a6c_fld3797, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3798ACC2_slot0", FIELD_dsp340050b49a6c_fld3798ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3799ACC2_slot0", FIELD_dsp340050b49a6c_fld3799ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3800ACC2_slot0", FIELD_dsp340050b49a6c_fld3800ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3801ACC2_slot0", FIELD_dsp340050b49a6c_fld3801ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3802ACC2_slot0", FIELD_dsp340050b49a6c_fld3802ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3803ACC2_slot0", FIELD_dsp340050b49a6c_fld3803ACC2_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s15", FIELD_op0_s15, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2991SMOD_slot2", FIELD_dsp340050b49a6c_fld2991SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2992SMOD_slot2", FIELD_dsp340050b49a6c_fld2992SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2993SMOD_slot2", FIELD_dsp340050b49a6c_fld2993SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2994SMOD_slot2", FIELD_dsp340050b49a6c_fld2994SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2995SMOD_slot2", FIELD_dsp340050b49a6c_fld2995SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2996SMOD_slot2", FIELD_dsp340050b49a6c_fld2996SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2997SMOD_slot2", FIELD_dsp340050b49a6c_fld2997SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2998SMOD_slot2", FIELD_dsp340050b49a6c_fld2998SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2999SMOD_slot2", FIELD_dsp340050b49a6c_fld2999SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3000SMOD_slot2", FIELD_dsp340050b49a6c_fld3000SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3001SMOD_slot2", FIELD_dsp340050b49a6c_fld3001SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3002SMOD_slot2", FIELD_dsp340050b49a6c_fld3002SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3003SMOD_slot2", FIELD_dsp340050b49a6c_fld3003SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3004SMOD_slot2", FIELD_dsp340050b49a6c_fld3004SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3005SMOD_slot2", FIELD_dsp340050b49a6c_fld3005SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3006SMOD_slot2", FIELD_dsp340050b49a6c_fld3006SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3007SMOD_slot2", FIELD_dsp340050b49a6c_fld3007SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3008SMOD_slot2", FIELD_dsp340050b49a6c_fld3008SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3009SMOD_slot2", FIELD_dsp340050b49a6c_fld3009SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3010SMOD_slot2", FIELD_dsp340050b49a6c_fld3010SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3011SMOD_slot2", FIELD_dsp340050b49a6c_fld3011SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3012SMOD_slot2", FIELD_dsp340050b49a6c_fld3012SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3013SMOD_slot2", FIELD_dsp340050b49a6c_fld3013SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3014SMOD_slot2", FIELD_dsp340050b49a6c_fld3014SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3015SMOD_slot2", FIELD_dsp340050b49a6c_fld3015SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3016SMOD_slot2", FIELD_dsp340050b49a6c_fld3016SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3017SMOD_slot2", FIELD_dsp340050b49a6c_fld3017SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3018SMOD_slot2", FIELD_dsp340050b49a6c_fld3018SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3019SMOD_slot2", FIELD_dsp340050b49a6c_fld3019SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3020SMOD_slot2", FIELD_dsp340050b49a6c_fld3020SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3021SMOD_slot2", FIELD_dsp340050b49a6c_fld3021SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3022SMOD_slot2", FIELD_dsp340050b49a6c_fld3022SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3023SMOD_slot2", FIELD_dsp340050b49a6c_fld3023SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3024SMOD_slot2", FIELD_dsp340050b49a6c_fld3024SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3025SMOD_slot2", FIELD_dsp340050b49a6c_fld3025SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3026SMOD_slot2", FIELD_dsp340050b49a6c_fld3026SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3027SMOD_slot2", FIELD_dsp340050b49a6c_fld3027SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3028SMOD_slot2", FIELD_dsp340050b49a6c_fld3028SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3030SMOD_slot2", FIELD_dsp340050b49a6c_fld3030SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3031SMOD_slot2", FIELD_dsp340050b49a6c_fld3031SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3032SMOD_slot2", FIELD_dsp340050b49a6c_fld3032SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3033SMOD_slot2", FIELD_dsp340050b49a6c_fld3033SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3034SMOD_slot2", FIELD_dsp340050b49a6c_fld3034SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3035SMOD_slot2", FIELD_dsp340050b49a6c_fld3035SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3036SMOD_slot2", FIELD_dsp340050b49a6c_fld3036SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3038SMOD_slot2", FIELD_dsp340050b49a6c_fld3038SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3039SMOD_slot2", FIELD_dsp340050b49a6c_fld3039SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3040SMOD_slot2", FIELD_dsp340050b49a6c_fld3040SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3043SMOD_slot2", FIELD_dsp340050b49a6c_fld3043SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3044SMOD_slot2", FIELD_dsp340050b49a6c_fld3044SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3046SMOD_slot2", FIELD_dsp340050b49a6c_fld3046SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3047SMOD_slot2", FIELD_dsp340050b49a6c_fld3047SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3048", FIELD_dsp340050b49a6c_fld3048, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3049SMOD_slot2", FIELD_dsp340050b49a6c_fld3049SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3050SMOD_slot2", FIELD_dsp340050b49a6c_fld3050SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3051SMOD_slot2", FIELD_dsp340050b49a6c_fld3051SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3052SMOD_slot2", FIELD_dsp340050b49a6c_fld3052SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3053SMOD_slot2", FIELD_dsp340050b49a6c_fld3053SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3054SMOD_slot2", FIELD_dsp340050b49a6c_fld3054SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3055SMOD_slot2", FIELD_dsp340050b49a6c_fld3055SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3056SMOD_slot2", FIELD_dsp340050b49a6c_fld3056SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3058SMOD_slot2", FIELD_dsp340050b49a6c_fld3058SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3059SMOD_slot2", FIELD_dsp340050b49a6c_fld3059SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3061SMOD_slot2", FIELD_dsp340050b49a6c_fld3061SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3062", FIELD_dsp340050b49a6c_fld3062, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3063SMOD_slot2", FIELD_dsp340050b49a6c_fld3063SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3065SMOD_slot2", FIELD_dsp340050b49a6c_fld3065SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3066SMOD_slot2", FIELD_dsp340050b49a6c_fld3066SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3067SMOD_slot2", FIELD_dsp340050b49a6c_fld3067SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3068SMOD_slot2", FIELD_dsp340050b49a6c_fld3068SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3069SMOD_slot2", FIELD_dsp340050b49a6c_fld3069SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3070", FIELD_dsp340050b49a6c_fld3070, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3071SMOD_slot2", FIELD_dsp340050b49a6c_fld3071SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3072SMOD_slot2", FIELD_dsp340050b49a6c_fld3072SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3073SMOD_slot2", FIELD_dsp340050b49a6c_fld3073SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3074SMOD_slot2", FIELD_dsp340050b49a6c_fld3074SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3075SMOD_slot2", FIELD_dsp340050b49a6c_fld3075SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3076SMOD_slot2", FIELD_dsp340050b49a6c_fld3076SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3077SMOD_slot2", FIELD_dsp340050b49a6c_fld3077SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3078SMOD_slot2", FIELD_dsp340050b49a6c_fld3078SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3079SMOD_slot2", FIELD_dsp340050b49a6c_fld3079SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3080SMOD_slot2", FIELD_dsp340050b49a6c_fld3080SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3081SMOD_slot2", FIELD_dsp340050b49a6c_fld3081SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3082SMOD_slot2", FIELD_dsp340050b49a6c_fld3082SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3084SMOD_slot2", FIELD_dsp340050b49a6c_fld3084SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3085SMOD_slot2", FIELD_dsp340050b49a6c_fld3085SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3087SMOD_slot2", FIELD_dsp340050b49a6c_fld3087SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3088SMOD_slot2", FIELD_dsp340050b49a6c_fld3088SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3090SMOD_slot2", FIELD_dsp340050b49a6c_fld3090SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3091SMOD_slot2", FIELD_dsp340050b49a6c_fld3091SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3092SMOD_slot2", FIELD_dsp340050b49a6c_fld3092SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3093SMOD_slot2", FIELD_dsp340050b49a6c_fld3093SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3096SMOD_slot2", FIELD_dsp340050b49a6c_fld3096SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3097SMOD_slot2", FIELD_dsp340050b49a6c_fld3097SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3098SMOD_slot2", FIELD_dsp340050b49a6c_fld3098SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3099SMOD_slot2", FIELD_dsp340050b49a6c_fld3099SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3100SMOD_slot2", FIELD_dsp340050b49a6c_fld3100SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3101SMOD_slot2", FIELD_dsp340050b49a6c_fld3101SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3102SMOD_slot2", FIELD_dsp340050b49a6c_fld3102SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3104SMOD_slot2", FIELD_dsp340050b49a6c_fld3104SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3105SMOD_slot2", FIELD_dsp340050b49a6c_fld3105SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3106SMOD_slot2", FIELD_dsp340050b49a6c_fld3106SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3107SMOD_slot2", FIELD_dsp340050b49a6c_fld3107SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3108SMOD_slot2", FIELD_dsp340050b49a6c_fld3108SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3109SMOD_slot2", FIELD_dsp340050b49a6c_fld3109SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3110SMOD_slot2", FIELD_dsp340050b49a6c_fld3110SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3111SMOD_slot2", FIELD_dsp340050b49a6c_fld3111SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3113SMOD_slot2", FIELD_dsp340050b49a6c_fld3113SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3114SMOD_slot2", FIELD_dsp340050b49a6c_fld3114SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3115SMOD_slot2", FIELD_dsp340050b49a6c_fld3115SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3116SMOD_slot2", FIELD_dsp340050b49a6c_fld3116SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3805SMOD_slot2", FIELD_dsp340050b49a6c_fld3805SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3806SMOD_slot2", FIELD_dsp340050b49a6c_fld3806SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3807SMOD_slot2", FIELD_dsp340050b49a6c_fld3807SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3808", FIELD_dsp340050b49a6c_fld3808, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3809SMOD_slot2", FIELD_dsp340050b49a6c_fld3809SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3810SMOD_slot2", FIELD_dsp340050b49a6c_fld3810SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3812SMOD_slot2", FIELD_dsp340050b49a6c_fld3812SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3813SMOD_slot2", FIELD_dsp340050b49a6c_fld3813SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3814SMOD_slot2", FIELD_dsp340050b49a6c_fld3814SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3816SMOD_slot2", FIELD_dsp340050b49a6c_fld3816SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3817", FIELD_dsp340050b49a6c_fld3817, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3818SMOD_slot2", FIELD_dsp340050b49a6c_fld3818SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3819SMOD_slot2", FIELD_dsp340050b49a6c_fld3819SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3821SMOD_slot2", FIELD_dsp340050b49a6c_fld3821SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3822SMOD_slot2", FIELD_dsp340050b49a6c_fld3822SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3823SMOD_slot2", FIELD_dsp340050b49a6c_fld3823SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3824SMOD_slot2", FIELD_dsp340050b49a6c_fld3824SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3825SMOD_slot2", FIELD_dsp340050b49a6c_fld3825SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3826SMOD_slot2", FIELD_dsp340050b49a6c_fld3826SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3827SMOD_slot2", FIELD_dsp340050b49a6c_fld3827SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3828SMOD_slot2", FIELD_dsp340050b49a6c_fld3828SMOD_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s16", FIELD_op0_s16, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2033", FIELD_dsp340050b49a6c_fld2033, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2080", FIELD_dsp340050b49a6c_fld2080, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3117SMOD_slot1", FIELD_dsp340050b49a6c_fld3117SMOD_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3118SMOD_slot1", FIELD_dsp340050b49a6c_fld3118SMOD_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3829SMOD_slot1", FIELD_dsp340050b49a6c_fld3829SMOD_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s17", FIELD_op0_s17, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3119SMOD_slot0", FIELD_dsp340050b49a6c_fld3119SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3120SMOD_slot0", FIELD_dsp340050b49a6c_fld3120SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3121SMOD_slot0", FIELD_dsp340050b49a6c_fld3121SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3122SMOD_slot0", FIELD_dsp340050b49a6c_fld3122SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3123SMOD_slot0", FIELD_dsp340050b49a6c_fld3123SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3125SMOD_slot0", FIELD_dsp340050b49a6c_fld3125SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3126SMOD_slot0", FIELD_dsp340050b49a6c_fld3126SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3127SMOD_slot0", FIELD_dsp340050b49a6c_fld3127SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3128SMOD_slot0", FIELD_dsp340050b49a6c_fld3128SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3129SMOD_slot0", FIELD_dsp340050b49a6c_fld3129SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3130SMOD_slot0", FIELD_dsp340050b49a6c_fld3130SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3131SMOD_slot0", FIELD_dsp340050b49a6c_fld3131SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3132SMOD_slot0", FIELD_dsp340050b49a6c_fld3132SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3133SMOD_slot0", FIELD_dsp340050b49a6c_fld3133SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3134SMOD_slot0", FIELD_dsp340050b49a6c_fld3134SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3135SMOD_slot0", FIELD_dsp340050b49a6c_fld3135SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3136SMOD_slot0", FIELD_dsp340050b49a6c_fld3136SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3137SMOD_slot0", FIELD_dsp340050b49a6c_fld3137SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3138SMOD_slot0", FIELD_dsp340050b49a6c_fld3138SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3139SMOD_slot0", FIELD_dsp340050b49a6c_fld3139SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3140SMOD_slot0", FIELD_dsp340050b49a6c_fld3140SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3141SMOD_slot0", FIELD_dsp340050b49a6c_fld3141SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3142SMOD_slot0", FIELD_dsp340050b49a6c_fld3142SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3143SMOD_slot0", FIELD_dsp340050b49a6c_fld3143SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3144SMOD_slot0", FIELD_dsp340050b49a6c_fld3144SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3145SMOD_slot0", FIELD_dsp340050b49a6c_fld3145SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3146SMOD_slot0", FIELD_dsp340050b49a6c_fld3146SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3148SMOD_slot0", FIELD_dsp340050b49a6c_fld3148SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3149SMOD_slot0", FIELD_dsp340050b49a6c_fld3149SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3150", FIELD_dsp340050b49a6c_fld3150, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3152SMOD_slot0", FIELD_dsp340050b49a6c_fld3152SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3153SMOD_slot0", FIELD_dsp340050b49a6c_fld3153SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3155SMOD_slot0", FIELD_dsp340050b49a6c_fld3155SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3156SMOD_slot0", FIELD_dsp340050b49a6c_fld3156SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3157SMOD_slot0", FIELD_dsp340050b49a6c_fld3157SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3158SMOD_slot0", FIELD_dsp340050b49a6c_fld3158SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3159SMOD_slot0", FIELD_dsp340050b49a6c_fld3159SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3160SMOD_slot0", FIELD_dsp340050b49a6c_fld3160SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3161SMOD_slot0", FIELD_dsp340050b49a6c_fld3161SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3164SMOD_slot0", FIELD_dsp340050b49a6c_fld3164SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3165SMOD_slot0", FIELD_dsp340050b49a6c_fld3165SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3166SMOD_slot0", FIELD_dsp340050b49a6c_fld3166SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3168SMOD_slot0", FIELD_dsp340050b49a6c_fld3168SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3170SMOD_slot0", FIELD_dsp340050b49a6c_fld3170SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3171SMOD_slot0", FIELD_dsp340050b49a6c_fld3171SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3172SMOD_slot0", FIELD_dsp340050b49a6c_fld3172SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3173SMOD_slot0", FIELD_dsp340050b49a6c_fld3173SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3174SMOD_slot0", FIELD_dsp340050b49a6c_fld3174SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3175SMOD_slot0", FIELD_dsp340050b49a6c_fld3175SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3176SMOD_slot0", FIELD_dsp340050b49a6c_fld3176SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3177SMOD_slot0", FIELD_dsp340050b49a6c_fld3177SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3178SMOD_slot0", FIELD_dsp340050b49a6c_fld3178SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3179SMOD_slot0", FIELD_dsp340050b49a6c_fld3179SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3180SMOD_slot0", FIELD_dsp340050b49a6c_fld3180SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3181SMOD_slot0", FIELD_dsp340050b49a6c_fld3181SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3182SMOD_slot0", FIELD_dsp340050b49a6c_fld3182SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3184SMOD_slot0", FIELD_dsp340050b49a6c_fld3184SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3186SMOD_slot0", FIELD_dsp340050b49a6c_fld3186SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3188SMOD_slot0", FIELD_dsp340050b49a6c_fld3188SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3832SMOD_slot0", FIELD_dsp340050b49a6c_fld3832SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3833SMOD_slot0", FIELD_dsp340050b49a6c_fld3833SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3834", FIELD_dsp340050b49a6c_fld3834, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3836SMOD_slot0", FIELD_dsp340050b49a6c_fld3836SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3837SMOD_slot0", FIELD_dsp340050b49a6c_fld3837SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3838SMOD_slot0", FIELD_dsp340050b49a6c_fld3838SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3841SMOD_slot0", FIELD_dsp340050b49a6c_fld3841SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3842SMOD_slot0", FIELD_dsp340050b49a6c_fld3842SMOD_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s18", FIELD_op0_s18, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2074", FIELD_dsp340050b49a6c_fld2074, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3191LLR_slot2", FIELD_dsp340050b49a6c_fld3191LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3192LLR_slot2", FIELD_dsp340050b49a6c_fld3192LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3193LLR_slot2", FIELD_dsp340050b49a6c_fld3193LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3194LLR_slot2", FIELD_dsp340050b49a6c_fld3194LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3195LLR_slot2", FIELD_dsp340050b49a6c_fld3195LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3196LLR_slot2", FIELD_dsp340050b49a6c_fld3196LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3197LLR_slot2", FIELD_dsp340050b49a6c_fld3197LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3198LLR_slot2", FIELD_dsp340050b49a6c_fld3198LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3199LLR_slot2", FIELD_dsp340050b49a6c_fld3199LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3200LLR_slot2", FIELD_dsp340050b49a6c_fld3200LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3201LLR_slot2", FIELD_dsp340050b49a6c_fld3201LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3202LLR_slot2", FIELD_dsp340050b49a6c_fld3202LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3203LLR_slot2", FIELD_dsp340050b49a6c_fld3203LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3204LLR_slot2", FIELD_dsp340050b49a6c_fld3204LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3205", FIELD_dsp340050b49a6c_fld3205, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3206", FIELD_dsp340050b49a6c_fld3206, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3207LLR_slot2", FIELD_dsp340050b49a6c_fld3207LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3208LLR_slot2", FIELD_dsp340050b49a6c_fld3208LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3210LLR_slot2", FIELD_dsp340050b49a6c_fld3210LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3212", FIELD_dsp340050b49a6c_fld3212, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3213LLR_slot2", FIELD_dsp340050b49a6c_fld3213LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3214", FIELD_dsp340050b49a6c_fld3214, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3215LLR_slot2", FIELD_dsp340050b49a6c_fld3215LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3216LLR_slot2", FIELD_dsp340050b49a6c_fld3216LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3217", FIELD_dsp340050b49a6c_fld3217, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3218LLR_slot2", FIELD_dsp340050b49a6c_fld3218LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3220LLR_slot2", FIELD_dsp340050b49a6c_fld3220LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3221LLR_slot2", FIELD_dsp340050b49a6c_fld3221LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3222LLR_slot2", FIELD_dsp340050b49a6c_fld3222LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3224LLR_slot2", FIELD_dsp340050b49a6c_fld3224LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3225", FIELD_dsp340050b49a6c_fld3225, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3226LLR_slot2", FIELD_dsp340050b49a6c_fld3226LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3228LLR_slot2", FIELD_dsp340050b49a6c_fld3228LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3230", FIELD_dsp340050b49a6c_fld3230, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3231LLR_slot2", FIELD_dsp340050b49a6c_fld3231LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3232LLR_slot2", FIELD_dsp340050b49a6c_fld3232LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3233", FIELD_dsp340050b49a6c_fld3233, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3234LLR_slot2", FIELD_dsp340050b49a6c_fld3234LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3235LLR_slot2", FIELD_dsp340050b49a6c_fld3235LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3236", FIELD_dsp340050b49a6c_fld3236, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3237LLR_slot2", FIELD_dsp340050b49a6c_fld3237LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3238LLR_slot2", FIELD_dsp340050b49a6c_fld3238LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3240LLR_slot2", FIELD_dsp340050b49a6c_fld3240LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3241LLR_slot2", FIELD_dsp340050b49a6c_fld3241LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3242LLR_slot2", FIELD_dsp340050b49a6c_fld3242LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3243LLR_slot2", FIELD_dsp340050b49a6c_fld3243LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3244LLR_slot2", FIELD_dsp340050b49a6c_fld3244LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3245LLR_slot2", FIELD_dsp340050b49a6c_fld3245LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3246", FIELD_dsp340050b49a6c_fld3246, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3247LLR_slot2", FIELD_dsp340050b49a6c_fld3247LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3843LLR_slot2", FIELD_dsp340050b49a6c_fld3843LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3844", FIELD_dsp340050b49a6c_fld3844, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3845LLR_slot2", FIELD_dsp340050b49a6c_fld3845LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3847LLR_slot2", FIELD_dsp340050b49a6c_fld3847LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3848LLR_slot2", FIELD_dsp340050b49a6c_fld3848LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3849LLR_slot2", FIELD_dsp340050b49a6c_fld3849LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3850LLR_slot2", FIELD_dsp340050b49a6c_fld3850LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3851LLR_slot2", FIELD_dsp340050b49a6c_fld3851LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3853LLR_slot2", FIELD_dsp340050b49a6c_fld3853LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3855LLR_slot2", FIELD_dsp340050b49a6c_fld3855LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3856LLR_slot2", FIELD_dsp340050b49a6c_fld3856LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3857LLR_slot2", FIELD_dsp340050b49a6c_fld3857LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3859LLR_slot2", FIELD_dsp340050b49a6c_fld3859LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3860LLR_slot2", FIELD_dsp340050b49a6c_fld3860LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3861LLR_slot2", FIELD_dsp340050b49a6c_fld3861LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3862", FIELD_dsp340050b49a6c_fld3862, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3863LLR_slot2", FIELD_dsp340050b49a6c_fld3863LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3864LLR_slot2", FIELD_dsp340050b49a6c_fld3864LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3865LLR_slot2", FIELD_dsp340050b49a6c_fld3865LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3866LLR_slot2", FIELD_dsp340050b49a6c_fld3866LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3867LLR_slot2", FIELD_dsp340050b49a6c_fld3867LLR_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3868", FIELD_dsp340050b49a6c_fld3868, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s19", FIELD_op0_s19, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2034", FIELD_dsp340050b49a6c_fld2034, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3248LLR_slot1", FIELD_dsp340050b49a6c_fld3248LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3250LLR_slot1", FIELD_dsp340050b49a6c_fld3250LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3251LLR_slot1", FIELD_dsp340050b49a6c_fld3251LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3252LLR_slot1", FIELD_dsp340050b49a6c_fld3252LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3253LLR_slot1", FIELD_dsp340050b49a6c_fld3253LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3254LLR_slot1", FIELD_dsp340050b49a6c_fld3254LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3869LLR_slot1", FIELD_dsp340050b49a6c_fld3869LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3870", FIELD_dsp340050b49a6c_fld3870, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3872LLR_slot1", FIELD_dsp340050b49a6c_fld3872LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3875LLR_slot1", FIELD_dsp340050b49a6c_fld3875LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3876LLR_slot1", FIELD_dsp340050b49a6c_fld3876LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3878LLR_slot1", FIELD_dsp340050b49a6c_fld3878LLR_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s20", FIELD_op0_s20, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2071", FIELD_dsp340050b49a6c_fld2071, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3258LLR_slot0", FIELD_dsp340050b49a6c_fld3258LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3259LLR_slot0", FIELD_dsp340050b49a6c_fld3259LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3260LLR_slot0", FIELD_dsp340050b49a6c_fld3260LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3261LLR_slot0", FIELD_dsp340050b49a6c_fld3261LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3263LLR_slot0", FIELD_dsp340050b49a6c_fld3263LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3264LLR_slot0", FIELD_dsp340050b49a6c_fld3264LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3265LLR_slot0", FIELD_dsp340050b49a6c_fld3265LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3266LLR_slot0", FIELD_dsp340050b49a6c_fld3266LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3267LLR_slot0", FIELD_dsp340050b49a6c_fld3267LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3268LLR_slot0", FIELD_dsp340050b49a6c_fld3268LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3269LLR_slot0", FIELD_dsp340050b49a6c_fld3269LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3270LLR_slot0", FIELD_dsp340050b49a6c_fld3270LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3272LLR_slot0", FIELD_dsp340050b49a6c_fld3272LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3274LLR_slot0", FIELD_dsp340050b49a6c_fld3274LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3275LLR_slot0", FIELD_dsp340050b49a6c_fld3275LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3276LLR_slot0", FIELD_dsp340050b49a6c_fld3276LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3277LLR_slot0", FIELD_dsp340050b49a6c_fld3277LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3278LLR_slot0", FIELD_dsp340050b49a6c_fld3278LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3279LLR_slot0", FIELD_dsp340050b49a6c_fld3279LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3280LLR_slot0", FIELD_dsp340050b49a6c_fld3280LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3281LLR_slot0", FIELD_dsp340050b49a6c_fld3281LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3282LLR_slot0", FIELD_dsp340050b49a6c_fld3282LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3283LLR_slot0", FIELD_dsp340050b49a6c_fld3283LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3284LLR_slot0", FIELD_dsp340050b49a6c_fld3284LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3286LLR_slot0", FIELD_dsp340050b49a6c_fld3286LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3288LLR_slot0", FIELD_dsp340050b49a6c_fld3288LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3289LLR_slot0", FIELD_dsp340050b49a6c_fld3289LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3291LLR_slot0", FIELD_dsp340050b49a6c_fld3291LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3292LLR_slot0", FIELD_dsp340050b49a6c_fld3292LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3293LLR_slot0", FIELD_dsp340050b49a6c_fld3293LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3294LLR_slot0", FIELD_dsp340050b49a6c_fld3294LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3295LLR_slot0", FIELD_dsp340050b49a6c_fld3295LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3296LLR_slot0", FIELD_dsp340050b49a6c_fld3296LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3297LLR_slot0", FIELD_dsp340050b49a6c_fld3297LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3298LLR_slot0", FIELD_dsp340050b49a6c_fld3298LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3299LLR_slot0", FIELD_dsp340050b49a6c_fld3299LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3300LLR_slot0", FIELD_dsp340050b49a6c_fld3300LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3302LLR_slot0", FIELD_dsp340050b49a6c_fld3302LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3303LLR_slot0", FIELD_dsp340050b49a6c_fld3303LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3304LLR_slot0", FIELD_dsp340050b49a6c_fld3304LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3305LLR_slot0", FIELD_dsp340050b49a6c_fld3305LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3306LLR_slot0", FIELD_dsp340050b49a6c_fld3306LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3308LLR_slot0", FIELD_dsp340050b49a6c_fld3308LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3310LLR_slot0", FIELD_dsp340050b49a6c_fld3310LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3311LLR_slot0", FIELD_dsp340050b49a6c_fld3311LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3312LLR_slot0", FIELD_dsp340050b49a6c_fld3312LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3879LLR_slot0", FIELD_dsp340050b49a6c_fld3879LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3881LLR_slot0", FIELD_dsp340050b49a6c_fld3881LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3883LLR_slot0", FIELD_dsp340050b49a6c_fld3883LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3885LLR_slot0", FIELD_dsp340050b49a6c_fld3885LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3887LLR_slot0", FIELD_dsp340050b49a6c_fld3887LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3888LLR_slot0", FIELD_dsp340050b49a6c_fld3888LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3890LLR_slot0", FIELD_dsp340050b49a6c_fld3890LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3892LLR_slot0", FIELD_dsp340050b49a6c_fld3892LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3893LLR_slot0", FIELD_dsp340050b49a6c_fld3893LLR_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s21", FIELD_op0_s21, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3313DUAL_slot2", FIELD_dsp340050b49a6c_fld3313DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3314", FIELD_dsp340050b49a6c_fld3314, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3315DUAL_slot2", FIELD_dsp340050b49a6c_fld3315DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3316DUAL_slot2", FIELD_dsp340050b49a6c_fld3316DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3317DUAL_slot2", FIELD_dsp340050b49a6c_fld3317DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3318", FIELD_dsp340050b49a6c_fld3318, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3319DUAL_slot2", FIELD_dsp340050b49a6c_fld3319DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3320DUAL_slot2", FIELD_dsp340050b49a6c_fld3320DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3321DUAL_slot2", FIELD_dsp340050b49a6c_fld3321DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3322DUAL_slot2", FIELD_dsp340050b49a6c_fld3322DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3323DUAL_slot2", FIELD_dsp340050b49a6c_fld3323DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3324DUAL_slot2", FIELD_dsp340050b49a6c_fld3324DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3325DUAL_slot2", FIELD_dsp340050b49a6c_fld3325DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3326DUAL_slot2", FIELD_dsp340050b49a6c_fld3326DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3327DUAL_slot2", FIELD_dsp340050b49a6c_fld3327DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3328DUAL_slot2", FIELD_dsp340050b49a6c_fld3328DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3329DUAL_slot2", FIELD_dsp340050b49a6c_fld3329DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3330DUAL_slot2", FIELD_dsp340050b49a6c_fld3330DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3331DUAL_slot2", FIELD_dsp340050b49a6c_fld3331DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3332DUAL_slot2", FIELD_dsp340050b49a6c_fld3332DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3333DUAL_slot2", FIELD_dsp340050b49a6c_fld3333DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3334DUAL_slot2", FIELD_dsp340050b49a6c_fld3334DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3335DUAL_slot2", FIELD_dsp340050b49a6c_fld3335DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3336DUAL_slot2", FIELD_dsp340050b49a6c_fld3336DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3337DUAL_slot2", FIELD_dsp340050b49a6c_fld3337DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3339DUAL_slot2", FIELD_dsp340050b49a6c_fld3339DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3340DUAL_slot2", FIELD_dsp340050b49a6c_fld3340DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3341DUAL_slot2", FIELD_dsp340050b49a6c_fld3341DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3342DUAL_slot2", FIELD_dsp340050b49a6c_fld3342DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3345DUAL_slot2", FIELD_dsp340050b49a6c_fld3345DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3347DUAL_slot2", FIELD_dsp340050b49a6c_fld3347DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3348DUAL_slot2", FIELD_dsp340050b49a6c_fld3348DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3349DUAL_slot2", FIELD_dsp340050b49a6c_fld3349DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3350DUAL_slot2", FIELD_dsp340050b49a6c_fld3350DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3353DUAL_slot2", FIELD_dsp340050b49a6c_fld3353DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3354DUAL_slot2", FIELD_dsp340050b49a6c_fld3354DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3356DUAL_slot2", FIELD_dsp340050b49a6c_fld3356DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3358DUAL_slot2", FIELD_dsp340050b49a6c_fld3358DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3360DUAL_slot2", FIELD_dsp340050b49a6c_fld3360DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3361DUAL_slot2", FIELD_dsp340050b49a6c_fld3361DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3362DUAL_slot2", FIELD_dsp340050b49a6c_fld3362DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3363DUAL_slot2", FIELD_dsp340050b49a6c_fld3363DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3364", FIELD_dsp340050b49a6c_fld3364, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3365DUAL_slot2", FIELD_dsp340050b49a6c_fld3365DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3366DUAL_slot2", FIELD_dsp340050b49a6c_fld3366DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3367DUAL_slot2", FIELD_dsp340050b49a6c_fld3367DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3368DUAL_slot2", FIELD_dsp340050b49a6c_fld3368DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3369DUAL_slot2", FIELD_dsp340050b49a6c_fld3369DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3370DUAL_slot2", FIELD_dsp340050b49a6c_fld3370DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3371DUAL_slot2", FIELD_dsp340050b49a6c_fld3371DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3372DUAL_slot2", FIELD_dsp340050b49a6c_fld3372DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3373DUAL_slot2", FIELD_dsp340050b49a6c_fld3373DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3374DUAL_slot2", FIELD_dsp340050b49a6c_fld3374DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3375DUAL_slot2", FIELD_dsp340050b49a6c_fld3375DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3376DUAL_slot2", FIELD_dsp340050b49a6c_fld3376DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3377DUAL_slot2", FIELD_dsp340050b49a6c_fld3377DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3378DUAL_slot2", FIELD_dsp340050b49a6c_fld3378DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3379DUAL_slot2", FIELD_dsp340050b49a6c_fld3379DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3380DUAL_slot2", FIELD_dsp340050b49a6c_fld3380DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3381DUAL_slot2", FIELD_dsp340050b49a6c_fld3381DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3382DUAL_slot2", FIELD_dsp340050b49a6c_fld3382DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3384DUAL_slot2", FIELD_dsp340050b49a6c_fld3384DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3385DUAL_slot2", FIELD_dsp340050b49a6c_fld3385DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3386DUAL_slot2", FIELD_dsp340050b49a6c_fld3386DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3387DUAL_slot2", FIELD_dsp340050b49a6c_fld3387DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3388DUAL_slot2", FIELD_dsp340050b49a6c_fld3388DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3390DUAL_slot2", FIELD_dsp340050b49a6c_fld3390DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3392DUAL_slot2", FIELD_dsp340050b49a6c_fld3392DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3394DUAL_slot2", FIELD_dsp340050b49a6c_fld3394DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3396DUAL_slot2", FIELD_dsp340050b49a6c_fld3396DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3397DUAL_slot2", FIELD_dsp340050b49a6c_fld3397DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3399DUAL_slot2", FIELD_dsp340050b49a6c_fld3399DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3401DUAL_slot2", FIELD_dsp340050b49a6c_fld3401DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3403DUAL_slot2", FIELD_dsp340050b49a6c_fld3403DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3404DUAL_slot2", FIELD_dsp340050b49a6c_fld3404DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3406DUAL_slot2", FIELD_dsp340050b49a6c_fld3406DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3407", FIELD_dsp340050b49a6c_fld3407, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3408DUAL_slot2", FIELD_dsp340050b49a6c_fld3408DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3410", FIELD_dsp340050b49a6c_fld3410, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3411DUAL_slot2", FIELD_dsp340050b49a6c_fld3411DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3412DUAL_slot2", FIELD_dsp340050b49a6c_fld3412DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3413DUAL_slot2", FIELD_dsp340050b49a6c_fld3413DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3414DUAL_slot2", FIELD_dsp340050b49a6c_fld3414DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3415DUAL_slot2", FIELD_dsp340050b49a6c_fld3415DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3416DUAL_slot2", FIELD_dsp340050b49a6c_fld3416DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3417DUAL_slot2", FIELD_dsp340050b49a6c_fld3417DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3418DUAL_slot2", FIELD_dsp340050b49a6c_fld3418DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3419DUAL_slot2", FIELD_dsp340050b49a6c_fld3419DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3420DUAL_slot2", FIELD_dsp340050b49a6c_fld3420DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3421DUAL_slot2", FIELD_dsp340050b49a6c_fld3421DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3422DUAL_slot2", FIELD_dsp340050b49a6c_fld3422DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3423DUAL_slot2", FIELD_dsp340050b49a6c_fld3423DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3424DUAL_slot2", FIELD_dsp340050b49a6c_fld3424DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3425DUAL_slot2", FIELD_dsp340050b49a6c_fld3425DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3426DUAL_slot2", FIELD_dsp340050b49a6c_fld3426DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3427DUAL_slot2", FIELD_dsp340050b49a6c_fld3427DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3428DUAL_slot2", FIELD_dsp340050b49a6c_fld3428DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3429DUAL_slot2", FIELD_dsp340050b49a6c_fld3429DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3430DUAL_slot2", FIELD_dsp340050b49a6c_fld3430DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3431DUAL_slot2", FIELD_dsp340050b49a6c_fld3431DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3432DUAL_slot2", FIELD_dsp340050b49a6c_fld3432DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3433DUAL_slot2", FIELD_dsp340050b49a6c_fld3433DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3434DUAL_slot2", FIELD_dsp340050b49a6c_fld3434DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3435DUAL_slot2", FIELD_dsp340050b49a6c_fld3435DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3436DUAL_slot2", FIELD_dsp340050b49a6c_fld3436DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3437DUAL_slot2", FIELD_dsp340050b49a6c_fld3437DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3438DUAL_slot2", FIELD_dsp340050b49a6c_fld3438DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3439DUAL_slot2", FIELD_dsp340050b49a6c_fld3439DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3440DUAL_slot2", FIELD_dsp340050b49a6c_fld3440DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3441DUAL_slot2", FIELD_dsp340050b49a6c_fld3441DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3442DUAL_slot2", FIELD_dsp340050b49a6c_fld3442DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3443DUAL_slot2", FIELD_dsp340050b49a6c_fld3443DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3444DUAL_slot2", FIELD_dsp340050b49a6c_fld3444DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3445DUAL_slot2", FIELD_dsp340050b49a6c_fld3445DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3446DUAL_slot2", FIELD_dsp340050b49a6c_fld3446DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3448DUAL_slot2", FIELD_dsp340050b49a6c_fld3448DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3450DUAL_slot2", FIELD_dsp340050b49a6c_fld3450DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3451DUAL_slot2", FIELD_dsp340050b49a6c_fld3451DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3453DUAL_slot2", FIELD_dsp340050b49a6c_fld3453DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3454DUAL_slot2", FIELD_dsp340050b49a6c_fld3454DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3456DUAL_slot2", FIELD_dsp340050b49a6c_fld3456DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3457DUAL_slot2", FIELD_dsp340050b49a6c_fld3457DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3458DUAL_slot2", FIELD_dsp340050b49a6c_fld3458DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3459DUAL_slot2", FIELD_dsp340050b49a6c_fld3459DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3460DUAL_slot2", FIELD_dsp340050b49a6c_fld3460DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3461DUAL_slot2", FIELD_dsp340050b49a6c_fld3461DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3462DUAL_slot2", FIELD_dsp340050b49a6c_fld3462DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3464DUAL_slot2", FIELD_dsp340050b49a6c_fld3464DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3465DUAL_slot2", FIELD_dsp340050b49a6c_fld3465DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3466", FIELD_dsp340050b49a6c_fld3466, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3467DUAL_slot2", FIELD_dsp340050b49a6c_fld3467DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3468DUAL_slot2", FIELD_dsp340050b49a6c_fld3468DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3469DUAL_slot2", FIELD_dsp340050b49a6c_fld3469DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3470DUAL_slot2", FIELD_dsp340050b49a6c_fld3470DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3471DUAL_slot2", FIELD_dsp340050b49a6c_fld3471DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3472DUAL_slot2", FIELD_dsp340050b49a6c_fld3472DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3473DUAL_slot2", FIELD_dsp340050b49a6c_fld3473DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3474DUAL_slot2", FIELD_dsp340050b49a6c_fld3474DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3475DUAL_slot2", FIELD_dsp340050b49a6c_fld3475DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3477DUAL_slot2", FIELD_dsp340050b49a6c_fld3477DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3478DUAL_slot2", FIELD_dsp340050b49a6c_fld3478DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3479DUAL_slot2", FIELD_dsp340050b49a6c_fld3479DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3480DUAL_slot2", FIELD_dsp340050b49a6c_fld3480DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3481DUAL_slot2", FIELD_dsp340050b49a6c_fld3481DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3482DUAL_slot2", FIELD_dsp340050b49a6c_fld3482DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3484DUAL_slot2", FIELD_dsp340050b49a6c_fld3484DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3894DUAL_slot2", FIELD_dsp340050b49a6c_fld3894DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3895DUAL_slot2", FIELD_dsp340050b49a6c_fld3895DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3896DUAL_slot2", FIELD_dsp340050b49a6c_fld3896DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3897DUAL_slot2", FIELD_dsp340050b49a6c_fld3897DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3898DUAL_slot2", FIELD_dsp340050b49a6c_fld3898DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3899DUAL_slot2", FIELD_dsp340050b49a6c_fld3899DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3900", FIELD_dsp340050b49a6c_fld3900, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3901DUAL_slot2", FIELD_dsp340050b49a6c_fld3901DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3903DUAL_slot2", FIELD_dsp340050b49a6c_fld3903DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3904DUAL_slot2", FIELD_dsp340050b49a6c_fld3904DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3905DUAL_slot2", FIELD_dsp340050b49a6c_fld3905DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3906DUAL_slot2", FIELD_dsp340050b49a6c_fld3906DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3907DUAL_slot2", FIELD_dsp340050b49a6c_fld3907DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3908DUAL_slot2", FIELD_dsp340050b49a6c_fld3908DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3909DUAL_slot2", FIELD_dsp340050b49a6c_fld3909DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3910DUAL_slot2", FIELD_dsp340050b49a6c_fld3910DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3913DUAL_slot2", FIELD_dsp340050b49a6c_fld3913DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3914DUAL_slot2", FIELD_dsp340050b49a6c_fld3914DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3916DUAL_slot2", FIELD_dsp340050b49a6c_fld3916DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3917DUAL_slot2", FIELD_dsp340050b49a6c_fld3917DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3918DUAL_slot2", FIELD_dsp340050b49a6c_fld3918DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3919DUAL_slot2", FIELD_dsp340050b49a6c_fld3919DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3920DUAL_slot2", FIELD_dsp340050b49a6c_fld3920DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3921DUAL_slot2", FIELD_dsp340050b49a6c_fld3921DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3922DUAL_slot2", FIELD_dsp340050b49a6c_fld3922DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3923DUAL_slot2", FIELD_dsp340050b49a6c_fld3923DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3924DUAL_slot2", FIELD_dsp340050b49a6c_fld3924DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3925DUAL_slot2", FIELD_dsp340050b49a6c_fld3925DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3927DUAL_slot2", FIELD_dsp340050b49a6c_fld3927DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3928DUAL_slot2", FIELD_dsp340050b49a6c_fld3928DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3929DUAL_slot2", FIELD_dsp340050b49a6c_fld3929DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3930DUAL_slot2", FIELD_dsp340050b49a6c_fld3930DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3931DUAL_slot2", FIELD_dsp340050b49a6c_fld3931DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3933DUAL_slot2", FIELD_dsp340050b49a6c_fld3933DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3934DUAL_slot2", FIELD_dsp340050b49a6c_fld3934DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3935DUAL_slot2", FIELD_dsp340050b49a6c_fld3935DUAL_slot2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s22", FIELD_op0_s22, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s23", FIELD_op0_s23, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2057", FIELD_dsp340050b49a6c_fld2057, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2060", FIELD_dsp340050b49a6c_fld2060, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2066", FIELD_dsp340050b49a6c_fld2066, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2072", FIELD_dsp340050b49a6c_fld2072, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld2079", FIELD_dsp340050b49a6c_fld2079, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3487DUAL_slot0", FIELD_dsp340050b49a6c_fld3487DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3488DUAL_slot0", FIELD_dsp340050b49a6c_fld3488DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3489DUAL_slot0", FIELD_dsp340050b49a6c_fld3489DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3490DUAL_slot0", FIELD_dsp340050b49a6c_fld3490DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3491DUAL_slot0", FIELD_dsp340050b49a6c_fld3491DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3492DUAL_slot0", FIELD_dsp340050b49a6c_fld3492DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3493DUAL_slot0", FIELD_dsp340050b49a6c_fld3493DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3494DUAL_slot0", FIELD_dsp340050b49a6c_fld3494DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3496DUAL_slot0", FIELD_dsp340050b49a6c_fld3496DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3497DUAL_slot0", FIELD_dsp340050b49a6c_fld3497DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3498DUAL_slot0", FIELD_dsp340050b49a6c_fld3498DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3499DUAL_slot0", FIELD_dsp340050b49a6c_fld3499DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3500DUAL_slot0", FIELD_dsp340050b49a6c_fld3500DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3502DUAL_slot0", FIELD_dsp340050b49a6c_fld3502DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3504DUAL_slot0", FIELD_dsp340050b49a6c_fld3504DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3505DUAL_slot0", FIELD_dsp340050b49a6c_fld3505DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3506DUAL_slot0", FIELD_dsp340050b49a6c_fld3506DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3507DUAL_slot0", FIELD_dsp340050b49a6c_fld3507DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3508DUAL_slot0", FIELD_dsp340050b49a6c_fld3508DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3509DUAL_slot0", FIELD_dsp340050b49a6c_fld3509DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3510DUAL_slot0", FIELD_dsp340050b49a6c_fld3510DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3511DUAL_slot0", FIELD_dsp340050b49a6c_fld3511DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3512DUAL_slot0", FIELD_dsp340050b49a6c_fld3512DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3513DUAL_slot0", FIELD_dsp340050b49a6c_fld3513DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3514DUAL_slot0", FIELD_dsp340050b49a6c_fld3514DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3515DUAL_slot0", FIELD_dsp340050b49a6c_fld3515DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3516DUAL_slot0", FIELD_dsp340050b49a6c_fld3516DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3517DUAL_slot0", FIELD_dsp340050b49a6c_fld3517DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3518DUAL_slot0", FIELD_dsp340050b49a6c_fld3518DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3519DUAL_slot0", FIELD_dsp340050b49a6c_fld3519DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3520DUAL_slot0", FIELD_dsp340050b49a6c_fld3520DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3522DUAL_slot0", FIELD_dsp340050b49a6c_fld3522DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3523DUAL_slot0", FIELD_dsp340050b49a6c_fld3523DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3524DUAL_slot0", FIELD_dsp340050b49a6c_fld3524DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3527DUAL_slot0", FIELD_dsp340050b49a6c_fld3527DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3529DUAL_slot0", FIELD_dsp340050b49a6c_fld3529DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3530DUAL_slot0", FIELD_dsp340050b49a6c_fld3530DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3531", FIELD_dsp340050b49a6c_fld3531, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3532DUAL_slot0", FIELD_dsp340050b49a6c_fld3532DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3533DUAL_slot0", FIELD_dsp340050b49a6c_fld3533DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3535DUAL_slot0", FIELD_dsp340050b49a6c_fld3535DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3536DUAL_slot0", FIELD_dsp340050b49a6c_fld3536DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3537DUAL_slot0", FIELD_dsp340050b49a6c_fld3537DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3538DUAL_slot0", FIELD_dsp340050b49a6c_fld3538DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3539DUAL_slot0", FIELD_dsp340050b49a6c_fld3539DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3541DUAL_slot0", FIELD_dsp340050b49a6c_fld3541DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3542DUAL_slot0", FIELD_dsp340050b49a6c_fld3542DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3543DUAL_slot0", FIELD_dsp340050b49a6c_fld3543DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3544DUAL_slot0", FIELD_dsp340050b49a6c_fld3544DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3545DUAL_slot0", FIELD_dsp340050b49a6c_fld3545DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3546DUAL_slot0", FIELD_dsp340050b49a6c_fld3546DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3547DUAL_slot0", FIELD_dsp340050b49a6c_fld3547DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3548DUAL_slot0", FIELD_dsp340050b49a6c_fld3548DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3549DUAL_slot0", FIELD_dsp340050b49a6c_fld3549DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3550DUAL_slot0", FIELD_dsp340050b49a6c_fld3550DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3551DUAL_slot0", FIELD_dsp340050b49a6c_fld3551DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3552", FIELD_dsp340050b49a6c_fld3552, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3553DUAL_slot0", FIELD_dsp340050b49a6c_fld3553DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3554DUAL_slot0", FIELD_dsp340050b49a6c_fld3554DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3555DUAL_slot0", FIELD_dsp340050b49a6c_fld3555DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3556DUAL_slot0", FIELD_dsp340050b49a6c_fld3556DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3557DUAL_slot0", FIELD_dsp340050b49a6c_fld3557DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3558DUAL_slot0", FIELD_dsp340050b49a6c_fld3558DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3559DUAL_slot0", FIELD_dsp340050b49a6c_fld3559DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3560DUAL_slot0", FIELD_dsp340050b49a6c_fld3560DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3562DUAL_slot0", FIELD_dsp340050b49a6c_fld3562DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3563DUAL_slot0", FIELD_dsp340050b49a6c_fld3563DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3564DUAL_slot0", FIELD_dsp340050b49a6c_fld3564DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3565DUAL_slot0", FIELD_dsp340050b49a6c_fld3565DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3566DUAL_slot0", FIELD_dsp340050b49a6c_fld3566DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3567DUAL_slot0", FIELD_dsp340050b49a6c_fld3567DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3568DUAL_slot0", FIELD_dsp340050b49a6c_fld3568DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3569DUAL_slot0", FIELD_dsp340050b49a6c_fld3569DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3570DUAL_slot0", FIELD_dsp340050b49a6c_fld3570DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3571DUAL_slot0", FIELD_dsp340050b49a6c_fld3571DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3572DUAL_slot0", FIELD_dsp340050b49a6c_fld3572DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3573DUAL_slot0", FIELD_dsp340050b49a6c_fld3573DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3574DUAL_slot0", FIELD_dsp340050b49a6c_fld3574DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3575DUAL_slot0", FIELD_dsp340050b49a6c_fld3575DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3576DUAL_slot0", FIELD_dsp340050b49a6c_fld3576DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3577DUAL_slot0", FIELD_dsp340050b49a6c_fld3577DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3578DUAL_slot0", FIELD_dsp340050b49a6c_fld3578DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3579DUAL_slot0", FIELD_dsp340050b49a6c_fld3579DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3580DUAL_slot0", FIELD_dsp340050b49a6c_fld3580DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3581DUAL_slot0", FIELD_dsp340050b49a6c_fld3581DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3582DUAL_slot0", FIELD_dsp340050b49a6c_fld3582DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3583DUAL_slot0", FIELD_dsp340050b49a6c_fld3583DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3584", FIELD_dsp340050b49a6c_fld3584, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3585DUAL_slot0", FIELD_dsp340050b49a6c_fld3585DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3587DUAL_slot0", FIELD_dsp340050b49a6c_fld3587DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3588DUAL_slot0", FIELD_dsp340050b49a6c_fld3588DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3589DUAL_slot0", FIELD_dsp340050b49a6c_fld3589DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3590DUAL_slot0", FIELD_dsp340050b49a6c_fld3590DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3591DUAL_slot0", FIELD_dsp340050b49a6c_fld3591DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3592DUAL_slot0", FIELD_dsp340050b49a6c_fld3592DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3593DUAL_slot0", FIELD_dsp340050b49a6c_fld3593DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3594DUAL_slot0", FIELD_dsp340050b49a6c_fld3594DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3595DUAL_slot0", FIELD_dsp340050b49a6c_fld3595DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3596DUAL_slot0", FIELD_dsp340050b49a6c_fld3596DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3597DUAL_slot0", FIELD_dsp340050b49a6c_fld3597DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3598DUAL_slot0", FIELD_dsp340050b49a6c_fld3598DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3599DUAL_slot0", FIELD_dsp340050b49a6c_fld3599DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3600DUAL_slot0", FIELD_dsp340050b49a6c_fld3600DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3601DUAL_slot0", FIELD_dsp340050b49a6c_fld3601DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3602", FIELD_dsp340050b49a6c_fld3602, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3603DUAL_slot0", FIELD_dsp340050b49a6c_fld3603DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3604DUAL_slot0", FIELD_dsp340050b49a6c_fld3604DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3606DUAL_slot0", FIELD_dsp340050b49a6c_fld3606DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3607DUAL_slot0", FIELD_dsp340050b49a6c_fld3607DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3608DUAL_slot0", FIELD_dsp340050b49a6c_fld3608DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3609DUAL_slot0", FIELD_dsp340050b49a6c_fld3609DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3610", FIELD_dsp340050b49a6c_fld3610, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3611DUAL_slot0", FIELD_dsp340050b49a6c_fld3611DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3612DUAL_slot0", FIELD_dsp340050b49a6c_fld3612DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3613DUAL_slot0", FIELD_dsp340050b49a6c_fld3613DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3614DUAL_slot0", FIELD_dsp340050b49a6c_fld3614DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3615DUAL_slot0", FIELD_dsp340050b49a6c_fld3615DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3616DUAL_slot0", FIELD_dsp340050b49a6c_fld3616DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3618DUAL_slot0", FIELD_dsp340050b49a6c_fld3618DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3619", FIELD_dsp340050b49a6c_fld3619, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3620DUAL_slot0", FIELD_dsp340050b49a6c_fld3620DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3621DUAL_slot0", FIELD_dsp340050b49a6c_fld3621DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3622DUAL_slot0", FIELD_dsp340050b49a6c_fld3622DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3623DUAL_slot0", FIELD_dsp340050b49a6c_fld3623DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3624DUAL_slot0", FIELD_dsp340050b49a6c_fld3624DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3625DUAL_slot0", FIELD_dsp340050b49a6c_fld3625DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3626DUAL_slot0", FIELD_dsp340050b49a6c_fld3626DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3936DUAL_slot0", FIELD_dsp340050b49a6c_fld3936DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3937DUAL_slot0", FIELD_dsp340050b49a6c_fld3937DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3938DUAL_slot0", FIELD_dsp340050b49a6c_fld3938DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3939DUAL_slot0", FIELD_dsp340050b49a6c_fld3939DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3940DUAL_slot0", FIELD_dsp340050b49a6c_fld3940DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3941DUAL_slot0", FIELD_dsp340050b49a6c_fld3941DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3943DUAL_slot0", FIELD_dsp340050b49a6c_fld3943DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3945DUAL_slot0", FIELD_dsp340050b49a6c_fld3945DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3946DUAL_slot0", FIELD_dsp340050b49a6c_fld3946DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3947DUAL_slot0", FIELD_dsp340050b49a6c_fld3947DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3949DUAL_slot0", FIELD_dsp340050b49a6c_fld3949DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3950DUAL_slot0", FIELD_dsp340050b49a6c_fld3950DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3951DUAL_slot0", FIELD_dsp340050b49a6c_fld3951DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3952DUAL_slot0", FIELD_dsp340050b49a6c_fld3952DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3954DUAL_slot0", FIELD_dsp340050b49a6c_fld3954DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3957DUAL_slot0", FIELD_dsp340050b49a6c_fld3957DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3958DUAL_slot0", FIELD_dsp340050b49a6c_fld3958DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3959DUAL_slot0", FIELD_dsp340050b49a6c_fld3959DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3960DUAL_slot0", FIELD_dsp340050b49a6c_fld3960DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "dsp340050b49a6c_fld3961DUAL_slot0", FIELD_dsp340050b49a6c_fld3961DUAL_slot0, -1, 0, 0, 0, 0, 0, 0 }
+};
+
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_bt,
+ OPERAND_bs,
+ OPERAND_br,
+ OPERAND_bt2,
+ OPERAND_bs2,
+ OPERAND_br2,
+ OPERAND_bt4,
+ OPERAND_bs4,
+ OPERAND_br4,
+ OPERAND_bt8,
+ OPERAND_bs8,
+ OPERAND_br8,
+ OPERAND_bt16,
+ OPERAND_bs16,
+ OPERAND_br16,
+ OPERAND_brall,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_cimm8x4,
+ OPERAND_frr,
+ OPERAND_frs,
+ OPERAND_frt,
+ OPERAND_dsp340050b49a6c_oper45_reg,
+ OPERAND_dsp340050b49a6c_oper46_reg,
+ OPERAND_dsp340050b49a6c_oper47_reg,
+ OPERAND_dsp340050b49a6c_oper48_imm,
+ OPERAND_dsp340050b49a6c_oper49_imm,
+ OPERAND_dsp340050b49a6c_oper50_reg,
+ OPERAND_dsp340050b49a6c_oper51_reg,
+ OPERAND_dsp340050b49a6c_oper52_imm,
+ OPERAND_dsp340050b49a6c_oper53_reg,
+ OPERAND_dsp340050b49a6c_oper54_reg,
+ OPERAND_dsp340050b49a6c_oper55_reg,
+ OPERAND_dsp340050b49a6c_oper56_reg,
+ OPERAND_dsp340050b49a6c_oper57_imm,
+ OPERAND_dsp340050b49a6c_oper58_imm,
+ OPERAND_dsp340050b49a6c_oper59_imm,
+ OPERAND_dsp340050b49a6c_oper60_imm,
+ OPERAND_dsp340050b49a6c_oper61_imm,
+ OPERAND_dsp340050b49a6c_oper62_reg,
+ OPERAND_dsp340050b49a6c_oper63_reg,
+ OPERAND_dsp340050b49a6c_oper64_imm,
+ OPERAND_dsp340050b49a6c_oper65_reg,
+ OPERAND_dsp340050b49a6c_oper66_reg,
+ OPERAND_dsp340050b49a6c_oper67_reg,
+ OPERAND_dsp340050b49a6c_oper68_imm,
+ OPERAND_dsp340050b49a6c_oper69_imm,
+ OPERAND_dsp340050b49a6c_oper70_imm,
+ OPERAND_dsp340050b49a6c_oper71_reg,
+ OPERAND_dsp340050b49a6c_oper72_imm,
+ OPERAND_dsp340050b49a6c_oper73_imm,
+ OPERAND_dsp340050b49a6c_oper74_imm,
+ OPERAND_dsp340050b49a6c_oper75_imm,
+ OPERAND_dsp340050b49a6c_oper76_imm,
+ OPERAND_dsp340050b49a6c_oper77_reg,
+ OPERAND_dsp340050b49a6c_oper78_imm,
+ OPERAND_dsp340050b49a6c_oper79_reg,
+ OPERAND_dsp340050b49a6c_oper80_reg,
+ OPERAND_dsp340050b49a6c_oper81_reg,
+ OPERAND_dsp340050b49a6c_oper82_reg,
+ OPERAND_dsp340050b49a6c_oper83_imm,
+ OPERAND_dsp340050b49a6c_oper84_imm,
+ OPERAND_dsp340050b49a6c_oper85_imm,
+ OPERAND_dsp340050b49a6c_oper86_imm,
+ OPERAND_dsp340050b49a6c_oper87_imm,
+ OPERAND_dsp340050b49a6c_oper88_imm,
+ OPERAND_dsp340050b49a6c_oper89_imm,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_bbi,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_s,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sae,
+ OPERAND_sal,
+ OPERAND_sargt,
+ OPERAND_sas4,
+ OPERAND_sas,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_t2,
+ OPERAND_s2,
+ OPERAND_r2,
+ OPERAND_t4,
+ OPERAND_s4,
+ OPERAND_r4,
+ OPERAND_t8,
+ OPERAND_s8,
+ OPERAND_r8,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_fimm8,
+ OPERAND_dsp340050b49a6c_fld2019,
+ OPERAND_dsp340050b49a6c_fld2021,
+ OPERAND_dsp340050b49a6c_fld2029,
+ OPERAND_dsp340050b49a6c_fld2030,
+ OPERAND_dsp340050b49a6c_fld2032,
+ OPERAND_dsp340050b49a6c_fld2035,
+ OPERAND_dsp340050b49a6c_fld2036,
+ OPERAND_dsp340050b49a6c_fld2037,
+ OPERAND_dsp340050b49a6c_fld2038,
+ OPERAND_dsp340050b49a6c_fld2039,
+ OPERAND_dsp340050b49a6c_fld2040,
+ OPERAND_dsp340050b49a6c_fld2041,
+ OPERAND_dsp340050b49a6c_fld2042,
+ OPERAND_dsp340050b49a6c_fld2043,
+ OPERAND_dsp340050b49a6c_fld2044,
+ OPERAND_dsp340050b49a6c_fld2045,
+ OPERAND_dsp340050b49a6c_fld2046,
+ OPERAND_dsp340050b49a6c_fld2047,
+ OPERAND_dsp340050b49a6c_fld2048,
+ OPERAND_dsp340050b49a6c_fld2049,
+ OPERAND_dsp340050b49a6c_fld2050,
+ OPERAND_dsp340050b49a6c_fld2051,
+ OPERAND_dsp340050b49a6c_fld2052,
+ OPERAND_dsp340050b49a6c_fld2053,
+ OPERAND_dsp340050b49a6c_fld2054,
+ OPERAND_dsp340050b49a6c_fld2055,
+ OPERAND_dsp340050b49a6c_fld2056,
+ OPERAND_dsp340050b49a6c_fld2082Inst,
+ OPERAND_dsp340050b49a6c_fld2083Inst,
+ OPERAND_dsp340050b49a6c_fld2084Inst,
+ OPERAND_dsp340050b49a6c_fld2085Inst,
+ OPERAND_dsp340050b49a6c_fld2086Inst,
+ OPERAND_dsp340050b49a6c_fld2088Inst,
+ OPERAND_dsp340050b49a6c_fld2089Inst,
+ OPERAND_dsp340050b49a6c_fld2090Inst,
+ OPERAND_dsp340050b49a6c_fld2091Inst,
+ OPERAND_dsp340050b49a6c_fld2092Inst,
+ OPERAND_dsp340050b49a6c_fld2094Inst,
+ OPERAND_dsp340050b49a6c_fld2095Inst,
+ OPERAND_dsp340050b49a6c_fld2096Inst,
+ OPERAND_dsp340050b49a6c_fld2098Inst,
+ OPERAND_dsp340050b49a6c_fld2099Inst,
+ OPERAND_dsp340050b49a6c_fld2100Inst,
+ OPERAND_dsp340050b49a6c_fld2101Inst,
+ OPERAND_dsp340050b49a6c_fld2102Inst,
+ OPERAND_dsp340050b49a6c_fld2103Inst,
+ OPERAND_dsp340050b49a6c_fld2104Inst,
+ OPERAND_dsp340050b49a6c_fld2105Inst,
+ OPERAND_dsp340050b49a6c_fld2106Inst,
+ OPERAND_dsp340050b49a6c_fld2107Inst,
+ OPERAND_dsp340050b49a6c_fld2108Inst,
+ OPERAND_dsp340050b49a6c_fld2109Inst,
+ OPERAND_dsp340050b49a6c_fld2110Inst,
+ OPERAND_dsp340050b49a6c_fld2111Inst,
+ OPERAND_dsp340050b49a6c_fld2112Inst,
+ OPERAND_dsp340050b49a6c_fld2113Inst,
+ OPERAND_dsp340050b49a6c_fld2114Inst,
+ OPERAND_dsp340050b49a6c_fld2115Inst,
+ OPERAND_dsp340050b49a6c_fld2116Inst,
+ OPERAND_dsp340050b49a6c_fld2117Inst,
+ OPERAND_dsp340050b49a6c_fld2118Inst,
+ OPERAND_dsp340050b49a6c_fld2119Inst,
+ OPERAND_dsp340050b49a6c_fld2120Inst,
+ OPERAND_dsp340050b49a6c_fld2122Inst,
+ OPERAND_dsp340050b49a6c_fld2123Inst,
+ OPERAND_dsp340050b49a6c_fld2124Inst,
+ OPERAND_dsp340050b49a6c_fld2125Inst,
+ OPERAND_dsp340050b49a6c_fld2126Inst,
+ OPERAND_dsp340050b49a6c_fld2127Inst,
+ OPERAND_dsp340050b49a6c_fld2128Inst,
+ OPERAND_dsp340050b49a6c_fld2129Inst,
+ OPERAND_dsp340050b49a6c_fld2131Inst,
+ OPERAND_dsp340050b49a6c_fld2132Inst,
+ OPERAND_dsp340050b49a6c_fld2133Inst,
+ OPERAND_dsp340050b49a6c_fld2134Inst,
+ OPERAND_dsp340050b49a6c_fld2136Inst,
+ OPERAND_dsp340050b49a6c_fld2137Inst,
+ OPERAND_dsp340050b49a6c_fld2138Inst,
+ OPERAND_dsp340050b49a6c_fld2139Inst,
+ OPERAND_dsp340050b49a6c_fld2140Inst,
+ OPERAND_dsp340050b49a6c_fld2141Inst,
+ OPERAND_dsp340050b49a6c_fld2142Inst,
+ OPERAND_dsp340050b49a6c_fld2143Inst,
+ OPERAND_dsp340050b49a6c_fld2144Inst,
+ OPERAND_dsp340050b49a6c_fld2145Inst,
+ OPERAND_dsp340050b49a6c_fld2146Inst,
+ OPERAND_dsp340050b49a6c_fld2147Inst,
+ OPERAND_dsp340050b49a6c_fld2149Inst,
+ OPERAND_dsp340050b49a6c_fld2151Inst,
+ OPERAND_dsp340050b49a6c_fld2153Inst,
+ OPERAND_dsp340050b49a6c_fld2154Inst,
+ OPERAND_dsp340050b49a6c_fld2155Inst,
+ OPERAND_dsp340050b49a6c_fld2156Inst,
+ OPERAND_dsp340050b49a6c_fld2157Inst,
+ OPERAND_dsp340050b49a6c_fld2158Inst,
+ OPERAND_dsp340050b49a6c_fld2159Inst,
+ OPERAND_dsp340050b49a6c_fld2160Inst,
+ OPERAND_dsp340050b49a6c_fld2161Inst,
+ OPERAND_dsp340050b49a6c_fld2162Inst,
+ OPERAND_dsp340050b49a6c_fld2163Inst,
+ OPERAND_dsp340050b49a6c_fld2164Inst,
+ OPERAND_dsp340050b49a6c_fld2165Inst,
+ OPERAND_dsp340050b49a6c_fld2166Inst,
+ OPERAND_dsp340050b49a6c_fld2167Inst,
+ OPERAND_dsp340050b49a6c_fld2168Inst,
+ OPERAND_dsp340050b49a6c_fld2169Inst,
+ OPERAND_dsp340050b49a6c_fld2171Inst,
+ OPERAND_dsp340050b49a6c_fld2172Inst,
+ OPERAND_dsp340050b49a6c_fld2173Inst,
+ OPERAND_dsp340050b49a6c_fld2174Inst,
+ OPERAND_dsp340050b49a6c_fld2175Inst,
+ OPERAND_dsp340050b49a6c_fld2177Inst,
+ OPERAND_dsp340050b49a6c_fld2178Inst,
+ OPERAND_dsp340050b49a6c_fld2179Inst,
+ OPERAND_dsp340050b49a6c_fld2180Inst,
+ OPERAND_dsp340050b49a6c_fld2181Inst,
+ OPERAND_dsp340050b49a6c_fld2182Inst,
+ OPERAND_dsp340050b49a6c_fld2183Inst,
+ OPERAND_dsp340050b49a6c_fld2184Inst,
+ OPERAND_dsp340050b49a6c_fld2185Inst,
+ OPERAND_dsp340050b49a6c_fld2186Inst,
+ OPERAND_dsp340050b49a6c_fld2187Inst,
+ OPERAND_dsp340050b49a6c_fld2188Inst,
+ OPERAND_dsp340050b49a6c_fld2189Inst,
+ OPERAND_dsp340050b49a6c_fld2190Inst,
+ OPERAND_dsp340050b49a6c_fld2191Inst,
+ OPERAND_dsp340050b49a6c_fld2192Inst,
+ OPERAND_dsp340050b49a6c_fld2193Inst,
+ OPERAND_dsp340050b49a6c_fld2194Inst,
+ OPERAND_dsp340050b49a6c_fld2195Inst,
+ OPERAND_dsp340050b49a6c_fld2196Inst,
+ OPERAND_dsp340050b49a6c_fld2197Inst,
+ OPERAND_dsp340050b49a6c_fld2198Inst,
+ OPERAND_dsp340050b49a6c_fld2199Inst,
+ OPERAND_dsp340050b49a6c_fld2200Inst,
+ OPERAND_dsp340050b49a6c_fld2201Inst,
+ OPERAND_dsp340050b49a6c_fld2202Inst,
+ OPERAND_dsp340050b49a6c_fld2203Inst,
+ OPERAND_dsp340050b49a6c_fld2204Inst,
+ OPERAND_dsp340050b49a6c_fld2205Inst,
+ OPERAND_dsp340050b49a6c_fld2206Inst,
+ OPERAND_dsp340050b49a6c_fld2207Inst,
+ OPERAND_dsp340050b49a6c_fld2208Inst,
+ OPERAND_dsp340050b49a6c_fld2209Inst,
+ OPERAND_dsp340050b49a6c_fld2210Inst,
+ OPERAND_dsp340050b49a6c_fld2211Inst,
+ OPERAND_dsp340050b49a6c_fld2212Inst,
+ OPERAND_dsp340050b49a6c_fld2213Inst,
+ OPERAND_dsp340050b49a6c_fld2214Inst,
+ OPERAND_dsp340050b49a6c_fld2215Inst,
+ OPERAND_dsp340050b49a6c_fld2216Inst,
+ OPERAND_dsp340050b49a6c_fld2217Inst,
+ OPERAND_dsp340050b49a6c_fld2218Inst,
+ OPERAND_dsp340050b49a6c_fld2219Inst,
+ OPERAND_dsp340050b49a6c_fld2220Inst,
+ OPERAND_dsp340050b49a6c_fld2221Inst,
+ OPERAND_dsp340050b49a6c_fld2222Inst,
+ OPERAND_dsp340050b49a6c_fld2223Inst,
+ OPERAND_dsp340050b49a6c_fld2224Inst,
+ OPERAND_dsp340050b49a6c_fld2225Inst,
+ OPERAND_dsp340050b49a6c_fld2226Inst,
+ OPERAND_dsp340050b49a6c_fld2227Inst,
+ OPERAND_dsp340050b49a6c_fld2228Inst,
+ OPERAND_dsp340050b49a6c_fld2229Inst,
+ OPERAND_dsp340050b49a6c_fld2230Inst,
+ OPERAND_dsp340050b49a6c_fld2231Inst,
+ OPERAND_dsp340050b49a6c_fld2232Inst,
+ OPERAND_dsp340050b49a6c_fld2234Inst,
+ OPERAND_dsp340050b49a6c_fld2235Inst,
+ OPERAND_dsp340050b49a6c_fld2236Inst,
+ OPERAND_dsp340050b49a6c_fld2237Inst,
+ OPERAND_dsp340050b49a6c_fld2238Inst,
+ OPERAND_dsp340050b49a6c_fld2239Inst,
+ OPERAND_dsp340050b49a6c_fld2240Inst,
+ OPERAND_dsp340050b49a6c_fld2241Inst,
+ OPERAND_dsp340050b49a6c_fld2242Inst,
+ OPERAND_dsp340050b49a6c_fld2243Inst,
+ OPERAND_dsp340050b49a6c_fld2244Inst,
+ OPERAND_dsp340050b49a6c_fld2245Inst,
+ OPERAND_dsp340050b49a6c_fld2246Inst,
+ OPERAND_dsp340050b49a6c_fld2247Inst,
+ OPERAND_dsp340050b49a6c_fld2248Inst,
+ OPERAND_dsp340050b49a6c_fld2249Inst,
+ OPERAND_dsp340050b49a6c_fld2250Inst,
+ OPERAND_dsp340050b49a6c_fld2251Inst,
+ OPERAND_dsp340050b49a6c_fld2252Inst,
+ OPERAND_dsp340050b49a6c_fld2253Inst,
+ OPERAND_dsp340050b49a6c_fld2254,
+ OPERAND_dsp340050b49a6c_fld2255Inst,
+ OPERAND_dsp340050b49a6c_fld2257Inst,
+ OPERAND_dsp340050b49a6c_fld3627Inst,
+ OPERAND_dsp340050b49a6c_fld3630Inst,
+ OPERAND_dsp340050b49a6c_fld3631Inst,
+ OPERAND_dsp340050b49a6c_fld3633Inst,
+ OPERAND_dsp340050b49a6c_fld3634,
+ OPERAND_dsp340050b49a6c_fld3635Inst,
+ OPERAND_dsp340050b49a6c_fld3636Inst,
+ OPERAND_dsp340050b49a6c_fld3637Inst,
+ OPERAND_dsp340050b49a6c_fld3638Inst,
+ OPERAND_dsp340050b49a6c_fld3639Inst,
+ OPERAND_dsp340050b49a6c_fld3640Inst,
+ OPERAND_dsp340050b49a6c_fld3642Inst,
+ OPERAND_dsp340050b49a6c_fld3643Inst,
+ OPERAND_dsp340050b49a6c_fld3644Inst,
+ OPERAND_dsp340050b49a6c_fld3645Inst,
+ OPERAND_dsp340050b49a6c_fld3647Inst,
+ OPERAND_dsp340050b49a6c_fld3648Inst,
+ OPERAND_dsp340050b49a6c_fld3649Inst,
+ OPERAND_dsp340050b49a6c_fld3650Inst,
+ OPERAND_dsp340050b49a6c_fld3651Inst,
+ OPERAND_dsp340050b49a6c_fld3653Inst,
+ OPERAND_dsp340050b49a6c_fld3654Inst,
+ OPERAND_dsp340050b49a6c_fld3655Inst,
+ OPERAND_dsp340050b49a6c_fld3656Inst,
+ OPERAND_dsp340050b49a6c_fld3657Inst,
+ OPERAND_dsp340050b49a6c_fld3658Inst,
+ OPERAND_dsp340050b49a6c_fld3659Inst,
+ OPERAND_dsp340050b49a6c_fld3660Inst,
+ OPERAND_dsp340050b49a6c_fld3661Inst,
+ OPERAND_dsp340050b49a6c_fld3662Inst,
+ OPERAND_op0_s3,
+ OPERAND_dsp340050b49a6c_fld2025,
+ OPERAND_dsp340050b49a6c_fld2027,
+ OPERAND_dsp340050b49a6c_fld2258GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2259GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2260GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2261GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2262GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2263GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2264GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2266GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2267GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2268GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2269GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2270GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2271GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2272,
+ OPERAND_dsp340050b49a6c_fld2273GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2274GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2275GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2277GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2278GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2279GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2280GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2281GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2282GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2283GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2284GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2286GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2287GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2288GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2289GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2290GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2291GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2292GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2293GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2294GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2295GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2296GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2297GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2298GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2299GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2300GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2301GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2302,
+ OPERAND_dsp340050b49a6c_fld2303GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2304GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2305,
+ OPERAND_dsp340050b49a6c_fld2306GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2308GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2309GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2310GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2312GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2313,
+ OPERAND_dsp340050b49a6c_fld2314GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2316GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2317GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2318GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2319GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2320GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2321GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2322GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2323GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2324GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2325GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2326GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2327GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2328GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2329GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2330GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2331GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2332GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2333GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2334GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2335GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2336GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2337GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2338GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2339GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2340GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2341GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2342GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2343GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2344GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2345GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2346GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2347GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2348GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2349GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2350GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2351GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2352GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2353GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2354GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2355GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2356GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2357GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2358GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2359GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2361GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2362GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2364GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2366GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2368GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2369GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2370GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2371GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2372GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2373GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2374GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2375GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2376GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2378GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2379GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2381GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2383GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2384,
+ OPERAND_dsp340050b49a6c_fld2385GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2386,
+ OPERAND_dsp340050b49a6c_fld2387GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2388GP_slot2,
+ OPERAND_dsp340050b49a6c_fld2389GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3663GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3664GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3665GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3666,
+ OPERAND_dsp340050b49a6c_fld3667GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3668GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3669GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3670GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3671GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3673GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3674GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3675GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3676GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3678GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3679GP_slot2,
+ OPERAND_dsp340050b49a6c_fld3680GP_slot2,
+ OPERAND_op0_s4,
+ OPERAND_dsp340050b49a6c_fld2026,
+ OPERAND_dsp340050b49a6c_fld2031,
+ OPERAND_dsp340050b49a6c_fld2394GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2395GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2397GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2398GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2399GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2400GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2402GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2403GP_slot1,
+ OPERAND_dsp340050b49a6c_fld2405GP_slot1,
+ OPERAND_dsp340050b49a6c_fld3681GP_slot1,
+ OPERAND_dsp340050b49a6c_fld3683GP_slot1,
+ OPERAND_dsp340050b49a6c_fld3684GP_slot1,
+ OPERAND_dsp340050b49a6c_fld3686GP_slot1,
+ OPERAND_op0_s5,
+ OPERAND_dsp340050b49a6c_fld2058,
+ OPERAND_dsp340050b49a6c_fld2067,
+ OPERAND_dsp340050b49a6c_fld2407GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2409GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2410GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2411GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2412GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2413GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2415GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2416GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2417GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2418GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2419GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2420GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2422GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2423GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2424GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2425GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2426GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2427GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2429GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2430GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2431GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2432GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2433GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2434GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2435GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2436GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2437GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2438GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2439GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2440GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2441GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2443GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2444GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2445,
+ OPERAND_dsp340050b49a6c_fld2447GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2448,
+ OPERAND_dsp340050b49a6c_fld2449GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2451GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2452GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2453GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2454GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2455GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2456GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2457GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2458GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2459GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2460GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2461GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2462,
+ OPERAND_dsp340050b49a6c_fld2463GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2464GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2465GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2466GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2467GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2468GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2470GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2471GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2472GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2473GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2474GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2475GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2477GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2479GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2480GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2481GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2482GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2483GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2484GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2485GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2486GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2487GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2488GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2489GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2490GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2491GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2492GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2493GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2494GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2495GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2496GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2497GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2498GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2499GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2500GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2501GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2502GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2503GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2504GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2505GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2506GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2507GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2508GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2509GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2510GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2512GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2514GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2515GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2516GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2517GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2518GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2519GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2520GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2521GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2523GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2524GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2526GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2527GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2528GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2529GP_slot0,
+ OPERAND_dsp340050b49a6c_fld2530,
+ OPERAND_dsp340050b49a6c_fld2531GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3688GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3689GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3690GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3691GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3692GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3693GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3695GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3696GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3697GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3698GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3699GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3700GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3702GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3703GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3705GP_slot0,
+ OPERAND_dsp340050b49a6c_fld3706GP_slot0,
+ OPERAND_op0_s6,
+ OPERAND_dsp340050b49a6c_fld2532DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2533DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2534DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2535DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2536DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2537DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2538DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2539DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2540DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2541DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2542DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2543DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2544DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2545DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2546DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2547DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2548DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2549DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2550DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2551DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2552DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2553DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2554DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2555DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2556DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2557DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2558DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2559DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2560DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2561DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2562DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2563DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2564DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2565DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2566DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2567DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2568DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2569DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2571DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2572DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2573DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2574DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2575DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2576DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2577DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2578,
+ OPERAND_dsp340050b49a6c_fld2579DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2580DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2581DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2582DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2583DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2584DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2585DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2586DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2587DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2588DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2589DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2590DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2591DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2592DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2595DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2596DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2598DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2599DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2601DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2602DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2604DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2605,
+ OPERAND_dsp340050b49a6c_fld2606DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2608DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2609DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2610DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2611DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2612,
+ OPERAND_dsp340050b49a6c_fld2613DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2614,
+ OPERAND_dsp340050b49a6c_fld2615DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2616DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2617DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2618DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2619DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2620DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2621DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2622DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2623DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2624DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2625,
+ OPERAND_dsp340050b49a6c_fld2626DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2628DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2630DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2632DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2633DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2635DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2636DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2637DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2640DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2641DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2642DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2643DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2644DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2645DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2646DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2647DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2648DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2649DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2650DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2651DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2652DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2654DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2655DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2656DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2657DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld2658DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3708DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3709DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3710DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3711DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3712,
+ OPERAND_dsp340050b49a6c_fld3713DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3714DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3715,
+ OPERAND_dsp340050b49a6c_fld3716DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3717DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3718,
+ OPERAND_dsp340050b49a6c_fld3719DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3721DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3722,
+ OPERAND_dsp340050b49a6c_fld3723DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3724DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3725DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3726DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3727DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3728DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3729DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3731DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3732DOT_slot2,
+ OPERAND_dsp340050b49a6c_fld3733DOT_slot2,
+ OPERAND_op0_s7,
+ OPERAND_dsp340050b49a6c_fld3734DOT_slot1,
+ OPERAND_op0_s8,
+ OPERAND_dsp340050b49a6c_fld2068,
+ OPERAND_dsp340050b49a6c_fld2666DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2667DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2668DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2669DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2671DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2672DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2673DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2674DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2675DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2676DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2677DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2678DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2679DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2680DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2681DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2682DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2683DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2684DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2685DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2686DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2688DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2689DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2690DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2692DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2693DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2695DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2697DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2699DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2700DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2701DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2702DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2703DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2704DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld2705DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3735DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3736,
+ OPERAND_dsp340050b49a6c_fld3737DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3738DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3739DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3740DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3741DOT_slot0,
+ OPERAND_dsp340050b49a6c_fld3742DOT_slot0,
+ OPERAND_op0_s9,
+ OPERAND_dsp340050b49a6c_fld2706PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2707PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2708PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2709PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2710PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2711PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2713PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2714PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2715PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2717PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2718PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2719PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2721PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2722PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2723PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2724PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2725PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2726PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2727PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2728PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2729PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2730PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2731PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2732PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2733PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2734PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2735PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2736PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2737,
+ OPERAND_dsp340050b49a6c_fld2738PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2739PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2741PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2742PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2743PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2746PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2747PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2748PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2750PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2751PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2752PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2753PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2754PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2755PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2756PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2757PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2758PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2759PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2760PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2761PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2762PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2763PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2764PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2765PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2766PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2767PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2768PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2769PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2770PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2771PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2772PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2773PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2774PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2775PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2776PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2777PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2778PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2779PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2780PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2781PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2782PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2783PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2784PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2785PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2786PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2787PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2788PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2789PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2790PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2791PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2792PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2793PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2795PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2796PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2798PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2801PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2803PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2805PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2806PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2807PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2808PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2809PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2810PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2811PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2812PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2814PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2816PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2817,
+ OPERAND_dsp340050b49a6c_fld2818PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2819,
+ OPERAND_dsp340050b49a6c_fld2820PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2821PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld2823PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3744PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3745PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3746PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3747PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3748,
+ OPERAND_dsp340050b49a6c_fld3749PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3750PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3751PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3752PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3753PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3754PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3756PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3757PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3758PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3759PQ_slot2,
+ OPERAND_dsp340050b49a6c_fld3760PQ_slot2,
+ OPERAND_op0_s10,
+ OPERAND_dsp340050b49a6c_fld2825PQ_slot1,
+ OPERAND_dsp340050b49a6c_fld2826PQ_slot1,
+ OPERAND_dsp340050b49a6c_fld3761PQ_slot1,
+ OPERAND_op0_s11,
+ OPERAND_dsp340050b49a6c_fld2059,
+ OPERAND_dsp340050b49a6c_fld2069,
+ OPERAND_dsp340050b49a6c_fld2827PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2829PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2830PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2831PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2832PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2833PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2835PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2836PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2837PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2838PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2839PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2840PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2842PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2843PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2844PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2845PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2846PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2847PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2849PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2850PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2851PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2852PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2853PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2854PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2855PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2856PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2857PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2858PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2859PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2860PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2861PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2863PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2864PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2865,
+ OPERAND_dsp340050b49a6c_fld2867PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2869PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2871PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2872PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2873PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2874PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2875PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2876PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2877PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2878PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2879PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2880PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2881PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2882,
+ OPERAND_dsp340050b49a6c_fld2883PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2884PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2885PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2886PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2887PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2888PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2890PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2891PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2892PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2893PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2894PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2895PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2897PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2899PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2900PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2901PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2902PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2903PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2904PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2905PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2906PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2907PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2908PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2909PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2910PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2911PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2912PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2913PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2914PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2915PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2916PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2917PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2918PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2919PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2920PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2921PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2922PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2923PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2924PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2925PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2926PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2927PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2928PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2929PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2930PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2932PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2934PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2935PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2936PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2937PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2939PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2940,
+ OPERAND_dsp340050b49a6c_fld2941PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2942PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2943PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2945PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2946PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2947PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2948PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2949PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld2950PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3763PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3764PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3765PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3766PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3767PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3768PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3769PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3770PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3771PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3772,
+ OPERAND_dsp340050b49a6c_fld3773PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3775PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3776PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3777PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3778PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3779PQ_slot0,
+ OPERAND_dsp340050b49a6c_fld3780PQ_slot0,
+ OPERAND_op0_s12,
+ OPERAND_dsp340050b49a6c_fld2953ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2954ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2955ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2956ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2957ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2958ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2959ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2960ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2963ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2964ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2966ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld2967ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3782ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3783ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3784ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3785ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3786ACC2_slot2,
+ OPERAND_dsp340050b49a6c_fld3788ACC2_slot2,
+ OPERAND_op0_s13,
+ OPERAND_dsp340050b49a6c_fld2028,
+ OPERAND_dsp340050b49a6c_fld2075,
+ OPERAND_dsp340050b49a6c_fld2968ACC2_slot1,
+ OPERAND_dsp340050b49a6c_fld2969ACC2_slot1,
+ OPERAND_dsp340050b49a6c_fld3790ACC2_slot1,
+ OPERAND_dsp340050b49a6c_fld3793ACC2_slot1,
+ OPERAND_op0_s14,
+ OPERAND_dsp340050b49a6c_fld2973ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2974ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2975ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2976ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2977ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2980ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2981ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2982ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2984ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2985ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2987ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2989ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld2990ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3795ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3796ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3797,
+ OPERAND_dsp340050b49a6c_fld3798ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3799ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3800ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3801ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3802ACC2_slot0,
+ OPERAND_dsp340050b49a6c_fld3803ACC2_slot0,
+ OPERAND_op0_s15,
+ OPERAND_dsp340050b49a6c_fld2991SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2992SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2993SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2994SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2995SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2996SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2997SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2998SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld2999SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3000SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3001SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3002SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3003SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3004SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3005SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3006SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3007SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3008SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3009SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3010SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3011SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3012SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3013SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3014SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3015SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3016SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3017SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3018SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3019SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3020SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3021SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3022SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3023SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3024SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3025SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3026SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3027SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3028SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3030SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3031SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3032SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3033SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3034SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3035SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3036SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3038SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3039SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3040SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3043SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3044SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3046SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3047SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3048,
+ OPERAND_dsp340050b49a6c_fld3049SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3050SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3051SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3052SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3053SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3054SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3055SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3056SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3058SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3059SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3061SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3062,
+ OPERAND_dsp340050b49a6c_fld3063SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3065SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3066SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3067SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3068SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3069SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3070,
+ OPERAND_dsp340050b49a6c_fld3071SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3072SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3073SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3074SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3075SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3076SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3077SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3078SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3079SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3080SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3081SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3082SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3084SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3085SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3087SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3088SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3090SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3091SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3092SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3093SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3096SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3097SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3098SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3099SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3100SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3101SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3102SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3104SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3105SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3106SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3107SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3108SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3109SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3110SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3111SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3113SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3114SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3115SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3116SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3805SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3806SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3807SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3808,
+ OPERAND_dsp340050b49a6c_fld3809SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3810SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3812SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3813SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3814SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3816SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3817,
+ OPERAND_dsp340050b49a6c_fld3818SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3819SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3821SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3822SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3823SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3824SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3825SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3826SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3827SMOD_slot2,
+ OPERAND_dsp340050b49a6c_fld3828SMOD_slot2,
+ OPERAND_op0_s16,
+ OPERAND_dsp340050b49a6c_fld2033,
+ OPERAND_dsp340050b49a6c_fld2080,
+ OPERAND_dsp340050b49a6c_fld3117SMOD_slot1,
+ OPERAND_dsp340050b49a6c_fld3118SMOD_slot1,
+ OPERAND_dsp340050b49a6c_fld3829SMOD_slot1,
+ OPERAND_op0_s17,
+ OPERAND_dsp340050b49a6c_fld3119SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3120SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3121SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3122SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3123SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3125SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3126SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3127SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3128SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3129SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3130SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3131SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3132SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3133SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3134SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3135SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3136SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3137SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3138SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3139SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3140SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3141SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3142SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3143SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3144SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3145SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3146SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3148SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3149SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3150,
+ OPERAND_dsp340050b49a6c_fld3152SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3153SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3155SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3156SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3157SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3158SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3159SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3160SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3161SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3164SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3165SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3166SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3168SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3170SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3171SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3172SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3173SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3174SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3175SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3176SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3177SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3178SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3179SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3180SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3181SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3182SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3184SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3186SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3188SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3832SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3833SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3834,
+ OPERAND_dsp340050b49a6c_fld3836SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3837SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3838SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3841SMOD_slot0,
+ OPERAND_dsp340050b49a6c_fld3842SMOD_slot0,
+ OPERAND_op0_s18,
+ OPERAND_dsp340050b49a6c_fld2074,
+ OPERAND_dsp340050b49a6c_fld3191LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3192LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3193LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3194LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3195LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3196LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3197LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3198LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3199LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3200LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3201LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3202LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3203LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3204LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3205,
+ OPERAND_dsp340050b49a6c_fld3206,
+ OPERAND_dsp340050b49a6c_fld3207LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3208LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3210LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3212,
+ OPERAND_dsp340050b49a6c_fld3213LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3214,
+ OPERAND_dsp340050b49a6c_fld3215LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3216LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3217,
+ OPERAND_dsp340050b49a6c_fld3218LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3220LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3221LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3222LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3224LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3225,
+ OPERAND_dsp340050b49a6c_fld3226LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3228LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3230,
+ OPERAND_dsp340050b49a6c_fld3231LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3232LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3233,
+ OPERAND_dsp340050b49a6c_fld3234LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3235LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3236,
+ OPERAND_dsp340050b49a6c_fld3237LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3238LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3240LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3241LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3242LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3243LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3244LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3245LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3246,
+ OPERAND_dsp340050b49a6c_fld3247LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3843LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3844,
+ OPERAND_dsp340050b49a6c_fld3845LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3847LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3848LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3849LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3850LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3851LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3853LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3855LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3856LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3857LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3859LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3860LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3861LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3862,
+ OPERAND_dsp340050b49a6c_fld3863LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3864LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3865LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3866LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3867LLR_slot2,
+ OPERAND_dsp340050b49a6c_fld3868,
+ OPERAND_op0_s19,
+ OPERAND_dsp340050b49a6c_fld2034,
+ OPERAND_dsp340050b49a6c_fld3248LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3250LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3251LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3252LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3253LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3254LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3869LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3870,
+ OPERAND_dsp340050b49a6c_fld3872LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3875LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3876LLR_slot1,
+ OPERAND_dsp340050b49a6c_fld3878LLR_slot1,
+ OPERAND_op0_s20,
+ OPERAND_dsp340050b49a6c_fld2071,
+ OPERAND_dsp340050b49a6c_fld3258LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3259LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3260LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3261LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3263LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3264LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3265LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3266LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3267LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3268LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3269LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3270LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3272LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3274LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3275LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3276LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3277LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3278LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3279LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3280LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3281LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3282LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3283LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3284LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3286LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3288LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3289LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3291LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3292LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3293LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3294LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3295LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3296LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3297LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3298LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3299LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3300LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3302LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3303LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3304LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3305LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3306LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3308LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3310LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3311LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3312LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3879LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3881LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3883LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3885LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3887LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3888LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3890LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3892LLR_slot0,
+ OPERAND_dsp340050b49a6c_fld3893LLR_slot0,
+ OPERAND_op0_s21,
+ OPERAND_dsp340050b49a6c_fld3313DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3314,
+ OPERAND_dsp340050b49a6c_fld3315DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3316DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3317DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3318,
+ OPERAND_dsp340050b49a6c_fld3319DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3320DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3321DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3322DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3323DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3324DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3325DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3326DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3327DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3328DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3329DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3330DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3331DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3332DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3333DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3334DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3335DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3336DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3337DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3339DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3340DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3341DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3342DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3345DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3347DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3348DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3349DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3350DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3353DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3354DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3356DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3358DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3360DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3361DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3362DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3363DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3364,
+ OPERAND_dsp340050b49a6c_fld3365DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3366DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3367DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3368DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3369DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3370DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3371DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3372DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3373DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3374DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3375DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3376DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3377DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3378DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3379DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3380DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3381DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3382DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3384DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3385DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3386DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3387DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3388DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3390DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3392DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3394DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3396DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3397DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3399DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3401DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3403DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3404DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3406DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3407,
+ OPERAND_dsp340050b49a6c_fld3408DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3410,
+ OPERAND_dsp340050b49a6c_fld3411DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3412DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3413DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3414DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3415DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3416DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3417DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3418DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3419DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3420DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3421DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3422DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3423DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3424DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3425DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3426DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3427DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3428DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3429DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3430DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3431DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3432DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3433DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3434DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3435DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3436DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3437DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3438DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3439DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3440DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3441DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3442DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3443DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3444DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3445DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3446DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3448DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3450DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3451DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3453DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3454DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3456DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3457DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3458DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3459DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3460DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3461DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3462DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3464DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3465DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3466,
+ OPERAND_dsp340050b49a6c_fld3467DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3468DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3469DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3470DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3471DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3472DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3473DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3474DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3475DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3477DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3478DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3479DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3480DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3481DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3482DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3484DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3894DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3895DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3896DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3897DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3898DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3899DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3900,
+ OPERAND_dsp340050b49a6c_fld3901DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3903DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3904DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3905DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3906DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3907DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3908DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3909DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3910DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3913DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3914DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3916DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3917DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3918DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3919DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3920DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3921DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3922DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3923DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3924DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3925DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3927DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3928DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3929DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3930DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3931DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3933DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3934DUAL_slot2,
+ OPERAND_dsp340050b49a6c_fld3935DUAL_slot2,
+ OPERAND_op0_s22,
+ OPERAND_op0_s23,
+ OPERAND_dsp340050b49a6c_fld2057,
+ OPERAND_dsp340050b49a6c_fld2060,
+ OPERAND_dsp340050b49a6c_fld2066,
+ OPERAND_dsp340050b49a6c_fld2072,
+ OPERAND_dsp340050b49a6c_fld2079,
+ OPERAND_dsp340050b49a6c_fld3487DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3488DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3489DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3490DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3491DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3492DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3493DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3494DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3496DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3497DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3498DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3499DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3500DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3502DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3504DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3505DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3506DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3507DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3508DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3509DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3510DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3511DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3512DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3513DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3514DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3515DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3516DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3517DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3518DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3519DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3520DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3522DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3523DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3524DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3527DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3529DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3530DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3531,
+ OPERAND_dsp340050b49a6c_fld3532DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3533DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3535DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3536DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3537DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3538DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3539DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3541DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3542DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3543DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3544DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3545DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3546DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3547DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3548DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3549DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3550DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3551DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3552,
+ OPERAND_dsp340050b49a6c_fld3553DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3554DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3555DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3556DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3557DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3558DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3559DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3560DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3562DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3563DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3564DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3565DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3566DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3567DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3568DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3569DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3570DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3571DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3572DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3573DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3574DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3575DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3576DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3577DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3578DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3579DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3580DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3581DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3582DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3583DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3584,
+ OPERAND_dsp340050b49a6c_fld3585DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3587DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3588DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3589DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3590DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3591DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3592DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3593DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3594DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3595DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3596DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3597DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3598DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3599DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3600DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3601DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3602,
+ OPERAND_dsp340050b49a6c_fld3603DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3604DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3606DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3607DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3608DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3609DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3610,
+ OPERAND_dsp340050b49a6c_fld3611DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3612DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3613DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3614DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3615DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3616DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3618DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3619,
+ OPERAND_dsp340050b49a6c_fld3620DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3621DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3622DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3623DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3624DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3625DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3626DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3936DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3937DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3938DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3939DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3940DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3941DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3943DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3945DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3946DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3947DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3949DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3950DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3951DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3952DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3954DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3957DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3958DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3959DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3960DUAL_slot0,
+ OPERAND_dsp340050b49a6c_fld3961DUAL_slot0
+};
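+
+/* Orientation note (added commentary, not generator output): the
+   OPERAND_dsp340050b49a6c_fld* entries above appear to be
+   auto-generated field operands for this DSP core configuration,
+   presumably derived from the core's TIE description.  The suffixes
+   (PQ_slot0, ACC2_slot0..2, SMOD_slot0..2, LLR_slot0..2,
+   DUAL_slot0/2) name the VLIW format slot in which each field is
+   decoded, and the OPERAND_op0_s* entries are the per-slot opcode
+   fields. */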
+
+
+/* Iclass table.  Each Iclass_*_args[] array lists an instruction
+   class's explicit operands, and each Iclass_*_stateArgs[] array its
+   implicit processor-state arguments.  The character in the second
+   member gives the access direction: 'i' (read), 'o' (written),
+   'm' (read and written).  Other codes, such as the 's' used below
+   for the ENTRY instruction's ars_entry operand, come straight from
+   the configuration generator. */
+
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
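+
+/*
+ * Illustrative sketch (hypothetical, added for clarity, and excluded
+ * from compilation): one way a consumer might walk an argument array
+ * like Iclass_xt_iclass_entry_stateArgs above and decode the
+ * direction codes.  The example_arg struct is an assumed stand-in
+ * for xtensa_arg_internal so the snippet stays self-contained; it is
+ * not the real internal type.
+ */
+#if 0
+#include <stdio.h>
+
+struct example_arg {
+    int id;      /* operand or state table index */
+    char inout;  /* direction code as used in the tables here */
+};
+
+static const char *example_direction(char inout)
+{
+    switch (inout) {
+    case 'i': return "read";
+    case 'o': return "written";
+    case 'm': return "read and written";
+    default:  return "generator-specific";
+    }
+}
+
+static void example_dump(const struct example_arg *args, int n)
+{
+    int i;
+
+    for (i = 0; i < n; i++) {
+        printf("arg %d: index %d, %s\n",
+               i, args[i].id, example_direction(args[i].inout));
+    }
+}
+#endif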
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'm' }
+};
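+
+/* Note the regular pattern for special registers, visible above for
+   WindowBase/WindowStart and repeated below for LBEG/LEND/LCOUNT,
+   SAR, PS, EPC1-6, EXCSAVE1-6, EPS2-6 and the rest: each register
+   gets an RSR iclass (art written, state read), a WSR iclass (art
+   read, state written), and an XSR iclass (both marked 'm', i.e.
+   exchanged). */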
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'o' },
+ { { STATE_LITBEN }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'm' },
+ { { STATE_LITBEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_176_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
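+/*
+ * RFI s returns from a level-s interrupt: PS is restored from EPS[s] and
+ * execution resumes at EPC[s], so every level's EPC/EPS appears as an
+ * input and the individual PS fields as outputs.
+ */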
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'm' }
+};
+
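+/*
+ * BREAK/BREAK.N only read state: PS.EXCM and PS.INTLEVEL gate whether
+ * the resulting debug exception is actually taken.
+ */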
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
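+/*
+ * RFDO/RFDD return from OCD (on-chip debug) mode, hence the
+ * read-modify-write of InOCDMode; RFDO additionally restores PS and
+ * resumes from the EPC6/EPS6 pair, level 6 being this core's debug
+ * level.
+ */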
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool1_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool4_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool8_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbranch_args[] = {
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bmove_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_RSR_BR_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_brall }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_WSR_BR_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_brall }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_XSR_BR_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_brall }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
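+/*
+ * The FP user registers are decomposed into individual state bits: FCR
+ * holds the rounding mode and the exception-enable bits, FSR the sticky
+ * exception flags.  RUR/WUR therefore read or write each component, and
+ * CPENABLE is read so the access faults while the coprocessor is
+ * disabled.
+ */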
+static xtensa_arg_internal Iclass_rur_fcr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_fcr_stateArgs[] = {
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_InvalidEnable }, 'i' },
+ { { STATE_DivZeroEnable }, 'i' },
+ { { STATE_OverflowEnable }, 'i' },
+ { { STATE_UnderflowEnable }, 'i' },
+ { { STATE_InexactEnable }, 'i' },
+ { { STATE_FPreserved20 }, 'i' },
+ { { STATE_FPreserved5 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fcr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fcr_stateArgs[] = {
+ { { STATE_RoundMode }, 'o' },
+ { { STATE_InvalidEnable }, 'o' },
+ { { STATE_DivZeroEnable }, 'o' },
+ { { STATE_OverflowEnable }, 'o' },
+ { { STATE_UnderflowEnable }, 'o' },
+ { { STATE_InexactEnable }, 'o' },
+ { { STATE_FPreserved20 }, 'o' },
+ { { STATE_FPreserved5 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_fsr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_fsr_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'i' },
+ { { STATE_DivZeroFlag }, 'i' },
+ { { STATE_OverflowFlag }, 'i' },
+ { { STATE_UnderflowFlag }, 'i' },
+ { { STATE_InexactFlag }, 'i' },
+ { { STATE_FPreserved20a }, 'i' },
+ { { STATE_FPreserved7 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fsr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_fsr_stateArgs[] = {
+ { { STATE_InvalidFlag }, 'o' },
+ { { STATE_DivZeroFlag }, 'o' },
+ { { STATE_OverflowFlag }, 'o' },
+ { { STATE_UnderflowFlag }, 'o' },
+ { { STATE_InexactFlag }, 'o' },
+ { { STATE_FPreserved20a }, 'o' },
+ { { STATE_FPreserved7 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
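+/*
+ * Generic FP iclasses: every one reads CPENABLE for the
+ * coprocessor-disabled check, and the arithmetic classes additionally
+ * read the current rounding mode.
+ */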
+static xtensa_arg_internal Iclass_fp_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_stateArgs[] = {
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mac_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mac_stateArgs[] = {
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_cmov_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_cmov_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mov_args[] = {
+ { { OPERAND_frr }, 'm' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mov_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mov2_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_mov2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_cmp_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_frt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_cmp_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_float_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_float_stateArgs[] = {
+ { { STATE_RoundMode }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_int_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' },
+ { { OPERAND_t }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_int_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_rfr_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_frs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_rfr_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_wfr_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_wfr_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsi_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_cimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsi_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsiu_args[] = {
+ { { OPERAND_frt }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_cimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsiu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsx_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsx_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsxu_args[] = {
+ { { OPERAND_frr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_lsxu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssi_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_cimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssi_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssiu_args[] = {
+ { { OPERAND_frt }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_cimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssiu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssx_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssx_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssxu_args[] = {
+ { { OPERAND_frr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_fp_ssxu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
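+/*
+ * The remaining iclasses belong to this core's DSP coprocessor (operands
+ * prefixed dsp340050b49a6c).  The GET_*/SET_* pairs copy between AR/DSP
+ * registers and dedicated coprocessor state, and all of them read
+ * CPENABLE.
+ */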
+static xtensa_arg_internal Iclass_iclass_GET_ARGMAX_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_ARGMAX_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_HSAR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_HSAR_stateArgs[] = {
+ { { STATE_HSAR3 }, 'i' },
+ { { STATE_HSAR2 }, 'i' },
+ { { STATE_HSAR1 }, 'i' },
+ { { STATE_HSAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_HSAR2SAR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_HSAR2SAR_stateArgs[] = {
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_HSAR3 }, 'i' },
+ { { STATE_HSAR2 }, 'i' },
+ { { STATE_HSAR1 }, 'i' },
+ { { STATE_HSAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_INTERP_EXT_N_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_INTERP_EXT_N_stateArgs[] = {
+ { { STATE_INTERP_EXT_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_INTERP_EXT_L_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_INTERP_EXT_L_stateArgs[] = {
+ { { STATE_INTERP_EXT_L }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_LLR_BUF_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper70_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_LLR_BUF_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_LLR_POS_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_LLR_POS_stateArgs[] = {
+ { { STATE_LLR_POS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_MAX_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_MAX_stateArgs[] = {
+ { { STATE_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_NCO_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_NCO_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PERM_REG_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PERM_REG_stateArgs[] = {
+ { { STATE_PERM_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PHASOR_N_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PHASOR_N_stateArgs[] = {
+ { { STATE_PHASOR_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PHASOR_OFFSET_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_PHASOR_OFFSET_stateArgs[] = {
+ { { STATE_PHASOR_OFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SAR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SAR_stateArgs[] = {
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SCALE_REG_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SCALE_REG_stateArgs[] = {
+ { { STATE_SCALE_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_BUF_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_BUF_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_OFFSET_TABLE_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_OFFSET_TABLE_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_POS_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SMOD_POS_stateArgs[] = {
+ { { STATE_SMOD_POS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SOV_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_SOV_stateArgs[] = {
+ { { STATE_SOV }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_WGHT_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_GET_WGHT_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_ARGMAX_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_ARGMAX_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_EXT_REGS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper88_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_EXT_REGS_stateArgs[] = {
+ { { STATE_INTERP_EXT_N }, 'o' },
+ { { STATE_INTERP_EXT_L }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_HSAR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_HSAR_stateArgs[] = {
+ { { STATE_HSAR0 }, 'o' },
+ { { STATE_HSAR1 }, 'o' },
+ { { STATE_HSAR2 }, 'o' },
+ { { STATE_HSAR3 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_LLR_BUF_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_LLR_BUF_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_LLR_POS_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_LLR_POS_stateArgs[] = {
+ { { STATE_LLR_POS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_MAX_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_MAX_stateArgs[] = {
+ { { STATE_MAX_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_NCO_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_NCO_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PERM_REG_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PERM_REG_stateArgs[] = {
+ { { STATE_PERM_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PHASOR_N_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PHASOR_N_stateArgs[] = {
+ { { STATE_PHASOR_N }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PHASOR_OFFSET_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_PHASOR_OFFSET_stateArgs[] = {
+ { { STATE_PHASOR_OFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SAR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SAR_stateArgs[] = {
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SCALE_REG_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SCALE_REG_stateArgs[] = {
+ { { STATE_SCALE_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_BUF_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_BUF_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_OFFSET_TABLE_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_OFFSET_TABLE_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_POS_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SMOD_POS_stateArgs[] = {
+ { { STATE_SMOD_POS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SOV_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_SOV_stateArgs[] = {
+ { { STATE_SOV }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_WGHT_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SET_WGHT_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
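+/*
+ * Coprocessor load iclasses (LAC*/LCM*/LP/LQ); the SAC*/SCM*/STORE_*
+ * stores further down mirror them with the data register read instead of
+ * written.  Variants suffixed _PINC/_U/_XU update their address register
+ * in place, which is why that operand is marked 'm'.
+ */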
+static xtensa_arg_internal Iclass_iclass_LAC2X32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper72_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper73_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X32_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper74_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper74_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper74_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper74_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC2X64_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC32_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper72_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper75_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC32_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_IH_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_IH_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_IL_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_IL_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_RH_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_RH_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_RL_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LAC_RL_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper74_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_PINC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_PINC_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_PINC_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_PINC_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_U_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_U_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_XU_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LCM_XU_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LP_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LP_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper76_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LQ_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LQ_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper80_reg }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper80_reg }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper80_reg }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper77_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper80_reg }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper72_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper84_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X32_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC2X64_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC32_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper72_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper86_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC32_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_IH_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_IH_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_IL_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_IL_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_RH_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_RH_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_RL_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SAC_RL_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_PINC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_PINC_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_PINC_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_PINC_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_U_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper87_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_U_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_X_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_X_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_XU_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SCM_XU_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STORE_P_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STORE_P_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STORE_Q_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper79_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper85_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STORE_Q_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_DUP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_DUP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2CM_LN_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2PQ_LN_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper57_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2PQ_LN_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2SAR_DUP_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AR2SAR_DUP_stateArgs[] = {
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRAC_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRCM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper64_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_I_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper64_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_R_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper64_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CM2AR_LN_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_COMB_AR_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CONJ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CONJ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC32_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC32_I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC32_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper71_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC32_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2CM2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2CM2PQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper66_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_I2R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_I2R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper66_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_R2I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAC_R2I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVAR2_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCM2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper81_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCM2PQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
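+/*
+ * MOVCND_0..7 and MOVCND8_0..3 are conditional moves selecting on ars;
+ * the destination is 'm' because a failed condition leaves its previous
+ * contents intact.
+ */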
+static xtensa_arg_internal Iclass_iclass_MOVCND_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_4_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_5_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_5_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_6_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_6_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_7_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND_7_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_4_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_5_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_5_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_6_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_6_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_7_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVCND8_7_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV_I_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVPQ2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOVPQ2PQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NEGCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NEGCM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP16LLR_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP16LLR_1_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_LLR_POS }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PQ2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PQ2CM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SWAPAC_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SWAPAC_R_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SWAPAC_RI_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SWAPAC_RI_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SWAPB_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD2AC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD2AC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOT_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOTAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOTAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOTACS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CDOTACS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMACS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMACS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY2CM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPY2PQ_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPYS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPYS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPYXP2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMPYXP2PQ_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_COMB32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper66_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper67_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_COMB32_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'i' },
+ { { STATE_SCALE_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOT_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOTAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOTAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOTACS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_DOTACS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LIN_INT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LIN_INT_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_INTERP_EXT_L }, 'i' },
+ { { STATE_INTERP_EXT_N }, 'i' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_SCALE_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LLRPRE1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LLRPRE1_stateArgs[] = {
+ { { STATE_PERM_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LLRPRE2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper66_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LLRPRE2_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_LLR_POS }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAC8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAC8_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACD8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACD8_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_2_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACPQXP_3_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP2_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP2_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP2_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP2_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_2_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MACXP_3_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MOV2AC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY2CM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY2PQ_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPY8_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYADD8_2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYADD8_2CM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYD8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYD8_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_2_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYPQXP_3_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2PQ_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP2_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_0_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_1_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_2_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MPYXP_3_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACD_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACD_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACPQ_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACPQ_I_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACPQ_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMACPQ_R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMD_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMD_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMPYPQ_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMPYPQ_I_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMPYPQ_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NORMPYPQ_R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMPY_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMPY_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMPY2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RCMPY2CM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIR_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRA_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRA_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRD_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRD_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRDA_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper54_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RFIRDA_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMPY_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMPY_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMPY2CM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_RMPY2CM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_ALIGN_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper63_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_ALIGN_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_SMOD_POS }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_SCR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper66_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_SCR_stateArgs[] = {
+ { { STATE_PERM_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUB2AC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUB2AC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WGHT32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper50_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper62_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WGHT32_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'i' },
+ { { STATE_SCALE_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
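+/* STATE_EXPSTATE is a TIE export state (driven out of the core and updated
+   as an 'm' stateArg by the CLRTIEP/SETTIEP/WRTIEP iclasses below), while
+   INTERFACE_IMPWIRE, read by RDTIEP, is a TIE import wire sampled from
+   outside the core.  */
+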
+static xtensa_arg_internal Iclass_iclass_CLRTIEP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRTIEP_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
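+/* The xtensa_interface *_intfArgs tables list the external TIE ports an
+   iclass references: the INQn/OUTQn names denote 32- and 128-bit input and
+   output queue interfaces, and LU128_In/LU128_Out a 128-bit lookup
+   interface.  */
+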
+static xtensa_interface Iclass_iclass_EXT_2FIFO_0_intfArgs[] = {
+ INTERFACE_OUTQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_2FIFO_1_intfArgs[] = {
+ INTERFACE_OUTQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_2FIFO_2_intfArgs[] = {
+ INTERFACE_OUTQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_2FIFO_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_2FIFO_3_intfArgs[] = {
+ INTERFACE_OUTQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_R2FIFO_0_intfArgs[] = {
+ INTERFACE_OUTQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_R2FIFO_1_intfArgs[] = {
+ INTERFACE_OUTQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_R2FIFO_2_intfArgs[] = {
+ INTERFACE_OUTQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R2FIFO_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_EXT_R2FIFO_3_intfArgs[] = {
+ INTERFACE_OUTQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_AR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_AR_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_AR_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_IEXT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_IEXT_stateArgs[] = {
+ { { STATE_INTERP_EXT_L }, 'i' },
+ { { STATE_INTERP_EXT_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_IEXT_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_PHASOR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_PHASOR_stateArgs[] = {
+ { { STATE_PHASOR_N }, 'i' },
+ { { STATE_PHASOR_OFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_PHASOR_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_REXT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_REXT_stateArgs[] = {
+ { { STATE_INTERP_EXT_L }, 'i' },
+ { { STATE_INTERP_EXT_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_REXT_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_WRITE_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper64_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LUT_WRITE_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_LUT_WRITE_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_0_intfArgs[] = {
+ INTERFACE_OUTQ0_128,
+ INTERFACE_INQ0_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_1_intfArgs[] = {
+ INTERFACE_OUTQ1_128,
+ INTERFACE_INQ1_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_2_intfArgs[] = {
+ INTERFACE_OUTQ2_128,
+ INTERFACE_INQ2_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_3_intfArgs[] = {
+ INTERFACE_OUTQ3_128,
+ INTERFACE_INQ3_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_4_intfArgs[] = {
+ INTERFACE_OUTQ4_128,
+ INTERFACE_INQ4_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ128_5_intfArgs[] = {
+ INTERFACE_OUTQ5_128,
+ INTERFACE_INQ5_128
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ32_0_intfArgs[] = {
+ INTERFACE_OUTQ0_32,
+ INTERFACE_INQ0_32
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ32_1_intfArgs[] = {
+ INTERFACE_OUTQ1_32,
+ INTERFACE_INQ1_32
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ32_2_intfArgs[] = {
+ INTERFACE_OUTQ2_32,
+ INTERFACE_INQ2_32
+};
+
+static xtensa_interface Iclass_iclass_MOVEQ32_3_intfArgs[] = {
+ INTERFACE_OUTQ3_32,
+ INTERFACE_INQ3_32
+};
+
+static xtensa_arg_internal Iclass_iclass_NCO_UPDATE_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NCO_UPDATE_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'm' },
+ { { STATE_PHASOR_N }, 'i' },
+ { { STATE_PHASOR_OFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_NCO_UPDATE_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_0_intfArgs[] = {
+ INTERFACE_INQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_1_intfArgs[] = {
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2_intfArgs[] = {
+ INTERFACE_INQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_3_intfArgs[] = {
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_4_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_4_intfArgs[] = {
+ INTERFACE_INQ4_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_5_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_5_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_5_intfArgs[] = {
+ INTERFACE_INQ5_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2CMPQ_0_intfArgs[] = {
+ INTERFACE_INQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2CMPQ_1_intfArgs[] = {
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2CMPQ_2_intfArgs[] = {
+ INTERFACE_INQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2CMPQ_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2CMPQ_3_intfArgs[] = {
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2M_0_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper83_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2M_0_intfArgs[] = {
+ INTERFACE_INQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2M_1_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper83_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2M_1_intfArgs[] = {
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2M_2_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper83_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2M_2_intfArgs[] = {
+ INTERFACE_INQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2M_3_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper83_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2M_3_intfArgs[] = {
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_0_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_0_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_0_intfArgs[] = {
+ INTERFACE_INQ0_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_1_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_1_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_1_intfArgs[] = {
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_2_intfArgs[] = {
+ INTERFACE_INQ2_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_3_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_3_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_3_intfArgs[] = {
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_4_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_4_intfArgs[] = {
+ INTERFACE_INQ4_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_5_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP128_2PQ_5_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP128_2PQ_5_intfArgs[] = {
+ INTERFACE_INQ5_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_01_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_01_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP2X128_2PQ_01_intfArgs[] = {
+ INTERFACE_INQ0_128,
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_03_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_03_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP2X128_2PQ_03_intfArgs[] = {
+ INTERFACE_INQ0_128,
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_21_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_21_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP2X128_2PQ_21_intfArgs[] = {
+ INTERFACE_INQ2_128,
+ INTERFACE_INQ1_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_23_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper65_reg }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_POP2X128_2PQ_23_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_POP2X128_2PQ_23_intfArgs[] = {
+ INTERFACE_INQ2_128,
+ INTERFACE_INQ3_128
+};
+
+static xtensa_arg_internal Iclass_iclass_POP32_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_POP32_0_intfArgs[] = {
+ INTERFACE_INQ0_32
+};
+
+static xtensa_arg_internal Iclass_iclass_POP32_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_POP32_1_intfArgs[] = {
+ INTERFACE_INQ1_32
+};
+
+static xtensa_arg_internal Iclass_iclass_POP32_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_POP32_2_intfArgs[] = {
+ INTERFACE_INQ2_32
+};
+
+static xtensa_arg_internal Iclass_iclass_POP32_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_POP32_3_intfArgs[] = {
+ INTERFACE_INQ3_32
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH128_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH128_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
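+/* The *_KILL entries are presumably the TIE queue kill signals that let a
+   speculative push be squashed, and the *_NOTRDY entries (used by QREADY
+   further below) expose per-queue not-ready status.  */
+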
+static xtensa_interface Iclass_iclass_PUSH128_intfArgs[] = {
+ INTERFACE_OUTQ0_128,
+ INTERFACE_OUTQ0_128_KILL,
+ INTERFACE_OUTQ1_128,
+ INTERFACE_OUTQ1_128_KILL,
+ INTERFACE_OUTQ2_128,
+ INTERFACE_OUTQ2_128_KILL,
+ INTERFACE_OUTQ3_128,
+ INTERFACE_OUTQ3_128_KILL,
+ INTERFACE_OUTQ4_128,
+ INTERFACE_OUTQ4_128_KILL,
+ INTERFACE_OUTQ5_128,
+ INTERFACE_OUTQ5_128_KILL
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH128_M_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper52_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper83_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_PUSH128_M_intfArgs[] = {
+ INTERFACE_OUTQ0_128,
+ INTERFACE_OUTQ0_128_KILL,
+ INTERFACE_OUTQ1_128,
+ INTERFACE_OUTQ1_128_KILL,
+ INTERFACE_OUTQ2_128,
+ INTERFACE_OUTQ2_128_KILL,
+ INTERFACE_OUTQ3_128,
+ INTERFACE_OUTQ3_128_KILL
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH128_PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH128_PQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_PUSH128_PQ_intfArgs[] = {
+ INTERFACE_OUTQ0_128,
+ INTERFACE_OUTQ0_128_KILL,
+ INTERFACE_OUTQ1_128,
+ INTERFACE_OUTQ1_128_KILL,
+ INTERFACE_OUTQ2_128,
+ INTERFACE_OUTQ2_128_KILL,
+ INTERFACE_OUTQ3_128,
+ INTERFACE_OUTQ3_128_KILL,
+ INTERFACE_OUTQ4_128,
+ INTERFACE_OUTQ4_128_KILL,
+ INTERFACE_OUTQ5_128,
+ INTERFACE_OUTQ5_128_KILL
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH2X128_PQ_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper56_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper57_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH2X128_PQ_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_PUSH2X128_PQ_intfArgs[] = {
+ INTERFACE_OUTQ0_128,
+ INTERFACE_OUTQ1_128,
+ INTERFACE_OUTQ2_128,
+ INTERFACE_OUTQ3_128,
+ INTERFACE_OUTQ0_128_KILL,
+ INTERFACE_OUTQ1_128_KILL,
+ INTERFACE_OUTQ2_128_KILL,
+ INTERFACE_OUTQ3_128_KILL
+};
+
+static xtensa_arg_internal Iclass_iclass_PUSH32_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_PUSH32_intfArgs[] = {
+ INTERFACE_OUTQ0_32,
+ INTERFACE_OUTQ0_32_KILL,
+ INTERFACE_OUTQ1_32,
+ INTERFACE_OUTQ1_32_KILL,
+ INTERFACE_OUTQ2_32,
+ INTERFACE_OUTQ2_32_KILL,
+ INTERFACE_OUTQ3_32,
+ INTERFACE_OUTQ3_32_KILL,
+ INTERFACE_OUTQ4_32,
+ INTERFACE_OUTQ4_32_KILL,
+ INTERFACE_OUTQ5_32,
+ INTERFACE_OUTQ5_32_KILL
+};
+
+static xtensa_arg_internal Iclass_iclass_QREADY_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_QREADY_intfArgs[] = {
+ INTERFACE_INQ0_32_NOTRDY,
+ INTERFACE_INQ1_32_NOTRDY,
+ INTERFACE_INQ2_32_NOTRDY,
+ INTERFACE_INQ3_32_NOTRDY,
+ INTERFACE_OUTQ0_32_NOTRDY,
+ INTERFACE_OUTQ1_32_NOTRDY,
+ INTERFACE_OUTQ2_32_NOTRDY,
+ INTERFACE_OUTQ3_32_NOTRDY,
+ INTERFACE_OUTQ4_32_NOTRDY,
+ INTERFACE_OUTQ5_32_NOTRDY,
+ INTERFACE_INQ0_128_NOTRDY,
+ INTERFACE_INQ1_128_NOTRDY,
+ INTERFACE_INQ2_128_NOTRDY,
+ INTERFACE_INQ3_128_NOTRDY,
+ INTERFACE_INQ4_128_NOTRDY,
+ INTERFACE_INQ5_128_NOTRDY,
+ INTERFACE_OUTQ0_128_NOTRDY,
+ INTERFACE_OUTQ1_128_NOTRDY,
+ INTERFACE_OUTQ2_128_NOTRDY,
+ INTERFACE_OUTQ3_128_NOTRDY,
+ INTERFACE_OUTQ4_128_NOTRDY,
+ INTERFACE_OUTQ5_128_NOTRDY,
+ INTERFACE_SIGNALQ_NOTRDY
+};
+
+static xtensa_arg_internal Iclass_iclass_RDTIEP_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_interface Iclass_iclass_RDTIEP_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
+static xtensa_arg_internal Iclass_iclass_SETTIEP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETTIEP_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_LUT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper78_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper57_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMOD_LUT_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_SMOD_LUT_intfArgs[] = {
+ INTERFACE_LU128_Out,
+ INTERFACE_LU128_In
+};
+
+static xtensa_arg_internal Iclass_iclass_WRTBSIGQ_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_WRTBSIGQ_intfArgs[] = {
+ INTERFACE_SIGNALQ
+};
+
+static xtensa_arg_internal Iclass_iclass_WRTBSIGQM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_WRTBSIGQM_intfArgs[] = {
+ INTERFACE_SIGNALQ
+};
+
+static xtensa_arg_internal Iclass_iclass_WRTIEP_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRTIEP_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRTSIGQ_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_WRTSIGQ_intfArgs[] = {
+ INTERFACE_SIGNALQ
+};
+
+static xtensa_arg_internal Iclass_iclass_ABS8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ABS8_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD16_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper49_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD16_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADD32_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_I2R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_I2R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_R2I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAC_R2I_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDAR2_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDCM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDWRP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ADDWRP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AND128_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_AND128_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ARGMAX8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ARGMAX8_stateArgs[] = {
+ { { STATE_MAX_REG }, 'm' },
+ { { STATE_ARG_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASL_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASL_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASL32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASL32_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLACM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper58_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLACM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLM32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper58_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASLM32_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASR_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASR_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASR32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASR32_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASRAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASRAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASRM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_ASRM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_BITFEXT_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper60_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_BITFINS_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper61_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper60_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLB_C_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLB_C_stateArgs[] = {
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLB_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLB_R_stateArgs[] = {
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP8_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP8_stateArgs[] = {
+ { { STATE_SOV }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP_I_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP_I_stateArgs[] = {
+ { { STATE_SOV }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP_R_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper47_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CMP_R_stateArgs[] = {
+ { { STATE_SOV }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper68_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper69_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper68_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper69_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT_R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT32_I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT32_I_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT32_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper48_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXT32_R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXTUI4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_EXTUI4_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LSLM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LSLM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LSRM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper59_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_LSRM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAX8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MAX8_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MEAN_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MEAN_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MEAN32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MEAN32_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MIN8_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MIN8_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MINCLB_C_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MINCLB_C_stateArgs[] = {
+ { { STATE_HSAR3 }, 'm' },
+ { { STATE_HSAR2 }, 'm' },
+ { { STATE_HSAR1 }, 'm' },
+ { { STATE_HSAR0 }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MINCLB_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_MINCLB_R_stateArgs[] = {
+ { { STATE_HSAR3 }, 'm' },
+ { { STATE_HSAR2 }, 'm' },
+ { { STATE_HSAR1 }, 'm' },
+ { { STATE_HSAR0 }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NOT128_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_NOT128_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_OR128_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_OR128_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PERM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper82_reg }, 'm' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_PERM_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC2_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC2_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC4_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDAC4_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDACS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_REDACS_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMINCLB_C_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMINCLB_C_stateArgs[] = {
+ { { STATE_HSAR3 }, 'o' },
+ { { STATE_HSAR2 }, 'o' },
+ { { STATE_HSAR1 }, 'o' },
+ { { STATE_HSAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMINCLB_R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SMINCLB_R_stateArgs[] = {
+ { { STATE_HSAR3 }, 'o' },
+ { { STATE_HSAR2 }, 'o' },
+ { { STATE_HSAR1 }, 'o' },
+ { { STATE_HSAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STSWAPBM_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper89_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_STSWAPBMU_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper89_imm }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUB32_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper53_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUB32_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBAC_I2R_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBAC_I2R_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBAC_R2I_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper51_reg }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBAC_R2I_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBARX_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBCM_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBCM_stateArgs[] = {
+ { { STATE_SOV }, 'm' },
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBMEAN_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBMEAN_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBWRP_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SUBWRP_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_TRANS_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_TRANS_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_XOR128_args[] = {
+ { { OPERAND_dsp340050b49a6c_oper45_reg }, 'o' },
+ { { OPERAND_dsp340050b49a6c_oper55_reg }, 'i' },
+ { { OPERAND_dsp340050b49a6c_oper46_reg }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_XOR128_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
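+/*
+ * The Iclass_rur_xxx and Iclass_wur_xxx tables below cover the RUR and
+ * WUR forms that read or write the TIE states exported as user
+ * registers (EXPSTATE, SOV, SAT_MODE, the SAR/HSAR fields, the
+ * LLR/SMOD buffers and so on): each read lists its state as 'i', each
+ * write as 'o' or 'm'.
+ */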
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sov_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sov_stateArgs[] = {
+ { { STATE_SOV }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sov_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sov_stateArgs[] = {
+ { { STATE_SOV }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_sat_mode_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sat_mode_stateArgs[] = {
+ { { STATE_SAT_MODE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sat_mode_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sat_mode_stateArgs[] = {
+ { { STATE_SAT_MODE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar0_stateArgs[] = {
+ { { STATE_SAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar0_stateArgs[] = {
+ { { STATE_SAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar1_stateArgs[] = {
+ { { STATE_SAR1 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar1_stateArgs[] = {
+ { { STATE_SAR1 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar2_stateArgs[] = {
+ { { STATE_SAR2 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar2_stateArgs[] = {
+ { { STATE_SAR2 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_sar3_stateArgs[] = {
+ { { STATE_SAR3 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_sar3_stateArgs[] = {
+ { { STATE_SAR3 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar0_stateArgs[] = {
+ { { STATE_HSAR0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar0_stateArgs[] = {
+ { { STATE_HSAR0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar1_stateArgs[] = {
+ { { STATE_HSAR1 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar1_stateArgs[] = {
+ { { STATE_HSAR1 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar2_stateArgs[] = {
+ { { STATE_HSAR2 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar2_stateArgs[] = {
+ { { STATE_HSAR2 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_hsar3_stateArgs[] = {
+ { { STATE_HSAR3 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_hsar3_stateArgs[] = {
+ { { STATE_HSAR3 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_0_stateArgs[] = {
+ { { STATE_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_0_stateArgs[] = {
+ { { STATE_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_1_stateArgs[] = {
+ { { STATE_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_1_stateArgs[] = {
+ { { STATE_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_2_stateArgs[] = {
+ { { STATE_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_2_stateArgs[] = {
+ { { STATE_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_max_reg_3_stateArgs[] = {
+ { { STATE_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_max_reg_3_stateArgs[] = {
+ { { STATE_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_0_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_0_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_1_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_1_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_2_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_2_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_arg_max_reg_3_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_arg_max_reg_3_stateArgs[] = {
+ { { STATE_ARG_MAX_REG }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_0_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_0_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_1_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_1_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_2_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_2_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_nco_counter_3_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_nco_counter_3_stateArgs[] = {
+ { { STATE_NCO_COUNTER }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_interp_ext_n_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_interp_ext_n_stateArgs[] = {
+ { { STATE_INTERP_EXT_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_interp_ext_n_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_interp_ext_n_stateArgs[] = {
+ { { STATE_INTERP_EXT_N }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_interp_ext_l_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_interp_ext_l_stateArgs[] = {
+ { { STATE_INTERP_EXT_L }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_interp_ext_l_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_interp_ext_l_stateArgs[] = {
+ { { STATE_INTERP_EXT_L }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_0_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_0_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_1_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_1_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_2_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_2_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_3_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_3_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_4_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_4_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_4_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_5_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_5_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_5_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_6_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_6_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_6_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_7_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_7_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_7_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_8_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_8_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_8_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_8_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_9_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_9_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_9_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_9_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_10_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_10_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_10_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_10_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_11_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_11_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_11_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_11_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_12_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_12_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_12_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_12_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_13_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_13_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_13_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_13_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_14_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_14_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_14_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_14_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_15_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_15_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_15_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_15_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_16_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_16_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_16_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_16_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_17_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_17_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_17_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_17_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_18_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_18_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_18_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_18_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_19_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_19_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_19_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_19_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_20_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_20_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_20_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_20_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_21_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_21_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_21_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_21_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_22_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_22_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_22_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_22_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_23_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_buf_23_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_23_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_buf_23_stateArgs[] = {
+ { { STATE_LLR_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_0_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_0_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_1_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_1_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_2_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_2_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_3_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_3_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_4_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_4_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_4_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_5_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_5_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_5_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_6_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_6_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_6_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_7_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_buf_7_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_buf_7_stateArgs[] = {
+ { { STATE_SMOD_BUF }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_weight_reg_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_weight_reg_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_weight_reg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_weight_reg_stateArgs[] = {
+ { { STATE_WEIGHT_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_scale_reg_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_scale_reg_stateArgs[] = {
+ { { STATE_SCALE_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_scale_reg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_scale_reg_stateArgs[] = {
+ { { STATE_SCALE_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_pos_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_llr_pos_stateArgs[] = {
+ { { STATE_LLR_POS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_pos_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_llr_pos_stateArgs[] = {
+ { { STATE_LLR_POS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_pos_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_pos_stateArgs[] = {
+ { { STATE_SMOD_POS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_pos_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_pos_stateArgs[] = {
+ { { STATE_SMOD_POS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_perm_reg_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_perm_reg_stateArgs[] = {
+ { { STATE_PERM_REG }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_perm_reg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_perm_reg_stateArgs[] = {
+ { { STATE_PERM_REG }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_0_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_0_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_1_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_1_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_1_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_2_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_2_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_2_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_3_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_smod_offset_table_3_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_smod_offset_table_3_stateArgs[] = {
+ { { STATE_SMOD_OFFSET_TABLE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_phasor_n_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_phasor_n_stateArgs[] = {
+ { { STATE_PHASOR_N }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_phasor_n_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_phasor_n_stateArgs[] = {
+ { { STATE_PHASOR_N }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_phasor_offset_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_phasor_offset_stateArgs[] = {
+ { { STATE_PHASOR_OFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_phasor_offset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_phasor_offset_stateArgs[] = {
+ { { STATE_PHASOR_OFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
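+/*
+ * Top-level iclass table.  Each initializer is presumably
+ * { num_operands, operands, num_stateOperands, stateOperands,
+ *   num_interfaceOperands, interfaceOperands } in the layout of
+ * xtensa_iclass_internal; iclasses with nothing in a category carry a
+ * zero count and a null (0) pointer, with the iclass name kept in a
+ * comment.
+ */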
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 2, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 1, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 1, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 4, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 5, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 1, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 1, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 1, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 1, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 1, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 1, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 6, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 2, Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_176_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_176_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_208_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 6, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 6, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 6, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 1, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 1, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 1, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 1, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 1, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 1, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 1, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 1, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 1, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 1, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 1, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 1, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 1, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 1, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 1, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 1, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 1, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 1, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 1, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 1, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 1, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 1, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 1, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 1, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 1, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 1, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 1, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 1, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 1, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 1, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 1, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 1, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 1, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 1, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 1, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 1, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 1, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 1, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 1, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 1, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 1, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 1, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 1, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 1, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 1, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 1, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 1, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 1, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 1, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 1, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 1, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 1, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 1, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 1, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 1, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 1, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 1, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 2, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 1, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 1, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 1, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 1, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 1, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 18, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 1, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 1, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 2, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 2, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 1, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 1, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 1, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 2, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 2, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 2, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 1, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 2, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 2, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 1, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 1, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 1, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 1, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 2, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 2, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 9, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 1, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_bbool1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbranch_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bmove_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_RSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_WSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_XSR_BR_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 1, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 2, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 2, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 1, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 2, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 2, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 1, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 2, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 2, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 1, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 1, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 1, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 1, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 1, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 1, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 2, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 2, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rer */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_wer */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_fcr_args,
+ 9, Iclass_rur_fcr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_fcr_args,
+ 9, Iclass_wur_fcr_stateArgs, 0, 0 },
+ { 1, Iclass_rur_fsr_args,
+ 8, Iclass_rur_fsr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_fsr_args,
+ 8, Iclass_wur_fsr_stateArgs, 0, 0 },
+ { 3, Iclass_fp_args,
+ 2, Iclass_fp_stateArgs, 0, 0 },
+ { 3, Iclass_fp_mac_args,
+ 2, Iclass_fp_mac_stateArgs, 0, 0 },
+ { 3, Iclass_fp_cmov_args,
+ 1, Iclass_fp_cmov_stateArgs, 0, 0 },
+ { 3, Iclass_fp_mov_args,
+ 1, Iclass_fp_mov_stateArgs, 0, 0 },
+ { 2, Iclass_fp_mov2_args,
+ 1, Iclass_fp_mov2_stateArgs, 0, 0 },
+ { 3, Iclass_fp_cmp_args,
+ 1, Iclass_fp_cmp_stateArgs, 0, 0 },
+ { 3, Iclass_fp_float_args,
+ 2, Iclass_fp_float_stateArgs, 0, 0 },
+ { 3, Iclass_fp_int_args,
+ 1, Iclass_fp_int_stateArgs, 0, 0 },
+ { 2, Iclass_fp_rfr_args,
+ 1, Iclass_fp_rfr_stateArgs, 0, 0 },
+ { 2, Iclass_fp_wfr_args,
+ 1, Iclass_fp_wfr_stateArgs, 0, 0 },
+ { 3, Iclass_fp_lsi_args,
+ 1, Iclass_fp_lsi_stateArgs, 0, 0 },
+ { 3, Iclass_fp_lsiu_args,
+ 1, Iclass_fp_lsiu_stateArgs, 0, 0 },
+ { 3, Iclass_fp_lsx_args,
+ 1, Iclass_fp_lsx_stateArgs, 0, 0 },
+ { 3, Iclass_fp_lsxu_args,
+ 1, Iclass_fp_lsxu_stateArgs, 0, 0 },
+ { 3, Iclass_fp_ssi_args,
+ 1, Iclass_fp_ssi_stateArgs, 0, 0 },
+ { 3, Iclass_fp_ssiu_args,
+ 1, Iclass_fp_ssiu_stateArgs, 0, 0 },
+ { 3, Iclass_fp_ssx_args,
+ 1, Iclass_fp_ssx_stateArgs, 0, 0 },
+ { 3, Iclass_fp_ssxu_args,
+ 1, Iclass_fp_ssxu_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_ARGMAX_args,
+ 2, Iclass_iclass_GET_ARGMAX_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_HSAR_args,
+ 5, Iclass_iclass_GET_HSAR_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_HSAR2SAR_args,
+ 9, Iclass_iclass_GET_HSAR2SAR_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_INTERP_EXT_N_args,
+ 2, Iclass_iclass_GET_INTERP_EXT_N_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_INTERP_EXT_L_args,
+ 2, Iclass_iclass_GET_INTERP_EXT_L_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_GET_LLR_BUF_args,
+ 2, Iclass_iclass_GET_LLR_BUF_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_LLR_POS_args,
+ 2, Iclass_iclass_GET_LLR_POS_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_MAX_args,
+ 2, Iclass_iclass_GET_MAX_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_NCO_args,
+ 2, Iclass_iclass_GET_NCO_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_PERM_REG_args,
+ 2, Iclass_iclass_GET_PERM_REG_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_PHASOR_N_args,
+ 2, Iclass_iclass_GET_PHASOR_N_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_PHASOR_OFFSET_args,
+ 2, Iclass_iclass_GET_PHASOR_OFFSET_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_SAR_args,
+ 5, Iclass_iclass_GET_SAR_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_SCALE_REG_args,
+ 2, Iclass_iclass_GET_SCALE_REG_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_GET_SMOD_BUF_args,
+ 2, Iclass_iclass_GET_SMOD_BUF_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_SMOD_OFFSET_TABLE_args,
+ 2, Iclass_iclass_GET_SMOD_OFFSET_TABLE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_SMOD_POS_args,
+ 2, Iclass_iclass_GET_SMOD_POS_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_SOV_args,
+ 2, Iclass_iclass_GET_SOV_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_GET_WGHT_args,
+ 2, Iclass_iclass_GET_WGHT_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_ARGMAX_args,
+ 2, Iclass_iclass_SET_ARGMAX_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SET_EXT_REGS_args,
+ 3, Iclass_iclass_SET_EXT_REGS_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_HSAR_args,
+ 5, Iclass_iclass_SET_HSAR_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SET_LLR_BUF_args,
+ 2, Iclass_iclass_SET_LLR_BUF_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_LLR_POS_args,
+ 2, Iclass_iclass_SET_LLR_POS_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_MAX_args,
+ 2, Iclass_iclass_SET_MAX_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_NCO_args,
+ 2, Iclass_iclass_SET_NCO_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_PERM_REG_args,
+ 2, Iclass_iclass_SET_PERM_REG_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_PHASOR_N_args,
+ 2, Iclass_iclass_SET_PHASOR_N_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_PHASOR_OFFSET_args,
+ 2, Iclass_iclass_SET_PHASOR_OFFSET_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_SAR_args,
+ 5, Iclass_iclass_SET_SAR_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_SCALE_REG_args,
+ 2, Iclass_iclass_SET_SCALE_REG_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SET_SMOD_BUF_args,
+ 2, Iclass_iclass_SET_SMOD_BUF_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_SMOD_OFFSET_TABLE_args,
+ 2, Iclass_iclass_SET_SMOD_OFFSET_TABLE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_SMOD_POS_args,
+ 2, Iclass_iclass_SET_SMOD_POS_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_SOV_args,
+ 2, Iclass_iclass_SET_SOV_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SET_WGHT_args,
+ 2, Iclass_iclass_SET_WGHT_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_LAC2X32_args,
+ 1, Iclass_iclass_LAC2X32_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC2X64_0_args,
+ 1, Iclass_iclass_LAC2X64_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC2X64_1_args,
+ 1, Iclass_iclass_LAC2X64_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC2X64_2_args,
+ 1, Iclass_iclass_LAC2X64_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC2X64_3_args,
+ 1, Iclass_iclass_LAC2X64_3_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_LAC32_R_args,
+ 1, Iclass_iclass_LAC32_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC_IH_args,
+ 1, Iclass_iclass_LAC_IH_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC_IL_args,
+ 1, Iclass_iclass_LAC_IL_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC_RH_args,
+ 1, Iclass_iclass_LAC_RH_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LAC_RL_args,
+ 1, Iclass_iclass_LAC_RL_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_args,
+ 1, Iclass_iclass_LCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_PINC_args,
+ 1, Iclass_iclass_LCM_PINC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_PINC_X_args,
+ 1, Iclass_iclass_LCM_PINC_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_U_args,
+ 1, Iclass_iclass_LCM_U_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_X_args,
+ 1, Iclass_iclass_LCM_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LCM_XU_args,
+ 1, Iclass_iclass_LCM_XU_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LP_args,
+ 1, Iclass_iclass_LP_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LP_X_args,
+ 1, Iclass_iclass_LP_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LQ_args,
+ 1, Iclass_iclass_LQ_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LQ_X_args,
+ 1, Iclass_iclass_LQ_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LUT0_args,
+ 1, Iclass_iclass_LUT0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LUT1_args,
+ 1, Iclass_iclass_LUT1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LUT2_args,
+ 1, Iclass_iclass_LUT2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LUT3_args,
+ 1, Iclass_iclass_LUT3_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_SAC2X32_args,
+ 1, Iclass_iclass_SAC2X32_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC2X64_0_args,
+ 1, Iclass_iclass_SAC2X64_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC2X64_1_args,
+ 1, Iclass_iclass_SAC2X64_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC2X64_2_args,
+ 1, Iclass_iclass_SAC2X64_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC2X64_3_args,
+ 1, Iclass_iclass_SAC2X64_3_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_SAC32_R_args,
+ 1, Iclass_iclass_SAC32_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC_IH_args,
+ 1, Iclass_iclass_SAC_IH_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC_IL_args,
+ 1, Iclass_iclass_SAC_IL_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC_RH_args,
+ 1, Iclass_iclass_SAC_RH_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SAC_RL_args,
+ 1, Iclass_iclass_SAC_RL_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_args,
+ 1, Iclass_iclass_SCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_PINC_args,
+ 1, Iclass_iclass_SCM_PINC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_PINC_X_args,
+ 1, Iclass_iclass_SCM_PINC_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_U_args,
+ 1, Iclass_iclass_SCM_U_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_X_args,
+ 1, Iclass_iclass_SCM_X_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SCM_XU_args,
+ 1, Iclass_iclass_SCM_XU_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_STORE_P_args,
+ 1, Iclass_iclass_STORE_P_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_STORE_Q_args,
+ 1, Iclass_iclass_STORE_Q_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_AR2CM_DUP_args,
+ 1, Iclass_iclass_AR2CM_DUP_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_AR2CM_LN_args,
+ 1, Iclass_iclass_AR2CM_LN_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_AR2CM_LN_I_args,
+ 1, Iclass_iclass_AR2CM_LN_I_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_AR2CM_LN_R_args,
+ 1, Iclass_iclass_AR2CM_LN_R_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_AR2PQ_LN_args,
+ 1, Iclass_iclass_AR2PQ_LN_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_AR2SAR_DUP_args,
+ 5, Iclass_iclass_AR2SAR_DUP_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRAC_args,
+ 1, Iclass_iclass_CLRAC_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRCM_args,
+ 1, Iclass_iclass_CLRCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CM2AR_LN_args,
+ 1, Iclass_iclass_CM2AR_LN_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CM2AR_LN_I_args,
+ 1, Iclass_iclass_CM2AR_LN_I_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CM2AR_LN_R_args,
+ 1, Iclass_iclass_CM2AR_LN_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_COMB_AR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_iclass_CONJ_args,
+ 1, Iclass_iclass_CONJ_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOV2AC32_I_args,
+ 1, Iclass_iclass_MOV2AC32_I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOV2AC32_R_args,
+ 1, Iclass_iclass_MOV2AC32_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOV2CM2PQ_args,
+ 1, Iclass_iclass_MOV2CM2PQ_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAC_args,
+ 1, Iclass_iclass_MOVAC_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAC_I_args,
+ 1, Iclass_iclass_MOVAC_I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAC_I2R_args,
+ 1, Iclass_iclass_MOVAC_I2R_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAC_R_args,
+ 1, Iclass_iclass_MOVAC_R_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAC_R2I_args,
+ 1, Iclass_iclass_MOVAC_R2I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVAR2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_iclass_MOVCM_args,
+ 1, Iclass_iclass_MOVCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCM2PQ_args,
+ 1, Iclass_iclass_MOVCM2PQ_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_0_args,
+ 1, Iclass_iclass_MOVCND_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_1_args,
+ 1, Iclass_iclass_MOVCND_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_2_args,
+ 1, Iclass_iclass_MOVCND_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_3_args,
+ 1, Iclass_iclass_MOVCND_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_4_args,
+ 1, Iclass_iclass_MOVCND_4_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_5_args,
+ 1, Iclass_iclass_MOVCND_5_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_6_args,
+ 1, Iclass_iclass_MOVCND_6_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND_7_args,
+ 1, Iclass_iclass_MOVCND_7_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_0_args,
+ 1, Iclass_iclass_MOVCND8_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_1_args,
+ 1, Iclass_iclass_MOVCND8_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_2_args,
+ 1, Iclass_iclass_MOVCND8_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_3_args,
+ 1, Iclass_iclass_MOVCND8_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_4_args,
+ 1, Iclass_iclass_MOVCND8_4_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_5_args,
+ 1, Iclass_iclass_MOVCND8_5_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_6_args,
+ 1, Iclass_iclass_MOVCND8_6_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOVCND8_7_args,
+ 1, Iclass_iclass_MOVCND8_7_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOV_I_args,
+ 1, Iclass_iclass_MOV_I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOVPQ2PQ_args,
+ 1, Iclass_iclass_MOVPQ2PQ_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MOV_R_args,
+ 1, Iclass_iclass_MOV_R_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NEGCM_args,
+ 1, Iclass_iclass_NEGCM_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_POP16LLR_1_args,
+ 3, Iclass_iclass_POP16LLR_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_PQ2CM_args,
+ 1, Iclass_iclass_PQ2CM_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SWAPAC_R_args,
+ 1, Iclass_iclass_SWAPAC_R_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SWAPAC_RI_args,
+ 1, Iclass_iclass_SWAPAC_RI_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SWAPB_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_iclass_ADD2AC_args,
+ 6, Iclass_iclass_ADD2AC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ADDAC_args,
+ 3, Iclass_iclass_ADDAC_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_CDOT_args,
+ 2, Iclass_iclass_CDOT_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_CDOTAC_args,
+ 3, Iclass_iclass_CDOTAC_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_CDOTACS_args,
+ 3, Iclass_iclass_CDOTACS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMAC_args,
+ 3, Iclass_iclass_CMAC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMACS_args,
+ 3, Iclass_iclass_CMACS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMPY_args,
+ 2, Iclass_iclass_CMPY_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMPY2CM_args,
+ 3, Iclass_iclass_CMPY2CM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMPY2PQ_args,
+ 3, Iclass_iclass_CMPY2PQ_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMPYS_args,
+ 2, Iclass_iclass_CMPYS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMPYXP2PQ_args,
+ 3, Iclass_iclass_CMPYXP2PQ_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_COMB32_args,
+ 3, Iclass_iclass_COMB32_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_DOT_args,
+ 2, Iclass_iclass_DOT_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_DOTAC_args,
+ 3, Iclass_iclass_DOTAC_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_DOTACS_args,
+ 3, Iclass_iclass_DOTACS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LIN_INT_args,
+ 6, Iclass_iclass_LIN_INT_stateArgs, 0, 0 },
+ { 6, Iclass_iclass_LLRPRE1_args,
+ 2, Iclass_iclass_LLRPRE1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LLRPRE2_args,
+ 3, Iclass_iclass_LLRPRE2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MAC_args,
+ 3, Iclass_iclass_MAC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MAC8_args,
+ 3, Iclass_iclass_MAC8_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACD8_args,
+ 3, Iclass_iclass_MACD8_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACPQXP_0_args,
+ 3, Iclass_iclass_MACPQXP_0_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACPQXP_1_args,
+ 3, Iclass_iclass_MACPQXP_1_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACPQXP_2_args,
+ 3, Iclass_iclass_MACPQXP_2_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACPQXP_3_args,
+ 3, Iclass_iclass_MACPQXP_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MACS_args,
+ 3, Iclass_iclass_MACS_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACXP2_0_args,
+ 3, Iclass_iclass_MACXP2_0_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MACXP2_1_args,
+ 3, Iclass_iclass_MACXP2_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MACXP_0_args,
+ 3, Iclass_iclass_MACXP_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MACXP_1_args,
+ 3, Iclass_iclass_MACXP_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MACXP_2_args,
+ 3, Iclass_iclass_MACXP_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MACXP_3_args,
+ 3, Iclass_iclass_MACXP_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MOV2AC_args,
+ 6, Iclass_iclass_MOV2AC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPY_args,
+ 2, Iclass_iclass_MPY_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPY2CM_args,
+ 3, Iclass_iclass_MPY2CM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPY2PQ_args,
+ 3, Iclass_iclass_MPY2PQ_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPY8_args,
+ 2, Iclass_iclass_MPY8_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYADD8_2CM_args,
+ 3, Iclass_iclass_MPYADD8_2CM_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYD8_args,
+ 2, Iclass_iclass_MPYD8_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYPQXP_0_args,
+ 2, Iclass_iclass_MPYPQXP_0_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYPQXP_1_args,
+ 2, Iclass_iclass_MPYPQXP_1_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYPQXP_2_args,
+ 2, Iclass_iclass_MPYPQXP_2_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYPQXP_3_args,
+ 2, Iclass_iclass_MPYPQXP_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYS_args,
+ 2, Iclass_iclass_MPYS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYXP2PQ_args,
+ 3, Iclass_iclass_MPYXP2PQ_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYXP2_0_args,
+ 2, Iclass_iclass_MPYXP2_0_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_MPYXP2_1_args,
+ 2, Iclass_iclass_MPYXP2_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYXP_0_args,
+ 2, Iclass_iclass_MPYXP_0_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYXP_1_args,
+ 2, Iclass_iclass_MPYXP_1_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYXP_2_args,
+ 2, Iclass_iclass_MPYXP_2_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MPYXP_3_args,
+ 2, Iclass_iclass_MPYXP_3_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_NORMACD_args,
+ 3, Iclass_iclass_NORMACD_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NORMACPQ_I_args,
+ 3, Iclass_iclass_NORMACPQ_I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NORMACPQ_R_args,
+ 3, Iclass_iclass_NORMACPQ_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_NORMD_args,
+ 2, Iclass_iclass_NORMD_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NORMPYPQ_I_args,
+ 2, Iclass_iclass_NORMPYPQ_I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NORMPYPQ_R_args,
+ 2, Iclass_iclass_NORMPYPQ_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RCMAC_args,
+ 3, Iclass_iclass_RCMAC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RCMPY_args,
+ 2, Iclass_iclass_RCMPY_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RCMPY2CM_args,
+ 1, Iclass_iclass_RCMPY2CM_stateArgs, 0, 0 },
+ { 6, Iclass_iclass_RFIR_args,
+ 3, Iclass_iclass_RFIR_stateArgs, 0, 0 },
+ { 6, Iclass_iclass_RFIRA_args,
+ 3, Iclass_iclass_RFIRA_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_RFIRD_args,
+ 3, Iclass_iclass_RFIRD_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_RFIRDA_args,
+ 3, Iclass_iclass_RFIRDA_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RMAC_args,
+ 3, Iclass_iclass_RMAC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RMPY_args,
+ 2, Iclass_iclass_RMPY_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_RMPY2CM_args,
+ 1, Iclass_iclass_RMPY2CM_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_SMOD_ALIGN_args,
+ 3, Iclass_iclass_SMOD_ALIGN_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SMOD_SCR_args,
+ 2, Iclass_iclass_SMOD_SCR_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SUB2AC_args,
+ 6, Iclass_iclass_SUB2AC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_WGHT32_args,
+ 3, Iclass_iclass_WGHT32_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRTIEP_args,
+ 1, Iclass_iclass_CLRTIEP_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_EXT_2FIFO_0_args,
+ 1, Iclass_iclass_EXT_2FIFO_0_stateArgs, 1, Iclass_iclass_EXT_2FIFO_0_intfArgs },
+ { 3, Iclass_iclass_EXT_2FIFO_1_args,
+ 1, Iclass_iclass_EXT_2FIFO_1_stateArgs, 1, Iclass_iclass_EXT_2FIFO_1_intfArgs },
+ { 3, Iclass_iclass_EXT_2FIFO_2_args,
+ 1, Iclass_iclass_EXT_2FIFO_2_stateArgs, 1, Iclass_iclass_EXT_2FIFO_2_intfArgs },
+ { 3, Iclass_iclass_EXT_2FIFO_3_args,
+ 1, Iclass_iclass_EXT_2FIFO_3_stateArgs, 1, Iclass_iclass_EXT_2FIFO_3_intfArgs },
+ { 3, Iclass_iclass_EXT_R2FIFO_0_args,
+ 1, Iclass_iclass_EXT_R2FIFO_0_stateArgs, 1, Iclass_iclass_EXT_R2FIFO_0_intfArgs },
+ { 3, Iclass_iclass_EXT_R2FIFO_1_args,
+ 1, Iclass_iclass_EXT_R2FIFO_1_stateArgs, 1, Iclass_iclass_EXT_R2FIFO_1_intfArgs },
+ { 3, Iclass_iclass_EXT_R2FIFO_2_args,
+ 1, Iclass_iclass_EXT_R2FIFO_2_stateArgs, 1, Iclass_iclass_EXT_R2FIFO_2_intfArgs },
+ { 3, Iclass_iclass_EXT_R2FIFO_3_args,
+ 1, Iclass_iclass_EXT_R2FIFO_3_stateArgs, 1, Iclass_iclass_EXT_R2FIFO_3_intfArgs },
+ { 3, Iclass_iclass_LUT_args,
+ 1, Iclass_iclass_LUT_stateArgs, 2, Iclass_iclass_LUT_intfArgs },
+ { 3, Iclass_iclass_LUT_AR_args,
+ 1, Iclass_iclass_LUT_AR_stateArgs, 2, Iclass_iclass_LUT_AR_intfArgs },
+ { 3, Iclass_iclass_LUT_IEXT_args,
+ 3, Iclass_iclass_LUT_IEXT_stateArgs, 2, Iclass_iclass_LUT_IEXT_intfArgs },
+ { 2, Iclass_iclass_LUT_PHASOR_args,
+ 3, Iclass_iclass_LUT_PHASOR_stateArgs, 2, Iclass_iclass_LUT_PHASOR_intfArgs },
+ { 3, Iclass_iclass_LUT_REXT_args,
+ 3, Iclass_iclass_LUT_REXT_stateArgs, 2, Iclass_iclass_LUT_REXT_intfArgs },
+ { 3, Iclass_iclass_LUT_WRITE_args,
+ 1, Iclass_iclass_LUT_WRITE_stateArgs, 2, Iclass_iclass_LUT_WRITE_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_0 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_0_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_1 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_1_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_2 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_2_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_3 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_3_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_4 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_4_intfArgs },
+ { 0, 0 /* iclass_MOVEQ128_5 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ128_5_intfArgs },
+ { 0, 0 /* iclass_MOVEQ32_0 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ32_0_intfArgs },
+ { 0, 0 /* iclass_MOVEQ32_1 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ32_1_intfArgs },
+ { 0, 0 /* iclass_MOVEQ32_2 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ32_2_intfArgs },
+ { 0, 0 /* iclass_MOVEQ32_3 */,
+ 0, 0, 2, Iclass_iclass_MOVEQ32_3_intfArgs },
+ { 2, Iclass_iclass_NCO_UPDATE_args,
+ 4, Iclass_iclass_NCO_UPDATE_stateArgs, 2, Iclass_iclass_NCO_UPDATE_intfArgs },
+ { 1, Iclass_iclass_POP128_0_args,
+ 1, Iclass_iclass_POP128_0_stateArgs, 1, Iclass_iclass_POP128_0_intfArgs },
+ { 1, Iclass_iclass_POP128_1_args,
+ 1, Iclass_iclass_POP128_1_stateArgs, 1, Iclass_iclass_POP128_1_intfArgs },
+ { 1, Iclass_iclass_POP128_2_args,
+ 1, Iclass_iclass_POP128_2_stateArgs, 1, Iclass_iclass_POP128_2_intfArgs },
+ { 1, Iclass_iclass_POP128_3_args,
+ 1, Iclass_iclass_POP128_3_stateArgs, 1, Iclass_iclass_POP128_3_intfArgs },
+ { 1, Iclass_iclass_POP128_4_args,
+ 1, Iclass_iclass_POP128_4_stateArgs, 1, Iclass_iclass_POP128_4_intfArgs },
+ { 1, Iclass_iclass_POP128_5_args,
+ 1, Iclass_iclass_POP128_5_stateArgs, 1, Iclass_iclass_POP128_5_intfArgs },
+ { 3, Iclass_iclass_POP128_2CMPQ_0_args,
+ 1, Iclass_iclass_POP128_2CMPQ_0_stateArgs, 1, Iclass_iclass_POP128_2CMPQ_0_intfArgs },
+ { 3, Iclass_iclass_POP128_2CMPQ_1_args,
+ 1, Iclass_iclass_POP128_2CMPQ_1_stateArgs, 1, Iclass_iclass_POP128_2CMPQ_1_intfArgs },
+ { 3, Iclass_iclass_POP128_2CMPQ_2_args,
+ 1, Iclass_iclass_POP128_2CMPQ_2_stateArgs, 1, Iclass_iclass_POP128_2CMPQ_2_intfArgs },
+ { 3, Iclass_iclass_POP128_2CMPQ_3_args,
+ 1, Iclass_iclass_POP128_2CMPQ_3_stateArgs, 1, Iclass_iclass_POP128_2CMPQ_3_intfArgs },
+ { 2, Iclass_iclass_POP128_2M_0_args,
+ 0, 0, 1, Iclass_iclass_POP128_2M_0_intfArgs },
+ { 2, Iclass_iclass_POP128_2M_1_args,
+ 0, 0, 1, Iclass_iclass_POP128_2M_1_intfArgs },
+ { 2, Iclass_iclass_POP128_2M_2_args,
+ 0, 0, 1, Iclass_iclass_POP128_2M_2_intfArgs },
+ { 2, Iclass_iclass_POP128_2M_3_args,
+ 0, 0, 1, Iclass_iclass_POP128_2M_3_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_0_args,
+ 1, Iclass_iclass_POP128_2PQ_0_stateArgs, 1, Iclass_iclass_POP128_2PQ_0_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_1_args,
+ 1, Iclass_iclass_POP128_2PQ_1_stateArgs, 1, Iclass_iclass_POP128_2PQ_1_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_2_args,
+ 1, Iclass_iclass_POP128_2PQ_2_stateArgs, 1, Iclass_iclass_POP128_2PQ_2_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_3_args,
+ 1, Iclass_iclass_POP128_2PQ_3_stateArgs, 1, Iclass_iclass_POP128_2PQ_3_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_4_args,
+ 1, Iclass_iclass_POP128_2PQ_4_stateArgs, 1, Iclass_iclass_POP128_2PQ_4_intfArgs },
+ { 2, Iclass_iclass_POP128_2PQ_5_args,
+ 1, Iclass_iclass_POP128_2PQ_5_stateArgs, 1, Iclass_iclass_POP128_2PQ_5_intfArgs },
+ { 1, Iclass_iclass_POP2X128_2PQ_01_args,
+ 1, Iclass_iclass_POP2X128_2PQ_01_stateArgs, 2, Iclass_iclass_POP2X128_2PQ_01_intfArgs },
+ { 1, Iclass_iclass_POP2X128_2PQ_03_args,
+ 1, Iclass_iclass_POP2X128_2PQ_03_stateArgs, 2, Iclass_iclass_POP2X128_2PQ_03_intfArgs },
+ { 1, Iclass_iclass_POP2X128_2PQ_21_args,
+ 1, Iclass_iclass_POP2X128_2PQ_21_stateArgs, 2, Iclass_iclass_POP2X128_2PQ_21_intfArgs },
+ { 1, Iclass_iclass_POP2X128_2PQ_23_args,
+ 1, Iclass_iclass_POP2X128_2PQ_23_stateArgs, 2, Iclass_iclass_POP2X128_2PQ_23_intfArgs },
+ { 1, Iclass_iclass_POP32_0_args,
+ 0, 0, 1, Iclass_iclass_POP32_0_intfArgs },
+ { 1, Iclass_iclass_POP32_1_args,
+ 0, 0, 1, Iclass_iclass_POP32_1_intfArgs },
+ { 1, Iclass_iclass_POP32_2_args,
+ 0, 0, 1, Iclass_iclass_POP32_2_intfArgs },
+ { 1, Iclass_iclass_POP32_3_args,
+ 0, 0, 1, Iclass_iclass_POP32_3_intfArgs },
+ { 2, Iclass_iclass_PUSH128_args,
+ 1, Iclass_iclass_PUSH128_stateArgs, 12, Iclass_iclass_PUSH128_intfArgs },
+ { 3, Iclass_iclass_PUSH128_M_args,
+ 0, 0, 8, Iclass_iclass_PUSH128_M_intfArgs },
+ { 3, Iclass_iclass_PUSH128_PQ_args,
+ 1, Iclass_iclass_PUSH128_PQ_stateArgs, 12, Iclass_iclass_PUSH128_PQ_intfArgs },
+ { 3, Iclass_iclass_PUSH2X128_PQ_args,
+ 1, Iclass_iclass_PUSH2X128_PQ_stateArgs, 8, Iclass_iclass_PUSH2X128_PQ_intfArgs },
+ { 2, Iclass_iclass_PUSH32_args,
+ 0, 0, 12, Iclass_iclass_PUSH32_intfArgs },
+ { 2, Iclass_iclass_QREADY_args,
+ 0, 0, 23, Iclass_iclass_QREADY_intfArgs },
+ { 1, Iclass_iclass_RDTIEP_args,
+ 0, 0, 1, Iclass_iclass_RDTIEP_intfArgs },
+ { 1, Iclass_iclass_SETTIEP_args,
+ 1, Iclass_iclass_SETTIEP_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_SMOD_LUT_args,
+ 2, Iclass_iclass_SMOD_LUT_stateArgs, 2, Iclass_iclass_SMOD_LUT_intfArgs },
+ { 1, Iclass_iclass_WRTBSIGQ_args,
+ 0, 0, 1, Iclass_iclass_WRTBSIGQ_intfArgs },
+ { 1, Iclass_iclass_WRTBSIGQM_args,
+ 0, 0, 1, Iclass_iclass_WRTBSIGQM_intfArgs },
+ { 2, Iclass_iclass_WRTIEP_args,
+ 1, Iclass_iclass_WRTIEP_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_WRTSIGQ_args,
+ 0, 0, 1, Iclass_iclass_WRTSIGQ_intfArgs },
+ { 2, Iclass_iclass_ABS8_args,
+ 1, Iclass_iclass_ABS8_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_ADD16_args,
+ 3, Iclass_iclass_ADD16_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ADD32_args,
+ 3, Iclass_iclass_ADD32_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_ADDAC_I2R_args,
+ 3, Iclass_iclass_ADDAC_I2R_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_ADDAC_R2I_args,
+ 3, Iclass_iclass_ADDAC_R2I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_ADDAR2_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_iclass_ADDCM_args,
+ 3, Iclass_iclass_ADDCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ADDWRP_args,
+ 1, Iclass_iclass_ADDWRP_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_AND128_args,
+ 1, Iclass_iclass_AND128_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_ARGMAX8_args,
+ 3, Iclass_iclass_ARGMAX8_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASL_args,
+ 3, Iclass_iclass_ASL_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASL32_args,
+ 3, Iclass_iclass_ASL32_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_ASLACM_args,
+ 3, Iclass_iclass_ASLACM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASLM_args,
+ 3, Iclass_iclass_ASLM_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_ASLM32_args,
+ 3, Iclass_iclass_ASLM32_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASR_args,
+ 3, Iclass_iclass_ASR_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASR32_args,
+ 3, Iclass_iclass_ASR32_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_ASRAC_args,
+ 3, Iclass_iclass_ASRAC_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_ASRM_args,
+ 3, Iclass_iclass_ASRM_stateArgs, 0, 0 },
+ { 4, Iclass_iclass_BITFEXT_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_iclass_BITFINS_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_iclass_CLB_C_args,
+ 5, Iclass_iclass_CLB_C_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_CLB_R_args,
+ 5, Iclass_iclass_CLB_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMP8_args,
+ 2, Iclass_iclass_CMP8_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMP_I_args,
+ 2, Iclass_iclass_CMP_I_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_CMP_R_args,
+ 2, Iclass_iclass_CMP_R_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_EXT_args,
+ 6, Iclass_iclass_EXT_stateArgs, 0, 0 },
+ { 5, Iclass_iclass_EXT_R_args,
+ 6, Iclass_iclass_EXT_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_EXT32_I_args,
+ 2, Iclass_iclass_EXT32_I_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_EXT32_R_args,
+ 2, Iclass_iclass_EXT32_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_EXTUI4_args,
+ 1, Iclass_iclass_EXTUI4_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LSLM_args,
+ 1, Iclass_iclass_LSLM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_LSRM_args,
+ 1, Iclass_iclass_LSRM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MAX8_args,
+ 1, Iclass_iclass_MAX8_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MEAN_args,
+ 1, Iclass_iclass_MEAN_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MEAN32_args,
+ 1, Iclass_iclass_MEAN32_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_MIN8_args,
+ 1, Iclass_iclass_MIN8_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MINCLB_C_args,
+ 5, Iclass_iclass_MINCLB_C_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_MINCLB_R_args,
+ 5, Iclass_iclass_MINCLB_R_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_NOT128_args,
+ 1, Iclass_iclass_NOT128_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_OR128_args,
+ 1, Iclass_iclass_OR128_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_PERM_args,
+ 1, Iclass_iclass_PERM_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_REDAC_args,
+ 3, Iclass_iclass_REDAC_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_REDAC2_args,
+ 3, Iclass_iclass_REDAC2_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_REDAC4_args,
+ 3, Iclass_iclass_REDAC4_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_REDACS_args,
+ 3, Iclass_iclass_REDACS_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SMINCLB_C_args,
+ 5, Iclass_iclass_SMINCLB_C_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SMINCLB_R_args,
+ 5, Iclass_iclass_SMINCLB_R_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_STSWAPBM_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_iclass_STSWAPBMU_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_iclass_SUB32_args,
+ 3, Iclass_iclass_SUB32_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SUBAC_I2R_args,
+ 3, Iclass_iclass_SUBAC_I2R_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_SUBAC_R2I_args,
+ 3, Iclass_iclass_SUBAC_R2I_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_SUBARX_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_iclass_SUBCM_args,
+ 3, Iclass_iclass_SUBCM_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SUBMEAN_args,
+ 1, Iclass_iclass_SUBMEAN_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_SUBWRP_args,
+ 1, Iclass_iclass_SUBWRP_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_TRANS_args,
+ 1, Iclass_iclass_TRANS_stateArgs, 0, 0 },
+ { 3, Iclass_iclass_XOR128_args,
+ 1, Iclass_iclass_XOR128_stateArgs, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 1, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 1, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sov_args,
+ 2, Iclass_rur_sov_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sov_args,
+ 2, Iclass_wur_sov_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sat_mode_args,
+ 2, Iclass_rur_sat_mode_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sat_mode_args,
+ 2, Iclass_wur_sat_mode_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sar0_args,
+ 2, Iclass_rur_sar0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sar0_args,
+ 2, Iclass_wur_sar0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sar1_args,
+ 2, Iclass_rur_sar1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sar1_args,
+ 2, Iclass_wur_sar1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sar2_args,
+ 2, Iclass_rur_sar2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sar2_args,
+ 2, Iclass_wur_sar2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_sar3_args,
+ 2, Iclass_rur_sar3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_sar3_args,
+ 2, Iclass_wur_sar3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_hsar0_args,
+ 2, Iclass_rur_hsar0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_hsar0_args,
+ 2, Iclass_wur_hsar0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_hsar1_args,
+ 2, Iclass_rur_hsar1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_hsar1_args,
+ 2, Iclass_wur_hsar1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_hsar2_args,
+ 2, Iclass_rur_hsar2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_hsar2_args,
+ 2, Iclass_wur_hsar2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_hsar3_args,
+ 2, Iclass_rur_hsar3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_hsar3_args,
+ 2, Iclass_wur_hsar3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_max_reg_0_args,
+ 2, Iclass_rur_max_reg_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_max_reg_0_args,
+ 2, Iclass_wur_max_reg_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_max_reg_1_args,
+ 2, Iclass_rur_max_reg_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_max_reg_1_args,
+ 2, Iclass_wur_max_reg_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_max_reg_2_args,
+ 2, Iclass_rur_max_reg_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_max_reg_2_args,
+ 2, Iclass_wur_max_reg_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_max_reg_3_args,
+ 2, Iclass_rur_max_reg_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_max_reg_3_args,
+ 2, Iclass_wur_max_reg_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_arg_max_reg_0_args,
+ 2, Iclass_rur_arg_max_reg_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_arg_max_reg_0_args,
+ 2, Iclass_wur_arg_max_reg_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_arg_max_reg_1_args,
+ 2, Iclass_rur_arg_max_reg_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_arg_max_reg_1_args,
+ 2, Iclass_wur_arg_max_reg_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_arg_max_reg_2_args,
+ 2, Iclass_rur_arg_max_reg_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_arg_max_reg_2_args,
+ 2, Iclass_wur_arg_max_reg_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_arg_max_reg_3_args,
+ 2, Iclass_rur_arg_max_reg_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_arg_max_reg_3_args,
+ 2, Iclass_wur_arg_max_reg_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_nco_counter_0_args,
+ 2, Iclass_rur_nco_counter_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_nco_counter_0_args,
+ 2, Iclass_wur_nco_counter_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_nco_counter_1_args,
+ 2, Iclass_rur_nco_counter_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_nco_counter_1_args,
+ 2, Iclass_wur_nco_counter_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_nco_counter_2_args,
+ 2, Iclass_rur_nco_counter_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_nco_counter_2_args,
+ 2, Iclass_wur_nco_counter_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_nco_counter_3_args,
+ 2, Iclass_rur_nco_counter_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_nco_counter_3_args,
+ 2, Iclass_wur_nco_counter_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_interp_ext_n_args,
+ 2, Iclass_rur_interp_ext_n_stateArgs, 0, 0 },
+ { 1, Iclass_wur_interp_ext_n_args,
+ 2, Iclass_wur_interp_ext_n_stateArgs, 0, 0 },
+ { 1, Iclass_rur_interp_ext_l_args,
+ 2, Iclass_rur_interp_ext_l_stateArgs, 0, 0 },
+ { 1, Iclass_wur_interp_ext_l_args,
+ 2, Iclass_wur_interp_ext_l_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_0_args,
+ 2, Iclass_rur_llr_buf_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_0_args,
+ 2, Iclass_wur_llr_buf_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_1_args,
+ 2, Iclass_rur_llr_buf_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_1_args,
+ 2, Iclass_wur_llr_buf_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_2_args,
+ 2, Iclass_rur_llr_buf_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_2_args,
+ 2, Iclass_wur_llr_buf_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_3_args,
+ 2, Iclass_rur_llr_buf_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_3_args,
+ 2, Iclass_wur_llr_buf_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_4_args,
+ 2, Iclass_rur_llr_buf_4_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_4_args,
+ 2, Iclass_wur_llr_buf_4_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_5_args,
+ 2, Iclass_rur_llr_buf_5_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_5_args,
+ 2, Iclass_wur_llr_buf_5_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_6_args,
+ 2, Iclass_rur_llr_buf_6_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_6_args,
+ 2, Iclass_wur_llr_buf_6_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_7_args,
+ 2, Iclass_rur_llr_buf_7_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_7_args,
+ 2, Iclass_wur_llr_buf_7_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_8_args,
+ 2, Iclass_rur_llr_buf_8_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_8_args,
+ 2, Iclass_wur_llr_buf_8_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_9_args,
+ 2, Iclass_rur_llr_buf_9_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_9_args,
+ 2, Iclass_wur_llr_buf_9_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_10_args,
+ 2, Iclass_rur_llr_buf_10_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_10_args,
+ 2, Iclass_wur_llr_buf_10_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_11_args,
+ 2, Iclass_rur_llr_buf_11_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_11_args,
+ 2, Iclass_wur_llr_buf_11_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_12_args,
+ 2, Iclass_rur_llr_buf_12_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_12_args,
+ 2, Iclass_wur_llr_buf_12_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_13_args,
+ 2, Iclass_rur_llr_buf_13_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_13_args,
+ 2, Iclass_wur_llr_buf_13_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_14_args,
+ 2, Iclass_rur_llr_buf_14_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_14_args,
+ 2, Iclass_wur_llr_buf_14_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_15_args,
+ 2, Iclass_rur_llr_buf_15_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_15_args,
+ 2, Iclass_wur_llr_buf_15_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_16_args,
+ 2, Iclass_rur_llr_buf_16_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_16_args,
+ 2, Iclass_wur_llr_buf_16_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_17_args,
+ 2, Iclass_rur_llr_buf_17_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_17_args,
+ 2, Iclass_wur_llr_buf_17_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_18_args,
+ 2, Iclass_rur_llr_buf_18_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_18_args,
+ 2, Iclass_wur_llr_buf_18_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_19_args,
+ 2, Iclass_rur_llr_buf_19_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_19_args,
+ 2, Iclass_wur_llr_buf_19_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_20_args,
+ 2, Iclass_rur_llr_buf_20_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_20_args,
+ 2, Iclass_wur_llr_buf_20_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_21_args,
+ 2, Iclass_rur_llr_buf_21_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_21_args,
+ 2, Iclass_wur_llr_buf_21_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_22_args,
+ 2, Iclass_rur_llr_buf_22_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_22_args,
+ 2, Iclass_wur_llr_buf_22_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_buf_23_args,
+ 2, Iclass_rur_llr_buf_23_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_buf_23_args,
+ 2, Iclass_wur_llr_buf_23_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_0_args,
+ 2, Iclass_rur_smod_buf_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_0_args,
+ 2, Iclass_wur_smod_buf_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_1_args,
+ 2, Iclass_rur_smod_buf_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_1_args,
+ 2, Iclass_wur_smod_buf_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_2_args,
+ 2, Iclass_rur_smod_buf_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_2_args,
+ 2, Iclass_wur_smod_buf_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_3_args,
+ 2, Iclass_rur_smod_buf_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_3_args,
+ 2, Iclass_wur_smod_buf_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_4_args,
+ 2, Iclass_rur_smod_buf_4_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_4_args,
+ 2, Iclass_wur_smod_buf_4_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_5_args,
+ 2, Iclass_rur_smod_buf_5_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_5_args,
+ 2, Iclass_wur_smod_buf_5_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_6_args,
+ 2, Iclass_rur_smod_buf_6_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_6_args,
+ 2, Iclass_wur_smod_buf_6_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_buf_7_args,
+ 2, Iclass_rur_smod_buf_7_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_buf_7_args,
+ 2, Iclass_wur_smod_buf_7_stateArgs, 0, 0 },
+ { 1, Iclass_rur_weight_reg_args,
+ 2, Iclass_rur_weight_reg_stateArgs, 0, 0 },
+ { 1, Iclass_wur_weight_reg_args,
+ 2, Iclass_wur_weight_reg_stateArgs, 0, 0 },
+ { 1, Iclass_rur_scale_reg_args,
+ 2, Iclass_rur_scale_reg_stateArgs, 0, 0 },
+ { 1, Iclass_wur_scale_reg_args,
+ 2, Iclass_wur_scale_reg_stateArgs, 0, 0 },
+ { 1, Iclass_rur_llr_pos_args,
+ 2, Iclass_rur_llr_pos_stateArgs, 0, 0 },
+ { 1, Iclass_wur_llr_pos_args,
+ 2, Iclass_wur_llr_pos_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_pos_args,
+ 2, Iclass_rur_smod_pos_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_pos_args,
+ 2, Iclass_wur_smod_pos_stateArgs, 0, 0 },
+ { 1, Iclass_rur_perm_reg_args,
+ 2, Iclass_rur_perm_reg_stateArgs, 0, 0 },
+ { 1, Iclass_wur_perm_reg_args,
+ 2, Iclass_wur_perm_reg_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_offset_table_0_args,
+ 2, Iclass_rur_smod_offset_table_0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_offset_table_0_args,
+ 2, Iclass_wur_smod_offset_table_0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_offset_table_1_args,
+ 2, Iclass_rur_smod_offset_table_1_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_offset_table_1_args,
+ 2, Iclass_wur_smod_offset_table_1_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_offset_table_2_args,
+ 2, Iclass_rur_smod_offset_table_2_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_offset_table_2_args,
+ 2, Iclass_wur_smod_offset_table_2_stateArgs, 0, 0 },
+ { 1, Iclass_rur_smod_offset_table_3_args,
+ 2, Iclass_rur_smod_offset_table_3_stateArgs, 0, 0 },
+ { 1, Iclass_wur_smod_offset_table_3_args,
+ 2, Iclass_wur_smod_offset_table_3_stateArgs, 0, 0 },
+ { 1, Iclass_rur_phasor_n_args,
+ 2, Iclass_rur_phasor_n_stateArgs, 0, 0 },
+ { 1, Iclass_wur_phasor_n_args,
+ 2, Iclass_wur_phasor_n_stateArgs, 0, 0 },
+ { 1, Iclass_rur_phasor_offset_args,
+ 2, Iclass_rur_phasor_offset_stateArgs, 0, 0 },
+ { 1, Iclass_wur_phasor_offset_args,
+ 2, Iclass_wur_phasor_offset_stateArgs, 0, 0 }
+};
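+
+/* Descriptive note (inferred from the field naming, not from the
+   generator's docs): each row above appears to follow the layout
+   { num_operands, operands, num_stateOperands, stateOperands,
+     num_interfaceOperands, interfaceOperands },
+   i.e. a count/pointer pair for the regular operand list, the state
+   operand list, and the TIE interface operand list of one iclass. */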
+
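+/* Index constants into the iclass table above. The enumerator order
+   below mirrors the table's entry order (e.g. ICLASS_xt_iclass_bsi8u
+   through ICLASS_xt_iclass_srli track the same sequence of rows), so
+   the two presumably must be kept in sync. */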
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_rur_threadptr,
+ ICLASS_wur_threadptr,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_176,
+ ICLASS_xt_iclass_wsr_176,
+ ICLASS_xt_iclass_rsr_208,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_mul16,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_bbool1,
+ ICLASS_xt_iclass_bbool4,
+ ICLASS_xt_iclass_bbool8,
+ ICLASS_xt_iclass_bbranch,
+ ICLASS_xt_iclass_bmove,
+ ICLASS_xt_iclass_RSR_BR,
+ ICLASS_xt_iclass_WSR_BR,
+ ICLASS_xt_iclass_XSR_BR,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_lock,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_dcache_lock,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_rsr_cpenable,
+ ICLASS_xt_iclass_wsr_cpenable,
+ ICLASS_xt_iclass_xsr_cpenable,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_fcr,
+ ICLASS_wur_fcr,
+ ICLASS_rur_fsr,
+ ICLASS_wur_fsr,
+ ICLASS_fp,
+ ICLASS_fp_mac,
+ ICLASS_fp_cmov,
+ ICLASS_fp_mov,
+ ICLASS_fp_mov2,
+ ICLASS_fp_cmp,
+ ICLASS_fp_float,
+ ICLASS_fp_int,
+ ICLASS_fp_rfr,
+ ICLASS_fp_wfr,
+ ICLASS_fp_lsi,
+ ICLASS_fp_lsiu,
+ ICLASS_fp_lsx,
+ ICLASS_fp_lsxu,
+ ICLASS_fp_ssi,
+ ICLASS_fp_ssiu,
+ ICLASS_fp_ssx,
+ ICLASS_fp_ssxu,
+ ICLASS_iclass_GET_ARGMAX,
+ ICLASS_iclass_GET_HSAR,
+ ICLASS_iclass_GET_HSAR2SAR,
+ ICLASS_iclass_GET_INTERP_EXT_N,
+ ICLASS_iclass_GET_INTERP_EXT_L,
+ ICLASS_iclass_GET_LLR_BUF,
+ ICLASS_iclass_GET_LLR_POS,
+ ICLASS_iclass_GET_MAX,
+ ICLASS_iclass_GET_NCO,
+ ICLASS_iclass_GET_PERM_REG,
+ ICLASS_iclass_GET_PHASOR_N,
+ ICLASS_iclass_GET_PHASOR_OFFSET,
+ ICLASS_iclass_GET_SAR,
+ ICLASS_iclass_GET_SCALE_REG,
+ ICLASS_iclass_GET_SMOD_BUF,
+ ICLASS_iclass_GET_SMOD_OFFSET_TABLE,
+ ICLASS_iclass_GET_SMOD_POS,
+ ICLASS_iclass_GET_SOV,
+ ICLASS_iclass_GET_WGHT,
+ ICLASS_iclass_SET_ARGMAX,
+ ICLASS_iclass_SET_EXT_REGS,
+ ICLASS_iclass_SET_HSAR,
+ ICLASS_iclass_SET_LLR_BUF,
+ ICLASS_iclass_SET_LLR_POS,
+ ICLASS_iclass_SET_MAX,
+ ICLASS_iclass_SET_NCO,
+ ICLASS_iclass_SET_PERM_REG,
+ ICLASS_iclass_SET_PHASOR_N,
+ ICLASS_iclass_SET_PHASOR_OFFSET,
+ ICLASS_iclass_SET_SAR,
+ ICLASS_iclass_SET_SCALE_REG,
+ ICLASS_iclass_SET_SMOD_BUF,
+ ICLASS_iclass_SET_SMOD_OFFSET_TABLE,
+ ICLASS_iclass_SET_SMOD_POS,
+ ICLASS_iclass_SET_SOV,
+ ICLASS_iclass_SET_WGHT,
+ ICLASS_iclass_LAC2X32,
+ ICLASS_iclass_LAC2X64_0,
+ ICLASS_iclass_LAC2X64_1,
+ ICLASS_iclass_LAC2X64_2,
+ ICLASS_iclass_LAC2X64_3,
+ ICLASS_iclass_LAC32_R,
+ ICLASS_iclass_LAC_IH,
+ ICLASS_iclass_LAC_IL,
+ ICLASS_iclass_LAC_RH,
+ ICLASS_iclass_LAC_RL,
+ ICLASS_iclass_LCM,
+ ICLASS_iclass_LCM_PINC,
+ ICLASS_iclass_LCM_PINC_X,
+ ICLASS_iclass_LCM_U,
+ ICLASS_iclass_LCM_X,
+ ICLASS_iclass_LCM_XU,
+ ICLASS_iclass_LP,
+ ICLASS_iclass_LP_X,
+ ICLASS_iclass_LQ,
+ ICLASS_iclass_LQ_X,
+ ICLASS_iclass_LUT0,
+ ICLASS_iclass_LUT1,
+ ICLASS_iclass_LUT2,
+ ICLASS_iclass_LUT3,
+ ICLASS_iclass_SAC2X32,
+ ICLASS_iclass_SAC2X64_0,
+ ICLASS_iclass_SAC2X64_1,
+ ICLASS_iclass_SAC2X64_2,
+ ICLASS_iclass_SAC2X64_3,
+ ICLASS_iclass_SAC32_R,
+ ICLASS_iclass_SAC_IH,
+ ICLASS_iclass_SAC_IL,
+ ICLASS_iclass_SAC_RH,
+ ICLASS_iclass_SAC_RL,
+ ICLASS_iclass_SCM,
+ ICLASS_iclass_SCM_PINC,
+ ICLASS_iclass_SCM_PINC_X,
+ ICLASS_iclass_SCM_U,
+ ICLASS_iclass_SCM_X,
+ ICLASS_iclass_SCM_XU,
+ ICLASS_iclass_STORE_P,
+ ICLASS_iclass_STORE_Q,
+ ICLASS_iclass_AR2CM_DUP,
+ ICLASS_iclass_AR2CM_LN,
+ ICLASS_iclass_AR2CM_LN_I,
+ ICLASS_iclass_AR2CM_LN_R,
+ ICLASS_iclass_AR2PQ_LN,
+ ICLASS_iclass_AR2SAR_DUP,
+ ICLASS_iclass_CLRAC,
+ ICLASS_iclass_CLRCM,
+ ICLASS_iclass_CM2AR_LN,
+ ICLASS_iclass_CM2AR_LN_I,
+ ICLASS_iclass_CM2AR_LN_R,
+ ICLASS_iclass_COMB_AR,
+ ICLASS_iclass_CONJ,
+ ICLASS_iclass_MOV2AC32_I,
+ ICLASS_iclass_MOV2AC32_R,
+ ICLASS_iclass_MOV2CM2PQ,
+ ICLASS_iclass_MOVAC,
+ ICLASS_iclass_MOVAC_I,
+ ICLASS_iclass_MOVAC_I2R,
+ ICLASS_iclass_MOVAC_R,
+ ICLASS_iclass_MOVAC_R2I,
+ ICLASS_iclass_MOVAR2,
+ ICLASS_iclass_MOVCM,
+ ICLASS_iclass_MOVCM2PQ,
+ ICLASS_iclass_MOVCND_0,
+ ICLASS_iclass_MOVCND_1,
+ ICLASS_iclass_MOVCND_2,
+ ICLASS_iclass_MOVCND_3,
+ ICLASS_iclass_MOVCND_4,
+ ICLASS_iclass_MOVCND_5,
+ ICLASS_iclass_MOVCND_6,
+ ICLASS_iclass_MOVCND_7,
+ ICLASS_iclass_MOVCND8_0,
+ ICLASS_iclass_MOVCND8_1,
+ ICLASS_iclass_MOVCND8_2,
+ ICLASS_iclass_MOVCND8_3,
+ ICLASS_iclass_MOVCND8_4,
+ ICLASS_iclass_MOVCND8_5,
+ ICLASS_iclass_MOVCND8_6,
+ ICLASS_iclass_MOVCND8_7,
+ ICLASS_iclass_MOV_I,
+ ICLASS_iclass_MOVPQ2PQ,
+ ICLASS_iclass_MOV_R,
+ ICLASS_iclass_NEGCM,
+ ICLASS_iclass_POP16LLR_1,
+ ICLASS_iclass_PQ2CM,
+ ICLASS_iclass_SWAPAC_R,
+ ICLASS_iclass_SWAPAC_RI,
+ ICLASS_iclass_SWAPB,
+ ICLASS_iclass_ADD2AC,
+ ICLASS_iclass_ADDAC,
+ ICLASS_iclass_CDOT,
+ ICLASS_iclass_CDOTAC,
+ ICLASS_iclass_CDOTACS,
+ ICLASS_iclass_CMAC,
+ ICLASS_iclass_CMACS,
+ ICLASS_iclass_CMPY,
+ ICLASS_iclass_CMPY2CM,
+ ICLASS_iclass_CMPY2PQ,
+ ICLASS_iclass_CMPYS,
+ ICLASS_iclass_CMPYXP2PQ,
+ ICLASS_iclass_COMB32,
+ ICLASS_iclass_DOT,
+ ICLASS_iclass_DOTAC,
+ ICLASS_iclass_DOTACS,
+ ICLASS_iclass_LIN_INT,
+ ICLASS_iclass_LLRPRE1,
+ ICLASS_iclass_LLRPRE2,
+ ICLASS_iclass_MAC,
+ ICLASS_iclass_MAC8,
+ ICLASS_iclass_MACD8,
+ ICLASS_iclass_MACPQXP_0,
+ ICLASS_iclass_MACPQXP_1,
+ ICLASS_iclass_MACPQXP_2,
+ ICLASS_iclass_MACPQXP_3,
+ ICLASS_iclass_MACS,
+ ICLASS_iclass_MACXP2_0,
+ ICLASS_iclass_MACXP2_1,
+ ICLASS_iclass_MACXP_0,
+ ICLASS_iclass_MACXP_1,
+ ICLASS_iclass_MACXP_2,
+ ICLASS_iclass_MACXP_3,
+ ICLASS_iclass_MOV2AC,
+ ICLASS_iclass_MPY,
+ ICLASS_iclass_MPY2CM,
+ ICLASS_iclass_MPY2PQ,
+ ICLASS_iclass_MPY8,
+ ICLASS_iclass_MPYADD8_2CM,
+ ICLASS_iclass_MPYD8,
+ ICLASS_iclass_MPYPQXP_0,
+ ICLASS_iclass_MPYPQXP_1,
+ ICLASS_iclass_MPYPQXP_2,
+ ICLASS_iclass_MPYPQXP_3,
+ ICLASS_iclass_MPYS,
+ ICLASS_iclass_MPYXP2PQ,
+ ICLASS_iclass_MPYXP2_0,
+ ICLASS_iclass_MPYXP2_1,
+ ICLASS_iclass_MPYXP_0,
+ ICLASS_iclass_MPYXP_1,
+ ICLASS_iclass_MPYXP_2,
+ ICLASS_iclass_MPYXP_3,
+ ICLASS_iclass_NORMACD,
+ ICLASS_iclass_NORMACPQ_I,
+ ICLASS_iclass_NORMACPQ_R,
+ ICLASS_iclass_NORMD,
+ ICLASS_iclass_NORMPYPQ_I,
+ ICLASS_iclass_NORMPYPQ_R,
+ ICLASS_iclass_RCMAC,
+ ICLASS_iclass_RCMPY,
+ ICLASS_iclass_RCMPY2CM,
+ ICLASS_iclass_RFIR,
+ ICLASS_iclass_RFIRA,
+ ICLASS_iclass_RFIRD,
+ ICLASS_iclass_RFIRDA,
+ ICLASS_iclass_RMAC,
+ ICLASS_iclass_RMPY,
+ ICLASS_iclass_RMPY2CM,
+ ICLASS_iclass_SMOD_ALIGN,
+ ICLASS_iclass_SMOD_SCR,
+ ICLASS_iclass_SUB2AC,
+ ICLASS_iclass_WGHT32,
+ ICLASS_iclass_CLRTIEP,
+ ICLASS_iclass_EXT_2FIFO_0,
+ ICLASS_iclass_EXT_2FIFO_1,
+ ICLASS_iclass_EXT_2FIFO_2,
+ ICLASS_iclass_EXT_2FIFO_3,
+ ICLASS_iclass_EXT_R2FIFO_0,
+ ICLASS_iclass_EXT_R2FIFO_1,
+ ICLASS_iclass_EXT_R2FIFO_2,
+ ICLASS_iclass_EXT_R2FIFO_3,
+ ICLASS_iclass_LUT,
+ ICLASS_iclass_LUT_AR,
+ ICLASS_iclass_LUT_IEXT,
+ ICLASS_iclass_LUT_PHASOR,
+ ICLASS_iclass_LUT_REXT,
+ ICLASS_iclass_LUT_WRITE,
+ ICLASS_iclass_MOVEQ128_0,
+ ICLASS_iclass_MOVEQ128_1,
+ ICLASS_iclass_MOVEQ128_2,
+ ICLASS_iclass_MOVEQ128_3,
+ ICLASS_iclass_MOVEQ128_4,
+ ICLASS_iclass_MOVEQ128_5,
+ ICLASS_iclass_MOVEQ32_0,
+ ICLASS_iclass_MOVEQ32_1,
+ ICLASS_iclass_MOVEQ32_2,
+ ICLASS_iclass_MOVEQ32_3,
+ ICLASS_iclass_NCO_UPDATE,
+ ICLASS_iclass_POP128_0,
+ ICLASS_iclass_POP128_1,
+ ICLASS_iclass_POP128_2,
+ ICLASS_iclass_POP128_3,
+ ICLASS_iclass_POP128_4,
+ ICLASS_iclass_POP128_5,
+ ICLASS_iclass_POP128_2CMPQ_0,
+ ICLASS_iclass_POP128_2CMPQ_1,
+ ICLASS_iclass_POP128_2CMPQ_2,
+ ICLASS_iclass_POP128_2CMPQ_3,
+ ICLASS_iclass_POP128_2M_0,
+ ICLASS_iclass_POP128_2M_1,
+ ICLASS_iclass_POP128_2M_2,
+ ICLASS_iclass_POP128_2M_3,
+ ICLASS_iclass_POP128_2PQ_0,
+ ICLASS_iclass_POP128_2PQ_1,
+ ICLASS_iclass_POP128_2PQ_2,
+ ICLASS_iclass_POP128_2PQ_3,
+ ICLASS_iclass_POP128_2PQ_4,
+ ICLASS_iclass_POP128_2PQ_5,
+ ICLASS_iclass_POP2X128_2PQ_01,
+ ICLASS_iclass_POP2X128_2PQ_03,
+ ICLASS_iclass_POP2X128_2PQ_21,
+ ICLASS_iclass_POP2X128_2PQ_23,
+ ICLASS_iclass_POP32_0,
+ ICLASS_iclass_POP32_1,
+ ICLASS_iclass_POP32_2,
+ ICLASS_iclass_POP32_3,
+ ICLASS_iclass_PUSH128,
+ ICLASS_iclass_PUSH128_M,
+ ICLASS_iclass_PUSH128_PQ,
+ ICLASS_iclass_PUSH2X128_PQ,
+ ICLASS_iclass_PUSH32,
+ ICLASS_iclass_QREADY,
+ ICLASS_iclass_RDTIEP,
+ ICLASS_iclass_SETTIEP,
+ ICLASS_iclass_SMOD_LUT,
+ ICLASS_iclass_WRTBSIGQ,
+ ICLASS_iclass_WRTBSIGQM,
+ ICLASS_iclass_WRTIEP,
+ ICLASS_iclass_WRTSIGQ,
+ ICLASS_iclass_ABS8,
+ ICLASS_iclass_ADD16,
+ ICLASS_iclass_ADD32,
+ ICLASS_iclass_ADDAC_I2R,
+ ICLASS_iclass_ADDAC_R2I,
+ ICLASS_iclass_ADDAR2,
+ ICLASS_iclass_ADDCM,
+ ICLASS_iclass_ADDWRP,
+ ICLASS_iclass_AND128,
+ ICLASS_iclass_ARGMAX8,
+ ICLASS_iclass_ASL,
+ ICLASS_iclass_ASL32,
+ ICLASS_iclass_ASLACM,
+ ICLASS_iclass_ASLM,
+ ICLASS_iclass_ASLM32,
+ ICLASS_iclass_ASR,
+ ICLASS_iclass_ASR32,
+ ICLASS_iclass_ASRAC,
+ ICLASS_iclass_ASRM,
+ ICLASS_iclass_BITFEXT,
+ ICLASS_iclass_BITFINS,
+ ICLASS_iclass_CLB_C,
+ ICLASS_iclass_CLB_R,
+ ICLASS_iclass_CMP8,
+ ICLASS_iclass_CMP_I,
+ ICLASS_iclass_CMP_R,
+ ICLASS_iclass_EXT,
+ ICLASS_iclass_EXT_R,
+ ICLASS_iclass_EXT32_I,
+ ICLASS_iclass_EXT32_R,
+ ICLASS_iclass_EXTUI4,
+ ICLASS_iclass_LSLM,
+ ICLASS_iclass_LSRM,
+ ICLASS_iclass_MAX8,
+ ICLASS_iclass_MEAN,
+ ICLASS_iclass_MEAN32,
+ ICLASS_iclass_MIN8,
+ ICLASS_iclass_MINCLB_C,
+ ICLASS_iclass_MINCLB_R,
+ ICLASS_iclass_NOT128,
+ ICLASS_iclass_OR128,
+ ICLASS_iclass_PERM,
+ ICLASS_iclass_REDAC,
+ ICLASS_iclass_REDAC2,
+ ICLASS_iclass_REDAC4,
+ ICLASS_iclass_REDACS,
+ ICLASS_iclass_SMINCLB_C,
+ ICLASS_iclass_SMINCLB_R,
+ ICLASS_iclass_STSWAPBM,
+ ICLASS_iclass_STSWAPBMU,
+ ICLASS_iclass_SUB32,
+ ICLASS_iclass_SUBAC_I2R,
+ ICLASS_iclass_SUBAC_R2I,
+ ICLASS_iclass_SUBARX,
+ ICLASS_iclass_SUBCM,
+ ICLASS_iclass_SUBMEAN,
+ ICLASS_iclass_SUBWRP,
+ ICLASS_iclass_TRANS,
+ ICLASS_iclass_XOR128,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_rur_sov,
+ ICLASS_wur_sov,
+ ICLASS_rur_sat_mode,
+ ICLASS_wur_sat_mode,
+ ICLASS_rur_sar0,
+ ICLASS_wur_sar0,
+ ICLASS_rur_sar1,
+ ICLASS_wur_sar1,
+ ICLASS_rur_sar2,
+ ICLASS_wur_sar2,
+ ICLASS_rur_sar3,
+ ICLASS_wur_sar3,
+ ICLASS_rur_hsar0,
+ ICLASS_wur_hsar0,
+ ICLASS_rur_hsar1,
+ ICLASS_wur_hsar1,
+ ICLASS_rur_hsar2,
+ ICLASS_wur_hsar2,
+ ICLASS_rur_hsar3,
+ ICLASS_wur_hsar3,
+ ICLASS_rur_max_reg_0,
+ ICLASS_wur_max_reg_0,
+ ICLASS_rur_max_reg_1,
+ ICLASS_wur_max_reg_1,
+ ICLASS_rur_max_reg_2,
+ ICLASS_wur_max_reg_2,
+ ICLASS_rur_max_reg_3,
+ ICLASS_wur_max_reg_3,
+ ICLASS_rur_arg_max_reg_0,
+ ICLASS_wur_arg_max_reg_0,
+ ICLASS_rur_arg_max_reg_1,
+ ICLASS_wur_arg_max_reg_1,
+ ICLASS_rur_arg_max_reg_2,
+ ICLASS_wur_arg_max_reg_2,
+ ICLASS_rur_arg_max_reg_3,
+ ICLASS_wur_arg_max_reg_3,
+ ICLASS_rur_nco_counter_0,
+ ICLASS_wur_nco_counter_0,
+ ICLASS_rur_nco_counter_1,
+ ICLASS_wur_nco_counter_1,
+ ICLASS_rur_nco_counter_2,
+ ICLASS_wur_nco_counter_2,
+ ICLASS_rur_nco_counter_3,
+ ICLASS_wur_nco_counter_3,
+ ICLASS_rur_interp_ext_n,
+ ICLASS_wur_interp_ext_n,
+ ICLASS_rur_interp_ext_l,
+ ICLASS_wur_interp_ext_l,
+ ICLASS_rur_llr_buf_0,
+ ICLASS_wur_llr_buf_0,
+ ICLASS_rur_llr_buf_1,
+ ICLASS_wur_llr_buf_1,
+ ICLASS_rur_llr_buf_2,
+ ICLASS_wur_llr_buf_2,
+ ICLASS_rur_llr_buf_3,
+ ICLASS_wur_llr_buf_3,
+ ICLASS_rur_llr_buf_4,
+ ICLASS_wur_llr_buf_4,
+ ICLASS_rur_llr_buf_5,
+ ICLASS_wur_llr_buf_5,
+ ICLASS_rur_llr_buf_6,
+ ICLASS_wur_llr_buf_6,
+ ICLASS_rur_llr_buf_7,
+ ICLASS_wur_llr_buf_7,
+ ICLASS_rur_llr_buf_8,
+ ICLASS_wur_llr_buf_8,
+ ICLASS_rur_llr_buf_9,
+ ICLASS_wur_llr_buf_9,
+ ICLASS_rur_llr_buf_10,
+ ICLASS_wur_llr_buf_10,
+ ICLASS_rur_llr_buf_11,
+ ICLASS_wur_llr_buf_11,
+ ICLASS_rur_llr_buf_12,
+ ICLASS_wur_llr_buf_12,
+ ICLASS_rur_llr_buf_13,
+ ICLASS_wur_llr_buf_13,
+ ICLASS_rur_llr_buf_14,
+ ICLASS_wur_llr_buf_14,
+ ICLASS_rur_llr_buf_15,
+ ICLASS_wur_llr_buf_15,
+ ICLASS_rur_llr_buf_16,
+ ICLASS_wur_llr_buf_16,
+ ICLASS_rur_llr_buf_17,
+ ICLASS_wur_llr_buf_17,
+ ICLASS_rur_llr_buf_18,
+ ICLASS_wur_llr_buf_18,
+ ICLASS_rur_llr_buf_19,
+ ICLASS_wur_llr_buf_19,
+ ICLASS_rur_llr_buf_20,
+ ICLASS_wur_llr_buf_20,
+ ICLASS_rur_llr_buf_21,
+ ICLASS_wur_llr_buf_21,
+ ICLASS_rur_llr_buf_22,
+ ICLASS_wur_llr_buf_22,
+ ICLASS_rur_llr_buf_23,
+ ICLASS_wur_llr_buf_23,
+ ICLASS_rur_smod_buf_0,
+ ICLASS_wur_smod_buf_0,
+ ICLASS_rur_smod_buf_1,
+ ICLASS_wur_smod_buf_1,
+ ICLASS_rur_smod_buf_2,
+ ICLASS_wur_smod_buf_2,
+ ICLASS_rur_smod_buf_3,
+ ICLASS_wur_smod_buf_3,
+ ICLASS_rur_smod_buf_4,
+ ICLASS_wur_smod_buf_4,
+ ICLASS_rur_smod_buf_5,
+ ICLASS_wur_smod_buf_5,
+ ICLASS_rur_smod_buf_6,
+ ICLASS_wur_smod_buf_6,
+ ICLASS_rur_smod_buf_7,
+ ICLASS_wur_smod_buf_7,
+ ICLASS_rur_weight_reg,
+ ICLASS_wur_weight_reg,
+ ICLASS_rur_scale_reg,
+ ICLASS_wur_scale_reg,
+ ICLASS_rur_llr_pos,
+ ICLASS_wur_llr_pos,
+ ICLASS_rur_smod_pos,
+ ICLASS_wur_smod_pos,
+ ICLASS_rur_perm_reg,
+ ICLASS_wur_perm_reg,
+ ICLASS_rur_smod_offset_table_0,
+ ICLASS_wur_smod_offset_table_0,
+ ICLASS_rur_smod_offset_table_1,
+ ICLASS_wur_smod_offset_table_1,
+ ICLASS_rur_smod_offset_table_2,
+ ICLASS_wur_smod_offset_table_2,
+ ICLASS_rur_smod_offset_table_3,
+ ICLASS_wur_smod_offset_table_3,
+ ICLASS_rur_phasor_n,
+ ICLASS_wur_phasor_n,
+ ICLASS_rur_phasor_offset,
+ ICLASS_wur_phasor_offset
+};
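+
+/* These enum values identify iclasses in the same order as the argument
+   table that precedes them; opcode entries later in this file presumably
+   refer back to their iclass through these ICLASS_* ids. */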
+
+
+/* Opcode encodings. */
+
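+/* Each Opcode_*_Slot_*_encode function below writes only the fixed
+   (template) bits of one opcode, for one instruction slot, into the
+   insnbuf word; the variable operand fields are presumably merged in
+   afterwards by the corresponding per-field set functions. */
+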
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_addi_n_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addi_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30001;
+}
+
+static void
+Opcode_addi_n_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addi_n_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addi_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30001;
+}
+
+static void
+Opcode_addi_n_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addi_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_addi_n_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addi_n_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0002;
+}
+
+static void
+Opcode_addi_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f8001;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_beqz_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13000;
+}
+
+static void
+Opcode_beqz_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13000;
+}
+
+static void
+Opcode_beqz_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f001;
+}
+
+static void
+Opcode_beqz_n_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc05001;
+}
+
+static void
+Opcode_beqz_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d3001;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_bnez_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x23000;
+}
+
+static void
+Opcode_bnez_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x23000;
+}
+
+static void
+Opcode_bnez_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52001;
+}
+
+static void
+Opcode_bnez_n_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc09001;
+}
+
+static void
+Opcode_bnez_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e3001;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_l32i_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c001;
+}
+
+static void
+Opcode_l32i_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c001;
+}
+
+static void
+Opcode_l32i_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c000;
+}
+
+static void
+Opcode_l32i_n_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_l32i_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c000;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_mov_n_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ec00;
+}
+
+static void
+Opcode_mov_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd400;
+}
+
+static void
+Opcode_mov_n_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd803;
+}
+
+static void
+Opcode_mov_n_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e000;
+}
+
+static void
+Opcode_mov_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd400;
+}
+
+static void
+Opcode_mov_n_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_mov_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x53401;
+}
+
+static void
+Opcode_mov_n_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb403;
+}
+
+static void
+Opcode_mov_n_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x601401;
+}
+
+static void
+Opcode_mov_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c6c01;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_movi_n_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c000;
+}
+
+static void
+Opcode_movi_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42002;
+}
+
+static void
+Opcode_movi_n_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_movi_n_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_movi_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42002;
+}
+
+static void
+Opcode_movi_n_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_movi_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50001;
+}
+
+static void
+Opcode_movi_n_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8003;
+}
+
+static void
+Opcode_movi_n_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402001;
+}
+
+static void
+Opcode_movi_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_s32i_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20002;
+}
+
+static void
+Opcode_s32i_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20002;
+}
+
+static void
+Opcode_s32i_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_s32i_n_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_s32i_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a4000;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e70;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e700;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addi_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2001;
+}
+
+static void
+Opcode_addi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_addmi_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401;
+}
+
+static void
+Opcode_addmi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_add_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28001;
+}
+
+static void
+Opcode_add_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28001;
+}
+
+static void
+Opcode_add_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_add_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28000;
+}
+
+static void
+Opcode_add_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0002;
+}
+
+static void
+Opcode_add_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f0001;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_sub_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24000;
+}
+
+static void
+Opcode_sub_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48002;
+}
+
+static void
+Opcode_sub_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_sub_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24000;
+}
+
+static void
+Opcode_sub_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60002;
+}
+
+static void
+Opcode_sub_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_sub_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c001;
+}
+
+static void
+Opcode_sub_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_sub_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f0002;
+}
+
+static void
+Opcode_sub_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4ec000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c001;
+}
+
+static void
+Opcode_addx2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c001;
+}
+
+static void
+Opcode_addx2_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c000;
+}
+
+static void
+Opcode_addx2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd4002;
+}
+
+static void
+Opcode_addx2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3fc001;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx4_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34001;
+}
+
+static void
+Opcode_addx4_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34001;
+}
+
+static void
+Opcode_addx4_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34000;
+}
+
+static void
+Opcode_addx4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd8002;
+}
+
+static void
+Opcode_addx4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_addx8_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38001;
+}
+
+static void
+Opcode_addx8_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38001;
+}
+
+static void
+Opcode_addx8_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_addx8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdc002;
+}
+
+static void
+Opcode_addx8_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50002;
+}
+
+static void
+Opcode_subx2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c002;
+}
+
+static void
+Opcode_subx2_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34001;
+}
+
+static void
+Opcode_subx2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ec002;
+}
+
+static void
+Opcode_subx2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f8000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx4_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60002;
+}
+
+static void
+Opcode_subx4_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54002;
+}
+
+static void
+Opcode_subx4_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38001;
+}
+
+static void
+Opcode_subx4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f4002;
+}
+
+static void
+Opcode_subx4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4fc000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_subx8_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c002;
+}
+
+static void
+Opcode_subx8_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58002;
+}
+
+static void
+Opcode_subx8_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c001;
+}
+
+static void
+Opcode_subx8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f8002;
+}
+
+static void
+Opcode_subx8_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_and_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_and_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c001;
+}
+
+static void
+Opcode_and_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_and_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_and_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c001;
+}
+
+static void
+Opcode_and_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_and_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_and_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe4002;
+}
+
+static void
+Opcode_and_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_or_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c000;
+}
+
+static void
+Opcode_or_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_or_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_or_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c000;
+}
+
+static void
+Opcode_or_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_or_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_or_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_or_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d4002;
+}
+
+static void
+Opcode_or_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x494000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_xor_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28000;
+}
+
+static void
+Opcode_xor_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54002;
+}
+
+static void
+Opcode_xor_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_xor_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28000;
+}
+
+static void
+Opcode_xor_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c002;
+}
+
+static void
+Opcode_xor_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44001;
+}
+
+static void
+Opcode_xor_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_xor_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1fc002;
+}
+
+static void
+Opcode_xor_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x518000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_beqi_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_beqi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bnei_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800040;
+}
+
+static void
+Opcode_bnei_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc2;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_bgei_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_bgei_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_blti_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_blti_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x102;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbci_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801;
+}
+
+static void
+Opcode_bbci_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bbsi_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1001;
+}
+
+static void
+Opcode_bbsi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bgeui_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_bgeui_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_bltui_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400040;
+}
+
+static void
+Opcode_bltui_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x202;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_beq_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800801;
+}
+
+static void
+Opcode_beq_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2800;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bne_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401001;
+}
+
+static void
+Opcode_bne_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_bge_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c01;
+}
+
+static void
+Opcode_bge_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_blt_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00801;
+}
+
+static void
+Opcode_blt_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bgeu_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c01;
+}
+
+static void
+Opcode_bgeu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c00;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bltu_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00c01;
+}
+
+static void
+Opcode_bltu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3800;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bany_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800401;
+}
+
+static void
+Opcode_bany_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1800;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_bnone_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801001;
+}
+
+static void
+Opcode_bnone_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_ball_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400401;
+}
+
+static void
+Opcode_ball_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1400;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bnall_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1801;
+}
+
+static void
+Opcode_bnall_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c00;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbc_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00401;
+}
+
+static void
+Opcode_bbc_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c00;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_bbs_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400801;
+}
+
+static void
+Opcode_bbs_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2400;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_beqz_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80002;
+}
+
+static void
+Opcode_beqz_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80001;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bnez_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400002;
+}
+
+static void
+Opcode_bnez_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0001;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bgez_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100002;
+}
+
+static void
+Opcode_bgez_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100001;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_bltz_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200002;
+}
+
+static void
+Opcode_bltz_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200001;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_extui_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_extui_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_extui_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_extui_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_extui_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_extui_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_extui_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16ui_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1003;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l16si_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x803;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2003;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l32r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400001;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_l8ui_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc03;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_movi_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800002;
+}
+
+static void
+Opcode_movi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0001;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_moveqz_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_moveqz_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x74001;
+}
+
+static void
+Opcode_moveqz_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_moveqz_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x74001;
+}
+
+static void
+Opcode_moveqz_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x78000;
+}
+
+static void
+Opcode_moveqz_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c4002;
+}
+
+static void
+Opcode_moveqz_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x454000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movnez_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_movnez_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_movnez_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_movnez_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_movnez_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_movnez_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e0002;
+}
+
+static void
+Opcode_movnez_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x474000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movltz_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000;
+}
+
+static void
+Opcode_movltz_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c001;
+}
+
+static void
+Opcode_movltz_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000;
+}
+
+static void
+Opcode_movltz_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c001;
+}
+
+static void
+Opcode_movltz_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_movltz_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d0002;
+}
+
+static void
+Opcode_movltz_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x470000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_movgez_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_movgez_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x78001;
+}
+
+static void
+Opcode_movgez_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_movgez_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x78001;
+}
+
+static void
+Opcode_movgez_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c000;
+}
+
+static void
+Opcode_movgez_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c8002;
+}
+
+static void
+Opcode_movgez_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x464000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_neg_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34034;
+}
+
+static void
+Opcode_neg_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000c;
+}
+
+static void
+Opcode_neg_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801f;
+}
+
+static void
+Opcode_neg_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34020;
+}
+
+static void
+Opcode_neg_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000c;
+}
+
+static void
+Opcode_neg_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_neg_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54035;
+}
+
+static void
+Opcode_neg_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc01f;
+}
+
+static void
+Opcode_neg_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14001a;
+}
+
+static void
+Opcode_neg_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000d;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_abs_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34030;
+}
+
+static void
+Opcode_abs_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c;
+}
+
+static void
+Opcode_abs_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34010;
+}
+
+static void
+Opcode_abs_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c;
+}
+
+static void
+Opcode_abs_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54021;
+}
+
+static void
+Opcode_abs_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140016;
+}
+
+static void
+Opcode_abs_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34000d;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_nop_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f64;
+}
+
+static void
+Opcode_nop_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc030;
+}
+
+static void
+Opcode_nop_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35430;
+}
+
+static void
+Opcode_nop_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xda5d4;
+}
+
+static void
+Opcode_nop_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0000;
+}
+
+static void
+Opcode_nop_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa42f;
+}
+
+static void
+Opcode_nop_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2eda4;
+}
+
+static void
+Opcode_nop_Slot_pq_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_nop_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35430;
+}
+
+static void
+Opcode_nop_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60;
+}
+
+static void
+Opcode_nop_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_nop_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e40;
+}
+
+static void
+Opcode_nop_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9add4;
+}
+
+static void
+Opcode_nop_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x202;
+}
+
+static void
+Opcode_nop_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b015;
+}
+
+static void
+Opcode_nop_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x168dd;
+}
+
+static void
+Opcode_nop_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400;
+}
+
+static void
+Opcode_nop_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc237;
+}
+
+static void
+Opcode_nop_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa50dc;
+}
+
+static void
+Opcode_nop_Slot_dual_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_nop_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec31;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s16i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1403;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s32i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1803;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_s8i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c03;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssr_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d740;
+}
+
+static void
+Opcode_ssr_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d740;
+}
+
+static void
+Opcode_ssr_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5df41;
+}
+
+static void
+Opcode_ssr_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc8c0;
+}
+
+static void
+Opcode_ssr_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ee801;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssl_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d6c0;
+}
+
+static void
+Opcode_ssl_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d6c0;
+}
+
+static void
+Opcode_ssl_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dec1;
+}
+
+static void
+Opcode_ssl_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc4c0;
+}
+
+static void
+Opcode_ssl_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e6c01;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8l_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_ssa8l_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_ssa8l_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5df01;
+}
+
+static void
+Opcode_ssa8l_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc0c0;
+}
+
+static void
+Opcode_ssa8l_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2debc1;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssa8b_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d680;
+}
+
+static void
+Opcode_ssa8b_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d680;
+}
+
+static void
+Opcode_ssa8b_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5de81;
+}
+
+static void
+Opcode_ssa8b_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00c0;
+}
+
+static void
+Opcode_ssa8b_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2deb81;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_ssai_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_ssai_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_ssai_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dc01;
+}
+
+static void
+Opcode_ssai_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480080;
+}
+
+static void
+Opcode_ssai_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6f81;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_sll_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_sll_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c002;
+}
+
+static void
+Opcode_sll_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_sll_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_sll_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c002;
+}
+
+static void
+Opcode_sll_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64001;
+}
+
+static void
+Opcode_sll_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_sll_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc40c0;
+}
+
+static void
+Opcode_sll_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40142;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_src_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38002;
+}
+
+static void
+Opcode_src_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38002;
+}
+
+static void
+Opcode_src_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28001;
+}
+
+static void
+Opcode_src_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e4002;
+}
+
+static void
+Opcode_src_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e0000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_srl_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5800c;
+}
+
+static void
+Opcode_srl_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5800c;
+}
+
+static void
+Opcode_srl_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58011;
+}
+
+static void
+Opcode_srl_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x148016;
+}
+
+static void
+Opcode_srl_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34c00d;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_sra_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34038;
+}
+
+static void
+Opcode_sra_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002c;
+}
+
+static void
+Opcode_sra_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x802b;
+}
+
+static void
+Opcode_sra_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34014;
+}
+
+static void
+Opcode_sra_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002c;
+}
+
+static void
+Opcode_sra_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5403d;
+}
+
+static void
+Opcode_sra_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc02b;
+}
+
+static void
+Opcode_sra_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x144016;
+}
+
+static void
+Opcode_sra_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34802d;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_slli_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_slli_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10003;
+}
+
+static void
+Opcode_slli_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_slli_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_slli_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10003;
+}
+
+static void
+Opcode_slli_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0002;
+}
+
+static void
+Opcode_slli_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d8001;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srai_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18001;
+}
+
+static void
+Opcode_srai_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_srai_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18001;
+}
+
+static void
+Opcode_srai_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_srai_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_srai_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc8002;
+}
+
+static void
+Opcode_srai_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e8001;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_srli_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c002;
+}
+
+static void
+Opcode_srli_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c002;
+}
+
+static void
+Opcode_srli_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30001;
+}
+
+static void
+Opcode_srli_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e8002;
+}
+
+static void
+Opcode_srli_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4cc000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
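+/* The RSR/WSR/XSR encoders that follow share three templates differing
+   only in the major opcode (0x030000 for RSR, 0x130000 for WSR,
+   0x610000 for XSR), with the special-register number in bits 8..15:
+   e.g. LEND = SR 1 -> 0x30100, SAR = SR 3 -> 0x30300, and
+   PS = SR 0xe6 -> 0x3e600.  */
+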
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_208_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_andb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_andb_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48001;
+}
+
+static void
+Opcode_andb_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48001;
+}
+
+static void
+Opcode_andb_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48000;
+}
+
+static void
+Opcode_andb_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe8002;
+}
+
+static void
+Opcode_andb_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_andbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x120000;
+}
+
+static void
+Opcode_orb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x220000;
+}
+
+static void
+Opcode_orb_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_orb_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_orb_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_orb_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d8002;
+}
+
+static void
+Opcode_orb_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x498000;
+}
+
+static void
+Opcode_orbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x320000;
+}
+
+static void
+Opcode_xorb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x420000;
+}
+
+static void
+Opcode_xorb_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58002;
+}
+
+static void
+Opcode_xorb_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64002;
+}
+
+static void
+Opcode_xorb_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48001;
+}
+
+static void
+Opcode_xorb_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240002;
+}
+
+static void
+Opcode_xorb_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x51c000;
+}
+
+static void
+Opcode_any4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_any4_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35400;
+}
+
+static void
+Opcode_any4_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35400;
+}
+
+static void
+Opcode_any4_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58421;
+}
+
+static void
+Opcode_any4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x481421;
+}
+
+static void
+Opcode_any4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cec21;
+}
+
+static void
+Opcode_all4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9000;
+}
+
+static void
+Opcode_any8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_any8_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d420;
+}
+
+static void
+Opcode_any8_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d420;
+}
+
+static void
+Opcode_any8_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58019;
+}
+
+static void
+Opcode_any8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x481419;
+}
+
+static void
+Opcode_any8_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cec31;
+}
+
+static void
+Opcode_all8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_bf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x76;
+}
+
+static void
+Opcode_bt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1076;
+}
+
+static void
+Opcode_movf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc30000;
+}
+
+static void
+Opcode_movt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd30000;
+}
+
+static void
+Opcode_movt_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_movt_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_movt_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_movt_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1cc002;
+}
+
+static void
+Opcode_movt_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47c000;
+}
+
+static void
+Opcode_rsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30400;
+}
+
+static void
+Opcode_wsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130400;
+}
+
+static void
+Opcode_xsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610400;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
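+/* The cache-maintenance encoders below share the template 0x7002 with
+   the sub-opcode in bits 4..7 (dpfr 0x7002, dhwb 0x7042, dhi 0x7062,
+   ipf 0x70c2, ihi 0x70e2, ...); the cache-locking variants (dpfl, dhu,
+   diu, ipfl, ihu, iiu) appear to add a further sub-op above bit 16.  */
+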
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70d2;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270d2;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370d2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7082;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x27082;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37082;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
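+/* The TLB-management encoders below likewise share one template,
+   0x500000, with the sub-opcode in bits 12..15: the data-TLB ops sit
+   in the 0xb..0xf range (rdtlb0 0x50b000, idtlb 0x50c000, wdtlb
+   0x50e000, ...) and the instruction-TLB ops in 0x3..0x7 (ritlb0
+   0x503000, iitlb 0x504000, witlb 0x506000, ...).  */
+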
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e000;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e000;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e000;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_clamps_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50001;
+}
+
+static void
+Opcode_clamps_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50001;
+}
+
+static void
+Opcode_clamps_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50000;
+}
+
+static void
+Opcode_clamps_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0002;
+}
+
+static void
+Opcode_clamps_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x440000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_min_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70001;
+}
+
+static void
+Opcode_min_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70001;
+}
+
+static void
+Opcode_min_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c000;
+}
+
+static void
+Opcode_min_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfc002;
+}
+
+static void
+Opcode_min_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x460000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_max_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64001;
+}
+
+static void
+Opcode_max_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64001;
+}
+
+static void
+Opcode_max_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68000;
+}
+
+static void
+Opcode_max_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf4002;
+}
+
+static void
+Opcode_max_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x448000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_minu_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c001;
+}
+
+static void
+Opcode_minu_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c001;
+}
+
+static void
+Opcode_minu_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x74000;
+}
+
+static void
+Opcode_minu_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0002;
+}
+
+static void
+Opcode_minu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44c000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_maxu_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68001;
+}
+
+static void
+Opcode_maxu_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68001;
+}
+
+static void
+Opcode_maxu_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70000;
+}
+
+static void
+Opcode_maxu_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf8002;
+}
+
+static void
+Opcode_maxu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x450000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsa_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15400;
+}
+
+static void
+Opcode_nsa_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15400;
+}
+
+static void
+Opcode_nsa_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x53801;
+}
+
+static void
+Opcode_nsa_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c1401;
+}
+
+static void
+Opcode_nsa_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ce801;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_nsau_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25400;
+}
+
+static void
+Opcode_nsau_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25400;
+}
+
+static void
+Opcode_nsau_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x53c01;
+}
+
+static void
+Opcode_nsau_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c5401;
+}
+
+static void
+Opcode_nsau_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6801;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_sext_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34002;
+}
+
+static void
+Opcode_sext_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34002;
+}
+
+static void
+Opcode_sext_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24001;
+}
+
+static void
+Opcode_sext_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1dc002;
+}
+
+static void
+Opcode_sext_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d0000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
+static void
+Opcode_rur_fcr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e80;
+}
+
+static void
+Opcode_wur_fcr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e800;
+}
+
+static void
+Opcode_rur_fsr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e90;
+}
+
+static void
+Opcode_wur_fsr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e900;
+}
+
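+/* Single-precision FP (coprocessor) opcodes follow; each has the
+   standalone "inst" encoding, and most also get a dual_slot0 FLIX
+   encoding on this core.  The inst templates appear to follow the
+   standard FP-option layout (add.s 0x0a0000, sub.s 0x1a0000,
+   oeq.s 0x2b0000, ...).  */
+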
+static void
+Opcode_add_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0000;
+}
+
+static void
+Opcode_add_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f4001;
+}
+
+static void
+Opcode_sub_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0000;
+}
+
+static void
+Opcode_sub_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f4000;
+}
+
+static void
+Opcode_mul_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a0000;
+}
+
+static void
+Opcode_mul_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_madd_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0000;
+}
+
+static void
+Opcode_madd_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x444000;
+}
+
+static void
+Opcode_msub_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a0000;
+}
+
+static void
+Opcode_msub_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x488000;
+}
+
+static void
+Opcode_movf_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcb0000;
+}
+
+static void
+Opcode_movf_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x45c000;
+}
+
+static void
+Opcode_movt_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdb0000;
+}
+
+static void
+Opcode_movt_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x484000;
+}
+
+static void
+Opcode_moveqz_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8b0000;
+}
+
+static void
+Opcode_moveqz_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x458000;
+}
+
+static void
+Opcode_movnez_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9b0000;
+}
+
+static void
+Opcode_movnez_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x478000;
+}
+
+static void
+Opcode_movltz_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xab0000;
+}
+
+static void
+Opcode_movltz_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46c000;
+}
+
+static void
+Opcode_movgez_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbb0000;
+}
+
+static void
+Opcode_movgez_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x468000;
+}
+
+static void
+Opcode_abs_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0010;
+}
+
+static void
+Opcode_abs_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x142;
+}
+
+static void
+Opcode_mov_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0000;
+}
+
+static void
+Opcode_mov_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4142;
+}
+
+static void
+Opcode_neg_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0060;
+}
+
+static void
+Opcode_neg_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8142;
+}
+
+static void
+Opcode_un_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0000;
+}
+
+static void
+Opcode_un_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_oeq_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b0000;
+}
+
+static void
+Opcode_oeq_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a0000;
+}
+
+static void
+Opcode_ueq_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b0000;
+}
+
+static void
+Opcode_ueq_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x510000;
+}
+
+static void
+Opcode_olt_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0000;
+}
+
+static void
+Opcode_olt_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48c000;
+}
+
+static void
+Opcode_ult_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b0000;
+}
+
+static void
+Opcode_ult_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580000;
+}
+
+static void
+Opcode_ole_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0000;
+}
+
+static void
+Opcode_ole_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c0000;
+}
+
+static void
+Opcode_ule_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b0000;
+}
+
+static void
+Opcode_ule_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x540000;
+}
+
+static void
+Opcode_float_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca0000;
+}
+
+static void
+Opcode_float_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500000;
+}
+
+static void
+Opcode_ufloat_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xda0000;
+}
+
+static void
+Opcode_ufloat_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x520000;
+}
+
+static void
+Opcode_round_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8a0000;
+}
+
+static void
+Opcode_round_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x49c000;
+}
+
+static void
+Opcode_ceil_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xba0000;
+}
+
+static void
+Opcode_ceil_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x420000;
+}
+
+static void
+Opcode_floor_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaa0000;
+}
+
+static void
+Opcode_floor_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_trunc_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9a0000;
+}
+
+static void
+Opcode_trunc_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x508000;
+}
+
+static void
+Opcode_utrunc_s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea0000;
+}
+
+static void
+Opcode_utrunc_s_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x514000;
+}
+
+static void
+Opcode_rfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0040;
+}
+
+static void
+Opcode_rfr_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20142;
+}
+
+static void
+Opcode_wfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfa0050;
+}
+
+static void
+Opcode_lsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_lsi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140001;
+}
+
+static void
+Opcode_lsiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8003;
+}
+
+static void
+Opcode_lsiu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180001;
+}
+
+static void
+Opcode_lsx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80000;
+}
+
+static void
+Opcode_lsxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180000;
+}
+
+static void
+Opcode_lsxu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_ssi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_ssi_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240001;
+}
+
+static void
+Opcode_ssiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc003;
+}
+
+static void
+Opcode_ssiu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280001;
+}
+
+static void
+Opcode_ssx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480000;
+}
+
+static void
+Opcode_ssx_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d4000;
+}
+
+static void
+Opcode_ssxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580000;
+}
+
+static void
+Opcode_ssxu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d8000;
+}
+
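+/* From here on the encoders cover this core's TIE-defined DSP state
+   accessors (get_argmax, get_hsar, get_llr_buf, ...); unlike the
+   architectural opcodes above, most of these provide encodings for
+   several FLIX slot formats (gp, dot, pq, smod, llr, dual) in
+   addition to, or instead of, a plain "inst" form.  */
+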
+static void
+Opcode_get_argmax_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b700;
+}
+
+static void
+Opcode_get_argmax_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3407c;
+}
+
+static void
+Opcode_get_argmax_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x181d4;
+}
+
+static void
+Opcode_get_argmax_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3401c;
+}
+
+static void
+Opcode_get_argmax_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8294;
+}
+
+static void
+Opcode_get_argmax_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdd;
+}
+
+static void
+Opcode_get_argmax_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x840ec;
+}
+
+static void
+Opcode_get_hsar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6ba00;
+}
+
+static void
+Opcode_get_hsar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340bc;
+}
+
+static void
+Opcode_get_hsar_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d434;
+}
+
+static void
+Opcode_get_hsar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x281d4;
+}
+
+static void
+Opcode_get_hsar_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x802f;
+}
+
+static void
+Opcode_get_hsar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34058;
+}
+
+static void
+Opcode_get_hsar_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d434;
+}
+
+static void
+Opcode_get_hsar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8314;
+}
+
+static void
+Opcode_get_hsar_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58415;
+}
+
+static void
+Opcode_get_hsar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40d9;
+}
+
+static void
+Opcode_get_hsar_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc83b;
+}
+
+static void
+Opcode_get_hsar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x880ec;
+}
+
+static void
+Opcode_get_hsar_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec05;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6bc00;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3413c;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d438;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x481d4;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x842f;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34098;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d438;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18254;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58815;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80d9;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd03b;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900ec;
+}
+
+static void
+Opcode_get_hsar2sar_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec09;
+}
+
+static void
+Opcode_get_interp_ext_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70b90;
+}
+
+static void
+Opcode_get_interp_ext_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6806c;
+}
+
+static void
+Opcode_get_interp_ext_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6806c;
+}
+
+static void
+Opcode_get_interp_ext_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64101;
+}
+
+static void
+Opcode_get_interp_ext_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3540ed;
+}
+
+static void
+Opcode_get_interp_ext_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70b80;
+}
+
+static void
+Opcode_get_interp_ext_l_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6802c;
+}
+
+static void
+Opcode_get_interp_ext_l_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6802c;
+}
+
+static void
+Opcode_get_interp_ext_l_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64081;
+}
+
+static void
+Opcode_get_interp_ext_l_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35406d;
+}
+
+static void
+Opcode_get_llr_buf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x462000;
+}
+
+static void
+Opcode_get_llr_buf_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d400;
+}
+
+static void
+Opcode_get_llr_buf_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d400;
+}
+
+static void
+Opcode_get_llr_buf_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58021;
+}
+
+static void
+Opcode_get_llr_buf_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000;
+}
+
+static void
+Opcode_get_llr_buf_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cec01;
+}
+
+static void
+Opcode_get_llr_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70ba0;
+}
+
+static void
+Opcode_get_llr_pos_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680ac;
+}
+
+static void
+Opcode_get_llr_pos_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680ac;
+}
+
+static void
+Opcode_get_llr_pos_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64201;
+}
+
+static void
+Opcode_get_llr_pos_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc06f;
+}
+
+static void
+Opcode_get_llr_pos_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35416d;
+}
+
+static void
+Opcode_get_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6bb00;
+}
+
+static void
+Opcode_get_max_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3423c;
+}
+
+static void
+Opcode_get_max_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x881d4;
+}
+
+static void
+Opcode_get_max_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34118;
+}
+
+static void
+Opcode_get_max_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28254;
+}
+
+static void
+Opcode_get_max_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100d9;
+}
+
+static void
+Opcode_get_max_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00ec;
+}
+
+static void
+Opcode_get_nco_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6bd00;
+}
+
+static void
+Opcode_get_nco_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340fc;
+}
+
+static void
+Opcode_get_nco_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x381d4;
+}
+
+static void
+Opcode_get_nco_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34218;
+}
+
+static void
+Opcode_get_nco_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48254;
+}
+
+static void
+Opcode_get_nco_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200d9;
+}
+
+static void
+Opcode_get_nco_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c0ec;
+}
+
+static void
+Opcode_get_perm_reg_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6812c;
+}
+
+static void
+Opcode_get_perm_reg_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8037;
+}
+
+static void
+Opcode_get_perm_reg_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6812c;
+}
+
+static void
+Opcode_get_perm_reg_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64049;
+}
+
+static void
+Opcode_get_perm_reg_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0ef;
+}
+
+static void
+Opcode_get_perm_reg_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35426d;
+}
+
+static void
+Opcode_get_phasor_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70bc0;
+}
+
+static void
+Opcode_get_phasor_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6822c;
+}
+
+static void
+Opcode_get_phasor_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6822c;
+}
+
+static void
+Opcode_get_phasor_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64051;
+}
+
+static void
+Opcode_get_phasor_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3541ed;
+}
+
+static void
+Opcode_get_phasor_offset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70bb0;
+}
+
+static void
+Opcode_get_phasor_offset_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680ec;
+}
+
+static void
+Opcode_get_phasor_offset_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680ec;
+}
+
+static void
+Opcode_get_phasor_offset_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64061;
+}
+
+static void
+Opcode_get_phasor_offset_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3542ed;
+}
+
+static void
+Opcode_get_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6be00;
+}
+
+static void
+Opcode_get_sar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3417c;
+}
+
+static void
+Opcode_get_sar_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d43c;
+}
+
+static void
+Opcode_get_sar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x581d4;
+}
+
+static void
+Opcode_get_sar_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x882f;
+}
+
+static void
+Opcode_get_sar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3405c;
+}
+
+static void
+Opcode_get_sar_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d43c;
+}
+
+static void
+Opcode_get_sar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88254;
+}
+
+static void
+Opcode_get_sar_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x59015;
+}
+
+static void
+Opcode_get_sar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40dd;
+}
+
+static void
+Opcode_get_sar_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe03b;
+}
+
+static void
+Opcode_get_sar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x940ec;
+}
+
+static void
+Opcode_get_sar_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec11;
+}
+
+static void
+Opcode_get_scale_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70bd0;
+}
+
+static void
+Opcode_get_scale_reg_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6816c;
+}
+
+static void
+Opcode_get_scale_reg_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6816c;
+}
+
+static void
+Opcode_get_scale_reg_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6404d;
+}
+
+static void
+Opcode_get_scale_reg_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc16f;
+}
+
+static void
+Opcode_get_scale_reg_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35436d;
+}
+
+static void
+Opcode_get_smod_buf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b200;
+}
+
+static void
+Opcode_get_smod_buf_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d428;
+}
+
+static void
+Opcode_get_smod_buf_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d428;
+}
+
+static void
+Opcode_get_smod_buf_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58419;
+}
+
+static void
+Opcode_get_smod_buf_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc03b;
+}
+
+static void
+Opcode_get_smod_buf_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cec39;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6bf00;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35410;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35410;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a015;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc43f;
+}
+
+static void
+Opcode_get_smod_offset_table_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec21;
+}
+
+static void
+Opcode_get_smod_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70be0;
+}
+
+static void
+Opcode_get_smod_pos_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681ac;
+}
+
+static void
+Opcode_get_smod_pos_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681ac;
+}
+
+static void
+Opcode_get_smod_pos_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64055;
+}
+
+static void
+Opcode_get_smod_pos_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc26f;
+}
+
+static void
+Opcode_get_smod_pos_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3543ed;
+}
+
+static void
+Opcode_get_sov_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70bf0;
+}
+
+static void
+Opcode_get_sov_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681ec;
+}
+
+static void
+Opcode_get_sov_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681ec;
+}
+
+static void
+Opcode_get_sov_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64059;
+}
+
+static void
+Opcode_get_sov_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35802d;
+}
+
+static void
+Opcode_get_wght_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b00;
+}
+
+static void
+Opcode_get_wght_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6826c;
+}
+
+static void
+Opcode_get_wght_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6826c;
+}
+
+static void
+Opcode_get_wght_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6405d;
+}
+
+static void
+Opcode_get_wght_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc1ef;
+}
+
+static void
+Opcode_get_wght_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35804d;
+}
+
+static void
+Opcode_set_argmax_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b30;
+}
+
+static void
+Opcode_set_argmax_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400d9;
+}
+
+static void
+Opcode_set_argmax_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8254;
+}
+
+static void
+Opcode_set_argmax_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400d9;
+}
+
+static void
+Opcode_set_argmax_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85d4;
+}
+
+static void
+Opcode_set_argmax_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11d;
+}
+
+static void
+Opcode_set_argmax_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810d0;
+}
+
+static void
+Opcode_set_ext_regs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370b00;
+}
+
+static void
+Opcode_set_ext_regs_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32400;
+}
+
+static void
+Opcode_set_ext_regs_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c01;
+}
+
+static void
+Opcode_set_ext_regs_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dc00;
+}
+
+static void
+Opcode_set_ext_regs_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12001;
+}
+
+static void
+Opcode_set_ext_regs_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_set_ext_regs_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e1401;
+}
+
+static void
+Opcode_set_hsar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400dd;
+}
+
+static void
+Opcode_set_hsar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8294;
+}
+
+static void
+Opcode_set_hsar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400dd;
+}
+
+static void
+Opcode_set_hsar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8dd4;
+}
+
+static void
+Opcode_set_hsar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x159;
+}
+
+static void
+Opcode_set_hsar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820d0;
+}
+
+static void
+Opcode_set_llr_buf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70300;
+}
+
+static void
+Opcode_set_llr_buf_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c202;
+}
+
+static void
+Opcode_set_llr_buf_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c202;
+}
+
+static void
+Opcode_set_llr_buf_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70001;
+}
+
+static void
+Opcode_set_llr_buf_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc02f;
+}
+
+static void
+Opcode_set_llr_buf_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35402d;
+}
+
+static void
+Opcode_set_llr_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620d0;
+}
+
+static void
+Opcode_set_llr_pos_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d480;
+}
+
+static void
+Opcode_set_llr_pos_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d480;
+}
+
+static void
+Opcode_set_llr_pos_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dc81;
+}
+
+static void
+Opcode_set_llr_pos_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10200;
+}
+
+static void
+Opcode_set_llr_pos_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de941;
+}
+
+static void
+Opcode_set_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b50;
+}
+
+static void
+Opcode_set_max_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404d9;
+}
+
+static void
+Opcode_set_max_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8314;
+}
+
+static void
+Opcode_set_max_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404d9;
+}
+
+static void
+Opcode_set_max_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x95d4;
+}
+
+static void
+Opcode_set_max_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x199;
+}
+
+static void
+Opcode_set_max_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810d4;
+}
+
+static void
+Opcode_set_nco_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b60;
+}
+
+static void
+Opcode_set_nco_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408d9;
+}
+
+static void
+Opcode_set_nco_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8654;
+}
+
+static void
+Opcode_set_nco_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408d9;
+}
+
+static void
+Opcode_set_nco_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa5d4;
+}
+
+static void
+Opcode_set_nco_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x51d;
+}
+
+static void
+Opcode_set_nco_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810d8;
+}
+
+static void
+Opcode_set_perm_reg_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_set_perm_reg_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf403;
+}
+
+static void
+Opcode_set_perm_reg_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_set_perm_reg_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dd01;
+}
+
+static void
+Opcode_set_perm_reg_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10040;
+}
+
+static void
+Opcode_set_perm_reg_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de981;
+}
+
+static void
+Opcode_set_phasor_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620e0;
+}
+
+static void
+Opcode_set_phasor_n_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_set_phasor_n_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_set_phasor_n_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5de01;
+}
+
+static void
+Opcode_set_phasor_n_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de9c1;
+}
+
+static void
+Opcode_set_phasor_offset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1630d0;
+}
+
+static void
+Opcode_set_phasor_offset_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d4c0;
+}
+
+static void
+Opcode_set_phasor_offset_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d4c0;
+}
+
+static void
+Opcode_set_phasor_offset_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dcc1;
+}
+
+static void
+Opcode_set_phasor_offset_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dea41;
+}
+
+static void
+Opcode_set_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b70;
+}
+
+static void
+Opcode_set_sar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410d9;
+}
+
+static void
+Opcode_set_sar_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x683ac;
+}
+
+static void
+Opcode_set_sar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8a54;
+}
+
+static void
+Opcode_set_sar_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8237;
+}
+
+static void
+Opcode_set_sar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410d9;
+}
+
+static void
+Opcode_set_sar_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x683ac;
+}
+
+static void
+Opcode_set_sar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9dd4;
+}
+
+static void
+Opcode_set_sar_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64075;
+}
+
+static void
+Opcode_set_sar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x91d;
+}
+
+static void
+Opcode_set_sar_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0b7;
+}
+
+static void
+Opcode_set_sar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x814d0;
+}
+
+static void
+Opcode_set_sar_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3581ad;
+}
+
+static void
+Opcode_set_scale_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620f0;
+}
+
+static void
+Opcode_set_scale_reg_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d540;
+}
+
+static void
+Opcode_set_scale_reg_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d540;
+}
+
+static void
+Opcode_set_scale_reg_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dd41;
+}
+
+static void
+Opcode_set_scale_reg_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10080;
+}
+
+static void
+Opcode_set_scale_reg_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dea81;
+}
+
+static void
+Opcode_set_smod_buf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370e00;
+}
+
+static void
+Opcode_set_smod_buf_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c008;
+}
+
+static void
+Opcode_set_smod_buf_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c008;
+}
+
+static void
+Opcode_set_smod_buf_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64041;
+}
+
+static void
+Opcode_set_smod_buf_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc073;
+}
+
+static void
+Opcode_set_smod_buf_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350039;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b90;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x683ec;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x683ec;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64079;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc137;
+}
+
+static void
+Opcode_set_smod_offset_table_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3582ad;
+}
+
+static void
+Opcode_set_smod_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1630e0;
+}
+
+static void
+Opcode_set_smod_pos_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d580;
+}
+
+static void
+Opcode_set_smod_pos_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d580;
+}
+
+static void
+Opcode_set_smod_pos_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5dd81;
+}
+
+static void
+Opcode_set_smod_pos_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10440;
+}
+
+static void
+Opcode_set_smod_pos_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2deb01;
+}
+
+static void
+Opcode_set_sov_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1630f0;
+}
+
+static void
+Opcode_set_sov_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d5c0;
+}
+
+static void
+Opcode_set_sov_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d5c0;
+}
+
+static void
+Opcode_set_sov_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5ddc1;
+}
+
+static void
+Opcode_set_sov_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2deac1;
+}
+
+static void
+Opcode_set_wght_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a010;
+}
+
+static void
+Opcode_set_wght_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d640;
+}
+
+static void
+Opcode_set_wght_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d640;
+}
+
+static void
+Opcode_set_wght_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5de41;
+}
+
+static void
+Opcode_set_wght_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10840;
+}
+
+static void
+Opcode_set_wght_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2deb41;
+}
+
+static void
+Opcode_lac2x32_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_lac2x32_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_lac2x32_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_lac2x32_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300001;
+}
+
+static void
+Opcode_lac2x64_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82003;
+}
+
+static void
+Opcode_lac2x64_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c00;
+}
+
+static void
+Opcode_lac2x64_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c00;
+}
+
+static void
+Opcode_lac2x64_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0c01;
+}
+
+static void
+Opcode_lac2x64_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32003;
+}
+
+static void
+Opcode_lac2x64_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c00;
+}
+
+static void
+Opcode_lac2x64_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c00;
+}
+
+static void
+Opcode_lac2x64_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c2801;
+}
+
+static void
+Opcode_lac2x64_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x132003;
+}
+
+static void
+Opcode_lac2x64_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1400;
+}
+
+static void
+Opcode_lac2x64_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1400;
+}
+
+static void
+Opcode_lac2x64_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c4c01;
+}
+
+static void
+Opcode_lac2x64_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52003;
+}
+
+static void
+Opcode_lac2x64_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1800;
+}
+
+static void
+Opcode_lac2x64_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1800;
+}
+
+static void
+Opcode_lac2x64_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c2c01;
+}
+
+static void
+Opcode_lac32_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61000;
+}
+
+static void
+Opcode_lac32_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400;
+}
+
+static void
+Opcode_lac32_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400;
+}
+
+static void
+Opcode_lac32_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0001;
+}
+
+static void
+Opcode_lac_ih_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2003;
+}
+
+static void
+Opcode_lac_ih_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800;
+}
+
+static void
+Opcode_lac_ih_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800;
+}
+
+static void
+Opcode_lac_ih_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0401;
+}
+
+static void
+Opcode_lac_il_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12003;
+}
+
+static void
+Opcode_lac_il_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_lac_il_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_lac_il_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0801;
+}
+
+static void
+Opcode_lac_rh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x22003;
+}
+
+static void
+Opcode_lac_rh_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00;
+}
+
+static void
+Opcode_lac_rh_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00;
+}
+
+static void
+Opcode_lac_rh_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c1001;
+}
+
+static void
+Opcode_lac_rl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42003;
+}
+
+static void
+Opcode_lac_rl_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2800;
+}
+
+static void
+Opcode_lac_rl_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2800;
+}
+
+static void
+Opcode_lac_rl_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c2401;
+}
+
+static void
+Opcode_lcm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70300;
+}
+
+static void
+Opcode_lcm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40008;
+}
+
+static void
+Opcode_lcm_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_lcm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40008;
+}
+
+static void
+Opcode_lcm_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54000;
+}
+
+static void
+Opcode_lcm_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_lcm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340011;
+}
+
+static void
+Opcode_lcm_pinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70000;
+}
+
+static void
+Opcode_lcm_pinc_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_lcm_pinc_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_lcm_pinc_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_lcm_pinc_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340001;
+}
+
+static void
+Opcode_lcm_pinc_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680000;
+}
+
+static void
+Opcode_lcm_pinc_x_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54001;
+}
+
+static void
+Opcode_lcm_pinc_x_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54001;
+}
+
+static void
+Opcode_lcm_pinc_x_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58000;
+}
+
+static void
+Opcode_lcm_pinc_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x414000;
+}
+
+static void
+Opcode_lcm_u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70100;
+}
+
+static void
+Opcode_lcm_u_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004;
+}
+
+static void
+Opcode_lcm_u_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_lcm_u_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004;
+}
+
+static void
+Opcode_lcm_u_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_lcm_u_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_lcm_u_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380001;
+}
+
+static void
+Opcode_lcm_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x880000;
+}
+
+static void
+Opcode_lcm_x_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58001;
+}
+
+static void
+Opcode_lcm_x_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_lcm_x_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58001;
+}
+
+static void
+Opcode_lcm_x_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_lcm_x_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c000;
+}
+
+static void
+Opcode_lcm_x_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_lcm_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x418000;
+}
+
+static void
+Opcode_lcm_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa80000;
+}
+
+static void
+Opcode_lcm_xu_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c001;
+}
+
+static void
+Opcode_lcm_xu_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_lcm_xu_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c001;
+}
+
+static void
+Opcode_lcm_xu_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_lcm_xu_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64000;
+}
+
+static void
+Opcode_lcm_xu_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_lcm_xu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41c000;
+}
+
+static void
+Opcode_lp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70200;
+}
+
+static void
+Opcode_lp_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340005;
+}
+
+static void
+Opcode_lp_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc80000;
+}
+
+static void
+Opcode_lp_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x424000;
+}
+
+static void
+Opcode_lq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70400;
+}
+
+static void
+Opcode_lq_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340009;
+}
+
+static void
+Opcode_lq_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe80000;
+}
+
+static void
+Opcode_lq_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x428000;
+}
+
+static void
+Opcode_lut0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0000;
+}
+
+static void
+Opcode_lut0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42c000;
+}
+
+static void
+Opcode_lut1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0000;
+}
+
+static void
+Opcode_lut1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x434000;
+}
+
+static void
+Opcode_lut2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d0000;
+}
+
+static void
+Opcode_lut2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x438000;
+}
+
+static void
+Opcode_lut3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0000;
+}
+
+static void
+Opcode_lut3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x43c000;
+}
+
+static void
+Opcode_sac2x32_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0000;
+}
+
+static void
+Opcode_sac2x32_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_sac2x32_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_sac2x32_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0001;
+}
+
+static void
+Opcode_sac2x64_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf88000;
+}
+
+static void
+Opcode_sac2x64_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c002;
+}
+
+static void
+Opcode_sac2x64_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68002;
+}
+
+static void
+Opcode_sac2x64_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52a000;
+}
+
+static void
+Opcode_sac2x64_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8d8000;
+}
+
+static void
+Opcode_sac2x64_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5e002;
+}
+
+static void
+Opcode_sac2x64_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70002;
+}
+
+static void
+Opcode_sac2x64_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52c000;
+}
+
+static void
+Opcode_sac2x64_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9d8000;
+}
+
+static void
+Opcode_sac2x64_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64002;
+}
+
+static void
+Opcode_sac2x64_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a002;
+}
+
+static void
+Opcode_sac2x64_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52e000;
+}
+
+static void
+Opcode_sac2x64_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xac8000;
+}
+
+static void
+Opcode_sac2x64_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68002;
+}
+
+static void
+Opcode_sac2x64_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c002;
+}
+
+static void
+Opcode_sac2x64_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x532000;
+}
+
+static void
+Opcode_sac32_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0000;
+}
+
+static void
+Opcode_sac32_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_sac32_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_sac32_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c8001;
+}
+
+static void
+Opcode_sac_ih_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0000;
+}
+
+static void
+Opcode_sac_ih_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_sac_ih_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_sac_ih_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4a8000;
+}
+
+static void
+Opcode_sac_il_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d8000;
+}
+
+static void
+Opcode_sac_il_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18002;
+}
+
+static void
+Opcode_sac_il_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18002;
+}
+
+static void
+Opcode_sac_il_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b0000;
+}
+
+static void
+Opcode_sac_rh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8d0000;
+}
+
+static void
+Opcode_sac_rh_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a002;
+}
+
+static void
+Opcode_sac_rh_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a002;
+}
+
+static void
+Opcode_sac_rh_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4aa000;
+}
+
+static void
+Opcode_sac_rl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xac0000;
+}
+
+static void
+Opcode_sac_rl_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28002;
+}
+
+static void
+Opcode_sac_rl_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28002;
+}
+
+static void
+Opcode_sac_rl_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b2000;
+}
+
+static void
+Opcode_scm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5d0000;
+}
+
+static void
+Opcode_scm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_scm_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_scm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_scm_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_scm_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_scm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b8000;
+}
+
+static void
+Opcode_scm_pinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0000;
+}
+
+static void
+Opcode_scm_pinc_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_scm_pinc_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_scm_pinc_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_scm_pinc_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d0001;
+}
+
+static void
+Opcode_scm_pinc_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780000;
+}
+
+static void
+Opcode_scm_pinc_x_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14002;
+}
+
+static void
+Opcode_scm_pinc_x_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14002;
+}
+
+static void
+Opcode_scm_pinc_x_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14001;
+}
+
+static void
+Opcode_scm_pinc_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4bc000;
+}
+
+static void
+Opcode_scm_u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c0000;
+}
+
+static void
+Opcode_scm_u_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_scm_u_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_scm_u_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_scm_u_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_scm_u_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_scm_u_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e0001;
+}
+
+static void
+Opcode_scm_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x980000;
+}
+
+static void
+Opcode_scm_x_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30002;
+}
+
+static void
+Opcode_scm_x_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_scm_x_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30002;
+}
+
+static void
+Opcode_scm_x_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_scm_x_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18001;
+}
+
+static void
+Opcode_scm_x_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_scm_x_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c4000;
+}
+
+static void
+Opcode_scm_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb80000;
+}
+
+static void
+Opcode_scm_xu_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24002;
+}
+
+static void
+Opcode_scm_xu_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_scm_xu_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24002;
+}
+
+static void
+Opcode_scm_xu_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_scm_xu_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c001;
+}
+
+static void
+Opcode_scm_xu_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_scm_xu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c8000;
+}
+
+static void
+Opcode_store_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c0000;
+}
+
+static void
+Opcode_store_p_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48002;
+}
+
+static void
+Opcode_store_p_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4dc000;
+}
+
+static void
+Opcode_store_q_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc0000;
+}
+
+static void
+Opcode_store_q_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50002;
+}
+
+static void
+Opcode_store_q_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e4000;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a000;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c080;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5400;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5400;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x53001;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4102;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb003;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000c0;
+}
+
+static void
+Opcode_ar2cm_dup_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c6801;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64000;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e000;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c000;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c001;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa003;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc01001;
+}
+
+static void
+Opcode_ar2cm_ln_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c3001;
+}
+
+static void
+Opcode_ar2cm_ln_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c000;
+}
+
+static void
+Opcode_ar2cm_ln_i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_ar2cm_ln_i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_ar2cm_ln_i_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d001;
+}
+
+static void
+Opcode_ar2cm_ln_i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c7001;
+}
+
+static void
+Opcode_ar2cm_ln_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x164000;
+}
+
+static void
+Opcode_ar2cm_ln_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_ar2cm_ln_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_ar2cm_ln_r_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e001;
+}
+
+static void
+Opcode_ar2cm_ln_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cb001;
+}
+
+static void
+Opcode_ar2pq_ln_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_ar2pq_ln_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_ar2pq_ln_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32e00;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12401;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed00;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11c01;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4942;
+}
+
+static void
+Opcode_ar2sar_dup_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00c0;
+}
+
+static void
+Opcode_clrac_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a080;
+}
+
+static void
+Opcode_clrac_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d780;
+}
+
+static void
+Opcode_clrac_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf803;
+}
+
+static void
+Opcode_clrac_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d780;
+}
+
+static void
+Opcode_clrac_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f6801;
+}
+
+static void
+Opcode_clrcm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b600;
+}
+
+static void
+Opcode_clrcm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3403c;
+}
+
+static void
+Opcode_clrcm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d430;
+}
+
+static void
+Opcode_clrcm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81d4;
+}
+
+static void
+Opcode_clrcm_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8033;
+}
+
+static void
+Opcode_clrcm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34018;
+}
+
+static void
+Opcode_clrcm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d430;
+}
+
+static void
+Opcode_clrcm_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_clrcm_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b00;
+}
+
+static void
+Opcode_clrcm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8254;
+}
+
+static void
+Opcode_clrcm_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58015;
+}
+
+static void
+Opcode_clrcm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd9;
+}
+
+static void
+Opcode_clrcm_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc43b;
+}
+
+static void
+Opcode_clrcm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800ec;
+}
+
+static void
+Opcode_clrcm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec01;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbd0000;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34000;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40010;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc010;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8003;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34000;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40010;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc010;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54001;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc003;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140032;
+}
+
+static void
+Opcode_cm2ar_ln_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x344011;
+}
+
+static void
+Opcode_cm2ar_ln_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbd0400;
+}
+
+static void
+Opcode_cm2ar_ln_i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40030;
+}
+
+static void
+Opcode_cm2ar_ln_i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40030;
+}
+
+static void
+Opcode_cm2ar_ln_i_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58001;
+}
+
+static void
+Opcode_cm2ar_ln_i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x344031;
+}
+
+static void
+Opcode_cm2ar_ln_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbd0800;
+}
+
+static void
+Opcode_cm2ar_ln_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44010;
+}
+
+static void
+Opcode_cm2ar_ln_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44010;
+}
+
+static void
+Opcode_cm2ar_ln_r_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54011;
+}
+
+static void
+Opcode_cm2ar_ln_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34c011;
+}
+
+static void
+Opcode_comb_ar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c0000;
+}
+
+static void
+Opcode_comb_ar_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60001;
+}
+
+static void
+Opcode_comb_ar_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60001;
+}
+
+static void
+Opcode_comb_ar_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_comb_ar_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480000;
+}
+
+static void
+Opcode_conj_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x170300;
+}
+
+static void
+Opcode_conj_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001d;
+}
+
+static void
+Opcode_conj_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44008;
+}
+
+static void
+Opcode_conj_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8054;
+}
+
+static void
+Opcode_conj_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8013;
+}
+
+static void
+Opcode_conj_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001d;
+}
+
+static void
+Opcode_conj_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44008;
+}
+
+static void
+Opcode_conj_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_conj_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81;
+}
+
+static void
+Opcode_conj_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8054;
+}
+
+static void
+Opcode_conj_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54025;
+}
+
+static void
+Opcode_conj_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d;
+}
+
+static void
+Opcode_conj_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc013;
+}
+
+static void
+Opcode_conj_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c4;
+}
+
+static void
+Opcode_conj_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34002d;
+}
+
+static void
+Opcode_mov2ac32_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdd0020;
+}
+
+static void
+Opcode_mov2ac32_i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c042;
+}
+
+static void
+Opcode_mov2ac32_i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c042;
+}
+
+static void
+Opcode_mov2ac32_i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100142;
+}
+
+static void
+Opcode_mov2ac32_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdd0040;
+}
+
+static void
+Opcode_mov2ac32_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c082;
+}
+
+static void
+Opcode_mov2ac32_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c082;
+}
+
+static void
+Opcode_mov2ac32_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200142;
+}
+
+static void
+Opcode_mov2cm2pq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801;
+}
+
+static void
+Opcode_mov2cm2pq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0;
+}
+
+static void
+Opcode_mov2cm2pq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0;
+}
+
+static void
+Opcode_movac_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x970b80;
+}
+
+static void
+Opcode_movac_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a042;
+}
+
+static void
+Opcode_movac_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e202;
+}
+
+static void
+Opcode_movac_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc142;
+}
+
+static void
+Opcode_movac_i_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_movac_i2r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x978b80;
+}
+
+static void
+Opcode_movac_r_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_movac_r2i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70380;
+}
+
+static void
+Opcode_movar2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c100;
+}
+
+static void
+Opcode_movar2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80001;
+}
+
+static void
+Opcode_movar2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38200;
+}
+
+static void
+Opcode_movar2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80001;
+}
+
+static void
+Opcode_movar2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc402;
+}
+
+static void
+Opcode_movar2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00c0;
+}
+
+static void
+Opcode_movcm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x170b00;
+}
+
+static void
+Opcode_movcm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40099;
+}
+
+static void
+Opcode_movcm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002c;
+}
+
+static void
+Opcode_movcm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8114;
+}
+
+static void
+Opcode_movcm_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8023;
+}
+
+static void
+Opcode_movcm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40099;
+}
+
+static void
+Opcode_movcm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002c;
+}
+
+static void
+Opcode_movcm_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_movcm_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x101;
+}
+
+static void
+Opcode_movcm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8114;
+}
+
+static void
+Opcode_movcm_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54029;
+}
+
+static void
+Opcode_movcm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x99;
+}
+
+static void
+Opcode_movcm_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc023;
+}
+
+static void
+Opcode_movcm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800e0;
+}
+
+static void
+Opcode_movcm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34400d;
+}
+
+static void
+Opcode_movcm2pq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70600;
+}
+
+static void
+Opcode_movcm2pq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc02;
+}
+
+static void
+Opcode_movcm2pq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4181;
+}
+
+static void
+Opcode_movcm2pq_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_movcm2pq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4181;
+}
+
+static void
+Opcode_movcm2pq_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x182;
+}
+
+static void
+Opcode_movcnd_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3001;
+}
+
+static void
+Opcode_movcnd_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4240;
+}
+
+static void
+Opcode_movcnd_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2801;
+}
+
+static void
+Opcode_movcnd_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4240;
+}
+
+static void
+Opcode_movcnd_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c3;
+}
+
+static void
+Opcode_movcnd_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x281c01;
+}
+
+static void
+Opcode_movcnd_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c01;
+}
+
+static void
+Opcode_movcnd_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4280;
+}
+
+static void
+Opcode_movcnd_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3001;
+}
+
+static void
+Opcode_movcnd_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4280;
+}
+
+static void
+Opcode_movcnd_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x243;
+}
+
+static void
+Opcode_movcnd_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x301c01;
+}
+
+static void
+Opcode_movcnd_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3401;
+}
+
+static void
+Opcode_movcnd_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4300;
+}
+
+static void
+Opcode_movcnd_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c01;
+}
+
+static void
+Opcode_movcnd_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4300;
+}
+
+static void
+Opcode_movcnd_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x283;
+}
+
+static void
+Opcode_movcnd_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c1c01;
+}
+
+static void
+Opcode_movcnd_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3801;
+}
+
+static void
+Opcode_movcnd_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42c0;
+}
+
+static void
+Opcode_movcnd_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3401;
+}
+
+static void
+Opcode_movcnd_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42c0;
+}
+
+static void
+Opcode_movcnd_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x303;
+}
+
+static void
+Opcode_movcnd_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x341c01;
+}
+
+static void
+Opcode_movcnd_4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c01;
+}
+
+static void
+Opcode_movcnd_4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4340;
+}
+
+static void
+Opcode_movcnd_4_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3801;
+}
+
+static void
+Opcode_movcnd_4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4340;
+}
+
+static void
+Opcode_movcnd_4_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c3;
+}
+
+static void
+Opcode_movcnd_4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x381c01;
+}
+
+static void
+Opcode_movcnd_5_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_movcnd_5_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4380;
+}
+
+static void
+Opcode_movcnd_5_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c01;
+}
+
+static void
+Opcode_movcnd_5_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4380;
+}
+
+static void
+Opcode_movcnd_5_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x343;
+}
+
+static void
+Opcode_movcnd_5_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c1c01;
+}
+
+static void
+Opcode_movcnd_6_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402;
+}
+
+static void
+Opcode_movcnd_6_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x43c0;
+}
+
+static void
+Opcode_movcnd_6_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_movcnd_6_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x43c0;
+}
+
+static void
+Opcode_movcnd_6_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x383;
+}
+
+static void
+Opcode_movcnd_6_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401401;
+}
+
+static void
+Opcode_movcnd_7_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x802;
+}
+
+static void
+Opcode_movcnd_7_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_movcnd_7_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402;
+}
+
+static void
+Opcode_movcnd_7_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_movcnd_7_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c3;
+}
+
+static void
+Opcode_movcnd_7_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401801;
+}
+
+static void
+Opcode_movcnd8_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1001;
+}
+
+static void
+Opcode_movcnd8_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4040;
+}
+
+static void
+Opcode_movcnd8_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801;
+}
+
+static void
+Opcode_movcnd8_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4040;
+}
+
+static void
+Opcode_movcnd8_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_movcnd8_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81c01;
+}
+
+static void
+Opcode_movcnd8_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2001;
+}
+
+static void
+Opcode_movcnd8_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4080;
+}
+
+static void
+Opcode_movcnd8_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1001;
+}
+
+static void
+Opcode_movcnd8_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4080;
+}
+
+static void
+Opcode_movcnd8_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x43;
+}
+
+static void
+Opcode_movcnd8_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x101c01;
+}
+
+static void
+Opcode_movcnd8_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc01;
+}
+
+static void
+Opcode_movcnd8_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4100;
+}
+
+static void
+Opcode_movcnd8_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2001;
+}
+
+static void
+Opcode_movcnd8_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4100;
+}
+
+static void
+Opcode_movcnd8_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83;
+}
+
+static void
+Opcode_movcnd8_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x201c01;
+}
+
+static void
+Opcode_movcnd8_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1401;
+}
+
+static void
+Opcode_movcnd8_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4200;
+}
+
+static void
+Opcode_movcnd8_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc01;
+}
+
+static void
+Opcode_movcnd8_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4200;
+}
+
+static void
+Opcode_movcnd8_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x103;
+}
+
+static void
+Opcode_movcnd8_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc1c01;
+}
+
+static void
+Opcode_movcnd8_4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1801;
+}
+
+static void
+Opcode_movcnd8_4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c0;
+}
+
+static void
+Opcode_movcnd8_4_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1401;
+}
+
+static void
+Opcode_movcnd8_4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c0;
+}
+
+static void
+Opcode_movcnd8_4_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x203;
+}
+
+static void
+Opcode_movcnd8_4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x141c01;
+}
+
+static void
+Opcode_movcnd8_5_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c01;
+}
+
+static void
+Opcode_movcnd8_5_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4140;
+}
+
+static void
+Opcode_movcnd8_5_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1801;
+}
+
+static void
+Opcode_movcnd8_5_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4140;
+}
+
+static void
+Opcode_movcnd8_5_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc3;
+}
+
+static void
+Opcode_movcnd8_5_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x181c01;
+}
+
+static void
+Opcode_movcnd8_6_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2401;
+}
+
+static void
+Opcode_movcnd8_6_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4180;
+}
+
+static void
+Opcode_movcnd8_6_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c01;
+}
+
+static void
+Opcode_movcnd8_6_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4180;
+}
+
+static void
+Opcode_movcnd8_6_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x143;
+}
+
+static void
+Opcode_movcnd8_6_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c1c01;
+}
+
+static void
+Opcode_movcnd8_7_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2801;
+}
+
+static void
+Opcode_movcnd8_7_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41c0;
+}
+
+static void
+Opcode_movcnd8_7_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2401;
+}
+
+static void
+Opcode_movcnd8_7_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41c0;
+}
+
+static void
+Opcode_movcnd8_7_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183;
+}
+
+static void
+Opcode_movcnd8_7_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x241c01;
+}
+
+static void
+Opcode_mov_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370300;
+}
+
+static void
+Opcode_mov_i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4800c;
+}
+
+static void
+Opcode_mov_i_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8017;
+}
+
+static void
+Opcode_mov_i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4800c;
+}
+
+static void
+Opcode_mov_i_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54031;
+}
+
+static void
+Opcode_mov_i_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc017;
+}
+
+static void
+Opcode_mov_i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800d;
+}
+
+static void
+Opcode_movpq2pq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f400;
+}
+
+static void
+Opcode_movpq2pq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10801;
+}
+
+static void
+Opcode_movpq2pq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10801;
+}
+
+static void
+Opcode_mov_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x570300;
+}
+
+static void
+Opcode_mov_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000c;
+}
+
+static void
+Opcode_mov_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x801b;
+}
+
+static void
+Opcode_mov_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000c;
+}
+
+static void
+Opcode_mov_r_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5402d;
+}
+
+static void
+Opcode_mov_r_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc01b;
+}
+
+static void
+Opcode_mov_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35000d;
+}
+
+static void
+Opcode_negcm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x970300;
+}
+
+static void
+Opcode_negcm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40219;
+}
+
+static void
+Opcode_negcm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4802c;
+}
+
+static void
+Opcode_negcm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80d4;
+}
+
+static void
+Opcode_negcm_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8027;
+}
+
+static void
+Opcode_negcm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40219;
+}
+
+static void
+Opcode_negcm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4802c;
+}
+
+static void
+Opcode_negcm_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21;
+}
+
+static void
+Opcode_negcm_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x201;
+}
+
+static void
+Opcode_negcm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80d4;
+}
+
+static void
+Opcode_negcm_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54039;
+}
+
+static void
+Opcode_negcm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x219;
+}
+
+static void
+Opcode_negcm_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc027;
+}
+
+static void
+Opcode_negcm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800e4;
+}
+
+static void
+Opcode_negcm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34402d;
+}
+
+static void
+Opcode_pop16llr_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a180;
+}
+
+static void
+Opcode_pop16llr_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d7a0;
+}
+
+static void
+Opcode_pop16llr_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d7a0;
+}
+
+static void
+Opcode_pop16llr_1_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11040;
+}
+
+static void
+Opcode_pop16llr_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e6c41;
+}
+
+static void
+Opcode_pq2cm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x62000;
+}
+
+static void
+Opcode_pq2cm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34028;
+}
+
+static void
+Opcode_pq2cm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88018;
+}
+
+static void
+Opcode_pq2cm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_pq2cm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88018;
+}
+
+static void
+Opcode_pq2cm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_pq2cm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x440080;
+}
+
+static void
+Opcode_swapac_r_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_swapac_ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70b00;
+}
+
+static void
+Opcode_swapac_ri_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a082;
+}
+
+static void
+Opcode_swapac_ri_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e142;
+}
+
+static void
+Opcode_swapac_ri_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc162;
+}
+
+static void
+Opcode_swapb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdd0010;
+}
+
+static void
+Opcode_swapb_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0c0;
+}
+
+static void
+Opcode_swapb_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11801;
+}
+
+static void
+Opcode_swapb_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38140;
+}
+
+static void
+Opcode_swapb_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11401;
+}
+
+static void
+Opcode_swapb_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd402;
+}
+
+static void
+Opcode_swapb_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc80c0;
+}
+
+static void
+Opcode_add2ac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_addac_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_cdot_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_cdotac_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_cdotacs_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_cmac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_cmac_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_cmacs_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_cmpy_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_cmpy_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_cmpy2cm_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_cmpy2cm_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_cmpy2pq_Slot_pq_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_cmpys_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2001;
+}
+
+static void
+Opcode_cmpyxp2pq_Slot_pq_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_comb32_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_dot_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_dotac_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_dotacs_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_lin_int_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_llrpre1_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_llrpre2_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_mac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_mac_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6001;
+}
+
+static void
+Opcode_mac8_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_macd8_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20002;
+}
+
+static void
+Opcode_macpqxp_0_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20003;
+}
+
+static void
+Opcode_macpqxp_1_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_macpqxp_2_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_macpqxp_3_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_macs_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_macxp2_0_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40003;
+}
+
+static void
+Opcode_macxp2_1_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_macxp_0_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2003;
+}
+
+static void
+Opcode_macxp_1_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_macxp_2_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_macxp_3_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_mov2ac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc010;
+}
+
+static void
+Opcode_mpy_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6001;
+}
+
+static void
+Opcode_mpy_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa001;
+}
+
+static void
+Opcode_mpy2cm_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc003;
+}
+
+static void
+Opcode_mpy2cm_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_mpy2pq_Slot_pq_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_mpy8_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_mpyadd8_2cm_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_mpyadd8_2cm_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_mpyd8_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60001;
+}
+
+static void
+Opcode_mpypqxp_0_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60002;
+}
+
+static void
+Opcode_mpypqxp_1_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60003;
+}
+
+static void
+Opcode_mpypqxp_2_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80000;
+}
+
+static void
+Opcode_mpypqxp_3_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80001;
+}
+
+static void
+Opcode_mpys_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_mpyxp2pq_Slot_pq_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_mpyxp2_0_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80002;
+}
+
+static void
+Opcode_mpyxp2_1_Slot_dot_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80003;
+}
+
+static void
+Opcode_mpyxp_0_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_mpyxp_1_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6003;
+}
+
+static void
+Opcode_mpyxp_2_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_mpyxp_3_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_normacd_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_normacd_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_normacpq_i_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe000;
+}
+
+static void
+Opcode_normacpq_r_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe040;
+}
+
+static void
+Opcode_normd_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa001;
+}
+
+static void
+Opcode_normd_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe001;
+}
+
+static void
+Opcode_normpypq_i_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe080;
+}
+
+static void
+Opcode_normpypq_r_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe100;
+}
+
+static void
+Opcode_rcmac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_rcmpy_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_rcmpy2cm_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_rcmpy2cm_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_rfir_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_rfira_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_rfird_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_rfirda_Slot_acc2_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_rmac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8003;
+}
+
+static void
+Opcode_rmpy_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa003;
+}
+
+static void
+Opcode_rmpy2cm_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10002;
+}
+
+static void
+Opcode_rmpy2cm_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_smod_align_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_smod_scr_Slot_smod_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_sub2ac_Slot_gp_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc020;
+}
+
+static void
+Opcode_wght32_Slot_llr_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_clrtiep_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b000;
+}
+
+static void
+Opcode_clrtiep_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32c00;
+}
+
+static void
+Opcode_clrtiep_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12801;
+}
+
+static void
+Opcode_clrtiep_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f000;
+}
+
+static void
+Opcode_clrtiep_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12801;
+}
+
+static void
+Opcode_clrtiep_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4202;
+}
+
+static void
+Opcode_clrtiep_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280;
+}
+
+static void
+Opcode_ext_2fifo_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x470600;
+}
+
+static void
+Opcode_ext_2fifo_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54008;
+}
+
+static void
+Opcode_ext_2fifo_0_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfc02;
+}
+
+static void
+Opcode_ext_2fifo_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54008;
+}
+
+static void
+Opcode_ext_2fifo_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x342019;
+}
+
+static void
+Opcode_ext_2fifo_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x870600;
+}
+
+static void
+Opcode_ext_2fifo_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64008;
+}
+
+static void
+Opcode_ext_2fifo_1_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc003;
+}
+
+static void
+Opcode_ext_2fifo_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64008;
+}
+
+static void
+Opcode_ext_2fifo_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x348019;
+}
+
+static void
+Opcode_ext_2fifo_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x170e00;
+}
+
+static void
+Opcode_ext_2fifo_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46028;
+}
+
+static void
+Opcode_ext_2fifo_2_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc403;
+}
+
+static void
+Opcode_ext_2fifo_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46028;
+}
+
+static void
+Opcode_ext_2fifo_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x350019;
+}
+
+static void
+Opcode_ext_2fifo_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x178600;
+}
+
+static void
+Opcode_ext_2fifo_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c028;
+}
+
+static void
+Opcode_ext_2fifo_3_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc803;
+}
+
+static void
+Opcode_ext_2fifo_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c028;
+}
+
+static void
+Opcode_ext_2fifo_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x360019;
+}
+
+static void
+Opcode_ext_r2fifo_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x178e00;
+}
+
+static void
+Opcode_ext_r2fifo_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e008;
+}
+
+static void
+Opcode_ext_r2fifo_0_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd003;
+}
+
+static void
+Opcode_ext_r2fifo_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e008;
+}
+
+static void
+Opcode_ext_r2fifo_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x342039;
+}
+
+static void
+Opcode_ext_r2fifo_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270e00;
+}
+
+static void
+Opcode_ext_r2fifo_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e028;
+}
+
+static void
+Opcode_ext_r2fifo_1_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe003;
+}
+
+static void
+Opcode_ext_r2fifo_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e028;
+}
+
+static void
+Opcode_ext_r2fifo_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x348039;
+}
+
+static void
+Opcode_ext_r2fifo_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x278600;
+}
+
+static void
+Opcode_ext_r2fifo_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54028;
+}
+
+static void
+Opcode_ext_r2fifo_2_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc03;
+}
+
+static void
+Opcode_ext_r2fifo_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x54028;
+}
+
+static void
+Opcode_ext_r2fifo_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34a019;
+}
+
+static void
+Opcode_ext_r2fifo_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x370600;
+}
+
+static void
+Opcode_ext_r2fifo_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56008;
+}
+
+static void
+Opcode_ext_r2fifo_3_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd403;
+}
+
+static void
+Opcode_ext_r2fifo_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56008;
+}
+
+static void
+Opcode_ext_r2fifo_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34a039;
+}
+
+static void
+Opcode_lut_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40180;
+}
+
+static void
+Opcode_lut_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0;
+}
+
+static void
+Opcode_lut_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40180;
+}
+
+static void
+Opcode_lut_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0;
+}
+
+static void
+Opcode_lut_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240;
+}
+
+static void
+Opcode_lut_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd80040;
+}
+
+static void
+Opcode_lut_ar_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_lut_ar_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_lut_ar_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_lut_ar_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_lut_ar_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_lut_ar_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec002;
+}
+
+static void
+Opcode_lut_iext_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401c0;
+}
+
+static void
+Opcode_lut_iext_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240;
+}
+
+static void
+Opcode_lut_iext_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401c0;
+}
+
+static void
+Opcode_lut_iext_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_lut_iext_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240;
+}
+
+static void
+Opcode_lut_iext_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280;
+}
+
+static void
+Opcode_lut_iext_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdc0040;
+}
+
+static void
+Opcode_lut_phasor_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40059;
+}
+
+static void
+Opcode_lut_phasor_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8094;
+}
+
+static void
+Opcode_lut_phasor_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40059;
+}
+
+static void
+Opcode_lut_phasor_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_lut_phasor_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8094;
+}
+
+static void
+Opcode_lut_phasor_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x59;
+}
+
+static void
+Opcode_lut_phasor_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c8;
+}
+
+static void
+Opcode_lut_rext_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40240;
+}
+
+static void
+Opcode_lut_rext_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280;
+}
+
+static void
+Opcode_lut_rext_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40240;
+}
+
+static void
+Opcode_lut_rext_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280;
+}
+
+static void
+Opcode_lut_rext_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_lut_rext_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe40040;
+}
+
+static void
+Opcode_lut_write_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40431;
+}
+
+static void
+Opcode_lut_write_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_lut_write_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41031;
+}
+
+static void
+Opcode_lut_write_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_lut_write_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131;
+}
+
+static void
+Opcode_lut_write_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800d0;
+}
+
+static void
+Opcode_moveq128_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b010;
+}
+
+static void
+Opcode_moveq128_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f40;
+}
+
+static void
+Opcode_moveq128_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd81d4;
+}
+
+static void
+Opcode_moveq128_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed80;
+}
+
+static void
+Opcode_moveq128_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xadd4;
+}
+
+static void
+Opcode_moveq128_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140dd;
+}
+
+static void
+Opcode_moveq128_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x818d0;
+}
+
+static void
+Opcode_moveq128_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b110;
+}
+
+static void
+Opcode_moveq128_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f80;
+}
+
+static void
+Opcode_moveq128_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe81d4;
+}
+
+static void
+Opcode_moveq128_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed84;
+}
+
+static void
+Opcode_moveq128_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb5d4;
+}
+
+static void
+Opcode_moveq128_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x144dd;
+}
+
+static void
+Opcode_moveq128_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810dc;
+}
+
+static void
+Opcode_moveq128_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b210;
+}
+
+static void
+Opcode_moveq128_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f44;
+}
+
+static void
+Opcode_moveq128_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd85d4;
+}
+
+static void
+Opcode_moveq128_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed88;
+}
+
+static void
+Opcode_moveq128_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1add4;
+}
+
+static void
+Opcode_moveq128_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x148dd;
+}
+
+static void
+Opcode_moveq128_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x850dc;
+}
+
+static void
+Opcode_moveq128_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b410;
+}
+
+static void
+Opcode_moveq128_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f48;
+}
+
+static void
+Opcode_moveq128_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd89d4;
+}
+
+static void
+Opcode_moveq128_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed90;
+}
+
+static void
+Opcode_moveq128_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2add4;
+}
+
+static void
+Opcode_moveq128_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x150dd;
+}
+
+static void
+Opcode_moveq128_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x890dc;
+}
+
+static void
+Opcode_moveq128_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b810;
+}
+
+static void
+Opcode_moveq128_4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f50;
+}
+
+static void
+Opcode_moveq128_4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd91d4;
+}
+
+static void
+Opcode_moveq128_4_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2eda0;
+}
+
+static void
+Opcode_moveq128_4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4add4;
+}
+
+static void
+Opcode_moveq128_4_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x160dd;
+}
+
+static void
+Opcode_moveq128_4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910dc;
+}
+
+static void
+Opcode_moveq128_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b310;
+}
+
+static void
+Opcode_moveq128_5_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f60;
+}
+
+static void
+Opcode_moveq128_5_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xda1d4;
+}
+
+static void
+Opcode_moveq128_5_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2edc0;
+}
+
+static void
+Opcode_moveq128_5_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8add4;
+}
+
+static void
+Opcode_moveq128_5_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14cdd;
+}
+
+static void
+Opcode_moveq128_5_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10dc;
+}
+
+static void
+Opcode_moveq32_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b510;
+}
+
+static void
+Opcode_moveq32_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f4c;
+}
+
+static void
+Opcode_moveq32_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd8dd4;
+}
+
+static void
+Opcode_moveq32_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed8c;
+}
+
+static void
+Opcode_moveq32_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3add4;
+}
+
+static void
+Opcode_moveq32_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x154dd;
+}
+
+static void
+Opcode_moveq32_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8d0dc;
+}
+
+static void
+Opcode_moveq32_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b610;
+}
+
+static void
+Opcode_moveq32_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f54;
+}
+
+static void
+Opcode_moveq32_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd95d4;
+}
+
+static void
+Opcode_moveq32_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed94;
+}
+
+static void
+Opcode_moveq32_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5add4;
+}
+
+static void
+Opcode_moveq32_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x158dd;
+}
+
+static void
+Opcode_moveq32_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x950dc;
+}
+
+static void
+Opcode_moveq32_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b710;
+}
+
+static void
+Opcode_moveq32_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f58;
+}
+
+static void
+Opcode_moveq32_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd99d4;
+}
+
+static void
+Opcode_moveq32_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed98;
+}
+
+static void
+Opcode_moveq32_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6add4;
+}
+
+static void
+Opcode_moveq32_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15cdd;
+}
+
+static void
+Opcode_moveq32_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x990dc;
+}
+
+static void
+Opcode_moveq32_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b910;
+}
+
+static void
+Opcode_moveq32_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f5c;
+}
+
+static void
+Opcode_moveq32_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd9dd4;
+}
+
+static void
+Opcode_moveq32_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed9c;
+}
+
+static void
+Opcode_moveq32_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7add4;
+}
+
+static void
+Opcode_moveq32_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x164dd;
+}
+
+static void
+Opcode_moveq32_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9d0dc;
+}
+
+static void
+Opcode_nco_update_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40119;
+}
+
+static void
+Opcode_nco_update_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8214;
+}
+
+static void
+Opcode_nco_update_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40119;
+}
+
+static void
+Opcode_nco_update_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11;
+}
+
+static void
+Opcode_nco_update_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8214;
+}
+
+static void
+Opcode_nco_update_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x119;
+}
+
+static void
+Opcode_nco_update_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800cc;
+}
+
+static void
+Opcode_pop128_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263000;
+}
+
+static void
+Opcode_pop128_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x341bc;
+}
+
+static void
+Opcode_pop128_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35420;
+}
+
+static void
+Opcode_pop128_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681d4;
+}
+
+static void
+Opcode_pop128_0_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x902f;
+}
+
+static void
+Opcode_pop128_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3409c;
+}
+
+static void
+Opcode_pop128_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35420;
+}
+
+static void
+Opcode_pop128_0_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40d;
+}
+
+static void
+Opcode_pop128_0_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b40;
+}
+
+static void
+Opcode_pop128_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38254;
+}
+
+static void
+Opcode_pop128_0_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58c15;
+}
+
+static void
+Opcode_pop128_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80dd;
+}
+
+static void
+Opcode_pop128_0_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc83f;
+}
+
+static void
+Opcode_pop128_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x980ec;
+}
+
+static void
+Opcode_pop128_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec0d;
+}
+
+static void
+Opcode_pop128_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26a000;
+}
+
+static void
+Opcode_pop128_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x341fc;
+}
+
+static void
+Opcode_pop128_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35414;
+}
+
+static void
+Opcode_pop128_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x781d4;
+}
+
+static void
+Opcode_pop128_1_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa02f;
+}
+
+static void
+Opcode_pop128_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340d8;
+}
+
+static void
+Opcode_pop128_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35414;
+}
+
+static void
+Opcode_pop128_1_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80d;
+}
+
+static void
+Opcode_pop128_1_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b80;
+}
+
+static void
+Opcode_pop128_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58254;
+}
+
+static void
+Opcode_pop128_1_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x59415;
+}
+
+static void
+Opcode_pop128_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0d9;
+}
+
+static void
+Opcode_pop128_1_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc3b;
+}
+
+static void
+Opcode_pop128_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9c0ec;
+}
+
+static void
+Opcode_pop128_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec15;
+}
+
+static void
+Opcode_pop128_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x362000;
+}
+
+static void
+Opcode_pop128_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3427c;
+}
+
+static void
+Opcode_pop128_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35418;
+}
+
+static void
+Opcode_pop128_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x981d4;
+}
+
+static void
+Opcode_pop128_2_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c2f;
+}
+
+static void
+Opcode_pop128_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340dc;
+}
+
+static void
+Opcode_pop128_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35418;
+}
+
+static void
+Opcode_pop128_2_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100d;
+}
+
+static void
+Opcode_pop128_2_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2bc0;
+}
+
+static void
+Opcode_pop128_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68254;
+}
+
+static void
+Opcode_pop128_2_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x59815;
+}
+
+static void
+Opcode_pop128_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0dd;
+}
+
+static void
+Opcode_pop128_2_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc3f;
+}
+
+static void
+Opcode_pop128_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa40ec;
+}
+
+static void
+Opcode_pop128_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec19;
+}
+
+static void
+Opcode_pop128_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263100;
+}
+
+static void
+Opcode_pop128_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x342bc;
+}
+
+static void
+Opcode_pop128_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3541c;
+}
+
+static void
+Opcode_pop128_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa81d4;
+}
+
+static void
+Opcode_pop128_3_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x942f;
+}
+
+static void
+Opcode_pop128_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3411c;
+}
+
+static void
+Opcode_pop128_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3541c;
+}
+
+static void
+Opcode_pop128_3_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200d;
+}
+
+static void
+Opcode_pop128_3_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c80;
+}
+
+static void
+Opcode_pop128_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x78254;
+}
+
+static void
+Opcode_pop128_3_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x59c15;
+}
+
+static void
+Opcode_pop128_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100dd;
+}
+
+static void
+Opcode_pop128_3_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd03f;
+}
+
+static void
+Opcode_pop128_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa80ec;
+}
+
+static void
+Opcode_pop128_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec1d;
+}
+
+static void
+Opcode_pop128_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263200;
+}
+
+static void
+Opcode_pop128_4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3433c;
+}
+
+static void
+Opcode_pop128_4_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35424;
+}
+
+static void
+Opcode_pop128_4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc81d4;
+}
+
+static void
+Opcode_pop128_4_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x982f;
+}
+
+static void
+Opcode_pop128_4_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34158;
+}
+
+static void
+Opcode_pop128_4_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35424;
+}
+
+static void
+Opcode_pop128_4_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0d;
+}
+
+static void
+Opcode_pop128_4_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d00;
+}
+
+static void
+Opcode_pop128_4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x98254;
+}
+
+static void
+Opcode_pop128_4_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a415;
+}
+
+static void
+Opcode_pop128_4_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140d9;
+}
+
+static void
+Opcode_pop128_4_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd43b;
+}
+
+static void
+Opcode_pop128_4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00ec;
+}
+
+static void
+Opcode_pop128_4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec25;
+}
+
+static void
+Opcode_pop128_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263400;
+}
+
+static void
+Opcode_pop128_5_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x342fc;
+}
+
+static void
+Opcode_pop128_5_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35428;
+}
+
+static void
+Opcode_pop128_5_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb81d4;
+}
+
+static void
+Opcode_pop128_5_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9c2f;
+}
+
+static void
+Opcode_pop128_5_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34198;
+}
+
+static void
+Opcode_pop128_5_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35428;
+}
+
+static void
+Opcode_pop128_5_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140d;
+}
+
+static void
+Opcode_pop128_5_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e00;
+}
+
+static void
+Opcode_pop128_5_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa8254;
+}
+
+static void
+Opcode_pop128_5_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a815;
+}
+
+static void
+Opcode_pop128_5_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180d9;
+}
+
+static void
+Opcode_pop128_5_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd83b;
+}
+
+static void
+Opcode_pop128_5_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xac0ec;
+}
+
+static void
+Opcode_pop128_5_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dec29;
+}
+
+static void
+Opcode_pop128_2cmpq_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_pop128_2cmpq_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8018;
+}
+
+static void
+Opcode_pop128_2cmpq_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8018;
+}
+
+static void
+Opcode_pop128_2cmpq_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34010;
+}
+
+static void
+Opcode_pop128_2cmpq_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18018;
+}
+
+static void
+Opcode_pop128_2cmpq_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18018;
+}
+
+static void
+Opcode_pop128_2cmpq_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34020;
+}
+
+static void
+Opcode_pop128_2cmpq_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28018;
+}
+
+static void
+Opcode_pop128_2cmpq_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28018;
+}
+
+static void
+Opcode_pop128_2cmpq_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34018;
+}
+
+static void
+Opcode_pop128_2cmpq_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48018;
+}
+
+static void
+Opcode_pop128_2cmpq_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x48018;
+}
+
+static void
+Opcode_pop128_2m_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcd0000;
+}
+
+static void
+Opcode_pop128_2m_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66002;
+}
+
+static void
+Opcode_pop128_2m_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x72002;
+}
+
+static void
+Opcode_pop128_2m_0_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c001;
+}
+
+static void
+Opcode_pop128_2m_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300401;
+}
+
+static void
+Opcode_pop128_2m_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdc0000;
+}
+
+static void
+Opcode_pop128_2m_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66402;
+}
+
+static void
+Opcode_pop128_2m_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x74002;
+}
+
+static void
+Opcode_pop128_2m_1_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c401;
+}
+
+static void
+Opcode_pop128_2m_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300801;
+}
+
+static void
+Opcode_pop128_2m_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec0000;
+}
+
+static void
+Opcode_pop128_2m_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66802;
+}
+
+static void
+Opcode_pop128_2m_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x78002;
+}
+
+static void
+Opcode_pop128_2m_2_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c801;
+}
+
+static void
+Opcode_pop128_2m_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x302401;
+}
+
+static void
+Opcode_pop128_2m_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcd1000;
+}
+
+static void
+Opcode_pop128_2m_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66c02;
+}
+
+static void
+Opcode_pop128_2m_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x72402;
+}
+
+static void
+Opcode_pop128_2m_3_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5e001;
+}
+
+static void
+Opcode_pop128_2m_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x304401;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x162080;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33400;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38818;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38418;
+}
+
+static void
+Opcode_pop128_2pq_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de801;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a000;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33408;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39018;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2900;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38c18;
+}
+
+static void
+Opcode_pop128_2pq_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6d01;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x162090;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33410;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a018;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a00;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39418;
+}
+
+static void
+Opcode_pop128_2pq_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6e01;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620a0;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33420;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38c18;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c00;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a418;
+}
+
+static void
+Opcode_pop128_2pq_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6d81;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620c0;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33418;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39418;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2980;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39c18;
+}
+
+static void
+Opcode_pop128_2pq_4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6e81;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1620b0;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33428;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39818;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a80;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ac18;
+}
+
+static void
+Opcode_pop128_2pq_5_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6f01;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263800;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32d80;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12441;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2cc0;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11c41;
+}
+
+static void
+Opcode_pop2x128_2pq_01_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de881;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263300;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32dc0;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12481;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d40;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11c81;
+}
+
+static void
+Opcode_pop2x128_2pq_03_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de901;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263500;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32e40;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12501;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d80;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11d01;
+}
+
+static void
+Opcode_pop2x128_2pq_21_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dea01;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x263600;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32e80;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12601;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2dc0;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11e01;
+}
+
+static void
+Opcode_pop2x128_2pq_23_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2de8c1;
+}
+
+static void
+Opcode_pop32_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf70300;
+}
+
+static void
+Opcode_pop32_0_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3437c;
+}
+
+static void
+Opcode_pop32_0_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x682ac;
+}
+
+static void
+Opcode_pop32_0_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39c18;
+}
+
+static void
+Opcode_pop32_0_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x803b;
+}
+
+static void
+Opcode_pop32_0_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3415c;
+}
+
+static void
+Opcode_pop32_0_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x682ac;
+}
+
+static void
+Opcode_pop32_0_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_pop32_0_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc1;
+}
+
+static void
+Opcode_pop32_0_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b418;
+}
+
+static void
+Opcode_pop32_0_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64065;
+}
+
+static void
+Opcode_pop32_0_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x29;
+}
+
+static void
+Opcode_pop32_0_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc2ef;
+}
+
+static void
+Opcode_pop32_0_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb40ec;
+}
+
+static void
+Opcode_pop32_0_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35c00d;
+}
+
+static void
+Opcode_pop32_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b10;
+}
+
+static void
+Opcode_pop32_1_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x343bc;
+}
+
+static void
+Opcode_pop32_1_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6832c;
+}
+
+static void
+Opcode_pop32_1_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39c1c;
+}
+
+static void
+Opcode_pop32_1_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8077;
+}
+
+static void
+Opcode_pop32_1_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3419c;
+}
+
+static void
+Opcode_pop32_1_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6832c;
+}
+
+static void
+Opcode_pop32_1_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_pop32_1_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc5;
+}
+
+static void
+Opcode_pop32_1_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b41c;
+}
+
+static void
+Opcode_pop32_1_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64069;
+}
+
+static void
+Opcode_pop32_1_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x425;
+}
+
+static void
+Opcode_pop32_1_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc36f;
+}
+
+static void
+Opcode_pop32_1_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb80ec;
+}
+
+static void
+Opcode_pop32_1_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3580ad;
+}
+
+static void
+Opcode_pop32_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b20;
+}
+
+static void
+Opcode_pop32_2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x343fc;
+}
+
+static void
+Opcode_pop32_2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x682ec;
+}
+
+static void
+Opcode_pop32_2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a418;
+}
+
+static void
+Opcode_pop32_2_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80b7;
+}
+
+static void
+Opcode_pop32_2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x341d8;
+}
+
+static void
+Opcode_pop32_2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x682ec;
+}
+
+static void
+Opcode_pop32_2_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_pop32_2_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc9;
+}
+
+static void
+Opcode_pop32_2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3bc18;
+}
+
+static void
+Opcode_pop32_2_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64071;
+}
+
+static void
+Opcode_pop32_2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x825;
+}
+
+static void
+Opcode_pop32_2_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc3ef;
+}
+
+static void
+Opcode_pop32_2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbc0ec;
+}
+
+static void
+Opcode_pop32_2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35812d;
+}
+
+static void
+Opcode_pop32_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b40;
+}
+
+static void
+Opcode_pop32_3_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38008;
+}
+
+static void
+Opcode_pop32_3_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6836c;
+}
+
+static void
+Opcode_pop32_3_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a818;
+}
+
+static void
+Opcode_pop32_3_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8137;
+}
+
+static void
+Opcode_pop32_3_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x341dc;
+}
+
+static void
+Opcode_pop32_3_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6836c;
+}
+
+static void
+Opcode_pop32_3_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_pop32_3_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd1;
+}
+
+static void
+Opcode_pop32_3_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3bc1c;
+}
+
+static void
+Opcode_pop32_3_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6406d;
+}
+
+static void
+Opcode_pop32_3_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1025;
+}
+
+static void
+Opcode_pop32_3_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc037;
+}
+
+static void
+Opcode_pop32_3_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800f4;
+}
+
+static void
+Opcode_pop32_3_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35822d;
+}
+
+static void
+Opcode_push128_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x970b00;
+}
+
+static void
+Opcode_push128_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd02;
+}
+
+static void
+Opcode_push128_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c102;
+}
+
+static void
+Opcode_push128_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4541;
+}
+
+static void
+Opcode_push128_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10040;
+}
+
+static void
+Opcode_push128_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40025;
+}
+
+static void
+Opcode_push128_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7c102;
+}
+
+static void
+Opcode_push128_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_push128_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_push128_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81d4;
+}
+
+static void
+Opcode_push128_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68001;
+}
+
+static void
+Opcode_push128_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_push128_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc033;
+}
+
+static void
+Opcode_push128_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x481411;
+}
+
+static void
+Opcode_push128_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35800d;
+}
+
+static void
+Opcode_push128_m_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf80000;
+}
+
+static void
+Opcode_push128_m_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46002;
+}
+
+static void
+Opcode_push128_m_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46002;
+}
+
+static void
+Opcode_push128_m_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60001;
+}
+
+static void
+Opcode_push128_m_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x526000;
+}
+
+static void
+Opcode_push128_pq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x162000;
+}
+
+static void
+Opcode_push128_pq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2fc00;
+}
+
+static void
+Opcode_push128_pq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11001;
+}
+
+static void
+Opcode_push128_pq_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d400;
+}
+
+static void
+Opcode_push128_pq_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2400;
+}
+
+static void
+Opcode_push128_pq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38018;
+}
+
+static void
+Opcode_push128_pq_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc802;
+}
+
+static void
+Opcode_push128_pq_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb803;
+}
+
+static void
+Opcode_push128_pq_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c9401;
+}
+
+static void
+Opcode_push128_pq_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e6801;
+}
+
+static void
+Opcode_push2x128_pq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x862000;
+}
+
+static void
+Opcode_push2x128_pq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33800;
+}
+
+static void
+Opcode_push2x128_pq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38018;
+}
+
+static void
+Opcode_push2x128_pq_Slot_acc2_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2800;
+}
+
+static void
+Opcode_push2x128_pq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x58018;
+}
+
+static void
+Opcode_push2x128_pq_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10100;
+}
+
+static void
+Opcode_push2x128_pq_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d6c01;
+}
+
+static void
+Opcode_push32_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdd0000;
+}
+
+static void
+Opcode_push32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32000;
+}
+
+static void
+Opcode_push32_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d400;
+}
+
+static void
+Opcode_push32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12001;
+}
+
+static void
+Opcode_push32_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xdc03;
+}
+
+static void
+Opcode_push32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d800;
+}
+
+static void
+Opcode_push32_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d400;
+}
+
+static void
+Opcode_push32_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_push32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11001;
+}
+
+static void
+Opcode_push32_Slot_smod_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5cc01;
+}
+
+static void
+Opcode_push32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_push32_Slot_llr_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbc03;
+}
+
+static void
+Opcode_push32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d1401;
+}
+
+static void
+Opcode_push32_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10142;
+}
+
+static void
+Opcode_qready_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e00;
+}
+
+static void
+Opcode_qready_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_qready_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_qready_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38080;
+}
+
+static void
+Opcode_qready_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10001;
+}
+
+static void
+Opcode_qready_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20002;
+}
+
+static void
+Opcode_qready_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240;
+}
+
+static void
+Opcode_rdtiep_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70b80;
+}
+
+static void
+Opcode_rdtiep_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38010;
+}
+
+static void
+Opcode_rdtiep_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b018;
+}
+
+static void
+Opcode_rdtiep_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3421c;
+}
+
+static void
+Opcode_rdtiep_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68018;
+}
+
+static void
+Opcode_rdtiep_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2025;
+}
+
+static void
+Opcode_rdtiep_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800f8;
+}
+
+static void
+Opcode_settiep_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b400;
+}
+
+static void
+Opcode_settiep_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32c80;
+}
+
+static void
+Opcode_settiep_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13001;
+}
+
+static void
+Opcode_settiep_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ec00;
+}
+
+static void
+Opcode_settiep_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13001;
+}
+
+static void
+Opcode_settiep_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4142;
+}
+
+static void
+Opcode_settiep_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_smod_lut_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4041;
+}
+
+static void
+Opcode_wrtbsigq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a020;
+}
+
+static void
+Opcode_wrtbsigq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32f00;
+}
+
+static void
+Opcode_wrtbsigq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x124c1;
+}
+
+static void
+Opcode_wrtbsigq_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ee00;
+}
+
+static void
+Opcode_wrtbsigq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11cc1;
+}
+
+static void
+Opcode_wrtbsigq_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5142;
+}
+
+static void
+Opcode_wrtbsigq_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcd0c0;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b800;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32d00;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12841;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ec80;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12841;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4182;
+}
+
+static void
+Opcode_wrtbsigqm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x244;
+}
+
+static void
+Opcode_wrtiep_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x262000;
+}
+
+static void
+Opcode_wrtiep_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33000;
+}
+
+static void
+Opcode_wrtiep_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11c01;
+}
+
+static void
+Opcode_wrtiep_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e800;
+}
+
+static void
+Opcode_wrtiep_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11801;
+}
+
+static void
+Opcode_wrtiep_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd802;
+}
+
+static void
+Opcode_wrtiep_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d5401;
+}
+
+static void
+Opcode_wrtsigq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a040;
+}
+
+static void
+Opcode_wrtsigq_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32ec0;
+}
+
+static void
+Opcode_wrtsigq_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12541;
+}
+
+static void
+Opcode_wrtsigq_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ed40;
+}
+
+static void
+Opcode_wrtsigq_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11d41;
+}
+
+static void
+Opcode_wrtsigq_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6142;
+}
+
+static void
+Opcode_wrtsigq_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xce0c0;
+}
+
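+/*
+ * The helpers from abs8 onwards appear to cover the vector/DSP ALU
+ * opcodes of this TIE (abs8, add16, add32, ..., xor128).  Each format
+ * slot has its own bit layout, which is why the same opcode is encoded
+ * with a different constant in each slot variant.
+ */
+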
+static void
+Opcode_abs8_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40019;
+}
+
+static void
+Opcode_abs8_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8014;
+}
+
+static void
+Opcode_abs8_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40019;
+}
+
+static void
+Opcode_abs8_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8014;
+}
+
+static void
+Opcode_abs8_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x19;
+}
+
+static void
+Opcode_abs8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c0;
+}
+
+static void
+Opcode_add16_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40031;
+}
+
+static void
+Opcode_add16_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8030;
+}
+
+static void
+Opcode_add16_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40031;
+}
+
+static void
+Opcode_add16_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8030;
+}
+
+static void
+Opcode_add16_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31;
+}
+
+static void
+Opcode_add16_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x481401;
+}
+
+static void
+Opcode_add32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_add32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_add32_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_add32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_add32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00040;
+}
+
+static void
+Opcode_addac_i2r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb78380;
+}
+
+static void
+Opcode_addac_i2r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a00c;
+}
+
+static void
+Opcode_addac_i2r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a00c;
+}
+
+static void
+Opcode_addac_i2r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35832d;
+}
+
+static void
+Opcode_addac_r2i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb78390;
+}
+
+static void
+Opcode_addac_r2i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a04c;
+}
+
+static void
+Opcode_addac_r2i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a04c;
+}
+
+static void
+Opcode_addac_r2i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3583ad;
+}
+
+static void
+Opcode_addar2_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e400;
+}
+
+static void
+Opcode_addar2_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_addar2_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d000;
+}
+
+static void
+Opcode_addar2_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_addar2_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addar2_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501401;
+}
+
+static void
+Opcode_addcm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40040;
+}
+
+static void
+Opcode_addcm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_addcm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40040;
+}
+
+static void
+Opcode_addcm_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_addcm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_addcm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_addcm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc40040;
+}
+
+static void
+Opcode_addwrp_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40080;
+}
+
+static void
+Opcode_addwrp_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_addwrp_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40080;
+}
+
+static void
+Opcode_addwrp_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_addwrp_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_addwrp_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc80040;
+}
+
+static void
+Opcode_and128_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40100;
+}
+
+static void
+Opcode_and128_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_and128_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40100;
+}
+
+static void
+Opcode_and128_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_and128_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_and128_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00040;
+}
+
+static void
+Opcode_argmax8_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc82;
+}
+
+static void
+Opcode_argmax8_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4141;
+}
+
+static void
+Opcode_argmax8_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_argmax8_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_argmax8_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4581;
+}
+
+static void
+Opcode_argmax8_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40140;
+}
+
+static void
+Opcode_argmax8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000c0;
+}
+
+static void
+Opcode_asl_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40200;
+}
+
+static void
+Opcode_asl_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_asl_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40200;
+}
+
+static void
+Opcode_asl_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_asl_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_asl_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00040;
+}
+
+static void
+Opcode_asl32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_asl32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_asl32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_asl32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_asl32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_asl32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c01;
+}
+
+static void
+Opcode_aslacm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbc8000;
+}
+
+static void
+Opcode_aslacm_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70002;
+}
+
+static void
+Opcode_aslacm_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e002;
+}
+
+static void
+Opcode_aslacm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x302;
+}
+
+static void
+Opcode_aslm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_aslm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_aslm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_aslm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_aslm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_aslm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140002;
+}
+
+static void
+Opcode_aslm32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_aslm32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4301;
+}
+
+static void
+Opcode_aslm32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_aslm32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4301;
+}
+
+static void
+Opcode_aslm32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40300;
+}
+
+static void
+Opcode_aslm32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400100;
+}
+
+static void
+Opcode_asr_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c0;
+}
+
+static void
+Opcode_asr_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_asr_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c0;
+}
+
+static void
+Opcode_asr_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001;
+}
+
+static void
+Opcode_asr_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_asr_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140;
+}
+
+static void
+Opcode_asr_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc0040;
+}
+
+static void
+Opcode_asr32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401;
+}
+
+static void
+Opcode_asr32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140;
+}
+
+static void
+Opcode_asr32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401;
+}
+
+static void
+Opcode_asr32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140;
+}
+
+static void
+Opcode_asr32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180;
+}
+
+static void
+Opcode_asr32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41c01;
+}
+
+static void
+Opcode_asrac_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x570b00;
+}
+
+static void
+Opcode_asrac_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a002;
+}
+
+static void
+Opcode_asrac_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e102;
+}
+
+static void
+Opcode_asrac_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80142;
+}
+
+static void
+Opcode_asrm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005;
+}
+
+static void
+Opcode_asrm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8004;
+}
+
+static void
+Opcode_asrm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005;
+}
+
+static void
+Opcode_asrm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8004;
+}
+
+static void
+Opcode_asrm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_asrm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180002;
+}
+
+static void
+Opcode_bitfext_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_bitfins_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_clb_c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770300;
+}
+
+static void
+Opcode_clb_c_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5802c;
+}
+
+static void
+Opcode_clb_c_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe403;
+}
+
+static void
+Opcode_clb_c_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5802c;
+}
+
+static void
+Opcode_clb_c_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34c02d;
+}
+
+static void
+Opcode_clb_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x578b00;
+}
+
+static void
+Opcode_clb_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a02c;
+}
+
+static void
+Opcode_clb_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe803;
+}
+
+static void
+Opcode_clb_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a02c;
+}
+
+static void
+Opcode_clb_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34e02d;
+}
+
+static void
+Opcode_cmp8_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009;
+}
+
+static void
+Opcode_cmp8_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8008;
+}
+
+static void
+Opcode_cmp8_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009;
+}
+
+static void
+Opcode_cmp8_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8008;
+}
+
+static void
+Opcode_cmp8_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_cmp8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140006;
+}
+
+static void
+Opcode_cmp_i_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40011;
+}
+
+static void
+Opcode_cmp_i_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8010;
+}
+
+static void
+Opcode_cmp_i_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40011;
+}
+
+static void
+Opcode_cmp_i_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8010;
+}
+
+static void
+Opcode_cmp_i_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11;
+}
+
+static void
+Opcode_cmp_i_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000a;
+}
+
+static void
+Opcode_cmp_r_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40021;
+}
+
+static void
+Opcode_cmp_r_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8020;
+}
+
+static void
+Opcode_cmp_r_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40021;
+}
+
+static void
+Opcode_cmp_r_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8020;
+}
+
+static void
+Opcode_cmp_r_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21;
+}
+
+static void
+Opcode_cmp_r_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140012;
+}
+
+static void
+Opcode_ext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd80000;
+}
+
+static void
+Opcode_ext_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a002;
+}
+
+static void
+Opcode_ext_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8002;
+}
+
+static void
+Opcode_ext_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a002;
+}
+
+static void
+Opcode_ext_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x524000;
+}
+
+static void
+Opcode_ext_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd88000;
+}
+
+static void
+Opcode_ext_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e002;
+}
+
+static void
+Opcode_ext_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_ext_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e002;
+}
+
+static void
+Opcode_ext_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x528000;
+}
+
+static void
+Opcode_ext32_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x170600;
+}
+
+static void
+Opcode_ext32_i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44028;
+}
+
+static void
+Opcode_ext32_i_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf402;
+}
+
+static void
+Opcode_ext32_i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x44028;
+}
+
+static void
+Opcode_ext32_i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340019;
+}
+
+static void
+Opcode_ext32_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270600;
+}
+
+static void
+Opcode_ext32_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c008;
+}
+
+static void
+Opcode_ext32_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf802;
+}
+
+static void
+Opcode_ext32_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4c008;
+}
+
+static void
+Opcode_ext32_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340039;
+}
+
+static void
+Opcode_extui4_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40140;
+}
+
+static void
+Opcode_extui4_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180;
+}
+
+static void
+Opcode_extui4_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40140;
+}
+
+static void
+Opcode_extui4_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180;
+}
+
+static void
+Opcode_extui4_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0;
+}
+
+static void
+Opcode_extui4_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd40040;
+}
+
+static void
+Opcode_lslm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d;
+}
+
+static void
+Opcode_lslm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_lslm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d;
+}
+
+static void
+Opcode_lslm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_lslm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_lslm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140022;
+}
+
+static void
+Opcode_lsrm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40015;
+}
+
+static void
+Opcode_lsrm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c;
+}
+
+static void
+Opcode_lsrm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40015;
+}
+
+static void
+Opcode_lsrm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800c;
+}
+
+static void
+Opcode_lsrm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_lsrm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000e;
+}
+
+static void
+Opcode_max8_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40280;
+}
+
+static void
+Opcode_max8_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_max8_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40280;
+}
+
+static void
+Opcode_max8_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_max8_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0;
+}
+
+static void
+Opcode_max8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe80040;
+}
+
+static void
+Opcode_mean_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40300;
+}
+
+static void
+Opcode_mean_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0;
+}
+
+static void
+Opcode_mean_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40300;
+}
+
+static void
+Opcode_mean_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0;
+}
+
+static void
+Opcode_mean_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340;
+}
+
+static void
+Opcode_mean_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00040;
+}
+
+static void
+Opcode_mean32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402c0;
+}
+
+static void
+Opcode_mean32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340;
+}
+
+static void
+Opcode_mean32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402c0;
+}
+
+static void
+Opcode_mean32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x340;
+}
+
+static void
+Opcode_mean32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380;
+}
+
+static void
+Opcode_mean32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec0040;
+}
+
+static void
+Opcode_min8_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40340;
+}
+
+static void
+Opcode_min8_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380;
+}
+
+static void
+Opcode_min8_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40340;
+}
+
+static void
+Opcode_min8_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380;
+}
+
+static void
+Opcode_min8_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0;
+}
+
+static void
+Opcode_min8_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf40040;
+}
+
+static void
+Opcode_minclb_c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x770b00;
+}
+
+static void
+Opcode_minclb_c_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002c;
+}
+
+static void
+Opcode_minclb_c_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf003;
+}
+
+static void
+Opcode_minclb_c_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002c;
+}
+
+static void
+Opcode_minclb_c_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35002d;
+}
+
+static void
+Opcode_minclb_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x778300;
+}
+
+static void
+Opcode_minclb_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6800c;
+}
+
+static void
+Opcode_minclb_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe407;
+}
+
+static void
+Opcode_minclb_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6800c;
+}
+
+static void
+Opcode_minclb_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35400d;
+}
+
+static void
+Opcode_not128_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4005d;
+}
+
+static void
+Opcode_not128_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8154;
+}
+
+static void
+Opcode_not128_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4005d;
+}
+
+static void
+Opcode_not128_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8154;
+}
+
+static void
+Opcode_not128_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5d;
+}
+
+static void
+Opcode_not128_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800e8;
+}
+
+static void
+Opcode_or128_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40380;
+}
+
+static void
+Opcode_or128_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_or128_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40380;
+}
+
+static void
+Opcode_or128_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8001;
+}
+
+static void
+Opcode_or128_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_or128_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf80040;
+}
+
+static void
+Opcode_perm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_perm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_perm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x802;
+}
+
+static void
+Opcode_perm_Slot_acc2_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003;
+}
+
+static void
+Opcode_perm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc001;
+}
+
+static void
+Opcode_perm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_perm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x441401;
+}
+
+static void
+Opcode_redac_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783a0;
+}
+
+static void
+Opcode_redac_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a08c;
+}
+
+static void
+Opcode_redac_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a08c;
+}
+
+static void
+Opcode_redac_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a32d;
+}
+
+static void
+Opcode_redac2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783c0;
+}
+
+static void
+Opcode_redac2_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a10c;
+}
+
+static void
+Opcode_redac2_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a10c;
+}
+
+static void
+Opcode_redac2_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a3ad;
+}
+
+static void
+Opcode_redac4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783b0;
+}
+
+static void
+Opcode_redac4_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a20c;
+}
+
+static void
+Opcode_redac4_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a20c;
+}
+
+static void
+Opcode_redac4_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35806d;
+}
+
+static void
+Opcode_redacs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783d0;
+}
+
+static void
+Opcode_redacs_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0cc;
+}
+
+static void
+Opcode_redacs_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0cc;
+}
+
+static void
+Opcode_redacs_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3580cd;
+}
+
+static void
+Opcode_sminclb_c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x778b00;
+}
+
+static void
+Opcode_sminclb_c_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000c;
+}
+
+static void
+Opcode_sminclb_c_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe807;
+}
+
+static void
+Opcode_sminclb_c_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000c;
+}
+
+static void
+Opcode_sminclb_c_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35202d;
+}
+
+static void
+Opcode_sminclb_r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd70300;
+}
+
+static void
+Opcode_sminclb_r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6202c;
+}
+
+static void
+Opcode_sminclb_r_Slot_dot_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec03;
+}
+
+static void
+Opcode_sminclb_r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6202c;
+}
+
+static void
+Opcode_sminclb_r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35600d;
+}
+
+static void
+Opcode_stswapbm_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9c0000;
+}
+
+static void
+Opcode_stswapbm_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4e8000;
+}
+
+static void
+Opcode_stswapbmu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xad0000;
+}
+
+static void
+Opcode_stswapbmu_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f0000;
+}
+
+static void
+Opcode_sub32_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403c0;
+}
+
+static void
+Opcode_sub32_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4041;
+}
+
+static void
+Opcode_sub32_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403c0;
+}
+
+static void
+Opcode_sub32_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4081;
+}
+
+static void
+Opcode_sub32_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40040;
+}
+
+static void
+Opcode_sub32_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xfc0040;
+}
+
+static void
+Opcode_subac_i2r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783e0;
+}
+
+static void
+Opcode_subac_i2r_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a14c;
+}
+
+static void
+Opcode_subac_i2r_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a14c;
+}
+
+static void
+Opcode_subac_i2r_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35814d;
+}
+
+static void
+Opcode_subac_r2i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb783f0;
+}
+
+static void
+Opcode_subac_r2i_Slot_gp_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a18c;
+}
+
+static void
+Opcode_subac_r2i_Slot_pq_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a18c;
+}
+
+static void
+Opcode_subac_r2i_Slot_dual_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35824d;
+}
+
+static void
+Opcode_subarx_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32800;
+}
+
+static void
+Opcode_subarx_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11401;
+}
+
+static void
+Opcode_subarx_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e400;
+}
+
+static void
+Opcode_subarx_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c01;
+}
+
+static void
+Opcode_subarx_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc02;
+}
+
+static void
+Opcode_subarx_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4cd401;
+}
+
+static void
+Opcode_subcm_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_subcm_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4081;
+}
+
+static void
+Opcode_subcm_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_subcm_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4101;
+}
+
+static void
+Opcode_subcm_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40080;
+}
+
+static void
+Opcode_subcm_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_submean_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40042;
+}
+
+static void
+Opcode_submean_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4101;
+}
+
+static void
+Opcode_submean_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40042;
+}
+
+static void
+Opcode_submean_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4201;
+}
+
+static void
+Opcode_submean_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40100;
+}
+
+static void
+Opcode_submean_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400080;
+}
+
+static void
+Opcode_subwrp_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40082;
+}
+
+static void
+Opcode_subwrp_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4201;
+}
+
+static void
+Opcode_subwrp_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40082;
+}
+
+static void
+Opcode_subwrp_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c1;
+}
+
+static void
+Opcode_subwrp_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40200;
+}
+
+static void
+Opcode_subwrp_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800080;
+}
+
+static void
+Opcode_trans_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4009d;
+}
+
+static void
+Opcode_trans_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8194;
+}
+
+static void
+Opcode_trans_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4009d;
+}
+
+static void
+Opcode_trans_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8194;
+}
+
+static void
+Opcode_trans_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9d;
+}
+
+static void
+Opcode_trans_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800f0;
+}
+
+static void
+Opcode_xor128_Slot_gp_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40102;
+}
+
+static void
+Opcode_xor128_Slot_dot_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c1;
+}
+
+static void
+Opcode_xor128_Slot_pq_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40102;
+}
+
+static void
+Opcode_xor128_Slot_smod_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4141;
+}
+
+static void
+Opcode_xor128_Slot_llr_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c0;
+}
+
+static void
+Opcode_xor128_Slot_dual_slot2_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c0;
+}
+
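+/*
+ * RUR/WUR encodings for the TIE user registers.  The constants follow
+ * a regular pattern: the RUR variants advance by 0x10 from 0xe30000
+ * and the WUR variants by 0x100 from 0xf30000, one pair per user
+ * register (expstate, sov, sat_mode, sar0..3, hsar0..3, ...).
+ */
+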
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30000;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30000;
+}
+
+static void
+Opcode_rur_sov_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30010;
+}
+
+static void
+Opcode_wur_sov_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30100;
+}
+
+static void
+Opcode_rur_sat_mode_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30020;
+}
+
+static void
+Opcode_wur_sat_mode_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30200;
+}
+
+static void
+Opcode_rur_sar0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30030;
+}
+
+static void
+Opcode_wur_sar0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30300;
+}
+
+static void
+Opcode_rur_sar1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30040;
+}
+
+static void
+Opcode_wur_sar1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30400;
+}
+
+static void
+Opcode_rur_sar2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30050;
+}
+
+static void
+Opcode_wur_sar2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30500;
+}
+
+static void
+Opcode_rur_sar3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30060;
+}
+
+static void
+Opcode_wur_sar3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30600;
+}
+
+static void
+Opcode_rur_hsar0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30070;
+}
+
+static void
+Opcode_wur_hsar0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30700;
+}
+
+static void
+Opcode_rur_hsar1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30080;
+}
+
+static void
+Opcode_wur_hsar1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30800;
+}
+
+static void
+Opcode_rur_hsar2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30090;
+}
+
+static void
+Opcode_wur_hsar2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30900;
+}
+
+static void
+Opcode_rur_hsar3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300a0;
+}
+
+static void
+Opcode_wur_hsar3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30a00;
+}
+
+static void
+Opcode_rur_max_reg_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300b0;
+}
+
+static void
+Opcode_wur_max_reg_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30b00;
+}
+
+static void
+Opcode_rur_max_reg_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300c0;
+}
+
+static void
+Opcode_wur_max_reg_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30c00;
+}
+
+static void
+Opcode_rur_max_reg_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300d0;
+}
+
+static void
+Opcode_wur_max_reg_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30d00;
+}
+
+static void
+Opcode_rur_max_reg_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300e0;
+}
+
+static void
+Opcode_wur_max_reg_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30e00;
+}
+
+static void
+Opcode_rur_arg_max_reg_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe300f0;
+}
+
+static void
+Opcode_wur_arg_max_reg_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf30f00;
+}
+
+static void
+Opcode_rur_arg_max_reg_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30100;
+}
+
+static void
+Opcode_wur_arg_max_reg_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31000;
+}
+
+static void
+Opcode_rur_arg_max_reg_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30110;
+}
+
+static void
+Opcode_wur_arg_max_reg_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31100;
+}
+
+static void
+Opcode_rur_arg_max_reg_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30120;
+}
+
+static void
+Opcode_wur_arg_max_reg_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31200;
+}
+
+static void
+Opcode_rur_nco_counter_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30130;
+}
+
+static void
+Opcode_wur_nco_counter_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31300;
+}
+
+static void
+Opcode_rur_nco_counter_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30140;
+}
+
+static void
+Opcode_wur_nco_counter_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31400;
+}
+
+static void
+Opcode_rur_nco_counter_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30150;
+}
+
+static void
+Opcode_wur_nco_counter_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31500;
+}
+
+static void
+Opcode_rur_nco_counter_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30160;
+}
+
+static void
+Opcode_wur_nco_counter_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31600;
+}
+
+static void
+Opcode_rur_interp_ext_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30170;
+}
+
+static void
+Opcode_wur_interp_ext_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31700;
+}
+
+static void
+Opcode_rur_interp_ext_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30180;
+}
+
+static void
+Opcode_wur_interp_ext_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31800;
+}
+
+static void
+Opcode_rur_llr_buf_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30190;
+}
+
+static void
+Opcode_wur_llr_buf_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31900;
+}
+
+static void
+Opcode_rur_llr_buf_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301a0;
+}
+
+static void
+Opcode_wur_llr_buf_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31a00;
+}
+
+static void
+Opcode_rur_llr_buf_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301b0;
+}
+
+static void
+Opcode_wur_llr_buf_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31b00;
+}
+
+static void
+Opcode_rur_llr_buf_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301c0;
+}
+
+static void
+Opcode_wur_llr_buf_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31c00;
+}
+
+static void
+Opcode_rur_llr_buf_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301d0;
+}
+
+static void
+Opcode_wur_llr_buf_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31d00;
+}
+
+static void
+Opcode_rur_llr_buf_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301e0;
+}
+
+static void
+Opcode_wur_llr_buf_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31e00;
+}
+
+static void
+Opcode_rur_llr_buf_6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe301f0;
+}
+
+static void
+Opcode_wur_llr_buf_6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf31f00;
+}
+
+static void
+Opcode_rur_llr_buf_7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30200;
+}
+
+static void
+Opcode_wur_llr_buf_7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32000;
+}
+
+static void
+Opcode_rur_llr_buf_8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30210;
+}
+
+static void
+Opcode_wur_llr_buf_8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32100;
+}
+
+static void
+Opcode_rur_llr_buf_9_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30220;
+}
+
+static void
+Opcode_wur_llr_buf_9_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32200;
+}
+
+static void
+Opcode_rur_llr_buf_10_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30230;
+}
+
+static void
+Opcode_wur_llr_buf_10_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32300;
+}
+
+static void
+Opcode_rur_llr_buf_11_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30240;
+}
+
+static void
+Opcode_wur_llr_buf_11_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32400;
+}
+
+static void
+Opcode_rur_llr_buf_12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30250;
+}
+
+static void
+Opcode_wur_llr_buf_12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32500;
+}
+
+static void
+Opcode_rur_llr_buf_13_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30260;
+}
+
+static void
+Opcode_wur_llr_buf_13_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32600;
+}
+
+static void
+Opcode_rur_llr_buf_14_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30270;
+}
+
+static void
+Opcode_wur_llr_buf_14_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32700;
+}
+
+static void
+Opcode_rur_llr_buf_15_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30280;
+}
+
+static void
+Opcode_wur_llr_buf_15_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32800;
+}
+
+static void
+Opcode_rur_llr_buf_16_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30290;
+}
+
+static void
+Opcode_wur_llr_buf_16_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32900;
+}
+
+static void
+Opcode_rur_llr_buf_17_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302a0;
+}
+
+static void
+Opcode_wur_llr_buf_17_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32a00;
+}
+
+static void
+Opcode_rur_llr_buf_18_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302b0;
+}
+
+static void
+Opcode_wur_llr_buf_18_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32b00;
+}
+
+static void
+Opcode_rur_llr_buf_19_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302c0;
+}
+
+static void
+Opcode_wur_llr_buf_19_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32c00;
+}
+
+static void
+Opcode_rur_llr_buf_20_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302d0;
+}
+
+static void
+Opcode_wur_llr_buf_20_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32d00;
+}
+
+static void
+Opcode_rur_llr_buf_21_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302e0;
+}
+
+static void
+Opcode_wur_llr_buf_21_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32e00;
+}
+
+static void
+Opcode_rur_llr_buf_22_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe302f0;
+}
+
+static void
+Opcode_wur_llr_buf_22_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf32f00;
+}
+
+static void
+Opcode_rur_llr_buf_23_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30300;
+}
+
+static void
+Opcode_wur_llr_buf_23_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33000;
+}
+
+static void
+Opcode_rur_smod_buf_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30310;
+}
+
+static void
+Opcode_wur_smod_buf_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33100;
+}
+
+static void
+Opcode_rur_smod_buf_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30320;
+}
+
+static void
+Opcode_wur_smod_buf_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33200;
+}
+
+static void
+Opcode_rur_smod_buf_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30330;
+}
+
+static void
+Opcode_wur_smod_buf_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33300;
+}
+
+static void
+Opcode_rur_smod_buf_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30340;
+}
+
+static void
+Opcode_wur_smod_buf_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33400;
+}
+
+static void
+Opcode_rur_smod_buf_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30350;
+}
+
+static void
+Opcode_wur_smod_buf_4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33500;
+}
+
+static void
+Opcode_rur_smod_buf_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30360;
+}
+
+static void
+Opcode_wur_smod_buf_5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33600;
+}
+
+static void
+Opcode_rur_smod_buf_6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30370;
+}
+
+static void
+Opcode_wur_smod_buf_6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33700;
+}
+
+static void
+Opcode_rur_smod_buf_7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30380;
+}
+
+static void
+Opcode_wur_smod_buf_7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33800;
+}
+
+static void
+Opcode_rur_weight_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30390;
+}
+
+static void
+Opcode_wur_weight_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33900;
+}
+
+static void
+Opcode_rur_scale_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303a0;
+}
+
+static void
+Opcode_wur_scale_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33a00;
+}
+
+static void
+Opcode_rur_llr_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303b0;
+}
+
+static void
+Opcode_wur_llr_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33b00;
+}
+
+static void
+Opcode_rur_smod_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303c0;
+}
+
+static void
+Opcode_wur_smod_pos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33c00;
+}
+
+static void
+Opcode_rur_perm_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303d0;
+}
+
+static void
+Opcode_wur_perm_reg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33d00;
+}
+
+static void
+Opcode_rur_smod_offset_table_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303e0;
+}
+
+static void
+Opcode_wur_smod_offset_table_0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33e00;
+}
+
+static void
+Opcode_rur_smod_offset_table_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe303f0;
+}
+
+static void
+Opcode_wur_smod_offset_table_1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33f00;
+}
+
+static void
+Opcode_rur_smod_offset_table_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30400;
+}
+
+static void
+Opcode_wur_smod_offset_table_2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf34000;
+}
+
+static void
+Opcode_rur_smod_offset_table_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30410;
+}
+
+static void
+Opcode_wur_smod_offset_table_3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf34100;
+}
+
+static void
+Opcode_rur_phasor_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30420;
+}
+
+static void
+Opcode_wur_phasor_n_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf34200;
+}
+
+static void
+Opcode_rur_phasor_offset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30430;
+}
+
+static void
+Opcode_wur_phasor_offset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf34300;
+}
+
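+/*
+ * Opcode -> encoder dispatch tables.  Each xtensa_opcode_encode_fn array
+ * has one entry per instruction-format slot (inst, inst16a, inst16b, then
+ * the gp/dot/pq/acc2/smod/llr/dual slots of the wide formats, slot 2 down
+ * to slot 0 within each group); a zero entry means the opcode cannot be
+ * encoded in that slot.
+ */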
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
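+/*
+ * Special-register accessors come in RSR (read), WSR (write) and XSR
+ * (exchange) triples.  Registers with restricted access provide only a
+ * subset: PRID and INTERRUPT below have just the RSR entry, while pending
+ * interrupts are written through the separate INTSET/INTCLEAR WSR entries.
+ */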
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0, Opcode_addi_n_Slot_gp_slot2_encode, 0, Opcode_addi_n_Slot_gp_slot0_encode, 0, 0, Opcode_addi_n_Slot_dot_slot0_encode, Opcode_addi_n_Slot_pq_slot2_encode, 0, Opcode_addi_n_Slot_pq_slot0_encode, 0, 0, Opcode_addi_n_Slot_acc2_slot0_encode, 0, 0, Opcode_addi_n_Slot_smod_slot0_encode, 0, 0, Opcode_addi_n_Slot_llr_slot0_encode, Opcode_addi_n_Slot_dual_slot2_encode, 0, Opcode_addi_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode, 0, 0, Opcode_beqz_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_beqz_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_beqz_n_Slot_smod_slot0_encode, 0, 0, 0, Opcode_beqz_n_Slot_dual_slot2_encode, 0, Opcode_beqz_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode, 0, 0, Opcode_bnez_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_bnez_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_bnez_n_Slot_smod_slot0_encode, 0, 0, 0, Opcode_bnez_n_Slot_dual_slot2_encode, 0, Opcode_bnez_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0, 0, 0, Opcode_l32i_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_l32i_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_l32i_n_Slot_smod_slot0_encode, 0, 0, Opcode_l32i_n_Slot_llr_slot0_encode, 0, 0, Opcode_l32i_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode, Opcode_mov_n_Slot_gp_slot2_encode, 0, Opcode_mov_n_Slot_gp_slot0_encode, 0, 0, Opcode_mov_n_Slot_dot_slot0_encode, Opcode_mov_n_Slot_pq_slot2_encode, 0, Opcode_mov_n_Slot_pq_slot0_encode, 0, 0, Opcode_mov_n_Slot_acc2_slot0_encode, 0, 0, Opcode_mov_n_Slot_smod_slot0_encode, 0, 0, Opcode_mov_n_Slot_llr_slot0_encode, Opcode_mov_n_Slot_dual_slot2_encode, 0, Opcode_mov_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode, Opcode_movi_n_Slot_gp_slot2_encode, 0, Opcode_movi_n_Slot_gp_slot0_encode, 0, 0, Opcode_movi_n_Slot_dot_slot0_encode, Opcode_movi_n_Slot_pq_slot2_encode, 0, Opcode_movi_n_Slot_pq_slot0_encode, 0, 0, Opcode_movi_n_Slot_acc2_slot0_encode, 0, 0, Opcode_movi_n_Slot_smod_slot0_encode, 0, 0, Opcode_movi_n_Slot_llr_slot0_encode, Opcode_movi_n_Slot_dual_slot2_encode, 0, Opcode_movi_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0, 0, 0, Opcode_s32i_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_s32i_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_s32i_n_Slot_smod_slot0_encode, 0, 0, Opcode_s32i_n_Slot_llr_slot0_encode, 0, 0, Opcode_s32i_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_addi_Slot_dual_slot2_encode, 0, Opcode_addi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_addmi_Slot_dual_slot2_encode, 0, Opcode_addmi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0, Opcode_add_Slot_gp_slot2_encode, 0, Opcode_add_Slot_gp_slot0_encode, 0, 0, Opcode_add_Slot_dot_slot0_encode, Opcode_add_Slot_pq_slot2_encode, 0, Opcode_add_Slot_pq_slot0_encode, 0, 0, Opcode_add_Slot_acc2_slot0_encode, 0, 0, Opcode_add_Slot_smod_slot0_encode, 0, 0, Opcode_add_Slot_llr_slot0_encode, Opcode_add_Slot_dual_slot2_encode, 0, Opcode_add_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0, Opcode_sub_Slot_gp_slot2_encode, 0, Opcode_sub_Slot_gp_slot0_encode, 0, 0, Opcode_sub_Slot_dot_slot0_encode, Opcode_sub_Slot_pq_slot2_encode, 0, Opcode_sub_Slot_pq_slot0_encode, 0, 0, Opcode_sub_Slot_acc2_slot0_encode, 0, 0, Opcode_sub_Slot_smod_slot0_encode, 0, 0, Opcode_sub_Slot_llr_slot0_encode, Opcode_sub_Slot_dual_slot2_encode, 0, Opcode_sub_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0, 0, 0, Opcode_addx2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx2_Slot_smod_slot0_encode, 0, 0, 0, Opcode_addx2_Slot_dual_slot2_encode, 0, Opcode_addx2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0, 0, 0, Opcode_addx4_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx4_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx4_Slot_smod_slot0_encode, 0, 0, 0, Opcode_addx4_Slot_dual_slot2_encode, 0, Opcode_addx4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0, 0, 0, Opcode_addx8_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx8_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_addx8_Slot_smod_slot0_encode, 0, 0, 0, Opcode_addx8_Slot_dual_slot2_encode, 0, Opcode_addx8_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0, 0, 0, Opcode_subx2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx2_Slot_smod_slot0_encode, 0, 0, 0, Opcode_subx2_Slot_dual_slot2_encode, 0, Opcode_subx2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0, 0, 0, Opcode_subx4_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx4_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx4_Slot_smod_slot0_encode, 0, 0, 0, Opcode_subx4_Slot_dual_slot2_encode, 0, Opcode_subx4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0, 0, 0, Opcode_subx8_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx8_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_subx8_Slot_smod_slot0_encode, 0, 0, 0, Opcode_subx8_Slot_dual_slot2_encode, 0, Opcode_subx8_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0, Opcode_and_Slot_gp_slot2_encode, 0, Opcode_and_Slot_gp_slot0_encode, 0, 0, Opcode_and_Slot_dot_slot0_encode, Opcode_and_Slot_pq_slot2_encode, 0, Opcode_and_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_and_Slot_smod_slot0_encode, 0, 0, Opcode_and_Slot_llr_slot0_encode, Opcode_and_Slot_dual_slot2_encode, 0, Opcode_and_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0, Opcode_or_Slot_gp_slot2_encode, 0, Opcode_or_Slot_gp_slot0_encode, 0, 0, Opcode_or_Slot_dot_slot0_encode, Opcode_or_Slot_pq_slot2_encode, 0, Opcode_or_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_or_Slot_smod_slot0_encode, 0, 0, Opcode_or_Slot_llr_slot0_encode, Opcode_or_Slot_dual_slot2_encode, 0, Opcode_or_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0, Opcode_xor_Slot_gp_slot2_encode, 0, Opcode_xor_Slot_gp_slot0_encode, 0, 0, Opcode_xor_Slot_dot_slot0_encode, Opcode_xor_Slot_pq_slot2_encode, 0, Opcode_xor_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_xor_Slot_smod_slot0_encode, 0, 0, Opcode_xor_Slot_llr_slot0_encode, Opcode_xor_Slot_dual_slot2_encode, 0, Opcode_xor_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_beqi_Slot_dual_slot2_encode, 0, Opcode_beqi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bnei_Slot_dual_slot2_encode, 0, Opcode_bnei_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bgei_Slot_dual_slot2_encode, 0, Opcode_bgei_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_blti_Slot_dual_slot2_encode, 0, Opcode_blti_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bbci_Slot_dual_slot2_encode, 0, Opcode_bbci_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bbsi_Slot_dual_slot2_encode, 0, Opcode_bbsi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bgeui_Slot_dual_slot2_encode, 0, Opcode_bgeui_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bltui_Slot_dual_slot2_encode, 0, Opcode_bltui_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_beq_Slot_dual_slot2_encode, 0, Opcode_beq_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bne_Slot_dual_slot2_encode, 0, Opcode_bne_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bge_Slot_dual_slot2_encode, 0, Opcode_bge_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_blt_Slot_dual_slot2_encode, 0, Opcode_blt_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bgeu_Slot_dual_slot2_encode, 0, Opcode_bgeu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bltu_Slot_dual_slot2_encode, 0, Opcode_bltu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bany_Slot_dual_slot2_encode, 0, Opcode_bany_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bnone_Slot_dual_slot2_encode, 0, Opcode_bnone_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ball_Slot_dual_slot2_encode, 0, Opcode_ball_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bnall_Slot_dual_slot2_encode, 0, Opcode_bnall_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bbc_Slot_dual_slot2_encode, 0, Opcode_bbc_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bbs_Slot_dual_slot2_encode, 0, Opcode_bbs_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_beqz_Slot_dual_slot2_encode, 0, Opcode_beqz_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bnez_Slot_dual_slot2_encode, 0, Opcode_bnez_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bgez_Slot_dual_slot2_encode, 0, Opcode_bgez_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bltz_Slot_dual_slot2_encode, 0, Opcode_bltz_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0, Opcode_extui_Slot_gp_slot2_encode, 0, Opcode_extui_Slot_gp_slot0_encode, 0, 0, 0, Opcode_extui_Slot_pq_slot2_encode, 0, Opcode_extui_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_extui_Slot_smod_slot0_encode, 0, 0, 0, Opcode_extui_Slot_dual_slot2_encode, 0, Opcode_extui_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_l16ui_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_l16si_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_l32i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_l32r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_l8ui_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movi_Slot_dual_slot2_encode, 0, Opcode_movi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0, Opcode_moveqz_Slot_gp_slot2_encode, 0, Opcode_moveqz_Slot_gp_slot0_encode, 0, 0, 0, Opcode_moveqz_Slot_pq_slot2_encode, 0, Opcode_moveqz_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_moveqz_Slot_smod_slot0_encode, 0, 0, 0, Opcode_moveqz_Slot_dual_slot2_encode, 0, Opcode_moveqz_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0, Opcode_movnez_Slot_gp_slot2_encode, 0, Opcode_movnez_Slot_gp_slot0_encode, 0, 0, 0, Opcode_movnez_Slot_pq_slot2_encode, 0, Opcode_movnez_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_movnez_Slot_smod_slot0_encode, 0, 0, 0, Opcode_movnez_Slot_dual_slot2_encode, 0, Opcode_movnez_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0, Opcode_movltz_Slot_gp_slot2_encode, 0, Opcode_movltz_Slot_gp_slot0_encode, 0, 0, 0, Opcode_movltz_Slot_pq_slot2_encode, 0, Opcode_movltz_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_movltz_Slot_smod_slot0_encode, 0, 0, 0, Opcode_movltz_Slot_dual_slot2_encode, 0, Opcode_movltz_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0, Opcode_movgez_Slot_gp_slot2_encode, 0, Opcode_movgez_Slot_gp_slot0_encode, 0, 0, 0, Opcode_movgez_Slot_pq_slot2_encode, 0, Opcode_movgez_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_movgez_Slot_smod_slot0_encode, 0, 0, 0, Opcode_movgez_Slot_dual_slot2_encode, 0, Opcode_movgez_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0, Opcode_neg_Slot_gp_slot2_encode, 0, Opcode_neg_Slot_gp_slot0_encode, 0, 0, Opcode_neg_Slot_dot_slot0_encode, Opcode_neg_Slot_pq_slot2_encode, 0, Opcode_neg_Slot_pq_slot0_encode, 0, 0, Opcode_neg_Slot_acc2_slot0_encode, 0, 0, Opcode_neg_Slot_smod_slot0_encode, 0, 0, Opcode_neg_Slot_llr_slot0_encode, Opcode_neg_Slot_dual_slot2_encode, 0, Opcode_neg_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0, Opcode_abs_Slot_gp_slot2_encode, 0, Opcode_abs_Slot_gp_slot0_encode, 0, 0, 0, Opcode_abs_Slot_pq_slot2_encode, 0, Opcode_abs_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_abs_Slot_smod_slot0_encode, 0, 0, 0, Opcode_abs_Slot_dual_slot2_encode, 0, Opcode_abs_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0, Opcode_nop_Slot_gp_slot2_encode, Opcode_nop_Slot_gp_slot1_encode, Opcode_nop_Slot_gp_slot0_encode, Opcode_nop_Slot_dot_slot2_encode, Opcode_nop_Slot_dot_slot1_encode, Opcode_nop_Slot_dot_slot0_encode, Opcode_nop_Slot_pq_slot2_encode, Opcode_nop_Slot_pq_slot1_encode, Opcode_nop_Slot_pq_slot0_encode, Opcode_nop_Slot_acc2_slot2_encode, Opcode_nop_Slot_acc2_slot1_encode, Opcode_nop_Slot_acc2_slot0_encode, Opcode_nop_Slot_smod_slot2_encode, Opcode_nop_Slot_smod_slot1_encode, Opcode_nop_Slot_smod_slot0_encode, Opcode_nop_Slot_llr_slot2_encode, Opcode_nop_Slot_llr_slot1_encode, Opcode_nop_Slot_llr_slot0_encode, Opcode_nop_Slot_dual_slot2_encode, Opcode_nop_Slot_dual_slot1_encode, Opcode_nop_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_s16i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_s32i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_s8i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0, 0, 0, Opcode_ssr_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssr_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssr_Slot_smod_slot0_encode, 0, 0, 0, Opcode_ssr_Slot_dual_slot2_encode, 0, Opcode_ssr_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0, 0, 0, Opcode_ssl_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssl_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssl_Slot_smod_slot0_encode, 0, 0, 0, Opcode_ssl_Slot_dual_slot2_encode, 0, Opcode_ssl_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0, 0, 0, Opcode_ssa8l_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssa8l_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssa8l_Slot_smod_slot0_encode, 0, 0, 0, Opcode_ssa8l_Slot_dual_slot2_encode, 0, Opcode_ssa8l_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0, 0, 0, Opcode_ssa8b_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssa8b_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssa8b_Slot_smod_slot0_encode, 0, 0, 0, Opcode_ssa8b_Slot_dual_slot2_encode, 0, Opcode_ssa8b_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0, 0, 0, Opcode_ssai_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssai_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ssai_Slot_smod_slot0_encode, 0, 0, 0, Opcode_ssai_Slot_dual_slot2_encode, 0, Opcode_ssai_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0, Opcode_sll_Slot_gp_slot2_encode, 0, Opcode_sll_Slot_gp_slot0_encode, 0, 0, Opcode_sll_Slot_dot_slot0_encode, Opcode_sll_Slot_pq_slot2_encode, 0, Opcode_sll_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_sll_Slot_smod_slot0_encode, 0, 0, Opcode_sll_Slot_llr_slot0_encode, Opcode_sll_Slot_dual_slot2_encode, 0, Opcode_sll_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0, 0, 0, Opcode_src_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_src_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_src_Slot_smod_slot0_encode, 0, 0, 0, Opcode_src_Slot_dual_slot2_encode, 0, Opcode_src_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0, 0, 0, Opcode_srl_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_srl_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_srl_Slot_smod_slot0_encode, 0, 0, 0, Opcode_srl_Slot_dual_slot2_encode, 0, Opcode_srl_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0, Opcode_sra_Slot_gp_slot2_encode, 0, Opcode_sra_Slot_gp_slot0_encode, 0, 0, Opcode_sra_Slot_dot_slot0_encode, Opcode_sra_Slot_pq_slot2_encode, 0, Opcode_sra_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_sra_Slot_smod_slot0_encode, 0, 0, Opcode_sra_Slot_llr_slot0_encode, Opcode_sra_Slot_dual_slot2_encode, 0, Opcode_sra_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0, 0, 0, Opcode_slli_Slot_gp_slot0_encode, 0, 0, Opcode_slli_Slot_dot_slot0_encode, 0, 0, Opcode_slli_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_slli_Slot_smod_slot0_encode, 0, 0, Opcode_slli_Slot_llr_slot0_encode, Opcode_slli_Slot_dual_slot2_encode, 0, Opcode_slli_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0, 0, 0, Opcode_srai_Slot_gp_slot0_encode, 0, 0, Opcode_srai_Slot_dot_slot0_encode, 0, 0, Opcode_srai_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_srai_Slot_smod_slot0_encode, 0, 0, Opcode_srai_Slot_llr_slot0_encode, Opcode_srai_Slot_dual_slot2_encode, 0, Opcode_srai_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0, 0, 0, Opcode_srli_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_srli_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_srli_Slot_smod_slot0_encode, 0, 0, 0, Opcode_srli_Slot_dual_slot2_encode, 0, Opcode_srli_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_176_encode_fns[] = {
+ Opcode_rsr_176_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_176_encode_fns[] = {
+ Opcode_wsr_176_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_208_encode_fns[] = {
+ Opcode_rsr_208_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_andb_encode_fns[] = {
+ Opcode_andb_Slot_inst_encode, 0, 0, 0, 0, Opcode_andb_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_andb_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_andb_Slot_smod_slot0_encode, 0, 0, 0, Opcode_andb_Slot_dual_slot2_encode, 0, Opcode_andb_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_andbc_encode_fns[] = {
+ Opcode_andbc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_orb_encode_fns[] = {
+ Opcode_orb_Slot_inst_encode, 0, 0, 0, 0, Opcode_orb_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_orb_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_orb_Slot_smod_slot0_encode, 0, 0, 0, Opcode_orb_Slot_dual_slot2_encode, 0, Opcode_orb_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_orbc_encode_fns[] = {
+ Opcode_orbc_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xorb_encode_fns[] = {
+ Opcode_xorb_Slot_inst_encode, 0, 0, 0, 0, Opcode_xorb_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_xorb_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_xorb_Slot_smod_slot0_encode, 0, 0, 0, Opcode_xorb_Slot_dual_slot2_encode, 0, Opcode_xorb_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_any4_encode_fns[] = {
+ Opcode_any4_Slot_inst_encode, 0, 0, 0, 0, Opcode_any4_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_any4_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_any4_Slot_smod_slot0_encode, 0, 0, 0, Opcode_any4_Slot_dual_slot2_encode, 0, Opcode_any4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all4_encode_fns[] = {
+ Opcode_all4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_any8_encode_fns[] = {
+ Opcode_any8_Slot_inst_encode, 0, 0, 0, 0, Opcode_any8_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_any8_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_any8_Slot_smod_slot0_encode, 0, 0, 0, Opcode_any8_Slot_dual_slot2_encode, 0, Opcode_any8_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all8_encode_fns[] = {
+ Opcode_all8_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bf_encode_fns[] = {
+ Opcode_bf_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bt_encode_fns[] = {
+ Opcode_bt_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_encode_fns[] = {
+ Opcode_movf_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_encode_fns[] = {
+ Opcode_movt_Slot_inst_encode, 0, 0, 0, 0, Opcode_movt_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_movt_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_movt_Slot_smod_slot0_encode, 0, 0, 0, Opcode_movt_Slot_dual_slot2_encode, 0, Opcode_movt_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_br_encode_fns[] = {
+ Opcode_rsr_br_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_br_encode_fns[] = {
+ Opcode_wsr_br_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_br_encode_fns[] = {
+ Opcode_xsr_br_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0, 0, 0, Opcode_clamps_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_clamps_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_clamps_Slot_smod_slot0_encode, 0, 0, 0, Opcode_clamps_Slot_dual_slot2_encode, 0, Opcode_clamps_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0, 0, 0, Opcode_min_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_min_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_min_Slot_smod_slot0_encode, 0, 0, 0, Opcode_min_Slot_dual_slot2_encode, 0, Opcode_min_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0, 0, 0, Opcode_max_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_max_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_max_Slot_smod_slot0_encode, 0, 0, 0, Opcode_max_Slot_dual_slot2_encode, 0, Opcode_max_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0, 0, 0, Opcode_minu_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_minu_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_minu_Slot_smod_slot0_encode, 0, 0, 0, Opcode_minu_Slot_dual_slot2_encode, 0, Opcode_minu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0, 0, 0, Opcode_maxu_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_maxu_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_maxu_Slot_smod_slot0_encode, 0, 0, 0, Opcode_maxu_Slot_dual_slot2_encode, 0, Opcode_maxu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0, 0, 0, Opcode_nsa_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_nsa_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_nsa_Slot_smod_slot0_encode, 0, 0, 0, Opcode_nsa_Slot_dual_slot2_encode, 0, Opcode_nsa_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0, 0, 0, Opcode_nsau_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_nsau_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_nsau_Slot_smod_slot0_encode, 0, 0, 0, Opcode_nsau_Slot_dual_slot2_encode, 0, Opcode_nsau_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0, 0, 0, Opcode_sext_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sext_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_sext_Slot_smod_slot0_encode, 0, 0, 0, Opcode_sext_Slot_dual_slot2_encode, 0, Opcode_sext_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_fcr_encode_fns[] = {
+ Opcode_rur_fcr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_fcr_encode_fns[] = {
+ Opcode_wur_fcr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_fsr_encode_fns[] = {
+ Opcode_rur_fsr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_fsr_encode_fns[] = {
+ Opcode_wur_fsr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_s_encode_fns[] = {
+ Opcode_add_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_add_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_s_encode_fns[] = {
+ Opcode_sub_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sub_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_s_encode_fns[] = {
+ Opcode_mul_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mul_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_madd_s_encode_fns[] = {
+ Opcode_madd_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_madd_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_msub_s_encode_fns[] = {
+ Opcode_msub_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_msub_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_s_encode_fns[] = {
+ Opcode_movf_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movf_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_s_encode_fns[] = {
+ Opcode_movt_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movt_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_s_encode_fns[] = {
+ Opcode_moveqz_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_moveqz_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_s_encode_fns[] = {
+ Opcode_movnez_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movnez_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_s_encode_fns[] = {
+ Opcode_movltz_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movltz_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_s_encode_fns[] = {
+ Opcode_movgez_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movgez_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_s_encode_fns[] = {
+ Opcode_abs_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_abs_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_s_encode_fns[] = {
+ Opcode_mov_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mov_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_s_encode_fns[] = {
+ Opcode_neg_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_neg_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_un_s_encode_fns[] = {
+ Opcode_un_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_un_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_oeq_s_encode_fns[] = {
+ Opcode_oeq_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_oeq_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ueq_s_encode_fns[] = {
+ Opcode_ueq_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ueq_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_olt_s_encode_fns[] = {
+ Opcode_olt_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_olt_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ult_s_encode_fns[] = {
+ Opcode_ult_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ult_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ole_s_encode_fns[] = {
+ Opcode_ole_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ole_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ule_s_encode_fns[] = {
+ Opcode_ule_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ule_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_float_s_encode_fns[] = {
+ Opcode_float_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_float_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ufloat_s_encode_fns[] = {
+ Opcode_ufloat_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ufloat_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_round_s_encode_fns[] = {
+ Opcode_round_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_round_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ceil_s_encode_fns[] = {
+ Opcode_ceil_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ceil_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_floor_s_encode_fns[] = {
+ Opcode_floor_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_floor_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_trunc_s_encode_fns[] = {
+ Opcode_trunc_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_trunc_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_utrunc_s_encode_fns[] = {
+ Opcode_utrunc_s_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_utrunc_s_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfr_encode_fns[] = {
+ Opcode_rfr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rfr_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_wfr_encode_fns[] = {
+ Opcode_wfr_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsi_encode_fns[] = {
+ Opcode_lsi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lsi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lsiu_encode_fns[] = {
+ Opcode_lsiu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lsiu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lsx_encode_fns[] = {
+ Opcode_lsx_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsxu_encode_fns[] = {
+ Opcode_lsxu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lsxu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssi_encode_fns[] = {
+ Opcode_ssi_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ssi_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssiu_encode_fns[] = {
+ Opcode_ssiu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ssiu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssx_encode_fns[] = {
+ Opcode_ssx_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ssx_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssxu_encode_fns[] = {
+ Opcode_ssxu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ssxu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_argmax_encode_fns[] = {
+ Opcode_get_argmax_Slot_inst_encode, 0, 0, Opcode_get_argmax_Slot_gp_slot2_encode, 0, 0, Opcode_get_argmax_Slot_dot_slot2_encode, 0, 0, Opcode_get_argmax_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_get_argmax_Slot_smod_slot2_encode, 0, 0, Opcode_get_argmax_Slot_llr_slot2_encode, 0, 0, Opcode_get_argmax_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_get_hsar_encode_fns[] = {
+ Opcode_get_hsar_Slot_inst_encode, 0, 0, Opcode_get_hsar_Slot_gp_slot2_encode, 0, Opcode_get_hsar_Slot_gp_slot0_encode, Opcode_get_hsar_Slot_dot_slot2_encode, 0, Opcode_get_hsar_Slot_dot_slot0_encode, Opcode_get_hsar_Slot_pq_slot2_encode, 0, Opcode_get_hsar_Slot_pq_slot0_encode, 0, 0, 0, Opcode_get_hsar_Slot_smod_slot2_encode, 0, Opcode_get_hsar_Slot_smod_slot0_encode, Opcode_get_hsar_Slot_llr_slot2_encode, 0, Opcode_get_hsar_Slot_llr_slot0_encode, Opcode_get_hsar_Slot_dual_slot2_encode, 0, Opcode_get_hsar_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_hsar2sar_encode_fns[] = {
+ Opcode_get_hsar2sar_Slot_inst_encode, 0, 0, Opcode_get_hsar2sar_Slot_gp_slot2_encode, 0, Opcode_get_hsar2sar_Slot_gp_slot0_encode, Opcode_get_hsar2sar_Slot_dot_slot2_encode, 0, Opcode_get_hsar2sar_Slot_dot_slot0_encode, Opcode_get_hsar2sar_Slot_pq_slot2_encode, 0, Opcode_get_hsar2sar_Slot_pq_slot0_encode, 0, 0, 0, Opcode_get_hsar2sar_Slot_smod_slot2_encode, 0, Opcode_get_hsar2sar_Slot_smod_slot0_encode, Opcode_get_hsar2sar_Slot_llr_slot2_encode, 0, Opcode_get_hsar2sar_Slot_llr_slot0_encode, Opcode_get_hsar2sar_Slot_dual_slot2_encode, 0, Opcode_get_hsar2sar_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_interp_ext_n_encode_fns[] = {
+ Opcode_get_interp_ext_n_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_interp_ext_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_n_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_interp_ext_l_encode_fns[] = {
+ Opcode_get_interp_ext_l_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_interp_ext_l_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_l_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_l_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_interp_ext_l_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_llr_buf_encode_fns[] = {
+ Opcode_get_llr_buf_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_llr_buf_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_llr_buf_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_llr_buf_Slot_smod_slot0_encode, 0, 0, Opcode_get_llr_buf_Slot_llr_slot0_encode, 0, 0, Opcode_get_llr_buf_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_llr_pos_encode_fns[] = {
+ Opcode_get_llr_pos_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_llr_pos_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_llr_pos_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_llr_pos_Slot_smod_slot0_encode, 0, 0, Opcode_get_llr_pos_Slot_llr_slot0_encode, 0, 0, Opcode_get_llr_pos_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_max_encode_fns[] = {
+ Opcode_get_max_Slot_inst_encode, 0, 0, Opcode_get_max_Slot_gp_slot2_encode, 0, 0, Opcode_get_max_Slot_dot_slot2_encode, 0, 0, Opcode_get_max_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_get_max_Slot_smod_slot2_encode, 0, 0, Opcode_get_max_Slot_llr_slot2_encode, 0, 0, Opcode_get_max_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_get_nco_encode_fns[] = {
+ Opcode_get_nco_Slot_inst_encode, 0, 0, Opcode_get_nco_Slot_gp_slot2_encode, 0, 0, Opcode_get_nco_Slot_dot_slot2_encode, 0, 0, Opcode_get_nco_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_get_nco_Slot_smod_slot2_encode, 0, 0, Opcode_get_nco_Slot_llr_slot2_encode, 0, 0, Opcode_get_nco_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_get_perm_reg_encode_fns[] = {
+ 0, 0, 0, 0, 0, Opcode_get_perm_reg_Slot_gp_slot0_encode, 0, 0, Opcode_get_perm_reg_Slot_dot_slot0_encode, 0, 0, Opcode_get_perm_reg_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_perm_reg_Slot_smod_slot0_encode, 0, 0, Opcode_get_perm_reg_Slot_llr_slot0_encode, 0, 0, Opcode_get_perm_reg_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_phasor_n_encode_fns[] = {
+ Opcode_get_phasor_n_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_phasor_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_n_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_phasor_offset_encode_fns[] = {
+ Opcode_get_phasor_offset_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_phasor_offset_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_offset_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_offset_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_phasor_offset_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_sar_encode_fns[] = {
+ Opcode_get_sar_Slot_inst_encode, 0, 0, Opcode_get_sar_Slot_gp_slot2_encode, 0, Opcode_get_sar_Slot_gp_slot0_encode, Opcode_get_sar_Slot_dot_slot2_encode, 0, Opcode_get_sar_Slot_dot_slot0_encode, Opcode_get_sar_Slot_pq_slot2_encode, 0, Opcode_get_sar_Slot_pq_slot0_encode, 0, 0, 0, Opcode_get_sar_Slot_smod_slot2_encode, 0, Opcode_get_sar_Slot_smod_slot0_encode, Opcode_get_sar_Slot_llr_slot2_encode, 0, Opcode_get_sar_Slot_llr_slot0_encode, Opcode_get_sar_Slot_dual_slot2_encode, 0, Opcode_get_sar_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_scale_reg_encode_fns[] = {
+ Opcode_get_scale_reg_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_scale_reg_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_scale_reg_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_scale_reg_Slot_smod_slot0_encode, 0, 0, Opcode_get_scale_reg_Slot_llr_slot0_encode, 0, 0, Opcode_get_scale_reg_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_smod_buf_encode_fns[] = {
+ Opcode_get_smod_buf_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_smod_buf_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_buf_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_buf_Slot_smod_slot0_encode, 0, 0, Opcode_get_smod_buf_Slot_llr_slot0_encode, 0, 0, Opcode_get_smod_buf_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_smod_offset_table_encode_fns[] = {
+ Opcode_get_smod_offset_table_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_smod_offset_table_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_offset_table_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_offset_table_Slot_smod_slot0_encode, 0, 0, Opcode_get_smod_offset_table_Slot_llr_slot0_encode, 0, 0, Opcode_get_smod_offset_table_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_smod_pos_encode_fns[] = {
+ Opcode_get_smod_pos_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_smod_pos_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_pos_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_smod_pos_Slot_smod_slot0_encode, 0, 0, Opcode_get_smod_pos_Slot_llr_slot0_encode, 0, 0, Opcode_get_smod_pos_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_sov_encode_fns[] = {
+ Opcode_get_sov_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_sov_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_sov_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_sov_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_sov_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_get_wght_encode_fns[] = {
+ Opcode_get_wght_Slot_inst_encode, 0, 0, 0, 0, Opcode_get_wght_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_wght_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_get_wght_Slot_smod_slot0_encode, 0, 0, Opcode_get_wght_Slot_llr_slot0_encode, 0, 0, Opcode_get_wght_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_argmax_encode_fns[] = {
+ Opcode_set_argmax_Slot_inst_encode, 0, 0, Opcode_set_argmax_Slot_gp_slot2_encode, 0, 0, Opcode_set_argmax_Slot_dot_slot2_encode, 0, 0, Opcode_set_argmax_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_set_argmax_Slot_smod_slot2_encode, 0, 0, Opcode_set_argmax_Slot_llr_slot2_encode, 0, 0, Opcode_set_argmax_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_set_ext_regs_encode_fns[] = {
+ Opcode_set_ext_regs_Slot_inst_encode, 0, 0, Opcode_set_ext_regs_Slot_gp_slot2_encode, 0, 0, Opcode_set_ext_regs_Slot_dot_slot2_encode, 0, 0, Opcode_set_ext_regs_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_set_ext_regs_Slot_smod_slot2_encode, 0, 0, Opcode_set_ext_regs_Slot_llr_slot2_encode, 0, 0, Opcode_set_ext_regs_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_set_hsar_encode_fns[] = {
+ 0, 0, 0, Opcode_set_hsar_Slot_gp_slot2_encode, 0, 0, Opcode_set_hsar_Slot_dot_slot2_encode, 0, 0, Opcode_set_hsar_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_set_hsar_Slot_smod_slot2_encode, 0, 0, Opcode_set_hsar_Slot_llr_slot2_encode, 0, 0, Opcode_set_hsar_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_set_llr_buf_encode_fns[] = {
+ Opcode_set_llr_buf_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_llr_buf_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_llr_buf_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_llr_buf_Slot_smod_slot0_encode, 0, 0, Opcode_set_llr_buf_Slot_llr_slot0_encode, 0, 0, Opcode_set_llr_buf_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_llr_pos_encode_fns[] = {
+ Opcode_set_llr_pos_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_llr_pos_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_llr_pos_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_llr_pos_Slot_smod_slot0_encode, 0, 0, Opcode_set_llr_pos_Slot_llr_slot0_encode, 0, 0, Opcode_set_llr_pos_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_max_encode_fns[] = {
+ Opcode_set_max_Slot_inst_encode, 0, 0, Opcode_set_max_Slot_gp_slot2_encode, 0, 0, Opcode_set_max_Slot_dot_slot2_encode, 0, 0, Opcode_set_max_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_set_max_Slot_smod_slot2_encode, 0, 0, Opcode_set_max_Slot_llr_slot2_encode, 0, 0, Opcode_set_max_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_set_nco_encode_fns[] = {
+ Opcode_set_nco_Slot_inst_encode, 0, 0, Opcode_set_nco_Slot_gp_slot2_encode, 0, 0, Opcode_set_nco_Slot_dot_slot2_encode, 0, 0, Opcode_set_nco_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_set_nco_Slot_smod_slot2_encode, 0, 0, Opcode_set_nco_Slot_llr_slot2_encode, 0, 0, Opcode_set_nco_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_set_perm_reg_encode_fns[] = {
+ 0, 0, 0, 0, 0, Opcode_set_perm_reg_Slot_gp_slot0_encode, 0, 0, Opcode_set_perm_reg_Slot_dot_slot0_encode, 0, 0, Opcode_set_perm_reg_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_perm_reg_Slot_smod_slot0_encode, 0, 0, Opcode_set_perm_reg_Slot_llr_slot0_encode, 0, 0, Opcode_set_perm_reg_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_phasor_n_encode_fns[] = {
+ Opcode_set_phasor_n_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_phasor_n_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_n_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_n_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_n_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_phasor_offset_encode_fns[] = {
+ Opcode_set_phasor_offset_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_phasor_offset_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_offset_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_offset_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_phasor_offset_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_sar_encode_fns[] = {
+ Opcode_set_sar_Slot_inst_encode, 0, 0, Opcode_set_sar_Slot_gp_slot2_encode, 0, Opcode_set_sar_Slot_gp_slot0_encode, Opcode_set_sar_Slot_dot_slot2_encode, 0, Opcode_set_sar_Slot_dot_slot0_encode, Opcode_set_sar_Slot_pq_slot2_encode, 0, Opcode_set_sar_Slot_pq_slot0_encode, 0, 0, 0, Opcode_set_sar_Slot_smod_slot2_encode, 0, Opcode_set_sar_Slot_smod_slot0_encode, Opcode_set_sar_Slot_llr_slot2_encode, 0, Opcode_set_sar_Slot_llr_slot0_encode, Opcode_set_sar_Slot_dual_slot2_encode, 0, Opcode_set_sar_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_scale_reg_encode_fns[] = {
+ Opcode_set_scale_reg_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_scale_reg_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_scale_reg_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_scale_reg_Slot_smod_slot0_encode, 0, 0, Opcode_set_scale_reg_Slot_llr_slot0_encode, 0, 0, Opcode_set_scale_reg_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_smod_buf_encode_fns[] = {
+ Opcode_set_smod_buf_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_smod_buf_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_buf_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_buf_Slot_smod_slot0_encode, 0, 0, Opcode_set_smod_buf_Slot_llr_slot0_encode, 0, 0, Opcode_set_smod_buf_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_smod_offset_table_encode_fns[] = {
+ Opcode_set_smod_offset_table_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_smod_offset_table_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_offset_table_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_offset_table_Slot_smod_slot0_encode, 0, 0, Opcode_set_smod_offset_table_Slot_llr_slot0_encode, 0, 0, Opcode_set_smod_offset_table_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_smod_pos_encode_fns[] = {
+ Opcode_set_smod_pos_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_smod_pos_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_pos_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_smod_pos_Slot_smod_slot0_encode, 0, 0, Opcode_set_smod_pos_Slot_llr_slot0_encode, 0, 0, Opcode_set_smod_pos_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_sov_encode_fns[] = {
+ Opcode_set_sov_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_sov_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_sov_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_sov_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_sov_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_set_wght_encode_fns[] = {
+ Opcode_set_wght_Slot_inst_encode, 0, 0, 0, 0, Opcode_set_wght_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_wght_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_set_wght_Slot_smod_slot0_encode, 0, 0, Opcode_set_wght_Slot_llr_slot0_encode, 0, 0, Opcode_set_wght_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac2x32_encode_fns[] = {
+ Opcode_lac2x32_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac2x32_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac2x32_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac2x32_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac2x64_0_encode_fns[] = {
+ Opcode_lac2x64_0_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac2x64_0_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac2x64_0_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac2x64_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac2x64_1_encode_fns[] = {
+ Opcode_lac2x64_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac2x64_1_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac2x64_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac2x64_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac2x64_2_encode_fns[] = {
+ Opcode_lac2x64_2_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac2x64_2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac2x64_2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac2x64_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac2x64_3_encode_fns[] = {
+ Opcode_lac2x64_3_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac2x64_3_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac2x64_3_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac2x64_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac32_r_encode_fns[] = {
+ Opcode_lac32_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac32_r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac32_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac32_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac_ih_encode_fns[] = {
+ Opcode_lac_ih_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac_ih_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac_ih_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac_ih_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac_il_encode_fns[] = {
+ Opcode_lac_il_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac_il_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac_il_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac_il_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac_rh_encode_fns[] = {
+ Opcode_lac_rh_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac_rh_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac_rh_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac_rh_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lac_rl_encode_fns[] = {
+ Opcode_lac_rl_Slot_inst_encode, 0, 0, 0, 0, Opcode_lac_rl_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lac_rl_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lac_rl_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_encode_fns[] = {
+ Opcode_lcm_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_Slot_gp_slot0_encode, 0, 0, Opcode_lcm_Slot_dot_slot0_encode, 0, 0, Opcode_lcm_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_Slot_smod_slot0_encode, 0, 0, Opcode_lcm_Slot_llr_slot0_encode, 0, 0, Opcode_lcm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_pinc_encode_fns[] = {
+ Opcode_lcm_pinc_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_pinc_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_pinc_x_encode_fns[] = {
+ Opcode_lcm_pinc_x_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_pinc_x_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_x_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_x_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_pinc_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_u_encode_fns[] = {
+ Opcode_lcm_u_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_u_Slot_gp_slot0_encode, 0, 0, Opcode_lcm_u_Slot_dot_slot0_encode, 0, 0, Opcode_lcm_u_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_lcm_u_Slot_smod_slot0_encode, 0, 0, Opcode_lcm_u_Slot_llr_slot0_encode, 0, 0, Opcode_lcm_u_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_x_encode_fns[] = {
+ Opcode_lcm_x_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_x_Slot_gp_slot0_encode, 0, 0, Opcode_lcm_x_Slot_dot_slot0_encode, 0, 0, Opcode_lcm_x_Slot_pq_slot0_encode, 0, 0, Opcode_lcm_x_Slot_acc2_slot0_encode, 0, 0, Opcode_lcm_x_Slot_smod_slot0_encode, 0, 0, Opcode_lcm_x_Slot_llr_slot0_encode, 0, 0, Opcode_lcm_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lcm_xu_encode_fns[] = {
+ Opcode_lcm_xu_Slot_inst_encode, 0, 0, 0, 0, Opcode_lcm_xu_Slot_gp_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_dot_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_pq_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_acc2_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_smod_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_llr_slot0_encode, 0, 0, Opcode_lcm_xu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lp_encode_fns[] = {
+ Opcode_lp_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lp_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lp_x_encode_fns[] = {
+ Opcode_lp_x_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lp_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lq_encode_fns[] = {
+ Opcode_lq_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lq_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lq_x_encode_fns[] = {
+ Opcode_lq_x_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lq_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lut0_encode_fns[] = {
+ Opcode_lut0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lut0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lut1_encode_fns[] = {
+ Opcode_lut1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lut1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lut2_encode_fns[] = {
+ Opcode_lut2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lut2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lut3_encode_fns[] = {
+ Opcode_lut3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_lut3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac2x32_encode_fns[] = {
+ Opcode_sac2x32_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac2x32_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac2x32_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac2x32_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac2x64_0_encode_fns[] = {
+ Opcode_sac2x64_0_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac2x64_0_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac2x64_0_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac2x64_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac2x64_1_encode_fns[] = {
+ Opcode_sac2x64_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac2x64_1_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac2x64_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac2x64_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac2x64_2_encode_fns[] = {
+ Opcode_sac2x64_2_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac2x64_2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac2x64_2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac2x64_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac2x64_3_encode_fns[] = {
+ Opcode_sac2x64_3_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac2x64_3_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac2x64_3_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac2x64_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac32_r_encode_fns[] = {
+ Opcode_sac32_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac32_r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac32_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac32_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac_ih_encode_fns[] = {
+ Opcode_sac_ih_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac_ih_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac_ih_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac_ih_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac_il_encode_fns[] = {
+ Opcode_sac_il_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac_il_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac_il_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac_il_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac_rh_encode_fns[] = {
+ Opcode_sac_rh_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac_rh_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac_rh_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac_rh_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sac_rl_encode_fns[] = {
+ Opcode_sac_rl_Slot_inst_encode, 0, 0, 0, 0, Opcode_sac_rl_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_sac_rl_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sac_rl_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_encode_fns[] = {
+ Opcode_scm_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_Slot_gp_slot0_encode, 0, 0, Opcode_scm_Slot_dot_slot0_encode, 0, 0, Opcode_scm_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_Slot_smod_slot0_encode, 0, 0, Opcode_scm_Slot_llr_slot0_encode, 0, 0, Opcode_scm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_pinc_encode_fns[] = {
+ Opcode_scm_pinc_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_pinc_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_pinc_x_encode_fns[] = {
+ Opcode_scm_pinc_x_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_pinc_x_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_x_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_x_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_pinc_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_u_encode_fns[] = {
+ Opcode_scm_u_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_u_Slot_gp_slot0_encode, 0, 0, Opcode_scm_u_Slot_dot_slot0_encode, 0, 0, Opcode_scm_u_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_scm_u_Slot_smod_slot0_encode, 0, 0, Opcode_scm_u_Slot_llr_slot0_encode, 0, 0, Opcode_scm_u_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_x_encode_fns[] = {
+ Opcode_scm_x_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_x_Slot_gp_slot0_encode, 0, 0, Opcode_scm_x_Slot_dot_slot0_encode, 0, 0, Opcode_scm_x_Slot_pq_slot0_encode, 0, 0, Opcode_scm_x_Slot_acc2_slot0_encode, 0, 0, Opcode_scm_x_Slot_smod_slot0_encode, 0, 0, Opcode_scm_x_Slot_llr_slot0_encode, 0, 0, Opcode_scm_x_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_scm_xu_encode_fns[] = {
+ Opcode_scm_xu_Slot_inst_encode, 0, 0, 0, 0, Opcode_scm_xu_Slot_gp_slot0_encode, 0, 0, Opcode_scm_xu_Slot_dot_slot0_encode, 0, 0, Opcode_scm_xu_Slot_pq_slot0_encode, 0, 0, Opcode_scm_xu_Slot_acc2_slot0_encode, 0, 0, Opcode_scm_xu_Slot_smod_slot0_encode, 0, 0, Opcode_scm_xu_Slot_llr_slot0_encode, 0, 0, Opcode_scm_xu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_store_p_encode_fns[] = {
+ Opcode_store_p_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_store_p_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_store_p_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_store_q_encode_fns[] = {
+ Opcode_store_q_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_store_q_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_store_q_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2cm_dup_encode_fns[] = {
+ Opcode_ar2cm_dup_Slot_inst_encode, 0, 0, Opcode_ar2cm_dup_Slot_gp_slot2_encode, 0, Opcode_ar2cm_dup_Slot_gp_slot0_encode, Opcode_ar2cm_dup_Slot_dot_slot2_encode, 0, Opcode_ar2cm_dup_Slot_dot_slot0_encode, Opcode_ar2cm_dup_Slot_pq_slot2_encode, 0, Opcode_ar2cm_dup_Slot_pq_slot0_encode, 0, 0, 0, Opcode_ar2cm_dup_Slot_smod_slot2_encode, 0, Opcode_ar2cm_dup_Slot_smod_slot0_encode, Opcode_ar2cm_dup_Slot_llr_slot2_encode, 0, Opcode_ar2cm_dup_Slot_llr_slot0_encode, Opcode_ar2cm_dup_Slot_dual_slot2_encode, 0, Opcode_ar2cm_dup_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2cm_ln_encode_fns[] = {
+ Opcode_ar2cm_ln_Slot_inst_encode, 0, 0, Opcode_ar2cm_ln_Slot_gp_slot2_encode, 0, Opcode_ar2cm_ln_Slot_gp_slot0_encode, Opcode_ar2cm_ln_Slot_dot_slot2_encode, 0, Opcode_ar2cm_ln_Slot_dot_slot0_encode, Opcode_ar2cm_ln_Slot_pq_slot2_encode, 0, Opcode_ar2cm_ln_Slot_pq_slot0_encode, 0, 0, 0, Opcode_ar2cm_ln_Slot_smod_slot2_encode, 0, Opcode_ar2cm_ln_Slot_smod_slot0_encode, Opcode_ar2cm_ln_Slot_llr_slot2_encode, 0, Opcode_ar2cm_ln_Slot_llr_slot0_encode, Opcode_ar2cm_ln_Slot_dual_slot2_encode, 0, Opcode_ar2cm_ln_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2cm_ln_i_encode_fns[] = {
+ Opcode_ar2cm_ln_i_Slot_inst_encode, 0, 0, 0, 0, Opcode_ar2cm_ln_i_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_i_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2cm_ln_r_encode_fns[] = {
+ Opcode_ar2cm_ln_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_ar2cm_ln_r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_r_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_ar2cm_ln_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2pq_ln_encode_fns[] = {
+ 0, 0, 0, Opcode_ar2pq_ln_Slot_gp_slot2_encode, 0, 0, Opcode_ar2pq_ln_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ar2pq_ln_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ar2sar_dup_encode_fns[] = {
+ 0, 0, 0, Opcode_ar2sar_dup_Slot_gp_slot2_encode, 0, 0, Opcode_ar2sar_dup_Slot_dot_slot2_encode, 0, 0, Opcode_ar2sar_dup_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_ar2sar_dup_Slot_smod_slot2_encode, 0, 0, Opcode_ar2sar_dup_Slot_llr_slot2_encode, 0, 0, Opcode_ar2sar_dup_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrac_encode_fns[] = {
+ Opcode_clrac_Slot_inst_encode, 0, 0, 0, 0, Opcode_clrac_Slot_gp_slot0_encode, 0, 0, Opcode_clrac_Slot_dot_slot0_encode, 0, 0, Opcode_clrac_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_clrac_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_clrcm_encode_fns[] = {
+ Opcode_clrcm_Slot_inst_encode, 0, 0, Opcode_clrcm_Slot_gp_slot2_encode, 0, Opcode_clrcm_Slot_gp_slot0_encode, Opcode_clrcm_Slot_dot_slot2_encode, 0, Opcode_clrcm_Slot_dot_slot0_encode, Opcode_clrcm_Slot_pq_slot2_encode, 0, Opcode_clrcm_Slot_pq_slot0_encode, Opcode_clrcm_Slot_acc2_slot2_encode, 0, Opcode_clrcm_Slot_acc2_slot0_encode, Opcode_clrcm_Slot_smod_slot2_encode, 0, Opcode_clrcm_Slot_smod_slot0_encode, Opcode_clrcm_Slot_llr_slot2_encode, 0, Opcode_clrcm_Slot_llr_slot0_encode, Opcode_clrcm_Slot_dual_slot2_encode, 0, Opcode_clrcm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_cm2ar_ln_encode_fns[] = {
+ Opcode_cm2ar_ln_Slot_inst_encode, 0, 0, Opcode_cm2ar_ln_Slot_gp_slot2_encode, 0, Opcode_cm2ar_ln_Slot_gp_slot0_encode, Opcode_cm2ar_ln_Slot_dot_slot2_encode, 0, Opcode_cm2ar_ln_Slot_dot_slot0_encode, Opcode_cm2ar_ln_Slot_pq_slot2_encode, 0, Opcode_cm2ar_ln_Slot_pq_slot0_encode, 0, 0, 0, Opcode_cm2ar_ln_Slot_smod_slot2_encode, 0, Opcode_cm2ar_ln_Slot_smod_slot0_encode, Opcode_cm2ar_ln_Slot_llr_slot2_encode, 0, Opcode_cm2ar_ln_Slot_llr_slot0_encode, Opcode_cm2ar_ln_Slot_dual_slot2_encode, 0, Opcode_cm2ar_ln_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_cm2ar_ln_i_encode_fns[] = {
+ Opcode_cm2ar_ln_i_Slot_inst_encode, 0, 0, 0, 0, Opcode_cm2ar_ln_i_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_i_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_cm2ar_ln_r_encode_fns[] = {
+ Opcode_cm2ar_ln_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_cm2ar_ln_r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_r_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_cm2ar_ln_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_comb_ar_encode_fns[] = {
+ Opcode_comb_ar_Slot_inst_encode, 0, 0, 0, 0, Opcode_comb_ar_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_comb_ar_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_comb_ar_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_comb_ar_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_conj_encode_fns[] = {
+ Opcode_conj_Slot_inst_encode, 0, 0, Opcode_conj_Slot_gp_slot2_encode, 0, Opcode_conj_Slot_gp_slot0_encode, Opcode_conj_Slot_dot_slot2_encode, 0, Opcode_conj_Slot_dot_slot0_encode, Opcode_conj_Slot_pq_slot2_encode, 0, Opcode_conj_Slot_pq_slot0_encode, Opcode_conj_Slot_acc2_slot2_encode, 0, Opcode_conj_Slot_acc2_slot0_encode, Opcode_conj_Slot_smod_slot2_encode, 0, Opcode_conj_Slot_smod_slot0_encode, Opcode_conj_Slot_llr_slot2_encode, 0, Opcode_conj_Slot_llr_slot0_encode, Opcode_conj_Slot_dual_slot2_encode, 0, Opcode_conj_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mov2ac32_i_encode_fns[] = {
+ Opcode_mov2ac32_i_Slot_inst_encode, 0, 0, 0, 0, Opcode_mov2ac32_i_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_mov2ac32_i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mov2ac32_i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mov2ac32_r_encode_fns[] = {
+ Opcode_mov2ac32_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_mov2ac32_r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_mov2ac32_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mov2ac32_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_mov2cm2pq_encode_fns[] = {
+ 0, 0, 0, Opcode_mov2cm2pq_Slot_gp_slot2_encode, 0, 0, Opcode_mov2cm2pq_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mov2cm2pq_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movac_encode_fns[] = {
+ Opcode_movac_Slot_inst_encode, 0, 0, 0, 0, Opcode_movac_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_movac_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movac_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movac_i_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movac_i_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movac_i2r_encode_fns[] = {
+ Opcode_movac_i2r_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movac_r_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movac_r_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movac_r2i_encode_fns[] = {
+ Opcode_movac_r2i_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movar2_encode_fns[] = {
+ 0, 0, 0, Opcode_movar2_Slot_gp_slot2_encode, 0, 0, Opcode_movar2_Slot_dot_slot2_encode, 0, 0, Opcode_movar2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movar2_Slot_smod_slot2_encode, 0, 0, Opcode_movar2_Slot_llr_slot2_encode, 0, 0, Opcode_movar2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcm_encode_fns[] = {
+ Opcode_movcm_Slot_inst_encode, 0, 0, Opcode_movcm_Slot_gp_slot2_encode, 0, Opcode_movcm_Slot_gp_slot0_encode, Opcode_movcm_Slot_dot_slot2_encode, 0, Opcode_movcm_Slot_dot_slot0_encode, Opcode_movcm_Slot_pq_slot2_encode, 0, Opcode_movcm_Slot_pq_slot0_encode, Opcode_movcm_Slot_acc2_slot2_encode, 0, Opcode_movcm_Slot_acc2_slot0_encode, Opcode_movcm_Slot_smod_slot2_encode, 0, Opcode_movcm_Slot_smod_slot0_encode, Opcode_movcm_Slot_llr_slot2_encode, 0, Opcode_movcm_Slot_llr_slot0_encode, Opcode_movcm_Slot_dual_slot2_encode, 0, Opcode_movcm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movcm2pq_encode_fns[] = {
+ Opcode_movcm2pq_Slot_inst_encode, 0, 0, Opcode_movcm2pq_Slot_gp_slot2_encode, 0, 0, Opcode_movcm2pq_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_movcm2pq_Slot_acc2_slot0_encode, Opcode_movcm2pq_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_movcm2pq_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_0_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_0_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_0_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_0_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_0_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_0_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_0_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_1_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_1_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_1_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_1_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_1_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_1_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_1_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_2_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_2_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_2_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_2_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_2_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_3_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_3_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_3_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_3_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_3_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_3_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_3_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_4_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_4_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_4_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_4_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_4_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_4_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_4_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_5_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_5_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_5_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_5_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_5_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_5_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_5_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_6_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_6_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_6_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_6_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_6_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_6_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_6_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd_7_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd_7_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd_7_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd_7_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd_7_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd_7_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd_7_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_0_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_0_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_0_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_0_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_0_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_0_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_0_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_1_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_1_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_1_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_1_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_1_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_1_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_1_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_2_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_2_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_2_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_2_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_2_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_3_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_3_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_3_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_3_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_3_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_3_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_3_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_4_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_4_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_4_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_4_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_4_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_4_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_4_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_5_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_5_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_5_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_5_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_5_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_5_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_5_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_6_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_6_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_6_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_6_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_6_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_6_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_6_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movcnd8_7_encode_fns[] = {
+ 0, 0, 0, Opcode_movcnd8_7_Slot_gp_slot2_encode, 0, 0, Opcode_movcnd8_7_Slot_dot_slot2_encode, 0, 0, Opcode_movcnd8_7_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_movcnd8_7_Slot_smod_slot2_encode, 0, 0, Opcode_movcnd8_7_Slot_llr_slot2_encode, 0, 0, Opcode_movcnd8_7_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_i_encode_fns[] = {
+ Opcode_mov_i_Slot_inst_encode, 0, 0, 0, 0, Opcode_mov_i_Slot_gp_slot0_encode, 0, 0, Opcode_mov_i_Slot_dot_slot0_encode, 0, 0, Opcode_mov_i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_mov_i_Slot_smod_slot0_encode, 0, 0, Opcode_mov_i_Slot_llr_slot0_encode, 0, 0, Opcode_mov_i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movpq2pq_encode_fns[] = {
+ 0, 0, 0, Opcode_movpq2pq_Slot_gp_slot2_encode, 0, 0, Opcode_movpq2pq_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_movpq2pq_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_r_encode_fns[] = {
+ Opcode_mov_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_mov_r_Slot_gp_slot0_encode, 0, 0, Opcode_mov_r_Slot_dot_slot0_encode, 0, 0, Opcode_mov_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_mov_r_Slot_smod_slot0_encode, 0, 0, Opcode_mov_r_Slot_llr_slot0_encode, 0, 0, Opcode_mov_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_negcm_encode_fns[] = {
+ Opcode_negcm_Slot_inst_encode, 0, 0, Opcode_negcm_Slot_gp_slot2_encode, 0, Opcode_negcm_Slot_gp_slot0_encode, Opcode_negcm_Slot_dot_slot2_encode, 0, Opcode_negcm_Slot_dot_slot0_encode, Opcode_negcm_Slot_pq_slot2_encode, 0, Opcode_negcm_Slot_pq_slot0_encode, Opcode_negcm_Slot_acc2_slot2_encode, 0, Opcode_negcm_Slot_acc2_slot0_encode, Opcode_negcm_Slot_smod_slot2_encode, 0, Opcode_negcm_Slot_smod_slot0_encode, Opcode_negcm_Slot_llr_slot2_encode, 0, Opcode_negcm_Slot_llr_slot0_encode, Opcode_negcm_Slot_dual_slot2_encode, 0, Opcode_negcm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop16llr_1_encode_fns[] = {
+ Opcode_pop16llr_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_pop16llr_1_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop16llr_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_pop16llr_1_Slot_llr_slot0_encode, 0, 0, Opcode_pop16llr_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pq2cm_encode_fns[] = {
+ Opcode_pq2cm_Slot_inst_encode, 0, 0, Opcode_pq2cm_Slot_gp_slot2_encode, 0, 0, Opcode_pq2cm_Slot_dot_slot2_encode, 0, 0, Opcode_pq2cm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_pq2cm_Slot_smod_slot2_encode, 0, 0, Opcode_pq2cm_Slot_llr_slot2_encode, 0, 0, Opcode_pq2cm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_swapac_r_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_swapac_r_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_swapac_ri_encode_fns[] = {
+ Opcode_swapac_ri_Slot_inst_encode, 0, 0, 0, 0, Opcode_swapac_ri_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_swapac_ri_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_swapac_ri_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_swapb_encode_fns[] = {
+ Opcode_swapb_Slot_inst_encode, 0, 0, Opcode_swapb_Slot_gp_slot2_encode, 0, 0, Opcode_swapb_Slot_dot_slot2_encode, 0, 0, Opcode_swapb_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_swapb_Slot_smod_slot2_encode, 0, 0, Opcode_swapb_Slot_llr_slot2_encode, 0, 0, Opcode_swapb_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add2ac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_add2ac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addac_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_addac_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cdot_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_cdot_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cdotac_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_cdotac_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cdotacs_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_cdotacs_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_cmac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_cmac_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmacs_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_cmacs_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmpy_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_cmpy_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_cmpy_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmpy2cm_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_cmpy2cm_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_cmpy2cm_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmpy2pq_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_cmpy2pq_Slot_pq_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmpys_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_cmpys_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmpyxp2pq_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_cmpyxp2pq_Slot_pq_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_comb32_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_comb32_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dot_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_dot_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dotac_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_dotac_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dotacs_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_dotacs_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lin_int_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_lin_int_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_llrpre1_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_llrpre1_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_llrpre2_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_llrpre2_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mac_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mac8_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mac8_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macd8_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macd8_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macpqxp_0_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macpqxp_0_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macpqxp_1_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macpqxp_1_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macpqxp_2_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macpqxp_2_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macpqxp_3_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macpqxp_3_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macs_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_macs_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp2_0_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macxp2_0_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp2_1_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_macxp2_1_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp_0_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_macxp_0_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp_1_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_macxp_1_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp_2_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_macxp_2_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_macxp_3_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_macxp_3_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov2ac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mov2ac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpy_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpy_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mpy_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpy2cm_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpy2cm_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mpy2cm_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpy2pq_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mpy2pq_Slot_pq_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpy8_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpy8_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyadd8_2cm_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpyadd8_2cm_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mpyadd8_2cm_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyd8_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpyd8_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpypqxp_0_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpypqxp_0_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpypqxp_1_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpypqxp_1_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpypqxp_2_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpypqxp_2_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpypqxp_3_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpypqxp_3_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpys_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpys_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp2pq_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_mpyxp2pq_Slot_pq_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp2_0_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpyxp2_0_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp2_1_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, Opcode_mpyxp2_1_Slot_dot_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp_0_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpyxp_0_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp_1_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpyxp_1_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp_2_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpyxp_2_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mpyxp_3_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_mpyxp_3_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normacd_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normacd_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_normacd_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normacpq_i_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normacpq_i_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normacpq_r_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normacpq_r_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normd_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normd_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_normd_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normpypq_i_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normpypq_i_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_normpypq_r_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_normpypq_r_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rcmac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rcmac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rcmpy_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rcmpy_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rcmpy2cm_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rcmpy2cm_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rcmpy2cm_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfir_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rfir_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfira_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rfira_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfird_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rfird_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfirda_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rfirda_Slot_acc2_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rmac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rmac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rmpy_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rmpy_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rmpy2cm_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_rmpy2cm_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_rmpy2cm_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_smod_align_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_smod_align_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_smod_scr_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_smod_scr_Slot_smod_slot1_encode, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub2ac_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_sub2ac_Slot_gp_slot1_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wght32_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_wght32_Slot_llr_slot1_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrtiep_encode_fns[] = {
+ Opcode_clrtiep_Slot_inst_encode, 0, 0, Opcode_clrtiep_Slot_gp_slot2_encode, 0, 0, Opcode_clrtiep_Slot_dot_slot2_encode, 0, 0, Opcode_clrtiep_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_clrtiep_Slot_smod_slot2_encode, 0, 0, Opcode_clrtiep_Slot_llr_slot2_encode, 0, 0, Opcode_clrtiep_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_2fifo_0_encode_fns[] = {
+ Opcode_ext_2fifo_0_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_2fifo_0_Slot_gp_slot0_encode, 0, 0, Opcode_ext_2fifo_0_Slot_dot_slot0_encode, 0, 0, Opcode_ext_2fifo_0_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_2fifo_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_2fifo_1_encode_fns[] = {
+ Opcode_ext_2fifo_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_2fifo_1_Slot_gp_slot0_encode, 0, 0, Opcode_ext_2fifo_1_Slot_dot_slot0_encode, 0, 0, Opcode_ext_2fifo_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_2fifo_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_2fifo_2_encode_fns[] = {
+ Opcode_ext_2fifo_2_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_2fifo_2_Slot_gp_slot0_encode, 0, 0, Opcode_ext_2fifo_2_Slot_dot_slot0_encode, 0, 0, Opcode_ext_2fifo_2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_2fifo_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_2fifo_3_encode_fns[] = {
+ Opcode_ext_2fifo_3_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_2fifo_3_Slot_gp_slot0_encode, 0, 0, Opcode_ext_2fifo_3_Slot_dot_slot0_encode, 0, 0, Opcode_ext_2fifo_3_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_2fifo_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_r2fifo_0_encode_fns[] = {
+ Opcode_ext_r2fifo_0_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_r2fifo_0_Slot_gp_slot0_encode, 0, 0, Opcode_ext_r2fifo_0_Slot_dot_slot0_encode, 0, 0, Opcode_ext_r2fifo_0_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_r2fifo_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_r2fifo_1_encode_fns[] = {
+ Opcode_ext_r2fifo_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_r2fifo_1_Slot_gp_slot0_encode, 0, 0, Opcode_ext_r2fifo_1_Slot_dot_slot0_encode, 0, 0, Opcode_ext_r2fifo_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_r2fifo_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_r2fifo_2_encode_fns[] = {
+ Opcode_ext_r2fifo_2_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_r2fifo_2_Slot_gp_slot0_encode, 0, 0, Opcode_ext_r2fifo_2_Slot_dot_slot0_encode, 0, 0, Opcode_ext_r2fifo_2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_r2fifo_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_r2fifo_3_encode_fns[] = {
+ Opcode_ext_r2fifo_3_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_r2fifo_3_Slot_gp_slot0_encode, 0, 0, Opcode_ext_r2fifo_3_Slot_dot_slot0_encode, 0, 0, Opcode_ext_r2fifo_3_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_r2fifo_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_Slot_gp_slot2_encode, 0, 0, Opcode_lut_Slot_dot_slot2_encode, 0, 0, Opcode_lut_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lut_Slot_smod_slot2_encode, 0, 0, Opcode_lut_Slot_llr_slot2_encode, 0, 0, Opcode_lut_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_ar_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_ar_Slot_gp_slot2_encode, 0, 0, Opcode_lut_ar_Slot_dot_slot2_encode, 0, 0, Opcode_lut_ar_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lut_ar_Slot_smod_slot2_encode, 0, 0, Opcode_lut_ar_Slot_llr_slot2_encode, 0, 0, Opcode_lut_ar_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_iext_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_iext_Slot_gp_slot2_encode, 0, 0, Opcode_lut_iext_Slot_dot_slot2_encode, 0, 0, Opcode_lut_iext_Slot_pq_slot2_encode, 0, 0, Opcode_lut_iext_Slot_acc2_slot2_encode, 0, 0, Opcode_lut_iext_Slot_smod_slot2_encode, 0, 0, Opcode_lut_iext_Slot_llr_slot2_encode, 0, 0, Opcode_lut_iext_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_phasor_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_phasor_Slot_gp_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_dot_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_pq_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_acc2_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_smod_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_llr_slot2_encode, 0, 0, Opcode_lut_phasor_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_rext_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_rext_Slot_gp_slot2_encode, 0, 0, Opcode_lut_rext_Slot_dot_slot2_encode, 0, 0, Opcode_lut_rext_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lut_rext_Slot_smod_slot2_encode, 0, 0, Opcode_lut_rext_Slot_llr_slot2_encode, 0, 0, Opcode_lut_rext_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lut_write_encode_fns[] = {
+ 0, 0, 0, Opcode_lut_write_Slot_gp_slot2_encode, 0, 0, Opcode_lut_write_Slot_dot_slot2_encode, 0, 0, Opcode_lut_write_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lut_write_Slot_smod_slot2_encode, 0, 0, Opcode_lut_write_Slot_llr_slot2_encode, 0, 0, Opcode_lut_write_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_0_encode_fns[] = {
+ Opcode_moveq128_0_Slot_inst_encode, 0, 0, Opcode_moveq128_0_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_0_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_0_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_0_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_0_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_0_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_1_encode_fns[] = {
+ Opcode_moveq128_1_Slot_inst_encode, 0, 0, Opcode_moveq128_1_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_1_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_1_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_1_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_1_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_1_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_2_encode_fns[] = {
+ Opcode_moveq128_2_Slot_inst_encode, 0, 0, Opcode_moveq128_2_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_2_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_2_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_2_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_3_encode_fns[] = {
+ Opcode_moveq128_3_Slot_inst_encode, 0, 0, Opcode_moveq128_3_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_3_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_3_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_3_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_3_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_3_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_4_encode_fns[] = {
+ Opcode_moveq128_4_Slot_inst_encode, 0, 0, Opcode_moveq128_4_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_4_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_4_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_4_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_4_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_4_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq128_5_encode_fns[] = {
+ Opcode_moveq128_5_Slot_inst_encode, 0, 0, Opcode_moveq128_5_Slot_gp_slot2_encode, 0, 0, Opcode_moveq128_5_Slot_dot_slot2_encode, 0, 0, Opcode_moveq128_5_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq128_5_Slot_smod_slot2_encode, 0, 0, Opcode_moveq128_5_Slot_llr_slot2_encode, 0, 0, Opcode_moveq128_5_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq32_0_encode_fns[] = {
+ Opcode_moveq32_0_Slot_inst_encode, 0, 0, Opcode_moveq32_0_Slot_gp_slot2_encode, 0, 0, Opcode_moveq32_0_Slot_dot_slot2_encode, 0, 0, Opcode_moveq32_0_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq32_0_Slot_smod_slot2_encode, 0, 0, Opcode_moveq32_0_Slot_llr_slot2_encode, 0, 0, Opcode_moveq32_0_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq32_1_encode_fns[] = {
+ Opcode_moveq32_1_Slot_inst_encode, 0, 0, Opcode_moveq32_1_Slot_gp_slot2_encode, 0, 0, Opcode_moveq32_1_Slot_dot_slot2_encode, 0, 0, Opcode_moveq32_1_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq32_1_Slot_smod_slot2_encode, 0, 0, Opcode_moveq32_1_Slot_llr_slot2_encode, 0, 0, Opcode_moveq32_1_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq32_2_encode_fns[] = {
+ Opcode_moveq32_2_Slot_inst_encode, 0, 0, Opcode_moveq32_2_Slot_gp_slot2_encode, 0, 0, Opcode_moveq32_2_Slot_dot_slot2_encode, 0, 0, Opcode_moveq32_2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq32_2_Slot_smod_slot2_encode, 0, 0, Opcode_moveq32_2_Slot_llr_slot2_encode, 0, 0, Opcode_moveq32_2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveq32_3_encode_fns[] = {
+ Opcode_moveq32_3_Slot_inst_encode, 0, 0, Opcode_moveq32_3_Slot_gp_slot2_encode, 0, 0, Opcode_moveq32_3_Slot_dot_slot2_encode, 0, 0, Opcode_moveq32_3_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_moveq32_3_Slot_smod_slot2_encode, 0, 0, Opcode_moveq32_3_Slot_llr_slot2_encode, 0, 0, Opcode_moveq32_3_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nco_update_encode_fns[] = {
+ 0, 0, 0, Opcode_nco_update_Slot_gp_slot2_encode, 0, 0, Opcode_nco_update_Slot_dot_slot2_encode, 0, 0, Opcode_nco_update_Slot_pq_slot2_encode, 0, 0, Opcode_nco_update_Slot_acc2_slot2_encode, 0, 0, Opcode_nco_update_Slot_smod_slot2_encode, 0, 0, Opcode_nco_update_Slot_llr_slot2_encode, 0, 0, Opcode_nco_update_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_0_encode_fns[] = {
+ Opcode_pop128_0_Slot_inst_encode, 0, 0, Opcode_pop128_0_Slot_gp_slot2_encode, 0, Opcode_pop128_0_Slot_gp_slot0_encode, Opcode_pop128_0_Slot_dot_slot2_encode, 0, Opcode_pop128_0_Slot_dot_slot0_encode, Opcode_pop128_0_Slot_pq_slot2_encode, 0, Opcode_pop128_0_Slot_pq_slot0_encode, Opcode_pop128_0_Slot_acc2_slot2_encode, 0, Opcode_pop128_0_Slot_acc2_slot0_encode, Opcode_pop128_0_Slot_smod_slot2_encode, 0, Opcode_pop128_0_Slot_smod_slot0_encode, Opcode_pop128_0_Slot_llr_slot2_encode, 0, Opcode_pop128_0_Slot_llr_slot0_encode, Opcode_pop128_0_Slot_dual_slot2_encode, 0, Opcode_pop128_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_1_encode_fns[] = {
+ Opcode_pop128_1_Slot_inst_encode, 0, 0, Opcode_pop128_1_Slot_gp_slot2_encode, 0, Opcode_pop128_1_Slot_gp_slot0_encode, Opcode_pop128_1_Slot_dot_slot2_encode, 0, Opcode_pop128_1_Slot_dot_slot0_encode, Opcode_pop128_1_Slot_pq_slot2_encode, 0, Opcode_pop128_1_Slot_pq_slot0_encode, Opcode_pop128_1_Slot_acc2_slot2_encode, 0, Opcode_pop128_1_Slot_acc2_slot0_encode, Opcode_pop128_1_Slot_smod_slot2_encode, 0, Opcode_pop128_1_Slot_smod_slot0_encode, Opcode_pop128_1_Slot_llr_slot2_encode, 0, Opcode_pop128_1_Slot_llr_slot0_encode, Opcode_pop128_1_Slot_dual_slot2_encode, 0, Opcode_pop128_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2_encode_fns[] = {
+ Opcode_pop128_2_Slot_inst_encode, 0, 0, Opcode_pop128_2_Slot_gp_slot2_encode, 0, Opcode_pop128_2_Slot_gp_slot0_encode, Opcode_pop128_2_Slot_dot_slot2_encode, 0, Opcode_pop128_2_Slot_dot_slot0_encode, Opcode_pop128_2_Slot_pq_slot2_encode, 0, Opcode_pop128_2_Slot_pq_slot0_encode, Opcode_pop128_2_Slot_acc2_slot2_encode, 0, Opcode_pop128_2_Slot_acc2_slot0_encode, Opcode_pop128_2_Slot_smod_slot2_encode, 0, Opcode_pop128_2_Slot_smod_slot0_encode, Opcode_pop128_2_Slot_llr_slot2_encode, 0, Opcode_pop128_2_Slot_llr_slot0_encode, Opcode_pop128_2_Slot_dual_slot2_encode, 0, Opcode_pop128_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_3_encode_fns[] = {
+ Opcode_pop128_3_Slot_inst_encode, 0, 0, Opcode_pop128_3_Slot_gp_slot2_encode, 0, Opcode_pop128_3_Slot_gp_slot0_encode, Opcode_pop128_3_Slot_dot_slot2_encode, 0, Opcode_pop128_3_Slot_dot_slot0_encode, Opcode_pop128_3_Slot_pq_slot2_encode, 0, Opcode_pop128_3_Slot_pq_slot0_encode, Opcode_pop128_3_Slot_acc2_slot2_encode, 0, Opcode_pop128_3_Slot_acc2_slot0_encode, Opcode_pop128_3_Slot_smod_slot2_encode, 0, Opcode_pop128_3_Slot_smod_slot0_encode, Opcode_pop128_3_Slot_llr_slot2_encode, 0, Opcode_pop128_3_Slot_llr_slot0_encode, Opcode_pop128_3_Slot_dual_slot2_encode, 0, Opcode_pop128_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_4_encode_fns[] = {
+ Opcode_pop128_4_Slot_inst_encode, 0, 0, Opcode_pop128_4_Slot_gp_slot2_encode, 0, Opcode_pop128_4_Slot_gp_slot0_encode, Opcode_pop128_4_Slot_dot_slot2_encode, 0, Opcode_pop128_4_Slot_dot_slot0_encode, Opcode_pop128_4_Slot_pq_slot2_encode, 0, Opcode_pop128_4_Slot_pq_slot0_encode, Opcode_pop128_4_Slot_acc2_slot2_encode, 0, Opcode_pop128_4_Slot_acc2_slot0_encode, Opcode_pop128_4_Slot_smod_slot2_encode, 0, Opcode_pop128_4_Slot_smod_slot0_encode, Opcode_pop128_4_Slot_llr_slot2_encode, 0, Opcode_pop128_4_Slot_llr_slot0_encode, Opcode_pop128_4_Slot_dual_slot2_encode, 0, Opcode_pop128_4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_5_encode_fns[] = {
+ Opcode_pop128_5_Slot_inst_encode, 0, 0, Opcode_pop128_5_Slot_gp_slot2_encode, 0, Opcode_pop128_5_Slot_gp_slot0_encode, Opcode_pop128_5_Slot_dot_slot2_encode, 0, Opcode_pop128_5_Slot_dot_slot0_encode, Opcode_pop128_5_Slot_pq_slot2_encode, 0, Opcode_pop128_5_Slot_pq_slot0_encode, Opcode_pop128_5_Slot_acc2_slot2_encode, 0, Opcode_pop128_5_Slot_acc2_slot0_encode, Opcode_pop128_5_Slot_smod_slot2_encode, 0, Opcode_pop128_5_Slot_smod_slot0_encode, Opcode_pop128_5_Slot_llr_slot2_encode, 0, Opcode_pop128_5_Slot_llr_slot0_encode, Opcode_pop128_5_Slot_dual_slot2_encode, 0, Opcode_pop128_5_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2cmpq_0_encode_fns[] = {
+ 0, 0, 0, Opcode_pop128_2cmpq_0_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2cmpq_0_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2cmpq_0_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2cmpq_1_encode_fns[] = {
+ 0, 0, 0, Opcode_pop128_2cmpq_1_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2cmpq_1_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2cmpq_1_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2cmpq_2_encode_fns[] = {
+ 0, 0, 0, Opcode_pop128_2cmpq_2_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2cmpq_2_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2cmpq_2_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2cmpq_3_encode_fns[] = {
+ 0, 0, 0, Opcode_pop128_2cmpq_3_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2cmpq_3_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2cmpq_3_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2m_0_encode_fns[] = {
+ Opcode_pop128_2m_0_Slot_inst_encode, 0, 0, 0, 0, Opcode_pop128_2m_0_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_0_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_0_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2m_1_encode_fns[] = {
+ Opcode_pop128_2m_1_Slot_inst_encode, 0, 0, 0, 0, Opcode_pop128_2m_1_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_1_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_1_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2m_2_encode_fns[] = {
+ Opcode_pop128_2m_2_Slot_inst_encode, 0, 0, 0, 0, Opcode_pop128_2m_2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_2_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2m_3_encode_fns[] = {
+ Opcode_pop128_2m_3_Slot_inst_encode, 0, 0, 0, 0, Opcode_pop128_2m_3_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_3_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_3_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_pop128_2m_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_0_encode_fns[] = {
+ Opcode_pop128_2pq_0_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_0_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_0_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_0_Slot_acc2_slot0_encode, Opcode_pop128_2pq_0_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_1_encode_fns[] = {
+ Opcode_pop128_2pq_1_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_1_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_1_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_1_Slot_acc2_slot0_encode, Opcode_pop128_2pq_1_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_2_encode_fns[] = {
+ Opcode_pop128_2pq_2_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_2_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_2_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_2_Slot_acc2_slot0_encode, Opcode_pop128_2pq_2_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_3_encode_fns[] = {
+ Opcode_pop128_2pq_3_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_3_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_3_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_3_Slot_acc2_slot0_encode, Opcode_pop128_2pq_3_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_4_encode_fns[] = {
+ Opcode_pop128_2pq_4_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_4_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_4_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_4_Slot_acc2_slot0_encode, Opcode_pop128_2pq_4_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop128_2pq_5_encode_fns[] = {
+ Opcode_pop128_2pq_5_Slot_inst_encode, 0, 0, Opcode_pop128_2pq_5_Slot_gp_slot2_encode, 0, 0, Opcode_pop128_2pq_5_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_5_Slot_acc2_slot0_encode, Opcode_pop128_2pq_5_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop128_2pq_5_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop2x128_2pq_01_encode_fns[] = {
+ Opcode_pop2x128_2pq_01_Slot_inst_encode, 0, 0, Opcode_pop2x128_2pq_01_Slot_gp_slot2_encode, 0, 0, Opcode_pop2x128_2pq_01_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_01_Slot_acc2_slot0_encode, Opcode_pop2x128_2pq_01_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_01_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop2x128_2pq_03_encode_fns[] = {
+ Opcode_pop2x128_2pq_03_Slot_inst_encode, 0, 0, Opcode_pop2x128_2pq_03_Slot_gp_slot2_encode, 0, 0, Opcode_pop2x128_2pq_03_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_03_Slot_acc2_slot0_encode, Opcode_pop2x128_2pq_03_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_03_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop2x128_2pq_21_encode_fns[] = {
+ Opcode_pop2x128_2pq_21_Slot_inst_encode, 0, 0, Opcode_pop2x128_2pq_21_Slot_gp_slot2_encode, 0, 0, Opcode_pop2x128_2pq_21_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_21_Slot_acc2_slot0_encode, Opcode_pop2x128_2pq_21_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_21_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop2x128_2pq_23_encode_fns[] = {
+ Opcode_pop2x128_2pq_23_Slot_inst_encode, 0, 0, Opcode_pop2x128_2pq_23_Slot_gp_slot2_encode, 0, 0, Opcode_pop2x128_2pq_23_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_23_Slot_acc2_slot0_encode, Opcode_pop2x128_2pq_23_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_pop2x128_2pq_23_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop32_0_encode_fns[] = {
+ Opcode_pop32_0_Slot_inst_encode, 0, 0, Opcode_pop32_0_Slot_gp_slot2_encode, 0, Opcode_pop32_0_Slot_gp_slot0_encode, Opcode_pop32_0_Slot_dot_slot2_encode, 0, Opcode_pop32_0_Slot_dot_slot0_encode, Opcode_pop32_0_Slot_pq_slot2_encode, 0, Opcode_pop32_0_Slot_pq_slot0_encode, Opcode_pop32_0_Slot_acc2_slot2_encode, 0, Opcode_pop32_0_Slot_acc2_slot0_encode, Opcode_pop32_0_Slot_smod_slot2_encode, 0, Opcode_pop32_0_Slot_smod_slot0_encode, Opcode_pop32_0_Slot_llr_slot2_encode, 0, Opcode_pop32_0_Slot_llr_slot0_encode, Opcode_pop32_0_Slot_dual_slot2_encode, 0, Opcode_pop32_0_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop32_1_encode_fns[] = {
+ Opcode_pop32_1_Slot_inst_encode, 0, 0, Opcode_pop32_1_Slot_gp_slot2_encode, 0, Opcode_pop32_1_Slot_gp_slot0_encode, Opcode_pop32_1_Slot_dot_slot2_encode, 0, Opcode_pop32_1_Slot_dot_slot0_encode, Opcode_pop32_1_Slot_pq_slot2_encode, 0, Opcode_pop32_1_Slot_pq_slot0_encode, Opcode_pop32_1_Slot_acc2_slot2_encode, 0, Opcode_pop32_1_Slot_acc2_slot0_encode, Opcode_pop32_1_Slot_smod_slot2_encode, 0, Opcode_pop32_1_Slot_smod_slot0_encode, Opcode_pop32_1_Slot_llr_slot2_encode, 0, Opcode_pop32_1_Slot_llr_slot0_encode, Opcode_pop32_1_Slot_dual_slot2_encode, 0, Opcode_pop32_1_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop32_2_encode_fns[] = {
+ Opcode_pop32_2_Slot_inst_encode, 0, 0, Opcode_pop32_2_Slot_gp_slot2_encode, 0, Opcode_pop32_2_Slot_gp_slot0_encode, Opcode_pop32_2_Slot_dot_slot2_encode, 0, Opcode_pop32_2_Slot_dot_slot0_encode, Opcode_pop32_2_Slot_pq_slot2_encode, 0, Opcode_pop32_2_Slot_pq_slot0_encode, Opcode_pop32_2_Slot_acc2_slot2_encode, 0, Opcode_pop32_2_Slot_acc2_slot0_encode, Opcode_pop32_2_Slot_smod_slot2_encode, 0, Opcode_pop32_2_Slot_smod_slot0_encode, Opcode_pop32_2_Slot_llr_slot2_encode, 0, Opcode_pop32_2_Slot_llr_slot0_encode, Opcode_pop32_2_Slot_dual_slot2_encode, 0, Opcode_pop32_2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_pop32_3_encode_fns[] = {
+ Opcode_pop32_3_Slot_inst_encode, 0, 0, Opcode_pop32_3_Slot_gp_slot2_encode, 0, Opcode_pop32_3_Slot_gp_slot0_encode, Opcode_pop32_3_Slot_dot_slot2_encode, 0, Opcode_pop32_3_Slot_dot_slot0_encode, Opcode_pop32_3_Slot_pq_slot2_encode, 0, Opcode_pop32_3_Slot_pq_slot0_encode, Opcode_pop32_3_Slot_acc2_slot2_encode, 0, Opcode_pop32_3_Slot_acc2_slot0_encode, Opcode_pop32_3_Slot_smod_slot2_encode, 0, Opcode_pop32_3_Slot_smod_slot0_encode, Opcode_pop32_3_Slot_llr_slot2_encode, 0, Opcode_pop32_3_Slot_llr_slot0_encode, Opcode_pop32_3_Slot_dual_slot2_encode, 0, Opcode_pop32_3_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_push128_encode_fns[] = {
+ Opcode_push128_Slot_inst_encode, 0, 0, Opcode_push128_Slot_gp_slot2_encode, 0, Opcode_push128_Slot_gp_slot0_encode, Opcode_push128_Slot_dot_slot2_encode, 0, Opcode_push128_Slot_dot_slot0_encode, Opcode_push128_Slot_pq_slot2_encode, 0, Opcode_push128_Slot_pq_slot0_encode, Opcode_push128_Slot_acc2_slot2_encode, 0, Opcode_push128_Slot_acc2_slot0_encode, Opcode_push128_Slot_smod_slot2_encode, 0, Opcode_push128_Slot_smod_slot0_encode, Opcode_push128_Slot_llr_slot2_encode, 0, Opcode_push128_Slot_llr_slot0_encode, Opcode_push128_Slot_dual_slot2_encode, 0, Opcode_push128_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_push128_m_encode_fns[] = {
+ Opcode_push128_m_Slot_inst_encode, 0, 0, 0, 0, Opcode_push128_m_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_push128_m_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, Opcode_push128_m_Slot_smod_slot0_encode, 0, 0, 0, 0, 0, Opcode_push128_m_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_push128_pq_encode_fns[] = {
+ Opcode_push128_pq_Slot_inst_encode, 0, 0, Opcode_push128_pq_Slot_gp_slot2_encode, 0, 0, Opcode_push128_pq_Slot_dot_slot2_encode, 0, 0, Opcode_push128_pq_Slot_pq_slot2_encode, 0, 0, 0, 0, Opcode_push128_pq_Slot_acc2_slot0_encode, Opcode_push128_pq_Slot_smod_slot2_encode, 0, 0, Opcode_push128_pq_Slot_llr_slot2_encode, 0, Opcode_push128_pq_Slot_llr_slot0_encode, Opcode_push128_pq_Slot_dual_slot2_encode, 0, Opcode_push128_pq_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_push2x128_pq_encode_fns[] = {
+ Opcode_push2x128_pq_Slot_inst_encode, 0, 0, Opcode_push2x128_pq_Slot_gp_slot2_encode, 0, 0, Opcode_push2x128_pq_Slot_dot_slot2_encode, 0, 0, 0, 0, 0, 0, 0, Opcode_push2x128_pq_Slot_acc2_slot0_encode, Opcode_push2x128_pq_Slot_smod_slot2_encode, 0, 0, 0, 0, Opcode_push2x128_pq_Slot_llr_slot0_encode, 0, 0, Opcode_push2x128_pq_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_push32_encode_fns[] = {
+ Opcode_push32_Slot_inst_encode, 0, 0, Opcode_push32_Slot_gp_slot2_encode, 0, Opcode_push32_Slot_gp_slot0_encode, Opcode_push32_Slot_dot_slot2_encode, 0, Opcode_push32_Slot_dot_slot0_encode, Opcode_push32_Slot_pq_slot2_encode, 0, Opcode_push32_Slot_pq_slot0_encode, Opcode_push32_Slot_acc2_slot2_encode, 0, 0, Opcode_push32_Slot_smod_slot2_encode, 0, Opcode_push32_Slot_smod_slot0_encode, Opcode_push32_Slot_llr_slot2_encode, 0, Opcode_push32_Slot_llr_slot0_encode, Opcode_push32_Slot_dual_slot2_encode, 0, Opcode_push32_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_qready_encode_fns[] = {
+ Opcode_qready_Slot_inst_encode, 0, 0, Opcode_qready_Slot_gp_slot2_encode, 0, 0, Opcode_qready_Slot_dot_slot2_encode, 0, 0, Opcode_qready_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_qready_Slot_smod_slot2_encode, 0, 0, Opcode_qready_Slot_llr_slot2_encode, 0, 0, Opcode_qready_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtiep_encode_fns[] = {
+ Opcode_rdtiep_Slot_inst_encode, 0, 0, Opcode_rdtiep_Slot_gp_slot2_encode, 0, 0, Opcode_rdtiep_Slot_dot_slot2_encode, 0, 0, Opcode_rdtiep_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_rdtiep_Slot_smod_slot2_encode, 0, 0, Opcode_rdtiep_Slot_llr_slot2_encode, 0, 0, Opcode_rdtiep_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_settiep_encode_fns[] = {
+ Opcode_settiep_Slot_inst_encode, 0, 0, Opcode_settiep_Slot_gp_slot2_encode, 0, 0, Opcode_settiep_Slot_dot_slot2_encode, 0, 0, Opcode_settiep_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_settiep_Slot_smod_slot2_encode, 0, 0, Opcode_settiep_Slot_llr_slot2_encode, 0, 0, Opcode_settiep_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_smod_lut_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_smod_lut_Slot_smod_slot2_encode, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrtbsigq_encode_fns[] = {
+ Opcode_wrtbsigq_Slot_inst_encode, 0, 0, Opcode_wrtbsigq_Slot_gp_slot2_encode, 0, 0, Opcode_wrtbsigq_Slot_dot_slot2_encode, 0, 0, Opcode_wrtbsigq_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_wrtbsigq_Slot_smod_slot2_encode, 0, 0, Opcode_wrtbsigq_Slot_llr_slot2_encode, 0, 0, Opcode_wrtbsigq_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrtbsigqm_encode_fns[] = {
+ Opcode_wrtbsigqm_Slot_inst_encode, 0, 0, Opcode_wrtbsigqm_Slot_gp_slot2_encode, 0, 0, Opcode_wrtbsigqm_Slot_dot_slot2_encode, 0, 0, Opcode_wrtbsigqm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_wrtbsigqm_Slot_smod_slot2_encode, 0, 0, Opcode_wrtbsigqm_Slot_llr_slot2_encode, 0, 0, Opcode_wrtbsigqm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrtiep_encode_fns[] = {
+ Opcode_wrtiep_Slot_inst_encode, 0, 0, Opcode_wrtiep_Slot_gp_slot2_encode, 0, 0, Opcode_wrtiep_Slot_dot_slot2_encode, 0, 0, Opcode_wrtiep_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_wrtiep_Slot_smod_slot2_encode, 0, 0, Opcode_wrtiep_Slot_llr_slot2_encode, 0, 0, Opcode_wrtiep_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrtsigq_encode_fns[] = {
+ Opcode_wrtsigq_Slot_inst_encode, 0, 0, Opcode_wrtsigq_Slot_gp_slot2_encode, 0, 0, Opcode_wrtsigq_Slot_dot_slot2_encode, 0, 0, Opcode_wrtsigq_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_wrtsigq_Slot_smod_slot2_encode, 0, 0, Opcode_wrtsigq_Slot_llr_slot2_encode, 0, 0, Opcode_wrtsigq_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs8_encode_fns[] = {
+ 0, 0, 0, Opcode_abs8_Slot_gp_slot2_encode, 0, 0, Opcode_abs8_Slot_dot_slot2_encode, 0, 0, Opcode_abs8_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_abs8_Slot_smod_slot2_encode, 0, 0, Opcode_abs8_Slot_llr_slot2_encode, 0, 0, Opcode_abs8_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add16_encode_fns[] = {
+ 0, 0, 0, Opcode_add16_Slot_gp_slot2_encode, 0, 0, Opcode_add16_Slot_dot_slot2_encode, 0, 0, Opcode_add16_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_add16_Slot_smod_slot2_encode, 0, 0, Opcode_add16_Slot_llr_slot2_encode, 0, 0, Opcode_add16_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add32_encode_fns[] = {
+ 0, 0, 0, Opcode_add32_Slot_gp_slot2_encode, 0, 0, Opcode_add32_Slot_dot_slot2_encode, 0, 0, Opcode_add32_Slot_pq_slot2_encode, 0, 0, Opcode_add32_Slot_acc2_slot2_encode, 0, 0, Opcode_add32_Slot_smod_slot2_encode, 0, 0, Opcode_add32_Slot_llr_slot2_encode, 0, 0, Opcode_add32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addac_i2r_encode_fns[] = {
+ Opcode_addac_i2r_Slot_inst_encode, 0, 0, 0, 0, Opcode_addac_i2r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_addac_i2r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_addac_i2r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addac_r2i_encode_fns[] = {
+ Opcode_addac_r2i_Slot_inst_encode, 0, 0, 0, 0, Opcode_addac_r2i_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_addac_r2i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_addac_r2i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addar2_encode_fns[] = {
+ 0, 0, 0, Opcode_addar2_Slot_gp_slot2_encode, 0, 0, Opcode_addar2_Slot_dot_slot2_encode, 0, 0, Opcode_addar2_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_addar2_Slot_smod_slot2_encode, 0, 0, Opcode_addar2_Slot_llr_slot2_encode, 0, 0, Opcode_addar2_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addcm_encode_fns[] = {
+ 0, 0, 0, Opcode_addcm_Slot_gp_slot2_encode, 0, 0, Opcode_addcm_Slot_dot_slot2_encode, 0, 0, Opcode_addcm_Slot_pq_slot2_encode, 0, 0, Opcode_addcm_Slot_acc2_slot2_encode, 0, 0, Opcode_addcm_Slot_smod_slot2_encode, 0, 0, Opcode_addcm_Slot_llr_slot2_encode, 0, 0, Opcode_addcm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addwrp_encode_fns[] = {
+ 0, 0, 0, Opcode_addwrp_Slot_gp_slot2_encode, 0, 0, Opcode_addwrp_Slot_dot_slot2_encode, 0, 0, Opcode_addwrp_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_addwrp_Slot_smod_slot2_encode, 0, 0, Opcode_addwrp_Slot_llr_slot2_encode, 0, 0, Opcode_addwrp_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and128_encode_fns[] = {
+ 0, 0, 0, Opcode_and128_Slot_gp_slot2_encode, 0, 0, Opcode_and128_Slot_dot_slot2_encode, 0, 0, Opcode_and128_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_and128_Slot_smod_slot2_encode, 0, 0, Opcode_and128_Slot_llr_slot2_encode, 0, 0, Opcode_and128_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_argmax8_encode_fns[] = {
+ 0, 0, 0, Opcode_argmax8_Slot_gp_slot2_encode, 0, 0, Opcode_argmax8_Slot_dot_slot2_encode, 0, 0, Opcode_argmax8_Slot_pq_slot2_encode, 0, 0, Opcode_argmax8_Slot_acc2_slot2_encode, 0, 0, Opcode_argmax8_Slot_smod_slot2_encode, 0, 0, Opcode_argmax8_Slot_llr_slot2_encode, 0, 0, Opcode_argmax8_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_asl_encode_fns[] = {
+ 0, 0, 0, Opcode_asl_Slot_gp_slot2_encode, 0, 0, Opcode_asl_Slot_dot_slot2_encode, 0, 0, Opcode_asl_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_asl_Slot_smod_slot2_encode, 0, 0, Opcode_asl_Slot_llr_slot2_encode, 0, 0, Opcode_asl_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_asl32_encode_fns[] = {
+ 0, 0, 0, Opcode_asl32_Slot_gp_slot2_encode, 0, 0, Opcode_asl32_Slot_dot_slot2_encode, 0, 0, Opcode_asl32_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_asl32_Slot_smod_slot2_encode, 0, 0, Opcode_asl32_Slot_llr_slot2_encode, 0, 0, Opcode_asl32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_aslacm_encode_fns[] = {
+ Opcode_aslacm_Slot_inst_encode, 0, 0, 0, 0, Opcode_aslacm_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_aslacm_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_aslacm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_aslm_encode_fns[] = {
+ 0, 0, 0, Opcode_aslm_Slot_gp_slot2_encode, 0, 0, Opcode_aslm_Slot_dot_slot2_encode, 0, 0, Opcode_aslm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_aslm_Slot_smod_slot2_encode, 0, 0, Opcode_aslm_Slot_llr_slot2_encode, 0, 0, Opcode_aslm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_aslm32_encode_fns[] = {
+ 0, 0, 0, Opcode_aslm32_Slot_gp_slot2_encode, 0, 0, Opcode_aslm32_Slot_dot_slot2_encode, 0, 0, Opcode_aslm32_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_aslm32_Slot_smod_slot2_encode, 0, 0, Opcode_aslm32_Slot_llr_slot2_encode, 0, 0, Opcode_aslm32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_asr_encode_fns[] = {
+ 0, 0, 0, Opcode_asr_Slot_gp_slot2_encode, 0, 0, Opcode_asr_Slot_dot_slot2_encode, 0, 0, Opcode_asr_Slot_pq_slot2_encode, 0, 0, Opcode_asr_Slot_acc2_slot2_encode, 0, 0, Opcode_asr_Slot_smod_slot2_encode, 0, 0, Opcode_asr_Slot_llr_slot2_encode, 0, 0, Opcode_asr_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_asr32_encode_fns[] = {
+ 0, 0, 0, Opcode_asr32_Slot_gp_slot2_encode, 0, 0, Opcode_asr32_Slot_dot_slot2_encode, 0, 0, Opcode_asr32_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_asr32_Slot_smod_slot2_encode, 0, 0, Opcode_asr32_Slot_llr_slot2_encode, 0, 0, Opcode_asr32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_asrac_encode_fns[] = {
+ Opcode_asrac_Slot_inst_encode, 0, 0, 0, 0, Opcode_asrac_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_asrac_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_asrac_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_asrm_encode_fns[] = {
+ 0, 0, 0, Opcode_asrm_Slot_gp_slot2_encode, 0, 0, Opcode_asrm_Slot_dot_slot2_encode, 0, 0, Opcode_asrm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_asrm_Slot_smod_slot2_encode, 0, 0, Opcode_asrm_Slot_llr_slot2_encode, 0, 0, Opcode_asrm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bitfext_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bitfext_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bitfins_encode_fns[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_bitfins_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clb_c_encode_fns[] = {
+ Opcode_clb_c_Slot_inst_encode, 0, 0, 0, 0, Opcode_clb_c_Slot_gp_slot0_encode, 0, 0, Opcode_clb_c_Slot_dot_slot0_encode, 0, 0, Opcode_clb_c_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_clb_c_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_clb_r_encode_fns[] = {
+ Opcode_clb_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_clb_r_Slot_gp_slot0_encode, 0, 0, Opcode_clb_r_Slot_dot_slot0_encode, 0, 0, Opcode_clb_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_clb_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_cmp8_encode_fns[] = {
+ 0, 0, 0, Opcode_cmp8_Slot_gp_slot2_encode, 0, 0, Opcode_cmp8_Slot_dot_slot2_encode, 0, 0, Opcode_cmp8_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_cmp8_Slot_smod_slot2_encode, 0, 0, Opcode_cmp8_Slot_llr_slot2_encode, 0, 0, Opcode_cmp8_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmp_i_encode_fns[] = {
+ 0, 0, 0, Opcode_cmp_i_Slot_gp_slot2_encode, 0, 0, Opcode_cmp_i_Slot_dot_slot2_encode, 0, 0, Opcode_cmp_i_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_cmp_i_Slot_smod_slot2_encode, 0, 0, Opcode_cmp_i_Slot_llr_slot2_encode, 0, 0, Opcode_cmp_i_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_cmp_r_encode_fns[] = {
+ 0, 0, 0, Opcode_cmp_r_Slot_gp_slot2_encode, 0, 0, Opcode_cmp_r_Slot_dot_slot2_encode, 0, 0, Opcode_cmp_r_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_cmp_r_Slot_smod_slot2_encode, 0, 0, Opcode_cmp_r_Slot_llr_slot2_encode, 0, 0, Opcode_cmp_r_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_encode_fns[] = {
+ Opcode_ext_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_Slot_gp_slot0_encode, 0, 0, Opcode_ext_Slot_dot_slot0_encode, 0, 0, Opcode_ext_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext_r_encode_fns[] = {
+ Opcode_ext_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext_r_Slot_gp_slot0_encode, 0, 0, Opcode_ext_r_Slot_dot_slot0_encode, 0, 0, Opcode_ext_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext32_i_encode_fns[] = {
+ Opcode_ext32_i_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext32_i_Slot_gp_slot0_encode, 0, 0, Opcode_ext32_i_Slot_dot_slot0_encode, 0, 0, Opcode_ext32_i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext32_i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ext32_r_encode_fns[] = {
+ Opcode_ext32_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_ext32_r_Slot_gp_slot0_encode, 0, 0, Opcode_ext32_r_Slot_dot_slot0_encode, 0, 0, Opcode_ext32_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_ext32_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_extui4_encode_fns[] = {
+ 0, 0, 0, Opcode_extui4_Slot_gp_slot2_encode, 0, 0, Opcode_extui4_Slot_dot_slot2_encode, 0, 0, Opcode_extui4_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_extui4_Slot_smod_slot2_encode, 0, 0, Opcode_extui4_Slot_llr_slot2_encode, 0, 0, Opcode_extui4_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lslm_encode_fns[] = {
+ 0, 0, 0, Opcode_lslm_Slot_gp_slot2_encode, 0, 0, Opcode_lslm_Slot_dot_slot2_encode, 0, 0, Opcode_lslm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lslm_Slot_smod_slot2_encode, 0, 0, Opcode_lslm_Slot_llr_slot2_encode, 0, 0, Opcode_lslm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lsrm_encode_fns[] = {
+ 0, 0, 0, Opcode_lsrm_Slot_gp_slot2_encode, 0, 0, Opcode_lsrm_Slot_dot_slot2_encode, 0, 0, Opcode_lsrm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_lsrm_Slot_smod_slot2_encode, 0, 0, Opcode_lsrm_Slot_llr_slot2_encode, 0, 0, Opcode_lsrm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max8_encode_fns[] = {
+ 0, 0, 0, Opcode_max8_Slot_gp_slot2_encode, 0, 0, Opcode_max8_Slot_dot_slot2_encode, 0, 0, Opcode_max8_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_max8_Slot_smod_slot2_encode, 0, 0, Opcode_max8_Slot_llr_slot2_encode, 0, 0, Opcode_max8_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mean_encode_fns[] = {
+ 0, 0, 0, Opcode_mean_Slot_gp_slot2_encode, 0, 0, Opcode_mean_Slot_dot_slot2_encode, 0, 0, Opcode_mean_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_mean_Slot_smod_slot2_encode, 0, 0, Opcode_mean_Slot_llr_slot2_encode, 0, 0, Opcode_mean_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mean32_encode_fns[] = {
+ 0, 0, 0, Opcode_mean32_Slot_gp_slot2_encode, 0, 0, Opcode_mean32_Slot_dot_slot2_encode, 0, 0, Opcode_mean32_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_mean32_Slot_smod_slot2_encode, 0, 0, Opcode_mean32_Slot_llr_slot2_encode, 0, 0, Opcode_mean32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min8_encode_fns[] = {
+ 0, 0, 0, Opcode_min8_Slot_gp_slot2_encode, 0, 0, Opcode_min8_Slot_dot_slot2_encode, 0, 0, Opcode_min8_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_min8_Slot_smod_slot2_encode, 0, 0, Opcode_min8_Slot_llr_slot2_encode, 0, 0, Opcode_min8_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minclb_c_encode_fns[] = {
+ Opcode_minclb_c_Slot_inst_encode, 0, 0, 0, 0, Opcode_minclb_c_Slot_gp_slot0_encode, 0, 0, Opcode_minclb_c_Slot_dot_slot0_encode, 0, 0, Opcode_minclb_c_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_minclb_c_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_minclb_r_encode_fns[] = {
+ Opcode_minclb_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_minclb_r_Slot_gp_slot0_encode, 0, 0, Opcode_minclb_r_Slot_dot_slot0_encode, 0, 0, Opcode_minclb_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_minclb_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_not128_encode_fns[] = {
+ 0, 0, 0, Opcode_not128_Slot_gp_slot2_encode, 0, 0, Opcode_not128_Slot_dot_slot2_encode, 0, 0, Opcode_not128_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_not128_Slot_smod_slot2_encode, 0, 0, Opcode_not128_Slot_llr_slot2_encode, 0, 0, Opcode_not128_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or128_encode_fns[] = {
+ 0, 0, 0, Opcode_or128_Slot_gp_slot2_encode, 0, 0, Opcode_or128_Slot_dot_slot2_encode, 0, 0, Opcode_or128_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_or128_Slot_smod_slot2_encode, 0, 0, Opcode_or128_Slot_llr_slot2_encode, 0, 0, Opcode_or128_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_perm_encode_fns[] = {
+ 0, 0, 0, Opcode_perm_Slot_gp_slot2_encode, 0, 0, Opcode_perm_Slot_dot_slot2_encode, 0, 0, Opcode_perm_Slot_pq_slot2_encode, 0, 0, Opcode_perm_Slot_acc2_slot2_encode, 0, 0, Opcode_perm_Slot_smod_slot2_encode, 0, 0, Opcode_perm_Slot_llr_slot2_encode, 0, 0, Opcode_perm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_redac_encode_fns[] = {
+ Opcode_redac_Slot_inst_encode, 0, 0, 0, 0, Opcode_redac_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_redac_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_redac_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_redac2_encode_fns[] = {
+ Opcode_redac2_Slot_inst_encode, 0, 0, 0, 0, Opcode_redac2_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_redac2_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_redac2_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_redac4_encode_fns[] = {
+ Opcode_redac4_Slot_inst_encode, 0, 0, 0, 0, Opcode_redac4_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_redac4_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_redac4_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_redacs_encode_fns[] = {
+ Opcode_redacs_Slot_inst_encode, 0, 0, 0, 0, Opcode_redacs_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_redacs_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_redacs_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sminclb_c_encode_fns[] = {
+ Opcode_sminclb_c_Slot_inst_encode, 0, 0, 0, 0, Opcode_sminclb_c_Slot_gp_slot0_encode, 0, 0, Opcode_sminclb_c_Slot_dot_slot0_encode, 0, 0, Opcode_sminclb_c_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sminclb_c_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sminclb_r_encode_fns[] = {
+ Opcode_sminclb_r_Slot_inst_encode, 0, 0, 0, 0, Opcode_sminclb_r_Slot_gp_slot0_encode, 0, 0, Opcode_sminclb_r_Slot_dot_slot0_encode, 0, 0, Opcode_sminclb_r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_sminclb_r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_stswapbm_encode_fns[] = {
+ Opcode_stswapbm_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_stswapbm_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_stswapbmu_encode_fns[] = {
+ Opcode_stswapbmu_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_stswapbmu_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sub32_encode_fns[] = {
+ 0, 0, 0, Opcode_sub32_Slot_gp_slot2_encode, 0, 0, Opcode_sub32_Slot_dot_slot2_encode, 0, 0, Opcode_sub32_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_sub32_Slot_smod_slot2_encode, 0, 0, Opcode_sub32_Slot_llr_slot2_encode, 0, 0, Opcode_sub32_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subac_i2r_encode_fns[] = {
+ Opcode_subac_i2r_Slot_inst_encode, 0, 0, 0, 0, Opcode_subac_i2r_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_subac_i2r_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_subac_i2r_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subac_r2i_encode_fns[] = {
+ Opcode_subac_r2i_Slot_inst_encode, 0, 0, 0, 0, Opcode_subac_r2i_Slot_gp_slot0_encode, 0, 0, 0, 0, 0, Opcode_subac_r2i_Slot_pq_slot0_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Opcode_subac_r2i_Slot_dual_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subarx_encode_fns[] = {
+ 0, 0, 0, Opcode_subarx_Slot_gp_slot2_encode, 0, 0, Opcode_subarx_Slot_dot_slot2_encode, 0, 0, Opcode_subarx_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_subarx_Slot_smod_slot2_encode, 0, 0, Opcode_subarx_Slot_llr_slot2_encode, 0, 0, Opcode_subarx_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subcm_encode_fns[] = {
+ 0, 0, 0, Opcode_subcm_Slot_gp_slot2_encode, 0, 0, Opcode_subcm_Slot_dot_slot2_encode, 0, 0, Opcode_subcm_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_subcm_Slot_smod_slot2_encode, 0, 0, Opcode_subcm_Slot_llr_slot2_encode, 0, 0, Opcode_subcm_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_submean_encode_fns[] = {
+ 0, 0, 0, Opcode_submean_Slot_gp_slot2_encode, 0, 0, Opcode_submean_Slot_dot_slot2_encode, 0, 0, Opcode_submean_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_submean_Slot_smod_slot2_encode, 0, 0, Opcode_submean_Slot_llr_slot2_encode, 0, 0, Opcode_submean_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subwrp_encode_fns[] = {
+ 0, 0, 0, Opcode_subwrp_Slot_gp_slot2_encode, 0, 0, Opcode_subwrp_Slot_dot_slot2_encode, 0, 0, Opcode_subwrp_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_subwrp_Slot_smod_slot2_encode, 0, 0, Opcode_subwrp_Slot_llr_slot2_encode, 0, 0, Opcode_subwrp_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_trans_encode_fns[] = {
+ 0, 0, 0, Opcode_trans_Slot_gp_slot2_encode, 0, 0, Opcode_trans_Slot_dot_slot2_encode, 0, 0, Opcode_trans_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_trans_Slot_smod_slot2_encode, 0, 0, Opcode_trans_Slot_llr_slot2_encode, 0, 0, Opcode_trans_Slot_dual_slot2_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor128_encode_fns[] = {
+ 0, 0, 0, Opcode_xor128_Slot_gp_slot2_encode, 0, 0, Opcode_xor128_Slot_dot_slot2_encode, 0, 0, Opcode_xor128_Slot_pq_slot2_encode, 0, 0, 0, 0, 0, Opcode_xor128_Slot_smod_slot2_encode, 0, 0, Opcode_xor128_Slot_llr_slot2_encode, 0, 0, Opcode_xor128_Slot_dual_slot2_encode, 0, 0
+};
+
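+/*
+ * The rur.* (read user register) and wur.* (write user register)
+ * opcodes below transfer data between AR registers and user-defined
+ * states; each is encodable only in the core "inst" slot, so every
+ * other slot entry is 0.
+ */
+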
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sov_encode_fns[] = {
+ Opcode_rur_sov_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sov_encode_fns[] = {
+ Opcode_wur_sov_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sat_mode_encode_fns[] = {
+ Opcode_rur_sat_mode_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sat_mode_encode_fns[] = {
+ Opcode_wur_sat_mode_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sar0_encode_fns[] = {
+ Opcode_rur_sar0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sar0_encode_fns[] = {
+ Opcode_wur_sar0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sar1_encode_fns[] = {
+ Opcode_rur_sar1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sar1_encode_fns[] = {
+ Opcode_wur_sar1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sar2_encode_fns[] = {
+ Opcode_rur_sar2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sar2_encode_fns[] = {
+ Opcode_wur_sar2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_sar3_encode_fns[] = {
+ Opcode_rur_sar3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_sar3_encode_fns[] = {
+ Opcode_wur_sar3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_hsar0_encode_fns[] = {
+ Opcode_rur_hsar0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_hsar0_encode_fns[] = {
+ Opcode_wur_hsar0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_hsar1_encode_fns[] = {
+ Opcode_rur_hsar1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_hsar1_encode_fns[] = {
+ Opcode_wur_hsar1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_hsar2_encode_fns[] = {
+ Opcode_rur_hsar2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_hsar2_encode_fns[] = {
+ Opcode_wur_hsar2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_hsar3_encode_fns[] = {
+ Opcode_rur_hsar3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_hsar3_encode_fns[] = {
+ Opcode_wur_hsar3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_max_reg_0_encode_fns[] = {
+ Opcode_rur_max_reg_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_max_reg_0_encode_fns[] = {
+ Opcode_wur_max_reg_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_max_reg_1_encode_fns[] = {
+ Opcode_rur_max_reg_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_max_reg_1_encode_fns[] = {
+ Opcode_wur_max_reg_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_max_reg_2_encode_fns[] = {
+ Opcode_rur_max_reg_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_max_reg_2_encode_fns[] = {
+ Opcode_wur_max_reg_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_max_reg_3_encode_fns[] = {
+ Opcode_rur_max_reg_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_max_reg_3_encode_fns[] = {
+ Opcode_wur_max_reg_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_arg_max_reg_0_encode_fns[] = {
+ Opcode_rur_arg_max_reg_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_arg_max_reg_0_encode_fns[] = {
+ Opcode_wur_arg_max_reg_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_arg_max_reg_1_encode_fns[] = {
+ Opcode_rur_arg_max_reg_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_arg_max_reg_1_encode_fns[] = {
+ Opcode_wur_arg_max_reg_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_arg_max_reg_2_encode_fns[] = {
+ Opcode_rur_arg_max_reg_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_arg_max_reg_2_encode_fns[] = {
+ Opcode_wur_arg_max_reg_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_arg_max_reg_3_encode_fns[] = {
+ Opcode_rur_arg_max_reg_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_arg_max_reg_3_encode_fns[] = {
+ Opcode_wur_arg_max_reg_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_nco_counter_0_encode_fns[] = {
+ Opcode_rur_nco_counter_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_nco_counter_0_encode_fns[] = {
+ Opcode_wur_nco_counter_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_nco_counter_1_encode_fns[] = {
+ Opcode_rur_nco_counter_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_nco_counter_1_encode_fns[] = {
+ Opcode_wur_nco_counter_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_nco_counter_2_encode_fns[] = {
+ Opcode_rur_nco_counter_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_nco_counter_2_encode_fns[] = {
+ Opcode_wur_nco_counter_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_nco_counter_3_encode_fns[] = {
+ Opcode_rur_nco_counter_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_nco_counter_3_encode_fns[] = {
+ Opcode_wur_nco_counter_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_interp_ext_n_encode_fns[] = {
+ Opcode_rur_interp_ext_n_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_interp_ext_n_encode_fns[] = {
+ Opcode_wur_interp_ext_n_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_interp_ext_l_encode_fns[] = {
+ Opcode_rur_interp_ext_l_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_interp_ext_l_encode_fns[] = {
+ Opcode_wur_interp_ext_l_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_0_encode_fns[] = {
+ Opcode_rur_llr_buf_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_0_encode_fns[] = {
+ Opcode_wur_llr_buf_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_1_encode_fns[] = {
+ Opcode_rur_llr_buf_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_1_encode_fns[] = {
+ Opcode_wur_llr_buf_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_2_encode_fns[] = {
+ Opcode_rur_llr_buf_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_2_encode_fns[] = {
+ Opcode_wur_llr_buf_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_3_encode_fns[] = {
+ Opcode_rur_llr_buf_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_3_encode_fns[] = {
+ Opcode_wur_llr_buf_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_4_encode_fns[] = {
+ Opcode_rur_llr_buf_4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_4_encode_fns[] = {
+ Opcode_wur_llr_buf_4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_5_encode_fns[] = {
+ Opcode_rur_llr_buf_5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_5_encode_fns[] = {
+ Opcode_wur_llr_buf_5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_6_encode_fns[] = {
+ Opcode_rur_llr_buf_6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_6_encode_fns[] = {
+ Opcode_wur_llr_buf_6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_7_encode_fns[] = {
+ Opcode_rur_llr_buf_7_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_7_encode_fns[] = {
+ Opcode_wur_llr_buf_7_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_8_encode_fns[] = {
+ Opcode_rur_llr_buf_8_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_8_encode_fns[] = {
+ Opcode_wur_llr_buf_8_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_9_encode_fns[] = {
+ Opcode_rur_llr_buf_9_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_9_encode_fns[] = {
+ Opcode_wur_llr_buf_9_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_10_encode_fns[] = {
+ Opcode_rur_llr_buf_10_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_10_encode_fns[] = {
+ Opcode_wur_llr_buf_10_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_11_encode_fns[] = {
+ Opcode_rur_llr_buf_11_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_11_encode_fns[] = {
+ Opcode_wur_llr_buf_11_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_12_encode_fns[] = {
+ Opcode_rur_llr_buf_12_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_12_encode_fns[] = {
+ Opcode_wur_llr_buf_12_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_13_encode_fns[] = {
+ Opcode_rur_llr_buf_13_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_13_encode_fns[] = {
+ Opcode_wur_llr_buf_13_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_14_encode_fns[] = {
+ Opcode_rur_llr_buf_14_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_14_encode_fns[] = {
+ Opcode_wur_llr_buf_14_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_15_encode_fns[] = {
+ Opcode_rur_llr_buf_15_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_15_encode_fns[] = {
+ Opcode_wur_llr_buf_15_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_16_encode_fns[] = {
+ Opcode_rur_llr_buf_16_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_16_encode_fns[] = {
+ Opcode_wur_llr_buf_16_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_17_encode_fns[] = {
+ Opcode_rur_llr_buf_17_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_17_encode_fns[] = {
+ Opcode_wur_llr_buf_17_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_18_encode_fns[] = {
+ Opcode_rur_llr_buf_18_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_18_encode_fns[] = {
+ Opcode_wur_llr_buf_18_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_19_encode_fns[] = {
+ Opcode_rur_llr_buf_19_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_19_encode_fns[] = {
+ Opcode_wur_llr_buf_19_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_20_encode_fns[] = {
+ Opcode_rur_llr_buf_20_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_20_encode_fns[] = {
+ Opcode_wur_llr_buf_20_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_21_encode_fns[] = {
+ Opcode_rur_llr_buf_21_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_21_encode_fns[] = {
+ Opcode_wur_llr_buf_21_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_22_encode_fns[] = {
+ Opcode_rur_llr_buf_22_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_22_encode_fns[] = {
+ Opcode_wur_llr_buf_22_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_buf_23_encode_fns[] = {
+ Opcode_rur_llr_buf_23_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_buf_23_encode_fns[] = {
+ Opcode_wur_llr_buf_23_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_0_encode_fns[] = {
+ Opcode_rur_smod_buf_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_0_encode_fns[] = {
+ Opcode_wur_smod_buf_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_1_encode_fns[] = {
+ Opcode_rur_smod_buf_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_1_encode_fns[] = {
+ Opcode_wur_smod_buf_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_2_encode_fns[] = {
+ Opcode_rur_smod_buf_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_2_encode_fns[] = {
+ Opcode_wur_smod_buf_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_3_encode_fns[] = {
+ Opcode_rur_smod_buf_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_3_encode_fns[] = {
+ Opcode_wur_smod_buf_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_4_encode_fns[] = {
+ Opcode_rur_smod_buf_4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_4_encode_fns[] = {
+ Opcode_wur_smod_buf_4_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_5_encode_fns[] = {
+ Opcode_rur_smod_buf_5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_5_encode_fns[] = {
+ Opcode_wur_smod_buf_5_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_6_encode_fns[] = {
+ Opcode_rur_smod_buf_6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_6_encode_fns[] = {
+ Opcode_wur_smod_buf_6_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_buf_7_encode_fns[] = {
+ Opcode_rur_smod_buf_7_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_buf_7_encode_fns[] = {
+ Opcode_wur_smod_buf_7_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_weight_reg_encode_fns[] = {
+ Opcode_rur_weight_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_weight_reg_encode_fns[] = {
+ Opcode_wur_weight_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_scale_reg_encode_fns[] = {
+ Opcode_rur_scale_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_scale_reg_encode_fns[] = {
+ Opcode_wur_scale_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_llr_pos_encode_fns[] = {
+ Opcode_rur_llr_pos_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_llr_pos_encode_fns[] = {
+ Opcode_wur_llr_pos_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_pos_encode_fns[] = {
+ Opcode_rur_smod_pos_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_pos_encode_fns[] = {
+ Opcode_wur_smod_pos_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_perm_reg_encode_fns[] = {
+ Opcode_rur_perm_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_perm_reg_encode_fns[] = {
+ Opcode_wur_perm_reg_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_offset_table_0_encode_fns[] = {
+ Opcode_rur_smod_offset_table_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_offset_table_0_encode_fns[] = {
+ Opcode_wur_smod_offset_table_0_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_offset_table_1_encode_fns[] = {
+ Opcode_rur_smod_offset_table_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_offset_table_1_encode_fns[] = {
+ Opcode_wur_smod_offset_table_1_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_offset_table_2_encode_fns[] = {
+ Opcode_rur_smod_offset_table_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_offset_table_2_encode_fns[] = {
+ Opcode_wur_smod_offset_table_2_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_smod_offset_table_3_encode_fns[] = {
+ Opcode_rur_smod_offset_table_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_smod_offset_table_3_encode_fns[] = {
+ Opcode_wur_smod_offset_table_3_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_phasor_n_encode_fns[] = {
+ Opcode_rur_phasor_n_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_phasor_n_encode_fns[] = {
+ Opcode_wur_phasor_n_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_phasor_offset_encode_fns[] = {
+ Opcode_rur_phasor_offset_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_phasor_offset_encode_fns[] = {
+ Opcode_wur_phasor_offset_Slot_inst_encode, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+/* Opcode table. */
+
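+/*
+ * Each entry lists the mnemonic, its iclass, XTENSA_OPCODE_IS_* flags,
+ * and its per-slot encode functions; the two trailing zeros are the
+ * (unused here) functional-unit use count and list.
+ */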
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "rur.threadptr", ICLASS_rur_threadptr,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", ICLASS_wur_threadptr,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.176", ICLASS_xt_iclass_rsr_176,
+ 0,
+ Opcode_rsr_176_encode_fns, 0, 0 },
+ { "wsr.176", ICLASS_xt_iclass_wsr_176,
+ 0,
+ Opcode_wsr_176_encode_fns, 0, 0 },
+ { "rsr.208", ICLASS_xt_iclass_rsr_208,
+ 0,
+ Opcode_rsr_208_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "andb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andb_encode_fns, 0, 0 },
+ { "andbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andbc_encode_fns, 0, 0 },
+ { "orb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orb_encode_fns, 0, 0 },
+ { "orbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orbc_encode_fns, 0, 0 },
+ { "xorb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_xorb_encode_fns, 0, 0 },
+ { "any4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_any4_encode_fns, 0, 0 },
+ { "all4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_all4_encode_fns, 0, 0 },
+ { "any8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_any8_encode_fns, 0, 0 },
+ { "all8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_all8_encode_fns, 0, 0 },
+ { "bf", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bf_encode_fns, 0, 0 },
+ { "bt", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bt_encode_fns, 0, 0 },
+ { "movf", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movf_encode_fns, 0, 0 },
+ { "movt", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movt_encode_fns, 0, 0 },
+ { "rsr.br", ICLASS_xt_iclass_RSR_BR,
+ 0,
+ Opcode_rsr_br_encode_fns, 0, 0 },
+ { "wsr.br", ICLASS_xt_iclass_WSR_BR,
+ 0,
+ Opcode_wsr_br_encode_fns, 0, 0 },
+ { "xsr.br", ICLASS_xt_iclass_XSR_BR,
+ 0,
+ Opcode_xsr_br_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipfl", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "ihu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dpfl", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "dhu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "rsr.cpenable", ICLASS_xt_iclass_rsr_cpenable,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", ICLASS_xt_iclass_wsr_cpenable,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", ICLASS_xt_iclass_xsr_cpenable,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.fcr", ICLASS_rur_fcr,
+ 0,
+ Opcode_rur_fcr_encode_fns, 0, 0 },
+ { "wur.fcr", ICLASS_wur_fcr,
+ 0,
+ Opcode_wur_fcr_encode_fns, 0, 0 },
+ { "rur.fsr", ICLASS_rur_fsr,
+ 0,
+ Opcode_rur_fsr_encode_fns, 0, 0 },
+ { "wur.fsr", ICLASS_wur_fsr,
+ 0,
+ Opcode_wur_fsr_encode_fns, 0, 0 },
+ { "add.s", ICLASS_fp,
+ 0,
+ Opcode_add_s_encode_fns, 0, 0 },
+ { "sub.s", ICLASS_fp,
+ 0,
+ Opcode_sub_s_encode_fns, 0, 0 },
+ { "mul.s", ICLASS_fp,
+ 0,
+ Opcode_mul_s_encode_fns, 0, 0 },
+ { "madd.s", ICLASS_fp_mac,
+ 0,
+ Opcode_madd_s_encode_fns, 0, 0 },
+ { "msub.s", ICLASS_fp_mac,
+ 0,
+ Opcode_msub_s_encode_fns, 0, 0 },
+ { "movf.s", ICLASS_fp_cmov,
+ 0,
+ Opcode_movf_s_encode_fns, 0, 0 },
+ { "movt.s", ICLASS_fp_cmov,
+ 0,
+ Opcode_movt_s_encode_fns, 0, 0 },
+ { "moveqz.s", ICLASS_fp_mov,
+ 0,
+ Opcode_moveqz_s_encode_fns, 0, 0 },
+ { "movnez.s", ICLASS_fp_mov,
+ 0,
+ Opcode_movnez_s_encode_fns, 0, 0 },
+ { "movltz.s", ICLASS_fp_mov,
+ 0,
+ Opcode_movltz_s_encode_fns, 0, 0 },
+ { "movgez.s", ICLASS_fp_mov,
+ 0,
+ Opcode_movgez_s_encode_fns, 0, 0 },
+ { "abs.s", ICLASS_fp_mov2,
+ 0,
+ Opcode_abs_s_encode_fns, 0, 0 },
+ { "mov.s", ICLASS_fp_mov2,
+ 0,
+ Opcode_mov_s_encode_fns, 0, 0 },
+ { "neg.s", ICLASS_fp_mov2,
+ 0,
+ Opcode_neg_s_encode_fns, 0, 0 },
+ { "un.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_un_s_encode_fns, 0, 0 },
+ { "oeq.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_oeq_s_encode_fns, 0, 0 },
+ { "ueq.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_ueq_s_encode_fns, 0, 0 },
+ { "olt.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_olt_s_encode_fns, 0, 0 },
+ { "ult.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_ult_s_encode_fns, 0, 0 },
+ { "ole.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_ole_s_encode_fns, 0, 0 },
+ { "ule.s", ICLASS_fp_cmp,
+ 0,
+ Opcode_ule_s_encode_fns, 0, 0 },
+ { "float.s", ICLASS_fp_float,
+ 0,
+ Opcode_float_s_encode_fns, 0, 0 },
+ { "ufloat.s", ICLASS_fp_float,
+ 0,
+ Opcode_ufloat_s_encode_fns, 0, 0 },
+ { "round.s", ICLASS_fp_int,
+ 0,
+ Opcode_round_s_encode_fns, 0, 0 },
+ { "ceil.s", ICLASS_fp_int,
+ 0,
+ Opcode_ceil_s_encode_fns, 0, 0 },
+ { "floor.s", ICLASS_fp_int,
+ 0,
+ Opcode_floor_s_encode_fns, 0, 0 },
+ { "trunc.s", ICLASS_fp_int,
+ 0,
+ Opcode_trunc_s_encode_fns, 0, 0 },
+ { "utrunc.s", ICLASS_fp_int,
+ 0,
+ Opcode_utrunc_s_encode_fns, 0, 0 },
+ { "rfr", ICLASS_fp_rfr,
+ 0,
+ Opcode_rfr_encode_fns, 0, 0 },
+ { "wfr", ICLASS_fp_wfr,
+ 0,
+ Opcode_wfr_encode_fns, 0, 0 },
+ { "lsi", ICLASS_fp_lsi,
+ 0,
+ Opcode_lsi_encode_fns, 0, 0 },
+ { "lsiu", ICLASS_fp_lsiu,
+ 0,
+ Opcode_lsiu_encode_fns, 0, 0 },
+ { "lsx", ICLASS_fp_lsx,
+ 0,
+ Opcode_lsx_encode_fns, 0, 0 },
+ { "lsxu", ICLASS_fp_lsxu,
+ 0,
+ Opcode_lsxu_encode_fns, 0, 0 },
+ { "ssi", ICLASS_fp_ssi,
+ 0,
+ Opcode_ssi_encode_fns, 0, 0 },
+ { "ssiu", ICLASS_fp_ssiu,
+ 0,
+ Opcode_ssiu_encode_fns, 0, 0 },
+ { "ssx", ICLASS_fp_ssx,
+ 0,
+ Opcode_ssx_encode_fns, 0, 0 },
+ { "ssxu", ICLASS_fp_ssxu,
+ 0,
+ Opcode_ssxu_encode_fns, 0, 0 },
+ { "get_argmax", ICLASS_iclass_GET_ARGMAX,
+ 0,
+ Opcode_get_argmax_encode_fns, 0, 0 },
+ { "get_hsar", ICLASS_iclass_GET_HSAR,
+ 0,
+ Opcode_get_hsar_encode_fns, 0, 0 },
+ { "get_hsar2sar", ICLASS_iclass_GET_HSAR2SAR,
+ 0,
+ Opcode_get_hsar2sar_encode_fns, 0, 0 },
+ { "get_interp_ext_n", ICLASS_iclass_GET_INTERP_EXT_N,
+ 0,
+ Opcode_get_interp_ext_n_encode_fns, 0, 0 },
+ { "get_interp_ext_l", ICLASS_iclass_GET_INTERP_EXT_L,
+ 0,
+ Opcode_get_interp_ext_l_encode_fns, 0, 0 },
+ { "get_llr_buf", ICLASS_iclass_GET_LLR_BUF,
+ 0,
+ Opcode_get_llr_buf_encode_fns, 0, 0 },
+ { "get_llr_pos", ICLASS_iclass_GET_LLR_POS,
+ 0,
+ Opcode_get_llr_pos_encode_fns, 0, 0 },
+ { "get_max", ICLASS_iclass_GET_MAX,
+ 0,
+ Opcode_get_max_encode_fns, 0, 0 },
+ { "get_nco", ICLASS_iclass_GET_NCO,
+ 0,
+ Opcode_get_nco_encode_fns, 0, 0 },
+ { "get_perm_reg", ICLASS_iclass_GET_PERM_REG,
+ 0,
+ Opcode_get_perm_reg_encode_fns, 0, 0 },
+ { "get_phasor_n", ICLASS_iclass_GET_PHASOR_N,
+ 0,
+ Opcode_get_phasor_n_encode_fns, 0, 0 },
+ { "get_phasor_offset", ICLASS_iclass_GET_PHASOR_OFFSET,
+ 0,
+ Opcode_get_phasor_offset_encode_fns, 0, 0 },
+ { "get_sar", ICLASS_iclass_GET_SAR,
+ 0,
+ Opcode_get_sar_encode_fns, 0, 0 },
+ { "get_scale_reg", ICLASS_iclass_GET_SCALE_REG,
+ 0,
+ Opcode_get_scale_reg_encode_fns, 0, 0 },
+ { "get_smod_buf", ICLASS_iclass_GET_SMOD_BUF,
+ 0,
+ Opcode_get_smod_buf_encode_fns, 0, 0 },
+ { "get_smod_offset_table", ICLASS_iclass_GET_SMOD_OFFSET_TABLE,
+ 0,
+ Opcode_get_smod_offset_table_encode_fns, 0, 0 },
+ { "get_smod_pos", ICLASS_iclass_GET_SMOD_POS,
+ 0,
+ Opcode_get_smod_pos_encode_fns, 0, 0 },
+ { "get_sov", ICLASS_iclass_GET_SOV,
+ 0,
+ Opcode_get_sov_encode_fns, 0, 0 },
+ { "get_wght", ICLASS_iclass_GET_WGHT,
+ 0,
+ Opcode_get_wght_encode_fns, 0, 0 },
+ { "set_argmax", ICLASS_iclass_SET_ARGMAX,
+ 0,
+ Opcode_set_argmax_encode_fns, 0, 0 },
+ { "set_ext_regs", ICLASS_iclass_SET_EXT_REGS,
+ 0,
+ Opcode_set_ext_regs_encode_fns, 0, 0 },
+ { "set_hsar", ICLASS_iclass_SET_HSAR,
+ 0,
+ Opcode_set_hsar_encode_fns, 0, 0 },
+ { "set_llr_buf", ICLASS_iclass_SET_LLR_BUF,
+ 0,
+ Opcode_set_llr_buf_encode_fns, 0, 0 },
+ { "set_llr_pos", ICLASS_iclass_SET_LLR_POS,
+ 0,
+ Opcode_set_llr_pos_encode_fns, 0, 0 },
+ { "set_max", ICLASS_iclass_SET_MAX,
+ 0,
+ Opcode_set_max_encode_fns, 0, 0 },
+ { "set_nco", ICLASS_iclass_SET_NCO,
+ 0,
+ Opcode_set_nco_encode_fns, 0, 0 },
+ { "set_perm_reg", ICLASS_iclass_SET_PERM_REG,
+ 0,
+ Opcode_set_perm_reg_encode_fns, 0, 0 },
+ { "set_phasor_n", ICLASS_iclass_SET_PHASOR_N,
+ 0,
+ Opcode_set_phasor_n_encode_fns, 0, 0 },
+ { "set_phasor_offset", ICLASS_iclass_SET_PHASOR_OFFSET,
+ 0,
+ Opcode_set_phasor_offset_encode_fns, 0, 0 },
+ { "set_sar", ICLASS_iclass_SET_SAR,
+ 0,
+ Opcode_set_sar_encode_fns, 0, 0 },
+ { "set_scale_reg", ICLASS_iclass_SET_SCALE_REG,
+ 0,
+ Opcode_set_scale_reg_encode_fns, 0, 0 },
+ { "set_smod_buf", ICLASS_iclass_SET_SMOD_BUF,
+ 0,
+ Opcode_set_smod_buf_encode_fns, 0, 0 },
+ { "set_smod_offset_table", ICLASS_iclass_SET_SMOD_OFFSET_TABLE,
+ 0,
+ Opcode_set_smod_offset_table_encode_fns, 0, 0 },
+ { "set_smod_pos", ICLASS_iclass_SET_SMOD_POS,
+ 0,
+ Opcode_set_smod_pos_encode_fns, 0, 0 },
+ { "set_sov", ICLASS_iclass_SET_SOV,
+ 0,
+ Opcode_set_sov_encode_fns, 0, 0 },
+ { "set_wght", ICLASS_iclass_SET_WGHT,
+ 0,
+ Opcode_set_wght_encode_fns, 0, 0 },
+ { "lac2x32", ICLASS_iclass_LAC2X32,
+ 0,
+ Opcode_lac2x32_encode_fns, 0, 0 },
+ { "lac2x64_0", ICLASS_iclass_LAC2X64_0,
+ 0,
+ Opcode_lac2x64_0_encode_fns, 0, 0 },
+ { "lac2x64_1", ICLASS_iclass_LAC2X64_1,
+ 0,
+ Opcode_lac2x64_1_encode_fns, 0, 0 },
+ { "lac2x64_2", ICLASS_iclass_LAC2X64_2,
+ 0,
+ Opcode_lac2x64_2_encode_fns, 0, 0 },
+ { "lac2x64_3", ICLASS_iclass_LAC2X64_3,
+ 0,
+ Opcode_lac2x64_3_encode_fns, 0, 0 },
+ { "lac32_r", ICLASS_iclass_LAC32_R,
+ 0,
+ Opcode_lac32_r_encode_fns, 0, 0 },
+ { "lac_ih", ICLASS_iclass_LAC_IH,
+ 0,
+ Opcode_lac_ih_encode_fns, 0, 0 },
+ { "lac_il", ICLASS_iclass_LAC_IL,
+ 0,
+ Opcode_lac_il_encode_fns, 0, 0 },
+ { "lac_rh", ICLASS_iclass_LAC_RH,
+ 0,
+ Opcode_lac_rh_encode_fns, 0, 0 },
+ { "lac_rl", ICLASS_iclass_LAC_RL,
+ 0,
+ Opcode_lac_rl_encode_fns, 0, 0 },
+ { "lcm", ICLASS_iclass_LCM,
+ 0,
+ Opcode_lcm_encode_fns, 0, 0 },
+ { "lcm_pinc", ICLASS_iclass_LCM_PINC,
+ 0,
+ Opcode_lcm_pinc_encode_fns, 0, 0 },
+ { "lcm_pinc_x", ICLASS_iclass_LCM_PINC_X,
+ 0,
+ Opcode_lcm_pinc_x_encode_fns, 0, 0 },
+ { "lcm_u", ICLASS_iclass_LCM_U,
+ 0,
+ Opcode_lcm_u_encode_fns, 0, 0 },
+ { "lcm_x", ICLASS_iclass_LCM_X,
+ 0,
+ Opcode_lcm_x_encode_fns, 0, 0 },
+ { "lcm_xu", ICLASS_iclass_LCM_XU,
+ 0,
+ Opcode_lcm_xu_encode_fns, 0, 0 },
+ { "lp", ICLASS_iclass_LP,
+ 0,
+ Opcode_lp_encode_fns, 0, 0 },
+ { "lp_x", ICLASS_iclass_LP_X,
+ 0,
+ Opcode_lp_x_encode_fns, 0, 0 },
+ { "lq", ICLASS_iclass_LQ,
+ 0,
+ Opcode_lq_encode_fns, 0, 0 },
+ { "lq_x", ICLASS_iclass_LQ_X,
+ 0,
+ Opcode_lq_x_encode_fns, 0, 0 },
+ { "lut0", ICLASS_iclass_LUT0,
+ 0,
+ Opcode_lut0_encode_fns, 0, 0 },
+ { "lut1", ICLASS_iclass_LUT1,
+ 0,
+ Opcode_lut1_encode_fns, 0, 0 },
+ { "lut2", ICLASS_iclass_LUT2,
+ 0,
+ Opcode_lut2_encode_fns, 0, 0 },
+ { "lut3", ICLASS_iclass_LUT3,
+ 0,
+ Opcode_lut3_encode_fns, 0, 0 },
+ { "sac2x32", ICLASS_iclass_SAC2X32,
+ 0,
+ Opcode_sac2x32_encode_fns, 0, 0 },
+ { "sac2x64_0", ICLASS_iclass_SAC2X64_0,
+ 0,
+ Opcode_sac2x64_0_encode_fns, 0, 0 },
+ { "sac2x64_1", ICLASS_iclass_SAC2X64_1,
+ 0,
+ Opcode_sac2x64_1_encode_fns, 0, 0 },
+ { "sac2x64_2", ICLASS_iclass_SAC2X64_2,
+ 0,
+ Opcode_sac2x64_2_encode_fns, 0, 0 },
+ { "sac2x64_3", ICLASS_iclass_SAC2X64_3,
+ 0,
+ Opcode_sac2x64_3_encode_fns, 0, 0 },
+ { "sac32_r", ICLASS_iclass_SAC32_R,
+ 0,
+ Opcode_sac32_r_encode_fns, 0, 0 },
+ { "sac_ih", ICLASS_iclass_SAC_IH,
+ 0,
+ Opcode_sac_ih_encode_fns, 0, 0 },
+ { "sac_il", ICLASS_iclass_SAC_IL,
+ 0,
+ Opcode_sac_il_encode_fns, 0, 0 },
+ { "sac_rh", ICLASS_iclass_SAC_RH,
+ 0,
+ Opcode_sac_rh_encode_fns, 0, 0 },
+ { "sac_rl", ICLASS_iclass_SAC_RL,
+ 0,
+ Opcode_sac_rl_encode_fns, 0, 0 },
+ { "scm", ICLASS_iclass_SCM,
+ 0,
+ Opcode_scm_encode_fns, 0, 0 },
+ { "scm_pinc", ICLASS_iclass_SCM_PINC,
+ 0,
+ Opcode_scm_pinc_encode_fns, 0, 0 },
+ { "scm_pinc_x", ICLASS_iclass_SCM_PINC_X,
+ 0,
+ Opcode_scm_pinc_x_encode_fns, 0, 0 },
+ { "scm_u", ICLASS_iclass_SCM_U,
+ 0,
+ Opcode_scm_u_encode_fns, 0, 0 },
+ { "scm_x", ICLASS_iclass_SCM_X,
+ 0,
+ Opcode_scm_x_encode_fns, 0, 0 },
+ { "scm_xu", ICLASS_iclass_SCM_XU,
+ 0,
+ Opcode_scm_xu_encode_fns, 0, 0 },
+ { "store_p", ICLASS_iclass_STORE_P,
+ 0,
+ Opcode_store_p_encode_fns, 0, 0 },
+ { "store_q", ICLASS_iclass_STORE_Q,
+ 0,
+ Opcode_store_q_encode_fns, 0, 0 },
+ { "ar2cm_dup", ICLASS_iclass_AR2CM_DUP,
+ 0,
+ Opcode_ar2cm_dup_encode_fns, 0, 0 },
+ { "ar2cm_ln", ICLASS_iclass_AR2CM_LN,
+ 0,
+ Opcode_ar2cm_ln_encode_fns, 0, 0 },
+ { "ar2cm_ln_i", ICLASS_iclass_AR2CM_LN_I,
+ 0,
+ Opcode_ar2cm_ln_i_encode_fns, 0, 0 },
+ { "ar2cm_ln_r", ICLASS_iclass_AR2CM_LN_R,
+ 0,
+ Opcode_ar2cm_ln_r_encode_fns, 0, 0 },
+ { "ar2pq_ln", ICLASS_iclass_AR2PQ_LN,
+ 0,
+ Opcode_ar2pq_ln_encode_fns, 0, 0 },
+ { "ar2sar_dup", ICLASS_iclass_AR2SAR_DUP,
+ 0,
+ Opcode_ar2sar_dup_encode_fns, 0, 0 },
+ { "clrac", ICLASS_iclass_CLRAC,
+ 0,
+ Opcode_clrac_encode_fns, 0, 0 },
+ { "clrcm", ICLASS_iclass_CLRCM,
+ 0,
+ Opcode_clrcm_encode_fns, 0, 0 },
+ { "cm2ar_ln", ICLASS_iclass_CM2AR_LN,
+ 0,
+ Opcode_cm2ar_ln_encode_fns, 0, 0 },
+ { "cm2ar_ln_i", ICLASS_iclass_CM2AR_LN_I,
+ 0,
+ Opcode_cm2ar_ln_i_encode_fns, 0, 0 },
+ { "cm2ar_ln_r", ICLASS_iclass_CM2AR_LN_R,
+ 0,
+ Opcode_cm2ar_ln_r_encode_fns, 0, 0 },
+ { "comb_ar", ICLASS_iclass_COMB_AR,
+ 0,
+ Opcode_comb_ar_encode_fns, 0, 0 },
+ { "conj", ICLASS_iclass_CONJ,
+ 0,
+ Opcode_conj_encode_fns, 0, 0 },
+ { "mov2ac32_i", ICLASS_iclass_MOV2AC32_I,
+ 0,
+ Opcode_mov2ac32_i_encode_fns, 0, 0 },
+ { "mov2ac32_r", ICLASS_iclass_MOV2AC32_R,
+ 0,
+ Opcode_mov2ac32_r_encode_fns, 0, 0 },
+ { "mov2cm2pq", ICLASS_iclass_MOV2CM2PQ,
+ 0,
+ Opcode_mov2cm2pq_encode_fns, 0, 0 },
+ { "movac", ICLASS_iclass_MOVAC,
+ 0,
+ Opcode_movac_encode_fns, 0, 0 },
+ { "movac_i", ICLASS_iclass_MOVAC_I,
+ 0,
+ Opcode_movac_i_encode_fns, 0, 0 },
+ { "movac_i2r", ICLASS_iclass_MOVAC_I2R,
+ 0,
+ Opcode_movac_i2r_encode_fns, 0, 0 },
+ { "movac_r", ICLASS_iclass_MOVAC_R,
+ 0,
+ Opcode_movac_r_encode_fns, 0, 0 },
+ { "movac_r2i", ICLASS_iclass_MOVAC_R2I,
+ 0,
+ Opcode_movac_r2i_encode_fns, 0, 0 },
+ { "movar2", ICLASS_iclass_MOVAR2,
+ 0,
+ Opcode_movar2_encode_fns, 0, 0 },
+ { "movcm", ICLASS_iclass_MOVCM,
+ 0,
+ Opcode_movcm_encode_fns, 0, 0 },
+ { "movcm2pq", ICLASS_iclass_MOVCM2PQ,
+ 0,
+ Opcode_movcm2pq_encode_fns, 0, 0 },
+ { "movcnd_0", ICLASS_iclass_MOVCND_0,
+ 0,
+ Opcode_movcnd_0_encode_fns, 0, 0 },
+ { "movcnd_1", ICLASS_iclass_MOVCND_1,
+ 0,
+ Opcode_movcnd_1_encode_fns, 0, 0 },
+ { "movcnd_2", ICLASS_iclass_MOVCND_2,
+ 0,
+ Opcode_movcnd_2_encode_fns, 0, 0 },
+ { "movcnd_3", ICLASS_iclass_MOVCND_3,
+ 0,
+ Opcode_movcnd_3_encode_fns, 0, 0 },
+ { "movcnd_4", ICLASS_iclass_MOVCND_4,
+ 0,
+ Opcode_movcnd_4_encode_fns, 0, 0 },
+ { "movcnd_5", ICLASS_iclass_MOVCND_5,
+ 0,
+ Opcode_movcnd_5_encode_fns, 0, 0 },
+ { "movcnd_6", ICLASS_iclass_MOVCND_6,
+ 0,
+ Opcode_movcnd_6_encode_fns, 0, 0 },
+ { "movcnd_7", ICLASS_iclass_MOVCND_7,
+ 0,
+ Opcode_movcnd_7_encode_fns, 0, 0 },
+ { "movcnd8_0", ICLASS_iclass_MOVCND8_0,
+ 0,
+ Opcode_movcnd8_0_encode_fns, 0, 0 },
+ { "movcnd8_1", ICLASS_iclass_MOVCND8_1,
+ 0,
+ Opcode_movcnd8_1_encode_fns, 0, 0 },
+ { "movcnd8_2", ICLASS_iclass_MOVCND8_2,
+ 0,
+ Opcode_movcnd8_2_encode_fns, 0, 0 },
+ { "movcnd8_3", ICLASS_iclass_MOVCND8_3,
+ 0,
+ Opcode_movcnd8_3_encode_fns, 0, 0 },
+ { "movcnd8_4", ICLASS_iclass_MOVCND8_4,
+ 0,
+ Opcode_movcnd8_4_encode_fns, 0, 0 },
+ { "movcnd8_5", ICLASS_iclass_MOVCND8_5,
+ 0,
+ Opcode_movcnd8_5_encode_fns, 0, 0 },
+ { "movcnd8_6", ICLASS_iclass_MOVCND8_6,
+ 0,
+ Opcode_movcnd8_6_encode_fns, 0, 0 },
+ { "movcnd8_7", ICLASS_iclass_MOVCND8_7,
+ 0,
+ Opcode_movcnd8_7_encode_fns, 0, 0 },
+ { "mov_i", ICLASS_iclass_MOV_I,
+ 0,
+ Opcode_mov_i_encode_fns, 0, 0 },
+ { "movpq2pq", ICLASS_iclass_MOVPQ2PQ,
+ 0,
+ Opcode_movpq2pq_encode_fns, 0, 0 },
+ { "mov_r", ICLASS_iclass_MOV_R,
+ 0,
+ Opcode_mov_r_encode_fns, 0, 0 },
+ { "negcm", ICLASS_iclass_NEGCM,
+ 0,
+ Opcode_negcm_encode_fns, 0, 0 },
+ { "pop16llr_1", ICLASS_iclass_POP16LLR_1,
+ 0,
+ Opcode_pop16llr_1_encode_fns, 0, 0 },
+ { "pq2cm", ICLASS_iclass_PQ2CM,
+ 0,
+ Opcode_pq2cm_encode_fns, 0, 0 },
+ { "swapac_r", ICLASS_iclass_SWAPAC_R,
+ 0,
+ Opcode_swapac_r_encode_fns, 0, 0 },
+ { "swapac_ri", ICLASS_iclass_SWAPAC_RI,
+ 0,
+ Opcode_swapac_ri_encode_fns, 0, 0 },
+ { "swapb", ICLASS_iclass_SWAPB,
+ 0,
+ Opcode_swapb_encode_fns, 0, 0 },
+ { "add2ac", ICLASS_iclass_ADD2AC,
+ 0,
+ Opcode_add2ac_encode_fns, 0, 0 },
+ { "addac", ICLASS_iclass_ADDAC,
+ 0,
+ Opcode_addac_encode_fns, 0, 0 },
+ { "cdot", ICLASS_iclass_CDOT,
+ 0,
+ Opcode_cdot_encode_fns, 0, 0 },
+ { "cdotac", ICLASS_iclass_CDOTAC,
+ 0,
+ Opcode_cdotac_encode_fns, 0, 0 },
+ { "cdotacs", ICLASS_iclass_CDOTACS,
+ 0,
+ Opcode_cdotacs_encode_fns, 0, 0 },
+ { "cmac", ICLASS_iclass_CMAC,
+ 0,
+ Opcode_cmac_encode_fns, 0, 0 },
+ { "cmacs", ICLASS_iclass_CMACS,
+ 0,
+ Opcode_cmacs_encode_fns, 0, 0 },
+ { "cmpy", ICLASS_iclass_CMPY,
+ 0,
+ Opcode_cmpy_encode_fns, 0, 0 },
+ { "cmpy2cm", ICLASS_iclass_CMPY2CM,
+ 0,
+ Opcode_cmpy2cm_encode_fns, 0, 0 },
+ { "cmpy2pq", ICLASS_iclass_CMPY2PQ,
+ 0,
+ Opcode_cmpy2pq_encode_fns, 0, 0 },
+ { "cmpys", ICLASS_iclass_CMPYS,
+ 0,
+ Opcode_cmpys_encode_fns, 0, 0 },
+ { "cmpyxp2pq", ICLASS_iclass_CMPYXP2PQ,
+ 0,
+ Opcode_cmpyxp2pq_encode_fns, 0, 0 },
+ { "comb32", ICLASS_iclass_COMB32,
+ 0,
+ Opcode_comb32_encode_fns, 0, 0 },
+ { "dot", ICLASS_iclass_DOT,
+ 0,
+ Opcode_dot_encode_fns, 0, 0 },
+ { "dotac", ICLASS_iclass_DOTAC,
+ 0,
+ Opcode_dotac_encode_fns, 0, 0 },
+ { "dotacs", ICLASS_iclass_DOTACS,
+ 0,
+ Opcode_dotacs_encode_fns, 0, 0 },
+ { "lin_int", ICLASS_iclass_LIN_INT,
+ 0,
+ Opcode_lin_int_encode_fns, 0, 0 },
+ { "llrpre1", ICLASS_iclass_LLRPRE1,
+ 0,
+ Opcode_llrpre1_encode_fns, 0, 0 },
+ { "llrpre2", ICLASS_iclass_LLRPRE2,
+ 0,
+ Opcode_llrpre2_encode_fns, 0, 0 },
+ { "mac", ICLASS_iclass_MAC,
+ 0,
+ Opcode_mac_encode_fns, 0, 0 },
+ { "mac8", ICLASS_iclass_MAC8,
+ 0,
+ Opcode_mac8_encode_fns, 0, 0 },
+ { "macd8", ICLASS_iclass_MACD8,
+ 0,
+ Opcode_macd8_encode_fns, 0, 0 },
+ { "macpqxp_0", ICLASS_iclass_MACPQXP_0,
+ 0,
+ Opcode_macpqxp_0_encode_fns, 0, 0 },
+ { "macpqxp_1", ICLASS_iclass_MACPQXP_1,
+ 0,
+ Opcode_macpqxp_1_encode_fns, 0, 0 },
+ { "macpqxp_2", ICLASS_iclass_MACPQXP_2,
+ 0,
+ Opcode_macpqxp_2_encode_fns, 0, 0 },
+ { "macpqxp_3", ICLASS_iclass_MACPQXP_3,
+ 0,
+ Opcode_macpqxp_3_encode_fns, 0, 0 },
+ { "macs", ICLASS_iclass_MACS,
+ 0,
+ Opcode_macs_encode_fns, 0, 0 },
+ { "macxp2_0", ICLASS_iclass_MACXP2_0,
+ 0,
+ Opcode_macxp2_0_encode_fns, 0, 0 },
+ { "macxp2_1", ICLASS_iclass_MACXP2_1,
+ 0,
+ Opcode_macxp2_1_encode_fns, 0, 0 },
+ { "macxp_0", ICLASS_iclass_MACXP_0,
+ 0,
+ Opcode_macxp_0_encode_fns, 0, 0 },
+ { "macxp_1", ICLASS_iclass_MACXP_1,
+ 0,
+ Opcode_macxp_1_encode_fns, 0, 0 },
+ { "macxp_2", ICLASS_iclass_MACXP_2,
+ 0,
+ Opcode_macxp_2_encode_fns, 0, 0 },
+ { "macxp_3", ICLASS_iclass_MACXP_3,
+ 0,
+ Opcode_macxp_3_encode_fns, 0, 0 },
+ { "mov2ac", ICLASS_iclass_MOV2AC,
+ 0,
+ Opcode_mov2ac_encode_fns, 0, 0 },
+ { "mpy", ICLASS_iclass_MPY,
+ 0,
+ Opcode_mpy_encode_fns, 0, 0 },
+ { "mpy2cm", ICLASS_iclass_MPY2CM,
+ 0,
+ Opcode_mpy2cm_encode_fns, 0, 0 },
+ { "mpy2pq", ICLASS_iclass_MPY2PQ,
+ 0,
+ Opcode_mpy2pq_encode_fns, 0, 0 },
+ { "mpy8", ICLASS_iclass_MPY8,
+ 0,
+ Opcode_mpy8_encode_fns, 0, 0 },
+ { "mpyadd8_2cm", ICLASS_iclass_MPYADD8_2CM,
+ 0,
+ Opcode_mpyadd8_2cm_encode_fns, 0, 0 },
+ { "mpyd8", ICLASS_iclass_MPYD8,
+ 0,
+ Opcode_mpyd8_encode_fns, 0, 0 },
+ { "mpypqxp_0", ICLASS_iclass_MPYPQXP_0,
+ 0,
+ Opcode_mpypqxp_0_encode_fns, 0, 0 },
+ { "mpypqxp_1", ICLASS_iclass_MPYPQXP_1,
+ 0,
+ Opcode_mpypqxp_1_encode_fns, 0, 0 },
+ { "mpypqxp_2", ICLASS_iclass_MPYPQXP_2,
+ 0,
+ Opcode_mpypqxp_2_encode_fns, 0, 0 },
+ { "mpypqxp_3", ICLASS_iclass_MPYPQXP_3,
+ 0,
+ Opcode_mpypqxp_3_encode_fns, 0, 0 },
+ { "mpys", ICLASS_iclass_MPYS,
+ 0,
+ Opcode_mpys_encode_fns, 0, 0 },
+ { "mpyxp2pq", ICLASS_iclass_MPYXP2PQ,
+ 0,
+ Opcode_mpyxp2pq_encode_fns, 0, 0 },
+ { "mpyxp2_0", ICLASS_iclass_MPYXP2_0,
+ 0,
+ Opcode_mpyxp2_0_encode_fns, 0, 0 },
+ { "mpyxp2_1", ICLASS_iclass_MPYXP2_1,
+ 0,
+ Opcode_mpyxp2_1_encode_fns, 0, 0 },
+ { "mpyxp_0", ICLASS_iclass_MPYXP_0,
+ 0,
+ Opcode_mpyxp_0_encode_fns, 0, 0 },
+ { "mpyxp_1", ICLASS_iclass_MPYXP_1,
+ 0,
+ Opcode_mpyxp_1_encode_fns, 0, 0 },
+ { "mpyxp_2", ICLASS_iclass_MPYXP_2,
+ 0,
+ Opcode_mpyxp_2_encode_fns, 0, 0 },
+ { "mpyxp_3", ICLASS_iclass_MPYXP_3,
+ 0,
+ Opcode_mpyxp_3_encode_fns, 0, 0 },
+ { "normacd", ICLASS_iclass_NORMACD,
+ 0,
+ Opcode_normacd_encode_fns, 0, 0 },
+ { "normacpq_i", ICLASS_iclass_NORMACPQ_I,
+ 0,
+ Opcode_normacpq_i_encode_fns, 0, 0 },
+ { "normacpq_r", ICLASS_iclass_NORMACPQ_R,
+ 0,
+ Opcode_normacpq_r_encode_fns, 0, 0 },
+ { "normd", ICLASS_iclass_NORMD,
+ 0,
+ Opcode_normd_encode_fns, 0, 0 },
+ { "normpypq_i", ICLASS_iclass_NORMPYPQ_I,
+ 0,
+ Opcode_normpypq_i_encode_fns, 0, 0 },
+ { "normpypq_r", ICLASS_iclass_NORMPYPQ_R,
+ 0,
+ Opcode_normpypq_r_encode_fns, 0, 0 },
+ { "rcmac", ICLASS_iclass_RCMAC,
+ 0,
+ Opcode_rcmac_encode_fns, 0, 0 },
+ { "rcmpy", ICLASS_iclass_RCMPY,
+ 0,
+ Opcode_rcmpy_encode_fns, 0, 0 },
+ { "rcmpy2cm", ICLASS_iclass_RCMPY2CM,
+ 0,
+ Opcode_rcmpy2cm_encode_fns, 0, 0 },
+ { "rfir", ICLASS_iclass_RFIR,
+ 0,
+ Opcode_rfir_encode_fns, 0, 0 },
+ { "rfira", ICLASS_iclass_RFIRA,
+ 0,
+ Opcode_rfira_encode_fns, 0, 0 },
+ { "rfird", ICLASS_iclass_RFIRD,
+ 0,
+ Opcode_rfird_encode_fns, 0, 0 },
+ { "rfirda", ICLASS_iclass_RFIRDA,
+ 0,
+ Opcode_rfirda_encode_fns, 0, 0 },
+ { "rmac", ICLASS_iclass_RMAC,
+ 0,
+ Opcode_rmac_encode_fns, 0, 0 },
+ { "rmpy", ICLASS_iclass_RMPY,
+ 0,
+ Opcode_rmpy_encode_fns, 0, 0 },
+ { "rmpy2cm", ICLASS_iclass_RMPY2CM,
+ 0,
+ Opcode_rmpy2cm_encode_fns, 0, 0 },
+ { "smod_align", ICLASS_iclass_SMOD_ALIGN,
+ 0,
+ Opcode_smod_align_encode_fns, 0, 0 },
+ { "smod_scr", ICLASS_iclass_SMOD_SCR,
+ 0,
+ Opcode_smod_scr_encode_fns, 0, 0 },
+ { "sub2ac", ICLASS_iclass_SUB2AC,
+ 0,
+ Opcode_sub2ac_encode_fns, 0, 0 },
+ { "wght32", ICLASS_iclass_WGHT32,
+ 0,
+ Opcode_wght32_encode_fns, 0, 0 },
+ { "clrtiep", ICLASS_iclass_CLRTIEP,
+ 0,
+ Opcode_clrtiep_encode_fns, 0, 0 },
+ { "ext_2fifo_0", ICLASS_iclass_EXT_2FIFO_0,
+ 0,
+ Opcode_ext_2fifo_0_encode_fns, 0, 0 },
+ { "ext_2fifo_1", ICLASS_iclass_EXT_2FIFO_1,
+ 0,
+ Opcode_ext_2fifo_1_encode_fns, 0, 0 },
+ { "ext_2fifo_2", ICLASS_iclass_EXT_2FIFO_2,
+ 0,
+ Opcode_ext_2fifo_2_encode_fns, 0, 0 },
+ { "ext_2fifo_3", ICLASS_iclass_EXT_2FIFO_3,
+ 0,
+ Opcode_ext_2fifo_3_encode_fns, 0, 0 },
+ { "ext_r2fifo_0", ICLASS_iclass_EXT_R2FIFO_0,
+ 0,
+ Opcode_ext_r2fifo_0_encode_fns, 0, 0 },
+ { "ext_r2fifo_1", ICLASS_iclass_EXT_R2FIFO_1,
+ 0,
+ Opcode_ext_r2fifo_1_encode_fns, 0, 0 },
+ { "ext_r2fifo_2", ICLASS_iclass_EXT_R2FIFO_2,
+ 0,
+ Opcode_ext_r2fifo_2_encode_fns, 0, 0 },
+ { "ext_r2fifo_3", ICLASS_iclass_EXT_R2FIFO_3,
+ 0,
+ Opcode_ext_r2fifo_3_encode_fns, 0, 0 },
+ { "lut", ICLASS_iclass_LUT,
+ 0,
+ Opcode_lut_encode_fns, 0, 0 },
+ { "lut_ar", ICLASS_iclass_LUT_AR,
+ 0,
+ Opcode_lut_ar_encode_fns, 0, 0 },
+ { "lut_iext", ICLASS_iclass_LUT_IEXT,
+ 0,
+ Opcode_lut_iext_encode_fns, 0, 0 },
+ { "lut_phasor", ICLASS_iclass_LUT_PHASOR,
+ 0,
+ Opcode_lut_phasor_encode_fns, 0, 0 },
+ { "lut_rext", ICLASS_iclass_LUT_REXT,
+ 0,
+ Opcode_lut_rext_encode_fns, 0, 0 },
+ { "lut_write", ICLASS_iclass_LUT_WRITE,
+ 0,
+ Opcode_lut_write_encode_fns, 0, 0 },
+ { "moveq128_0", ICLASS_iclass_MOVEQ128_0,
+ 0,
+ Opcode_moveq128_0_encode_fns, 0, 0 },
+ { "moveq128_1", ICLASS_iclass_MOVEQ128_1,
+ 0,
+ Opcode_moveq128_1_encode_fns, 0, 0 },
+ { "moveq128_2", ICLASS_iclass_MOVEQ128_2,
+ 0,
+ Opcode_moveq128_2_encode_fns, 0, 0 },
+ { "moveq128_3", ICLASS_iclass_MOVEQ128_3,
+ 0,
+ Opcode_moveq128_3_encode_fns, 0, 0 },
+ { "moveq128_4", ICLASS_iclass_MOVEQ128_4,
+ 0,
+ Opcode_moveq128_4_encode_fns, 0, 0 },
+ { "moveq128_5", ICLASS_iclass_MOVEQ128_5,
+ 0,
+ Opcode_moveq128_5_encode_fns, 0, 0 },
+ { "moveq32_0", ICLASS_iclass_MOVEQ32_0,
+ 0,
+ Opcode_moveq32_0_encode_fns, 0, 0 },
+ { "moveq32_1", ICLASS_iclass_MOVEQ32_1,
+ 0,
+ Opcode_moveq32_1_encode_fns, 0, 0 },
+ { "moveq32_2", ICLASS_iclass_MOVEQ32_2,
+ 0,
+ Opcode_moveq32_2_encode_fns, 0, 0 },
+ { "moveq32_3", ICLASS_iclass_MOVEQ32_3,
+ 0,
+ Opcode_moveq32_3_encode_fns, 0, 0 },
+ { "nco_update", ICLASS_iclass_NCO_UPDATE,
+ 0,
+ Opcode_nco_update_encode_fns, 0, 0 },
+ { "pop128_0", ICLASS_iclass_POP128_0,
+ 0,
+ Opcode_pop128_0_encode_fns, 0, 0 },
+ { "pop128_1", ICLASS_iclass_POP128_1,
+ 0,
+ Opcode_pop128_1_encode_fns, 0, 0 },
+ { "pop128_2", ICLASS_iclass_POP128_2,
+ 0,
+ Opcode_pop128_2_encode_fns, 0, 0 },
+ { "pop128_3", ICLASS_iclass_POP128_3,
+ 0,
+ Opcode_pop128_3_encode_fns, 0, 0 },
+ { "pop128_4", ICLASS_iclass_POP128_4,
+ 0,
+ Opcode_pop128_4_encode_fns, 0, 0 },
+ { "pop128_5", ICLASS_iclass_POP128_5,
+ 0,
+ Opcode_pop128_5_encode_fns, 0, 0 },
+ { "pop128_2cmpq_0", ICLASS_iclass_POP128_2CMPQ_0,
+ 0,
+ Opcode_pop128_2cmpq_0_encode_fns, 0, 0 },
+ { "pop128_2cmpq_1", ICLASS_iclass_POP128_2CMPQ_1,
+ 0,
+ Opcode_pop128_2cmpq_1_encode_fns, 0, 0 },
+ { "pop128_2cmpq_2", ICLASS_iclass_POP128_2CMPQ_2,
+ 0,
+ Opcode_pop128_2cmpq_2_encode_fns, 0, 0 },
+ { "pop128_2cmpq_3", ICLASS_iclass_POP128_2CMPQ_3,
+ 0,
+ Opcode_pop128_2cmpq_3_encode_fns, 0, 0 },
+ { "pop128_2m_0", ICLASS_iclass_POP128_2M_0,
+ 0,
+ Opcode_pop128_2m_0_encode_fns, 0, 0 },
+ { "pop128_2m_1", ICLASS_iclass_POP128_2M_1,
+ 0,
+ Opcode_pop128_2m_1_encode_fns, 0, 0 },
+ { "pop128_2m_2", ICLASS_iclass_POP128_2M_2,
+ 0,
+ Opcode_pop128_2m_2_encode_fns, 0, 0 },
+ { "pop128_2m_3", ICLASS_iclass_POP128_2M_3,
+ 0,
+ Opcode_pop128_2m_3_encode_fns, 0, 0 },
+ { "pop128_2pq_0", ICLASS_iclass_POP128_2PQ_0,
+ 0,
+ Opcode_pop128_2pq_0_encode_fns, 0, 0 },
+ { "pop128_2pq_1", ICLASS_iclass_POP128_2PQ_1,
+ 0,
+ Opcode_pop128_2pq_1_encode_fns, 0, 0 },
+ { "pop128_2pq_2", ICLASS_iclass_POP128_2PQ_2,
+ 0,
+ Opcode_pop128_2pq_2_encode_fns, 0, 0 },
+ { "pop128_2pq_3", ICLASS_iclass_POP128_2PQ_3,
+ 0,
+ Opcode_pop128_2pq_3_encode_fns, 0, 0 },
+ { "pop128_2pq_4", ICLASS_iclass_POP128_2PQ_4,
+ 0,
+ Opcode_pop128_2pq_4_encode_fns, 0, 0 },
+ { "pop128_2pq_5", ICLASS_iclass_POP128_2PQ_5,
+ 0,
+ Opcode_pop128_2pq_5_encode_fns, 0, 0 },
+ { "pop2x128_2pq_01", ICLASS_iclass_POP2X128_2PQ_01,
+ 0,
+ Opcode_pop2x128_2pq_01_encode_fns, 0, 0 },
+ { "pop2x128_2pq_03", ICLASS_iclass_POP2X128_2PQ_03,
+ 0,
+ Opcode_pop2x128_2pq_03_encode_fns, 0, 0 },
+ { "pop2x128_2pq_21", ICLASS_iclass_POP2X128_2PQ_21,
+ 0,
+ Opcode_pop2x128_2pq_21_encode_fns, 0, 0 },
+ { "pop2x128_2pq_23", ICLASS_iclass_POP2X128_2PQ_23,
+ 0,
+ Opcode_pop2x128_2pq_23_encode_fns, 0, 0 },
+ { "pop32_0", ICLASS_iclass_POP32_0,
+ 0,
+ Opcode_pop32_0_encode_fns, 0, 0 },
+ { "pop32_1", ICLASS_iclass_POP32_1,
+ 0,
+ Opcode_pop32_1_encode_fns, 0, 0 },
+ { "pop32_2", ICLASS_iclass_POP32_2,
+ 0,
+ Opcode_pop32_2_encode_fns, 0, 0 },
+ { "pop32_3", ICLASS_iclass_POP32_3,
+ 0,
+ Opcode_pop32_3_encode_fns, 0, 0 },
+ { "push128", ICLASS_iclass_PUSH128,
+ 0,
+ Opcode_push128_encode_fns, 0, 0 },
+ { "push128_m", ICLASS_iclass_PUSH128_M,
+ 0,
+ Opcode_push128_m_encode_fns, 0, 0 },
+ { "push128_pq", ICLASS_iclass_PUSH128_PQ,
+ 0,
+ Opcode_push128_pq_encode_fns, 0, 0 },
+ { "push2x128_pq", ICLASS_iclass_PUSH2X128_PQ,
+ 0,
+ Opcode_push2x128_pq_encode_fns, 0, 0 },
+ { "push32", ICLASS_iclass_PUSH32,
+ 0,
+ Opcode_push32_encode_fns, 0, 0 },
+ { "qready", ICLASS_iclass_QREADY,
+ 0,
+ Opcode_qready_encode_fns, 0, 0 },
+ { "rdtiep", ICLASS_iclass_RDTIEP,
+ 0,
+ Opcode_rdtiep_encode_fns, 0, 0 },
+ { "settiep", ICLASS_iclass_SETTIEP,
+ 0,
+ Opcode_settiep_encode_fns, 0, 0 },
+ { "smod_lut", ICLASS_iclass_SMOD_LUT,
+ 0,
+ Opcode_smod_lut_encode_fns, 0, 0 },
+ { "wrtbsigq", ICLASS_iclass_WRTBSIGQ,
+ 0,
+ Opcode_wrtbsigq_encode_fns, 0, 0 },
+ { "wrtbsigqm", ICLASS_iclass_WRTBSIGQM,
+ 0,
+ Opcode_wrtbsigqm_encode_fns, 0, 0 },
+ { "wrtiep", ICLASS_iclass_WRTIEP,
+ 0,
+ Opcode_wrtiep_encode_fns, 0, 0 },
+ { "wrtsigq", ICLASS_iclass_WRTSIGQ,
+ 0,
+ Opcode_wrtsigq_encode_fns, 0, 0 },
+ { "abs8", ICLASS_iclass_ABS8,
+ 0,
+ Opcode_abs8_encode_fns, 0, 0 },
+ { "add16", ICLASS_iclass_ADD16,
+ 0,
+ Opcode_add16_encode_fns, 0, 0 },
+ { "add32", ICLASS_iclass_ADD32,
+ 0,
+ Opcode_add32_encode_fns, 0, 0 },
+ { "addac_i2r", ICLASS_iclass_ADDAC_I2R,
+ 0,
+ Opcode_addac_i2r_encode_fns, 0, 0 },
+ { "addac_r2i", ICLASS_iclass_ADDAC_R2I,
+ 0,
+ Opcode_addac_r2i_encode_fns, 0, 0 },
+ { "addar2", ICLASS_iclass_ADDAR2,
+ 0,
+ Opcode_addar2_encode_fns, 0, 0 },
+ { "addcm", ICLASS_iclass_ADDCM,
+ 0,
+ Opcode_addcm_encode_fns, 0, 0 },
+ { "addwrp", ICLASS_iclass_ADDWRP,
+ 0,
+ Opcode_addwrp_encode_fns, 0, 0 },
+ { "and128", ICLASS_iclass_AND128,
+ 0,
+ Opcode_and128_encode_fns, 0, 0 },
+ { "argmax8", ICLASS_iclass_ARGMAX8,
+ 0,
+ Opcode_argmax8_encode_fns, 0, 0 },
+ { "asl", ICLASS_iclass_ASL,
+ 0,
+ Opcode_asl_encode_fns, 0, 0 },
+ { "asl32", ICLASS_iclass_ASL32,
+ 0,
+ Opcode_asl32_encode_fns, 0, 0 },
+ { "aslacm", ICLASS_iclass_ASLACM,
+ 0,
+ Opcode_aslacm_encode_fns, 0, 0 },
+ { "aslm", ICLASS_iclass_ASLM,
+ 0,
+ Opcode_aslm_encode_fns, 0, 0 },
+ { "aslm32", ICLASS_iclass_ASLM32,
+ 0,
+ Opcode_aslm32_encode_fns, 0, 0 },
+ { "asr", ICLASS_iclass_ASR,
+ 0,
+ Opcode_asr_encode_fns, 0, 0 },
+ { "asr32", ICLASS_iclass_ASR32,
+ 0,
+ Opcode_asr32_encode_fns, 0, 0 },
+ { "asrac", ICLASS_iclass_ASRAC,
+ 0,
+ Opcode_asrac_encode_fns, 0, 0 },
+ { "asrm", ICLASS_iclass_ASRM,
+ 0,
+ Opcode_asrm_encode_fns, 0, 0 },
+ { "bitfext", ICLASS_iclass_BITFEXT,
+ 0,
+ Opcode_bitfext_encode_fns, 0, 0 },
+ { "bitfins", ICLASS_iclass_BITFINS,
+ 0,
+ Opcode_bitfins_encode_fns, 0, 0 },
+ { "clb_c", ICLASS_iclass_CLB_C,
+ 0,
+ Opcode_clb_c_encode_fns, 0, 0 },
+ { "clb_r", ICLASS_iclass_CLB_R,
+ 0,
+ Opcode_clb_r_encode_fns, 0, 0 },
+ { "cmp8", ICLASS_iclass_CMP8,
+ 0,
+ Opcode_cmp8_encode_fns, 0, 0 },
+ { "cmp_i", ICLASS_iclass_CMP_I,
+ 0,
+ Opcode_cmp_i_encode_fns, 0, 0 },
+ { "cmp_r", ICLASS_iclass_CMP_R,
+ 0,
+ Opcode_cmp_r_encode_fns, 0, 0 },
+ { "ext", ICLASS_iclass_EXT,
+ 0,
+ Opcode_ext_encode_fns, 0, 0 },
+ { "ext_r", ICLASS_iclass_EXT_R,
+ 0,
+ Opcode_ext_r_encode_fns, 0, 0 },
+ { "ext32_i", ICLASS_iclass_EXT32_I,
+ 0,
+ Opcode_ext32_i_encode_fns, 0, 0 },
+ { "ext32_r", ICLASS_iclass_EXT32_R,
+ 0,
+ Opcode_ext32_r_encode_fns, 0, 0 },
+ { "extui4", ICLASS_iclass_EXTUI4,
+ 0,
+ Opcode_extui4_encode_fns, 0, 0 },
+ { "lslm", ICLASS_iclass_LSLM,
+ 0,
+ Opcode_lslm_encode_fns, 0, 0 },
+ { "lsrm", ICLASS_iclass_LSRM,
+ 0,
+ Opcode_lsrm_encode_fns, 0, 0 },
+ { "max8", ICLASS_iclass_MAX8,
+ 0,
+ Opcode_max8_encode_fns, 0, 0 },
+ { "mean", ICLASS_iclass_MEAN,
+ 0,
+ Opcode_mean_encode_fns, 0, 0 },
+ { "mean32", ICLASS_iclass_MEAN32,
+ 0,
+ Opcode_mean32_encode_fns, 0, 0 },
+ { "min8", ICLASS_iclass_MIN8,
+ 0,
+ Opcode_min8_encode_fns, 0, 0 },
+ { "minclb_c", ICLASS_iclass_MINCLB_C,
+ 0,
+ Opcode_minclb_c_encode_fns, 0, 0 },
+ { "minclb_r", ICLASS_iclass_MINCLB_R,
+ 0,
+ Opcode_minclb_r_encode_fns, 0, 0 },
+ { "not128", ICLASS_iclass_NOT128,
+ 0,
+ Opcode_not128_encode_fns, 0, 0 },
+ { "or128", ICLASS_iclass_OR128,
+ 0,
+ Opcode_or128_encode_fns, 0, 0 },
+ { "perm", ICLASS_iclass_PERM,
+ 0,
+ Opcode_perm_encode_fns, 0, 0 },
+ { "redac", ICLASS_iclass_REDAC,
+ 0,
+ Opcode_redac_encode_fns, 0, 0 },
+ { "redac2", ICLASS_iclass_REDAC2,
+ 0,
+ Opcode_redac2_encode_fns, 0, 0 },
+ { "redac4", ICLASS_iclass_REDAC4,
+ 0,
+ Opcode_redac4_encode_fns, 0, 0 },
+ { "redacs", ICLASS_iclass_REDACS,
+ 0,
+ Opcode_redacs_encode_fns, 0, 0 },
+ { "sminclb_c", ICLASS_iclass_SMINCLB_C,
+ 0,
+ Opcode_sminclb_c_encode_fns, 0, 0 },
+ { "sminclb_r", ICLASS_iclass_SMINCLB_R,
+ 0,
+ Opcode_sminclb_r_encode_fns, 0, 0 },
+ { "stswapbm", ICLASS_iclass_STSWAPBM,
+ 0,
+ Opcode_stswapbm_encode_fns, 0, 0 },
+ { "stswapbmu", ICLASS_iclass_STSWAPBMU,
+ 0,
+ Opcode_stswapbmu_encode_fns, 0, 0 },
+ { "sub32", ICLASS_iclass_SUB32,
+ 0,
+ Opcode_sub32_encode_fns, 0, 0 },
+ { "subac_i2r", ICLASS_iclass_SUBAC_I2R,
+ 0,
+ Opcode_subac_i2r_encode_fns, 0, 0 },
+ { "subac_r2i", ICLASS_iclass_SUBAC_R2I,
+ 0,
+ Opcode_subac_r2i_encode_fns, 0, 0 },
+ { "subarx", ICLASS_iclass_SUBARX,
+ 0,
+ Opcode_subarx_encode_fns, 0, 0 },
+ { "subcm", ICLASS_iclass_SUBCM,
+ 0,
+ Opcode_subcm_encode_fns, 0, 0 },
+ { "submean", ICLASS_iclass_SUBMEAN,
+ 0,
+ Opcode_submean_encode_fns, 0, 0 },
+ { "subwrp", ICLASS_iclass_SUBWRP,
+ 0,
+ Opcode_subwrp_encode_fns, 0, 0 },
+ { "trans", ICLASS_iclass_TRANS,
+ 0,
+ Opcode_trans_encode_fns, 0, 0 },
+ { "xor128", ICLASS_iclass_XOR128,
+ 0,
+ Opcode_xor128_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "rur.sov", ICLASS_rur_sov,
+ 0,
+ Opcode_rur_sov_encode_fns, 0, 0 },
+ { "wur.sov", ICLASS_wur_sov,
+ 0,
+ Opcode_wur_sov_encode_fns, 0, 0 },
+ { "rur.sat_mode", ICLASS_rur_sat_mode,
+ 0,
+ Opcode_rur_sat_mode_encode_fns, 0, 0 },
+ { "wur.sat_mode", ICLASS_wur_sat_mode,
+ 0,
+ Opcode_wur_sat_mode_encode_fns, 0, 0 },
+ { "rur.sar0", ICLASS_rur_sar0,
+ 0,
+ Opcode_rur_sar0_encode_fns, 0, 0 },
+ { "wur.sar0", ICLASS_wur_sar0,
+ 0,
+ Opcode_wur_sar0_encode_fns, 0, 0 },
+ { "rur.sar1", ICLASS_rur_sar1,
+ 0,
+ Opcode_rur_sar1_encode_fns, 0, 0 },
+ { "wur.sar1", ICLASS_wur_sar1,
+ 0,
+ Opcode_wur_sar1_encode_fns, 0, 0 },
+ { "rur.sar2", ICLASS_rur_sar2,
+ 0,
+ Opcode_rur_sar2_encode_fns, 0, 0 },
+ { "wur.sar2", ICLASS_wur_sar2,
+ 0,
+ Opcode_wur_sar2_encode_fns, 0, 0 },
+ { "rur.sar3", ICLASS_rur_sar3,
+ 0,
+ Opcode_rur_sar3_encode_fns, 0, 0 },
+ { "wur.sar3", ICLASS_wur_sar3,
+ 0,
+ Opcode_wur_sar3_encode_fns, 0, 0 },
+ { "rur.hsar0", ICLASS_rur_hsar0,
+ 0,
+ Opcode_rur_hsar0_encode_fns, 0, 0 },
+ { "wur.hsar0", ICLASS_wur_hsar0,
+ 0,
+ Opcode_wur_hsar0_encode_fns, 0, 0 },
+ { "rur.hsar1", ICLASS_rur_hsar1,
+ 0,
+ Opcode_rur_hsar1_encode_fns, 0, 0 },
+ { "wur.hsar1", ICLASS_wur_hsar1,
+ 0,
+ Opcode_wur_hsar1_encode_fns, 0, 0 },
+ { "rur.hsar2", ICLASS_rur_hsar2,
+ 0,
+ Opcode_rur_hsar2_encode_fns, 0, 0 },
+ { "wur.hsar2", ICLASS_wur_hsar2,
+ 0,
+ Opcode_wur_hsar2_encode_fns, 0, 0 },
+ { "rur.hsar3", ICLASS_rur_hsar3,
+ 0,
+ Opcode_rur_hsar3_encode_fns, 0, 0 },
+ { "wur.hsar3", ICLASS_wur_hsar3,
+ 0,
+ Opcode_wur_hsar3_encode_fns, 0, 0 },
+ { "rur.max_reg_0", ICLASS_rur_max_reg_0,
+ 0,
+ Opcode_rur_max_reg_0_encode_fns, 0, 0 },
+ { "wur.max_reg_0", ICLASS_wur_max_reg_0,
+ 0,
+ Opcode_wur_max_reg_0_encode_fns, 0, 0 },
+ { "rur.max_reg_1", ICLASS_rur_max_reg_1,
+ 0,
+ Opcode_rur_max_reg_1_encode_fns, 0, 0 },
+ { "wur.max_reg_1", ICLASS_wur_max_reg_1,
+ 0,
+ Opcode_wur_max_reg_1_encode_fns, 0, 0 },
+ { "rur.max_reg_2", ICLASS_rur_max_reg_2,
+ 0,
+ Opcode_rur_max_reg_2_encode_fns, 0, 0 },
+ { "wur.max_reg_2", ICLASS_wur_max_reg_2,
+ 0,
+ Opcode_wur_max_reg_2_encode_fns, 0, 0 },
+ { "rur.max_reg_3", ICLASS_rur_max_reg_3,
+ 0,
+ Opcode_rur_max_reg_3_encode_fns, 0, 0 },
+ { "wur.max_reg_3", ICLASS_wur_max_reg_3,
+ 0,
+ Opcode_wur_max_reg_3_encode_fns, 0, 0 },
+ { "rur.arg_max_reg_0", ICLASS_rur_arg_max_reg_0,
+ 0,
+ Opcode_rur_arg_max_reg_0_encode_fns, 0, 0 },
+ { "wur.arg_max_reg_0", ICLASS_wur_arg_max_reg_0,
+ 0,
+ Opcode_wur_arg_max_reg_0_encode_fns, 0, 0 },
+ { "rur.arg_max_reg_1", ICLASS_rur_arg_max_reg_1,
+ 0,
+ Opcode_rur_arg_max_reg_1_encode_fns, 0, 0 },
+ { "wur.arg_max_reg_1", ICLASS_wur_arg_max_reg_1,
+ 0,
+ Opcode_wur_arg_max_reg_1_encode_fns, 0, 0 },
+ { "rur.arg_max_reg_2", ICLASS_rur_arg_max_reg_2,
+ 0,
+ Opcode_rur_arg_max_reg_2_encode_fns, 0, 0 },
+ { "wur.arg_max_reg_2", ICLASS_wur_arg_max_reg_2,
+ 0,
+ Opcode_wur_arg_max_reg_2_encode_fns, 0, 0 },
+ { "rur.arg_max_reg_3", ICLASS_rur_arg_max_reg_3,
+ 0,
+ Opcode_rur_arg_max_reg_3_encode_fns, 0, 0 },
+ { "wur.arg_max_reg_3", ICLASS_wur_arg_max_reg_3,
+ 0,
+ Opcode_wur_arg_max_reg_3_encode_fns, 0, 0 },
+ { "rur.nco_counter_0", ICLASS_rur_nco_counter_0,
+ 0,
+ Opcode_rur_nco_counter_0_encode_fns, 0, 0 },
+ { "wur.nco_counter_0", ICLASS_wur_nco_counter_0,
+ 0,
+ Opcode_wur_nco_counter_0_encode_fns, 0, 0 },
+ { "rur.nco_counter_1", ICLASS_rur_nco_counter_1,
+ 0,
+ Opcode_rur_nco_counter_1_encode_fns, 0, 0 },
+ { "wur.nco_counter_1", ICLASS_wur_nco_counter_1,
+ 0,
+ Opcode_wur_nco_counter_1_encode_fns, 0, 0 },
+ { "rur.nco_counter_2", ICLASS_rur_nco_counter_2,
+ 0,
+ Opcode_rur_nco_counter_2_encode_fns, 0, 0 },
+ { "wur.nco_counter_2", ICLASS_wur_nco_counter_2,
+ 0,
+ Opcode_wur_nco_counter_2_encode_fns, 0, 0 },
+ { "rur.nco_counter_3", ICLASS_rur_nco_counter_3,
+ 0,
+ Opcode_rur_nco_counter_3_encode_fns, 0, 0 },
+ { "wur.nco_counter_3", ICLASS_wur_nco_counter_3,
+ 0,
+ Opcode_wur_nco_counter_3_encode_fns, 0, 0 },
+ { "rur.interp_ext_n", ICLASS_rur_interp_ext_n,
+ 0,
+ Opcode_rur_interp_ext_n_encode_fns, 0, 0 },
+ { "wur.interp_ext_n", ICLASS_wur_interp_ext_n,
+ 0,
+ Opcode_wur_interp_ext_n_encode_fns, 0, 0 },
+ { "rur.interp_ext_l", ICLASS_rur_interp_ext_l,
+ 0,
+ Opcode_rur_interp_ext_l_encode_fns, 0, 0 },
+ { "wur.interp_ext_l", ICLASS_wur_interp_ext_l,
+ 0,
+ Opcode_wur_interp_ext_l_encode_fns, 0, 0 },
+ { "rur.llr_buf_0", ICLASS_rur_llr_buf_0,
+ 0,
+ Opcode_rur_llr_buf_0_encode_fns, 0, 0 },
+ { "wur.llr_buf_0", ICLASS_wur_llr_buf_0,
+ 0,
+ Opcode_wur_llr_buf_0_encode_fns, 0, 0 },
+ { "rur.llr_buf_1", ICLASS_rur_llr_buf_1,
+ 0,
+ Opcode_rur_llr_buf_1_encode_fns, 0, 0 },
+ { "wur.llr_buf_1", ICLASS_wur_llr_buf_1,
+ 0,
+ Opcode_wur_llr_buf_1_encode_fns, 0, 0 },
+ { "rur.llr_buf_2", ICLASS_rur_llr_buf_2,
+ 0,
+ Opcode_rur_llr_buf_2_encode_fns, 0, 0 },
+ { "wur.llr_buf_2", ICLASS_wur_llr_buf_2,
+ 0,
+ Opcode_wur_llr_buf_2_encode_fns, 0, 0 },
+ { "rur.llr_buf_3", ICLASS_rur_llr_buf_3,
+ 0,
+ Opcode_rur_llr_buf_3_encode_fns, 0, 0 },
+ { "wur.llr_buf_3", ICLASS_wur_llr_buf_3,
+ 0,
+ Opcode_wur_llr_buf_3_encode_fns, 0, 0 },
+ { "rur.llr_buf_4", ICLASS_rur_llr_buf_4,
+ 0,
+ Opcode_rur_llr_buf_4_encode_fns, 0, 0 },
+ { "wur.llr_buf_4", ICLASS_wur_llr_buf_4,
+ 0,
+ Opcode_wur_llr_buf_4_encode_fns, 0, 0 },
+ { "rur.llr_buf_5", ICLASS_rur_llr_buf_5,
+ 0,
+ Opcode_rur_llr_buf_5_encode_fns, 0, 0 },
+ { "wur.llr_buf_5", ICLASS_wur_llr_buf_5,
+ 0,
+ Opcode_wur_llr_buf_5_encode_fns, 0, 0 },
+ { "rur.llr_buf_6", ICLASS_rur_llr_buf_6,
+ 0,
+ Opcode_rur_llr_buf_6_encode_fns, 0, 0 },
+ { "wur.llr_buf_6", ICLASS_wur_llr_buf_6,
+ 0,
+ Opcode_wur_llr_buf_6_encode_fns, 0, 0 },
+ { "rur.llr_buf_7", ICLASS_rur_llr_buf_7,
+ 0,
+ Opcode_rur_llr_buf_7_encode_fns, 0, 0 },
+ { "wur.llr_buf_7", ICLASS_wur_llr_buf_7,
+ 0,
+ Opcode_wur_llr_buf_7_encode_fns, 0, 0 },
+ { "rur.llr_buf_8", ICLASS_rur_llr_buf_8,
+ 0,
+ Opcode_rur_llr_buf_8_encode_fns, 0, 0 },
+ { "wur.llr_buf_8", ICLASS_wur_llr_buf_8,
+ 0,
+ Opcode_wur_llr_buf_8_encode_fns, 0, 0 },
+ { "rur.llr_buf_9", ICLASS_rur_llr_buf_9,
+ 0,
+ Opcode_rur_llr_buf_9_encode_fns, 0, 0 },
+ { "wur.llr_buf_9", ICLASS_wur_llr_buf_9,
+ 0,
+ Opcode_wur_llr_buf_9_encode_fns, 0, 0 },
+ { "rur.llr_buf_10", ICLASS_rur_llr_buf_10,
+ 0,
+ Opcode_rur_llr_buf_10_encode_fns, 0, 0 },
+ { "wur.llr_buf_10", ICLASS_wur_llr_buf_10,
+ 0,
+ Opcode_wur_llr_buf_10_encode_fns, 0, 0 },
+ { "rur.llr_buf_11", ICLASS_rur_llr_buf_11,
+ 0,
+ Opcode_rur_llr_buf_11_encode_fns, 0, 0 },
+ { "wur.llr_buf_11", ICLASS_wur_llr_buf_11,
+ 0,
+ Opcode_wur_llr_buf_11_encode_fns, 0, 0 },
+ { "rur.llr_buf_12", ICLASS_rur_llr_buf_12,
+ 0,
+ Opcode_rur_llr_buf_12_encode_fns, 0, 0 },
+ { "wur.llr_buf_12", ICLASS_wur_llr_buf_12,
+ 0,
+ Opcode_wur_llr_buf_12_encode_fns, 0, 0 },
+ { "rur.llr_buf_13", ICLASS_rur_llr_buf_13,
+ 0,
+ Opcode_rur_llr_buf_13_encode_fns, 0, 0 },
+ { "wur.llr_buf_13", ICLASS_wur_llr_buf_13,
+ 0,
+ Opcode_wur_llr_buf_13_encode_fns, 0, 0 },
+ { "rur.llr_buf_14", ICLASS_rur_llr_buf_14,
+ 0,
+ Opcode_rur_llr_buf_14_encode_fns, 0, 0 },
+ { "wur.llr_buf_14", ICLASS_wur_llr_buf_14,
+ 0,
+ Opcode_wur_llr_buf_14_encode_fns, 0, 0 },
+ { "rur.llr_buf_15", ICLASS_rur_llr_buf_15,
+ 0,
+ Opcode_rur_llr_buf_15_encode_fns, 0, 0 },
+ { "wur.llr_buf_15", ICLASS_wur_llr_buf_15,
+ 0,
+ Opcode_wur_llr_buf_15_encode_fns, 0, 0 },
+ { "rur.llr_buf_16", ICLASS_rur_llr_buf_16,
+ 0,
+ Opcode_rur_llr_buf_16_encode_fns, 0, 0 },
+ { "wur.llr_buf_16", ICLASS_wur_llr_buf_16,
+ 0,
+ Opcode_wur_llr_buf_16_encode_fns, 0, 0 },
+ { "rur.llr_buf_17", ICLASS_rur_llr_buf_17,
+ 0,
+ Opcode_rur_llr_buf_17_encode_fns, 0, 0 },
+ { "wur.llr_buf_17", ICLASS_wur_llr_buf_17,
+ 0,
+ Opcode_wur_llr_buf_17_encode_fns, 0, 0 },
+ { "rur.llr_buf_18", ICLASS_rur_llr_buf_18,
+ 0,
+ Opcode_rur_llr_buf_18_encode_fns, 0, 0 },
+ { "wur.llr_buf_18", ICLASS_wur_llr_buf_18,
+ 0,
+ Opcode_wur_llr_buf_18_encode_fns, 0, 0 },
+ { "rur.llr_buf_19", ICLASS_rur_llr_buf_19,
+ 0,
+ Opcode_rur_llr_buf_19_encode_fns, 0, 0 },
+ { "wur.llr_buf_19", ICLASS_wur_llr_buf_19,
+ 0,
+ Opcode_wur_llr_buf_19_encode_fns, 0, 0 },
+ { "rur.llr_buf_20", ICLASS_rur_llr_buf_20,
+ 0,
+ Opcode_rur_llr_buf_20_encode_fns, 0, 0 },
+ { "wur.llr_buf_20", ICLASS_wur_llr_buf_20,
+ 0,
+ Opcode_wur_llr_buf_20_encode_fns, 0, 0 },
+ { "rur.llr_buf_21", ICLASS_rur_llr_buf_21,
+ 0,
+ Opcode_rur_llr_buf_21_encode_fns, 0, 0 },
+ { "wur.llr_buf_21", ICLASS_wur_llr_buf_21,
+ 0,
+ Opcode_wur_llr_buf_21_encode_fns, 0, 0 },
+ { "rur.llr_buf_22", ICLASS_rur_llr_buf_22,
+ 0,
+ Opcode_rur_llr_buf_22_encode_fns, 0, 0 },
+ { "wur.llr_buf_22", ICLASS_wur_llr_buf_22,
+ 0,
+ Opcode_wur_llr_buf_22_encode_fns, 0, 0 },
+ { "rur.llr_buf_23", ICLASS_rur_llr_buf_23,
+ 0,
+ Opcode_rur_llr_buf_23_encode_fns, 0, 0 },
+ { "wur.llr_buf_23", ICLASS_wur_llr_buf_23,
+ 0,
+ Opcode_wur_llr_buf_23_encode_fns, 0, 0 },
+ { "rur.smod_buf_0", ICLASS_rur_smod_buf_0,
+ 0,
+ Opcode_rur_smod_buf_0_encode_fns, 0, 0 },
+ { "wur.smod_buf_0", ICLASS_wur_smod_buf_0,
+ 0,
+ Opcode_wur_smod_buf_0_encode_fns, 0, 0 },
+ { "rur.smod_buf_1", ICLASS_rur_smod_buf_1,
+ 0,
+ Opcode_rur_smod_buf_1_encode_fns, 0, 0 },
+ { "wur.smod_buf_1", ICLASS_wur_smod_buf_1,
+ 0,
+ Opcode_wur_smod_buf_1_encode_fns, 0, 0 },
+ { "rur.smod_buf_2", ICLASS_rur_smod_buf_2,
+ 0,
+ Opcode_rur_smod_buf_2_encode_fns, 0, 0 },
+ { "wur.smod_buf_2", ICLASS_wur_smod_buf_2,
+ 0,
+ Opcode_wur_smod_buf_2_encode_fns, 0, 0 },
+ { "rur.smod_buf_3", ICLASS_rur_smod_buf_3,
+ 0,
+ Opcode_rur_smod_buf_3_encode_fns, 0, 0 },
+ { "wur.smod_buf_3", ICLASS_wur_smod_buf_3,
+ 0,
+ Opcode_wur_smod_buf_3_encode_fns, 0, 0 },
+ { "rur.smod_buf_4", ICLASS_rur_smod_buf_4,
+ 0,
+ Opcode_rur_smod_buf_4_encode_fns, 0, 0 },
+ { "wur.smod_buf_4", ICLASS_wur_smod_buf_4,
+ 0,
+ Opcode_wur_smod_buf_4_encode_fns, 0, 0 },
+ { "rur.smod_buf_5", ICLASS_rur_smod_buf_5,
+ 0,
+ Opcode_rur_smod_buf_5_encode_fns, 0, 0 },
+ { "wur.smod_buf_5", ICLASS_wur_smod_buf_5,
+ 0,
+ Opcode_wur_smod_buf_5_encode_fns, 0, 0 },
+ { "rur.smod_buf_6", ICLASS_rur_smod_buf_6,
+ 0,
+ Opcode_rur_smod_buf_6_encode_fns, 0, 0 },
+ { "wur.smod_buf_6", ICLASS_wur_smod_buf_6,
+ 0,
+ Opcode_wur_smod_buf_6_encode_fns, 0, 0 },
+ { "rur.smod_buf_7", ICLASS_rur_smod_buf_7,
+ 0,
+ Opcode_rur_smod_buf_7_encode_fns, 0, 0 },
+ { "wur.smod_buf_7", ICLASS_wur_smod_buf_7,
+ 0,
+ Opcode_wur_smod_buf_7_encode_fns, 0, 0 },
+ { "rur.weight_reg", ICLASS_rur_weight_reg,
+ 0,
+ Opcode_rur_weight_reg_encode_fns, 0, 0 },
+ { "wur.weight_reg", ICLASS_wur_weight_reg,
+ 0,
+ Opcode_wur_weight_reg_encode_fns, 0, 0 },
+ { "rur.scale_reg", ICLASS_rur_scale_reg,
+ 0,
+ Opcode_rur_scale_reg_encode_fns, 0, 0 },
+ { "wur.scale_reg", ICLASS_wur_scale_reg,
+ 0,
+ Opcode_wur_scale_reg_encode_fns, 0, 0 },
+ { "rur.llr_pos", ICLASS_rur_llr_pos,
+ 0,
+ Opcode_rur_llr_pos_encode_fns, 0, 0 },
+ { "wur.llr_pos", ICLASS_wur_llr_pos,
+ 0,
+ Opcode_wur_llr_pos_encode_fns, 0, 0 },
+ { "rur.smod_pos", ICLASS_rur_smod_pos,
+ 0,
+ Opcode_rur_smod_pos_encode_fns, 0, 0 },
+ { "wur.smod_pos", ICLASS_wur_smod_pos,
+ 0,
+ Opcode_wur_smod_pos_encode_fns, 0, 0 },
+ { "rur.perm_reg", ICLASS_rur_perm_reg,
+ 0,
+ Opcode_rur_perm_reg_encode_fns, 0, 0 },
+ { "wur.perm_reg", ICLASS_wur_perm_reg,
+ 0,
+ Opcode_wur_perm_reg_encode_fns, 0, 0 },
+ { "rur.smod_offset_table_0", ICLASS_rur_smod_offset_table_0,
+ 0,
+ Opcode_rur_smod_offset_table_0_encode_fns, 0, 0 },
+ { "wur.smod_offset_table_0", ICLASS_wur_smod_offset_table_0,
+ 0,
+ Opcode_wur_smod_offset_table_0_encode_fns, 0, 0 },
+ { "rur.smod_offset_table_1", ICLASS_rur_smod_offset_table_1,
+ 0,
+ Opcode_rur_smod_offset_table_1_encode_fns, 0, 0 },
+ { "wur.smod_offset_table_1", ICLASS_wur_smod_offset_table_1,
+ 0,
+ Opcode_wur_smod_offset_table_1_encode_fns, 0, 0 },
+ { "rur.smod_offset_table_2", ICLASS_rur_smod_offset_table_2,
+ 0,
+ Opcode_rur_smod_offset_table_2_encode_fns, 0, 0 },
+ { "wur.smod_offset_table_2", ICLASS_wur_smod_offset_table_2,
+ 0,
+ Opcode_wur_smod_offset_table_2_encode_fns, 0, 0 },
+ { "rur.smod_offset_table_3", ICLASS_rur_smod_offset_table_3,
+ 0,
+ Opcode_rur_smod_offset_table_3_encode_fns, 0, 0 },
+ { "wur.smod_offset_table_3", ICLASS_wur_smod_offset_table_3,
+ 0,
+ Opcode_wur_smod_offset_table_3_encode_fns, 0, 0 },
+ { "rur.phasor_n", ICLASS_rur_phasor_n,
+ 0,
+ Opcode_rur_phasor_n_encode_fns, 0, 0 },
+ { "wur.phasor_n", ICLASS_wur_phasor_n,
+ 0,
+ Opcode_wur_phasor_n_encode_fns, 0, 0 },
+ { "rur.phasor_offset", ICLASS_rur_phasor_offset,
+ 0,
+ Opcode_rur_phasor_offset_encode_fns, 0, 0 },
+ { "wur.phasor_offset", ICLASS_wur_phasor_offset,
+ 0,
+ Opcode_wur_phasor_offset_encode_fns, 0, 0 }
+};
+
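+/* Internal opcode identifiers.  By construction of this generated file,
+   each enumerator indexes the corresponding entry in the opcode table
+   above, and the slot decode functions below return one of these values
+   when an instruction word matches that opcode's encoding. */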
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_SIMCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_RUR_THREADPTR,
+ OPCODE_WUR_THREADPTR,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPNEZ,
+ OPCODE_LOOPGTZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_176,
+ OPCODE_WSR_176,
+ OPCODE_RSR_208,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_ANDB,
+ OPCODE_ANDBC,
+ OPCODE_ORB,
+ OPCODE_ORBC,
+ OPCODE_XORB,
+ OPCODE_ANY4,
+ OPCODE_ALL4,
+ OPCODE_ANY8,
+ OPCODE_ALL8,
+ OPCODE_BF,
+ OPCODE_BT,
+ OPCODE_MOVF,
+ OPCODE_MOVT,
+ OPCODE_RSR_BR,
+ OPCODE_WSR_BR,
+ OPCODE_XSR_BR,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_IPF,
+ OPCODE_IHI,
+ OPCODE_IPFL,
+ OPCODE_IHU,
+ OPCODE_IIU,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFW,
+ OPCODE_DPFRO,
+ OPCODE_DPFWO,
+ OPCODE_DPFL,
+ OPCODE_DHU,
+ OPCODE_DIU,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_RSR_CPENABLE,
+ OPCODE_WSR_CPENABLE,
+ OPCODE_XSR_CPENABLE,
+ OPCODE_CLAMPS,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_FCR,
+ OPCODE_WUR_FCR,
+ OPCODE_RUR_FSR,
+ OPCODE_WUR_FSR,
+ OPCODE_ADD_S,
+ OPCODE_SUB_S,
+ OPCODE_MUL_S,
+ OPCODE_MADD_S,
+ OPCODE_MSUB_S,
+ OPCODE_MOVF_S,
+ OPCODE_MOVT_S,
+ OPCODE_MOVEQZ_S,
+ OPCODE_MOVNEZ_S,
+ OPCODE_MOVLTZ_S,
+ OPCODE_MOVGEZ_S,
+ OPCODE_ABS_S,
+ OPCODE_MOV_S,
+ OPCODE_NEG_S,
+ OPCODE_UN_S,
+ OPCODE_OEQ_S,
+ OPCODE_UEQ_S,
+ OPCODE_OLT_S,
+ OPCODE_ULT_S,
+ OPCODE_OLE_S,
+ OPCODE_ULE_S,
+ OPCODE_FLOAT_S,
+ OPCODE_UFLOAT_S,
+ OPCODE_ROUND_S,
+ OPCODE_CEIL_S,
+ OPCODE_FLOOR_S,
+ OPCODE_TRUNC_S,
+ OPCODE_UTRUNC_S,
+ OPCODE_RFR,
+ OPCODE_WFR,
+ OPCODE_LSI,
+ OPCODE_LSIU,
+ OPCODE_LSX,
+ OPCODE_LSXU,
+ OPCODE_SSI,
+ OPCODE_SSIU,
+ OPCODE_SSX,
+ OPCODE_SSXU,
+ OPCODE_GET_ARGMAX,
+ OPCODE_GET_HSAR,
+ OPCODE_GET_HSAR2SAR,
+ OPCODE_GET_INTERP_EXT_N,
+ OPCODE_GET_INTERP_EXT_L,
+ OPCODE_GET_LLR_BUF,
+ OPCODE_GET_LLR_POS,
+ OPCODE_GET_MAX,
+ OPCODE_GET_NCO,
+ OPCODE_GET_PERM_REG,
+ OPCODE_GET_PHASOR_N,
+ OPCODE_GET_PHASOR_OFFSET,
+ OPCODE_GET_SAR,
+ OPCODE_GET_SCALE_REG,
+ OPCODE_GET_SMOD_BUF,
+ OPCODE_GET_SMOD_OFFSET_TABLE,
+ OPCODE_GET_SMOD_POS,
+ OPCODE_GET_SOV,
+ OPCODE_GET_WGHT,
+ OPCODE_SET_ARGMAX,
+ OPCODE_SET_EXT_REGS,
+ OPCODE_SET_HSAR,
+ OPCODE_SET_LLR_BUF,
+ OPCODE_SET_LLR_POS,
+ OPCODE_SET_MAX,
+ OPCODE_SET_NCO,
+ OPCODE_SET_PERM_REG,
+ OPCODE_SET_PHASOR_N,
+ OPCODE_SET_PHASOR_OFFSET,
+ OPCODE_SET_SAR,
+ OPCODE_SET_SCALE_REG,
+ OPCODE_SET_SMOD_BUF,
+ OPCODE_SET_SMOD_OFFSET_TABLE,
+ OPCODE_SET_SMOD_POS,
+ OPCODE_SET_SOV,
+ OPCODE_SET_WGHT,
+ OPCODE_LAC2X32,
+ OPCODE_LAC2X64_0,
+ OPCODE_LAC2X64_1,
+ OPCODE_LAC2X64_2,
+ OPCODE_LAC2X64_3,
+ OPCODE_LAC32_R,
+ OPCODE_LAC_IH,
+ OPCODE_LAC_IL,
+ OPCODE_LAC_RH,
+ OPCODE_LAC_RL,
+ OPCODE_LCM,
+ OPCODE_LCM_PINC,
+ OPCODE_LCM_PINC_X,
+ OPCODE_LCM_U,
+ OPCODE_LCM_X,
+ OPCODE_LCM_XU,
+ OPCODE_LP,
+ OPCODE_LP_X,
+ OPCODE_LQ,
+ OPCODE_LQ_X,
+ OPCODE_LUT0,
+ OPCODE_LUT1,
+ OPCODE_LUT2,
+ OPCODE_LUT3,
+ OPCODE_SAC2X32,
+ OPCODE_SAC2X64_0,
+ OPCODE_SAC2X64_1,
+ OPCODE_SAC2X64_2,
+ OPCODE_SAC2X64_3,
+ OPCODE_SAC32_R,
+ OPCODE_SAC_IH,
+ OPCODE_SAC_IL,
+ OPCODE_SAC_RH,
+ OPCODE_SAC_RL,
+ OPCODE_SCM,
+ OPCODE_SCM_PINC,
+ OPCODE_SCM_PINC_X,
+ OPCODE_SCM_U,
+ OPCODE_SCM_X,
+ OPCODE_SCM_XU,
+ OPCODE_STORE_P,
+ OPCODE_STORE_Q,
+ OPCODE_AR2CM_DUP,
+ OPCODE_AR2CM_LN,
+ OPCODE_AR2CM_LN_I,
+ OPCODE_AR2CM_LN_R,
+ OPCODE_AR2PQ_LN,
+ OPCODE_AR2SAR_DUP,
+ OPCODE_CLRAC,
+ OPCODE_CLRCM,
+ OPCODE_CM2AR_LN,
+ OPCODE_CM2AR_LN_I,
+ OPCODE_CM2AR_LN_R,
+ OPCODE_COMB_AR,
+ OPCODE_CONJ,
+ OPCODE_MOV2AC32_I,
+ OPCODE_MOV2AC32_R,
+ OPCODE_MOV2CM2PQ,
+ OPCODE_MOVAC,
+ OPCODE_MOVAC_I,
+ OPCODE_MOVAC_I2R,
+ OPCODE_MOVAC_R,
+ OPCODE_MOVAC_R2I,
+ OPCODE_MOVAR2,
+ OPCODE_MOVCM,
+ OPCODE_MOVCM2PQ,
+ OPCODE_MOVCND_0,
+ OPCODE_MOVCND_1,
+ OPCODE_MOVCND_2,
+ OPCODE_MOVCND_3,
+ OPCODE_MOVCND_4,
+ OPCODE_MOVCND_5,
+ OPCODE_MOVCND_6,
+ OPCODE_MOVCND_7,
+ OPCODE_MOVCND8_0,
+ OPCODE_MOVCND8_1,
+ OPCODE_MOVCND8_2,
+ OPCODE_MOVCND8_3,
+ OPCODE_MOVCND8_4,
+ OPCODE_MOVCND8_5,
+ OPCODE_MOVCND8_6,
+ OPCODE_MOVCND8_7,
+ OPCODE_MOV_I,
+ OPCODE_MOVPQ2PQ,
+ OPCODE_MOV_R,
+ OPCODE_NEGCM,
+ OPCODE_POP16LLR_1,
+ OPCODE_PQ2CM,
+ OPCODE_SWAPAC_R,
+ OPCODE_SWAPAC_RI,
+ OPCODE_SWAPB,
+ OPCODE_ADD2AC,
+ OPCODE_ADDAC,
+ OPCODE_CDOT,
+ OPCODE_CDOTAC,
+ OPCODE_CDOTACS,
+ OPCODE_CMAC,
+ OPCODE_CMACS,
+ OPCODE_CMPY,
+ OPCODE_CMPY2CM,
+ OPCODE_CMPY2PQ,
+ OPCODE_CMPYS,
+ OPCODE_CMPYXP2PQ,
+ OPCODE_COMB32,
+ OPCODE_DOT,
+ OPCODE_DOTAC,
+ OPCODE_DOTACS,
+ OPCODE_LIN_INT,
+ OPCODE_LLRPRE1,
+ OPCODE_LLRPRE2,
+ OPCODE_MAC,
+ OPCODE_MAC8,
+ OPCODE_MACD8,
+ OPCODE_MACPQXP_0,
+ OPCODE_MACPQXP_1,
+ OPCODE_MACPQXP_2,
+ OPCODE_MACPQXP_3,
+ OPCODE_MACS,
+ OPCODE_MACXP2_0,
+ OPCODE_MACXP2_1,
+ OPCODE_MACXP_0,
+ OPCODE_MACXP_1,
+ OPCODE_MACXP_2,
+ OPCODE_MACXP_3,
+ OPCODE_MOV2AC,
+ OPCODE_MPY,
+ OPCODE_MPY2CM,
+ OPCODE_MPY2PQ,
+ OPCODE_MPY8,
+ OPCODE_MPYADD8_2CM,
+ OPCODE_MPYD8,
+ OPCODE_MPYPQXP_0,
+ OPCODE_MPYPQXP_1,
+ OPCODE_MPYPQXP_2,
+ OPCODE_MPYPQXP_3,
+ OPCODE_MPYS,
+ OPCODE_MPYXP2PQ,
+ OPCODE_MPYXP2_0,
+ OPCODE_MPYXP2_1,
+ OPCODE_MPYXP_0,
+ OPCODE_MPYXP_1,
+ OPCODE_MPYXP_2,
+ OPCODE_MPYXP_3,
+ OPCODE_NORMACD,
+ OPCODE_NORMACPQ_I,
+ OPCODE_NORMACPQ_R,
+ OPCODE_NORMD,
+ OPCODE_NORMPYPQ_I,
+ OPCODE_NORMPYPQ_R,
+ OPCODE_RCMAC,
+ OPCODE_RCMPY,
+ OPCODE_RCMPY2CM,
+ OPCODE_RFIR,
+ OPCODE_RFIRA,
+ OPCODE_RFIRD,
+ OPCODE_RFIRDA,
+ OPCODE_RMAC,
+ OPCODE_RMPY,
+ OPCODE_RMPY2CM,
+ OPCODE_SMOD_ALIGN,
+ OPCODE_SMOD_SCR,
+ OPCODE_SUB2AC,
+ OPCODE_WGHT32,
+ OPCODE_CLRTIEP,
+ OPCODE_EXT_2FIFO_0,
+ OPCODE_EXT_2FIFO_1,
+ OPCODE_EXT_2FIFO_2,
+ OPCODE_EXT_2FIFO_3,
+ OPCODE_EXT_R2FIFO_0,
+ OPCODE_EXT_R2FIFO_1,
+ OPCODE_EXT_R2FIFO_2,
+ OPCODE_EXT_R2FIFO_3,
+ OPCODE_LUT,
+ OPCODE_LUT_AR,
+ OPCODE_LUT_IEXT,
+ OPCODE_LUT_PHASOR,
+ OPCODE_LUT_REXT,
+ OPCODE_LUT_WRITE,
+ OPCODE_MOVEQ128_0,
+ OPCODE_MOVEQ128_1,
+ OPCODE_MOVEQ128_2,
+ OPCODE_MOVEQ128_3,
+ OPCODE_MOVEQ128_4,
+ OPCODE_MOVEQ128_5,
+ OPCODE_MOVEQ32_0,
+ OPCODE_MOVEQ32_1,
+ OPCODE_MOVEQ32_2,
+ OPCODE_MOVEQ32_3,
+ OPCODE_NCO_UPDATE,
+ OPCODE_POP128_0,
+ OPCODE_POP128_1,
+ OPCODE_POP128_2,
+ OPCODE_POP128_3,
+ OPCODE_POP128_4,
+ OPCODE_POP128_5,
+ OPCODE_POP128_2CMPQ_0,
+ OPCODE_POP128_2CMPQ_1,
+ OPCODE_POP128_2CMPQ_2,
+ OPCODE_POP128_2CMPQ_3,
+ OPCODE_POP128_2M_0,
+ OPCODE_POP128_2M_1,
+ OPCODE_POP128_2M_2,
+ OPCODE_POP128_2M_3,
+ OPCODE_POP128_2PQ_0,
+ OPCODE_POP128_2PQ_1,
+ OPCODE_POP128_2PQ_2,
+ OPCODE_POP128_2PQ_3,
+ OPCODE_POP128_2PQ_4,
+ OPCODE_POP128_2PQ_5,
+ OPCODE_POP2X128_2PQ_01,
+ OPCODE_POP2X128_2PQ_03,
+ OPCODE_POP2X128_2PQ_21,
+ OPCODE_POP2X128_2PQ_23,
+ OPCODE_POP32_0,
+ OPCODE_POP32_1,
+ OPCODE_POP32_2,
+ OPCODE_POP32_3,
+ OPCODE_PUSH128,
+ OPCODE_PUSH128_M,
+ OPCODE_PUSH128_PQ,
+ OPCODE_PUSH2X128_PQ,
+ OPCODE_PUSH32,
+ OPCODE_QREADY,
+ OPCODE_RDTIEP,
+ OPCODE_SETTIEP,
+ OPCODE_SMOD_LUT,
+ OPCODE_WRTBSIGQ,
+ OPCODE_WRTBSIGQM,
+ OPCODE_WRTIEP,
+ OPCODE_WRTSIGQ,
+ OPCODE_ABS8,
+ OPCODE_ADD16,
+ OPCODE_ADD32,
+ OPCODE_ADDAC_I2R,
+ OPCODE_ADDAC_R2I,
+ OPCODE_ADDAR2,
+ OPCODE_ADDCM,
+ OPCODE_ADDWRP,
+ OPCODE_AND128,
+ OPCODE_ARGMAX8,
+ OPCODE_ASL,
+ OPCODE_ASL32,
+ OPCODE_ASLACM,
+ OPCODE_ASLM,
+ OPCODE_ASLM32,
+ OPCODE_ASR,
+ OPCODE_ASR32,
+ OPCODE_ASRAC,
+ OPCODE_ASRM,
+ OPCODE_BITFEXT,
+ OPCODE_BITFINS,
+ OPCODE_CLB_C,
+ OPCODE_CLB_R,
+ OPCODE_CMP8,
+ OPCODE_CMP_I,
+ OPCODE_CMP_R,
+ OPCODE_EXT,
+ OPCODE_EXT_R,
+ OPCODE_EXT32_I,
+ OPCODE_EXT32_R,
+ OPCODE_EXTUI4,
+ OPCODE_LSLM,
+ OPCODE_LSRM,
+ OPCODE_MAX8,
+ OPCODE_MEAN,
+ OPCODE_MEAN32,
+ OPCODE_MIN8,
+ OPCODE_MINCLB_C,
+ OPCODE_MINCLB_R,
+ OPCODE_NOT128,
+ OPCODE_OR128,
+ OPCODE_PERM,
+ OPCODE_REDAC,
+ OPCODE_REDAC2,
+ OPCODE_REDAC4,
+ OPCODE_REDACS,
+ OPCODE_SMINCLB_C,
+ OPCODE_SMINCLB_R,
+ OPCODE_STSWAPBM,
+ OPCODE_STSWAPBMU,
+ OPCODE_SUB32,
+ OPCODE_SUBAC_I2R,
+ OPCODE_SUBAC_R2I,
+ OPCODE_SUBARX,
+ OPCODE_SUBCM,
+ OPCODE_SUBMEAN,
+ OPCODE_SUBWRP,
+ OPCODE_TRANS,
+ OPCODE_XOR128,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_RUR_SOV,
+ OPCODE_WUR_SOV,
+ OPCODE_RUR_SAT_MODE,
+ OPCODE_WUR_SAT_MODE,
+ OPCODE_RUR_SAR0,
+ OPCODE_WUR_SAR0,
+ OPCODE_RUR_SAR1,
+ OPCODE_WUR_SAR1,
+ OPCODE_RUR_SAR2,
+ OPCODE_WUR_SAR2,
+ OPCODE_RUR_SAR3,
+ OPCODE_WUR_SAR3,
+ OPCODE_RUR_HSAR0,
+ OPCODE_WUR_HSAR0,
+ OPCODE_RUR_HSAR1,
+ OPCODE_WUR_HSAR1,
+ OPCODE_RUR_HSAR2,
+ OPCODE_WUR_HSAR2,
+ OPCODE_RUR_HSAR3,
+ OPCODE_WUR_HSAR3,
+ OPCODE_RUR_MAX_REG_0,
+ OPCODE_WUR_MAX_REG_0,
+ OPCODE_RUR_MAX_REG_1,
+ OPCODE_WUR_MAX_REG_1,
+ OPCODE_RUR_MAX_REG_2,
+ OPCODE_WUR_MAX_REG_2,
+ OPCODE_RUR_MAX_REG_3,
+ OPCODE_WUR_MAX_REG_3,
+ OPCODE_RUR_ARG_MAX_REG_0,
+ OPCODE_WUR_ARG_MAX_REG_0,
+ OPCODE_RUR_ARG_MAX_REG_1,
+ OPCODE_WUR_ARG_MAX_REG_1,
+ OPCODE_RUR_ARG_MAX_REG_2,
+ OPCODE_WUR_ARG_MAX_REG_2,
+ OPCODE_RUR_ARG_MAX_REG_3,
+ OPCODE_WUR_ARG_MAX_REG_3,
+ OPCODE_RUR_NCO_COUNTER_0,
+ OPCODE_WUR_NCO_COUNTER_0,
+ OPCODE_RUR_NCO_COUNTER_1,
+ OPCODE_WUR_NCO_COUNTER_1,
+ OPCODE_RUR_NCO_COUNTER_2,
+ OPCODE_WUR_NCO_COUNTER_2,
+ OPCODE_RUR_NCO_COUNTER_3,
+ OPCODE_WUR_NCO_COUNTER_3,
+ OPCODE_RUR_INTERP_EXT_N,
+ OPCODE_WUR_INTERP_EXT_N,
+ OPCODE_RUR_INTERP_EXT_L,
+ OPCODE_WUR_INTERP_EXT_L,
+ OPCODE_RUR_LLR_BUF_0,
+ OPCODE_WUR_LLR_BUF_0,
+ OPCODE_RUR_LLR_BUF_1,
+ OPCODE_WUR_LLR_BUF_1,
+ OPCODE_RUR_LLR_BUF_2,
+ OPCODE_WUR_LLR_BUF_2,
+ OPCODE_RUR_LLR_BUF_3,
+ OPCODE_WUR_LLR_BUF_3,
+ OPCODE_RUR_LLR_BUF_4,
+ OPCODE_WUR_LLR_BUF_4,
+ OPCODE_RUR_LLR_BUF_5,
+ OPCODE_WUR_LLR_BUF_5,
+ OPCODE_RUR_LLR_BUF_6,
+ OPCODE_WUR_LLR_BUF_6,
+ OPCODE_RUR_LLR_BUF_7,
+ OPCODE_WUR_LLR_BUF_7,
+ OPCODE_RUR_LLR_BUF_8,
+ OPCODE_WUR_LLR_BUF_8,
+ OPCODE_RUR_LLR_BUF_9,
+ OPCODE_WUR_LLR_BUF_9,
+ OPCODE_RUR_LLR_BUF_10,
+ OPCODE_WUR_LLR_BUF_10,
+ OPCODE_RUR_LLR_BUF_11,
+ OPCODE_WUR_LLR_BUF_11,
+ OPCODE_RUR_LLR_BUF_12,
+ OPCODE_WUR_LLR_BUF_12,
+ OPCODE_RUR_LLR_BUF_13,
+ OPCODE_WUR_LLR_BUF_13,
+ OPCODE_RUR_LLR_BUF_14,
+ OPCODE_WUR_LLR_BUF_14,
+ OPCODE_RUR_LLR_BUF_15,
+ OPCODE_WUR_LLR_BUF_15,
+ OPCODE_RUR_LLR_BUF_16,
+ OPCODE_WUR_LLR_BUF_16,
+ OPCODE_RUR_LLR_BUF_17,
+ OPCODE_WUR_LLR_BUF_17,
+ OPCODE_RUR_LLR_BUF_18,
+ OPCODE_WUR_LLR_BUF_18,
+ OPCODE_RUR_LLR_BUF_19,
+ OPCODE_WUR_LLR_BUF_19,
+ OPCODE_RUR_LLR_BUF_20,
+ OPCODE_WUR_LLR_BUF_20,
+ OPCODE_RUR_LLR_BUF_21,
+ OPCODE_WUR_LLR_BUF_21,
+ OPCODE_RUR_LLR_BUF_22,
+ OPCODE_WUR_LLR_BUF_22,
+ OPCODE_RUR_LLR_BUF_23,
+ OPCODE_WUR_LLR_BUF_23,
+ OPCODE_RUR_SMOD_BUF_0,
+ OPCODE_WUR_SMOD_BUF_0,
+ OPCODE_RUR_SMOD_BUF_1,
+ OPCODE_WUR_SMOD_BUF_1,
+ OPCODE_RUR_SMOD_BUF_2,
+ OPCODE_WUR_SMOD_BUF_2,
+ OPCODE_RUR_SMOD_BUF_3,
+ OPCODE_WUR_SMOD_BUF_3,
+ OPCODE_RUR_SMOD_BUF_4,
+ OPCODE_WUR_SMOD_BUF_4,
+ OPCODE_RUR_SMOD_BUF_5,
+ OPCODE_WUR_SMOD_BUF_5,
+ OPCODE_RUR_SMOD_BUF_6,
+ OPCODE_WUR_SMOD_BUF_6,
+ OPCODE_RUR_SMOD_BUF_7,
+ OPCODE_WUR_SMOD_BUF_7,
+ OPCODE_RUR_WEIGHT_REG,
+ OPCODE_WUR_WEIGHT_REG,
+ OPCODE_RUR_SCALE_REG,
+ OPCODE_WUR_SCALE_REG,
+ OPCODE_RUR_LLR_POS,
+ OPCODE_WUR_LLR_POS,
+ OPCODE_RUR_SMOD_POS,
+ OPCODE_WUR_SMOD_POS,
+ OPCODE_RUR_PERM_REG,
+ OPCODE_WUR_PERM_REG,
+ OPCODE_RUR_SMOD_OFFSET_TABLE_0,
+ OPCODE_WUR_SMOD_OFFSET_TABLE_0,
+ OPCODE_RUR_SMOD_OFFSET_TABLE_1,
+ OPCODE_WUR_SMOD_OFFSET_TABLE_1,
+ OPCODE_RUR_SMOD_OFFSET_TABLE_2,
+ OPCODE_WUR_SMOD_OFFSET_TABLE_2,
+ OPCODE_RUR_SMOD_OFFSET_TABLE_3,
+ OPCODE_WUR_SMOD_OFFSET_TABLE_3,
+ OPCODE_RUR_PHASOR_N,
+ OPCODE_WUR_PHASOR_N,
+ OPCODE_RUR_PHASOR_OFFSET,
+ OPCODE_WUR_PHASOR_OFFSET
+};
+
+
+/* Slot-specific opcode decode functions. */
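+/* Each decoder extracts bit fields from the raw instruction buffer with
+   the generated Field_*_Slot_*_get accessors and compares them against
+   fixed encodings, returning the matching identifier from enum
+   xtensa_opcode_id on the first hit.  For example, in Slot_inst_decode
+   below, an instruction word whose fld2021, sa4, sae4, fld2019 and op0
+   fields decode to 3, 0, 0, 4 and 0 is reported as OPCODE_LCM_PINC_X. */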
+
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2021_Slot_inst_get (insn))
+ {
+ case 3:
+ if (Field_sa4_Slot_inst_get (insn) == 0 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM_PINC_X;
+ if (Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM_PINC_X;
+ break;
+ case 4:
+ if (Field_sa4_Slot_inst_get (insn) == 0 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM_X;
+ if (Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM_X;
+ break;
+ case 5:
+ if (Field_sa4_Slot_inst_get (insn) == 0 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM_XU;
+ if (Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM_XU;
+ break;
+ case 6:
+ if (Field_sa4_Slot_inst_get (insn) == 0 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LP_X;
+ break;
+ case 7:
+ if (Field_sa4_Slot_inst_get (insn) == 0 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LQ_X;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2035_Slot_inst_get (insn) == 2 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2047_Slot_inst_get (insn) == 0)
+ return OPCODE_LQ;
+ switch (Field_dsp340050b49a6c_fld2037_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM_PINC;
+ break;
+ case 1:
+ if (Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM_U;
+ break;
+ case 2:
+ if (Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LP;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2048_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LAC2X32;
+ break;
+ case 1:
+ if (Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LAC32_R;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2082inst_Slot_inst_get (insn) == 0 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3)
+ return OPCODE_LAC_IH;
+ if (Field_dsp340050b49a6c_fld2083inst_Slot_inst_get (insn) == 1 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3)
+ return OPCODE_LAC_IL;
+ if (Field_dsp340050b49a6c_fld2084inst_Slot_inst_get (insn) == 2 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3)
+ return OPCODE_LAC_RH;
+ switch (Field_dsp340050b49a6c_fld2085inst_Slot_inst_get (insn))
+ {
+ case 3:
+ if (Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3)
+ return OPCODE_LAC2X64_1;
+ break;
+ case 19:
+ if (Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3)
+ return OPCODE_LAC2X64_2;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2086inst_Slot_inst_get (insn) == 2 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld3634_Slot_inst_get (insn) == 0)
+ return OPCODE_LAC_RL;
+ if (Field_dsp340050b49a6c_fld2088inst_Slot_inst_get (insn) == 3 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld3633inst_Slot_inst_get (insn) == 0)
+ return OPCODE_LAC2X64_3;
+ if (Field_dsp340050b49a6c_fld2089inst_Slot_inst_get (insn) == 1 &&
+ Field_r2_Slot_inst_get (insn) == 1 &&
+ Field_bbi4_Slot_inst_get (insn) == 0 &&
+ Field_op0_Slot_inst_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld3631inst_Slot_inst_get (insn) == 0)
+ return OPCODE_LAC2X64_0;
+ if (Field_dsp340050b49a6c_fld2090inst_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld2091inst_Slot_inst_get (insn) == 10 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld2092inst_Slot_inst_get (insn) == 88 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ if (Field_dsp340050b49a6c_fld2094inst_Slot_inst_get (insn) == 89 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2095inst_Slot_inst_get (insn) == 90 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ if (Field_dsp340050b49a6c_fld2096inst_Slot_inst_get (insn) == 182 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2098inst_Slot_inst_get (insn) == 183 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2099inst_Slot_inst_get (insn) == 92 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ if (Field_dsp340050b49a6c_fld2100inst_Slot_inst_get (insn) == 186 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2101inst_Slot_inst_get (insn) == 187 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld2102inst_Slot_inst_get (insn) == 188 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2103inst_Slot_inst_get (insn) == 189 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld2104inst_Slot_inst_get (insn) == 190 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2105inst_Slot_inst_get (insn) == 191 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2106inst_Slot_inst_get (insn) == 18 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_PUSH128_PQ;
+ if (Field_dsp340050b49a6c_fld2107inst_Slot_inst_get (insn) == 152 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_0;
+ if (Field_dsp340050b49a6c_fld2108inst_Slot_inst_get (insn) == 153 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_2;
+ if (Field_dsp340050b49a6c_fld2109inst_Slot_inst_get (insn) == 154 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_3;
+ if (Field_dsp340050b49a6c_fld2110inst_Slot_inst_get (insn) == 155 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_5;
+ if (Field_dsp340050b49a6c_fld2111inst_Slot_inst_get (insn) == 156 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_4;
+ if (Field_dsp340050b49a6c_fld2112inst_Slot_inst_get (insn) == 301 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld2113inst_Slot_inst_get (insn) == 317 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld2114inst_Slot_inst_get (insn) == 302 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld2115inst_Slot_inst_get (insn) == 303 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld2116inst_Slot_inst_get (insn) == 318 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld2117inst_Slot_inst_get (insn) == 319 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SOV;
+ if (Field_dsp340050b49a6c_fld2118inst_Slot_inst_get (insn) == 208 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2PQ_1;
+ if (Field_dsp340050b49a6c_fld2119inst_Slot_inst_get (insn) == 417 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_WGHT;
+ if (Field_dsp340050b49a6c_fld2120inst_Slot_inst_get (insn) == 6913 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ if (Field_dsp340050b49a6c_fld2122inst_Slot_inst_get (insn) == 6929 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ if (Field_dsp340050b49a6c_fld2123inst_Slot_inst_get (insn) == 6945 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ if (Field_dsp340050b49a6c_fld2124inst_Slot_inst_get (insn) == 6961 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ if (Field_dsp340050b49a6c_fld2125inst_Slot_inst_get (insn) == 6977 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ if (Field_dsp340050b49a6c_fld2126inst_Slot_inst_get (insn) == 6993 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ if (Field_dsp340050b49a6c_fld2127inst_Slot_inst_get (insn) == 7009 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ if (Field_dsp340050b49a6c_fld2128inst_Slot_inst_get (insn) == 7025 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ if (Field_dsp340050b49a6c_fld2129inst_Slot_inst_get (insn) == 1761 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2035_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ if (Field_dsp340050b49a6c_fld2131inst_Slot_inst_get (insn) == 1777 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2035_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ if (Field_dsp340050b49a6c_fld2132inst_Slot_inst_get (insn) == 105 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3659inst_Slot_inst_get (insn) == 0)
+ return OPCODE_WRTBSIGQ;
+ if (Field_dsp340050b49a6c_fld2133inst_Slot_inst_get (insn) == 53 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3660inst_Slot_inst_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ if (Field_dsp340050b49a6c_fld2134inst_Slot_inst_get (insn) == 53 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2050_Slot_inst_get (insn) == 0)
+ return OPCODE_CLRAC;
+ if (Field_dsp340050b49a6c_fld2136inst_Slot_inst_get (insn) == 55 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2050_Slot_inst_get (insn) == 0)
+ return OPCODE_POP16LLR_1;
+ if (Field_dsp340050b49a6c_fld2137inst_Slot_inst_get (insn) == 34 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_WRTIEP;
+ if (Field_dsp340050b49a6c_fld2138inst_Slot_inst_get (insn) == 560 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2139inst_Slot_inst_get (insn) == 561 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2140inst_Slot_inst_get (insn) == 562 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2141inst_Slot_inst_get (insn) == 563 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_03;
+ if (Field_dsp340050b49a6c_fld2142inst_Slot_inst_get (insn) == 564 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2143inst_Slot_inst_get (insn) == 565 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_21;
+ if (Field_dsp340050b49a6c_fld2144inst_Slot_inst_get (insn) == 283 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2047_Slot_inst_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_23;
+ if (Field_dsp340050b49a6c_fld2145inst_Slot_inst_get (insn) == 71 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2037_Slot_inst_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_01;
+ if (Field_dsp340050b49a6c_fld2146inst_Slot_inst_get (insn) == 21 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3649inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2147inst_Slot_inst_get (insn) == 13 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3650inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2149inst_Slot_inst_get (insn) == 5 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3627inst_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld2151inst_Slot_inst_get (insn) == 5 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3657inst_Slot_inst_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ if (Field_dsp340050b49a6c_fld2153inst_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3635inst_Slot_inst_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ if (Field_dsp340050b49a6c_fld2154inst_Slot_inst_get (insn) == 3 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3636inst_Slot_inst_get (insn) == 0)
+ return OPCODE_AR2CM_LN_I;
+ if (Field_dsp340050b49a6c_fld2155inst_Slot_inst_get (insn) == 3 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3637inst_Slot_inst_get (insn) == 0)
+ return OPCODE_AR2CM_LN_R;
+ if (Field_dsp340050b49a6c_fld2156inst_Slot_inst_get (insn) == 3 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LCM;
+ if (Field_dsp340050b49a6c_fld2157inst_Slot_inst_get (insn) == 19 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2158inst_Slot_inst_get (insn) == 27 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2159inst_Slot_inst_get (insn) == 51 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld2160inst_Slot_inst_get (insn) == 59 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ if (Field_dsp340050b49a6c_fld2161inst_Slot_inst_get (insn) == 83 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld2162inst_Slot_inst_get (insn) == 171 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_ASRAC;
+ if (Field_dsp340050b49a6c_fld2163inst_Slot_inst_get (insn) == 187 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CLB_R;
+ if (Field_dsp340050b49a6c_fld2164inst_Slot_inst_get (insn) == 227 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CLB_C;
+ if (Field_dsp340050b49a6c_fld2165inst_Slot_inst_get (insn) == 235 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MINCLB_C;
+ if (Field_dsp340050b49a6c_fld2166inst_Slot_inst_get (insn) == 243 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MINCLB_R;
+ if (Field_dsp340050b49a6c_fld2167inst_Slot_inst_get (insn) == 251 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SMINCLB_C;
+ if (Field_dsp340050b49a6c_fld2168inst_Slot_inst_get (insn) == 147 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2169inst_Slot_inst_get (insn) == 310 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2171inst_Slot_inst_get (insn) == 599 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVAC;
+ if (Field_dsp340050b49a6c_fld2172inst_Slot_inst_get (insn) == 631 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVAC_I2R;
+ if (Field_dsp340050b49a6c_fld2173inst_Slot_inst_get (insn) == 358 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld2174inst_Slot_inst_get (insn) == 711 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVAC_R2I;
+ if (Field_dsp340050b49a6c_fld2175inst_Slot_inst_get (insn) == 5944 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_ADDAC_I2R;
+ if (Field_dsp340050b49a6c_fld2177inst_Slot_inst_get (insn) == 5945 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_ADDAC_R2I;
+ if (Field_dsp340050b49a6c_fld2178inst_Slot_inst_get (insn) == 5946 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_REDAC;
+ if (Field_dsp340050b49a6c_fld2179inst_Slot_inst_get (insn) == 5947 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_REDAC4;
+ if (Field_dsp340050b49a6c_fld2180inst_Slot_inst_get (insn) == 5948 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_REDAC2;
+ if (Field_dsp340050b49a6c_fld2181inst_Slot_inst_get (insn) == 5949 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_REDACS;
+ if (Field_dsp340050b49a6c_fld2182inst_Slot_inst_get (insn) == 5950 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SUBAC_I2R;
+ if (Field_dsp340050b49a6c_fld2183inst_Slot_inst_get (insn) == 5951 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SUBAC_R2I;
+ if (Field_dsp340050b49a6c_fld2184inst_Slot_inst_get (insn) == 374 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3642inst_Slot_inst_get (insn) == 0)
+ return OPCODE_SWAPAC_RI;
+ if (Field_dsp340050b49a6c_fld2185inst_Slot_inst_get (insn) == 3000 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_L;
+ if (Field_dsp340050b49a6c_fld2186inst_Slot_inst_get (insn) == 3001 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_N;
+ if (Field_dsp340050b49a6c_fld2187inst_Slot_inst_get (insn) == 3002 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld2188inst_Slot_inst_get (insn) == 3003 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld2189inst_Slot_inst_get (insn) == 3004 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld2190inst_Slot_inst_get (insn) == 3005 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld2191inst_Slot_inst_get (insn) == 3006 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld2192inst_Slot_inst_get (insn) == 3007 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_SOV;
+ if (Field_dsp340050b49a6c_fld2193inst_Slot_inst_get (insn) == 211 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3662inst_Slot_inst_get (insn) == 0)
+ return OPCODE_SMINCLB_R;
+ if (Field_dsp340050b49a6c_fld2194inst_Slot_inst_get (insn) == 3504 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld2195inst_Slot_inst_get (insn) == 3505 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2196inst_Slot_inst_get (insn) == 3506 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2197inst_Slot_inst_get (insn) == 3507 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2198inst_Slot_inst_get (insn) == 3508 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2199inst_Slot_inst_get (insn) == 3509 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld2200inst_Slot_inst_get (insn) == 3510 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ if (Field_dsp340050b49a6c_fld2201inst_Slot_inst_get (insn) == 3511 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2202inst_Slot_inst_get (insn) == 878 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2254_Slot_inst_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld2203inst_Slot_inst_get (insn) == 879 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2254_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2204inst_Slot_inst_get (insn) == 123 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3656inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2205inst_Slot_inst_get (insn) == 3 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_MOVCM2PQ;
+ if (Field_dsp340050b49a6c_fld2206inst_Slot_inst_get (insn) == 7 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld2207inst_Slot_inst_get (insn) == 19 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT32_I;
+ if (Field_dsp340050b49a6c_fld2208inst_Slot_inst_get (insn) == 23 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2209inst_Slot_inst_get (insn) == 27 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2210inst_Slot_inst_get (insn) == 31 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2211inst_Slot_inst_get (insn) == 35 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT32_R;
+ if (Field_dsp340050b49a6c_fld2212inst_Slot_inst_get (insn) == 39 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2213inst_Slot_inst_get (insn) == 23 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3647inst_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2214inst_Slot_inst_get (insn) == 27 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3648inst_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2215inst_Slot_inst_get (insn) == 31 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3630inst_Slot_inst_get (insn) == 0)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2216inst_Slot_inst_get (insn) == 7 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3644inst_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2217inst_Slot_inst_get (insn) == 7 &&
+ Field_sae4_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 3 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3645inst_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2218inst_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2021_Slot_inst_get (insn) == 6 &&
+ Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT;
+ if (Field_dsp340050b49a6c_fld2219inst_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2021_Slot_inst_get (insn) == 6 &&
+ Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_EXT_R;
+ if (Field_dsp340050b49a6c_fld2220inst_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2021_Slot_inst_get (insn) == 7 &&
+ Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_PUSH128_M;
+ if (Field_dsp340050b49a6c_fld2221inst_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2021_Slot_inst_get (insn) == 7 &&
+ Field_sa4_Slot_inst_get (insn) == 1 &&
+ Field_sae4_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 4 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC2X64_0;
+ if (Field_dsp340050b49a6c_fld2222inst_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC2X32;
+ if (Field_dsp340050b49a6c_fld2223inst_Slot_inst_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC32_R;
+ if (Field_dsp340050b49a6c_fld2224inst_Slot_inst_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM_PINC;
+ if (Field_dsp340050b49a6c_fld2225inst_Slot_inst_get (insn) == 5 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LUT0;
+ if (Field_dsp340050b49a6c_fld2226inst_Slot_inst_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LUT1;
+ if (Field_dsp340050b49a6c_fld2227inst_Slot_inst_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM_U;
+ if (Field_dsp340050b49a6c_fld2228inst_Slot_inst_get (insn) == 9 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LUT2;
+ if (Field_dsp340050b49a6c_fld2229inst_Slot_inst_get (insn) == 11 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SCM;
+ if (Field_dsp340050b49a6c_fld2230inst_Slot_inst_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_LUT3;
+ if (Field_dsp340050b49a6c_fld2231inst_Slot_inst_get (insn) == 14 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_STORE_P;
+ if (Field_dsp340050b49a6c_fld2232inst_Slot_inst_get (insn) == 14 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC_IH;
+ if (Field_dsp340050b49a6c_fld2234inst_Slot_inst_get (insn) == 15 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC_IL;
+ if (Field_dsp340050b49a6c_fld2235inst_Slot_inst_get (insn) == 16 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_COMB_AR;
+ if (Field_dsp340050b49a6c_fld2236inst_Slot_inst_get (insn) == 18 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_STSWAPBM;
+ if (Field_dsp340050b49a6c_fld2237inst_Slot_inst_get (insn) == 18 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC_RH;
+ if (Field_dsp340050b49a6c_fld2238inst_Slot_inst_get (insn) == 35 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC2X64_1;
+ if (Field_dsp340050b49a6c_fld2239inst_Slot_inst_get (insn) == 39 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC2X64_2;
+ if (Field_dsp340050b49a6c_fld2240inst_Slot_inst_get (insn) == 20 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC_RL;
+ if (Field_dsp340050b49a6c_fld2241inst_Slot_inst_get (insn) == 41 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_SAC2X64_3;
+ if (Field_dsp340050b49a6c_fld2242inst_Slot_inst_get (insn) == 45 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3661inst_Slot_inst_get (insn) == 0)
+ return OPCODE_ASLACM;
+ if (Field_dsp340050b49a6c_fld2243inst_Slot_inst_get (insn) == 21 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_STSWAPBMU;
+ if (Field_dsp340050b49a6c_fld2244inst_Slot_inst_get (insn) == 92 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2245inst_Slot_inst_get (insn) == 93 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_CM2AR_LN_I;
+ if (Field_dsp340050b49a6c_fld2246inst_Slot_inst_get (insn) == 47 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3638inst_Slot_inst_get (insn) == 0)
+ return OPCODE_CM2AR_LN_R;
+ if (Field_dsp340050b49a6c_fld2247inst_Slot_inst_get (insn) == 24 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0)
+ return OPCODE_STORE_Q;
+ if (Field_dsp340050b49a6c_fld2248inst_Slot_inst_get (insn) == 50 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3651inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2M_0;
+ if (Field_dsp340050b49a6c_fld2249inst_Slot_inst_get (insn) == 51 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3655inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2M_3;
+ if (Field_dsp340050b49a6c_fld2250inst_Slot_inst_get (insn) == 26 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3653inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2M_1;
+ if (Field_dsp340050b49a6c_fld2251inst_Slot_inst_get (insn) == 216 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3658inst_Slot_inst_get (insn) == 0)
+ return OPCODE_PUSH32;
+ if (Field_dsp340050b49a6c_fld2252inst_Slot_inst_get (insn) == 217 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3643inst_Slot_inst_get (insn) == 0)
+ return OPCODE_SWAPB;
+ if (Field_dsp340050b49a6c_fld2253inst_Slot_inst_get (insn) == 109 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3639inst_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV2AC32_I;
+ if (Field_dsp340050b49a6c_fld2255inst_Slot_inst_get (insn) == 55 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3640inst_Slot_inst_get (insn) == 0)
+ return OPCODE_MOV2AC32_R;
+ if (Field_dsp340050b49a6c_fld2257inst_Slot_inst_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get (insn) == 6 &&
+ Field_op0_Slot_inst_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3654inst_Slot_inst_get (insn) == 0)
+ return OPCODE_POP128_2M_2;
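+ /* None of the DSP-extension bit patterns above matched; fall through
+    to the base Xtensa opcode space, decoded by nested switches on the
+    op0/op1/op2 and register fields. */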
+ switch (Field_op0_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ break;
+ case 2:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RET;
+ case 1:
+ return OPCODE_RETW;
+ case 2:
+ return OPCODE_JX;
+ }
+ break;
+ case 3:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_CALLX0;
+ case 1:
+ return OPCODE_CALLX4;
+ case 2:
+ return OPCODE_CALLX8;
+ case 3:
+ return OPCODE_CALLX12;
+ }
+ break;
+ }
+ break;
+ case 1:
+ return OPCODE_MOVSP;
+ case 2:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ISYNC;
+ case 1:
+ return OPCODE_RSYNC;
+ case 2:
+ return OPCODE_ESYNC;
+ case 3:
+ return OPCODE_DSYNC;
+ case 8:
+ return OPCODE_EXCW;
+ case 12:
+ return OPCODE_MEMW;
+ case 13:
+ return OPCODE_EXTW;
+ case 15:
+ return OPCODE_NOP;
+ }
+ }
+ break;
+ case 3:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RFE;
+ case 2:
+ return OPCODE_RFDE;
+ case 4:
+ return OPCODE_RFWO;
+ case 5:
+ return OPCODE_RFWU;
+ }
+ break;
+ case 1:
+ return OPCODE_RFI;
+ }
+ break;
+ case 4:
+ return OPCODE_BREAK;
+ case 5:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ break;
+ }
+ break;
+ case 6:
+ return OPCODE_RSIL;
+ case 7:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ break;
+ case 8:
+ return OPCODE_ANY4;
+ case 9:
+ return OPCODE_ALL4;
+ case 10:
+ return OPCODE_ANY8;
+ case 11:
+ return OPCODE_ALL8;
+ }
+ break;
+ case 1:
+ return OPCODE_AND;
+ case 2:
+ return OPCODE_OR;
+ case 3:
+ return OPCODE_XOR;
+ case 4:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ break;
+ case 2:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ break;
+ case 3:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ break;
+ case 4:
+ if (Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ break;
+ case 6:
+ return OPCODE_RER;
+ case 7:
+ return OPCODE_WER;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ break;
+ case 14:
+ return OPCODE_NSA;
+ case 15:
+ return OPCODE_NSAU;
+ }
+ break;
+ case 5:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 3:
+ return OPCODE_RITLB0;
+ case 4:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ break;
+ case 5:
+ return OPCODE_PITLB;
+ case 6:
+ return OPCODE_WITLB;
+ case 7:
+ return OPCODE_RITLB1;
+ case 11:
+ return OPCODE_RDTLB0;
+ case 12:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ break;
+ case 13:
+ return OPCODE_PDTLB;
+ case 14:
+ return OPCODE_WDTLB;
+ case 15:
+ return OPCODE_RDTLB1;
+ }
+ break;
+ case 6:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_NEG;
+ case 1:
+ return OPCODE_ABS;
+ }
+ break;
+ case 8:
+ return OPCODE_ADD;
+ case 9:
+ return OPCODE_ADDX2;
+ case 10:
+ return OPCODE_ADDX4;
+ case 11:
+ return OPCODE_ADDX8;
+ case 12:
+ return OPCODE_SUB;
+ case 13:
+ return OPCODE_SUBX2;
+ case 14:
+ return OPCODE_SUBX4;
+ case 15:
+ return OPCODE_SUBX8;
+ }
+ break;
+ case 1:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
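+ /* SLLI and SRAI each span two op2 values: the high bit of the
+    shift-amount immediate overlaps op2 (an inference from the paired
+    case labels). */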
+ case 0:
+ case 1:
+ return OPCODE_SLLI;
+ case 2:
+ case 3:
+ return OPCODE_SRAI;
+ case 4:
+ return OPCODE_SRLI;
+ case 6:
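+ /* XSR.<sr>: exchange special register, selected by the sr field. */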
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_XSR_LBEG;
+ case 1:
+ return OPCODE_XSR_LEND;
+ case 2:
+ return OPCODE_XSR_LCOUNT;
+ case 3:
+ return OPCODE_XSR_SAR;
+ case 4:
+ return OPCODE_XSR_BR;
+ case 5:
+ return OPCODE_XSR_LITBASE;
+ case 12:
+ return OPCODE_XSR_SCOMPARE1;
+ case 72:
+ return OPCODE_XSR_WINDOWBASE;
+ case 73:
+ return OPCODE_XSR_WINDOWSTART;
+ case 96:
+ return OPCODE_XSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_XSR_ATOMCTL;
+ case 104:
+ return OPCODE_XSR_DDR;
+ case 128:
+ return OPCODE_XSR_IBREAKA0;
+ case 129:
+ return OPCODE_XSR_IBREAKA1;
+ case 144:
+ return OPCODE_XSR_DBREAKA0;
+ case 145:
+ return OPCODE_XSR_DBREAKA1;
+ case 160:
+ return OPCODE_XSR_DBREAKC0;
+ case 161:
+ return OPCODE_XSR_DBREAKC1;
+ case 177:
+ return OPCODE_XSR_EPC1;
+ case 178:
+ return OPCODE_XSR_EPC2;
+ case 179:
+ return OPCODE_XSR_EPC3;
+ case 180:
+ return OPCODE_XSR_EPC4;
+ case 181:
+ return OPCODE_XSR_EPC5;
+ case 182:
+ return OPCODE_XSR_EPC6;
+ case 192:
+ return OPCODE_XSR_DEPC;
+ case 194:
+ return OPCODE_XSR_EPS2;
+ case 195:
+ return OPCODE_XSR_EPS3;
+ case 196:
+ return OPCODE_XSR_EPS4;
+ case 197:
+ return OPCODE_XSR_EPS5;
+ case 198:
+ return OPCODE_XSR_EPS6;
+ case 209:
+ return OPCODE_XSR_EXCSAVE1;
+ case 210:
+ return OPCODE_XSR_EXCSAVE2;
+ case 211:
+ return OPCODE_XSR_EXCSAVE3;
+ case 212:
+ return OPCODE_XSR_EXCSAVE4;
+ case 213:
+ return OPCODE_XSR_EXCSAVE5;
+ case 214:
+ return OPCODE_XSR_EXCSAVE6;
+ case 224:
+ return OPCODE_XSR_CPENABLE;
+ case 228:
+ return OPCODE_XSR_INTENABLE;
+ case 230:
+ return OPCODE_XSR_PS;
+ case 231:
+ return OPCODE_XSR_VECBASE;
+ case 232:
+ return OPCODE_XSR_EXCCAUSE;
+ case 233:
+ return OPCODE_XSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_XSR_CCOUNT;
+ case 236:
+ return OPCODE_XSR_ICOUNT;
+ case 237:
+ return OPCODE_XSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_XSR_EXCVADDR;
+ case 240:
+ return OPCODE_XSR_CCOMPARE0;
+ case 241:
+ return OPCODE_XSR_CCOMPARE1;
+ }
+ break;
+ case 8:
+ return OPCODE_SRC;
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ break;
+ case 10:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ break;
+ case 12:
+ return OPCODE_MUL16U;
+ case 13:
+ return OPCODE_MUL16S;
+ case 15:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_LICT;
+ case 1:
+ return OPCODE_SICT;
+ case 2:
+ return OPCODE_LICW;
+ case 3:
+ return OPCODE_SICW;
+ case 8:
+ return OPCODE_LDCT;
+ case 9:
+ return OPCODE_SDCT;
+ case 14:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ break;
+ }
+ break;
+ }
+ break;
+ case 2:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ANDB;
+ case 1:
+ return OPCODE_ANDBC;
+ case 2:
+ return OPCODE_ORB;
+ case 3:
+ return OPCODE_ORBC;
+ case 4:
+ return OPCODE_XORB;
+ }
+ break;
+ case 3:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
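+ /* RSR.<sr>: read special register. */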
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RSR_LBEG;
+ case 1:
+ return OPCODE_RSR_LEND;
+ case 2:
+ return OPCODE_RSR_LCOUNT;
+ case 3:
+ return OPCODE_RSR_SAR;
+ case 4:
+ return OPCODE_RSR_BR;
+ case 5:
+ return OPCODE_RSR_LITBASE;
+ case 12:
+ return OPCODE_RSR_SCOMPARE1;
+ case 72:
+ return OPCODE_RSR_WINDOWBASE;
+ case 73:
+ return OPCODE_RSR_WINDOWSTART;
+ case 96:
+ return OPCODE_RSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_RSR_ATOMCTL;
+ case 104:
+ return OPCODE_RSR_DDR;
+ case 128:
+ return OPCODE_RSR_IBREAKA0;
+ case 129:
+ return OPCODE_RSR_IBREAKA1;
+ case 144:
+ return OPCODE_RSR_DBREAKA0;
+ case 145:
+ return OPCODE_RSR_DBREAKA1;
+ case 160:
+ return OPCODE_RSR_DBREAKC0;
+ case 161:
+ return OPCODE_RSR_DBREAKC1;
+ case 176:
+ return OPCODE_RSR_176;
+ case 177:
+ return OPCODE_RSR_EPC1;
+ case 178:
+ return OPCODE_RSR_EPC2;
+ case 179:
+ return OPCODE_RSR_EPC3;
+ case 180:
+ return OPCODE_RSR_EPC4;
+ case 181:
+ return OPCODE_RSR_EPC5;
+ case 182:
+ return OPCODE_RSR_EPC6;
+ case 192:
+ return OPCODE_RSR_DEPC;
+ case 194:
+ return OPCODE_RSR_EPS2;
+ case 195:
+ return OPCODE_RSR_EPS3;
+ case 196:
+ return OPCODE_RSR_EPS4;
+ case 197:
+ return OPCODE_RSR_EPS5;
+ case 198:
+ return OPCODE_RSR_EPS6;
+ case 208:
+ return OPCODE_RSR_208;
+ case 209:
+ return OPCODE_RSR_EXCSAVE1;
+ case 210:
+ return OPCODE_RSR_EXCSAVE2;
+ case 211:
+ return OPCODE_RSR_EXCSAVE3;
+ case 212:
+ return OPCODE_RSR_EXCSAVE4;
+ case 213:
+ return OPCODE_RSR_EXCSAVE5;
+ case 214:
+ return OPCODE_RSR_EXCSAVE6;
+ case 224:
+ return OPCODE_RSR_CPENABLE;
+ case 226:
+ return OPCODE_RSR_INTERRUPT;
+ case 228:
+ return OPCODE_RSR_INTENABLE;
+ case 230:
+ return OPCODE_RSR_PS;
+ case 231:
+ return OPCODE_RSR_VECBASE;
+ case 232:
+ return OPCODE_RSR_EXCCAUSE;
+ case 233:
+ return OPCODE_RSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_RSR_CCOUNT;
+ case 235:
+ return OPCODE_RSR_PRID;
+ case 236:
+ return OPCODE_RSR_ICOUNT;
+ case 237:
+ return OPCODE_RSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_RSR_EXCVADDR;
+ case 240:
+ return OPCODE_RSR_CCOMPARE0;
+ case 241:
+ return OPCODE_RSR_CCOMPARE1;
+ }
+ break;
+ case 1:
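+ /* WSR.<sr>: write special register. */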
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_WSR_LBEG;
+ case 1:
+ return OPCODE_WSR_LEND;
+ case 2:
+ return OPCODE_WSR_LCOUNT;
+ case 3:
+ return OPCODE_WSR_SAR;
+ case 4:
+ return OPCODE_WSR_BR;
+ case 5:
+ return OPCODE_WSR_LITBASE;
+ case 12:
+ return OPCODE_WSR_SCOMPARE1;
+ case 72:
+ return OPCODE_WSR_WINDOWBASE;
+ case 73:
+ return OPCODE_WSR_WINDOWSTART;
+ case 89:
+ return OPCODE_WSR_MMID;
+ case 96:
+ return OPCODE_WSR_IBREAKENABLE;
+ case 99:
+ return OPCODE_WSR_ATOMCTL;
+ case 104:
+ return OPCODE_WSR_DDR;
+ case 128:
+ return OPCODE_WSR_IBREAKA0;
+ case 129:
+ return OPCODE_WSR_IBREAKA1;
+ case 144:
+ return OPCODE_WSR_DBREAKA0;
+ case 145:
+ return OPCODE_WSR_DBREAKA1;
+ case 160:
+ return OPCODE_WSR_DBREAKC0;
+ case 161:
+ return OPCODE_WSR_DBREAKC1;
+ case 176:
+ return OPCODE_WSR_176;
+ case 177:
+ return OPCODE_WSR_EPC1;
+ case 178:
+ return OPCODE_WSR_EPC2;
+ case 179:
+ return OPCODE_WSR_EPC3;
+ case 180:
+ return OPCODE_WSR_EPC4;
+ case 181:
+ return OPCODE_WSR_EPC5;
+ case 182:
+ return OPCODE_WSR_EPC6;
+ case 192:
+ return OPCODE_WSR_DEPC;
+ case 194:
+ return OPCODE_WSR_EPS2;
+ case 195:
+ return OPCODE_WSR_EPS3;
+ case 196:
+ return OPCODE_WSR_EPS4;
+ case 197:
+ return OPCODE_WSR_EPS5;
+ case 198:
+ return OPCODE_WSR_EPS6;
+ case 209:
+ return OPCODE_WSR_EXCSAVE1;
+ case 210:
+ return OPCODE_WSR_EXCSAVE2;
+ case 211:
+ return OPCODE_WSR_EXCSAVE3;
+ case 212:
+ return OPCODE_WSR_EXCSAVE4;
+ case 213:
+ return OPCODE_WSR_EXCSAVE5;
+ case 214:
+ return OPCODE_WSR_EXCSAVE6;
+ case 224:
+ return OPCODE_WSR_CPENABLE;
+ case 226:
+ return OPCODE_WSR_INTSET;
+ case 227:
+ return OPCODE_WSR_INTCLEAR;
+ case 228:
+ return OPCODE_WSR_INTENABLE;
+ case 230:
+ return OPCODE_WSR_PS;
+ case 231:
+ return OPCODE_WSR_VECBASE;
+ case 232:
+ return OPCODE_WSR_EXCCAUSE;
+ case 233:
+ return OPCODE_WSR_DEBUGCAUSE;
+ case 234:
+ return OPCODE_WSR_CCOUNT;
+ case 236:
+ return OPCODE_WSR_ICOUNT;
+ case 237:
+ return OPCODE_WSR_ICOUNTLEVEL;
+ case 238:
+ return OPCODE_WSR_EXCVADDR;
+ case 240:
+ return OPCODE_WSR_CCOMPARE0;
+ case 241:
+ return OPCODE_WSR_CCOMPARE1;
+ }
+ break;
+ case 2:
+ return OPCODE_SEXT;
+ case 3:
+ return OPCODE_CLAMPS;
+ case 4:
+ return OPCODE_MIN;
+ case 5:
+ return OPCODE_MAX;
+ case 6:
+ return OPCODE_MINU;
+ case 7:
+ return OPCODE_MAXU;
+ case 8:
+ return OPCODE_MOVEQZ;
+ case 9:
+ return OPCODE_MOVNEZ;
+ case 10:
+ return OPCODE_MOVLTZ;
+ case 11:
+ return OPCODE_MOVGEZ;
+ case 12:
+ return OPCODE_MOVF;
+ case 13:
+ return OPCODE_MOVT;
+ case 14:
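+ /* RUR.<st>: read (TIE-defined) user register, selected by the st
+    field. */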
+ switch (Field_st_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_RUR_EXPSTATE;
+ case 1:
+ return OPCODE_RUR_SOV;
+ case 2:
+ return OPCODE_RUR_SAT_MODE;
+ case 3:
+ return OPCODE_RUR_SAR0;
+ case 4:
+ return OPCODE_RUR_SAR1;
+ case 5:
+ return OPCODE_RUR_SAR2;
+ case 6:
+ return OPCODE_RUR_SAR3;
+ case 7:
+ return OPCODE_RUR_HSAR0;
+ case 8:
+ return OPCODE_RUR_HSAR1;
+ case 9:
+ return OPCODE_RUR_HSAR2;
+ case 10:
+ return OPCODE_RUR_HSAR3;
+ case 11:
+ return OPCODE_RUR_MAX_REG_0;
+ case 12:
+ return OPCODE_RUR_MAX_REG_1;
+ case 13:
+ return OPCODE_RUR_MAX_REG_2;
+ case 14:
+ return OPCODE_RUR_MAX_REG_3;
+ case 15:
+ return OPCODE_RUR_ARG_MAX_REG_0;
+ case 16:
+ return OPCODE_RUR_ARG_MAX_REG_1;
+ case 17:
+ return OPCODE_RUR_ARG_MAX_REG_2;
+ case 18:
+ return OPCODE_RUR_ARG_MAX_REG_3;
+ case 19:
+ return OPCODE_RUR_NCO_COUNTER_0;
+ case 20:
+ return OPCODE_RUR_NCO_COUNTER_1;
+ case 21:
+ return OPCODE_RUR_NCO_COUNTER_2;
+ case 22:
+ return OPCODE_RUR_NCO_COUNTER_3;
+ case 23:
+ return OPCODE_RUR_INTERP_EXT_N;
+ case 24:
+ return OPCODE_RUR_INTERP_EXT_L;
+ case 25:
+ return OPCODE_RUR_LLR_BUF_0;
+ case 26:
+ return OPCODE_RUR_LLR_BUF_1;
+ case 27:
+ return OPCODE_RUR_LLR_BUF_2;
+ case 28:
+ return OPCODE_RUR_LLR_BUF_3;
+ case 29:
+ return OPCODE_RUR_LLR_BUF_4;
+ case 30:
+ return OPCODE_RUR_LLR_BUF_5;
+ case 31:
+ return OPCODE_RUR_LLR_BUF_6;
+ case 32:
+ return OPCODE_RUR_LLR_BUF_7;
+ case 33:
+ return OPCODE_RUR_LLR_BUF_8;
+ case 34:
+ return OPCODE_RUR_LLR_BUF_9;
+ case 35:
+ return OPCODE_RUR_LLR_BUF_10;
+ case 36:
+ return OPCODE_RUR_LLR_BUF_11;
+ case 37:
+ return OPCODE_RUR_LLR_BUF_12;
+ case 38:
+ return OPCODE_RUR_LLR_BUF_13;
+ case 39:
+ return OPCODE_RUR_LLR_BUF_14;
+ case 40:
+ return OPCODE_RUR_LLR_BUF_15;
+ case 41:
+ return OPCODE_RUR_LLR_BUF_16;
+ case 42:
+ return OPCODE_RUR_LLR_BUF_17;
+ case 43:
+ return OPCODE_RUR_LLR_BUF_18;
+ case 44:
+ return OPCODE_RUR_LLR_BUF_19;
+ case 45:
+ return OPCODE_RUR_LLR_BUF_20;
+ case 46:
+ return OPCODE_RUR_LLR_BUF_21;
+ case 47:
+ return OPCODE_RUR_LLR_BUF_22;
+ case 48:
+ return OPCODE_RUR_LLR_BUF_23;
+ case 49:
+ return OPCODE_RUR_SMOD_BUF_0;
+ case 50:
+ return OPCODE_RUR_SMOD_BUF_1;
+ case 51:
+ return OPCODE_RUR_SMOD_BUF_2;
+ case 52:
+ return OPCODE_RUR_SMOD_BUF_3;
+ case 53:
+ return OPCODE_RUR_SMOD_BUF_4;
+ case 54:
+ return OPCODE_RUR_SMOD_BUF_5;
+ case 55:
+ return OPCODE_RUR_SMOD_BUF_6;
+ case 56:
+ return OPCODE_RUR_SMOD_BUF_7;
+ case 57:
+ return OPCODE_RUR_WEIGHT_REG;
+ case 58:
+ return OPCODE_RUR_SCALE_REG;
+ case 59:
+ return OPCODE_RUR_LLR_POS;
+ case 60:
+ return OPCODE_RUR_SMOD_POS;
+ case 61:
+ return OPCODE_RUR_PERM_REG;
+ case 62:
+ return OPCODE_RUR_SMOD_OFFSET_TABLE_0;
+ case 63:
+ return OPCODE_RUR_SMOD_OFFSET_TABLE_1;
+ case 64:
+ return OPCODE_RUR_SMOD_OFFSET_TABLE_2;
+ case 65:
+ return OPCODE_RUR_SMOD_OFFSET_TABLE_3;
+ case 66:
+ return OPCODE_RUR_PHASOR_N;
+ case 67:
+ return OPCODE_RUR_PHASOR_OFFSET;
+ case 231:
+ return OPCODE_RUR_THREADPTR;
+ case 232:
+ return OPCODE_RUR_FCR;
+ case 233:
+ return OPCODE_RUR_FSR;
+ }
+ break;
+ case 15:
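+ /* WUR.<sr>: write user register, selected by the sr field. */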
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_WUR_EXPSTATE;
+ case 1:
+ return OPCODE_WUR_SOV;
+ case 2:
+ return OPCODE_WUR_SAT_MODE;
+ case 3:
+ return OPCODE_WUR_SAR0;
+ case 4:
+ return OPCODE_WUR_SAR1;
+ case 5:
+ return OPCODE_WUR_SAR2;
+ case 6:
+ return OPCODE_WUR_SAR3;
+ case 7:
+ return OPCODE_WUR_HSAR0;
+ case 8:
+ return OPCODE_WUR_HSAR1;
+ case 9:
+ return OPCODE_WUR_HSAR2;
+ case 10:
+ return OPCODE_WUR_HSAR3;
+ case 11:
+ return OPCODE_WUR_MAX_REG_0;
+ case 12:
+ return OPCODE_WUR_MAX_REG_1;
+ case 13:
+ return OPCODE_WUR_MAX_REG_2;
+ case 14:
+ return OPCODE_WUR_MAX_REG_3;
+ case 15:
+ return OPCODE_WUR_ARG_MAX_REG_0;
+ case 16:
+ return OPCODE_WUR_ARG_MAX_REG_1;
+ case 17:
+ return OPCODE_WUR_ARG_MAX_REG_2;
+ case 18:
+ return OPCODE_WUR_ARG_MAX_REG_3;
+ case 19:
+ return OPCODE_WUR_NCO_COUNTER_0;
+ case 20:
+ return OPCODE_WUR_NCO_COUNTER_1;
+ case 21:
+ return OPCODE_WUR_NCO_COUNTER_2;
+ case 22:
+ return OPCODE_WUR_NCO_COUNTER_3;
+ case 23:
+ return OPCODE_WUR_INTERP_EXT_N;
+ case 24:
+ return OPCODE_WUR_INTERP_EXT_L;
+ case 25:
+ return OPCODE_WUR_LLR_BUF_0;
+ case 26:
+ return OPCODE_WUR_LLR_BUF_1;
+ case 27:
+ return OPCODE_WUR_LLR_BUF_2;
+ case 28:
+ return OPCODE_WUR_LLR_BUF_3;
+ case 29:
+ return OPCODE_WUR_LLR_BUF_4;
+ case 30:
+ return OPCODE_WUR_LLR_BUF_5;
+ case 31:
+ return OPCODE_WUR_LLR_BUF_6;
+ case 32:
+ return OPCODE_WUR_LLR_BUF_7;
+ case 33:
+ return OPCODE_WUR_LLR_BUF_8;
+ case 34:
+ return OPCODE_WUR_LLR_BUF_9;
+ case 35:
+ return OPCODE_WUR_LLR_BUF_10;
+ case 36:
+ return OPCODE_WUR_LLR_BUF_11;
+ case 37:
+ return OPCODE_WUR_LLR_BUF_12;
+ case 38:
+ return OPCODE_WUR_LLR_BUF_13;
+ case 39:
+ return OPCODE_WUR_LLR_BUF_14;
+ case 40:
+ return OPCODE_WUR_LLR_BUF_15;
+ case 41:
+ return OPCODE_WUR_LLR_BUF_16;
+ case 42:
+ return OPCODE_WUR_LLR_BUF_17;
+ case 43:
+ return OPCODE_WUR_LLR_BUF_18;
+ case 44:
+ return OPCODE_WUR_LLR_BUF_19;
+ case 45:
+ return OPCODE_WUR_LLR_BUF_20;
+ case 46:
+ return OPCODE_WUR_LLR_BUF_21;
+ case 47:
+ return OPCODE_WUR_LLR_BUF_22;
+ case 48:
+ return OPCODE_WUR_LLR_BUF_23;
+ case 49:
+ return OPCODE_WUR_SMOD_BUF_0;
+ case 50:
+ return OPCODE_WUR_SMOD_BUF_1;
+ case 51:
+ return OPCODE_WUR_SMOD_BUF_2;
+ case 52:
+ return OPCODE_WUR_SMOD_BUF_3;
+ case 53:
+ return OPCODE_WUR_SMOD_BUF_4;
+ case 54:
+ return OPCODE_WUR_SMOD_BUF_5;
+ case 55:
+ return OPCODE_WUR_SMOD_BUF_6;
+ case 56:
+ return OPCODE_WUR_SMOD_BUF_7;
+ case 57:
+ return OPCODE_WUR_WEIGHT_REG;
+ case 58:
+ return OPCODE_WUR_SCALE_REG;
+ case 59:
+ return OPCODE_WUR_LLR_POS;
+ case 60:
+ return OPCODE_WUR_SMOD_POS;
+ case 61:
+ return OPCODE_WUR_PERM_REG;
+ case 62:
+ return OPCODE_WUR_SMOD_OFFSET_TABLE_0;
+ case 63:
+ return OPCODE_WUR_SMOD_OFFSET_TABLE_1;
+ case 64:
+ return OPCODE_WUR_SMOD_OFFSET_TABLE_2;
+ case 65:
+ return OPCODE_WUR_SMOD_OFFSET_TABLE_3;
+ case 66:
+ return OPCODE_WUR_PHASOR_N;
+ case 67:
+ return OPCODE_WUR_PHASOR_OFFSET;
+ case 231:
+ return OPCODE_WUR_THREADPTR;
+ case 232:
+ return OPCODE_WUR_FCR;
+ case 233:
+ return OPCODE_WUR_FSR;
+ }
+ break;
+ }
+ break;
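+ /* EXTUI spans two op1 values: the high bit of its extract-shift
+    immediate appears to overlap op1. */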
+ case 4:
+ case 5:
+ return OPCODE_EXTUI;
+ case 8:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_LSX;
+ case 1:
+ return OPCODE_LSXU;
+ case 4:
+ return OPCODE_SSX;
+ case 5:
+ return OPCODE_SSXU;
+ }
+ break;
+ case 9:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_L32E;
+ case 4:
+ return OPCODE_S32E;
+ }
+ break;
+ case 10:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ADD_S;
+ case 1:
+ return OPCODE_SUB_S;
+ case 2:
+ return OPCODE_MUL_S;
+ case 4:
+ return OPCODE_MADD_S;
+ case 5:
+ return OPCODE_MSUB_S;
+ case 8:
+ return OPCODE_ROUND_S;
+ case 9:
+ return OPCODE_TRUNC_S;
+ case 10:
+ return OPCODE_FLOOR_S;
+ case 11:
+ return OPCODE_CEIL_S;
+ case 12:
+ return OPCODE_FLOAT_S;
+ case 13:
+ return OPCODE_UFLOAT_S;
+ case 14:
+ return OPCODE_UTRUNC_S;
+ case 15:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_MOV_S;
+ case 1:
+ return OPCODE_ABS_S;
+ case 4:
+ return OPCODE_RFR;
+ case 5:
+ return OPCODE_WFR;
+ case 6:
+ return OPCODE_NEG_S;
+ }
+ break;
+ }
+ break;
+ case 11:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 1:
+ return OPCODE_UN_S;
+ case 2:
+ return OPCODE_OEQ_S;
+ case 3:
+ return OPCODE_UEQ_S;
+ case 4:
+ return OPCODE_OLT_S;
+ case 5:
+ return OPCODE_ULT_S;
+ case 6:
+ return OPCODE_OLE_S;
+ case 7:
+ return OPCODE_ULE_S;
+ case 8:
+ return OPCODE_MOVEQZ_S;
+ case 9:
+ return OPCODE_MOVNEZ_S;
+ case 10:
+ return OPCODE_MOVLTZ_S;
+ case 11:
+ return OPCODE_MOVGEZ_S;
+ case 12:
+ return OPCODE_MOVF_S;
+ case 13:
+ return OPCODE_MOVT_S;
+ }
+ break;
+ }
+ break;
+ case 1:
+ return OPCODE_L32R;
+ case 2:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_L8UI;
+ case 1:
+ return OPCODE_L16UI;
+ case 2:
+ return OPCODE_L32I;
+ case 4:
+ return OPCODE_S8I;
+ case 5:
+ return OPCODE_S16I;
+ case 6:
+ return OPCODE_S32I;
+ case 7:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_DPFR;
+ case 1:
+ return OPCODE_DPFW;
+ case 2:
+ return OPCODE_DPFRO;
+ case 3:
+ return OPCODE_DPFWO;
+ case 4:
+ return OPCODE_DHWB;
+ case 5:
+ return OPCODE_DHWBI;
+ case 6:
+ return OPCODE_DHI;
+ case 7:
+ return OPCODE_DII;
+ case 8:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_DPFL;
+ case 2:
+ return OPCODE_DHU;
+ case 3:
+ return OPCODE_DIU;
+ case 4:
+ return OPCODE_DIWB;
+ case 5:
+ return OPCODE_DIWBI;
+ }
+ break;
+ case 12:
+ return OPCODE_IPF;
+ case 13:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_IPFL;
+ case 2:
+ return OPCODE_IHU;
+ case 3:
+ return OPCODE_IIU;
+ }
+ break;
+ case 14:
+ return OPCODE_IHI;
+ case 15:
+ return OPCODE_III;
+ }
+ break;
+ case 9:
+ return OPCODE_L16SI;
+ case 10:
+ return OPCODE_MOVI;
+ case 11:
+ return OPCODE_L32AI;
+ case 12:
+ return OPCODE_ADDI;
+ case 13:
+ return OPCODE_ADDMI;
+ case 14:
+ return OPCODE_S32C1I;
+ case 15:
+ return OPCODE_S32RI;
+ }
+ break;
+ case 3:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_LSI;
+ case 4:
+ return OPCODE_SSI;
+ case 8:
+ return OPCODE_LSIU;
+ case 12:
+ return OPCODE_SSIU;
+ }
+ break;
+ case 5:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_CALL0;
+ case 1:
+ return OPCODE_CALL4;
+ case 2:
+ return OPCODE_CALL8;
+ case 3:
+ return OPCODE_CALL12;
+ }
+ break;
+ case 6:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_J;
+ case 1:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQZ;
+ case 1:
+ return OPCODE_BNEZ;
+ case 2:
+ return OPCODE_BLTZ;
+ case 3:
+ return OPCODE_BGEZ;
+ }
+ break;
+ case 2:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQI;
+ case 1:
+ return OPCODE_BNEI;
+ case 2:
+ return OPCODE_BLTI;
+ case 3:
+ return OPCODE_BGEI;
+ }
+ break;
+ case 3:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_ENTRY;
+ case 1:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BF;
+ case 1:
+ return OPCODE_BT;
+ case 8:
+ return OPCODE_LOOP;
+ case 9:
+ return OPCODE_LOOPNEZ;
+ case 10:
+ return OPCODE_LOOPGTZ;
+ }
+ break;
+ case 2:
+ return OPCODE_BLTUI;
+ case 3:
+ return OPCODE_BGEUI;
+ }
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return OPCODE_BNONE;
+ case 1:
+ return OPCODE_BEQ;
+ case 2:
+ return OPCODE_BLT;
+ case 3:
+ return OPCODE_BLTU;
+ case 4:
+ return OPCODE_BALL;
+ case 5:
+ return OPCODE_BBC;
+ case 6:
+ case 7:
+ return OPCODE_BBCI;
+ case 8:
+ return OPCODE_BANY;
+ case 9:
+ return OPCODE_BNE;
+ case 10:
+ return OPCODE_BGE;
+ case 11:
+ return OPCODE_BGEU;
+ case 12:
+ return OPCODE_BNALL;
+ case 13:
+ return OPCODE_BBS;
+ case 14:
+ case 15:
+ return OPCODE_BBSI;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
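+/* Decode the 16-bit "inst16b" slot: density-option narrow encodings
+   (MOVI.N, BEQZ.N, BNEZ.N, MOV.N, RET.N, ...).  Returns an OPCODE_*
+   id, or XTENSA_UNDEFINED if the bit pattern is not a valid opcode. */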
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16b_get (insn))
+ {
+ case 12:
+ switch (Field_i_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_MOVI_N;
+ case 1:
+ switch (Field_z_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_BEQZ_N;
+ case 1:
+ return OPCODE_BNEZ_N;
+ }
+ break;
+ }
+ break;
+ case 13:
+ switch (Field_r_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_MOV_N;
+ case 15:
+ switch (Field_t_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return OPCODE_RET_N;
+ case 1:
+ return OPCODE_RETW_N;
+ case 2:
+ return OPCODE_BREAK_N;
+ case 3:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ break;
+ case 6:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
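+/* Decode the 16-bit "inst16a" slot: narrow load/store/add encodings
+   (L32I.N, S32I.N, ADD.N, ADDI.N). */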
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16a_get (insn))
+ {
+ case 8:
+ return OPCODE_L32I_N;
+ case 9:
+ return OPCODE_S32I_N;
+ case 10:
+ return OPCODE_ADD_N;
+ case 11:
+ return OPCODE_ADDI_N;
+ }
+ return XTENSA_UNDEFINED;
+}
+
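+/* Decode slot 2 of the "gp" bundle format (apparently a FLIX/TIE
+   multi-slot format).  The long if-chains test generator-named fields
+   (Field_dsp340050b49a6c_fld*) against fixed values; most matches are
+   DSP-extension opcodes, with a few core ALU opcodes handled by the
+   sae switch at the end. */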
+static int
+Slot_gp_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2258gp_slot2_Slot_gp_slot2_get (insn))
+ {
+ case 22:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ break;
+ case 24:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AR2PQ_LN;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2259gp_slot2_Slot_gp_slot2_get (insn) == 46 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ switch (Field_dsp340050b49a6c_fld2260gp_slot2_Slot_gp_slot2_get (insn))
+ {
+ case 185:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADDAR2;
+ break;
+ case 187:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOV_N;
+ break;
+ case 189:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVPQ2PQ;
+ break;
+ case 191:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 200:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PUSH32;
+ break;
+ case 201:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ break;
+ case 202:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUBARX;
+ break;
+ case 204:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_WRTIEP;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2261gp_slot2_Slot_gp_slot2_get (insn))
+ {
+ case 1624:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ break;
+ case 1625:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ break;
+ case 1626:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ break;
+ case 1631:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2044_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2262gp_slot2_Slot_gp_slot2_get (insn))
+ {
+ case 3254:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_01;
+ break;
+ case 3255:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_03;
+ break;
+ case 3256:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AR2SAR_DUP;
+ break;
+ case 3257:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_21;
+ break;
+ case 3258:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_23;
+ break;
+ case 3259:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ break;
+ case 3260:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQ;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2263gp_slot2_Slot_gp_slot2_get (insn))
+ {
+ case 52176:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ break;
+ case 52177:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 52178:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 52179:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 52180:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ break;
+ case 52181:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 52182:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ break;
+ case 52183:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2264gp_slot2_Slot_gp_slot2_get (insn) == 13046 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ if (Field_dsp340050b49a6c_fld2266gp_slot2_Slot_gp_slot2_get (insn) == 13047 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2267gp_slot2_Slot_gp_slot2_get (insn) == 1640 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_0;
+ if (Field_dsp340050b49a6c_fld2268gp_slot2_Slot_gp_slot2_get (insn) == 1641 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_1;
+ if (Field_dsp340050b49a6c_fld2269gp_slot2_Slot_gp_slot2_get (insn) == 1642 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_2;
+ if (Field_dsp340050b49a6c_fld2270gp_slot2_Slot_gp_slot2_get (insn) == 1643 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_4;
+ if (Field_dsp340050b49a6c_fld2271gp_slot2_Slot_gp_slot2_get (insn) == 822 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_3;
+ if (Field_dsp340050b49a6c_fld2273gp_slot2_Slot_gp_slot2_get (insn) == 823 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_5;
+ if (Field_dsp340050b49a6c_fld2274gp_slot2_Slot_gp_slot2_get (insn) == 103 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2056_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ if (Field_dsp340050b49a6c_fld2275gp_slot2_Slot_gp_slot2_get (insn) == 52 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2277gp_slot2_Slot_gp_slot2_get (insn) == 106 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_1;
+ if (Field_dsp340050b49a6c_fld2278gp_slot2_Slot_gp_slot2_get (insn) == 107 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_3;
+ if (Field_dsp340050b49a6c_fld2279gp_slot2_Slot_gp_slot2_get (insn) == 108 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_2;
+ if (Field_dsp340050b49a6c_fld2280gp_slot2_Slot_gp_slot2_get (insn) == 109 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld2281gp_slot2_Slot_gp_slot2_get (insn) == 220 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld2282gp_slot2_Slot_gp_slot2_get (insn) == 221 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld2283gp_slot2_Slot_gp_slot2_get (insn) == 222 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld2284gp_slot2_Slot_gp_slot2_get (insn) == 3343 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2286gp_slot2_Slot_gp_slot2_get (insn) == 3359 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2287gp_slot2_Slot_gp_slot2_get (insn) == 3375 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2288gp_slot2_Slot_gp_slot2_get (insn) == 3391 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld2289gp_slot2_Slot_gp_slot2_get (insn) == 3407 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2290gp_slot2_Slot_gp_slot2_get (insn) == 3423 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2291gp_slot2_Slot_gp_slot2_get (insn) == 3439 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2292gp_slot2_Slot_gp_slot2_get (insn) == 3455 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2293gp_slot2_Slot_gp_slot2_get (insn) == 3471 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld2294gp_slot2_Slot_gp_slot2_get (insn) == 3487 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2295gp_slot2_Slot_gp_slot2_get (insn) == 3503 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2296gp_slot2_Slot_gp_slot2_get (insn) == 3519 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2297gp_slot2_Slot_gp_slot2_get (insn) == 3535 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2298gp_slot2_Slot_gp_slot2_get (insn) == 3551 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2299gp_slot2_Slot_gp_slot2_get (insn) == 3567 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2300gp_slot2_Slot_gp_slot2_get (insn) == 3583 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2301gp_slot2_Slot_gp_slot2_get (insn) == 56 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2272_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_0;
+ if (Field_dsp340050b49a6c_fld2303gp_slot2_Slot_gp_slot2_get (insn) == 57 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3671gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2304gp_slot2_Slot_gp_slot2_get (insn) == 29 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3674gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld2306gp_slot2_Slot_gp_slot2_get (insn) == 120 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld2308gp_slot2_Slot_gp_slot2_get (insn) == 242 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld2309gp_slot2_Slot_gp_slot2_get (insn) == 243 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SWAPB;
+ if (Field_dsp340050b49a6c_fld2310gp_slot2_Slot_gp_slot2_get (insn) == 61 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3667gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVAR2;
+ if (Field_dsp340050b49a6c_fld2312gp_slot2_Slot_gp_slot2_get (insn) == 31 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2386_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld2314gp_slot2_Slot_gp_slot2_get (insn) == 16 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ if (Field_dsp340050b49a6c_fld2316gp_slot2_Slot_gp_slot2_get (insn) == 17 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_dsp340050b49a6c_fld2317gp_slot2_Slot_gp_slot2_get (insn) == 18 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_dsp340050b49a6c_fld2318gp_slot2_Slot_gp_slot2_get (insn) == 19 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ if (Field_dsp340050b49a6c_fld2319gp_slot2_Slot_gp_slot2_get (insn) == 20 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ if (Field_dsp340050b49a6c_fld2320gp_slot2_Slot_gp_slot2_get (insn) == 21 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ if (Field_dsp340050b49a6c_fld2321gp_slot2_Slot_gp_slot2_get (insn) == 22 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ if (Field_dsp340050b49a6c_fld2322gp_slot2_Slot_gp_slot2_get (insn) == 23 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ if (Field_dsp340050b49a6c_fld2323gp_slot2_Slot_gp_slot2_get (insn) == 24 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ if (Field_dsp340050b49a6c_fld2324gp_slot2_Slot_gp_slot2_get (insn) == 25 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ if (Field_dsp340050b49a6c_fld2325gp_slot2_Slot_gp_slot2_get (insn) == 26 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ if (Field_dsp340050b49a6c_fld2326gp_slot2_Slot_gp_slot2_get (insn) == 27 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ if (Field_dsp340050b49a6c_fld2327gp_slot2_Slot_gp_slot2_get (insn) == 28 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ if (Field_dsp340050b49a6c_fld2328gp_slot2_Slot_gp_slot2_get (insn) == 29 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ if (Field_dsp340050b49a6c_fld2329gp_slot2_Slot_gp_slot2_get (insn) == 30 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_OR128;
+ if (Field_dsp340050b49a6c_fld2330gp_slot2_Slot_gp_slot2_get (insn) == 31 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUB32;
+ if (Field_dsp340050b49a6c_fld2331gp_slot2_Slot_gp_slot2_get (insn) == 0 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_ASL32;
+ if (Field_dsp340050b49a6c_fld2332gp_slot2_Slot_gp_slot2_get (insn) == 1 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_ASR32;
+ if (Field_dsp340050b49a6c_fld2333gp_slot2_Slot_gp_slot2_get (insn) == 2 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOV2CM2PQ;
+ if (Field_dsp340050b49a6c_fld2334gp_slot2_Slot_gp_slot2_get (insn) == 3 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_2;
+ if (Field_dsp340050b49a6c_fld2335gp_slot2_Slot_gp_slot2_get (insn) == 4 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_0;
+ if (Field_dsp340050b49a6c_fld2336gp_slot2_Slot_gp_slot2_get (insn) == 5 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_3;
+ if (Field_dsp340050b49a6c_fld2337gp_slot2_Slot_gp_slot2_get (insn) == 6 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_4;
+ if (Field_dsp340050b49a6c_fld2338gp_slot2_Slot_gp_slot2_get (insn) == 7 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_5;
+ if (Field_dsp340050b49a6c_fld2339gp_slot2_Slot_gp_slot2_get (insn) == 8 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_1;
+ if (Field_dsp340050b49a6c_fld2340gp_slot2_Slot_gp_slot2_get (insn) == 9 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_6;
+ if (Field_dsp340050b49a6c_fld2341gp_slot2_Slot_gp_slot2_get (insn) == 10 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_7;
+ if (Field_dsp340050b49a6c_fld2342gp_slot2_Slot_gp_slot2_get (insn) == 11 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_1;
+ if (Field_dsp340050b49a6c_fld2343gp_slot2_Slot_gp_slot2_get (insn) == 12 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_0;
+ if (Field_dsp340050b49a6c_fld2344gp_slot2_Slot_gp_slot2_get (insn) == 13 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_2;
+ if (Field_dsp340050b49a6c_fld2345gp_slot2_Slot_gp_slot2_get (insn) == 14 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_3;
+ if (Field_dsp340050b49a6c_fld2346gp_slot2_Slot_gp_slot2_get (insn) == 15 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_4;
+ if (Field_dsp340050b49a6c_fld2347gp_slot2_Slot_gp_slot2_get (insn) == 16 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_ASLM;
+ if (Field_dsp340050b49a6c_fld2348gp_slot2_Slot_gp_slot2_get (insn) == 17 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_ASRM;
+ if (Field_dsp340050b49a6c_fld2349gp_slot2_Slot_gp_slot2_get (insn) == 18 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_CMP8;
+ if (Field_dsp340050b49a6c_fld2350gp_slot2_Slot_gp_slot2_get (insn) == 19 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_LSLM;
+ if (Field_dsp340050b49a6c_fld2351gp_slot2_Slot_gp_slot2_get (insn) == 20 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_CMP_I;
+ if (Field_dsp340050b49a6c_fld2352gp_slot2_Slot_gp_slot2_get (insn) == 21 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_LSRM;
+ if (Field_dsp340050b49a6c_fld2353gp_slot2_Slot_gp_slot2_get (insn) == 262 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_ABS8;
+ if (Field_dsp340050b49a6c_fld2354gp_slot2_Slot_gp_slot2_get (insn) == 263 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2355gp_slot2_Slot_gp_slot2_get (insn) == 278 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_LUT_PHASOR;
+ if (Field_dsp340050b49a6c_fld2356gp_slot2_Slot_gp_slot2_get (insn) == 279 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_NOT128;
+ if (Field_dsp340050b49a6c_fld2357gp_slot2_Slot_gp_slot2_get (insn) == 294 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2358gp_slot2_Slot_gp_slot2_get (insn) == 295 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1)
+ return OPCODE_TRANS;
+ if (Field_dsp340050b49a6c_fld2359gp_slot2_Slot_gp_slot2_get (insn) == 2102 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2361gp_slot2_Slot_gp_slot2_get (insn) == 2103 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ if (Field_dsp340050b49a6c_fld2362gp_slot2_Slot_gp_slot2_get (insn) == 1179 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3663gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld2364gp_slot2_Slot_gp_slot2_get (insn) == 667 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3664gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ if (Field_dsp340050b49a6c_fld2366gp_slot2_Slot_gp_slot2_get (insn) == 411 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3665gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2368gp_slot2_Slot_gp_slot2_get (insn) == 43 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3670gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld2369gp_slot2_Slot_gp_slot2_get (insn) == 27 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3668gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2370gp_slot2_Slot_gp_slot2_get (insn) == 6 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2032_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_CMP_R;
+ if (Field_dsp340050b49a6c_fld2371gp_slot2_Slot_gp_slot2_get (insn) == 11 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3675gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld2372gp_slot2_Slot_gp_slot2_get (insn) == 15 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3669gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ if (Field_dsp340050b49a6c_fld2373gp_slot2_Slot_gp_slot2_get (insn) == 0 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2)
+ return OPCODE_MOVCND_5;
+ if (Field_dsp340050b49a6c_fld2374gp_slot2_Slot_gp_slot2_get (insn) == 1 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2)
+ return OPCODE_MOVCND_6;
+ if (Field_dsp340050b49a6c_fld2375gp_slot2_Slot_gp_slot2_get (insn) == 2 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2)
+ return OPCODE_MOVCND_7;
+ if (Field_dsp340050b49a6c_fld2376gp_slot2_Slot_gp_slot2_get (insn) == 12 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVCM2PQ;
+ if (Field_dsp340050b49a6c_fld2378gp_slot2_Slot_gp_slot2_get (insn) == 13 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3676gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_dsp340050b49a6c_fld2379gp_slot2_Slot_gp_slot2_get (insn) == 7 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3673gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2381gp_slot2_Slot_gp_slot2_get (insn) == 1 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3666_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_PERM;
+ if (Field_dsp340050b49a6c_fld2383gp_slot2_Slot_gp_slot2_get (insn) == 1 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3678gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ if (Field_dsp340050b49a6c_fld2385gp_slot2_Slot_gp_slot2_get (insn) == 8 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUBCM;
+ if (Field_dsp340050b49a6c_fld2387gp_slot2_Slot_gp_slot2_get (insn) == 9 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld2388gp_slot2_Slot_gp_slot2_get (insn) == 5 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3679gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUBWRP;
+ if (Field_dsp340050b49a6c_fld2389gp_slot2_Slot_gp_slot2_get (insn) == 3 &&
+ Field_op0_s3_Slot_gp_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3680gp_slot2_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_XOR128;
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 3)
+ return OPCODE_EXTUI;
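+ /* ALU and conditional-move opcodes (plus LUT_AR) bundled into this
+    slot, selected by the sae field. */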
+ switch (Field_sae_Slot_gp_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADD;
+ break;
+ case 1:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_ADDI_N;
+ break;
+ case 2:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_AND;
+ break;
+ case 3:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVGEZ;
+ break;
+ case 4:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_LUT_AR;
+ break;
+ case 5:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVLTZ;
+ break;
+ case 6:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVNEZ;
+ break;
+ case 7:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_OR;
+ break;
+ case 8:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_MOVEQZ;
+ break;
+ case 9:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_SUB;
+ break;
+ case 10:
+ if (Field_op0_s3_Slot_gp_slot2_get (insn) == 0)
+ return OPCODE_XOR;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
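+/* Decode slot 0 of the "gp" bundle format: mostly load-accumulate
+   (LAC*) and state-access opcodes. */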
+static int
+Slot_gp_slot0_decode (const xtensa_insnbuf insn)
+{
+ if (Field_dsp340050b49a6c_fld2407gp_slot0_Slot_gp_slot0_get (insn) == 0 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC2X32;
+ if (Field_dsp340050b49a6c_fld2409gp_slot0_Slot_gp_slot0_get (insn) == 1 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC32_R;
+ if (Field_dsp340050b49a6c_fld2410gp_slot0_Slot_gp_slot0_get (insn) == 2 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC_IH;
+ if (Field_dsp340050b49a6c_fld2411gp_slot0_Slot_gp_slot0_get (insn) == 3 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC_RH;
+ if (Field_dsp340050b49a6c_fld2412gp_slot0_Slot_gp_slot0_get (insn) == 10 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC_RL;
+ if (Field_dsp340050b49a6c_fld2413gp_slot0_Slot_gp_slot0_get (insn) == 11 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_0;
+ if (Field_dsp340050b49a6c_fld2415gp_slot0_Slot_gp_slot0_get (insn) == 27 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_1;
+ if (Field_dsp340050b49a6c_fld2416gp_slot0_Slot_gp_slot0_get (insn) == 4 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC_IL;
+ if (Field_dsp340050b49a6c_fld2417gp_slot0_Slot_gp_slot0_get (insn) == 5 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_2;
+ switch (Field_dsp340050b49a6c_fld2418gp_slot0_Slot_gp_slot0_get (insn))
+ {
+ case 21:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ break;
+ case 53:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOV_N;
+ break;
+ case 85:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_NSA;
+ break;
+ case 117:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_PUSH32;
+ break;
+ case 149:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_NSAU;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2419gp_slot0_Slot_gp_slot0_get (insn) == 362 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld2420gp_slot0_Slot_gp_slot0_get (insn) == 1452 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_dsp340050b49a6c_fld2422gp_slot0_Slot_gp_slot0_get (insn) == 1453 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2423gp_slot0_Slot_gp_slot0_get (insn) == 2908 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2424gp_slot0_Slot_gp_slot0_get (insn) == 2909 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2425gp_slot0_Slot_gp_slot0_get (insn) == 2910 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2426gp_slot0_Slot_gp_slot0_get (insn) == 2911 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2427gp_slot0_Slot_gp_slot0_get (insn) == 852 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ANY4;
+ if (Field_dsp340050b49a6c_fld2429gp_slot0_Slot_gp_slot0_get (insn) == 3412 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2430gp_slot0_Slot_gp_slot0_get (insn) == 3413 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2431gp_slot0_Slot_gp_slot0_get (insn) == 3414 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2432gp_slot0_Slot_gp_slot0_get (insn) == 3415 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2433gp_slot0_Slot_gp_slot0_get (insn) == 3416 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2434gp_slot0_Slot_gp_slot0_get (insn) == 3417 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2435gp_slot0_Slot_gp_slot0_get (insn) == 1709 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_s8_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2436gp_slot0_Slot_gp_slot0_get (insn) == 855 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3690gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2437gp_slot0_Slot_gp_slot0_get (insn) == 1960 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SSAI;
+ switch (Field_dsp340050b49a6c_fld2438gp_slot0_Slot_gp_slot0_get (insn))
+ {
+ case 3922:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_POS;
+ break;
+ case 3923:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_PHASOR_OFFSET;
+ break;
+ case 3924:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_PERM_REG;
+ break;
+ case 3925:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SCALE_REG;
+ break;
+ case 3926:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_POS;
+ break;
+ case 3927:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SOV;
+ break;
+ case 3928:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_PHASOR_N;
+ break;
+ case 3929:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_WGHT;
+ break;
+ case 3930:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SSA8B;
+ break;
+ case 3931:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SSL;
+ break;
+ case 3932:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SSA8L;
+ break;
+ case 3933:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SSR;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2439gp_slot0_Slot_gp_slot0_get (insn) == 3934 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CLRAC;
+ if (Field_dsp340050b49a6c_fld2440gp_slot0_Slot_gp_slot0_get (insn) == 3935 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP16LLR_1;
+ if (Field_dsp340050b49a6c_fld2441gp_slot0_Slot_gp_slot0_get (insn) == 3 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3693gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_3;
+ switch (Field_dsp340050b49a6c_fld2443gp_slot0_Slot_gp_slot0_get (insn))
+ {
+ case 3:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ break;
+ case 7:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN_I;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2444gp_slot0_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2067_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN_R;
+ switch (Field_dsp340050b49a6c_fld2445_Slot_gp_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SAC2X32;
+ break;
+ case 1:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SAC32_R;
+ break;
+ case 2:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SCM_PINC;
+ break;
+ case 3:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SRAI;
+ break;
+ case 4:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SCM_U;
+ break;
+ case 8:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_SLLI;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2447gp_slot0_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3688gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_dsp340050b49a6c_fld2448_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3703gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ASLACM;
+ if (Field_dsp340050b49a6c_fld2449gp_slot0_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3689gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_BNEZ_N;
+ if (Field_dsp340050b49a6c_fld2451gp_slot0_Slot_gp_slot0_get (insn) == 8 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LCM_PINC;
+ if (Field_dsp340050b49a6c_fld2452gp_slot0_Slot_gp_slot0_get (insn) == 9 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LCM_U;
+ if (Field_dsp340050b49a6c_fld2453gp_slot0_Slot_gp_slot0_get (insn) == 18 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_LCM;
+ if (Field_dsp340050b49a6c_fld2454gp_slot0_Slot_gp_slot0_get (insn) == 259 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld2455gp_slot0_Slot_gp_slot0_get (insn) == 267 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2456gp_slot0_Slot_gp_slot0_get (insn) == 291 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld2457gp_slot0_Slot_gp_slot0_get (insn) == 299 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2458gp_slot0_Slot_gp_slot0_get (insn) == 323 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld2459gp_slot0_Slot_gp_slot0_get (insn) == 331 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld2460gp_slot0_Slot_gp_slot0_get (insn) == 355 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_dsp340050b49a6c_fld2461gp_slot0_Slot_gp_slot0_get (insn) == 715 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CLB_C;
+ switch (Field_dsp340050b49a6c_fld2462_Slot_gp_slot0_get (insn))
+ {
+ case 21:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_EXT;
+ break;
+ case 23:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_EXT_R;
+ break;
+ case 33:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_MOVI_N;
+ break;
+ case 35:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_PUSH128_M;
+ break;
+ case 46:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_0;
+ break;
+ case 47:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_1;
+ break;
+ case 50:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_2;
+ break;
+ case 52:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2463gp_slot0_Slot_gp_slot0_get (insn) == 731 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CLB_R;
+ if (Field_dsp340050b49a6c_fld2464gp_slot0_Slot_gp_slot0_get (insn) == 387 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld2465gp_slot0_Slot_gp_slot0_get (insn) == 779 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MINCLB_C;
+ if (Field_dsp340050b49a6c_fld2466gp_slot0_Slot_gp_slot0_get (insn) == 795 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SMINCLB_R;
+ if (Field_dsp340050b49a6c_fld2467gp_slot0_Slot_gp_slot0_get (insn) == 835 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MINCLB_R;
+ if (Field_dsp340050b49a6c_fld2468gp_slot0_Slot_gp_slot0_get (insn) == 13571 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ADDAC_I2R;
+ if (Field_dsp340050b49a6c_fld2470gp_slot0_Slot_gp_slot0_get (insn) == 13587 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ADDAC_R2I;
+ if (Field_dsp340050b49a6c_fld2471gp_slot0_Slot_gp_slot0_get (insn) == 13603 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_REDAC;
+ if (Field_dsp340050b49a6c_fld2472gp_slot0_Slot_gp_slot0_get (insn) == 13619 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_REDACS;
+ if (Field_dsp340050b49a6c_fld2473gp_slot0_Slot_gp_slot0_get (insn) == 13635 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_REDAC2;
+ if (Field_dsp340050b49a6c_fld2474gp_slot0_Slot_gp_slot0_get (insn) == 13651 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_I2R;
+ if (Field_dsp340050b49a6c_fld2475gp_slot0_Slot_gp_slot0_get (insn) == 6835 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_R2I;
+ if (Field_dsp340050b49a6c_fld2477gp_slot0_Slot_gp_slot0_get (insn) == 1715 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3705gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_REDAC4;
+ if (Field_dsp340050b49a6c_fld2479gp_slot0_Slot_gp_slot0_get (insn) == 6667 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_L;
+ if (Field_dsp340050b49a6c_fld2480gp_slot0_Slot_gp_slot0_get (insn) == 6683 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_N;
+ if (Field_dsp340050b49a6c_fld2481gp_slot0_Slot_gp_slot0_get (insn) == 6699 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld2482gp_slot0_Slot_gp_slot0_get (insn) == 6715 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld2483gp_slot0_Slot_gp_slot0_get (insn) == 6731 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld2484gp_slot0_Slot_gp_slot0_get (insn) == 6747 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld2485gp_slot0_Slot_gp_slot0_get (insn) == 6763 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld2486gp_slot0_Slot_gp_slot0_get (insn) == 6779 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_SOV;
+ if (Field_dsp340050b49a6c_fld2487gp_slot0_Slot_gp_slot0_get (insn) == 6795 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld2488gp_slot0_Slot_gp_slot0_get (insn) == 6811 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld2489gp_slot0_Slot_gp_slot0_get (insn) == 6827 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2490gp_slot0_Slot_gp_slot0_get (insn) == 6843 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2491gp_slot0_Slot_gp_slot0_get (insn) == 6859 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2492gp_slot0_Slot_gp_slot0_get (insn) == 6875 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2493gp_slot0_Slot_gp_slot0_get (insn) == 6891 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2494gp_slot0_Slot_gp_slot0_get (insn) == 6907 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2495gp_slot0_Slot_gp_slot0_get (insn) == 115 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3706gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SMINCLB_C;
+ if (Field_dsp340050b49a6c_fld2496gp_slot0_Slot_gp_slot0_get (insn) == 137 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_s8_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2497gp_slot0_Slot_gp_slot0_get (insn) == 277 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT32_I;
+ if (Field_dsp340050b49a6c_fld2498gp_slot0_Slot_gp_slot0_get (insn) == 285 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2499gp_slot0_Slot_gp_slot0_get (insn) == 305 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT32_R;
+ if (Field_dsp340050b49a6c_fld2500gp_slot0_Slot_gp_slot0_get (insn) == 309 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2501gp_slot0_Slot_gp_slot0_get (insn) == 313 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2502gp_slot0_Slot_gp_slot0_get (insn) == 317 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2503gp_slot0_Slot_gp_slot0_get (insn) == 337 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2504gp_slot0_Slot_gp_slot0_get (insn) == 341 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2505gp_slot0_Slot_gp_slot0_get (insn) == 173 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2506gp_slot0_Slot_gp_slot0_get (insn) == 93 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3692gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2507gp_slot0_Slot_gp_slot0_get (insn) == 29 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3700gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2508gp_slot0_Slot_gp_slot0_get (insn) == 9 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2509gp_slot0_Slot_gp_slot0_get (insn) == 11 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_I;
+ if (Field_dsp340050b49a6c_fld2510gp_slot0_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3696gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_R;
+ if (Field_dsp340050b49a6c_fld2512gp_slot0_Slot_gp_slot0_get (insn) == 6 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC_IL;
+ if (Field_dsp340050b49a6c_fld2514gp_slot0_Slot_gp_slot0_get (insn) == 7 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC_RH;
+ if (Field_dsp340050b49a6c_fld2515gp_slot0_Slot_gp_slot0_get (insn) == 10 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC_RL;
+ if (Field_dsp340050b49a6c_fld2516gp_slot0_Slot_gp_slot0_get (insn) == 16 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SAC_IH;
+ if (Field_dsp340050b49a6c_fld2517gp_slot0_Slot_gp_slot0_get (insn) == 204 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_POP128_2M_0;
+ if (Field_dsp340050b49a6c_fld2518gp_slot0_Slot_gp_slot0_get (insn) == 205 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_POP128_2M_1;
+ if (Field_dsp340050b49a6c_fld2519gp_slot0_Slot_gp_slot0_get (insn) == 206 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_POP128_2M_2;
+ if (Field_dsp340050b49a6c_fld2520gp_slot0_Slot_gp_slot0_get (insn) == 207 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_POP128_2M_3;
+ if (Field_dsp340050b49a6c_fld2521gp_slot0_Slot_gp_slot0_get (insn) == 212 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2530_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_ASRAC;
+ if (Field_dsp340050b49a6c_fld2523gp_slot0_Slot_gp_slot0_get (insn) == 213 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3698gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOVAC;
+ if (Field_dsp340050b49a6c_fld2524gp_slot0_Slot_gp_slot0_get (insn) == 107 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3699gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SWAPAC_RI;
+ if (Field_dsp340050b49a6c_fld2526gp_slot0_Slot_gp_slot0_get (insn) == 432 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld2527gp_slot0_Slot_gp_slot0_get (insn) == 433 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_I;
+ if (Field_dsp340050b49a6c_fld2528gp_slot0_Slot_gp_slot0_get (insn) == 217 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3697gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_R;
+ if (Field_dsp340050b49a6c_fld2529gp_slot0_Slot_gp_slot0_get (insn) == 109 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3702gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2531gp_slot0_Slot_gp_slot0_get (insn) == 55 &&
+ Field_op0_s5_Slot_gp_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3691gp_slot0_Slot_gp_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 3)
+ return OPCODE_EXTUI;
+ switch (Field_sae_Slot_gp_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_MOVNEZ;
+ break;
+ case 1:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_MOVT;
+ break;
+ case 2:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_OR;
+ break;
+ case 3:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SCM;
+ break;
+ case 4:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_ORB;
+ break;
+ case 5:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SCM_PINC_X;
+ break;
+ case 8:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_S32I_N;
+ break;
+ case 9:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SCM_XU;
+ break;
+ case 10:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ADD;
+ break;
+ case 11:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ADDX2;
+ break;
+ case 12:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ADDI_N;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SCM_X;
+ break;
+ case 13:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ADDX4;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SEXT;
+ break;
+ case 14:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ADDX8;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SRC;
+ break;
+ case 15:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SRLI;
+ break;
+ case 18:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SUB;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_ANDB;
+ break;
+ case 19:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_L32I_N;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SUBX8;
+ break;
+ case 20:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SUBX2;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_CLAMPS;
+ break;
+ case 21:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_XOR;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_LCM_PINC_X;
+ break;
+ case 22:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_XORB;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_LCM_X;
+ break;
+ case 23:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_LCM_XU;
+ break;
+ case 24:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 2)
+ return OPCODE_SUBX4;
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_COMB_AR;
+ break;
+ case 25:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MAX;
+ break;
+ case 26:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MAXU;
+ break;
+ case 27:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MINU;
+ break;
+ case 28:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MIN;
+ break;
+ case 29:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MOVEQZ;
+ break;
+ case 30:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MOVGEZ;
+ break;
+ case 31:
+ if (Field_op0_s5_Slot_gp_slot0_get (insn) == 1)
+ return OPCODE_MOVLTZ;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
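+/* Decoder for the dot_slot0 instruction slot; same pattern as above:
+   field-by-field match of INSN, returning OPCODE_* or XTENSA_UNDEFINED.  */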
+static int
+Slot_dot_slot0_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2068_Slot_dot_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_ADD;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 5)
+ return OPCODE_SUB;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 3)
+ return OPCODE_OR;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 2)
+ return OPCODE_LCM_X;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 4)
+ return OPCODE_SCM_X;
+ break;
+ case 1:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_ADDI_N;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 5)
+ return OPCODE_XOR;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 1)
+ return OPCODE_LCM;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 2)
+ return OPCODE_LCM_XU;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 3)
+ return OPCODE_SCM;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 4)
+ return OPCODE_SCM_XU;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2666dot_slot0_Slot_dot_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_EXT;
+ break;
+ case 1:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_EXT_R;
+ break;
+ case 2:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_MOVI_N;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2667dot_slot0_Slot_dot_slot0_get (insn) == 6 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_AR2CM_LN;
+ switch (Field_dsp340050b49a6c_fld2668dot_slot0_Slot_dot_slot0_get (insn))
+ {
+ case 16:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_2FIFO_1;
+ break;
+ case 17:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_2FIFO_2;
+ break;
+ case 18:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_2FIFO_3;
+ break;
+ case 19:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_R2FIFO_2;
+ break;
+ case 20:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_R2FIFO_0;
+ break;
+ case 21:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_R2FIFO_3;
+ break;
+ case 22:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_MOV_N;
+ break;
+ case 23:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_PUSH32;
+ break;
+ case 24:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_EXT_R2FIFO_1;
+ break;
+ case 27:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_SMINCLB_R;
+ break;
+ case 28:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_AR2CM_DUP;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_MINCLB_C;
+ break;
+ case 29:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_t_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_SET_PERM_REG;
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_EXT32_I;
+ break;
+ case 30:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_EXT32_R;
+ break;
+ case 31:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 6)
+ return OPCODE_EXT_2FIFO_0;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2669dot_slot0_Slot_dot_slot0_get (insn) == 0 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2671dot_slot0_Slot_dot_slot0_get (insn) == 4 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2672dot_slot0_Slot_dot_slot0_get (insn) == 5 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld2673dot_slot0_Slot_dot_slot0_get (insn) == 6 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld2674dot_slot0_Slot_dot_slot0_get (insn) == 7 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld2675dot_slot0_Slot_dot_slot0_get (insn) == 8 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2676dot_slot0_Slot_dot_slot0_get (insn) == 9 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2677dot_slot0_Slot_dot_slot0_get (insn) == 10 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld2678dot_slot0_Slot_dot_slot0_get (insn) == 11 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2679dot_slot0_Slot_dot_slot0_get (insn) == 27 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2680dot_slot0_Slot_dot_slot0_get (insn) == 43 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2681dot_slot0_Slot_dot_slot0_get (insn) == 59 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2682dot_slot0_Slot_dot_slot0_get (insn) == 75 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2683dot_slot0_Slot_dot_slot0_get (insn) == 91 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2684dot_slot0_Slot_dot_slot0_get (insn) == 107 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2685dot_slot0_Slot_dot_slot0_get (insn) == 123 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2686dot_slot0_Slot_dot_slot0_get (insn) == 43 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3736_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2688dot_slot0_Slot_dot_slot0_get (insn) == 59 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3735dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2689dot_slot0_Slot_dot_slot0_get (insn) == 12 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_r_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2690dot_slot0_Slot_dot_slot0_get (insn) == 13 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld2692dot_slot0_Slot_dot_slot0_get (insn) == 29 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2693dot_slot0_Slot_dot_slot0_get (insn) == 29 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2695dot_slot0_Slot_dot_slot0_get (insn) == 29 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3741dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2697dot_slot0_Slot_dot_slot0_get (insn) == 29 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3738dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2699dot_slot0_Slot_dot_slot0_get (insn) == 7 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3740dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2700dot_slot0_Slot_dot_slot0_get (insn) == 50 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_CLB_C;
+ if (Field_dsp340050b49a6c_fld2701dot_slot0_Slot_dot_slot0_get (insn) == 51 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_MINCLB_R;
+ if (Field_dsp340050b49a6c_fld2702dot_slot0_Slot_dot_slot0_get (insn) == 52 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_CLB_R;
+ if (Field_dsp340050b49a6c_fld2703dot_slot0_Slot_dot_slot0_get (insn) == 53 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7)
+ return OPCODE_SMINCLB_C;
+ if (Field_dsp340050b49a6c_fld2704dot_slot0_Slot_dot_slot0_get (insn) == 15 &&
+ Field_op0_s8_Slot_dot_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3739dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_CLRAC;
+ switch (Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3737dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_SLL;
+ break;
+ case 1:
+ if (Field_op0_s8_Slot_dot_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3742dot_slot0_Slot_dot_slot0_get (insn) == 0)
+ return OPCODE_PUSH128;
+ break;
+ }
+ switch (Field_op0_s8_Slot_dot_slot0_get (insn))
+ {
+ case 9:
+ return OPCODE_LCM_U;
+ case 10:
+ return OPCODE_SCM_U;
+ case 11:
+ return OPCODE_SLLI;
+ case 12:
+ return OPCODE_SRAI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
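+/* Decoder for the pq_slot2 instruction slot: returns the OPCODE_* value
+   whose field encodings match INSN, or XTENSA_UNDEFINED otherwise.  */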
+static int
+Slot_pq_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2706pq_slot2_Slot_pq_slot2_get (insn))
+ {
+ case 44:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ break;
+ case 47:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3752pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2707pq_slot2_Slot_pq_slot2_get (insn))
+ {
+ case 180:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADDAR2;
+ break;
+ case 181:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 182:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_PUSH32;
+ break;
+ case 183:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ break;
+ case 184:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOV_N;
+ break;
+ case 185:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUBARX;
+ break;
+ case 186:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_WRTIEP;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2708pq_slot2_Slot_pq_slot2_get (insn))
+ {
+ case 1496:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ break;
+ case 1497:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2709pq_slot2_Slot_pq_slot2_get (insn))
+ {
+ case 2996:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_AR2SAR_DUP;
+ break;
+ case 2997:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ break;
+ case 2999:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_s_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2710pq_slot2_Slot_pq_slot2_get (insn))
+ {
+ case 47968:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ break;
+ case 47969:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ break;
+ case 47970:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 47971:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 47972:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 47973:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 47974:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ break;
+ case 47975:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2711pq_slot2_Slot_pq_slot2_get (insn) == 11994 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ if (Field_dsp340050b49a6c_fld2713pq_slot2_Slot_pq_slot2_get (insn) == 11995 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2714pq_slot2_Slot_pq_slot2_get (insn) == 375 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQ;
+ if (Field_dsp340050b49a6c_fld2715pq_slot2_Slot_pq_slot2_get (insn) == 52 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2717pq_slot2_Slot_pq_slot2_get (insn) == 212 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld2718pq_slot2_Slot_pq_slot2_get (insn) == 213 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld2719pq_slot2_Slot_pq_slot2_get (insn) == 3334 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2721pq_slot2_Slot_pq_slot2_get (insn) == 3335 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2722pq_slot2_Slot_pq_slot2_get (insn) == 3350 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2723pq_slot2_Slot_pq_slot2_get (insn) == 3351 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2724pq_slot2_Slot_pq_slot2_get (insn) == 3366 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2725pq_slot2_Slot_pq_slot2_get (insn) == 3367 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2726pq_slot2_Slot_pq_slot2_get (insn) == 3382 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2727pq_slot2_Slot_pq_slot2_get (insn) == 3383 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2728pq_slot2_Slot_pq_slot2_get (insn) == 3398 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld2729pq_slot2_Slot_pq_slot2_get (insn) == 3399 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2730pq_slot2_Slot_pq_slot2_get (insn) == 3414 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2731pq_slot2_Slot_pq_slot2_get (insn) == 3415 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2732pq_slot2_Slot_pq_slot2_get (insn) == 3430 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2733pq_slot2_Slot_pq_slot2_get (insn) == 3431 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2734pq_slot2_Slot_pq_slot2_get (insn) == 3446 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2735pq_slot2_Slot_pq_slot2_get (insn) == 3447 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2736pq_slot2_Slot_pq_slot2_get (insn) == 438 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld2738pq_slot2_Slot_pq_slot2_get (insn) == 439 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld2739pq_slot2_Slot_pq_slot2_get (insn) == 27 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3744pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld2741pq_slot2_Slot_pq_slot2_get (insn) == 112 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld2742pq_slot2_Slot_pq_slot2_get (insn) == 113 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld2743pq_slot2_Slot_pq_slot2_get (insn) == 114 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld2746pq_slot2_Slot_pq_slot2_get (insn) == 115 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SWAPB;
+ if (Field_dsp340050b49a6c_fld2747pq_slot2_Slot_pq_slot2_get (insn) == 29 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVAR2;
+ if (Field_dsp340050b49a6c_fld2748pq_slot2_Slot_pq_slot2_get (insn) == 16 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ if (Field_dsp340050b49a6c_fld2750pq_slot2_Slot_pq_slot2_get (insn) == 17 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_dsp340050b49a6c_fld2751pq_slot2_Slot_pq_slot2_get (insn) == 18 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_dsp340050b49a6c_fld2752pq_slot2_Slot_pq_slot2_get (insn) == 19 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ if (Field_dsp340050b49a6c_fld2753pq_slot2_Slot_pq_slot2_get (insn) == 20 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ if (Field_dsp340050b49a6c_fld2754pq_slot2_Slot_pq_slot2_get (insn) == 21 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ if (Field_dsp340050b49a6c_fld2755pq_slot2_Slot_pq_slot2_get (insn) == 22 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ if (Field_dsp340050b49a6c_fld2756pq_slot2_Slot_pq_slot2_get (insn) == 23 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ if (Field_dsp340050b49a6c_fld2757pq_slot2_Slot_pq_slot2_get (insn) == 24 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ if (Field_dsp340050b49a6c_fld2758pq_slot2_Slot_pq_slot2_get (insn) == 25 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ if (Field_dsp340050b49a6c_fld2759pq_slot2_Slot_pq_slot2_get (insn) == 26 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ if (Field_dsp340050b49a6c_fld2760pq_slot2_Slot_pq_slot2_get (insn) == 27 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ if (Field_dsp340050b49a6c_fld2761pq_slot2_Slot_pq_slot2_get (insn) == 28 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ if (Field_dsp340050b49a6c_fld2762pq_slot2_Slot_pq_slot2_get (insn) == 29 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ if (Field_dsp340050b49a6c_fld2763pq_slot2_Slot_pq_slot2_get (insn) == 30 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_OR128;
+ if (Field_dsp340050b49a6c_fld2764pq_slot2_Slot_pq_slot2_get (insn) == 31 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUB32;
+ if (Field_dsp340050b49a6c_fld2765pq_slot2_Slot_pq_slot2_get (insn) == 0 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_ASL32;
+ if (Field_dsp340050b49a6c_fld2766pq_slot2_Slot_pq_slot2_get (insn) == 1 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_ASR32;
+ if (Field_dsp340050b49a6c_fld2767pq_slot2_Slot_pq_slot2_get (insn) == 2 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_0;
+ if (Field_dsp340050b49a6c_fld2768pq_slot2_Slot_pq_slot2_get (insn) == 3 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_3;
+ if (Field_dsp340050b49a6c_fld2769pq_slot2_Slot_pq_slot2_get (insn) == 4 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_1;
+ if (Field_dsp340050b49a6c_fld2770pq_slot2_Slot_pq_slot2_get (insn) == 5 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_4;
+ if (Field_dsp340050b49a6c_fld2771pq_slot2_Slot_pq_slot2_get (insn) == 6 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_5;
+ if (Field_dsp340050b49a6c_fld2772pq_slot2_Slot_pq_slot2_get (insn) == 7 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_6;
+ if (Field_dsp340050b49a6c_fld2773pq_slot2_Slot_pq_slot2_get (insn) == 8 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_2;
+ if (Field_dsp340050b49a6c_fld2774pq_slot2_Slot_pq_slot2_get (insn) == 9 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_7;
+ if (Field_dsp340050b49a6c_fld2775pq_slot2_Slot_pq_slot2_get (insn) == 10 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_0;
+ if (Field_dsp340050b49a6c_fld2776pq_slot2_Slot_pq_slot2_get (insn) == 11 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_2;
+ if (Field_dsp340050b49a6c_fld2777pq_slot2_Slot_pq_slot2_get (insn) == 12 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_1;
+ if (Field_dsp340050b49a6c_fld2778pq_slot2_Slot_pq_slot2_get (insn) == 13 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_3;
+ if (Field_dsp340050b49a6c_fld2779pq_slot2_Slot_pq_slot2_get (insn) == 14 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_4;
+ if (Field_dsp340050b49a6c_fld2780pq_slot2_Slot_pq_slot2_get (insn) == 15 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_5;
+ if (Field_dsp340050b49a6c_fld2781pq_slot2_Slot_pq_slot2_get (insn) == 16 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_ASLM;
+ if (Field_dsp340050b49a6c_fld2782pq_slot2_Slot_pq_slot2_get (insn) == 17 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_ASRM;
+ if (Field_dsp340050b49a6c_fld2783pq_slot2_Slot_pq_slot2_get (insn) == 18 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_CMP8;
+ if (Field_dsp340050b49a6c_fld2784pq_slot2_Slot_pq_slot2_get (insn) == 19 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_LSLM;
+ if (Field_dsp340050b49a6c_fld2785pq_slot2_Slot_pq_slot2_get (insn) == 20 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_CMP_I;
+ if (Field_dsp340050b49a6c_fld2786pq_slot2_Slot_pq_slot2_get (insn) == 21 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_LSRM;
+ if (Field_dsp340050b49a6c_fld2787pq_slot2_Slot_pq_slot2_get (insn) == 262 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_ABS8;
+ if (Field_dsp340050b49a6c_fld2788pq_slot2_Slot_pq_slot2_get (insn) == 263 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2789pq_slot2_Slot_pq_slot2_get (insn) == 278 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_LUT_PHASOR;
+ if (Field_dsp340050b49a6c_fld2790pq_slot2_Slot_pq_slot2_get (insn) == 279 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_NOT128;
+ if (Field_dsp340050b49a6c_fld2791pq_slot2_Slot_pq_slot2_get (insn) == 294 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2792pq_slot2_Slot_pq_slot2_get (insn) == 295 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1)
+ return OPCODE_TRANS;
+ if (Field_dsp340050b49a6c_fld2793pq_slot2_Slot_pq_slot2_get (insn) == 2102 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2795pq_slot2_Slot_pq_slot2_get (insn) == 2103 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ if (Field_dsp340050b49a6c_fld2796pq_slot2_Slot_pq_slot2_get (insn) == 1179 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3746pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld2798pq_slot2_Slot_pq_slot2_get (insn) == 667 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3747pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ if (Field_dsp340050b49a6c_fld2801pq_slot2_Slot_pq_slot2_get (insn) == 411 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3749pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2803pq_slot2_Slot_pq_slot2_get (insn) == 43 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3754pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld2805pq_slot2_Slot_pq_slot2_get (insn) == 27 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3751pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2806pq_slot2_Slot_pq_slot2_get (insn) == 12 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2025_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_CMP_R;
+ if (Field_dsp340050b49a6c_fld2807pq_slot2_Slot_pq_slot2_get (insn) == 13 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3756pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2808pq_slot2_Slot_pq_slot2_get (insn) == 11 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3757pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld2809pq_slot2_Slot_pq_slot2_get (insn) == 15 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3753pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ if (Field_dsp340050b49a6c_fld2810pq_slot2_Slot_pq_slot2_get (insn) == 0 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2)
+ return OPCODE_MOVCND_6;
+ if (Field_dsp340050b49a6c_fld2811pq_slot2_Slot_pq_slot2_get (insn) == 1 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2)
+ return OPCODE_MOVCND_7;
+ if (Field_dsp340050b49a6c_fld2812pq_slot2_Slot_pq_slot2_get (insn) == 1 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3748_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_PERM;
+ if (Field_dsp340050b49a6c_fld2814pq_slot2_Slot_pq_slot2_get (insn) == 1 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3758pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ if (Field_dsp340050b49a6c_fld2816pq_slot2_Slot_pq_slot2_get (insn) == 1 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_imm7_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_dsp340050b49a6c_fld2818pq_slot2_Slot_pq_slot2_get (insn) == 8 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUBCM;
+ if (Field_dsp340050b49a6c_fld2820pq_slot2_Slot_pq_slot2_get (insn) == 9 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld2821pq_slot2_Slot_pq_slot2_get (insn) == 5 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3759pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUBWRP;
+ if (Field_dsp340050b49a6c_fld2823pq_slot2_Slot_pq_slot2_get (insn) == 3 &&
+ Field_op0_s9_Slot_pq_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3760pq_slot2_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_XOR128;
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 3)
+ return OPCODE_EXTUI;
+ switch (Field_sae_Slot_pq_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADD;
+ break;
+ case 1:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_ADDI_N;
+ break;
+ case 2:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_AND;
+ break;
+ case 3:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVGEZ;
+ break;
+ case 4:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_LUT_AR;
+ break;
+ case 5:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVLTZ;
+ break;
+ case 6:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVNEZ;
+ break;
+ case 7:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_OR;
+ break;
+ case 8:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVEQZ;
+ break;
+ case 9:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SUB;
+ break;
+ case 10:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_XOR;
+ break;
+ case 12:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ break;
+ case 15:
+ if (Field_op0_s9_Slot_pq_slot2_get (insn) == 0 &&
+ Field_t_Slot_pq_slot2_get (insn) == 0)
+ return OPCODE_SLL;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
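+/* Decoder for the pq_slot0 instruction slot: returns the OPCODE_* value
+   whose field encodings match INSN, or XTENSA_UNDEFINED otherwise.  */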
+static int
+Slot_pq_slot0_decode (const xtensa_insnbuf insn)
+{
+ if (Field_dsp340050b49a6c_fld2827pq_slot0_Slot_pq_slot0_get (insn) == 0 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC2X32;
+ if (Field_dsp340050b49a6c_fld2829pq_slot0_Slot_pq_slot0_get (insn) == 1 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC32_R;
+ if (Field_dsp340050b49a6c_fld2830pq_slot0_Slot_pq_slot0_get (insn) == 2 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC_IH;
+ if (Field_dsp340050b49a6c_fld2831pq_slot0_Slot_pq_slot0_get (insn) == 3 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC_RH;
+ if (Field_dsp340050b49a6c_fld2832pq_slot0_Slot_pq_slot0_get (insn) == 10 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC_RL;
+ if (Field_dsp340050b49a6c_fld2833pq_slot0_Slot_pq_slot0_get (insn) == 11 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_0;
+ if (Field_dsp340050b49a6c_fld2835pq_slot0_Slot_pq_slot0_get (insn) == 27 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_1;
+ if (Field_dsp340050b49a6c_fld2836pq_slot0_Slot_pq_slot0_get (insn) == 4 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC_IL;
+ if (Field_dsp340050b49a6c_fld2837pq_slot0_Slot_pq_slot0_get (insn) == 5 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_2;
+ switch (Field_dsp340050b49a6c_fld2838pq_slot0_Slot_pq_slot0_get (insn))
+ {
+ case 21:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ break;
+ case 53:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOV_N;
+ break;
+ case 85:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_NSA;
+ break;
+ case 117:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_PUSH32;
+ break;
+ case 149:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_NSAU;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2839pq_slot0_Slot_pq_slot0_get (insn) == 362 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld2840pq_slot0_Slot_pq_slot0_get (insn) == 1452 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_dsp340050b49a6c_fld2842pq_slot0_Slot_pq_slot0_get (insn) == 1453 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2843pq_slot0_Slot_pq_slot0_get (insn) == 2908 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2844pq_slot0_Slot_pq_slot0_get (insn) == 2909 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2845pq_slot0_Slot_pq_slot0_get (insn) == 2910 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2846pq_slot0_Slot_pq_slot0_get (insn) == 2911 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2847pq_slot0_Slot_pq_slot0_get (insn) == 852 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ANY4;
+ if (Field_dsp340050b49a6c_fld2849pq_slot0_Slot_pq_slot0_get (insn) == 3412 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2850pq_slot0_Slot_pq_slot0_get (insn) == 3413 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2851pq_slot0_Slot_pq_slot0_get (insn) == 3414 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2852pq_slot0_Slot_pq_slot0_get (insn) == 3415 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2853pq_slot0_Slot_pq_slot0_get (insn) == 3416 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2854pq_slot0_Slot_pq_slot0_get (insn) == 3417 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2855pq_slot0_Slot_pq_slot0_get (insn) == 1709 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_s8_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2856pq_slot0_Slot_pq_slot0_get (insn) == 855 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3765pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2857pq_slot0_Slot_pq_slot0_get (insn) == 1960 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SSAI;
+ switch (Field_dsp340050b49a6c_fld2858pq_slot0_Slot_pq_slot0_get (insn))
+ {
+ case 3922:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_POS;
+ break;
+ case 3923:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_PHASOR_OFFSET;
+ break;
+ case 3924:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_PERM_REG;
+ break;
+ case 3925:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SCALE_REG;
+ break;
+ case 3926:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_POS;
+ break;
+ case 3927:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SOV;
+ break;
+ case 3928:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_PHASOR_N;
+ break;
+ case 3929:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_WGHT;
+ break;
+ case 3930:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SSA8B;
+ break;
+ case 3931:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SSL;
+ break;
+ case 3932:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SSA8L;
+ break;
+ case 3933:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SSR;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2859pq_slot0_Slot_pq_slot0_get (insn) == 3934 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CLRAC;
+ if (Field_dsp340050b49a6c_fld2860pq_slot0_Slot_pq_slot0_get (insn) == 3935 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP16LLR_1;
+ if (Field_dsp340050b49a6c_fld2861pq_slot0_Slot_pq_slot0_get (insn) == 3 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3768pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LAC2X64_3;
+ switch (Field_dsp340050b49a6c_fld2863pq_slot0_Slot_pq_slot0_get (insn))
+ {
+ case 3:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ break;
+ case 7:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN_I;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2864pq_slot0_Slot_pq_slot0_get (insn) == 7 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2069_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN_R;
+ switch (Field_dsp340050b49a6c_fld2865_Slot_pq_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SAC2X32;
+ break;
+ case 1:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SAC32_R;
+ break;
+ case 2:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SCM_PINC;
+ break;
+ case 3:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SRAI;
+ break;
+ case 4:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SCM_U;
+ break;
+ case 8:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_SLLI;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2867pq_slot0_Slot_pq_slot0_get (insn) == 7 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3763pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_dsp340050b49a6c_fld2869pq_slot0_Slot_pq_slot0_get (insn) == 7 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3764pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_BNEZ_N;
+ if (Field_dsp340050b49a6c_fld2871pq_slot0_Slot_pq_slot0_get (insn) == 8 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LCM_PINC;
+ if (Field_dsp340050b49a6c_fld2872pq_slot0_Slot_pq_slot0_get (insn) == 9 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LCM_U;
+ if (Field_dsp340050b49a6c_fld2873pq_slot0_Slot_pq_slot0_get (insn) == 18 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_LCM;
+ if (Field_dsp340050b49a6c_fld2874pq_slot0_Slot_pq_slot0_get (insn) == 259 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld2875pq_slot0_Slot_pq_slot0_get (insn) == 267 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2876pq_slot0_Slot_pq_slot0_get (insn) == 291 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld2877pq_slot0_Slot_pq_slot0_get (insn) == 299 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2878pq_slot0_Slot_pq_slot0_get (insn) == 323 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld2879pq_slot0_Slot_pq_slot0_get (insn) == 331 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld2880pq_slot0_Slot_pq_slot0_get (insn) == 355 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_dsp340050b49a6c_fld2881pq_slot0_Slot_pq_slot0_get (insn) == 715 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CLB_C;
+ switch (Field_dsp340050b49a6c_fld2882_Slot_pq_slot0_get (insn))
+ {
+ case 21:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_EXT;
+ break;
+ case 23:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_EXT_R;
+ break;
+ case 33:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_MOVI_N;
+ break;
+ case 35:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_PUSH128_M;
+ break;
+ case 52:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_0;
+ break;
+ case 53:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_2;
+ break;
+ case 54:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_3;
+ break;
+ case 56:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC2X64_1;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2883pq_slot0_Slot_pq_slot0_get (insn) == 731 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CLB_R;
+ if (Field_dsp340050b49a6c_fld2884pq_slot0_Slot_pq_slot0_get (insn) == 387 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld2885pq_slot0_Slot_pq_slot0_get (insn) == 779 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MINCLB_C;
+ if (Field_dsp340050b49a6c_fld2886pq_slot0_Slot_pq_slot0_get (insn) == 795 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SMINCLB_R;
+ if (Field_dsp340050b49a6c_fld2887pq_slot0_Slot_pq_slot0_get (insn) == 835 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MINCLB_R;
+ if (Field_dsp340050b49a6c_fld2888pq_slot0_Slot_pq_slot0_get (insn) == 13571 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ADDAC_I2R;
+ if (Field_dsp340050b49a6c_fld2890pq_slot0_Slot_pq_slot0_get (insn) == 13587 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ADDAC_R2I;
+ if (Field_dsp340050b49a6c_fld2891pq_slot0_Slot_pq_slot0_get (insn) == 13603 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_REDAC;
+ if (Field_dsp340050b49a6c_fld2892pq_slot0_Slot_pq_slot0_get (insn) == 13619 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_REDACS;
+ if (Field_dsp340050b49a6c_fld2893pq_slot0_Slot_pq_slot0_get (insn) == 13635 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_REDAC2;
+ if (Field_dsp340050b49a6c_fld2894pq_slot0_Slot_pq_slot0_get (insn) == 13651 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_I2R;
+ if (Field_dsp340050b49a6c_fld2895pq_slot0_Slot_pq_slot0_get (insn) == 6835 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_R2I;
+ if (Field_dsp340050b49a6c_fld2897pq_slot0_Slot_pq_slot0_get (insn) == 1715 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3779pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_REDAC4;
+ if (Field_dsp340050b49a6c_fld2899pq_slot0_Slot_pq_slot0_get (insn) == 6667 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_L;
+ if (Field_dsp340050b49a6c_fld2900pq_slot0_Slot_pq_slot0_get (insn) == 6683 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_N;
+ if (Field_dsp340050b49a6c_fld2901pq_slot0_Slot_pq_slot0_get (insn) == 6699 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld2902pq_slot0_Slot_pq_slot0_get (insn) == 6715 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld2903pq_slot0_Slot_pq_slot0_get (insn) == 6731 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld2904pq_slot0_Slot_pq_slot0_get (insn) == 6747 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld2905pq_slot0_Slot_pq_slot0_get (insn) == 6763 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld2906pq_slot0_Slot_pq_slot0_get (insn) == 6779 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_SOV;
+ if (Field_dsp340050b49a6c_fld2907pq_slot0_Slot_pq_slot0_get (insn) == 6795 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld2908pq_slot0_Slot_pq_slot0_get (insn) == 6811 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld2909pq_slot0_Slot_pq_slot0_get (insn) == 6827 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2910pq_slot0_Slot_pq_slot0_get (insn) == 6843 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2911pq_slot0_Slot_pq_slot0_get (insn) == 6859 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2912pq_slot0_Slot_pq_slot0_get (insn) == 6875 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2913pq_slot0_Slot_pq_slot0_get (insn) == 6891 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2914pq_slot0_Slot_pq_slot0_get (insn) == 6907 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld2915pq_slot0_Slot_pq_slot0_get (insn) == 115 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3780pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SMINCLB_C;
+ if (Field_dsp340050b49a6c_fld2916pq_slot0_Slot_pq_slot0_get (insn) == 137 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_s8_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2917pq_slot0_Slot_pq_slot0_get (insn) == 277 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT32_I;
+ if (Field_dsp340050b49a6c_fld2918pq_slot0_Slot_pq_slot0_get (insn) == 285 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2919pq_slot0_Slot_pq_slot0_get (insn) == 305 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT32_R;
+ if (Field_dsp340050b49a6c_fld2920pq_slot0_Slot_pq_slot0_get (insn) == 309 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2921pq_slot0_Slot_pq_slot0_get (insn) == 313 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2922pq_slot0_Slot_pq_slot0_get (insn) == 317 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2923pq_slot0_Slot_pq_slot0_get (insn) == 337 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_0;
+ if (Field_dsp340050b49a6c_fld2924pq_slot0_Slot_pq_slot0_get (insn) == 341 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_2;
+ if (Field_dsp340050b49a6c_fld2925pq_slot0_Slot_pq_slot0_get (insn) == 173 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_R2FIFO_3;
+ if (Field_dsp340050b49a6c_fld2926pq_slot0_Slot_pq_slot0_get (insn) == 93 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3767pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld2927pq_slot0_Slot_pq_slot0_get (insn) == 29 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3773pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_1;
+ if (Field_dsp340050b49a6c_fld2928pq_slot0_Slot_pq_slot0_get (insn) == 9 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld2929pq_slot0_Slot_pq_slot0_get (insn) == 11 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_I;
+ if (Field_dsp340050b49a6c_fld2930pq_slot0_Slot_pq_slot0_get (insn) == 7 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3769pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_R;
+ if (Field_dsp340050b49a6c_fld2932pq_slot0_Slot_pq_slot0_get (insn) == 6 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC_IL;
+ if (Field_dsp340050b49a6c_fld2934pq_slot0_Slot_pq_slot0_get (insn) == 7 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC_RH;
+ if (Field_dsp340050b49a6c_fld2935pq_slot0_Slot_pq_slot0_get (insn) == 10 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC_RL;
+ if (Field_dsp340050b49a6c_fld2936pq_slot0_Slot_pq_slot0_get (insn) == 16 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SAC_IH;
+ if (Field_dsp340050b49a6c_fld2937pq_slot0_Slot_pq_slot0_get (insn) == 220 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_ASLACM;
+ if (Field_dsp340050b49a6c_fld2939pq_slot0_Slot_pq_slot0_get (insn) == 442 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3772_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_ASRAC;
+ if (Field_dsp340050b49a6c_fld2941pq_slot0_Slot_pq_slot0_get (insn) == 443 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3771pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SWAPAC_RI;
+ if (Field_dsp340050b49a6c_fld2942pq_slot0_Slot_pq_slot0_get (insn) == 111 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOVAC;
+ if (Field_dsp340050b49a6c_fld2943pq_slot0_Slot_pq_slot0_get (insn) == 114 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_0;
+ if (Field_dsp340050b49a6c_fld2945pq_slot0_Slot_pq_slot0_get (insn) == 115 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_3;
+ if (Field_dsp340050b49a6c_fld2946pq_slot0_Slot_pq_slot0_get (insn) == 496 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld2947pq_slot0_Slot_pq_slot0_get (insn) == 497 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_I;
+ if (Field_dsp340050b49a6c_fld2948pq_slot0_Slot_pq_slot0_get (insn) == 249 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3770pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_R;
+ if (Field_dsp340050b49a6c_fld2949pq_slot0_Slot_pq_slot0_get (insn) == 125 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3778pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2950pq_slot0_Slot_pq_slot0_get (insn) == 63 &&
+ Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 3)
+ return OPCODE_EXTUI;
+ switch (Field_sae_Slot_pq_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_MOVNEZ;
+ break;
+ case 1:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_MOVT;
+ break;
+ case 2:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_OR;
+ break;
+ case 3:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SCM;
+ break;
+ case 4:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_ORB;
+ break;
+ case 5:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SCM_PINC_X;
+ break;
+ case 8:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_S32I_N;
+ break;
+ case 9:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SCM_XU;
+ break;
+ case 10:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ADD;
+ break;
+ case 11:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ADDX2;
+ break;
+ case 12:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ADDI_N;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SCM_X;
+ break;
+ case 13:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ADDX4;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SEXT;
+ break;
+ case 14:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ADDX8;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SRC;
+ break;
+ case 15:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SRLI;
+ break;
+ case 18:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_ANDB;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_STORE_P;
+ break;
+ case 19:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_L32I_N;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SUBX2;
+ break;
+ case 20:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_CLAMPS;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_STORE_Q;
+ break;
+ case 21:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SUBX4;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_LCM_PINC_X;
+ break;
+ case 22:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SUBX8;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_LCM_X;
+ break;
+ case 23:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_XOR;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_LCM_XU;
+ break;
+ case 24:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_SUB;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_COMB_AR;
+ break;
+ case 25:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2)
+ return OPCODE_XORB;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MAX;
+ break;
+ case 26:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MAXU;
+ break;
+ case 27:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MINU;
+ break;
+ case 28:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MIN;
+ break;
+ case 29:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MOVEQZ;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3776pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_1;
+ break;
+ case 30:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MOVGEZ;
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3777pq_slot0_Slot_pq_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_2;
+ break;
+ case 31:
+ if (Field_op0_s11_Slot_pq_slot0_get (insn) == 1)
+ return OPCODE_MOVLTZ;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
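+/* Decode the acc2_slot0 instruction slot: match the machine-generated
+   field accessors against this slot's known encodings and return the
+   corresponding OPCODE_* value, or XTENSA_UNDEFINED if none match.  */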
+static int
+Slot_acc2_slot0_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2047_Slot_acc2_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3795acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_NEG;
+ break;
+ case 1:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2039_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_PUSH128;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2056_Slot_acc2_slot0_get (insn) == 0 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1)
+ return OPCODE_MOVCM2PQ;
+ if (Field_dsp340050b49a6c_fld2973acc2_slot0_Slot_acc2_slot0_get (insn) == 0 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_dsp340050b49a6c_fld2974acc2_slot0_Slot_acc2_slot0_get (insn) == 40 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ switch (Field_dsp340050b49a6c_fld2975acc2_slot0_Slot_acc2_slot0_get (insn))
+ {
+ case 82:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_1;
+ break;
+ case 83:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_4;
+ break;
+ case 84:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_2;
+ break;
+ case 85:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_5;
+ break;
+ case 88:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_3;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2976acc2_slot0_Slot_acc2_slot0_get (insn))
+ {
+ case 172:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_CLRCM;
+ break;
+ case 173:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_0;
+ break;
+ case 174:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_1;
+ break;
+ case 175:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2;
+ break;
+ case 178:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_3;
+ break;
+ case 179:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_01;
+ break;
+ case 180:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_4;
+ break;
+ case 181:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_03;
+ break;
+ case 182:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_21;
+ break;
+ case 183:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_23;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2977acc2_slot0_Slot_acc2_slot0_get (insn) == 46 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3797_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2980acc2_slot0_Slot_acc2_slot0_get (insn) == 47 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3796acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2981acc2_slot0_Slot_acc2_slot0_get (insn) == 3 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3800acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP128_2PQ_0;
+ if (Field_dsp340050b49a6c_fld2982acc2_slot0_Slot_acc2_slot0_get (insn) == 24 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2984acc2_slot0_Slot_acc2_slot0_get (insn) == 25 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2985acc2_slot0_Slot_acc2_slot0_get (insn) == 13 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3802acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2987acc2_slot0_Slot_acc2_slot0_get (insn) == 7 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3803acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2989acc2_slot0_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3798acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2990acc2_slot0_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3799acc2_slot0_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_NEGCM;
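+  /* These encodings are distinguished by the op0_s14 opcode field alone.  */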
+ switch (Field_op0_s14_Slot_acc2_slot0_get (insn))
+ {
+ case 3:
+ return OPCODE_ADD;
+ case 4:
+ return OPCODE_ADDI_N;
+ case 5:
+ return OPCODE_LCM_X;
+ case 6:
+ return OPCODE_LCM_XU;
+ case 7:
+ return OPCODE_SCM_X;
+ case 8:
+ return OPCODE_SCM_XU;
+ case 9:
+ return OPCODE_SUB;
+ }
+ switch (Field_r_Slot_acc2_slot0_get (insn))
+ {
+ case 8:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_MOV_N;
+ break;
+ case 9:
+ if (Field_op0_s14_Slot_acc2_slot0_get (insn) == 0)
+ return OPCODE_PUSH128_PQ;
+ break;
+ }
+ if (Field_t_Slot_acc2_slot0_get (insn) == 2 &&
+ Field_op0_s14_Slot_acc2_slot0_get (insn) == 1)
+ return OPCODE_CONJ;
+ return XTENSA_UNDEFINED;
+}
+
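+/* Decode the smod_slot0 instruction slot (same field-match scheme as the
+   other slot decoders).  */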
+static int
+Slot_smod_slot0_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld3119smod_slot0_Slot_smod_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM_PINC;
+ break;
+ case 1:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM_U;
+ break;
+ case 2:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SCM_PINC;
+ break;
+ case 3:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SRAI;
+ break;
+ case 4:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SCM_U;
+ break;
+ case 8:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SLLI;
+ break;
+ case 13:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3841smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_PUSH128;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3120smod_slot0_Slot_smod_slot0_get (insn))
+ {
+ case 76:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_LN;
+ break;
+ case 77:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_LN_I;
+ break;
+ case 78:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_LN_R;
+ break;
+ case 79:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_BEQZ_N;
+ break;
+ case 82:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3121smod_slot0_Slot_smod_slot0_get (insn))
+ {
+ case 40:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOVI_N;
+ break;
+ case 47:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2049_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_3;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3122smod_slot0_Slot_smod_slot0_get (insn))
+ {
+ case 332:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_DUP;
+ break;
+ case 333:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOV_N;
+ break;
+ case 334:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_NSA;
+ break;
+ case 335:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_NSAU;
+ break;
+ case 371:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_PUSH32;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3123smod_slot0_Slot_smod_slot0_get (insn) == 84 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld3125smod_slot0_Slot_smod_slot0_get (insn) == 85 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_CM2AR_LN_R;
+ if (Field_dsp340050b49a6c_fld3126smod_slot0_Slot_smod_slot0_get (insn) == 344 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld3127smod_slot0_Slot_smod_slot0_get (insn) == 345 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld3128smod_slot0_Slot_smod_slot0_get (insn) == 346 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld3129smod_slot0_Slot_smod_slot0_get (insn) == 347 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld3130smod_slot0_Slot_smod_slot0_get (insn) == 348 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld3131smod_slot0_Slot_smod_slot0_get (insn) == 349 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld3132smod_slot0_Slot_smod_slot0_get (insn) == 350 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3133smod_slot0_Slot_smod_slot0_get (insn) == 351 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld3134smod_slot0_Slot_smod_slot0_get (insn) == 88 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_CM2AR_LN_I;
+ if (Field_dsp340050b49a6c_fld3135smod_slot0_Slot_smod_slot0_get (insn) == 356 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SRL;
+ if (Field_dsp340050b49a6c_fld3136smod_slot0_Slot_smod_slot0_get (insn) == 5637 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld3137smod_slot0_Slot_smod_slot0_get (insn) == 5653 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3138smod_slot0_Slot_smod_slot0_get (insn) == 5669 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3139smod_slot0_Slot_smod_slot0_get (insn) == 5685 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3140smod_slot0_Slot_smod_slot0_get (insn) == 5701 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3141smod_slot0_Slot_smod_slot0_get (insn) == 5717 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3142smod_slot0_Slot_smod_slot0_get (insn) == 5733 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3143smod_slot0_Slot_smod_slot0_get (insn) == 5749 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3144smod_slot0_Slot_smod_slot0_get (insn) == 5765 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3145smod_slot0_Slot_smod_slot0_get (insn) == 5781 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld3146smod_slot0_Slot_smod_slot0_get (insn) == 2901 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3150_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3148smod_slot0_Slot_smod_slot0_get (insn) == 1461 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_imm6_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3149smod_slot0_Slot_smod_slot0_get (insn) == 355 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_dsp340050b49a6c_fld3152smod_slot0_Slot_smod_slot0_get (insn) == 363 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3153smod_slot0_Slot_smod_slot0_get (insn) == 89 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld3155smod_slot0_Slot_smod_slot0_get (insn) == 91 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3833smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ANY4;
+ if (Field_dsp340050b49a6c_fld3156smod_slot0_Slot_smod_slot0_get (insn) == 184 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_2M_0;
+ if (Field_dsp340050b49a6c_fld3157smod_slot0_Slot_smod_slot0_get (insn) == 185 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_2M_1;
+ if (Field_dsp340050b49a6c_fld3158smod_slot0_Slot_smod_slot0_get (insn) == 186 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP128_2M_2;
+ if (Field_dsp340050b49a6c_fld3159smod_slot0_Slot_smod_slot0_get (insn) == 3000 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SSAI;
+ switch (Field_dsp340050b49a6c_fld3160smod_slot0_Slot_smod_slot0_get (insn))
+ {
+ case 6002:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_LLR_POS;
+ break;
+ case 6003:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_PHASOR_OFFSET;
+ break;
+ case 6004:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_PERM_REG;
+ break;
+ case 6005:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_SCALE_REG;
+ break;
+ case 6006:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_SMOD_POS;
+ break;
+ case 6007:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_SOV;
+ break;
+ case 6008:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_PHASOR_N;
+ break;
+ case 6009:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_WGHT;
+ break;
+ case 6010:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SSA8B;
+ break;
+ case 6011:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SSL;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3161smod_slot0_Slot_smod_slot0_get (insn) == 3006 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_dsp340050b49a6c_fld3164smod_slot0_Slot_smod_slot0_get (insn) == 3007 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_dsp340050b49a6c_fld3165smod_slot0_Slot_smod_slot0_get (insn) == 400 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld3166smod_slot0_Slot_smod_slot0_get (insn) == 3208 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3168smod_slot0_Slot_smod_slot0_get (insn) == 6418 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld3170smod_slot0_Slot_smod_slot0_get (insn) == 6419 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld3171smod_slot0_Slot_smod_slot0_get (insn) == 6420 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld3172smod_slot0_Slot_smod_slot0_get (insn) == 6421 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld3173smod_slot0_Slot_smod_slot0_get (insn) == 6422 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_SOV;
+ if (Field_dsp340050b49a6c_fld3174smod_slot0_Slot_smod_slot0_get (insn) == 6423 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld3175smod_slot0_Slot_smod_slot0_get (insn) == 6424 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_GET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld3176smod_slot0_Slot_smod_slot0_get (insn) == 6425 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3177smod_slot0_Slot_smod_slot0_get (insn) == 6426 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3178smod_slot0_Slot_smod_slot0_get (insn) == 6427 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3179smod_slot0_Slot_smod_slot0_get (insn) == 6428 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3180smod_slot0_Slot_smod_slot0_get (insn) == 6429 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld3181smod_slot0_Slot_smod_slot0_get (insn) == 3215 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_s8_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3182smod_slot0_Slot_smod_slot0_get (insn) == 201 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_sas_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_L;
+ if (Field_dsp340050b49a6c_fld3184smod_slot0_Slot_smod_slot0_get (insn) == 101 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3836smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_GET_INTERP_EXT_N;
+ if (Field_dsp340050b49a6c_fld3186smod_slot0_Slot_smod_slot0_get (insn) == 51 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3837smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld3188smod_slot0_Slot_smod_slot0_get (insn) == 7 &&
+ Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3838smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 2)
+ return OPCODE_EXTUI;
+ switch (Field_sae_Slot_smod_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOVLTZ;
+ break;
+ case 1:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOVNEZ;
+ break;
+ case 2:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_MOVT;
+ break;
+ case 3:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SCM;
+ break;
+ case 4:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_OR;
+ break;
+ case 5:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SCM_PINC_X;
+ break;
+ case 6:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SCM_X;
+ break;
+ case 7:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SCM_XU;
+ break;
+ case 8:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_ORB;
+ break;
+ case 9:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SEXT;
+ break;
+ case 10:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ADD;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SRC;
+ break;
+ case 11:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SUB;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ADDX2;
+ break;
+ case 12:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ADDI_N;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SRLI;
+ break;
+ case 13:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ADDX4;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SUBX2;
+ break;
+ case 14:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ADDX8;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SUBX4;
+ break;
+ case 15:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_SUBX8;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_AND;
+ break;
+ case 16:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_S32I_N;
+ break;
+ case 17:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_XOR;
+ break;
+ case 18:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_ANDB;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1)
+ return OPCODE_XORB;
+ break;
+ case 19:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_L32I_N;
+ break;
+ case 20:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_CLAMPS;
+ break;
+ case 21:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM;
+ break;
+ case 22:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM_PINC_X;
+ break;
+ case 23:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM_X;
+ break;
+ case 24:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_COMB_AR;
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3842smod_slot0_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_PUSH128_M;
+ break;
+ case 25:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_LCM_XU;
+ break;
+ case 26:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MAX;
+ break;
+ case 27:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MIN;
+ break;
+ case 28:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MAXU;
+ break;
+ case 29:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MINU;
+ break;
+ case 30:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MOVEQZ;
+ break;
+ case 31:
+ if (Field_op0_s17_Slot_smod_slot0_get (insn) == 0)
+ return OPCODE_MOVGEZ;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
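+/* Decode the llr_slot0 instruction slot.  */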
+static int
+Slot_llr_slot0_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2071_Slot_llr_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 4)
+ return OPCODE_S32I_N;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_ADD;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 6)
+ return OPCODE_SUB;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 2)
+ return OPCODE_LCM;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 3)
+ return OPCODE_LCM_XU;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 5)
+ return OPCODE_SCM_X;
+ break;
+ case 1:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_ADDI_N;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 1)
+ return OPCODE_L32I_N;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 3)
+ return OPCODE_OR;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 6)
+ return OPCODE_XOR;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3881llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 2)
+ return OPCODE_LCM_X;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 4)
+ return OPCODE_SCM;
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 5)
+ return OPCODE_SCM_XU;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3258llr_slot0_Slot_llr_slot0_get (insn) == 0 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_MOVI_N;
+ if (Field_dsp340050b49a6c_fld3259llr_slot0_Slot_llr_slot0_get (insn) == 2 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_AR2CM_LN;
+ switch (Field_dsp340050b49a6c_fld3260llr_slot0_Slot_llr_slot0_get (insn))
+ {
+ case 12:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_AR2CM_DUP;
+ break;
+ case 13:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_MOV_N;
+ break;
+ case 14:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 15:
+ if (Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_PUSH32;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3261llr_slot0_Slot_llr_slot0_get (insn) == 4 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld3263llr_slot0_Slot_llr_slot0_get (insn) == 20 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld3264llr_slot0_Slot_llr_slot0_get (insn) == 21 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld3265llr_slot0_Slot_llr_slot0_get (insn) == 22 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld3266llr_slot0_Slot_llr_slot0_get (insn) == 23 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld3267llr_slot0_Slot_llr_slot0_get (insn) == 24 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld3268llr_slot0_Slot_llr_slot0_get (insn) == 25 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3269llr_slot0_Slot_llr_slot0_get (insn) == 26 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld3270llr_slot0_Slot_llr_slot0_get (insn) == 43 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld3272llr_slot0_Slot_llr_slot0_get (insn) == 283 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld3274llr_slot0_Slot_llr_slot0_get (insn) == 315 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld3275llr_slot0_Slot_llr_slot0_get (insn) == 347 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld3276llr_slot0_Slot_llr_slot0_get (insn) == 379 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld3277llr_slot0_Slot_llr_slot0_get (insn) == 411 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld3278llr_slot0_Slot_llr_slot0_get (insn) == 443 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3279llr_slot0_Slot_llr_slot0_get (insn) == 475 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3280llr_slot0_Slot_llr_slot0_get (insn) == 507 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3281llr_slot0_Slot_llr_slot0_get (insn) == 44 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld3282llr_slot0_Slot_llr_slot0_get (insn) == 269 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3283llr_slot0_Slot_llr_slot0_get (insn) == 301 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld3284llr_slot0_Slot_llr_slot0_get (insn) == 173 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2041_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3286llr_slot0_Slot_llr_slot0_get (insn) == 109 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3879llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3288llr_slot0_Slot_llr_slot0_get (insn) == 30 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3289llr_slot0_Slot_llr_slot0_get (insn) == 135 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3291llr_slot0_Slot_llr_slot0_get (insn) == 286 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld3292llr_slot0_Slot_llr_slot0_get (insn) == 287 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3293llr_slot0_Slot_llr_slot0_get (insn) == 302 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3294llr_slot0_Slot_llr_slot0_get (insn) == 303 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3295llr_slot0_Slot_llr_slot0_get (insn) == 318 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3296llr_slot0_Slot_llr_slot0_get (insn) == 319 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3297llr_slot0_Slot_llr_slot0_get (insn) == 334 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3298llr_slot0_Slot_llr_slot0_get (insn) == 335 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3299llr_slot0_Slot_llr_slot0_get (insn) == 175 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld3300llr_slot0_Slot_llr_slot0_get (insn) == 95 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3893llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3302llr_slot0_Slot_llr_slot0_get (insn) == 31 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 7 &&
+ Field_dsp340050b49a6c_fld3883llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3303llr_slot0_Slot_llr_slot0_get (insn) == 0 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld3304llr_slot0_Slot_llr_slot0_get (insn) == 1 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld3305llr_slot0_Slot_llr_slot0_get (insn) == 17 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld3306llr_slot0_Slot_llr_slot0_get (insn) == 17 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3890llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_WGHT;
+ if (Field_dsp340050b49a6c_fld3308llr_slot0_Slot_llr_slot0_get (insn) == 17 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3892llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_POP16LLR_1;
+ if (Field_dsp340050b49a6c_fld3310llr_slot0_Slot_llr_slot0_get (insn) == 1 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3888llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld3311llr_slot0_Slot_llr_slot0_get (insn) == 1 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_r_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ if (Field_dsp340050b49a6c_fld3312llr_slot0_Slot_llr_slot0_get (insn) == 1 &&
+ Field_op0_s20_Slot_llr_slot0_get (insn) == 8 &&
+ Field_dsp340050b49a6c_fld3885llr_slot0_Slot_llr_slot0_get (insn) == 0)
+ return OPCODE_SET_LLR_POS;
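+  /* These encodings are distinguished by the op0_s20 opcode field alone.  */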
+ switch (Field_op0_s20_Slot_llr_slot0_get (insn))
+ {
+ case 9:
+ return OPCODE_LCM_U;
+ case 10:
+ return OPCODE_SCM_U;
+ case 11:
+ return OPCODE_SLLI;
+ case 12:
+ return OPCODE_SRAI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
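+/* Decode slot 2 of the dual instruction format.  */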
+static int
+Slot_dual_slot2_decode (const xtensa_insnbuf insn)
+{
+ if (Field_dsp340050b49a6c_fld2044_Slot_dual_slot2_get (insn) == 0 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_EXTUI;
+ if (Field_dsp340050b49a6c_fld2056_Slot_dual_slot2_get (insn) == 5 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3923dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ if (Field_dsp340050b49a6c_fld3313dual_slot2_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3904dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BNEZ;
+ if (Field_dsp340050b49a6c_fld3315dual_slot2_Slot_dual_slot2_get (insn) == 17 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BLTUI;
+ if (Field_dsp340050b49a6c_fld3316dual_slot2_Slot_dual_slot2_get (insn) == 33 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BNEI;
+ if (Field_dsp340050b49a6c_fld3317dual_slot2_Slot_dual_slot2_get (insn) == 769 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ switch (Field_dsp340050b49a6c_fld3318_Slot_dual_slot2_get (insn))
+ {
+ case 2:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_BEQZ;
+ break;
+ case 4:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_BGEZ;
+ break;
+ case 6:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_s_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ASRM;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3319dual_slot2_Slot_dual_slot2_get (insn) == 785 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_dsp340050b49a6c_fld3320dual_slot2_Slot_dual_slot2_get (insn) == 801 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_dsp340050b49a6c_fld3321dual_slot2_Slot_dual_slot2_get (insn) == 817 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ if (Field_dsp340050b49a6c_fld3322dual_slot2_Slot_dual_slot2_get (insn) == 833 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ if (Field_dsp340050b49a6c_fld3323dual_slot2_Slot_dual_slot2_get (insn) == 849 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ if (Field_dsp340050b49a6c_fld3324dual_slot2_Slot_dual_slot2_get (insn) == 865 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ if (Field_dsp340050b49a6c_fld3325dual_slot2_Slot_dual_slot2_get (insn) == 881 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ if (Field_dsp340050b49a6c_fld3326dual_slot2_Slot_dual_slot2_get (insn) == 897 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ if (Field_dsp340050b49a6c_fld3327dual_slot2_Slot_dual_slot2_get (insn) == 913 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ if (Field_dsp340050b49a6c_fld3328dual_slot2_Slot_dual_slot2_get (insn) == 929 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ if (Field_dsp340050b49a6c_fld3329dual_slot2_Slot_dual_slot2_get (insn) == 945 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ if (Field_dsp340050b49a6c_fld3330dual_slot2_Slot_dual_slot2_get (insn) == 961 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ if (Field_dsp340050b49a6c_fld3331dual_slot2_Slot_dual_slot2_get (insn) == 977 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ if (Field_dsp340050b49a6c_fld3332dual_slot2_Slot_dual_slot2_get (insn) == 993 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_OR128;
+ if (Field_dsp340050b49a6c_fld3333dual_slot2_Slot_dual_slot2_get (insn) == 1009 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SUB32;
+ if (Field_dsp340050b49a6c_fld3334dual_slot2_Slot_dual_slot2_get (insn) == 2 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BGEI;
+ if (Field_dsp340050b49a6c_fld3335dual_slot2_Slot_dual_slot2_get (insn) == 3 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SUBCM;
+ if (Field_dsp340050b49a6c_fld3336dual_slot2_Slot_dual_slot2_get (insn) == 19 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_XOR128;
+ if (Field_dsp340050b49a6c_fld3337dual_slot2_Slot_dual_slot2_get (insn) == 560 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ABS8;
+ if (Field_dsp340050b49a6c_fld3339dual_slot2_Slot_dual_slot2_get (insn) == 561 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld3340dual_slot2_Slot_dual_slot2_get (insn) == 562 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LUT_PHASOR;
+ if (Field_dsp340050b49a6c_fld3341dual_slot2_Slot_dual_slot2_get (insn) == 563 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld3342dual_slot2_Slot_dual_slot2_get (insn) == 525 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ if (Field_dsp340050b49a6c_fld3345dual_slot2_Slot_dual_slot2_get (insn) == 9268 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld3347dual_slot2_Slot_dual_slot2_get (insn) == 9269 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld3348dual_slot2_Slot_dual_slot2_get (insn) == 9270 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ switch (Field_dsp340050b49a6c_fld3349dual_slot2_Slot_dual_slot2_get (insn))
+ {
+ case 132151:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ break;
+ case 136247:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 140343:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 144439:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 148535:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ break;
+ case 152631:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 156727:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ break;
+ case 160823:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3350dual_slot2_Slot_dual_slot2_get (insn) == 42039 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ if (Field_dsp340050b49a6c_fld3353dual_slot2_Slot_dual_slot2_get (insn) == 46135 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3354dual_slot2_Slot_dual_slot2_get (insn) == 2381 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_s4_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld3356dual_slot2_Slot_dual_slot2_get (insn) == 1229 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3924dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ if (Field_dsp340050b49a6c_fld3358dual_slot2_Slot_dual_slot2_get (insn) == 333 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3918dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ if (Field_dsp340050b49a6c_fld3360dual_slot2_Slot_dual_slot2_get (insn) == 568 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld3361dual_slot2_Slot_dual_slot2_get (insn) == 569 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3362dual_slot2_Slot_dual_slot2_get (insn) == 570 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_NOT128;
+ if (Field_dsp340050b49a6c_fld3363dual_slot2_Slot_dual_slot2_get (insn) == 8251 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_CLRCM;
+ switch (Field_dsp340050b49a6c_fld3364_Slot_dual_slot2_get (insn))
+ {
+ case 52:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ADD;
+ break;
+ case 53:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ADDX2;
+ break;
+ case 54:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ADDX4;
+ break;
+ case 55:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ADDX8;
+ break;
+ case 56:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ADDI_N;
+ break;
+ case 57:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_AND;
+ break;
+ case 58:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ANDB;
+ break;
+ case 59:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_LUT_AR;
+ break;
+ case 60:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_CLAMPS;
+ break;
+ case 61:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MAX;
+ break;
+ case 62:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MAXU;
+ break;
+ case 63:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MIN;
+ break;
+ case 112:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MINU;
+ break;
+ case 113:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MOVEQZ;
+ break;
+ case 114:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MOVGEZ;
+ break;
+ case 115:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MOVT;
+ break;
+ case 116:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MOVLTZ;
+ break;
+ case 117:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_OR;
+ break;
+ case 118:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ORB;
+ break;
+ case 119:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SEXT;
+ break;
+ case 120:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_MOVNEZ;
+ break;
+ case 121:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SRC;
+ break;
+ case 122:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SRLI;
+ break;
+ case 123:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SUBX2;
+ break;
+ case 124:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SUB;
+ break;
+ case 125:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SUBX4;
+ break;
+ case 126:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SUBX8;
+ break;
+ case 127:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_XOR;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3365dual_slot2_Slot_dual_slot2_get (insn) == 8507 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld3366dual_slot2_Slot_dual_slot2_get (insn) == 8763 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3367dual_slot2_Slot_dual_slot2_get (insn) == 9019 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld3368dual_slot2_Slot_dual_slot2_get (insn) == 9275 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3369dual_slot2_Slot_dual_slot2_get (insn) == 9531 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3370dual_slot2_Slot_dual_slot2_get (insn) == 9787 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3371dual_slot2_Slot_dual_slot2_get (insn) == 10043 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3372dual_slot2_Slot_dual_slot2_get (insn) == 10299 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld3373dual_slot2_Slot_dual_slot2_get (insn) == 10555 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3374dual_slot2_Slot_dual_slot2_get (insn) == 10811 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3375dual_slot2_Slot_dual_slot2_get (insn) == 11067 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3376dual_slot2_Slot_dual_slot2_get (insn) == 11323 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld3377dual_slot2_Slot_dual_slot2_get (insn) == 11579 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3378dual_slot2_Slot_dual_slot2_get (insn) == 11835 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3379dual_slot2_Slot_dual_slot2_get (insn) == 12091 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3380dual_slot2_Slot_dual_slot2_get (insn) == 572 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_TRANS;
+ if (Field_dsp340050b49a6c_fld3381dual_slot2_Slot_dual_slot2_get (insn) == 573 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3382dual_slot2_Slot_dual_slot2_get (insn) == 287 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3928dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld3384dual_slot2_Slot_dual_slot2_get (insn) == 771 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVAR2;
+ if (Field_dsp340050b49a6c_fld3385dual_slot2_Slot_dual_slot2_get (insn) == 787 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld3386dual_slot2_Slot_dual_slot2_get (insn) == 803 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SWAPB;
+ switch (Field_dsp340050b49a6c_fld3387dual_slot2_Slot_dual_slot2_get (insn))
+ {
+ case 13059:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SSA8L;
+ break;
+ case 13075:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SSL;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3388dual_slot2_Slot_dual_slot2_get (insn) == 6547 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3913dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_dsp340050b49a6c_fld3390dual_slot2_Slot_dual_slot2_get (insn) == 3283 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQ;
+ if (Field_dsp340050b49a6c_fld3392dual_slot2_Slot_dual_slot2_get (insn) == 1651 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3900_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ if (Field_dsp340050b49a6c_fld3394dual_slot2_Slot_dual_slot2_get (insn) == 211 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3920dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_AR2SAR_DUP;
+ if (Field_dsp340050b49a6c_fld3396dual_slot2_Slot_dual_slot2_get (insn) == 115 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3909dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_dsp340050b49a6c_fld3397dual_slot2_Slot_dual_slot2_get (insn) == 19 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld3399dual_slot2_Slot_dual_slot2_get (insn) == 19 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3931dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_dsp340050b49a6c_fld3401dual_slot2_Slot_dual_slot2_get (insn) == 33 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3934dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld3403dual_slot2_Slot_dual_slot2_get (insn) == 41 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3922dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld3404dual_slot2_Slot_dual_slot2_get (insn) == 25 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3910dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_dsp340050b49a6c_fld3406dual_slot2_Slot_dual_slot2_get (insn) == 9 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3935dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SUBWRP;
+ if (Field_dsp340050b49a6c_fld3407_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3905dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVI;
+ if (Field_dsp340050b49a6c_fld3408dual_slot2_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3901dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BGEUI;
+ if (Field_dsp340050b49a6c_fld3410_Slot_dual_slot2_get (insn) == 3 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3929dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ if (Field_dsp340050b49a6c_fld3411dual_slot2_Slot_dual_slot2_get (insn) == 5 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3933dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ if (Field_dsp340050b49a6c_fld3412dual_slot2_Slot_dual_slot2_get (insn) == 18 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3927dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld3413dual_slot2_Slot_dual_slot2_get (insn) == 19 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3930dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ if (Field_dsp340050b49a6c_fld3414dual_slot2_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_ADDMI;
+ if (Field_dsp340050b49a6c_fld3415dual_slot2_Slot_dual_slot2_get (insn) == 17 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BALL;
+ if (Field_dsp340050b49a6c_fld3416dual_slot2_Slot_dual_slot2_get (insn) == 33 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BANY;
+ if (Field_dsp340050b49a6c_fld3417dual_slot2_Slot_dual_slot2_get (insn) == 49 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BBC;
+ if (Field_dsp340050b49a6c_fld3418dual_slot2_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BBCI;
+ if (Field_dsp340050b49a6c_fld3419dual_slot2_Slot_dual_slot2_get (insn) == 18 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BBS;
+ if (Field_dsp340050b49a6c_fld3420dual_slot2_Slot_dual_slot2_get (insn) == 19 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BGE;
+ if (Field_dsp340050b49a6c_fld3421dual_slot2_Slot_dual_slot2_get (insn) == 34 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_dsp340050b49a6c_fld3422dual_slot2_Slot_dual_slot2_get (insn) == 35 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BGEU;
+ if (Field_dsp340050b49a6c_fld3423dual_slot2_Slot_dual_slot2_get (insn) == 50 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BLT;
+ if (Field_dsp340050b49a6c_fld3424dual_slot2_Slot_dual_slot2_get (insn) == 51 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BLTU;
+ if (Field_dsp340050b49a6c_fld3425dual_slot2_Slot_dual_slot2_get (insn) == 2 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BBSI;
+ if (Field_dsp340050b49a6c_fld3426dual_slot2_Slot_dual_slot2_get (insn) == 6 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BNALL;
+ if (Field_dsp340050b49a6c_fld3427dual_slot2_Slot_dual_slot2_get (insn) == 7 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_ASL32;
+ if (Field_dsp340050b49a6c_fld3428dual_slot2_Slot_dual_slot2_get (insn) == 23 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_ASR32;
+ if (Field_dsp340050b49a6c_fld3429dual_slot2_Slot_dual_slot2_get (insn) == 39 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_0;
+ if (Field_dsp340050b49a6c_fld3430dual_slot2_Slot_dual_slot2_get (insn) == 55 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_3;
+ if (Field_dsp340050b49a6c_fld3431dual_slot2_Slot_dual_slot2_get (insn) == 71 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_1;
+ if (Field_dsp340050b49a6c_fld3432dual_slot2_Slot_dual_slot2_get (insn) == 87 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_4;
+ if (Field_dsp340050b49a6c_fld3433dual_slot2_Slot_dual_slot2_get (insn) == 103 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_5;
+ if (Field_dsp340050b49a6c_fld3434dual_slot2_Slot_dual_slot2_get (insn) == 119 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_6;
+ if (Field_dsp340050b49a6c_fld3435dual_slot2_Slot_dual_slot2_get (insn) == 135 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_2;
+ if (Field_dsp340050b49a6c_fld3436dual_slot2_Slot_dual_slot2_get (insn) == 151 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND8_7;
+ if (Field_dsp340050b49a6c_fld3437dual_slot2_Slot_dual_slot2_get (insn) == 167 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_0;
+ if (Field_dsp340050b49a6c_fld3438dual_slot2_Slot_dual_slot2_get (insn) == 183 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_2;
+ if (Field_dsp340050b49a6c_fld3439dual_slot2_Slot_dual_slot2_get (insn) == 199 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_1;
+ if (Field_dsp340050b49a6c_fld3440dual_slot2_Slot_dual_slot2_get (insn) == 215 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_3;
+ if (Field_dsp340050b49a6c_fld3441dual_slot2_Slot_dual_slot2_get (insn) == 231 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_4;
+ if (Field_dsp340050b49a6c_fld3442dual_slot2_Slot_dual_slot2_get (insn) == 247 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_5;
+ if (Field_dsp340050b49a6c_fld3443dual_slot2_Slot_dual_slot2_get (insn) == 20 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BNE;
+ if (Field_dsp340050b49a6c_fld3444dual_slot2_Slot_dual_slot2_get (insn) == 261 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_6;
+ if (Field_dsp340050b49a6c_fld3445dual_slot2_Slot_dual_slot2_get (insn) == 277 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_PERM;
+ if (Field_dsp340050b49a6c_fld3446dual_slot2_Slot_dual_slot2_get (insn) == 1172 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld3448dual_slot2_Slot_dual_slot2_get (insn) == 2346 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3925dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld3450dual_slot2_Slot_dual_slot2_get (insn) == 2347 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_dsp340050b49a6c_fld3451dual_slot2_Slot_dual_slot2_get (insn) == 587 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3914dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ANY4;
+ switch (Field_dsp340050b49a6c_fld3453dual_slot2_Slot_dual_slot2_get (insn))
+ {
+ case 4869:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_NSA;
+ break;
+ case 4885:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_NSAU;
+ break;
+ case 4901:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 4917:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_SUBARX;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3454dual_slot2_Slot_dual_slot2_get (insn) == 2469 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_PUSH32;
+ if (Field_dsp340050b49a6c_fld3456dual_slot2_Slot_dual_slot2_get (insn) == 2485 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_WRTIEP;
+ if (Field_dsp340050b49a6c_fld3457dual_slot2_Slot_dual_slot2_get (insn) == 629 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3917dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ if (Field_dsp340050b49a6c_fld3458dual_slot2_Slot_dual_slot2_get (insn) == 85 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ADDAR2;
+ if (Field_dsp340050b49a6c_fld3459dual_slot2_Slot_dual_slot2_get (insn) == 53 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3896dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_dsp340050b49a6c_fld3460dual_slot2_Slot_dual_slot2_get (insn) == 11 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3921dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_7;
+ if (Field_dsp340050b49a6c_fld3461dual_slot2_Slot_dual_slot2_get (insn) == 9 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_dsp340050b49a6c_fld3462dual_slot2_Slot_dual_slot2_get (insn) == 49 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ if (Field_dsp340050b49a6c_fld3464dual_slot2_Slot_dual_slot2_get (insn) == 53 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_dsp340050b49a6c_fld3465dual_slot2_Slot_dual_slot2_get (insn) == 29 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3895dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BNEZ_N;
+ if (Field_dsp340050b49a6c_fld3467dual_slot2_Slot_dual_slot2_get (insn) == 1 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3899dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ADDI;
+ if (Field_dsp340050b49a6c_fld3468dual_slot2_Slot_dual_slot2_get (insn) == 3 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3897dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ switch (Field_dsp340050b49a6c_fld3469dual_slot2_Slot_dual_slot2_get (insn))
+ {
+ case 24:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SLLI;
+ break;
+ case 25:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_SRAI;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3470dual_slot2_Slot_dual_slot2_get (insn) == 80 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_ASLM;
+ if (Field_dsp340050b49a6c_fld3471dual_slot2_Slot_dual_slot2_get (insn) == 81 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_CMP8;
+ if (Field_dsp340050b49a6c_fld3472dual_slot2_Slot_dual_slot2_get (insn) == 82 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_CMP_I;
+ if (Field_dsp340050b49a6c_fld3473dual_slot2_Slot_dual_slot2_get (insn) == 83 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_LSRM;
+ if (Field_dsp340050b49a6c_fld3474dual_slot2_Slot_dual_slot2_get (insn) == 84 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2)
+ return OPCODE_CMP_R;
+ if (Field_dsp340050b49a6c_fld3475dual_slot2_Slot_dual_slot2_get (insn) == 325 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld3477dual_slot2_Slot_dual_slot2_get (insn) == 341 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld3478dual_slot2_Slot_dual_slot2_get (insn) == 181 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3908dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_dsp340050b49a6c_fld3479dual_slot2_Slot_dual_slot2_get (insn) == 43 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3906dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld3480dual_slot2_Slot_dual_slot2_get (insn) == 22 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_s4_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_LSLM;
+ if (Field_dsp340050b49a6c_fld3481dual_slot2_Slot_dual_slot2_get (insn) == 23 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld3482dual_slot2_Slot_dual_slot2_get (insn) == 2 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3903dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BLTZ;
+ if (Field_dsp340050b49a6c_fld3484dual_slot2_Slot_dual_slot2_get (insn) == 3 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3916dual_slot2_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_XORB;
+ if (Field_r_Slot_dual_slot2_get (insn) == 0 &&
+ Field_op0_s21_Slot_dual_slot2_get (insn) == 1)
+ return OPCODE_BITFINS;
+ switch (Field_t_Slot_dual_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BITFEXT;
+ break;
+ case 8:
+ if (Field_op0_s21_Slot_dual_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_get (insn) == 0)
+ return OPCODE_BLTI;
+ break;
+ }
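+  /* No dual_slot2 encoding matched.  */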
+ return XTENSA_UNDEFINED;
+}
+
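+/* Decode the instruction word held in the dual_slot0 slot: match the
+   op0_s23 major opcode together with the generated sub-opcode fields
+   and return the corresponding OPCODE_* value.  */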
+static int
+Slot_dual_slot0_decode (const xtensa_insnbuf insn)
+{
+ if (Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_get (insn) == 3 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_imm8_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_MOVCM2PQ;
+ switch (Field_dsp340050b49a6c_fld2057_Slot_dual_slot0_get (insn))
+ {
+ case 2842:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_DUP;
+ break;
+ case 2843:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MOV_N;
+ break;
+ case 2874:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_NSA;
+ break;
+ case 2906:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_NSAU;
+ break;
+ case 2970:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_PUSH128_PQ;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3487dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BBCI;
+ break;
+ case 1:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BBSI;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3488dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXTUI;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4 &&
+ Field_sae_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ULT_S;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3489dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 2:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_BEQZ;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_BNEZ;
+ break;
+ case 4:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_BGEZ;
+ break;
+ case 5:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ULE_S;
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LSI;
+ break;
+ case 6:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LSIU;
+ break;
+ case 7:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MOVI;
+ break;
+ case 8:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_BLTZ;
+ break;
+ case 9:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SSI;
+ break;
+ case 10:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SSIU;
+ break;
+ case 14:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2037_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_LCM_U;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3490dual_slot0_Slot_dual_slot0_get (insn) == 88 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC32_R;
+ if (Field_dsp340050b49a6c_fld3491dual_slot0_Slot_dual_slot0_get (insn) == 177 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC_IH;
+ if (Field_dsp340050b49a6c_fld3492dual_slot0_Slot_dual_slot0_get (insn) == 185 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC_RL;
+ if (Field_dsp340050b49a6c_fld3493dual_slot0_Slot_dual_slot0_get (insn) == 178 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC_IL;
+ if (Field_dsp340050b49a6c_fld3494dual_slot0_Slot_dual_slot0_get (insn) == 355 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC2X64_0;
+ if (Field_dsp340050b49a6c_fld3496dual_slot0_Slot_dual_slot0_get (insn) == 371 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC2X64_2;
+ if (Field_dsp340050b49a6c_fld3497dual_slot0_Slot_dual_slot0_get (insn) == 362 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC2X64_1;
+ if (Field_dsp340050b49a6c_fld3498dual_slot0_Slot_dual_slot0_get (insn) == 363 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LAC2X64_3;
+ if (Field_dsp340050b49a6c_fld3499dual_slot0_Slot_dual_slot0_get (insn) == 5750 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld3500dual_slot0_Slot_dual_slot0_get (insn) == 11502 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ANY4;
+ if (Field_dsp340050b49a6c_fld3502dual_slot0_Slot_dual_slot0_get (insn) == 23006 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ANY8;
+ if (Field_dsp340050b49a6c_fld3504dual_slot0_Slot_dual_slot0_get (insn) == 23007 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3505dual_slot0_Slot_dual_slot0_get (insn) == 11628 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_PUSH2X128_PQ;
+ switch (Field_dsp340050b49a6c_fld3506dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 23258:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_1;
+ break;
+ case 23259:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_3;
+ break;
+ case 23260:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_2;
+ break;
+ case 23261:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_4;
+ break;
+ case 23262:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_5;
+ break;
+ case 23263:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SSAI;
+ break;
+ case 23504:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2PQ_0;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3507dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 47010:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_01;
+ break;
+ case 47011:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_23;
+ break;
+ case 47012:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_03;
+ break;
+ case 47013:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_LLR_POS;
+ break;
+ case 47014:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_PERM_REG;
+ break;
+ case 47015:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_PHASOR_N;
+ break;
+ case 47016:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_21;
+ break;
+ case 47017:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_PHASOR_OFFSET;
+ break;
+ case 47018:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_SCALE_REG;
+ break;
+ case 47019:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_SOV;
+ break;
+ case 47020:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_SMOD_POS;
+ break;
+ case 47021:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_WGHT;
+ break;
+ case 47022:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SSA8B;
+ break;
+ case 47023:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SSA8L;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3508dual_slot0_Slot_dual_slot0_get (insn) == 47024 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld3509dual_slot0_Slot_dual_slot0_get (insn) == 47025 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3510dual_slot0_Slot_dual_slot0_get (insn) == 47026 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3511dual_slot0_Slot_dual_slot0_get (insn) == 47027 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3512dual_slot0_Slot_dual_slot0_get (insn) == 47028 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3513dual_slot0_Slot_dual_slot0_get (insn) == 47029 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3514dual_slot0_Slot_dual_slot0_get (insn) == 47030 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3515dual_slot0_Slot_dual_slot0_get (insn) == 47031 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3516dual_slot0_Slot_dual_slot0_get (insn) == 47032 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3517dual_slot0_Slot_dual_slot0_get (insn) == 47033 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld3518dual_slot0_Slot_dual_slot0_get (insn) == 23517 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_s8_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3519dual_slot0_Slot_dual_slot0_get (insn) == 11759 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3939dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3520dual_slot0_Slot_dual_slot0_get (insn) == 5942 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_dsp340050b49a6c_fld3522dual_slot0_Slot_dual_slot0_get (insn) == 5943 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3950dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP16LLR_1;
+ if (Field_dsp340050b49a6c_fld3523dual_slot0_Slot_dual_slot0_get (insn) == 1501 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_bbi_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_dsp340050b49a6c_fld3524dual_slot0_Slot_dual_slot0_get (insn) == 765 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3943dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_CLRAC;
+ if (Field_dsp340050b49a6c_fld3527dual_slot0_Slot_dual_slot0_get (insn) == 45 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_LAC_RH;
+ switch (Field_dsp340050b49a6c_fld3529dual_slot0_Slot_dual_slot0_get (insn))
+ {
+ case 707:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_LN;
+ break;
+ case 711:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_AR2CM_LN_I;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3530dual_slot0_Slot_dual_slot0_get (insn) == 359 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2072_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_AR2CM_LN_R;
+ switch (Field_dsp340050b49a6c_fld3531_Slot_dual_slot0_get (insn))
+ {
+ case 120:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SAC2X32;
+ break;
+ case 121:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SAC32_R;
+ break;
+ case 122:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SCM_PINC;
+ break;
+ case 123:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SLLI;
+ break;
+ case 124:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SCM_U;
+ break;
+ case 125:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SRAI;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3532dual_slot0_Slot_dual_slot0_get (insn) == 183 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_dsp340050b49a6c_fld3533dual_slot0_Slot_dual_slot0_get (insn) == 95 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BNEZ_N;
+ if (Field_dsp340050b49a6c_fld3535dual_slot0_Slot_dual_slot0_get (insn) == 48 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2066_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_LAC2X32;
+ if (Field_dsp340050b49a6c_fld3536dual_slot0_Slot_dual_slot0_get (insn) == 193 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_0;
+ if (Field_dsp340050b49a6c_fld3537dual_slot0_Slot_dual_slot0_get (insn) == 197 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_2;
+ if (Field_dsp340050b49a6c_fld3538dual_slot0_Slot_dual_slot0_get (insn) == 101 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3957dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_3;
+ if (Field_dsp340050b49a6c_fld3539dual_slot0_Slot_dual_slot0_get (insn) == 25 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3954dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP128_2M_1;
+ if (Field_dsp340050b49a6c_fld3541dual_slot0_Slot_dual_slot0_get (insn) == 104 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LCM_PINC;
+ if (Field_dsp340050b49a6c_fld3542dual_slot0_Slot_dual_slot0_get (insn) == 105 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LP;
+ if (Field_dsp340050b49a6c_fld3543dual_slot0_Slot_dual_slot0_get (insn) == 106 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_LQ;
+ if (Field_dsp340050b49a6c_fld3544dual_slot0_Slot_dual_slot0_get (insn) == 3331 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ABS;
+ if (Field_dsp340050b49a6c_fld3545dual_slot0_Slot_dual_slot0_get (insn) == 3339 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld3546dual_slot0_Slot_dual_slot0_get (insn) == 3347 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld3547dual_slot0_Slot_dual_slot0_get (insn) == 3355 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3548dual_slot0_Slot_dual_slot0_get (insn) == 3363 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MOV_I;
+ if (Field_dsp340050b49a6c_fld3549dual_slot0_Slot_dual_slot0_get (insn) == 3371 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SRA;
+ if (Field_dsp340050b49a6c_fld3550dual_slot0_Slot_dual_slot0_get (insn) == 3379 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SRL;
+ if (Field_dsp340050b49a6c_fld3551dual_slot0_Slot_dual_slot0_get (insn) == 6763 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_CLB_C;
+ switch (Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_get (insn))
+ {
+ case 146:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_EXT;
+ break;
+ case 147:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_PUSH128_M;
+ break;
+ case 148:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_EXT_R;
+ break;
+ case 149:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC2X64_0;
+ break;
+ case 150:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC2X64_1;
+ break;
+ case 151:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC2X64_2;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3553dual_slot0_Slot_dual_slot0_get (insn) == 6779 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_CLB_R;
+ if (Field_dsp340050b49a6c_fld3554dual_slot0_Slot_dual_slot0_get (insn) == 3395 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MOV_R;
+ if (Field_dsp340050b49a6c_fld3555dual_slot0_Slot_dual_slot0_get (insn) == 6795 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MINCLB_C;
+ if (Field_dsp340050b49a6c_fld3556dual_slot0_Slot_dual_slot0_get (insn) == 6811 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SMINCLB_C;
+ if (Field_dsp340050b49a6c_fld3557dual_slot0_Slot_dual_slot0_get (insn) == 6819 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_MINCLB_R;
+ if (Field_dsp340050b49a6c_fld3558dual_slot0_Slot_dual_slot0_get (insn) == 6835 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SMINCLB_R;
+ if (Field_dsp340050b49a6c_fld3559dual_slot0_Slot_dual_slot0_get (insn) == 6827 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_LLR_BUF;
+ if (Field_dsp340050b49a6c_fld3560dual_slot0_Slot_dual_slot0_get (insn) == 54555 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_INTERP_EXT_L;
+ if (Field_dsp340050b49a6c_fld3562dual_slot0_Slot_dual_slot0_get (insn) == 54587 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_INTERP_EXT_N;
+ if (Field_dsp340050b49a6c_fld3563dual_slot0_Slot_dual_slot0_get (insn) == 54619 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_LLR_POS;
+ if (Field_dsp340050b49a6c_fld3564dual_slot0_Slot_dual_slot0_get (insn) == 54651 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_PHASOR_N;
+ if (Field_dsp340050b49a6c_fld3565dual_slot0_Slot_dual_slot0_get (insn) == 54683 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_PERM_REG;
+ if (Field_dsp340050b49a6c_fld3566dual_slot0_Slot_dual_slot0_get (insn) == 54715 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_PHASOR_OFFSET;
+ if (Field_dsp340050b49a6c_fld3567dual_slot0_Slot_dual_slot0_get (insn) == 54747 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SCALE_REG;
+ if (Field_dsp340050b49a6c_fld3568dual_slot0_Slot_dual_slot0_get (insn) == 54779 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SMOD_POS;
+ if (Field_dsp340050b49a6c_fld3569dual_slot0_Slot_dual_slot0_get (insn) == 6851 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld3570dual_slot0_Slot_dual_slot0_get (insn) == 54795 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_SOV;
+ if (Field_dsp340050b49a6c_fld3571dual_slot0_Slot_dual_slot0_get (insn) == 54827 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3572dual_slot0_Slot_dual_slot0_get (insn) == 54859 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3573dual_slot0_Slot_dual_slot0_get (insn) == 54891 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld3574dual_slot0_Slot_dual_slot0_get (insn) == 54923 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3575dual_slot0_Slot_dual_slot0_get (insn) == 54955 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_SET_SMOD_OFFSET_TABLE;
+ if (Field_dsp340050b49a6c_fld3576dual_slot0_Slot_dual_slot0_get (insn) == 109771 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADDAC_I2R;
+ if (Field_dsp340050b49a6c_fld3577dual_slot0_Slot_dual_slot0_get (insn) == 109803 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADDAC_R2I;
+ if (Field_dsp340050b49a6c_fld3578dual_slot0_Slot_dual_slot0_get (insn) == 110027 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_REDAC;
+ if (Field_dsp340050b49a6c_fld3579dual_slot0_Slot_dual_slot0_get (insn) == 110059 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_REDAC2;
+ if (Field_dsp340050b49a6c_fld3580dual_slot0_Slot_dual_slot0_get (insn) == 54803 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_GET_WGHT;
+ if (Field_dsp340050b49a6c_fld3581dual_slot0_Slot_dual_slot0_get (insn) == 54811 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_REDAC4;
+ if (Field_dsp340050b49a6c_fld3582dual_slot0_Slot_dual_slot0_get (insn) == 27419 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3959dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_REDACS;
+ if (Field_dsp340050b49a6c_fld3583dual_slot0_Slot_dual_slot0_get (insn) == 13723 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3960dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_I2R;
+ switch (Field_dsp340050b49a6c_fld3584_Slot_dual_slot0_get (insn))
+ {
+ case 2:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3937dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BLTUI;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ASLACM;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3585dual_slot0_Slot_dual_slot0_get (insn) == 6875 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3961dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SUBAC_R2I;
+ if (Field_dsp340050b49a6c_fld3587dual_slot0_Slot_dual_slot0_get (insn) == 1723 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3610_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3588dual_slot0_Slot_dual_slot0_get (insn) == 219 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3938dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_dsp340050b49a6c_fld3589dual_slot0_Slot_dual_slot0_get (insn) == 106 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_s8_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_LCM;
+ if (Field_dsp340050b49a6c_fld3590dual_slot0_Slot_dual_slot0_get (insn) == 3331 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT32_I;
+ if (Field_dsp340050b49a6c_fld3591dual_slot0_Slot_dual_slot0_get (insn) == 3335 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT32_R;
+ if (Field_dsp340050b49a6c_fld3592dual_slot0_Slot_dual_slot0_get (insn) == 3339 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_2FIFO_0;
+ if (Field_dsp340050b49a6c_fld3593dual_slot0_Slot_dual_slot0_get (insn) == 3343 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_R2FIFO_0;
+ if (Field_dsp340050b49a6c_fld3594dual_slot0_Slot_dual_slot0_get (insn) == 3363 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_2FIFO_1;
+ if (Field_dsp340050b49a6c_fld3595dual_slot0_Slot_dual_slot0_get (insn) == 3367 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_R2FIFO_1;
+ if (Field_dsp340050b49a6c_fld3596dual_slot0_Slot_dual_slot0_get (insn) == 3371 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_R2FIFO_2;
+ if (Field_dsp340050b49a6c_fld3597dual_slot0_Slot_dual_slot0_get (insn) == 3375 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_EXT_R2FIFO_3;
+ if (Field_dsp340050b49a6c_fld3598dual_slot0_Slot_dual_slot0_get (insn) == 851 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3951dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_2;
+ if (Field_dsp340050b49a6c_fld3599dual_slot0_Slot_dual_slot0_get (insn) == 855 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3941dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SET_SMOD_BUF;
+ if (Field_dsp340050b49a6c_fld3600dual_slot0_Slot_dual_slot0_get (insn) == 219 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3952dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_EXT_2FIFO_3;
+ if (Field_dsp340050b49a6c_fld3601dual_slot0_Slot_dual_slot0_get (insn) == 213 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ if (Field_dsp340050b49a6c_fld3603dual_slot0_Slot_dual_slot0_get (insn) == 215 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_I;
+ if (Field_dsp340050b49a6c_fld3604dual_slot0_Slot_dual_slot0_get (insn) == 111 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3946dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_CM2AR_LN_R;
+ if (Field_dsp340050b49a6c_fld3606dual_slot0_Slot_dual_slot0_get (insn) == 5 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_ABS_S;
+ if (Field_dsp340050b49a6c_fld3607dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_MOV_S;
+ if (Field_dsp340050b49a6c_fld3608dual_slot0_Slot_dual_slot0_get (insn) == 37 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_NEG_S;
+ if (Field_dsp340050b49a6c_fld3609dual_slot0_Slot_dual_slot0_get (insn) == 106 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_MOVAC;
+ if (Field_dsp340050b49a6c_fld3611dual_slot0_Slot_dual_slot0_get (insn) == 107 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SWAPAC_RI;
+ if (Field_dsp340050b49a6c_fld3612dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_PUSH32;
+ if (Field_dsp340050b49a6c_fld3613dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_RFR;
+ if (Field_dsp340050b49a6c_fld3614dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_dsp340050b49a6c_fld3615dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3958dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ASRAC;
+ if (Field_dsp340050b49a6c_fld3616dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3947dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_I;
+ if (Field_dsp340050b49a6c_fld3618dual_slot0_Slot_dual_slot0_get (insn) == 21 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3949dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_MOV2AC32_R;
+ if (Field_dsp340050b49a6c_fld3619_Slot_dual_slot0_get (insn) == 1 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld3940dual_slot0_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_FLOOR_S;
+ if (Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get (insn) == 1 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld2048_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_L32I;
+ if (Field_dsp340050b49a6c_fld3621dual_slot0_Slot_dual_slot0_get (insn) == 42 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC_IH;
+ if (Field_dsp340050b49a6c_fld3622dual_slot0_Slot_dual_slot0_get (insn) == 43 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC_RH;
+ if (Field_dsp340050b49a6c_fld3623dual_slot0_Slot_dual_slot0_get (insn) == 44 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC_IL;
+ if (Field_dsp340050b49a6c_fld3624dual_slot0_Slot_dual_slot0_get (insn) == 45 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SAC_RL;
+ if (Field_dsp340050b49a6c_fld3625dual_slot0_Slot_dual_slot0_get (insn) == 38 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_dsp340050b49a6c_fld3626dual_slot0_Slot_dual_slot0_get (insn) == 39 &&
+ Field_op0_s23_Slot_dual_slot0_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_SAC2X64_3;
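+  /* Encodings with op0_s23 == 4 that are selected directly by the
+     imm8 field.  */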
+ switch (Field_imm8_Slot_dual_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_ADDX4;
+ break;
+ case 1:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_ADDX8;
+ break;
+ case 2:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_AND;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_L32I_N;
+ break;
+ case 4:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_ANDB;
+ break;
+ case 5:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LCM_PINC_X;
+ break;
+ case 6:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LCM_X;
+ break;
+ case 7:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LCM_XU;
+ break;
+ case 8:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_CEIL_S;
+ break;
+ case 9:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LP_X;
+ break;
+ case 10:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LQ_X;
+ break;
+ case 11:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LUT0;
+ break;
+ case 12:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LSXU;
+ break;
+ case 13:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LUT1;
+ break;
+ case 14:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LUT2;
+ break;
+ case 15:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_LUT3;
+ break;
+ case 16:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_CLAMPS;
+ break;
+ case 17:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MADD_S;
+ break;
+ case 18:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MAX;
+ break;
+ case 19:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MINU;
+ break;
+ case 20:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MAXU;
+ break;
+ case 21:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVEQZ;
+ break;
+ case 22:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVEQZ_S;
+ break;
+ case 23:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVF_S;
+ break;
+ case 24:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MIN;
+ break;
+ case 25:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVGEZ;
+ break;
+ case 26:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVGEZ_S;
+ break;
+ case 27:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVLTZ_S;
+ break;
+ case 28:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVLTZ;
+ break;
+ case 29:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVNEZ;
+ break;
+ case 30:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVNEZ_S;
+ break;
+ case 31:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVT;
+ break;
+ case 32:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_COMB_AR;
+ break;
+ case 33:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MOVT_S;
+ break;
+ case 34:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MSUB_S;
+ break;
+ case 35:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_OLT_S;
+ break;
+ case 36:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_MUL_S;
+ break;
+ case 37:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_OR;
+ break;
+ case 38:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_ORB;
+ break;
+ case 39:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_ROUND_S;
+ break;
+ case 40:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_OEQ_S;
+ break;
+ case 41:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_S32I_N;
+ break;
+ case 46:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SCM;
+ break;
+ case 47:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SCM_PINC_X;
+ break;
+ case 48:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_OLE_S;
+ break;
+ case 49:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SCM_X;
+ break;
+ case 50:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SCM_XU;
+ break;
+ case 51:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SRLI;
+ break;
+ case 52:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SEXT;
+ break;
+ case 53:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SSX;
+ break;
+ case 54:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SSXU;
+ break;
+ case 55:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_STORE_P;
+ break;
+ case 56:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SRC;
+ break;
+ case 57:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_STORE_Q;
+ break;
+ case 58:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_STSWAPBM;
+ break;
+ case 59:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SUB;
+ break;
+ case 60:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_STSWAPBMU;
+ break;
+ case 61:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SUB_S;
+ break;
+ case 62:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SUBX2;
+ break;
+ case 63:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SUBX4;
+ break;
+ case 64:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_FLOAT_S;
+ break;
+ case 65:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_SUBX8;
+ break;
+ case 66:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_TRUNC_S;
+ break;
+ case 67:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_UN_S;
+ break;
+ case 68:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_UEQ_S;
+ break;
+ case 69:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_UTRUNC_S;
+ break;
+ case 70:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_XOR;
+ break;
+ case 71:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_XORB;
+ break;
+ case 72:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 4)
+ return OPCODE_UFLOAT_S;
+ break;
+ case 252:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADD;
+ break;
+ case 253:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADD_S;
+ break;
+ case 254:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADDI_N;
+ break;
+ case 255:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 1)
+ return OPCODE_ADDX2;
+ break;
+ }
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 5)
+ return OPCODE_L32R;
+ switch (Field_r_Slot_dual_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_BNE;
+ break;
+ case 1:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_BNONE;
+ break;
+ case 2:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_L16SI;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_L8UI;
+ break;
+ case 4:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ADDI;
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_L16UI;
+ break;
+ case 5:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BALL;
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_S16I;
+ break;
+ case 6:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BANY;
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_S32I;
+ break;
+ case 7:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BBC;
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 3)
+ return OPCODE_S8I;
+ break;
+ case 8:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_ADDMI;
+ break;
+ case 9:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BBS;
+ break;
+ case 10:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BEQ;
+ break;
+ case 11:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BGEU;
+ break;
+ case 12:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BGE;
+ break;
+ case 13:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BLT;
+ break;
+ case 14:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BLTU;
+ break;
+ case 15:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 0)
+ return OPCODE_BNALL;
+ break;
+ }
+ switch (Field_t_Slot_dual_slot0_get (insn))
+ {
+ case 0:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_BEQI;
+ break;
+ case 1:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_BGEI;
+ break;
+ case 2:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_BGEUI;
+ break;
+ case 3:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_BNEI;
+ break;
+ case 4:
+ if (Field_op0_s23_Slot_dual_slot0_get (insn) == 2)
+ return OPCODE_BLTI;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
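+/* Decoder for the gp_slot1 slot: match the op0_s4 selector and the
+   auxiliary bitfields of INSN against the known encodings and return
+   the corresponding OPCODE_* value, or XTENSA_UNDEFINED on no match.  */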
+static int
+Slot_gp_slot1_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2394gp_slot1_Slot_gp_slot1_get (insn))
+ {
+ case 0:
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_CMAC;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 1)
+ return OPCODE_CMPY;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 2)
+ return OPCODE_MAC;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 3)
+ return OPCODE_MACS;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 4)
+ return OPCODE_MACXP_1;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 5)
+ return OPCODE_MACXP_3;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 6)
+ return OPCODE_MPY8;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 7)
+ return OPCODE_MPYXP_0;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 8)
+ return OPCODE_MPYXP_2;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 9)
+ return OPCODE_NORMACD;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 10)
+ return OPCODE_RCMAC;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 11)
+ return OPCODE_RMAC;
+ break;
+ case 1:
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_CMACS;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 1)
+ return OPCODE_CMPYS;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 2)
+ return OPCODE_MAC8;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 3)
+ return OPCODE_MACXP_0;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 4)
+ return OPCODE_MACXP_2;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 5)
+ return OPCODE_MPY;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 6)
+ return OPCODE_MPYS;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 7)
+ return OPCODE_MPYXP_1;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 8)
+ return OPCODE_MPYXP_3;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 9)
+ return OPCODE_NORMD;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 10)
+ return OPCODE_RCMPY;
+ if (Field_op0_s4_Slot_gp_slot1_get (insn) == 11)
+ return OPCODE_RMPY;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2395gp_slot1_Slot_gp_slot1_get (insn) == 0 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12)
+ return OPCODE_ADD2AC;
+ if (Field_dsp340050b49a6c_fld2397gp_slot1_Slot_gp_slot1_get (insn) == 1 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12)
+ return OPCODE_MOV2AC;
+ if (Field_dsp340050b49a6c_fld2398gp_slot1_Slot_gp_slot1_get (insn) == 2 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12)
+ return OPCODE_SUB2AC;
+ if (Field_dsp340050b49a6c_fld2399gp_slot1_Slot_gp_slot1_get (insn) == 3 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld3681gp_slot1_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2400gp_slot1_Slot_gp_slot1_get (insn) == 8 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_NORMACPQ_I;
+ if (Field_dsp340050b49a6c_fld2402gp_slot1_Slot_gp_slot1_get (insn) == 9 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_NORMACPQ_R;
+ if (Field_dsp340050b49a6c_fld2403gp_slot1_Slot_gp_slot1_get (insn) == 5 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld3684gp_slot1_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_NORMPYPQ_I;
+ if (Field_dsp340050b49a6c_fld2405gp_slot1_Slot_gp_slot1_get (insn) == 3 &&
+ Field_op0_s4_Slot_gp_slot1_get (insn) == 12 &&
+ Field_dsp340050b49a6c_fld3686gp_slot1_Slot_gp_slot1_get (insn) == 0)
+ return OPCODE_NORMPYPQ_R;
+ switch (Field_op0_s4_Slot_gp_slot1_get (insn))
+ {
+ case 13:
+ return OPCODE_CMPY2CM;
+ case 14:
+ return OPCODE_LIN_INT;
+ case 15:
+ return OPCODE_MPY2CM;
+ case 16:
+ return OPCODE_MPYADD8_2CM;
+ case 17:
+ return OPCODE_RCMPY2CM;
+ case 18:
+ return OPCODE_RMPY2CM;
+ }
+ return XTENSA_UNDEFINED;
+}
+
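+/* Decoder for the dot_slot2 slot.  The op0_s6 selector narrows the
+   encoding space and the dsp340050b49a6c_fld* subfields disambiguate
+   the individual operations; returns XTENSA_UNDEFINED if no pattern
+   matches.  */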
+static int
+Slot_dot_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2049_Slot_dot_slot2_get (insn))
+ {
+ case 2:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_t_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_OR128;
+ break;
+ case 3:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_PERM;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2532dot_slot2_Slot_dot_slot2_get (insn) == 0 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ if (Field_dsp340050b49a6c_fld2533dot_slot2_Slot_dot_slot2_get (insn) == 1 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_dsp340050b49a6c_fld2534dot_slot2_Slot_dot_slot2_get (insn) == 2 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_dsp340050b49a6c_fld2535dot_slot2_Slot_dot_slot2_get (insn) == 3 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ if (Field_dsp340050b49a6c_fld2536dot_slot2_Slot_dot_slot2_get (insn) == 4 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ if (Field_dsp340050b49a6c_fld2537dot_slot2_Slot_dot_slot2_get (insn) == 5 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASR32;
+ if (Field_dsp340050b49a6c_fld2538dot_slot2_Slot_dot_slot2_get (insn) == 6 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ if (Field_dsp340050b49a6c_fld2539dot_slot2_Slot_dot_slot2_get (insn) == 7 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ if (Field_dsp340050b49a6c_fld2540dot_slot2_Slot_dot_slot2_get (insn) == 8 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ if (Field_dsp340050b49a6c_fld2541dot_slot2_Slot_dot_slot2_get (insn) == 9 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ if (Field_dsp340050b49a6c_fld2542dot_slot2_Slot_dot_slot2_get (insn) == 10 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ if (Field_dsp340050b49a6c_fld2543dot_slot2_Slot_dot_slot2_get (insn) == 11 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ if (Field_dsp340050b49a6c_fld2544dot_slot2_Slot_dot_slot2_get (insn) == 12 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ if (Field_dsp340050b49a6c_fld2545dot_slot2_Slot_dot_slot2_get (insn) == 13 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ if (Field_dsp340050b49a6c_fld2546dot_slot2_Slot_dot_slot2_get (insn) == 14 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ if (Field_dsp340050b49a6c_fld2547dot_slot2_Slot_dot_slot2_get (insn) == 15 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOV2CM2PQ;
+ if (Field_dsp340050b49a6c_fld2548dot_slot2_Slot_dot_slot2_get (insn) == 16 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASL32;
+ if (Field_dsp340050b49a6c_fld2549dot_slot2_Slot_dot_slot2_get (insn) == 17 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_0;
+ if (Field_dsp340050b49a6c_fld2550dot_slot2_Slot_dot_slot2_get (insn) == 18 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_1;
+ if (Field_dsp340050b49a6c_fld2551dot_slot2_Slot_dot_slot2_get (insn) == 19 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_4;
+ if (Field_dsp340050b49a6c_fld2552dot_slot2_Slot_dot_slot2_get (insn) == 20 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_2;
+ if (Field_dsp340050b49a6c_fld2553dot_slot2_Slot_dot_slot2_get (insn) == 21 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_5;
+ if (Field_dsp340050b49a6c_fld2554dot_slot2_Slot_dot_slot2_get (insn) == 22 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_6;
+ if (Field_dsp340050b49a6c_fld2555dot_slot2_Slot_dot_slot2_get (insn) == 23 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_7;
+ if (Field_dsp340050b49a6c_fld2556dot_slot2_Slot_dot_slot2_get (insn) == 24 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_3;
+ if (Field_dsp340050b49a6c_fld2557dot_slot2_Slot_dot_slot2_get (insn) == 25 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_0;
+ if (Field_dsp340050b49a6c_fld2558dot_slot2_Slot_dot_slot2_get (insn) == 26 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_1;
+ if (Field_dsp340050b49a6c_fld2559dot_slot2_Slot_dot_slot2_get (insn) == 27 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_3;
+ if (Field_dsp340050b49a6c_fld2560dot_slot2_Slot_dot_slot2_get (insn) == 28 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_2;
+ if (Field_dsp340050b49a6c_fld2561dot_slot2_Slot_dot_slot2_get (insn) == 29 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_4;
+ if (Field_dsp340050b49a6c_fld2562dot_slot2_Slot_dot_slot2_get (insn) == 30 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_5;
+ if (Field_dsp340050b49a6c_fld2563dot_slot2_Slot_dot_slot2_get (insn) == 31 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_6;
+ if (Field_dsp340050b49a6c_fld2564dot_slot2_Slot_dot_slot2_get (insn) == 32 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASLM;
+ if (Field_dsp340050b49a6c_fld2565dot_slot2_Slot_dot_slot2_get (insn) == 33 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASRM;
+ if (Field_dsp340050b49a6c_fld2566dot_slot2_Slot_dot_slot2_get (insn) == 34 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CMP8;
+ if (Field_dsp340050b49a6c_fld2567dot_slot2_Slot_dot_slot2_get (insn) == 35 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LSRM;
+ if (Field_dsp340050b49a6c_fld2568dot_slot2_Slot_dot_slot2_get (insn) == 36 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CMP_I;
+ if (Field_dsp340050b49a6c_fld2569dot_slot2_Slot_dot_slot2_get (insn) == 517 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ABS8;
+ if (Field_dsp340050b49a6c_fld2571dot_slot2_Slot_dot_slot2_get (insn) == 533 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld2572dot_slot2_Slot_dot_slot2_get (insn) == 549 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LUT_PHASOR;
+ if (Field_dsp340050b49a6c_fld2573dot_slot2_Slot_dot_slot2_get (insn) == 565 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld2574dot_slot2_Slot_dot_slot2_get (insn) == 581 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld2575dot_slot2_Slot_dot_slot2_get (insn) == 597 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_NOT128;
+ if (Field_dsp340050b49a6c_fld2576dot_slot2_Slot_dot_slot2_get (insn) == 613 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_TRANS;
+ if (Field_dsp340050b49a6c_fld2577dot_slot2_Slot_dot_slot2_get (insn) == 629 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2578_Slot_dot_slot2_get (insn) == 0 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_LUT_AR;
+ if (Field_dsp340050b49a6c_fld2579dot_slot2_Slot_dot_slot2_get (insn) == 1653 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2580dot_slot2_Slot_dot_slot2_get (insn) == 2677 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld2581dot_slot2_Slot_dot_slot2_get (insn) == 3701 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld2582dot_slot2_Slot_dot_slot2_get (insn) == 4725 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld2583dot_slot2_Slot_dot_slot2_get (insn) == 5749 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld2584dot_slot2_Slot_dot_slot2_get (insn) == 6773 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2585dot_slot2_Slot_dot_slot2_get (insn) == 7797 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2586dot_slot2_Slot_dot_slot2_get (insn) == 8821 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld2587dot_slot2_Slot_dot_slot2_get (insn) == 9845 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2588dot_slot2_Slot_dot_slot2_get (insn) == 10869 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2589dot_slot2_Slot_dot_slot2_get (insn) == 11893 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2590dot_slot2_Slot_dot_slot2_get (insn) == 12917 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_4;
+ switch (Field_dsp340050b49a6c_fld2591dot_slot2_Slot_dot_slot2_get (insn))
+ {
+ case 221301:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ break;
+ case 221557:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 221813:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 222069:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 222325:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ break;
+ case 222581:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 222837:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ break;
+ case 223093:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2592dot_slot2_Slot_dot_slot2_get (insn) == 55925 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ if (Field_dsp340050b49a6c_fld2595dot_slot2_Slot_dot_slot2_get (insn) == 56181 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld2596dot_slot2_Slot_dot_slot2_get (insn) == 7797 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3724dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ if (Field_dsp340050b49a6c_fld2598dot_slot2_Slot_dot_slot2_get (insn) == 645 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld2599dot_slot2_Slot_dot_slot2_get (insn) == 2197 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld2601dot_slot2_Slot_dot_slot2_get (insn) == 2453 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ if (Field_dsp340050b49a6c_fld2602dot_slot2_Slot_dot_slot2_get (insn) == 1429 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3713dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld2604dot_slot2_Slot_dot_slot2_get (insn) == 341 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3710dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ if (Field_dsp340050b49a6c_fld2606dot_slot2_Slot_dot_slot2_get (insn) == 181 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3711dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld2608dot_slot2_Slot_dot_slot2_get (insn) == 19 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_0;
+ if (Field_dsp340050b49a6c_fld2609dot_slot2_Slot_dot_slot2_get (insn) == 51 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_1;
+ if (Field_dsp340050b49a6c_fld2610dot_slot2_Slot_dot_slot2_get (insn) == 83 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_2;
+ if (Field_dsp340050b49a6c_fld2611dot_slot2_Slot_dot_slot2_get (insn) == 899 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ if (Field_dsp340050b49a6c_fld2612_Slot_dot_slot2_get (insn) == 32 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld2613dot_slot2_Slot_dot_slot2_get (insn) == 1811 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_0;
+ switch (Field_dsp340050b49a6c_fld2614_Slot_dot_slot2_get (insn))
+ {
+ case 66:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_MOVPQ2PQ;
+ break;
+ case 67:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SET_EXT_REGS;
+ break;
+ case 68:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 69:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SUBARX;
+ break;
+ case 70:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SWAPB;
+ break;
+ case 71:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_WRTIEP;
+ break;
+ case 72:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_PUSH32;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2615dot_slot2_Slot_dot_slot2_get (insn) == 1819 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_3;
+ if (Field_dsp340050b49a6c_fld2616dot_slot2_Slot_dot_slot2_get (insn) == 1827 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_1;
+ if (Field_dsp340050b49a6c_fld2617dot_slot2_Slot_dot_slot2_get (insn) == 1835 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_4;
+ if (Field_dsp340050b49a6c_fld2618dot_slot2_Slot_dot_slot2_get (insn) == 1843 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_5;
+ if (Field_dsp340050b49a6c_fld2619dot_slot2_Slot_dot_slot2_get (insn) == 3702 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld2620dot_slot2_Slot_dot_slot2_get (insn) == 3703 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2621dot_slot2_Slot_dot_slot2_get (insn) == 1859 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_2;
+ if (Field_dsp340050b49a6c_fld2622dot_slot2_Slot_dot_slot2_get (insn) == 1867 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2623dot_slot2_Slot_dot_slot2_get (insn) == 939 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3727dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld2624dot_slot2_Slot_dot_slot2_get (insn) == 475 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3729dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld2625_Slot_dot_slot2_get (insn) == 19 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3731dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ if (Field_dsp340050b49a6c_fld2626dot_slot2_Slot_dot_slot2_get (insn) == 51 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3715_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_3;
+ if (Field_dsp340050b49a6c_fld2628dot_slot2_Slot_dot_slot2_get (insn) == 51 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3722_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld2630dot_slot2_Slot_dot_slot2_get (insn) == 10 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2032_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CMP_R;
+ if (Field_dsp340050b49a6c_fld2632dot_slot2_Slot_dot_slot2_get (insn) == 11 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_t_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld2633dot_slot2_Slot_dot_slot2_get (insn) == 6 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3733dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LSLM;
+ if (Field_dsp340050b49a6c_fld2635dot_slot2_Slot_dot_slot2_get (insn) == 7 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3719dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ switch (Field_dsp340050b49a6c_fld2636dot_slot2_Slot_dot_slot2_get (insn))
+ {
+ case 1168:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_AR2SAR_DUP;
+ break;
+ case 1169:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_01;
+ break;
+ case 1170:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_03;
+ break;
+ case 1171:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_WRTBSIGQ;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2637dot_slot2_Slot_dot_slot2_get (insn) == 586 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_21;
+ if (Field_dsp340050b49a6c_fld2640dot_slot2_Slot_dot_slot2_get (insn) == 587 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ if (Field_dsp340050b49a6c_fld2641dot_slot2_Slot_dot_slot2_get (insn) == 147 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3726dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_23;
+ if (Field_dsp340050b49a6c_fld2642dot_slot2_Slot_dot_slot2_get (insn) == 74 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ if (Field_dsp340050b49a6c_fld2643dot_slot2_Slot_dot_slot2_get (insn) == 75 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ if (Field_dsp340050b49a6c_fld2644dot_slot2_Slot_dot_slot2_get (insn) == 4 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3732dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ADDAR2;
+ if (Field_dsp340050b49a6c_fld2645dot_slot2_Slot_dot_slot2_get (insn) == 4 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3714dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld2646dot_slot2_Slot_dot_slot2_get (insn) == 4 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3721dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVAR2;
+ if (Field_dsp340050b49a6c_fld2647dot_slot2_Slot_dot_slot2_get (insn) == 16 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_7;
+ if (Field_dsp340050b49a6c_fld2648dot_slot2_Slot_dot_slot2_get (insn) == 17 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SUB32;
+ if (Field_dsp340050b49a6c_fld2649dot_slot2_Slot_dot_slot2_get (insn) == 18 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SUBCM;
+ if (Field_dsp340050b49a6c_fld2650dot_slot2_Slot_dot_slot2_get (insn) == 19 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_XOR128;
+ if (Field_dsp340050b49a6c_fld2651dot_slot2_Slot_dot_slot2_get (insn) == 20 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld2652dot_slot2_Slot_dot_slot2_get (insn) == 37 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3718_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_dsp340050b49a6c_fld2654dot_slot2_Slot_dot_slot2_get (insn) == 53 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3728dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld2655dot_slot2_Slot_dot_slot2_get (insn) == 11 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_MOVCM2PQ;
+ if (Field_dsp340050b49a6c_fld2656dot_slot2_Slot_dot_slot2_get (insn) == 6 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3712_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_SUBWRP;
+ if (Field_dsp340050b49a6c_fld2657dot_slot2_Slot_dot_slot2_get (insn) == 7 &&
+ Field_op0_s6_Slot_dot_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ switch (Field_dsp340050b49a6c_fld2658dot_slot2_Slot_dot_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3717dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_AR2PQ_LN;
+ break;
+ case 1:
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3716dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ break;
+ }
+ if (Field_op0_s6_Slot_dot_slot2_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld3723dot_slot2_Slot_dot_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ return XTENSA_UNDEFINED;
+}
+
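+/* Decoder for the dot_slot1 slot: a single switch on op0_s7 selects
+   the opcode; only NOP needs an additional subfield check.  */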
+static int
+Slot_dot_slot1_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_s7_Slot_dot_slot1_get (insn))
+ {
+ case 0:
+ return OPCODE_CDOT;
+ case 1:
+ return OPCODE_CDOTAC;
+ case 2:
+ return OPCODE_CDOTACS;
+ case 3:
+ return OPCODE_DOT;
+ case 4:
+ return OPCODE_DOTAC;
+ case 5:
+ return OPCODE_DOTACS;
+ case 6:
+ return OPCODE_MACD8;
+ case 7:
+ return OPCODE_MACPQXP_0;
+ case 8:
+ return OPCODE_MACPQXP_1;
+ case 9:
+ return OPCODE_MACPQXP_2;
+ case 10:
+ return OPCODE_MACPQXP_3;
+ case 11:
+ return OPCODE_MACXP2_0;
+ case 12:
+ return OPCODE_MACXP2_1;
+ case 13:
+ return OPCODE_MPYD8;
+ case 14:
+ return OPCODE_MPYPQXP_0;
+ case 15:
+ return OPCODE_MPYPQXP_1;
+ case 16:
+ return OPCODE_MPYPQXP_2;
+ case 17:
+ return OPCODE_MPYPQXP_3;
+ case 18:
+ return OPCODE_MPYXP2_0;
+ case 19:
+ return OPCODE_MPYXP2_1;
+ case 20:
+ if (Field_dsp340050b49a6c_fld3734dot_slot1_Slot_dot_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
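+/* Decoder for the pq_slot1 slot, covering the PQ multiply encodings
+   (CMPYXP2PQ, MPYXP2PQ, CMPY2PQ, MPY2PQ) and NOP.  */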
+static int
+Slot_pq_slot1_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2825pq_slot1_Slot_pq_slot1_get (insn))
+ {
+ case 0:
+ if (Field_op0_s10_Slot_pq_slot1_get (insn) == 0)
+ return OPCODE_CMPYXP2PQ;
+ break;
+ case 1:
+ if (Field_op0_s10_Slot_pq_slot1_get (insn) == 0)
+ return OPCODE_MPYXP2PQ;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2826pq_slot1_Slot_pq_slot1_get (insn) == 1 &&
+ Field_op0_s10_Slot_pq_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3761pq_slot1_Slot_pq_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ switch (Field_op0_s10_Slot_pq_slot1_get (insn))
+ {
+ case 1:
+ return OPCODE_CMPY2PQ;
+ case 2:
+ return OPCODE_MPY2PQ;
+ }
+ return XTENSA_UNDEFINED;
+}
+
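+/* Decoder for the acc2_slot2 slot: the op0_s12 selector together with
+   the fld* subfields picks the opcode; XTENSA_UNDEFINED otherwise.  */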
+static int
+Slot_acc2_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2045_Slot_acc2_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ break;
+ case 2:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ break;
+ case 3:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld2953acc2_slot2_Slot_acc2_slot2_get (insn))
+ {
+ case 2:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ break;
+ case 3:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3782acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2954acc2_slot2_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3786acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld2955acc2_slot2_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3788acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld2956acc2_slot2_Slot_acc2_slot2_get (insn) == 3 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld2957acc2_slot2_Slot_acc2_slot2_get (insn) == 19 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld2958acc2_slot2_Slot_acc2_slot2_get (insn) == 35 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld2959acc2_slot2_Slot_acc2_slot2_get (insn) == 51 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld2960acc2_slot2_Slot_acc2_slot2_get (insn) == 35 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld2963acc2_slot2_Slot_acc2_slot2_get (insn) == 51 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld2964acc2_slot2_Slot_acc2_slot2_get (insn) == 19 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3785acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld2966acc2_slot2_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3783acc2_slot2_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld2967acc2_slot2_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_op0_s12_Slot_acc2_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2056_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ switch (Field_op0_s12_Slot_acc2_slot2_get (insn))
+ {
+ case 2:
+ if (Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_get (insn) == 0)
+ return OPCODE_PUSH32;
+ break;
+ case 3:
+ return OPCODE_ADD32;
+ case 4:
+ return OPCODE_ADDCM;
+ case 5:
+ return OPCODE_ASR;
+ case 6:
+ return OPCODE_LUT_IEXT;
+ case 7:
+ return OPCODE_PERM;
+ }
+ switch (Field_s_Slot_acc2_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_CONJ;
+ break;
+ case 1:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_LUT_PHASOR;
+ break;
+ case 2:
+ if (Field_op0_s12_Slot_acc2_slot2_get (insn) == 1)
+ return OPCODE_MOVCM;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
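+/* Decoder for the acc2_slot1 slot (RFIR/RFIRD filter ops, SWAPAC_R,
+   LLRPRE1 and NOP).  */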
+static int
+Slot_acc2_slot1_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2075_Slot_acc2_slot1_get (insn))
+ {
+ case 0:
+ if (Field_op0_s13_Slot_acc2_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_get (insn) == 0)
+ return OPCODE_RFIRD;
+ break;
+ case 1:
+ if (Field_op0_s13_Slot_acc2_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_get (insn) == 0)
+ return OPCODE_RFIRDA;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2968acc2_slot1_Slot_acc2_slot1_get (insn) == 1 &&
+ Field_op0_s13_Slot_acc2_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3793acc2_slot1_Slot_acc2_slot1_get (insn) == 0)
+ return OPCODE_SWAPAC_R;
+ if (Field_dsp340050b49a6c_fld2969acc2_slot1_Slot_acc2_slot1_get (insn) == 1 &&
+ Field_op0_s13_Slot_acc2_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3790acc2_slot1_Slot_acc2_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ switch (Field_op0_s13_Slot_acc2_slot1_get (insn))
+ {
+ case 1:
+ return OPCODE_LLRPRE1;
+ case 2:
+ return OPCODE_RFIR;
+ case 3:
+ return OPCODE_RFIRA;
+ }
+ return XTENSA_UNDEFINED;
+}
+
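+/* Decoder for the smod_slot2 slot.  Largely parallel to
+   Slot_dot_slot2_decode, but keyed on the op0_s15 selector and the
+   smod_slot2 variants of the subfield accessors.  */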
+static int
+Slot_smod_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2049_Slot_smod_slot2_get (insn))
+ {
+ case 2:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_t_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_OR128;
+ break;
+ case 3:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_PERM;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld2991smod_slot2_Slot_smod_slot2_get (insn) == 0 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ if (Field_dsp340050b49a6c_fld2992smod_slot2_Slot_smod_slot2_get (insn) == 1 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_dsp340050b49a6c_fld2993smod_slot2_Slot_smod_slot2_get (insn) == 2 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_dsp340050b49a6c_fld2994smod_slot2_Slot_smod_slot2_get (insn) == 3 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ if (Field_dsp340050b49a6c_fld2995smod_slot2_Slot_smod_slot2_get (insn) == 4 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ if (Field_dsp340050b49a6c_fld2996smod_slot2_Slot_smod_slot2_get (insn) == 5 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASR32;
+ if (Field_dsp340050b49a6c_fld2997smod_slot2_Slot_smod_slot2_get (insn) == 6 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ if (Field_dsp340050b49a6c_fld2998smod_slot2_Slot_smod_slot2_get (insn) == 7 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ if (Field_dsp340050b49a6c_fld2999smod_slot2_Slot_smod_slot2_get (insn) == 8 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ if (Field_dsp340050b49a6c_fld3000smod_slot2_Slot_smod_slot2_get (insn) == 9 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ if (Field_dsp340050b49a6c_fld3001smod_slot2_Slot_smod_slot2_get (insn) == 10 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ if (Field_dsp340050b49a6c_fld3002smod_slot2_Slot_smod_slot2_get (insn) == 11 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ if (Field_dsp340050b49a6c_fld3003smod_slot2_Slot_smod_slot2_get (insn) == 12 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ if (Field_dsp340050b49a6c_fld3004smod_slot2_Slot_smod_slot2_get (insn) == 13 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ if (Field_dsp340050b49a6c_fld3005smod_slot2_Slot_smod_slot2_get (insn) == 14 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ if (Field_dsp340050b49a6c_fld3006smod_slot2_Slot_smod_slot2_get (insn) == 15 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOV2CM2PQ;
+ if (Field_dsp340050b49a6c_fld3007smod_slot2_Slot_smod_slot2_get (insn) == 16 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASL32;
+ if (Field_dsp340050b49a6c_fld3008smod_slot2_Slot_smod_slot2_get (insn) == 17 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_0;
+ if (Field_dsp340050b49a6c_fld3009smod_slot2_Slot_smod_slot2_get (insn) == 18 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_1;
+ if (Field_dsp340050b49a6c_fld3010smod_slot2_Slot_smod_slot2_get (insn) == 19 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_4;
+ if (Field_dsp340050b49a6c_fld3011smod_slot2_Slot_smod_slot2_get (insn) == 20 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_2;
+ if (Field_dsp340050b49a6c_fld3012smod_slot2_Slot_smod_slot2_get (insn) == 21 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_5;
+ if (Field_dsp340050b49a6c_fld3013smod_slot2_Slot_smod_slot2_get (insn) == 22 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_6;
+ if (Field_dsp340050b49a6c_fld3014smod_slot2_Slot_smod_slot2_get (insn) == 23 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_7;
+ if (Field_dsp340050b49a6c_fld3015smod_slot2_Slot_smod_slot2_get (insn) == 24 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND8_3;
+ if (Field_dsp340050b49a6c_fld3016smod_slot2_Slot_smod_slot2_get (insn) == 25 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_0;
+ if (Field_dsp340050b49a6c_fld3017smod_slot2_Slot_smod_slot2_get (insn) == 26 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_1;
+ if (Field_dsp340050b49a6c_fld3018smod_slot2_Slot_smod_slot2_get (insn) == 27 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_3;
+ if (Field_dsp340050b49a6c_fld3019smod_slot2_Slot_smod_slot2_get (insn) == 28 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_2;
+ if (Field_dsp340050b49a6c_fld3020smod_slot2_Slot_smod_slot2_get (insn) == 29 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_4;
+ if (Field_dsp340050b49a6c_fld3021smod_slot2_Slot_smod_slot2_get (insn) == 30 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_5;
+ if (Field_dsp340050b49a6c_fld3022smod_slot2_Slot_smod_slot2_get (insn) == 31 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCND_6;
+ if (Field_dsp340050b49a6c_fld3023smod_slot2_Slot_smod_slot2_get (insn) == 32 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASLM;
+ if (Field_dsp340050b49a6c_fld3024smod_slot2_Slot_smod_slot2_get (insn) == 33 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASRM;
+ if (Field_dsp340050b49a6c_fld3025smod_slot2_Slot_smod_slot2_get (insn) == 34 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CMP8;
+ if (Field_dsp340050b49a6c_fld3026smod_slot2_Slot_smod_slot2_get (insn) == 35 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LSRM;
+ if (Field_dsp340050b49a6c_fld3027smod_slot2_Slot_smod_slot2_get (insn) == 36 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CMP_I;
+ if (Field_dsp340050b49a6c_fld3028smod_slot2_Slot_smod_slot2_get (insn) == 517 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ABS8;
+ if (Field_dsp340050b49a6c_fld3030smod_slot2_Slot_smod_slot2_get (insn) == 533 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CONJ;
+ if (Field_dsp340050b49a6c_fld3031smod_slot2_Slot_smod_slot2_get (insn) == 549 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LUT_PHASOR;
+ if (Field_dsp340050b49a6c_fld3032smod_slot2_Slot_smod_slot2_get (insn) == 565 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3033smod_slot2_Slot_smod_slot2_get (insn) == 581 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCM;
+ if (Field_dsp340050b49a6c_fld3034smod_slot2_Slot_smod_slot2_get (insn) == 597 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_NOT128;
+ if (Field_dsp340050b49a6c_fld3035smod_slot2_Slot_smod_slot2_get (insn) == 613 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_TRANS;
+ if (Field_dsp340050b49a6c_fld3036smod_slot2_Slot_smod_slot2_get (insn) == 1141 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ switch (Field_dsp340050b49a6c_fld3038smod_slot2_Slot_smod_slot2_get (insn))
+ {
+ case 8565:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ break;
+ case 9077:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ break;
+ case 9589:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ break;
+ case 10101:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ break;
+ case 10613:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3039smod_slot2_Slot_smod_slot2_get (insn))
+ {
+ case 11125:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_0;
+ break;
+ case 27509:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 43893:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 60277:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 76661:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ break;
+ case 93045:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 109429:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_2;
+ break;
+ case 125813:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3040smod_slot2_Slot_smod_slot2_get (insn) == 43893 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_5;
+ if (Field_dsp340050b49a6c_fld3043smod_slot2_Slot_smod_slot2_get (insn) == 60277 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3044smod_slot2_Slot_smod_slot2_get (insn) == 6005 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3819smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_1;
+ if (Field_dsp340050b49a6c_fld3046smod_slot2_Slot_smod_slot2_get (insn) == 645 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_NCO_UPDATE;
+ if (Field_dsp340050b49a6c_fld3047smod_slot2_Slot_smod_slot2_get (insn) == 661 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld3048_Slot_smod_slot2_get (insn) == 0 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_LUT_AR;
+ if (Field_dsp340050b49a6c_fld3049smod_slot2_Slot_smod_slot2_get (insn) == 1685 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3050smod_slot2_Slot_smod_slot2_get (insn) == 2709 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld3051smod_slot2_Slot_smod_slot2_get (insn) == 3733 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3052smod_slot2_Slot_smod_slot2_get (insn) == 4757 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld3053smod_slot2_Slot_smod_slot2_get (insn) == 5781 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3054smod_slot2_Slot_smod_slot2_get (insn) == 6805 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3055smod_slot2_Slot_smod_slot2_get (insn) == 7829 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3056smod_slot2_Slot_smod_slot2_get (insn) == 4757 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3058smod_slot2_Slot_smod_slot2_get (insn) == 5781 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_4;
+ if (Field_dsp340050b49a6c_fld3059smod_slot2_Slot_smod_slot2_get (insn) == 3733 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3821smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3061smod_slot2_Slot_smod_slot2_get (insn) == 341 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3806smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld3063smod_slot2_Slot_smod_slot2_get (insn) == 181 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3807smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3065smod_slot2_Slot_smod_slot2_get (insn) == 19 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_0;
+ if (Field_dsp340050b49a6c_fld3066smod_slot2_Slot_smod_slot2_get (insn) == 51 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_1;
+ if (Field_dsp340050b49a6c_fld3067smod_slot2_Slot_smod_slot2_get (insn) == 83 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_2;
+ if (Field_dsp340050b49a6c_fld3068smod_slot2_Slot_smod_slot2_get (insn) == 227 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_PUSH128_PQ;
+ if (Field_dsp340050b49a6c_fld3069smod_slot2_Slot_smod_slot2_get (insn) == 1803 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_0;
+ switch (Field_dsp340050b49a6c_fld3070_Slot_smod_slot2_get (insn))
+ {
+ case 66:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_MOVPQ2PQ;
+ break;
+ case 67:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SUBARX;
+ break;
+ case 68:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_PUSH32;
+ break;
+ case 69:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SWAPB;
+ break;
+ case 70:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_WRTIEP;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3071smod_slot2_Slot_smod_slot2_get (insn) == 1819 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_1;
+ if (Field_dsp340050b49a6c_fld3072smod_slot2_Slot_smod_slot2_get (insn) == 1835 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_2;
+ if (Field_dsp340050b49a6c_fld3073smod_slot2_Slot_smod_slot2_get (insn) == 1851 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_4;
+ if (Field_dsp340050b49a6c_fld3074smod_slot2_Slot_smod_slot2_get (insn) == 1867 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_3;
+ if (Field_dsp340050b49a6c_fld3075smod_slot2_Slot_smod_slot2_get (insn) == 1883 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2PQ_5;
+ if (Field_dsp340050b49a6c_fld3076smod_slot2_Slot_smod_slot2_get (insn) == 3798 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3077smod_slot2_Slot_smod_slot2_get (insn) == 3799 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3078smod_slot2_Slot_smod_slot2_get (insn) == 3830 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3079smod_slot2_Slot_smod_slot2_get (insn) == 3831 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3080smod_slot2_Slot_smod_slot2_get (insn) == 147 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP128_2CMPQ_3;
+ if (Field_dsp340050b49a6c_fld3081smod_slot2_Slot_smod_slot2_get (insn) == 179 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_PUSH2X128_PQ;
+ if (Field_dsp340050b49a6c_fld3082smod_slot2_Slot_smod_slot2_get (insn) == 115 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3824smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld3084smod_slot2_Slot_smod_slot2_get (insn) == 51 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3817_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld3085smod_slot2_Slot_smod_slot2_get (insn) == 10 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld2032_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CMP_R;
+ if (Field_dsp340050b49a6c_fld3087smod_slot2_Slot_smod_slot2_get (insn) == 11 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_t_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld3088smod_slot2_Slot_smod_slot2_get (insn) == 6 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3828smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LSLM;
+ if (Field_dsp340050b49a6c_fld3090smod_slot2_Slot_smod_slot2_get (insn) == 7 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3814smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ switch (Field_dsp340050b49a6c_fld3091smod_slot2_Slot_smod_slot2_get (insn))
+ {
+ case 32:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_QREADY;
+ break;
+ case 36:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2041_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3092smod_slot2_Slot_smod_slot2_get (insn))
+ {
+ case 1136:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_AR2SAR_DUP;
+ break;
+ case 1137:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_01;
+ break;
+ case 1138:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_POP2X128_2PQ_03;
+ break;
+ case 1139:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_WRTBSIGQ;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3093smod_slot2_Slot_smod_slot2_get (insn) == 570 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_21;
+ if (Field_dsp340050b49a6c_fld3096smod_slot2_Slot_smod_slot2_get (insn) == 571 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ if (Field_dsp340050b49a6c_fld3097smod_slot2_Slot_smod_slot2_get (insn) == 143 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3823smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_POP2X128_2PQ_23;
+ if (Field_dsp340050b49a6c_fld3098smod_slot2_Slot_smod_slot2_get (insn) == 74 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ if (Field_dsp340050b49a6c_fld3099smod_slot2_Slot_smod_slot2_get (insn) == 75 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ if (Field_dsp340050b49a6c_fld3100smod_slot2_Slot_smod_slot2_get (insn) == 19 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3825smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SETTIEP;
+ if (Field_dsp340050b49a6c_fld3101smod_slot2_Slot_smod_slot2_get (insn) == 4 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3826smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ADDAR2;
+ if (Field_dsp340050b49a6c_fld3102smod_slot2_Slot_smod_slot2_get (insn) == 4 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3810smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld3104smod_slot2_Slot_smod_slot2_get (insn) == 4 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3816smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVAR2;
+ if (Field_dsp340050b49a6c_fld3105smod_slot2_Slot_smod_slot2_get (insn) == 16 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_MOVCND_7;
+ if (Field_dsp340050b49a6c_fld3106smod_slot2_Slot_smod_slot2_get (insn) == 17 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SMOD_LUT;
+ if (Field_dsp340050b49a6c_fld3107smod_slot2_Slot_smod_slot2_get (insn) == 18 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SUB32;
+ if (Field_dsp340050b49a6c_fld3108smod_slot2_Slot_smod_slot2_get (insn) == 19 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SUBWRP;
+ if (Field_dsp340050b49a6c_fld3109smod_slot2_Slot_smod_slot2_get (insn) == 20 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_SUBCM;
+ if (Field_dsp340050b49a6c_fld3110smod_slot2_Slot_smod_slot2_get (insn) == 21 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1)
+ return OPCODE_XOR128;
+ if (Field_dsp340050b49a6c_fld3111smod_slot2_Slot_smod_slot2_get (insn) == 19 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_MOVCM2PQ;
+ if (Field_dsp340050b49a6c_fld3113smod_slot2_Slot_smod_slot2_get (insn) == 27 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3827smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_dsp340050b49a6c_fld3114smod_slot2_Slot_smod_slot2_get (insn) == 6 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3808_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld3115smod_slot2_Slot_smod_slot2_get (insn) == 7 &&
+ Field_op0_s15_Slot_smod_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ switch (Field_dsp340050b49a6c_fld3116smod_slot2_Slot_smod_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3813smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_AR2PQ_LN;
+ break;
+ case 1:
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3812smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_AR2CM_LN;
+ break;
+ }
+ if (Field_op0_s15_Slot_smod_slot2_get (insn) == 3 &&
+ Field_dsp340050b49a6c_fld3818smod_slot2_Slot_smod_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_smod_slot1_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2080_Slot_smod_slot1_get (insn))
+ {
+ case 0:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_CMPY2CM;
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_RMPY2CM;
+ break;
+ case 1:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_MPY2CM;
+ break;
+ case 2:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_MPYADD8_2CM;
+ break;
+ case 3:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_RCMPY2CM;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_get (insn))
+ {
+ case 2:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_CMAC;
+ break;
+ case 3:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_MAC;
+ break;
+ case 4:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_CMPY;
+ break;
+ case 5:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_MPY;
+ break;
+ case 6:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_NORMACD;
+ break;
+ case 7:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 1)
+ return OPCODE_NORMD;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3118smod_slot1_Slot_smod_slot1_get (insn))
+ {
+ case 0:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_SMOD_SCR;
+ break;
+ case 1:
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3829smod_slot1_Slot_smod_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ break;
+ }
+ if (Field_op0_s16_Slot_smod_slot1_get (insn) == 3)
+ return OPCODE_SMOD_ALIGN;
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_llr_slot2_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_LUT_AR;
+ break;
+ case 2:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3230_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_CM2AR_LN;
+ break;
+ }
+ switch (Field_dsp340050b49a6c_fld3191llr_slot2_Slot_llr_slot2_get (insn))
+ {
+ case 6:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_ABS8;
+ break;
+ case 7:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_CONJ;
+ break;
+ case 22:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_LUT_PHASOR;
+ break;
+ case 23:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_NOT128;
+ break;
+ case 38:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVCM;
+ break;
+ case 39:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_TRANS;
+ break;
+ case 70:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_NCO_UPDATE;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3192llr_slot2_Slot_llr_slot2_get (insn) == 54 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_CLRCM;
+ if (Field_dsp340050b49a6c_fld3193llr_slot2_Slot_llr_slot2_get (insn) == 55 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_GET_ARGMAX;
+ if (Field_dsp340050b49a6c_fld3194llr_slot2_Slot_llr_slot2_get (insn) == 310 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_GET_HSAR;
+ if (Field_dsp340050b49a6c_fld3195llr_slot2_Slot_llr_slot2_get (insn) == 311 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_GET_SAR;
+ if (Field_dsp340050b49a6c_fld3196llr_slot2_Slot_llr_slot2_get (insn) == 566 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_GET_HSAR2SAR;
+ if (Field_dsp340050b49a6c_fld3197llr_slot2_Slot_llr_slot2_get (insn) == 567 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_POP128_0;
+ if (Field_dsp340050b49a6c_fld3198llr_slot2_Slot_llr_slot2_get (insn) == 822 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_POP128_1;
+ if (Field_dsp340050b49a6c_fld3199llr_slot2_Slot_llr_slot2_get (insn) == 823 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_POP128_2;
+ if (Field_dsp340050b49a6c_fld3200llr_slot2_Slot_llr_slot2_get (insn) == 1078 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_GET_MAX;
+ if (Field_dsp340050b49a6c_fld3201llr_slot2_Slot_llr_slot2_get (insn) == 1079 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_POP128_3;
+ if (Field_dsp340050b49a6c_fld3202llr_slot2_Slot_llr_slot2_get (insn) == 1334 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_POP128_4;
+ switch (Field_dsp340050b49a6c_fld3203llr_slot2_Slot_llr_slot2_get (insn))
+ {
+ case 20535:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ128_0;
+ break;
+ case 20791:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ128_1;
+ break;
+ case 21047:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ128_2;
+ break;
+ case 21303:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ128_5;
+ break;
+ case 21559:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ128_3;
+ break;
+ case 21815:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ32_0;
+ break;
+ case 22071:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ32_1;
+ break;
+ case 22327:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_MOVEQ32_2;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3204llr_slot2_Slot_llr_slot2_get (insn) == 11319 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ128_4;
+ if (Field_dsp340050b49a6c_fld3205_Slot_llr_slot2_get (insn) == 7 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_EXT_REGS;
+ switch (Field_dsp340050b49a6c_fld3206_Slot_llr_slot2_get (insn))
+ {
+ case 71:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_ARGMAX;
+ break;
+ case 327:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_NCO;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3207llr_slot2_Slot_llr_slot2_get (insn) == 11575 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MOVEQ32_3;
+ if (Field_dsp340050b49a6c_fld3208llr_slot2_Slot_llr_slot2_get (insn) == 5943 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3843llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3210llr_slot2_Slot_llr_slot2_get (insn) == 411 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3857llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_POP128_5;
+ if (Field_dsp340050b49a6c_fld3212_Slot_llr_slot2_get (insn) == 43 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3848llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_HSAR;
+ if (Field_dsp340050b49a6c_fld3213llr_slot2_Slot_llr_slot2_get (insn) == 155 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3845llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_GET_NCO;
+ if (Field_dsp340050b49a6c_fld3214_Slot_llr_slot2_get (insn) == 1 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3863llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_QREADY;
+ if (Field_dsp340050b49a6c_fld3215llr_slot2_Slot_llr_slot2_get (insn) == 327 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3850llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_SAR;
+ if (Field_dsp340050b49a6c_fld3216llr_slot2_Slot_llr_slot2_get (insn) == 27 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3849llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SET_MAX;
+ if (Field_dsp340050b49a6c_fld3217_Slot_llr_slot2_get (insn) == 5 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3859llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_POP32_0;
+ if (Field_dsp340050b49a6c_fld3218llr_slot2_Slot_llr_slot2_get (insn) == 11 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3851llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_NEGCM;
+ if (Field_dsp340050b49a6c_fld3220llr_slot2_Slot_llr_slot2_get (insn) == 9 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3246_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_PUSH128;
+ if (Field_dsp340050b49a6c_fld3221llr_slot2_Slot_llr_slot2_get (insn) == 25 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_POP32_1;
+ if (Field_dsp340050b49a6c_fld3222llr_slot2_Slot_llr_slot2_get (insn) == 25 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3860llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_POP32_2;
+ if (Field_dsp340050b49a6c_fld3224llr_slot2_Slot_llr_slot2_get (insn) == 25 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3861llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_POP32_3;
+ if (Field_dsp340050b49a6c_fld3226llr_slot2_Slot_llr_slot2_get (insn) == 25 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3864llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_RDTIEP;
+ if (Field_dsp340050b49a6c_fld3228llr_slot2_Slot_llr_slot2_get (insn) == 3 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3866llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ADD16;
+ if (Field_dsp340050b49a6c_fld3231llr_slot2_Slot_llr_slot2_get (insn) == 7 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 1 &&
+ Field_dsp340050b49a6c_fld3856llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_LUT_WRITE;
+ if (Field_dsp340050b49a6c_fld3232llr_slot2_Slot_llr_slot2_get (insn) == 4 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_AR2CM_LN;
+ switch (Field_dsp340050b49a6c_fld3233_Slot_llr_slot2_get (insn))
+ {
+ case 2:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld2049_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SUBWRP;
+ break;
+ case 3:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ASLM32;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3234llr_slot2_Slot_llr_slot2_get (insn) == 20 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_AR2CM_DUP;
+ if (Field_dsp340050b49a6c_fld3235llr_slot2_Slot_llr_slot2_get (insn) == 133 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_SETTIEP;
+ if (Field_dsp340050b49a6c_fld3236_Slot_llr_slot2_get (insn) == 27 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_WRTIEP;
+ if (Field_dsp340050b49a6c_fld3237llr_slot2_Slot_llr_slot2_get (insn) == 149 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_AR2SAR_DUP;
+ if (Field_dsp340050b49a6c_fld3238llr_slot2_Slot_llr_slot2_get (insn) == 85 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3862_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQ;
+ if (Field_dsp340050b49a6c_fld3240llr_slot2_Slot_llr_slot2_get (insn) == 53 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_WRTSIGQ;
+ if (Field_dsp340050b49a6c_fld3241llr_slot2_Slot_llr_slot2_get (insn) == 11 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3865llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_WRTBSIGQM;
+ if (Field_dsp340050b49a6c_fld3242llr_slot2_Slot_llr_slot2_get (insn) == 3 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3855llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_CLRTIEP;
+ switch (Field_dsp340050b49a6c_fld3243llr_slot2_Slot_llr_slot2_get (insn))
+ {
+ case 48:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_ADDAR2;
+ break;
+ case 49:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_MOVAR2;
+ break;
+ case 50:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_PUSH128_PQ;
+ break;
+ case 51:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_SUBARX;
+ break;
+ case 52:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_PUSH32;
+ break;
+ case 53:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 2)
+ return OPCODE_SWAPB;
+ break;
+ }
+ if (Field_dsp340050b49a6c_fld3244llr_slot2_Slot_llr_slot2_get (insn) == 1 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 2 &&
+ Field_dsp340050b49a6c_fld3853llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_PQ2CM;
+ if (Field_dsp340050b49a6c_fld3245llr_slot2_Slot_llr_slot2_get (insn) == 2 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld3868_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_SUBMEAN;
+ if (Field_dsp340050b49a6c_fld3247llr_slot2_Slot_llr_slot2_get (insn) == 3 &&
+ Field_op0_s18_Slot_llr_slot2_get (insn) == 4 &&
+ Field_dsp340050b49a6c_fld3867llr_slot2_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ARGMAX8;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 5 &&
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_PERM;
+ switch (Field_s_Slot_llr_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_ASLM;
+ break;
+ case 1:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_ASRM;
+ break;
+ case 2:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_CMP8;
+ break;
+ case 3:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_LSLM;
+ break;
+ case 4:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_CMP_I;
+ break;
+ case 5:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_LSRM;
+ break;
+ case 8:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 1)
+ return OPCODE_CMP_R;
+ break;
+ }
+ switch (Field_t_Slot_llr_slot2_get (insn))
+ {
+ case 0:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_0;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ADD32;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4)
+ return OPCODE_OR128;
+ break;
+ case 1:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_1;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ADDCM;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4)
+ return OPCODE_SUB32;
+ break;
+ case 2:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_2;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ADDWRP;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4)
+ return OPCODE_SUBCM;
+ break;
+ case 3:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_5;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ASL32;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 4)
+ return OPCODE_XOR128;
+ break;
+ case 4:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_3;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_AND128;
+ break;
+ case 5:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_6;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ASR;
+ break;
+ case 6:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_7;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ASR32;
+ break;
+ case 7:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_0;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_EXTUI4;
+ break;
+ case 8:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND8_4;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_ASL;
+ break;
+ case 9:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_1;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_LUT;
+ break;
+ case 10:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_2;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_LUT_IEXT;
+ break;
+ case 11:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_4;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MAX8;
+ break;
+ case 12:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_3;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_LUT_REXT;
+ break;
+ case 13:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_5;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MEAN;
+ break;
+ case 14:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_6;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MEAN32;
+ break;
+ case 15:
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 3)
+ return OPCODE_MOVCND_7;
+ if (Field_op0_s18_Slot_llr_slot2_get (insn) == 0)
+ return OPCODE_MIN8;
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_llr_slot1_decode (const xtensa_insnbuf insn)
+{
+ if (Field_dsp340050b49a6c_fld3248llr_slot1_Slot_llr_slot1_get (insn) == 0 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3870_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_WGHT32;
+ if (Field_dsp340050b49a6c_fld3250llr_slot1_Slot_llr_slot1_get (insn) == 1 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3869llr_slot1_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_dsp340050b49a6c_fld3251llr_slot1_Slot_llr_slot1_get (insn) == 2 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3878llr_slot1_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_LLRPRE2;
+ if (Field_dsp340050b49a6c_fld3252llr_slot1_Slot_llr_slot1_get (insn) == 3 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3875llr_slot1_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_MOVAC_R;
+ if (Field_dsp340050b49a6c_fld3253llr_slot1_Slot_llr_slot1_get (insn) == 1 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3876llr_slot1_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_ADDAC;
+ if (Field_dsp340050b49a6c_fld3254llr_slot1_Slot_llr_slot1_get (insn) == 1 &&
+ Field_op0_s19_Slot_llr_slot1_get (insn) == 0 &&
+ Field_dsp340050b49a6c_fld3872llr_slot1_Slot_llr_slot1_get (insn) == 0)
+ return OPCODE_MOVAC_I;
+ if (Field_op0_s19_Slot_llr_slot1_get (insn) == 1)
+ return OPCODE_COMB32;
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_dual_slot1_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_s22_Slot_dual_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ return XTENSA_UNDEFINED;
+}
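+
+/* Each Slot_*_decode routine above matches the slot's field encodings
+   against the patterns of the opcodes that slot can hold and returns
+   the matching opcode id, or XTENSA_UNDEFINED when nothing matches.
+   A caller would typically extract the slot bits first; a rough
+   sketch (allocation of the buffer omitted):
+
+       xtensa_insnbuf slotbuf = ...;
+       Slot_dual_Format_dual_slot1_30_get (insn, slotbuf);
+       if (Slot_dual_slot1_decode (slotbuf) == XTENSA_UNDEFINED)
+         ... illegal instruction in this slot ...
+*/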
+
+
+/* Instruction slots. */
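+
+/* Each Slot_<format>_<slot>_get below copies that slot's bit range
+   out of the multi-word instruction buffer into slotbuf, and the
+   matching _set merges it back.  slotbuf[1] is cleared
+   unconditionally because none of these slots is wider than
+   32 bits. */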
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_gp_Format_gp_slot2_43_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0x3ffff800) >> 11);
+}
+
+static void
+Slot_gp_Format_gp_slot2_43_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0x3ffff800) | ((slotbuf[0] & 0x7ffff) << 11);
+}
+
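+/* Several of the slots below (gp_slot1, dot_slot1, dual_slot2, ...)
+   straddle the word boundary: the low bits of the slot sit in the
+   top of insn[0] and the rest in the bottom of insn[1], so their
+   accessors reassemble the value in two steps. */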
+static void
+Slot_gp_Format_gp_slot1_26_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xfc000000) >> 26);
+ slotbuf[0] = (slotbuf[0] & ~0x1ffc0) | ((insn[1] & 0x7ff) << 6);
+}
+
+static void
+Slot_gp_Format_gp_slot1_26_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xfc000000) | ((slotbuf[0] & 0x3f) << 26);
+ insn[1] = (insn[1] & ~0x7ff) | ((slotbuf[0] & 0x1ffc0) >> 6);
+}
+
+static void
+Slot_gp_Format_gp_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x3ffff80) >> 7);
+}
+
+static void
+Slot_gp_Format_gp_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x3ffff80) | ((slotbuf[0] & 0x7ffff) << 7);
+}
+
+static void
+Slot_dot_Format_dot_slot2_44_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0xfffff000) >> 12);
+}
+
+static void
+Slot_dot_Format_dot_slot2_44_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0xfffff000) | ((slotbuf[0] & 0xfffff) << 12);
+}
+
+static void
+Slot_dot_Format_dot_slot1_24_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xff000000) >> 24);
+ slotbuf[0] = (slotbuf[0] & ~0xfff00) | ((insn[1] & 0xfff) << 8);
+}
+
+static void
+Slot_dot_Format_dot_slot1_24_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xff000000) | ((slotbuf[0] & 0xff) << 24);
+ insn[1] = (insn[1] & ~0xfff) | ((slotbuf[0] & 0xfff00) >> 8);
+}
+
+static void
+Slot_dot_Format_dot_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xffff80) >> 7);
+}
+
+static void
+Slot_dot_Format_dot_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff80) | ((slotbuf[0] & 0x1ffff) << 7);
+}
+
+static void
+Slot_pq_Format_pq_slot2_40_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0x7ffff00) >> 8);
+}
+
+static void
+Slot_pq_Format_pq_slot2_40_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0x7ffff00) | ((slotbuf[0] & 0x7ffff) << 8);
+}
+
+static void
+Slot_pq_Format_pq_slot1_26_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xfc000000) >> 26);
+ slotbuf[0] = (slotbuf[0] & ~0x3fc0) | ((insn[1] & 0xff) << 6);
+}
+
+static void
+Slot_pq_Format_pq_slot1_26_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xfc000000) | ((slotbuf[0] & 0x3f) << 26);
+ insn[1] = (insn[1] & ~0xff) | ((slotbuf[0] & 0x3fc0) >> 6);
+}
+
+static void
+Slot_pq_Format_pq_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x3ffff80) >> 7);
+}
+
+static void
+Slot_pq_Format_pq_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x3ffff80) | ((slotbuf[0] & 0x7ffff) << 7);
+}
+
+static void
+Slot_acc2_Format_acc2_slot2_47_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0x3fff8000) >> 15);
+}
+
+static void
+Slot_acc2_Format_acc2_slot2_47_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0x3fff8000) | ((slotbuf[0] & 0x7fff) << 15);
+}
+
+static void
+Slot_acc2_Format_acc2_slot1_23_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xff800000) >> 23);
+ slotbuf[0] = (slotbuf[0] & ~0xfffe00) | ((insn[1] & 0x7fff) << 9);
+}
+
+static void
+Slot_acc2_Format_acc2_slot1_23_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xff800000) | ((slotbuf[0] & 0x1ff) << 23);
+ insn[1] = (insn[1] & ~0x7fff) | ((slotbuf[0] & 0xfffe00) >> 9);
+}
+
+static void
+Slot_acc2_Format_acc2_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x7fff80) >> 7);
+}
+
+static void
+Slot_acc2_Format_acc2_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x7fff80) | ((slotbuf[0] & 0xffff) << 7);
+}
+
+static void
+Slot_smod_Format_smod_slot2_42_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0x3ffffc00) >> 10);
+}
+
+static void
+Slot_smod_Format_smod_slot2_42_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0x3ffffc00) | ((slotbuf[0] & 0xfffff) << 10);
+}
+
+static void
+Slot_smod_Format_smod_slot1_26_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xfc000000) >> 26);
+ slotbuf[0] = (slotbuf[0] & ~0xffc0) | ((insn[1] & 0x3ff) << 6);
+}
+
+static void
+Slot_smod_Format_smod_slot1_26_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xfc000000) | ((slotbuf[0] & 0x3f) << 26);
+ insn[1] = (insn[1] & ~0x3ff) | ((slotbuf[0] & 0xffc0) >> 6);
+}
+
+static void
+Slot_smod_Format_smod_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x3ffff80) >> 7);
+}
+
+static void
+Slot_smod_Format_smod_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x3ffff80) | ((slotbuf[0] & 0x7ffff) << 7);
+}
+
+static void
+Slot_llr_Format_llr_slot2_44_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0x7ffff000) >> 12);
+}
+
+static void
+Slot_llr_Format_llr_slot2_44_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0x7ffff000) | ((slotbuf[0] & 0x7ffff) << 12);
+}
+
+static void
+Slot_llr_Format_llr_slot1_24_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xff000000) >> 24);
+ slotbuf[0] = (slotbuf[0] & ~0xfff00) | ((insn[1] & 0xfff) << 8);
+}
+
+static void
+Slot_llr_Format_llr_slot1_24_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xff000000) | ((slotbuf[0] & 0xff) << 24);
+ insn[1] = (insn[1] & ~0xfff) | ((slotbuf[0] & 0xfff00) >> 8);
+}
+
+static void
+Slot_llr_Format_llr_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xffff80) >> 7);
+}
+
+static void
+Slot_llr_Format_llr_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff80) | ((slotbuf[0] & 0x1ffff) << 7);
+}
+
+static void
+Slot_dual_Format_dual_slot2_31_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x80000000) >> 31);
+ slotbuf[0] = (slotbuf[0] & ~0xfffffe) | ((insn[1] & 0x7fffff) << 1);
+}
+
+static void
+Slot_dual_Format_dual_slot2_31_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x80000000) | ((slotbuf[0] & 0x1) << 31);
+ insn[1] = (insn[1] & ~0x7fffff) | ((slotbuf[0] & 0xfffffe) >> 1);
+}
+
+static void
+Slot_dual_Format_dual_slot1_30_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x40000000) >> 30);
+}
+
+static void
+Slot_dual_Format_dual_slot1_30_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x40000000) | ((slotbuf[0] & 0x1) << 30);
+}
+
+static void
+Slot_dual_Format_dual_slot0_7_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x3fffff80) >> 7);
+}
+
+static void
+Slot_dual_Format_dual_slot0_7_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x3fffff80) | ((slotbuf[0] & 0x7fffff) << 7);
+}
+
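+/* Per-slot field accessor tables, indexed by field id.  A zero entry
+   means the field is not encoded in that slot; the Implicit_Field_*
+   entries at the end supply operands that are implicit in the opcode
+   rather than encoded in the instruction word. */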
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst_get,
+ Field_s2_Slot_inst_get,
+ Field_r2_Slot_inst_get,
+ Field_t4_Slot_inst_get,
+ Field_s4_Slot_inst_get,
+ Field_r4_Slot_inst_get,
+ Field_t8_Slot_inst_get,
+ Field_s8_Slot_inst_get,
+ Field_r8_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_fimm8_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2019_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2021_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2029_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2030_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2032_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2035_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2036_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2037_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2038_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2039_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2040_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2041_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2042_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2043_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2044_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2045_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2046_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2047_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2048_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2049_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2050_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2051_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2052_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2053_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2054_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2055_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2056_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2082inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2083inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2084inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2085inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2086inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2088inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2089inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2090inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2091inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2092inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2094inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2095inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2096inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2098inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2099inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2100inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2101inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2102inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2103inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2104inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2105inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2106inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2107inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2108inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2109inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2110inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2111inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2112inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2113inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2114inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2115inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2116inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2117inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2118inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2119inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2120inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2122inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2123inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2124inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2125inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2126inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2127inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2128inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2129inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2131inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2132inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2133inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2134inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2136inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2137inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2138inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2139inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2140inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2141inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2142inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2143inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2144inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2145inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2146inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2147inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2149inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2151inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2153inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2154inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2155inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2156inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2157inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2158inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2159inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2160inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2161inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2162inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2163inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2164inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2165inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2166inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2167inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2168inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2169inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2171inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2172inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2173inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2174inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2175inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2177inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2178inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2179inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2180inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2181inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2182inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2183inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2184inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2185inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2186inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2187inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2188inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2189inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2190inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2191inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2192inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2193inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2194inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2195inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2196inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2197inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2198inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2199inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2200inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2201inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2202inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2203inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2204inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2205inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2206inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2207inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2208inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2209inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2210inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2211inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2212inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2213inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2214inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2215inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2216inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2217inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2218inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2219inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2220inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2221inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2222inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2223inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2224inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2225inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2226inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2227inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2228inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2229inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2230inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2231inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2232inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2234inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2235inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2236inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2237inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2238inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2239inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2240inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2241inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2242inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2243inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2244inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2245inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2246inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2247inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2248inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2249inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2250inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2251inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2252inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2253inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2254_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2255inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld2257inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3627inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3630inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3631inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3633inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3634_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3635inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3636inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3637inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3638inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3639inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3640inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3642inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3643inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3644inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3645inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3647inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3648inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3649inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3650inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3651inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3653inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3654inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3655inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3656inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3657inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3658inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3659inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3660inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3661inst_Slot_inst_get,
+ Field_dsp340050b49a6c_fld3662inst_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst_set,
+ Field_s2_Slot_inst_set,
+ Field_r2_Slot_inst_set,
+ Field_t4_Slot_inst_set,
+ Field_s4_Slot_inst_set,
+ Field_r4_Slot_inst_set,
+ Field_t8_Slot_inst_set,
+ Field_s8_Slot_inst_set,
+ Field_r8_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_fimm8_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2019_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2021_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2029_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2030_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2032_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2035_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2036_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2037_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2038_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2039_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2040_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2041_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2042_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2043_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2044_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2045_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2046_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2047_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2048_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2049_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2050_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2051_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2052_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2053_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2054_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2055_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2056_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2082inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2083inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2084inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2085inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2086inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2088inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2089inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2090inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2091inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2092inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2094inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2095inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2096inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2098inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2099inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2100inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2101inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2102inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2103inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2104inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2105inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2106inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2107inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2108inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2109inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2110inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2111inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2112inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2113inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2114inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2115inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2116inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2117inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2118inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2119inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2120inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2122inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2123inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2124inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2125inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2126inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2127inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2128inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2129inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2131inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2132inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2133inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2134inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2136inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2137inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2138inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2139inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2140inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2141inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2142inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2143inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2144inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2145inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2146inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2147inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2149inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2151inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2153inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2154inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2155inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2156inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2157inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2158inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2159inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2160inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2161inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2162inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2163inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2164inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2165inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2166inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2167inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2168inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2169inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2171inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2172inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2173inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2174inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2175inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2177inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2178inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2179inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2180inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2181inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2182inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2183inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2184inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2185inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2186inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2187inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2188inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2189inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2190inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2191inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2192inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2193inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2194inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2195inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2196inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2197inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2198inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2199inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2200inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2201inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2202inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2203inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2204inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2205inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2206inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2207inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2208inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2209inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2210inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2211inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2212inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2213inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2214inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2215inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2216inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2217inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2218inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2219inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2220inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2221inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2222inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2223inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2224inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2225inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2226inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2227inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2228inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2229inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2230inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2231inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2232inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2234inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2235inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2236inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2237inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2238inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2239inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2240inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2241inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2242inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2243inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2244inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2245inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2246inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2247inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2248inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2249inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2250inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2251inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2252inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2253inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2254_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2255inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld2257inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3627inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3630inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3631inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3633inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3634_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3635inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3636inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3637inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3638inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3639inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3640inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3642inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3643inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3644inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3645inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3647inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3648inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3649inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3650inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3651inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3653inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3654inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3655inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3656inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3657inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3658inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3659inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3660inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3661inst_Slot_inst_set,
+ Field_dsp340050b49a6c_fld3662inst_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
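+/*
+ * The Slot_inst_get_field_fns / Slot_inst_set_field_fns pair above is
+ * indexed by the global field enumeration; a 0 entry marks a field that
+ * cannot be encoded in this slot.  Below is a minimal sketch of how such
+ * per-slot accessor tables are typically consumed.  The slot_get_field()
+ * and slot_set_field() helpers and the `slots' lookup structure are
+ * illustrative assumptions, not code introduced by this change; only the
+ * table layout itself comes from the arrays above.
+ */
+#if 0   /* illustrative sketch only */
+static uint32_t
+slot_get_field(const xtensa_insnbuf insnbuf, int slot, int fieldnum)
+{
+    xtensa_get_field_fn fn = slots[slot].get_field_fns[fieldnum];
+    /* a 0 entry means the field is not present in this slot */
+    return fn ? fn(insnbuf) : 0;
+}
+
+static void
+slot_set_field(xtensa_insnbuf insnbuf, int slot, int fieldnum, uint32_t v)
+{
+    xtensa_set_field_fn fn = slots[slot].set_field_fns[fieldnum];
+    if (fn) {
+        fn(insnbuf, v);
+    }
+}
+#endif
+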
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ Field_t2_Slot_inst16a_get,
+ Field_s2_Slot_inst16a_get,
+ Field_r2_Slot_inst16a_get,
+ Field_t4_Slot_inst16a_get,
+ Field_s4_Slot_inst16a_get,
+ Field_r4_Slot_inst16a_get,
+ Field_t8_Slot_inst16a_get,
+ Field_s8_Slot_inst16a_get,
+ Field_r8_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ Field_t2_Slot_inst16a_set,
+ Field_s2_Slot_inst16a_set,
+ Field_r2_Slot_inst16a_set,
+ Field_t4_Slot_inst16a_set,
+ Field_s4_Slot_inst16a_set,
+ Field_r4_Slot_inst16a_set,
+ Field_t8_Slot_inst16a_set,
+ Field_s8_Slot_inst16a_set,
+ Field_r8_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ Field_t2_Slot_inst16b_get,
+ Field_s2_Slot_inst16b_get,
+ Field_r2_Slot_inst16b_get,
+ Field_t4_Slot_inst16b_get,
+ Field_s4_Slot_inst16b_get,
+ Field_r4_Slot_inst16b_get,
+ Field_t8_Slot_inst16b_get,
+ Field_s8_Slot_inst16b_get,
+ Field_r8_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ Field_t2_Slot_inst16b_set,
+ Field_s2_Slot_inst16b_set,
+ Field_r2_Slot_inst16b_set,
+ Field_t4_Slot_inst16b_set,
+ Field_s4_Slot_inst16b_set,
+ Field_r4_Slot_inst16b_set,
+ Field_t8_Slot_inst16b_set,
+ Field_s8_Slot_inst16b_set,
+ Field_r8_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
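+/*
+ * Note on the recurring table tails: every slot's accessor table ends
+ * with the same eight Implicit_Field entries (ar0/ar4/ar8/ar12 and
+ * bt16/bs16/br16/brall).  Judging from the naming and the table layout,
+ * these stand for operands with a fixed encoding that occupy no
+ * instruction bits, so the getters return a constant operand value and
+ * the shared Implicit_Field_set setter discards writes.  This is a
+ * reading of the generated tables, not a definition made here.
+ */
+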
+static xtensa_get_field_fn
+Slot_gp_slot2_get_field_fns[] = {
+ Field_t_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_gp_slot2_get,
+ Field_r_Slot_gp_slot2_get,
+ 0,
+ 0,
+ Field_sae_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_gp_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_gp_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_gp_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_gp_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2056_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s3_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2025_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2258gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2259gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2260gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2261gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2262gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2263gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2264gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2266gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2267gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2268gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2269gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2270gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2271gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2272_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2273gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2274gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2275gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2277gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2278gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2279gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2280gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2281gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2282gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2283gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2284gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2286gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2287gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2288gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2289gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2290gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2291gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2292gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2293gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2294gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2295gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2296gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2297gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2298gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2299gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2300gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2301gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2303gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2304gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2306gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2308gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2309gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2310gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2312gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2314gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2316gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2317gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2318gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2319gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2320gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2321gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2322gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2323gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2324gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2325gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2326gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2327gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2328gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2329gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2330gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2331gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2332gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2333gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2334gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2335gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2336gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2337gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2338gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2339gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2340gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2341gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2342gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2343gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2344gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2345gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2346gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2347gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2348gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2349gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2350gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2351gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2352gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2353gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2354gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2355gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2356gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2357gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2358gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2359gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2361gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2362gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2364gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2366gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2368gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2369gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2370gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2371gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2372gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2373gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2374gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2375gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2376gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2378gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2379gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2381gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2383gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2385gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2386_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2387gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2388gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld2389gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3663gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3664gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3665gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3666_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3667gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3668gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3669gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3670gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3671gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3673gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3674gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3675gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3676gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3678gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3679gp_slot2_Slot_gp_slot2_get,
+ Field_dsp340050b49a6c_fld3680gp_slot2_Slot_gp_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_gp_slot2_set_field_fns[] = {
+ Field_t_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_gp_slot2_set,
+ Field_r_Slot_gp_slot2_set,
+ 0,
+ 0,
+ Field_sae_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_gp_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_gp_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_gp_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_gp_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2056_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s3_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2025_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2258gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2259gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2260gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2261gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2262gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2263gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2264gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2266gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2267gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2268gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2269gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2270gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2271gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2272_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2273gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2274gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2275gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2277gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2278gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2279gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2280gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2281gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2282gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2283gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2284gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2286gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2287gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2288gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2289gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2290gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2291gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2292gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2293gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2294gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2295gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2296gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2297gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2298gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2299gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2300gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2301gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2302_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2303gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2304gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2305_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2306gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2308gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2309gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2310gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2312gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2313_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2314gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2316gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2317gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2318gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2319gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2320gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2321gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2322gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2323gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2324gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2325gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2326gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2327gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2328gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2329gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2330gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2331gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2332gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2333gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2334gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2335gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2336gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2337gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2338gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2339gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2340gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2341gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2342gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2343gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2344gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2345gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2346gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2347gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2348gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2349gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2350gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2351gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2352gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2353gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2354gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2355gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2356gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2357gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2358gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2359gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2361gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2362gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2364gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2366gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2368gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2369gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2370gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2371gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2372gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2373gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2374gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2375gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2376gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2378gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2379gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2381gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2383gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2384_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2385gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2386_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2387gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2388gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld2389gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3663gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3664gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3665gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3666_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3667gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3668gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3669gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3670gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3671gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3673gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3674gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3675gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3676gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3678gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3679gp_slot2_Slot_gp_slot2_set,
+ Field_dsp340050b49a6c_fld3680gp_slot2_Slot_gp_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_gp_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2050_Slot_gp_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2026_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2394gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2395gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2397gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2398gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2399gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2400gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2402gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2403gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld2405gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld3681gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld3684gp_slot1_Slot_gp_slot1_get,
+ Field_dsp340050b49a6c_fld3686gp_slot1_Slot_gp_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_gp_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2050_Slot_gp_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2026_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2394gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2395gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2397gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2398gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2399gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2400gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2402gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2403gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld2405gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld3681gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld3683gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld3684gp_slot1_Slot_gp_slot1_set,
+ Field_dsp340050b49a6c_fld3686gp_slot1_Slot_gp_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_gp_slot0_get_field_fns[] = {
+ Field_t_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_gp_slot0_get,
+ Field_r_Slot_gp_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_gp_slot0_get,
+ Field_sal_Slot_gp_slot0_get,
+ Field_sargt_Slot_gp_slot0_get,
+ 0,
+ Field_sas_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_gp_slot0_get,
+ Field_imm7_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_gp_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2030_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2032_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2035_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2037_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2038_Slot_gp_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_gp_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2042_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2043_Slot_gp_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2048_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2051_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2052_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2054_Slot_gp_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s5_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2067_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2407gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2409gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2410gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2411gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2412gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2413gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2415gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2416gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2417gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2418gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2419gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2420gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2422gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2423gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2424gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2425gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2426gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2427gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2429gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2430gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2431gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2432gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2433gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2434gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2435gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2436gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2437gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2438gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2439gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2440gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2441gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2443gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2444gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2445_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2447gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2448_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2449gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2451gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2452gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2453gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2454gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2455gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2456gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2457gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2458gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2459gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2460gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2461gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2462_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2463gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2464gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2465gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2466gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2467gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2468gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2470gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2471gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2472gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2473gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2474gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2475gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2477gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2479gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2480gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2481gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2482gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2483gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2484gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2485gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2486gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2487gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2488gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2489gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2490gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2491gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2492gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2493gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2494gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2495gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2496gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2497gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2498gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2499gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2500gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2501gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2502gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2503gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2504gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2505gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2506gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2507gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2508gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2509gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2510gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2512gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2514gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2515gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2516gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2517gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2518gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2519gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2520gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2521gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2523gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2524gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2526gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2527gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2528gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2529gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2530_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld2531gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3688gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3689gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3690gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3691gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3692gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3693gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3696gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3697gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3698gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3699gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3700gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3702gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3703gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3705gp_slot0_Slot_gp_slot0_get,
+ Field_dsp340050b49a6c_fld3706gp_slot0_Slot_gp_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
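
This table (and the matching `*_set_field_fns` table that follows) is generated libisa dispatch data: one entry per global field id, holding the accessor that extracts that field from an instruction in the `gp_slot0` slot, with `0` marking fields that cannot be encoded in this slot. A minimal sketch of how such a table might be consulted, assuming hypothetical names (`get_field_fn`, `slot_get_field`) rather than QEMU's actual internal API:

```c
/* Minimal sketch, not QEMU's actual API: look up a field accessor in a
 * per-slot dispatch table. Tables are indexed by global field id, and a
 * NULL (0) entry means the field is not encodable in this slot. */
#include <stdint.h>
#include <stddef.h>

typedef uint32_t (*get_field_fn)(const uint32_t *insn);

static int slot_get_field(get_field_fn *fns, size_t nfields,
                          size_t field_id, const uint32_t *insn,
                          uint32_t *value)
{
    if (field_id >= nfields || fns[field_id] == NULL) {
        return -1;                    /* field absent from this slot */
    }
    *value = fns[field_id](insn);     /* extract the encoded bits */
    return 0;
}
```

The dense runs of `0,` above are exactly the NULL entries this lookup guards against: most global field ids simply do not exist in any given slot.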
+
+static xtensa_set_field_fn
+Slot_gp_slot0_set_field_fns[] = {
+ Field_t_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_gp_slot0_set,
+ Field_r_Slot_gp_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_gp_slot0_set,
+ Field_sal_Slot_gp_slot0_set,
+ Field_sargt_Slot_gp_slot0_set,
+ 0,
+ Field_sas_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_gp_slot0_set,
+ Field_imm7_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_gp_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2030_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2032_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2035_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2036_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2037_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2038_Slot_gp_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_gp_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2042_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2043_Slot_gp_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2048_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2049_Slot_gp_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2051_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2052_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2054_Slot_gp_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s5_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2058_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2067_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2407gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2409gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2410gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2411gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2412gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2413gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2415gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2416gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2417gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2418gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2419gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2420gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2422gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2423gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2424gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2425gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2426gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2427gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2429gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2430gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2431gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2432gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2433gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2434gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2435gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2436gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2437gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2438gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2439gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2440gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2441gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2443gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2444gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2445_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2447gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2448_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2449gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2451gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2452gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2453gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2454gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2455gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2456gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2457gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2458gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2459gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2460gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2461gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2462_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2463gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2464gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2465gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2466gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2467gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2468gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2470gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2471gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2472gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2473gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2474gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2475gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2477gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2479gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2480gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2481gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2482gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2483gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2484gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2485gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2486gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2487gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2488gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2489gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2490gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2491gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2492gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2493gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2494gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2495gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2496gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2497gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2498gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2499gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2500gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2501gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2502gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2503gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2504gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2505gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2506gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2507gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2508gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2509gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2510gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2512gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2514gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2515gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2516gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2517gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2518gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2519gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2520gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2521gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2523gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2524gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2526gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2527gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2528gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2529gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2530_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld2531gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3688gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3689gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3690gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3691gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3692gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3693gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3695gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3696gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3697gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3698gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3699gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3700gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3702gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3703gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3705gp_slot0_Slot_gp_slot0_set,
+ Field_dsp340050b49a6c_fld3706gp_slot0_Slot_gp_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
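
The tail of the get table pairs per-register `Implicit_Field_*_get` getters with a single shared `Implicit_Field_set` here. Implicit fields describe operands fixed by the opcode itself (for example `ar0` or `br16`) rather than bits stored in the instruction word, so the getter returns the implied constant and the setter has nothing to write. A hedged sketch of that pattern, with illustrative names:

```c
/* Sketch of the implicit-field pattern: the operand value is implied
 * by the opcode, never encoded, so get returns a constant and set is
 * a no-op. Names are illustrative, not taken from the QEMU sources. */
#include <stdint.h>

static uint32_t implicit_ar0_get(const uint32_t *insn)
{
    (void)insn;
    return 0;       /* register index implied by the field name */
}

static void implicit_field_set(uint32_t *insn, uint32_t val)
{
    (void)insn;
    (void)val;      /* nothing to encode: the value is fixed */
}
```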
+
+static xtensa_get_field_fn
+Slot_dot_slot2_get_field_fns[] = {
+ Field_t_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_dot_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_dot_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_dot_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2056_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s6_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2532dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2533dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2534dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2535dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2536dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2537dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2538dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2539dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2540dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2541dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2542dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2543dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2544dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2545dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2546dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2547dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2548dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2549dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2550dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2551dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2552dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2553dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2554dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2555dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2556dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2557dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2558dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2559dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2560dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2561dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2562dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2563dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2564dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2565dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2566dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2567dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2568dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2569dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2571dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2572dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2573dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2574dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2575dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2576dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2577dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2578_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2579dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2580dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2581dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2582dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2583dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2584dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2585dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2586dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2587dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2588dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2589dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2590dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2591dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2592dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2595dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2596dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2598dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2599dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2601dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2602dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2604dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2606dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2608dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2609dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2610dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2611dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2612_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2613dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2614_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2615dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2616dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2617dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2618dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2619dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2620dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2621dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2622dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2623dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2624dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2625_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2626dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2628dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2630dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2632dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2633dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2635dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2636dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2637dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2640dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2641dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2642dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2643dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2644dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2645dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2646dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2647dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2648dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2649dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2650dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2651dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2652dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2654dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2655dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2656dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2657dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld2658dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3710dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3711dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3712_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3713dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3714dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3715_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3716dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3717dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3718_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3719dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3721dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3722_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3723dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3724dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3726dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3727dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3728dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3729dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3731dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3732dot_slot2_Slot_dot_slot2_get,
+ Field_dsp340050b49a6c_fld3733dot_slot2_Slot_dot_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
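
Each slot contributes a get/set table pair of identical length and layout, so any field present in a slot can be written and read back symmetrically. A small round-trip sketch of how a paired entry is expected to behave, using a toy 4-bit field rather than any real Xtensa encoding:

```c
/* Round-trip sketch with a toy 4-bit field at bit 8, illustrating the
 * expected symmetry of a get/set table pair (names and layout are
 * illustrative only, not from the QEMU sources). */
#include <assert.h>
#include <stdint.h>

static uint32_t toy_field_get(const uint32_t *insn)
{
    return (insn[0] >> 8) & 0xf;
}

static void toy_field_set(uint32_t *insn, uint32_t val)
{
    insn[0] = (insn[0] & ~(0xfu << 8)) | ((val & 0xf) << 8);
}

int main(void)
{
    uint32_t insn[1] = { 0 };

    toy_field_set(insn, 9);
    assert(toy_field_get(insn) == 9);   /* set then get must agree */
    return 0;
}
```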
+
+static xtensa_set_field_fn
+Slot_dot_slot2_set_field_fns[] = {
+ Field_t_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_dot_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_dot_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_dot_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2056_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s6_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2532dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2533dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2534dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2535dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2536dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2537dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2538dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2539dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2540dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2541dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2542dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2543dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2544dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2545dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2546dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2547dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2548dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2549dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2550dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2551dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2552dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2553dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2554dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2555dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2556dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2557dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2558dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2559dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2560dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2561dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2562dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2563dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2564dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2565dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2566dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2567dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2568dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2569dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2571dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2572dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2573dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2574dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2575dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2576dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2577dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2578_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2579dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2580dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2581dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2582dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2583dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2584dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2585dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2586dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2587dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2588dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2589dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2590dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2591dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2592dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2595dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2596dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2598dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2599dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2601dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2602dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2604dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2605_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2606dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2608dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2609dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2610dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2611dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2612_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2613dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2614_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2615dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2616dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2617dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2618dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2619dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2620dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2621dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2622dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2623dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2624dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2625_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2626dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2628dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2630dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2632dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2633dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2635dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2636dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2637dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2640dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2641dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2642dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2643dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2644dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2645dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2646dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2647dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2648dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2649dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2650dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2651dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2652dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2654dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2655dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2656dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2657dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld2658dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3708dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3709dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3710dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3711dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3712_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3713dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3714dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3715_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3716dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3717dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3718_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3719dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3721dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3722_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3723dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3724dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3725dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3726dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3727dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3728dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3729dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3731dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3732dot_slot2_Slot_dot_slot2_set,
+ Field_dsp340050b49a6c_fld3733dot_slot2_Slot_dot_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
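These machine-generated arrays are per-slot dispatch tables: entry N holds the accessor for field N of the slot, and a 0 entry marks a field the slot's encoding cannot carry, so a caller has to test for a null pointer before dispatching. A minimal standalone sketch of that lookup pattern follows; the simplified slot-buffer type, the demo field layout, and every name in it (set_field_fn, set_imm4, set_slot_field, demo_set_fns) are illustrative assumptions, not code from this commit.

/* Standalone sketch (assumed, not from the generated overlay): consulting a
 * per-slot set-table in which 0 marks a field absent from the slot. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t insnbuf_word;                 /* simplified slot buffer */
typedef void (*set_field_fn)(insnbuf_word *, uint32_t);

/* Example setter: writes a 4-bit field into bits [7:4] of the slot word. */
static void set_imm4(insnbuf_word *slotbuf, uint32_t val)
{
    *slotbuf = (*slotbuf & ~0xf0u) | ((val & 0xfu) << 4);
}

/* Miniature table in the same shape as the Slot_*_set_field_fns arrays:
 * index == field number, 0 == field not encodable in this slot. */
static set_field_fn demo_set_fns[] = { 0, set_imm4, 0 };

static int set_slot_field(set_field_fn *fns, size_t nfields,
                          size_t fieldnum, insnbuf_word *slotbuf, uint32_t val)
{
    if (fieldnum >= nfields || fns[fieldnum] == 0) {
        return -1;                             /* field absent: refuse */
    }
    fns[fieldnum](slotbuf, val);
    return 0;
}

int main(void)
{
    insnbuf_word slot = 0;
    if (set_slot_field(demo_set_fns, 3, 1, &slot, 0x9) == 0) {
        printf("slot word: 0x%08x\n", (unsigned)slot);   /* 0x00000090 */
    }
    return 0;
}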
+static xtensa_get_field_fn
+Slot_dot_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_dot_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_dot_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_dot_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_dot_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s7_Slot_dot_slot1_get,
+ Field_dsp340050b49a6c_fld3734dot_slot1_Slot_dot_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_dot_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_dot_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_dot_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_dot_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_dot_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s7_Slot_dot_slot1_set,
+ Field_dsp340050b49a6c_fld3734dot_slot1_Slot_dot_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
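The eight trailing entries of the set-table above are all the shared Implicit_Field_set, paired with the Implicit_Field_ar0_get ... Implicit_Field_brall_get tail of the matching get-table: implicit operands (ar0 through brall) still occupy table slots so dispatch stays uniform, but they encode no instruction bits. The no-op setter below is an assumed illustration of that convention, not the commit's Implicit_Field_set.

#include <stdint.h>

/* Sketch (assumption): an implicit operand accepts a set call but writes
 * nothing, since the operand is implied rather than encoded. */
static void implicit_field_set(uint32_t *slotbuf, uint32_t val)
{
    (void)slotbuf;
    (void)val;                  /* nothing to write */
}

int main(void)
{
    uint32_t slot = 0;
    implicit_field_set(&slot, 7);   /* leaves the slot word untouched */
    return (int)slot;               /* 0 */
}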
+static xtensa_get_field_fn
+Slot_dot_slot0_get_field_fns[] = {
+ Field_t_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ Field_sal_Slot_dot_slot0_get,
+ Field_sargt_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2035_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2036_Slot_dot_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2038_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_dot_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2048_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2049_Slot_dot_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2054_Slot_dot_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s8_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2068_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2666dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2667dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2668dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2669dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2671dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2672dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2673dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2674dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2675dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2676dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2677dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2678dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2679dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2680dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2681dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2682dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2683dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2684dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2685dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2686dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2688dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2689dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2690dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2692dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2693dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2695dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2697dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2699dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2700dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2701dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2702dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2703dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2704dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3735dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3736_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3737dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3738dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3739dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3740dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3741dot_slot0_Slot_dot_slot0_get,
+ Field_dsp340050b49a6c_fld3742dot_slot0_Slot_dot_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_dot_slot0_set_field_fns[] = {
+ Field_t_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ Field_sal_Slot_dot_slot0_set,
+ Field_sargt_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dot_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2035_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2036_Slot_dot_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2038_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_dot_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2048_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2049_Slot_dot_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2054_Slot_dot_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s8_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2068_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2666dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2667dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2668dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2669dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2671dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2672dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2673dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2674dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2675dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2676dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2677dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2678dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2679dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2680dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2681dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2682dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2683dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2684dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2685dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2686dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2688dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2689dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2690dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2692dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2693dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2695dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2697dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2699dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2700dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2701dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2702dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2703dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2704dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld2705dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3735dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3736_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3737dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3738dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3739dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3740dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3741dot_slot0_Slot_dot_slot0_set,
+ Field_dsp340050b49a6c_fld3742dot_slot0_Slot_dot_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
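The get and set tables for a slot are kept index-parallel: Field_t, Field_s, Field_r, sal/sargt, and imm7 sit at the same offsets (0, 5, 14, 18/19, 34) in Slot_dot_slot0_get_field_fns and Slot_dot_slot0_set_field_fns above, as do the implicit-field tails, so a value decoded through one table re-encodes through the other at the same index. A hedged round-trip sketch under the same simplifying assumptions as the earlier one:

/* Standalone sketch (assumed names and field layout): index-parallel
 * get/set tables allow a decode-then-reencode round trip per field. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t insnbuf_word;
typedef uint32_t (*get_field_fn)(const insnbuf_word *);
typedef void (*set_field_fn)(insnbuf_word *, uint32_t);

static uint32_t get_r(const insnbuf_word *b) { return (*b >> 12) & 0xfu; }
static void set_r(insnbuf_word *b, uint32_t v)
{
    *b = (*b & ~(0xfu << 12)) | ((v & 0xfu) << 12);
}

/* Parallel miniature tables: entry 1 is the same field in both. */
static get_field_fn get_fns[] = { 0, get_r };
static set_field_fn set_fns[] = { 0, set_r };

int main(void)
{
    insnbuf_word a = 0x5000, b = 0;
    uint32_t v = get_fns[1](&a);    /* decode field 1 from slot a */
    set_fns[1](&b, v);              /* re-encode it into slot b */
    assert(get_fns[1](&b) == v);    /* round trip preserves the field */
    return 0;
}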
+static xtensa_get_field_fn
+Slot_pq_slot2_get_field_fns[] = {
+ Field_t_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_pq_slot2_get,
+ Field_r_Slot_pq_slot2_get,
+ 0,
+ 0,
+ Field_sae_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_pq_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s9_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2706pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2707pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2708pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2709pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2710pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2711pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2713pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2714pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2715pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2717pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2718pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2719pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2721pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2722pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2723pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2724pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2725pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2726pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2727pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2728pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2729pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2730pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2731pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2732pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2733pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2734pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2735pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2736pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2738pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2739pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2741pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2742pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2743pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2746pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2747pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2748pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2750pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2751pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2752pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2753pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2754pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2755pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2756pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2757pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2758pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2759pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2760pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2761pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2762pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2763pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2764pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2765pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2766pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2767pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2768pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2769pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2770pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2771pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2772pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2773pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2774pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2775pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2776pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2777pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2778pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2779pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2780pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2781pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2782pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2783pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2784pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2785pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2786pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2787pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2788pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2789pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2790pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2791pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2792pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2793pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2795pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2796pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2798pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2801pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2803pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2805pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2806pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2807pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2808pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2809pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2810pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2811pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2812pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2814pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2816pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2818pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2820pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2821pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld2823pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3744pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3746pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3747pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3748_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3749pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3751pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3752pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3753pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3754pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3756pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3757pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3758pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3759pq_slot2_Slot_pq_slot2_get,
+ Field_dsp340050b49a6c_fld3760pq_slot2_Slot_pq_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
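+/* Setter companion to Slot_pq_slot2_get_field_fns above: one entry per
+ * field id, 0 where the field has no encoding in the pq_slot2 slot. */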
+static xtensa_set_field_fn
+Slot_pq_slot2_set_field_fns[] = {
+ Field_t_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_pq_slot2_set,
+ Field_r_Slot_pq_slot2_set,
+ 0,
+ 0,
+ Field_sae_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_pq_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s9_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2706pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2707pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2708pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2709pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2710pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2711pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2713pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2714pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2715pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2717pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2718pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2719pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2721pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2722pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2723pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2724pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2725pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2726pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2727pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2728pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2729pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2730pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2731pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2732pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2733pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2734pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2735pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2736pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2737_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2738pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2739pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2741pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2742pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2743pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2746pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2747pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2748pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2750pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2751pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2752pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2753pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2754pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2755pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2756pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2757pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2758pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2759pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2760pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2761pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2762pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2763pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2764pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2765pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2766pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2767pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2768pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2769pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2770pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2771pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2772pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2773pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2774pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2775pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2776pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2777pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2778pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2779pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2780pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2781pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2782pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2783pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2784pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2785pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2786pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2787pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2788pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2789pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2790pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2791pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2792pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2793pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2795pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2796pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2798pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2801pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2803pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2805pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2806pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2807pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2808pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2809pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2810pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2811pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2812pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2814pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2816pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2817_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2818pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2819_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2820pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2821pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld2823pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3744pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3745pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3746pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3747pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3748_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3749pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3750pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3751pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3752pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3753pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3754pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3756pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3757pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3758pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3759pq_slot2_Slot_pq_slot2_set,
+ Field_dsp340050b49a6c_fld3760pq_slot2_Slot_pq_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
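+ /* The implicit operands share a single setter, since there are no
+  * slot bits to write for them. */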
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
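+/* Same layout for the pq_slot1 slot: getters indexed by field id,
+ * 0 for fields with no encoding in this slot. */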
+static xtensa_get_field_fn
+Slot_pq_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot1_get,
+ Field_dsp340050b49a6c_fld2050_Slot_pq_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_pq_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_pq_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s10_Slot_pq_slot1_get,
+ Field_dsp340050b49a6c_fld2825pq_slot1_Slot_pq_slot1_get,
+ Field_dsp340050b49a6c_fld2826pq_slot1_Slot_pq_slot1_get,
+ Field_dsp340050b49a6c_fld3761pq_slot1_Slot_pq_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
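+ /* Implicit operand accessors, identical across the slot formats. */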
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
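+/* Setter table for pq_slot1, parallel to the getter table above. */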
+static xtensa_set_field_fn
+Slot_pq_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot1_set,
+ Field_dsp340050b49a6c_fld2050_Slot_pq_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_pq_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_pq_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s10_Slot_pq_slot1_set,
+ Field_dsp340050b49a6c_fld2825pq_slot1_Slot_pq_slot1_set,
+ Field_dsp340050b49a6c_fld2826pq_slot1_Slot_pq_slot1_set,
+ Field_dsp340050b49a6c_fld3761pq_slot1_Slot_pq_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
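+ /* Implicit operands again share the single Implicit_Field_set stub. */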
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
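+/* Getter table for the pq_slot0 slot; this slot exposes more fields
+ * (sal, sargt, sas, imm6, s4, s8, ...) than slots 1 and 2. */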
+static xtensa_get_field_fn
+Slot_pq_slot0_get_field_fns[] = {
+ Field_t_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_pq_slot0_get,
+ Field_r_Slot_pq_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_pq_slot0_get,
+ Field_sal_Slot_pq_slot0_get,
+ Field_sargt_Slot_pq_slot0_get,
+ 0,
+ Field_sas_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_pq_slot0_get,
+ Field_imm7_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_pq_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2030_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2032_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2035_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2037_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2038_Slot_pq_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_pq_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2042_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2043_Slot_pq_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2048_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2051_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2052_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2054_Slot_pq_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s11_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2069_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2827pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2829pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2830pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2831pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2832pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2833pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2835pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2836pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2837pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2838pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2839pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2840pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2842pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2843pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2844pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2845pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2846pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2847pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2849pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2850pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2851pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2852pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2853pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2854pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2855pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2856pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2857pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2858pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2859pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2860pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2861pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2863pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2864pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2865_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2867pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2869pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2871pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2872pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2873pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2874pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2875pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2876pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2877pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2878pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2879pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2880pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2881pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2882_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2883pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2884pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2885pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2886pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2887pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2888pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2890pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2891pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2892pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2893pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2894pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2895pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2897pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2899pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2900pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2901pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2902pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2903pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2904pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2905pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2906pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2907pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2908pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2909pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2910pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2911pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2912pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2913pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2914pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2915pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2916pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2917pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2918pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2919pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2920pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2921pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2922pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2923pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2924pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2925pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2926pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2927pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2928pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2929pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2930pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2932pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2934pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2935pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2936pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2937pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2939pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2941pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2942pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2943pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2945pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2946pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2947pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2948pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2949pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld2950pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3763pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3764pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3765pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3767pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3768pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3769pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3770pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3771pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3772_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3773pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3776pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3777pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3778pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3779pq_slot0_Slot_pq_slot0_get,
+ Field_dsp340050b49a6c_fld3780pq_slot0_Slot_pq_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
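+/* Per-field set accessors for slot "pq_slot0", indexed the same way as the
+   get table above; a 0 entry means the field cannot be encoded in this slot. */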
+static xtensa_set_field_fn
+Slot_pq_slot0_set_field_fns[] = {
+ Field_t_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_pq_slot0_set,
+ Field_r_Slot_pq_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_pq_slot0_set,
+ Field_sal_Slot_pq_slot0_set,
+ Field_sargt_Slot_pq_slot0_set,
+ 0,
+ Field_sas_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_pq_slot0_set,
+ Field_imm7_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_pq_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2030_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2032_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2035_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2036_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2037_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2038_Slot_pq_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_pq_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2042_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2043_Slot_pq_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2048_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2049_Slot_pq_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2051_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2052_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2054_Slot_pq_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s11_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2059_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2069_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2827pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2829pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2830pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2831pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2832pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2833pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2835pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2836pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2837pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2838pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2839pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2840pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2842pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2843pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2844pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2845pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2846pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2847pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2849pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2850pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2851pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2852pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2853pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2854pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2855pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2856pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2857pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2858pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2859pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2860pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2861pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2863pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2864pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2865_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2867pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2869pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2871pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2872pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2873pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2874pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2875pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2876pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2877pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2878pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2879pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2880pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2881pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2882_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2883pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2884pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2885pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2886pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2887pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2888pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2890pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2891pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2892pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2893pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2894pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2895pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2897pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2899pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2900pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2901pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2902pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2903pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2904pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2905pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2906pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2907pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2908pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2909pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2910pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2911pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2912pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2913pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2914pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2915pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2916pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2917pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2918pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2919pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2920pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2921pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2922pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2923pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2924pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2925pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2926pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2927pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2928pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2929pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2930pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2932pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2934pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2935pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2936pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2937pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2939pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2940_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2941pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2942pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2943pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2945pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2946pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2947pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2948pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2949pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld2950pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3763pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3764pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3765pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3766pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3767pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3768pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3769pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3770pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3771pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3772_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3773pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3775pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3776pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3777pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3778pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3779pq_slot0_Slot_pq_slot0_set,
+ Field_dsp340050b49a6c_fld3780pq_slot0_Slot_pq_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
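+/* Per-field get accessors for slot "acc2_slot2"; same indexing scheme,
+   with 0 wherever the field is absent from this slot's encoding. */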
+static xtensa_get_field_fn
+Slot_acc2_slot2_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2027_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s12_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2953acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2954acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2955acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2956acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2957acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2958acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2959acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2960acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2963acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2964acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2966acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld2967acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3782acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3783acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3785acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3786acc2_slot2_Slot_acc2_slot2_get,
+ Field_dsp340050b49a6c_fld3788acc2_slot2_Slot_acc2_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
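+/* Matching set accessors for slot "acc2_slot2". */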
+static xtensa_set_field_fn
+Slot_acc2_slot2_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2027_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s12_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2953acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2954acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2955acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2956acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2957acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2958acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2959acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2960acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2963acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2964acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2966acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld2967acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3782acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3783acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3784acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3785acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3786acc2_slot2_Slot_acc2_slot2_set,
+ Field_dsp340050b49a6c_fld3788acc2_slot2_Slot_acc2_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
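+/* Per-field get accessors for slot "acc2_slot1"; the trailing
+   Implicit_Field_* entries cover operands implied by the opcode rather
+   than by encoded instruction bits. */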
+static xtensa_get_field_fn
+Slot_acc2_slot1_get_field_fns[] = {
+ Field_t_Slot_acc2_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2047_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2048_Slot_acc2_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_acc2_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_acc2_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s13_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2028_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2075_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2968acc2_slot1_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld2969acc2_slot1_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld3790acc2_slot1_Slot_acc2_slot1_get,
+ Field_dsp340050b49a6c_fld3793acc2_slot1_Slot_acc2_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
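+/* Matching set accessors for slot "acc2_slot1" (Implicit_Field_set is a
+   do-nothing stub, since implicit operands have no bits to write). */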
+static xtensa_set_field_fn
+Slot_acc2_slot1_set_field_fns[] = {
+ Field_t_Slot_acc2_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2047_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2048_Slot_acc2_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_acc2_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_acc2_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_acc2_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s13_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2028_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2075_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2968acc2_slot1_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld2969acc2_slot1_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld3790acc2_slot1_Slot_acc2_slot1_set,
+ Field_dsp340050b49a6c_fld3793acc2_slot1_Slot_acc2_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
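+/*
+ * Note on the tables below (a reader's gloss, not part of the generator's
+ * output): each Slot_<slot>_{get,set}_field_fns[] array appears to be
+ * indexed by the Xtensa field number, so a 0 (NULL) entry simply means
+ * that field is not encoded in the given slot, and the trailing
+ * Implicit_Field_* entries cover operands implicit in the slot encoding.
+ * These tables look machine-generated from the core's TIE/processor
+ * configuration (hence the dsp340050b49a6c field names and the long runs
+ * of zero entries), so they are not meant to be edited by hand.
+ */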
+static xtensa_get_field_fn
+Slot_acc2_slot0_get_field_fns[] = {
+ Field_t_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_acc2_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2056_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s14_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2973acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2974acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2975acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2976acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2977acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2980acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2981acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2982acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2984acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2985acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2987acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2989acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld2990acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3795acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3796acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3797_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3798acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3799acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3800acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3802acc2_slot0_Slot_acc2_slot0_get,
+ Field_dsp340050b49a6c_fld3803acc2_slot0_Slot_acc2_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_acc2_slot0_set_field_fns[] = {
+ Field_t_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_acc2_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2050_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2056_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s14_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2973acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2974acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2975acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2976acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2977acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2980acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2981acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2982acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2984acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2985acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2987acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2989acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld2990acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3795acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3796acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3797_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3798acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3799acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3800acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3801acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3802acc2_slot0_Slot_acc2_slot0_set,
+ Field_dsp340050b49a6c_fld3803acc2_slot0_Slot_acc2_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_smod_slot2_get_field_fns[] = {
+ Field_t_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_smod_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_smod_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2048_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2049_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_smod_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s15_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2991smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2992smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2993smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2994smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2995smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2996smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2997smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2998smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld2999smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3000smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3001smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3002smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3003smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3004smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3005smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3006smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3007smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3008smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3009smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3010smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3011smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3012smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3013smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3014smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3015smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3016smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3017smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3018smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3019smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3020smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3021smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3022smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3023smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3024smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3025smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3026smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3027smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3028smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3030smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3031smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3032smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3033smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3034smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3035smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3036smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3038smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3039smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3040smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3043smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3044smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3046smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3047smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3048_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3049smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3050smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3051smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3052smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3053smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3054smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3055smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3056smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3058smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3059smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3061smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3063smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3065smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3066smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3067smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3068smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3069smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3070_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3071smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3072smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3073smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3074smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3075smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3076smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3077smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3078smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3079smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3080smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3081smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3082smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3084smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3085smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3087smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3088smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3090smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3091smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3092smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3093smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3096smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3097smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3098smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3099smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3100smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3101smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3102smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3104smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3105smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3106smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3107smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3108smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3109smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3110smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3111smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3113smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3114smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3115smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3116smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3806smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3807smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3808_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3810smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3812smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3813smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3814smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3816smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3817_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3818smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3819smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3821smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3823smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3824smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3825smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3826smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3827smod_slot2_Slot_smod_slot2_get,
+ Field_dsp340050b49a6c_fld3828smod_slot2_Slot_smod_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_smod_slot2_set_field_fns[] = {
+ Field_t_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2039_Slot_smod_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_smod_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2048_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2049_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_smod_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s15_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2991smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2992smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2993smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2994smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2995smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2996smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2997smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2998smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld2999smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3000smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3001smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3002smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3003smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3004smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3005smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3006smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3007smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3008smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3009smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3010smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3011smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3012smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3013smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3014smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3015smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3016smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3017smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3018smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3019smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3020smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3021smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3022smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3023smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3024smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3025smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3026smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3027smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3028smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3030smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3031smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3032smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3033smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3034smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3035smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3036smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3038smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3039smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3040smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3043smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3044smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3046smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3047smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3048_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3049smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3050smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3051smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3052smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3053smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3054smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3055smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3056smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3058smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3059smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3061smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3062_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3063smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3065smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3066smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3067smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3068smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3069smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3070_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3071smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3072smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3073smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3074smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3075smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3076smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3077smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3078smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3079smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3080smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3081smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3082smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3084smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3085smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3087smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3088smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3090smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3091smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3092smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3093smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3096smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3097smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3098smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3099smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3100smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3101smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3102smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3104smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3105smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3106smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3107smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3108smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3109smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3110smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3111smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3113smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3114smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3115smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3116smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3805smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3806smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3807smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3808_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3809smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3810smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3812smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3813smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3814smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3816smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3817_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3818smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3819smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3821smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3822smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3823smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3824smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3825smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3826smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3827smod_slot2_Slot_smod_slot2_set,
+ Field_dsp340050b49a6c_fld3828smod_slot2_Slot_smod_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_smod_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_smod_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2054_Slot_smod_slot1_get,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_smod_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s16_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld2033_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld2080_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld3118smod_slot1_Slot_smod_slot1_get,
+ Field_dsp340050b49a6c_fld3829smod_slot1_Slot_smod_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_smod_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2048_Slot_smod_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2054_Slot_smod_slot1_set,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_smod_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s16_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld2033_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld2080_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld3117smod_slot1_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld3118smod_slot1_Slot_smod_slot1_set,
+ Field_dsp340050b49a6c_fld3829smod_slot1_Slot_smod_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_smod_slot0_get_field_fns[] = {
+ Field_t_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_smod_slot0_get,
+ Field_r_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_smod_slot0_get,
+ Field_sal_Slot_smod_slot0_get,
+ Field_sargt_Slot_smod_slot0_get,
+ 0,
+ Field_sas_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_smod_slot0_get,
+ Field_imm7_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2037_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld2038_Slot_smod_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_smod_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_smod_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_smod_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s17_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3119smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3120smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3121smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3122smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3123smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3125smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3126smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3127smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3128smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3129smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3130smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3131smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3132smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3133smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3134smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3135smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3136smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3137smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3138smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3139smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3140smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3141smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3142smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3143smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3144smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3145smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3146smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3148smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3149smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3150_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3152smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3153smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3155smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3156smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3157smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3158smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3159smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3160smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3161smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3164smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3165smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3166smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3168smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3170smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3171smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3172smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3173smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3174smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3175smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3176smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3177smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3178smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3179smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3180smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3181smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3182smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3184smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3186smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3188smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3833smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3836smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3837smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3838smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3841smod_slot0_Slot_smod_slot0_get,
+ Field_dsp340050b49a6c_fld3842smod_slot0_Slot_smod_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_smod_slot0_set_field_fns[] = {
+ Field_t_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_smod_slot0_set,
+ Field_r_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_smod_slot0_set,
+ Field_sal_Slot_smod_slot0_set,
+ Field_sargt_Slot_smod_slot0_set,
+ 0,
+ Field_sas_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_smod_slot0_set,
+ Field_imm7_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_smod_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2037_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld2038_Slot_smod_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2040_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_smod_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_smod_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_smod_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s17_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3119smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3120smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3121smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3122smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3123smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3125smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3126smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3127smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3128smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3129smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3130smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3131smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3132smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3133smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3134smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3135smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3136smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3137smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3138smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3139smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3140smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3141smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3142smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3143smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3144smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3145smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3146smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3148smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3149smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3150_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3152smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3153smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3155smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3156smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3157smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3158smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3159smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3160smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3161smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3164smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3165smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3166smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3168smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3170smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3171smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3172smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3173smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3174smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3175smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3176smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3177smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3178smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3179smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3180smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3181smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3182smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3184smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3186smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3188smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3832smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3833smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3834_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3836smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3837smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3838smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3841smod_slot0_Slot_smod_slot0_set,
+ Field_dsp340050b49a6c_fld3842smod_slot0_Slot_smod_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_llr_slot2_get_field_fns[] = {
+ Field_t_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s18_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3191llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3192llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3193llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3194llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3195llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3196llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3197llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3198llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3199llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3200llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3201llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3202llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3203llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3204llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3205_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3206_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3207llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3208llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3210llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3212_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3213llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3214_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3215llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3216llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3217_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3218llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3220llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3221llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3222llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3224llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3226llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3228llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3230_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3231llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3232llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3233_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3234llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3235llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3236_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3237llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3238llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3240llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3241llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3242llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3243llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3244llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3245llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3246_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3247llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3843llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3845llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3848llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3849llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3850llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3851llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3853llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3855llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3856llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3857llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3859llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3860llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3861llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3862_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3863llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3864llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3865llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3866llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3867llr_slot2_Slot_llr_slot2_get,
+ Field_dsp340050b49a6c_fld3868_Slot_llr_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
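+/*
+ * Set-field counterpart of the _get table above: indexed by the same
+ * global field ids, each entry appears to deposit a field value into an
+ * llr_slot2 opcode word; a 0 entry presumably means the field is not
+ * encoded in this slot format.
+ */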
+static xtensa_set_field_fn
+Slot_llr_slot2_set_field_fns[] = {
+ Field_t_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s18_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld2074_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3191llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3192llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3193llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3194llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3195llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3196llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3197llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3198llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3199llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3200llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3201llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3202llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3203llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3204llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3205_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3206_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3207llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3208llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3210llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3212_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3213llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3214_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3215llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3216llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3217_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3218llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3220llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3221llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3222llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3224llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3225_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3226llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3228llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3230_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3231llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3232llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3233_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3234llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3235llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3236_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3237llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3238llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3240llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3241llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3242llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3243llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3244llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3245llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3246_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3247llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3843llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3844_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3845llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3847llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3848llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3849llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3850llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3851llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3853llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3855llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3856llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3857llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3859llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3860llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3861llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3862_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3863llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3864llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3865llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3866llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3867llr_slot2_Slot_llr_slot2_set,
+ Field_dsp340050b49a6c_fld3868_Slot_llr_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
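+/*
+ * Field extractors for the llr_slot1 format, indexed by global field id;
+ * 0 entries presumably mark fields that are not encoded in this slot.
+ */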
+static xtensa_get_field_fn
+Slot_llr_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld2048_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld2031_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2028_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2033_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s19_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld2034_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3248llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3250llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3251llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3252llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3253llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3254llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3869llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3870_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3872llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3875llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3876llr_slot1_Slot_llr_slot1_get,
+ Field_dsp340050b49a6c_fld3878llr_slot1_Slot_llr_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
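+/*
+ * Matching field depositors for llr_slot1, kept index-parallel with the
+ * _get table above, so a 0 entry again means the field is absent here.
+ */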
+static xtensa_set_field_fn
+Slot_llr_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld2048_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2055_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2026_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld2031_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2028_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2033_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s19_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld2034_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3248llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3250llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3251llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3252llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3253llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3254llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3869llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3870_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3872llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3875llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3876llr_slot1_Slot_llr_slot1_set,
+ Field_dsp340050b49a6c_fld3878llr_slot1_Slot_llr_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
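+/*
+ * Field extractors for the llr_slot0 format; same indexing scheme as the
+ * other slot tables, with 0 presumably marking fields absent from slot0.
+ */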
+static xtensa_get_field_fn
+Slot_llr_slot0_get_field_fns[] = {
+ Field_t_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ Field_sal_Slot_llr_slot0_get,
+ Field_sargt_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_llr_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2037_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2038_Slot_llr_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2050_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2054_Slot_llr_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s20_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld2071_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3258llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3259llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3260llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3261llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3263llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3264llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3265llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3266llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3267llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3268llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3269llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3270llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3272llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3274llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3275llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3276llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3277llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3278llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3279llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3280llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3281llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3282llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3283llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3284llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3286llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3288llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3289llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3291llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3292llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3293llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3294llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3295llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3296llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3297llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3298llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3299llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3300llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3302llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3303llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3304llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3305llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3306llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3308llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3310llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3311llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3312llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3879llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3881llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3883llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3885llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3888llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3890llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3892llr_slot0_Slot_llr_slot0_get,
+ Field_dsp340050b49a6c_fld3893llr_slot0_Slot_llr_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
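+/*
+ * Field depositors for llr_slot0, index-parallel with the _get table above.
+ */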
+static xtensa_set_field_fn
+Slot_llr_slot0_set_field_fns[] = {
+ Field_t_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ Field_sal_Slot_llr_slot0_set,
+ Field_sargt_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm7_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2032_Slot_llr_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2037_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2038_Slot_llr_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2041_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2043_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2050_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2052_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2054_Slot_llr_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s20_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld2071_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3258llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3259llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3260llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3261llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3263llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3264llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3265llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3266llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3267llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3268llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3269llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3270llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3272llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3274llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3275llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3276llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3277llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3278llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3279llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3280llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3281llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3282llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3283llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3284llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3286llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3288llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3289llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3291llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3292llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3293llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3294llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3295llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3296llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3297llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3298llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3299llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3300llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3302llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3303llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3304llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3305llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3306llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3308llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3310llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3311llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3312llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3879llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3881llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3883llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3885llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3887llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3888llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3890llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3892llr_slot0_Slot_llr_slot0_set,
+ Field_dsp340050b49a6c_fld3893llr_slot0_Slot_llr_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
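+
+/*
+ * Editorial note, not generated output: each Slot_<slot>_get_field_fns /
+ * Slot_<slot>_set_field_fns table below is indexed by the field number
+ * used throughout this file, and a 0 entry means that field is not
+ * encoded in the slot at all.  A minimal sketch of how libisa-style
+ * code might consult such a table (slot_get_field is a hypothetical
+ * helper name, not part of this patch; real callers check for a 0
+ * entry and report an error rather than returning 0):
+ *
+ *   static uint32_t slot_get_field(xtensa_get_field_fn *fns, int field,
+ *                                  const xtensa_insnbuf slotbuf)
+ *   {
+ *       return fns[field] ? fns[field](slotbuf) : 0;
+ *   }
+ */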
+
+static xtensa_get_field_fn
+Slot_dual_slot2_get_field_fns[] = {
+ Field_t_Slot_dual_slot2_get,
+ 0,
+ Field_bbi_Slot_dual_slot2_get,
+ Field_imm12_Slot_dual_slot2_get,
+ Field_imm8_Slot_dual_slot2_get,
+ Field_s_Slot_dual_slot2_get,
+ Field_imm12b_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_dual_slot2_get,
+ Field_r_Slot_dual_slot2_get,
+ 0,
+ 0,
+ Field_sae_Slot_dual_slot2_get,
+ Field_sal_Slot_dual_slot2_get,
+ Field_sargt_Slot_dual_slot2_get,
+ 0,
+ Field_sas_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_dual_slot2_get,
+ Field_imm7_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_dual_slot2_get,
+ 0,
+ 0,
+ Field_s8_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2030_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2032_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2045_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2047_Slot_dual_slot2_get,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2050_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2051_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2052_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2053_Slot_dual_slot2_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld2027_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s21_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3313dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3315dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3316dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3317dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3318_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3319dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3320dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3321dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3322dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3323dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3324dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3325dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3326dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3327dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3328dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3329dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3330dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3331dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3332dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3333dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3334dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3335dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3336dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3337dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3339dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3340dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3341dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3342dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3345dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3347dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3348dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3349dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3350dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3353dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3354dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3356dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3358dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3360dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3361dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3362dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3363dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3364_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3365dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3366dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3367dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3368dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3369dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3370dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3371dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3372dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3373dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3374dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3375dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3376dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3377dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3378dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3379dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3380dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3381dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3382dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3384dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3385dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3386dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3387dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3388dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3390dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3392dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3394dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3396dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3397dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3399dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3401dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3403dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3404dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3406dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3407_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3408dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3410_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3411dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3412dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3413dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3414dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3415dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3416dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3417dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3418dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3419dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3420dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3421dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3422dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3423dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3424dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3425dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3426dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3427dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3428dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3429dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3430dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3431dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3432dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3433dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3434dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3435dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3436dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3437dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3438dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3439dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3440dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3441dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3442dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3443dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3444dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3445dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3446dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3448dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3450dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3451dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3453dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3454dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3456dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3457dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3458dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3459dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3460dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3461dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3462dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3464dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3465dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3467dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3468dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3469dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3470dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3471dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3472dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3473dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3474dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3475dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3477dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3478dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3479dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3480dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3481dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3482dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3484dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3895dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3896dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3897dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3899dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3900_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3901dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3903dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3904dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3905dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3906dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3908dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3909dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3910dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3913dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3914dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3916dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3917dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3918dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3920dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3921dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3922dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3923dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3924dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3925dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3927dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3928dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3929dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3930dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3931dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3933dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3934dual_slot2_Slot_dual_slot2_get,
+ Field_dsp340050b49a6c_fld3935dual_slot2_Slot_dual_slot2_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
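+
+/*
+ * Editorial note, not generated output: the eight trailing entries of
+ * every get table (Implicit_Field_ar0_get ... Implicit_Field_brall_get)
+ * cover operands that are implied by the opcode rather than encoded in
+ * the slot; they presumably return a fixed register index (a0/a4/a8/a12
+ * and the bt16/bs16/br16/brall boolean groups) and ignore the
+ * instruction buffer.  The matching positions in the set tables all
+ * point at Implicit_Field_set, which has nothing to write back.
+ */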
+
+static xtensa_set_field_fn
+Slot_dual_slot2_set_field_fns[] = {
+ Field_t_Slot_dual_slot2_set,
+ 0,
+ Field_bbi_Slot_dual_slot2_set,
+ Field_imm12_Slot_dual_slot2_set,
+ Field_imm8_Slot_dual_slot2_set,
+ Field_s_Slot_dual_slot2_set,
+ Field_imm12b_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_dual_slot2_set,
+ Field_r_Slot_dual_slot2_set,
+ 0,
+ 0,
+ Field_sae_Slot_dual_slot2_set,
+ Field_sal_Slot_dual_slot2_set,
+ Field_sargt_Slot_dual_slot2_set,
+ 0,
+ Field_sas_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_dual_slot2_set,
+ Field_imm7_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_dual_slot2_set,
+ 0,
+ 0,
+ Field_s8_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2030_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2032_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2044_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2045_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2047_Slot_dual_slot2_set,
+ 0,
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2050_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2051_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2052_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2053_Slot_dual_slot2_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2056_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2025_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld2027_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s21_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3313dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3314_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3315dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3316dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3317dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3318_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3319dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3320dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3321dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3322dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3323dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3324dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3325dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3326dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3327dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3328dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3329dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3330dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3331dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3332dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3333dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3334dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3335dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3336dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3337dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3339dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3340dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3341dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3342dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3345dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3347dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3348dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3349dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3350dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3353dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3354dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3356dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3358dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3360dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3361dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3362dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3363dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3364_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3365dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3366dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3367dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3368dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3369dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3370dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3371dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3372dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3373dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3374dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3375dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3376dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3377dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3378dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3379dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3380dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3381dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3382dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3384dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3385dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3386dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3387dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3388dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3390dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3392dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3394dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3396dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3397dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3399dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3401dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3403dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3404dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3406dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3407_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3408dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3410_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3411dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3412dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3413dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3414dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3415dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3416dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3417dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3418dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3419dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3420dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3421dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3422dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3423dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3424dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3425dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3426dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3427dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3428dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3429dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3430dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3431dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3432dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3433dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3434dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3435dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3436dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3437dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3438dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3439dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3440dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3441dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3442dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3443dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3444dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3445dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3446dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3448dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3450dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3451dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3453dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3454dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3456dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3457dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3458dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3459dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3460dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3461dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3462dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3464dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3465dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3466_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3467dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3468dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3469dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3470dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3471dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3472dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3473dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3474dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3475dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3477dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3478dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3479dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3480dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3481dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3482dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3484dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3894dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3895dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3896dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3897dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3898dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3899dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3900_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3901dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3903dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3904dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3905dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3906dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3907dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3908dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3909dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3910dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3913dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3914dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3916dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3917dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3918dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3919dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3920dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3921dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3922dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3923dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3924dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3925dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3927dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3928dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3929dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3930dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3931dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3933dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3934dual_slot2_Slot_dual_slot2_set,
+ Field_dsp340050b49a6c_fld3935dual_slot2_Slot_dual_slot2_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_dual_slot1_get_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s22_Slot_dual_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
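
These generated per-slot tables are indexed by the core's global field ID (1733 fields for this core, per the ISA record at the end of this file); most entries are 0 because any one slot encodes only a small subset of the fields. The `*_set_field_fns` arrays mirror the layout of the `*_get_field_fns` arrays, so a field is readable and writable in a slot exactly when its table entry is non-NULL. A minimal sketch of how such a table would be consulted, assuming libisa's usual `typedef uint32_t (*xtensa_get_field_fn)(const xtensa_insnbuf)` and a hypothetical helper name:

    /* Hypothetical helper: fetch a field from a DUAL_slot1 slot buffer,
     * reporting whether the field is encoded in this slot at all. */
    static uint32_t
    dual_slot1_get_field(int field_id, const xtensa_insnbuf slotbuf, bool *valid)
    {
        xtensa_get_field_fn fn = Slot_dual_slot1_get_field_fns[field_id];

        *valid = (fn != NULL);
        return fn ? fn(slotbuf) : 0;
    }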
+
+static xtensa_set_field_fn
+Slot_dual_slot1_set_field_fns[] = {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s22_Slot_dual_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_dual_slot0_get_field_fns[] = {
+ Field_t_Slot_dual_slot0_get,
+ 0,
+ Field_bbi_Slot_dual_slot0_get,
+ Field_imm12_Slot_dual_slot0_get,
+ Field_imm8_Slot_dual_slot0_get,
+ Field_s_Slot_dual_slot0_get,
+ Field_imm12b_Slot_dual_slot0_get,
+ Field_imm16_Slot_dual_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_dual_slot0_get,
+ Field_r_Slot_dual_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_dual_slot0_get,
+ Field_sal_Slot_dual_slot0_get,
+ Field_sargt_Slot_dual_slot0_get,
+ 0,
+ Field_sas_Slot_dual_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_dual_slot0_get,
+ Field_imm7_Slot_dual_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_dual_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_dual_slot0_get,
+ 0,
+ 0,
+ 0,
+ Field_fimm8_Slot_dual_slot0_get,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2030_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2032_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2035_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2036_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2037_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2038_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2039_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2040_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2041_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2042_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2043_Slot_dual_slot0_get,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2047_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2048_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2050_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2051_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2052_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2053_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2054_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2055_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s23_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2057_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2066_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2072_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3487dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3488dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3489dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3490dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3491dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3492dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3493dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3494dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3496dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3497dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3498dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3499dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3500dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3502dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3504dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3505dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3506dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3507dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3508dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3509dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3510dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3511dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3512dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3513dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3514dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3515dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3516dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3517dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3518dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3519dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3520dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3522dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3523dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3524dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3527dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3529dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3530dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3531_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3532dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3533dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3535dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3536dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3537dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3538dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3539dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3541dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3542dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3543dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3544dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3545dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3546dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3547dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3548dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3549dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3550dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3551dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3553dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3554dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3555dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3556dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3557dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3558dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3559dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3560dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3562dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3563dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3564dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3565dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3566dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3567dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3568dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3569dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3570dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3571dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3572dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3573dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3574dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3575dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3576dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3577dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3578dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3579dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3580dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3581dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3582dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3583dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3584_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3585dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3587dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3588dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3589dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3590dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3591dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3592dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3593dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3594dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3595dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3596dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3597dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3598dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3599dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3600dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3601dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3603dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3604dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3606dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3607dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3608dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3609dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3610_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3611dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3612dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3613dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3614dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3615dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3616dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3618dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3619_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3621dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3622dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3623dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3624dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3625dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3626dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3937dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3938dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3939dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3940dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3941dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3943dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3946dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3947dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3949dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3950dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3951dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3952dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3954dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3957dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3958dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3959dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3960dual_slot0_Slot_dual_slot0_get,
+ Field_dsp340050b49a6c_fld3961dual_slot0_Slot_dual_slot0_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_dual_slot0_set_field_fns[] = {
+ Field_t_Slot_dual_slot0_set,
+ 0,
+ Field_bbi_Slot_dual_slot0_set,
+ Field_imm12_Slot_dual_slot0_set,
+ Field_imm8_Slot_dual_slot0_set,
+ Field_s_Slot_dual_slot0_set,
+ Field_imm12b_Slot_dual_slot0_set,
+ Field_imm16_Slot_dual_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op2_Slot_dual_slot0_set,
+ Field_r_Slot_dual_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_dual_slot0_set,
+ Field_sal_Slot_dual_slot0_set,
+ Field_sargt_Slot_dual_slot0_set,
+ 0,
+ Field_sas_Slot_dual_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_imm6_Slot_dual_slot0_set,
+ Field_imm7_Slot_dual_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_dual_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_dual_slot0_set,
+ 0,
+ 0,
+ 0,
+ Field_fimm8_Slot_dual_slot0_set,
+ 0,
+ 0,
+ Field_dsp340050b49a6c_fld2029_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2030_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2032_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2035_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2036_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2037_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2038_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2039_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2040_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2041_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2042_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2043_Slot_dual_slot0_set,
+ 0,
+ Field_dsp340050b49a6c_fld2045_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2046_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2047_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2048_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2049_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2050_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2051_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2052_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2053_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2054_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2055_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2056_Slot_dual_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s23_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2057_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2060_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2066_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2072_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld2079_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3487dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3488dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3489dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3490dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3491dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3492dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3493dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3494dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3496dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3497dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3498dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3499dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3500dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3502dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3504dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3505dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3506dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3507dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3508dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3509dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3510dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3511dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3512dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3513dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3514dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3515dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3516dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3517dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3518dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3519dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3520dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3522dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3523dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3524dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3527dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3529dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3530dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3531_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3532dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3533dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3535dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3536dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3537dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3538dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3539dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3541dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3542dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3543dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3544dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3545dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3546dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3547dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3548dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3549dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3550dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3551dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3552_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3553dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3554dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3555dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3556dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3557dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3558dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3559dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3560dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3562dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3563dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3564dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3565dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3566dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3567dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3568dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3569dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3570dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3571dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3572dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3573dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3574dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3575dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3576dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3577dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3578dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3579dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3580dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3581dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3582dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3583dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3584_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3585dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3587dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3588dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3589dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3590dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3591dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3592dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3593dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3594dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3595dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3596dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3597dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3598dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3599dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3600dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3601dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3602_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3603dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3604dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3606dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3607dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3608dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3609dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3610_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3611dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3612dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3613dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3614dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3615dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3616dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3618dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3619_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3620dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3621dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3622dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3623dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3624dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3625dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3626dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3936dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3937dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3938dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3939dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3940dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3941dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3943dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3945dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3946dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3947dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3949dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3950dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3951dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3952dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3954dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3957dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3958dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3959dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3960dual_slot0_Slot_dual_slot0_set,
+ Field_dsp340050b49a6c_fld3961dual_slot0_Slot_dual_slot0_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" },
+ { "GP_slot2", "GP", 2,
+ Slot_gp_Format_gp_slot2_43_get, Slot_gp_Format_gp_slot2_43_set,
+ Slot_gp_slot2_get_field_fns, Slot_gp_slot2_set_field_fns,
+ Slot_gp_slot2_decode, "nop" },
+ { "GP_slot1", "GP", 1,
+ Slot_gp_Format_gp_slot1_26_get, Slot_gp_Format_gp_slot1_26_set,
+ Slot_gp_slot1_get_field_fns, Slot_gp_slot1_set_field_fns,
+ Slot_gp_slot1_decode, "nop" },
+ { "GP_slot0", "GP", 0,
+ Slot_gp_Format_gp_slot0_7_get, Slot_gp_Format_gp_slot0_7_set,
+ Slot_gp_slot0_get_field_fns, Slot_gp_slot0_set_field_fns,
+ Slot_gp_slot0_decode, "nop" },
+ { "DOT_slot2", "DOT", 2,
+ Slot_dot_Format_dot_slot2_44_get, Slot_dot_Format_dot_slot2_44_set,
+ Slot_dot_slot2_get_field_fns, Slot_dot_slot2_set_field_fns,
+ Slot_dot_slot2_decode, "nop" },
+ { "DOT_slot1", "DOT", 1,
+ Slot_dot_Format_dot_slot1_24_get, Slot_dot_Format_dot_slot1_24_set,
+ Slot_dot_slot1_get_field_fns, Slot_dot_slot1_set_field_fns,
+ Slot_dot_slot1_decode, "nop" },
+ { "DOT_slot0", "DOT", 0,
+ Slot_dot_Format_dot_slot0_7_get, Slot_dot_Format_dot_slot0_7_set,
+ Slot_dot_slot0_get_field_fns, Slot_dot_slot0_set_field_fns,
+ Slot_dot_slot0_decode, "nop" },
+ { "PQ_slot2", "PQ", 2,
+ Slot_pq_Format_pq_slot2_40_get, Slot_pq_Format_pq_slot2_40_set,
+ Slot_pq_slot2_get_field_fns, Slot_pq_slot2_set_field_fns,
+ Slot_pq_slot2_decode, "nop" },
+ { "PQ_slot1", "PQ", 1,
+ Slot_pq_Format_pq_slot1_26_get, Slot_pq_Format_pq_slot1_26_set,
+ Slot_pq_slot1_get_field_fns, Slot_pq_slot1_set_field_fns,
+ Slot_pq_slot1_decode, "nop" },
+ { "PQ_slot0", "PQ", 0,
+ Slot_pq_Format_pq_slot0_7_get, Slot_pq_Format_pq_slot0_7_set,
+ Slot_pq_slot0_get_field_fns, Slot_pq_slot0_set_field_fns,
+ Slot_pq_slot0_decode, "nop" },
+ { "ACC2_slot2", "ACC2", 2,
+ Slot_acc2_Format_acc2_slot2_47_get, Slot_acc2_Format_acc2_slot2_47_set,
+ Slot_acc2_slot2_get_field_fns, Slot_acc2_slot2_set_field_fns,
+ Slot_acc2_slot2_decode, "nop" },
+ { "ACC2_slot1", "ACC2", 1,
+ Slot_acc2_Format_acc2_slot1_23_get, Slot_acc2_Format_acc2_slot1_23_set,
+ Slot_acc2_slot1_get_field_fns, Slot_acc2_slot1_set_field_fns,
+ Slot_acc2_slot1_decode, "nop" },
+ { "ACC2_slot0", "ACC2", 0,
+ Slot_acc2_Format_acc2_slot0_7_get, Slot_acc2_Format_acc2_slot0_7_set,
+ Slot_acc2_slot0_get_field_fns, Slot_acc2_slot0_set_field_fns,
+ Slot_acc2_slot0_decode, "nop" },
+ { "SMOD_slot2", "SMOD", 2,
+ Slot_smod_Format_smod_slot2_42_get, Slot_smod_Format_smod_slot2_42_set,
+ Slot_smod_slot2_get_field_fns, Slot_smod_slot2_set_field_fns,
+ Slot_smod_slot2_decode, "nop" },
+ { "SMOD_slot1", "SMOD", 1,
+ Slot_smod_Format_smod_slot1_26_get, Slot_smod_Format_smod_slot1_26_set,
+ Slot_smod_slot1_get_field_fns, Slot_smod_slot1_set_field_fns,
+ Slot_smod_slot1_decode, "nop" },
+ { "SMOD_slot0", "SMOD", 0,
+ Slot_smod_Format_smod_slot0_7_get, Slot_smod_Format_smod_slot0_7_set,
+ Slot_smod_slot0_get_field_fns, Slot_smod_slot0_set_field_fns,
+ Slot_smod_slot0_decode, "nop" },
+ { "LLR_slot2", "LLR", 2,
+ Slot_llr_Format_llr_slot2_44_get, Slot_llr_Format_llr_slot2_44_set,
+ Slot_llr_slot2_get_field_fns, Slot_llr_slot2_set_field_fns,
+ Slot_llr_slot2_decode, "nop" },
+ { "LLR_slot1", "LLR", 1,
+ Slot_llr_Format_llr_slot1_24_get, Slot_llr_Format_llr_slot1_24_set,
+ Slot_llr_slot1_get_field_fns, Slot_llr_slot1_set_field_fns,
+ Slot_llr_slot1_decode, "nop" },
+ { "LLR_slot0", "LLR", 0,
+ Slot_llr_Format_llr_slot0_7_get, Slot_llr_Format_llr_slot0_7_set,
+ Slot_llr_slot0_get_field_fns, Slot_llr_slot0_set_field_fns,
+ Slot_llr_slot0_decode, "nop" },
+ { "DUAL_slot2", "DUAL", 2,
+ Slot_dual_Format_dual_slot2_31_get, Slot_dual_Format_dual_slot2_31_set,
+ Slot_dual_slot2_get_field_fns, Slot_dual_slot2_set_field_fns,
+ Slot_dual_slot2_decode, "nop" },
+ { "DUAL_slot1", "DUAL", 1,
+ Slot_dual_Format_dual_slot1_30_get, Slot_dual_Format_dual_slot1_30_set,
+ Slot_dual_slot1_get_field_fns, Slot_dual_slot1_set_field_fns,
+ Slot_dual_slot1_decode, "nop" },
+ { "DUAL_slot0", "DUAL", 0,
+ Slot_dual_Format_dual_slot0_7_get, Slot_dual_Format_dual_slot0_7_set,
+ Slot_dual_slot0_get_field_fns, Slot_dual_slot0_set_field_fns,
+ Slot_dual_slot0_decode, "nop" }
+};
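
Each xtensa_slot_internal entry ties a slot name to its container format ("x24", "GP", "DUAL", ...), its position within that format, a get/set pair that moves the slot's bits in and out of an instruction buffer, the per-slot field-function tables defined above, the slot's opcode decoder, and the mnemonic of the NOP to assemble into an unused slot ("" for Inst16a, which has none). A sketch of a name lookup over this table (illustrative only; libisa reaches slots numerically through each format's slot list, and ARRAY_SIZE is QEMU's element-count macro):

    /* Hypothetical lookup of a slot entry by name. */
    static const xtensa_slot_internal *
    find_slot(const char *name)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(slots); i++) {
            if (strcmp(slots[i].name, name) == 0) {
                return &slots[i];
            }
        }
        return NULL;
    }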
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+ insn[1] = 0;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+ insn[1] = 0;
+}
+
+static void
+Format_GP_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xf;
+ insn[1] = 0;
+}
+
+static void
+Format_DOT_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x1f;
+ insn[1] = 0;
+}
+
+static void
+Format_PQ_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x2f;
+ insn[1] = 0;
+}
+
+static void
+Format_ACC2_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x3f;
+ insn[1] = 0;
+}
+
+static void
+Format_SMOD_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x4f;
+ insn[1] = 0;
+}
+
+static void
+Format_LLR_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x5f;
+ insn[1] = 0;
+}
+
+static void
+Format_DUAL_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x6f;
+ insn[1] = 0;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static int Format_GP_slots[] = { 5, 4, 3 };
+
+static int Format_DOT_slots[] = { 8, 7, 6 };
+
+static int Format_PQ_slots[] = { 11, 10, 9 };
+
+static int Format_ACC2_slots[] = { 14, 13, 12 };
+
+static int Format_SMOD_slots[] = { 17, 16, 15 };
+
+static int Format_LLR_slots[] = { 20, 19, 18 };
+
+static int Format_DUAL_slots[] = { 23, 22, 21 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots },
+ { "GP", 8, Format_GP_encode, 3, Format_GP_slots },
+ { "DOT", 8, Format_DOT_encode, 3, Format_DOT_slots },
+ { "PQ", 8, Format_PQ_encode, 3, Format_PQ_slots },
+ { "ACC2", 8, Format_ACC2_encode, 3, Format_ACC2_slots },
+ { "SMOD", 8, Format_SMOD_encode, 3, Format_SMOD_slots },
+ { "LLR", 8, Format_LLR_encode, 3, Format_LLR_slots },
+ { "DUAL", 8, Format_DUAL_encode, 3, Format_DUAL_slots }
+};
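
Each format record gives the format name, its length in bytes (3 for x24, 2 for the narrow x16a/x16b encodings, 8 for the VLIW bundles), the encode function that stamps the op0 template bits, the slot count, and an index array mapping each slot position within the format to its entry in slots[] (Format_GP_slots = { 5, 4, 3 } maps positions 0..2 to GP_slot0, GP_slot1, GP_slot2). A quick round-trip sketch, assuming xtensa_insnbuf is a pointer to 32-bit words as in libisa:

    /* Sketch: an empty DUAL bundle encodes op0 = 0x6f and decodes back
     * to format index 9. */
    static void
    demo_dual_bundle(void)
    {
        uint32_t buf[2] = { 0, 0 };

        Format_DUAL_encode(buf);
        assert(format_decoder(buf) == 9); /* DUAL */
    }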
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0 && (insn[1] & 0) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8 && (insn[1] & 0) == 0)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc && (insn[1] & 0) == 0)
+ return 2; /* x16b */
+ if ((insn[0] & 0x7f) == 0xf && (insn[1] & 0) == 0)
+ return 3; /* GP */
+ if ((insn[0] & 0x7f) == 0x1f && (insn[1] & 0) == 0)
+ return 4; /* DOT */
+ if ((insn[0] & 0x7f) == 0x2f && (insn[1] & 0) == 0)
+ return 5; /* PQ */
+ if ((insn[0] & 0x7f) == 0x3f && (insn[1] & 0) == 0)
+ return 6; /* ACC2 */
+ if ((insn[0] & 0x7f) == 0x4f && (insn[1] & 0) == 0)
+ return 7; /* SMOD */
+ if ((insn[0] & 0x7f) == 0x5f && (insn[1] & 0) == 0)
+ return 8; /* LLR */
+ if ((insn[0] & 0x7f) == 0x6f && (insn[1] & 0) == 0)
+ return 9; /* DUAL */
+ return -1;
+}
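+
+/*
+ * Each test above masks only the low bits of the first instruction word,
+ * widening as needed: one bit (0x8) separates x24, two bits (0xc) x16a,
+ * three bits (0xe) x16b, and seven bits (0x7f) each of the 64-bit
+ * formats.  The "(insn[1] & 0) == 0" clauses are vacuous (always true);
+ * a mask is presumably emitted for every insnbuf word even when that
+ * word carries no format bits.
+ */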
+
+static int length_table[16] = {
+  3,  /* op0 0x0..0x7: 24-bit instructions */
+  3,
+  3,
+  3,
+  3,
+  3,
+  3,
+  3,
+  2,  /* op0 0x8..0xd: 16-bit density instructions */
+  2,
+  2,
+  2,
+  2,
+  2,
+  -1, /* op0 0xe: undefined */
+  8   /* op0 0xf: 64-bit formats (GP..DUAL) */
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int op0 = insn[0] & 0xf;
+ return length_table[op0];
+}
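+
+/* For illustration, only the low nibble (op0) of the first byte matters:
+
+     length_decoder ((const unsigned char[]){ 0x36, 0x00, 0x41 }) == 3
+     length_decoder ((const unsigned char[]){ 0x4d, 0xf0 }) == 2
+     length_decoder ((const unsigned char[]){ 0x6f, 0, 0, 0, 0, 0, 0, 0 }) == 8
+
+   in line with the format encodings above (op0 0x0..0x7 decode as
+   24-bit, 0xd as a 16-bit density encoding, 0xf as the 64-bit formats). */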
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 8 /* insn_size */, 0,
+ 10, formats, format_decoder, length_decoder,
+ 24, slots,
+ 1733 /* num_fields */,
+ 1828, operands,
+ 747, iclasses,
+ 839, opcodes, 0,
+ 10, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 95, interfaces, 0,
+ 0, funcUnits, 0
+};
diff --git a/target/xtensa/core-fsf.c b/target/xtensa/core-fsf.c
new file mode 100644
index 000000000..3327c50b4
--- /dev/null
+++ b/target/xtensa/core-fsf.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-fsf/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_fsf
+#include "core-fsf/xtensa-modules.c.inc"
+
+static XtensaConfig fsf __attribute__((unused)) = {
+ .name = "fsf",
+ .gdb_regmap = {
+ /* GDB for this core is not supported currently */
+ .reg = {
+ XTREG_END
+ },
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(fsf)
diff --git a/target/xtensa/core-fsf/core-isa.h b/target/xtensa/core-fsf/core-isa.h
new file mode 100644
index 000000000..fb2bb8f2c
--- /dev/null
+++ b/target/xtensa/core-fsf/core-isa.h
@@ -0,0 +1,360 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Tensilica Inc.
+ */
+
+#ifndef XTENSA_FSF_CORE_ISA_H
+#define XTENSA_FSF_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 800002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "fsf" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "fsf standard core"
+#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits */
+#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits */
+#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XTHAL_HW_REL_LX2 1
+#define XTHAL_HW_REL_LX2_0 1
+#define XTHAL_HW_REL_LX2_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
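+
+/* With the values above, each cache is 2 ways * 2^(4 + 8) = 8192 bytes,
+   matching XCHAL_ICACHE_SIZE/XCHAL_DCACHE_SIZE earlier in this file. */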
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
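+
+/* Sanity check: the per-level masks are pairwise disjoint and
+   0x000064F9 | 0x00008902 | 0x00011204 == 0x0001FFFF, so each of the
+   17 interrupts has exactly one level, and every ANDBELOW mask is the
+   OR of the per-level masks up to that level. */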
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
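+
+/* These type masks likewise partition the 17 interrupts: EXTERN_LEVEL
+   covers bits 0..6, EXTERN_EDGE bits 7..9, TIMER bits 10..12 and
+   SOFTWARE bits 13..16, in agreement with the per-interrupt types
+   listed above. */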
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See <xtensa/config/core-matmap.h> header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_FSF_CORE_ISA_H */
diff --git a/target/xtensa/core-fsf/xtensa-modules.c.inc b/target/xtensa/core-fsf/xtensa-modules.c.inc
new file mode 100644
index 000000000..c32683ff7
--- /dev/null
+++ b/target/xtensa/core-fsf/xtensa-modules.c.inc
@@ -0,0 +1,9826 @@
+/* Xtensa configuration-specific ISA information.
+ Copyright 2003, 2004, 2005 Free Software Foundation, Inc.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "DDR", 104, 0 },
+ { "176", 176, 0 },
+ { "208", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "LITBASE", 5, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 }
+};
+
+#define NUM_SYSREGS 49
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 0
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 17, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EPS3", 15, 0 },
+ { "EPS4", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 4, 0 },
+ { "WindowStart", 16, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "LITBADDR", 20, 0 },
+ { "LITBEN", 1, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 17, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 }
+};
+
+#define NUM_STATES 58
+
+/* Macros for xtensa_state numbers (for use in iclasses because the
+ state numbers are not available when the iclass table is generated). */
+
+#define STATE_LCOUNT 0
+#define STATE_PC 1
+#define STATE_ICOUNT 2
+#define STATE_DDR 3
+#define STATE_INTERRUPT 4
+#define STATE_CCOUNT 5
+#define STATE_XTSYNC 6
+#define STATE_EPC1 7
+#define STATE_EPC2 8
+#define STATE_EPC3 9
+#define STATE_EPC4 10
+#define STATE_EXCSAVE1 11
+#define STATE_EXCSAVE2 12
+#define STATE_EXCSAVE3 13
+#define STATE_EXCSAVE4 14
+#define STATE_EPS2 15
+#define STATE_EPS3 16
+#define STATE_EPS4 17
+#define STATE_EXCCAUSE 18
+#define STATE_PSINTLEVEL 19
+#define STATE_PSUM 20
+#define STATE_PSWOE 21
+#define STATE_PSRING 22
+#define STATE_PSEXCM 23
+#define STATE_DEPC 24
+#define STATE_EXCVADDR 25
+#define STATE_WindowBase 26
+#define STATE_WindowStart 27
+#define STATE_PSCALLINC 28
+#define STATE_PSOWB 29
+#define STATE_LBEG 30
+#define STATE_LEND 31
+#define STATE_SAR 32
+#define STATE_LITBADDR 33
+#define STATE_LITBEN 34
+#define STATE_MISC0 35
+#define STATE_MISC1 36
+#define STATE_InOCDMode 37
+#define STATE_INTENABLE 38
+#define STATE_DBREAKA0 39
+#define STATE_DBREAKC0 40
+#define STATE_DBREAKA1 41
+#define STATE_DBREAKC1 42
+#define STATE_IBREAKA0 43
+#define STATE_IBREAKA1 44
+#define STATE_IBREAKENABLE 45
+#define STATE_ICOUNTLEVEL 46
+#define STATE_DEBUGCAUSE 47
+#define STATE_DBNUM 48
+#define STATE_CCOMPARE0 49
+#define STATE_CCOMPARE1 50
+#define STATE_CCOMPARE2 51
+#define STATE_ASID3 52
+#define STATE_ASID2 53
+#define STATE_ASID1 54
+#define STATE_INSTPGSZID4 55
+#define STATE_DATAPGSZID4 56
+#define STATE_PTBASE 57
+
+
+/* Field definitions. */
+
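+/*
+ * The accessors below all follow one pattern: a get reads a bit range by
+ * shifting it up against bit 31 and back down (no explicit mask needed),
+ * and a set clears the range and ORs the new value in.  For example,
+ * Field_t_Slot_inst covers bits 19:16 of the first 32-bit word, so with
+ * insn[0] == 0x00054321 the get computes (0x00054321 << 12) >> 28 == 0x5.
+ */
+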
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 20) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 16) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff) | (tie_t << 0);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 14) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0x3ffff) | (tie_t << 0);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+
+};
+
+
+/* Register files. */
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", 0, 32, 64 }
+};
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+
+/* Instruction operands. */
+
+static int
+Operand_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffsetx4_0 = 0x4 + ((((int) offset_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_encode (uint32 *valp)
+{
+ unsigned offset_0, soffsetx4_0;
+ soffsetx4_0 = *valp;
+ offset_0 = ((soffsetx4_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
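+
+/* Worked example: an 18-bit offset field of 0 decodes to 0x4; with
+   pc == 0x1001, _rtoa then yields (0x1001 & ~0x3) + 0x4 == 0x1004,
+   i.e. targets are taken relative to the word-aligned PC. */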
+
+static int
+Operand_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ uimm12x8_0 = imm12_0 << 3;
+ *valp = uimm12x8_0;
+ return 0;
+}
+
+static int
+Operand_uimm12x8_encode (uint32 *valp)
+{
+ unsigned imm12_0, uimm12x8_0;
+ uimm12x8_0 = *valp;
+ imm12_0 = ((uimm12x8_0 >> 3) & 0xfff);
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_0, mn_0;
+ mn_0 = *valp & 0xf;
+ simm4_0 = ((int) mn_0 << 28) >> 28;
+ *valp = simm4_0;
+ return 0;
+}
+
+static int
+Operand_simm4_encode (uint32 *valp)
+{
+ unsigned mn_0, simm4_0;
+ simm4_0 = *valp;
+ mn_0 = (simm4_0 & 0xf);
+ *valp = mn_0;
+ return 0;
+}
+
+static int
+Operand_arr_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_arr_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ars_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_art_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_art_encode (uint32 *valp)
+{
+ return (*valp & ~0xf) != 0;
+}
+
+static int
+Operand_ar0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar0_encode (uint32 *valp)
+{
+ return (*valp & ~0x3f) != 0;
+}
+
+static int
+Operand_ar4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar4_encode (uint32 *valp)
+{
+ return (*valp & ~0x3f) != 0;
+}
+
+static int
+Operand_ar8_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar8_encode (uint32 *valp)
+{
+ return (*valp & ~0x3f) != 0;
+}
+
+static int
+Operand_ar12_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ar12_encode (uint32 *valp)
+{
+ return (*valp & ~0x3f) != 0;
+}
+
+static int
+Operand_ars_entry_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+Operand_ars_entry_encode (uint32 *valp)
+{
+ return (*valp & ~0x3f) != 0;
+}
+
+static int
+Operand_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_0, r_0;
+ r_0 = *valp & 0xf;
+ immrx4_0 = ((((0xfffffff)) << 4) | r_0) << 2;
+ *valp = immrx4_0;
+ return 0;
+}
+
+static int
+Operand_immrx4_encode (uint32 *valp)
+{
+ unsigned r_0, immrx4_0;
+ immrx4_0 = *valp;
+ r_0 = ((immrx4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_0, r_0;
+ r_0 = *valp & 0xf;
+ lsi4x4_0 = r_0 << 2;
+ *valp = lsi4x4_0;
+ return 0;
+}
+
+static int
+Operand_lsi4x4_encode (uint32 *valp)
+{
+ unsigned r_0, lsi4x4_0;
+ lsi4x4_0 = *valp;
+ r_0 = ((lsi4x4_0 >> 2) & 0xf);
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_0, imm7_0;
+ imm7_0 = *valp & 0x7f;
+ simm7_0 = ((((-((((imm7_0 >> 6) & 1)) & (((imm7_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | imm7_0;
+ *valp = simm7_0;
+ return 0;
+}
+
+static int
+Operand_simm7_encode (uint32 *valp)
+{
+ unsigned imm7_0, simm7_0;
+ simm7_0 = *valp;
+ imm7_0 = (simm7_0 & 0x7f);
+ *valp = imm7_0;
+ return 0;
+}
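+
+/* The decode above sign-fills only when bits 6 and 5 of imm7 are both
+   set, so encodings 0x60..0x7f map to -32..-1 while 0x00..0x5f map to
+   0..95: the asymmetric -32..95 immediate range (as used by MOVI.N). */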
+
+static int
+Operand_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_0, imm6_0;
+ imm6_0 = *valp & 0x3f;
+ uimm6_0 = 0x4 + ((((0)) << 6) | imm6_0);
+ *valp = uimm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_encode (uint32 *valp)
+{
+ unsigned imm6_0, uimm6_0;
+ uimm6_0 = *valp;
+ imm6_0 = (uimm6_0 - 0x4) & 0x3f;
+ *valp = imm6_0;
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_0, t_0;
+ t_0 = *valp & 0xf;
+ ai4const_0 = CONST_TBL_ai4c_0[t_0 & 0xf];
+ *valp = ai4const_0;
+ return 0;
+}
+
+static int
+Operand_ai4const_encode (uint32 *valp)
+{
+ unsigned t_0, ai4const_0;
+ ai4const_0 = *valp;
+ switch (ai4const_0)
+ {
+ case 0xffffffff: t_0 = 0; break;
+ case 0x1: t_0 = 0x1; break;
+ case 0x2: t_0 = 0x2; break;
+ case 0x3: t_0 = 0x3; break;
+ case 0x4: t_0 = 0x4; break;
+ case 0x5: t_0 = 0x5; break;
+ case 0x6: t_0 = 0x6; break;
+ case 0x7: t_0 = 0x7; break;
+ case 0x8: t_0 = 0x8; break;
+ case 0x9: t_0 = 0x9; break;
+ case 0xa: t_0 = 0xa; break;
+ case 0xb: t_0 = 0xb; break;
+ case 0xc: t_0 = 0xc; break;
+ case 0xd: t_0 = 0xd; break;
+ case 0xe: t_0 = 0xe; break;
+ default: t_0 = 0xf; break;
+ }
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_0, r_0;
+ r_0 = *valp & 0xf;
+ b4const_0 = CONST_TBL_b4c_0[r_0 & 0xf];
+ *valp = b4const_0;
+ return 0;
+}
+
+static int
+Operand_b4const_encode (uint32 *valp)
+{
+ unsigned r_0, b4const_0;
+ b4const_0 = *valp;
+ switch (b4const_0)
+ {
+ case 0xffffffff: r_0 = 0; break;
+ case 0x1: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
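+
+/* Decode and encode invert each other over the table: decode maps
+   r == 0x9 to CONST_TBL_b4c_0[9] == 0xa, and encode maps 0xa back to
+   0x9.  Values absent from the table fall into the default arm and are
+   silently encoded as 0xf (which decodes to 0x100). */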
+
+static int
+Operand_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_0, r_0;
+ r_0 = *valp & 0xf;
+ b4constu_0 = CONST_TBL_b4cu_0[r_0 & 0xf];
+ *valp = b4constu_0;
+ return 0;
+}
+
+static int
+Operand_b4constu_encode (uint32 *valp)
+{
+ unsigned r_0, b4constu_0;
+ b4constu_0 = *valp;
+ switch (b4constu_0)
+ {
+ case 0x8000: r_0 = 0; break;
+ case 0x10000: r_0 = 0x1; break;
+ case 0x2: r_0 = 0x2; break;
+ case 0x3: r_0 = 0x3; break;
+ case 0x4: r_0 = 0x4; break;
+ case 0x5: r_0 = 0x5; break;
+ case 0x6: r_0 = 0x6; break;
+ case 0x7: r_0 = 0x7; break;
+ case 0x8: r_0 = 0x8; break;
+ case 0xa: r_0 = 0x9; break;
+ case 0xc: r_0 = 0xa; break;
+ case 0x10: r_0 = 0xb; break;
+ case 0x20: r_0 = 0xc; break;
+ case 0x40: r_0 = 0xd; break;
+ case 0x80: r_0 = 0xe; break;
+ default: r_0 = 0xf; break;
+ }
+ *valp = r_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8_0 = imm8_0;
+ *valp = uimm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8_0;
+ uimm8_0 = *valp;
+ imm8_0 = (uimm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x2_0 = imm8_0 << 1;
+ *valp = uimm8x2_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x2_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x2_0;
+ uimm8x2_0 = *valp;
+ imm8_0 = ((uimm8x2_0 >> 1) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ uimm8x4_0 = imm8_0 << 2;
+ *valp = uimm8x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm8x4_encode (uint32 *valp)
+{
+ unsigned imm8_0, uimm8x4_0;
+ uimm8x4_0 = *valp;
+ imm8_0 = ((uimm8x4_0 >> 2) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_0, op2_0;
+ op2_0 = *valp & 0xf;
+ uimm4x16_0 = op2_0 << 4;
+ *valp = uimm4x16_0;
+ return 0;
+}
+
+static int
+Operand_uimm4x16_encode (uint32 *valp)
+{
+ unsigned op2_0, uimm4x16_0;
+ uimm4x16_0 = *valp;
+ op2_0 = ((uimm4x16_0 >> 4) & 0xf);
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8_0 = ((int) imm8_0 << 24) >> 24;
+ *valp = simm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8_0;
+ simm8_0 = *valp;
+ imm8_0 = (simm8_0 & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ simm8x256_0 = (((int) imm8_0 << 24) >> 24) << 8;
+ *valp = simm8x256_0;
+ return 0;
+}
+
+static int
+Operand_simm8x256_encode (uint32 *valp)
+{
+ unsigned imm8_0, simm8x256_0;
+ simm8x256_0 = *valp;
+ imm8_0 = ((simm8x256_0 >> 8) & 0xff);
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_0, imm12b_0;
+ imm12b_0 = *valp & 0xfff;
+ simm12b_0 = ((int) imm12b_0 << 20) >> 20;
+ *valp = simm12b_0;
+ return 0;
+}
+
+static int
+Operand_simm12b_encode (uint32 *valp)
+{
+ unsigned imm12b_0, simm12b_0;
+ simm12b_0 = *valp;
+ imm12b_0 = (simm12b_0 & 0xfff);
+ *valp = imm12b_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_0, sal_0;
+ sal_0 = *valp & 0x1f;
+ msalp32_0 = 0x20 - sal_0;
+ *valp = msalp32_0;
+ return 0;
+}
+
+static int
+Operand_msalp32_encode (uint32 *valp)
+{
+ unsigned sal_0, msalp32_0;
+ msalp32_0 = *valp;
+ sal_0 = (0x20 - msalp32_0) & 0x1f;
+ *valp = sal_0;
+ return 0;
+}
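+
+/* msalp32 covers SLLI's inverted shift field: the hardware stores
+   32 - shift, so decode subtracts the field from 0x20 and encode does
+   the reverse.  A left shift by 1, for example, travels as field 0x1f.
+   op2p1, next, is the same idea with a +1 bias, giving EXTUI its 1..16
+   field-width range from a 4-bit field. */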
+
+static int
+Operand_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_0, op2_0;
+ op2_0 = *valp & 0xf;
+ op2p1_0 = op2_0 + 0x1;
+ *valp = op2p1_0;
+ return 0;
+}
+
+static int
+Operand_op2p1_encode (uint32 *valp)
+{
+ unsigned op2_0, op2p1_0;
+ op2p1_0 = *valp;
+ op2_0 = (op2p1_0 - 0x1) & 0xf;
+ *valp = op2_0;
+ return 0;
+}
+
+static int
+Operand_label8_decode (uint32 *valp)
+{
+ unsigned label8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ label8_0 = 0x4 + (((int) imm8_0 << 24) >> 24);
+ *valp = label8_0;
+ return 0;
+}
+
+static int
+Operand_label8_encode (uint32 *valp)
+{
+ unsigned imm8_0, label8_0;
+ label8_0 = *valp;
+ imm8_0 = (label8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
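+
+/* The label operands layer a relocation pair over encode/decode: rtoa
+   (relative to absolute) adds the opcode's PC to the decoded value, and
+   ator is its inverse for assembly.  The +4 bias is already folded into
+   decode, so a label8 branch target is pc + 4 + sign_extend(imm8).  A
+   worked example with assumed values pc = 0x1000, field = 0x10: */
+
+static uint32 label8_target_sketch(void)
+{
+    uint32 pc = 0x1000, field = 0x10;
+    uint32 rel = 0x4 + (((int) (field & 0xff) << 24) >> 24);  /* decode */
+    uint32 target = rel + pc;                                 /* rtoa */
+    /* Assembly reverses the steps: ator subtracts pc, and encode drops
+       the bias and masks: ((target - pc) - 0x4) & 0xff == 0x10. */
+    return target;  /* 0x1014 */
+}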
+
+static int
+Operand_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_0, imm8_0;
+ imm8_0 = *valp & 0xff;
+ ulabel8_0 = 0x4 + ((((0)) << 8) | imm8_0);
+ *valp = ulabel8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_encode (uint32 *valp)
+{
+ unsigned imm8_0, ulabel8_0;
+ ulabel8_0 = *valp;
+ imm8_0 = (ulabel8_0 - 0x4) & 0xff;
+ *valp = imm8_0;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
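+
+/* ulabel8 is the unsigned variant used for the LOOP end label (see
+   Iclass_xt_iclass_loop_args below), which is always a forward branch:
+   decode zero-extends rather than sign-extends (the ((0) << 8) term is
+   the generator spelling out the empty high byte) and applies the same
+   +4 bias, so the loop end is pc + 4 + imm8. */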
+
+static int
+Operand_label12_decode (uint32 *valp)
+{
+ unsigned label12_0, imm12_0;
+ imm12_0 = *valp & 0xfff;
+ label12_0 = 0x4 + (((int) imm12_0 << 20) >> 20);
+ *valp = label12_0;
+ return 0;
+}
+
+static int
+Operand_label12_encode (uint32 *valp)
+{
+ unsigned imm12_0, label12_0;
+ label12_0 = *valp;
+ imm12_0 = (label12_0 - 0x4) & 0xfff;
+ *valp = imm12_0;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_0, offset_0;
+ offset_0 = *valp & 0x3ffff;
+ soffset_0 = 0x4 + (((int) offset_0 << 14) >> 14);
+ *valp = soffset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_encode (uint32 *valp)
+{
+ unsigned offset_0, soffset_0;
+ soffset_0 = *valp;
+ offset_0 = (soffset_0 - 0x4) & 0x3ffff;
+ *valp = offset_0;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_0, imm16_0;
+ imm16_0 = *valp & 0xffff;
+ uimm16x4_0 = ((((0xffff)) << 16) | imm16_0) << 2;
+ *valp = uimm16x4_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_encode (uint32 *valp)
+{
+ unsigned imm16_0, uimm16x4_0;
+ uimm16x4_0 = *valp;
+ imm16_0 = (uimm16x4_0 >> 2) & 0xffff;
+ *valp = imm16_0;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
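+
+/* uimm16x4 is the L32R literal offset, and the only operand here
+   relocated against something other than the raw PC: decode one-extends
+   the 16-bit field (the 0xffff << 16 above) and scales by 4, so the
+   offset is always negative and word-aligned, and ator/rtoa use the
+   word-aligned address of the next instruction, (pc + 3) & ~3.  A worked
+   example with assumed values pc = 0x2002, field = 0xffff: */
+
+static uint32 l32r_literal_sketch(void)
+{
+    uint32 pc = 0x2002, field = 0xffff;
+    uint32 off = ((0xffffu << 16) | (field & 0xffff)) << 2;  /* decode */
+    /* off == 0xfffffffc, i.e. -4 */
+    return off + ((pc + 3) & ~0x3u);  /* rtoa: 0x2004 - 4 == 0x2000 */
+}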
+
+static int
+Operand_immt_decode (uint32 *valp)
+{
+ unsigned immt_0, t_0;
+ t_0 = *valp & 0xf;
+ immt_0 = t_0;
+ *valp = immt_0;
+ return 0;
+}
+
+static int
+Operand_immt_encode (uint32 *valp)
+{
+ unsigned t_0, immt_0;
+ immt_0 = *valp;
+ t_0 = immt_0 & 0xf;
+ *valp = t_0;
+ return 0;
+}
+
+static int
+Operand_imms_decode (uint32 *valp)
+{
+ unsigned imms_0, s_0;
+ s_0 = *valp & 0xf;
+ imms_0 = s_0;
+ *valp = imms_0;
+ return 0;
+}
+
+static int
+Operand_imms_encode (uint32 *valp)
+{
+ unsigned s_0, imms_0;
+ imms_0 = *valp;
+ s_0 = imms_0 & 0xf;
+ *valp = s_0;
+ return 0;
+}
+
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", 10, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffsetx4_encode, Operand_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", 3, -1, 0,
+ 0,
+ Operand_uimm12x8_encode, Operand_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", 26, -1, 0,
+ 0,
+ Operand_simm4_encode, Operand_simm4_decode,
+ 0, 0 },
+ { "arr", 14, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_arr_encode, Operand_arr_decode,
+ 0, 0 },
+ { "ars", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "*ars_invisible", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ars_encode, Operand_ars_decode,
+ 0, 0 },
+ { "art", 0, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_art_encode, Operand_art_decode,
+ 0, 0 },
+ { "ar0", 35, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar0_encode, Operand_ar0_decode,
+ 0, 0 },
+ { "ar4", 36, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar4_encode, Operand_ar4_decode,
+ 0, 0 },
+ { "ar8", 37, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar8_encode, Operand_ar8_decode,
+ 0, 0 },
+ { "ar12", 38, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ Operand_ar12_encode, Operand_ar12_decode,
+ 0, 0 },
+ { "ars_entry", 5, 0, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ Operand_ars_entry_encode, Operand_ars_entry_decode,
+ 0, 0 },
+ { "immrx4", 14, -1, 0,
+ 0,
+ Operand_immrx4_encode, Operand_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", 14, -1, 0,
+ 0,
+ Operand_lsi4x4_encode, Operand_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", 34, -1, 0,
+ 0,
+ Operand_simm7_encode, Operand_simm7_decode,
+ 0, 0 },
+ { "uimm6", 33, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm6_encode, Operand_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", 0, -1, 0,
+ 0,
+ Operand_ai4const_encode, Operand_ai4const_decode,
+ 0, 0 },
+ { "b4const", 14, -1, 0,
+ 0,
+ Operand_b4const_encode, Operand_b4const_decode,
+ 0, 0 },
+ { "b4constu", 14, -1, 0,
+ 0,
+ Operand_b4constu_encode, Operand_b4constu_decode,
+ 0, 0 },
+ { "uimm8", 4, -1, 0,
+ 0,
+ Operand_uimm8_encode, Operand_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", 4, -1, 0,
+ 0,
+ Operand_uimm8x2_encode, Operand_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", 4, -1, 0,
+ 0,
+ Operand_uimm8x4_encode, Operand_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", 13, -1, 0,
+ 0,
+ Operand_uimm4x16_encode, Operand_uimm4x16_decode,
+ 0, 0 },
+ { "simm8", 4, -1, 0,
+ 0,
+ Operand_simm8_encode, Operand_simm8_decode,
+ 0, 0 },
+ { "simm8x256", 4, -1, 0,
+ 0,
+ Operand_simm8x256_encode, Operand_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", 6, -1, 0,
+ 0,
+ Operand_simm12b_encode, Operand_simm12b_decode,
+ 0, 0 },
+ { "msalp32", 18, -1, 0,
+ 0,
+ Operand_msalp32_encode, Operand_msalp32_decode,
+ 0, 0 },
+ { "op2p1", 13, -1, 0,
+ 0,
+ Operand_op2p1_encode, Operand_op2p1_decode,
+ 0, 0 },
+ { "label8", 4, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label8_encode, Operand_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", 4, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_ulabel8_encode, Operand_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", 3, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_label12_encode, Operand_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", 10, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_soffset_encode, Operand_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", 7, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ Operand_uimm16x4_encode, Operand_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "immt", 0, -1, 0,
+ 0,
+ Operand_immt_encode, Operand_immt_decode,
+ 0, 0 },
+ { "imms", 5, -1, 0,
+ 0,
+ Operand_imms_encode, Operand_imms_decode,
+ 0, 0 },
+ { "t", 0, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", 1, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi", 2, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", 3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", 4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s", 5, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", 6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", 7, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", 8, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", 9, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", 10, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", 11, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", 12, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", 13, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", 14, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", 15, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", 16, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae", 17, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", 18, -1, 0, 0, 0, 0, 0, 0 },
+ { "sargt", 19, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", 20, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas", 21, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", 22, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", 23, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", 24, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", 25, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", 26, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", 27, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", 28, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", 29, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", 30, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", 31, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", 32, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", 33, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", 34, -1, 0, 0, 0, 0, 0, 0 }
+};
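+
+/* Each row above fills one xtensa_operand_internal: the operand name
+   ('*' marks invisible implicit operands such as *ars_invisible), the
+   index of the instruction field that encodes it, the register file
+   (-1 for immediates, 0 for the AR file), the register count, flag
+   bits, the encode/decode pair, and the ator/rtoa relocation pair
+   (0 except for PC-relative operands).  A sketch of that layout,
+   reconstructed from memory of xtensa-isa-internal.h, so treat the
+   member names as approximate: */
+
+struct xtensa_operand_layout_sketch {
+    const char *name;                      /* "soffsetx4", "arr", ... */
+    int field_id;                          /* index into the field table */
+    int regfile;                           /* -1 = immediate, 0 = AR */
+    int num_regs;                          /* 1 for register operands */
+    uint32 flags;                          /* XTENSA_OPERAND_IS_* bits */
+    int (*encode)(uint32 *valp);           /* operand value -> field */
+    int (*decode)(uint32 *valp);           /* field -> operand value */
+    int (*ator)(uint32 *valp, uint32 pc);  /* absolute -> PC-relative */
+    int (*rtoa)(uint32 *valp, uint32 pc);  /* PC-relative -> absolute */
+};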
+
+
+/* Iclass table. */
+
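+/* Every iclass below lists its operand arguments and its implicit state
+   arguments as { id, direction } pairs: the number is an index into
+   operands[] above (named in the adjacent comment) or a STATE_* id, and
+   the direction is 'i' for input, 'o' for output, 'm' for modified
+   (read-write).  The first entry, for instance, records that RFE reads
+   PSRING and EPC1 and modifies PSEXCM.  A sketch of the entry layout,
+   again from memory of xtensa-isa-internal.h: */
+
+struct xtensa_arg_layout_sketch {
+    union {
+        int operand_id;  /* index into operands[] */
+        int state;       /* STATE_* id for state arguments */
+    } u;
+    char inout;          /* 'i', 'o' or 'm' */
+};
+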
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 10 /* ar12 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 9 /* ar8 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 8 /* ar4 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 10 /* ar12 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 9 /* ar8 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 8 /* ar4 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { 11 /* ars_entry */ }, 's' },
+ { { 4 /* ars */ }, 'i' },
+ { { 1 /* uimm12x8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { 2 /* simm4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 12 /* immrx4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 12 /* immrx4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
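+
+/* From here on the table is dominated by RSR/WSR/XSR triples, one per
+   special register, whose directions follow mechanically from the
+   semantics: RSR writes art ('o') and reads the state, WSR reads art
+   ('i') and writes the state, and XSR exchanges them, marking both 'm'.
+   PSEXCM and PSRING appear as inputs in nearly every stateArgs list,
+   reflecting the privilege check on special-register access:
+
+       op     art    state
+       RSR    'o'    'i'
+       WSR    'i'    'o'
+       XSR    'm'    'm'
+*/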
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 16 /* ai4const */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 15 /* uimm6 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 13 /* lsi4x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { 4 /* ars */ }, 'o' },
+ { { 14 /* simm7 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 13 /* lsi4x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 23 /* simm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 24 /* simm8x256 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 17 /* b4const */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 37 /* bbi */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 18 /* b4constu */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' },
+ { { 28 /* label8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 30 /* label12 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { 0 /* soffsetx4 */ }, 'i' },
+ { { 7 /* ar0 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 7 /* ar0 */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 52 /* sae */ }, 'i' },
+ { { 27 /* op2p1 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { 31 /* soffset */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 32 /* uimm16x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 19 /* uimm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 29 /* ulabel8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 29 /* ulabel8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 25 /* simm12b */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { 3 /* arr */ }, 'm' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { 5 /* *ars_invisible */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 20 /* uimm8x2 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' },
+ { { 19 /* uimm8 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { 56 /* sas */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 4 /* ars */ }, 'i' },
+ { { 26 /* msalp32 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 54 /* sargt */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { 3 /* arr */ }, 'o' },
+ { { 6 /* art */ }, 'i' },
+ { { 40 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 40 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'o' },
+ { { STATE_LITBEN }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'm' },
+ { { STATE_LITBEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_176_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_208_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { 40 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
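+
+/* RFI s returns from the level-s interrupt, restoring PS from EPS[s] and
+   jumping to EPC[s]; since s is only known when the instruction executes,
+   the iclass conservatively lists every EPC/EPS level as an input and
+   every PS field as an output. */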
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { 40 /* s */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { 34 /* imms */ }, 'i' },
+ { { 33 /* immt */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { 34 /* imms */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 22 /* uimm4x16 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { 4 /* ars */ }, 'i' },
+ { { 21 /* uimm8x4 */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { 6 /* art */ }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { 6 /* art */ }, 'i' },
+ { { 4 /* ars */ }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { 6 /* art */ }, 'o' },
+ { { 4 /* ars */ }, 'i' }
+};
+
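+/*
+ * Instruction-class table.  Per the xtensa_iclass_internal layout in
+ * xtensa-isa-internal.h, each entry is expected to read as
+ * { num_operands, operand args, num_state_operands, state args,
+ *   num_interface_operands, interface args }; the counts mirror the
+ * lengths of the arrays defined above, and no iclass in this core uses
+ * interface operands, hence the trailing "0, 0" everywhere.
+ */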
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 4, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 2, Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_176_args,
+ 2, Iclass_xt_iclass_rsr_176_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_208_args,
+ 2, Iclass_xt_iclass_rsr_208_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 3, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 3, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 3, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 3, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 3, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 3, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 3, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 3, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 3, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 3, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 3, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 3, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 3, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 3, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 3, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 3, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 3, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 3, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 15, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdo */,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 3, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 4, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 4, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 }
+};
+
+
+/* Opcode encodings. */
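+/*
+ * Each encode function stores the opcode's fixed bits into word 0 of the
+ * slot buffer; operand fields are left zero and are filled in afterwards
+ * by the per-field set functions.  A minimal usage sketch, assuming the
+ * public libisa entry points from xtensa-isa.c (not part of this table):
+ *
+ *   xtensa_insnbuf slotbuf = xtensa_insnbuf_alloc(isa);
+ *   xtensa_opcode_encode(isa, fmt, slot, slotbuf, opc);  // one fn below
+ *   xtensa_operand_set_field(isa, opc, 0, fmt, slot, slotbuf, value);
+ */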
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80200;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2300;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1500;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c0000;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580000;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x540000;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0000;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0000;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70000;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0000;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x804;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10f;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4300;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5300;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x94;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4830;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4831;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4816;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4930;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4931;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4916;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc800;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc00;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd60f;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd000;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd30f;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00f;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9000;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200c00;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200d00;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680000;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690000;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0000;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0000;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700600;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700e00;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0000;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0000;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700100;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700900;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700a00;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700200;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700b00;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700300;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700800;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700000;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700400;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700c00;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700500;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700d00;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640000;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650000;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670000;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660000;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500000;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0000;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200100;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200900;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200200;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0800;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0900;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0a00;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200a00;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1006;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0200;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200500;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200600;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200400;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x304;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x19;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0200;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0200;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10200;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20200;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x116;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x231;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x216;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x331;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x316;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x531;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x516;
+}
+
+static void
+Opcode_rsr_176_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb030;
+}
+
+static void
+Opcode_rsr_208_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd030;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe630;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe631;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe616;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb130;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb131;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb116;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd130;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd131;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd116;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb230;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb231;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb216;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd230;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd231;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd216;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb330;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb331;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb316;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd330;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd331;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd316;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb430;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb431;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb416;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd430;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd431;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd416;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc230;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc231;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc216;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc330;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc331;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc316;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc430;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc431;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc416;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee30;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee31;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee16;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc030;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc031;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc016;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe830;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe831;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe816;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf430;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf431;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf416;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf530;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf531;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf516;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xeb30;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10300;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe230;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe231;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe331;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe430;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe431;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe416;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20f;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9030;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9031;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9016;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa030;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa031;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa016;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9130;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9131;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9116;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa130;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa131;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa116;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8030;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8031;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8016;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8130;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8131;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8116;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6030;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6031;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6016;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe930;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe931;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe916;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec30;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec31;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec16;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed30;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed31;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed16;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6830;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6831;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6816;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1f;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10e1f;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea30;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea31;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea16;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf030;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf031;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf016;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf130;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf131;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf116;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf230;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf231;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf216;
+}
+
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0700;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0700;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0700;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21f;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11f;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31f;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240700;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250700;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280740;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280750;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260700;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270700;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200700;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210700;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x220700;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230700;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x91f;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81f;
+}
+
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5331;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5330;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5316;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a30;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a31;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a16;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b30;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b31;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b16;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c30;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c31;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c16;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc05;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd05;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb05;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf05;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe05;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x405;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x305;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x705;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x605;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x105;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x905;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe04;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf04;
+}
+
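+/*
+ * Per-opcode encode-function tables, one entry per slot in the order
+ * { inst, inst16a, inst16b }.  A zero entry means the opcode cannot be
+ * encoded in that slot; for example, the narrow (.n) opcodes only
+ * provide one of the 16-bit slots.
+ */
+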
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_176_encode_fns[] = {
+ Opcode_rsr_176_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_208_encode_fns[] = {
+ Opcode_rsr_208_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+
+/* Opcode table. */
+
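+/* Each entry is { name, iclass index, flags, encode_fns, 0, 0 }.  The
+ * iclass index (annotated with its xt_iclass_* name) groups opcodes that
+ * share the same operand/interface description; the flags mark
+ * control-flow opcodes (XTENSA_OPCODE_IS_JUMP/CALL/BRANCH/LOOP); the
+ * trailing two fields are always 0 in this core's table.  */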
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", 0 /* xt_iclass_excw */,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", 1 /* xt_iclass_rfe */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", 2 /* xt_iclass_rfde */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", 3 /* xt_iclass_syscall */,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "simcall", 4 /* xt_iclass_simcall */,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "call12", 5 /* xt_iclass_call12 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", 6 /* xt_iclass_call8 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", 7 /* xt_iclass_call4 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", 8 /* xt_iclass_callx12 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", 9 /* xt_iclass_callx8 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", 10 /* xt_iclass_callx4 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", 11 /* xt_iclass_entry */,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", 12 /* xt_iclass_movsp */,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", 13 /* xt_iclass_rotw */,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", 14 /* xt_iclass_retw */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", 14 /* xt_iclass_retw */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", 15 /* xt_iclass_rfwou */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", 15 /* xt_iclass_rfwou */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", 16 /* xt_iclass_l32e */,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", 17 /* xt_iclass_s32e */,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", 18 /* xt_iclass_rsr.windowbase */,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", 19 /* xt_iclass_wsr.windowbase */,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", 20 /* xt_iclass_xsr.windowbase */,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", 21 /* xt_iclass_rsr.windowstart */,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", 22 /* xt_iclass_wsr.windowstart */,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", 23 /* xt_iclass_xsr.windowstart */,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", 24 /* xt_iclass_add.n */,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", 25 /* xt_iclass_addi.n */,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", 26 /* xt_iclass_bz6 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", 26 /* xt_iclass_bz6 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", 27 /* xt_iclass_ill.n */,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", 28 /* xt_iclass_loadi4 */,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", 29 /* xt_iclass_mov.n */,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", 30 /* xt_iclass_movi.n */,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", 31 /* xt_iclass_nopn */,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", 32 /* xt_iclass_retn */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", 33 /* xt_iclass_storei4 */,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "addi", 34 /* xt_iclass_addi */,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", 35 /* xt_iclass_addmi */,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", 36 /* xt_iclass_addsub */,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", 37 /* xt_iclass_bit */,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", 37 /* xt_iclass_bit */,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", 37 /* xt_iclass_bit */,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", 38 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", 38 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", 38 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", 38 /* xt_iclass_bsi8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", 39 /* xt_iclass_bsi8b */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", 39 /* xt_iclass_bsi8b */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", 40 /* xt_iclass_bsi8u */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", 40 /* xt_iclass_bsi8u */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", 41 /* xt_iclass_bst8 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", 42 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", 42 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", 42 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", 42 /* xt_iclass_bsz12 */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", 43 /* xt_iclass_call0 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", 44 /* xt_iclass_callx0 */,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", 45 /* xt_iclass_exti */,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", 46 /* xt_iclass_ill */,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", 47 /* xt_iclass_jump */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", 48 /* xt_iclass_jumpx */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", 49 /* xt_iclass_l16ui */,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", 50 /* xt_iclass_l16si */,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", 51 /* xt_iclass_l32i */,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", 52 /* xt_iclass_l32r */,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", 53 /* xt_iclass_l8i */,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", 54 /* xt_iclass_loop */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", 55 /* xt_iclass_loopz */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", 55 /* xt_iclass_loopz */,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", 56 /* xt_iclass_movi */,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", 57 /* xt_iclass_movz */,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", 57 /* xt_iclass_movz */,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", 57 /* xt_iclass_movz */,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", 57 /* xt_iclass_movz */,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", 58 /* xt_iclass_neg */,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", 58 /* xt_iclass_neg */,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", 59 /* xt_iclass_nop */,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", 60 /* xt_iclass_return */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "s16i", 61 /* xt_iclass_s16i */,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", 62 /* xt_iclass_s32i */,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s8i", 63 /* xt_iclass_s8i */,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", 64 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", 64 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", 64 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", 64 /* xt_iclass_sar */,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", 65 /* xt_iclass_sari */,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", 66 /* xt_iclass_shifts */,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", 67 /* xt_iclass_shiftst */,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", 68 /* xt_iclass_shiftt */,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", 68 /* xt_iclass_shiftt */,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", 69 /* xt_iclass_slli */,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", 70 /* xt_iclass_srai */,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", 71 /* xt_iclass_srli */,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", 72 /* xt_iclass_memw */,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", 73 /* xt_iclass_extw */,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", 74 /* xt_iclass_isync */,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", 75 /* xt_iclass_sync */,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", 75 /* xt_iclass_sync */,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", 75 /* xt_iclass_sync */,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", 76 /* xt_iclass_rsil */,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", 77 /* xt_iclass_rsr.lend */,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", 78 /* xt_iclass_wsr.lend */,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", 79 /* xt_iclass_xsr.lend */,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", 80 /* xt_iclass_rsr.lcount */,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", 81 /* xt_iclass_wsr.lcount */,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", 82 /* xt_iclass_xsr.lcount */,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", 83 /* xt_iclass_rsr.lbeg */,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", 84 /* xt_iclass_wsr.lbeg */,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", 85 /* xt_iclass_xsr.lbeg */,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", 86 /* xt_iclass_rsr.sar */,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", 87 /* xt_iclass_wsr.sar */,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", 88 /* xt_iclass_xsr.sar */,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.litbase", 89 /* xt_iclass_rsr.litbase */,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", 90 /* xt_iclass_wsr.litbase */,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", 91 /* xt_iclass_xsr.litbase */,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.176", 92 /* xt_iclass_rsr.176 */,
+ 0,
+ Opcode_rsr_176_encode_fns, 0, 0 },
+ { "rsr.208", 93 /* xt_iclass_rsr.208 */,
+ 0,
+ Opcode_rsr_208_encode_fns, 0, 0 },
+ { "rsr.ps", 94 /* xt_iclass_rsr.ps */,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", 95 /* xt_iclass_wsr.ps */,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", 96 /* xt_iclass_xsr.ps */,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", 97 /* xt_iclass_rsr.epc1 */,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", 98 /* xt_iclass_wsr.epc1 */,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", 99 /* xt_iclass_xsr.epc1 */,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", 100 /* xt_iclass_rsr.excsave1 */,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", 101 /* xt_iclass_wsr.excsave1 */,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", 102 /* xt_iclass_xsr.excsave1 */,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", 103 /* xt_iclass_rsr.epc2 */,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", 104 /* xt_iclass_wsr.epc2 */,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", 105 /* xt_iclass_xsr.epc2 */,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", 106 /* xt_iclass_rsr.excsave2 */,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", 107 /* xt_iclass_wsr.excsave2 */,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", 108 /* xt_iclass_xsr.excsave2 */,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", 109 /* xt_iclass_rsr.epc3 */,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", 110 /* xt_iclass_wsr.epc3 */,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", 111 /* xt_iclass_xsr.epc3 */,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", 112 /* xt_iclass_rsr.excsave3 */,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", 113 /* xt_iclass_wsr.excsave3 */,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", 114 /* xt_iclass_xsr.excsave3 */,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", 115 /* xt_iclass_rsr.epc4 */,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", 116 /* xt_iclass_wsr.epc4 */,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", 117 /* xt_iclass_xsr.epc4 */,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", 118 /* xt_iclass_rsr.excsave4 */,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", 119 /* xt_iclass_wsr.excsave4 */,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", 120 /* xt_iclass_xsr.excsave4 */,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.eps2", 121 /* xt_iclass_rsr.eps2 */,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", 122 /* xt_iclass_wsr.eps2 */,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", 123 /* xt_iclass_xsr.eps2 */,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", 124 /* xt_iclass_rsr.eps3 */,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", 125 /* xt_iclass_wsr.eps3 */,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", 126 /* xt_iclass_xsr.eps3 */,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", 127 /* xt_iclass_rsr.eps4 */,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", 128 /* xt_iclass_wsr.eps4 */,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", 129 /* xt_iclass_xsr.eps4 */,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.excvaddr", 130 /* xt_iclass_rsr.excvaddr */,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", 131 /* xt_iclass_wsr.excvaddr */,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", 132 /* xt_iclass_xsr.excvaddr */,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", 133 /* xt_iclass_rsr.depc */,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", 134 /* xt_iclass_wsr.depc */,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", 135 /* xt_iclass_xsr.depc */,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", 136 /* xt_iclass_rsr.exccause */,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", 137 /* xt_iclass_wsr.exccause */,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", 138 /* xt_iclass_xsr.exccause */,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", 139 /* xt_iclass_rsr.misc0 */,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", 140 /* xt_iclass_wsr.misc0 */,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", 141 /* xt_iclass_xsr.misc0 */,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", 142 /* xt_iclass_rsr.misc1 */,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", 143 /* xt_iclass_wsr.misc1 */,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", 144 /* xt_iclass_xsr.misc1 */,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", 145 /* xt_iclass_rsr.prid */,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rfi", 146 /* xt_iclass_rfi */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", 147 /* xt_iclass_wait */,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", 148 /* xt_iclass_rsr.interrupt */,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", 149 /* xt_iclass_wsr.intset */,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", 150 /* xt_iclass_wsr.intclear */,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", 151 /* xt_iclass_rsr.intenable */,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", 152 /* xt_iclass_wsr.intenable */,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", 153 /* xt_iclass_xsr.intenable */,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", 154 /* xt_iclass_break */,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", 155 /* xt_iclass_break.n */,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", 156 /* xt_iclass_rsr.dbreaka0 */,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", 157 /* xt_iclass_wsr.dbreaka0 */,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", 158 /* xt_iclass_xsr.dbreaka0 */,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", 159 /* xt_iclass_rsr.dbreakc0 */,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", 160 /* xt_iclass_wsr.dbreakc0 */,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", 161 /* xt_iclass_xsr.dbreakc0 */,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", 162 /* xt_iclass_rsr.dbreaka1 */,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", 163 /* xt_iclass_wsr.dbreaka1 */,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", 164 /* xt_iclass_xsr.dbreaka1 */,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", 165 /* xt_iclass_rsr.dbreakc1 */,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", 166 /* xt_iclass_wsr.dbreakc1 */,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", 167 /* xt_iclass_xsr.dbreakc1 */,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", 168 /* xt_iclass_rsr.ibreaka0 */,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", 169 /* xt_iclass_wsr.ibreaka0 */,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", 170 /* xt_iclass_xsr.ibreaka0 */,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", 171 /* xt_iclass_rsr.ibreaka1 */,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", 172 /* xt_iclass_wsr.ibreaka1 */,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", 173 /* xt_iclass_xsr.ibreaka1 */,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", 174 /* xt_iclass_rsr.ibreakenable */,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", 175 /* xt_iclass_wsr.ibreakenable */,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", 176 /* xt_iclass_xsr.ibreakenable */,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", 177 /* xt_iclass_rsr.debugcause */,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", 178 /* xt_iclass_wsr.debugcause */,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", 179 /* xt_iclass_xsr.debugcause */,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", 180 /* xt_iclass_rsr.icount */,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", 181 /* xt_iclass_wsr.icount */,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", 182 /* xt_iclass_xsr.icount */,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", 183 /* xt_iclass_rsr.icountlevel */,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", 184 /* xt_iclass_wsr.icountlevel */,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", 185 /* xt_iclass_xsr.icountlevel */,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", 186 /* xt_iclass_rsr.ddr */,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", 187 /* xt_iclass_wsr.ddr */,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", 188 /* xt_iclass_xsr.ddr */,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "rfdo", 189 /* xt_iclass_rfdo */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", 190 /* xt_iclass_rfdd */,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "rsr.ccount", 191 /* xt_iclass_rsr.ccount */,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", 192 /* xt_iclass_wsr.ccount */,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", 193 /* xt_iclass_xsr.ccount */,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", 194 /* xt_iclass_rsr.ccompare0 */,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", 195 /* xt_iclass_wsr.ccompare0 */,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", 196 /* xt_iclass_xsr.ccompare0 */,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", 197 /* xt_iclass_rsr.ccompare1 */,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", 198 /* xt_iclass_wsr.ccompare1 */,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", 199 /* xt_iclass_xsr.ccompare1 */,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", 200 /* xt_iclass_rsr.ccompare2 */,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", 201 /* xt_iclass_wsr.ccompare2 */,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", 202 /* xt_iclass_xsr.ccompare2 */,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ipf", 203 /* xt_iclass_icache */,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", 203 /* xt_iclass_icache */,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "iii", 204 /* xt_iclass_icache_inv */,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", 205 /* xt_iclass_licx */,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", 205 /* xt_iclass_licx */,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", 206 /* xt_iclass_sicx */,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", 206 /* xt_iclass_sicx */,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", 207 /* xt_iclass_dcache */,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", 207 /* xt_iclass_dcache */,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwb", 208 /* xt_iclass_dcache_ind */,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", 208 /* xt_iclass_dcache_ind */,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", 209 /* xt_iclass_dcache_inv */,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", 209 /* xt_iclass_dcache_inv */,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", 210 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", 210 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", 210 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", 210 /* xt_iclass_dpf */,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "sdct", 211 /* xt_iclass_sdct */,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", 212 /* xt_iclass_ldct */,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "wsr.ptevaddr", 213 /* xt_iclass_wsr.ptevaddr */,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", 214 /* xt_iclass_rsr.ptevaddr */,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", 215 /* xt_iclass_xsr.ptevaddr */,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", 216 /* xt_iclass_rsr.rasid */,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", 217 /* xt_iclass_wsr.rasid */,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", 218 /* xt_iclass_xsr.rasid */,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", 219 /* xt_iclass_rsr.itlbcfg */,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", 220 /* xt_iclass_wsr.itlbcfg */,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", 221 /* xt_iclass_xsr.itlbcfg */,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", 222 /* xt_iclass_rsr.dtlbcfg */,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", 223 /* xt_iclass_wsr.dtlbcfg */,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", 224 /* xt_iclass_xsr.dtlbcfg */,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", 225 /* xt_iclass_idtlb */,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", 226 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", 226 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", 226 /* xt_iclass_rdtlb */,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", 227 /* xt_iclass_wdtlb */,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", 228 /* xt_iclass_iitlb */,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", 229 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", 229 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", 229 /* xt_iclass_ritlb */,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", 230 /* xt_iclass_witlb */,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", 231 /* xt_iclass_ldpte */,
+ 0,
+ Opcode_ldpte_encode_fns, 0, 0 },
+ { "hwwitlba", 232 /* xt_iclass_hwwitlba */,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", 233 /* xt_iclass_hwwdtlba */,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "nsa", 234 /* xt_iclass_nsa */,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", 234 /* xt_iclass_nsa */,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 }
+};
+
+
+/* Slot-specific opcode decode functions. */
+
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return 77; /* ill */
+ break;
+ case 2:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 96; /* ret */
+ case 1:
+ return 14; /* retw */
+ case 2:
+ return 79; /* jx */
+ }
+ break;
+ case 3:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 75; /* callx0 */
+ case 1:
+ return 10; /* callx4 */
+ case 2:
+ return 9; /* callx8 */
+ case 3:
+ return 8; /* callx12 */
+ }
+ break;
+ }
+ break;
+ case 1:
+ return 12; /* movsp */
+ case 2:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return 114; /* isync */
+ case 1:
+ return 115; /* rsync */
+ case 2:
+ return 116; /* esync */
+ case 3:
+ return 117; /* dsync */
+ case 8:
+ return 0; /* excw */
+ case 12:
+ return 112; /* memw */
+ case 13:
+ return 113; /* extw */
+ case 15:
+ return 95; /* nop */
+ }
+ }
+ break;
+ case 3:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return 1; /* rfe */
+ case 2:
+ return 2; /* rfde */
+ case 4:
+ return 16; /* rfwo */
+ case 5:
+ return 17; /* rfwu */
+ }
+ break;
+ case 1:
+ return 188; /* rfi */
+ }
+ break;
+ case 4:
+ return 196; /* break */
+ case 5:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 3; /* syscall */
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 4; /* simcall */
+ break;
+ }
+ break;
+ case 6:
+ return 118; /* rsil */
+ case 7:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 189; /* waiti */
+ break;
+ }
+ break;
+ case 1:
+ return 47; /* and */
+ case 2:
+ return 48; /* or */
+ case 3:
+ return 49; /* xor */
+ case 4:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 100; /* ssr */
+ break;
+ case 1:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 101; /* ssl */
+ break;
+ case 2:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 102; /* ssa8l */
+ break;
+ case 3:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 103; /* ssa8b */
+ break;
+ case 4:
+ if (Field_thi3_Slot_inst_get (insn) == 0)
+ return 104; /* ssai */
+ break;
+ case 8:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 13; /* rotw */
+ break;
+ case 14:
+ return 289; /* nsa */
+ case 15:
+ return 290; /* nsau */
+ }
+ break;
+ case 5:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 1:
+ return 287; /* hwwitlba */
+ case 3:
+ return 283; /* ritlb0 */
+ case 4:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 281; /* iitlb */
+ break;
+ case 5:
+ return 282; /* pitlb */
+ case 6:
+ return 285; /* witlb */
+ case 7:
+ return 284; /* ritlb1 */
+ case 9:
+ return 288; /* hwwdtlba */
+ case 11:
+ return 278; /* rdtlb0 */
+ case 12:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 276; /* idtlb */
+ break;
+ case 13:
+ return 277; /* pdtlb */
+ case 14:
+ return 280; /* wdtlb */
+ case 15:
+ return 279; /* rdtlb1 */
+ }
+ break;
+ case 6:
+ switch (Field_s_Slot_inst_get (insn))
+ {
+ case 0:
+ return 93; /* neg */
+ case 1:
+ return 94; /* abs */
+ }
+ break;
+ case 8:
+ return 39; /* add */
+ case 9:
+ return 41; /* addx2 */
+ case 10:
+ return 42; /* addx4 */
+ case 11:
+ return 43; /* addx8 */
+ case 12:
+ return 40; /* sub */
+ case 13:
+ return 44; /* subx2 */
+ case 14:
+ return 45; /* subx4 */
+ case 15:
+ return 46; /* subx8 */
+ }
+ break;
+ case 1:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ case 1:
+ return 109; /* slli */
+ case 2:
+ case 3:
+ return 110; /* srai */
+ case 4:
+ return 111; /* srli */
+ case 6:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return 127; /* xsr.lbeg */
+ case 1:
+ return 121; /* xsr.lend */
+ case 2:
+ return 124; /* xsr.lcount */
+ case 3:
+ return 130; /* xsr.sar */
+ case 5:
+ return 133; /* xsr.litbase */
+ case 72:
+ return 22; /* xsr.windowbase */
+ case 73:
+ return 25; /* xsr.windowstart */
+ case 83:
+ return 266; /* xsr.ptevaddr */
+ case 90:
+ return 269; /* xsr.rasid */
+ case 91:
+ return 272; /* xsr.itlbcfg */
+ case 92:
+ return 275; /* xsr.dtlbcfg */
+ case 96:
+ return 218; /* xsr.ibreakenable */
+ case 104:
+ return 230; /* xsr.ddr */
+ case 128:
+ return 212; /* xsr.ibreaka0 */
+ case 129:
+ return 215; /* xsr.ibreaka1 */
+ case 144:
+ return 200; /* xsr.dbreaka0 */
+ case 145:
+ return 206; /* xsr.dbreaka1 */
+ case 160:
+ return 203; /* xsr.dbreakc0 */
+ case 161:
+ return 209; /* xsr.dbreakc1 */
+ case 177:
+ return 141; /* xsr.epc1 */
+ case 178:
+ return 147; /* xsr.epc2 */
+ case 179:
+ return 153; /* xsr.epc3 */
+ case 180:
+ return 159; /* xsr.epc4 */
+ case 192:
+ return 177; /* xsr.depc */
+ case 194:
+ return 165; /* xsr.eps2 */
+ case 195:
+ return 168; /* xsr.eps3 */
+ case 196:
+ return 171; /* xsr.eps4 */
+ case 209:
+ return 144; /* xsr.excsave1 */
+ case 210:
+ return 150; /* xsr.excsave2 */
+ case 211:
+ return 156; /* xsr.excsave3 */
+ case 212:
+ return 162; /* xsr.excsave4 */
+ case 228:
+ return 195; /* xsr.intenable */
+ case 230:
+ return 138; /* xsr.ps */
+ case 232:
+ return 180; /* xsr.exccause */
+ case 233:
+ return 221; /* xsr.debugcause */
+ case 234:
+ return 235; /* xsr.ccount */
+ case 236:
+ return 224; /* xsr.icount */
+ case 237:
+ return 227; /* xsr.icountlevel */
+ case 238:
+ return 174; /* xsr.excvaddr */
+ case 240:
+ return 238; /* xsr.ccompare0 */
+ case 241:
+ return 241; /* xsr.ccompare1 */
+ case 242:
+ return 244; /* xsr.ccompare2 */
+ case 244:
+ return 183; /* xsr.misc0 */
+ case 245:
+ return 186; /* xsr.misc1 */
+ }
+ break;
+ case 8:
+ return 106; /* src */
+ case 9:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 107; /* srl */
+ break;
+ case 10:
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return 105; /* sll */
+ break;
+ case 11:
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return 108; /* sra */
+ break;
+ case 15:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 248; /* lict */
+ case 1:
+ return 250; /* sict */
+ case 2:
+ return 249; /* licw */
+ case 3:
+ return 251; /* sicw */
+ case 8:
+ return 263; /* ldct */
+ case 9:
+ return 262; /* sdct */
+ case 14:
+ if (Field_t_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return 231; /* rfdo */
+ if (Field_t_Slot_inst_get (insn) == 1 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return 232; /* rfdd */
+ break;
+ case 15:
+ return 286; /* ldpte */
+ }
+ break;
+ }
+ break;
+ case 3:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return 125; /* rsr.lbeg */
+ case 1:
+ return 119; /* rsr.lend */
+ case 2:
+ return 122; /* rsr.lcount */
+ case 3:
+ return 128; /* rsr.sar */
+ case 5:
+ return 131; /* rsr.litbase */
+ case 72:
+ return 20; /* rsr.windowbase */
+ case 73:
+ return 23; /* rsr.windowstart */
+ case 83:
+ return 265; /* rsr.ptevaddr */
+ case 90:
+ return 267; /* rsr.rasid */
+ case 91:
+ return 270; /* rsr.itlbcfg */
+ case 92:
+ return 273; /* rsr.dtlbcfg */
+ case 96:
+ return 216; /* rsr.ibreakenable */
+ case 104:
+ return 228; /* rsr.ddr */
+ case 128:
+ return 210; /* rsr.ibreaka0 */
+ case 129:
+ return 213; /* rsr.ibreaka1 */
+ case 144:
+ return 198; /* rsr.dbreaka0 */
+ case 145:
+ return 204; /* rsr.dbreaka1 */
+ case 160:
+ return 201; /* rsr.dbreakc0 */
+ case 161:
+ return 207; /* rsr.dbreakc1 */
+ case 176:
+ return 134; /* rsr.176 */
+ case 177:
+ return 139; /* rsr.epc1 */
+ case 178:
+ return 145; /* rsr.epc2 */
+ case 179:
+ return 151; /* rsr.epc3 */
+ case 180:
+ return 157; /* rsr.epc4 */
+ case 192:
+ return 175; /* rsr.depc */
+ case 194:
+ return 163; /* rsr.eps2 */
+ case 195:
+ return 166; /* rsr.eps3 */
+ case 196:
+ return 169; /* rsr.eps4 */
+ case 208:
+ return 135; /* rsr.208 */
+ case 209:
+ return 142; /* rsr.excsave1 */
+ case 210:
+ return 148; /* rsr.excsave2 */
+ case 211:
+ return 154; /* rsr.excsave3 */
+ case 212:
+ return 160; /* rsr.excsave4 */
+ case 226:
+ return 190; /* rsr.interrupt */
+ case 228:
+ return 193; /* rsr.intenable */
+ case 230:
+ return 136; /* rsr.ps */
+ case 232:
+ return 178; /* rsr.exccause */
+ case 233:
+ return 219; /* rsr.debugcause */
+ case 234:
+ return 233; /* rsr.ccount */
+ case 235:
+ return 187; /* rsr.prid */
+ case 236:
+ return 222; /* rsr.icount */
+ case 237:
+ return 225; /* rsr.icountlevel */
+ case 238:
+ return 172; /* rsr.excvaddr */
+ case 240:
+ return 236; /* rsr.ccompare0 */
+ case 241:
+ return 239; /* rsr.ccompare1 */
+ case 242:
+ return 242; /* rsr.ccompare2 */
+ case 244:
+ return 181; /* rsr.misc0 */
+ case 245:
+ return 184; /* rsr.misc1 */
+ }
+ break;
+ case 1:
+ switch (Field_sr_Slot_inst_get (insn))
+ {
+ case 0:
+ return 126; /* wsr.lbeg */
+ case 1:
+ return 120; /* wsr.lend */
+ case 2:
+ return 123; /* wsr.lcount */
+ case 3:
+ return 129; /* wsr.sar */
+ case 5:
+ return 132; /* wsr.litbase */
+ case 72:
+ return 21; /* wsr.windowbase */
+ case 73:
+ return 24; /* wsr.windowstart */
+ case 83:
+ return 264; /* wsr.ptevaddr */
+ case 90:
+ return 268; /* wsr.rasid */
+ case 91:
+ return 271; /* wsr.itlbcfg */
+ case 92:
+ return 274; /* wsr.dtlbcfg */
+ case 96:
+ return 217; /* wsr.ibreakenable */
+ case 104:
+ return 229; /* wsr.ddr */
+ case 128:
+ return 211; /* wsr.ibreaka0 */
+ case 129:
+ return 214; /* wsr.ibreaka1 */
+ case 144:
+ return 199; /* wsr.dbreaka0 */
+ case 145:
+ return 205; /* wsr.dbreaka1 */
+ case 160:
+ return 202; /* wsr.dbreakc0 */
+ case 161:
+ return 208; /* wsr.dbreakc1 */
+ case 177:
+ return 140; /* wsr.epc1 */
+ case 178:
+ return 146; /* wsr.epc2 */
+ case 179:
+ return 152; /* wsr.epc3 */
+ case 180:
+ return 158; /* wsr.epc4 */
+ case 192:
+ return 176; /* wsr.depc */
+ case 194:
+ return 164; /* wsr.eps2 */
+ case 195:
+ return 167; /* wsr.eps3 */
+ case 196:
+ return 170; /* wsr.eps4 */
+ case 209:
+ return 143; /* wsr.excsave1 */
+ case 210:
+ return 149; /* wsr.excsave2 */
+ case 211:
+ return 155; /* wsr.excsave3 */
+ case 212:
+ return 161; /* wsr.excsave4 */
+ case 226:
+ return 191; /* wsr.intset */
+ case 227:
+ return 192; /* wsr.intclear */
+ case 228:
+ return 194; /* wsr.intenable */
+ case 230:
+ return 137; /* wsr.ps */
+ case 232:
+ return 179; /* wsr.exccause */
+ case 233:
+ return 220; /* wsr.debugcause */
+ case 234:
+ return 234; /* wsr.ccount */
+ case 236:
+ return 223; /* wsr.icount */
+ case 237:
+ return 226; /* wsr.icountlevel */
+ case 238:
+ return 173; /* wsr.excvaddr */
+ case 240:
+ return 237; /* wsr.ccompare0 */
+ case 241:
+ return 240; /* wsr.ccompare1 */
+ case 242:
+ return 243; /* wsr.ccompare2 */
+ case 244:
+ return 182; /* wsr.misc0 */
+ case 245:
+ return 185; /* wsr.misc1 */
+ }
+ break;
+ case 8:
+ return 89; /* moveqz */
+ case 9:
+ return 90; /* movnez */
+ case 10:
+ return 91; /* movltz */
+ case 11:
+ return 92; /* movgez */
+ }
+ break;
+ case 4:
+ case 5:
+ return 76; /* extui */
+ case 9:
+ switch (Field_op2_Slot_inst_get (insn))
+ {
+ case 0:
+ return 18; /* l32e */
+ case 4:
+ return 19; /* s32e */
+ }
+ break;
+ }
+ break;
+ case 1:
+ return 83; /* l32r */
+ case 2:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 84; /* l8ui */
+ case 1:
+ return 80; /* l16ui */
+ case 2:
+ return 82; /* l32i */
+ case 4:
+ return 99; /* s8i */
+ case 5:
+ return 97; /* s16i */
+ case 6:
+ return 98; /* s32i */
+ case 7:
+ switch (Field_t_Slot_inst_get (insn))
+ {
+ case 0:
+ return 258; /* dpfr */
+ case 1:
+ return 259; /* dpfw */
+ case 2:
+ return 260; /* dpfro */
+ case 3:
+ return 261; /* dpfwo */
+ case 4:
+ return 252; /* dhwb */
+ case 5:
+ return 253; /* dhwbi */
+ case 6:
+ return 256; /* dhi */
+ case 7:
+ return 257; /* dii */
+ case 8:
+ switch (Field_op1_Slot_inst_get (insn))
+ {
+ case 4:
+ return 254; /* diwb */
+ case 5:
+ return 255; /* diwbi */
+ }
+ break;
+ case 12:
+ return 245; /* ipf */
+ case 14:
+ return 246; /* ihi */
+ case 15:
+ return 247; /* iii */
+ }
+ break;
+ case 9:
+ return 81; /* l16si */
+ case 10:
+ return 88; /* movi */
+ case 12:
+ return 37; /* addi */
+ case 13:
+ return 38; /* addmi */
+ }
+ break;
+ case 5:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 74; /* call0 */
+ case 1:
+ return 7; /* call4 */
+ case 2:
+ return 6; /* call8 */
+ case 3:
+ return 5; /* call12 */
+ }
+ break;
+ case 6:
+ switch (Field_n_Slot_inst_get (insn))
+ {
+ case 0:
+ return 78; /* j */
+ case 1:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 70; /* beqz */
+ case 1:
+ return 71; /* bnez */
+ case 2:
+ return 73; /* bltz */
+ case 3:
+ return 72; /* bgez */
+ }
+ break;
+ case 2:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 50; /* beqi */
+ case 1:
+ return 51; /* bnei */
+ case 2:
+ return 53; /* blti */
+ case 3:
+ return 52; /* bgei */
+ }
+ break;
+ case 3:
+ switch (Field_m_Slot_inst_get (insn))
+ {
+ case 0:
+ return 11; /* entry */
+ case 1:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 8:
+ return 85; /* loop */
+ case 9:
+ return 86; /* loopnez */
+ case 10:
+ return 87; /* loopgtz */
+ }
+ break;
+ case 2:
+ return 57; /* bltui */
+ case 3:
+ return 56; /* bgeui */
+ }
+ break;
+ }
+ break;
+ case 7:
+ switch (Field_r_Slot_inst_get (insn))
+ {
+ case 0:
+ return 65; /* bnone */
+ case 1:
+ return 58; /* beq */
+ case 2:
+ return 61; /* blt */
+ case 3:
+ return 63; /* bltu */
+ case 4:
+ return 66; /* ball */
+ case 5:
+ return 68; /* bbc */
+ case 6:
+ case 7:
+ return 54; /* bbci */
+ case 8:
+ return 64; /* bany */
+ case 9:
+ return 59; /* bne */
+ case 10:
+ return 60; /* bge */
+ case 11:
+ return 62; /* bgeu */
+ case 12:
+ return 67; /* bnall */
+ case 13:
+ return 69; /* bbs */
+ case 14:
+ case 15:
+ return 55; /* bbsi */
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
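
The decoder above is a pure nested-switch dispatch: it narrows on op0, then op1/op2, then on the r/s/t/m/n sub-fields, and returns the core-internal opcode index (each return's comment names the mnemonic) or XTENSA_UNDEFINED. A minimal usage sketch, assuming the insnbuf typedefs from xtensa-isa.h are in scope; decode_x24_word is a hypothetical helper, not part of the generated file:

    /* Decode the 24-bit slot of an x24-format word already loaded into an
       insnbuf; returns the internal opcode index, or XTENSA_UNDEFINED (-1)
       for an illegal encoding. */
    static int decode_x24_word(uint32_t word)
    {
        xtensa_insnbuf_word buf[1];

        buf[0] = word & 0xffffff;    /* keep only the 24 instruction bits */
        return Slot_inst_decode(buf);
    }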
+
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16b_get (insn))
+ {
+ case 12:
+ switch (Field_i_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 33; /* movi.n */
+ case 1:
+ switch (Field_z_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 28; /* beqz.n */
+ case 1:
+ return 29; /* bnez.n */
+ }
+ break;
+ }
+ break;
+ case 13:
+ switch (Field_r_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 32; /* mov.n */
+ case 15:
+ switch (Field_t_Slot_inst16b_get (insn))
+ {
+ case 0:
+ return 35; /* ret.n */
+ case 1:
+ return 15; /* retw.n */
+ case 2:
+ return 197; /* break.n */
+ case 3:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return 34; /* nop.n */
+ break;
+ case 6:
+ if (Field_s_Slot_inst16b_get (insn) == 0)
+ return 30; /* ill.n */
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ switch (Field_op0_Slot_inst16a_get (insn))
+ {
+ case 8:
+ return 31; /* l32i.n */
+ case 9:
+ return 36; /* s32i.n */
+ case 10:
+ return 26; /* add.n */
+ case 11:
+ return 27; /* addi.n */
+ }
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = ((insn[0] & 0xffff00) >> 8);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff00) | ((slotbuf[0] & 0xffff) << 8);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = ((insn[0] & 0xffff00) >> 8);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff00) | ((slotbuf[0] & 0xffff) << 8);
+}
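
These extract/insert pairs are pure bit-field moves: the x24 slot is the whole 24-bit word, while both 16-bit slots sit in bits 8..23, i.e. the first two bytes of the fetch word on this big-endian core. A worked round trip with an arbitrary illustrative value (not taken from the original file):

    uint32_t insn[1] = { 0x89abcd };              /* arbitrary fetch word */
    uint32_t slot[1];

    Slot_x16a_Format_inst16a_0_get(insn, slot);   /* slot[0] == 0x89ab   */
    Slot_x16a_Format_inst16a_0_set(insn, slot);   /* insn[0] == 0x89abcd */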
+
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
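
Each slots[] row ties a slot name and its format to the extract/insert pair, the per-field getter/setter tables, and the opcode decoder; the trailing string appears to name the opcode used to pad the slot with a no-op, and the empty string for Inst16a suggests that format has no dedicated nop.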
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x800000;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc00000;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x800000) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc00000) == 0x800000)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe00000) == 0xc00000)
+ return 2; /* x16b */
+ return -1;
+}
+
+static int length_table[16] = {
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int op0 = (insn[0] >> 4) & 0xf;
+ return length_table[op0];
+}
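
On this big-endian core the op0 field is the high nibble of the first instruction byte, so length_decoder needs only that byte, while format_decoder tests the same bits after the byte has been assembled into the top of the insnbuf word. A worked example, illustrative only (assumes <assert.h>; both functions are file-local, so such a check would live in this translation unit):

    unsigned char first_byte[1] = { 0xc8 };   /* op0 == 12 */
    uint32_t word[1] = { 0xc80000 };          /* same byte in bits 16..23 */

    assert(length_decoder(first_byte) == 2);  /* length_table[12] == 2 */
    assert(format_decoder(word) == 2);        /* formats[2] is "x16b"  */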
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 1 /* big-endian */,
+ 3 /* insn_size */, 0,
+ 3, formats, format_decoder, length_decoder,
+ 3, slots,
+ 39 /* num_fields */,
+ 70, operands,
+ 235, iclasses,
+ 291, opcodes, 0,
+ 1, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 0, interfaces, 0,
+ 0, funcUnits, 0
+};
diff --git a/target/xtensa/core-sample_controller.c b/target/xtensa/core-sample_controller.c
new file mode 100644
index 000000000..fd5de5576
--- /dev/null
+++ b/target/xtensa/core-sample_controller.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-sample_controller/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_sample_controller
+#include "core-sample_controller/xtensa-modules.c.inc"
+
+static XtensaConfig sample_controller __attribute__((unused)) = {
+ .name = "sample_controller",
+ .gdb_regmap = {
+ .reg = {
+#include "core-sample_controller/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(sample_controller)
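
REGISTER_CORE comes from overlay_tool.h; together with the #define of xtensa_modules above, it lets every core reuse this boilerplate while keeping the generated tables in a per-core namespace. A simplified sketch of what the macro arranges, assuming QEMU's xtensa_register_core() list mechanism (this is not the exact macro expansion):

    /* Constructor runs before main() and links the config into the
       global list that backs QEMU's -cpu lookup for xtensa. */
    static void __attribute__((constructor)) register_sample_controller(void)
    {
        static XtensaConfigList node = { .config = &sample_controller };

        xtensa_register_core(&node);
    }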
diff --git a/target/xtensa/core-sample_controller/core-isa.h b/target/xtensa/core-sample_controller/core-isa.h
new file mode 100644
index 000000000..d53dca866
--- /dev/null
+++ b/target/xtensa/core-sample_controller/core-isa.h
@@ -0,0 +1,642 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2016 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H
+#define XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 0 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_EXCLUSIVE 0 /* L32EX/S32EX instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 0 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_FUSION_VITERBI 0 /* Fusion Viterbi option */
+#define XCHAL_HAVE_FUSION_SOFTDEMAP 0 /* Fusion Soft Bit Demap option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0
+
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+
+#define XCHAL_HAVE_FUSIONG 0 /* FusionG */
+#define XCHAL_HAVE_FUSIONG3 0 /* FusionG3 */
+#define XCHAL_HAVE_FUSIONG_SP_VFPU 0 /* sp_vfpu option on FusionG */
+#define XCHAL_HAVE_FUSIONG_DP_VFPU 0 /* dp_vfpu option on FusionG */
+#define XCHAL_FUSIONG_SIMD32 0 /* simd32 for FusionG */
+
+#define XCHAL_HAVE_PDX 0 /* PDX */
+#define XCHAL_PDX_SIMD32 0 /* simd32 for PDX */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_PDX8 0 /* PDX8 */
+#define XCHAL_HAVE_PDX16 0 /* PDX16 */
+
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BBENEP_SP_VFPU 0 /* sp_vfpu option on BBE-EP */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+#define XCHAL_HAVE_VISION 0 /* Vision P5/P6 */
+#define XCHAL_VISION_SIMD16 0 /* simd16 for Vision P5/P6 */
+#define XCHAL_VISION_TYPE 0 /* Vision P5, P6, or P3 */
+#define XCHAL_VISION_QUAD_MAC_TYPE 0 /* quad_mac option on Vision P6 */
+#define XCHAL_HAVE_VISION_HISTOGRAM 0 /* histogram option on Vision P5/P6 */
+#define XCHAL_HAVE_VISION_SP_VFPU 0 /* sp_vfpu option on Vision P5/P6 */
+#define XCHAL_HAVE_VISION_HP_VFPU 0 /* hp_vfpu option on Vision P6 */
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 1 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 1 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1200004 /* sw version of this header */
+
+#define XCHAL_CORE_ID "sample_controller" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00064D47 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC280DAFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x21064D47 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX7.0.4" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2700 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 4 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 270004 /* major*100+minor */
+#define XCHAL_HW_REL_LX7 1
+#define XCHAL_HW_REL_LX7_0 1
+#define XCHAL_HW_REL_LX7_0_4 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2700 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 4 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 270004 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2700 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 4 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 270004 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 4 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 4 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 2 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 2 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 0 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 0 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 0 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 0 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 0 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound bus present */
+
+#define XCHAL_HAVE_AXI 0 /* AXI bus */
+#define XCHAL_HAVE_AXI_ECC 0 /* ECC on AXI bus */
+#define XCHAL_HAVE_ACELITE 0 /* ACELite bus */
+
+#define XCHAL_HAVE_PIF_WR_RESP 0 /* pif write response */
+#define XCHAL_HAVE_PIF_REQ_ATTR 0 /* pif attribute */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 0
+#define XCHAL_DCACHE_SETWIDTH 0
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 1
+#define XCHAL_DCACHE_WAYS 1
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 1
+#define XCHAL_DCACHE_ACCESS_SIZE 1
+
+#define XCHAL_DCACHE_BANKS 0 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 1 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 2 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+/* Instruction RAM 0: */
+#define XCHAL_INSTRAM0_VADDR 0x40000000 /* virtual address */
+#define XCHAL_INSTRAM0_PADDR 0x40000000 /* physical address */
+#define XCHAL_INSTRAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_INSTRAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_HAVE_INSTRAM0 1
+#define XCHAL_INSTRAM0_HAVE_IDMA 0 /* idma supported by this local memory */
+
+/* Data RAM 0: */
+#define XCHAL_DATARAM0_VADDR 0x3FFE0000 /* virtual address */
+#define XCHAL_DATARAM0_PADDR 0x3FFE0000 /* physical address */
+#define XCHAL_DATARAM0_SIZE 131072 /* size in bytes */
+#define XCHAL_DATARAM0_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_DATARAM0_BANKS 1 /* number of banks */
+#define XCHAL_HAVE_DATARAM0 1
+#define XCHAL_DATARAM0_HAVE_IDMA 0 /* idma supported by this local memory */
+
+/* Data RAM 1: */
+#define XCHAL_DATARAM1_VADDR 0x3FFC0000 /* virtual address */
+#define XCHAL_DATARAM1_PADDR 0x3FFC0000 /* physical address */
+#define XCHAL_DATARAM1_SIZE 131072 /* size in bytes */
+#define XCHAL_DATARAM1_ECC_PARITY 0 /* ECC/parity type, 0=none */
+#define XCHAL_DATARAM1_BANKS 1 /* number of banks */
+#define XCHAL_HAVE_DATARAM1 1
+#define XCHAL_DATARAM1_HAVE_IDMA 0 /* idma supported by this local memory */
+
+#define XCHAL_HAVE_IDMA 0
+#define XCHAL_HAVE_IDMA_TRANSPOSE 0
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
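
The ANDBELOW masks are cumulative: each one is the OR of the per-level masks up to that level (level 6 adds nothing because XCHAL_INTLEVEL6_MASK is empty). A hypothetical compile-time consistency check, not part of the original header (uses C11 _Static_assert):

    _Static_assert((XCHAL_INTLEVEL1_MASK | XCHAL_INTLEVEL2_MASK)
                   == XCHAL_INTLEVEL2_ANDBELOW_MASK, "level 2 is cumulative");
    _Static_assert((XCHAL_INTLEVEL2_ANDBELOW_MASK | XCHAL_INTLEVEL3_MASK)
                   == XCHAL_INTLEVEL3_ANDBELOW_MASK, "level 3 is cumulative");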
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_DONE 0x00000000
+#define XCHAL_INTTYPE_MASK_IDMA_ERR 0x00000000
+#define XCHAL_INTTYPE_MASK_GS_ERR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 4) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT15_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT16_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 15 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 16 /* (intlevel 3) */
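
Because the pin-to-interrupt mapping is not an identity (pins 6..16 skip over the software and timer interrupts), code that raises a specific BInterrupt pin needs a lookup. A hypothetical table built from the macros above (extint_to_core is an illustrative name, not part of the header):

    static const int extint_to_core[XCHAL_NUM_EXTINTERRUPTS] = {
        XCHAL_EXTINT0_NUM,  XCHAL_EXTINT1_NUM,  XCHAL_EXTINT2_NUM,
        XCHAL_EXTINT3_NUM,  XCHAL_EXTINT4_NUM,  XCHAL_EXTINT5_NUM,
        XCHAL_EXTINT6_NUM,  XCHAL_EXTINT7_NUM,  XCHAL_EXTINT8_NUM,
        XCHAL_EXTINT9_NUM,  XCHAL_EXTINT10_NUM, XCHAL_EXTINT11_NUM,
        XCHAL_EXTINT12_NUM, XCHAL_EXTINT13_NUM, XCHAL_EXTINT14_NUM,
        XCHAL_EXTINT15_NUM, XCHAL_EXTINT16_NUM,
    };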
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x40000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x40000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR0_PADDR 0x50000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x40000400
+#define XCHAL_RESET_VECTOR1_PADDR 0x40000400
+#define XCHAL_RESET_VECTOR_VADDR 0x50000000
+#define XCHAL_RESET_VECTOR_PADDR 0x50000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x40000340
+#define XCHAL_USER_VECTOR_PADDR 0x40000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x40000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x40000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x400003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x400003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x40000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x40000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x40000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x40000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x400001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x400001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x40000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x40000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x40000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x40000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x40000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x40000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x400002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x400002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 0 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 0 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 0 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 0 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 1 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 1 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 0 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+
+/* If none of the above last 5 are set, it's a custom TLB configuration. */
+
+#define XCHAL_MMU_ASID_BITS 0 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 1 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 0 /* num of bits in RING field */
+
+/*----------------------------------------------------------------------
+ MPU
+ ----------------------------------------------------------------------*/
+#define XCHAL_HAVE_MPU 0
+#define XCHAL_MPU_ENTRIES 0
+
+#define XCHAL_MPU_ALIGN_REQ 1 /* MPU requires alignment of entries to background map */
+#define XCHAL_MPU_BACKGROUND_ENTRIES 0 /* number of entries in bg map*/
+#define XCHAL_MPU_BG_CACHEADRDIS 0 /* default CACHEADRDIS for bg */
+
+#define XCHAL_MPU_ALIGN_BITS 0
+#define XCHAL_MPU_ALIGN 0
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_CORE_SAMPLE_CONTROLLER_CORE_ISA_H */
diff --git a/target/xtensa/core-sample_controller/gdb-config.c.inc b/target/xtensa/core-sample_controller/gdb-config.c.inc
new file mode 100644
index 000000000..99e172d81
--- /dev/null
+++ b/target/xtensa/core-sample_controller/gdb-config.c.inc
@@ -0,0 +1,141 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2016 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 34,136, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 35,140, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 36,144,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,configid0, 0,0,0,0,0,0)
+ XTREG( 37,148,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,configid1, 0,0,0,0,0,0)
+ XTREG( 38,152,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 39,156,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x03e6,0x000e,-1, 3,0x0110,expstate, 0,0,0,0,0,0)
+ XTREG( 41,164,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0)
+ XTREG( 42,168, 2, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0)
+ XTREG( 43,172, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG( 44,176,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0)
+ XTREG( 46,184,32, 4, 4,0x0281,0x0007,-2, 2,0x1000,ibreaka1, 0,0,0,0,0,0)
+ XTREG( 47,188,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0)
+ XTREG( 48,192,32, 4, 4,0x0291,0x0007,-2, 2,0x1000,dbreaka1, 0,0,0,0,0,0)
+ XTREG( 49,196,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0)
+ XTREG( 50,200,32, 4, 4,0x02a1,0x0007,-2, 2,0x1000,dbreakc1, 0,0,0,0,0,0)
+ XTREG( 51,204,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG( 52,208,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG( 53,212,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0)
+ XTREG( 54,216,32, 4, 4,0x02b4,0x0007,-2, 2,0x1000,epc4, 0,0,0,0,0,0)
+ XTREG( 55,220,32, 4, 4,0x02b5,0x0007,-2, 2,0x1000,epc5, 0,0,0,0,0,0)
+ XTREG( 56,224,32, 4, 4,0x02b6,0x0007,-2, 2,0x1000,epc6, 0,0,0,0,0,0)
+ XTREG( 57,228,32, 4, 4,0x02b7,0x0007,-2, 2,0x1000,epc7, 0,0,0,0,0,0)
+ XTREG( 58,232,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG( 59,236,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG( 60,240,19, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0)
+ XTREG( 61,244,19, 4, 4,0x02c4,0x0007,-2, 2,0x1000,eps4, 0,0,0,0,0,0)
+ XTREG( 62,248,19, 4, 4,0x02c5,0x0007,-2, 2,0x1000,eps5, 0,0,0,0,0,0)
+ XTREG( 63,252,19, 4, 4,0x02c6,0x0007,-2, 2,0x1000,eps6, 0,0,0,0,0,0)
+ XTREG( 64,256,19, 4, 4,0x02c7,0x0007,-2, 2,0x1000,eps7, 0,0,0,0,0,0)
+ XTREG( 65,260,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG( 66,264,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG( 67,268,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0)
+ XTREG( 68,272,32, 4, 4,0x02d4,0x0007,-2, 2,0x1000,excsave4, 0,0,0,0,0,0)
+ XTREG( 69,276,32, 4, 4,0x02d5,0x0007,-2, 2,0x1000,excsave5, 0,0,0,0,0,0)
+ XTREG( 70,280,32, 4, 4,0x02d6,0x0007,-2, 2,0x1000,excsave6, 0,0,0,0,0,0)
+ XTREG( 71,284,32, 4, 4,0x02d7,0x0007,-2, 2,0x1000,excsave7, 0,0,0,0,0,0)
+ XTREG( 72,288,22, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG( 73,292,22, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG( 74,296,22, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG( 75,300,22, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG( 76,304,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG( 77,308, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG( 78,312,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG( 79,316,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG( 80,320,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG( 81,324,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG( 82,328, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG( 83,332,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG( 84,336,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG( 85,340,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG( 86,344,32, 4, 4,0x02f2,0x000f,-2, 2,0x1000,ccompare2, 0,0,0,0,0,0)
+ XTREG( 87,348,32, 4, 4,0x02f4,0x0007,-2, 2,0x1000,misc0, 0,0,0,0,0,0)
+ XTREG( 88,352,32, 4, 4,0x02f5,0x0007,-2, 2,0x1000,misc1, 0,0,0,0,0,0)
+ XTREG( 89,356,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0)
+ XTREG( 90,360,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0)
+ XTREG( 91,364,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0)
+ XTREG( 92,368,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0)
+ XTREG( 93,372,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0)
+ XTREG( 94,376,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0)
+ XTREG( 95,380,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0)
+ XTREG( 96,384,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0)
+ XTREG( 97,388,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0)
+ XTREG( 98,392,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0)
+ XTREG( 99,396,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0)
+ XTREG(100,400,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0)
+ XTREG(101,404,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0)
+ XTREG(102,408,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0)
+ XTREG(103,412,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0)
+ XTREG(104,416,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0)
+ XTREG(105,420, 4, 4, 4,0x2008,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(106,424, 1, 4, 4,0x2009,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(107,428, 1, 4, 4,0x200a,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(108,432, 1, 4, 4,0x200b,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(109,436, 2, 4, 4,0x200c,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(110,440, 4, 4, 4,0x200d,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG_END
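gdb-config.c.inc is an X-macro table: the includer defines XTREG()/XTREG_END
before including it, and each row expands in place. A hedged consumer sketch
follows; the parameter names are assumptions for illustration, and only the
column order matches the rows above:

#define XTREG(idx, ofs, bits, sz, align, tnum, flags, cp, ty, gr, name, \
              a1, a2, a3, a4, a5, a6) \
    { #name, (idx), (bits) },
#define XTREG_END { NULL, 0, 0 },
static const struct { const char *name; int idx; int bits; } gdb_regs[] = {
#include "core-sample_controller/gdb-config.c.inc"
};
#undef XTREG
#undef XTREG_END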
diff --git a/target/xtensa/core-sample_controller/xtensa-modules.c.inc b/target/xtensa/core-sample_controller/xtensa-modules.c.inc
new file mode 100644
index 000000000..7e87d216b
--- /dev/null
+++ b/target/xtensa/core-sample_controller/xtensa-modules.c.inc
@@ -0,0 +1,11366 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2016 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "CONFIGID0", 176, 0 },
+ { "CONFIGID1", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 55
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 230
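A hedged lookup sketch over the table above, assuming the binutils-style
xtensa_sysreg_internal layout (name, number, is_user) from
xtensa-isa-internal.h and that strcmp() is available via qemu/osdep.h:

static int find_sysreg_number(const char *name, int is_user)
{
    int i;

    for (i = 0; i < NUM_SYSREGS; i++) {
        if (sysregs[i].is_user == is_user &&
            strcmp(sysregs[i].name, name) == 0) {
            return sysregs[i].number;   /* e.g. ("PS", 0) -> 230 */
        }
    }
    return -1;                          /* not a sysreg of this core */
}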
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 13, 0 },
+ { "EPS3", 13, 0 },
+ { "EPS4", 13, 0 },
+ { "EPS5", 13, 0 },
+ { "EPS6", 13, 0 },
+ { "EPS7", 13, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "SAR", 6, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 59
+
+enum xtensa_state_id {
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EPC7,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EXCSAVE7,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EPS7,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_SAR,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CCOMPARE2,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_EXPSTATE
+};
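The STATE_* enumerators are positional indices into states[], so the enum and
the array must stay in lock step. An illustrative build-time check (not in
the generated file, assuming QEMU's ARRAY_SIZE/QEMU_BUILD_BUG_ON helpers):

QEMU_BUILD_BUG_ON(ARRAY_SIZE(states) != NUM_STATES);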
+
+
+/* Field definitions. */
+
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
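Each generated accessor is a plain bitfield extract or insert written as a
shift pair: for a 32-bit word, (w << l) >> r reads the (32 - r)-bit field
whose least significant bit sits at bit (r - l). An equivalent mask-based
sketch (illustrative; uses the file's local uint32 typedef):

static inline unsigned extract_field(uint32 w, unsigned l, unsigned r)
{
    return (w >> (r - l)) & ((1u << (32 - r)) - 1);   /* == (w << l) >> r */
}

/* Field_t above is extract_field(insn[0], 24, 28), i.e. bits [7:4]. */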
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
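sr is a composite field: the getter concatenates the r nibble (bits [15:12])
and the s nibble (bits [11:8]). A one-line equivalence sketch (illustrative):

static unsigned sr_composite(const xtensa_insnbuf insn)
{
    return (Field_r_Slot_inst_get(insn) << 4) | Field_s_Slot_inst_get(insn);
}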
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
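A reading note (assumption, not stated in this patch): implicit fields model
operands fixed by the opcode itself rather than encoded in instruction bits;
the 0/4/8/12 values match the register-window rotations of the windowed
CALL0/CALL4/CALL8/CALL12 conventions, so Implicit_Field_ar8_get() yields AR
index 8 regardless of the instruction word.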
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_bitindex,
+ FIELD_s3to1,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12
+};
+
+
+/* Functional units. */
+
+#define funcUnits 0
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 }
+};
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "IMPWIRE", 32, 0, 0, 'i' }
+};
+
+enum xtensa_interface_id {
+ INTERFACE_IMPWIRE
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+
+/* Instruction operands. */
+
+static int
+OperandSem_opnd_sem_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_out_0;
+ unsigned soffsetx4_in_0;
+ soffsetx4_in_0 = *valp & 0x3ffff;
+ soffsetx4_out_0 = 0x4 + ((((int) soffsetx4_in_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_encode (uint32 *valp)
+{
+ unsigned soffsetx4_in_0;
+ unsigned soffsetx4_out_0;
+ soffsetx4_out_0 = *valp;
+ soffsetx4_in_0 = ((soffsetx4_out_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = soffsetx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_out_0;
+ unsigned uimm12x8_in_0;
+ uimm12x8_in_0 = *valp & 0xfff;
+ uimm12x8_out_0 = uimm12x8_in_0 << 3;
+ *valp = uimm12x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_encode (uint32 *valp)
+{
+ unsigned uimm12x8_in_0;
+ unsigned uimm12x8_out_0;
+ uimm12x8_out_0 = *valp;
+ uimm12x8_in_0 = ((uimm12x8_out_0 >> 3) & 0xfff);
+ *valp = uimm12x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_out_0;
+ unsigned simm4_in_0;
+ simm4_in_0 = *valp & 0xf;
+ simm4_out_0 = ((int) simm4_in_0 << 28) >> 28;
+ *valp = simm4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_encode (uint32 *valp)
+{
+ unsigned simm4_in_0;
+ unsigned simm4_out_0;
+ simm4_out_0 = *valp;
+ simm4_in_0 = (simm4_out_0 & 0xf);
+ *valp = simm4_in_0;
+ return 0;
+}
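The decode above is the standard shift-pair sign extension of a 4-bit field.
A minimal sketch, assuming arithmetic right shift of signed values (which the
generated code itself relies on):

static inline int sext4(uint32 v)
{
    return ((int) (v << 28)) >> 28;   /* sext4(0xf) == -1, sext4(0x7) == 7 */
}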
+
+static int
+OperandSem_opnd_sem_AR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
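The encode hooks return nonzero on failure; the AR variants reject indices
outside this core's 32-entry register file. A usage sketch (illustrative):

static int ar_operand_is_valid(uint32 v)
{
    /* nonzero return == encode error, e.g. v == 33 is rejected */
    return OperandSem_opnd_sem_AR_encode(&v) == 0;
}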
+
+static int
+OperandSem_opnd_sem_AR_0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_AR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_encode (uint32 *valp)
+{
+ return (*valp >= 32);
+}
+
+static int
+OperandSem_opnd_sem_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_out_0;
+ unsigned immrx4_in_0;
+ immrx4_in_0 = *valp & 0xf;
+ immrx4_out_0 = (((0xfffffff) << 4) | immrx4_in_0) << 2;
+ *valp = immrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_encode (uint32 *valp)
+{
+ unsigned immrx4_in_0;
+ unsigned immrx4_out_0;
+ immrx4_out_0 = *valp;
+ immrx4_in_0 = ((immrx4_out_0 >> 2) & 0xf);
+ *valp = immrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_out_0;
+ unsigned lsi4x4_in_0;
+ lsi4x4_in_0 = *valp & 0xf;
+ lsi4x4_out_0 = lsi4x4_in_0 << 2;
+ *valp = lsi4x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_encode (uint32 *valp)
+{
+ unsigned lsi4x4_in_0;
+ unsigned lsi4x4_out_0;
+ lsi4x4_out_0 = *valp;
+ lsi4x4_in_0 = ((lsi4x4_out_0 >> 2) & 0xf);
+ *valp = lsi4x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_out_0;
+ unsigned simm7_in_0;
+ simm7_in_0 = *valp & 0x7f;
+ simm7_out_0 = ((((-((((simm7_in_0 >> 6) & 1)) & (((simm7_in_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | simm7_in_0;
+ *valp = simm7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_encode (uint32 *valp)
+{
+ unsigned simm7_in_0;
+ unsigned simm7_out_0;
+ simm7_out_0 = *valp;
+ simm7_in_0 = (simm7_out_0 & 0x7f);
+ *valp = simm7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_out_0;
+ unsigned uimm6_in_0;
+ uimm6_in_0 = *valp & 0x3f;
+ uimm6_out_0 = 0x4 + (((0) << 6) | uimm6_in_0);
+ *valp = uimm6_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_encode (uint32 *valp)
+{
+ unsigned uimm6_in_0;
+ unsigned uimm6_out_0;
+ uimm6_out_0 = *valp;
+ uimm6_in_0 = (uimm6_out_0 - 0x4) & 0x3f;
+ *valp = uimm6_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_out_0;
+ unsigned ai4const_in_0;
+ ai4const_in_0 = *valp & 0xf;
+ ai4const_out_0 = CONST_TBL_ai4c_0[ai4const_in_0 & 0xf];
+ *valp = ai4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_encode (uint32 *valp)
+{
+ unsigned ai4const_in_0;
+ unsigned ai4const_out_0;
+ ai4const_out_0 = *valp;
+ switch (ai4const_out_0)
+ {
+ case 0xffffffff: ai4const_in_0 = 0; break;
+ case 0x1: ai4const_in_0 = 0x1; break;
+ case 0x2: ai4const_in_0 = 0x2; break;
+ case 0x3: ai4const_in_0 = 0x3; break;
+ case 0x4: ai4const_in_0 = 0x4; break;
+ case 0x5: ai4const_in_0 = 0x5; break;
+ case 0x6: ai4const_in_0 = 0x6; break;
+ case 0x7: ai4const_in_0 = 0x7; break;
+ case 0x8: ai4const_in_0 = 0x8; break;
+ case 0x9: ai4const_in_0 = 0x9; break;
+ case 0xa: ai4const_in_0 = 0xa; break;
+ case 0xb: ai4const_in_0 = 0xb; break;
+ case 0xc: ai4const_in_0 = 0xc; break;
+ case 0xd: ai4const_in_0 = 0xd; break;
+ case 0xe: ai4const_in_0 = 0xe; break;
+ default: ai4const_in_0 = 0xf; break;
+ }
+ *valp = ai4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_out_0;
+ unsigned b4const_in_0;
+ b4const_in_0 = *valp & 0xf;
+ b4const_out_0 = CONST_TBL_b4c_0[b4const_in_0 & 0xf];
+ *valp = b4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_encode (uint32 *valp)
+{
+ unsigned b4const_in_0;
+ unsigned b4const_out_0;
+ b4const_out_0 = *valp;
+ switch (b4const_out_0)
+ {
+ case 0xffffffff: b4const_in_0 = 0; break;
+ case 0x1: b4const_in_0 = 0x1; break;
+ case 0x2: b4const_in_0 = 0x2; break;
+ case 0x3: b4const_in_0 = 0x3; break;
+ case 0x4: b4const_in_0 = 0x4; break;
+ case 0x5: b4const_in_0 = 0x5; break;
+ case 0x6: b4const_in_0 = 0x6; break;
+ case 0x7: b4const_in_0 = 0x7; break;
+ case 0x8: b4const_in_0 = 0x8; break;
+ case 0xa: b4const_in_0 = 0x9; break;
+ case 0xc: b4const_in_0 = 0xa; break;
+ case 0x10: b4const_in_0 = 0xb; break;
+ case 0x20: b4const_in_0 = 0xc; break;
+ case 0x40: b4const_in_0 = 0xd; break;
+ case 0x80: b4const_in_0 = 0xe; break;
+ default: b4const_in_0 = 0xf; break;
+ }
+ *valp = b4const_in_0;
+ return 0;
+}
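b4const indirects through CONST_TBL_b4c_0, mapping the 4-bit field onto the
branch-immediate constants (-1, 1..8, 10, 12, 16, 32, 64, 128, 256); encode
is the inverse table walk, so the pair is the identity on field values. A
round-trip sketch (illustrative):

static void b4const_round_trip(void)
{
    uint32 v = 9;

    OperandSem_opnd_sem_b4const_decode(&v);   /* v == 0xa (CONST_TBL_b4c_0[9]) */
    OperandSem_opnd_sem_b4const_encode(&v);   /* v == 9 again */
}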
+
+static int
+OperandSem_opnd_sem_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_out_0;
+ unsigned b4constu_in_0;
+ b4constu_in_0 = *valp & 0xf;
+ b4constu_out_0 = CONST_TBL_b4cu_0[b4constu_in_0 & 0xf];
+ *valp = b4constu_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_encode (uint32 *valp)
+{
+ unsigned b4constu_in_0;
+ unsigned b4constu_out_0;
+ b4constu_out_0 = *valp;
+ switch (b4constu_out_0)
+ {
+ case 0x8000: b4constu_in_0 = 0; break;
+ case 0x10000: b4constu_in_0 = 0x1; break;
+ case 0x2: b4constu_in_0 = 0x2; break;
+ case 0x3: b4constu_in_0 = 0x3; break;
+ case 0x4: b4constu_in_0 = 0x4; break;
+ case 0x5: b4constu_in_0 = 0x5; break;
+ case 0x6: b4constu_in_0 = 0x6; break;
+ case 0x7: b4constu_in_0 = 0x7; break;
+ case 0x8: b4constu_in_0 = 0x8; break;
+ case 0xa: b4constu_in_0 = 0x9; break;
+ case 0xc: b4constu_in_0 = 0xa; break;
+ case 0x10: b4constu_in_0 = 0xb; break;
+ case 0x20: b4constu_in_0 = 0xc; break;
+ case 0x40: b4constu_in_0 = 0xd; break;
+ case 0x80: b4constu_in_0 = 0xe; break;
+ default: b4constu_in_0 = 0xf; break;
+ }
+ *valp = b4constu_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_out_0;
+ unsigned uimm8_in_0;
+ uimm8_in_0 = *valp & 0xff;
+ uimm8_out_0 = uimm8_in_0;
+ *valp = uimm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_encode (uint32 *valp)
+{
+ unsigned uimm8_in_0;
+ unsigned uimm8_out_0;
+ uimm8_out_0 = *valp;
+ uimm8_in_0 = (uimm8_out_0 & 0xff);
+ *valp = uimm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_out_0;
+ unsigned uimm8x2_in_0;
+ uimm8x2_in_0 = *valp & 0xff;
+ uimm8x2_out_0 = uimm8x2_in_0 << 1;
+ *valp = uimm8x2_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_encode (uint32 *valp)
+{
+ unsigned uimm8x2_in_0;
+ unsigned uimm8x2_out_0;
+ uimm8x2_out_0 = *valp;
+ uimm8x2_in_0 = ((uimm8x2_out_0 >> 1) & 0xff);
+ *valp = uimm8x2_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_out_0;
+ unsigned uimm8x4_in_0;
+ uimm8x4_in_0 = *valp & 0xff;
+ uimm8x4_out_0 = uimm8x4_in_0 << 2;
+ *valp = uimm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_encode (uint32 *valp)
+{
+ unsigned uimm8x4_in_0;
+ unsigned uimm8x4_out_0;
+ uimm8x4_out_0 = *valp;
+ uimm8x4_in_0 = ((uimm8x4_out_0 >> 2) & 0xff);
+ *valp = uimm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_out_0;
+ unsigned uimm4x16_in_0;
+ uimm4x16_in_0 = *valp & 0xf;
+ uimm4x16_out_0 = uimm4x16_in_0 << 4;
+ *valp = uimm4x16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_encode (uint32 *valp)
+{
+ unsigned uimm4x16_in_0;
+ unsigned uimm4x16_out_0;
+ uimm4x16_out_0 = *valp;
+ uimm4x16_in_0 = ((uimm4x16_out_0 >> 4) & 0xf);
+ *valp = uimm4x16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_decode (uint32 *valp)
+{
+ unsigned uimmrx4_out_0;
+ unsigned uimmrx4_in_0;
+ uimmrx4_in_0 = *valp & 0xf;
+ uimmrx4_out_0 = uimmrx4_in_0 << 2;
+ *valp = uimmrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimmrx4_encode (uint32 *valp)
+{
+ unsigned uimmrx4_in_0;
+ unsigned uimmrx4_out_0;
+ uimmrx4_out_0 = *valp;
+ uimmrx4_in_0 = ((uimmrx4_out_0 >> 2) & 0xf);
+ *valp = uimmrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_out_0;
+ unsigned simm8_in_0;
+ simm8_in_0 = *valp & 0xff;
+ simm8_out_0 = ((int) simm8_in_0 << 24) >> 24;
+ *valp = simm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_encode (uint32 *valp)
+{
+ unsigned simm8_in_0;
+ unsigned simm8_out_0;
+ simm8_out_0 = *valp;
+ simm8_in_0 = (simm8_out_0 & 0xff);
+ *valp = simm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_out_0;
+ unsigned simm8x256_in_0;
+ simm8x256_in_0 = *valp & 0xff;
+ simm8x256_out_0 = (((int) simm8x256_in_0 << 24) >> 24) << 8;
+ *valp = simm8x256_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_encode (uint32 *valp)
+{
+ unsigned simm8x256_in_0;
+ unsigned simm8x256_out_0;
+ simm8x256_out_0 = *valp;
+ simm8x256_in_0 = ((simm8x256_out_0 >> 8) & 0xff);
+ *valp = simm8x256_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_out_0;
+ unsigned simm12b_in_0;
+ simm12b_in_0 = *valp & 0xfff;
+ simm12b_out_0 = ((int) simm12b_in_0 << 20) >> 20;
+ *valp = simm12b_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_encode (uint32 *valp)
+{
+ unsigned simm12b_in_0;
+ unsigned simm12b_out_0;
+ simm12b_out_0 = *valp;
+ simm12b_in_0 = (simm12b_out_0 & 0xfff);
+ *valp = simm12b_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_out_0;
+ unsigned msalp32_in_0;
+ msalp32_in_0 = *valp & 0x1f;
+ msalp32_out_0 = 0x20 - msalp32_in_0;
+ *valp = msalp32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_encode (uint32 *valp)
+{
+ unsigned msalp32_in_0;
+ unsigned msalp32_out_0;
+ msalp32_out_0 = *valp;
+ msalp32_in_0 = (0x20 - msalp32_out_0) & 0x1f;
+ *valp = msalp32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_out_0;
+ unsigned op2p1_in_0;
+ op2p1_in_0 = *valp & 0xf;
+ op2p1_out_0 = op2p1_in_0 + 0x1;
+ *valp = op2p1_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_encode (uint32 *valp)
+{
+ unsigned op2p1_in_0;
+ unsigned op2p1_out_0;
+ op2p1_out_0 = *valp;
+ op2p1_in_0 = (op2p1_out_0 - 0x1) & 0xf;
+ *valp = op2p1_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_decode (uint32 *valp)
+{
+ unsigned label8_out_0;
+ unsigned label8_in_0;
+ label8_in_0 = *valp & 0xff;
+ label8_out_0 = 0x4 + (((int) label8_in_0 << 24) >> 24);
+ *valp = label8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_encode (uint32 *valp)
+{
+ unsigned label8_in_0;
+ unsigned label8_out_0;
+ label8_out_0 = *valp;
+ label8_in_0 = (label8_out_0 - 0x4) & 0xff;
+ *valp = label8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_decode (uint32 *valp)
+{
+ unsigned label12_out_0;
+ unsigned label12_in_0;
+ label12_in_0 = *valp & 0xfff;
+ label12_out_0 = 0x4 + (((int) label12_in_0 << 20) >> 20);
+ *valp = label12_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_encode (uint32 *valp)
+{
+ unsigned label12_in_0;
+ unsigned label12_out_0;
+ label12_out_0 = *valp;
+ label12_in_0 = (label12_out_0 - 0x4) & 0xfff;
+ *valp = label12_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_out_0;
+ unsigned soffset_in_0;
+ soffset_in_0 = *valp & 0x3ffff;
+ soffset_out_0 = 0x4 + (((int) soffset_in_0 << 14) >> 14);
+ *valp = soffset_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_encode (uint32 *valp)
+{
+ unsigned soffset_in_0;
+ unsigned soffset_out_0;
+ soffset_out_0 = *valp;
+ soffset_in_0 = (soffset_out_0 - 0x4) & 0x3ffff;
+ *valp = soffset_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_out_0;
+ unsigned uimm16x4_in_0;
+ uimm16x4_in_0 = *valp & 0xffff;
+ uimm16x4_out_0 = (((0xffff) << 16) | uimm16x4_in_0) << 2;
+ *valp = uimm16x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_encode (uint32 *valp)
+{
+ unsigned uimm16x4_in_0;
+ unsigned uimm16x4_out_0;
+ uimm16x4_out_0 = *valp;
+ uimm16x4_in_0 = (uimm16x4_out_0 >> 2) & 0xffff;
+ *valp = uimm16x4_in_0;
+ return 0;
+}
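Despite the "uimm" name, the decode above ORs in 0xffff << 16 before scaling,
so the result is always a negative, word-aligned offset in [-262144, -4]
(reading this operand as backing L32R-style literal addressing is an
assumption, not stated in this patch):

    in == 0xffff  ->  ((0xffff0000 | 0xffff) << 2) == (uint32) -4
    in == 0x0000  ->  (0xffff0000 << 2)            == (uint32) -262144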
+
+static int
+OperandSem_opnd_sem_bbi_decode (uint32 *valp)
+{
+ unsigned bbi_out_0;
+ unsigned bbi_in_0;
+ bbi_in_0 = *valp & 0x1f;
+ bbi_out_0 = (0 << 5) | bbi_in_0;
+ *valp = bbi_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_encode (uint32 *valp)
+{
+ unsigned bbi_in_0;
+ unsigned bbi_out_0;
+ bbi_out_0 = *valp;
+ bbi_in_0 = (bbi_out_0 & 0x1f);
+ *valp = bbi_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_decode (uint32 *valp)
+{
+ unsigned s_out_0;
+ unsigned s_in_0;
+ s_in_0 = *valp & 0xf;
+ s_out_0 = (0 << 4) | s_in_0;
+ *valp = s_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_encode (uint32 *valp)
+{
+ unsigned s_in_0;
+ unsigned s_out_0;
+ s_out_0 = *valp;
+ s_in_0 = (s_out_0 & 0xf);
+ *valp = s_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_decode (uint32 *valp)
+{
+ unsigned immt_out_0;
+ unsigned immt_in_0;
+ immt_in_0 = *valp & 0xf;
+ immt_out_0 = immt_in_0;
+ *valp = immt_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_encode (uint32 *valp)
+{
+ unsigned immt_in_0;
+ unsigned immt_out_0;
+ immt_out_0 = *valp;
+ immt_in_0 = immt_out_0 & 0xf;
+ *valp = immt_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_out_0;
+ unsigned tp7_in_0;
+ tp7_in_0 = *valp & 0xf;
+ tp7_out_0 = tp7_in_0 + 0x7;
+ *valp = tp7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_encode (uint32 *valp)
+{
+ unsigned tp7_in_0;
+ unsigned tp7_out_0;
+ tp7_out_0 = *valp;
+ tp7_in_0 = (tp7_out_0 - 0x7) & 0xf;
+ *valp = tp7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_out_0;
+ unsigned xt_wbr15_label_in_0;
+ xt_wbr15_label_in_0 = *valp & 0x7fff;
+ xt_wbr15_label_out_0 = 0x4 + (((int) xt_wbr15_label_in_0 << 17) >> 17);
+ *valp = xt_wbr15_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_in_0;
+ unsigned xt_wbr15_label_out_0;
+ xt_wbr15_label_out_0 = *valp;
+ xt_wbr15_label_in_0 = (xt_wbr15_label_out_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_out_0;
+ unsigned xt_wbr18_label_in_0;
+ xt_wbr18_label_in_0 = *valp & 0x3ffff;
+ xt_wbr18_label_out_0 = 0x4 + (((int) xt_wbr18_label_in_0 << 14) >> 14);
+ *valp = xt_wbr18_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr18_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr18_label_in_0;
+ unsigned xt_wbr18_label_out_0;
+ xt_wbr18_label_out_0 = *valp;
+ xt_wbr18_label_in_0 = (xt_wbr18_label_out_0 - 0x4) & 0x3ffff;
+ *valp = xt_wbr18_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_decode (uint32 *valp)
+{
+ unsigned bitindex_out_0;
+ unsigned bitindex_in_0;
+ bitindex_in_0 = *valp & 0x1f;
+ bitindex_out_0 = (0 << 5) | bitindex_in_0;
+ *valp = bitindex_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bitindex_encode (uint32 *valp)
+{
+ unsigned bitindex_in_0;
+ unsigned bitindex_out_0;
+ bitindex_out_0 = *valp;
+ bitindex_in_0 = (bitindex_out_0 & 0x1f);
+ *valp = bitindex_in_0;
+ return 0;
+}
+
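+/*
+ * The *_ator/*_rtoa helpers convert PC-relative operands between
+ * absolute and relative form by subtracting/adding the PC.  soffsetx4
+ * (used by CALLn) masks the PC down to a 4-byte boundary first, and
+ * uimm16x4 (L32R) rounds it up with (pc + 3) & ~3.
+ */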
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
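+/*
+ * Operand table.  Each row is a xtensa_operand_internal (per
+ * xtensa-isa-internal.h): name, field, register file (-1 for
+ * immediates), registers per operand, flags, then the encode/decode
+ * and ator/rtoa hooks (0 where unused).  The trailing all-zero rows
+ * expose raw instruction fields as operands.
+ */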
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffsetx4_encode, OperandSem_opnd_sem_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm12x8_encode, OperandSem_opnd_sem_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm4_encode, OperandSem_opnd_sem_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_0_encode, OperandSem_opnd_sem_AR_0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_1_encode, OperandSem_opnd_sem_AR_1_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_2_encode, OperandSem_opnd_sem_AR_2_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_3_encode, OperandSem_opnd_sem_AR_3_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_4_encode, OperandSem_opnd_sem_AR_4_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immrx4_encode, OperandSem_opnd_sem_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm7_encode, OperandSem_opnd_sem_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm6_encode, OperandSem_opnd_sem_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ai4const_encode, OperandSem_opnd_sem_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4const_encode, OperandSem_opnd_sem_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4constu_encode, OperandSem_opnd_sem_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8_encode, OperandSem_opnd_sem_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x2_encode, OperandSem_opnd_sem_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x4_encode, OperandSem_opnd_sem_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm4x16_encode, OperandSem_opnd_sem_uimm4x16_decode,
+ 0, 0 },
+ { "uimmrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimmrx4_encode, OperandSem_opnd_sem_uimmrx4_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8_encode, OperandSem_opnd_sem_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8x256_encode, OperandSem_opnd_sem_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm12b_encode, OperandSem_opnd_sem_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ OperandSem_opnd_sem_msalp32_encode, OperandSem_opnd_sem_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_op2p1_encode, OperandSem_opnd_sem_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label8_encode, OperandSem_opnd_sem_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label12_encode, OperandSem_opnd_sem_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm16x4_encode, OperandSem_opnd_sem_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "bbi", FIELD_bbi, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sae", FIELD_sae, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sas", FIELD_sas, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_s_encode, OperandSem_opnd_sem_s_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_tp7_encode, OperandSem_opnd_sem_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr15_label_encode, OperandSem_opnd_sem_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr18_label_encode, OperandSem_opnd_sem_xt_wbr18_label_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "bitindex", FIELD_bitindex, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bitindex_encode, OperandSem_opnd_sem_bitindex_decode,
+ 0, 0 },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", FIELD_s3to1, -1, 0, 0, 0, 0, 0, 0 }
+};
+
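+/*
+ * Operand ids: this enum must list the operands in exactly the same
+ * order as the operands[] table above, since the iclass argument
+ * tables below refer to rows by OPERAND_* value.
+ */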
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_uimmrx4,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_bbi,
+ OPERAND_sae,
+ OPERAND_sas,
+ OPERAND_sargt,
+ OPERAND_s,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_bitindex,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sal,
+ OPERAND_sas4,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_s3to1
+};
+
+
+/* Iclass table. */
+
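+/*
+ * Each iclass lists its operands and implicitly accessed states as
+ * xtensa_arg_internal entries tagged with a direction code: 'i' read,
+ * 'o' written, 'm' read-modify-write.  (ENTRY's ars_entry operand
+ * additionally carries 's'.)
+ */
+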
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
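+/*
+ * The special-register iclasses below repeat one pattern per register:
+ * RSR reads the state into art ('o' art, 'i' state), WSR writes it
+ * ('i' art, 'o' state), and XSR exchanges the two ('m' on both).
+ */
+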
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32nb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimmrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_memctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_memctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_memctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_salt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_stateArgs[] = {
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eraccess_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eraccess_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eraccess_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
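+/* Interface arguments name the TIE ports an instruction touches, kept
+   separate from its register and state operands; READ_IMPWIRE samples
+   the external IMPWIRE input wire declared earlier in this file. */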
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' }
+};
+
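+/* Each row below is one xtensa_iclass_internal: the count of encoded
+   operand arguments and their table, the count of implicit state
+   arguments and their table, and the count of TIE interface arguments
+   and their table.  A zero count is paired with a null table pointer
+   (the commented name records which iclass the empty row belongs to). */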
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 2, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 1, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 1, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 5, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 5, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 1, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 1, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 1, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 1, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 1, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 1, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32nb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 6, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid0_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_configid0_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid1_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 6, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 6, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 6, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 1, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 1, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 1, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 1, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 1, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 1, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 1, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 1, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 1, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 1, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 1, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 1, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 1, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 1, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 1, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 1, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 1, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 1, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 1, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 1, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 1, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 1, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 1, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 1, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 1, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 1, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 1, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 1, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 1, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 1, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 1, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 1, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 1, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 1, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 1, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 1, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 1, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 1, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 1, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 1, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 1, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 1, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 1, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 1, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 1, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 1, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 1, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 1, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 1, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 1, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 1, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 1, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 1, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 1, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 1, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 1, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 1, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 1, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 1, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 1, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 1, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 1, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 1, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 1, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 1, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 1, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 2, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 1, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 1, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 1, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 1, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 1, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 1, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 1, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 1, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 1, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 1, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 1, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_salt_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 20, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 1, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 1, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 2, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 2, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 1, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 1, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 1, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 1, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 2, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 1, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 2, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 1, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 1, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 2, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 2, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 2, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 1, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 2, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 2, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 1, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 1, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 1, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 1, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 2, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 2, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_lddr32_p_args,
+ 3, Iclass_xt_iclass_lddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sddr32_p_args,
+ 2, Iclass_xt_iclass_sddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 9, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 1, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 1, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 2, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 2, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 1, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 2, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 2, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 1, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 2, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 2, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 1, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 2, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 2, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 1, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 1, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 1, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 2, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 2, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eraccess_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eraccess_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eraccess_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rer_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wer_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 1, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 1, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 0, 0, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 1, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 1, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 1, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 }
+};
+
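+/* The xtensa_iclass_id enumerators below serve as the indices into
+   iclasses[] above, so the two lists must stay in exactly the same
+   order. */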
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s32nb,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_memctl,
+ ICLASS_xt_iclass_wsr_memctl,
+ ICLASS_xt_iclass_xsr_memctl,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_configid0,
+ ICLASS_xt_iclass_wsr_configid0,
+ ICLASS_xt_iclass_rsr_configid1,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_epc7,
+ ICLASS_xt_iclass_wsr_epc7,
+ ICLASS_xt_iclass_xsr_epc7,
+ ICLASS_xt_iclass_rsr_excsave7,
+ ICLASS_xt_iclass_wsr_excsave7,
+ ICLASS_xt_iclass_xsr_excsave7,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_eps7,
+ ICLASS_xt_iclass_wsr_eps7,
+ ICLASS_xt_iclass_xsr_eps7,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_iclass_salt,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_lddr32_p,
+ ICLASS_xt_iclass_sddr32_p,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_rsr_ccompare2,
+ ICLASS_xt_iclass_wsr_ccompare2,
+ ICLASS_xt_iclass_xsr_ccompare2,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_div,
+ ICLASS_xt_iclass_rsr_eraccess,
+ ICLASS_xt_iclass_wsr_eraccess,
+ ICLASS_xt_iclass_xsr_eraccess,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_iclass_READ_IMPWIRE,
+ ICLASS_iclass_SETB_EXPSTATE,
+ ICLASS_iclass_CLRB_EXPSTATE,
+ ICLASS_iclass_WRMSK_EXPSTATE
+};
+
+
+/* Opcode encodings. */
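+/* Each encode function stores the fixed bit pattern of its opcode into
+   a zeroed slot buffer; the operand fields are still zero at that point
+   and are filled in afterwards through the field set functions
+   (typically xtensa_opcode_encode() followed by
+   xtensa_operand_set_field() in libisa-based tools). */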
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
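+/* Encode functions with an _inst16a/_inst16b suffix emit into the two
+   16-bit density-option formats; the plain _inst variants emit into the
+   24-bit format. */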
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s32nb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x590000;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36100;
+}
+
+static void
+Opcode_wsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136100;
+}
+
+static void
+Opcode_xsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616100;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_configid1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b300;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b300;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b300;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d300;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d300;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d300;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b400;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b400;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b400;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d400;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d400;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d400;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b500;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b500;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b500;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d500;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d500;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d500;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b600;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b600;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b600;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d600;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d600;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d600;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b700;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b700;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b700;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d700;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d700;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d700;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c300;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c300;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c300;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c400;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c400;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c400;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c500;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c500;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c500;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c600;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c600;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c600;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c700;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c700;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c700;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
+static void
+Opcode_salt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x720000;
+}
+
+static void
+Opcode_saltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x620000;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39000;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139000;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619000;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a000;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a000;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a000;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39100;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x139100;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x619100;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a100;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13a100;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61a100;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38000;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138000;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618000;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38100;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x138100;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x618100;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36000;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136000;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616000;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_lddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e0;
+}
+
+static void
+Opcode_sddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f0;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135900;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f200;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f200;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f200;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20000;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20000;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20000;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf20000;
+}
+
+static void
+Opcode_rsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35f00;
+}
+
+static void
+Opcode_wsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135f00;
+}
+
+static void
+Opcode_xsr_eraccess_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615f00;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e60;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e600;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0000;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1000;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1200;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe2000;
+}
+
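+/* Per-opcode encode-function tables: one entry per slot format, in the
+   order inst (24-bit), inst16a, inst16b (16-bit narrow); a zero entry
+   means the opcode has no encoding in that slot.  */
+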
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32nb_encode_fns[] = {
+ Opcode_s32nb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_memctl_encode_fns[] = {
+ Opcode_rsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_memctl_encode_fns[] = {
+ Opcode_wsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_memctl_encode_fns[] = {
+ Opcode_xsr_memctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid0_encode_fns[] = {
+ Opcode_rsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_configid0_encode_fns[] = {
+ Opcode_wsr_configid0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid1_encode_fns[] = {
+ Opcode_rsr_configid1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_salt_encode_fns[] = {
+ Opcode_salt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_saltu_encode_fns[] = {
+ Opcode_saltu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddr32_p_encode_fns[] = {
+ Opcode_lddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sddr32_p_encode_fns[] = {
+ Opcode_sddr32_p_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eraccess_encode_fns[] = {
+ Opcode_rsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eraccess_encode_fns[] = {
+ Opcode_wsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eraccess_encode_fns[] = {
+ Opcode_xsr_eraccess_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0
+};
+
+
+
+
+
+/* Opcode table. */
+
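+/* Entry layout: mnemonic, iclass id, opcode flags
+   (XTENSA_OPCODE_IS_BRANCH/JUMP/CALL), per-slot encode functions, and
+   a funcUnit-use count and list (always 0, 0 for this core).  */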
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s32nb", ICLASS_xt_iclass_s32nb,
+ 0,
+ Opcode_s32nb_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.memctl", ICLASS_xt_iclass_rsr_memctl,
+ 0,
+ Opcode_rsr_memctl_encode_fns, 0, 0 },
+ { "wsr.memctl", ICLASS_xt_iclass_wsr_memctl,
+ 0,
+ Opcode_wsr_memctl_encode_fns, 0, 0 },
+ { "xsr.memctl", ICLASS_xt_iclass_xsr_memctl,
+ 0,
+ Opcode_xsr_memctl_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.configid0", ICLASS_xt_iclass_rsr_configid0,
+ 0,
+ Opcode_rsr_configid0_encode_fns, 0, 0 },
+ { "wsr.configid0", ICLASS_xt_iclass_wsr_configid0,
+ 0,
+ Opcode_wsr_configid0_encode_fns, 0, 0 },
+ { "rsr.configid1", ICLASS_xt_iclass_rsr_configid1,
+ 0,
+ Opcode_rsr_configid1_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", ICLASS_xt_iclass_rsr_epc7,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", ICLASS_xt_iclass_wsr_epc7,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", ICLASS_xt_iclass_xsr_epc7,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", ICLASS_xt_iclass_rsr_excsave7,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", ICLASS_xt_iclass_wsr_excsave7,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", ICLASS_xt_iclass_xsr_excsave7,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", ICLASS_xt_iclass_rsr_eps7,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", ICLASS_xt_iclass_wsr_eps7,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", ICLASS_xt_iclass_xsr_eps7,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "salt", ICLASS_xt_iclass_salt,
+ 0,
+ Opcode_salt_encode_fns, 0, 0 },
+ { "saltu", ICLASS_xt_iclass_salt,
+ 0,
+ Opcode_saltu_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "lddr32.p", ICLASS_xt_iclass_lddr32_p,
+ 0,
+ Opcode_lddr32_p_encode_fns, 0, 0 },
+ { "sddr32.p", ICLASS_xt_iclass_sddr32_p,
+ 0,
+ Opcode_sddr32_p_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", ICLASS_xt_iclass_rsr_ccompare2,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", ICLASS_xt_iclass_wsr_ccompare2,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", ICLASS_xt_iclass_xsr_ccompare2,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "quou", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "quos", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "remu", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rems", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "rsr.eraccess", ICLASS_xt_iclass_rsr_eraccess,
+ 0,
+ Opcode_rsr_eraccess_encode_fns, 0, 0 },
+ { "wsr.eraccess", ICLASS_xt_iclass_wsr_eraccess,
+ 0,
+ Opcode_wsr_eraccess_encode_fns, 0, 0 },
+ { "xsr.eraccess", ICLASS_xt_iclass_xsr_eraccess,
+ 0,
+ Opcode_xsr_eraccess_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "read_impwire", ICLASS_iclass_READ_IMPWIRE,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", ICLASS_iclass_SETB_EXPSTATE,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", ICLASS_iclass_CLRB_EXPSTATE,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", ICLASS_iclass_WRMSK_EXPSTATE,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 }
+};
+
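+/*
+ * Opcode identifiers. The slot decode functions below return these
+ * values, which serve as indices into the opcodes[] table above, so
+ * the two lists must be kept in the same order.
+ */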
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_SIMCALL,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S32NB,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_MEMCTL,
+ OPCODE_WSR_MEMCTL,
+ OPCODE_XSR_MEMCTL,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_CONFIGID0,
+ OPCODE_WSR_CONFIGID0,
+ OPCODE_RSR_CONFIGID1,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPC7,
+ OPCODE_WSR_EPC7,
+ OPCODE_XSR_EPC7,
+ OPCODE_RSR_EXCSAVE7,
+ OPCODE_WSR_EXCSAVE7,
+ OPCODE_XSR_EXCSAVE7,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EPS7,
+ OPCODE_WSR_EPS7,
+ OPCODE_XSR_EPS7,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_SALT,
+ OPCODE_SALTU,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_MULL,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_LDDR32_P,
+ OPCODE_SDDR32_P,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_RSR_CCOMPARE2,
+ OPCODE_WSR_CCOMPARE2,
+ OPCODE_XSR_CCOMPARE2,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_QUOU,
+ OPCODE_QUOS,
+ OPCODE_REMU,
+ OPCODE_REMS,
+ OPCODE_RSR_ERACCESS,
+ OPCODE_WSR_ERACCESS,
+ OPCODE_XSR_ERACCESS,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_READ_IMPWIRE,
+ OPCODE_SETB_EXPSTATE,
+ OPCODE_CLRB_EXPSTATE,
+ OPCODE_WRMSK_EXPSTATE
+};
+
+
+/* Slot-specific opcode decode functions. */
+
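+/*
+ * Decode the 24-bit "inst" slot. The comparison tree below walks the
+ * op0/op1/op2 fields and, where needed, the r/s/t subfields until a
+ * unique opcode is identified; XTENSA_UNDEFINED is returned for any
+ * encoding that matches no opcode in this core configuration.
+ */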
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_RET;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_RETW;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_JX;
+ }
+ if (Field_m_Slot_inst_get (insn) == 3)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALLX0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALLX4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALLX8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALLX12;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_MOVSP;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_ISYNC;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RSYNC;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_ESYNC;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DSYNC;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_EXCW;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MEMW;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_EXTW;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_NOP;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 3)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_RFE;
+ if (Field_s_Slot_inst_get (insn) == 2)
+ return OPCODE_RFDE;
+ if (Field_s_Slot_inst_get (insn) == 4)
+ return OPCODE_RFWO;
+ if (Field_s_Slot_inst_get (insn) == 5)
+ return OPCODE_RFWU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFI;
+ }
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BREAK;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ }
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RSIL;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_LDDR32_P;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_SDDR32_P;
+ }
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OR;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_XOR;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RER;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_WER;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_NSA;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_NSAU;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_RITLB0;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_PITLB;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_WITLB;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_RITLB1;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_RDTLB0;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_PDTLB;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_WDTLB;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_RDTLB1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ADD;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_ADDX2;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_ADDX4;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_ADDX8;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_SUB;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_SUBX2;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_SUBX4;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_SUBX8;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 1)
+ {
+ if ((Field_op2_Slot_inst_get (insn) == 0 ||
+ Field_op2_Slot_inst_get (insn) == 1))
+ return OPCODE_SLLI;
+ if ((Field_op2_Slot_inst_get (insn) == 2 ||
+ Field_op2_Slot_inst_get (insn) == 3))
+ return OPCODE_SRAI;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SRLI;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_XSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_XSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_XSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_XSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_XSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_XSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_XSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_XSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_XSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_XSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_XSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_XSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_XSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_XSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_XSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_XSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_XSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_XSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_XSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_XSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_XSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_XSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_XSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_XSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_XSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_XSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_XSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_XSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_XSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_XSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_XSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_XSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_XSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_XSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_XSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_XSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_XSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_XSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_XSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_XSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_XSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_XSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_XSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_XSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_XSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_XSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_XSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_XSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_XSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_XSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_XSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_SRC;
+ if (Field_op2_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_op2_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_op2_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MUL16U;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MUL16S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_SALTU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_SALT;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MULL;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_QUOU;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_QUOS;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_REMU;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_REMS;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_RSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_RSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_RSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_RSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_RSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_RSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_RSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_RSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_RSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_RSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_RSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_RSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_RSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_RSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_RSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_RSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_RSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_RSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_RSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_RSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_RSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_RSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_RSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_RSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_RSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_RSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_RSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_RSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_RSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_RSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_RSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 208)
+ return OPCODE_RSR_CONFIGID1;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_RSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_RSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_RSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_RSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_RSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_RSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_RSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_RSR_INTERRUPT;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_RSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_RSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_RSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_RSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_RSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_RSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 235)
+ return OPCODE_RSR_PRID;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_RSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_RSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_RSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_RSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_RSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_RSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_RSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_RSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_WSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_WSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_WSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_WSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_WSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 89)
+ return OPCODE_WSR_MMID;
+ if (Field_sr_Slot_inst_get (insn) == 95)
+ return OPCODE_WSR_ERACCESS;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_WSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_WSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_WSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_WSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_WSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_WSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_WSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_WSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_WSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_WSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_WSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_WSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_WSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_WSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_WSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_WSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_WSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_WSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_WSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_WSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_WSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_WSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_WSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_WSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_WSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_WSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_WSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_WSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_WSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_WSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_WSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_WSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_WSR_INTSET;
+ if (Field_sr_Slot_inst_get (insn) == 227)
+ return OPCODE_WSR_INTCLEAR;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_WSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_WSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_WSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_WSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_WSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_WSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_WSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_SEXT;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MIN;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MAX;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MINU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_MAXU;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ {
+ if (Field_st_Slot_inst_get (insn) == 230)
+ return OPCODE_RUR_EXPSTATE;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WUR_EXPSTATE;
+ }
+ }
+ if ((Field_op1_Slot_inst_get (insn) == 4 ||
+ Field_op1_Slot_inst_get (insn) == 5))
+ return OPCODE_EXTUI;
+ if (Field_op1_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_L32E;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_S32E;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_S32NB;
+ }
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_READ_IMPWIRE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_SETB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_CLRB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_WRMSK_EXPSTATE;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 1)
+ return OPCODE_L32R;
+ if (Field_op0_Slot_inst_get (insn) == 2)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_L32I;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_S16I;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_S32I;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVI;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_L32AI;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_S32C1I;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_S32RI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 5)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALL0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALL4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALL8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALL12;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 6)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_J;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQZ;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTZ;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEZ;
+ }
+ if (Field_n_Slot_inst_get (insn) == 2)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEI;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEI;
+ }
+ if (Field_n_Slot_inst_get (insn) == 3)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_ENTRY;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTUI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEUI;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 7)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BALL;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_BBC;
+ if ((Field_r_Slot_inst_get (insn) == 6 ||
+ Field_r_Slot_inst_get (insn) == 7))
+ return OPCODE_BBCI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_BANY;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_BNE;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_BGE;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_BNALL;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_BBS;
+ if ((Field_r_Slot_inst_get (insn) == 14 ||
+ Field_r_Slot_inst_get (insn) == 15))
+ return OPCODE_BBSI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16b_get (insn) == 12)
+ {
+ if (Field_i_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_i_Slot_inst16b_get (insn) == 1)
+ {
+ if (Field_z_Slot_inst16b_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_z_Slot_inst16b_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ }
+ }
+ if (Field_op0_Slot_inst16b_get (insn) == 13)
+ {
+ if (Field_r_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_r_Slot_inst16b_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst16b_get (insn) == 0)
+ return OPCODE_RET_N;
+ if (Field_t_Slot_inst16b_get (insn) == 1)
+ return OPCODE_RETW_N;
+ if (Field_t_Slot_inst16b_get (insn) == 2)
+ return OPCODE_BREAK_N;
+ if (Field_t_Slot_inst16b_get (insn) == 3 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ if (Field_t_Slot_inst16b_get (insn) == 6 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ }
+ }
+ return XTENSA_UNDEFINED;
+}
+
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16a_get (insn) == 8)
+ return OPCODE_L32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 9)
+ return OPCODE_S32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 10)
+ return OPCODE_ADD_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 11)
+ return OPCODE_ADDI_N;
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
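+/*
+ * The slot get/set helpers copy a slot's bits between a raw
+ * instruction buffer and a slot buffer: 24 bits for the x24 format,
+ * 16 bits for the narrow x16a/x16b formats.
+ */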
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_get,
+ Field_s3to1_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_set,
+ Field_s3to1_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_get,
+ Field_s3to1_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_set,
+ Field_s3to1_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
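+/*
+ * Each slot entry pairs its name and format with the extract/insert
+ * functions, the per-field accessor tables, the slot decoder, and the
+ * name of the opcode used to pad the slot with a no-op ("" where the
+ * slot has no such opcode).
+ */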
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" }
+};
+
+
+/* Instruction formats. */
+
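+/*
+ * The format encode functions seed an empty instruction word with the
+ * op0 bits that select the format: 0x0 for x24, 0x8 for x16a and 0xc
+ * for x16b.
+ */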
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc)
+ return 2; /* x16b */
+ return -1;
+}
+
+static int length_table[256] = {
+  /* Indexed by the first instruction byte; only the low nibble (op0)
+     matters, so the same 16-entry pattern repeats for every value of
+     the high nibble: op0 0..7 decode as 3-byte x24, 8..13 as 2-byte
+     x16a/x16b, and 14..15 are invalid (-1). */
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1,
+   3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,  2,  2, -1, -1
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int l = insn[0];
+ return length_table[l];
+}
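+
+/* Editor's sketch, not part of the generated file: for this core the
+   256-entry table reduces to a check on the low nibble (op0) of the
+   first instruction byte, consistent with format_decoder above; the
+   table form is kept because it is branch-free and is what the Xtensa
+   tooling emits. */
+static int __attribute__((unused))
+length_decoder_equiv (const unsigned char *insn)
+{
+  int op0 = insn[0] & 0xf;
+  if ((op0 & 0x8) == 0)
+    return 3;             /* x24 */
+  if ((op0 & 0xe) == 0xe)
+    return -1;            /* unassigned encodings */
+  return 2;               /* x16a / x16b */
+}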
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+  0 /* little-endian */,
+  3 /* insn_size */, 0,
+  3 /* num_formats */, formats, format_decoder, length_decoder,
+  3 /* num_slots */, slots,
+  43 /* num_fields */,
+  77 /* num_operands */, operands,
+  263 /* num_iclasses */, iclasses,
+  317 /* num_opcodes */, opcodes, 0,
+  1 /* num_regfiles */, regfiles,
+  NUM_STATES, states, 0,
+  NUM_SYSREGS, sysregs, 0,
+  { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+  1 /* num_interfaces */, interfaces, 0,
+  0 /* num_funcUnits */, funcUnits, 0
+};
diff --git a/target/xtensa/core-test_kc705_be.c b/target/xtensa/core-test_kc705_be.c
new file mode 100644
index 000000000..294c16f2f
--- /dev/null
+++ b/target/xtensa/core-test_kc705_be.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-test_kc705_be/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_test_kc705_be
+#include "core-test_kc705_be/xtensa-modules.c.inc"
+
+static XtensaConfig test_kc705_be __attribute__((unused)) = {
+ .name = "test_kc705_be",
+ .gdb_regmap = {
+ .reg = {
+#include "core-test_kc705_be/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 40000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(test_kc705_be)
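+
+/* Editor's note: REGISTER_CORE comes from overlay_tool.h (included
+   above) and expands to a constructor function that adds this
+   XtensaConfig to QEMU's global list of Xtensa cores at startup, which
+   is also why the config object is marked __attribute__((unused)).
+   This is a paraphrase of the macro, not its verbatim definition. */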
diff --git a/target/xtensa/core-test_kc705_be/core-isa.h b/target/xtensa/core-test_kc705_be/core-isa.h
new file mode 100644
index 000000000..408fed871
--- /dev/null
+++ b/target/xtensa/core-test_kc705_be/core-isa.h
@@ -0,0 +1,573 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H
+#define XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_DEPBITS 0 /* DEPBITS instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 1 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 0 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* num of processor contexts */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 0 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+
+#define XCHAL_HAVE_FUSION 0 /* Fusion*/
+#define XCHAL_HAVE_FUSION_FP 0 /* Fusion FP option */
+#define XCHAL_HAVE_FUSION_LOW_POWER 0 /* Fusion Low Power option */
+#define XCHAL_HAVE_FUSION_AES 0 /* Fusion BLE/Wifi AES-128 CCM option */
+#define XCHAL_HAVE_FUSION_CONVENC 0 /* Fusion Conv Encode option */
+#define XCHAL_HAVE_FUSION_LFSR_CRC 0 /* Fusion LFSR-CRC option */
+#define XCHAL_HAVE_FUSION_BITOPS 0 /* Fusion Bit Operations Support option */
+#define XCHAL_HAVE_FUSION_AVS 0 /* Fusion AVS option */
+#define XCHAL_HAVE_FUSION_16BIT_BASEBAND 0 /* Fusion 16-bit Baseband option */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4 0 /* HiFi4 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI4_VFPU 0 /* HiFi4 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3_VFPU 0 /* HiFi3 Audio Engine VFPU option */
+#define XCHAL_HAVE_HIFI2 1 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2_MUL32X24 1 /* HiFi2 and 32x24 MACs */
+#define XCHAL_HAVE_HIFI2EP 1 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0 /* HiFi Mini Audio Engine pkg */
+
+
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector or user floating-point pkg */
+#define XCHAL_HAVE_USER_DPFPU 0 /* user DP floating-point pkg */
+#define XCHAL_HAVE_USER_SPFPU 0 /* user SP floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_ACCEL 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_DFP_accel XCHAL_HAVE_DFP_ACCEL /* for backward compatibility */
+
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0 /* DFPU Coprocessor, single precision only */
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE 0 /* DFPU Coprocessor, single and double precision */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_PDX4 0 /* PDX4 */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_CONNXD2_DUALLSFLIX 0 /* ConnX D2 & Dual LoadStore Flix */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+#define XCHAL_HAVE_GRIVPEP 0 /* GRIVPEP is General Release of IVPEP */
+#define XCHAL_HAVE_GRIVPEP_HISTOGRAM 0 /* Histogram option on GRIVPEP */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+#define XCHAL_CLOCK_GATING_GLOBAL 0 /* global clock gating */
+#define XCHAL_CLOCK_GATING_FUNCUNIT 0 /* funct. unit clock gating */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1100002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_kc705_be" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00058D8C /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3FFFF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x1C858D8C /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX6.0.2" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2600 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 2 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 260002 /* major*100+minor */
+#define XCHAL_HW_REL_LX6 1
+#define XCHAL_HW_REL_LX6_0 1
+#define XCHAL_HW_REL_LX6_0_2 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2600 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 2 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 260002 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2600 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 2 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 260002 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 1 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 1 /* dcache pref. castout bufsz */
+#define XCHAL_PREFETCH_ENTRIES 8 /* cache prefetch entries */
+#define XCHAL_PREFETCH_BLOCK_ENTRIES 0 /* prefetch block streams */
+#define XCHAL_HAVE_CACHE_BLOCKOPS 0 /* block prefetch for caches */
+#define XCHAL_HAVE_ICACHE_TEST 1 /* Icache test instructions */
+#define XCHAL_HAVE_DCACHE_TEST 1 /* Dcache test instructions */
+#define XCHAL_HAVE_ICACHE_DYN_WAYS 0 /* Icache dynamic way support */
+#define XCHAL_HAVE_DCACHE_DYN_WAYS 0 /* Dcache dynamic way support */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
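+
+/* Worked check (editor's note): cache size = ways * 2^(linewidth +
+   setwidth) = 4 * 2^(5 + 7) = 16384 bytes for both caches, matching
+   XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE above. */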
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+/* Whether MEMCTL register has anything useful */
+#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \
+ XCHAL_DCACHE_IS_COHERENT || \
+ XCHAL_HAVE_ICACHE_DYN_WAYS || \
+ XCHAL_HAVE_DCACHE_DYN_WAYS) && \
+ (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0))
+
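+/* Editor's note: for this configuration (no loop buffer, non-coherent
+   dcache, no dynamic cache ways) XCHAL_USE_MEMCTL above evaluates to 0. */
+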
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 16 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 4 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_MASK 0x00000140
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00008000
+#define XCHAL_INTLEVEL5_MASK 0x00003000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F00BF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F01FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F0FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
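+
+/* Consistency check (editor's note): each ANDBELOW mask is the OR of the
+   per-level masks up to that level; e.g. for levels 1..2,
+   0x001F00BF | 0x00000140 == 0x001F01FF == XCHAL_INTLEVEL2_ANDBELOW_MASK. */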
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 2
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 5
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 4
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_PROFILING
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F0000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00008000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+#define XCHAL_PROFILING_INTERRUPT 15 /* profiling interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL4_NUM 15
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are multiple interrupts at each of levels 1, 2, 3, and 5.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, e.g. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 5) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 21 /* (intlevel 3) */
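+
+/* Editor's note (illustrative): software unmasking external BInterrupt
+   pin 6 would set the corresponding *core* interrupt bit in INTENABLE,
+   i.e. 1 << XCHAL_EXTINT6_NUM == 1 << 8, a level-2 interrupt per the
+   mapping above. */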
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT8_EXTNUM 6 /* (intlevel 2) */
+#define XCHAL_INT9_EXTNUM 7 /* (intlevel 3) */
+#define XCHAL_INT12_EXTNUM 8 /* (intlevel 5) */
+#define XCHAL_INT14_EXTNUM 9 /* (intlevel 7) */
+#define XCHAL_INT16_EXTNUM 10 /* (intlevel 1) */
+#define XCHAL_INT17_EXTNUM 11 /* (intlevel 1) */
+#define XCHAL_INT18_EXTNUM 12 /* (intlevel 1) */
+#define XCHAL_INT19_EXTNUM 13 /* (intlevel 1) */
+#define XCHAL_INT20_EXTNUM 14 /* (intlevel 1) */
+#define XCHAL_INT21_EXTNUM 15 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 1 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 1 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 1 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 1 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 262144 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 1 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 8 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_CORE_TEST_KC705_BE_CORE_ISA_H */
diff --git a/target/xtensa/core-test_kc705_be/gdb-config.c.inc b/target/xtensa/core-test_kc705_be/gdb-config.c.inc
new file mode 100644
index 000000000..eb3e03cd5
--- /dev/null
+++ b/target/xtensa/core-test_kc705_be/gdb-config.c.inc
@@ -0,0 +1,259 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132,32, 4, 4,0x0200,0x0006,-2, 2,0x1100,lbeg, 0,0,0,0,0,0)
+ XTREG( 34,136,32, 4, 4,0x0201,0x0006,-2, 2,0x1100,lend, 0,0,0,0,0,0)
+ XTREG( 35,140,32, 4, 4,0x0202,0x0006,-2, 2,0x1100,lcount, 0,0,0,0,0,0)
+ XTREG( 36,144, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 37,148,13, 4, 4,0x0228,0x0006,-2, 2,0x1100,prefctl, 0,0,0,0,0,0)
+ XTREG( 38,152, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 39,156, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,configid0, 0,0,0,0,0,0)
+ XTREG( 41,164,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,configid1, 0,0,0,0,0,0)
+ XTREG( 42,168,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 43,172,32, 4, 4,0x03e7,0x0006,-2, 3,0x0110,threadptr, 0,0,0,0,0,0)
+ XTREG( 44,176,16, 4, 4,0x0204,0x0006,-1, 2,0x1100,br, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 46,184,32, 4, 4,0x0210,0x0006,-1, 2,0x1100,acclo, 0,0,0,0,0,0)
+ XTREG( 47,188, 8, 4, 4,0x0211,0x0006,-1, 2,0x1100,acchi, 0,0,0,0,0,0)
+ XTREG( 48,192,32, 4, 4,0x0220,0x0006,-1, 2,0x1100,m0, 0,0,0,0,0,0)
+ XTREG( 49,196,32, 4, 4,0x0221,0x0006,-1, 2,0x1100,m1, 0,0,0,0,0,0)
+ XTREG( 50,200,32, 4, 4,0x0222,0x0006,-1, 2,0x1100,m2, 0,0,0,0,0,0)
+ XTREG( 51,204,32, 4, 4,0x0223,0x0006,-1, 2,0x1100,m3, 0,0,0,0,0,0)
+ XTREG( 52,208,32, 4, 4,0x03e6,0x000e,-1, 3,0x0110,expstate, 0,0,0,0,0,0)
+ XTREG( 53,212,48, 8, 8,0x0060,0x0006, 1, 4,0x0101,aep0,
+ "03:40:48:2b","03:40:48:7a",0,0,0,0)
+ XTREG( 54,220,48, 8, 8,0x0061,0x0006, 1, 4,0x0101,aep1,
+ "03:40:49:2b","03:40:49:7a",0,0,0,0)
+ XTREG( 55,228,48, 8, 8,0x0062,0x0006, 1, 4,0x0101,aep2,
+ "03:40:4a:2b","03:40:4a:7a",0,0,0,0)
+ XTREG( 56,236,48, 8, 8,0x0063,0x0006, 1, 4,0x0101,aep3,
+ "03:40:4b:2b","03:40:4b:7a",0,0,0,0)
+ XTREG( 57,244,48, 8, 8,0x0064,0x0006, 1, 4,0x0101,aep4,
+ "03:40:4c:2b","03:40:4c:7a",0,0,0,0)
+ XTREG( 58,252,48, 8, 8,0x0065,0x0006, 1, 4,0x0101,aep5,
+ "03:40:4d:2b","03:40:4d:7a",0,0,0,0)
+ XTREG( 59,260,48, 8, 8,0x0066,0x0006, 1, 4,0x0101,aep6,
+ "03:40:4e:2b","03:40:4e:7a",0,0,0,0)
+ XTREG( 60,268,48, 8, 8,0x0067,0x0006, 1, 4,0x0101,aep7,
+ "03:40:4f:2b","03:40:4f:7a",0,0,0,0)
+ XTREG( 61,276,56, 8, 8,0x0068,0x0006, 1, 4,0x0101,aeq0,
+ "03:40:40:3c","03:40:40:1c",0,0,0,0)
+ XTREG( 62,284,56, 8, 8,0x0069,0x0006, 1, 4,0x0101,aeq1,
+ "03:40:41:3c","03:40:44:1c",0,0,0,0)
+ XTREG( 63,292,56, 8, 8,0x006a,0x0006, 1, 4,0x0101,aeq2,
+ "03:40:42:3c","03:40:48:1c",0,0,0,0)
+ XTREG( 64,300,56, 8, 8,0x006b,0x0006, 1, 4,0x0101,aeq3,
+ "03:40:43:3c","03:40:4c:1c",0,0,0,0)
+ XTREG( 65,308, 7, 4, 4,0x03f0,0x0006, 1, 3,0x0100,ae_ovf_sar, 0,0,0,0,0,0)
+ XTREG( 66,312,32, 4, 4,0x03f1,0x0006, 1, 3,0x0110,ae_bithead, 0,0,0,0,0,0)
+ XTREG( 67,316,16, 4, 4,0x03f2,0x0006, 1, 3,0x0100,ae_ts_fts_bu_bp,0,0,0,0,0,0)
+ XTREG( 68,320,28, 4, 4,0x03f3,0x0006, 1, 3,0x0100,ae_sd_no, 0,0,0,0,0,0)
+ XTREG( 69,324,32, 4, 4,0x03f6,0x0006, 1, 3,0x0110,ae_cbegin0, 0,0,0,0,0,0)
+ XTREG( 70,328,32, 4, 4,0x03f7,0x0006, 1, 3,0x0110,ae_cend0, 0,0,0,0,0,0)
+ XTREG( 71,332,32, 4, 4,0x0253,0x0007,-2, 2,0x1000,ptevaddr, 0,0,0,0,0,0)
+ XTREG( 72,336,32, 4, 4,0x0259,0x000d,-2, 2,0x1000,mmid, 0,0,0,0,0,0)
+ XTREG( 73,340,32, 4, 4,0x025a,0x0007,-2, 2,0x1000,rasid, 0,0,0,0,0,0)
+ XTREG( 74,344,25, 4, 4,0x025b,0x0007,-2, 2,0x1000,itlbcfg, 0,0,0,0,0,0)
+ XTREG( 75,348,25, 4, 4,0x025c,0x0007,-2, 2,0x1000,dtlbcfg, 0,0,0,0,0,0)
+ XTREG( 76,352, 2, 4, 4,0x0260,0x0007,-2, 2,0x1000,ibreakenable,0,0,0,0,0,0)
+ XTREG( 77,356, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG( 78,360,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG( 79,364,32, 4, 4,0x0280,0x0007,-2, 2,0x1000,ibreaka0, 0,0,0,0,0,0)
+ XTREG( 80,368,32, 4, 4,0x0281,0x0007,-2, 2,0x1000,ibreaka1, 0,0,0,0,0,0)
+ XTREG( 81,372,32, 4, 4,0x0290,0x0007,-2, 2,0x1000,dbreaka0, 0,0,0,0,0,0)
+ XTREG( 82,376,32, 4, 4,0x0291,0x0007,-2, 2,0x1000,dbreaka1, 0,0,0,0,0,0)
+ XTREG( 83,380,32, 4, 4,0x02a0,0x0007,-2, 2,0x1000,dbreakc0, 0,0,0,0,0,0)
+ XTREG( 84,384,32, 4, 4,0x02a1,0x0007,-2, 2,0x1000,dbreakc1, 0,0,0,0,0,0)
+ XTREG( 85,388,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG( 86,392,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG( 87,396,32, 4, 4,0x02b3,0x0007,-2, 2,0x1000,epc3, 0,0,0,0,0,0)
+ XTREG( 88,400,32, 4, 4,0x02b4,0x0007,-2, 2,0x1000,epc4, 0,0,0,0,0,0)
+ XTREG( 89,404,32, 4, 4,0x02b5,0x0007,-2, 2,0x1000,epc5, 0,0,0,0,0,0)
+ XTREG( 90,408,32, 4, 4,0x02b6,0x0007,-2, 2,0x1000,epc6, 0,0,0,0,0,0)
+ XTREG( 91,412,32, 4, 4,0x02b7,0x0007,-2, 2,0x1000,epc7, 0,0,0,0,0,0)
+ XTREG( 92,416,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG( 93,420,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG( 94,424,19, 4, 4,0x02c3,0x0007,-2, 2,0x1000,eps3, 0,0,0,0,0,0)
+ XTREG( 95,428,19, 4, 4,0x02c4,0x0007,-2, 2,0x1000,eps4, 0,0,0,0,0,0)
+ XTREG( 96,432,19, 4, 4,0x02c5,0x0007,-2, 2,0x1000,eps5, 0,0,0,0,0,0)
+ XTREG( 97,436,19, 4, 4,0x02c6,0x0007,-2, 2,0x1000,eps6, 0,0,0,0,0,0)
+ XTREG( 98,440,19, 4, 4,0x02c7,0x0007,-2, 2,0x1000,eps7, 0,0,0,0,0,0)
+ XTREG( 99,444,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG(100,448,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG(101,452,32, 4, 4,0x02d3,0x0007,-2, 2,0x1000,excsave3, 0,0,0,0,0,0)
+ XTREG(102,456,32, 4, 4,0x02d4,0x0007,-2, 2,0x1000,excsave4, 0,0,0,0,0,0)
+ XTREG(103,460,32, 4, 4,0x02d5,0x0007,-2, 2,0x1000,excsave5, 0,0,0,0,0,0)
+ XTREG(104,464,32, 4, 4,0x02d6,0x0007,-2, 2,0x1000,excsave6, 0,0,0,0,0,0)
+ XTREG(105,468,32, 4, 4,0x02d7,0x0007,-2, 2,0x1000,excsave7, 0,0,0,0,0,0)
+ XTREG(106,472, 8, 4, 4,0x02e0,0x0007,-2, 2,0x1000,cpenable, 0,0,0,0,0,0)
+ XTREG(107,476,22, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG(108,480,22, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG(109,484,22, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG(110,488,22, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG(111,492,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG(112,496, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG(113,500,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG(114,504,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG(115,508,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG(116,512,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG(117,516, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG(118,520,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG(119,524,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG(120,528,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG(121,532,32, 4, 4,0x02f2,0x000f,-2, 2,0x1000,ccompare2, 0,0,0,0,0,0)
+ XTREG(122,536,32, 4, 4,0x02f4,0x0007,-2, 2,0x1000,misc0, 0,0,0,0,0,0)
+ XTREG(123,540,32, 4, 4,0x02f5,0x0007,-2, 2,0x1000,misc1, 0,0,0,0,0,0)
+ XTREG(124,544,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0)
+ XTREG(125,548,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0)
+ XTREG(126,552,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0)
+ XTREG(127,556,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0)
+ XTREG(128,560,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0)
+ XTREG(129,564,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0)
+ XTREG(130,568,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0)
+ XTREG(131,572,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0)
+ XTREG(132,576,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0)
+ XTREG(133,580,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0)
+ XTREG(134,584,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0)
+ XTREG(135,588,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0)
+ XTREG(136,592,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0)
+ XTREG(137,596,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0)
+ XTREG(138,600,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0)
+ XTREG(139,604,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0)
+ XTREG(140,608, 1, 1, 1,0x0010,0x0006,-2, 6,0x1010,b0,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(141,609, 1, 1, 1,0x0011,0x0006,-2, 6,0x1010,b1,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(142,610, 1, 1, 1,0x0012,0x0006,-2, 6,0x1010,b2,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(143,611, 1, 1, 1,0x0013,0x0006,-2, 6,0x1010,b3,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(144,612, 1, 1, 1,0x0014,0x0006,-2, 6,0x1010,b4,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(145,613, 1, 1, 1,0x0015,0x0006,-2, 6,0x1010,b5,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG(146,614, 1, 1, 1,0x0016,0x0006,-2, 6,0x1010,b6,
+ 0,0,&xtensa_mask6,0,0,0)
+ XTREG(147,615, 1, 1, 1,0x0017,0x0006,-2, 6,0x1010,b7,
+ 0,0,&xtensa_mask7,0,0,0)
+ XTREG(148,616, 1, 1, 1,0x0018,0x0006,-2, 6,0x1010,b8,
+ 0,0,&xtensa_mask8,0,0,0)
+ XTREG(149,617, 1, 1, 1,0x0019,0x0006,-2, 6,0x1010,b9,
+ 0,0,&xtensa_mask9,0,0,0)
+ XTREG(150,618, 1, 1, 1,0x001a,0x0006,-2, 6,0x1010,b10,
+ 0,0,&xtensa_mask10,0,0,0)
+ XTREG(151,619, 1, 1, 1,0x001b,0x0006,-2, 6,0x1010,b11,
+ 0,0,&xtensa_mask11,0,0,0)
+ XTREG(152,620, 1, 1, 1,0x001c,0x0006,-2, 6,0x1010,b12,
+ 0,0,&xtensa_mask12,0,0,0)
+ XTREG(153,621, 1, 1, 1,0x001d,0x0006,-2, 6,0x1010,b13,
+ 0,0,&xtensa_mask13,0,0,0)
+ XTREG(154,622, 1, 1, 1,0x001e,0x0006,-2, 6,0x1010,b14,
+ 0,0,&xtensa_mask14,0,0,0)
+ XTREG(155,623, 1, 1, 1,0x001f,0x0006,-2, 6,0x1010,b15,
+ 0,0,&xtensa_mask15,0,0,0)
+ XTREG(156,624, 4, 4, 4,0x2008,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask16,0,0,0)
+ XTREG(157,628, 1, 4, 4,0x2009,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask17,0,0,0)
+ XTREG(158,632, 1, 4, 4,0x200a,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask18,0,0,0)
+ XTREG(159,636, 2, 4, 4,0x200b,0x0006,-2, 6,0x1010,psring,
+ 0,0,&xtensa_mask19,0,0,0)
+ XTREG(160,640, 1, 4, 4,0x200c,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask20,0,0,0)
+ XTREG(161,644, 2, 4, 4,0x200d,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask21,0,0,0)
+ XTREG(162,648, 4, 4, 4,0x200e,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask22,0,0,0)
+ XTREG(163,652,40, 8, 4,0x200f,0x0006,-2, 6,0x1010,acc,
+ 0,0,&xtensa_mask23,0,0,0)
+ XTREG(164,660, 4, 4, 4,0x2014,0x0006,-2, 6,0x1010,dbnum,
+ 0,0,&xtensa_mask24,0,0,0)
+ XTREG(165,664, 8, 4, 4,0x2016,0x0006,-2, 6,0x1010,asid3,
+ 0,0,&xtensa_mask25,0,0,0)
+ XTREG(166,668, 8, 4, 4,0x2017,0x0006,-2, 6,0x1010,asid2,
+ 0,0,&xtensa_mask26,0,0,0)
+ XTREG(167,672, 8, 4, 4,0x2018,0x0006,-2, 6,0x1010,asid1,
+ 0,0,&xtensa_mask27,0,0,0)
+ XTREG(168,676, 1, 4, 4,0x2019,0x0006,-2, 6,0x1010,instpgszid6,
+ 0,0,&xtensa_mask28,0,0,0)
+ XTREG(169,680, 1, 4, 4,0x201a,0x0006,-2, 6,0x1010,instpgszid5,
+ 0,0,&xtensa_mask29,0,0,0)
+ XTREG(170,684, 2, 4, 4,0x201b,0x0006,-2, 6,0x1010,instpgszid4,
+ 0,0,&xtensa_mask30,0,0,0)
+ XTREG(171,688, 1, 4, 4,0x201c,0x0006,-2, 6,0x1010,datapgszid6,
+ 0,0,&xtensa_mask31,0,0,0)
+ XTREG(172,692, 1, 4, 4,0x201d,0x0006,-2, 6,0x1010,datapgszid5,
+ 0,0,&xtensa_mask32,0,0,0)
+ XTREG(173,696, 2, 4, 4,0x201e,0x0006,-2, 6,0x1010,datapgszid4,
+ 0,0,&xtensa_mask33,0,0,0)
+ XTREG(174,700,10, 4, 4,0x201f,0x0006,-2, 6,0x1010,ptbase,
+ 0,0,&xtensa_mask34,0,0,0)
+ XTREG(175,704, 1, 4, 4,0x2021,0x0006, 1, 5,0x1010,ae_overflow,
+ 0,0,&xtensa_mask35,0,0,0)
+ XTREG(176,708, 6, 4, 4,0x2022,0x0006, 1, 5,0x1010,ae_sar,
+ 0,0,&xtensa_mask36,0,0,0)
+ XTREG(177,712, 4, 4, 4,0x2023,0x0006, 1, 5,0x1010,ae_bitptr,
+ 0,0,&xtensa_mask37,0,0,0)
+ XTREG(178,716, 4, 4, 4,0x2024,0x0006, 1, 5,0x1010,ae_bitsused,
+ 0,0,&xtensa_mask38,0,0,0)
+ XTREG(179,720, 4, 4, 4,0x2025,0x0006, 1, 5,0x1010,ae_tablesize,
+ 0,0,&xtensa_mask39,0,0,0)
+ XTREG(180,724, 4, 4, 4,0x2026,0x0006, 1, 5,0x1010,ae_first_ts,
+ 0,0,&xtensa_mask40,0,0,0)
+ XTREG(181,728,27, 4, 4,0x2027,0x0006, 1, 5,0x1010,ae_nextoffset,
+ 0,0,&xtensa_mask41,0,0,0)
+ XTREG_END
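+
+/* Editor's note: the XTREG rows above follow the Tensilica/gdb column
+   layout (index, byte offset, bit size, gdb size, alignment, target
+   number, flags, coprocessor, type, group, name, fetch/store encodings,
+   mask pointer, ...); QEMU's overlay_tool.h consumes only a subset of
+   these columns (notably target number, type, group and size). This is
+   an editorial reading, not a normative description. */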
diff --git a/target/xtensa/core-test_kc705_be/xtensa-modules.c.inc b/target/xtensa/core-test_kc705_be/xtensa-modules.c.inc
new file mode 100644
index 000000000..bc7cf4482
--- /dev/null
+++ b/target/xtensa/core-test_kc705_be/xtensa-modules.c.inc
@@ -0,0 +1,45117 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2015 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "BR", 4, 0 },
+ { "ACCLO", 16, 0 },
+ { "ACCHI", 17, 0 },
+ { "M0", 32, 0 },
+ { "M1", 33, 0 },
+ { "M2", 34, 0 },
+ { "M3", 35, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "MMID", 89, 0 },
+ { "DDR", 104, 0 },
+ { "CONFIGID0", 176, 0 },
+ { "CONFIGID1", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "CCOMPARE2", 242, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EPC3", 179, 0 },
+ { "EPC4", 180, 0 },
+ { "EPC5", 181, 0 },
+ { "EPC6", 182, 0 },
+ { "EPC7", 183, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EXCSAVE3", 211, 0 },
+ { "EXCSAVE4", 212, 0 },
+ { "EXCSAVE5", 213, 0 },
+ { "EXCSAVE6", 214, 0 },
+ { "EXCSAVE7", 215, 0 },
+ { "EPS2", 194, 0 },
+ { "EPS3", 195, 0 },
+ { "EPS4", 196, 0 },
+ { "EPS5", 197, 0 },
+ { "EPS6", 198, 0 },
+ { "EPS7", 199, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "DBREAKA0", 144, 0 },
+ { "DBREAKC0", 160, 0 },
+ { "DBREAKA1", 145, 0 },
+ { "DBREAKC1", 161, 0 },
+ { "IBREAKA0", 128, 0 },
+ { "IBREAKA1", 129, 0 },
+ { "IBREAKENABLE", 96, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "PREFCTL", 40, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "THREADPTR", 231, 1 },
+ { "AE_OVF_SAR", 240, 1 },
+ { "AE_BITHEAD", 241, 1 },
+ { "AE_TS_FTS_BU_BP", 242, 1 },
+ { "AE_SD_NO", 243, 1 },
+ { "AE_CBEGIN0", 246, 1 },
+ { "AE_CEND0", 247, 1 },
+ { "EXPSTATE", 230, 1 }
+};
+
+#define NUM_SYSREGS 78
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 247
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 22, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EPC3", 32, 0 },
+ { "EPC4", 32, 0 },
+ { "EPC5", 32, 0 },
+ { "EPC6", 32, 0 },
+ { "EPC7", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EXCSAVE3", 32, 0 },
+ { "EXCSAVE4", 32, 0 },
+ { "EXCSAVE5", 32, 0 },
+ { "EXCSAVE6", 32, 0 },
+ { "EXCSAVE7", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EPS3", 15, 0 },
+ { "EPS4", 15, 0 },
+ { "EPS5", 15, 0 },
+ { "EPS6", 15, 0 },
+ { "EPS7", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "ACC", 40, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 22, 0 },
+ { "DBREAKA0", 32, 0 },
+ { "DBREAKC0", 8, 0 },
+ { "DBREAKA1", 32, 0 },
+ { "DBREAKC1", 8, 0 },
+ { "IBREAKA0", 32, 0 },
+ { "IBREAKA1", 32, 0 },
+ { "IBREAKENABLE", 2, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "CCOMPARE2", 32, 0 },
+ { "PREFCTL", 9, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID6", 1, 0 },
+ { "INSTPGSZID5", 1, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID6", 1, 0 },
+ { "DATAPGSZID5", 1, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 },
+ { "CPENABLE", 8, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "ERI_RAW_INTERLOCK", 1, 0 },
+ { "AE_OVERFLOW", 1, XTENSA_STATE_IS_SHARED_OR },
+ { "AE_SAR", 6, 0 },
+ { "AE_BITHEAD", 32, 0 },
+ { "AE_BITPTR", 4, 0 },
+ { "AE_BITSUSED", 4, 0 },
+ { "AE_TABLESIZE", 4, 0 },
+ { "AE_FIRST_TS", 4, 0 },
+ { "AE_NEXTOFFSET", 27, 0 },
+ { "AE_SEARCHDONE", 1, 0 },
+ { "AE_CBEGIN0", 32, 0 },
+ { "AE_CEND0", 32, 0 },
+ { "EXPSTATE", 32, XTENSA_STATE_IS_EXPORTED }
+};
+
+#define NUM_STATES 89
+
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EPC3,
+ STATE_EPC4,
+ STATE_EPC5,
+ STATE_EPC6,
+ STATE_EPC7,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EXCSAVE3,
+ STATE_EXCSAVE4,
+ STATE_EXCSAVE5,
+ STATE_EXCSAVE6,
+ STATE_EXCSAVE7,
+ STATE_EPS2,
+ STATE_EPS3,
+ STATE_EPS4,
+ STATE_EPS5,
+ STATE_EPS6,
+ STATE_EPS7,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSRING,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_THREADPTR,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_ACC,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_DBREAKA0,
+ STATE_DBREAKC0,
+ STATE_DBREAKA1,
+ STATE_DBREAKC1,
+ STATE_IBREAKA0,
+ STATE_IBREAKA1,
+ STATE_IBREAKENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_CCOMPARE2,
+ STATE_PREFCTL,
+ STATE_ASID3,
+ STATE_ASID2,
+ STATE_ASID1,
+ STATE_INSTPGSZID6,
+ STATE_INSTPGSZID5,
+ STATE_INSTPGSZID4,
+ STATE_DATAPGSZID6,
+ STATE_DATAPGSZID5,
+ STATE_DATAPGSZID4,
+ STATE_PTBASE,
+ STATE_CPENABLE,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_ERI_RAW_INTERLOCK,
+ STATE_AE_OVERFLOW,
+ STATE_AE_SAR,
+ STATE_AE_BITHEAD,
+ STATE_AE_BITPTR,
+ STATE_AE_BITSUSED,
+ STATE_AE_TABLESIZE,
+ STATE_AE_FIRST_TS,
+ STATE_AE_NEXTOFFSET,
+ STATE_AE_SEARCHDONE,
+ STATE_AE_CBEGIN0,
+ STATE_AE_CEND0,
+ STATE_EXPSTATE
+};
+
+
+/* Field definitions. */
+
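+/* Editor's note on the extraction idiom used throughout this section:
+   a getter of the form ((insn[0] << a) >> (32 - w)) isolates the w-bit
+   field whose most significant bit is bit (31 - a) of the 32-bit
+   insnbuf word, and the paired setter rewrites exactly those bits.
+   Multi-nibble fields such as sr and st simply concatenate two such
+   extractions. */
+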
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_t3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_tlo_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_tlo_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_w_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_w_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_rhi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_rhi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_ae_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ae_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ae_r10_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_ae_r32_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_ae_s3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ae_s3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ae_s_non_samt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_s_non_samt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_s3to1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
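+/* The *_Slot_inst16a and *_Slot_inst16b accessors that follow apply the
+   same pattern to the two 16-bit narrow (code-density) encodings; only
+   the bit positions differ from the 24-bit inst slot handled above.  */
+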
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
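+/* The remaining accessors cover fields of the TIE-defined ae_slot1, a
+   slot of the AudioEngine's wide instruction format.  The ftsfNNN names
+   appear to be tool-generated sub-field labels with no mnemonic meaning;
+   many of them gather exactly the same bits and differ only in name.  */
+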
+static unsigned
+Field_ftsf42ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf42ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_s3_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s3_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf333ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf333ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf43ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf43ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf359ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf359ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf45ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf45ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf32ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf32ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf33ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf33ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf31ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf31ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf30ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf30ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf60ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf60ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf355ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf355ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf58ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf58ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf354ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf354ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf37ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf37ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf22ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf22ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf126ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf126ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+}
+
+static unsigned
+Field_ftsf357ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf357ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf53ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf53ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf66ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf66ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf347ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf347ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf64ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 18) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf64ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x3c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_ftsf345ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf345ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf63ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 18) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf63ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf344ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf344ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf48ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf48ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf47ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf47ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf92ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf92ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf358ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf358ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf94ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf94ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 20) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf93ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf93ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ae_r10_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf90ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf90ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 22) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf55_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf55_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf121ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 9) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf121ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0x7f8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf91_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf91_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf99ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 9) >> 27);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf99ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0x7c0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf351_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf351_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf97ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 9) >> 26);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf97ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 21) >> 26;
+ insn[0] = (insn[0] & ~0x7e0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_ftsf348ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf348ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf96ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf96ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 20) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf23ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf23ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf34ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf34ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf40ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf40ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf38ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf38ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf24ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf24ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf26ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf26ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf20ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf20ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf21ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf21ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf25ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf25ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf35ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf35ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf41ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf41ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf54ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf54ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf356ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf356ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf29ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf29ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf27ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf27ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf28ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf28ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf36ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf36ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf57ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf57ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf62ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf62ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_s20_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf56ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf56ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf127ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf127ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf350ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf350ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf114ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf114ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf124ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf124ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf352ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf352ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf118ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 9) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf118ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf111ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf111ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf113ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf113ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf106ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf106ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf107ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf107ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf109ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf109ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf115ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf115ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf120ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 9) >> 23);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf120ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 20) >> 23;
+ insn[0] = (insn[0] & ~0x7fc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf123ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf123ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+ tie_t = (val << 23) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf349ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf349ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf110ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf110ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf117ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf117ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf112ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf112ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf116ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf116ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf68ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf68ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf50ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf50ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf52ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf52ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf51ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf51ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf49ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf49ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ae_r20_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf343ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 18) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf343ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf125ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf125ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf342ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf342ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf108ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf108ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ae_r32_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_mul32x24fld_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ae_mul32x24fld_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf161ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf161ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf155ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf155ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf176ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf176ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf159ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf159ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf156ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf156ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf168ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf168ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf158ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf158ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf154ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf154ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf164ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf164ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf157ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf157ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf153ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf153ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf162ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf162ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf134ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf134ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf192ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf192ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf143ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf143ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf133ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf133ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf160ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf160ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf142ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf142ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf131ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf131ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf144ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf144ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf141ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf141ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf61_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf61_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf334ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf334ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf136ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf136ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf139ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf139ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf177ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf177ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf171ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf171ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf185ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf185ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf175ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf175ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf172ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf172ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf183ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf183ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf174ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf174ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf170ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf170ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf182ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf182ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf173ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf173ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf169ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf169ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf181ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf181ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf140ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf140ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf152ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf152ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf138ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf138ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf148ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf148ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf137ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf137ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf146ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf146ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf135ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf135ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf145ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf145ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf179ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf179ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf189ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf189ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf184ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf184ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf187ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf187ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf180ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf180ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf188ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf188ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf178ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf178ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf186ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf186ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf76ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf76ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf75ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf75ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf79ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf79ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf78ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf78ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf80ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf80ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf81ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf81ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf83ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf83ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf82ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf82ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf69ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf69ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
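+/*
+ * Note: the ftsf69..ftsf89 accessors above and below all share one
+ * layout, a 9-bit field split across bits [22:19] and [11:7]; the
+ * generator emits a separate but identical get/set pair for every
+ * named field, which is why the bodies repeat verbatim.
+ */
+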
+static unsigned
+Field_ftsf103ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf103ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf337ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf337ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf71ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf71ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf70ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf70ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf77ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf77ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf73ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf73ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf74ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf74ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf72ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf72ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf85ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf85ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf84ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf84ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf86ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf86ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf102ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf102ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
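+/*
+ * Single-chunk case: Field_ftsf102ae occupies bits [11:8] only, so
+ * _set(insn, v) followed by _get(insn) yields v & 0xf; any wider input
+ * bits are silently truncated by the (val << 28) >> 28 step.
+ */
+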
+static unsigned
+Field_ftsf336ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf336ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf89ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf89ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf87ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf87ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf88ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf88ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 23) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf101ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 5) | ((insn[0] << 20) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf101ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80) | (tie_t << 7);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf335_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf335_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+}
+
+static unsigned
+Field_t_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
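+/*
+ * Field_t here is presumably the core 4-bit "t" operand field, mapped
+ * to bits [22:19] of the ae_slot1 encoding (mask 0x780000).
+ */
+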
+static unsigned
+Field_ftsf196ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf196ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf208ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf208ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf341ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf341ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf200ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf200ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf340ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf340ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf195ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf195ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf198ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf198ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf197ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf197ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf199ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf199ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf201ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf201ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf204ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf204ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf202ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf202ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf203ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf203ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf205ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf205ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf207ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf207ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf206ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf206ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf210ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf210ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf339ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf339ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf128ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf128ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf130ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf130ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf129ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf129ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf132ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf132ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf147ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf147ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf150ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf150ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf149ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf149ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf151ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf151ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf163ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf163ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf166ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf166ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf165ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf165ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf167ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf167ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf190ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf190ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf193ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf193ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf191ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf191ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf194ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf194ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld132ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld132ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld69_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld69_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld68_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld68_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld19_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld19_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld22_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld22_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_op0_s3_s3_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s3_s3_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld131ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld131ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld74_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld74_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld66_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld66_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld91_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld91_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld90_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld90_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld88_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld88_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld65_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld65_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld24_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld24_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld147ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld147ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld79_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld79_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
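+/*
+ * Field accessors for the ae_slot0 slot follow.  The encoding scheme is
+ * the same as for ae_slot1 above; only the bit positions and masks
+ * differ.
+ */
+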
+static unsigned
+Field_r_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 9) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780000) | (tie_t << 19);
+}
+
+static unsigned
+Field_op0_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm8_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+}
+
+static unsigned
+Field_t_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf280_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf280_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf288_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 5) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf288_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000000) | (tie_t << 26);
+}
+
+static unsigned
+Field_ftsf360ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 6) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf360ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf213ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf213ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf212ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 22) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf212ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x380) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf211ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf211ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf279ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf279ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 21) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+ tie_t = (val << 17) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_s8_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf313ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_ftsf313ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf282ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf282ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf361ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 9) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf361ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+ tie_t = (val << 26) >> 31;
+ insn[0] = (insn[0] & ~0x400000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf281ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 3) | ((insn[0] << 10) >> 29);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf281ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 21) >> 29;
+ insn[0] = (insn[0] & ~0x380000) | (tie_t << 19);
+ tie_t = (val << 17) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf287ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 5) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf287ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+ tie_t = (val << 22) >> 31;
+ insn[0] = (insn[0] & ~0x4000000) | (tie_t << 26);
+}
+
+static unsigned
+Field_ftsf368ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 6) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf368ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x3e00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf285ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 5) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf285ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0x6000000) | (tie_t << 25);
+}
+
+static unsigned
+Field_ftsf366ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 7) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf366ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x1e00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf284ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 5) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf284ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+ tie_t = (val << 20) >> 29;
+ insn[0] = (insn[0] & ~0x7000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf364ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 8) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf364ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0xe00000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf297ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 14) | ((insn[0] << 11) >> 18);
+ return tie_t;
+}
+
+static void
+Field_ftsf297ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 18) >> 18;
+ insn[0] = (insn[0] & ~0x1fff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf309_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf309_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf327ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 5) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf327ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0x6000000) | (tie_t << 25);
+}
+
+static unsigned
+Field_ftsf363ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 7) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf363ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf214ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 18) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf214ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x3f80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf298ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf298ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf373ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf373ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf302ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf302ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf376ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf376ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ftsf300ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf300ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 21) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_ftsf370ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 10) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf370ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x300000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ae_s20_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 5) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf378ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 8) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf378ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf80000) | (tie_t << 19);
+}
+
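+/*
+ * Many of the ftsf2xx accessors below are structurally identical (one
+ * bit at offset 19 plus an 8-bit field at offset 7); they are emitted
+ * separately, presumably because each named field is a distinct entity
+ * in the TIE description this file was generated from.
+ */
+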
+static unsigned
+Field_ftsf217ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf217ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf219ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf219ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf220ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf220ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf221ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf221ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf227ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf227ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf228ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf228ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf229ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf229ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf231ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf231ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf232ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf232ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf234ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf234ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf238ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf238ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf233ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf233ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf223ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf223ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf224ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf224ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf226ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf226ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf225ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf225ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf240ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf240ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf242ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf242ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf241ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf241ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf243ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf243ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf235ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf235ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf236ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf236ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf237ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf237ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf239ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf239ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf260ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf260ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf295ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf295ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf247ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf247ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf249ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf249ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf268ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf268ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf263ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf263ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf265ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf265ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf266ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf266ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf259ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf259ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf261ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf261ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf262ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf262ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf264ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf264ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf245ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf245ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf246ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf246ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf248ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf248ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf252ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf252ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf256ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf256ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf255ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf255ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf257ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf257ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf258ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf258ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf250ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf250ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf251ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf251ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf253ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf253ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf254ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf254ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf305ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf305ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf306ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf306ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf307ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf307ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf310ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf310ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf304ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf304ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf308ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf308ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ae_r10_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
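+/*
+ * Fields need not be contiguous in the slot encoding.  For split
+ * fields such as Field_ftsf329ae_slot0 below, the get gathers each
+ * slice separately and concatenates them MSB first -- here bits
+ * [14:12], bit 10 and bit 8 of insn[0] -- and the matching set
+ * scatters the value back into the same positions.
+ */
+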
+static unsigned
+Field_ftsf329ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf329ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf379ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf379ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf272ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf272ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf273ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf273ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf274ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf274ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf276ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf276ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf269ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf269ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf271ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf271ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf275ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf275ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf270ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf270ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf296ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 12) >> 19);
+ return tie_t;
+}
+
+static void
+Field_ftsf296ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0xfff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf315_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf315_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld52_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld52_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+}
+
+static unsigned
+Field_combined1e9fefee_fld96_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld96_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_combined1e9fefee_fld98_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld98_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld49_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld39_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 22) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld50_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x200) | (tie_t << 9);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld40_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf362_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf362_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
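+/*
+ * op0_s4_s4_s4 occupies the low seven bits [6:0] of the slot and is
+ * presumably the opcode/format selector for this slot format; it is
+ * extracted with the same shift idiom as the operand fields above.
+ */
+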
+static unsigned
+Field_op0_s4_s4_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s4_s4_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+}
+
+static unsigned
+Field_combined1e9fefee_fld109ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 15) >> 30);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld109ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x18000) | (tie_t << 15);
+}
+
+static unsigned
+Field_combined1e9fefee_fld108ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld108ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld47_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld47_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_combined1e9fefee_fld107ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld107ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_combined1e9fefee_fld106ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 14) >> 29);
+ return tie_t;
+}
+
+static void
+Field_combined1e9fefee_fld106ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf244ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf244ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf267ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf267ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf290ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf290ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf289ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf289ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 19) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf230ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf230ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf222ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf222ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf218ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf218ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf215ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf215ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ftsf314ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_ftsf314ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+ tie_t = (val << 19) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf323ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf323ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf322ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf322ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf311ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 10) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf311ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x200000) | (tie_t << 21);
+}
+
+static unsigned
+Field_ftsf386ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 5) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf386ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0x7c00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_ftsf278ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf278ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf292ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 5) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf292ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 21) >> 29;
+ insn[0] = (insn[0] & ~0x7000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf382ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf382ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf291ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf291ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf294ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 5) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf294ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000000) | (tie_t << 26);
+}
+
+static unsigned
+Field_ftsf383ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 6) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf383ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf293ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 5) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 17) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf293ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00) | (tie_t << 8);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x6000000) | (tie_t << 25);
+}
+
+static unsigned
+Field_ftsf384ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 7) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf384ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x1800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf312ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 15) >> 22);
+ return tie_t;
+}
+
+static void
+Field_ftsf312ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x1ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf320ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf320ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf387ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf387ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+ tie_t = (val << 25) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf319ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 10) | ((insn[0] << 15) >> 22);
+ return tie_t;
+}
+
+static void
+Field_ftsf319ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x1ff80) | (tie_t << 7);
+ tie_t = (val << 21) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf388ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 5) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf388ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x7000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf317ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ tie_t = (tie_t << 10) | ((insn[0] << 15) >> 22);
+ return tie_t;
+}
+
+static void
+Field_ftsf317ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0x1ff80) | (tie_t << 7);
+ tie_t = (val << 21) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+ tie_t = (val << 20) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf389ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 5) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 14) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf389ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20000) | (tie_t << 17);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x7000000) | (tie_t << 24);
+}
+
+static unsigned
+Field_ftsf324ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf324ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf325ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf325ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf328ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 5) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf328ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x4000000) | (tie_t << 26);
+}
+
+static unsigned
+Field_ftsf316ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 12) | ((insn[0] << 13) >> 20);
+ return tie_t;
+}
+
+static void
+Field_ftsf316ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff80) | (tie_t << 7);
+ tie_t = (val << 19) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf326ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 5) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf326ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 23) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+ tie_t = (val << 21) >> 30;
+ insn[0] = (insn[0] & ~0x6000000) | (tie_t << 25);
+}
+
+static unsigned
+Field_ftsf277ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf277ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+ tie_t = (val << 22) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+ tie_t = (val << 18) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld123_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 6) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld123_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000000) | (tie_t << 25);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld121_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 5) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld121_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000000) | (tie_t << 26);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld28_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld127_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_s4_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s4_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld149ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 17) >> 24);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld149ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0x7f80) | (tie_t << 7);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld137ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld137ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld144ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld144ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld138ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld138ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld143ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld143ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld134ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld134ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld140ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld140ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld135ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld135ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld142ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld142ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld136ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld136ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld141ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld141ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld133ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld133ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld139ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld139ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld146ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld146ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld46_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld46_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld145ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld145ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
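+/*
+ * The accessors from here on cover the core-ISA operand fields (t, r,
+ * s, bbi, the imm* immediates, shift amounts and so on) in the
+ * standard inst, inst16a and inst16b slots as well as in ae_slot0;
+ * the encoding idiom is unchanged, only the bit positions differ
+ * from slot to slot.
+ */
+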
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_bbi_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 5) >> 27);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00000) | (tie_t << 22);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 20) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm12_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 5) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 17) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x7800) | (tie_t << 11);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0x7f80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+}
+
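+/*
+ * Minimal usage sketch (assuming xtensa_insnbuf is a uint32 array of
+ * at least one word, as the single-word accesses above suggest):
+ *
+ *     uint32 word[1] = { 0 };
+ *     Field_imm8_Slot_inst_set(word, 0x5a);    (writes bits [7:0])
+ *     assert(Field_imm8_Slot_inst_get(word) == 0x5a);
+ *
+ * For imm8 the get reduces to (insn[0] & 0xff), so a set followed by
+ * a get round-trips any 8-bit value; the same holds for every field
+ * whose set masks cover all of its get slices.
+ */
+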
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_s_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12b_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 9) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0x7ff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 16) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm16_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 9) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0x7fff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 14) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0x3ffff) | (tie_t << 0);
+}
+
+static unsigned
+Field_offset_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 5) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0x7fffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_op2_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 13) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78000) | (tie_t << 15);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
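+/*
+ * The shift-amount fields that follow are five bits wide but split:
+ * sae, sal, sargt and sas each concatenate a single extension bit
+ * (sae4, sa4, sas4) with a 4-bit nibble of the inst word, matching
+ * the one-bit helpers defined alongside them.
+ */
+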
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sae_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 17) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00) | (tie_t << 10);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sal_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
+static unsigned
+Field_sargt_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 13) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c000) | (tie_t << 14);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sas_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 5) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c00000) | (tie_t << 22);
+}
+
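+/*
+ * sr and st are 8-bit composites of two register nibbles: in the
+ * 16-bit slots below, sr concatenates s (bits [7:4]) with r (bits
+ * [3:0]) and st concatenates s with t (bits [11:8]).
+ */
+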
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 14) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30000) | (tie_t << 16);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_rbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_rbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_tbit2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_tbit2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_y_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 13) >> 31);
+ return tie_t;
+}
+
+static void
+Field_y_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40000) | (tie_t << 18);
+}
+
+static unsigned
+Field_x_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 21) >> 31);
+ return tie_t;
+}
+
+static void
+Field_x_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x400) | (tie_t << 10);
+}
+
+static unsigned
+Field_t2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_t2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_t2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_t2_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_s2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_s2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_r2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_r2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 28) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe) | (tie_t << 1);
+}
+
+static unsigned
+Field_r2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 28) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe) | (tie_t << 1);
+}
+
+static unsigned
+Field_t4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_t4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_t4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_s4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_s4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 9) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600000) | (tie_t << 21);
+}
+
+static unsigned
+Field_r4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_r4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_r4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+}
+
+static unsigned
+Field_t8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_t8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_t8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_s8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_r8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_r8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_r8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ae_r32_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 11) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180000) | (tie_t << 19);
+}
+
+static unsigned
+Field_ae_samt_s_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 12) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ae_samt_s_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0xfc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ae_samt_s_t_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ tie_t = (tie_t << 2) | ((insn[0] << 13) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_samt_s_t_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x60000) | (tie_t << 17);
+ tie_t = (val << 26) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ae_r20_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_ae_r20_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 9) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700000) | (tie_t << 20);
+}
+
+static unsigned
+Field_ae_s20_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_fld_ohba_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ae_fld_ohba_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_ae_fld_ohba2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ae_fld_ohba2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf11_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf11_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_ftsf11_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf11_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf11_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 8) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf11_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x800000) | (tie_t << 23);
+}
+
+static unsigned
+Field_ftsf12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf12_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf12_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf13_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 12) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf13_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80000) | (tie_t << 19);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld37_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld37_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_combined2c0b5f72_fld148ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 14) >> 21);
+ return tie_t;
+}
+
+static void
+Field_combined2c0b5f72_fld148ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0x3ff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_bitindex_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_bitindex_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_bitindex_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 5) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bitindex_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x7800000) | (tie_t << 23);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s3to1_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_s3to1_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 13) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s3to1_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70000) | (tie_t << 16);
+}
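+
+/* The Implicit_Field accessors below handle operands that are not
+   encoded in the instruction word at all: each _get returns a fixed
+   register number (AR 0/4/8/12, MR 0-3, or boolean register 0) and
+   Implicit_Field_set discards its value, so encoding an implicit
+   operand is a no-op.  */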
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_mr0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_mr1_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 1;
+}
+
+static unsigned
+Implicit_Field_mr2_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 2;
+}
+
+static unsigned
+Implicit_Field_mr3_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 3;
+}
+
+static unsigned
+Implicit_Field_bt16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_bs16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_br16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_brall_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_r3,
+ FIELD_rbit2,
+ FIELD_rhi,
+ FIELD_t3,
+ FIELD_tbit2,
+ FIELD_tlo,
+ FIELD_w,
+ FIELD_y,
+ FIELD_x,
+ FIELD_t2,
+ FIELD_s2,
+ FIELD_r2,
+ FIELD_t4,
+ FIELD_s4,
+ FIELD_r4,
+ FIELD_t8,
+ FIELD_s8,
+ FIELD_r8,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_ae_r3,
+ FIELD_ae_s_non_samt,
+ FIELD_ae_s3,
+ FIELD_ae_r32,
+ FIELD_ae_samt_s_t,
+ FIELD_ae_r20,
+ FIELD_ae_r10,
+ FIELD_ae_s20,
+ FIELD_ae_fld_ohba,
+ FIELD_ae_fld_ohba2,
+ FIELD_op0_s3,
+ FIELD_ftsf11,
+ FIELD_ftsf12,
+ FIELD_ftsf13,
+ FIELD_ftsf20ae_slot1,
+ FIELD_ftsf21ae_slot1,
+ FIELD_ftsf22ae_slot1,
+ FIELD_ftsf23ae_slot1,
+ FIELD_ftsf24ae_slot1,
+ FIELD_ftsf25ae_slot1,
+ FIELD_ftsf26ae_slot1,
+ FIELD_ftsf27ae_slot1,
+ FIELD_ftsf28ae_slot1,
+ FIELD_ftsf29ae_slot1,
+ FIELD_ftsf30ae_slot1,
+ FIELD_ftsf31ae_slot1,
+ FIELD_ftsf32ae_slot1,
+ FIELD_ftsf33ae_slot1,
+ FIELD_ftsf34ae_slot1,
+ FIELD_ftsf35ae_slot1,
+ FIELD_ftsf36ae_slot1,
+ FIELD_ftsf37ae_slot1,
+ FIELD_ftsf38ae_slot1,
+ FIELD_ftsf40ae_slot1,
+ FIELD_ftsf41ae_slot1,
+ FIELD_ftsf42ae_slot1,
+ FIELD_ftsf43ae_slot1,
+ FIELD_ftsf45ae_slot1,
+ FIELD_ftsf47ae_slot1,
+ FIELD_ftsf48ae_slot1,
+ FIELD_ftsf49ae_slot1,
+ FIELD_ftsf50ae_slot1,
+ FIELD_ftsf51ae_slot1,
+ FIELD_ftsf52ae_slot1,
+ FIELD_ftsf53ae_slot1,
+ FIELD_ftsf54ae_slot1,
+ FIELD_ftsf55,
+ FIELD_ftsf56ae_slot1,
+ FIELD_ftsf57ae_slot1,
+ FIELD_ftsf58ae_slot1,
+ FIELD_ftsf60ae_slot1,
+ FIELD_ftsf61,
+ FIELD_ftsf62ae_slot1,
+ FIELD_ftsf63ae_slot1,
+ FIELD_ftsf64ae_slot1,
+ FIELD_ftsf66ae_slot1,
+ FIELD_ftsf68ae_slot1,
+ FIELD_ftsf69ae_slot1,
+ FIELD_ftsf70ae_slot1,
+ FIELD_ftsf71ae_slot1,
+ FIELD_ftsf72ae_slot1,
+ FIELD_ftsf73ae_slot1,
+ FIELD_ftsf74ae_slot1,
+ FIELD_ftsf75ae_slot1,
+ FIELD_ftsf76ae_slot1,
+ FIELD_ftsf77ae_slot1,
+ FIELD_ftsf78ae_slot1,
+ FIELD_ftsf79ae_slot1,
+ FIELD_ftsf80ae_slot1,
+ FIELD_ftsf81ae_slot1,
+ FIELD_ftsf82ae_slot1,
+ FIELD_ftsf83ae_slot1,
+ FIELD_ftsf84ae_slot1,
+ FIELD_ftsf85ae_slot1,
+ FIELD_ftsf86ae_slot1,
+ FIELD_ftsf87ae_slot1,
+ FIELD_ftsf88ae_slot1,
+ FIELD_ftsf89ae_slot1,
+ FIELD_ftsf90ae_slot1,
+ FIELD_ftsf91,
+ FIELD_ftsf92ae_slot1,
+ FIELD_ftsf93ae_slot1,
+ FIELD_ftsf94ae_slot1,
+ FIELD_ftsf96ae_slot1,
+ FIELD_ftsf97ae_slot1,
+ FIELD_ftsf99ae_slot1,
+ FIELD_ftsf101ae_slot1,
+ FIELD_ftsf102ae_slot1,
+ FIELD_ftsf103ae_slot1,
+ FIELD_ftsf106ae_slot1,
+ FIELD_ftsf107ae_slot1,
+ FIELD_ftsf108ae_slot1,
+ FIELD_ftsf109ae_slot1,
+ FIELD_ftsf110ae_slot1,
+ FIELD_ftsf111ae_slot1,
+ FIELD_ftsf112ae_slot1,
+ FIELD_ftsf113ae_slot1,
+ FIELD_ftsf114ae_slot1,
+ FIELD_ftsf115ae_slot1,
+ FIELD_ftsf116ae_slot1,
+ FIELD_ftsf117ae_slot1,
+ FIELD_ftsf118ae_slot1,
+ FIELD_ftsf120ae_slot1,
+ FIELD_ftsf121ae_slot1,
+ FIELD_ftsf123ae_slot1,
+ FIELD_ftsf124ae_slot1,
+ FIELD_ftsf125ae_slot1,
+ FIELD_ftsf126ae_slot1,
+ FIELD_ftsf127ae_slot1,
+ FIELD_ftsf128ae_slot1,
+ FIELD_ftsf129ae_slot1,
+ FIELD_ftsf130ae_slot1,
+ FIELD_ftsf131ae_slot1,
+ FIELD_ftsf132ae_slot1,
+ FIELD_ftsf133ae_slot1,
+ FIELD_ftsf134ae_slot1,
+ FIELD_ftsf135ae_slot1,
+ FIELD_ftsf136ae_slot1,
+ FIELD_ftsf137ae_slot1,
+ FIELD_ftsf138ae_slot1,
+ FIELD_ftsf139ae_slot1,
+ FIELD_ftsf140ae_slot1,
+ FIELD_ftsf141ae_slot1,
+ FIELD_ftsf142ae_slot1,
+ FIELD_ftsf143ae_slot1,
+ FIELD_ftsf144ae_slot1,
+ FIELD_ftsf145ae_slot1,
+ FIELD_ftsf146ae_slot1,
+ FIELD_ftsf147ae_slot1,
+ FIELD_ftsf148ae_slot1,
+ FIELD_ftsf149ae_slot1,
+ FIELD_ftsf150ae_slot1,
+ FIELD_ftsf151ae_slot1,
+ FIELD_ftsf152ae_slot1,
+ FIELD_ftsf153ae_slot1,
+ FIELD_ftsf154ae_slot1,
+ FIELD_ftsf155ae_slot1,
+ FIELD_ftsf156ae_slot1,
+ FIELD_ftsf157ae_slot1,
+ FIELD_ftsf158ae_slot1,
+ FIELD_ftsf159ae_slot1,
+ FIELD_ftsf160ae_slot1,
+ FIELD_ftsf161ae_slot1,
+ FIELD_ftsf162ae_slot1,
+ FIELD_ftsf163ae_slot1,
+ FIELD_ftsf164ae_slot1,
+ FIELD_ftsf165ae_slot1,
+ FIELD_ftsf166ae_slot1,
+ FIELD_ftsf167ae_slot1,
+ FIELD_ftsf168ae_slot1,
+ FIELD_ftsf169ae_slot1,
+ FIELD_ftsf170ae_slot1,
+ FIELD_ftsf171ae_slot1,
+ FIELD_ftsf172ae_slot1,
+ FIELD_ftsf173ae_slot1,
+ FIELD_ftsf174ae_slot1,
+ FIELD_ftsf175ae_slot1,
+ FIELD_ftsf176ae_slot1,
+ FIELD_ftsf177ae_slot1,
+ FIELD_ftsf178ae_slot1,
+ FIELD_ftsf179ae_slot1,
+ FIELD_ftsf180ae_slot1,
+ FIELD_ftsf181ae_slot1,
+ FIELD_ftsf182ae_slot1,
+ FIELD_ftsf183ae_slot1,
+ FIELD_ftsf184ae_slot1,
+ FIELD_ftsf185ae_slot1,
+ FIELD_ftsf186ae_slot1,
+ FIELD_ftsf187ae_slot1,
+ FIELD_ftsf188ae_slot1,
+ FIELD_ftsf189ae_slot1,
+ FIELD_ftsf190ae_slot1,
+ FIELD_ftsf191ae_slot1,
+ FIELD_ftsf192ae_slot1,
+ FIELD_ftsf193ae_slot1,
+ FIELD_ftsf194ae_slot1,
+ FIELD_ftsf195ae_slot1,
+ FIELD_ftsf196ae_slot1,
+ FIELD_ftsf197ae_slot1,
+ FIELD_ftsf198ae_slot1,
+ FIELD_ftsf199ae_slot1,
+ FIELD_ftsf200ae_slot1,
+ FIELD_ftsf201ae_slot1,
+ FIELD_ftsf202ae_slot1,
+ FIELD_ftsf203ae_slot1,
+ FIELD_ftsf204ae_slot1,
+ FIELD_ftsf205ae_slot1,
+ FIELD_ftsf206ae_slot1,
+ FIELD_ftsf207ae_slot1,
+ FIELD_ftsf208ae_slot1,
+ FIELD_ftsf210ae_slot1,
+ FIELD_ftsf333ae_slot1,
+ FIELD_ftsf334ae_slot1,
+ FIELD_ftsf335,
+ FIELD_ftsf336ae_slot1,
+ FIELD_ftsf337ae_slot1,
+ FIELD_ftsf339ae_slot1,
+ FIELD_ftsf340ae_slot1,
+ FIELD_ftsf341ae_slot1,
+ FIELD_ftsf342ae_slot1,
+ FIELD_ftsf343ae_slot1,
+ FIELD_ftsf344ae_slot1,
+ FIELD_ftsf345ae_slot1,
+ FIELD_ftsf347ae_slot1,
+ FIELD_ftsf348ae_slot1,
+ FIELD_ftsf349ae_slot1,
+ FIELD_ftsf350ae_slot1,
+ FIELD_ftsf351,
+ FIELD_ftsf352ae_slot1,
+ FIELD_ftsf354ae_slot1,
+ FIELD_ftsf355ae_slot1,
+ FIELD_ftsf356ae_slot1,
+ FIELD_ftsf357ae_slot1,
+ FIELD_ftsf358ae_slot1,
+ FIELD_ftsf359ae_slot1,
+ FIELD_op0_s4,
+ FIELD_ftsf211ae_slot0,
+ FIELD_ftsf212ae_slot0,
+ FIELD_ftsf213ae_slot0,
+ FIELD_ftsf214ae_slot0,
+ FIELD_ftsf215ae_slot0,
+ FIELD_ftsf217ae_slot0,
+ FIELD_ftsf218ae_slot0,
+ FIELD_ftsf219ae_slot0,
+ FIELD_ftsf220ae_slot0,
+ FIELD_ftsf221ae_slot0,
+ FIELD_ftsf222ae_slot0,
+ FIELD_ftsf223ae_slot0,
+ FIELD_ftsf224ae_slot0,
+ FIELD_ftsf225ae_slot0,
+ FIELD_ftsf226ae_slot0,
+ FIELD_ftsf227ae_slot0,
+ FIELD_ftsf228ae_slot0,
+ FIELD_ftsf229ae_slot0,
+ FIELD_ftsf230ae_slot0,
+ FIELD_ftsf231ae_slot0,
+ FIELD_ftsf232ae_slot0,
+ FIELD_ftsf233ae_slot0,
+ FIELD_ftsf234ae_slot0,
+ FIELD_ftsf235ae_slot0,
+ FIELD_ftsf236ae_slot0,
+ FIELD_ftsf237ae_slot0,
+ FIELD_ftsf238ae_slot0,
+ FIELD_ftsf239ae_slot0,
+ FIELD_ftsf240ae_slot0,
+ FIELD_ftsf241ae_slot0,
+ FIELD_ftsf242ae_slot0,
+ FIELD_ftsf243ae_slot0,
+ FIELD_ftsf244ae_slot0,
+ FIELD_ftsf245ae_slot0,
+ FIELD_ftsf246ae_slot0,
+ FIELD_ftsf247ae_slot0,
+ FIELD_ftsf248ae_slot0,
+ FIELD_ftsf249ae_slot0,
+ FIELD_ftsf250ae_slot0,
+ FIELD_ftsf251ae_slot0,
+ FIELD_ftsf252ae_slot0,
+ FIELD_ftsf253ae_slot0,
+ FIELD_ftsf254ae_slot0,
+ FIELD_ftsf255ae_slot0,
+ FIELD_ftsf256ae_slot0,
+ FIELD_ftsf257ae_slot0,
+ FIELD_ftsf258ae_slot0,
+ FIELD_ftsf259ae_slot0,
+ FIELD_ftsf260ae_slot0,
+ FIELD_ftsf261ae_slot0,
+ FIELD_ftsf262ae_slot0,
+ FIELD_ftsf263ae_slot0,
+ FIELD_ftsf264ae_slot0,
+ FIELD_ftsf265ae_slot0,
+ FIELD_ftsf266ae_slot0,
+ FIELD_ftsf267ae_slot0,
+ FIELD_ftsf268ae_slot0,
+ FIELD_ftsf269ae_slot0,
+ FIELD_ftsf270ae_slot0,
+ FIELD_ftsf271ae_slot0,
+ FIELD_ftsf272ae_slot0,
+ FIELD_ftsf273ae_slot0,
+ FIELD_ftsf274ae_slot0,
+ FIELD_ftsf275ae_slot0,
+ FIELD_ftsf276ae_slot0,
+ FIELD_ftsf277ae_slot0,
+ FIELD_ftsf278ae_slot0,
+ FIELD_ftsf279ae_slot0,
+ FIELD_ftsf280,
+ FIELD_ftsf281ae_slot0,
+ FIELD_ftsf282ae_slot0,
+ FIELD_ftsf284ae_slot0,
+ FIELD_ftsf285ae_slot0,
+ FIELD_ftsf287ae_slot0,
+ FIELD_ftsf288,
+ FIELD_ftsf289ae_slot0,
+ FIELD_ftsf290ae_slot0,
+ FIELD_ftsf291ae_slot0,
+ FIELD_ftsf292ae_slot0,
+ FIELD_ftsf293ae_slot0,
+ FIELD_ftsf294ae_slot0,
+ FIELD_ftsf295ae_slot0,
+ FIELD_ftsf296ae_slot0,
+ FIELD_ftsf297ae_slot0,
+ FIELD_ftsf298ae_slot0,
+ FIELD_ftsf300ae_slot0,
+ FIELD_ftsf302ae_slot0,
+ FIELD_ftsf304ae_slot0,
+ FIELD_ftsf305ae_slot0,
+ FIELD_ftsf306ae_slot0,
+ FIELD_ftsf307ae_slot0,
+ FIELD_ftsf308ae_slot0,
+ FIELD_ftsf309,
+ FIELD_ftsf310ae_slot0,
+ FIELD_ftsf311ae_slot0,
+ FIELD_ftsf312ae_slot0,
+ FIELD_ftsf313ae_slot0,
+ FIELD_ftsf314ae_slot0,
+ FIELD_ftsf315,
+ FIELD_ftsf316ae_slot0,
+ FIELD_ftsf317ae_slot0,
+ FIELD_ftsf319ae_slot0,
+ FIELD_ftsf320ae_slot0,
+ FIELD_ftsf322ae_slot0,
+ FIELD_ftsf323ae_slot0,
+ FIELD_ftsf324ae_slot0,
+ FIELD_ftsf325ae_slot0,
+ FIELD_ftsf326ae_slot0,
+ FIELD_ftsf327ae_slot0,
+ FIELD_ftsf328ae_slot0,
+ FIELD_ftsf329ae_slot0,
+ FIELD_ftsf360ae_slot0,
+ FIELD_ftsf361ae_slot0,
+ FIELD_ftsf362,
+ FIELD_ftsf363ae_slot0,
+ FIELD_ftsf364ae_slot0,
+ FIELD_ftsf366ae_slot0,
+ FIELD_ftsf368ae_slot0,
+ FIELD_ftsf370ae_slot0,
+ FIELD_ftsf373ae_slot0,
+ FIELD_ftsf376ae_slot0,
+ FIELD_ftsf378ae_slot0,
+ FIELD_ftsf379ae_slot0,
+ FIELD_ftsf382ae_slot0,
+ FIELD_ftsf383ae_slot0,
+ FIELD_ftsf384ae_slot0,
+ FIELD_ftsf386ae_slot0,
+ FIELD_ftsf387ae_slot0,
+ FIELD_ftsf388ae_slot0,
+ FIELD_ftsf389ae_slot0,
+ FIELD_ae_mul32x24fld,
+ FIELD_op0_s4_s4,
+ FIELD_combined2c0b5f72_fld28,
+ FIELD_combined2c0b5f72_fld37,
+ FIELD_combined2c0b5f72_fld39,
+ FIELD_combined2c0b5f72_fld40,
+ FIELD_combined2c0b5f72_fld46,
+ FIELD_combined2c0b5f72_fld47,
+ FIELD_combined2c0b5f72_fld49,
+ FIELD_combined2c0b5f72_fld50,
+ FIELD_combined2c0b5f72_fld52,
+ FIELD_combined2c0b5f72_fld121,
+ FIELD_combined2c0b5f72_fld123,
+ FIELD_combined2c0b5f72_fld127,
+ FIELD_combined2c0b5f72_fld133ae_slot0,
+ FIELD_combined2c0b5f72_fld134ae_slot0,
+ FIELD_combined2c0b5f72_fld135ae_slot0,
+ FIELD_combined2c0b5f72_fld136ae_slot0,
+ FIELD_combined2c0b5f72_fld137ae_slot0,
+ FIELD_combined2c0b5f72_fld138ae_slot0,
+ FIELD_combined2c0b5f72_fld139ae_slot0,
+ FIELD_combined2c0b5f72_fld140ae_slot0,
+ FIELD_combined2c0b5f72_fld141ae_slot0,
+ FIELD_combined2c0b5f72_fld142ae_slot0,
+ FIELD_combined2c0b5f72_fld143ae_slot0,
+ FIELD_combined2c0b5f72_fld144ae_slot0,
+ FIELD_combined2c0b5f72_fld145ae_slot0,
+ FIELD_combined2c0b5f72_fld146ae_slot0,
+ FIELD_combined2c0b5f72_fld148ae_slot0,
+ FIELD_combined2c0b5f72_fld149ae_slot0,
+ FIELD_op0_s4_s4_s4,
+ FIELD_combined1e9fefee_fld96,
+ FIELD_combined1e9fefee_fld98,
+ FIELD_combined1e9fefee_fld106ae_slot0,
+ FIELD_combined1e9fefee_fld107ae_slot0,
+ FIELD_combined1e9fefee_fld108ae_slot0,
+ FIELD_combined1e9fefee_fld109ae_slot0,
+ FIELD_op0_s3_s3,
+ FIELD_combined2c0b5f72_fld19,
+ FIELD_combined2c0b5f72_fld22,
+ FIELD_combined2c0b5f72_fld24,
+ FIELD_combined2c0b5f72_fld65,
+ FIELD_combined2c0b5f72_fld66,
+ FIELD_combined2c0b5f72_fld68,
+ FIELD_combined2c0b5f72_fld69,
+ FIELD_combined2c0b5f72_fld74,
+ FIELD_combined2c0b5f72_fld79,
+ FIELD_combined2c0b5f72_fld88,
+ FIELD_combined2c0b5f72_fld90,
+ FIELD_combined2c0b5f72_fld91,
+ FIELD_combined2c0b5f72_fld131ae_slot1,
+ FIELD_combined2c0b5f72_fld132ae_slot1,
+ FIELD_combined2c0b5f72_fld147ae_slot1,
+ FIELD_bitindex,
+ FIELD_s3to1,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__mr0,
+ FIELD__mr1,
+ FIELD__mr2,
+ FIELD__mr3,
+ FIELD__bt16,
+ FIELD__bs16,
+ FIELD__br16,
+ FIELD__brall
+};
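+
+/* These field ids presumably index a parallel table of field
+   descriptors elsewhere in the file, so the enumerator order is
+   significant and must not be rearranged.  */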
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+ { "ae_add32", 1 },
+ { "ae_shift32x4", 1 },
+ { "ae_shift32x5", 1 },
+ { "ae_subshift", 1 }
+};
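+
+/* Each functional-unit entry pairs the unit name with what appears
+   to be the number of copies of that unit available per cycle.  */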
+
+enum xtensa_funcUnit_id {
+ FUNCUNIT_ae_add32,
+ FUNCUNIT_ae_shift32x4,
+ FUNCUNIT_ae_shift32x5,
+ FUNCUNIT_ae_subshift
+};
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_MR,
+ REGFILE_BR,
+ REGFILE_AE_PR,
+ REGFILE_AE_QR,
+ REGFILE_BR2,
+ REGFILE_BR4,
+ REGFILE_BR8,
+ REGFILE_BR16
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "MR", "m", REGFILE_MR, 32, 4 },
+ { "BR", "b", REGFILE_BR, 1, 16 },
+ { "AE_PR", "aep", REGFILE_AE_PR, 48, 8 },
+ { "AE_QR", "aeq", REGFILE_AE_QR, 56, 4 },
+ { "BR2", "b", REGFILE_BR, 2, 8 },
+ { "BR4", "b", REGFILE_BR, 4, 4 },
+ { "BR8", "b", REGFILE_BR, 8, 2 },
+ { "BR16", "b", REGFILE_BR, 16, 1 }
+};
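+
+/* Each regfile entry appears to be (name, short name, parent id,
+   entry width in bits, number of entries).  BR2/BR4/BR8/BR16 are
+   2-, 4-, 8- and 16-bit views aliased onto the sixteen single-bit
+   BR booleans, which is why they share the "b" short name and the
+   REGFILE_BR parent id.  */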
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "ERI_RD_Out", 14, 0, 0, 'o' },
+ { "ERI_RD_In", 32, 0, 1, 'i' },
+ { "ERI_RD_Rdy", 1, 0, 0, 'i' },
+ { "ERI_WR_Out", 46, 0, 2, 'o' },
+ { "ERI_WR_In", 1, 0, 3, 'i' },
+ { "IMPWIRE", 32, 0, 4, 'i' }
+};
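+
+/* Interface entries look like (name, width in bits, flags, class
+   id, direction), with 'i' marking TIE inputs and 'o' outputs.  */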
+
+enum xtensa_interface_id {
+ INTERFACE_ERI_RD_Out,
+ INTERFACE_ERI_RD_In,
+ INTERFACE_ERI_RD_Rdy,
+ INTERFACE_ERI_WR_Out,
+ INTERFACE_ERI_WR_In,
+ INTERFACE_IMPWIRE
+};
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
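+
+/* Each table maps a 4-bit encoded index to the operand's value; the
+   extra trailing 0 appears to be a sentinel beyond the sixteen
+   indexable entries.  ai4c is the -1, 1..15 immediate set of
+   ADDI.N, while b4c and b4cu hold the standard signed and unsigned
+   branch-immediate constant sets.  */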
+
+
+/* Instruction operands. */
+
+static int
+OperandSem_opnd_sem_MR_0_decode (uint32 *valp)
+{
+ *valp += 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_0_encode (uint32 *valp)
+{
+ int error;
+ error = ((*valp & ~0x3) != 0) || ((*valp & 0x2) == 0);
+ *valp = *valp & 1;
+ return error;
+}
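+
+/* MR_0 maps a 1-bit field onto MAC16 registers m2/m3: decode adds
+   2, and encode accepts only register numbers 2 and 3 (bit 1 must
+   be set) before keeping just the low bit.  */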
+
+static int
+OperandSem_opnd_sem_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_out_0;
+ unsigned soffsetx4_in_0;
+ soffsetx4_in_0 = *valp & 0x3ffff;
+ soffsetx4_out_0 = 0x4 + ((((int) soffsetx4_in_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_encode (uint32 *valp)
+{
+ unsigned soffsetx4_in_0;
+ unsigned soffsetx4_out_0;
+ soffsetx4_out_0 = *valp;
+ soffsetx4_in_0 = ((soffsetx4_out_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = soffsetx4_in_0;
+ return 0;
+}
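+
+/* Operand decode/encode pairs are inverses over the field's range.
+   soffsetx4 above sign-extends an 18-bit field, scales it by 4 and
+   biases it by +4 (the shape of a call-target offset); encode
+   undoes the bias and scale and masks back to 18 bits.  */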
+
+static int
+OperandSem_opnd_sem_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_out_0;
+ unsigned uimm12x8_in_0;
+ uimm12x8_in_0 = *valp & 0xfff;
+ uimm12x8_out_0 = uimm12x8_in_0 << 3;
+ *valp = uimm12x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_encode (uint32 *valp)
+{
+ unsigned uimm12x8_in_0;
+ unsigned uimm12x8_out_0;
+ uimm12x8_out_0 = *valp;
+ uimm12x8_in_0 = ((uimm12x8_out_0 >> 3) & 0xfff);
+ *valp = uimm12x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_out_0;
+ unsigned simm4_in_0;
+ simm4_in_0 = *valp & 0xf;
+ simm4_out_0 = ((int) simm4_in_0 << 28) >> 28;
+ *valp = simm4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_encode (uint32 *valp)
+{
+ unsigned simm4_in_0;
+ unsigned simm4_out_0;
+ simm4_out_0 = *valp;
+ simm4_in_0 = (simm4_out_0 & 0xf);
+ *valp = simm4_in_0;
+ return 0;
+}
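+
+/* The (int) cast followed by matched left/right shifts is the
+   generator's sign-extension idiom: the arithmetic right shift
+   replicates the field's top bit, so the 4-bit value 0xf decodes
+   to 0xffffffff (-1) while 0x7 decodes to 7.  */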
+
+static int
+OperandSem_opnd_sem_AR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_out_0;
+ unsigned immrx4_in_0;
+ immrx4_in_0 = *valp & 0xf;
+ immrx4_out_0 = (((0xfffffff) << 4) | immrx4_in_0) << 2;
+ *valp = immrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_encode (uint32 *valp)
+{
+ unsigned immrx4_in_0;
+ unsigned immrx4_out_0;
+ immrx4_out_0 = *valp;
+ immrx4_in_0 = ((immrx4_out_0 >> 2) & 0xf);
+ *valp = immrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_out_0;
+ unsigned lsi4x4_in_0;
+ lsi4x4_in_0 = *valp & 0xf;
+ lsi4x4_out_0 = lsi4x4_in_0 << 2;
+ *valp = lsi4x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_encode (uint32 *valp)
+{
+ unsigned lsi4x4_in_0;
+ unsigned lsi4x4_out_0;
+ lsi4x4_out_0 = *valp;
+ lsi4x4_in_0 = ((lsi4x4_out_0 >> 2) & 0xf);
+ *valp = lsi4x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_out_0;
+ unsigned simm7_in_0;
+ simm7_in_0 = *valp & 0x7f;
+ simm7_out_0 = ((((-((((simm7_in_0 >> 6) & 1)) & (((simm7_in_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | simm7_in_0;
+ *valp = simm7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_encode (uint32 *valp)
+{
+ unsigned simm7_in_0;
+ unsigned simm7_out_0;
+ simm7_out_0 = *valp;
+ simm7_in_0 = (simm7_out_0 & 0x7f);
+ *valp = simm7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_out_0;
+ unsigned uimm6_in_0;
+ uimm6_in_0 = *valp & 0x3f;
+ uimm6_out_0 = 0x4 + (((0) << 6) | uimm6_in_0);
+ *valp = uimm6_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_encode (uint32 *valp)
+{
+ unsigned uimm6_in_0;
+ unsigned uimm6_out_0;
+ uimm6_out_0 = *valp;
+ uimm6_in_0 = (uimm6_out_0 - 0x4) & 0x3f;
+ *valp = uimm6_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_out_0;
+ unsigned ai4const_in_0;
+ ai4const_in_0 = *valp & 0xf;
+ ai4const_out_0 = CONST_TBL_ai4c_0[ai4const_in_0 & 0xf];
+ *valp = ai4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_encode (uint32 *valp)
+{
+ unsigned ai4const_in_0;
+ unsigned ai4const_out_0;
+ ai4const_out_0 = *valp;
+ switch (ai4const_out_0)
+ {
+ case 0xffffffff: ai4const_in_0 = 0; break;
+ case 0x1: ai4const_in_0 = 0x1; break;
+ case 0x2: ai4const_in_0 = 0x2; break;
+ case 0x3: ai4const_in_0 = 0x3; break;
+ case 0x4: ai4const_in_0 = 0x4; break;
+ case 0x5: ai4const_in_0 = 0x5; break;
+ case 0x6: ai4const_in_0 = 0x6; break;
+ case 0x7: ai4const_in_0 = 0x7; break;
+ case 0x8: ai4const_in_0 = 0x8; break;
+ case 0x9: ai4const_in_0 = 0x9; break;
+ case 0xa: ai4const_in_0 = 0xa; break;
+ case 0xb: ai4const_in_0 = 0xb; break;
+ case 0xc: ai4const_in_0 = 0xc; break;
+ case 0xd: ai4const_in_0 = 0xd; break;
+ case 0xe: ai4const_in_0 = 0xe; break;
+ default: ai4const_in_0 = 0xf; break;
+ }
+ *valp = ai4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_out_0;
+ unsigned b4const_in_0;
+ b4const_in_0 = *valp & 0xf;
+ b4const_out_0 = CONST_TBL_b4c_0[b4const_in_0 & 0xf];
+ *valp = b4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_encode (uint32 *valp)
+{
+ unsigned b4const_in_0;
+ unsigned b4const_out_0;
+ b4const_out_0 = *valp;
+ switch (b4const_out_0)
+ {
+ case 0xffffffff: b4const_in_0 = 0; break;
+ case 0x1: b4const_in_0 = 0x1; break;
+ case 0x2: b4const_in_0 = 0x2; break;
+ case 0x3: b4const_in_0 = 0x3; break;
+ case 0x4: b4const_in_0 = 0x4; break;
+ case 0x5: b4const_in_0 = 0x5; break;
+ case 0x6: b4const_in_0 = 0x6; break;
+ case 0x7: b4const_in_0 = 0x7; break;
+ case 0x8: b4const_in_0 = 0x8; break;
+ case 0xa: b4const_in_0 = 0x9; break;
+ case 0xc: b4const_in_0 = 0xa; break;
+ case 0x10: b4const_in_0 = 0xb; break;
+ case 0x20: b4const_in_0 = 0xc; break;
+ case 0x40: b4const_in_0 = 0xd; break;
+ case 0x80: b4const_in_0 = 0xe; break;
+ default: b4const_in_0 = 0xf; break;
+ }
+ *valp = b4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_out_0;
+ unsigned b4constu_in_0;
+ b4constu_in_0 = *valp & 0xf;
+ b4constu_out_0 = CONST_TBL_b4cu_0[b4constu_in_0 & 0xf];
+ *valp = b4constu_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_encode (uint32 *valp)
+{
+ unsigned b4constu_in_0;
+ unsigned b4constu_out_0;
+ b4constu_out_0 = *valp;
+ switch (b4constu_out_0)
+ {
+ case 0x8000: b4constu_in_0 = 0; break;
+ case 0x10000: b4constu_in_0 = 0x1; break;
+ case 0x2: b4constu_in_0 = 0x2; break;
+ case 0x3: b4constu_in_0 = 0x3; break;
+ case 0x4: b4constu_in_0 = 0x4; break;
+ case 0x5: b4constu_in_0 = 0x5; break;
+ case 0x6: b4constu_in_0 = 0x6; break;
+ case 0x7: b4constu_in_0 = 0x7; break;
+ case 0x8: b4constu_in_0 = 0x8; break;
+ case 0xa: b4constu_in_0 = 0x9; break;
+ case 0xc: b4constu_in_0 = 0xa; break;
+ case 0x10: b4constu_in_0 = 0xb; break;
+ case 0x20: b4constu_in_0 = 0xc; break;
+ case 0x40: b4constu_in_0 = 0xd; break;
+ case 0x80: b4constu_in_0 = 0xe; break;
+ default: b4constu_in_0 = 0xf; break;
+ }
+ *valp = b4constu_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_out_0;
+ unsigned uimm8_in_0;
+ uimm8_in_0 = *valp & 0xff;
+ uimm8_out_0 = uimm8_in_0;
+ *valp = uimm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_encode (uint32 *valp)
+{
+ unsigned uimm8_in_0;
+ unsigned uimm8_out_0;
+ uimm8_out_0 = *valp;
+ uimm8_in_0 = (uimm8_out_0 & 0xff);
+ *valp = uimm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_out_0;
+ unsigned uimm8x2_in_0;
+ uimm8x2_in_0 = *valp & 0xff;
+ uimm8x2_out_0 = uimm8x2_in_0 << 1;
+ *valp = uimm8x2_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_encode (uint32 *valp)
+{
+ unsigned uimm8x2_in_0;
+ unsigned uimm8x2_out_0;
+ uimm8x2_out_0 = *valp;
+ uimm8x2_in_0 = ((uimm8x2_out_0 >> 1) & 0xff);
+ *valp = uimm8x2_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_out_0;
+ unsigned uimm8x4_in_0;
+ uimm8x4_in_0 = *valp & 0xff;
+ uimm8x4_out_0 = uimm8x4_in_0 << 2;
+ *valp = uimm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_encode (uint32 *valp)
+{
+ unsigned uimm8x4_in_0;
+ unsigned uimm8x4_out_0;
+ uimm8x4_out_0 = *valp;
+ uimm8x4_in_0 = ((uimm8x4_out_0 >> 2) & 0xff);
+ *valp = uimm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_out_0;
+ unsigned uimm4x16_in_0;
+ uimm4x16_in_0 = *valp & 0xf;
+ uimm4x16_out_0 = uimm4x16_in_0 << 4;
+ *valp = uimm4x16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_encode (uint32 *valp)
+{
+ unsigned uimm4x16_in_0;
+ unsigned uimm4x16_out_0;
+ uimm4x16_out_0 = *valp;
+ uimm4x16_in_0 = ((uimm4x16_out_0 >> 4) & 0xf);
+ *valp = uimm4x16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_out_0;
+ unsigned simm8_in_0;
+ simm8_in_0 = *valp & 0xff;
+ simm8_out_0 = ((int) simm8_in_0 << 24) >> 24;
+ *valp = simm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_encode (uint32 *valp)
+{
+ unsigned simm8_in_0;
+ unsigned simm8_out_0;
+ simm8_out_0 = *valp;
+ simm8_in_0 = (simm8_out_0 & 0xff);
+ *valp = simm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_out_0;
+ unsigned simm8x256_in_0;
+ simm8x256_in_0 = *valp & 0xff;
+ simm8x256_out_0 = (((int) simm8x256_in_0 << 24) >> 24) << 8;
+ *valp = simm8x256_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_encode (uint32 *valp)
+{
+ unsigned simm8x256_in_0;
+ unsigned simm8x256_out_0;
+ simm8x256_out_0 = *valp;
+ simm8x256_in_0 = ((simm8x256_out_0 >> 8) & 0xff);
+ *valp = simm8x256_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_out_0;
+ unsigned simm12b_in_0;
+ simm12b_in_0 = *valp & 0xfff;
+ simm12b_out_0 = ((int) simm12b_in_0 << 20) >> 20;
+ *valp = simm12b_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_encode (uint32 *valp)
+{
+ unsigned simm12b_in_0;
+ unsigned simm12b_out_0;
+ simm12b_out_0 = *valp;
+ simm12b_in_0 = (simm12b_out_0 & 0xfff);
+ *valp = simm12b_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_out_0;
+ unsigned msalp32_in_0;
+ msalp32_in_0 = *valp & 0x1f;
+ msalp32_out_0 = 0x20 - msalp32_in_0;
+ *valp = msalp32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_encode (uint32 *valp)
+{
+ unsigned msalp32_in_0;
+ unsigned msalp32_out_0;
+ msalp32_out_0 = *valp;
+ msalp32_in_0 = (0x20 - msalp32_out_0) & 0x1f;
+ *valp = msalp32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_out_0;
+ unsigned op2p1_in_0;
+ op2p1_in_0 = *valp & 0xf;
+ op2p1_out_0 = op2p1_in_0 + 0x1;
+ *valp = op2p1_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_encode (uint32 *valp)
+{
+ unsigned op2p1_in_0;
+ unsigned op2p1_out_0;
+ op2p1_out_0 = *valp;
+ op2p1_in_0 = (op2p1_out_0 - 0x1) & 0xf;
+ *valp = op2p1_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_decode (uint32 *valp)
+{
+ unsigned label8_out_0;
+ unsigned label8_in_0;
+ label8_in_0 = *valp & 0xff;
+ label8_out_0 = 0x4 + (((int) label8_in_0 << 24) >> 24);
+ *valp = label8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_encode (uint32 *valp)
+{
+ unsigned label8_in_0;
+ unsigned label8_out_0;
+ label8_out_0 = *valp;
+ label8_in_0 = (label8_out_0 - 0x4) & 0xff;
+ *valp = label8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_out_0;
+ unsigned ulabel8_in_0;
+ ulabel8_in_0 = *valp & 0xff;
+ ulabel8_out_0 = 0x4 + (((0) << 8) | ulabel8_in_0);
+ *valp = ulabel8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_encode (uint32 *valp)
+{
+ unsigned ulabel8_in_0;
+ unsigned ulabel8_out_0;
+ ulabel8_out_0 = *valp;
+ ulabel8_in_0 = (ulabel8_out_0 - 0x4) & 0xff;
+ *valp = ulabel8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_decode (uint32 *valp)
+{
+ unsigned label12_out_0;
+ unsigned label12_in_0;
+ label12_in_0 = *valp & 0xfff;
+ label12_out_0 = 0x4 + (((int) label12_in_0 << 20) >> 20);
+ *valp = label12_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_encode (uint32 *valp)
+{
+ unsigned label12_in_0;
+ unsigned label12_out_0;
+ label12_out_0 = *valp;
+ label12_in_0 = (label12_out_0 - 0x4) & 0xfff;
+ *valp = label12_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_out_0;
+ unsigned soffset_in_0;
+ soffset_in_0 = *valp & 0x3ffff;
+ soffset_out_0 = 0x4 + (((int) soffset_in_0 << 14) >> 14);
+ *valp = soffset_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_encode (uint32 *valp)
+{
+ unsigned soffset_in_0;
+ unsigned soffset_out_0;
+ soffset_out_0 = *valp;
+ soffset_in_0 = (soffset_out_0 - 0x4) & 0x3ffff;
+ *valp = soffset_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_out_0;
+ unsigned uimm16x4_in_0;
+ uimm16x4_in_0 = *valp & 0xffff;
+ uimm16x4_out_0 = (((0xffff) << 16) | uimm16x4_in_0) << 2;
+ *valp = uimm16x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_encode (uint32 *valp)
+{
+ unsigned uimm16x4_in_0;
+ unsigned uimm16x4_out_0;
+ uimm16x4_out_0 = *valp;
+ uimm16x4_in_0 = (uimm16x4_out_0 >> 2) & 0xffff;
+ *valp = uimm16x4_in_0;
+ return 0;
+}
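+
+/* uimm16x4 has the L32R shape: the 16-bit field is merged under a
+   fixed 0xffff upper half and scaled by 4, so the decoded offset is
+   always negative and 4-byte aligned.  */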
+
+static int
+OperandSem_opnd_sem_bbi_decode (uint32 *valp)
+{
+ unsigned bbi_out_0;
+ unsigned bbi_in_0;
+ bbi_in_0 = *valp & 0x1f;
+ bbi_out_0 = (0 << 5) | bbi_in_0;
+ *valp = bbi_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_encode (uint32 *valp)
+{
+ unsigned bbi_in_0;
+ unsigned bbi_out_0;
+ bbi_out_0 = *valp;
+ bbi_in_0 = (bbi_out_0 & 0x1f);
+ *valp = bbi_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_decode (uint32 *valp)
+{
+ unsigned s_out_0;
+ unsigned s_in_0;
+ s_in_0 = *valp & 0xf;
+ s_out_0 = (0 << 4) | s_in_0;
+ *valp = s_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_encode (uint32 *valp)
+{
+ unsigned s_in_0;
+ unsigned s_out_0;
+ s_out_0 = *valp;
+ s_in_0 = (s_out_0 & 0xf);
+ *valp = s_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_1_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_3_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_MR_5_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_MR_5_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_immt_decode (uint32 *valp)
+{
+ unsigned immt_out_0;
+ unsigned immt_in_0;
+ immt_in_0 = *valp & 0xf;
+ immt_out_0 = immt_in_0;
+ *valp = immt_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_encode (uint32 *valp)
+{
+ unsigned immt_in_0;
+ unsigned immt_out_0;
+ immt_out_0 = *valp;
+ immt_in_0 = immt_out_0 & 0xf;
+ *valp = immt_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 1) != 0);
+ *valp = *valp >> 1;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 3) != 0);
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 7) != 0);
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 15) != 0);
+ *valp = *valp >> 4;
+ return error;
+}
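+
+/* The BR2/BR4/BR8/BR16 operands encode an aligned group of boolean
+   registers by its group index: decode shifts the index up to the
+   first register number, and encode rejects numbers that are out of
+   range or misaligned (BR4, for instance, accepts only b0, b4, b8
+   and b12).  */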
+
+static int
+OperandSem_opnd_sem_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_out_0;
+ unsigned tp7_in_0;
+ tp7_in_0 = *valp & 0xf;
+ tp7_out_0 = tp7_in_0 + 0x7;
+ *valp = tp7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_encode (uint32 *valp)
+{
+ unsigned tp7_in_0;
+ unsigned tp7_out_0;
+ tp7_out_0 = *valp;
+ tp7_in_0 = (tp7_out_0 - 0x7) & 0xf;
+ *valp = tp7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_out_0;
+ unsigned xt_wbr15_label_in_0;
+ xt_wbr15_label_in_0 = *valp & 0x7fff;
+ xt_wbr15_label_out_0 = 0x4 + (((int) xt_wbr15_label_in_0 << 17) >> 17);
+ *valp = xt_wbr15_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_in_0;
+ unsigned xt_wbr15_label_out_0;
+ xt_wbr15_label_out_0 = *valp;
+ xt_wbr15_label_in_0 = (xt_wbr15_label_out_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt32_decode (uint32 *valp)
+{
+ unsigned ae_samt32_out_0;
+ unsigned ae_samt32_in_0;
+ ae_samt32_in_0 = *valp & 0x1f;
+ ae_samt32_out_0 = (0 << 5) | ae_samt32_in_0;
+ *valp = ae_samt32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt32_encode (uint32 *valp)
+{
+ unsigned ae_samt32_in_0;
+ unsigned ae_samt32_out_0;
+ ae_samt32_out_0 = *valp;
+ ae_samt32_in_0 = (ae_samt32_out_0 & 0x1f);
+ *valp = ae_samt32_in_0;
+ return 0;
+}
+
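+/* AE_PR and AE_QR are AudioEngine register-file operands; judging by
+   the range checks in their encoders, the PR file has 8 registers and
+   the QR file has 4. */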
+static int
+OperandSem_opnd_sem_AE_PR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AE_PR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 8);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AE_QR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AE_QR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
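+/* The ae_lsimmNN operands are 4-bit signed load/store offsets scaled by
+   the access size in bytes: x2 for 16-bit, x4 for 32-bit and x8 for
+   64-bit accesses. */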
+static int
+OperandSem_opnd_sem_ae_lsimm16_decode (uint32 *valp)
+{
+ unsigned ae_lsimm16_out_0;
+ unsigned ae_lsimm16_in_0;
+ ae_lsimm16_in_0 = *valp & 0xf;
+ ae_lsimm16_out_0 = (((int) ae_lsimm16_in_0 << 28) >> 28) << 1;
+ *valp = ae_lsimm16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm16_encode (uint32 *valp)
+{
+ unsigned ae_lsimm16_in_0;
+ unsigned ae_lsimm16_out_0;
+ ae_lsimm16_out_0 = *valp;
+ ae_lsimm16_in_0 = ((ae_lsimm16_out_0 >> 1) & 0xf);
+ *valp = ae_lsimm16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm32_decode (uint32 *valp)
+{
+ unsigned ae_lsimm32_out_0;
+ unsigned ae_lsimm32_in_0;
+ ae_lsimm32_in_0 = *valp & 0xf;
+ ae_lsimm32_out_0 = (((int) ae_lsimm32_in_0 << 28) >> 28) << 2;
+ *valp = ae_lsimm32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm32_encode (uint32 *valp)
+{
+ unsigned ae_lsimm32_in_0;
+ unsigned ae_lsimm32_out_0;
+ ae_lsimm32_out_0 = *valp;
+ ae_lsimm32_in_0 = ((ae_lsimm32_out_0 >> 2) & 0xf);
+ *valp = ae_lsimm32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm64_decode (uint32 *valp)
+{
+ unsigned ae_lsimm64_out_0;
+ unsigned ae_lsimm64_in_0;
+ ae_lsimm64_in_0 = *valp & 0xf;
+ ae_lsimm64_out_0 = (((int) ae_lsimm64_in_0 << 28) >> 28) << 3;
+ *valp = ae_lsimm64_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm64_encode (uint32 *valp)
+{
+ unsigned ae_lsimm64_in_0;
+ unsigned ae_lsimm64_out_0;
+ ae_lsimm64_out_0 = *valp;
+ ae_lsimm64_in_0 = ((ae_lsimm64_out_0 >> 3) & 0xf);
+ *valp = ae_lsimm64_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt64_decode (uint32 *valp)
+{
+ unsigned ae_samt64_out_0;
+ unsigned ae_samt64_in_0;
+ ae_samt64_in_0 = *valp & 0x3f;
+ ae_samt64_out_0 = (0 << 6) | ae_samt64_in_0;
+ *valp = ae_samt64_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt64_encode (uint32 *valp)
+{
+ unsigned ae_samt64_in_0;
+ unsigned ae_samt64_out_0;
+ ae_samt64_out_0 = *valp;
+ ae_samt64_in_0 = (ae_samt64_out_0 & 0x3f);
+ *valp = ae_samt64_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_ohba_decode (uint32 *valp)
+{
+ unsigned ae_ohba_out_0;
+ unsigned ae_ohba_in_0;
+ ae_ohba_in_0 = *valp & 0xf;
+ /* A field value of 0 denotes the maximum (16): set bit 4 when the low
+    four bits are zero. */
+ ae_ohba_out_0 = ((ae_ohba_in_0 == 0) << 4) | ae_ohba_in_0;
+ *valp = ae_ohba_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_ohba_encode (uint32 *valp)
+{
+ unsigned ae_ohba_in_0;
+ unsigned ae_ohba_out_0;
+ ae_ohba_out_0 = *valp;
+ ae_ohba_in_0 = (ae_ohba_out_0 & 0xf);
+ *valp = ae_ohba_in_0;
+ return 0;
+}
+
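+/* The Operand_*_ator/_rtoa pairs convert between absolute addresses and
+   PC-relative operand values.  Most simply subtract or add the PC;
+   soffsetx4 is taken relative to the word-aligned PC, and uimm16x4
+   (apparently the L32R-style literal offset) is relative to
+   (pc + 3) & ~3. */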
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
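+/* Operand table.  Each initializer lists, in order: name, instruction
+   field, register file (-1 for immediate operands), number of
+   consecutive registers, flags, the encode/decode semantics, and the
+   absolute<->relative conversion hooks (0 when the operand is not
+   PC-relative). */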
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffsetx4_encode, OperandSem_opnd_sem_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm12x8_encode, OperandSem_opnd_sem_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm4_encode, OperandSem_opnd_sem_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_0_encode, OperandSem_opnd_sem_AR_0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_1_encode, OperandSem_opnd_sem_AR_1_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_2_encode, OperandSem_opnd_sem_AR_2_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_3_encode, OperandSem_opnd_sem_AR_3_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_4_encode, OperandSem_opnd_sem_AR_4_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immrx4_encode, OperandSem_opnd_sem_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm7_encode, OperandSem_opnd_sem_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm6_encode, OperandSem_opnd_sem_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ai4const_encode, OperandSem_opnd_sem_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4const_encode, OperandSem_opnd_sem_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4constu_encode, OperandSem_opnd_sem_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8_encode, OperandSem_opnd_sem_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x2_encode, OperandSem_opnd_sem_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x4_encode, OperandSem_opnd_sem_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm4x16_encode, OperandSem_opnd_sem_uimm4x16_decode,
+ 0, 0 },
+ { "uimmrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8_encode, OperandSem_opnd_sem_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8x256_encode, OperandSem_opnd_sem_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm12b_encode, OperandSem_opnd_sem_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ OperandSem_opnd_sem_msalp32_encode, OperandSem_opnd_sem_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_op2p1_encode, OperandSem_opnd_sem_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label8_encode, OperandSem_opnd_sem_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_ulabel8_encode, OperandSem_opnd_sem_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label12_encode, OperandSem_opnd_sem_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm16x4_encode, OperandSem_opnd_sem_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "bbi", FIELD_bbi, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sae", FIELD_sae, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sas", FIELD_sas, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_s_encode, OperandSem_opnd_sem_s_decode,
+ 0, 0 },
+ { "mx", FIELD_x, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_encode, OperandSem_opnd_sem_MR_decode,
+ 0, 0 },
+ { "my", FIELD_y, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_UNKNOWN,
+ OperandSem_opnd_sem_MR_0_encode, OperandSem_opnd_sem_MR_0_decode,
+ 0, 0 },
+ { "mw", FIELD_w, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_MR_1_encode, OperandSem_opnd_sem_MR_1_decode,
+ 0, 0 },
+ { "mr0", FIELD__mr0, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_2_encode, OperandSem_opnd_sem_MR_2_decode,
+ 0, 0 },
+ { "mr1", FIELD__mr1, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_3_encode, OperandSem_opnd_sem_MR_3_decode,
+ 0, 0 },
+ { "mr2", FIELD__mr2, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_4_encode, OperandSem_opnd_sem_MR_4_decode,
+ 0, 0 },
+ { "mr3", FIELD__mr3, REGFILE_MR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_MR_5_encode, OperandSem_opnd_sem_MR_5_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "bt", FIELD_t, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bs", FIELD_s, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "br", FIELD_r, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bt2", FIELD_t2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bs2", FIELD_s2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "br2", FIELD_r2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bt4", FIELD_t4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bs4", FIELD_s4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "br4", FIELD_r4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bt8", FIELD_t8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bs8", FIELD_s8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "br8", FIELD_r8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bt16", FIELD__bt16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "bs16", FIELD__bs16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "br16", FIELD__br16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "brall", FIELD__brall, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_tp7_encode, OperandSem_opnd_sem_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr15_label_encode, OperandSem_opnd_sem_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "ae_samt32", FIELD_ftsf13, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_samt32_encode, OperandSem_opnd_sem_ae_samt32_decode,
+ 0, 0 },
+ { "pr0", FIELD_ftsf11, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "qr0", FIELD_ftsf12, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr0", FIELD_ftsf12, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "ae_lsimm16", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm16_encode, OperandSem_opnd_sem_ae_lsimm16_decode,
+ 0, 0 },
+ { "ae_lsimm32", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm32_encode, OperandSem_opnd_sem_ae_lsimm32_decode,
+ 0, 0 },
+ { "ae_lsimm64", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm64_encode, OperandSem_opnd_sem_ae_lsimm64_decode,
+ 0, 0 },
+ { "ae_samt64", FIELD_ae_samt_s_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_samt64_encode, OperandSem_opnd_sem_ae_samt64_decode,
+ 0, 0 },
+ { "ae_ohba", FIELD_ae_fld_ohba, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_ohba_encode, OperandSem_opnd_sem_ae_ohba_decode,
+ 0, 0 },
+ { "ae_ohba2", FIELD_ae_fld_ohba2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_ohba_encode, OperandSem_opnd_sem_ae_ohba_decode,
+ 0, 0 },
+ { "pr", FIELD_ae_r20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "cvt_pr", FIELD_ae_r20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "qr0_rw", FIELD_ae_r10, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr0_rw", FIELD_ae_r10, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "qr1_w", FIELD_ae_r32, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr1_w", FIELD_ae_r32, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "ps", FIELD_ae_s20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "alupppb_ps", FIELD_ae_s20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "bitindex", FIELD_bitindex, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "r3", FIELD_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "rbit2", FIELD_rbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "rhi", FIELD_rhi, -1, 0, 0, 0, 0, 0, 0 },
+ { "t3", FIELD_t3, -1, 0, 0, 0, 0, 0, 0 },
+ { "tbit2", FIELD_tbit2, -1, 0, 0, 0, 0, 0, 0 },
+ { "tlo", FIELD_tlo, -1, 0, 0, 0, 0, 0, 0 },
+ { "w", FIELD_w, -1, 0, 0, 0, 0, 0, 0 },
+ { "y", FIELD_y, -1, 0, 0, 0, 0, 0, 0 },
+ { "x", FIELD_x, -1, 0, 0, 0, 0, 0, 0 },
+ { "t2", FIELD_t2, -1, 0, 0, 0, 0, 0, 0 },
+ { "s2", FIELD_s2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r2", FIELD_r2, -1, 0, 0, 0, 0, 0, 0 },
+ { "t4", FIELD_t4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s4", FIELD_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "r4", FIELD_r4, -1, 0, 0, 0, 0, 0, 0 },
+ { "t8", FIELD_t8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s8", FIELD_s8, -1, 0, 0, 0, 0, 0, 0 },
+ { "r8", FIELD_r8, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r3", FIELD_ae_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s_non_samt", FIELD_ae_s_non_samt, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s3", FIELD_ae_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r32", FIELD_ae_r32, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_samt_s_t", FIELD_ae_samt_s_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r20", FIELD_ae_r20, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r10", FIELD_ae_r10, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s20", FIELD_ae_s20, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_fld_ohba", FIELD_ae_fld_ohba, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_fld_ohba2", FIELD_ae_fld_ohba2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s3", FIELD_op0_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf11", FIELD_ftsf11, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf12", FIELD_ftsf12, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf13", FIELD_ftsf13, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf20ae_slot1", FIELD_ftsf20ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf21ae_slot1", FIELD_ftsf21ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf22ae_slot1", FIELD_ftsf22ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf23ae_slot1", FIELD_ftsf23ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf24ae_slot1", FIELD_ftsf24ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf25ae_slot1", FIELD_ftsf25ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf26ae_slot1", FIELD_ftsf26ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf27ae_slot1", FIELD_ftsf27ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf28ae_slot1", FIELD_ftsf28ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf29ae_slot1", FIELD_ftsf29ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf30ae_slot1", FIELD_ftsf30ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf31ae_slot1", FIELD_ftsf31ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf32ae_slot1", FIELD_ftsf32ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf33ae_slot1", FIELD_ftsf33ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf34ae_slot1", FIELD_ftsf34ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf35ae_slot1", FIELD_ftsf35ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf36ae_slot1", FIELD_ftsf36ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf37ae_slot1", FIELD_ftsf37ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf38ae_slot1", FIELD_ftsf38ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf40ae_slot1", FIELD_ftsf40ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf41ae_slot1", FIELD_ftsf41ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf42ae_slot1", FIELD_ftsf42ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf43ae_slot1", FIELD_ftsf43ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf45ae_slot1", FIELD_ftsf45ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf47ae_slot1", FIELD_ftsf47ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf48ae_slot1", FIELD_ftsf48ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf49ae_slot1", FIELD_ftsf49ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf50ae_slot1", FIELD_ftsf50ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf51ae_slot1", FIELD_ftsf51ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf52ae_slot1", FIELD_ftsf52ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf53ae_slot1", FIELD_ftsf53ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf54ae_slot1", FIELD_ftsf54ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf55", FIELD_ftsf55, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf56ae_slot1", FIELD_ftsf56ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf57ae_slot1", FIELD_ftsf57ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf58ae_slot1", FIELD_ftsf58ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf60ae_slot1", FIELD_ftsf60ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf61", FIELD_ftsf61, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf62ae_slot1", FIELD_ftsf62ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf63ae_slot1", FIELD_ftsf63ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf64ae_slot1", FIELD_ftsf64ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf66ae_slot1", FIELD_ftsf66ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf68ae_slot1", FIELD_ftsf68ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf69ae_slot1", FIELD_ftsf69ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf70ae_slot1", FIELD_ftsf70ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf71ae_slot1", FIELD_ftsf71ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf72ae_slot1", FIELD_ftsf72ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf73ae_slot1", FIELD_ftsf73ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf74ae_slot1", FIELD_ftsf74ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf75ae_slot1", FIELD_ftsf75ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf76ae_slot1", FIELD_ftsf76ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf77ae_slot1", FIELD_ftsf77ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf78ae_slot1", FIELD_ftsf78ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf79ae_slot1", FIELD_ftsf79ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf80ae_slot1", FIELD_ftsf80ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf81ae_slot1", FIELD_ftsf81ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf82ae_slot1", FIELD_ftsf82ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf83ae_slot1", FIELD_ftsf83ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf84ae_slot1", FIELD_ftsf84ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf85ae_slot1", FIELD_ftsf85ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf86ae_slot1", FIELD_ftsf86ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf87ae_slot1", FIELD_ftsf87ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf88ae_slot1", FIELD_ftsf88ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf89ae_slot1", FIELD_ftsf89ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf90ae_slot1", FIELD_ftsf90ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf91", FIELD_ftsf91, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf92ae_slot1", FIELD_ftsf92ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf93ae_slot1", FIELD_ftsf93ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf94ae_slot1", FIELD_ftsf94ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf96ae_slot1", FIELD_ftsf96ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf97ae_slot1", FIELD_ftsf97ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf99ae_slot1", FIELD_ftsf99ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf101ae_slot1", FIELD_ftsf101ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf102ae_slot1", FIELD_ftsf102ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf103ae_slot1", FIELD_ftsf103ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf106ae_slot1", FIELD_ftsf106ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf107ae_slot1", FIELD_ftsf107ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf108ae_slot1", FIELD_ftsf108ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf109ae_slot1", FIELD_ftsf109ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf110ae_slot1", FIELD_ftsf110ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf111ae_slot1", FIELD_ftsf111ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf112ae_slot1", FIELD_ftsf112ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf113ae_slot1", FIELD_ftsf113ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf114ae_slot1", FIELD_ftsf114ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf115ae_slot1", FIELD_ftsf115ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf116ae_slot1", FIELD_ftsf116ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf117ae_slot1", FIELD_ftsf117ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf118ae_slot1", FIELD_ftsf118ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf120ae_slot1", FIELD_ftsf120ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf121ae_slot1", FIELD_ftsf121ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf123ae_slot1", FIELD_ftsf123ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf124ae_slot1", FIELD_ftsf124ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf125ae_slot1", FIELD_ftsf125ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf126ae_slot1", FIELD_ftsf126ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf127ae_slot1", FIELD_ftsf127ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf128ae_slot1", FIELD_ftsf128ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf129ae_slot1", FIELD_ftsf129ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf130ae_slot1", FIELD_ftsf130ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf131ae_slot1", FIELD_ftsf131ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf132ae_slot1", FIELD_ftsf132ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf133ae_slot1", FIELD_ftsf133ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf134ae_slot1", FIELD_ftsf134ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf135ae_slot1", FIELD_ftsf135ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf136ae_slot1", FIELD_ftsf136ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf137ae_slot1", FIELD_ftsf137ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf138ae_slot1", FIELD_ftsf138ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf139ae_slot1", FIELD_ftsf139ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf140ae_slot1", FIELD_ftsf140ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf141ae_slot1", FIELD_ftsf141ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf142ae_slot1", FIELD_ftsf142ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf143ae_slot1", FIELD_ftsf143ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf144ae_slot1", FIELD_ftsf144ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf145ae_slot1", FIELD_ftsf145ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf146ae_slot1", FIELD_ftsf146ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf147ae_slot1", FIELD_ftsf147ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf148ae_slot1", FIELD_ftsf148ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf149ae_slot1", FIELD_ftsf149ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf150ae_slot1", FIELD_ftsf150ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf151ae_slot1", FIELD_ftsf151ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf152ae_slot1", FIELD_ftsf152ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf153ae_slot1", FIELD_ftsf153ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf154ae_slot1", FIELD_ftsf154ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf155ae_slot1", FIELD_ftsf155ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf156ae_slot1", FIELD_ftsf156ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf157ae_slot1", FIELD_ftsf157ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf158ae_slot1", FIELD_ftsf158ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf159ae_slot1", FIELD_ftsf159ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf160ae_slot1", FIELD_ftsf160ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf161ae_slot1", FIELD_ftsf161ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf162ae_slot1", FIELD_ftsf162ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf163ae_slot1", FIELD_ftsf163ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf164ae_slot1", FIELD_ftsf164ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf165ae_slot1", FIELD_ftsf165ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf166ae_slot1", FIELD_ftsf166ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf167ae_slot1", FIELD_ftsf167ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf168ae_slot1", FIELD_ftsf168ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf169ae_slot1", FIELD_ftsf169ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf170ae_slot1", FIELD_ftsf170ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf171ae_slot1", FIELD_ftsf171ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf172ae_slot1", FIELD_ftsf172ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf173ae_slot1", FIELD_ftsf173ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf174ae_slot1", FIELD_ftsf174ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf175ae_slot1", FIELD_ftsf175ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf176ae_slot1", FIELD_ftsf176ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf177ae_slot1", FIELD_ftsf177ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf178ae_slot1", FIELD_ftsf178ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf179ae_slot1", FIELD_ftsf179ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf180ae_slot1", FIELD_ftsf180ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf181ae_slot1", FIELD_ftsf181ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf182ae_slot1", FIELD_ftsf182ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf183ae_slot1", FIELD_ftsf183ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf184ae_slot1", FIELD_ftsf184ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf185ae_slot1", FIELD_ftsf185ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf186ae_slot1", FIELD_ftsf186ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf187ae_slot1", FIELD_ftsf187ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf188ae_slot1", FIELD_ftsf188ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf189ae_slot1", FIELD_ftsf189ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf190ae_slot1", FIELD_ftsf190ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf191ae_slot1", FIELD_ftsf191ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf192ae_slot1", FIELD_ftsf192ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf193ae_slot1", FIELD_ftsf193ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf194ae_slot1", FIELD_ftsf194ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf195ae_slot1", FIELD_ftsf195ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf196ae_slot1", FIELD_ftsf196ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf197ae_slot1", FIELD_ftsf197ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf198ae_slot1", FIELD_ftsf198ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf199ae_slot1", FIELD_ftsf199ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf200ae_slot1", FIELD_ftsf200ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf201ae_slot1", FIELD_ftsf201ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf202ae_slot1", FIELD_ftsf202ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf203ae_slot1", FIELD_ftsf203ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf204ae_slot1", FIELD_ftsf204ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf205ae_slot1", FIELD_ftsf205ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf206ae_slot1", FIELD_ftsf206ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf207ae_slot1", FIELD_ftsf207ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf208ae_slot1", FIELD_ftsf208ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf210ae_slot1", FIELD_ftsf210ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf333ae_slot1", FIELD_ftsf333ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf334ae_slot1", FIELD_ftsf334ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf335", FIELD_ftsf335, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf336ae_slot1", FIELD_ftsf336ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf337ae_slot1", FIELD_ftsf337ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf339ae_slot1", FIELD_ftsf339ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf340ae_slot1", FIELD_ftsf340ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf341ae_slot1", FIELD_ftsf341ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf342ae_slot1", FIELD_ftsf342ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf343ae_slot1", FIELD_ftsf343ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf344ae_slot1", FIELD_ftsf344ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf345ae_slot1", FIELD_ftsf345ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf347ae_slot1", FIELD_ftsf347ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf348ae_slot1", FIELD_ftsf348ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf349ae_slot1", FIELD_ftsf349ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf350ae_slot1", FIELD_ftsf350ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf351", FIELD_ftsf351, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf352ae_slot1", FIELD_ftsf352ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf354ae_slot1", FIELD_ftsf354ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf355ae_slot1", FIELD_ftsf355ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf356ae_slot1", FIELD_ftsf356ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf357ae_slot1", FIELD_ftsf357ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf358ae_slot1", FIELD_ftsf358ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf359ae_slot1", FIELD_ftsf359ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s4", FIELD_op0_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf211ae_slot0", FIELD_ftsf211ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf212ae_slot0", FIELD_ftsf212ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf213ae_slot0", FIELD_ftsf213ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf214ae_slot0", FIELD_ftsf214ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf215ae_slot0", FIELD_ftsf215ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf217ae_slot0", FIELD_ftsf217ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf218ae_slot0", FIELD_ftsf218ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf219ae_slot0", FIELD_ftsf219ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf220ae_slot0", FIELD_ftsf220ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf221ae_slot0", FIELD_ftsf221ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf222ae_slot0", FIELD_ftsf222ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf223ae_slot0", FIELD_ftsf223ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf224ae_slot0", FIELD_ftsf224ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf225ae_slot0", FIELD_ftsf225ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf226ae_slot0", FIELD_ftsf226ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf227ae_slot0", FIELD_ftsf227ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf228ae_slot0", FIELD_ftsf228ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf229ae_slot0", FIELD_ftsf229ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf230ae_slot0", FIELD_ftsf230ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf231ae_slot0", FIELD_ftsf231ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf232ae_slot0", FIELD_ftsf232ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf233ae_slot0", FIELD_ftsf233ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf234ae_slot0", FIELD_ftsf234ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf235ae_slot0", FIELD_ftsf235ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf236ae_slot0", FIELD_ftsf236ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf237ae_slot0", FIELD_ftsf237ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf238ae_slot0", FIELD_ftsf238ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf239ae_slot0", FIELD_ftsf239ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf240ae_slot0", FIELD_ftsf240ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf241ae_slot0", FIELD_ftsf241ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf242ae_slot0", FIELD_ftsf242ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf243ae_slot0", FIELD_ftsf243ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf244ae_slot0", FIELD_ftsf244ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf245ae_slot0", FIELD_ftsf245ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf246ae_slot0", FIELD_ftsf246ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf247ae_slot0", FIELD_ftsf247ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf248ae_slot0", FIELD_ftsf248ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf249ae_slot0", FIELD_ftsf249ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf250ae_slot0", FIELD_ftsf250ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf251ae_slot0", FIELD_ftsf251ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf252ae_slot0", FIELD_ftsf252ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf253ae_slot0", FIELD_ftsf253ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf254ae_slot0", FIELD_ftsf254ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf255ae_slot0", FIELD_ftsf255ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf256ae_slot0", FIELD_ftsf256ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf257ae_slot0", FIELD_ftsf257ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf258ae_slot0", FIELD_ftsf258ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf259ae_slot0", FIELD_ftsf259ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf260ae_slot0", FIELD_ftsf260ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf261ae_slot0", FIELD_ftsf261ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf262ae_slot0", FIELD_ftsf262ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf263ae_slot0", FIELD_ftsf263ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf264ae_slot0", FIELD_ftsf264ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf265ae_slot0", FIELD_ftsf265ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf266ae_slot0", FIELD_ftsf266ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf267ae_slot0", FIELD_ftsf267ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf268ae_slot0", FIELD_ftsf268ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf269ae_slot0", FIELD_ftsf269ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf270ae_slot0", FIELD_ftsf270ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf271ae_slot0", FIELD_ftsf271ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf272ae_slot0", FIELD_ftsf272ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf273ae_slot0", FIELD_ftsf273ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf274ae_slot0", FIELD_ftsf274ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf275ae_slot0", FIELD_ftsf275ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf276ae_slot0", FIELD_ftsf276ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf277ae_slot0", FIELD_ftsf277ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf278ae_slot0", FIELD_ftsf278ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf279ae_slot0", FIELD_ftsf279ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf280", FIELD_ftsf280, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf281ae_slot0", FIELD_ftsf281ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf282ae_slot0", FIELD_ftsf282ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf284ae_slot0", FIELD_ftsf284ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf285ae_slot0", FIELD_ftsf285ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf287ae_slot0", FIELD_ftsf287ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf288", FIELD_ftsf288, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf289ae_slot0", FIELD_ftsf289ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf290ae_slot0", FIELD_ftsf290ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf291ae_slot0", FIELD_ftsf291ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf292ae_slot0", FIELD_ftsf292ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf293ae_slot0", FIELD_ftsf293ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf294ae_slot0", FIELD_ftsf294ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf295ae_slot0", FIELD_ftsf295ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf296ae_slot0", FIELD_ftsf296ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf297ae_slot0", FIELD_ftsf297ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf298ae_slot0", FIELD_ftsf298ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf300ae_slot0", FIELD_ftsf300ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf302ae_slot0", FIELD_ftsf302ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf304ae_slot0", FIELD_ftsf304ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf305ae_slot0", FIELD_ftsf305ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf306ae_slot0", FIELD_ftsf306ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf307ae_slot0", FIELD_ftsf307ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf308ae_slot0", FIELD_ftsf308ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf309", FIELD_ftsf309, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf310ae_slot0", FIELD_ftsf310ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf311ae_slot0", FIELD_ftsf311ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf312ae_slot0", FIELD_ftsf312ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf313ae_slot0", FIELD_ftsf313ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf314ae_slot0", FIELD_ftsf314ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf315", FIELD_ftsf315, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf316ae_slot0", FIELD_ftsf316ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf317ae_slot0", FIELD_ftsf317ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf319ae_slot0", FIELD_ftsf319ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf320ae_slot0", FIELD_ftsf320ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf322ae_slot0", FIELD_ftsf322ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf323ae_slot0", FIELD_ftsf323ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf324ae_slot0", FIELD_ftsf324ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf325ae_slot0", FIELD_ftsf325ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf326ae_slot0", FIELD_ftsf326ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf327ae_slot0", FIELD_ftsf327ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf328ae_slot0", FIELD_ftsf328ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf329ae_slot0", FIELD_ftsf329ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf360ae_slot0", FIELD_ftsf360ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf361ae_slot0", FIELD_ftsf361ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf362", FIELD_ftsf362, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf363ae_slot0", FIELD_ftsf363ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf364ae_slot0", FIELD_ftsf364ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf366ae_slot0", FIELD_ftsf366ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf368ae_slot0", FIELD_ftsf368ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf370ae_slot0", FIELD_ftsf370ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf373ae_slot0", FIELD_ftsf373ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf376ae_slot0", FIELD_ftsf376ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf378ae_slot0", FIELD_ftsf378ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf379ae_slot0", FIELD_ftsf379ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf382ae_slot0", FIELD_ftsf382ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf383ae_slot0", FIELD_ftsf383ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf384ae_slot0", FIELD_ftsf384ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf386ae_slot0", FIELD_ftsf386ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf387ae_slot0", FIELD_ftsf387ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf388ae_slot0", FIELD_ftsf388ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf389ae_slot0", FIELD_ftsf389ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_mul32x24fld", FIELD_ae_mul32x24fld, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s4_s4", FIELD_op0_s4_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld28", FIELD_combined2c0b5f72_fld28, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld37", FIELD_combined2c0b5f72_fld37, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld39", FIELD_combined2c0b5f72_fld39, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld40", FIELD_combined2c0b5f72_fld40, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld46", FIELD_combined2c0b5f72_fld46, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld47", FIELD_combined2c0b5f72_fld47, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld49", FIELD_combined2c0b5f72_fld49, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld50", FIELD_combined2c0b5f72_fld50, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld52", FIELD_combined2c0b5f72_fld52, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld121", FIELD_combined2c0b5f72_fld121, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld123", FIELD_combined2c0b5f72_fld123, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld127", FIELD_combined2c0b5f72_fld127, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld133ae_slot0", FIELD_combined2c0b5f72_fld133ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld134ae_slot0", FIELD_combined2c0b5f72_fld134ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld135ae_slot0", FIELD_combined2c0b5f72_fld135ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld136ae_slot0", FIELD_combined2c0b5f72_fld136ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld137ae_slot0", FIELD_combined2c0b5f72_fld137ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld138ae_slot0", FIELD_combined2c0b5f72_fld138ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld139ae_slot0", FIELD_combined2c0b5f72_fld139ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld140ae_slot0", FIELD_combined2c0b5f72_fld140ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld141ae_slot0", FIELD_combined2c0b5f72_fld141ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld142ae_slot0", FIELD_combined2c0b5f72_fld142ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld143ae_slot0", FIELD_combined2c0b5f72_fld143ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld144ae_slot0", FIELD_combined2c0b5f72_fld144ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld145ae_slot0", FIELD_combined2c0b5f72_fld145ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld146ae_slot0", FIELD_combined2c0b5f72_fld146ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld148ae_slot0", FIELD_combined2c0b5f72_fld148ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld149ae_slot0", FIELD_combined2c0b5f72_fld149ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s4_s4_s4", FIELD_op0_s4_s4_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld96", FIELD_combined1e9fefee_fld96, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld98", FIELD_combined1e9fefee_fld98, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld106ae_slot0", FIELD_combined1e9fefee_fld106ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld107ae_slot0", FIELD_combined1e9fefee_fld107ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld108ae_slot0", FIELD_combined1e9fefee_fld108ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined1e9fefee_fld109ae_slot0", FIELD_combined1e9fefee_fld109ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s3_s3", FIELD_op0_s3_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld19", FIELD_combined2c0b5f72_fld19, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld22", FIELD_combined2c0b5f72_fld22, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld24", FIELD_combined2c0b5f72_fld24, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld65", FIELD_combined2c0b5f72_fld65, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld66", FIELD_combined2c0b5f72_fld66, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld68", FIELD_combined2c0b5f72_fld68, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld69", FIELD_combined2c0b5f72_fld69, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld74", FIELD_combined2c0b5f72_fld74, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld79", FIELD_combined2c0b5f72_fld79, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld88", FIELD_combined2c0b5f72_fld88, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld90", FIELD_combined2c0b5f72_fld90, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld91", FIELD_combined2c0b5f72_fld91, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld131ae_slot1", FIELD_combined2c0b5f72_fld131ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld132ae_slot1", FIELD_combined2c0b5f72_fld132ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "combined2c0b5f72_fld147ae_slot1", FIELD_combined2c0b5f72_fld147ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "s3to1", FIELD_s3to1, -1, 0, 0, 0, 0, 0, 0 }
+};
+
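+/* Operand ids; these must stay in the same order as the operands[]
+   table above, since they are used as indices into it. */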
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_uimmrx4,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_bbi,
+ OPERAND_sae,
+ OPERAND_sas,
+ OPERAND_sargt,
+ OPERAND_s,
+ OPERAND_mx,
+ OPERAND_my,
+ OPERAND_mw,
+ OPERAND_mr0,
+ OPERAND_mr1,
+ OPERAND_mr2,
+ OPERAND_mr3,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_bt,
+ OPERAND_bs,
+ OPERAND_br,
+ OPERAND_bt2,
+ OPERAND_bs2,
+ OPERAND_br2,
+ OPERAND_bt4,
+ OPERAND_bs4,
+ OPERAND_br4,
+ OPERAND_bt8,
+ OPERAND_bs8,
+ OPERAND_br8,
+ OPERAND_bt16,
+ OPERAND_bs16,
+ OPERAND_br16,
+ OPERAND_brall,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_ae_samt32,
+ OPERAND_pr0,
+ OPERAND_qr0,
+ OPERAND_mac_qr0,
+ OPERAND_ae_lsimm16,
+ OPERAND_ae_lsimm32,
+ OPERAND_ae_lsimm64,
+ OPERAND_ae_samt64,
+ OPERAND_ae_ohba,
+ OPERAND_ae_ohba2,
+ OPERAND_pr,
+ OPERAND_cvt_pr,
+ OPERAND_qr0_rw,
+ OPERAND_mac_qr0_rw,
+ OPERAND_qr1_w,
+ OPERAND_mac_qr1_w,
+ OPERAND_ps,
+ OPERAND_alupppb_ps,
+ OPERAND_bitindex,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sal,
+ OPERAND_sas4,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_r3,
+ OPERAND_rbit2,
+ OPERAND_rhi,
+ OPERAND_t3,
+ OPERAND_tbit2,
+ OPERAND_tlo,
+ OPERAND_w,
+ OPERAND_y,
+ OPERAND_x,
+ OPERAND_t2,
+ OPERAND_s2,
+ OPERAND_r2,
+ OPERAND_t4,
+ OPERAND_s4,
+ OPERAND_r4,
+ OPERAND_t8,
+ OPERAND_s8,
+ OPERAND_r8,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_ae_r3,
+ OPERAND_ae_s_non_samt,
+ OPERAND_ae_s3,
+ OPERAND_ae_r32,
+ OPERAND_ae_samt_s_t,
+ OPERAND_ae_r20,
+ OPERAND_ae_r10,
+ OPERAND_ae_s20,
+ OPERAND_ae_fld_ohba,
+ OPERAND_ae_fld_ohba2,
+ OPERAND_op0_s3,
+ OPERAND_ftsf11,
+ OPERAND_ftsf12,
+ OPERAND_ftsf13,
+ OPERAND_ftsf20ae_slot1,
+ OPERAND_ftsf21ae_slot1,
+ OPERAND_ftsf22ae_slot1,
+ OPERAND_ftsf23ae_slot1,
+ OPERAND_ftsf24ae_slot1,
+ OPERAND_ftsf25ae_slot1,
+ OPERAND_ftsf26ae_slot1,
+ OPERAND_ftsf27ae_slot1,
+ OPERAND_ftsf28ae_slot1,
+ OPERAND_ftsf29ae_slot1,
+ OPERAND_ftsf30ae_slot1,
+ OPERAND_ftsf31ae_slot1,
+ OPERAND_ftsf32ae_slot1,
+ OPERAND_ftsf33ae_slot1,
+ OPERAND_ftsf34ae_slot1,
+ OPERAND_ftsf35ae_slot1,
+ OPERAND_ftsf36ae_slot1,
+ OPERAND_ftsf37ae_slot1,
+ OPERAND_ftsf38ae_slot1,
+ OPERAND_ftsf40ae_slot1,
+ OPERAND_ftsf41ae_slot1,
+ OPERAND_ftsf42ae_slot1,
+ OPERAND_ftsf43ae_slot1,
+ OPERAND_ftsf45ae_slot1,
+ OPERAND_ftsf47ae_slot1,
+ OPERAND_ftsf48ae_slot1,
+ OPERAND_ftsf49ae_slot1,
+ OPERAND_ftsf50ae_slot1,
+ OPERAND_ftsf51ae_slot1,
+ OPERAND_ftsf52ae_slot1,
+ OPERAND_ftsf53ae_slot1,
+ OPERAND_ftsf54ae_slot1,
+ OPERAND_ftsf55,
+ OPERAND_ftsf56ae_slot1,
+ OPERAND_ftsf57ae_slot1,
+ OPERAND_ftsf58ae_slot1,
+ OPERAND_ftsf60ae_slot1,
+ OPERAND_ftsf61,
+ OPERAND_ftsf62ae_slot1,
+ OPERAND_ftsf63ae_slot1,
+ OPERAND_ftsf64ae_slot1,
+ OPERAND_ftsf66ae_slot1,
+ OPERAND_ftsf68ae_slot1,
+ OPERAND_ftsf69ae_slot1,
+ OPERAND_ftsf70ae_slot1,
+ OPERAND_ftsf71ae_slot1,
+ OPERAND_ftsf72ae_slot1,
+ OPERAND_ftsf73ae_slot1,
+ OPERAND_ftsf74ae_slot1,
+ OPERAND_ftsf75ae_slot1,
+ OPERAND_ftsf76ae_slot1,
+ OPERAND_ftsf77ae_slot1,
+ OPERAND_ftsf78ae_slot1,
+ OPERAND_ftsf79ae_slot1,
+ OPERAND_ftsf80ae_slot1,
+ OPERAND_ftsf81ae_slot1,
+ OPERAND_ftsf82ae_slot1,
+ OPERAND_ftsf83ae_slot1,
+ OPERAND_ftsf84ae_slot1,
+ OPERAND_ftsf85ae_slot1,
+ OPERAND_ftsf86ae_slot1,
+ OPERAND_ftsf87ae_slot1,
+ OPERAND_ftsf88ae_slot1,
+ OPERAND_ftsf89ae_slot1,
+ OPERAND_ftsf90ae_slot1,
+ OPERAND_ftsf91,
+ OPERAND_ftsf92ae_slot1,
+ OPERAND_ftsf93ae_slot1,
+ OPERAND_ftsf94ae_slot1,
+ OPERAND_ftsf96ae_slot1,
+ OPERAND_ftsf97ae_slot1,
+ OPERAND_ftsf99ae_slot1,
+ OPERAND_ftsf101ae_slot1,
+ OPERAND_ftsf102ae_slot1,
+ OPERAND_ftsf103ae_slot1,
+ OPERAND_ftsf106ae_slot1,
+ OPERAND_ftsf107ae_slot1,
+ OPERAND_ftsf108ae_slot1,
+ OPERAND_ftsf109ae_slot1,
+ OPERAND_ftsf110ae_slot1,
+ OPERAND_ftsf111ae_slot1,
+ OPERAND_ftsf112ae_slot1,
+ OPERAND_ftsf113ae_slot1,
+ OPERAND_ftsf114ae_slot1,
+ OPERAND_ftsf115ae_slot1,
+ OPERAND_ftsf116ae_slot1,
+ OPERAND_ftsf117ae_slot1,
+ OPERAND_ftsf118ae_slot1,
+ OPERAND_ftsf120ae_slot1,
+ OPERAND_ftsf121ae_slot1,
+ OPERAND_ftsf123ae_slot1,
+ OPERAND_ftsf124ae_slot1,
+ OPERAND_ftsf125ae_slot1,
+ OPERAND_ftsf126ae_slot1,
+ OPERAND_ftsf127ae_slot1,
+ OPERAND_ftsf128ae_slot1,
+ OPERAND_ftsf129ae_slot1,
+ OPERAND_ftsf130ae_slot1,
+ OPERAND_ftsf131ae_slot1,
+ OPERAND_ftsf132ae_slot1,
+ OPERAND_ftsf133ae_slot1,
+ OPERAND_ftsf134ae_slot1,
+ OPERAND_ftsf135ae_slot1,
+ OPERAND_ftsf136ae_slot1,
+ OPERAND_ftsf137ae_slot1,
+ OPERAND_ftsf138ae_slot1,
+ OPERAND_ftsf139ae_slot1,
+ OPERAND_ftsf140ae_slot1,
+ OPERAND_ftsf141ae_slot1,
+ OPERAND_ftsf142ae_slot1,
+ OPERAND_ftsf143ae_slot1,
+ OPERAND_ftsf144ae_slot1,
+ OPERAND_ftsf145ae_slot1,
+ OPERAND_ftsf146ae_slot1,
+ OPERAND_ftsf147ae_slot1,
+ OPERAND_ftsf148ae_slot1,
+ OPERAND_ftsf149ae_slot1,
+ OPERAND_ftsf150ae_slot1,
+ OPERAND_ftsf151ae_slot1,
+ OPERAND_ftsf152ae_slot1,
+ OPERAND_ftsf153ae_slot1,
+ OPERAND_ftsf154ae_slot1,
+ OPERAND_ftsf155ae_slot1,
+ OPERAND_ftsf156ae_slot1,
+ OPERAND_ftsf157ae_slot1,
+ OPERAND_ftsf158ae_slot1,
+ OPERAND_ftsf159ae_slot1,
+ OPERAND_ftsf160ae_slot1,
+ OPERAND_ftsf161ae_slot1,
+ OPERAND_ftsf162ae_slot1,
+ OPERAND_ftsf163ae_slot1,
+ OPERAND_ftsf164ae_slot1,
+ OPERAND_ftsf165ae_slot1,
+ OPERAND_ftsf166ae_slot1,
+ OPERAND_ftsf167ae_slot1,
+ OPERAND_ftsf168ae_slot1,
+ OPERAND_ftsf169ae_slot1,
+ OPERAND_ftsf170ae_slot1,
+ OPERAND_ftsf171ae_slot1,
+ OPERAND_ftsf172ae_slot1,
+ OPERAND_ftsf173ae_slot1,
+ OPERAND_ftsf174ae_slot1,
+ OPERAND_ftsf175ae_slot1,
+ OPERAND_ftsf176ae_slot1,
+ OPERAND_ftsf177ae_slot1,
+ OPERAND_ftsf178ae_slot1,
+ OPERAND_ftsf179ae_slot1,
+ OPERAND_ftsf180ae_slot1,
+ OPERAND_ftsf181ae_slot1,
+ OPERAND_ftsf182ae_slot1,
+ OPERAND_ftsf183ae_slot1,
+ OPERAND_ftsf184ae_slot1,
+ OPERAND_ftsf185ae_slot1,
+ OPERAND_ftsf186ae_slot1,
+ OPERAND_ftsf187ae_slot1,
+ OPERAND_ftsf188ae_slot1,
+ OPERAND_ftsf189ae_slot1,
+ OPERAND_ftsf190ae_slot1,
+ OPERAND_ftsf191ae_slot1,
+ OPERAND_ftsf192ae_slot1,
+ OPERAND_ftsf193ae_slot1,
+ OPERAND_ftsf194ae_slot1,
+ OPERAND_ftsf195ae_slot1,
+ OPERAND_ftsf196ae_slot1,
+ OPERAND_ftsf197ae_slot1,
+ OPERAND_ftsf198ae_slot1,
+ OPERAND_ftsf199ae_slot1,
+ OPERAND_ftsf200ae_slot1,
+ OPERAND_ftsf201ae_slot1,
+ OPERAND_ftsf202ae_slot1,
+ OPERAND_ftsf203ae_slot1,
+ OPERAND_ftsf204ae_slot1,
+ OPERAND_ftsf205ae_slot1,
+ OPERAND_ftsf206ae_slot1,
+ OPERAND_ftsf207ae_slot1,
+ OPERAND_ftsf208ae_slot1,
+ OPERAND_ftsf210ae_slot1,
+ OPERAND_ftsf333ae_slot1,
+ OPERAND_ftsf334ae_slot1,
+ OPERAND_ftsf335,
+ OPERAND_ftsf336ae_slot1,
+ OPERAND_ftsf337ae_slot1,
+ OPERAND_ftsf339ae_slot1,
+ OPERAND_ftsf340ae_slot1,
+ OPERAND_ftsf341ae_slot1,
+ OPERAND_ftsf342ae_slot1,
+ OPERAND_ftsf343ae_slot1,
+ OPERAND_ftsf344ae_slot1,
+ OPERAND_ftsf345ae_slot1,
+ OPERAND_ftsf347ae_slot1,
+ OPERAND_ftsf348ae_slot1,
+ OPERAND_ftsf349ae_slot1,
+ OPERAND_ftsf350ae_slot1,
+ OPERAND_ftsf351,
+ OPERAND_ftsf352ae_slot1,
+ OPERAND_ftsf354ae_slot1,
+ OPERAND_ftsf355ae_slot1,
+ OPERAND_ftsf356ae_slot1,
+ OPERAND_ftsf357ae_slot1,
+ OPERAND_ftsf358ae_slot1,
+ OPERAND_ftsf359ae_slot1,
+ OPERAND_op0_s4,
+ OPERAND_ftsf211ae_slot0,
+ OPERAND_ftsf212ae_slot0,
+ OPERAND_ftsf213ae_slot0,
+ OPERAND_ftsf214ae_slot0,
+ OPERAND_ftsf215ae_slot0,
+ OPERAND_ftsf217ae_slot0,
+ OPERAND_ftsf218ae_slot0,
+ OPERAND_ftsf219ae_slot0,
+ OPERAND_ftsf220ae_slot0,
+ OPERAND_ftsf221ae_slot0,
+ OPERAND_ftsf222ae_slot0,
+ OPERAND_ftsf223ae_slot0,
+ OPERAND_ftsf224ae_slot0,
+ OPERAND_ftsf225ae_slot0,
+ OPERAND_ftsf226ae_slot0,
+ OPERAND_ftsf227ae_slot0,
+ OPERAND_ftsf228ae_slot0,
+ OPERAND_ftsf229ae_slot0,
+ OPERAND_ftsf230ae_slot0,
+ OPERAND_ftsf231ae_slot0,
+ OPERAND_ftsf232ae_slot0,
+ OPERAND_ftsf233ae_slot0,
+ OPERAND_ftsf234ae_slot0,
+ OPERAND_ftsf235ae_slot0,
+ OPERAND_ftsf236ae_slot0,
+ OPERAND_ftsf237ae_slot0,
+ OPERAND_ftsf238ae_slot0,
+ OPERAND_ftsf239ae_slot0,
+ OPERAND_ftsf240ae_slot0,
+ OPERAND_ftsf241ae_slot0,
+ OPERAND_ftsf242ae_slot0,
+ OPERAND_ftsf243ae_slot0,
+ OPERAND_ftsf244ae_slot0,
+ OPERAND_ftsf245ae_slot0,
+ OPERAND_ftsf246ae_slot0,
+ OPERAND_ftsf247ae_slot0,
+ OPERAND_ftsf248ae_slot0,
+ OPERAND_ftsf249ae_slot0,
+ OPERAND_ftsf250ae_slot0,
+ OPERAND_ftsf251ae_slot0,
+ OPERAND_ftsf252ae_slot0,
+ OPERAND_ftsf253ae_slot0,
+ OPERAND_ftsf254ae_slot0,
+ OPERAND_ftsf255ae_slot0,
+ OPERAND_ftsf256ae_slot0,
+ OPERAND_ftsf257ae_slot0,
+ OPERAND_ftsf258ae_slot0,
+ OPERAND_ftsf259ae_slot0,
+ OPERAND_ftsf260ae_slot0,
+ OPERAND_ftsf261ae_slot0,
+ OPERAND_ftsf262ae_slot0,
+ OPERAND_ftsf263ae_slot0,
+ OPERAND_ftsf264ae_slot0,
+ OPERAND_ftsf265ae_slot0,
+ OPERAND_ftsf266ae_slot0,
+ OPERAND_ftsf267ae_slot0,
+ OPERAND_ftsf268ae_slot0,
+ OPERAND_ftsf269ae_slot0,
+ OPERAND_ftsf270ae_slot0,
+ OPERAND_ftsf271ae_slot0,
+ OPERAND_ftsf272ae_slot0,
+ OPERAND_ftsf273ae_slot0,
+ OPERAND_ftsf274ae_slot0,
+ OPERAND_ftsf275ae_slot0,
+ OPERAND_ftsf276ae_slot0,
+ OPERAND_ftsf277ae_slot0,
+ OPERAND_ftsf278ae_slot0,
+ OPERAND_ftsf279ae_slot0,
+ OPERAND_ftsf280,
+ OPERAND_ftsf281ae_slot0,
+ OPERAND_ftsf282ae_slot0,
+ OPERAND_ftsf284ae_slot0,
+ OPERAND_ftsf285ae_slot0,
+ OPERAND_ftsf287ae_slot0,
+ OPERAND_ftsf288,
+ OPERAND_ftsf289ae_slot0,
+ OPERAND_ftsf290ae_slot0,
+ OPERAND_ftsf291ae_slot0,
+ OPERAND_ftsf292ae_slot0,
+ OPERAND_ftsf293ae_slot0,
+ OPERAND_ftsf294ae_slot0,
+ OPERAND_ftsf295ae_slot0,
+ OPERAND_ftsf296ae_slot0,
+ OPERAND_ftsf297ae_slot0,
+ OPERAND_ftsf298ae_slot0,
+ OPERAND_ftsf300ae_slot0,
+ OPERAND_ftsf302ae_slot0,
+ OPERAND_ftsf304ae_slot0,
+ OPERAND_ftsf305ae_slot0,
+ OPERAND_ftsf306ae_slot0,
+ OPERAND_ftsf307ae_slot0,
+ OPERAND_ftsf308ae_slot0,
+ OPERAND_ftsf309,
+ OPERAND_ftsf310ae_slot0,
+ OPERAND_ftsf311ae_slot0,
+ OPERAND_ftsf312ae_slot0,
+ OPERAND_ftsf313ae_slot0,
+ OPERAND_ftsf314ae_slot0,
+ OPERAND_ftsf315,
+ OPERAND_ftsf316ae_slot0,
+ OPERAND_ftsf317ae_slot0,
+ OPERAND_ftsf319ae_slot0,
+ OPERAND_ftsf320ae_slot0,
+ OPERAND_ftsf322ae_slot0,
+ OPERAND_ftsf323ae_slot0,
+ OPERAND_ftsf324ae_slot0,
+ OPERAND_ftsf325ae_slot0,
+ OPERAND_ftsf326ae_slot0,
+ OPERAND_ftsf327ae_slot0,
+ OPERAND_ftsf328ae_slot0,
+ OPERAND_ftsf329ae_slot0,
+ OPERAND_ftsf360ae_slot0,
+ OPERAND_ftsf361ae_slot0,
+ OPERAND_ftsf362,
+ OPERAND_ftsf363ae_slot0,
+ OPERAND_ftsf364ae_slot0,
+ OPERAND_ftsf366ae_slot0,
+ OPERAND_ftsf368ae_slot0,
+ OPERAND_ftsf370ae_slot0,
+ OPERAND_ftsf373ae_slot0,
+ OPERAND_ftsf376ae_slot0,
+ OPERAND_ftsf378ae_slot0,
+ OPERAND_ftsf379ae_slot0,
+ OPERAND_ftsf382ae_slot0,
+ OPERAND_ftsf383ae_slot0,
+ OPERAND_ftsf384ae_slot0,
+ OPERAND_ftsf386ae_slot0,
+ OPERAND_ftsf387ae_slot0,
+ OPERAND_ftsf388ae_slot0,
+ OPERAND_ftsf389ae_slot0,
+ OPERAND_ae_mul32x24fld,
+ OPERAND_op0_s4_s4,
+ OPERAND_combined2c0b5f72_fld28,
+ OPERAND_combined2c0b5f72_fld37,
+ OPERAND_combined2c0b5f72_fld39,
+ OPERAND_combined2c0b5f72_fld40,
+ OPERAND_combined2c0b5f72_fld46,
+ OPERAND_combined2c0b5f72_fld47,
+ OPERAND_combined2c0b5f72_fld49,
+ OPERAND_combined2c0b5f72_fld50,
+ OPERAND_combined2c0b5f72_fld52,
+ OPERAND_combined2c0b5f72_fld121,
+ OPERAND_combined2c0b5f72_fld123,
+ OPERAND_combined2c0b5f72_fld127,
+ OPERAND_combined2c0b5f72_fld133ae_slot0,
+ OPERAND_combined2c0b5f72_fld134ae_slot0,
+ OPERAND_combined2c0b5f72_fld135ae_slot0,
+ OPERAND_combined2c0b5f72_fld136ae_slot0,
+ OPERAND_combined2c0b5f72_fld137ae_slot0,
+ OPERAND_combined2c0b5f72_fld138ae_slot0,
+ OPERAND_combined2c0b5f72_fld139ae_slot0,
+ OPERAND_combined2c0b5f72_fld140ae_slot0,
+ OPERAND_combined2c0b5f72_fld141ae_slot0,
+ OPERAND_combined2c0b5f72_fld142ae_slot0,
+ OPERAND_combined2c0b5f72_fld143ae_slot0,
+ OPERAND_combined2c0b5f72_fld144ae_slot0,
+ OPERAND_combined2c0b5f72_fld145ae_slot0,
+ OPERAND_combined2c0b5f72_fld146ae_slot0,
+ OPERAND_combined2c0b5f72_fld148ae_slot0,
+ OPERAND_combined2c0b5f72_fld149ae_slot0,
+ OPERAND_op0_s4_s4_s4,
+ OPERAND_combined1e9fefee_fld96,
+ OPERAND_combined1e9fefee_fld98,
+ OPERAND_combined1e9fefee_fld106ae_slot0,
+ OPERAND_combined1e9fefee_fld107ae_slot0,
+ OPERAND_combined1e9fefee_fld108ae_slot0,
+ OPERAND_combined1e9fefee_fld109ae_slot0,
+ OPERAND_op0_s3_s3,
+ OPERAND_combined2c0b5f72_fld19,
+ OPERAND_combined2c0b5f72_fld22,
+ OPERAND_combined2c0b5f72_fld24,
+ OPERAND_combined2c0b5f72_fld65,
+ OPERAND_combined2c0b5f72_fld66,
+ OPERAND_combined2c0b5f72_fld68,
+ OPERAND_combined2c0b5f72_fld69,
+ OPERAND_combined2c0b5f72_fld74,
+ OPERAND_combined2c0b5f72_fld79,
+ OPERAND_combined2c0b5f72_fld88,
+ OPERAND_combined2c0b5f72_fld90,
+ OPERAND_combined2c0b5f72_fld91,
+ OPERAND_combined2c0b5f72_fld131ae_slot1,
+ OPERAND_combined2c0b5f72_fld132ae_slot1,
+ OPERAND_combined2c0b5f72_fld147ae_slot1,
+ OPERAND_s3to1
+};
+
+
+/* Iclass table. */
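+/* Editor's gloss, matching the libisa conventions these tables follow:
+   each entry pairs an operand or state with a direction code,
+   'i' = input (read), 'o' = output (written), 'm' = modified (read
+   and written).  The 's' code, which appears only on the ENTRY
+   instruction's ars_entry operand below, is presumed to mark a
+   special-case register operand. */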
+
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32nb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimmrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_memctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_memctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_memctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_243_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_243_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps3_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps4_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps5_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS5 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps6_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS6 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps7_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS7 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32h_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_aa_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_ad_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_da_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_dd_stateArgs[] = {
+ { { STATE_ACC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_aa_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_ad_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_args[] = {
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16a_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_da_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_mx }, 'i' },
+ { { OPERAND_my }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16al_dd_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mac16_l_args[] = {
+ { { OPERAND_mw }, 'o' },
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m0_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m0_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m0_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m1_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m1_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m1_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m2_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m2_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m2_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_m3_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_mr3 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_m3_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_mr3 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_m3_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_mr3 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acclo_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_acchi_stateArgs[] = {
+ { { STATE_ACC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPC3 }, 'i' },
+ { { STATE_EPC4 }, 'i' },
+ { { STATE_EPC5 }, 'i' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_EPC7 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_EPS3 }, 'i' },
+ { { STATE_EPS4 }, 'i' },
+ { { STATE_EPS5 }, 'i' },
+ { { STATE_EPS6 }, 'i' },
+ { { STATE_EPS7 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC0 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKA1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dbreakc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DBREAKC1 }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreaka1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKA1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ibreakenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_IBREAKENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_lddr32_p_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sddr32_p_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_InOCDMode }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC6 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
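+/*
+ * Sketch of how these arrays are consumed (field names assumed from
+ * xtensa-isa-internal.h, not part of the generated data): each iclass
+ * record points at one *_args, *_stateArgs and *_intfArgs array, and
+ * the ISA library answers direction queries by scanning them, roughly
+ *
+ *     xtensa_arg_internal *arg = &iclass->stateOperands[n];
+ *     bool written = (arg->inout == 'o' || arg->inout == 'm');
+ */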
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_mmid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
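+/*
+ * The next group covers the Xtensa Boolean option: the bbool* iclasses
+ * are the boolean logic ops on single (bs/bt), 4-wide (bs4) and 8-wide
+ * (bs8) B-register fields, bbranch the BT/BF conditional branches, and
+ * bmove the conditional moves keyed on a boolean register.
+ */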
+static xtensa_arg_internal Iclass_xt_iclass_bbool1_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool4_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool8_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbranch_args[] = {
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bmove_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_RSR_BR_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_brall }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_WSR_BR_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_brall }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_XSR_BR_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_brall }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
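+/*
+ * Writing a CCOMPARE register also retires any pending timer interrupt,
+ * which is why the WSR/XSR tables here and for CCOMPARE1/2 below list
+ * STATE_INTERRUPT as 'm' in addition to the register state itself.
+ */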
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE2 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_dyn_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_dyn_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_lock_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prefctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prefctl_stateArgs[] = {
+ { { STATE_PREFCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_prefctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_prefctl_stateArgs[] = {
+ { { STATE_PREFCTL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_prefctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_prefctl_stateArgs[] = {
+ { { STATE_PREFCTL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'i' },
+ { { STATE_INSTPGSZID5 }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'o' },
+ { { STATE_INSTPGSZID5 }, 'o' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID6 }, 'm' },
+ { { STATE_INSTPGSZID5 }, 'm' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'i' },
+ { { STATE_DATAPGSZID5 }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'o' },
+ { { STATE_DATAPGSZID5 }, 'o' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID6 }, 'm' },
+ { { STATE_DATAPGSZID5 }, 'm' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
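+/*
+ * idtlb/iitlb (invalidate), rdtlb/ritlb (read) and wdtlb/witlb (write)
+ * manage data- and instruction-TLB entries.  Only the data-TLB
+ * invalidate and write forms (idtlb, wdtlb) flag STATE_XTSYNC here,
+ * since they change translations that in-flight loads and stores may
+ * depend on.
+ */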
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
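+/*
+ * S32C1I (compare-and-swap) takes its compare value from SCOMPARE1,
+ * and art is 'm' because the old memory value is returned in place of
+ * the value stored.  Note the generated table above lists
+ * STATE_SCOMPARE1 twice; both entries are inputs, so the duplication
+ * is harmless to direction queries.
+ */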
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_div_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_ERI_RAW_INTERLOCK }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_interface Iclass_xt_iclass_rer_intfArgs[] = {
+ INTERFACE_ERI_RD_In,
+ INTERFACE_ERI_RD_Out
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_ERI_RAW_INTERLOCK }, 'o' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_interface Iclass_xt_iclass_wer_intfArgs[] = {
+ INTERFACE_ERI_WR_In,
+ INTERFACE_ERI_WR_Out
+};
+
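+/*
+ * From here on the tables describe the AudioEngine (AE) coprocessor.
+ * Every AE iclass reads STATE_CPENABLE so that using the coprocessor
+ * while it is disabled can raise a coprocessor-disabled exception.
+ */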
+static xtensa_arg_internal Iclass_rur_ae_ovf_sar_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ovf_sar_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'i' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ovf_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ovf_sar_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'o' },
+ { { STATE_AE_SAR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_bithead_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_bithead_stateArgs[] = {
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_bithead_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_bithead_stateArgs[] = {
+ { { STATE_AE_BITHEAD }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ts_fts_bu_bp_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ts_fts_bu_bp_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_TABLESIZE }, 'i' },
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ts_fts_bu_bp_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ts_fts_bu_bp_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'o' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_sd_no_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_sd_no_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_sd_no_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_sd_no_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_overflow_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_overflow_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_overflow_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_overflow_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_sar_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_sar_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_sar_stateArgs[] = {
+ { { STATE_AE_SAR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitptr_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitptr_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitsused_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitsused_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitsused_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitsused_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_tablesize_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_tablesize_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_tablesize_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_tablesize_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_first_ts_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_first_ts_stateArgs[] = {
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_first_ts_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_first_ts_stateArgs[] = {
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_nextoffset_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_nextoffset_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_nextoffset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_nextoffset_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_searchdone_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_searchdone_stateArgs[] = {
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_searchdone_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_searchdone_stateArgs[] = {
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
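+/*
+ * The AE load/store variants encode their addressing mode in the
+ * suffix: _i immediate offset, _x register index, and _iu/_xu the same
+ * with base-address update, which is why the update forms mark the
+ * base register ars as 'm' instead of 'i'.
+ */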
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
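+/*
+ * lq56/sq56s and lq32f/sq32f move the wider q (accumulator) registers;
+ * they follow the same _i/_iu/_x/_xu pattern as the p-register forms
+ * above, with OPERAND_qr1_w as the load target and OPERAND_qr0_rw as
+ * the store source.
+ */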
+static xtensa_arg_internal Iclass_ae_iclass_lq56_i_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_iu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_x_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_xu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_i_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_iu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_x_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_xu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_i_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_iu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_x_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_xu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_i_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_iu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_x_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_xu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
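+/*
+ * The remaining tables cover register-to-register moves, selections,
+ * conversions and rounding between the a, p and q register files
+ * (zerop48, movp48, selp24_*, cvt*, trunc*, round*), followed by the
+ * AE arithmetic iclasses.
+ */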
+static xtensa_arg_internal Iclass_ae_iclass_zerop48_args[] = {
+ { { OPERAND_ps }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zerop48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_ll_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_lh_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hl_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hh_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp24x2_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp24x2_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp48_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp48_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movpa24x2_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movpa24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24a32x2_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24a32x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_ll_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_lh_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hl_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hh_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24q48x2_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24q48x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp16_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp16_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
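+/*
+ * The rounding iclasses above (roundsp24q48*, roundsp16*) mark
+ * STATE_AE_OVERFLOW as 'm': the sticky overflow flag is read and
+ * updated when the rounded result saturates.  The saturating q-register
+ * ops below (satq48s, roundsq32*) follow the same pattern.
+ */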
+static xtensa_arg_internal Iclass_ae_iclass_zeroq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zeroq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtq56_args[] = {
+ { { OPERAND_qr1_w }, 'm' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfq56_args[] = {
+ { { OPERAND_qr1_w }, 'm' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48a32s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48a32s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_l_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_cvt_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_h_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_cvt_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_satq48s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_satq48s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncq32_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncq32_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32sym_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32asym_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca32q48_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca32q48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbp24s_args[] = {
+ { { OPERAND_alupppb_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbp24s_args[] = {
+ { { OPERAND_alupppb_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltp24s_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lep24s_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lep24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqp24_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bt }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bt }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
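+/*
+ * Shift iclasses: the immediate forms (SLLI/SRLI/SRAI...) take an
+ * OPERAND_ae_samt32 or OPERAND_ae_samt64 shift amount, the
+ * "s"-suffixed forms (SLLS/SRLS/SRAS...) instead read the amount from
+ * STATE_AE_SAR, and the "a"-suffixed forms take it from the address
+ * register OPERAND_ars.
+ */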
+static xtensa_arg_internal Iclass_ae_iclass_sllip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_slliq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_slliq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srliq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srliq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraiq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraiq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllasq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllasq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltq56s_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_leq56s_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_leq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqq56_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nsaq56s_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nsaq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
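+/*
+ * Multiply iclasses: plain MUL* forms write the accumulator
+ * (OPERAND_mac_qr1_w) as a pure output ('o'), whereas the MULA*
+ * (multiply-accumulate) and MULS* (multiply-subtract) forms flag it
+ * 'm', since the previous accumulator value is read before the
+ * product is added or subtracted.
+ */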
+static xtensa_arg_internal Iclass_ae_iclass_mulsrfq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsrfq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsrfq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsrfq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mularfq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mularfq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mularfq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mularfq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulrfq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulrfq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulrfq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulrfq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp24s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp24s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
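+/*
+ * MULZ** dual-multiply iclasses: two multiplicand pairs
+ * (mac_qr0_rw/pr and mac_qr0/pr0) feed one destination.  The
+ * destination stays a pure output ('o'), consistent with the "Z"
+ * (zeroing) naming: the prior accumulator value does not contribute
+ * an input.
+ */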
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sha32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl32t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl32t_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16t_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16c_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16c_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldsht_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldsht_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lb_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lb_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbi_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ae_ohba2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbk_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbk_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbki_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_ohba2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbki_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_db_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_db_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_dbi_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_ohba }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_dbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel32t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel32t_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel16t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel16t_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sb_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sb_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbi_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ae_ohba }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vles16c_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vles16c_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbf_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbf_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SLAASQ56S_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SLAASQ56S_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_ADDBRBA32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MINABSSP24S_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MINABSSP24S_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MAXABSSP24S_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MAXABSSP24S_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MINABSSQ56S_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MINABSSQ56S_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MAXABSSQ56S_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_MAXABSSQ56S_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_cbegin0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_cbegin0_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_cbegin0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_cbegin0_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_cend0_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_cend0_stateArgs[] = {
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_cend0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_cend0_stateArgs[] = {
+ { { STATE_AE_CEND0 }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24X2_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24X2_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24X2S_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24X2S_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24X2F_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24X2F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24X2F_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24X2F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP16X2F_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP16X2F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP16X2F_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP16X2F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24S_L_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24S_L_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24F_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP24F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24F_L_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP24F_L_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP16F_C_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LP16F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP16F_L_C_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SP16F_L_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LQ56_C_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LQ56_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SQ56S_C_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SQ56S_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LQ32F_C_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_LQ32F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SQ32F_C_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_icls_AE_SQ32F_C_stateArgs[] = {
+ { { STATE_AE_CBEGIN0 }, 'i' },
+ { { STATE_AE_CEND0 }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_expstate_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_iclass_READ_IMPWIRE_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_interface Iclass_iclass_READ_IMPWIRE_intfArgs[] = {
+ INTERFACE_IMPWIRE
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_SETB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_args[] = {
+ { { OPERAND_bitindex }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_CLRB_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_iclass_WRMSK_EXPSTATE_stateArgs[] = {
+ { { STATE_EXPSTATE }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
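+
+/*
+ * A minimal sketch of how these operand tables are reached through the
+ * public xtensa-isa API (the "isa" handle and "opc" opcode are assumed
+ * here purely for illustration; xtensa_opcode_num_operands() and
+ * xtensa_operand_inout() are existing libisa entry points, not something
+ * introduced by this patch):
+ *
+ *     int i, n = xtensa_opcode_num_operands(isa, opc);
+ *
+ *     for (i = 0; i < n; i++) {
+ *         char dir = xtensa_operand_inout(isa, opc, i);
+ *         ... dir is 'i', 'o' or 'm', matching the tables above ...
+ *     }
+ */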
+
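+/*
+ * Each entry in the iclasses[] table below pairs a count with a table
+ * pointer, three times over:
+ *   { number of operands, operand-args table,
+ *     number of state args, stateArgs table,
+ *     number of interface args, interface table }
+ * An iclass with no operands is encoded as { 0, 0 }, with the iclass
+ * name retained in a comment.
+ */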
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 5, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32nb_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_memctl_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid0_args,
+ 2, Iclass_xt_iclass_rsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_configid0_args,
+ 2, Iclass_xt_iclass_wsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid1_args,
+ 2, Iclass_xt_iclass_rsr_configid1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_243_args,
+ 2, Iclass_xt_iclass_rsr_243_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc3_args,
+ 3, Iclass_xt_iclass_rsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc3_args,
+ 3, Iclass_xt_iclass_wsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc3_args,
+ 3, Iclass_xt_iclass_xsr_epc3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave3_args,
+ 3, Iclass_xt_iclass_rsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave3_args,
+ 3, Iclass_xt_iclass_wsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave3_args,
+ 3, Iclass_xt_iclass_xsr_excsave3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc4_args,
+ 3, Iclass_xt_iclass_rsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc4_args,
+ 3, Iclass_xt_iclass_wsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc4_args,
+ 3, Iclass_xt_iclass_xsr_epc4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave4_args,
+ 3, Iclass_xt_iclass_rsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave4_args,
+ 3, Iclass_xt_iclass_wsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave4_args,
+ 3, Iclass_xt_iclass_xsr_excsave4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc5_args,
+ 3, Iclass_xt_iclass_rsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc5_args,
+ 3, Iclass_xt_iclass_wsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc5_args,
+ 3, Iclass_xt_iclass_xsr_epc5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave5_args,
+ 3, Iclass_xt_iclass_rsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave5_args,
+ 3, Iclass_xt_iclass_wsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave5_args,
+ 3, Iclass_xt_iclass_xsr_excsave5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc6_args,
+ 3, Iclass_xt_iclass_rsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc6_args,
+ 3, Iclass_xt_iclass_wsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc6_args,
+ 3, Iclass_xt_iclass_xsr_epc6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave6_args,
+ 3, Iclass_xt_iclass_rsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave6_args,
+ 3, Iclass_xt_iclass_wsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave6_args,
+ 3, Iclass_xt_iclass_xsr_excsave6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc7_args,
+ 3, Iclass_xt_iclass_rsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc7_args,
+ 3, Iclass_xt_iclass_wsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc7_args,
+ 3, Iclass_xt_iclass_xsr_epc7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave7_args,
+ 3, Iclass_xt_iclass_rsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave7_args,
+ 3, Iclass_xt_iclass_wsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave7_args,
+ 3, Iclass_xt_iclass_xsr_excsave7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps3_args,
+ 3, Iclass_xt_iclass_rsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps3_args,
+ 3, Iclass_xt_iclass_wsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps3_args,
+ 3, Iclass_xt_iclass_xsr_eps3_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps4_args,
+ 3, Iclass_xt_iclass_rsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps4_args,
+ 3, Iclass_xt_iclass_wsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps4_args,
+ 3, Iclass_xt_iclass_xsr_eps4_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps5_args,
+ 3, Iclass_xt_iclass_rsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps5_args,
+ 3, Iclass_xt_iclass_wsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps5_args,
+ 3, Iclass_xt_iclass_xsr_eps5_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps6_args,
+ 3, Iclass_xt_iclass_rsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps6_args,
+ 3, Iclass_xt_iclass_wsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps6_args,
+ 3, Iclass_xt_iclass_xsr_eps6_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps7_args,
+ 3, Iclass_xt_iclass_rsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps7_args,
+ 3, Iclass_xt_iclass_wsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps7_args,
+ 3, Iclass_xt_iclass_xsr_eps7_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 3, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 3, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 3, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32h_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_aa_args,
+ 1, Iclass_xt_iclass_mac16_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_ad_args,
+ 1, Iclass_xt_iclass_mac16_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_da_args,
+ 1, Iclass_xt_iclass_mac16_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_dd_args,
+ 1, Iclass_xt_iclass_mac16_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_aa_args,
+ 1, Iclass_xt_iclass_mac16a_aa_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_ad_args,
+ 1, Iclass_xt_iclass_mac16a_ad_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_da_args,
+ 1, Iclass_xt_iclass_mac16a_da_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16a_dd_args,
+ 1, Iclass_xt_iclass_mac16a_dd_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_da_args,
+ 1, Iclass_xt_iclass_mac16al_da_stateArgs, 0, 0 },
+ { 4, Iclass_xt_iclass_mac16al_dd_args,
+ 1, Iclass_xt_iclass_mac16al_dd_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_mac16_l_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m2_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_wsr_m3_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_xsr_m3_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acclo_args,
+ 1, Iclass_xt_iclass_rsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acclo_args,
+ 1, Iclass_xt_iclass_wsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acclo_args,
+ 1, Iclass_xt_iclass_xsr_acclo_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_acchi_args,
+ 1, Iclass_xt_iclass_rsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_acchi_args,
+ 1, Iclass_xt_iclass_wsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_acchi_args,
+ 1, Iclass_xt_iclass_xsr_acchi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 21, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka0_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka0_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc0_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc0_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreaka1_args,
+ 3, Iclass_xt_iclass_rsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_wsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreaka1_args,
+ 4, Iclass_xt_iclass_xsr_dbreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dbreakc1_args,
+ 3, Iclass_xt_iclass_rsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_wsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dbreakc1_args,
+ 4, Iclass_xt_iclass_xsr_dbreakc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka0_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_rsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_wsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreaka1_args,
+ 3, Iclass_xt_iclass_xsr_ibreaka1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_rsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_wsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ibreakenable_args,
+ 3, Iclass_xt_iclass_xsr_ibreakenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_lddr32_p_args,
+ 5, Iclass_xt_iclass_lddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sddr32_p_args,
+ 4, Iclass_xt_iclass_sddr32_p_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_mmid_args,
+ 3, Iclass_xt_iclass_wsr_mmid_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_bbool1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbranch_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bmove_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_RSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_WSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_XSR_BR_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare2_args,
+ 3, Iclass_xt_iclass_rsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare2_args,
+ 4, Iclass_xt_iclass_wsr_ccompare2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare2_args,
+ 4, Iclass_xt_iclass_xsr_ccompare2_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_lock_args,
+ 2, Iclass_xt_iclass_icache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_dcache_dyn_args,
+ 2, Iclass_xt_iclass_dcache_dyn_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_lock_args,
+ 2, Iclass_xt_iclass_dcache_lock_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prefctl_args,
+ 1, Iclass_xt_iclass_rsr_prefctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_prefctl_args,
+ 1, Iclass_xt_iclass_wsr_prefctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_prefctl_args,
+ 1, Iclass_xt_iclass_xsr_prefctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 5, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 6, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 3, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 3, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 3, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 3, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 4, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 4, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_div_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_rer_args,
+ 3, Iclass_xt_iclass_rer_stateArgs, 2, Iclass_xt_iclass_rer_intfArgs },
+ { 2, Iclass_xt_iclass_wer_args,
+ 3, Iclass_xt_iclass_wer_stateArgs, 2, Iclass_xt_iclass_wer_intfArgs },
+ { 1, Iclass_rur_ae_ovf_sar_args,
+ 3, Iclass_rur_ae_ovf_sar_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_ovf_sar_args,
+ 3, Iclass_wur_ae_ovf_sar_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_bithead_args,
+ 2, Iclass_rur_ae_bithead_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_bithead_args,
+ 2, Iclass_wur_ae_bithead_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_ts_fts_bu_bp_args,
+ 5, Iclass_rur_ae_ts_fts_bu_bp_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_ts_fts_bu_bp_args,
+ 5, Iclass_wur_ae_ts_fts_bu_bp_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_sd_no_args,
+ 3, Iclass_rur_ae_sd_no_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_sd_no_args,
+ 3, Iclass_wur_ae_sd_no_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_overflow_args,
+ 2, Iclass_ae_iclass_rur_ae_overflow_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_overflow_args,
+ 2, Iclass_ae_iclass_wur_ae_overflow_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_sar_args,
+ 2, Iclass_ae_iclass_rur_ae_sar_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_sar_args,
+ 2, Iclass_ae_iclass_wur_ae_sar_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_bitptr_args,
+ 2, Iclass_ae_iclass_rur_ae_bitptr_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_bitptr_args,
+ 2, Iclass_ae_iclass_wur_ae_bitptr_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_bitsused_args,
+ 2, Iclass_ae_iclass_rur_ae_bitsused_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_bitsused_args,
+ 2, Iclass_ae_iclass_wur_ae_bitsused_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_tablesize_args,
+ 2, Iclass_ae_iclass_rur_ae_tablesize_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_tablesize_args,
+ 2, Iclass_ae_iclass_wur_ae_tablesize_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_first_ts_args,
+ 2, Iclass_ae_iclass_rur_ae_first_ts_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_first_ts_args,
+ 2, Iclass_ae_iclass_wur_ae_first_ts_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_nextoffset_args,
+ 2, Iclass_ae_iclass_rur_ae_nextoffset_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_nextoffset_args,
+ 2, Iclass_ae_iclass_wur_ae_nextoffset_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_searchdone_args,
+ 2, Iclass_ae_iclass_rur_ae_searchdone_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_searchdone_args,
+ 2, Iclass_ae_iclass_wur_ae_searchdone_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_i_args,
+ 1, Iclass_ae_iclass_lp16f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_iu_args,
+ 1, Iclass_ae_iclass_lp16f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_x_args,
+ 1, Iclass_ae_iclass_lp16f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_xu_args,
+ 1, Iclass_ae_iclass_lp16f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_i_args,
+ 1, Iclass_ae_iclass_lp24_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_iu_args,
+ 1, Iclass_ae_iclass_lp24_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_x_args,
+ 1, Iclass_ae_iclass_lp24_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_xu_args,
+ 1, Iclass_ae_iclass_lp24_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_i_args,
+ 1, Iclass_ae_iclass_lp24f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_iu_args,
+ 1, Iclass_ae_iclass_lp24f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_x_args,
+ 1, Iclass_ae_iclass_lp24f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_xu_args,
+ 1, Iclass_ae_iclass_lp24f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_i_args,
+ 1, Iclass_ae_iclass_lp16x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_iu_args,
+ 1, Iclass_ae_iclass_lp16x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_x_args,
+ 1, Iclass_ae_iclass_lp16x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_xu_args,
+ 1, Iclass_ae_iclass_lp16x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_i_args,
+ 1, Iclass_ae_iclass_lp24x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_iu_args,
+ 1, Iclass_ae_iclass_lp24x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_x_args,
+ 1, Iclass_ae_iclass_lp24x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_xu_args,
+ 1, Iclass_ae_iclass_lp24x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_i_args,
+ 1, Iclass_ae_iclass_lp24x2_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_iu_args,
+ 1, Iclass_ae_iclass_lp24x2_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_x_args,
+ 1, Iclass_ae_iclass_lp24x2_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_xu_args,
+ 1, Iclass_ae_iclass_lp24x2_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_i_args,
+ 1, Iclass_ae_iclass_sp16x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_iu_args,
+ 1, Iclass_ae_iclass_sp16x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_x_args,
+ 1, Iclass_ae_iclass_sp16x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_xu_args,
+ 1, Iclass_ae_iclass_sp16x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_i_args,
+ 1, Iclass_ae_iclass_sp24x2s_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_iu_args,
+ 1, Iclass_ae_iclass_sp24x2s_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_x_args,
+ 1, Iclass_ae_iclass_sp24x2s_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_xu_args,
+ 1, Iclass_ae_iclass_sp24x2s_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_i_args,
+ 1, Iclass_ae_iclass_sp24x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_iu_args,
+ 1, Iclass_ae_iclass_sp24x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_x_args,
+ 1, Iclass_ae_iclass_sp24x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_xu_args,
+ 1, Iclass_ae_iclass_sp24x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_i_args,
+ 1, Iclass_ae_iclass_sp16f_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_iu_args,
+ 1, Iclass_ae_iclass_sp16f_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_x_args,
+ 1, Iclass_ae_iclass_sp16f_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_xu_args,
+ 1, Iclass_ae_iclass_sp16f_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_i_args,
+ 1, Iclass_ae_iclass_sp24s_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_iu_args,
+ 1, Iclass_ae_iclass_sp24s_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_x_args,
+ 1, Iclass_ae_iclass_sp24s_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_xu_args,
+ 1, Iclass_ae_iclass_sp24s_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_i_args,
+ 1, Iclass_ae_iclass_sp24f_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_iu_args,
+ 1, Iclass_ae_iclass_sp24f_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_x_args,
+ 1, Iclass_ae_iclass_sp24f_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_xu_args,
+ 1, Iclass_ae_iclass_sp24f_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_i_args,
+ 1, Iclass_ae_iclass_lq56_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_iu_args,
+ 1, Iclass_ae_iclass_lq56_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_x_args,
+ 1, Iclass_ae_iclass_lq56_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_xu_args,
+ 1, Iclass_ae_iclass_lq56_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_i_args,
+ 1, Iclass_ae_iclass_lq32f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_iu_args,
+ 1, Iclass_ae_iclass_lq32f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_x_args,
+ 1, Iclass_ae_iclass_lq32f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_xu_args,
+ 1, Iclass_ae_iclass_lq32f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_i_args,
+ 1, Iclass_ae_iclass_sq56s_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_iu_args,
+ 1, Iclass_ae_iclass_sq56s_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_x_args,
+ 1, Iclass_ae_iclass_sq56s_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_xu_args,
+ 1, Iclass_ae_iclass_sq56s_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_i_args,
+ 1, Iclass_ae_iclass_sq32f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_iu_args,
+ 1, Iclass_ae_iclass_sq32f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_x_args,
+ 1, Iclass_ae_iclass_sq32f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_xu_args,
+ 1, Iclass_ae_iclass_sq32f_xu_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_zerop48_args,
+ 1, Iclass_ae_iclass_zerop48_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movp48_args,
+ 1, Iclass_ae_iclass_movp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_ll_args,
+ 1, Iclass_ae_iclass_selp24_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_lh_args,
+ 1, Iclass_ae_iclass_selp24_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_hl_args,
+ 1, Iclass_ae_iclass_selp24_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_hh_args,
+ 1, Iclass_ae_iclass_selp24_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtp24x2_args,
+ 1, Iclass_ae_iclass_movtp24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfp24x2_args,
+ 1, Iclass_ae_iclass_movfp24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtp48_args,
+ 1, Iclass_ae_iclass_movtp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfp48_args,
+ 1, Iclass_ae_iclass_movfp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movpa24x2_args,
+ 1, Iclass_ae_iclass_movpa24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_truncp24a32x2_args,
+ 1, Iclass_ae_iclass_truncp24a32x2_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvta32p24_l_args,
+ 1, Iclass_ae_iclass_cvta32p24_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvta32p24_h_args,
+ 1, Iclass_ae_iclass_cvta32p24_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_ll_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_lh_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_hl_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_hh_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_truncp24q48x2_args,
+ 1, Iclass_ae_iclass_truncp24q48x2_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_truncp16_args,
+ 1, Iclass_ae_iclass_truncp16_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp24q48sym_args,
+ 2, Iclass_ae_iclass_roundsp24q48sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp24q48asym_args,
+ 2, Iclass_ae_iclass_roundsp24q48asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16q48sym_args,
+ 2, Iclass_ae_iclass_roundsp16q48sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16q48asym_args,
+ 2, Iclass_ae_iclass_roundsp16q48asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16sym_args,
+ 2, Iclass_ae_iclass_roundsp16sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16asym_args,
+ 2, Iclass_ae_iclass_roundsp16asym_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_zeroq56_args,
+ 1, Iclass_ae_iclass_zeroq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movq56_args,
+ 1, Iclass_ae_iclass_movq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtq56_args,
+ 1, Iclass_ae_iclass_movtq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfq56_args,
+ 1, Iclass_ae_iclass_movfq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48a32s_args,
+ 1, Iclass_ae_iclass_cvtq48a32s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48p24s_l_args,
+ 1, Iclass_ae_iclass_cvtq48p24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48p24s_h_args,
+ 1, Iclass_ae_iclass_cvtq48p24s_h_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_satq48s_args,
+ 2, Iclass_ae_iclass_satq48s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_truncq32_args,
+ 1, Iclass_ae_iclass_truncq32_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsq32sym_args,
+ 2, Iclass_ae_iclass_roundsq32sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsq32asym_args,
+ 2, Iclass_ae_iclass_roundsq32asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca32q48_args,
+ 1, Iclass_ae_iclass_trunca32q48_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movap24s_l_args,
+ 1, Iclass_ae_iclass_movap24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movap24s_h_args,
+ 1, Iclass_ae_iclass_movap24s_h_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca16p24s_l_args,
+ 1, Iclass_ae_iclass_trunca16p24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca16p24s_h_args,
+ 1, Iclass_ae_iclass_trunca16p24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addp24_args,
+ 1, Iclass_ae_iclass_addp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subp24_args,
+ 1, Iclass_ae_iclass_subp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negp24_args,
+ 1, Iclass_ae_iclass_negp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_absp24_args,
+ 1, Iclass_ae_iclass_absp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_maxp24s_args,
+ 1, Iclass_ae_iclass_maxp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_minp24s_args,
+ 1, Iclass_ae_iclass_minp24s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_maxbp24s_args,
+ 1, Iclass_ae_iclass_maxbp24s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_minbp24s_args,
+ 1, Iclass_ae_iclass_minbp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addsp24s_args,
+ 2, Iclass_ae_iclass_addsp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subsp24s_args,
+ 2, Iclass_ae_iclass_subsp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negsp24s_args,
+ 2, Iclass_ae_iclass_negsp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_abssp24s_args,
+ 2, Iclass_ae_iclass_abssp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_andp48_args,
+ 1, Iclass_ae_iclass_andp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_nandp48_args,
+ 1, Iclass_ae_iclass_nandp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_orp48_args,
+ 1, Iclass_ae_iclass_orp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_xorp48_args,
+ 1, Iclass_ae_iclass_xorp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_ltp24s_args,
+ 1, Iclass_ae_iclass_ltp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lep24s_args,
+ 1, Iclass_ae_iclass_lep24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_eqp24_args,
+ 1, Iclass_ae_iclass_eqp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addq56_args,
+ 1, Iclass_ae_iclass_addq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subq56_args,
+ 1, Iclass_ae_iclass_subq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negq56_args,
+ 1, Iclass_ae_iclass_negq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_absq56_args,
+ 1, Iclass_ae_iclass_absq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_maxq56s_args,
+ 1, Iclass_ae_iclass_maxq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_minq56s_args,
+ 1, Iclass_ae_iclass_minq56s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_maxbq56s_args,
+ 1, Iclass_ae_iclass_maxbq56s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_minbq56s_args,
+ 1, Iclass_ae_iclass_minbq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addsq56s_args,
+ 2, Iclass_ae_iclass_addsq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subsq56s_args,
+ 2, Iclass_ae_iclass_subsq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negsq56s_args,
+ 2, Iclass_ae_iclass_negsq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_abssq56s_args,
+ 2, Iclass_ae_iclass_abssq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_andq56_args,
+ 1, Iclass_ae_iclass_andq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_nandq56_args,
+ 1, Iclass_ae_iclass_nandq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_orq56_args,
+ 1, Iclass_ae_iclass_orq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_xorq56_args,
+ 1, Iclass_ae_iclass_xorq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllip24_args,
+ 1, Iclass_ae_iclass_sllip24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srlip24_args,
+ 1, Iclass_ae_iclass_srlip24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraip24_args,
+ 1, Iclass_ae_iclass_sraip24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllsp24_args,
+ 2, Iclass_ae_iclass_sllsp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srlsp24_args,
+ 2, Iclass_ae_iclass_srlsp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srasp24_args,
+ 2, Iclass_ae_iclass_srasp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllisp24s_args,
+ 2, Iclass_ae_iclass_sllisp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllssp24s_args,
+ 3, Iclass_ae_iclass_sllssp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_slliq56_args,
+ 1, Iclass_ae_iclass_slliq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srliq56_args,
+ 1, Iclass_ae_iclass_srliq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraiq56_args,
+ 1, Iclass_ae_iclass_sraiq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllsq56_args,
+ 2, Iclass_ae_iclass_sllsq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srlsq56_args,
+ 2, Iclass_ae_iclass_srlsq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srasq56_args,
+ 2, Iclass_ae_iclass_srasq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllaq56_args,
+ 1, Iclass_ae_iclass_sllaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srlaq56_args,
+ 1, Iclass_ae_iclass_srlaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraaq56_args,
+ 1, Iclass_ae_iclass_sraaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllisq56s_args,
+ 2, Iclass_ae_iclass_sllisq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllssq56s_args,
+ 3, Iclass_ae_iclass_sllssq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllasq56s_args,
+ 2, Iclass_ae_iclass_sllasq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_ltq56s_args,
+ 1, Iclass_ae_iclass_ltq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_leq56s_args,
+ 1, Iclass_ae_iclass_leq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_eqq56_args,
+ 1, Iclass_ae_iclass_eqq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_nsaq56s_args,
+ 1, Iclass_ae_iclass_nsaq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsrfq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mulsrfq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsrfq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mulsrfq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mularfq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mularfq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mularfq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mularfq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulrfq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mulrfq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulrfq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mulrfq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mulsfq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mulsfq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mulafq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mulafq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp24s_h_args,
+ 1, Iclass_ae_iclass_mulfq32sp24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp24s_l_args,
+ 1, Iclass_ae_iclass_mulfq32sp24s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_ll_args,
+ 1, Iclass_ae_iclass_mulfp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_ll_args,
+ 1, Iclass_ae_iclass_mulp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_lh_args,
+ 1, Iclass_ae_iclass_mulfp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_lh_args,
+ 1, Iclass_ae_iclass_mulp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_hl_args,
+ 1, Iclass_ae_iclass_mulfp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_hl_args,
+ 1, Iclass_ae_iclass_mulp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_hh_args,
+ 1, Iclass_ae_iclass_mulfp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_hh_args,
+ 1, Iclass_ae_iclass_mulp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_ll_args,
+ 1, Iclass_ae_iclass_mulafp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_ll_args,
+ 1, Iclass_ae_iclass_mulap24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_lh_args,
+ 1, Iclass_ae_iclass_mulafp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_lh_args,
+ 1, Iclass_ae_iclass_mulap24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_hl_args,
+ 1, Iclass_ae_iclass_mulafp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_hl_args,
+ 1, Iclass_ae_iclass_mulap24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_hh_args,
+ 1, Iclass_ae_iclass_mulafp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_hh_args,
+ 1, Iclass_ae_iclass_mulap24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_ll_args,
+ 1, Iclass_ae_iclass_mulsfp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_ll_args,
+ 1, Iclass_ae_iclass_mulsp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_lh_args,
+ 1, Iclass_ae_iclass_mulsfp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_lh_args,
+ 1, Iclass_ae_iclass_mulsp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_hl_args,
+ 1, Iclass_ae_iclass_mulsfp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_hl_args,
+ 1, Iclass_ae_iclass_mulsp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_hh_args,
+ 1, Iclass_ae_iclass_mulsfp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_hh_args,
+ 1, Iclass_ae_iclass_mulsp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulas56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulas56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulas56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulas56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulss56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulss56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulss56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulss56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulfq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulfq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulfq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulfq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulafq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulafq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulafq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulafq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulaq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulaq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulaq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulaq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulsq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulsq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulsq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulsq32sp16u_h_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzaafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzaap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzaafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzaap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzasfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzasp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzasfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzasp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzsafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzsap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzsafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzsap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzssfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzssp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzssfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzssp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulaafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulaap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulaafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulaap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulasfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulasp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulasfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulasp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulsafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulsap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulsafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulsap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulssfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulssp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulssfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulssp24s_hl_lh_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sha32_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_ae_iclass_vldl32t_args,
+ 5, Iclass_ae_iclass_vldl32t_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vldl16t_args,
+ 5, Iclass_ae_iclass_vldl16t_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vldl16c_args,
+ 8, Iclass_ae_iclass_vldl16c_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vldsht_args,
+ 6, Iclass_ae_iclass_vldsht_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_lb_args,
+ 3, Iclass_ae_iclass_lb_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_lbi_args,
+ 3, Iclass_ae_iclass_lbi_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lbk_args,
+ 3, Iclass_ae_iclass_lbk_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lbki_args,
+ 3, Iclass_ae_iclass_lbki_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_db_args,
+ 3, Iclass_ae_iclass_db_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_dbi_args,
+ 3, Iclass_ae_iclass_dbi_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vlel32t_args,
+ 3, Iclass_ae_iclass_vlel32t_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vlel16t_args,
+ 3, Iclass_ae_iclass_vlel16t_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sb_args,
+ 4, Iclass_ae_iclass_sb_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sbi_args,
+ 3, Iclass_ae_iclass_sbi_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vles16c_args,
+ 5, Iclass_ae_iclass_vles16c_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_sbf_args,
+ 3, Iclass_ae_iclass_sbf_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SLAASQ56S_args,
+ 2, Iclass_icls_AE_SLAASQ56S_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_ADDBRBA32_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_icls_AE_MINABSSP24S_args,
+ 2, Iclass_icls_AE_MINABSSP24S_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_MAXABSSP24S_args,
+ 2, Iclass_icls_AE_MAXABSSP24S_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_MINABSSQ56S_args,
+ 2, Iclass_icls_AE_MINABSSQ56S_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_MAXABSSQ56S_args,
+ 2, Iclass_icls_AE_MAXABSSQ56S_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_cbegin0_args,
+ 2, Iclass_rur_ae_cbegin0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_cbegin0_args,
+ 2, Iclass_wur_ae_cbegin0_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_cend0_args,
+ 2, Iclass_rur_ae_cend0_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_cend0_args,
+ 2, Iclass_wur_ae_cend0_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP24X2_C_args,
+ 3, Iclass_icls_AE_LP24X2_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP24X2S_C_args,
+ 3, Iclass_icls_AE_SP24X2S_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP24X2F_C_args,
+ 3, Iclass_icls_AE_LP24X2F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP24X2F_C_args,
+ 3, Iclass_icls_AE_SP24X2F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP16X2F_C_args,
+ 3, Iclass_icls_AE_LP16X2F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP16X2F_C_args,
+ 3, Iclass_icls_AE_SP16X2F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP24_C_args,
+ 3, Iclass_icls_AE_LP24_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP24S_L_C_args,
+ 3, Iclass_icls_AE_SP24S_L_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP24F_C_args,
+ 3, Iclass_icls_AE_LP24F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP24F_L_C_args,
+ 3, Iclass_icls_AE_SP24F_L_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LP16F_C_args,
+ 3, Iclass_icls_AE_LP16F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SP16F_L_C_args,
+ 3, Iclass_icls_AE_SP16F_L_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LQ56_C_args,
+ 3, Iclass_icls_AE_LQ56_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SQ56S_C_args,
+ 3, Iclass_icls_AE_SQ56S_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_LQ32F_C_args,
+ 3, Iclass_icls_AE_LQ32F_C_stateArgs, 0, 0 },
+ { 3, Iclass_icls_AE_SQ32F_C_args,
+ 3, Iclass_icls_AE_SQ32F_C_stateArgs, 0, 0 },
+ { 1, Iclass_rur_expstate_args,
+ 2, Iclass_rur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_wur_expstate_args,
+ 2, Iclass_wur_expstate_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_READ_IMPWIRE_args,
+ 1, Iclass_iclass_READ_IMPWIRE_stateArgs, 1, Iclass_iclass_READ_IMPWIRE_intfArgs },
+ { 1, Iclass_iclass_SETB_EXPSTATE_args,
+ 2, Iclass_iclass_SETB_EXPSTATE_stateArgs, 0, 0 },
+ { 1, Iclass_iclass_CLRB_EXPSTATE_args,
+ 2, Iclass_iclass_CLRB_EXPSTATE_stateArgs, 0, 0 },
+ { 2, Iclass_iclass_WRMSK_EXPSTATE_args,
+ 2, Iclass_iclass_WRMSK_EXPSTATE_stateArgs, 0, 0 }
+};
+
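+/* Note: the ICLASS_* enumerators below index the iclass table above.
+ * Both lists are emitted by the same generator and are expected to
+ * stay in lock-step order, entry for entry. */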
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_rur_threadptr,
+ ICLASS_wur_threadptr,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s32nb,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_memctl,
+ ICLASS_xt_iclass_wsr_memctl,
+ ICLASS_xt_iclass_xsr_memctl,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_configid0,
+ ICLASS_xt_iclass_wsr_configid0,
+ ICLASS_xt_iclass_rsr_configid1,
+ ICLASS_xt_iclass_rsr_243,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_epc3,
+ ICLASS_xt_iclass_wsr_epc3,
+ ICLASS_xt_iclass_xsr_epc3,
+ ICLASS_xt_iclass_rsr_excsave3,
+ ICLASS_xt_iclass_wsr_excsave3,
+ ICLASS_xt_iclass_xsr_excsave3,
+ ICLASS_xt_iclass_rsr_epc4,
+ ICLASS_xt_iclass_wsr_epc4,
+ ICLASS_xt_iclass_xsr_epc4,
+ ICLASS_xt_iclass_rsr_excsave4,
+ ICLASS_xt_iclass_wsr_excsave4,
+ ICLASS_xt_iclass_xsr_excsave4,
+ ICLASS_xt_iclass_rsr_epc5,
+ ICLASS_xt_iclass_wsr_epc5,
+ ICLASS_xt_iclass_xsr_epc5,
+ ICLASS_xt_iclass_rsr_excsave5,
+ ICLASS_xt_iclass_wsr_excsave5,
+ ICLASS_xt_iclass_xsr_excsave5,
+ ICLASS_xt_iclass_rsr_epc6,
+ ICLASS_xt_iclass_wsr_epc6,
+ ICLASS_xt_iclass_xsr_epc6,
+ ICLASS_xt_iclass_rsr_excsave6,
+ ICLASS_xt_iclass_wsr_excsave6,
+ ICLASS_xt_iclass_xsr_excsave6,
+ ICLASS_xt_iclass_rsr_epc7,
+ ICLASS_xt_iclass_wsr_epc7,
+ ICLASS_xt_iclass_xsr_epc7,
+ ICLASS_xt_iclass_rsr_excsave7,
+ ICLASS_xt_iclass_wsr_excsave7,
+ ICLASS_xt_iclass_xsr_excsave7,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_eps3,
+ ICLASS_xt_iclass_wsr_eps3,
+ ICLASS_xt_iclass_xsr_eps3,
+ ICLASS_xt_iclass_rsr_eps4,
+ ICLASS_xt_iclass_wsr_eps4,
+ ICLASS_xt_iclass_xsr_eps4,
+ ICLASS_xt_iclass_rsr_eps5,
+ ICLASS_xt_iclass_wsr_eps5,
+ ICLASS_xt_iclass_xsr_eps5,
+ ICLASS_xt_iclass_rsr_eps6,
+ ICLASS_xt_iclass_wsr_eps6,
+ ICLASS_xt_iclass_xsr_eps6,
+ ICLASS_xt_iclass_rsr_eps7,
+ ICLASS_xt_iclass_wsr_eps7,
+ ICLASS_xt_iclass_xsr_eps7,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_mul32h,
+ ICLASS_xt_iclass_mac16_aa,
+ ICLASS_xt_iclass_mac16_ad,
+ ICLASS_xt_iclass_mac16_da,
+ ICLASS_xt_iclass_mac16_dd,
+ ICLASS_xt_iclass_mac16a_aa,
+ ICLASS_xt_iclass_mac16a_ad,
+ ICLASS_xt_iclass_mac16a_da,
+ ICLASS_xt_iclass_mac16a_dd,
+ ICLASS_xt_iclass_mac16al_da,
+ ICLASS_xt_iclass_mac16al_dd,
+ ICLASS_xt_iclass_mac16_l,
+ ICLASS_xt_iclass_rsr_m0,
+ ICLASS_xt_iclass_wsr_m0,
+ ICLASS_xt_iclass_xsr_m0,
+ ICLASS_xt_iclass_rsr_m1,
+ ICLASS_xt_iclass_wsr_m1,
+ ICLASS_xt_iclass_xsr_m1,
+ ICLASS_xt_iclass_rsr_m2,
+ ICLASS_xt_iclass_wsr_m2,
+ ICLASS_xt_iclass_xsr_m2,
+ ICLASS_xt_iclass_rsr_m3,
+ ICLASS_xt_iclass_wsr_m3,
+ ICLASS_xt_iclass_xsr_m3,
+ ICLASS_xt_iclass_rsr_acclo,
+ ICLASS_xt_iclass_wsr_acclo,
+ ICLASS_xt_iclass_xsr_acclo,
+ ICLASS_xt_iclass_rsr_acchi,
+ ICLASS_xt_iclass_wsr_acchi,
+ ICLASS_xt_iclass_xsr_acchi,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_dbreaka0,
+ ICLASS_xt_iclass_wsr_dbreaka0,
+ ICLASS_xt_iclass_xsr_dbreaka0,
+ ICLASS_xt_iclass_rsr_dbreakc0,
+ ICLASS_xt_iclass_wsr_dbreakc0,
+ ICLASS_xt_iclass_xsr_dbreakc0,
+ ICLASS_xt_iclass_rsr_dbreaka1,
+ ICLASS_xt_iclass_wsr_dbreaka1,
+ ICLASS_xt_iclass_xsr_dbreaka1,
+ ICLASS_xt_iclass_rsr_dbreakc1,
+ ICLASS_xt_iclass_wsr_dbreakc1,
+ ICLASS_xt_iclass_xsr_dbreakc1,
+ ICLASS_xt_iclass_rsr_ibreaka0,
+ ICLASS_xt_iclass_wsr_ibreaka0,
+ ICLASS_xt_iclass_xsr_ibreaka0,
+ ICLASS_xt_iclass_rsr_ibreaka1,
+ ICLASS_xt_iclass_wsr_ibreaka1,
+ ICLASS_xt_iclass_xsr_ibreaka1,
+ ICLASS_xt_iclass_rsr_ibreakenable,
+ ICLASS_xt_iclass_wsr_ibreakenable,
+ ICLASS_xt_iclass_xsr_ibreakenable,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_lddr32_p,
+ ICLASS_xt_iclass_sddr32_p,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_wsr_mmid,
+ ICLASS_xt_iclass_bbool1,
+ ICLASS_xt_iclass_bbool4,
+ ICLASS_xt_iclass_bbool8,
+ ICLASS_xt_iclass_bbranch,
+ ICLASS_xt_iclass_bmove,
+ ICLASS_xt_iclass_RSR_BR,
+ ICLASS_xt_iclass_WSR_BR,
+ ICLASS_xt_iclass_XSR_BR,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_rsr_ccompare2,
+ ICLASS_xt_iclass_wsr_ccompare2,
+ ICLASS_xt_iclass_xsr_ccompare2,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_lock,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_dyn,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_dcache_lock,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_rsr_prefctl,
+ ICLASS_xt_iclass_wsr_prefctl,
+ ICLASS_xt_iclass_xsr_prefctl,
+ ICLASS_xt_iclass_wsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_ptevaddr,
+ ICLASS_xt_iclass_xsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_rasid,
+ ICLASS_xt_iclass_wsr_rasid,
+ ICLASS_xt_iclass_xsr_rasid,
+ ICLASS_xt_iclass_rsr_itlbcfg,
+ ICLASS_xt_iclass_wsr_itlbcfg,
+ ICLASS_xt_iclass_xsr_itlbcfg,
+ ICLASS_xt_iclass_rsr_dtlbcfg,
+ ICLASS_xt_iclass_wsr_dtlbcfg,
+ ICLASS_xt_iclass_xsr_dtlbcfg,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_ldpte,
+ ICLASS_xt_iclass_hwwitlba,
+ ICLASS_xt_iclass_hwwdtlba,
+ ICLASS_xt_iclass_rsr_cpenable,
+ ICLASS_xt_iclass_wsr_cpenable,
+ ICLASS_xt_iclass_xsr_cpenable,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_div,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_ae_ovf_sar,
+ ICLASS_wur_ae_ovf_sar,
+ ICLASS_rur_ae_bithead,
+ ICLASS_wur_ae_bithead,
+ ICLASS_rur_ae_ts_fts_bu_bp,
+ ICLASS_wur_ae_ts_fts_bu_bp,
+ ICLASS_rur_ae_sd_no,
+ ICLASS_wur_ae_sd_no,
+ ICLASS_ae_iclass_rur_ae_overflow,
+ ICLASS_ae_iclass_wur_ae_overflow,
+ ICLASS_ae_iclass_rur_ae_sar,
+ ICLASS_ae_iclass_wur_ae_sar,
+ ICLASS_ae_iclass_rur_ae_bitptr,
+ ICLASS_ae_iclass_wur_ae_bitptr,
+ ICLASS_ae_iclass_rur_ae_bitsused,
+ ICLASS_ae_iclass_wur_ae_bitsused,
+ ICLASS_ae_iclass_rur_ae_tablesize,
+ ICLASS_ae_iclass_wur_ae_tablesize,
+ ICLASS_ae_iclass_rur_ae_first_ts,
+ ICLASS_ae_iclass_wur_ae_first_ts,
+ ICLASS_ae_iclass_rur_ae_nextoffset,
+ ICLASS_ae_iclass_wur_ae_nextoffset,
+ ICLASS_ae_iclass_rur_ae_searchdone,
+ ICLASS_ae_iclass_wur_ae_searchdone,
+ ICLASS_ae_iclass_lp16f_i,
+ ICLASS_ae_iclass_lp16f_iu,
+ ICLASS_ae_iclass_lp16f_x,
+ ICLASS_ae_iclass_lp16f_xu,
+ ICLASS_ae_iclass_lp24_i,
+ ICLASS_ae_iclass_lp24_iu,
+ ICLASS_ae_iclass_lp24_x,
+ ICLASS_ae_iclass_lp24_xu,
+ ICLASS_ae_iclass_lp24f_i,
+ ICLASS_ae_iclass_lp24f_iu,
+ ICLASS_ae_iclass_lp24f_x,
+ ICLASS_ae_iclass_lp24f_xu,
+ ICLASS_ae_iclass_lp16x2f_i,
+ ICLASS_ae_iclass_lp16x2f_iu,
+ ICLASS_ae_iclass_lp16x2f_x,
+ ICLASS_ae_iclass_lp16x2f_xu,
+ ICLASS_ae_iclass_lp24x2f_i,
+ ICLASS_ae_iclass_lp24x2f_iu,
+ ICLASS_ae_iclass_lp24x2f_x,
+ ICLASS_ae_iclass_lp24x2f_xu,
+ ICLASS_ae_iclass_lp24x2_i,
+ ICLASS_ae_iclass_lp24x2_iu,
+ ICLASS_ae_iclass_lp24x2_x,
+ ICLASS_ae_iclass_lp24x2_xu,
+ ICLASS_ae_iclass_sp16x2f_i,
+ ICLASS_ae_iclass_sp16x2f_iu,
+ ICLASS_ae_iclass_sp16x2f_x,
+ ICLASS_ae_iclass_sp16x2f_xu,
+ ICLASS_ae_iclass_sp24x2s_i,
+ ICLASS_ae_iclass_sp24x2s_iu,
+ ICLASS_ae_iclass_sp24x2s_x,
+ ICLASS_ae_iclass_sp24x2s_xu,
+ ICLASS_ae_iclass_sp24x2f_i,
+ ICLASS_ae_iclass_sp24x2f_iu,
+ ICLASS_ae_iclass_sp24x2f_x,
+ ICLASS_ae_iclass_sp24x2f_xu,
+ ICLASS_ae_iclass_sp16f_l_i,
+ ICLASS_ae_iclass_sp16f_l_iu,
+ ICLASS_ae_iclass_sp16f_l_x,
+ ICLASS_ae_iclass_sp16f_l_xu,
+ ICLASS_ae_iclass_sp24s_l_i,
+ ICLASS_ae_iclass_sp24s_l_iu,
+ ICLASS_ae_iclass_sp24s_l_x,
+ ICLASS_ae_iclass_sp24s_l_xu,
+ ICLASS_ae_iclass_sp24f_l_i,
+ ICLASS_ae_iclass_sp24f_l_iu,
+ ICLASS_ae_iclass_sp24f_l_x,
+ ICLASS_ae_iclass_sp24f_l_xu,
+ ICLASS_ae_iclass_lq56_i,
+ ICLASS_ae_iclass_lq56_iu,
+ ICLASS_ae_iclass_lq56_x,
+ ICLASS_ae_iclass_lq56_xu,
+ ICLASS_ae_iclass_lq32f_i,
+ ICLASS_ae_iclass_lq32f_iu,
+ ICLASS_ae_iclass_lq32f_x,
+ ICLASS_ae_iclass_lq32f_xu,
+ ICLASS_ae_iclass_sq56s_i,
+ ICLASS_ae_iclass_sq56s_iu,
+ ICLASS_ae_iclass_sq56s_x,
+ ICLASS_ae_iclass_sq56s_xu,
+ ICLASS_ae_iclass_sq32f_i,
+ ICLASS_ae_iclass_sq32f_iu,
+ ICLASS_ae_iclass_sq32f_x,
+ ICLASS_ae_iclass_sq32f_xu,
+ ICLASS_ae_iclass_zerop48,
+ ICLASS_ae_iclass_movp48,
+ ICLASS_ae_iclass_selp24_ll,
+ ICLASS_ae_iclass_selp24_lh,
+ ICLASS_ae_iclass_selp24_hl,
+ ICLASS_ae_iclass_selp24_hh,
+ ICLASS_ae_iclass_movtp24x2,
+ ICLASS_ae_iclass_movfp24x2,
+ ICLASS_ae_iclass_movtp48,
+ ICLASS_ae_iclass_movfp48,
+ ICLASS_ae_iclass_movpa24x2,
+ ICLASS_ae_iclass_truncp24a32x2,
+ ICLASS_ae_iclass_cvta32p24_l,
+ ICLASS_ae_iclass_cvta32p24_h,
+ ICLASS_ae_iclass_cvtp24a16x2_ll,
+ ICLASS_ae_iclass_cvtp24a16x2_lh,
+ ICLASS_ae_iclass_cvtp24a16x2_hl,
+ ICLASS_ae_iclass_cvtp24a16x2_hh,
+ ICLASS_ae_iclass_truncp24q48x2,
+ ICLASS_ae_iclass_truncp16,
+ ICLASS_ae_iclass_roundsp24q48sym,
+ ICLASS_ae_iclass_roundsp24q48asym,
+ ICLASS_ae_iclass_roundsp16q48sym,
+ ICLASS_ae_iclass_roundsp16q48asym,
+ ICLASS_ae_iclass_roundsp16sym,
+ ICLASS_ae_iclass_roundsp16asym,
+ ICLASS_ae_iclass_zeroq56,
+ ICLASS_ae_iclass_movq56,
+ ICLASS_ae_iclass_movtq56,
+ ICLASS_ae_iclass_movfq56,
+ ICLASS_ae_iclass_cvtq48a32s,
+ ICLASS_ae_iclass_cvtq48p24s_l,
+ ICLASS_ae_iclass_cvtq48p24s_h,
+ ICLASS_ae_iclass_satq48s,
+ ICLASS_ae_iclass_truncq32,
+ ICLASS_ae_iclass_roundsq32sym,
+ ICLASS_ae_iclass_roundsq32asym,
+ ICLASS_ae_iclass_trunca32q48,
+ ICLASS_ae_iclass_movap24s_l,
+ ICLASS_ae_iclass_movap24s_h,
+ ICLASS_ae_iclass_trunca16p24s_l,
+ ICLASS_ae_iclass_trunca16p24s_h,
+ ICLASS_ae_iclass_addp24,
+ ICLASS_ae_iclass_subp24,
+ ICLASS_ae_iclass_negp24,
+ ICLASS_ae_iclass_absp24,
+ ICLASS_ae_iclass_maxp24s,
+ ICLASS_ae_iclass_minp24s,
+ ICLASS_ae_iclass_maxbp24s,
+ ICLASS_ae_iclass_minbp24s,
+ ICLASS_ae_iclass_addsp24s,
+ ICLASS_ae_iclass_subsp24s,
+ ICLASS_ae_iclass_negsp24s,
+ ICLASS_ae_iclass_abssp24s,
+ ICLASS_ae_iclass_andp48,
+ ICLASS_ae_iclass_nandp48,
+ ICLASS_ae_iclass_orp48,
+ ICLASS_ae_iclass_xorp48,
+ ICLASS_ae_iclass_ltp24s,
+ ICLASS_ae_iclass_lep24s,
+ ICLASS_ae_iclass_eqp24,
+ ICLASS_ae_iclass_addq56,
+ ICLASS_ae_iclass_subq56,
+ ICLASS_ae_iclass_negq56,
+ ICLASS_ae_iclass_absq56,
+ ICLASS_ae_iclass_maxq56s,
+ ICLASS_ae_iclass_minq56s,
+ ICLASS_ae_iclass_maxbq56s,
+ ICLASS_ae_iclass_minbq56s,
+ ICLASS_ae_iclass_addsq56s,
+ ICLASS_ae_iclass_subsq56s,
+ ICLASS_ae_iclass_negsq56s,
+ ICLASS_ae_iclass_abssq56s,
+ ICLASS_ae_iclass_andq56,
+ ICLASS_ae_iclass_nandq56,
+ ICLASS_ae_iclass_orq56,
+ ICLASS_ae_iclass_xorq56,
+ ICLASS_ae_iclass_sllip24,
+ ICLASS_ae_iclass_srlip24,
+ ICLASS_ae_iclass_sraip24,
+ ICLASS_ae_iclass_sllsp24,
+ ICLASS_ae_iclass_srlsp24,
+ ICLASS_ae_iclass_srasp24,
+ ICLASS_ae_iclass_sllisp24s,
+ ICLASS_ae_iclass_sllssp24s,
+ ICLASS_ae_iclass_slliq56,
+ ICLASS_ae_iclass_srliq56,
+ ICLASS_ae_iclass_sraiq56,
+ ICLASS_ae_iclass_sllsq56,
+ ICLASS_ae_iclass_srlsq56,
+ ICLASS_ae_iclass_srasq56,
+ ICLASS_ae_iclass_sllaq56,
+ ICLASS_ae_iclass_srlaq56,
+ ICLASS_ae_iclass_sraaq56,
+ ICLASS_ae_iclass_sllisq56s,
+ ICLASS_ae_iclass_sllssq56s,
+ ICLASS_ae_iclass_sllasq56s,
+ ICLASS_ae_iclass_ltq56s,
+ ICLASS_ae_iclass_leq56s,
+ ICLASS_ae_iclass_eqq56,
+ ICLASS_ae_iclass_nsaq56s,
+ ICLASS_ae_iclass_mulsrfq32sp24s_h,
+ ICLASS_ae_iclass_mulsrfq32sp24s_l,
+ ICLASS_ae_iclass_mularfq32sp24s_h,
+ ICLASS_ae_iclass_mularfq32sp24s_l,
+ ICLASS_ae_iclass_mulrfq32sp24s_h,
+ ICLASS_ae_iclass_mulrfq32sp24s_l,
+ ICLASS_ae_iclass_mulsfq32sp24s_h,
+ ICLASS_ae_iclass_mulsfq32sp24s_l,
+ ICLASS_ae_iclass_mulafq32sp24s_h,
+ ICLASS_ae_iclass_mulafq32sp24s_l,
+ ICLASS_ae_iclass_mulfq32sp24s_h,
+ ICLASS_ae_iclass_mulfq32sp24s_l,
+ ICLASS_ae_iclass_mulfs32p16s_ll,
+ ICLASS_ae_iclass_mulfp24s_ll,
+ ICLASS_ae_iclass_mulp24s_ll,
+ ICLASS_ae_iclass_mulfs32p16s_lh,
+ ICLASS_ae_iclass_mulfp24s_lh,
+ ICLASS_ae_iclass_mulp24s_lh,
+ ICLASS_ae_iclass_mulfs32p16s_hl,
+ ICLASS_ae_iclass_mulfp24s_hl,
+ ICLASS_ae_iclass_mulp24s_hl,
+ ICLASS_ae_iclass_mulfs32p16s_hh,
+ ICLASS_ae_iclass_mulfp24s_hh,
+ ICLASS_ae_iclass_mulp24s_hh,
+ ICLASS_ae_iclass_mulafs32p16s_ll,
+ ICLASS_ae_iclass_mulafp24s_ll,
+ ICLASS_ae_iclass_mulap24s_ll,
+ ICLASS_ae_iclass_mulafs32p16s_lh,
+ ICLASS_ae_iclass_mulafp24s_lh,
+ ICLASS_ae_iclass_mulap24s_lh,
+ ICLASS_ae_iclass_mulafs32p16s_hl,
+ ICLASS_ae_iclass_mulafp24s_hl,
+ ICLASS_ae_iclass_mulap24s_hl,
+ ICLASS_ae_iclass_mulafs32p16s_hh,
+ ICLASS_ae_iclass_mulafp24s_hh,
+ ICLASS_ae_iclass_mulap24s_hh,
+ ICLASS_ae_iclass_mulsfs32p16s_ll,
+ ICLASS_ae_iclass_mulsfp24s_ll,
+ ICLASS_ae_iclass_mulsp24s_ll,
+ ICLASS_ae_iclass_mulsfs32p16s_lh,
+ ICLASS_ae_iclass_mulsfp24s_lh,
+ ICLASS_ae_iclass_mulsp24s_lh,
+ ICLASS_ae_iclass_mulsfs32p16s_hl,
+ ICLASS_ae_iclass_mulsfp24s_hl,
+ ICLASS_ae_iclass_mulsp24s_hl,
+ ICLASS_ae_iclass_mulsfs32p16s_hh,
+ ICLASS_ae_iclass_mulsfp24s_hh,
+ ICLASS_ae_iclass_mulsp24s_hh,
+ ICLASS_ae_iclass_mulafs56p24s_ll,
+ ICLASS_ae_iclass_mulas56p24s_ll,
+ ICLASS_ae_iclass_mulafs56p24s_lh,
+ ICLASS_ae_iclass_mulas56p24s_lh,
+ ICLASS_ae_iclass_mulafs56p24s_hl,
+ ICLASS_ae_iclass_mulas56p24s_hl,
+ ICLASS_ae_iclass_mulafs56p24s_hh,
+ ICLASS_ae_iclass_mulas56p24s_hh,
+ ICLASS_ae_iclass_mulsfs56p24s_ll,
+ ICLASS_ae_iclass_mulss56p24s_ll,
+ ICLASS_ae_iclass_mulsfs56p24s_lh,
+ ICLASS_ae_iclass_mulss56p24s_lh,
+ ICLASS_ae_iclass_mulsfs56p24s_hl,
+ ICLASS_ae_iclass_mulss56p24s_hl,
+ ICLASS_ae_iclass_mulsfs56p24s_hh,
+ ICLASS_ae_iclass_mulss56p24s_hh,
+ ICLASS_ae_iclass_mulfq32sp16s_l,
+ ICLASS_ae_iclass_mulfq32sp16s_h,
+ ICLASS_ae_iclass_mulfq32sp16u_l,
+ ICLASS_ae_iclass_mulfq32sp16u_h,
+ ICLASS_ae_iclass_mulq32sp16s_l,
+ ICLASS_ae_iclass_mulq32sp16s_h,
+ ICLASS_ae_iclass_mulq32sp16u_l,
+ ICLASS_ae_iclass_mulq32sp16u_h,
+ ICLASS_ae_iclass_mulafq32sp16s_l,
+ ICLASS_ae_iclass_mulafq32sp16s_h,
+ ICLASS_ae_iclass_mulafq32sp16u_l,
+ ICLASS_ae_iclass_mulafq32sp16u_h,
+ ICLASS_ae_iclass_mulaq32sp16s_l,
+ ICLASS_ae_iclass_mulaq32sp16s_h,
+ ICLASS_ae_iclass_mulaq32sp16u_l,
+ ICLASS_ae_iclass_mulaq32sp16u_h,
+ ICLASS_ae_iclass_mulsfq32sp16s_l,
+ ICLASS_ae_iclass_mulsfq32sp16s_h,
+ ICLASS_ae_iclass_mulsfq32sp16u_l,
+ ICLASS_ae_iclass_mulsfq32sp16u_h,
+ ICLASS_ae_iclass_mulsq32sp16s_l,
+ ICLASS_ae_iclass_mulsq32sp16s_h,
+ ICLASS_ae_iclass_mulsq32sp16u_l,
+ ICLASS_ae_iclass_mulsq32sp16u_h,
+ ICLASS_ae_iclass_mulzaaq32sp16s_ll,
+ ICLASS_ae_iclass_mulzaafq32sp16s_ll,
+ ICLASS_ae_iclass_mulzaaq32sp16u_ll,
+ ICLASS_ae_iclass_mulzaafq32sp16u_ll,
+ ICLASS_ae_iclass_mulzaaq32sp16s_hh,
+ ICLASS_ae_iclass_mulzaafq32sp16s_hh,
+ ICLASS_ae_iclass_mulzaaq32sp16u_hh,
+ ICLASS_ae_iclass_mulzaafq32sp16u_hh,
+ ICLASS_ae_iclass_mulzaaq32sp16s_lh,
+ ICLASS_ae_iclass_mulzaafq32sp16s_lh,
+ ICLASS_ae_iclass_mulzaaq32sp16u_lh,
+ ICLASS_ae_iclass_mulzaafq32sp16u_lh,
+ ICLASS_ae_iclass_mulzasq32sp16s_ll,
+ ICLASS_ae_iclass_mulzasfq32sp16s_ll,
+ ICLASS_ae_iclass_mulzasq32sp16u_ll,
+ ICLASS_ae_iclass_mulzasfq32sp16u_ll,
+ ICLASS_ae_iclass_mulzasq32sp16s_hh,
+ ICLASS_ae_iclass_mulzasfq32sp16s_hh,
+ ICLASS_ae_iclass_mulzasq32sp16u_hh,
+ ICLASS_ae_iclass_mulzasfq32sp16u_hh,
+ ICLASS_ae_iclass_mulzasq32sp16s_lh,
+ ICLASS_ae_iclass_mulzasfq32sp16s_lh,
+ ICLASS_ae_iclass_mulzasq32sp16u_lh,
+ ICLASS_ae_iclass_mulzasfq32sp16u_lh,
+ ICLASS_ae_iclass_mulzsaq32sp16s_ll,
+ ICLASS_ae_iclass_mulzsafq32sp16s_ll,
+ ICLASS_ae_iclass_mulzsaq32sp16u_ll,
+ ICLASS_ae_iclass_mulzsafq32sp16u_ll,
+ ICLASS_ae_iclass_mulzsaq32sp16s_hh,
+ ICLASS_ae_iclass_mulzsafq32sp16s_hh,
+ ICLASS_ae_iclass_mulzsaq32sp16u_hh,
+ ICLASS_ae_iclass_mulzsafq32sp16u_hh,
+ ICLASS_ae_iclass_mulzsaq32sp16s_lh,
+ ICLASS_ae_iclass_mulzsafq32sp16s_lh,
+ ICLASS_ae_iclass_mulzsaq32sp16u_lh,
+ ICLASS_ae_iclass_mulzsafq32sp16u_lh,
+ ICLASS_ae_iclass_mulzssq32sp16s_ll,
+ ICLASS_ae_iclass_mulzssfq32sp16s_ll,
+ ICLASS_ae_iclass_mulzssq32sp16u_ll,
+ ICLASS_ae_iclass_mulzssfq32sp16u_ll,
+ ICLASS_ae_iclass_mulzssq32sp16s_hh,
+ ICLASS_ae_iclass_mulzssfq32sp16s_hh,
+ ICLASS_ae_iclass_mulzssq32sp16u_hh,
+ ICLASS_ae_iclass_mulzssfq32sp16u_hh,
+ ICLASS_ae_iclass_mulzssq32sp16s_lh,
+ ICLASS_ae_iclass_mulzssfq32sp16s_lh,
+ ICLASS_ae_iclass_mulzssq32sp16u_lh,
+ ICLASS_ae_iclass_mulzssfq32sp16u_lh,
+ ICLASS_ae_iclass_mulzaafp24s_hh_ll,
+ ICLASS_ae_iclass_mulzaap24s_hh_ll,
+ ICLASS_ae_iclass_mulzaafp24s_hl_lh,
+ ICLASS_ae_iclass_mulzaap24s_hl_lh,
+ ICLASS_ae_iclass_mulzasfp24s_hh_ll,
+ ICLASS_ae_iclass_mulzasp24s_hh_ll,
+ ICLASS_ae_iclass_mulzasfp24s_hl_lh,
+ ICLASS_ae_iclass_mulzasp24s_hl_lh,
+ ICLASS_ae_iclass_mulzsafp24s_hh_ll,
+ ICLASS_ae_iclass_mulzsap24s_hh_ll,
+ ICLASS_ae_iclass_mulzsafp24s_hl_lh,
+ ICLASS_ae_iclass_mulzsap24s_hl_lh,
+ ICLASS_ae_iclass_mulzssfp24s_hh_ll,
+ ICLASS_ae_iclass_mulzssp24s_hh_ll,
+ ICLASS_ae_iclass_mulzssfp24s_hl_lh,
+ ICLASS_ae_iclass_mulzssp24s_hl_lh,
+ ICLASS_ae_iclass_mulaafp24s_hh_ll,
+ ICLASS_ae_iclass_mulaap24s_hh_ll,
+ ICLASS_ae_iclass_mulaafp24s_hl_lh,
+ ICLASS_ae_iclass_mulaap24s_hl_lh,
+ ICLASS_ae_iclass_mulasfp24s_hh_ll,
+ ICLASS_ae_iclass_mulasp24s_hh_ll,
+ ICLASS_ae_iclass_mulasfp24s_hl_lh,
+ ICLASS_ae_iclass_mulasp24s_hl_lh,
+ ICLASS_ae_iclass_mulsafp24s_hh_ll,
+ ICLASS_ae_iclass_mulsap24s_hh_ll,
+ ICLASS_ae_iclass_mulsafp24s_hl_lh,
+ ICLASS_ae_iclass_mulsap24s_hl_lh,
+ ICLASS_ae_iclass_mulssfp24s_hh_ll,
+ ICLASS_ae_iclass_mulssp24s_hh_ll,
+ ICLASS_ae_iclass_mulssfp24s_hl_lh,
+ ICLASS_ae_iclass_mulssp24s_hl_lh,
+ ICLASS_ae_iclass_sha32,
+ ICLASS_ae_iclass_vldl32t,
+ ICLASS_ae_iclass_vldl16t,
+ ICLASS_ae_iclass_vldl16c,
+ ICLASS_ae_iclass_vldsht,
+ ICLASS_ae_iclass_lb,
+ ICLASS_ae_iclass_lbi,
+ ICLASS_ae_iclass_lbk,
+ ICLASS_ae_iclass_lbki,
+ ICLASS_ae_iclass_db,
+ ICLASS_ae_iclass_dbi,
+ ICLASS_ae_iclass_vlel32t,
+ ICLASS_ae_iclass_vlel16t,
+ ICLASS_ae_iclass_sb,
+ ICLASS_ae_iclass_sbi,
+ ICLASS_ae_iclass_vles16c,
+ ICLASS_ae_iclass_sbf,
+ ICLASS_icls_AE_SLAASQ56S,
+ ICLASS_icls_AE_ADDBRBA32,
+ ICLASS_icls_AE_MINABSSP24S,
+ ICLASS_icls_AE_MAXABSSP24S,
+ ICLASS_icls_AE_MINABSSQ56S,
+ ICLASS_icls_AE_MAXABSSQ56S,
+ ICLASS_rur_ae_cbegin0,
+ ICLASS_wur_ae_cbegin0,
+ ICLASS_rur_ae_cend0,
+ ICLASS_wur_ae_cend0,
+ ICLASS_icls_AE_LP24X2_C,
+ ICLASS_icls_AE_SP24X2S_C,
+ ICLASS_icls_AE_LP24X2F_C,
+ ICLASS_icls_AE_SP24X2F_C,
+ ICLASS_icls_AE_LP16X2F_C,
+ ICLASS_icls_AE_SP16X2F_C,
+ ICLASS_icls_AE_LP24_C,
+ ICLASS_icls_AE_SP24S_L_C,
+ ICLASS_icls_AE_LP24F_C,
+ ICLASS_icls_AE_SP24F_L_C,
+ ICLASS_icls_AE_LP16F_C,
+ ICLASS_icls_AE_SP16F_L_C,
+ ICLASS_icls_AE_LQ56_C,
+ ICLASS_icls_AE_SQ56S_C,
+ ICLASS_icls_AE_LQ32F_C,
+ ICLASS_icls_AE_SQ32F_C,
+ ICLASS_rur_expstate,
+ ICLASS_wur_expstate,
+ ICLASS_iclass_READ_IMPWIRE,
+ ICLASS_iclass_SETB_EXPSTATE,
+ ICLASS_iclass_CLRB_EXPSTATE,
+ ICLASS_iclass_WRMSK_EXPSTATE
+};
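+
+/* End of the instruction-class enumeration: the ICLASS_* values above
+   index the iclass table that records, for each instruction class,
+   the operands and processor state it reads and writes. */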
+
+
+/* Opcode encodings.  Each function below writes the constant opcode
+   bits of one instruction, for one slot of one instruction format,
+   into slotbuf[0]; the variable operand fields are inserted separately
+   through the field set functions. */
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80200;
+}
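+
+/*
+ * Illustrative sketch, not part of the generated tables: these per-slot
+ * encode hooks are installed in the opcode table and are invoked through
+ * the libisa API rather than called directly.  Assuming an xtensa_isa
+ * handle "isa" built from this configuration, with "fmt" and "opc"
+ * resolved via xtensa_format_lookup()/xtensa_opcode_lookup(), assembling
+ * EXCW into a one-slot instruction word would look roughly like:
+ *
+ *     xtensa_insnbuf insn = xtensa_insnbuf_alloc (isa);
+ *     xtensa_insnbuf slot = xtensa_insnbuf_alloc (isa);
+ *
+ *     xtensa_opcode_encode (isa, fmt, 0, slot, opc);   // runs the hook above
+ *     xtensa_format_encode (isa, fmt, insn);           // format's fixed bits
+ *     xtensa_format_set_slot (isa, fmt, 0, insn, slot);
+ */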
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2300;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c0000;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580000;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x540000;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0000;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb0000;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70000;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6c0000;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x804;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10f;
+}
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4300;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5300;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x94;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4830;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4831;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4816;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4930;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4931;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4916;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc800;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc00;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd60f;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd000;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd30f;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00f;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9000;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7e03e;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe73f;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200c00;
+}
+
+static void
+Opcode_addi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100002;
+}
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200d00;
+}
+
+static void
+Opcode_addmi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200002;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_add_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb81;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_sub_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_addx2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1381;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addx4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2381;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_addx8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4b81;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_subx2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d01;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe;
+}
+
+static void
+Opcode_subx4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf81;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf;
+}
+
+static void
+Opcode_subx8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4d81;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_and_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b81;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_or_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2501;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_xor_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f01;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680000;
+}
+
+static void
+Opcode_beqi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x690000;
+}
+
+static void
+Opcode_bnei_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1800003;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b0000;
+}
+
+static void
+Opcode_bgei_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800003;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6a0000;
+}
+
+static void
+Opcode_blti_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000003;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700600;
+}
+
+static void
+Opcode_bbci_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700e00;
+}
+
+static void
+Opcode_bbsi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80002;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f0000;
+}
+
+static void
+Opcode_bgeui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000003;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e0000;
+}
+
+static void
+Opcode_bltui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000003;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700100;
+}
+
+static void
+Opcode_beq_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300002;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700900;
+}
+
+static void
+Opcode_bne_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700a00;
+}
+
+static void
+Opcode_bge_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600002;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700200;
+}
+
+static void
+Opcode_blt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680002;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700b00;
+}
+
+static void
+Opcode_bgeu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380002;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700300;
+}
+
+static void
+Opcode_bltu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700002;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700800;
+}
+
+static void
+Opcode_bany_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500002;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700000;
+}
+
+static void
+Opcode_bnone_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700400;
+}
+
+static void
+Opcode_ball_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180002;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700c00;
+}
+
+static void
+Opcode_bnall_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780002;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700500;
+}
+
+static void
+Opcode_bbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580002;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700d00;
+}
+
+static void
+Opcode_bbs_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280002;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x640000;
+}
+
+static void
+Opcode_beqz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x101;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x650000;
+}
+
+static void
+Opcode_bnez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x181;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x670000;
+}
+
+static void
+Opcode_bgez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x281;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x660000;
+}
+
+static void
+Opcode_bltz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x681;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500000;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40;
+}
+
+static void
+Opcode_extui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_j_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0000;
+}
+
+static void
+Opcode_jx_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x983d01;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200100;
+}
+
+static void
+Opcode_l16ui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200004;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200900;
+}
+
+static void
+Opcode_l16si_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100004;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200200;
+}
+
+static void
+Opcode_l32i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400004;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_l32r_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_l8ui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0800;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0900;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0a00;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200a00;
+}
+
+static void
+Opcode_movi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x301;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x38;
+}
+
+static void
+Opcode_moveqz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7381;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x39;
+}
+
+static void
+Opcode_movnez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd01;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a;
+}
+
+static void
+Opcode_movltz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x701;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b;
+}
+
+static void
+Opcode_movgez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x581;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_neg_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed81;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1006;
+}
+
+static void
+Opcode_abs_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ed81;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0200;
+}
+
+static void
+Opcode_nop_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36001;
+}
+
+static void
+Opcode_nop_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb83d01;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1500;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200500;
+}
+
+static void
+Opcode_s16i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200600;
+}
+
+static void
+Opcode_s32i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300004;
+}
+
+static void
+Opcode_s32nb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x95;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200400;
+}
+
+static void
+Opcode_s8i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4;
+}
+
+static void
+Opcode_ssr_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x983d81;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104;
+}
+
+static void
+Opcode_ssl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4183d01;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204;
+}
+
+static void
+Opcode_ssa8l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2183d01;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x304;
+}
+
+static void
+Opcode_ssa8b_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1183d01;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404;
+}
+
+static void
+Opcode_ssai_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18e501;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a;
+}
+
+static void
+Opcode_sll_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2806f81;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18;
+}
+
+static void
+Opcode_src_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4781;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x19;
+}
+
+static void
+Opcode_srl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7ed81;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b;
+}
+
+static void
+Opcode_sra_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5ed81;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10;
+}
+
+static void
+Opcode_slli_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x12;
+}
+
+static void
+Opcode_srai_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x381;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14;
+}
+
+static void
+Opcode_srli_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd81;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0200;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0200;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10200;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20200;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600;
+}
+
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x131;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x116;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x231;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x216;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x331;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x316;
+}
+
+static void
+Opcode_rsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6130;
+}
+
+static void
+Opcode_wsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6131;
+}
+
+static void
+Opcode_xsr_memctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6116;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x531;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x516;
+}
+
+static void
+Opcode_rsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb030;
+}
+
+static void
+Opcode_wsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb031;
+}
+
+static void
+Opcode_rsr_configid1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd030;
+}
+
+static void
+Opcode_rsr_243_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf330;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe630;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe631;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe616;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb130;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb131;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb116;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd130;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd131;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd116;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb230;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb231;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb216;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd230;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd231;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd216;
+}
+
+static void
+Opcode_rsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb330;
+}
+
+static void
+Opcode_wsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb331;
+}
+
+static void
+Opcode_xsr_epc3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb316;
+}
+
+static void
+Opcode_rsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd330;
+}
+
+static void
+Opcode_wsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd331;
+}
+
+static void
+Opcode_xsr_excsave3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd316;
+}
+
+static void
+Opcode_rsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb430;
+}
+
+static void
+Opcode_wsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb431;
+}
+
+static void
+Opcode_xsr_epc4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb416;
+}
+
+static void
+Opcode_rsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd430;
+}
+
+static void
+Opcode_wsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd431;
+}
+
+static void
+Opcode_xsr_excsave4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd416;
+}
+
+static void
+Opcode_rsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb530;
+}
+
+static void
+Opcode_wsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb531;
+}
+
+static void
+Opcode_xsr_epc5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb516;
+}
+
+static void
+Opcode_rsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd530;
+}
+
+static void
+Opcode_wsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd531;
+}
+
+static void
+Opcode_xsr_excsave5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd516;
+}
+
+static void
+Opcode_rsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb630;
+}
+
+static void
+Opcode_wsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb631;
+}
+
+static void
+Opcode_xsr_epc6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb616;
+}
+
+static void
+Opcode_rsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd630;
+}
+
+static void
+Opcode_wsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd631;
+}
+
+static void
+Opcode_xsr_excsave6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd616;
+}
+
+static void
+Opcode_rsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb730;
+}
+
+static void
+Opcode_wsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb731;
+}
+
+static void
+Opcode_xsr_epc7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb716;
+}
+
+static void
+Opcode_rsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd730;
+}
+
+static void
+Opcode_wsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd731;
+}
+
+static void
+Opcode_xsr_excsave7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd716;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc230;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc231;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc216;
+}
+
+static void
+Opcode_rsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc330;
+}
+
+static void
+Opcode_wsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc331;
+}
+
+static void
+Opcode_xsr_eps3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc316;
+}
+
+static void
+Opcode_rsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc430;
+}
+
+static void
+Opcode_wsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc431;
+}
+
+static void
+Opcode_xsr_eps4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc416;
+}
+
+static void
+Opcode_rsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc530;
+}
+
+static void
+Opcode_wsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc531;
+}
+
+static void
+Opcode_xsr_eps5_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc516;
+}
+
+static void
+Opcode_rsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc630;
+}
+
+static void
+Opcode_wsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc631;
+}
+
+static void
+Opcode_xsr_eps6_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc616;
+}
+
+static void
+Opcode_rsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc730;
+}
+
+static void
+Opcode_wsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc731;
+}
+
+static void
+Opcode_xsr_eps7_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc716;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee30;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee31;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xee16;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc030;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc031;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc016;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe830;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe831;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe816;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf430;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf431;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf416;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf530;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf531;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf516;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xeb30;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe730;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe731;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe716;
+}
+
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x28;
+}
+
+static void
+Opcode_muluh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2a;
+}
+
+static void
+Opcode_mulsh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b;
+}
+
+static void
+Opcode_mul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400047;
+}
+
+static void
+Opcode_mul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400057;
+}
+
+static void
+Opcode_mul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400067;
+}
+
+static void
+Opcode_mul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400077;
+}
+
+static void
+Opcode_umul_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400007;
+}
+
+static void
+Opcode_umul_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400017;
+}
+
+static void
+Opcode_umul_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400027;
+}
+
+static void
+Opcode_umul_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400037;
+}
+
+static void
+Opcode_mul_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400043;
+}
+
+static void
+Opcode_mul_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400053;
+}
+
+static void
+Opcode_mul_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400063;
+}
+
+static void
+Opcode_mul_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400073;
+}
+
+static void
+Opcode_mul_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400046;
+}
+
+static void
+Opcode_mul_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400056;
+}
+
+static void
+Opcode_mul_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400066;
+}
+
+static void
+Opcode_mul_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400076;
+}
+
+static void
+Opcode_mul_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400042;
+}
+
+static void
+Opcode_mul_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400052;
+}
+
+static void
+Opcode_mul_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400062;
+}
+
+static void
+Opcode_mul_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400072;
+}
+
+static void
+Opcode_mula_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400087;
+}
+
+static void
+Opcode_mula_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400097;
+}
+
+static void
+Opcode_mula_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a7;
+}
+
+static void
+Opcode_mula_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b7;
+}
+
+static void
+Opcode_muls_aa_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c7;
+}
+
+static void
+Opcode_muls_aa_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d7;
+}
+
+static void
+Opcode_muls_aa_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000e7;
+}
+
+static void
+Opcode_muls_aa_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000f7;
+}
+
+static void
+Opcode_mula_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400083;
+}
+
+static void
+Opcode_mula_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400093;
+}
+
+static void
+Opcode_mula_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a3;
+}
+
+static void
+Opcode_mula_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b3;
+}
+
+static void
+Opcode_muls_ad_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c3;
+}
+
+static void
+Opcode_muls_ad_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d3;
+}
+
+static void
+Opcode_muls_ad_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000e3;
+}
+
+static void
+Opcode_muls_ad_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000f3;
+}
+
+static void
+Opcode_mula_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400086;
+}
+
+static void
+Opcode_mula_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400096;
+}
+
+static void
+Opcode_mula_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a6;
+}
+
+static void
+Opcode_mula_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b6;
+}
+
+static void
+Opcode_muls_da_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c6;
+}
+
+static void
+Opcode_muls_da_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d6;
+}
+
+static void
+Opcode_muls_da_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000e6;
+}
+
+static void
+Opcode_muls_da_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000f6;
+}
+
+static void
+Opcode_mula_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400082;
+}
+
+static void
+Opcode_mula_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400092;
+}
+
+static void
+Opcode_mula_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a2;
+}
+
+static void
+Opcode_mula_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b2;
+}
+
+static void
+Opcode_muls_dd_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c2;
+}
+
+static void
+Opcode_muls_dd_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d2;
+}
+
+static void
+Opcode_muls_dd_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000e2;
+}
+
+static void
+Opcode_muls_dd_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000f2;
+}
+
+static void
+Opcode_mula_da_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400085;
+}
+
+static void
+Opcode_mula_da_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400084;
+}
+
+static void
+Opcode_mula_da_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400095;
+}
+
+static void
+Opcode_mula_da_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400094;
+}
+
+static void
+Opcode_mula_da_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a5;
+}
+
+static void
+Opcode_mula_da_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a4;
+}
+
+static void
+Opcode_mula_da_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b5;
+}
+
+static void
+Opcode_mula_da_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b4;
+}
+
+static void
+Opcode_mula_dd_ll_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400081;
+}
+
+static void
+Opcode_mula_dd_ll_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400080;
+}
+
+static void
+Opcode_mula_dd_hl_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400091;
+}
+
+static void
+Opcode_mula_dd_hl_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400090;
+}
+
+static void
+Opcode_mula_dd_lh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a1;
+}
+
+static void
+Opcode_mula_dd_lh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a0;
+}
+
+static void
+Opcode_mula_dd_hh_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b1;
+}
+
+static void
+Opcode_mula_dd_hh_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b0;
+}
+
+static void
+Opcode_lddec_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400009;
+}
+
+static void
+Opcode_ldinc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400008;
+}
+
+static void
+Opcode_rsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_wsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2031;
+}
+
+static void
+Opcode_xsr_m0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2016;
+}
+
+static void
+Opcode_rsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2130;
+}
+
+static void
+Opcode_wsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2131;
+}
+
+static void
+Opcode_xsr_m1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2116;
+}
+
+static void
+Opcode_rsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2230;
+}
+
+static void
+Opcode_wsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2231;
+}
+
+static void
+Opcode_xsr_m2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2216;
+}
+
+static void
+Opcode_rsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2330;
+}
+
+static void
+Opcode_wsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2331;
+}
+
+static void
+Opcode_xsr_m3_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2316;
+}
+
+static void
+Opcode_rsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1030;
+}
+
+static void
+Opcode_wsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1031;
+}
+
+static void
+Opcode_xsr_acclo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1016;
+}
+
+static void
+Opcode_rsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1130;
+}
+
+static void
+Opcode_wsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1131;
+}
+
+static void
+Opcode_xsr_acchi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1116;
+}
+
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10300;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe230;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe231;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe331;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe430;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe431;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe416;
+}
+
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd20f;
+}
+
+static void
+Opcode_rsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9030;
+}
+
+static void
+Opcode_wsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9031;
+}
+
+static void
+Opcode_xsr_dbreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9016;
+}
+
+static void
+Opcode_rsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa030;
+}
+
+static void
+Opcode_wsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa031;
+}
+
+static void
+Opcode_xsr_dbreakc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa016;
+}
+
+static void
+Opcode_rsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9130;
+}
+
+static void
+Opcode_wsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9131;
+}
+
+static void
+Opcode_xsr_dbreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9116;
+}
+
+static void
+Opcode_rsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa130;
+}
+
+static void
+Opcode_wsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa131;
+}
+
+static void
+Opcode_xsr_dbreakc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa116;
+}
+
+static void
+Opcode_rsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8030;
+}
+
+static void
+Opcode_wsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8031;
+}
+
+static void
+Opcode_xsr_ibreaka0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8016;
+}
+
+static void
+Opcode_rsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8130;
+}
+
+static void
+Opcode_wsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8131;
+}
+
+static void
+Opcode_xsr_ibreaka1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8116;
+}
+
+static void
+Opcode_rsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6030;
+}
+
+static void
+Opcode_wsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6031;
+}
+
+static void
+Opcode_xsr_ibreakenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6016;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe930;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe931;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe916;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec30;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec31;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xec16;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed30;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed31;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed16;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6830;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6831;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6816;
+}
+
+static void
+Opcode_lddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0700;
+}
+
+static void
+Opcode_sddr32_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0700;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe1f;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10e1f;
+}
+
+static void
+Opcode_wsr_mmid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5931;
+}
+
+static void
+Opcode_andb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20;
+}
+
+static void
+Opcode_andb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5381;
+}
+
+static void
+Opcode_andbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21;
+}
+
+static void
+Opcode_andbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b81;
+}
+
+static void
+Opcode_orb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x22;
+}
+
+static void
+Opcode_orb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4581;
+}
+
+static void
+Opcode_orbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x23;
+}
+
+static void
+Opcode_orbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x781;
+}
+
+static void
+Opcode_xorb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24;
+}
+
+static void
+Opcode_xorb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4f81;
+}
+
+static void
+Opcode_any4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800;
+}
+
+static void
+Opcode_any4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96501;
+}
+
+static void
+Opcode_all4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900;
+}
+
+static void
+Opcode_all4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8e501;
+}
+
+static void
+Opcode_any8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00;
+}
+
+static void
+Opcode_any8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc6501;
+}
+
+static void
+Opcode_all8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00;
+}
+
+static void
+Opcode_all8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6501;
+}
+
+static void
+Opcode_bf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0000;
+}
+
+static void
+Opcode_bf_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2800003;
+}
+
+static void
+Opcode_bt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d0100;
+}
+
+static void
+Opcode_bt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000003;
+}
+
+static void
+Opcode_movf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c;
+}
+
+static void
+Opcode_movf_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7b81;
+}
+
+static void
+Opcode_movt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d;
+}
+
+static void
+Opcode_movt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1501;
+}
+
+static void
+Opcode_rsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430;
+}
+
+static void
+Opcode_wsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x431;
+}
+
+static void
+Opcode_xsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x416;
+}
+
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea30;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea31;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xea16;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf030;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf031;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf016;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf130;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf131;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf116;
+}
+
+static void
+Opcode_rsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf230;
+}
+
+static void
+Opcode_wsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf231;
+}
+
+static void
+Opcode_xsr_ccompare2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf216;
+}
+
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c0700;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0700;
+}
+
+static void
+Opcode_ipfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0700;
+}
+
+static void
+Opcode_ihu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0720;
+}
+
+static void
+Opcode_iiu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d0730;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f0700;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21f;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x11f;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x31f;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240700;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x250700;
+}
+
+static void
+Opcode_diwbui_p_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2807f0;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280740;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280750;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x260700;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x270700;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200700;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210700;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x220700;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230700;
+}
+
+static void
+Opcode_dpfl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280700;
+}
+
+static void
+Opcode_dhu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280720;
+}
+
+static void
+Opcode_diu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280730;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x91f;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81f;
+}
+
+static void
+Opcode_rsr_prefctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2830;
+}
+
+static void
+Opcode_wsr_prefctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2831;
+}
+
+static void
+Opcode_xsr_prefctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2816;
+}
+
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5331;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5330;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5316;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a30;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a31;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5a16;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b30;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b31;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5b16;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c30;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c31;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5c16;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc05;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd05;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb05;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf05;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe05;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x405;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x305;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x705;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x605;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x105;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x905;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe030;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe031;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe016;
+}
+
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x33;
+}
+
+static void
+Opcode_clamps_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2b81;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34;
+}
+
+static void
+Opcode_min_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b81;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_max_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3381;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_minu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6b81;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x37;
+}
+
+static void
+Opcode_maxu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6381;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe04;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf04;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x32;
+}
+
+static void
+Opcode_sext_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4701;
+}
+
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200b00;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200f00;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200e00;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc30;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc31;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc16;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6330;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6331;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6316;
+}
+
+static void
+Opcode_quou_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2c;
+}
+
+static void
+Opcode_quos_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d;
+}
+
+static void
+Opcode_remu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e;
+}
+
+static void
+Opcode_rems_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f;
+}
+
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x604;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x704;
+}
+
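+/*
+ * The opcodes from here on (RUR/WUR of the AE_* user registers and the
+ * AE_LP*/AE_SP* load/store group) belong to the Xtensa Audio Engine
+ * DSP option; like the core opcodes above, each encoder simply stores
+ * a fixed opcode pattern for its slot.
+ */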
+static void
+Opcode_rur_ae_ovf_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03e;
+}
+
+static void
+Opcode_wur_ae_ovf_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03f;
+}
+
+static void
+Opcode_rur_ae_bithead_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f03e;
+}
+
+static void
+Opcode_wur_ae_bithead_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13f;
+}
+
+static void
+Opcode_rur_ae_ts_fts_bu_bp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f03e;
+}
+
+static void
+Opcode_wur_ae_ts_fts_bu_bp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf23f;
+}
+
+static void
+Opcode_rur_ae_sd_no_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f03e;
+}
+
+static void
+Opcode_wur_ae_sd_no_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf33f;
+}
+
+static void
+Opcode_rur_ae_overflow_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40809c;
+}
+
+static void
+Opcode_wur_ae_overflow_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000ac;
+}
+
+static void
+Opcode_rur_ae_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40909c;
+}
+
+static void
+Opcode_wur_ae_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4001ac;
+}
+
+static void
+Opcode_rur_ae_bitptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40a09c;
+}
+
+static void
+Opcode_wur_ae_bitptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002ac;
+}
+
+static void
+Opcode_rur_ae_bitsused_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40b09c;
+}
+
+static void
+Opcode_wur_ae_bitsused_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4003ac;
+}
+
+static void
+Opcode_rur_ae_tablesize_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40c09c;
+}
+
+static void
+Opcode_wur_ae_tablesize_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4004ac;
+}
+
+static void
+Opcode_rur_ae_first_ts_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40d09c;
+}
+
+static void
+Opcode_wur_ae_first_ts_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4005ac;
+}
+
+static void
+Opcode_rur_ae_nextoffset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e09c;
+}
+
+static void
+Opcode_wur_ae_nextoffset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4006ac;
+}
+
+static void
+Opcode_rur_ae_searchdone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f09c;
+}
+
+static void
+Opcode_wur_ae_searchdone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007ac;
+}
+
+static void
+Opcode_ae_lp16f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81581;
+}
+
+static void
+Opcode_ae_lp16f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005a;
+}
+
+static void
+Opcode_ae_lp16f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1781;
+}
+
+static void
+Opcode_ae_lp16f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009a;
+}
+
+static void
+Opcode_ae_lp16f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81701;
+}
+
+static void
+Opcode_ae_lp16f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000ca;
+}
+
+static void
+Opcode_ae_lp16f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81781;
+}
+
+static void
+Opcode_ae_lp16f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000fa;
+}
+
+static void
+Opcode_ae_lp24_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81d81;
+}
+
+static void
+Opcode_ae_lp24_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40085a;
+}
+
+static void
+Opcode_ae_lp24_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81f01;
+}
+
+static void
+Opcode_ae_lp24_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40089a;
+}
+
+static void
+Opcode_ae_lp24_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81f81;
+}
+
+static void
+Opcode_ae_lp24_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008ca;
+}
+
+static void
+Opcode_ae_lp24_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5581;
+}
+
+static void
+Opcode_ae_lp24_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008fa;
+}
+
+static void
+Opcode_ae_lp24f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5701;
+}
+
+static void
+Opcode_ae_lp24f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40006a;
+}
+
+static void
+Opcode_ae_lp24f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5d01;
+}
+
+static void
+Opcode_ae_lp24f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000aa;
+}
+
+static void
+Opcode_ae_lp24f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85501;
+}
+
+static void
+Opcode_ae_lp24f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000da;
+}
+
+static void
+Opcode_ae_lp24f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5781;
+}
+
+static void
+Opcode_ae_lp24f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000b;
+}
+
+static void
+Opcode_ae_lp16x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d81;
+}
+
+static void
+Opcode_ae_lp16x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40086a;
+}
+
+static void
+Opcode_ae_lp16x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f01;
+}
+
+static void
+Opcode_ae_lp16x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008aa;
+}
+
+static void
+Opcode_ae_lp16x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81d01;
+}
+
+static void
+Opcode_ae_lp16x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008da;
+}
+
+static void
+Opcode_ae_lp16x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f81;
+}
+
+static void
+Opcode_ae_lp16x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40080b;
+}
+
+static void
+Opcode_ae_lp24x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85701;
+}
+
+static void
+Opcode_ae_lp24x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40007a;
+}
+
+static void
+Opcode_ae_lp24x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85d01;
+}
+
+static void
+Opcode_ae_lp24x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000ba;
+}
+
+static void
+Opcode_ae_lp24x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85781;
+}
+
+static void
+Opcode_ae_lp24x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000ea;
+}
+
+static void
+Opcode_ae_lp24x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85d81;
+}
+
+static void
+Opcode_ae_lp24x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001b;
+}
+
+static void
+Opcode_ae_lp24x2_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5d81;
+}
+
+static void
+Opcode_ae_lp24x2_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40087a;
+}
+
+static void
+Opcode_ae_lp24x2_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5f01;
+}
+
+static void
+Opcode_ae_lp24x2_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008ba;
+}
+
+static void
+Opcode_ae_lp24x2_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5f81;
+}
+
+static void
+Opcode_ae_lp24x2_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008ea;
+}
+
+static void
+Opcode_ae_lp24x2_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85581;
+}
+
+static void
+Opcode_ae_lp24x2_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40081b;
+}
+
+static void
+Opcode_ae_sp16x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3501;
+}
+
+static void
+Opcode_ae_sp16x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002b;
+}
+
+static void
+Opcode_ae_sp16x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6501;
+}
+
+static void
+Opcode_ae_sp16x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005b;
+}
+
+static void
+Opcode_ae_sp16x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82581;
+}
+
+static void
+Opcode_ae_sp16x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40008b;
+}
+
+static void
+Opcode_ae_sp16x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2781;
+}
+
+static void
+Opcode_ae_sp16x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000bb;
+}
+
+static void
+Opcode_ae_sp24x2s_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83501;
+}
+
+static void
+Opcode_ae_sp24x2s_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40082b;
+}
+
+static void
+Opcode_ae_sp24x2s_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3781;
+}
+
+static void
+Opcode_ae_sp24x2s_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40085b;
+}
+
+static void
+Opcode_ae_sp24x2s_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d81;
+}
+
+static void
+Opcode_ae_sp24x2s_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40088b;
+}
+
+static void
+Opcode_ae_sp24x2s_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f01;
+}
+
+static void
+Opcode_ae_sp24x2s_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008bb;
+}
+
+static void
+Opcode_ae_sp24x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82f81;
+}
+
+static void
+Opcode_ae_sp24x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40003b;
+}
+
+static void
+Opcode_ae_sp24x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3581;
+}
+
+static void
+Opcode_ae_sp24x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40006b;
+}
+
+static void
+Opcode_ae_sp24x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3701;
+}
+
+static void
+Opcode_ae_sp24x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009b;
+}
+
+static void
+Opcode_ae_sp24x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d01;
+}
+
+static void
+Opcode_ae_sp24x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000cb;
+}
+
+static void
+Opcode_ae_sp16f_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85f81;
+}
+
+static void
+Opcode_ae_sp16f_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40083b;
+}
+
+static void
+Opcode_ae_sp16f_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2581;
+}
+
+static void
+Opcode_ae_sp16f_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40086b;
+}
+
+static void
+Opcode_ae_sp16f_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2701;
+}
+
+static void
+Opcode_ae_sp16f_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40089b;
+}
+
+static void
+Opcode_ae_sp16f_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d01;
+}
+
+static void
+Opcode_ae_sp16f_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008cb;
+}
+
+static void
+Opcode_ae_sp24s_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82d01;
+}
+
+static void
+Opcode_ae_sp24s_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004b;
+}
+
+static void
+Opcode_ae_sp24s_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f81;
+}
+
+static void
+Opcode_ae_sp24s_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40007b;
+}
+
+static void
+Opcode_ae_sp24s_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82d81;
+}
+
+static void
+Opcode_ae_sp24s_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000ab;
+}
+
+static void
+Opcode_ae_sp24s_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82f01;
+}
+
+static void
+Opcode_ae_sp24s_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000db;
+}
+
+static void
+Opcode_ae_sp24f_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82701;
+}
+
+static void
+Opcode_ae_sp24f_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40084b;
+}
+
+static void
+Opcode_ae_sp24f_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x82781;
+}
+
+static void
+Opcode_ae_sp24f_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40087b;
+}
+
+static void
+Opcode_ae_sp24f_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2d81;
+}
+
+static void
+Opcode_ae_sp24f_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008ab;
+}
+
+static void
+Opcode_ae_sp24f_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2f01;
+}
+
+static void
+Opcode_ae_sp24f_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008db;
+}
+
+static void
+Opcode_ae_lq56_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x206581;
+}
+
+static void
+Opcode_ae_lq56_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001c;
+}
+
+static void
+Opcode_ae_lq56_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406581;
+}
+
+static void
+Opcode_ae_lq56_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40021c;
+}
+
+static void
+Opcode_ae_lq56_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x606581;
+}
+
+static void
+Opcode_ae_lq56_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002c;
+}
+
+static void
+Opcode_ae_lq56_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6781;
+}
+
+static void
+Opcode_ae_lq56_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40022c;
+}
+
+static void
+Opcode_ae_lq32f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6581;
+}
+
+static void
+Opcode_ae_lq32f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40011c;
+}
+
+static void
+Opcode_ae_lq32f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6701;
+}
+
+static void
+Opcode_ae_lq32f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40031c;
+}
+
+static void
+Opcode_ae_lq32f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d01;
+}
+
+static void
+Opcode_ae_lq32f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40012c;
+}
+
+static void
+Opcode_ae_lq32f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7501;
+}
+
+static void
+Opcode_ae_lq32f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40032c;
+}
+
+static void
+Opcode_ae_sq56s_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83781;
+}
+
+static void
+Opcode_ae_sq56s_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40003c;
+}
+
+static void
+Opcode_ae_sq56s_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183701;
+}
+
+static void
+Opcode_ae_sq56s_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40083c;
+}
+
+static void
+Opcode_ae_sq56s_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183781;
+}
+
+static void
+Opcode_ae_sq56s_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004c;
+}
+
+static void
+Opcode_ae_sq56s_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83d81;
+}
+
+static void
+Opcode_ae_sq56s_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40084c;
+}
+
+static void
+Opcode_ae_sq32f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83581;
+}
+
+static void
+Opcode_ae_sq32f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40043c;
+}
+
+static void
+Opcode_ae_sq32f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83701;
+}
+
+static void
+Opcode_ae_sq32f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c3c;
+}
+
+static void
+Opcode_ae_sq32f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83d01;
+}
+
+static void
+Opcode_ae_sq32f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40044c;
+}
+
+static void
+Opcode_ae_sq32f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183581;
+}
+
+static void
+Opcode_ae_sq32f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400c4c;
+}
+
+static void
+Opcode_ae_zerop48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x46001;
+}
+
+static void
+Opcode_ae_movp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86001;
+}
+
+static void
+Opcode_ae_movp48_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86501;
+}
+
+static void
+Opcode_ae_movp48_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009c;
+}
+
+static void
+Opcode_ae_selp24_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd081;
+}
+
+static void
+Opcode_ae_selp24_ll_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x24000;
+}
+
+static void
+Opcode_ae_selp24_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x89001;
+}
+
+static void
+Opcode_ae_selp24_lh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000;
+}
+
+static void
+Opcode_ae_selp24_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd001;
+}
+
+static void
+Opcode_ae_selp24_hl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc000;
+}
+
+static void
+Opcode_ae_selp24_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9081;
+}
+
+static void
+Opcode_ae_selp24_hh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_ae_movtp24x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402081;
+}
+
+static void
+Opcode_ae_movfp24x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x202081;
+}
+
+static void
+Opcode_ae_movtp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2001;
+}
+
+static void
+Opcode_ae_movfp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1001;
+}
+
+static void
+Opcode_ae_movpa24x2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85f01;
+}
+
+static void
+Opcode_ae_movpa24x2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000c;
+}
+
+static void
+Opcode_ae_truncp24a32x2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f81;
+}
+
+static void
+Opcode_ae_truncp24a32x2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40080c;
+}
+
+static void
+Opcode_ae_cvta32p24_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83f81;
+}
+
+static void
+Opcode_ae_cvta32p24_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000bc;
+}
+
+static void
+Opcode_ae_cvta32p24_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x83f01;
+}
+
+static void
+Opcode_ae_cvta32p24_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008bc;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_ll_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5501;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000eb;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_lh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d01;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008eb;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1701;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000fb;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1581;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008fb;
+}
+
+static void
+Opcode_ae_truncp24q48x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20005;
+}
+
+static void
+Opcode_ae_truncp16_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86e01;
+}
+
+static void
+Opcode_ae_roundsp24q48sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3881;
+}
+
+static void
+Opcode_ae_roundsp24q48asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3481;
+}
+
+static void
+Opcode_ae_roundsp16q48sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3281;
+}
+
+static void
+Opcode_ae_roundsp16q48asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3081;
+}
+
+static void
+Opcode_ae_roundsp16sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86401;
+}
+
+static void
+Opcode_ae_roundsp16asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86201;
+}
+
+static void
+Opcode_ae_zeroq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x284581;
+}
+
+static void
+Opcode_ae_movq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380581;
+}
+
+static void
+Opcode_ae_movq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xef01;
+}
+
+static void
+Opcode_ae_movq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41409c;
+}
+
+static void
+Opcode_ae_movtq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x806f81;
+}
+
+static void
+Opcode_ae_movtq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41005e;
+}
+
+static void
+Opcode_ae_movfq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f81;
+}
+
+static void
+Opcode_ae_movfq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41006e;
+}
+
+static void
+Opcode_ae_cvtq48a32s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x206701;
+}
+
+static void
+Opcode_ae_cvtq48a32s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x43027e;
+}
+
+static void
+Opcode_ae_cvtq48p24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300581;
+}
+
+static void
+Opcode_ae_cvtq48p24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280581;
+}
+
+static void
+Opcode_ae_satq48s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x488605;
+}
+
+static void
+Opcode_ae_truncq32_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c0581;
+}
+
+static void
+Opcode_ae_roundsq32sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3a0581;
+}
+
+static void
+Opcode_ae_roundsq32asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x390581;
+}
+
+static void
+Opcode_ae_trunca32q48_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183d81;
+}
+
+static void
+Opcode_ae_trunca32q48_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41007e;
+}
+
+static void
+Opcode_ae_movap24s_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1083f01;
+}
+
+static void
+Opcode_ae_movap24s_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40007c;
+}
+
+static void
+Opcode_ae_movap24s_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x883f01;
+}
+
+static void
+Opcode_ae_movap24s_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40087c;
+}
+
+static void
+Opcode_ae_trunca16p24s_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4083f01;
+}
+
+static void
+Opcode_ae_trunca16p24s_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40008c;
+}
+
+static void
+Opcode_ae_trunca16p24s_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2083f01;
+}
+
+static void
+Opcode_ae_trunca16p24s_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40088c;
+}
+
+static void
+Opcode_ae_addp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1081;
+}
+
+static void
+Opcode_ae_subp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x89081;
+}
+
+static void
+Opcode_ae_negp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16001;
+}
+
+static void
+Opcode_ae_absp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6001;
+}
+
+static void
+Opcode_ae_maxp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81081;
+}
+
+static void
+Opcode_ae_minp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5081;
+}
+
+static void
+Opcode_ae_maxbp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_ae_minbp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x81;
+}
+
+static void
+Opcode_ae_addsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5001;
+}
+
+static void
+Opcode_ae_subsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8d001;
+}
+
+static void
+Opcode_ae_negsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26001;
+}
+
+static void
+Opcode_ae_abssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa001;
+}
+
+static void
+Opcode_ae_andp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9001;
+}
+
+static void
+Opcode_ae_nandp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85001;
+}
+
+static void
+Opcode_ae_orp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x85081;
+}
+
+static void
+Opcode_ae_xorp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8d081;
+}
+
+static void
+Opcode_ae_ltp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x102081;
+}
+
+static void
+Opcode_ae_lep24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3001;
+}
+
+static void
+Opcode_ae_eqp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2081;
+}
+
+static void
+Opcode_ae_addq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005;
+}
+
+static void
+Opcode_ae_subq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280605;
+}
+
+static void
+Opcode_ae_negq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600605;
+}
+
+static void
+Opcode_ae_absq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480605;
+}
+
+static void
+Opcode_ae_maxq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100605;
+}
+
+static void
+Opcode_ae_minq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200605;
+}
+
+static void
+Opcode_ae_maxbq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_ae_minbq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x205;
+}
+
+static void
+Opcode_ae_addsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x605;
+}
+
+static void
+Opcode_ae_subsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300605;
+}
+
+static void
+Opcode_ae_negsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x484605;
+}
+
+static void
+Opcode_ae_abssq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500605;
+}
+
+static void
+Opcode_ae_andq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80605;
+}
+
+static void
+Opcode_ae_nandq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400605;
+}
+
+static void
+Opcode_ae_orq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180605;
+}
+
+static void
+Opcode_ae_xorq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380605;
+}
+
+static void
+Opcode_ae_sllip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x101;
+}
+
+static void
+Opcode_ae_srlip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501;
+}
+
+static void
+Opcode_ae_sraip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x301;
+}
+
+static void
+Opcode_ae_sllsp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86801;
+}
+
+static void
+Opcode_ae_srlsp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86c01;
+}
+
+static void
+Opcode_ae_srasp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86a01;
+}
+
+static void
+Opcode_ae_sllisp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x181;
+}
+
+static void
+Opcode_ae_sllssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86601;
+}
+
+static void
+Opcode_ae_slliq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6d81;
+}
+
+static void
+Opcode_ae_slliq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005c;
+}
+
+static void
+Opcode_ae_srliq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16d81;
+}
+
+static void
+Opcode_ae_srliq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40105c;
+}
+
+static void
+Opcode_ae_sraiq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xed81;
+}
+
+static void
+Opcode_ae_sraiq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40205c;
+}
+
+static void
+Opcode_ae_sllsq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16f01;
+}
+
+static void
+Opcode_ae_sllsq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41009c;
+}
+
+static void
+Opcode_ae_srlsq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80ef01;
+}
+
+static void
+Opcode_ae_srlsq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41109c;
+}
+
+static void
+Opcode_ae_srasq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4ef01;
+}
+
+static void
+Opcode_ae_srasq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41209c;
+}
+
+static void
+Opcode_ae_sllaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1006f81;
+}
+
+static void
+Opcode_ae_sllaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41001e;
+}
+
+static void
+Opcode_ae_srlaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1806f81;
+}
+
+static void
+Opcode_ae_srlaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41002e;
+}
+
+static void
+Opcode_ae_sraaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4006f81;
+}
+
+static void
+Opcode_ae_sraaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41003e;
+}
+
+static void
+Opcode_ae_sllisq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f01;
+}
+
+static void
+Opcode_ae_sllisq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40305c;
+}
+
+static void
+Opcode_ae_sllssq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2ef01;
+}
+
+static void
+Opcode_ae_sllssq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41309c;
+}
+
+static void
+Opcode_ae_sllasq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2006f81;
+}
+
+static void
+Opcode_ae_sllasq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41004e;
+}
+
+static void
+Opcode_ae_ltq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10005;
+}
+
+static void
+Opcode_ae_leq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x805;
+}
+
+static void
+Opcode_ae_eqq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x405;
+}
+
+static void
+Opcode_ae_nsaq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x183d01;
+}
+
+static void
+Opcode_ae_nsaq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41047e;
+}
+
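+/*
+ * AE multiply family.  Reading the mnemonics (inferred from the names,
+ * since the constants are machine generated): MUL is the plain product,
+ * MULA/MULS accumulate by add/subtract, MULZ* first zero the
+ * accumulator; an F marks fractional and an S saturating variants, and
+ * the H/L suffixes select the high/low halves of the packed operands.
+ */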
+static void
+Opcode_ae_mulsrfq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500901;
+}
+
+static void
+Opcode_ae_mulsrfq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300901;
+}
+
+static void
+Opcode_ae_mularfq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600901;
+}
+
+static void
+Opcode_ae_mularfq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100901;
+}
+
+static void
+Opcode_ae_mulrfq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400901;
+}
+
+static void
+Opcode_ae_mulrfq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200901;
+}
+
+static void
+Opcode_ae_mulsfq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680901;
+}
+
+static void
+Opcode_ae_mulsfq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180901;
+}
+
+static void
+Opcode_ae_mulafq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480901;
+}
+
+static void
+Opcode_ae_mulafq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280901;
+}
+
+static void
+Opcode_ae_mulfq32sp24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700901;
+}
+
+static void
+Opcode_ae_mulfq32sp24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80901;
+}
+
+static void
+Opcode_ae_mulfs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100086;
+}
+
+static void
+Opcode_ae_mulfp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88186;
+}
+
+static void
+Opcode_ae_mulp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180006;
+}
+
+static void
+Opcode_ae_mulfs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c186;
+}
+
+static void
+Opcode_ae_mulfp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c006;
+}
+
+static void
+Opcode_ae_mulp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x108006;
+}
+
+static void
+Opcode_ae_mulfs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c106;
+}
+
+static void
+Opcode_ae_mulfp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88106;
+}
+
+static void
+Opcode_ae_mulp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104006;
+}
+
+static void
+Opcode_ae_mulfs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c086;
+}
+
+static void
+Opcode_ae_mulfp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88086;
+}
+
+static void
+Opcode_ae_mulp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100106;
+}
+
+static void
+Opcode_ae_mulafs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4106;
+}
+
+static void
+Opcode_ae_mulafp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200006;
+}
+
+static void
+Opcode_ae_mulap24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc186;
+}
+
+static void
+Opcode_ae_mulafs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4086;
+}
+
+static void
+Opcode_ae_mulafp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100006;
+}
+
+static void
+Opcode_ae_mulap24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc106;
+}
+
+static void
+Opcode_ae_mulafs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x186;
+}
+
+static void
+Opcode_ae_mulafp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80006;
+}
+
+static void
+Opcode_ae_mulap24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc086;
+}
+
+static void
+Opcode_ae_mulafs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400006;
+}
+
+static void
+Opcode_ae_mulafp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8006;
+}
+
+static void
+Opcode_ae_mulap24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8186;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180086;
+}
+
+static void
+Opcode_ae_mulsfp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x108186;
+}
+
+static void
+Opcode_ae_mulsp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x188086;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c186;
+}
+
+static void
+Opcode_ae_mulsfp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c006;
+}
+
+static void
+Opcode_ae_mulsp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x184186;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c106;
+}
+
+static void
+Opcode_ae_mulsfp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x108106;
+}
+
+static void
+Opcode_ae_mulsp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x184106;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10c086;
+}
+
+static void
+Opcode_ae_mulsfp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x108086;
+}
+
+static void
+Opcode_ae_mulsp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x184086;
+}
+
+static void
+Opcode_ae_mulafs56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc006;
+}
+
+static void
+Opcode_ae_mulas56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x88006;
+}
+
+static void
+Opcode_ae_mulafs56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8106;
+}
+
+static void
+Opcode_ae_mulas56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x84006;
+}
+
+static void
+Opcode_ae_mulafs56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8086;
+}
+
+static void
+Opcode_ae_mulas56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80106;
+}
+
+static void
+Opcode_ae_mulafs56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4186;
+}
+
+static void
+Opcode_ae_mulas56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80086;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180186;
+}
+
+static void
+Opcode_ae_mulss56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18c086;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x188006;
+}
+
+static void
+Opcode_ae_mulss56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x188186;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x184006;
+}
+
+static void
+Opcode_ae_mulss56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18c006;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180106;
+}
+
+static void
+Opcode_ae_mulss56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x188106;
+}
+
+static void
+Opcode_ae_mulfq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380381;
+}
+
+static void
+Opcode_ae_mulfq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300381;
+}
+
+static void
+Opcode_ae_mulfq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500381;
+}
+
+static void
+Opcode_ae_mulfq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480381;
+}
+
+static void
+Opcode_ae_mulq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580381;
+}
+
+static void
+Opcode_ae_mulq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600381;
+}
+
+static void
+Opcode_ae_mulq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700381;
+}
+
+static void
+Opcode_ae_mulq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680381;
+}
+
+static void
+Opcode_ae_mulafq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x381;
+}
+
+static void
+Opcode_ae_mulafq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x901;
+}
+
+static void
+Opcode_ae_mulafq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100381;
+}
+
+static void
+Opcode_ae_mulafq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80381;
+}
+
+static void
+Opcode_ae_mulaq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400381;
+}
+
+static void
+Opcode_ae_mulaq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200381;
+}
+
+static void
+Opcode_ae_mulaq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280381;
+}
+
+static void
+Opcode_ae_mulaq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180381;
+}
+
+static void
+Opcode_ae_mulsfq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x581;
+}
+
+static void
+Opcode_ae_mulsfq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780381;
+}
+
+static void
+Opcode_ae_mulsfq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80581;
+}
+
+static void
+Opcode_ae_mulsfq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x701;
+}
+
+static void
+Opcode_ae_mulsq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200581;
+}
+
+static void
+Opcode_ae_mulsq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100581;
+}
+
+static void
+Opcode_ae_mulsq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180581;
+}
+
+static void
+Opcode_ae_mulsq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400581;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100002;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180002;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200002;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80002;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500002;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400002;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700002;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80003;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580002;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780002;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680002;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500003;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200004;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680003;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700003;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380003;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80004;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600003;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780003;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480003;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100004;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580003;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x580004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280004;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x780004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x480004;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400004;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x680004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300004;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180004;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x700004;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x380004;
+}
+
+static void
+Opcode_ae_mulzaafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204006;
+}
+
+static void
+Opcode_ae_mulzaap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280006;
+}
+
+static void
+Opcode_ae_mulzaafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x208006;
+}
+
+static void
+Opcode_ae_mulzaap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300006;
+}
+
+static void
+Opcode_ae_mulzasfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200186;
+}
+
+static void
+Opcode_ae_mulzasp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204106;
+}
+
+static void
+Opcode_ae_mulzasfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204086;
+}
+
+static void
+Opcode_ae_mulzasp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x204186;
+}
+
+static void
+Opcode_ae_mulzsafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x208086;
+}
+
+static void
+Opcode_ae_mulzsap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c006;
+}
+
+static void
+Opcode_ae_mulzsafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x208106;
+}
+
+static void
+Opcode_ae_mulzsap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x208186;
+}
+
+static void
+Opcode_ae_mulzssfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c086;
+}
+
+static void
+Opcode_ae_mulzssp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c186;
+}
+
+static void
+Opcode_ae_mulzssfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c106;
+}
+
+static void
+Opcode_ae_mulzssp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x280086;
+}
+
+static void
+Opcode_ae_mulaafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_ae_mulaap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x106;
+}
+
+static void
+Opcode_ae_mulaafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x86;
+}
+
+static void
+Opcode_ae_mulaap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4006;
+}
+
+static void
+Opcode_ae_mulasfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80186;
+}
+
+static void
+Opcode_ae_mulasp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x84106;
+}
+
+static void
+Opcode_ae_mulasfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x84086;
+}
+
+static void
+Opcode_ae_mulasp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x84186;
+}
+
+static void
+Opcode_ae_mulsafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100186;
+}
+
+static void
+Opcode_ae_mulsap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104106;
+}
+
+static void
+Opcode_ae_mulsafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104086;
+}
+
+static void
+Opcode_ae_mulsap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x104186;
+}
+
+static void
+Opcode_ae_mulssfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18c106;
+}
+
+static void
+Opcode_ae_mulssp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200086;
+}
+
+static void
+Opcode_ae_mulssfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18c186;
+}
+
+static void
+Opcode_ae_mulssp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200106;
+}
+
+static void
+Opcode_ae_sha32_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x41000e;
+}
+
+static void
+Opcode_ae_vldl32t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000a;
+}
+
+static void
+Opcode_ae_vldl16t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001a;
+}
+
+static void
+Opcode_ae_vldl16c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410e7e;
+}
+
+static void
+Opcode_ae_vldsht_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4008ac;
+}
+
+static void
+Opcode_ae_lb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40006c;
+}
+
+static void
+Opcode_ae_lbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x42000e;
+}
+
+static void
+Opcode_ae_lbk_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002a;
+}
+
+static void
+Opcode_ae_lbki_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000e;
+}
+
+static void
+Opcode_ae_db_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40010f;
+}
+
+static void
+Opcode_ae_dbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40020f;
+}
+
+static void
+Opcode_ae_vlel32t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40003a;
+}
+
+static void
+Opcode_ae_vlel16t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004a;
+}
+
+static void
+Opcode_ae_sb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40011f;
+}
+
+static void
+Opcode_ae_sbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000f;
+}
+
+static void
+Opcode_ae_vles16c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410c7e;
+}
+
+static void
+Opcode_ae_sbf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410d7e;
+}
+
+static void
+Opcode_ae_slaasq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6006f81;
+}
+
+static void
+Opcode_ae_addbrba32_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_ae_minabssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8e001;
+}
+
+static void
+Opcode_ae_maxabssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe001;
+}
+
+static void
+Opcode_ae_minabssq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x240005;
+}
+
+static void
+Opcode_ae_maxabssq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x440005;
+}
+
+static void
+Opcode_rur_ae_cbegin0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6f03e;
+}
+
+static void
+Opcode_wur_ae_cbegin0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf63f;
+}
+
+static void
+Opcode_rur_ae_cend0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f03e;
+}
+
+static void
+Opcode_wur_ae_cend0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf73f;
+}
+
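+/*
+ * Circular-addressing ('.C') variants of the AE load/store opcodes
+ * follow; these wrap addresses within the buffer bounded by the
+ * AE_CBEGIN0/AE_CEND0 user registers accessed just above.
+ */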
+static void
+Opcode_ae_lp24x2_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f01;
+}
+
+static void
+Opcode_ae_sp24x2s_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87781;
+}
+
+static void
+Opcode_ae_lp24x2f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87f01;
+}
+
+static void
+Opcode_ae_sp24x2f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7781;
+}
+
+static void
+Opcode_ae_lp16x2f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87d01;
+}
+
+static void
+Opcode_ae_sp16x2f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87581;
+}
+
+static void
+Opcode_ae_lp24_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7701;
+}
+
+static void
+Opcode_ae_sp24s_l_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87d81;
+}
+
+static void
+Opcode_ae_lp24f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87701;
+}
+
+static void
+Opcode_ae_sp24f_l_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d81;
+}
+
+static void
+Opcode_ae_lp16f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7d01;
+}
+
+static void
+Opcode_ae_sp16f_l_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7581;
+}
+
+static void
+Opcode_ae_lq56_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x207501;
+}
+
+static void
+Opcode_ae_sq56s_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x87f81;
+}
+
+static void
+Opcode_ae_lq32f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407501;
+}
+
+static void
+Opcode_ae_sq32f_c_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7f81;
+}
+
+static void
+Opcode_rur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6e03e;
+}
+
+static void
+Opcode_wur_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe63f;
+}
+
+static void
+Opcode_read_impwire_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_setb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e0;
+}
+
+static void
+Opcode_clrb_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x21e0;
+}
+
+static void
+Opcode_wrmsk_expstate_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2e0;
+}
+
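+/*
+ * Per-opcode encode tables.  Each table holds one entry per slot --
+ * here Slot_inst, Slot_inst16a, Slot_inst16b, Slot_ae_slot1 and
+ * Slot_ae_slot0, in that order -- and a zero entry means the opcode
+ * cannot be encoded in that slot.  The slot encode functions above
+ * store only each opcode's fixed bit pattern into word 0 of the slot
+ * buffer; operand fields are filled in afterwards through the
+ * per-field set functions.
+ */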
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0, 0, Opcode_addi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0, 0, Opcode_addmi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0, 0, Opcode_add_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0, 0, Opcode_sub_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0, 0, Opcode_addx2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0, 0, Opcode_addx4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0, 0, Opcode_addx8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0, 0, Opcode_subx2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0, 0, Opcode_subx4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0, 0, Opcode_subx8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0, 0, Opcode_and_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0, 0, Opcode_or_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0, 0, Opcode_xor_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0, 0, Opcode_beqi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0, 0, Opcode_bnei_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0, 0, Opcode_bgei_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0, 0, Opcode_blti_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0, 0, Opcode_bbci_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0, 0, Opcode_bbsi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0, 0, Opcode_bgeui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0, 0, Opcode_bltui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0, 0, Opcode_beq_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0, 0, Opcode_bne_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0, 0, Opcode_bge_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0, 0, Opcode_blt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0, 0, Opcode_bgeu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0, 0, Opcode_bltu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0, 0, Opcode_bany_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0, 0, Opcode_bnone_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0, 0, Opcode_ball_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0, 0, Opcode_bnall_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0, 0, Opcode_bbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0, 0, Opcode_bbs_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0, 0, Opcode_beqz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0, 0, Opcode_bnez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0, 0, Opcode_bgez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0, 0, Opcode_bltz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0, 0, Opcode_extui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0, 0, Opcode_j_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0, 0, Opcode_jx_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0, 0, Opcode_l16ui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0, 0, Opcode_l16si_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0, 0, Opcode_l32i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0, 0, Opcode_l32r_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0, 0, Opcode_l8ui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0, 0, Opcode_movi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0, 0, Opcode_moveqz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0, 0, Opcode_movnez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0, 0, Opcode_movltz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0, 0, Opcode_movgez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0, 0, Opcode_neg_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0, 0, Opcode_abs_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0, Opcode_nop_Slot_ae_slot1_encode, Opcode_nop_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0, 0, Opcode_s16i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0, 0, Opcode_s32i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32nb_encode_fns[] = {
+ Opcode_s32nb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0, 0, Opcode_s8i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0, 0, Opcode_ssr_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0, 0, Opcode_ssl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0, 0, Opcode_ssa8l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0, 0, Opcode_ssa8b_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0, 0, Opcode_ssai_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0, 0, Opcode_sll_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0, 0, Opcode_src_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0, 0, Opcode_srl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0, 0, Opcode_sra_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0, 0, Opcode_slli_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0, 0, Opcode_srai_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0, 0, Opcode_srli_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0, 0, 0
+};
+
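+/*
+ * RSR/WSR/XSR accessors (read, write and exchange special register)
+ * for the core's special registers: the LBEG/LEND/LCOUNT loop
+ * registers, SAR, PS, the per-level EPC/EPS/EXCSAVE registers, and
+ * the debug and MMU state further below.
+ */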
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_memctl_encode_fns[] = {
+ Opcode_rsr_memctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_memctl_encode_fns[] = {
+ Opcode_wsr_memctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_memctl_encode_fns[] = {
+ Opcode_xsr_memctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid0_encode_fns[] = {
+ Opcode_rsr_configid0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_configid0_encode_fns[] = {
+ Opcode_wsr_configid0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid1_encode_fns[] = {
+ Opcode_rsr_configid1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_243_encode_fns[] = {
+ Opcode_rsr_243_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc3_encode_fns[] = {
+ Opcode_rsr_epc3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc3_encode_fns[] = {
+ Opcode_wsr_epc3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc3_encode_fns[] = {
+ Opcode_xsr_epc3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave3_encode_fns[] = {
+ Opcode_rsr_excsave3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave3_encode_fns[] = {
+ Opcode_wsr_excsave3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave3_encode_fns[] = {
+ Opcode_xsr_excsave3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc4_encode_fns[] = {
+ Opcode_rsr_epc4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc4_encode_fns[] = {
+ Opcode_wsr_epc4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc4_encode_fns[] = {
+ Opcode_xsr_epc4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave4_encode_fns[] = {
+ Opcode_rsr_excsave4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave4_encode_fns[] = {
+ Opcode_wsr_excsave4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave4_encode_fns[] = {
+ Opcode_xsr_excsave4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc5_encode_fns[] = {
+ Opcode_rsr_epc5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc5_encode_fns[] = {
+ Opcode_wsr_epc5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc5_encode_fns[] = {
+ Opcode_xsr_epc5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave5_encode_fns[] = {
+ Opcode_rsr_excsave5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave5_encode_fns[] = {
+ Opcode_wsr_excsave5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave5_encode_fns[] = {
+ Opcode_xsr_excsave5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc6_encode_fns[] = {
+ Opcode_rsr_epc6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc6_encode_fns[] = {
+ Opcode_wsr_epc6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc6_encode_fns[] = {
+ Opcode_xsr_epc6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave6_encode_fns[] = {
+ Opcode_rsr_excsave6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave6_encode_fns[] = {
+ Opcode_wsr_excsave6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave6_encode_fns[] = {
+ Opcode_xsr_excsave6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc7_encode_fns[] = {
+ Opcode_rsr_epc7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc7_encode_fns[] = {
+ Opcode_wsr_epc7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc7_encode_fns[] = {
+ Opcode_xsr_epc7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave7_encode_fns[] = {
+ Opcode_rsr_excsave7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave7_encode_fns[] = {
+ Opcode_wsr_excsave7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave7_encode_fns[] = {
+ Opcode_xsr_excsave7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps3_encode_fns[] = {
+ Opcode_rsr_eps3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps3_encode_fns[] = {
+ Opcode_wsr_eps3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps3_encode_fns[] = {
+ Opcode_xsr_eps3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps4_encode_fns[] = {
+ Opcode_rsr_eps4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps4_encode_fns[] = {
+ Opcode_wsr_eps4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps4_encode_fns[] = {
+ Opcode_xsr_eps4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps5_encode_fns[] = {
+ Opcode_rsr_eps5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps5_encode_fns[] = {
+ Opcode_wsr_eps5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps5_encode_fns[] = {
+ Opcode_xsr_eps5_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps6_encode_fns[] = {
+ Opcode_rsr_eps6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps6_encode_fns[] = {
+ Opcode_wsr_eps6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps6_encode_fns[] = {
+ Opcode_xsr_eps6_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps7_encode_fns[] = {
+ Opcode_rsr_eps7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps7_encode_fns[] = {
+ Opcode_wsr_eps7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps7_encode_fns[] = {
+ Opcode_xsr_eps7_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
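+/*
+ * MAC16 option opcodes: MUL16U/MUL16S and the MUL/MULA/MULS
+ * .AA/.AD/.DA/.DD half-word variants, including the LDDEC/LDINC
+ * auto-update load forms, plus accessors for the M0-M3 and
+ * ACCLO/ACCHI registers.
+ */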
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muluh_encode_fns[] = {
+ Opcode_muluh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mulsh_encode_fns[] = {
+ Opcode_mulsh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_ll_encode_fns[] = {
+ Opcode_mul_aa_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hl_encode_fns[] = {
+ Opcode_mul_aa_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_lh_encode_fns[] = {
+ Opcode_mul_aa_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_aa_hh_encode_fns[] = {
+ Opcode_mul_aa_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_ll_encode_fns[] = {
+ Opcode_umul_aa_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hl_encode_fns[] = {
+ Opcode_umul_aa_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_lh_encode_fns[] = {
+ Opcode_umul_aa_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_umul_aa_hh_encode_fns[] = {
+ Opcode_umul_aa_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_ll_encode_fns[] = {
+ Opcode_mul_ad_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hl_encode_fns[] = {
+ Opcode_mul_ad_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_lh_encode_fns[] = {
+ Opcode_mul_ad_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_ad_hh_encode_fns[] = {
+ Opcode_mul_ad_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_ll_encode_fns[] = {
+ Opcode_mul_da_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hl_encode_fns[] = {
+ Opcode_mul_da_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_lh_encode_fns[] = {
+ Opcode_mul_da_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_da_hh_encode_fns[] = {
+ Opcode_mul_da_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_ll_encode_fns[] = {
+ Opcode_mul_dd_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hl_encode_fns[] = {
+ Opcode_mul_dd_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_lh_encode_fns[] = {
+ Opcode_mul_dd_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul_dd_hh_encode_fns[] = {
+ Opcode_mul_dd_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_ll_encode_fns[] = {
+ Opcode_mula_aa_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hl_encode_fns[] = {
+ Opcode_mula_aa_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_lh_encode_fns[] = {
+ Opcode_mula_aa_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_aa_hh_encode_fns[] = {
+ Opcode_mula_aa_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_ll_encode_fns[] = {
+ Opcode_muls_aa_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hl_encode_fns[] = {
+ Opcode_muls_aa_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_lh_encode_fns[] = {
+ Opcode_muls_aa_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_aa_hh_encode_fns[] = {
+ Opcode_muls_aa_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_ll_encode_fns[] = {
+ Opcode_mula_ad_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hl_encode_fns[] = {
+ Opcode_mula_ad_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_lh_encode_fns[] = {
+ Opcode_mula_ad_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_ad_hh_encode_fns[] = {
+ Opcode_mula_ad_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_ll_encode_fns[] = {
+ Opcode_muls_ad_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hl_encode_fns[] = {
+ Opcode_muls_ad_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_lh_encode_fns[] = {
+ Opcode_muls_ad_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_ad_hh_encode_fns[] = {
+ Opcode_muls_ad_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_encode_fns[] = {
+ Opcode_mula_da_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_encode_fns[] = {
+ Opcode_mula_da_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_encode_fns[] = {
+ Opcode_mula_da_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_encode_fns[] = {
+ Opcode_mula_da_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_ll_encode_fns[] = {
+ Opcode_muls_da_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hl_encode_fns[] = {
+ Opcode_muls_da_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_lh_encode_fns[] = {
+ Opcode_muls_da_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_da_hh_encode_fns[] = {
+ Opcode_muls_da_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_encode_fns[] = {
+ Opcode_mula_dd_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_encode_fns[] = {
+ Opcode_mula_dd_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_encode_fns[] = {
+ Opcode_mula_dd_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_encode_fns[] = {
+ Opcode_mula_dd_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_ll_encode_fns[] = {
+ Opcode_muls_dd_ll_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hl_encode_fns[] = {
+ Opcode_muls_dd_hl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_lh_encode_fns[] = {
+ Opcode_muls_dd_lh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_muls_dd_hh_encode_fns[] = {
+ Opcode_muls_dd_hh_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_lddec_encode_fns[] = {
+ Opcode_mula_da_ll_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_ll_ldinc_encode_fns[] = {
+ Opcode_mula_da_ll_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_lddec_encode_fns[] = {
+ Opcode_mula_da_hl_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hl_ldinc_encode_fns[] = {
+ Opcode_mula_da_hl_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_lddec_encode_fns[] = {
+ Opcode_mula_da_lh_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_lh_ldinc_encode_fns[] = {
+ Opcode_mula_da_lh_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_lddec_encode_fns[] = {
+ Opcode_mula_da_hh_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_da_hh_ldinc_encode_fns[] = {
+ Opcode_mula_da_hh_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_lddec_encode_fns[] = {
+ Opcode_mula_dd_ll_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_ll_ldinc_encode_fns[] = {
+ Opcode_mula_dd_ll_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_lddec_encode_fns[] = {
+ Opcode_mula_dd_hl_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hl_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hl_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_lddec_encode_fns[] = {
+ Opcode_mula_dd_lh_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_lh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_lh_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_lddec_encode_fns[] = {
+ Opcode_mula_dd_hh_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mula_dd_hh_ldinc_encode_fns[] = {
+ Opcode_mula_dd_hh_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddec_encode_fns[] = {
+ Opcode_lddec_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldinc_encode_fns[] = {
+ Opcode_ldinc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m0_encode_fns[] = {
+ Opcode_rsr_m0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m0_encode_fns[] = {
+ Opcode_wsr_m0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m0_encode_fns[] = {
+ Opcode_xsr_m0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m1_encode_fns[] = {
+ Opcode_rsr_m1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m1_encode_fns[] = {
+ Opcode_wsr_m1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m1_encode_fns[] = {
+ Opcode_xsr_m1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m2_encode_fns[] = {
+ Opcode_rsr_m2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m2_encode_fns[] = {
+ Opcode_wsr_m2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m2_encode_fns[] = {
+ Opcode_xsr_m2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_m3_encode_fns[] = {
+ Opcode_rsr_m3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_m3_encode_fns[] = {
+ Opcode_wsr_m3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_m3_encode_fns[] = {
+ Opcode_xsr_m3_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acclo_encode_fns[] = {
+ Opcode_rsr_acclo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acclo_encode_fns[] = {
+ Opcode_wsr_acclo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acclo_encode_fns[] = {
+ Opcode_xsr_acclo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_acchi_encode_fns[] = {
+ Opcode_rsr_acchi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_acchi_encode_fns[] = {
+ Opcode_wsr_acchi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_acchi_encode_fns[] = {
+ Opcode_xsr_acchi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka0_encode_fns[] = {
+ Opcode_rsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka0_encode_fns[] = {
+ Opcode_wsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka0_encode_fns[] = {
+ Opcode_xsr_dbreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc0_encode_fns[] = {
+ Opcode_rsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc0_encode_fns[] = {
+ Opcode_wsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc0_encode_fns[] = {
+ Opcode_xsr_dbreakc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreaka1_encode_fns[] = {
+ Opcode_rsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreaka1_encode_fns[] = {
+ Opcode_wsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreaka1_encode_fns[] = {
+ Opcode_xsr_dbreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dbreakc1_encode_fns[] = {
+ Opcode_rsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dbreakc1_encode_fns[] = {
+ Opcode_wsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dbreakc1_encode_fns[] = {
+ Opcode_xsr_dbreakc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka0_encode_fns[] = {
+ Opcode_rsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka0_encode_fns[] = {
+ Opcode_wsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka0_encode_fns[] = {
+ Opcode_xsr_ibreaka0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreaka1_encode_fns[] = {
+ Opcode_rsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreaka1_encode_fns[] = {
+ Opcode_wsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreaka1_encode_fns[] = {
+ Opcode_xsr_ibreaka1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ibreakenable_encode_fns[] = {
+ Opcode_rsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ibreakenable_encode_fns[] = {
+ Opcode_wsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ibreakenable_encode_fns[] = {
+ Opcode_xsr_ibreakenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lddr32_p_encode_fns[] = {
+ Opcode_lddr32_p_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sddr32_p_encode_fns[] = {
+ Opcode_sddr32_p_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_mmid_encode_fns[] = {
+ Opcode_wsr_mmid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_andb_encode_fns[] = {
+ Opcode_andb_Slot_inst_encode, 0, 0, 0, Opcode_andb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_andbc_encode_fns[] = {
+ Opcode_andbc_Slot_inst_encode, 0, 0, 0, Opcode_andbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_orb_encode_fns[] = {
+ Opcode_orb_Slot_inst_encode, 0, 0, 0, Opcode_orb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_orbc_encode_fns[] = {
+ Opcode_orbc_Slot_inst_encode, 0, 0, 0, Opcode_orbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_xorb_encode_fns[] = {
+ Opcode_xorb_Slot_inst_encode, 0, 0, 0, Opcode_xorb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_any4_encode_fns[] = {
+ Opcode_any4_Slot_inst_encode, 0, 0, 0, Opcode_any4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all4_encode_fns[] = {
+ Opcode_all4_Slot_inst_encode, 0, 0, 0, Opcode_all4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_any8_encode_fns[] = {
+ Opcode_any8_Slot_inst_encode, 0, 0, 0, Opcode_any8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all8_encode_fns[] = {
+ Opcode_all8_Slot_inst_encode, 0, 0, 0, Opcode_all8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bf_encode_fns[] = {
+ Opcode_bf_Slot_inst_encode, 0, 0, 0, Opcode_bf_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bt_encode_fns[] = {
+ Opcode_bt_Slot_inst_encode, 0, 0, 0, Opcode_bt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_encode_fns[] = {
+ Opcode_movf_Slot_inst_encode, 0, 0, 0, Opcode_movf_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_encode_fns[] = {
+ Opcode_movt_Slot_inst_encode, 0, 0, 0, Opcode_movt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_br_encode_fns[] = {
+ Opcode_rsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_br_encode_fns[] = {
+ Opcode_wsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_br_encode_fns[] = {
+ Opcode_xsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare2_encode_fns[] = {
+ Opcode_rsr_ccompare2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare2_encode_fns[] = {
+ Opcode_wsr_ccompare2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare2_encode_fns[] = {
+ Opcode_xsr_ccompare2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipfl_encode_fns[] = {
+ Opcode_ipfl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihu_encode_fns[] = {
+ Opcode_ihu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iiu_encode_fns[] = {
+ Opcode_iiu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbui_p_encode_fns[] = {
+ Opcode_diwbui_p_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfl_encode_fns[] = {
+ Opcode_dpfl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhu_encode_fns[] = {
+ Opcode_dhu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diu_encode_fns[] = {
+ Opcode_diu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prefctl_encode_fns[] = {
+ Opcode_rsr_prefctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_prefctl_encode_fns[] = {
+ Opcode_wsr_prefctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_prefctl_encode_fns[] = {
+ Opcode_xsr_prefctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0, 0, Opcode_clamps_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0, 0, Opcode_min_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0, 0, Opcode_max_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0, 0, Opcode_minu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0, 0, Opcode_maxu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0, 0, Opcode_sext_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quou_encode_fns[] = {
+ Opcode_quou_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_quos_encode_fns[] = {
+ Opcode_quos_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_remu_encode_fns[] = {
+ Opcode_remu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rems_encode_fns[] = {
+ Opcode_rems_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0, 0, 0
+};
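+
+/*
+ * RUR/WUR accessors for the AudioEngine user registers (AE_OVF_SAR,
+ * AE_BITHEAD, AE_BITPTR, AE_TABLESIZE, ...).  These are plain core
+ * instructions: only the "inst" slot encoder is populated.
+ */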
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_ovf_sar_encode_fns[] = {
+ Opcode_rur_ae_ovf_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_ovf_sar_encode_fns[] = {
+ Opcode_wur_ae_ovf_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bithead_encode_fns[] = {
+ Opcode_rur_ae_bithead_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bithead_encode_fns[] = {
+ Opcode_wur_ae_bithead_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_ts_fts_bu_bp_encode_fns[] = {
+ Opcode_rur_ae_ts_fts_bu_bp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_ts_fts_bu_bp_encode_fns[] = {
+ Opcode_wur_ae_ts_fts_bu_bp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_sd_no_encode_fns[] = {
+ Opcode_rur_ae_sd_no_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_sd_no_encode_fns[] = {
+ Opcode_wur_ae_sd_no_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_overflow_encode_fns[] = {
+ Opcode_rur_ae_overflow_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_overflow_encode_fns[] = {
+ Opcode_wur_ae_overflow_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_sar_encode_fns[] = {
+ Opcode_rur_ae_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_sar_encode_fns[] = {
+ Opcode_wur_ae_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bitptr_encode_fns[] = {
+ Opcode_rur_ae_bitptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bitptr_encode_fns[] = {
+ Opcode_wur_ae_bitptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bitsused_encode_fns[] = {
+ Opcode_rur_ae_bitsused_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bitsused_encode_fns[] = {
+ Opcode_wur_ae_bitsused_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_tablesize_encode_fns[] = {
+ Opcode_rur_ae_tablesize_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_tablesize_encode_fns[] = {
+ Opcode_wur_ae_tablesize_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_first_ts_encode_fns[] = {
+ Opcode_rur_ae_first_ts_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_first_ts_encode_fns[] = {
+ Opcode_wur_ae_first_ts_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_nextoffset_encode_fns[] = {
+ Opcode_rur_ae_nextoffset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_nextoffset_encode_fns[] = {
+ Opcode_wur_ae_nextoffset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_searchdone_encode_fns[] = {
+ Opcode_rur_ae_searchdone_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_searchdone_encode_fns[] = {
+ Opcode_wur_ae_searchdone_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_i_encode_fns[] = {
+ Opcode_ae_lp16f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_iu_encode_fns[] = {
+ Opcode_ae_lp16f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_x_encode_fns[] = {
+ Opcode_ae_lp16f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_xu_encode_fns[] = {
+ Opcode_ae_lp16f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_i_encode_fns[] = {
+ Opcode_ae_lp24_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_iu_encode_fns[] = {
+ Opcode_ae_lp24_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_x_encode_fns[] = {
+ Opcode_ae_lp24_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_xu_encode_fns[] = {
+ Opcode_ae_lp24_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_i_encode_fns[] = {
+ Opcode_ae_lp24f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_iu_encode_fns[] = {
+ Opcode_ae_lp24f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_x_encode_fns[] = {
+ Opcode_ae_lp24f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_xu_encode_fns[] = {
+ Opcode_ae_lp24f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_i_encode_fns[] = {
+ Opcode_ae_lp16x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_iu_encode_fns[] = {
+ Opcode_ae_lp16x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_x_encode_fns[] = {
+ Opcode_ae_lp16x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_xu_encode_fns[] = {
+ Opcode_ae_lp16x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_i_encode_fns[] = {
+ Opcode_ae_lp24x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_iu_encode_fns[] = {
+ Opcode_ae_lp24x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_x_encode_fns[] = {
+ Opcode_ae_lp24x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_xu_encode_fns[] = {
+ Opcode_ae_lp24x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_i_encode_fns[] = {
+ Opcode_ae_lp24x2_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_iu_encode_fns[] = {
+ Opcode_ae_lp24x2_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_x_encode_fns[] = {
+ Opcode_ae_lp24x2_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_xu_encode_fns[] = {
+ Opcode_ae_lp24x2_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_i_encode_fns[] = {
+ Opcode_ae_sp16x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_iu_encode_fns[] = {
+ Opcode_ae_sp16x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_x_encode_fns[] = {
+ Opcode_ae_sp16x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_xu_encode_fns[] = {
+ Opcode_ae_sp16x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_i_encode_fns[] = {
+ Opcode_ae_sp24x2s_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_iu_encode_fns[] = {
+ Opcode_ae_sp24x2s_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_x_encode_fns[] = {
+ Opcode_ae_sp24x2s_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_xu_encode_fns[] = {
+ Opcode_ae_sp24x2s_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_i_encode_fns[] = {
+ Opcode_ae_sp24x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_iu_encode_fns[] = {
+ Opcode_ae_sp24x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_x_encode_fns[] = {
+ Opcode_ae_sp24x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_xu_encode_fns[] = {
+ Opcode_ae_sp24x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_i_encode_fns[] = {
+ Opcode_ae_sp16f_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_iu_encode_fns[] = {
+ Opcode_ae_sp16f_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_x_encode_fns[] = {
+ Opcode_ae_sp16f_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_xu_encode_fns[] = {
+ Opcode_ae_sp16f_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_i_encode_fns[] = {
+ Opcode_ae_sp24s_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_iu_encode_fns[] = {
+ Opcode_ae_sp24s_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_x_encode_fns[] = {
+ Opcode_ae_sp24s_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_xu_encode_fns[] = {
+ Opcode_ae_sp24s_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_i_encode_fns[] = {
+ Opcode_ae_sp24f_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_iu_encode_fns[] = {
+ Opcode_ae_sp24f_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_x_encode_fns[] = {
+ Opcode_ae_sp24f_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_xu_encode_fns[] = {
+ Opcode_ae_sp24f_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_i_encode_fns[] = {
+ Opcode_ae_lq56_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_iu_encode_fns[] = {
+ Opcode_ae_lq56_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_x_encode_fns[] = {
+ Opcode_ae_lq56_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_xu_encode_fns[] = {
+ Opcode_ae_lq56_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_i_encode_fns[] = {
+ Opcode_ae_lq32f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_iu_encode_fns[] = {
+ Opcode_ae_lq32f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_x_encode_fns[] = {
+ Opcode_ae_lq32f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_xu_encode_fns[] = {
+ Opcode_ae_lq32f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_i_encode_fns[] = {
+ Opcode_ae_sq56s_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_iu_encode_fns[] = {
+ Opcode_ae_sq56s_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_x_encode_fns[] = {
+ Opcode_ae_sq56s_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_xu_encode_fns[] = {
+ Opcode_ae_sq56s_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_i_encode_fns[] = {
+ Opcode_ae_sq32f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_iu_encode_fns[] = {
+ Opcode_ae_sq32f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_x_encode_fns[] = {
+ Opcode_ae_sq32f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_xu_encode_fns[] = {
+ Opcode_ae_sq32f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_xu_Slot_ae_slot0_encode
+};
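+
+/*
+ * AE register-to-register operations.  Moves and conversions typically
+ * populate ae_slot0 and/or the stand-alone "inst" slot, while the P/Q
+ * arithmetic, logic, compare, and shift ops that follow are largely
+ * ae_slot1-only.
+ */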
+
+static xtensa_opcode_encode_fn Opcode_ae_zerop48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_zerop48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movp48_encode_fns[] = {
+ Opcode_ae_movp48_Slot_inst_encode, 0, 0, Opcode_ae_movp48_Slot_ae_slot1_encode, Opcode_ae_movp48_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_ll_Slot_ae_slot1_encode, Opcode_ae_selp24_ll_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_lh_Slot_ae_slot1_encode, Opcode_ae_selp24_lh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_hl_Slot_ae_slot1_encode, Opcode_ae_selp24_hl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_hh_Slot_ae_slot1_encode, Opcode_ae_selp24_hh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtp24x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movtp24x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfp24x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movfp24x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movtp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movfp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movpa24x2_encode_fns[] = {
+ Opcode_ae_movpa24x2_Slot_inst_encode, 0, 0, 0, Opcode_ae_movpa24x2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp24a32x2_encode_fns[] = {
+ Opcode_ae_truncp24a32x2_Slot_inst_encode, 0, 0, 0, Opcode_ae_truncp24a32x2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvta32p24_l_encode_fns[] = {
+ Opcode_ae_cvta32p24_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvta32p24_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvta32p24_h_encode_fns[] = {
+ Opcode_ae_cvta32p24_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvta32p24_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_ll_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_ll_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_ll_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_lh_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_lh_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_lh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_hl_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_hl_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_hl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_hh_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_hh_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_hh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp24q48x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncp24q48x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp16_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncp16_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp24q48sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp24q48sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp24q48asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp24q48asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16q48sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16q48sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16q48asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16q48asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_zeroq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_zeroq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movq56_encode_fns[] = {
+ Opcode_ae_movq56_Slot_inst_encode, 0, 0, Opcode_ae_movq56_Slot_ae_slot1_encode, Opcode_ae_movq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtq56_encode_fns[] = {
+ Opcode_ae_movtq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_movtq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfq56_encode_fns[] = {
+ Opcode_ae_movfq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_movfq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48a32s_encode_fns[] = {
+ Opcode_ae_cvtq48a32s_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtq48a32s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48p24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_cvtq48p24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48p24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_cvtq48p24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_satq48s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_satq48s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncq32_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncq32_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsq32sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsq32sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsq32asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsq32asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca32q48_encode_fns[] = {
+ Opcode_ae_trunca32q48_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca32q48_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movap24s_l_encode_fns[] = {
+ Opcode_ae_movap24s_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_movap24s_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movap24s_h_encode_fns[] = {
+ Opcode_ae_movap24s_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_movap24s_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca16p24s_l_encode_fns[] = {
+ Opcode_ae_trunca16p24s_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca16p24s_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca16p24s_h_encode_fns[] = {
+ Opcode_ae_trunca16p24s_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca16p24s_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_absp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_absp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxbp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxbp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minbp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minbp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_abssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_abssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_andp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_andp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nandp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_nandp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_orp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_orp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_xorp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_xorp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_ltp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_ltp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lep24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_lep24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_eqp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_eqp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_absq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_absq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxbq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxbq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minbq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minbq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_abssq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_abssq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_andq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_andq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nandq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_nandq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_orq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_orq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_xorq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_xorq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srlip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sraip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllsp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllsp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlsp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srlsp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srasp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srasp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllisp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllisp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_slliq56_encode_fns[] = {
+ Opcode_ae_slliq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_slliq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srliq56_encode_fns[] = {
+ Opcode_ae_srliq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srliq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraiq56_encode_fns[] = {
+ Opcode_ae_sraiq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sraiq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllsq56_encode_fns[] = {
+ Opcode_ae_sllsq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllsq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlsq56_encode_fns[] = {
+ Opcode_ae_srlsq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srlsq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srasq56_encode_fns[] = {
+ Opcode_ae_srasq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srasq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllaq56_encode_fns[] = {
+ Opcode_ae_sllaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlaq56_encode_fns[] = {
+ Opcode_ae_srlaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srlaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraaq56_encode_fns[] = {
+ Opcode_ae_sraaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sraaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllisq56s_encode_fns[] = {
+ Opcode_ae_sllisq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllisq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllssq56s_encode_fns[] = {
+ Opcode_ae_sllssq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllssq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllasq56s_encode_fns[] = {
+ Opcode_ae_sllasq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllasq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_ltq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_ltq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_leq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_leq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_eqq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_eqq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nsaq56s_encode_fns[] = {
+ Opcode_ae_nsaq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_nsaq56s_Slot_ae_slot0_encode
+};
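+
+/*
+ * AE multiply and multiply-accumulate family (MUL/MULA/MULS/MULZ* on
+ * 24-bit P and 32/56-bit Q operands).  Every variant in this block is
+ * encodable only in ae_slot1.
+ */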
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsrfq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsrfq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsrfq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsrfq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mularfq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mularfq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mularfq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mularfq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulrfq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulrfq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulrfq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulrfq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sha32_encode_fns[] = {
+ Opcode_ae_sha32_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl32t_encode_fns[] = {
+ Opcode_ae_vldl32t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl16t_encode_fns[] = {
+ Opcode_ae_vldl16t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl16c_encode_fns[] = {
+ Opcode_ae_vldl16c_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldsht_encode_fns[] = {
+ Opcode_ae_vldsht_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lb_encode_fns[] = {
+ Opcode_ae_lb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbi_encode_fns[] = {
+ Opcode_ae_lbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbk_encode_fns[] = {
+ Opcode_ae_lbk_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbki_encode_fns[] = {
+ Opcode_ae_lbki_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_db_encode_fns[] = {
+ Opcode_ae_db_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_dbi_encode_fns[] = {
+ Opcode_ae_dbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vlel32t_encode_fns[] = {
+ Opcode_ae_vlel32t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vlel16t_encode_fns[] = {
+ Opcode_ae_vlel16t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sb_encode_fns[] = {
+ Opcode_ae_sb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sbi_encode_fns[] = {
+ Opcode_ae_sbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vles16c_encode_fns[] = {
+ Opcode_ae_vles16c_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sbf_encode_fns[] = {
+ Opcode_ae_sbf_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_slaasq56s_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_slaasq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addbrba32_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_addbrba32_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minabssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minabssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxabssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxabssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minabssq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minabssq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxabssq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxabssq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_cbegin0_encode_fns[] = {
+ Opcode_rur_ae_cbegin0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_cbegin0_encode_fns[] = {
+ Opcode_wur_ae_cbegin0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_cend0_encode_fns[] = {
+ Opcode_rur_ae_cend0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_cend0_encode_fns[] = {
+ Opcode_wur_ae_cend0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp24x2_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp24x2s_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp24x2f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp24x2f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp16x2f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp16x2f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp24_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp24s_l_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp24f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp24f_l_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lp16f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sp16f_l_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lq56_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sq56s_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_lq32f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_c_encode_fns[] = {
+ 0, 0, 0, 0, Opcode_ae_sq32f_c_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_expstate_encode_fns[] = {
+ Opcode_rur_expstate_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_expstate_encode_fns[] = {
+ Opcode_wur_expstate_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_read_impwire_encode_fns[] = {
+ Opcode_read_impwire_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_setb_expstate_encode_fns[] = {
+ Opcode_setb_expstate_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clrb_expstate_encode_fns[] = {
+ Opcode_clrb_expstate_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wrmsk_expstate_encode_fns[] = {
+ Opcode_wrmsk_expstate_Slot_inst_encode, 0, 0, 0, 0
+};
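+
+/*
+ * Each *_encode_fns[] table above holds one encode function per
+ * instruction-format slot, indexed by slot ID; a zero entry means the
+ * opcode cannot be encoded in that slot.  Judging by the naming in this
+ * generated file, index 0 appears to be the core "inst" slot and indices
+ * 3 and 4 the AE "ae_slot1" and "ae_slot0" slots.
+ */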
+
+
+
+
+
+/* Opcode table. */
+
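+/*
+ * Each xtensa_funcUnit_use entry below pairs a functional unit with the
+ * pipeline stage at which the opcode uses it: { FUNCUNIT_ae_add32, 3 }
+ * records a use of the ae_add32 unit at stage 3.  Opcodes with no such
+ * table carry a zero use count in opcodes[].
+ */
+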
+static xtensa_funcUnit_use Opcode_ae_vldl32t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldl16t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldl16c_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_shift32x5, 3 },
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldsht_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_shift32x5, 3 },
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lb_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbk_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbki_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_db_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_dbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vlel32t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vlel16t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sb_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vles16c_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sbf_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
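+/*
+ * Each opcodes[] entry is { mnemonic, iclass, XTENSA_OPCODE_IS_* flags,
+ * encode-function table, funcUnit-use count, funcUnit-use table }; the
+ * trailing "0, 0" on the entries below means the opcode has no modelled
+ * functional-unit uses.
+ */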
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "rur.threadptr", ICLASS_rur_threadptr,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", ICLASS_wur_threadptr,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s32nb", ICLASS_xt_iclass_s32nb,
+ 0,
+ Opcode_s32nb_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.memctl", ICLASS_xt_iclass_rsr_memctl,
+ 0,
+ Opcode_rsr_memctl_encode_fns, 0, 0 },
+ { "wsr.memctl", ICLASS_xt_iclass_wsr_memctl,
+ 0,
+ Opcode_wsr_memctl_encode_fns, 0, 0 },
+ { "xsr.memctl", ICLASS_xt_iclass_xsr_memctl,
+ 0,
+ Opcode_xsr_memctl_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.configid0", ICLASS_xt_iclass_rsr_configid0,
+ 0,
+ Opcode_rsr_configid0_encode_fns, 0, 0 },
+ { "wsr.configid0", ICLASS_xt_iclass_wsr_configid0,
+ 0,
+ Opcode_wsr_configid0_encode_fns, 0, 0 },
+ { "rsr.configid1", ICLASS_xt_iclass_rsr_configid1,
+ 0,
+ Opcode_rsr_configid1_encode_fns, 0, 0 },
+ { "rsr.243", ICLASS_xt_iclass_rsr_243,
+ 0,
+ Opcode_rsr_243_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.epc3", ICLASS_xt_iclass_rsr_epc3,
+ 0,
+ Opcode_rsr_epc3_encode_fns, 0, 0 },
+ { "wsr.epc3", ICLASS_xt_iclass_wsr_epc3,
+ 0,
+ Opcode_wsr_epc3_encode_fns, 0, 0 },
+ { "xsr.epc3", ICLASS_xt_iclass_xsr_epc3,
+ 0,
+ Opcode_xsr_epc3_encode_fns, 0, 0 },
+ { "rsr.excsave3", ICLASS_xt_iclass_rsr_excsave3,
+ 0,
+ Opcode_rsr_excsave3_encode_fns, 0, 0 },
+ { "wsr.excsave3", ICLASS_xt_iclass_wsr_excsave3,
+ 0,
+ Opcode_wsr_excsave3_encode_fns, 0, 0 },
+ { "xsr.excsave3", ICLASS_xt_iclass_xsr_excsave3,
+ 0,
+ Opcode_xsr_excsave3_encode_fns, 0, 0 },
+ { "rsr.epc4", ICLASS_xt_iclass_rsr_epc4,
+ 0,
+ Opcode_rsr_epc4_encode_fns, 0, 0 },
+ { "wsr.epc4", ICLASS_xt_iclass_wsr_epc4,
+ 0,
+ Opcode_wsr_epc4_encode_fns, 0, 0 },
+ { "xsr.epc4", ICLASS_xt_iclass_xsr_epc4,
+ 0,
+ Opcode_xsr_epc4_encode_fns, 0, 0 },
+ { "rsr.excsave4", ICLASS_xt_iclass_rsr_excsave4,
+ 0,
+ Opcode_rsr_excsave4_encode_fns, 0, 0 },
+ { "wsr.excsave4", ICLASS_xt_iclass_wsr_excsave4,
+ 0,
+ Opcode_wsr_excsave4_encode_fns, 0, 0 },
+ { "xsr.excsave4", ICLASS_xt_iclass_xsr_excsave4,
+ 0,
+ Opcode_xsr_excsave4_encode_fns, 0, 0 },
+ { "rsr.epc5", ICLASS_xt_iclass_rsr_epc5,
+ 0,
+ Opcode_rsr_epc5_encode_fns, 0, 0 },
+ { "wsr.epc5", ICLASS_xt_iclass_wsr_epc5,
+ 0,
+ Opcode_wsr_epc5_encode_fns, 0, 0 },
+ { "xsr.epc5", ICLASS_xt_iclass_xsr_epc5,
+ 0,
+ Opcode_xsr_epc5_encode_fns, 0, 0 },
+ { "rsr.excsave5", ICLASS_xt_iclass_rsr_excsave5,
+ 0,
+ Opcode_rsr_excsave5_encode_fns, 0, 0 },
+ { "wsr.excsave5", ICLASS_xt_iclass_wsr_excsave5,
+ 0,
+ Opcode_wsr_excsave5_encode_fns, 0, 0 },
+ { "xsr.excsave5", ICLASS_xt_iclass_xsr_excsave5,
+ 0,
+ Opcode_xsr_excsave5_encode_fns, 0, 0 },
+ { "rsr.epc6", ICLASS_xt_iclass_rsr_epc6,
+ 0,
+ Opcode_rsr_epc6_encode_fns, 0, 0 },
+ { "wsr.epc6", ICLASS_xt_iclass_wsr_epc6,
+ 0,
+ Opcode_wsr_epc6_encode_fns, 0, 0 },
+ { "xsr.epc6", ICLASS_xt_iclass_xsr_epc6,
+ 0,
+ Opcode_xsr_epc6_encode_fns, 0, 0 },
+ { "rsr.excsave6", ICLASS_xt_iclass_rsr_excsave6,
+ 0,
+ Opcode_rsr_excsave6_encode_fns, 0, 0 },
+ { "wsr.excsave6", ICLASS_xt_iclass_wsr_excsave6,
+ 0,
+ Opcode_wsr_excsave6_encode_fns, 0, 0 },
+ { "xsr.excsave6", ICLASS_xt_iclass_xsr_excsave6,
+ 0,
+ Opcode_xsr_excsave6_encode_fns, 0, 0 },
+ { "rsr.epc7", ICLASS_xt_iclass_rsr_epc7,
+ 0,
+ Opcode_rsr_epc7_encode_fns, 0, 0 },
+ { "wsr.epc7", ICLASS_xt_iclass_wsr_epc7,
+ 0,
+ Opcode_wsr_epc7_encode_fns, 0, 0 },
+ { "xsr.epc7", ICLASS_xt_iclass_xsr_epc7,
+ 0,
+ Opcode_xsr_epc7_encode_fns, 0, 0 },
+ { "rsr.excsave7", ICLASS_xt_iclass_rsr_excsave7,
+ 0,
+ Opcode_rsr_excsave7_encode_fns, 0, 0 },
+ { "wsr.excsave7", ICLASS_xt_iclass_wsr_excsave7,
+ 0,
+ Opcode_wsr_excsave7_encode_fns, 0, 0 },
+ { "xsr.excsave7", ICLASS_xt_iclass_xsr_excsave7,
+ 0,
+ Opcode_xsr_excsave7_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.eps3", ICLASS_xt_iclass_rsr_eps3,
+ 0,
+ Opcode_rsr_eps3_encode_fns, 0, 0 },
+ { "wsr.eps3", ICLASS_xt_iclass_wsr_eps3,
+ 0,
+ Opcode_wsr_eps3_encode_fns, 0, 0 },
+ { "xsr.eps3", ICLASS_xt_iclass_xsr_eps3,
+ 0,
+ Opcode_xsr_eps3_encode_fns, 0, 0 },
+ { "rsr.eps4", ICLASS_xt_iclass_rsr_eps4,
+ 0,
+ Opcode_rsr_eps4_encode_fns, 0, 0 },
+ { "wsr.eps4", ICLASS_xt_iclass_wsr_eps4,
+ 0,
+ Opcode_wsr_eps4_encode_fns, 0, 0 },
+ { "xsr.eps4", ICLASS_xt_iclass_xsr_eps4,
+ 0,
+ Opcode_xsr_eps4_encode_fns, 0, 0 },
+ { "rsr.eps5", ICLASS_xt_iclass_rsr_eps5,
+ 0,
+ Opcode_rsr_eps5_encode_fns, 0, 0 },
+ { "wsr.eps5", ICLASS_xt_iclass_wsr_eps5,
+ 0,
+ Opcode_wsr_eps5_encode_fns, 0, 0 },
+ { "xsr.eps5", ICLASS_xt_iclass_xsr_eps5,
+ 0,
+ Opcode_xsr_eps5_encode_fns, 0, 0 },
+ { "rsr.eps6", ICLASS_xt_iclass_rsr_eps6,
+ 0,
+ Opcode_rsr_eps6_encode_fns, 0, 0 },
+ { "wsr.eps6", ICLASS_xt_iclass_wsr_eps6,
+ 0,
+ Opcode_wsr_eps6_encode_fns, 0, 0 },
+ { "xsr.eps6", ICLASS_xt_iclass_xsr_eps6,
+ 0,
+ Opcode_xsr_eps6_encode_fns, 0, 0 },
+ { "rsr.eps7", ICLASS_xt_iclass_rsr_eps7,
+ 0,
+ Opcode_rsr_eps7_encode_fns, 0, 0 },
+ { "wsr.eps7", ICLASS_xt_iclass_wsr_eps7,
+ 0,
+ Opcode_wsr_eps7_encode_fns, 0, 0 },
+ { "xsr.eps7", ICLASS_xt_iclass_xsr_eps7,
+ 0,
+ Opcode_xsr_eps7_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "muluh", ICLASS_xt_mul32h,
+ 0,
+ Opcode_muluh_encode_fns, 0, 0 },
+ { "mulsh", ICLASS_xt_mul32h,
+ 0,
+ Opcode_mulsh_encode_fns, 0, 0 },
+ { "mul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_ll_encode_fns, 0, 0 },
+ { "mul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hl_encode_fns, 0, 0 },
+ { "mul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_lh_encode_fns, 0, 0 },
+ { "mul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_mul_aa_hh_encode_fns, 0, 0 },
+ { "umul.aa.ll", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_ll_encode_fns, 0, 0 },
+ { "umul.aa.hl", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hl_encode_fns, 0, 0 },
+ { "umul.aa.lh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_lh_encode_fns, 0, 0 },
+ { "umul.aa.hh", ICLASS_xt_iclass_mac16_aa,
+ 0,
+ Opcode_umul_aa_hh_encode_fns, 0, 0 },
+ { "mul.ad.ll", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_ll_encode_fns, 0, 0 },
+ { "mul.ad.hl", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hl_encode_fns, 0, 0 },
+ { "mul.ad.lh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_lh_encode_fns, 0, 0 },
+ { "mul.ad.hh", ICLASS_xt_iclass_mac16_ad,
+ 0,
+ Opcode_mul_ad_hh_encode_fns, 0, 0 },
+ { "mul.da.ll", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_ll_encode_fns, 0, 0 },
+ { "mul.da.hl", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hl_encode_fns, 0, 0 },
+ { "mul.da.lh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_lh_encode_fns, 0, 0 },
+ { "mul.da.hh", ICLASS_xt_iclass_mac16_da,
+ 0,
+ Opcode_mul_da_hh_encode_fns, 0, 0 },
+ { "mul.dd.ll", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_ll_encode_fns, 0, 0 },
+ { "mul.dd.hl", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hl_encode_fns, 0, 0 },
+ { "mul.dd.lh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_lh_encode_fns, 0, 0 },
+ { "mul.dd.hh", ICLASS_xt_iclass_mac16_dd,
+ 0,
+ Opcode_mul_dd_hh_encode_fns, 0, 0 },
+ { "mula.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_ll_encode_fns, 0, 0 },
+ { "mula.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hl_encode_fns, 0, 0 },
+ { "mula.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_lh_encode_fns, 0, 0 },
+ { "mula.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_mula_aa_hh_encode_fns, 0, 0 },
+ { "muls.aa.ll", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_ll_encode_fns, 0, 0 },
+ { "muls.aa.hl", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hl_encode_fns, 0, 0 },
+ { "muls.aa.lh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_lh_encode_fns, 0, 0 },
+ { "muls.aa.hh", ICLASS_xt_iclass_mac16a_aa,
+ 0,
+ Opcode_muls_aa_hh_encode_fns, 0, 0 },
+ { "mula.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_ll_encode_fns, 0, 0 },
+ { "mula.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hl_encode_fns, 0, 0 },
+ { "mula.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_lh_encode_fns, 0, 0 },
+ { "mula.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_mula_ad_hh_encode_fns, 0, 0 },
+ { "muls.ad.ll", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_ll_encode_fns, 0, 0 },
+ { "muls.ad.hl", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hl_encode_fns, 0, 0 },
+ { "muls.ad.lh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_lh_encode_fns, 0, 0 },
+ { "muls.ad.hh", ICLASS_xt_iclass_mac16a_ad,
+ 0,
+ Opcode_muls_ad_hh_encode_fns, 0, 0 },
+ { "mula.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_ll_encode_fns, 0, 0 },
+ { "mula.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hl_encode_fns, 0, 0 },
+ { "mula.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_lh_encode_fns, 0, 0 },
+ { "mula.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_mula_da_hh_encode_fns, 0, 0 },
+ { "muls.da.ll", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_ll_encode_fns, 0, 0 },
+ { "muls.da.hl", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hl_encode_fns, 0, 0 },
+ { "muls.da.lh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_lh_encode_fns, 0, 0 },
+ { "muls.da.hh", ICLASS_xt_iclass_mac16a_da,
+ 0,
+ Opcode_muls_da_hh_encode_fns, 0, 0 },
+ { "mula.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_ll_encode_fns, 0, 0 },
+ { "mula.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hl_encode_fns, 0, 0 },
+ { "mula.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_lh_encode_fns, 0, 0 },
+ { "mula.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_mula_dd_hh_encode_fns, 0, 0 },
+ { "muls.dd.ll", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_ll_encode_fns, 0, 0 },
+ { "muls.dd.hl", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hl_encode_fns, 0, 0 },
+ { "muls.dd.lh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_lh_encode_fns, 0, 0 },
+ { "muls.dd.hh", ICLASS_xt_iclass_mac16a_dd,
+ 0,
+ Opcode_muls_dd_hh_encode_fns, 0, 0 },
+ { "mula.da.ll.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_lddec_encode_fns, 0, 0 },
+ { "mula.da.ll.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hl.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_lddec_encode_fns, 0, 0 },
+ { "mula.da.hl.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.da.lh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_lddec_encode_fns, 0, 0 },
+ { "mula.da.lh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.da.hh.lddec", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_lddec_encode_fns, 0, 0 },
+ { "mula.da.hh.ldinc", ICLASS_xt_iclass_mac16al_da,
+ 0,
+ Opcode_mula_da_hh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.ll.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_lddec_encode_fns, 0, 0 },
+ { "mula.dd.ll.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_ll_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hl.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hl.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hl_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.lh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.lh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_lh_ldinc_encode_fns, 0, 0 },
+ { "mula.dd.hh.lddec", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_lddec_encode_fns, 0, 0 },
+ { "mula.dd.hh.ldinc", ICLASS_xt_iclass_mac16al_dd,
+ 0,
+ Opcode_mula_dd_hh_ldinc_encode_fns, 0, 0 },
+ { "lddec", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_lddec_encode_fns, 0, 0 },
+ { "ldinc", ICLASS_xt_iclass_mac16_l,
+ 0,
+ Opcode_ldinc_encode_fns, 0, 0 },
+ { "rsr.m0", ICLASS_xt_iclass_rsr_m0,
+ 0,
+ Opcode_rsr_m0_encode_fns, 0, 0 },
+ { "wsr.m0", ICLASS_xt_iclass_wsr_m0,
+ 0,
+ Opcode_wsr_m0_encode_fns, 0, 0 },
+ { "xsr.m0", ICLASS_xt_iclass_xsr_m0,
+ 0,
+ Opcode_xsr_m0_encode_fns, 0, 0 },
+ { "rsr.m1", ICLASS_xt_iclass_rsr_m1,
+ 0,
+ Opcode_rsr_m1_encode_fns, 0, 0 },
+ { "wsr.m1", ICLASS_xt_iclass_wsr_m1,
+ 0,
+ Opcode_wsr_m1_encode_fns, 0, 0 },
+ { "xsr.m1", ICLASS_xt_iclass_xsr_m1,
+ 0,
+ Opcode_xsr_m1_encode_fns, 0, 0 },
+ { "rsr.m2", ICLASS_xt_iclass_rsr_m2,
+ 0,
+ Opcode_rsr_m2_encode_fns, 0, 0 },
+ { "wsr.m2", ICLASS_xt_iclass_wsr_m2,
+ 0,
+ Opcode_wsr_m2_encode_fns, 0, 0 },
+ { "xsr.m2", ICLASS_xt_iclass_xsr_m2,
+ 0,
+ Opcode_xsr_m2_encode_fns, 0, 0 },
+ { "rsr.m3", ICLASS_xt_iclass_rsr_m3,
+ 0,
+ Opcode_rsr_m3_encode_fns, 0, 0 },
+ { "wsr.m3", ICLASS_xt_iclass_wsr_m3,
+ 0,
+ Opcode_wsr_m3_encode_fns, 0, 0 },
+ { "xsr.m3", ICLASS_xt_iclass_xsr_m3,
+ 0,
+ Opcode_xsr_m3_encode_fns, 0, 0 },
+ { "rsr.acclo", ICLASS_xt_iclass_rsr_acclo,
+ 0,
+ Opcode_rsr_acclo_encode_fns, 0, 0 },
+ { "wsr.acclo", ICLASS_xt_iclass_wsr_acclo,
+ 0,
+ Opcode_wsr_acclo_encode_fns, 0, 0 },
+ { "xsr.acclo", ICLASS_xt_iclass_xsr_acclo,
+ 0,
+ Opcode_xsr_acclo_encode_fns, 0, 0 },
+ { "rsr.acchi", ICLASS_xt_iclass_rsr_acchi,
+ 0,
+ Opcode_rsr_acchi_encode_fns, 0, 0 },
+ { "wsr.acchi", ICLASS_xt_iclass_wsr_acchi,
+ 0,
+ Opcode_wsr_acchi_encode_fns, 0, 0 },
+ { "xsr.acchi", ICLASS_xt_iclass_xsr_acchi,
+ 0,
+ Opcode_xsr_acchi_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.dbreaka0", ICLASS_xt_iclass_rsr_dbreaka0,
+ 0,
+ Opcode_rsr_dbreaka0_encode_fns, 0, 0 },
+ { "wsr.dbreaka0", ICLASS_xt_iclass_wsr_dbreaka0,
+ 0,
+ Opcode_wsr_dbreaka0_encode_fns, 0, 0 },
+ { "xsr.dbreaka0", ICLASS_xt_iclass_xsr_dbreaka0,
+ 0,
+ Opcode_xsr_dbreaka0_encode_fns, 0, 0 },
+ { "rsr.dbreakc0", ICLASS_xt_iclass_rsr_dbreakc0,
+ 0,
+ Opcode_rsr_dbreakc0_encode_fns, 0, 0 },
+ { "wsr.dbreakc0", ICLASS_xt_iclass_wsr_dbreakc0,
+ 0,
+ Opcode_wsr_dbreakc0_encode_fns, 0, 0 },
+ { "xsr.dbreakc0", ICLASS_xt_iclass_xsr_dbreakc0,
+ 0,
+ Opcode_xsr_dbreakc0_encode_fns, 0, 0 },
+ { "rsr.dbreaka1", ICLASS_xt_iclass_rsr_dbreaka1,
+ 0,
+ Opcode_rsr_dbreaka1_encode_fns, 0, 0 },
+ { "wsr.dbreaka1", ICLASS_xt_iclass_wsr_dbreaka1,
+ 0,
+ Opcode_wsr_dbreaka1_encode_fns, 0, 0 },
+ { "xsr.dbreaka1", ICLASS_xt_iclass_xsr_dbreaka1,
+ 0,
+ Opcode_xsr_dbreaka1_encode_fns, 0, 0 },
+ { "rsr.dbreakc1", ICLASS_xt_iclass_rsr_dbreakc1,
+ 0,
+ Opcode_rsr_dbreakc1_encode_fns, 0, 0 },
+ { "wsr.dbreakc1", ICLASS_xt_iclass_wsr_dbreakc1,
+ 0,
+ Opcode_wsr_dbreakc1_encode_fns, 0, 0 },
+ { "xsr.dbreakc1", ICLASS_xt_iclass_xsr_dbreakc1,
+ 0,
+ Opcode_xsr_dbreakc1_encode_fns, 0, 0 },
+ { "rsr.ibreaka0", ICLASS_xt_iclass_rsr_ibreaka0,
+ 0,
+ Opcode_rsr_ibreaka0_encode_fns, 0, 0 },
+ { "wsr.ibreaka0", ICLASS_xt_iclass_wsr_ibreaka0,
+ 0,
+ Opcode_wsr_ibreaka0_encode_fns, 0, 0 },
+ { "xsr.ibreaka0", ICLASS_xt_iclass_xsr_ibreaka0,
+ 0,
+ Opcode_xsr_ibreaka0_encode_fns, 0, 0 },
+ { "rsr.ibreaka1", ICLASS_xt_iclass_rsr_ibreaka1,
+ 0,
+ Opcode_rsr_ibreaka1_encode_fns, 0, 0 },
+ { "wsr.ibreaka1", ICLASS_xt_iclass_wsr_ibreaka1,
+ 0,
+ Opcode_wsr_ibreaka1_encode_fns, 0, 0 },
+ { "xsr.ibreaka1", ICLASS_xt_iclass_xsr_ibreaka1,
+ 0,
+ Opcode_xsr_ibreaka1_encode_fns, 0, 0 },
+ { "rsr.ibreakenable", ICLASS_xt_iclass_rsr_ibreakenable,
+ 0,
+ Opcode_rsr_ibreakenable_encode_fns, 0, 0 },
+ { "wsr.ibreakenable", ICLASS_xt_iclass_wsr_ibreakenable,
+ 0,
+ Opcode_wsr_ibreakenable_encode_fns, 0, 0 },
+ { "xsr.ibreakenable", ICLASS_xt_iclass_xsr_ibreakenable,
+ 0,
+ Opcode_xsr_ibreakenable_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "lddr32.p", ICLASS_xt_iclass_lddr32_p,
+ 0,
+ Opcode_lddr32_p_encode_fns, 0, 0 },
+ { "sddr32.p", ICLASS_xt_iclass_sddr32_p,
+ 0,
+ Opcode_sddr32_p_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "wsr.mmid", ICLASS_xt_iclass_wsr_mmid,
+ 0,
+ Opcode_wsr_mmid_encode_fns, 0, 0 },
+ { "andb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andb_encode_fns, 0, 0 },
+ { "andbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andbc_encode_fns, 0, 0 },
+ { "orb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orb_encode_fns, 0, 0 },
+ { "orbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orbc_encode_fns, 0, 0 },
+ { "xorb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_xorb_encode_fns, 0, 0 },
+ { "any4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_any4_encode_fns, 0, 0 },
+ { "all4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_all4_encode_fns, 0, 0 },
+ { "any8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_any8_encode_fns, 0, 0 },
+ { "all8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_all8_encode_fns, 0, 0 },
+ { "bf", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bf_encode_fns, 0, 0 },
+ { "bt", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bt_encode_fns, 0, 0 },
+ { "movf", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movf_encode_fns, 0, 0 },
+ { "movt", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movt_encode_fns, 0, 0 },
+ { "rsr.br", ICLASS_xt_iclass_RSR_BR,
+ 0,
+ Opcode_rsr_br_encode_fns, 0, 0 },
+ { "wsr.br", ICLASS_xt_iclass_WSR_BR,
+ 0,
+ Opcode_wsr_br_encode_fns, 0, 0 },
+ { "xsr.br", ICLASS_xt_iclass_XSR_BR,
+ 0,
+ Opcode_xsr_br_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "rsr.ccompare2", ICLASS_xt_iclass_rsr_ccompare2,
+ 0,
+ Opcode_rsr_ccompare2_encode_fns, 0, 0 },
+ { "wsr.ccompare2", ICLASS_xt_iclass_wsr_ccompare2,
+ 0,
+ Opcode_wsr_ccompare2_encode_fns, 0, 0 },
+ { "xsr.ccompare2", ICLASS_xt_iclass_xsr_ccompare2,
+ 0,
+ Opcode_xsr_ccompare2_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "ipfl", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ipfl_encode_fns, 0, 0 },
+ { "ihu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_ihu_encode_fns, 0, 0 },
+ { "iiu", ICLASS_xt_iclass_icache_lock,
+ 0,
+ Opcode_iiu_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwbui.p", ICLASS_xt_iclass_dcache_dyn,
+ 0,
+ Opcode_diwbui_p_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "dpfl", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dpfl_encode_fns, 0, 0 },
+ { "dhu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_dhu_encode_fns, 0, 0 },
+ { "diu", ICLASS_xt_iclass_dcache_lock,
+ 0,
+ Opcode_diu_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "rsr.prefctl", ICLASS_xt_iclass_rsr_prefctl,
+ 0,
+ Opcode_rsr_prefctl_encode_fns, 0, 0 },
+ { "wsr.prefctl", ICLASS_xt_iclass_wsr_prefctl,
+ 0,
+ Opcode_wsr_prefctl_encode_fns, 0, 0 },
+ { "xsr.prefctl", ICLASS_xt_iclass_xsr_prefctl,
+ 0,
+ Opcode_xsr_prefctl_encode_fns, 0, 0 },
+ { "wsr.ptevaddr", ICLASS_xt_iclass_wsr_ptevaddr,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", ICLASS_xt_iclass_rsr_ptevaddr,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", ICLASS_xt_iclass_xsr_ptevaddr,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", ICLASS_xt_iclass_rsr_rasid,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", ICLASS_xt_iclass_wsr_rasid,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", ICLASS_xt_iclass_xsr_rasid,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", ICLASS_xt_iclass_rsr_itlbcfg,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", ICLASS_xt_iclass_wsr_itlbcfg,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", ICLASS_xt_iclass_xsr_itlbcfg,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", ICLASS_xt_iclass_rsr_dtlbcfg,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", ICLASS_xt_iclass_wsr_dtlbcfg,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", ICLASS_xt_iclass_xsr_dtlbcfg,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", ICLASS_xt_iclass_ldpte,
+ 0,
+ Opcode_ldpte_encode_fns, 0, 0 },
+ { "hwwitlba", ICLASS_xt_iclass_hwwitlba,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", ICLASS_xt_iclass_hwwdtlba,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "rsr.cpenable", ICLASS_xt_iclass_rsr_cpenable,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", ICLASS_xt_iclass_wsr_cpenable,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", ICLASS_xt_iclass_xsr_cpenable,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "quou", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quou_encode_fns, 0, 0 },
+ { "quos", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_quos_encode_fns, 0, 0 },
+ { "remu", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_remu_encode_fns, 0, 0 },
+ { "rems", ICLASS_xt_iclass_div,
+ 0,
+ Opcode_rems_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.ae_ovf_sar", ICLASS_rur_ae_ovf_sar,
+ 0,
+ Opcode_rur_ae_ovf_sar_encode_fns, 0, 0 },
+ { "wur.ae_ovf_sar", ICLASS_wur_ae_ovf_sar,
+ 0,
+ Opcode_wur_ae_ovf_sar_encode_fns, 0, 0 },
+ { "rur.ae_bithead", ICLASS_rur_ae_bithead,
+ 0,
+ Opcode_rur_ae_bithead_encode_fns, 0, 0 },
+ { "wur.ae_bithead", ICLASS_wur_ae_bithead,
+ 0,
+ Opcode_wur_ae_bithead_encode_fns, 0, 0 },
+ { "rur.ae_ts_fts_bu_bp", ICLASS_rur_ae_ts_fts_bu_bp,
+ 0,
+ Opcode_rur_ae_ts_fts_bu_bp_encode_fns, 0, 0 },
+ { "wur.ae_ts_fts_bu_bp", ICLASS_wur_ae_ts_fts_bu_bp,
+ 0,
+ Opcode_wur_ae_ts_fts_bu_bp_encode_fns, 0, 0 },
+ { "rur.ae_sd_no", ICLASS_rur_ae_sd_no,
+ 0,
+ Opcode_rur_ae_sd_no_encode_fns, 0, 0 },
+ { "wur.ae_sd_no", ICLASS_wur_ae_sd_no,
+ 0,
+ Opcode_wur_ae_sd_no_encode_fns, 0, 0 },
+ { "rur.ae_overflow", ICLASS_ae_iclass_rur_ae_overflow,
+ 0,
+ Opcode_rur_ae_overflow_encode_fns, 0, 0 },
+ { "wur.ae_overflow", ICLASS_ae_iclass_wur_ae_overflow,
+ 0,
+ Opcode_wur_ae_overflow_encode_fns, 0, 0 },
+ { "rur.ae_sar", ICLASS_ae_iclass_rur_ae_sar,
+ 0,
+ Opcode_rur_ae_sar_encode_fns, 0, 0 },
+ { "wur.ae_sar", ICLASS_ae_iclass_wur_ae_sar,
+ 0,
+ Opcode_wur_ae_sar_encode_fns, 0, 0 },
+ { "rur.ae_bitptr", ICLASS_ae_iclass_rur_ae_bitptr,
+ 0,
+ Opcode_rur_ae_bitptr_encode_fns, 0, 0 },
+ { "wur.ae_bitptr", ICLASS_ae_iclass_wur_ae_bitptr,
+ 0,
+ Opcode_wur_ae_bitptr_encode_fns, 0, 0 },
+ { "rur.ae_bitsused", ICLASS_ae_iclass_rur_ae_bitsused,
+ 0,
+ Opcode_rur_ae_bitsused_encode_fns, 0, 0 },
+ { "wur.ae_bitsused", ICLASS_ae_iclass_wur_ae_bitsused,
+ 0,
+ Opcode_wur_ae_bitsused_encode_fns, 0, 0 },
+ { "rur.ae_tablesize", ICLASS_ae_iclass_rur_ae_tablesize,
+ 0,
+ Opcode_rur_ae_tablesize_encode_fns, 0, 0 },
+ { "wur.ae_tablesize", ICLASS_ae_iclass_wur_ae_tablesize,
+ 0,
+ Opcode_wur_ae_tablesize_encode_fns, 0, 0 },
+ { "rur.ae_first_ts", ICLASS_ae_iclass_rur_ae_first_ts,
+ 0,
+ Opcode_rur_ae_first_ts_encode_fns, 0, 0 },
+ { "wur.ae_first_ts", ICLASS_ae_iclass_wur_ae_first_ts,
+ 0,
+ Opcode_wur_ae_first_ts_encode_fns, 0, 0 },
+ { "rur.ae_nextoffset", ICLASS_ae_iclass_rur_ae_nextoffset,
+ 0,
+ Opcode_rur_ae_nextoffset_encode_fns, 0, 0 },
+ { "wur.ae_nextoffset", ICLASS_ae_iclass_wur_ae_nextoffset,
+ 0,
+ Opcode_wur_ae_nextoffset_encode_fns, 0, 0 },
+ { "rur.ae_searchdone", ICLASS_ae_iclass_rur_ae_searchdone,
+ 0,
+ Opcode_rur_ae_searchdone_encode_fns, 0, 0 },
+ { "wur.ae_searchdone", ICLASS_ae_iclass_wur_ae_searchdone,
+ 0,
+ Opcode_wur_ae_searchdone_encode_fns, 0, 0 },
+ { "ae_lp16f.i", ICLASS_ae_iclass_lp16f_i,
+ 0,
+ Opcode_ae_lp16f_i_encode_fns, 0, 0 },
+ { "ae_lp16f.iu", ICLASS_ae_iclass_lp16f_iu,
+ 0,
+ Opcode_ae_lp16f_iu_encode_fns, 0, 0 },
+ { "ae_lp16f.x", ICLASS_ae_iclass_lp16f_x,
+ 0,
+ Opcode_ae_lp16f_x_encode_fns, 0, 0 },
+ { "ae_lp16f.xu", ICLASS_ae_iclass_lp16f_xu,
+ 0,
+ Opcode_ae_lp16f_xu_encode_fns, 0, 0 },
+ { "ae_lp24.i", ICLASS_ae_iclass_lp24_i,
+ 0,
+ Opcode_ae_lp24_i_encode_fns, 0, 0 },
+ { "ae_lp24.iu", ICLASS_ae_iclass_lp24_iu,
+ 0,
+ Opcode_ae_lp24_iu_encode_fns, 0, 0 },
+ { "ae_lp24.x", ICLASS_ae_iclass_lp24_x,
+ 0,
+ Opcode_ae_lp24_x_encode_fns, 0, 0 },
+ { "ae_lp24.xu", ICLASS_ae_iclass_lp24_xu,
+ 0,
+ Opcode_ae_lp24_xu_encode_fns, 0, 0 },
+ { "ae_lp24f.i", ICLASS_ae_iclass_lp24f_i,
+ 0,
+ Opcode_ae_lp24f_i_encode_fns, 0, 0 },
+ { "ae_lp24f.iu", ICLASS_ae_iclass_lp24f_iu,
+ 0,
+ Opcode_ae_lp24f_iu_encode_fns, 0, 0 },
+ { "ae_lp24f.x", ICLASS_ae_iclass_lp24f_x,
+ 0,
+ Opcode_ae_lp24f_x_encode_fns, 0, 0 },
+ { "ae_lp24f.xu", ICLASS_ae_iclass_lp24f_xu,
+ 0,
+ Opcode_ae_lp24f_xu_encode_fns, 0, 0 },
+ { "ae_lp16x2f.i", ICLASS_ae_iclass_lp16x2f_i,
+ 0,
+ Opcode_ae_lp16x2f_i_encode_fns, 0, 0 },
+ { "ae_lp16x2f.iu", ICLASS_ae_iclass_lp16x2f_iu,
+ 0,
+ Opcode_ae_lp16x2f_iu_encode_fns, 0, 0 },
+ { "ae_lp16x2f.x", ICLASS_ae_iclass_lp16x2f_x,
+ 0,
+ Opcode_ae_lp16x2f_x_encode_fns, 0, 0 },
+ { "ae_lp16x2f.xu", ICLASS_ae_iclass_lp16x2f_xu,
+ 0,
+ Opcode_ae_lp16x2f_xu_encode_fns, 0, 0 },
+ { "ae_lp24x2f.i", ICLASS_ae_iclass_lp24x2f_i,
+ 0,
+ Opcode_ae_lp24x2f_i_encode_fns, 0, 0 },
+ { "ae_lp24x2f.iu", ICLASS_ae_iclass_lp24x2f_iu,
+ 0,
+ Opcode_ae_lp24x2f_iu_encode_fns, 0, 0 },
+ { "ae_lp24x2f.x", ICLASS_ae_iclass_lp24x2f_x,
+ 0,
+ Opcode_ae_lp24x2f_x_encode_fns, 0, 0 },
+ { "ae_lp24x2f.xu", ICLASS_ae_iclass_lp24x2f_xu,
+ 0,
+ Opcode_ae_lp24x2f_xu_encode_fns, 0, 0 },
+ { "ae_lp24x2.i", ICLASS_ae_iclass_lp24x2_i,
+ 0,
+ Opcode_ae_lp24x2_i_encode_fns, 0, 0 },
+ { "ae_lp24x2.iu", ICLASS_ae_iclass_lp24x2_iu,
+ 0,
+ Opcode_ae_lp24x2_iu_encode_fns, 0, 0 },
+ { "ae_lp24x2.x", ICLASS_ae_iclass_lp24x2_x,
+ 0,
+ Opcode_ae_lp24x2_x_encode_fns, 0, 0 },
+ { "ae_lp24x2.xu", ICLASS_ae_iclass_lp24x2_xu,
+ 0,
+ Opcode_ae_lp24x2_xu_encode_fns, 0, 0 },
+ { "ae_sp16x2f.i", ICLASS_ae_iclass_sp16x2f_i,
+ 0,
+ Opcode_ae_sp16x2f_i_encode_fns, 0, 0 },
+ { "ae_sp16x2f.iu", ICLASS_ae_iclass_sp16x2f_iu,
+ 0,
+ Opcode_ae_sp16x2f_iu_encode_fns, 0, 0 },
+ { "ae_sp16x2f.x", ICLASS_ae_iclass_sp16x2f_x,
+ 0,
+ Opcode_ae_sp16x2f_x_encode_fns, 0, 0 },
+ { "ae_sp16x2f.xu", ICLASS_ae_iclass_sp16x2f_xu,
+ 0,
+ Opcode_ae_sp16x2f_xu_encode_fns, 0, 0 },
+ { "ae_sp24x2s.i", ICLASS_ae_iclass_sp24x2s_i,
+ 0,
+ Opcode_ae_sp24x2s_i_encode_fns, 0, 0 },
+ { "ae_sp24x2s.iu", ICLASS_ae_iclass_sp24x2s_iu,
+ 0,
+ Opcode_ae_sp24x2s_iu_encode_fns, 0, 0 },
+ { "ae_sp24x2s.x", ICLASS_ae_iclass_sp24x2s_x,
+ 0,
+ Opcode_ae_sp24x2s_x_encode_fns, 0, 0 },
+ { "ae_sp24x2s.xu", ICLASS_ae_iclass_sp24x2s_xu,
+ 0,
+ Opcode_ae_sp24x2s_xu_encode_fns, 0, 0 },
+ { "ae_sp24x2f.i", ICLASS_ae_iclass_sp24x2f_i,
+ 0,
+ Opcode_ae_sp24x2f_i_encode_fns, 0, 0 },
+ { "ae_sp24x2f.iu", ICLASS_ae_iclass_sp24x2f_iu,
+ 0,
+ Opcode_ae_sp24x2f_iu_encode_fns, 0, 0 },
+ { "ae_sp24x2f.x", ICLASS_ae_iclass_sp24x2f_x,
+ 0,
+ Opcode_ae_sp24x2f_x_encode_fns, 0, 0 },
+ { "ae_sp24x2f.xu", ICLASS_ae_iclass_sp24x2f_xu,
+ 0,
+ Opcode_ae_sp24x2f_xu_encode_fns, 0, 0 },
+ { "ae_sp16f.l.i", ICLASS_ae_iclass_sp16f_l_i,
+ 0,
+ Opcode_ae_sp16f_l_i_encode_fns, 0, 0 },
+ { "ae_sp16f.l.iu", ICLASS_ae_iclass_sp16f_l_iu,
+ 0,
+ Opcode_ae_sp16f_l_iu_encode_fns, 0, 0 },
+ { "ae_sp16f.l.x", ICLASS_ae_iclass_sp16f_l_x,
+ 0,
+ Opcode_ae_sp16f_l_x_encode_fns, 0, 0 },
+ { "ae_sp16f.l.xu", ICLASS_ae_iclass_sp16f_l_xu,
+ 0,
+ Opcode_ae_sp16f_l_xu_encode_fns, 0, 0 },
+ { "ae_sp24s.l.i", ICLASS_ae_iclass_sp24s_l_i,
+ 0,
+ Opcode_ae_sp24s_l_i_encode_fns, 0, 0 },
+ { "ae_sp24s.l.iu", ICLASS_ae_iclass_sp24s_l_iu,
+ 0,
+ Opcode_ae_sp24s_l_iu_encode_fns, 0, 0 },
+ { "ae_sp24s.l.x", ICLASS_ae_iclass_sp24s_l_x,
+ 0,
+ Opcode_ae_sp24s_l_x_encode_fns, 0, 0 },
+ { "ae_sp24s.l.xu", ICLASS_ae_iclass_sp24s_l_xu,
+ 0,
+ Opcode_ae_sp24s_l_xu_encode_fns, 0, 0 },
+ { "ae_sp24f.l.i", ICLASS_ae_iclass_sp24f_l_i,
+ 0,
+ Opcode_ae_sp24f_l_i_encode_fns, 0, 0 },
+ { "ae_sp24f.l.iu", ICLASS_ae_iclass_sp24f_l_iu,
+ 0,
+ Opcode_ae_sp24f_l_iu_encode_fns, 0, 0 },
+ { "ae_sp24f.l.x", ICLASS_ae_iclass_sp24f_l_x,
+ 0,
+ Opcode_ae_sp24f_l_x_encode_fns, 0, 0 },
+ { "ae_sp24f.l.xu", ICLASS_ae_iclass_sp24f_l_xu,
+ 0,
+ Opcode_ae_sp24f_l_xu_encode_fns, 0, 0 },
+ { "ae_lq56.i", ICLASS_ae_iclass_lq56_i,
+ 0,
+ Opcode_ae_lq56_i_encode_fns, 0, 0 },
+ { "ae_lq56.iu", ICLASS_ae_iclass_lq56_iu,
+ 0,
+ Opcode_ae_lq56_iu_encode_fns, 0, 0 },
+ { "ae_lq56.x", ICLASS_ae_iclass_lq56_x,
+ 0,
+ Opcode_ae_lq56_x_encode_fns, 0, 0 },
+ { "ae_lq56.xu", ICLASS_ae_iclass_lq56_xu,
+ 0,
+ Opcode_ae_lq56_xu_encode_fns, 0, 0 },
+ { "ae_lq32f.i", ICLASS_ae_iclass_lq32f_i,
+ 0,
+ Opcode_ae_lq32f_i_encode_fns, 0, 0 },
+ { "ae_lq32f.iu", ICLASS_ae_iclass_lq32f_iu,
+ 0,
+ Opcode_ae_lq32f_iu_encode_fns, 0, 0 },
+ { "ae_lq32f.x", ICLASS_ae_iclass_lq32f_x,
+ 0,
+ Opcode_ae_lq32f_x_encode_fns, 0, 0 },
+ { "ae_lq32f.xu", ICLASS_ae_iclass_lq32f_xu,
+ 0,
+ Opcode_ae_lq32f_xu_encode_fns, 0, 0 },
+ { "ae_sq56s.i", ICLASS_ae_iclass_sq56s_i,
+ 0,
+ Opcode_ae_sq56s_i_encode_fns, 0, 0 },
+ { "ae_sq56s.iu", ICLASS_ae_iclass_sq56s_iu,
+ 0,
+ Opcode_ae_sq56s_iu_encode_fns, 0, 0 },
+ { "ae_sq56s.x", ICLASS_ae_iclass_sq56s_x,
+ 0,
+ Opcode_ae_sq56s_x_encode_fns, 0, 0 },
+ { "ae_sq56s.xu", ICLASS_ae_iclass_sq56s_xu,
+ 0,
+ Opcode_ae_sq56s_xu_encode_fns, 0, 0 },
+ { "ae_sq32f.i", ICLASS_ae_iclass_sq32f_i,
+ 0,
+ Opcode_ae_sq32f_i_encode_fns, 0, 0 },
+ { "ae_sq32f.iu", ICLASS_ae_iclass_sq32f_iu,
+ 0,
+ Opcode_ae_sq32f_iu_encode_fns, 0, 0 },
+ { "ae_sq32f.x", ICLASS_ae_iclass_sq32f_x,
+ 0,
+ Opcode_ae_sq32f_x_encode_fns, 0, 0 },
+ { "ae_sq32f.xu", ICLASS_ae_iclass_sq32f_xu,
+ 0,
+ Opcode_ae_sq32f_xu_encode_fns, 0, 0 },
+ { "ae_zerop48", ICLASS_ae_iclass_zerop48,
+ 0,
+ Opcode_ae_zerop48_encode_fns, 0, 0 },
+ { "ae_movp48", ICLASS_ae_iclass_movp48,
+ 0,
+ Opcode_ae_movp48_encode_fns, 0, 0 },
+ { "ae_selp24.ll", ICLASS_ae_iclass_selp24_ll,
+ 0,
+ Opcode_ae_selp24_ll_encode_fns, 0, 0 },
+ { "ae_selp24.lh", ICLASS_ae_iclass_selp24_lh,
+ 0,
+ Opcode_ae_selp24_lh_encode_fns, 0, 0 },
+ { "ae_selp24.hl", ICLASS_ae_iclass_selp24_hl,
+ 0,
+ Opcode_ae_selp24_hl_encode_fns, 0, 0 },
+ { "ae_selp24.hh", ICLASS_ae_iclass_selp24_hh,
+ 0,
+ Opcode_ae_selp24_hh_encode_fns, 0, 0 },
+ { "ae_movtp24x2", ICLASS_ae_iclass_movtp24x2,
+ 0,
+ Opcode_ae_movtp24x2_encode_fns, 0, 0 },
+ { "ae_movfp24x2", ICLASS_ae_iclass_movfp24x2,
+ 0,
+ Opcode_ae_movfp24x2_encode_fns, 0, 0 },
+ { "ae_movtp48", ICLASS_ae_iclass_movtp48,
+ 0,
+ Opcode_ae_movtp48_encode_fns, 0, 0 },
+ { "ae_movfp48", ICLASS_ae_iclass_movfp48,
+ 0,
+ Opcode_ae_movfp48_encode_fns, 0, 0 },
+ { "ae_movpa24x2", ICLASS_ae_iclass_movpa24x2,
+ 0,
+ Opcode_ae_movpa24x2_encode_fns, 0, 0 },
+ { "ae_truncp24a32x2", ICLASS_ae_iclass_truncp24a32x2,
+ 0,
+ Opcode_ae_truncp24a32x2_encode_fns, 0, 0 },
+ { "ae_cvta32p24.l", ICLASS_ae_iclass_cvta32p24_l,
+ 0,
+ Opcode_ae_cvta32p24_l_encode_fns, 0, 0 },
+ { "ae_cvta32p24.h", ICLASS_ae_iclass_cvta32p24_h,
+ 0,
+ Opcode_ae_cvta32p24_h_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.ll", ICLASS_ae_iclass_cvtp24a16x2_ll,
+ 0,
+ Opcode_ae_cvtp24a16x2_ll_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.lh", ICLASS_ae_iclass_cvtp24a16x2_lh,
+ 0,
+ Opcode_ae_cvtp24a16x2_lh_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.hl", ICLASS_ae_iclass_cvtp24a16x2_hl,
+ 0,
+ Opcode_ae_cvtp24a16x2_hl_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.hh", ICLASS_ae_iclass_cvtp24a16x2_hh,
+ 0,
+ Opcode_ae_cvtp24a16x2_hh_encode_fns, 0, 0 },
+ { "ae_truncp24q48x2", ICLASS_ae_iclass_truncp24q48x2,
+ 0,
+ Opcode_ae_truncp24q48x2_encode_fns, 0, 0 },
+ { "ae_truncp16", ICLASS_ae_iclass_truncp16,
+ 0,
+ Opcode_ae_truncp16_encode_fns, 0, 0 },
+ { "ae_roundsp24q48sym", ICLASS_ae_iclass_roundsp24q48sym,
+ 0,
+ Opcode_ae_roundsp24q48sym_encode_fns, 0, 0 },
+ { "ae_roundsp24q48asym", ICLASS_ae_iclass_roundsp24q48asym,
+ 0,
+ Opcode_ae_roundsp24q48asym_encode_fns, 0, 0 },
+ { "ae_roundsp16q48sym", ICLASS_ae_iclass_roundsp16q48sym,
+ 0,
+ Opcode_ae_roundsp16q48sym_encode_fns, 0, 0 },
+ { "ae_roundsp16q48asym", ICLASS_ae_iclass_roundsp16q48asym,
+ 0,
+ Opcode_ae_roundsp16q48asym_encode_fns, 0, 0 },
+ { "ae_roundsp16sym", ICLASS_ae_iclass_roundsp16sym,
+ 0,
+ Opcode_ae_roundsp16sym_encode_fns, 0, 0 },
+ { "ae_roundsp16asym", ICLASS_ae_iclass_roundsp16asym,
+ 0,
+ Opcode_ae_roundsp16asym_encode_fns, 0, 0 },
+ { "ae_zeroq56", ICLASS_ae_iclass_zeroq56,
+ 0,
+ Opcode_ae_zeroq56_encode_fns, 0, 0 },
+ { "ae_movq56", ICLASS_ae_iclass_movq56,
+ 0,
+ Opcode_ae_movq56_encode_fns, 0, 0 },
+ { "ae_movtq56", ICLASS_ae_iclass_movtq56,
+ 0,
+ Opcode_ae_movtq56_encode_fns, 0, 0 },
+ { "ae_movfq56", ICLASS_ae_iclass_movfq56,
+ 0,
+ Opcode_ae_movfq56_encode_fns, 0, 0 },
+ { "ae_cvtq48a32s", ICLASS_ae_iclass_cvtq48a32s,
+ 0,
+ Opcode_ae_cvtq48a32s_encode_fns, 0, 0 },
+ { "ae_cvtq48p24s.l", ICLASS_ae_iclass_cvtq48p24s_l,
+ 0,
+ Opcode_ae_cvtq48p24s_l_encode_fns, 0, 0 },
+ { "ae_cvtq48p24s.h", ICLASS_ae_iclass_cvtq48p24s_h,
+ 0,
+ Opcode_ae_cvtq48p24s_h_encode_fns, 0, 0 },
+ { "ae_satq48s", ICLASS_ae_iclass_satq48s,
+ 0,
+ Opcode_ae_satq48s_encode_fns, 0, 0 },
+ { "ae_truncq32", ICLASS_ae_iclass_truncq32,
+ 0,
+ Opcode_ae_truncq32_encode_fns, 0, 0 },
+ { "ae_roundsq32sym", ICLASS_ae_iclass_roundsq32sym,
+ 0,
+ Opcode_ae_roundsq32sym_encode_fns, 0, 0 },
+ { "ae_roundsq32asym", ICLASS_ae_iclass_roundsq32asym,
+ 0,
+ Opcode_ae_roundsq32asym_encode_fns, 0, 0 },
+ { "ae_trunca32q48", ICLASS_ae_iclass_trunca32q48,
+ 0,
+ Opcode_ae_trunca32q48_encode_fns, 0, 0 },
+ { "ae_movap24s.l", ICLASS_ae_iclass_movap24s_l,
+ 0,
+ Opcode_ae_movap24s_l_encode_fns, 0, 0 },
+ { "ae_movap24s.h", ICLASS_ae_iclass_movap24s_h,
+ 0,
+ Opcode_ae_movap24s_h_encode_fns, 0, 0 },
+ { "ae_trunca16p24s.l", ICLASS_ae_iclass_trunca16p24s_l,
+ 0,
+ Opcode_ae_trunca16p24s_l_encode_fns, 0, 0 },
+ { "ae_trunca16p24s.h", ICLASS_ae_iclass_trunca16p24s_h,
+ 0,
+ Opcode_ae_trunca16p24s_h_encode_fns, 0, 0 },
+ { "ae_addp24", ICLASS_ae_iclass_addp24,
+ 0,
+ Opcode_ae_addp24_encode_fns, 0, 0 },
+ { "ae_subp24", ICLASS_ae_iclass_subp24,
+ 0,
+ Opcode_ae_subp24_encode_fns, 0, 0 },
+ { "ae_negp24", ICLASS_ae_iclass_negp24,
+ 0,
+ Opcode_ae_negp24_encode_fns, 0, 0 },
+ { "ae_absp24", ICLASS_ae_iclass_absp24,
+ 0,
+ Opcode_ae_absp24_encode_fns, 0, 0 },
+ { "ae_maxp24s", ICLASS_ae_iclass_maxp24s,
+ 0,
+ Opcode_ae_maxp24s_encode_fns, 0, 0 },
+ { "ae_minp24s", ICLASS_ae_iclass_minp24s,
+ 0,
+ Opcode_ae_minp24s_encode_fns, 0, 0 },
+ { "ae_maxbp24s", ICLASS_ae_iclass_maxbp24s,
+ 0,
+ Opcode_ae_maxbp24s_encode_fns, 0, 0 },
+ { "ae_minbp24s", ICLASS_ae_iclass_minbp24s,
+ 0,
+ Opcode_ae_minbp24s_encode_fns, 0, 0 },
+ { "ae_addsp24s", ICLASS_ae_iclass_addsp24s,
+ 0,
+ Opcode_ae_addsp24s_encode_fns, 0, 0 },
+ { "ae_subsp24s", ICLASS_ae_iclass_subsp24s,
+ 0,
+ Opcode_ae_subsp24s_encode_fns, 0, 0 },
+ { "ae_negsp24s", ICLASS_ae_iclass_negsp24s,
+ 0,
+ Opcode_ae_negsp24s_encode_fns, 0, 0 },
+ { "ae_abssp24s", ICLASS_ae_iclass_abssp24s,
+ 0,
+ Opcode_ae_abssp24s_encode_fns, 0, 0 },
+ { "ae_andp48", ICLASS_ae_iclass_andp48,
+ 0,
+ Opcode_ae_andp48_encode_fns, 0, 0 },
+ { "ae_nandp48", ICLASS_ae_iclass_nandp48,
+ 0,
+ Opcode_ae_nandp48_encode_fns, 0, 0 },
+ { "ae_orp48", ICLASS_ae_iclass_orp48,
+ 0,
+ Opcode_ae_orp48_encode_fns, 0, 0 },
+ { "ae_xorp48", ICLASS_ae_iclass_xorp48,
+ 0,
+ Opcode_ae_xorp48_encode_fns, 0, 0 },
+ { "ae_ltp24s", ICLASS_ae_iclass_ltp24s,
+ 0,
+ Opcode_ae_ltp24s_encode_fns, 0, 0 },
+ { "ae_lep24s", ICLASS_ae_iclass_lep24s,
+ 0,
+ Opcode_ae_lep24s_encode_fns, 0, 0 },
+ { "ae_eqp24", ICLASS_ae_iclass_eqp24,
+ 0,
+ Opcode_ae_eqp24_encode_fns, 0, 0 },
+ { "ae_addq56", ICLASS_ae_iclass_addq56,
+ 0,
+ Opcode_ae_addq56_encode_fns, 0, 0 },
+ { "ae_subq56", ICLASS_ae_iclass_subq56,
+ 0,
+ Opcode_ae_subq56_encode_fns, 0, 0 },
+ { "ae_negq56", ICLASS_ae_iclass_negq56,
+ 0,
+ Opcode_ae_negq56_encode_fns, 0, 0 },
+ { "ae_absq56", ICLASS_ae_iclass_absq56,
+ 0,
+ Opcode_ae_absq56_encode_fns, 0, 0 },
+ { "ae_maxq56s", ICLASS_ae_iclass_maxq56s,
+ 0,
+ Opcode_ae_maxq56s_encode_fns, 0, 0 },
+ { "ae_minq56s", ICLASS_ae_iclass_minq56s,
+ 0,
+ Opcode_ae_minq56s_encode_fns, 0, 0 },
+ { "ae_maxbq56s", ICLASS_ae_iclass_maxbq56s,
+ 0,
+ Opcode_ae_maxbq56s_encode_fns, 0, 0 },
+ { "ae_minbq56s", ICLASS_ae_iclass_minbq56s,
+ 0,
+ Opcode_ae_minbq56s_encode_fns, 0, 0 },
+ { "ae_addsq56s", ICLASS_ae_iclass_addsq56s,
+ 0,
+ Opcode_ae_addsq56s_encode_fns, 0, 0 },
+ { "ae_subsq56s", ICLASS_ae_iclass_subsq56s,
+ 0,
+ Opcode_ae_subsq56s_encode_fns, 0, 0 },
+ { "ae_negsq56s", ICLASS_ae_iclass_negsq56s,
+ 0,
+ Opcode_ae_negsq56s_encode_fns, 0, 0 },
+ { "ae_abssq56s", ICLASS_ae_iclass_abssq56s,
+ 0,
+ Opcode_ae_abssq56s_encode_fns, 0, 0 },
+ { "ae_andq56", ICLASS_ae_iclass_andq56,
+ 0,
+ Opcode_ae_andq56_encode_fns, 0, 0 },
+ { "ae_nandq56", ICLASS_ae_iclass_nandq56,
+ 0,
+ Opcode_ae_nandq56_encode_fns, 0, 0 },
+ { "ae_orq56", ICLASS_ae_iclass_orq56,
+ 0,
+ Opcode_ae_orq56_encode_fns, 0, 0 },
+ { "ae_xorq56", ICLASS_ae_iclass_xorq56,
+ 0,
+ Opcode_ae_xorq56_encode_fns, 0, 0 },
+ { "ae_sllip24", ICLASS_ae_iclass_sllip24,
+ 0,
+ Opcode_ae_sllip24_encode_fns, 0, 0 },
+ { "ae_srlip24", ICLASS_ae_iclass_srlip24,
+ 0,
+ Opcode_ae_srlip24_encode_fns, 0, 0 },
+ { "ae_sraip24", ICLASS_ae_iclass_sraip24,
+ 0,
+ Opcode_ae_sraip24_encode_fns, 0, 0 },
+ { "ae_sllsp24", ICLASS_ae_iclass_sllsp24,
+ 0,
+ Opcode_ae_sllsp24_encode_fns, 0, 0 },
+ { "ae_srlsp24", ICLASS_ae_iclass_srlsp24,
+ 0,
+ Opcode_ae_srlsp24_encode_fns, 0, 0 },
+ { "ae_srasp24", ICLASS_ae_iclass_srasp24,
+ 0,
+ Opcode_ae_srasp24_encode_fns, 0, 0 },
+ { "ae_sllisp24s", ICLASS_ae_iclass_sllisp24s,
+ 0,
+ Opcode_ae_sllisp24s_encode_fns, 0, 0 },
+ { "ae_sllssp24s", ICLASS_ae_iclass_sllssp24s,
+ 0,
+ Opcode_ae_sllssp24s_encode_fns, 0, 0 },
+ { "ae_slliq56", ICLASS_ae_iclass_slliq56,
+ 0,
+ Opcode_ae_slliq56_encode_fns, 0, 0 },
+ { "ae_srliq56", ICLASS_ae_iclass_srliq56,
+ 0,
+ Opcode_ae_srliq56_encode_fns, 0, 0 },
+ { "ae_sraiq56", ICLASS_ae_iclass_sraiq56,
+ 0,
+ Opcode_ae_sraiq56_encode_fns, 0, 0 },
+ { "ae_sllsq56", ICLASS_ae_iclass_sllsq56,
+ 0,
+ Opcode_ae_sllsq56_encode_fns, 0, 0 },
+ { "ae_srlsq56", ICLASS_ae_iclass_srlsq56,
+ 0,
+ Opcode_ae_srlsq56_encode_fns, 0, 0 },
+ { "ae_srasq56", ICLASS_ae_iclass_srasq56,
+ 0,
+ Opcode_ae_srasq56_encode_fns, 0, 0 },
+ { "ae_sllaq56", ICLASS_ae_iclass_sllaq56,
+ 0,
+ Opcode_ae_sllaq56_encode_fns, 0, 0 },
+ { "ae_srlaq56", ICLASS_ae_iclass_srlaq56,
+ 0,
+ Opcode_ae_srlaq56_encode_fns, 0, 0 },
+ { "ae_sraaq56", ICLASS_ae_iclass_sraaq56,
+ 0,
+ Opcode_ae_sraaq56_encode_fns, 0, 0 },
+ { "ae_sllisq56s", ICLASS_ae_iclass_sllisq56s,
+ 0,
+ Opcode_ae_sllisq56s_encode_fns, 0, 0 },
+ { "ae_sllssq56s", ICLASS_ae_iclass_sllssq56s,
+ 0,
+ Opcode_ae_sllssq56s_encode_fns, 0, 0 },
+ { "ae_sllasq56s", ICLASS_ae_iclass_sllasq56s,
+ 0,
+ Opcode_ae_sllasq56s_encode_fns, 0, 0 },
+ { "ae_ltq56s", ICLASS_ae_iclass_ltq56s,
+ 0,
+ Opcode_ae_ltq56s_encode_fns, 0, 0 },
+ { "ae_leq56s", ICLASS_ae_iclass_leq56s,
+ 0,
+ Opcode_ae_leq56s_encode_fns, 0, 0 },
+ { "ae_eqq56", ICLASS_ae_iclass_eqq56,
+ 0,
+ Opcode_ae_eqq56_encode_fns, 0, 0 },
+ { "ae_nsaq56s", ICLASS_ae_iclass_nsaq56s,
+ 0,
+ Opcode_ae_nsaq56s_encode_fns, 0, 0 },
+ { "ae_mulsrfq32sp24s.h", ICLASS_ae_iclass_mulsrfq32sp24s_h,
+ 0,
+ Opcode_ae_mulsrfq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mulsrfq32sp24s.l", ICLASS_ae_iclass_mulsrfq32sp24s_l,
+ 0,
+ Opcode_ae_mulsrfq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mularfq32sp24s.h", ICLASS_ae_iclass_mularfq32sp24s_h,
+ 0,
+ Opcode_ae_mularfq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mularfq32sp24s.l", ICLASS_ae_iclass_mularfq32sp24s_l,
+ 0,
+ Opcode_ae_mularfq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mulrfq32sp24s.h", ICLASS_ae_iclass_mulrfq32sp24s_h,
+ 0,
+ Opcode_ae_mulrfq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mulrfq32sp24s.l", ICLASS_ae_iclass_mulrfq32sp24s_l,
+ 0,
+ Opcode_ae_mulrfq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp24s.h", ICLASS_ae_iclass_mulsfq32sp24s_h,
+ 0,
+ Opcode_ae_mulsfq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp24s.l", ICLASS_ae_iclass_mulsfq32sp24s_l,
+ 0,
+ Opcode_ae_mulsfq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mulafq32sp24s.h", ICLASS_ae_iclass_mulafq32sp24s_h,
+ 0,
+ Opcode_ae_mulafq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mulafq32sp24s.l", ICLASS_ae_iclass_mulafq32sp24s_l,
+ 0,
+ Opcode_ae_mulafq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mulfq32sp24s.h", ICLASS_ae_iclass_mulfq32sp24s_h,
+ 0,
+ Opcode_ae_mulfq32sp24s_h_encode_fns, 0, 0 },
+ { "ae_mulfq32sp24s.l", ICLASS_ae_iclass_mulfq32sp24s_l,
+ 0,
+ Opcode_ae_mulfq32sp24s_l_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.ll", ICLASS_ae_iclass_mulfs32p16s_ll,
+ 0,
+ Opcode_ae_mulfs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulfp24s.ll", ICLASS_ae_iclass_mulfp24s_ll,
+ 0,
+ Opcode_ae_mulfp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulp24s.ll", ICLASS_ae_iclass_mulp24s_ll,
+ 0,
+ Opcode_ae_mulp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.lh", ICLASS_ae_iclass_mulfs32p16s_lh,
+ 0,
+ Opcode_ae_mulfs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulfp24s.lh", ICLASS_ae_iclass_mulfp24s_lh,
+ 0,
+ Opcode_ae_mulfp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulp24s.lh", ICLASS_ae_iclass_mulp24s_lh,
+ 0,
+ Opcode_ae_mulp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.hl", ICLASS_ae_iclass_mulfs32p16s_hl,
+ 0,
+ Opcode_ae_mulfs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulfp24s.hl", ICLASS_ae_iclass_mulfp24s_hl,
+ 0,
+ Opcode_ae_mulfp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulp24s.hl", ICLASS_ae_iclass_mulp24s_hl,
+ 0,
+ Opcode_ae_mulp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.hh", ICLASS_ae_iclass_mulfs32p16s_hh,
+ 0,
+ Opcode_ae_mulfs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulfp24s.hh", ICLASS_ae_iclass_mulfp24s_hh,
+ 0,
+ Opcode_ae_mulfp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulp24s.hh", ICLASS_ae_iclass_mulp24s_hh,
+ 0,
+ Opcode_ae_mulp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.ll", ICLASS_ae_iclass_mulafs32p16s_ll,
+ 0,
+ Opcode_ae_mulafs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulafp24s.ll", ICLASS_ae_iclass_mulafp24s_ll,
+ 0,
+ Opcode_ae_mulafp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulap24s.ll", ICLASS_ae_iclass_mulap24s_ll,
+ 0,
+ Opcode_ae_mulap24s_ll_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.lh", ICLASS_ae_iclass_mulafs32p16s_lh,
+ 0,
+ Opcode_ae_mulafs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulafp24s.lh", ICLASS_ae_iclass_mulafp24s_lh,
+ 0,
+ Opcode_ae_mulafp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulap24s.lh", ICLASS_ae_iclass_mulap24s_lh,
+ 0,
+ Opcode_ae_mulap24s_lh_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.hl", ICLASS_ae_iclass_mulafs32p16s_hl,
+ 0,
+ Opcode_ae_mulafs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulafp24s.hl", ICLASS_ae_iclass_mulafp24s_hl,
+ 0,
+ Opcode_ae_mulafp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulap24s.hl", ICLASS_ae_iclass_mulap24s_hl,
+ 0,
+ Opcode_ae_mulap24s_hl_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.hh", ICLASS_ae_iclass_mulafs32p16s_hh,
+ 0,
+ Opcode_ae_mulafs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulafp24s.hh", ICLASS_ae_iclass_mulafp24s_hh,
+ 0,
+ Opcode_ae_mulafp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulap24s.hh", ICLASS_ae_iclass_mulap24s_hh,
+ 0,
+ Opcode_ae_mulap24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.ll", ICLASS_ae_iclass_mulsfs32p16s_ll,
+ 0,
+ Opcode_ae_mulsfs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.ll", ICLASS_ae_iclass_mulsfp24s_ll,
+ 0,
+ Opcode_ae_mulsfp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsp24s.ll", ICLASS_ae_iclass_mulsp24s_ll,
+ 0,
+ Opcode_ae_mulsp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.lh", ICLASS_ae_iclass_mulsfs32p16s_lh,
+ 0,
+ Opcode_ae_mulsfs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.lh", ICLASS_ae_iclass_mulsfp24s_lh,
+ 0,
+ Opcode_ae_mulsfp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsp24s.lh", ICLASS_ae_iclass_mulsp24s_lh,
+ 0,
+ Opcode_ae_mulsp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.hl", ICLASS_ae_iclass_mulsfs32p16s_hl,
+ 0,
+ Opcode_ae_mulsfs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.hl", ICLASS_ae_iclass_mulsfp24s_hl,
+ 0,
+ Opcode_ae_mulsfp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsp24s.hl", ICLASS_ae_iclass_mulsp24s_hl,
+ 0,
+ Opcode_ae_mulsp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.hh", ICLASS_ae_iclass_mulsfs32p16s_hh,
+ 0,
+ Opcode_ae_mulsfs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.hh", ICLASS_ae_iclass_mulsfp24s_hh,
+ 0,
+ Opcode_ae_mulsfp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsp24s.hh", ICLASS_ae_iclass_mulsp24s_hh,
+ 0,
+ Opcode_ae_mulsp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.ll", ICLASS_ae_iclass_mulafs56p24s_ll,
+ 0,
+ Opcode_ae_mulafs56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.ll", ICLASS_ae_iclass_mulas56p24s_ll,
+ 0,
+ Opcode_ae_mulas56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.lh", ICLASS_ae_iclass_mulafs56p24s_lh,
+ 0,
+ Opcode_ae_mulafs56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.lh", ICLASS_ae_iclass_mulas56p24s_lh,
+ 0,
+ Opcode_ae_mulas56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.hl", ICLASS_ae_iclass_mulafs56p24s_hl,
+ 0,
+ Opcode_ae_mulafs56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.hl", ICLASS_ae_iclass_mulas56p24s_hl,
+ 0,
+ Opcode_ae_mulas56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.hh", ICLASS_ae_iclass_mulafs56p24s_hh,
+ 0,
+ Opcode_ae_mulafs56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.hh", ICLASS_ae_iclass_mulas56p24s_hh,
+ 0,
+ Opcode_ae_mulas56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.ll", ICLASS_ae_iclass_mulsfs56p24s_ll,
+ 0,
+ Opcode_ae_mulsfs56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.ll", ICLASS_ae_iclass_mulss56p24s_ll,
+ 0,
+ Opcode_ae_mulss56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.lh", ICLASS_ae_iclass_mulsfs56p24s_lh,
+ 0,
+ Opcode_ae_mulsfs56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.lh", ICLASS_ae_iclass_mulss56p24s_lh,
+ 0,
+ Opcode_ae_mulss56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.hl", ICLASS_ae_iclass_mulsfs56p24s_hl,
+ 0,
+ Opcode_ae_mulsfs56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.hl", ICLASS_ae_iclass_mulss56p24s_hl,
+ 0,
+ Opcode_ae_mulss56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.hh", ICLASS_ae_iclass_mulsfs56p24s_hh,
+ 0,
+ Opcode_ae_mulsfs56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.hh", ICLASS_ae_iclass_mulss56p24s_hh,
+ 0,
+ Opcode_ae_mulss56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16s.l", ICLASS_ae_iclass_mulfq32sp16s_l,
+ 0,
+ Opcode_ae_mulfq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16s.h", ICLASS_ae_iclass_mulfq32sp16s_h,
+ 0,
+ Opcode_ae_mulfq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16u.l", ICLASS_ae_iclass_mulfq32sp16u_l,
+ 0,
+ Opcode_ae_mulfq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16u.h", ICLASS_ae_iclass_mulfq32sp16u_h,
+ 0,
+ Opcode_ae_mulfq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulq32sp16s.l", ICLASS_ae_iclass_mulq32sp16s_l,
+ 0,
+ Opcode_ae_mulq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulq32sp16s.h", ICLASS_ae_iclass_mulq32sp16s_h,
+ 0,
+ Opcode_ae_mulq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulq32sp16u.l", ICLASS_ae_iclass_mulq32sp16u_l,
+ 0,
+ Opcode_ae_mulq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulq32sp16u.h", ICLASS_ae_iclass_mulq32sp16u_h,
+ 0,
+ Opcode_ae_mulq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16s.l", ICLASS_ae_iclass_mulafq32sp16s_l,
+ 0,
+ Opcode_ae_mulafq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16s.h", ICLASS_ae_iclass_mulafq32sp16s_h,
+ 0,
+ Opcode_ae_mulafq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16u.l", ICLASS_ae_iclass_mulafq32sp16u_l,
+ 0,
+ Opcode_ae_mulafq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16u.h", ICLASS_ae_iclass_mulafq32sp16u_h,
+ 0,
+ Opcode_ae_mulafq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16s.l", ICLASS_ae_iclass_mulaq32sp16s_l,
+ 0,
+ Opcode_ae_mulaq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16s.h", ICLASS_ae_iclass_mulaq32sp16s_h,
+ 0,
+ Opcode_ae_mulaq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16u.l", ICLASS_ae_iclass_mulaq32sp16u_l,
+ 0,
+ Opcode_ae_mulaq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16u.h", ICLASS_ae_iclass_mulaq32sp16u_h,
+ 0,
+ Opcode_ae_mulaq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16s.l", ICLASS_ae_iclass_mulsfq32sp16s_l,
+ 0,
+ Opcode_ae_mulsfq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16s.h", ICLASS_ae_iclass_mulsfq32sp16s_h,
+ 0,
+ Opcode_ae_mulsfq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16u.l", ICLASS_ae_iclass_mulsfq32sp16u_l,
+ 0,
+ Opcode_ae_mulsfq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16u.h", ICLASS_ae_iclass_mulsfq32sp16u_h,
+ 0,
+ Opcode_ae_mulsfq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16s.l", ICLASS_ae_iclass_mulsq32sp16s_l,
+ 0,
+ Opcode_ae_mulsq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16s.h", ICLASS_ae_iclass_mulsq32sp16s_h,
+ 0,
+ Opcode_ae_mulsq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16u.l", ICLASS_ae_iclass_mulsq32sp16u_l,
+ 0,
+ Opcode_ae_mulsq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16u.h", ICLASS_ae_iclass_mulsq32sp16u_h,
+ 0,
+ Opcode_ae_mulsq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.ll", ICLASS_ae_iclass_mulzaaq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.ll", ICLASS_ae_iclass_mulzaafq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.ll", ICLASS_ae_iclass_mulzaaq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.ll", ICLASS_ae_iclass_mulzaafq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.hh", ICLASS_ae_iclass_mulzaaq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.hh", ICLASS_ae_iclass_mulzaafq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.hh", ICLASS_ae_iclass_mulzaaq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.hh", ICLASS_ae_iclass_mulzaafq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.lh", ICLASS_ae_iclass_mulzaaq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.lh", ICLASS_ae_iclass_mulzaafq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.lh", ICLASS_ae_iclass_mulzaaq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.lh", ICLASS_ae_iclass_mulzaafq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.ll", ICLASS_ae_iclass_mulzasq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzasq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.ll", ICLASS_ae_iclass_mulzasfq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.ll", ICLASS_ae_iclass_mulzasq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzasq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.ll", ICLASS_ae_iclass_mulzasfq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.hh", ICLASS_ae_iclass_mulzasq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzasq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.hh", ICLASS_ae_iclass_mulzasfq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.hh", ICLASS_ae_iclass_mulzasq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzasq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.hh", ICLASS_ae_iclass_mulzasfq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.lh", ICLASS_ae_iclass_mulzasq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzasq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.lh", ICLASS_ae_iclass_mulzasfq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.lh", ICLASS_ae_iclass_mulzasq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzasq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.lh", ICLASS_ae_iclass_mulzasfq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.ll", ICLASS_ae_iclass_mulzsaq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.ll", ICLASS_ae_iclass_mulzsafq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.ll", ICLASS_ae_iclass_mulzsaq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.ll", ICLASS_ae_iclass_mulzsafq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.hh", ICLASS_ae_iclass_mulzsaq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.hh", ICLASS_ae_iclass_mulzsafq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.hh", ICLASS_ae_iclass_mulzsaq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.hh", ICLASS_ae_iclass_mulzsafq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.lh", ICLASS_ae_iclass_mulzsaq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.lh", ICLASS_ae_iclass_mulzsafq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.lh", ICLASS_ae_iclass_mulzsaq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.lh", ICLASS_ae_iclass_mulzsafq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.ll", ICLASS_ae_iclass_mulzssq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzssq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.ll", ICLASS_ae_iclass_mulzssfq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.ll", ICLASS_ae_iclass_mulzssq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzssq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.ll", ICLASS_ae_iclass_mulzssfq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.hh", ICLASS_ae_iclass_mulzssq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzssq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.hh", ICLASS_ae_iclass_mulzssfq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.hh", ICLASS_ae_iclass_mulzssq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzssq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.hh", ICLASS_ae_iclass_mulzssfq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.lh", ICLASS_ae_iclass_mulzssq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzssq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.lh", ICLASS_ae_iclass_mulzssfq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.lh", ICLASS_ae_iclass_mulzssq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzssq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.lh", ICLASS_ae_iclass_mulzssfq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafp24s.hh.ll", ICLASS_ae_iclass_mulzaafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzaafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzaap24s.hh.ll", ICLASS_ae_iclass_mulzaap24s_hh_ll,
+ 0,
+ Opcode_ae_mulzaap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafp24s.hl.lh", ICLASS_ae_iclass_mulzaafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzaafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzaap24s.hl.lh", ICLASS_ae_iclass_mulzaap24s_hl_lh,
+ 0,
+ Opcode_ae_mulzaap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfp24s.hh.ll", ICLASS_ae_iclass_mulzasfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzasfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzasp24s.hh.ll", ICLASS_ae_iclass_mulzasp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzasp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfp24s.hl.lh", ICLASS_ae_iclass_mulzasfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzasfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzasp24s.hl.lh", ICLASS_ae_iclass_mulzasp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzasp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafp24s.hh.ll", ICLASS_ae_iclass_mulzsafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzsafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzsap24s.hh.ll", ICLASS_ae_iclass_mulzsap24s_hh_ll,
+ 0,
+ Opcode_ae_mulzsap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafp24s.hl.lh", ICLASS_ae_iclass_mulzsafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzsafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzsap24s.hl.lh", ICLASS_ae_iclass_mulzsap24s_hl_lh,
+ 0,
+ Opcode_ae_mulzsap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfp24s.hh.ll", ICLASS_ae_iclass_mulzssfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzssfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzssp24s.hh.ll", ICLASS_ae_iclass_mulzssp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzssp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfp24s.hl.lh", ICLASS_ae_iclass_mulzssfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzssfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzssp24s.hl.lh", ICLASS_ae_iclass_mulzssp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzssp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulaafp24s.hh.ll", ICLASS_ae_iclass_mulaafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulaafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulaap24s.hh.ll", ICLASS_ae_iclass_mulaap24s_hh_ll,
+ 0,
+ Opcode_ae_mulaap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulaafp24s.hl.lh", ICLASS_ae_iclass_mulaafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulaafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulaap24s.hl.lh", ICLASS_ae_iclass_mulaap24s_hl_lh,
+ 0,
+ Opcode_ae_mulaap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulasfp24s.hh.ll", ICLASS_ae_iclass_mulasfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulasfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulasp24s.hh.ll", ICLASS_ae_iclass_mulasp24s_hh_ll,
+ 0,
+ Opcode_ae_mulasp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulasfp24s.hl.lh", ICLASS_ae_iclass_mulasfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulasfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulasp24s.hl.lh", ICLASS_ae_iclass_mulasp24s_hl_lh,
+ 0,
+ Opcode_ae_mulasp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulsafp24s.hh.ll", ICLASS_ae_iclass_mulsafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulsafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulsap24s.hh.ll", ICLASS_ae_iclass_mulsap24s_hh_ll,
+ 0,
+ Opcode_ae_mulsap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulsafp24s.hl.lh", ICLASS_ae_iclass_mulsafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulsafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulsap24s.hl.lh", ICLASS_ae_iclass_mulsap24s_hl_lh,
+ 0,
+ Opcode_ae_mulsap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulssfp24s.hh.ll", ICLASS_ae_iclass_mulssfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulssfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulssp24s.hh.ll", ICLASS_ae_iclass_mulssp24s_hh_ll,
+ 0,
+ Opcode_ae_mulssp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulssfp24s.hl.lh", ICLASS_ae_iclass_mulssfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulssfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulssp24s.hl.lh", ICLASS_ae_iclass_mulssp24s_hl_lh,
+ 0,
+ Opcode_ae_mulssp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_sha32", ICLASS_ae_iclass_sha32,
+ 0,
+ Opcode_ae_sha32_encode_fns, 0, 0 },
+ { "ae_vldl32t", ICLASS_ae_iclass_vldl32t,
+ 0,
+ Opcode_ae_vldl32t_encode_fns, 1, Opcode_ae_vldl32t_funcUnit_uses },
+ { "ae_vldl16t", ICLASS_ae_iclass_vldl16t,
+ 0,
+ Opcode_ae_vldl16t_encode_fns, 1, Opcode_ae_vldl16t_funcUnit_uses },
+ { "ae_vldl16c", ICLASS_ae_iclass_vldl16c,
+ 0,
+ Opcode_ae_vldl16c_encode_fns, 3, Opcode_ae_vldl16c_funcUnit_uses },
+ { "ae_vldsht", ICLASS_ae_iclass_vldsht,
+ 0,
+ Opcode_ae_vldsht_encode_fns, 3, Opcode_ae_vldsht_funcUnit_uses },
+ { "ae_lb", ICLASS_ae_iclass_lb,
+ 0,
+ Opcode_ae_lb_encode_fns, 1, Opcode_ae_lb_funcUnit_uses },
+ { "ae_lbi", ICLASS_ae_iclass_lbi,
+ 0,
+ Opcode_ae_lbi_encode_fns, 1, Opcode_ae_lbi_funcUnit_uses },
+ { "ae_lbk", ICLASS_ae_iclass_lbk,
+ 0,
+ Opcode_ae_lbk_encode_fns, 1, Opcode_ae_lbk_funcUnit_uses },
+ { "ae_lbki", ICLASS_ae_iclass_lbki,
+ 0,
+ Opcode_ae_lbki_encode_fns, 1, Opcode_ae_lbki_funcUnit_uses },
+ { "ae_db", ICLASS_ae_iclass_db,
+ 0,
+ Opcode_ae_db_encode_fns, 2, Opcode_ae_db_funcUnit_uses },
+ { "ae_dbi", ICLASS_ae_iclass_dbi,
+ 0,
+ Opcode_ae_dbi_encode_fns, 2, Opcode_ae_dbi_funcUnit_uses },
+ { "ae_vlel32t", ICLASS_ae_iclass_vlel32t,
+ 0,
+ Opcode_ae_vlel32t_encode_fns, 1, Opcode_ae_vlel32t_funcUnit_uses },
+ { "ae_vlel16t", ICLASS_ae_iclass_vlel16t,
+ 0,
+ Opcode_ae_vlel16t_encode_fns, 1, Opcode_ae_vlel16t_funcUnit_uses },
+ { "ae_sb", ICLASS_ae_iclass_sb,
+ 0,
+ Opcode_ae_sb_encode_fns, 2, Opcode_ae_sb_funcUnit_uses },
+ { "ae_sbi", ICLASS_ae_iclass_sbi,
+ 0,
+ Opcode_ae_sbi_encode_fns, 2, Opcode_ae_sbi_funcUnit_uses },
+ { "ae_vles16c", ICLASS_ae_iclass_vles16c,
+ 0,
+ Opcode_ae_vles16c_encode_fns, 2, Opcode_ae_vles16c_funcUnit_uses },
+ { "ae_sbf", ICLASS_ae_iclass_sbf,
+ 0,
+ Opcode_ae_sbf_encode_fns, 2, Opcode_ae_sbf_funcUnit_uses },
+ { "ae_slaasq56s", ICLASS_icls_AE_SLAASQ56S,
+ 0,
+ Opcode_ae_slaasq56s_encode_fns, 0, 0 },
+ { "ae_addbrba32", ICLASS_icls_AE_ADDBRBA32,
+ 0,
+ Opcode_ae_addbrba32_encode_fns, 0, 0 },
+ { "ae_minabssp24s", ICLASS_icls_AE_MINABSSP24S,
+ 0,
+ Opcode_ae_minabssp24s_encode_fns, 0, 0 },
+ { "ae_maxabssp24s", ICLASS_icls_AE_MAXABSSP24S,
+ 0,
+ Opcode_ae_maxabssp24s_encode_fns, 0, 0 },
+ { "ae_minabssq56s", ICLASS_icls_AE_MINABSSQ56S,
+ 0,
+ Opcode_ae_minabssq56s_encode_fns, 0, 0 },
+ { "ae_maxabssq56s", ICLASS_icls_AE_MAXABSSQ56S,
+ 0,
+ Opcode_ae_maxabssq56s_encode_fns, 0, 0 },
+ { "rur.ae_cbegin0", ICLASS_rur_ae_cbegin0,
+ 0,
+ Opcode_rur_ae_cbegin0_encode_fns, 0, 0 },
+ { "wur.ae_cbegin0", ICLASS_wur_ae_cbegin0,
+ 0,
+ Opcode_wur_ae_cbegin0_encode_fns, 0, 0 },
+ { "rur.ae_cend0", ICLASS_rur_ae_cend0,
+ 0,
+ Opcode_rur_ae_cend0_encode_fns, 0, 0 },
+ { "wur.ae_cend0", ICLASS_wur_ae_cend0,
+ 0,
+ Opcode_wur_ae_cend0_encode_fns, 0, 0 },
+ { "ae_lp24x2.c", ICLASS_icls_AE_LP24X2_C,
+ 0,
+ Opcode_ae_lp24x2_c_encode_fns, 0, 0 },
+ { "ae_sp24x2s.c", ICLASS_icls_AE_SP24X2S_C,
+ 0,
+ Opcode_ae_sp24x2s_c_encode_fns, 0, 0 },
+ { "ae_lp24x2f.c", ICLASS_icls_AE_LP24X2F_C,
+ 0,
+ Opcode_ae_lp24x2f_c_encode_fns, 0, 0 },
+ { "ae_sp24x2f.c", ICLASS_icls_AE_SP24X2F_C,
+ 0,
+ Opcode_ae_sp24x2f_c_encode_fns, 0, 0 },
+ { "ae_lp16x2f.c", ICLASS_icls_AE_LP16X2F_C,
+ 0,
+ Opcode_ae_lp16x2f_c_encode_fns, 0, 0 },
+ { "ae_sp16x2f.c", ICLASS_icls_AE_SP16X2F_C,
+ 0,
+ Opcode_ae_sp16x2f_c_encode_fns, 0, 0 },
+ { "ae_lp24.c", ICLASS_icls_AE_LP24_C,
+ 0,
+ Opcode_ae_lp24_c_encode_fns, 0, 0 },
+ { "ae_sp24s.l.c", ICLASS_icls_AE_SP24S_L_C,
+ 0,
+ Opcode_ae_sp24s_l_c_encode_fns, 0, 0 },
+ { "ae_lp24f.c", ICLASS_icls_AE_LP24F_C,
+ 0,
+ Opcode_ae_lp24f_c_encode_fns, 0, 0 },
+ { "ae_sp24f.l.c", ICLASS_icls_AE_SP24F_L_C,
+ 0,
+ Opcode_ae_sp24f_l_c_encode_fns, 0, 0 },
+ { "ae_lp16f.c", ICLASS_icls_AE_LP16F_C,
+ 0,
+ Opcode_ae_lp16f_c_encode_fns, 0, 0 },
+ { "ae_sp16f.l.c", ICLASS_icls_AE_SP16F_L_C,
+ 0,
+ Opcode_ae_sp16f_l_c_encode_fns, 0, 0 },
+ { "ae_lq56.c", ICLASS_icls_AE_LQ56_C,
+ 0,
+ Opcode_ae_lq56_c_encode_fns, 0, 0 },
+ { "ae_sq56s.c", ICLASS_icls_AE_SQ56S_C,
+ 0,
+ Opcode_ae_sq56s_c_encode_fns, 0, 0 },
+ { "ae_lq32f.c", ICLASS_icls_AE_LQ32F_C,
+ 0,
+ Opcode_ae_lq32f_c_encode_fns, 0, 0 },
+ { "ae_sq32f.c", ICLASS_icls_AE_SQ32F_C,
+ 0,
+ Opcode_ae_sq32f_c_encode_fns, 0, 0 },
+ { "rur.expstate", ICLASS_rur_expstate,
+ 0,
+ Opcode_rur_expstate_encode_fns, 0, 0 },
+ { "wur.expstate", ICLASS_wur_expstate,
+ 0,
+ Opcode_wur_expstate_encode_fns, 0, 0 },
+ { "read_impwire", ICLASS_iclass_READ_IMPWIRE,
+ 0,
+ Opcode_read_impwire_encode_fns, 0, 0 },
+ { "setb_expstate", ICLASS_iclass_SETB_EXPSTATE,
+ 0,
+ Opcode_setb_expstate_encode_fns, 0, 0 },
+ { "clrb_expstate", ICLASS_iclass_CLRB_EXPSTATE,
+ 0,
+ Opcode_clrb_expstate_encode_fns, 0, 0 },
+ { "wrmsk_expstate", ICLASS_iclass_WRMSK_EXPSTATE,
+ 0,
+ Opcode_wrmsk_expstate_encode_fns, 0, 0 }
+};
+
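+/* Internal opcode identifiers.  The entries below mirror the opcode
+   table above (same opcodes, same order), so an enum value can
+   presumably be used directly as an index into that table. */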
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_RUR_THREADPTR,
+ OPCODE_WUR_THREADPTR,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPNEZ,
+ OPCODE_LOOPGTZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_SIMCALL,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S32NB,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_MEMCTL,
+ OPCODE_WSR_MEMCTL,
+ OPCODE_XSR_MEMCTL,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_CONFIGID0,
+ OPCODE_WSR_CONFIGID0,
+ OPCODE_RSR_CONFIGID1,
+ OPCODE_RSR_243,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPC3,
+ OPCODE_WSR_EPC3,
+ OPCODE_XSR_EPC3,
+ OPCODE_RSR_EXCSAVE3,
+ OPCODE_WSR_EXCSAVE3,
+ OPCODE_XSR_EXCSAVE3,
+ OPCODE_RSR_EPC4,
+ OPCODE_WSR_EPC4,
+ OPCODE_XSR_EPC4,
+ OPCODE_RSR_EXCSAVE4,
+ OPCODE_WSR_EXCSAVE4,
+ OPCODE_XSR_EXCSAVE4,
+ OPCODE_RSR_EPC5,
+ OPCODE_WSR_EPC5,
+ OPCODE_XSR_EPC5,
+ OPCODE_RSR_EXCSAVE5,
+ OPCODE_WSR_EXCSAVE5,
+ OPCODE_XSR_EXCSAVE5,
+ OPCODE_RSR_EPC6,
+ OPCODE_WSR_EPC6,
+ OPCODE_XSR_EPC6,
+ OPCODE_RSR_EXCSAVE6,
+ OPCODE_WSR_EXCSAVE6,
+ OPCODE_XSR_EXCSAVE6,
+ OPCODE_RSR_EPC7,
+ OPCODE_WSR_EPC7,
+ OPCODE_XSR_EPC7,
+ OPCODE_RSR_EXCSAVE7,
+ OPCODE_WSR_EXCSAVE7,
+ OPCODE_XSR_EXCSAVE7,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EPS3,
+ OPCODE_WSR_EPS3,
+ OPCODE_XSR_EPS3,
+ OPCODE_RSR_EPS4,
+ OPCODE_WSR_EPS4,
+ OPCODE_XSR_EPS4,
+ OPCODE_RSR_EPS5,
+ OPCODE_WSR_EPS5,
+ OPCODE_XSR_EPS5,
+ OPCODE_RSR_EPS6,
+ OPCODE_WSR_EPS6,
+ OPCODE_XSR_EPS6,
+ OPCODE_RSR_EPS7,
+ OPCODE_WSR_EPS7,
+ OPCODE_XSR_EPS7,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_MULL,
+ OPCODE_MULUH,
+ OPCODE_MULSH,
+ OPCODE_MUL_AA_LL,
+ OPCODE_MUL_AA_HL,
+ OPCODE_MUL_AA_LH,
+ OPCODE_MUL_AA_HH,
+ OPCODE_UMUL_AA_LL,
+ OPCODE_UMUL_AA_HL,
+ OPCODE_UMUL_AA_LH,
+ OPCODE_UMUL_AA_HH,
+ OPCODE_MUL_AD_LL,
+ OPCODE_MUL_AD_HL,
+ OPCODE_MUL_AD_LH,
+ OPCODE_MUL_AD_HH,
+ OPCODE_MUL_DA_LL,
+ OPCODE_MUL_DA_HL,
+ OPCODE_MUL_DA_LH,
+ OPCODE_MUL_DA_HH,
+ OPCODE_MUL_DD_LL,
+ OPCODE_MUL_DD_HL,
+ OPCODE_MUL_DD_LH,
+ OPCODE_MUL_DD_HH,
+ OPCODE_MULA_AA_LL,
+ OPCODE_MULA_AA_HL,
+ OPCODE_MULA_AA_LH,
+ OPCODE_MULA_AA_HH,
+ OPCODE_MULS_AA_LL,
+ OPCODE_MULS_AA_HL,
+ OPCODE_MULS_AA_LH,
+ OPCODE_MULS_AA_HH,
+ OPCODE_MULA_AD_LL,
+ OPCODE_MULA_AD_HL,
+ OPCODE_MULA_AD_LH,
+ OPCODE_MULA_AD_HH,
+ OPCODE_MULS_AD_LL,
+ OPCODE_MULS_AD_HL,
+ OPCODE_MULS_AD_LH,
+ OPCODE_MULS_AD_HH,
+ OPCODE_MULA_DA_LL,
+ OPCODE_MULA_DA_HL,
+ OPCODE_MULA_DA_LH,
+ OPCODE_MULA_DA_HH,
+ OPCODE_MULS_DA_LL,
+ OPCODE_MULS_DA_HL,
+ OPCODE_MULS_DA_LH,
+ OPCODE_MULS_DA_HH,
+ OPCODE_MULA_DD_LL,
+ OPCODE_MULA_DD_HL,
+ OPCODE_MULA_DD_LH,
+ OPCODE_MULA_DD_HH,
+ OPCODE_MULS_DD_LL,
+ OPCODE_MULS_DD_HL,
+ OPCODE_MULS_DD_LH,
+ OPCODE_MULS_DD_HH,
+ OPCODE_MULA_DA_LL_LDDEC,
+ OPCODE_MULA_DA_LL_LDINC,
+ OPCODE_MULA_DA_HL_LDDEC,
+ OPCODE_MULA_DA_HL_LDINC,
+ OPCODE_MULA_DA_LH_LDDEC,
+ OPCODE_MULA_DA_LH_LDINC,
+ OPCODE_MULA_DA_HH_LDDEC,
+ OPCODE_MULA_DA_HH_LDINC,
+ OPCODE_MULA_DD_LL_LDDEC,
+ OPCODE_MULA_DD_LL_LDINC,
+ OPCODE_MULA_DD_HL_LDDEC,
+ OPCODE_MULA_DD_HL_LDINC,
+ OPCODE_MULA_DD_LH_LDDEC,
+ OPCODE_MULA_DD_LH_LDINC,
+ OPCODE_MULA_DD_HH_LDDEC,
+ OPCODE_MULA_DD_HH_LDINC,
+ OPCODE_LDDEC,
+ OPCODE_LDINC,
+ OPCODE_RSR_M0,
+ OPCODE_WSR_M0,
+ OPCODE_XSR_M0,
+ OPCODE_RSR_M1,
+ OPCODE_WSR_M1,
+ OPCODE_XSR_M1,
+ OPCODE_RSR_M2,
+ OPCODE_WSR_M2,
+ OPCODE_XSR_M2,
+ OPCODE_RSR_M3,
+ OPCODE_WSR_M3,
+ OPCODE_XSR_M3,
+ OPCODE_RSR_ACCLO,
+ OPCODE_WSR_ACCLO,
+ OPCODE_XSR_ACCLO,
+ OPCODE_RSR_ACCHI,
+ OPCODE_WSR_ACCHI,
+ OPCODE_XSR_ACCHI,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DBREAKA0,
+ OPCODE_WSR_DBREAKA0,
+ OPCODE_XSR_DBREAKA0,
+ OPCODE_RSR_DBREAKC0,
+ OPCODE_WSR_DBREAKC0,
+ OPCODE_XSR_DBREAKC0,
+ OPCODE_RSR_DBREAKA1,
+ OPCODE_WSR_DBREAKA1,
+ OPCODE_XSR_DBREAKA1,
+ OPCODE_RSR_DBREAKC1,
+ OPCODE_WSR_DBREAKC1,
+ OPCODE_XSR_DBREAKC1,
+ OPCODE_RSR_IBREAKA0,
+ OPCODE_WSR_IBREAKA0,
+ OPCODE_XSR_IBREAKA0,
+ OPCODE_RSR_IBREAKA1,
+ OPCODE_WSR_IBREAKA1,
+ OPCODE_XSR_IBREAKA1,
+ OPCODE_RSR_IBREAKENABLE,
+ OPCODE_WSR_IBREAKENABLE,
+ OPCODE_XSR_IBREAKENABLE,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_LDDR32_P,
+ OPCODE_SDDR32_P,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_WSR_MMID,
+ OPCODE_ANDB,
+ OPCODE_ANDBC,
+ OPCODE_ORB,
+ OPCODE_ORBC,
+ OPCODE_XORB,
+ OPCODE_ANY4,
+ OPCODE_ALL4,
+ OPCODE_ANY8,
+ OPCODE_ALL8,
+ OPCODE_BF,
+ OPCODE_BT,
+ OPCODE_MOVF,
+ OPCODE_MOVT,
+ OPCODE_RSR_BR,
+ OPCODE_WSR_BR,
+ OPCODE_XSR_BR,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_RSR_CCOMPARE2,
+ OPCODE_WSR_CCOMPARE2,
+ OPCODE_XSR_CCOMPARE2,
+ OPCODE_IPF,
+ OPCODE_IHI,
+ OPCODE_IPFL,
+ OPCODE_IHU,
+ OPCODE_IIU,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWBUI_P,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFW,
+ OPCODE_DPFRO,
+ OPCODE_DPFWO,
+ OPCODE_DPFL,
+ OPCODE_DHU,
+ OPCODE_DIU,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_RSR_PREFCTL,
+ OPCODE_WSR_PREFCTL,
+ OPCODE_XSR_PREFCTL,
+ OPCODE_WSR_PTEVADDR,
+ OPCODE_RSR_PTEVADDR,
+ OPCODE_XSR_PTEVADDR,
+ OPCODE_RSR_RASID,
+ OPCODE_WSR_RASID,
+ OPCODE_XSR_RASID,
+ OPCODE_RSR_ITLBCFG,
+ OPCODE_WSR_ITLBCFG,
+ OPCODE_XSR_ITLBCFG,
+ OPCODE_RSR_DTLBCFG,
+ OPCODE_WSR_DTLBCFG,
+ OPCODE_XSR_DTLBCFG,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_LDPTE,
+ OPCODE_HWWITLBA,
+ OPCODE_HWWDTLBA,
+ OPCODE_RSR_CPENABLE,
+ OPCODE_WSR_CPENABLE,
+ OPCODE_XSR_CPENABLE,
+ OPCODE_CLAMPS,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_QUOU,
+ OPCODE_QUOS,
+ OPCODE_REMU,
+ OPCODE_REMS,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_AE_OVF_SAR,
+ OPCODE_WUR_AE_OVF_SAR,
+ OPCODE_RUR_AE_BITHEAD,
+ OPCODE_WUR_AE_BITHEAD,
+ OPCODE_RUR_AE_TS_FTS_BU_BP,
+ OPCODE_WUR_AE_TS_FTS_BU_BP,
+ OPCODE_RUR_AE_SD_NO,
+ OPCODE_WUR_AE_SD_NO,
+ OPCODE_RUR_AE_OVERFLOW,
+ OPCODE_WUR_AE_OVERFLOW,
+ OPCODE_RUR_AE_SAR,
+ OPCODE_WUR_AE_SAR,
+ OPCODE_RUR_AE_BITPTR,
+ OPCODE_WUR_AE_BITPTR,
+ OPCODE_RUR_AE_BITSUSED,
+ OPCODE_WUR_AE_BITSUSED,
+ OPCODE_RUR_AE_TABLESIZE,
+ OPCODE_WUR_AE_TABLESIZE,
+ OPCODE_RUR_AE_FIRST_TS,
+ OPCODE_WUR_AE_FIRST_TS,
+ OPCODE_RUR_AE_NEXTOFFSET,
+ OPCODE_WUR_AE_NEXTOFFSET,
+ OPCODE_RUR_AE_SEARCHDONE,
+ OPCODE_WUR_AE_SEARCHDONE,
+ OPCODE_AE_LP16F_I,
+ OPCODE_AE_LP16F_IU,
+ OPCODE_AE_LP16F_X,
+ OPCODE_AE_LP16F_XU,
+ OPCODE_AE_LP24_I,
+ OPCODE_AE_LP24_IU,
+ OPCODE_AE_LP24_X,
+ OPCODE_AE_LP24_XU,
+ OPCODE_AE_LP24F_I,
+ OPCODE_AE_LP24F_IU,
+ OPCODE_AE_LP24F_X,
+ OPCODE_AE_LP24F_XU,
+ OPCODE_AE_LP16X2F_I,
+ OPCODE_AE_LP16X2F_IU,
+ OPCODE_AE_LP16X2F_X,
+ OPCODE_AE_LP16X2F_XU,
+ OPCODE_AE_LP24X2F_I,
+ OPCODE_AE_LP24X2F_IU,
+ OPCODE_AE_LP24X2F_X,
+ OPCODE_AE_LP24X2F_XU,
+ OPCODE_AE_LP24X2_I,
+ OPCODE_AE_LP24X2_IU,
+ OPCODE_AE_LP24X2_X,
+ OPCODE_AE_LP24X2_XU,
+ OPCODE_AE_SP16X2F_I,
+ OPCODE_AE_SP16X2F_IU,
+ OPCODE_AE_SP16X2F_X,
+ OPCODE_AE_SP16X2F_XU,
+ OPCODE_AE_SP24X2S_I,
+ OPCODE_AE_SP24X2S_IU,
+ OPCODE_AE_SP24X2S_X,
+ OPCODE_AE_SP24X2S_XU,
+ OPCODE_AE_SP24X2F_I,
+ OPCODE_AE_SP24X2F_IU,
+ OPCODE_AE_SP24X2F_X,
+ OPCODE_AE_SP24X2F_XU,
+ OPCODE_AE_SP16F_L_I,
+ OPCODE_AE_SP16F_L_IU,
+ OPCODE_AE_SP16F_L_X,
+ OPCODE_AE_SP16F_L_XU,
+ OPCODE_AE_SP24S_L_I,
+ OPCODE_AE_SP24S_L_IU,
+ OPCODE_AE_SP24S_L_X,
+ OPCODE_AE_SP24S_L_XU,
+ OPCODE_AE_SP24F_L_I,
+ OPCODE_AE_SP24F_L_IU,
+ OPCODE_AE_SP24F_L_X,
+ OPCODE_AE_SP24F_L_XU,
+ OPCODE_AE_LQ56_I,
+ OPCODE_AE_LQ56_IU,
+ OPCODE_AE_LQ56_X,
+ OPCODE_AE_LQ56_XU,
+ OPCODE_AE_LQ32F_I,
+ OPCODE_AE_LQ32F_IU,
+ OPCODE_AE_LQ32F_X,
+ OPCODE_AE_LQ32F_XU,
+ OPCODE_AE_SQ56S_I,
+ OPCODE_AE_SQ56S_IU,
+ OPCODE_AE_SQ56S_X,
+ OPCODE_AE_SQ56S_XU,
+ OPCODE_AE_SQ32F_I,
+ OPCODE_AE_SQ32F_IU,
+ OPCODE_AE_SQ32F_X,
+ OPCODE_AE_SQ32F_XU,
+ OPCODE_AE_ZEROP48,
+ OPCODE_AE_MOVP48,
+ OPCODE_AE_SELP24_LL,
+ OPCODE_AE_SELP24_LH,
+ OPCODE_AE_SELP24_HL,
+ OPCODE_AE_SELP24_HH,
+ OPCODE_AE_MOVTP24X2,
+ OPCODE_AE_MOVFP24X2,
+ OPCODE_AE_MOVTP48,
+ OPCODE_AE_MOVFP48,
+ OPCODE_AE_MOVPA24X2,
+ OPCODE_AE_TRUNCP24A32X2,
+ OPCODE_AE_CVTA32P24_L,
+ OPCODE_AE_CVTA32P24_H,
+ OPCODE_AE_CVTP24A16X2_LL,
+ OPCODE_AE_CVTP24A16X2_LH,
+ OPCODE_AE_CVTP24A16X2_HL,
+ OPCODE_AE_CVTP24A16X2_HH,
+ OPCODE_AE_TRUNCP24Q48X2,
+ OPCODE_AE_TRUNCP16,
+ OPCODE_AE_ROUNDSP24Q48SYM,
+ OPCODE_AE_ROUNDSP24Q48ASYM,
+ OPCODE_AE_ROUNDSP16Q48SYM,
+ OPCODE_AE_ROUNDSP16Q48ASYM,
+ OPCODE_AE_ROUNDSP16SYM,
+ OPCODE_AE_ROUNDSP16ASYM,
+ OPCODE_AE_ZEROQ56,
+ OPCODE_AE_MOVQ56,
+ OPCODE_AE_MOVTQ56,
+ OPCODE_AE_MOVFQ56,
+ OPCODE_AE_CVTQ48A32S,
+ OPCODE_AE_CVTQ48P24S_L,
+ OPCODE_AE_CVTQ48P24S_H,
+ OPCODE_AE_SATQ48S,
+ OPCODE_AE_TRUNCQ32,
+ OPCODE_AE_ROUNDSQ32SYM,
+ OPCODE_AE_ROUNDSQ32ASYM,
+ OPCODE_AE_TRUNCA32Q48,
+ OPCODE_AE_MOVAP24S_L,
+ OPCODE_AE_MOVAP24S_H,
+ OPCODE_AE_TRUNCA16P24S_L,
+ OPCODE_AE_TRUNCA16P24S_H,
+ OPCODE_AE_ADDP24,
+ OPCODE_AE_SUBP24,
+ OPCODE_AE_NEGP24,
+ OPCODE_AE_ABSP24,
+ OPCODE_AE_MAXP24S,
+ OPCODE_AE_MINP24S,
+ OPCODE_AE_MAXBP24S,
+ OPCODE_AE_MINBP24S,
+ OPCODE_AE_ADDSP24S,
+ OPCODE_AE_SUBSP24S,
+ OPCODE_AE_NEGSP24S,
+ OPCODE_AE_ABSSP24S,
+ OPCODE_AE_ANDP48,
+ OPCODE_AE_NANDP48,
+ OPCODE_AE_ORP48,
+ OPCODE_AE_XORP48,
+ OPCODE_AE_LTP24S,
+ OPCODE_AE_LEP24S,
+ OPCODE_AE_EQP24,
+ OPCODE_AE_ADDQ56,
+ OPCODE_AE_SUBQ56,
+ OPCODE_AE_NEGQ56,
+ OPCODE_AE_ABSQ56,
+ OPCODE_AE_MAXQ56S,
+ OPCODE_AE_MINQ56S,
+ OPCODE_AE_MAXBQ56S,
+ OPCODE_AE_MINBQ56S,
+ OPCODE_AE_ADDSQ56S,
+ OPCODE_AE_SUBSQ56S,
+ OPCODE_AE_NEGSQ56S,
+ OPCODE_AE_ABSSQ56S,
+ OPCODE_AE_ANDQ56,
+ OPCODE_AE_NANDQ56,
+ OPCODE_AE_ORQ56,
+ OPCODE_AE_XORQ56,
+ OPCODE_AE_SLLIP24,
+ OPCODE_AE_SRLIP24,
+ OPCODE_AE_SRAIP24,
+ OPCODE_AE_SLLSP24,
+ OPCODE_AE_SRLSP24,
+ OPCODE_AE_SRASP24,
+ OPCODE_AE_SLLISP24S,
+ OPCODE_AE_SLLSSP24S,
+ OPCODE_AE_SLLIQ56,
+ OPCODE_AE_SRLIQ56,
+ OPCODE_AE_SRAIQ56,
+ OPCODE_AE_SLLSQ56,
+ OPCODE_AE_SRLSQ56,
+ OPCODE_AE_SRASQ56,
+ OPCODE_AE_SLLAQ56,
+ OPCODE_AE_SRLAQ56,
+ OPCODE_AE_SRAAQ56,
+ OPCODE_AE_SLLISQ56S,
+ OPCODE_AE_SLLSSQ56S,
+ OPCODE_AE_SLLASQ56S,
+ OPCODE_AE_LTQ56S,
+ OPCODE_AE_LEQ56S,
+ OPCODE_AE_EQQ56,
+ OPCODE_AE_NSAQ56S,
+ OPCODE_AE_MULSRFQ32SP24S_H,
+ OPCODE_AE_MULSRFQ32SP24S_L,
+ OPCODE_AE_MULARFQ32SP24S_H,
+ OPCODE_AE_MULARFQ32SP24S_L,
+ OPCODE_AE_MULRFQ32SP24S_H,
+ OPCODE_AE_MULRFQ32SP24S_L,
+ OPCODE_AE_MULSFQ32SP24S_H,
+ OPCODE_AE_MULSFQ32SP24S_L,
+ OPCODE_AE_MULAFQ32SP24S_H,
+ OPCODE_AE_MULAFQ32SP24S_L,
+ OPCODE_AE_MULFQ32SP24S_H,
+ OPCODE_AE_MULFQ32SP24S_L,
+ OPCODE_AE_MULFS32P16S_LL,
+ OPCODE_AE_MULFP24S_LL,
+ OPCODE_AE_MULP24S_LL,
+ OPCODE_AE_MULFS32P16S_LH,
+ OPCODE_AE_MULFP24S_LH,
+ OPCODE_AE_MULP24S_LH,
+ OPCODE_AE_MULFS32P16S_HL,
+ OPCODE_AE_MULFP24S_HL,
+ OPCODE_AE_MULP24S_HL,
+ OPCODE_AE_MULFS32P16S_HH,
+ OPCODE_AE_MULFP24S_HH,
+ OPCODE_AE_MULP24S_HH,
+ OPCODE_AE_MULAFS32P16S_LL,
+ OPCODE_AE_MULAFP24S_LL,
+ OPCODE_AE_MULAP24S_LL,
+ OPCODE_AE_MULAFS32P16S_LH,
+ OPCODE_AE_MULAFP24S_LH,
+ OPCODE_AE_MULAP24S_LH,
+ OPCODE_AE_MULAFS32P16S_HL,
+ OPCODE_AE_MULAFP24S_HL,
+ OPCODE_AE_MULAP24S_HL,
+ OPCODE_AE_MULAFS32P16S_HH,
+ OPCODE_AE_MULAFP24S_HH,
+ OPCODE_AE_MULAP24S_HH,
+ OPCODE_AE_MULSFS32P16S_LL,
+ OPCODE_AE_MULSFP24S_LL,
+ OPCODE_AE_MULSP24S_LL,
+ OPCODE_AE_MULSFS32P16S_LH,
+ OPCODE_AE_MULSFP24S_LH,
+ OPCODE_AE_MULSP24S_LH,
+ OPCODE_AE_MULSFS32P16S_HL,
+ OPCODE_AE_MULSFP24S_HL,
+ OPCODE_AE_MULSP24S_HL,
+ OPCODE_AE_MULSFS32P16S_HH,
+ OPCODE_AE_MULSFP24S_HH,
+ OPCODE_AE_MULSP24S_HH,
+ OPCODE_AE_MULAFS56P24S_LL,
+ OPCODE_AE_MULAS56P24S_LL,
+ OPCODE_AE_MULAFS56P24S_LH,
+ OPCODE_AE_MULAS56P24S_LH,
+ OPCODE_AE_MULAFS56P24S_HL,
+ OPCODE_AE_MULAS56P24S_HL,
+ OPCODE_AE_MULAFS56P24S_HH,
+ OPCODE_AE_MULAS56P24S_HH,
+ OPCODE_AE_MULSFS56P24S_LL,
+ OPCODE_AE_MULSS56P24S_LL,
+ OPCODE_AE_MULSFS56P24S_LH,
+ OPCODE_AE_MULSS56P24S_LH,
+ OPCODE_AE_MULSFS56P24S_HL,
+ OPCODE_AE_MULSS56P24S_HL,
+ OPCODE_AE_MULSFS56P24S_HH,
+ OPCODE_AE_MULSS56P24S_HH,
+ OPCODE_AE_MULFQ32SP16S_L,
+ OPCODE_AE_MULFQ32SP16S_H,
+ OPCODE_AE_MULFQ32SP16U_L,
+ OPCODE_AE_MULFQ32SP16U_H,
+ OPCODE_AE_MULQ32SP16S_L,
+ OPCODE_AE_MULQ32SP16S_H,
+ OPCODE_AE_MULQ32SP16U_L,
+ OPCODE_AE_MULQ32SP16U_H,
+ OPCODE_AE_MULAFQ32SP16S_L,
+ OPCODE_AE_MULAFQ32SP16S_H,
+ OPCODE_AE_MULAFQ32SP16U_L,
+ OPCODE_AE_MULAFQ32SP16U_H,
+ OPCODE_AE_MULAQ32SP16S_L,
+ OPCODE_AE_MULAQ32SP16S_H,
+ OPCODE_AE_MULAQ32SP16U_L,
+ OPCODE_AE_MULAQ32SP16U_H,
+ OPCODE_AE_MULSFQ32SP16S_L,
+ OPCODE_AE_MULSFQ32SP16S_H,
+ OPCODE_AE_MULSFQ32SP16U_L,
+ OPCODE_AE_MULSFQ32SP16U_H,
+ OPCODE_AE_MULSQ32SP16S_L,
+ OPCODE_AE_MULSQ32SP16S_H,
+ OPCODE_AE_MULSQ32SP16U_L,
+ OPCODE_AE_MULSQ32SP16U_H,
+ OPCODE_AE_MULZAAQ32SP16S_LL,
+ OPCODE_AE_MULZAAFQ32SP16S_LL,
+ OPCODE_AE_MULZAAQ32SP16U_LL,
+ OPCODE_AE_MULZAAFQ32SP16U_LL,
+ OPCODE_AE_MULZAAQ32SP16S_HH,
+ OPCODE_AE_MULZAAFQ32SP16S_HH,
+ OPCODE_AE_MULZAAQ32SP16U_HH,
+ OPCODE_AE_MULZAAFQ32SP16U_HH,
+ OPCODE_AE_MULZAAQ32SP16S_LH,
+ OPCODE_AE_MULZAAFQ32SP16S_LH,
+ OPCODE_AE_MULZAAQ32SP16U_LH,
+ OPCODE_AE_MULZAAFQ32SP16U_LH,
+ OPCODE_AE_MULZASQ32SP16S_LL,
+ OPCODE_AE_MULZASFQ32SP16S_LL,
+ OPCODE_AE_MULZASQ32SP16U_LL,
+ OPCODE_AE_MULZASFQ32SP16U_LL,
+ OPCODE_AE_MULZASQ32SP16S_HH,
+ OPCODE_AE_MULZASFQ32SP16S_HH,
+ OPCODE_AE_MULZASQ32SP16U_HH,
+ OPCODE_AE_MULZASFQ32SP16U_HH,
+ OPCODE_AE_MULZASQ32SP16S_LH,
+ OPCODE_AE_MULZASFQ32SP16S_LH,
+ OPCODE_AE_MULZASQ32SP16U_LH,
+ OPCODE_AE_MULZASFQ32SP16U_LH,
+ OPCODE_AE_MULZSAQ32SP16S_LL,
+ OPCODE_AE_MULZSAFQ32SP16S_LL,
+ OPCODE_AE_MULZSAQ32SP16U_LL,
+ OPCODE_AE_MULZSAFQ32SP16U_LL,
+ OPCODE_AE_MULZSAQ32SP16S_HH,
+ OPCODE_AE_MULZSAFQ32SP16S_HH,
+ OPCODE_AE_MULZSAQ32SP16U_HH,
+ OPCODE_AE_MULZSAFQ32SP16U_HH,
+ OPCODE_AE_MULZSAQ32SP16S_LH,
+ OPCODE_AE_MULZSAFQ32SP16S_LH,
+ OPCODE_AE_MULZSAQ32SP16U_LH,
+ OPCODE_AE_MULZSAFQ32SP16U_LH,
+ OPCODE_AE_MULZSSQ32SP16S_LL,
+ OPCODE_AE_MULZSSFQ32SP16S_LL,
+ OPCODE_AE_MULZSSQ32SP16U_LL,
+ OPCODE_AE_MULZSSFQ32SP16U_LL,
+ OPCODE_AE_MULZSSQ32SP16S_HH,
+ OPCODE_AE_MULZSSFQ32SP16S_HH,
+ OPCODE_AE_MULZSSQ32SP16U_HH,
+ OPCODE_AE_MULZSSFQ32SP16U_HH,
+ OPCODE_AE_MULZSSQ32SP16S_LH,
+ OPCODE_AE_MULZSSFQ32SP16S_LH,
+ OPCODE_AE_MULZSSQ32SP16U_LH,
+ OPCODE_AE_MULZSSFQ32SP16U_LH,
+ OPCODE_AE_MULZAAFP24S_HH_LL,
+ OPCODE_AE_MULZAAP24S_HH_LL,
+ OPCODE_AE_MULZAAFP24S_HL_LH,
+ OPCODE_AE_MULZAAP24S_HL_LH,
+ OPCODE_AE_MULZASFP24S_HH_LL,
+ OPCODE_AE_MULZASP24S_HH_LL,
+ OPCODE_AE_MULZASFP24S_HL_LH,
+ OPCODE_AE_MULZASP24S_HL_LH,
+ OPCODE_AE_MULZSAFP24S_HH_LL,
+ OPCODE_AE_MULZSAP24S_HH_LL,
+ OPCODE_AE_MULZSAFP24S_HL_LH,
+ OPCODE_AE_MULZSAP24S_HL_LH,
+ OPCODE_AE_MULZSSFP24S_HH_LL,
+ OPCODE_AE_MULZSSP24S_HH_LL,
+ OPCODE_AE_MULZSSFP24S_HL_LH,
+ OPCODE_AE_MULZSSP24S_HL_LH,
+ OPCODE_AE_MULAAFP24S_HH_LL,
+ OPCODE_AE_MULAAP24S_HH_LL,
+ OPCODE_AE_MULAAFP24S_HL_LH,
+ OPCODE_AE_MULAAP24S_HL_LH,
+ OPCODE_AE_MULASFP24S_HH_LL,
+ OPCODE_AE_MULASP24S_HH_LL,
+ OPCODE_AE_MULASFP24S_HL_LH,
+ OPCODE_AE_MULASP24S_HL_LH,
+ OPCODE_AE_MULSAFP24S_HH_LL,
+ OPCODE_AE_MULSAP24S_HH_LL,
+ OPCODE_AE_MULSAFP24S_HL_LH,
+ OPCODE_AE_MULSAP24S_HL_LH,
+ OPCODE_AE_MULSSFP24S_HH_LL,
+ OPCODE_AE_MULSSP24S_HH_LL,
+ OPCODE_AE_MULSSFP24S_HL_LH,
+ OPCODE_AE_MULSSP24S_HL_LH,
+ OPCODE_AE_SHA32,
+ OPCODE_AE_VLDL32T,
+ OPCODE_AE_VLDL16T,
+ OPCODE_AE_VLDL16C,
+ OPCODE_AE_VLDSHT,
+ OPCODE_AE_LB,
+ OPCODE_AE_LBI,
+ OPCODE_AE_LBK,
+ OPCODE_AE_LBKI,
+ OPCODE_AE_DB,
+ OPCODE_AE_DBI,
+ OPCODE_AE_VLEL32T,
+ OPCODE_AE_VLEL16T,
+ OPCODE_AE_SB,
+ OPCODE_AE_SBI,
+ OPCODE_AE_VLES16C,
+ OPCODE_AE_SBF,
+ OPCODE_AE_SLAASQ56S,
+ OPCODE_AE_ADDBRBA32,
+ OPCODE_AE_MINABSSP24S,
+ OPCODE_AE_MAXABSSP24S,
+ OPCODE_AE_MINABSSQ56S,
+ OPCODE_AE_MAXABSSQ56S,
+ OPCODE_RUR_AE_CBEGIN0,
+ OPCODE_WUR_AE_CBEGIN0,
+ OPCODE_RUR_AE_CEND0,
+ OPCODE_WUR_AE_CEND0,
+ OPCODE_AE_LP24X2_C,
+ OPCODE_AE_SP24X2S_C,
+ OPCODE_AE_LP24X2F_C,
+ OPCODE_AE_SP24X2F_C,
+ OPCODE_AE_LP16X2F_C,
+ OPCODE_AE_SP16X2F_C,
+ OPCODE_AE_LP24_C,
+ OPCODE_AE_SP24S_L_C,
+ OPCODE_AE_LP24F_C,
+ OPCODE_AE_SP24F_L_C,
+ OPCODE_AE_LP16F_C,
+ OPCODE_AE_SP16F_L_C,
+ OPCODE_AE_LQ56_C,
+ OPCODE_AE_SQ56S_C,
+ OPCODE_AE_LQ32F_C,
+ OPCODE_AE_SQ32F_C,
+ OPCODE_RUR_EXPSTATE,
+ OPCODE_WUR_EXPSTATE,
+ OPCODE_READ_IMPWIRE,
+ OPCODE_SETB_EXPSTATE,
+ OPCODE_CLRB_EXPSTATE,
+ OPCODE_WRMSK_EXPSTATE
+};
+
+
+/* Slot-specific opcode decode functions. */
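+/* Each decoder below walks the instruction word through a nested
+   series of opcode-field tests (op0 first, then op1/op2 and the
+   r/s/t subfields) and returns the matching OPCODE_* identifier
+   from the enum above. */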
+
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_RET;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_RETW;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_JX;
+ }
+ if (Field_m_Slot_inst_get (insn) == 3)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALLX0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALLX4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALLX8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALLX12;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_MOVSP;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_ISYNC;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RSYNC;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_ESYNC;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DSYNC;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_EXCW;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MEMW;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_EXTW;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_NOP;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 3)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_RFE;
+ if (Field_s_Slot_inst_get (insn) == 2)
+ return OPCODE_RFDE;
+ if (Field_s_Slot_inst_get (insn) == 4)
+ return OPCODE_RFWO;
+ if (Field_s_Slot_inst_get (insn) == 5)
+ return OPCODE_RFWU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFI;
+ }
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BREAK;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ }
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RSIL;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_LDDR32_P;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_SDDR32_P;
+ }
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_ANY4;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_ALL4;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_ANY8;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_ALL8;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OR;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_XOR;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RER;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_WER;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_NSA;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_NSAU;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_HWWITLBA;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_RITLB0;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_PITLB;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_WITLB;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_RITLB1;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_HWWDTLBA;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_RDTLB0;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_PDTLB;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_WDTLB;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_RDTLB1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ADD;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_ADDX2;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_ADDX4;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_ADDX8;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_SUB;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_SUBX2;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_SUBX4;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_SUBX8;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 1)
+ {
+ if ((Field_op2_Slot_inst_get (insn) == 0 ||
+ Field_op2_Slot_inst_get (insn) == 1))
+ return OPCODE_SLLI;
+ if ((Field_op2_Slot_inst_get (insn) == 2 ||
+ Field_op2_Slot_inst_get (insn) == 3))
+ return OPCODE_SRAI;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SRLI;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
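+ /* For XSR the sr field carries the special-register number:
+ e.g. 3 = SAR, 72 = WINDOWBASE, 230 = PS, 234 = CCOUNT. */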
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_XSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_XSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_XSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_XSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_XSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_XSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_XSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_XSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_XSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_XSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_XSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_XSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_XSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 40)
+ return OPCODE_XSR_PREFCTL;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_XSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_XSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_XSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_XSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_XSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_XSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_XSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_XSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_XSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_XSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_XSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_XSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_XSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_XSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_XSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_XSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_XSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_XSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_XSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_XSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_XSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_XSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_XSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_XSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_XSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_XSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_XSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_XSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_XSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_XSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_XSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_XSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_XSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_XSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_XSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_XSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_XSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_XSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_XSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_XSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_XSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_XSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_XSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_XSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_XSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_XSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_XSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_XSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_XSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_XSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_XSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_XSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_SRC;
+ if (Field_op2_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_op2_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_op2_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MUL16U;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MUL16S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_LICT;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_SICT;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_LICW;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_SICW;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LDCT;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_SDCT;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_LDPTE;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_ANDB;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_ANDBC;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_ORB;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_ORBC;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_XORB;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MULL;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MULUH;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MULSH;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_QUOU;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_QUOS;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_REMU;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_REMS;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
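+ /* RSR uses the same special-register numbering as XSR above,
+ plus registers that are only readable here (CONFIGID0/1,
+ INTERRUPT, PRID). */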
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_RSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_RSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_RSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_RSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_RSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_RSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_RSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_RSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_RSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_RSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_RSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_RSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_RSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 40)
+ return OPCODE_RSR_PREFCTL;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_RSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_RSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_RSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_RSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_RSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_RSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_RSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_RSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_RSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_RSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_RSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_RSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_RSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_RSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_RSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_RSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_RSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_RSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_RSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_RSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_RSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_RSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_RSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_RSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_RSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_RSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_RSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_RSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_RSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_RSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_RSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 208)
+ return OPCODE_RSR_CONFIGID1;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_RSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_RSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_RSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_RSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_RSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_RSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_RSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_RSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_RSR_INTERRUPT;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_RSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_RSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_RSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_RSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_RSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_RSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 235)
+ return OPCODE_RSR_PRID;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_RSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_RSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_RSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_RSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_RSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_RSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 243)
+ return OPCODE_RSR_243;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_RSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_RSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
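+ /* WSR: same numbering again; registers with write-only or
+ side-effect semantics such as MMID (89), INTSET (226) and
+ INTCLEAR (227) appear only in this chain. */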
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_WSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_WSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_WSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_WSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_WSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_WSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_WSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 16)
+ return OPCODE_WSR_ACCLO;
+ if (Field_sr_Slot_inst_get (insn) == 17)
+ return OPCODE_WSR_ACCHI;
+ if (Field_sr_Slot_inst_get (insn) == 32)
+ return OPCODE_WSR_M0;
+ if (Field_sr_Slot_inst_get (insn) == 33)
+ return OPCODE_WSR_M1;
+ if (Field_sr_Slot_inst_get (insn) == 34)
+ return OPCODE_WSR_M2;
+ if (Field_sr_Slot_inst_get (insn) == 35)
+ return OPCODE_WSR_M3;
+ if (Field_sr_Slot_inst_get (insn) == 40)
+ return OPCODE_WSR_PREFCTL;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_WSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_WSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_WSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 89)
+ return OPCODE_WSR_MMID;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_WSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_WSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_WSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 96)
+ return OPCODE_WSR_IBREAKENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 97)
+ return OPCODE_WSR_MEMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_WSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_WSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 128)
+ return OPCODE_WSR_IBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 129)
+ return OPCODE_WSR_IBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 144)
+ return OPCODE_WSR_DBREAKA0;
+ if (Field_sr_Slot_inst_get (insn) == 145)
+ return OPCODE_WSR_DBREAKA1;
+ if (Field_sr_Slot_inst_get (insn) == 160)
+ return OPCODE_WSR_DBREAKC0;
+ if (Field_sr_Slot_inst_get (insn) == 161)
+ return OPCODE_WSR_DBREAKC1;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_WSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_WSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_WSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 179)
+ return OPCODE_WSR_EPC3;
+ if (Field_sr_Slot_inst_get (insn) == 180)
+ return OPCODE_WSR_EPC4;
+ if (Field_sr_Slot_inst_get (insn) == 181)
+ return OPCODE_WSR_EPC5;
+ if (Field_sr_Slot_inst_get (insn) == 182)
+ return OPCODE_WSR_EPC6;
+ if (Field_sr_Slot_inst_get (insn) == 183)
+ return OPCODE_WSR_EPC7;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_WSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_WSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 195)
+ return OPCODE_WSR_EPS3;
+ if (Field_sr_Slot_inst_get (insn) == 196)
+ return OPCODE_WSR_EPS4;
+ if (Field_sr_Slot_inst_get (insn) == 197)
+ return OPCODE_WSR_EPS5;
+ if (Field_sr_Slot_inst_get (insn) == 198)
+ return OPCODE_WSR_EPS6;
+ if (Field_sr_Slot_inst_get (insn) == 199)
+ return OPCODE_WSR_EPS7;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_WSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_WSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 211)
+ return OPCODE_WSR_EXCSAVE3;
+ if (Field_sr_Slot_inst_get (insn) == 212)
+ return OPCODE_WSR_EXCSAVE4;
+ if (Field_sr_Slot_inst_get (insn) == 213)
+ return OPCODE_WSR_EXCSAVE5;
+ if (Field_sr_Slot_inst_get (insn) == 214)
+ return OPCODE_WSR_EXCSAVE6;
+ if (Field_sr_Slot_inst_get (insn) == 215)
+ return OPCODE_WSR_EXCSAVE7;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_WSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_WSR_INTSET;
+ if (Field_sr_Slot_inst_get (insn) == 227)
+ return OPCODE_WSR_INTCLEAR;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_WSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_WSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_WSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_WSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_WSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WSR_CCOMPARE2;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_WSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_WSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_SEXT;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_CLAMPS;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MIN;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MAX;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MINU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_MAXU;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MOVF;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MOVT;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ {
+ if (Field_st_Slot_inst_get (insn) == 230)
+ return OPCODE_RUR_EXPSTATE;
+ if (Field_st_Slot_inst_get (insn) == 231)
+ return OPCODE_RUR_THREADPTR;
+ if (Field_st_Slot_inst_get (insn) == 240)
+ return OPCODE_RUR_AE_OVF_SAR;
+ if (Field_st_Slot_inst_get (insn) == 241)
+ return OPCODE_RUR_AE_BITHEAD;
+ if (Field_st_Slot_inst_get (insn) == 242)
+ return OPCODE_RUR_AE_TS_FTS_BU_BP;
+ if (Field_st_Slot_inst_get (insn) == 243)
+ return OPCODE_RUR_AE_SD_NO;
+ if (Field_st_Slot_inst_get (insn) == 246)
+ return OPCODE_RUR_AE_CBEGIN0;
+ if (Field_st_Slot_inst_get (insn) == 247)
+ return OPCODE_RUR_AE_CEND0;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WUR_EXPSTATE;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WUR_THREADPTR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WUR_AE_OVF_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WUR_AE_BITHEAD;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WUR_AE_TS_FTS_BU_BP;
+ if (Field_sr_Slot_inst_get (insn) == 243)
+ return OPCODE_WUR_AE_SD_NO;
+ if (Field_sr_Slot_inst_get (insn) == 246)
+ return OPCODE_WUR_AE_CBEGIN0;
+ if (Field_sr_Slot_inst_get (insn) == 247)
+ return OPCODE_WUR_AE_CEND0;
+ }
+ }
+ if ((Field_op1_Slot_inst_get (insn) == 4 ||
+ Field_op1_Slot_inst_get (insn) == 5))
+ return OPCODE_EXTUI;
+ if (Field_op1_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_L32E;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_S32E;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_S32NB;
+ }
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_READ_IMPWIRE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_SETB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s3to1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_CLRB_EXPSTATE;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14)
+ return OPCODE_WRMSK_EXPSTATE;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 1)
+ return OPCODE_L32R;
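+ /* op0 == 2: loads, stores, and the cache/prefetch operations. */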
+ if (Field_op0_Slot_inst_get (insn) == 2)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_L32I;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_S16I;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_S32I;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFR;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_DPFW;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_DPFRO;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DPFWO;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_DHWB;
+ if (Field_t_Slot_inst_get (insn) == 5)
+ return OPCODE_DHWBI;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_DHI;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DII;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_DHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_DIU;
+ if (Field_op1_Slot_inst_get (insn) == 4)
+ return OPCODE_DIWB;
+ if (Field_op1_Slot_inst_get (insn) == 5)
+ return OPCODE_DIWBI;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_DIWBUI_P;
+ }
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_IPF;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ return OPCODE_IPFL;
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ return OPCODE_IHU;
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ return OPCODE_IIU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_IHI;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_III;
+ }
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVI;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_L32AI;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_S32C1I;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_S32RI;
+ }
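+ /* op0 == 4: the MAC16 multiply/accumulate group (MUL/MULA/MULS/
+ UMUL with LDINC/LDDEC) together with this core's AE opcodes,
+ apparently its audio-engine DSP extension. */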
+ if (Field_op0_Slot_inst_get (insn) == 4)
+ {
+ if (Field_ae_r10_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_I;
+ if (Field_ae_r10_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_X;
+ if (Field_ae_r10_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_I;
+ if (Field_ae_r10_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_X;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_IU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_XU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_CVTQ48A32S;
+ if (Field_ae_r10_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_IU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_ae_s3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVP48;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVPA24X2;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_CVTA32P24_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_LL;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_HL;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVAP24S_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCA16P24S_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP16X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24X2_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCP24A32X2;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_CVTA32P24_H;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_LH;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_HH;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVAP24S_H;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCA16P24S_H;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_I;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_X;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_TRUNCA32Q48;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_I;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_X;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_NSAQ56S;
+ if (Field_ae_r32_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_IU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_XU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_IU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_XU;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRLIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRAIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLISQ56S;
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SHA32;
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLDL32T;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SLLAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLDL16T;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SRLAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LBK;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SRAAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLEL32T;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SLLASQ56S;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLEL16T;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_MOVTQ56;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_MOVFQ56;
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AD_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t3_Slot_inst_get (insn) == 0 &&
+ Field_tlo_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AD_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDINC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH_LDDEC;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH_LDDEC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_DA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_w_Slot_inst_get (insn) == 0 &&
+ Field_r3_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_DA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_UMUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MUL_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULA_AA_HH;
+ if (Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LL;
+ if (Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HL;
+ if (Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_LH;
+ if (Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_MULS_AA_HH;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDINC;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_rhi_Slot_inst_get (insn) == 0)
+ return OPCODE_LDDEC;
+ }
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_OVERFLOW;
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_SBI;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_SAR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_DB;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_SB;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_BITPTR;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_BITSUSED;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_TABLESIZE;
+ if (Field_r_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_FIRST_TS;
+ if (Field_r_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_NEXTOFFSET;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_SEARCHDONE;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_VLDSHT;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_VLES16C;
+ if (Field_r_Slot_inst_get (insn) == 13 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SBF;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_VLDL16C;
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLSQ56;
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LB;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRLSQ56;
+ if (Field_s_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRASQ56;
+ if (Field_s_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLSSQ56S;
+ if (Field_s_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVQ56;
+ if (Field_s_Slot_inst_get (insn) == 8 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_OVERFLOW;
+ if (Field_s_Slot_inst_get (insn) == 9 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_SAR;
+ if (Field_s_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_BITPTR;
+ if (Field_s_Slot_inst_get (insn) == 11 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_BITSUSED;
+ if (Field_s_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_TABLESIZE;
+ if (Field_s_Slot_inst_get (insn) == 13 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_FIRST_TS;
+ if (Field_s_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_NEXTOFFSET;
+ if (Field_s_Slot_inst_get (insn) == 15 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_SEARCHDONE;
+ if (Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_LBKI;
+ if (Field_t_Slot_inst_get (insn) == 0 &&
+ Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_DBI;
+ if (Field_t_Slot_inst_get (insn) == 2 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_LBI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 5)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALL0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALL4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALL8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALL12;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 6)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_J;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQZ;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTZ;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEZ;
+ }
+ if (Field_n_Slot_inst_get (insn) == 2)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEI;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEI;
+ }
+ if (Field_n_Slot_inst_get (insn) == 3)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_ENTRY;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BF;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BT;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LOOP;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_LOOPNEZ;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_LOOPGTZ;
+ }
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTUI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEUI;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 7)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BALL;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_BBC;
+ if (Field_r_Slot_inst_get (insn) == 6 ||
+ Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_BBCI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_BANY;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_BNE;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_BGE;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_BNALL;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_BBS;
+ if (Field_r_Slot_inst_get (insn) == 14 ||
+ Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_BBSI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
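+/* Decode the 16-bit "inst16b" slot format: match the op0/i/z/r/s/t
+   field values against the narrow (density) opcodes and return the
+   first hit, or XTENSA_UNDEFINED when nothing matches.  */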
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16b_get (insn) == 12)
+ {
+ if (Field_i_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_i_Slot_inst16b_get (insn) == 1)
+ {
+ if (Field_z_Slot_inst16b_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_z_Slot_inst16b_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ }
+ }
+ if (Field_op0_Slot_inst16b_get (insn) == 13)
+ {
+ if (Field_r_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_r_Slot_inst16b_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst16b_get (insn) == 0)
+ return OPCODE_RET_N;
+ if (Field_t_Slot_inst16b_get (insn) == 1)
+ return OPCODE_RETW_N;
+ if (Field_t_Slot_inst16b_get (insn) == 2)
+ return OPCODE_BREAK_N;
+ if (Field_t_Slot_inst16b_get (insn) == 3 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ if (Field_t_Slot_inst16b_get (insn) == 6 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ }
+ }
+ return XTENSA_UNDEFINED;
+}
+
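+/* Decode the 16-bit "inst16a" slot format; the op0 field alone
+   selects among the four narrow load/store/add opcodes.  */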
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16a_get (insn) == 8)
+ return OPCODE_L32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 9)
+ return OPCODE_S32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 10)
+ return OPCODE_ADD_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 11)
+ return OPCODE_ADDI_N;
+ return XTENSA_UNDEFINED;
+}
+
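+/* Decode slot 0 of the Audio Engine (AE) VLIW bundle.  Both core
+   opcodes (J, BEQZ, ADD, ...) and AE_* DSP opcodes may be scheduled
+   in this slot, so the match list covers both encoding spaces.  */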
+static int
+Slot_ae_slot0_decode (const xtensa_insnbuf insn)
+{
+ if (Field_ae_s20_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_ftsf378ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BT;
+ if (Field_combined1e9fefee_fld106ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 0 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_s4_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SELP24_HH;
+ if (Field_combined1e9fefee_fld107ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 0 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_s4_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SELP24_HL;
+ if (Field_combined1e9fefee_fld108ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 0 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_s4_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld47_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SELP24_LH;
+ if (Field_combined2c0b5f72_fld123_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld121_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf363ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_ftsf315_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 2 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLAASQ56S;
+ if (Field_combined2c0b5f72_fld133ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_C;
+ if (Field_combined2c0b5f72_fld134ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_C;
+ if (Field_combined2c0b5f72_fld135ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_C;
+ if (Field_combined2c0b5f72_fld136ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_C;
+ if (Field_combined2c0b5f72_fld137ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_C;
+ if (Field_combined2c0b5f72_fld138ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_C;
+ if (Field_combined2c0b5f72_fld139ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_C;
+ if (Field_combined2c0b5f72_fld140ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_C;
+ if (Field_combined2c0b5f72_fld141ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_C;
+ if (Field_combined2c0b5f72_fld142ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_C;
+ if (Field_combined2c0b5f72_fld143ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_C;
+ if (Field_combined2c0b5f72_fld144ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_C;
+ if (Field_combined2c0b5f72_fld145ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld46_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SQ32F_C;
+ if (Field_combined2c0b5f72_fld146ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld46_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SQ56S_C;
+ if (Field_combined2c0b5f72_fld52_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 0 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_s4_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined1e9fefee_fld109ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SELP24_LL;
+ if (Field_ftsf211ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_J;
+ if (Field_ftsf212ae_slot0_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_EXTUI;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BEQZ;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BGEZ;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVI;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 13 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BLTZ;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRAI;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 10 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SLLI;
+ if (Field_ftsf215ae_slot0_Slot_ae_slot0_get (insn) == 43 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_HH;
+ if (Field_ftsf217ae_slot0_Slot_ae_slot0_get (insn) == 299 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_I;
+ if (Field_ftsf218ae_slot0_Slot_ae_slot0_get (insn) == 46 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_HL;
+ if (Field_ftsf219ae_slot0_Slot_ae_slot0_get (insn) == 47 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_IU;
+ if (Field_ftsf220ae_slot0_Slot_ae_slot0_get (insn) == 302 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_X;
+ if (Field_ftsf221ae_slot0_Slot_ae_slot0_get (insn) == 303 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_XU;
+ if (Field_ftsf222ae_slot0_Slot_ae_slot0_get (insn) == 58 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_LH;
+ if (Field_ftsf223ae_slot0_Slot_ae_slot0_get (insn) == 59 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_I;
+ if (Field_ftsf224ae_slot0_Slot_ae_slot0_get (insn) == 62 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_IU;
+ if (Field_ftsf225ae_slot0_Slot_ae_slot0_get (insn) == 63 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_XU;
+ if (Field_ftsf226ae_slot0_Slot_ae_slot0_get (insn) == 314 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_X;
+ if (Field_ftsf227ae_slot0_Slot_ae_slot0_get (insn) == 315 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_I;
+ if (Field_ftsf228ae_slot0_Slot_ae_slot0_get (insn) == 318 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_IU;
+ if (Field_ftsf229ae_slot0_Slot_ae_slot0_get (insn) == 319 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_X;
+ if (Field_ftsf230ae_slot0_Slot_ae_slot0_get (insn) == 170 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_LL;
+ if (Field_ftsf231ae_slot0_Slot_ae_slot0_get (insn) == 171 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_XU;
+ if (Field_ftsf232ae_slot0_Slot_ae_slot0_get (insn) == 174 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_I;
+ if (Field_ftsf233ae_slot0_Slot_ae_slot0_get (insn) == 175 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_XU;
+ if (Field_ftsf234ae_slot0_Slot_ae_slot0_get (insn) == 186 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_IU;
+ if (Field_ftsf235ae_slot0_Slot_ae_slot0_get (insn) == 187 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_I;
+ if (Field_ftsf236ae_slot0_Slot_ae_slot0_get (insn) == 190 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_IU;
+ if (Field_ftsf237ae_slot0_Slot_ae_slot0_get (insn) == 191 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_X;
+ if (Field_ftsf238ae_slot0_Slot_ae_slot0_get (insn) == 426 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_X;
+ if (Field_ftsf239ae_slot0_Slot_ae_slot0_get (insn) == 427 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_XU;
+ if (Field_ftsf240ae_slot0_Slot_ae_slot0_get (insn) == 430 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_I;
+ if (Field_ftsf241ae_slot0_Slot_ae_slot0_get (insn) == 431 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_X;
+ if (Field_ftsf242ae_slot0_Slot_ae_slot0_get (insn) == 442 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_IU;
+ if (Field_ftsf243ae_slot0_Slot_ae_slot0_get (insn) == 443 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_XU;
+ if (Field_ftsf244ae_slot0_Slot_ae_slot0_get (insn) == 446 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVPA24X2;
+ if (Field_ftsf245ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_I;
+ if (Field_ftsf246ae_slot0_Slot_ae_slot0_get (insn) == 75 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_IU;
+ if (Field_ftsf247ae_slot0_Slot_ae_slot0_get (insn) == 331 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_X;
+ if (Field_ftsf248ae_slot0_Slot_ae_slot0_get (insn) == 78 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_X;
+ if (Field_ftsf249ae_slot0_Slot_ae_slot0_get (insn) == 79 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_XU;
+ if (Field_ftsf250ae_slot0_Slot_ae_slot0_get (insn) == 334 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_I;
+ if (Field_ftsf251ae_slot0_Slot_ae_slot0_get (insn) == 335 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_IU;
+ if (Field_ftsf252ae_slot0_Slot_ae_slot0_get (insn) == 90 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_XU;
+ if (Field_ftsf253ae_slot0_Slot_ae_slot0_get (insn) == 91 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_X;
+ if (Field_ftsf254ae_slot0_Slot_ae_slot0_get (insn) == 94 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_XU;
+ if (Field_ftsf255ae_slot0_Slot_ae_slot0_get (insn) == 95 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_IU;
+ if (Field_ftsf256ae_slot0_Slot_ae_slot0_get (insn) == 346 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_I;
+ if (Field_ftsf257ae_slot0_Slot_ae_slot0_get (insn) == 347 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_X;
+ if (Field_ftsf258ae_slot0_Slot_ae_slot0_get (insn) == 350 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_XU;
+ if (Field_ftsf259ae_slot0_Slot_ae_slot0_get (insn) == 351 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_I;
+ if (Field_ftsf260ae_slot0_Slot_ae_slot0_get (insn) == 106 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_I;
+ if (Field_ftsf261ae_slot0_Slot_ae_slot0_get (insn) == 107 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_IU;
+ if (Field_ftsf262ae_slot0_Slot_ae_slot0_get (insn) == 110 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_X;
+ if (Field_ftsf263ae_slot0_Slot_ae_slot0_get (insn) == 111 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_IU;
+ if (Field_ftsf264ae_slot0_Slot_ae_slot0_get (insn) == 122 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_XU;
+ if (Field_ftsf265ae_slot0_Slot_ae_slot0_get (insn) == 123 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_X;
+ if (Field_ftsf266ae_slot0_Slot_ae_slot0_get (insn) == 126 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_XU;
+ if (Field_ftsf267ae_slot0_Slot_ae_slot0_get (insn) == 127 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_TRUNCP24A32X2;
+ if (Field_ftsf268ae_slot0_Slot_ae_slot0_get (insn) == 362 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_I;
+ if (Field_ftsf269ae_slot0_Slot_ae_slot0_get (insn) == 363 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_I;
+ if (Field_ftsf270ae_slot0_Slot_ae_slot0_get (insn) == 875 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_XU;
+ if (Field_ftsf271ae_slot0_Slot_ae_slot0_get (insn) == 366 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_IU;
+ if (Field_ftsf272ae_slot0_Slot_ae_slot0_get (insn) == 367 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_I;
+ if (Field_ftsf273ae_slot0_Slot_ae_slot0_get (insn) == 878 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_IU;
+ if (Field_ftsf274ae_slot0_Slot_ae_slot0_get (insn) == 879 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_X;
+ if (Field_ftsf275ae_slot0_Slot_ae_slot0_get (insn) == 378 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_X;
+ if (Field_ftsf276ae_slot0_Slot_ae_slot0_get (insn) == 379 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_XU;
+ if (Field_ftsf277ae_slot0_Slot_ae_slot0_get (insn) == 890 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_NSAQ56S;
+ if (Field_ftsf278ae_slot0_Slot_ae_slot0_get (insn) == 891 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_TRUNCA32Q48;
+ if (Field_ftsf279ae_slot0_Slot_ae_slot0_get (insn) == 2938 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_JX;
+ if (Field_ftsf280_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBCI;
+ if (Field_ftsf280_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBSI;
+ if (Field_ftsf281ae_slot0_Slot_ae_slot0_get (insn) == 2939 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_ftsf282ae_slot0_Slot_ae_slot0_get (insn) == 1981 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf361ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_ftsf284ae_slot0_Slot_ae_slot0_get (insn) == 957 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf364ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_ftsf285ae_slot0_Slot_ae_slot0_get (insn) == 957 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf366ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_ftsf287ae_slot0_Slot_ae_slot0_get (insn) == 957 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf368ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_ftsf288_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_ftsf360ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BLTUI;
+ if (Field_ftsf289ae_slot0_Slot_ae_slot0_get (insn) == 382 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTA32P24_H;
+ if (Field_ftsf290ae_slot0_Slot_ae_slot0_get (insn) == 383 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTA32P24_L;
+ if (Field_ftsf291ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVAP24S_H;
+ if (Field_ftsf292ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf382ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVAP24S_L;
+ if (Field_ftsf293ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf384ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_TRUNCA16P24S_H;
+ if (Field_ftsf294ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf383ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_TRUNCA16P24S_L;
+ if (Field_ftsf295ae_slot0_Slot_ae_slot0_get (insn) == 202 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_IU;
+ if (Field_ftsf296ae_slot0_Slot_ae_slot0_get (insn) == 4298 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf315_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVP48;
+ if (Field_ftsf297ae_slot0_Slot_ae_slot0_get (insn) == 4554 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ALL4;
+ if (Field_ftsf297ae_slot0_Slot_ae_slot0_get (insn) == 12746 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf309_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_ftsf298ae_slot0_Slot_ae_slot0_get (insn) == 2506 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf373ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ANY4;
+ if (Field_ftsf300ae_slot0_Slot_ae_slot0_get (insn) == 1482 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf370ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ALL8;
+ if (Field_ftsf302ae_slot0_Slot_ae_slot0_get (insn) == 970 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf376ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_ftsf304ae_slot0_Slot_ae_slot0_get (insn) == 203 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_I;
+ if (Field_ftsf305ae_slot0_Slot_ae_slot0_get (insn) == 459 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_I;
+ if (Field_ftsf306ae_slot0_Slot_ae_slot0_get (insn) == 715 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_IU;
+ if (Field_ftsf307ae_slot0_Slot_ae_slot0_get (insn) == 971 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_X;
+ if (Field_ftsf308ae_slot0_Slot_ae_slot0_get (insn) == 206 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ32F_IU;
+ if (Field_ftsf309_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ56_C;
+ if (Field_ftsf310ae_slot0_Slot_ae_slot0_get (insn) == 207 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ56_XU;
+ if (Field_ftsf311ae_slot0_Slot_ae_slot0_get (insn) == 231 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf386ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_CVTQ48A32S;
+ if (Field_ftsf312ae_slot0_Slot_ae_slot0_get (insn) == 219 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLIQ56;
+ if (Field_ftsf312ae_slot0_Slot_ae_slot0_get (insn) == 222 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLISQ56S;
+ if (Field_ftsf312ae_slot0_Slot_ae_slot0_get (insn) == 475 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SRAIQ56;
+ if (Field_ftsf312ae_slot0_Slot_ae_slot0_get (insn) == 731 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SRLIQ56;
+ if (Field_ftsf313ae_slot0_Slot_ae_slot0_get (insn) == 987 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ABS;
+ if (Field_ftsf313ae_slot0_Slot_ae_slot0_get (insn) == 2011 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_NEG;
+ if (Field_ftsf313ae_slot0_Slot_ae_slot0_get (insn) == 3035 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRA;
+ if (Field_ftsf313ae_slot0_Slot_ae_slot0_get (insn) == 4059 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRL;
+ if (Field_ftsf314ae_slot0_Slot_ae_slot0_get (insn) == 478 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVQ56;
+ if (Field_ftsf316ae_slot0_Slot_ae_slot0_get (insn) == 1502 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SLLSSQ56S;
+ if (Field_ftsf317ae_slot0_Slot_ae_slot0_get (insn) == 1502 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf389ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRASQ56;
+ if (Field_ftsf319ae_slot0_Slot_ae_slot0_get (insn) == 1502 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf388ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRLSQ56;
+ if (Field_ftsf320ae_slot0_Slot_ae_slot0_get (insn) == 478 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf387ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SLLSQ56;
+ if (Field_ftsf322ae_slot0_Slot_ae_slot0_get (insn) == 223 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVFQ56;
+ if (Field_ftsf323ae_slot0_Slot_ae_slot0_get (insn) == 479 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVTQ56;
+ if (Field_ftsf324ae_slot0_Slot_ae_slot0_get (insn) == 735 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLAQ56;
+ if (Field_ftsf325ae_slot0_Slot_ae_slot0_get (insn) == 991 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SRLAQ56;
+ if (Field_ftsf326ae_slot0_Slot_ae_slot0_get (insn) == 735 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf363ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SLLASQ56S;
+ if (Field_ftsf327ae_slot0_Slot_ae_slot0_get (insn) == 991 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf363ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_ftsf328ae_slot0_Slot_ae_slot0_get (insn) == 479 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf360ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRAAQ56;
+ if (Field_ftsf329ae_slot0_Slot_ae_slot0_get (insn) == 31 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf379ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ32F_XU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 11 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVGEZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 14 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVLTZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 15 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ORBC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 23 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADD;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 26 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVNEZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 27 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRLI;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 30 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 31 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX4;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 39 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX2;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 42 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVT;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 55 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 71 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX4;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 74 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_OR;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 87 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_CLAMPS;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 103 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MAX;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 119 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MIN;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 139 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ORB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 142 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SEXT;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 143 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 151 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX8;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 154 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX2;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 155 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX8;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 158 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_XOR;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 159 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_XORB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 167 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ANDB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 183 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ANDBC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 199 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MAXU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 215 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MINU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 218 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_r10_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ32F_X;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 231 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVEQZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 247 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVF;
+ if (Field_op0_s4_Slot_ae_slot0_get (insn) == 5)
+ return OPCODE_L32R;
+ if (Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld149ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_ADDBRBA32;
+ if (Field_r_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_BNE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BALL;
+ if (Field_r_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBS;
+ if (Field_r_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S16I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S32I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 10 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BANY;
+ if (Field_r_Slot_ae_slot0_get (insn) == 11 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBC;
+ if (Field_r_Slot_ae_slot0_get (insn) == 12 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BGE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 13 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_ae_slot0_get (insn) == 14 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_ae_slot0_get (insn) == 15 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BNALL;
+ if (Field_s8_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4 &&
+ Field_ftsf280_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_L32I;
+ if (Field_s8_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf309_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get (insn) == 3 &&
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_C;
+ if (Field_t_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BEQI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BGEI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BGEUI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BNEI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BLTI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_r_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BF;
+ return XTENSA_UNDEFINED;
+}
+
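+/* Decode slot 1 of the Audio Engine (AE) VLIW bundle, which carries
+   the AE multiply/multiply-accumulate and other DSP opcodes.  */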
+static int
+Slot_ae_slot1_decode (const xtensa_insnbuf insn)
+{
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 50 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 82 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULARFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 114 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 146 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULRFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 178 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 210 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSRFQ32SP24S_L;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 274 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULRFQ32SP24S_H;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 306 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP24S_H;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 338 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSRFQ32SP24S_H;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 402 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULARFQ32SP24S_H;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 434 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP24S_H;
+ if (Field_ae_mul32x24fld_Slot_ae_slot1_get (insn) == 466 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP24S_H;
+ if (Field_ae_r20_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf343ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LTQ56S;
+ if (Field_ae_s20_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf340ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZAAP24S_HL_LH;
+ if (Field_combined2c0b5f72_fld131ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld69_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf91_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld68_Slot_ae_slot1_get (insn) == 2 &&
+ Field_combined2c0b5f72_fld19_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld22_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MAXABSSP24S;
+ if (Field_combined2c0b5f72_fld132ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld69_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf91_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld68_Slot_ae_slot1_get (insn) == 2 &&
+ Field_combined2c0b5f72_fld19_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld22_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MINABSSP24S;
+ if (Field_combined2c0b5f72_fld74_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf354ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld66_Slot_ae_slot1_get (insn) == 0 &&
+ Field_ftsf127ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld91_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld90_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld88_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld65_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld24_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_combined2c0b5f72_fld147ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MINABSSQ56S;
+ if (Field_combined2c0b5f72_fld79_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld74_Slot_ae_slot1_get (insn) == 0 &&
+ Field_ftsf354ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld66_Slot_ae_slot1_get (insn) == 0 &&
+ Field_ftsf127ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_combined2c0b5f72_fld91_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld90_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld88_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld65_Slot_ae_slot1_get (insn) == 0 &&
+ Field_combined2c0b5f72_fld24_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MAXABSSQ56S;
+ if (Field_ftsf101ae_slot1_Slot_ae_slot1_get (insn) == 43 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf335_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULSQ32SP16U_H;
+ if (Field_ftsf102ae_slot1_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf336ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULSFQ32SP16U_H;
+ if (Field_ftsf103ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf337ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULAFQ32SP16S_H;
+ if (Field_ftsf106ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MAXBQ56S;
+ if (Field_ftsf107ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MINBQ56S;
+ if (Field_ftsf108ae_slot1_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ae_r32_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_EQQ56;
+ if (Field_ftsf109ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ADDSQ56S;
+ if (Field_ftsf110ae_slot1_Slot_ae_slot1_get (insn) == 67 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ANDQ56;
+ if (Field_ftsf111ae_slot1_Slot_ae_slot1_get (insn) == 131 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MAXQ56S;
+ if (Field_ftsf112ae_slot1_Slot_ae_slot1_get (insn) == 195 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ORQ56;
+ if (Field_ftsf113ae_slot1_Slot_ae_slot1_get (insn) == 259 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MINQ56S;
+ if (Field_ftsf114ae_slot1_Slot_ae_slot1_get (insn) == 323 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_SUBQ56;
+ if (Field_ftsf115ae_slot1_Slot_ae_slot1_get (insn) == 387 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_SUBSQ56S;
+ if (Field_ftsf116ae_slot1_Slot_ae_slot1_get (insn) == 451 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_XORQ56;
+ if (Field_ftsf117ae_slot1_Slot_ae_slot1_get (insn) == 515 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_NANDQ56;
+ if (Field_ftsf118ae_slot1_Slot_ae_slot1_get (insn) == 2307 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ABSQ56;
+ if (Field_ftsf120ae_slot1_Slot_ae_slot1_get (insn) == 2315 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_NEGSQ56S;
+ if (Field_ftsf121ae_slot1_Slot_ae_slot1_get (insn) == 1163 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf91_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_SATQ48S;
+ if (Field_ftsf123ae_slot1_Slot_ae_slot1_get (insn) == 323 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf349ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ABSSQ56S;
+ if (Field_ftsf124ae_slot1_Slot_ae_slot1_get (insn) == 195 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf352ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_NEGQ56;
+ if (Field_ftsf125ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf342ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LEQ56S;
+ if (Field_ftsf126ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf357ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_TRUNCP24Q48X2;
+ if (Field_ftsf127ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf350ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ADDQ56;
+ if (Field_ftsf128ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAFP24S_HH_LL;
+ if (Field_ftsf129ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAFP24S_HL_LH;
+ if (Field_ftsf130ae_slot1_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAP24S_HH_LL;
+ if (Field_ftsf131ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_HL;
+ if (Field_ftsf132ae_slot1_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAP24S_HL_LH;
+ if (Field_ftsf133ae_slot1_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_LH;
+ if (Field_ftsf134ae_slot1_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_LL;
+ if (Field_ftsf135ae_slot1_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_HH;
+ if (Field_ftsf136ae_slot1_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_HH;
+ if (Field_ftsf137ae_slot1_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_HL;
+ if (Field_ftsf138ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_LH;
+ if (Field_ftsf139ae_slot1_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_HH;
+ if (Field_ftsf140ae_slot1_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_LL;
+ if (Field_ftsf141ae_slot1_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_HL;
+ if (Field_ftsf142ae_slot1_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_LH;
+ if (Field_ftsf143ae_slot1_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_LL;
+ if (Field_ftsf144ae_slot1_Slot_ae_slot1_get (insn) == 16 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_HL;
+ if (Field_ftsf145ae_slot1_Slot_ae_slot1_get (insn) == 17 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_HH;
+ if (Field_ftsf146ae_slot1_Slot_ae_slot1_get (insn) == 18 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_HL;
+ if (Field_ftsf147ae_slot1_Slot_ae_slot1_get (insn) == 19 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASFP24S_HH_LL;
+ if (Field_ftsf148ae_slot1_Slot_ae_slot1_get (insn) == 20 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_LH;
+ if (Field_ftsf149ae_slot1_Slot_ae_slot1_get (insn) == 21 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASFP24S_HL_LH;
+ if (Field_ftsf150ae_slot1_Slot_ae_slot1_get (insn) == 22 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASP24S_HH_LL;
+ if (Field_ftsf151ae_slot1_Slot_ae_slot1_get (insn) == 23 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASP24S_HL_LH;
+ if (Field_ftsf152ae_slot1_Slot_ae_slot1_get (insn) == 24 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_LL;
+ if (Field_ftsf153ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_HH;
+ if (Field_ftsf154ae_slot1_Slot_ae_slot1_get (insn) == 26 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_HL;
+ if (Field_ftsf155ae_slot1_Slot_ae_slot1_get (insn) == 27 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_LL;
+ if (Field_ftsf156ae_slot1_Slot_ae_slot1_get (insn) == 28 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_LH;
+ if (Field_ftsf157ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_HH;
+ if (Field_ftsf158ae_slot1_Slot_ae_slot1_get (insn) == 30 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_HL;
+ if (Field_ftsf159ae_slot1_Slot_ae_slot1_get (insn) == 31 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_LH;
+ if (Field_ftsf160ae_slot1_Slot_ae_slot1_get (insn) == 32 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_LH;
+ if (Field_ftsf161ae_slot1_Slot_ae_slot1_get (insn) == 33 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_LL;
+ if (Field_ftsf162ae_slot1_Slot_ae_slot1_get (insn) == 34 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_HH;
+ if (Field_ftsf163ae_slot1_Slot_ae_slot1_get (insn) == 35 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAFP24S_HH_LL;
+ if (Field_ftsf164ae_slot1_Slot_ae_slot1_get (insn) == 36 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_HL;
+ if (Field_ftsf165ae_slot1_Slot_ae_slot1_get (insn) == 37 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAFP24S_HL_LH;
+ if (Field_ftsf166ae_slot1_Slot_ae_slot1_get (insn) == 38 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAP24S_HH_LL;
+ if (Field_ftsf167ae_slot1_Slot_ae_slot1_get (insn) == 39 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAP24S_HL_LH;
+ if (Field_ftsf168ae_slot1_Slot_ae_slot1_get (insn) == 40 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_LH;
+ if (Field_ftsf169ae_slot1_Slot_ae_slot1_get (insn) == 41 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_HH;
+ if (Field_ftsf170ae_slot1_Slot_ae_slot1_get (insn) == 42 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_HL;
+ if (Field_ftsf171ae_slot1_Slot_ae_slot1_get (insn) == 43 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_LL;
+ if (Field_ftsf172ae_slot1_Slot_ae_slot1_get (insn) == 44 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_LH;
+ if (Field_ftsf173ae_slot1_Slot_ae_slot1_get (insn) == 45 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_HH;
+ if (Field_ftsf174ae_slot1_Slot_ae_slot1_get (insn) == 46 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_HL;
+ if (Field_ftsf175ae_slot1_Slot_ae_slot1_get (insn) == 47 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_LH;
+ if (Field_ftsf176ae_slot1_Slot_ae_slot1_get (insn) == 48 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_LL;
+ if (Field_ftsf177ae_slot1_Slot_ae_slot1_get (insn) == 49 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_LL;
+ if (Field_ftsf178ae_slot1_Slot_ae_slot1_get (insn) == 50 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_HH;
+ if (Field_ftsf179ae_slot1_Slot_ae_slot1_get (insn) == 51 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_LL;
+ if (Field_ftsf180ae_slot1_Slot_ae_slot1_get (insn) == 52 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_HL;
+ if (Field_ftsf181ae_slot1_Slot_ae_slot1_get (insn) == 53 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_HH;
+ if (Field_ftsf182ae_slot1_Slot_ae_slot1_get (insn) == 54 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_HL;
+ if (Field_ftsf183ae_slot1_Slot_ae_slot1_get (insn) == 55 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_LH;
+ if (Field_ftsf184ae_slot1_Slot_ae_slot1_get (insn) == 56 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_LH;
+ if (Field_ftsf185ae_slot1_Slot_ae_slot1_get (insn) == 57 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_LL;
+ if (Field_ftsf186ae_slot1_Slot_ae_slot1_get (insn) == 58 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_HH;
+ if (Field_ftsf187ae_slot1_Slot_ae_slot1_get (insn) == 59 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_LH;
+ if (Field_ftsf188ae_slot1_Slot_ae_slot1_get (insn) == 60 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_HL;
+ if (Field_ftsf189ae_slot1_Slot_ae_slot1_get (insn) == 61 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_LL;
+ if (Field_ftsf190ae_slot1_Slot_ae_slot1_get (insn) == 62 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSFP24S_HH_LL;
+ if (Field_ftsf191ae_slot1_Slot_ae_slot1_get (insn) == 63 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSFP24S_HL_LH;
+ if (Field_ftsf192ae_slot1_Slot_ae_slot1_get (insn) == 64 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_LL;
+ if (Field_ftsf193ae_slot1_Slot_ae_slot1_get (insn) == 65 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSP24S_HH_LL;
+ if (Field_ftsf194ae_slot1_Slot_ae_slot1_get (insn) == 66 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSP24S_HL_LH;
+ if (Field_ftsf195ae_slot1_Slot_ae_slot1_get (insn) == 67 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASFP24S_HH_LL;
+ if (Field_ftsf196ae_slot1_Slot_ae_slot1_get (insn) == 68 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZAAFP24S_HH_LL;
+ if (Field_ftsf197ae_slot1_Slot_ae_slot1_get (insn) == 69 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASFP24S_HL_LH;
+ if (Field_ftsf198ae_slot1_Slot_ae_slot1_get (insn) == 70 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASP24S_HH_LL;
+ if (Field_ftsf199ae_slot1_Slot_ae_slot1_get (insn) == 71 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASP24S_HL_LH;
+ if (Field_ftsf200ae_slot1_Slot_ae_slot1_get (insn) == 72 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZAAFP24S_HL_LH;
+ if (Field_ftsf201ae_slot1_Slot_ae_slot1_get (insn) == 73 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAFP24S_HH_LL;
+ if (Field_ftsf202ae_slot1_Slot_ae_slot1_get (insn) == 74 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAFP24S_HL_LH;
+ if (Field_ftsf203ae_slot1_Slot_ae_slot1_get (insn) == 75 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAP24S_HL_LH;
+ if (Field_ftsf204ae_slot1_Slot_ae_slot1_get (insn) == 76 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAP24S_HH_LL;
+ if (Field_ftsf205ae_slot1_Slot_ae_slot1_get (insn) == 77 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSFP24S_HH_LL;
+ if (Field_ftsf206ae_slot1_Slot_ae_slot1_get (insn) == 78 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSFP24S_HL_LH;
+ if (Field_ftsf207ae_slot1_Slot_ae_slot1_get (insn) == 79 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSP24S_HH_LL;
+ if (Field_ftsf208ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf341ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZAAP24S_HH_LL;
+ if (Field_ftsf20ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MAXBP24S;
+ if (Field_ftsf210ae_slot1_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf339ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZSSP24S_HL_LH;
+ if (Field_ftsf21ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MINBP24S;
+ if (Field_ftsf22ae_slot1_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVFP48;
+ if (Field_ftsf23ae_slot1_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ADDP24;
+ if (Field_ftsf24ae_slot1_Slot_ae_slot1_get (insn) == 69 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MAXP24S;
+ if (Field_ftsf25ae_slot1_Slot_ae_slot1_get (insn) == 20 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ADDSP24S;
+ if (Field_ftsf26ae_slot1_Slot_ae_slot1_get (insn) == 21 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MINP24S;
+ if (Field_ftsf27ae_slot1_Slot_ae_slot1_get (insn) == 84 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_NANDP48;
+ if (Field_ftsf28ae_slot1_Slot_ae_slot1_get (insn) == 85 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ORP48;
+ if (Field_ftsf29ae_slot1_Slot_ae_slot1_get (insn) == 36 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ANDP48;
+ if (Field_ftsf30ae_slot1_Slot_ae_slot1_get (insn) == 37 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_HH;
+ if (Field_ftsf31ae_slot1_Slot_ae_slot1_get (insn) == 52 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_HL;
+ if (Field_ftsf32ae_slot1_Slot_ae_slot1_get (insn) == 53 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_LL;
+ if (Field_ftsf33ae_slot1_Slot_ae_slot1_get (insn) == 100 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_LH;
+ if (Field_ftsf34ae_slot1_Slot_ae_slot1_get (insn) == 101 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SUBP24;
+ if (Field_ftsf35ae_slot1_Slot_ae_slot1_get (insn) == 116 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SUBSP24S;
+ if (Field_ftsf36ae_slot1_Slot_ae_slot1_get (insn) == 117 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_XORP48;
+ if (Field_ftsf37ae_slot1_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVTP48;
+ if (Field_ftsf38ae_slot1_Slot_ae_slot1_get (insn) == 24 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ABSP24;
+ if (Field_ftsf40ae_slot1_Slot_ae_slot1_get (insn) == 88 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_NEGP24;
+ if (Field_ftsf41ae_slot1_Slot_ae_slot1_get (insn) == 152 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_NEGSP24S;
+ if (Field_ftsf42ae_slot1_Slot_ae_slot1_get (insn) == 216 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf333ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_ftsf43ae_slot1_Slot_ae_slot1_get (insn) == 88 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf359ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ZEROP48;
+ if (Field_ftsf45ae_slot1_Slot_ae_slot1_get (insn) == 704 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVP48;
+ if (Field_ftsf47ae_slot1_Slot_ae_slot1_get (insn) == 708 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16ASYM;
+ if (Field_ftsf48ae_slot1_Slot_ae_slot1_get (insn) == 712 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16SYM;
+ if (Field_ftsf49ae_slot1_Slot_ae_slot1_get (insn) == 716 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLSSP24S;
+ if (Field_ftsf50ae_slot1_Slot_ae_slot1_get (insn) == 720 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLSP24;
+ if (Field_ftsf51ae_slot1_Slot_ae_slot1_get (insn) == 724 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRASP24;
+ if (Field_ftsf52ae_slot1_Slot_ae_slot1_get (insn) == 728 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRLSP24;
+ if (Field_ftsf53ae_slot1_Slot_ae_slot1_get (insn) == 732 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_TRUNCP16;
+ if (Field_ftsf54ae_slot1_Slot_ae_slot1_get (insn) == 24 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf356ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ABSSP24S;
+ if (Field_ftsf56ae_slot1_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_EQP24;
+ if (Field_ftsf57ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_LTP24S;
+ if (Field_ftsf58ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf354ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MOVFP24X2;
+ if (Field_ftsf60ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf355ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MOVTP24X2;
+ if (Field_ftsf61_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf334ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULAFS32P16S_HH;
+ if (Field_ftsf62ae_slot1_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LEP24S;
+ if (Field_ftsf63ae_slot1_Slot_ae_slot1_get (insn) == 97 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf344ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP16Q48ASYM;
+ if (Field_ftsf63ae_slot1_Slot_ae_slot1_get (insn) == 101 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf344ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP16Q48SYM;
+ if (Field_ftsf64ae_slot1_Slot_ae_slot1_get (insn) == 53 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf345ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP24Q48ASYM;
+ if (Field_ftsf66ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf347ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP24Q48SYM;
+ if (Field_ftsf68ae_slot1_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLIP24;
+ if (Field_ftsf68ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLISP24S;
+ if (Field_ftsf68ae_slot1_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRAIP24;
+ if (Field_ftsf68ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRLIP24;
+ if (Field_ftsf69ae_slot1_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16S_L;
+ if (Field_ftsf70ae_slot1_Slot_ae_slot1_get (insn) == 39 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16U_H;
+ if (Field_ftsf71ae_slot1_Slot_ae_slot1_get (insn) == 71 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16U_L;
+ if (Field_ftsf72ae_slot1_Slot_ae_slot1_get (insn) == 103 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16U_H;
+ if (Field_ftsf73ae_slot1_Slot_ae_slot1_get (insn) == 135 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16S_H;
+ if (Field_ftsf74ae_slot1_Slot_ae_slot1_get (insn) == 167 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16U_L;
+ if (Field_ftsf75ae_slot1_Slot_ae_slot1_get (insn) == 199 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16S_H;
+ if (Field_ftsf76ae_slot1_Slot_ae_slot1_get (insn) == 231 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16S_L;
+ if (Field_ftsf77ae_slot1_Slot_ae_slot1_get (insn) == 263 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16S_L;
+ if (Field_ftsf78ae_slot1_Slot_ae_slot1_get (insn) == 295 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16U_H;
+ if (Field_ftsf79ae_slot1_Slot_ae_slot1_get (insn) == 327 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16U_L;
+ if (Field_ftsf80ae_slot1_Slot_ae_slot1_get (insn) == 359 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16S_L;
+ if (Field_ftsf81ae_slot1_Slot_ae_slot1_get (insn) == 391 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16S_H;
+ if (Field_ftsf82ae_slot1_Slot_ae_slot1_get (insn) == 423 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16U_H;
+ if (Field_ftsf83ae_slot1_Slot_ae_slot1_get (insn) == 455 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16U_L;
+ if (Field_ftsf84ae_slot1_Slot_ae_slot1_get (insn) == 487 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16S_H;
+ if (Field_ftsf85ae_slot1_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16S_L;
+ if (Field_ftsf86ae_slot1_Slot_ae_slot1_get (insn) == 43 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16U_L;
+ if (Field_ftsf87ae_slot1_Slot_ae_slot1_get (insn) == 75 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSQ32SP16S_H;
+ if (Field_ftsf88ae_slot1_Slot_ae_slot1_get (insn) == 107 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSQ32SP16U_L;
+ if (Field_ftsf89ae_slot1_Slot_ae_slot1_get (insn) == 139 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSQ32SP16S_L;
+ if (Field_ftsf90ae_slot1_Slot_ae_slot1_get (insn) == 331 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf55_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_CVTQ48P24S_H;
+ if (Field_ftsf92ae_slot1_Slot_ae_slot1_get (insn) == 363 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf358ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ZEROQ56;
+ if (Field_ftsf93ae_slot1_Slot_ae_slot1_get (insn) == 203 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r10_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_CVTQ48P24S_L;
+ if (Field_ftsf94ae_slot1_Slot_ae_slot1_get (insn) == 1803 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVQ56;
+ if (Field_ftsf96ae_slot1_Slot_ae_slot1_get (insn) == 1835 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSQ32ASYM;
+ if (Field_ftsf97ae_slot1_Slot_ae_slot1_get (insn) == 939 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf348ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSQ32SYM;
+ if (Field_ftsf99ae_slot1_Slot_ae_slot1_get (insn) == 491 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf351_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_TRUNCQ32;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_LL;
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
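+
+/* Each get/set pair below copies a slot's bits between the raw
+   instruction words and a zero-based slot buffer; the masks and shift
+   amounts describe where each slot format places its bits within the
+   32-bit words of the instruction buffer.  */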
+
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0xffffff00) >> 8);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0xffffff00) | ((slotbuf[0] & 0xffffff) << 8);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0xffff0000) >> 16);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0xffff0000) | ((slotbuf[0] & 0xffff) << 16);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0xffff0000) >> 16);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0xffff0000) | ((slotbuf[0] & 0xffff) << 16);
+}
+
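+/* The ae_slot1 slot starts at bit 10 and crosses the 32-bit word
+   boundary: bits 10..31 of word 0 plus bit 0 of word 1 form the 23-bit
+   slot, so both instruction words are touched on get and set.  */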
+static void
+Slot_ae_format_Format_ae_slot1_10_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0xfffffc00) >> 10);
+ slotbuf[0] = (slotbuf[0] & ~0x400000) | ((insn[1] & 0x1) << 22);
+}
+
+static void
+Slot_ae_format_Format_ae_slot1_10_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xfffffc00) | ((slotbuf[0] & 0x3fffff) << 10);
+ insn[1] = (insn[1] & ~0x1) | ((slotbuf[0] & 0x400000) >> 22);
+}
+
+static void
+Slot_ae_format_Format_ae_slot0_33_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[1] & 0xffffffe) >> 1);
+}
+
+static void
+Slot_ae_format_Format_ae_slot0_33_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[1] = (insn[1] & ~0xffffffe) | ((slotbuf[0] & 0x7ffffff) << 1);
+}
+
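+/* Per-slot field accessor tables.  Each table is indexed by the global
+   field enumeration; a 0 (NULL) entry means the field does not exist in
+   that slot, which is why most rows are empty for slots that decode only
+   a handful of fields.  */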
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_get,
+ Field_rbit2_Slot_inst_get,
+ Field_rhi_Slot_inst_get,
+ Field_t3_Slot_inst_get,
+ Field_tbit2_Slot_inst_get,
+ Field_tlo_Slot_inst_get,
+ Field_w_Slot_inst_get,
+ Field_y_Slot_inst_get,
+ Field_x_Slot_inst_get,
+ Field_t2_Slot_inst_get,
+ Field_s2_Slot_inst_get,
+ Field_r2_Slot_inst_get,
+ Field_t4_Slot_inst_get,
+ Field_s4_Slot_inst_get,
+ Field_r4_Slot_inst_get,
+ Field_t8_Slot_inst_get,
+ Field_s8_Slot_inst_get,
+ Field_r8_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_ae_r3_Slot_inst_get,
+ Field_ae_s_non_samt_Slot_inst_get,
+ Field_ae_s3_Slot_inst_get,
+ Field_ae_r32_Slot_inst_get,
+ Field_ae_samt_s_t_Slot_inst_get,
+ Field_ae_r20_Slot_inst_get,
+ Field_ae_r10_Slot_inst_get,
+ Field_ae_s20_Slot_inst_get,
+ Field_ae_fld_ohba_Slot_inst_get,
+ Field_ae_fld_ohba2_Slot_inst_get,
+ 0,
+ Field_ftsf11_Slot_inst_get,
+ Field_ftsf12_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst_get,
+ Field_s3to1_Slot_inst_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_r3_Slot_inst_set,
+ Field_rbit2_Slot_inst_set,
+ Field_rhi_Slot_inst_set,
+ Field_t3_Slot_inst_set,
+ Field_tbit2_Slot_inst_set,
+ Field_tlo_Slot_inst_set,
+ Field_w_Slot_inst_set,
+ Field_y_Slot_inst_set,
+ Field_x_Slot_inst_set,
+ Field_t2_Slot_inst_set,
+ Field_s2_Slot_inst_set,
+ Field_r2_Slot_inst_set,
+ Field_t4_Slot_inst_set,
+ Field_s4_Slot_inst_set,
+ Field_r4_Slot_inst_set,
+ Field_t8_Slot_inst_set,
+ Field_s8_Slot_inst_set,
+ Field_r8_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_ae_r3_Slot_inst_set,
+ Field_ae_s_non_samt_Slot_inst_set,
+ Field_ae_s3_Slot_inst_set,
+ Field_ae_r32_Slot_inst_set,
+ Field_ae_samt_s_t_Slot_inst_set,
+ Field_ae_r20_Slot_inst_set,
+ Field_ae_r10_Slot_inst_set,
+ Field_ae_s20_Slot_inst_set,
+ Field_ae_fld_ohba_Slot_inst_set,
+ Field_ae_fld_ohba2_Slot_inst_set,
+ 0,
+ Field_ftsf11_Slot_inst_set,
+ Field_ftsf12_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst_set,
+ Field_s3to1_Slot_inst_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst16a_get,
+ Field_s2_Slot_inst16a_get,
+ Field_r2_Slot_inst16a_get,
+ Field_t4_Slot_inst16a_get,
+ Field_s4_Slot_inst16a_get,
+ Field_r4_Slot_inst16a_get,
+ Field_t8_Slot_inst16a_get,
+ Field_s8_Slot_inst16a_get,
+ Field_r8_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_get,
+ Field_s3to1_Slot_inst16a_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst16a_set,
+ Field_s2_Slot_inst16a_set,
+ Field_r2_Slot_inst16a_set,
+ Field_t4_Slot_inst16a_set,
+ Field_s4_Slot_inst16a_set,
+ Field_r4_Slot_inst16a_set,
+ Field_t8_Slot_inst16a_set,
+ Field_s8_Slot_inst16a_set,
+ Field_r8_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16a_set,
+ Field_s3to1_Slot_inst16a_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst16b_get,
+ Field_s2_Slot_inst16b_get,
+ Field_r2_Slot_inst16b_get,
+ Field_t4_Slot_inst16b_get,
+ Field_s4_Slot_inst16b_get,
+ Field_r4_Slot_inst16b_get,
+ Field_t8_Slot_inst16b_get,
+ Field_s8_Slot_inst16b_get,
+ Field_r8_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_get,
+ Field_s3to1_Slot_inst16b_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst16b_set,
+ Field_s2_Slot_inst16b_set,
+ Field_r2_Slot_inst16b_set,
+ Field_t4_Slot_inst16b_set,
+ Field_s4_Slot_inst16b_set,
+ Field_r4_Slot_inst16b_set,
+ Field_t8_Slot_inst16b_set,
+ Field_s8_Slot_inst16b_set,
+ Field_r8_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_inst16b_set,
+ Field_s3to1_Slot_inst16b_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_ae_slot1_get_field_fns[] = {
+ Field_t_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot1_get,
+ 0,
+ Field_ae_r20_Slot_ae_slot1_get,
+ Field_ae_r10_Slot_ae_slot1_get,
+ Field_ae_s20_Slot_ae_slot1_get,
+ 0,
+ 0,
+ Field_op0_s3_Slot_ae_slot1_get,
+ Field_ftsf11_Slot_ae_slot1_get,
+ Field_ftsf12_Slot_ae_slot1_get,
+ Field_ftsf13_Slot_ae_slot1_get,
+ Field_ftsf20ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf21ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf22ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf23ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf24ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf25ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf26ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf27ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf28ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf29ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf30ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf31ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf32ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf33ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf34ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf35ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf36ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf37ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf38ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf40ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf41ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf42ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf43ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf45ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf47ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf48ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf49ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf50ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf51ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf52ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf53ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf54ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf55_Slot_ae_slot1_get,
+ Field_ftsf56ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf57ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf58ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf60ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf61_Slot_ae_slot1_get,
+ Field_ftsf62ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf63ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf64ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf66ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf68ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf69ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf70ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf71ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf72ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf73ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf74ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf75ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf76ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf77ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf78ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf79ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf80ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf81ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf82ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf83ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf84ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf85ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf86ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf87ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf88ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf89ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf90ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf91_Slot_ae_slot1_get,
+ Field_ftsf92ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf93ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf94ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf96ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf97ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf99ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf101ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf102ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf103ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf106ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf107ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf108ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf109ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf110ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf111ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf112ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf113ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf114ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf115ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf116ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf117ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf118ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf120ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf121ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf123ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf124ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf125ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf126ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf127ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf128ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf129ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf130ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf131ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf132ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf133ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf134ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf135ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf136ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf137ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf138ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf139ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf140ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf141ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf142ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf143ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf144ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf145ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf146ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf147ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf148ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf149ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf150ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf151ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf152ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf153ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf154ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf155ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf156ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf157ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf158ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf159ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf160ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf161ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf162ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf163ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf164ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf165ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf166ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf167ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf168ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf169ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf170ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf171ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf172ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf173ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf174ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf175ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf176ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf177ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf178ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf179ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf180ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf181ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf182ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf183ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf184ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf185ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf186ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf187ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf188ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf189ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf190ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf191ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf192ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf193ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf194ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf195ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf196ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf197ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf198ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf199ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf200ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf201ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf202ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf203ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf204ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf205ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf206ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf207ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf208ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf210ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf333ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf334ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf335_Slot_ae_slot1_get,
+ Field_ftsf336ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf337ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf339ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf340ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf341ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf342ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf343ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf344ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf345ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf347ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf348ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf349ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf350ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf351_Slot_ae_slot1_get,
+ Field_ftsf352ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf354ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf355ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf356ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf357ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf358ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf359ae_slot1_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_mul32x24fld_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s3_s3_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld19_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld22_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld24_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld65_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld66_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld68_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld69_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld74_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld79_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld88_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld90_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld91_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld131ae_slot1_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld132ae_slot1_Slot_ae_slot1_get,
+ Field_combined2c0b5f72_fld147ae_slot1_Slot_ae_slot1_get,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_ae_slot1_set_field_fns[] = {
+ Field_t_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot1_set,
+ 0,
+ Field_ae_r20_Slot_ae_slot1_set,
+ Field_ae_r10_Slot_ae_slot1_set,
+ Field_ae_s20_Slot_ae_slot1_set,
+ 0,
+ 0,
+ Field_op0_s3_Slot_ae_slot1_set,
+ Field_ftsf11_Slot_ae_slot1_set,
+ Field_ftsf12_Slot_ae_slot1_set,
+ Field_ftsf13_Slot_ae_slot1_set,
+ Field_ftsf20ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf21ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf22ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf23ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf24ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf25ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf26ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf27ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf28ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf29ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf30ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf31ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf32ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf33ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf34ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf35ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf36ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf37ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf38ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf40ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf41ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf42ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf43ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf45ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf47ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf48ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf49ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf50ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf51ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf52ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf53ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf54ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf55_Slot_ae_slot1_set,
+ Field_ftsf56ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf57ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf58ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf60ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf61_Slot_ae_slot1_set,
+ Field_ftsf62ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf63ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf64ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf66ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf68ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf69ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf70ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf71ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf72ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf73ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf74ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf75ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf76ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf77ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf78ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf79ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf80ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf81ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf82ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf83ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf84ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf85ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf86ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf87ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf88ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf89ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf90ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf91_Slot_ae_slot1_set,
+ Field_ftsf92ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf93ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf94ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf96ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf97ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf99ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf101ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf102ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf103ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf106ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf107ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf108ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf109ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf110ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf111ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf112ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf113ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf114ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf115ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf116ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf117ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf118ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf120ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf121ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf123ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf124ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf125ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf126ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf127ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf128ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf129ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf130ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf131ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf132ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf133ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf134ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf135ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf136ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf137ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf138ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf139ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf140ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf141ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf142ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf143ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf144ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf145ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf146ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf147ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf148ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf149ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf150ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf151ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf152ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf153ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf154ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf155ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf156ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf157ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf158ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf159ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf160ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf161ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf162ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf163ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf164ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf165ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf166ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf167ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf168ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf169ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf170ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf171ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf172ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf173ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf174ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf175ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf176ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf177ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf178ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf179ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf180ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf181ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf182ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf183ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf184ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf185ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf186ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf187ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf188ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf189ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf190ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf191ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf192ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf193ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf194ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf195ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf196ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf197ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf198ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf199ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf200ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf201ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf202ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf203ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf204ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf205ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf206ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf207ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf208ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf210ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf333ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf334ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf335_Slot_ae_slot1_set,
+ Field_ftsf336ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf337ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf339ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf340ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf341ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf342ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf343ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf344ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf345ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf347ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf348ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf349ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf350ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf351_Slot_ae_slot1_set,
+ Field_ftsf352ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf354ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf355ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf356ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf357ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf358ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf359ae_slot1_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_mul32x24fld_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s3_s3_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld19_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld22_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld24_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld65_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld66_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld68_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld69_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld74_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld79_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld88_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld90_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld91_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld131ae_slot1_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld132ae_slot1_Slot_ae_slot1_set,
+ Field_combined2c0b5f72_fld147ae_slot1_Slot_ae_slot1_set,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_ae_slot0_get_field_fns[] = {
+ Field_t_Slot_ae_slot0_get,
+ 0,
+ Field_bbi_Slot_ae_slot0_get,
+ Field_imm12_Slot_ae_slot0_get,
+ Field_imm8_Slot_ae_slot0_get,
+ Field_s_Slot_ae_slot0_get,
+ Field_imm12b_Slot_ae_slot0_get,
+ Field_imm16_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_offset_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_op2_Slot_ae_slot0_get,
+ Field_r_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_ae_slot0_get,
+ Field_sal_Slot_ae_slot0_get,
+ Field_sargt_Slot_ae_slot0_get,
+ 0,
+ Field_sas_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot0_get,
+ Field_ae_samt_s_t_Slot_ae_slot0_get,
+ Field_ae_r20_Slot_ae_slot0_get,
+ Field_ae_r10_Slot_ae_slot0_get,
+ Field_ae_s20_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ Field_ftsf11_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_ae_slot0_get,
+ Field_ftsf211ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf212ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf213ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf214ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf215ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf217ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf218ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf219ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf220ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf221ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf222ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf223ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf224ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf225ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf226ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf227ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf228ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf229ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf230ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf231ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf232ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf233ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf234ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf235ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf236ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf237ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf238ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf239ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf240ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf241ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf242ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf243ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf244ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf245ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf246ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf247ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf248ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf249ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf250ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf251ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf252ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf253ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf254ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf255ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf256ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf257ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf258ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf259ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf260ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf261ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf262ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf263ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf264ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf265ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf266ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf267ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf268ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf269ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf270ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf271ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf272ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf273ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf274ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf275ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf276ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf277ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf278ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf279ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf280_Slot_ae_slot0_get,
+ Field_ftsf281ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf282ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf284ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf285ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf287ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf288_Slot_ae_slot0_get,
+ Field_ftsf289ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf290ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf291ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf292ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf293ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf294ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf295ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf296ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf297ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf298ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf300ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf302ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf304ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf305ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf306ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf307ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf308ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf309_Slot_ae_slot0_get,
+ Field_ftsf310ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf311ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf312ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf313ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf314ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf315_Slot_ae_slot0_get,
+ Field_ftsf316ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf317ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf319ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf320ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf322ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf323ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf324ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf325ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf326ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf327ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf328ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf329ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf360ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf361ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf362_Slot_ae_slot0_get,
+ Field_ftsf363ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf364ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf366ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf368ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf370ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf373ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf376ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf378ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf379ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf382ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf383ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf384ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf386ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf387ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf388ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf389ae_slot0_Slot_ae_slot0_get,
+ 0,
+ Field_op0_s4_s4_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld37_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld46_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld47_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld52_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld121_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld123_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld133ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld134ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld135ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld136ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld137ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld138ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld139ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld140ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld141ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld142ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld143ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld144ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld145ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld146ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld148ae_slot0_Slot_ae_slot0_get,
+ Field_combined2c0b5f72_fld149ae_slot0_Slot_ae_slot0_get,
+ Field_op0_s4_s4_s4_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld106ae_slot0_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld107ae_slot0_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld108ae_slot0_Slot_ae_slot0_get,
+ Field_combined1e9fefee_fld109ae_slot0_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_ae_slot0_get,
+ Field_s3to1_Slot_ae_slot0_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_mr0_get,
+ Implicit_Field_mr1_get,
+ Implicit_Field_mr2_get,
+ Implicit_Field_mr3_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_ae_slot0_set_field_fns[] = {
+ Field_t_Slot_ae_slot0_set,
+ 0,
+ Field_bbi_Slot_ae_slot0_set,
+ Field_imm12_Slot_ae_slot0_set,
+ Field_imm8_Slot_ae_slot0_set,
+ Field_s_Slot_ae_slot0_set,
+ Field_imm12b_Slot_ae_slot0_set,
+ Field_imm16_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_offset_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_op2_Slot_ae_slot0_set,
+ Field_r_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_ae_slot0_set,
+ Field_sal_Slot_ae_slot0_set,
+ Field_sargt_Slot_ae_slot0_set,
+ 0,
+ Field_sas_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot0_set,
+ Field_ae_samt_s_t_Slot_ae_slot0_set,
+ Field_ae_r20_Slot_ae_slot0_set,
+ Field_ae_r10_Slot_ae_slot0_set,
+ Field_ae_s20_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ Field_ftsf11_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_ae_slot0_set,
+ Field_ftsf211ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf212ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf213ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf214ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf215ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf217ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf218ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf219ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf220ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf221ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf222ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf223ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf224ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf225ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf226ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf227ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf228ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf229ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf230ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf231ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf232ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf233ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf234ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf235ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf236ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf237ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf238ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf239ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf240ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf241ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf242ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf243ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf244ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf245ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf246ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf247ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf248ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf249ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf250ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf251ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf252ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf253ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf254ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf255ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf256ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf257ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf258ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf259ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf260ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf261ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf262ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf263ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf264ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf265ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf266ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf267ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf268ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf269ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf270ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf271ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf272ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf273ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf274ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf275ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf276ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf277ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf278ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf279ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf280_Slot_ae_slot0_set,
+ Field_ftsf281ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf282ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf284ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf285ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf287ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf288_Slot_ae_slot0_set,
+ Field_ftsf289ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf290ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf291ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf292ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf293ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf294ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf295ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf296ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf297ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf298ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf300ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf302ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf304ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf305ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf306ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf307ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf308ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf309_Slot_ae_slot0_set,
+ Field_ftsf310ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf311ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf312ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf313ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf314ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf315_Slot_ae_slot0_set,
+ Field_ftsf316ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf317ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf319ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf320ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf322ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf323ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf324ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf325ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf326ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf327ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf328ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf329ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf360ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf361ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf362_Slot_ae_slot0_set,
+ Field_ftsf363ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf364ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf366ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf368ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf370ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf373ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf376ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf378ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf379ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf382ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf383ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf384ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf386ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf387ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf388ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf389ae_slot0_Slot_ae_slot0_set,
+ 0,
+ Field_op0_s4_s4_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld28_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld37_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld39_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld40_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld46_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld47_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld49_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld50_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld52_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld121_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld123_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld127_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld133ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld134ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld135ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld136ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld137ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld138ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld139ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld140ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld141ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld142ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld143ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld144ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld145ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld146ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld148ae_slot0_Slot_ae_slot0_set,
+ Field_combined2c0b5f72_fld149ae_slot0_Slot_ae_slot0_set,
+ Field_op0_s4_s4_s4_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld96_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld98_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld106ae_slot0_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld107ae_slot0_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld108ae_slot0_Slot_ae_slot0_set,
+ Field_combined1e9fefee_fld109ae_slot0_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_bitindex_Slot_ae_slot0_set,
+ Field_s3to1_Slot_ae_slot0_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" },
+ { "ae_slot1", "ae_format", 1,
+ Slot_ae_format_Format_ae_slot1_10_get, Slot_ae_format_Format_ae_slot1_10_set,
+ Slot_ae_slot1_get_field_fns, Slot_ae_slot1_set_field_fns,
+ Slot_ae_slot1_decode, "nop" },
+ { "ae_slot0", "ae_format", 0,
+ Slot_ae_format_Format_ae_slot0_33_get, Slot_ae_format_Format_ae_slot0_33_set,
+ Slot_ae_slot0_get_field_fns, Slot_ae_slot0_set_field_fns,
+ Slot_ae_slot0_decode, "nop" }
+};
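
Each slots[] entry above binds a slot name to its parent format, its position within that format, the pair of functions that extract and insert the slot's bits in the fetched instruction word, the per-field accessor tables defined earlier, the slot's opcode decoder, and the opcode used to pad the slot ("nop" or "nop.n"). As a minimal sketch of how a consumer might read one field through these tables, assuming libisa's convention that a zero entry means the field is not encoded in that slot (read_slot1_field and fieldnum are illustrative names, not part of the patch):

    static uint32 read_slot1_field(const xtensa_insnbuf slotbuf, int fieldnum)
    {
        xtensa_get_field_fn get = Slot_ae_slot1_get_field_fns[fieldnum];
        /* a zero pointer marks a field that does not exist in ae_slot1 */
        return get ? get(slotbuf) : 0;
    }
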
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0x80000000;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0xc0000000;
+}
+
+static void
+Format_ae_format_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0xf0000000;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static int Format_ae_format_slots[] = { 3, 4 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots },
+ { "ae_format", 8, Format_ae_format_encode, 2, Format_ae_format_slots }
+};
+
+
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0) == 0 && (insn[1] & 0x80000000) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0) == 0 && (insn[1] & 0xc0000000) == 0x80000000)
+ return 1; /* x16a */
+ if ((insn[0] & 0) == 0 && (insn[1] & 0xe0000000) == 0xc0000000)
+ return 2; /* x16b */
+ if ((insn[0] & 0x3ff) == 0 && (insn[1] & 0xf0000000) == 0xf0000000)
+ return 3; /* ae_format */
+ return -1;
+}
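
Each test above matches the constant that the corresponding Format_*_encode function loads: top-nibble bit patterns 0xxx, 10xx, 110x and 1111 in word 1 select x24, x16a, x16b and ae_format respectively. The (insn[0] & 0) == 0 terms are always true; they appear to be artifacts of the table-driven generator, which emits a mask test for both buffer words even when no bits of word 0 participate (only ae_format actually constrains word 0). A minimal round-trip sketch, assuming xtensa_insnbuf points at two 32-bit words as the encode functions suggest (check_format_roundtrip is an illustrative name):

    #include <assert.h>

    static void check_format_roundtrip(void)
    {
        uint32 buf[2] = { 0, 0 };
        Format_x16b_encode(buf);          /* loads the 110x template */
        assert(format_decoder(buf) == 2); /* index 2 is "x16b" */
    }
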
+
+static int length_table[256] = {
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8,
+ 8
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int l = insn[0];
+ return length_table[l];
+}
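
Read against the table, the first instruction byte selects the length directly: values 0x00-0x7F decode to 3 bytes (x24), 0x80-0xDF to 2 bytes (x16a and x16b), 0xE0-0xEF to -1 (no format assigned), and 0xF0-0xFF to 8 bytes (ae_format). These ranges mirror the high bits that format_decoder tests, consistent with this core's big-endian instruction fetch: a buffer whose first byte is 0xF2, for example, would be reported as an 8-byte bundle.
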
+
+
+/* Top-level ISA structure. */
+
+xtensa_isa_internal xtensa_modules = {
+ 1 /* big-endian */,
+ 8 /* insn_size */, 0,
+ 4, formats, format_decoder, length_decoder,
+ 5, slots,
+ 468 /* num_fields */,
+ 536, operands,
+ 746, iclasses,
+ 881, opcodes, 0,
+ 9, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 6, interfaces, 0,
+ 4, funcUnits, 0
+};
diff --git a/target/xtensa/core-test_mmuhifi_c3.c b/target/xtensa/core-test_mmuhifi_c3.c
new file mode 100644
index 000000000..123c630b0
--- /dev/null
+++ b/target/xtensa/core-test_mmuhifi_c3.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-test_mmuhifi_c3/core-isa.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_test_mmuhifi_c3
+#include "core-test_mmuhifi_c3/xtensa-modules.c.inc"
+
+static XtensaConfig test_mmuhifi_c3 __attribute__((unused)) = {
+ .name = "test_mmuhifi_c3",
+ .gdb_regmap = {
+ .reg = {
+#include "core-test_mmuhifi_c3/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = 40000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(test_mmuhifi_c3)
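
REGISTER_CORE adds the configuration to the list of available Xtensa CPU models, so once this file is built in, the core should be selectable in the usual way, along the lines of qemu-system-xtensa -cpu test_mmuhifi_c3 (shown for illustration; the machine type and remaining options depend on the build).
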
diff --git a/target/xtensa/core-test_mmuhifi_c3/core-isa.h b/target/xtensa/core-test_mmuhifi_c3/core-isa.h
new file mode 100644
index 000000000..838b1b09d
--- /dev/null
+++ b/target/xtensa/core-test_mmuhifi_c3/core-isa.h
@@ -0,0 +1,472 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2019 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H
+#define XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H
+
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
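
Because every XCHAL_HAVE_* macro is defined to either 0 or 1, feature tests can use plain #if without defined() guards. A minimal illustration using one of the options defined below:

    #if XCHAL_HAVE_DENSITY
    /* the core accepts 16-bit narrow encodings such as nop.n */
    #endif
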
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 8 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_LOOP_BUFFER_SIZE 0 /* zero-ov. loop instr buffer size */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 0 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* num of contexts */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MX 1 /* MX core (Tensilica internal) */
+#define XCHAL_HAVE_MP_INTERRUPTS 1 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 1 /* core RunStall control port */
+#define XCHAL_HAVE_PSO 0 /* Power Shut-Off */
+#define XCHAL_HAVE_PSO_CDM 0 /* core/debug/mem pwr domains */
+#define XCHAL_HAVE_PSO_FULL_RETENTION 0 /* all regs preserved on PSO */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 1 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 2 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* single prec floating point */
+#define XCHAL_HAVE_FP_DIV 0 /* FP with DIV instructions */
+#define XCHAL_HAVE_FP_RECIP 0 /* FP with RECIP instructions */
+#define XCHAL_HAVE_FP_SQRT 0 /* FP with SQRT instructions */
+#define XCHAL_HAVE_FP_RSQRT 0 /* FP with RSQRT instructions */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_DIV 0 /* DFP with DIV instructions */
+#define XCHAL_HAVE_DFP_RECIP 0 /* DFP with RECIP instructions*/
+#define XCHAL_HAVE_DFP_SQRT 0 /* DFP with SQRT instructions */
+#define XCHAL_HAVE_DFP_RSQRT 0 /* DFP with RSQRT instructions*/
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI3 0 /* HiFi3 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 1 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_HIFI_MINI 0 /* HiFi Mini Audio Engine pkg */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BBENEP 0 /* ConnX BBENEP pkgs */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_BSP3_TRANSPOSE 0 /* BSP3 & transpose32x32 */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+#define XCHAL_HAVE_FLIX3 0 /* basic 3-way FLIX option */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_LOADSTORE_UNITS 1 /* load/store units */
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 8 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 8 /* data width in bytes */
+#define XCHAL_DATA_PIPE_DELAY 1 /* d-side pipeline delay
+ (1 = 5-stage, 2 = 7-stage) */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 1000006 /* sw version of this header */
+
+#define XCHAL_CORE_ID "test_mmuhifi_c3" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00005A6A /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC1B3CBFE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x10405A6A /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX3.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2300 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 230000 /* major*100+minor */
+#define XCHAL_HW_REL_LX3 1
+#define XCHAL_HW_REL_LX3_0 1
+#define XCHAL_HW_REL_LX3_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2300 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 230000 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2300 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 230000 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 1 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+#define XCHAL_HAVE_PREFETCH_L1 0 /* prefetch to L1 dcache */
+#define XCHAL_PREFETCH_CASTOUT_LINES 0 /* dcache pref. castout bufsz */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Cache sets per way, expressed as log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
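
These values agree with the formula quoted above: with a line width of 5, a set width of 8 and 2 ways, each cache holds 2 * 2^(5+8) = 16384 bytes, matching the XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE values earlier in this header.
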
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 8
+#define XCHAL_DCACHE_ACCESS_SIZE 8
+
+#define XCHAL_DCACHE_BANKS 1 /* number of banks */
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 2 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 12 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 4 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 9 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 2 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x00000FFF
+#define XCHAL_INTLEVEL2_MASK 0x00000000
+#define XCHAL_INTLEVEL3_MASK 0x00000000
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x00000FFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x00000FFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 1
+#define XCHAL_INT9_LEVEL 1
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 1
+#define XCHAL_DEBUGLEVEL 2 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFFF000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000080
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000004
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x00000E3B
+#define XCHAL_INTTYPE_MASK_TIMER 0x00000140
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+#define XCHAL_INTTYPE_MASK_PROFILING 0x00000000
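+/*
+ * Each type mask above is the OR of (1 << n) over the interrupts n of that
+ * type; e.g. the two timers at interrupts 6 and 8 give
+ * (1 << 6) | (1 << 8) == 0x00000140.
+ */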
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 8 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (None here: more than one interrupt is configured at level 1.) */
+
+
+/*
+ * External interrupt mapping.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL BInterrupt pin number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 9 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 10 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 11 /* (intlevel 1) */
+/* EXTERNAL BInterrupt pin numbers mapped to each core interrupt number: */
+#define XCHAL_INT0_EXTNUM 0 /* (intlevel 1) */
+#define XCHAL_INT1_EXTNUM 1 /* (intlevel 1) */
+#define XCHAL_INT2_EXTNUM 2 /* (intlevel 1) */
+#define XCHAL_INT3_EXTNUM 3 /* (intlevel 1) */
+#define XCHAL_INT4_EXTNUM 4 /* (intlevel 1) */
+#define XCHAL_INT5_EXTNUM 5 /* (intlevel 1) */
+#define XCHAL_INT9_EXTNUM 6 /* (intlevel 1) */
+#define XCHAL_INT10_EXTNUM 7 /* (intlevel 1) */
+#define XCHAL_INT11_EXTNUM 8 /* (intlevel 1) */
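+
+/*
+ * Illustrative sketch only (xchal_extint_to_core is a hypothetical helper,
+ * not part of the generated HAL): translating an external BInterrupt pin
+ * number into the core interrupt number via the mapping macros above.
+ * Guarded with #if 0 so it is never compiled.
+ */
+#if 0
+static inline int xchal_extint_to_core(int pin)
+{
+    static const int map[XCHAL_NUM_EXTINTERRUPTS] = {
+        XCHAL_EXTINT0_NUM, XCHAL_EXTINT1_NUM, XCHAL_EXTINT2_NUM,
+        XCHAL_EXTINT3_NUM, XCHAL_EXTINT4_NUM, XCHAL_EXTINT5_NUM,
+        XCHAL_EXTINT6_NUM, XCHAL_EXTINT7_NUM, XCHAL_EXTINT8_NUM,
+    };
+    return (pin >= 0 && pin < XCHAL_NUM_EXTINTERRUPTS) ? map[pin] : -1;
+}
+#endif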
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000280
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL2_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL2_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG MODULE
+ ----------------------------------------------------------------------*/
+
+/* Misc */
+#define XCHAL_HAVE_DEBUG_ERI 0 /* ERI to debug module */
+#define XCHAL_HAVE_DEBUG_APB 0 /* APB to debug module */
+#define XCHAL_HAVE_DEBUG_JTAG 0 /* JTAG to debug module */
+
+/* On-Chip Debug (OCD) */
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 0 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 0 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 0 /* faster OCD option (to LX4) */
+#define XCHAL_HAVE_OCD_LS32DDR 0 /* L32DDR/S32DDR (faster OCD) */
+
+/* TRAX (in core) */
+#define XCHAL_HAVE_TRAX 0 /* TRAX in debug module */
+#define XCHAL_TRAX_MEM_SIZE 0 /* TRAX memory size in bytes */
+#define XCHAL_TRAX_MEM_SHAREABLE 0 /* start/end regs; ready sig. */
+#define XCHAL_TRAX_ATB_WIDTH 0 /* ATB width (bits), 0=no ATB */
+#define XCHAL_TRAX_TIME_WIDTH 0 /* timestamp bitwidth, 0=none */
+
+/* Perf counters */
+#define XCHAL_NUM_PERF_COUNTERS 0 /* performance counters */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
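+/* i.e. each ITLB/DTLB autorefill way here holds 2^2 == 4 entries. */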
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_CORE_TEST_MMUHIFI_C3_CORE_ISA_H */
diff --git a/target/xtensa/core-test_mmuhifi_c3/gdb-config.c.inc b/target/xtensa/core-test_mmuhifi_c3/gdb-config.c.inc
new file mode 100644
index 000000000..0bca70b5a
--- /dev/null
+++ b/target/xtensa/core-test_mmuhifi_c3/gdb-config.c.inc
@@ -0,0 +1,220 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2019 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
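+
+/*
+ * Each XTREG row describes one register in the layout GDB uses.  Read
+ * against the entries, the columns appear to be: register index, byte
+ * offset, bit width, byte size, alignment, target register number, access
+ * flags, coprocessor number, register type, group, and name, followed by
+ * optional fetch/store code strings and a mask pointer.  These column
+ * names are an assumption, not taken from the Tensilica sources.
+ */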
+ XTREG( 0, 0,32, 4, 4,0x0020,0x0006,-2, 9,0x0100,pc, 0,0,0,0,0,0)
+ XTREG( 1, 4,32, 4, 4,0x0100,0x0006,-2, 1,0x0002,ar0, 0,0,0,0,0,0)
+ XTREG( 2, 8,32, 4, 4,0x0101,0x0006,-2, 1,0x0002,ar1, 0,0,0,0,0,0)
+ XTREG( 3, 12,32, 4, 4,0x0102,0x0006,-2, 1,0x0002,ar2, 0,0,0,0,0,0)
+ XTREG( 4, 16,32, 4, 4,0x0103,0x0006,-2, 1,0x0002,ar3, 0,0,0,0,0,0)
+ XTREG( 5, 20,32, 4, 4,0x0104,0x0006,-2, 1,0x0002,ar4, 0,0,0,0,0,0)
+ XTREG( 6, 24,32, 4, 4,0x0105,0x0006,-2, 1,0x0002,ar5, 0,0,0,0,0,0)
+ XTREG( 7, 28,32, 4, 4,0x0106,0x0006,-2, 1,0x0002,ar6, 0,0,0,0,0,0)
+ XTREG( 8, 32,32, 4, 4,0x0107,0x0006,-2, 1,0x0002,ar7, 0,0,0,0,0,0)
+ XTREG( 9, 36,32, 4, 4,0x0108,0x0006,-2, 1,0x0002,ar8, 0,0,0,0,0,0)
+ XTREG( 10, 40,32, 4, 4,0x0109,0x0006,-2, 1,0x0002,ar9, 0,0,0,0,0,0)
+ XTREG( 11, 44,32, 4, 4,0x010a,0x0006,-2, 1,0x0002,ar10, 0,0,0,0,0,0)
+ XTREG( 12, 48,32, 4, 4,0x010b,0x0006,-2, 1,0x0002,ar11, 0,0,0,0,0,0)
+ XTREG( 13, 52,32, 4, 4,0x010c,0x0006,-2, 1,0x0002,ar12, 0,0,0,0,0,0)
+ XTREG( 14, 56,32, 4, 4,0x010d,0x0006,-2, 1,0x0002,ar13, 0,0,0,0,0,0)
+ XTREG( 15, 60,32, 4, 4,0x010e,0x0006,-2, 1,0x0002,ar14, 0,0,0,0,0,0)
+ XTREG( 16, 64,32, 4, 4,0x010f,0x0006,-2, 1,0x0002,ar15, 0,0,0,0,0,0)
+ XTREG( 17, 68,32, 4, 4,0x0110,0x0006,-2, 1,0x0002,ar16, 0,0,0,0,0,0)
+ XTREG( 18, 72,32, 4, 4,0x0111,0x0006,-2, 1,0x0002,ar17, 0,0,0,0,0,0)
+ XTREG( 19, 76,32, 4, 4,0x0112,0x0006,-2, 1,0x0002,ar18, 0,0,0,0,0,0)
+ XTREG( 20, 80,32, 4, 4,0x0113,0x0006,-2, 1,0x0002,ar19, 0,0,0,0,0,0)
+ XTREG( 21, 84,32, 4, 4,0x0114,0x0006,-2, 1,0x0002,ar20, 0,0,0,0,0,0)
+ XTREG( 22, 88,32, 4, 4,0x0115,0x0006,-2, 1,0x0002,ar21, 0,0,0,0,0,0)
+ XTREG( 23, 92,32, 4, 4,0x0116,0x0006,-2, 1,0x0002,ar22, 0,0,0,0,0,0)
+ XTREG( 24, 96,32, 4, 4,0x0117,0x0006,-2, 1,0x0002,ar23, 0,0,0,0,0,0)
+ XTREG( 25,100,32, 4, 4,0x0118,0x0006,-2, 1,0x0002,ar24, 0,0,0,0,0,0)
+ XTREG( 26,104,32, 4, 4,0x0119,0x0006,-2, 1,0x0002,ar25, 0,0,0,0,0,0)
+ XTREG( 27,108,32, 4, 4,0x011a,0x0006,-2, 1,0x0002,ar26, 0,0,0,0,0,0)
+ XTREG( 28,112,32, 4, 4,0x011b,0x0006,-2, 1,0x0002,ar27, 0,0,0,0,0,0)
+ XTREG( 29,116,32, 4, 4,0x011c,0x0006,-2, 1,0x0002,ar28, 0,0,0,0,0,0)
+ XTREG( 30,120,32, 4, 4,0x011d,0x0006,-2, 1,0x0002,ar29, 0,0,0,0,0,0)
+ XTREG( 31,124,32, 4, 4,0x011e,0x0006,-2, 1,0x0002,ar30, 0,0,0,0,0,0)
+ XTREG( 32,128,32, 4, 4,0x011f,0x0006,-2, 1,0x0002,ar31, 0,0,0,0,0,0)
+ XTREG( 33,132,32, 4, 4,0x0200,0x0006,-2, 2,0x1100,lbeg, 0,0,0,0,0,0)
+ XTREG( 34,136,32, 4, 4,0x0201,0x0006,-2, 2,0x1100,lend, 0,0,0,0,0,0)
+ XTREG( 35,140,32, 4, 4,0x0202,0x0006,-2, 2,0x1100,lcount, 0,0,0,0,0,0)
+ XTREG( 36,144, 6, 4, 4,0x0203,0x0006,-2, 2,0x1100,sar, 0,0,0,0,0,0)
+ XTREG( 37,148,32, 4, 4,0x0205,0x0006,-2, 2,0x1100,litbase, 0,0,0,0,0,0)
+ XTREG( 38,152, 3, 4, 4,0x0248,0x0006,-2, 2,0x1002,windowbase, 0,0,0,0,0,0)
+ XTREG( 39,156, 8, 4, 4,0x0249,0x0006,-2, 2,0x1002,windowstart, 0,0,0,0,0,0)
+ XTREG( 40,160,32, 4, 4,0x02b0,0x0002,-2, 2,0x1000,configid0, 0,0,0,0,0,0)
+ XTREG( 41,164,32, 4, 4,0x02d0,0x0002,-2, 2,0x1000,configid1, 0,0,0,0,0,0)
+ XTREG( 42,168,19, 4, 4,0x02e6,0x0006,-2, 2,0x1100,ps, 0,0,0,0,0,0)
+ XTREG( 43,172,32, 4, 4,0x03e7,0x0006,-2, 3,0x0110,threadptr, 0,0,0,0,0,0)
+ XTREG( 44,176,16, 4, 4,0x0204,0x0006,-1, 2,0x1100,br, 0,0,0,0,0,0)
+ XTREG( 45,180,32, 4, 4,0x020c,0x0006,-1, 2,0x1100,scompare1, 0,0,0,0,0,0)
+ XTREG( 46,184,48, 8, 8,0x0060,0x0006, 1, 4,0x0101,aep0,
+ "03:04:84:b2","03:04:84:a7",0,0,0,0)
+ XTREG( 47,192,48, 8, 8,0x0061,0x0006, 1, 4,0x0101,aep1,
+ "03:04:94:b2","03:04:94:a7",0,0,0,0)
+ XTREG( 48,200,48, 8, 8,0x0062,0x0006, 1, 4,0x0101,aep2,
+ "03:04:a4:b2","03:04:a4:a7",0,0,0,0)
+ XTREG( 49,208,48, 8, 8,0x0063,0x0006, 1, 4,0x0101,aep3,
+ "03:04:b4:b2","03:04:b4:a7",0,0,0,0)
+ XTREG( 50,216,48, 8, 8,0x0064,0x0006, 1, 4,0x0101,aep4,
+ "03:04:c4:b2","03:04:c4:a7",0,0,0,0)
+ XTREG( 51,224,48, 8, 8,0x0065,0x0006, 1, 4,0x0101,aep5,
+ "03:04:d4:b2","03:04:d4:a7",0,0,0,0)
+ XTREG( 52,232,48, 8, 8,0x0066,0x0006, 1, 4,0x0101,aep6,
+ "03:04:e4:b2","03:04:e4:a7",0,0,0,0)
+ XTREG( 53,240,48, 8, 8,0x0067,0x0006, 1, 4,0x0101,aep7,
+ "03:04:f4:b2","03:04:f4:a7",0,0,0,0)
+ XTREG( 54,248,56, 8, 8,0x0068,0x0006, 1, 4,0x0101,aeq0,
+ "03:04:04:c3","03:04:04:c1",0,0,0,0)
+ XTREG( 55,256,56, 8, 8,0x0069,0x0006, 1, 4,0x0101,aeq1,
+ "03:04:14:c3","03:04:44:c1",0,0,0,0)
+ XTREG( 56,264,56, 8, 8,0x006a,0x0006, 1, 4,0x0101,aeq2,
+ "03:04:24:c3","03:04:84:c1",0,0,0,0)
+ XTREG( 57,272,56, 8, 8,0x006b,0x0006, 1, 4,0x0101,aeq3,
+ "03:04:34:c3","03:04:c4:c1",0,0,0,0)
+ XTREG( 58,280, 7, 4, 4,0x03f0,0x0006, 1, 3,0x0100,ae_ovf_sar, 0,0,0,0,0,0)
+ XTREG( 59,284,32, 4, 4,0x03f1,0x0006, 1, 3,0x0110,ae_bithead, 0,0,0,0,0,0)
+ XTREG( 60,288,16, 4, 4,0x03f2,0x0006, 1, 3,0x0100,ae_ts_fts_bu_bp,0,0,0,0,0,0)
+ XTREG( 61,292,28, 4, 4,0x03f3,0x0006, 1, 3,0x0100,ae_sd_no, 0,0,0,0,0,0)
+ XTREG( 62,296,32, 4, 4,0x0253,0x0007,-2, 2,0x1000,ptevaddr, 0,0,0,0,0,0)
+ XTREG( 63,300,32, 4, 4,0x025a,0x0007,-2, 2,0x1000,rasid, 0,0,0,0,0,0)
+ XTREG( 64,304,18, 4, 4,0x025b,0x0007,-2, 2,0x1000,itlbcfg, 0,0,0,0,0,0)
+ XTREG( 65,308,18, 4, 4,0x025c,0x0007,-2, 2,0x1000,dtlbcfg, 0,0,0,0,0,0)
+ XTREG( 66,312, 6, 4, 4,0x0263,0x0007,-2, 2,0x1000,atomctl, 0,0,0,0,0,0)
+ XTREG( 67,316,32, 4, 4,0x0268,0x0007,-2, 2,0x1000,ddr, 0,0,0,0,0,0)
+ XTREG( 68,320,32, 4, 4,0x02b1,0x0007,-2, 2,0x1000,epc1, 0,0,0,0,0,0)
+ XTREG( 69,324,32, 4, 4,0x02b2,0x0007,-2, 2,0x1000,epc2, 0,0,0,0,0,0)
+ XTREG( 70,328,32, 4, 4,0x02c0,0x0007,-2, 2,0x1000,depc, 0,0,0,0,0,0)
+ XTREG( 71,332,19, 4, 4,0x02c2,0x0007,-2, 2,0x1000,eps2, 0,0,0,0,0,0)
+ XTREG( 72,336,32, 4, 4,0x02d1,0x0007,-2, 2,0x1000,excsave1, 0,0,0,0,0,0)
+ XTREG( 73,340,32, 4, 4,0x02d2,0x0007,-2, 2,0x1000,excsave2, 0,0,0,0,0,0)
+ XTREG( 74,344, 2, 4, 4,0x02e0,0x0007,-2, 2,0x1000,cpenable, 0,0,0,0,0,0)
+ XTREG( 75,348,12, 4, 4,0x02e2,0x000b,-2, 2,0x1000,interrupt, 0,0,0,0,0,0)
+ XTREG( 76,352,12, 4, 4,0x02e2,0x000d,-2, 2,0x1000,intset, 0,0,0,0,0,0)
+ XTREG( 77,356,12, 4, 4,0x02e3,0x000d,-2, 2,0x1000,intclear, 0,0,0,0,0,0)
+ XTREG( 78,360,12, 4, 4,0x02e4,0x0007,-2, 2,0x1000,intenable, 0,0,0,0,0,0)
+ XTREG( 79,364,32, 4, 4,0x02e7,0x0007,-2, 2,0x1000,vecbase, 0,0,0,0,0,0)
+ XTREG( 80,368, 6, 4, 4,0x02e8,0x0007,-2, 2,0x1000,exccause, 0,0,0,0,0,0)
+ XTREG( 81,372,12, 4, 4,0x02e9,0x0003,-2, 2,0x1000,debugcause, 0,0,0,0,0,0)
+ XTREG( 82,376,32, 4, 4,0x02ea,0x000f,-2, 2,0x1000,ccount, 0,0,0,0,0,0)
+ XTREG( 83,380,32, 4, 4,0x02eb,0x0003,-2, 2,0x1000,prid, 0,0,0,0,0,0)
+ XTREG( 84,384,32, 4, 4,0x02ec,0x000f,-2, 2,0x1000,icount, 0,0,0,0,0,0)
+ XTREG( 85,388, 4, 4, 4,0x02ed,0x0007,-2, 2,0x1000,icountlevel, 0,0,0,0,0,0)
+ XTREG( 86,392,32, 4, 4,0x02ee,0x0007,-2, 2,0x1000,excvaddr, 0,0,0,0,0,0)
+ XTREG( 87,396,32, 4, 4,0x02f0,0x000f,-2, 2,0x1000,ccompare0, 0,0,0,0,0,0)
+ XTREG( 88,400,32, 4, 4,0x02f1,0x000f,-2, 2,0x1000,ccompare1, 0,0,0,0,0,0)
+ XTREG( 89,404,32, 4, 4,0x02f4,0x0007,-2, 2,0x1000,misc0, 0,0,0,0,0,0)
+ XTREG( 90,408,32, 4, 4,0x02f5,0x0007,-2, 2,0x1000,misc1, 0,0,0,0,0,0)
+ XTREG( 91,412,32, 4, 4,0x0000,0x0006,-2, 8,0x0100,a0, 0,0,0,0,0,0)
+ XTREG( 92,416,32, 4, 4,0x0001,0x0006,-2, 8,0x0100,a1, 0,0,0,0,0,0)
+ XTREG( 93,420,32, 4, 4,0x0002,0x0006,-2, 8,0x0100,a2, 0,0,0,0,0,0)
+ XTREG( 94,424,32, 4, 4,0x0003,0x0006,-2, 8,0x0100,a3, 0,0,0,0,0,0)
+ XTREG( 95,428,32, 4, 4,0x0004,0x0006,-2, 8,0x0100,a4, 0,0,0,0,0,0)
+ XTREG( 96,432,32, 4, 4,0x0005,0x0006,-2, 8,0x0100,a5, 0,0,0,0,0,0)
+ XTREG( 97,436,32, 4, 4,0x0006,0x0006,-2, 8,0x0100,a6, 0,0,0,0,0,0)
+ XTREG( 98,440,32, 4, 4,0x0007,0x0006,-2, 8,0x0100,a7, 0,0,0,0,0,0)
+ XTREG( 99,444,32, 4, 4,0x0008,0x0006,-2, 8,0x0100,a8, 0,0,0,0,0,0)
+ XTREG(100,448,32, 4, 4,0x0009,0x0006,-2, 8,0x0100,a9, 0,0,0,0,0,0)
+ XTREG(101,452,32, 4, 4,0x000a,0x0006,-2, 8,0x0100,a10, 0,0,0,0,0,0)
+ XTREG(102,456,32, 4, 4,0x000b,0x0006,-2, 8,0x0100,a11, 0,0,0,0,0,0)
+ XTREG(103,460,32, 4, 4,0x000c,0x0006,-2, 8,0x0100,a12, 0,0,0,0,0,0)
+ XTREG(104,464,32, 4, 4,0x000d,0x0006,-2, 8,0x0100,a13, 0,0,0,0,0,0)
+ XTREG(105,468,32, 4, 4,0x000e,0x0006,-2, 8,0x0100,a14, 0,0,0,0,0,0)
+ XTREG(106,472,32, 4, 4,0x000f,0x0006,-2, 8,0x0100,a15, 0,0,0,0,0,0)
+ XTREG(107,476, 1, 1, 1,0x0010,0x0006,-2, 6,0x1010,b0,
+ 0,0,&xtensa_mask0,0,0,0)
+ XTREG(108,477, 1, 1, 1,0x0011,0x0006,-2, 6,0x1010,b1,
+ 0,0,&xtensa_mask1,0,0,0)
+ XTREG(109,478, 1, 1, 1,0x0012,0x0006,-2, 6,0x1010,b2,
+ 0,0,&xtensa_mask2,0,0,0)
+ XTREG(110,479, 1, 1, 1,0x0013,0x0006,-2, 6,0x1010,b3,
+ 0,0,&xtensa_mask3,0,0,0)
+ XTREG(111,480, 1, 1, 1,0x0014,0x0006,-2, 6,0x1010,b4,
+ 0,0,&xtensa_mask4,0,0,0)
+ XTREG(112,481, 1, 1, 1,0x0015,0x0006,-2, 6,0x1010,b5,
+ 0,0,&xtensa_mask5,0,0,0)
+ XTREG(113,482, 1, 1, 1,0x0016,0x0006,-2, 6,0x1010,b6,
+ 0,0,&xtensa_mask6,0,0,0)
+ XTREG(114,483, 1, 1, 1,0x0017,0x0006,-2, 6,0x1010,b7,
+ 0,0,&xtensa_mask7,0,0,0)
+ XTREG(115,484, 1, 1, 1,0x0018,0x0006,-2, 6,0x1010,b8,
+ 0,0,&xtensa_mask8,0,0,0)
+ XTREG(116,485, 1, 1, 1,0x0019,0x0006,-2, 6,0x1010,b9,
+ 0,0,&xtensa_mask9,0,0,0)
+ XTREG(117,486, 1, 1, 1,0x001a,0x0006,-2, 6,0x1010,b10,
+ 0,0,&xtensa_mask10,0,0,0)
+ XTREG(118,487, 1, 1, 1,0x001b,0x0006,-2, 6,0x1010,b11,
+ 0,0,&xtensa_mask11,0,0,0)
+ XTREG(119,488, 1, 1, 1,0x001c,0x0006,-2, 6,0x1010,b12,
+ 0,0,&xtensa_mask12,0,0,0)
+ XTREG(120,489, 1, 1, 1,0x001d,0x0006,-2, 6,0x1010,b13,
+ 0,0,&xtensa_mask13,0,0,0)
+ XTREG(121,490, 1, 1, 1,0x001e,0x0006,-2, 6,0x1010,b14,
+ 0,0,&xtensa_mask14,0,0,0)
+ XTREG(122,491, 1, 1, 1,0x001f,0x0006,-2, 6,0x1010,b15,
+ 0,0,&xtensa_mask15,0,0,0)
+ XTREG(123,492, 4, 4, 4,0x2003,0x0006,-2, 6,0x1010,psintlevel,
+ 0,0,&xtensa_mask16,0,0,0)
+ XTREG(124,496, 1, 4, 4,0x2004,0x0006,-2, 6,0x1010,psum,
+ 0,0,&xtensa_mask17,0,0,0)
+ XTREG(125,500, 1, 4, 4,0x2005,0x0006,-2, 6,0x1010,pswoe,
+ 0,0,&xtensa_mask18,0,0,0)
+ XTREG(126,504, 2, 4, 4,0x2006,0x0006,-2, 6,0x1010,psring,
+ 0,0,&xtensa_mask19,0,0,0)
+ XTREG(127,508, 1, 4, 4,0x2007,0x0006,-2, 6,0x1010,psexcm,
+ 0,0,&xtensa_mask20,0,0,0)
+ XTREG(128,512, 2, 4, 4,0x2008,0x0006,-2, 6,0x1010,pscallinc,
+ 0,0,&xtensa_mask21,0,0,0)
+ XTREG(129,516, 4, 4, 4,0x2009,0x0006,-2, 6,0x1010,psowb,
+ 0,0,&xtensa_mask22,0,0,0)
+ XTREG(130,520,20, 4, 4,0x200a,0x0006,-2, 6,0x1010,litbaddr,
+ 0,0,&xtensa_mask23,0,0,0)
+ XTREG(131,524, 1, 4, 4,0x200b,0x0006,-2, 6,0x1010,litben,
+ 0,0,&xtensa_mask24,0,0,0)
+ XTREG(132,528, 4, 4, 4,0x200e,0x0006,-2, 6,0x1010,dbnum,
+ 0,0,&xtensa_mask25,0,0,0)
+ XTREG(133,532, 8, 4, 4,0x200f,0x0006,-2, 6,0x1010,asid3,
+ 0,0,&xtensa_mask26,0,0,0)
+ XTREG(134,536, 8, 4, 4,0x2010,0x0006,-2, 6,0x1010,asid2,
+ 0,0,&xtensa_mask27,0,0,0)
+ XTREG(135,540, 8, 4, 4,0x2011,0x0006,-2, 6,0x1010,asid1,
+ 0,0,&xtensa_mask28,0,0,0)
+ XTREG(136,544, 2, 4, 4,0x2012,0x0006,-2, 6,0x1010,instpgszid4,
+ 0,0,&xtensa_mask29,0,0,0)
+ XTREG(137,548, 2, 4, 4,0x2013,0x0006,-2, 6,0x1010,datapgszid4,
+ 0,0,&xtensa_mask30,0,0,0)
+ XTREG(138,552,10, 4, 4,0x2014,0x0006,-2, 6,0x1010,ptbase,
+ 0,0,&xtensa_mask31,0,0,0)
+ XTREG(139,556, 1, 4, 4,0x201a,0x0006, 1, 5,0x1010,ae_overflow,
+ 0,0,&xtensa_mask32,0,0,0)
+ XTREG(140,560, 6, 4, 4,0x201b,0x0006, 1, 5,0x1010,ae_sar,
+ 0,0,&xtensa_mask33,0,0,0)
+ XTREG(141,564, 4, 4, 4,0x201c,0x0006, 1, 5,0x1010,ae_bitptr,
+ 0,0,&xtensa_mask34,0,0,0)
+ XTREG(142,568, 4, 4, 4,0x201d,0x0006, 1, 5,0x1010,ae_bitsused,
+ 0,0,&xtensa_mask35,0,0,0)
+ XTREG(143,572, 4, 4, 4,0x201e,0x0006, 1, 5,0x1010,ae_tablesize,
+ 0,0,&xtensa_mask36,0,0,0)
+ XTREG(144,576, 4, 4, 4,0x201f,0x0006, 1, 5,0x1010,ae_first_ts,
+ 0,0,&xtensa_mask37,0,0,0)
+ XTREG(145,580,27, 4, 4,0x2020,0x0006, 1, 5,0x1010,ae_nextoffset,
+ 0,0,&xtensa_mask38,0,0,0)
+ XTREG_END
diff --git a/target/xtensa/core-test_mmuhifi_c3/xtensa-modules.c.inc b/target/xtensa/core-test_mmuhifi_c3/xtensa-modules.c.inc
new file mode 100644
index 000000000..28561147f
--- /dev/null
+++ b/target/xtensa/core-test_mmuhifi_c3/xtensa-modules.c.inc
@@ -0,0 +1,36387 @@
+/* Xtensa configuration-specific ISA information.
+
+ Copyright (c) 2003-2019 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+
+/* Sysregs. */
+
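+/*
+ * Columns: name, register number, and a user-register flag (1 for user
+ * registers accessed via RUR/WUR, such as THREADPTR and the AE_* registers;
+ * 0 for special registers accessed via RSR/WSR).  That is why VECBASE and
+ * THREADPTR can share number 231: they live in different register spaces.
+ */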
+static xtensa_sysreg_internal sysregs[] = {
+ { "LBEG", 0, 0 },
+ { "LEND", 1, 0 },
+ { "LCOUNT", 2, 0 },
+ { "BR", 4, 0 },
+ { "PTEVADDR", 83, 0 },
+ { "DDR", 104, 0 },
+ { "CONFIGID0", 176, 0 },
+ { "CONFIGID1", 208, 0 },
+ { "INTERRUPT", 226, 0 },
+ { "INTCLEAR", 227, 0 },
+ { "CCOUNT", 234, 0 },
+ { "PRID", 235, 0 },
+ { "ICOUNT", 236, 0 },
+ { "CCOMPARE0", 240, 0 },
+ { "CCOMPARE1", 241, 0 },
+ { "VECBASE", 231, 0 },
+ { "EPC1", 177, 0 },
+ { "EPC2", 178, 0 },
+ { "EXCSAVE1", 209, 0 },
+ { "EXCSAVE2", 210, 0 },
+ { "EPS2", 194, 0 },
+ { "EXCCAUSE", 232, 0 },
+ { "DEPC", 192, 0 },
+ { "EXCVADDR", 238, 0 },
+ { "WINDOWBASE", 72, 0 },
+ { "WINDOWSTART", 73, 0 },
+ { "SAR", 3, 0 },
+ { "LITBASE", 5, 0 },
+ { "PS", 230, 0 },
+ { "MISC0", 244, 0 },
+ { "MISC1", 245, 0 },
+ { "INTENABLE", 228, 0 },
+ { "ICOUNTLEVEL", 237, 0 },
+ { "DEBUGCAUSE", 233, 0 },
+ { "RASID", 90, 0 },
+ { "ITLBCFG", 91, 0 },
+ { "DTLBCFG", 92, 0 },
+ { "CPENABLE", 224, 0 },
+ { "SCOMPARE1", 12, 0 },
+ { "ATOMCTL", 99, 0 },
+ { "THREADPTR", 231, 1 },
+ { "AE_OVF_SAR", 240, 1 },
+ { "AE_BITHEAD", 241, 1 },
+ { "AE_TS_FTS_BU_BP", 242, 1 },
+ { "AE_SD_NO", 243, 1 }
+};
+
+#define NUM_SYSREGS 45
+#define MAX_SPECIAL_REG 245
+#define MAX_USER_REG 243
+
+
+/* Processor states. */
+
+static xtensa_state_internal states[] = {
+ { "LCOUNT", 32, 0 },
+ { "PC", 32, 0 },
+ { "ICOUNT", 32, 0 },
+ { "DDR", 32, 0 },
+ { "INTERRUPT", 12, 0 },
+ { "CCOUNT", 32, 0 },
+ { "XTSYNC", 1, 0 },
+ { "VECBASE", 22, 0 },
+ { "EPC1", 32, 0 },
+ { "EPC2", 32, 0 },
+ { "EXCSAVE1", 32, 0 },
+ { "EXCSAVE2", 32, 0 },
+ { "EPS2", 15, 0 },
+ { "EXCCAUSE", 6, 0 },
+ { "PSINTLEVEL", 4, 0 },
+ { "PSUM", 1, 0 },
+ { "PSWOE", 1, 0 },
+ { "PSRING", 2, 0 },
+ { "PSEXCM", 1, 0 },
+ { "DEPC", 32, 0 },
+ { "EXCVADDR", 32, 0 },
+ { "WindowBase", 3, 0 },
+ { "WindowStart", 8, 0 },
+ { "PSCALLINC", 2, 0 },
+ { "PSOWB", 4, 0 },
+ { "LBEG", 32, 0 },
+ { "LEND", 32, 0 },
+ { "SAR", 6, 0 },
+ { "THREADPTR", 32, 0 },
+ { "LITBADDR", 20, 0 },
+ { "LITBEN", 1, 0 },
+ { "MISC0", 32, 0 },
+ { "MISC1", 32, 0 },
+ { "InOCDMode", 1, 0 },
+ { "INTENABLE", 12, 0 },
+ { "ICOUNTLEVEL", 4, 0 },
+ { "DEBUGCAUSE", 6, 0 },
+ { "DBNUM", 4, 0 },
+ { "CCOMPARE0", 32, 0 },
+ { "CCOMPARE1", 32, 0 },
+ { "ASID3", 8, 0 },
+ { "ASID2", 8, 0 },
+ { "ASID1", 8, 0 },
+ { "INSTPGSZID4", 2, 0 },
+ { "DATAPGSZID4", 2, 0 },
+ { "PTBASE", 10, 0 },
+ { "CPENABLE", 2, 0 },
+ { "SCOMPARE1", 32, 0 },
+ { "ATOMCTL", 6, 0 },
+ { "CCON", 1, XTENSA_STATE_IS_EXPORTED },
+ { "MPSCORE", 16, XTENSA_STATE_IS_EXPORTED },
+ { "WMPINT_ADDR", 12, XTENSA_STATE_IS_EXPORTED },
+ { "WMPINT_DATA", 32, XTENSA_STATE_IS_EXPORTED },
+ { "WMPINT_TOGGLEEN", 1, XTENSA_STATE_IS_EXPORTED },
+ { "AE_OVERFLOW", 1, 0 },
+ { "AE_SAR", 6, 0 },
+ { "AE_BITHEAD", 32, 0 },
+ { "AE_BITPTR", 4, 0 },
+ { "AE_BITSUSED", 4, 0 },
+ { "AE_TABLESIZE", 4, 0 },
+ { "AE_FIRST_TS", 4, 0 },
+ { "AE_NEXTOFFSET", 27, 0 },
+ { "AE_SEARCHDONE", 1, 0 }
+};
+
+#define NUM_STATES 63
+
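+/*
+ * The enum below mirrors states[] one-to-one; the STATE_* values are used
+ * as indices into that table, so the two lists must stay in the same order.
+ */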
+enum xtensa_state_id {
+ STATE_LCOUNT,
+ STATE_PC,
+ STATE_ICOUNT,
+ STATE_DDR,
+ STATE_INTERRUPT,
+ STATE_CCOUNT,
+ STATE_XTSYNC,
+ STATE_VECBASE,
+ STATE_EPC1,
+ STATE_EPC2,
+ STATE_EXCSAVE1,
+ STATE_EXCSAVE2,
+ STATE_EPS2,
+ STATE_EXCCAUSE,
+ STATE_PSINTLEVEL,
+ STATE_PSUM,
+ STATE_PSWOE,
+ STATE_PSRING,
+ STATE_PSEXCM,
+ STATE_DEPC,
+ STATE_EXCVADDR,
+ STATE_WindowBase,
+ STATE_WindowStart,
+ STATE_PSCALLINC,
+ STATE_PSOWB,
+ STATE_LBEG,
+ STATE_LEND,
+ STATE_SAR,
+ STATE_THREADPTR,
+ STATE_LITBADDR,
+ STATE_LITBEN,
+ STATE_MISC0,
+ STATE_MISC1,
+ STATE_InOCDMode,
+ STATE_INTENABLE,
+ STATE_ICOUNTLEVEL,
+ STATE_DEBUGCAUSE,
+ STATE_DBNUM,
+ STATE_CCOMPARE0,
+ STATE_CCOMPARE1,
+ STATE_ASID3,
+ STATE_ASID2,
+ STATE_ASID1,
+ STATE_INSTPGSZID4,
+ STATE_DATAPGSZID4,
+ STATE_PTBASE,
+ STATE_CPENABLE,
+ STATE_SCOMPARE1,
+ STATE_ATOMCTL,
+ STATE_CCON,
+ STATE_MPSCORE,
+ STATE_WMPINT_ADDR,
+ STATE_WMPINT_DATA,
+ STATE_WMPINT_TOGGLEEN,
+ STATE_AE_OVERFLOW,
+ STATE_AE_SAR,
+ STATE_AE_BITHEAD,
+ STATE_AE_BITPTR,
+ STATE_AE_BITSUSED,
+ STATE_AE_TABLESIZE,
+ STATE_AE_FIRST_TS,
+ STATE_AE_NEXTOFFSET,
+ STATE_AE_SEARCHDONE
+};
+
+
+/* Field definitions. */
+
+static unsigned
+Field_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
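+
+/*
+ * Every generated accessor pair below follows the same idiom: a contiguous
+ * chunk of `width` bits at bit position `lsb` is extracted with
+ * (insn[0] << (32 - lsb - width)) >> (32 - width), and stored by clearing
+ * the chunk's mask and OR-ing the new value in at `lsb`.  The t field above
+ * is the 4-bit chunk at bits 4..7; fields spanning several disjoint ranges
+ * (sr, st, and the ftsf* fields further down) chain multiple such steps.
+ */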
+
+static unsigned
+Field_s_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 8) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_op1_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op1_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_op0_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_n_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_n_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_m_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_m_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_sr_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
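+
+/*
+ * sr concatenates two nibbles, r (bits 12..15) as the high half and s
+ * (bits 8..11) as the low half, giving the 8-bit special-register number
+ * used by the RSR/WSR/XSR encodings.
+ */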
+
+static unsigned
+Field_st_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_thi3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_thi3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_ae_r3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ae_r3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ae_r10_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_r32_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ae_s3_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ae_s3_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ae_s_non_samt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_s_non_samt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_op0_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_t_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_r_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_op0_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op0_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_z_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf61ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf61ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
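+
+/*
+ * The TIE-generated ftsf* fields may span disjoint bit ranges:
+ * ftsf61ae_slot1, for instance, concatenates bits 11..15, bit 8, and
+ * bits 0..3 of the slot word.
+ */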
+
+static unsigned
+Field_op0_s3_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 9) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s3_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf330ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf330ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf81ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf81ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ae_r20_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf73ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf73ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf35ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf35ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf34ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf34ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf32ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf32ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf33ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf33ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf96ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf96ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ae_s20_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf94ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf94ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf347_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf347_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf24ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf24ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf23ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf23ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf125ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 18) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf125ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf350ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf350ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf80ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf80ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf88ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf88ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 23) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf340_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf340_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf87ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf87ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+ tie_t = (val << 22) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf342ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf342ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf86ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf86ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf84ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ tie_t = (tie_t << 4) | ((insn[0] << 25) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf84ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x78) | (tie_t << 3);
+ tie_t = (val << 21) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf76ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf76ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf75ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf75ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf60ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf60ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 21) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf64ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf64ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 20) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf63ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf63ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ae_r10_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf59ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf59ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 21) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf119ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf119ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 24) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 21) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf338_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf338_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf69ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf69ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 22) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf67ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 2) | ((insn[0] << 25) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf67ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x60) | (tie_t << 5);
+ tie_t = (val << 21) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf66ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf66ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 20) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf25ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf25ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf36ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf36ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf103ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf103ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf349ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 23) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ftsf349ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x1f8) | (tie_t << 3);
+}
+
+static unsigned
+Field_ftsf99ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf99ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf27ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf27ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf28ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf28ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf21ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf21ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf22ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf22ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf29ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf29ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf97ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf97ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf100ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf100ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf101ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf101ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf348ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 24) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf348ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf8) | (tie_t << 3);
+}
+
+static unsigned
+Field_ftsf26ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf26ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf30ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf30ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf31ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf31ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf98ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf98ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf92ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 29) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf92ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6) | (tie_t << 1);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf208_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf208_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+}
+
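+/* Single-bit fields such as ftsf208 above reduce to one mask/deposit step. */
+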
+static unsigned
+Field_ftsf91ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf91ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf90ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf90ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+ tie_t = (val << 25) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf126ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 18) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf126ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x2000) | (tie_t << 13);
+}
+
+static unsigned
+Field_ftsf344ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 19) >> 30);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf344ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 23) >> 30;
+ insn[0] = (insn[0] & ~0x1800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf112ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf112ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf122ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 5) | ((insn[0] << 25) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf122ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c) | (tie_t << 2);
+ tie_t = (val << 24) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf346ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf346ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf116ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 9) | ((insn[0] << 23) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf116ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x1ff) | (tie_t << 0);
+ tie_t = (val << 20) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf109ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf109ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf111ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf111ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf104ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf104ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf105ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf105ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf107ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf107ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf113ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf113ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf118ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 9) | ((insn[0] << 23) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf118ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0x1ff) | (tie_t << 0);
+ tie_t = (val << 20) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf120ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 6) | ((insn[0] << 25) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ftsf120ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x7e) | (tie_t << 1);
+ tie_t = (val << 23) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf343ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf343ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf108ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf108ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf115ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf115ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf110ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf110ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf114ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 7) | ((insn[0] << 25) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf114ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f) | (tie_t << 0);
+ tie_t = (val << 22) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf37ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf37ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf78ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf78ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf79ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf79ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf77ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 16) >> 23);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf77ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 22) >> 23;
+ insn[0] = (insn[0] & ~0xff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf13_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf13_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf12_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf12_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf82ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 16) >> 25);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf82ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 24) >> 25;
+ insn[0] = (insn[0] & ~0xfe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf341ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf341ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf124ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf124ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 28) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf339ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf339ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf106ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 18) >> 29);
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf106ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+ tie_t = (val << 26) >> 29;
+ insn[0] = (insn[0] & ~0x3800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ae_r32_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 21) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x600) | (tie_t << 9);
+}
+
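+/*
+ * The run of ftsf1xx accessors that follows repeats a single layout,
+ * bits [15:14], [8:7] and [3:0] of the slot word; only the field
+ * names differ.
+ */
+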
+static unsigned
+Field_ftsf160ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf160ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf154ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf154ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf175ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf175ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf158ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf158ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf155ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf155ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf167ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf167ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf157ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf157ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf153ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf153ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf163ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf163ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf156ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf156ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf152ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf152ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf161ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf161ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf133ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf133ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf191ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf191ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf142ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf142ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf132ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf132ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf159ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf159ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf141ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf141ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf130ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf130ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf143ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf143ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf140ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf140ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf211ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf211ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf332ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 17) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf332ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x4000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf135ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf135ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf138ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf138ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf176ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf176ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf170ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf170ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf184ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf184ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf174ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf174ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf171ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf171ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf182ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf182ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf173ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf173ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf169ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf169ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf181ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf181ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf172ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf172ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf168ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf168ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf180ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf180ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf139ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf139ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf151ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf151ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf137ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf137ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf147ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf147ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf136ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf136ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf145ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf145ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf134ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf134ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf144ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf144ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf178ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf178ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf188ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf188ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf183ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf183ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf186ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf186ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf179ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf179ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf187ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf187ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf177ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf177ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf185ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf185ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf45ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf45ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf44ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf44ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf48ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf48ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf47ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf47ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf49ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf49ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf50ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf50ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf52ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf52ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf51ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf51ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf38ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf38ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf54ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf54ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf40ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf40ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf39ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf39ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf46ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf46ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf42ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf42ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf43ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf43ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf41ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf41ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf55ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf55ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf53ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf53ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf58ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf58ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf56ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf56ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf72ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf72ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf71ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf71ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 26) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf57ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 16) >> 27);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf57ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 23) >> 27;
+ insn[0] = (insn[0] & ~0xf800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf89ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf89ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf334ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf334ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_t_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf195ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf195ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf207ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf207ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf336ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 28) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf336ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe) | (tie_t << 1);
+}
+
+static unsigned
+Field_ftsf199ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf199ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf210ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf210ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf337ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf337ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf194ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf194ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf197ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf197ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf196ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf196ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf198ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf198ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf200ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf200ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf203ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf203ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf201ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf201ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf202ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf202ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf204ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf204ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf206ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf206ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf205ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf205ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf209ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf209ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf127ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf127ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf129ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf129ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf128ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf128ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf131ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf131ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf146ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf146ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf149ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf149ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf148ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf148ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf150ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf150ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf162ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf162ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf165ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf165ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf164ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf164ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf166ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf166ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf189ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf189ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf192ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf192ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf190ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf190ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_ftsf193ae_slot1_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf193ae_slot1_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+ tie_t = (val << 24) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
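+/*
+ * Accessors for fields of the ae_slot0 slot follow.  They use the same
+ * shift-pair extraction idiom as the ae_slot1 accessors above; only
+ * the bit positions within the 32-bit slot word differ.
+ */
+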
+Field_r_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_op0_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 5) >> 25);
+ return tie_t;
+}
+
+static void
+Field_op0_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0x7f00000) | (tie_t << 20);
+}
+
+static unsigned
+Field_imm8_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_t_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf293_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf293_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_ftsf321_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf321_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_ae_s20_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf214ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf214ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf213ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 12) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf213ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0000) | (tie_t << 17);
+}
+
+static unsigned
+Field_ftsf212ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 12) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf212ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0000) | (tie_t << 18);
+}
+
+static unsigned
+Field_ftsf281ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf281ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf217_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf217_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ae_r20_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf300ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ return tie_t;
+}
+
+static void
+Field_ftsf300ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf283ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 6) | ((insn[0] << 26) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ftsf283ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f) | (tie_t << 0);
+ tie_t = (val << 25) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 17) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf352ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf352ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf282ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_ftsf282ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+ tie_t = (val << 16) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf288ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 26) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf288ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x38) | (tie_t << 3);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf359ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf359ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+ tie_t = (val << 27) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf286ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 26) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf286ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0x3c) | (tie_t << 2);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf356ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf356ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf284ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 5) | ((insn[0] << 26) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf284ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x3e) | (tie_t << 1);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf354ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf354ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf295ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf295ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf358ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf358ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf325ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf325ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf215ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 7) | ((insn[0] << 12) >> 25);
+ return tie_t;
+}
+
+static void
+Field_ftsf215ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 25) >> 25;
+ insn[0] = (insn[0] & ~0xfe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_ftsf301ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 13) | ((insn[0] << 12) >> 19);
+ return tie_t;
+}
+
+static void
+Field_ftsf301ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 19) >> 19;
+ insn[0] = (insn[0] & ~0xfff80) | (tie_t << 7);
+}
+
+static unsigned
+Field_ftsf353_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf353_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf309ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 9) | ((insn[0] << 12) >> 23);
+ return tie_t;
+}
+
+static void
+Field_ftsf309ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 23) >> 23;
+ insn[0] = (insn[0] & ~0xff800) | (tie_t << 11);
+}
+
+static unsigned
+Field_ftsf360ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 21) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf360ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x7c0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf294ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf294ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_s_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf292ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf292ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+ tie_t = (val << 21) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf319_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 28) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf319_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe) | (tie_t << 1);
+}
+
+static unsigned
+Field_ftsf361ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf361ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf218ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf218ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf220ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf220ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf221ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf221ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf222ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf222ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf228ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf228ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf229ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf229ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf230ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf230ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf232ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf232ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf233ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf233ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf235ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf235ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf239ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf239ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf234ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf234ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf224ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf224ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf225ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf225ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf227ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf227ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf226ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf226ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf241ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf241ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf243ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf243ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf242ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf242ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf244ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf244ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf236ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf236ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf237ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf237ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf238ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf238ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf240ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf240ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf261ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf261ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf296ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf296ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf248ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf248ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf250ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf250ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf269ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf269ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf264ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf264ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf266ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf266ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf267ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf267ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf260ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf260ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf262ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf262ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf263ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf263ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf265ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf265ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf246ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf246ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf247ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf247ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf249ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf249ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf253ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf253ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf257ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf257ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf256ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf256ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf258ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf258ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf259ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf259ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf251ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf251ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf252ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf252ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf254ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf254ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf255ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf255ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
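+/* From here the low part varies: Field_ftsf275/277/278 pair the 8-bit
+   range at insn bits 19..12 with a 2-bit subfield at bits 5..4 (10-bit
+   values); Field_ftsf290 pairs it with the single bit 5 (9 bits). */
+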
+static unsigned
+Field_ftsf275ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf275ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf277ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf277ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf278ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf278ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf290ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 26) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf290ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x20) | (tie_t << 5);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
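+/* Field_s8 in ae_slot0 is a plain single-bit field at insn bit 4. */
+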
+Field_s8_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf272ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf272ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf276ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf276ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf273ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf273ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf274ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf274ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf297ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf297ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf298ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf298ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf310ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf310ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf311ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf311ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf270ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf270ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf271ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf271ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_r32_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r32_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf329ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 12) >> 27);
+ return tie_t;
+}
+
+static void
+Field_ftsf329ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0xf8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_ftsf362ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf362ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+ tie_t = (val << 27) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf245ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf245ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf268ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf268ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf313ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf313ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf312ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf312ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf231ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf231ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf223ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf223ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf219ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf219ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf216ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf216ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf302ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf302ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+ tie_t = (val << 17) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf364ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf364ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf322ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf322ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf279ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 6) | ((insn[0] << 26) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ftsf279ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f) | (tie_t << 0);
+ tie_t = (val << 18) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf318ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 3) | ((insn[0] << 28) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf318ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe) | (tie_t << 1);
+ tie_t = (val << 28) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf365ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf365ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_ftsf316ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf316ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf314ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf314ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf315ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf315ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf320ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf320ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf299ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 10) | ((insn[0] << 12) >> 22);
+ return tie_t;
+}
+
+static void
+Field_ftsf299ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 22) >> 22;
+ insn[0] = (insn[0] & ~0xffc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_ftsf308ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 11) | ((insn[0] << 12) >> 21);
+ return tie_t;
+}
+
+static void
+Field_ftsf308ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 21) >> 21;
+ insn[0] = (insn[0] & ~0xffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_ftsf366ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 23) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf366ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf306ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 1) | ((insn[0] << 29) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf306ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x4) | (tie_t << 2);
+ tie_t = (val << 19) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf368ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf368ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+ tie_t = (val << 29) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_ftsf304ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 2) | ((insn[0] << 29) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf304ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x6) | (tie_t << 1);
+ tie_t = (val << 18) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf369ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ tie_t = (tie_t << 1) | ((insn[0] << 31) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf369ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1) | (tie_t << 0);
+ tie_t = (val << 30) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+}
+
+static unsigned
+Field_ftsf323ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf323ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf328ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf328ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 23) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf326ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 2) | ((insn[0] << 28) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf326ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc) | (tie_t << 2);
+ tie_t = (val << 22) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf357_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 30) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf357_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x3) | (tie_t << 0);
+}
+
+static unsigned
+Field_ftsf303ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 12) >> 20);
+ tie_t = (tie_t << 3) | ((insn[0] << 29) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf303ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7) | (tie_t << 0);
+ tie_t = (val << 17) >> 20;
+ insn[0] = (insn[0] & ~0xfff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_ftsf324ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf324ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 20) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ftsf317ae_slot0_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 12) >> 24);
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ftsf317ae_slot0_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+ tie_t = (val << 19) >> 24;
+ insn[0] = (insn[0] & ~0xff000) | (tie_t << 12);
+}
+
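+/*
+ * The remaining accessors cover the standard core fields (t, s, r,
+ * bbi, imm4/6/7/8/12/16, offset, op2, sa*, sr, st, mn, t2, ...).  The
+ * same logical field gets one accessor pair per slot because its bit
+ * position differs between encodings: imm12, for example, occupies
+ * insn bits 23..12 in the inst slot but bits 15..12 and 7..0 in
+ * ae_slot0.
+ */
+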
+static unsigned
+Field_t_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_t_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+}
+
+static unsigned
+Field_bbi4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ return tie_t;
+}
+
+static void
+Field_bbi4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_bbi_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 27) >> 27);
+ return tie_t;
+}
+
+static void
+Field_bbi_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f) | (tie_t << 0);
+}
+
+static unsigned
+Field_imm12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 8) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm12_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 24) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff) | (tie_t << 0);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
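+/*
+ * Round-trip sketch (illustration only, not part of the generated
+ * interface): with uint32 buf[1] = { 0 },
+ *   Field_imm12_Slot_inst_set (buf, 0xabc);
+ *   Field_imm12_Slot_inst_get (buf)  => 0xabc  (buf[0] == 0xabc000).
+ * _set first masks val to the field width, so wider values are
+ * silently truncated rather than rejected.
+ */
+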
+static unsigned
+Field_imm8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_s_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_s_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 8) | ((insn[0] << 8) >> 24);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 24) >> 24;
+ insn[0] = (insn[0] & ~0xff0000) | (tie_t << 16);
+ tie_t = (val << 20) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm12b_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 12) | ((insn[0] << 16) >> 20);
+ return tie_t;
+}
+
+static void
+Field_imm12b_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 20) >> 20;
+ insn[0] = (insn[0] & ~0xfff0) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm16_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 8) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm16_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 16) | ((insn[0] << 12) >> 16);
+ return tie_t;
+}
+
+static void
+Field_imm16_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 16) >> 16;
+ insn[0] = (insn[0] & ~0xffff0) | (tie_t << 4);
+}
+
+static unsigned
+Field_offset_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_offset_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 14) >> 14);
+ return tie_t;
+}
+
+static void
+Field_offset_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0x3ffff) | (tie_t << 0);
+}
+
+static unsigned
+Field_op2_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_op2_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_r_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_r_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sa4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sa4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sae4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sae4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
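+/* sae, sal, sargt and sas are 5-bit shift amounts.  In the inst slot
+   each combines a separate high bit (sae4/sa4/sas4) with a 4-bit
+   field; the ae_slot0 variants re-encode them at slot-local
+   positions. */
+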
+static unsigned
+Field_sae_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 15) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10000) | (tie_t << 16);
+}
+
+static unsigned
+Field_sae_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 15) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sae_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sal_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sal_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 19) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sal_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x1000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sargt_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 11) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x100000) | (tie_t << 20);
+}
+
+static unsigned
+Field_sargt_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 19) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sargt_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f00) | (tie_t << 8);
+}
+
+static unsigned
+Field_sas4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ return tie_t;
+}
+
+static void
+Field_sas4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 27) >> 31);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 27) >> 31;
+ insn[0] = (insn[0] & ~0x10) | (tie_t << 4);
+}
+
+static unsigned
+Field_sas_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 5) | ((insn[0] << 27) >> 27);
+ return tie_t;
+}
+
+static void
+Field_sas_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 27) >> 27;
+ insn[0] = (insn[0] & ~0x1f) | (tie_t << 0);
+}
+
+static unsigned
+Field_sr_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_sr_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ return tie_t;
+}
+
+static void
+Field_sr_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_st_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_st_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 20) >> 28);
+ tie_t = (tie_t << 4) | ((insn[0] << 24) >> 28);
+ return tie_t;
+}
+
+static void
+Field_st_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0) | (tie_t << 4);
+ tie_t = (val << 24) >> 28;
+ insn[0] = (insn[0] & ~0xf00) | (tie_t << 8);
+}
+
+static unsigned
+Field_imm4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_mn_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_mn_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+ tie_t = (val << 28) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_i_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_i_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_imm6hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7lo_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7lo_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7hi_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_imm7hi_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_z_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 25) >> 31);
+ return tie_t;
+}
+
+static void
+Field_z_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x40) | (tie_t << 6);
+}
+
+static unsigned
+Field_imm6_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm6_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm6_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_imm7_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ tie_t = (tie_t << 4) | ((insn[0] << 16) >> 28);
+ return tie_t;
+}
+
+static void
+Field_imm7_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf000) | (tie_t << 12);
+ tie_t = (val << 25) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
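+
+/* Fields wider than one contiguous range are split into *lo/*hi
+   halves: imm6 concatenates the 2-bit imm6hi (bits 5..4) with the
+   4-bit imm6lo (bits 15..12), and imm7 concatenates the 3-bit
+   imm7hi (bits 6..4) with imm7lo.  E.g. with bits 6..4 = 101 and
+   bits 15..12 = 0011, Field_imm7_Slot_inst16a_get returns
+   1010011 = 0x53.  */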
+
+static unsigned
+Field_t2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 24) >> 29);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe0) | (tie_t << 5);
+}
+
+static unsigned
+Field_t2_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 23) >> 30);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t2_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 29) >> 30;
+ insn[0] = (insn[0] & ~0x180) | (tie_t << 7);
+}
+
+static unsigned
+Field_s2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_s2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 20) >> 29);
+ return tie_t;
+}
+
+static void
+Field_s2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_r2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_r2_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_r2_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 16) >> 29);
+ return tie_t;
+}
+
+static void
+Field_r2_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0xe000) | (tie_t << 13);
+}
+
+static unsigned
+Field_t4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_t4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 24) >> 30);
+ return tie_t;
+}
+
+static void
+Field_t4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc0) | (tie_t << 6);
+}
+
+static unsigned
+Field_s4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 20) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc00) | (tie_t << 10);
+}
+
+static unsigned
+Field_s4_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_s4_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_r4_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r4_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_r4_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 16) >> 30);
+ return tie_t;
+}
+
+static void
+Field_r4_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0xc000) | (tie_t << 14);
+}
+
+static unsigned
+Field_t8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_t8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_t8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 24) >> 31);
+ return tie_t;
+}
+
+static void
+Field_t8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x80) | (tie_t << 7);
+}
+
+static unsigned
+Field_s8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_s8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 20) >> 31);
+ return tie_t;
+}
+
+static void
+Field_s8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x800) | (tie_t << 11);
+}
+
+static unsigned
+Field_r8_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_r8_Slot_inst16a_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16a_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_r8_Slot_inst16b_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 1) | ((insn[0] << 16) >> 31);
+ return tie_t;
+}
+
+static void
+Field_r8_Slot_inst16b_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8000) | (tie_t << 15);
+}
+
+static unsigned
+Field_xt_wbr15_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 15) | ((insn[0] << 8) >> 17);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr15_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 17) >> 17;
+ insn[0] = (insn[0] & ~0xfffe00) | (tie_t << 9);
+}
+
+static unsigned
+Field_xt_wbr18_imm_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 18) | ((insn[0] << 8) >> 14);
+ return tie_t;
+}
+
+static void
+Field_xt_wbr18_imm_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 14) >> 14;
+ insn[0] = (insn[0] & ~0xffffc0) | (tie_t << 6);
+}
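+
+/* xt_wbr15_imm and xt_wbr18_imm are the 15- and 18-bit immediates
+   of the wide-branch instructions; they are turned into PC-relative
+   targets by the xt_wbr*_label operands further down.  */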
+
+static unsigned
+Field_ae_samt_s_t_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 6) | ((insn[0] << 22) >> 26);
+ return tie_t;
+}
+
+static void
+Field_ae_samt_s_t_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 26) >> 26;
+ insn[0] = (insn[0] & ~0x3f0) | (tie_t << 4);
+}
+
+static unsigned
+Field_ae_samt_s_t_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 22) >> 30);
+ tie_t = (tie_t << 4) | ((insn[0] << 28) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ae_samt_s_t_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf) | (tie_t << 0);
+ tie_t = (val << 26) >> 30;
+ insn[0] = (insn[0] & ~0x300) | (tie_t << 8);
+}
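+
+/* The ae_* fields below belong to the audio-engine (AE) extension.
+   ae_samt_s_t is a 6-bit shift amount that is contiguous (bits 9..4)
+   in the inst format but split across bits 9..8 and 3..0 in
+   ae_slot0.  */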
+
+static unsigned
+Field_ae_r20_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 17) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_r20_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x7000) | (tie_t << 12);
+}
+
+static unsigned
+Field_ae_r10_Slot_ae_slot0_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ae_r10_Slot_ae_slot0_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_ae_s20_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 21) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ae_s20_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x700) | (tie_t << 8);
+}
+
+static unsigned
+Field_ae_fld_ohba_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ae_fld_ohba_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ae_fld_ohba2_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 12) >> 28);
+ return tie_t;
+}
+
+static void
+Field_ae_fld_ohba2_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 28) >> 28;
+ insn[0] = (insn[0] & ~0xf0000) | (tie_t << 16);
+}
+
+static unsigned
+Field_ftsf12_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 3) | ((insn[0] << 25) >> 29);
+ return tie_t;
+}
+
+static void
+Field_ftsf12_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 29) >> 29;
+ insn[0] = (insn[0] & ~0x70) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf13_Slot_inst_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 2) | ((insn[0] << 26) >> 30);
+ return tie_t;
+}
+
+static void
+Field_ftsf13_Slot_inst_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 30) >> 30;
+ insn[0] = (insn[0] & ~0x30) | (tie_t << 4);
+}
+
+static unsigned
+Field_ftsf14_Slot_ae_slot1_get (const xtensa_insnbuf insn)
+{
+ unsigned tie_t = 0;
+ tie_t = (tie_t << 4) | ((insn[0] << 21) >> 28);
+ tie_t = (tie_t << 1) | ((insn[0] << 28) >> 31);
+ return tie_t;
+}
+
+static void
+Field_ftsf14_Slot_ae_slot1_set (xtensa_insnbuf insn, uint32 val)
+{
+ uint32 tie_t;
+ tie_t = (val << 31) >> 31;
+ insn[0] = (insn[0] & ~0x8) | (tie_t << 3);
+ tie_t = (val << 27) >> 28;
+ insn[0] = (insn[0] & ~0x780) | (tie_t << 7);
+}
+
+static void
+Implicit_Field_set (xtensa_insnbuf insn ATTRIBUTE_UNUSED,
+ uint32 val ATTRIBUTE_UNUSED)
+{
+ /* Do nothing. */
+}
+
+static unsigned
+Implicit_Field_ar0_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_ar4_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 4;
+}
+
+static unsigned
+Implicit_Field_ar8_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 8;
+}
+
+static unsigned
+Implicit_Field_ar12_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 12;
+}
+
+static unsigned
+Implicit_Field_bt16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_bs16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_br16_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static unsigned
+Implicit_Field_brall_get (const xtensa_insnbuf insn ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
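+
+/* Implicit fields are operands with no encoding in the instruction
+   word: the set function is a no-op and each get function returns a
+   fixed value, e.g. AR registers 0/4/8/12 (presumably the return
+   registers of the windowed call variants) and index 0 for the
+   BR-file views.  */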
+
+enum xtensa_field_id {
+ FIELD_t,
+ FIELD_bbi4,
+ FIELD_bbi,
+ FIELD_imm12,
+ FIELD_imm8,
+ FIELD_s,
+ FIELD_imm12b,
+ FIELD_imm16,
+ FIELD_m,
+ FIELD_n,
+ FIELD_offset,
+ FIELD_op0,
+ FIELD_op1,
+ FIELD_op2,
+ FIELD_r,
+ FIELD_sa4,
+ FIELD_sae4,
+ FIELD_sae,
+ FIELD_sal,
+ FIELD_sargt,
+ FIELD_sas4,
+ FIELD_sas,
+ FIELD_sr,
+ FIELD_st,
+ FIELD_thi3,
+ FIELD_imm4,
+ FIELD_mn,
+ FIELD_i,
+ FIELD_imm6lo,
+ FIELD_imm6hi,
+ FIELD_imm7lo,
+ FIELD_imm7hi,
+ FIELD_z,
+ FIELD_imm6,
+ FIELD_imm7,
+ FIELD_t2,
+ FIELD_s2,
+ FIELD_r2,
+ FIELD_t4,
+ FIELD_s4,
+ FIELD_r4,
+ FIELD_t8,
+ FIELD_s8,
+ FIELD_r8,
+ FIELD_xt_wbr15_imm,
+ FIELD_xt_wbr18_imm,
+ FIELD_ae_r3,
+ FIELD_ae_s_non_samt,
+ FIELD_ae_s3,
+ FIELD_ae_r32,
+ FIELD_ae_samt_s_t,
+ FIELD_ae_r20,
+ FIELD_ae_r10,
+ FIELD_ae_s20,
+ FIELD_ae_fld_ohba,
+ FIELD_ae_fld_ohba2,
+ FIELD_op0_s3,
+ FIELD_ftsf12,
+ FIELD_ftsf13,
+ FIELD_ftsf14,
+ FIELD_ftsf21ae_slot1,
+ FIELD_ftsf22ae_slot1,
+ FIELD_ftsf23ae_slot1,
+ FIELD_ftsf24ae_slot1,
+ FIELD_ftsf25ae_slot1,
+ FIELD_ftsf26ae_slot1,
+ FIELD_ftsf27ae_slot1,
+ FIELD_ftsf28ae_slot1,
+ FIELD_ftsf29ae_slot1,
+ FIELD_ftsf30ae_slot1,
+ FIELD_ftsf31ae_slot1,
+ FIELD_ftsf32ae_slot1,
+ FIELD_ftsf33ae_slot1,
+ FIELD_ftsf34ae_slot1,
+ FIELD_ftsf35ae_slot1,
+ FIELD_ftsf36ae_slot1,
+ FIELD_ftsf37ae_slot1,
+ FIELD_ftsf38ae_slot1,
+ FIELD_ftsf39ae_slot1,
+ FIELD_ftsf40ae_slot1,
+ FIELD_ftsf41ae_slot1,
+ FIELD_ftsf42ae_slot1,
+ FIELD_ftsf43ae_slot1,
+ FIELD_ftsf44ae_slot1,
+ FIELD_ftsf45ae_slot1,
+ FIELD_ftsf46ae_slot1,
+ FIELD_ftsf47ae_slot1,
+ FIELD_ftsf48ae_slot1,
+ FIELD_ftsf49ae_slot1,
+ FIELD_ftsf50ae_slot1,
+ FIELD_ftsf51ae_slot1,
+ FIELD_ftsf52ae_slot1,
+ FIELD_ftsf53ae_slot1,
+ FIELD_ftsf54ae_slot1,
+ FIELD_ftsf55ae_slot1,
+ FIELD_ftsf56ae_slot1,
+ FIELD_ftsf57ae_slot1,
+ FIELD_ftsf58ae_slot1,
+ FIELD_ftsf59ae_slot1,
+ FIELD_ftsf60ae_slot1,
+ FIELD_ftsf61ae_slot1,
+ FIELD_ftsf63ae_slot1,
+ FIELD_ftsf64ae_slot1,
+ FIELD_ftsf66ae_slot1,
+ FIELD_ftsf67ae_slot1,
+ FIELD_ftsf69ae_slot1,
+ FIELD_ftsf71ae_slot1,
+ FIELD_ftsf72ae_slot1,
+ FIELD_ftsf73ae_slot1,
+ FIELD_ftsf75ae_slot1,
+ FIELD_ftsf76ae_slot1,
+ FIELD_ftsf77ae_slot1,
+ FIELD_ftsf78ae_slot1,
+ FIELD_ftsf79ae_slot1,
+ FIELD_ftsf80ae_slot1,
+ FIELD_ftsf81ae_slot1,
+ FIELD_ftsf82ae_slot1,
+ FIELD_ftsf84ae_slot1,
+ FIELD_ftsf86ae_slot1,
+ FIELD_ftsf87ae_slot1,
+ FIELD_ftsf88ae_slot1,
+ FIELD_ftsf89ae_slot1,
+ FIELD_ftsf90ae_slot1,
+ FIELD_ftsf91ae_slot1,
+ FIELD_ftsf92ae_slot1,
+ FIELD_ftsf94ae_slot1,
+ FIELD_ftsf96ae_slot1,
+ FIELD_ftsf97ae_slot1,
+ FIELD_ftsf98ae_slot1,
+ FIELD_ftsf99ae_slot1,
+ FIELD_ftsf100ae_slot1,
+ FIELD_ftsf101ae_slot1,
+ FIELD_ftsf103ae_slot1,
+ FIELD_ftsf104ae_slot1,
+ FIELD_ftsf105ae_slot1,
+ FIELD_ftsf106ae_slot1,
+ FIELD_ftsf107ae_slot1,
+ FIELD_ftsf108ae_slot1,
+ FIELD_ftsf109ae_slot1,
+ FIELD_ftsf110ae_slot1,
+ FIELD_ftsf111ae_slot1,
+ FIELD_ftsf112ae_slot1,
+ FIELD_ftsf113ae_slot1,
+ FIELD_ftsf114ae_slot1,
+ FIELD_ftsf115ae_slot1,
+ FIELD_ftsf116ae_slot1,
+ FIELD_ftsf118ae_slot1,
+ FIELD_ftsf119ae_slot1,
+ FIELD_ftsf120ae_slot1,
+ FIELD_ftsf122ae_slot1,
+ FIELD_ftsf124ae_slot1,
+ FIELD_ftsf125ae_slot1,
+ FIELD_ftsf126ae_slot1,
+ FIELD_ftsf127ae_slot1,
+ FIELD_ftsf128ae_slot1,
+ FIELD_ftsf129ae_slot1,
+ FIELD_ftsf130ae_slot1,
+ FIELD_ftsf131ae_slot1,
+ FIELD_ftsf132ae_slot1,
+ FIELD_ftsf133ae_slot1,
+ FIELD_ftsf134ae_slot1,
+ FIELD_ftsf135ae_slot1,
+ FIELD_ftsf136ae_slot1,
+ FIELD_ftsf137ae_slot1,
+ FIELD_ftsf138ae_slot1,
+ FIELD_ftsf139ae_slot1,
+ FIELD_ftsf140ae_slot1,
+ FIELD_ftsf141ae_slot1,
+ FIELD_ftsf142ae_slot1,
+ FIELD_ftsf143ae_slot1,
+ FIELD_ftsf144ae_slot1,
+ FIELD_ftsf145ae_slot1,
+ FIELD_ftsf146ae_slot1,
+ FIELD_ftsf147ae_slot1,
+ FIELD_ftsf148ae_slot1,
+ FIELD_ftsf149ae_slot1,
+ FIELD_ftsf150ae_slot1,
+ FIELD_ftsf151ae_slot1,
+ FIELD_ftsf152ae_slot1,
+ FIELD_ftsf153ae_slot1,
+ FIELD_ftsf154ae_slot1,
+ FIELD_ftsf155ae_slot1,
+ FIELD_ftsf156ae_slot1,
+ FIELD_ftsf157ae_slot1,
+ FIELD_ftsf158ae_slot1,
+ FIELD_ftsf159ae_slot1,
+ FIELD_ftsf160ae_slot1,
+ FIELD_ftsf161ae_slot1,
+ FIELD_ftsf162ae_slot1,
+ FIELD_ftsf163ae_slot1,
+ FIELD_ftsf164ae_slot1,
+ FIELD_ftsf165ae_slot1,
+ FIELD_ftsf166ae_slot1,
+ FIELD_ftsf167ae_slot1,
+ FIELD_ftsf168ae_slot1,
+ FIELD_ftsf169ae_slot1,
+ FIELD_ftsf170ae_slot1,
+ FIELD_ftsf171ae_slot1,
+ FIELD_ftsf172ae_slot1,
+ FIELD_ftsf173ae_slot1,
+ FIELD_ftsf174ae_slot1,
+ FIELD_ftsf175ae_slot1,
+ FIELD_ftsf176ae_slot1,
+ FIELD_ftsf177ae_slot1,
+ FIELD_ftsf178ae_slot1,
+ FIELD_ftsf179ae_slot1,
+ FIELD_ftsf180ae_slot1,
+ FIELD_ftsf181ae_slot1,
+ FIELD_ftsf182ae_slot1,
+ FIELD_ftsf183ae_slot1,
+ FIELD_ftsf184ae_slot1,
+ FIELD_ftsf185ae_slot1,
+ FIELD_ftsf186ae_slot1,
+ FIELD_ftsf187ae_slot1,
+ FIELD_ftsf188ae_slot1,
+ FIELD_ftsf189ae_slot1,
+ FIELD_ftsf190ae_slot1,
+ FIELD_ftsf191ae_slot1,
+ FIELD_ftsf192ae_slot1,
+ FIELD_ftsf193ae_slot1,
+ FIELD_ftsf194ae_slot1,
+ FIELD_ftsf195ae_slot1,
+ FIELD_ftsf196ae_slot1,
+ FIELD_ftsf197ae_slot1,
+ FIELD_ftsf198ae_slot1,
+ FIELD_ftsf199ae_slot1,
+ FIELD_ftsf200ae_slot1,
+ FIELD_ftsf201ae_slot1,
+ FIELD_ftsf202ae_slot1,
+ FIELD_ftsf203ae_slot1,
+ FIELD_ftsf204ae_slot1,
+ FIELD_ftsf205ae_slot1,
+ FIELD_ftsf206ae_slot1,
+ FIELD_ftsf207ae_slot1,
+ FIELD_ftsf208,
+ FIELD_ftsf209ae_slot1,
+ FIELD_ftsf210ae_slot1,
+ FIELD_ftsf211ae_slot1,
+ FIELD_ftsf330ae_slot1,
+ FIELD_ftsf332ae_slot1,
+ FIELD_ftsf334ae_slot1,
+ FIELD_ftsf336ae_slot1,
+ FIELD_ftsf337ae_slot1,
+ FIELD_ftsf338,
+ FIELD_ftsf339ae_slot1,
+ FIELD_ftsf340,
+ FIELD_ftsf341ae_slot1,
+ FIELD_ftsf342ae_slot1,
+ FIELD_ftsf343ae_slot1,
+ FIELD_ftsf344ae_slot1,
+ FIELD_ftsf346ae_slot1,
+ FIELD_ftsf347,
+ FIELD_ftsf348ae_slot1,
+ FIELD_ftsf349ae_slot1,
+ FIELD_ftsf350ae_slot1,
+ FIELD_op0_s4,
+ FIELD_ftsf212ae_slot0,
+ FIELD_ftsf213ae_slot0,
+ FIELD_ftsf214ae_slot0,
+ FIELD_ftsf215ae_slot0,
+ FIELD_ftsf216ae_slot0,
+ FIELD_ftsf217,
+ FIELD_ftsf218ae_slot0,
+ FIELD_ftsf219ae_slot0,
+ FIELD_ftsf220ae_slot0,
+ FIELD_ftsf221ae_slot0,
+ FIELD_ftsf222ae_slot0,
+ FIELD_ftsf223ae_slot0,
+ FIELD_ftsf224ae_slot0,
+ FIELD_ftsf225ae_slot0,
+ FIELD_ftsf226ae_slot0,
+ FIELD_ftsf227ae_slot0,
+ FIELD_ftsf228ae_slot0,
+ FIELD_ftsf229ae_slot0,
+ FIELD_ftsf230ae_slot0,
+ FIELD_ftsf231ae_slot0,
+ FIELD_ftsf232ae_slot0,
+ FIELD_ftsf233ae_slot0,
+ FIELD_ftsf234ae_slot0,
+ FIELD_ftsf235ae_slot0,
+ FIELD_ftsf236ae_slot0,
+ FIELD_ftsf237ae_slot0,
+ FIELD_ftsf238ae_slot0,
+ FIELD_ftsf239ae_slot0,
+ FIELD_ftsf240ae_slot0,
+ FIELD_ftsf241ae_slot0,
+ FIELD_ftsf242ae_slot0,
+ FIELD_ftsf243ae_slot0,
+ FIELD_ftsf244ae_slot0,
+ FIELD_ftsf245ae_slot0,
+ FIELD_ftsf246ae_slot0,
+ FIELD_ftsf247ae_slot0,
+ FIELD_ftsf248ae_slot0,
+ FIELD_ftsf249ae_slot0,
+ FIELD_ftsf250ae_slot0,
+ FIELD_ftsf251ae_slot0,
+ FIELD_ftsf252ae_slot0,
+ FIELD_ftsf253ae_slot0,
+ FIELD_ftsf254ae_slot0,
+ FIELD_ftsf255ae_slot0,
+ FIELD_ftsf256ae_slot0,
+ FIELD_ftsf257ae_slot0,
+ FIELD_ftsf258ae_slot0,
+ FIELD_ftsf259ae_slot0,
+ FIELD_ftsf260ae_slot0,
+ FIELD_ftsf261ae_slot0,
+ FIELD_ftsf262ae_slot0,
+ FIELD_ftsf263ae_slot0,
+ FIELD_ftsf264ae_slot0,
+ FIELD_ftsf265ae_slot0,
+ FIELD_ftsf266ae_slot0,
+ FIELD_ftsf267ae_slot0,
+ FIELD_ftsf268ae_slot0,
+ FIELD_ftsf269ae_slot0,
+ FIELD_ftsf270ae_slot0,
+ FIELD_ftsf271ae_slot0,
+ FIELD_ftsf272ae_slot0,
+ FIELD_ftsf273ae_slot0,
+ FIELD_ftsf274ae_slot0,
+ FIELD_ftsf275ae_slot0,
+ FIELD_ftsf276ae_slot0,
+ FIELD_ftsf277ae_slot0,
+ FIELD_ftsf278ae_slot0,
+ FIELD_ftsf279ae_slot0,
+ FIELD_ftsf281ae_slot0,
+ FIELD_ftsf282ae_slot0,
+ FIELD_ftsf283ae_slot0,
+ FIELD_ftsf284ae_slot0,
+ FIELD_ftsf286ae_slot0,
+ FIELD_ftsf288ae_slot0,
+ FIELD_ftsf290ae_slot0,
+ FIELD_ftsf292ae_slot0,
+ FIELD_ftsf293,
+ FIELD_ftsf294ae_slot0,
+ FIELD_ftsf295ae_slot0,
+ FIELD_ftsf296ae_slot0,
+ FIELD_ftsf297ae_slot0,
+ FIELD_ftsf298ae_slot0,
+ FIELD_ftsf299ae_slot0,
+ FIELD_ftsf300ae_slot0,
+ FIELD_ftsf301ae_slot0,
+ FIELD_ftsf302ae_slot0,
+ FIELD_ftsf303ae_slot0,
+ FIELD_ftsf304ae_slot0,
+ FIELD_ftsf306ae_slot0,
+ FIELD_ftsf308ae_slot0,
+ FIELD_ftsf309ae_slot0,
+ FIELD_ftsf310ae_slot0,
+ FIELD_ftsf311ae_slot0,
+ FIELD_ftsf312ae_slot0,
+ FIELD_ftsf313ae_slot0,
+ FIELD_ftsf314ae_slot0,
+ FIELD_ftsf315ae_slot0,
+ FIELD_ftsf316ae_slot0,
+ FIELD_ftsf317ae_slot0,
+ FIELD_ftsf318ae_slot0,
+ FIELD_ftsf319,
+ FIELD_ftsf320ae_slot0,
+ FIELD_ftsf321,
+ FIELD_ftsf322ae_slot0,
+ FIELD_ftsf323ae_slot0,
+ FIELD_ftsf324ae_slot0,
+ FIELD_ftsf325ae_slot0,
+ FIELD_ftsf326ae_slot0,
+ FIELD_ftsf328ae_slot0,
+ FIELD_ftsf329ae_slot0,
+ FIELD_ftsf352ae_slot0,
+ FIELD_ftsf353,
+ FIELD_ftsf354ae_slot0,
+ FIELD_ftsf356ae_slot0,
+ FIELD_ftsf357,
+ FIELD_ftsf358ae_slot0,
+ FIELD_ftsf359ae_slot0,
+ FIELD_ftsf360ae_slot0,
+ FIELD_ftsf361ae_slot0,
+ FIELD_ftsf362ae_slot0,
+ FIELD_ftsf364ae_slot0,
+ FIELD_ftsf365ae_slot0,
+ FIELD_ftsf366ae_slot0,
+ FIELD_ftsf368ae_slot0,
+ FIELD_ftsf369ae_slot0,
+ FIELD__ar0,
+ FIELD__ar4,
+ FIELD__ar8,
+ FIELD__ar12,
+ FIELD__bt16,
+ FIELD__bs16,
+ FIELD__br16,
+ FIELD__brall
+};
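+
+/* The enumerators above index the corresponding field table defined
+   elsewhere in this file, so their order must match it exactly.  */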
+
+
+/* Functional units. */
+
+static xtensa_funcUnit_internal funcUnits[] = {
+ { "ae_add32", 1 },
+ { "ae_shift32x4", 1 },
+ { "ae_shift32x5", 1 },
+ { "ae_subshift", 1 }
+};
+
+enum xtensa_funcUnit_id {
+ FUNCUNIT_ae_add32,
+ FUNCUNIT_ae_shift32x4,
+ FUNCUNIT_ae_shift32x5,
+ FUNCUNIT_ae_subshift
+};
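+
+/* Each functional unit is a { name, number of copies } pair; with a
+   single copy of every AE shift/add unit, at most one instruction
+   using a given unit can issue per cycle.  */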
+
+
+/* Register files. */
+
+enum xtensa_regfile_id {
+ REGFILE_AR,
+ REGFILE_BR,
+ REGFILE_AE_PR,
+ REGFILE_AE_QR,
+ REGFILE_BR2,
+ REGFILE_BR4,
+ REGFILE_BR8,
+ REGFILE_BR16
+};
+
+static xtensa_regfile_internal regfiles[] = {
+ { "AR", "a", REGFILE_AR, 32, 32 },
+ { "BR", "b", REGFILE_BR, 1, 16 },
+ { "AE_PR", "aep", REGFILE_AE_PR, 48, 8 },
+ { "AE_QR", "aeq", REGFILE_AE_QR, 56, 4 },
+ { "BR2", "b", REGFILE_BR, 2, 8 },
+ { "BR4", "b", REGFILE_BR, 4, 4 },
+ { "BR8", "b", REGFILE_BR, 8, 2 },
+ { "BR16", "b", REGFILE_BR, 16, 1 }
+};
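+
+/* Regfile entries are { name, short name, parent id, width in bits,
+   number of entries }.  AR is 32 x 32-bit, BR 16 x 1-bit, and the
+   AE extension adds eight 48-bit P and four 56-bit Q registers.
+   BR2/BR4/BR8/BR16 are not separate files but views onto BR (note
+   the REGFILE_BR parent id) that group 2, 4, 8 or 16 booleans.  */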
+
+
+/* Interfaces. */
+
+static xtensa_interface_internal interfaces[] = {
+ { "RMPINT_Out", 12, 0, 0, 'o' },
+ { "RMPINT_In", 32, 0, 1, 'i' }
+};
+
+enum xtensa_interface_id {
+ INTERFACE_RMPINT_Out,
+ INTERFACE_RMPINT_In
+};
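+
+/* TIE interface entries appear to be { name, bit width, flags,
+   class id, direction }, giving a 12-bit output and a 32-bit input
+   port for the RMPINT interface.  */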
+
+
+/* Constant tables. */
+
+/* constant table ai4c */
+static const unsigned CONST_TBL_ai4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0x9,
+ 0xa,
+ 0xb,
+ 0xc,
+ 0xd,
+ 0xe,
+ 0xf,
+ 0
+};
+
+/* constant table b4c */
+static const unsigned CONST_TBL_b4c_0[] = {
+ 0xffffffff,
+ 0x1,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
+
+/* constant table b4cu */
+static const unsigned CONST_TBL_b4cu_0[] = {
+ 0x8000,
+ 0x10000,
+ 0x2,
+ 0x3,
+ 0x4,
+ 0x5,
+ 0x6,
+ 0x7,
+ 0x8,
+ 0xa,
+ 0xc,
+ 0x10,
+ 0x20,
+ 0x40,
+ 0x80,
+ 0x100,
+ 0
+};
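+
+/* The tables map a 4-bit field value to an immediate: ai4c decodes
+   to {-1, 1..15}, b4c to {-1, 1..8, 10, 12, 16, 32, 64, 128, 256},
+   and b4cu replaces the two signed-only entries with 0x8000 and
+   0x10000.  The trailing 0 appears to be a terminator; decode only
+   ever indexes the first 16 entries.  */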
+
+
+/* Instruction operands. */
+
+static int
+OperandSem_opnd_sem_soffsetx4_decode (uint32 *valp)
+{
+ unsigned soffsetx4_out_0;
+ unsigned soffsetx4_in_0;
+ soffsetx4_in_0 = *valp & 0x3ffff;
+ soffsetx4_out_0 = 0x4 + ((((int) soffsetx4_in_0 << 14) >> 14) << 2);
+ *valp = soffsetx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffsetx4_encode (uint32 *valp)
+{
+ unsigned soffsetx4_in_0;
+ unsigned soffsetx4_out_0;
+ soffsetx4_out_0 = *valp;
+ soffsetx4_in_0 = ((soffsetx4_out_0 - 0x4) >> 2) & 0x3ffff;
+ *valp = soffsetx4_in_0;
+ return 0;
+}
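+
+/* Operand semantics come in decode/encode pairs: decode maps the
+   raw field value to its semantic value (sign-extend, scale, bias),
+   encode performs the inverse, and a nonzero return flags an
+   encoding error.  For soffsetx4 the decoded value is
+   4 + sign_extend_18(field) * 4.  */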
+
+static int
+OperandSem_opnd_sem_uimm12x8_decode (uint32 *valp)
+{
+ unsigned uimm12x8_out_0;
+ unsigned uimm12x8_in_0;
+ uimm12x8_in_0 = *valp & 0xfff;
+ uimm12x8_out_0 = uimm12x8_in_0 << 3;
+ *valp = uimm12x8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm12x8_encode (uint32 *valp)
+{
+ unsigned uimm12x8_in_0;
+ unsigned uimm12x8_out_0;
+ uimm12x8_out_0 = *valp;
+ uimm12x8_in_0 = ((uimm12x8_out_0 >> 3) & 0xfff);
+ *valp = uimm12x8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_decode (uint32 *valp)
+{
+ unsigned simm4_out_0;
+ unsigned simm4_in_0;
+ simm4_in_0 = *valp & 0xf;
+ simm4_out_0 = ((int) simm4_in_0 << 28) >> 28;
+ *valp = simm4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm4_encode (uint32 *valp)
+{
+ unsigned simm4_in_0;
+ unsigned simm4_out_0;
+ simm4_out_0 = *valp;
+ simm4_in_0 = (simm4_out_0 & 0xf);
+ *valp = simm4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_0_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_1_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_3_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AR_4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 32);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_decode (uint32 *valp)
+{
+ unsigned immrx4_out_0;
+ unsigned immrx4_in_0;
+ immrx4_in_0 = *valp & 0xf;
+ immrx4_out_0 = (((0xfffffff) << 4) | immrx4_in_0) << 2;
+ *valp = immrx4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immrx4_encode (uint32 *valp)
+{
+ unsigned immrx4_in_0;
+ unsigned immrx4_out_0;
+ immrx4_out_0 = *valp;
+ immrx4_in_0 = ((immrx4_out_0 >> 2) & 0xf);
+ *valp = immrx4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_decode (uint32 *valp)
+{
+ unsigned lsi4x4_out_0;
+ unsigned lsi4x4_in_0;
+ lsi4x4_in_0 = *valp & 0xf;
+ lsi4x4_out_0 = lsi4x4_in_0 << 2;
+ *valp = lsi4x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_lsi4x4_encode (uint32 *valp)
+{
+ unsigned lsi4x4_in_0;
+ unsigned lsi4x4_out_0;
+ lsi4x4_out_0 = *valp;
+ lsi4x4_in_0 = ((lsi4x4_out_0 >> 2) & 0xf);
+ *valp = lsi4x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_decode (uint32 *valp)
+{
+ unsigned simm7_out_0;
+ unsigned simm7_in_0;
+ simm7_in_0 = *valp & 0x7f;
+ simm7_out_0 = ((((-((((simm7_in_0 >> 6) & 1)) & (((simm7_in_0 >> 5) & 1)))) & 0x1ffffff)) << 7) | simm7_in_0;
+ *valp = simm7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm7_encode (uint32 *valp)
+{
+ unsigned simm7_in_0;
+ unsigned simm7_out_0;
+ simm7_out_0 = *valp;
+ simm7_in_0 = (simm7_out_0 & 0x7f);
+ *valp = simm7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_decode (uint32 *valp)
+{
+ unsigned uimm6_out_0;
+ unsigned uimm6_in_0;
+ uimm6_in_0 = *valp & 0x3f;
+ uimm6_out_0 = 0x4 + (((0) << 6) | uimm6_in_0);
+ *valp = uimm6_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm6_encode (uint32 *valp)
+{
+ unsigned uimm6_in_0;
+ unsigned uimm6_out_0;
+ uimm6_out_0 = *valp;
+ uimm6_in_0 = (uimm6_out_0 - 0x4) & 0x3f;
+ *valp = uimm6_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_decode (uint32 *valp)
+{
+ unsigned ai4const_out_0;
+ unsigned ai4const_in_0;
+ ai4const_in_0 = *valp & 0xf;
+ ai4const_out_0 = CONST_TBL_ai4c_0[ai4const_in_0 & 0xf];
+ *valp = ai4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ai4const_encode (uint32 *valp)
+{
+ unsigned ai4const_in_0;
+ unsigned ai4const_out_0;
+ ai4const_out_0 = *valp;
+ switch (ai4const_out_0)
+ {
+ case 0xffffffff: ai4const_in_0 = 0; break;
+ case 0x1: ai4const_in_0 = 0x1; break;
+ case 0x2: ai4const_in_0 = 0x2; break;
+ case 0x3: ai4const_in_0 = 0x3; break;
+ case 0x4: ai4const_in_0 = 0x4; break;
+ case 0x5: ai4const_in_0 = 0x5; break;
+ case 0x6: ai4const_in_0 = 0x6; break;
+ case 0x7: ai4const_in_0 = 0x7; break;
+ case 0x8: ai4const_in_0 = 0x8; break;
+ case 0x9: ai4const_in_0 = 0x9; break;
+ case 0xa: ai4const_in_0 = 0xa; break;
+ case 0xb: ai4const_in_0 = 0xb; break;
+ case 0xc: ai4const_in_0 = 0xc; break;
+ case 0xd: ai4const_in_0 = 0xd; break;
+ case 0xe: ai4const_in_0 = 0xe; break;
+ default: ai4const_in_0 = 0xf; break;
+ }
+ *valp = ai4const_in_0;
+ return 0;
+}
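+
+/* The encode switch is the inverse of CONST_TBL_ai4c_0; values not
+   in the table fall through to the last entry rather than returning
+   an error, so range checking presumably happens elsewhere.  */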
+
+static int
+OperandSem_opnd_sem_b4const_decode (uint32 *valp)
+{
+ unsigned b4const_out_0;
+ unsigned b4const_in_0;
+ b4const_in_0 = *valp & 0xf;
+ b4const_out_0 = CONST_TBL_b4c_0[b4const_in_0 & 0xf];
+ *valp = b4const_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4const_encode (uint32 *valp)
+{
+ unsigned b4const_in_0;
+ unsigned b4const_out_0;
+ b4const_out_0 = *valp;
+ switch (b4const_out_0)
+ {
+ case 0xffffffff: b4const_in_0 = 0; break;
+ case 0x1: b4const_in_0 = 0x1; break;
+ case 0x2: b4const_in_0 = 0x2; break;
+ case 0x3: b4const_in_0 = 0x3; break;
+ case 0x4: b4const_in_0 = 0x4; break;
+ case 0x5: b4const_in_0 = 0x5; break;
+ case 0x6: b4const_in_0 = 0x6; break;
+ case 0x7: b4const_in_0 = 0x7; break;
+ case 0x8: b4const_in_0 = 0x8; break;
+ case 0xa: b4const_in_0 = 0x9; break;
+ case 0xc: b4const_in_0 = 0xa; break;
+ case 0x10: b4const_in_0 = 0xb; break;
+ case 0x20: b4const_in_0 = 0xc; break;
+ case 0x40: b4const_in_0 = 0xd; break;
+ case 0x80: b4const_in_0 = 0xe; break;
+ default: b4const_in_0 = 0xf; break;
+ }
+ *valp = b4const_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_decode (uint32 *valp)
+{
+ unsigned b4constu_out_0;
+ unsigned b4constu_in_0;
+ b4constu_in_0 = *valp & 0xf;
+ b4constu_out_0 = CONST_TBL_b4cu_0[b4constu_in_0 & 0xf];
+ *valp = b4constu_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_b4constu_encode (uint32 *valp)
+{
+ unsigned b4constu_in_0;
+ unsigned b4constu_out_0;
+ b4constu_out_0 = *valp;
+ switch (b4constu_out_0)
+ {
+ case 0x8000: b4constu_in_0 = 0; break;
+ case 0x10000: b4constu_in_0 = 0x1; break;
+ case 0x2: b4constu_in_0 = 0x2; break;
+ case 0x3: b4constu_in_0 = 0x3; break;
+ case 0x4: b4constu_in_0 = 0x4; break;
+ case 0x5: b4constu_in_0 = 0x5; break;
+ case 0x6: b4constu_in_0 = 0x6; break;
+ case 0x7: b4constu_in_0 = 0x7; break;
+ case 0x8: b4constu_in_0 = 0x8; break;
+ case 0xa: b4constu_in_0 = 0x9; break;
+ case 0xc: b4constu_in_0 = 0xa; break;
+ case 0x10: b4constu_in_0 = 0xb; break;
+ case 0x20: b4constu_in_0 = 0xc; break;
+ case 0x40: b4constu_in_0 = 0xd; break;
+ case 0x80: b4constu_in_0 = 0xe; break;
+ default: b4constu_in_0 = 0xf; break;
+ }
+ *valp = b4constu_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_decode (uint32 *valp)
+{
+ unsigned uimm8_out_0;
+ unsigned uimm8_in_0;
+ uimm8_in_0 = *valp & 0xff;
+ uimm8_out_0 = uimm8_in_0;
+ *valp = uimm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8_encode (uint32 *valp)
+{
+ unsigned uimm8_in_0;
+ unsigned uimm8_out_0;
+ uimm8_out_0 = *valp;
+ uimm8_in_0 = (uimm8_out_0 & 0xff);
+ *valp = uimm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_decode (uint32 *valp)
+{
+ unsigned uimm8x2_out_0;
+ unsigned uimm8x2_in_0;
+ uimm8x2_in_0 = *valp & 0xff;
+ uimm8x2_out_0 = uimm8x2_in_0 << 1;
+ *valp = uimm8x2_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x2_encode (uint32 *valp)
+{
+ unsigned uimm8x2_in_0;
+ unsigned uimm8x2_out_0;
+ uimm8x2_out_0 = *valp;
+ uimm8x2_in_0 = ((uimm8x2_out_0 >> 1) & 0xff);
+ *valp = uimm8x2_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_decode (uint32 *valp)
+{
+ unsigned uimm8x4_out_0;
+ unsigned uimm8x4_in_0;
+ uimm8x4_in_0 = *valp & 0xff;
+ uimm8x4_out_0 = uimm8x4_in_0 << 2;
+ *valp = uimm8x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm8x4_encode (uint32 *valp)
+{
+ unsigned uimm8x4_in_0;
+ unsigned uimm8x4_out_0;
+ uimm8x4_out_0 = *valp;
+ uimm8x4_in_0 = ((uimm8x4_out_0 >> 2) & 0xff);
+ *valp = uimm8x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_decode (uint32 *valp)
+{
+ unsigned uimm4x16_out_0;
+ unsigned uimm4x16_in_0;
+ uimm4x16_in_0 = *valp & 0xf;
+ uimm4x16_out_0 = uimm4x16_in_0 << 4;
+ *valp = uimm4x16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm4x16_encode (uint32 *valp)
+{
+ unsigned uimm4x16_in_0;
+ unsigned uimm4x16_out_0;
+ uimm4x16_out_0 = *valp;
+ uimm4x16_in_0 = ((uimm4x16_out_0 >> 4) & 0xf);
+ *valp = uimm4x16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_decode (uint32 *valp)
+{
+ unsigned simm8_out_0;
+ unsigned simm8_in_0;
+ simm8_in_0 = *valp & 0xff;
+ simm8_out_0 = ((int) simm8_in_0 << 24) >> 24;
+ *valp = simm8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8_encode (uint32 *valp)
+{
+ unsigned simm8_in_0;
+ unsigned simm8_out_0;
+ simm8_out_0 = *valp;
+ simm8_in_0 = (simm8_out_0 & 0xff);
+ *valp = simm8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_decode (uint32 *valp)
+{
+ unsigned simm8x256_out_0;
+ unsigned simm8x256_in_0;
+ simm8x256_in_0 = *valp & 0xff;
+ simm8x256_out_0 = (((int) simm8x256_in_0 << 24) >> 24) << 8;
+ *valp = simm8x256_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm8x256_encode (uint32 *valp)
+{
+ unsigned simm8x256_in_0;
+ unsigned simm8x256_out_0;
+ simm8x256_out_0 = *valp;
+ simm8x256_in_0 = ((simm8x256_out_0 >> 8) & 0xff);
+ *valp = simm8x256_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_decode (uint32 *valp)
+{
+ unsigned simm12b_out_0;
+ unsigned simm12b_in_0;
+ simm12b_in_0 = *valp & 0xfff;
+ simm12b_out_0 = ((int) simm12b_in_0 << 20) >> 20;
+ *valp = simm12b_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_simm12b_encode (uint32 *valp)
+{
+ unsigned simm12b_in_0;
+ unsigned simm12b_out_0;
+ simm12b_out_0 = *valp;
+ simm12b_in_0 = (simm12b_out_0 & 0xfff);
+ *valp = simm12b_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_decode (uint32 *valp)
+{
+ unsigned msalp32_out_0;
+ unsigned msalp32_in_0;
+ msalp32_in_0 = *valp & 0x1f;
+ msalp32_out_0 = 0x20 - msalp32_in_0;
+ *valp = msalp32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_msalp32_encode (uint32 *valp)
+{
+ unsigned msalp32_in_0;
+ unsigned msalp32_out_0;
+ msalp32_out_0 = *valp;
+ msalp32_in_0 = (0x20 - msalp32_out_0) & 0x1f;
+ *valp = msalp32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_decode (uint32 *valp)
+{
+ unsigned op2p1_out_0;
+ unsigned op2p1_in_0;
+ op2p1_in_0 = *valp & 0xf;
+ op2p1_out_0 = op2p1_in_0 + 0x1;
+ *valp = op2p1_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_op2p1_encode (uint32 *valp)
+{
+ unsigned op2p1_in_0;
+ unsigned op2p1_out_0;
+ op2p1_out_0 = *valp;
+ op2p1_in_0 = (op2p1_out_0 - 0x1) & 0xf;
+ *valp = op2p1_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_decode (uint32 *valp)
+{
+ unsigned label8_out_0;
+ unsigned label8_in_0;
+ label8_in_0 = *valp & 0xff;
+ label8_out_0 = 0x4 + (((int) label8_in_0 << 24) >> 24);
+ *valp = label8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label8_encode (uint32 *valp)
+{
+ unsigned label8_in_0;
+ unsigned label8_out_0;
+ label8_out_0 = *valp;
+ label8_in_0 = (label8_out_0 - 0x4) & 0xff;
+ *valp = label8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_decode (uint32 *valp)
+{
+ unsigned ulabel8_out_0;
+ unsigned ulabel8_in_0;
+ ulabel8_in_0 = *valp & 0xff;
+ ulabel8_out_0 = 0x4 + (((0) << 8) | ulabel8_in_0);
+ *valp = ulabel8_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ulabel8_encode (uint32 *valp)
+{
+ unsigned ulabel8_in_0;
+ unsigned ulabel8_out_0;
+ ulabel8_out_0 = *valp;
+ ulabel8_in_0 = (ulabel8_out_0 - 0x4) & 0xff;
+ *valp = ulabel8_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_decode (uint32 *valp)
+{
+ unsigned label12_out_0;
+ unsigned label12_in_0;
+ label12_in_0 = *valp & 0xfff;
+ label12_out_0 = 0x4 + (((int) label12_in_0 << 20) >> 20);
+ *valp = label12_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_label12_encode (uint32 *valp)
+{
+ unsigned label12_in_0;
+ unsigned label12_out_0;
+ label12_out_0 = *valp;
+ label12_in_0 = (label12_out_0 - 0x4) & 0xfff;
+ *valp = label12_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_decode (uint32 *valp)
+{
+ unsigned soffset_out_0;
+ unsigned soffset_in_0;
+ soffset_in_0 = *valp & 0x3ffff;
+ soffset_out_0 = 0x4 + (((int) soffset_in_0 << 14) >> 14);
+ *valp = soffset_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_soffset_encode (uint32 *valp)
+{
+ unsigned soffset_in_0;
+ unsigned soffset_out_0;
+ soffset_out_0 = *valp;
+ soffset_in_0 = (soffset_out_0 - 0x4) & 0x3ffff;
+ *valp = soffset_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_decode (uint32 *valp)
+{
+ unsigned uimm16x4_out_0;
+ unsigned uimm16x4_in_0;
+ uimm16x4_in_0 = *valp & 0xffff;
+ uimm16x4_out_0 = (((0xffff) << 16) | uimm16x4_in_0) << 2;
+ *valp = uimm16x4_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_uimm16x4_encode (uint32 *valp)
+{
+ unsigned uimm16x4_in_0;
+ unsigned uimm16x4_out_0;
+ uimm16x4_out_0 = *valp;
+ uimm16x4_in_0 = (uimm16x4_out_0 >> 2) & 0xffff;
+ *valp = uimm16x4_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_decode (uint32 *valp)
+{
+ unsigned bbi_out_0;
+ unsigned bbi_in_0;
+ bbi_in_0 = *valp & 0x1f;
+ bbi_out_0 = (0 << 5) | bbi_in_0;
+ *valp = bbi_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_bbi_encode (uint32 *valp)
+{
+ unsigned bbi_in_0;
+ unsigned bbi_out_0;
+ bbi_out_0 = *valp;
+ bbi_in_0 = (bbi_out_0 & 0x1f);
+ *valp = bbi_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_decode (uint32 *valp)
+{
+ unsigned s_out_0;
+ unsigned s_in_0;
+ s_in_0 = *valp & 0xf;
+ s_out_0 = (0 << 4) | s_in_0;
+ *valp = s_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_s_encode (uint32 *valp)
+{
+ unsigned s_in_0;
+ unsigned s_out_0;
+ s_out_0 = *valp;
+ s_in_0 = (s_out_0 & 0xf);
+ *valp = s_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_decode (uint32 *valp)
+{
+ unsigned immt_out_0;
+ unsigned immt_in_0;
+ immt_in_0 = *valp & 0xf;
+ immt_out_0 = immt_in_0;
+ *valp = immt_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_immt_encode (uint32 *valp)
+{
+ unsigned immt_in_0;
+ unsigned immt_out_0;
+ immt_out_0 = *valp;
+ immt_in_0 = immt_out_0 & 0xf;
+ *valp = immt_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR2_decode (uint32 *valp)
+{
+ *valp = *valp << 1;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR2_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 1) != 0);
+ *valp = *valp >> 1;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR4_decode (uint32 *valp)
+{
+ *valp = *valp << 2;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR4_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 3) != 0);
+ *valp = *valp >> 2;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR8_decode (uint32 *valp)
+{
+ *valp = *valp << 3;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR8_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 7) != 0);
+ *valp = *valp >> 3;
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_BR16_decode (uint32 *valp)
+{
+ *valp = *valp << 4;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_BR16_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 16) || ((*valp & 15) != 0);
+ *valp = *valp >> 4;
+ return error;
+}
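+
+/* The BRn operands address aligned groups of n boolean registers:
+   encode rejects an index that is >= 16 or not a multiple of n and
+   stores index / n; decode multiplies back, so a 2-bit BR4 field
+   selects one of b0, b4, b8 or b12.  */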
+
+static int
+OperandSem_opnd_sem_tp7_decode (uint32 *valp)
+{
+ unsigned tp7_out_0;
+ unsigned tp7_in_0;
+ tp7_in_0 = *valp & 0xf;
+ tp7_out_0 = tp7_in_0 + 0x7;
+ *valp = tp7_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_tp7_encode (uint32 *valp)
+{
+ unsigned tp7_in_0;
+ unsigned tp7_out_0;
+ tp7_out_0 = *valp;
+ tp7_in_0 = (tp7_out_0 - 0x7) & 0xf;
+ *valp = tp7_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_decode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_out_0;
+ unsigned xt_wbr15_label_in_0;
+ xt_wbr15_label_in_0 = *valp & 0x7fff;
+ xt_wbr15_label_out_0 = 0x4 + (((int) xt_wbr15_label_in_0 << 17) >> 17);
+ *valp = xt_wbr15_label_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_xt_wbr15_label_encode (uint32 *valp)
+{
+ unsigned xt_wbr15_label_in_0;
+ unsigned xt_wbr15_label_out_0;
+ xt_wbr15_label_out_0 = *valp;
+ xt_wbr15_label_in_0 = (xt_wbr15_label_out_0 - 0x4) & 0x7fff;
+ *valp = xt_wbr15_label_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt32_decode (uint32 *valp)
+{
+ unsigned ae_samt32_out_0;
+ unsigned ae_samt32_in_0;
+ ae_samt32_in_0 = *valp & 0x1f;
+ ae_samt32_out_0 = (0 << 5) | ae_samt32_in_0;
+ *valp = ae_samt32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt32_encode (uint32 *valp)
+{
+ unsigned ae_samt32_in_0;
+ unsigned ae_samt32_out_0;
+ ae_samt32_out_0 = *valp;
+ ae_samt32_in_0 = (ae_samt32_out_0 & 0x1f);
+ *valp = ae_samt32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AE_PR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AE_PR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 8);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_AE_QR_decode (uint32 *valp ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_AE_QR_encode (uint32 *valp)
+{
+ int error;
+ error = (*valp >= 4);
+ return error;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm16_decode (uint32 *valp)
+{
+ unsigned ae_lsimm16_out_0;
+ unsigned ae_lsimm16_in_0;
+ ae_lsimm16_in_0 = *valp & 0xf;
+ ae_lsimm16_out_0 = (((int) ae_lsimm16_in_0 << 28) >> 28) << 1;
+ *valp = ae_lsimm16_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm16_encode (uint32 *valp)
+{
+ unsigned ae_lsimm16_in_0;
+ unsigned ae_lsimm16_out_0;
+ ae_lsimm16_out_0 = *valp;
+ ae_lsimm16_in_0 = ((ae_lsimm16_out_0 >> 1) & 0xf);
+ *valp = ae_lsimm16_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm32_decode (uint32 *valp)
+{
+ unsigned ae_lsimm32_out_0;
+ unsigned ae_lsimm32_in_0;
+ ae_lsimm32_in_0 = *valp & 0xf;
+ ae_lsimm32_out_0 = (((int) ae_lsimm32_in_0 << 28) >> 28) << 2;
+ *valp = ae_lsimm32_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm32_encode (uint32 *valp)
+{
+ unsigned ae_lsimm32_in_0;
+ unsigned ae_lsimm32_out_0;
+ ae_lsimm32_out_0 = *valp;
+ ae_lsimm32_in_0 = ((ae_lsimm32_out_0 >> 2) & 0xf);
+ *valp = ae_lsimm32_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm64_decode (uint32 *valp)
+{
+ unsigned ae_lsimm64_out_0;
+ unsigned ae_lsimm64_in_0;
+ ae_lsimm64_in_0 = *valp & 0xf;
+ ae_lsimm64_out_0 = (((int) ae_lsimm64_in_0 << 28) >> 28) << 3;
+ *valp = ae_lsimm64_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_lsimm64_encode (uint32 *valp)
+{
+ unsigned ae_lsimm64_in_0;
+ unsigned ae_lsimm64_out_0;
+ ae_lsimm64_out_0 = *valp;
+ ae_lsimm64_in_0 = ((ae_lsimm64_out_0 >> 3) & 0xf);
+ *valp = ae_lsimm64_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt64_decode (uint32 *valp)
+{
+ unsigned ae_samt64_out_0;
+ unsigned ae_samt64_in_0;
+ ae_samt64_in_0 = *valp & 0x3f;
+ ae_samt64_out_0 = (0 << 6) | ae_samt64_in_0;
+ *valp = ae_samt64_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_samt64_encode (uint32 *valp)
+{
+ unsigned ae_samt64_in_0;
+ unsigned ae_samt64_out_0;
+ ae_samt64_out_0 = *valp;
+ ae_samt64_in_0 = (ae_samt64_out_0 & 0x3f);
+ *valp = ae_samt64_in_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_ohba_decode (uint32 *valp)
+{
+ unsigned ae_ohba_out_0;
+ unsigned ae_ohba_in_0;
+ ae_ohba_in_0 = *valp & 0xf;
+ ae_ohba_out_0 = (0 << 5) | (((((ae_ohba_in_0 & 0xf))) == 0) << 4) | ((ae_ohba_in_0 & 0xf));
+ *valp = ae_ohba_out_0;
+ return 0;
+}
+
+static int
+OperandSem_opnd_sem_ae_ohba_encode (uint32 *valp)
+{
+ unsigned ae_ohba_in_0;
+ unsigned ae_ohba_out_0;
+ ae_ohba_out_0 = *valp;
+ ae_ohba_in_0 = (ae_ohba_out_0 & 0xf);
+ *valp = ae_ohba_in_0;
+ return 0;
+}
+
+static int
+Operand_soffsetx4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_soffsetx4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += (pc & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm6_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_uimm6_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_ulabel8_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_label12_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_label12_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_soffset_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_soffset_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_uimm16x4_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_uimm16x4_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += ((pc + 3) & ~0x3);
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr15_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_ator (uint32 *valp, uint32 pc)
+{
+ *valp -= pc;
+ return 0;
+}
+
+static int
+Operand_xt_wbr18_label_rtoa (uint32 *valp, uint32 pc)
+{
+ *valp += pc;
+ return 0;
+}
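+
+/* The ator/rtoa pairs convert between absolute target addresses and
+   the PC-relative values seen by encode/decode: call targets are
+   taken relative to pc & ~3, L32R literals (uimm16x4) relative to
+   (pc + 3) & ~3, and plain branches relative to pc itself.  The
+   operand table below ties each operand name to its field, register
+   file, flags and these conversion functions.  */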
+
+static xtensa_operand_internal operands[] = {
+ { "soffsetx4", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffsetx4_encode, OperandSem_opnd_sem_soffsetx4_decode,
+ Operand_soffsetx4_ator, Operand_soffsetx4_rtoa },
+ { "uimm12x8", FIELD_imm12, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm12x8_encode, OperandSem_opnd_sem_uimm12x8_decode,
+ 0, 0 },
+ { "simm4", FIELD_mn, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm4_encode, OperandSem_opnd_sem_simm4_decode,
+ 0, 0 },
+ { "arr", FIELD_r, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ars", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "*ars_invisible", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "art", FIELD_t, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_encode, OperandSem_opnd_sem_AR_decode,
+ 0, 0 },
+ { "ar0", FIELD__ar0, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_0_encode, OperandSem_opnd_sem_AR_0_decode,
+ 0, 0 },
+ { "ar4", FIELD__ar4, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_1_encode, OperandSem_opnd_sem_AR_1_decode,
+ 0, 0 },
+ { "ar8", FIELD__ar8, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_2_encode, OperandSem_opnd_sem_AR_2_decode,
+ 0, 0 },
+ { "ar12", FIELD__ar12, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_AR_3_encode, OperandSem_opnd_sem_AR_3_decode,
+ 0, 0 },
+ { "ars_entry", FIELD_s, REGFILE_AR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AR_4_encode, OperandSem_opnd_sem_AR_4_decode,
+ 0, 0 },
+ { "immrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immrx4_encode, OperandSem_opnd_sem_immrx4_decode,
+ 0, 0 },
+ { "lsi4x4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm7", FIELD_imm7, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm7_encode, OperandSem_opnd_sem_simm7_decode,
+ 0, 0 },
+ { "uimm6", FIELD_imm6, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm6_encode, OperandSem_opnd_sem_uimm6_decode,
+ Operand_uimm6_ator, Operand_uimm6_rtoa },
+ { "ai4const", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ai4const_encode, OperandSem_opnd_sem_ai4const_decode,
+ 0, 0 },
+ { "b4const", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4const_encode, OperandSem_opnd_sem_b4const_decode,
+ 0, 0 },
+ { "b4constu", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_b4constu_encode, OperandSem_opnd_sem_b4constu_decode,
+ 0, 0 },
+ { "uimm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8_encode, OperandSem_opnd_sem_uimm8_decode,
+ 0, 0 },
+ { "uimm8x2", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x2_encode, OperandSem_opnd_sem_uimm8x2_decode,
+ 0, 0 },
+ { "uimm8x4", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm8x4_encode, OperandSem_opnd_sem_uimm8x4_decode,
+ 0, 0 },
+ { "uimm4x16", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_uimm4x16_encode, OperandSem_opnd_sem_uimm4x16_decode,
+ 0, 0 },
+ { "uimmrx4", FIELD_r, -1, 0,
+ 0,
+ OperandSem_opnd_sem_lsi4x4_encode, OperandSem_opnd_sem_lsi4x4_decode,
+ 0, 0 },
+ { "simm8", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8_encode, OperandSem_opnd_sem_simm8_decode,
+ 0, 0 },
+ { "simm8x256", FIELD_imm8, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm8x256_encode, OperandSem_opnd_sem_simm8x256_decode,
+ 0, 0 },
+ { "simm12b", FIELD_imm12b, -1, 0,
+ 0,
+ OperandSem_opnd_sem_simm12b_encode, OperandSem_opnd_sem_simm12b_decode,
+ 0, 0 },
+ { "msalp32", FIELD_sal, -1, 0,
+ 0,
+ OperandSem_opnd_sem_msalp32_encode, OperandSem_opnd_sem_msalp32_decode,
+ 0, 0 },
+ { "op2p1", FIELD_op2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_op2p1_encode, OperandSem_opnd_sem_op2p1_decode,
+ 0, 0 },
+ { "label8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label8_encode, OperandSem_opnd_sem_label8_decode,
+ Operand_label8_ator, Operand_label8_rtoa },
+ { "ulabel8", FIELD_imm8, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_ulabel8_encode, OperandSem_opnd_sem_ulabel8_decode,
+ Operand_ulabel8_ator, Operand_ulabel8_rtoa },
+ { "label12", FIELD_imm12, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_label12_encode, OperandSem_opnd_sem_label12_decode,
+ Operand_label12_ator, Operand_label12_rtoa },
+ { "soffset", FIELD_offset, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_soffset_ator, Operand_soffset_rtoa },
+ { "uimm16x4", FIELD_imm16, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_uimm16x4_encode, OperandSem_opnd_sem_uimm16x4_decode,
+ Operand_uimm16x4_ator, Operand_uimm16x4_rtoa },
+ { "bbi", FIELD_bbi, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sae", FIELD_sae, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sas", FIELD_sas, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "sargt", FIELD_sargt, -1, 0,
+ 0,
+ OperandSem_opnd_sem_bbi_encode, OperandSem_opnd_sem_bbi_decode,
+ 0, 0 },
+ { "s", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_s_encode, OperandSem_opnd_sem_s_decode,
+ 0, 0 },
+ { "immt", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "imms", FIELD_s, -1, 0,
+ 0,
+ OperandSem_opnd_sem_immt_encode, OperandSem_opnd_sem_immt_decode,
+ 0, 0 },
+ { "bt", FIELD_t, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bs", FIELD_s, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "br", FIELD_r, REGFILE_BR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR_encode, OperandSem_opnd_sem_BR_decode,
+ 0, 0 },
+ { "bt2", FIELD_t2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bs2", FIELD_s2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "br2", FIELD_r2, REGFILE_BR, 2,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR2_encode, OperandSem_opnd_sem_BR2_decode,
+ 0, 0 },
+ { "bt4", FIELD_t4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bs4", FIELD_s4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "br4", FIELD_r4, REGFILE_BR, 4,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR4_encode, OperandSem_opnd_sem_BR4_decode,
+ 0, 0 },
+ { "bt8", FIELD_t8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bs8", FIELD_s8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "br8", FIELD_r8, REGFILE_BR, 8,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR8_encode, OperandSem_opnd_sem_BR8_decode,
+ 0, 0 },
+ { "bt16", FIELD__bt16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "bs16", FIELD__bs16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "br16", FIELD__br16, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "brall", FIELD__brall, REGFILE_BR, 16,
+ XTENSA_OPERAND_IS_REGISTER | XTENSA_OPERAND_IS_INVISIBLE,
+ OperandSem_opnd_sem_BR16_encode, OperandSem_opnd_sem_BR16_decode,
+ 0, 0 },
+ { "tp7", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_tp7_encode, OperandSem_opnd_sem_tp7_decode,
+ 0, 0 },
+ { "xt_wbr15_label", FIELD_xt_wbr15_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_xt_wbr15_label_encode, OperandSem_opnd_sem_xt_wbr15_label_decode,
+ Operand_xt_wbr15_label_ator, Operand_xt_wbr15_label_rtoa },
+ { "xt_wbr18_label", FIELD_xt_wbr18_imm, -1, 0,
+ XTENSA_OPERAND_IS_PCRELATIVE,
+ OperandSem_opnd_sem_soffset_encode, OperandSem_opnd_sem_soffset_decode,
+ Operand_xt_wbr18_label_ator, Operand_xt_wbr18_label_rtoa },
+ { "ae_samt32", FIELD_ftsf14, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_samt32_encode, OperandSem_opnd_sem_ae_samt32_decode,
+ 0, 0 },
+ { "pr0", FIELD_ftsf12, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "qr0", FIELD_ftsf13, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr0", FIELD_ftsf13, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "ae_lsimm16", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm16_encode, OperandSem_opnd_sem_ae_lsimm16_decode,
+ 0, 0 },
+ { "ae_lsimm32", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm32_encode, OperandSem_opnd_sem_ae_lsimm32_decode,
+ 0, 0 },
+ { "ae_lsimm64", FIELD_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_lsimm64_encode, OperandSem_opnd_sem_ae_lsimm64_decode,
+ 0, 0 },
+ { "ae_samt64", FIELD_ae_samt_s_t, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_samt64_encode, OperandSem_opnd_sem_ae_samt64_decode,
+ 0, 0 },
+ { "ae_ohba", FIELD_ae_fld_ohba, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_ohba_encode, OperandSem_opnd_sem_ae_ohba_decode,
+ 0, 0 },
+ { "ae_ohba2", FIELD_ae_fld_ohba2, -1, 0,
+ 0,
+ OperandSem_opnd_sem_ae_ohba_encode, OperandSem_opnd_sem_ae_ohba_decode,
+ 0, 0 },
+ { "pr", FIELD_ae_r20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "cvt_pr", FIELD_ae_r20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "qr0_rw", FIELD_ae_r10, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr0_rw", FIELD_ae_r10, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "qr1_w", FIELD_ae_r32, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "mac_qr1_w", FIELD_ae_r32, REGFILE_AE_QR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_QR_encode, OperandSem_opnd_sem_AE_QR_decode,
+ 0, 0 },
+ { "ps", FIELD_ae_s20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
+ { "alupppb_ps", FIELD_ae_s20, REGFILE_AE_PR, 1,
+ XTENSA_OPERAND_IS_REGISTER,
+ OperandSem_opnd_sem_AE_PR_encode, OperandSem_opnd_sem_AE_PR_decode,
+ 0, 0 },
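+ /* The remaining entries appear to expose raw instruction fields
+  * directly as operands: no register file (-1), no flags, and no
+  * encode/decode or PC-relative conversion functions.
+  */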
+ { "t", FIELD_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "bbi4", FIELD_bbi4, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12", FIELD_imm12, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm8", FIELD_imm8, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm12b", FIELD_imm12b, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm16", FIELD_imm16, -1, 0, 0, 0, 0, 0, 0 },
+ { "m", FIELD_m, -1, 0, 0, 0, 0, 0, 0 },
+ { "n", FIELD_n, -1, 0, 0, 0, 0, 0, 0 },
+ { "offset", FIELD_offset, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0", FIELD_op0, -1, 0, 0, 0, 0, 0, 0 },
+ { "op1", FIELD_op1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op2", FIELD_op2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r", FIELD_r, -1, 0, 0, 0, 0, 0, 0 },
+ { "sa4", FIELD_sa4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sae4", FIELD_sae4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sal", FIELD_sal, -1, 0, 0, 0, 0, 0, 0 },
+ { "sas4", FIELD_sas4, -1, 0, 0, 0, 0, 0, 0 },
+ { "sr", FIELD_sr, -1, 0, 0, 0, 0, 0, 0 },
+ { "st", FIELD_st, -1, 0, 0, 0, 0, 0, 0 },
+ { "thi3", FIELD_thi3, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm4", FIELD_imm4, -1, 0, 0, 0, 0, 0, 0 },
+ { "mn", FIELD_mn, -1, 0, 0, 0, 0, 0, 0 },
+ { "i", FIELD_i, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6lo", FIELD_imm6lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6hi", FIELD_imm6hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7lo", FIELD_imm7lo, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7hi", FIELD_imm7hi, -1, 0, 0, 0, 0, 0, 0 },
+ { "z", FIELD_z, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm6", FIELD_imm6, -1, 0, 0, 0, 0, 0, 0 },
+ { "imm7", FIELD_imm7, -1, 0, 0, 0, 0, 0, 0 },
+ { "t2", FIELD_t2, -1, 0, 0, 0, 0, 0, 0 },
+ { "s2", FIELD_s2, -1, 0, 0, 0, 0, 0, 0 },
+ { "r2", FIELD_r2, -1, 0, 0, 0, 0, 0, 0 },
+ { "t4", FIELD_t4, -1, 0, 0, 0, 0, 0, 0 },
+ { "s4", FIELD_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "r4", FIELD_r4, -1, 0, 0, 0, 0, 0, 0 },
+ { "t8", FIELD_t8, -1, 0, 0, 0, 0, 0, 0 },
+ { "s8", FIELD_s8, -1, 0, 0, 0, 0, 0, 0 },
+ { "r8", FIELD_r8, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr15_imm", FIELD_xt_wbr15_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "xt_wbr18_imm", FIELD_xt_wbr18_imm, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r3", FIELD_ae_r3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s_non_samt", FIELD_ae_s_non_samt, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s3", FIELD_ae_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r32", FIELD_ae_r32, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_samt_s_t", FIELD_ae_samt_s_t, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r20", FIELD_ae_r20, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_r10", FIELD_ae_r10, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_s20", FIELD_ae_s20, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_fld_ohba", FIELD_ae_fld_ohba, -1, 0, 0, 0, 0, 0, 0 },
+ { "ae_fld_ohba2", FIELD_ae_fld_ohba2, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s3", FIELD_op0_s3, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf12", FIELD_ftsf12, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf13", FIELD_ftsf13, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf14", FIELD_ftsf14, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf21ae_slot1", FIELD_ftsf21ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf22ae_slot1", FIELD_ftsf22ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf23ae_slot1", FIELD_ftsf23ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf24ae_slot1", FIELD_ftsf24ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf25ae_slot1", FIELD_ftsf25ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf26ae_slot1", FIELD_ftsf26ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf27ae_slot1", FIELD_ftsf27ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf28ae_slot1", FIELD_ftsf28ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf29ae_slot1", FIELD_ftsf29ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf30ae_slot1", FIELD_ftsf30ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf31ae_slot1", FIELD_ftsf31ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf32ae_slot1", FIELD_ftsf32ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf33ae_slot1", FIELD_ftsf33ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf34ae_slot1", FIELD_ftsf34ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf35ae_slot1", FIELD_ftsf35ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf36ae_slot1", FIELD_ftsf36ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf37ae_slot1", FIELD_ftsf37ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf38ae_slot1", FIELD_ftsf38ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf39ae_slot1", FIELD_ftsf39ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf40ae_slot1", FIELD_ftsf40ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf41ae_slot1", FIELD_ftsf41ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf42ae_slot1", FIELD_ftsf42ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf43ae_slot1", FIELD_ftsf43ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf44ae_slot1", FIELD_ftsf44ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf45ae_slot1", FIELD_ftsf45ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf46ae_slot1", FIELD_ftsf46ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf47ae_slot1", FIELD_ftsf47ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf48ae_slot1", FIELD_ftsf48ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf49ae_slot1", FIELD_ftsf49ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf50ae_slot1", FIELD_ftsf50ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf51ae_slot1", FIELD_ftsf51ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf52ae_slot1", FIELD_ftsf52ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf53ae_slot1", FIELD_ftsf53ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf54ae_slot1", FIELD_ftsf54ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf55ae_slot1", FIELD_ftsf55ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf56ae_slot1", FIELD_ftsf56ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf57ae_slot1", FIELD_ftsf57ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf58ae_slot1", FIELD_ftsf58ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf59ae_slot1", FIELD_ftsf59ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf60ae_slot1", FIELD_ftsf60ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf61ae_slot1", FIELD_ftsf61ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf63ae_slot1", FIELD_ftsf63ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf64ae_slot1", FIELD_ftsf64ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf66ae_slot1", FIELD_ftsf66ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf67ae_slot1", FIELD_ftsf67ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf69ae_slot1", FIELD_ftsf69ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf71ae_slot1", FIELD_ftsf71ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf72ae_slot1", FIELD_ftsf72ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf73ae_slot1", FIELD_ftsf73ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf75ae_slot1", FIELD_ftsf75ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf76ae_slot1", FIELD_ftsf76ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf77ae_slot1", FIELD_ftsf77ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf78ae_slot1", FIELD_ftsf78ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf79ae_slot1", FIELD_ftsf79ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf80ae_slot1", FIELD_ftsf80ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf81ae_slot1", FIELD_ftsf81ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf82ae_slot1", FIELD_ftsf82ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf84ae_slot1", FIELD_ftsf84ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf86ae_slot1", FIELD_ftsf86ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf87ae_slot1", FIELD_ftsf87ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf88ae_slot1", FIELD_ftsf88ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf89ae_slot1", FIELD_ftsf89ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf90ae_slot1", FIELD_ftsf90ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf91ae_slot1", FIELD_ftsf91ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf92ae_slot1", FIELD_ftsf92ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf94ae_slot1", FIELD_ftsf94ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf96ae_slot1", FIELD_ftsf96ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf97ae_slot1", FIELD_ftsf97ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf98ae_slot1", FIELD_ftsf98ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf99ae_slot1", FIELD_ftsf99ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf100ae_slot1", FIELD_ftsf100ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf101ae_slot1", FIELD_ftsf101ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf103ae_slot1", FIELD_ftsf103ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf104ae_slot1", FIELD_ftsf104ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf105ae_slot1", FIELD_ftsf105ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf106ae_slot1", FIELD_ftsf106ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf107ae_slot1", FIELD_ftsf107ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf108ae_slot1", FIELD_ftsf108ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf109ae_slot1", FIELD_ftsf109ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf110ae_slot1", FIELD_ftsf110ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf111ae_slot1", FIELD_ftsf111ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf112ae_slot1", FIELD_ftsf112ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf113ae_slot1", FIELD_ftsf113ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf114ae_slot1", FIELD_ftsf114ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf115ae_slot1", FIELD_ftsf115ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf116ae_slot1", FIELD_ftsf116ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf118ae_slot1", FIELD_ftsf118ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf119ae_slot1", FIELD_ftsf119ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf120ae_slot1", FIELD_ftsf120ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf122ae_slot1", FIELD_ftsf122ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf124ae_slot1", FIELD_ftsf124ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf125ae_slot1", FIELD_ftsf125ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf126ae_slot1", FIELD_ftsf126ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf127ae_slot1", FIELD_ftsf127ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf128ae_slot1", FIELD_ftsf128ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf129ae_slot1", FIELD_ftsf129ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf130ae_slot1", FIELD_ftsf130ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf131ae_slot1", FIELD_ftsf131ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf132ae_slot1", FIELD_ftsf132ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf133ae_slot1", FIELD_ftsf133ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf134ae_slot1", FIELD_ftsf134ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf135ae_slot1", FIELD_ftsf135ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf136ae_slot1", FIELD_ftsf136ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf137ae_slot1", FIELD_ftsf137ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf138ae_slot1", FIELD_ftsf138ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf139ae_slot1", FIELD_ftsf139ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf140ae_slot1", FIELD_ftsf140ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf141ae_slot1", FIELD_ftsf141ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf142ae_slot1", FIELD_ftsf142ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf143ae_slot1", FIELD_ftsf143ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf144ae_slot1", FIELD_ftsf144ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf145ae_slot1", FIELD_ftsf145ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf146ae_slot1", FIELD_ftsf146ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf147ae_slot1", FIELD_ftsf147ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf148ae_slot1", FIELD_ftsf148ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf149ae_slot1", FIELD_ftsf149ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf150ae_slot1", FIELD_ftsf150ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf151ae_slot1", FIELD_ftsf151ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf152ae_slot1", FIELD_ftsf152ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf153ae_slot1", FIELD_ftsf153ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf154ae_slot1", FIELD_ftsf154ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf155ae_slot1", FIELD_ftsf155ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf156ae_slot1", FIELD_ftsf156ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf157ae_slot1", FIELD_ftsf157ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf158ae_slot1", FIELD_ftsf158ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf159ae_slot1", FIELD_ftsf159ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf160ae_slot1", FIELD_ftsf160ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf161ae_slot1", FIELD_ftsf161ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf162ae_slot1", FIELD_ftsf162ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf163ae_slot1", FIELD_ftsf163ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf164ae_slot1", FIELD_ftsf164ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf165ae_slot1", FIELD_ftsf165ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf166ae_slot1", FIELD_ftsf166ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf167ae_slot1", FIELD_ftsf167ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf168ae_slot1", FIELD_ftsf168ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf169ae_slot1", FIELD_ftsf169ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf170ae_slot1", FIELD_ftsf170ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf171ae_slot1", FIELD_ftsf171ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf172ae_slot1", FIELD_ftsf172ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf173ae_slot1", FIELD_ftsf173ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf174ae_slot1", FIELD_ftsf174ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf175ae_slot1", FIELD_ftsf175ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf176ae_slot1", FIELD_ftsf176ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf177ae_slot1", FIELD_ftsf177ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf178ae_slot1", FIELD_ftsf178ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf179ae_slot1", FIELD_ftsf179ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf180ae_slot1", FIELD_ftsf180ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf181ae_slot1", FIELD_ftsf181ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf182ae_slot1", FIELD_ftsf182ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf183ae_slot1", FIELD_ftsf183ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf184ae_slot1", FIELD_ftsf184ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf185ae_slot1", FIELD_ftsf185ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf186ae_slot1", FIELD_ftsf186ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf187ae_slot1", FIELD_ftsf187ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf188ae_slot1", FIELD_ftsf188ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf189ae_slot1", FIELD_ftsf189ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf190ae_slot1", FIELD_ftsf190ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf191ae_slot1", FIELD_ftsf191ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf192ae_slot1", FIELD_ftsf192ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf193ae_slot1", FIELD_ftsf193ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf194ae_slot1", FIELD_ftsf194ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf195ae_slot1", FIELD_ftsf195ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf196ae_slot1", FIELD_ftsf196ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf197ae_slot1", FIELD_ftsf197ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf198ae_slot1", FIELD_ftsf198ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf199ae_slot1", FIELD_ftsf199ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf200ae_slot1", FIELD_ftsf200ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf201ae_slot1", FIELD_ftsf201ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf202ae_slot1", FIELD_ftsf202ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf203ae_slot1", FIELD_ftsf203ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf204ae_slot1", FIELD_ftsf204ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf205ae_slot1", FIELD_ftsf205ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf206ae_slot1", FIELD_ftsf206ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf207ae_slot1", FIELD_ftsf207ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf208", FIELD_ftsf208, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf209ae_slot1", FIELD_ftsf209ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf210ae_slot1", FIELD_ftsf210ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf211ae_slot1", FIELD_ftsf211ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf330ae_slot1", FIELD_ftsf330ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf332ae_slot1", FIELD_ftsf332ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf334ae_slot1", FIELD_ftsf334ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf336ae_slot1", FIELD_ftsf336ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf337ae_slot1", FIELD_ftsf337ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf338", FIELD_ftsf338, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf339ae_slot1", FIELD_ftsf339ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf340", FIELD_ftsf340, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf341ae_slot1", FIELD_ftsf341ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf342ae_slot1", FIELD_ftsf342ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf343ae_slot1", FIELD_ftsf343ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf344ae_slot1", FIELD_ftsf344ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf346ae_slot1", FIELD_ftsf346ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf347", FIELD_ftsf347, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf348ae_slot1", FIELD_ftsf348ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf349ae_slot1", FIELD_ftsf349ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf350ae_slot1", FIELD_ftsf350ae_slot1, -1, 0, 0, 0, 0, 0, 0 },
+ { "op0_s4", FIELD_op0_s4, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf212ae_slot0", FIELD_ftsf212ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf213ae_slot0", FIELD_ftsf213ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf214ae_slot0", FIELD_ftsf214ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf215ae_slot0", FIELD_ftsf215ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf216ae_slot0", FIELD_ftsf216ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf217", FIELD_ftsf217, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf218ae_slot0", FIELD_ftsf218ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf219ae_slot0", FIELD_ftsf219ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf220ae_slot0", FIELD_ftsf220ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf221ae_slot0", FIELD_ftsf221ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf222ae_slot0", FIELD_ftsf222ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf223ae_slot0", FIELD_ftsf223ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf224ae_slot0", FIELD_ftsf224ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf225ae_slot0", FIELD_ftsf225ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf226ae_slot0", FIELD_ftsf226ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf227ae_slot0", FIELD_ftsf227ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf228ae_slot0", FIELD_ftsf228ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf229ae_slot0", FIELD_ftsf229ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf230ae_slot0", FIELD_ftsf230ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf231ae_slot0", FIELD_ftsf231ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf232ae_slot0", FIELD_ftsf232ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf233ae_slot0", FIELD_ftsf233ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf234ae_slot0", FIELD_ftsf234ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf235ae_slot0", FIELD_ftsf235ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf236ae_slot0", FIELD_ftsf236ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf237ae_slot0", FIELD_ftsf237ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf238ae_slot0", FIELD_ftsf238ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf239ae_slot0", FIELD_ftsf239ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf240ae_slot0", FIELD_ftsf240ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf241ae_slot0", FIELD_ftsf241ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf242ae_slot0", FIELD_ftsf242ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf243ae_slot0", FIELD_ftsf243ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf244ae_slot0", FIELD_ftsf244ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf245ae_slot0", FIELD_ftsf245ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf246ae_slot0", FIELD_ftsf246ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf247ae_slot0", FIELD_ftsf247ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf248ae_slot0", FIELD_ftsf248ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf249ae_slot0", FIELD_ftsf249ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf250ae_slot0", FIELD_ftsf250ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf251ae_slot0", FIELD_ftsf251ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf252ae_slot0", FIELD_ftsf252ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf253ae_slot0", FIELD_ftsf253ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf254ae_slot0", FIELD_ftsf254ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf255ae_slot0", FIELD_ftsf255ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf256ae_slot0", FIELD_ftsf256ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf257ae_slot0", FIELD_ftsf257ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf258ae_slot0", FIELD_ftsf258ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf259ae_slot0", FIELD_ftsf259ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf260ae_slot0", FIELD_ftsf260ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf261ae_slot0", FIELD_ftsf261ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf262ae_slot0", FIELD_ftsf262ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf263ae_slot0", FIELD_ftsf263ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf264ae_slot0", FIELD_ftsf264ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf265ae_slot0", FIELD_ftsf265ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf266ae_slot0", FIELD_ftsf266ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf267ae_slot0", FIELD_ftsf267ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf268ae_slot0", FIELD_ftsf268ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf269ae_slot0", FIELD_ftsf269ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf270ae_slot0", FIELD_ftsf270ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf271ae_slot0", FIELD_ftsf271ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf272ae_slot0", FIELD_ftsf272ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf273ae_slot0", FIELD_ftsf273ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf274ae_slot0", FIELD_ftsf274ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf275ae_slot0", FIELD_ftsf275ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf276ae_slot0", FIELD_ftsf276ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf277ae_slot0", FIELD_ftsf277ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf278ae_slot0", FIELD_ftsf278ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf279ae_slot0", FIELD_ftsf279ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf281ae_slot0", FIELD_ftsf281ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf282ae_slot0", FIELD_ftsf282ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf283ae_slot0", FIELD_ftsf283ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf284ae_slot0", FIELD_ftsf284ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf286ae_slot0", FIELD_ftsf286ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf288ae_slot0", FIELD_ftsf288ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf290ae_slot0", FIELD_ftsf290ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf292ae_slot0", FIELD_ftsf292ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf293", FIELD_ftsf293, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf294ae_slot0", FIELD_ftsf294ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf295ae_slot0", FIELD_ftsf295ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf296ae_slot0", FIELD_ftsf296ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf297ae_slot0", FIELD_ftsf297ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf298ae_slot0", FIELD_ftsf298ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf299ae_slot0", FIELD_ftsf299ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf300ae_slot0", FIELD_ftsf300ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf301ae_slot0", FIELD_ftsf301ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf302ae_slot0", FIELD_ftsf302ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf303ae_slot0", FIELD_ftsf303ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf304ae_slot0", FIELD_ftsf304ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf306ae_slot0", FIELD_ftsf306ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf308ae_slot0", FIELD_ftsf308ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf309ae_slot0", FIELD_ftsf309ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf310ae_slot0", FIELD_ftsf310ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf311ae_slot0", FIELD_ftsf311ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf312ae_slot0", FIELD_ftsf312ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf313ae_slot0", FIELD_ftsf313ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf314ae_slot0", FIELD_ftsf314ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf315ae_slot0", FIELD_ftsf315ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf316ae_slot0", FIELD_ftsf316ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf317ae_slot0", FIELD_ftsf317ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf318ae_slot0", FIELD_ftsf318ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf319", FIELD_ftsf319, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf320ae_slot0", FIELD_ftsf320ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf321", FIELD_ftsf321, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf322ae_slot0", FIELD_ftsf322ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf323ae_slot0", FIELD_ftsf323ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf324ae_slot0", FIELD_ftsf324ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf325ae_slot0", FIELD_ftsf325ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf326ae_slot0", FIELD_ftsf326ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf328ae_slot0", FIELD_ftsf328ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf329ae_slot0", FIELD_ftsf329ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf352ae_slot0", FIELD_ftsf352ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf353", FIELD_ftsf353, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf354ae_slot0", FIELD_ftsf354ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf356ae_slot0", FIELD_ftsf356ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf357", FIELD_ftsf357, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf358ae_slot0", FIELD_ftsf358ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf359ae_slot0", FIELD_ftsf359ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf360ae_slot0", FIELD_ftsf360ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf361ae_slot0", FIELD_ftsf361ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf362ae_slot0", FIELD_ftsf362ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf364ae_slot0", FIELD_ftsf364ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf365ae_slot0", FIELD_ftsf365ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf366ae_slot0", FIELD_ftsf366ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf368ae_slot0", FIELD_ftsf368ae_slot0, -1, 0, 0, 0, 0, 0, 0 },
+ { "ftsf369ae_slot0", FIELD_ftsf369ae_slot0, -1, 0, 0, 0, 0, 0, 0 }
+};
+
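+/* Operand ids.  Each enumerator corresponds, in order, to one entry of
+ * the operand table above, so the two must be kept in sync.
+ */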
+enum xtensa_operand_id {
+ OPERAND_soffsetx4,
+ OPERAND_uimm12x8,
+ OPERAND_simm4,
+ OPERAND_arr,
+ OPERAND_ars,
+ OPERAND__ars_invisible,
+ OPERAND_art,
+ OPERAND_ar0,
+ OPERAND_ar4,
+ OPERAND_ar8,
+ OPERAND_ar12,
+ OPERAND_ars_entry,
+ OPERAND_immrx4,
+ OPERAND_lsi4x4,
+ OPERAND_simm7,
+ OPERAND_uimm6,
+ OPERAND_ai4const,
+ OPERAND_b4const,
+ OPERAND_b4constu,
+ OPERAND_uimm8,
+ OPERAND_uimm8x2,
+ OPERAND_uimm8x4,
+ OPERAND_uimm4x16,
+ OPERAND_uimmrx4,
+ OPERAND_simm8,
+ OPERAND_simm8x256,
+ OPERAND_simm12b,
+ OPERAND_msalp32,
+ OPERAND_op2p1,
+ OPERAND_label8,
+ OPERAND_ulabel8,
+ OPERAND_label12,
+ OPERAND_soffset,
+ OPERAND_uimm16x4,
+ OPERAND_bbi,
+ OPERAND_sae,
+ OPERAND_sas,
+ OPERAND_sargt,
+ OPERAND_s,
+ OPERAND_immt,
+ OPERAND_imms,
+ OPERAND_bt,
+ OPERAND_bs,
+ OPERAND_br,
+ OPERAND_bt2,
+ OPERAND_bs2,
+ OPERAND_br2,
+ OPERAND_bt4,
+ OPERAND_bs4,
+ OPERAND_br4,
+ OPERAND_bt8,
+ OPERAND_bs8,
+ OPERAND_br8,
+ OPERAND_bt16,
+ OPERAND_bs16,
+ OPERAND_br16,
+ OPERAND_brall,
+ OPERAND_tp7,
+ OPERAND_xt_wbr15_label,
+ OPERAND_xt_wbr18_label,
+ OPERAND_ae_samt32,
+ OPERAND_pr0,
+ OPERAND_qr0,
+ OPERAND_mac_qr0,
+ OPERAND_ae_lsimm16,
+ OPERAND_ae_lsimm32,
+ OPERAND_ae_lsimm64,
+ OPERAND_ae_samt64,
+ OPERAND_ae_ohba,
+ OPERAND_ae_ohba2,
+ OPERAND_pr,
+ OPERAND_cvt_pr,
+ OPERAND_qr0_rw,
+ OPERAND_mac_qr0_rw,
+ OPERAND_qr1_w,
+ OPERAND_mac_qr1_w,
+ OPERAND_ps,
+ OPERAND_alupppb_ps,
+ OPERAND_t,
+ OPERAND_bbi4,
+ OPERAND_imm12,
+ OPERAND_imm8,
+ OPERAND_imm12b,
+ OPERAND_imm16,
+ OPERAND_m,
+ OPERAND_n,
+ OPERAND_offset,
+ OPERAND_op0,
+ OPERAND_op1,
+ OPERAND_op2,
+ OPERAND_r,
+ OPERAND_sa4,
+ OPERAND_sae4,
+ OPERAND_sal,
+ OPERAND_sas4,
+ OPERAND_sr,
+ OPERAND_st,
+ OPERAND_thi3,
+ OPERAND_imm4,
+ OPERAND_mn,
+ OPERAND_i,
+ OPERAND_imm6lo,
+ OPERAND_imm6hi,
+ OPERAND_imm7lo,
+ OPERAND_imm7hi,
+ OPERAND_z,
+ OPERAND_imm6,
+ OPERAND_imm7,
+ OPERAND_t2,
+ OPERAND_s2,
+ OPERAND_r2,
+ OPERAND_t4,
+ OPERAND_s4,
+ OPERAND_r4,
+ OPERAND_t8,
+ OPERAND_s8,
+ OPERAND_r8,
+ OPERAND_xt_wbr15_imm,
+ OPERAND_xt_wbr18_imm,
+ OPERAND_ae_r3,
+ OPERAND_ae_s_non_samt,
+ OPERAND_ae_s3,
+ OPERAND_ae_r32,
+ OPERAND_ae_samt_s_t,
+ OPERAND_ae_r20,
+ OPERAND_ae_r10,
+ OPERAND_ae_s20,
+ OPERAND_ae_fld_ohba,
+ OPERAND_ae_fld_ohba2,
+ OPERAND_op0_s3,
+ OPERAND_ftsf12,
+ OPERAND_ftsf13,
+ OPERAND_ftsf14,
+ OPERAND_ftsf21ae_slot1,
+ OPERAND_ftsf22ae_slot1,
+ OPERAND_ftsf23ae_slot1,
+ OPERAND_ftsf24ae_slot1,
+ OPERAND_ftsf25ae_slot1,
+ OPERAND_ftsf26ae_slot1,
+ OPERAND_ftsf27ae_slot1,
+ OPERAND_ftsf28ae_slot1,
+ OPERAND_ftsf29ae_slot1,
+ OPERAND_ftsf30ae_slot1,
+ OPERAND_ftsf31ae_slot1,
+ OPERAND_ftsf32ae_slot1,
+ OPERAND_ftsf33ae_slot1,
+ OPERAND_ftsf34ae_slot1,
+ OPERAND_ftsf35ae_slot1,
+ OPERAND_ftsf36ae_slot1,
+ OPERAND_ftsf37ae_slot1,
+ OPERAND_ftsf38ae_slot1,
+ OPERAND_ftsf39ae_slot1,
+ OPERAND_ftsf40ae_slot1,
+ OPERAND_ftsf41ae_slot1,
+ OPERAND_ftsf42ae_slot1,
+ OPERAND_ftsf43ae_slot1,
+ OPERAND_ftsf44ae_slot1,
+ OPERAND_ftsf45ae_slot1,
+ OPERAND_ftsf46ae_slot1,
+ OPERAND_ftsf47ae_slot1,
+ OPERAND_ftsf48ae_slot1,
+ OPERAND_ftsf49ae_slot1,
+ OPERAND_ftsf50ae_slot1,
+ OPERAND_ftsf51ae_slot1,
+ OPERAND_ftsf52ae_slot1,
+ OPERAND_ftsf53ae_slot1,
+ OPERAND_ftsf54ae_slot1,
+ OPERAND_ftsf55ae_slot1,
+ OPERAND_ftsf56ae_slot1,
+ OPERAND_ftsf57ae_slot1,
+ OPERAND_ftsf58ae_slot1,
+ OPERAND_ftsf59ae_slot1,
+ OPERAND_ftsf60ae_slot1,
+ OPERAND_ftsf61ae_slot1,
+ OPERAND_ftsf63ae_slot1,
+ OPERAND_ftsf64ae_slot1,
+ OPERAND_ftsf66ae_slot1,
+ OPERAND_ftsf67ae_slot1,
+ OPERAND_ftsf69ae_slot1,
+ OPERAND_ftsf71ae_slot1,
+ OPERAND_ftsf72ae_slot1,
+ OPERAND_ftsf73ae_slot1,
+ OPERAND_ftsf75ae_slot1,
+ OPERAND_ftsf76ae_slot1,
+ OPERAND_ftsf77ae_slot1,
+ OPERAND_ftsf78ae_slot1,
+ OPERAND_ftsf79ae_slot1,
+ OPERAND_ftsf80ae_slot1,
+ OPERAND_ftsf81ae_slot1,
+ OPERAND_ftsf82ae_slot1,
+ OPERAND_ftsf84ae_slot1,
+ OPERAND_ftsf86ae_slot1,
+ OPERAND_ftsf87ae_slot1,
+ OPERAND_ftsf88ae_slot1,
+ OPERAND_ftsf89ae_slot1,
+ OPERAND_ftsf90ae_slot1,
+ OPERAND_ftsf91ae_slot1,
+ OPERAND_ftsf92ae_slot1,
+ OPERAND_ftsf94ae_slot1,
+ OPERAND_ftsf96ae_slot1,
+ OPERAND_ftsf97ae_slot1,
+ OPERAND_ftsf98ae_slot1,
+ OPERAND_ftsf99ae_slot1,
+ OPERAND_ftsf100ae_slot1,
+ OPERAND_ftsf101ae_slot1,
+ OPERAND_ftsf103ae_slot1,
+ OPERAND_ftsf104ae_slot1,
+ OPERAND_ftsf105ae_slot1,
+ OPERAND_ftsf106ae_slot1,
+ OPERAND_ftsf107ae_slot1,
+ OPERAND_ftsf108ae_slot1,
+ OPERAND_ftsf109ae_slot1,
+ OPERAND_ftsf110ae_slot1,
+ OPERAND_ftsf111ae_slot1,
+ OPERAND_ftsf112ae_slot1,
+ OPERAND_ftsf113ae_slot1,
+ OPERAND_ftsf114ae_slot1,
+ OPERAND_ftsf115ae_slot1,
+ OPERAND_ftsf116ae_slot1,
+ OPERAND_ftsf118ae_slot1,
+ OPERAND_ftsf119ae_slot1,
+ OPERAND_ftsf120ae_slot1,
+ OPERAND_ftsf122ae_slot1,
+ OPERAND_ftsf124ae_slot1,
+ OPERAND_ftsf125ae_slot1,
+ OPERAND_ftsf126ae_slot1,
+ OPERAND_ftsf127ae_slot1,
+ OPERAND_ftsf128ae_slot1,
+ OPERAND_ftsf129ae_slot1,
+ OPERAND_ftsf130ae_slot1,
+ OPERAND_ftsf131ae_slot1,
+ OPERAND_ftsf132ae_slot1,
+ OPERAND_ftsf133ae_slot1,
+ OPERAND_ftsf134ae_slot1,
+ OPERAND_ftsf135ae_slot1,
+ OPERAND_ftsf136ae_slot1,
+ OPERAND_ftsf137ae_slot1,
+ OPERAND_ftsf138ae_slot1,
+ OPERAND_ftsf139ae_slot1,
+ OPERAND_ftsf140ae_slot1,
+ OPERAND_ftsf141ae_slot1,
+ OPERAND_ftsf142ae_slot1,
+ OPERAND_ftsf143ae_slot1,
+ OPERAND_ftsf144ae_slot1,
+ OPERAND_ftsf145ae_slot1,
+ OPERAND_ftsf146ae_slot1,
+ OPERAND_ftsf147ae_slot1,
+ OPERAND_ftsf148ae_slot1,
+ OPERAND_ftsf149ae_slot1,
+ OPERAND_ftsf150ae_slot1,
+ OPERAND_ftsf151ae_slot1,
+ OPERAND_ftsf152ae_slot1,
+ OPERAND_ftsf153ae_slot1,
+ OPERAND_ftsf154ae_slot1,
+ OPERAND_ftsf155ae_slot1,
+ OPERAND_ftsf156ae_slot1,
+ OPERAND_ftsf157ae_slot1,
+ OPERAND_ftsf158ae_slot1,
+ OPERAND_ftsf159ae_slot1,
+ OPERAND_ftsf160ae_slot1,
+ OPERAND_ftsf161ae_slot1,
+ OPERAND_ftsf162ae_slot1,
+ OPERAND_ftsf163ae_slot1,
+ OPERAND_ftsf164ae_slot1,
+ OPERAND_ftsf165ae_slot1,
+ OPERAND_ftsf166ae_slot1,
+ OPERAND_ftsf167ae_slot1,
+ OPERAND_ftsf168ae_slot1,
+ OPERAND_ftsf169ae_slot1,
+ OPERAND_ftsf170ae_slot1,
+ OPERAND_ftsf171ae_slot1,
+ OPERAND_ftsf172ae_slot1,
+ OPERAND_ftsf173ae_slot1,
+ OPERAND_ftsf174ae_slot1,
+ OPERAND_ftsf175ae_slot1,
+ OPERAND_ftsf176ae_slot1,
+ OPERAND_ftsf177ae_slot1,
+ OPERAND_ftsf178ae_slot1,
+ OPERAND_ftsf179ae_slot1,
+ OPERAND_ftsf180ae_slot1,
+ OPERAND_ftsf181ae_slot1,
+ OPERAND_ftsf182ae_slot1,
+ OPERAND_ftsf183ae_slot1,
+ OPERAND_ftsf184ae_slot1,
+ OPERAND_ftsf185ae_slot1,
+ OPERAND_ftsf186ae_slot1,
+ OPERAND_ftsf187ae_slot1,
+ OPERAND_ftsf188ae_slot1,
+ OPERAND_ftsf189ae_slot1,
+ OPERAND_ftsf190ae_slot1,
+ OPERAND_ftsf191ae_slot1,
+ OPERAND_ftsf192ae_slot1,
+ OPERAND_ftsf193ae_slot1,
+ OPERAND_ftsf194ae_slot1,
+ OPERAND_ftsf195ae_slot1,
+ OPERAND_ftsf196ae_slot1,
+ OPERAND_ftsf197ae_slot1,
+ OPERAND_ftsf198ae_slot1,
+ OPERAND_ftsf199ae_slot1,
+ OPERAND_ftsf200ae_slot1,
+ OPERAND_ftsf201ae_slot1,
+ OPERAND_ftsf202ae_slot1,
+ OPERAND_ftsf203ae_slot1,
+ OPERAND_ftsf204ae_slot1,
+ OPERAND_ftsf205ae_slot1,
+ OPERAND_ftsf206ae_slot1,
+ OPERAND_ftsf207ae_slot1,
+ OPERAND_ftsf208,
+ OPERAND_ftsf209ae_slot1,
+ OPERAND_ftsf210ae_slot1,
+ OPERAND_ftsf211ae_slot1,
+ OPERAND_ftsf330ae_slot1,
+ OPERAND_ftsf332ae_slot1,
+ OPERAND_ftsf334ae_slot1,
+ OPERAND_ftsf336ae_slot1,
+ OPERAND_ftsf337ae_slot1,
+ OPERAND_ftsf338,
+ OPERAND_ftsf339ae_slot1,
+ OPERAND_ftsf340,
+ OPERAND_ftsf341ae_slot1,
+ OPERAND_ftsf342ae_slot1,
+ OPERAND_ftsf343ae_slot1,
+ OPERAND_ftsf344ae_slot1,
+ OPERAND_ftsf346ae_slot1,
+ OPERAND_ftsf347,
+ OPERAND_ftsf348ae_slot1,
+ OPERAND_ftsf349ae_slot1,
+ OPERAND_ftsf350ae_slot1,
+ OPERAND_op0_s4,
+ OPERAND_ftsf212ae_slot0,
+ OPERAND_ftsf213ae_slot0,
+ OPERAND_ftsf214ae_slot0,
+ OPERAND_ftsf215ae_slot0,
+ OPERAND_ftsf216ae_slot0,
+ OPERAND_ftsf217,
+ OPERAND_ftsf218ae_slot0,
+ OPERAND_ftsf219ae_slot0,
+ OPERAND_ftsf220ae_slot0,
+ OPERAND_ftsf221ae_slot0,
+ OPERAND_ftsf222ae_slot0,
+ OPERAND_ftsf223ae_slot0,
+ OPERAND_ftsf224ae_slot0,
+ OPERAND_ftsf225ae_slot0,
+ OPERAND_ftsf226ae_slot0,
+ OPERAND_ftsf227ae_slot0,
+ OPERAND_ftsf228ae_slot0,
+ OPERAND_ftsf229ae_slot0,
+ OPERAND_ftsf230ae_slot0,
+ OPERAND_ftsf231ae_slot0,
+ OPERAND_ftsf232ae_slot0,
+ OPERAND_ftsf233ae_slot0,
+ OPERAND_ftsf234ae_slot0,
+ OPERAND_ftsf235ae_slot0,
+ OPERAND_ftsf236ae_slot0,
+ OPERAND_ftsf237ae_slot0,
+ OPERAND_ftsf238ae_slot0,
+ OPERAND_ftsf239ae_slot0,
+ OPERAND_ftsf240ae_slot0,
+ OPERAND_ftsf241ae_slot0,
+ OPERAND_ftsf242ae_slot0,
+ OPERAND_ftsf243ae_slot0,
+ OPERAND_ftsf244ae_slot0,
+ OPERAND_ftsf245ae_slot0,
+ OPERAND_ftsf246ae_slot0,
+ OPERAND_ftsf247ae_slot0,
+ OPERAND_ftsf248ae_slot0,
+ OPERAND_ftsf249ae_slot0,
+ OPERAND_ftsf250ae_slot0,
+ OPERAND_ftsf251ae_slot0,
+ OPERAND_ftsf252ae_slot0,
+ OPERAND_ftsf253ae_slot0,
+ OPERAND_ftsf254ae_slot0,
+ OPERAND_ftsf255ae_slot0,
+ OPERAND_ftsf256ae_slot0,
+ OPERAND_ftsf257ae_slot0,
+ OPERAND_ftsf258ae_slot0,
+ OPERAND_ftsf259ae_slot0,
+ OPERAND_ftsf260ae_slot0,
+ OPERAND_ftsf261ae_slot0,
+ OPERAND_ftsf262ae_slot0,
+ OPERAND_ftsf263ae_slot0,
+ OPERAND_ftsf264ae_slot0,
+ OPERAND_ftsf265ae_slot0,
+ OPERAND_ftsf266ae_slot0,
+ OPERAND_ftsf267ae_slot0,
+ OPERAND_ftsf268ae_slot0,
+ OPERAND_ftsf269ae_slot0,
+ OPERAND_ftsf270ae_slot0,
+ OPERAND_ftsf271ae_slot0,
+ OPERAND_ftsf272ae_slot0,
+ OPERAND_ftsf273ae_slot0,
+ OPERAND_ftsf274ae_slot0,
+ OPERAND_ftsf275ae_slot0,
+ OPERAND_ftsf276ae_slot0,
+ OPERAND_ftsf277ae_slot0,
+ OPERAND_ftsf278ae_slot0,
+ OPERAND_ftsf279ae_slot0,
+ OPERAND_ftsf281ae_slot0,
+ OPERAND_ftsf282ae_slot0,
+ OPERAND_ftsf283ae_slot0,
+ OPERAND_ftsf284ae_slot0,
+ OPERAND_ftsf286ae_slot0,
+ OPERAND_ftsf288ae_slot0,
+ OPERAND_ftsf290ae_slot0,
+ OPERAND_ftsf292ae_slot0,
+ OPERAND_ftsf293,
+ OPERAND_ftsf294ae_slot0,
+ OPERAND_ftsf295ae_slot0,
+ OPERAND_ftsf296ae_slot0,
+ OPERAND_ftsf297ae_slot0,
+ OPERAND_ftsf298ae_slot0,
+ OPERAND_ftsf299ae_slot0,
+ OPERAND_ftsf300ae_slot0,
+ OPERAND_ftsf301ae_slot0,
+ OPERAND_ftsf302ae_slot0,
+ OPERAND_ftsf303ae_slot0,
+ OPERAND_ftsf304ae_slot0,
+ OPERAND_ftsf306ae_slot0,
+ OPERAND_ftsf308ae_slot0,
+ OPERAND_ftsf309ae_slot0,
+ OPERAND_ftsf310ae_slot0,
+ OPERAND_ftsf311ae_slot0,
+ OPERAND_ftsf312ae_slot0,
+ OPERAND_ftsf313ae_slot0,
+ OPERAND_ftsf314ae_slot0,
+ OPERAND_ftsf315ae_slot0,
+ OPERAND_ftsf316ae_slot0,
+ OPERAND_ftsf317ae_slot0,
+ OPERAND_ftsf318ae_slot0,
+ OPERAND_ftsf319,
+ OPERAND_ftsf320ae_slot0,
+ OPERAND_ftsf321,
+ OPERAND_ftsf322ae_slot0,
+ OPERAND_ftsf323ae_slot0,
+ OPERAND_ftsf324ae_slot0,
+ OPERAND_ftsf325ae_slot0,
+ OPERAND_ftsf326ae_slot0,
+ OPERAND_ftsf328ae_slot0,
+ OPERAND_ftsf329ae_slot0,
+ OPERAND_ftsf352ae_slot0,
+ OPERAND_ftsf353,
+ OPERAND_ftsf354ae_slot0,
+ OPERAND_ftsf356ae_slot0,
+ OPERAND_ftsf357,
+ OPERAND_ftsf358ae_slot0,
+ OPERAND_ftsf359ae_slot0,
+ OPERAND_ftsf360ae_slot0,
+ OPERAND_ftsf361ae_slot0,
+ OPERAND_ftsf362ae_slot0,
+ OPERAND_ftsf364ae_slot0,
+ OPERAND_ftsf365ae_slot0,
+ OPERAND_ftsf366ae_slot0,
+ OPERAND_ftsf368ae_slot0,
+ OPERAND_ftsf369ae_slot0
+};
+
+
+/* Iclass table. */
+
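+/* Each iclass is described by an _args array (explicit instruction
+ * operands) and, where the instruction touches processor state, a
+ * _stateArgs array (implicit state registers).  The second member of
+ * each entry is the access direction: 'i' for input, 'o' for output,
+ * 'm' for in/out (read-modify-write); 's' appears only on the special
+ * ENTRY register operand.
+ */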
+static xtensa_arg_internal Iclass_xt_iclass_rfe_stateArgs[] = {
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfde_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar12 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx12_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar8 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx8_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx4_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_args[] = {
+ { { OPERAND_ars_entry }, 's' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm12x8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_entry_stateArgs[] = {
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movsp_stateArgs[] = {
+ { { STATE_WindowBase }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_args[] = {
+ { { OPERAND_simm4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rotw_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retw_stateArgs[] = {
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSWOE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfwou_stateArgs[] = {
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' },
+ { { STATE_WindowStart }, 'm' },
+ { { STATE_PSOWB }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_immrx4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32e_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
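+/* The RSR/WSR/XSR iclasses below follow a common pattern: RSR reads a
+ * state register ('o' target register, 'i' state), WSR writes it
+ * ('i' source register, 'o' state), and XSR exchanges the two
+ * ('m' on both sides).
+ */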
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowBase }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_windowstart_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WindowStart }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_add_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_n_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ai4const }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bz6_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm6 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loadi4_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_mov_n_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_n_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_simm7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_retn_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_storei4_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_lsi4x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_threadptr_stateArgs[] = {
+ { { STATE_THREADPTR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addmi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_simm8x256 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_addsub_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bit_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4const }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8b_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bbi }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsi8u_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_b4constu }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bst8_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bsz12_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_label12 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_call0_args[] = {
+ { { OPERAND_soffsetx4 }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_callx0_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ar0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_exti_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sae }, 'i' },
+ { { OPERAND_op2p1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jump_args[] = {
+ { { OPERAND_soffset }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_jumpx_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16ui_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l16si_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_uimm16x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32r_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l8i_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loop_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ulabel8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_loopz_stateArgs[] = {
+ { { STATE_LBEG }, 'o' },
+ { { STATE_LEND }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movi_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_simm12b }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_movz_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_neg_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_return_args[] = {
+ { { OPERAND__ars_invisible }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s16i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s8i_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_args[] = {
+ { { OPERAND_sas }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sari_stateArgs[] = {
+ { { STATE_SAR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shifts_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftst_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_shiftt_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_slli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_msalp32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srai_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_sargt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_srli_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sync_stateArgs[] = {
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsil_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lend_stateArgs[] = {
+ { { STATE_LEND }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lcount_stateArgs[] = {
+ { { STATE_LCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lcount_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_LCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_lbeg_stateArgs[] = {
+ { { STATE_LBEG }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_sar_stateArgs[] = {
+ { { STATE_SAR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'i' },
+ { { STATE_LITBEN }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'o' },
+ { { STATE_LITBEN }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_litbase_stateArgs[] = {
+ { { STATE_LITBADDR }, 'm' },
+ { { STATE_LITBEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_configid0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_configid1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'i' },
+ { { STATE_PSCALLINC }, 'i' },
+ { { STATE_PSOWB }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSUM }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ps_stateArgs[] = {
+ { { STATE_PSWOE }, 'm' },
+ { { STATE_PSCALLINC }, 'm' },
+ { { STATE_PSOWB }, 'm' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'm' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_epc2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPC2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excsave2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCSAVE2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_eps2_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EPS2 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_excvaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCVADDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_depc_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEPC }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'i' },
+ { { STATE_XTSYNC }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_exccause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_EXCCAUSE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC0 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_misc1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MISC1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_prid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_vecbase_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_VECBASE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul16_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_mul32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfi_stateArgs[] = {
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'm' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'm' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPC1 }, 'i' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_EPS2 }, 'i' },
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_args[] = {
+ { { OPERAND_s }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wait_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PSINTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_interrupt_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTERRUPT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intset_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intclear_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_intenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INTENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_args[] = {
+ { { OPERAND_imms }, 'i' },
+ { { OPERAND_immt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_break_n_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSINTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'i' },
+ { { STATE_DBNUM }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'o' },
+ { { STATE_DBNUM }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_debugcause_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DEBUGCAUSE }, 'm' },
+ { { STATE_DBNUM }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_ICOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_icountlevel_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ICOUNTLEVEL }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_DDR }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_args[] = {
+ { { OPERAND_imms }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdo_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' },
+ { { STATE_EPC2 }, 'i' },
+ { { STATE_PSWOE }, 'o' },
+ { { STATE_PSCALLINC }, 'o' },
+ { { STATE_PSOWB }, 'o' },
+ { { STATE_PSRING }, 'o' },
+ { { STATE_PSUM }, 'o' },
+ { { STATE_PSEXCM }, 'o' },
+ { { STATE_PSINTLEVEL }, 'o' },
+ { { STATE_EPS2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rfdd_stateArgs[] = {
+ { { STATE_InOCDMode }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool1_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool4_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbool8_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_bs8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bbranch_args[] = {
+ { { OPERAND_bs }, 'i' },
+ { { OPERAND_label8 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_bmove_args[] = {
+ { { OPERAND_arr }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_RSR_BR_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_brall }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_WSR_BR_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_brall }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_XSR_BR_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_brall }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOUNT }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccount_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_CCOUNT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare0_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE0 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'o' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ccompare1_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CCOMPARE1 }, 'm' },
+ { { STATE_INTERRUPT }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_icache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_licx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sicx_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm4x16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_ind_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dcache_inv_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_dpf_args[] = {
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sdct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldct_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_ptevaddr_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_PTBASE }, 'm' },
+ { { STATE_EXCVADDR }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_rasid_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'i' },
+ { { STATE_ASID2 }, 'i' },
+ { { STATE_ASID1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'o' },
+ { { STATE_ASID2 }, 'o' },
+ { { STATE_ASID1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_rasid_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ASID3 }, 'm' },
+ { { STATE_ASID2 }, 'm' },
+ { { STATE_ASID1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_itlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_itlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_INSTPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_dtlbcfg_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_dtlbcfg_stateArgs[] = {
+ { { STATE_XTSYNC }, 'o' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_DATAPGSZID4 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_idtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wdtlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_args[] = {
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_iitlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ritlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_witlb_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_ldpte_stateArgs[] = {
+ { { STATE_PTBASE }, 'i' },
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwitlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_hwwdtlba_stateArgs[] = {
+ { { STATE_EXCVADDR }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_cpenable_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_CPENABLE }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_clamp_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_minmax_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_nsa_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_sx_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_tp7 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_l32ai_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32ri_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_args[] = {
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_uimm8x4 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_s32c1i_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' },
+ { { STATE_XTSYNC }, 'i' },
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_scompare1_stateArgs[] = {
+ { { STATE_SCOMPARE1 }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_args[] = {
+ { { OPERAND_art }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'o' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_args[] = {
+ { { OPERAND_art }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_xsr_atomctl_stateArgs[] = {
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_ATOMCTL }, 'm' },
+ { { STATE_XTSYNC }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_args[] = {
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_rer_stateArgs[] = {
+ { { STATE_CCON }, 'i' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_MPSCORE }, 'i' }
+};
+
+static xtensa_interface Iclass_xt_iclass_rer_intfArgs[] = {
+ INTERFACE_RMPINT_Out,
+ INTERFACE_RMPINT_In
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_args[] = {
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_xt_iclass_wer_stateArgs[] = {
+ { { STATE_CCON }, 'm' },
+ { { STATE_PSEXCM }, 'i' },
+ { { STATE_PSRING }, 'i' },
+ { { STATE_WMPINT_DATA }, 'o' },
+ { { STATE_WMPINT_ADDR }, 'o' },
+ { { STATE_MPSCORE }, 'm' },
+ { { STATE_WMPINT_TOGGLEEN }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ovf_sar_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ovf_sar_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'i' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ovf_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ovf_sar_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'o' },
+ { { STATE_AE_SAR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_bithead_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_bithead_stateArgs[] = {
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_bithead_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_bithead_stateArgs[] = {
+ { { STATE_AE_BITHEAD }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ts_fts_bu_bp_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_ts_fts_bu_bp_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_TABLESIZE }, 'i' },
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ts_fts_bu_bp_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_ts_fts_bu_bp_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'o' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_sd_no_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_rur_ae_sd_no_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_sd_no_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_wur_ae_sd_no_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_overflow_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_overflow_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_overflow_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_overflow_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_sar_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_sar_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_sar_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_sar_stateArgs[] = {
+ { { STATE_AE_SAR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitptr_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitptr_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitptr_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitptr_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitsused_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_bitsused_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitsused_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_bitsused_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_tablesize_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_tablesize_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_tablesize_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_tablesize_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_first_ts_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_first_ts_stateArgs[] = {
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_first_ts_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_first_ts_stateArgs[] = {
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_nextoffset_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_nextoffset_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_nextoffset_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_nextoffset_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_searchdone_args[] = {
+ { { OPERAND_arr }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_rur_ae_searchdone_stateArgs[] = {
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_searchdone_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_wur_ae_searchdone_stateArgs[] = {
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp16x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_i_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_iu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_x_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_xu_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lp24x2_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2s_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24x2f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm16 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp16f_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24s_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_i_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_iu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_x_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_xu_args[] = {
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sp24f_l_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_i_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_iu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_x_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_xu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq56_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_i_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_iu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_x_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_xu_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lq32f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_i_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_iu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_x_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_xu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq56s_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_i_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_i_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_iu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_lsimm32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_iu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_x_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_x_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_xu_args[] = {
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sq32f_xu_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zerop48_args[] = {
+ { { OPERAND_ps }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zerop48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_ll_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_lh_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hl_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hh_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_selp24_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp24x2_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp24x2_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp48_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp48_args[] = {
+ { { OPERAND_pr }, 'm' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movpa24x2_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movpa24x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24a32x2_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24a32x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvta32p24_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_ll_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_lh_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hl_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hh_args[] = {
+ { { OPERAND_pr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtp24a16x2_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24q48x2_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp24q48x2_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp16_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncp16_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp24q48asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16q48asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16sym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16asym_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsp16asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zeroq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_zeroq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtq56_args[] = {
+ { { OPERAND_qr1_w }, 'm' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movtq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfq56_args[] = {
+ { { OPERAND_qr1_w }, 'm' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bs }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movfq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48a32s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48a32s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_l_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_cvt_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_h_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_cvt_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_cvtq48p24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_satq48s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_satq48s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncq32_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_truncq32_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32sym_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32sym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32asym_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_roundsq32asym_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca32q48_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca32q48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_movap24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_l_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_h_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_trunca16p24s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbp24s_args[] = {
+ { { OPERAND_alupppb_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbp24s_args[] = {
+ { { OPERAND_alupppb_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' },
+ { { OPERAND_bt2 }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorp48_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorp48_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltp24s_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltp24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lep24s_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lep24s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqp24_args[] = {
+ { { OPERAND_bt2 }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqp24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_absq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bt }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_maxbq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_bt }, 'o' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_minbq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_addsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_subsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_negsq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_abssq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_andq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nandq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_orq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_xorq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraip24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraip24_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasp24_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasp24_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_ae_samt32 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssp24s_args[] = {
+ { { OPERAND_ps }, 'o' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssp24s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_slliq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_slliq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srliq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srliq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraiq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraiq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllsq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlsq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srasq56_stateArgs[] = {
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_srlaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraaq56_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sraaq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ae_samt64 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllisq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllssq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_AE_SAR }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllasq56s_args[] = {
+ { { OPERAND_qr1_w }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sllasq56s_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltq56s_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_ltq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_leq56s_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_leq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqq56_args[] = {
+ { { OPERAND_bt }, 'o' },
+ { { OPERAND_qr0 }, 'i' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_eqq56_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nsaq56s_args[] = {
+ { { OPERAND_ars }, 'o' },
+ { { OPERAND_qr0_rw }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_nsaq56s_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulap24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hl_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs32p16s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsp24s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafs56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulas56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_ll_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_lh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hl_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hl_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfs56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulss56p24s_hh_stateArgs[] = {
+ { { STATE_AE_OVERFLOW }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulfq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulafq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsfq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16s_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_l_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_l_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_h_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsq32sp16u_h_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaaq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsaq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_hh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_hh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16s_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_mac_qr0_rw }, 'i' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_mac_qr0 }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfq32sp16u_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzaap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzasp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzsap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'o' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulzssp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulaap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulasp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsafp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulsap24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hh_ll_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hh_ll_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssfp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hl_lh_args[] = {
+ { { OPERAND_mac_qr1_w }, 'm' },
+ { { OPERAND_pr }, 'i' },
+ { { OPERAND_pr0 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_mulssp24s_hl_lh_stateArgs[] = {
+ { { STATE_CPENABLE }, 'i' }
+};
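+/*
+ * The tables that follow cover the AE bitstream instructions (SHA32,
+ * the VLDL/VLEL variable-length decode/encode group, and the LB/SB bit
+ * loads and stores).  Judging by their state operands, these maintain
+ * the bitstream position in STATE_AE_BITPTR/STATE_AE_BITHEAD and the
+ * table-search state in STATE_AE_TABLESIZE, STATE_AE_NEXTOFFSET,
+ * STATE_AE_BITSUSED and STATE_AE_SEARCHDONE.
+ */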
+
+static xtensa_arg_internal Iclass_ae_iclass_sha32_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl32t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl32t_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'o' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16t_stateArgs[] = {
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_SEARCHDONE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16c_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldl16c_stateArgs[] = {
+ { { STATE_AE_NEXTOFFSET }, 'm' },
+ { { STATE_AE_TABLESIZE }, 'm' },
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_AE_FIRST_TS }, 'i' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_SEARCHDONE }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldsht_args[] = {
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vldsht_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_AE_FIRST_TS }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_AE_TABLESIZE }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lb_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lb_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbi_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ae_ohba2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbk_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbk_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbki_args[] = {
+ { { OPERAND_arr }, 'o' },
+ { { OPERAND_ars }, 'i' },
+ { { OPERAND_ae_ohba2 }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_lbki_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_db_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_db_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_dbi_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_ae_ohba }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_dbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel32t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel32t_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel16t_args[] = {
+ { { OPERAND_br }, 'o' },
+ { { OPERAND_art }, 'm' },
+ { { OPERAND_ars }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vlel16t_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'o' },
+ { { STATE_AE_NEXTOFFSET }, 'o' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sb_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sb_stateArgs[] = {
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbi_args[] = {
+ { { OPERAND_ars }, 'm' },
+ { { OPERAND_art }, 'i' },
+ { { OPERAND_ae_ohba }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbi_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vles16c_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_vles16c_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'm' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_AE_BITSUSED }, 'i' },
+ { { STATE_AE_NEXTOFFSET }, 'i' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbf_args[] = {
+ { { OPERAND_ars }, 'm' }
+};
+
+static xtensa_arg_internal Iclass_ae_iclass_sbf_stateArgs[] = {
+ { { STATE_AE_BITPTR }, 'i' },
+ { { STATE_AE_BITHEAD }, 'm' },
+ { { STATE_CPENABLE }, 'i' }
+};
+
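+/*
+ * Master iclass table.  As the count/pointer pairs above show, each
+ * entry is { operand count, operand args, state-operand count,
+ * state args, interface-operand count, interface args }; a zero count
+ * with a null pointer means the iclass has no operands of that kind.
+ */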
+static xtensa_iclass_internal iclasses[] = {
+ { 0, 0 /* xt_iclass_excw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_rfe */,
+ 3, Iclass_xt_iclass_rfe_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfde */,
+ 3, Iclass_xt_iclass_rfde_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_syscall */,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call12_args,
+ 1, Iclass_xt_iclass_call12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call8_args,
+ 1, Iclass_xt_iclass_call8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_call4_args,
+ 1, Iclass_xt_iclass_call4_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx12_args,
+ 1, Iclass_xt_iclass_callx12_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx8_args,
+ 1, Iclass_xt_iclass_callx8_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_callx4_args,
+ 1, Iclass_xt_iclass_callx4_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_entry_args,
+ 5, Iclass_xt_iclass_entry_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movsp_args,
+ 2, Iclass_xt_iclass_movsp_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rotw_args,
+ 3, Iclass_xt_iclass_rotw_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_retw_args,
+ 4, Iclass_xt_iclass_retw_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfwou */,
+ 6, Iclass_xt_iclass_rfwou_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l32e_args,
+ 2, Iclass_xt_iclass_l32e_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_s32e_args,
+ 2, Iclass_xt_iclass_s32e_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowbase_args,
+ 3, Iclass_xt_iclass_rsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowbase_args,
+ 3, Iclass_xt_iclass_wsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowbase_args,
+ 3, Iclass_xt_iclass_xsr_windowbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_windowstart_args,
+ 3, Iclass_xt_iclass_rsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_windowstart_args,
+ 3, Iclass_xt_iclass_wsr_windowstart_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_windowstart_args,
+ 3, Iclass_xt_iclass_xsr_windowstart_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_add_n_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bz6_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill_n */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_loadi4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_mov_n_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_n_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nopn */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_retn_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_storei4_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_rur_threadptr_args,
+ 1, Iclass_rur_threadptr_stateArgs, 0, 0 },
+ { 1, Iclass_wur_threadptr_args,
+ 1, Iclass_wur_threadptr_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_addi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addmi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_addsub_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bit_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8b_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bsi8u_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bst8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bsz12_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_call0_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_callx0_args,
+ 0, 0, 0, 0 },
+ { 4, Iclass_xt_iclass_exti_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_ill */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jump_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_jumpx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16ui_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l16si_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_l32r_args,
+ 2, Iclass_xt_iclass_l32r_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_l8i_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_loop_args,
+ 3, Iclass_xt_iclass_loop_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_loopz_args,
+ 3, Iclass_xt_iclass_loopz_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_movi_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_movz_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_neg_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_nop */,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_return_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_simcall */,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s16i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32i_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s8i_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_sar_args,
+ 1, Iclass_xt_iclass_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_sari_args,
+ 1, Iclass_xt_iclass_sari_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shifts_args,
+ 1, Iclass_xt_iclass_shifts_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_shiftst_args,
+ 1, Iclass_xt_iclass_shiftst_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_shiftt_args,
+ 1, Iclass_xt_iclass_shiftt_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_slli_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_srli_args,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_memw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_extw */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_isync */,
+ 0, 0, 0, 0 },
+ { 0, 0 /* xt_iclass_sync */,
+ 1, Iclass_xt_iclass_sync_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rsil_args,
+ 7, Iclass_xt_iclass_rsil_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lend_args,
+ 1, Iclass_xt_iclass_rsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lend_args,
+ 1, Iclass_xt_iclass_wsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lend_args,
+ 1, Iclass_xt_iclass_xsr_lend_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lcount_args,
+ 1, Iclass_xt_iclass_rsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lcount_args,
+ 2, Iclass_xt_iclass_wsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lcount_args,
+ 2, Iclass_xt_iclass_xsr_lcount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_lbeg_args,
+ 1, Iclass_xt_iclass_rsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_lbeg_args,
+ 1, Iclass_xt_iclass_wsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_lbeg_args,
+ 1, Iclass_xt_iclass_xsr_lbeg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_sar_args,
+ 1, Iclass_xt_iclass_rsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_sar_args,
+ 2, Iclass_xt_iclass_wsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_sar_args,
+ 1, Iclass_xt_iclass_xsr_sar_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_litbase_args,
+ 2, Iclass_xt_iclass_rsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_litbase_args,
+ 2, Iclass_xt_iclass_wsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_litbase_args,
+ 2, Iclass_xt_iclass_xsr_litbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid0_args,
+ 2, Iclass_xt_iclass_rsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_configid0_args,
+ 2, Iclass_xt_iclass_wsr_configid0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_configid1_args,
+ 2, Iclass_xt_iclass_rsr_configid1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ps_args,
+ 7, Iclass_xt_iclass_rsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ps_args,
+ 7, Iclass_xt_iclass_wsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ps_args,
+ 7, Iclass_xt_iclass_xsr_ps_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc1_args,
+ 3, Iclass_xt_iclass_rsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc1_args,
+ 3, Iclass_xt_iclass_wsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc1_args,
+ 3, Iclass_xt_iclass_xsr_epc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave1_args,
+ 3, Iclass_xt_iclass_rsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave1_args,
+ 3, Iclass_xt_iclass_wsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave1_args,
+ 3, Iclass_xt_iclass_xsr_excsave1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_epc2_args,
+ 3, Iclass_xt_iclass_rsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_epc2_args,
+ 3, Iclass_xt_iclass_wsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_epc2_args,
+ 3, Iclass_xt_iclass_xsr_epc2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excsave2_args,
+ 3, Iclass_xt_iclass_rsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excsave2_args,
+ 3, Iclass_xt_iclass_wsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excsave2_args,
+ 3, Iclass_xt_iclass_xsr_excsave2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_eps2_args,
+ 3, Iclass_xt_iclass_rsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_eps2_args,
+ 3, Iclass_xt_iclass_wsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_eps2_args,
+ 3, Iclass_xt_iclass_xsr_eps2_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_excvaddr_args,
+ 3, Iclass_xt_iclass_rsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_excvaddr_args,
+ 3, Iclass_xt_iclass_wsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_excvaddr_args,
+ 3, Iclass_xt_iclass_xsr_excvaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_depc_args,
+ 3, Iclass_xt_iclass_rsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_depc_args,
+ 3, Iclass_xt_iclass_wsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_depc_args,
+ 3, Iclass_xt_iclass_xsr_depc_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_exccause_args,
+ 4, Iclass_xt_iclass_rsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_exccause_args,
+ 3, Iclass_xt_iclass_wsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_exccause_args,
+ 3, Iclass_xt_iclass_xsr_exccause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc0_args,
+ 3, Iclass_xt_iclass_rsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc0_args,
+ 3, Iclass_xt_iclass_wsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc0_args,
+ 3, Iclass_xt_iclass_xsr_misc0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_misc1_args,
+ 3, Iclass_xt_iclass_rsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_misc1_args,
+ 3, Iclass_xt_iclass_wsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_misc1_args,
+ 3, Iclass_xt_iclass_xsr_misc1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_prid_args,
+ 2, Iclass_xt_iclass_rsr_prid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_vecbase_args,
+ 3, Iclass_xt_iclass_rsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_vecbase_args,
+ 3, Iclass_xt_iclass_wsr_vecbase_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_vecbase_args,
+ 3, Iclass_xt_iclass_xsr_vecbase_stateArgs, 0, 0 },
+ { 3, Iclass_xt_mul16_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_mul32_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rfi_args,
+ 11, Iclass_xt_iclass_rfi_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wait_args,
+ 3, Iclass_xt_iclass_wait_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_interrupt_args,
+ 3, Iclass_xt_iclass_rsr_interrupt_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intset_args,
+ 4, Iclass_xt_iclass_wsr_intset_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intclear_args,
+ 4, Iclass_xt_iclass_wsr_intclear_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_intenable_args,
+ 3, Iclass_xt_iclass_rsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_intenable_args,
+ 3, Iclass_xt_iclass_wsr_intenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_intenable_args,
+ 3, Iclass_xt_iclass_xsr_intenable_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_break_args,
+ 2, Iclass_xt_iclass_break_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_break_n_args,
+ 2, Iclass_xt_iclass_break_n_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_debugcause_args,
+ 4, Iclass_xt_iclass_rsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_debugcause_args,
+ 4, Iclass_xt_iclass_wsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_debugcause_args,
+ 4, Iclass_xt_iclass_xsr_debugcause_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icount_args,
+ 3, Iclass_xt_iclass_rsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icount_args,
+ 4, Iclass_xt_iclass_wsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icount_args,
+ 4, Iclass_xt_iclass_xsr_icount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_icountlevel_args,
+ 3, Iclass_xt_iclass_rsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_icountlevel_args,
+ 3, Iclass_xt_iclass_wsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_icountlevel_args,
+ 3, Iclass_xt_iclass_xsr_icountlevel_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ddr_args,
+ 3, Iclass_xt_iclass_rsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ddr_args,
+ 4, Iclass_xt_iclass_wsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ddr_args,
+ 4, Iclass_xt_iclass_xsr_ddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rfdo_args,
+ 10, Iclass_xt_iclass_rfdo_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_rfdd */,
+ 1, Iclass_xt_iclass_rfdd_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_bbool1_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool4_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbool8_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_bbranch_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_bmove_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_RSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_WSR_BR_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_XSR_BR_args,
+ 0, 0, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccount_args,
+ 3, Iclass_xt_iclass_rsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccount_args,
+ 4, Iclass_xt_iclass_wsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccount_args,
+ 4, Iclass_xt_iclass_xsr_ccount_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare0_args,
+ 3, Iclass_xt_iclass_rsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare0_args,
+ 4, Iclass_xt_iclass_wsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare0_args,
+ 4, Iclass_xt_iclass_xsr_ccompare0_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ccompare1_args,
+ 3, Iclass_xt_iclass_rsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ccompare1_args,
+ 4, Iclass_xt_iclass_wsr_ccompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ccompare1_args,
+ 4, Iclass_xt_iclass_xsr_ccompare1_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_icache_inv_args,
+ 2, Iclass_xt_iclass_icache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_licx_args,
+ 2, Iclass_xt_iclass_licx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_sicx_args,
+ 2, Iclass_xt_iclass_sicx_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_ind_args,
+ 2, Iclass_xt_iclass_dcache_ind_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dcache_inv_args,
+ 2, Iclass_xt_iclass_dcache_inv_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_dpf_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_sdct_args,
+ 2, Iclass_xt_iclass_sdct_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ldct_args,
+ 2, Iclass_xt_iclass_ldct_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_wsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_ptevaddr_args,
+ 4, Iclass_xt_iclass_rsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_ptevaddr_args,
+ 5, Iclass_xt_iclass_xsr_ptevaddr_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_rasid_args,
+ 5, Iclass_xt_iclass_rsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_rasid_args,
+ 6, Iclass_xt_iclass_wsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_rasid_args,
+ 6, Iclass_xt_iclass_xsr_rasid_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_itlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_itlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_itlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_dtlbcfg_args,
+ 3, Iclass_xt_iclass_rsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_wsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_dtlbcfg_args,
+ 4, Iclass_xt_iclass_xsr_dtlbcfg_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_idtlb_args,
+ 3, Iclass_xt_iclass_idtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rdtlb_args,
+ 2, Iclass_xt_iclass_rdtlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_wdtlb_args,
+ 3, Iclass_xt_iclass_wdtlb_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_iitlb_args,
+ 2, Iclass_xt_iclass_iitlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_ritlb_args,
+ 2, Iclass_xt_iclass_ritlb_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_witlb_args,
+ 2, Iclass_xt_iclass_witlb_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_ldpte */,
+ 2, Iclass_xt_iclass_ldpte_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwitlba */,
+ 1, Iclass_xt_iclass_hwwitlba_stateArgs, 0, 0 },
+ { 0, 0 /* xt_iclass_hwwdtlba */,
+ 1, Iclass_xt_iclass_hwwdtlba_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_cpenable_args,
+ 3, Iclass_xt_iclass_rsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_cpenable_args,
+ 3, Iclass_xt_iclass_wsr_cpenable_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_cpenable_args,
+ 3, Iclass_xt_iclass_xsr_cpenable_stateArgs, 0, 0 },
+ { 3, Iclass_xt_iclass_clamp_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_minmax_args,
+ 0, 0, 0, 0 },
+ { 2, Iclass_xt_iclass_nsa_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_sx_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_l32ai_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32ri_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_xt_iclass_s32c1i_args,
+ 3, Iclass_xt_iclass_s32c1i_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_scompare1_args,
+ 1, Iclass_xt_iclass_rsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_scompare1_args,
+ 1, Iclass_xt_iclass_wsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_scompare1_args,
+ 1, Iclass_xt_iclass_xsr_scompare1_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_rsr_atomctl_args,
+ 3, Iclass_xt_iclass_rsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_wsr_atomctl_args,
+ 4, Iclass_xt_iclass_wsr_atomctl_stateArgs, 0, 0 },
+ { 1, Iclass_xt_iclass_xsr_atomctl_args,
+ 4, Iclass_xt_iclass_xsr_atomctl_stateArgs, 0, 0 },
+ { 2, Iclass_xt_iclass_rer_args,
+ 4, Iclass_xt_iclass_rer_stateArgs, 2, Iclass_xt_iclass_rer_intfArgs },
+ { 2, Iclass_xt_iclass_wer_args,
+ 7, Iclass_xt_iclass_wer_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_ovf_sar_args,
+ 3, Iclass_rur_ae_ovf_sar_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_ovf_sar_args,
+ 3, Iclass_wur_ae_ovf_sar_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_bithead_args,
+ 2, Iclass_rur_ae_bithead_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_bithead_args,
+ 2, Iclass_wur_ae_bithead_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_ts_fts_bu_bp_args,
+ 5, Iclass_rur_ae_ts_fts_bu_bp_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_ts_fts_bu_bp_args,
+ 5, Iclass_wur_ae_ts_fts_bu_bp_stateArgs, 0, 0 },
+ { 1, Iclass_rur_ae_sd_no_args,
+ 3, Iclass_rur_ae_sd_no_stateArgs, 0, 0 },
+ { 1, Iclass_wur_ae_sd_no_args,
+ 3, Iclass_wur_ae_sd_no_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_overflow_args,
+ 2, Iclass_ae_iclass_rur_ae_overflow_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_overflow_args,
+ 2, Iclass_ae_iclass_wur_ae_overflow_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_sar_args,
+ 2, Iclass_ae_iclass_rur_ae_sar_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_sar_args,
+ 2, Iclass_ae_iclass_wur_ae_sar_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_bitptr_args,
+ 2, Iclass_ae_iclass_rur_ae_bitptr_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_bitptr_args,
+ 2, Iclass_ae_iclass_wur_ae_bitptr_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_bitsused_args,
+ 2, Iclass_ae_iclass_rur_ae_bitsused_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_bitsused_args,
+ 2, Iclass_ae_iclass_wur_ae_bitsused_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_tablesize_args,
+ 2, Iclass_ae_iclass_rur_ae_tablesize_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_tablesize_args,
+ 2, Iclass_ae_iclass_wur_ae_tablesize_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_first_ts_args,
+ 2, Iclass_ae_iclass_rur_ae_first_ts_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_first_ts_args,
+ 2, Iclass_ae_iclass_wur_ae_first_ts_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_nextoffset_args,
+ 2, Iclass_ae_iclass_rur_ae_nextoffset_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_nextoffset_args,
+ 2, Iclass_ae_iclass_wur_ae_nextoffset_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_rur_ae_searchdone_args,
+ 2, Iclass_ae_iclass_rur_ae_searchdone_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_wur_ae_searchdone_args,
+ 2, Iclass_ae_iclass_wur_ae_searchdone_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_i_args,
+ 1, Iclass_ae_iclass_lp16f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_iu_args,
+ 1, Iclass_ae_iclass_lp16f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_x_args,
+ 1, Iclass_ae_iclass_lp16f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16f_xu_args,
+ 1, Iclass_ae_iclass_lp16f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_i_args,
+ 1, Iclass_ae_iclass_lp24_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_iu_args,
+ 1, Iclass_ae_iclass_lp24_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_x_args,
+ 1, Iclass_ae_iclass_lp24_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24_xu_args,
+ 1, Iclass_ae_iclass_lp24_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_i_args,
+ 1, Iclass_ae_iclass_lp24f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_iu_args,
+ 1, Iclass_ae_iclass_lp24f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_x_args,
+ 1, Iclass_ae_iclass_lp24f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24f_xu_args,
+ 1, Iclass_ae_iclass_lp24f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_i_args,
+ 1, Iclass_ae_iclass_lp16x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_iu_args,
+ 1, Iclass_ae_iclass_lp16x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_x_args,
+ 1, Iclass_ae_iclass_lp16x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp16x2f_xu_args,
+ 1, Iclass_ae_iclass_lp16x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_i_args,
+ 1, Iclass_ae_iclass_lp24x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_iu_args,
+ 1, Iclass_ae_iclass_lp24x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_x_args,
+ 1, Iclass_ae_iclass_lp24x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2f_xu_args,
+ 1, Iclass_ae_iclass_lp24x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_i_args,
+ 1, Iclass_ae_iclass_lp24x2_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_iu_args,
+ 1, Iclass_ae_iclass_lp24x2_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_x_args,
+ 1, Iclass_ae_iclass_lp24x2_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lp24x2_xu_args,
+ 1, Iclass_ae_iclass_lp24x2_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_i_args,
+ 1, Iclass_ae_iclass_sp16x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_iu_args,
+ 1, Iclass_ae_iclass_sp16x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_x_args,
+ 1, Iclass_ae_iclass_sp16x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16x2f_xu_args,
+ 1, Iclass_ae_iclass_sp16x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_i_args,
+ 1, Iclass_ae_iclass_sp24x2s_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_iu_args,
+ 1, Iclass_ae_iclass_sp24x2s_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_x_args,
+ 1, Iclass_ae_iclass_sp24x2s_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2s_xu_args,
+ 1, Iclass_ae_iclass_sp24x2s_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_i_args,
+ 1, Iclass_ae_iclass_sp24x2f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_iu_args,
+ 1, Iclass_ae_iclass_sp24x2f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_x_args,
+ 1, Iclass_ae_iclass_sp24x2f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24x2f_xu_args,
+ 1, Iclass_ae_iclass_sp24x2f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_i_args,
+ 1, Iclass_ae_iclass_sp16f_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_iu_args,
+ 1, Iclass_ae_iclass_sp16f_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_x_args,
+ 1, Iclass_ae_iclass_sp16f_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp16f_l_xu_args,
+ 1, Iclass_ae_iclass_sp16f_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_i_args,
+ 1, Iclass_ae_iclass_sp24s_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_iu_args,
+ 1, Iclass_ae_iclass_sp24s_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_x_args,
+ 1, Iclass_ae_iclass_sp24s_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24s_l_xu_args,
+ 1, Iclass_ae_iclass_sp24s_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_i_args,
+ 1, Iclass_ae_iclass_sp24f_l_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_iu_args,
+ 1, Iclass_ae_iclass_sp24f_l_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_x_args,
+ 1, Iclass_ae_iclass_sp24f_l_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sp24f_l_xu_args,
+ 1, Iclass_ae_iclass_sp24f_l_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_i_args,
+ 1, Iclass_ae_iclass_lq56_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_iu_args,
+ 1, Iclass_ae_iclass_lq56_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_x_args,
+ 1, Iclass_ae_iclass_lq56_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq56_xu_args,
+ 1, Iclass_ae_iclass_lq56_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_i_args,
+ 1, Iclass_ae_iclass_lq32f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_iu_args,
+ 1, Iclass_ae_iclass_lq32f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_x_args,
+ 1, Iclass_ae_iclass_lq32f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lq32f_xu_args,
+ 1, Iclass_ae_iclass_lq32f_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_i_args,
+ 1, Iclass_ae_iclass_sq56s_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_iu_args,
+ 1, Iclass_ae_iclass_sq56s_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_x_args,
+ 1, Iclass_ae_iclass_sq56s_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq56s_xu_args,
+ 1, Iclass_ae_iclass_sq56s_xu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_i_args,
+ 1, Iclass_ae_iclass_sq32f_i_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_iu_args,
+ 1, Iclass_ae_iclass_sq32f_iu_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_x_args,
+ 1, Iclass_ae_iclass_sq32f_x_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sq32f_xu_args,
+ 1, Iclass_ae_iclass_sq32f_xu_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_zerop48_args,
+ 1, Iclass_ae_iclass_zerop48_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movp48_args,
+ 1, Iclass_ae_iclass_movp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_ll_args,
+ 1, Iclass_ae_iclass_selp24_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_lh_args,
+ 1, Iclass_ae_iclass_selp24_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_hl_args,
+ 1, Iclass_ae_iclass_selp24_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_selp24_hh_args,
+ 1, Iclass_ae_iclass_selp24_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtp24x2_args,
+ 1, Iclass_ae_iclass_movtp24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfp24x2_args,
+ 1, Iclass_ae_iclass_movfp24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtp48_args,
+ 1, Iclass_ae_iclass_movtp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfp48_args,
+ 1, Iclass_ae_iclass_movfp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movpa24x2_args,
+ 1, Iclass_ae_iclass_movpa24x2_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_truncp24a32x2_args,
+ 1, Iclass_ae_iclass_truncp24a32x2_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvta32p24_l_args,
+ 1, Iclass_ae_iclass_cvta32p24_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvta32p24_h_args,
+ 1, Iclass_ae_iclass_cvta32p24_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_ll_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_lh_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_hl_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_cvtp24a16x2_hh_args,
+ 1, Iclass_ae_iclass_cvtp24a16x2_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_truncp24q48x2_args,
+ 1, Iclass_ae_iclass_truncp24q48x2_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_truncp16_args,
+ 1, Iclass_ae_iclass_truncp16_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp24q48sym_args,
+ 2, Iclass_ae_iclass_roundsp24q48sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp24q48asym_args,
+ 2, Iclass_ae_iclass_roundsp24q48asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16q48sym_args,
+ 2, Iclass_ae_iclass_roundsp16q48sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16q48asym_args,
+ 2, Iclass_ae_iclass_roundsp16q48asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16sym_args,
+ 2, Iclass_ae_iclass_roundsp16sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsp16asym_args,
+ 2, Iclass_ae_iclass_roundsp16asym_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_zeroq56_args,
+ 1, Iclass_ae_iclass_zeroq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movq56_args,
+ 1, Iclass_ae_iclass_movq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movtq56_args,
+ 1, Iclass_ae_iclass_movtq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_movfq56_args,
+ 1, Iclass_ae_iclass_movfq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48a32s_args,
+ 1, Iclass_ae_iclass_cvtq48a32s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48p24s_l_args,
+ 1, Iclass_ae_iclass_cvtq48p24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_cvtq48p24s_h_args,
+ 1, Iclass_ae_iclass_cvtq48p24s_h_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_satq48s_args,
+ 2, Iclass_ae_iclass_satq48s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_truncq32_args,
+ 1, Iclass_ae_iclass_truncq32_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsq32sym_args,
+ 2, Iclass_ae_iclass_roundsq32sym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_roundsq32asym_args,
+ 2, Iclass_ae_iclass_roundsq32asym_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca32q48_args,
+ 1, Iclass_ae_iclass_trunca32q48_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movap24s_l_args,
+ 1, Iclass_ae_iclass_movap24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_movap24s_h_args,
+ 1, Iclass_ae_iclass_movap24s_h_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca16p24s_l_args,
+ 1, Iclass_ae_iclass_trunca16p24s_l_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_trunca16p24s_h_args,
+ 1, Iclass_ae_iclass_trunca16p24s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addp24_args,
+ 1, Iclass_ae_iclass_addp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subp24_args,
+ 1, Iclass_ae_iclass_subp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negp24_args,
+ 1, Iclass_ae_iclass_negp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_absp24_args,
+ 1, Iclass_ae_iclass_absp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_maxp24s_args,
+ 1, Iclass_ae_iclass_maxp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_minp24s_args,
+ 1, Iclass_ae_iclass_minp24s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_maxbp24s_args,
+ 1, Iclass_ae_iclass_maxbp24s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_minbp24s_args,
+ 1, Iclass_ae_iclass_minbp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addsp24s_args,
+ 2, Iclass_ae_iclass_addsp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subsp24s_args,
+ 2, Iclass_ae_iclass_subsp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negsp24s_args,
+ 2, Iclass_ae_iclass_negsp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_abssp24s_args,
+ 2, Iclass_ae_iclass_abssp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_andp48_args,
+ 1, Iclass_ae_iclass_andp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_nandp48_args,
+ 1, Iclass_ae_iclass_nandp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_orp48_args,
+ 1, Iclass_ae_iclass_orp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_xorp48_args,
+ 1, Iclass_ae_iclass_xorp48_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_ltp24s_args,
+ 1, Iclass_ae_iclass_ltp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lep24s_args,
+ 1, Iclass_ae_iclass_lep24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_eqp24_args,
+ 1, Iclass_ae_iclass_eqp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addq56_args,
+ 1, Iclass_ae_iclass_addq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subq56_args,
+ 1, Iclass_ae_iclass_subq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negq56_args,
+ 1, Iclass_ae_iclass_negq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_absq56_args,
+ 1, Iclass_ae_iclass_absq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_maxq56s_args,
+ 1, Iclass_ae_iclass_maxq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_minq56s_args,
+ 1, Iclass_ae_iclass_minq56s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_maxbq56s_args,
+ 1, Iclass_ae_iclass_maxbq56s_stateArgs, 0, 0 },
+ { 4, Iclass_ae_iclass_minbq56s_args,
+ 1, Iclass_ae_iclass_minbq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_addsq56s_args,
+ 2, Iclass_ae_iclass_addsq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_subsq56s_args,
+ 2, Iclass_ae_iclass_subsq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_negsq56s_args,
+ 2, Iclass_ae_iclass_negsq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_abssq56s_args,
+ 2, Iclass_ae_iclass_abssq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_andq56_args,
+ 1, Iclass_ae_iclass_andq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_nandq56_args,
+ 1, Iclass_ae_iclass_nandq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_orq56_args,
+ 1, Iclass_ae_iclass_orq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_xorq56_args,
+ 1, Iclass_ae_iclass_xorq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllip24_args,
+ 1, Iclass_ae_iclass_sllip24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srlip24_args,
+ 1, Iclass_ae_iclass_srlip24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraip24_args,
+ 1, Iclass_ae_iclass_sraip24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllsp24_args,
+ 2, Iclass_ae_iclass_sllsp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srlsp24_args,
+ 2, Iclass_ae_iclass_srlsp24_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srasp24_args,
+ 2, Iclass_ae_iclass_srasp24_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllisp24s_args,
+ 2, Iclass_ae_iclass_sllisp24s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllssp24s_args,
+ 3, Iclass_ae_iclass_sllssp24s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_slliq56_args,
+ 1, Iclass_ae_iclass_slliq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srliq56_args,
+ 1, Iclass_ae_iclass_srliq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraiq56_args,
+ 1, Iclass_ae_iclass_sraiq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllsq56_args,
+ 2, Iclass_ae_iclass_sllsq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srlsq56_args,
+ 2, Iclass_ae_iclass_srlsq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_srasq56_args,
+ 2, Iclass_ae_iclass_srasq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllaq56_args,
+ 1, Iclass_ae_iclass_sllaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_srlaq56_args,
+ 1, Iclass_ae_iclass_srlaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sraaq56_args,
+ 1, Iclass_ae_iclass_sraaq56_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllisq56s_args,
+ 2, Iclass_ae_iclass_sllisq56s_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sllssq56s_args,
+ 3, Iclass_ae_iclass_sllssq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sllasq56s_args,
+ 2, Iclass_ae_iclass_sllasq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_ltq56s_args,
+ 1, Iclass_ae_iclass_ltq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_leq56s_args,
+ 1, Iclass_ae_iclass_leq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_eqq56_args,
+ 1, Iclass_ae_iclass_eqq56_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_nsaq56s_args,
+ 1, Iclass_ae_iclass_nsaq56s_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_ll_args,
+ 1, Iclass_ae_iclass_mulfp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_ll_args,
+ 1, Iclass_ae_iclass_mulp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_lh_args,
+ 1, Iclass_ae_iclass_mulfp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_lh_args,
+ 1, Iclass_ae_iclass_mulp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_hl_args,
+ 1, Iclass_ae_iclass_mulfp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_hl_args,
+ 1, Iclass_ae_iclass_mulp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulfs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfp24s_hh_args,
+ 1, Iclass_ae_iclass_mulfp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulp24s_hh_args,
+ 1, Iclass_ae_iclass_mulp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_ll_args,
+ 1, Iclass_ae_iclass_mulafp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_ll_args,
+ 1, Iclass_ae_iclass_mulap24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_lh_args,
+ 1, Iclass_ae_iclass_mulafp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_lh_args,
+ 1, Iclass_ae_iclass_mulap24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_hl_args,
+ 1, Iclass_ae_iclass_mulafp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_hl_args,
+ 1, Iclass_ae_iclass_mulap24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulafs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafp24s_hh_args,
+ 1, Iclass_ae_iclass_mulafp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulap24s_hh_args,
+ 1, Iclass_ae_iclass_mulap24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_ll_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_ll_args,
+ 1, Iclass_ae_iclass_mulsfp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_ll_args,
+ 1, Iclass_ae_iclass_mulsp24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_lh_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_lh_args,
+ 1, Iclass_ae_iclass_mulsfp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_lh_args,
+ 1, Iclass_ae_iclass_mulsp24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_hl_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_hl_args,
+ 1, Iclass_ae_iclass_mulsfp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_hl_args,
+ 1, Iclass_ae_iclass_mulsp24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs32p16s_hh_args,
+ 2, Iclass_ae_iclass_mulsfs32p16s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfp24s_hh_args,
+ 1, Iclass_ae_iclass_mulsfp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsp24s_hh_args,
+ 1, Iclass_ae_iclass_mulsp24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulas56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulas56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulas56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafs56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulafs56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulas56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulas56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_ll_args,
+ 2, Iclass_ae_iclass_mulss56p24s_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_lh_args,
+ 2, Iclass_ae_iclass_mulss56p24s_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_hl_args,
+ 2, Iclass_ae_iclass_mulss56p24s_hl_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfs56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulsfs56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulss56p24s_hh_args,
+ 2, Iclass_ae_iclass_mulss56p24s_hh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulfq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulfq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulfq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulfq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulfq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulafq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulafq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulafq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulafq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulafq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulaq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulaq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulaq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulaq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsfq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulsfq32sp16u_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16s_l_args,
+ 1, Iclass_ae_iclass_mulsq32sp16s_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16s_h_args,
+ 1, Iclass_ae_iclass_mulsq32sp16s_h_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16u_l_args,
+ 1, Iclass_ae_iclass_mulsq32sp16u_l_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsq32sp16u_h_args,
+ 1, Iclass_ae_iclass_mulsq32sp16u_h_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaaq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzaaq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzaafq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzaafq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzasq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzasfq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzasfq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsaq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzsaq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzsafq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzsafq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_ll_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_ll_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_ll_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_hh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_hh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_hh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16s_lh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16s_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzssq32sp16u_lh_stateArgs, 0, 0 },
+ { 5, Iclass_ae_iclass_mulzssfq32sp16u_lh_args,
+ 1, Iclass_ae_iclass_mulzssfq32sp16u_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzaafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzaap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzaafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzaap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzaap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzasfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzasp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzasfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzasp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzasp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzsafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzsap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzsafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzsap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzsap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzssfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulzssp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzssfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulzssp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulzssp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulaafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulaap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulaafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulaap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulaap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulasfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulasp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulasfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulasp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulasp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsafp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulsafp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsap24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulsap24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsafp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulsafp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulsap24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulsap24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssfp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulssfp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssp24s_hh_ll_args,
+ 1, Iclass_ae_iclass_mulssp24s_hh_ll_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssfp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulssfp24s_hl_lh_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_mulssp24s_hl_lh_args,
+ 1, Iclass_ae_iclass_mulssp24s_hl_lh_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sha32_args,
+ 0, 0, 0, 0 },
+ { 3, Iclass_ae_iclass_vldl32t_args,
+ 5, Iclass_ae_iclass_vldl32t_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vldl16t_args,
+ 5, Iclass_ae_iclass_vldl16t_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vldl16c_args,
+ 8, Iclass_ae_iclass_vldl16c_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vldsht_args,
+ 6, Iclass_ae_iclass_vldsht_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_lb_args,
+ 3, Iclass_ae_iclass_lb_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_lbi_args,
+ 3, Iclass_ae_iclass_lbi_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lbk_args,
+ 3, Iclass_ae_iclass_lbk_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_lbki_args,
+ 3, Iclass_ae_iclass_lbki_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_db_args,
+ 3, Iclass_ae_iclass_db_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_dbi_args,
+ 3, Iclass_ae_iclass_dbi_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vlel32t_args,
+ 3, Iclass_ae_iclass_vlel32t_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_vlel16t_args,
+ 3, Iclass_ae_iclass_vlel16t_stateArgs, 0, 0 },
+ { 2, Iclass_ae_iclass_sb_args,
+ 4, Iclass_ae_iclass_sb_stateArgs, 0, 0 },
+ { 3, Iclass_ae_iclass_sbi_args,
+ 3, Iclass_ae_iclass_sbi_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_vles16c_args,
+ 5, Iclass_ae_iclass_vles16c_stateArgs, 0, 0 },
+ { 1, Iclass_ae_iclass_sbf_args,
+ 3, Iclass_ae_iclass_sbf_stateArgs, 0, 0 }
+};
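+
+/*
+ * Each row above appears to follow the xtensa_iclass_internal layout
+ * from xtensa-isa-internal.h:
+ * { num_operands, operands, num_stateOperands, stateOperands,
+ *   num_interfaceOperands, interfaceOperands }.  The trailing 0, 0
+ * pair means none of these iclasses declares interface operands; the
+ * AE_SBF row, for instance, names one register operand and three
+ * implicit state operands.
+ */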
+
+enum xtensa_iclass_id {
+ ICLASS_xt_iclass_excw,
+ ICLASS_xt_iclass_rfe,
+ ICLASS_xt_iclass_rfde,
+ ICLASS_xt_iclass_syscall,
+ ICLASS_xt_iclass_call12,
+ ICLASS_xt_iclass_call8,
+ ICLASS_xt_iclass_call4,
+ ICLASS_xt_iclass_callx12,
+ ICLASS_xt_iclass_callx8,
+ ICLASS_xt_iclass_callx4,
+ ICLASS_xt_iclass_entry,
+ ICLASS_xt_iclass_movsp,
+ ICLASS_xt_iclass_rotw,
+ ICLASS_xt_iclass_retw,
+ ICLASS_xt_iclass_rfwou,
+ ICLASS_xt_iclass_l32e,
+ ICLASS_xt_iclass_s32e,
+ ICLASS_xt_iclass_rsr_windowbase,
+ ICLASS_xt_iclass_wsr_windowbase,
+ ICLASS_xt_iclass_xsr_windowbase,
+ ICLASS_xt_iclass_rsr_windowstart,
+ ICLASS_xt_iclass_wsr_windowstart,
+ ICLASS_xt_iclass_xsr_windowstart,
+ ICLASS_xt_iclass_add_n,
+ ICLASS_xt_iclass_addi_n,
+ ICLASS_xt_iclass_bz6,
+ ICLASS_xt_iclass_ill_n,
+ ICLASS_xt_iclass_loadi4,
+ ICLASS_xt_iclass_mov_n,
+ ICLASS_xt_iclass_movi_n,
+ ICLASS_xt_iclass_nopn,
+ ICLASS_xt_iclass_retn,
+ ICLASS_xt_iclass_storei4,
+ ICLASS_rur_threadptr,
+ ICLASS_wur_threadptr,
+ ICLASS_xt_iclass_addi,
+ ICLASS_xt_iclass_addmi,
+ ICLASS_xt_iclass_addsub,
+ ICLASS_xt_iclass_bit,
+ ICLASS_xt_iclass_bsi8,
+ ICLASS_xt_iclass_bsi8b,
+ ICLASS_xt_iclass_bsi8u,
+ ICLASS_xt_iclass_bst8,
+ ICLASS_xt_iclass_bsz12,
+ ICLASS_xt_iclass_call0,
+ ICLASS_xt_iclass_callx0,
+ ICLASS_xt_iclass_exti,
+ ICLASS_xt_iclass_ill,
+ ICLASS_xt_iclass_jump,
+ ICLASS_xt_iclass_jumpx,
+ ICLASS_xt_iclass_l16ui,
+ ICLASS_xt_iclass_l16si,
+ ICLASS_xt_iclass_l32i,
+ ICLASS_xt_iclass_l32r,
+ ICLASS_xt_iclass_l8i,
+ ICLASS_xt_iclass_loop,
+ ICLASS_xt_iclass_loopz,
+ ICLASS_xt_iclass_movi,
+ ICLASS_xt_iclass_movz,
+ ICLASS_xt_iclass_neg,
+ ICLASS_xt_iclass_nop,
+ ICLASS_xt_iclass_return,
+ ICLASS_xt_iclass_simcall,
+ ICLASS_xt_iclass_s16i,
+ ICLASS_xt_iclass_s32i,
+ ICLASS_xt_iclass_s8i,
+ ICLASS_xt_iclass_sar,
+ ICLASS_xt_iclass_sari,
+ ICLASS_xt_iclass_shifts,
+ ICLASS_xt_iclass_shiftst,
+ ICLASS_xt_iclass_shiftt,
+ ICLASS_xt_iclass_slli,
+ ICLASS_xt_iclass_srai,
+ ICLASS_xt_iclass_srli,
+ ICLASS_xt_iclass_memw,
+ ICLASS_xt_iclass_extw,
+ ICLASS_xt_iclass_isync,
+ ICLASS_xt_iclass_sync,
+ ICLASS_xt_iclass_rsil,
+ ICLASS_xt_iclass_rsr_lend,
+ ICLASS_xt_iclass_wsr_lend,
+ ICLASS_xt_iclass_xsr_lend,
+ ICLASS_xt_iclass_rsr_lcount,
+ ICLASS_xt_iclass_wsr_lcount,
+ ICLASS_xt_iclass_xsr_lcount,
+ ICLASS_xt_iclass_rsr_lbeg,
+ ICLASS_xt_iclass_wsr_lbeg,
+ ICLASS_xt_iclass_xsr_lbeg,
+ ICLASS_xt_iclass_rsr_sar,
+ ICLASS_xt_iclass_wsr_sar,
+ ICLASS_xt_iclass_xsr_sar,
+ ICLASS_xt_iclass_rsr_litbase,
+ ICLASS_xt_iclass_wsr_litbase,
+ ICLASS_xt_iclass_xsr_litbase,
+ ICLASS_xt_iclass_rsr_configid0,
+ ICLASS_xt_iclass_wsr_configid0,
+ ICLASS_xt_iclass_rsr_configid1,
+ ICLASS_xt_iclass_rsr_ps,
+ ICLASS_xt_iclass_wsr_ps,
+ ICLASS_xt_iclass_xsr_ps,
+ ICLASS_xt_iclass_rsr_epc1,
+ ICLASS_xt_iclass_wsr_epc1,
+ ICLASS_xt_iclass_xsr_epc1,
+ ICLASS_xt_iclass_rsr_excsave1,
+ ICLASS_xt_iclass_wsr_excsave1,
+ ICLASS_xt_iclass_xsr_excsave1,
+ ICLASS_xt_iclass_rsr_epc2,
+ ICLASS_xt_iclass_wsr_epc2,
+ ICLASS_xt_iclass_xsr_epc2,
+ ICLASS_xt_iclass_rsr_excsave2,
+ ICLASS_xt_iclass_wsr_excsave2,
+ ICLASS_xt_iclass_xsr_excsave2,
+ ICLASS_xt_iclass_rsr_eps2,
+ ICLASS_xt_iclass_wsr_eps2,
+ ICLASS_xt_iclass_xsr_eps2,
+ ICLASS_xt_iclass_rsr_excvaddr,
+ ICLASS_xt_iclass_wsr_excvaddr,
+ ICLASS_xt_iclass_xsr_excvaddr,
+ ICLASS_xt_iclass_rsr_depc,
+ ICLASS_xt_iclass_wsr_depc,
+ ICLASS_xt_iclass_xsr_depc,
+ ICLASS_xt_iclass_rsr_exccause,
+ ICLASS_xt_iclass_wsr_exccause,
+ ICLASS_xt_iclass_xsr_exccause,
+ ICLASS_xt_iclass_rsr_misc0,
+ ICLASS_xt_iclass_wsr_misc0,
+ ICLASS_xt_iclass_xsr_misc0,
+ ICLASS_xt_iclass_rsr_misc1,
+ ICLASS_xt_iclass_wsr_misc1,
+ ICLASS_xt_iclass_xsr_misc1,
+ ICLASS_xt_iclass_rsr_prid,
+ ICLASS_xt_iclass_rsr_vecbase,
+ ICLASS_xt_iclass_wsr_vecbase,
+ ICLASS_xt_iclass_xsr_vecbase,
+ ICLASS_xt_mul16,
+ ICLASS_xt_mul32,
+ ICLASS_xt_iclass_rfi,
+ ICLASS_xt_iclass_wait,
+ ICLASS_xt_iclass_rsr_interrupt,
+ ICLASS_xt_iclass_wsr_intset,
+ ICLASS_xt_iclass_wsr_intclear,
+ ICLASS_xt_iclass_rsr_intenable,
+ ICLASS_xt_iclass_wsr_intenable,
+ ICLASS_xt_iclass_xsr_intenable,
+ ICLASS_xt_iclass_break,
+ ICLASS_xt_iclass_break_n,
+ ICLASS_xt_iclass_rsr_debugcause,
+ ICLASS_xt_iclass_wsr_debugcause,
+ ICLASS_xt_iclass_xsr_debugcause,
+ ICLASS_xt_iclass_rsr_icount,
+ ICLASS_xt_iclass_wsr_icount,
+ ICLASS_xt_iclass_xsr_icount,
+ ICLASS_xt_iclass_rsr_icountlevel,
+ ICLASS_xt_iclass_wsr_icountlevel,
+ ICLASS_xt_iclass_xsr_icountlevel,
+ ICLASS_xt_iclass_rsr_ddr,
+ ICLASS_xt_iclass_wsr_ddr,
+ ICLASS_xt_iclass_xsr_ddr,
+ ICLASS_xt_iclass_rfdo,
+ ICLASS_xt_iclass_rfdd,
+ ICLASS_xt_iclass_bbool1,
+ ICLASS_xt_iclass_bbool4,
+ ICLASS_xt_iclass_bbool8,
+ ICLASS_xt_iclass_bbranch,
+ ICLASS_xt_iclass_bmove,
+ ICLASS_xt_iclass_RSR_BR,
+ ICLASS_xt_iclass_WSR_BR,
+ ICLASS_xt_iclass_XSR_BR,
+ ICLASS_xt_iclass_rsr_ccount,
+ ICLASS_xt_iclass_wsr_ccount,
+ ICLASS_xt_iclass_xsr_ccount,
+ ICLASS_xt_iclass_rsr_ccompare0,
+ ICLASS_xt_iclass_wsr_ccompare0,
+ ICLASS_xt_iclass_xsr_ccompare0,
+ ICLASS_xt_iclass_rsr_ccompare1,
+ ICLASS_xt_iclass_wsr_ccompare1,
+ ICLASS_xt_iclass_xsr_ccompare1,
+ ICLASS_xt_iclass_icache,
+ ICLASS_xt_iclass_icache_inv,
+ ICLASS_xt_iclass_licx,
+ ICLASS_xt_iclass_sicx,
+ ICLASS_xt_iclass_dcache,
+ ICLASS_xt_iclass_dcache_ind,
+ ICLASS_xt_iclass_dcache_inv,
+ ICLASS_xt_iclass_dpf,
+ ICLASS_xt_iclass_sdct,
+ ICLASS_xt_iclass_ldct,
+ ICLASS_xt_iclass_wsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_ptevaddr,
+ ICLASS_xt_iclass_xsr_ptevaddr,
+ ICLASS_xt_iclass_rsr_rasid,
+ ICLASS_xt_iclass_wsr_rasid,
+ ICLASS_xt_iclass_xsr_rasid,
+ ICLASS_xt_iclass_rsr_itlbcfg,
+ ICLASS_xt_iclass_wsr_itlbcfg,
+ ICLASS_xt_iclass_xsr_itlbcfg,
+ ICLASS_xt_iclass_rsr_dtlbcfg,
+ ICLASS_xt_iclass_wsr_dtlbcfg,
+ ICLASS_xt_iclass_xsr_dtlbcfg,
+ ICLASS_xt_iclass_idtlb,
+ ICLASS_xt_iclass_rdtlb,
+ ICLASS_xt_iclass_wdtlb,
+ ICLASS_xt_iclass_iitlb,
+ ICLASS_xt_iclass_ritlb,
+ ICLASS_xt_iclass_witlb,
+ ICLASS_xt_iclass_ldpte,
+ ICLASS_xt_iclass_hwwitlba,
+ ICLASS_xt_iclass_hwwdtlba,
+ ICLASS_xt_iclass_rsr_cpenable,
+ ICLASS_xt_iclass_wsr_cpenable,
+ ICLASS_xt_iclass_xsr_cpenable,
+ ICLASS_xt_iclass_clamp,
+ ICLASS_xt_iclass_minmax,
+ ICLASS_xt_iclass_nsa,
+ ICLASS_xt_iclass_sx,
+ ICLASS_xt_iclass_l32ai,
+ ICLASS_xt_iclass_s32ri,
+ ICLASS_xt_iclass_s32c1i,
+ ICLASS_xt_iclass_rsr_scompare1,
+ ICLASS_xt_iclass_wsr_scompare1,
+ ICLASS_xt_iclass_xsr_scompare1,
+ ICLASS_xt_iclass_rsr_atomctl,
+ ICLASS_xt_iclass_wsr_atomctl,
+ ICLASS_xt_iclass_xsr_atomctl,
+ ICLASS_xt_iclass_rer,
+ ICLASS_xt_iclass_wer,
+ ICLASS_rur_ae_ovf_sar,
+ ICLASS_wur_ae_ovf_sar,
+ ICLASS_rur_ae_bithead,
+ ICLASS_wur_ae_bithead,
+ ICLASS_rur_ae_ts_fts_bu_bp,
+ ICLASS_wur_ae_ts_fts_bu_bp,
+ ICLASS_rur_ae_sd_no,
+ ICLASS_wur_ae_sd_no,
+ ICLASS_ae_iclass_rur_ae_overflow,
+ ICLASS_ae_iclass_wur_ae_overflow,
+ ICLASS_ae_iclass_rur_ae_sar,
+ ICLASS_ae_iclass_wur_ae_sar,
+ ICLASS_ae_iclass_rur_ae_bitptr,
+ ICLASS_ae_iclass_wur_ae_bitptr,
+ ICLASS_ae_iclass_rur_ae_bitsused,
+ ICLASS_ae_iclass_wur_ae_bitsused,
+ ICLASS_ae_iclass_rur_ae_tablesize,
+ ICLASS_ae_iclass_wur_ae_tablesize,
+ ICLASS_ae_iclass_rur_ae_first_ts,
+ ICLASS_ae_iclass_wur_ae_first_ts,
+ ICLASS_ae_iclass_rur_ae_nextoffset,
+ ICLASS_ae_iclass_wur_ae_nextoffset,
+ ICLASS_ae_iclass_rur_ae_searchdone,
+ ICLASS_ae_iclass_wur_ae_searchdone,
+ ICLASS_ae_iclass_lp16f_i,
+ ICLASS_ae_iclass_lp16f_iu,
+ ICLASS_ae_iclass_lp16f_x,
+ ICLASS_ae_iclass_lp16f_xu,
+ ICLASS_ae_iclass_lp24_i,
+ ICLASS_ae_iclass_lp24_iu,
+ ICLASS_ae_iclass_lp24_x,
+ ICLASS_ae_iclass_lp24_xu,
+ ICLASS_ae_iclass_lp24f_i,
+ ICLASS_ae_iclass_lp24f_iu,
+ ICLASS_ae_iclass_lp24f_x,
+ ICLASS_ae_iclass_lp24f_xu,
+ ICLASS_ae_iclass_lp16x2f_i,
+ ICLASS_ae_iclass_lp16x2f_iu,
+ ICLASS_ae_iclass_lp16x2f_x,
+ ICLASS_ae_iclass_lp16x2f_xu,
+ ICLASS_ae_iclass_lp24x2f_i,
+ ICLASS_ae_iclass_lp24x2f_iu,
+ ICLASS_ae_iclass_lp24x2f_x,
+ ICLASS_ae_iclass_lp24x2f_xu,
+ ICLASS_ae_iclass_lp24x2_i,
+ ICLASS_ae_iclass_lp24x2_iu,
+ ICLASS_ae_iclass_lp24x2_x,
+ ICLASS_ae_iclass_lp24x2_xu,
+ ICLASS_ae_iclass_sp16x2f_i,
+ ICLASS_ae_iclass_sp16x2f_iu,
+ ICLASS_ae_iclass_sp16x2f_x,
+ ICLASS_ae_iclass_sp16x2f_xu,
+ ICLASS_ae_iclass_sp24x2s_i,
+ ICLASS_ae_iclass_sp24x2s_iu,
+ ICLASS_ae_iclass_sp24x2s_x,
+ ICLASS_ae_iclass_sp24x2s_xu,
+ ICLASS_ae_iclass_sp24x2f_i,
+ ICLASS_ae_iclass_sp24x2f_iu,
+ ICLASS_ae_iclass_sp24x2f_x,
+ ICLASS_ae_iclass_sp24x2f_xu,
+ ICLASS_ae_iclass_sp16f_l_i,
+ ICLASS_ae_iclass_sp16f_l_iu,
+ ICLASS_ae_iclass_sp16f_l_x,
+ ICLASS_ae_iclass_sp16f_l_xu,
+ ICLASS_ae_iclass_sp24s_l_i,
+ ICLASS_ae_iclass_sp24s_l_iu,
+ ICLASS_ae_iclass_sp24s_l_x,
+ ICLASS_ae_iclass_sp24s_l_xu,
+ ICLASS_ae_iclass_sp24f_l_i,
+ ICLASS_ae_iclass_sp24f_l_iu,
+ ICLASS_ae_iclass_sp24f_l_x,
+ ICLASS_ae_iclass_sp24f_l_xu,
+ ICLASS_ae_iclass_lq56_i,
+ ICLASS_ae_iclass_lq56_iu,
+ ICLASS_ae_iclass_lq56_x,
+ ICLASS_ae_iclass_lq56_xu,
+ ICLASS_ae_iclass_lq32f_i,
+ ICLASS_ae_iclass_lq32f_iu,
+ ICLASS_ae_iclass_lq32f_x,
+ ICLASS_ae_iclass_lq32f_xu,
+ ICLASS_ae_iclass_sq56s_i,
+ ICLASS_ae_iclass_sq56s_iu,
+ ICLASS_ae_iclass_sq56s_x,
+ ICLASS_ae_iclass_sq56s_xu,
+ ICLASS_ae_iclass_sq32f_i,
+ ICLASS_ae_iclass_sq32f_iu,
+ ICLASS_ae_iclass_sq32f_x,
+ ICLASS_ae_iclass_sq32f_xu,
+ ICLASS_ae_iclass_zerop48,
+ ICLASS_ae_iclass_movp48,
+ ICLASS_ae_iclass_selp24_ll,
+ ICLASS_ae_iclass_selp24_lh,
+ ICLASS_ae_iclass_selp24_hl,
+ ICLASS_ae_iclass_selp24_hh,
+ ICLASS_ae_iclass_movtp24x2,
+ ICLASS_ae_iclass_movfp24x2,
+ ICLASS_ae_iclass_movtp48,
+ ICLASS_ae_iclass_movfp48,
+ ICLASS_ae_iclass_movpa24x2,
+ ICLASS_ae_iclass_truncp24a32x2,
+ ICLASS_ae_iclass_cvta32p24_l,
+ ICLASS_ae_iclass_cvta32p24_h,
+ ICLASS_ae_iclass_cvtp24a16x2_ll,
+ ICLASS_ae_iclass_cvtp24a16x2_lh,
+ ICLASS_ae_iclass_cvtp24a16x2_hl,
+ ICLASS_ae_iclass_cvtp24a16x2_hh,
+ ICLASS_ae_iclass_truncp24q48x2,
+ ICLASS_ae_iclass_truncp16,
+ ICLASS_ae_iclass_roundsp24q48sym,
+ ICLASS_ae_iclass_roundsp24q48asym,
+ ICLASS_ae_iclass_roundsp16q48sym,
+ ICLASS_ae_iclass_roundsp16q48asym,
+ ICLASS_ae_iclass_roundsp16sym,
+ ICLASS_ae_iclass_roundsp16asym,
+ ICLASS_ae_iclass_zeroq56,
+ ICLASS_ae_iclass_movq56,
+ ICLASS_ae_iclass_movtq56,
+ ICLASS_ae_iclass_movfq56,
+ ICLASS_ae_iclass_cvtq48a32s,
+ ICLASS_ae_iclass_cvtq48p24s_l,
+ ICLASS_ae_iclass_cvtq48p24s_h,
+ ICLASS_ae_iclass_satq48s,
+ ICLASS_ae_iclass_truncq32,
+ ICLASS_ae_iclass_roundsq32sym,
+ ICLASS_ae_iclass_roundsq32asym,
+ ICLASS_ae_iclass_trunca32q48,
+ ICLASS_ae_iclass_movap24s_l,
+ ICLASS_ae_iclass_movap24s_h,
+ ICLASS_ae_iclass_trunca16p24s_l,
+ ICLASS_ae_iclass_trunca16p24s_h,
+ ICLASS_ae_iclass_addp24,
+ ICLASS_ae_iclass_subp24,
+ ICLASS_ae_iclass_negp24,
+ ICLASS_ae_iclass_absp24,
+ ICLASS_ae_iclass_maxp24s,
+ ICLASS_ae_iclass_minp24s,
+ ICLASS_ae_iclass_maxbp24s,
+ ICLASS_ae_iclass_minbp24s,
+ ICLASS_ae_iclass_addsp24s,
+ ICLASS_ae_iclass_subsp24s,
+ ICLASS_ae_iclass_negsp24s,
+ ICLASS_ae_iclass_abssp24s,
+ ICLASS_ae_iclass_andp48,
+ ICLASS_ae_iclass_nandp48,
+ ICLASS_ae_iclass_orp48,
+ ICLASS_ae_iclass_xorp48,
+ ICLASS_ae_iclass_ltp24s,
+ ICLASS_ae_iclass_lep24s,
+ ICLASS_ae_iclass_eqp24,
+ ICLASS_ae_iclass_addq56,
+ ICLASS_ae_iclass_subq56,
+ ICLASS_ae_iclass_negq56,
+ ICLASS_ae_iclass_absq56,
+ ICLASS_ae_iclass_maxq56s,
+ ICLASS_ae_iclass_minq56s,
+ ICLASS_ae_iclass_maxbq56s,
+ ICLASS_ae_iclass_minbq56s,
+ ICLASS_ae_iclass_addsq56s,
+ ICLASS_ae_iclass_subsq56s,
+ ICLASS_ae_iclass_negsq56s,
+ ICLASS_ae_iclass_abssq56s,
+ ICLASS_ae_iclass_andq56,
+ ICLASS_ae_iclass_nandq56,
+ ICLASS_ae_iclass_orq56,
+ ICLASS_ae_iclass_xorq56,
+ ICLASS_ae_iclass_sllip24,
+ ICLASS_ae_iclass_srlip24,
+ ICLASS_ae_iclass_sraip24,
+ ICLASS_ae_iclass_sllsp24,
+ ICLASS_ae_iclass_srlsp24,
+ ICLASS_ae_iclass_srasp24,
+ ICLASS_ae_iclass_sllisp24s,
+ ICLASS_ae_iclass_sllssp24s,
+ ICLASS_ae_iclass_slliq56,
+ ICLASS_ae_iclass_srliq56,
+ ICLASS_ae_iclass_sraiq56,
+ ICLASS_ae_iclass_sllsq56,
+ ICLASS_ae_iclass_srlsq56,
+ ICLASS_ae_iclass_srasq56,
+ ICLASS_ae_iclass_sllaq56,
+ ICLASS_ae_iclass_srlaq56,
+ ICLASS_ae_iclass_sraaq56,
+ ICLASS_ae_iclass_sllisq56s,
+ ICLASS_ae_iclass_sllssq56s,
+ ICLASS_ae_iclass_sllasq56s,
+ ICLASS_ae_iclass_ltq56s,
+ ICLASS_ae_iclass_leq56s,
+ ICLASS_ae_iclass_eqq56,
+ ICLASS_ae_iclass_nsaq56s,
+ ICLASS_ae_iclass_mulfs32p16s_ll,
+ ICLASS_ae_iclass_mulfp24s_ll,
+ ICLASS_ae_iclass_mulp24s_ll,
+ ICLASS_ae_iclass_mulfs32p16s_lh,
+ ICLASS_ae_iclass_mulfp24s_lh,
+ ICLASS_ae_iclass_mulp24s_lh,
+ ICLASS_ae_iclass_mulfs32p16s_hl,
+ ICLASS_ae_iclass_mulfp24s_hl,
+ ICLASS_ae_iclass_mulp24s_hl,
+ ICLASS_ae_iclass_mulfs32p16s_hh,
+ ICLASS_ae_iclass_mulfp24s_hh,
+ ICLASS_ae_iclass_mulp24s_hh,
+ ICLASS_ae_iclass_mulafs32p16s_ll,
+ ICLASS_ae_iclass_mulafp24s_ll,
+ ICLASS_ae_iclass_mulap24s_ll,
+ ICLASS_ae_iclass_mulafs32p16s_lh,
+ ICLASS_ae_iclass_mulafp24s_lh,
+ ICLASS_ae_iclass_mulap24s_lh,
+ ICLASS_ae_iclass_mulafs32p16s_hl,
+ ICLASS_ae_iclass_mulafp24s_hl,
+ ICLASS_ae_iclass_mulap24s_hl,
+ ICLASS_ae_iclass_mulafs32p16s_hh,
+ ICLASS_ae_iclass_mulafp24s_hh,
+ ICLASS_ae_iclass_mulap24s_hh,
+ ICLASS_ae_iclass_mulsfs32p16s_ll,
+ ICLASS_ae_iclass_mulsfp24s_ll,
+ ICLASS_ae_iclass_mulsp24s_ll,
+ ICLASS_ae_iclass_mulsfs32p16s_lh,
+ ICLASS_ae_iclass_mulsfp24s_lh,
+ ICLASS_ae_iclass_mulsp24s_lh,
+ ICLASS_ae_iclass_mulsfs32p16s_hl,
+ ICLASS_ae_iclass_mulsfp24s_hl,
+ ICLASS_ae_iclass_mulsp24s_hl,
+ ICLASS_ae_iclass_mulsfs32p16s_hh,
+ ICLASS_ae_iclass_mulsfp24s_hh,
+ ICLASS_ae_iclass_mulsp24s_hh,
+ ICLASS_ae_iclass_mulafs56p24s_ll,
+ ICLASS_ae_iclass_mulas56p24s_ll,
+ ICLASS_ae_iclass_mulafs56p24s_lh,
+ ICLASS_ae_iclass_mulas56p24s_lh,
+ ICLASS_ae_iclass_mulafs56p24s_hl,
+ ICLASS_ae_iclass_mulas56p24s_hl,
+ ICLASS_ae_iclass_mulafs56p24s_hh,
+ ICLASS_ae_iclass_mulas56p24s_hh,
+ ICLASS_ae_iclass_mulsfs56p24s_ll,
+ ICLASS_ae_iclass_mulss56p24s_ll,
+ ICLASS_ae_iclass_mulsfs56p24s_lh,
+ ICLASS_ae_iclass_mulss56p24s_lh,
+ ICLASS_ae_iclass_mulsfs56p24s_hl,
+ ICLASS_ae_iclass_mulss56p24s_hl,
+ ICLASS_ae_iclass_mulsfs56p24s_hh,
+ ICLASS_ae_iclass_mulss56p24s_hh,
+ ICLASS_ae_iclass_mulfq32sp16s_l,
+ ICLASS_ae_iclass_mulfq32sp16s_h,
+ ICLASS_ae_iclass_mulfq32sp16u_l,
+ ICLASS_ae_iclass_mulfq32sp16u_h,
+ ICLASS_ae_iclass_mulq32sp16s_l,
+ ICLASS_ae_iclass_mulq32sp16s_h,
+ ICLASS_ae_iclass_mulq32sp16u_l,
+ ICLASS_ae_iclass_mulq32sp16u_h,
+ ICLASS_ae_iclass_mulafq32sp16s_l,
+ ICLASS_ae_iclass_mulafq32sp16s_h,
+ ICLASS_ae_iclass_mulafq32sp16u_l,
+ ICLASS_ae_iclass_mulafq32sp16u_h,
+ ICLASS_ae_iclass_mulaq32sp16s_l,
+ ICLASS_ae_iclass_mulaq32sp16s_h,
+ ICLASS_ae_iclass_mulaq32sp16u_l,
+ ICLASS_ae_iclass_mulaq32sp16u_h,
+ ICLASS_ae_iclass_mulsfq32sp16s_l,
+ ICLASS_ae_iclass_mulsfq32sp16s_h,
+ ICLASS_ae_iclass_mulsfq32sp16u_l,
+ ICLASS_ae_iclass_mulsfq32sp16u_h,
+ ICLASS_ae_iclass_mulsq32sp16s_l,
+ ICLASS_ae_iclass_mulsq32sp16s_h,
+ ICLASS_ae_iclass_mulsq32sp16u_l,
+ ICLASS_ae_iclass_mulsq32sp16u_h,
+ ICLASS_ae_iclass_mulzaaq32sp16s_ll,
+ ICLASS_ae_iclass_mulzaafq32sp16s_ll,
+ ICLASS_ae_iclass_mulzaaq32sp16u_ll,
+ ICLASS_ae_iclass_mulzaafq32sp16u_ll,
+ ICLASS_ae_iclass_mulzaaq32sp16s_hh,
+ ICLASS_ae_iclass_mulzaafq32sp16s_hh,
+ ICLASS_ae_iclass_mulzaaq32sp16u_hh,
+ ICLASS_ae_iclass_mulzaafq32sp16u_hh,
+ ICLASS_ae_iclass_mulzaaq32sp16s_lh,
+ ICLASS_ae_iclass_mulzaafq32sp16s_lh,
+ ICLASS_ae_iclass_mulzaaq32sp16u_lh,
+ ICLASS_ae_iclass_mulzaafq32sp16u_lh,
+ ICLASS_ae_iclass_mulzasq32sp16s_ll,
+ ICLASS_ae_iclass_mulzasfq32sp16s_ll,
+ ICLASS_ae_iclass_mulzasq32sp16u_ll,
+ ICLASS_ae_iclass_mulzasfq32sp16u_ll,
+ ICLASS_ae_iclass_mulzasq32sp16s_hh,
+ ICLASS_ae_iclass_mulzasfq32sp16s_hh,
+ ICLASS_ae_iclass_mulzasq32sp16u_hh,
+ ICLASS_ae_iclass_mulzasfq32sp16u_hh,
+ ICLASS_ae_iclass_mulzasq32sp16s_lh,
+ ICLASS_ae_iclass_mulzasfq32sp16s_lh,
+ ICLASS_ae_iclass_mulzasq32sp16u_lh,
+ ICLASS_ae_iclass_mulzasfq32sp16u_lh,
+ ICLASS_ae_iclass_mulzsaq32sp16s_ll,
+ ICLASS_ae_iclass_mulzsafq32sp16s_ll,
+ ICLASS_ae_iclass_mulzsaq32sp16u_ll,
+ ICLASS_ae_iclass_mulzsafq32sp16u_ll,
+ ICLASS_ae_iclass_mulzsaq32sp16s_hh,
+ ICLASS_ae_iclass_mulzsafq32sp16s_hh,
+ ICLASS_ae_iclass_mulzsaq32sp16u_hh,
+ ICLASS_ae_iclass_mulzsafq32sp16u_hh,
+ ICLASS_ae_iclass_mulzsaq32sp16s_lh,
+ ICLASS_ae_iclass_mulzsafq32sp16s_lh,
+ ICLASS_ae_iclass_mulzsaq32sp16u_lh,
+ ICLASS_ae_iclass_mulzsafq32sp16u_lh,
+ ICLASS_ae_iclass_mulzssq32sp16s_ll,
+ ICLASS_ae_iclass_mulzssfq32sp16s_ll,
+ ICLASS_ae_iclass_mulzssq32sp16u_ll,
+ ICLASS_ae_iclass_mulzssfq32sp16u_ll,
+ ICLASS_ae_iclass_mulzssq32sp16s_hh,
+ ICLASS_ae_iclass_mulzssfq32sp16s_hh,
+ ICLASS_ae_iclass_mulzssq32sp16u_hh,
+ ICLASS_ae_iclass_mulzssfq32sp16u_hh,
+ ICLASS_ae_iclass_mulzssq32sp16s_lh,
+ ICLASS_ae_iclass_mulzssfq32sp16s_lh,
+ ICLASS_ae_iclass_mulzssq32sp16u_lh,
+ ICLASS_ae_iclass_mulzssfq32sp16u_lh,
+ ICLASS_ae_iclass_mulzaafp24s_hh_ll,
+ ICLASS_ae_iclass_mulzaap24s_hh_ll,
+ ICLASS_ae_iclass_mulzaafp24s_hl_lh,
+ ICLASS_ae_iclass_mulzaap24s_hl_lh,
+ ICLASS_ae_iclass_mulzasfp24s_hh_ll,
+ ICLASS_ae_iclass_mulzasp24s_hh_ll,
+ ICLASS_ae_iclass_mulzasfp24s_hl_lh,
+ ICLASS_ae_iclass_mulzasp24s_hl_lh,
+ ICLASS_ae_iclass_mulzsafp24s_hh_ll,
+ ICLASS_ae_iclass_mulzsap24s_hh_ll,
+ ICLASS_ae_iclass_mulzsafp24s_hl_lh,
+ ICLASS_ae_iclass_mulzsap24s_hl_lh,
+ ICLASS_ae_iclass_mulzssfp24s_hh_ll,
+ ICLASS_ae_iclass_mulzssp24s_hh_ll,
+ ICLASS_ae_iclass_mulzssfp24s_hl_lh,
+ ICLASS_ae_iclass_mulzssp24s_hl_lh,
+ ICLASS_ae_iclass_mulaafp24s_hh_ll,
+ ICLASS_ae_iclass_mulaap24s_hh_ll,
+ ICLASS_ae_iclass_mulaafp24s_hl_lh,
+ ICLASS_ae_iclass_mulaap24s_hl_lh,
+ ICLASS_ae_iclass_mulasfp24s_hh_ll,
+ ICLASS_ae_iclass_mulasp24s_hh_ll,
+ ICLASS_ae_iclass_mulasfp24s_hl_lh,
+ ICLASS_ae_iclass_mulasp24s_hl_lh,
+ ICLASS_ae_iclass_mulsafp24s_hh_ll,
+ ICLASS_ae_iclass_mulsap24s_hh_ll,
+ ICLASS_ae_iclass_mulsafp24s_hl_lh,
+ ICLASS_ae_iclass_mulsap24s_hl_lh,
+ ICLASS_ae_iclass_mulssfp24s_hh_ll,
+ ICLASS_ae_iclass_mulssp24s_hh_ll,
+ ICLASS_ae_iclass_mulssfp24s_hl_lh,
+ ICLASS_ae_iclass_mulssp24s_hl_lh,
+ ICLASS_ae_iclass_sha32,
+ ICLASS_ae_iclass_vldl32t,
+ ICLASS_ae_iclass_vldl16t,
+ ICLASS_ae_iclass_vldl16c,
+ ICLASS_ae_iclass_vldsht,
+ ICLASS_ae_iclass_lb,
+ ICLASS_ae_iclass_lbi,
+ ICLASS_ae_iclass_lbk,
+ ICLASS_ae_iclass_lbki,
+ ICLASS_ae_iclass_db,
+ ICLASS_ae_iclass_dbi,
+ ICLASS_ae_iclass_vlel32t,
+ ICLASS_ae_iclass_vlel16t,
+ ICLASS_ae_iclass_sb,
+ ICLASS_ae_iclass_sbi,
+ ICLASS_ae_iclass_vles16c,
+ ICLASS_ae_iclass_sbf
+};
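+
+/*
+ * The ICLASS_* ids above are the row indices of the iclasses table
+ * that precedes this enum, so the two listings must stay in the same
+ * order; the opcode tables further below refer back to an opcode's
+ * iclass through these ids.
+ */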
+
+
+/* Opcode encodings. */
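+
+/*
+ * Each helper below writes the fixed opcode bits for one slot into an
+ * xtensa_insnbuf; per-operand field setters then encode operand values
+ * over these template bits.  A minimal sketch of the intended call
+ * sequence (operand encoding elided):
+ *
+ *     xtensa_insnbuf slotbuf = xtensa_insnbuf_alloc (isa);
+ *     Opcode_addi_Slot_inst_encode (slotbuf);
+ *     (slotbuf[0] is now the ADDI template, 0xc002)
+ *     ... set operand fields, then emit the bytes ...
+ *     xtensa_insnbuf_free (isa, slotbuf);
+ */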
+
+static void
+Opcode_excw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2080;
+}
+
+static void
+Opcode_rfe_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000;
+}
+
+static void
+Opcode_rfde_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3200;
+}
+
+static void
+Opcode_syscall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5000;
+}
+
+static void
+Opcode_call12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35;
+}
+
+static void
+Opcode_call8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x25;
+}
+
+static void
+Opcode_call4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15;
+}
+
+static void
+Opcode_callx12_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf0;
+}
+
+static void
+Opcode_callx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe0;
+}
+
+static void
+Opcode_callx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd0;
+}
+
+static void
+Opcode_entry_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36;
+}
+
+static void
+Opcode_movsp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1000;
+}
+
+static void
+Opcode_rotw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x408000;
+}
+
+static void
+Opcode_retw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90;
+}
+
+static void
+Opcode_retw_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01d;
+}
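+
+/*
+ * Slot_inst encoders target the 24-bit core instruction format; the
+ * Slot_inst16a/Slot_inst16b encoders, such as the one above, target
+ * the two 16-bit density formats, which is why each narrow (_n)
+ * opcode gets its own template bits.
+ */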
+
+static void
+Opcode_rfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3400;
+}
+
+static void
+Opcode_rfwu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3500;
+}
+
+static void
+Opcode_l32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x90000;
+}
+
+static void
+Opcode_s32e_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x490000;
+}
+
+static void
+Opcode_rsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34800;
+}
+
+static void
+Opcode_wsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134800;
+}
+
+static void
+Opcode_xsr_windowbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614800;
+}
+
+static void
+Opcode_rsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x34900;
+}
+
+static void
+Opcode_wsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x134900;
+}
+
+static void
+Opcode_xsr_windowstart_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x614900;
+}
+
+static void
+Opcode_add_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa;
+}
+
+static void
+Opcode_addi_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb;
+}
+
+static void
+Opcode_beqz_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8c;
+}
+
+static void
+Opcode_bnez_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcc;
+}
+
+static void
+Opcode_ill_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf06d;
+}
+
+static void
+Opcode_l32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8;
+}
+
+static void
+Opcode_mov_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd;
+}
+
+static void
+Opcode_movi_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc;
+}
+
+static void
+Opcode_nop_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf03d;
+}
+
+static void
+Opcode_ret_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00d;
+}
+
+static void
+Opcode_s32i_n_Slot_inst16a_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9;
+}
+
+static void
+Opcode_rur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30e70;
+}
+
+static void
+Opcode_wur_threadptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3e700;
+}
+
+static void
+Opcode_addi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc002;
+}
+
+static void
+Opcode_addi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200040;
+}
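+
+/*
+ * From here on many core opcodes carry a second encoder for
+ * Slot_ae_slot0 (and some a third for Slot_ae_slot1): these appear to
+ * be the slots of this core's Audio Engine VLIW bundle format, so the
+ * same opcode takes different template bits depending on which format
+ * it is bundled into.
+ */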
+
+static void
+Opcode_addmi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd002;
+}
+
+static void
+Opcode_addmi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200080;
+}
+
+static void
+Opcode_add_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x800000;
+}
+
+static void
+Opcode_add_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b2000;
+}
+
+static void
+Opcode_sub_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00000;
+}
+
+static void
+Opcode_sub_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ca000;
+}
+
+static void
+Opcode_addx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x900000;
+}
+
+static void
+Opcode_addx2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b4000;
+}
+
+static void
+Opcode_addx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00000;
+}
+
+static void
+Opcode_addx4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b8000;
+}
+
+static void
+Opcode_addx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00000;
+}
+
+static void
+Opcode_addx8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b3000;
+}
+
+static void
+Opcode_subx2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd00000;
+}
+
+static void
+Opcode_subx2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1cc000;
+}
+
+static void
+Opcode_subx4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00000;
+}
+
+static void
+Opcode_subx4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1cb000;
+}
+
+static void
+Opcode_subx8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00000;
+}
+
+static void
+Opcode_subx8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1cd000;
+}
+
+static void
+Opcode_and_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_and_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b5000;
+}
+
+static void
+Opcode_or_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_or_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e0000;
+}
+
+static void
+Opcode_xor_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_xor_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ce000;
+}
+
+static void
+Opcode_beqi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x26;
+}
+
+static void
+Opcode_beqi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300000;
+}
+
+static void
+Opcode_bnei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x66;
+}
+
+static void
+Opcode_bnei_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300003;
+}
+
+static void
+Opcode_bgei_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe6;
+}
+
+static void
+Opcode_bgei_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300001;
+}
+
+static void
+Opcode_blti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa6;
+}
+
+static void
+Opcode_blti_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300004;
+}
+
+static void
+Opcode_bbci_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6007;
+}
+
+static void
+Opcode_bbci_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200000;
+}
+
+static void
+Opcode_bbsi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe007;
+}
+
+static void
+Opcode_bbsi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200020;
+}
+
+static void
+Opcode_bgeui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf6;
+}
+
+static void
+Opcode_bgeui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300002;
+}
+
+static void
+Opcode_bltui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb6;
+}
+
+static void
+Opcode_bltui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300008;
+}
+
+static void
+Opcode_beq_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1007;
+}
+
+static void
+Opcode_beq_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000a0;
+}
+
+static void
+Opcode_bne_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9007;
+}
+
+static void
+Opcode_bne_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_bge_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa007;
+}
+
+static void
+Opcode_bge_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000c0;
+}
+
+static void
+Opcode_blt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2007;
+}
+
+static void
+Opcode_blt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000d0;
+}
+
+static void
+Opcode_bgeu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb007;
+}
+
+static void
+Opcode_bgeu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000b0;
+}
+
+static void
+Opcode_bltu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3007;
+}
+
+static void
+Opcode_bltu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000e0;
+}
+
+static void
+Opcode_bany_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8007;
+}
+
+static void
+Opcode_bany_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200060;
+}
+
+static void
+Opcode_bnone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7;
+}
+
+static void
+Opcode_bnone_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400010;
+}
+
+static void
+Opcode_ball_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4007;
+}
+
+static void
+Opcode_ball_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200050;
+}
+
+static void
+Opcode_bnall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc007;
+}
+
+static void
+Opcode_bnall_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000f0;
+}
+
+static void
+Opcode_bbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5007;
+}
+
+static void
+Opcode_bbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200070;
+}
+
+static void
+Opcode_bbs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd007;
+}
+
+static void
+Opcode_bbs_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x200090;
+}
+
+static void
+Opcode_beqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16;
+}
+
+static void
+Opcode_beqz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x180000;
+}
+
+static void
+Opcode_bnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x56;
+}
+
+static void
+Opcode_bnez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x190000;
+}
+
+static void
+Opcode_bgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd6;
+}
+
+static void
+Opcode_bgez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x160000;
+}
+
+static void
+Opcode_bltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x96;
+}
+
+static void
+Opcode_bltz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x170000;
+}
+
+static void
+Opcode_call0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5;
+}
+
+static void
+Opcode_callx0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc0;
+}
+
+static void
+Opcode_extui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_extui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x140000;
+}
+
+static void
+Opcode_ill_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0;
+}
+
+static void
+Opcode_j_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6;
+}
+
+static void
+Opcode_j_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x100000;
+}
+
+static void
+Opcode_jx_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa0;
+}
+
+static void
+Opcode_jx_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee031;
+}
+
+static void
+Opcode_l16ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1002;
+}
+
+static void
+Opcode_l16ui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400040;
+}
+
+static void
+Opcode_l16si_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9002;
+}
+
+static void
+Opcode_l16si_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400020;
+}
+
+static void
+Opcode_l32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2002;
+}
+
+static void
+Opcode_l32i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400080;
+}
+
+static void
+Opcode_l32r_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1;
+}
+
+static void
+Opcode_l32r_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500000;
+}
+
+static void
+Opcode_l8ui_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2;
+}
+
+static void
+Opcode_l8ui_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400030;
+}
+
+static void
+Opcode_loop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8076;
+}
+
+static void
+Opcode_loopnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9076;
+}
+
+static void
+Opcode_loopgtz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa076;
+}
+
+static void
+Opcode_movi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa002;
+}
+
+static void
+Opcode_movi_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1a0000;
+}
+
+static void
+Opcode_moveqz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x830000;
+}
+
+static void
+Opcode_moveqz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1be000;
+}
+
+static void
+Opcode_movnez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x930000;
+}
+
+static void
+Opcode_movnez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c8000;
+}
+
+static void
+Opcode_movltz_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30000;
+}
+
+static void
+Opcode_movltz_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c4000;
+}
+
+static void
+Opcode_movgez_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30000;
+}
+
+static void
+Opcode_movgez_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c2000;
+}
+
+static void
+Opcode_neg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600000;
+}
+
+static void
+Opcode_neg_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1d00;
+}
+
+static void
+Opcode_abs_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x600100;
+}
+
+static void
+Opcode_abs_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1c00;
+}
+
+static void
+Opcode_nop_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20f0;
+}
+
+static void
+Opcode_nop_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16105;
+}
+
+static void
+Opcode_nop_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee0b1;
+}
+
+static void
+Opcode_ret_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x80;
+}
+
+static void
+Opcode_simcall_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5100;
+}
+
+static void
+Opcode_s16i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5002;
+}
+
+static void
+Opcode_s16i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400050;
+}
+
+static void
+Opcode_s32i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6002;
+}
+
+static void
+Opcode_s32i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400060;
+}
+
+static void
+Opcode_s8i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4002;
+}
+
+static void
+Opcode_s8i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400070;
+}
+
+static void
+Opcode_ssr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x400000;
+}
+
+static void
+Opcode_ssr_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee071;
+}
+
+static void
+Opcode_ssl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x401000;
+}
+
+static void
+Opcode_ssl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee038;
+}
+
+static void
+Opcode_ssa8l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x402000;
+}
+
+static void
+Opcode_ssa8l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee034;
+}
+
+static void
+Opcode_ssa8b_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x403000;
+}
+
+static void
+Opcode_ssa8b_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee032;
+}
+
+static void
+Opcode_ssai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x404000;
+}
+
+static void
+Opcode_ssai_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ef0a0;
+}
+
+static void
+Opcode_sll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10000;
+}
+
+static void
+Opcode_sll_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5003;
+}
+
+static void
+Opcode_src_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x810000;
+}
+
+static void
+Opcode_src_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c7000;
+}
+
+static void
+Opcode_srl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x910000;
+}
+
+static void
+Opcode_srl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1f00;
+}
+
+static void
+Opcode_sra_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10000;
+}
+
+static void
+Opcode_sra_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1e00;
+}
+
+static void
+Opcode_slli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_slli_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c0000;
+}
+
+static void
+Opcode_srai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x210000;
+}
+
+static void
+Opcode_srai_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b0000;
+}
+
+static void
+Opcode_srli_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x410000;
+}
+
+static void
+Opcode_srli_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c9000;
+}
+
+static void
+Opcode_memw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20c0;
+}
+
+static void
+Opcode_extw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20d0;
+}
+
+static void
+Opcode_isync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000;
+}
+
+static void
+Opcode_rsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2010;
+}
+
+static void
+Opcode_esync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2020;
+}
+
+static void
+Opcode_dsync_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2030;
+}
+
+static void
+Opcode_rsil_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000;
+}
+
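+/* Special-register accessors.  The encodings follow a fixed pattern:
+   RSR is 0x030000, WSR 0x130000 and XSR 0x610000, each ORed with the
+   special-register number shifted left by 8 (e.g. LEND is SR 1,
+   PS is SR 230 = 0xe6).  */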
+static void
+Opcode_rsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30100;
+}
+
+static void
+Opcode_wsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130100;
+}
+
+static void
+Opcode_xsr_lend_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610100;
+}
+
+static void
+Opcode_rsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30200;
+}
+
+static void
+Opcode_wsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130200;
+}
+
+static void
+Opcode_xsr_lcount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610200;
+}
+
+static void
+Opcode_rsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_wsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130000;
+}
+
+static void
+Opcode_xsr_lbeg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610000;
+}
+
+static void
+Opcode_rsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30300;
+}
+
+static void
+Opcode_wsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130300;
+}
+
+static void
+Opcode_xsr_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610300;
+}
+
+static void
+Opcode_rsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30500;
+}
+
+static void
+Opcode_wsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130500;
+}
+
+static void
+Opcode_xsr_litbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610500;
+}
+
+static void
+Opcode_rsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b000;
+}
+
+static void
+Opcode_wsr_configid0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b000;
+}
+
+static void
+Opcode_rsr_configid1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d000;
+}
+
+static void
+Opcode_rsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e600;
+}
+
+static void
+Opcode_wsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e600;
+}
+
+static void
+Opcode_xsr_ps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e600;
+}
+
+static void
+Opcode_rsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b100;
+}
+
+static void
+Opcode_wsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b100;
+}
+
+static void
+Opcode_xsr_epc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b100;
+}
+
+static void
+Opcode_rsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d100;
+}
+
+static void
+Opcode_wsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d100;
+}
+
+static void
+Opcode_xsr_excsave1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d100;
+}
+
+static void
+Opcode_rsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3b200;
+}
+
+static void
+Opcode_wsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13b200;
+}
+
+static void
+Opcode_xsr_epc2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61b200;
+}
+
+static void
+Opcode_rsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3d200;
+}
+
+static void
+Opcode_wsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13d200;
+}
+
+static void
+Opcode_xsr_excsave2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61d200;
+}
+
+static void
+Opcode_rsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c200;
+}
+
+static void
+Opcode_wsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c200;
+}
+
+static void
+Opcode_xsr_eps2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c200;
+}
+
+static void
+Opcode_rsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ee00;
+}
+
+static void
+Opcode_wsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ee00;
+}
+
+static void
+Opcode_xsr_excvaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ee00;
+}
+
+static void
+Opcode_rsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3c000;
+}
+
+static void
+Opcode_wsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13c000;
+}
+
+static void
+Opcode_xsr_depc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61c000;
+}
+
+static void
+Opcode_rsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e800;
+}
+
+static void
+Opcode_wsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e800;
+}
+
+static void
+Opcode_xsr_exccause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e800;
+}
+
+static void
+Opcode_rsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f400;
+}
+
+static void
+Opcode_wsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f400;
+}
+
+static void
+Opcode_xsr_misc0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f400;
+}
+
+static void
+Opcode_rsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f500;
+}
+
+static void
+Opcode_wsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f500;
+}
+
+static void
+Opcode_xsr_misc1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f500;
+}
+
+static void
+Opcode_rsr_prid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3eb00;
+}
+
+static void
+Opcode_rsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e700;
+}
+
+static void
+Opcode_wsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e700;
+}
+
+static void
+Opcode_xsr_vecbase_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e700;
+}
+
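+/* Multiply opcodes: MUL16U/MUL16S (16x16) and MULL (32x32, low half
+   of the product).  */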
+static void
+Opcode_mul16u_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10000;
+}
+
+static void
+Opcode_mul16s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd10000;
+}
+
+static void
+Opcode_mull_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x820000;
+}
+
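+/* Interrupt option: RFI, WAITI, and the INTERRUPT (read-only),
+   INTSET/INTCLEAR (write-only) and INTENABLE special registers.  */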
+static void
+Opcode_rfi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3010;
+}
+
+static void
+Opcode_waiti_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7000;
+}
+
+static void
+Opcode_rsr_interrupt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e200;
+}
+
+static void
+Opcode_wsr_intset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e200;
+}
+
+static void
+Opcode_wsr_intclear_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e300;
+}
+
+static void
+Opcode_rsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e400;
+}
+
+static void
+Opcode_wsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e400;
+}
+
+static void
+Opcode_xsr_intenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e400;
+}
+
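+/* Debug option: BREAK and the 16-bit BREAK.N, the DEBUGCAUSE, ICOUNT,
+   ICOUNTLEVEL and DDR special registers, and the RFDO/RFDD returns
+   from debug exceptions.  */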
+static void
+Opcode_break_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000;
+}
+
+static void
+Opcode_break_n_Slot_inst16b_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02d;
+}
+
+static void
+Opcode_rsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e900;
+}
+
+static void
+Opcode_wsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e900;
+}
+
+static void
+Opcode_xsr_debugcause_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e900;
+}
+
+static void
+Opcode_rsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ec00;
+}
+
+static void
+Opcode_wsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ec00;
+}
+
+static void
+Opcode_xsr_icount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ec00;
+}
+
+static void
+Opcode_rsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ed00;
+}
+
+static void
+Opcode_wsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ed00;
+}
+
+static void
+Opcode_xsr_icountlevel_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ed00;
+}
+
+static void
+Opcode_rsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36800;
+}
+
+static void
+Opcode_wsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136800;
+}
+
+static void
+Opcode_xsr_ddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616800;
+}
+
+static void
+Opcode_rfdo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e000;
+}
+
+static void
+Opcode_rfdd_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1e010;
+}
+
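+/* Boolean option: bitwise operations on boolean registers
+   (ANDB/ANDBC/ORB/ORBC/XORB), reductions (ANY4/ALL4/ANY8/ALL8),
+   branches on a boolean (BF/BT), conditional moves (MOVF/MOVT), and
+   access to the BR special register.  */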
+static void
+Opcode_andb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_andb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b6000;
+}
+
+static void
+Opcode_andbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x120000;
+}
+
+static void
+Opcode_andbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b7000;
+}
+
+static void
+Opcode_orb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x220000;
+}
+
+static void
+Opcode_orb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c3000;
+}
+
+static void
+Opcode_orbc_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x320000;
+}
+
+static void
+Opcode_orbc_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c5000;
+}
+
+static void
+Opcode_xorb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x420000;
+}
+
+static void
+Opcode_xorb_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1cf000;
+}
+
+static void
+Opcode_any4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x8000;
+}
+
+static void
+Opcode_any4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2480;
+}
+
+static void
+Opcode_all4_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x9000;
+}
+
+static void
+Opcode_all4_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2800;
+}
+
+static void
+Opcode_any8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa000;
+}
+
+static void
+Opcode_any8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ef060;
+}
+
+static void
+Opcode_all8_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb000;
+}
+
+static void
+Opcode_all8_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ef020;
+}
+
+static void
+Opcode_bf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x76;
+}
+
+static void
+Opcode_bf_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300005;
+}
+
+static void
+Opcode_bt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1076;
+}
+
+static void
+Opcode_bt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x300006;
+}
+
+static void
+Opcode_movf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc30000;
+}
+
+static void
+Opcode_movf_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1bf000;
+}
+
+static void
+Opcode_movt_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xd30000;
+}
+
+static void
+Opcode_movt_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d0000;
+}
+
+static void
+Opcode_rsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30400;
+}
+
+static void
+Opcode_wsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130400;
+}
+
+static void
+Opcode_xsr_br_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610400;
+}
+
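+/* Timer option: the CCOUNT cycle counter and the CCOMPARE0/CCOMPARE1
+   compare registers.  */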
+static void
+Opcode_rsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3ea00;
+}
+
+static void
+Opcode_wsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13ea00;
+}
+
+static void
+Opcode_xsr_ccount_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61ea00;
+}
+
+static void
+Opcode_rsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f000;
+}
+
+static void
+Opcode_wsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f000;
+}
+
+static void
+Opcode_xsr_ccompare0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f000;
+}
+
+static void
+Opcode_rsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3f100;
+}
+
+static void
+Opcode_wsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13f100;
+}
+
+static void
+Opcode_xsr_ccompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61f100;
+}
+
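+/* Cache maintenance: instruction-cache ops (IPF/IHI/III and the
+   LICT/LICW/SICT/SICW tag and word accesses) and data-cache ops
+   (DHWB/DHWBI/DIWB/DIWBI/DHI/DII, the DPFR/DPFW/DPFRO/DPFWO
+   prefetches, and SDCT/LDCT tag accesses).  */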
+static void
+Opcode_ipf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70c2;
+}
+
+static void
+Opcode_ihi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70e2;
+}
+
+static void
+Opcode_iii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x70f2;
+}
+
+static void
+Opcode_lict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf10000;
+}
+
+static void
+Opcode_licw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf12000;
+}
+
+static void
+Opcode_sict_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11000;
+}
+
+static void
+Opcode_sicw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf13000;
+}
+
+static void
+Opcode_dhwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7042;
+}
+
+static void
+Opcode_dhwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7052;
+}
+
+static void
+Opcode_diwb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x47082;
+}
+
+static void
+Opcode_diwbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x57082;
+}
+
+static void
+Opcode_dhi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7062;
+}
+
+static void
+Opcode_dii_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7072;
+}
+
+static void
+Opcode_dpfr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7002;
+}
+
+static void
+Opcode_dpfw_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7012;
+}
+
+static void
+Opcode_dpfro_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7022;
+}
+
+static void
+Opcode_dpfwo_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x7032;
+}
+
+static void
+Opcode_sdct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf19000;
+}
+
+static void
+Opcode_ldct_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf18000;
+}
+
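+/* MMU: the PTEVADDR/RASID/ITLBCFG/DTLBCFG special registers, the
+   ITLB/DTLB invalidate, probe, read and write opcodes, LDPTE, and the
+   HWWITLBA/HWWDTLBA hardware-refill helpers.  */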
+static void
+Opcode_wsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135300;
+}
+
+static void
+Opcode_rsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35300;
+}
+
+static void
+Opcode_xsr_ptevaddr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615300;
+}
+
+static void
+Opcode_rsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35a00;
+}
+
+static void
+Opcode_wsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135a00;
+}
+
+static void
+Opcode_xsr_rasid_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615a00;
+}
+
+static void
+Opcode_rsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35b00;
+}
+
+static void
+Opcode_wsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135b00;
+}
+
+static void
+Opcode_xsr_itlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615b00;
+}
+
+static void
+Opcode_rsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x35c00;
+}
+
+static void
+Opcode_wsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x135c00;
+}
+
+static void
+Opcode_xsr_dtlbcfg_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x615c00;
+}
+
+static void
+Opcode_idtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50c000;
+}
+
+static void
+Opcode_pdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50d000;
+}
+
+static void
+Opcode_rdtlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50b000;
+}
+
+static void
+Opcode_rdtlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50f000;
+}
+
+static void
+Opcode_wdtlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50e000;
+}
+
+static void
+Opcode_iitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x504000;
+}
+
+static void
+Opcode_pitlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x505000;
+}
+
+static void
+Opcode_ritlb0_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x503000;
+}
+
+static void
+Opcode_ritlb1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x507000;
+}
+
+static void
+Opcode_witlb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x506000;
+}
+
+static void
+Opcode_ldpte_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf1f000;
+}
+
+static void
+Opcode_hwwitlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x501000;
+}
+
+static void
+Opcode_hwwdtlba_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x509000;
+}
+
+static void
+Opcode_rsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3e000;
+}
+
+static void
+Opcode_wsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x13e000;
+}
+
+static void
+Opcode_xsr_cpenable_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x61e000;
+}
+
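+/* Miscellaneous-operations option: CLAMPS, MIN/MAX/MINU/MAXU,
+   NSA/NSAU (normalization shift amount), and SEXT.  */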
+static void
+Opcode_clamps_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x330000;
+}
+
+static void
+Opcode_clamps_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1b9000;
+}
+
+static void
+Opcode_min_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x430000;
+}
+
+static void
+Opcode_min_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1bb000;
+}
+
+static void
+Opcode_max_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x530000;
+}
+
+static void
+Opcode_max_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ba000;
+}
+
+static void
+Opcode_minu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x630000;
+}
+
+static void
+Opcode_minu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1bd000;
+}
+
+static void
+Opcode_maxu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x730000;
+}
+
+static void
+Opcode_maxu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1bc000;
+}
+
+static void
+Opcode_nsa_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40e000;
+}
+
+static void
+Opcode_nsau_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40f000;
+}
+
+static void
+Opcode_sext_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x230000;
+}
+
+static void
+Opcode_sext_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c6000;
+}
+
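+/* Multiprocessor synchronization: L32AI/S32RI acquire/release
+   accesses and the S32C1I compare-and-swap, which uses the SCOMPARE1
+   and ATOMCTL special registers.  */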
+static void
+Opcode_l32ai_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb002;
+}
+
+static void
+Opcode_s32ri_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf002;
+}
+
+static void
+Opcode_s32c1i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe002;
+}
+
+static void
+Opcode_rsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30c00;
+}
+
+static void
+Opcode_wsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x130c00;
+}
+
+static void
+Opcode_xsr_scompare1_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x610c00;
+}
+
+static void
+Opcode_rsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x36300;
+}
+
+static void
+Opcode_wsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x136300;
+}
+
+static void
+Opcode_xsr_atomctl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x616300;
+}
+
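+/* RER/WER: read/write external (TIE wire) registers.  */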
+static void
+Opcode_rer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x406000;
+}
+
+static void
+Opcode_wer_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x407000;
+}
+
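+/* User registers of the "ae" (audio-engine, HiFi2-style) DSP
+   extension, accessed via RUR/WUR.  */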
+static void
+Opcode_rur_ae_ovf_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30f00;
+}
+
+static void
+Opcode_wur_ae_ovf_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3f000;
+}
+
+static void
+Opcode_rur_ae_bithead_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30f10;
+}
+
+static void
+Opcode_wur_ae_bithead_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3f100;
+}
+
+static void
+Opcode_rur_ae_ts_fts_bu_bp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30f20;
+}
+
+static void
+Opcode_wur_ae_ts_fts_bu_bp_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3f200;
+}
+
+static void
+Opcode_rur_ae_sd_no_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30f30;
+}
+
+static void
+Opcode_wur_ae_sd_no_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf3f300;
+}
+
+static void
+Opcode_rur_ae_overflow_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90804;
+}
+
+static void
+Opcode_wur_ae_overflow_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca0004;
+}
+
+static void
+Opcode_rur_ae_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90904;
+}
+
+static void
+Opcode_wur_ae_sar_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca1004;
+}
+
+static void
+Opcode_rur_ae_bitptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90a04;
+}
+
+static void
+Opcode_wur_ae_bitptr_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca2004;
+}
+
+static void
+Opcode_rur_ae_bitsused_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90b04;
+}
+
+static void
+Opcode_wur_ae_bitsused_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca3004;
+}
+
+static void
+Opcode_rur_ae_tablesize_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90c04;
+}
+
+static void
+Opcode_wur_ae_tablesize_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca4004;
+}
+
+static void
+Opcode_rur_ae_first_ts_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90d04;
+}
+
+static void
+Opcode_wur_ae_first_ts_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca5004;
+}
+
+static void
+Opcode_rur_ae_nextoffset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90e04;
+}
+
+static void
+Opcode_wur_ae_nextoffset_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca6004;
+}
+
+static void
+Opcode_rur_ae_searchdone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90f04;
+}
+
+static void
+Opcode_wur_ae_searchdone_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca7004;
+}
+
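+/* AE load/store opcodes.  The mnemonic encodes the element type
+   (e.g. LP16F, LP24, LP24X2F) and the addressing-mode suffix:
+   _I immediate offset, _IU immediate with address update, _X indexed,
+   _XU indexed with update.  */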
+static void
+Opcode_ae_lp16f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d1080;
+}
+
+static void
+Opcode_ae_lp16f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa50004;
+}
+
+static void
+Opcode_ae_lp16f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d2080;
+}
+
+static void
+Opcode_ae_lp16f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa90004;
+}
+
+static void
+Opcode_ae_lp16f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d3000;
+}
+
+static void
+Opcode_ae_lp16f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xac0004;
+}
+
+static void
+Opcode_ae_lp16f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d3080;
+}
+
+static void
+Opcode_ae_lp16f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaf0004;
+}
+
+static void
+Opcode_ae_lp24_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d6080;
+}
+
+static void
+Opcode_ae_lp24_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa58004;
+}
+
+static void
+Opcode_ae_lp24_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d7000;
+}
+
+static void
+Opcode_ae_lp24_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa98004;
+}
+
+static void
+Opcode_ae_lp24_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d7080;
+}
+
+static void
+Opcode_ae_lp24_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xac8004;
+}
+
+static void
+Opcode_ae_lp24_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d8080;
+}
+
+static void
+Opcode_ae_lp24_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaf8004;
+}
+
+static void
+Opcode_ae_lp24f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d9000;
+}
+
+static void
+Opcode_ae_lp24f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa60004;
+}
+
+static void
+Opcode_ae_lp24f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1da000;
+}
+
+static void
+Opcode_ae_lp24f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaa0004;
+}
+
+static void
+Opcode_ae_lp24f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1dc000;
+}
+
+static void
+Opcode_ae_lp24f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xad0004;
+}
+
+static void
+Opcode_ae_lp24f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d9080;
+}
+
+static void
+Opcode_ae_lp24f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb00004;
+}
+
+static void
+Opcode_ae_lp16x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d4080;
+}
+
+static void
+Opcode_ae_lp16x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa68004;
+}
+
+static void
+Opcode_ae_lp16x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d5000;
+}
+
+static void
+Opcode_ae_lp16x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xaa8004;
+}
+
+static void
+Opcode_ae_lp16x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d6000;
+}
+
+static void
+Opcode_ae_lp16x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xad8004;
+}
+
+static void
+Opcode_ae_lp16x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d5080;
+}
+
+static void
+Opcode_ae_lp16x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb08004;
+}
+
+static void
+Opcode_ae_lp24x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1dd000;
+}
+
+static void
+Opcode_ae_lp24x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa70004;
+}
+
+static void
+Opcode_ae_lp24x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1de000;
+}
+
+static void
+Opcode_ae_lp24x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xab0004;
+}
+
+static void
+Opcode_ae_lp24x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1dd080;
+}
+
+static void
+Opcode_ae_lp24x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xae0004;
+}
+
+static void
+Opcode_ae_lp24x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1de080;
+}
+
+static void
+Opcode_ae_lp24x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb10004;
+}
+
+static void
+Opcode_ae_lp24x2_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1da080;
+}
+
+static void
+Opcode_ae_lp24x2_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa78004;
+}
+
+static void
+Opcode_ae_lp24x2_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1db000;
+}
+
+static void
+Opcode_ae_lp24x2_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xab8004;
+}
+
+static void
+Opcode_ae_lp24x2_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1db080;
+}
+
+static void
+Opcode_ae_lp24x2_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xae8004;
+}
+
+static void
+Opcode_ae_lp24x2_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1dc080;
+}
+
+static void
+Opcode_ae_lp24x2_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb18004;
+}
+
+static void
+Opcode_ae_sp16x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e8000;
+}
+
+static void
+Opcode_ae_sp16x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb20004;
+}
+
+static void
+Opcode_ae_sp16x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f0000;
+}
+
+static void
+Opcode_ae_sp16x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb50004;
+}
+
+static void
+Opcode_ae_sp16x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e1080;
+}
+
+static void
+Opcode_ae_sp16x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb80004;
+}
+
+static void
+Opcode_ae_sp16x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e2080;
+}
+
+static void
+Opcode_ae_sp16x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbb0004;
+}
+
+static void
+Opcode_ae_sp24x2s_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ec000;
+}
+
+static void
+Opcode_ae_sp24x2s_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb28004;
+}
+
+static void
+Opcode_ae_sp24x2s_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e9080;
+}
+
+static void
+Opcode_ae_sp24x2s_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb58004;
+}
+
+static void
+Opcode_ae_sp24x2s_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ea080;
+}
+
+static void
+Opcode_ae_sp24x2s_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb88004;
+}
+
+static void
+Opcode_ae_sp24x2s_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1eb000;
+}
+
+static void
+Opcode_ae_sp24x2s_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbb8004;
+}
+
+static void
+Opcode_ae_sp24x2f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e7080;
+}
+
+static void
+Opcode_ae_sp24x2f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb30004;
+}
+
+static void
+Opcode_ae_sp24x2f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e8080;
+}
+
+static void
+Opcode_ae_sp24x2f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb60004;
+}
+
+static void
+Opcode_ae_sp24x2f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e9000;
+}
+
+static void
+Opcode_ae_sp24x2f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb90004;
+}
+
+static void
+Opcode_ae_sp24x2f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ea000;
+}
+
+static void
+Opcode_ae_sp24x2f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbc0004;
+}
+
+static void
+Opcode_ae_sp16f_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1df080;
+}
+
+static void
+Opcode_ae_sp16f_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb38004;
+}
+
+static void
+Opcode_ae_sp16f_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e1000;
+}
+
+static void
+Opcode_ae_sp16f_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb68004;
+}
+
+static void
+Opcode_ae_sp16f_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e2000;
+}
+
+static void
+Opcode_ae_sp16f_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb98004;
+}
+
+static void
+Opcode_ae_sp16f_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e4000;
+}
+
+static void
+Opcode_ae_sp16f_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbc8004;
+}
+
+static void
+Opcode_ae_sp24s_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e6000;
+}
+
+static void
+Opcode_ae_sp24s_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb40004;
+}
+
+static void
+Opcode_ae_sp24s_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e5080;
+}
+
+static void
+Opcode_ae_sp24s_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb70004;
+}
+
+static void
+Opcode_ae_sp24s_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e6080;
+}
+
+static void
+Opcode_ae_sp24s_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xba0004;
+}
+
+static void
+Opcode_ae_sp24s_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e7000;
+}
+
+static void
+Opcode_ae_sp24s_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbd0004;
+}
+
+static void
+Opcode_ae_sp24f_l_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e3000;
+}
+
+static void
+Opcode_ae_sp24f_l_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb48004;
+}
+
+static void
+Opcode_ae_sp24f_l_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e3080;
+}
+
+static void
+Opcode_ae_sp24f_l_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xb78004;
+}
+
+static void
+Opcode_ae_sp24f_l_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e4080;
+}
+
+static void
+Opcode_ae_sp24f_l_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xba8004;
+}
+
+static void
+Opcode_ae_sp24f_l_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1e5000;
+}
+
+static void
+Opcode_ae_sp24f_l_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbd8004;
+}
+
+static void
+Opcode_ae_lq56_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ed030;
+}
+
+static void
+Opcode_ae_lq56_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc10004;
+}
+
+static void
+Opcode_ae_lq56_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee010;
+}
+
+static void
+Opcode_ae_lq56_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc12004;
+}
+
+static void
+Opcode_ae_lq56_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee020;
+}
+
+static void
+Opcode_ae_lq56_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc20004;
+}
+
+static void
+Opcode_ae_lq56_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ef000;
+}
+
+static void
+Opcode_ae_lq56_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc22004;
+}
+
+static void
+Opcode_ae_lq32f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ed000;
+}
+
+static void
+Opcode_ae_lq32f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc11004;
+}
+
+static void
+Opcode_ae_lq32f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee000;
+}
+
+static void
+Opcode_ae_lq32f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc13004;
+}
+
+static void
+Opcode_ae_lq32f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ed010;
+}
+
+static void
+Opcode_ae_lq32f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc21004;
+}
+
+static void
+Opcode_ae_lq32f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ed020;
+}
+
+static void
+Opcode_ae_lq32f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc23004;
+}
+
+static void
+Opcode_ae_sq56s_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f0080;
+}
+
+static void
+Opcode_ae_sq56s_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc30004;
+}
+
+static void
+Opcode_ae_sq56s_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f00c0;
+}
+
+static void
+Opcode_ae_sq56s_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc38004;
+}
+
+static void
+Opcode_ae_sq56s_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3000;
+}
+
+static void
+Opcode_ae_sq56s_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc40004;
+}
+
+static void
+Opcode_ae_sq56s_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3040;
+}
+
+static void
+Opcode_ae_sq56s_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc48004;
+}
+
+static void
+Opcode_ae_sq32f_i_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ec080;
+}
+
+static void
+Opcode_ae_sq32f_i_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc34004;
+}
+
+static void
+Opcode_ae_sq32f_iu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ec0c0;
+}
+
+static void
+Opcode_ae_sq32f_iu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc3c004;
+}
+
+static void
+Opcode_ae_sq32f_x_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f4000;
+}
+
+static void
+Opcode_ae_sq32f_x_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc44004;
+}
+
+static void
+Opcode_ae_sq32f_xu_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f8000;
+}
+
+static void
+Opcode_ae_sq32f_xu_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc4c004;
+}
+
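+/* AE register moves, selects and conversions between the 48-bit P
+   registers, the 56-bit Q accumulators, and address registers
+   (MOVP48/MOVQ56, SELP24, CVT*, TRUNC*, ROUNDS*).  */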
+static void
+Opcode_ae_zerop48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b88;
+}
+
+static void
+Opcode_ae_movp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16808;
+}
+
+static void
+Opcode_ae_movp48_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2400;
+}
+
+static void
+Opcode_ae_movp48_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90004;
+}
+
+static void
+Opcode_ae_selp24_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10780;
+}
+
+static void
+Opcode_ae_selp24_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10708;
+}
+
+static void
+Opcode_ae_selp24_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10688;
+}
+
+static void
+Opcode_ae_selp24_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10700;
+}
+
+static void
+Opcode_ae_movtp24x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c200;
+}
+
+static void
+Opcode_ae_movfp24x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c004;
+}
+
+static void
+Opcode_ae_movtp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10480;
+}
+
+static void
+Opcode_ae_movfp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10400;
+}
+
+static void
+Opcode_ae_movpa24x2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1df000;
+}
+
+static void
+Opcode_ae_movpa24x2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc00004;
+}
+
+static void
+Opcode_ae_truncp24a32x2_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1eb080;
+}
+
+static void
+Opcode_ae_truncp24a32x2_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc08004;
+}
+
+static void
+Opcode_ae_cvta32p24_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3081;
+}
+
+static void
+Opcode_ae_cvta32p24_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcb0004;
+}
+
+static void
+Opcode_ae_cvta32p24_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3080;
+}
+
+static void
+Opcode_ae_cvta32p24_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xcb8004;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_ll_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d8000;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_ll_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbe0004;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_lh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d4000;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_lh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbe8004;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hl_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d2000;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hl_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbf0004;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hh_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1d1000;
+}
+
+static void
+Opcode_ae_cvtp24a16x2_hh_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xbf8004;
+}
+
+static void
+Opcode_ae_truncp24q48x2_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x51000;
+}
+
+static void
+Opcode_ae_truncp16_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16b08;
+}
+
+static void
+Opcode_ae_roundsp24q48sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16e48;
+}
+
+static void
+Opcode_ae_roundsp24q48asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16e28;
+}
+
+static void
+Opcode_ae_roundsp16q48sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16e18;
+}
+
+static void
+Opcode_ae_roundsp16q48asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16e08;
+}
+
+static void
+Opcode_ae_roundsp16sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16908;
+}
+
+static void
+Opcode_ae_roundsp16asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16888;
+}
+
+static void
+Opcode_ae_zeroq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16085;
+}
+
+static void
+Opcode_ae_movq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16007;
+}
+
+static void
+Opcode_ae_movq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2500;
+}
+
+static void
+Opcode_ae_movq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90414;
+}
+
+static void
+Opcode_ae_movtq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f6000;
+}
+
+static void
+Opcode_ae_movtq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe50014;
+}
+
+static void
+Opcode_ae_movfq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5000;
+}
+
+static void
+Opcode_ae_movfq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe60014;
+}
+
+static void
+Opcode_ae_cvtq48a32s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1ee030;
+}
+
+static void
+Opcode_ae_cvtq48a32s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe72034;
+}
+
+static void
+Opcode_ae_cvtq48p24s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16006;
+}
+
+static void
+Opcode_ae_cvtq48p24s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16005;
+}
+
+static void
+Opcode_ae_satq48s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50139;
+}
+
+static void
+Opcode_ae_truncq32_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16047;
+}
+
+static void
+Opcode_ae_roundsq32sym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16027;
+}
+
+static void
+Opcode_ae_roundsq32asym_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16017;
+}
+
+static void
+Opcode_ae_trunca32q48_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3086;
+}
+
+static void
+Opcode_ae_trunca32q48_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe70014;
+}
+
+static void
+Opcode_ae_movap24s_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3084;
+}
+
+static void
+Opcode_ae_movap24s_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc70004;
+}
+
+static void
+Opcode_ae_movap24s_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3082;
+}
+
+static void
+Opcode_ae_movap24s_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc78004;
+}
+
+static void
+Opcode_ae_trunca16p24s_l_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3083;
+}
+
+static void
+Opcode_ae_trunca16p24s_l_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc80004;
+}
+
+static void
+Opcode_ae_trunca16p24s_h_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3088;
+}
+
+static void
+Opcode_ae_trunca16p24s_h_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc88004;
+}
+
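+/* AE arithmetic and logic on P and Q registers: add/subtract/negate/
+   absolute value, min/max, saturating (...S) variants, bitwise ops,
+   and compares that set boolean registers (LT/LE/EQ).  */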
+static void
+Opcode_ae_addp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10500;
+}
+
+static void
+Opcode_ae_subp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10788;
+}
+
+static void
+Opcode_ae_negp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c600;
+}
+
+static void
+Opcode_ae_absp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c480;
+}
+
+static void
+Opcode_ae_maxp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10580;
+}
+
+static void
+Opcode_ae_minp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10588;
+}
+
+static void
+Opcode_ae_maxbp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10000;
+}
+
+static void
+Opcode_ae_minbp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10200;
+}
+
+static void
+Opcode_ae_addsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10600;
+}
+
+static void
+Opcode_ae_subsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c400;
+}
+
+static void
+Opcode_ae_negsp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c488;
+}
+
+static void
+Opcode_ae_abssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c500;
+}
+
+static void
+Opcode_ae_andp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10508;
+}
+
+static void
+Opcode_ae_nandp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10608;
+}
+
+static void
+Opcode_ae_orp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x10680;
+}
+
+static void
+Opcode_ae_xorp48_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c408;
+}
+
+static void
+Opcode_ae_ltp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c002;
+}
+
+static void
+Opcode_ae_lep24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c001;
+}
+
+static void
+Opcode_ae_eqp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1c000;
+}
+
+static void
+Opcode_ae_addq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x52000;
+}
+
+static void
+Opcode_ae_subq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50035;
+}
+
+static void
+Opcode_ae_negq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5003c;
+}
+
+static void
+Opcode_ae_absq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50039;
+}
+
+static void
+Opcode_ae_maxq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50032;
+}
+
+static void
+Opcode_ae_minq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50034;
+}
+
+static void
+Opcode_ae_maxbq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50000;
+}
+
+static void
+Opcode_ae_minbq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50010;
+}
+
+static void
+Opcode_ae_addsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50030;
+}
+
+static void
+Opcode_ae_subsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50036;
+}
+
+static void
+Opcode_ae_negsq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x500b9;
+}
+
+static void
+Opcode_ae_abssq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x5003a;
+}
+
+static void
+Opcode_ae_andq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50031;
+}
+
+static void
+Opcode_ae_nandq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50038;
+}
+
+static void
+Opcode_ae_orq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50033;
+}
+
+static void
+Opcode_ae_xorq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50037;
+}
+
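+/* AE shifts.  Judging from the mnemonics, the suffix selects the
+   shift-amount source: ...I forms take an immediate, ...S forms use
+   the AE shift-amount state (see AE_SAR above), ...A forms take an
+   address register; a further S before the register-type suffix marks
+   the saturating forms (e.g. AE_SLLISQ56S).  */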
+static void
+Opcode_ae_sllip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14000;
+}
+
+static void
+Opcode_ae_srlip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15000;
+}
+
+static void
+Opcode_ae_sraip24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x14800;
+}
+
+static void
+Opcode_ae_sllsp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a08;
+}
+
+static void
+Opcode_ae_srlsp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16a88;
+}
+
+static void
+Opcode_ae_srasp24_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16988;
+}
+
+static void
+Opcode_ae_sllisp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x18000;
+}
+
+static void
+Opcode_ae_sllssp24s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16c08;
+}
+
+static void
+Opcode_ae_slliq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1000;
+}
+
+static void
+Opcode_ae_slliq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc50004;
+}
+
+static void
+Opcode_ae_srliq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1800;
+}
+
+static void
+Opcode_ae_srliq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc50404;
+}
+
+static void
+Opcode_ae_sraiq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f1400;
+}
+
+static void
+Opcode_ae_sraiq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc50804;
+}
+
+static void
+Opcode_ae_sllsq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2600;
+}
+
+static void
+Opcode_ae_sllsq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90014;
+}
+
+static void
+Opcode_ae_srlsq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2504;
+}
+
+static void
+Opcode_ae_srlsq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90114;
+}
+
+static void
+Opcode_ae_srasq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2502;
+}
+
+static void
+Opcode_ae_srasq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90214;
+}
+
+static void
+Opcode_ae_sllaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5001;
+}
+
+static void
+Opcode_ae_sllaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe10014;
+}
+
+static void
+Opcode_ae_srlaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5008;
+}
+
+static void
+Opcode_ae_srlaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe20014;
+}
+
+static void
+Opcode_ae_sraaq56_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5004;
+}
+
+static void
+Opcode_ae_sraaq56_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe30014;
+}
+
+static void
+Opcode_ae_sllisq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2000;
+}
+
+static void
+Opcode_ae_sllisq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc50c04;
+}
+
+static void
+Opcode_ae_sllssq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f2501;
+}
+
+static void
+Opcode_ae_sllssq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc90314;
+}
+
+static void
+Opcode_ae_sllasq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f5002;
+}
+
+static void
+Opcode_ae_sllasq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe40014;
+}
+
+static void
+Opcode_ae_ltq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50800;
+}
+
+static void
+Opcode_ae_leq56s_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50040;
+}
+
+static void
+Opcode_ae_eqq56_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x50020;
+}
+
+static void
+Opcode_ae_nsaq56s_Slot_ae_slot0_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1f3085;
+}
+
+static void
+Opcode_ae_nsaq56s_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe74014;
+}
+
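+/* AE multiply/multiply-accumulate family.  By the usual naming
+   convention, MUL* computes a product, MULA* accumulates into a Q
+   register and MULS* subtracts; an F marks fractional multiplies and
+   a trailing S saturation, while _LL/_LH/_HL/_HH select the low/high
+   halves of the two operands.  */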
+static void
+Opcode_ae_mulfs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60101;
+}
+
+static void
+Opcode_ae_mulfp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008b;
+}
+
+static void
+Opcode_ae_mulp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60180;
+}
+
+static void
+Opcode_ae_mulfs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008f;
+}
+
+static void
+Opcode_ae_mulfp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008c;
+}
+
+static void
+Opcode_ae_mulp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60108;
+}
+
+static void
+Opcode_ae_mulfs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008e;
+}
+
+static void
+Opcode_ae_mulfp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008a;
+}
+
+static void
+Opcode_ae_mulp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60104;
+}
+
+static void
+Opcode_ae_mulfs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6008d;
+}
+
+static void
+Opcode_ae_mulfp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60089;
+}
+
+static void
+Opcode_ae_mulp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60102;
+}
+
+static void
+Opcode_ae_mulafs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60006;
+}
+
+static void
+Opcode_ae_mulafp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64000;
+}
+
+static void
+Opcode_ae_mulap24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000f;
+}
+
+static void
+Opcode_ae_mulafs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60005;
+}
+
+static void
+Opcode_ae_mulafp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60100;
+}
+
+static void
+Opcode_ae_mulap24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000e;
+}
+
+static void
+Opcode_ae_mulafs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60003;
+}
+
+static void
+Opcode_ae_mulafp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60080;
+}
+
+static void
+Opcode_ae_mulap24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000d;
+}
+
+static void
+Opcode_ae_mulafs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x68000;
+}
+
+static void
+Opcode_ae_mulafp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60008;
+}
+
+static void
+Opcode_ae_mulap24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000b;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60181;
+}
+
+static void
+Opcode_ae_mulsfp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010b;
+}
+
+static void
+Opcode_ae_mulsp24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60189;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010f;
+}
+
+static void
+Opcode_ae_mulsfp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010c;
+}
+
+static void
+Opcode_ae_mulsp24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60187;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010e;
+}
+
+static void
+Opcode_ae_mulsfp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010a;
+}
+
+static void
+Opcode_ae_mulsp24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60186;
+}
+
+static void
+Opcode_ae_mulsfs32p16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6010d;
+}
+
+static void
+Opcode_ae_mulsfp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60109;
+}
+
+static void
+Opcode_ae_mulsp24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60185;
+}
+
+static void
+Opcode_ae_mulafs56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000c;
+}
+
+static void
+Opcode_ae_mulas56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60088;
+}
+
+static void
+Opcode_ae_mulafs56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6000a;
+}
+
+static void
+Opcode_ae_mulas56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60084;
+}
+
+static void
+Opcode_ae_mulafs56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60009;
+}
+
+static void
+Opcode_ae_mulas56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60082;
+}
+
+static void
+Opcode_ae_mulafs56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60007;
+}
+
+static void
+Opcode_ae_mulas56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60081;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60183;
+}
+
+static void
+Opcode_ae_mulss56p24s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018d;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60188;
+}
+
+static void
+Opcode_ae_mulss56p24s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018b;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60184;
+}
+
+static void
+Opcode_ae_mulss56p24s_hl_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018c;
+}
+
+static void
+Opcode_ae_mulsfs56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60182;
+}
+
+static void
+Opcode_ae_mulss56p24s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018a;
+}
+
+static void
+Opcode_ae_mulfq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15807;
+}
+
+static void
+Opcode_ae_mulfq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15806;
+}
+
+static void
+Opcode_ae_mulfq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580a;
+}
+
+static void
+Opcode_ae_mulfq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15809;
+}
+
+static void
+Opcode_ae_mulq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580b;
+}
+
+static void
+Opcode_ae_mulq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580c;
+}
+
+static void
+Opcode_ae_mulq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580e;
+}
+
+static void
+Opcode_ae_mulq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580d;
+}
+
+static void
+Opcode_ae_mulafq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15800;
+}
+
+static void
+Opcode_ae_mulafq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16000;
+}
+
+static void
+Opcode_ae_mulafq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15802;
+}
+
+static void
+Opcode_ae_mulafq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15801;
+}
+
+static void
+Opcode_ae_mulaq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15808;
+}
+
+static void
+Opcode_ae_mulaq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15804;
+}
+
+static void
+Opcode_ae_mulaq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15805;
+}
+
+static void
+Opcode_ae_mulaq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x15803;
+}
+
+static void
+Opcode_ae_mulsfq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16001;
+}
+
+static void
+Opcode_ae_mulsfq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x1580f;
+}
+
+static void
+Opcode_ae_mulsfq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16004;
+}
+
+static void
+Opcode_ae_mulsfq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16002;
+}
+
+static void
+Opcode_ae_mulsq32sp16s_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16800;
+}
+
+static void
+Opcode_ae_mulsq32sp16s_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16008;
+}
+
+static void
+Opcode_ae_mulsq32sp16u_l_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x16003;
+}
+
+static void
+Opcode_ae_mulsq32sp16u_h_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x17000;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20007;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20002;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000c;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20003;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20005;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20000;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20009;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20004;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20006;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20001;
+}
+
+static void
+Opcode_ae_mulzaaq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000a;
+}
+
+static void
+Opcode_ae_mulzaafq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x20008;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30008;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000e;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30006;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30001;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30002;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000b;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30003;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000f;
+}
+
+static void
+Opcode_ae_mulzasq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30004;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x2000d;
+}
+
+static void
+Opcode_ae_mulzasq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30005;
+}
+
+static void
+Opcode_ae_mulzasfq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30000;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40000;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000a;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40004;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000d;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000e;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30007;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40001;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000c;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000f;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x30009;
+}
+
+static void
+Opcode_ae_mulzsaq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40002;
+}
+
+static void
+Opcode_ae_mulzsafq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x3000b;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000b;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40005;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000f;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40009;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000a;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40008;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000d;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_hh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40006;
+}
+
+static void
+Opcode_ae_mulzssq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000c;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16s_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40003;
+}
+
+static void
+Opcode_ae_mulzssq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x4000e;
+}
+
+static void
+Opcode_ae_mulzssfq32sp16u_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x40007;
+}
+
+static void
+Opcode_ae_mulzaafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64004;
+}
+
+static void
+Opcode_ae_mulzaap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64080;
+}
+
+static void
+Opcode_ae_mulzaafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64008;
+}
+
+static void
+Opcode_ae_mulzaap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64100;
+}
+
+static void
+Opcode_ae_mulzasfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64003;
+}
+
+static void
+Opcode_ae_mulzasp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64006;
+}
+
+static void
+Opcode_ae_mulzasfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64005;
+}
+
+static void
+Opcode_ae_mulzasp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64007;
+}
+
+static void
+Opcode_ae_mulzsafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64009;
+}
+
+static void
+Opcode_ae_mulzsap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400c;
+}
+
+static void
+Opcode_ae_mulzsafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400a;
+}
+
+static void
+Opcode_ae_mulzsap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400b;
+}
+
+static void
+Opcode_ae_mulzssfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400d;
+}
+
+static void
+Opcode_ae_mulzssp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400f;
+}
+
+static void
+Opcode_ae_mulzssfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6400e;
+}
+
+static void
+Opcode_ae_mulzssp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64081;
+}
+
+static void
+Opcode_ae_mulaafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60000;
+}
+
+static void
+Opcode_ae_mulaap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60002;
+}
+
+static void
+Opcode_ae_mulaafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60001;
+}
+
+static void
+Opcode_ae_mulaap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60004;
+}
+
+static void
+Opcode_ae_mulasfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60083;
+}
+
+static void
+Opcode_ae_mulasp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60086;
+}
+
+static void
+Opcode_ae_mulasfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60085;
+}
+
+static void
+Opcode_ae_mulasp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60087;
+}
+
+static void
+Opcode_ae_mulsafp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60103;
+}
+
+static void
+Opcode_ae_mulsap24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60106;
+}
+
+static void
+Opcode_ae_mulsafp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60105;
+}
+
+static void
+Opcode_ae_mulsap24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x60107;
+}
+
+static void
+Opcode_ae_mulssfp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018e;
+}
+
+static void
+Opcode_ae_mulssp24s_hh_ll_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64001;
+}
+
+static void
+Opcode_ae_mulssfp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x6018f;
+}
+
+static void
+Opcode_ae_mulssp24s_hl_lh_Slot_ae_slot1_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0x64002;
+}
+
+static void
+Opcode_ae_sha32_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00014;
+}
+
+static void
+Opcode_ae_vldl32t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa00004;
+}
+
+static void
+Opcode_ae_vldl16t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa10004;
+}
+
+static void
+Opcode_ae_vldl16c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe7e014;
+}
+
+static void
+Opcode_ae_vldsht_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xca8004;
+}
+
+static void
+Opcode_ae_lb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xc60004;
+}
+
+static void
+Opcode_ae_lbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00024;
+}
+
+static void
+Opcode_ae_lbk_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa20004;
+}
+
+static void
+Opcode_ae_lbki_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe00004;
+}
+
+static void
+Opcode_ae_db_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf01004;
+}
+
+static void
+Opcode_ae_dbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf02004;
+}
+
+static void
+Opcode_ae_vlel32t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa30004;
+}
+
+static void
+Opcode_ae_vlel16t_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xa40004;
+}
+
+static void
+Opcode_ae_sb_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf11004;
+}
+
+static void
+Opcode_ae_sbi_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xf00004;
+}
+
+static void
+Opcode_ae_vles16c_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe7c014;
+}
+
+static void
+Opcode_ae_sbf_Slot_inst_encode (xtensa_insnbuf slotbuf)
+{
+ slotbuf[0] = 0xe7d014;
+}
+
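+/*
+ * Per-opcode encode-function tables. Each array holds one entry per
+ * instruction-format slot -- inst, inst16a, inst16b, ae_slot1, ae_slot0,
+ * in that order, matching the *_Slot_* suffixes of the encoder functions
+ * above -- and a zero entry means the opcode cannot be encoded in that
+ * slot. Each non-zero entry points at an encoder that stores the opcode's
+ * fixed bit pattern into the slot buffer.
+ */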
+static xtensa_opcode_encode_fn Opcode_excw_encode_fns[] = {
+ Opcode_excw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfe_encode_fns[] = {
+ Opcode_rfe_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfde_encode_fns[] = {
+ Opcode_rfde_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_syscall_encode_fns[] = {
+ Opcode_syscall_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call12_encode_fns[] = {
+ Opcode_call12_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call8_encode_fns[] = {
+ Opcode_call8_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_call4_encode_fns[] = {
+ Opcode_call4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx12_encode_fns[] = {
+ Opcode_callx12_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx8_encode_fns[] = {
+ Opcode_callx8_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx4_encode_fns[] = {
+ Opcode_callx4_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_entry_encode_fns[] = {
+ Opcode_entry_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movsp_encode_fns[] = {
+ Opcode_movsp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rotw_encode_fns[] = {
+ Opcode_rotw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_encode_fns[] = {
+ Opcode_retw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_retw_n_encode_fns[] = {
+ 0, 0, Opcode_retw_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwo_encode_fns[] = {
+ Opcode_rfwo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfwu_encode_fns[] = {
+ Opcode_rfwu_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32e_encode_fns[] = {
+ Opcode_l32e_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32e_encode_fns[] = {
+ Opcode_s32e_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowbase_encode_fns[] = {
+ Opcode_rsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowbase_encode_fns[] = {
+ Opcode_wsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowbase_encode_fns[] = {
+ Opcode_xsr_windowbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_windowstart_encode_fns[] = {
+ Opcode_rsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_windowstart_encode_fns[] = {
+ Opcode_wsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_windowstart_encode_fns[] = {
+ Opcode_xsr_windowstart_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_add_n_encode_fns[] = {
+ 0, Opcode_add_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_n_encode_fns[] = {
+ 0, Opcode_addi_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_n_encode_fns[] = {
+ 0, 0, Opcode_beqz_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_n_encode_fns[] = {
+ 0, 0, Opcode_bnez_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_n_encode_fns[] = {
+ 0, 0, Opcode_ill_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_n_encode_fns[] = {
+ 0, Opcode_l32i_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mov_n_encode_fns[] = {
+ 0, 0, Opcode_mov_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_n_encode_fns[] = {
+ 0, 0, Opcode_movi_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_n_encode_fns[] = {
+ 0, 0, Opcode_nop_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_n_encode_fns[] = {
+ 0, 0, Opcode_ret_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_n_encode_fns[] = {
+ 0, Opcode_s32i_n_Slot_inst16a_encode, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_threadptr_encode_fns[] = {
+ Opcode_rur_threadptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_threadptr_encode_fns[] = {
+ Opcode_wur_threadptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_addi_encode_fns[] = {
+ Opcode_addi_Slot_inst_encode, 0, 0, 0, Opcode_addi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addmi_encode_fns[] = {
+ Opcode_addmi_Slot_inst_encode, 0, 0, 0, Opcode_addmi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_add_encode_fns[] = {
+ Opcode_add_Slot_inst_encode, 0, 0, 0, Opcode_add_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sub_encode_fns[] = {
+ Opcode_sub_Slot_inst_encode, 0, 0, 0, Opcode_sub_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx2_encode_fns[] = {
+ Opcode_addx2_Slot_inst_encode, 0, 0, 0, Opcode_addx2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx4_encode_fns[] = {
+ Opcode_addx4_Slot_inst_encode, 0, 0, 0, Opcode_addx4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_addx8_encode_fns[] = {
+ Opcode_addx8_Slot_inst_encode, 0, 0, 0, Opcode_addx8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx2_encode_fns[] = {
+ Opcode_subx2_Slot_inst_encode, 0, 0, 0, Opcode_subx2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx4_encode_fns[] = {
+ Opcode_subx4_Slot_inst_encode, 0, 0, 0, Opcode_subx4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_subx8_encode_fns[] = {
+ Opcode_subx8_Slot_inst_encode, 0, 0, 0, Opcode_subx8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_and_encode_fns[] = {
+ Opcode_and_Slot_inst_encode, 0, 0, 0, Opcode_and_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_or_encode_fns[] = {
+ Opcode_or_Slot_inst_encode, 0, 0, 0, Opcode_or_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_xor_encode_fns[] = {
+ Opcode_xor_Slot_inst_encode, 0, 0, 0, Opcode_xor_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqi_encode_fns[] = {
+ Opcode_beqi_Slot_inst_encode, 0, 0, 0, Opcode_beqi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnei_encode_fns[] = {
+ Opcode_bnei_Slot_inst_encode, 0, 0, 0, Opcode_bnei_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgei_encode_fns[] = {
+ Opcode_bgei_Slot_inst_encode, 0, 0, 0, Opcode_bgei_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blti_encode_fns[] = {
+ Opcode_blti_Slot_inst_encode, 0, 0, 0, Opcode_blti_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbci_encode_fns[] = {
+ Opcode_bbci_Slot_inst_encode, 0, 0, 0, Opcode_bbci_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbsi_encode_fns[] = {
+ Opcode_bbsi_Slot_inst_encode, 0, 0, 0, Opcode_bbsi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeui_encode_fns[] = {
+ Opcode_bgeui_Slot_inst_encode, 0, 0, 0, Opcode_bgeui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltui_encode_fns[] = {
+ Opcode_bltui_Slot_inst_encode, 0, 0, 0, Opcode_bltui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beq_encode_fns[] = {
+ Opcode_beq_Slot_inst_encode, 0, 0, 0, Opcode_beq_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bne_encode_fns[] = {
+ Opcode_bne_Slot_inst_encode, 0, 0, 0, Opcode_bne_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bge_encode_fns[] = {
+ Opcode_bge_Slot_inst_encode, 0, 0, 0, Opcode_bge_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_blt_encode_fns[] = {
+ Opcode_blt_Slot_inst_encode, 0, 0, 0, Opcode_blt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgeu_encode_fns[] = {
+ Opcode_bgeu_Slot_inst_encode, 0, 0, 0, Opcode_bgeu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltu_encode_fns[] = {
+ Opcode_bltu_Slot_inst_encode, 0, 0, 0, Opcode_bltu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bany_encode_fns[] = {
+ Opcode_bany_Slot_inst_encode, 0, 0, 0, Opcode_bany_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnone_encode_fns[] = {
+ Opcode_bnone_Slot_inst_encode, 0, 0, 0, Opcode_bnone_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ball_encode_fns[] = {
+ Opcode_ball_Slot_inst_encode, 0, 0, 0, Opcode_ball_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnall_encode_fns[] = {
+ Opcode_bnall_Slot_inst_encode, 0, 0, 0, Opcode_bnall_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbc_encode_fns[] = {
+ Opcode_bbc_Slot_inst_encode, 0, 0, 0, Opcode_bbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bbs_encode_fns[] = {
+ Opcode_bbs_Slot_inst_encode, 0, 0, 0, Opcode_bbs_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_beqz_encode_fns[] = {
+ Opcode_beqz_Slot_inst_encode, 0, 0, 0, Opcode_beqz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bnez_encode_fns[] = {
+ Opcode_bnez_Slot_inst_encode, 0, 0, 0, Opcode_bnez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bgez_encode_fns[] = {
+ Opcode_bgez_Slot_inst_encode, 0, 0, 0, Opcode_bgez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bltz_encode_fns[] = {
+ Opcode_bltz_Slot_inst_encode, 0, 0, 0, Opcode_bltz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_call0_encode_fns[] = {
+ Opcode_call0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_callx0_encode_fns[] = {
+ Opcode_callx0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extui_encode_fns[] = {
+ Opcode_extui_Slot_inst_encode, 0, 0, 0, Opcode_extui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ill_encode_fns[] = {
+ Opcode_ill_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_j_encode_fns[] = {
+ Opcode_j_Slot_inst_encode, 0, 0, 0, Opcode_j_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_jx_encode_fns[] = {
+ Opcode_jx_Slot_inst_encode, 0, 0, 0, Opcode_jx_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l16ui_encode_fns[] = {
+ Opcode_l16ui_Slot_inst_encode, 0, 0, 0, Opcode_l16ui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l16si_encode_fns[] = {
+ Opcode_l16si_Slot_inst_encode, 0, 0, 0, Opcode_l16si_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32i_encode_fns[] = {
+ Opcode_l32i_Slot_inst_encode, 0, 0, 0, Opcode_l32i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32r_encode_fns[] = {
+ Opcode_l32r_Slot_inst_encode, 0, 0, 0, Opcode_l32r_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l8ui_encode_fns[] = {
+ Opcode_l8ui_Slot_inst_encode, 0, 0, 0, Opcode_l8ui_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_loop_encode_fns[] = {
+ Opcode_loop_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopnez_encode_fns[] = {
+ Opcode_loopnez_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_loopgtz_encode_fns[] = {
+ Opcode_loopgtz_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_movi_encode_fns[] = {
+ Opcode_movi_Slot_inst_encode, 0, 0, 0, Opcode_movi_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_moveqz_encode_fns[] = {
+ Opcode_moveqz_Slot_inst_encode, 0, 0, 0, Opcode_moveqz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movnez_encode_fns[] = {
+ Opcode_movnez_Slot_inst_encode, 0, 0, 0, Opcode_movnez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movltz_encode_fns[] = {
+ Opcode_movltz_Slot_inst_encode, 0, 0, 0, Opcode_movltz_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movgez_encode_fns[] = {
+ Opcode_movgez_Slot_inst_encode, 0, 0, 0, Opcode_movgez_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_neg_encode_fns[] = {
+ Opcode_neg_Slot_inst_encode, 0, 0, 0, Opcode_neg_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_abs_encode_fns[] = {
+ Opcode_abs_Slot_inst_encode, 0, 0, 0, Opcode_abs_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nop_encode_fns[] = {
+ Opcode_nop_Slot_inst_encode, 0, 0, Opcode_nop_Slot_ae_slot1_encode, Opcode_nop_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ret_encode_fns[] = {
+ Opcode_ret_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_simcall_encode_fns[] = {
+ Opcode_simcall_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s16i_encode_fns[] = {
+ Opcode_s16i_Slot_inst_encode, 0, 0, 0, Opcode_s16i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s32i_encode_fns[] = {
+ Opcode_s32i_Slot_inst_encode, 0, 0, 0, Opcode_s32i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_s8i_encode_fns[] = {
+ Opcode_s8i_Slot_inst_encode, 0, 0, 0, Opcode_s8i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssr_encode_fns[] = {
+ Opcode_ssr_Slot_inst_encode, 0, 0, 0, Opcode_ssr_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssl_encode_fns[] = {
+ Opcode_ssl_Slot_inst_encode, 0, 0, 0, Opcode_ssl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8l_encode_fns[] = {
+ Opcode_ssa8l_Slot_inst_encode, 0, 0, 0, Opcode_ssa8l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssa8b_encode_fns[] = {
+ Opcode_ssa8b_Slot_inst_encode, 0, 0, 0, Opcode_ssa8b_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ssai_encode_fns[] = {
+ Opcode_ssai_Slot_inst_encode, 0, 0, 0, Opcode_ssai_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sll_encode_fns[] = {
+ Opcode_sll_Slot_inst_encode, 0, 0, 0, Opcode_sll_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_src_encode_fns[] = {
+ Opcode_src_Slot_inst_encode, 0, 0, 0, Opcode_src_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srl_encode_fns[] = {
+ Opcode_srl_Slot_inst_encode, 0, 0, 0, Opcode_srl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_sra_encode_fns[] = {
+ Opcode_sra_Slot_inst_encode, 0, 0, 0, Opcode_sra_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_slli_encode_fns[] = {
+ Opcode_slli_Slot_inst_encode, 0, 0, 0, Opcode_slli_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srai_encode_fns[] = {
+ Opcode_srai_Slot_inst_encode, 0, 0, 0, Opcode_srai_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_srli_encode_fns[] = {
+ Opcode_srli_Slot_inst_encode, 0, 0, 0, Opcode_srli_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_memw_encode_fns[] = {
+ Opcode_memw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_extw_encode_fns[] = {
+ Opcode_extw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_isync_encode_fns[] = {
+ Opcode_isync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsync_encode_fns[] = {
+ Opcode_rsync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_esync_encode_fns[] = {
+ Opcode_esync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dsync_encode_fns[] = {
+ Opcode_dsync_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsil_encode_fns[] = {
+ Opcode_rsil_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lend_encode_fns[] = {
+ Opcode_rsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lend_encode_fns[] = {
+ Opcode_wsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lend_encode_fns[] = {
+ Opcode_xsr_lend_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lcount_encode_fns[] = {
+ Opcode_rsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lcount_encode_fns[] = {
+ Opcode_wsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lcount_encode_fns[] = {
+ Opcode_xsr_lcount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_lbeg_encode_fns[] = {
+ Opcode_rsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_lbeg_encode_fns[] = {
+ Opcode_wsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_lbeg_encode_fns[] = {
+ Opcode_xsr_lbeg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_sar_encode_fns[] = {
+ Opcode_rsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_sar_encode_fns[] = {
+ Opcode_wsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_sar_encode_fns[] = {
+ Opcode_xsr_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_litbase_encode_fns[] = {
+ Opcode_rsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_litbase_encode_fns[] = {
+ Opcode_wsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_litbase_encode_fns[] = {
+ Opcode_xsr_litbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid0_encode_fns[] = {
+ Opcode_rsr_configid0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_configid0_encode_fns[] = {
+ Opcode_wsr_configid0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_configid1_encode_fns[] = {
+ Opcode_rsr_configid1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ps_encode_fns[] = {
+ Opcode_rsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ps_encode_fns[] = {
+ Opcode_wsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ps_encode_fns[] = {
+ Opcode_xsr_ps_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc1_encode_fns[] = {
+ Opcode_rsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc1_encode_fns[] = {
+ Opcode_wsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc1_encode_fns[] = {
+ Opcode_xsr_epc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave1_encode_fns[] = {
+ Opcode_rsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave1_encode_fns[] = {
+ Opcode_wsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave1_encode_fns[] = {
+ Opcode_xsr_excsave1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_epc2_encode_fns[] = {
+ Opcode_rsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_epc2_encode_fns[] = {
+ Opcode_wsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_epc2_encode_fns[] = {
+ Opcode_xsr_epc2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excsave2_encode_fns[] = {
+ Opcode_rsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excsave2_encode_fns[] = {
+ Opcode_wsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excsave2_encode_fns[] = {
+ Opcode_xsr_excsave2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_eps2_encode_fns[] = {
+ Opcode_rsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_eps2_encode_fns[] = {
+ Opcode_wsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_eps2_encode_fns[] = {
+ Opcode_xsr_eps2_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_excvaddr_encode_fns[] = {
+ Opcode_rsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_excvaddr_encode_fns[] = {
+ Opcode_wsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_excvaddr_encode_fns[] = {
+ Opcode_xsr_excvaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_depc_encode_fns[] = {
+ Opcode_rsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_depc_encode_fns[] = {
+ Opcode_wsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_depc_encode_fns[] = {
+ Opcode_xsr_depc_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_exccause_encode_fns[] = {
+ Opcode_rsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_exccause_encode_fns[] = {
+ Opcode_wsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_exccause_encode_fns[] = {
+ Opcode_xsr_exccause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc0_encode_fns[] = {
+ Opcode_rsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc0_encode_fns[] = {
+ Opcode_wsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc0_encode_fns[] = {
+ Opcode_xsr_misc0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_misc1_encode_fns[] = {
+ Opcode_rsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_misc1_encode_fns[] = {
+ Opcode_wsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_misc1_encode_fns[] = {
+ Opcode_xsr_misc1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_prid_encode_fns[] = {
+ Opcode_rsr_prid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_vecbase_encode_fns[] = {
+ Opcode_rsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_vecbase_encode_fns[] = {
+ Opcode_wsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_vecbase_encode_fns[] = {
+ Opcode_xsr_vecbase_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16u_encode_fns[] = {
+ Opcode_mul16u_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mul16s_encode_fns[] = {
+ Opcode_mul16s_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_mull_encode_fns[] = {
+ Opcode_mull_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfi_encode_fns[] = {
+ Opcode_rfi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_waiti_encode_fns[] = {
+ Opcode_waiti_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_interrupt_encode_fns[] = {
+ Opcode_rsr_interrupt_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intset_encode_fns[] = {
+ Opcode_wsr_intset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intclear_encode_fns[] = {
+ Opcode_wsr_intclear_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_intenable_encode_fns[] = {
+ Opcode_rsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_intenable_encode_fns[] = {
+ Opcode_wsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_intenable_encode_fns[] = {
+ Opcode_xsr_intenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_encode_fns[] = {
+ Opcode_break_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_break_n_encode_fns[] = {
+ 0, 0, Opcode_break_n_Slot_inst16b_encode, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_debugcause_encode_fns[] = {
+ Opcode_rsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_debugcause_encode_fns[] = {
+ Opcode_wsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_debugcause_encode_fns[] = {
+ Opcode_xsr_debugcause_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icount_encode_fns[] = {
+ Opcode_rsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icount_encode_fns[] = {
+ Opcode_wsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icount_encode_fns[] = {
+ Opcode_xsr_icount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_icountlevel_encode_fns[] = {
+ Opcode_rsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_icountlevel_encode_fns[] = {
+ Opcode_wsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_icountlevel_encode_fns[] = {
+ Opcode_xsr_icountlevel_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ddr_encode_fns[] = {
+ Opcode_rsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ddr_encode_fns[] = {
+ Opcode_wsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ddr_encode_fns[] = {
+ Opcode_xsr_ddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdo_encode_fns[] = {
+ Opcode_rfdo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rfdd_encode_fns[] = {
+ Opcode_rfdd_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_andb_encode_fns[] = {
+ Opcode_andb_Slot_inst_encode, 0, 0, 0, Opcode_andb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_andbc_encode_fns[] = {
+ Opcode_andbc_Slot_inst_encode, 0, 0, 0, Opcode_andbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_orb_encode_fns[] = {
+ Opcode_orb_Slot_inst_encode, 0, 0, 0, Opcode_orb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_orbc_encode_fns[] = {
+ Opcode_orbc_Slot_inst_encode, 0, 0, 0, Opcode_orbc_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_xorb_encode_fns[] = {
+ Opcode_xorb_Slot_inst_encode, 0, 0, 0, Opcode_xorb_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_any4_encode_fns[] = {
+ Opcode_any4_Slot_inst_encode, 0, 0, 0, Opcode_any4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all4_encode_fns[] = {
+ Opcode_all4_Slot_inst_encode, 0, 0, 0, Opcode_all4_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_any8_encode_fns[] = {
+ Opcode_any8_Slot_inst_encode, 0, 0, 0, Opcode_any8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_all8_encode_fns[] = {
+ Opcode_all8_Slot_inst_encode, 0, 0, 0, Opcode_all8_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bf_encode_fns[] = {
+ Opcode_bf_Slot_inst_encode, 0, 0, 0, Opcode_bf_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_bt_encode_fns[] = {
+ Opcode_bt_Slot_inst_encode, 0, 0, 0, Opcode_bt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movf_encode_fns[] = {
+ Opcode_movf_Slot_inst_encode, 0, 0, 0, Opcode_movf_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_movt_encode_fns[] = {
+ Opcode_movt_Slot_inst_encode, 0, 0, 0, Opcode_movt_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_br_encode_fns[] = {
+ Opcode_rsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_br_encode_fns[] = {
+ Opcode_wsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_br_encode_fns[] = {
+ Opcode_xsr_br_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccount_encode_fns[] = {
+ Opcode_rsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccount_encode_fns[] = {
+ Opcode_wsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccount_encode_fns[] = {
+ Opcode_xsr_ccount_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare0_encode_fns[] = {
+ Opcode_rsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare0_encode_fns[] = {
+ Opcode_wsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare0_encode_fns[] = {
+ Opcode_xsr_ccompare0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ccompare1_encode_fns[] = {
+ Opcode_rsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ccompare1_encode_fns[] = {
+ Opcode_wsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ccompare1_encode_fns[] = {
+ Opcode_xsr_ccompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ipf_encode_fns[] = {
+ Opcode_ipf_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ihi_encode_fns[] = {
+ Opcode_ihi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iii_encode_fns[] = {
+ Opcode_iii_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_lict_encode_fns[] = {
+ Opcode_lict_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_licw_encode_fns[] = {
+ Opcode_licw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sict_encode_fns[] = {
+ Opcode_sict_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sicw_encode_fns[] = {
+ Opcode_sicw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwb_encode_fns[] = {
+ Opcode_dhwb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhwbi_encode_fns[] = {
+ Opcode_dhwbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwb_encode_fns[] = {
+ Opcode_diwb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_diwbi_encode_fns[] = {
+ Opcode_diwbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dhi_encode_fns[] = {
+ Opcode_dhi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dii_encode_fns[] = {
+ Opcode_dii_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfr_encode_fns[] = {
+ Opcode_dpfr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfw_encode_fns[] = {
+ Opcode_dpfw_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfro_encode_fns[] = {
+ Opcode_dpfro_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_dpfwo_encode_fns[] = {
+ Opcode_dpfwo_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sdct_encode_fns[] = {
+ Opcode_sdct_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldct_encode_fns[] = {
+ Opcode_ldct_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_ptevaddr_encode_fns[] = {
+ Opcode_wsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_ptevaddr_encode_fns[] = {
+ Opcode_rsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_ptevaddr_encode_fns[] = {
+ Opcode_xsr_ptevaddr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_rasid_encode_fns[] = {
+ Opcode_rsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_rasid_encode_fns[] = {
+ Opcode_wsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_rasid_encode_fns[] = {
+ Opcode_xsr_rasid_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_itlbcfg_encode_fns[] = {
+ Opcode_rsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_itlbcfg_encode_fns[] = {
+ Opcode_wsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_itlbcfg_encode_fns[] = {
+ Opcode_xsr_itlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_dtlbcfg_encode_fns[] = {
+ Opcode_rsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_dtlbcfg_encode_fns[] = {
+ Opcode_wsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_dtlbcfg_encode_fns[] = {
+ Opcode_xsr_dtlbcfg_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_idtlb_encode_fns[] = {
+ Opcode_idtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pdtlb_encode_fns[] = {
+ Opcode_pdtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb0_encode_fns[] = {
+ Opcode_rdtlb0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rdtlb1_encode_fns[] = {
+ Opcode_rdtlb1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wdtlb_encode_fns[] = {
+ Opcode_wdtlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_iitlb_encode_fns[] = {
+ Opcode_iitlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_pitlb_encode_fns[] = {
+ Opcode_pitlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb0_encode_fns[] = {
+ Opcode_ritlb0_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ritlb1_encode_fns[] = {
+ Opcode_ritlb1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_witlb_encode_fns[] = {
+ Opcode_witlb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ldpte_encode_fns[] = {
+ Opcode_ldpte_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwitlba_encode_fns[] = {
+ Opcode_hwwitlba_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_hwwdtlba_encode_fns[] = {
+ Opcode_hwwdtlba_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_cpenable_encode_fns[] = {
+ Opcode_rsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_cpenable_encode_fns[] = {
+ Opcode_wsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_cpenable_encode_fns[] = {
+ Opcode_xsr_cpenable_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_clamps_encode_fns[] = {
+ Opcode_clamps_Slot_inst_encode, 0, 0, 0, Opcode_clamps_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_min_encode_fns[] = {
+ Opcode_min_Slot_inst_encode, 0, 0, 0, Opcode_min_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_max_encode_fns[] = {
+ Opcode_max_Slot_inst_encode, 0, 0, 0, Opcode_max_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_minu_encode_fns[] = {
+ Opcode_minu_Slot_inst_encode, 0, 0, 0, Opcode_minu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_maxu_encode_fns[] = {
+ Opcode_maxu_Slot_inst_encode, 0, 0, 0, Opcode_maxu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_nsa_encode_fns[] = {
+ Opcode_nsa_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_nsau_encode_fns[] = {
+ Opcode_nsau_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_sext_encode_fns[] = {
+ Opcode_sext_Slot_inst_encode, 0, 0, 0, Opcode_sext_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_l32ai_encode_fns[] = {
+ Opcode_l32ai_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32ri_encode_fns[] = {
+ Opcode_s32ri_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_s32c1i_encode_fns[] = {
+ Opcode_s32c1i_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_scompare1_encode_fns[] = {
+ Opcode_rsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_scompare1_encode_fns[] = {
+ Opcode_wsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_scompare1_encode_fns[] = {
+ Opcode_xsr_scompare1_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rsr_atomctl_encode_fns[] = {
+ Opcode_rsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wsr_atomctl_encode_fns[] = {
+ Opcode_wsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_xsr_atomctl_encode_fns[] = {
+ Opcode_xsr_atomctl_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rer_encode_fns[] = {
+ Opcode_rer_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wer_encode_fns[] = {
+ Opcode_wer_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_ovf_sar_encode_fns[] = {
+ Opcode_rur_ae_ovf_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_ovf_sar_encode_fns[] = {
+ Opcode_wur_ae_ovf_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bithead_encode_fns[] = {
+ Opcode_rur_ae_bithead_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bithead_encode_fns[] = {
+ Opcode_wur_ae_bithead_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_ts_fts_bu_bp_encode_fns[] = {
+ Opcode_rur_ae_ts_fts_bu_bp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_ts_fts_bu_bp_encode_fns[] = {
+ Opcode_wur_ae_ts_fts_bu_bp_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_sd_no_encode_fns[] = {
+ Opcode_rur_ae_sd_no_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_sd_no_encode_fns[] = {
+ Opcode_wur_ae_sd_no_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_overflow_encode_fns[] = {
+ Opcode_rur_ae_overflow_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_overflow_encode_fns[] = {
+ Opcode_wur_ae_overflow_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_sar_encode_fns[] = {
+ Opcode_rur_ae_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_sar_encode_fns[] = {
+ Opcode_wur_ae_sar_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bitptr_encode_fns[] = {
+ Opcode_rur_ae_bitptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bitptr_encode_fns[] = {
+ Opcode_wur_ae_bitptr_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_bitsused_encode_fns[] = {
+ Opcode_rur_ae_bitsused_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_bitsused_encode_fns[] = {
+ Opcode_wur_ae_bitsused_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_tablesize_encode_fns[] = {
+ Opcode_rur_ae_tablesize_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_tablesize_encode_fns[] = {
+ Opcode_wur_ae_tablesize_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_first_ts_encode_fns[] = {
+ Opcode_rur_ae_first_ts_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_first_ts_encode_fns[] = {
+ Opcode_wur_ae_first_ts_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_nextoffset_encode_fns[] = {
+ Opcode_rur_ae_nextoffset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_nextoffset_encode_fns[] = {
+ Opcode_wur_ae_nextoffset_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_rur_ae_searchdone_encode_fns[] = {
+ Opcode_rur_ae_searchdone_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_wur_ae_searchdone_encode_fns[] = {
+ Opcode_wur_ae_searchdone_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_i_encode_fns[] = {
+ Opcode_ae_lp16f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_iu_encode_fns[] = {
+ Opcode_ae_lp16f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_x_encode_fns[] = {
+ Opcode_ae_lp16f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16f_xu_encode_fns[] = {
+ Opcode_ae_lp16f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_i_encode_fns[] = {
+ Opcode_ae_lp24_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_iu_encode_fns[] = {
+ Opcode_ae_lp24_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_x_encode_fns[] = {
+ Opcode_ae_lp24_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24_xu_encode_fns[] = {
+ Opcode_ae_lp24_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_i_encode_fns[] = {
+ Opcode_ae_lp24f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_iu_encode_fns[] = {
+ Opcode_ae_lp24f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_x_encode_fns[] = {
+ Opcode_ae_lp24f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24f_xu_encode_fns[] = {
+ Opcode_ae_lp24f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_i_encode_fns[] = {
+ Opcode_ae_lp16x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_iu_encode_fns[] = {
+ Opcode_ae_lp16x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_x_encode_fns[] = {
+ Opcode_ae_lp16x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp16x2f_xu_encode_fns[] = {
+ Opcode_ae_lp16x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp16x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_i_encode_fns[] = {
+ Opcode_ae_lp24x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_iu_encode_fns[] = {
+ Opcode_ae_lp24x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_x_encode_fns[] = {
+ Opcode_ae_lp24x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2f_xu_encode_fns[] = {
+ Opcode_ae_lp24x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_i_encode_fns[] = {
+ Opcode_ae_lp24x2_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_iu_encode_fns[] = {
+ Opcode_ae_lp24x2_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_x_encode_fns[] = {
+ Opcode_ae_lp24x2_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lp24x2_xu_encode_fns[] = {
+ Opcode_ae_lp24x2_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lp24x2_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_i_encode_fns[] = {
+ Opcode_ae_sp16x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_iu_encode_fns[] = {
+ Opcode_ae_sp16x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_x_encode_fns[] = {
+ Opcode_ae_sp16x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16x2f_xu_encode_fns[] = {
+ Opcode_ae_sp16x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_i_encode_fns[] = {
+ Opcode_ae_sp24x2s_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_iu_encode_fns[] = {
+ Opcode_ae_sp24x2s_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_x_encode_fns[] = {
+ Opcode_ae_sp24x2s_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2s_xu_encode_fns[] = {
+ Opcode_ae_sp24x2s_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2s_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_i_encode_fns[] = {
+ Opcode_ae_sp24x2f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_iu_encode_fns[] = {
+ Opcode_ae_sp24x2f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_x_encode_fns[] = {
+ Opcode_ae_sp24x2f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24x2f_xu_encode_fns[] = {
+ Opcode_ae_sp24x2f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24x2f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_i_encode_fns[] = {
+ Opcode_ae_sp16f_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_iu_encode_fns[] = {
+ Opcode_ae_sp16f_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_x_encode_fns[] = {
+ Opcode_ae_sp16f_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp16f_l_xu_encode_fns[] = {
+ Opcode_ae_sp16f_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp16f_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_i_encode_fns[] = {
+ Opcode_ae_sp24s_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_iu_encode_fns[] = {
+ Opcode_ae_sp24s_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_x_encode_fns[] = {
+ Opcode_ae_sp24s_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24s_l_xu_encode_fns[] = {
+ Opcode_ae_sp24s_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24s_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_i_encode_fns[] = {
+ Opcode_ae_sp24f_l_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_iu_encode_fns[] = {
+ Opcode_ae_sp24f_l_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_x_encode_fns[] = {
+ Opcode_ae_sp24f_l_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sp24f_l_xu_encode_fns[] = {
+ Opcode_ae_sp24f_l_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sp24f_l_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_i_encode_fns[] = {
+ Opcode_ae_lq56_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_iu_encode_fns[] = {
+ Opcode_ae_lq56_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_x_encode_fns[] = {
+ Opcode_ae_lq56_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq56_xu_encode_fns[] = {
+ Opcode_ae_lq56_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq56_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_i_encode_fns[] = {
+ Opcode_ae_lq32f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_iu_encode_fns[] = {
+ Opcode_ae_lq32f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_x_encode_fns[] = {
+ Opcode_ae_lq32f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lq32f_xu_encode_fns[] = {
+ Opcode_ae_lq32f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_lq32f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_i_encode_fns[] = {
+ Opcode_ae_sq56s_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_iu_encode_fns[] = {
+ Opcode_ae_sq56s_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_x_encode_fns[] = {
+ Opcode_ae_sq56s_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq56s_xu_encode_fns[] = {
+ Opcode_ae_sq56s_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq56s_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_i_encode_fns[] = {
+ Opcode_ae_sq32f_i_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_i_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_iu_encode_fns[] = {
+ Opcode_ae_sq32f_iu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_iu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_x_encode_fns[] = {
+ Opcode_ae_sq32f_x_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_x_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sq32f_xu_encode_fns[] = {
+ Opcode_ae_sq32f_xu_Slot_inst_encode, 0, 0, 0, Opcode_ae_sq32f_xu_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_zerop48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_zerop48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movp48_encode_fns[] = {
+ Opcode_ae_movp48_Slot_inst_encode, 0, 0, Opcode_ae_movp48_Slot_ae_slot1_encode, Opcode_ae_movp48_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_selp24_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_selp24_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtp24x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movtp24x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfp24x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movfp24x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movtp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_movfp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movpa24x2_encode_fns[] = {
+ Opcode_ae_movpa24x2_Slot_inst_encode, 0, 0, 0, Opcode_ae_movpa24x2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp24a32x2_encode_fns[] = {
+ Opcode_ae_truncp24a32x2_Slot_inst_encode, 0, 0, 0, Opcode_ae_truncp24a32x2_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvta32p24_l_encode_fns[] = {
+ Opcode_ae_cvta32p24_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvta32p24_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvta32p24_h_encode_fns[] = {
+ Opcode_ae_cvta32p24_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvta32p24_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_ll_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_ll_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_ll_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_lh_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_lh_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_lh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_hl_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_hl_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_hl_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtp24a16x2_hh_encode_fns[] = {
+ Opcode_ae_cvtp24a16x2_hh_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtp24a16x2_hh_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp24q48x2_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncp24q48x2_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncp16_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncp16_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp24q48sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp24q48sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp24q48asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp24q48asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16q48sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16q48sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16q48asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16q48asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsp16asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsp16asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_zeroq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_zeroq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movq56_encode_fns[] = {
+ Opcode_ae_movq56_Slot_inst_encode, 0, 0, Opcode_ae_movq56_Slot_ae_slot1_encode, Opcode_ae_movq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movtq56_encode_fns[] = {
+ Opcode_ae_movtq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_movtq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movfq56_encode_fns[] = {
+ Opcode_ae_movfq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_movfq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48a32s_encode_fns[] = {
+ Opcode_ae_cvtq48a32s_Slot_inst_encode, 0, 0, 0, Opcode_ae_cvtq48a32s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48p24s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_cvtq48p24s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_cvtq48p24s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_cvtq48p24s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_satq48s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_satq48s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_truncq32_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_truncq32_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsq32sym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsq32sym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_roundsq32asym_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_roundsq32asym_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca32q48_encode_fns[] = {
+ Opcode_ae_trunca32q48_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca32q48_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movap24s_l_encode_fns[] = {
+ Opcode_ae_movap24s_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_movap24s_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_movap24s_h_encode_fns[] = {
+ Opcode_ae_movap24s_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_movap24s_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca16p24s_l_encode_fns[] = {
+ Opcode_ae_trunca16p24s_l_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca16p24s_l_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_trunca16p24s_h_encode_fns[] = {
+ Opcode_ae_trunca16p24s_h_Slot_inst_encode, 0, 0, 0, Opcode_ae_trunca16p24s_h_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_absp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_absp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxbp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxbp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minbp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minbp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negsp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negsp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_abssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_abssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_andp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_andp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nandp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_nandp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_orp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_orp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_xorp48_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_xorp48_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_ltp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_ltp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lep24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_lep24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_eqp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_eqp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_absq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_absq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_maxbq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_maxbq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_minbq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_minbq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_addsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_addsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_subsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_subsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_negsq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_negsq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_abssq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_abssq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_andq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_andq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nandq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_nandq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_orq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_orq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_xorq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_xorq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srlip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraip24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sraip24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllsp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllsp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlsp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srlsp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srasp24_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_srasp24_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllisp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllisp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllssp24s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_sllssp24s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_slliq56_encode_fns[] = {
+ Opcode_ae_slliq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_slliq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srliq56_encode_fns[] = {
+ Opcode_ae_srliq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srliq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraiq56_encode_fns[] = {
+ Opcode_ae_sraiq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sraiq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllsq56_encode_fns[] = {
+ Opcode_ae_sllsq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllsq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlsq56_encode_fns[] = {
+ Opcode_ae_srlsq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srlsq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srasq56_encode_fns[] = {
+ Opcode_ae_srasq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srasq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllaq56_encode_fns[] = {
+ Opcode_ae_sllaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_srlaq56_encode_fns[] = {
+ Opcode_ae_srlaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_srlaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sraaq56_encode_fns[] = {
+ Opcode_ae_sraaq56_Slot_inst_encode, 0, 0, 0, Opcode_ae_sraaq56_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllisq56s_encode_fns[] = {
+ Opcode_ae_sllisq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllisq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllssq56s_encode_fns[] = {
+ Opcode_ae_sllssq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllssq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sllasq56s_encode_fns[] = {
+ Opcode_ae_sllasq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_sllasq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_ltq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_ltq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_leq56s_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_leq56s_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_eqq56_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_eqq56_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_nsaq56s_encode_fns[] = {
+ Opcode_ae_nsaq56s_Slot_inst_encode, 0, 0, 0, Opcode_ae_nsaq56s_Slot_ae_slot0_encode
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulap24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulap24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs32p16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs32p16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsp24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsp24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafs56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafs56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulas56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulas56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_hl_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_hl_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfs56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfs56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulss56p24s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulss56p24s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulfq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulfq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulafq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulafq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsfq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsfq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16s_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16s_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16s_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16s_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16u_l_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16u_l_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsq32sp16u_h_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsq32sp16u_h_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaaq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaaq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsaq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsaq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_hh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_hh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16s_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16s_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfq32sp16u_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfq32sp16u_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzaap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzaap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzasp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzasp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzsap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzsap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulzssp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulzssp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulaap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulaap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulasp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulasp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsafp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsafp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsap24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsap24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsafp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsafp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulsap24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulsap24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssfp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssfp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssp24s_hh_ll_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssp24s_hh_ll_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssfp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssfp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_mulssp24s_hl_lh_encode_fns[] = {
+ 0, 0, 0, Opcode_ae_mulssp24s_hl_lh_Slot_ae_slot1_encode, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sha32_encode_fns[] = {
+ Opcode_ae_sha32_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl32t_encode_fns[] = {
+ Opcode_ae_vldl32t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl16t_encode_fns[] = {
+ Opcode_ae_vldl16t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldl16c_encode_fns[] = {
+ Opcode_ae_vldl16c_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vldsht_encode_fns[] = {
+ Opcode_ae_vldsht_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lb_encode_fns[] = {
+ Opcode_ae_lb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbi_encode_fns[] = {
+ Opcode_ae_lbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbk_encode_fns[] = {
+ Opcode_ae_lbk_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_lbki_encode_fns[] = {
+ Opcode_ae_lbki_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_db_encode_fns[] = {
+ Opcode_ae_db_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_dbi_encode_fns[] = {
+ Opcode_ae_dbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vlel32t_encode_fns[] = {
+ Opcode_ae_vlel32t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vlel16t_encode_fns[] = {
+ Opcode_ae_vlel16t_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sb_encode_fns[] = {
+ Opcode_ae_sb_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sbi_encode_fns[] = {
+ Opcode_ae_sbi_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_vles16c_encode_fns[] = {
+ Opcode_ae_vles16c_Slot_inst_encode, 0, 0, 0, 0
+};
+
+static xtensa_opcode_encode_fn Opcode_ae_sbf_encode_fns[] = {
+ Opcode_ae_sbf_Slot_inst_encode, 0, 0, 0, 0
+};
+
+
+/* Opcode table. */
+
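+/*
+ * Each xtensa_funcUnit_use pair below names a functional unit and the
+ * pipeline stage in which the opcode uses it (reading assumed from the
+ * libisa xtensa_funcUnit_use definition); e.g. { FUNCUNIT_ae_add32, 3 }
+ * reads as "uses the ae_add32 unit at stage 3".
+ */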
+static xtensa_funcUnit_use Opcode_ae_vldl32t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldl16t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldl16c_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_shift32x5, 3 },
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vldsht_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_shift32x5, 3 },
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lb_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbk_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_lbki_funcUnit_uses[] = {
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_db_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_dbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vlel32t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vlel16t_funcUnit_uses[] = {
+ { FUNCUNIT_ae_add32, 3 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sb_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sbi_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_vles16c_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
+static xtensa_funcUnit_use Opcode_ae_sbf_funcUnit_uses[] = {
+ { FUNCUNIT_ae_shift32x4, 2 },
+ { FUNCUNIT_ae_subshift, 2 }
+};
+
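+/*
+ * Entry layout (assumed from the libisa xtensa_opcode_internal struct):
+ * { name, iclass, flags, encode_fns, num_funcUnit_uses, funcUnit_uses }.
+ * The core opcodes here use no functional units, hence the trailing
+ * "0, 0"; the *_funcUnit_uses arrays above are presumably referenced
+ * by the AE opcode entries further down this table.
+ */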
+static xtensa_opcode_internal opcodes[] = {
+ { "excw", ICLASS_xt_iclass_excw,
+ 0,
+ Opcode_excw_encode_fns, 0, 0 },
+ { "rfe", ICLASS_xt_iclass_rfe,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfe_encode_fns, 0, 0 },
+ { "rfde", ICLASS_xt_iclass_rfde,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfde_encode_fns, 0, 0 },
+ { "syscall", ICLASS_xt_iclass_syscall,
+ 0,
+ Opcode_syscall_encode_fns, 0, 0 },
+ { "call12", ICLASS_xt_iclass_call12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call12_encode_fns, 0, 0 },
+ { "call8", ICLASS_xt_iclass_call8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call8_encode_fns, 0, 0 },
+ { "call4", ICLASS_xt_iclass_call4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call4_encode_fns, 0, 0 },
+ { "callx12", ICLASS_xt_iclass_callx12,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx12_encode_fns, 0, 0 },
+ { "callx8", ICLASS_xt_iclass_callx8,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx8_encode_fns, 0, 0 },
+ { "callx4", ICLASS_xt_iclass_callx4,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx4_encode_fns, 0, 0 },
+ { "entry", ICLASS_xt_iclass_entry,
+ 0,
+ Opcode_entry_encode_fns, 0, 0 },
+ { "movsp", ICLASS_xt_iclass_movsp,
+ 0,
+ Opcode_movsp_encode_fns, 0, 0 },
+ { "rotw", ICLASS_xt_iclass_rotw,
+ 0,
+ Opcode_rotw_encode_fns, 0, 0 },
+ { "retw", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_encode_fns, 0, 0 },
+ { "retw.n", ICLASS_xt_iclass_retw,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_retw_n_encode_fns, 0, 0 },
+ { "rfwo", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwo_encode_fns, 0, 0 },
+ { "rfwu", ICLASS_xt_iclass_rfwou,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfwu_encode_fns, 0, 0 },
+ { "l32e", ICLASS_xt_iclass_l32e,
+ 0,
+ Opcode_l32e_encode_fns, 0, 0 },
+ { "s32e", ICLASS_xt_iclass_s32e,
+ 0,
+ Opcode_s32e_encode_fns, 0, 0 },
+ { "rsr.windowbase", ICLASS_xt_iclass_rsr_windowbase,
+ 0,
+ Opcode_rsr_windowbase_encode_fns, 0, 0 },
+ { "wsr.windowbase", ICLASS_xt_iclass_wsr_windowbase,
+ 0,
+ Opcode_wsr_windowbase_encode_fns, 0, 0 },
+ { "xsr.windowbase", ICLASS_xt_iclass_xsr_windowbase,
+ 0,
+ Opcode_xsr_windowbase_encode_fns, 0, 0 },
+ { "rsr.windowstart", ICLASS_xt_iclass_rsr_windowstart,
+ 0,
+ Opcode_rsr_windowstart_encode_fns, 0, 0 },
+ { "wsr.windowstart", ICLASS_xt_iclass_wsr_windowstart,
+ 0,
+ Opcode_wsr_windowstart_encode_fns, 0, 0 },
+ { "xsr.windowstart", ICLASS_xt_iclass_xsr_windowstart,
+ 0,
+ Opcode_xsr_windowstart_encode_fns, 0, 0 },
+ { "add.n", ICLASS_xt_iclass_add_n,
+ 0,
+ Opcode_add_n_encode_fns, 0, 0 },
+ { "addi.n", ICLASS_xt_iclass_addi_n,
+ 0,
+ Opcode_addi_n_encode_fns, 0, 0 },
+ { "beqz.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_n_encode_fns, 0, 0 },
+ { "bnez.n", ICLASS_xt_iclass_bz6,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_n_encode_fns, 0, 0 },
+ { "ill.n", ICLASS_xt_iclass_ill_n,
+ 0,
+ Opcode_ill_n_encode_fns, 0, 0 },
+ { "l32i.n", ICLASS_xt_iclass_loadi4,
+ 0,
+ Opcode_l32i_n_encode_fns, 0, 0 },
+ { "mov.n", ICLASS_xt_iclass_mov_n,
+ 0,
+ Opcode_mov_n_encode_fns, 0, 0 },
+ { "movi.n", ICLASS_xt_iclass_movi_n,
+ 0,
+ Opcode_movi_n_encode_fns, 0, 0 },
+ { "nop.n", ICLASS_xt_iclass_nopn,
+ 0,
+ Opcode_nop_n_encode_fns, 0, 0 },
+ { "ret.n", ICLASS_xt_iclass_retn,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_n_encode_fns, 0, 0 },
+ { "s32i.n", ICLASS_xt_iclass_storei4,
+ 0,
+ Opcode_s32i_n_encode_fns, 0, 0 },
+ { "rur.threadptr", ICLASS_rur_threadptr,
+ 0,
+ Opcode_rur_threadptr_encode_fns, 0, 0 },
+ { "wur.threadptr", ICLASS_wur_threadptr,
+ 0,
+ Opcode_wur_threadptr_encode_fns, 0, 0 },
+ { "addi", ICLASS_xt_iclass_addi,
+ 0,
+ Opcode_addi_encode_fns, 0, 0 },
+ { "addmi", ICLASS_xt_iclass_addmi,
+ 0,
+ Opcode_addmi_encode_fns, 0, 0 },
+ { "add", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_add_encode_fns, 0, 0 },
+ { "sub", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_sub_encode_fns, 0, 0 },
+ { "addx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx2_encode_fns, 0, 0 },
+ { "addx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx4_encode_fns, 0, 0 },
+ { "addx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_addx8_encode_fns, 0, 0 },
+ { "subx2", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx2_encode_fns, 0, 0 },
+ { "subx4", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx4_encode_fns, 0, 0 },
+ { "subx8", ICLASS_xt_iclass_addsub,
+ 0,
+ Opcode_subx8_encode_fns, 0, 0 },
+ { "and", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_and_encode_fns, 0, 0 },
+ { "or", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_or_encode_fns, 0, 0 },
+ { "xor", ICLASS_xt_iclass_bit,
+ 0,
+ Opcode_xor_encode_fns, 0, 0 },
+ { "beqi", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqi_encode_fns, 0, 0 },
+ { "bnei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnei_encode_fns, 0, 0 },
+ { "bgei", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgei_encode_fns, 0, 0 },
+ { "blti", ICLASS_xt_iclass_bsi8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blti_encode_fns, 0, 0 },
+ { "bbci", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbci_encode_fns, 0, 0 },
+ { "bbsi", ICLASS_xt_iclass_bsi8b,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbsi_encode_fns, 0, 0 },
+ { "bgeui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeui_encode_fns, 0, 0 },
+ { "bltui", ICLASS_xt_iclass_bsi8u,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltui_encode_fns, 0, 0 },
+ { "beq", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beq_encode_fns, 0, 0 },
+ { "bne", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bne_encode_fns, 0, 0 },
+ { "bge", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bge_encode_fns, 0, 0 },
+ { "blt", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_blt_encode_fns, 0, 0 },
+ { "bgeu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgeu_encode_fns, 0, 0 },
+ { "bltu", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltu_encode_fns, 0, 0 },
+ { "bany", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bany_encode_fns, 0, 0 },
+ { "bnone", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnone_encode_fns, 0, 0 },
+ { "ball", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_ball_encode_fns, 0, 0 },
+ { "bnall", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnall_encode_fns, 0, 0 },
+ { "bbc", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbc_encode_fns, 0, 0 },
+ { "bbs", ICLASS_xt_iclass_bst8,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bbs_encode_fns, 0, 0 },
+ { "beqz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_beqz_encode_fns, 0, 0 },
+ { "bnez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bnez_encode_fns, 0, 0 },
+ { "bgez", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bgez_encode_fns, 0, 0 },
+ { "bltz", ICLASS_xt_iclass_bsz12,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bltz_encode_fns, 0, 0 },
+ { "call0", ICLASS_xt_iclass_call0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_call0_encode_fns, 0, 0 },
+ { "callx0", ICLASS_xt_iclass_callx0,
+ XTENSA_OPCODE_IS_CALL,
+ Opcode_callx0_encode_fns, 0, 0 },
+ { "extui", ICLASS_xt_iclass_exti,
+ 0,
+ Opcode_extui_encode_fns, 0, 0 },
+ { "ill", ICLASS_xt_iclass_ill,
+ 0,
+ Opcode_ill_encode_fns, 0, 0 },
+ { "j", ICLASS_xt_iclass_jump,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_j_encode_fns, 0, 0 },
+ { "jx", ICLASS_xt_iclass_jumpx,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_jx_encode_fns, 0, 0 },
+ { "l16ui", ICLASS_xt_iclass_l16ui,
+ 0,
+ Opcode_l16ui_encode_fns, 0, 0 },
+ { "l16si", ICLASS_xt_iclass_l16si,
+ 0,
+ Opcode_l16si_encode_fns, 0, 0 },
+ { "l32i", ICLASS_xt_iclass_l32i,
+ 0,
+ Opcode_l32i_encode_fns, 0, 0 },
+ { "l32r", ICLASS_xt_iclass_l32r,
+ 0,
+ Opcode_l32r_encode_fns, 0, 0 },
+ { "l8ui", ICLASS_xt_iclass_l8i,
+ 0,
+ Opcode_l8ui_encode_fns, 0, 0 },
+ { "loop", ICLASS_xt_iclass_loop,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loop_encode_fns, 0, 0 },
+ { "loopnez", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopnez_encode_fns, 0, 0 },
+ { "loopgtz", ICLASS_xt_iclass_loopz,
+ XTENSA_OPCODE_IS_LOOP,
+ Opcode_loopgtz_encode_fns, 0, 0 },
+ { "movi", ICLASS_xt_iclass_movi,
+ 0,
+ Opcode_movi_encode_fns, 0, 0 },
+ { "moveqz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_moveqz_encode_fns, 0, 0 },
+ { "movnez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movnez_encode_fns, 0, 0 },
+ { "movltz", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movltz_encode_fns, 0, 0 },
+ { "movgez", ICLASS_xt_iclass_movz,
+ 0,
+ Opcode_movgez_encode_fns, 0, 0 },
+ { "neg", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_neg_encode_fns, 0, 0 },
+ { "abs", ICLASS_xt_iclass_neg,
+ 0,
+ Opcode_abs_encode_fns, 0, 0 },
+ { "nop", ICLASS_xt_iclass_nop,
+ 0,
+ Opcode_nop_encode_fns, 0, 0 },
+ { "ret", ICLASS_xt_iclass_return,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_ret_encode_fns, 0, 0 },
+ { "simcall", ICLASS_xt_iclass_simcall,
+ 0,
+ Opcode_simcall_encode_fns, 0, 0 },
+ { "s16i", ICLASS_xt_iclass_s16i,
+ 0,
+ Opcode_s16i_encode_fns, 0, 0 },
+ { "s32i", ICLASS_xt_iclass_s32i,
+ 0,
+ Opcode_s32i_encode_fns, 0, 0 },
+ { "s8i", ICLASS_xt_iclass_s8i,
+ 0,
+ Opcode_s8i_encode_fns, 0, 0 },
+ { "ssr", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssr_encode_fns, 0, 0 },
+ { "ssl", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssl_encode_fns, 0, 0 },
+ { "ssa8l", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8l_encode_fns, 0, 0 },
+ { "ssa8b", ICLASS_xt_iclass_sar,
+ 0,
+ Opcode_ssa8b_encode_fns, 0, 0 },
+ { "ssai", ICLASS_xt_iclass_sari,
+ 0,
+ Opcode_ssai_encode_fns, 0, 0 },
+ { "sll", ICLASS_xt_iclass_shifts,
+ 0,
+ Opcode_sll_encode_fns, 0, 0 },
+ { "src", ICLASS_xt_iclass_shiftst,
+ 0,
+ Opcode_src_encode_fns, 0, 0 },
+ { "srl", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_srl_encode_fns, 0, 0 },
+ { "sra", ICLASS_xt_iclass_shiftt,
+ 0,
+ Opcode_sra_encode_fns, 0, 0 },
+ { "slli", ICLASS_xt_iclass_slli,
+ 0,
+ Opcode_slli_encode_fns, 0, 0 },
+ { "srai", ICLASS_xt_iclass_srai,
+ 0,
+ Opcode_srai_encode_fns, 0, 0 },
+ { "srli", ICLASS_xt_iclass_srli,
+ 0,
+ Opcode_srli_encode_fns, 0, 0 },
+ { "memw", ICLASS_xt_iclass_memw,
+ 0,
+ Opcode_memw_encode_fns, 0, 0 },
+ { "extw", ICLASS_xt_iclass_extw,
+ 0,
+ Opcode_extw_encode_fns, 0, 0 },
+ { "isync", ICLASS_xt_iclass_isync,
+ 0,
+ Opcode_isync_encode_fns, 0, 0 },
+ { "rsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_rsync_encode_fns, 0, 0 },
+ { "esync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_esync_encode_fns, 0, 0 },
+ { "dsync", ICLASS_xt_iclass_sync,
+ 0,
+ Opcode_dsync_encode_fns, 0, 0 },
+ { "rsil", ICLASS_xt_iclass_rsil,
+ 0,
+ Opcode_rsil_encode_fns, 0, 0 },
+ { "rsr.lend", ICLASS_xt_iclass_rsr_lend,
+ 0,
+ Opcode_rsr_lend_encode_fns, 0, 0 },
+ { "wsr.lend", ICLASS_xt_iclass_wsr_lend,
+ 0,
+ Opcode_wsr_lend_encode_fns, 0, 0 },
+ { "xsr.lend", ICLASS_xt_iclass_xsr_lend,
+ 0,
+ Opcode_xsr_lend_encode_fns, 0, 0 },
+ { "rsr.lcount", ICLASS_xt_iclass_rsr_lcount,
+ 0,
+ Opcode_rsr_lcount_encode_fns, 0, 0 },
+ { "wsr.lcount", ICLASS_xt_iclass_wsr_lcount,
+ 0,
+ Opcode_wsr_lcount_encode_fns, 0, 0 },
+ { "xsr.lcount", ICLASS_xt_iclass_xsr_lcount,
+ 0,
+ Opcode_xsr_lcount_encode_fns, 0, 0 },
+ { "rsr.lbeg", ICLASS_xt_iclass_rsr_lbeg,
+ 0,
+ Opcode_rsr_lbeg_encode_fns, 0, 0 },
+ { "wsr.lbeg", ICLASS_xt_iclass_wsr_lbeg,
+ 0,
+ Opcode_wsr_lbeg_encode_fns, 0, 0 },
+ { "xsr.lbeg", ICLASS_xt_iclass_xsr_lbeg,
+ 0,
+ Opcode_xsr_lbeg_encode_fns, 0, 0 },
+ { "rsr.sar", ICLASS_xt_iclass_rsr_sar,
+ 0,
+ Opcode_rsr_sar_encode_fns, 0, 0 },
+ { "wsr.sar", ICLASS_xt_iclass_wsr_sar,
+ 0,
+ Opcode_wsr_sar_encode_fns, 0, 0 },
+ { "xsr.sar", ICLASS_xt_iclass_xsr_sar,
+ 0,
+ Opcode_xsr_sar_encode_fns, 0, 0 },
+ { "rsr.litbase", ICLASS_xt_iclass_rsr_litbase,
+ 0,
+ Opcode_rsr_litbase_encode_fns, 0, 0 },
+ { "wsr.litbase", ICLASS_xt_iclass_wsr_litbase,
+ 0,
+ Opcode_wsr_litbase_encode_fns, 0, 0 },
+ { "xsr.litbase", ICLASS_xt_iclass_xsr_litbase,
+ 0,
+ Opcode_xsr_litbase_encode_fns, 0, 0 },
+ { "rsr.configid0", ICLASS_xt_iclass_rsr_configid0,
+ 0,
+ Opcode_rsr_configid0_encode_fns, 0, 0 },
+ { "wsr.configid0", ICLASS_xt_iclass_wsr_configid0,
+ 0,
+ Opcode_wsr_configid0_encode_fns, 0, 0 },
+ { "rsr.configid1", ICLASS_xt_iclass_rsr_configid1,
+ 0,
+ Opcode_rsr_configid1_encode_fns, 0, 0 },
+ { "rsr.ps", ICLASS_xt_iclass_rsr_ps,
+ 0,
+ Opcode_rsr_ps_encode_fns, 0, 0 },
+ { "wsr.ps", ICLASS_xt_iclass_wsr_ps,
+ 0,
+ Opcode_wsr_ps_encode_fns, 0, 0 },
+ { "xsr.ps", ICLASS_xt_iclass_xsr_ps,
+ 0,
+ Opcode_xsr_ps_encode_fns, 0, 0 },
+ { "rsr.epc1", ICLASS_xt_iclass_rsr_epc1,
+ 0,
+ Opcode_rsr_epc1_encode_fns, 0, 0 },
+ { "wsr.epc1", ICLASS_xt_iclass_wsr_epc1,
+ 0,
+ Opcode_wsr_epc1_encode_fns, 0, 0 },
+ { "xsr.epc1", ICLASS_xt_iclass_xsr_epc1,
+ 0,
+ Opcode_xsr_epc1_encode_fns, 0, 0 },
+ { "rsr.excsave1", ICLASS_xt_iclass_rsr_excsave1,
+ 0,
+ Opcode_rsr_excsave1_encode_fns, 0, 0 },
+ { "wsr.excsave1", ICLASS_xt_iclass_wsr_excsave1,
+ 0,
+ Opcode_wsr_excsave1_encode_fns, 0, 0 },
+ { "xsr.excsave1", ICLASS_xt_iclass_xsr_excsave1,
+ 0,
+ Opcode_xsr_excsave1_encode_fns, 0, 0 },
+ { "rsr.epc2", ICLASS_xt_iclass_rsr_epc2,
+ 0,
+ Opcode_rsr_epc2_encode_fns, 0, 0 },
+ { "wsr.epc2", ICLASS_xt_iclass_wsr_epc2,
+ 0,
+ Opcode_wsr_epc2_encode_fns, 0, 0 },
+ { "xsr.epc2", ICLASS_xt_iclass_xsr_epc2,
+ 0,
+ Opcode_xsr_epc2_encode_fns, 0, 0 },
+ { "rsr.excsave2", ICLASS_xt_iclass_rsr_excsave2,
+ 0,
+ Opcode_rsr_excsave2_encode_fns, 0, 0 },
+ { "wsr.excsave2", ICLASS_xt_iclass_wsr_excsave2,
+ 0,
+ Opcode_wsr_excsave2_encode_fns, 0, 0 },
+ { "xsr.excsave2", ICLASS_xt_iclass_xsr_excsave2,
+ 0,
+ Opcode_xsr_excsave2_encode_fns, 0, 0 },
+ { "rsr.eps2", ICLASS_xt_iclass_rsr_eps2,
+ 0,
+ Opcode_rsr_eps2_encode_fns, 0, 0 },
+ { "wsr.eps2", ICLASS_xt_iclass_wsr_eps2,
+ 0,
+ Opcode_wsr_eps2_encode_fns, 0, 0 },
+ { "xsr.eps2", ICLASS_xt_iclass_xsr_eps2,
+ 0,
+ Opcode_xsr_eps2_encode_fns, 0, 0 },
+ { "rsr.excvaddr", ICLASS_xt_iclass_rsr_excvaddr,
+ 0,
+ Opcode_rsr_excvaddr_encode_fns, 0, 0 },
+ { "wsr.excvaddr", ICLASS_xt_iclass_wsr_excvaddr,
+ 0,
+ Opcode_wsr_excvaddr_encode_fns, 0, 0 },
+ { "xsr.excvaddr", ICLASS_xt_iclass_xsr_excvaddr,
+ 0,
+ Opcode_xsr_excvaddr_encode_fns, 0, 0 },
+ { "rsr.depc", ICLASS_xt_iclass_rsr_depc,
+ 0,
+ Opcode_rsr_depc_encode_fns, 0, 0 },
+ { "wsr.depc", ICLASS_xt_iclass_wsr_depc,
+ 0,
+ Opcode_wsr_depc_encode_fns, 0, 0 },
+ { "xsr.depc", ICLASS_xt_iclass_xsr_depc,
+ 0,
+ Opcode_xsr_depc_encode_fns, 0, 0 },
+ { "rsr.exccause", ICLASS_xt_iclass_rsr_exccause,
+ 0,
+ Opcode_rsr_exccause_encode_fns, 0, 0 },
+ { "wsr.exccause", ICLASS_xt_iclass_wsr_exccause,
+ 0,
+ Opcode_wsr_exccause_encode_fns, 0, 0 },
+ { "xsr.exccause", ICLASS_xt_iclass_xsr_exccause,
+ 0,
+ Opcode_xsr_exccause_encode_fns, 0, 0 },
+ { "rsr.misc0", ICLASS_xt_iclass_rsr_misc0,
+ 0,
+ Opcode_rsr_misc0_encode_fns, 0, 0 },
+ { "wsr.misc0", ICLASS_xt_iclass_wsr_misc0,
+ 0,
+ Opcode_wsr_misc0_encode_fns, 0, 0 },
+ { "xsr.misc0", ICLASS_xt_iclass_xsr_misc0,
+ 0,
+ Opcode_xsr_misc0_encode_fns, 0, 0 },
+ { "rsr.misc1", ICLASS_xt_iclass_rsr_misc1,
+ 0,
+ Opcode_rsr_misc1_encode_fns, 0, 0 },
+ { "wsr.misc1", ICLASS_xt_iclass_wsr_misc1,
+ 0,
+ Opcode_wsr_misc1_encode_fns, 0, 0 },
+ { "xsr.misc1", ICLASS_xt_iclass_xsr_misc1,
+ 0,
+ Opcode_xsr_misc1_encode_fns, 0, 0 },
+ { "rsr.prid", ICLASS_xt_iclass_rsr_prid,
+ 0,
+ Opcode_rsr_prid_encode_fns, 0, 0 },
+ { "rsr.vecbase", ICLASS_xt_iclass_rsr_vecbase,
+ 0,
+ Opcode_rsr_vecbase_encode_fns, 0, 0 },
+ { "wsr.vecbase", ICLASS_xt_iclass_wsr_vecbase,
+ 0,
+ Opcode_wsr_vecbase_encode_fns, 0, 0 },
+ { "xsr.vecbase", ICLASS_xt_iclass_xsr_vecbase,
+ 0,
+ Opcode_xsr_vecbase_encode_fns, 0, 0 },
+ { "mul16u", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16u_encode_fns, 0, 0 },
+ { "mul16s", ICLASS_xt_mul16,
+ 0,
+ Opcode_mul16s_encode_fns, 0, 0 },
+ { "mull", ICLASS_xt_mul32,
+ 0,
+ Opcode_mull_encode_fns, 0, 0 },
+ { "rfi", ICLASS_xt_iclass_rfi,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfi_encode_fns, 0, 0 },
+ { "waiti", ICLASS_xt_iclass_wait,
+ 0,
+ Opcode_waiti_encode_fns, 0, 0 },
+ { "rsr.interrupt", ICLASS_xt_iclass_rsr_interrupt,
+ 0,
+ Opcode_rsr_interrupt_encode_fns, 0, 0 },
+ { "wsr.intset", ICLASS_xt_iclass_wsr_intset,
+ 0,
+ Opcode_wsr_intset_encode_fns, 0, 0 },
+ { "wsr.intclear", ICLASS_xt_iclass_wsr_intclear,
+ 0,
+ Opcode_wsr_intclear_encode_fns, 0, 0 },
+ { "rsr.intenable", ICLASS_xt_iclass_rsr_intenable,
+ 0,
+ Opcode_rsr_intenable_encode_fns, 0, 0 },
+ { "wsr.intenable", ICLASS_xt_iclass_wsr_intenable,
+ 0,
+ Opcode_wsr_intenable_encode_fns, 0, 0 },
+ { "xsr.intenable", ICLASS_xt_iclass_xsr_intenable,
+ 0,
+ Opcode_xsr_intenable_encode_fns, 0, 0 },
+ { "break", ICLASS_xt_iclass_break,
+ 0,
+ Opcode_break_encode_fns, 0, 0 },
+ { "break.n", ICLASS_xt_iclass_break_n,
+ 0,
+ Opcode_break_n_encode_fns, 0, 0 },
+ { "rsr.debugcause", ICLASS_xt_iclass_rsr_debugcause,
+ 0,
+ Opcode_rsr_debugcause_encode_fns, 0, 0 },
+ { "wsr.debugcause", ICLASS_xt_iclass_wsr_debugcause,
+ 0,
+ Opcode_wsr_debugcause_encode_fns, 0, 0 },
+ { "xsr.debugcause", ICLASS_xt_iclass_xsr_debugcause,
+ 0,
+ Opcode_xsr_debugcause_encode_fns, 0, 0 },
+ { "rsr.icount", ICLASS_xt_iclass_rsr_icount,
+ 0,
+ Opcode_rsr_icount_encode_fns, 0, 0 },
+ { "wsr.icount", ICLASS_xt_iclass_wsr_icount,
+ 0,
+ Opcode_wsr_icount_encode_fns, 0, 0 },
+ { "xsr.icount", ICLASS_xt_iclass_xsr_icount,
+ 0,
+ Opcode_xsr_icount_encode_fns, 0, 0 },
+ { "rsr.icountlevel", ICLASS_xt_iclass_rsr_icountlevel,
+ 0,
+ Opcode_rsr_icountlevel_encode_fns, 0, 0 },
+ { "wsr.icountlevel", ICLASS_xt_iclass_wsr_icountlevel,
+ 0,
+ Opcode_wsr_icountlevel_encode_fns, 0, 0 },
+ { "xsr.icountlevel", ICLASS_xt_iclass_xsr_icountlevel,
+ 0,
+ Opcode_xsr_icountlevel_encode_fns, 0, 0 },
+ { "rsr.ddr", ICLASS_xt_iclass_rsr_ddr,
+ 0,
+ Opcode_rsr_ddr_encode_fns, 0, 0 },
+ { "wsr.ddr", ICLASS_xt_iclass_wsr_ddr,
+ 0,
+ Opcode_wsr_ddr_encode_fns, 0, 0 },
+ { "xsr.ddr", ICLASS_xt_iclass_xsr_ddr,
+ 0,
+ Opcode_xsr_ddr_encode_fns, 0, 0 },
+ { "rfdo", ICLASS_xt_iclass_rfdo,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdo_encode_fns, 0, 0 },
+ { "rfdd", ICLASS_xt_iclass_rfdd,
+ XTENSA_OPCODE_IS_JUMP,
+ Opcode_rfdd_encode_fns, 0, 0 },
+ { "andb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andb_encode_fns, 0, 0 },
+ { "andbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_andbc_encode_fns, 0, 0 },
+ { "orb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orb_encode_fns, 0, 0 },
+ { "orbc", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_orbc_encode_fns, 0, 0 },
+ { "xorb", ICLASS_xt_iclass_bbool1,
+ 0,
+ Opcode_xorb_encode_fns, 0, 0 },
+ { "any4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_any4_encode_fns, 0, 0 },
+ { "all4", ICLASS_xt_iclass_bbool4,
+ 0,
+ Opcode_all4_encode_fns, 0, 0 },
+ { "any8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_any8_encode_fns, 0, 0 },
+ { "all8", ICLASS_xt_iclass_bbool8,
+ 0,
+ Opcode_all8_encode_fns, 0, 0 },
+ { "bf", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bf_encode_fns, 0, 0 },
+ { "bt", ICLASS_xt_iclass_bbranch,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_bt_encode_fns, 0, 0 },
+ { "movf", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movf_encode_fns, 0, 0 },
+ { "movt", ICLASS_xt_iclass_bmove,
+ 0,
+ Opcode_movt_encode_fns, 0, 0 },
+ { "rsr.br", ICLASS_xt_iclass_RSR_BR,
+ 0,
+ Opcode_rsr_br_encode_fns, 0, 0 },
+ { "wsr.br", ICLASS_xt_iclass_WSR_BR,
+ 0,
+ Opcode_wsr_br_encode_fns, 0, 0 },
+ { "xsr.br", ICLASS_xt_iclass_XSR_BR,
+ 0,
+ Opcode_xsr_br_encode_fns, 0, 0 },
+ { "rsr.ccount", ICLASS_xt_iclass_rsr_ccount,
+ 0,
+ Opcode_rsr_ccount_encode_fns, 0, 0 },
+ { "wsr.ccount", ICLASS_xt_iclass_wsr_ccount,
+ 0,
+ Opcode_wsr_ccount_encode_fns, 0, 0 },
+ { "xsr.ccount", ICLASS_xt_iclass_xsr_ccount,
+ 0,
+ Opcode_xsr_ccount_encode_fns, 0, 0 },
+ { "rsr.ccompare0", ICLASS_xt_iclass_rsr_ccompare0,
+ 0,
+ Opcode_rsr_ccompare0_encode_fns, 0, 0 },
+ { "wsr.ccompare0", ICLASS_xt_iclass_wsr_ccompare0,
+ 0,
+ Opcode_wsr_ccompare0_encode_fns, 0, 0 },
+ { "xsr.ccompare0", ICLASS_xt_iclass_xsr_ccompare0,
+ 0,
+ Opcode_xsr_ccompare0_encode_fns, 0, 0 },
+ { "rsr.ccompare1", ICLASS_xt_iclass_rsr_ccompare1,
+ 0,
+ Opcode_rsr_ccompare1_encode_fns, 0, 0 },
+ { "wsr.ccompare1", ICLASS_xt_iclass_wsr_ccompare1,
+ 0,
+ Opcode_wsr_ccompare1_encode_fns, 0, 0 },
+ { "xsr.ccompare1", ICLASS_xt_iclass_xsr_ccompare1,
+ 0,
+ Opcode_xsr_ccompare1_encode_fns, 0, 0 },
+ { "ipf", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ipf_encode_fns, 0, 0 },
+ { "ihi", ICLASS_xt_iclass_icache,
+ 0,
+ Opcode_ihi_encode_fns, 0, 0 },
+ { "iii", ICLASS_xt_iclass_icache_inv,
+ 0,
+ Opcode_iii_encode_fns, 0, 0 },
+ { "lict", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_lict_encode_fns, 0, 0 },
+ { "licw", ICLASS_xt_iclass_licx,
+ 0,
+ Opcode_licw_encode_fns, 0, 0 },
+ { "sict", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sict_encode_fns, 0, 0 },
+ { "sicw", ICLASS_xt_iclass_sicx,
+ 0,
+ Opcode_sicw_encode_fns, 0, 0 },
+ { "dhwb", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwb_encode_fns, 0, 0 },
+ { "dhwbi", ICLASS_xt_iclass_dcache,
+ 0,
+ Opcode_dhwbi_encode_fns, 0, 0 },
+ { "diwb", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwb_encode_fns, 0, 0 },
+ { "diwbi", ICLASS_xt_iclass_dcache_ind,
+ 0,
+ Opcode_diwbi_encode_fns, 0, 0 },
+ { "dhi", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dhi_encode_fns, 0, 0 },
+ { "dii", ICLASS_xt_iclass_dcache_inv,
+ 0,
+ Opcode_dii_encode_fns, 0, 0 },
+ { "dpfr", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfr_encode_fns, 0, 0 },
+ { "dpfw", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfw_encode_fns, 0, 0 },
+ { "dpfro", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfro_encode_fns, 0, 0 },
+ { "dpfwo", ICLASS_xt_iclass_dpf,
+ 0,
+ Opcode_dpfwo_encode_fns, 0, 0 },
+ { "sdct", ICLASS_xt_iclass_sdct,
+ 0,
+ Opcode_sdct_encode_fns, 0, 0 },
+ { "ldct", ICLASS_xt_iclass_ldct,
+ 0,
+ Opcode_ldct_encode_fns, 0, 0 },
+ { "wsr.ptevaddr", ICLASS_xt_iclass_wsr_ptevaddr,
+ 0,
+ Opcode_wsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.ptevaddr", ICLASS_xt_iclass_rsr_ptevaddr,
+ 0,
+ Opcode_rsr_ptevaddr_encode_fns, 0, 0 },
+ { "xsr.ptevaddr", ICLASS_xt_iclass_xsr_ptevaddr,
+ 0,
+ Opcode_xsr_ptevaddr_encode_fns, 0, 0 },
+ { "rsr.rasid", ICLASS_xt_iclass_rsr_rasid,
+ 0,
+ Opcode_rsr_rasid_encode_fns, 0, 0 },
+ { "wsr.rasid", ICLASS_xt_iclass_wsr_rasid,
+ 0,
+ Opcode_wsr_rasid_encode_fns, 0, 0 },
+ { "xsr.rasid", ICLASS_xt_iclass_xsr_rasid,
+ 0,
+ Opcode_xsr_rasid_encode_fns, 0, 0 },
+ { "rsr.itlbcfg", ICLASS_xt_iclass_rsr_itlbcfg,
+ 0,
+ Opcode_rsr_itlbcfg_encode_fns, 0, 0 },
+ { "wsr.itlbcfg", ICLASS_xt_iclass_wsr_itlbcfg,
+ 0,
+ Opcode_wsr_itlbcfg_encode_fns, 0, 0 },
+ { "xsr.itlbcfg", ICLASS_xt_iclass_xsr_itlbcfg,
+ 0,
+ Opcode_xsr_itlbcfg_encode_fns, 0, 0 },
+ { "rsr.dtlbcfg", ICLASS_xt_iclass_rsr_dtlbcfg,
+ 0,
+ Opcode_rsr_dtlbcfg_encode_fns, 0, 0 },
+ { "wsr.dtlbcfg", ICLASS_xt_iclass_wsr_dtlbcfg,
+ 0,
+ Opcode_wsr_dtlbcfg_encode_fns, 0, 0 },
+ { "xsr.dtlbcfg", ICLASS_xt_iclass_xsr_dtlbcfg,
+ 0,
+ Opcode_xsr_dtlbcfg_encode_fns, 0, 0 },
+ { "idtlb", ICLASS_xt_iclass_idtlb,
+ 0,
+ Opcode_idtlb_encode_fns, 0, 0 },
+ { "pdtlb", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_pdtlb_encode_fns, 0, 0 },
+ { "rdtlb0", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb0_encode_fns, 0, 0 },
+ { "rdtlb1", ICLASS_xt_iclass_rdtlb,
+ 0,
+ Opcode_rdtlb1_encode_fns, 0, 0 },
+ { "wdtlb", ICLASS_xt_iclass_wdtlb,
+ 0,
+ Opcode_wdtlb_encode_fns, 0, 0 },
+ { "iitlb", ICLASS_xt_iclass_iitlb,
+ 0,
+ Opcode_iitlb_encode_fns, 0, 0 },
+ { "pitlb", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_pitlb_encode_fns, 0, 0 },
+ { "ritlb0", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb0_encode_fns, 0, 0 },
+ { "ritlb1", ICLASS_xt_iclass_ritlb,
+ 0,
+ Opcode_ritlb1_encode_fns, 0, 0 },
+ { "witlb", ICLASS_xt_iclass_witlb,
+ 0,
+ Opcode_witlb_encode_fns, 0, 0 },
+ { "ldpte", ICLASS_xt_iclass_ldpte,
+ 0,
+ Opcode_ldpte_encode_fns, 0, 0 },
+ { "hwwitlba", ICLASS_xt_iclass_hwwitlba,
+ XTENSA_OPCODE_IS_BRANCH,
+ Opcode_hwwitlba_encode_fns, 0, 0 },
+ { "hwwdtlba", ICLASS_xt_iclass_hwwdtlba,
+ 0,
+ Opcode_hwwdtlba_encode_fns, 0, 0 },
+ { "rsr.cpenable", ICLASS_xt_iclass_rsr_cpenable,
+ 0,
+ Opcode_rsr_cpenable_encode_fns, 0, 0 },
+ { "wsr.cpenable", ICLASS_xt_iclass_wsr_cpenable,
+ 0,
+ Opcode_wsr_cpenable_encode_fns, 0, 0 },
+ { "xsr.cpenable", ICLASS_xt_iclass_xsr_cpenable,
+ 0,
+ Opcode_xsr_cpenable_encode_fns, 0, 0 },
+ { "clamps", ICLASS_xt_iclass_clamp,
+ 0,
+ Opcode_clamps_encode_fns, 0, 0 },
+ { "min", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_min_encode_fns, 0, 0 },
+ { "max", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_max_encode_fns, 0, 0 },
+ { "minu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_minu_encode_fns, 0, 0 },
+ { "maxu", ICLASS_xt_iclass_minmax,
+ 0,
+ Opcode_maxu_encode_fns, 0, 0 },
+ { "nsa", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsa_encode_fns, 0, 0 },
+ { "nsau", ICLASS_xt_iclass_nsa,
+ 0,
+ Opcode_nsau_encode_fns, 0, 0 },
+ { "sext", ICLASS_xt_iclass_sx,
+ 0,
+ Opcode_sext_encode_fns, 0, 0 },
+ { "l32ai", ICLASS_xt_iclass_l32ai,
+ 0,
+ Opcode_l32ai_encode_fns, 0, 0 },
+ { "s32ri", ICLASS_xt_iclass_s32ri,
+ 0,
+ Opcode_s32ri_encode_fns, 0, 0 },
+ { "s32c1i", ICLASS_xt_iclass_s32c1i,
+ 0,
+ Opcode_s32c1i_encode_fns, 0, 0 },
+ { "rsr.scompare1", ICLASS_xt_iclass_rsr_scompare1,
+ 0,
+ Opcode_rsr_scompare1_encode_fns, 0, 0 },
+ { "wsr.scompare1", ICLASS_xt_iclass_wsr_scompare1,
+ 0,
+ Opcode_wsr_scompare1_encode_fns, 0, 0 },
+ { "xsr.scompare1", ICLASS_xt_iclass_xsr_scompare1,
+ 0,
+ Opcode_xsr_scompare1_encode_fns, 0, 0 },
+ { "rsr.atomctl", ICLASS_xt_iclass_rsr_atomctl,
+ 0,
+ Opcode_rsr_atomctl_encode_fns, 0, 0 },
+ { "wsr.atomctl", ICLASS_xt_iclass_wsr_atomctl,
+ 0,
+ Opcode_wsr_atomctl_encode_fns, 0, 0 },
+ { "xsr.atomctl", ICLASS_xt_iclass_xsr_atomctl,
+ 0,
+ Opcode_xsr_atomctl_encode_fns, 0, 0 },
+ { "rer", ICLASS_xt_iclass_rer,
+ 0,
+ Opcode_rer_encode_fns, 0, 0 },
+ { "wer", ICLASS_xt_iclass_wer,
+ 0,
+ Opcode_wer_encode_fns, 0, 0 },
+ { "rur.ae_ovf_sar", ICLASS_rur_ae_ovf_sar,
+ 0,
+ Opcode_rur_ae_ovf_sar_encode_fns, 0, 0 },
+ { "wur.ae_ovf_sar", ICLASS_wur_ae_ovf_sar,
+ 0,
+ Opcode_wur_ae_ovf_sar_encode_fns, 0, 0 },
+ { "rur.ae_bithead", ICLASS_rur_ae_bithead,
+ 0,
+ Opcode_rur_ae_bithead_encode_fns, 0, 0 },
+ { "wur.ae_bithead", ICLASS_wur_ae_bithead,
+ 0,
+ Opcode_wur_ae_bithead_encode_fns, 0, 0 },
+ { "rur.ae_ts_fts_bu_bp", ICLASS_rur_ae_ts_fts_bu_bp,
+ 0,
+ Opcode_rur_ae_ts_fts_bu_bp_encode_fns, 0, 0 },
+ { "wur.ae_ts_fts_bu_bp", ICLASS_wur_ae_ts_fts_bu_bp,
+ 0,
+ Opcode_wur_ae_ts_fts_bu_bp_encode_fns, 0, 0 },
+ { "rur.ae_sd_no", ICLASS_rur_ae_sd_no,
+ 0,
+ Opcode_rur_ae_sd_no_encode_fns, 0, 0 },
+ { "wur.ae_sd_no", ICLASS_wur_ae_sd_no,
+ 0,
+ Opcode_wur_ae_sd_no_encode_fns, 0, 0 },
+ { "rur.ae_overflow", ICLASS_ae_iclass_rur_ae_overflow,
+ 0,
+ Opcode_rur_ae_overflow_encode_fns, 0, 0 },
+ { "wur.ae_overflow", ICLASS_ae_iclass_wur_ae_overflow,
+ 0,
+ Opcode_wur_ae_overflow_encode_fns, 0, 0 },
+ { "rur.ae_sar", ICLASS_ae_iclass_rur_ae_sar,
+ 0,
+ Opcode_rur_ae_sar_encode_fns, 0, 0 },
+ { "wur.ae_sar", ICLASS_ae_iclass_wur_ae_sar,
+ 0,
+ Opcode_wur_ae_sar_encode_fns, 0, 0 },
+ { "rur.ae_bitptr", ICLASS_ae_iclass_rur_ae_bitptr,
+ 0,
+ Opcode_rur_ae_bitptr_encode_fns, 0, 0 },
+ { "wur.ae_bitptr", ICLASS_ae_iclass_wur_ae_bitptr,
+ 0,
+ Opcode_wur_ae_bitptr_encode_fns, 0, 0 },
+ { "rur.ae_bitsused", ICLASS_ae_iclass_rur_ae_bitsused,
+ 0,
+ Opcode_rur_ae_bitsused_encode_fns, 0, 0 },
+ { "wur.ae_bitsused", ICLASS_ae_iclass_wur_ae_bitsused,
+ 0,
+ Opcode_wur_ae_bitsused_encode_fns, 0, 0 },
+ { "rur.ae_tablesize", ICLASS_ae_iclass_rur_ae_tablesize,
+ 0,
+ Opcode_rur_ae_tablesize_encode_fns, 0, 0 },
+ { "wur.ae_tablesize", ICLASS_ae_iclass_wur_ae_tablesize,
+ 0,
+ Opcode_wur_ae_tablesize_encode_fns, 0, 0 },
+ { "rur.ae_first_ts", ICLASS_ae_iclass_rur_ae_first_ts,
+ 0,
+ Opcode_rur_ae_first_ts_encode_fns, 0, 0 },
+ { "wur.ae_first_ts", ICLASS_ae_iclass_wur_ae_first_ts,
+ 0,
+ Opcode_wur_ae_first_ts_encode_fns, 0, 0 },
+ { "rur.ae_nextoffset", ICLASS_ae_iclass_rur_ae_nextoffset,
+ 0,
+ Opcode_rur_ae_nextoffset_encode_fns, 0, 0 },
+ { "wur.ae_nextoffset", ICLASS_ae_iclass_wur_ae_nextoffset,
+ 0,
+ Opcode_wur_ae_nextoffset_encode_fns, 0, 0 },
+ { "rur.ae_searchdone", ICLASS_ae_iclass_rur_ae_searchdone,
+ 0,
+ Opcode_rur_ae_searchdone_encode_fns, 0, 0 },
+ { "wur.ae_searchdone", ICLASS_ae_iclass_wur_ae_searchdone,
+ 0,
+ Opcode_wur_ae_searchdone_encode_fns, 0, 0 },
+ { "ae_lp16f.i", ICLASS_ae_iclass_lp16f_i,
+ 0,
+ Opcode_ae_lp16f_i_encode_fns, 0, 0 },
+ { "ae_lp16f.iu", ICLASS_ae_iclass_lp16f_iu,
+ 0,
+ Opcode_ae_lp16f_iu_encode_fns, 0, 0 },
+ { "ae_lp16f.x", ICLASS_ae_iclass_lp16f_x,
+ 0,
+ Opcode_ae_lp16f_x_encode_fns, 0, 0 },
+ { "ae_lp16f.xu", ICLASS_ae_iclass_lp16f_xu,
+ 0,
+ Opcode_ae_lp16f_xu_encode_fns, 0, 0 },
+ { "ae_lp24.i", ICLASS_ae_iclass_lp24_i,
+ 0,
+ Opcode_ae_lp24_i_encode_fns, 0, 0 },
+ { "ae_lp24.iu", ICLASS_ae_iclass_lp24_iu,
+ 0,
+ Opcode_ae_lp24_iu_encode_fns, 0, 0 },
+ { "ae_lp24.x", ICLASS_ae_iclass_lp24_x,
+ 0,
+ Opcode_ae_lp24_x_encode_fns, 0, 0 },
+ { "ae_lp24.xu", ICLASS_ae_iclass_lp24_xu,
+ 0,
+ Opcode_ae_lp24_xu_encode_fns, 0, 0 },
+ { "ae_lp24f.i", ICLASS_ae_iclass_lp24f_i,
+ 0,
+ Opcode_ae_lp24f_i_encode_fns, 0, 0 },
+ { "ae_lp24f.iu", ICLASS_ae_iclass_lp24f_iu,
+ 0,
+ Opcode_ae_lp24f_iu_encode_fns, 0, 0 },
+ { "ae_lp24f.x", ICLASS_ae_iclass_lp24f_x,
+ 0,
+ Opcode_ae_lp24f_x_encode_fns, 0, 0 },
+ { "ae_lp24f.xu", ICLASS_ae_iclass_lp24f_xu,
+ 0,
+ Opcode_ae_lp24f_xu_encode_fns, 0, 0 },
+ { "ae_lp16x2f.i", ICLASS_ae_iclass_lp16x2f_i,
+ 0,
+ Opcode_ae_lp16x2f_i_encode_fns, 0, 0 },
+ { "ae_lp16x2f.iu", ICLASS_ae_iclass_lp16x2f_iu,
+ 0,
+ Opcode_ae_lp16x2f_iu_encode_fns, 0, 0 },
+ { "ae_lp16x2f.x", ICLASS_ae_iclass_lp16x2f_x,
+ 0,
+ Opcode_ae_lp16x2f_x_encode_fns, 0, 0 },
+ { "ae_lp16x2f.xu", ICLASS_ae_iclass_lp16x2f_xu,
+ 0,
+ Opcode_ae_lp16x2f_xu_encode_fns, 0, 0 },
+ { "ae_lp24x2f.i", ICLASS_ae_iclass_lp24x2f_i,
+ 0,
+ Opcode_ae_lp24x2f_i_encode_fns, 0, 0 },
+ { "ae_lp24x2f.iu", ICLASS_ae_iclass_lp24x2f_iu,
+ 0,
+ Opcode_ae_lp24x2f_iu_encode_fns, 0, 0 },
+ { "ae_lp24x2f.x", ICLASS_ae_iclass_lp24x2f_x,
+ 0,
+ Opcode_ae_lp24x2f_x_encode_fns, 0, 0 },
+ { "ae_lp24x2f.xu", ICLASS_ae_iclass_lp24x2f_xu,
+ 0,
+ Opcode_ae_lp24x2f_xu_encode_fns, 0, 0 },
+ { "ae_lp24x2.i", ICLASS_ae_iclass_lp24x2_i,
+ 0,
+ Opcode_ae_lp24x2_i_encode_fns, 0, 0 },
+ { "ae_lp24x2.iu", ICLASS_ae_iclass_lp24x2_iu,
+ 0,
+ Opcode_ae_lp24x2_iu_encode_fns, 0, 0 },
+ { "ae_lp24x2.x", ICLASS_ae_iclass_lp24x2_x,
+ 0,
+ Opcode_ae_lp24x2_x_encode_fns, 0, 0 },
+ { "ae_lp24x2.xu", ICLASS_ae_iclass_lp24x2_xu,
+ 0,
+ Opcode_ae_lp24x2_xu_encode_fns, 0, 0 },
+ { "ae_sp16x2f.i", ICLASS_ae_iclass_sp16x2f_i,
+ 0,
+ Opcode_ae_sp16x2f_i_encode_fns, 0, 0 },
+ { "ae_sp16x2f.iu", ICLASS_ae_iclass_sp16x2f_iu,
+ 0,
+ Opcode_ae_sp16x2f_iu_encode_fns, 0, 0 },
+ { "ae_sp16x2f.x", ICLASS_ae_iclass_sp16x2f_x,
+ 0,
+ Opcode_ae_sp16x2f_x_encode_fns, 0, 0 },
+ { "ae_sp16x2f.xu", ICLASS_ae_iclass_sp16x2f_xu,
+ 0,
+ Opcode_ae_sp16x2f_xu_encode_fns, 0, 0 },
+ { "ae_sp24x2s.i", ICLASS_ae_iclass_sp24x2s_i,
+ 0,
+ Opcode_ae_sp24x2s_i_encode_fns, 0, 0 },
+ { "ae_sp24x2s.iu", ICLASS_ae_iclass_sp24x2s_iu,
+ 0,
+ Opcode_ae_sp24x2s_iu_encode_fns, 0, 0 },
+ { "ae_sp24x2s.x", ICLASS_ae_iclass_sp24x2s_x,
+ 0,
+ Opcode_ae_sp24x2s_x_encode_fns, 0, 0 },
+ { "ae_sp24x2s.xu", ICLASS_ae_iclass_sp24x2s_xu,
+ 0,
+ Opcode_ae_sp24x2s_xu_encode_fns, 0, 0 },
+ { "ae_sp24x2f.i", ICLASS_ae_iclass_sp24x2f_i,
+ 0,
+ Opcode_ae_sp24x2f_i_encode_fns, 0, 0 },
+ { "ae_sp24x2f.iu", ICLASS_ae_iclass_sp24x2f_iu,
+ 0,
+ Opcode_ae_sp24x2f_iu_encode_fns, 0, 0 },
+ { "ae_sp24x2f.x", ICLASS_ae_iclass_sp24x2f_x,
+ 0,
+ Opcode_ae_sp24x2f_x_encode_fns, 0, 0 },
+ { "ae_sp24x2f.xu", ICLASS_ae_iclass_sp24x2f_xu,
+ 0,
+ Opcode_ae_sp24x2f_xu_encode_fns, 0, 0 },
+ { "ae_sp16f.l.i", ICLASS_ae_iclass_sp16f_l_i,
+ 0,
+ Opcode_ae_sp16f_l_i_encode_fns, 0, 0 },
+ { "ae_sp16f.l.iu", ICLASS_ae_iclass_sp16f_l_iu,
+ 0,
+ Opcode_ae_sp16f_l_iu_encode_fns, 0, 0 },
+ { "ae_sp16f.l.x", ICLASS_ae_iclass_sp16f_l_x,
+ 0,
+ Opcode_ae_sp16f_l_x_encode_fns, 0, 0 },
+ { "ae_sp16f.l.xu", ICLASS_ae_iclass_sp16f_l_xu,
+ 0,
+ Opcode_ae_sp16f_l_xu_encode_fns, 0, 0 },
+ { "ae_sp24s.l.i", ICLASS_ae_iclass_sp24s_l_i,
+ 0,
+ Opcode_ae_sp24s_l_i_encode_fns, 0, 0 },
+ { "ae_sp24s.l.iu", ICLASS_ae_iclass_sp24s_l_iu,
+ 0,
+ Opcode_ae_sp24s_l_iu_encode_fns, 0, 0 },
+ { "ae_sp24s.l.x", ICLASS_ae_iclass_sp24s_l_x,
+ 0,
+ Opcode_ae_sp24s_l_x_encode_fns, 0, 0 },
+ { "ae_sp24s.l.xu", ICLASS_ae_iclass_sp24s_l_xu,
+ 0,
+ Opcode_ae_sp24s_l_xu_encode_fns, 0, 0 },
+ { "ae_sp24f.l.i", ICLASS_ae_iclass_sp24f_l_i,
+ 0,
+ Opcode_ae_sp24f_l_i_encode_fns, 0, 0 },
+ { "ae_sp24f.l.iu", ICLASS_ae_iclass_sp24f_l_iu,
+ 0,
+ Opcode_ae_sp24f_l_iu_encode_fns, 0, 0 },
+ { "ae_sp24f.l.x", ICLASS_ae_iclass_sp24f_l_x,
+ 0,
+ Opcode_ae_sp24f_l_x_encode_fns, 0, 0 },
+ { "ae_sp24f.l.xu", ICLASS_ae_iclass_sp24f_l_xu,
+ 0,
+ Opcode_ae_sp24f_l_xu_encode_fns, 0, 0 },
+ { "ae_lq56.i", ICLASS_ae_iclass_lq56_i,
+ 0,
+ Opcode_ae_lq56_i_encode_fns, 0, 0 },
+ { "ae_lq56.iu", ICLASS_ae_iclass_lq56_iu,
+ 0,
+ Opcode_ae_lq56_iu_encode_fns, 0, 0 },
+ { "ae_lq56.x", ICLASS_ae_iclass_lq56_x,
+ 0,
+ Opcode_ae_lq56_x_encode_fns, 0, 0 },
+ { "ae_lq56.xu", ICLASS_ae_iclass_lq56_xu,
+ 0,
+ Opcode_ae_lq56_xu_encode_fns, 0, 0 },
+ { "ae_lq32f.i", ICLASS_ae_iclass_lq32f_i,
+ 0,
+ Opcode_ae_lq32f_i_encode_fns, 0, 0 },
+ { "ae_lq32f.iu", ICLASS_ae_iclass_lq32f_iu,
+ 0,
+ Opcode_ae_lq32f_iu_encode_fns, 0, 0 },
+ { "ae_lq32f.x", ICLASS_ae_iclass_lq32f_x,
+ 0,
+ Opcode_ae_lq32f_x_encode_fns, 0, 0 },
+ { "ae_lq32f.xu", ICLASS_ae_iclass_lq32f_xu,
+ 0,
+ Opcode_ae_lq32f_xu_encode_fns, 0, 0 },
+ { "ae_sq56s.i", ICLASS_ae_iclass_sq56s_i,
+ 0,
+ Opcode_ae_sq56s_i_encode_fns, 0, 0 },
+ { "ae_sq56s.iu", ICLASS_ae_iclass_sq56s_iu,
+ 0,
+ Opcode_ae_sq56s_iu_encode_fns, 0, 0 },
+ { "ae_sq56s.x", ICLASS_ae_iclass_sq56s_x,
+ 0,
+ Opcode_ae_sq56s_x_encode_fns, 0, 0 },
+ { "ae_sq56s.xu", ICLASS_ae_iclass_sq56s_xu,
+ 0,
+ Opcode_ae_sq56s_xu_encode_fns, 0, 0 },
+ { "ae_sq32f.i", ICLASS_ae_iclass_sq32f_i,
+ 0,
+ Opcode_ae_sq32f_i_encode_fns, 0, 0 },
+ { "ae_sq32f.iu", ICLASS_ae_iclass_sq32f_iu,
+ 0,
+ Opcode_ae_sq32f_iu_encode_fns, 0, 0 },
+ { "ae_sq32f.x", ICLASS_ae_iclass_sq32f_x,
+ 0,
+ Opcode_ae_sq32f_x_encode_fns, 0, 0 },
+ { "ae_sq32f.xu", ICLASS_ae_iclass_sq32f_xu,
+ 0,
+ Opcode_ae_sq32f_xu_encode_fns, 0, 0 },
+ { "ae_zerop48", ICLASS_ae_iclass_zerop48,
+ 0,
+ Opcode_ae_zerop48_encode_fns, 0, 0 },
+ { "ae_movp48", ICLASS_ae_iclass_movp48,
+ 0,
+ Opcode_ae_movp48_encode_fns, 0, 0 },
+ { "ae_selp24.ll", ICLASS_ae_iclass_selp24_ll,
+ 0,
+ Opcode_ae_selp24_ll_encode_fns, 0, 0 },
+ { "ae_selp24.lh", ICLASS_ae_iclass_selp24_lh,
+ 0,
+ Opcode_ae_selp24_lh_encode_fns, 0, 0 },
+ { "ae_selp24.hl", ICLASS_ae_iclass_selp24_hl,
+ 0,
+ Opcode_ae_selp24_hl_encode_fns, 0, 0 },
+ { "ae_selp24.hh", ICLASS_ae_iclass_selp24_hh,
+ 0,
+ Opcode_ae_selp24_hh_encode_fns, 0, 0 },
+ { "ae_movtp24x2", ICLASS_ae_iclass_movtp24x2,
+ 0,
+ Opcode_ae_movtp24x2_encode_fns, 0, 0 },
+ { "ae_movfp24x2", ICLASS_ae_iclass_movfp24x2,
+ 0,
+ Opcode_ae_movfp24x2_encode_fns, 0, 0 },
+ { "ae_movtp48", ICLASS_ae_iclass_movtp48,
+ 0,
+ Opcode_ae_movtp48_encode_fns, 0, 0 },
+ { "ae_movfp48", ICLASS_ae_iclass_movfp48,
+ 0,
+ Opcode_ae_movfp48_encode_fns, 0, 0 },
+ { "ae_movpa24x2", ICLASS_ae_iclass_movpa24x2,
+ 0,
+ Opcode_ae_movpa24x2_encode_fns, 0, 0 },
+ { "ae_truncp24a32x2", ICLASS_ae_iclass_truncp24a32x2,
+ 0,
+ Opcode_ae_truncp24a32x2_encode_fns, 0, 0 },
+ { "ae_cvta32p24.l", ICLASS_ae_iclass_cvta32p24_l,
+ 0,
+ Opcode_ae_cvta32p24_l_encode_fns, 0, 0 },
+ { "ae_cvta32p24.h", ICLASS_ae_iclass_cvta32p24_h,
+ 0,
+ Opcode_ae_cvta32p24_h_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.ll", ICLASS_ae_iclass_cvtp24a16x2_ll,
+ 0,
+ Opcode_ae_cvtp24a16x2_ll_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.lh", ICLASS_ae_iclass_cvtp24a16x2_lh,
+ 0,
+ Opcode_ae_cvtp24a16x2_lh_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.hl", ICLASS_ae_iclass_cvtp24a16x2_hl,
+ 0,
+ Opcode_ae_cvtp24a16x2_hl_encode_fns, 0, 0 },
+ { "ae_cvtp24a16x2.hh", ICLASS_ae_iclass_cvtp24a16x2_hh,
+ 0,
+ Opcode_ae_cvtp24a16x2_hh_encode_fns, 0, 0 },
+ { "ae_truncp24q48x2", ICLASS_ae_iclass_truncp24q48x2,
+ 0,
+ Opcode_ae_truncp24q48x2_encode_fns, 0, 0 },
+ { "ae_truncp16", ICLASS_ae_iclass_truncp16,
+ 0,
+ Opcode_ae_truncp16_encode_fns, 0, 0 },
+ { "ae_roundsp24q48sym", ICLASS_ae_iclass_roundsp24q48sym,
+ 0,
+ Opcode_ae_roundsp24q48sym_encode_fns, 0, 0 },
+ { "ae_roundsp24q48asym", ICLASS_ae_iclass_roundsp24q48asym,
+ 0,
+ Opcode_ae_roundsp24q48asym_encode_fns, 0, 0 },
+ { "ae_roundsp16q48sym", ICLASS_ae_iclass_roundsp16q48sym,
+ 0,
+ Opcode_ae_roundsp16q48sym_encode_fns, 0, 0 },
+ { "ae_roundsp16q48asym", ICLASS_ae_iclass_roundsp16q48asym,
+ 0,
+ Opcode_ae_roundsp16q48asym_encode_fns, 0, 0 },
+ { "ae_roundsp16sym", ICLASS_ae_iclass_roundsp16sym,
+ 0,
+ Opcode_ae_roundsp16sym_encode_fns, 0, 0 },
+ { "ae_roundsp16asym", ICLASS_ae_iclass_roundsp16asym,
+ 0,
+ Opcode_ae_roundsp16asym_encode_fns, 0, 0 },
+ { "ae_zeroq56", ICLASS_ae_iclass_zeroq56,
+ 0,
+ Opcode_ae_zeroq56_encode_fns, 0, 0 },
+ { "ae_movq56", ICLASS_ae_iclass_movq56,
+ 0,
+ Opcode_ae_movq56_encode_fns, 0, 0 },
+ { "ae_movtq56", ICLASS_ae_iclass_movtq56,
+ 0,
+ Opcode_ae_movtq56_encode_fns, 0, 0 },
+ { "ae_movfq56", ICLASS_ae_iclass_movfq56,
+ 0,
+ Opcode_ae_movfq56_encode_fns, 0, 0 },
+ { "ae_cvtq48a32s", ICLASS_ae_iclass_cvtq48a32s,
+ 0,
+ Opcode_ae_cvtq48a32s_encode_fns, 0, 0 },
+ { "ae_cvtq48p24s.l", ICLASS_ae_iclass_cvtq48p24s_l,
+ 0,
+ Opcode_ae_cvtq48p24s_l_encode_fns, 0, 0 },
+ { "ae_cvtq48p24s.h", ICLASS_ae_iclass_cvtq48p24s_h,
+ 0,
+ Opcode_ae_cvtq48p24s_h_encode_fns, 0, 0 },
+ { "ae_satq48s", ICLASS_ae_iclass_satq48s,
+ 0,
+ Opcode_ae_satq48s_encode_fns, 0, 0 },
+ { "ae_truncq32", ICLASS_ae_iclass_truncq32,
+ 0,
+ Opcode_ae_truncq32_encode_fns, 0, 0 },
+ { "ae_roundsq32sym", ICLASS_ae_iclass_roundsq32sym,
+ 0,
+ Opcode_ae_roundsq32sym_encode_fns, 0, 0 },
+ { "ae_roundsq32asym", ICLASS_ae_iclass_roundsq32asym,
+ 0,
+ Opcode_ae_roundsq32asym_encode_fns, 0, 0 },
+ { "ae_trunca32q48", ICLASS_ae_iclass_trunca32q48,
+ 0,
+ Opcode_ae_trunca32q48_encode_fns, 0, 0 },
+ { "ae_movap24s.l", ICLASS_ae_iclass_movap24s_l,
+ 0,
+ Opcode_ae_movap24s_l_encode_fns, 0, 0 },
+ { "ae_movap24s.h", ICLASS_ae_iclass_movap24s_h,
+ 0,
+ Opcode_ae_movap24s_h_encode_fns, 0, 0 },
+ { "ae_trunca16p24s.l", ICLASS_ae_iclass_trunca16p24s_l,
+ 0,
+ Opcode_ae_trunca16p24s_l_encode_fns, 0, 0 },
+ { "ae_trunca16p24s.h", ICLASS_ae_iclass_trunca16p24s_h,
+ 0,
+ Opcode_ae_trunca16p24s_h_encode_fns, 0, 0 },
+ { "ae_addp24", ICLASS_ae_iclass_addp24,
+ 0,
+ Opcode_ae_addp24_encode_fns, 0, 0 },
+ { "ae_subp24", ICLASS_ae_iclass_subp24,
+ 0,
+ Opcode_ae_subp24_encode_fns, 0, 0 },
+ { "ae_negp24", ICLASS_ae_iclass_negp24,
+ 0,
+ Opcode_ae_negp24_encode_fns, 0, 0 },
+ { "ae_absp24", ICLASS_ae_iclass_absp24,
+ 0,
+ Opcode_ae_absp24_encode_fns, 0, 0 },
+ { "ae_maxp24s", ICLASS_ae_iclass_maxp24s,
+ 0,
+ Opcode_ae_maxp24s_encode_fns, 0, 0 },
+ { "ae_minp24s", ICLASS_ae_iclass_minp24s,
+ 0,
+ Opcode_ae_minp24s_encode_fns, 0, 0 },
+ { "ae_maxbp24s", ICLASS_ae_iclass_maxbp24s,
+ 0,
+ Opcode_ae_maxbp24s_encode_fns, 0, 0 },
+ { "ae_minbp24s", ICLASS_ae_iclass_minbp24s,
+ 0,
+ Opcode_ae_minbp24s_encode_fns, 0, 0 },
+ { "ae_addsp24s", ICLASS_ae_iclass_addsp24s,
+ 0,
+ Opcode_ae_addsp24s_encode_fns, 0, 0 },
+ { "ae_subsp24s", ICLASS_ae_iclass_subsp24s,
+ 0,
+ Opcode_ae_subsp24s_encode_fns, 0, 0 },
+ { "ae_negsp24s", ICLASS_ae_iclass_negsp24s,
+ 0,
+ Opcode_ae_negsp24s_encode_fns, 0, 0 },
+ { "ae_abssp24s", ICLASS_ae_iclass_abssp24s,
+ 0,
+ Opcode_ae_abssp24s_encode_fns, 0, 0 },
+ { "ae_andp48", ICLASS_ae_iclass_andp48,
+ 0,
+ Opcode_ae_andp48_encode_fns, 0, 0 },
+ { "ae_nandp48", ICLASS_ae_iclass_nandp48,
+ 0,
+ Opcode_ae_nandp48_encode_fns, 0, 0 },
+ { "ae_orp48", ICLASS_ae_iclass_orp48,
+ 0,
+ Opcode_ae_orp48_encode_fns, 0, 0 },
+ { "ae_xorp48", ICLASS_ae_iclass_xorp48,
+ 0,
+ Opcode_ae_xorp48_encode_fns, 0, 0 },
+ { "ae_ltp24s", ICLASS_ae_iclass_ltp24s,
+ 0,
+ Opcode_ae_ltp24s_encode_fns, 0, 0 },
+ { "ae_lep24s", ICLASS_ae_iclass_lep24s,
+ 0,
+ Opcode_ae_lep24s_encode_fns, 0, 0 },
+ { "ae_eqp24", ICLASS_ae_iclass_eqp24,
+ 0,
+ Opcode_ae_eqp24_encode_fns, 0, 0 },
+ { "ae_addq56", ICLASS_ae_iclass_addq56,
+ 0,
+ Opcode_ae_addq56_encode_fns, 0, 0 },
+ { "ae_subq56", ICLASS_ae_iclass_subq56,
+ 0,
+ Opcode_ae_subq56_encode_fns, 0, 0 },
+ { "ae_negq56", ICLASS_ae_iclass_negq56,
+ 0,
+ Opcode_ae_negq56_encode_fns, 0, 0 },
+ { "ae_absq56", ICLASS_ae_iclass_absq56,
+ 0,
+ Opcode_ae_absq56_encode_fns, 0, 0 },
+ { "ae_maxq56s", ICLASS_ae_iclass_maxq56s,
+ 0,
+ Opcode_ae_maxq56s_encode_fns, 0, 0 },
+ { "ae_minq56s", ICLASS_ae_iclass_minq56s,
+ 0,
+ Opcode_ae_minq56s_encode_fns, 0, 0 },
+ { "ae_maxbq56s", ICLASS_ae_iclass_maxbq56s,
+ 0,
+ Opcode_ae_maxbq56s_encode_fns, 0, 0 },
+ { "ae_minbq56s", ICLASS_ae_iclass_minbq56s,
+ 0,
+ Opcode_ae_minbq56s_encode_fns, 0, 0 },
+ { "ae_addsq56s", ICLASS_ae_iclass_addsq56s,
+ 0,
+ Opcode_ae_addsq56s_encode_fns, 0, 0 },
+ { "ae_subsq56s", ICLASS_ae_iclass_subsq56s,
+ 0,
+ Opcode_ae_subsq56s_encode_fns, 0, 0 },
+ { "ae_negsq56s", ICLASS_ae_iclass_negsq56s,
+ 0,
+ Opcode_ae_negsq56s_encode_fns, 0, 0 },
+ { "ae_abssq56s", ICLASS_ae_iclass_abssq56s,
+ 0,
+ Opcode_ae_abssq56s_encode_fns, 0, 0 },
+ { "ae_andq56", ICLASS_ae_iclass_andq56,
+ 0,
+ Opcode_ae_andq56_encode_fns, 0, 0 },
+ { "ae_nandq56", ICLASS_ae_iclass_nandq56,
+ 0,
+ Opcode_ae_nandq56_encode_fns, 0, 0 },
+ { "ae_orq56", ICLASS_ae_iclass_orq56,
+ 0,
+ Opcode_ae_orq56_encode_fns, 0, 0 },
+ { "ae_xorq56", ICLASS_ae_iclass_xorq56,
+ 0,
+ Opcode_ae_xorq56_encode_fns, 0, 0 },
+ { "ae_sllip24", ICLASS_ae_iclass_sllip24,
+ 0,
+ Opcode_ae_sllip24_encode_fns, 0, 0 },
+ { "ae_srlip24", ICLASS_ae_iclass_srlip24,
+ 0,
+ Opcode_ae_srlip24_encode_fns, 0, 0 },
+ { "ae_sraip24", ICLASS_ae_iclass_sraip24,
+ 0,
+ Opcode_ae_sraip24_encode_fns, 0, 0 },
+ { "ae_sllsp24", ICLASS_ae_iclass_sllsp24,
+ 0,
+ Opcode_ae_sllsp24_encode_fns, 0, 0 },
+ { "ae_srlsp24", ICLASS_ae_iclass_srlsp24,
+ 0,
+ Opcode_ae_srlsp24_encode_fns, 0, 0 },
+ { "ae_srasp24", ICLASS_ae_iclass_srasp24,
+ 0,
+ Opcode_ae_srasp24_encode_fns, 0, 0 },
+ { "ae_sllisp24s", ICLASS_ae_iclass_sllisp24s,
+ 0,
+ Opcode_ae_sllisp24s_encode_fns, 0, 0 },
+ { "ae_sllssp24s", ICLASS_ae_iclass_sllssp24s,
+ 0,
+ Opcode_ae_sllssp24s_encode_fns, 0, 0 },
+ { "ae_slliq56", ICLASS_ae_iclass_slliq56,
+ 0,
+ Opcode_ae_slliq56_encode_fns, 0, 0 },
+ { "ae_srliq56", ICLASS_ae_iclass_srliq56,
+ 0,
+ Opcode_ae_srliq56_encode_fns, 0, 0 },
+ { "ae_sraiq56", ICLASS_ae_iclass_sraiq56,
+ 0,
+ Opcode_ae_sraiq56_encode_fns, 0, 0 },
+ { "ae_sllsq56", ICLASS_ae_iclass_sllsq56,
+ 0,
+ Opcode_ae_sllsq56_encode_fns, 0, 0 },
+ { "ae_srlsq56", ICLASS_ae_iclass_srlsq56,
+ 0,
+ Opcode_ae_srlsq56_encode_fns, 0, 0 },
+ { "ae_srasq56", ICLASS_ae_iclass_srasq56,
+ 0,
+ Opcode_ae_srasq56_encode_fns, 0, 0 },
+ { "ae_sllaq56", ICLASS_ae_iclass_sllaq56,
+ 0,
+ Opcode_ae_sllaq56_encode_fns, 0, 0 },
+ { "ae_srlaq56", ICLASS_ae_iclass_srlaq56,
+ 0,
+ Opcode_ae_srlaq56_encode_fns, 0, 0 },
+ { "ae_sraaq56", ICLASS_ae_iclass_sraaq56,
+ 0,
+ Opcode_ae_sraaq56_encode_fns, 0, 0 },
+ { "ae_sllisq56s", ICLASS_ae_iclass_sllisq56s,
+ 0,
+ Opcode_ae_sllisq56s_encode_fns, 0, 0 },
+ { "ae_sllssq56s", ICLASS_ae_iclass_sllssq56s,
+ 0,
+ Opcode_ae_sllssq56s_encode_fns, 0, 0 },
+ { "ae_sllasq56s", ICLASS_ae_iclass_sllasq56s,
+ 0,
+ Opcode_ae_sllasq56s_encode_fns, 0, 0 },
+ { "ae_ltq56s", ICLASS_ae_iclass_ltq56s,
+ 0,
+ Opcode_ae_ltq56s_encode_fns, 0, 0 },
+ { "ae_leq56s", ICLASS_ae_iclass_leq56s,
+ 0,
+ Opcode_ae_leq56s_encode_fns, 0, 0 },
+ { "ae_eqq56", ICLASS_ae_iclass_eqq56,
+ 0,
+ Opcode_ae_eqq56_encode_fns, 0, 0 },
+ { "ae_nsaq56s", ICLASS_ae_iclass_nsaq56s,
+ 0,
+ Opcode_ae_nsaq56s_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.ll", ICLASS_ae_iclass_mulfs32p16s_ll,
+ 0,
+ Opcode_ae_mulfs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulfp24s.ll", ICLASS_ae_iclass_mulfp24s_ll,
+ 0,
+ Opcode_ae_mulfp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulp24s.ll", ICLASS_ae_iclass_mulp24s_ll,
+ 0,
+ Opcode_ae_mulp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.lh", ICLASS_ae_iclass_mulfs32p16s_lh,
+ 0,
+ Opcode_ae_mulfs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulfp24s.lh", ICLASS_ae_iclass_mulfp24s_lh,
+ 0,
+ Opcode_ae_mulfp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulp24s.lh", ICLASS_ae_iclass_mulp24s_lh,
+ 0,
+ Opcode_ae_mulp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.hl", ICLASS_ae_iclass_mulfs32p16s_hl,
+ 0,
+ Opcode_ae_mulfs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulfp24s.hl", ICLASS_ae_iclass_mulfp24s_hl,
+ 0,
+ Opcode_ae_mulfp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulp24s.hl", ICLASS_ae_iclass_mulp24s_hl,
+ 0,
+ Opcode_ae_mulp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulfs32p16s.hh", ICLASS_ae_iclass_mulfs32p16s_hh,
+ 0,
+ Opcode_ae_mulfs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulfp24s.hh", ICLASS_ae_iclass_mulfp24s_hh,
+ 0,
+ Opcode_ae_mulfp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulp24s.hh", ICLASS_ae_iclass_mulp24s_hh,
+ 0,
+ Opcode_ae_mulp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.ll", ICLASS_ae_iclass_mulafs32p16s_ll,
+ 0,
+ Opcode_ae_mulafs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulafp24s.ll", ICLASS_ae_iclass_mulafp24s_ll,
+ 0,
+ Opcode_ae_mulafp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulap24s.ll", ICLASS_ae_iclass_mulap24s_ll,
+ 0,
+ Opcode_ae_mulap24s_ll_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.lh", ICLASS_ae_iclass_mulafs32p16s_lh,
+ 0,
+ Opcode_ae_mulafs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulafp24s.lh", ICLASS_ae_iclass_mulafp24s_lh,
+ 0,
+ Opcode_ae_mulafp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulap24s.lh", ICLASS_ae_iclass_mulap24s_lh,
+ 0,
+ Opcode_ae_mulap24s_lh_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.hl", ICLASS_ae_iclass_mulafs32p16s_hl,
+ 0,
+ Opcode_ae_mulafs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulafp24s.hl", ICLASS_ae_iclass_mulafp24s_hl,
+ 0,
+ Opcode_ae_mulafp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulap24s.hl", ICLASS_ae_iclass_mulap24s_hl,
+ 0,
+ Opcode_ae_mulap24s_hl_encode_fns, 0, 0 },
+ { "ae_mulafs32p16s.hh", ICLASS_ae_iclass_mulafs32p16s_hh,
+ 0,
+ Opcode_ae_mulafs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulafp24s.hh", ICLASS_ae_iclass_mulafp24s_hh,
+ 0,
+ Opcode_ae_mulafp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulap24s.hh", ICLASS_ae_iclass_mulap24s_hh,
+ 0,
+ Opcode_ae_mulap24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.ll", ICLASS_ae_iclass_mulsfs32p16s_ll,
+ 0,
+ Opcode_ae_mulsfs32p16s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.ll", ICLASS_ae_iclass_mulsfp24s_ll,
+ 0,
+ Opcode_ae_mulsfp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsp24s.ll", ICLASS_ae_iclass_mulsp24s_ll,
+ 0,
+ Opcode_ae_mulsp24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.lh", ICLASS_ae_iclass_mulsfs32p16s_lh,
+ 0,
+ Opcode_ae_mulsfs32p16s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.lh", ICLASS_ae_iclass_mulsfp24s_lh,
+ 0,
+ Opcode_ae_mulsfp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsp24s.lh", ICLASS_ae_iclass_mulsp24s_lh,
+ 0,
+ Opcode_ae_mulsp24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.hl", ICLASS_ae_iclass_mulsfs32p16s_hl,
+ 0,
+ Opcode_ae_mulsfs32p16s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.hl", ICLASS_ae_iclass_mulsfp24s_hl,
+ 0,
+ Opcode_ae_mulsfp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsp24s.hl", ICLASS_ae_iclass_mulsp24s_hl,
+ 0,
+ Opcode_ae_mulsp24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfs32p16s.hh", ICLASS_ae_iclass_mulsfs32p16s_hh,
+ 0,
+ Opcode_ae_mulsfs32p16s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfp24s.hh", ICLASS_ae_iclass_mulsfp24s_hh,
+ 0,
+ Opcode_ae_mulsfp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsp24s.hh", ICLASS_ae_iclass_mulsp24s_hh,
+ 0,
+ Opcode_ae_mulsp24s_hh_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.ll", ICLASS_ae_iclass_mulafs56p24s_ll,
+ 0,
+ Opcode_ae_mulafs56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.ll", ICLASS_ae_iclass_mulas56p24s_ll,
+ 0,
+ Opcode_ae_mulas56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.lh", ICLASS_ae_iclass_mulafs56p24s_lh,
+ 0,
+ Opcode_ae_mulafs56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.lh", ICLASS_ae_iclass_mulas56p24s_lh,
+ 0,
+ Opcode_ae_mulas56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.hl", ICLASS_ae_iclass_mulafs56p24s_hl,
+ 0,
+ Opcode_ae_mulafs56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.hl", ICLASS_ae_iclass_mulas56p24s_hl,
+ 0,
+ Opcode_ae_mulas56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulafs56p24s.hh", ICLASS_ae_iclass_mulafs56p24s_hh,
+ 0,
+ Opcode_ae_mulafs56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulas56p24s.hh", ICLASS_ae_iclass_mulas56p24s_hh,
+ 0,
+ Opcode_ae_mulas56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.ll", ICLASS_ae_iclass_mulsfs56p24s_ll,
+ 0,
+ Opcode_ae_mulsfs56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.ll", ICLASS_ae_iclass_mulss56p24s_ll,
+ 0,
+ Opcode_ae_mulss56p24s_ll_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.lh", ICLASS_ae_iclass_mulsfs56p24s_lh,
+ 0,
+ Opcode_ae_mulsfs56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.lh", ICLASS_ae_iclass_mulss56p24s_lh,
+ 0,
+ Opcode_ae_mulss56p24s_lh_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.hl", ICLASS_ae_iclass_mulsfs56p24s_hl,
+ 0,
+ Opcode_ae_mulsfs56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.hl", ICLASS_ae_iclass_mulss56p24s_hl,
+ 0,
+ Opcode_ae_mulss56p24s_hl_encode_fns, 0, 0 },
+ { "ae_mulsfs56p24s.hh", ICLASS_ae_iclass_mulsfs56p24s_hh,
+ 0,
+ Opcode_ae_mulsfs56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulss56p24s.hh", ICLASS_ae_iclass_mulss56p24s_hh,
+ 0,
+ Opcode_ae_mulss56p24s_hh_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16s.l", ICLASS_ae_iclass_mulfq32sp16s_l,
+ 0,
+ Opcode_ae_mulfq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16s.h", ICLASS_ae_iclass_mulfq32sp16s_h,
+ 0,
+ Opcode_ae_mulfq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16u.l", ICLASS_ae_iclass_mulfq32sp16u_l,
+ 0,
+ Opcode_ae_mulfq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulfq32sp16u.h", ICLASS_ae_iclass_mulfq32sp16u_h,
+ 0,
+ Opcode_ae_mulfq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulq32sp16s.l", ICLASS_ae_iclass_mulq32sp16s_l,
+ 0,
+ Opcode_ae_mulq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulq32sp16s.h", ICLASS_ae_iclass_mulq32sp16s_h,
+ 0,
+ Opcode_ae_mulq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulq32sp16u.l", ICLASS_ae_iclass_mulq32sp16u_l,
+ 0,
+ Opcode_ae_mulq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulq32sp16u.h", ICLASS_ae_iclass_mulq32sp16u_h,
+ 0,
+ Opcode_ae_mulq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16s.l", ICLASS_ae_iclass_mulafq32sp16s_l,
+ 0,
+ Opcode_ae_mulafq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16s.h", ICLASS_ae_iclass_mulafq32sp16s_h,
+ 0,
+ Opcode_ae_mulafq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16u.l", ICLASS_ae_iclass_mulafq32sp16u_l,
+ 0,
+ Opcode_ae_mulafq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulafq32sp16u.h", ICLASS_ae_iclass_mulafq32sp16u_h,
+ 0,
+ Opcode_ae_mulafq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16s.l", ICLASS_ae_iclass_mulaq32sp16s_l,
+ 0,
+ Opcode_ae_mulaq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16s.h", ICLASS_ae_iclass_mulaq32sp16s_h,
+ 0,
+ Opcode_ae_mulaq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16u.l", ICLASS_ae_iclass_mulaq32sp16u_l,
+ 0,
+ Opcode_ae_mulaq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulaq32sp16u.h", ICLASS_ae_iclass_mulaq32sp16u_h,
+ 0,
+ Opcode_ae_mulaq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16s.l", ICLASS_ae_iclass_mulsfq32sp16s_l,
+ 0,
+ Opcode_ae_mulsfq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16s.h", ICLASS_ae_iclass_mulsfq32sp16s_h,
+ 0,
+ Opcode_ae_mulsfq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16u.l", ICLASS_ae_iclass_mulsfq32sp16u_l,
+ 0,
+ Opcode_ae_mulsfq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulsfq32sp16u.h", ICLASS_ae_iclass_mulsfq32sp16u_h,
+ 0,
+ Opcode_ae_mulsfq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16s.l", ICLASS_ae_iclass_mulsq32sp16s_l,
+ 0,
+ Opcode_ae_mulsq32sp16s_l_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16s.h", ICLASS_ae_iclass_mulsq32sp16s_h,
+ 0,
+ Opcode_ae_mulsq32sp16s_h_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16u.l", ICLASS_ae_iclass_mulsq32sp16u_l,
+ 0,
+ Opcode_ae_mulsq32sp16u_l_encode_fns, 0, 0 },
+ { "ae_mulsq32sp16u.h", ICLASS_ae_iclass_mulsq32sp16u_h,
+ 0,
+ Opcode_ae_mulsq32sp16u_h_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.ll", ICLASS_ae_iclass_mulzaaq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.ll", ICLASS_ae_iclass_mulzaafq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.ll", ICLASS_ae_iclass_mulzaaq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.ll", ICLASS_ae_iclass_mulzaafq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.hh", ICLASS_ae_iclass_mulzaaq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.hh", ICLASS_ae_iclass_mulzaafq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.hh", ICLASS_ae_iclass_mulzaaq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.hh", ICLASS_ae_iclass_mulzaafq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16s.lh", ICLASS_ae_iclass_mulzaaq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzaaq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16s.lh", ICLASS_ae_iclass_mulzaafq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzaafq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzaaq32sp16u.lh", ICLASS_ae_iclass_mulzaaq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzaaq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafq32sp16u.lh", ICLASS_ae_iclass_mulzaafq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzaafq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.ll", ICLASS_ae_iclass_mulzasq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzasq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.ll", ICLASS_ae_iclass_mulzasfq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.ll", ICLASS_ae_iclass_mulzasq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzasq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.ll", ICLASS_ae_iclass_mulzasfq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.hh", ICLASS_ae_iclass_mulzasq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzasq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.hh", ICLASS_ae_iclass_mulzasfq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.hh", ICLASS_ae_iclass_mulzasq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzasq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.hh", ICLASS_ae_iclass_mulzasfq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16s.lh", ICLASS_ae_iclass_mulzasq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzasq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16s.lh", ICLASS_ae_iclass_mulzasfq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzasfq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzasq32sp16u.lh", ICLASS_ae_iclass_mulzasq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzasq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfq32sp16u.lh", ICLASS_ae_iclass_mulzasfq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzasfq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.ll", ICLASS_ae_iclass_mulzsaq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.ll", ICLASS_ae_iclass_mulzsafq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.ll", ICLASS_ae_iclass_mulzsaq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.ll", ICLASS_ae_iclass_mulzsafq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.hh", ICLASS_ae_iclass_mulzsaq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.hh", ICLASS_ae_iclass_mulzsafq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.hh", ICLASS_ae_iclass_mulzsaq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.hh", ICLASS_ae_iclass_mulzsafq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16s.lh", ICLASS_ae_iclass_mulzsaq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzsaq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16s.lh", ICLASS_ae_iclass_mulzsafq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzsafq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzsaq32sp16u.lh", ICLASS_ae_iclass_mulzsaq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzsaq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafq32sp16u.lh", ICLASS_ae_iclass_mulzsafq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzsafq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.ll", ICLASS_ae_iclass_mulzssq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzssq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.ll", ICLASS_ae_iclass_mulzssfq32sp16s_ll,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_ll_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.ll", ICLASS_ae_iclass_mulzssq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzssq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.ll", ICLASS_ae_iclass_mulzssfq32sp16u_ll,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_ll_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.hh", ICLASS_ae_iclass_mulzssq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzssq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.hh", ICLASS_ae_iclass_mulzssfq32sp16s_hh,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_hh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.hh", ICLASS_ae_iclass_mulzssq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzssq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.hh", ICLASS_ae_iclass_mulzssfq32sp16u_hh,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_hh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16s.lh", ICLASS_ae_iclass_mulzssq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzssq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16s.lh", ICLASS_ae_iclass_mulzssfq32sp16s_lh,
+ 0,
+ Opcode_ae_mulzssfq32sp16s_lh_encode_fns, 0, 0 },
+ { "ae_mulzssq32sp16u.lh", ICLASS_ae_iclass_mulzssq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzssq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfq32sp16u.lh", ICLASS_ae_iclass_mulzssfq32sp16u_lh,
+ 0,
+ Opcode_ae_mulzssfq32sp16u_lh_encode_fns, 0, 0 },
+ { "ae_mulzaafp24s.hh.ll", ICLASS_ae_iclass_mulzaafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzaafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzaap24s.hh.ll", ICLASS_ae_iclass_mulzaap24s_hh_ll,
+ 0,
+ Opcode_ae_mulzaap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzaafp24s.hl.lh", ICLASS_ae_iclass_mulzaafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzaafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzaap24s.hl.lh", ICLASS_ae_iclass_mulzaap24s_hl_lh,
+ 0,
+ Opcode_ae_mulzaap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzasfp24s.hh.ll", ICLASS_ae_iclass_mulzasfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzasfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzasp24s.hh.ll", ICLASS_ae_iclass_mulzasp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzasp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzasfp24s.hl.lh", ICLASS_ae_iclass_mulzasfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzasfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzasp24s.hl.lh", ICLASS_ae_iclass_mulzasp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzasp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzsafp24s.hh.ll", ICLASS_ae_iclass_mulzsafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzsafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzsap24s.hh.ll", ICLASS_ae_iclass_mulzsap24s_hh_ll,
+ 0,
+ Opcode_ae_mulzsap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzsafp24s.hl.lh", ICLASS_ae_iclass_mulzsafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzsafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzsap24s.hl.lh", ICLASS_ae_iclass_mulzsap24s_hl_lh,
+ 0,
+ Opcode_ae_mulzsap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzssfp24s.hh.ll", ICLASS_ae_iclass_mulzssfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzssfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzssp24s.hh.ll", ICLASS_ae_iclass_mulzssp24s_hh_ll,
+ 0,
+ Opcode_ae_mulzssp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulzssfp24s.hl.lh", ICLASS_ae_iclass_mulzssfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzssfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulzssp24s.hl.lh", ICLASS_ae_iclass_mulzssp24s_hl_lh,
+ 0,
+ Opcode_ae_mulzssp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulaafp24s.hh.ll", ICLASS_ae_iclass_mulaafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulaafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulaap24s.hh.ll", ICLASS_ae_iclass_mulaap24s_hh_ll,
+ 0,
+ Opcode_ae_mulaap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulaafp24s.hl.lh", ICLASS_ae_iclass_mulaafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulaafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulaap24s.hl.lh", ICLASS_ae_iclass_mulaap24s_hl_lh,
+ 0,
+ Opcode_ae_mulaap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulasfp24s.hh.ll", ICLASS_ae_iclass_mulasfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulasfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulasp24s.hh.ll", ICLASS_ae_iclass_mulasp24s_hh_ll,
+ 0,
+ Opcode_ae_mulasp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulasfp24s.hl.lh", ICLASS_ae_iclass_mulasfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulasfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulasp24s.hl.lh", ICLASS_ae_iclass_mulasp24s_hl_lh,
+ 0,
+ Opcode_ae_mulasp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulsafp24s.hh.ll", ICLASS_ae_iclass_mulsafp24s_hh_ll,
+ 0,
+ Opcode_ae_mulsafp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulsap24s.hh.ll", ICLASS_ae_iclass_mulsap24s_hh_ll,
+ 0,
+ Opcode_ae_mulsap24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulsafp24s.hl.lh", ICLASS_ae_iclass_mulsafp24s_hl_lh,
+ 0,
+ Opcode_ae_mulsafp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulsap24s.hl.lh", ICLASS_ae_iclass_mulsap24s_hl_lh,
+ 0,
+ Opcode_ae_mulsap24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulssfp24s.hh.ll", ICLASS_ae_iclass_mulssfp24s_hh_ll,
+ 0,
+ Opcode_ae_mulssfp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulssp24s.hh.ll", ICLASS_ae_iclass_mulssp24s_hh_ll,
+ 0,
+ Opcode_ae_mulssp24s_hh_ll_encode_fns, 0, 0 },
+ { "ae_mulssfp24s.hl.lh", ICLASS_ae_iclass_mulssfp24s_hl_lh,
+ 0,
+ Opcode_ae_mulssfp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_mulssp24s.hl.lh", ICLASS_ae_iclass_mulssp24s_hl_lh,
+ 0,
+ Opcode_ae_mulssp24s_hl_lh_encode_fns, 0, 0 },
+ { "ae_sha32", ICLASS_ae_iclass_sha32,
+ 0,
+ Opcode_ae_sha32_encode_fns, 0, 0 },
+ { "ae_vldl32t", ICLASS_ae_iclass_vldl32t,
+ 0,
+ Opcode_ae_vldl32t_encode_fns, 1, Opcode_ae_vldl32t_funcUnit_uses },
+ { "ae_vldl16t", ICLASS_ae_iclass_vldl16t,
+ 0,
+ Opcode_ae_vldl16t_encode_fns, 1, Opcode_ae_vldl16t_funcUnit_uses },
+ { "ae_vldl16c", ICLASS_ae_iclass_vldl16c,
+ 0,
+ Opcode_ae_vldl16c_encode_fns, 3, Opcode_ae_vldl16c_funcUnit_uses },
+ { "ae_vldsht", ICLASS_ae_iclass_vldsht,
+ 0,
+ Opcode_ae_vldsht_encode_fns, 3, Opcode_ae_vldsht_funcUnit_uses },
+ { "ae_lb", ICLASS_ae_iclass_lb,
+ 0,
+ Opcode_ae_lb_encode_fns, 1, Opcode_ae_lb_funcUnit_uses },
+ { "ae_lbi", ICLASS_ae_iclass_lbi,
+ 0,
+ Opcode_ae_lbi_encode_fns, 1, Opcode_ae_lbi_funcUnit_uses },
+ { "ae_lbk", ICLASS_ae_iclass_lbk,
+ 0,
+ Opcode_ae_lbk_encode_fns, 1, Opcode_ae_lbk_funcUnit_uses },
+ { "ae_lbki", ICLASS_ae_iclass_lbki,
+ 0,
+ Opcode_ae_lbki_encode_fns, 1, Opcode_ae_lbki_funcUnit_uses },
+ { "ae_db", ICLASS_ae_iclass_db,
+ 0,
+ Opcode_ae_db_encode_fns, 2, Opcode_ae_db_funcUnit_uses },
+ { "ae_dbi", ICLASS_ae_iclass_dbi,
+ 0,
+ Opcode_ae_dbi_encode_fns, 2, Opcode_ae_dbi_funcUnit_uses },
+ { "ae_vlel32t", ICLASS_ae_iclass_vlel32t,
+ 0,
+ Opcode_ae_vlel32t_encode_fns, 1, Opcode_ae_vlel32t_funcUnit_uses },
+ { "ae_vlel16t", ICLASS_ae_iclass_vlel16t,
+ 0,
+ Opcode_ae_vlel16t_encode_fns, 1, Opcode_ae_vlel16t_funcUnit_uses },
+ { "ae_sb", ICLASS_ae_iclass_sb,
+ 0,
+ Opcode_ae_sb_encode_fns, 2, Opcode_ae_sb_funcUnit_uses },
+ { "ae_sbi", ICLASS_ae_iclass_sbi,
+ 0,
+ Opcode_ae_sbi_encode_fns, 2, Opcode_ae_sbi_funcUnit_uses },
+ { "ae_vles16c", ICLASS_ae_iclass_vles16c,
+ 0,
+ Opcode_ae_vles16c_encode_fns, 2, Opcode_ae_vles16c_funcUnit_uses },
+ { "ae_sbf", ICLASS_ae_iclass_sbf,
+ 0,
+ Opcode_ae_sbf_encode_fns, 2, Opcode_ae_sbf_funcUnit_uses }
+};
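+
+/*
+ * Layout note: each entry above appears to follow the
+ * xtensa_opcode_internal structure (declared in
+ * xtensa-isa-internal.h):
+ *   { mnemonic, iclass id, opcode flags (branch/jump/call/loop),
+ *     per-slot encode functions, funcUnit-use count, funcUnit uses }.
+ * Most opcodes use no functional units, so the last two fields are
+ * zero; only the audio-engine bitstream loads/stores near the end
+ * (ae_vldl32t through ae_sbf) declare non-zero funcUnit uses.
+ */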
+
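+/*
+ * Opcode identifiers. The enumerators below are generated in the
+ * same order as the opcode table above, so each OPCODE_x value is
+ * presumably the index of "x" in that table (e.g. the "beq" entry
+ * sits at index OPCODE_BEQ).
+ */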
+enum xtensa_opcode_id {
+ OPCODE_EXCW,
+ OPCODE_RFE,
+ OPCODE_RFDE,
+ OPCODE_SYSCALL,
+ OPCODE_CALL12,
+ OPCODE_CALL8,
+ OPCODE_CALL4,
+ OPCODE_CALLX12,
+ OPCODE_CALLX8,
+ OPCODE_CALLX4,
+ OPCODE_ENTRY,
+ OPCODE_MOVSP,
+ OPCODE_ROTW,
+ OPCODE_RETW,
+ OPCODE_RETW_N,
+ OPCODE_RFWO,
+ OPCODE_RFWU,
+ OPCODE_L32E,
+ OPCODE_S32E,
+ OPCODE_RSR_WINDOWBASE,
+ OPCODE_WSR_WINDOWBASE,
+ OPCODE_XSR_WINDOWBASE,
+ OPCODE_RSR_WINDOWSTART,
+ OPCODE_WSR_WINDOWSTART,
+ OPCODE_XSR_WINDOWSTART,
+ OPCODE_ADD_N,
+ OPCODE_ADDI_N,
+ OPCODE_BEQZ_N,
+ OPCODE_BNEZ_N,
+ OPCODE_ILL_N,
+ OPCODE_L32I_N,
+ OPCODE_MOV_N,
+ OPCODE_MOVI_N,
+ OPCODE_NOP_N,
+ OPCODE_RET_N,
+ OPCODE_S32I_N,
+ OPCODE_RUR_THREADPTR,
+ OPCODE_WUR_THREADPTR,
+ OPCODE_ADDI,
+ OPCODE_ADDMI,
+ OPCODE_ADD,
+ OPCODE_SUB,
+ OPCODE_ADDX2,
+ OPCODE_ADDX4,
+ OPCODE_ADDX8,
+ OPCODE_SUBX2,
+ OPCODE_SUBX4,
+ OPCODE_SUBX8,
+ OPCODE_AND,
+ OPCODE_OR,
+ OPCODE_XOR,
+ OPCODE_BEQI,
+ OPCODE_BNEI,
+ OPCODE_BGEI,
+ OPCODE_BLTI,
+ OPCODE_BBCI,
+ OPCODE_BBSI,
+ OPCODE_BGEUI,
+ OPCODE_BLTUI,
+ OPCODE_BEQ,
+ OPCODE_BNE,
+ OPCODE_BGE,
+ OPCODE_BLT,
+ OPCODE_BGEU,
+ OPCODE_BLTU,
+ OPCODE_BANY,
+ OPCODE_BNONE,
+ OPCODE_BALL,
+ OPCODE_BNALL,
+ OPCODE_BBC,
+ OPCODE_BBS,
+ OPCODE_BEQZ,
+ OPCODE_BNEZ,
+ OPCODE_BGEZ,
+ OPCODE_BLTZ,
+ OPCODE_CALL0,
+ OPCODE_CALLX0,
+ OPCODE_EXTUI,
+ OPCODE_ILL,
+ OPCODE_J,
+ OPCODE_JX,
+ OPCODE_L16UI,
+ OPCODE_L16SI,
+ OPCODE_L32I,
+ OPCODE_L32R,
+ OPCODE_L8UI,
+ OPCODE_LOOP,
+ OPCODE_LOOPNEZ,
+ OPCODE_LOOPGTZ,
+ OPCODE_MOVI,
+ OPCODE_MOVEQZ,
+ OPCODE_MOVNEZ,
+ OPCODE_MOVLTZ,
+ OPCODE_MOVGEZ,
+ OPCODE_NEG,
+ OPCODE_ABS,
+ OPCODE_NOP,
+ OPCODE_RET,
+ OPCODE_SIMCALL,
+ OPCODE_S16I,
+ OPCODE_S32I,
+ OPCODE_S8I,
+ OPCODE_SSR,
+ OPCODE_SSL,
+ OPCODE_SSA8L,
+ OPCODE_SSA8B,
+ OPCODE_SSAI,
+ OPCODE_SLL,
+ OPCODE_SRC,
+ OPCODE_SRL,
+ OPCODE_SRA,
+ OPCODE_SLLI,
+ OPCODE_SRAI,
+ OPCODE_SRLI,
+ OPCODE_MEMW,
+ OPCODE_EXTW,
+ OPCODE_ISYNC,
+ OPCODE_RSYNC,
+ OPCODE_ESYNC,
+ OPCODE_DSYNC,
+ OPCODE_RSIL,
+ OPCODE_RSR_LEND,
+ OPCODE_WSR_LEND,
+ OPCODE_XSR_LEND,
+ OPCODE_RSR_LCOUNT,
+ OPCODE_WSR_LCOUNT,
+ OPCODE_XSR_LCOUNT,
+ OPCODE_RSR_LBEG,
+ OPCODE_WSR_LBEG,
+ OPCODE_XSR_LBEG,
+ OPCODE_RSR_SAR,
+ OPCODE_WSR_SAR,
+ OPCODE_XSR_SAR,
+ OPCODE_RSR_LITBASE,
+ OPCODE_WSR_LITBASE,
+ OPCODE_XSR_LITBASE,
+ OPCODE_RSR_CONFIGID0,
+ OPCODE_WSR_CONFIGID0,
+ OPCODE_RSR_CONFIGID1,
+ OPCODE_RSR_PS,
+ OPCODE_WSR_PS,
+ OPCODE_XSR_PS,
+ OPCODE_RSR_EPC1,
+ OPCODE_WSR_EPC1,
+ OPCODE_XSR_EPC1,
+ OPCODE_RSR_EXCSAVE1,
+ OPCODE_WSR_EXCSAVE1,
+ OPCODE_XSR_EXCSAVE1,
+ OPCODE_RSR_EPC2,
+ OPCODE_WSR_EPC2,
+ OPCODE_XSR_EPC2,
+ OPCODE_RSR_EXCSAVE2,
+ OPCODE_WSR_EXCSAVE2,
+ OPCODE_XSR_EXCSAVE2,
+ OPCODE_RSR_EPS2,
+ OPCODE_WSR_EPS2,
+ OPCODE_XSR_EPS2,
+ OPCODE_RSR_EXCVADDR,
+ OPCODE_WSR_EXCVADDR,
+ OPCODE_XSR_EXCVADDR,
+ OPCODE_RSR_DEPC,
+ OPCODE_WSR_DEPC,
+ OPCODE_XSR_DEPC,
+ OPCODE_RSR_EXCCAUSE,
+ OPCODE_WSR_EXCCAUSE,
+ OPCODE_XSR_EXCCAUSE,
+ OPCODE_RSR_MISC0,
+ OPCODE_WSR_MISC0,
+ OPCODE_XSR_MISC0,
+ OPCODE_RSR_MISC1,
+ OPCODE_WSR_MISC1,
+ OPCODE_XSR_MISC1,
+ OPCODE_RSR_PRID,
+ OPCODE_RSR_VECBASE,
+ OPCODE_WSR_VECBASE,
+ OPCODE_XSR_VECBASE,
+ OPCODE_MUL16U,
+ OPCODE_MUL16S,
+ OPCODE_MULL,
+ OPCODE_RFI,
+ OPCODE_WAITI,
+ OPCODE_RSR_INTERRUPT,
+ OPCODE_WSR_INTSET,
+ OPCODE_WSR_INTCLEAR,
+ OPCODE_RSR_INTENABLE,
+ OPCODE_WSR_INTENABLE,
+ OPCODE_XSR_INTENABLE,
+ OPCODE_BREAK,
+ OPCODE_BREAK_N,
+ OPCODE_RSR_DEBUGCAUSE,
+ OPCODE_WSR_DEBUGCAUSE,
+ OPCODE_XSR_DEBUGCAUSE,
+ OPCODE_RSR_ICOUNT,
+ OPCODE_WSR_ICOUNT,
+ OPCODE_XSR_ICOUNT,
+ OPCODE_RSR_ICOUNTLEVEL,
+ OPCODE_WSR_ICOUNTLEVEL,
+ OPCODE_XSR_ICOUNTLEVEL,
+ OPCODE_RSR_DDR,
+ OPCODE_WSR_DDR,
+ OPCODE_XSR_DDR,
+ OPCODE_RFDO,
+ OPCODE_RFDD,
+ OPCODE_ANDB,
+ OPCODE_ANDBC,
+ OPCODE_ORB,
+ OPCODE_ORBC,
+ OPCODE_XORB,
+ OPCODE_ANY4,
+ OPCODE_ALL4,
+ OPCODE_ANY8,
+ OPCODE_ALL8,
+ OPCODE_BF,
+ OPCODE_BT,
+ OPCODE_MOVF,
+ OPCODE_MOVT,
+ OPCODE_RSR_BR,
+ OPCODE_WSR_BR,
+ OPCODE_XSR_BR,
+ OPCODE_RSR_CCOUNT,
+ OPCODE_WSR_CCOUNT,
+ OPCODE_XSR_CCOUNT,
+ OPCODE_RSR_CCOMPARE0,
+ OPCODE_WSR_CCOMPARE0,
+ OPCODE_XSR_CCOMPARE0,
+ OPCODE_RSR_CCOMPARE1,
+ OPCODE_WSR_CCOMPARE1,
+ OPCODE_XSR_CCOMPARE1,
+ OPCODE_IPF,
+ OPCODE_IHI,
+ OPCODE_III,
+ OPCODE_LICT,
+ OPCODE_LICW,
+ OPCODE_SICT,
+ OPCODE_SICW,
+ OPCODE_DHWB,
+ OPCODE_DHWBI,
+ OPCODE_DIWB,
+ OPCODE_DIWBI,
+ OPCODE_DHI,
+ OPCODE_DII,
+ OPCODE_DPFR,
+ OPCODE_DPFW,
+ OPCODE_DPFRO,
+ OPCODE_DPFWO,
+ OPCODE_SDCT,
+ OPCODE_LDCT,
+ OPCODE_WSR_PTEVADDR,
+ OPCODE_RSR_PTEVADDR,
+ OPCODE_XSR_PTEVADDR,
+ OPCODE_RSR_RASID,
+ OPCODE_WSR_RASID,
+ OPCODE_XSR_RASID,
+ OPCODE_RSR_ITLBCFG,
+ OPCODE_WSR_ITLBCFG,
+ OPCODE_XSR_ITLBCFG,
+ OPCODE_RSR_DTLBCFG,
+ OPCODE_WSR_DTLBCFG,
+ OPCODE_XSR_DTLBCFG,
+ OPCODE_IDTLB,
+ OPCODE_PDTLB,
+ OPCODE_RDTLB0,
+ OPCODE_RDTLB1,
+ OPCODE_WDTLB,
+ OPCODE_IITLB,
+ OPCODE_PITLB,
+ OPCODE_RITLB0,
+ OPCODE_RITLB1,
+ OPCODE_WITLB,
+ OPCODE_LDPTE,
+ OPCODE_HWWITLBA,
+ OPCODE_HWWDTLBA,
+ OPCODE_RSR_CPENABLE,
+ OPCODE_WSR_CPENABLE,
+ OPCODE_XSR_CPENABLE,
+ OPCODE_CLAMPS,
+ OPCODE_MIN,
+ OPCODE_MAX,
+ OPCODE_MINU,
+ OPCODE_MAXU,
+ OPCODE_NSA,
+ OPCODE_NSAU,
+ OPCODE_SEXT,
+ OPCODE_L32AI,
+ OPCODE_S32RI,
+ OPCODE_S32C1I,
+ OPCODE_RSR_SCOMPARE1,
+ OPCODE_WSR_SCOMPARE1,
+ OPCODE_XSR_SCOMPARE1,
+ OPCODE_RSR_ATOMCTL,
+ OPCODE_WSR_ATOMCTL,
+ OPCODE_XSR_ATOMCTL,
+ OPCODE_RER,
+ OPCODE_WER,
+ OPCODE_RUR_AE_OVF_SAR,
+ OPCODE_WUR_AE_OVF_SAR,
+ OPCODE_RUR_AE_BITHEAD,
+ OPCODE_WUR_AE_BITHEAD,
+ OPCODE_RUR_AE_TS_FTS_BU_BP,
+ OPCODE_WUR_AE_TS_FTS_BU_BP,
+ OPCODE_RUR_AE_SD_NO,
+ OPCODE_WUR_AE_SD_NO,
+ OPCODE_RUR_AE_OVERFLOW,
+ OPCODE_WUR_AE_OVERFLOW,
+ OPCODE_RUR_AE_SAR,
+ OPCODE_WUR_AE_SAR,
+ OPCODE_RUR_AE_BITPTR,
+ OPCODE_WUR_AE_BITPTR,
+ OPCODE_RUR_AE_BITSUSED,
+ OPCODE_WUR_AE_BITSUSED,
+ OPCODE_RUR_AE_TABLESIZE,
+ OPCODE_WUR_AE_TABLESIZE,
+ OPCODE_RUR_AE_FIRST_TS,
+ OPCODE_WUR_AE_FIRST_TS,
+ OPCODE_RUR_AE_NEXTOFFSET,
+ OPCODE_WUR_AE_NEXTOFFSET,
+ OPCODE_RUR_AE_SEARCHDONE,
+ OPCODE_WUR_AE_SEARCHDONE,
+ OPCODE_AE_LP16F_I,
+ OPCODE_AE_LP16F_IU,
+ OPCODE_AE_LP16F_X,
+ OPCODE_AE_LP16F_XU,
+ OPCODE_AE_LP24_I,
+ OPCODE_AE_LP24_IU,
+ OPCODE_AE_LP24_X,
+ OPCODE_AE_LP24_XU,
+ OPCODE_AE_LP24F_I,
+ OPCODE_AE_LP24F_IU,
+ OPCODE_AE_LP24F_X,
+ OPCODE_AE_LP24F_XU,
+ OPCODE_AE_LP16X2F_I,
+ OPCODE_AE_LP16X2F_IU,
+ OPCODE_AE_LP16X2F_X,
+ OPCODE_AE_LP16X2F_XU,
+ OPCODE_AE_LP24X2F_I,
+ OPCODE_AE_LP24X2F_IU,
+ OPCODE_AE_LP24X2F_X,
+ OPCODE_AE_LP24X2F_XU,
+ OPCODE_AE_LP24X2_I,
+ OPCODE_AE_LP24X2_IU,
+ OPCODE_AE_LP24X2_X,
+ OPCODE_AE_LP24X2_XU,
+ OPCODE_AE_SP16X2F_I,
+ OPCODE_AE_SP16X2F_IU,
+ OPCODE_AE_SP16X2F_X,
+ OPCODE_AE_SP16X2F_XU,
+ OPCODE_AE_SP24X2S_I,
+ OPCODE_AE_SP24X2S_IU,
+ OPCODE_AE_SP24X2S_X,
+ OPCODE_AE_SP24X2S_XU,
+ OPCODE_AE_SP24X2F_I,
+ OPCODE_AE_SP24X2F_IU,
+ OPCODE_AE_SP24X2F_X,
+ OPCODE_AE_SP24X2F_XU,
+ OPCODE_AE_SP16F_L_I,
+ OPCODE_AE_SP16F_L_IU,
+ OPCODE_AE_SP16F_L_X,
+ OPCODE_AE_SP16F_L_XU,
+ OPCODE_AE_SP24S_L_I,
+ OPCODE_AE_SP24S_L_IU,
+ OPCODE_AE_SP24S_L_X,
+ OPCODE_AE_SP24S_L_XU,
+ OPCODE_AE_SP24F_L_I,
+ OPCODE_AE_SP24F_L_IU,
+ OPCODE_AE_SP24F_L_X,
+ OPCODE_AE_SP24F_L_XU,
+ OPCODE_AE_LQ56_I,
+ OPCODE_AE_LQ56_IU,
+ OPCODE_AE_LQ56_X,
+ OPCODE_AE_LQ56_XU,
+ OPCODE_AE_LQ32F_I,
+ OPCODE_AE_LQ32F_IU,
+ OPCODE_AE_LQ32F_X,
+ OPCODE_AE_LQ32F_XU,
+ OPCODE_AE_SQ56S_I,
+ OPCODE_AE_SQ56S_IU,
+ OPCODE_AE_SQ56S_X,
+ OPCODE_AE_SQ56S_XU,
+ OPCODE_AE_SQ32F_I,
+ OPCODE_AE_SQ32F_IU,
+ OPCODE_AE_SQ32F_X,
+ OPCODE_AE_SQ32F_XU,
+ OPCODE_AE_ZEROP48,
+ OPCODE_AE_MOVP48,
+ OPCODE_AE_SELP24_LL,
+ OPCODE_AE_SELP24_LH,
+ OPCODE_AE_SELP24_HL,
+ OPCODE_AE_SELP24_HH,
+ OPCODE_AE_MOVTP24X2,
+ OPCODE_AE_MOVFP24X2,
+ OPCODE_AE_MOVTP48,
+ OPCODE_AE_MOVFP48,
+ OPCODE_AE_MOVPA24X2,
+ OPCODE_AE_TRUNCP24A32X2,
+ OPCODE_AE_CVTA32P24_L,
+ OPCODE_AE_CVTA32P24_H,
+ OPCODE_AE_CVTP24A16X2_LL,
+ OPCODE_AE_CVTP24A16X2_LH,
+ OPCODE_AE_CVTP24A16X2_HL,
+ OPCODE_AE_CVTP24A16X2_HH,
+ OPCODE_AE_TRUNCP24Q48X2,
+ OPCODE_AE_TRUNCP16,
+ OPCODE_AE_ROUNDSP24Q48SYM,
+ OPCODE_AE_ROUNDSP24Q48ASYM,
+ OPCODE_AE_ROUNDSP16Q48SYM,
+ OPCODE_AE_ROUNDSP16Q48ASYM,
+ OPCODE_AE_ROUNDSP16SYM,
+ OPCODE_AE_ROUNDSP16ASYM,
+ OPCODE_AE_ZEROQ56,
+ OPCODE_AE_MOVQ56,
+ OPCODE_AE_MOVTQ56,
+ OPCODE_AE_MOVFQ56,
+ OPCODE_AE_CVTQ48A32S,
+ OPCODE_AE_CVTQ48P24S_L,
+ OPCODE_AE_CVTQ48P24S_H,
+ OPCODE_AE_SATQ48S,
+ OPCODE_AE_TRUNCQ32,
+ OPCODE_AE_ROUNDSQ32SYM,
+ OPCODE_AE_ROUNDSQ32ASYM,
+ OPCODE_AE_TRUNCA32Q48,
+ OPCODE_AE_MOVAP24S_L,
+ OPCODE_AE_MOVAP24S_H,
+ OPCODE_AE_TRUNCA16P24S_L,
+ OPCODE_AE_TRUNCA16P24S_H,
+ OPCODE_AE_ADDP24,
+ OPCODE_AE_SUBP24,
+ OPCODE_AE_NEGP24,
+ OPCODE_AE_ABSP24,
+ OPCODE_AE_MAXP24S,
+ OPCODE_AE_MINP24S,
+ OPCODE_AE_MAXBP24S,
+ OPCODE_AE_MINBP24S,
+ OPCODE_AE_ADDSP24S,
+ OPCODE_AE_SUBSP24S,
+ OPCODE_AE_NEGSP24S,
+ OPCODE_AE_ABSSP24S,
+ OPCODE_AE_ANDP48,
+ OPCODE_AE_NANDP48,
+ OPCODE_AE_ORP48,
+ OPCODE_AE_XORP48,
+ OPCODE_AE_LTP24S,
+ OPCODE_AE_LEP24S,
+ OPCODE_AE_EQP24,
+ OPCODE_AE_ADDQ56,
+ OPCODE_AE_SUBQ56,
+ OPCODE_AE_NEGQ56,
+ OPCODE_AE_ABSQ56,
+ OPCODE_AE_MAXQ56S,
+ OPCODE_AE_MINQ56S,
+ OPCODE_AE_MAXBQ56S,
+ OPCODE_AE_MINBQ56S,
+ OPCODE_AE_ADDSQ56S,
+ OPCODE_AE_SUBSQ56S,
+ OPCODE_AE_NEGSQ56S,
+ OPCODE_AE_ABSSQ56S,
+ OPCODE_AE_ANDQ56,
+ OPCODE_AE_NANDQ56,
+ OPCODE_AE_ORQ56,
+ OPCODE_AE_XORQ56,
+ OPCODE_AE_SLLIP24,
+ OPCODE_AE_SRLIP24,
+ OPCODE_AE_SRAIP24,
+ OPCODE_AE_SLLSP24,
+ OPCODE_AE_SRLSP24,
+ OPCODE_AE_SRASP24,
+ OPCODE_AE_SLLISP24S,
+ OPCODE_AE_SLLSSP24S,
+ OPCODE_AE_SLLIQ56,
+ OPCODE_AE_SRLIQ56,
+ OPCODE_AE_SRAIQ56,
+ OPCODE_AE_SLLSQ56,
+ OPCODE_AE_SRLSQ56,
+ OPCODE_AE_SRASQ56,
+ OPCODE_AE_SLLAQ56,
+ OPCODE_AE_SRLAQ56,
+ OPCODE_AE_SRAAQ56,
+ OPCODE_AE_SLLISQ56S,
+ OPCODE_AE_SLLSSQ56S,
+ OPCODE_AE_SLLASQ56S,
+ OPCODE_AE_LTQ56S,
+ OPCODE_AE_LEQ56S,
+ OPCODE_AE_EQQ56,
+ OPCODE_AE_NSAQ56S,
+ OPCODE_AE_MULFS32P16S_LL,
+ OPCODE_AE_MULFP24S_LL,
+ OPCODE_AE_MULP24S_LL,
+ OPCODE_AE_MULFS32P16S_LH,
+ OPCODE_AE_MULFP24S_LH,
+ OPCODE_AE_MULP24S_LH,
+ OPCODE_AE_MULFS32P16S_HL,
+ OPCODE_AE_MULFP24S_HL,
+ OPCODE_AE_MULP24S_HL,
+ OPCODE_AE_MULFS32P16S_HH,
+ OPCODE_AE_MULFP24S_HH,
+ OPCODE_AE_MULP24S_HH,
+ OPCODE_AE_MULAFS32P16S_LL,
+ OPCODE_AE_MULAFP24S_LL,
+ OPCODE_AE_MULAP24S_LL,
+ OPCODE_AE_MULAFS32P16S_LH,
+ OPCODE_AE_MULAFP24S_LH,
+ OPCODE_AE_MULAP24S_LH,
+ OPCODE_AE_MULAFS32P16S_HL,
+ OPCODE_AE_MULAFP24S_HL,
+ OPCODE_AE_MULAP24S_HL,
+ OPCODE_AE_MULAFS32P16S_HH,
+ OPCODE_AE_MULAFP24S_HH,
+ OPCODE_AE_MULAP24S_HH,
+ OPCODE_AE_MULSFS32P16S_LL,
+ OPCODE_AE_MULSFP24S_LL,
+ OPCODE_AE_MULSP24S_LL,
+ OPCODE_AE_MULSFS32P16S_LH,
+ OPCODE_AE_MULSFP24S_LH,
+ OPCODE_AE_MULSP24S_LH,
+ OPCODE_AE_MULSFS32P16S_HL,
+ OPCODE_AE_MULSFP24S_HL,
+ OPCODE_AE_MULSP24S_HL,
+ OPCODE_AE_MULSFS32P16S_HH,
+ OPCODE_AE_MULSFP24S_HH,
+ OPCODE_AE_MULSP24S_HH,
+ OPCODE_AE_MULAFS56P24S_LL,
+ OPCODE_AE_MULAS56P24S_LL,
+ OPCODE_AE_MULAFS56P24S_LH,
+ OPCODE_AE_MULAS56P24S_LH,
+ OPCODE_AE_MULAFS56P24S_HL,
+ OPCODE_AE_MULAS56P24S_HL,
+ OPCODE_AE_MULAFS56P24S_HH,
+ OPCODE_AE_MULAS56P24S_HH,
+ OPCODE_AE_MULSFS56P24S_LL,
+ OPCODE_AE_MULSS56P24S_LL,
+ OPCODE_AE_MULSFS56P24S_LH,
+ OPCODE_AE_MULSS56P24S_LH,
+ OPCODE_AE_MULSFS56P24S_HL,
+ OPCODE_AE_MULSS56P24S_HL,
+ OPCODE_AE_MULSFS56P24S_HH,
+ OPCODE_AE_MULSS56P24S_HH,
+ OPCODE_AE_MULFQ32SP16S_L,
+ OPCODE_AE_MULFQ32SP16S_H,
+ OPCODE_AE_MULFQ32SP16U_L,
+ OPCODE_AE_MULFQ32SP16U_H,
+ OPCODE_AE_MULQ32SP16S_L,
+ OPCODE_AE_MULQ32SP16S_H,
+ OPCODE_AE_MULQ32SP16U_L,
+ OPCODE_AE_MULQ32SP16U_H,
+ OPCODE_AE_MULAFQ32SP16S_L,
+ OPCODE_AE_MULAFQ32SP16S_H,
+ OPCODE_AE_MULAFQ32SP16U_L,
+ OPCODE_AE_MULAFQ32SP16U_H,
+ OPCODE_AE_MULAQ32SP16S_L,
+ OPCODE_AE_MULAQ32SP16S_H,
+ OPCODE_AE_MULAQ32SP16U_L,
+ OPCODE_AE_MULAQ32SP16U_H,
+ OPCODE_AE_MULSFQ32SP16S_L,
+ OPCODE_AE_MULSFQ32SP16S_H,
+ OPCODE_AE_MULSFQ32SP16U_L,
+ OPCODE_AE_MULSFQ32SP16U_H,
+ OPCODE_AE_MULSQ32SP16S_L,
+ OPCODE_AE_MULSQ32SP16S_H,
+ OPCODE_AE_MULSQ32SP16U_L,
+ OPCODE_AE_MULSQ32SP16U_H,
+ OPCODE_AE_MULZAAQ32SP16S_LL,
+ OPCODE_AE_MULZAAFQ32SP16S_LL,
+ OPCODE_AE_MULZAAQ32SP16U_LL,
+ OPCODE_AE_MULZAAFQ32SP16U_LL,
+ OPCODE_AE_MULZAAQ32SP16S_HH,
+ OPCODE_AE_MULZAAFQ32SP16S_HH,
+ OPCODE_AE_MULZAAQ32SP16U_HH,
+ OPCODE_AE_MULZAAFQ32SP16U_HH,
+ OPCODE_AE_MULZAAQ32SP16S_LH,
+ OPCODE_AE_MULZAAFQ32SP16S_LH,
+ OPCODE_AE_MULZAAQ32SP16U_LH,
+ OPCODE_AE_MULZAAFQ32SP16U_LH,
+ OPCODE_AE_MULZASQ32SP16S_LL,
+ OPCODE_AE_MULZASFQ32SP16S_LL,
+ OPCODE_AE_MULZASQ32SP16U_LL,
+ OPCODE_AE_MULZASFQ32SP16U_LL,
+ OPCODE_AE_MULZASQ32SP16S_HH,
+ OPCODE_AE_MULZASFQ32SP16S_HH,
+ OPCODE_AE_MULZASQ32SP16U_HH,
+ OPCODE_AE_MULZASFQ32SP16U_HH,
+ OPCODE_AE_MULZASQ32SP16S_LH,
+ OPCODE_AE_MULZASFQ32SP16S_LH,
+ OPCODE_AE_MULZASQ32SP16U_LH,
+ OPCODE_AE_MULZASFQ32SP16U_LH,
+ OPCODE_AE_MULZSAQ32SP16S_LL,
+ OPCODE_AE_MULZSAFQ32SP16S_LL,
+ OPCODE_AE_MULZSAQ32SP16U_LL,
+ OPCODE_AE_MULZSAFQ32SP16U_LL,
+ OPCODE_AE_MULZSAQ32SP16S_HH,
+ OPCODE_AE_MULZSAFQ32SP16S_HH,
+ OPCODE_AE_MULZSAQ32SP16U_HH,
+ OPCODE_AE_MULZSAFQ32SP16U_HH,
+ OPCODE_AE_MULZSAQ32SP16S_LH,
+ OPCODE_AE_MULZSAFQ32SP16S_LH,
+ OPCODE_AE_MULZSAQ32SP16U_LH,
+ OPCODE_AE_MULZSAFQ32SP16U_LH,
+ OPCODE_AE_MULZSSQ32SP16S_LL,
+ OPCODE_AE_MULZSSFQ32SP16S_LL,
+ OPCODE_AE_MULZSSQ32SP16U_LL,
+ OPCODE_AE_MULZSSFQ32SP16U_LL,
+ OPCODE_AE_MULZSSQ32SP16S_HH,
+ OPCODE_AE_MULZSSFQ32SP16S_HH,
+ OPCODE_AE_MULZSSQ32SP16U_HH,
+ OPCODE_AE_MULZSSFQ32SP16U_HH,
+ OPCODE_AE_MULZSSQ32SP16S_LH,
+ OPCODE_AE_MULZSSFQ32SP16S_LH,
+ OPCODE_AE_MULZSSQ32SP16U_LH,
+ OPCODE_AE_MULZSSFQ32SP16U_LH,
+ OPCODE_AE_MULZAAFP24S_HH_LL,
+ OPCODE_AE_MULZAAP24S_HH_LL,
+ OPCODE_AE_MULZAAFP24S_HL_LH,
+ OPCODE_AE_MULZAAP24S_HL_LH,
+ OPCODE_AE_MULZASFP24S_HH_LL,
+ OPCODE_AE_MULZASP24S_HH_LL,
+ OPCODE_AE_MULZASFP24S_HL_LH,
+ OPCODE_AE_MULZASP24S_HL_LH,
+ OPCODE_AE_MULZSAFP24S_HH_LL,
+ OPCODE_AE_MULZSAP24S_HH_LL,
+ OPCODE_AE_MULZSAFP24S_HL_LH,
+ OPCODE_AE_MULZSAP24S_HL_LH,
+ OPCODE_AE_MULZSSFP24S_HH_LL,
+ OPCODE_AE_MULZSSP24S_HH_LL,
+ OPCODE_AE_MULZSSFP24S_HL_LH,
+ OPCODE_AE_MULZSSP24S_HL_LH,
+ OPCODE_AE_MULAAFP24S_HH_LL,
+ OPCODE_AE_MULAAP24S_HH_LL,
+ OPCODE_AE_MULAAFP24S_HL_LH,
+ OPCODE_AE_MULAAP24S_HL_LH,
+ OPCODE_AE_MULASFP24S_HH_LL,
+ OPCODE_AE_MULASP24S_HH_LL,
+ OPCODE_AE_MULASFP24S_HL_LH,
+ OPCODE_AE_MULASP24S_HL_LH,
+ OPCODE_AE_MULSAFP24S_HH_LL,
+ OPCODE_AE_MULSAP24S_HH_LL,
+ OPCODE_AE_MULSAFP24S_HL_LH,
+ OPCODE_AE_MULSAP24S_HL_LH,
+ OPCODE_AE_MULSSFP24S_HH_LL,
+ OPCODE_AE_MULSSP24S_HH_LL,
+ OPCODE_AE_MULSSFP24S_HL_LH,
+ OPCODE_AE_MULSSP24S_HL_LH,
+ OPCODE_AE_SHA32,
+ OPCODE_AE_VLDL32T,
+ OPCODE_AE_VLDL16T,
+ OPCODE_AE_VLDL16C,
+ OPCODE_AE_VLDSHT,
+ OPCODE_AE_LB,
+ OPCODE_AE_LBI,
+ OPCODE_AE_LBK,
+ OPCODE_AE_LBKI,
+ OPCODE_AE_DB,
+ OPCODE_AE_DBI,
+ OPCODE_AE_VLEL32T,
+ OPCODE_AE_VLEL16T,
+ OPCODE_AE_SB,
+ OPCODE_AE_SBI,
+ OPCODE_AE_VLES16C,
+ OPCODE_AE_SBF
+};
+
+
+/* Slot-specific opcode decode functions. */
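+/* Each decoder tests instruction fields extracted with the
+   Field_*_Slot_*_get accessors and returns the matching opcode id, or
+   XTENSA_UNDEFINED when no encoding matches. */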
+
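+/* Decoder for the 24-bit core "inst" slot. */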
+static int
+Slot_inst_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 0)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_ILL;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_RET;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_RETW;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_JX;
+ }
+ if (Field_m_Slot_inst_get (insn) == 3)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALLX0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALLX4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALLX8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALLX12;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_MOVSP;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_ISYNC;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RSYNC;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_ESYNC;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DSYNC;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ return OPCODE_EXCW;
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_MEMW;
+ if (Field_t_Slot_inst_get (insn) == 13)
+ return OPCODE_EXTW;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_NOP;
+ }
+ }
+ if (Field_r_Slot_inst_get (insn) == 3)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_RFE;
+ if (Field_s_Slot_inst_get (insn) == 2)
+ return OPCODE_RFDE;
+ if (Field_s_Slot_inst_get (insn) == 4)
+ return OPCODE_RFWO;
+ if (Field_s_Slot_inst_get (insn) == 5)
+ return OPCODE_RFWU;
+ }
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFI;
+ }
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BREAK;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SYSCALL;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SIMCALL;
+ }
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RSIL;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_WAITI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_ANY4;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_ALL4;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_ANY8;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_ALL8;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_OR;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_XOR;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_thi3_Slot_inst_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_RER;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_WER;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_ROTW;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_NSA;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_NSAU;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ {
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_HWWITLBA;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_RITLB0;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IITLB;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_PITLB;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_WITLB;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_RITLB1;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_HWWDTLBA;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_RDTLB0;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_IDTLB;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_PDTLB;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_WDTLB;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_RDTLB1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
+ if (Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_NEG;
+ if (Field_s_Slot_inst_get (insn) == 1)
+ return OPCODE_ABS;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_ADD;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_ADDX2;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_ADDX4;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_ADDX8;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_SUB;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_SUBX2;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_SUBX4;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_SUBX8;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 1)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0 ||
+ Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_SLLI;
+ if (Field_op2_Slot_inst_get (insn) == 2 ||
+ Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_SRAI;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_SRLI;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ {
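+ /* For this XSR group and the RSR/WSR groups below, the sr field
+ holds the target special register number (e.g. 3 = SAR,
+ 72 = WINDOWBASE, 230 = PS). */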
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_XSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_XSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_XSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_XSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_XSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_XSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_XSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_XSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_XSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_XSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_XSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_XSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_XSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_XSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_XSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_XSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_XSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_XSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_XSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_XSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_XSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_XSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_XSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_XSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_XSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_XSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_XSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_XSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_XSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_XSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_XSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_XSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_XSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_XSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_XSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_SRC;
+ if (Field_op2_Slot_inst_get (insn) == 9 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRL;
+ if (Field_op2_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_SLL;
+ if (Field_op2_Slot_inst_get (insn) == 11 &&
+ Field_s_Slot_inst_get (insn) == 0)
+ return OPCODE_SRA;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MUL16U;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MUL16S;
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_LICT;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_SICT;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_LICW;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_SICW;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LDCT;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_SDCT;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_RFDO;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_RFDD;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_LDPTE;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 2)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_ANDB;
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ return OPCODE_ANDBC;
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_ORB;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_ORBC;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_XORB;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MULL;
+ }
+ if (Field_op1_Slot_inst_get (insn) == 3)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_RSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_RSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_RSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_RSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_RSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_RSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_RSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_RSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_RSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_RSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_RSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_RSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_RSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_RSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_RSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_RSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_RSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_RSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_RSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_RSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 208)
+ return OPCODE_RSR_CONFIGID1;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_RSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_RSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_RSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_RSR_INTERRUPT;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_RSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_RSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_RSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_RSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_RSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_RSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 235)
+ return OPCODE_RSR_PRID;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_RSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_RSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_RSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_RSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_RSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_RSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_RSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 1)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 0)
+ return OPCODE_WSR_LBEG;
+ if (Field_sr_Slot_inst_get (insn) == 1)
+ return OPCODE_WSR_LEND;
+ if (Field_sr_Slot_inst_get (insn) == 2)
+ return OPCODE_WSR_LCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 3)
+ return OPCODE_WSR_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 4)
+ return OPCODE_WSR_BR;
+ if (Field_sr_Slot_inst_get (insn) == 5)
+ return OPCODE_WSR_LITBASE;
+ if (Field_sr_Slot_inst_get (insn) == 12)
+ return OPCODE_WSR_SCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 72)
+ return OPCODE_WSR_WINDOWBASE;
+ if (Field_sr_Slot_inst_get (insn) == 73)
+ return OPCODE_WSR_WINDOWSTART;
+ if (Field_sr_Slot_inst_get (insn) == 83)
+ return OPCODE_WSR_PTEVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 90)
+ return OPCODE_WSR_RASID;
+ if (Field_sr_Slot_inst_get (insn) == 91)
+ return OPCODE_WSR_ITLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 92)
+ return OPCODE_WSR_DTLBCFG;
+ if (Field_sr_Slot_inst_get (insn) == 99)
+ return OPCODE_WSR_ATOMCTL;
+ if (Field_sr_Slot_inst_get (insn) == 104)
+ return OPCODE_WSR_DDR;
+ if (Field_sr_Slot_inst_get (insn) == 176)
+ return OPCODE_WSR_CONFIGID0;
+ if (Field_sr_Slot_inst_get (insn) == 177)
+ return OPCODE_WSR_EPC1;
+ if (Field_sr_Slot_inst_get (insn) == 178)
+ return OPCODE_WSR_EPC2;
+ if (Field_sr_Slot_inst_get (insn) == 192)
+ return OPCODE_WSR_DEPC;
+ if (Field_sr_Slot_inst_get (insn) == 194)
+ return OPCODE_WSR_EPS2;
+ if (Field_sr_Slot_inst_get (insn) == 209)
+ return OPCODE_WSR_EXCSAVE1;
+ if (Field_sr_Slot_inst_get (insn) == 210)
+ return OPCODE_WSR_EXCSAVE2;
+ if (Field_sr_Slot_inst_get (insn) == 224)
+ return OPCODE_WSR_CPENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 226)
+ return OPCODE_WSR_INTSET;
+ if (Field_sr_Slot_inst_get (insn) == 227)
+ return OPCODE_WSR_INTCLEAR;
+ if (Field_sr_Slot_inst_get (insn) == 228)
+ return OPCODE_WSR_INTENABLE;
+ if (Field_sr_Slot_inst_get (insn) == 230)
+ return OPCODE_WSR_PS;
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WSR_VECBASE;
+ if (Field_sr_Slot_inst_get (insn) == 232)
+ return OPCODE_WSR_EXCCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 233)
+ return OPCODE_WSR_DEBUGCAUSE;
+ if (Field_sr_Slot_inst_get (insn) == 234)
+ return OPCODE_WSR_CCOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 236)
+ return OPCODE_WSR_ICOUNT;
+ if (Field_sr_Slot_inst_get (insn) == 237)
+ return OPCODE_WSR_ICOUNTLEVEL;
+ if (Field_sr_Slot_inst_get (insn) == 238)
+ return OPCODE_WSR_EXCVADDR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WSR_CCOMPARE0;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WSR_CCOMPARE1;
+ if (Field_sr_Slot_inst_get (insn) == 244)
+ return OPCODE_WSR_MISC0;
+ if (Field_sr_Slot_inst_get (insn) == 245)
+ return OPCODE_WSR_MISC1;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 2)
+ return OPCODE_SEXT;
+ if (Field_op2_Slot_inst_get (insn) == 3)
+ return OPCODE_CLAMPS;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_MIN;
+ if (Field_op2_Slot_inst_get (insn) == 5)
+ return OPCODE_MAX;
+ if (Field_op2_Slot_inst_get (insn) == 6)
+ return OPCODE_MINU;
+ if (Field_op2_Slot_inst_get (insn) == 7)
+ return OPCODE_MAXU;
+ if (Field_op2_Slot_inst_get (insn) == 8)
+ return OPCODE_MOVEQZ;
+ if (Field_op2_Slot_inst_get (insn) == 9)
+ return OPCODE_MOVNEZ;
+ if (Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVLTZ;
+ if (Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_MOVGEZ;
+ if (Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_MOVF;
+ if (Field_op2_Slot_inst_get (insn) == 13)
+ return OPCODE_MOVT;
+ if (Field_op2_Slot_inst_get (insn) == 14)
+ {
+ if (Field_st_Slot_inst_get (insn) == 231)
+ return OPCODE_RUR_THREADPTR;
+ if (Field_st_Slot_inst_get (insn) == 240)
+ return OPCODE_RUR_AE_OVF_SAR;
+ if (Field_st_Slot_inst_get (insn) == 241)
+ return OPCODE_RUR_AE_BITHEAD;
+ if (Field_st_Slot_inst_get (insn) == 242)
+ return OPCODE_RUR_AE_TS_FTS_BU_BP;
+ if (Field_st_Slot_inst_get (insn) == 243)
+ return OPCODE_RUR_AE_SD_NO;
+ }
+ if (Field_op2_Slot_inst_get (insn) == 15)
+ {
+ if (Field_sr_Slot_inst_get (insn) == 231)
+ return OPCODE_WUR_THREADPTR;
+ if (Field_sr_Slot_inst_get (insn) == 240)
+ return OPCODE_WUR_AE_OVF_SAR;
+ if (Field_sr_Slot_inst_get (insn) == 241)
+ return OPCODE_WUR_AE_BITHEAD;
+ if (Field_sr_Slot_inst_get (insn) == 242)
+ return OPCODE_WUR_AE_TS_FTS_BU_BP;
+ if (Field_sr_Slot_inst_get (insn) == 243)
+ return OPCODE_WUR_AE_SD_NO;
+ }
+ }
+ if (Field_op1_Slot_inst_get (insn) == 4 ||
+ Field_op1_Slot_inst_get (insn) == 5)
+ return OPCODE_EXTUI;
+ if (Field_op1_Slot_inst_get (insn) == 9)
+ {
+ if (Field_op2_Slot_inst_get (insn) == 0)
+ return OPCODE_L32E;
+ if (Field_op2_Slot_inst_get (insn) == 4)
+ return OPCODE_S32E;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 1)
+ return OPCODE_L32R;
+ if (Field_op0_Slot_inst_get (insn) == 2)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_L32I;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_S16I;
+ if (Field_r_Slot_inst_get (insn) == 6)
+ return OPCODE_S32I;
+ if (Field_r_Slot_inst_get (insn) == 7)
+ {
+ if (Field_t_Slot_inst_get (insn) == 0)
+ return OPCODE_DPFR;
+ if (Field_t_Slot_inst_get (insn) == 1)
+ return OPCODE_DPFW;
+ if (Field_t_Slot_inst_get (insn) == 2)
+ return OPCODE_DPFRO;
+ if (Field_t_Slot_inst_get (insn) == 3)
+ return OPCODE_DPFWO;
+ if (Field_t_Slot_inst_get (insn) == 4)
+ return OPCODE_DHWB;
+ if (Field_t_Slot_inst_get (insn) == 5)
+ return OPCODE_DHWBI;
+ if (Field_t_Slot_inst_get (insn) == 6)
+ return OPCODE_DHI;
+ if (Field_t_Slot_inst_get (insn) == 7)
+ return OPCODE_DII;
+ if (Field_t_Slot_inst_get (insn) == 8)
+ {
+ if (Field_op1_Slot_inst_get (insn) == 4)
+ return OPCODE_DIWB;
+ if (Field_op1_Slot_inst_get (insn) == 5)
+ return OPCODE_DIWBI;
+ }
+ if (Field_t_Slot_inst_get (insn) == 12)
+ return OPCODE_IPF;
+ if (Field_t_Slot_inst_get (insn) == 14)
+ return OPCODE_IHI;
+ if (Field_t_Slot_inst_get (insn) == 15)
+ return OPCODE_III;
+ }
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_MOVI;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_L32AI;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_inst_get (insn) == 14)
+ return OPCODE_S32C1I;
+ if (Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_S32RI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 4)
+ {
+ if (Field_ae_r10_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_I;
+ if (Field_ae_r10_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_X;
+ if (Field_ae_r10_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_I;
+ if (Field_ae_r10_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_X;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_IU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ56_XU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_CVTQ48A32S;
+ if (Field_ae_r10_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_IU;
+ if (Field_ae_r10_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LQ32F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24S_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_ae_s3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVP48;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVPA24X2;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_CVTA32P24_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_LL;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_HL;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVAP24S_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCA16P24S_L;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP16X2F_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP16X2F_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LP24X2_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_LP24X2_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24X2S_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 12 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP16F_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_I;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_IU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_X;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 13 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_SP24F_L_XU;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCP24A32X2;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 11 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_CVTA32P24_H;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 14 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_LH;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 15 &&
+ Field_op2_Slot_inst_get (insn) == 11)
+ return OPCODE_AE_CVTP24A16X2_HH;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVAP24S_H;
+ if (Field_ae_r3_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 8 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_TRUNCA16P24S_H;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_I;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_X;
+ if (Field_ae_r32_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_TRUNCA32Q48;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_I;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_X;
+ if (Field_ae_r32_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_NSAQ56S;
+ if (Field_ae_r32_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_IU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ56S_XU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_IU;
+ if (Field_ae_r32_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SQ32F_XU;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRLIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 2 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRAIQ56;
+ if (Field_ae_s_non_samt_Slot_inst_get (insn) == 3 &&
+ Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLISQ56S;
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SHA32;
+ if (Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLDL32T;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SLLAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLDL16T;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SRLAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_LBK;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SRAAQ56;
+ if (Field_op1_Slot_inst_get (insn) == 3 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLEL32T;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SLLASQ56S;
+ if (Field_op1_Slot_inst_get (insn) == 4 &&
+ Field_op2_Slot_inst_get (insn) == 10)
+ return OPCODE_AE_VLEL16T;
+ if (Field_op1_Slot_inst_get (insn) == 5 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_MOVTQ56;
+ if (Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_MOVFQ56;
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_OVERFLOW;
+ if (Field_r_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_SBI;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_SAR;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_DB;
+ if (Field_r_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_SB;
+ if (Field_r_Slot_inst_get (insn) == 2 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_BITPTR;
+ if (Field_r_Slot_inst_get (insn) == 3 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_BITSUSED;
+ if (Field_r_Slot_inst_get (insn) == 4 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_TABLESIZE;
+ if (Field_r_Slot_inst_get (insn) == 5 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_FIRST_TS;
+ if (Field_r_Slot_inst_get (insn) == 6 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_NEXTOFFSET;
+ if (Field_r_Slot_inst_get (insn) == 7 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_WUR_AE_SEARCHDONE;
+ if (Field_r_Slot_inst_get (insn) == 8 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 10 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_VLDSHT;
+ if (Field_r_Slot_inst_get (insn) == 12 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_VLES16C;
+ if (Field_r_Slot_inst_get (insn) == 13 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_SBF;
+ if (Field_r_Slot_inst_get (insn) == 14 &&
+ Field_op1_Slot_inst_get (insn) == 7 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_VLDL16C;
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLSQ56;
+ if (Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 6 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_LB;
+ if (Field_s_Slot_inst_get (insn) == 1 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRLSQ56;
+ if (Field_s_Slot_inst_get (insn) == 2 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SRASQ56;
+ if (Field_s_Slot_inst_get (insn) == 3 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_SLLSSQ56S;
+ if (Field_s_Slot_inst_get (insn) == 4 &&
+ Field_t_Slot_inst_get (insn) == 1 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_AE_MOVQ56;
+ if (Field_s_Slot_inst_get (insn) == 8 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_OVERFLOW;
+ if (Field_s_Slot_inst_get (insn) == 9 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_SAR;
+ if (Field_s_Slot_inst_get (insn) == 10 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_BITPTR;
+ if (Field_s_Slot_inst_get (insn) == 11 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_BITSUSED;
+ if (Field_s_Slot_inst_get (insn) == 12 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_TABLESIZE;
+ if (Field_s_Slot_inst_get (insn) == 13 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_FIRST_TS;
+ if (Field_s_Slot_inst_get (insn) == 14 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_NEXTOFFSET;
+ if (Field_s_Slot_inst_get (insn) == 15 &&
+ Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op1_Slot_inst_get (insn) == 9 &&
+ Field_op2_Slot_inst_get (insn) == 12)
+ return OPCODE_RUR_AE_SEARCHDONE;
+ if (Field_t_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_LBKI;
+ if (Field_t_Slot_inst_get (insn) == 0 &&
+ Field_r_Slot_inst_get (insn) == 2 &&
+ Field_op2_Slot_inst_get (insn) == 15)
+ return OPCODE_AE_DBI;
+ if (Field_t_Slot_inst_get (insn) == 2 &&
+ Field_s_Slot_inst_get (insn) == 0 &&
+ Field_op2_Slot_inst_get (insn) == 14)
+ return OPCODE_AE_LBI;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 5)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_CALL0;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ return OPCODE_CALL4;
+ if (Field_n_Slot_inst_get (insn) == 2)
+ return OPCODE_CALL8;
+ if (Field_n_Slot_inst_get (insn) == 3)
+ return OPCODE_CALL12;
+ }
+ if (Field_op0_Slot_inst_get (insn) == 6)
+ {
+ if (Field_n_Slot_inst_get (insn) == 0)
+ return OPCODE_J;
+ if (Field_n_Slot_inst_get (insn) == 1)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQZ;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTZ;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEZ;
+ }
+ if (Field_n_Slot_inst_get (insn) == 2)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_BEQI;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ return OPCODE_BNEI;
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEI;
+ }
+ if (Field_n_Slot_inst_get (insn) == 3)
+ {
+ if (Field_m_Slot_inst_get (insn) == 0)
+ return OPCODE_ENTRY;
+ if (Field_m_Slot_inst_get (insn) == 1)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BF;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BT;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_LOOP;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_LOOPNEZ;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_LOOPGTZ;
+ }
+ if (Field_m_Slot_inst_get (insn) == 2)
+ return OPCODE_BLTUI;
+ if (Field_m_Slot_inst_get (insn) == 3)
+ return OPCODE_BGEUI;
+ }
+ }
+ if (Field_op0_Slot_inst_get (insn) == 7)
+ {
+ if (Field_r_Slot_inst_get (insn) == 0)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_inst_get (insn) == 1)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_inst_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_inst_get (insn) == 3)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_inst_get (insn) == 4)
+ return OPCODE_BALL;
+ if (Field_r_Slot_inst_get (insn) == 5)
+ return OPCODE_BBC;
+ if (Field_r_Slot_inst_get (insn) == 6 ||
+ Field_r_Slot_inst_get (insn) == 7)
+ return OPCODE_BBCI;
+ if (Field_r_Slot_inst_get (insn) == 8)
+ return OPCODE_BANY;
+ if (Field_r_Slot_inst_get (insn) == 9)
+ return OPCODE_BNE;
+ if (Field_r_Slot_inst_get (insn) == 10)
+ return OPCODE_BGE;
+ if (Field_r_Slot_inst_get (insn) == 11)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_inst_get (insn) == 12)
+ return OPCODE_BNALL;
+ if (Field_r_Slot_inst_get (insn) == 13)
+ return OPCODE_BBS;
+ if (Field_r_Slot_inst_get (insn) == 14 ||
+ Field_r_Slot_inst_get (insn) == 15)
+ return OPCODE_BBSI;
+ }
+ return XTENSA_UNDEFINED;
+}
+
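+/* Decoder for the 16-bit narrow "inst16b" slot. */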
+static int
+Slot_inst16b_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16b_get (insn) == 12)
+ {
+ if (Field_i_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOVI_N;
+ if (Field_i_Slot_inst16b_get (insn) == 1)
+ {
+ if (Field_z_Slot_inst16b_get (insn) == 0)
+ return OPCODE_BEQZ_N;
+ if (Field_z_Slot_inst16b_get (insn) == 1)
+ return OPCODE_BNEZ_N;
+ }
+ }
+ if (Field_op0_Slot_inst16b_get (insn) == 13)
+ {
+ if (Field_r_Slot_inst16b_get (insn) == 0)
+ return OPCODE_MOV_N;
+ if (Field_r_Slot_inst16b_get (insn) == 15)
+ {
+ if (Field_t_Slot_inst16b_get (insn) == 0)
+ return OPCODE_RET_N;
+ if (Field_t_Slot_inst16b_get (insn) == 1)
+ return OPCODE_RETW_N;
+ if (Field_t_Slot_inst16b_get (insn) == 2)
+ return OPCODE_BREAK_N;
+ if (Field_t_Slot_inst16b_get (insn) == 3 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_NOP_N;
+ if (Field_t_Slot_inst16b_get (insn) == 6 &&
+ Field_s_Slot_inst16b_get (insn) == 0)
+ return OPCODE_ILL_N;
+ }
+ }
+ return XTENSA_UNDEFINED;
+}
+
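+/* Decode the 16-bit "inst16a" slot: a flat op0 lookup covering the four
+ narrow load/store/add encodings. */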
+static int
+Slot_inst16a_decode (const xtensa_insnbuf insn)
+{
+ if (Field_op0_Slot_inst16a_get (insn) == 8)
+ return OPCODE_L32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 9)
+ return OPCODE_S32I_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 10)
+ return OPCODE_ADD_N;
+ if (Field_op0_Slot_inst16a_get (insn) == 11)
+ return OPCODE_ADDI_N;
+ return XTENSA_UNDEFINED;
+}
+
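+/* Decode slot 0 of the Audio Engine (ae_format) bundle. op0_s4 is the
+ primary selector; the generated ftsf* subfields then disambiguate the
+ AE load/store and shift forms from the core ALU and branch encodings
+ that may also issue in this slot. */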
+static int
+Slot_ae_slot0_decode (const xtensa_insnbuf insn)
+{
+ if (Field_ftsf212ae_slot0_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_J;
+ if (Field_ftsf213ae_slot0_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_EXTUI;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BGEZ;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BLTZ;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 8 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BEQZ;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 9 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_BNEZ;
+ if (Field_ftsf214ae_slot0_Slot_ae_slot0_get (insn) == 10 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVI;
+ if (Field_ftsf215ae_slot0_Slot_ae_slot0_get (insn) == 88 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRAI;
+ if (Field_ftsf215ae_slot0_Slot_ae_slot0_get (insn) == 96 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SLLI;
+ if (Field_ftsf215ae_slot0_Slot_ae_slot0_get (insn) == 123 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf364ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVTQ56;
+ if (Field_ftsf216ae_slot0_Slot_ae_slot0_get (insn) == 418 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_HH;
+ if (Field_ftsf217_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4 &&
+ Field_ae_r20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_L32I;
+ if (Field_ftsf218ae_slot0_Slot_ae_slot0_get (insn) == 419 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_I;
+ if (Field_ftsf219ae_slot0_Slot_ae_slot0_get (insn) == 420 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_HL;
+ if (Field_ftsf220ae_slot0_Slot_ae_slot0_get (insn) == 421 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_IU;
+ if (Field_ftsf221ae_slot0_Slot_ae_slot0_get (insn) == 422 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_X;
+ if (Field_ftsf222ae_slot0_Slot_ae_slot0_get (insn) == 423 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16F_XU;
+ if (Field_ftsf223ae_slot0_Slot_ae_slot0_get (insn) == 424 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_LH;
+ if (Field_ftsf224ae_slot0_Slot_ae_slot0_get (insn) == 425 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_I;
+ if (Field_ftsf225ae_slot0_Slot_ae_slot0_get (insn) == 426 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_IU;
+ if (Field_ftsf226ae_slot0_Slot_ae_slot0_get (insn) == 427 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_XU;
+ if (Field_ftsf227ae_slot0_Slot_ae_slot0_get (insn) == 428 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP16X2F_X;
+ if (Field_ftsf228ae_slot0_Slot_ae_slot0_get (insn) == 429 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_I;
+ if (Field_ftsf229ae_slot0_Slot_ae_slot0_get (insn) == 430 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_IU;
+ if (Field_ftsf230ae_slot0_Slot_ae_slot0_get (insn) == 431 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_X;
+ if (Field_ftsf231ae_slot0_Slot_ae_slot0_get (insn) == 432 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTP24A16X2_LL;
+ if (Field_ftsf232ae_slot0_Slot_ae_slot0_get (insn) == 433 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24_XU;
+ if (Field_ftsf233ae_slot0_Slot_ae_slot0_get (insn) == 434 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_I;
+ if (Field_ftsf234ae_slot0_Slot_ae_slot0_get (insn) == 435 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_XU;
+ if (Field_ftsf235ae_slot0_Slot_ae_slot0_get (insn) == 436 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_IU;
+ if (Field_ftsf236ae_slot0_Slot_ae_slot0_get (insn) == 437 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_I;
+ if (Field_ftsf237ae_slot0_Slot_ae_slot0_get (insn) == 438 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_IU;
+ if (Field_ftsf238ae_slot0_Slot_ae_slot0_get (insn) == 439 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_X;
+ if (Field_ftsf239ae_slot0_Slot_ae_slot0_get (insn) == 440 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24F_X;
+ if (Field_ftsf240ae_slot0_Slot_ae_slot0_get (insn) == 441 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2_XU;
+ if (Field_ftsf241ae_slot0_Slot_ae_slot0_get (insn) == 442 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_I;
+ if (Field_ftsf242ae_slot0_Slot_ae_slot0_get (insn) == 443 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_X;
+ if (Field_ftsf243ae_slot0_Slot_ae_slot0_get (insn) == 444 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_IU;
+ if (Field_ftsf244ae_slot0_Slot_ae_slot0_get (insn) == 445 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LP24X2F_XU;
+ if (Field_ftsf245ae_slot0_Slot_ae_slot0_get (insn) == 446 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVPA24X2;
+ if (Field_ftsf246ae_slot0_Slot_ae_slot0_get (insn) == 447 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_I;
+ if (Field_ftsf247ae_slot0_Slot_ae_slot0_get (insn) == 450 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_IU;
+ if (Field_ftsf248ae_slot0_Slot_ae_slot0_get (insn) == 451 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_X;
+ if (Field_ftsf249ae_slot0_Slot_ae_slot0_get (insn) == 452 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_X;
+ if (Field_ftsf250ae_slot0_Slot_ae_slot0_get (insn) == 453 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_XU;
+ if (Field_ftsf251ae_slot0_Slot_ae_slot0_get (insn) == 454 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_I;
+ if (Field_ftsf252ae_slot0_Slot_ae_slot0_get (insn) == 455 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_IU;
+ if (Field_ftsf253ae_slot0_Slot_ae_slot0_get (insn) == 456 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16F_L_XU;
+ if (Field_ftsf254ae_slot0_Slot_ae_slot0_get (insn) == 457 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_X;
+ if (Field_ftsf255ae_slot0_Slot_ae_slot0_get (insn) == 458 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24F_L_XU;
+ if (Field_ftsf256ae_slot0_Slot_ae_slot0_get (insn) == 459 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_IU;
+ if (Field_ftsf257ae_slot0_Slot_ae_slot0_get (insn) == 460 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_I;
+ if (Field_ftsf258ae_slot0_Slot_ae_slot0_get (insn) == 461 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_X;
+ if (Field_ftsf259ae_slot0_Slot_ae_slot0_get (insn) == 462 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24S_L_XU;
+ if (Field_ftsf260ae_slot0_Slot_ae_slot0_get (insn) == 463 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_I;
+ if (Field_ftsf261ae_slot0_Slot_ae_slot0_get (insn) == 464 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_I;
+ if (Field_ftsf262ae_slot0_Slot_ae_slot0_get (insn) == 465 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_IU;
+ if (Field_ftsf263ae_slot0_Slot_ae_slot0_get (insn) == 466 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_X;
+ if (Field_ftsf264ae_slot0_Slot_ae_slot0_get (insn) == 467 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_IU;
+ if (Field_ftsf265ae_slot0_Slot_ae_slot0_get (insn) == 468 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2F_XU;
+ if (Field_ftsf266ae_slot0_Slot_ae_slot0_get (insn) == 469 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_X;
+ if (Field_ftsf267ae_slot0_Slot_ae_slot0_get (insn) == 470 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_XU;
+ if (Field_ftsf268ae_slot0_Slot_ae_slot0_get (insn) == 471 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_TRUNCP24A32X2;
+ if (Field_ftsf269ae_slot0_Slot_ae_slot0_get (insn) == 472 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP24X2S_I;
+ if (Field_ftsf270ae_slot0_Slot_ae_slot0_get (insn) == 946 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_I;
+ if (Field_ftsf271ae_slot0_Slot_ae_slot0_get (insn) == 947 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ32F_IU;
+ if (Field_ftsf272ae_slot0_Slot_ae_slot0_get (insn) == 948 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_I;
+ if (Field_ftsf273ae_slot0_Slot_ae_slot0_get (insn) == 949 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_X;
+ if (Field_ftsf274ae_slot0_Slot_ae_slot0_get (insn) == 950 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_XU;
+ if (Field_ftsf275ae_slot0_Slot_ae_slot0_get (insn) == 951 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_I;
+ if (Field_ftsf276ae_slot0_Slot_ae_slot0_get (insn) == 952 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ32F_IU;
+ if (Field_ftsf277ae_slot0_Slot_ae_slot0_get (insn) == 953 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_IU;
+ if (Field_ftsf278ae_slot0_Slot_ae_slot0_get (insn) == 954 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_LQ56_X;
+ if (Field_ftsf279ae_slot0_Slot_ae_slot0_get (insn) == 15280 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTQ48A32S;
+ if (Field_ftsf281ae_slot0_Slot_ae_slot0_get (insn) == 60977 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_JX;
+ if (Field_ftsf282ae_slot0_Slot_ae_slot0_get (insn) == 61041 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SSR;
+ if (Field_ftsf283ae_slot0_Slot_ae_slot0_get (insn) == 30577 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf352ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_ftsf284ae_slot0_Slot_ae_slot0_get (insn) == 7641 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf354ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSA8B;
+ if (Field_ftsf286ae_slot0_Slot_ae_slot0_get (insn) == 3821 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf356ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSA8L;
+ if (Field_ftsf288ae_slot0_Slot_ae_slot0_get (insn) == 1911 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf359ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSL;
+ if (Field_ftsf290ae_slot0_Slot_ae_slot0_get (insn) == 478 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s8_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_LQ56_XU;
+ if (Field_ftsf292ae_slot0_Slot_ae_slot0_get (insn) == 1913 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ALL8;
+ if (Field_ftsf293_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBCI;
+ if (Field_ftsf293_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBSI;
+ if (Field_ftsf294ae_slot0_Slot_ae_slot0_get (insn) == 1915 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_s_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ANY8;
+ if (Field_ftsf295ae_slot0_Slot_ae_slot0_get (insn) == 959 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf358ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_SSAI;
+ if (Field_ftsf296ae_slot0_Slot_ae_slot0_get (insn) == 480 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SP16X2F_IU;
+ if (Field_ftsf297ae_slot0_Slot_ae_slot0_get (insn) == 962 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_I;
+ if (Field_ftsf298ae_slot0_Slot_ae_slot0_get (insn) == 963 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_IU;
+ if (Field_ftsf299ae_slot0_Slot_ae_slot0_get (insn) == 964 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLIQ56;
+ if (Field_ftsf299ae_slot0_Slot_ae_slot0_get (insn) == 965 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SRAIQ56;
+ if (Field_ftsf299ae_slot0_Slot_ae_slot0_get (insn) == 966 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SRLIQ56;
+ if (Field_ftsf299ae_slot0_Slot_ae_slot0_get (insn) == 968 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLISQ56S;
+ if (Field_ftsf300ae_slot0_Slot_ae_slot0_get (insn) == 3868 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ABS;
+ if (Field_ftsf300ae_slot0_Slot_ae_slot0_get (insn) == 3869 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_NEG;
+ if (Field_ftsf300ae_slot0_Slot_ae_slot0_get (insn) == 3870 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRA;
+ if (Field_ftsf300ae_slot0_Slot_ae_slot0_get (insn) == 3871 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRL;
+ if (Field_ftsf301ae_slot0_Slot_ae_slot0_get (insn) == 7752 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf321_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVP48;
+ if (Field_ftsf301ae_slot0_Slot_ae_slot0_get (insn) == 7753 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf353_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ANY4;
+ if (Field_ftsf302ae_slot0_Slot_ae_slot0_get (insn) == 31016 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf321_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_MOVQ56;
+ if (Field_ftsf303ae_slot0_Slot_ae_slot0_get (insn) == 31017 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf321_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SLLSSQ56S;
+ if (Field_ftsf304ae_slot0_Slot_ae_slot0_get (insn) == 15509 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf369ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRASQ56;
+ if (Field_ftsf306ae_slot0_Slot_ae_slot0_get (insn) == 7755 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf368ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRLSQ56;
+ if (Field_ftsf308ae_slot0_Slot_ae_slot0_get (insn) == 1939 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf366ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SLLSQ56;
+ if (Field_ftsf309ae_slot0_Slot_ae_slot0_get (insn) == 485 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf360ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_ALL4;
+ if (Field_ftsf310ae_slot0_Slot_ae_slot0_get (insn) == 972 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_X;
+ if (Field_ftsf311ae_slot0_Slot_ae_slot0_get (insn) == 973 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SQ56S_XU;
+ if (Field_ftsf312ae_slot0_Slot_ae_slot0_get (insn) == 7792 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTA32P24_H;
+ if (Field_ftsf313ae_slot0_Slot_ae_slot0_get (insn) == 7793 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_CVTA32P24_L;
+ if (Field_ftsf314ae_slot0_Slot_ae_slot0_get (insn) == 7794 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVAP24S_H;
+ if (Field_ftsf315ae_slot0_Slot_ae_slot0_get (insn) == 7795 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_TRUNCA16P24S_L;
+ if (Field_ftsf316ae_slot0_Slot_ae_slot0_get (insn) == 7796 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVAP24S_L;
+ if (Field_ftsf317ae_slot0_Slot_ae_slot0_get (insn) == 7797 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf353_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_NSAQ56S;
+ if (Field_ftsf318ae_slot0_Slot_ae_slot0_get (insn) == 3899 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf365ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_TRUNCA32Q48;
+ if (Field_ftsf319_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_ftsf361ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BT;
+ if (Field_ftsf320ae_slot0_Slot_ae_slot0_get (insn) == 975 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_TRUNCA16P24S_H;
+ if (Field_ftsf321_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_ae_s20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BLTUI;
+ if (Field_ftsf322ae_slot0_Slot_ae_slot0_get (insn) == 3920 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_MOVFQ56;
+ if (Field_ftsf323ae_slot0_Slot_ae_slot0_get (insn) == 3921 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLAQ56;
+ if (Field_ftsf324ae_slot0_Slot_ae_slot0_get (insn) == 3922 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AE_SLLASQ56S;
+ if (Field_ftsf325ae_slot0_Slot_ae_slot0_get (insn) == 3923 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SLL;
+ if (Field_ftsf326ae_slot0_Slot_ae_slot0_get (insn) == 981 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf357_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRAAQ56;
+ if (Field_ftsf328ae_slot0_Slot_ae_slot0_get (insn) == 491 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SRLAQ56;
+ if (Field_ftsf329ae_slot0_Slot_ae_slot0_get (insn) == 31 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ftsf362ae_slot0_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SQ32F_XU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 178 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADD;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 179 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX8;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 180 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX2;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 181 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_AND;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 182 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ANDB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 183 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ANDBC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 184 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ADDX4;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 185 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_CLAMPS;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 186 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MAX;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 187 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MIN;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 188 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MAXU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 189 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MINU;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 190 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVEQZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 191 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVF;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 194 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVGEZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 195 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ORB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 196 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVLTZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 197 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_ORBC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 198 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SEXT;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 199 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRC;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 200 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVNEZ;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 201 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SRLI;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 202 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 203 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX4;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 204 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX2;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 205 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_SUBX8;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 206 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_XOR;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 207 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_XORB;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 208 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_MOVT;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 224 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1)
+ return OPCODE_OR;
+ if (Field_imm8_Slot_ae_slot0_get (insn) == 244 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 1 &&
+ Field_ae_r32_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_AE_SQ32F_X;
+ if (Field_op0_s4_Slot_ae_slot0_get (insn) == 5)
+ return OPCODE_L32R;
+ if (Field_r_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_BNE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_BNONE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L16SI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L8UI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_ADDI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_L16UI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BALL;
+ if (Field_r_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S16I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BANY;
+ if (Field_r_Slot_ae_slot0_get (insn) == 6 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S32I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBC;
+ if (Field_r_Slot_ae_slot0_get (insn) == 7 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 4)
+ return OPCODE_S8I;
+ if (Field_r_Slot_ae_slot0_get (insn) == 8 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_ADDMI;
+ if (Field_r_Slot_ae_slot0_get (insn) == 9 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BBS;
+ if (Field_r_Slot_ae_slot0_get (insn) == 10 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BEQ;
+ if (Field_r_Slot_ae_slot0_get (insn) == 11 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BGEU;
+ if (Field_r_Slot_ae_slot0_get (insn) == 12 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BGE;
+ if (Field_r_Slot_ae_slot0_get (insn) == 13 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BLT;
+ if (Field_r_Slot_ae_slot0_get (insn) == 14 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BLTU;
+ if (Field_r_Slot_ae_slot0_get (insn) == 15 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 2)
+ return OPCODE_BNALL;
+ if (Field_t_Slot_ae_slot0_get (insn) == 0 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BEQI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 1 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BGEI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 2 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BGEUI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 3 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BNEI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 4 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3)
+ return OPCODE_BLTI;
+ if (Field_t_Slot_ae_slot0_get (insn) == 5 &&
+ Field_op0_s4_Slot_ae_slot0_get (insn) == 3 &&
+ Field_r_Slot_ae_slot0_get (insn) == 0)
+ return OPCODE_BF;
+ return XTENSA_UNDEFINED;
+}
+
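+/* Decode slot 1 of the ae_format bundle, which carries the AE
+ multiply/accumulate and P/Q register operations. op0_s3 selects the
+ major group; the t and ftsf* subfields pick the exact variant. */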
+static int
+Slot_ae_slot1_decode (const xtensa_insnbuf insn)
+{
+ if (Field_ftsf100ae_slot1_Slot_ae_slot1_get (insn) == 115 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_NEGSP24S;
+ if (Field_ftsf101ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf348ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ABSSP24S;
+ if (Field_ftsf103ae_slot1_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf349ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_NEGP24;
+ if (Field_ftsf104ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MAXBQ56S;
+ if (Field_ftsf105ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MINBQ56S;
+ if (Field_ftsf106ae_slot1_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ae_r32_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_EQQ56;
+ if (Field_ftsf107ae_slot1_Slot_ae_slot1_get (insn) == 48 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ADDSQ56S;
+ if (Field_ftsf108ae_slot1_Slot_ae_slot1_get (insn) == 49 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ANDQ56;
+ if (Field_ftsf109ae_slot1_Slot_ae_slot1_get (insn) == 50 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MAXQ56S;
+ if (Field_ftsf110ae_slot1_Slot_ae_slot1_get (insn) == 51 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ORQ56;
+ if (Field_ftsf111ae_slot1_Slot_ae_slot1_get (insn) == 52 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_MINQ56S;
+ if (Field_ftsf112ae_slot1_Slot_ae_slot1_get (insn) == 53 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_SUBQ56;
+ if (Field_ftsf113ae_slot1_Slot_ae_slot1_get (insn) == 54 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_SUBSQ56S;
+ if (Field_ftsf114ae_slot1_Slot_ae_slot1_get (insn) == 55 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_XORQ56;
+ if (Field_ftsf115ae_slot1_Slot_ae_slot1_get (insn) == 56 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_NANDQ56;
+ if (Field_ftsf116ae_slot1_Slot_ae_slot1_get (insn) == 57 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_ABSQ56;
+ if (Field_ftsf118ae_slot1_Slot_ae_slot1_get (insn) == 185 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5)
+ return OPCODE_AE_NEGSQ56S;
+ if (Field_ftsf119ae_slot1_Slot_ae_slot1_get (insn) == 185 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf338_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_SATQ48S;
+ if (Field_ftsf12_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf341ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LTQ56S;
+ if (Field_ftsf120ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf343ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ABSSQ56S;
+ if (Field_ftsf122ae_slot1_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf346ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_NEGQ56;
+ if (Field_ftsf124ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf339ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LEQ56S;
+ if (Field_ftsf125ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf350ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_TRUNCP24Q48X2;
+ if (Field_ftsf126ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 5 &&
+ Field_ftsf344ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ADDQ56;
+ if (Field_ftsf127ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAFP24S_HH_LL;
+ if (Field_ftsf128ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAFP24S_HL_LH;
+ if (Field_ftsf129ae_slot1_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAP24S_HH_LL;
+ if (Field_ftsf13_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf12_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_SLLISP24S;
+ if (Field_ftsf130ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_HL;
+ if (Field_ftsf131ae_slot1_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAAP24S_HL_LH;
+ if (Field_ftsf132ae_slot1_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_LH;
+ if (Field_ftsf133ae_slot1_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS32P16S_LL;
+ if (Field_ftsf134ae_slot1_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_HH;
+ if (Field_ftsf135ae_slot1_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_HH;
+ if (Field_ftsf136ae_slot1_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_HL;
+ if (Field_ftsf137ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_LH;
+ if (Field_ftsf138ae_slot1_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_HH;
+ if (Field_ftsf139ae_slot1_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFS56P24S_LL;
+ if (Field_ftsf140ae_slot1_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_HL;
+ if (Field_ftsf141ae_slot1_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_LH;
+ if (Field_ftsf142ae_slot1_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAP24S_LL;
+ if (Field_ftsf143ae_slot1_Slot_ae_slot1_get (insn) == 16 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_HL;
+ if (Field_ftsf144ae_slot1_Slot_ae_slot1_get (insn) == 17 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_HH;
+ if (Field_ftsf145ae_slot1_Slot_ae_slot1_get (insn) == 18 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_HL;
+ if (Field_ftsf146ae_slot1_Slot_ae_slot1_get (insn) == 19 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASFP24S_HH_LL;
+ if (Field_ftsf147ae_slot1_Slot_ae_slot1_get (insn) == 20 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_LH;
+ if (Field_ftsf148ae_slot1_Slot_ae_slot1_get (insn) == 21 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASFP24S_HL_LH;
+ if (Field_ftsf149ae_slot1_Slot_ae_slot1_get (insn) == 22 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASP24S_HH_LL;
+ if (Field_ftsf150ae_slot1_Slot_ae_slot1_get (insn) == 23 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULASP24S_HL_LH;
+ if (Field_ftsf151ae_slot1_Slot_ae_slot1_get (insn) == 24 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAS56P24S_LL;
+ if (Field_ftsf152ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_HH;
+ if (Field_ftsf153ae_slot1_Slot_ae_slot1_get (insn) == 26 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_HL;
+ if (Field_ftsf154ae_slot1_Slot_ae_slot1_get (insn) == 27 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_LL;
+ if (Field_ftsf155ae_slot1_Slot_ae_slot1_get (insn) == 28 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFP24S_LH;
+ if (Field_ftsf156ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_HH;
+ if (Field_ftsf157ae_slot1_Slot_ae_slot1_get (insn) == 30 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_HL;
+ if (Field_ftsf158ae_slot1_Slot_ae_slot1_get (insn) == 31 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_LH;
+ if (Field_ftsf159ae_slot1_Slot_ae_slot1_get (insn) == 32 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_LH;
+ if (Field_ftsf160ae_slot1_Slot_ae_slot1_get (insn) == 33 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULFS32P16S_LL;
+ if (Field_ftsf161ae_slot1_Slot_ae_slot1_get (insn) == 34 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_HH;
+ if (Field_ftsf162ae_slot1_Slot_ae_slot1_get (insn) == 35 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAFP24S_HH_LL;
+ if (Field_ftsf163ae_slot1_Slot_ae_slot1_get (insn) == 36 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_HL;
+ if (Field_ftsf164ae_slot1_Slot_ae_slot1_get (insn) == 37 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAFP24S_HL_LH;
+ if (Field_ftsf165ae_slot1_Slot_ae_slot1_get (insn) == 38 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAP24S_HH_LL;
+ if (Field_ftsf166ae_slot1_Slot_ae_slot1_get (insn) == 39 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSAP24S_HL_LH;
+ if (Field_ftsf167ae_slot1_Slot_ae_slot1_get (insn) == 40 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_LH;
+ if (Field_ftsf168ae_slot1_Slot_ae_slot1_get (insn) == 41 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_HH;
+ if (Field_ftsf169ae_slot1_Slot_ae_slot1_get (insn) == 42 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_HL;
+ if (Field_ftsf170ae_slot1_Slot_ae_slot1_get (insn) == 43 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_LL;
+ if (Field_ftsf171ae_slot1_Slot_ae_slot1_get (insn) == 44 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFP24S_LH;
+ if (Field_ftsf172ae_slot1_Slot_ae_slot1_get (insn) == 45 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_HH;
+ if (Field_ftsf173ae_slot1_Slot_ae_slot1_get (insn) == 46 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_HL;
+ if (Field_ftsf174ae_slot1_Slot_ae_slot1_get (insn) == 47 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_LH;
+ if (Field_ftsf175ae_slot1_Slot_ae_slot1_get (insn) == 48 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULP24S_LL;
+ if (Field_ftsf176ae_slot1_Slot_ae_slot1_get (insn) == 49 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS32P16S_LL;
+ if (Field_ftsf177ae_slot1_Slot_ae_slot1_get (insn) == 50 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_HH;
+ if (Field_ftsf178ae_slot1_Slot_ae_slot1_get (insn) == 51 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_LL;
+ if (Field_ftsf179ae_slot1_Slot_ae_slot1_get (insn) == 52 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_HL;
+ if (Field_ftsf180ae_slot1_Slot_ae_slot1_get (insn) == 53 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_HH;
+ if (Field_ftsf181ae_slot1_Slot_ae_slot1_get (insn) == 54 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_HL;
+ if (Field_ftsf182ae_slot1_Slot_ae_slot1_get (insn) == 55 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_LH;
+ if (Field_ftsf183ae_slot1_Slot_ae_slot1_get (insn) == 56 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSFS56P24S_LH;
+ if (Field_ftsf184ae_slot1_Slot_ae_slot1_get (insn) == 57 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSP24S_LL;
+ if (Field_ftsf185ae_slot1_Slot_ae_slot1_get (insn) == 58 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_HH;
+ if (Field_ftsf186ae_slot1_Slot_ae_slot1_get (insn) == 59 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_LH;
+ if (Field_ftsf187ae_slot1_Slot_ae_slot1_get (insn) == 60 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_HL;
+ if (Field_ftsf188ae_slot1_Slot_ae_slot1_get (insn) == 61 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSS56P24S_LL;
+ if (Field_ftsf189ae_slot1_Slot_ae_slot1_get (insn) == 62 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSFP24S_HH_LL;
+ if (Field_ftsf190ae_slot1_Slot_ae_slot1_get (insn) == 63 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSFP24S_HL_LH;
+ if (Field_ftsf191ae_slot1_Slot_ae_slot1_get (insn) == 64 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULAFP24S_LL;
+ if (Field_ftsf192ae_slot1_Slot_ae_slot1_get (insn) == 65 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSP24S_HH_LL;
+ if (Field_ftsf193ae_slot1_Slot_ae_slot1_get (insn) == 66 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULSSP24S_HL_LH;
+ if (Field_ftsf194ae_slot1_Slot_ae_slot1_get (insn) == 67 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASFP24S_HH_LL;
+ if (Field_ftsf195ae_slot1_Slot_ae_slot1_get (insn) == 68 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZAAFP24S_HH_LL;
+ if (Field_ftsf196ae_slot1_Slot_ae_slot1_get (insn) == 69 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASFP24S_HL_LH;
+ if (Field_ftsf197ae_slot1_Slot_ae_slot1_get (insn) == 70 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASP24S_HH_LL;
+ if (Field_ftsf198ae_slot1_Slot_ae_slot1_get (insn) == 71 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZASP24S_HL_LH;
+ if (Field_ftsf199ae_slot1_Slot_ae_slot1_get (insn) == 72 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZAAFP24S_HL_LH;
+ if (Field_ftsf200ae_slot1_Slot_ae_slot1_get (insn) == 73 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAFP24S_HH_LL;
+ if (Field_ftsf201ae_slot1_Slot_ae_slot1_get (insn) == 74 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAFP24S_HL_LH;
+ if (Field_ftsf202ae_slot1_Slot_ae_slot1_get (insn) == 75 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAP24S_HL_LH;
+ if (Field_ftsf203ae_slot1_Slot_ae_slot1_get (insn) == 76 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSAP24S_HH_LL;
+ if (Field_ftsf204ae_slot1_Slot_ae_slot1_get (insn) == 77 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSFP24S_HH_LL;
+ if (Field_ftsf205ae_slot1_Slot_ae_slot1_get (insn) == 78 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSFP24S_HL_LH;
+ if (Field_ftsf206ae_slot1_Slot_ae_slot1_get (insn) == 79 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6)
+ return OPCODE_AE_MULZSSP24S_HH_LL;
+ if (Field_ftsf207ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf336ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZAAP24S_HH_LL;
+ if (Field_ftsf209ae_slot1_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf336ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZSSP24S_HL_LH;
+ if (Field_ftsf210ae_slot1_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf337ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULZAAP24S_HL_LH;
+ if (Field_ftsf211ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 6 &&
+ Field_ftsf332ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULAFS32P16S_HH;
+ if (Field_ftsf21ae_slot1_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MAXBP24S;
+ if (Field_ftsf22ae_slot1_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MINBP24S;
+ if (Field_ftsf23ae_slot1_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVFP48;
+ if (Field_ftsf24ae_slot1_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVTP48;
+ if (Field_ftsf25ae_slot1_Slot_ae_slot1_get (insn) == 20 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ADDP24;
+ if (Field_ftsf26ae_slot1_Slot_ae_slot1_get (insn) == 21 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ANDP48;
+ if (Field_ftsf27ae_slot1_Slot_ae_slot1_get (insn) == 22 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MAXP24S;
+ if (Field_ftsf28ae_slot1_Slot_ae_slot1_get (insn) == 23 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MINP24S;
+ if (Field_ftsf29ae_slot1_Slot_ae_slot1_get (insn) == 24 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ADDSP24S;
+ if (Field_ftsf30ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_NANDP48;
+ if (Field_ftsf31ae_slot1_Slot_ae_slot1_get (insn) == 26 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ORP48;
+ if (Field_ftsf32ae_slot1_Slot_ae_slot1_get (insn) == 27 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_HL;
+ if (Field_ftsf33ae_slot1_Slot_ae_slot1_get (insn) == 28 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_HH;
+ if (Field_ftsf34ae_slot1_Slot_ae_slot1_get (insn) == 29 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_LH;
+ if (Field_ftsf35ae_slot1_Slot_ae_slot1_get (insn) == 30 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SELP24_LL;
+ if (Field_ftsf36ae_slot1_Slot_ae_slot1_get (insn) == 31 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SUBP24;
+ if (Field_ftsf37ae_slot1_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLIP24;
+ if (Field_ftsf37ae_slot1_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRAIP24;
+ if (Field_ftsf37ae_slot1_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRLIP24;
+ if (Field_ftsf38ae_slot1_Slot_ae_slot1_get (insn) == 176 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16S_L;
+ if (Field_ftsf39ae_slot1_Slot_ae_slot1_get (insn) == 177 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16U_H;
+ if (Field_ftsf40ae_slot1_Slot_ae_slot1_get (insn) == 178 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16U_L;
+ if (Field_ftsf41ae_slot1_Slot_ae_slot1_get (insn) == 179 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16U_H;
+ if (Field_ftsf42ae_slot1_Slot_ae_slot1_get (insn) == 180 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16S_H;
+ if (Field_ftsf43ae_slot1_Slot_ae_slot1_get (insn) == 181 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16U_L;
+ if (Field_ftsf44ae_slot1_Slot_ae_slot1_get (insn) == 182 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16S_H;
+ if (Field_ftsf45ae_slot1_Slot_ae_slot1_get (insn) == 183 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16S_L;
+ if (Field_ftsf46ae_slot1_Slot_ae_slot1_get (insn) == 184 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAQ32SP16S_L;
+ if (Field_ftsf47ae_slot1_Slot_ae_slot1_get (insn) == 185 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16U_H;
+ if (Field_ftsf48ae_slot1_Slot_ae_slot1_get (insn) == 186 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULFQ32SP16U_L;
+ if (Field_ftsf49ae_slot1_Slot_ae_slot1_get (insn) == 187 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16S_L;
+ if (Field_ftsf50ae_slot1_Slot_ae_slot1_get (insn) == 188 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16S_H;
+ if (Field_ftsf51ae_slot1_Slot_ae_slot1_get (insn) == 189 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16U_H;
+ if (Field_ftsf52ae_slot1_Slot_ae_slot1_get (insn) == 190 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULQ32SP16U_L;
+ if (Field_ftsf53ae_slot1_Slot_ae_slot1_get (insn) == 191 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16S_H;
+ if (Field_ftsf54ae_slot1_Slot_ae_slot1_get (insn) == 192 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULAFQ32SP16S_H;
+ if (Field_ftsf55ae_slot1_Slot_ae_slot1_get (insn) == 193 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16S_L;
+ if (Field_ftsf56ae_slot1_Slot_ae_slot1_get (insn) == 194 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16U_H;
+ if (Field_ftsf57ae_slot1_Slot_ae_slot1_get (insn) == 195 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSQ32SP16U_L;
+ if (Field_ftsf58ae_slot1_Slot_ae_slot1_get (insn) == 196 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MULSFQ32SP16U_L;
+ if (Field_ftsf59ae_slot1_Slot_ae_slot1_get (insn) == 773 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_CVTQ48P24S_H;
+ if (Field_ftsf60ae_slot1_Slot_ae_slot1_get (insn) == 789 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ZEROQ56;
+ if (Field_ftsf61ae_slot1_Slot_ae_slot1_get (insn) == 405 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf330ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_NOP;
+ if (Field_ftsf63ae_slot1_Slot_ae_slot1_get (insn) == 198 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r10_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_CVTQ48P24S_L;
+ if (Field_ftsf64ae_slot1_Slot_ae_slot1_get (insn) == 1543 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVQ56;
+ if (Field_ftsf66ae_slot1_Slot_ae_slot1_get (insn) == 1559 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSQ32ASYM;
+ if (Field_ftsf67ae_slot1_Slot_ae_slot1_get (insn) == 791 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf342ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSQ32SYM;
+ if (Field_ftsf69ae_slot1_Slot_ae_slot1_get (insn) == 407 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf340_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_TRUNCQ32;
+ if (Field_ftsf71ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULSQ32SP16S_H;
+ if (Field_ftsf72ae_slot1_Slot_ae_slot1_get (insn) == 26 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULSQ32SP16S_L;
+ if (Field_ftsf73ae_slot1_Slot_ae_slot1_get (insn) == 417 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_MOVP48;
+ if (Field_ftsf75ae_slot1_Slot_ae_slot1_get (insn) == 419 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16ASYM;
+ if (Field_ftsf76ae_slot1_Slot_ae_slot1_get (insn) == 421 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16SYM;
+ if (Field_ftsf77ae_slot1_Slot_ae_slot1_get (insn) == 423 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRASP24;
+ if (Field_ftsf78ae_slot1_Slot_ae_slot1_get (insn) == 425 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SLLSP24;
+ if (Field_ftsf79ae_slot1_Slot_ae_slot1_get (insn) == 427 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SRLSP24;
+ if (Field_ftsf80ae_slot1_Slot_ae_slot1_get (insn) == 429 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_TRUNCP16;
+ if (Field_ftsf81ae_slot1_Slot_ae_slot1_get (insn) == 431 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ZEROP48;
+ if (Field_ftsf82ae_slot1_Slot_ae_slot1_get (insn) == 109 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r10_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_SLLSSP24S;
+ if (Field_ftsf84ae_slot1_Slot_ae_slot1_get (insn) == 881 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16Q48ASYM;
+ if (Field_ftsf86ae_slot1_Slot_ae_slot1_get (insn) == 883 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_ROUNDSP16Q48SYM;
+ if (Field_ftsf87ae_slot1_Slot_ae_slot1_get (insn) == 443 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf342ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP24Q48ASYM;
+ if (Field_ftsf88ae_slot1_Slot_ae_slot1_get (insn) == 223 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf340_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ROUNDSP24Q48SYM;
+ if (Field_ftsf89ae_slot1_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf334ae_slot1_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MULSQ32SP16U_H;
+ if (Field_ftsf90ae_slot1_Slot_ae_slot1_get (insn) == 96 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_EQP24;
+ if (Field_ftsf91ae_slot1_Slot_ae_slot1_get (insn) == 97 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_LEP24S;
+ if (Field_ftsf92ae_slot1_Slot_ae_slot1_get (insn) == 49 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf208_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_LTP24S;
+ if (Field_ftsf94ae_slot1_Slot_ae_slot1_get (insn) == 25 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ftsf347_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MOVFP24X2;
+ if (Field_ftsf96ae_slot1_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_s20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_MOVTP24X2;
+ if (Field_ftsf97ae_slot1_Slot_ae_slot1_get (insn) == 112 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_SUBSP24S;
+ if (Field_ftsf98ae_slot1_Slot_ae_slot1_get (insn) == 113 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1)
+ return OPCODE_AE_XORP48;
+ if (Field_ftsf99ae_slot1_Slot_ae_slot1_get (insn) == 114 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 1 &&
+ Field_ae_r20_Slot_ae_slot1_get (insn) == 0)
+ return OPCODE_AE_ABSP24;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 0 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 1 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 2 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 3 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 4 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSAQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 5 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 6 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 7 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZASQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 8 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 9 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 10 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 11 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZAAQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 12 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAFQ32SP16U_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 13 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16S_LL;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAQ32SP16S_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 14 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 2)
+ return OPCODE_AE_MULZASFQ32SP16U_HH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 3)
+ return OPCODE_AE_MULZSAQ32SP16S_LH;
+ if (Field_t_Slot_ae_slot1_get (insn) == 15 &&
+ Field_op0_s3_Slot_ae_slot1_get (insn) == 4)
+ return OPCODE_AE_MULZSSQ32SP16U_LL;
+ return XTENSA_UNDEFINED;
+}
+
+
+/* Instruction slots. */
+
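+/* Each get/set pair below copies a slot's bit range between the full
+   instruction buffer and a zeroed slot buffer; the masks and shifts
+   encode where the slot sits within the format word(s).  */
+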
+static void
+Slot_x24_Format_inst_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffffff);
+}
+
+static void
+Slot_x24_Format_inst_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffffff) | (slotbuf[0] & 0xffffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16a_Format_inst16a_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = (insn[0] & 0xffff);
+}
+
+static void
+Slot_x16b_Format_inst16b_0_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0xffff) | (slotbuf[0] & 0xffff);
+}
+
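+/* ae_slot1 straddles the two 32-bit words of the 8-byte AE bundle:
+   bit 0 of the slot buffer comes from bit 31 of word 0, and bits 1-22
+   from the low 22 bits of word 1.  ae_slot0 below occupies bits 4-30
+   of word 0 only.  */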
+static void
+Slot_ae_format_Format_ae_slot1_31_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x80000000) >> 31);
+ slotbuf[0] = (slotbuf[0] & ~0x7ffffe) | ((insn[1] & 0x3fffff) << 1);
+}
+
+static void
+Slot_ae_format_Format_ae_slot1_31_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x80000000) | ((slotbuf[0] & 0x1) << 31);
+ insn[1] = (insn[1] & ~0x3fffff) | ((slotbuf[0] & 0x7ffffe) >> 1);
+}
+
+static void
+Slot_ae_format_Format_ae_slot0_4_get (const xtensa_insnbuf insn,
+ xtensa_insnbuf slotbuf)
+{
+ slotbuf[1] = 0;
+ slotbuf[0] = ((insn[0] & 0x7ffffff0) >> 4);
+}
+
+static void
+Slot_ae_format_Format_ae_slot0_4_set (xtensa_insnbuf insn,
+ const xtensa_insnbuf slotbuf)
+{
+ insn[0] = (insn[0] & ~0x7ffffff0) | ((slotbuf[0] & 0x7ffffff) << 4);
+}
+
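+/* Per-slot field accessor tables, indexed by field ID.  A zero entry
+   means the field does not exist in that slot; the trailing
+   Implicit_Field_* entries handle operands that are hardwired rather
+   than encoded in the instruction.  */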
+static xtensa_get_field_fn
+Slot_inst_get_field_fns[] = {
+ Field_t_Slot_inst_get,
+ Field_bbi4_Slot_inst_get,
+ Field_bbi_Slot_inst_get,
+ Field_imm12_Slot_inst_get,
+ Field_imm8_Slot_inst_get,
+ Field_s_Slot_inst_get,
+ Field_imm12b_Slot_inst_get,
+ Field_imm16_Slot_inst_get,
+ Field_m_Slot_inst_get,
+ Field_n_Slot_inst_get,
+ Field_offset_Slot_inst_get,
+ Field_op0_Slot_inst_get,
+ Field_op1_Slot_inst_get,
+ Field_op2_Slot_inst_get,
+ Field_r_Slot_inst_get,
+ Field_sa4_Slot_inst_get,
+ Field_sae4_Slot_inst_get,
+ Field_sae_Slot_inst_get,
+ Field_sal_Slot_inst_get,
+ Field_sargt_Slot_inst_get,
+ Field_sas4_Slot_inst_get,
+ Field_sas_Slot_inst_get,
+ Field_sr_Slot_inst_get,
+ Field_st_Slot_inst_get,
+ Field_thi3_Slot_inst_get,
+ Field_imm4_Slot_inst_get,
+ Field_mn_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst_get,
+ Field_s2_Slot_inst_get,
+ Field_r2_Slot_inst_get,
+ Field_t4_Slot_inst_get,
+ Field_s4_Slot_inst_get,
+ Field_r4_Slot_inst_get,
+ Field_t8_Slot_inst_get,
+ Field_s8_Slot_inst_get,
+ Field_r8_Slot_inst_get,
+ Field_xt_wbr15_imm_Slot_inst_get,
+ Field_xt_wbr18_imm_Slot_inst_get,
+ Field_ae_r3_Slot_inst_get,
+ Field_ae_s_non_samt_Slot_inst_get,
+ Field_ae_s3_Slot_inst_get,
+ Field_ae_r32_Slot_inst_get,
+ Field_ae_samt_s_t_Slot_inst_get,
+ Field_ae_r20_Slot_inst_get,
+ Field_ae_r10_Slot_inst_get,
+ Field_ae_s20_Slot_inst_get,
+ Field_ae_fld_ohba_Slot_inst_get,
+ Field_ae_fld_ohba2_Slot_inst_get,
+ 0,
+ Field_ftsf12_Slot_inst_get,
+ Field_ftsf13_Slot_inst_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst_set_field_fns[] = {
+ Field_t_Slot_inst_set,
+ Field_bbi4_Slot_inst_set,
+ Field_bbi_Slot_inst_set,
+ Field_imm12_Slot_inst_set,
+ Field_imm8_Slot_inst_set,
+ Field_s_Slot_inst_set,
+ Field_imm12b_Slot_inst_set,
+ Field_imm16_Slot_inst_set,
+ Field_m_Slot_inst_set,
+ Field_n_Slot_inst_set,
+ Field_offset_Slot_inst_set,
+ Field_op0_Slot_inst_set,
+ Field_op1_Slot_inst_set,
+ Field_op2_Slot_inst_set,
+ Field_r_Slot_inst_set,
+ Field_sa4_Slot_inst_set,
+ Field_sae4_Slot_inst_set,
+ Field_sae_Slot_inst_set,
+ Field_sal_Slot_inst_set,
+ Field_sargt_Slot_inst_set,
+ Field_sas4_Slot_inst_set,
+ Field_sas_Slot_inst_set,
+ Field_sr_Slot_inst_set,
+ Field_st_Slot_inst_set,
+ Field_thi3_Slot_inst_set,
+ Field_imm4_Slot_inst_set,
+ Field_mn_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_inst_set,
+ Field_s2_Slot_inst_set,
+ Field_r2_Slot_inst_set,
+ Field_t4_Slot_inst_set,
+ Field_s4_Slot_inst_set,
+ Field_r4_Slot_inst_set,
+ Field_t8_Slot_inst_set,
+ Field_s8_Slot_inst_set,
+ Field_r8_Slot_inst_set,
+ Field_xt_wbr15_imm_Slot_inst_set,
+ Field_xt_wbr18_imm_Slot_inst_set,
+ Field_ae_r3_Slot_inst_set,
+ Field_ae_s_non_samt_Slot_inst_set,
+ Field_ae_s3_Slot_inst_set,
+ Field_ae_r32_Slot_inst_set,
+ Field_ae_samt_s_t_Slot_inst_set,
+ Field_ae_r20_Slot_inst_set,
+ Field_ae_r10_Slot_inst_set,
+ Field_ae_s20_Slot_inst_set,
+ Field_ae_fld_ohba_Slot_inst_set,
+ Field_ae_fld_ohba2_Slot_inst_set,
+ 0,
+ Field_ftsf12_Slot_inst_set,
+ Field_ftsf13_Slot_inst_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16a_get_field_fns[] = {
+ Field_t_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_get,
+ Field_st_Slot_inst16a_get,
+ 0,
+ Field_imm4_Slot_inst16a_get,
+ 0,
+ Field_i_Slot_inst16a_get,
+ Field_imm6lo_Slot_inst16a_get,
+ Field_imm6hi_Slot_inst16a_get,
+ Field_imm7lo_Slot_inst16a_get,
+ Field_imm7hi_Slot_inst16a_get,
+ Field_z_Slot_inst16a_get,
+ Field_imm6_Slot_inst16a_get,
+ Field_imm7_Slot_inst16a_get,
+ Field_t2_Slot_inst16a_get,
+ Field_s2_Slot_inst16a_get,
+ Field_r2_Slot_inst16a_get,
+ Field_t4_Slot_inst16a_get,
+ Field_s4_Slot_inst16a_get,
+ Field_r4_Slot_inst16a_get,
+ Field_t8_Slot_inst16a_get,
+ Field_s8_Slot_inst16a_get,
+ Field_r8_Slot_inst16a_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16a_set_field_fns[] = {
+ Field_t_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16a_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16a_set,
+ Field_st_Slot_inst16a_set,
+ 0,
+ Field_imm4_Slot_inst16a_set,
+ 0,
+ Field_i_Slot_inst16a_set,
+ Field_imm6lo_Slot_inst16a_set,
+ Field_imm6hi_Slot_inst16a_set,
+ Field_imm7lo_Slot_inst16a_set,
+ Field_imm7hi_Slot_inst16a_set,
+ Field_z_Slot_inst16a_set,
+ Field_imm6_Slot_inst16a_set,
+ Field_imm7_Slot_inst16a_set,
+ Field_t2_Slot_inst16a_set,
+ Field_s2_Slot_inst16a_set,
+ Field_r2_Slot_inst16a_set,
+ Field_t4_Slot_inst16a_set,
+ Field_s4_Slot_inst16a_set,
+ Field_r4_Slot_inst16a_set,
+ Field_t8_Slot_inst16a_set,
+ Field_s8_Slot_inst16a_set,
+ Field_r8_Slot_inst16a_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_inst16b_get_field_fns[] = {
+ Field_t_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_get,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_get,
+ Field_st_Slot_inst16b_get,
+ 0,
+ Field_imm4_Slot_inst16b_get,
+ 0,
+ Field_i_Slot_inst16b_get,
+ Field_imm6lo_Slot_inst16b_get,
+ Field_imm6hi_Slot_inst16b_get,
+ Field_imm7lo_Slot_inst16b_get,
+ Field_imm7hi_Slot_inst16b_get,
+ Field_z_Slot_inst16b_get,
+ Field_imm6_Slot_inst16b_get,
+ Field_imm7_Slot_inst16b_get,
+ Field_t2_Slot_inst16b_get,
+ Field_s2_Slot_inst16b_get,
+ Field_r2_Slot_inst16b_get,
+ Field_t4_Slot_inst16b_get,
+ Field_s4_Slot_inst16b_get,
+ Field_r4_Slot_inst16b_get,
+ Field_t8_Slot_inst16b_get,
+ Field_s8_Slot_inst16b_get,
+ Field_r8_Slot_inst16b_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_inst16b_set_field_fns[] = {
+ Field_t_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_Slot_inst16b_set,
+ 0,
+ 0,
+ Field_r_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_sr_Slot_inst16b_set,
+ Field_st_Slot_inst16b_set,
+ 0,
+ Field_imm4_Slot_inst16b_set,
+ 0,
+ Field_i_Slot_inst16b_set,
+ Field_imm6lo_Slot_inst16b_set,
+ Field_imm6hi_Slot_inst16b_set,
+ Field_imm7lo_Slot_inst16b_set,
+ Field_imm7hi_Slot_inst16b_set,
+ Field_z_Slot_inst16b_set,
+ Field_imm6_Slot_inst16b_set,
+ Field_imm7_Slot_inst16b_set,
+ Field_t2_Slot_inst16b_set,
+ Field_s2_Slot_inst16b_set,
+ Field_r2_Slot_inst16b_set,
+ Field_t4_Slot_inst16b_set,
+ Field_s4_Slot_inst16b_set,
+ Field_r4_Slot_inst16b_set,
+ Field_t8_Slot_inst16b_set,
+ Field_s8_Slot_inst16b_set,
+ Field_r8_Slot_inst16b_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_ae_slot1_get_field_fns[] = {
+ Field_t_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot1_get,
+ 0,
+ Field_ae_r20_Slot_ae_slot1_get,
+ Field_ae_r10_Slot_ae_slot1_get,
+ Field_ae_s20_Slot_ae_slot1_get,
+ 0,
+ 0,
+ Field_op0_s3_Slot_ae_slot1_get,
+ Field_ftsf12_Slot_ae_slot1_get,
+ Field_ftsf13_Slot_ae_slot1_get,
+ Field_ftsf14_Slot_ae_slot1_get,
+ Field_ftsf21ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf22ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf23ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf24ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf25ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf26ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf27ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf28ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf29ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf30ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf31ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf32ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf33ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf34ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf35ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf36ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf37ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf38ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf39ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf40ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf41ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf42ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf43ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf44ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf45ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf46ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf47ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf48ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf49ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf50ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf51ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf52ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf53ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf54ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf55ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf56ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf57ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf58ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf59ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf60ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf61ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf63ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf64ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf66ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf67ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf69ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf71ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf72ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf73ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf75ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf76ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf77ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf78ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf79ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf80ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf81ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf82ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf84ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf86ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf87ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf88ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf89ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf90ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf91ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf92ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf94ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf96ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf97ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf98ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf99ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf100ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf101ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf103ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf104ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf105ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf106ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf107ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf108ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf109ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf110ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf111ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf112ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf113ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf114ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf115ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf116ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf118ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf119ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf120ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf122ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf124ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf125ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf126ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf127ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf128ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf129ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf130ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf131ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf132ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf133ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf134ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf135ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf136ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf137ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf138ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf139ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf140ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf141ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf142ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf143ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf144ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf145ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf146ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf147ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf148ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf149ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf150ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf151ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf152ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf153ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf154ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf155ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf156ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf157ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf158ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf159ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf160ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf161ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf162ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf163ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf164ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf165ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf166ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf167ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf168ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf169ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf170ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf171ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf172ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf173ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf174ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf175ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf176ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf177ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf178ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf179ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf180ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf181ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf182ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf183ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf184ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf185ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf186ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf187ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf188ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf189ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf190ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf191ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf192ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf193ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf194ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf195ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf196ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf197ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf198ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf199ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf200ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf201ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf202ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf203ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf204ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf205ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf206ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf207ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf208_Slot_ae_slot1_get,
+ Field_ftsf209ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf210ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf211ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf330ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf332ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf334ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf336ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf337ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf338_Slot_ae_slot1_get,
+ Field_ftsf339ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf340_Slot_ae_slot1_get,
+ Field_ftsf341ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf342ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf343ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf344ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf346ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf347_Slot_ae_slot1_get,
+ Field_ftsf348ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf349ae_slot1_Slot_ae_slot1_get,
+ Field_ftsf350ae_slot1_Slot_ae_slot1_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_ae_slot1_set_field_fns[] = {
+ Field_t_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_t2_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot1_set,
+ 0,
+ Field_ae_r20_Slot_ae_slot1_set,
+ Field_ae_r10_Slot_ae_slot1_set,
+ Field_ae_s20_Slot_ae_slot1_set,
+ 0,
+ 0,
+ Field_op0_s3_Slot_ae_slot1_set,
+ Field_ftsf12_Slot_ae_slot1_set,
+ Field_ftsf13_Slot_ae_slot1_set,
+ Field_ftsf14_Slot_ae_slot1_set,
+ Field_ftsf21ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf22ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf23ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf24ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf25ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf26ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf27ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf28ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf29ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf30ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf31ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf32ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf33ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf34ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf35ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf36ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf37ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf38ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf39ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf40ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf41ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf42ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf43ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf44ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf45ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf46ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf47ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf48ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf49ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf50ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf51ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf52ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf53ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf54ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf55ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf56ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf57ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf58ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf59ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf60ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf61ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf63ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf64ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf66ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf67ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf69ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf71ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf72ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf73ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf75ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf76ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf77ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf78ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf79ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf80ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf81ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf82ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf84ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf86ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf87ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf88ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf89ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf90ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf91ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf92ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf94ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf96ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf97ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf98ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf99ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf100ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf101ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf103ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf104ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf105ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf106ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf107ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf108ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf109ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf110ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf111ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf112ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf113ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf114ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf115ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf116ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf118ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf119ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf120ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf122ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf124ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf125ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf126ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf127ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf128ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf129ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf130ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf131ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf132ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf133ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf134ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf135ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf136ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf137ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf138ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf139ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf140ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf141ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf142ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf143ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf144ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf145ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf146ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf147ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf148ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf149ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf150ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf151ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf152ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf153ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf154ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf155ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf156ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf157ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf158ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf159ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf160ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf161ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf162ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf163ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf164ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf165ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf166ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf167ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf168ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf169ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf170ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf171ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf172ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf173ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf174ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf175ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf176ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf177ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf178ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf179ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf180ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf181ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf182ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf183ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf184ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf185ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf186ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf187ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf188ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf189ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf190ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf191ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf192ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf193ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf194ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf195ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf196ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf197ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf198ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf199ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf200ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf201ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf202ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf203ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf204ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf205ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf206ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf207ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf208_Slot_ae_slot1_set,
+ Field_ftsf209ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf210ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf211ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf330ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf332ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf334ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf336ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf337ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf338_Slot_ae_slot1_set,
+ Field_ftsf339ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf340_Slot_ae_slot1_set,
+ Field_ftsf341ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf342ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf343ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf344ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf346ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf347_Slot_ae_slot1_set,
+ Field_ftsf348ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf349ae_slot1_Slot_ae_slot1_set,
+ Field_ftsf350ae_slot1_Slot_ae_slot1_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
+static xtensa_get_field_fn
+Slot_ae_slot0_get_field_fns[] = {
+ Field_t_Slot_ae_slot0_get,
+ 0,
+ Field_bbi_Slot_ae_slot0_get,
+ Field_imm12_Slot_ae_slot0_get,
+ Field_imm8_Slot_ae_slot0_get,
+ Field_s_Slot_ae_slot0_get,
+ Field_imm12b_Slot_ae_slot0_get,
+ Field_imm16_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_offset_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_op2_Slot_ae_slot0_get,
+ Field_r_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_sae_Slot_ae_slot0_get,
+ Field_sal_Slot_ae_slot0_get,
+ Field_sargt_Slot_ae_slot0_get,
+ 0,
+ Field_sas_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_ae_slot0_get,
+ 0,
+ 0,
+ Field_s8_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot0_get,
+ Field_ae_samt_s_t_Slot_ae_slot0_get,
+ Field_ae_r20_Slot_ae_slot0_get,
+ Field_ae_r10_Slot_ae_slot0_get,
+ Field_ae_s20_Slot_ae_slot0_get,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_ae_slot0_get,
+ Field_ftsf212ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf213ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf214ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf215ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf216ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf217_Slot_ae_slot0_get,
+ Field_ftsf218ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf219ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf220ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf221ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf222ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf223ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf224ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf225ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf226ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf227ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf228ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf229ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf230ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf231ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf232ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf233ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf234ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf235ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf236ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf237ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf238ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf239ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf240ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf241ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf242ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf243ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf244ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf245ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf246ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf247ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf248ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf249ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf250ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf251ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf252ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf253ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf254ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf255ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf256ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf257ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf258ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf259ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf260ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf261ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf262ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf263ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf264ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf265ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf266ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf267ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf268ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf269ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf270ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf271ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf272ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf273ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf274ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf275ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf276ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf277ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf278ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf279ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf281ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf282ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf283ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf284ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf286ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf288ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf290ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf292ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf293_Slot_ae_slot0_get,
+ Field_ftsf294ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf295ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf296ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf297ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf298ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf299ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf300ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf301ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf302ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf303ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf304ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf306ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf308ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf309ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf310ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf311ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf312ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf313ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf314ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf315ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf316ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf317ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf318ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf319_Slot_ae_slot0_get,
+ Field_ftsf320ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf321_Slot_ae_slot0_get,
+ Field_ftsf322ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf323ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf324ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf325ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf326ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf328ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf329ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf352ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf353_Slot_ae_slot0_get,
+ Field_ftsf354ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf356ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf357_Slot_ae_slot0_get,
+ Field_ftsf358ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf359ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf360ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf361ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf362ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf364ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf365ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf366ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf368ae_slot0_Slot_ae_slot0_get,
+ Field_ftsf369ae_slot0_Slot_ae_slot0_get,
+ Implicit_Field_ar0_get,
+ Implicit_Field_ar4_get,
+ Implicit_Field_ar8_get,
+ Implicit_Field_ar12_get,
+ Implicit_Field_bt16_get,
+ Implicit_Field_bs16_get,
+ Implicit_Field_br16_get,
+ Implicit_Field_brall_get
+};
+
+static xtensa_set_field_fn
+Slot_ae_slot0_set_field_fns[] = {
+ Field_t_Slot_ae_slot0_set,
+ 0,
+ Field_bbi_Slot_ae_slot0_set,
+ Field_imm12_Slot_ae_slot0_set,
+ Field_imm8_Slot_ae_slot0_set,
+ Field_s_Slot_ae_slot0_set,
+ Field_imm12b_Slot_ae_slot0_set,
+ Field_imm16_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_offset_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_op2_Slot_ae_slot0_set,
+ Field_r_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_sae_Slot_ae_slot0_set,
+ Field_sal_Slot_ae_slot0_set,
+ Field_sargt_Slot_ae_slot0_set,
+ 0,
+ Field_sas_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_s4_Slot_ae_slot0_set,
+ 0,
+ 0,
+ Field_s8_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_ae_r32_Slot_ae_slot0_set,
+ Field_ae_samt_s_t_Slot_ae_slot0_set,
+ Field_ae_r20_Slot_ae_slot0_set,
+ Field_ae_r10_Slot_ae_slot0_set,
+ Field_ae_s20_Slot_ae_slot0_set,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Field_op0_s4_Slot_ae_slot0_set,
+ Field_ftsf212ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf213ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf214ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf215ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf216ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf217_Slot_ae_slot0_set,
+ Field_ftsf218ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf219ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf220ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf221ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf222ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf223ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf224ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf225ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf226ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf227ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf228ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf229ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf230ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf231ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf232ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf233ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf234ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf235ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf236ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf237ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf238ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf239ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf240ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf241ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf242ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf243ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf244ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf245ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf246ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf247ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf248ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf249ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf250ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf251ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf252ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf253ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf254ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf255ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf256ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf257ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf258ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf259ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf260ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf261ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf262ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf263ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf264ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf265ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf266ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf267ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf268ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf269ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf270ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf271ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf272ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf273ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf274ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf275ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf276ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf277ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf278ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf279ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf281ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf282ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf283ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf284ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf286ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf288ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf290ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf292ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf293_Slot_ae_slot0_set,
+ Field_ftsf294ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf295ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf296ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf297ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf298ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf299ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf300ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf301ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf302ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf303ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf304ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf306ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf308ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf309ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf310ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf311ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf312ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf313ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf314ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf315ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf316ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf317ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf318ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf319_Slot_ae_slot0_set,
+ Field_ftsf320ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf321_Slot_ae_slot0_set,
+ Field_ftsf322ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf323ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf324ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf325ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf326ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf328ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf329ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf352ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf353_Slot_ae_slot0_set,
+ Field_ftsf354ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf356ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf357_Slot_ae_slot0_set,
+ Field_ftsf358ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf359ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf360ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf361ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf362ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf364ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf365ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf366ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf368ae_slot0_Slot_ae_slot0_set,
+ Field_ftsf369ae_slot0_Slot_ae_slot0_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set,
+ Implicit_Field_set
+};
+
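+/* One entry per slot: name, the format it belongs to, its position
+   within that format, slot extract/insert functions, field accessor
+   tables, the opcode decoder for the slot, and the name of the opcode
+   that serves as a NOP there (empty if none).  */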
+static xtensa_slot_internal slots[] = {
+ { "Inst", "x24", 0,
+ Slot_x24_Format_inst_0_get, Slot_x24_Format_inst_0_set,
+ Slot_inst_get_field_fns, Slot_inst_set_field_fns,
+ Slot_inst_decode, "nop" },
+ { "Inst16a", "x16a", 0,
+ Slot_x16a_Format_inst16a_0_get, Slot_x16a_Format_inst16a_0_set,
+ Slot_inst16a_get_field_fns, Slot_inst16a_set_field_fns,
+ Slot_inst16a_decode, "" },
+ { "Inst16b", "x16b", 0,
+ Slot_x16b_Format_inst16b_0_get, Slot_x16b_Format_inst16b_0_set,
+ Slot_inst16b_get_field_fns, Slot_inst16b_set_field_fns,
+ Slot_inst16b_decode, "nop.n" },
+ { "ae_slot1", "ae_format", 1,
+ Slot_ae_format_Format_ae_slot1_31_get, Slot_ae_format_Format_ae_slot1_31_set,
+ Slot_ae_slot1_get_field_fns, Slot_ae_slot1_set_field_fns,
+ Slot_ae_slot1_decode, "nop" },
+ { "ae_slot0", "ae_format", 0,
+ Slot_ae_format_Format_ae_slot0_4_get, Slot_ae_format_Format_ae_slot0_4_set,
+ Slot_ae_slot0_get_field_fns, Slot_ae_slot0_set_field_fns,
+ Slot_ae_slot0_decode, "nop" }
+};
+
+
+/* Instruction formats. */
+
+static void
+Format_x24_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0;
+ insn[1] = 0;
+}
+
+static void
+Format_x16a_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0x8;
+ insn[1] = 0;
+}
+
+static void
+Format_x16b_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xc;
+ insn[1] = 0;
+}
+
+static void
+Format_ae_format_encode (xtensa_insnbuf insn)
+{
+ insn[0] = 0xf;
+ insn[1] = 0;
+}
+
+static int Format_x24_slots[] = { 0 };
+
+static int Format_x16a_slots[] = { 1 };
+
+static int Format_x16b_slots[] = { 2 };
+
+static int Format_ae_format_slots[] = { 4, 3 };
+
+static xtensa_format_internal formats[] = {
+ { "x24", 3, Format_x24_encode, 1, Format_x24_slots },
+ { "x16a", 2, Format_x16a_encode, 1, Format_x16a_slots },
+ { "x16b", 2, Format_x16b_encode, 1, Format_x16b_slots },
+ { "ae_format", 8, Format_ae_format_encode, 2, Format_ae_format_slots }
+};
+
+
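+/*
+ * Select the instruction format from the low bits of the first
+ * insnbuf word. The "(insn[1] & 0) == 0" tests are generated
+ * don't-care masks and are trivially true.
+ */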
+static int
+format_decoder (const xtensa_insnbuf insn)
+{
+ if ((insn[0] & 0x8) == 0 && (insn[1] & 0) == 0)
+ return 0; /* x24 */
+ if ((insn[0] & 0xc) == 0x8 && (insn[1] & 0) == 0)
+ return 1; /* x16a */
+ if ((insn[0] & 0xe) == 0xc && (insn[1] & 0) == 0)
+ return 2; /* x16b */
+ if ((insn[0] & 0xf) == 0xf && (insn[1] & 0xffc00000) == 0)
+ return 3; /* ae_format */
+ return -1;
+}
+
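+/*
+ * Instruction length indexed by the first instruction byte. Only the
+ * low nibble matters (see format_decoder above), so the same 16-entry
+ * pattern repeats 16 times: low nibble 0x0-0x7 -> 3 bytes (x24),
+ * 0x8-0xd -> 2 bytes (x16a/x16b), 0xe -> invalid (-1),
+ * 0xf -> 8 bytes (ae_format).
+ */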
+static int length_table[256] = {
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8,
+ 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, -1, 8
+};
+
+static int
+length_decoder (const unsigned char *insn)
+{
+ int l = insn[0];
+ return length_table[l];
+}
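+
+/*
+ * Illustration: a buffer whose first byte is 0x4d decodes to
+ * length_table[0x4d] == 2 (low nibble 0xd, format x16b), while a
+ * first byte of 0x0f selects the 8-byte ae_format.
+ */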
+
+
+/* Top-level ISA structure. */
+
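+/*
+ * The element counts below (4 formats, 5 slots, 389 fields, 454
+ * operands, 588 iclasses, 656 opcodes, 8 regfiles, ...) must stay in
+ * sync with the generated tables earlier in this file.
+ */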
+xtensa_isa_internal xtensa_modules = {
+ 0 /* little-endian */,
+ 8 /* insn_size */, 0,
+ 4, formats, format_decoder, length_decoder,
+ 5, slots,
+ 389 /* num_fields */,
+ 454, operands,
+ 588, iclasses,
+ 656, opcodes, 0,
+ 8, regfiles,
+ NUM_STATES, states, 0,
+ NUM_SYSREGS, sysregs, 0,
+ { MAX_SPECIAL_REG, MAX_USER_REG }, { 0, 0 },
+ 2, interfaces, 0,
+ 4, funcUnits, 0
+};
diff --git a/target/xtensa/cores.list b/target/xtensa/cores.list
new file mode 100644
index 000000000..5772a00ab
--- /dev/null
+++ b/target/xtensa/cores.list
@@ -0,0 +1,9 @@
+core-dc232b.c
+core-dc233c.c
+core-de212.c
+core-de233_fpu.c
+core-dsp3400.c
+core-fsf.c
+core-sample_controller.c
+core-test_kc705_be.c
+core-test_mmuhifi_c3.c
diff --git a/target/xtensa/cpu-param.h b/target/xtensa/cpu-param.h
new file mode 100644
index 000000000..4fde21b94
--- /dev/null
+++ b/target/xtensa/cpu-param.h
@@ -0,0 +1,21 @@
+/*
+ * Xtensa CPU parameters for QEMU.
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef XTENSA_CPU_PARAM_H
+#define XTENSA_CPU_PARAM_H 1
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#ifdef CONFIG_USER_ONLY
+#define TARGET_VIRT_ADDR_SPACE_BITS 30
+#else
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+#define NB_MMU_MODES 4
+
+#endif
diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h
new file mode 100644
index 000000000..41d985967
--- /dev/null
+++ b/target/xtensa/cpu-qom.h
@@ -0,0 +1,62 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef QEMU_XTENSA_CPU_QOM_H
+#define QEMU_XTENSA_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+#include "qom/object.h"
+
+#define TYPE_XTENSA_CPU "xtensa-cpu"
+
+OBJECT_DECLARE_TYPE(XtensaCPU, XtensaCPUClass,
+ XTENSA_CPU)
+
+typedef struct XtensaConfig XtensaConfig;
+
+/**
+ * XtensaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @config: The CPU core configuration.
+ *
+ * An Xtensa CPU model.
+ */
+struct XtensaCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceReset parent_reset;
+
+ const XtensaConfig *config;
+};
+
+
+#endif
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
new file mode 100644
index 000000000..224f72323
--- /dev/null
+++ b/target/xtensa/cpu.c
@@ -0,0 +1,247 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "fpu/softfloat.h"
+#include "qemu/module.h"
+#include "migration/vmstate.h"
+
+
+static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool xtensa_cpu_has_work(CPUState *cs)
+{
+#ifndef CONFIG_USER_ONLY
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ return !cpu->env.runstall && cpu->env.pending_irq_level;
+#else
+ return true;
+#endif
+}
+
+#ifdef CONFIG_USER_ONLY
+static bool abi_call0;
+
+void xtensa_set_abi_call0(void)
+{
+ abi_call0 = true;
+}
+
+bool xtensa_abi_call0(void)
+{
+ return abi_call0;
+}
+#endif
+
+static void xtensa_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ XtensaCPU *cpu = XTENSA_CPU(s);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(cpu);
+ CPUXtensaState *env = &cpu->env;
+ bool dfpu = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_DFP_COPROCESSOR);
+
+ xcc->parent_reset(dev);
+
+ env->pc = env->config->exception_vector[EXC_RESET0 + env->static_vectors];
+ env->sregs[LITBASE] &= ~1;
+#ifndef CONFIG_USER_ONLY
+ env->sregs[PS] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
+ env->pending_irq_level = 0;
+#else
+ env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_WINDOWED_REGISTER) &&
+ !xtensa_abi_call0()) {
+ env->sregs[PS] |= PS_WOE;
+ }
+ env->sregs[CPENABLE] = 0xff;
+#endif
+ env->sregs[VECBASE] = env->config->vecbase;
+ env->sregs[IBREAKENABLE] = 0;
+ env->sregs[MEMCTL] = MEMCTL_IL0EN & env->config->memctl_mask;
+ env->sregs[ATOMCTL] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_ATOMCTL) ? 0x28 : 0x15;
+ env->sregs[CONFIGID0] = env->config->configid[0];
+ env->sregs[CONFIGID1] = env->config->configid[1];
+ env->exclusive_addr = -1;
+
+#ifndef CONFIG_USER_ONLY
+ reset_mmu(env);
+ s->halted = env->runstall;
+#endif
+ set_no_signaling_nans(!dfpu, &env->fp_status);
+ set_use_first_nan(!dfpu, &env->fp_status);
+}
+
+static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void xtensa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ info->private_data = cpu->env.config->isa;
+ info->print_insn = print_insn_xtensa;
+}
+
+static void xtensa_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+#ifndef CONFIG_USER_ONLY
+ xtensa_irq_init(&XTENSA_CPU(dev)->env);
+#endif
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cs->gdb_num_regs = xcc->config->gdb_regmap.num_regs;
+
+ qemu_init_vcpu(cs);
+
+ xcc->parent_realize(dev, errp);
+}
+
+static void xtensa_cpu_initfn(Object *obj)
+{
+ XtensaCPU *cpu = XTENSA_CPU(obj);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
+ CPUXtensaState *env = &cpu->env;
+
+ cpu_set_cpustate_pointers(cpu);
+ env->config = xcc->config;
+
+#ifndef CONFIG_USER_ONLY
+ env->address_space_er = g_malloc(sizeof(*env->address_space_er));
+ env->system_er = g_malloc(sizeof(*env->system_er));
+ memory_region_init_io(env->system_er, obj, NULL, env, "er",
+ UINT64_C(0x100000000));
+ address_space_init(env->address_space_er, env->system_er, "ER");
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static const VMStateDescription vmstate_xtensa_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps xtensa_sysemu_ops = {
+ .get_phys_page_debug = xtensa_cpu_get_phys_page_debug,
+};
+#endif
+
+#include "hw/core/tcg-cpu-ops.h"
+
+static const struct TCGCPUOps xtensa_tcg_ops = {
+ .initialize = xtensa_translate_init,
+ .debug_excp_handler = xtensa_breakpoint_handler,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = xtensa_cpu_tlb_fill,
+ .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
+ .do_interrupt = xtensa_cpu_do_interrupt,
+ .do_transaction_failed = xtensa_cpu_do_transaction_failed,
+ .do_unaligned_access = xtensa_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc);
+
+ device_class_set_parent_realize(dc, xtensa_cpu_realizefn,
+ &xcc->parent_realize);
+
+ device_class_set_parent_reset(dc, xtensa_cpu_reset, &xcc->parent_reset);
+
+ cc->class_by_name = xtensa_cpu_class_by_name;
+ cc->has_work = xtensa_cpu_has_work;
+ cc->dump_state = xtensa_cpu_dump_state;
+ cc->set_pc = xtensa_cpu_set_pc;
+ cc->gdb_read_register = xtensa_cpu_gdb_read_register;
+ cc->gdb_write_register = xtensa_cpu_gdb_write_register;
+ cc->gdb_stop_before_watchpoint = true;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &xtensa_sysemu_ops;
+ dc->vmsd = &vmstate_xtensa_cpu;
+#endif
+ cc->disas_set_info = xtensa_cpu_disas_set_info;
+ cc->tcg_ops = &xtensa_tcg_ops;
+}
+
+static const TypeInfo xtensa_cpu_type_info = {
+ .name = TYPE_XTENSA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(XtensaCPU),
+ .instance_init = xtensa_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(XtensaCPUClass),
+ .class_init = xtensa_cpu_class_init,
+};
+
+static void xtensa_cpu_register_types(void)
+{
+ type_register_static(&xtensa_cpu_type_info);
+}
+
+type_init(xtensa_cpu_register_types)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
new file mode 100644
index 000000000..02143f2f7
--- /dev/null
+++ b/target/xtensa/cpu.h
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XTENSA_CPU_H
+#define XTENSA_CPU_H
+
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "xtensa-isa.h"
+
+/* Xtensa processors have a weak memory model */
+#define TCG_GUEST_DEFAULT_MO (0)
+
+enum {
+ /* Additional instructions */
+ XTENSA_OPTION_CODE_DENSITY,
+ XTENSA_OPTION_LOOP,
+ XTENSA_OPTION_EXTENDED_L32R,
+ XTENSA_OPTION_16_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL_HIGH,
+ XTENSA_OPTION_32_BIT_IDIV,
+ XTENSA_OPTION_MAC16,
+ XTENSA_OPTION_MISC_OP_NSA,
+ XTENSA_OPTION_MISC_OP_MINMAX,
+ XTENSA_OPTION_MISC_OP_SEXT,
+ XTENSA_OPTION_MISC_OP_CLAMPS,
+ XTENSA_OPTION_COPROCESSOR,
+ XTENSA_OPTION_BOOLEAN,
+ XTENSA_OPTION_FP_COPROCESSOR,
+ XTENSA_OPTION_DFP_COPROCESSOR,
+ XTENSA_OPTION_DFPU_SINGLE_ONLY,
+ XTENSA_OPTION_MP_SYNCHRO,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ XTENSA_OPTION_ATOMCTL,
+ XTENSA_OPTION_DEPBITS,
+
+ /* Interrupts and exceptions */
+ XTENSA_OPTION_EXCEPTION,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ XTENSA_OPTION_UNALIGNED_EXCEPTION,
+ XTENSA_OPTION_INTERRUPT,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+
+ /* Local memory */
+ XTENSA_OPTION_ICACHE,
+ XTENSA_OPTION_ICACHE_TEST,
+ XTENSA_OPTION_ICACHE_INDEX_LOCK,
+ XTENSA_OPTION_DCACHE,
+ XTENSA_OPTION_DCACHE_TEST,
+ XTENSA_OPTION_DCACHE_INDEX_LOCK,
+ XTENSA_OPTION_IRAM,
+ XTENSA_OPTION_IROM,
+ XTENSA_OPTION_DRAM,
+ XTENSA_OPTION_DROM,
+ XTENSA_OPTION_XLMI,
+ XTENSA_OPTION_HW_ALIGNMENT,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+
+ /* Memory protection and translation */
+ XTENSA_OPTION_REGION_PROTECTION,
+ XTENSA_OPTION_REGION_TRANSLATION,
+ XTENSA_OPTION_MPU,
+ XTENSA_OPTION_MMU,
+ XTENSA_OPTION_CACHEATTR,
+
+ /* Other */
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ XTENSA_OPTION_PROCESSOR_INTERFACE,
+ XTENSA_OPTION_MISC_SR,
+ XTENSA_OPTION_THREAD_POINTER,
+ XTENSA_OPTION_PROCESSOR_ID,
+ XTENSA_OPTION_DEBUG,
+ XTENSA_OPTION_TRACE_PORT,
+ XTENSA_OPTION_EXTERN_REGS,
+};
+
+enum {
+ EXPSTATE = 230,
+ THREADPTR = 231,
+ FCR = 232,
+ FSR = 233,
+};
+
+enum {
+ LBEG = 0,
+ LEND = 1,
+ LCOUNT = 2,
+ SAR = 3,
+ BR = 4,
+ LITBASE = 5,
+ SCOMPARE1 = 12,
+ ACCLO = 16,
+ ACCHI = 17,
+ MR = 32,
+ PREFCTL = 40,
+ WINDOW_BASE = 72,
+ WINDOW_START = 73,
+ PTEVADDR = 83,
+ MMID = 89,
+ RASID = 90,
+ MPUENB = 90,
+ ITLBCFG = 91,
+ DTLBCFG = 92,
+ MPUCFG = 92,
+ ERACCESS = 95,
+ IBREAKENABLE = 96,
+ MEMCTL = 97,
+ CACHEATTR = 98,
+ CACHEADRDIS = 98,
+ ATOMCTL = 99,
+ DDR = 104,
+ MEPC = 106,
+ MEPS = 107,
+ MESAVE = 108,
+ MESR = 109,
+ MECR = 110,
+ MEVADDR = 111,
+ IBREAKA = 128,
+ DBREAKA = 144,
+ DBREAKC = 160,
+ CONFIGID0 = 176,
+ EPC1 = 177,
+ DEPC = 192,
+ EPS2 = 194,
+ CONFIGID1 = 208,
+ EXCSAVE1 = 209,
+ CPENABLE = 224,
+ INTSET = 226,
+ INTCLEAR = 227,
+ INTENABLE = 228,
+ PS = 230,
+ VECBASE = 231,
+ EXCCAUSE = 232,
+ DEBUGCAUSE = 233,
+ CCOUNT = 234,
+ PRID = 235,
+ ICOUNT = 236,
+ ICOUNTLEVEL = 237,
+ EXCVADDR = 238,
+ CCOMPARE = 240,
+ MISC = 244,
+};
+
+#define PS_INTLEVEL 0xf
+#define PS_INTLEVEL_SHIFT 0
+
+#define PS_EXCM 0x10
+#define PS_UM 0x20
+
+#define PS_RING 0xc0
+#define PS_RING_SHIFT 6
+
+#define PS_OWB 0xf00
+#define PS_OWB_SHIFT 8
+#define PS_OWB_LEN 4
+
+#define PS_CALLINC 0x30000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_LEN 2
+
+#define PS_WOE 0x40000
+
+#define DEBUGCAUSE_IC 0x1
+#define DEBUGCAUSE_IB 0x2
+#define DEBUGCAUSE_DB 0x4
+#define DEBUGCAUSE_BI 0x8
+#define DEBUGCAUSE_BN 0x10
+#define DEBUGCAUSE_DI 0x20
+#define DEBUGCAUSE_DBNUM 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8
+
+#define DBREAKC_SB 0x80000000
+#define DBREAKC_LB 0x40000000
+#define DBREAKC_SB_LB (DBREAKC_SB | DBREAKC_LB)
+#define DBREAKC_MASK 0x3f
+
+#define MEMCTL_INIT 0x00800000
+#define MEMCTL_IUSEWAYS_SHIFT 18
+#define MEMCTL_IUSEWAYS_LEN 5
+#define MEMCTL_IUSEWAYS_MASK 0x007c0000
+#define MEMCTL_DALLOCWAYS_SHIFT 13
+#define MEMCTL_DALLOCWAYS_LEN 5
+#define MEMCTL_DALLOCWAYS_MASK 0x0003e000
+#define MEMCTL_DUSEWAYS_SHIFT 8
+#define MEMCTL_DUSEWAYS_LEN 5
+#define MEMCTL_DUSEWAYS_MASK 0x00001f00
+#define MEMCTL_ISNP 0x4
+#define MEMCTL_DSNP 0x2
+#define MEMCTL_IL0EN 0x1
+
+#define MAX_INSN_LENGTH 64
+#define MAX_INSNBUF_LENGTH \
+ ((MAX_INSN_LENGTH + sizeof(xtensa_insnbuf_word) - 1) / \
+ sizeof(xtensa_insnbuf_word))
+#define MAX_INSN_SLOTS 32
+#define MAX_OPCODE_ARGS 16
+#define MAX_NAREG 64
+#define MAX_NINTERRUPT 32
+#define MAX_NLEVEL 6
+#define MAX_NNMI 1
+#define MAX_NCCOMPARE 3
+#define MAX_TLB_WAY_SIZE 8
+#define MAX_NDBREAK 2
+#define MAX_NMEMORY 4
+#define MAX_MPU_FOREGROUND_SEGMENTS 32
+
+#define REGION_PAGE_MASK 0xe0000000
+
+#define PAGE_CACHE_MASK 0x700
+#define PAGE_CACHE_SHIFT 8
+#define PAGE_CACHE_INVALID 0x000
+#define PAGE_CACHE_BYPASS 0x100
+#define PAGE_CACHE_WT 0x200
+#define PAGE_CACHE_WB 0x400
+#define PAGE_CACHE_ISOLATE 0x600
+
+enum {
+ /* Static vectors */
+ EXC_RESET0,
+ EXC_RESET1,
+ EXC_MEMORY_ERROR,
+
+ /* Dynamic vectors */
+ EXC_WINDOW_OVERFLOW4,
+ EXC_WINDOW_UNDERFLOW4,
+ EXC_WINDOW_OVERFLOW8,
+ EXC_WINDOW_UNDERFLOW8,
+ EXC_WINDOW_OVERFLOW12,
+ EXC_WINDOW_UNDERFLOW12,
+ EXC_IRQ,
+ EXC_KERNEL,
+ EXC_USER,
+ EXC_DOUBLE,
+ EXC_DEBUG,
+ EXC_MAX
+};
+
+enum {
+ ILLEGAL_INSTRUCTION_CAUSE = 0,
+ SYSCALL_CAUSE,
+ INSTRUCTION_FETCH_ERROR_CAUSE,
+ LOAD_STORE_ERROR_CAUSE,
+ LEVEL1_INTERRUPT_CAUSE,
+ ALLOCA_CAUSE,
+ INTEGER_DIVIDE_BY_ZERO_CAUSE,
+ PC_VALUE_ERROR_CAUSE,
+ PRIVILEGED_CAUSE,
+ LOAD_STORE_ALIGNMENT_CAUSE,
+ EXTERNAL_REG_PRIVILEGE_CAUSE,
+ EXCLUSIVE_ERROR_CAUSE,
+ INSTR_PIF_DATA_ERROR_CAUSE,
+ LOAD_STORE_PIF_DATA_ERROR_CAUSE,
+ INSTR_PIF_ADDR_ERROR_CAUSE,
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+ INST_TLB_MISS_CAUSE,
+ INST_TLB_MULTI_HIT_CAUSE,
+ INST_FETCH_PRIVILEGE_CAUSE,
+ INST_FETCH_PROHIBITED_CAUSE = 20,
+ LOAD_STORE_TLB_MISS_CAUSE = 24,
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE,
+ LOAD_STORE_PRIVILEGE_CAUSE,
+ LOAD_PROHIBITED_CAUSE = 28,
+ STORE_PROHIBITED_CAUSE,
+
+ COPROCESSOR0_DISABLED = 32,
+};
+
+typedef enum {
+ INTTYPE_LEVEL,
+ INTTYPE_EDGE,
+ INTTYPE_NMI,
+ INTTYPE_SOFTWARE,
+ INTTYPE_TIMER,
+ INTTYPE_DEBUG,
+ INTTYPE_WRITE_ERR,
+ INTTYPE_PROFILING,
+ INTTYPE_IDMA_DONE,
+ INTTYPE_IDMA_ERR,
+ INTTYPE_GS_ERR,
+ INTTYPE_MAX
+} interrupt_type;
+
+struct CPUXtensaState;
+
+typedef struct xtensa_tlb_entry {
+ uint32_t vaddr;
+ uint32_t paddr;
+ uint8_t asid;
+ uint8_t attr;
+ bool variable;
+} xtensa_tlb_entry;
+
+typedef struct xtensa_tlb {
+ unsigned nways;
+ const unsigned way_size[10];
+ bool varway56;
+ unsigned nrefillentries;
+} xtensa_tlb;
+
+typedef struct xtensa_mpu_entry {
+ uint32_t vaddr;
+ uint32_t attr;
+} xtensa_mpu_entry;
+
+typedef struct XtensaGdbReg {
+ int targno;
+ unsigned flags;
+ int type;
+ int group;
+ unsigned size;
+} XtensaGdbReg;
+
+typedef struct XtensaGdbRegmap {
+ int num_regs;
+ int num_core_regs;
+ /* 1 PC + 16 a + 64 ar + 256 sr + 256 ur registers */
+ XtensaGdbReg reg[1 + 16 + 64 + 256 + 256];
+} XtensaGdbRegmap;
+
+typedef struct XtensaCcompareTimer {
+ struct CPUXtensaState *env;
+ QEMUTimer *timer;
+} XtensaCcompareTimer;
+
+typedef struct XtensaMemory {
+ unsigned num;
+ struct XtensaMemoryRegion {
+ uint32_t addr;
+ uint32_t size;
+ } location[MAX_NMEMORY];
+} XtensaMemory;
+
+typedef struct opcode_arg {
+ uint32_t imm;
+ uint32_t raw_imm;
+ void *in;
+ void *out;
+ uint32_t num_bits;
+} OpcodeArg;
+
+typedef struct DisasContext DisasContext;
+typedef void (*XtensaOpcodeOp)(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[]);
+typedef uint32_t (*XtensaOpcodeUintTest)(DisasContext *dc,
+ const OpcodeArg arg[],
+ const uint32_t par[]);
+
+enum {
+ XTENSA_OP_ILL = 0x1,
+ XTENSA_OP_PRIVILEGED = 0x2,
+ XTENSA_OP_SYSCALL = 0x4,
+ XTENSA_OP_DEBUG_BREAK = 0x8,
+
+ XTENSA_OP_OVERFLOW = 0x10,
+ XTENSA_OP_UNDERFLOW = 0x20,
+ XTENSA_OP_ALLOCA = 0x40,
+ XTENSA_OP_COPROCESSOR = 0x80,
+
+ XTENSA_OP_DIVIDE_BY_ZERO = 0x100,
+
+ /* Postprocessing flags */
+ XTENSA_OP_CHECK_INTERRUPTS = 0x200,
+ XTENSA_OP_EXIT_TB_M1 = 0x400,
+ XTENSA_OP_EXIT_TB_0 = 0x800,
+ XTENSA_OP_SYNC_REGISTER_WINDOW = 0x1000,
+
+ XTENSA_OP_POSTPROCESS =
+ XTENSA_OP_CHECK_INTERRUPTS |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_SYNC_REGISTER_WINDOW,
+
+ XTENSA_OP_NAME_ARRAY = 0x8000,
+
+ XTENSA_OP_CONTROL_FLOW = 0x10000,
+ XTENSA_OP_STORE = 0x20000,
+ XTENSA_OP_LOAD = 0x40000,
+ XTENSA_OP_LOAD_STORE =
+ XTENSA_OP_LOAD | XTENSA_OP_STORE,
+};
+
+typedef struct XtensaOpcodeOps {
+ const void *name;
+ XtensaOpcodeOp translate;
+ XtensaOpcodeUintTest test_exceptions;
+ XtensaOpcodeUintTest test_overflow;
+ const uint32_t *par;
+ uint32_t op_flags;
+ uint32_t coprocessor;
+} XtensaOpcodeOps;
+
+typedef struct XtensaOpcodeTranslators {
+ unsigned num_opcodes;
+ const XtensaOpcodeOps *opcode;
+} XtensaOpcodeTranslators;
+
+extern const XtensaOpcodeTranslators xtensa_core_opcodes;
+extern const XtensaOpcodeTranslators xtensa_fpu2000_opcodes;
+extern const XtensaOpcodeTranslators xtensa_fpu_opcodes;
+
+struct XtensaConfig {
+ const char *name;
+ uint64_t options;
+ XtensaGdbRegmap gdb_regmap;
+ unsigned nareg;
+ int excm_level;
+ int ndepc;
+ unsigned inst_fetch_width;
+ unsigned max_insn_size;
+ uint32_t vecbase;
+ uint32_t exception_vector[EXC_MAX];
+ unsigned ninterrupt;
+ unsigned nlevel;
+ unsigned nmi_level;
+ uint32_t interrupt_vector[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t level_mask[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t inttype_mask[INTTYPE_MAX];
+ struct {
+ uint32_t level;
+ interrupt_type inttype;
+ } interrupt[MAX_NINTERRUPT];
+ unsigned nccompare;
+ uint32_t timerint[MAX_NCCOMPARE];
+ unsigned nextint;
+ unsigned extint[MAX_NINTERRUPT];
+
+ unsigned debug_level;
+ unsigned nibreak;
+ unsigned ndbreak;
+
+ unsigned icache_ways;
+ unsigned dcache_ways;
+ unsigned dcache_line_bytes;
+ uint32_t memctl_mask;
+
+ XtensaMemory instrom;
+ XtensaMemory instram;
+ XtensaMemory datarom;
+ XtensaMemory dataram;
+ XtensaMemory sysrom;
+ XtensaMemory sysram;
+
+ unsigned hw_version;
+ uint32_t configid[2];
+
+ void *isa_internal;
+ xtensa_isa isa;
+ XtensaOpcodeOps **opcode_ops;
+ const XtensaOpcodeTranslators **opcode_translators;
+ xtensa_regfile a_regfile;
+ void ***regfile;
+
+ uint32_t clock_freq_khz;
+
+ xtensa_tlb itlb;
+ xtensa_tlb dtlb;
+
+ uint32_t mpu_align;
+ unsigned n_mpu_fg_segments;
+ unsigned n_mpu_bg_segments;
+ const xtensa_mpu_entry *mpu_bg;
+
+ bool use_first_nan;
+};
+
+typedef struct XtensaConfigList {
+ const XtensaConfig *config;
+ struct XtensaConfigList *next;
+} XtensaConfigList;
+
+#ifdef HOST_WORDS_BIGENDIAN
+enum {
+ FP_F32_HIGH,
+ FP_F32_LOW,
+};
+#else
+enum {
+ FP_F32_LOW,
+ FP_F32_HIGH,
+};
+#endif
+
+typedef struct CPUXtensaState {
+ const XtensaConfig *config;
+ uint32_t regs[16];
+ uint32_t pc;
+ uint32_t sregs[256];
+ uint32_t uregs[256];
+ uint32_t phys_regs[MAX_NAREG];
+ union {
+ float32 f32[2];
+ float64 f64;
+ } fregs[16];
+ float_status fp_status;
+ uint32_t windowbase_next;
+ uint32_t exclusive_addr;
+ uint32_t exclusive_val;
+
+#ifndef CONFIG_USER_ONLY
+ xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
+ xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
+ xtensa_mpu_entry mpu_fg[MAX_MPU_FOREGROUND_SEGMENTS];
+ unsigned autorefill_idx;
+ bool runstall;
+ AddressSpace *address_space_er;
+ MemoryRegion *system_er;
+ int pending_irq_level; /* level of last raised IRQ */
+ qemu_irq *irq_inputs;
+ qemu_irq ext_irq_inputs[MAX_NINTERRUPT];
+ qemu_irq runstall_irq;
+ XtensaCcompareTimer ccompare[MAX_NCCOMPARE];
+ uint64_t time_base;
+ uint64_t ccount_time;
+ uint32_t ccount_base;
+#endif
+
+ int yield_needed;
+ unsigned static_vectors;
+
+ /* Watchpoints for DBREAK registers */
+ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+} CPUXtensaState;
+
+/**
+ * XtensaCPU:
+ * @env: #CPUXtensaState
+ *
+ * An Xtensa CPU.
+ */
+struct XtensaCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNegativeOffsetState neg;
+ CPUXtensaState env;
+};
+
+
+#ifndef CONFIG_USER_ONLY
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void xtensa_cpu_do_interrupt(CPUState *cpu);
+bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
+void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+#endif
+void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void xtensa_count_regs(const XtensaConfig *config,
+ unsigned *n_regs, unsigned *n_core_regs);
+int xtensa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
+int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+
+#define cpu_list xtensa_cpu_list
+
+#define XTENSA_CPU_TYPE_SUFFIX "-" TYPE_XTENSA_CPU
+#define XTENSA_CPU_TYPE_NAME(model) model XTENSA_CPU_TYPE_SUFFIX
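+/* e.g. XTENSA_CPU_TYPE_NAME("dc232b") expands to "dc232b" "-" "xtensa-cpu",
+   i.e. the QOM type name "dc232b-xtensa-cpu". */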
+#define CPU_RESOLVING_TYPE TYPE_XTENSA_CPU
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define XTENSA_DEFAULT_CPU_MODEL "fsf"
+#define XTENSA_DEFAULT_CPU_NOMMU_MODEL "fsf"
+#else
+#define XTENSA_DEFAULT_CPU_MODEL "dc232b"
+#define XTENSA_DEFAULT_CPU_NOMMU_MODEL "de212"
+#endif
+#define XTENSA_DEFAULT_CPU_TYPE \
+ XTENSA_CPU_TYPE_NAME(XTENSA_DEFAULT_CPU_MODEL)
+#define XTENSA_DEFAULT_CPU_NOMMU_TYPE \
+ XTENSA_CPU_TYPE_NAME(XTENSA_DEFAULT_CPU_NOMMU_MODEL)
+
+void xtensa_collect_sr_names(const XtensaConfig *config);
+void xtensa_translate_init(void);
+void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
+void xtensa_breakpoint_handler(CPUState *cs);
+void xtensa_register_core(XtensaConfigList *node);
+void xtensa_sim_open_console(Chardev *chr);
+void check_interrupts(CPUXtensaState *s);
+void xtensa_irq_init(CPUXtensaState *env);
+qemu_irq *xtensa_get_extints(CPUXtensaState *env);
+qemu_irq xtensa_get_runstall(CPUXtensaState *env);
+void xtensa_cpu_list(void);
+void xtensa_sync_window_from_phys(CPUXtensaState *env);
+void xtensa_sync_phys_from_window(CPUXtensaState *env);
+void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta);
+void xtensa_restore_owb(CPUXtensaState *env);
+void debug_exception_env(CPUXtensaState *new_env, uint32_t cause);
+
+static inline void xtensa_select_static_vectors(CPUXtensaState *env,
+ unsigned n)
+{
+ assert(n < 2);
+ env->static_vectors = n;
+}
+void xtensa_runstall(CPUXtensaState *env, bool runstall);
+
+#define XTENSA_OPTION_BIT(opt) (((uint64_t)1) << (opt))
+#define XTENSA_OPTION_ALL (~(uint64_t)0)
+
+static inline bool xtensa_option_bits_enabled(const XtensaConfig *config,
+ uint64_t opt)
+{
+ return (config->options & opt) != 0;
+}
+
+static inline bool xtensa_option_enabled(const XtensaConfig *config, int opt)
+{
+ return xtensa_option_bits_enabled(config, XTENSA_OPTION_BIT(opt));
+}
+
+static inline int xtensa_get_cintlevel(const CPUXtensaState *env)
+{
+ int level = (env->sregs[PS] & PS_INTLEVEL) >> PS_INTLEVEL_SHIFT;
+ if ((env->sregs[PS] & PS_EXCM) && env->config->excm_level > level) {
+ level = env->config->excm_level;
+ }
+ return level;
+}
+
+static inline int xtensa_get_ring(const CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MPU))) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+static inline int xtensa_get_cring(const CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MPU)) &&
+ (env->sregs[PS] & PS_EXCM) == 0) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access);
+void reset_mmu(CPUXtensaState *env);
+void dump_mmu(CPUXtensaState *env);
+
+static inline MemoryRegion *xtensa_get_er_region(CPUXtensaState *env)
+{
+ return env->system_er;
+}
+#else
+void xtensa_set_abi_call0(void);
+bool xtensa_abi_call0(void);
+#endif
+
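+/*
+ * Duplicate the nareg/4 WINDOW_START bits so that a subsequent right
+ * shift by (WINDOW_BASE + 1) sees the rotated bits without wrapping;
+ * e.g. with nareg == 64 (16 windowstart bits), 0x0005 becomes
+ * 0x00050005.
+ */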
+static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
+{
+ return env->sregs[WINDOW_START] |
+ (env->sregs[WINDOW_START] << env->config->nareg / 4);
+}
+
+/* MMU modes definitions */
+#define MMU_USER_IDX 3
+
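+/* The MMU index is the current ring (0..3); ring 3 is user mode,
+   matching MMU_USER_IDX above. */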
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
+{
+ return xtensa_get_cring(env);
+}
+
+#define XTENSA_TBFLAG_RING_MASK 0x3
+#define XTENSA_TBFLAG_EXCM 0x4
+#define XTENSA_TBFLAG_LITBASE 0x8
+#define XTENSA_TBFLAG_DEBUG 0x10
+#define XTENSA_TBFLAG_ICOUNT 0x20
+#define XTENSA_TBFLAG_CPENABLE_MASK 0x3fc0
+#define XTENSA_TBFLAG_CPENABLE_SHIFT 6
+#define XTENSA_TBFLAG_WINDOW_MASK 0x18000
+#define XTENSA_TBFLAG_WINDOW_SHIFT 15
+#define XTENSA_TBFLAG_YIELD 0x20000
+#define XTENSA_TBFLAG_CWOE 0x40000
+#define XTENSA_TBFLAG_CALLINC_MASK 0x180000
+#define XTENSA_TBFLAG_CALLINC_SHIFT 19
+
+#define XTENSA_CSBASE_LEND_MASK 0x0000ffff
+#define XTENSA_CSBASE_LEND_SHIFT 0
+#define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000
+#define XTENSA_CSBASE_LBEG_OFF_SHIFT 16
+
+typedef CPUXtensaState CPUArchState;
+typedef XtensaCPU ArchCPU;
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+ *flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ *flags |= XTENSA_TBFLAG_EXCM;
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
+ target_ulong lend_dist =
+ env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
+
+ /*
+ * 0 in the csbase_lend field means that there cannot be a loopback
+ * for any instruction that starts inside this page. Any other value
+ * means that an instruction that ends at this offset from the page
+ * start may loop back and will need loopback code to be generated.
+ *
+ * lend_dist is 0 when LEND points to the start of the page, but
+ * no instruction that starts inside this page may end at offset 0,
+ * so it's still correct.
+ *
+ * When an instruction ends at a page boundary it may only start in
+ * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
+ * for the TB that contains this instruction.
+ */
+ if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
+ target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+
+ *cs_base = lend_dist;
+ if (lbeg_off < 256) {
+ *cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ }
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
+ (env->sregs[LITBASE] & 1)) {
+ *flags |= XTENSA_TBFLAG_LITBASE;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ *flags |= XTENSA_TBFLAG_DEBUG;
+ }
+ if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+ *flags |= XTENSA_TBFLAG_ICOUNT;
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
+ *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
+ }
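+ /*
+ * ctz32 over the replicated WINDOW_START, seeded with 0x8 to cap
+ * the result at 3, counts the consecutive free window-start slots
+ * just above WINDOW_BASE; the translator presumably uses this with
+ * CWOE/CALLINC to decide when window overflow checks are needed.
+ */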
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
+ (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t w = ctz32(windowstart | 0x8);
+
+ *flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE;
+ *flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT,
+ PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT;
+ } else {
+ *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
+ }
+ if (env->yield_needed) {
+ *flags |= XTENSA_TBFLAG_YIELD;
+ }
+}
+
+#endif
diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c
new file mode 100644
index 000000000..be1f81107
--- /dev/null
+++ b/target/xtensa/dbg_helper.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/address-spaces.h"
+
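+/*
+ * Invalidate any TBs for the page backing vaddr; the literal 2 passed
+ * as is_write selects an instruction-fetch access in
+ * xtensa_get_physical_addr.
+ */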
+static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
+{
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
+ &paddr, &page_size, &access);
+ if (ret == 0) {
+ tb_invalidate_phys_addr(&address_space_memory, paddr,
+ MEMTXATTRS_UNSPECIFIED);
+ }
+}
+
+void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
+{
+ uint32_t change = v ^ env->sregs[IBREAKENABLE];
+ unsigned i;
+
+ for (i = 0; i < env->config->nibreak; ++i) {
+ if (change & (1 << i)) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ }
+ }
+ env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
+}
+
+void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ tb_invalidate_virtual_addr(env, v);
+ }
+ env->sregs[IBREAKA + i] = v;
+}
+
+static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
+ uint32_t dbreakc)
+{
+ CPUState *cs = env_cpu(env);
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+ uint32_t mask = dbreakc | ~DBREAKC_MASK;
+
+ if (env->cpu_watchpoint[i]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ }
+ if (dbreakc & DBREAKC_SB) {
+ flags |= BP_MEM_WRITE;
+ }
+ if (dbreakc & DBREAKC_LB) {
+ flags |= BP_MEM_READ;
+ }
+ /* contiguous mask after inversion is one less than some power of 2 */
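+ /*
+ * e.g. mask == 0xffffffc0: ~mask == 0x3f, ~mask + 1 == 0x40, and
+ * 0x40 & 0x3f == 0, so the mask is contiguous; mask == 0xffffffa0
+ * would fail the test.
+ */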
+ if ((~mask + 1) & ~mask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
+ /* cut mask after the first zero bit */
+ mask = 0xffffffff << (32 - clo32(mask));
+ }
+ if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
+ flags, &env->cpu_watchpoint[i])) {
+ env->cpu_watchpoint[i] = NULL;
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Failed to set data breakpoint at 0x%08x/%d\n",
+ dbreaka & mask, ~mask + 1);
+ }
+}
+
+void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ uint32_t dbreakc = env->sregs[DBREAKC + i];
+
+ if ((dbreakc & DBREAKC_SB_LB) &&
+ env->sregs[DBREAKA + i] != v) {
+ set_dbreak(env, i, v, dbreakc);
+ }
+ env->sregs[DBREAKA + i] = v;
+}
+
+void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
+ if (v & DBREAKC_SB_LB) {
+ set_dbreak(env, i, env->sregs[DBREAKA + i], v);
+ } else {
+ if (env->cpu_watchpoint[i]) {
+ CPUState *cs = env_cpu(env);
+
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ env->cpu_watchpoint[i] = NULL;
+ }
+ }
+ }
+ env->sregs[DBREAKC + i] = v;
+}
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
new file mode 100644
index 000000000..9bc7f50d3
--- /dev/null
+++ b/target/xtensa/exc_helper.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+
+void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->exception_index = excp;
+ if (excp == EXCP_YIELD) {
+ env->yield_needed = 0;
+ }
+ cpu_loop_exit(cs);
+}
+
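+/*
+ * Raise a general exception at pc. A fault taken while PS.EXCM is
+ * already set becomes a double exception; cores with a DEPC register
+ * (ndepc != 0) record the fault pc there instead of in EPC1.
+ */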
+void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ uint32_t vector;
+
+ env->pc = pc;
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = pc;
+ } else {
+ env->sregs[EPC1] = pc;
+ }
+ vector = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = pc;
+ vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+
+ env->sregs[EXCCAUSE] = cause;
+ env->sregs[PS] |= PS_EXCM;
+
+ HELPER(exception)(env, vector);
+}
+
+void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
+ uint32_t pc, uint32_t cause, uint32_t vaddr)
+{
+ env->sregs[EXCVADDR] = vaddr;
+ HELPER(exception_cause)(env, pc, cause);
+}
+
+void debug_exception_env(CPUXtensaState *env, uint32_t cause)
+{
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ HELPER(debug_exception)(env, env->pc, cause);
+ }
+}
+
+void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ unsigned level = env->config->debug_level;
+
+ env->pc = pc;
+ env->sregs[DEBUGCAUSE] = cause;
+ env->sregs[EPC1 + level - 1] = pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
+ (level << PS_INTLEVEL_SHIFT);
+ HELPER(exception)(env, EXC_DEBUG);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
+{
+ CPUState *cpu = env_cpu(env);
+
+ env->pc = pc;
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
+ (intlevel << PS_INTLEVEL_SHIFT);
+
+ qemu_mutex_lock_iothread();
+ check_interrupts(env);
+ qemu_mutex_unlock_iothread();
+
+ if (env->pending_irq_level) {
+ cpu_loop_exit(cpu);
+ return;
+ }
+
+ cpu->halted = 1;
+ HELPER(exception)(env, EXCP_HLT);
+}
+
+void HELPER(check_interrupts)(CPUXtensaState *env)
+{
+ qemu_mutex_lock_iothread();
+ check_interrupts(env);
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(intset)(CPUXtensaState *env, uint32_t v)
+{
+ qatomic_or(&env->sregs[INTSET],
+ v & env->config->inttype_mask[INTTYPE_SOFTWARE]);
+}
+
+static void intclear(CPUXtensaState *env, uint32_t v)
+{
+ qatomic_and(&env->sregs[INTSET], ~v);
+}
+
+void HELPER(intclear)(CPUXtensaState *env, uint32_t v)
+{
+ intclear(env, v & (env->config->inttype_mask[INTTYPE_SOFTWARE] |
+ env->config->inttype_mask[INTTYPE_EDGE]));
+}
+
+static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
+{
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_RELOCATABLE_VECTOR)) {
+ return vector - env->config->vecbase + env->sregs[VECBASE];
+ } else {
+ return vector;
+ }
+}
+
+/*
+ * Handle a pending IRQ.
+ * For a high-priority interrupt, jump to the corresponding interrupt vector.
+ * For a level-1 interrupt, convert it to a user, kernel or double
+ * exception with the 'level-1 interrupt' exception cause.
+ */
+static void handle_interrupt(CPUXtensaState *env)
+{
+ int level = env->pending_irq_level;
+
+ if ((level > xtensa_get_cintlevel(env) &&
+ level <= env->config->nlevel &&
+ (env->config->level_mask[level] &
+ env->sregs[INTSET] & env->sregs[INTENABLE])) ||
+ level == env->config->nmi_level) {
+ CPUState *cs = env_cpu(env);
+
+ if (level > 1) {
+ env->sregs[EPC1 + level - 1] = env->pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] =
+ (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
+ env->pc = relocated_vector(env,
+ env->config->interrupt_vector[level]);
+ if (level == env->config->nmi_level) {
+ intclear(env, env->config->inttype_mask[INTTYPE_NMI]);
+ }
+ } else {
+ env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;
+
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = env->pc;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ }
+ cs->exception_index = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ cs->exception_index =
+ (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+ env->sregs[PS] |= PS_EXCM;
+ }
+ }
+}
+
+/* Called from cpu_handle_interrupt with BQL held */
+void xtensa_cpu_do_interrupt(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->exception_index == EXC_IRQ) {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(EXC_IRQ) level = %d, cintlevel = %d, "
+ "pc = %08x, a0 = %08x, ps = %08x, "
+ "intset = %08x, intenable = %08x, "
+ "ccount = %08x\n",
+ __func__, env->pending_irq_level,
+ xtensa_get_cintlevel(env),
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[INTSET], env->sregs[INTENABLE],
+ env->sregs[CCOUNT]);
+ handle_interrupt(env);
+ }
+
+ switch (cs->exception_index) {
+ case EXC_WINDOW_OVERFLOW4:
+ case EXC_WINDOW_UNDERFLOW4:
+ case EXC_WINDOW_OVERFLOW8:
+ case EXC_WINDOW_UNDERFLOW8:
+ case EXC_WINDOW_OVERFLOW12:
+ case EXC_WINDOW_UNDERFLOW12:
+ case EXC_KERNEL:
+ case EXC_USER:
+ case EXC_DOUBLE:
+ case EXC_DEBUG:
+ qemu_log_mask(CPU_LOG_INT, "%s(%d) "
+ "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
+ __func__, cs->exception_index,
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[CCOUNT]);
+ if (env->config->exception_vector[cs->exception_index]) {
+ uint32_t vector;
+
+ vector = env->config->exception_vector[cs->exception_index];
+ env->pc = relocated_vector(env, vector);
+ } else {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(pc = %08x) bad exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ }
+ break;
+
+ case EXC_IRQ:
+ break;
+
+ default:
+ qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ break;
+ }
+ check_interrupts(env);
+}
+
+bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ cs->exception_index = EXC_IRQ;
+ xtensa_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
new file mode 100644
index 000000000..ba3c29d19
--- /dev/null
+++ b/target/xtensa/fpu_helper.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+
+enum {
+ XTENSA_FP_I = 0x1,
+ XTENSA_FP_U = 0x2,
+ XTENSA_FP_O = 0x4,
+ XTENSA_FP_Z = 0x8,
+ XTENSA_FP_V = 0x10,
+};
+
+enum {
+ XTENSA_FCR_FLAGS_SHIFT = 2,
+ XTENSA_FSR_FLAGS_SHIFT = 7,
+};
+
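+/*
+ * Exception flags occupy bits 7..11 of FSR and bits 2..6 of FCR (see
+ * the *_FLAGS_SHIFT values above); the table below maps each Xtensa
+ * flag bit to its softfloat counterpart.
+ */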
+static const struct {
+ uint32_t xtensa_fp_flag;
+ int softfloat_fp_flag;
+} xtensa_fp_flag_map[] = {
+ { XTENSA_FP_I, float_flag_inexact, },
+ { XTENSA_FP_U, float_flag_underflow, },
+ { XTENSA_FP_O, float_flag_overflow, },
+ { XTENSA_FP_Z, float_flag_divbyzero, },
+ { XTENSA_FP_V, float_flag_invalid, },
+};
+
+void HELPER(wur_fpu2k_fcr)(CPUXtensaState *env, uint32_t v)
+{
+ static const int rounding_mode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+
+ env->uregs[FCR] = v & 0xfffff07f;
+ set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
+}
+
+void HELPER(wur_fpu_fcr)(CPUXtensaState *env, uint32_t v)
+{
+ static const int rounding_mode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+
+ if (v & 0xfffff000) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "MBZ field of FCR is written non-zero: %08x\n", v);
+ }
+ env->uregs[FCR] = v & 0x0000007f;
+ set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
+}
+
+void HELPER(wur_fpu_fsr)(CPUXtensaState *env, uint32_t v)
+{
+ uint32_t flags = v >> XTENSA_FSR_FLAGS_SHIFT;
+ int fef = 0;
+ unsigned i;
+
+ if (v & 0xfffff000) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "MBZ field of FSR is written non-zero: %08x\n", v);
+ }
+ env->uregs[FSR] = v & 0x00000f80;
+ for (i = 0; i < ARRAY_SIZE(xtensa_fp_flag_map); ++i) {
+ if (flags & xtensa_fp_flag_map[i].xtensa_fp_flag) {
+ fef |= xtensa_fp_flag_map[i].softfloat_fp_flag;
+ }
+ }
+ set_float_exception_flags(fef, &env->fp_status);
+}
+
+uint32_t HELPER(rur_fpu_fsr)(CPUXtensaState *env)
+{
+ uint32_t flags = 0;
+ int fef = get_float_exception_flags(&env->fp_status);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(xtensa_fp_flag_map); ++i) {
+ if (fef & xtensa_fp_flag_map[i].softfloat_fp_flag) {
+ flags |= xtensa_fp_flag_map[i].xtensa_fp_flag;
+ }
+ }
+ env->uregs[FSR] = flags << XTENSA_FSR_FLAGS_SHIFT;
+ return flags << XTENSA_FSR_FLAGS_SHIFT;
+}
+
+float64 HELPER(abs_d)(float64 v)
+{
+ return float64_abs(v);
+}
+
+float32 HELPER(abs_s)(float32 v)
+{
+ return float32_abs(v);
+}
+
+float64 HELPER(neg_d)(float64 v)
+{
+ return float64_chs(v);
+}
+
+float32 HELPER(neg_s)(float32 v)
+{
+ return float32_chs(v);
+}
+
+float32 HELPER(fpu2k_add_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_add(a, b, &env->fp_status);
+}
+
+float32 HELPER(fpu2k_sub_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_sub(a, b, &env->fp_status);
+}
+
+float32 HELPER(fpu2k_mul_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_mul(a, b, &env->fp_status);
+}
+
+float32 HELPER(fpu2k_madd_s)(CPUXtensaState *env,
+ float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, 0, &env->fp_status);
+}
+
+float32 HELPER(fpu2k_msub_s)(CPUXtensaState *env,
+ float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+float64 HELPER(add_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ set_use_first_nan(true, &env->fp_status);
+ return float64_add(a, b, &env->fp_status);
+}
+
+float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_add(a, b, &env->fp_status);
+}
+
+float64 HELPER(sub_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ set_use_first_nan(true, &env->fp_status);
+ return float64_sub(a, b, &env->fp_status);
+}
+
+float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_sub(a, b, &env->fp_status);
+}
+
+float64 HELPER(mul_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ set_use_first_nan(true, &env->fp_status);
+ return float64_mul(a, b, &env->fp_status);
+}
+
+float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_mul(a, b, &env->fp_status);
+}
+
+float64 HELPER(madd_d)(CPUXtensaState *env, float64 a, float64 b, float64 c)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float64_muladd(b, c, a, 0, &env->fp_status);
+}
+
+float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_muladd(b, c, a, 0, &env->fp_status);
+}
+
+float64 HELPER(msub_d)(CPUXtensaState *env, float64 a, float64 b, float64 c)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float64_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+float64 HELPER(mkdadj_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ set_use_first_nan(true, &env->fp_status);
+ return float64_div(b, a, &env->fp_status);
+}
+
+float32 HELPER(mkdadj_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_div(b, a, &env->fp_status);
+}
+
+float64 HELPER(mksadj_d)(CPUXtensaState *env, float64 v)
+{
+ set_use_first_nan(true, &env->fp_status);
+ return float64_sqrt(v, &env->fp_status);
+}
+
+float32 HELPER(mksadj_s)(CPUXtensaState *env, float32 v)
+{
+ set_use_first_nan(env->config->use_first_nan, &env->fp_status);
+ return float32_sqrt(v, &env->fp_status);
+}
+
+uint32_t HELPER(ftoi_d)(CPUXtensaState *env, float64 v,
+ uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = env->fp_status;
+ uint32_t res;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+ res = float64_to_int32(float64_scalbn(v, scale, &fp_status), &fp_status);
+ set_float_exception_flags(get_float_exception_flags(&fp_status),
+ &env->fp_status);
+ return res;
+}
+
+uint32_t HELPER(ftoi_s)(CPUXtensaState *env, float32 v,
+ uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = env->fp_status;
+ uint32_t res;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+ res = float32_to_int32(float32_scalbn(v, scale, &fp_status), &fp_status);
+ set_float_exception_flags(get_float_exception_flags(&fp_status),
+ &env->fp_status);
+ return res;
+}
+
+uint32_t HELPER(ftoui_d)(CPUXtensaState *env, float64 v,
+ uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = env->fp_status;
+ float64 res;
+ uint32_t rv;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+
+ res = float64_scalbn(v, scale, &fp_status);
+
+ if (float64_is_neg(v) && !float64_is_any_nan(v)) {
+ set_float_exception_flags(float_flag_invalid, &fp_status);
+ rv = float64_to_int32(res, &fp_status);
+ } else {
+ rv = float64_to_uint32(res, &fp_status);
+ }
+ set_float_exception_flags(get_float_exception_flags(&fp_status),
+ &env->fp_status);
+ return rv;
+}
+
+uint32_t HELPER(ftoui_s)(CPUXtensaState *env, float32 v,
+ uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = env->fp_status;
+ float32 res;
+ uint32_t rv;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+
+ res = float32_scalbn(v, scale, &fp_status);
+
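+ /*
+ * Negative non-NaN inputs are converted as signed integers so that the
+ * architected saturation value is produced; the invalid flag is raised
+ * only when the value does not round to zero.
+ */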
+ if (float32_is_neg(v) && !float32_is_any_nan(v)) {
+ rv = float32_to_int32(res, &fp_status);
+ if (rv) {
+ set_float_exception_flags(float_flag_invalid, &fp_status);
+ }
+ } else {
+ rv = float32_to_uint32(res, &fp_status);
+ }
+ set_float_exception_flags(get_float_exception_flags(&fp_status),
+ &env->fp_status);
+ return rv;
+}
+
+float64 HELPER(itof_d)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float64_scalbn(int32_to_float64(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float32 HELPER(itof_s)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(int32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float64 HELPER(uitof_d)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float64_scalbn(uint32_to_float64(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float32 HELPER(uitof_s)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(uint32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float64 HELPER(cvtd_s)(CPUXtensaState *env, float32 v)
+{
+ return float32_to_float64(v, &env->fp_status);
+}
+
+float32 HELPER(cvts_d)(CPUXtensaState *env, float64 v)
+{
+ return float64_to_float32(v, &env->fp_status);
+}
+
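+/*
+ * Comparison helpers: the ordered predicates are false and the unordered
+ * ones true when either operand is a NaN. OLT/OLE use signaling
+ * comparisons (float*_lt/le); the others are quiet.
+ */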
+uint32_t HELPER(un_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ return float64_unordered_quiet(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(un_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_unordered_quiet(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(oeq_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ return float64_eq_quiet(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(oeq_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_eq_quiet(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(ueq_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ FloatRelation v = float64_compare_quiet(a, b, &env->fp_status);
+
+ return v == float_relation_equal ||
+ v == float_relation_unordered;
+}
+
+uint32_t HELPER(ueq_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ FloatRelation v = float32_compare_quiet(a, b, &env->fp_status);
+
+ return v == float_relation_equal ||
+ v == float_relation_unordered;
+}
+
+uint32_t HELPER(olt_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ return float64_lt(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(olt_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_lt(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(ult_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ FloatRelation v = float64_compare_quiet(a, b, &env->fp_status);
+
+ return v == float_relation_less ||
+ v == float_relation_unordered;
+}
+
+uint32_t HELPER(ult_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ FloatRelation v = float32_compare_quiet(a, b, &env->fp_status);
+
+ return v == float_relation_less ||
+ v == float_relation_unordered;
+}
+
+uint32_t HELPER(ole_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ return float64_le(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(ole_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_le(a, b, &env->fp_status);
+}
+
+uint32_t HELPER(ule_d)(CPUXtensaState *env, float64 a, float64 b)
+{
+ FloatRelation v = float64_compare_quiet(a, b, &env->fp_status);
+
+ return v != float_relation_greater;
+}
+
+uint32_t HELPER(ule_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ FloatRelation v = float32_compare_quiet(a, b, &env->fp_status);
+
+ return v != float_relation_greater;
+}
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
new file mode 100644
index 000000000..b6696063e
--- /dev/null
+++ b/target/xtensa/gdbstub.c
@@ -0,0 +1,182 @@
+/*
+ * Xtensa gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/log.h"
+
+enum {
+ xtRegisterTypeArRegfile = 1, /* Register File ar0..arXX. */
+ xtRegisterTypeSpecialReg, /* CPU state, such as PS and Booleans (rsr). */
+ xtRegisterTypeUserReg, /* User-defined registers (rur). */
+ xtRegisterTypeTieRegfile, /* User-defined register files. */
+ xtRegisterTypeTieState, /* TIE States (mapped on user regs). */
+ xtRegisterTypeMapped, /* Mapped on Special Registers. */
+ xtRegisterTypeUnmapped, /* Special case of masked registers. */
+ xtRegisterTypeWindow, /* Live window registers (a0..a15). */
+ xtRegisterTypeVirtual, /* PC, FP. */
+ xtRegisterTypeUnknown
+};
+
+#define XTENSA_REGISTER_FLAGS_PRIVILEGED 0x0001
+#define XTENSA_REGISTER_FLAGS_READABLE 0x0002
+#define XTENSA_REGISTER_FLAGS_WRITABLE 0x0004
+#define XTENSA_REGISTER_FLAGS_VOLATILE 0x0008
+
+void xtensa_count_regs(const XtensaConfig *config,
+ unsigned *n_regs, unsigned *n_core_regs)
+{
+ unsigned i;
+ bool count_core_regs = true;
+
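+ /*
+ * TIE state, mapped and unmapped registers are not reported to gdb.
+ * Core (non-privileged) registers are assumed to come first in the map;
+ * counting of core registers stops at the first privileged register.
+ */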
+ for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
+ if (config->gdb_regmap.reg[i].type != xtRegisterTypeTieState &&
+ config->gdb_regmap.reg[i].type != xtRegisterTypeMapped &&
+ config->gdb_regmap.reg[i].type != xtRegisterTypeUnmapped) {
+ ++*n_regs;
+ if (count_core_regs) {
+ if ((config->gdb_regmap.reg[i].flags &
+ XTENSA_REGISTER_FLAGS_PRIVILEGED) == 0) {
+ ++*n_core_regs;
+ } else {
+ count_core_regs = false;
+ }
+ }
+ }
+ }
+}
+
+int xtensa_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+#ifdef CONFIG_USER_ONLY
+ int num_regs = env->config->gdb_regmap.num_core_regs;
+#else
+ int num_regs = env->config->gdb_regmap.num_regs;
+#endif
+ unsigned i;
+
+ if (n < 0 || n >= num_regs) {
+ return 0;
+ }
+
+ switch (reg->type) {
+ case xtRegisterTypeVirtual: /*pc*/
+ return gdb_get_reg32(mem_buf, env->pc);
+
+ case xtRegisterTypeArRegfile: /*ar*/
+ xtensa_sync_phys_from_window(env);
+ return gdb_get_reg32(mem_buf, env->phys_regs[(reg->targno & 0xff)
+ % env->config->nareg]);
+
+ case xtRegisterTypeSpecialReg: /*SR*/
+ return gdb_get_reg32(mem_buf, env->sregs[reg->targno & 0xff]);
+
+ case xtRegisterTypeUserReg: /*UR*/
+ return gdb_get_reg32(mem_buf, env->uregs[reg->targno & 0xff]);
+
+ case xtRegisterTypeTieRegfile: /*f*/
+ i = reg->targno & 0x0f;
+ switch (reg->size) {
+ case 4:
+ return gdb_get_reg32(mem_buf,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]));
+ case 8:
+ return gdb_get_reg64(mem_buf, float64_val(env->fregs[i].f64));
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ return gdb_get_zeroes(mem_buf, reg->size);
+ }
+
+ case xtRegisterTypeWindow: /*a*/
+ return gdb_get_reg32(mem_buf, env->regs[reg->targno & 0x0f]);
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return gdb_get_zeroes(mem_buf, reg->size);
+ }
+}
+
+int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t tmp;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+#ifdef CONFIG_USER_ONLY
+ int num_regs = env->config->gdb_regmap.num_core_regs;
+#else
+ int num_regs = env->config->gdb_regmap.num_regs;
+#endif
+
+ if (n < 0 || n >= num_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ switch (reg->type) {
+ case xtRegisterTypeVirtual: /*pc*/
+ env->pc = tmp;
+ break;
+
+ case xtRegisterTypeArRegfile: /*ar*/
+ env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
+ xtensa_sync_window_from_phys(env);
+ break;
+
+ case xtRegisterTypeSpecialReg: /*SR*/
+ env->sregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case xtRegisterTypeUserReg: /*UR*/
+ env->uregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case xtRegisterTypeTieRegfile: /*f*/
+ switch (reg->size) {
+ case 4:
+ env->fregs[reg->targno & 0x0f].f32[FP_F32_LOW] = make_float32(tmp);
+ return 4;
+ case 8:
+ env->fregs[reg->targno & 0x0f].f64 = make_float64(tmp);
+ return 8;
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported size %d\n",
+ __func__, n, reg->size);
+ return reg->size;
+ }
+
+ case xtRegisterTypeWindow: /*a*/
+ env->regs[reg->targno & 0x0f] = tmp;
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return reg->size;
+ }
+
+ return 4;
+}
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
new file mode 100644
index 000000000..29d216ec1
--- /dev/null
+++ b/target/xtensa/helper.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
+#include "qemu/qemu-print.h"
+#include "qemu/host-utils.h"
+
+static struct XtensaConfigList *xtensa_cores;
+
+static void add_translator_to_hash(GHashTable *translator,
+ const char *name,
+ const XtensaOpcodeOps *opcode)
+{
+ if (!g_hash_table_insert(translator, (void *)name, (void *)opcode)) {
+ error_report("Multiple definitions of '%s' opcode in a single table",
+ name);
+ }
+}
+
+static GHashTable *hash_opcode_translators(const XtensaOpcodeTranslators *t)
+{
+ unsigned i, j;
+ GHashTable *translator = g_hash_table_new(g_str_hash, g_str_equal);
+
+ for (i = 0; i < t->num_opcodes; ++i) {
+ if (t->opcode[i].op_flags & XTENSA_OP_NAME_ARRAY) {
+ const char * const *name = t->opcode[i].name;
+
+ for (j = 0; name[j]; ++j) {
+ add_translator_to_hash(translator,
+ (void *)name[j],
+ (void *)(t->opcode + i));
+ }
+ } else {
+ add_translator_to_hash(translator,
+ (void *)t->opcode[i].name,
+ (void *)(t->opcode + i));
+ }
+ }
+ return translator;
+}
+
+static XtensaOpcodeOps *
+xtensa_find_opcode_ops(const XtensaOpcodeTranslators *t,
+ const char *name)
+{
+ static GHashTable *translators;
+ GHashTable *translator;
+
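+ /*
+ * Build one name->ops hash table per translator table on first use and
+ * cache it in a process-global table keyed by the translator pointer.
+ */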
+ if (translators == NULL) {
+ translators = g_hash_table_new(g_direct_hash, g_direct_equal);
+ }
+ translator = g_hash_table_lookup(translators, t);
+ if (translator == NULL) {
+ translator = hash_opcode_translators(t);
+ g_hash_table_insert(translators, (void *)t, translator);
+ }
+ return g_hash_table_lookup(translator, name);
+}
+
+static void init_libisa(XtensaConfig *config)
+{
+ unsigned i, j;
+ unsigned opcodes;
+ unsigned formats;
+ unsigned regfiles;
+
+ config->isa = xtensa_isa_init(config->isa_internal, NULL, NULL);
+ assert(xtensa_isa_maxlength(config->isa) <= MAX_INSN_LENGTH);
+ assert(xtensa_insnbuf_size(config->isa) <= MAX_INSNBUF_LENGTH);
+ opcodes = xtensa_isa_num_opcodes(config->isa);
+ formats = xtensa_isa_num_formats(config->isa);
+ regfiles = xtensa_isa_num_regfiles(config->isa);
+ config->opcode_ops = g_new(XtensaOpcodeOps *, opcodes);
+
+ for (i = 0; i < formats; ++i) {
+ assert(xtensa_format_num_slots(config->isa, i) <= MAX_INSN_SLOTS);
+ }
+
+ for (i = 0; i < opcodes; ++i) {
+ const char *opc_name = xtensa_opcode_name(config->isa, i);
+ XtensaOpcodeOps *ops = NULL;
+
+ assert(xtensa_opcode_num_operands(config->isa, i) <= MAX_OPCODE_ARGS);
+ if (!config->opcode_translators) {
+ ops = xtensa_find_opcode_ops(&xtensa_core_opcodes, opc_name);
+ } else {
+ for (j = 0; !ops && config->opcode_translators[j]; ++j) {
+ ops = xtensa_find_opcode_ops(config->opcode_translators[j],
+ opc_name);
+ }
+ }
+#ifdef DEBUG
+ if (ops == NULL) {
+ fprintf(stderr,
+ "opcode translator not found for %s's opcode '%s'\n",
+ config->name, opc_name);
+ }
+#endif
+ config->opcode_ops[i] = ops;
+ }
+ config->a_regfile = xtensa_regfile_lookup(config->isa, "AR");
+
+ config->regfile = g_new(void **, regfiles);
+ for (i = 0; i < regfiles; ++i) {
+ const char *name = xtensa_regfile_name(config->isa, i);
+ int entries = xtensa_regfile_num_entries(config->isa, i);
+ int bits = xtensa_regfile_num_bits(config->isa, i);
+
+ config->regfile[i] = xtensa_get_regfile_by_name(name, entries, bits);
+#ifdef DEBUG
+ if (config->regfile[i] == NULL) {
+ fprintf(stderr, "regfile '%s' not found for %s\n",
+ name, config->name);
+ }
+#endif
+ }
+ xtensa_collect_sr_names(config);
+}
+
+static void xtensa_finalize_config(XtensaConfig *config)
+{
+ if (config->isa_internal) {
+ init_libisa(config);
+ }
+
+ if (config->gdb_regmap.num_regs == 0 ||
+ config->gdb_regmap.num_core_regs == 0) {
+ unsigned n_regs = 0;
+ unsigned n_core_regs = 0;
+
+ xtensa_count_regs(config, &n_regs, &n_core_regs);
+ if (config->gdb_regmap.num_regs == 0) {
+ config->gdb_regmap.num_regs = n_regs;
+ }
+ if (config->gdb_regmap.num_core_regs == 0) {
+ config->gdb_regmap.num_core_regs = n_core_regs;
+ }
+ }
+}
+
+static void xtensa_core_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
+ XtensaConfig *config = data;
+
+ xtensa_finalize_config(config);
+ xcc->config = config;
+
+ /*
+ * Use num_core_regs to see only non-privileged registers in an unmodified
+ * gdb. Use num_regs to see all registers; a gdb modification is required
+ * for that: reset bit 0 in the 'flags' field of the register definitions
+ * in gdb/xtensa-config.c, either in the gdb source tree or in the gdb overlay.
+ */
+ cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
+}
+
+void xtensa_register_core(XtensaConfigList *node)
+{
+ TypeInfo type = {
+ .parent = TYPE_XTENSA_CPU,
+ .class_init = xtensa_core_class_init,
+ .class_data = (void *)node->config,
+ };
+
+ node->next = xtensa_cores;
+ xtensa_cores = node;
+ type.name = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node->config->name);
+ type_register(&type);
+ g_free((gpointer)type.name);
+}
+
+static uint32_t check_hw_breakpoints(CPUXtensaState *env)
+{
+ unsigned i;
+
+ for (i = 0; i < env->config->ndbreak; ++i) {
+ if (env->cpu_watchpoint[i] &&
+ env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
+ }
+ }
+ return 0;
+}
+
+void xtensa_breakpoint_handler(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ uint32_t cause;
+
+ cs->watchpoint_hit = NULL;
+ cause = check_hw_breakpoints(env);
+ if (cause) {
+ debug_exception_env(env, cause);
+ }
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+}
+
+void xtensa_cpu_list(void)
+{
+ XtensaConfigList *core = xtensa_cores;
+ qemu_printf("Available CPUs:\n");
+ for (; core; core = core->next) {
+ qemu_printf(" %s\n", core->config->name);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+void xtensa_cpu_do_unaligned_access(CPUState *cs,
+ vaddr addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ assert(xtensa_option_enabled(env->config,
+ XTENSA_OPTION_UNALIGNED_EXCEPTION));
+ cpu_restore_state(CPU(cpu), retaddr, true);
+ HELPER(exception_cause_vaddr)(env,
+ env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
+ addr);
+}
+
+bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, true, address, access_type,
+ mmu_idx, &paddr, &page_size, &access);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08" VADDR_PRIx
+ ", %d, %d) -> %08x, ret = %d\n",
+ __func__, address, access_type, mmu_idx, paddr, ret);
+
+ if (ret == 0) {
+ tlb_set_page(cs,
+ address & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
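+ /* exception_cause_vaddr() raises a guest exception and does not return. */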
+ cpu_restore_state(cs, retaddr, true);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, address);
+ }
+}
+
+void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+ unsigned size, MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ cpu_restore_state(cs, retaddr, true);
+ HELPER(exception_cause_vaddr)(env, env->pc,
+ access_type == MMU_INST_FETCH ?
+ INSTR_PIF_ADDR_ERROR_CAUSE :
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+ addr);
+}
+
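+/*
+ * Drive the external RunStall signal: halt the vCPU while stalled,
+ * kick it back to life when released.
+ */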
+void xtensa_runstall(CPUXtensaState *env, bool runstall)
+{
+ CPUState *cpu = env_cpu(env);
+
+ env->runstall = runstall;
+ cpu->halted = runstall;
+ if (runstall) {
+ cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
+ } else {
+ qemu_cpu_kick(cpu);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h
new file mode 100644
index 000000000..ae938ceed
--- /dev/null
+++ b/target/xtensa/helper.h
@@ -0,0 +1,105 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_3(exception_cause, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_cause_vaddr, noreturn, env, i32, i32, i32)
+DEF_HELPER_3(debug_exception, noreturn, env, i32, i32)
+
+DEF_HELPER_1(sync_windowbase, void, env)
+DEF_HELPER_4(entry, void, env, i32, i32, i32)
+DEF_HELPER_2(test_ill_retw, void, env, i32)
+DEF_HELPER_2(test_underflow_retw, void, env, i32)
+DEF_HELPER_2(retw, void, env, i32)
+DEF_HELPER_3(window_check, noreturn, env, i32, i32)
+DEF_HELPER_1(restore_owb, void, env)
+DEF_HELPER_2(movsp, void, env, i32)
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(simcall, void, env)
+#endif
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(waiti, void, env, i32, i32)
+DEF_HELPER_1(update_ccount, void, env)
+DEF_HELPER_2(wsr_ccount, void, env, i32)
+DEF_HELPER_2(update_ccompare, void, env, i32)
+DEF_HELPER_1(check_interrupts, void, env)
+DEF_HELPER_2(intset, void, env, i32)
+DEF_HELPER_2(intclear, void, env, i32)
+DEF_HELPER_3(check_atomctl, void, env, i32, i32)
+DEF_HELPER_4(check_exclusive, void, env, i32, i32, i32)
+DEF_HELPER_2(wsr_memctl, void, env, i32)
+
+DEF_HELPER_2(itlb_hit_test, void, env, i32)
+DEF_HELPER_2(wsr_rasid, void, env, i32)
+DEF_HELPER_FLAGS_3(rtlb0, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(rtlb1, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_3(itlb, void, env, i32, i32)
+DEF_HELPER_3(ptlb, i32, env, i32, i32)
+DEF_HELPER_4(wtlb, void, env, i32, i32, i32)
+DEF_HELPER_2(wsr_mpuenb, void, env, i32)
+DEF_HELPER_3(wptlb, void, env, i32, i32)
+DEF_HELPER_FLAGS_2(rptlb0, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_FLAGS_2(rptlb1, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_2(pptlb, i32, env, i32)
+
+DEF_HELPER_2(wsr_ibreakenable, void, env, i32)
+DEF_HELPER_3(wsr_ibreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreakc, void, env, i32, i32)
+#endif
+
+DEF_HELPER_2(wur_fpu2k_fcr, void, env, i32)
+DEF_HELPER_FLAGS_1(abs_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_1(neg_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_3(fpu2k_add_s, f32, env, f32, f32)
+DEF_HELPER_3(fpu2k_sub_s, f32, env, f32, f32)
+DEF_HELPER_3(fpu2k_mul_s, f32, env, f32, f32)
+DEF_HELPER_4(fpu2k_madd_s, f32, env, f32, f32, f32)
+DEF_HELPER_4(fpu2k_msub_s, f32, env, f32, f32, f32)
+DEF_HELPER_4(ftoi_s, i32, env, f32, i32, i32)
+DEF_HELPER_4(ftoui_s, i32, env, f32, i32, i32)
+DEF_HELPER_3(itof_s, f32, env, i32, i32)
+DEF_HELPER_3(uitof_s, f32, env, i32, i32)
+DEF_HELPER_2(cvtd_s, f64, env, f32)
+
+DEF_HELPER_3(un_s, i32, env, f32, f32)
+DEF_HELPER_3(oeq_s, i32, env, f32, f32)
+DEF_HELPER_3(ueq_s, i32, env, f32, f32)
+DEF_HELPER_3(olt_s, i32, env, f32, f32)
+DEF_HELPER_3(ult_s, i32, env, f32, f32)
+DEF_HELPER_3(ole_s, i32, env, f32, f32)
+DEF_HELPER_3(ule_s, i32, env, f32, f32)
+
+DEF_HELPER_2(wur_fpu_fcr, void, env, i32)
+DEF_HELPER_1(rur_fpu_fsr, i32, env)
+DEF_HELPER_2(wur_fpu_fsr, void, env, i32)
+DEF_HELPER_FLAGS_1(abs_d, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_1(neg_d, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_3(add_d, f64, env, f64, f64)
+DEF_HELPER_3(add_s, f32, env, f32, f32)
+DEF_HELPER_3(sub_d, f64, env, f64, f64)
+DEF_HELPER_3(sub_s, f32, env, f32, f32)
+DEF_HELPER_3(mul_d, f64, env, f64, f64)
+DEF_HELPER_3(mul_s, f32, env, f32, f32)
+DEF_HELPER_4(madd_d, f64, env, f64, f64, f64)
+DEF_HELPER_4(madd_s, f32, env, f32, f32, f32)
+DEF_HELPER_4(msub_d, f64, env, f64, f64, f64)
+DEF_HELPER_4(msub_s, f32, env, f32, f32, f32)
+DEF_HELPER_3(mkdadj_d, f64, env, f64, f64)
+DEF_HELPER_3(mkdadj_s, f32, env, f32, f32)
+DEF_HELPER_2(mksadj_d, f64, env, f64)
+DEF_HELPER_2(mksadj_s, f32, env, f32)
+DEF_HELPER_4(ftoi_d, i32, env, f64, i32, i32)
+DEF_HELPER_4(ftoui_d, i32, env, f64, i32, i32)
+DEF_HELPER_3(itof_d, f64, env, i32, i32)
+DEF_HELPER_3(uitof_d, f64, env, i32, i32)
+DEF_HELPER_2(cvts_d, f32, env, f64)
+
+DEF_HELPER_3(un_d, i32, env, f64, f64)
+DEF_HELPER_3(oeq_d, i32, env, f64, f64)
+DEF_HELPER_3(ueq_d, i32, env, f64, f64)
+DEF_HELPER_3(olt_d, i32, env, f64, f64)
+DEF_HELPER_3(ult_d, i32, env, f64, f64)
+DEF_HELPER_3(ole_d, i32, env, f64, f64)
+DEF_HELPER_3(ule_d, i32, env, f64, f64)
+
+DEF_HELPER_2(rer, i32, env, i32)
+DEF_HELPER_3(wer, void, env, i32, i32)
diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh
new file mode 100755
index 000000000..df66d0939
--- /dev/null
+++ b/target/xtensa/import_core.sh
@@ -0,0 +1,71 @@
+#! /bin/bash -e
+
+OVERLAY="$1"
+NAME="$2"
+FREQ=40000
+BASE=$(dirname "$0")
+TARGET="$BASE"/core-$NAME
+
+[ $# -ge 2 -a -f "$OVERLAY" ] || { cat <<EOF
+Usage: $0 overlay-archive-to-import core-name [frequency-in-KHz]
+ overlay-archive-to-import: file name of xtensa-config-overlay.tar.gz
+ to import configuration from.
+ core-name: QEMU name of the imported core. Must be a valid
+ C identifier.
+ frequency-in-KHz: core frequency (40 MHz if not specified).
+EOF
+exit
+}
+
+[ $# -ge 3 ] && FREQ="$3"
+mkdir -p "$TARGET"
+tar -xf "$OVERLAY" -C "$TARGET" --strip-components=2 \
+ xtensa/config/core-isa.h \
+ xtensa/config/core-matmap.h
+tar -xf "$OVERLAY" -O gdb/xtensa-config.c | \
+ sed -n '1,/*\//p;/XTREG/,/XTREG_END/p' > "$TARGET"/gdb-config.c.inc
+#
+# Fix up known issues in the xtensa-modules.c
+#
+tar -xf "$OVERLAY" -O binutils/xtensa-modules.c | \
+ sed -e 's/^\(xtensa_opcode_encode_fn.*\[\] =\)/static \1/' \
+ -e '/^int num_bypass_groups()/,/}/d' \
+ -e '/^int num_bypass_group_chunks()/,/}/d' \
+ -e '/^uint32 \*bypass_entry(int i)/,/}/d' \
+ -e '/^#include "ansidecl.h"/d' \
+ -e '/^Slot_[a-zA-Z0-9_]\+_decode (const xtensa_insnbuf insn)/,/^}/s/^ return 0;$/ return XTENSA_UNDEFINED;/' \
+ -e 's/#include <xtensa-isa.h>/#include "xtensa-isa.h"/' \
+ -e 's/^\(xtensa_isa_internal xtensa_modules\)/static \1/' \
+ > "$TARGET"/xtensa-modules.c.inc
+
+cat <<EOF > "${TARGET}.c"
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-$NAME/core-isa.h"
+#include "core-$NAME/core-matmap.h"
+#include "overlay_tool.h"
+
+#define xtensa_modules xtensa_modules_$NAME
+#include "core-$NAME/xtensa-modules.c.inc"
+
+static XtensaConfig $NAME __attribute__((unused)) = {
+ .name = "$NAME",
+ .gdb_regmap = {
+ .reg = {
+#include "core-$NAME/gdb-config.c.inc"
+ }
+ },
+ .isa_internal = &xtensa_modules,
+ .clock_freq_khz = $FREQ,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE($NAME)
+EOF
+
+grep -qxf core-${NAME}.c "$BASE"/cores.list || \
+ echo core-${NAME}.c >> "$BASE"/cores.list
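+
+# Example invocation (hypothetical file and core names):
+#   ./import_core.sh xtensa-config-overlay.tar.gz example_core 50000
+# would import the overlay as core 'example_core', clocked at 50 MHz,
+# and add it to cores.list.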
diff --git a/target/xtensa/meson.build b/target/xtensa/meson.build
new file mode 100644
index 000000000..20bbf9b33
--- /dev/null
+++ b/target/xtensa/meson.build
@@ -0,0 +1,27 @@
+xtensa_ss = ss.source_set()
+
+xtensa_cores = fs.read('cores.list')
+xtensa_ss.add(files(xtensa_cores.strip().split('\n')))
+
+xtensa_ss.add(files(
+ 'cpu.c',
+ 'exc_helper.c',
+ 'fpu_helper.c',
+ 'gdbstub.c',
+ 'helper.c',
+ 'op_helper.c',
+ 'translate.c',
+ 'win_helper.c',
+ 'xtensa-isa.c',
+))
+
+xtensa_softmmu_ss = ss.source_set()
+xtensa_softmmu_ss.add(files(
+ 'dbg_helper.c',
+ 'mmu_helper.c',
+ 'monitor.c',
+ 'xtensa-semi.c',
+))
+
+target_arch += {'xtensa': xtensa_ss}
+target_softmmu_arch += {'xtensa': xtensa_softmmu_ss}
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
new file mode 100644
index 000000000..57e319a1a
--- /dev/null
+++ b/target/xtensa/mmu_helper.c
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/qemu-print.h"
+#include "qemu/units.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
+#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
+#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
+#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
+#define XTENSA_MPU_MEM_TYPE_SHIFT 12
+#define XTENSA_MPU_ATTR_MASK 0x001fff00
+
+#define XTENSA_MPU_PROBE_B 0x40000000
+#define XTENSA_MPU_PROBE_V 0x80000000
+
+#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
+#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
+#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
+#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003
+
+#define XTENSA_MPU_TYPE_SYS_C 0x0010
+#define XTENSA_MPU_TYPE_SYS_W 0x0020
+#define XTENSA_MPU_TYPE_SYS_R 0x0040
+#define XTENSA_MPU_TYPE_CPU_C 0x0100
+#define XTENSA_MPU_TYPE_CPU_W 0x0200
+#define XTENSA_MPU_TYPE_CPU_R 0x0400
+#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
+#define XTENSA_MPU_TYPE_B 0x1000
+#define XTENSA_MPU_TYPE_INT 0x2000
+
+void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
+{
+ /*
+ * Probe the memory; we don't care about the result, only about
+ * the side effects (i.e. any MMU or other exception).
+ */
+ probe_access(env, vaddr, 1, MMU_INST_FETCH,
+ cpu_mmu_index(env, true), GETPC());
+}
+
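+/*
+ * The ring-0 ASID field of RASID is hardwired to 1; a change to any
+ * other ring's ASID invalidates all cached translations in the QEMU TLB.
+ */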
+void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
+{
+ v = (v & 0xffffff00) | 0x1;
+ if (v != env->sregs[RASID]) {
+ env->sregs[RASID] = v;
+ tlb_flush(env_cpu(env));
+ }
+}
+
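+/*
+ * Extract the page-size field of a variable-page TLB way from
+ * ITLBCFG/DTLBCFG: way 4 has a 2-bit field, ways 5 and 6 a 1-bit field.
+ */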
+static uint32_t get_page_size(const CPUXtensaState *env,
+ bool dtlb, uint32_t way)
+{
+ uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
+
+ switch (way) {
+ case 4:
+ return (tlbcfg >> 16) & 0x3;
+
+ case 5:
+ return (tlbcfg >> 20) & 0x1;
+
+ case 6:
+ return (tlbcfg >> 24) & 0x1;
+
+ default:
+ return 0;
+ }
+}
+
+/*!
+ * Get bit mask for the virtual address bits translated by the TLB way
+ */
+static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
+ bool dtlb, uint32_t way)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ switch (way) {
+ case 4:
+ return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
+
+ case 5:
+ if (varway56) {
+ return 0xf8000000 << get_page_size(env, dtlb, way);
+ } else {
+ return 0xf8000000;
+ }
+
+ case 6:
+ if (varway56) {
+ return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
+ } else {
+ return 0xf0000000;
+ }
+
+ default:
+ return 0xfffff000;
+ }
+ } else {
+ return REGION_PAGE_MASK;
+ }
+}
+
+/*!
+ * Get bit mask for the 'VPN without index' field.
+ * See ISA, 4.6.5.6, data format for RxTLB0
+ */
+static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (way < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ return is32 ? 0xffff8000 : 0xffffc000;
+ } else if (way == 4) {
+ return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
+ } else if (way <= 6) {
+ uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (varway56) {
+ return mask << (way == 5 ? 2 : 3);
+ } else {
+ return mask << 1;
+ }
+ } else {
+ return 0xfffff000;
+ }
+}
+
+/*!
+ * Split virtual address into VPN (with index) and entry index
+ * for the given TLB way
+ */
+static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
+ bool dtlb, uint32_t *vpn,
+ uint32_t wi, uint32_t *ei)
+{
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (!dtlb) {
+ wi &= 7;
+ }
+
+ if (wi < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
+ } else {
+ switch (wi) {
+ case 4:
+ {
+ uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
+ *ei = (v >> eibase) & 0x3;
+ }
+ break;
+
+ case 5:
+ if (varway56) {
+ uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x3;
+ } else {
+ *ei = (v >> 27) & 0x1;
+ }
+ break;
+
+ case 6:
+ if (varway56) {
+ uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x7;
+ } else {
+ *ei = (v >> 28) & 0x1;
+ }
+ break;
+
+ default:
+ *ei = 0;
+ break;
+ }
+ }
+ *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+}
+
+/*!
+ * Split TLB address into TLB way, entry index and VPN (with index).
+ * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
+ */
+static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ *wi = v & (dtlb ? 0xf : 0x7);
+ split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+ } else {
+ *vpn = v & REGION_PAGE_MASK;
+ *wi = 0;
+ *ei = (v >> 29) & 0x7;
+ }
+}
+
+static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei)
+{
+ return dtlb ?
+ env->dtlb[wi] + ei :
+ env->itlb[wi] + ei;
+}
+
+static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
+ uint32_t v, bool dtlb, uint32_t *pwi)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ if (pwi) {
+ *pwi = wi;
+ }
+ return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+}
+
+static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn,
+ uint32_t pte)
+{
+ entry->vaddr = vpn;
+ entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+ entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
+ entry->attr = pte & 0xf;
+}
+
+static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei,
+ uint32_t vpn, uint32_t pte)
+{
+ CPUState *cs = env_cpu(env);
+ xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ if (entry->variable) {
+ if (entry->asid) {
+ tlb_flush_page(cs, entry->vaddr);
+ }
+ xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
+ tlb_flush_page(cs, entry->vaddr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s %d, %d, %d trying to set immutable entry\n",
+ __func__, dtlb, wi, ei);
+ }
+ } else {
+ tlb_flush_page(cs, entry->vaddr);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_REGION_TRANSLATION)) {
+ entry->paddr = pte & REGION_PAGE_MASK;
+ }
+ entry->attr = pte & 0xf;
+ }
+}
+
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ return ~0;
+}
+
+static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
+ const xtensa_tlb *tlb,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned wi, ei;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
+ entry[wi][ei].asid = 0;
+ entry[wi][ei].variable = true;
+ }
+ }
+}
+
+static void reset_tlb_mmu_ways56(CPUXtensaState *env,
+ const xtensa_tlb *tlb,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ if (!tlb->varway56) {
+ static const xtensa_tlb_entry way5[] = {
+ {
+ .vaddr = 0xd0000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xd8000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ static const xtensa_tlb_entry way6[] = {
+ {
+ .vaddr = 0xe0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xf0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ memcpy(entry[5], way5, sizeof(way5));
+ memcpy(entry[6], way6, sizeof(way6));
+ } else {
+ uint32_t ei;
+ for (ei = 0; ei < 8; ++ei) {
+ entry[6][ei].vaddr = ei << 29;
+ entry[6][ei].paddr = ei << 29;
+ entry[6][ei].asid = 1;
+ entry[6][ei].attr = 3;
+ }
+ }
+}
+
+static void reset_tlb_region_way0(CPUXtensaState *env,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned ei;
+
+ for (ei = 0; ei < 8; ++ei) {
+ entry[0][ei].vaddr = ei << 29;
+ entry[0][ei].paddr = ei << 29;
+ entry[0][ei].asid = 1;
+ entry[0][ei].attr = 2;
+ entry[0][ei].variable = true;
+ }
+}
+
+void reset_mmu(CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ env->sregs[RASID] = 0x04030201;
+ env->sregs[ITLBCFG] = 0;
+ env->sregs[DTLBCFG] = 0;
+ env->autorefill_idx = 0;
+ reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
+ reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
+ unsigned i;
+
+ env->sregs[MPUENB] = 0;
+ env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
+ env->sregs[CACHEADRDIS] = 0;
+ assert(env->config->n_mpu_bg_segments > 0 &&
+ env->config->mpu_bg[0].vaddr == 0);
+ for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
+ assert(env->config->mpu_bg[i].vaddr >=
+ env->config->mpu_bg[i - 1].vaddr);
+ }
+ } else {
+ env->sregs[CACHEATTR] = 0x22222222;
+ reset_tlb_region_way0(env, env->itlb);
+ reset_tlb_region_way0(env, env->dtlb);
+ }
+}
+
+static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
+{
+ unsigned i;
+ for (i = 0; i < 4; ++i) {
+ if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
+ return i;
+ }
+ }
+ return 0xff;
+}
+
+/*!
+ * Look up the xtensa TLB for the given virtual address.
+ * See ISA, 4.6.2.2
+ *
+ * \param pwi: [out] way index
+ * \param pei: [out] entry index
+ * \param pring: [out] access ring
+ * \return 0 if ok, exception cause code otherwise
+ */
+static int xtensa_tlb_lookup(const CPUXtensaState *env,
+ uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring)
+{
+ const xtensa_tlb *tlb = dtlb ?
+ &env->config->dtlb : &env->config->itlb;
+ const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
+ env->dtlb : env->itlb;
+
+ int nhits = 0;
+ unsigned wi;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ uint32_t vpn;
+ uint32_t ei;
+ split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
+ if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
+ unsigned ring = get_ring(env, entry[wi][ei].asid);
+ if (ring < 4) {
+ if (++nhits > 1) {
+ return dtlb ?
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE :
+ INST_TLB_MULTI_HIT_CAUSE;
+ }
+ *pwi = wi;
+ *pei = ei;
+ *pring = ring;
+ }
+ }
+ }
+ return nhits ? 0 :
+ (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
+}
+
+uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+ } else {
+ return v & REGION_PAGE_MASK;
+ }
+}
+
+uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
+ return entry->paddr | entry->attr;
+}
+
+void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ if (entry->variable && entry->asid) {
+ tlb_flush_page(env_cpu(env), entry->vaddr);
+ entry->asid = 0;
+ }
+ }
+}
+
+uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
+
+ switch (res) {
+ case 0:
+ if (ring >= xtensa_get_ring(env)) {
+ return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
+ }
+ break;
+
+ case INST_TLB_MULTI_HIT_CAUSE:
+ case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
+ HELPER(exception_cause_vaddr)(env, env->pc, res, v);
+ break;
+ }
+ return 0;
+ } else {
+ return (v & REGION_PAGE_MASK) | 0x1;
+ }
+}
+
+void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+}
+
+/*!
+ * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.5.10
+ */
+static unsigned mmu_attr_to_access(uint32_t attr)
+{
+ unsigned access = 0;
+
+ if (attr < 12) {
+ access |= PAGE_READ;
+ if (attr & 0x1) {
+ access |= PAGE_EXEC;
+ }
+ if (attr & 0x2) {
+ access |= PAGE_WRITE;
+ }
+
+ switch (attr & 0xc) {
+ case 0:
+ access |= PAGE_CACHE_BYPASS;
+ break;
+
+ case 4:
+ access |= PAGE_CACHE_WB;
+ break;
+
+ case 8:
+ access |= PAGE_CACHE_WT;
+ break;
+ }
+ } else if (attr == 13) {
+ access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
+ }
+ return access;
+}
+
+/*!
+ * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.3.3
+ */
+static unsigned region_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+/*!
+ * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, A.2.14 The Cache Attribute Register
+ */
+static unsigned cacheattr_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+struct attr_pattern {
+ uint32_t mask;
+ uint32_t value;
+};
+
+static int attr_pattern_match(uint32_t attr,
+ const struct attr_pattern *pattern,
+ size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; ++i) {
+ if ((attr & pattern[i].mask) == pattern[i].value) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
+{
+ static const struct attr_pattern cpu_c[] = {
+ { .mask = 0x18f, .value = 0x089 },
+ { .mask = 0x188, .value = 0x080 },
+ { .mask = 0x180, .value = 0x180 },
+ };
+
+ unsigned type = 0;
+
+ if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
+ type |= XTENSA_MPU_TYPE_CPU_CACHE;
+ if (attr & 0x10) {
+ type |= XTENSA_MPU_TYPE_CPU_C;
+ }
+ if (attr & 0x20) {
+ type |= XTENSA_MPU_TYPE_CPU_W;
+ }
+ if (attr & 0x40) {
+ type |= XTENSA_MPU_TYPE_CPU_R;
+ }
+ }
+ return type;
+}
+
+static unsigned mpu_attr_to_type(uint32_t attr)
+{
+ static const struct attr_pattern device_type[] = {
+ { .mask = 0x1f6, .value = 0x000 },
+ { .mask = 0x1f6, .value = 0x006 },
+ };
+ static const struct attr_pattern sys_nc_type[] = {
+ { .mask = 0x1fe, .value = 0x018 },
+ { .mask = 0x1fe, .value = 0x01e },
+ { .mask = 0x18f, .value = 0x089 },
+ };
+ static const struct attr_pattern sys_c_type[] = {
+ { .mask = 0x1f8, .value = 0x010 },
+ { .mask = 0x188, .value = 0x080 },
+ { .mask = 0x1f0, .value = 0x030 },
+ { .mask = 0x180, .value = 0x180 },
+ };
+ static const struct attr_pattern b[] = {
+ { .mask = 0x1f7, .value = 0x001 },
+ { .mask = 0x1f7, .value = 0x007 },
+ { .mask = 0x1ff, .value = 0x019 },
+ { .mask = 0x1ff, .value = 0x01f },
+ };
+
+ unsigned type = 0;
+
+ attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
+ if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
+ type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
+ if (attr & 0x80) {
+ type |= XTENSA_MPU_TYPE_INT;
+ }
+ }
+ if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
+ type |= XTENSA_MPU_SYSTEM_TYPE_NC;
+ }
+ if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
+ type |= XTENSA_MPU_SYSTEM_TYPE_C;
+ if (attr & 0x1) {
+ type |= XTENSA_MPU_TYPE_SYS_C;
+ }
+ if (attr & 0x2) {
+ type |= XTENSA_MPU_TYPE_SYS_W;
+ }
+ if (attr & 0x4) {
+ type |= XTENSA_MPU_TYPE_SYS_R;
+ }
+ }
+ if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
+ type |= XTENSA_MPU_TYPE_B;
+ }
+ type |= mpu_attr_to_cpu_cache(attr);
+
+ return type;
+}
+
+static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
+{
+ static const unsigned access[2][16] = {
+ [0] = {
+ [4] = PAGE_READ,
+ [5] = PAGE_READ | PAGE_EXEC,
+ [6] = PAGE_READ | PAGE_WRITE,
+ [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ [8] = PAGE_WRITE,
+ [9] = PAGE_READ | PAGE_WRITE,
+ [10] = PAGE_READ | PAGE_WRITE,
+ [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ [12] = PAGE_READ,
+ [13] = PAGE_READ | PAGE_EXEC,
+ [14] = PAGE_READ | PAGE_WRITE,
+ [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ },
+ [1] = {
+ [8] = PAGE_WRITE,
+ [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ [10] = PAGE_READ,
+ [11] = PAGE_READ | PAGE_EXEC,
+ [12] = PAGE_READ,
+ [13] = PAGE_READ | PAGE_EXEC,
+ [14] = PAGE_READ | PAGE_WRITE,
+ [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ },
+ };
+ unsigned rv;
+ unsigned type;
+
+ type = mpu_attr_to_cpu_cache(attr);
+ rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
+ XTENSA_MPU_ACC_RIGHTS_SHIFT];
+
+ if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
+ rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
+ } else {
+ rv |= PAGE_CACHE_BYPASS;
+ }
+ return rv;
+}
+
+static bool is_access_granted(unsigned access, int is_write)
+{
+ switch (is_write) {
+ case 0:
+ return access & PAGE_READ;
+
+ case 1:
+ return access & PAGE_WRITE;
+
+ case 2:
+ return access & PAGE_EXEC;
+
+ default:
+ return 0;
+ }
+}
+
+static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
+
+static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access, bool may_lookup_pt)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ uint32_t vpn;
+ uint32_t pte;
+ const xtensa_tlb_entry *entry = NULL;
+ xtensa_tlb_entry tmp_entry;
+ int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
+
+ if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
+ may_lookup_pt && get_pte(env, vaddr, &pte)) {
+ ring = (pte >> 4) & 0x3;
+ wi = 0;
+ split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
+
+ if (update_tlb) {
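+ /* Autorefill replaces entries of ways 0..3 in round-robin order. */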
+ wi = ++env->autorefill_idx & 0x3;
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
+ env->sregs[EXCVADDR] = vaddr;
+ qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
+ __func__, vaddr, vpn, pte);
+ } else {
+ xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
+ entry = &tmp_entry;
+ }
+ ret = 0;
+ }
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (entry == NULL) {
+ entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+ }
+
+ if (ring < mmu_idx) {
+ return dtlb ?
+ LOAD_STORE_PRIVILEGE_CAUSE :
+ INST_FETCH_PRIVILEGE_CAUSE;
+ }
+
+ *access = mmu_attr_to_access(entry->attr) &
+ ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
+ *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+
+ return 0;
+}
+
+static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
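+ /*
+ * The page table is mapped at PTEVADDR; the entry for 'vaddr' is word
+ * number (vaddr >> 12), i.e. at PTEVADDR | ((vaddr >> 10) & ~3).
+ */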
+ uint32_t pt_vaddr =
+ (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
+ int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
+ &paddr, &page_size, &access, false);
+
+ if (ret == 0) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
+ __func__, vaddr, pt_vaddr, paddr);
+ } else {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
+ __func__, vaddr, pt_vaddr, ret);
+ }
+
+ if (ret == 0) {
+ MemTxResult result;
+
+ *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
+ &result);
+ if (result != MEMTX_OK) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: couldn't load PTE: transaction failed (%u)\n",
+ __func__, (unsigned)result);
+ ret = 1;
+ }
+ }
+ return ret == 0;
+}
+
+static int get_physical_addr_region(CPUXtensaState *env,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi = 0;
+ uint32_t ei = (vaddr >> 29) & 0x7;
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ *access = region_attr_to_access(entry->attr);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
+ *page_size = ~REGION_PAGE_MASK + 1;
+
+ return 0;
+}
+
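+/*!
+ * Find the MPU entry that covers the given vaddr: entries are sorted by
+ * start address and each segment extends up to the start of the next one.
+ * \return number of matching entries (more than one indicates a
+ * malformed, non-monotonic map)
+ */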
+static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
+ uint32_t vaddr, unsigned *segment)
+{
+ unsigned nhits = 0;
+ unsigned i;
+
+ for (i = 0; i < n; ++i) {
+ if (vaddr >= entry[i].vaddr &&
+ (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
+ if (nhits++) {
+ break;
+ }
+ *segment = i;
+ }
+ }
+ return nhits;
+}
+
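+/*
+ * Mask MPUENB down to the implemented foreground segments; any change
+ * flushes the QEMU TLB.
+ */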
+void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
+{
+ v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;
+
+ if (v != env->sregs[MPUENB]) {
+ env->sregs[MPUENB] = v;
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
+{
+ unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;
+
+ if (segment < env->config->n_mpu_fg_segments) {
+ env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
+ env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
+ env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
+ tlb_flush(env_cpu(env));
+ }
+}
+
+uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
+{
+ unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;
+
+ if (segment < env->config->n_mpu_fg_segments) {
+ return env->mpu_fg[segment].vaddr |
+ extract32(env->sregs[MPUENB], segment, 1);
+ } else {
+ return 0;
+ }
+}
+
+uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
+{
+ unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;
+
+ if (segment < env->config->n_mpu_fg_segments) {
+ return env->mpu_fg[segment].attr;
+ } else {
+ return 0;
+ }
+}
+
+uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
+{
+ unsigned nhits;
+ unsigned segment = XTENSA_MPU_PROBE_B;
+ unsigned bg_segment;
+
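+ /*
+ * 'segment' is pre-seeded with the B flag so that a probe which misses
+ * the enabled foreground map reports a background match without the
+ * V bit set.
+ */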
+ nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
+ v, &segment);
+ if (nhits > 1) {
+ HELPER(exception_cause_vaddr)(env, env->pc,
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
+ } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
+ return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
+ } else {
+ xtensa_mpu_lookup(env->config->mpu_bg,
+ env->config->n_mpu_bg_segments,
+ v, &bg_segment);
+ return env->config->mpu_bg[bg_segment].attr | segment;
+ }
+}
+
+static int get_physical_addr_mpu(CPUXtensaState *env,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access)
+{
+ unsigned nhits;
+ unsigned segment;
+ uint32_t attr;
+
+ nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
+ vaddr, &segment);
+ if (nhits > 1) {
+ return is_write < 2 ?
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE :
+ INST_TLB_MULTI_HIT_CAUSE;
+ } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
+ attr = env->mpu_fg[segment].attr;
+ } else {
+ xtensa_mpu_lookup(env->config->mpu_bg,
+ env->config->n_mpu_bg_segments,
+ vaddr, &segment);
+ attr = env->config->mpu_bg[segment].attr;
+ }
+
+ *access = mpu_attr_to_access(attr, mmu_idx);
+ if (!is_access_granted(*access, is_write)) {
+ return is_write < 2 ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+ *paddr = vaddr;
+ *page_size = env->config->mpu_align;
+ return 0;
+}
+
+/*!
+ * Convert a virtual address to a physical address.
+ * The MMU may issue a page-table walk and change an xtensa autorefill
+ * TLB way entry.
+ *
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size,
+ unsigned *access)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return get_physical_addr_mmu(env, update_tlb,
+ vaddr, is_write, mmu_idx, paddr,
+ page_size, access, true);
+ } else if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
+ return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
+ paddr, page_size, access);
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
+ return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
+ paddr, page_size, access);
+ } else {
+ *paddr = vaddr;
+ *page_size = TARGET_PAGE_SIZE;
+ *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
+ ((vaddr & 0xe0000000) >> 27));
+ return 0;
+ }
+}
+
+static void dump_tlb(CPUXtensaState *env, bool dtlb)
+{
+ unsigned wi, ei;
+ const xtensa_tlb *conf =
+ dtlb ? &env->config->dtlb : &env->config->itlb;
+ unsigned (*attr_to_access)(uint32_t) =
+ xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
+ mmu_attr_to_access : region_attr_to_access;
+
+ for (wi = 0; wi < conf->nways; ++wi) {
+ uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+ const char *sz_text;
+ bool print_header = true;
+
+ if (sz >= 0x100000) {
+ sz /= MiB;
+ sz_text = "MB";
+ } else {
+ sz /= KiB;
+ sz_text = "KB";
+ }
+
+ for (ei = 0; ei < conf->way_size[wi]; ++ei) {
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (entry->asid) {
+ static const char * const cache_text[8] = {
+ [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
+ [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
+ [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
+ [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
+ };
+ unsigned access = attr_to_access(entry->attr);
+ unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
+ PAGE_CACHE_SHIFT;
+
+ if (print_header) {
+ print_header = false;
+ qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
+ qemu_printf("\tVaddr Paddr ASID Attr RWX Cache\n"
+ "\t---------- ---------- ---- ---- --- -------\n");
+ }
+ qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
+ entry->vaddr,
+ entry->paddr,
+ entry->asid,
+ entry->attr,
+ (access & PAGE_READ) ? 'R' : '-',
+ (access & PAGE_WRITE) ? 'W' : '-',
+ (access & PAGE_EXEC) ? 'X' : '-',
+ cache_text[cache_idx] ?
+ cache_text[cache_idx] : "Invalid");
+ }
+ }
+ }
+}
+
+static void dump_mpu(CPUXtensaState *env,
+ const xtensa_mpu_entry *entry, unsigned n)
+{
+ unsigned i;
+
+ qemu_printf("\t%s Vaddr Attr Ring0 Ring1 System Type CPU cache\n"
+ "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
+ env ? "En" : " ",
+ env ? "--" : " ");
+
+ for (i = 0; i < n; ++i) {
+ uint32_t attr = entry[i].attr;
+ unsigned access0 = mpu_attr_to_access(attr, 0);
+ unsigned access1 = mpu_attr_to_access(attr, 1);
+ unsigned type = mpu_attr_to_type(attr);
+ char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';
+
+ qemu_printf("\t %c 0x%08x 0x%08x %c%c%c %c%c%c ",
+ env ?
+ ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
+ entry[i].vaddr, attr,
+ (access0 & PAGE_READ) ? 'R' : '-',
+ (access0 & PAGE_WRITE) ? 'W' : '-',
+ (access0 & PAGE_EXEC) ? 'X' : '-',
+ (access1 & PAGE_READ) ? 'R' : '-',
+ (access1 & PAGE_WRITE) ? 'W' : '-',
+ (access1 & PAGE_EXEC) ? 'X' : '-');
+
+ switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
+ case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
+ qemu_printf("Device %cB %3s\n",
+ (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
+ (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
+ break;
+ case XTENSA_MPU_SYSTEM_TYPE_NC:
+ qemu_printf("Sys NC %cB %c%c%c\n",
+ (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
+ (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
+ (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
+ (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
+ break;
+ case XTENSA_MPU_SYSTEM_TYPE_C:
+ qemu_printf("Sys C %c%c%c %c%c%c\n",
+ (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
+ (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
+ (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
+ (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
+ (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
+ (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
+ break;
+ default:
+ qemu_printf("Unknown\n");
+ break;
+ }
+ }
+}
+
+void dump_mmu(CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
+
+ qemu_printf("ITLB:\n");
+ dump_tlb(env, false);
+ qemu_printf("\nDTLB:\n");
+ dump_tlb(env, true);
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
+ qemu_printf("Foreground map:\n");
+ dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
+ qemu_printf("\nBackground map:\n");
+ dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
+ } else {
+ qemu_printf("No TLB for this CPU core\n");
+ }
+}
diff --git a/target/xtensa/monitor.c b/target/xtensa/monitor.c
new file mode 100644
index 000000000..fbf60d555
--- /dev/null
+++ b/target/xtensa/monitor.c
@@ -0,0 +1,39 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "monitor/hmp.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env(mon);
+
+ if (!env1) {
+ monitor_printf(mon, "No CPU available\n");
+ return;
+ }
+ dump_mmu(env1);
+}
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
new file mode 100644
index 000000000..d85d3516d
--- /dev/null
+++ b/target/xtensa/op_helper.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/timer.h"
+
+#ifndef CONFIG_USER_ONLY
+
+void HELPER(update_ccount)(CPUXtensaState *env)
+{
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ env->ccount_time = now;
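+ /* Elapsed guest cycles: ns * kHz / 10^6, truncated to 32 bits. */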
+ env->sregs[CCOUNT] = env->ccount_base +
+ (uint32_t)((now - env->time_base) *
+ env->config->clock_freq_khz / 1000000);
+}
+
+void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
+{
+ int i;
+
+ HELPER(update_ccount)(env);
+ env->ccount_base += v - env->sregs[CCOUNT];
+ for (i = 0; i < env->config->nccompare; ++i) {
+ HELPER(update_ccompare)(env, i);
+ }
+}
+
+void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
+{
+ uint64_t dcc;
+
+ qatomic_and(&env->sregs[INTSET],
+ ~(1u << env->config->timerint[i]));
+ HELPER(update_ccount)(env);
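+ /*
+ * Cycles until CCOUNT next equals CCOMPARE[i]; the -1/+1 in 32-bit
+ * arithmetic maps a distance of 0 to a full 2^32-cycle wraparound.
+ */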
+ dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
+ timer_mod(env->ccompare[i].timer,
+ env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
+ env->yield_needed = 1;
+}
+
+/*!
+ * Check the accessibility and cache attributes of vaddr and raise an
+ * exception when the ATOMCTL SR requires it.
+ *
+ * Note: local memory exclusion is not implemented.
+ */
+void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
+{
+ uint32_t paddr, page_size, access;
+ uint32_t atomctl = env->sregs[ATOMCTL];
+ int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
+ xtensa_get_cring(env), &paddr, &page_size, &access);
+
+ /*
+ * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
+ * see the opcode description in the ISA.
+ */
+ if (rc == 0 &&
+ (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
+ rc = STORE_PROHIBITED_CAUSE;
+ }
+
+ if (rc) {
+ HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
+ }
+
+ /*
+ * When the data cache is not configured, use the ATOMCTL bypass field.
+ * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
+ * under the Conditional Store Option.
+ */
+ if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+ access = PAGE_CACHE_BYPASS;
+ }
+
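+ /*
+ * ATOMCTL keeps a 2-bit control field per access class: bits [1:0]
+ * for bypass, [3:2] for write-through and [5:4] for write-back
+ * (hence the fall-through shifts below); a field value of 0 means
+ * the operation must raise an exception.
+ */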
+ switch (access & PAGE_CACHE_MASK) {
+ case PAGE_CACHE_WB:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_WT:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_BYPASS:
+ if ((atomctl & 0x3) == 0) {
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ }
+ break;
+
+ case PAGE_CACHE_ISOLATE:
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void HELPER(check_exclusive)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr,
+ uint32_t is_write)
+{
+ uint32_t paddr, page_size, access;
+ uint32_t atomctl = env->sregs[ATOMCTL];
+ int rc = xtensa_get_physical_addr(env, true, vaddr, is_write,
+ xtensa_get_cring(env), &paddr,
+ &page_size, &access);
+
+ if (rc) {
+ HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
+ }
+
+ /* When the data cache is not configured, use the ATOMCTL bypass field. */
+ if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+ access = PAGE_CACHE_BYPASS;
+ }
+
+ switch (access & PAGE_CACHE_MASK) {
+ case PAGE_CACHE_WB:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_WT:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_BYPASS:
+ if ((atomctl & 0x3) == 0) {
+ HELPER(exception_cause_vaddr)(env, pc,
+ EXCLUSIVE_ERROR_CAUSE, vaddr);
+ }
+ break;
+
+ case PAGE_CACHE_ISOLATE:
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ break;
+
+ default:
+ break;
+ }
+}
+
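+/*
+ * Clamp the cache way use/allocate fields to the number of configured
+ * ways and keep only the MEMCTL bits implemented by this core.
+ */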
+void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
+ if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
+ env->config->icache_ways) {
+ v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
+ env->config->icache_ways);
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+ if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
+ env->config->dcache_ways) {
+ v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
+ env->config->dcache_ways);
+ }
+ if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
+ env->config->dcache_ways) {
+ v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
+ env->config->dcache_ways);
+ }
+ }
+ env->sregs[MEMCTL] = v & env->config->memctl_mask;
+}
+
+#endif
+
+uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ return address_space_ldl(env->address_space_er, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+#else
+ return 0;
+#endif
+}
+
+void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ address_space_stl(env->address_space_er, addr, data,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+#endif
+}
diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h
new file mode 100644
index 000000000..78720734f
--- /dev/null
+++ b/target/xtensa/overlay_tool.h
@@ -0,0 +1,879 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
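+/*
+ * Reduce each overlay XTREG register description to the fields needed
+ * to build the gdbstub register map; the remaining parameters are
+ * accepted and ignored.
+ */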
+#define XTREG(idx, ofs, bi, sz, al, no, fl, cp, typ, grp, name, \
+ a1, a2, a3, a4, a5, a6) { \
+ .targno = (no), \
+ .flags = (fl), \
+ .type = (typ), \
+ .group = (grp), \
+ .size = (sz), \
+},
+#define XTREG_END { .targno = -1 },
+
+#ifndef XCHAL_HAVE_DEPBITS
+#define XCHAL_HAVE_DEPBITS 0
+#endif
+
+#ifndef XCHAL_HAVE_DFP
+#define XCHAL_HAVE_DFP 0
+#endif
+
+#ifndef XCHAL_HAVE_DFPU_SINGLE_ONLY
+#define XCHAL_HAVE_DFPU_SINGLE_ONLY 0
+#endif
+
+#ifndef XCHAL_HAVE_DFPU_SINGLE_DOUBLE
+#define XCHAL_HAVE_DFPU_SINGLE_DOUBLE XCHAL_HAVE_DFP
+#endif
+
+/*
+ * We need to know the type of the FP unit, not just its precision.
+ * Unfortunately the XCHAL macros don't state this explicitly.
+ */
+#define XCHAL_HAVE_DFPU (XCHAL_HAVE_DFP || \
+ XCHAL_HAVE_DFPU_SINGLE_ONLY || \
+ XCHAL_HAVE_DFPU_SINGLE_DOUBLE)
+
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
+#ifndef XCHAL_UNALIGNED_LOAD_HW
+#define XCHAL_UNALIGNED_LOAD_HW 0
+#endif
+
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#define XCHAL_VECBASE_RESET_VADDR 0
+#endif
+
+#ifndef XCHAL_RESET_VECTOR0_VADDR
+#define XCHAL_RESET_VECTOR0_VADDR XCHAL_RESET_VECTOR_VADDR
+#endif
+
+#ifndef XCHAL_RESET_VECTOR1_VADDR
+#define XCHAL_RESET_VECTOR1_VADDR XCHAL_RESET_VECTOR_VADDR
+#endif
+
+#ifndef XCHAL_HW_VERSION
+#define XCHAL_HW_VERSION (XCHAL_HW_VERSION_MAJOR * 100 \
+ + XCHAL_HW_VERSION_MINOR)
+#endif
+
+#ifndef XCHAL_LOOP_BUFFER_SIZE
+#define XCHAL_LOOP_BUFFER_SIZE 0
+#endif
+
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
+#ifndef XCHAL_HAVE_MPU
+#define XCHAL_HAVE_MPU 0
+#endif
+
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
+#define XCHAL_OPTION(xchal, qemu) ((xchal) ? XTENSA_OPTION_BIT(qemu) : 0)
+
+#define XTENSA_OPTIONS ( \
+ XCHAL_OPTION(XCHAL_HAVE_DENSITY, XTENSA_OPTION_CODE_DENSITY) | \
+ XCHAL_OPTION(XCHAL_HAVE_LOOPS, XTENSA_OPTION_LOOP) | \
+ XCHAL_OPTION(XCHAL_HAVE_ABSOLUTE_LITERALS, XTENSA_OPTION_EXTENDED_L32R) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL16, XTENSA_OPTION_16_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32, XTENSA_OPTION_32_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32_HIGH, XTENSA_OPTION_32_BIT_IMUL_HIGH) | \
+ XCHAL_OPTION(XCHAL_HAVE_DIV32, XTENSA_OPTION_32_BIT_IDIV) | \
+ XCHAL_OPTION(XCHAL_HAVE_MAC16, XTENSA_OPTION_MAC16) | \
+ XCHAL_OPTION(XCHAL_HAVE_NSA, XTENSA_OPTION_MISC_OP_NSA) | \
+ XCHAL_OPTION(XCHAL_HAVE_MINMAX, XTENSA_OPTION_MISC_OP_MINMAX) | \
+ XCHAL_OPTION(XCHAL_HAVE_SEXT, XTENSA_OPTION_MISC_OP_SEXT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CLAMPS, XTENSA_OPTION_MISC_OP_CLAMPS) | \
+ XCHAL_OPTION(XCHAL_HAVE_CP, XTENSA_OPTION_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_BOOLEANS, XTENSA_OPTION_BOOLEAN) | \
+ XCHAL_OPTION(XCHAL_HAVE_FP, XTENSA_OPTION_FP_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_DFPU, XTENSA_OPTION_DFP_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_DFPU_SINGLE_ONLY, \
+ XTENSA_OPTION_DFPU_SINGLE_ONLY) | \
+ XCHAL_OPTION(XCHAL_HAVE_RELEASE_SYNC, XTENSA_OPTION_MP_SYNCHRO) | \
+ XCHAL_OPTION(XCHAL_HAVE_S32C1I, XTENSA_OPTION_CONDITIONAL_STORE) | \
+ XCHAL_OPTION(((XCHAL_HAVE_S32C1I && XCHAL_HW_VERSION >= 230000) || \
+ XCHAL_HAVE_EXCLUSIVE), XTENSA_OPTION_ATOMCTL) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEPBITS, XTENSA_OPTION_DEPBITS) | \
+ /* Interrupts and exceptions */ \
+ XCHAL_OPTION(XCHAL_HAVE_EXCEPTIONS, XTENSA_OPTION_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_VECBASE, XTENSA_OPTION_RELOCATABLE_VECTOR) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_EXCEPTION, \
+ XTENSA_OPTION_UNALIGNED_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_INTERRUPTS, XTENSA_OPTION_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_HIGHPRI_INTERRUPTS, \
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CCOUNT, XTENSA_OPTION_TIMER_INTERRUPT) | \
+ /* Local memory, TODO */ \
+ XCHAL_OPTION(XCHAL_ICACHE_SIZE, XTENSA_OPTION_ICACHE) | \
+ XCHAL_OPTION(XCHAL_ICACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_ICACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_DCACHE_SIZE, XTENSA_OPTION_DCACHE) | \
+ XCHAL_OPTION(XCHAL_DCACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_DCACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_HW, XTENSA_OPTION_HW_ALIGNMENT) | \
+ XCHAL_OPTION(XCHAL_HAVE_MEM_ECC_PARITY, \
+ XTENSA_OPTION_MEMORY_ECC_PARITY) | \
+ /* Memory protection and translation */ \
+ XCHAL_OPTION(XCHAL_HAVE_MIMIC_CACHEATTR, \
+ XTENSA_OPTION_REGION_PROTECTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_XLT_CACHEATTR, \
+ XTENSA_OPTION_REGION_TRANSLATION) | \
+ XCHAL_OPTION(XCHAL_HAVE_MPU, XTENSA_OPTION_MPU) | \
+ XCHAL_OPTION(XCHAL_HAVE_PTP_MMU, XTENSA_OPTION_MMU) | \
+ XCHAL_OPTION(XCHAL_HAVE_CACHEATTR, XTENSA_OPTION_CACHEATTR) | \
+ /* Other, TODO */ \
+ XCHAL_OPTION(XCHAL_HAVE_WINDOWED, XTENSA_OPTION_WINDOWED_REGISTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEBUG, XTENSA_OPTION_DEBUG) |\
+ XCHAL_OPTION(XCHAL_NUM_MISC_REGS > 0, XTENSA_OPTION_MISC_SR) | \
+ XCHAL_OPTION(XCHAL_HAVE_THREADPTR, XTENSA_OPTION_THREAD_POINTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_PRID, XTENSA_OPTION_PROCESSOR_ID) | \
+ XCHAL_OPTION(XCHAL_HAVE_EXTERN_REGS, XTENSA_OPTION_EXTERN_REGS))
+
+#ifndef XCHAL_WINDOW_OF4_VECOFS
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#endif
+
+#if XCHAL_HAVE_WINDOWED
+#define WINDOW_VECTORS \
+ [EXC_WINDOW_OVERFLOW4] = XCHAL_WINDOW_OF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW4] = XCHAL_WINDOW_UF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW8] = XCHAL_WINDOW_OF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW8] = XCHAL_WINDOW_UF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW12] = XCHAL_WINDOW_OF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW12] = XCHAL_WINDOW_UF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR,
+#else
+#define WINDOW_VECTORS
+#endif
+
+#define EXCEPTION_VECTORS { \
+ [EXC_RESET0] = XCHAL_RESET_VECTOR0_VADDR, \
+ [EXC_RESET1] = XCHAL_RESET_VECTOR1_VADDR, \
+ WINDOW_VECTORS \
+ [EXC_KERNEL] = XCHAL_KERNEL_VECTOR_VADDR, \
+ [EXC_USER] = XCHAL_USER_VECTOR_VADDR, \
+ [EXC_DOUBLE] = XCHAL_DOUBLEEXC_VECTOR_VADDR, \
+ [EXC_DEBUG] = XCHAL_DEBUG_VECTOR_VADDR, \
+ }
+
+#define INTERRUPT_VECTORS { \
+ 0, \
+ 0, \
+ XCHAL_INTLEVEL2_VECTOR_VADDR, \
+ XCHAL_INTLEVEL3_VECTOR_VADDR, \
+ XCHAL_INTLEVEL4_VECTOR_VADDR, \
+ XCHAL_INTLEVEL5_VECTOR_VADDR, \
+ XCHAL_INTLEVEL6_VECTOR_VADDR, \
+ XCHAL_INTLEVEL7_VECTOR_VADDR, \
+ }
+
+#define LEVEL_MASKS { \
+ [1] = XCHAL_INTLEVEL1_MASK, \
+ [2] = XCHAL_INTLEVEL2_MASK, \
+ [3] = XCHAL_INTLEVEL3_MASK, \
+ [4] = XCHAL_INTLEVEL4_MASK, \
+ [5] = XCHAL_INTLEVEL5_MASK, \
+ [6] = XCHAL_INTLEVEL6_MASK, \
+ [7] = XCHAL_INTLEVEL7_MASK, \
+ }
+
+#define INTTYPE_MASKS { \
+ [INTTYPE_EDGE] = XCHAL_INTTYPE_MASK_EXTERN_EDGE, \
+ [INTTYPE_NMI] = XCHAL_INTTYPE_MASK_NMI, \
+ [INTTYPE_SOFTWARE] = XCHAL_INTTYPE_MASK_SOFTWARE, \
+ }
+
+#define XTHAL_INTTYPE_EXTERN_LEVEL INTTYPE_LEVEL
+#define XTHAL_INTTYPE_EXTERN_EDGE INTTYPE_EDGE
+#define XTHAL_INTTYPE_NMI INTTYPE_NMI
+#define XTHAL_INTTYPE_SOFTWARE INTTYPE_SOFTWARE
+#define XTHAL_INTTYPE_TIMER INTTYPE_TIMER
+#define XTHAL_INTTYPE_TBD1 INTTYPE_DEBUG
+#define XTHAL_INTTYPE_TBD2 INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_WRITE_ERROR INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_PROFILING INTTYPE_PROFILING
+#define XTHAL_INTTYPE_IDMA_DONE INTTYPE_IDMA_DONE
+#define XTHAL_INTTYPE_IDMA_ERR INTTYPE_IDMA_ERR
+#define XTHAL_INTTYPE_GS_ERR INTTYPE_GS_ERR
+
+#ifndef XCHAL_NMILEVEL
+#define XCHAL_NMILEVEL (XCHAL_NUM_INTLEVELS + 1)
+#endif
+
+#define INTERRUPT(i) { \
+ .level = XCHAL_INT ## i ## _LEVEL, \
+ .inttype = XCHAL_INT ## i ## _TYPE, \
+ }
+
+#define INTERRUPTS { \
+ [0] = INTERRUPT(0), \
+ [1] = INTERRUPT(1), \
+ [2] = INTERRUPT(2), \
+ [3] = INTERRUPT(3), \
+ [4] = INTERRUPT(4), \
+ [5] = INTERRUPT(5), \
+ [6] = INTERRUPT(6), \
+ [7] = INTERRUPT(7), \
+ [8] = INTERRUPT(8), \
+ [9] = INTERRUPT(9), \
+ [10] = INTERRUPT(10), \
+ [11] = INTERRUPT(11), \
+ [12] = INTERRUPT(12), \
+ [13] = INTERRUPT(13), \
+ [14] = INTERRUPT(14), \
+ [15] = INTERRUPT(15), \
+ [16] = INTERRUPT(16), \
+ [17] = INTERRUPT(17), \
+ [18] = INTERRUPT(18), \
+ [19] = INTERRUPT(19), \
+ [20] = INTERRUPT(20), \
+ [21] = INTERRUPT(21), \
+ [22] = INTERRUPT(22), \
+ [23] = INTERRUPT(23), \
+ [24] = INTERRUPT(24), \
+ [25] = INTERRUPT(25), \
+ [26] = INTERRUPT(26), \
+ [27] = INTERRUPT(27), \
+ [28] = INTERRUPT(28), \
+ [29] = INTERRUPT(29), \
+ [30] = INTERRUPT(30), \
+ [31] = INTERRUPT(31), \
+ }
+
+#define TIMERINTS { \
+ [0] = XCHAL_TIMER0_INTERRUPT, \
+ [1] = XCHAL_TIMER1_INTERRUPT, \
+ [2] = XCHAL_TIMER2_INTERRUPT, \
+ }
+
+#define EXTINTS { \
+ [0] = XCHAL_EXTINT0_NUM, \
+ [1] = XCHAL_EXTINT1_NUM, \
+ [2] = XCHAL_EXTINT2_NUM, \
+ [3] = XCHAL_EXTINT3_NUM, \
+ [4] = XCHAL_EXTINT4_NUM, \
+ [5] = XCHAL_EXTINT5_NUM, \
+ [6] = XCHAL_EXTINT6_NUM, \
+ [7] = XCHAL_EXTINT7_NUM, \
+ [8] = XCHAL_EXTINT8_NUM, \
+ [9] = XCHAL_EXTINT9_NUM, \
+ [10] = XCHAL_EXTINT10_NUM, \
+ [11] = XCHAL_EXTINT11_NUM, \
+ [12] = XCHAL_EXTINT12_NUM, \
+ [13] = XCHAL_EXTINT13_NUM, \
+ [14] = XCHAL_EXTINT14_NUM, \
+ [15] = XCHAL_EXTINT15_NUM, \
+ [16] = XCHAL_EXTINT16_NUM, \
+ [17] = XCHAL_EXTINT17_NUM, \
+ [18] = XCHAL_EXTINT18_NUM, \
+ [19] = XCHAL_EXTINT19_NUM, \
+ [20] = XCHAL_EXTINT20_NUM, \
+ [21] = XCHAL_EXTINT21_NUM, \
+ [22] = XCHAL_EXTINT22_NUM, \
+ [23] = XCHAL_EXTINT23_NUM, \
+ [24] = XCHAL_EXTINT24_NUM, \
+ [25] = XCHAL_EXTINT25_NUM, \
+ [26] = XCHAL_EXTINT26_NUM, \
+ [27] = XCHAL_EXTINT27_NUM, \
+ [28] = XCHAL_EXTINT28_NUM, \
+ [29] = XCHAL_EXTINT29_NUM, \
+ [30] = XCHAL_EXTINT30_NUM, \
+ [31] = XCHAL_EXTINT31_NUM, \
+ }
+
+#define EXCEPTIONS_SECTION \
+ .excm_level = XCHAL_EXCM_LEVEL, \
+ .vecbase = XCHAL_VECBASE_RESET_VADDR, \
+ .exception_vector = EXCEPTION_VECTORS
+
+#define INTERRUPTS_SECTION \
+ .ninterrupt = XCHAL_NUM_INTERRUPTS, \
+ .nlevel = XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI, \
+ .nmi_level = XCHAL_NMILEVEL, \
+ .interrupt_vector = INTERRUPT_VECTORS, \
+ .level_mask = LEVEL_MASKS, \
+ .inttype_mask = INTTYPE_MASKS, \
+ .interrupt = INTERRUPTS, \
+ .nccompare = XCHAL_NUM_TIMERS, \
+ .timerint = TIMERINTS, \
+ .nextint = XCHAL_NUM_EXTINTERRUPTS, \
+ .extint = EXTINTS
+
+#if XCHAL_HAVE_PTP_MMU
+
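+/*
+ * Full MMU TLB geometry: ways 0-3 are the autorefill ways, way 4 has
+ * four entries, ways 5 and 6 are the variable ways (4 and 8 entries
+ * with way56, otherwise 2 each) and the remaining ways hold one entry
+ * each.
+ */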
+#define TLB_TEMPLATE(ways, refill_way_size, way56) { \
+ .nways = ways, \
+ .way_size = { \
+ (refill_way_size), (refill_way_size), \
+ (refill_way_size), (refill_way_size), \
+ 4, (way56) ? 4 : 2, (way56) ? 8 : 2, 1, 1, 1, \
+ }, \
+ .varway56 = (way56), \
+ .nrefillentries = (refill_way_size) * 4, \
+ }
+
+#define ITLB(varway56) \
+ TLB_TEMPLATE(7, 1 << XCHAL_ITLB_ARF_ENTRIES_LOG2, varway56)
+
+#define DTLB(varway56) \
+ TLB_TEMPLATE(10, 1 << XCHAL_DTLB_ARF_ENTRIES_LOG2, varway56)
+
+#define TLB_SECTION \
+ .itlb = ITLB(XCHAL_HAVE_SPANNING_WAY), \
+ .dtlb = DTLB(XCHAL_HAVE_SPANNING_WAY)
+
+#ifndef XCHAL_SYSROM0_PADDR
+#define XCHAL_SYSROM0_PADDR 0xfe000000
+#define XCHAL_SYSROM0_SIZE 0x02000000
+#endif
+
+#ifndef XCHAL_SYSRAM0_PADDR
+#define XCHAL_SYSRAM0_PADDR 0x00000000
+#define XCHAL_SYSRAM0_SIZE 0x08000000
+#endif
+
+#elif XCHAL_HAVE_XLT_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR
+
+#define TLB_TEMPLATE { \
+ .nways = 1, \
+ .way_size = { \
+ 8, \
+ } \
+ }
+
+#define TLB_SECTION \
+ .itlb = TLB_TEMPLATE, \
+ .dtlb = TLB_TEMPLATE
+
+#ifndef XCHAL_SYSROM0_PADDR
+#define XCHAL_SYSROM0_PADDR 0x50000000
+#define XCHAL_SYSROM0_SIZE 0x04000000
+#endif
+
+#ifndef XCHAL_SYSRAM0_PADDR
+#define XCHAL_SYSRAM0_PADDR 0x60000000
+#define XCHAL_SYSRAM0_SIZE 0x04000000
+#endif
+
+#elif XCHAL_HAVE_MPU
+
+#ifndef XTENSA_MPU_BG_MAP
+#ifdef XCHAL_MPU_BACKGROUND_MAP
+#define XCHAL_MPU_BGMAP(s, vaddr_start, vaddr_last, rights, memtype, x...) \
+ { .vaddr = (vaddr_start), .attr = ((rights) << 8) | ((memtype) << 12), },
+
+#define XTENSA_MPU_BG_MAP (xtensa_mpu_entry []){\
+ XCHAL_MPU_BACKGROUND_MAP(0) \
+}
+
+#define XTENSA_MPU_BG_MAP_ENTRIES XCHAL_MPU_BACKGROUND_ENTRIES
+#else
+#define XTENSA_MPU_BG_MAP (xtensa_mpu_entry []){\
+ { .vaddr = 0, .attr = 0x00006700, }, \
+}
+
+#define XTENSA_MPU_BG_MAP_ENTRIES 1
+#endif
+#endif
+
+#define TLB_SECTION \
+ .mpu_align = XCHAL_MPU_ALIGN, \
+ .n_mpu_fg_segments = XCHAL_MPU_ENTRIES, \
+ .n_mpu_bg_segments = XTENSA_MPU_BG_MAP_ENTRIES, \
+ .mpu_bg = XTENSA_MPU_BG_MAP
+
+#ifndef XCHAL_SYSROM0_PADDR
+#define XCHAL_SYSROM0_PADDR 0x50000000
+#define XCHAL_SYSROM0_SIZE 0x04000000
+#endif
+
+#ifndef XCHAL_SYSRAM0_PADDR
+#define XCHAL_SYSRAM0_PADDR 0x60000000
+#define XCHAL_SYSRAM0_SIZE 0x04000000
+#endif
+
+#else
+
+#ifndef XCHAL_SYSROM0_PADDR
+#define XCHAL_SYSROM0_PADDR 0x50000000
+#define XCHAL_SYSROM0_SIZE 0x04000000
+#endif
+
+#ifndef XCHAL_SYSRAM0_PADDR
+#define XCHAL_SYSRAM0_PADDR 0x60000000
+#define XCHAL_SYSRAM0_SIZE 0x04000000
+#endif
+
+#endif
+
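+/*
+ * Register the core only when its endianness matches this QEMU target
+ * binary; otherwise REGISTER_CORE expands to nothing.
+ */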
+#if (defined(TARGET_WORDS_BIGENDIAN) != 0) == (XCHAL_HAVE_BE != 0)
+#define REGISTER_CORE(core) \
+ static void __attribute__((constructor)) register_core(void) \
+ { \
+ static XtensaConfigList node = { \
+ .config = &core, \
+ }; \
+ xtensa_register_core(&node); \
+ }
+#else
+#define REGISTER_CORE(core)
+#endif
+
+#define DEBUG_SECTION \
+ .debug_level = XCHAL_DEBUGLEVEL, \
+ .nibreak = XCHAL_NUM_IBREAK, \
+ .ndbreak = XCHAL_NUM_DBREAK
+
+#define CACHE_SECTION \
+ .icache_ways = XCHAL_ICACHE_WAYS, \
+ .dcache_ways = XCHAL_DCACHE_WAYS, \
+ .dcache_line_bytes = XCHAL_DCACHE_LINESIZE, \
+ .memctl_mask = \
+ (XCHAL_ICACHE_SIZE ? MEMCTL_IUSEWAYS_MASK : 0) | \
+ (XCHAL_DCACHE_SIZE ? \
+ MEMCTL_DALLOCWAYS_MASK | MEMCTL_DUSEWAYS_MASK : 0) | \
+ MEMCTL_ISNP | MEMCTL_DSNP | \
+ (XCHAL_HAVE_LOOPS && XCHAL_LOOP_BUFFER_SIZE ? MEMCTL_IL0EN : 0)
+
+#define MEM_LOCATION(name, n) \
+ { \
+ .addr = XCHAL_ ## name ## n ## _PADDR, \
+ .size = XCHAL_ ## name ## n ## _SIZE, \
+ }
+
+#define MEM_SECTIONS(name) \
+ MEM_LOCATION(name, 0), \
+ MEM_LOCATION(name, 1), \
+ MEM_LOCATION(name, 2), \
+ MEM_LOCATION(name, 3)
+
+#define MEM_SECTION(name) \
+ .num = XCHAL_NUM_ ## name, \
+ .location = { \
+ MEM_SECTIONS(name) \
+ }
+
+#define SYSMEM_SECTION(name) \
+ .num = 1, \
+ .location = { \
+ { \
+ .addr = XCHAL_ ## name ## 0_PADDR, \
+ .size = XCHAL_ ## name ## 0_SIZE, \
+ } \
+ }
+
+#define LOCAL_MEMORIES_SECTION \
+ .instrom = { \
+ MEM_SECTION(INSTROM) \
+ }, \
+ .instram = { \
+ MEM_SECTION(INSTRAM) \
+ }, \
+ .datarom = { \
+ MEM_SECTION(DATAROM) \
+ }, \
+ .dataram = { \
+ MEM_SECTION(DATARAM) \
+ }, \
+ .sysrom = { \
+ SYSMEM_SECTION(SYSROM) \
+ }, \
+ .sysram = { \
+ SYSMEM_SECTION(SYSRAM) \
+ }
+
+#define CONFIG_SECTION \
+ .hw_version = XCHAL_HW_VERSION, \
+ .configid = { \
+ XCHAL_HW_CONFIGID0, \
+ XCHAL_HW_CONFIGID1, \
+ }
+
+#define DEFAULT_SECTIONS \
+ .options = XTENSA_OPTIONS, \
+ .nareg = XCHAL_NUM_AREGS, \
+ .ndepc = (XCHAL_XEA_VERSION >= 2), \
+ .inst_fetch_width = XCHAL_INST_FETCH_WIDTH, \
+ .max_insn_size = XCHAL_MAX_INSTRUCTION_SIZE, \
+ .use_first_nan = !XCHAL_HAVE_DFPU, \
+ EXCEPTIONS_SECTION, \
+ INTERRUPTS_SECTION, \
+ TLB_SECTION, \
+ DEBUG_SECTION, \
+ CACHE_SECTION, \
+ LOCAL_MEMORIES_SECTION, \
+ CONFIG_SECTION
+
+
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 2
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 3
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 4
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 5
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 6
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 7
+#define XCHAL_INTLEVEL7_VECTOR_VADDR 0
+#endif
+
+
+#if XCHAL_NUM_INTERRUPTS <= 0
+#define XCHAL_INT0_LEVEL 0
+#define XCHAL_INT0_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 1
+#define XCHAL_INT1_LEVEL 0
+#define XCHAL_INT1_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 2
+#define XCHAL_INT2_LEVEL 0
+#define XCHAL_INT2_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 3
+#define XCHAL_INT3_LEVEL 0
+#define XCHAL_INT3_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 4
+#define XCHAL_INT4_LEVEL 0
+#define XCHAL_INT4_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 5
+#define XCHAL_INT5_LEVEL 0
+#define XCHAL_INT5_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 6
+#define XCHAL_INT6_LEVEL 0
+#define XCHAL_INT6_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 7
+#define XCHAL_INT7_LEVEL 0
+#define XCHAL_INT7_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 8
+#define XCHAL_INT8_LEVEL 0
+#define XCHAL_INT8_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 9
+#define XCHAL_INT9_LEVEL 0
+#define XCHAL_INT9_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 10
+#define XCHAL_INT10_LEVEL 0
+#define XCHAL_INT10_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 11
+#define XCHAL_INT11_LEVEL 0
+#define XCHAL_INT11_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 12
+#define XCHAL_INT12_LEVEL 0
+#define XCHAL_INT12_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 13
+#define XCHAL_INT13_LEVEL 0
+#define XCHAL_INT13_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 14
+#define XCHAL_INT14_LEVEL 0
+#define XCHAL_INT14_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 15
+#define XCHAL_INT15_LEVEL 0
+#define XCHAL_INT15_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 16
+#define XCHAL_INT16_LEVEL 0
+#define XCHAL_INT16_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 17
+#define XCHAL_INT17_LEVEL 0
+#define XCHAL_INT17_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 18
+#define XCHAL_INT18_LEVEL 0
+#define XCHAL_INT18_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 19
+#define XCHAL_INT19_LEVEL 0
+#define XCHAL_INT19_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 20
+#define XCHAL_INT20_LEVEL 0
+#define XCHAL_INT20_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 21
+#define XCHAL_INT21_LEVEL 0
+#define XCHAL_INT21_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 22
+#define XCHAL_INT22_LEVEL 0
+#define XCHAL_INT22_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 23
+#define XCHAL_INT23_LEVEL 0
+#define XCHAL_INT23_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 24
+#define XCHAL_INT24_LEVEL 0
+#define XCHAL_INT24_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 25
+#define XCHAL_INT25_LEVEL 0
+#define XCHAL_INT25_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 26
+#define XCHAL_INT26_LEVEL 0
+#define XCHAL_INT26_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 27
+#define XCHAL_INT27_LEVEL 0
+#define XCHAL_INT27_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 28
+#define XCHAL_INT28_LEVEL 0
+#define XCHAL_INT28_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 29
+#define XCHAL_INT29_LEVEL 0
+#define XCHAL_INT29_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 30
+#define XCHAL_INT30_LEVEL 0
+#define XCHAL_INT30_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 31
+#define XCHAL_INT31_LEVEL 0
+#define XCHAL_INT31_TYPE 0
+#endif
+
+
+#if XCHAL_NUM_EXTINTERRUPTS <= 0
+#define XCHAL_EXTINT0_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 1
+#define XCHAL_EXTINT1_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 2
+#define XCHAL_EXTINT2_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 3
+#define XCHAL_EXTINT3_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 4
+#define XCHAL_EXTINT4_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 5
+#define XCHAL_EXTINT5_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 6
+#define XCHAL_EXTINT6_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 7
+#define XCHAL_EXTINT7_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 8
+#define XCHAL_EXTINT8_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 9
+#define XCHAL_EXTINT9_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 10
+#define XCHAL_EXTINT10_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 11
+#define XCHAL_EXTINT11_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 12
+#define XCHAL_EXTINT12_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 13
+#define XCHAL_EXTINT13_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 14
+#define XCHAL_EXTINT14_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 15
+#define XCHAL_EXTINT15_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 16
+#define XCHAL_EXTINT16_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 17
+#define XCHAL_EXTINT17_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 18
+#define XCHAL_EXTINT18_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 19
+#define XCHAL_EXTINT19_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 20
+#define XCHAL_EXTINT20_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 21
+#define XCHAL_EXTINT21_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 22
+#define XCHAL_EXTINT22_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 23
+#define XCHAL_EXTINT23_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 24
+#define XCHAL_EXTINT24_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 25
+#define XCHAL_EXTINT25_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 26
+#define XCHAL_EXTINT26_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 27
+#define XCHAL_EXTINT27_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 28
+#define XCHAL_EXTINT28_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 29
+#define XCHAL_EXTINT29_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 30
+#define XCHAL_EXTINT30_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 31
+#define XCHAL_EXTINT31_NUM 0
+#endif
+
+
+#define XTHAL_TIMER_UNCONFIGURED 0
+
+#if XCHAL_NUM_INSTROM < 1
+#define XCHAL_INSTROM0_PADDR 0
+#define XCHAL_INSTROM0_SIZE 0
+#endif
+#if XCHAL_NUM_INSTROM < 2
+#define XCHAL_INSTROM1_PADDR 0
+#define XCHAL_INSTROM1_SIZE 0
+#endif
+#if XCHAL_NUM_INSTROM < 3
+#define XCHAL_INSTROM2_PADDR 0
+#define XCHAL_INSTROM2_SIZE 0
+#endif
+#if XCHAL_NUM_INSTROM < 4
+#define XCHAL_INSTROM3_PADDR 0
+#define XCHAL_INSTROM3_SIZE 0
+#endif
+#if XCHAL_NUM_INSTROM > MAX_NMEMORY
+#error XCHAL_NUM_INSTROM > MAX_NMEMORY
+#endif
+
+#if XCHAL_NUM_INSTRAM < 1
+#define XCHAL_INSTRAM0_PADDR 0
+#define XCHAL_INSTRAM0_SIZE 0
+#endif
+#if XCHAL_NUM_INSTRAM < 2
+#define XCHAL_INSTRAM1_PADDR 0
+#define XCHAL_INSTRAM1_SIZE 0
+#endif
+#if XCHAL_NUM_INSTRAM < 3
+#define XCHAL_INSTRAM2_PADDR 0
+#define XCHAL_INSTRAM2_SIZE 0
+#endif
+#if XCHAL_NUM_INSTRAM < 4
+#define XCHAL_INSTRAM3_PADDR 0
+#define XCHAL_INSTRAM3_SIZE 0
+#endif
+#if XCHAL_NUM_INSTRAM > MAX_NMEMORY
+#error XCHAL_NUM_INSTRAM > MAX_NMEMORY
+#endif
+
+#if XCHAL_NUM_DATAROM < 1
+#define XCHAL_DATAROM0_PADDR 0
+#define XCHAL_DATAROM0_SIZE 0
+#endif
+#if XCHAL_NUM_DATAROM < 2
+#define XCHAL_DATAROM1_PADDR 0
+#define XCHAL_DATAROM1_SIZE 0
+#endif
+#if XCHAL_NUM_DATAROM < 3
+#define XCHAL_DATAROM2_PADDR 0
+#define XCHAL_DATAROM2_SIZE 0
+#endif
+#if XCHAL_NUM_DATAROM < 4
+#define XCHAL_DATAROM3_PADDR 0
+#define XCHAL_DATAROM3_SIZE 0
+#endif
+#if XCHAL_NUM_DATAROM > MAX_NMEMORY
+#error XCHAL_NUM_DATAROM > MAX_NMEMORY
+#endif
+
+#if XCHAL_NUM_DATARAM < 1
+#define XCHAL_DATARAM0_PADDR 0
+#define XCHAL_DATARAM0_SIZE 0
+#endif
+#if XCHAL_NUM_DATARAM < 2
+#define XCHAL_DATARAM1_PADDR 0
+#define XCHAL_DATARAM1_SIZE 0
+#endif
+#if XCHAL_NUM_DATARAM < 3
+#define XCHAL_DATARAM2_PADDR 0
+#define XCHAL_DATARAM2_SIZE 0
+#endif
+#if XCHAL_NUM_DATARAM < 4
+#define XCHAL_DATARAM3_PADDR 0
+#define XCHAL_DATARAM3_SIZE 0
+#endif
+#if XCHAL_NUM_DATARAM > MAX_NMEMORY
+#error XCHAL_NUM_DATARAM > MAX_NMEMORY
+#endif
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
new file mode 100644
index 000000000..09430c1bf
--- /dev/null
+++ b/target/xtensa/translate.c
@@ -0,0 +1,7873 @@
+/*
+ * Xtensa ISA:
+ * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg/tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/qemu-print.h"
+#include "exec/cpu_ldst.h"
+#include "semihosting/semihost.h"
+#include "exec/translator.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "exec/log.h"
+
+
+struct DisasContext {
+ DisasContextBase base;
+ const XtensaConfig *config;
+ uint32_t pc;
+ int cring;
+ int ring;
+ uint32_t lbeg_off;
+ uint32_t lend;
+
+ bool sar_5bit;
+ bool sar_m32_5bit;
+ bool sar_m32_allocated;
+ TCGv_i32 sar_m32;
+
+ unsigned window;
+ unsigned callinc;
+ bool cwoe;
+
+ bool debug;
+ bool icount;
+ TCGv_i32 next_icount;
+
+ unsigned cpenable;
+
+ uint32_t op_flags;
+ xtensa_insnbuf_word insnbuf[MAX_INSNBUF_LENGTH];
+ xtensa_insnbuf_word slotbuf[MAX_INSNBUF_LENGTH];
+};
+
+static TCGv_i32 cpu_pc;
+static TCGv_i32 cpu_R[16];
+static TCGv_i32 cpu_FR[16];
+static TCGv_i64 cpu_FRD[16];
+static TCGv_i32 cpu_MR[4];
+static TCGv_i32 cpu_BR[16];
+static TCGv_i32 cpu_BR4[4];
+static TCGv_i32 cpu_BR8[2];
+static TCGv_i32 cpu_SR[256];
+static TCGv_i32 cpu_UR[256];
+static TCGv_i32 cpu_windowbase_next;
+static TCGv_i32 cpu_exclusive_addr;
+static TCGv_i32 cpu_exclusive_val;
+
+static GHashTable *xtensa_regfile_table;
+
+#include "exec/gen-icount.h"
+
+static char *sr_name[256];
+static char *ur_name[256];
+
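+/*
+ * Collect sysreg names from libisa; when several sysregs share one
+ * register number their names are joined with '/'.
+ */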
+void xtensa_collect_sr_names(const XtensaConfig *config)
+{
+ xtensa_isa isa = config->isa;
+ int n = xtensa_isa_num_sysregs(isa);
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ int sr = xtensa_sysreg_number(isa, i);
+
+ if (sr >= 0 && sr < 256) {
+ const char *name = xtensa_sysreg_name(isa, i);
+ char **pname =
+ (xtensa_sysreg_is_user(isa, i) ? ur_name : sr_name) + sr;
+
+ if (*pname) {
+ if (strstr(*pname, name) == NULL) {
+ char *new_name =
+ malloc(strlen(*pname) + strlen(name) + 2);
+
+ strcpy(new_name, *pname);
+ strcat(new_name, "/");
+ strcat(new_name, name);
+ free(*pname);
+ *pname = new_name;
+ }
+ } else {
+ *pname = strdup(name);
+ }
+ }
+ }
+}
+
+void xtensa_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "ar0", "ar1", "ar2", "ar3",
+ "ar4", "ar5", "ar6", "ar7",
+ "ar8", "ar9", "ar10", "ar11",
+ "ar12", "ar13", "ar14", "ar15",
+ };
+ static const char * const fregnames[] = {
+ "f0", "f1", "f2", "f3",
+ "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11",
+ "f12", "f13", "f14", "f15",
+ };
+ static const char * const mregnames[] = {
+ "m0", "m1", "m2", "m3",
+ };
+ static const char * const bregnames[] = {
+ "b0", "b1", "b2", "b3",
+ "b4", "b5", "b6", "b7",
+ "b8", "b9", "b10", "b11",
+ "b12", "b13", "b14", "b15",
+ };
+ int i;
+
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, pc), "pc");
+
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, regs[i]),
+ regnames[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ fregs[i].f32[FP_F32_LOW]),
+ fregnames[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_FRD[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUXtensaState,
+ fregs[i].f64),
+ fregnames[i]);
+ }
+
+ for (i = 0; i < 4; i++) {
+ cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ sregs[MR + i]),
+ mregnames[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ sregs[BR]),
+ bregnames[i]);
+ if (i % 4 == 0) {
+ cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ sregs[BR]),
+ bregnames[i]);
+ }
+ if (i % 8 == 0) {
+ cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ sregs[BR]),
+ bregnames[i]);
+ }
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (sr_name[i]) {
+ cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ sregs[i]),
+ sr_name[i]);
+ }
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (ur_name[i]) {
+ cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState,
+ uregs[i]),
+ ur_name[i]);
+ }
+ }
+
+ cpu_windowbase_next =
+ tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, windowbase_next),
+ "windowbase_next");
+ cpu_exclusive_addr =
+ tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, exclusive_addr),
+ "exclusive_addr");
+ cpu_exclusive_val =
+ tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, exclusive_val),
+ "exclusive_val");
+}
+
+void **xtensa_get_regfile_by_name(const char *name, int entries, int bits)
+{
+ char *geometry_name;
+ void **res;
+
+ if (xtensa_regfile_table == NULL) {
+ xtensa_regfile_table = g_hash_table_new(g_str_hash, g_str_equal);
+ /*
+ * AR is special: the Xtensa translator uses it as the current
+ * register window, while configuration overlays describe it as
+ * the complete physical register file.
+ */
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"AR 16x32", (void *)cpu_R);
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"AR 32x32", (void *)cpu_R);
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"AR 64x32", (void *)cpu_R);
+
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"MR 4x32", (void *)cpu_MR);
+
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"FR 16x32", (void *)cpu_FR);
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"FR 16x64", (void *)cpu_FRD);
+
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"BR 16x1", (void *)cpu_BR);
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"BR4 4x4", (void *)cpu_BR4);
+ g_hash_table_insert(xtensa_regfile_table,
+ (void *)"BR8 2x8", (void *)cpu_BR8);
+ }
+
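+ /* Regfiles are keyed by their "NAME ENTRIESxBITS" geometry string. */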
+ geometry_name = g_strdup_printf("%s %dx%d", name, entries, bits);
+ res = (void **)g_hash_table_lookup(xtensa_regfile_table, geometry_name);
+ g_free(geometry_name);
+ return res;
+}
+
+static inline bool option_enabled(DisasContext *dc, int opt)
+{
+ return xtensa_option_enabled(dc->config, opt);
+}
+
+static void init_sar_tracker(DisasContext *dc)
+{
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+ dc->sar_m32_allocated = false;
+}
+
+static void reset_sar_tracker(DisasContext *dc)
+{
+ if (dc->sar_m32_allocated) {
+ tcg_temp_free(dc->sar_m32);
+ }
+}
+
+static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = true;
+ dc->sar_m32_5bit = false;
+}
+
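+/*
+ * For left shifts SAR is loaded with 32 - (sa & 0x1f) (SSL semantics);
+ * the raw shift amount is tracked in sar_m32 for opcodes that need it
+ * directly.
+ */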
+static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ TCGv_i32 tmp = tcg_const_i32(32);
+ if (!dc->sar_m32_allocated) {
+ dc->sar_m32 = tcg_temp_local_new_i32();
+ dc->sar_m32_allocated = true;
+ }
+ tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
+ tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = true;
+ tcg_temp_free(tmp);
+}
+
+static void gen_exception(DisasContext *dc, int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void gen_exception_cause(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_helper_exception_cause(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
+ cause == SYSCALL_CAUSE) {
+ dc->base.is_jmp = DISAS_NORETURN;
+ }
+}
+
+static void gen_debug_exception(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_helper_debug_exception(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
+ dc->base.is_jmp = DISAS_NORETURN;
+ }
+}
+
+static bool gen_check_privilege(DisasContext *dc)
+{
+#ifndef CONFIG_USER_ONLY
+ if (!dc->cring) {
+ return true;
+ }
+#endif
+ gen_exception_cause(dc, PRIVILEGED_CAUSE);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return false;
+}
+
+static bool gen_check_cpenable(DisasContext *dc, uint32_t cp_mask)
+{
+ cp_mask &= ~dc->cpenable;
+
+ if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && cp_mask) {
+ gen_exception_cause(dc, COPROCESSOR0_DISABLED + ctz32(cp_mask));
+ dc->base.is_jmp = DISAS_NORETURN;
+ return false;
+ }
+ return true;
+}
+
+static int gen_postprocess(DisasContext *dc, int slot);
+
+static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
+{
+ tcg_gen_mov_i32(cpu_pc, dest);
+ if (dc->icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
+ }
+ if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
+ slot = gen_postprocess(dc, slot);
+ }
+ if (slot >= 0) {
+ tcg_gen_goto_tb(slot);
+ tcg_gen_exit_tb(dc->base.tb, slot);
+ } else {
+ tcg_gen_exit_tb(NULL, 0);
+ }
+ dc->base.is_jmp = DISAS_NORETURN;
+}
+
+static void gen_jump(DisasContext *dc, TCGv dest)
+{
+ gen_jump_slot(dc, dest, -1);
+}
+
+static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot)
+{
+ return translator_use_goto_tb(&dc->base, dest) ? slot : -1;
+}
+
+static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
+{
+ TCGv_i32 tmp = tcg_const_i32(dest);
+ gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
+ tcg_temp_free(tmp);
+}
+
+static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
+ int slot)
+{
+ TCGv_i32 tcallinc = tcg_const_i32(callinc);
+
+ tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
+ tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
+ tcg_temp_free(tcallinc);
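+ /*
+ * Windowed return address: the top two bits encode the window
+ * increment, the low 30 bits the return PC; it is written to
+ * a4, a8 or a12 according to callinc.
+ */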
+ tcg_gen_movi_i32(cpu_R[callinc << 2],
+ (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
+ gen_jump_slot(dc, dest, slot);
+}
+
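+/*
+ * Zero-overhead loop: when the next PC hits LEND, decrement LCOUNT and
+ * branch back to the loop start unless LCOUNT is already zero.
+ */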
+static bool gen_check_loop_end(DisasContext *dc, int slot)
+{
+ if (dc->base.pc_next == dc->lend) {
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
+ if (dc->lbeg_off) {
+ gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
+ } else {
+ gen_jump(dc, cpu_SR[LBEG]);
+ }
+ gen_set_label(label);
+ gen_jumpi(dc, dc->base.pc_next, -1);
+ return true;
+ }
+ return false;
+}
+
+static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
+{
+ if (!gen_check_loop_end(dc, slot)) {
+ gen_jumpi(dc, dc->base.pc_next, slot);
+ }
+}
+
+static void gen_brcond(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, TCGv_i32 t1, uint32_t addr)
+{
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_brcond_i32(cond, t0, t1, label);
+ gen_jumpi_check_loop_end(dc, 0);
+ gen_set_label(label);
+ gen_jumpi(dc, addr, 1);
+}
+
+static void gen_brcondi(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, uint32_t t1, uint32_t addr)
+{
+ TCGv_i32 tmp = tcg_const_i32(t1);
+ gen_brcond(dc, cond, t0, tmp, addr);
+ tcg_temp_free(tmp);
+}
+
+static uint32_t test_exceptions_sr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ return xtensa_option_enabled(dc->config, par[1]) ? 0 : XTENSA_OP_ILL;
+}
+
+static uint32_t test_exceptions_ccompare(DisasContext *dc,
+ const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ unsigned n = par[0] - CCOMPARE;
+
+ if (n >= dc->config->nccompare) {
+ return XTENSA_OP_ILL;
+ }
+ return test_exceptions_sr(dc, arg, par);
+}
+
+static uint32_t test_exceptions_dbreak(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ unsigned n = MAX_NDBREAK;
+
+ if (par[0] >= DBREAKA && par[0] < DBREAKA + MAX_NDBREAK) {
+ n = par[0] - DBREAKA;
+ }
+ if (par[0] >= DBREAKC && par[0] < DBREAKC + MAX_NDBREAK) {
+ n = par[0] - DBREAKC;
+ }
+ if (n >= dc->config->ndbreak) {
+ return XTENSA_OP_ILL;
+ }
+ return test_exceptions_sr(dc, arg, par);
+}
+
+static uint32_t test_exceptions_ibreak(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ unsigned n = par[0] - IBREAKA;
+
+ if (n >= dc->config->nibreak) {
+ return XTENSA_OP_ILL;
+ }
+ return test_exceptions_sr(dc, arg, par);
+}
+
+static uint32_t test_exceptions_hpi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ unsigned n = MAX_NLEVEL + 1;
+
+ if (par[0] >= EXCSAVE1 && par[0] < EXCSAVE1 + MAX_NLEVEL) {
+ n = par[0] - EXCSAVE1 + 1;
+ }
+ if (par[0] >= EPC1 && par[0] < EPC1 + MAX_NLEVEL) {
+ n = par[0] - EPC1 + 1;
+ }
+ if (par[0] >= EPS2 && par[0] < EPS2 + MAX_NLEVEL - 1) {
+ n = par[0] - EPS2 + 2;
+ }
+ if (n > dc->config->nlevel) {
+ return XTENSA_OP_ILL;
+ }
+ return test_exceptions_sr(dc, arg, par);
+}
+
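+/*
+ * For accesses wider than a byte: require alignment when the core has
+ * no hardware unaligned support, and silently clear the low address
+ * bits when the unaligned-exception option is absent.
+ */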
+static MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop,
+ TCGv_i32 addr)
+{
+ if ((mop & MO_SIZE) == MO_8) {
+ return mop;
+ }
+ if ((mop & MO_AMASK) == MO_UNALN &&
+ !option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT)) {
+ mop |= MO_ALIGN;
+ }
+ if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
+ tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
+ }
+ return mop;
+}
+
+#ifndef CONFIG_USER_ONLY
+static void gen_waiti(DisasContext *dc, uint32_t imm4)
+{
+ TCGv_i32 pc = tcg_const_i32(dc->base.pc_next);
+ TCGv_i32 intlevel = tcg_const_i32(imm4);
+
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_waiti(cpu_env, pc, intlevel);
+ tcg_temp_free(pc);
+ tcg_temp_free(intlevel);
+}
+#endif
+
+static bool gen_window_check(DisasContext *dc, uint32_t mask)
+{
+ unsigned r = 31 - clz32(mask);
+
+ if (r / 4 > dc->window) {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 w = tcg_const_i32(r / 4);
+
+ gen_helper_window_check(cpu_env, pc, w);
+ dc->base.is_jmp = DISAS_NORETURN;
+ return false;
+ }
+ return true;
+}
+
+static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
+{
+ TCGv_i32 m = tcg_temp_new_i32();
+
+ if (hi) {
+ (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
+ } else {
+ (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
+ }
+ return m;
+}
+
+static void gen_zero_check(DisasContext *dc, const OpcodeArg arg[])
+{
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0, label);
+ gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
+ gen_set_label(label);
+}
+
+static inline unsigned xtensa_op0_insn_len(DisasContext *dc, uint8_t op0)
+{
+ return xtensa_isa_length_from_chars(dc->config->isa, &op0);
+}
+
+static int gen_postprocess(DisasContext *dc, int slot)
+{
+ uint32_t op_flags = dc->op_flags;
+
+#ifndef CONFIG_USER_ONLY
+ if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_check_interrupts(cpu_env);
+ }
+#endif
+ if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
+ gen_helper_sync_windowbase(cpu_env);
+ }
+ if (op_flags & XTENSA_OP_EXIT_TB_M1) {
+ slot = -1;
+ }
+ return slot;
+}
+
+struct opcode_arg_copy {
+ uint32_t resource;
+ void *temp;
+ OpcodeArg *arg;
+};
+
+struct opcode_arg_info {
+ uint32_t resource;
+ int index;
+};
+
+struct slot_prop {
+ XtensaOpcodeOps *ops;
+ OpcodeArg arg[MAX_OPCODE_ARGS];
+ struct opcode_arg_info in[MAX_OPCODE_ARGS];
+ struct opcode_arg_info out[MAX_OPCODE_ARGS];
+ unsigned n_in;
+ unsigned n_out;
+ uint32_t op_flags;
+};
+
+enum resource_type {
+ RES_REGFILE,
+ RES_STATE,
+ RES_MAX,
+};
+
+static uint32_t encode_resource(enum resource_type r, unsigned g, unsigned n)
+{
+ assert(r < RES_MAX && g < 256 && n < 65536);
+ return (r << 24) | (g << 16) | n;
+}
+
+static enum resource_type get_resource_type(uint32_t resource)
+{
+ return resource >> 24;
+}
+
+/*
+ * a depends on b if b must be executed before a,
+ * because a's side effects would clobber b's inputs.
+ */
+static bool op_depends_on(const struct slot_prop *a,
+ const struct slot_prop *b)
+{
+ unsigned i = 0;
+ unsigned j = 0;
+
+ if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
+ return true;
+ }
+ if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
+ (b->op_flags & XTENSA_OP_LOAD_STORE)) {
+ return true;
+ }
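+ /*
+ * Both resource lists are kept sorted by resource number (see
+ * resource_compare), so a linear merge scan detects any overlap.
+ */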
+ while (i < a->n_out && j < b->n_in) {
+ if (a->out[i].resource < b->in[j].resource) {
+ ++i;
+ } else if (a->out[i].resource > b->in[j].resource) {
+ ++j;
+ } else {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Try to break the dependency of a on b: append temporary register
+ * copy records to the end of 'copy' and update n_copy on success.
+ * This is not always possible: e.g. control flow must always come
+ * last, loads/stores must come first and state dependencies are not
+ * supported yet.
+ */
+static bool break_dependency(struct slot_prop *a,
+ struct slot_prop *b,
+ struct opcode_arg_copy *copy,
+ unsigned *n_copy)
+{
+ unsigned i = 0;
+ unsigned j = 0;
+ unsigned n = *n_copy;
+ bool rv = false;
+
+ if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
+ return false;
+ }
+ if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
+ (b->op_flags & XTENSA_OP_LOAD_STORE)) {
+ return false;
+ }
+ while (i < a->n_out && j < b->n_in) {
+ if (a->out[i].resource < b->in[j].resource) {
+ ++i;
+ } else if (a->out[i].resource > b->in[j].resource) {
+ ++j;
+ } else {
+ int index = b->in[j].index;
+
+ if (get_resource_type(a->out[i].resource) != RES_REGFILE ||
+ index < 0) {
+ return false;
+ }
+ copy[n].resource = b->in[j].resource;
+ copy[n].arg = b->arg + index;
+ ++n;
+ ++j;
+ rv = true;
+ }
+ }
+ *n_copy = n;
+ return rv;
+}
+
+/*
+ * Calculate the evaluation order for slot opcodes.
+ * Build the opcode order graph and output its nodes in topological
+ * order (Kahn's algorithm). An edge a -> b in the graph means that
+ * opcode a must be executed before opcode b. If edges remain when the
+ * scan stalls (a dependency cycle), try to break one with a temporary
+ * register copy and restart.
+ */
+static bool tsort(struct slot_prop *slot,
+ struct slot_prop *sorted[],
+ unsigned n,
+ struct opcode_arg_copy *copy,
+ unsigned *n_copy)
+{
+ struct tsnode {
+ unsigned n_in_edge;
+ unsigned n_out_edge;
+ unsigned out_edge[MAX_INSN_SLOTS];
+ } node[MAX_INSN_SLOTS];
+
+ unsigned in[MAX_INSN_SLOTS];
+ unsigned i, j;
+ unsigned n_in = 0;
+ unsigned n_out = 0;
+ unsigned n_edge = 0;
+ unsigned in_idx = 0;
+ unsigned node_idx = 0;
+
+ for (i = 0; i < n; ++i) {
+ node[i].n_in_edge = 0;
+ node[i].n_out_edge = 0;
+ }
+
+ for (i = 0; i < n; ++i) {
+ unsigned n_out_edge = 0;
+
+ for (j = 0; j < n; ++j) {
+ if (i != j && op_depends_on(slot + j, slot + i)) {
+ node[i].out_edge[n_out_edge] = j;
+ ++node[j].n_in_edge;
+ ++n_out_edge;
+ ++n_edge;
+ }
+ }
+ node[i].n_out_edge = n_out_edge;
+ }
+
+ for (i = 0; i < n; ++i) {
+ if (!node[i].n_in_edge) {
+ in[n_in] = i;
+ ++n_in;
+ }
+ }
+
+again:
+ for (; in_idx < n_in; ++in_idx) {
+ i = in[in_idx];
+ sorted[n_out] = slot + i;
+ ++n_out;
+ for (j = 0; j < node[i].n_out_edge; ++j) {
+ --n_edge;
+ if (--node[node[i].out_edge[j]].n_in_edge == 0) {
+ in[n_in] = node[i].out_edge[j];
+ ++n_in;
+ }
+ }
+ }
+ if (n_edge) {
+ for (; node_idx < n; ++node_idx) {
+ struct tsnode *cnode = node + node_idx;
+
+ if (cnode->n_in_edge) {
+ for (j = 0; j < cnode->n_out_edge; ++j) {
+ unsigned k = cnode->out_edge[j];
+
+ if (break_dependency(slot + k, slot + node_idx,
+ copy, n_copy) &&
+ --node[k].n_in_edge == 0) {
+ in[n_in] = k;
+ ++n_in;
+ --n_edge;
+ cnode->out_edge[j] =
+ cnode->out_edge[cnode->n_out_edge - 1];
+ --cnode->n_out_edge;
+ goto again;
+ }
+ }
+ }
+ }
+ }
+ return n_edge == 0;
+}
+
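+/*
+ * direction is the libisa operand inout code: 'i' marks an input,
+ * 'o' an output and 'm' both.
+ */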
+static void opcode_add_resource(struct slot_prop *op,
+ uint32_t resource, char direction,
+ int index)
+{
+ switch (direction) {
+ case 'm':
+ case 'i':
+ assert(op->n_in < ARRAY_SIZE(op->in));
+ op->in[op->n_in].resource = resource;
+ op->in[op->n_in].index = index;
+ ++op->n_in;
+ /* fall through */
+ case 'o':
+ if (direction == 'm' || direction == 'o') {
+ assert(op->n_out < ARRAY_SIZE(op->out));
+ op->out[op->n_out].resource = resource;
+ op->out[op->n_out].index = index;
+ ++op->n_out;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static int resource_compare(const void *a, const void *b)
+{
+ const struct opcode_arg_info *pa = a;
+ const struct opcode_arg_info *pb = b;
+
+ return pa->resource < pb->resource ?
+ -1 : (pa->resource > pb->resource ? 1 : 0);
+}
+
+static int arg_copy_compare(const void *a, const void *b)
+{
+ const struct opcode_arg_copy *pa = a;
+ const struct opcode_arg_copy *pb = b;
+
+ return pa->resource < pb->resource ?
+ -1 : (pa->resource > pb->resource ? 1 : 0);
+}
+
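+/*
+ * Decode and translate one instruction: a single opcode or a
+ * multi-slot FLIX bundle. Slot opcodes are decoded and checked for
+ * required options, windowed registers and coprocessors before being
+ * translated.
+ */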
+static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
+{
+ xtensa_isa isa = dc->config->isa;
+ unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, &dc->base,
+ dc->pc)};
+ unsigned len = xtensa_op0_insn_len(dc, b[0]);
+ xtensa_format fmt;
+ int slot, slots;
+ unsigned i;
+ uint32_t op_flags = 0;
+ struct slot_prop slot_prop[MAX_INSN_SLOTS];
+ struct slot_prop *ordered[MAX_INSN_SLOTS];
+ struct opcode_arg_copy arg_copy[MAX_INSN_SLOTS * MAX_OPCODE_ARGS];
+ unsigned n_arg_copy = 0;
+ uint32_t debug_cause = 0;
+ uint32_t windowed_register = 0;
+ uint32_t coprocessor = 0;
+
+ if (len == XTENSA_UNDEFINED) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "unknown instruction length (pc = %08x)\n",
+ dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ dc->base.pc_next = dc->pc + 1;
+ return;
+ }
+
+ dc->base.pc_next = dc->pc + len;
+ for (i = 1; i < len; ++i) {
+ b[i] = translator_ldub(env, &dc->base, dc->pc + i);
+ }
+ xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
+ fmt = xtensa_format_decode(isa, dc->insnbuf);
+ if (fmt == XTENSA_UNDEFINED) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "unrecognized instruction format (pc = %08x)\n",
+ dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return;
+ }
+ slots = xtensa_format_num_slots(isa, fmt);
+ for (slot = 0; slot < slots; ++slot) {
+ xtensa_opcode opc;
+ int opnd, vopnd, opnds;
+ OpcodeArg *arg = slot_prop[slot].arg;
+ XtensaOpcodeOps *ops;
+
+ xtensa_format_get_slot(isa, fmt, slot, dc->insnbuf, dc->slotbuf);
+ opc = xtensa_opcode_decode(isa, fmt, slot, dc->slotbuf);
+ if (opc == XTENSA_UNDEFINED) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "unrecognized opcode in slot %d (pc = %08x)\n",
+ slot, dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return;
+ }
+ opnds = xtensa_opcode_num_operands(isa, opc);
+
+ for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
+ void **register_file = NULL;
+ xtensa_regfile rf;
+
+ if (xtensa_operand_is_register(isa, opc, opnd)) {
+ rf = xtensa_operand_regfile(isa, opc, opnd);
+ register_file = dc->config->regfile[rf];
+
+ if (rf == dc->config->a_regfile) {
+ uint32_t v;
+
+ xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
+ dc->slotbuf, &v);
+ xtensa_operand_decode(isa, opc, opnd, &v);
+ windowed_register |= 1u << v;
+ }
+ }
+ if (xtensa_operand_is_visible(isa, opc, opnd)) {
+ uint32_t v;
+
+ xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
+ dc->slotbuf, &v);
+ xtensa_operand_decode(isa, opc, opnd, &v);
+ arg[vopnd].raw_imm = v;
+ if (xtensa_operand_is_PCrelative(isa, opc, opnd)) {
+ xtensa_operand_undo_reloc(isa, opc, opnd, &v, dc->pc);
+ }
+ arg[vopnd].imm = v;
+ if (register_file) {
+ arg[vopnd].in = register_file[v];
+ arg[vopnd].out = register_file[v];
+ arg[vopnd].num_bits = xtensa_regfile_num_bits(isa, rf);
+ } else {
+ arg[vopnd].num_bits = 32;
+ }
+ ++vopnd;
+ }
+ }
+ ops = dc->config->opcode_ops[opc];
+ slot_prop[slot].ops = ops;
+
+ if (ops) {
+ op_flags |= ops->op_flags;
+ if (ops->test_exceptions) {
+ op_flags |= ops->test_exceptions(dc, arg, ops->par);
+ }
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "unimplemented opcode '%s' in slot %d (pc = %08x)\n",
+ xtensa_opcode_name(isa, opc), slot, dc->pc);
+ op_flags |= XTENSA_OP_ILL;
+ }
+ if (op_flags & XTENSA_OP_ILL) {
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return;
+ }
+ if (op_flags & XTENSA_OP_DEBUG_BREAK) {
+ debug_cause |= ops->par[0];
+ }
+ if (ops->test_overflow) {
+ windowed_register |= ops->test_overflow(dc, arg, ops->par);
+ }
+ coprocessor |= ops->coprocessor;
+
+ if (slots > 1) {
+ slot_prop[slot].n_in = 0;
+ slot_prop[slot].n_out = 0;
+ slot_prop[slot].op_flags = ops->op_flags & XTENSA_OP_LOAD_STORE;
+
+ opnds = xtensa_opcode_num_operands(isa, opc);
+
+ for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
+ bool visible = xtensa_operand_is_visible(isa, opc, opnd);
+
+ if (xtensa_operand_is_register(isa, opc, opnd)) {
+ xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
+ uint32_t v = 0;
+
+ xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
+ dc->slotbuf, &v);
+ xtensa_operand_decode(isa, opc, opnd, &v);
+ opcode_add_resource(slot_prop + slot,
+ encode_resource(RES_REGFILE, rf, v),
+ xtensa_operand_inout(isa, opc, opnd),
+ visible ? vopnd : -1);
+ }
+ if (visible) {
+ ++vopnd;
+ }
+ }
+
+ opnds = xtensa_opcode_num_stateOperands(isa, opc);
+
+ for (opnd = 0; opnd < opnds; ++opnd) {
+ xtensa_state state = xtensa_stateOperand_state(isa, opc, opnd);
+
+ opcode_add_resource(slot_prop + slot,
+ encode_resource(RES_STATE, 0, state),
+ xtensa_stateOperand_inout(isa, opc, opnd),
+ -1);
+ }
+ if (xtensa_opcode_is_branch(isa, opc) ||
+ xtensa_opcode_is_jump(isa, opc) ||
+ xtensa_opcode_is_loop(isa, opc) ||
+ xtensa_opcode_is_call(isa, opc)) {
+ slot_prop[slot].op_flags |= XTENSA_OP_CONTROL_FLOW;
+ }
+
+ qsort(slot_prop[slot].in, slot_prop[slot].n_in,
+ sizeof(slot_prop[slot].in[0]), resource_compare);
+ qsort(slot_prop[slot].out, slot_prop[slot].n_out,
+ sizeof(slot_prop[slot].out[0]), resource_compare);
+ }
+ }
+
+ if (slots > 1) {
+ if (!tsort(slot_prop, ordered, slots, arg_copy, &n_arg_copy)) {
+ qemu_log_mask(LOG_UNIMP,
+ "Circular resource dependencies (pc = %08x)\n",
+ dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return;
+ }
+ } else {
+ ordered[0] = slot_prop + 0;
+ }
+
+ if ((op_flags & XTENSA_OP_PRIVILEGED) &&
+ !gen_check_privilege(dc)) {
+ return;
+ }
+
+ if (op_flags & XTENSA_OP_SYSCALL) {
+ gen_exception_cause(dc, SYSCALL_CAUSE);
+ return;
+ }
+
+ if ((op_flags & XTENSA_OP_DEBUG_BREAK) && dc->debug) {
+ gen_debug_exception(dc, debug_cause);
+ return;
+ }
+
+ if (windowed_register && !gen_window_check(dc, windowed_register)) {
+ return;
+ }
+
+ if (op_flags & XTENSA_OP_UNDERFLOW) {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+
+ gen_helper_test_underflow_retw(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ if (op_flags & XTENSA_OP_ALLOCA) {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+
+ gen_helper_movsp(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
+ return;
+ }
+
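+    /*
+     * Slot reordering may make an opcode read a register after another
+     * slot has overwritten it. Preserve the original values: sort the
+     * copies by resource, allocate one temporary per distinct resource
+     * and redirect the affected inputs to that temporary.
+     */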
+ if (n_arg_copy) {
+ uint32_t resource;
+ void *temp;
+ unsigned j;
+
+ qsort(arg_copy, n_arg_copy, sizeof(*arg_copy), arg_copy_compare);
+ for (i = j = 0; i < n_arg_copy; ++i) {
+ if (i == 0 || arg_copy[i].resource != resource) {
+ resource = arg_copy[i].resource;
+ if (arg_copy[i].arg->num_bits <= 32) {
+ temp = tcg_temp_local_new_i32();
+ tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
+ } else if (arg_copy[i].arg->num_bits <= 64) {
+ temp = tcg_temp_local_new_i64();
+ tcg_gen_mov_i64(temp, arg_copy[i].arg->in);
+ } else {
+ g_assert_not_reached();
+ }
+ arg_copy[i].temp = temp;
+
+ if (i != j) {
+ arg_copy[j] = arg_copy[i];
+ }
+ ++j;
+ }
+ arg_copy[i].arg->in = temp;
+ }
+ n_arg_copy = j;
+ }
+
+ if (op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
+ for (slot = 0; slot < slots; ++slot) {
+ if (slot_prop[slot].ops->op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
+ gen_zero_check(dc, slot_prop[slot].arg);
+ }
+ }
+ }
+
+ dc->op_flags = op_flags;
+
+ for (slot = 0; slot < slots; ++slot) {
+ struct slot_prop *pslot = ordered[slot];
+ XtensaOpcodeOps *ops = pslot->ops;
+
+ ops->translate(dc, pslot->arg, ops->par);
+ }
+
+ for (i = 0; i < n_arg_copy; ++i) {
+ if (arg_copy[i].arg->num_bits <= 32) {
+ tcg_temp_free_i32(arg_copy[i].temp);
+ } else if (arg_copy[i].arg->num_bits <= 64) {
+ tcg_temp_free_i64(arg_copy[i].temp);
+ } else {
+ g_assert_not_reached();
+ }
+ }
+
+ if (dc->base.is_jmp == DISAS_NEXT) {
+ gen_postprocess(dc, 0);
+ dc->op_flags = 0;
+ if (op_flags & XTENSA_OP_EXIT_TB_M1) {
+ /* Change in mmu index, memory mapping or tb->flags; exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ } else if (op_flags & XTENSA_OP_EXIT_TB_0) {
+ gen_jumpi_check_loop_end(dc, 0);
+ } else {
+ gen_check_loop_end(dc, 0);
+ }
+ }
+ dc->pc = dc->base.pc_next;
+}
+
+static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
+{
+ uint8_t b0 = cpu_ldub_code(env, dc->pc);
+ return xtensa_op0_insn_len(dc, b0);
+}
+
+static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
+{
+ unsigned i;
+
+ for (i = 0; i < dc->config->nibreak; ++i) {
+ if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
+ env->sregs[IBREAKA + i] == dc->pc) {
+ gen_debug_exception(dc, DEBUGCAUSE_IB);
+ break;
+ }
+ }
+}
+
+static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUXtensaState *env = cpu->env_ptr;
+ uint32_t tb_flags = dc->base.tb->flags;
+
+ dc->config = env->config;
+ dc->pc = dc->base.pc_first;
+ dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
+ dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring;
+ dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >>
+ XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) +
+ (dc->base.pc_first & TARGET_PAGE_MASK);
+ dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG;
+ dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT;
+ dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
+ XTENSA_TBFLAG_CPENABLE_SHIFT;
+ dc->window = ((tb_flags & XTENSA_TBFLAG_WINDOW_MASK) >>
+ XTENSA_TBFLAG_WINDOW_SHIFT);
+ dc->cwoe = tb_flags & XTENSA_TBFLAG_CWOE;
+ dc->callinc = ((tb_flags & XTENSA_TBFLAG_CALLINC_MASK) >>
+ XTENSA_TBFLAG_CALLINC_SHIFT);
+ init_sar_tracker(dc);
+}
+
+static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ if (dc->icount) {
+ dc->next_icount = tcg_temp_local_new_i32();
+ }
+}
+
+static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
+{
+ tcg_gen_insn_start(dcbase->pc_next);
+}
+
+static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+ CPUXtensaState *env = cpu->env_ptr;
+ target_ulong page_start;
+
+    /*
+     * These two conditions only apply to the first insn in the TB,
+     * but this is the first TranslatorOps hook that allows exiting.
+     */
+ if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
+ && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) {
+ gen_exception(dc, EXCP_YIELD);
+ dc->base.pc_next = dc->pc + 1;
+ dc->base.is_jmp = DISAS_NORETURN;
+ return;
+ }
+
+ if (dc->icount) {
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_addi_i32(dc->next_icount, cpu_SR[ICOUNT], 1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, dc->next_icount, 0, label);
+ tcg_gen_mov_i32(dc->next_icount, cpu_SR[ICOUNT]);
+ if (dc->debug) {
+ gen_debug_exception(dc, DEBUGCAUSE_IC);
+ }
+ gen_set_label(label);
+ }
+
+ if (dc->debug) {
+ gen_ibreak_check(env, dc);
+ }
+
+ disas_xtensa_insn(env, dc);
+
+ if (dc->icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
+ }
+
+ /* End the TB if the next insn will cross into the next page. */
+ page_start = dc->base.pc_first & TARGET_PAGE_MASK;
+ if (dc->base.is_jmp == DISAS_NEXT &&
+ (dc->pc - page_start >= TARGET_PAGE_SIZE ||
+ dc->pc - page_start + xtensa_insn_len(env, dc) > TARGET_PAGE_SIZE)) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+}
+
+static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
+{
+ DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+ reset_sar_tracker(dc);
+ if (dc->icount) {
+ tcg_temp_free(dc->next_icount);
+ }
+
+ switch (dc->base.is_jmp) {
+ case DISAS_NORETURN:
+ break;
+ case DISAS_TOO_MANY:
+ gen_jumpi(dc, dc->pc, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps xtensa_translator_ops = {
+ .init_disas_context = xtensa_tr_init_disas_context,
+ .tb_start = xtensa_tr_tb_start,
+ .insn_start = xtensa_tr_insn_start,
+ .translate_insn = xtensa_tr_translate_insn,
+ .tb_stop = xtensa_tr_tb_stop,
+ .disas_log = xtensa_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+{
+ DisasContext dc = {};
+ translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
+}
+
+void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ xtensa_isa isa = env->config->isa;
+ int i, j;
+
+ qemu_fprintf(f, "PC=%08x\n\n", env->pc);
+
+ for (i = j = 0; i < xtensa_isa_num_sysregs(isa); ++i) {
+ const uint32_t *reg =
+ xtensa_sysreg_is_user(isa, i) ? env->uregs : env->sregs;
+ int regno = xtensa_sysreg_number(isa, i);
+
+ if (regno >= 0) {
+ qemu_fprintf(f, "%12s=%08x%c",
+ xtensa_sysreg_name(isa, i),
+ reg[regno],
+ (j++ % 4) == 3 ? '\n' : ' ');
+ }
+ }
+
+ qemu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
+
+ for (i = 0; i < 16; ++i) {
+ qemu_fprintf(f, " A%02d=%08x%c",
+ i, env->regs[i], (i % 4) == 3 ? '\n' : ' ');
+ }
+
+ xtensa_sync_phys_from_window(env);
+ qemu_fprintf(f, "\n");
+
+ for (i = 0; i < env->config->nareg; ++i) {
+ qemu_fprintf(f, "AR%02d=%08x ", i, env->phys_regs[i]);
+ if (i % 4 == 3) {
+ bool ws = (env->sregs[WINDOW_START] & (1 << (i / 4))) != 0;
+ bool cw = env->sregs[WINDOW_BASE] == i / 4;
+
+ qemu_fprintf(f, "%c%c\n", ws ? '<' : ' ', cw ? '=' : ' ');
+ }
+ }
+
+ if ((flags & CPU_DUMP_FPU) &&
+ xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
+ qemu_fprintf(f, "\n");
+
+ for (i = 0; i < 16; ++i) {
+ qemu_fprintf(f, "F%02d=%08x (%-+15.8e)%c", i,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]),
+ *(float *)(env->fregs[i].f32 + FP_F32_LOW),
+ (i % 2) == 1 ? '\n' : ' ');
+ }
+ }
+
+ if ((flags & CPU_DUMP_FPU) &&
+ xtensa_option_enabled(env->config, XTENSA_OPTION_DFP_COPROCESSOR) &&
+ !xtensa_option_enabled(env->config, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ qemu_fprintf(f, "\n");
+
+ for (i = 0; i < 16; ++i) {
+ qemu_fprintf(f, "F%02d=%016"PRIx64" (%-+24.16le)%c", i,
+ float64_val(env->fregs[i].f64),
+ *(double *)(&env->fregs[i].f64),
+ (i % 2) == 1 ? '\n' : ' ');
+ }
+ }
+}
+
+void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_abs_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_add(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_add_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_addi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_addi_i32(arg[0].out, arg[1].in, arg[2].imm);
+}
+
+static void translate_addx(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
+ tcg_gen_add_i32(arg[0].out, tmp, arg[2].in);
+ tcg_temp_free(tmp);
+}
+
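+/*
+ * ANY4/ANY8/ALL4/ALL8: reduce a field of par[1] boolean register bits
+ * starting at bit arg[1].imm. For ALL (par[0] set), adding 1 << imm
+ * carries into bit imm + par[1] only when every field bit is set; for
+ * ANY, adding the field mask carries there whenever any bit is set.
+ * That carry bit is deposited into the destination boolean bit.
+ */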
+static void translate_all(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ uint32_t shift = par[1];
+ TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_and_i32(tmp, arg[1].in, mask);
+ if (par[0]) {
+ tcg_gen_addi_i32(tmp, tmp, 1 << arg[1].imm);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, mask);
+ }
+ tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift);
+ tcg_gen_deposit_i32(arg[0].out, arg[0].out,
+ tmp, arg[0].imm, 1);
+ tcg_temp_free(mask);
+ tcg_temp_free(tmp);
+}
+
+static void translate_and(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_and_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_ball(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
+ gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm);
+ tcg_temp_free(tmp);
+}
+
+static void translate_bany(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
+ gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
+ tcg_temp_free(tmp);
+}
+
+static void translate_b(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_brcond(dc, par[0], arg[0].in, arg[1].in, arg[2].imm);
+}
+
+static void translate_bb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifdef TARGET_WORDS_BIGENDIAN
+ TCGv_i32 bit = tcg_const_i32(0x80000000u);
+#else
+ TCGv_i32 bit = tcg_const_i32(0x00000001u);
+#endif
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, arg[1].in, 0x1f);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_shr_i32(bit, bit, tmp);
+#else
+ tcg_gen_shl_i32(bit, bit, tmp);
+#endif
+ tcg_gen_and_i32(tmp, arg[0].in, bit);
+ gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
+ tcg_temp_free(tmp);
+ tcg_temp_free(bit);
+}
+
+static void translate_bbi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
+#else
+ tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
+#endif
+ gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
+ tcg_temp_free(tmp);
+}
+
+static void translate_bi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_brcondi(dc, par[0], arg[0].in, arg[1].imm, arg[2].imm);
+}
+
+static void translate_bz(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_brcondi(dc, par[0], arg[0].in, 0, arg[1].imm);
+}
+
+enum {
+ BOOLEAN_AND,
+ BOOLEAN_ANDC,
+ BOOLEAN_OR,
+ BOOLEAN_ORC,
+ BOOLEAN_XOR,
+};
+
+static void translate_boolean(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ static void (* const op[])(TCGv_i32, TCGv_i32, TCGv_i32) = {
+ [BOOLEAN_AND] = tcg_gen_and_i32,
+ [BOOLEAN_ANDC] = tcg_gen_andc_i32,
+ [BOOLEAN_OR] = tcg_gen_or_i32,
+ [BOOLEAN_ORC] = tcg_gen_orc_i32,
+ [BOOLEAN_XOR] = tcg_gen_xor_i32,
+ };
+
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(tmp1, arg[1].in, arg[1].imm);
+ tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm);
+ op[par[0]](tmp1, tmp1, tmp2);
+ tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+}
+
+static void translate_bp(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm);
+ gen_brcondi(dc, par[0], tmp, 0, arg[1].imm);
+ tcg_temp_free(tmp);
+}
+
+static void translate_call0(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
+ gen_jumpi(dc, arg[0].imm, 0);
+}
+
+static void translate_callw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
+ gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0));
+ tcg_temp_free(tmp);
+}
+
+static void translate_callx0(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp, arg[0].in);
+ tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_callxw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, arg[0].in);
+ gen_callw_slot(dc, par[0], tmp, -1);
+ tcg_temp_free(tmp);
+}
+
+static void translate_clamps(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm);
+ TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1);
+
+ tcg_gen_smax_i32(tmp1, tmp1, arg[1].in);
+ tcg_gen_smin_i32(arg[0].out, tmp1, tmp2);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+}
+
+static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+    /* TODO: GPIO32 may be part of a coprocessor */
+ tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm));
+}
+
+static void translate_clrex(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+}
+
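+/*
+ * CONST16: shift the low half of the previous register value into the
+ * high half and place the 16-bit immediate in the low half, so two
+ * CONST16 in sequence build a full 32-bit constant.
+ */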
+static void translate_const16(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 c = tcg_const_i32(arg[1].imm);
+
+ tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16);
+ tcg_temp_free(c);
+}
+
+static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ TCGv_i32 res = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
+ tcg_gen_qemu_ld8u(res, addr, dc->cring);
+ tcg_temp_free(addr);
+ tcg_temp_free(res);
+}
+
+static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_deposit_i32(arg[1].out, arg[1].in, arg[0].in,
+ arg[2].imm, arg[3].imm);
+}
+
+static void translate_diwbuip(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_addi_i32(arg[0].out, arg[0].in, dc->config->dcache_line_bytes);
+}
+
+static uint32_t test_exceptions_entry(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[0].imm > 3 || !dc->cwoe) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Illegal entry instruction (pc = %08x)\n", dc->pc);
+ return XTENSA_OP_ILL;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ return 1 << (dc->callinc * 4);
+}
+
+static void translate_entry(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 s = tcg_const_i32(arg[0].imm);
+ TCGv_i32 imm = tcg_const_i32(arg[1].imm);
+ gen_helper_entry(cpu_env, pc, s, imm);
+ tcg_temp_free(imm);
+ tcg_temp_free(s);
+ tcg_temp_free(pc);
+}
+
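+/*
+ * EXTUI: extract an unsigned field of arg[3].imm bits starting at bit
+ * arg[2].imm, i.e. out = (in >> arg[2].imm) & ((1 << arg[3].imm) - 1).
+ */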
+static void translate_extui(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ int maskimm = (1 << arg[3].imm) - 1;
+
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm);
+ tcg_gen_andi_i32(arg[0].out, tmp, maskimm);
+ tcg_temp_free(tmp);
+}
+
+static void translate_getex(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1);
+ tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1);
+ tcg_gen_mov_i32(arg[0].out, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_icache(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 addr = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(cpu_pc, dc->pc);
+ tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
+ gen_helper_itlb_hit_test(cpu_env, addr);
+ tcg_temp_free(addr);
+#endif
+}
+
+static void translate_itlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 dtlb = tcg_const_i32(par[0]);
+
+ gen_helper_itlb(cpu_env, arg[0].in, dtlb);
+ tcg_temp_free(dtlb);
+#endif
+}
+
+static void translate_j(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_jumpi(dc, arg[0].imm, 0);
+}
+
+static void translate_jx(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_jump(dc, arg[0].in);
+}
+
+static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop);
+ tcg_temp_free(addr);
+}
+
+#ifdef CONFIG_USER_ONLY
+static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
+{
+}
+#else
+static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
+{
+ if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 write = tcg_const_i32(is_write);
+
+ gen_helper_check_exclusive(cpu_env, tpc, addr, write);
+ tcg_temp_free(tpc);
+ tcg_temp_free(write);
+ }
+}
+#endif
+
+static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_mov_i32(addr, arg[1].in);
+ mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
+ gen_check_exclusive(dc, addr, false);
+ tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, mop);
+ tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+ tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
+ tcg_temp_free(addr);
+}
+
+static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ mop = gen_load_store_alignment(dc, par[0], addr);
+
+ if (par[2]) {
+ if (par[1]) {
+ tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
+ }
+ tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
+ } else {
+ tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
+ if (par[1]) {
+ tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
+ }
+ }
+ tcg_temp_free(addr);
+}
+
+static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp;
+
+ if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) {
+ tmp = tcg_const_i32(arg[1].raw_imm - 1);
+ tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp);
+ } else {
+ tmp = tcg_const_i32(arg[1].imm);
+ }
+ tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
+ tcg_temp_free(tmp);
+}
+
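+/*
+ * Zero-overhead loop setup: LCOUNT = s - 1, LBEG = the PC after this
+ * instruction, LEND = the loop end target. For the conditional
+ * variants (par[0] != TCG_COND_NEVER) the loop body is skipped when
+ * the condition on s does not hold.
+ */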
+static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ uint32_t lend = arg[1].imm;
+
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], arg[0].in, 1);
+ tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
+ tcg_gen_movi_i32(cpu_SR[LEND], lend);
+
+ if (par[0] != TCG_COND_NEVER) {
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_i32(par[0], arg[0].in, 0, label);
+ gen_jumpi(dc, lend, 1);
+ gen_set_label(label);
+ }
+
+ gen_jumpi(dc, dc->base.pc_next, 0);
+}
+
+enum {
+ MAC16_UMUL,
+ MAC16_MUL,
+ MAC16_MULA,
+ MAC16_MULS,
+ MAC16_NONE,
+};
+
+enum {
+ MAC16_LL,
+ MAC16_HL,
+ MAC16_LH,
+ MAC16_HH,
+
+ MAC16_HX = 0x1,
+ MAC16_XH = 0x2,
+};
+
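+/*
+ * MAC16 family: optionally reload an MR register from memory with
+ * address auto-increment (par[2] is the increment), and multiply the
+ * 16-bit operand halves selected by par[1], accumulating into or
+ * subtracting from the ACCHI:ACCLO pair (par[0]); the accumulator is
+ * kept sign-extended to 40 bits.
+ */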
+static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ int op = par[0];
+ unsigned half = par[1];
+ uint32_t ld_offset = par[2];
+ unsigned off = ld_offset ? 2 : 0;
+ TCGv_i32 vaddr = tcg_temp_new_i32();
+ TCGv_i32 mem32 = tcg_temp_new_i32();
+
+ if (ld_offset) {
+ MemOp mop;
+
+ tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
+ mop = gen_load_store_alignment(dc, MO_TEUL, vaddr);
+ tcg_gen_qemu_ld_tl(mem32, vaddr, dc->cring, mop);
+ }
+ if (op != MAC16_NONE) {
+ TCGv_i32 m1 = gen_mac16_m(arg[off].in,
+ half & MAC16_HX, op == MAC16_UMUL);
+ TCGv_i32 m2 = gen_mac16_m(arg[off + 1].in,
+ half & MAC16_XH, op == MAC16_UMUL);
+
+ if (op == MAC16_MUL || op == MAC16_UMUL) {
+ tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
+ if (op == MAC16_UMUL) {
+ tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
+ } else {
+ tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
+ }
+ } else {
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+
+ tcg_gen_mul_i32(lo, m1, m2);
+ tcg_gen_sari_i32(hi, lo, 31);
+ if (op == MAC16_MULA) {
+ tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ } else {
+ tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ }
+ tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+ tcg_temp_free(m1);
+ tcg_temp_free(m2);
+ }
+ if (ld_offset) {
+ tcg_gen_mov_i32(arg[1].out, vaddr);
+ tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32);
+ }
+ tcg_temp_free(vaddr);
+ tcg_temp_free(mem32);
+}
+
+static void translate_memw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+}
+
+static void translate_smin(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_smin_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_umin(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_umin_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_smax(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_smax_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_umax(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_umax_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_mov(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_movcond(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(par[0], arg[0].out,
+ arg[2].in, zero, arg[1].in, arg[0].in);
+ tcg_temp_free(zero);
+}
+
+static void translate_movi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_movi_i32(arg[0].out, arg[1].imm);
+}
+
+static void translate_movp(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
+ tcg_gen_movcond_i32(par[0],
+ arg[0].out, tmp, zero,
+ arg[1].in, arg[0].in);
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+}
+
+static void translate_movsp(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_mul16(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 v1 = tcg_temp_new_i32();
+ TCGv_i32 v2 = tcg_temp_new_i32();
+
+ if (par[0]) {
+ tcg_gen_ext16s_i32(v1, arg[1].in);
+ tcg_gen_ext16s_i32(v2, arg[2].in);
+ } else {
+ tcg_gen_ext16u_i32(v1, arg[1].in);
+ tcg_gen_ext16u_i32(v2, arg[2].in);
+ }
+ tcg_gen_mul_i32(arg[0].out, v1, v2);
+ tcg_temp_free(v2);
+ tcg_temp_free(v1);
+}
+
+static void translate_mull(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mul_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_mulh(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 lo = tcg_temp_new();
+
+ if (par[0]) {
+ tcg_gen_muls2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
+ } else {
+ tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
+ }
+ tcg_temp_free(lo);
+}
+
+static void translate_neg(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_neg_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_nop(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+}
+
+static void translate_nsa(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_clrsb_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_nsau(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_clzi_i32(arg[0].out, arg[1].in, 32);
+}
+
+static void translate_or(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_or_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 dtlb = tcg_const_i32(par[0]);
+
+ tcg_gen_movi_i32(cpu_pc, dc->pc);
+ gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
+ tcg_temp_free(dtlb);
+#endif
+}
+
+static void translate_pptlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_i32(cpu_pc, dc->pc);
+ gen_helper_pptlb(arg[0].out, cpu_env, arg[1].in);
+#endif
+}
+
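+/*
+ * Signed quotient (par[0] nonzero) or remainder: the INT_MIN / -1 case
+ * is checked explicitly, yielding 0x80000000 for the quotient and 0 for
+ * the remainder, avoiding the undefined overflowing division.
+ */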
+static void translate_quos(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGLabel *label1 = gen_new_label();
+ TCGLabel *label2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, arg[1].in, 0x80000000,
+ label1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0xffffffff,
+ label1);
+ tcg_gen_movi_i32(arg[0].out,
+ par[0] ? 0x80000000 : 0);
+ tcg_gen_br(label2);
+ gen_set_label(label1);
+ if (par[0]) {
+ tcg_gen_div_i32(arg[0].out,
+ arg[1].in, arg[2].in);
+ } else {
+ tcg_gen_rem_i32(arg[0].out,
+ arg[1].in, arg[2].in);
+ }
+ gen_set_label(label2);
+}
+
+static void translate_quou(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_divu_i32(arg[0].out,
+ arg[1].in, arg[2].in);
+}
+
+static void translate_read_impwire(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+    /* TODO: GPIO32 may be part of a coprocessor */
+ tcg_gen_movi_i32(arg[0].out, 0);
+}
+
+static void translate_remu(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_remu_i32(arg[0].out,
+ arg[1].in, arg[2].in);
+}
+
+static void translate_rer(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_rer(arg[0].out, cpu_env, arg[1].in);
+}
+
+static void translate_ret(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_jump(dc, cpu_R[0]);
+}
+
+static uint32_t test_exceptions_retw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (!dc->cwoe) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Illegal retw instruction (pc = %08x)\n", dc->pc);
+ return XTENSA_OP_ILL;
+ } else {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+
+ gen_helper_test_ill_retw(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ return 0;
+ }
+}
+
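+/*
+ * RETW: clear the WINDOW_START bit of the current window and return to
+ * the address formed from the low 30 bits of a0 and the top two bits
+ * of the current PC.
+ */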
+static void translate_retw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+ tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
+ tcg_gen_andc_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ tcg_gen_movi_i32(tmp, dc->pc);
+ tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
+ gen_helper_retw(cpu_env, cpu_R[0]);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_rfde(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_jump(dc, cpu_SR[dc->config->ndepc ? DEPC : EPC1]);
+}
+
+static void translate_rfe(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ gen_jump(dc, cpu_SR[EPC1]);
+}
+
+static void translate_rfi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + arg[0].imm - 2]);
+ gen_jump(dc, cpu_SR[EPC1 + arg[0].imm - 1]);
+}
+
+static void translate_rfw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_const_i32(1);
+
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
+
+ if (par[0]) {
+ tcg_gen_andc_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ } else {
+ tcg_gen_or_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ }
+
+ tcg_temp_free(tmp);
+ gen_helper_restore_owb(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1]);
+}
+
+static void translate_rotw(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_addi_i32(cpu_windowbase_next, cpu_SR[WINDOW_BASE], arg[0].imm);
+}
+
+static void translate_rsil(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(arg[0].out, cpu_SR[PS]);
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
+ tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], arg[1].imm);
+}
+
+static void translate_rsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (sr_name[par[0]]) {
+ tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
+ } else {
+ tcg_gen_movi_i32(arg[0].out, 0);
+ }
+}
+
+static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_update_ccount(cpu_env);
+ tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
+#endif
+}
+
+static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10);
+ tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]);
+ tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc);
+ tcg_temp_free(tmp);
+#endif
+}
+
+static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ static void (* const helper[])(TCGv_i32 r, TCGv_env env, TCGv_i32 a1,
+ TCGv_i32 a2) = {
+ gen_helper_rtlb0,
+ gen_helper_rtlb1,
+ };
+ TCGv_i32 dtlb = tcg_const_i32(par[0]);
+
+ helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
+ tcg_temp_free(dtlb);
+#endif
+}
+
+static void translate_rptlb0(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_rptlb0(arg[0].out, cpu_env, arg[1].in);
+#endif
+}
+
+static void translate_rptlb1(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_rptlb1(arg[0].out, cpu_env, arg[1].in);
+#endif
+}
+
+static void translate_rur(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(arg[0].out, cpu_UR[par[0]]);
+}
+
+static void translate_setb_expstate(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+    /* TODO: GPIO32 may be part of a coprocessor */
+ tcg_gen_ori_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], 1u << arg[0].imm);
+}
+
+#ifdef CONFIG_USER_ONLY
+static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
+{
+}
+#else
+static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+
+ gen_helper_check_atomctl(cpu_env, tpc, addr);
+ tcg_temp_free(tpc);
+}
+#endif
+
+static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_local_new_i32();
+ TCGv_i32 addr = tcg_temp_local_new_i32();
+ MemOp mop;
+
+ tcg_gen_mov_i32(tmp, arg[0].in);
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
+ gen_check_atomctl(dc, addr);
+ tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
+ tmp, dc->cring, mop);
+ tcg_temp_free(addr);
+ tcg_temp_free(tmp);
+}
+
+static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, mop);
+ tcg_temp_free(addr);
+}
+
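+/*
+ * Exclusive store: succeeds only if the address still matches the
+ * monitored exclusive address and the memory word still holds the
+ * exclusive value (cmpxchg). The success flag is written to ATOMCTL
+ * bit 8 and the previous flag value is returned in arg[0].out.
+ */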
+static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 prev = tcg_temp_new_i32();
+ TCGv_i32 addr = tcg_temp_local_new_i32();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGLabel *label = gen_new_label();
+ MemOp mop;
+
+ tcg_gen_movi_i32(res, 0);
+ tcg_gen_mov_i32(addr, arg[1].in);
+ mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
+ tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
+ gen_check_exclusive(dc, addr, true);
+ tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
+ arg[0].in, dc->cring, mop);
+ tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
+ prev, cpu_exclusive_val, prev, cpu_exclusive_val);
+ tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+ gen_set_label(label);
+ tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1);
+ tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1);
+ tcg_temp_free(prev);
+ tcg_temp_free(addr);
+ tcg_temp_free(res);
+}
+
+static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_setcond_i32(par[0],
+ arg[0].out,
+ arg[1].in, arg[2].in);
+}
+
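+/*
+ * SEXT: sign-extend arg[1].in from bit arg[2].imm; the common 7- and
+ * 15-bit cases map directly to the byte/halfword extension ops.
+ */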
+static void translate_sext(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ int shift = 31 - arg[2].imm;
+
+ if (shift == 24) {
+ tcg_gen_ext8s_i32(arg[0].out, arg[1].in);
+ } else if (shift == 16) {
+ tcg_gen_ext16s_i32(arg[0].out, arg[1].in);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, arg[1].in, shift);
+ tcg_gen_sari_i32(arg[0].out, tmp, shift);
+ tcg_temp_free(tmp);
+ }
+}
+
+static uint32_t test_exceptions_simcall(DisasContext *dc,
+ const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifdef CONFIG_USER_ONLY
+ bool ill = true;
+#else
+    /* Between RE.2 and RE.3 the simcall opcode became a nop in hardware. */
+ bool ill = dc->config->hw_version <= 250002 && !semihosting_enabled();
+#endif
+ if (ill || !semihosting_enabled()) {
+ qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
+ }
+ return ill ? XTENSA_OP_ILL : 0;
+}
+
+static void translate_simcall(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ if (semihosting_enabled()) {
+ gen_helper_simcall(cpu_env);
+ }
+#endif
+}
+
+/*
+ * Note: 64-bit ops are used here solely because SAR values
+ * may range from 0 to 63.
+ */
+#define gen_shift_reg(cmd, reg) do { \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ tcg_gen_extu_i32_i64(tmp, reg); \
+ tcg_gen_##cmd##_i64(v, v, tmp); \
+ tcg_gen_extrl_i64_i32(arg[0].out, v); \
+ tcg_temp_free_i64(v); \
+ tcg_temp_free_i64(tmp); \
+ } while (0)
+
+#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
+
+static void translate_sll(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (dc->sar_m32_5bit) {
+ tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ TCGv_i32 s = tcg_const_i32(32);
+ tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
+ tcg_gen_andi_i32(s, s, 0x3f);
+ tcg_gen_extu_i32_i64(v, arg[1].in);
+ gen_shift_reg(shl, s);
+ tcg_temp_free(s);
+ }
+}
+
+static void translate_slli(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[2].imm == 32) {
+ qemu_log_mask(LOG_GUEST_ERROR, "slli a%d, a%d, 32 is undefined\n",
+ arg[0].imm, arg[1].imm);
+ }
+ tcg_gen_shli_i32(arg[0].out, arg[1].in, arg[2].imm & 0x1f);
+}
+
+static void translate_sra(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (dc->sar_m32_5bit) {
+ tcg_gen_sar_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(v, arg[1].in);
+ gen_shift(sar);
+ }
+}
+
+static void translate_srai(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_sari_i32(arg[0].out, arg[1].in, arg[2].imm);
+}
+
+static void translate_src(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(v, arg[2].in, arg[1].in);
+ gen_shift(shr);
+}
+
+static void translate_srl(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (dc->sar_m32_5bit) {
+ tcg_gen_shr_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(v, arg[1].in);
+ gen_shift(shr);
+ }
+}
+
+#undef gen_shift
+#undef gen_shift_reg
+
+static void translate_srli(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_shri_i32(arg[0].out, arg[1].in, arg[2].imm);
+}
+
+static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, arg[0].in, 3);
+ gen_left_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, arg[0].in, 3);
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_ssai(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void translate_ssl(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_left_shift_sar(dc, arg[0].in);
+}
+
+static void translate_ssr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_right_shift_sar(dc, arg[0].in);
+}
+
+static void translate_sub(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_sub_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_subx(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
+ tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in);
+ tcg_temp_free(tmp);
+}
+
+static void translate_waiti(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_waiti(dc, arg[0].imm);
+#endif
+}
+
+static void translate_wtlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 dtlb = tcg_const_i32(par[0]);
+
+ gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
+ tcg_temp_free(dtlb);
+#endif
+}
+
+static void translate_wptlb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_wptlb(cpu_env, arg[0].in, arg[1].in);
+#endif
+}
+
+static void translate_wer(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_wer(cpu_env, arg[0].in, arg[1].in);
+}
+
+static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+    /* TODO: GPIO32 may be part of a coprocessor */
+ tcg_gen_and_i32(cpu_UR[EXPSTATE], arg[0].in, arg[1].in);
+}
+
+static void translate_wsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (sr_name[par[0]]) {
+ tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
+ }
+}
+
+static void translate_wsr_mask(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (sr_name[par[0]]) {
+ tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, par[2]);
+ }
+}
+
+static void translate_wsr_acchi(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_ext8s_i32(cpu_SR[par[0]], arg[0].in);
+}
+
+static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ uint32_t id = par[0] - CCOMPARE;
+ TCGv_i32 tmp = tcg_const_i32(id);
+
+ assert(id < dc->config->nccompare);
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
+ gen_helper_update_ccompare(cpu_env, tmp);
+ tcg_temp_free(tmp);
+#endif
+}
+
+static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_wsr_ccount(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_dbreaka(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ unsigned id = par[0] - DBREAKA;
+ TCGv_i32 tmp = tcg_const_i32(id);
+
+ assert(id < dc->config->ndbreak);
+ gen_helper_wsr_dbreaka(cpu_env, tmp, arg[0].in);
+ tcg_temp_free(tmp);
+#endif
+}
+
+static void translate_wsr_dbreakc(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ unsigned id = par[0] - DBREAKC;
+ TCGv_i32 tmp = tcg_const_i32(id);
+
+ assert(id < dc->config->ndbreak);
+ gen_helper_wsr_dbreakc(cpu_env, tmp, arg[0].in);
+ tcg_temp_free(tmp);
+#endif
+}
+
+static void translate_wsr_ibreaka(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ unsigned id = par[0] - IBREAKA;
+ TCGv_i32 tmp = tcg_const_i32(id);
+
+ assert(id < dc->config->nibreak);
+ gen_helper_wsr_ibreaka(cpu_env, tmp, arg[0].in);
+ tcg_temp_free(tmp);
+#endif
+}
+
+static void translate_wsr_ibreakenable(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_wsr_ibreakenable(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_icount(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ if (dc->icount) {
+ tcg_gen_mov_i32(dc->next_icount, arg[0].in);
+ } else {
+ tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
+ }
+#endif
+}
+
+static void translate_wsr_intclear(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_intclear(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_intset(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_intset(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_memctl(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_wsr_memctl(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_mpuenb(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_wsr_mpuenb(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_ps(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
+ PS_UM | PS_EXCM | PS_INTLEVEL;
+
+ if (option_enabled(dc, XTENSA_OPTION_MMU) ||
+ option_enabled(dc, XTENSA_OPTION_MPU)) {
+ mask |= PS_RING;
+ }
+ tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, mask);
+#endif
+}
+
+static void translate_wsr_rasid(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ gen_helper_wsr_rasid(cpu_env, arg[0].in);
+#endif
+}
+
+static void translate_wsr_sar(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, 0x3f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+}
+
+static void translate_wsr_windowbase(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_mov_i32(cpu_windowbase_next, arg[0].in);
+#endif
+}
+
+static void translate_wsr_windowstart(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in,
+ (1 << dc->config->nareg / 4) - 1);
+#endif
+}
+
+static void translate_wur(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i32(cpu_UR[par[0]], arg[0].in);
+}
+
+static void translate_xor(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_xor_i32(arg[0].out, arg[1].in, arg[2].in);
+}
+
+static void translate_xsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (sr_name[par[0]]) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, arg[0].in);
+ tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
+ tcg_gen_mov_i32(cpu_SR[par[0]], tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_movi_i32(arg[0].out, 0);
+ }
+}
+
+static void translate_xsr_mask(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (sr_name[par[0]]) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, arg[0].in);
+ tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
+ tcg_gen_andi_i32(cpu_SR[par[0]], tmp, par[2]);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_movi_i32(arg[0].out, 0);
+ }
+}
+
+static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+#ifndef CONFIG_USER_ONLY
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ gen_helper_update_ccount(cpu_env);
+ tcg_gen_mov_i32(tmp, cpu_SR[par[0]]);
+ gen_helper_wsr_ccount(cpu_env, arg[0].in);
+ tcg_gen_mov_i32(arg[0].out, tmp);
+ tcg_temp_free(tmp);
+
+#endif
+}
+
+#define gen_translate_xsr(name) \
+ static void translate_xsr_##name(DisasContext *dc, const OpcodeArg arg[], \
+ const uint32_t par[]) \
+{ \
+ TCGv_i32 tmp = tcg_temp_new_i32(); \
+ \
+ if (sr_name[par[0]]) { \
+ tcg_gen_mov_i32(tmp, cpu_SR[par[0]]); \
+ } else { \
+ tcg_gen_movi_i32(tmp, 0); \
+ } \
+ translate_wsr_##name(dc, arg, par); \
+ tcg_gen_mov_i32(arg[0].out, tmp); \
+ tcg_temp_free(tmp); \
+}
+
+gen_translate_xsr(acchi)
+gen_translate_xsr(ccompare)
+gen_translate_xsr(dbreaka)
+gen_translate_xsr(dbreakc)
+gen_translate_xsr(ibreaka)
+gen_translate_xsr(ibreakenable)
+gen_translate_xsr(icount)
+gen_translate_xsr(memctl)
+gen_translate_xsr(mpuenb)
+gen_translate_xsr(ps)
+gen_translate_xsr(rasid)
+gen_translate_xsr(sar)
+gen_translate_xsr(windowbase)
+gen_translate_xsr(windowstart)
+
+#undef gen_translate_xsr
+
+static const XtensaOpcodeOps core_ops[] = {
+ {
+ .name = "abs",
+ .translate = translate_abs,
+ }, {
+ .name = (const char * const[]) {
+ "add", "add.n", NULL,
+ },
+ .translate = translate_add,
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "addi", "addi.n", NULL,
+ },
+ .translate = translate_addi,
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "addmi",
+ .translate = translate_addi,
+ }, {
+ .name = "addx2",
+ .translate = translate_addx,
+ .par = (const uint32_t[]){1},
+ }, {
+ .name = "addx4",
+ .translate = translate_addx,
+ .par = (const uint32_t[]){2},
+ }, {
+ .name = "addx8",
+ .translate = translate_addx,
+ .par = (const uint32_t[]){3},
+ }, {
+ .name = "all4",
+ .translate = translate_all,
+ .par = (const uint32_t[]){true, 4},
+ }, {
+ .name = "all8",
+ .translate = translate_all,
+ .par = (const uint32_t[]){true, 8},
+ }, {
+ .name = "and",
+ .translate = translate_and,
+ }, {
+ .name = "andb",
+ .translate = translate_boolean,
+ .par = (const uint32_t[]){BOOLEAN_AND},
+ }, {
+ .name = "andbc",
+ .translate = translate_boolean,
+ .par = (const uint32_t[]){BOOLEAN_ANDC},
+ }, {
+ .name = "any4",
+ .translate = translate_all,
+ .par = (const uint32_t[]){false, 4},
+ }, {
+ .name = "any8",
+ .translate = translate_all,
+ .par = (const uint32_t[]){false, 8},
+ }, {
+ .name = (const char * const[]) {
+ "ball", "ball.w15", "ball.w18", NULL,
+ },
+ .translate = translate_ball,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bany", "bany.w15", "bany.w18", NULL,
+ },
+ .translate = translate_bany,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bbc", "bbc.w15", "bbc.w18", NULL,
+ },
+ .translate = translate_bb,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bbci", "bbci.w15", "bbci.w18", NULL,
+ },
+ .translate = translate_bbi,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bbs", "bbs.w15", "bbs.w18", NULL,
+ },
+ .translate = translate_bb,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bbsi", "bbsi.w15", "bbsi.w18", NULL,
+ },
+ .translate = translate_bbi,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "beq", "beq.w15", "beq.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "beqi", "beqi.w15", "beqi.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "beqz", "beqz.n", "beqz.w15", "beqz.w18", NULL,
+ },
+ .translate = translate_bz,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "bf",
+ .translate = translate_bp,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ }, {
+ .name = (const char * const[]) {
+ "bge", "bge.w15", "bge.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bgei", "bgei.w15", "bgei.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bgeu", "bgeu.w15", "bgeu.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_GEU},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bgeui", "bgeui.w15", "bgeui.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_GEU},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bgez", "bgez.w15", "bgez.w18", NULL,
+ },
+ .translate = translate_bz,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "blt", "blt.w15", "blt.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "blti", "blti.w15", "blti.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bltu", "bltu.w15", "bltu.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_LTU},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bltui", "bltui.w15", "bltui.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_LTU},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bltz", "bltz.w15", "bltz.w18", NULL,
+ },
+ .translate = translate_bz,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bnall", "bnall.w15", "bnall.w18", NULL,
+ },
+ .translate = translate_ball,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bne", "bne.w15", "bne.w18", NULL,
+ },
+ .translate = translate_b,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bnei", "bnei.w15", "bnei.w18", NULL,
+ },
+ .translate = translate_bi,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bnez", "bnez.n", "bnez.w15", "bnez.w18", NULL,
+ },
+ .translate = translate_bz,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "bnone", "bnone.w15", "bnone.w18", NULL,
+ },
+ .translate = translate_bany,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
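+ /*
+  * break/break.n translate to nops; XTENSA_OP_DEBUG_BREAK makes the
+  * core raise a debug exception instead when debugging is enabled,
+  * with .par[0] (DEBUGCAUSE_BI/DEBUGCAUSE_BN) as the recorded cause.
+  */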
+ .name = "break",
+ .translate = translate_nop,
+ .par = (const uint32_t[]){DEBUGCAUSE_BI},
+ .op_flags = XTENSA_OP_DEBUG_BREAK,
+ }, {
+ .name = "break.n",
+ .translate = translate_nop,
+ .par = (const uint32_t[]){DEBUGCAUSE_BN},
+ .op_flags = XTENSA_OP_DEBUG_BREAK,
+ }, {
+ .name = "bt",
+ .translate = translate_bp,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ }, {
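+ /*
+  * Windowed calls: .par[0] is the CALLINC value, the window rotation
+  * in units of four registers, so call4/callx4 pass 1, call8/callx8
+  * pass 2 and call12/callx12 pass 3.
+  */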
+ .name = "call0",
+ .translate = translate_call0,
+ }, {
+ .name = "call12",
+ .translate = translate_callw,
+ .par = (const uint32_t[]){3},
+ }, {
+ .name = "call4",
+ .translate = translate_callw,
+ .par = (const uint32_t[]){1},
+ }, {
+ .name = "call8",
+ .translate = translate_callw,
+ .par = (const uint32_t[]){2},
+ }, {
+ .name = "callx0",
+ .translate = translate_callx0,
+ }, {
+ .name = "callx12",
+ .translate = translate_callxw,
+ .par = (const uint32_t[]){3},
+ }, {
+ .name = "callx4",
+ .translate = translate_callxw,
+ .par = (const uint32_t[]){1},
+ }, {
+ .name = "callx8",
+ .translate = translate_callxw,
+ .par = (const uint32_t[]){2},
+ }, {
+ .name = "clamps",
+ .translate = translate_clamps,
+ }, {
+ .name = "clrb_expstate",
+ .translate = translate_clrb_expstate,
+ }, {
+ .name = "clrex",
+ .translate = translate_clrex,
+ }, {
+ .name = "const16",
+ .translate = translate_const16,
+ }, {
+ .name = "depbits",
+ .translate = translate_depbits,
+ }, {
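+ /*
+  * Data cache ops: QEMU does not model the caches, so entries that
+  * may take an MMU fault go through translate_dcache (which only
+  * probes the address) and everything else, including all the
+  * prefetch variants, is a nop.
+  */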
+ .name = "dhi",
+ .translate = translate_dcache,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "dhi.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dhu",
+ .translate = translate_dcache,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "dhwb",
+ .translate = translate_dcache,
+ }, {
+ .name = "dhwb.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dhwbi",
+ .translate = translate_dcache,
+ }, {
+ .name = "dhwbi.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dii",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "diu",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "diwb",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "diwbi",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "diwbui.p",
+ .translate = translate_diwbuip,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "dpfl",
+ .translate = translate_dcache,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "dpfm.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfm.bf",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfr",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfr.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfr.bf",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfro",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfw",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfw.b",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfw.bf",
+ .translate = translate_nop,
+ }, {
+ .name = "dpfwo",
+ .translate = translate_nop,
+ }, {
+ .name = "dsync",
+ .translate = translate_nop,
+ }, {
+ .name = "entry",
+ .translate = translate_entry,
+ .test_exceptions = test_exceptions_entry,
+ .test_overflow = test_overflow_entry,
+ .op_flags = XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_SYNC_REGISTER_WINDOW,
+ }, {
+ .name = "esync",
+ .translate = translate_nop,
+ }, {
+ .name = "excw",
+ .translate = translate_nop,
+ }, {
+ .name = "extui",
+ .translate = translate_extui,
+ }, {
+ .name = "extw",
+ .translate = translate_memw,
+ }, {
+ .name = "getex",
+ .translate = translate_getex,
+ }, {
+ .name = "hwwdtlba",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "hwwitlba",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
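+ /*
+  * idtlb/iitlb invalidate a TLB entry; .par[0] selects the data TLB
+  * (true) or the instruction TLB (false). The TB is exited because
+  * the mapping for the code being executed may have changed.
+  */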
+ .name = "idtlb",
+ .translate = translate_itlb,
+ .par = (const uint32_t[]){true},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "ihi",
+ .translate = translate_icache,
+ }, {
+ .name = "ihu",
+ .translate = translate_icache,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "iii",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "iitlb",
+ .translate = translate_itlb,
+ .par = (const uint32_t[]){false},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "iiu",
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = (const char * const[]) {
+ "ill", "ill.n", NULL,
+ },
+ .op_flags = XTENSA_OP_ILL | XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "ipf",
+ .translate = translate_nop,
+ }, {
+ .name = "ipfl",
+ .translate = translate_icache,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "isync",
+ .translate = translate_nop,
+ }, {
+ .name = "j",
+ .translate = translate_j,
+ }, {
+ .name = "jx",
+ .translate = translate_jx,
+ }, {
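+ /*
+  * Plain loads/stores share translate_ldst with
+  * .par = {memop, barrier, is_store}: the barrier flag makes l32ai
+  * a load-acquire and s32ri (further down) a store-release.
+  */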
+ .name = "l16si",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TESW, false, false},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "l16ui",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUW, false, false},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "l32ai",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUL | MO_ALIGN, true, false},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "l32e",
+ .translate = translate_l32e,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_LOAD,
+ }, {
+ .name = "l32ex",
+ .translate = translate_l32ex,
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = (const char * const[]) {
+ "l32i", "l32i.n", NULL,
+ },
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUL, false, false},
+ .op_flags = XTENSA_OP_NAME_ARRAY | XTENSA_OP_LOAD,
+ }, {
+ .name = "l32r",
+ .translate = translate_l32r,
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "l8ui",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_UB, false, false},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_NONE, 0, -4},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_NONE, 0, 4},
+ .op_flags = XTENSA_OP_LOAD,
+ }, {
+ .name = "ldpte",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
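+ /*
+  * Zero-overhead loops: .par[0] is the condition on the trip count
+  * under which the body is entered; TCG_COND_NEVER marks the plain
+  * unconditional loop, which has no skip test.
+  */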
+ .name = (const char * const[]) {
+ "loop", "loop.w15", NULL,
+ },
+ .translate = translate_loop,
+ .par = (const uint32_t[]){TCG_COND_NEVER},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "loopgtz", "loopgtz.w15", NULL,
+ },
+ .translate = translate_loop,
+ .par = (const uint32_t[]){TCG_COND_GT},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "loopnez", "loopnez.w15", NULL,
+ },
+ .translate = translate_loop,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "max",
+ .translate = translate_smax,
+ }, {
+ .name = "maxu",
+ .translate = translate_umax,
+ }, {
+ .name = "memw",
+ .translate = translate_memw,
+ }, {
+ .name = "min",
+ .translate = translate_smin,
+ }, {
+ .name = "minu",
+ .translate = translate_umin,
+ }, {
+ .name = (const char * const[]) {
+ "mov", "mov.n", NULL,
+ },
+ .translate = translate_mov,
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "moveqz",
+ .translate = translate_movcond,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ }, {
+ .name = "movf",
+ .translate = translate_movp,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ }, {
+ .name = "movgez",
+ .translate = translate_movcond,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ }, {
+ .name = "movi",
+ .translate = translate_movi,
+ }, {
+ .name = "movi.n",
+ .translate = translate_movi,
+ }, {
+ .name = "movltz",
+ .translate = translate_movcond,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ }, {
+ .name = "movnez",
+ .translate = translate_movcond,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ }, {
+ .name = "movsp",
+ .translate = translate_movsp,
+ .op_flags = XTENSA_OP_ALLOCA,
+ }, {
+ .name = "movt",
+ .translate = translate_movp,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ }, {
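+ /*
+  * MAC16: .par is {operation, half selection, address step}.
+  * MAC16_HH..MAC16_LL pick the high/low 16-bit halves of the two
+  * operands; the fused .ldinc/.lddec forms post-update the address
+  * register by +4/-4.
+  */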
+ .name = "mul.aa.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0},
+ }, {
+ .name = "mul.aa.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0},
+ }, {
+ .name = "mul.aa.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0},
+ }, {
+ .name = "mul.aa.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0},
+ }, {
+ .name = "mul.ad.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0},
+ }, {
+ .name = "mul.ad.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0},
+ }, {
+ .name = "mul.ad.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0},
+ }, {
+ .name = "mul.ad.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0},
+ }, {
+ .name = "mul.da.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0},
+ }, {
+ .name = "mul.da.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0},
+ }, {
+ .name = "mul.da.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0},
+ }, {
+ .name = "mul.da.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0},
+ }, {
+ .name = "mul.dd.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0},
+ }, {
+ .name = "mul.dd.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0},
+ }, {
+ .name = "mul.dd.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0},
+ }, {
+ .name = "mul.dd.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0},
+ }, {
+ .name = "mul16s",
+ .translate = translate_mul16,
+ .par = (const uint32_t[]){true},
+ }, {
+ .name = "mul16u",
+ .translate = translate_mul16,
+ .par = (const uint32_t[]){false},
+ }, {
+ .name = "mula.aa.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0},
+ }, {
+ .name = "mula.aa.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0},
+ }, {
+ .name = "mula.aa.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0},
+ }, {
+ .name = "mula.aa.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0},
+ }, {
+ .name = "mula.ad.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0},
+ }, {
+ .name = "mula.ad.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0},
+ }, {
+ .name = "mula.ad.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0},
+ }, {
+ .name = "mula.ad.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0},
+ }, {
+ .name = "mula.da.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0},
+ }, {
+ .name = "mula.da.hh.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, -4},
+ }, {
+ .name = "mula.da.hh.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 4},
+ }, {
+ .name = "mula.da.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0},
+ }, {
+ .name = "mula.da.hl.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, -4},
+ }, {
+ .name = "mula.da.hl.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 4},
+ }, {
+ .name = "mula.da.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0},
+ }, {
+ .name = "mula.da.lh.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, -4},
+ }, {
+ .name = "mula.da.lh.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 4},
+ }, {
+ .name = "mula.da.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0},
+ }, {
+ .name = "mula.da.ll.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, -4},
+ }, {
+ .name = "mula.da.ll.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 4},
+ }, {
+ .name = "mula.dd.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0},
+ }, {
+ .name = "mula.dd.hh.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, -4},
+ }, {
+ .name = "mula.dd.hh.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 4},
+ }, {
+ .name = "mula.dd.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0},
+ }, {
+ .name = "mula.dd.hl.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, -4},
+ }, {
+ .name = "mula.dd.hl.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 4},
+ }, {
+ .name = "mula.dd.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0},
+ }, {
+ .name = "mula.dd.lh.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, -4},
+ }, {
+ .name = "mula.dd.lh.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 4},
+ }, {
+ .name = "mula.dd.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0},
+ }, {
+ .name = "mula.dd.ll.lddec",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, -4},
+ }, {
+ .name = "mula.dd.ll.ldinc",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 4},
+ }, {
+ .name = "mull",
+ .translate = translate_mull,
+ }, {
+ .name = "muls.aa.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0},
+ }, {
+ .name = "muls.aa.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0},
+ }, {
+ .name = "muls.aa.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0},
+ }, {
+ .name = "muls.aa.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0},
+ }, {
+ .name = "muls.ad.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0},
+ }, {
+ .name = "muls.ad.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0},
+ }, {
+ .name = "muls.ad.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0},
+ }, {
+ .name = "muls.ad.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0},
+ }, {
+ .name = "muls.da.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0},
+ }, {
+ .name = "muls.da.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0},
+ }, {
+ .name = "muls.da.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0},
+ }, {
+ .name = "muls.da.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0},
+ }, {
+ .name = "muls.dd.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0},
+ }, {
+ .name = "muls.dd.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0},
+ }, {
+ .name = "muls.dd.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0},
+ }, {
+ .name = "muls.dd.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0},
+ }, {
+ .name = "mulsh",
+ .translate = translate_mulh,
+ .par = (const uint32_t[]){true},
+ }, {
+ .name = "muluh",
+ .translate = translate_mulh,
+ .par = (const uint32_t[]){false},
+ }, {
+ .name = "neg",
+ .translate = translate_neg,
+ }, {
+ .name = (const char * const[]) {
+ "nop", "nop.n", NULL,
+ },
+ .translate = translate_nop,
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = "nsa",
+ .translate = translate_nsa,
+ }, {
+ .name = "nsau",
+ .translate = translate_nsau,
+ }, {
+ .name = "or",
+ .translate = translate_or,
+ }, {
+ .name = "orb",
+ .translate = translate_boolean,
+ .par = (const uint32_t[]){BOOLEAN_OR},
+ }, {
+ .name = "orbc",
+ .translate = translate_boolean,
+ .par = (const uint32_t[]){BOOLEAN_ORC},
+ }, {
+ .name = "pdtlb",
+ .translate = translate_ptlb,
+ .par = (const uint32_t[]){true},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "pfend.a",
+ .translate = translate_nop,
+ }, {
+ .name = "pfend.o",
+ .translate = translate_nop,
+ }, {
+ .name = "pfnxt.f",
+ .translate = translate_nop,
+ }, {
+ .name = "pfwait.a",
+ .translate = translate_nop,
+ }, {
+ .name = "pfwait.r",
+ .translate = translate_nop,
+ }, {
+ .name = "pitlb",
+ .translate = translate_ptlb,
+ .par = (const uint32_t[]){false},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "pptlb",
+ .translate = translate_pptlb,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
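+ /*
+  * Signed division: translate_quos yields the quotient when .par[0]
+  * is true and the remainder when it is false (see rems below);
+  * XTENSA_OP_DIVIDE_BY_ZERO requests the divide-by-zero check.
+  */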
+ .name = "quos",
+ .translate = translate_quos,
+ .par = (const uint32_t[]){true},
+ .op_flags = XTENSA_OP_DIVIDE_BY_ZERO,
+ }, {
+ .name = "quou",
+ .translate = translate_quou,
+ .op_flags = XTENSA_OP_DIVIDE_BY_ZERO,
+ }, {
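+ /*
+  * TLB reads: .par is {dtlb, field}, selecting which TLB to read
+  * and which of the entry's two result words (0 or 1) to return.
+  */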
+ .name = "rdtlb0",
+ .translate = translate_rtlb,
+ .par = (const uint32_t[]){true, 0},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rdtlb1",
+ .translate = translate_rtlb,
+ .par = (const uint32_t[]){true, 1},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "read_impwire",
+ .translate = translate_read_impwire,
+ }, {
+ .name = "rems",
+ .translate = translate_quos,
+ .par = (const uint32_t[]){false},
+ .op_flags = XTENSA_OP_DIVIDE_BY_ZERO,
+ }, {
+ .name = "remu",
+ .translate = translate_remu,
+ .op_flags = XTENSA_OP_DIVIDE_BY_ZERO,
+ }, {
+ .name = "rer",
+ .translate = translate_rer,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = (const char * const[]) {
+ "ret", "ret.n", NULL,
+ },
+ .translate = translate_ret,
+ .op_flags = XTENSA_OP_NAME_ARRAY,
+ }, {
+ .name = (const char * const[]) {
+ "retw", "retw.n", NULL,
+ },
+ .translate = translate_retw,
+ .test_exceptions = test_exceptions_retw,
+ .op_flags = XTENSA_OP_UNDERFLOW | XTENSA_OP_NAME_ARRAY,
+ }, {
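+ /*
+  * Exception returns: rfe/rfi/rfw* restore PS, so they are
+  * privileged and must re-check pending interrupts. rfdd/rfdo
+  * belong to on-chip debug and are treated as illegal here.
+  */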
+ .name = "rfdd",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "rfde",
+ .translate = translate_rfde,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rfdo",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "rfe",
+ .translate = translate_rfe,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "rfi",
+ .translate = translate_rfi,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "rfwo",
+ .translate = translate_rfw,
+ .par = (const uint32_t[]){true},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "rfwu",
+ .translate = translate_rfw,
+ .par = (const uint32_t[]){false},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "ritlb0",
+ .translate = translate_rtlb,
+ .par = (const uint32_t[]){false, 0},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "ritlb1",
+ .translate = translate_rtlb,
+ .par = (const uint32_t[]){false, 1},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rptlb0",
+ .translate = translate_rptlb0,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rptlb1",
+ .translate = translate_rptlb1,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rotw",
+ .translate = translate_rotw,
+ .op_flags = XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_SYNC_REGISTER_WINDOW,
+ }, {
+ .name = "rsil",
+ .translate = translate_rsil,
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
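+ /*
+  * RSR: .par is {SR number, config option}; test_exceptions_sr
+  * raises an illegal-instruction exception when that option is not
+  * configured. rsr.176/rsr.208 name their registers only by number.
+  * Reads that depend on the cycle counter (ccount, interrupt,
+  * intset) go through translate_rsr_ccount to update it first.
+  */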
+ .name = "rsr.176",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){176},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.208",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){208},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.acchi",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCHI,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.acclo",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCLO,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.atomctl",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ATOMCTL,
+ XTENSA_OPTION_ATOMCTL,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.br",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ BR,
+ XTENSA_OPTION_BOOLEAN,
+ },
+ }, {
+ .name = "rsr.cacheadrdis",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEADRDIS,
+ XTENSA_OPTION_MPU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.cacheattr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEATTR,
+ XTENSA_OPTION_CACHEATTR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ccompare0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ccompare1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 1,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ccompare2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 2,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ccount",
+ .translate = translate_rsr_ccount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CCOUNT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "rsr.configid0",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){CONFIGID0},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.configid1",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){CONFIGID1},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.cpenable",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CPENABLE,
+ XTENSA_OPTION_COPROCESSOR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.dbreaka0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.dbreaka1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.dbreakc0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.dbreakc1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ddr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DDR,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.debugcause",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DEBUGCAUSE,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.depc",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DEPC,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.dtlbcfg",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DTLBCFG,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EPC1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc3",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc4",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc5",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc6",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.epc7",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps3",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps4",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps5",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps6",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eps7",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.eraccess",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){ERACCESS},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.exccause",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCCAUSE,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCSAVE1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave3",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave4",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave5",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave6",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excsave7",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.excvaddr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCVADDR,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ibreaka0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ibreaka1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ibreakenable",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ IBREAKENABLE,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.icount",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNT,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.icountlevel",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNTLEVEL,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.intclear",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTCLEAR,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.intenable",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTENABLE,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.interrupt",
+ .translate = translate_rsr_ccount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTSET,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "rsr.intset",
+ .translate = translate_rsr_ccount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTSET,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "rsr.itlbcfg",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ITLBCFG,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.lbeg",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LBEG,
+ XTENSA_OPTION_LOOP,
+ },
+ }, {
+ .name = "rsr.lcount",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LCOUNT,
+ XTENSA_OPTION_LOOP,
+ },
+ }, {
+ .name = "rsr.lend",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LEND,
+ XTENSA_OPTION_LOOP,
+ },
+ }, {
+ .name = "rsr.litbase",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LITBASE,
+ XTENSA_OPTION_EXTENDED_L32R,
+ },
+ }, {
+ .name = "rsr.m0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.m1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 1,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.m2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 2,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.m3",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 3,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "rsr.memctl",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){MEMCTL},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mecr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MECR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mepc",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPC,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.meps",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPS,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mesave",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESAVE,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mesr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mevaddr",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEVADDR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.misc0",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.misc1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 1,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.misc2",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 2,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.misc3",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 3,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mpucfg",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MPUCFG,
+ XTENSA_OPTION_MPU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.mpuenb",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MPUENB,
+ XTENSA_OPTION_MPU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.prefctl",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){PREFCTL},
+ }, {
+ .name = "rsr.prid",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PRID,
+ XTENSA_OPTION_PROCESSOR_ID,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ps",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PS,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.ptevaddr",
+ .translate = translate_rsr_ptevaddr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PTEVADDR,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.rasid",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ RASID,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.sar",
+ .translate = translate_rsr,
+ .par = (const uint32_t[]){SAR},
+ }, {
+ .name = "rsr.scompare1",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ SCOMPARE1,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ },
+ }, {
+ .name = "rsr.vecbase",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ VECBASE,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.windowbase",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_BASE,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsr.windowstart",
+ .translate = translate_rsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_START,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "rsync",
+ .translate = translate_nop,
+ }, {
+ .name = "rur.expstate",
+ .translate = translate_rur,
+ .par = (const uint32_t[]){EXPSTATE},
+ }, {
+ .name = "rur.threadptr",
+ .translate = translate_rur,
+ .par = (const uint32_t[]){THREADPTR},
+ }, {
+ .name = "s16i",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUW, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ }, {
+ .name = "s32c1i",
+ .translate = translate_s32c1i,
+ .op_flags = XTENSA_OP_LOAD | XTENSA_OP_STORE,
+ }, {
+ .name = "s32e",
+ .translate = translate_s32e,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_STORE,
+ }, {
+ .name = "s32ex",
+ .translate = translate_s32ex,
+ .op_flags = XTENSA_OP_LOAD | XTENSA_OP_STORE,
+ }, {
+ .name = (const char * const[]) {
+ "s32i", "s32i.n", "s32nb", NULL,
+ },
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUL, false, true},
+ .op_flags = XTENSA_OP_NAME_ARRAY | XTENSA_OP_STORE,
+ }, {
+ .name = "s32ri",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_TEUL | MO_ALIGN, true, true},
+ .op_flags = XTENSA_OP_STORE,
+ }, {
+ .name = "s8i",
+ .translate = translate_ldst,
+ .par = (const uint32_t[]){MO_UB, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ }, {
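+ /*
+  * salt/saltu set the destination to 1 when the signed/unsigned
+  * comparison in .par[0] holds and to 0 otherwise.
+  */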
+ .name = "salt",
+ .translate = translate_salt,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ }, {
+ .name = "saltu",
+ .translate = translate_salt,
+ .par = (const uint32_t[]){TCG_COND_LTU},
+ }, {
+ .name = "setb_expstate",
+ .translate = translate_setb_expstate,
+ }, {
+ .name = "sext",
+ .translate = translate_sext,
+ }, {
+ .name = "simcall",
+ .translate = translate_simcall,
+ .test_exceptions = test_exceptions_simcall,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "sll",
+ .translate = translate_sll,
+ }, {
+ .name = "slli",
+ .translate = translate_slli,
+ }, {
+ .name = "sra",
+ .translate = translate_sra,
+ }, {
+ .name = "srai",
+ .translate = translate_srai,
+ }, {
+ .name = "src",
+ .translate = translate_src,
+ }, {
+ .name = "srl",
+ .translate = translate_srl,
+ }, {
+ .name = "srli",
+ .translate = translate_srli,
+ }, {
+ .name = "ssa8b",
+ .translate = translate_ssa8b,
+ }, {
+ .name = "ssa8l",
+ .translate = translate_ssa8l,
+ }, {
+ .name = "ssai",
+ .translate = translate_ssai,
+ }, {
+ .name = "ssl",
+ .translate = translate_ssl,
+ }, {
+ .name = "ssr",
+ .translate = translate_ssr,
+ }, {
+ .name = "sub",
+ .translate = translate_sub,
+ }, {
+ .name = "subx2",
+ .translate = translate_subx,
+ .par = (const uint32_t[]){1},
+ }, {
+ .name = "subx4",
+ .translate = translate_subx,
+ .par = (const uint32_t[]){2},
+ }, {
+ .name = "subx8",
+ .translate = translate_subx,
+ .par = (const uint32_t[]){3},
+ }, {
+ .name = "syscall",
+ .op_flags = XTENSA_OP_SYSCALL,
+ }, {
+ .name = "umul.aa.hh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_UMUL, MAC16_HH, 0},
+ }, {
+ .name = "umul.aa.hl",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_UMUL, MAC16_HL, 0},
+ }, {
+ .name = "umul.aa.lh",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_UMUL, MAC16_LH, 0},
+ }, {
+ .name = "umul.aa.ll",
+ .translate = translate_mac16,
+ .par = (const uint32_t[]){MAC16_UMUL, MAC16_LL, 0},
+ }, {
+ .name = "waiti",
+ .translate = translate_waiti,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wdtlb",
+ .translate = translate_wtlb,
+ .par = (const uint32_t[]){true},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wer",
+ .translate = translate_wer,
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "witlb",
+ .translate = translate_wtlb,
+ .par = (const uint32_t[]){false},
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wptlb",
+ .translate = translate_wptlb,
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wrmsk_expstate",
+ .translate = translate_wrmsk_expstate,
+ }, {
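+ /*
+  * WSR: .par is {SR number, config option} as for RSR, plus a third
+  * element for translate_wsr_mask giving the writable-bits mask.
+  * Writes that affect control flow or interrupt state also carry
+  * EXIT_TB/CHECK_INTERRUPTS flags; wsr.176/wsr.208 are not
+  * writable and decode as illegal.
+  */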
+ .name = "wsr.176",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.208",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.acchi",
+ .translate = translate_wsr_acchi,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCHI,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.acclo",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCLO,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.atomctl",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ATOMCTL,
+ XTENSA_OPTION_ATOMCTL,
+ 0x3f,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.br",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ BR,
+ XTENSA_OPTION_BOOLEAN,
+ 0xffff,
+ },
+ }, {
+ .name = "wsr.cacheadrdis",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEADRDIS,
+ XTENSA_OPTION_MPU,
+ 0xff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.cacheattr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEATTR,
+ XTENSA_OPTION_CACHEATTR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.ccompare0",
+ .translate = translate_wsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.ccompare1",
+ .translate = translate_wsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 1,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.ccompare2",
+ .translate = translate_wsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 2,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.ccount",
+ .translate = translate_wsr_ccount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CCOUNT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.configid0",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.configid1",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.cpenable",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CPENABLE,
+ XTENSA_OPTION_COPROCESSOR,
+ 0xff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.dbreaka0",
+ .translate = translate_wsr_dbreaka,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.dbreaka1",
+ .translate = translate_wsr_dbreaka,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.dbreakc0",
+ .translate = translate_wsr_dbreakc,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.dbreakc1",
+ .translate = translate_wsr_dbreakc,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.ddr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DDR,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.debugcause",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.depc",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DEPC,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.dtlbcfg",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DTLBCFG,
+ XTENSA_OPTION_MMU,
+ 0x01130000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc1",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EPC1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc2",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc3",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc4",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc5",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc6",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.epc7",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps2",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps3",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps4",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps5",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps6",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eps7",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.eraccess",
+ .translate = translate_wsr_mask,
+ .par = (const uint32_t[]){
+ ERACCESS,
+ 0,
+ 0xffff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.exccause",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCCAUSE,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave1",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCSAVE1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave2",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave3",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave4",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave5",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave6",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excsave7",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.excvaddr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCVADDR,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.ibreaka0",
+ .translate = translate_wsr_ibreaka,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.ibreaka1",
+ .translate = translate_wsr_ibreaka,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.ibreakenable",
+ .translate = translate_wsr_ibreakenable,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ IBREAKENABLE,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "wsr.icount",
+ .translate = translate_wsr_icount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNT,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.icountlevel",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNTLEVEL,
+ XTENSA_OPTION_DEBUG,
+ 0xf,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.intclear",
+ .translate = translate_wsr_intclear,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTCLEAR,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "wsr.intenable",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTENABLE,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "wsr.interrupt",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTSET,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "wsr.intset",
+ .translate = translate_wsr_intset,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTSET,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "wsr.itlbcfg",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ITLBCFG,
+ XTENSA_OPTION_MMU,
+ 0x01130000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.lbeg",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LBEG,
+ XTENSA_OPTION_LOOP,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.lcount",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LCOUNT,
+ XTENSA_OPTION_LOOP,
+ },
+ }, {
+ .name = "wsr.lend",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LEND,
+ XTENSA_OPTION_LOOP,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.litbase",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LITBASE,
+ XTENSA_OPTION_EXTENDED_L32R,
+ 0xfffff001,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.m0",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.m1",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 1,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.m2",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 2,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.m3",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 3,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "wsr.memctl",
+ .translate = translate_wsr_memctl,
+ .par = (const uint32_t[]){MEMCTL},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mecr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MECR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mepc",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPC,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.meps",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPS,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mesave",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESAVE,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mesr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mevaddr",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.misc0",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.misc1",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 1,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.misc2",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 2,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.misc3",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 3,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mmid",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MMID,
+ XTENSA_OPTION_TRACE_PORT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.mpuenb",
+ .translate = translate_wsr_mpuenb,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MPUENB,
+ XTENSA_OPTION_MPU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.prefctl",
+ .translate = translate_wsr,
+ .par = (const uint32_t[]){PREFCTL},
+ }, {
+ .name = "wsr.prid",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "wsr.ps",
+ .translate = translate_wsr_ps,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PS,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "wsr.ptevaddr",
+ .translate = translate_wsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PTEVADDR,
+ XTENSA_OPTION_MMU,
+ 0xffc00000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.rasid",
+ .translate = translate_wsr_rasid,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ RASID,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wsr.sar",
+ .translate = translate_wsr_sar,
+ .par = (const uint32_t[]){SAR},
+ }, {
+ .name = "wsr.scompare1",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ SCOMPARE1,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ },
+ }, {
+ .name = "wsr.vecbase",
+ .translate = translate_wsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ VECBASE,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "wsr.windowbase",
+ .translate = translate_wsr_windowbase,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_BASE,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_SYNC_REGISTER_WINDOW,
+ }, {
+ .name = "wsr.windowstart",
+ .translate = translate_wsr_windowstart,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_START,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "wur.expstate",
+ .translate = translate_wur,
+ .par = (const uint32_t[]){EXPSTATE},
+ }, {
+ .name = "wur.threadptr",
+ .translate = translate_wur,
+ .par = (const uint32_t[]){THREADPTR},
+ }, {
+ .name = "xor",
+ .translate = translate_xor,
+ }, {
+ .name = "xorb",
+ .translate = translate_boolean,
+ .par = (const uint32_t[]){BOOLEAN_XOR},
+ }, {
+ .name = "xsr.176",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.208",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.acchi",
+ .translate = translate_xsr_acchi,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCHI,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.acclo",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ACCLO,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.atomctl",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ATOMCTL,
+ XTENSA_OPTION_ATOMCTL,
+ 0x3f,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.br",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ BR,
+ XTENSA_OPTION_BOOLEAN,
+ 0xffff,
+ },
+ }, {
+ .name = "xsr.cacheadrdis",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEADRDIS,
+ XTENSA_OPTION_MPU,
+ 0xff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.cacheattr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CACHEATTR,
+ XTENSA_OPTION_CACHEATTR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.ccompare0",
+ .translate = translate_xsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.ccompare1",
+ .translate = translate_xsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 1,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.ccompare2",
+ .translate = translate_xsr_ccompare,
+ .test_exceptions = test_exceptions_ccompare,
+ .par = (const uint32_t[]){
+ CCOMPARE + 2,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.ccount",
+ .translate = translate_xsr_ccount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CCOUNT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.configid0",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.configid1",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.cpenable",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ CPENABLE,
+ XTENSA_OPTION_COPROCESSOR,
+ 0xff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.dbreaka0",
+ .translate = translate_xsr_dbreaka,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.dbreaka1",
+ .translate = translate_xsr_dbreaka,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.dbreakc0",
+ .translate = translate_xsr_dbreakc,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.dbreakc1",
+ .translate = translate_xsr_dbreakc,
+ .test_exceptions = test_exceptions_dbreak,
+ .par = (const uint32_t[]){
+ DBREAKC + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.ddr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DDR,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.debugcause",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.depc",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DEPC,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.dtlbcfg",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ DTLBCFG,
+ XTENSA_OPTION_MMU,
+ 0x01130000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc1",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EPC1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc2",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc3",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc4",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc5",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc6",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.epc7",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPC1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps2",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps3",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps4",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps5",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps6",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eps7",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EPS2 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.eraccess",
+ .translate = translate_xsr_mask,
+ .par = (const uint32_t[]){
+ ERACCESS,
+ 0,
+ 0xffff,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.exccause",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCCAUSE,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave1",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCSAVE1,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave2",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 1,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave3",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 2,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave4",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 3,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave5",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 4,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave6",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 5,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excsave7",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_hpi,
+ .par = (const uint32_t[]){
+ EXCSAVE1 + 6,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.excvaddr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ EXCVADDR,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.ibreaka0",
+ .translate = translate_xsr_ibreaka,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.ibreaka1",
+ .translate = translate_xsr_ibreaka,
+ .test_exceptions = test_exceptions_ibreak,
+ .par = (const uint32_t[]){
+ IBREAKA + 1,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.ibreakenable",
+ .translate = translate_xsr_ibreakenable,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ IBREAKENABLE,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0,
+ }, {
+ .name = "xsr.icount",
+ .translate = translate_xsr_icount,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNT,
+ XTENSA_OPTION_DEBUG,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.icountlevel",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ICOUNTLEVEL,
+ XTENSA_OPTION_DEBUG,
+ 0xf,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.intclear",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.intenable",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ INTENABLE,
+ XTENSA_OPTION_INTERRUPT,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_0 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "xsr.interrupt",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.intset",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.itlbcfg",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ ITLBCFG,
+ XTENSA_OPTION_MMU,
+ 0x01130000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.lbeg",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LBEG,
+ XTENSA_OPTION_LOOP,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.lcount",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LCOUNT,
+ XTENSA_OPTION_LOOP,
+ },
+ }, {
+ .name = "xsr.lend",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LEND,
+ XTENSA_OPTION_LOOP,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.litbase",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ LITBASE,
+ XTENSA_OPTION_EXTENDED_L32R,
+ 0xfffff001,
+ },
+ .op_flags = XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.m0",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.m1",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 1,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.m2",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 2,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.m3",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MR + 3,
+ XTENSA_OPTION_MAC16,
+ },
+ }, {
+ .name = "xsr.memctl",
+ .translate = translate_xsr_memctl,
+ .par = (const uint32_t[]){MEMCTL},
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mecr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MECR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mepc",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPC,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.meps",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MEPS,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mesave",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESAVE,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mesr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mevaddr",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MESR,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.misc0",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.misc1",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 1,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.misc2",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 2,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.misc3",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MISC + 3,
+ XTENSA_OPTION_MISC_SR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.mpuenb",
+ .translate = translate_xsr_mpuenb,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ MPUENB,
+ XTENSA_OPTION_MPU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.prefctl",
+ .translate = translate_xsr,
+ .par = (const uint32_t[]){PREFCTL},
+ }, {
+ .name = "xsr.prid",
+ .op_flags = XTENSA_OP_ILL,
+ }, {
+ .name = "xsr.ps",
+ .translate = translate_xsr_ps,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PS,
+ XTENSA_OPTION_EXCEPTION,
+ },
+ .op_flags =
+ XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_CHECK_INTERRUPTS,
+ }, {
+ .name = "xsr.ptevaddr",
+ .translate = translate_xsr_mask,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ PTEVADDR,
+ XTENSA_OPTION_MMU,
+ 0xffc00000,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.rasid",
+ .translate = translate_xsr_rasid,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ RASID,
+ XTENSA_OPTION_MMU,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ }, {
+ .name = "xsr.sar",
+ .translate = translate_xsr_sar,
+ .par = (const uint32_t[]){SAR},
+ }, {
+ .name = "xsr.scompare1",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ SCOMPARE1,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ },
+ }, {
+ .name = "xsr.vecbase",
+ .translate = translate_xsr,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ VECBASE,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED,
+ }, {
+ .name = "xsr.windowbase",
+ .translate = translate_xsr_windowbase,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_BASE,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED |
+ XTENSA_OP_EXIT_TB_M1 |
+ XTENSA_OP_SYNC_REGISTER_WINDOW,
+ }, {
+ .name = "xsr.windowstart",
+ .translate = translate_xsr_windowstart,
+ .test_exceptions = test_exceptions_sr,
+ .par = (const uint32_t[]){
+ WINDOW_START,
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ },
+ .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
+ },
+};
+
+const XtensaOpcodeTranslators xtensa_core_opcodes = {
+ .num_opcodes = ARRAY_SIZE(core_ops),
+ .opcode = core_ops,
+};
+
+
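+/*
+ * When the FPU register file is 64 bits wide (DFPU), single-precision
+ * values live in the low half of each register.  The get_f32_* helpers
+ * below give single-precision translators 32-bit TCG values to work on:
+ * either the registers themselves (32-bit register file) or temporaries
+ * holding the extracted low halves.  The matching put_f32_* helpers
+ * write the 32-bit result back zero-extended and free the temporaries.
+ */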
+static inline void get_f32_o1_i3(const OpcodeArg *arg, OpcodeArg *arg32,
+ int o0, int i0, int i1, int i2)
+{
+ if ((i0 >= 0 && arg[i0].num_bits == 64) ||
+ (o0 >= 0 && arg[o0].num_bits == 64)) {
+ if (o0 >= 0) {
+ arg32[o0].out = tcg_temp_new_i32();
+ }
+ if (i0 >= 0) {
+ arg32[i0].in = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(arg32[i0].in, arg[i0].in);
+ }
+ if (i1 >= 0) {
+ arg32[i1].in = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(arg32[i1].in, arg[i1].in);
+ }
+ if (i2 >= 0) {
+ arg32[i2].in = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(arg32[i2].in, arg[i2].in);
+ }
+ } else {
+ if (o0 >= 0) {
+ arg32[o0].out = arg[o0].out;
+ }
+ if (i0 >= 0) {
+ arg32[i0].in = arg[i0].in;
+ }
+ if (i1 >= 0) {
+ arg32[i1].in = arg[i1].in;
+ }
+ if (i2 >= 0) {
+ arg32[i2].in = arg[i2].in;
+ }
+ }
+}
+
+static inline void put_f32_o1_i3(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int o0, int i0, int i1, int i2)
+{
+ if ((i0 >= 0 && arg[i0].num_bits == 64) ||
+ (o0 >= 0 && arg[o0].num_bits == 64)) {
+ if (o0 >= 0) {
+ tcg_gen_extu_i32_i64(arg[o0].out, arg32[o0].out);
+ tcg_temp_free_i32(arg32[o0].out);
+ }
+ if (i0 >= 0) {
+ tcg_temp_free_i32(arg32[i0].in);
+ }
+ if (i1 >= 0) {
+ tcg_temp_free_i32(arg32[i1].in);
+ }
+ if (i2 >= 0) {
+ tcg_temp_free_i32(arg32[i2].in);
+ }
+ }
+}
+
+static inline void get_f32_o1_i2(const OpcodeArg *arg, OpcodeArg *arg32,
+ int o0, int i0, int i1)
+{
+ get_f32_o1_i3(arg, arg32, o0, i0, i1, -1);
+}
+
+static inline void put_f32_o1_i2(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int o0, int i0, int i1)
+{
+ put_f32_o1_i3(arg, arg32, o0, i0, i1, -1);
+}
+
+static inline void get_f32_o1_i1(const OpcodeArg *arg, OpcodeArg *arg32,
+ int o0, int i0)
+{
+ get_f32_o1_i2(arg, arg32, o0, i0, -1);
+}
+
+static inline void put_f32_o1_i1(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int o0, int i0)
+{
+ put_f32_o1_i2(arg, arg32, o0, i0, -1);
+}
+
+static inline void get_f32_o1(const OpcodeArg *arg, OpcodeArg *arg32,
+ int o0)
+{
+ get_f32_o1_i1(arg, arg32, o0, -1);
+}
+
+static inline void put_f32_o1(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int o0)
+{
+ put_f32_o1_i1(arg, arg32, o0, -1);
+}
+
+static inline void get_f32_i2(const OpcodeArg *arg, OpcodeArg *arg32,
+ int i0, int i1)
+{
+ get_f32_o1_i2(arg, arg32, -1, i0, i1);
+}
+
+static inline void put_f32_i2(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int i0, int i1)
+{
+ put_f32_o1_i2(arg, arg32, -1, i0, i1);
+}
+
+static inline void get_f32_i1(const OpcodeArg *arg, OpcodeArg *arg32,
+ int i0)
+{
+ get_f32_i2(arg, arg32, i0, -1);
+}
+
+static inline void put_f32_i1(const OpcodeArg *arg, const OpcodeArg *arg32,
+ int i0)
+{
+ put_f32_i2(arg, arg32, i0, -1);
+}
+
+
+static void translate_abs_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_abs_d(arg[0].out, arg[1].in);
+}
+
+static void translate_abs_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ OpcodeArg arg32[2];
+
+ get_f32_o1_i1(arg, arg32, 0, 1);
+ gen_helper_abs_s(arg32[0].out, arg32[1].in);
+ put_f32_o1_i1(arg, arg32, 0, 1);
+}
+
+static void translate_fpu2k_add_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_fpu2k_add_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+}
+
+enum {
+ COMPARE_UN,
+ COMPARE_OEQ,
+ COMPARE_UEQ,
+ COMPARE_OLT,
+ COMPARE_ULT,
+ COMPARE_OLE,
+ COMPARE_ULE,
+};
+
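+/*
+ * FP compare: par[0] selects one of the comparison helpers above, and
+ * the result conditionally sets or clears bit arg[0].imm of the target
+ * boolean register through a movcond, avoiding a branch.
+ */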
+static void translate_compare_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ static void (* const helper[])(TCGv_i32 res, TCGv_env env,
+ TCGv_i64 s, TCGv_i64 t) = {
+ [COMPARE_UN] = gen_helper_un_d,
+ [COMPARE_OEQ] = gen_helper_oeq_d,
+ [COMPARE_UEQ] = gen_helper_ueq_d,
+ [COMPARE_OLT] = gen_helper_olt_d,
+ [COMPARE_ULT] = gen_helper_ult_d,
+ [COMPARE_OLE] = gen_helper_ole_d,
+ [COMPARE_ULE] = gen_helper_ule_d,
+ };
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 res = tcg_temp_new_i32();
+ TCGv_i32 set_br = tcg_temp_new_i32();
+ TCGv_i32 clr_br = tcg_temp_new_i32();
+
+ tcg_gen_ori_i32(set_br, arg[0].in, 1 << arg[0].imm);
+ tcg_gen_andi_i32(clr_br, arg[0].in, ~(1 << arg[0].imm));
+
+ helper[par[0]](res, cpu_env, arg[1].in, arg[2].in);
+ tcg_gen_movcond_i32(TCG_COND_NE,
+ arg[0].out, res, zero,
+ set_br, clr_br);
+ tcg_temp_free(zero);
+ tcg_temp_free(res);
+ tcg_temp_free(set_br);
+ tcg_temp_free(clr_br);
+}
+
+static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ static void (* const helper[])(TCGv_i32 res, TCGv_env env,
+ TCGv_i32 s, TCGv_i32 t) = {
+ [COMPARE_UN] = gen_helper_un_s,
+ [COMPARE_OEQ] = gen_helper_oeq_s,
+ [COMPARE_UEQ] = gen_helper_ueq_s,
+ [COMPARE_OLT] = gen_helper_olt_s,
+ [COMPARE_ULT] = gen_helper_ult_s,
+ [COMPARE_OLE] = gen_helper_ole_s,
+ [COMPARE_ULE] = gen_helper_ule_s,
+ };
+ OpcodeArg arg32[3];
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 res = tcg_temp_new_i32();
+ TCGv_i32 set_br = tcg_temp_new_i32();
+ TCGv_i32 clr_br = tcg_temp_new_i32();
+
+ tcg_gen_ori_i32(set_br, arg[0].in, 1 << arg[0].imm);
+ tcg_gen_andi_i32(clr_br, arg[0].in, ~(1 << arg[0].imm));
+
+ get_f32_i2(arg, arg32, 1, 2);
+ helper[par[0]](res, cpu_env, arg32[1].in, arg32[2].in);
+ tcg_gen_movcond_i32(TCG_COND_NE,
+ arg[0].out, res, zero,
+ set_br, clr_br);
+ put_f32_i2(arg, arg32, 1, 2);
+ tcg_temp_free(zero);
+ tcg_temp_free(res);
+ tcg_temp_free(set_br);
+ tcg_temp_free(clr_br);
+}
+
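+/*
+ * const.d/const.s load one of four constants (0.0, 1.0, 2.0, 0.5)
+ * selected by the immediate.  Larger immediates are reserved: they wrap
+ * modulo the table size and are logged as a guest error.
+ */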
+static void translate_const_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ static const uint64_t v[] = {
+ UINT64_C(0x0000000000000000),
+ UINT64_C(0x3ff0000000000000),
+ UINT64_C(0x4000000000000000),
+ UINT64_C(0x3fe0000000000000),
+ };
+
+ tcg_gen_movi_i64(arg[0].out, v[arg[1].imm % ARRAY_SIZE(v)]);
+ if (arg[1].imm >= ARRAY_SIZE(v)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "const.d f%d, #%d, immediate value is reserved\n",
+ arg[0].imm, arg[1].imm);
+ }
+}
+
+static void translate_const_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ static const uint32_t v[] = {
+ 0x00000000,
+ 0x3f800000,
+ 0x40000000,
+ 0x3f000000,
+ };
+
+ if (arg[0].num_bits == 32) {
+ tcg_gen_movi_i32(arg[0].out, v[arg[1].imm % ARRAY_SIZE(v)]);
+ } else {
+ tcg_gen_movi_i64(arg[0].out, v[arg[1].imm % ARRAY_SIZE(v)]);
+ }
+ if (arg[1].imm >= ARRAY_SIZE(v)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "const.s f%d, #%d, immediate value is reserved\n",
+ arg[0].imm, arg[1].imm);
+ }
+}
+
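+/*
+ * Integer-to-float conversion: par[0] selects the unsigned (uitof)
+ * rather than signed (itof) helper, and the immediate divides the
+ * converted value by 2^imm (the scale is passed as -imm).
+ */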
+static void translate_float_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 scale = tcg_const_i32(-arg[2].imm);
+
+ if (par[0]) {
+ gen_helper_uitof_d(arg[0].out, cpu_env, arg[1].in, scale);
+ } else {
+ gen_helper_itof_d(arg[0].out, cpu_env, arg[1].in, scale);
+ }
+ tcg_temp_free(scale);
+}
+
+static void translate_float_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 scale = tcg_const_i32(-arg[2].imm);
+ OpcodeArg arg32[1];
+
+ get_f32_o1(arg, arg32, 0);
+ if (par[0]) {
+ gen_helper_uitof_s(arg32[0].out, cpu_env, arg[1].in, scale);
+ } else {
+ gen_helper_itof_s(arg32[0].out, cpu_env, arg[1].in, scale);
+ }
+ put_f32_o1(arg, arg32, 0);
+ tcg_temp_free(scale);
+}
+
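+/*
+ * Float-to-integer conversion: par[0] is the softfloat rounding mode
+ * (distinguishing round/ceil/floor/trunc), par[1] selects the unsigned
+ * helper, and the immediate multiplies the input by 2^imm first.
+ */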
+static void translate_ftoi_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 rounding_mode = tcg_const_i32(par[0]);
+ TCGv_i32 scale = tcg_const_i32(arg[2].imm);
+
+ if (par[1]) {
+ gen_helper_ftoui_d(arg[0].out, cpu_env, arg[1].in,
+ rounding_mode, scale);
+ } else {
+ gen_helper_ftoi_d(arg[0].out, cpu_env, arg[1].in,
+ rounding_mode, scale);
+ }
+ tcg_temp_free(rounding_mode);
+ tcg_temp_free(scale);
+}
+
+static void translate_ftoi_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 rounding_mode = tcg_const_i32(par[0]);
+ TCGv_i32 scale = tcg_const_i32(arg[2].imm);
+ OpcodeArg arg32[2];
+
+ get_f32_i1(arg, arg32, 1);
+ if (par[1]) {
+ gen_helper_ftoui_s(arg[0].out, cpu_env, arg32[1].in,
+ rounding_mode, scale);
+ } else {
+ gen_helper_ftoi_s(arg[0].out, cpu_env, arg32[1].in,
+ rounding_mode, scale);
+ }
+ put_f32_i1(arg, arg32, 1);
+ tcg_temp_free(rounding_mode);
+ tcg_temp_free(scale);
+}
+
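+/*
+ * FPU2000 load/store, immediate (ldsti) and indexed (ldstx) forms:
+ * par[0] distinguishes store from load, par[1] enables base register
+ * writeback for the "u" (update) variants.
+ */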
+static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ if (par[0]) {
+ tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
+ } else {
+ tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
+ }
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr = tcg_temp_new_i32();
+ MemOp mop;
+
+ tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ if (par[0]) {
+ tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, mop);
+ } else {
+ tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, mop);
+ }
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+static void translate_fpu2k_madd_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_fpu2k_madd_s(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+}
+
+static void translate_mov_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_mov_i64(arg[0].out, arg[1].in);
+}
+
+static void translate_mov_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[0].num_bits == 32) {
+ tcg_gen_mov_i32(arg[0].out, arg[1].in);
+ } else {
+ tcg_gen_mov_i64(arg[0].out, arg[1].in);
+ }
+}
+
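+/*
+ * moveqz/movnez/movgez/movltz: conditional FP move, with par[0] giving
+ * the TCG condition applied to the integer operand arg[2] against zero.
+ */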
+static void translate_movcond_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i64 arg2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(arg2, arg[2].in);
+ tcg_gen_movcond_i64(par[0], arg[0].out,
+ arg2, zero,
+ arg[1].in, arg[0].in);
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i64(arg2);
+}
+
+static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[0].num_bits == 32) {
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(par[0], arg[0].out,
+ arg[2].in, zero,
+ arg[1].in, arg[0].in);
+ tcg_temp_free(zero);
+ } else {
+ translate_movcond_d(dc, arg, par);
+ }
+}
+
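+/*
+ * movf/movt: conditional FP move gated on a single boolean-register
+ * bit, arg[2].imm; par[0] gives the TCG condition tested against zero.
+ */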
+static void translate_movp_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i64 zero = tcg_const_i64(0);
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i32(tmp1, arg[2].in, 1 << arg[2].imm);
+ tcg_gen_extu_i32_i64(tmp2, tmp1);
+ tcg_gen_movcond_i64(par[0],
+ arg[0].out, tmp2, zero,
+ arg[1].in, arg[0].in);
+ tcg_temp_free_i64(zero);
+ tcg_temp_free_i32(tmp1);
+ tcg_temp_free_i64(tmp2);
+}
+
+static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[0].num_bits == 32) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
+ tcg_gen_movcond_i32(par[0],
+ arg[0].out, tmp, zero,
+ arg[1].in, arg[0].in);
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+ } else {
+ translate_movp_d(dc, arg, par);
+ }
+}
+
+static void translate_fpu2k_mul_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_fpu2k_mul_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+}
+
+static void translate_fpu2k_msub_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_fpu2k_msub_s(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+}
+
+static void translate_neg_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_neg_d(arg[0].out, arg[1].in);
+}
+
+static void translate_neg_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ OpcodeArg arg32[2];
+
+ get_f32_o1_i1(arg, arg32, 0, 1);
+ gen_helper_neg_s(arg32[0].out, arg32[1].in);
+ put_f32_o1_i1(arg, arg32, 0, 1);
+}
+
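+/*
+ * rfr moves an FP register to an address register: rfr takes the low
+ * half (or the whole register on a 32-bit register file), rfrd the
+ * high 32 bits of a 64-bit FP register.
+ */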
+static void translate_rfr_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_extrh_i64_i32(arg[0].out, arg[1].in);
+}
+
+static void translate_rfr_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[1].num_bits == 32) {
+ tcg_gen_mov_i32(arg[0].out, arg[1].in);
+ } else {
+ tcg_gen_extrl_i64_i32(arg[0].out, arg[1].in);
+ }
+}
+
+static void translate_fpu2k_sub_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_fpu2k_sub_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+}
+
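+/*
+ * wfr moves an address register into an FP register; wfrd assembles a
+ * 64-bit value from two address registers, arg[1] supplying the high
+ * half and arg[2] the low half.
+ */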
+static void translate_wfr_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_concat_i32_i64(arg[0].out, arg[2].in, arg[1].in);
+}
+
+static void translate_wfr_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (arg[0].num_bits == 32) {
+ tcg_gen_mov_i32(arg[0].out, arg[1].in);
+ } else {
+ tcg_gen_ext_i32_i64(arg[0].out, arg[1].in);
+ }
+}
+
+static void translate_wur_fpu2k_fcr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_wur_fpu2k_fcr(cpu_env, arg[0].in);
+}
+
+static void translate_wur_fpu2k_fsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ tcg_gen_andi_i32(cpu_UR[par[0]], arg[0].in, 0xffffff80);
+}
+
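+/*
+ * FPU2000 opcode table: every entry carries coprocessor mask 0x1, so
+ * these opcodes are guarded by CPENABLE bit 0.
+ */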
+static const XtensaOpcodeOps fpu2000_ops[] = {
+ {
+ .name = "abs.s",
+ .translate = translate_abs_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "add.s",
+ .translate = translate_fpu2k_add_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ceil.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_up, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "float.s",
+ .translate = translate_float_s,
+ .par = (const uint32_t[]){false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "floor.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_down, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsi",
+ .translate = translate_ldsti,
+ .par = (const uint32_t[]){false, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsiu",
+ .translate = translate_ldsti,
+ .par = (const uint32_t[]){false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsx",
+ .translate = translate_ldstx,
+ .par = (const uint32_t[]){false, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsxu",
+ .translate = translate_ldstx,
+ .par = (const uint32_t[]){false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "madd.s",
+ .translate = translate_fpu2k_madd_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mov.s",
+ .translate = translate_mov_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "moveqz.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movf.s",
+ .translate = translate_movp_s,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movgez.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movltz.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movnez.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movt.s",
+ .translate = translate_movp_s,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "msub.s",
+ .translate = translate_fpu2k_msub_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mul.s",
+ .translate = translate_fpu2k_mul_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "neg.s",
+ .translate = translate_neg_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "oeq.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ole.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OLE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "olt.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OLT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rfr",
+ .translate = translate_rfr_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "round.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_nearest_even, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rur.fcr",
+ .translate = translate_rur,
+ .par = (const uint32_t[]){FCR},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rur.fsr",
+ .translate = translate_rur,
+ .par = (const uint32_t[]){FSR},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssi",
+ .translate = translate_ldsti,
+ .par = (const uint32_t[]){true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssiu",
+ .translate = translate_ldsti,
+ .par = (const uint32_t[]){true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssx",
+ .translate = translate_ldstx,
+ .par = (const uint32_t[]){true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssxu",
+ .translate = translate_ldstx,
+ .par = (const uint32_t[]){true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sub.s",
+ .translate = translate_fpu2k_sub_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "trunc.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_to_zero, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ueq.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_UEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ufloat.s",
+ .translate = translate_float_s,
+ .par = (const uint32_t[]){true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ule.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_ULE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ult.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_ULT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "un.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_UN},
+ .coprocessor = 0x1,
+ }, {
+ .name = "utrunc.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_to_zero, true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "wfr",
+ .translate = translate_wfr_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "wur.fcr",
+ .translate = translate_wur_fpu2k_fcr,
+ .par = (const uint32_t[]){FCR},
+ .coprocessor = 0x1,
+ }, {
+ .name = "wur.fsr",
+ .translate = translate_wur_fpu2k_fsr,
+ .par = (const uint32_t[]){FSR},
+ .coprocessor = 0x1,
+ },
+};
+
+const XtensaOpcodeTranslators xtensa_fpu2000_opcodes = {
+ .num_opcodes = ARRAY_SIZE(fpu2000_ops),
+ .opcode = fpu2000_ops,
+};
+
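+/*
+ * DFPU translators follow.  Single-precision ops check at translation
+ * time whether the core is a single-only DFPU (FPU2000-compatible
+ * helpers on 32-bit registers) or a full DFPU, in which case they act
+ * on 32-bit views of the 64-bit registers via get_f32_*/put_f32_*.
+ */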
+static void translate_add_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_add_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+}
+
+static void translate_add_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ gen_helper_fpu2k_add_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+ } else {
+ OpcodeArg arg32[3];
+
+ get_f32_o1_i2(arg, arg32, 0, 1, 2);
+ gen_helper_add_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ put_f32_o1_i2(arg, arg32, 0, 1, 2);
+ }
+}
+
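+/*
+ * cvtd.s/cvts.d convert between single and double precision; the
+ * single-precision operand or result occupies the low 32 bits of its
+ * 64-bit FP register.
+ */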
+static void translate_cvtd_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ tcg_gen_extrl_i64_i32(v, arg[1].in);
+ gen_helper_cvtd_s(arg[0].out, cpu_env, v);
+ tcg_temp_free_i32(v);
+}
+
+static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ gen_helper_cvts_d(v, cpu_env, arg[1].in);
+ tcg_gen_extu_i32_i64(arg[0].out, v);
+ tcg_temp_free_i32(v);
+}
+
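+/*
+ * DFPU load/store: par[0] selects store, par[1] selects whether the
+ * offset is added to the base before the access (plain and "u" forms)
+ * rather than only written back afterwards ("p" post-update forms),
+ * and par[2] enables the base register writeback itself.
+ */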
+static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr;
+ MemOp mop;
+
+ if (par[1]) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ } else {
+ addr = arg[1].in;
+ }
+ mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ if (par[0]) {
+ tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, mop);
+ }
+ if (par[2]) {
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ } else {
+ tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm);
+ }
+ }
+ if (par[1]) {
+ tcg_temp_free(addr);
+ }
+}
+
+static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr;
+ OpcodeArg arg32[1];
+ MemOp mop;
+
+ if (par[1]) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
+ } else {
+ addr = arg[1].in;
+ }
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ if (par[0]) {
+ get_f32_i1(arg, arg32, 0);
+ tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, mop);
+ put_f32_i1(arg, arg32, 0);
+ } else {
+ get_f32_o1(arg, arg32, 0);
+ tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, mop);
+ put_f32_o1(arg, arg32, 0);
+ }
+ if (par[2]) {
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ } else {
+ tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm);
+ }
+ }
+ if (par[1]) {
+ tcg_temp_free(addr);
+ }
+}
+
+static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr;
+ MemOp mop;
+
+ if (par[1]) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
+ } else {
+ addr = arg[1].in;
+ }
+ mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ if (par[0]) {
+ tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, mop);
+ }
+ if (par[2]) {
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ } else {
+ tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in);
+ }
+ }
+ if (par[1]) {
+ tcg_temp_free(addr);
+ }
+}
+
+static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ TCGv_i32 addr;
+ OpcodeArg arg32[1];
+ MemOp mop;
+
+ if (par[1]) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
+ } else {
+ addr = arg[1].in;
+ }
+ mop = gen_load_store_alignment(dc, MO_TEUL, addr);
+ if (par[0]) {
+ get_f32_i1(arg, arg32, 0);
+ tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, mop);
+ put_f32_i1(arg, arg32, 0);
+ } else {
+ get_f32_o1(arg, arg32, 0);
+ tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, mop);
+ put_f32_o1(arg, arg32, 0);
+ }
+ if (par[2]) {
+ if (par[1]) {
+ tcg_gen_mov_i32(arg[1].out, addr);
+ } else {
+ tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in);
+ }
+ }
+ if (par[1]) {
+ tcg_temp_free(addr);
+ }
+}
+
+static void translate_madd_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_madd_d(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+}
+
+static void translate_madd_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ gen_helper_fpu2k_madd_s(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+ } else {
+ OpcodeArg arg32[3];
+
+ get_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
+ gen_helper_madd_s(arg32[0].out, cpu_env,
+ arg32[0].in, arg32[1].in, arg32[2].in);
+ put_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
+ }
+}
+
+static void translate_mul_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_mul_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+}
+
+static void translate_mul_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ gen_helper_fpu2k_mul_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+ } else {
+ OpcodeArg arg32[3];
+
+ get_f32_o1_i2(arg, arg32, 0, 1, 2);
+ gen_helper_mul_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ put_f32_o1_i2(arg, arg32, 0, 1, 2);
+ }
+}
+
+static void translate_msub_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_msub_d(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+}
+
+static void translate_msub_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ gen_helper_fpu2k_msub_s(arg[0].out, cpu_env,
+ arg[0].in, arg[1].in, arg[2].in);
+ } else {
+ OpcodeArg arg32[3];
+
+ get_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
+ gen_helper_msub_s(arg32[0].out, cpu_env,
+ arg32[0].in, arg32[1].in, arg32[2].in);
+ put_f32_o1_i3(arg, arg32, 0, 0, 1, 2);
+ }
+}
+
+static void translate_sub_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_sub_d(arg[0].out, cpu_env, arg[1].in, arg[2].in);
+}
+
+static void translate_sub_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ if (option_enabled(dc, XTENSA_OPTION_DFPU_SINGLE_ONLY)) {
+ gen_helper_fpu2k_sub_s(arg[0].out, cpu_env,
+ arg[1].in, arg[2].in);
+ } else {
+ OpcodeArg arg32[3];
+
+ get_f32_o1_i2(arg, arg32, 0, 1, 2);
+ gen_helper_sub_s(arg32[0].out, cpu_env, arg32[1].in, arg32[2].in);
+ put_f32_o1_i2(arg, arg32, 0, 1, 2);
+ }
+}
+
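+/*
+ * mkdadj/mksadj belong to the DFPU divide/square-root instruction
+ * sequences; the other sequence ops (div0, divn, nexp01, addexp,
+ * sqrt0, maddn) are translated as nops in this model.
+ */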
+static void translate_mkdadj_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_mkdadj_d(arg[0].out, cpu_env, arg[0].in, arg[1].in);
+}
+
+static void translate_mkdadj_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ OpcodeArg arg32[2];
+
+ get_f32_o1_i2(arg, arg32, 0, 0, 1);
+ gen_helper_mkdadj_s(arg32[0].out, cpu_env, arg32[0].in, arg32[1].in);
+ put_f32_o1_i2(arg, arg32, 0, 0, 1);
+}
+
+static void translate_mksadj_d(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_mksadj_d(arg[0].out, cpu_env, arg[1].in);
+}
+
+static void translate_mksadj_s(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ OpcodeArg arg32[2];
+
+ get_f32_o1_i1(arg, arg32, 0, 1);
+ gen_helper_mksadj_s(arg32[0].out, cpu_env, arg32[1].in);
+ put_f32_o1_i1(arg, arg32, 0, 1);
+}
+
+static void translate_wur_fpu_fcr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_wur_fpu_fcr(cpu_env, arg[0].in);
+}
+
+static void translate_rur_fpu_fsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_rur_fpu_fsr(arg[0].out, cpu_env);
+}
+
+static void translate_wur_fpu_fsr(DisasContext *dc, const OpcodeArg arg[],
+ const uint32_t par[])
+{
+ gen_helper_wur_fpu_fsr(cpu_env, arg[0].in);
+}
+
+static const XtensaOpcodeOps fpu_ops[] = {
+ {
+ .name = "abs.d",
+ .translate = translate_abs_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "abs.s",
+ .translate = translate_abs_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "add.d",
+ .translate = translate_add_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "add.s",
+ .translate = translate_add_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "addexp.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "addexp.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "addexpm.d",
+ .translate = translate_mov_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "addexpm.s",
+ .translate = translate_mov_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ceil.d",
+ .translate = translate_ftoi_d,
+ .par = (const uint32_t[]){float_round_up, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ceil.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_up, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "const.d",
+ .translate = translate_const_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "const.s",
+ .translate = translate_const_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "cvtd.s",
+ .translate = translate_cvtd_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "cvts.d",
+ .translate = translate_cvts_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "div0.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "div0.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "divn.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "divn.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "float.d",
+ .translate = translate_float_d,
+ .par = (const uint32_t[]){false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "float.s",
+ .translate = translate_float_s,
+ .par = (const uint32_t[]){false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "floor.d",
+ .translate = translate_ftoi_d,
+ .par = (const uint32_t[]){float_round_down, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "floor.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_down, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldi",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){false, true, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldip",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){false, false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldiu",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){false, true, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldx",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){false, true, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldxp",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){false, false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ldxu",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){false, true, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsi",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){false, true, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsip",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){false, false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsiu",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){false, true, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsx",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){false, true, false},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsxp",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){false, false, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "lsxu",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){false, true, true},
+ .op_flags = XTENSA_OP_LOAD,
+ .coprocessor = 0x1,
+ }, {
+ .name = "madd.d",
+ .translate = translate_madd_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "madd.s",
+ .translate = translate_madd_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "maddn.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "maddn.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mkdadj.d",
+ .translate = translate_mkdadj_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mkdadj.s",
+ .translate = translate_mkdadj_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mksadj.d",
+ .translate = translate_mksadj_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mksadj.s",
+ .translate = translate_mksadj_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mov.d",
+ .translate = translate_mov_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mov.s",
+ .translate = translate_mov_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "moveqz.d",
+ .translate = translate_movcond_d,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "moveqz.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movf.d",
+ .translate = translate_movp_d,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movf.s",
+ .translate = translate_movp_s,
+ .par = (const uint32_t[]){TCG_COND_EQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movgez.d",
+ .translate = translate_movcond_d,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movgez.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_GE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movltz.d",
+ .translate = translate_movcond_d,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movltz.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_LT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movnez.d",
+ .translate = translate_movcond_d,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movnez.s",
+ .translate = translate_movcond_s,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movt.d",
+ .translate = translate_movp_d,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "movt.s",
+ .translate = translate_movp_s,
+ .par = (const uint32_t[]){TCG_COND_NE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "msub.d",
+ .translate = translate_msub_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "msub.s",
+ .translate = translate_msub_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mul.d",
+ .translate = translate_mul_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "mul.s",
+ .translate = translate_mul_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "neg.d",
+ .translate = translate_neg_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "neg.s",
+ .translate = translate_neg_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "nexp01.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "nexp01.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "oeq.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_OEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "oeq.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ole.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_OLE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ole.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OLE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "olt.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_OLT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "olt.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_OLT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rfr",
+ .translate = translate_rfr_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "rfrd",
+ .translate = translate_rfr_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "round.d",
+ .translate = translate_ftoi_d,
+ .par = (const uint32_t[]){float_round_nearest_even, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "round.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_nearest_even, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rur.fcr",
+ .translate = translate_rur,
+ .par = (const uint32_t[]){FCR},
+ .coprocessor = 0x1,
+ }, {
+ .name = "rur.fsr",
+ .translate = translate_rur_fpu_fsr,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdi",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){true, true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdip",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){true, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdiu",
+ .translate = translate_ldsti_d,
+ .par = (const uint32_t[]){true, true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdx",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){true, true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdxp",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){true, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sdxu",
+ .translate = translate_ldstx_d,
+ .par = (const uint32_t[]){true, true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sqrt0.d",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sqrt0.s",
+ .translate = translate_nop,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssi",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){true, true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssip",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){true, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssiu",
+ .translate = translate_ldsti_s,
+ .par = (const uint32_t[]){true, true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssx",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){true, true, false},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssxp",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){true, false, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "ssxu",
+ .translate = translate_ldstx_s,
+ .par = (const uint32_t[]){true, true, true},
+ .op_flags = XTENSA_OP_STORE,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sub.d",
+ .translate = translate_sub_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "sub.s",
+ .translate = translate_sub_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "trunc.d",
+ .translate = translate_ftoi_d,
+ .par = (const uint32_t[]){float_round_to_zero, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "trunc.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_to_zero, false},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ueq.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_UEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ueq.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_UEQ},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ufloat.d",
+ .translate = translate_float_d,
+ .par = (const uint32_t[]){true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ufloat.s",
+ .translate = translate_float_s,
+ .par = (const uint32_t[]){true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ule.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_ULE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ule.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_ULE},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ult.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_ULT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "ult.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_ULT},
+ .coprocessor = 0x1,
+ }, {
+ .name = "un.d",
+ .translate = translate_compare_d,
+ .par = (const uint32_t[]){COMPARE_UN},
+ .coprocessor = 0x1,
+ }, {
+ .name = "un.s",
+ .translate = translate_compare_s,
+ .par = (const uint32_t[]){COMPARE_UN},
+ .coprocessor = 0x1,
+ }, {
+ .name = "utrunc.d",
+ .translate = translate_ftoi_d,
+ .par = (const uint32_t[]){float_round_to_zero, true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "utrunc.s",
+ .translate = translate_ftoi_s,
+ .par = (const uint32_t[]){float_round_to_zero, true},
+ .coprocessor = 0x1,
+ }, {
+ .name = "wfr",
+ .translate = translate_wfr_s,
+ .coprocessor = 0x1,
+ }, {
+ .name = "wfrd",
+ .translate = translate_wfr_d,
+ .coprocessor = 0x1,
+ }, {
+ .name = "wur.fcr",
+ .translate = translate_wur_fpu_fcr,
+ .par = (const uint32_t[]){FCR},
+ .coprocessor = 0x1,
+ }, {
+ .name = "wur.fsr",
+ .translate = translate_wur_fpu_fsr,
+ .coprocessor = 0x1,
+ },
+};
+
+const XtensaOpcodeTranslators xtensa_fpu_opcodes = {
+ .num_opcodes = ARRAY_SIZE(fpu_ops),
+ .opcode = fpu_ops,
+};
diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c
new file mode 100644
index 000000000..f6f96a64c
--- /dev/null
+++ b/target/xtensa/win_helper.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+
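+/*
+ * The physical AR register file (env->phys_regs) is a circular buffer of
+ * env->config->nareg registers.  The two helpers below copy a run of 'n'
+ * registers between it and the visible window (env->regs), wrapping
+ * around the end of the physical file when the run crosses it.
+ */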
+static void copy_window_from_phys(CPUXtensaState *env,
+ uint32_t window, uint32_t phys, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n1 * sizeof(uint32_t));
+ memcpy(env->regs + window + n1, env->phys_regs,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+static void copy_phys_from_window(CPUXtensaState *env,
+ uint32_t phys, uint32_t window, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n1 * sizeof(uint32_t));
+ memcpy(env->phys_regs, env->regs + window + n1,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
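+/*
+ * WINDOW_BASE counts in units of four registers, so there are
+ * nareg / 4 valid window positions; with nareg == 64, for example, the
+ * mask below is 0xf and positions wrap modulo 16.
+ */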
+static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
+{
+ return a & (env->config->nareg / 4 - 1);
+}
+
+static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
+{
+ return 1 << windowbase_bound(a, env);
+}
+
+void xtensa_sync_window_from_phys(CPUXtensaState *env)
+{
+ copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
+}
+
+void xtensa_sync_phys_from_window(CPUXtensaState *env)
+{
+ copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
+}
+
+static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
+{
+ xtensa_sync_phys_from_window(env);
+ env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
+ xtensa_sync_window_from_phys(env);
+}
+
+void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
+{
+ xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
+}
+
+void HELPER(sync_windowbase)(CPUXtensaState *env)
+{
+ xtensa_rotate_window_abs(env, env->windowbase_next);
+}
+
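+/*
+ * ENTRY: write register 's' minus the frame size into the slot that will
+ * hold it once the window rotates by PS.CALLINC, mark the new window
+ * position live in WINDOW_START, and leave the rotation itself to the
+ * following sync_windowbase.
+ */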
+void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
+{
+ int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
+
+ env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
+ env->windowbase_next = env->sregs[WINDOW_BASE] + callinc;
+ env->sregs[WINDOW_START] |= windowstart_bit(env->windowbase_next, env);
+}
+
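+/*
+ * Window overflow check: 'windowstart' collects the live-window bits
+ * above WINDOW_BASE, so 'n' is the distance to the nearest window that
+ * the increment 'w' would overwrite.  Rotate to it, save the old base in
+ * PS.OWB, and pick the overflow exception (4/8/12) from the gap to the
+ * following live window.
+ */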
+void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
+{
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t n = ctz32(windowstart) + 1;
+
+ assert(n <= w);
+
+ xtensa_rotate_window(env, n);
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ switch (ctz32(windowstart >> n)) {
+ case 0:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
+ break;
+ case 1:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
+ break;
+ default:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
+ break;
+ }
+}
+
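+/*
+ * RETW legality check: 'n' is the caller's window increment taken from
+ * bits 31:30 of a0; 'm' is the increment implied by the nearest live
+ * WINDOW_START bit below WINDOW_BASE.  RETW is illegal when n is zero or
+ * when both values are known and disagree.
+ */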
+void HELPER(test_ill_retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+ int m = 0;
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = env->sregs[WINDOW_START];
+
+ if (windowstart & windowstart_bit(windowbase - 1, env)) {
+ m = 1;
+ } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
+ m = 2;
+ } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
+ m = 3;
+ }
+
+ if (n == 0 || (m != 0 && m != n)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
+ "PS = %08x, m = %d, n = %d\n",
+ pc, env->sregs[PS], m, n);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+}
+
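+/*
+ * RETW underflow check: if the caller's window, 'n' positions below, is
+ * not live in WINDOW_START, rotate to it, save the old base in PS.OWB and
+ * raise the matching WINDOW_UNDERFLOW4/8/12 exception so the frame can be
+ * reloaded.
+ */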
+void HELPER(test_underflow_retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+
+ if (!(env->sregs[WINDOW_START] &
+ windowstart_bit(env->sregs[WINDOW_BASE] - n, env))) {
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+
+ xtensa_rotate_window(env, -n);
+ /* window underflow */
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ if (n == 1) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
+ } else if (n == 2) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
+ } else if (n == 3) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
+ }
+ }
+}
+
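+/* RETW proper: rotate back by the increment encoded in a0 bits 31:30. */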
+void HELPER(retw)(CPUXtensaState *env, uint32_t a0)
+{
+ int n = (a0 >> 30) & 0x3;
+
+ xtensa_rotate_window(env, -n);
+}
+
+void xtensa_restore_owb(CPUXtensaState *env)
+{
+ xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
+}
+
+void HELPER(restore_owb)(CPUXtensaState *env)
+{
+ xtensa_restore_owb(env);
+}
+
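+/*
+ * MOVSP is only legal while the caller's window is still live: when none
+ * of the three preceding WINDOW_START bits is set, raise ALLOCA_CAUSE.
+ */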
+void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
+{
+ if ((env->sregs[WINDOW_START] &
+ (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
+ HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
+ }
+}
diff --git a/target/xtensa/xtensa-isa-internal.h b/target/xtensa/xtensa-isa-internal.h
new file mode 100644
index 000000000..40dd8bac9
--- /dev/null
+++ b/target/xtensa/xtensa-isa-internal.h
@@ -0,0 +1,231 @@
+/* Internal definitions for the Xtensa ISA library.
+ *
+ * Copyright (c) 2004-2011 Tensilica Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef XTENSA_ISA_INTERNAL_H
+#define XTENSA_ISA_INTERNAL_H
+
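+/* Legacy alias; generated ISA module code may still use the bare name. */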
+#ifndef uint32
+#define uint32 uint32_t
+#endif
+
+/* Flags. */
+
+#define XTENSA_OPERAND_IS_REGISTER 0x00000001
+#define XTENSA_OPERAND_IS_PCRELATIVE 0x00000002
+#define XTENSA_OPERAND_IS_INVISIBLE 0x00000004
+#define XTENSA_OPERAND_IS_UNKNOWN 0x00000008
+
+#define XTENSA_OPCODE_IS_BRANCH 0x00000001
+#define XTENSA_OPCODE_IS_JUMP 0x00000002
+#define XTENSA_OPCODE_IS_LOOP 0x00000004
+#define XTENSA_OPCODE_IS_CALL 0x00000008
+
+#define XTENSA_STATE_IS_EXPORTED 0x00000001
+#define XTENSA_STATE_IS_SHARED_OR 0x00000002
+
+#define XTENSA_INTERFACE_HAS_SIDE_EFFECT 0x00000001
+
+/* Function pointer typedefs */
+typedef void (*xtensa_format_encode_fn)(xtensa_insnbuf);
+typedef void (*xtensa_get_slot_fn)(const xtensa_insnbuf, xtensa_insnbuf);
+typedef void (*xtensa_set_slot_fn)(xtensa_insnbuf, const xtensa_insnbuf);
+typedef int (*xtensa_opcode_decode_fn)(const xtensa_insnbuf);
+typedef uint32_t (*xtensa_get_field_fn)(const xtensa_insnbuf);
+typedef void (*xtensa_set_field_fn)(xtensa_insnbuf, uint32_t);
+typedef int (*xtensa_immed_decode_fn)(uint32_t *);
+typedef int (*xtensa_immed_encode_fn)(uint32_t *);
+typedef int (*xtensa_do_reloc_fn)(uint32_t *, uint32_t);
+typedef int (*xtensa_undo_reloc_fn)(uint32_t *, uint32_t);
+typedef void (*xtensa_opcode_encode_fn)(xtensa_insnbuf);
+typedef int (*xtensa_format_decode_fn)(const xtensa_insnbuf);
+typedef int (*xtensa_length_decode_fn)(const unsigned char *);
+
+typedef struct xtensa_format_internal_struct {
+ const char *name; /* Instruction format name. */
+ int length; /* Instruction length in bytes. */
+ xtensa_format_encode_fn encode_fn;
+ int num_slots;
+ int *slot_id; /* Array[num_slots] of slot IDs. */
+} xtensa_format_internal;
+
+typedef struct xtensa_slot_internal_struct {
+ const char *name; /* Not necessarily unique. */
+ const char *format;
+ int position;
+ xtensa_get_slot_fn get_fn;
+ xtensa_set_slot_fn set_fn;
+ xtensa_get_field_fn *get_field_fns; /* Array[field_id]. */
+ xtensa_set_field_fn *set_field_fns; /* Array[field_id]. */
+ xtensa_opcode_decode_fn opcode_decode_fn;
+ const char *nop_name;
+} xtensa_slot_internal;
+
+typedef struct xtensa_operand_internal_struct {
+ const char *name;
+ int field_id;
+ xtensa_regfile regfile; /* Register file. */
+ int num_regs; /* Usually 1; 2 for reg pairs, etc. */
+ uint32_t flags; /* See XTENSA_OPERAND_* flags. */
+ xtensa_immed_encode_fn encode; /* Encode the operand value. */
+ xtensa_immed_decode_fn decode; /* Decode the value from the field. */
+ xtensa_do_reloc_fn do_reloc; /* Perform a PC-relative reloc. */
+ xtensa_undo_reloc_fn undo_reloc; /* Undo a PC-relative relocation. */
+} xtensa_operand_internal;
+
+typedef struct xtensa_arg_internal_struct {
+ union {
+ int operand_id; /* For normal operands. */
+ xtensa_state state; /* For stateOperands. */
+ } u;
+ char inout; /* Direction: 'i', 'o', or 'm'. */
+} xtensa_arg_internal;
+
+typedef struct xtensa_iclass_internal_struct {
+ int num_operands; /* Size of "operands" array. */
+ xtensa_arg_internal *operands; /* Array[num_operands]. */
+
+ int num_stateOperands; /* Size of "stateOperands" array. */
+ xtensa_arg_internal *stateOperands; /* Array[num_stateOperands]. */
+
+ int num_interfaceOperands; /* Size of "interfaceOperands". */
+ xtensa_interface *interfaceOperands; /* Array[num_interfaceOperands]. */
+} xtensa_iclass_internal;
+
+typedef struct xtensa_opcode_internal_struct {
+ const char *name; /* Opcode mnemonic. */
+ int iclass_id; /* Iclass for this opcode. */
+ uint32_t flags; /* See XTENSA_OPCODE_* flags. */
+ xtensa_opcode_encode_fn *encode_fns; /* Array[slot_id]. */
+ int num_funcUnit_uses; /* Number of funcUnit_use entries. */
+ xtensa_funcUnit_use *funcUnit_uses; /* Array[num_funcUnit_uses]. */
+} xtensa_opcode_internal;
+
+typedef struct xtensa_regfile_internal_struct {
+ const char *name; /* Full name of the regfile. */
+ const char *shortname; /* Abbreviated name. */
+ xtensa_regfile parent; /* View parent (or identity). */
+ int num_bits; /* Width of the registers. */
+ int num_entries; /* Number of registers. */
+} xtensa_regfile_internal;
+
+typedef struct xtensa_interface_internal_struct {
+ const char *name; /* Interface name. */
+ int num_bits; /* Width of the interface. */
+ uint32_t flags; /* See XTENSA_INTERFACE_* flags. */
+ int class_id; /* Class of related interfaces. */
+ char inout; /* "i" or "o". */
+} xtensa_interface_internal;
+
+typedef struct xtensa_funcUnit_internal_struct {
+ const char *name; /* Functional unit name. */
+ int num_copies; /* Number of instances. */
+} xtensa_funcUnit_internal;
+
+typedef struct xtensa_state_internal_struct {
+ const char *name; /* State name. */
+ int num_bits; /* Number of state bits. */
+ uint32_t flags; /* See XTENSA_STATE_* flags. */
+} xtensa_state_internal;
+
+typedef struct xtensa_sysreg_internal_struct {
+ const char *name; /* Register name. */
+ int number; /* Register number. */
+ int is_user; /* Non-zero if a "user register". */
+} xtensa_sysreg_internal;
+
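+/*
+ * Entry in a name-lookup table.  The tables are sorted case-insensitively
+ * by key and searched with bsearch() (see xtensa_isa_name_compare).
+ */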
+typedef struct xtensa_lookup_entry_struct {
+ const char *key;
+ union {
+ xtensa_opcode opcode; /* Internal opcode number. */
+ xtensa_sysreg sysreg; /* Internal sysreg number. */
+ xtensa_state state; /* Internal state number. */
+ xtensa_interface intf; /* Internal interface number. */
+ xtensa_funcUnit fun; /* Internal funcUnit number. */
+ } u;
+} xtensa_lookup_entry;
+
+typedef struct xtensa_isa_internal_struct {
+ int is_big_endian; /* Endianness. */
+ int insn_size; /* Maximum length in bytes. */
+ int insnbuf_size; /* Number of insnbuf_words. */
+
+ int num_formats;
+ xtensa_format_internal *formats;
+ xtensa_format_decode_fn format_decode_fn;
+ xtensa_length_decode_fn length_decode_fn;
+
+ int num_slots;
+ xtensa_slot_internal *slots;
+
+ int num_fields;
+
+ int num_operands;
+ xtensa_operand_internal *operands;
+
+ int num_iclasses;
+ xtensa_iclass_internal *iclasses;
+
+ int num_opcodes;
+ xtensa_opcode_internal *opcodes;
+ xtensa_lookup_entry *opname_lookup_table;
+
+ int num_regfiles;
+ xtensa_regfile_internal *regfiles;
+
+ int num_states;
+ xtensa_state_internal *states;
+ xtensa_lookup_entry *state_lookup_table;
+
+ int num_sysregs;
+ xtensa_sysreg_internal *sysregs;
+ xtensa_lookup_entry *sysreg_lookup_table;
+
+ /*
+ * The current Xtensa ISA only supports 256 of each kind of sysreg so
+ * we can get away with implementing lookups with tables indexed by
+ * the register numbers. If we ever allow larger sysreg numbers, this
+ * may have to be reimplemented. The first entry in the following
+ * arrays corresponds to "special" registers and the second to "user"
+ * registers.
+ */
+ int max_sysreg_num[2];
+ xtensa_sysreg *sysreg_table[2];
+
+ int num_interfaces;
+ xtensa_interface_internal *interfaces;
+ xtensa_lookup_entry *interface_lookup_table;
+
+ int num_funcUnits;
+ xtensa_funcUnit_internal *funcUnits;
+ xtensa_lookup_entry *funcUnit_lookup_table;
+
+ int num_stages; /* Number of pipe stages. */
+} xtensa_isa_internal;
+
+int xtensa_isa_name_compare(const void *, const void *);
+
+extern xtensa_isa_status xtisa_errno;
+extern char xtisa_error_msg[];
+
+#endif /* XTENSA_ISA_INTERNAL_H */
diff --git a/target/xtensa/xtensa-isa.c b/target/xtensa/xtensa-isa.c
new file mode 100644
index 000000000..630b4f9da
--- /dev/null
+++ b/target/xtensa/xtensa-isa.c
@@ -0,0 +1,1743 @@
+/* Configurable Xtensa ISA support.
+ *
+ * Copyright (c) 2001-2011 Tensilica Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "xtensa-isa.h"
+#include "xtensa-isa-internal.h"
+
+xtensa_isa_status xtisa_errno;
+char xtisa_error_msg[1024];
+
+
+xtensa_isa_status xtensa_isa_errno(xtensa_isa isa __attribute__ ((unused)))
+{
+ return xtisa_errno;
+}
+
+
+char *xtensa_isa_error_msg(xtensa_isa isa __attribute__ ((unused)))
+{
+ return xtisa_error_msg;
+}
+
+
+#define CHECK_ALLOC(MEM, ERRVAL) \
+ do { \
+ if ((MEM) == 0) { \
+ xtisa_errno = xtensa_isa_out_of_memory; \
+ strcpy(xtisa_error_msg, "out of memory"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+#define CHECK_ALLOC_FOR_INIT(MEM, ERRVAL, ERRNO_P, ERROR_MSG_P) \
+ do { \
+ if ((MEM) == 0) { \
+ xtisa_errno = xtensa_isa_out_of_memory; \
+ strcpy(xtisa_error_msg, "out of memory"); \
+ if (ERRNO_P) { \
+ *(ERRNO_P) = xtisa_errno; \
+ } \
+ if (ERROR_MSG_P) { \
+ *(ERROR_MSG_P) = xtisa_error_msg; \
+ } \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+/* Instruction buffers. */
+
+int xtensa_insnbuf_size(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->insnbuf_size;
+}
+
+
+xtensa_insnbuf xtensa_insnbuf_alloc(xtensa_isa isa)
+{
+ xtensa_insnbuf result = (xtensa_insnbuf)
+ malloc(xtensa_insnbuf_size(isa) * sizeof(xtensa_insnbuf_word));
+
+ CHECK_ALLOC(result, 0);
+ return result;
+}
+
+
+void xtensa_insnbuf_free(xtensa_isa isa __attribute__ ((unused)),
+ xtensa_insnbuf buf)
+{
+ free(buf);
+}
+
+
+/*
+ * Given <byte_index>, the index of a byte in an xtensa_insnbuf (our
+ * internal representation of an instruction word), return the index of
+ * the word containing it and the bit offset of that byte within the word.
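+ * For example, byte_index 5 maps to word 5 / 4 == 1 at bit offset
+ * (5 & 3) * 8 == 8.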
+ */
+
+static inline int byte_to_word_index(int byte_index)
+{
+ return byte_index / sizeof(xtensa_insnbuf_word);
+}
+
+
+static inline int byte_to_bit_index(int byte_index)
+{
+ return (byte_index & 0x3) * 8;
+}
+
+
+/*
+ * Copy an instruction in the 32-bit words pointed at by "insn" to
+ * characters pointed at by "cp". This is more complicated than you
+ * might think because we want 16-bit instructions in bytes 2 & 3 for
+ * big-endian configurations. This function allows us to specify
+ * which byte in "insn" to start with and which way to increment,
+ * allowing trivial implementation for both big- and little-endian
+ * configurations... and it seems to make pretty good code for
+ * both.
+ */
+
+int xtensa_insnbuf_to_chars(xtensa_isa isa,
+ const xtensa_insnbuf insn,
+ unsigned char *cp,
+ int num_chars)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int insn_size = xtensa_isa_maxlength(isa);
+ int fence_post, start, increment, i, byte_count;
+ xtensa_format fmt;
+
+ if (num_chars == 0) {
+ num_chars = insn_size;
+ }
+
+ if (intisa->is_big_endian) {
+ start = insn_size - 1;
+ increment = -1;
+ } else {
+ start = 0;
+ increment = 1;
+ }
+
+ /*
+ * Find the instruction format. Do nothing if the buffer does not contain
+ * a valid instruction since we need to know how many bytes to copy.
+ */
+ fmt = xtensa_format_decode(isa, insn);
+ if (fmt == XTENSA_UNDEFINED) {
+ return XTENSA_UNDEFINED;
+ }
+
+ byte_count = xtensa_format_length(isa, fmt);
+ if (byte_count == XTENSA_UNDEFINED) {
+ return XTENSA_UNDEFINED;
+ }
+
+ if (byte_count > num_chars) {
+ xtisa_errno = xtensa_isa_buffer_overflow;
+ strcpy(xtisa_error_msg, "output buffer too small for instruction");
+ return XTENSA_UNDEFINED;
+ }
+
+ fence_post = start + (byte_count * increment);
+
+ for (i = start; i != fence_post; i += increment, ++cp) {
+ int word_inx = byte_to_word_index(i);
+ int bit_inx = byte_to_bit_index(i);
+
+ *cp = (insn[word_inx] >> bit_inx) & 0xff;
+ }
+
+ return byte_count;
+}
+
+
+/*
+ * Inward conversion from byte stream to xtensa_insnbuf. See
+ * xtensa_insnbuf_to_chars for a discussion of why this is complicated
+ * by endianness.
+ */
+
+void xtensa_insnbuf_from_chars(xtensa_isa isa,
+ xtensa_insnbuf insn,
+ const unsigned char *cp,
+ int num_chars)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int max_size, insn_size, fence_post, start, increment, i;
+
+ max_size = xtensa_isa_maxlength(isa);
+
+ /* Decode the instruction length so we know how many bytes to read. */
+ insn_size = (intisa->length_decode_fn)(cp);
+ if (insn_size == XTENSA_UNDEFINED) {
+ /*
+ * This should never happen when the byte stream contains a
+ * valid instruction. Just read the maximum number of bytes....
+ */
+ insn_size = max_size;
+ }
+
+ if (num_chars == 0 || num_chars > insn_size) {
+ num_chars = insn_size;
+ }
+
+ if (intisa->is_big_endian) {
+ start = max_size - 1;
+ increment = -1;
+ } else {
+ start = 0;
+ increment = 1;
+ }
+
+ fence_post = start + (num_chars * increment);
+ memset(insn, 0, xtensa_insnbuf_size(isa) * sizeof(xtensa_insnbuf_word));
+
+ for (i = start; i != fence_post; i += increment, ++cp) {
+ int word_inx = byte_to_word_index(i);
+ int bit_inx = byte_to_bit_index(i);
+
+ insn[word_inx] |= (*cp & 0xff) << bit_inx;
+ }
+}
+
+
+/* ISA information. */
+
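+/*
+ * Build the runtime lookup structures for a statically generated module
+ * table: name tables sorted for bsearch() (see xtensa_isa_name_compare),
+ * per-number sysreg tables, and the derived insnbuf size.
+ */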
+xtensa_isa xtensa_isa_init(void *xtensa_modules, xtensa_isa_status *errno_p,
+ char **error_msg_p)
+{
+ xtensa_isa_internal *isa = xtensa_modules;
+ int n, is_user;
+
+ /* Set up the opcode name lookup table. */
+ isa->opname_lookup_table =
+ malloc(isa->num_opcodes * sizeof(xtensa_lookup_entry));
+ CHECK_ALLOC_FOR_INIT(isa->opname_lookup_table, NULL, errno_p, error_msg_p);
+ for (n = 0; n < isa->num_opcodes; n++) {
+ isa->opname_lookup_table[n].key = isa->opcodes[n].name;
+ isa->opname_lookup_table[n].u.opcode = n;
+ }
+ qsort(isa->opname_lookup_table, isa->num_opcodes,
+ sizeof(xtensa_lookup_entry), xtensa_isa_name_compare);
+
+ /* Set up the state name lookup table. */
+ isa->state_lookup_table =
+ malloc(isa->num_states * sizeof(xtensa_lookup_entry));
+ CHECK_ALLOC_FOR_INIT(isa->state_lookup_table, NULL, errno_p, error_msg_p);
+ for (n = 0; n < isa->num_states; n++) {
+ isa->state_lookup_table[n].key = isa->states[n].name;
+ isa->state_lookup_table[n].u.state = n;
+ }
+ qsort(isa->state_lookup_table, isa->num_states,
+ sizeof(xtensa_lookup_entry), xtensa_isa_name_compare);
+
+ /* Set up the sysreg name lookup table. */
+ isa->sysreg_lookup_table =
+ malloc(isa->num_sysregs * sizeof(xtensa_lookup_entry));
+ CHECK_ALLOC_FOR_INIT(isa->sysreg_lookup_table, NULL, errno_p, error_msg_p);
+ for (n = 0; n < isa->num_sysregs; n++) {
+ isa->sysreg_lookup_table[n].key = isa->sysregs[n].name;
+ isa->sysreg_lookup_table[n].u.sysreg = n;
+ }
+ qsort(isa->sysreg_lookup_table, isa->num_sysregs,
+ sizeof(xtensa_lookup_entry), xtensa_isa_name_compare);
+
+ /* Set up the user & system sysreg number tables. */
+ for (is_user = 0; is_user < 2; is_user++) {
+ isa->sysreg_table[is_user] =
+ malloc((isa->max_sysreg_num[is_user] + 1) * sizeof(xtensa_sysreg));
+ CHECK_ALLOC_FOR_INIT(isa->sysreg_table[is_user], NULL,
+ errno_p, error_msg_p);
+
+ for (n = 0; n <= isa->max_sysreg_num[is_user]; n++) {
+ isa->sysreg_table[is_user][n] = XTENSA_UNDEFINED;
+ }
+ }
+ for (n = 0; n < isa->num_sysregs; n++) {
+ xtensa_sysreg_internal *sreg = &isa->sysregs[n];
+ is_user = sreg->is_user;
+
+ if (sreg->number >= 0) {
+ isa->sysreg_table[is_user][sreg->number] = n;
+ }
+ }
+
+ /* Set up the interface lookup table. */
+ isa->interface_lookup_table =
+ malloc(isa->num_interfaces * sizeof(xtensa_lookup_entry));
+ CHECK_ALLOC_FOR_INIT(isa->interface_lookup_table, NULL, errno_p,
+ error_msg_p);
+ for (n = 0; n < isa->num_interfaces; n++) {
+ isa->interface_lookup_table[n].key = isa->interfaces[n].name;
+ isa->interface_lookup_table[n].u.intf = n;
+ }
+ qsort(isa->interface_lookup_table, isa->num_interfaces,
+ sizeof(xtensa_lookup_entry), xtensa_isa_name_compare);
+
+ /* Set up the funcUnit lookup table. */
+ isa->funcUnit_lookup_table =
+ malloc(isa->num_funcUnits * sizeof(xtensa_lookup_entry));
+ CHECK_ALLOC_FOR_INIT(isa->funcUnit_lookup_table, NULL, errno_p,
+ error_msg_p);
+ for (n = 0; n < isa->num_funcUnits; n++) {
+ isa->funcUnit_lookup_table[n].key = isa->funcUnits[n].name;
+ isa->funcUnit_lookup_table[n].u.fun = n;
+ }
+ qsort(isa->funcUnit_lookup_table, isa->num_funcUnits,
+ sizeof(xtensa_lookup_entry), xtensa_isa_name_compare);
+
+ isa->insnbuf_size = ((isa->insn_size + sizeof(xtensa_insnbuf_word) - 1) /
+ sizeof(xtensa_insnbuf_word));
+ isa->num_stages = XTENSA_UNDEFINED;
+
+ return (xtensa_isa)isa;
+}
+
+
+void xtensa_isa_free(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int n;
+
+ /*
+ * With this version of the code, the xtensa_isa structure is not
+ * dynamically allocated, so this function is not essential. Free
+ * the memory allocated by xtensa_isa_init and restore the xtensa_isa
+ * structure to its initial state.
+ */
+
+ if (intisa->opname_lookup_table) {
+ free(intisa->opname_lookup_table);
+ intisa->opname_lookup_table = 0;
+ }
+
+ if (intisa->state_lookup_table) {
+ free(intisa->state_lookup_table);
+ intisa->state_lookup_table = 0;
+ }
+
+ if (intisa->sysreg_lookup_table) {
+ free(intisa->sysreg_lookup_table);
+ intisa->sysreg_lookup_table = 0;
+ }
+ for (n = 0; n < 2; n++) {
+ if (intisa->sysreg_table[n]) {
+ free(intisa->sysreg_table[n]);
+ intisa->sysreg_table[n] = 0;
+ }
+ }
+
+ if (intisa->interface_lookup_table) {
+ free(intisa->interface_lookup_table);
+ intisa->interface_lookup_table = 0;
+ }
+
+ if (intisa->funcUnit_lookup_table) {
+ free(intisa->funcUnit_lookup_table);
+ intisa->funcUnit_lookup_table = 0;
+ }
+}
+
+
+int xtensa_isa_name_compare(const void *v1, const void *v2)
+{
+ xtensa_lookup_entry *e1 = (xtensa_lookup_entry *)v1;
+ xtensa_lookup_entry *e2 = (xtensa_lookup_entry *)v2;
+
+ return strcasecmp(e1->key, e2->key);
+}
+
+
+int xtensa_isa_maxlength(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->insn_size;
+}
+
+
+int xtensa_isa_length_from_chars(xtensa_isa isa, const unsigned char *cp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return (intisa->length_decode_fn)(cp);
+}
+
+
+int xtensa_isa_num_pipe_stages(xtensa_isa isa)
+{
+ xtensa_opcode opcode;
+ xtensa_funcUnit_use *use;
+ int num_opcodes, num_uses;
+ int i, stage, max_stage;
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ /* Only compute the value once. */
+ if (intisa->num_stages != XTENSA_UNDEFINED) {
+ return intisa->num_stages;
+ }
+
+ max_stage = -1;
+
+ num_opcodes = xtensa_isa_num_opcodes(isa);
+ for (opcode = 0; opcode < num_opcodes; opcode++) {
+ num_uses = xtensa_opcode_num_funcUnit_uses(isa, opcode);
+ for (i = 0; i < num_uses; i++) {
+ use = xtensa_opcode_funcUnit_use(isa, opcode, i);
+ stage = use->stage;
+ if (stage > max_stage) {
+ max_stage = stage;
+ }
+ }
+ }
+
+ intisa->num_stages = max_stage + 1;
+    return intisa->num_stages;
+}
+
+
+int xtensa_isa_num_formats(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_formats;
+}
+
+
+int xtensa_isa_num_opcodes(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_opcodes;
+}
+
+
+int xtensa_isa_num_regfiles(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_regfiles;
+}
+
+
+int xtensa_isa_num_states(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_states;
+}
+
+
+int xtensa_isa_num_sysregs(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_sysregs;
+}
+
+
+int xtensa_isa_num_interfaces(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_interfaces;
+}
+
+
+int xtensa_isa_num_funcUnits(xtensa_isa isa)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ return intisa->num_funcUnits;
+}
+
+
+/* Instruction formats. */
+
+
+#define CHECK_FORMAT(INTISA, FMT, ERRVAL) \
+ do { \
+ if ((FMT) < 0 || (FMT) >= (INTISA)->num_formats) { \
+ xtisa_errno = xtensa_isa_bad_format; \
+ strcpy(xtisa_error_msg, "invalid format specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+#define CHECK_SLOT(INTISA, FMT, SLOT, ERRVAL) \
+ do { \
+ if ((SLOT) < 0 || (SLOT) >= (INTISA)->formats[FMT].num_slots) { \
+ xtisa_errno = xtensa_isa_bad_slot; \
+ strcpy(xtisa_error_msg, "invalid slot specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+const char *xtensa_format_name(xtensa_isa isa, xtensa_format fmt)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FORMAT(intisa, fmt, NULL);
+ return intisa->formats[fmt].name;
+}
+
+
+xtensa_format xtensa_format_lookup(xtensa_isa isa, const char *fmtname)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int fmt;
+
+ if (!fmtname || !*fmtname) {
+ xtisa_errno = xtensa_isa_bad_format;
+ strcpy(xtisa_error_msg, "invalid format name");
+ return XTENSA_UNDEFINED;
+ }
+
+ for (fmt = 0; fmt < intisa->num_formats; fmt++) {
+ if (strcasecmp(fmtname, intisa->formats[fmt].name) == 0) {
+ return fmt;
+ }
+ }
+
+ xtisa_errno = xtensa_isa_bad_format;
+ sprintf(xtisa_error_msg, "format \"%s\" not recognized", fmtname);
+ return XTENSA_UNDEFINED;
+}
+
+
+xtensa_format xtensa_format_decode(xtensa_isa isa, const xtensa_insnbuf insn)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_format fmt;
+
+ fmt = (intisa->format_decode_fn)(insn);
+ if (fmt != XTENSA_UNDEFINED) {
+ return fmt;
+ }
+
+ xtisa_errno = xtensa_isa_bad_format;
+ strcpy(xtisa_error_msg, "cannot decode instruction format");
+ return XTENSA_UNDEFINED;
+}
+
+
+int xtensa_format_encode(xtensa_isa isa, xtensa_format fmt,
+ xtensa_insnbuf insn)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ (*intisa->formats[fmt].encode_fn)(insn);
+ return 0;
+}
+
+
+int xtensa_format_length(xtensa_isa isa, xtensa_format fmt)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FORMAT(intisa, fmt, XTENSA_UNDEFINED);
+ return intisa->formats[fmt].length;
+}
+
+
+int xtensa_format_num_slots(xtensa_isa isa, xtensa_format fmt)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FORMAT(intisa, fmt, XTENSA_UNDEFINED);
+ return intisa->formats[fmt].num_slots;
+}
+
+
+xtensa_opcode xtensa_format_slot_nop_opcode(xtensa_isa isa, xtensa_format fmt,
+ int slot)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int slot_id;
+
+ CHECK_FORMAT(intisa, fmt, XTENSA_UNDEFINED);
+ CHECK_SLOT(intisa, fmt, slot, XTENSA_UNDEFINED);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ return xtensa_opcode_lookup(isa, intisa->slots[slot_id].nop_name);
+}
+
+
+int xtensa_format_get_slot(xtensa_isa isa, xtensa_format fmt, int slot,
+ const xtensa_insnbuf insn, xtensa_insnbuf slotbuf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int slot_id;
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ CHECK_SLOT(intisa, fmt, slot, -1);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ (*intisa->slots[slot_id].get_fn)(insn, slotbuf);
+ return 0;
+}
+
+
+int xtensa_format_set_slot(xtensa_isa isa, xtensa_format fmt, int slot,
+ xtensa_insnbuf insn, const xtensa_insnbuf slotbuf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int slot_id;
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ CHECK_SLOT(intisa, fmt, slot, -1);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ (*intisa->slots[slot_id].set_fn)(insn, slotbuf);
+ return 0;
+}
+
+
+/* Opcode information. */
+
+
+#define CHECK_OPCODE(INTISA, OPC, ERRVAL) \
+ do { \
+ if ((OPC) < 0 || (OPC) >= (INTISA)->num_opcodes) { \
+ xtisa_errno = xtensa_isa_bad_opcode; \
+ strcpy(xtisa_error_msg, "invalid opcode specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_opcode xtensa_opcode_lookup(xtensa_isa isa, const char *opname)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_lookup_entry entry, *result = 0;
+
+ if (!opname || !*opname) {
+ xtisa_errno = xtensa_isa_bad_opcode;
+ strcpy(xtisa_error_msg, "invalid opcode name");
+ return XTENSA_UNDEFINED;
+ }
+
+ if (intisa->num_opcodes != 0) {
+ entry.key = opname;
+ result = bsearch(&entry, intisa->opname_lookup_table,
+ intisa->num_opcodes, sizeof(xtensa_lookup_entry),
+ xtensa_isa_name_compare);
+ }
+
+ if (!result) {
+ xtisa_errno = xtensa_isa_bad_opcode;
+ sprintf(xtisa_error_msg, "opcode \"%s\" not recognized", opname);
+ return XTENSA_UNDEFINED;
+ }
+
+ return result->u.opcode;
+}
+
+
+xtensa_opcode xtensa_opcode_decode(xtensa_isa isa, xtensa_format fmt, int slot,
+ const xtensa_insnbuf slotbuf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int slot_id;
+ xtensa_opcode opc;
+
+ CHECK_FORMAT(intisa, fmt, XTENSA_UNDEFINED);
+ CHECK_SLOT(intisa, fmt, slot, XTENSA_UNDEFINED);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+
+ opc = (intisa->slots[slot_id].opcode_decode_fn) (slotbuf);
+ if (opc != XTENSA_UNDEFINED) {
+ return opc;
+ }
+
+ xtisa_errno = xtensa_isa_bad_opcode;
+ strcpy(xtisa_error_msg, "cannot decode opcode");
+ return XTENSA_UNDEFINED;
+}
+
+
+int xtensa_opcode_encode(xtensa_isa isa, xtensa_format fmt, int slot,
+ xtensa_insnbuf slotbuf, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int slot_id;
+ xtensa_opcode_encode_fn encode_fn;
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ CHECK_SLOT(intisa, fmt, slot, -1);
+ CHECK_OPCODE(intisa, opc, -1);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ encode_fn = intisa->opcodes[opc].encode_fns[slot_id];
+ if (!encode_fn) {
+ xtisa_errno = xtensa_isa_wrong_slot;
+ sprintf(xtisa_error_msg,
+ "opcode \"%s\" is not allowed in slot %d of format \"%s\"",
+ intisa->opcodes[opc].name, slot, intisa->formats[fmt].name);
+ return -1;
+ }
+ (*encode_fn)(slotbuf);
+ return 0;
+}
+
+
+const char *xtensa_opcode_name(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, NULL);
+ return intisa->opcodes[opc].name;
+}
+
+
+int xtensa_opcode_is_branch(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ if ((intisa->opcodes[opc].flags & XTENSA_OPCODE_IS_BRANCH) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_opcode_is_jump(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ if ((intisa->opcodes[opc].flags & XTENSA_OPCODE_IS_JUMP) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_opcode_is_loop(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ if ((intisa->opcodes[opc].flags & XTENSA_OPCODE_IS_LOOP) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_opcode_is_call(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ if ((intisa->opcodes[opc].flags & XTENSA_OPCODE_IS_CALL) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_opcode_num_operands(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ return intisa->iclasses[iclass_id].num_operands;
+}
+
+
+int xtensa_opcode_num_stateOperands(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ return intisa->iclasses[iclass_id].num_stateOperands;
+}
+
+
+int xtensa_opcode_num_interfaceOperands(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ return intisa->iclasses[iclass_id].num_interfaceOperands;
+}
+
+
+int xtensa_opcode_num_funcUnit_uses(xtensa_isa isa, xtensa_opcode opc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ return intisa->opcodes[opc].num_funcUnit_uses;
+}
+
+
+xtensa_funcUnit_use *xtensa_opcode_funcUnit_use(xtensa_isa isa,
+ xtensa_opcode opc, int u)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_OPCODE(intisa, opc, NULL);
+ if (u < 0 || u >= intisa->opcodes[opc].num_funcUnit_uses) {
+ xtisa_errno = xtensa_isa_bad_funcUnit;
+ sprintf(xtisa_error_msg, "invalid functional unit use number (%d); "
+ "opcode \"%s\" has %d", u, intisa->opcodes[opc].name,
+ intisa->opcodes[opc].num_funcUnit_uses);
+ return NULL;
+ }
+ return &intisa->opcodes[opc].funcUnit_uses[u];
+}
+
+
+/* Operand information. */
+
+
+#define CHECK_OPERAND(INTISA, OPC, ICLASS, OPND, ERRVAL) \
+ do { \
+ if ((OPND) < 0 || (OPND) >= (ICLASS)->num_operands) { \
+ xtisa_errno = xtensa_isa_bad_operand; \
+ sprintf(xtisa_error_msg, "invalid operand number (%d); " \
+ "opcode \"%s\" has %d operands", (OPND), \
+ (INTISA)->opcodes[(OPC)].name, (ICLASS)->num_operands); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
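+/* Resolve (opcode, operand index) to an operand entry via the iclass. */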
+static xtensa_operand_internal *get_operand(xtensa_isa_internal *intisa,
+ xtensa_opcode opc, int opnd)
+{
+ xtensa_iclass_internal *iclass;
+ int iclass_id, operand_id;
+
+ CHECK_OPCODE(intisa, opc, NULL);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_OPERAND(intisa, opc, iclass, opnd, NULL);
+ operand_id = iclass->operands[opnd].u.operand_id;
+ return &intisa->operands[operand_id];
+}
+
+
+const char *xtensa_operand_name(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return NULL;
+ }
+ return intop->name;
+}
+
+
+int xtensa_operand_is_visible(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_iclass_internal *iclass;
+ int iclass_id, operand_id;
+ xtensa_operand_internal *intop;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_OPERAND(intisa, opc, iclass, opnd, XTENSA_UNDEFINED);
+
+ /* Special case for "sout" operands. */
+ if (iclass->operands[opnd].inout == 's') {
+ return 0;
+ }
+
+ operand_id = iclass->operands[opnd].u.operand_id;
+ intop = &intisa->operands[operand_id];
+
+ if ((intop->flags & XTENSA_OPERAND_IS_INVISIBLE) == 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+char xtensa_operand_inout(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_iclass_internal *iclass;
+ int iclass_id;
+ char inout;
+
+ CHECK_OPCODE(intisa, opc, 0);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_OPERAND(intisa, opc, iclass, opnd, 0);
+ inout = iclass->operands[opnd].inout;
+
+ /* Special case for "sout" and "_sin" operands. */
+ if (inout == 's') {
+ return 'o';
+ }
+ if (inout == 't') {
+ return 'i';
+ }
+ return inout;
+}
+
+
+int xtensa_operand_get_field(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ xtensa_format fmt, int slot,
+ const xtensa_insnbuf slotbuf, uint32_t *valp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+ int slot_id;
+ xtensa_get_field_fn get_fn;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ CHECK_SLOT(intisa, fmt, slot, -1);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ if (intop->field_id == XTENSA_UNDEFINED) {
+ xtisa_errno = xtensa_isa_no_field;
+ strcpy(xtisa_error_msg, "implicit operand has no field");
+ return -1;
+ }
+ get_fn = intisa->slots[slot_id].get_field_fns[intop->field_id];
+ if (!get_fn) {
+ xtisa_errno = xtensa_isa_wrong_slot;
+ sprintf(xtisa_error_msg,
+ "operand \"%s\" does not exist in slot %d of format \"%s\"",
+ intop->name, slot, intisa->formats[fmt].name);
+ return -1;
+ }
+ *valp = (*get_fn)(slotbuf);
+ return 0;
+}
+
+
+int xtensa_operand_set_field(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ xtensa_format fmt, int slot,
+ xtensa_insnbuf slotbuf, uint32_t val)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+ int slot_id;
+ xtensa_set_field_fn set_fn;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ CHECK_FORMAT(intisa, fmt, -1);
+ CHECK_SLOT(intisa, fmt, slot, -1);
+
+ slot_id = intisa->formats[fmt].slot_id[slot];
+ if (intop->field_id == XTENSA_UNDEFINED) {
+ xtisa_errno = xtensa_isa_no_field;
+ strcpy(xtisa_error_msg, "implicit operand has no field");
+ return -1;
+ }
+ set_fn = intisa->slots[slot_id].set_field_fns[intop->field_id];
+ if (!set_fn) {
+ xtisa_errno = xtensa_isa_wrong_slot;
+ sprintf(xtisa_error_msg,
+ "operand \"%s\" does not exist in slot %d of format \"%s\"",
+ intop->name, slot, intisa->formats[fmt].name);
+ return -1;
+ }
+ (*set_fn)(slotbuf, val);
+ return 0;
+}
+
+
+int xtensa_operand_encode(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ uint32_t *valp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+ uint32_t test_val, orig_val;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ if (!intop->encode) {
+ /*
+ * This is a default operand for a field. How can we tell if the
+ * value fits in the field? Write the value into the field,
+ * read it back, and then make sure we get the same value.
+ */
+ static xtensa_insnbuf tmpbuf;
+ int slot_id;
+
+ if (!tmpbuf) {
+ tmpbuf = xtensa_insnbuf_alloc(isa);
+ CHECK_ALLOC(tmpbuf, -1);
+ }
+
+ /*
+ * A default operand is always associated with a field,
+ * but check just to be sure....
+ */
+ if (intop->field_id == XTENSA_UNDEFINED) {
+ xtisa_errno = xtensa_isa_internal_error;
+ strcpy(xtisa_error_msg, "operand has no field");
+ return -1;
+ }
+
+ /* Find some slot that includes the field. */
+ for (slot_id = 0; slot_id < intisa->num_slots; slot_id++) {
+ xtensa_get_field_fn get_fn =
+ intisa->slots[slot_id].get_field_fns[intop->field_id];
+ xtensa_set_field_fn set_fn =
+ intisa->slots[slot_id].set_field_fns[intop->field_id];
+
+ if (get_fn && set_fn) {
+ (*set_fn)(tmpbuf, *valp);
+ return (*get_fn)(tmpbuf) != *valp;
+ }
+ }
+
+ /* Couldn't find any slot containing the field.... */
+ xtisa_errno = xtensa_isa_no_field;
+ strcpy(xtisa_error_msg, "field does not exist in any slot");
+ return -1;
+ }
+
+ /*
+ * Encode the value. In some cases, the encoding function may detect
+ * errors, but most of the time the only way to determine if the value
+ * was successfully encoded is to decode it and check if it matches
+ * the original value.
+ */
+ orig_val = *valp;
+ if ((*intop->encode)(valp) ||
+ (test_val = *valp, (*intop->decode)(&test_val)) ||
+ test_val != orig_val) {
+ xtisa_errno = xtensa_isa_bad_value;
+ sprintf(xtisa_error_msg, "cannot encode operand value 0x%08x", *valp);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int xtensa_operand_decode(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ uint32_t *valp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ /* Use identity function for "default" operands. */
+ if (!intop->decode) {
+ return 0;
+ }
+
+ if ((*intop->decode)(valp)) {
+ xtisa_errno = xtensa_isa_bad_value;
+ sprintf(xtisa_error_msg, "cannot decode operand value 0x%08x", *valp);
+ return -1;
+ }
+ return 0;
+}
+
+
+int xtensa_operand_is_register(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return XTENSA_UNDEFINED;
+ }
+
+ if ((intop->flags & XTENSA_OPERAND_IS_REGISTER) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+xtensa_regfile xtensa_operand_regfile(xtensa_isa isa, xtensa_opcode opc,
+ int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return XTENSA_UNDEFINED;
+ }
+
+ return intop->regfile;
+}
+
+
+int xtensa_operand_num_regs(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return XTENSA_UNDEFINED;
+ }
+
+ return intop->num_regs;
+}
+
+
+int xtensa_operand_is_known_reg(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return XTENSA_UNDEFINED;
+ }
+
+ if ((intop->flags & XTENSA_OPERAND_IS_UNKNOWN) == 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_operand_is_PCrelative(xtensa_isa isa, xtensa_opcode opc, int opnd)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return XTENSA_UNDEFINED;
+ }
+
+ if ((intop->flags & XTENSA_OPERAND_IS_PCRELATIVE) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_operand_do_reloc(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ uint32_t *valp, uint32_t pc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ if ((intop->flags & XTENSA_OPERAND_IS_PCRELATIVE) == 0) {
+ return 0;
+ }
+
+ if (!intop->do_reloc) {
+ xtisa_errno = xtensa_isa_internal_error;
+ strcpy(xtisa_error_msg, "operand missing do_reloc function");
+ return -1;
+ }
+
+ if ((*intop->do_reloc)(valp, pc)) {
+ xtisa_errno = xtensa_isa_bad_value;
+ sprintf(xtisa_error_msg,
+ "do_reloc failed for value 0x%08x at PC 0x%08x", *valp, pc);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int xtensa_operand_undo_reloc(xtensa_isa isa, xtensa_opcode opc, int opnd,
+ uint32_t *valp, uint32_t pc)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_operand_internal *intop;
+
+ intop = get_operand(intisa, opc, opnd);
+ if (!intop) {
+ return -1;
+ }
+
+ if ((intop->flags & XTENSA_OPERAND_IS_PCRELATIVE) == 0) {
+ return 0;
+ }
+
+ if (!intop->undo_reloc) {
+ xtisa_errno = xtensa_isa_internal_error;
+ strcpy(xtisa_error_msg, "operand missing undo_reloc function");
+ return -1;
+ }
+
+ if ((*intop->undo_reloc)(valp, pc)) {
+ xtisa_errno = xtensa_isa_bad_value;
+ sprintf(xtisa_error_msg,
+ "undo_reloc failed for value 0x%08x at PC 0x%08x", *valp, pc);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* State Operands. */
+
+
+#define CHECK_STATE_OPERAND(INTISA, OPC, ICLASS, STOP, ERRVAL) \
+ do { \
+ if ((STOP) < 0 || (STOP) >= (ICLASS)->num_stateOperands) { \
+ xtisa_errno = xtensa_isa_bad_operand; \
+ sprintf(xtisa_error_msg, "invalid state operand number (%d); " \
+ "opcode \"%s\" has %d state operands", (STOP), \
+ (INTISA)->opcodes[(OPC)].name, \
+ (ICLASS)->num_stateOperands); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_state xtensa_stateOperand_state(xtensa_isa isa, xtensa_opcode opc,
+ int stOp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_iclass_internal *iclass;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_STATE_OPERAND(intisa, opc, iclass, stOp, XTENSA_UNDEFINED);
+ return iclass->stateOperands[stOp].u.state;
+}
+
+
+char xtensa_stateOperand_inout(xtensa_isa isa, xtensa_opcode opc, int stOp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_iclass_internal *iclass;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, 0);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_STATE_OPERAND(intisa, opc, iclass, stOp, 0);
+ return iclass->stateOperands[stOp].inout;
+}
+
+
+/* Interface Operands. */
+
+
+#define CHECK_INTERFACE_OPERAND(INTISA, OPC, ICLASS, IFOP, ERRVAL) \
+ do { \
+ if ((IFOP) < 0 || (IFOP) >= (ICLASS)->num_interfaceOperands) { \
+ xtisa_errno = xtensa_isa_bad_operand; \
+ sprintf(xtisa_error_msg, \
+ "invalid interface operand number (%d); " \
+ "opcode \"%s\" has %d interface operands", (IFOP), \
+ (INTISA)->opcodes[(OPC)].name, \
+ (ICLASS)->num_interfaceOperands); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_interface xtensa_interfaceOperand_interface(xtensa_isa isa,
+ xtensa_opcode opc,
+ int ifOp)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_iclass_internal *iclass;
+ int iclass_id;
+
+ CHECK_OPCODE(intisa, opc, XTENSA_UNDEFINED);
+ iclass_id = intisa->opcodes[opc].iclass_id;
+ iclass = &intisa->iclasses[iclass_id];
+ CHECK_INTERFACE_OPERAND(intisa, opc, iclass, ifOp, XTENSA_UNDEFINED);
+ return iclass->interfaceOperands[ifOp];
+}
+
+
+/* Register Files. */
+
+
+#define CHECK_REGFILE(INTISA, RF, ERRVAL) \
+ do { \
+ if ((RF) < 0 || (RF) >= (INTISA)->num_regfiles) { \
+ xtisa_errno = xtensa_isa_bad_regfile; \
+ strcpy(xtisa_error_msg, "invalid regfile specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_regfile xtensa_regfile_lookup(xtensa_isa isa, const char *name)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int n;
+
+ if (!name || !*name) {
+ xtisa_errno = xtensa_isa_bad_regfile;
+ strcpy(xtisa_error_msg, "invalid regfile name");
+ return XTENSA_UNDEFINED;
+ }
+
+ /* The expected number of regfiles is small; use a linear search. */
+ for (n = 0; n < intisa->num_regfiles; n++) {
+ if (!strcmp(intisa->regfiles[n].name, name)) {
+ return n;
+ }
+ }
+
+ xtisa_errno = xtensa_isa_bad_regfile;
+ sprintf(xtisa_error_msg, "regfile \"%s\" not recognized", name);
+ return XTENSA_UNDEFINED;
+}
+
+
+xtensa_regfile xtensa_regfile_lookup_shortname(xtensa_isa isa,
+ const char *shortname)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ int n;
+
+ if (!shortname || !*shortname) {
+ xtisa_errno = xtensa_isa_bad_regfile;
+ strcpy(xtisa_error_msg, "invalid regfile shortname");
+ return XTENSA_UNDEFINED;
+ }
+
+ /* The expected number of regfiles is small; use a linear search. */
+ for (n = 0; n < intisa->num_regfiles; n++) {
+ /*
+ * Ignore regfile views since they always have the same shortnames
+ * as their parents.
+ */
+ if (intisa->regfiles[n].parent != n) {
+ continue;
+ }
+ if (!strcmp(intisa->regfiles[n].shortname, shortname)) {
+ return n;
+ }
+ }
+
+ xtisa_errno = xtensa_isa_bad_regfile;
+ sprintf(xtisa_error_msg, "regfile shortname \"%s\" not recognized",
+ shortname);
+ return XTENSA_UNDEFINED;
+}
+
+
+const char *xtensa_regfile_name(xtensa_isa isa, xtensa_regfile rf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_REGFILE(intisa, rf, NULL);
+ return intisa->regfiles[rf].name;
+}
+
+
+const char *xtensa_regfile_shortname(xtensa_isa isa, xtensa_regfile rf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_REGFILE(intisa, rf, NULL);
+ return intisa->regfiles[rf].shortname;
+}
+
+
+xtensa_regfile xtensa_regfile_view_parent(xtensa_isa isa, xtensa_regfile rf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_REGFILE(intisa, rf, XTENSA_UNDEFINED);
+ return intisa->regfiles[rf].parent;
+}
+
+
+int xtensa_regfile_num_bits(xtensa_isa isa, xtensa_regfile rf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_REGFILE(intisa, rf, XTENSA_UNDEFINED);
+ return intisa->regfiles[rf].num_bits;
+}
+
+
+int xtensa_regfile_num_entries(xtensa_isa isa, xtensa_regfile rf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_REGFILE(intisa, rf, XTENSA_UNDEFINED);
+ return intisa->regfiles[rf].num_entries;
+}
+
+
+/* Processor States. */
+
+
+#define CHECK_STATE(INTISA, ST, ERRVAL) \
+ do { \
+ if ((ST) < 0 || (ST) >= (INTISA)->num_states) { \
+ xtisa_errno = xtensa_isa_bad_state; \
+ strcpy(xtisa_error_msg, "invalid state specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_state xtensa_state_lookup(xtensa_isa isa, const char *name)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+ xtensa_lookup_entry entry, *result = 0;
+
+ if (!name || !*name) {
+ xtisa_errno = xtensa_isa_bad_state;
+ strcpy(xtisa_error_msg, "invalid state name");
+ return XTENSA_UNDEFINED;
+ }
+
+ if (intisa->num_states != 0) {
+ entry.key = name;
+ result = bsearch(&entry, intisa->state_lookup_table,
+ intisa->num_states, sizeof(xtensa_lookup_entry),
+ xtensa_isa_name_compare);
+ }
+
+ if (!result) {
+ xtisa_errno = xtensa_isa_bad_state;
+ sprintf(xtisa_error_msg, "state \"%s\" not recognized", name);
+ return XTENSA_UNDEFINED;
+ }
+
+ return result->u.state;
+}
+
+
+const char *xtensa_state_name(xtensa_isa isa, xtensa_state st)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_STATE(intisa, st, NULL);
+ return intisa->states[st].name;
+}
+
+
+int xtensa_state_num_bits(xtensa_isa isa, xtensa_state st)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_STATE(intisa, st, XTENSA_UNDEFINED);
+ return intisa->states[st].num_bits;
+}
+
+
+int xtensa_state_is_exported(xtensa_isa isa, xtensa_state st)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_STATE(intisa, st, XTENSA_UNDEFINED);
+ if ((intisa->states[st].flags & XTENSA_STATE_IS_EXPORTED) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_state_is_shared_or(xtensa_isa isa, xtensa_state st)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_STATE(intisa, st, XTENSA_UNDEFINED);
+ if ((intisa->states[st].flags & XTENSA_STATE_IS_SHARED_OR) != 0) {
+ return 1;
+ }
+ return 0;
+}
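+
+
+/*
+ * A minimal usage sketch, assuming an initialised isa handle: query a
+ * well-known state such as PS by name.
+ *
+ *   xtensa_state ps = xtensa_state_lookup(isa, "PS");
+ *   if (ps != XTENSA_UNDEFINED) {
+ *       printf("PS: %d bits, exported: %d\n",
+ *              xtensa_state_num_bits(isa, ps),
+ *              xtensa_state_is_exported(isa, ps));
+ *   }
+ */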
+
+
+/* Sysregs. */
+
+
+#define CHECK_SYSREG(INTISA, SYSREG, ERRVAL) \
+ do { \
+ if ((SYSREG) < 0 || (SYSREG) >= (INTISA)->num_sysregs) { \
+ xtisa_errno = xtensa_isa_bad_sysreg; \
+ strcpy(xtisa_error_msg, "invalid sysreg specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_sysreg xtensa_sysreg_lookup(xtensa_isa isa, int num, int is_user)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
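+    /* Collapse any nonzero is_user to 1; it indexes the two-entry tables. */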
+ if (is_user != 0) {
+ is_user = 1;
+ }
+
+ if (num < 0 || num > intisa->max_sysreg_num[is_user] ||
+ intisa->sysreg_table[is_user][num] == XTENSA_UNDEFINED) {
+ xtisa_errno = xtensa_isa_bad_sysreg;
+ strcpy(xtisa_error_msg, "sysreg not recognized");
+ return XTENSA_UNDEFINED;
+ }
+
+ return intisa->sysreg_table[is_user][num];
+}
+
+
+xtensa_sysreg xtensa_sysreg_lookup_name(xtensa_isa isa, const char *name)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+    xtensa_lookup_entry entry, *result = NULL;
+
+ if (!name || !*name) {
+ xtisa_errno = xtensa_isa_bad_sysreg;
+ strcpy(xtisa_error_msg, "invalid sysreg name");
+ return XTENSA_UNDEFINED;
+ }
+
+ if (intisa->num_sysregs != 0) {
+ entry.key = name;
+ result = bsearch(&entry, intisa->sysreg_lookup_table,
+ intisa->num_sysregs, sizeof(xtensa_lookup_entry),
+ xtensa_isa_name_compare);
+ }
+
+ if (!result) {
+ xtisa_errno = xtensa_isa_bad_sysreg;
+ sprintf(xtisa_error_msg, "sysreg \"%s\" not recognized", name);
+ return XTENSA_UNDEFINED;
+ }
+
+ return result->u.sysreg;
+}
+
+
+const char *xtensa_sysreg_name(xtensa_isa isa, xtensa_sysreg sysreg)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_SYSREG(intisa, sysreg, NULL);
+ return intisa->sysregs[sysreg].name;
+}
+
+
+int xtensa_sysreg_number(xtensa_isa isa, xtensa_sysreg sysreg)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_SYSREG(intisa, sysreg, XTENSA_UNDEFINED);
+ return intisa->sysregs[sysreg].number;
+}
+
+
+int xtensa_sysreg_is_user(xtensa_isa isa, xtensa_sysreg sysreg)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_SYSREG(intisa, sysreg, XTENSA_UNDEFINED);
+ if (intisa->sysregs[sysreg].is_user) {
+ return 1;
+ }
+ return 0;
+}
+
+
+/* Interfaces. */
+
+
+#define CHECK_INTERFACE(INTISA, INTF, ERRVAL) \
+ do { \
+ if ((INTF) < 0 || (INTF) >= (INTISA)->num_interfaces) { \
+ xtisa_errno = xtensa_isa_bad_interface; \
+ strcpy(xtisa_error_msg, "invalid interface specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_interface xtensa_interface_lookup(xtensa_isa isa, const char *ifname)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+    xtensa_lookup_entry entry, *result = NULL;
+
+ if (!ifname || !*ifname) {
+ xtisa_errno = xtensa_isa_bad_interface;
+ strcpy(xtisa_error_msg, "invalid interface name");
+ return XTENSA_UNDEFINED;
+ }
+
+ if (intisa->num_interfaces != 0) {
+ entry.key = ifname;
+ result = bsearch(&entry, intisa->interface_lookup_table,
+ intisa->num_interfaces, sizeof(xtensa_lookup_entry),
+ xtensa_isa_name_compare);
+ }
+
+ if (!result) {
+ xtisa_errno = xtensa_isa_bad_interface;
+ sprintf(xtisa_error_msg, "interface \"%s\" not recognized", ifname);
+ return XTENSA_UNDEFINED;
+ }
+
+ return result->u.intf;
+}
+
+
+const char *xtensa_interface_name(xtensa_isa isa, xtensa_interface intf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_INTERFACE(intisa, intf, NULL);
+ return intisa->interfaces[intf].name;
+}
+
+
+int xtensa_interface_num_bits(xtensa_isa isa, xtensa_interface intf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_INTERFACE(intisa, intf, XTENSA_UNDEFINED);
+ return intisa->interfaces[intf].num_bits;
+}
+
+
+char xtensa_interface_inout(xtensa_isa isa, xtensa_interface intf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_INTERFACE(intisa, intf, 0);
+ return intisa->interfaces[intf].inout;
+}
+
+
+int xtensa_interface_has_side_effect(xtensa_isa isa, xtensa_interface intf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_INTERFACE(intisa, intf, XTENSA_UNDEFINED);
+ if ((intisa->interfaces[intf].flags &
+ XTENSA_INTERFACE_HAS_SIDE_EFFECT) != 0) {
+ return 1;
+ }
+ return 0;
+}
+
+
+int xtensa_interface_class_id(xtensa_isa isa, xtensa_interface intf)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_INTERFACE(intisa, intf, XTENSA_UNDEFINED);
+ return intisa->interfaces[intf].class_id;
+}
+
+
+/* Functional Units. */
+
+
+#define CHECK_FUNCUNIT(INTISA, FUN, ERRVAL) \
+ do { \
+ if ((FUN) < 0 || (FUN) >= (INTISA)->num_funcUnits) { \
+ xtisa_errno = xtensa_isa_bad_funcUnit; \
+ strcpy(xtisa_error_msg, "invalid functional unit specifier"); \
+ return ERRVAL; \
+ } \
+ } while (0)
+
+
+xtensa_funcUnit xtensa_funcUnit_lookup(xtensa_isa isa, const char *fname)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+    xtensa_lookup_entry entry, *result = NULL;
+
+ if (!fname || !*fname) {
+ xtisa_errno = xtensa_isa_bad_funcUnit;
+ strcpy(xtisa_error_msg, "invalid functional unit name");
+ return XTENSA_UNDEFINED;
+ }
+
+ if (intisa->num_funcUnits != 0) {
+ entry.key = fname;
+ result = bsearch(&entry, intisa->funcUnit_lookup_table,
+ intisa->num_funcUnits, sizeof(xtensa_lookup_entry),
+ xtensa_isa_name_compare);
+ }
+
+ if (!result) {
+ xtisa_errno = xtensa_isa_bad_funcUnit;
+ sprintf(xtisa_error_msg,
+ "functional unit \"%s\" not recognized", fname);
+ return XTENSA_UNDEFINED;
+ }
+
+ return result->u.fun;
+}
+
+
+const char *xtensa_funcUnit_name(xtensa_isa isa, xtensa_funcUnit fun)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FUNCUNIT(intisa, fun, NULL);
+ return intisa->funcUnits[fun].name;
+}
+
+
+int xtensa_funcUnit_num_copies(xtensa_isa isa, xtensa_funcUnit fun)
+{
+ xtensa_isa_internal *intisa = (xtensa_isa_internal *)isa;
+
+ CHECK_FUNCUNIT(intisa, fun, XTENSA_UNDEFINED);
+ return intisa->funcUnits[fun].num_copies;
+}
diff --git a/target/xtensa/xtensa-isa.h b/target/xtensa/xtensa-isa.h
new file mode 100644
index 000000000..0f0211f84
--- /dev/null
+++ b/target/xtensa/xtensa-isa.h
@@ -0,0 +1 @@
+#include "hw/xtensa/xtensa-isa.h"
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
new file mode 100644
index 000000000..fa21b7e11
--- /dev/null
+++ b/target/xtensa/xtensa-semi.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "chardev/char-fe.h"
+#include "exec/helper-proto.h"
+#include "semihosting/semihost.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+
+enum {
+ TARGET_SYS_exit = 1,
+ TARGET_SYS_read = 3,
+ TARGET_SYS_write = 4,
+ TARGET_SYS_open = 5,
+ TARGET_SYS_close = 6,
+ TARGET_SYS_lseek = 19,
+ TARGET_SYS_select_one = 29,
+
+ TARGET_SYS_argc = 1000,
+ TARGET_SYS_argv_sz = 1001,
+ TARGET_SYS_argv = 1002,
+ TARGET_SYS_memset = 1004,
+};
+
+enum {
+ SELECT_ONE_READ = 1,
+ SELECT_ONE_WRITE = 2,
+ SELECT_ONE_EXCEPT = 3,
+};
+
+enum {
+ TARGET_EPERM = 1,
+ TARGET_ENOENT = 2,
+ TARGET_ESRCH = 3,
+ TARGET_EINTR = 4,
+ TARGET_EIO = 5,
+ TARGET_ENXIO = 6,
+ TARGET_E2BIG = 7,
+ TARGET_ENOEXEC = 8,
+ TARGET_EBADF = 9,
+ TARGET_ECHILD = 10,
+ TARGET_EAGAIN = 11,
+ TARGET_ENOMEM = 12,
+ TARGET_EACCES = 13,
+ TARGET_EFAULT = 14,
+ TARGET_ENOTBLK = 15,
+ TARGET_EBUSY = 16,
+ TARGET_EEXIST = 17,
+ TARGET_EXDEV = 18,
+ TARGET_ENODEV = 19,
+ TARGET_ENOTDIR = 20,
+ TARGET_EISDIR = 21,
+ TARGET_EINVAL = 22,
+ TARGET_ENFILE = 23,
+ TARGET_EMFILE = 24,
+ TARGET_ENOTTY = 25,
+ TARGET_ETXTBSY = 26,
+ TARGET_EFBIG = 27,
+ TARGET_ENOSPC = 28,
+ TARGET_ESPIPE = 29,
+ TARGET_EROFS = 30,
+ TARGET_EMLINK = 31,
+ TARGET_EPIPE = 32,
+ TARGET_EDOM = 33,
+ TARGET_ERANGE = 34,
+ TARGET_ENOSYS = 88,
+ TARGET_ELOOP = 92,
+};
+
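+/*
+ * Map a host errno value onto the guest numbering defined above.
+ * Anything unrecognized falls back to TARGET_EINVAL.
+ */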
+static uint32_t errno_h2g(int host_errno)
+{
+ switch (host_errno) {
+ case 0: return 0;
+ case EPERM: return TARGET_EPERM;
+ case ENOENT: return TARGET_ENOENT;
+ case ESRCH: return TARGET_ESRCH;
+ case EINTR: return TARGET_EINTR;
+ case EIO: return TARGET_EIO;
+ case ENXIO: return TARGET_ENXIO;
+ case E2BIG: return TARGET_E2BIG;
+ case ENOEXEC: return TARGET_ENOEXEC;
+ case EBADF: return TARGET_EBADF;
+ case ECHILD: return TARGET_ECHILD;
+ case EAGAIN: return TARGET_EAGAIN;
+ case ENOMEM: return TARGET_ENOMEM;
+ case EACCES: return TARGET_EACCES;
+ case EFAULT: return TARGET_EFAULT;
+#ifdef ENOTBLK
+ case ENOTBLK: return TARGET_ENOTBLK;
+#endif
+ case EBUSY: return TARGET_EBUSY;
+ case EEXIST: return TARGET_EEXIST;
+ case EXDEV: return TARGET_EXDEV;
+ case ENODEV: return TARGET_ENODEV;
+ case ENOTDIR: return TARGET_ENOTDIR;
+ case EISDIR: return TARGET_EISDIR;
+ case EINVAL: return TARGET_EINVAL;
+ case ENFILE: return TARGET_ENFILE;
+ case EMFILE: return TARGET_EMFILE;
+ case ENOTTY: return TARGET_ENOTTY;
+#ifdef ETXTBSY
+ case ETXTBSY: return TARGET_ETXTBSY;
+#endif
+ case EFBIG: return TARGET_EFBIG;
+ case ENOSPC: return TARGET_ENOSPC;
+ case ESPIPE: return TARGET_ESPIPE;
+ case EROFS: return TARGET_EROFS;
+ case EMLINK: return TARGET_EMLINK;
+ case EPIPE: return TARGET_EPIPE;
+ case EDOM: return TARGET_EDOM;
+ case ERANGE: return TARGET_ERANGE;
+ case ENOSYS: return TARGET_ENOSYS;
+#ifdef ELOOP
+ case ELOOP: return TARGET_ELOOP;
+#endif
+    }
+
+ return TARGET_EINVAL;
+}
+
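+/*
+ * Optional chardev-backed console. When present, guest fds 0..2 are
+ * serviced through it; input bytes are staged in a small buffer until
+ * the guest reads them.
+ */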
+typedef struct XtensaSimConsole {
+ CharBackend be;
+ struct {
+ char buffer[16];
+ size_t offset;
+ } input;
+} XtensaSimConsole;
+
+static XtensaSimConsole *sim_console;
+
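+/*
+ * Chardev handlers: can_read reports the free space left in the input
+ * buffer, and read appends incoming bytes, clamped to that space.
+ */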
+static IOCanReadHandler sim_console_can_read;
+static int sim_console_can_read(void *opaque)
+{
+ XtensaSimConsole *p = opaque;
+
+ return sizeof(p->input.buffer) - p->input.offset;
+}
+
+static IOReadHandler sim_console_read;
+static void sim_console_read(void *opaque, const uint8_t *buf, int size)
+{
+ XtensaSimConsole *p = opaque;
+ size_t copy = sizeof(p->input.buffer) - p->input.offset;
+
+ if (size < copy) {
+ copy = size;
+ }
+ memcpy(p->input.buffer + p->input.offset, buf, copy);
+ p->input.offset += copy;
+}
+
+void xtensa_sim_open_console(Chardev *chr)
+{
+ static XtensaSimConsole console;
+
+ qemu_chr_fe_init(&console.be, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&console.be,
+ sim_console_can_read,
+ sim_console_read,
+ NULL, NULL, &console,
+ NULL, true);
+ sim_console = &console;
+}
+
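+/*
+ * SIMCALL convention: on entry regs[2] (a2) holds the syscall number and
+ * regs[3]..regs[5] (a3..a5) its arguments; on return regs[2] holds the
+ * result and regs[3] the guest errno.
+ */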
+void HELPER(simcall)(CPUXtensaState *env)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t *regs = env->regs;
+
+ switch (regs[2]) {
+ case TARGET_SYS_exit:
+ exit(regs[3]);
+ break;
+
+ case TARGET_SYS_read:
+ case TARGET_SYS_write:
+ {
+ bool is_write = regs[2] == TARGET_SYS_write;
+ uint32_t fd = regs[3];
+ uint32_t vaddr = regs[4];
+ uint32_t len = regs[5];
+ uint32_t len_done = 0;
+
+ while (len > 0) {
+ hwaddr paddr = cpu_get_phys_page_debug(cs, vaddr);
+ uint32_t page_left =
+ TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
+ uint32_t io_sz = page_left < len ? page_left : len;
+ hwaddr sz = io_sz;
+ void *buf = cpu_physical_memory_map(paddr, &sz, !is_write);
+ uint32_t io_done;
+ bool error = false;
+
+ if (buf) {
+ vaddr += io_sz;
+ len -= io_sz;
+ if (fd < 3 && sim_console) {
+ if (is_write && (fd == 1 || fd == 2)) {
+ io_done = qemu_chr_fe_write_all(&sim_console->be,
+ buf, io_sz);
+ regs[3] = errno_h2g(errno);
+ } else if (!is_write && fd == 0) {
+ if (sim_console->input.offset) {
+ io_done = sim_console->input.offset;
+ if (io_sz < io_done) {
+ io_done = io_sz;
+ }
+ memcpy(buf, sim_console->input.buffer, io_done);
+ memmove(sim_console->input.buffer,
+ sim_console->input.buffer + io_done,
+ sim_console->input.offset - io_done);
+ sim_console->input.offset -= io_done;
+ qemu_chr_fe_accept_input(&sim_console->be);
+ } else {
+ io_done = -1;
+ regs[3] = TARGET_EAGAIN;
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s fd %d is not supported with chardev console\n",
+ is_write ?
+ "writing to" : "reading from", fd);
+ io_done = -1;
+ regs[3] = TARGET_EBADF;
+ }
+ } else {
+ io_done = is_write ?
+ write(fd, buf, io_sz) :
+ read(fd, buf, io_sz);
+ regs[3] = errno_h2g(errno);
+ }
+ if (io_done == -1) {
+ error = true;
+ io_done = 0;
+ }
+ cpu_physical_memory_unmap(buf, sz, !is_write, io_done);
+ } else {
+ error = true;
+ regs[3] = TARGET_EINVAL;
+ break;
+ }
+ if (error) {
+ if (!len_done) {
+ len_done = -1;
+ }
+ break;
+ }
+ len_done += io_done;
+ if (io_done < io_sz) {
+ break;
+ }
+ }
+ regs[2] = len_done;
+ }
+ break;
+
+ case TARGET_SYS_open:
+ {
+ char name[1024];
+ int rc;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(name); ++i) {
+ rc = cpu_memory_rw_debug(cs, regs[3] + i,
+ (uint8_t *)name + i, 1, 0);
+ if (rc != 0 || name[i] == 0) {
+ break;
+ }
+ }
+
+ if (rc == 0 && i < ARRAY_SIZE(name)) {
+ regs[2] = open(name, regs[4], regs[5]);
+ regs[3] = errno_h2g(errno);
+ } else {
+ regs[2] = -1;
+ regs[3] = TARGET_EINVAL;
+ }
+ }
+ break;
+
+ case TARGET_SYS_close:
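+        /* Never close the host stdio descriptors on the guest's behalf. */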
+ if (regs[3] < 3) {
+ regs[2] = regs[3] = 0;
+ } else {
+ regs[2] = close(regs[3]);
+ regs[3] = errno_h2g(errno);
+ }
+ break;
+
+ case TARGET_SYS_lseek:
+ regs[2] = lseek(regs[3], (off_t)(int32_t)regs[4], regs[5]);
+ regs[3] = errno_h2g(errno);
+ break;
+
+ case TARGET_SYS_select_one:
+ {
+ uint32_t fd = regs[3];
+ uint32_t rq = regs[4];
+ uint32_t target_tv = regs[5];
+ uint32_t target_tvv[2];
+
+ struct timeval tv = {0};
+
+ if (target_tv) {
+ cpu_memory_rw_debug(cs, target_tv,
+ (uint8_t *)target_tvv, sizeof(target_tvv), 0);
+ tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
+ tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
+ }
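+        /*
+         * With the chardev console, stdout and stderr are always writable
+         * and stdin is readable only when buffered input is pending; any
+         * other fd is polled with a real select() on the host.
+         */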
+ if (fd < 3 && sim_console) {
+ if ((fd == 1 || fd == 2) && rq == SELECT_ONE_WRITE) {
+ regs[2] = 1;
+ } else if (fd == 0 && rq == SELECT_ONE_READ) {
+ regs[2] = sim_console->input.offset > 0;
+ } else {
+ regs[2] = 0;
+ }
+ regs[3] = 0;
+ } else {
+ fd_set fdset;
+
+ FD_ZERO(&fdset);
+ FD_SET(fd, &fdset);
+ regs[2] = select(fd + 1,
+ rq == SELECT_ONE_READ ? &fdset : NULL,
+ rq == SELECT_ONE_WRITE ? &fdset : NULL,
+ rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
+ target_tv ? &tv : NULL);
+ regs[3] = errno_h2g(errno);
+ }
+ }
+ break;
+
+ case TARGET_SYS_argc:
+ regs[2] = semihosting_get_argc();
+ regs[3] = 0;
+ break;
+
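+    /*
+     * Size of the argv area: argc + 1 guest pointers plus the
+     * NUL-terminated argument strings.
+     */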
+ case TARGET_SYS_argv_sz:
+ {
+ int argc = semihosting_get_argc();
+ int sz = (argc + 1) * sizeof(uint32_t);
+ int i;
+
+ for (i = 0; i < argc; ++i) {
+ sz += 1 + strlen(semihosting_get_arg(i));
+ }
+ regs[2] = sz;
+ regs[3] = 0;
+ }
+ break;
+
+ case TARGET_SYS_argv:
+ {
+ int argc = semihosting_get_argc();
+ int str_offset = (argc + 1) * sizeof(uint32_t);
+ int i;
+ uint32_t argptr;
+
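+        /*
+         * Lay out argv at regs[3]: argc + 1 guest pointers first, then
+         * the NUL-terminated strings they point at.
+         */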
+ for (i = 0; i < argc; ++i) {
+ const char *str = semihosting_get_arg(i);
+ int str_size = strlen(str) + 1;
+
+ argptr = tswap32(regs[3] + str_offset);
+
+ cpu_memory_rw_debug(cs,
+ regs[3] + i * sizeof(uint32_t),
+ (uint8_t *)&argptr, sizeof(argptr), 1);
+ cpu_memory_rw_debug(cs,
+ regs[3] + str_offset,
+ (uint8_t *)str, str_size, 1);
+ str_offset += str_size;
+ }
+ argptr = 0;
+ cpu_memory_rw_debug(cs,
+ regs[3] + i * sizeof(uint32_t),
+ (uint8_t *)&argptr, sizeof(argptr), 1);
+ regs[3] = 0;
+ }
+ break;
+
+ case TARGET_SYS_memset:
+ {
+ uint32_t base = regs[3];
+ uint32_t sz = regs[5];
+
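+        /*
+         * memset guest memory page by page; advance a single byte past
+         * any region that cannot be mapped.
+         */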
+ while (sz) {
+ hwaddr len = sz;
+ void *buf = cpu_physical_memory_map(base, &len, 1);
+
+ if (buf && len) {
+ memset(buf, regs[4], len);
+ cpu_physical_memory_unmap(buf, len, 1, len);
+ } else {
+ len = 1;
+ }
+ base += len;
+ sz -= len;
+ }
+ regs[2] = regs[3];
+ regs[3] = 0;
+ }
+ break;
+
+ default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s(%d): not implemented\n",
+                      __func__, regs[2]);
+ regs[2] = -1;
+ regs[3] = TARGET_ENOSYS;
+ break;
+ }
+}